Re: [PATCH] drm/panfrost: devfreq: Don't display error for EPROBE_DEFER

2021-07-23 Thread Steven Price
On 21/07/2021 22:48, Chris Morgan wrote:
> From: Chris Morgan 
> 
> Set a condition for the message of "Couldn't set OPP regulators" to not
> display if the error code is EPROBE_DEFER. Note that I used an if
> statement to capture the condition instead of the dev_err_probe
> function because I didn't want to change the DRM_DEV_ERROR usage.

Note that this file (panfrost_devfreq.c) is actually the odd one out in
terms of using the DRM_DEV_xxx macros. The rest of the panfrost driver
uses the standard dev_xxx ones. So tidying this up to match the rest of
the driver would also allow us to use dev_err_probe(). But as a point
fix this patch is fine and correct. Thanks!
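
For illustration, with the file converted to the dev_* helpers the same
hunk could collapse to something like the below (sketch only -
dev_err_probe() already suppresses the error print for -EPROBE_DEFER and
returns the error code unchanged):

	if (ret) {
		/* Continue if the optional regulator is missing */
		if (ret != -ENODEV)
			return dev_err_probe(dev, ret,
					     "Couldn't set OPP regulators\n");
	}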

> Signed-off-by: Chris Morgan 

Reviewed-by: Steven Price 

I'll apply this to drm-misc-next.

Thanks,

Steve

> ---
>  drivers/gpu/drm/panfrost/panfrost_devfreq.c | 3 ++-
>  1 file changed, 2 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c 
> b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
> index 3644652f726f..194af7f607a6 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
> @@ -106,7 +106,8 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
>   if (ret) {
>   /* Continue if the optional regulator is missing */
>   if (ret != -ENODEV) {
> - DRM_DEV_ERROR(dev, "Couldn't set OPP regulators\n");
> + if (ret != -EPROBE_DEFER)
> + DRM_DEV_ERROR(dev, "Couldn't set OPP regulators\n");
>   return ret;
>   }
>   }
> 



Re: [PATCH v5 05/16] swiotlb: Add restricted DMA pool initialization

2021-04-23 Thread Steven Price

On 22/04/2021 09:14, Claire Chang wrote:

Add the initialization function to create restricted DMA pools from
matching reserved-memory nodes.

Signed-off-by: Claire Chang 
---
  include/linux/device.h  |  4 +++
  include/linux/swiotlb.h |  3 +-
  kernel/dma/swiotlb.c| 80 +
  3 files changed, 86 insertions(+), 1 deletion(-)

diff --git a/include/linux/device.h b/include/linux/device.h
index 38a2071cf776..4987608ea4ff 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -416,6 +416,7 @@ struct dev_links_info {
   * @dma_pools:Dma pools (if dma'ble device).
   * @dma_mem:  Internal for coherent mem override.
   * @cma_area: Contiguous memory area for dma allocations
+ * @dma_io_tlb_mem: Internal for swiotlb io_tlb_mem override.
   * @archdata: For arch-specific additions.
   * @of_node:  Associated device tree node.
   * @fwnode:   Associated device node supplied by platform firmware.
@@ -521,6 +522,9 @@ struct device {
  #ifdef CONFIG_DMA_CMA
struct cma *cma_area;   /* contiguous memory area for dma
   allocations */
+#endif
+#ifdef CONFIG_DMA_RESTRICTED_POOL
+   struct io_tlb_mem *dma_io_tlb_mem;
  #endif
/* arch specific additions */
struct dev_archdata archdata;
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 216854a5e513..03ad6e3b4056 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -72,7 +72,8 @@ extern enum swiotlb_force swiotlb_force;
   *range check to see if the memory was in fact allocated by this
   *API.
   * @nslabs:   The number of IO TLB blocks (in groups of 64) between @start and
- * @end. This is command line adjustable via setup_io_tlb_npages.
+ * @end. For default swiotlb, this is command line adjustable via
+ * setup_io_tlb_npages.
   * @used: The number of used IO TLB block.
   * @list: The free list describing the number of free entries available
   *from each index.
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 57a9adb920bf..ffbb8724e06c 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -39,6 +39,13 @@
  #ifdef CONFIG_DEBUG_FS
  #include 
  #endif
+#ifdef CONFIG_DMA_RESTRICTED_POOL
+#include 
+#include 
+#include 
+#include 
+#include 
+#endif
  
  #include 

  #include 
@@ -681,3 +688,76 @@ static int __init swiotlb_create_default_debugfs(void)
  late_initcall(swiotlb_create_default_debugfs);
  
  #endif

+
+#ifdef CONFIG_DMA_RESTRICTED_POOL
+static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
+   struct device *dev)
+{
+   struct io_tlb_mem *mem = rmem->priv;
+   unsigned long nslabs = rmem->size >> IO_TLB_SHIFT;
+
+   if (dev->dma_io_tlb_mem)
+   return 0;
+
+   /* Since multiple devices can share the same pool, the private data,
+* io_tlb_mem struct, will be initialized by the first device attached
+* to it.
+*/
+   if (!mem) {
+   mem = kzalloc(struct_size(mem, slots, nslabs), GFP_KERNEL);
+   if (!mem)
+   return -ENOMEM;
+#ifdef CONFIG_ARM
+   if (!PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) {
+   kfree(mem);
+   return -EINVAL;
+   }
+#endif /* CONFIG_ARM */
+   swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, false);
+
+   rmem->priv = mem;
+   }
+
+#ifdef CONFIG_DEBUG_FS
+   if (!io_tlb_default_mem->debugfs)
+   io_tlb_default_mem->debugfs =
+   debugfs_create_dir("swiotlb", NULL);


At this point it's possible for io_tlb_default_mem to be NULL, leading 
to a splat.


But even then, if it's not NULL and we have the situation where debugfs==NULL 
then the debugfs_create_dir() here will cause a subsequent attempt in 
swiotlb_create_debugfs() to fail (directory already exists) leading to 
mem->debugfs being assigned an error value. I suspect the creation of 
the debugfs directory needs to be separated from io_tlb_default_mem 
being set.
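
A minimal guard for the splat, for illustration only (this alone still
leaves the directory-duplication problem above needing the creation to
be split out):

#ifdef CONFIG_DEBUG_FS
	if (io_tlb_default_mem && !io_tlb_default_mem->debugfs)
		io_tlb_default_mem->debugfs =
			debugfs_create_dir("swiotlb", NULL);
#endif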


Other than that I gave this series a go with our prototype of Arm's 
Confidential Compute Architecture[1] - since the majority of the 
guest's memory is protected from the host the restricted DMA pool allows 
(only) a small area to be shared with the host.


After fixing (well hacking round) the above it all seems to be working 
fine with virtio drivers.


Thanks,

Steve

[1] 
https://www.arm.com/why-arm/architecture/security-features/arm-confidential-compute-architecture



Re: [PATCH v5 05/16] swiotlb: Add restricted DMA pool initialization

2021-04-28 Thread Steven Price

On 26/04/2021 17:37, Claire Chang wrote:

On Fri, Apr 23, 2021 at 7:34 PM Steven Price  wrote:

[...]


But even then, if it's not NULL and we have the situation where debugfs==NULL
then the debugfs_create_dir() here will cause a subsequent attempt in
swiotlb_create_debugfs() to fail (directory already exists) leading to
mem->debugfs being assigned an error value. I suspect the creation of
the debugfs directory needs to be separated from io_tlb_default_mem
being set.


debugfs creation should move into the if (!mem) {...} above to avoid
duplication.
I think having a separate struct dentry pointer for the default
debugfs should be enough?

if (!debugfs)
 debugfs = debugfs_create_dir("swiotlb", NULL);
swiotlb_create_debugfs(mem, rmem->name, debugfs);


Yes that looks like a good solution to me. Although I'd name the 
variable something a bit more descriptive than just "debugfs" e.g. 
"debugfs_dir" or "debugfs_root".


Thanks,

Steve


Re: [PATCH 1/3] drm/panfrost: Simplify lock_region calculation

2021-08-23 Thread Steven Price
On 20/08/2021 22:31, Alyssa Rosenzweig wrote:
> In lock_region, simplify the calculation of the region_width parameter.
> This field is the size, but encoded as ceil(log2(size)) - 1.
> ceil(log2(size)) may be computed directly as fls(size - 1). However, we
> want to use the 64-bit versions as the amount to lock can exceed
> 32-bits.
> 
> This avoids undefined behaviour when locking all memory (size ~0),
> caught by UBSAN.

It might have been useful to mention what it is that UBSAN specifically
picked up (it took me a while to spot) - but anyway I think there's a
bigger issue with it being completely wrong when size == ~0 (see below).

> Signed-off-by: Alyssa Rosenzweig 
> Reported-and-tested-by: Chris Morgan 
> Cc: 

However, I've confirmed this returns the same values and is certainly
more simple, so:

Reviewed-by: Steven Price 

> ---
>  drivers/gpu/drm/panfrost/panfrost_mmu.c | 19 +--
>  1 file changed, 5 insertions(+), 14 deletions(-)
> 
> diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c 
> b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> index 0da5b3100ab1..f6e02d0392f4 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> @@ -62,21 +62,12 @@ static void lock_region(struct panfrost_device *pfdev, 
> u32 as_nr,
>  {
>   u8 region_width;
>   u64 region = iova & PAGE_MASK;
> - /*
> -  * fls returns:
> -  * 1 .. 32
> -  *
> -  * 10 + fls(num_pages)
> -  * results in the range (11 .. 42)
> -  */
> -
> - size = round_up(size, PAGE_SIZE);

This seems to be the first issue - ~0 will be 'rounded up' to 0.

>  
> - region_width = 10 + fls(size >> PAGE_SHIFT);

fls(0) == 0, so region_width == 10.

> - if ((size >> PAGE_SHIFT) != (1ul << (region_width - 11))) {

Presumably here's where UBSAN objects - we're shifting by a negative
value, which even if it happens to work means the lock region is tiny
and certainly not what was intended! It might well be worth a:

Fixes: f3ba91228e8e ("drm/panfrost: Add initial panfrost driver")

Note for anyone following along at (working-from-) home: although this
code was cargo culted from kbase - kbase is fine because it takes a pfn
and doesn't do the round_up() stage.

Which also exposes the second bug (fixed in patch 2): a size_t isn't big
enough on 32 bit platforms (all Midgard/Bifrost GPUs have a VA size
bigger than 32 bits). Again kbase gets away with a u32 because it's a pfn.
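
For anyone wanting to check the arithmetic, a standalone (userspace)
sketch of the size == ~0 case, assuming PAGE_SIZE == 4096 and modelling
fls()/fls64() with GCC builtins:

#include <stdio.h>
#include <stdint.h>

static int fls32(uint32_t x) { return x ? 32 - __builtin_clz(x) : 0; }
static int fls64(uint64_t x) { return x ? 64 - __builtin_clzll(x) : 0; }

int main(void)
{
	uint64_t size = ~0ULL;				/* "lock everything" */

	/* Old code: round_up(~0, PAGE_SIZE) overflows to 0 */
	uint64_t rounded = (size + 4095) & ~4095ULL;
	int region_width = 10 + fls32(rounded >> 12);	/* 10 + fls(0) == 10 */
	printf("old: region_width = %d (shift by %d is UB)\n",
	       region_width, region_width - 11);

	/* New code: clamp, then ceil(log2(size)) - 1 via fls64(size - 1) */
	uint64_t clamped = size < 4096 ? 4096 : size;
	printf("new: region_width = %d\n", fls64(clamped - 1) - 1);	/* 63 */
	return 0;
}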

There is potentially a third bug which kbase only recently attempted to
fix. The lock address is effectively rounded down by the hardware (the
bottom bits are ignored). So if you have mask = ~((1 << lockaddr_size_log2) - 1)
and (iova & mask) != ((iova + size) & mask) then you are potentially failing
to lock the end of the intended region. kbase has added some code to
handle this:

>   /* Round up if some memory pages spill into the next region. */
>   region_frame_number_start = pfn >> (lockaddr_size_log2 - PAGE_SHIFT);
>   region_frame_number_end =
>   (pfn + num_pages - 1) >> (lockaddr_size_log2 - PAGE_SHIFT);
> 
>   if (region_frame_number_start < region_frame_number_end)
>   lockaddr_size_log2 += 1;

I guess we should too?

Steve

> - /* not pow2, so must go up to the next pow2 */
> - region_width += 1;
> - }
> + /* The size is encoded as ceil(log2) minus(1), which may be calculated
> +  * with fls. The size must be clamped to hardware bounds.
> +  */
> + size = max_t(u64, size, PAGE_SIZE);
> + region_width = fls64(size - 1) - 1;
>   region |= region_width;
>  
>   /* Lock the region that needs to be updated */
> 



Re: [PATCH 2/3] drm/panfrost: Use u64 for size in lock_region

2021-08-23 Thread Steven Price
On 20/08/2021 22:31, Alyssa Rosenzweig wrote:
> Mali virtual addresses are 48-bit. Use a u64 instead of size_t to ensure
> we can express the "lock everything" condition as ~0ULL without relying
> on platform-specific behaviour.

'platform-specific behaviour' makes it sound like this is something to
do with a particular board. This is 32bit/64bit - it's going to be
broken on 32bit: large lock regions are not going to work.

> Signed-off-by: Alyssa Rosenzweig 
> Suggested-by: Rob Herring 
> Tested-by: Chris Morgan 

Fixes: f3ba91228e8e ("drm/panfrost: Add initial panfrost driver")

Reviewed-by: Steven Price 

Steve

> ---
>  drivers/gpu/drm/panfrost/panfrost_mmu.c | 12 ++--
>  1 file changed, 6 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c 
> b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> index f6e02d0392f4..3a795273e505 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> @@ -58,7 +58,7 @@ static int write_cmd(struct panfrost_device *pfdev, u32 
> as_nr, u32 cmd)
>  }
>  
>  static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
> - u64 iova, size_t size)
> + u64 iova, u64 size)
>  {
>   u8 region_width;
>   u64 region = iova & PAGE_MASK;
> @@ -78,7 +78,7 @@ static void lock_region(struct panfrost_device *pfdev, u32 
> as_nr,
>  
>  
>  static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int 
> as_nr,
> -   u64 iova, size_t size, u32 op)
> +   u64 iova, u64 size, u32 op)
>  {
>   if (as_nr < 0)
>   return 0;
> @@ -95,7 +95,7 @@ static int mmu_hw_do_operation_locked(struct 
> panfrost_device *pfdev, int as_nr,
>  
>  static int mmu_hw_do_operation(struct panfrost_device *pfdev,
>  struct panfrost_mmu *mmu,
> -u64 iova, size_t size, u32 op)
> +u64 iova, u64 size, u32 op)
>  {
>   int ret;
>  
> @@ -112,7 +112,7 @@ static void panfrost_mmu_enable(struct panfrost_device 
> *pfdev, struct panfrost_m
>   u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
>   u64 memattr = cfg->arm_mali_lpae_cfg.memattr;
>  
> - mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
> + mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, 
> AS_COMMAND_FLUSH_MEM);
>  
>   mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xUL);
>   mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);
> @@ -128,7 +128,7 @@ static void panfrost_mmu_enable(struct panfrost_device 
> *pfdev, struct panfrost_m
>  
>  static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
>  {
> - mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
> + mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, 
> AS_COMMAND_FLUSH_MEM);
>  
>   mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
>   mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);
> @@ -242,7 +242,7 @@ static size_t get_pgsize(u64 addr, size_t size)
>  
>  static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
>struct panfrost_mmu *mmu,
> -  u64 iova, size_t size)
> +  u64 iova, u64 size)
>  {
>   if (mmu->as < 0)
>   return;
> 



Re: [PATCH 3/3] drm/panfrost: Clamp lock region to Bifrost minimum

2021-08-23 Thread Steven Price
On 20/08/2021 22:31, Alyssa Rosenzweig wrote:
> When locking a region, we currently clamp to a PAGE_SIZE as the minimum
> lock region. While this is valid for Midgard, it is invalid for Bifrost,

While the spec does seem to state it's invalid for Bifrost - kbase
didn't bother with a lower clamp for a long time. I actually think this
is in many ways more of a spec bug: i.e. implementation details of the
round-up that the hardware does. But it's much safer following the spec
;) And it seems like kbase eventually caught up too.

> where the minimum locking size is 8x larger than the 4k page size. Add a
> hardware definition for the minimum lock region size (corresponding to
> KBASE_LOCK_REGION_MIN_SIZE_LOG2 in kbase) and respect it.
> 
> Signed-off-by: Alyssa Rosenzweig 
> Tested-by: Chris Morgan 
> Cc: 

Reviewed-by: Steven Price 

> ---
>  drivers/gpu/drm/panfrost/panfrost_mmu.c  | 2 +-
>  drivers/gpu/drm/panfrost/panfrost_regs.h | 2 ++
>  2 files changed, 3 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c 
> b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> index 3a795273e505..dfe5f1d29763 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> @@ -66,7 +66,7 @@ static void lock_region(struct panfrost_device *pfdev, u32 
> as_nr,
>   /* The size is encoded as ceil(log2) minus(1), which may be calculated
>* with fls. The size must be clamped to hardware bounds.
>*/
> - size = max_t(u64, size, PAGE_SIZE);
> + size = max_t(u64, size, AS_LOCK_REGION_MIN_SIZE);
>   region_width = fls64(size - 1) - 1;
>   region |= region_width;
>  
> diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h 
> b/drivers/gpu/drm/panfrost/panfrost_regs.h
> index 1940ff86e49a..6c5a11ef1ee8 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_regs.h
> +++ b/drivers/gpu/drm/panfrost/panfrost_regs.h
> @@ -316,6 +316,8 @@
>  #define AS_FAULTSTATUS_ACCESS_TYPE_READ  (0x2 << 8)
>  #define AS_FAULTSTATUS_ACCESS_TYPE_WRITE (0x3 << 8)
>  
> +#define AS_LOCK_REGION_MIN_SIZE (1ULL << 15)
> +
>  #define gpu_write(dev, reg, data) writel(data, dev->iomem + reg)
>  #define gpu_read(dev, reg) readl(dev->iomem + reg)
>  
> 



Re: [PATCH 1/3] drm/panfrost: Simplify lock_region calculation

2021-08-23 Thread Steven Price
On 23 August 2021 22:09:08 BST, Alyssa Rosenzweig  wrote:
>> > In lock_region, simplify the calculation of the region_width parameter.
>> > This field is the size, but encoded as ceil(log2(size)) - 1.
>> > ceil(log2(size)) may be computed directly as fls(size - 1). However, we
>> > want to use the 64-bit versions as the amount to lock can exceed
>> > 32-bits.
>> > 
>> > This avoids undefined behaviour when locking all memory (size ~0),
>> > caught by UBSAN.
>> 
>> It might have been useful to mention what it is that UBSAN specifically
>> picked up (it took me a while to spot) - but anyway I think there's a
>> bigger issue with it being completely wrong when size == ~0 (see below).
>
>Indeed. I've updated the commit message in v2 to explain what goes
>wrong (your analysis was spot on, but a mailing list message is more
>ephemeral than a commit message). I'll send out v2 tomorrow assuming
>nobody objects to v1 in the mean time.
>
>Thanks for the review.
>
>> There is potentially a third bug which kbase only recently attempted to
>> fix. The lock address is effectively rounded down by the hardware (the
>> bottom bits are ignored). So if you have mask = ~((1 << lockaddr_size_log2) - 1)
>> and (iova & mask) != ((iova + size) & mask) then you are potentially failing
>> to lock the end of the intended region. kbase has added some code to
>> handle this:
>> 
>> >/* Round up if some memory pages spill into the next region. */
>> >region_frame_number_start = pfn >> (lockaddr_size_log2 - PAGE_SHIFT);
>> >region_frame_number_end =
>> >(pfn + num_pages - 1) >> (lockaddr_size_log2 - PAGE_SHIFT);
>> > 
>> >if (region_frame_number_start < region_frame_number_end)
>> >lockaddr_size_log2 += 1;
>> 
>> I guess we should too?
>
>Oh, I missed this one. Guess we have 4 bugs with this code instead of
>just 3, yikes. How could such a short function be so deeply and horribly
>broken? 
>
>Should I add a fourth patch to the series to fix this?

Yes please!

Thanks,
Steve


Re: [PATCH 2/3] drm/panfrost: Use u64 for size in lock_region

2021-08-23 Thread Steven Price
On 23 August 2021 22:11:09 BST, Alyssa Rosenzweig  wrote:
>> > Mali virtual addresses are 48-bit. Use a u64 instead of size_t to ensure
>> > we can express the "lock everything" condition as ~0ULL without relying
>> > on platform-specific behaviour.
>> 
>> 'platform-specific behaviour' makes it sound like this is something to
>> do with a particular board. This is 32bit/64bit - it's going to be
>> broken on 32bit: large lock regions are not going to work.
>
>Oh, my. I used the term as a weasel word since the spec is loose on how
>big a size_t actually is. But if this is causing actual breakage on
>armv7 we're in trouble. I'll add a Cc stable tag on v2, unless the Fixes
>implies that?

The usual practice is to put Cc: stable in the commit message or the Fixes: tag
(no need to actually CC the stable email address). So just Fixes: should work.

>Thanks for pointing this out.

It's not actually quite so bad as it could be as >4GB regions are unlikely 
(especially on 32bit), but the GPU does of course support such a thing.

Thanks,
Steve


Re: [PATCH 3/3] drm/panfrost: Clamp lock region to Bifrost minimum

2021-08-23 Thread Steven Price
On 23 August 2021 22:13:45 BST, Alyssa Rosenzweig  wrote:
>> > When locking a region, we currently clamp to a PAGE_SIZE as the minimum
>> > lock region. While this is valid for Midgard, it is invalid for Bifrost,
>> 
>> While the spec does seem to state it's invalid for Bifrost - kbase
>> didn't bother with a lower clamp for a long time. I actually think this
>> is in many ways more of a spec bug: i.e. implementation details of the
>> round-up that the hardware does. But it's much safer following the spec
>> ;) And it seems like kbase eventually caught up too.
>
>Yeah, makes sense. Should I drop the Cc: stable in that case? If the
>issue is purely theoretical.

I think it might still be worth fixing. Early Bifrost should be fine, but 
something triggered a bug report that caused kbase to be fixed, so I'm less 
confident that there's nothing out there that cares. Following both kbase and 
the spec seems the safest approach.

Thanks,
Steve


Re: [PATCH v2 1/4] drm/panfrost: Simplify lock_region calculation

2021-08-25 Thread Steven Price
On 24/08/2021 18:30, Alyssa Rosenzweig wrote:
> In lock_region, simplify the calculation of the region_width parameter.
> This field is the size, but encoded as ceil(log2(size)) - 1.
> ceil(log2(size)) may be computed directly as fls(size - 1). However, we
> want to use the 64-bit versions as the amount to lock can exceed
> 32-bits.
> 
> This avoids undefined (and completely wrong) behaviour when locking all
> memory (size ~0). In this case, the old code would "round up" ~0 to the
> nearest page, overflowing to 0. Since fls(0) == 0, this would calculate
> a region width of 10 + 0 = 10. But then the code would shift by
> (region_width - 11) = -1. As shifting by a negative number is undefined,
> UBSAN flags the bug. Of course, even if it were defined the behaviour is
> wrong, instead of locking all memory almost none would get locked.
> 
> The new form of the calculation corrects this special case and avoids
> the undefined behaviour.
> 
> Signed-off-by: Alyssa Rosenzweig 
> Reported-and-tested-by: Chris Morgan 
> Fixes: f3ba91228e8e ("drm/panfrost: Add initial panfrost driver")
> Cc: 

Reviewed-by: Steven Price 

> ---
>  drivers/gpu/drm/panfrost/panfrost_mmu.c | 19 +--
>  1 file changed, 5 insertions(+), 14 deletions(-)
> 
> diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c 
> b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> index 0da5b3100ab1..f6e02d0392f4 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> @@ -62,21 +62,12 @@ static void lock_region(struct panfrost_device *pfdev, 
> u32 as_nr,
>  {
>   u8 region_width;
>   u64 region = iova & PAGE_MASK;
> - /*
> -  * fls returns:
> -  * 1 .. 32
> -  *
> -  * 10 + fls(num_pages)
> -  * results in the range (11 .. 42)
> -  */
> -
> - size = round_up(size, PAGE_SIZE);
>  
> - region_width = 10 + fls(size >> PAGE_SHIFT);
> - if ((size >> PAGE_SHIFT) != (1ul << (region_width - 11))) {
> - /* not pow2, so must go up to the next pow2 */
> - region_width += 1;
> - }
> + /* The size is encoded as ceil(log2) minus(1), which may be calculated
> +  * with fls. The size must be clamped to hardware bounds.
> +  */
> + size = max_t(u64, size, PAGE_SIZE);
> + region_width = fls64(size - 1) - 1;
>   region |= region_width;
>  
>   /* Lock the region that needs to be updated */
> 



Re: [PATCH v2 4/4] drm/panfrost: Handle non-aligned lock addresses

2021-08-25 Thread Steven Price
On 24/08/2021 18:30, Alyssa Rosenzweig wrote:
> When locking memory, the base address is rounded down to the nearest
> page. The current code does not adjust the size in this case,
> truncating the lock region:
> 
>   Input:     [size]
>   Round:   [size]
> 
> To fix the truncation, extend the lock region by the amount rounded off.
> 
>   Input:     [size]
>   Round:   [---size--]
> 
> This bug is difficult to hit under current assumptions: since the size
> of the lock region is stored as a ceil(log2), the truncation must cause
> us to cross a power-of-two boundary. This is possible, for example if
> the caller tries to lock 65535 bytes starting at iova 0xCAFE0010. The
> existing code rounds down the iova to 0xCAFE0000 and rounds up the lock
> region to 65536 bytes, locking until 0xCAFF0000. This fails to lock the
> last 15 bytes.
> 
> In practice, the current callers pass PAGE_SIZE aligned inputs, avoiding
> the bug. Therefore this doesn't need to be backported. Still, that's a
> happy accident and not a precondition of lock_region, so we let's do the
> right thing to future proof.

Actually it's worse than that due to the hardware behaviour, the spec
states (for LOCKADDR_BASE):

> Only the upper bits of the address are used. The address is aligned to a
> multiple of the region size, so a variable number of low-order bits are
> ignored, depending on the selected region size. It is recommended that 
> software
> ensures that these low bits in the address are cleared, to avoid confusion.

It appears that indeed this has caused confusion ;)

So for a simple request like locking from 0xCAFE0000 - 0xCB010000 (size
= 0x30000) the region width gets rounded up (to 0x40000) which causes
the start address to be effectively rounded down (by the hardware) to
0xCAFC0000 and we fail to lock 0xCB000000-0xCB010000.

Interestingly (unless my reading of this is wrong) that means to lock
0x00000000-0x100010000 (i.e. crossing the 4GB boundary) requires locking
*at least* 0x00000000-0x200000000 (i.e. locking the first 8GB).
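
A standalone sketch of that example, assuming the hardware simply
ignores the bottom region_width + 1 bits of the lock address:

#include <stdio.h>
#include <stdint.h>

static int fls64(uint64_t x) { return x ? 64 - __builtin_clzll(x) : 0; }

int main(void)
{
	uint64_t iova = 0xCAFE0000, size = 0x30000;
	int region_width = fls64(size - 1) - 1;	/* 17: a 256KiB region */
	uint64_t hw_mask = (1ULL << (region_width + 1)) - 1;
	uint64_t start = iova & ~hw_mask;	/* 0xCAFC0000 */
	uint64_t end = start + hw_mask + 1;	/* 0xCB000000 */

	printf("locked [%#llx, %#llx), requested end %#llx\n",
	       (unsigned long long)start, (unsigned long long)end,
	       (unsigned long long)(iova + size));	/* 0xCB010000 missed */
	return 0;
}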

This appears to be broken in kbase (which actually does zero out the low
bits of the address) - I've raised a bug internally so hopefully someone
will tell me if I've read the spec completely wrong here.

Steve

> Signed-off-by: Alyssa Rosenzweig 
> Reported-by: Steven Price 
> ---
>  drivers/gpu/drm/panfrost/panfrost_mmu.c | 3 +++
>  1 file changed, 3 insertions(+)
> 
> diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c 
> b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> index dfe5f1d29763..14be32497ec3 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> @@ -63,6 +63,9 @@ static void lock_region(struct panfrost_device *pfdev, u32 
> as_nr,
>   u8 region_width;
>   u64 region = iova & PAGE_MASK;
>  
> + /* After rounding the address down, extend the size to lock the end. */
> + size += (iova - region);
> +
>   /* The size is encoded as ceil(log2) minus(1), which may be calculated
>* with fls. The size must be clamped to hardware bounds.
>*/
> 



Re: [PATCH] drm/panfrost: Use upper/lower_32_bits helpers

2021-08-25 Thread Steven Price
On 24/08/2021 18:26, Alyssa Rosenzweig wrote:
> Use upper_32_bits/lower_32_bits helpers instead of open-coding them.
> This is easier to scan quickly compared to bitwise manipulation, and it
> is pleasingly symmetric. I noticed this when debugging lock_region,
> which had a particularly "creative" way of writing upper_32_bits.
> 
> Signed-off-by: Alyssa Rosenzweig 

It looks like there's another case (slightly different format) in
perfcnt:

---8<---
diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c 
b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
index 5ab03d605f57..e116a4d9b8e5 100644
--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
@@ -51,8 +51,8 @@ static int panfrost_perfcnt_dump_locked(struct 
panfrost_device *pfdev)
 
reinit_completion(&pfdev->perfcnt->dump_comp);
gpuva = pfdev->perfcnt->mapping->mmnode.start << PAGE_SHIFT;
-   gpu_write(pfdev, GPU_PERFCNT_BASE_LO, gpuva);
-   gpu_write(pfdev, GPU_PERFCNT_BASE_HI, gpuva >> 32);
+   gpu_write(pfdev, GPU_PERFCNT_BASE_LO, lower_32_bits(gpuva));
+   gpu_write(pfdev, GPU_PERFCNT_BASE_HI, upper_32_bits(gpuva));
gpu_write(pfdev, GPU_INT_CLEAR,
  GPU_IRQ_CLEAN_CACHES_COMPLETED |
  GPU_IRQ_PERFCNT_SAMPLE_COMPLETED);
---8<---


With that squashed in:

Reviewed-by: Steven Price 

Thanks,

Steve
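
For reference, the helpers are defined in include/linux/kernel.h roughly
as below - note the double shift in upper_32_bits() keeps it well-defined
even when n is a 32-bit type:

#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
#define lower_32_bits(n) ((u32)((n) & 0xffffffff))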

> ---
>  drivers/gpu/drm/panfrost/panfrost_job.c |  8 
>  drivers/gpu/drm/panfrost/panfrost_mmu.c | 12 ++--
>  2 files changed, 10 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c 
> b/drivers/gpu/drm/panfrost/panfrost_job.c
> index 71a72fb50e6b..763b7abfc88e 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_job.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_job.c
> @@ -137,8 +137,8 @@ static void panfrost_job_write_affinity(struct 
> panfrost_device *pfdev,
>*/
>   affinity = pfdev->features.shader_present;
>  
> - job_write(pfdev, JS_AFFINITY_NEXT_LO(js), affinity & 0x);
> - job_write(pfdev, JS_AFFINITY_NEXT_HI(js), affinity >> 32);
> + job_write(pfdev, JS_AFFINITY_NEXT_LO(js), lower_32_bits(affinity));
> + job_write(pfdev, JS_AFFINITY_NEXT_HI(js), upper_32_bits(affinity));
>  }
>  
>  static u32
> @@ -203,8 +203,8 @@ static void panfrost_job_hw_submit(struct panfrost_job 
> *job, int js)
>  
>   cfg = panfrost_mmu_as_get(pfdev, job->file_priv->mmu);
>  
> - job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0x);
> - job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32);
> + job_write(pfdev, JS_HEAD_NEXT_LO(js), lower_32_bits(jc_head));
> + job_write(pfdev, JS_HEAD_NEXT_HI(js), upper_32_bits(jc_head));
>  
>   panfrost_job_write_affinity(pfdev, job->requirements, js);
>  
> diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c 
> b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> index 0da5b3100ab1..c3fbe0ad9090 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> @@ -80,8 +80,8 @@ static void lock_region(struct panfrost_device *pfdev, u32 
> as_nr,
>   region |= region_width;
>  
>   /* Lock the region that needs to be updated */
> - mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), region & 0xUL);
> - mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), (region >> 32) & 0xUL);
> + mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), lower_32_bits(region));
> + mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), upper_32_bits(region));
>   write_cmd(pfdev, as_nr, AS_COMMAND_LOCK);
>  }
>  
> @@ -123,14 +123,14 @@ static void panfrost_mmu_enable(struct panfrost_device 
> *pfdev, struct panfrost_m
>  
>   mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
>  
> - mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xUL);
> - mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);
> + mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), lower_32_bits(transtab));
> + mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), upper_32_bits(transtab));
>  
>   /* Need to revisit mem attrs.
>* NC is the default, Mali driver is inner WT.
>*/
> - mmu_write(pfdev, AS_MEMATTR_LO(as_nr), memattr & 0xUL);
> - mmu_write(pfdev, AS_MEMATTR_HI(as_nr), memattr >> 32);
> + mmu_write(pfdev, AS_MEMATTR_LO(as_nr), lower_32_bits(memattr));
> + mmu_write(pfdev, AS_MEMATTR_HI(as_nr), upper_32_bits(memattr));
>  
>   write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
>  }
> 



Re: [PATCH v2 4/4] drm/panfrost: Handle non-aligned lock addresses

2021-08-25 Thread Steven Price
On 25/08/2021 15:07, Alyssa Rosenzweig wrote:
>>> In practice, the current callers pass PAGE_SIZE aligned inputs, avoiding
>>> the bug. Therefore this doesn't need to be backported. Still, that's a
>>> happy accident and not a precondition of lock_region, so we let's do the
>>> right thing to future proof.
>>
>> Actually it's worse than that due to the hardware behaviour, the spec
>> states (for LOCKADDR_BASE):
>>
>>> Only the upper bits of the address are used. The address is aligned to a
>>> multiple of the region size, so a variable number of low-order bits are
>>> ignored, depending on the selected region size. It is recommended that 
>>> software
>>> ensures that these low bits in the address are cleared, to avoid confusion.
>>
>> It appears that indeed this has caused confusion ;)
>>
>> So for a simple request like locking from 0xCAFE0000 - 0xCB010000 (size
>> = 0x30000) the region width gets rounded up (to 0x40000) which causes
>> the start address to be effectively rounded down (by the hardware) to
>> 0xCAFC0000 and we fail to lock 0xCB000000-0xCB010000.
>>
>> Interestingly (unless my reading of this is wrong) that means to lock
>> 0x00000000-0x100010000 (i.e. crossing the 4GB boundary) requires locking
>> *at least* 0x00000000-0x200000000 (i.e. locking the first 8GB).
>>
>> This appears to be broken in kbase (which actually does zero out the low
>> bits of the address) - I've raised a bug internally so hopefully someone
>> will tell me if I've read the spec completely wrong here.
> 
> Horrifying, and not what I wanted to read my last day before 2 weeks of
> leave. Let's drop this patch, hopefully by the time I'm back, your
> friends in GPU can confirm that's a spec bug and not an actual
> hardware/driver one...
> 
> Can you apply the other 3 patches in the mean time? Thanks :-)
> 

Yeah, sure. I'll push the first 3 to drm-misc-next-fixes (should land in
v5.15).

It's interesting that if my (new) reading of the spec is correct then
kbase has been horribly broken in this respect forever. So clearly it
can't be something that crops up very often. It would have been good if
the spec could have included wording such as "naturally aligned" if
that's what was intended.

Enjoy your holiday!

Thanks,
Steve


Re: [PATCH v2] drm/panfrost: Use upper/lower_32_bits helpers

2021-08-26 Thread Steven Price
On 25/08/2021 16:33, Alyssa Rosenzweig wrote:
> Use upper_32_bits/lower_32_bits helpers instead of open-coding them.
> This is easier to scan quickly compared to bitwise manipulation, and it
> is pleasingly symmetric. I noticed this when debugging lock_region,
> which had a particularly "creative" way of writing upper_32_bits.
> 
> v2: Use helpers for one more call site and add review tag (Steven).
> 
> Signed-off-by: Alyssa Rosenzweig 
> Reviewed-by: Rob Herring  (v1)
> Reviewed-by: Steven Price 

Pushed to drm-misc-next

Thanks,

Steve

> ---
>  drivers/gpu/drm/panfrost/panfrost_job.c |  8 
>  drivers/gpu/drm/panfrost/panfrost_mmu.c | 12 ++--
>  drivers/gpu/drm/panfrost/panfrost_perfcnt.c |  4 ++--
>  3 files changed, 12 insertions(+), 12 deletions(-)
> 
> diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c 
> b/drivers/gpu/drm/panfrost/panfrost_job.c
> index 71a72fb50e6b..763b7abfc88e 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_job.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_job.c
> @@ -137,8 +137,8 @@ static void panfrost_job_write_affinity(struct 
> panfrost_device *pfdev,
>*/
>   affinity = pfdev->features.shader_present;
>  
> - job_write(pfdev, JS_AFFINITY_NEXT_LO(js), affinity & 0x);
> - job_write(pfdev, JS_AFFINITY_NEXT_HI(js), affinity >> 32);
> + job_write(pfdev, JS_AFFINITY_NEXT_LO(js), lower_32_bits(affinity));
> + job_write(pfdev, JS_AFFINITY_NEXT_HI(js), upper_32_bits(affinity));
>  }
>  
>  static u32
> @@ -203,8 +203,8 @@ static void panfrost_job_hw_submit(struct panfrost_job 
> *job, int js)
>  
>   cfg = panfrost_mmu_as_get(pfdev, job->file_priv->mmu);
>  
> - job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0x);
> - job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32);
> + job_write(pfdev, JS_HEAD_NEXT_LO(js), lower_32_bits(jc_head));
> + job_write(pfdev, JS_HEAD_NEXT_HI(js), upper_32_bits(jc_head));
>  
>   panfrost_job_write_affinity(pfdev, job->requirements, js);
>  
> diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c 
> b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> index 0da5b3100ab1..c3fbe0ad9090 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> @@ -80,8 +80,8 @@ static void lock_region(struct panfrost_device *pfdev, u32 
> as_nr,
>   region |= region_width;
>  
>   /* Lock the region that needs to be updated */
> - mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), region & 0xUL);
> - mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), (region >> 32) & 0xUL);
> + mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), lower_32_bits(region));
> + mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), upper_32_bits(region));
>   write_cmd(pfdev, as_nr, AS_COMMAND_LOCK);
>  }
>  
> @@ -123,14 +123,14 @@ static void panfrost_mmu_enable(struct panfrost_device 
> *pfdev, struct panfrost_m
>  
>   mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
>  
> - mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xUL);
> - mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);
> + mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), lower_32_bits(transtab));
> + mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), upper_32_bits(transtab));
>  
>   /* Need to revisit mem attrs.
>* NC is the default, Mali driver is inner WT.
>*/
> - mmu_write(pfdev, AS_MEMATTR_LO(as_nr), memattr & 0xUL);
> - mmu_write(pfdev, AS_MEMATTR_HI(as_nr), memattr >> 32);
> + mmu_write(pfdev, AS_MEMATTR_LO(as_nr), lower_32_bits(memattr));
> + mmu_write(pfdev, AS_MEMATTR_HI(as_nr), upper_32_bits(memattr));
>  
>   write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
>  }
> diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c 
> b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
> index 5ab03d605f57..e116a4d9b8e5 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
> @@ -51,8 +51,8 @@ static int panfrost_perfcnt_dump_locked(struct 
> panfrost_device *pfdev)
>  
>   reinit_completion(&pfdev->perfcnt->dump_comp);
>   gpuva = pfdev->perfcnt->mapping->mmnode.start << PAGE_SHIFT;
> - gpu_write(pfdev, GPU_PERFCNT_BASE_LO, gpuva);
> - gpu_write(pfdev, GPU_PERFCNT_BASE_HI, gpuva >> 32);
> + gpu_write(pfdev, GPU_PERFCNT_BASE_LO, lower_32_bits(gpuva));
> + gpu_write(pfdev, GPU_PERFCNT_BASE_HI, upper_32_bits(gpuva));
>   gpu_write(pfdev, GPU_INT_CLEAR,
> GPU_IRQ_CLEAN_CACHES_COMPLETED |
> GPU_IRQ_PERFCNT_SAMPLE_COMPLETED);
> 



Re: [PATCH v3] drm/scheduler re-insert Bailing job to avoid memleak

2021-03-22 Thread Steven Price

On 15/03/2021 05:23, Zhang, Jack (Jian) wrote:

[AMD Public Use]

Hi, Rob/Tomeu/Steven,

Would you please help to review this patch for panfrost driver?

Thanks,
Jack Zhang

-Original Message-
From: Jack Zhang 
Sent: Monday, March 15, 2021 1:21 PM
To: dri-devel@lists.freedesktop.org; amd-...@lists.freedesktop.org; Koenig, Christian 
; Grodzovsky, Andrey ; Liu, Monk 
; Deng, Emily 
Cc: Zhang, Jack (Jian) 
Subject: [PATCH v3] drm/scheduler re-insert Bailing job to avoid memleak

re-insert Bailing jobs to avoid memory leak.

V2: move re-insert step to drm/scheduler logic
V3: add panfrost's return value for bailing jobs
in case it hits the memleak issue.


This commit message could do with some work - it's really hard to 
decipher what the actual problem you're solving is.




Signed-off-by: Jack Zhang 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 +++-
  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c| 8 ++--
  drivers/gpu/drm/panfrost/panfrost_job.c| 4 ++--
  drivers/gpu/drm/scheduler/sched_main.c | 8 +++-
  include/drm/gpu_scheduler.h| 1 +
  5 files changed, 19 insertions(+), 6 deletions(-)


[...]

diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c 
b/drivers/gpu/drm/panfrost/panfrost_job.c
index 6003cfeb1322..e2cb4f32dae1 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -444,7 +444,7 @@ static enum drm_gpu_sched_stat panfrost_job_timedout(struct 
drm_sched_job
 * spurious. Bail out.
 */
if (dma_fence_is_signaled(job->done_fence))
-   return DRM_GPU_SCHED_STAT_NOMINAL;
+   return DRM_GPU_SCHED_STAT_BAILING;
  
  	dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",

js,
@@ -456,7 +456,7 @@ static enum drm_gpu_sched_stat panfrost_job_timedout(struct 
drm_sched_job
  
  	/* Scheduler is already stopped, nothing to do. */

if (!panfrost_scheduler_stop(&pfdev->js->queue[js], sched_job))
-   return DRM_GPU_SCHED_STAT_NOMINAL;
+   return DRM_GPU_SCHED_STAT_BAILING;
  
  	/* Schedule a reset if there's no reset in progress. */

if (!atomic_xchg(&pfdev->reset.pending, 1))


This looks correct to me - in these two cases drm_sched_stop() is not 
called on the sched_job, so it looks like currently the job will be leaked.



diff --git a/drivers/gpu/drm/scheduler/sched_main.c 
b/drivers/gpu/drm/scheduler/sched_main.c
index 92d8de24d0a1..a44f621fb5c4 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -314,6 +314,7 @@ static void drm_sched_job_timedout(struct work_struct *work)
  {
struct drm_gpu_scheduler *sched;
struct drm_sched_job *job;
+   int ret;
  
  	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
  
@@ -331,8 +332,13 @@ static void drm_sched_job_timedout(struct work_struct *work)

list_del_init(&job->list);
spin_unlock(&sched->job_list_lock);
  
-		job->sched->ops->timedout_job(job);

+   ret = job->sched->ops->timedout_job(job);
  
+		if (ret == DRM_GPU_SCHED_STAT_BAILING) {

+   spin_lock(&sched->job_list_lock);
+   list_add(&job->node, &sched->ring_mirror_list);
+   spin_unlock(&sched->job_list_lock);
+   }


I think we could really do with a comment somewhere explaining what 
"bailing" means in this context (a sketch follows the two cases below). 
For the Panfrost case we have two cases:


 * The GPU job actually finished while the timeout code was running 
(done_fence is signalled).


 * The GPU is already in the process of being reset (Panfrost has 
multiple queues, so most likely a bad job in another queue).
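
Something along these lines, perhaps (wording illustrative only,
extending the existing enum):

enum drm_gpu_sched_stat {
	DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */
	DRM_GPU_SCHED_STAT_NOMINAL,
	DRM_GPU_SCHED_STAT_ENODEV,
	/*
	 * Timeout handler bailed without stopping the scheduler, e.g.
	 * because the job completed while the handler was running or a
	 * reset was already in flight; the core re-inserts the job.
	 */
	DRM_GPU_SCHED_STAT_BAILING,
};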


I'm also not convinced that (for Panfrost) it makes sense to be adding 
the jobs back to the list. For the first case above clearly the job 
could just be freed (it's complete). The second case is more interesting 
and Panfrost currently doesn't handle this well. In theory the driver 
could try to rescue the job ('soft stop' in Mali language) so that it 
could be resubmitted. Panfrost doesn't currently support that, so 
attempting to resubmit the job is almost certainly going to fail.


It's on my TODO list to look at improving Panfrost in this regard, but 
sadly still quite far down.


Steve


/*
 * Guilty job did complete and hence needs to be manually 
removed
 * See drm_sched_stop doc.
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 4ea8606d91fe..8093ac2427ef 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -210,6 +210,7 @@ enum drm_gpu_sched_stat {
DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */
DRM_GPU_SCHED_STAT_NOMINAL,
DRM_GPU_SCHED_STAT_ENODEV,
+   DRM_GPU_SCHED_STAT_BAILING,
  };
  
  /**





Re: [PATCH v3] drm/scheduler re-insert Bailing job to avoid memleak

2021-03-26 Thread Steven Price

On 26/03/2021 02:04, Zhang, Jack (Jian) wrote:

[AMD Official Use Only - Internal Distribution Only]

Hi, Steve,

Thank you for your detailed comments.

But currently the patch is not finalized.
We found some potential race condition even with this patch. The solution is 
under discussion and hopefully we could find an ideal one.
After that, I will start to consider other drm-driver if it will influence 
other drivers(except for amdgpu).


No problem. Please keep me CC'd, the suggestion of using reference 
counts may be beneficial for Panfrost as we already build a reference 
count on top of struct drm_sched_job. So there may be scope for cleaning 
up Panfrost afterwards even if your work doesn't directly affect it.
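
(For reference, an abbreviated sketch of that arrangement from
panfrost_job.h - the job embeds the scheduler job plus its own kref:)

struct panfrost_job {
	struct drm_sched_job base;
	struct kref refcount;
	/* ... */
};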


Thanks,

Steve


Best,
Jack


Re: [PATCH v10 3/4] drm/panfrost: devfreq: Disable devfreq when num_supplies > 1

2021-01-14 Thread Steven Price

On 13/01/2021 06:07, Nicolas Boichat wrote:

GPUs with more than a single regulator (e.g. G72 on MT8183) will
require platform-specific handling for devfreq, for 2 reasons:
  1. The opp core (drivers/opp/core.c:_generic_set_opp_regulator)
 does not support multiple regulators, so we'll need custom
 handlers.
  2. Generally, platforms with 2 regulators have platform-specific
 constraints on how the voltages should be set (e.g.
 minimum/maximum voltage difference between them), so we
 should not just create generic handlers that simply
 change the voltages without taking care of those constraints.

Disable devfreq for now on those GPUs.

Signed-off-by: Nicolas Boichat 
Reviewed-by: Tomeu Vizoso 


Thanks for the clarification in the commit message.

Reviewed-by: Steven Price 


---

(no changes since v9)

Changes in v9:
  - Explain why devfreq needs to be disabled for GPUs with >1
regulators.

Changes in v8:
  - Use DRM_DEV_INFO instead of ERROR

Changes in v7:
  - Fix GPU ID in commit message

Changes in v6:
  - New change

  drivers/gpu/drm/panfrost/panfrost_devfreq.c | 9 +
  1 file changed, 9 insertions(+)

diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c 
b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index f44d28fad085..812cfecdee3b 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -92,6 +92,15 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
struct thermal_cooling_device *cooling;
struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;
  
+	if (pfdev->comp->num_supplies > 1) {

+   /*
+* GPUs with more than 1 supply require platform-specific 
handling:
+* continue without devfreq
+*/
+   DRM_DEV_INFO(dev, "More than 1 supply is not supported yet\n");
+   return 0;
+   }
+
opp_table = dev_pm_opp_set_regulators(dev, pfdev->comp->supply_names,
  pfdev->comp->num_supplies);
if (IS_ERR(opp_table)) {





Re: [PATCH v10 4/4] drm/panfrost: Add mt8183-mali compatible string

2021-01-14 Thread Steven Price

On 13/01/2021 06:07, Nicolas Boichat wrote:

Add support for MT8183's G72 Bifrost.

Signed-off-by: Nicolas Boichat 
Reviewed-by: Tomeu Vizoso 


LGTM

Reviewed-by: Steven Price 


---

(no changes since v7)

Changes in v7:
  - Fix GPU ID in commit message

Changes in v6:
  - Context conflicts, reflow the code.
  - Use ARRAY_SIZE for power domains too.

Changes in v5:
  - Change power domain name from 2d to core2.

Changes in v4:
  - Add power domain names.

Changes in v3:
  - Match mt8183-mali instead of bifrost, as we require special
handling for the 2 regulators and 3 power domains.

  drivers/gpu/drm/panfrost/panfrost_drv.c | 10 ++
  1 file changed, 10 insertions(+)

diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c 
b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 83a461bdeea8..ca07098a6141 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -665,6 +665,15 @@ static const struct panfrost_compatible amlogic_data = {
.vendor_quirk = panfrost_gpu_amlogic_quirk,
  };
  
+const char * const mediatek_mt8183_supplies[] = { "mali", "sram" };

+const char * const mediatek_mt8183_pm_domains[] = { "core0", "core1", "core2" 
};
+static const struct panfrost_compatible mediatek_mt8183_data = {
+   .num_supplies = ARRAY_SIZE(mediatek_mt8183_supplies),
+   .supply_names = mediatek_mt8183_supplies,
+   .num_pm_domains = ARRAY_SIZE(mediatek_mt8183_pm_domains),
+   .pm_domain_names = mediatek_mt8183_pm_domains,
+};
+
  static const struct of_device_id dt_match[] = {
/* Set first to probe before the generic compatibles */
{ .compatible = "amlogic,meson-gxm-mali",
@@ -681,6 +690,7 @@ static const struct of_device_id dt_match[] = {
{ .compatible = "arm,mali-t860", .data = &default_data, },
{ .compatible = "arm,mali-t880", .data = &default_data, },
{ .compatible = "arm,mali-bifrost", .data = &default_data, },
+   { .compatible = "mediatek,mt8183-mali", .data = &mediatek_mt8183_data },
{}
  };
  MODULE_DEVICE_TABLE(of, dt_match);





Re: [PATCH] drm/komeda: Fix bit check to import to value of proper type

2021-01-20 Thread Steven Price

On 18/01/2021 14:20, carsten.haitz...@foss.arm.com wrote:

From: Carsten Haitzler 

Another issue found by KASAN. The bit finding is bueried inside the


NIT: s/bueried/buried/


dp_for_each_set_bit() macro (that passes on to for_each_set_bit() that
calls the bit stuff. These bit functions want an unsigned long pointer
as input and just dumbly casting leads to out-of-bounds accesses.
This fixes that.


This seems like an underlying bug/lack of clear documentation for the
underlying find_*_bit() functions. dp_for_each_set_bit() tries to do the
right thing:

  #define dp_for_each_set_bit(bit, mask) \
for_each_set_bit((bit), ((unsigned long *)&(mask)), sizeof(mask) * 8)

i.e. passing the actual size of the type. However because of the cast to
unsigned long (and subsequent accesses using that type) the compiler is
free to make accesses beyond the size of the variable (and indeed this
is completely broken on big-endian). The implementation actually
requires that the bitmap is really an array of unsigned long - no other
type will work.

So I think the fix should also include fixing the dp_for_each_set_bit()
macro - the cast is bogus as it stands.

Doing that I also get warnings on komeda_pipeline::avail_comps and
komeda_pipeline::supported_inputs - although I note there are other
bitmasks mentioned.

The other option is to fix dp_for_each_set_bit() itself using a little hack:

-   for_each_set_bit((bit), ((unsigned long *)&(mask)), sizeof(mask) * 8)
+   for_each_set_bit((bit), (&((unsigned long){mask})), sizeof(mask) * 8)

With that I don't think you need any other change as the mask is actually
in a new unsigned long on the stack.
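
A standalone (userspace) illustration of the difference, assuming a
64-bit build:

#include <stdio.h>

int main(void)
{
	unsigned int mask = 0x5;
	/* The bit helpers dereference whole unsigned longs, so the cast
	 * over-reads the 4-byte mask by another 4 bytes - this is the
	 * access KASAN traps (and it's wrong on big-endian anyway). */
	unsigned long bad = *(unsigned long *)&mask;
	/* The C99 compound literal copies the mask into a real unsigned
	 * long on the stack, so the full-word read is fine. */
	unsigned long good = (unsigned long){mask};

	printf("%lx vs %lx\n", bad, good);
	return 0;
}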

Steve



Signed-off-by: Carsten Haitzler 
---
  .../drm/arm/display/komeda/komeda_pipeline_state.c | 14 --
  1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c 
b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
index e8b1e15312d8..f7dddb9f015d 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
@@ -1232,7 +1232,8 @@ komeda_pipeline_unbound_components(struct komeda_pipeline 
*pipe,
struct komeda_pipeline_state *old = priv_to_pipe_st(pipe->obj.state);
struct komeda_component_state *c_st;
struct komeda_component *c;
-   u32 disabling_comps, id;
+   u32 id;
+   unsigned long disabling_comps;
  
  	WARN_ON(!old);
  
@@ -1262,7 +1263,6 @@ int komeda_release_unclaimed_resources(struct komeda_pipeline *pipe,

st = komeda_pipeline_get_new_state(pipe, drm_st);
else
st = komeda_pipeline_get_state_and_set_crtc(pipe, drm_st, NULL);
-


NIT: Random white space change


if (WARN_ON(IS_ERR_OR_NULL(st)))
return -EINVAL;
  
@@ -1287,7 +1287,8 @@ bool komeda_pipeline_disable(struct komeda_pipeline *pipe,

struct komeda_pipeline_state *old;
struct komeda_component *c;
struct komeda_component_state *c_st;
-   u32 id, disabling_comps = 0;
+   u32 id;
+   unsigned long disabling_comps;
  
  	old = komeda_pipeline_get_old_state(pipe, old_state);
  
@@ -1297,7 +1298,7 @@ bool komeda_pipeline_disable(struct komeda_pipeline *pipe,

disabling_comps = old->active_comps &
  pipe->standalone_disabled_comps;
  
-	DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, disabling_comps: 0x%x.\n",

+   DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, disabling_comps: 
0x%lx.\n",
 pipe->id, old->active_comps, disabling_comps);
  
  	dp_for_each_set_bit(id, disabling_comps) {

@@ -1331,13 +1332,14 @@ void komeda_pipeline_update(struct komeda_pipeline 
*pipe,
struct komeda_pipeline_state *new = priv_to_pipe_st(pipe->obj.state);
struct komeda_pipeline_state *old;
struct komeda_component *c;
-   u32 id, changed_comps = 0;
+   u32 id;
+   unsigned long changed_comps;
  
  	old = komeda_pipeline_get_old_state(pipe, old_state);
  
  	changed_comps = new->active_comps | old->active_comps;
  
-	DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, changed: 0x%x.\n",

+   DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, changed: 0x%lx.\n",
 pipe->id, new->active_comps, changed_comps);
  
  	dp_for_each_set_bit(id, changed_comps) {






Re: [PATCH 1/1] drm/scheduler: Job timeout handler returns status (v3)

2021-01-21 Thread Steven Price

On 20/01/2021 20:09, Luben Tuikov wrote:

This patch does not change current behaviour.

The driver's job timeout handler now returns
status indicating back to the DRM layer whether
the device (GPU) is no longer available, such as
after it's been unplugged, or whether all is
normal, i.e. current behaviour.

All drivers which make use of the
drm_sched_backend_ops' .timedout_job() callback
have been accordingly renamed and return the
would've-been default value of
DRM_GPU_SCHED_STAT_NOMINAL to restart the task's
timeout timer--this is the old behaviour, and is
preserved by this patch.

v2: Use enum as the status of a driver's job
 timeout callback method.

v3: Return scheduler/device information, rather
 than task information.

Cc: Alexander Deucher 
Cc: Andrey Grodzovsky 
Cc: Christian König 
Cc: Daniel Vetter 
Cc: Lucas Stach 
Cc: Russell King 
Cc: Christian Gmeiner 
Cc: Qiang Yu 
Cc: Rob Herring 
Cc: Tomeu Vizoso 
Cc: Steven Price 
Cc: Alyssa Rosenzweig 
Cc: Eric Anholt 
Reported-by: kernel test robot 
Signed-off-by: Luben Tuikov 
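
For reference, a minimal sketch of the callback shape this introduces
(function name illustrative):

static enum drm_gpu_sched_stat example_timedout_job(struct drm_sched_job *sched_job)
{
	/* ... attempt recovery / reset the device ... */

	return DRM_GPU_SCHED_STAT_NOMINAL;	/* device still operational */
}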


Acked-by: Steven Price 


Re: [PATCH] drm/komeda: Fix bit check to import to value of proper type

2021-01-21 Thread Steven Price

On 21/01/2021 12:22, Carsten Haitzler wrote:

On 1/20/21 3:44 PM, Steven Price wrote:

On 18/01/2021 14:20, carsten.haitz...@foss.arm.com wrote:

From: Carsten Haitzler 

Another issue found by KASAN. The bit finding is bueried inside the


NIT: s/bueried/buried/


Yup. typo not spotted by me. Thanks. Also - i spotted an accidental 
whitespace change along the way so can fix both in a new patch.



dp_for_each_set_bit() macro (that passes on to for_each_set_bit() that
calls the bit stuff. These bit functions want an unsigned long pointer
as input and just dumbly casting leads to out-of-bounds accesses.
This fixes that.


This seems like an underlying bug/lack of clear documentation for the
underlying find_*_bit() functions. dp_for_each_set_bit() tries to do the
right thing:


Correct. This was a general problem I spotted - the bit funcs were 
written to want an unsigned long but were being used on u32s by just 
casting the ptr to that type, and this did indeed have technical issues.



   #define dp_for_each_set_bit(bit, mask) \
   for_each_set_bit((bit), ((unsigned long *)&(mask)), 
sizeof(mask) * 8)


i.e. passing the actual size of the type. However because of the cast to
unsigned long (and subsequent accesses using that type) the compiler is
free to make accesses beyond the size of the variable (and indeed this
is completely broken on big-endian). The implementation actually
requires that the bitmap is really an array of unsigned long - no other
type will work.


Precisely. So a bit loose. The bit funcs are used widely enough, so just 
fixing this code here to pass in the expected type is probably the least 
disruptive fix.
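
(To make the failure mode concrete - a tiny stand-alone demo of what the 
bogus cast does on a 64-bit little-endian machine; 'sentinel' just 
stands in for whatever memory happens to follow the mask:)

#include <stdio.h>

int main(void)
{
	/* Layout mimics a u32 bitmask followed by unrelated data. */
	struct { unsigned int mask; unsigned int sentinel; } s = {
		0x1, 0xdeadbeef
	};

	/*
	 * The broken cast: the unsigned long load is 8 bytes wide, so it
	 * pulls 'sentinel' in as the upper half and "bits" 32-63 of the
	 * supposedly 32-bit mask appear set. When the mask sits at the
	 * end of an allocation, the same load is an out-of-bounds read -
	 * which is exactly what KASAN reports.
	 */
	unsigned long word = *(unsigned long *)&s.mask;

	printf("0x%lx\n", word);	/* 0xdeadbeef00000001 on x86-64 */
	return 0;
}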



So I think the fix should also include fixing the dp_for_each_set_bit()
macro - the cast is bogus as it stands.


Yup. Removing the cast does indeed find more badness that needs fixing. 
I'll do an updated patch with this.



Doing that I also get warnings on komeda_pipeline::avail_comps and
komeda_pipeline::supported_inputs - although I note there are other
bitmasks mentioned.

The other option is to fix dp_for_each_set_bit() itself using a little 
hack:


-   for_each_set_bit((bit), ((unsigned long *)&(mask)), 
sizeof(mask) * 8)
+   for_each_set_bit((bit), (&((unsigned long){mask})), 
sizeof(mask) * 8)


With that I don't think you need any other change as the mask is actually
in a new unsigned long on the stack.


That would be wonderful if it worked :). Unfortunately your above 
proposal leads to:


./drivers/gpu/drm/arm/display/komeda/../include/malidp_utils.h:17:27: 
error: lvalue required as unary ‘&’ operand
    17 |  for_each_set_bit((bit), (&((unsigned long)(mask))), 
sizeof(mask) * 8)

   |   ^


Looks like you didn't notice the subtle change above. My change uses 
braces ('{}') around 'mask' - I believe it's a GCC extension ("compound 
literals") and it creates an lvalue so you can then take the address of 
it...
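
(Checking now, compound literals are in fact standard C99 rather than a 
GCC extension. A minimal stand-alone illustration of why the braces 
matter:)

#include <stdio.h>

int main(void)
{
	unsigned int mask = 0x5;

	/*
	 * (unsigned long){mask} creates an unnamed unsigned long object
	 * (a C99 compound literal) initialised from 'mask', so taking
	 * its address is legal. &(unsigned long)mask, by contrast, tries
	 * to take the address of a cast expression (an rvalue), which is
	 * what produced the "lvalue required" error above.
	 */
	unsigned long *p = &((unsigned long){mask});

	printf("0x%lx\n", *p);	/* 0x5 */
	return 0;
}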


I'm not sure if it's a good approach to the problem or not. The 
alternative is to fix up various places to use unsigned longs so you can 
use the unwrapped for_each_set_bit().


Steve

./include/linux/bitops.h:34:30: note: in definition of macro 
‘for_each_set_bit’

    34 |   (bit) = find_next_bit((addr), (size), (bit) + 1))
   |  ^~~~
drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c:1243:2: note: 
in expansion of macro ‘dp_for_each_set_bit’

  1243 |  dp_for_each_set_bit(id, disabling_comps) {
   |  ^~~

Basically can't take address of an "unnamed local var". :| That is with:

#define dp_for_each_set_bit(bit, mask) \
     for_each_set_bit((bit), (&((unsigned long)(mask))), 
sizeof(mask) * 8)


I could try having the dp_for_each macro create new local vars on its 
own, a bit like:


#define dp_for_each_set_bit(bit, mask) \
     unsigned long __local_mask = mask; \
     for_each_set_bit((bit), (&__local_mask), sizeof(mask) * 8)

But we all know where this leads... (multiple bad places adding 
warnings, and potential pitfalls that then lead to ever more invasive 
changes to address, e.g. if code in future does "if (x) 
dp_for_each..."). I'd prefer to be able to write code more loosely (pass 
in any type and it just does the right thing), but I'm trying to balance 
this with least disruption and ugliness.
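
(One more idea along those lines - hide the local in a degenerate for 
loop, so the macro stays a single statement and survives an unbraced 
if/else. An untested sketch, and note it relies on C99 for-loop 
declarations, which the kernel's gnu89 dialect currently rejects, so 
it's illustrative only:)

/*
 * Untested sketch: the outer loop runs exactly once and exists purely
 * to give __local_mask a scope of its own. Because the whole construct
 * is still one statement, "if (x) dp_for_each_set_bit(...)" would keep
 * working.
 */
#define dp_for_each_set_bit(bit, mask) \
	for (unsigned long __local_mask = (mask), __once = 1; __once; __once = 0) \
		for_each_set_bit((bit), &__local_mask, sizeof(mask) * 8)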



Steve



Signed-off-by: Carsten Haitzler 
---
  .../drm/arm/display/komeda/komeda_pipeline_state.c | 14 --
  1 file changed, 8 insertions(+), 6 deletions(-)

diff --git 
a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c 
b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c

index e8b1e15312d8..f7dddb9f015d 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
@@ -1232,7 +1232,8 @@ komeda_pipeline_unbound_com

Re: [PATCH] drm/panfrost: Add governor data with pre-defined thresholds

2021-01-22 Thread Steven Price

On 21/01/2021 17:04, Lukasz Luba wrote:

The simple_ondemand devfreq governor uses two thresholds to decide about
the frequency change: upthreshold, downdifferential. These two tunables
change the behavior of the governor's decisions, e.g. how fast to increase
the frequency or how rapidly to limit it. This patch adds the needed
governor data, with threshold values gathered experimentally in different
workloads.

Signed-off-by: Lukasz Luba 
---
Hi all,

This patch aims to improve the panfrost performance in various workloads
(benchmarks, games). The simple_ondemand devfreq governor supports
tunables to tweak the behaviour of the internal algorithm. The default
values for these two thresholds (90 and 5) do not work well with panfrost.
These new settings should provide good performance, short latency for
rising the frequency due to rapid workload change and decent freq slow
down when the load is decaying. Based on frequency change statistics,
gathered during experiments, all frequencies are used, depending on
the load. This provides some power savings (statistically). The highest
frequency is also used when needed.

Example glmark2 results:
1. freq fixed to max: 153
2. these new thresholds values (w/ patch): 151
3. default governor values (w/o patch): 114


It would be good to state which platform this is on as this obviously 
can vary depending on the OPPs available.


Of course the real fix here would be to improve the utilisation of the 
GPU[1] so we actually hit the 90% threshold more easily (AFAICT kbase 
uses the default 90/5 thresholds), but this seems like a reasonable 
change for now.
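
(For reference, simple_ondemand jumps straight to the maximum frequency 
once the measured load exceeds upthreshold, holds the current frequency 
while the load stays within downdifferential of the threshold, and 
otherwise scales down proportionally to the load. A simplified 
paraphrase of devfreq_simple_ondemand_func() - not the exact upstream 
code, and with corner cases omitted:)

/* Simplified sketch of the simple_ondemand decision. */
static unsigned long pick_freq(unsigned long busy, unsigned long total,
			       unsigned long cur_freq, unsigned long max_freq,
			       unsigned int upthreshold,
			       unsigned int downdifferential)
{
	/* No statistics yet, or heavily loaded: go straight to max. */
	if (!total || busy * 100 > total * upthreshold)
		return max_freq;

	/* Load still within the hysteresis band: keep the current freq. */
	if (busy * 100 > total * (upthreshold - downdifferential))
		return cur_freq;

	/* Load has dropped: pick a frequency proportional to the load. */
	return (busy * cur_freq * 100) /
	       (total * (upthreshold - downdifferential / 2));
}

So with upthreshold at 45 rather than 90, the GPU ramps to maximum as 
soon as it is busy for more than 45% of the monitoring window, which is 
where the short rise latency described above comes from.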


Reviewed-by: Steven Price 

Thanks,

Steve

[1] When I get some time I need to rework the "queue jobs on the 
hardware"[2] patch I posted ages ago. Last time it actually caused a 
performance regression though...


[2] https://lore.kernel.org/r/20190816093107.30518-2-steven.price%40arm.com


In the future the devfreq framework will expose these two tunables via
sysfs, so they can be adjusted by middleware based on the currently
running workload (game, desktop, web browser, etc.). These new values
should be good enough, though.

Regards,
Lukasz Luba

  drivers/gpu/drm/panfrost/panfrost_devfreq.c | 10 +-
  drivers/gpu/drm/panfrost/panfrost_devfreq.h |  2 ++
  2 files changed, 11 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c 
b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index 56b3f5935703..7c5ffc81dce1 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -130,8 +130,16 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
panfrost_devfreq_profile.initial_freq = cur_freq;
dev_pm_opp_put(opp);
  
+	/*

+* Setup default thresholds for the simple_ondemand governor.
+* The values are chosen based on experiments.
+*/
+   pfdevfreq->gov_data.upthreshold = 45;
+   pfdevfreq->gov_data.downdifferential = 5;
+
devfreq = devm_devfreq_add_device(dev, &panfrost_devfreq_profile,
- DEVFREQ_GOV_SIMPLE_ONDEMAND, NULL);
+ DEVFREQ_GOV_SIMPLE_ONDEMAND,
+ &pfdevfreq->gov_data);
if (IS_ERR(devfreq)) {
DRM_DEV_ERROR(dev, "Couldn't initialize GPU devfreq\n");
ret = PTR_ERR(devfreq);
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.h 
b/drivers/gpu/drm/panfrost/panfrost_devfreq.h
index db6ea48e21f9..1e2a4de941aa 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.h
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.h
@@ -4,6 +4,7 @@
  #ifndef __PANFROST_DEVFREQ_H__
  #define __PANFROST_DEVFREQ_H__
  
+#include 

  #include 
  #include 
  
@@ -17,6 +18,7 @@ struct panfrost_devfreq {

struct devfreq *devfreq;
struct opp_table *regulators_opp_table;
struct thermal_cooling_device *cooling;
+   struct devfreq_simple_ondemand_data gov_data;
bool opp_of_table_added;
  
  	ktime_t busy_time;






Re: [PATCH] drm/panfrost: Add governor data with pre-defined thresholds

2021-01-22 Thread Steven Price

On 22/01/2021 10:11, Lukasz Luba wrote:



On 1/21/21 5:15 PM, Daniel Lezcano wrote:

On 21/01/2021 18:04, Lukasz Luba wrote:

The simple_ondemand devfreq governor uses two thresholds to decide about
the frequency change: upthreshold, downdifferential. These two tunables
change the behavior of the governor's decisions, e.g. how fast to increase
the frequency or how rapidly to limit it. This patch adds the needed
governor data, with threshold values gathered experimentally in different
workloads.

Signed-off-by: Lukasz Luba 
---
Hi all,

This patch aims to improve the panfrost performance in various workloads
(benchmarks, games). The simple_ondemand devfreq governor supports
tunables to tweak the behaviour of the internal algorithm. The default
values for these two thresholds (90 and 5) do not work well with panfrost.
These new settings should provide good performance, short latency for
rising the frequency due to rapid workload change and decent freq slow
down when the load is decaying. Based on frequency change statistics,
gathered during experiments, all frequencies are used, depending on
the load. This provides some power savings (statistically). The highest
frequency is also used when needed.

Example glmark2 results:
1. freq fixed to max: 153
2. these new thresholds values (w/ patch): 151
3. default governor values (w/o patch): 114

In the future the devfreq framework will expose these two tunables via
sysfs, so they can be adjusted by middleware based on the currently
running workload (game, desktop, web browser, etc.). These new values
should be good enough, though.

Regards,
Lukasz Luba

  drivers/gpu/drm/panfrost/panfrost_devfreq.c | 10 +-
  drivers/gpu/drm/panfrost/panfrost_devfreq.h |  2 ++
  2 files changed, 11 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c 
b/drivers/gpu/drm/panfrost/panfrost_devfreq.c

index 56b3f5935703..7c5ffc81dce1 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -130,8 +130,16 @@ int panfrost_devfreq_init(struct panfrost_device 
*pfdev)

  panfrost_devfreq_profile.initial_freq = cur_freq;
  dev_pm_opp_put(opp);
+    /*
+ * Setup default thresholds for the simple_ondemand governor.
+ * The values are chosen based on experiments.
+ */
+    pfdevfreq->gov_data.upthreshold = 45;
+    pfdevfreq->gov_data.downdifferential = 5;
+
  devfreq = devm_devfreq_add_device(dev, &panfrost_devfreq_profile,
-  DEVFREQ_GOV_SIMPLE_ONDEMAND, NULL);
+  DEVFREQ_GOV_SIMPLE_ONDEMAND,
+  &pfdevfreq->gov_data);
  if (IS_ERR(devfreq)) {
  DRM_DEV_ERROR(dev, "Couldn't initialize GPU devfreq\n");
  ret = PTR_ERR(devfreq);
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.h 
b/drivers/gpu/drm/panfrost/panfrost_devfreq.h

index db6ea48e21f9..1e2a4de941aa 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.h
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.h
@@ -4,6 +4,7 @@
  #ifndef __PANFROST_DEVFREQ_H__
  #define __PANFROST_DEVFREQ_H__
+#include 
  #include 
  #include 
@@ -17,6 +18,7 @@ struct panfrost_devfreq {
  struct devfreq *devfreq;
  struct opp_table *regulators_opp_table;
  struct thermal_cooling_device *cooling;
+    struct devfreq_simple_ondemand_data gov_data;
  bool opp_of_table_added;
  ktime_t busy_time;


I think it is simpler to do:

+static struct devfreq_simple_ondemand_data panfrost_ondemand_data = {
+   .upthreshold = 45,
+   .downdifferential = 5,
+};

[ ... ]

    devfreq = devm_devfreq_add_device(dev, &panfrost_devfreq_profile,
- DEVFREQ_GOV_SIMPLE_ONDEMAND,
NULL);
+ DEVFREQ_GOV_SIMPLE_ONDEMAND,
+ &panfrost_ondemand_data);




Yes, it's simpler. The driver would probably never have to serve two
GPUs. I've tried to keep this thing inside the panfrost struct,
forgetting about it.


The Juno platform with an FPGA attached is the only example I know of 
where a system has multiple Mali GPUs - so it can happen, but it's rare.


As it stands a static structure would work because the values are 
constant - but Lukasz mentioned that they would be exported in sysfs in 
the future, in which case they really should be part of the panfrost struct.


Ultimately having a (non-const) static struct like above would mean 
wasting a few bytes on systems with Panfrost loaded but no Mali GPU. 
Having it in struct panfrost means the cost is only for Mali. Admittedly 
it's only a few bytes in this case and often Panfrost will be a module.


Steve


Steven already reviewed the patch, so it can probably stay.
I will keep it in mind. Thank you for the comments.

Regards,
Lukasz



Re: [PATCH] drm/panfrost: Add governor data with pre-defined thresholds

2021-01-22 Thread Steven Price

On 22/01/2021 10:00, Lukasz Luba wrote:



On 1/22/21 8:21 AM, Steven Price wrote:

On 21/01/2021 17:04, Lukasz Luba wrote:

The simple_ondemand devfreq governor uses two thresholds to decide about
the frequency change: upthreshold, downdifferential. These two tunables
change the behavior of the governor's decisions, e.g. how fast to increase
the frequency or how rapidly to limit it. This patch adds the needed
governor data, with threshold values gathered experimentally in different
workloads.

Signed-off-by: Lukasz Luba 
---
Hi all,

This patch aims to improve the panfrost performance in various workloads
(benchmarks, games). The simple_ondemand devfreq governor supports
tunables to tweak the behaviour of the internal algorithm. The default
values for these two thresholds (90 and 5) do not work well with panfrost.
These new settings should provide good performance, short latency for
rising the frequency due to rapid workload change and decent freq slow
down when the load is decaying. Based on frequency change statistics,
gathered during experiments, all frequencies are used, depending on
the load. This provides some power savings (statistically). The highest
frequency is also used when needed.

Example glmark2 results:
1. freq fixed to max: 153
2. these new thresholds values (w/ patch): 151
3. default governor values (w/o patch): 114


It would be good to state which platform this is on as this obviously 
can vary depending on the OPPs available.


Sorry about that. It was Rock Pi 4B and I have mesa 20.2.4.



Of course the real fix here would be to improve the utilisation of the 
GPU[1] so we actually hit the 90% threshold more easily (AFAICT kbase 
uses the default 90/5 thresholds), but this seems like a reasonable 
change for now.


Agree, improving the scheduler would be the best option. I'll have a
look at that patch and at why it got ~10% lower performance. Maybe I'll
find something during testing.


I'm afraid it'll probably need a fair bit of work to rebase - things 
have changed around that code. I'm hoping that most of the problem was 
really around how Mesa was driving the GPU at that time and things 
should be better. The DDK (hacked to talk Panfrost ioctls) saw a 
performance improvement.


Let me know if you hit problems and need any help.



Reviewed-by: Steven Price 


Thank you for the review. I guess this patch would go through drm tree?


Yes, I'll push it to drm-misc-next later.

Thanks,

Steve


Regards,
Lukasz



Thanks,

Steve

[1] When I get some time I need to rework the "queue jobs on the 
hardware"[2] patch I posted ages ago. Last time it actually caused a 
performance regression though...


[2] 
https://lore.kernel.org/r/20190816093107.30518-2-steven.price%40arm.com






Re: [PATCH] drm/komeda: Fix bit check to import to value of proper type

2021-01-27 Thread Steven Price
NIT: This is the second version of this patch so it should have 
"[PATCH v2]" in the subject.


On 27/01/2021 12:34, carsten.haitz...@foss.arm.com wrote:

From: Carsten Haitzler 

Another issue found by KASAN. The bit finding is buried inside the
dp_for_each_set_bit() macro (that passes on to for_each_set_bit() that
calls the bit stuff. These bit functions want an unsigned long pointer
as input and just dumbly casting leads to out-of-bounds accesses.
This fixes that.

Signed-off-by: Carsten Haitzler 
---
  .../gpu/drm/arm/display/include/malidp_utils.h   | 10 --
  .../gpu/drm/arm/display/komeda/komeda_pipeline.c | 16 +++-
  .../arm/display/komeda/komeda_pipeline_state.c   | 13 -
  3 files changed, 27 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/arm/display/include/malidp_utils.h 
b/drivers/gpu/drm/arm/display/include/malidp_utils.h
index 3bc383d5bf73..8d289cd0b5b8 100644
--- a/drivers/gpu/drm/arm/display/include/malidp_utils.h
+++ b/drivers/gpu/drm/arm/display/include/malidp_utils.h
@@ -12,9 +12,15 @@
  
  #define has_bit(nr, mask)	(BIT(nr) & (mask))

  #define has_bits(bits, mask)  (((bits) & (mask)) == (bits))
-
+/*
+#define dp_for_each_set_bit(bit, mask) \
+   for_each_set_bit((bit), (&((unsigned long)(mask))), sizeof(mask) * 8)
+#define dp_for_each_set_bit(bit, mask) \
+   unsigned long __local_mask = mask; \
+   for_each_set_bit((bit), (&__local_mask), sizeof(mask) * 8)
+*/


Commented out code left in - please remove it.


  #define dp_for_each_set_bit(bit, mask) \
-   for_each_set_bit((bit), ((unsigned long *)&(mask)), sizeof(mask) * 8)
+   for_each_set_bit((bit), &(mask), sizeof(mask) * 8)


I'm not really sure if there's much point in this macro now. In practice 
the uses below are now getting the wrong length (because sizeof(mask) == 
sizeof(unsigned long) ) but we actually know the size is smaller in most 
cases, so we could pass a more appropriate value in.
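
(i.e. once the callers pass a real unsigned long, sizeof(mask) * 8 
evaluates to 64 on arm64 even though only the low 32 bits can ever be 
set. That's harmless, but an illustrative alternative would be to bake 
the true width in:)

/*
 * Illustrative sketch: pass the width of the underlying u32 bitmask
 * rather than the width of the unsigned long copy on the stack.
 */
#define dp_for_each_set_bit(bit, mask) \
	for_each_set_bit((bit), &(mask), 32)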


Other than that the changes below look correct to me.

Steve

  
  #define dp_wait_cond(__cond, __tries, __min_range, __max_range)	\

  ({\
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c 
b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
index 719a79728e24..a85c8a806334 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
@@ -46,8 +46,9 @@ void komeda_pipeline_destroy(struct komeda_dev *mdev,
  {
struct komeda_component *c;
int i;
+   unsigned long avail_comps = pipe->avail_comps;
  
-	dp_for_each_set_bit(i, pipe->avail_comps) {

+   dp_for_each_set_bit(i, avail_comps) {
c = komeda_pipeline_get_component(pipe, i);
komeda_component_destroy(mdev, c);
}
@@ -247,6 +248,7 @@ static void komeda_pipeline_dump(struct komeda_pipeline 
*pipe)
  {
struct komeda_component *c;
int id;
+   unsigned long avail_comps = pipe->avail_comps;
  
  	DRM_INFO("Pipeline-%d: n_layers: %d, n_scalers: %d, output: %s.\n",

 pipe->id, pipe->n_layers, pipe->n_scalers,
@@ -258,7 +260,7 @@ static void komeda_pipeline_dump(struct komeda_pipeline 
*pipe)
 pipe->of_output_links[1] ?
 pipe->of_output_links[1]->full_name : "none");
  
-	dp_for_each_set_bit(id, pipe->avail_comps) {

+   dp_for_each_set_bit(id, avail_comps) {
c = komeda_pipeline_get_component(pipe, id);
  
  		komeda_component_dump(c);

@@ -270,8 +272,9 @@ static void komeda_component_verify_inputs(struct 
komeda_component *c)
struct komeda_pipeline *pipe = c->pipeline;
struct komeda_component *input;
int id;
+   unsigned long supported_inputs = c->supported_inputs;
  
-	dp_for_each_set_bit(id, c->supported_inputs) {

+   dp_for_each_set_bit(id, supported_inputs) {
input = komeda_pipeline_get_component(pipe, id);
if (!input) {
c->supported_inputs &= ~(BIT(id));
@@ -302,8 +305,9 @@ static void komeda_pipeline_assemble(struct komeda_pipeline 
*pipe)
struct komeda_component *c;
struct komeda_layer *layer;
int i, id;
+   unsigned long avail_comps = pipe->avail_comps;
  
-	dp_for_each_set_bit(id, pipe->avail_comps) {

+   dp_for_each_set_bit(id, avail_comps) {
c = komeda_pipeline_get_component(pipe, id);
komeda_component_verify_inputs(c);
}
@@ -355,13 +359,15 @@ void komeda_pipeline_dump_register(struct komeda_pipeline 
*pipe,
  {
struct komeda_component *c;
u32 id;
+   unsigned long avail_comps;
  
  	seq_printf(sf, "\n Pipeline-%d ==\n", pipe->id);
  
  	if (pipe->funcs && pipe->funcs->dump_register)

pipe->funcs->dump_register(pipe, sf);
  
-	dp_for_each_set_bit(id, pipe->avail_comps) {

+   avail_comps = pipe->avail_comps;
+   dp_for_each

Re: [PATCH 1/3] drm/panfrost: Clear MMU irqs before handling the fault

2021-02-01 Thread Steven Price

On 01/02/2021 08:21, Boris Brezillon wrote:

When a fault is handled it will unblock the GPU which will continue
executing its shader and might fault almost immediately on a different
page. If we clear interrupts after handling the fault we might miss new
faults, so clear them before.

Cc: 
Fixes: 187d2929206e ("drm/panfrost: Add support for GPU heap allocations")
Signed-off-by: Boris Brezillon 


Good catch (although this oddly rings a bell - so perhaps it was me you 
discussed it with before).


Reviewed-by: Steven Price 


---
  drivers/gpu/drm/panfrost/panfrost_mmu.c | 4 ++--
  1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c 
b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 7c1b3481b785..904d63450862 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -593,6 +593,8 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, 
void *data)
access_type = (fault_status >> 8) & 0x3;
source_id = (fault_status >> 16);
  
+		mmu_write(pfdev, MMU_INT_CLEAR, mask);

+
/* Page fault only */
ret = -1;
if ((status & mask) == BIT(i) && (exception_type & 0xF8) == 
0xC0)
@@ -616,8 +618,6 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, 
void *data)
access_type, access_type_name(pfdev, 
fault_status),
source_id);
  
-		mmu_write(pfdev, MMU_INT_CLEAR, mask);

-
status &= ~mask;
}
  





Re: [PATCH 2/3] drm/panfrost: Don't try to map pages that are already mapped

2021-02-01 Thread Steven Price

On 01/02/2021 08:21, Boris Brezillon wrote:

We allocate 2MB chunks at a time, so it might appear that a page fault
has already been handled by a previous page fault when we reach
panfrost_mmu_map_fault_addr(). Bail out in that case to avoid mapping the
same area twice.

Cc: 
Fixes: 187d2929206e ("drm/panfrost: Add support for GPU heap allocations")
Signed-off-by: Boris Brezillon 


Reviewed-by: Steven Price 


---
  drivers/gpu/drm/panfrost/panfrost_mmu.c | 9 -
  1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c 
b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 904d63450862..21e552d1ac71 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -488,8 +488,14 @@ static int panfrost_mmu_map_fault_addr(struct 
panfrost_device *pfdev, int as,
}
bo->base.pages = pages;
bo->base.pages_use_count = 1;
-   } else
+   } else {
pages = bo->base.pages;
+   if (pages[page_offset]) {
+   /* Pages are already mapped, bail out. */
+   mutex_unlock(&bo->base.pages_lock);
+   goto out;
+   }
+   }
  
  	mapping = bo->base.base.filp->f_mapping;

mapping_set_unevictable(mapping);
@@ -522,6 +528,7 @@ static int panfrost_mmu_map_fault_addr(struct 
panfrost_device *pfdev, int as,
  
  	dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
  
+out:

panfrost_gem_mapping_put(bomapping);
  
  	return 0;






Re: [PATCH 3/3] drm/panfrost: Stay in the threaded MMU IRQ handler until we've handled all IRQs

2021-02-01 Thread Steven Price

On 01/02/2021 08:21, Boris Brezillon wrote:

Doing a hw-irq -> threaded-irq round-trip is counter-productive, stay
in the threaded irq handler as long as we can.

Signed-off-by: Boris Brezillon 


Looks fine to me, but I'm interested to know if you actually saw a 
performance improvement. Back-to-back MMU faults should (hopefully) be 
fairly uncommon.


Regardless:

Reviewed-by: Steven Price 


---
  drivers/gpu/drm/panfrost/panfrost_mmu.c | 7 +++
  1 file changed, 7 insertions(+)

diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c 
b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 21e552d1ac71..65bc20628c4e 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -580,6 +580,8 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, 
void *data)
u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT);
int i, ret;
  
+again:

+
for (i = 0; status; i++) {
u32 mask = BIT(i) | BIT(i + 16);
u64 addr;
@@ -628,6 +630,11 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int 
irq, void *data)
status &= ~mask;
}
  
+	/* If we received new MMU interrupts, process them before returning. */

+   status = mmu_read(pfdev, MMU_INT_RAWSTAT);
+   if (status)
+   goto again;
+
mmu_write(pfdev, MMU_INT_MASK, ~0);
return IRQ_HANDLED;
  };





Re: [PATCH 3/3] drm/panfrost: Stay in the threaded MMU IRQ handler until we've handled all IRQs

2021-02-01 Thread Steven Price

On 01/02/2021 12:59, Boris Brezillon wrote:

On Mon, 1 Feb 2021 12:13:49 +
Steven Price  wrote:


On 01/02/2021 08:21, Boris Brezillon wrote:

Doing a hw-irq -> threaded-irq round-trip is counter-productive, stay
in the threaded irq handler as long as we can.

Signed-off-by: Boris Brezillon 


Looks fine to me, but I'm interested to know if you actually saw a
performance improvement. Back-to-back MMU faults should (hopefully) be
fairly uncommon.


I actually didn't check the perf improvement or the actual number of
back-to-back MMU faults, but
dEQP-GLES31.functional.draw_indirect.compute_interop.large.drawelements_combined_grid_1000x1000_drawcount_5000
seemed to generate a few of those, so I thought it'd be good to
optimize that case given how trivial it is.


Fair enough! I was just a little concerned that Panfrost was somehow 
provoking enough interrupts that this was a measurable performance 
improvement.


I assume you'll push these to drm-misc-next (/fixes) as appropriate.

Thanks,

Steve



Regardless:

Reviewed-by: Steven Price 


---
   drivers/gpu/drm/panfrost/panfrost_mmu.c | 7 +++
   1 file changed, 7 insertions(+)

diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c 
b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 21e552d1ac71..65bc20628c4e 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -580,6 +580,8 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, 
void *data)
u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT);
int i, ret;
   
+again:

+
for (i = 0; status; i++) {
u32 mask = BIT(i) | BIT(i + 16);
u64 addr;
@@ -628,6 +630,11 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int 
irq, void *data)
status &= ~mask;
}
   
+	/* If we received new MMU interrupts, process them before returning. */

+   status = mmu_read(pfdev, MMU_INT_RAWSTAT);
+   if (status)
+   goto again;
+
mmu_write(pfdev, MMU_INT_MASK, ~0);
return IRQ_HANDLED;
   };
   








Re: [PATCH 3/3] drm/panfrost: Stay in the threaded MMU IRQ handler until we've handled all IRQs

2021-02-03 Thread Steven Price

On 03/02/2021 14:45, Rob Herring wrote:

On Mon, Feb 1, 2021 at 2:21 AM Boris Brezillon
 wrote:


Doing a hw-irq -> threaded-irq round-trip is counter-productive, stay
in the threaded irq handler as long as we can.

Signed-off-by: Boris Brezillon 
---
  drivers/gpu/drm/panfrost/panfrost_mmu.c | 7 +++
  1 file changed, 7 insertions(+)

diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c 
b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 21e552d1ac71..65bc20628c4e 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -580,6 +580,8 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, 
void *data)
 u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT);
 int i, ret;

+again:
+
 for (i = 0; status; i++) {
 u32 mask = BIT(i) | BIT(i + 16);
 u64 addr;
@@ -628,6 +630,11 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int 
irq, void *data)
 status &= ~mask;
 }

+   /* If we received new MMU interrupts, process them before returning. */
+   status = mmu_read(pfdev, MMU_INT_RAWSTAT);
+   if (status)
+   goto again;
+


Can't we avoid the goto? Change the for loop like this:

i = 0;
while (status = mmu_read(pfdev, MMU_INT_RAWSTAT)) {
 ...

 i++;
 if (i == 16)
 i = 0;
}


Well that reads from the RAWSTAT register excessively (which could be 
expensive at low GPU clock speeds), but we could do:


for(i = 0; status; i++) {
...

if (!status) {
i = 0;
status = mmu_read(pfdev, MMU_INT_RAWSTAT);
}
}

(or similar with a while() if you prefer). Of course we could even get 
rid of the 'i' loop altogether:


status = mmu_read(pfdev, MMU_INT_RAWSTAT);
while (status) {
int i = ffs(status | (status >> 16)) - 1;

... existing code ...

status &= ~mask;
if (!status)
status = mmu_read(pfdev, MMU_INT_RAWSTAT);
}

Steve


 mmu_write(pfdev, MMU_INT_MASK, ~0);
 return IRQ_HANDLED;
  };
--
2.26.2





Re: [PATCH] drm/komeda: Fix bit check to import to value of proper type

2021-02-05 Thread Steven Price

+CC the other Komeda maintainers

On 04/02/2021 13:11, carsten.haitz...@foss.arm.com wrote:

From: Carsten Haitzler 

Another issue found by KASAN. The bit finding is buried inside the
dp_for_each_set_bit() macro (that passes on to for_each_set_bit() that
calls the bit stuff. These bit functions want an unsigned long pointer
as input and just dumbly casting leads to out-of-bounds accesses.
This fixes that.

Signed-off-by: Carsten Haitzler 


Looks fine to me:

Reviewed-by: Steven Price 


---
  .../drm/arm/display/include/malidp_utils.h|  3 ---
  .../drm/arm/display/komeda/komeda_pipeline.c  | 16 +++-
  .../display/komeda/komeda_pipeline_state.c| 19 +++
  3 files changed, 22 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/arm/display/include/malidp_utils.h 
b/drivers/gpu/drm/arm/display/include/malidp_utils.h
index 3bc383d5bf73..49a1d7f3539c 100644
--- a/drivers/gpu/drm/arm/display/include/malidp_utils.h
+++ b/drivers/gpu/drm/arm/display/include/malidp_utils.h
@@ -13,9 +13,6 @@
  #define has_bit(nr, mask) (BIT(nr) & (mask))
  #define has_bits(bits, mask)  (((bits) & (mask)) == (bits))
  
-#define dp_for_each_set_bit(bit, mask) \

-   for_each_set_bit((bit), ((unsigned long *)&(mask)), sizeof(mask) * 8)
-
  #define dp_wait_cond(__cond, __tries, __min_range, __max_range)   \
  ({\
int num_tries = __tries;\
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c 
b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
index 719a79728e24..06c595378dda 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
@@ -46,8 +46,9 @@ void komeda_pipeline_destroy(struct komeda_dev *mdev,
  {
struct komeda_component *c;
int i;
+   unsigned long avail_comps = pipe->avail_comps;
  
-	dp_for_each_set_bit(i, pipe->avail_comps) {

+   for_each_set_bit(i, &avail_comps, 32) {
c = komeda_pipeline_get_component(pipe, i);
komeda_component_destroy(mdev, c);
}
@@ -247,6 +248,7 @@ static void komeda_pipeline_dump(struct komeda_pipeline 
*pipe)
  {
struct komeda_component *c;
int id;
+   unsigned long avail_comps = pipe->avail_comps;
  
  	DRM_INFO("Pipeline-%d: n_layers: %d, n_scalers: %d, output: %s.\n",

 pipe->id, pipe->n_layers, pipe->n_scalers,
@@ -258,7 +260,7 @@ static void komeda_pipeline_dump(struct komeda_pipeline 
*pipe)
 pipe->of_output_links[1] ?
 pipe->of_output_links[1]->full_name : "none");
  
-	dp_for_each_set_bit(id, pipe->avail_comps) {

+   for_each_set_bit(id, &avail_comps, 32) {
c = komeda_pipeline_get_component(pipe, id);
  
  		komeda_component_dump(c);

@@ -270,8 +272,9 @@ static void komeda_component_verify_inputs(struct 
komeda_component *c)
struct komeda_pipeline *pipe = c->pipeline;
struct komeda_component *input;
int id;
+   unsigned long supported_inputs = c->supported_inputs;
  
-	dp_for_each_set_bit(id, c->supported_inputs) {

+   for_each_set_bit(id, &supported_inputs, 32) {
input = komeda_pipeline_get_component(pipe, id);
if (!input) {
c->supported_inputs &= ~(BIT(id));
@@ -302,8 +305,9 @@ static void komeda_pipeline_assemble(struct komeda_pipeline 
*pipe)
struct komeda_component *c;
struct komeda_layer *layer;
int i, id;
+   unsigned long avail_comps = pipe->avail_comps;
  
-	dp_for_each_set_bit(id, pipe->avail_comps) {

+   for_each_set_bit(id, &avail_comps, 32) {
c = komeda_pipeline_get_component(pipe, id);
komeda_component_verify_inputs(c);
}
@@ -355,13 +359,15 @@ void komeda_pipeline_dump_register(struct komeda_pipeline 
*pipe,
  {
struct komeda_component *c;
u32 id;
+   unsigned long avail_comps;
  
  	seq_printf(sf, "\n Pipeline-%d ==\n", pipe->id);
  
  	if (pipe->funcs && pipe->funcs->dump_register)

pipe->funcs->dump_register(pipe, sf);
  
-	dp_for_each_set_bit(id, pipe->avail_comps) {

+   avail_comps = pipe->avail_comps;
+   for_each_set_bit(id, &avail_comps, 32) {
c = komeda_pipeline_get_component(pipe, id);
  
  		seq_printf(sf, "\n--%s--\n", c->name);

diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c 
b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
index e8b1e15312d8..176cdf411f9f 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
@@ -1232,14 +1232,15 @@ komeda_pipeline_unbound_components(struct 
k

Re: [PATCH v2 3/3] drm/panfrost: Stay in the threaded MMU IRQ handler until we've handled all IRQs

2021-02-05 Thread Steven Price

On 05/02/2021 11:17, Boris Brezillon wrote:

Doing a hw-irq -> threaded-irq round-trip is counter-productive, stay
in the threaded irq handler as long as we can.

v2:
* Rework the loop to avoid a goto

Signed-off-by: Boris Brezillon 


Reviewed-by: Steven Price 


---
  drivers/gpu/drm/panfrost/panfrost_mmu.c | 26 +
  1 file changed, 14 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c 
b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 21e552d1ac71..0581186ebfb3 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -578,22 +578,20 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int 
irq, void *data)
  {
struct panfrost_device *pfdev = data;
u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT);
-   int i, ret;
+   int ret;
  
-	for (i = 0; status; i++) {

-   u32 mask = BIT(i) | BIT(i + 16);
+   while (status) {
+   u32 as = ffs(status | (status >> 16)) - 1;
+   u32 mask = BIT(as) | BIT(as + 16);
u64 addr;
u32 fault_status;
u32 exception_type;
u32 access_type;
u32 source_id;
  
-		if (!(status & mask))

-   continue;
-
-   fault_status = mmu_read(pfdev, AS_FAULTSTATUS(i));
-   addr = mmu_read(pfdev, AS_FAULTADDRESS_LO(i));
-   addr |= (u64)mmu_read(pfdev, AS_FAULTADDRESS_HI(i)) << 32;
+   fault_status = mmu_read(pfdev, AS_FAULTSTATUS(as));
+   addr = mmu_read(pfdev, AS_FAULTADDRESS_LO(as));
+   addr |= (u64)mmu_read(pfdev, AS_FAULTADDRESS_HI(as)) << 32;
  
  		/* decode the fault status */

exception_type = fault_status & 0xFF;
@@ -604,8 +602,8 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, 
void *data)
  
  		/* Page fault only */

ret = -1;
-   if ((status & mask) == BIT(i) && (exception_type & 0xF8) == 
0xC0)
-   ret = panfrost_mmu_map_fault_addr(pfdev, i, addr);
+   if ((status & mask) == BIT(as) && (exception_type & 0xF8) == 
0xC0)
+   ret = panfrost_mmu_map_fault_addr(pfdev, as, addr);
  
  		if (ret)

/* terminal fault, print info about the fault */
@@ -617,7 +615,7 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, 
void *data)
"exception type 0x%X: %s\n"
"access type 0x%X: %s\n"
"source id 0x%X\n",
-   i, addr,
+   as, addr,
"TODO",
fault_status,
(fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE 
FAULT"),
@@ -626,6 +624,10 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int 
irq, void *data)
source_id);
  
  		status &= ~mask;

+
+   /* If we received new MMU interrupts, process them before 
returning. */
+   if (!status)
+   status = mmu_read(pfdev, MMU_INT_RAWSTAT);
}
  
  	mmu_write(pfdev, MMU_INT_MASK, ~0);






Re: [PATCH] drm/shmem-helper: Don't remove the offset in vm_area_struct pgoff

2021-02-18 Thread Steven Price

On 17/02/2021 16:59, Neil Roberts wrote:

When mmapping the shmem, it would previously adjust the pgoff in the
vm_area_struct to remove the fake offset that is added to be able to
identify the buffer. This patch removes the adjustment and makes the
fault handler use the vm_fault address to calculate the page offset
instead. Although using this address is apparently discouraged, several
DRM drivers seem to be doing it anyway.

The problem with removing the pgoff is that it prevents
drm_vma_node_unmap from working because that searches the mapping tree
by address. That doesn't work because all of the mappings are at offset
0. drm_vma_node_unmap is being used by the shmem helpers when purging
the buffer.

It looks like panfrost is using drm_gem_shmem_purge so this might fix a
potential bug there.

Signed-off-by: Neil Roberts 


As the test robot points out pgoff_t is unsigned, so the <0 test makes 
no sense. But apart from that it looks good to me.


I think this is worth a "Fixes:" line too - as you point out 
drm_vma_node_unmap() won't be working correctly - which means we're 
potentially leaving user space with pages pointing to freed memory - not 
good! 17acb9f35ed7 is my best guess at the commit that introduced this.
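
(Background, for anyone following along: every GEM object gets a unique 
page-aligned "fake" offset range from its drm_vma_node, and user space 
passes that offset to mmap() so the kernel can tell which buffer is 
meant - vm_pgoff initially encodes object identity, not a position 
inside the buffer. A rough sketch of the relationship, using real 
helpers but with illustrative wiring:)

/* Kernel side: the object's fake mmap offset, in bytes. */
u64 fake_off = drm_vma_node_offset_addr(&obj->vma_node);

/* User side: mmap() with that offset selects the object... */
void *map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		 drm_fd, fake_off);

/*
 * ...which is why zeroing vma->vm_pgoff after the lookup (as the old
 * code did) breaks drm_vma_node_unmap(): it searches the mapping tree
 * by that very offset and finds nothing at offset 0.
 */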


Steve


---
  drivers/gpu/drm/drm_gem_shmem_helper.c | 12 +++-
  1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c 
b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 9825c378dfa6..4b14157f1962 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -526,11 +526,16 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault 
*vmf)
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
loff_t num_pages = obj->size >> PAGE_SHIFT;
struct page *page;
+   pgoff_t page_offset;
  
-	if (vmf->pgoff >= num_pages || WARN_ON_ONCE(!shmem->pages))

+   /* We don't use vmf->pgoff since that has the fake offset */
+   page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
+
+   if (page_offset < 0 || page_offset >= num_pages ||
+   WARN_ON_ONCE(!shmem->pages))
return VM_FAULT_SIGBUS;
  
-	page = shmem->pages[vmf->pgoff];

+   page = shmem->pages[page_offset];
  
  	return vmf_insert_page(vma, vmf->address, page);

  }
@@ -581,9 +586,6 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct 
vm_area_struct *vma)
struct drm_gem_shmem_object *shmem;
int ret;
  
-	/* Remove the fake offset */

-   vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
-
if (obj->import_attach) {
/* Drop the reference drm_gem_mmap_obj() acquired.*/
drm_gem_object_put(obj);





Re: [PATCH] drm/shmem-helper: Don't remove the offset in vm_area_struct pgoff

2021-02-18 Thread Steven Price

On 18/02/2021 15:45, Alyssa Rosenzweig wrote:

Yeah plus Cc: stable for backporting and I think an igt or similar for
panfrost to check this works correctly would be pretty good too. Since
if it took us over 1 year to notice this bug it's pretty clear that
normal testing doesn't catch this. So very likely we'll break this
again.


Unfortunately there are a lot of kernel bugs which are noticed during actual
use (but not CI runs), some of which have never been fixed. I do know
the shrinker impl is buggy for us, if this is the fix I'm very happy.


I doubt this will actually "fix" anything - if I understand correctly 
then the sequence which is broken is:


 * allocate BO, mmap to CPU
 * madvise(DONTNEED)
 * trigger purge
 * try to access the BO memory

which is an invalid sequence for user space - the attempt to access 
memory should cause a SIGSEGV. However because drm_vma_node_unmap() is 
unable to find the mappings there may still be page table entries 
present which would provide access to memory the kernel has freed. Which 
is of course a big security hole and so this fix is needed.
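
(A hypothetical user-space reproducer for that sequence, as a starting 
point for an igt test - the structs and ioctls are from the panfrost 
uapi header, but treat the wiring as an untested sketch:)

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/panfrost_drm.h>	/* header path may vary with libdrm */

/*
 * Untested sketch: allocate a BO, map it, mark it DONTNEED, then (once
 * something has triggered a purge) poke the mapping. A correct kernel
 * delivers SIGBUS at the final write (the fault handler returns
 * VM_FAULT_SIGBUS); a broken one may leave page table entries pointing
 * at freed memory.
 */
static void reproduce(int drm_fd)
{
	struct drm_panfrost_create_bo create = { .size = 4096 };
	struct drm_panfrost_mmap_bo mmap_bo = { 0 };
	struct drm_panfrost_madvise madv = { 0 };
	volatile char *map;

	ioctl(drm_fd, DRM_IOCTL_PANFROST_CREATE_BO, &create);

	mmap_bo.handle = create.handle;
	ioctl(drm_fd, DRM_IOCTL_PANFROST_MMAP_BO, &mmap_bo);
	map = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
		   drm_fd, mmap_bo.offset);

	madv.handle = create.handle;
	madv.madv = PANFROST_MADV_DONTNEED;
	ioctl(drm_fd, DRM_IOCTL_PANFROST_MADVISE, &madv);

	/* ...apply memory pressure here so the shrinker purges the BO... */

	*map = 1;	/* expect SIGBUS, not a successful write */
}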


In what way do you find the shrinker impl buggy? I'm aware there's some 
dodgy locking (although I haven't worked out how to fix it) - but AFAICT 
it's more deadlock territory rather than lacking in locks. Are there 
correctness issues?



btw for testing shrinkers recommended way is to have a debugfs file
that just force-shrinks everything. That way you avoid all the trouble
that tend to happen when you drive a system close to OOM on linux, and
it's also much faster.


2nding this as a good idea.



Sounds like a good idea to me too. But equally I'm wondering whether the 
best (short term) solution is to actually disable the shrinker. I'm 
somewhat surprised that nobody has got fed up with the "Purging xxx 
bytes" message spam - which makes me think that most people are not 
hitting memory pressure to trigger the shrinker.


The shrinker on kbase caused a lot of grief - and the only way I managed 
to get that under control was by writing a static analysis tool for the 
locking, and by upsetting people by enforcing the (rather dumb) rules of 
the tool on the code base. I've been meaning to look at whether sparse 
can do a similar check of locks.


Sadly at the moment I'm struggling to find time to look at such things.

Steve


Re: [PATCH] drm/shmem-helper: Don't remove the offset in vm_area_struct pgoff

2021-02-18 Thread Steven Price

On 18/02/2021 16:38, Rob Herring wrote:

On Thu, Feb 18, 2021 at 10:15 AM Steven Price  wrote:


On 18/02/2021 15:45, Alyssa Rosenzweig wrote:

Yeah plus Cc: stable for backporting and I think an igt or similar for
panfrost to check this works correctly would be pretty good too. Since
if it took us over 1 year to notice this bug it's pretty clear that
normal testing doesn't catch this. So very likely we'll break this
again.


Unfortunately there are a lot of kernel bugs which are noticed during actual
use (but not CI runs), some of which have never been fixed. I do know
the shrinker impl is buggy for us, if this is the fix I'm very happy.


I doubt this will actually "fix" anything - if I understand correctly
then the sequence which is broken is:

   * allocate BO, mmap to CPU
   * madvise(DONTNEED)
   * trigger purge
   * try to access the BO memory

which is an invalid sequence for user space - the attempt to access
memory should cause a SIGSEGV. However because drm_vma_node_unmap() is
unable to find the mappings there may still be page table entries
present which would provide access to memory the kernel has freed. Which
is of course a big security hole and so this fix is needed.

In what way do you find the shrinker impl buggy? I'm aware there's some
dodgy locking (although I haven't worked out how to fix it) - but AFAICT
it's more deadlock territory rather than lacking in locks. Are there
correctness issues?


What's there was largely a result of getting lockdep happy.


btw for testing shrinkers recommended way is to have a debugfs file
that just force-shrinks everything. That way you avoid all the trouble
that tend to happen when you drive a system close to OOM on linux, and
it's also much faster.


2nding this as a good idea.



Sounds like a good idea to me too. But equally I'm wondering whether the
best (short term) solution is to actually disable the shrinker. I'm
somewhat surprised that nobody has got fed up with the "Purging xxx
bytes" message spam - which makes me think that most people are not
hitting memory pressure to trigger the shrinker.


If the shrinker is dodgy, then it's probably good to have the messages
to know if it ran.


The shrinker on kbase caused a lot of grief - and the only way I managed
to get that under control was by writing a static analysis tool for the
locking, and by upsetting people by enforcing the (rather dumb) rules of
the tool on the code base. I've been meaning to look at whether sparse
can do a similar check of locks.


Lockdep doesn't cover it?


Short answer: no ;)

The problem with lockdep is that you have to trigger the locking
scenario to get a warning out of it. For example you obviously won't get
any warnings about the shrinker without triggering the shrinker (which
means memory pressure since we don't have the debugfs file to trigger it).

I have to admit I'm not 100% sure I've seen any lockdep warnings based
on buffer objects recently. I can trigger them based on jobs:

-8<--
[  265.764474] ==
[  265.771380] WARNING: possible circular locking dependency detected
[  265.778294] 5.11.0-rc2+ #22 Tainted: GW
[  265.784148] --
[  265.791050] kworker/0:3/90 is trying to acquire lock:
[  265.796694] c0d982b0 (fs_reclaim){+.+.}-{0:0}, at: 
__fs_reclaim_acquire+0x0/0x38
[  265.804994]
[  265.804994] but task is already holding lock:
[  265.811513] c49a348c (&js->queue[j].lock){+.+.}-{3:3}, at: 
panfrost_reset+0x124/0x1cc [panfrost]
[  265.821375]
[  265.821375] which lock already depends on the new lock.
[  265.821375]
[  265.830524]
[  265.830524] the existing dependency chain (in reverse order) is:
[  265.838892]
[  265.838892] -> #2 (&js->queue[j].lock){+.+.}-{3:3}:
[  265.845996]mutex_lock_nested+0x18/0x20
[  265.850961]panfrost_scheduler_stop+0x1c/0x94 [panfrost]
[  265.857590]panfrost_reset+0x54/0x1cc [panfrost]
[  265.863441]process_one_work+0x238/0x51c
[  265.868503]worker_thread+0x22c/0x2e0
[  265.873270]kthread+0x128/0x138
[  265.877455]ret_from_fork+0x14/0x38
[  265.882028]0x0
[  265.884657]
[  265.884657] -> #1 (dma_fence_map){}-{0:0}:
[  265.891277]dma_resv_lockdep+0x1b4/0x290
[  265.896339]do_one_initcall+0x5c/0x2e8
[  265.901206]kernel_init_freeable+0x184/0x1d4
[  265.906651]kernel_init+0x8/0x11c
[  265.911029]ret_from_fork+0x14/0x38
[  265.915610]0x0
[  265.918247]
[  265.918247] -> #0 (fs_reclaim){+.+.}-{0:0}:
[  265.924579]lock_acquire+0x3a4/0x45c
[  265.929260]__fs_reclaim_acquire+0x28/0x38
[  265.934523]slab_pre_alloc_hook.constprop.28+0x1c/0x64
[  265.940948]kmem_cache_alloc_trace+0x38/0x114
[  265.946493]panfrost_job_r

Re: [PATCH] drm/shmem-helper: Don't remove the offset in vm_area_struct pgoff

2021-02-19 Thread Steven Price

On 18/02/2021 18:20, Daniel Vetter wrote:

On Thu, Feb 18, 2021 at 6:16 PM Rob Herring  wrote:


On Thu, Feb 18, 2021 at 10:51 AM Steven Price  wrote:


On 18/02/2021 16:38, Rob Herring wrote:

On Thu, Feb 18, 2021 at 10:15 AM Steven Price  wrote:


On 18/02/2021 15:45, Alyssa Rosenzweig wrote:

Yeah plus Cc: stable for backporting and I think an igt or similar for
panfrost to check this works correctly would be pretty good too. Since
if it took us over 1 year to notice this bug it's pretty clear that
normal testing doesn't catch this. So very likely we'll break this
again.


Unfortunately there are a lot of kernel bugs which are noticed during actual
use (but not CI runs), some of which have never been fixed. I do know
the shrinker impl is buggy for us, if this is the fix I'm very happy.


I doubt this will actually "fix" anything - if I understand correctly
then the sequence which is broken is:

* allocate BO, mmap to CPU
* madvise(DONTNEED)
* trigger purge
* try to access the BO memory

which is an invalid sequence for user space - the attempt to access
memory should cause a SIGSEGV. However because drm_vma_node_unmap() is
unable to find the mappings there may still be page table entries
present which would provide access to memory the kernel has freed. Which
is of course a big security hole and so this fix is needed.

In what way do you find the shrinker impl buggy? I'm aware there's some
dodgy locking (although I haven't worked out how to fix it) - but AFAICT
it's more deadlock territory rather than lacking in locks. Are there
correctness issues?


What's there was largely a result of getting lockdep happy.


btw for testing shrinkers recommended way is to have a debugfs file
that just force-shrinks everything. That way you avoid all the trouble
that tend to happen when you drive a system close to OOM on linux, and
it's also much faster.


2nding this as a good idea.



Sounds like a good idea to me too. But equally I'm wondering whether the
best (short term) solution is to actually disable the shrinker. I'm
somewhat surprised that nobody has got fed up with the "Purging xxx
bytes" message spam - which makes me think that most people are not
hitting memory pressure to trigger the shrinker.


If the shrinker is dodgy, then it's probably good to have the messages
to know if it ran.


The shrinker on kbase caused a lot of grief - and the only way I managed
to get that under control was by writing a static analysis tool for the
locking, and by upsetting people by enforcing the (rather dumb) rules of
the tool on the code base. I've been meaning to look at whether sparse
can do a similar check of locks.


Lockdep doesn't cover it?


Short answer: no ;)


It's pretty good actually, if you correctly annotate things up.


I agree - it's pretty good, the problem is you need reasonable test 
coverage, and getting good test coverage of shrinkers is hard.



The problem with lockdep is that you have to trigger the locking
scenario to get a warning out of it. For example you obviously won't get
any warnings about the shrinker without triggering the shrinker (which
means memory pressure since we don't have the debugfs file to trigger it).


Actually, you don't need debugfs. Writing to /proc/sys/vm/drop_caches
will do it. Though maybe there's other code path scenarios that
wouldn't cover.


Huh didn't know, but it's a bit a shotgun, plus it doesn't use
fs_reclaim shrinker annotations, which means you don't have lockdep
checks. I think at least, would need some deadlock and testing.


The big problem with this sort of method for triggering the shrinkers is 
that they are called without (many) locks held. Whereas it's entirely 
possible for a shrinker to be called at (almost) any allocation in the 
kernel.


Admittedly the Panfrost shrinkers are fairly safe - because most things 
are xxx_trylock(). kbase avoids trylock which makes reclaim more 
reliable, but means deadlocks are much easier.
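
(The pattern in question, roughly as panfrost's scan callback does it - 
a simplified sketch rather than the verbatim driver code:)

/*
 * Simplified sketch of a trylock-based shrinker scan. Reclaim can be
 * entered from almost any allocation site, so blocking on a lock that
 * an allocating path may already hold risks deadlock; trylock trades
 * reclaim reliability (we may free nothing this pass) for safety.
 */
static unsigned long demo_shrinker_scan(struct shrinker *shrinker,
					struct shrink_control *sc)
{
	struct panfrost_device *pfdev =
		container_of(shrinker, struct panfrost_device, shrinker);
	unsigned long freed = 0;

	if (!mutex_trylock(&pfdev->shrinker_lock))
		return SHRINK_STOP;	/* contended: bail rather than block */

	/*
	 * ...walk the madvised list, purging objects whose own locks can
	 * also be taken with trylock, until sc->nr_to_scan is reached,
	 * accumulating the page count in 'freed'...
	 */

	mutex_unlock(&pfdev->shrinker_lock);
	return freed;
}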





I have to admit I'm not 100% sure I've seen any lockdep warnings based
on buffer objects recently. I can trigger them based on jobs:


[snip]


Certainly here the mutex causing the problem is the shrinker_lock!

The above is triggered by chucking a whole ton of jobs which
fault at the GPU.

Sadly I haven't found time to work out how to untangle the locks.


They are tricky because pretty much any memory allocation can trigger
things as I recall.


The above should only be possible with my dma_fence annotations, and
yes the point to bugs in the drm/scheduler. They shouldn't matter for
panfrost, and those patches aren't in upstream yet.


Yes that's on a (random version of) drm-misc - just what I happened to 
have built recently. Good news if that's not actually Panfrost's bug. I 
haven't had the time to track do

Re: [PATCH] drm/shmem-helper: Don't remove the offset in vm_area_struct pgoff

2021-02-19 Thread Steven Price

On 19/02/2021 15:13, Daniel Vetter wrote:

On Fri, Feb 19, 2021 at 01:36:06PM +, Steven Price wrote:

On 18/02/2021 18:20, Daniel Vetter wrote:

On Thu, Feb 18, 2021 at 6:16 PM Rob Herring  wrote:


On Thu, Feb 18, 2021 at 10:51 AM Steven Price  wrote:


On 18/02/2021 16:38, Rob Herring wrote:

On Thu, Feb 18, 2021 at 10:15 AM Steven Price  wrote:


On 18/02/2021 15:45, Alyssa Rosenzweig wrote:

Yeah plus Cc: stable for backporting and I think an igt or similar for
panfrost to check this works correctly would be pretty good too. Since
if it took us over 1 year to notice this bug it's pretty clear that
normal testing doesn't catch this. So very likely we'll break this
again.


Unfortunately there are a lot of kernel bugs which are noticed during actual
use (but not CI runs), some of which have never been fixed. I do know
the shrinker impl is buggy for us, if this is the fix I'm very happy.


I doubt this will actually "fix" anything - if I understand correctly
then the sequence which is broken is:

 * allocate BO, mmap to CPU
 * madvise(DONTNEED)
 * trigger purge
 * try to access the BO memory

which is an invalid sequence for user space - the attempt to access
memory should cause a SIGSEGV. However because drm_vma_node_unmap() is
unable to find the mappings there may still be page table entries
present which would provide access to memory the kernel has freed. Which
is of course a big security hole and so this fix is needed.

In what way do you find the shrinker impl buggy? I'm aware there's some
dodgy locking (although I haven't worked out how to fix it) - but AFAICT
it's more deadlock territory rather than lacking in locks. Are there
correctness issues?


What's there was largely a result of getting lockdep happy.


btw for testing shrinkers recommended way is to have a debugfs file
that just force-shrinks everything. That way you avoid all the trouble
that tend to happen when you drive a system close to OOM on linux, and
it's also much faster.


2nding this as a good idea.



Sounds like a good idea to me too. But equally I'm wondering whether the
best (short term) solution is to actually disable the shrinker. I'm
somewhat surprised that nobody has got fed up with the "Purging xxx
bytes" message spam - which makes me think that most people are not
hitting memory pressure to trigger the shrinker.


If the shrinker is dodgy, then it's probably good to have the messages
to know if it ran.


The shrinker on kbase caused a lot of grief - and the only way I managed
to get that under control was by writing a static analysis tool for the
locking, and by upsetting people by enforcing the (rather dumb) rules of
the tool on the code base. I've been meaning to look at whether sparse
can do a similar check of locks.


Lockdep doesn't cover it?


Short answer: no ;)


It's pretty good actually, if you correctly annotate things up.


I agree - it's pretty good, the problem is you need reasonable test
coverage, and getting good test coverage of shrinkers is hard.


The problem with lockdep is that you have to trigger the locking
scenario to get a warning out of it. For example you obviously won't get
any warnings about the shrinker without triggering the shrinker (which
means memory pressure since we don't have the debugfs file to trigger it).


Actually, you don't need debugfs. Writing to /proc/sys/vm/drop_caches
will do it. Though maybe there's other code path scenarios that
wouldn't cover.


Huh didn't know, but it's a bit a shotgun, plus it doesn't use
fs_reclaim shrinker annotations, which means you don't have lockdep
checks. I think at least, would need some deadlock and testing.


The big problem with this sort of method for triggering the shrinkers is
that they are called without (many) locks held. Whereas it's entirely
possible for a shrinker to be called at (almost) any allocation in the
kernel.

Admittedly the Panfrost shrinkers are fairly safe - because most things are
xxx_trylock(). kbase avoids trylock which makes reclaim more reliable, but
means deadlocks are much easier.


This is why you need the fs_reclaim annotation. With that lockdep can
connect the dots. See also might_alloc() annotations I've added in 5.11 or
so.

Validating shrinkers for deadlocks is actually not that hard, you just
need the debugfs interface to run your shrinker at will under the
fs_reclaim_acquire/release annotations. You do _not_ need to hit the full
combinatorial test matrix of making sure that your shrinker is called in
any possible place where memory is allocated.


Cool - I hadn't looked at that code before, but it does look like it 
should pick up the problem cases. I wish that had existed back when I 
was dealing with kbase! :)



I have to admit I'm not 100% sure I've seen any lockdep warnings based
on buffer objects recently

Re: [PATCH 1/2] drm/shmem-helper: Check for purged buffers in fault handler

2021-02-24 Thread Steven Price

On 23/02/2021 15:51, Neil Roberts wrote:

When a buffer is madvised as not needed and then purged, any attempts to
access the buffer from user-space should cause a bus fault. This patch
adds a check for that.

Cc: sta...@vger.kernel.org
Fixes: 17acb9f35ed7 ("drm/shmem: Add madvise state and purge helpers")
Signed-off-by: Neil Roberts 


Reviewed-by: Steven Price 


---
  drivers/gpu/drm/drm_gem_shmem_helper.c | 18 ++
  1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 9825c378dfa6..b26139b1dc35 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -525,14 +525,24 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
 	loff_t num_pages = obj->size >> PAGE_SHIFT;
+	vm_fault_t ret;
 	struct page *page;
 
-	if (vmf->pgoff >= num_pages || WARN_ON_ONCE(!shmem->pages))
-		return VM_FAULT_SIGBUS;
+	mutex_lock(&shmem->pages_lock);
+
+	if (vmf->pgoff >= num_pages ||
+	    WARN_ON_ONCE(!shmem->pages) ||
+	    shmem->madv < 0) {
+		ret = VM_FAULT_SIGBUS;
+	} else {
+		page = shmem->pages[vmf->pgoff];
 
-	page = shmem->pages[vmf->pgoff];
+		ret = vmf_insert_page(vma, vmf->address, page);
+	}
 
-	return vmf_insert_page(vma, vmf->address, page);
+	mutex_unlock(&shmem->pages_lock);
+
+	return ret;
 }
 
 static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)






Re: [PATCH v2 2/2] drm/shmem-helper: Don't remove the offset in vm_area_struct pgoff

2021-02-24 Thread Steven Price

On 23/02/2021 15:51, Neil Roberts wrote:

When mmapping the shmem, it would previously adjust the pgoff in the
vm_area_struct to remove the fake offset that is added to be able to
identify the buffer. This patch removes the adjustment and makes the
fault handler use the vm_fault address to calculate the page offset
instead. Although using this address is apparently discouraged, several
DRM drivers seem to be doing it anyway.

The problem with removing the pgoff is that it prevents
drm_vma_node_unmap from working because that searches the mapping tree
by address. That doesn't work because all of the mappings are at offset
0. drm_vma_node_unmap is being used by the shmem helpers when purging
the buffer.

This fixes a bug in Panfrost which is using drm_gem_shmem_purge. Without
this the mapping for the purged buffer can still be accessed, which might
mean it would access random pages from other buffers.

v2: Don't check whether the unsigned page_offset is less than 0.

Cc: sta...@vger.kernel.org
Fixes: 17acb9f35ed7 ("drm/shmem: Add madvise state and purge helpers")
Signed-off-by: Neil Roberts 


Reviewed-by: Steven Price 


---
  drivers/gpu/drm/drm_gem_shmem_helper.c | 11 ++-
  1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index b26139b1dc35..5b5c095e86a9 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -527,15 +527,19 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
 	loff_t num_pages = obj->size >> PAGE_SHIFT;
 	vm_fault_t ret;
 	struct page *page;
+	pgoff_t page_offset;
+
+	/* We don't use vmf->pgoff since that has the fake offset */
+	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 
 	mutex_lock(&shmem->pages_lock);
 
-	if (vmf->pgoff >= num_pages ||
+	if (page_offset >= num_pages ||
 	    WARN_ON_ONCE(!shmem->pages) ||
 	    shmem->madv < 0) {
 		ret = VM_FAULT_SIGBUS;
 	} else {
-		page = shmem->pages[vmf->pgoff];
+		page = shmem->pages[page_offset];
 
 		ret = vmf_insert_page(vma, vmf->address, page);
 	}
@@ -591,9 +595,6 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
 	struct drm_gem_shmem_object *shmem;
 	int ret;
 
-	/* Remove the fake offset */
-	vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
-
 	if (obj->import_attach) {
 		/* Drop the reference drm_gem_mmap_obj() acquired.*/
 		drm_gem_object_put(obj);




Re: [PATCH] devfreq: Register devfreq as a cooling device

2021-03-05 Thread Steven Price

On 04/03/2021 12:50, Daniel Lezcano wrote:

Currently the default behavior is for each devfreq backend to manually
register itself as a devfreq cooling device.

There are not so many of them, and it actually makes more sense to
register the cooling device when the devfreq device is added.

Consequently, every devfreq becomes a cooling device, like cpufreq is.

A devfreq registered as a cooling device cannot mitigate a thermal zone
if it is not bound to that zone. Thus, current configurations are not
impacted by this change.

Signed-off-by: Daniel Lezcano 
---
  drivers/devfreq/devfreq.c   |  8 
  drivers/gpu/drm/lima/lima_devfreq.c | 13 -
  drivers/gpu/drm/lima/lima_devfreq.h |  2 --
  drivers/gpu/drm/msm/msm_gpu.c   | 11 ---
  drivers/gpu/drm/msm/msm_gpu.h   |  2 --
  drivers/gpu/drm/panfrost/panfrost_devfreq.c | 13 -
  include/linux/devfreq.h |  3 +++
  7 files changed, 11 insertions(+), 41 deletions(-)


[...]

diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index 56b3f5935703..2cb6300de1f1 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -3,7 +3,6 @@
 
 #include <linux/clk.h>
 #include <linux/devfreq.h>
-#include <linux/devfreq_cooling.h>
 #include <linux/platform_device.h>
 #include <linux/pm_opp.h>
 
@@ -90,7 +89,6 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
 	struct device *dev = &pfdev->pdev->dev;
 	struct devfreq *devfreq;
 	struct opp_table *opp_table;
-	struct thermal_cooling_device *cooling;
 	struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;
 
 	opp_table = dev_pm_opp_set_regulators(dev, pfdev->comp->supply_names,
@@ -139,12 +137,6 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
 	}
 	pfdevfreq->devfreq = devfreq;
 
-	cooling = devfreq_cooling_em_register(devfreq, NULL);
-	if (IS_ERR(cooling))
-		DRM_DEV_INFO(dev, "Failed to register cooling device\n");
-	else
-		pfdevfreq->cooling = cooling;
-
 	return 0;
 
 err_fini:
@@ -156,11 +148,6 @@ void panfrost_devfreq_fini(struct panfrost_device *pfdev)
 {
 	struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;
 
-	if (pfdevfreq->cooling) {
-		devfreq_cooling_unregister(pfdevfreq->cooling);
-		pfdevfreq->cooling = NULL;
-	}
-
 	if (pfdevfreq->opp_of_table_added) {
 		dev_pm_opp_of_remove_table(&pfdev->pdev->dev);
 		pfdevfreq->opp_of_table_added = false;

You've removed all references to pfdevfreq->cooling, so please also 
remove the member from struct panfrost_devfreq (as already done with 
lima and msm).


Thanks,

Steve


Re: [PATCH 0/2] Fix purging buffers in the shmem helpers

2021-03-05 Thread Steven Price

On 23/02/2021 15:51, Neil Roberts wrote:

These two patches fix a problem with the madvise purging code for the
shmem helpers where the mmaping for a purged buffer wouldn't get
invalidated correctly. This presumably ends up as a security hole
where the mapping can be accessed from user-space to read and write
random pages from other buffers. This is currently affecting Panfrost.
The second patch is a v2 from a patch that was sent standalone.

There is a WIP IGT test for Panfrost which demonstrates the bug here:

https://gitlab.freedesktop.org/nroberts/igt-gpu-tools/-/commits/panfrost-purgemap/

Neil Roberts (2):
   drm/shmem-helper: Check for purged buffers in fault handler
   drm/shmem-helper: Don't remove the offset in vm_area_struct pgoff

  drivers/gpu/drm/drm_gem_shmem_helper.c | 25 ++---
  1 file changed, 18 insertions(+), 7 deletions(-)



Pushed to drm-misc-fixes

Thanks,

Steve


Re: [PATCH v5 3/4] PM / devfreq: panfrost: Use devfreq cooling device registration

2021-03-08 Thread Steven Price

On 08/03/2021 09:16, Daniel Lezcano wrote:

The devfreq core code is able to register the devfreq device as a
cooling device if the 'is_cooling_device' flag is set in the profile.

Use this flag and remove the cooling device registering code.

Tested on rock960.

Signed-off-by: Daniel Lezcano 


Reviewed-by: Steven Price 


---
  drivers/gpu/drm/panfrost/panfrost_devfreq.c | 14 +-
  drivers/gpu/drm/panfrost/panfrost_devfreq.h |  3 ---
  2 files changed, 1 insertion(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index 56b3f5935703..4d96edf1bc54 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -3,7 +3,6 @@
 
 #include <linux/clk.h>
 #include <linux/devfreq.h>
-#include <linux/devfreq_cooling.h>
 #include <linux/platform_device.h>
 #include <linux/pm_opp.h>
 
@@ -80,6 +79,7 @@ static struct devfreq_dev_profile panfrost_devfreq_profile = {
 	.polling_ms = 50, /* ~3 frames */
 	.target = panfrost_devfreq_target,
 	.get_dev_status = panfrost_devfreq_get_dev_status,
+	.is_cooling_device = true,
 };
 
 int panfrost_devfreq_init(struct panfrost_device *pfdev)
@@ -90,7 +90,6 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
 	struct device *dev = &pfdev->pdev->dev;
 	struct devfreq *devfreq;
 	struct opp_table *opp_table;
-	struct thermal_cooling_device *cooling;
 	struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;
 
 	opp_table = dev_pm_opp_set_regulators(dev, pfdev->comp->supply_names,
@@ -139,12 +138,6 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
 	}
 	pfdevfreq->devfreq = devfreq;
 
-	cooling = devfreq_cooling_em_register(devfreq, NULL);
-	if (IS_ERR(cooling))
-		DRM_DEV_INFO(dev, "Failed to register cooling device\n");
-	else
-		pfdevfreq->cooling = cooling;
-
 	return 0;
 
 err_fini:
@@ -156,11 +149,6 @@ void panfrost_devfreq_fini(struct panfrost_device *pfdev)
 {
 	struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;
 
-	if (pfdevfreq->cooling) {
-		devfreq_cooling_unregister(pfdevfreq->cooling);
-		pfdevfreq->cooling = NULL;
-	}
-
 	if (pfdevfreq->opp_of_table_added) {
 		dev_pm_opp_of_remove_table(&pfdev->pdev->dev);
 		pfdevfreq->opp_of_table_added = false;
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.h b/drivers/gpu/drm/panfrost/panfrost_devfreq.h
index db6ea48e21f9..470f5c974703 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.h
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.h
@@ -9,14 +9,11 @@
 
 struct devfreq;
 struct opp_table;
-struct thermal_cooling_device;
-
 struct panfrost_device;
 
 struct panfrost_devfreq {
 	struct devfreq *devfreq;
 	struct opp_table *regulators_opp_table;
-	struct thermal_cooling_device *cooling;
 	bool opp_of_table_added;
 
 	ktime_t busy_time;






Re: [RFC PATCH 0/7] drm/panfrost: Add a new submit ioctl

2021-03-11 Thread Steven Price

On 11/03/2021 09:25, Boris Brezillon wrote:

Hello,

I've been playing with Vulkan lately and struggled quite a bit to
implement VkQueueSubmit with the submit ioctl we have. There are
several limiting factors that can be worked around if we really have to,
but I think it'd be much easier and future-proof if we introduce a new
ioctl that addresses the current limitations:


Hi Boris,

I think what you've proposed is quite reasonable; some detailed comments
on your points below.




1/ There can only be one out_sync, but Vulkan might ask us to signal
several VkSemaphores and possibly one VkFence too, both of those
being based on sync objects in my PoC. Making out_sync an array of
syncobjs to attach the render_done fence to would make that possible.
The other option would be to collect syncobj updates in userspace
in a separate thread and propagate those updates to all
semaphores+fences waiting on those events (I think the v3dv driver
does something like that, but I didn't spend enough time studying
the code to be sure, so I might be wrong).


You should be able to avoid the separate thread to propagate by having a 
proxy object in user space that maps between the one outsync of the job 
and the possibly many Vulkan objects. But I've had this argument before 
with the DDK... and the upshot of it was that the Vulkan API is
unnecessarily complex here and makes this really hard to do in practice.
So I agree adding this capability to the kernel is likely the best approach.
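
For reference, the kind of userspace fan-out I mean could look roughly
like this with the libdrm syncobj helpers (a sketch only - the function
name is invented and error handling is trimmed):

#include <stdint.h>
#include <xf86drm.h>

/* Copy the submit's single out_sync fence into each syncobj backing a
 * VkSemaphore/VkFence, without a separate propagation thread. */
static int fan_out_submit_fence(int fd, uint32_t out_sync,
				const uint32_t *sem_syncobjs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		/* Binary syncobjs use point 0; the fence may still be
		 * unsignaled at this point, which is fine. */
		ret = drmSyncobjTransfer(fd, sem_syncobjs[i], 0,
					 out_sync, 0, 0);
		if (ret)
			return ret;
	}

	return 0;
}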



2/ Queued jobs might be executed out-of-order (unless they have
explicit/implicit deps between them), and Vulkan asks that the out
fence be signaled when all jobs are done. Timeline syncobjs are a
good match for that use case. All we need to do is pass the same
fence syncobj to all jobs being attached to a single QueueSubmit
request, but a different point on the timeline. The syncobj
timeline wait does the rest and guarantees that we've reached a
given timeline point (IOW, all jobs before that point are done)
before declaring the fence as signaled.
One alternative would be to have dummy 'synchronization' jobs that
don't actually execute anything on the GPU but declare a dependency
on all other jobs that are part of the QueueSubmit request, and
signal the out fence (the scheduler would do most of the work for
us, all we have to do is support NULL job heads and signal the
fence directly when that happens instead of queueing the job).


I have to admit to being rather hazy on the details of timeline 
syncobjs, but I thought there was a requirement that the timeline moves 
monotonically. I.e. if you have multiple jobs signalling the same 
syncobj just with different points, then AFAIU the API requires that the 
points are triggered in order.


So I'm not sure that you've actually fixed this point - you either need 
to force an order (in which case the last job can signal the Vulkan 
fence) or you still need a dummy job to do the many-to-one dependency.


Or I may have completely misunderstood timeline syncobjs - definitely a 
possibility :)
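
For concreteness, my understanding of the proposed usage is roughly the
following (a sketch: struct job and submit_job() stand in for the new
ioctl and are not real API):

#include <stdint.h>
#include <xf86drm.h>

/* Hypothetical wrapper around the proposed submit ioctl. */
extern int submit_job(int fd, struct job *job,
		      uint32_t timeline, uint64_t point);

static void queue_submit_example(int fd, struct job *jobs, unsigned num_jobs)
{
	uint32_t timeline;
	uint64_t point = 0;
	unsigned i;

	drmSyncobjCreate(fd, 0, &timeline);

	/* Attach every job's done fence to the same timeline syncobj,
	 * one point per job. */
	for (i = 0; i < num_jobs; i++)
		submit_job(fd, &jobs[i], timeline, ++point);

	/* Waiting for the final point should only complete once points
	 * 1..num_jobs have all signaled - which is exactly the
	 * monotonicity property in question above. */
	drmSyncobjTimelineWait(fd, &timeline, &point, 1, INT64_MAX,
			       DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL, NULL);
}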



3/ The current implementation lacks information about BO access,
so we serialize all jobs accessing the same set of BOs, even
if those jobs might just be reading from them (which can
happen concurrently). Other drivers pass an access type to the
list of referenced BOs to address that. Another option would be
to disable implicit deps (deps based on BOs) and force the driver
to pass all deps explicitly (interestingly, some drivers have
both the no-implicit-dep and r/w flags, probably to support
sub-resource access, so we might want to add that one too).
I don't see any userspace workaround to that problem, so that one
alone would justify extending the existing ioctl or adding a new
one.


Yeah - I think we need this. My only comment is that I think the 
read/write terminology may come back to bite. Better to use 'shared' and 
'exclusive' - which better matches the dma_resv_xxx APIs anyway.


Also the current code completely ignores PANFROST_BO_REF_READ. So either 
that should be defined as 0, or even better we support 3 modes:


 * Exclusive ('write' access)
 * Shared ('read' access)
 * No fence - ensures the BO is mapped, but doesn't add any implicit 
fences.


The last may make sense when doing explicit fences and e.g. doing 
front-buffer rendering with a display driver which does implicit fencing.
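
Expressed as uapi flags that could look something like this (purely
illustrative - none of these names exist in panfrost_drm.h today):

/* Hypothetical per-BO access modes for a new submit ioctl. */
#define PANFROST_BO_REF_EXCLUSIVE	(1 << 0) /* adds an exclusive fence */
#define PANFROST_BO_REF_SHARED		(1 << 1) /* adds a shared fence */
#define PANFROST_BO_REF_NO_IMPLICIT	(1 << 2) /* mapped, but no implicit fence */

struct drm_panfrost_bo_ref {
	__u32 handle;	/* GEM handle */
	__u32 flags;	/* one of the PANFROST_BO_REF_* modes above */
};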



4/ There's also the fact that submitting one job at a time adds an
overhead when QueueSubmit is being passed more than one
CommandBuffer. That one is less problematic, but if we're adding
a new ioctl we'd better design it to limit the userspace -> kernel
transition overhead.


I've no objection - but I doubt the performance effect is significant. I 
was pleased to see the handling of stride which makes the interface 
extendable. In particular I suspect at some 

Re: [PATCH -next] drm/panfrost: Fix PM reference leak in panfrost_job_hw_submit()

2021-05-12 Thread Steven Price

On 11/05/2021 07:29, Zou Wei wrote:

pm_runtime_get_sync will increment the pm usage counter even when it
fails. Forgetting the corresponding put operation will result in a
reference leak here. Fix it by replacing it with
pm_runtime_resume_and_get to keep the usage counter balanced.

Reported-by: Hulk Robot 
Signed-off-by: Zou Wei 


Thanks for the patch, but this is actually incorrect. 
panfrost_job_hw_submit() is expected to unconditionally increment the pm 
usage counter. This is because panfrost_job_hw_submit() can (currently) 
never fail, so in this case the job is considered "submitted" (even 
though it never reaches the hardware) and it's handled by the job timeout.


However this is at least the second time[1] this phantom "reference 
leak" has been raised, so perhaps it's time to handle this better. I'll 
post a patch reworking panfrost_job_hw_submit() so it can fail.


Thanks,

Steve

[1] https://lore.kernel.org/r/20200520110504.24388-1-dinghao.liu%40zju.edu.cn



---
  drivers/gpu/drm/panfrost/panfrost_job.c | 2 +-
  1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 6003cfe..42d8dbc 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -157,7 +157,7 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
 
 	panfrost_devfreq_record_busy(&pfdev->pfdevfreq);
 
-	ret = pm_runtime_get_sync(pfdev->dev);
+	ret = pm_runtime_resume_and_get(pfdev->dev);
 	if (ret < 0)
 		return;
 





[PATCH] drm/panfrost: Handle failure in panfrost_job_hw_submit()

2021-05-12 Thread Steven Price
Currently panfrost_job_hw_submit() returns void and therefore cannot
propagate failures to it's caller, which is a shame because it has two
failure paths. Currently these are handled by waiting for a job timeout
on the job even though it was never submitted. But we can do better.

Refactor to return a failure code from panfrost_job_hw_submit() and
report the failure back to the DRM scheduler. This means there's no need
to wait for the scheduler to timeout on the job and the failure can be
handled immediately.

Signed-off-by: Steven Price 

---
This hopefully will also stop future reports of a PM reference
leak[1][2] which doesn't actually exist.

[1] https://lore.kernel.org/r/20200520110504.24388-1-dinghao.liu%40zju.edu.cn
[2] https://lore.kernel.org/r/1620714551-106976-1-git-send-email-zou_wei%40huawei.com
---
 drivers/gpu/drm/panfrost/panfrost_job.c | 27 -
 1 file changed, 18 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 6003cfeb1322..ac1ae38aaf12 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -148,21 +148,22 @@ static void panfrost_job_write_affinity(struct panfrost_device *pfdev,
job_write(pfdev, JS_AFFINITY_NEXT_HI(js), affinity >> 32);
 }
 
-static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
+static int panfrost_job_hw_submit(struct panfrost_job *job, int js)
 {
struct panfrost_device *pfdev = job->pfdev;
u32 cfg;
u64 jc_head = job->jc;
int ret;
 
-   panfrost_devfreq_record_busy(&pfdev->pfdevfreq);
-
-   ret = pm_runtime_get_sync(pfdev->dev);
+   ret = pm_runtime_resume_and_get(pfdev->dev);
if (ret < 0)
-   return;
+   return ret;
+
+   panfrost_devfreq_record_busy(&pfdev->pfdevfreq);
 
if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js {
-   return;
+   pm_runtime_put_autosuspend(pfdev->dev);
+   return -EBUSY;
}
 
cfg = panfrost_mmu_as_get(pfdev, &job->file_priv->mmu);
@@ -194,6 +195,8 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
job, js, jc_head);
 
job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START);
+
+   return 0;
 }
 
 static void panfrost_acquire_object_fences(struct drm_gem_object **bos,
@@ -347,12 +350,11 @@ static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
struct panfrost_device *pfdev = job->pfdev;
int slot = panfrost_job_get_slot(job);
struct dma_fence *fence = NULL;
+   int err;
 
if (unlikely(job->base.s_fence->finished.error))
return NULL;
 
-   pfdev->jobs[slot] = job;
-
fence = panfrost_fence_create(pfdev, slot);
if (IS_ERR(fence))
return NULL;
@@ -361,7 +363,14 @@ static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
dma_fence_put(job->done_fence);
job->done_fence = dma_fence_get(fence);
 
-   panfrost_job_hw_submit(job, slot);
+   err = panfrost_job_hw_submit(job, slot);
+
+   if (err) {
+   dma_fence_put(fence);
+   return NULL;
+   }
+
+   pfdev->jobs[slot] = job;
 
return fence;
 }
-- 
2.20.1



Re: [PATCH 1/1] drm/panfrost: Remove redundant error printing in panfrost_device_init()

2021-05-12 Thread Steven Price

On 11/05/2021 10:04, Zhen Lei wrote:

When devm_ioremap_resource() fails, a clear enough error message will be
printed by its subfunction __devm_ioremap_resource(). The error
information contains the device name, failure cause, and possibly resource
information.

Therefore, remove the error printing here to simplify code and reduce the
binary size.

Reported-by: Hulk Robot 
Signed-off-by: Zhen Lei 


Reviewed-by: Steven Price 

I'll push to drm-misc-next.

Thanks,

Steve


---
  drivers/gpu/drm/panfrost/panfrost_device.c | 1 -
  1 file changed, 1 deletion(-)

diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index fbcf5edbe367521..125ed973feaad0a 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -238,7 +238,6 @@ int panfrost_device_init(struct panfrost_device *pfdev)
res = platform_get_resource(pfdev->pdev, IORESOURCE_MEM, 0);
pfdev->iomem = devm_ioremap_resource(pfdev->dev, res);
if (IS_ERR(pfdev->iomem)) {
-   dev_err(pfdev->dev, "failed to ioremap iomem\n");
err = PTR_ERR(pfdev->iomem);
goto out_pm_domain;
}





Re: [PATCH v13 0/4] drm/panfrost: Add support for mt8183 GPU

2021-05-14 Thread Steven Price
On 14/05/2021 15:48, Neil Armstrong wrote:
> On 13/05/2021 16:55, Ezequiel Garcia wrote:
>> Hi Neil,
>>
>> On Mon, 26 Apr 2021 at 06:59, Neil Armstrong  wrote:
>>>
>>> Hi,
>>>
>>> On 21/04/2021 07:28, Nicolas Boichat wrote:
 Hi!

 This is just a rebase of the v11, untested (but it seems like
 Neil Armstrong recently tested it), with small changes in
 binding and dts. v11 cover follows:

 Follow-up on the v5 [1], things have gotten significantly
 better in the last year, thanks to the efforts on Bifrost
 support by the Collabora team (and probably others I'm not
 aware of).

 I've been testing this series on a MT8183/kukui device, with a
 chromeos-5.10 kernel [2], and got basic Chromium OS UI up with
 mesa 20.3.2 (lots of artifacts though).

 devfreq is currently not supported, as we'll need:
  - Clock core support for switching the GPU core clock (see 2/4).
  - Platform-specific handling of the 2-regulator (see 3/4).

 Since the latter is easy to detect, patch 3/4 just disables
 devfreq if more than one regulator is specified in the
 compatible matching table.

 [1] 
 https://patchwork.kernel.org/project/linux-mediatek/cover/20200306041345.259332-1-drink...@chromium.org/
 [2] https://crrev.com/c/2608070

 Changes in v13:
  - devfreq: Fix conflict resolution mistake when rebasing, didn't
even compile. Oops.

 Changes in v12:
  - binding: Fix min/maxItems logic (Rob Herring)
  - Add gpu node to mt8183-pumpkin.dts as well (Neil Armstrong).

 Changes in v11:
  - binding: power-domain-names not power-domainS-names
  - mt8183*.dts: remove incorrect supply-names

 Changes in v10:
  - Fix the binding to make sure sram-supply property can be provided.

 Changes in v9:
  - Explain why devfreq needs to be disabled for GPUs with >1
regulators.

 Changes in v8:
  - Use DRM_DEV_INFO instead of ERROR

 Changes in v7:
  - Fix GPU ID in commit message
  - Fix GPU ID in commit message

 Changes in v6:
  - Rebased, actually tested with recent mesa driver.
  - Add gpu regulators to kukui dtsi as well.
  - Power domains are now attached to spm, not scpsys
  - Drop R-B.
  - devfreq: New change
  - Context conflicts, reflow the code.
  - Use ARRAY_SIZE for power domains too.

 Changes in v5:
  - Rename "2d" power domain to "core2"
  - Rename "2d" power domain to "core2" (keep R-B again).
  - Change power domain name from 2d to core2.

 Changes in v4:
  - Add power-domain-names description
(kept Alyssa's reviewed-by as the change is minor)
  - Add power-domain-names to describe the 3 domains.
(kept Alyssa's reviewed-by as the change is minor)
  - Add power domain names.

 Changes in v3:
  - Match mt8183-mali instead of bifrost, as we require special
handling for the 2 regulators and 3 power domains.

 Changes in v2:
  - Use sram instead of mali_sram as SRAM supply name.
  - Rename mali@ to gpu@.

 Nicolas Boichat (4):
   dt-bindings: gpu: mali-bifrost: Add Mediatek MT8183
   arm64: dts: mt8183: Add node for the Mali GPU
   drm/panfrost: devfreq: Disable devfreq when num_supplies > 1
   drm/panfrost: Add mt8183-mali compatible string

  .../bindings/gpu/arm,mali-bifrost.yaml|  30 -
  arch/arm64/boot/dts/mediatek/mt8183-evb.dts   |   5 +
  .../arm64/boot/dts/mediatek/mt8183-kukui.dtsi |   5 +
  .../boot/dts/mediatek/mt8183-pumpkin.dts  |   5 +
  arch/arm64/boot/dts/mediatek/mt8183.dtsi  | 105 ++
  drivers/gpu/drm/panfrost/panfrost_devfreq.c   |   9 ++
  drivers/gpu/drm/panfrost/panfrost_drv.c   |  10 ++
  7 files changed, 168 insertions(+), 1 deletion(-)

>>>
>>> Seems this version is ready to be applied if we get a review on the DT ?
>>>
>>> Mathias ? could you have a look ?
>>>
>>
>> Given Rob has Acked the DT bindings, I think it's OK to apply patches
>> 1, 3 and 4 via drm-misc, letting Mediatek people sort out the DT changes.
>>
>> My two unsolicited cents :-)

You make a convincing point - and if everyone is happy for the DT
changes to be handled separately I don't see a reason for the other
patches to be held up.

> Yeah sure, is there a panfrost maintainer in the room ? I can apply them if 
> you ack me.

I seem to be applying most Panfrost changes these days, so I'll save you
the effort and push 1,3,4 to drm-misc-next.

Thanks,

Steve


Re: [PATCH] drm/panfrost: Fix the panfrost_mmu_map_fault_addr() error path

2021-05-21 Thread Steven Price
On 21/05/2021 10:38, Boris Brezillon wrote:
> Make sure all bo->base.pages entries are either NULL or pointing to a
> valid page before calling drm_gem_shmem_put_pages().
> 
> Reported-by: Tomeu Vizoso 
> Cc: 
> Fixes: 187d2929206e ("drm/panfrost: Add support for GPU heap allocations")
> Signed-off-by: Boris Brezillon 

Reviewed-by: Steven Price 

> ---
>  drivers/gpu/drm/panfrost/panfrost_mmu.c | 1 +
>  1 file changed, 1 insertion(+)
> 
> diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> index 569509c2ba27..d76dff201ea6 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> @@ -460,6 +460,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
>   if (IS_ERR(pages[i])) {
>   mutex_unlock(&bo->base.pages_lock);
>   ret = PTR_ERR(pages[i]);
> + pages[i] = NULL;
>   goto err_pages;
>   }
>   }
> 



Re: [PATCH 3/6] drm/scheduler: Job timeout handler returns status

2020-11-25 Thread Steven Price

On 25/11/2020 03:17, Luben Tuikov wrote:

The job timeout handler now returns status
indicating back to the DRM layer whether the job
was successfully cancelled or whether more time
should be given to the job to complete.


I'm not sure I understand in what circumstances you would want to give 
the job more time to complete. Could you expand on that?


One thing we're missing at the moment in Panfrost is the ability to 
suspend ("soft stop" is the Mali jargon) a job and pick something else 
to run. The proprietary driver stack uses this to avoid timing out long 
running jobs while still allowing other processes to have time on the 
GPU. But this interface as it stands doesn't seem to provide that.


As the kernel test robot has already pointed out - you'll need to at the 
very least update the other uses of this interface.


Steve



Signed-off-by: Luben Tuikov 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c |  6 --
  include/drm/gpu_scheduler.h | 13 ++---
  2 files changed, 14 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index ff48101bab55..81b73790ecc6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -28,7 +28,7 @@
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 
-static void amdgpu_job_timedout(struct drm_sched_job *s_job)
+static int amdgpu_job_timedout(struct drm_sched_job *s_job)
 {
 	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
 	struct amdgpu_job *job = to_amdgpu_job(s_job);
@@ -41,7 +41,7 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
 	    amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
 		DRM_ERROR("ring %s timeout, but soft recovered\n",
 			  s_job->sched->name);
-		return;
+		return 0;
 	}
 
 	amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
@@ -53,10 +53,12 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
 
 	if (amdgpu_device_should_recover_gpu(ring->adev)) {
 		amdgpu_device_gpu_recover(ring->adev, job);
+		return 0;
 	} else {
 		drm_sched_suspend_timeout(&ring->sched);
 		if (amdgpu_sriov_vf(adev))
 			adev->virt.tdr_debug = true;
+		return 1;
 	}
 }
 
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 2e0c368e19f6..61f7121e1c19 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -230,10 +230,17 @@ struct drm_sched_backend_ops {
 	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
 
 	/**
-	 * @timedout_job: Called when a job has taken too long to execute,
-	 * to trigger GPU recovery.
+	 * @timedout_job: Called when a job has taken too long to execute,
+	 * to trigger GPU recovery.
+	 *
+	 * Return 0, if the job has been aborted successfully and will
+	 * never be heard of from the device. Return non-zero if the
+	 * job wasn't able to be aborted, i.e. if more time should be
+	 * given to this job. The result is not "bool" as this
+	 * function is not a predicate, although its result may seem
+	 * as one.
 	 */
-	void (*timedout_job)(struct drm_sched_job *sched_job);
+	int (*timedout_job)(struct drm_sched_job *sched_job);
 
 	/**
 	 * @free_job: Called once the job's finished fence has been signaled





Re: [PATCH 6/6] drm/sched: Make use of a "done" thread

2020-11-25 Thread Steven Price

On 25/11/2020 03:17, Luben Tuikov wrote:

Add a "done" list to which all completed jobs are added
to be freed. The drm_sched_job_done() callback is the
producer of jobs to this list.

Add a "done" thread which consumes from the done list
and frees up jobs. Now, the main scheduler thread only
pushes jobs to the GPU and the "done" thread frees them
up, on the way out of the GPU when they've completed
execution.


Generally I'd be in favour of a "done thread" as I think there are some 
murky corners of Panfrost's locking that would be helped by deferring 
the free_job() callback.


But I think you're trying to do too much in one patch here. And as 
Christian has pointed out there's some dodgy looking changes to locking 
which aren't explained.


Steve



Make use of the status returned by the GPU driver
timeout handler to decide whether to leave the job in
the pending list, or to send it off to the done list.
If a job is done, it is added to the done list and the
done thread woken up. If a job needs more time, it is
left on the pending list and the timeout timer
restarted.

Eliminate the polling mechanism of picking out done
jobs from the pending list, i.e. eliminate
drm_sched_get_cleanup_job(). Now the main scheduler
thread only pushes jobs down to the GPU.

Various other optimizations to the GPU scheduler
and job recovery are possible with this format.

Signed-off-by: Luben Tuikov 
---
  drivers/gpu/drm/scheduler/sched_main.c | 173 +
  include/drm/gpu_scheduler.h|  14 ++
  2 files changed, 101 insertions(+), 86 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 3eb7618a627d..289ae68cd97f 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -164,7 +164,8 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
  * drm_sched_job_done - complete a job
  * @s_job: pointer to the job which is done
  *
- * Finish the job's fence and wake up the worker thread.
+ * Finish the job's fence, move it to the done list,
+ * and wake up the done thread.
  */
 static void drm_sched_job_done(struct drm_sched_job *s_job)
 {
@@ -179,7 +180,12 @@ static void drm_sched_job_done(struct drm_sched_job *s_job)
 	dma_fence_get(&s_fence->finished);
 	drm_sched_fence_finished(s_fence);
 	dma_fence_put(&s_fence->finished);
-	wake_up_interruptible(&sched->wake_up_worker);
+
+	spin_lock(&sched->job_list_lock);
+	list_move(&s_job->list, &sched->done_list);
+	spin_unlock(&sched->job_list_lock);
+
+	wake_up_interruptible(&sched->done_wait_q);
 }
 
 /**
@@ -221,11 +227,10 @@ bool drm_sched_dependency_optimized(struct dma_fence* fence,
 EXPORT_SYMBOL(drm_sched_dependency_optimized);
 
 /**
- * drm_sched_start_timeout - start timeout for reset worker
- *
- * @sched: scheduler instance to start the worker for
+ * drm_sched_start_timeout - start a timeout timer
+ * @sched: scheduler instance whose job we're timing
  *
- * Start the timeout for the given scheduler.
+ * Start a timeout timer for the given scheduler.
  */
 static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
 {
@@ -305,8 +310,8 @@ static void drm_sched_job_begin(struct drm_sched_job *s_job)
 
 	spin_lock(&sched->job_list_lock);
 	list_add_tail(&s_job->list, &sched->pending_list);
-	drm_sched_start_timeout(sched);
 	spin_unlock(&sched->job_list_lock);
+	drm_sched_start_timeout(sched);
 }
 
 static void drm_sched_job_timedout(struct work_struct *work)
@@ -316,37 +321,30 @@ static void drm_sched_job_timedout(struct work_struct *work)
 
 	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
 
-	/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
 	spin_lock(&sched->job_list_lock);
 	job = list_first_entry_or_null(&sched->pending_list,
 				       struct drm_sched_job, list);
+	spin_unlock(&sched->job_list_lock);
 
 	if (job) {
-		/*
-		 * Remove the bad job so it cannot be freed by concurrent
-		 * drm_sched_cleanup_jobs. It will be reinserted back after sched->thread
-		 * is parked at which point it's safe.
-		 */
-		list_del_init(&job->list);
-		spin_unlock(&sched->job_list_lock);
+		int res;
 
-		job->sched->ops->timedout_job(job);
+		job->job_status |= DRM_JOB_STATUS_TIMEOUT;
+		res = job->sched->ops->timedout_job(job);
+		if (res == 0) {
+			/* The job is out of the device.
+			 */
+			spin_lock(&sched->job_list_lock);
+			list_move(&job->list, &sched->done_list);
+			spin_unlock(&sched->job_list_lock);
 
-		/*
-		 * Guilty job did complete and hence needs to be manually removed
-

Re: [PATCH 3/6] drm/scheduler: Job timeout handler returns status

2020-11-25 Thread Steven Price

On 25/11/2020 11:15, Lucas Stach wrote:

Am Mittwoch, den 25.11.2020, 11:04 + schrieb Steven Price:

On 25/11/2020 03:17, Luben Tuikov wrote:

The job timeout handler now returns status
indicating back to the DRM layer whether the job
was successfully cancelled or whether more time
should be given to the job to complete.


I'm not sure I understand in what circumstances you would want to give
the job more time to complete. Could you expand on that?


On etnaviv we don't have the ability to preempt a running job, but we
can look at the GPU state to determine if it's still making progress
with the current job, so we want to extend the timeout in that case to
not kill a long running but valid job.


Ok, fair enough. Although from my experience (on Mali) jobs very rarely
"get stuck"; it's just that their run time can be excessive[1], causing
other processes to not make forward progress. So I'd expect the timeout
to be set based on how long a job can run before you need to stop it to 
allow other processes to run their jobs.


But I'm not familiar with etnaviv so perhaps stuck jobs are actually a 
thing there.


Thanks,

Steve

[1] Also on Mali it's quite possible to create an infinite duration job 
which appears to be making forward progress, so in that case our measure 
of progress isn't useful against these malicious jobs.



Regards,
Lucas


One thing we're missing at the moment in Panfrost is the ability to
suspend ("soft stop" is the Mali jargon) a job and pick something else
to run. The proprietary driver stack uses this to avoid timing out long
running jobs while still allowing other processes to have time on the
GPU. But this interface as it stands doesn't seem to provide that.

As the kernel test robot has already pointed out - you'll need to at the
very least update the other uses of this interface.

Steve




Re: [PATCH] iommu/io-pgtable: Remove tlb_flush_leaf

2020-11-26 Thread Steven Price

On 25/11/2020 17:29, Robin Murphy wrote:

The only user of tlb_flush_leaf is a particularly hairy corner of the
Arm short-descriptor code, which wants a synchronous invalidation to
minimise the races inherent in trying to split a large page mapping.
This is already far enough into "here be dragons" territory that no
sensible caller should ever hit it, and thus it really doesn't need
optimising. Although using tlb_flush_walk there may technically be
more heavyweight than needed, it does the job and saves everyone else
having to carry around useless baggage.

Signed-off-by: Robin Murphy 


LGTM

Reviewed-by: Steven Price 


---

Reviewing the Mediatek TLB optimisation patches just left me thinking
"why do we even have this?"... Panfrost folks, this has zero functional
impact to you, merely wants an ack for straying outside drivers/iommu.

Robin.

  drivers/gpu/drm/msm/msm_iommu.c |  1 -
  drivers/gpu/drm/panfrost/panfrost_mmu.c |  7 --
  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c |  7 --
  drivers/iommu/arm/arm-smmu/arm-smmu.c   | 25 +++--
  drivers/iommu/arm/arm-smmu/qcom_iommu.c |  8 ---
  drivers/iommu/io-pgtable-arm-v7s.c  |  3 +--
  drivers/iommu/io-pgtable-arm.c  |  1 -
  drivers/iommu/ipmmu-vmsa.c  |  1 -
  drivers/iommu/msm_iommu.c   |  7 --
  drivers/iommu/mtk_iommu.c   |  1 -
  include/linux/io-pgtable.h  | 11 -
  11 files changed, 4 insertions(+), 68 deletions(-)

diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 22ac7c692a81..50d881794758 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -139,7 +139,6 @@ static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
 static const struct iommu_flush_ops null_tlb_ops = {
 	.tlb_flush_all = msm_iommu_tlb_flush_all,
 	.tlb_flush_walk = msm_iommu_tlb_flush_walk,
-	.tlb_flush_leaf = msm_iommu_tlb_flush_walk,
 	.tlb_add_page = msm_iommu_tlb_add_page,
 };
 
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 776448c527ea..c186914cc4f9 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -347,16 +347,9 @@ static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule,
 	mmu_tlb_sync_context(cookie);
 }
 
-static void mmu_tlb_flush_leaf(unsigned long iova, size_t size, size_t granule,
-			       void *cookie)
-{
-	mmu_tlb_sync_context(cookie);
-}
-
 static const struct iommu_flush_ops mmu_tlb_ops = {
 	.tlb_flush_all	= mmu_tlb_inv_context_s1,
 	.tlb_flush_walk = mmu_tlb_flush_walk,
-	.tlb_flush_leaf = mmu_tlb_flush_leaf,
 };
 
 int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index e634bbe60573..fb684a393118 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1741,16 +1741,9 @@ static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
 	arm_smmu_tlb_inv_range(iova, size, granule, false, cookie);
 }
 
-static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
-				  size_t granule, void *cookie)
-{
-	arm_smmu_tlb_inv_range(iova, size, granule, true, cookie);
-}
-
 static const struct iommu_flush_ops arm_smmu_flush_ops = {
 	.tlb_flush_all	= arm_smmu_tlb_inv_context,
 	.tlb_flush_walk = arm_smmu_tlb_inv_walk,
-	.tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
 	.tlb_add_page	= arm_smmu_tlb_inv_page_nosync,
 };
 
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index dad7fa86fbd4..0b8c59922a2b 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -333,14 +333,6 @@ static void arm_smmu_tlb_inv_walk_s1(unsigned long iova, size_t size,
 	arm_smmu_tlb_sync_context(cookie);
 }
 
-static void arm_smmu_tlb_inv_leaf_s1(unsigned long iova, size_t size,
-				     size_t granule, void *cookie)
-{
-	arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie,
-				  ARM_SMMU_CB_S1_TLBIVAL);
-	arm_smmu_tlb_sync_context(cookie);
-}
-
 static void arm_smmu_tlb_add_page_s1(struct iommu_iotlb_gather *gather,
 				     unsigned long iova, size_t granule,
 				     void *cookie)
@@ -357,14 +349,6 @@ static void arm_smmu_tlb_inv_walk_s2(unsigned long iova, size_t size,
 	arm_smmu_tlb_sync_context(cookie);
 }
 
-static void arm_smmu_tlb_inv_leaf_s2(unsigned long iova, size_t size,
-				     size_t granule, void *cookie)
-{
-	arm_smmu_tl

Re: [PATCH] drm/panfrost: fix reference leak in panfrost_job_hw_submit

2020-11-27 Thread Steven Price

On 27/11/2020 09:44, Qinglang Miao wrote:

pm_runtime_get_sync will increment the pm usage counter even when it
fails. Forgetting the corresponding put operation will result in a
reference leak here.

A new function, pm_runtime_resume_and_get, was introduced in
[0] to keep the usage counter balanced, so we fix the reference
leak by replacing pm_runtime_get_sync with the new function.

[0] dd8088d5a896 ("PM: runtime: Add  pm_runtime_resume_and_get to deal with usage 
counter")

Fixes: f3ba91228e8e ("drm/panfrost: Add initial panfrost driver")
Reported-by: Hulk Robot 
Signed-off-by: Qinglang Miao 
---
  drivers/gpu/drm/panfrost/panfrost_job.c | 2 +-
  1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 30e7b7196..04cf3bb67 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -147,7 +147,7 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
 
 	panfrost_devfreq_record_busy(&pfdev->pfdevfreq);
 
-	ret = pm_runtime_get_sync(pfdev->dev);
+	ret = pm_runtime_resume_and_get(pfdev->dev);


Sorry, but in this case this change isn't correct. 
panfrost_job_hw_submit() is expected to be unbalanced (the PM reference 
count is expected to be incremented on return).


In the case where pm_runtime_get_sync() fails, the job will eventually 
timeout, and there's a corresponding pm_runtime_put_noidle() in 
panfrost_reset().


Potentially this could be handled better (e.g. without waiting for the
timeout to occur), but equally this isn't something we expect to happen
in normal operation.
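
To spell out the balance (a heavily simplified sketch of the existing
flow with illustrative function names, not a proposed change):

#include <linux/pm_runtime.h>

#include "panfrost_device.h"

/* Submit side: the PM reference is taken unconditionally... */
static void example_hw_submit(struct panfrost_device *pfdev)
{
	int ret = pm_runtime_get_sync(pfdev->dev); /* usage counter +1, even on error */

	if (ret < 0)
		return;	/* job never reaches the HW; the job timeout will fire */

	/* ... write the job registers and kick the GPU ... */
}

/* ...and the reset path, reached via the job timeout, drops it, so even
 * the error return above ends up balanced. */
static void example_reset(struct panfrost_device *pfdev)
{
	pm_runtime_put_noidle(pfdev->dev); /* usage counter -1 */
}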


Steve


if (ret < 0)
return;
  





Re: [PATCH] drm/komeda: Remove useless variable assignment

2020-11-27 Thread Steven Price

On 27/11/2020 11:00, carsten.haitz...@foss.arm.com wrote:

From: Carsten Haitzler 

ret is not actually read after this (only written in one case then
returned), so this assign line is useless. This removes that assignment.

Signed-off-by: Carsten Haitzler 


Reviewed-by: Steven Price 


---
  drivers/gpu/drm/arm/display/komeda/komeda_dev.c | 1 -
  1 file changed, 1 deletion(-)

diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c 
b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
index 1d767473ba8a..eea76f51f662 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
@@ -163,7 +163,6 @@ static int komeda_parse_dt(struct device *dev, struct 
komeda_dev *mdev)
ret = of_reserved_mem_device_init(dev);
if (ret && ret != -ENODEV)
return ret;
-   ret = 0;
  
  	for_each_available_child_of_node(np, child) {

if (of_node_name_eq(child, "pipeline")) {





Re: [PATCH] drm/komeda: Handle NULL pointer access code path in error case

2020-11-27 Thread Steven Price

On 27/11/2020 11:00, carsten.haitz...@foss.arm.com wrote:

From: Carsten Haitzler 

komeda_component_get_old_state() technically can return a NULL
pointer. komeda_compiz_set_input() even warns when this happens, but
then proceeds to use that NULL pointer to compare memory content there
against the new state to see if it changed. In this case, it's better to
assume that the input changed, as there is no old state to compare
against, and thus assume the changes happen anyway.

Signed-off-by: Carsten Haitzler 
---
  drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c | 3 ++-
  1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c 
b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
index 8f32ae7c25d0..e8b1e15312d8 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
@@ -707,7 +707,8 @@ komeda_compiz_set_input(struct komeda_compiz *compiz,
WARN_ON(!old_st);
  
  	/* compare with old to check if this input has been changed */

-   if (memcmp(&(to_compiz_st(old_st)->cins[idx]), cin, sizeof(*cin)))
+   if (!old_st ||
+   memcmp(&(to_compiz_st(old_st)->cins[idx]), cin, sizeof(*cin)))
c_st->changed_active_inputs |= BIT(idx);


Even better, you can move the WARN_ON into the if:

if (WARN_ON(!old_st) || ...
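
Spelled out against the hunk above, that would be:

	if (WARN_ON(!old_st) ||
	    memcmp(&(to_compiz_st(old_st)->cins[idx]), cin, sizeof(*cin)))
		c_st->changed_active_inputs |= BIT(idx);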

Either way:

Reviewed-by: Steven Price 

Steve

  
  	komeda_component_add_input(c_st, &dflow->input, idx);






Re: [PATCH 1/1] drm/scheduler: Job timeout handler returns status (v2)

2020-12-10 Thread Steven Price

On 10/12/2020 02:14, Luben Tuikov wrote:

This patch does not change current behaviour.

The driver's job timeout handler now returns
status indicating back to the DRM layer whether
the task (job) was successfully aborted or whether
more time should be given to the task to complete.


I find the definitions given a little confusing, see below.


Default behaviour as of this patch, is preserved,
except in obvious-by-comment case in the Panfrost
driver, as documented below.

All drivers which make use of the
drm_sched_backend_ops' .timedout_job() callback
have been accordingly renamed and return the
would've-been default value of
DRM_TASK_STATUS_ALIVE to restart the task's
timeout timer--this is the old behaviour, and
is preserved by this patch.

In the case of the Panfrost driver, its timedout
callback correctly first checks if the job had
completed in due time and if so, it now returns
DRM_TASK_STATUS_COMPLETE to notify the DRM layer
that the task can be moved to the done list, to be
freed later. In the other two subsequent checks,
the value of DRM_TASK_STATUS_ALIVE is returned, as
per the default behaviour.

A more involved driver's solutions can be had
in subequent patches.


NIT: ^ subsequent



v2: Use enum as the status of a driver's job
 timeout callback method.

Cc: Alexander Deucher 
Cc: Andrey Grodzovsky 
Cc: Christian König 
Cc: Daniel Vetter 
Cc: Lucas Stach 
Cc: Russell King 
Cc: Christian Gmeiner 
Cc: Qiang Yu 
Cc: Rob Herring 
Cc: Tomeu Vizoso 
Cc: Steven Price 
Cc: Alyssa Rosenzweig 
Cc: Eric Anholt 
Reported-by: kernel test robot 


This reported-by seems a little odd for this patch.


Signed-off-by: Luben Tuikov 
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c |  6 +++--
  drivers/gpu/drm/etnaviv/etnaviv_sched.c | 10 +++-
  drivers/gpu/drm/lima/lima_sched.c   |  4 +++-
  drivers/gpu/drm/panfrost/panfrost_job.c |  9 ---
  drivers/gpu/drm/scheduler/sched_main.c  |  4 +---
  drivers/gpu/drm/v3d/v3d_sched.c | 32 +
  include/drm/gpu_scheduler.h | 20 +---
  7 files changed, 57 insertions(+), 28 deletions(-)



[]


diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 2e0c368e19f6..cedfc5394e52 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -206,6 +206,11 @@ static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
 	return s_job && atomic_inc_return(&s_job->karma) > threshold;
 }
 
+enum drm_task_status {
+	DRM_TASK_STATUS_COMPLETE,
+	DRM_TASK_STATUS_ALIVE
+};
+
 /**
  * struct drm_sched_backend_ops
  *
@@ -230,10 +235,19 @@ struct drm_sched_backend_ops {
 	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
 
 	/**
-	 * @timedout_job: Called when a job has taken too long to execute,
-	 * to trigger GPU recovery.
+	 * @timedout_job: Called when a job has taken too long to execute,
+	 * to trigger GPU recovery.
+	 *
+	 * Return DRM_TASK_STATUS_ALIVE, if the task (job) is healthy
+	 * and executing in the hardware, i.e. it needs more time.


So 'alive' means the job (was) alive, and GPU recovery is happening. 
I.e. it's that the job just takes too long. Panfrost will trigger a GPU reset 
(killing the job) in this case while returning DRM_TASK_STATUS_ALIVE.



+	 *
+	 * Return DRM_TASK_STATUS_COMPLETE, if the task (job) has
+	 * been aborted or is unknown to the hardware, i.e. if
+	 * the task is out of the hardware, and maybe it is now
+	 * in the done list, or it was completed long ago, or
+	 * if it is unknown to the hardware.


Where 'complete' seems to mean a variety of things:

 * The job completed successfully (i.e. the timeout raced), this is the 
situation that Panfrost detects. In this case (and only this case) the 
GPU reset will *not* happen.


 * The job failed (aborted) and is no longer on the hardware. Panfrost 
currently handles a job failure by triggering drm_sched_fault() to 
trigger the timeout handler. But the timeout handler doesn't handle this 
differently so will return DRM_TASK_STATUS_ALIVE.


 * The job is "unknown to hardware". There are some corner cases in 
Panfrost (specifically two early returns from panfrost_job_hw_submit()) 
where the job never actually lands on the hardware, but the scheduler 
isn't informed. We currently rely on the timeout handling to recover 
from that. However, again, the timeout handler doesn't know about this 
soo will return DRM_TASK_STATUS_ALIVE.


So of the four cases listed in these comments, Panfrost is only getting 
2 'correct' after this change.


But what I really want to know is what the scheduler is planning to do 
in these situations? The Panfrost return value in this patch is really a 
"did we trigger a GPU reset" - and doesn't seem to match

Re: [PATCH v6 14/16] drm/panfrost: Kill in-flight jobs on FD close

2021-06-30 Thread Steven Price
On 30/06/2021 07:27, Boris Brezillon wrote:
> If the process who submitted these jobs decided to close the FD before
> the jobs are done it probably means it doesn't care about the result.
> 
> v5:
> * Add a panfrost_exception_is_fault() helper and the
>   DRM_PANFROST_EXCEPTION_MAX_NON_FAULT value
> 
> v4:
> * Don't disable/restore irqs when taking the job_lock (not needed since
>   this lock is never taken from an interrupt context)
> 
> v3:
> * Set fence error to ECANCELED when a TERMINATED exception is received
> 
> Signed-off-by: Boris Brezillon 
> ---
>  drivers/gpu/drm/panfrost/panfrost_device.h |  7 
>  drivers/gpu/drm/panfrost/panfrost_job.c| 42 ++
>  2 files changed, 43 insertions(+), 6 deletions(-)

The panfrost_exception_is_fault() makes the code much more readable -
thanks!

Reviewed-by: Steven Price 

> 
> diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
> index 68e93b7e5b61..193cd87f643c 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_device.h
> +++ b/drivers/gpu/drm/panfrost/panfrost_device.h
> @@ -184,6 +184,7 @@ enum drm_panfrost_exception_type {
>   DRM_PANFROST_EXCEPTION_KABOOM = 0x05,
>   DRM_PANFROST_EXCEPTION_EUREKA = 0x06,
>   DRM_PANFROST_EXCEPTION_ACTIVE = 0x08,
> + DRM_PANFROST_EXCEPTION_MAX_NON_FAULT = 0x3f,
>   DRM_PANFROST_EXCEPTION_JOB_CONFIG_FAULT = 0x40,
>   DRM_PANFROST_EXCEPTION_JOB_POWER_FAULT = 0x41,
>   DRM_PANFROST_EXCEPTION_JOB_READ_FAULT = 0x42,
> @@ -244,6 +245,12 @@ enum drm_panfrost_exception_type {
>   DRM_PANFROST_EXCEPTION_MEM_ATTR_NONCACHE_3 = 0xef,
>  };
>  
> +static inline bool
> +panfrost_exception_is_fault(u32 exception_code)
> +{
> + return exception_code > DRM_PANFROST_EXCEPTION_MAX_NON_FAULT;
> +}
> +
>  const char *panfrost_exception_name(u32 exception_code);
>  bool panfrost_exception_needs_reset(const struct panfrost_device *pfdev,
>   u32 exception_code);
> diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
> index cf5f9e8b2a27..8a0db9571bfd 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_job.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_job.c
> @@ -483,14 +483,21 @@ static void panfrost_job_handle_irq(struct panfrost_device *pfdev, u32 status)
>  
>  		if (status & JOB_INT_MASK_ERR(j)) {
>  			u32 js_status = job_read(pfdev, JS_STATUS(j));
> +			const char *exception_name = panfrost_exception_name(js_status);
>  
>  			job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_NOP);
>  
> -			dev_err(pfdev->dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x",
> -				j,
> -				panfrost_exception_name(js_status),
> -				job_read(pfdev, JS_HEAD_LO(j)),
> -				job_read(pfdev, JS_TAIL_LO(j)));
> +			if (!panfrost_exception_is_fault(js_status)) {
> +				dev_dbg(pfdev->dev, "js interrupt, js=%d, status=%s, head=0x%x, tail=0x%x",
> +					j, exception_name,
> +					job_read(pfdev, JS_HEAD_LO(j)),
> +					job_read(pfdev, JS_TAIL_LO(j)));
> +			} else {
> +				dev_err(pfdev->dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x",
> +					j, exception_name,
> +					job_read(pfdev, JS_HEAD_LO(j)),
> +					job_read(pfdev, JS_TAIL_LO(j)));
> +			}
>  
>  			/* If we need a reset, signal it to the timeout
>  			 * handler, otherwise, update the fence error field and
> @@ -499,7 +506,16 @@ static void panfrost_job_handle_irq(struct panfrost_device *pfdev, u32 status)
>  			if (panfrost_exception_needs_reset(pfdev, js_status)) {
>  				drm_sched_fault(&pfdev->js->queue[j].sched);
>  			} else {
> -				dma_fence_set_error(pfdev->jobs[j]->done_fence, -EINVAL);
> +				int error = 0;
> +
> +				if (js_status == DRM_PANFROST_EXCEPTION_TERMINATED)
> +					error = -ECANCELED;
> +				else if (panfrost_exception_is_fault(js_status))
> +					error = -EINVAL;
> +
> +

Re: [PATCH v6 15/16] drm/panfrost: Queue jobs on the hardware

2021-06-30 Thread Steven Price
On 30/06/2021 07:27, Boris Brezillon wrote:
> From: Steven Price 
> 
> The hardware has a set of '_NEXT' registers that can hold a second job
> while the first is executing. Make use of these registers to enqueue a
> second job per slot.
> 
> v5:
> * Fix a comment in panfrost_job_init()
> 
> v3:
> * Fix the done/err job dequeuing logic to get a valid active state
> * Only enable the second slot on GPUs supporting jobchain disambiguation
> * Split interrupt handling in sub-functions
> 
> Signed-off-by: Steven Price 
> Signed-off-by: Boris Brezillon 

FWIW (it has changed a bit since my original version):

Reviewed-by: Steven Price 

> ---
>  drivers/gpu/drm/panfrost/panfrost_device.h |   2 +-
>  drivers/gpu/drm/panfrost/panfrost_job.c| 467 +++--
>  2 files changed, 351 insertions(+), 118 deletions(-)
> 
> diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h 
> b/drivers/gpu/drm/panfrost/panfrost_device.h
> index 193cd87f643c..8b25278f34c8 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_device.h
> +++ b/drivers/gpu/drm/panfrost/panfrost_device.h
> @@ -102,7 +102,7 @@ struct panfrost_device {
>  
>   struct panfrost_job_slot *js;
>  
> - struct panfrost_job *jobs[NUM_JOB_SLOTS];
> + struct panfrost_job *jobs[NUM_JOB_SLOTS][2];
>   struct list_head scheduled_jobs;
>  
>   struct panfrost_perfcnt *perfcnt;
> diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c 
> b/drivers/gpu/drm/panfrost/panfrost_job.c
> index 8a0db9571bfd..71a72fb50e6b 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_job.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_job.c
> @@ -4,6 +4,7 @@
>  #include 
>  #include 
>  #include 
> +#include 
>  #include 
>  #include 
>  #include 
> @@ -140,9 +141,52 @@ static void panfrost_job_write_affinity(struct 
> panfrost_device *pfdev,
>   job_write(pfdev, JS_AFFINITY_NEXT_HI(js), affinity >> 32);
>  }
>  
> +static u32
> +panfrost_get_job_chain_flag(const struct panfrost_job *job)
> +{
> + struct panfrost_fence *f = to_panfrost_fence(job->done_fence);
> +
> + if (!panfrost_has_hw_feature(job->pfdev, 
> HW_FEATURE_JOBCHAIN_DISAMBIGUATION))
> + return 0;
> +
> + return (f->seqno & 1) ? JS_CONFIG_JOB_CHAIN_FLAG : 0;
> +}
> +
> +static struct panfrost_job *
> +panfrost_dequeue_job(struct panfrost_device *pfdev, int slot)
> +{
> + struct panfrost_job *job = pfdev->jobs[slot][0];
> +
> + WARN_ON(!job);
> + pfdev->jobs[slot][0] = pfdev->jobs[slot][1];
> + pfdev->jobs[slot][1] = NULL;
> +
> + return job;
> +}
> +
> +static unsigned int
> +panfrost_enqueue_job(struct panfrost_device *pfdev, int slot,
> +  struct panfrost_job *job)
> +{
> + if (WARN_ON(!job))
> + return 0;
> +
> + if (!pfdev->jobs[slot][0]) {
> + pfdev->jobs[slot][0] = job;
> + return 0;
> + }
> +
> + WARN_ON(pfdev->jobs[slot][1]);
> + pfdev->jobs[slot][1] = job;
> + WARN_ON(panfrost_get_job_chain_flag(job) ==
> + panfrost_get_job_chain_flag(pfdev->jobs[slot][0]));
> + return 1;
> +}
> +
>  static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
>  {
>   struct panfrost_device *pfdev = job->pfdev;
> + unsigned int subslot;
>   u32 cfg;
>   u64 jc_head = job->jc;
>   int ret;
> @@ -168,7 +212,8 @@ static void panfrost_job_hw_submit(struct panfrost_job 
> *job, int js)
>* start */
>   cfg |= JS_CONFIG_THREAD_PRI(8) |
>   JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE |
> - JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE;
> + JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE |
> + panfrost_get_job_chain_flag(job);
>  
>   if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
>   cfg |= JS_CONFIG_ENABLE_FLUSH_REDUCTION;
> @@ -182,10 +227,17 @@ static void panfrost_job_hw_submit(struct panfrost_job 
> *job, int js)
>   job_write(pfdev, JS_FLUSH_ID_NEXT(js), job->flush_id);
>  
>   /* GO ! */
> - dev_dbg(pfdev->dev, "JS: Submitting atom %p to js[%d] with head=0x%llx",
> - job, js, jc_head);
>  
> - job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START);
> + spin_lock(&pfdev->js->job_lock);
> + subslot = panfrost_enqueue_job(pfdev, js, job);
> + /* Don't queue the job if a reset is in progress */
> + if (!atomic_read(&pfdev->reset.pending)) {
> + job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START);
> + dev_dbg

Re: [PATCH v6 16/16] drm/panfrost: Increase the AS_ACTIVE polling timeout

2021-06-30 Thread Steven Price
On 30/06/2021 07:27, Boris Brezillon wrote:
> Experience has shown that 1ms is sometimes not enough, even when the GPU
> is running at its maximum frequency, not to mention that an MMU operation
> might take longer if the GPU is running at a lower frequency, which is
> likely to be the case if devfreq is active.
> 
> Let's pick a significantly bigger timeout value (1ms -> 100ms) to be on
> the safe side.
> 
> v5:
> * New patch
> 
> Signed-off-by: Boris Brezillon 

Reviewed-by: Steven Price 

> ---
>  drivers/gpu/drm/panfrost/panfrost_mmu.c | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
> diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c 
> b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> index e0356e68e768..0da5b3100ab1 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> @@ -34,7 +34,7 @@ static int wait_ready(struct panfrost_device *pfdev, u32 
> as_nr)
>   /* Wait for the MMU status to indicate there is no active command, in
>* case one is pending. */
>   ret = readl_relaxed_poll_timeout_atomic(pfdev->iomem + AS_STATUS(as_nr),
> - val, !(val & AS_STATUS_AS_ACTIVE), 10, 1000);
> + val, !(val & AS_STATUS_AS_ACTIVE), 10, 100000);
>  
>   if (ret) {
>   /* The GPU hung, let's trigger a reset */
> 



Re: [PATCH v2] drm/panfrost:report the full raw fault information instead

2021-07-01 Thread Steven Price
On 29/06/2021 04:04, Chunyou Tang wrote:
> Hi Steve,
>   thanks for your reply.
>   I set the pte in arm_lpae_prot_to_pte(),
> ***
>   /*
>* Also Mali has its own notions of shareability wherein its
> Inner
>* domain covers the cores within the GPU, and its Outer domain
> is
>* "outside the GPU" (i.e. either the Inner or System domain in
> CPU
>* terms, depending on coherency).
>*/
>   if (prot & IOMMU_CACHE && data->iop.fmt != ARM_MALI_LPAE)
>   pte |= ARM_LPAE_PTE_SH_IS;
>   else
>   pte |= ARM_LPAE_PTE_SH_OS;
> ***
> I set pte |= ARM_LPAE_PTE_SH_NS.
> 
>   If I set the pte to ARM_LPAE_PTE_SH_OS or
>   ARM_LPAE_PTE_SH_IS, whether I use a single core GPU or a multi core
>   GPU, a GPU fault will occur.
>   If I set the pte to ARM_LPAE_PTE_SH_NS, whether I use a single core
>   GPU or a multi core GPU, no GPU fault will occur.

Hi,

So this is a difference between Panfrost and kbase. Panfrost (well
technically the IOMMU framework) enables the inner-shareable bit for all
memory, whereas kbase only enables it for some memory types (the
BASE_MEM_COHERENT_LOCAL flag in the UABI controls it). However this
should only be a performance/power difference (and AFAIK probably an
irrelevant one) and it's definitely required that "inner shareable"
(i.e. within the GPU) works for communication between the different
units of the GPU.
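Purely as an illustration of that difference (this is *not* what
upstream does, and the IOMMU_MALI_LOCAL_COHERENT flag is invented here
as a stand-in for kbase's BASE_MEM_COHERENT_LOCAL), a per-allocation
choice in arm_lpae_prot_to_pte() would look something like:

	if (data->iop.fmt == ARM_MALI_LPAE) {
		/* Mali "inner" = shareable between the units inside the
		 * GPU, "outer" = shareable with the rest of the system. */
		if (prot & IOMMU_MALI_LOCAL_COHERENT)
			pte |= ARM_LPAE_PTE_SH_IS;
		else
			pte |= ARM_LPAE_PTE_SH_OS;
	}

Either setting has to work for GPU-internal traffic, which is why SH_NS
"fixing" things points at an integration problem rather than a driver
bug.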

You didn't answer my previous question:

> Is this device working with the kbase/DDK proprietary driver?

What you are describing sounds like a hardware integration issue, so it
would be good to check that the hardware is working with the proprietary
driver to rule that out. And perhaps there is something in the kbase for
this device that is setting a chicken bit to 'fix' the coherency?

Steve


Re: [PATCH v2 1/7] drm/panfrost: Pass a job to panfrost_{acquire,attach_object_fences}()

2021-07-02 Thread Steven Price
On 01/07/2021 10:12, Boris Brezillon wrote:
> So we don't have to change the prototype if we extend the function.
> 
> Signed-off-by: Boris Brezillon 

Subject NIT:
> drm/panfrost: Pass a job to panfrost_{acquire,attach_object_fences}()

Should be panfrost_{acquire,attach}_object_fences()

Otherwise:

Reviewed-by: Steven Price 

> ---
>  drivers/gpu/drm/panfrost/panfrost_job.c | 22 --
>  1 file changed, 8 insertions(+), 14 deletions(-)
> 
> diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c 
> b/drivers/gpu/drm/panfrost/panfrost_job.c
> index 71a72fb50e6b..fdc1bd7ecf12 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_job.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_job.c
> @@ -240,15 +240,13 @@ static void panfrost_job_hw_submit(struct panfrost_job 
> *job, int js)
>   spin_unlock(&pfdev->js->job_lock);
>  }
>  
> -static int panfrost_acquire_object_fences(struct drm_gem_object **bos,
> -   int bo_count,
> -   struct xarray *deps)
> +static int panfrost_acquire_object_fences(struct panfrost_job *job)
>  {
>   int i, ret;
>  
> - for (i = 0; i < bo_count; i++) {
> + for (i = 0; i < job->bo_count; i++) {
>   /* panfrost always uses write mode in its current uapi */
> - ret = drm_gem_fence_array_add_implicit(deps, bos[i], true);
> + ret = drm_gem_fence_array_add_implicit(&job->deps, job->bos[i], 
> true);
>   if (ret)
>   return ret;
>   }
> @@ -256,14 +254,12 @@ static int panfrost_acquire_object_fences(struct 
> drm_gem_object **bos,
>   return 0;
>  }
>  
> -static void panfrost_attach_object_fences(struct drm_gem_object **bos,
> -   int bo_count,
> -   struct dma_fence *fence)
> +static void panfrost_attach_object_fences(struct panfrost_job *job)
>  {
>   int i;
>  
> - for (i = 0; i < bo_count; i++)
> - dma_resv_add_excl_fence(bos[i]->resv, fence);
> + for (i = 0; i < job->bo_count; i++)
> + dma_resv_add_excl_fence(job->bos[i]->resv, 
> job->render_done_fence);
>  }
>  
>  int panfrost_job_push(struct panfrost_job *job)
> @@ -290,8 +286,7 @@ int panfrost_job_push(struct panfrost_job *job)
>  
>   job->render_done_fence = dma_fence_get(&job->base.s_fence->finished);
>  
> - ret = panfrost_acquire_object_fences(job->bos, job->bo_count,
> -  &job->deps);
> + ret = panfrost_acquire_object_fences(job);
>   if (ret) {
>   mutex_unlock(&pfdev->sched_lock);
>   goto unlock;
> @@ -303,8 +298,7 @@ int panfrost_job_push(struct panfrost_job *job)
>  
>   mutex_unlock(&pfdev->sched_lock);
>  
> - panfrost_attach_object_fences(job->bos, job->bo_count,
> -   job->render_done_fence);
> + panfrost_attach_object_fences(job);
>  
>  unlock:
>   drm_gem_unlock_reservations(job->bos, job->bo_count, &acquire_ctx);
> 



Re: [PATCH v2 2/7] drm/panfrost: Move the mappings collection out of panfrost_lookup_bos()

2021-07-02 Thread Steven Price
On 01/07/2021 10:12, Boris Brezillon wrote:
> So we can re-use it from elsewhere.
> 
> Signed-off-by: Boris Brezillon 

Reviewed-by: Steven Price 

> ---
>  drivers/gpu/drm/panfrost/panfrost_drv.c | 52 ++---
>  1 file changed, 29 insertions(+), 23 deletions(-)
> 
> diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c 
> b/drivers/gpu/drm/panfrost/panfrost_drv.c
> index 1ffaef5ec5ff..9bbc9e78cc85 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_drv.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
> @@ -109,6 +109,34 @@ static int panfrost_ioctl_create_bo(struct drm_device 
> *dev, void *data,
>   return 0;
>  }
>  
> +static int
> +panfrost_get_job_mappings(struct drm_file *file_priv, struct panfrost_job 
> *job)
> +{
> + struct panfrost_file_priv *priv = file_priv->driver_priv;
> + unsigned int i;
> +
> + job->mappings = kvmalloc_array(job->bo_count,
> +sizeof(*job->mappings),
> +GFP_KERNEL | __GFP_ZERO);
> + if (!job->mappings)
> + return -ENOMEM;
> +
> + for (i = 0; i < job->bo_count; i++) {
> + struct panfrost_gem_mapping *mapping;
> + struct panfrost_gem_object *bo;
> +
> + bo = to_panfrost_bo(job->bos[i]);
> + mapping = panfrost_gem_mapping_get(bo, priv);
> + if (!mapping)
> + return -EINVAL;
> +
> + atomic_inc(&bo->gpu_usecount);
> + job->mappings[i] = mapping;
> + }
> +
> + return 0;
> +}
> +
>  /**
>   * panfrost_lookup_bos() - Sets up job->bo[] with the GEM objects
>   * referenced by the job.
> @@ -128,8 +156,6 @@ panfrost_lookup_bos(struct drm_device *dev,
> struct drm_panfrost_submit *args,
> struct panfrost_job *job)
>  {
> - struct panfrost_file_priv *priv = file_priv->driver_priv;
> - struct panfrost_gem_object *bo;
>   unsigned int i;
>   int ret;
>  
> @@ -144,27 +170,7 @@ panfrost_lookup_bos(struct drm_device *dev,
>   if (ret)
>   return ret;
>  
> - job->mappings = kvmalloc_array(job->bo_count,
> -sizeof(struct panfrost_gem_mapping *),
> -GFP_KERNEL | __GFP_ZERO);
> - if (!job->mappings)
> - return -ENOMEM;
> -
> - for (i = 0; i < job->bo_count; i++) {
> - struct panfrost_gem_mapping *mapping;
> -
> - bo = to_panfrost_bo(job->bos[i]);
> - mapping = panfrost_gem_mapping_get(bo, priv);
> - if (!mapping) {
> - ret = -EINVAL;
> - break;
> - }
> -
> - atomic_inc(&bo->gpu_usecount);
> - job->mappings[i] = mapping;
> - }
> -
> - return ret;
> + return panfrost_get_job_mappings(file_priv, job);
>  }
>  
>  /**
> 



Re: [PATCH v2 3/7] drm/panfrost: Add BO access flags to relax dependencies between jobs

2021-07-02 Thread Steven Price
On 01/07/2021 10:12, Boris Brezillon wrote:
> Jobs reading from the same BO should not be serialized. Add access
> flags so we can relax the implicit dependencies in that case. We force
> exclusive access for now to keep the behavior unchanged, but a new
> SUBMIT ioctl taking explicit access flags will be introduced.
> 
> Signed-off-by: Boris Brezillon 

Reviewed-by: Steven Price 

> ---
>  drivers/gpu/drm/panfrost/panfrost_drv.c |  9 +
>  drivers/gpu/drm/panfrost/panfrost_job.c | 23 +++
>  drivers/gpu/drm/panfrost/panfrost_job.h |  1 +
>  include/uapi/drm/panfrost_drm.h |  2 ++
>  4 files changed, 31 insertions(+), 4 deletions(-)
> 
> diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c 
> b/drivers/gpu/drm/panfrost/panfrost_drv.c
> index 9bbc9e78cc85..b6b5997c9366 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_drv.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
> @@ -164,6 +164,15 @@ panfrost_lookup_bos(struct drm_device *dev,
>   if (!job->bo_count)
>   return 0;
>  
> + job->bo_flags = kvmalloc_array(job->bo_count,
> +sizeof(*job->bo_flags),
> +GFP_KERNEL | __GFP_ZERO);
> + if (!job->bo_flags)
> + return -ENOMEM;
> +
> + for (i = 0; i < job->bo_count; i++)
> + job->bo_flags[i] = PANFROST_BO_REF_EXCLUSIVE;
> +
>   ret = drm_gem_objects_lookup(file_priv,
>(void __user *)(uintptr_t)args->bo_handles,
>job->bo_count, &job->bos);
> diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c 
> b/drivers/gpu/drm/panfrost/panfrost_job.c
> index fdc1bd7ecf12..152245b122be 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_job.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_job.c
> @@ -245,8 +245,16 @@ static int panfrost_acquire_object_fences(struct 
> panfrost_job *job)
>   int i, ret;
>  
>   for (i = 0; i < job->bo_count; i++) {
> - /* panfrost always uses write mode in its current uapi */
> - ret = drm_gem_fence_array_add_implicit(&job->deps, job->bos[i], 
> true);
> + bool exclusive = job->bo_flags[i] & PANFROST_BO_REF_EXCLUSIVE;
> +
> + if (!exclusive) {
> + ret = dma_resv_reserve_shared(job->bos[i]->resv, 1);
> + if (ret)
> + return ret;
> + }
> +
> + ret = drm_gem_fence_array_add_implicit(&job->deps, job->bos[i],
> +exclusive);
>   if (ret)
>   return ret;
>   }
> @@ -258,8 +266,14 @@ static void panfrost_attach_object_fences(struct 
> panfrost_job *job)
>  {
>   int i;
>  
> - for (i = 0; i < job->bo_count; i++)
> - dma_resv_add_excl_fence(job->bos[i]->resv, 
> job->render_done_fence);
> + for (i = 0; i < job->bo_count; i++) {
> + struct dma_resv *robj = job->bos[i]->resv;
> +
> + if (job->bo_flags[i] & PANFROST_BO_REF_EXCLUSIVE)
> + dma_resv_add_excl_fence(robj, job->render_done_fence);
> + else
> + dma_resv_add_shared_fence(robj, job->render_done_fence);
> + }
>  }
>  
>  int panfrost_job_push(struct panfrost_job *job)
> @@ -340,6 +354,7 @@ static void panfrost_job_cleanup(struct kref *ref)
>   kvfree(job->bos);
>   }
>  
> + kvfree(job->bo_flags);
>   kfree(job);
>  }
>  
> diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h 
> b/drivers/gpu/drm/panfrost/panfrost_job.h
> index 82306a03b57e..1cbc3621b663 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_job.h
> +++ b/drivers/gpu/drm/panfrost/panfrost_job.h
> @@ -32,6 +32,7 @@ struct panfrost_job {
>  
>   struct panfrost_gem_mapping **mappings;
>   struct drm_gem_object **bos;
> + u32 *bo_flags;
>   u32 bo_count;
>  
>   /* Fence to be signaled by drm-sched once its done with the job */
> diff --git a/include/uapi/drm/panfrost_drm.h b/include/uapi/drm/panfrost_drm.h
> index 061e700dd06c..45d6c600475c 100644
> --- a/include/uapi/drm/panfrost_drm.h
> +++ b/include/uapi/drm/panfrost_drm.h
> @@ -224,6 +224,8 @@ struct drm_panfrost_madvise {
>   __u32 retained;   /* out, whether backing store still exists */
>  };
>  
> +#define PANFROST_BO_REF_EXCLUSIVE  0x1
> +
>  #if defined(__cplusplus)
>  }
>  #endif
> 



Re: [PATCH v2 4/7] drm/panfrost: Add the ability to create submit queues

2021-07-02 Thread Steven Price
On 01/07/2021 10:12, Boris Brezillon wrote:
> Needed to keep VkQueues isolated from each other.
> 
> Signed-off-by: Boris Brezillon 

My Vulkan knowledge is limited so I'm not sure whether this is the right
approach or not. In particular is it correct that an application can
create a high priority queue which could affect other (normal priority)
applications?

Also does it really make sense to allow user space to create an
unlimited number of queues? It feels like an ideal way for a malicious
application to waste kernel memory.
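(One cheap mitigation, sketched below, would be to bound the IDR range
so allocation naturally fails once a per-context cap is reached:

	/* 16 is an arbitrary per-context cap; idr_alloc() returns
	 * -ENOSPC once the [0, 16) ID range is exhausted. */
	ret = idr_alloc(&ctx->queues, queue, 0, 16, GFP_KERNEL);

The exact cap is obviously debatable.)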

In terms of implementation it looks correct, but one comment below

> ---
>  drivers/gpu/drm/panfrost/Makefile |   3 +-
>  drivers/gpu/drm/panfrost/panfrost_device.h|   2 +-
>  drivers/gpu/drm/panfrost/panfrost_drv.c   |  69 --
>  drivers/gpu/drm/panfrost/panfrost_job.c   |  47 ++-
>  drivers/gpu/drm/panfrost/panfrost_job.h   |   9 +-
>  .../gpu/drm/panfrost/panfrost_submitqueue.c   | 130 ++
>  .../gpu/drm/panfrost/panfrost_submitqueue.h   |  27 
>  include/uapi/drm/panfrost_drm.h   |  17 +++
>  8 files changed, 258 insertions(+), 46 deletions(-)
>  create mode 100644 drivers/gpu/drm/panfrost/panfrost_submitqueue.c
>  create mode 100644 drivers/gpu/drm/panfrost/panfrost_submitqueue.h
> 
[...]
> diff --git a/drivers/gpu/drm/panfrost/panfrost_submitqueue.c 
> b/drivers/gpu/drm/panfrost/panfrost_submitqueue.c
> new file mode 100644
> index ..98050f7690df
> --- /dev/null
> +++ b/drivers/gpu/drm/panfrost/panfrost_submitqueue.c
> @@ -0,0 +1,130 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/* Copyright 2021 Collabora ltd. */
> +
> +#include 
> +
> +#include "panfrost_device.h"
> +#include "panfrost_job.h"
> +#include "panfrost_submitqueue.h"
> +
> +static enum drm_sched_priority
> +to_sched_prio(enum panfrost_submitqueue_priority priority)
> +{
> + switch (priority) {
> + case PANFROST_SUBMITQUEUE_PRIORITY_LOW:
> + return DRM_SCHED_PRIORITY_MIN;
> + case PANFROST_SUBMITQUEUE_PRIORITY_MEDIUM:
> + return DRM_SCHED_PRIORITY_NORMAL;
> + case PANFROST_SUBMITQUEUE_PRIORITY_HIGH:
> + return DRM_SCHED_PRIORITY_HIGH;
> + default:
> + break;
> + }
> +
> + return DRM_SCHED_PRIORITY_UNSET;
> +}
> +
> +static void
> +panfrost_submitqueue_cleanup(struct kref *ref)
> +{
> + struct panfrost_submitqueue *queue;
> + unsigned int i;
> +
> + queue = container_of(ref, struct panfrost_submitqueue, refcount);
> +
> + for (i = 0; i < NUM_JOB_SLOTS; i++)
> + drm_sched_entity_destroy(&queue->sched_entity[i]);
> +
> + /* Kill in-flight jobs */
> + panfrost_job_kill_queue(queue);
> +
> + kfree(queue);
> +}
> +
> +void panfrost_submitqueue_put(struct panfrost_submitqueue *queue)
> +{
> + if (!IS_ERR_OR_NULL(queue))
> + kref_put(&queue->refcount, panfrost_submitqueue_cleanup);
> +}
> +
> +struct panfrost_submitqueue *
> +panfrost_submitqueue_create(struct panfrost_file_priv *ctx,
> + enum panfrost_submitqueue_priority priority,
> + u32 flags)

If this function returned an 'int' we could simplify some code. So
instead of returning the struct panfrost_submitqueue just return the ID
(or negative error). The only caller (panfrost_ioctl_create_submitqueue)
doesn't actually want the object just the ID and we can ditch the 'id'
field from panfrost_submitqueue.
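Something like this (untested sketch - I've also used idr_preload() here
to keep the GFP_KERNEL allocation outside the spinlock):

int
panfrost_submitqueue_create(struct panfrost_file_priv *ctx,
			    enum panfrost_submitqueue_priority priority,
			    u32 flags)
{
	struct panfrost_submitqueue *queue;
	int ret;

	/* ... allocate 'queue' and init the sched entities as above ... */

	kref_init(&queue->refcount);
	idr_preload(GFP_KERNEL);
	idr_lock(&ctx->queues);
	ret = idr_alloc(&ctx->queues, queue, 0, INT_MAX, GFP_NOWAIT);
	idr_unlock(&ctx->queues);
	idr_preload_end();
	if (ret < 0) {
		panfrost_submitqueue_put(queue);
		return ret;
	}

	/* The IDR slot doubles as the queue ID - no 'id' field needed. */
	return ret;
}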

Steve

> +{
> + struct panfrost_submitqueue *queue;
> + enum drm_sched_priority sched_prio;
> + int ret, i;
> +
> + if (flags || priority >= PANFROST_SUBMITQUEUE_PRIORITY_COUNT)
> + return ERR_PTR(-EINVAL);
> +
> + queue = kzalloc(sizeof(*queue), GFP_KERNEL);
> + if (!queue)
> + return ERR_PTR(-ENOMEM);
> +
> + queue->pfdev = ctx->pfdev;
> + sched_prio = to_sched_prio(priority);
> + for (i = 0; i < NUM_JOB_SLOTS; i++) {
> + struct drm_gpu_scheduler *sched;
> +
> + sched = panfrost_job_get_sched(ctx->pfdev, i);
> + ret = drm_sched_entity_init(&queue->sched_entity[i],
> + sched_prio, &sched, 1, NULL);
> + if (ret)
> + break;
> + }
> +
> + if (ret) {
> + for (i--; i >= 0; i--)
> + drm_sched_entity_destroy(&queue->sched_entity[i]);
> +
> + return ERR_PTR(ret);
> + }
> +
> + kref_init(&queue->refcount);
> + idr_lock(&ctx->queues);
> + ret = idr_alloc(&ctx->queues, queue, 0, INT_MAX, GFP_KERNEL);
> + if (ret >= 0)
> + queue->id = ret;
> + idr_unlock(&ctx->queues);
> +
> + if (ret < 0) {
> + panfrost_submitqueue_put(queue);
> + return ERR_PTR(ret);
> + }
> +
> + return queue;
> +}
> +
> +int panfrost_submitqueue_destroy(struct panfrost_file_priv *ctx, u32 id)
> +{
> + struct panfrost_submitqueue *queue;
> +
> + idr_lock(

Re: [PATCH v2 7/7] drm/panfrost: Bump minor version to reflect the feature additions

2021-07-02 Thread Steven Price
On 01/07/2021 10:12, Boris Brezillon wrote:
> We now have a new ioctl that allows submitting multiple jobs at once
> (among other things) and we support timelined syncobjs. Bump the
> minor version number to reflect those changes.
> 
> Signed-off-by: Boris Brezillon 

Reviewed-by: Steven Price 

> ---
>  drivers/gpu/drm/panfrost/panfrost_drv.c | 4 +++-
>  1 file changed, 3 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c 
> b/drivers/gpu/drm/panfrost/panfrost_drv.c
> index b0978fe4fa36..1859e6887877 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_drv.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
> @@ -884,6 +884,8 @@ DEFINE_DRM_GEM_FOPS(panfrost_drm_driver_fops);
>   * - 1.0 - initial interface
>   * - 1.1 - adds HEAP and NOEXEC flags for CREATE_BO
>   * - 1.2 - adds AFBC_FEATURES query
> + * - 1.3 - adds the BATCH_SUBMIT, CREATE_SUBMITQUEUE, DESTROY_SUBMITQUEUE
> + *  ioctls and advertises the SYNCOBJ_TIMELINE feature
>   */
>  static const struct drm_driver panfrost_drm_driver = {
>   .driver_features= DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ |
> @@ -897,7 +899,7 @@ static const struct drm_driver panfrost_drm_driver = {
>   .desc   = "panfrost DRM",
>   .date   = "20180908",
>   .major  = 1,
> - .minor  = 2,
> + .minor  = 3,
>  
>   .gem_create_object  = panfrost_gem_create_object,
>   .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
> 



Re: [PATCH v2 5/7] drm/panfrost: Add a new ioctl to submit batches

2021-07-02 Thread Steven Price
On 01/07/2021 10:12, Boris Brezillon wrote:
> This should help limit the number of ioctls when submitting multiple
> jobs. The new ioctl also supports syncobj timelines and BO access flags.
> 
> Signed-off-by: Boris Brezillon 

The new ioctl looks reasonable, but I think there's a lot of scope for
combining the code for the old/new ioctls. panfrost_submit_job() is
quite similar to panfrost_ioctl_submit(). And there are tricks we can
play to handle some of the differences.

For example the old ioctl took an array of just handles and the new one
extends this to be a handle and a point. But we can use
copy_struct_from_user to read a shortened version of struct
drm_panfrost_syncobj_ref which is just the handle. So
panfrost_get_job_in_syncs can actually be directly used against the old
list of handles just with a stride of sizeof(u32) and we can delete
panfrost_copy_in_sync().
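i.e. the old handle array can be fed straight in, something like:

	/* Old ioctl: a plain array of u32 handles. With a stride of
	 * sizeof(u32), copy_struct_from_user() zero-fills the rest of
	 * drm_panfrost_syncobj_ref, so 'point' and 'pad' end up 0. */
	ret = panfrost_get_job_in_syncs(file_priv, args->in_syncs,
					sizeof(u32), args->in_sync_count,
					job);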

I've not tried the refactor but I think it should be possible to remove
most of the code for the old ioctl and make almost all the code between
the ioctls common. Which will obviously help if anything needs changing
in the future.

Steve

> ---
>  drivers/gpu/drm/panfrost/panfrost_drv.c | 305 
>  drivers/gpu/drm/panfrost/panfrost_job.c |   3 +
>  include/uapi/drm/panfrost_drm.h |  84 +++
>  3 files changed, 392 insertions(+)
> 
> diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c 
> b/drivers/gpu/drm/panfrost/panfrost_drv.c
> index 6529e5972b47..7ed0773a5c19 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_drv.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
> @@ -491,6 +491,310 @@ panfrost_ioctl_destroy_submitqueue(struct drm_device 
> *dev, void *data,
>   return panfrost_submitqueue_destroy(priv, id);
>  }
>  
> +static int
> +panfrost_get_job_in_syncs(struct drm_file *file_priv,
> +   u64 refs, u32 ref_stride,
> +   u32 count, struct panfrost_job *job)
> +{
> + const void __user *in = u64_to_user_ptr(refs);
> + unsigned int i;
> + int ret;
> +
> + if (!count)
> + return 0;
> +
> + for (i = 0; i < count; i++) {
> + struct drm_panfrost_syncobj_ref ref = { };
> + struct dma_fence *fence;
> +
> + ret = copy_struct_from_user(&ref, sizeof(ref),
> + in + (i * ref_stride),
> + ref_stride);
> + if (ret)
> + return ret;
> +
> + if (ref.pad)
> + return -EINVAL;
> +
> + ret = drm_syncobj_find_fence(file_priv, ref.handle, ref.point,
> +  0, &fence);
> + if (ret)
> + return ret;
> +
> + ret = drm_gem_fence_array_add(&job->deps, fence);
> + if (ret)
> + return ret;
> + }
> +
> + return 0;
> +}
> +
> +struct panfrost_job_out_sync {
> + struct drm_syncobj *syncobj;
> + struct dma_fence_chain *chain;
> + u64 point;
> +};
> +
> +static void
> +panfrost_put_job_out_syncs(struct panfrost_job_out_sync *out_syncs, u32 
> count)
> +{
> + unsigned int i;
> +
> + for (i = 0; i < count; i++) {
> + if (!out_syncs[i].syncobj)
> + break;
> +
> + drm_syncobj_put(out_syncs[i].syncobj);
> + kvfree(out_syncs[i].chain);
> + }
> +
> + kvfree(out_syncs);
> +}
> +
> +static struct panfrost_job_out_sync *
> +panfrost_get_job_out_syncs(struct drm_file *file_priv,
> +u64 refs, u32 ref_stride,
> +u32 count)
> +{
> + void __user *in = u64_to_user_ptr(refs);
> + struct panfrost_job_out_sync *out_syncs;
> + unsigned int i;
> + int ret;
> +
> + if (!count)
> + return NULL;
> +
> + out_syncs = kvmalloc_array(count, sizeof(*out_syncs),
> +GFP_KERNEL | __GFP_ZERO);
> + if (!out_syncs)
> + return ERR_PTR(-ENOMEM);
> +
> + for (i = 0; i < count; i++) {
> + struct drm_panfrost_syncobj_ref ref = { };
> +
> + ret = copy_struct_from_user(&ref, sizeof(ref),
> + in + (i * ref_stride),
> + ref_stride);
> + if (ret)
> + goto err_free_out_syncs;
> +
> + if (ref.pad) {
> + ret = -EINVAL;
> + goto err_free_out_syncs;
> + }
> +
> + out_syncs[i].syncobj = drm_syncobj_find(file_priv, ref.handle);
> + if (!out_syncs[i].syncobj) {
> + ret = -EINVAL;
> + goto err_free_out_syncs;
> + }
> +
> + out_syncs[i].point = ref.point;
> + if (!out_syncs[i].point)
> + continue;
> +
> + out_syncs[i].chain = kmalloc(sizeof(*out_syncs[i].chain),
> +  

Re: [PATCH v2 6/7] drm/panfrost: Advertise the SYNCOBJ_TIMELINE feature

2021-07-02 Thread Steven Price
On 01/07/2021 10:12, Boris Brezillon wrote:
> Now that we have a new SUBMIT ioctl dealing with timelined syncojbs we
Typo: s/syncojbs/syncobjs/
> can advertise the feature.
> 
> Signed-off-by: Boris Brezillon 

Reviewed-by: Steven Price 

> ---
>  drivers/gpu/drm/panfrost/panfrost_drv.c | 3 ++-
>  1 file changed, 2 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c 
> b/drivers/gpu/drm/panfrost/panfrost_drv.c
> index 7ed0773a5c19..b0978fe4fa36 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_drv.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
> @@ -886,7 +886,8 @@ DEFINE_DRM_GEM_FOPS(panfrost_drm_driver_fops);
>   * - 1.2 - adds AFBC_FEATURES query
>   */
>  static const struct drm_driver panfrost_drm_driver = {
> - .driver_features= DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
> + .driver_features= DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ |
> +   DRIVER_SYNCOBJ_TIMELINE,
>   .open   = panfrost_open,
>   .postclose  = panfrost_postclose,
>   .ioctls = panfrost_drm_driver_ioctls,
> 



Re: [PATCH v2 4/7] drm/panfrost: Add the ability to create submit queues

2021-07-02 Thread Steven Price
On 01/07/2021 10:12, Boris Brezillon wrote:
> Needed to keep VkQueues isolated from each other.

One more comment I noticed when I tried this out:

[...]
> +struct panfrost_submitqueue *
> +panfrost_submitqueue_create(struct panfrost_file_priv *ctx,
> + enum panfrost_submitqueue_priority priority,
> + u32 flags)
> +{
> + struct panfrost_submitqueue *queue;
> + enum drm_sched_priority sched_prio;
> + int ret, i;
> +
> + if (flags || priority >= PANFROST_SUBMITQUEUE_PRIORITY_COUNT)
> + return ERR_PTR(-EINVAL);
> +
> + queue = kzalloc(sizeof(*queue), GFP_KERNEL);
> + if (!queue)
> + return ERR_PTR(-ENOMEM);
> +
> + queue->pfdev = ctx->pfdev;
> + sched_prio = to_sched_prio(priority);
> + for (i = 0; i < NUM_JOB_SLOTS; i++) {
> + struct drm_gpu_scheduler *sched;
> +
> + sched = panfrost_job_get_sched(ctx->pfdev, i);
> + ret = drm_sched_entity_init(&queue->sched_entity[i],
> + sched_prio, &sched, 1, NULL);
> + if (ret)
> + break;
> + }
> +
> + if (ret) {
> + for (i--; i >= 0; i--)
> + drm_sched_entity_destroy(&queue->sched_entity[i]);
> +
> + return ERR_PTR(ret);
> + }
> +
> + kref_init(&queue->refcount);
> + idr_lock(&ctx->queues);
> + ret = idr_alloc(&ctx->queues, queue, 0, INT_MAX, GFP_KERNEL);

This makes lockdep complain. idr_lock() is a spinlock and GFP_KERNEL can
sleep. So either we need to bring our own mutex here or not use GFP_KERNEL.

Steve


Re: [PATCH v2 4/7] drm/panfrost: Add the ability to create submit queues

2021-07-02 Thread Steven Price
On 02/07/2021 11:43, Boris Brezillon wrote:
> On Fri, 2 Jul 2021 10:56:29 +0100
> Steven Price  wrote:
> 
>> On 01/07/2021 10:12, Boris Brezillon wrote:
>>> Needed to keep VkQueues isolated from each other.
>>>
>>> Signed-off-by: Boris Brezillon   
>>
>> My Vulkan knowledge is limited so I'm not sure whether this is the right
>> approach or not. In particular is it correct that an application can
>> create a high priority queue which could affect other (normal priority)
>> applications?
> 
> That's what msm does (with no extra CAPS check AFAICT), and the
> freedreno driver can already create high priority queues if
> PIPE_CONTEXT_HIGH_PRIORITY is passed. Not saying that's okay to allow
> userspace to tweak the priority, but if that's a problem, other drivers
> are in trouble too ;-).

Oh well I guess if others are doing the same ;) I have to admit kbase
has always struggled with how to identify a "privileged" process - it's
something that makes a bit of sense on Android but for other userspaces
there really doesn't seem to be a good way of identifying what should or
should not be allowed to create high priority queues.
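(The obvious kernel-side check would be to gate it behind a capability,
e.g. something like

	/* Illustrative only - amdgpu does roughly this for its
	 * high-priority contexts. */
	if (priority == PANFROST_SUBMITQUEUE_PRIORITY_HIGH &&
	    !capable(CAP_SYS_NICE))
		return ERR_PTR(-EPERM);

but as I say, CAP_SYS_NICE maps poorly onto a lot of real userspaces.)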

>>
>> Also does it really make sense to allow user space to create an
>> unlimited number of queues? It feels like an ideal way for an malicious
>> application to waste kernel memory.
> 
> Same here, I see no limit on the number of queues the msm driver can
> create. I can definitely pick an arbitrary limit of 2^16 (or 2^8 if
> 2^16 is too high) if you prefer, but I feel like there's plenty of ways
> to force kernel allocations already, like allocating a gazillion of 4k
> GEM buffers (cgroup can probably limit the total amount of memory
> allocated, but you'd still have all gem-buf meta data in kernel memory).

I guess the real problem is picking a sensible limit ;) My main concern
here is that there doesn't appear to be any memory accounted against the
process. For GEM buffers at least there is some cost to the application
- so an unbounded allocation isn't possible, even if the bounds are
likely to be very high.

With kbase we found that syzkaller was good at finding ways of using up
all the memory on the platform - and if it wasn't accounted to the right
process that meant the OOM-killer knocked out the wrong process and
produced a bug report to investigate. Perhaps I'm just scarred by that
history ;)

Steve

>>
>> In terms of implementation it looks correct, but one comment below
>>
>>> ---
>>>  drivers/gpu/drm/panfrost/Makefile |   3 +-
>>>  drivers/gpu/drm/panfrost/panfrost_device.h|   2 +-
>>>  drivers/gpu/drm/panfrost/panfrost_drv.c   |  69 --
>>>  drivers/gpu/drm/panfrost/panfrost_job.c   |  47 ++-
>>>  drivers/gpu/drm/panfrost/panfrost_job.h   |   9 +-
>>>  .../gpu/drm/panfrost/panfrost_submitqueue.c   | 130 ++
>>>  .../gpu/drm/panfrost/panfrost_submitqueue.h   |  27 
>>>  include/uapi/drm/panfrost_drm.h   |  17 +++
>>>  8 files changed, 258 insertions(+), 46 deletions(-)
>>>  create mode 100644 drivers/gpu/drm/panfrost/panfrost_submitqueue.c
>>>  create mode 100644 drivers/gpu/drm/panfrost/panfrost_submitqueue.h
>>>   
>> [...]
>>> diff --git a/drivers/gpu/drm/panfrost/panfrost_submitqueue.c 
>>> b/drivers/gpu/drm/panfrost/panfrost_submitqueue.c
>>> new file mode 100644
>>> index ..98050f7690df
>>> --- /dev/null
>>> +++ b/drivers/gpu/drm/panfrost/panfrost_submitqueue.c
>>> @@ -0,0 +1,130 @@
>>> +// SPDX-License-Identifier: GPL-2.0
>>> +/* Copyright 2021 Collabora ltd. */
>>> +
>>> +#include 
>>> +
>>> +#include "panfrost_device.h"
>>> +#include "panfrost_job.h"
>>> +#include "panfrost_submitqueue.h"
>>> +
>>> +static enum drm_sched_priority
>>> +to_sched_prio(enum panfrost_submitqueue_priority priority)
>>> +{
>>> +   switch (priority) {
>>> +   case PANFROST_SUBMITQUEUE_PRIORITY_LOW:
>>> +   return DRM_SCHED_PRIORITY_MIN;
>>> +   case PANFROST_SUBMITQUEUE_PRIORITY_MEDIUM:
>>> +   return DRM_SCHED_PRIORITY_NORMAL;
>>> +   case PANFROST_SUBMITQUEUE_PRIORITY_HIGH:
>>> +   return DRM_SCHED_PRIORITY_HIGH;
>>> +   default:
>>> +   break;
>>> +   }
>>> +
>>> +   return DRM_SCHED_PRIORITY_UNSET;
>>> +}
>>> +
>>> +static void
>>> +panfrost_submitqueue_cleanup(struct kref *ref)
&

Re: [PATCH v2 4/7] drm/panfrost: Add the ability to create submit queues

2021-07-02 Thread Steven Price
On 02/07/2021 11:52, Boris Brezillon wrote:
> On Fri, 2 Jul 2021 11:08:58 +0100
> Steven Price  wrote:
> 
>> On 01/07/2021 10:12, Boris Brezillon wrote:
>>> Needed to keep VkQueues isolated from each other.  
>>
>> One more comment I noticed when I tried this out:
>>
>> [...]
>>> +struct panfrost_submitqueue *
>>> +panfrost_submitqueue_create(struct panfrost_file_priv *ctx,
>>> +   enum panfrost_submitqueue_priority priority,
>>> +   u32 flags)
>>> +{
>>> +   struct panfrost_submitqueue *queue;
>>> +   enum drm_sched_priority sched_prio;
>>> +   int ret, i;
>>> +
>>> +   if (flags || priority >= PANFROST_SUBMITQUEUE_PRIORITY_COUNT)
>>> +   return ERR_PTR(-EINVAL);
>>> +
>>> +   queue = kzalloc(sizeof(*queue), GFP_KERNEL);
>>> +   if (!queue)
>>> +   return ERR_PTR(-ENOMEM);
>>> +
>>> +   queue->pfdev = ctx->pfdev;
>>> +   sched_prio = to_sched_prio(priority);
>>> +   for (i = 0; i < NUM_JOB_SLOTS; i++) {
>>> +   struct drm_gpu_scheduler *sched;
>>> +
>>> +   sched = panfrost_job_get_sched(ctx->pfdev, i);
>>> +   ret = drm_sched_entity_init(&queue->sched_entity[i],
>>> +   sched_prio, &sched, 1, NULL);
>>> +   if (ret)
>>> +   break;
>>> +   }
>>> +
>>> +   if (ret) {
>>> +   for (i--; i >= 0; i--)
>>> +   drm_sched_entity_destroy(&queue->sched_entity[i]);
>>> +
>>> +   return ERR_PTR(ret);
>>> +   }
>>> +
>>> +   kref_init(&queue->refcount);
>>> +   idr_lock(&ctx->queues);
>>> +   ret = idr_alloc(&ctx->queues, queue, 0, INT_MAX, GFP_KERNEL);  
>>
>> This makes lockdep complain. idr_lock() is a spinlock and GFP_KERNEL can
>> sleep. So either we need to bring our own mutex here or not use GFP_KERNEL.
>>
> 
> Ouch! I wonder why I don't see that (I have lockdep enabled, and the
> igt tests should have exercised this path).

Actually I'm not sure it's technically lockdep - have you got
CONFIG_DEBUG_ATOMIC_SLEEP set?
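(For reference, the pattern that trips it is roughly:

	idr_lock(&ctx->queues);		/* spinlock -> atomic context */
	ret = idr_alloc(&ctx->queues, queue, 0, INT_MAX, GFP_KERNEL);
					/* may sleep -> splat */
	idr_unlock(&ctx->queues);

and the usual fixes are either a private mutex around the allocation, or
wrapping it in idr_preload(GFP_KERNEL)/idr_preload_end() and passing
GFP_NOWAIT to idr_alloc().)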

Steve


Re: [PATCH v3 4/7] drm/panfrost: Add the ability to create submit queues

2021-07-02 Thread Steven Price
On 02/07/2021 15:32, Boris Brezillon wrote:
> Needed to keep VkQueues isolated from each other.
> 
> v3:
> * Limit the number of submitqueue per context to 16
> * Fix a deadlock
> 
> Signed-off-by: Boris Brezillon 

16 ought to be enough for anyone ;)

Reviewed-by: Steven Price 

> ---
>  drivers/gpu/drm/panfrost/Makefile |   3 +-
>  drivers/gpu/drm/panfrost/panfrost_device.h|   2 +-
>  drivers/gpu/drm/panfrost/panfrost_drv.c   |  69 +++--
>  drivers/gpu/drm/panfrost/panfrost_job.c   |  47 ++
>  drivers/gpu/drm/panfrost/panfrost_job.h   |   9 +-
>  .../gpu/drm/panfrost/panfrost_submitqueue.c   | 136 ++
>  .../gpu/drm/panfrost/panfrost_submitqueue.h   |  27 
>  include/uapi/drm/panfrost_drm.h   |  17 +++
>  8 files changed, 264 insertions(+), 46 deletions(-)
>  create mode 100644 drivers/gpu/drm/panfrost/panfrost_submitqueue.c
>  create mode 100644 drivers/gpu/drm/panfrost/panfrost_submitqueue.h
> 
> diff --git a/drivers/gpu/drm/panfrost/Makefile 
> b/drivers/gpu/drm/panfrost/Makefile
> index b71935862417..e99192b66ec9 100644
> --- a/drivers/gpu/drm/panfrost/Makefile
> +++ b/drivers/gpu/drm/panfrost/Makefile
> @@ -9,6 +9,7 @@ panfrost-y := \
>   panfrost_gpu.o \
>   panfrost_job.o \
>   panfrost_mmu.o \
> - panfrost_perfcnt.o
> + panfrost_perfcnt.o \
> + panfrost_submitqueue.o
>  
>  obj-$(CONFIG_DRM_PANFROST) += panfrost.o
> diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h 
> b/drivers/gpu/drm/panfrost/panfrost_device.h
> index 8b25278f34c8..51c0ba4e50f5 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_device.h
> +++ b/drivers/gpu/drm/panfrost/panfrost_device.h
> @@ -137,7 +137,7 @@ struct panfrost_mmu {
>  struct panfrost_file_priv {
>   struct panfrost_device *pfdev;
>  
> - struct drm_sched_entity sched_entity[NUM_JOB_SLOTS];
> + struct idr queues;
>  
>   struct panfrost_mmu *mmu;
>  };
> diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c 
> b/drivers/gpu/drm/panfrost/panfrost_drv.c
> index b6b5997c9366..6529e5972b47 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_drv.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
> @@ -19,6 +19,7 @@
>  #include "panfrost_job.h"
>  #include "panfrost_gpu.h"
>  #include "panfrost_perfcnt.h"
> +#include "panfrost_submitqueue.h"
>  
>  static bool unstable_ioctls;
>  module_param_unsafe(unstable_ioctls, bool, 0600);
> @@ -250,6 +251,7 @@ static int panfrost_ioctl_submit(struct drm_device *dev, 
> void *data,
>   struct panfrost_device *pfdev = dev->dev_private;
>   struct drm_panfrost_submit *args = data;
>   struct drm_syncobj *sync_out = NULL;
> + struct panfrost_submitqueue *queue;
>   struct panfrost_job *job;
>   int ret = 0;
>  
> @@ -259,10 +261,16 @@ static int panfrost_ioctl_submit(struct drm_device 
> *dev, void *data,
>   if (args->requirements && args->requirements != PANFROST_JD_REQ_FS)
>   return -EINVAL;
>  
> + queue = panfrost_submitqueue_get(file->driver_priv, 0);
> + if (IS_ERR(queue))
> + return PTR_ERR(queue);
> +
>   if (args->out_sync > 0) {
>   sync_out = drm_syncobj_find(file, args->out_sync);
> - if (!sync_out)
> - return -ENODEV;
> + if (!sync_out) {
> + ret = -ENODEV;
> + goto fail_put_queue;
> + }
>   }
>  
>   job = kzalloc(sizeof(*job), GFP_KERNEL);
> @@ -289,7 +297,7 @@ static int panfrost_ioctl_submit(struct drm_device *dev, 
> void *data,
>   if (ret)
>   goto fail_job;
>  
> - ret = panfrost_job_push(job);
> + ret = panfrost_job_push(queue, job);
>   if (ret)
>   goto fail_job;
>  
> @@ -302,6 +310,8 @@ static int panfrost_ioctl_submit(struct drm_device *dev, 
> void *data,
>  fail_out_sync:
>   if (sync_out)
>   drm_syncobj_put(sync_out);
> +fail_put_queue:
> + panfrost_submitqueue_put(queue);
>  
>   return ret;
>  }
> @@ -451,6 +461,36 @@ static int panfrost_ioctl_madvise(struct drm_device 
> *dev, void *data,
>   return ret;
>  }
>  
> +static int
> +panfrost_ioctl_create_submitqueue(struct drm_device *dev, void *data,
> +   struct drm_file *file_priv)
> +{
> + struct panfrost_file_priv *priv = file_priv->driver_priv;
> + struct drm_panfrost_create_submitqueue *args = data;
> + struct panfrost_submitqueue *queue;
> +
> + queue = panfrost_submitqueue_create(priv, args->priority, args->fl

Re: [PATCH v3 5/7] drm/panfrost: Add a new ioctl to submit batches

2021-07-02 Thread Steven Price
On 02/07/2021 15:32, Boris Brezillon wrote:
> This should help limit the number of ioctls when submitting multiple
> jobs. The new ioctl also supports syncobj timelines and BO access flags.
> 
> v3:
> * Re-use panfrost_get_job_bos() and panfrost_get_job_in_syncs() in the
>   old submit path
> 
> Signed-off-by: Boris Brezillon 

Better, but I was hoping we can mostly delete panfrost_ioctl_submit(),
leaving something along the lines of:

static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct panfrost_submitqueue *queue;
	struct drm_panfrost_submit *args = data;
	struct drm_panfrost_job submit_args = {
		.head = args->jc,
		.bos = args->bo_handles,
		.in_syncs = args->in_syncs,
		.out_syncs = &args->out_sync, // FIXME
		.in_sync_count = args->in_sync_count,
		.out_sync_count = args->out_sync > 0 ? 1 : 0,
		.bo_count = args->bo_handle_count,
		.requirements = args->requirements
	};
	int ret;

	queue = panfrost_submitqueue_get(file->driver_priv, 0);

	ret = panfrost_submit_job(dev, file, queue, &submit_args,
				  sizeof(u32), ...);

	return ret;
}

But obviously the out_sync part needs special handling as we can't just
pass a kernel pointer in like that ;)
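(One option, purely a sketch: build a single kernel-side ref for the
legacy out_sync and give the common helper a variant that takes
kernel-space refs, e.g.

	struct drm_panfrost_syncobj_ref out_ref = {
		.handle = args->out_sync,
		/* .point = 0: plain, non-timeline syncobj */
	};

- the names and the exact plumbing are up for discussion.)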

I'd like the above to avoid the duplication of things like this:

> + kref_init(&job->refcount);
> +
> + job->pfdev = pfdev;
> + job->jc = args->head;
> + job->requirements = args->requirements;
> + job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
> + job->file_priv = file_priv->driver_priv;
> + xa_init_flags(&job->deps, XA_FLAGS_ALLOC);

As otherwise someone is going to mess up in the future and this is going
to diverge between the two ioctls.

Steve

> ---
>  drivers/gpu/drm/panfrost/panfrost_drv.c | 366 +++-
>  drivers/gpu/drm/panfrost/panfrost_job.c |   3 +
>  include/uapi/drm/panfrost_drm.h |  84 ++
>  3 files changed, 375 insertions(+), 78 deletions(-)
> 
> diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c 
> b/drivers/gpu/drm/panfrost/panfrost_drv.c
> index 6529e5972b47..e2897de6e77d 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_drv.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
> @@ -138,111 +138,95 @@ panfrost_get_job_mappings(struct drm_file *file_priv, 
> struct panfrost_job *job)
>   return 0;
>  }
>  
> -/**
> - * panfrost_lookup_bos() - Sets up job->bo[] with the GEM objects
> - * referenced by the job.
> - * @dev: DRM device
> - * @file_priv: DRM file for this fd
> - * @args: IOCTL args
> - * @job: job being set up
> - *
> - * Resolve handles from userspace to BOs and attach them to job.
> - *
> - * Note that this function doesn't need to unreference the BOs on
> - * failure, because that will happen at panfrost_job_cleanup() time.
> - */
> +#define PANFROST_BO_REF_ALLOWED_FLAGS \
> + (PANFROST_BO_REF_EXCLUSIVE | PANFROST_BO_REF_NO_IMPLICIT_DEP)
> +
>  static int
> -panfrost_lookup_bos(struct drm_device *dev,
> -   struct drm_file *file_priv,
> -   struct drm_panfrost_submit *args,
> -   struct panfrost_job *job)
> +panfrost_get_job_bos(struct drm_file *file_priv,
> +  u64 refs, u32 ref_stride, u32 count,
> +  struct panfrost_job *job)
>  {
> + void __user *in = u64_to_user_ptr(refs);
>   unsigned int i;
> - int ret;
>  
> - job->bo_count = args->bo_handle_count;
> + job->bo_count = count;
>  
> - if (!job->bo_count)
> + if (!count)
>   return 0;
>  
> + job->bos = kvmalloc_array(job->bo_count, sizeof(*job->bos),
> +   GFP_KERNEL | __GFP_ZERO);
>   job->bo_flags = kvmalloc_array(job->bo_count,
>  sizeof(*job->bo_flags),
>  GFP_KERNEL | __GFP_ZERO);
> - if (!job->bo_flags)
> + if (!job->bos || !job->bo_flags)
>   return -ENOMEM;
>  
> - for (i = 0; i < job->bo_count; i++)
> - job->bo_flags[i] = PANFROST_BO_REF_EXCLUSIVE;
> + for (i = 0; i < count; i++) {
> + struct drm_panfrost_bo_ref ref = { };
> + int ret;
>  
> - ret = drm_gem_objects_lookup(file_priv,
> -  (void __user *)(uintptr_t)args->bo_handles,
> -  job->bo_count, &job->bos);
> - if (ret)
> - return ret;
> + ret = copy_struct_from_user(&ref, sizeof(ref),
> + in + (i * ref_stride),
> + ref_stride);
> + if (ret)
> + return ret;
>  
> - return panfrost_get_job_mappings(file_priv, job);
> + /* Prior to the BATCH_SUBMIT ioctl all accessed BOs were
> +  * 

Re: [PATCH v3 5/7] drm/panfrost: Add a new ioctl to submit batches

2021-07-05 Thread Steven Price
On 02/07/2021 19:11, Boris Brezillon wrote:
> On Fri, 2 Jul 2021 12:49:55 -0400
> Alyssa Rosenzweig  wrote:
> 
>>>> ```
>>>>  #define PANFROST_BO_REF_EXCLUSIVE  0x1
>>>> +#define PANFROST_BO_REF_NO_IMPLICIT_DEP  0x2
>>>> ```
>>>>
>>>> This seems logically backwards. NO_IMPLICIT_DEP makes sense if we're
>>>> trying to keep backwards compatibility, but here you're crafting a new
>>>> interface totally from scratch. If anything, isn't BO_REF_IMPLICIT_DEP
>>>> the flag you'd want?
>>>
>>> AFAICT, all other drivers make the no-implicit-dep an opt-in, and I
>>> didn't want to do things differently in panfrost. But if that's really
>>> an issue, I can make it an opt-out.  
>>
>> I don't have strong feelings either way. I was just under the
>> impressions other drivers did this for b/w compat reasons which don't
>> apply here.
> 
> Okay, I think I'll keep it like that unless there's a strong reason to
> make no-implicit dep the default. It's safer to oversync than to skip
> the synchronization, so it does feel like something the user should
> explicitly enable.

I don't have strong feelings - ultimately the number of projects caring
about the uABI is so limited the "default" is pretty irrelevant (it's
not as if we need to guide random developers who are new to the
interface). But a conservative default seems sensible.
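(Concretely, with the flags in this patch, userspace that knows a BO is
only read-shared opts out per reference - e.g.

	bo_refs[i].flags = PANFROST_BO_REF_NO_IMPLICIT_DEP;

where 'bo_refs' stands in for the submitted drm_panfrost_bo_ref array -
and forgetting the flag merely oversyncs rather than racing.)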

>>
>>>> Hmm. I'm not /opposed/ and I know kbase uses strides but it seems like
>>>> somewhat unwarranted complexity, and there is a combinatoric explosion
>>>> here (if jobs, bo refs, and syncobj refs use 3 different versions, as
>>>> this encoding permits... as opposed to just specifying a UABI version or
>>>> something like that)
>>>
>>> Sounds like a good idea. I'll add a version field and map that
>>> to a tuple of strides.
>>
>> Cc Steven, does this make sense?
> 
> I have this approach working, and I must admit I prefer it to the
> per-object stride field passed to the submit struct.
> 

There are benefits both ways:

 * Version number: easier to think about, and less worries about
combinatorial explosion of possible options to test.

 * Explicit structure sizes/strides: much harder to accidentally forget
to change; clients 'naturally' move to newer versions just by recompiling.

For now I'd be tempted to go for the version number, but I suspect we
should also ensure there's a generic 'flags' field in there. That would
allow us to introduce new features/behaviours in a way which can be
backported more easily if necessary.

The main benefit of structure sizes/strides is if you can break binary
backwards compatibility after a few years - because source compatibility
can easily be maintained while dropping the code for the shorter/older
structs. But Linux tries to maintain binary compatibility so this isn't
so relevant.
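For concreteness, the version-number approach might look like this
(sketch only, field names beyond 'version' invented):

struct drm_panfrost_batch_submit {
	/* Must be PANFROST_SUBMIT_BATCH_VERSION; the kernel maps it to
	 * a fixed set of job/bo_ref/syncobj_ref descriptor sizes. */
	__u32 version;
	/* Behaviour bits, must be 0 for now - somewhere to put a
	 * backportable opt-in later if we ever need one. */
	__u32 flags;
	__u64 jobs;
	__u32 job_count;
	__u32 pad;
};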

Steve


Re: [PATCH v3 5/7] drm/panfrost: Add a new ioctl to submit batches

2021-07-05 Thread Steven Price
On 05/07/2021 09:43, Boris Brezillon wrote:
> Hi Steven,
> 
> On Mon, 5 Jul 2021 09:22:39 +0100
> Steven Price  wrote:
> 
>> On 02/07/2021 19:11, Boris Brezillon wrote:
>>> On Fri, 2 Jul 2021 12:49:55 -0400
>>> Alyssa Rosenzweig  wrote:
>>>   
>>>>>> ```
>>>>>>>  #define PANFROST_BO_REF_EXCLUSIVE  0x1
>>>>>>> +#define PANFROST_BO_REF_NO_IMPLICIT_DEP  0x2
>>>>>> ```
>>>>>>
>>>>>> This seems logically backwards. NO_IMPLICIT_DEP makes sense if we're
>>>>>> trying to keep backwards compatibility, but here you're crafting a new
>>>>>> interface totally from scratch. If anything, isn't BO_REF_IMPLICIT_DEP
>>>>>> the flag you'd want?
>>>>>
>>>>> AFAICT, all other drivers make the no-implicit-dep an opt-in, and I
>>>>> didn't want to do things differently in panfrost. But if that's really
>>>>> an issue, I can make it an opt-out.
>>>>
>>>> I don't have strong feelings either way. I was just under the
>>>> impressions other drivers did this for b/w compat reasons which don't
>>>> apply here.  
>>>
>>> Okay, I think I'll keep it like that unless there's a strong reason to
>>> make no-implicit dep the default. It's safer to oversync than to skip
>>> the synchronization, so it does feel like something the user should
>>> explicitly enable.  
>>
>> I don't have strong feelings - ultimately the number of projects caring
>> about the uABI is so limited the "default" is pretty irrelevant (it's
>> not as if we need to guide random developers who are new to the
>> interface). But a conservative default seems sensible.
>>
>>>>  
>>>>>> Hmm. I'm not /opposed/ and I know kbase uses strides but it seems like
>>>>>> somewhat unwarranted complexity, and there is a combinatoric explosion
>>>>>> here (if jobs, bo refs, and syncobj refs use 3 different versions, as
>>>>>> this encoding permits... as opposed to just specifying a UABI version or
>>>>>> something like that)
>>>>>
>>>>> Sounds like a good idea. I'll add a version field and map that
>>>>> to a tuple of strides.
>>>>
>>>> Cc Steven, does this make sense?  
>>>
>>> I have this approach working, and I must admit I prefer it to the
>>> per-object stride field passed to the submit struct.
>>>   
>>
>> There are benefits both ways:
>>
>>  * Version number: easier to think about, and less worries about
>> combinatorial explosion of possible options to test.
>>
>>  * Explicit structure sizes/strides: much harder to accidentally forget
>> to change; clients 'naturally' move to newer versions just by recompiling.
> 
> The version I just sent has a PANFROST_SUBMIT_BATCH_VERSION macro
> defined in the uAPI header, so getting it right without changing the
> code should be fine (same as with the sizeof(struct xx) trick with
> the per-desc stride approach).
> 
>>
>> For now I'd be tempted to go for the version number, but I suspect we
>> should also ensure there's a generic 'flags' field in there. That would
>> allow us to introduce new features/behaviours in a way which can be
>> backported more easily if necessary.
> 
> Adding features at the submit level without changing the version number
> is already possible (we can extend drm_panfrost_batch_submit without
> breaking the uABI), but I'm not sure that's a good idea...

Ah, yes I'd forgotten the ioctl itself already had the implicit
sizeof(struct) encoding. I guess there's no need for flags (now) because
it can be added later if it ever becomes useful.

> If you mean adding a flags field at the job level, I can add it, but I
> wonder what you have in mind when you say some features might be
> interesting to backport. I really thought we'd force people to update
> their kernel when they want those new features.

My concern is if we ever find a security bug which requires new
information/behaviour in the submit ABI to properly fix. In this case it
would be appropriate to backport a 'feature' (bug fix) which provides a
new ABI but it would need to be a small change. A flags field where we
can set a "PANFROST_ACTUALLY_BE_SECURE" bit would be useful then - but
we wouldn't want to start bumping version numbers in the backport.

But at least for now we could just assume we'll expand the ioctl struct
if we ever hit that situation, so no need for an explicit flags field.

Steve


Re: [PATCH v4 4/7] drm/panfrost: Add the ability to create submit queues

2021-07-05 Thread Steven Price
On 05/07/2021 09:29, Boris Brezillon wrote:
> Needed to keep VkQueues isolated from each other.
> 
> v4:
> * Make panfrost_ioctl_create_submitqueue() return the queue ID
>   instead of a queue object
> 
> v3:
> * Limit the number of submitqueue per context to 16
> * Fix a deadlock
> 
> Signed-off-by: Boris Brezillon 

Reviewed-by: Steven Price 

> ---
>  drivers/gpu/drm/panfrost/Makefile |   3 +-
>  drivers/gpu/drm/panfrost/panfrost_device.h|   2 +-
>  drivers/gpu/drm/panfrost/panfrost_drv.c   |  69 -
>  drivers/gpu/drm/panfrost/panfrost_job.c   |  47 ++-
>  drivers/gpu/drm/panfrost/panfrost_job.h   |   9 +-
>  .../gpu/drm/panfrost/panfrost_submitqueue.c   | 132 ++
>  .../gpu/drm/panfrost/panfrost_submitqueue.h   |  26 
>  include/uapi/drm/panfrost_drm.h   |  17 +++
>  8 files changed, 260 insertions(+), 45 deletions(-)
>  create mode 100644 drivers/gpu/drm/panfrost/panfrost_submitqueue.c
>  create mode 100644 drivers/gpu/drm/panfrost/panfrost_submitqueue.h
> 
> diff --git a/drivers/gpu/drm/panfrost/Makefile 
> b/drivers/gpu/drm/panfrost/Makefile
> index b71935862417..e99192b66ec9 100644
> --- a/drivers/gpu/drm/panfrost/Makefile
> +++ b/drivers/gpu/drm/panfrost/Makefile
> @@ -9,6 +9,7 @@ panfrost-y := \
>   panfrost_gpu.o \
>   panfrost_job.o \
>   panfrost_mmu.o \
> - panfrost_perfcnt.o
> + panfrost_perfcnt.o \
> + panfrost_submitqueue.o
>  
>  obj-$(CONFIG_DRM_PANFROST) += panfrost.o
> diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h 
> b/drivers/gpu/drm/panfrost/panfrost_device.h
> index 8b25278f34c8..51c0ba4e50f5 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_device.h
> +++ b/drivers/gpu/drm/panfrost/panfrost_device.h
> @@ -137,7 +137,7 @@ struct panfrost_mmu {
>  struct panfrost_file_priv {
>   struct panfrost_device *pfdev;
>  
> - struct drm_sched_entity sched_entity[NUM_JOB_SLOTS];
> + struct idr queues;
>  
>   struct panfrost_mmu *mmu;
>  };
> diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c 
> b/drivers/gpu/drm/panfrost/panfrost_drv.c
> index b6b5997c9366..8e28ef30310b 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_drv.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
> @@ -19,6 +19,7 @@
>  #include "panfrost_job.h"
>  #include "panfrost_gpu.h"
>  #include "panfrost_perfcnt.h"
> +#include "panfrost_submitqueue.h"
>  
>  static bool unstable_ioctls;
>  module_param_unsafe(unstable_ioctls, bool, 0600);
> @@ -250,6 +251,7 @@ static int panfrost_ioctl_submit(struct drm_device *dev, 
> void *data,
>   struct panfrost_device *pfdev = dev->dev_private;
>   struct drm_panfrost_submit *args = data;
>   struct drm_syncobj *sync_out = NULL;
> + struct panfrost_submitqueue *queue;
>   struct panfrost_job *job;
>   int ret = 0;
>  
> @@ -259,10 +261,16 @@ static int panfrost_ioctl_submit(struct drm_device 
> *dev, void *data,
>   if (args->requirements && args->requirements != PANFROST_JD_REQ_FS)
>   return -EINVAL;
>  
> + queue = panfrost_submitqueue_get(file->driver_priv, 0);
> + if (IS_ERR(queue))
> + return PTR_ERR(queue);
> +
>   if (args->out_sync > 0) {
>   sync_out = drm_syncobj_find(file, args->out_sync);
> - if (!sync_out)
> - return -ENODEV;
> + if (!sync_out) {
> + ret = -ENODEV;
> + goto fail_put_queue;
> + }
>   }
>  
>   job = kzalloc(sizeof(*job), GFP_KERNEL);
> @@ -289,7 +297,7 @@ static int panfrost_ioctl_submit(struct drm_device *dev, 
> void *data,
>   if (ret)
>   goto fail_job;
>  
> - ret = panfrost_job_push(job);
> + ret = panfrost_job_push(queue, job);
>   if (ret)
>   goto fail_job;
>  
> @@ -302,6 +310,8 @@ static int panfrost_ioctl_submit(struct drm_device *dev, 
> void *data,
>  fail_out_sync:
>   if (sync_out)
>   drm_syncobj_put(sync_out);
> +fail_put_queue:
> + panfrost_submitqueue_put(queue);
>  
>   return ret;
>  }
> @@ -451,6 +461,36 @@ static int panfrost_ioctl_madvise(struct drm_device 
> *dev, void *data,
>   return ret;
>  }
>  
> +static int
> +panfrost_ioctl_create_submitqueue(struct drm_device *dev, void *data,
> +   struct drm_file *file_priv)
> +{
> + struct panfrost_file_priv *priv = file_priv->driver_priv;
> + struct drm_panfrost_create_submitqueue *args = data;
> + int ret;
> +
> + ret = panf

Re: [PATCH v4 5/7] drm/panfrost: Add a new ioctl to submit batches

2021-07-05 Thread Steven Price
On 05/07/2021 09:29, Boris Brezillon wrote:
> This should help limit the number of ioctls when submitting multiple
> jobs. The new ioctl also supports syncobj timelines and BO access flags.
> 
> v4:
> * Implement panfrost_ioctl_submit() as a wrapper around
>   panfrost_submit_job()
> * Replace stride fields by a version field which is mapped to
>   a  tuple internally
> 
> v3:
> * Re-use panfrost_get_job_bos() and panfrost_get_job_in_syncs() in the
>   old submit path
> 
> Signed-off-by: Boris Brezillon 
> ---
>  drivers/gpu/drm/panfrost/panfrost_drv.c | 562 
>  drivers/gpu/drm/panfrost/panfrost_job.c |   3 +
>  include/uapi/drm/panfrost_drm.h |  92 
>  3 files changed, 479 insertions(+), 178 deletions(-)
> 
> diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c 
> b/drivers/gpu/drm/panfrost/panfrost_drv.c
> index 8e28ef30310b..a624e4f86aff 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_drv.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
> @@ -138,184 +138,6 @@ panfrost_get_job_mappings(struct drm_file *file_priv, 
> struct panfrost_job *job)
>   return 0;
>  }
>  
> -/**
> - * panfrost_lookup_bos() - Sets up job->bo[] with the GEM objects
> - * referenced by the job.
> - * @dev: DRM device
> - * @file_priv: DRM file for this fd
> - * @args: IOCTL args
> - * @job: job being set up
> - *
> - * Resolve handles from userspace to BOs and attach them to job.
> - *
> - * Note that this function doesn't need to unreference the BOs on
> - * failure, because that will happen at panfrost_job_cleanup() time.
> - */
> -static int
> -panfrost_lookup_bos(struct drm_device *dev,
> -   struct drm_file *file_priv,
> -   struct drm_panfrost_submit *args,
> -   struct panfrost_job *job)
> -{
> - unsigned int i;
> - int ret;
> -
> - job->bo_count = args->bo_handle_count;
> -
> - if (!job->bo_count)
> - return 0;
> -
> - job->bo_flags = kvmalloc_array(job->bo_count,
> -sizeof(*job->bo_flags),
> -GFP_KERNEL | __GFP_ZERO);
> - if (!job->bo_flags)
> - return -ENOMEM;
> -
> - for (i = 0; i < job->bo_count; i++)
> - job->bo_flags[i] = PANFROST_BO_REF_EXCLUSIVE;
> -
> - ret = drm_gem_objects_lookup(file_priv,
> -  (void __user *)(uintptr_t)args->bo_handles,
> -  job->bo_count, &job->bos);
> - if (ret)
> - return ret;
> -
> - return panfrost_get_job_mappings(file_priv, job);
> -}
> -
> -/**
> - * panfrost_copy_in_sync() - Sets up job->deps with the sync objects
> - * referenced by the job.
> - * @dev: DRM device
> - * @file_priv: DRM file for this fd
> - * @args: IOCTL args
> - * @job: job being set up
> - *
> - * Resolve syncobjs from userspace to fences and attach them to job.
> - *
> - * Note that this function doesn't need to unreference the fences on
> - * failure, because that will happen at panfrost_job_cleanup() time.
> - */
> -static int
> -panfrost_copy_in_sync(struct drm_device *dev,
> -   struct drm_file *file_priv,
> -   struct drm_panfrost_submit *args,
> -   struct panfrost_job *job)
> -{
> - u32 *handles;
> - int ret = 0;
> - int i, in_fence_count;
> -
> - in_fence_count = args->in_sync_count;
> -
> - if (!in_fence_count)
> - return 0;
> -
> - handles = kvmalloc_array(in_fence_count, sizeof(u32), GFP_KERNEL);
> - if (!handles) {
> - ret = -ENOMEM;
> - DRM_DEBUG("Failed to allocate incoming syncobj handles\n");
> - goto fail;
> - }
> -
> - if (copy_from_user(handles,
> -(void __user *)(uintptr_t)args->in_syncs,
> -in_fence_count * sizeof(u32))) {
> - ret = -EFAULT;
> - DRM_DEBUG("Failed to copy in syncobj handles\n");
> - goto fail;
> - }
> -
> - for (i = 0; i < in_fence_count; i++) {
> - struct dma_fence *fence;
> -
> - ret = drm_syncobj_find_fence(file_priv, handles[i], 0, 0,
> -  &fence);
> - if (ret)
> - goto fail;
> -
> - ret = drm_gem_fence_array_add(&job->deps, fence);
> -
> - if (ret)
> - goto fail;
> - }
> -
> -fail:
> - kvfree(handles);
> - return ret;
> -}
> -
> -static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
> - struct drm_file *file)
> -{
> - struct panfrost_device *pfdev = dev->dev_private;
> - struct drm_panfrost_submit *args = data;
> - struct drm_syncobj *sync_out = NULL;
> - struct panfrost_submitqueue *queue;
> - struct panfrost_job *job;
> - int ret = 0;
> -
> - if (!args->jc)
> - return -EINVAL;
> -
> - if (args->requirements && args->requirements != PANFROST_JD_REQ_

Re: [PATCH v2] drm/panfrost:report the full raw fault information instead

2021-07-05 Thread Steven Price
On 02/07/2021 02:40, Chunyou Tang wrote:
> Hi Steve,
>> You didn't answer my previous question:
>>
>>> Is this device working with the kbase/DDK proprietary driver?
> 
> I don't know whether I used kbase/DDK, I only know I used the panfrost
> driver in Linux 5.11.

kbase is the Linux kernel part of Arm's proprietary driver. The
proprietary driver from Arm is often called "the [Mali] DDK" by Arm (or
"the blob" by others). I guess "libmali.so" is the closest it has to a
real name.

>> What you are describing sounds like a hardware integration issue, so
>> it would be good to check that the hardware is working with the
>> proprietary driver to rule that out. And perhaps there is something
>> in the kbase for this device that is setting a chicken bit to 'fix'
>> the coherency?
> 
> I don't have the proprietary driver, I only used the driver in Linux 5.11.

Interesting - I would have expected the initial hardware bring up to
have happened with the proprietary driver from Arm. And my first step
would be to check whether any workarounds for integration issues were
applied in the kernel.

I think we'd need some input from the people who did the hardware
integration, and hopefully they would have access to the proprietary
driver from Arm.
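
For reference, the logic in question lives in arm_lpae_prot_to_pte() in
drivers/iommu/io-pgtable-arm.c; a minimal sketch of the experiment being
described (for local debugging only, not something to merge) would be:

	/* sketch: force Mali mappings to non-shareable to see if the faults stop */
	if (data->iop.fmt == ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_SH_NS;
	else if (prot & IOMMU_CACHE)
		pte |= ARM_LPAE_PTE_SH_IS;
	else
		pte |= ARM_LPAE_PTE_SH_OS;

If non-shareable is the only setting that avoids the faults, that points
at a coherency problem in the hardware integration rather than in the
driver.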

Steve

> Thanks very much!
> 
> Chunyou.
> 
> 
> On Thu, 1 Jul 2021 11:15:14 +0100
> Steven Price wrote:
> 
>> On 29/06/2021 04:04, Chunyou Tang wrote:
> 
> 
>>> Hi Steve,
>>> Thanks for your reply.
>>> I set the pte in arm_lpae_prot_to_pte(),
>>> ***
>>> /*
>>>  * Also Mali has its own notions of shareability wherein its
>>> Inner
>>>  * domain covers the cores within the GPU, and its Outer
>>> domain is
>>>  * "outside the GPU" (i.e. either the Inner or System
>>> domain in CPU
>>>  * terms, depending on coherency).
>>>  */
>>> if (prot & IOMMU_CACHE && data->iop.fmt != ARM_MALI_LPAE)
>>> pte |= ARM_LPAE_PTE_SH_IS;
>>> else
>>> pte |= ARM_LPAE_PTE_SH_OS;
>>> ***
>>> I set pte |= ARM_LPAE_PTE_SH_NS.
>>>
>>> If I set the pte to ARM_LPAE_PTE_SH_OS or
>>> ARM_LPAE_PTE_SH_IS, whether I use a single core GPU or a multi
>>> core GPU, a GPU Fault will occur.
>>> If I set the pte to ARM_LPAE_PTE_SH_NS, whether I use a single core
>>> GPU or a multi core GPU, a GPU Fault will not occur.
>>
>> Hi,
>>
>> So this is a difference between Panfrost and kbase. Panfrost (well
>> technically the IOMMU framework) enables the inner-shareable bit for
>> all memory, whereas kbase only enables it for some memory types (the
>> BASE_MEM_COHERENT_LOCAL flag in the UABI controls it). However this
>> should only be a performance/power difference (and AFAIK probably an
>> irrelevant one) and it's definitely required that "inner shareable"
>> (i.e. within the GPU) works for communication between the different
>> units of the GPU.
>>
>> You didn't answer my previous question:
>>
>>> Is this device working with the kbase/DDK proprietary driver?
>>
>> What you are describing sounds like a hardware integration issue, so
>> it would be good to check that the hardware is working with the
>> proprietary driver to rule that out. And perhaps there is something
>> in the kbase for this device that is setting a chicken bit to 'fix'
>> the coherency?
>>
>> Steve
> 
> 



Re: [PATCH v3 5/7] drm/panfrost: Add a new ioctl to submit batches

2021-07-07 Thread Steven Price
On 06/07/2021 13:48, Alyssa Rosenzweig wrote:
>> My concern is if we ever find a security bug which requires new
>> information/behaviour in the submit ABI to properly fix. In this case it
>> would be appropriate to backport a 'feature' (bug fix) which provides a
>> new ABI but it would need to be a small change. A flags field where we
>> can set a "PANFROST_ACTUALLY_BE_SECURE" bit would be useful then - but
>> we wouldn't want to start bumping version numbers in the backport.
>>
>> But at least for now we could just assume we'll expand the ioctl struct
>> if we ever hit that situation, so no need for an explicit flags field.
> 
> I'm curious if kbase ever hit something like this? It wouldn't have
> occurred to me as a possibility.
> 

kbase (at least historically) didn't care about backwards compatibility
- so it has tended to just break the ABI if necessary.

We have had workarounds such as BASE_HW_ISSUE_8987 (with the
lovely-named DEFAULT_SECURE_BUT_LOSS_OF_PERFORMANCE flag) where the
isolation between address spaces was broken. It might have been
reasonable in that situation to have exposed a new flag which allows
security-sensitive applications (e.g. the on-screen keyboard) to force
the more secure mode of operation (taking the performance hit) while not
penalising other applications.

But it's probably just my paranoia ;) All the serious security bugs I
can think of were genuine software bugs and could just be fixed.
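
(If we ever did need it, I'd expect something as small as a new bit in a
submit flags field - the name below is purely hypothetical, borrowing
the joke from earlier in the thread:

	/* hypothetical flag, for illustration only */
	#define PANFROST_SUBMIT_ACTUALLY_BE_SECURE	(1 << 0)

	if (args->flags & ~PANFROST_SUBMIT_ACTUALLY_BE_SECURE)
		return -EINVAL;

which is the sort of thing that can be backported without bumping any
version numbers.)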

Steve


Re: [PATCH v3 07/20] drm/panfrost: use scheduler dependency tracking

2021-07-12 Thread Steven Price
On 08/07/2021 18:37, Daniel Vetter wrote:
> Just deletes some code that's now more shared.
> 
> Note that thanks to the split into drm_sched_job_init/arm we can now
> easily pull the _init() part from under the submission lock way ahead
> where we're adding the sync file in-fences as dependencies.
> 
> v2: Correctly clean up the partially set up job, now that job_init()
> and job_arm() are apart (Emma).
> 
> Reviewed-by: Steven Price  (v1)

v2/v3 are also:

Reviewed-by: Steven Price 
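
For anyone following along, the shape of the submit flow after the
init/arm split is roughly (my sketch of the series, not code from this
patch):

	ret = drm_sched_job_init(&job->base, entity, NULL);
	/* gather in-fences / implicit fences - no submission lock held */
	ret = drm_sched_job_await_fence(&job->base, fence);

	/* only the arm + push need to happen under the submission lock */
	drm_sched_job_arm(&job->base);
	drm_sched_entity_push_job(&job->base, entity);

which is what lets the dependency gathering move out from under the lock.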

Thanks,

Steve

> Signed-off-by: Daniel Vetter 
> Cc: Rob Herring 
> Cc: Tomeu Vizoso 
> Cc: Steven Price 
> Cc: Alyssa Rosenzweig 
> Cc: Sumit Semwal 
> Cc: "Christian König" 
> Cc: linux-me...@vger.kernel.org
> Cc: linaro-mm-...@lists.linaro.org
> ---
>  drivers/gpu/drm/panfrost/panfrost_drv.c | 16 ---
>  drivers/gpu/drm/panfrost/panfrost_job.c | 37 +++--
>  drivers/gpu/drm/panfrost/panfrost_job.h |  5 +---
>  3 files changed, 17 insertions(+), 41 deletions(-)
> 
> diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c 
> b/drivers/gpu/drm/panfrost/panfrost_drv.c
> index 1ffaef5ec5ff..9f53bea07d61 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_drv.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
> @@ -218,7 +218,7 @@ panfrost_copy_in_sync(struct drm_device *dev,
>   if (ret)
>   goto fail;
>  
> - ret = drm_gem_fence_array_add(&job->deps, fence);
> + ret = drm_sched_job_await_fence(&job->base, fence);
>  
>   if (ret)
>   goto fail;
> @@ -236,7 +236,7 @@ static int panfrost_ioctl_submit(struct drm_device *dev, 
> void *data,
>   struct drm_panfrost_submit *args = data;
>   struct drm_syncobj *sync_out = NULL;
>   struct panfrost_job *job;
> - int ret = 0;
> + int ret = 0, slot;
>  
>   if (!args->jc)
>   return -EINVAL;
> @@ -258,14 +258,20 @@ static int panfrost_ioctl_submit(struct drm_device 
> *dev, void *data,
>  
>   kref_init(&job->refcount);
>  
> - xa_init_flags(&job->deps, XA_FLAGS_ALLOC);
> -
>   job->pfdev = pfdev;
>   job->jc = args->jc;
>   job->requirements = args->requirements;
>   job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
>   job->file_priv = file->driver_priv;
>  
> + slot = panfrost_job_get_slot(job);
> +
> + ret = drm_sched_job_init(&job->base,
> +  &job->file_priv->sched_entity[slot],
> +  NULL);
> + if (ret)
> + goto fail_job_put;
> +
>   ret = panfrost_copy_in_sync(dev, file, args, job);
>   if (ret)
>   goto fail_job;
> @@ -283,6 +289,8 @@ static int panfrost_ioctl_submit(struct drm_device *dev, 
> void *data,
>   drm_syncobj_replace_fence(sync_out, job->render_done_fence);
>  
>  fail_job:
> + drm_sched_job_cleanup(&job->base);
> +fail_job_put:
>   panfrost_job_put(job);
>  fail_out_sync:
>   if (sync_out)
> diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c 
> b/drivers/gpu/drm/panfrost/panfrost_job.c
> index 4bc962763e1f..86c843d8822e 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_job.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_job.c
> @@ -102,7 +102,7 @@ static struct dma_fence *panfrost_fence_create(struct 
> panfrost_device *pfdev, in
>   return &fence->base;
>  }
>  
> -static int panfrost_job_get_slot(struct panfrost_job *job)
> +int panfrost_job_get_slot(struct panfrost_job *job)
>  {
>   /* JS0: fragment jobs.
>* JS1: vertex/tiler jobs
> @@ -242,13 +242,13 @@ static void panfrost_job_hw_submit(struct panfrost_job 
> *job, int js)
>  
>  static int panfrost_acquire_object_fences(struct drm_gem_object **bos,
> int bo_count,
> -   struct xarray *deps)
> +   struct drm_sched_job *job)
>  {
>   int i, ret;
>  
>   for (i = 0; i < bo_count; i++) {
>   /* panfrost always uses write mode in its current uapi */
> - ret = drm_gem_fence_array_add_implicit(deps, bos[i], true);
> + ret = drm_sched_job_await_implicit(job, bos[i], true);
>   if (ret)
>   return ret;
>   }
> @@ -269,31 +269,21 @@ static void panfrost_attach_object_fences(struct 
> drm_gem_object **bos,
>  int panfrost_job_push(struct panfrost_job *job)
>  {
>   struct panfrost_device *pfdev = job->pfdev;
> - int 

Re: [PATCH v3] drm/panfrost:fix the exception name always "UNKNOWN"

2021-07-12 Thread Steven Price
On 08/07/2021 08:34, ChunyouTang wrote:
> From: ChunyouTang 
> 
> The exception_code field in the register is only 8 bits, so if
> fault_status in panfrost_gpu_irq_handler() isn't masked with
> (& 0xFF), it can't get the correct exception reason.
> 
> It's also better to show the whole register value to the customer,
> so it's better if fault_status itself isn't masked with (& 0xFF).
> 
> Signed-off-by: ChunyouTang 

Reviewed-by: Steven Price 

Boris's change has actually modified panfrost_exception_name() to no
longer take pfdev in drm-misc-next. However, I'll just fix this up when
I apply it.
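
I expect the applied hunk to end up looking roughly like this (a sketch,
modulo anything else that has changed in drm-misc-next):

	dev_warn(pfdev->dev, "GPU Fault 0x%08x (%s) at 0x%016llx\n",
		 fault_status, panfrost_exception_name(fault_status & 0xFF),
		 address);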

Thanks,

Steve

> ---
>  drivers/gpu/drm/panfrost/panfrost_gpu.c | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
> diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c 
> b/drivers/gpu/drm/panfrost/panfrost_gpu.c
> index 1fffb6a0b24f..d2d287bbf4e7 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
> @@ -33,7 +33,7 @@ static irqreturn_t panfrost_gpu_irq_handler(int irq, void 
> *data)
>   address |= gpu_read(pfdev, GPU_FAULT_ADDRESS_LO);
>  
>   dev_warn(pfdev->dev, "GPU Fault 0x%08x (%s) at 0x%016llx\n",
> -  fault_status & 0xFF, panfrost_exception_name(pfdev, 
> fault_status & 0xFF),
> +  fault_status, panfrost_exception_name(pfdev, 
> fault_status & 0xFF),
>address);
>  
>   if (state & GPU_IRQ_MULTIPLE_FAULT)
> 



[PATCH] drm/of: free the iterator object on failure

2021-07-12 Thread Steven Price
When bailing out due to the sanity check the iterator value needs to be
freed because the early return prevents for_each_child_of_node() from
doing the dereference itself.

Fixes: 4ee48cc5586b ("drm: of: Fix double-free bug")
Signed-off-by: Steven Price 
---
 drivers/gpu/drm/drm_of.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

Daniel's email[1] made me take a look at this function and it appears
that for_each_child_of_node()'s interface had caused a bad bug fix due
to the hidden reference counting in the iterator.

[1] https://lore.kernel.org/r/YOxQ5TbkNrqCGBDJ%40phenom.ffwll.local
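
The underlying rule, as a sketch (names here are generic, not from the
patch): for_each_child_of_node() takes a reference on each node it hands
you and drops it when advancing to the next iteration, so any early exit
from the loop must drop the final reference itself:

	struct device_node *child;

	for_each_child_of_node(parent, child) {
		if (something_is_wrong) {
			/* the loop won't drop this reference for us */
			of_node_put(child);
			return -EINVAL;
		}
	}
	/* no of_node_put() needed when the loop runs to completion */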

diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 197c57477344..997b8827fed2 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -331,8 +331,10 @@ static int drm_of_lvds_get_remote_pixels_type(
 * configurations by passing the endpoints explicitly to
 * drm_of_lvds_get_dual_link_pixel_order().
 */
-   if (!current_pt || pixels_type != current_pt)
+   if (!current_pt || pixels_type != current_pt) {
+   of_node_put(endpoint);
return -EINVAL;
+   }
}
 
return pixels_type;
-- 
2.20.1



Re: [PATCH] drm/of: free the iterator object on failure

2021-07-12 Thread Steven Price
On 12/07/2021 17:50, Laurent Pinchart wrote:
> Hi Steven,
> 
> Thank you for the patch.
> 
> On Mon, Jul 12, 2021 at 04:57:58PM +0100, Steven Price wrote:
>> When bailing out due to the sanity check the iterator value needs to be
>> freed because the early return prevents for_each_child_of_node() from
>> doing the dereference itself.
>>
>> Fixes: 4ee48cc5586b ("drm: of: Fix double-free bug")
> 
> I don't think the Fixes tag is correct, the issue was already present
> before 4ee48cc5586b. The fix looks right though.

I'm not sure quite what you mean by "already present". As I understand
it the timeline was:

1. 6529007522de drm: of: Add drm_of_lvds_get_dual_link_pixel_order
   The function was originally added. It twice made the mistake of
   calling of_node_put() on the wrong variable (remote_port rather than
   endpoint).

2. 4ee48cc5586b drm: of: Fix double-free bug
   One of the of_node_put() calls was removed as it was a double-free.
   This left the first incorrect of_node_put() in place, and the second
   is now a straight leak.

3. b557a5f8da57 drm/of: free the right object
   This (correctly) fixes the first of_node_put() to free endpoint. And
   the post from Daniel was what caused me to look.

4. This patch
   Reintroduces the of_node_put() removed in (2) but putting endpoint
   rather than remote_port.

I've put (2) in the Fixes line as this patch is fixing the leak
introduced by that patch, but that in itself was of course 'fixing' the
double free of the original patch.

Steve

>> Signed-off-by: Steven Price 
>> ---
>>  drivers/gpu/drm/drm_of.c | 4 +++-
>>  1 file changed, 3 insertions(+), 1 deletion(-)
>>
>> Daniel's email[1] made me take a look at this function and it appears
>> that for_each_child_of_node()'s interface had caused a bad bug fix due
>> to the hidden reference counting in the iterator.
>>
>> [1] https://lore.kernel.org/r/YOxQ5TbkNrqCGBDJ%40phenom.ffwll.local
>>
>> diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
>> index 197c57477344..997b8827fed2 100644
>> --- a/drivers/gpu/drm/drm_of.c
>> +++ b/drivers/gpu/drm/drm_of.c
>> @@ -331,8 +331,10 @@ static int drm_of_lvds_get_remote_pixels_type(
>>   * configurations by passing the endpoints explicitly to
>>   * drm_of_lvds_get_dual_link_pixel_order().
>>   */
>> -if (!current_pt || pixels_type != current_pt)
>> +if (!current_pt || pixels_type != current_pt) {
>> +of_node_put(endpoint);
>>  return -EINVAL;
>> +}
>>  }
>>  
>>  return pixels_type;
> 



Re: [PATCH] drm/of: free the iterator object on failure

2021-07-13 Thread Steven Price
On 12/07/2021 22:55, Laurent Pinchart wrote:
> Hi Steven,

Hi Laurent,

> On Mon, Jul 12, 2021 at 10:31:52PM +0100, Steven Price wrote:
>> On 12/07/2021 17:50, Laurent Pinchart wrote:
>>> On Mon, Jul 12, 2021 at 04:57:58PM +0100, Steven Price wrote:
>>>> When bailing out due to the sanity check the iterator value needs to be
>>>> freed because the early return prevents for_each_child_of_node() from
>>>> doing the dereference itself.
>>>>
>>>> Fixes: 4ee48cc5586b ("drm: of: Fix double-free bug")
>>>
>>> I don't think the Fixes tag is correct, the issue was already present
>>> before 4ee48cc5586b. The fix looks right though.
>>
>> I'm not sure quite what you mean by "already present". As I understand
>> it the timeline was:
>>
>> 1. 6529007522de drm: of: Add drm_of_lvds_get_dual_link_pixel_order
>>The function was originally added. This made the mistake twice of
>>calling of_node_put() on the wrong variable (remote_port rather than
>>endpoint).
> 
> Correct.
> 
>> 2. 4ee48cc5586b drm: of: Fix double-free bug
>>One of the of_node_put() calls was removed as it was a double-free.
>>This left the first incorrect of_node_put() in place, and the second
>>is now a straight leak.
> 
> That's right, but this commit didn't introduce the leak, it was already
> there in 6529007522de (in addition to the double-free).

Ah, I see what you mean. My thought process was that the original
commit had the bug "using the wrong variable", and (2) (partially)
fixed that but in the process introduced a new bug (a memory leak). But
I guess technically the memory leak was there from the beginning.

The other reason I referenced (2) in the Fixes line is that this
patch depends on patch (2) and won't apply cleanly without it.

However I don't think it really matters either way: (2) has already been
backported, and this needs fixing if either (1) or (2) is
present.

Would you like me to resend with a "Fixes: 6529007522de drm: of: Add
drm_of_lvds_get_dual_link_pixel_order", or are you happy to just fix
this up when merging?

Thanks,

Steve

>> 3. b557a5f8da57 drm/of: free the right object
>>This (correctly) fixes the first of_node_put() to free endpoint. And
>>the post from Daniel was what caused me to look.
>>
>> 4. This patch
>>Reintroduces the of_node_put() removed in (2) but putting endpoint
>>rather than remote_port.
>>
>> I've put (2) in the Fixes line as this patch is fixing the leak
>> introduced by that patch, but that in itself was of course 'fixing' the
>> double free of the original patch.
>>
>>>> Signed-off-by: Steven Price 
>>>> ---
>>>>  drivers/gpu/drm/drm_of.c | 4 +++-
>>>>  1 file changed, 3 insertions(+), 1 deletion(-)
>>>>
>>>> Daniel's email[1] made me take a look at this function and it appears
>>>> that for_each_child_of_node()'s interface had caused a bad bug fix due
>>>> to the hidden reference counting in the iterator.
>>>>
>>>> [1] https://lore.kernel.org/r/YOxQ5TbkNrqCGBDJ%40phenom.ffwll.local
>>>>
>>>> diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
>>>> index 197c57477344..997b8827fed2 100644
>>>> --- a/drivers/gpu/drm/drm_of.c
>>>> +++ b/drivers/gpu/drm/drm_of.c
>>>> @@ -331,8 +331,10 @@ static int drm_of_lvds_get_remote_pixels_type(
>>>> * configurations by passing the endpoints explicitly to
>>>> * drm_of_lvds_get_dual_link_pixel_order().
>>>> */
>>>> -  if (!current_pt || pixels_type != current_pt)
>>>> +  if (!current_pt || pixels_type != current_pt) {
>>>> +  of_node_put(endpoint);
>>>>return -EINVAL;
>>>> +  }
>>>>}
>>>>  
>>>>return pixels_type;
> 



[PATCH v2] drm/of: free the iterator object on failure

2021-07-14 Thread Steven Price
When bailing out due to the sanity check the iterator value needs to be
freed because the early return prevents for_each_child_of_node() from
doing the dereference itself.

Fixes: 6529007522de ("drm: of: Add drm_of_lvds_get_dual_link_pixel_order")
Signed-off-by: Steven Price 
---
 drivers/gpu/drm/drm_of.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

v2: Fixes now refers to the original commit as suggested by Laurent, rather
than 4ee48cc5586b ("drm: of: Fix double-free bug") which only fixed part of
the problem. Note that 4ee48cc5586b is a dependency for this patch to
cleanly apply.

diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 197c57477344..997b8827fed2 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -331,8 +331,10 @@ static int drm_of_lvds_get_remote_pixels_type(
 * configurations by passing the endpoints explicitly to
 * drm_of_lvds_get_dual_link_pixel_order().
 */
-   if (!current_pt || pixels_type != current_pt)
+   if (!current_pt || pixels_type != current_pt) {
+   of_node_put(endpoint);
return -EINVAL;
+   }
}
 
return pixels_type;
-- 
2.20.1



Re: [PATCH v2] drm/of: free the iterator object on failure

2021-07-15 Thread Steven Price
On 14/07/2021 16:26, Laurent Pinchart wrote:
> Hi Steven,
> 
> Thank you for the patch.
> 
> On Wed, Jul 14, 2021 at 03:33:00PM +0100, Steven Price wrote:
>> When bailing out due to the sanity check the iterator value needs to be
>> freed because the early return prevents for_each_child_of_node() from
>> doing the dereference itself.
>>
>> Fixes: 6529007522de ("drm: of: Add drm_of_lvds_get_dual_link_pixel_order")
>> Signed-off-by: Steven Price 
> 
> Reviewed-by: Laurent Pinchart 

Thanks! Applied to drm-misc-next.

Steve

>> ---
>>  drivers/gpu/drm/drm_of.c | 4 +++-
>>  1 file changed, 3 insertions(+), 1 deletion(-)
>>
>> v2: Fixes now refers to the original commit as suggested by Laurent, rather
>> than 4ee48cc5586b ("drm: of: Fix double-free bug") which only fixed part of
>> the problem. Note that 4ee48cc5586b is a dependency for this patch to
>> cleanly apply.
>>
>> diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
>> index 197c57477344..997b8827fed2 100644
>> --- a/drivers/gpu/drm/drm_of.c
>> +++ b/drivers/gpu/drm/drm_of.c
>> @@ -331,8 +331,10 @@ static int drm_of_lvds_get_remote_pixels_type(
>>   * configurations by passing the endpoints explicitly to
>>   * drm_of_lvds_get_dual_link_pixel_order().
>>   */
>> -if (!current_pt || pixels_type != current_pt)
>> +if (!current_pt || pixels_type != current_pt) {
>> +of_node_put(endpoint);
>>  return -EINVAL;
>> +}
>>  }
>>  
>>  return pixels_type;
> 



Re: [PATCH] drm/panfrost: Replace devm_reset_control_array_get()

2020-11-04 Thread Steven Price

On 03/11/2020 01:48, Yejune Deng wrote:

devm_reset_control_array_get_optional_exclusive() looks more readable

Signed-off-by: Yejune Deng 


Reviewed-by: Steven Price 

Thanks, I'll push this to drm-misc-next.
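
For reference, the two calls are equivalent - the helper variant just
encodes the boolean arguments in its name (my reading of the reset
controller API, variable names generic):

	/* shared = false (i.e. exclusive), optional = true */
	rstc = devm_reset_control_array_get(dev, false, true);

	/* ... is the same as ... */
	rstc = devm_reset_control_array_get_optional_exclusive(dev);

so there's no functional change, purely a readability improvement.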

Steve


---
  drivers/gpu/drm/panfrost/panfrost_device.c | 2 +-
  1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c 
b/drivers/gpu/drm/panfrost/panfrost_device.c
index ea8d318..1daf932 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -18,7 +18,7 @@
  
  static int panfrost_reset_init(struct panfrost_device *pfdev)

  {
-   pfdev->rstc = devm_reset_control_array_get(pfdev->dev, false, true);
+   pfdev->rstc = 
devm_reset_control_array_get_optional_exclusive(pfdev->dev);
if (IS_ERR(pfdev->rstc)) {
dev_err(pfdev->dev, "get reset failed %ld\n", 
PTR_ERR(pfdev->rstc));
return PTR_ERR(pfdev->rstc);





Re: [PATCH v4] drm/panfrost: Move the GPU reset bits outside the timeout handler

2020-11-05 Thread Steven Price

On 04/11/2020 17:07, Boris Brezillon wrote:

We've fixed many races in panfrost_job_timedout() but some remain.
Instead of trying to fix it again, let's simplify the logic and move
the reset bits to a separate work scheduled when one of the queues
reports a timeout.

v4:
- Rework the logic to prevent a race between drm_sched_start()
   (reset work) and drm_sched_job_timedout() (timeout work)
- Drop Steven's R-b
- Add dma_fence annotation to the panfrost_reset() function (Daniel Vetter)

v3:
- Replace the atomic_cmpxchg() by an atomic_xchg() (Robin Murphy)
- Add Steven's R-b

v2:
- Use atomic_cmpxchg() to conditionally schedule the reset work (Steven Price)

Fixes: 1a11a88cfd9a ("drm/panfrost: Fix job timeout handling")
Cc: 
Signed-off-by: Boris Brezillon 


Hi Boris,

A couple of nits below, but otherwise looks good.


---
  drivers/gpu/drm/panfrost/panfrost_device.c |   1 -
  drivers/gpu/drm/panfrost/panfrost_device.h |   6 +-
  drivers/gpu/drm/panfrost/panfrost_job.c| 190 ++---
  3 files changed, 133 insertions(+), 64 deletions(-)

diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c 
b/drivers/gpu/drm/panfrost/panfrost_device.c
index 1daf9322954a..fbcf5edbe367 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -200,7 +200,6 @@ int panfrost_device_init(struct panfrost_device *pfdev)
struct resource *res;
  
  	mutex_init(&pfdev->sched_lock);

-   mutex_init(&pfdev->reset_lock);
INIT_LIST_HEAD(&pfdev->scheduled_jobs);
INIT_LIST_HEAD(&pfdev->as_lru_list);
  
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h

index 140e004a3790..597cf1459b0a 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -106,7 +106,11 @@ struct panfrost_device {
struct panfrost_perfcnt *perfcnt;
  
  	struct mutex sched_lock;

-   struct mutex reset_lock;
+
+   struct {
+   struct work_struct work;
+   atomic_t pending;
+   } reset;
  
  	struct mutex shrinker_lock;

struct list_head shrinker_list;
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c 
b/drivers/gpu/drm/panfrost/panfrost_job.c
index e75b7d2192f7..643d26854b46 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -20,12 +20,21 @@
  #include "panfrost_gpu.h"
  #include "panfrost_mmu.h"
  
+#define JOB_TIMEOUT_MS 500

+
  #define job_write(dev, reg, data) writel(data, dev->iomem + (reg))
  #define job_read(dev, reg) readl(dev->iomem + (reg))
  
+enum panfrost_queue_status {

+   PANFROST_QUEUE_STATUS_ACTIVE,
+   PANFROST_QUEUE_STATUS_STOPPED,
+   PANFROST_QUEUE_STATUS_STARTING,
+   PANFROST_QUEUE_STATUS_FAULT_PENDING,
+};
+
  struct panfrost_queue_state {
struct drm_gpu_scheduler sched;
-   bool stopped;
+   atomic_t status;
struct mutex lock;
u64 fence_context;
u64 emit_seqno;
@@ -373,28 +382,64 @@ void panfrost_job_enable_interrupts(struct 
panfrost_device *pfdev)
  static bool panfrost_scheduler_stop(struct panfrost_queue_state *queue,
struct drm_sched_job *bad)
  {
+   enum panfrost_queue_status old_status;
bool stopped = false;
  
  	mutex_lock(&queue->lock);

-   if (!queue->stopped) {
-   drm_sched_stop(&queue->sched, bad);
-   if (bad)
-   drm_sched_increase_karma(bad);
-   queue->stopped = true;
-   stopped = true;
-   }
+   old_status = atomic_xchg(&queue->status,
+PANFROST_QUEUE_STATUS_STOPPED);
+   WARN_ON(old_status != PANFROST_QUEUE_STATUS_ACTIVE &&
+   old_status != PANFROST_QUEUE_STATUS_STOPPED);
+   if (old_status == PANFROST_QUEUE_STATUS_STOPPED)
+   goto out;


NIT: It's slightly cleaner if you swap the above lines, i.e.:

if (old_status == PANFROST_QUEUE_STATUS_STOPPED)
goto out;
WARN_ON(old_status != PANFROST_QUEUE_STATUS_ACTIVE);


+
+   drm_sched_stop(&queue->sched, bad);
+   if (bad)
+   drm_sched_increase_karma(bad);
+
+   stopped = true;
+
+   /*
+* Set the timeout to max so the timer doesn't get started
+* when we return from the timeout handler (restored in
+* panfrost_scheduler_start()).
+*/
+   queue->sched.timeout = MAX_SCHEDULE_TIMEOUT;
+
+out:
mutex_unlock(&queue->lock);
  
  	return stopped;

  }
  
+static void panfrost_scheduler_start(struct panfrost_queue_state *queue)

+{
+   enum panfrost_queue_status old_status;
+
+   mutex_lock(&queue->lock);
+   old_status = atomic_xchg(&queue->status,
+ 

Re: [PATCH v5] drm/panfrost: Move the GPU reset bits outside the timeout handler

2020-11-05 Thread Steven Price

On 05/11/2020 15:17, Boris Brezillon wrote:

We've fixed many races in panfrost_job_timedout() but some remain.
Instead of trying to fix it again, let's simplify the logic and move
the reset bits to a separate work scheduled when one of the queues
reports a timeout.

v5:
- Simplify panfrost_scheduler_stop() (Steven Price)
- Always restart the queue in panfrost_scheduler_start() even if
   the status is corrupted (Steven Price)

v4:
- Rework the logic to prevent a race between drm_sched_start()
   (reset work) and drm_sched_job_timedout() (timeout work)
- Drop Steven's R-b
- Add dma_fence annotation to the panfrost_reset() function (Daniel Vetter)

v3:
- Replace the atomic_cmpxchg() by an atomic_xchg() (Robin Murphy)
- Add Steven's R-b

v2:
- Use atomic_cmpxchg() to conditionally schedule the reset work (Steven Price)

Fixes: 1a11a88cfd9a ("drm/panfrost: Fix job timeout handling")
Cc: 
Signed-off-by: Boris Brezillon 


Reviewed-by: Steven Price 

Thanks,

Steve


---
  drivers/gpu/drm/panfrost/panfrost_device.c |   1 -
  drivers/gpu/drm/panfrost/panfrost_device.h |   6 +-
  drivers/gpu/drm/panfrost/panfrost_job.c| 187 ++---
  3 files changed, 130 insertions(+), 64 deletions(-)

diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c 
b/drivers/gpu/drm/panfrost/panfrost_device.c
index 1daf9322954a..fbcf5edbe367 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -200,7 +200,6 @@ int panfrost_device_init(struct panfrost_device *pfdev)
struct resource *res;
  
  	mutex_init(&pfdev->sched_lock);

-   mutex_init(&pfdev->reset_lock);
INIT_LIST_HEAD(&pfdev->scheduled_jobs);
INIT_LIST_HEAD(&pfdev->as_lru_list);
  
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h

index 140e004a3790..597cf1459b0a 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -106,7 +106,11 @@ struct panfrost_device {
struct panfrost_perfcnt *perfcnt;
  
  	struct mutex sched_lock;

-   struct mutex reset_lock;
+
+   struct {
+   struct work_struct work;
+   atomic_t pending;
+   } reset;
  
  	struct mutex shrinker_lock;

struct list_head shrinker_list;
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c 
b/drivers/gpu/drm/panfrost/panfrost_job.c
index e75b7d2192f7..04e6f6f9b742 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -20,12 +20,21 @@
  #include "panfrost_gpu.h"
  #include "panfrost_mmu.h"
  
+#define JOB_TIMEOUT_MS 500

+
  #define job_write(dev, reg, data) writel(data, dev->iomem + (reg))
  #define job_read(dev, reg) readl(dev->iomem + (reg))
  
+enum panfrost_queue_status {

+   PANFROST_QUEUE_STATUS_ACTIVE,
+   PANFROST_QUEUE_STATUS_STOPPED,
+   PANFROST_QUEUE_STATUS_STARTING,
+   PANFROST_QUEUE_STATUS_FAULT_PENDING,
+};
+
  struct panfrost_queue_state {
struct drm_gpu_scheduler sched;
-   bool stopped;
+   atomic_t status;
struct mutex lock;
u64 fence_context;
u64 emit_seqno;
@@ -373,28 +382,61 @@ void panfrost_job_enable_interrupts(struct 
panfrost_device *pfdev)
  static bool panfrost_scheduler_stop(struct panfrost_queue_state *queue,
struct drm_sched_job *bad)
  {
+   enum panfrost_queue_status old_status;
bool stopped = false;
  
  	mutex_lock(&queue->lock);

-   if (!queue->stopped) {
-   drm_sched_stop(&queue->sched, bad);
-   if (bad)
-   drm_sched_increase_karma(bad);
-   queue->stopped = true;
-   stopped = true;
-   }
+   old_status = atomic_xchg(&queue->status,
+PANFROST_QUEUE_STATUS_STOPPED);
+   if (old_status == PANFROST_QUEUE_STATUS_STOPPED)
+   goto out;
+
+   WARN_ON(old_status != PANFROST_QUEUE_STATUS_ACTIVE);
+   drm_sched_stop(&queue->sched, bad);
+   if (bad)
+   drm_sched_increase_karma(bad);
+
+   stopped = true;
+
+   /*
+* Set the timeout to max so the timer doesn't get started
+* when we return from the timeout handler (restored in
+* panfrost_scheduler_start()).
+*/
+   queue->sched.timeout = MAX_SCHEDULE_TIMEOUT;
+
+out:
mutex_unlock(&queue->lock);
  
  	return stopped;

  }
  
+static void panfrost_scheduler_start(struct panfrost_queue_state *queue)

+{
+   enum panfrost_queue_status old_status;
+
+   mutex_lock(&queue->lock);
+   old_status = atomic_xchg(&queue->status,
+PANFROST_QUEUE_STATUS_STARTING);
+   WARN_ON(old_status != PANFROST_QUEUE_STATUS_STOPPED);
+
+   /* Restore the origina

Re: [PATCH 6/7] drm/panfrost: dev_pm_opp_put_*() accepts NULL argument

2020-11-09 Thread Steven Price

On 06/11/2020 07:03, Viresh Kumar wrote:

The dev_pm_opp_put_*() APIs now accept a NULL opp_table pointer and so
there is no need for us to carry the extra check. Drop them.

Signed-off-by: Viresh Kumar 


Reviewed-by: Steven Price 


---
  drivers/gpu/drm/panfrost/panfrost_devfreq.c | 6 ++
  1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c 
b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index 8ab025d0035f..97b5abc7c188 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -170,10 +170,8 @@ void panfrost_devfreq_fini(struct panfrost_device *pfdev)
pfdevfreq->opp_of_table_added = false;
}
  
-	if (pfdevfreq->regulators_opp_table) {

-   dev_pm_opp_put_regulators(pfdevfreq->regulators_opp_table);
-   pfdevfreq->regulators_opp_table = NULL;
-   }
+   dev_pm_opp_put_regulators(pfdevfreq->regulators_opp_table);
+   pfdevfreq->regulators_opp_table = NULL;
  }
  
  void panfrost_devfreq_resume(struct panfrost_device *pfdev)






Re: [PATCH v5] drm/panfrost: Move the GPU reset bits outside the timeout handler

2020-11-16 Thread Steven Price

On 05/11/2020 15:17, Boris Brezillon wrote:

We've fixed many races in panfrost_job_timedout() but some remain.
Instead of trying to fix it again, let's simplify the logic and move
the reset bits to a separate work scheduled when one of the queues
reports a timeout.

v5:
- Simplify panfrost_scheduler_stop() (Steven Price)
- Always restart the queue in panfrost_scheduler_start() even if
   the status is corrupted (Steven Price)

v4:
- Rework the logic to prevent a race between drm_sched_start()
   (reset work) and drm_sched_job_timedout() (timeout work)
- Drop Steven's R-b
- Add dma_fence annotation to the panfrost_reset() function (Daniel Vetter)

v3:
- Replace the atomic_cmpxchg() by an atomic_xchg() (Robin Murphy)
- Add Steven's R-b

v2:
- Use atomic_cmpxchg() to conditionally schedule the reset work (Steven Price)

Fixes: 1a11a88cfd9a ("drm/panfrost: Fix job timeout handling")
Cc: 
Signed-off-by: Boris Brezillon 


Pushed to drm-misc-next

Thanks,

Steve


---
  drivers/gpu/drm/panfrost/panfrost_device.c |   1 -
  drivers/gpu/drm/panfrost/panfrost_device.h |   6 +-
  drivers/gpu/drm/panfrost/panfrost_job.c| 187 ++---
  3 files changed, 130 insertions(+), 64 deletions(-)

diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c 
b/drivers/gpu/drm/panfrost/panfrost_device.c
index 1daf9322954a..fbcf5edbe367 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -200,7 +200,6 @@ int panfrost_device_init(struct panfrost_device *pfdev)
struct resource *res;
  
  	mutex_init(&pfdev->sched_lock);

-   mutex_init(&pfdev->reset_lock);
INIT_LIST_HEAD(&pfdev->scheduled_jobs);
INIT_LIST_HEAD(&pfdev->as_lru_list);
  
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h

index 140e004a3790..597cf1459b0a 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -106,7 +106,11 @@ struct panfrost_device {
struct panfrost_perfcnt *perfcnt;
  
  	struct mutex sched_lock;

-   struct mutex reset_lock;
+
+   struct {
+   struct work_struct work;
+   atomic_t pending;
+   } reset;
  
  	struct mutex shrinker_lock;

struct list_head shrinker_list;
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c 
b/drivers/gpu/drm/panfrost/panfrost_job.c
index e75b7d2192f7..04e6f6f9b742 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -20,12 +20,21 @@
  #include "panfrost_gpu.h"
  #include "panfrost_mmu.h"
  
+#define JOB_TIMEOUT_MS 500

+
  #define job_write(dev, reg, data) writel(data, dev->iomem + (reg))
  #define job_read(dev, reg) readl(dev->iomem + (reg))
  
+enum panfrost_queue_status {

+   PANFROST_QUEUE_STATUS_ACTIVE,
+   PANFROST_QUEUE_STATUS_STOPPED,
+   PANFROST_QUEUE_STATUS_STARTING,
+   PANFROST_QUEUE_STATUS_FAULT_PENDING,
+};
+
  struct panfrost_queue_state {
struct drm_gpu_scheduler sched;
-   bool stopped;
+   atomic_t status;
struct mutex lock;
u64 fence_context;
u64 emit_seqno;
@@ -373,28 +382,61 @@ void panfrost_job_enable_interrupts(struct 
panfrost_device *pfdev)
  static bool panfrost_scheduler_stop(struct panfrost_queue_state *queue,
struct drm_sched_job *bad)
  {
+   enum panfrost_queue_status old_status;
bool stopped = false;
  
  	mutex_lock(&queue->lock);

-   if (!queue->stopped) {
-   drm_sched_stop(&queue->sched, bad);
-   if (bad)
-   drm_sched_increase_karma(bad);
-   queue->stopped = true;
-   stopped = true;
-   }
+   old_status = atomic_xchg(&queue->status,
+PANFROST_QUEUE_STATUS_STOPPED);
+   if (old_status == PANFROST_QUEUE_STATUS_STOPPED)
+   goto out;
+
+   WARN_ON(old_status != PANFROST_QUEUE_STATUS_ACTIVE);
+   drm_sched_stop(&queue->sched, bad);
+   if (bad)
+   drm_sched_increase_karma(bad);
+
+   stopped = true;
+
+   /*
+* Set the timeout to max so the timer doesn't get started
+* when we return from the timeout handler (restored in
+* panfrost_scheduler_start()).
+*/
+   queue->sched.timeout = MAX_SCHEDULE_TIMEOUT;
+
+out:
mutex_unlock(&queue->lock);
  
  	return stopped;

  }
  
+static void panfrost_scheduler_start(struct panfrost_queue_state *queue)

+{
+   enum panfrost_queue_status old_status;
+
+   mutex_lock(&queue->lock);
+   old_status = atomic_xchg(&queue->status,
+PANFROST_QUEUE_STATUS_STARTING);
+   WARN_ON(old_status != PANFROST_QUEUE_STATUS_STOPPED);
+
+   /* Restore the origina

Re: [PATCH v2 6/8] drm/shmem-helper: Add generic memory shrinker

2022-03-16 Thread Steven Price
On 14/03/2022 22:42, Dmitry Osipenko wrote:
> Introduce a common DRM SHMEM shrinker. It allows reducing code
> duplication among DRM drivers, and it also handles the complicated
> locking for the drivers. This is an initial version of the shrinker
> that covers the basic needs of GPU drivers.
> 
> This patch is based on a couple of ideas borrowed from Rob Clark's MSM
> shrinker and Thomas Zimmermann's variant of the SHMEM shrinker.
> 
> GPU drivers that want to use generic DRM memory shrinker must support
> generic GEM reservations.
> 
> Signed-off-by: Daniel Almeida 
> Signed-off-by: Dmitry Osipenko 

This looks fine to me, but one nitpick: you should update the comment in
struct drm_gem_shmem_object:

>   /**
>* @madv: State for madvise
>*
>* 0 is active/inuse.
>* A negative value is the object is purged.
>* Positive values are driver specific and not used by the helpers.
>*/
>   int madv;

This is adding a helper which cares about the positive values.
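
i.e. something along these lines, perhaps (wording is just a suggestion):

	/**
	 * @madv: State for madvise
	 *
	 * 0 is active/inuse.
	 * A negative value means the object has been purged.
	 * Positive values are driver specific and mean the object is
	 * purgeable, i.e. it may be reclaimed by the shrinker helper.
	 */
	int madv;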

Steve

> ---
>  drivers/gpu/drm/drm_gem_shmem_helper.c | 194 +
>  include/drm/drm_device.h   |   4 +
>  include/drm/drm_gem.h  |  11 ++
>  include/drm/drm_gem_shmem_helper.h |  25 
>  4 files changed, 234 insertions(+)
> 
> diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c 
> b/drivers/gpu/drm/drm_gem_shmem_helper.c
> index 37009418cd28..35be2ee98f11 100644
> --- a/drivers/gpu/drm/drm_gem_shmem_helper.c
> +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
> @@ -139,6 +139,9 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object 
> *shmem)
>  {
>   struct drm_gem_object *obj = &shmem->base;
>  
> + /* take out shmem GEM object from the memory shrinker */
> + drm_gem_shmem_madvise(shmem, 0);
> +
>   WARN_ON(shmem->vmap_use_count);
>  
>   if (obj->import_attach) {
> @@ -163,6 +166,42 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object 
> *shmem)
>  }
>  EXPORT_SYMBOL_GPL(drm_gem_shmem_free);
>  
> +static void drm_gem_shmem_update_purgeable_status(struct 
> drm_gem_shmem_object *shmem)
> +{
> + struct drm_gem_object *obj = &shmem->base;
> + struct drm_gem_shmem_shrinker *gem_shrinker = obj->dev->shmem_shrinker;
> + size_t page_count = obj->size >> PAGE_SHIFT;
> +
> + if (!gem_shrinker || obj->import_attach || !obj->funcs->purge)
> + return;
> +
> + mutex_lock(&shmem->vmap_lock);
> + mutex_lock(&shmem->pages_lock);
> + mutex_lock(&gem_shrinker->lock);
> +
> + if (shmem->madv < 0) {
> + list_del_init(&shmem->madv_list);
> + goto unlock;
> + } else if (shmem->madv > 0) {
> + if (!list_empty(&shmem->madv_list))
> + goto unlock;
> +
> + WARN_ON(gem_shrinker->shrinkable_count + page_count < 
> page_count);
> + gem_shrinker->shrinkable_count += page_count;
> +
> + list_add_tail(&shmem->madv_list, &gem_shrinker->lru);
> + } else if (!list_empty(&shmem->madv_list)) {
> + list_del_init(&shmem->madv_list);
> +
> + WARN_ON(gem_shrinker->shrinkable_count < page_count);
> + gem_shrinker->shrinkable_count -= page_count;
> + }
> +unlock:
> + mutex_unlock(&gem_shrinker->lock);
> + mutex_unlock(&shmem->pages_lock);
> + mutex_unlock(&shmem->vmap_lock);
> +}
> +
>  static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
>  {
>   struct drm_gem_object *obj = &shmem->base;
> @@ -366,6 +405,8 @@ int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
>   ret = drm_gem_shmem_vmap_locked(shmem, map);
>   mutex_unlock(&shmem->vmap_lock);
>  
> + drm_gem_shmem_update_purgeable_status(shmem);
> +
>   return ret;
>  }
>  EXPORT_SYMBOL(drm_gem_shmem_vmap);
> @@ -409,6 +450,8 @@ void drm_gem_shmem_vunmap(struct drm_gem_shmem_object 
> *shmem,
>   mutex_lock(&shmem->vmap_lock);
>   drm_gem_shmem_vunmap_locked(shmem, map);
>   mutex_unlock(&shmem->vmap_lock);
> +
> + drm_gem_shmem_update_purgeable_status(shmem);
>  }
>  EXPORT_SYMBOL(drm_gem_shmem_vunmap);
>  
> @@ -451,6 +494,8 @@ int drm_gem_shmem_madvise(struct drm_gem_shmem_object 
> *shmem, int madv)
>  
>   mutex_unlock(&shmem->pages_lock);
>  
> + drm_gem_shmem_update_purgeable_status(shmem);
> +
>   return (madv >= 0);
>  }
>  EXPORT_SYMBOL(drm_gem_shmem_madvise);
> @@ -763,6 +808,155 @@ drm_gem_shmem_prime_import_sg_table(struct drm_device 
> *dev,
>  }
>  EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
>  
> +static struct drm_gem_shmem_shrinker *
> +to_drm_shrinker(struct shrinker *shrinker)
> +{
> + return container_of(shrinker, struct drm_gem_shmem_shrinker, base);
> +}
> +
> +static unsigned long
> +drm_gem_shmem_shrinker_count_objects(struct shrinker *shrinker,
> +  struct shrink_control *sc)
> +{
> + struct drm_gem_shmem_shrinker *gem_shrinker = to_drm_shrinker(shrinker);
> + u64 count = gem_shrin

Re: [PATCH v2 8/8] drm/panfrost: Switch to generic memory shrinker

2022-03-16 Thread Steven Price
On 14/03/2022 22:42, Dmitry Osipenko wrote:
> Replace Panfrost's memory shrinker with a generic DRM memory shrinker.
> 
> Signed-off-by: Dmitry Osipenko 
> ---

I gave this a spin on my Firefly-RK3288 board and everything seems to
work. So feel free to add a:

Tested-by: Steven Price 

As Alyssa has already pointed out you need to remove the
panfrost_gem_shrinker.c file. But otherwise everything looks fine, and
I'm very happy to see the shrinker code gone ;)

Thanks,

Steve

>  drivers/gpu/drm/panfrost/Makefile  |  1 -
>  drivers/gpu/drm/panfrost/panfrost_device.h |  4 
>  drivers/gpu/drm/panfrost/panfrost_drv.c| 19 ++-
>  drivers/gpu/drm/panfrost/panfrost_gem.c| 27 ++
>  drivers/gpu/drm/panfrost/panfrost_gem.h|  9 
>  drivers/gpu/drm/panfrost/panfrost_job.c| 22 +-
>  6 files changed, 40 insertions(+), 42 deletions(-)
> 
> diff --git a/drivers/gpu/drm/panfrost/Makefile 
> b/drivers/gpu/drm/panfrost/Makefile
> index b71935862417..ecf0864cb515 100644
> --- a/drivers/gpu/drm/panfrost/Makefile
> +++ b/drivers/gpu/drm/panfrost/Makefile
> @@ -5,7 +5,6 @@ panfrost-y := \
>   panfrost_device.o \
>   panfrost_devfreq.o \
>   panfrost_gem.o \
> - panfrost_gem_shrinker.o \
>   panfrost_gpu.o \
>   panfrost_job.o \
>   panfrost_mmu.o \
> diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h 
> b/drivers/gpu/drm/panfrost/panfrost_device.h
> index 8b25278f34c8..fe04b21fc044 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_device.h
> +++ b/drivers/gpu/drm/panfrost/panfrost_device.h
> @@ -115,10 +115,6 @@ struct panfrost_device {
>   atomic_t pending;
>   } reset;
>  
> - struct mutex shrinker_lock;
> - struct list_head shrinker_list;
> - struct shrinker shrinker;
> -
>   struct panfrost_devfreq pfdevfreq;
>  };
>  
> diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c 
> b/drivers/gpu/drm/panfrost/panfrost_drv.c
> index 94b6f0a19c83..b014dadcf51f 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_drv.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
> @@ -160,7 +160,6 @@ panfrost_lookup_bos(struct drm_device *dev,
>   break;
>   }
>  
> - atomic_inc(&bo->gpu_usecount);
>   job->mappings[i] = mapping;
>   }
>  
> @@ -390,7 +389,6 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, 
> void *data,
>  {
>   struct panfrost_file_priv *priv = file_priv->driver_priv;
>   struct drm_panfrost_madvise *args = data;
> - struct panfrost_device *pfdev = dev->dev_private;
>   struct drm_gem_object *gem_obj;
>   struct panfrost_gem_object *bo;
>   int ret = 0;
> @@ -403,7 +401,6 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, 
> void *data,
>  
>   bo = to_panfrost_bo(gem_obj);
>  
> - mutex_lock(&pfdev->shrinker_lock);
>   mutex_lock(&bo->mappings.lock);
>   if (args->madv == PANFROST_MADV_DONTNEED) {
>   struct panfrost_gem_mapping *first;
> @@ -429,17 +426,8 @@ static int panfrost_ioctl_madvise(struct drm_device 
> *dev, void *data,
>  
>   args->retained = drm_gem_shmem_madvise(&bo->base, args->madv);
>  
> - if (args->retained) {
> - if (args->madv == PANFROST_MADV_DONTNEED)
> - list_add_tail(&bo->base.madv_list,
> -   &pfdev->shrinker_list);
> - else if (args->madv == PANFROST_MADV_WILLNEED)
> - list_del_init(&bo->base.madv_list);
> - }
> -
>  out_unlock_mappings:
>   mutex_unlock(&bo->mappings.lock);
> - mutex_unlock(&pfdev->shrinker_lock);
>  
>   drm_gem_object_put(gem_obj);
>   return ret;
> @@ -570,9 +558,6 @@ static int panfrost_probe(struct platform_device *pdev)
>   ddev->dev_private = pfdev;
>   pfdev->ddev = ddev;
>  
> - mutex_init(&pfdev->shrinker_lock);
> - INIT_LIST_HEAD(&pfdev->shrinker_list);
> -
>   err = panfrost_device_init(pfdev);
>   if (err) {
>   if (err != -EPROBE_DEFER)
> @@ -594,7 +579,7 @@ static int panfrost_probe(struct platform_device *pdev)
>   if (err < 0)
>   goto err_out1;
>  
> - panfrost_gem_shrinker_init(ddev);
> + drm_gem_shmem_shrinker_register(ddev);
>  
>   return 0;
>  
> @@ -612,8 +597,8 @@ static int panfrost_remove(struct platform_device *pdev)
>   struct panfrost_device *pfdev = platform_get_drvdata(pdev);
>   struct drm_device *ddev = pfdev->ddev;
>  

Re: [PATCH v2 8/8] drm/panfrost: Switch to generic memory shrinker

2022-03-18 Thread Steven Price
On 18/03/2022 14:41, Dmitry Osipenko wrote:
> 
> On 3/17/22 02:04, Dmitry Osipenko wrote:
>>
>> On 3/16/22 18:04, Steven Price wrote:
>>> On 14/03/2022 22:42, Dmitry Osipenko wrote:
>>>> Replace Panfrost's memory shrinker with a generic DRM memory shrinker.
>>>>
>>>> Signed-off-by: Dmitry Osipenko 
>>>> ---
>>> I gave this a spin on my Firefly-RK3288 board and everything seems to
>>> work. So feel free to add a:
>>>
>>> Tested-by: Steven Price 
>>>
>>> As Alyssa has already pointed out you need to remove the
>>> panfrost_gem_shrinker.c file. But otherwise everything looks fine, and
>>> I'm very happy to see the shrinker code gone ;)
>>
>> Awesome, thank you.
> 
> Steven, could you please tell me how exactly you tested the shrinker?
> 
> I realized that today's IGT doesn't have any tests for Panfrost's
> madvise ioctl.
> 
> You may invoke "echo 2 > /proc/sys/vm/drop_caches" manually in order to
> trigger the shrinker while a 3D app is running actively (like a game or
> benchmark). Nothing crashing will be a good enough indicator that it
> works okay.
> 
> I may get an RK board next week and then will be able to test it by
> myself, so please don't hurry.

I have to admit it wasn't a very thorough test. I ran glmark on the
board with the following hack:

diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c 
b/drivers/gpu/drm/panfrost/panfrost_drv.c
index b014dadcf51f..194dec00695a 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -279,6 +279,14 @@ static int panfrost_ioctl_submit(struct drm_device *dev, 
void *data,
if (ret)
goto out_cleanup_job;
 
+   {
+   struct shrink_control sc = {
+   .nr_to_scan = 1000
+   };
+   dev->shmem_shrinker->base.scan_objects(&dev->shmem_shrinker->base,
+   &sc);
+   }
+
ret = panfrost_job_push(job);
if (ret)
goto out_cleanup_job;

That hack was specifically because I had some doubts about the removal
of the 'gpu_usecount' counter and wanted to ensure that purging as the
job is submitted wouldn't cause problems.

The drop_caches file should also work AFAIK.
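
i.e. (as root):

	sync
	echo 2 > /proc/sys/vm/drop_caches

Writing '2' asks the kernel to reclaim slab objects, which also invokes
all registered shrinkers, so the generic shmem shrinker should get
exercised that way.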

Steve


Re: [PATCH] drm/panfrost: cleanup comments

2022-03-02 Thread Steven Price
On 01/03/2022 12:43, t...@redhat.com wrote:
> From: Tom Rix 
> 
> For spdx
> change tab to space delimiter
> Use // for *.c
> 
> Replacements
> commited to committed, use multiline comment style
> regsiters to registers
> initialze to initialize
> 
> Signed-off-by: Tom Rix 

Thanks, most of the changes look reasonable (although I've never
understood the reason for using // for SPDX comments), but there's one
below that I think needs rethinking.

> ---
>  drivers/gpu/drm/panfrost/panfrost_drv.c  | 2 +-
>  drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c | 2 +-
>  drivers/gpu/drm/panfrost/panfrost_issues.h   | 6 --
>  drivers/gpu/drm/panfrost/panfrost_mmu.c  | 2 +-
>  drivers/gpu/drm/panfrost/panfrost_regs.h | 2 +-
>  5 files changed, 8 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c 
> b/drivers/gpu/drm/panfrost/panfrost_drv.c
> index 96bb5a4656278..94b6f0a19c83a 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_drv.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
> @@ -562,7 +562,7 @@ static int panfrost_probe(struct platform_device *pdev)
>  
>   pfdev->coherent = device_get_dma_attr(&pdev->dev) == DEV_DMA_COHERENT;
>  
> - /* Allocate and initialze the DRM device. */
> + /* Allocate and initialize the DRM device. */
>   ddev = drm_dev_alloc(&panfrost_drm_driver, &pdev->dev);
>   if (IS_ERR(ddev))
>   return PTR_ERR(ddev);
> diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c 
> b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
> index b0142341e2235..77e7cb6d1ae3b 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
> @@ -1,4 +1,4 @@
> -/* SPDX-License-Identifier: GPL-2.0 */
> +// SPDX-License-Identifier: GPL-2.0
>  /* Copyright (C) 2019 Arm Ltd.
>   *
>   * Based on msm_gem_freedreno.c:
> diff --git a/drivers/gpu/drm/panfrost/panfrost_issues.h 
> b/drivers/gpu/drm/panfrost/panfrost_issues.h
> index 8e59d765bf19f..4e7cf979ee67a 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_issues.h
> +++ b/drivers/gpu/drm/panfrost/panfrost_issues.h
> @@ -13,8 +13,10 @@
>   * to care about.
>   */
>  enum panfrost_hw_issue {
> - /* Need way to guarantee that all previously-translated memory accesses
> -  * are commited */
> + /*
> +  * Need way to guarantee that all previously-translated memory accesses
> +  * are committed
> +  */

This file has a whole load of multiline comments that don't technically
follow the coding style. Fixing just one comment makes the file
inconsistent. Note we recently had a discussion about this[1] and
decided to leave the comment style as is. And I have to admit in this
instance avoiding the extra mostly-blank lines makes the list easier to
read. The typo fix is obviously welcome though!

[1] https://lore.kernel.org/r/c7331489-ad04-0f35-224e-164f144fb819%40arm.com

>   HW_ISSUE_6367,
>  
>   /* On job complete with non-done the cache is not flushed */
> diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c 
> b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> index 39562f2d11a47..d3f82b26a631d 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> @@ -1,4 +1,4 @@
> -// SPDX-License-Identifier:  GPL-2.0
> +// SPDX-License-Identifier: GPL-2.0
>  /* Copyright 2019 Linaro, Ltd, Rob Herring  */
>  
>  #include 
> diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h 
> b/drivers/gpu/drm/panfrost/panfrost_regs.h
> index 6c5a11ef1ee87..efe4b75149d35 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_regs.h
> +++ b/drivers/gpu/drm/panfrost/panfrost_regs.h
> @@ -292,7 +292,7 @@
>  #define AS_FAULTADDRESS_LO(as)   (MMU_AS(as) + 0x20) /* (RO) 
> Fault Address for address space n, low word */
>  #define AS_FAULTADDRESS_HI(as)   (MMU_AS(as) + 0x24) /* (RO) 
> Fault Address for address space n, high word */
>  #define AS_STATUS(as)(MMU_AS(as) + 0x28) /* (RO) 
> Status flags for address space n */
> -/* Additional Bifrost AS regsiters */
> +/* Additional Bifrost AS registers */
>  #define AS_TRANSCFG_LO(as)   (MMU_AS(as) + 0x30) /* (RW) Translation 
> table configuration for address space n, low word */
>  #define AS_TRANSCFG_HI(as)   (MMU_AS(as) + 0x34) /* (RW) Translation 
> table configuration for address space n, high word */
>  #define AS_FAULTEXTRA_LO(as) (MMU_AS(as) + 0x38) /* (RO) Secondary 
> fault address for address space n, low word */



Re: [PATCH v2] drm/panfrost: cleanup comments

2022-03-02 Thread Steven Price
On 02/03/2022 12:45, t...@redhat.com wrote:
> From: Tom Rix 
> 
> For spdx
> change tab to space delimiter
> Use // for *.c
> 
> Replacements
> commited to committed
> regsiters to registers
> initialze to initialize
> 
> Signed-off-by: Tom Rix 

Reviewed-by: Steven Price 

> ---
> v2: remove multiline comment change

Thanks for the update. I'll push this to drm-misc-next.

Steve

> 
>  drivers/gpu/drm/panfrost/panfrost_drv.c  | 2 +-
>  drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c | 2 +-
>  drivers/gpu/drm/panfrost/panfrost_issues.h   | 2 +-
>  drivers/gpu/drm/panfrost/panfrost_mmu.c  | 2 +-
>  drivers/gpu/drm/panfrost/panfrost_regs.h | 2 +-
>  5 files changed, 5 insertions(+), 5 deletions(-)
> 
> diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c 
> b/drivers/gpu/drm/panfrost/panfrost_drv.c
> index 96bb5a4656278..94b6f0a19c83a 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_drv.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
> @@ -562,7 +562,7 @@ static int panfrost_probe(struct platform_device *pdev)
>  
>   pfdev->coherent = device_get_dma_attr(&pdev->dev) == DEV_DMA_COHERENT;
>  
> - /* Allocate and initialze the DRM device. */
> + /* Allocate and initialize the DRM device. */
>   ddev = drm_dev_alloc(&panfrost_drm_driver, &pdev->dev);
>   if (IS_ERR(ddev))
>   return PTR_ERR(ddev);
> diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c 
> b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
> index b0142341e2235..77e7cb6d1ae3b 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
> @@ -1,4 +1,4 @@
> -/* SPDX-License-Identifier: GPL-2.0 */
> +// SPDX-License-Identifier: GPL-2.0
>  /* Copyright (C) 2019 Arm Ltd.
>   *
>   * Based on msm_gem_freedreno.c:
> diff --git a/drivers/gpu/drm/panfrost/panfrost_issues.h 
> b/drivers/gpu/drm/panfrost/panfrost_issues.h
> index 8e59d765bf19f..501a76c5e95ff 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_issues.h
> +++ b/drivers/gpu/drm/panfrost/panfrost_issues.h
> @@ -14,7 +14,7 @@
>   */
>  enum panfrost_hw_issue {
>   /* Need way to guarantee that all previously-translated memory accesses
> -  * are commited */
> +  * are committed */
>   HW_ISSUE_6367,
>  
>   /* On job complete with non-done the cache is not flushed */
> diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c 
> b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> index 39562f2d11a47..d3f82b26a631d 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
> @@ -1,4 +1,4 @@
> -// SPDX-License-Identifier:  GPL-2.0
> +// SPDX-License-Identifier: GPL-2.0
>  /* Copyright 2019 Linaro, Ltd, Rob Herring  */
>  
>  #include 
> diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h 
> b/drivers/gpu/drm/panfrost/panfrost_regs.h
> index 6c5a11ef1ee87..efe4b75149d35 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_regs.h
> +++ b/drivers/gpu/drm/panfrost/panfrost_regs.h
> @@ -292,7 +292,7 @@
>  #define AS_FAULTADDRESS_LO(as)   (MMU_AS(as) + 0x20) /* (RO) 
> Fault Address for address space n, low word */
>  #define AS_FAULTADDRESS_HI(as)   (MMU_AS(as) + 0x24) /* (RO) 
> Fault Address for address space n, high word */
>  #define AS_STATUS(as)(MMU_AS(as) + 0x28) /* (RO) 
> Status flags for address space n */
> -/* Additional Bifrost AS regsiters */
> +/* Additional Bifrost AS registers */
>  #define AS_TRANSCFG_LO(as)   (MMU_AS(as) + 0x30) /* (RW) Translation 
> table configuration for address space n, low word */
>  #define AS_TRANSCFG_HI(as)   (MMU_AS(as) + 0x34) /* (RW) Translation 
> table configuration for address space n, high word */
>  #define AS_FAULTEXTRA_LO(as) (MMU_AS(as) + 0x38) /* (RO) Secondary 
> fault address for address space n, low word */
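
As background on the SPDX part of the change: the kernel's
Documentation/process/license-rules.rst wants the identifier on the first
line of the file, using the comment style that matches the file type:

    // SPDX-License-Identifier: GPL-2.0
    (first line of a .c file, C++-style comment)

    /* SPDX-License-Identifier: GPL-2.0 */
    (first line of a .h file, C-style comment)

Hence the shrinker (a .c file) switching from /* */ to //, while the
headers keep the /* */ form.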



Re: [PATCH] panfrost: Don't cleanup the job if it was successfully queued

2021-09-01 Thread Steven Price
On 31/08/2021 14:35, Boris Brezillon wrote:
> The labels are misleading. Even though they are all prefixed with 'fail_'
> the success case also takes that path, and we should definitely not
> cleanup the job if it's been queued. While at it, let's rename those
> labels so we don't make the same mistake again.
> 
> Fixes: 53516280cc38 ("drm/panfrost: use scheduler dependency tracking")
> Signed-off-by: Boris Brezillon 

Reviewed-by: Steven Price 

And also unlike last time...

Tested-by: Steven Price 

Thanks for the clean up - I should have actually tested the previous
patch, but from the diff (and the previous label names) it was obviously
correct™! But it of course blows up pretty quickly without this change.

Thanks,

Steve

> ---
>  drivers/gpu/drm/panfrost/panfrost_drv.c | 19 ++-
>  1 file changed, 10 insertions(+), 9 deletions(-)
> 
> diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c 
> b/drivers/gpu/drm/panfrost/panfrost_drv.c
> index 16212b6b202e..077cbbfa506b 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_drv.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
> @@ -253,7 +253,7 @@ static int panfrost_ioctl_submit(struct drm_device *dev, 
> void *data,
>   job = kzalloc(sizeof(*job), GFP_KERNEL);
>   if (!job) {
>   ret = -ENOMEM;
> - goto fail_out_sync;
> + goto out_put_syncout;
>   }
>  
>   kref_init(&job->refcount);
> @@ -270,29 +270,30 @@ static int panfrost_ioctl_submit(struct drm_device 
> *dev, void *data,
>&job->file_priv->sched_entity[slot],
>NULL);
>   if (ret)
> - goto fail_job_put;
> + goto out_put_job;
>  
>   ret = panfrost_copy_in_sync(dev, file, args, job);
>   if (ret)
> - goto fail_job;
> + goto out_cleanup_job;
>  
>   ret = panfrost_lookup_bos(dev, file, args, job);
>   if (ret)
> - goto fail_job;
> + goto out_cleanup_job;
>  
>   ret = panfrost_job_push(job);
>   if (ret)
> - goto fail_job;
> + goto out_cleanup_job;
>  
>   /* Update the return sync object for the job */
>   if (sync_out)
>   drm_syncobj_replace_fence(sync_out, job->render_done_fence);
>  
> -fail_job:
> - drm_sched_job_cleanup(&job->base);
> -fail_job_put:
> +out_cleanup_job:
> + if (ret)
> + drm_sched_job_cleanup(&job->base);
> +out_put_job:
>   panfrost_job_put(job);
> -fail_out_sync:
> +out_put_syncout:
>   if (sync_out)
>   drm_syncobj_put(sync_out);
>  
> 
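
The shape that falls out of this is a single exit path where only the
genuinely failure-only steps are guarded. Roughly (a simplified sketch;
job_init()/job_push()/job_cleanup()/job_put() are stand-ins, not the real
panfrost helpers):

    static int submit(void)
    {
            struct job *job;
            int ret;

            job = kzalloc(sizeof(*job), GFP_KERNEL);
            if (!job)
                    return -ENOMEM;

            ret = job_init(job);
            if (ret)
                    goto out_put_job;

            ret = job_push(job);    /* scheduler owns the job on success */
            if (ret)
                    goto out_cleanup_job;

            /* success-only work (e.g. publishing the done fence) ... */

    out_cleanup_job:
            if (ret)                /* never clean up a job that was queued */
                    job_cleanup(job);
    out_put_job:
            job_put(job);           /* drop the local reference either way */
            return ret;
    }

The 'out_' prefix makes it explicit that success and failure both flow
through these labels, which is exactly the confusion the old 'fail_' names
invited.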



Re: [PATCH] drm/panfrost: Make use of the helper function devm_platform_ioremap_resource()

2021-09-02 Thread Steven Price
On 31/08/2021 08:53, Cai Huoqing wrote:
> Use the devm_platform_ioremap_resource() helper instead of
> calling platform_get_resource() and devm_ioremap_resource()
> separately
> 
> Signed-off-by: Cai Huoqing 

Reviewed-by: Steven Price 

I'll push this to drm-misc-next.

Thanks,

Steve

> ---
>  drivers/gpu/drm/panfrost/panfrost_device.c | 4 +---
>  1 file changed, 1 insertion(+), 3 deletions(-)
> 
> diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c 
> b/drivers/gpu/drm/panfrost/panfrost_device.c
> index bd9b7be63b0f..1c692428b0d4 100644
> --- a/drivers/gpu/drm/panfrost/panfrost_device.c
> +++ b/drivers/gpu/drm/panfrost/panfrost_device.c
> @@ -198,7 +198,6 @@ static int panfrost_pm_domain_init(struct panfrost_device 
> *pfdev)
>  int panfrost_device_init(struct panfrost_device *pfdev)
>  {
>   int err;
> - struct resource *res;
>  
>   mutex_init(&pfdev->sched_lock);
>   INIT_LIST_HEAD(&pfdev->scheduled_jobs);
> @@ -236,8 +235,7 @@ int panfrost_device_init(struct panfrost_device *pfdev)
>   if (err)
>   goto out_reset;
>  
> - res = platform_get_resource(pfdev->pdev, IORESOURCE_MEM, 0);
> - pfdev->iomem = devm_ioremap_resource(pfdev->dev, res);
> + pfdev->iomem = devm_platform_ioremap_resource(pfdev->pdev, 0);
>   if (IS_ERR(pfdev->iomem)) {
>   err = PTR_ERR(pfdev->iomem);
>   goto out_pm_domain;
> 
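
For anyone unfamiliar with the helper: it is just the two calls rolled into
one. A minimal probe using it looks roughly like this (a sketch for a
hypothetical driver, not panfrost code):

    static int my_probe(struct platform_device *pdev)
    {
            void __iomem *base;

            /* platform_get_resource(pdev, IORESOURCE_MEM, 0) plus
             * devm_ioremap_resource(&pdev->dev, res) in a single step
             */
            base = devm_platform_ioremap_resource(pdev, 0);
            if (IS_ERR(base))
                    return PTR_ERR(base);

            /* ... program the device through 'base' ... */
            return 0;
    }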



[PATCH] drm/panfrost: Calculate lock region size correctly

2021-09-02 Thread Steven Price
It turns out that when locking a region, the region must be a naturally
aligned power of 2. The upshot of this is that if the desired region
crosses a 'large boundary' the region size must be increased
significantly to ensure that the locked region completely covers the
desired region. Previous calculations (including in kbase for the
proprietary driver) failed to take this into account.

Since it's known that the lock region must be naturally aligned we can
compute the required size by looking at the highest bit position which
changes between the start/end of the lock region (subtracting 1 from the
end because the end address is exclusive). The start address is then
aligned based on the size (this is technically unnecessary as the
hardware will ignore these bits, but the spec advises to do this "to
avoid confusion").

Signed-off-by: Steven Price 
---
See previous discussion[1] for more details. This bug also existed in
the 'kbase' driver, so it's unlikely to actually hit very often.

This patch is based on drm-misc-next-fixes as it builds on top of
Alyssa's changes to lock_region.

[1] 
https://lore.kernel.org/dri-devel/6fe675c4-d22b-22da-ba3c-f6d33419b...@arm.com/

 drivers/gpu/drm/panfrost/panfrost_mmu.c | 33 +++--
 1 file changed, 26 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c 
b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index dfe5f1d29763..afec15bb3db5 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -58,17 +58,36 @@ static int write_cmd(struct panfrost_device *pfdev, u32 
as_nr, u32 cmd)
 }
 
 static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
-   u64 iova, u64 size)
+   u64 region_start, u64 size)
 {
u8 region_width;
-   u64 region = iova & PAGE_MASK;
+   u64 region;
+   u64 region_size;
+   u64 region_end = region_start + size;
 
-   /* The size is encoded as ceil(log2) minus(1), which may be calculated
-* with fls. The size must be clamped to hardware bounds.
+   if (!size)
+   return;
+
+   /*
+* The locked region is a naturally aligned power of 2 block encoded as
+* log2 minus(1).
+* Calculate the desired start/end and look for the highest bit which
+* differs. The smallest naturally aligned block must include this bit
+* change the desired region starts with this bit (and subsequent bits)
+* zeroed and ends with the bit (and subsequent bits) set to one.
+*
 */
-   size = max_t(u64, size, AS_LOCK_REGION_MIN_SIZE);
-   region_width = fls64(size - 1) - 1;
-   region |= region_width;
+   region_size = region_start ^ (region_end - 1);
+   region_width = max(fls64(region_size),
+  const_ilog2(AS_LOCK_REGION_MIN_SIZE)) - 1;
+
+   /*
+* Mask off the low bits of region_start (which would be ignored by
+* the hardware anyway)
+*/
+   region_start &= GENMASK_ULL(63, region_width);
+
+   region = region_width | region_start;
 
/* Lock the region that needs to be updated */
	mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), region & 0xFFFFFFFFUL);
-- 
2.30.2
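
To make the boundary-crossing case concrete, here is the same calculation
as a standalone userspace program (fls64 is open-coded, and
AS_LOCK_REGION_MIN_SIZE is assumed to be 1ULL << 15 here, matching the
driver's definition):

    #include <stdint.h>
    #include <stdio.h>

    #define LOCK_REGION_MIN_SIZE (1ULL << 15)       /* assumed 32 KiB */

    static int fls64(uint64_t x)    /* 1-based index of the highest set bit */
    {
            return x ? 64 - __builtin_clzll(x) : 0;
    }

    int main(void)
    {
            /* an 8 KiB range straddling the 16 MiB boundary */
            uint64_t start = 0xfff000, size = 0x2000;
            uint64_t end = start + size;
            int min_width = fls64(LOCK_REGION_MIN_SIZE) - 1;  /* const_ilog2 */
            int width = fls64(start ^ (end - 1));

            if (width < min_width)
                    width = min_width;
            width -= 1;                     /* hardware wants log2 minus 1 */

            start &= ~0ULL << width;        /* GENMASK_ULL(63, width) */

            printf("lock 0x%llx bytes at 0x%llx\n",
                   (unsigned long long)(1ULL << (width + 1)),
                   (unsigned long long)start);
            return 0;
    }

This prints "lock 0x2000000 bytes at 0x0": the 8 KiB request crossing the
16 MiB boundary forces a 32 MiB lock region, whereas the previous
fls64(size - 1) calculation would have produced a 32 KiB region that
misses the tail of the range.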



Re: [PATCH] drm/panfrost: Calculate lock region size correctly

2021-09-03 Thread Steven Price
On 03/09/2021 09:51, Boris Brezillon wrote:
> On Thu,  2 Sep 2021 15:00:38 +0100
> Steven Price  wrote:
> 
>> It turns out that when locking a region, the region must be a naturally
>> aligned power of 2. The upshot of this is that if the desired region
>> crosses a 'large boundary' the region size must be increased
>> significantly to ensure that the locked region completely covers the
>> desired region. Previous calculations (including in kbase for the
>> proprietary driver) failed to take this into account.
>>
>> Since it's known that the lock region must be naturally aligned we can
>> compute the required size by looking at the highest bit position which
>> changes between the start/end of the lock region (subtracting 1 from the
>> end because the end address is exclusive). The start address is then
>> aligned based on the size (this is technically unnecessary as the
>> hardware will ignore these bits, but the spec advises to do this "to
>> avoid confusion").
>>
>> Signed-off-by: Steven Price 
>> ---
>> See previous discussion[1] for more details. This bug also existed in
>> the 'kbase' driver, so it's unlikely to actually hit very often.
>>
>> This patch is based on drm-misc-next-fixes as it builds on top of
>> Alyssa's changes to lock_region.
>>
>> [1] 
>> https://lore.kernel.org/dri-devel/6fe675c4-d22b-22da-ba3c-f6d33419b...@arm.com/
>>
>>  drivers/gpu/drm/panfrost/panfrost_mmu.c | 33 +++--
>>  1 file changed, 26 insertions(+), 7 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c 
>> b/drivers/gpu/drm/panfrost/panfrost_mmu.c
>> index dfe5f1d29763..afec15bb3db5 100644
>> --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
>> +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
>> @@ -58,17 +58,36 @@ static int write_cmd(struct panfrost_device *pfdev, u32 
>> as_nr, u32 cmd)
>>  }
>>  
>>  static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
>> -u64 iova, u64 size)
>> +u64 region_start, u64 size)
>>  {
>>  u8 region_width;
>> -u64 region = iova & PAGE_MASK;
>> +u64 region;
>> +u64 region_size;
>> +u64 region_end = region_start + size;
>>  
>> -/* The size is encoded as ceil(log2) minus(1), which may be calculated
>> - * with fls. The size must be clamped to hardware bounds.
>> +if (!size)
>> +return;
>> +
>> +/*
>> + * The locked region is a naturally aligned power of 2 block encoded as
>> + * log2 minus(1).
>> + * Calculate the desired start/end and look for the highest bit which
>> + * differs. The smallest naturally aligned block must include this bit
>> + * change the desired region starts with this bit (and subsequent bits)
>> + * zeroed and ends with the bit (and subsequent bits) set to one.
>> + *
> 
> Nit: you can drop the empty comment line.

Whoops - I reordered this comment and didn't spot the blank line getting
left.

>>   */
>> -size = max_t(u64, size, AS_LOCK_REGION_MIN_SIZE);
>> -region_width = fls64(size - 1) - 1;
>> -region |= region_width;
>> +region_size = region_start ^ (region_end - 1);
> 
> Hm, is region_size really encoding the size of the region to lock? I
> mean, the logic seems correct but I wonder if it wouldn't be better to
> drop the region_size variable and inline
> 'region_start ^ (region_end - 1)' in the region_width calculation to
> avoid confusion.

Yeah I wasn't happy about the variable name either, but I couldn't think
of a better one. Inlining it into the following line nicely avoids the
problem ;)

> Looks good otherwise.
> 
> Reviewed-by: Boris Brezillon 

Thanks, I'll post a v2 in case anyone else has other comments.

Steve

>> +region_width = max(fls64(region_size),
>> +   const_ilog2(AS_LOCK_REGION_MIN_SIZE)) - 1;
>> +
>> +/*
>> + * Mask off the low bits of region_start (which would be ignored by
>> + * the hardware anyway)
>> + */
>> +region_start &= GENMASK_ULL(63, region_width);
>> +
>> +region = region_width | region_start;
>>  
>>  /* Lock the region that needs to be updated */
>>  mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), region & 0xFFFFFFFFUL);
> 



[PATCH v2] drm/panfrost: Calculate lock region size correctly

2021-09-03 Thread Steven Price
It turns out that when locking a region, the region must be a naturally
aligned power of 2. The upshot of this is that if the desired region
crosses a 'large boundary' the region size must be increased
significantly to ensure that the locked region completely covers the
desired region. Previous calculations (including in kbase for the
proprietary driver) failed to take this into account.

Since it's known that the lock region must be naturally aligned we can
compute the required size by looking at the highest bit position which
changes between the start/end of the lock region (subtracting 1 from the
end because the end address is exclusive). The start address is then
aligned based on the size (this is technically unnecessary as the
hardware will ignore these bits, but the spec advises to do this "to
avoid confusion").

Reviewed-by: Boris Brezillon 
Signed-off-by: Steven Price 
---
 drivers/gpu/drm/panfrost/panfrost_mmu.c | 30 +++--
 1 file changed, 23 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c 
b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index dfe5f1d29763..e2629b8d6a02 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -58,17 +58,33 @@ static int write_cmd(struct panfrost_device *pfdev, u32 
as_nr, u32 cmd)
 }
 
 static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
-   u64 iova, u64 size)
+   u64 region_start, u64 size)
 {
u8 region_width;
-   u64 region = iova & PAGE_MASK;
+   u64 region;
+   u64 region_end = region_start + size;
 
-   /* The size is encoded as ceil(log2) minus(1), which may be calculated
-* with fls. The size must be clamped to hardware bounds.
+   if (!size)
+   return;
+
+   /*
+* The locked region is a naturally aligned power of 2 block encoded as
+* log2 minus(1).
+* Calculate the desired start/end and look for the highest bit which
+* differs. The smallest naturally aligned block must include this bit
+* change, the desired region starts with this bit (and subsequent bits)
+* zeroed and ends with the bit (and subsequent bits) set to one.
 */
-   size = max_t(u64, size, AS_LOCK_REGION_MIN_SIZE);
-   region_width = fls64(size - 1) - 1;
-   region |= region_width;
+   region_width = max(fls64(region_start ^ (region_end - 1)),
+  const_ilog2(AS_LOCK_REGION_MIN_SIZE)) - 1;
+
+   /*
+* Mask off the low bits of region_start (which would be ignored by
+* the hardware anyway)
+*/
+   region_start &= GENMASK_ULL(63, region_width);
+
+   region = region_width | region_start;
 
/* Lock the region that needs to be updated */
	mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), region & 0xFFFFFFFFUL);
-- 
2.30.2


