[PATCH -next] backlight: cr_bllcd: Remove unused variable 'intensity'

2020-07-22 Thread Wei Yongjun
gcc reports an unused-variable warning as follows:

drivers/video/backlight/cr_bllcd.c:62:6: warning:
 unused variable 'intensity' [-Wunused-variable]
   62 |  int intensity = bd->props.brightness;
  |  ^

After commit 24d34617c24f ("backlight: cr_bllcd: Introduce
gpio-backlight semantics"), this variable is never used, so remove
it.

Fixes: 24d34617c24f ("backlight: cr_bllcd: Introduce gpio-backlight semantics")
Reported-by: Hulk Robot 
Signed-off-by: Wei Yongjun 
---
 drivers/video/backlight/cr_bllcd.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
index a24d42e1ea3c..4ad0a72531fe 100644
--- a/drivers/video/backlight/cr_bllcd.c
+++ b/drivers/video/backlight/cr_bllcd.c
@@ -59,7 +59,6 @@ struct cr_panel {
 
 static int cr_backlight_set_intensity(struct backlight_device *bd)
 {
-   int intensity = bd->props.brightness;
u32 addr = gpio_bar + CRVML_PANEL_PORT;
u32 cur = inl(addr);
 



Re: [PATCH v1] drm/i915/dsi: Drop double check for ACPI companion device

2020-07-22 Thread Andy Shevchenko
On Fri, May 29, 2020 at 03:33:17PM +0300, Andy Shevchenko wrote:
> acpi_dev_get_resources() already performs the NULL pointer check against
> the ACPI companion device which is given as a function parameter. Thus,
> there is no need to duplicate this check in the caller.
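
For reference, the guard sits at the top of acpi_dev_get_resources(); a
minimal sketch of the relevant check, paraphrased from
drivers/acpi/resource.c rather than quoted verbatim:

  int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list,
                             int (*preproc)(struct acpi_resource *, void *),
                             void *preproc_data)
  {
          /* a NULL companion is rejected here, so callers need no check */
          if (!adev || !adev->handle || !list_empty(list))
                  return -EINVAL;
          /* ... walk the device's _CRS resources ... */
  }

With that guard, a missing ACPI companion simply turns the call into a
harmless -EINVAL no-op.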

Any comment so far?

> Signed-off-by: Andy Shevchenko 
> ---
>  drivers/gpu/drm/i915/display/intel_dsi_vbt.c | 24 
>  1 file changed, 10 insertions(+), 14 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
> index 574dcfec9577..6f9e08cda964 100644
> --- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
> +++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
> @@ -426,23 +426,19 @@ static void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
>  {
>   struct drm_device *drm_dev = intel_dsi->base.base.dev;
>   struct device *dev = &drm_dev->pdev->dev;
> - struct acpi_device *acpi_dev;
> + struct acpi_device *acpi_dev = ACPI_COMPANION(dev);
>   struct list_head resource_list;
>   struct i2c_adapter_lookup lookup;
>  
> - acpi_dev = ACPI_COMPANION(dev);
> - if (acpi_dev) {
> - memset(&lookup, 0, sizeof(lookup));
> - lookup.slave_addr = slave_addr;
> - lookup.intel_dsi = intel_dsi;
> - lookup.dev_handle = acpi_device_handle(acpi_dev);
> -
> - INIT_LIST_HEAD(&resource_list);
> - acpi_dev_get_resources(acpi_dev, &resource_list,
> -i2c_adapter_lookup,
> -&lookup);
> - acpi_dev_free_resource_list(&resource_list);
> - }
> + memset(&lookup, 0, sizeof(lookup));
> + lookup.slave_addr = slave_addr;
> + lookup.intel_dsi = intel_dsi;
> + lookup.dev_handle = acpi_device_handle(acpi_dev);
> +
> + INIT_LIST_HEAD(&resource_list);
> + acpi_dev_get_resources(acpi_dev, &resource_list,
> +i2c_adapter_lookup, &lookup);
> + acpi_dev_free_resource_list(&resource_list);
>  }
>  #else
>  static inline void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
> -- 
> 2.26.2
> 

-- 
With Best Regards,
Andy Shevchenko




[PATCH -next] gpu: drm: Fix spinlock vblank_time_lock use error.

2020-07-22 Thread Xu Qiang
The drm_handle_vblank() function runs in interrupt context, so the
vblank_time_lock spinlock is acquired from interrupt context. Paths
that can take the lock with interrupts enabled must therefore use the
irqsave/irqrestore variants to avoid a deadlock.
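
A minimal sketch of the deadlock this avoids (illustrative only, not
code from the patch):

  /* process context, local interrupts still enabled */
  spin_lock(&dev->vblank_time_lock);
  /*
   * A vblank interrupt fires on this CPU here; drm_handle_vblank()
   * then tries spin_lock(&dev->vblank_time_lock) and spins forever
   * on a lock its own CPU already holds: deadlock.
   */
  spin_unlock(&dev->vblank_time_lock);

  /* the irqsave variant closes that window */
  unsigned long irqflags;

  spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
  /* ... critical section runs with local interrupts off ... */
  spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);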

Cc: 
Signed-off-by: Xu Qiang 
---
 drivers/gpu/drm/drm_vblank.c | 17 ++---
 1 file changed, 10 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index f402c75b9d34..4ca63ff33a43 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -229,10 +229,11 @@ static void drm_reset_vblank_timestamp(struct drm_device *dev, unsigned int pipe
 {
u32 cur_vblank;
bool rc;
+   unsigned long irqflags;
ktime_t t_vblank;
int count = DRM_TIMESTAMP_MAXRETRIES;
 
-   spin_lock(&dev->vblank_time_lock);
+   spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
 
/*
 * sample the current counter to avoid random jumps
@@ -257,7 +258,7 @@ static void drm_reset_vblank_timestamp(struct drm_device *dev, unsigned int pipe
 */
store_vblank(dev, pipe, 1, t_vblank, cur_vblank);
 
-   spin_unlock(&dev->vblank_time_lock);
+   spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
 }
 
 /*
@@ -1106,11 +1107,12 @@ static int __enable_vblank(struct drm_device *dev, unsigned int pipe)
 static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe)
 {
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+   unsigned long irqflags;
int ret = 0;
 
assert_spin_locked(&dev->vbl_lock);
 
-   spin_lock(&dev->vblank_time_lock);
+   spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
 
if (!vblank->enabled) {
/*
@@ -1136,7 +1138,7 @@ static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe)
}
}
 
-   spin_unlock(&dev->vblank_time_lock);
+   spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
 
return ret;
 }
@@ -1917,6 +1919,7 @@ bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe)
 {
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
unsigned long irqflags;
+   unsigned long irqflags_vblank;
bool disable_irq;
 
if (drm_WARN_ON_ONCE(dev, !drm_dev_has_vblank(dev)))
@@ -1931,18 +1934,18 @@ bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe)
 * vblank enable/disable, as this would cause inconsistent
 * or corrupted timestamps and vblank counts.
 */
-   spin_lock(&dev->vblank_time_lock);
+   spin_lock_irqsave(&dev->vblank_time_lock, irqflags_vblank);
 
/* Vblank irq handling disabled. Nothing to do. */
if (!vblank->enabled) {
-   spin_unlock(&dev->vblank_time_lock);
+   spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags_vblank);
spin_unlock_irqrestore(&dev->event_lock, irqflags);
return false;
}
 
drm_update_vblank_count(dev, pipe, true);
 
-   spin_unlock(&dev->vblank_time_lock);
+   spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags_vblank);
 
wake_up(&vblank->queue);
 
-- 
2.25.0



Re: nouveau regression with 5.7 caused by "PCI/PM: Assume ports without DLL Link Active train links in 100 ms"

2020-07-22 Thread Mika Westerberg
On Tue, Jul 21, 2020 at 11:01:55AM -0400, Lyude Paul wrote:
> Sure thing. Also, feel free to let me know if you'd like access to one of the
> systems we saw breaking with this patch - I'm fairly sure I've got one of them
> locally at my apartment and don't mind setting up AMT/KVM/SSH

Probably no need for remote access (thanks for the offer, though). I
attached a test patch to the bug report:

  https://bugzilla.kernel.org/show_bug.cgi?id=208597

that tries to work around it (based on ->pm_cap == 0). I wonder if
anyone would have time to try it out.


Re: io-mapping: Indicate mapping failure

2020-07-22 Thread Andy Shevchenko
On Tue, Jul 21, 2020 at 11:34:25AM -0400, Michael J. Ruhl wrote:
> I found this when my system crashed long after the mapping failure.
> The expected behavior should have been driver exit.
> 
> Since this is almost exclusively used for drm, I am posting to
> the dri mailing list.  Should this go to another list as well?

Just drop this cover letter; it's not needed for a single patch. Put
comments like the above immediately after the '---' cutter line.

-- 
With Best Regards,
Andy Shevchenko




[PATCH] drm/virtio: fix memory leak in virtio_gpu_cleanup_object()

2020-07-22 Thread Xin He
sg_free_table() frees the scatterlist entries but not the sg_table
structure itself, so kfree() should be called before setting
shmem->pages to NULL.
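
The split between the two calls is easy to miss; a minimal sketch of
the allocate/free pairing (generic scatterlist API usage, not code from
this driver):

  unsigned int nents = 16; /* example entry count */
  struct sg_table *sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);

  sg_alloc_table(sgt, nents, GFP_KERNEL); /* allocates the entries */
  /* ... use the table ... */
  sg_free_table(sgt); /* frees the scatterlist entries only */
  kfree(sgt);         /* the sg_table struct itself must be freed too */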

Signed-off-by: Xin He 
Reviewed-by: Qi Liu 
---
 drivers/gpu/drm/virtio/virtgpu_object.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 6ccbd01cd888..703b5cd51751 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -79,6 +79,7 @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
}
 
sg_free_table(shmem->pages);
+   kfree(shmem->pages);
shmem->pages = NULL;
drm_gem_shmem_unpin(&bo->base.base);
}
-- 
2.21.1 (Apple Git-122.3)



Re: [PATCH v5 0/6] Add support for GPU DDR BW scaling

2020-07-22 Thread Viresh Kumar
On 21-07-20, 07:28, Rob Clark wrote:
> With your ack, I can add the dev_pm_opp_set_bw patch to my
> tree and merge it via msm-next -> drm-next -> linus

I wanted to send it via my tree, but it's okay. Pick this patch from
linux-next and add my Ack, I will drop it after that.

a8351c12c6c7 OPP: Add and export helper to set bandwidth

> Otherwise I can send a second later pull req that adds the final patch
> after it has rebased to 5.9-rc1 (by which point the opp next tree will
> have presumably been merged)

The PM stuff gets pushed fairly early and so I was asking you to
rebase just on my tree, so you could have sent the pull request right
after the PM tree landed there instead of waiting for rc1.

But it's fine now.

-- 
viresh


Re: [PATCH] drm/msm/dp: Add DP compliance tests on Snapdragon Chipsets

2020-07-22 Thread khsieh

On 2020-07-20 19:57, Rob Clark wrote:
On Mon, Jul 20, 2020 at 4:32 PM Stephen Boyd  
wrote:


Quoting khs...@codeaurora.org (2020-07-20 15:48:13)
> On 2020-07-20 13:18, Stephen Boyd wrote:
> > Quoting Kuogee Hsieh (2020-07-07 11:41:25)
> >>  drivers/gpu/drm/msm/dp/dp_power.c   |  32 +-
> >>  drivers/gpu/drm/msm/dp/dp_power.h   |   1 +
> >>  drivers/gpu/drm/msm/dp/dp_reg.h |   1 +
> >>  17 files changed, 861 insertions(+), 424 deletions(-)
> >
> > It seems to spread various changes throughout the DP bits and only has
> > a
> > short description about what's changing. Given that the series above
> > isn't merged it would be better to get rid of this change and make the
> > changes in the patches that introduce these files.
> >
>
> Yes, the base DP driver is not yet merged as its still in reviews and
> has been for a while.
> While it is being reviewed, different developers are working on
> different aspects of DP such as base DP driver, DP compliance, audio etc
> to keep things going in parallel.
> To maintain the authorship of the different developers, we prefer having
> them as separate changes and not merge them.
> We can make all these changes as part of the same series if that shall
> help to keep things together but would prefer the changes themselves to
> be separate.
> Please consider this and let us know if that works.
>

I'm not the maintainer here so it's not really up to me, but this is
why we have the Co-developed-by tag, to show that multiple people
worked on some patch. The patch is supposed to logically stand on its
own regardless of how many people worked on it. Authorship is a single
person but the Co-developed-by tag helps express that more than one
person is the actual author of the patch. Can you use that tag instead
and then squash this into the other DP patches?


The dpu mega-patches are hard enough to review already.. I'd really
appreciate it if the dpu devs sort out some way to squash later
fixups into earlier patches.

BR,
-R
As per discussion on IRC, I have separated the parts of this change
which are unrelated to compliance, and we have merged them into the
base DP driver and added the Co-developed-by tag there. Since this
change adds support for DP compliance on MSM chipsets, which is a new
feature and not fixes to the base driver, we would prefer to keep it as
a separate change, as that will make it easier for you to review it
instead of continuing to expand the base DP driver.


[PATCH] drm/panel: remove meaningless if(ret) check code.

2020-07-22 Thread Bernard Zhao
The function drm_panel_add() always returns 0, so the if (ret < 0)
check can never take the error branch. Removing the check makes the
code a bit more readable.
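
For context, a sketch of what drm_panel_add() does here (paraphrased
from the drivers/gpu/drm/drm_panel.c of this era; treat as an
approximation, not a verbatim quote):

  int drm_panel_add(struct drm_panel *panel)
  {
          mutex_lock(&panel_lock);
          list_add_tail(&panel->list, &panel_list);
          mutex_unlock(&panel_lock);

          return 0; /* no failure path, so callers need not check */
  }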

Signed-off-by: Bernard Zhao 
---
 drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
index fddbfddf6566..7f2eb54e4254 100644
--- a/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
+++ b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
@@ -478,9 +478,7 @@ static int k101_im2ba02_dsi_probe(struct mipi_dsi_device *dsi)
if (ret)
return ret;
 
-   ret = drm_panel_add(&ctx->panel);
-   if (ret < 0)
-   return ret;
+   drm_panel_add(&ctx->panel);
 
dsi->mode_flags = MIPI_DSI_MODE_VIDEO;
dsi->format = MIPI_DSI_FMT_RGB888;
-- 
2.17.1



[PATCH -next] drm/nouveau/kms/nvd9-: Fix file release memory leak

2020-07-22 Thread Wei Yongjun
When using single_open() for opening, single_release() should be
used instead of seq_release(), otherwise there is a memory leak.
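
The usual pairing looks like this (a generic sketch of the
single_open()/single_release() pattern; the foo_* names are
hypothetical):

  static int foo_open(struct inode *inode, struct file *file)
  {
          return single_open(file, foo_show, inode->i_private);
  }

  static const struct file_operations foo_fops = {
          .owner   = THIS_MODULE,
          .open    = foo_open,
          .read    = seq_read,
          .llseek  = seq_lseek,
          .release = single_release, /* frees what single_open() allocated */
  };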

Fixes: 12885ecbfe62 ("drm/nouveau/kms/nvd9-: Add CRC support")
Reported-by: Hulk Robot 
Signed-off-by: Wei Yongjun 
---
 drivers/gpu/drm/nouveau/dispnv50/crc.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/nouveau/dispnv50/crc.c b/drivers/gpu/drm/nouveau/dispnv50/crc.c
index f17fb6d56757..4971a1042415 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/crc.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/crc.c
@@ -706,6 +706,7 @@ static const struct file_operations nv50_crc_flip_threshold_fops = {
.open = nv50_crc_debugfs_flip_threshold_open,
.read = seq_read,
.write = nv50_crc_debugfs_flip_threshold_set,
+   .release = single_release,
 };
 
 int nv50_head_crc_late_register(struct nv50_head *head)





[PATCH] drm/panel: remove meaningless if(ret) check code.

2020-07-22 Thread Bernard Zhao
The function drm_panel_add() always returns 0, so the if (ret < 0)
check can never take the error branch. Removing the check makes the
code a bit more readable.

Signed-off-by: Bernard Zhao 
---
 drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
index 95b789ab9d29..2c168a405928 100644
--- a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
+++ b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
@@ -225,9 +225,7 @@ static int feiyang_dsi_probe(struct mipi_dsi_device *dsi)
if (ret)
return ret;
 
-   ret = drm_panel_add(&ctx->panel);
-   if (ret < 0)
-   return ret;
+   drm_panel_add(&ctx->panel);
 
dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST;
dsi->format = MIPI_DSI_FMT_RGB888;
-- 
2.17.1



Re: [PATCH] io-mapping: Indicate mapping failure

2020-07-22 Thread Mike Rapoport
On Tue, Jul 21, 2020 at 03:00:41PM +, Ruhl, Michael J wrote:
> >-Original Message-
> >From: Andy Shevchenko 
> >Sent: Tuesday, July 21, 2020 10:47 AM
> >To: Ruhl, Michael J 
> >Cc: dri-devel@lists.freedesktop.org; Andrew Morton; Mike Rapoport;
> >Chris Wilson; sta...@vger.kernel.org
> >Subject: Re: [PATCH] io-mapping: Indicate mapping failure
> >
> >On Tue, Jul 21, 2020 at 10:16:41AM -0400, Michael J. Ruhl wrote:
> >> Sometimes it is good to know when your mapping failed.

I was going to say it's always a good idea ;-)

> >Can you elaborate...
> 
> Sure, guess I was too glib. 😊
> 
> Currently, the io_mapping_init_wc() function (the !ATOMIC_IOMAP version)
> will always return success.
> 
> If the setting of the iomem (from ioremap_wc) fails, the only way for the 
> caller to know is to check the value of iomap->iomem.
> 
> Since all of the callers expect a NULL return on error, and check for a NULL,
> I felt this needed a Fixes tag (i.e. unexpected behavior).
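
A sketch of the fix being described (paraphrased from the posted patch,
assuming the !ATOMIC_IOMAP variant in include/linux/io-mapping.h):

  static inline struct io_mapping *
  io_mapping_init_wc(struct io_mapping *iomap,
                     resource_size_t base,
                     unsigned long size)
  {
          iomap->iomem = ioremap_wc(base, size);
          if (!iomap->iomem)
                  return NULL; /* callers already treat NULL as failure */

          /* ... fill in base, size and prot as before ... */
          return iomap;
  }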
> 
> >> Fixes: cafaf14a5d8f ("io-mapping: Always create a struct to hold metadata
> >about the io-mapping"
> >
> >...especially taking into account that Fixes implies regression / bug?
> 
> The failure (in my case a crash) is not revealed until the address is accessed
> long after the init.
> 
> I will update the commit.
> 
> Mike
> 
> >--
> >With Best Regards,
> >Andy Shevchenko
> >
> 

-- 
Sincerely yours,
Mike.


Re: [PATCH] io-mapping: Indicate mapping failure

2020-07-22 Thread Andy Shevchenko
On Tue, Jul 21, 2020 at 10:16:41AM -0400, Michael J. Ruhl wrote:
> Sometimes it is good to know when your mapping failed.

Can you elaborate...

> Fixes: cafaf14a5d8f ("io-mapping: Always create a struct to hold metadata 
> about the io-mapping"

...especially taking into account that Fixes implies regression / bug?

-- 
With Best Regards,
Andy Shevchenko




Re: [PATCH v1] io-mapping: Indicate mapping failure

2020-07-22 Thread Andy Shevchenko
On Tue, Jul 21, 2020 at 11:34:26AM -0400, Michael J. Ruhl wrote:

Thanks for an update, my comments below.

> The !ATOMIC_IOMAP version of io_mapping_init_wc will always return
> success, even when the ioremap fails.
> 
> Since the ATOMIC_IOMAP version returns NULL when the init fails, and
> callers check for a NULL return on error this is unexpected.
> 
> Return NULL on ioremap failure.
> 
> Fixes: cafaf14a5d8f ("io-mapping: Always create a struct to hold metadata 
> about the io-mapping"

Missed parenthesis.

It's still not clear why the Fixes tag is warranted.
Also provide a couple of lines of the crash and add a paragraph about it.

-- 
With Best Regards,
Andy Shevchenko




Re: nouveau regression with 5.7 caused by "PCI/PM: Assume ports without DLL Link Active train links in 100 ms"

2020-07-22 Thread Patrick Volkerding
On 7/21/20 10:27 AM, Mika Westerberg wrote:
> On Tue, Jul 21, 2020 at 11:01:55AM -0400, Lyude Paul wrote:
>> Sure thing. Also, feel free to let me know if you'd like access to one of the
>> systems we saw breaking with this patch - I'm fairly sure I've got one of 
>> them
>> locally at my apartment and don't mind setting up AMT/KVM/SSH
> Probably no need for remote access (thanks for the offer, though). I
> attached a test patch to the bug report:
>
>   https://bugzilla.kernel.org/show_bug.cgi?id=208597
>
> that tries to work around it (based on ->pm_cap == 0). I wonder if
> anyone would have time to try it out.


Hi Mika,

I can confirm that this patch applied to 5.4.52 fixes the issue with
hybrid graphics on the Thinkpad X1 Extreme gen2.

Thanks,

Pat



[PATCH -next] drm/amdgpu/vcn3.0: Remove set but not used variable 'direct_poll'

2020-07-22 Thread YueHaibing
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c: In function vcn_v3_0_start_sriov:
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c:1235:3:
 warning: variable direct_poll set but not used [-Wunused-but-set-variable]

It is never used, so remove it.

Signed-off-by: YueHaibing 
---
 drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 4 
 1 file changed, 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 910a4a32ff78..53f680134c40 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -1231,8 +1231,6 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
direct_wt = { {0} };
struct mmsch_v3_0_cmd_direct_read_modify_write
direct_rd_mod_wt = { {0} };
-   struct mmsch_v3_0_cmd_direct_polling
-   direct_poll = { {0} };
struct mmsch_v3_0_cmd_end end = { {0} };
struct mmsch_v3_0_init_header header;
 
@@ -1240,8 +1238,6 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
MMSCH_COMMAND__DIRECT_REG_WRITE;
direct_rd_mod_wt.cmd_header.command_type =
MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
-   direct_poll.cmd_header.command_type =
-   MMSCH_COMMAND__DIRECT_REG_POLLING;
end.cmd_header.command_type =
MMSCH_COMMAND__END;
 
-- 
2.17.1




[PULL] drm-misc-fixes

2020-07-22 Thread Thomas Zimmermann
Hi Dave and Daniel,

here's this week's PR for drm-misc-fixes. There are only 2 fixes. The sun4i
patch updates a bugfix that was merged via drm-misc-fixes a few weeks ago.

Best regards
Thomas

drm-misc-fixes-2020-07-22:
 * sun4i: Fix inverted HPD result; fixes an earlier fix
 * lima: fix timeout during reset

The following changes since commit 6348dd291e3653534a9e28e6917569bc9967b35b:

  dmabuf: use spinlock to access dmabuf->name (2020-07-10 15:39:29 +0530)

are available in the Git repository at:

  git://anongit.freedesktop.org/drm/drm-misc tags/drm-misc-fixes-2020-07-22

for you to fetch changes up to f3f90c6db188d437add55aaffadd5ad5bcb8cda6:

  drm/lima: fix wait pp reset timeout (2020-07-20 08:46:06 +0800)


 * sun4i: Fix inverted HPD result; fixes an earlier fix
 * lima: fix timeout during reset


Chen-Yu Tsai (1):
  drm: sun4i: hdmi: Fix inverted HPD result

Qiang Yu (1):
  drm/lima: fix wait pp reset timeout

 drivers/gpu/drm/lima/lima_pp.c | 2 ++
 drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)


RE: [PATCH 02/11] drm/ttm: cleanup io_mem interface with nouveau

2020-07-22 Thread Chauhan, Madhav

-Original Message-
From: Christian König  
Sent: Tuesday, July 21, 2020 1:03 PM
To: dri-devel@lists.freedesktop.org
Cc: Chauhan, Madhav ; tzimmerm...@suse.de; 
michael.j.r...@intel.com
Subject: [PATCH 02/11] drm/ttm: cleanup io_mem interface with nouveau

Nouveau is the only user of this functionality, and evicting io space on
-EAGAIN is really a misuse of the return code.

Instead, switch to using -ENOSPC here, which makes much more sense and
simplifies the code.
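
Under the new contract a driver's io_mem_reserve() callback signals
exhaustion like this (a hypothetical foo_* sketch, not the nouveau
code):

  static int foo_io_mem_reserve(struct ttm_bo_device *bdev,
                                struct ttm_mem_reg *mem)
  {
          /* ... try to reserve an io window for mem ... */
          if (out_of_io_space) /* hypothetical condition */
                  return -ENOSPC; /* TTM evicts from io_reserve_lru, retries */

          return 0;
  }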

Signed-off-by: Christian König 

The remaining cleanup patches (patches 2-11) look fine.
Patch 2-11: Reviewed-by: Madhav Chauhan 

Regards,
Madhav
---
 drivers/gpu/drm/nouveau/nouveau_bo.c | 2 --
 drivers/gpu/drm/ttm/ttm_bo_util.c | 4 ++--
 2 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 61355cfb7335..a48652826f67 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1505,8 +1505,6 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
if (ret != 1) {
if (WARN_ON(ret == 0))
return -EINVAL;
-   if (ret == -ENOSPC)
-   return -EAGAIN;
return ret;
}
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 5e0f3a9caedc..7d2c50fef456 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -116,7 +116,7 @@ static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
struct ttm_buffer_object *bo;
 
if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
-   return -EAGAIN;
+   return -ENOSPC;
 
bo = list_first_entry(&man->io_reserve_lru,
  struct ttm_buffer_object,
@@ -143,7 +143,7 @@ int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
mem->bus.io_reserved_count++ == 0) {
 retry:
ret = bdev->driver->io_mem_reserve(bdev, mem);
-   if (ret == -EAGAIN) {
+   if (ret == -ENOSPC) {
ret = ttm_mem_io_evict(man);
if (ret == 0)
goto retry;
--
2.17.1


Re: [Linaro-mm-sig] [PATCH 1/2] dma-buf.rst: Document why indefinite fences are a bad idea

2020-07-22 Thread Daniel Vetter
On Wed, Jul 22, 2020 at 8:45 AM Thomas Hellström (Intel)
 wrote:
>
>
> On 2020-07-22 00:45, Dave Airlie wrote:
> > On Tue, 21 Jul 2020 at 18:47, Thomas Hellström (Intel)
> >  wrote:
> >>
> >> On 7/21/20 9:45 AM, Christian König wrote:
> >>> Am 21.07.20 um 09:41 schrieb Daniel Vetter:
>  On Mon, Jul 20, 2020 at 01:15:17PM +0200, Thomas Hellström (Intel)
>  wrote:
> > Hi,
> >
> > On 7/9/20 2:33 PM, Daniel Vetter wrote:
> >> Comes up every few years, gets somewhat tedious to discuss, let's
> >> write this down once and for all.
> >>
> >> What I'm not sure about is whether the text should be more explicit in
> >> flat out mandating the amdkfd eviction fences for long running compute
> >> workloads or workloads where userspace fencing is allowed.
> > Although (in my humble opinion) it might be possible to completely
> > untangle
> > kernel-introduced fences for resource management and dma-fences used
> > for
> > completion- and dependency tracking and lift a lot of restrictions
> > for the
> > dma-fences, including prohibiting infinite ones, I think this makes
> > sense
> > describing the current state.
>  Yeah I think a future patch needs to type up how we want to make that
>  happen (for some cross driver consistency) and what needs to be
>  considered. Some of the necessary parts are already there (with like the
>  preemption fences amdkfd has as an example), but I think some clear docs
>  on what's required from both hw, drivers and userspace would be really
>  good.
> >>> I'm currently writing that up, but probably still need a few days for
> >>> this.
> >> Great! I put down some (very) initial thoughts a couple of weeks ago
> >> building on eviction fences for various hardware complexity levels here:
> >>
> >> https://gitlab.freedesktop.org/thomash/docs/-/blob/master/Untangling%20dma-fence%20and%20memory%20allocation.odt
> > We are seeing HW that has recoverable GPU page faults but only for
> > compute tasks, and scheduler without semaphores hw for graphics.
> >
> > So a single driver may have to expose both models to userspace and
> > also introduces the problem of how to interoperate between the two
> > models on one card.
> >
> > Dave.
>
> Hmm, yes to begin with it's important to note that this is not a
> replacement for new programming models or APIs, This is something that
> takes place internally in drivers to mitigate many of the restrictions
> that are currently imposed on dma-fence and documented in this and
> previous series. It's basically the driver-private narrow completions
> Jason suggested in the lockdep patches discussions implemented the same
> way as eviction-fences.
>
> The memory fence API would be local to helpers and middle-layers like
> TTM, and the corresponding drivers.  The only cross-driver-like
> visibility would be that the dma-buf move_notify() callback would not be
> allowed to wait on dma-fences or something that depends on a dma-fence.

Because we can't preempt (on some engines at least) we already have
the requirement that cross driver buffer management can get stuck on a
dma-fence. Not even taking into account the horrors we do with
userptr, which are cross driver no matter what. Limiting move_notify
to memory fences only doesn't work, since the pte clearing might need
to wait for a dma_fence first. Hence this becomes a full end-of-batch
fence, not just a limited kernel-internal memory fence.

That's kinda why I think the only reasonable option is to toss in the
towel and declare dma-fence to be the memory fence (and suck up all
the consequences of that decision as uapi, which is kinda where we
are), and construct something new & entirely free-wheeling for userspace
fencing. But only for engines that allow enough preempt/gpu page
faulting to make that possible. Free wheeling userspace fences/gpu
semaphores or whatever you want to call them (on windows I think it's
monitored fence) only work if you can preempt to decouple the memory
fences from your gpu command execution.

There's the in-between step of just decoupling the batchbuffer
submission prep for hw without any preempt (but a scheduler), but that
seems kinda pointless. Modern execbuf should be O(1) fastpath, with
all the allocation/mapping work pulled out ahead. vk exposes that
model directly to clients, GL drivers could use it internally too, so
I see zero value in spending lots of time engineering very tricky
kernel code just for old userspace. Much more reasonable to do that in
userspace, where we have real debuggers and no panics about security
bugs (or well, a lot less, webgl is still a thing, but at least
browsers realized you need to contain that completely).

Cheers, Daniel

> So with that in mind, I don't foresee engines with different
> capabilities on the same card being a problem.
>
> /Thomas
>
>


-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

Re: pages pinned for BO lifetime and security

2020-07-22 Thread Christian König

Am 22.07.20 um 02:22 schrieb Gurchetan Singh:

+Christian who added DMABUF_MOVE_NOTIFY which added the relevant blurb:

https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/dma-buf/Kconfig#n46 



Currently, the user seems to be amdgpu for P2P dma-buf, and it seems to 
plumb the ttm (*move_notify) callback through to dma-buf.  We're not sure 
if it's a security issue occurring across DRM drivers, or one more 
specific to the new amdgpu use case.


On Tue, Jul 21, 2020 at 1:03 PM Chia-I Wu wrote:


Hi list,

virtio-gpu is moving in the direction where BO pages are pinned for
the lifetime for simplicity.  I am wondering if that is considered a
security issue in general, especially after running into the
description of the new DMABUF_MOVE_NOTIFY config option.



Yes, that is generally considered a denial-of-service possibility, and so 
far Dave and Daniel have rejected all attempts to upstream stuff like this 
as far as I know.


DMA-buf and pinning for scanout are the only exceptions, since the 
implementation wouldn't have been possible otherwise.




Most drivers do not have a shrinker, or whether a BO is purgeable is
entirely controlled by the userspace (madvice).  They can be
categorized as "a security problem where userspace is able to pin
unrestricted amounts of memory".  But those drivers are normally found
on systems without swap.  I don't think the issue applies.



This is completely independent of the availability of swap.

Pinning pages in large quantities can result in all kinds of problems 
and needs to be prevented even without swap.

Otherwise you can run into problems even with simple I/O operations, 
for example.




Of the desktop GPU drivers, i915's shrinker certainly supports purging
to swap.  TTM is a bit hard to follow.  I can't really tell if amdgpu
or nouveau supports that.  virtio-gpu is more commonly found on
systems with swaps so I think it should follow the desktop practices?



What we do at least in the amdgpu, radeon, i915 and nouveau is to only 
allow it for scanout and that in turn is limited by the physical number 
of CRTCs on the board.




Truth is, the emulated virtio-gpu device always supports page moves
with VIRTIO_GPU_CMD_RESOURCE_{ATTACH,DETACH}_BACKING.  It is just that
the driver does not make use of them.  That makes this less of an
issue because the driver can be fixed anytime (finger crossed that the
emulator won't have bugs in these untested paths).  This issue becomes
more urgent because we are considering adding a new HW command[1]
where page moves will be disallowed.  We definitely don't want a HW
command that is inherently insecure, if BO pages pinned for the
lifetime is considered a security issue on desktops.



Yeah, that's probably not such a good idea :)

Regards,
Christian.



[1] VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB

https://gitlab.freedesktop.org/virgl/drm-misc-next/-/blob/virtio-gpu-next/include/uapi/linux/virtio_gpu.h#L396







Re: pages pinned for BO lifetime and security

2020-07-22 Thread Christian König

Am 22.07.20 um 09:19 schrieb Christian König:

Am 22.07.20 um 02:22 schrieb Gurchetan Singh:

+Christian who added DMABUF_MOVE_NOTIFY which added the relevant blurb:

https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/dma-buf/Kconfig#n46 



Currently, the user seems to be amdgpu for P2P dma-buf, and it seems to 
plumb the ttm (*move_notify) callback through to dma-buf.  We're not 
sure if it's a security issue occurring across DRM drivers, or one more 
specific to the new amdgpu use case.


On Tue, Jul 21, 2020 at 1:03 PM Chia-I Wu wrote:


Hi list,

virtio-gpu is moving in the direction where BO pages are pinned for
the lifetime for simplicity.  I am wondering if that is considered a
security issue in general, especially after running into the
description of the new DMABUF_MOVE_NOTIFY config option.



Yes, that is generally considered a denial-of-service possibility, and so 
far Dave and Daniel have rejected all attempts to upstream stuff like 
this as far as I know.


DMA-buf and pinning for scanout are the only exceptions, since the 
implementation wouldn't have been possible otherwise.


Or better said: for scanout, pinning is a hardware requirement. For DMA-buf 
we obviously can have a better approach :)


Christian.





Most drivers do not have a shrinker, or whether a BO is purgeable is
entirely controlled by the userspace (madvice).  They can be
categorized as "a security problem where userspace is able to pin
unrestricted amounts of memory".  But those drivers are normally
found
on systems without swap.  I don't think the issue applies.



This is completely independent of the availability of swap.

Pinning pages in large quantities can result in all kinds of 
problems and needs to be prevented even without swap.

Otherwise you can run into problems even with simple I/O operations, 
for example.




Of the desktop GPU drivers, i915's shrinker certainly supports
purging
to swap.  TTM is a bit hard to follow.  I can't really tell if amdgpu
or nouveau supports that.  virtio-gpu is more commonly found on
systems with swaps so I think it should follow the desktop practices?



What we do at least in the amdgpu, radeon, i915 and nouveau is to only 
allow it for scanout and that in turn is limited by the physical 
number of CRTCs on the board.




Truth is, the emulated virtio-gpu device always supports page moves
with VIRTIO_GPU_CMD_RESOURCE_{ATTACH,DETACH}_BACKING.  It is just
that
the driver does not make use of them.  That makes this less of an
issue because the driver can be fixed anytime (finger crossed
that the
emulator won't have bugs in these untested paths).  This issue
becomes
more urgent because we are considering adding a new HW command[1]
where page moves will be disallowed.  We definitely don't want a HW
command that is inherently insecure, if BO pages pinned for the
lifetime is considered a security issue on desktops.



Yeah, that's probably not such a good idea :)

Regards,
Christian.



[1] VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB

https://gitlab.freedesktop.org/virgl/drm-misc-next/-/blob/virtio-gpu-next/include/uapi/linux/virtio_gpu.h#L396









[Bug 207383] [Regression] 5.7 amdgpu/polaris11 gpf: amdgpu_atomic_commit_tail

2020-07-22 Thread bugzilla-daemon
https://bugzilla.kernel.org/show_bug.cgi?id=207383

--- Comment #83 from Christian König (christian.koe...@amd.com) ---
Instead of working around the bug I think we should concentrate on nailing the
root cause.

I suggest inserting a use-after-free check into just that structure. In other
words, add a field "magic_number", fill it with 0xdeadbeef on allocation and set
it to zero before the kfree().

A simple BUG_ON(ptr->magic_number != 0xdeadbeef) should yield results rather
quickly.

Then just add printk()s before the kfree() to figure out why we have this use
after free race.
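
A minimal sketch of the suggested canary (structure and field names are
illustrative):

  #define FOO_ALIVE_MAGIC 0xdeadbeef

  struct foo {
          u32 magic_number;
          /* ... existing fields ... */
  };

  struct foo *foo;

  /* on allocation */
  foo = kzalloc(sizeof(*foo), GFP_KERNEL);
  foo->magic_number = FOO_ALIVE_MAGIC;

  /* at every suspicious access */
  BUG_ON(foo->magic_number != FOO_ALIVE_MAGIC); /* trips on use after free */

  /* on the free path */
  printk(KERN_INFO "freeing foo %p from %pS\n",
         foo, __builtin_return_address(0));
  foo->magic_number = 0;
  kfree(foo);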

-- 
You are receiving this mail because:
You are watching the assignee of the bug.


Re: pages pinned for BO lifetime and security

2020-07-22 Thread Daniel Vetter
On Wed, Jul 22, 2020 at 9:19 AM Christian König
 wrote:
>
> Am 22.07.20 um 02:22 schrieb Gurchetan Singh:
>
> +Christian who added DMABUF_MOVE_NOTIFY which added the relevant blurb:
>
> https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/dma-buf/Kconfig#n46
>
> Currently, the user seems to be amdgpu for P2P dma-buf, and it seems to plumb ttm
> (*move_notify) callback to dma-buf.  We're not sure if it's a security issue
> occurring across DRM drivers, or one more specific to the new amdgpu use case.
>
> On Tue, Jul 21, 2020 at 1:03 PM Chia-I Wu  wrote:
>>
>> Hi list,
>>
>> virtio-gpu is moving in the direction where BO pages are pinned for
>> the lifetime for simplicity.  I am wondering if that is considered a
>> security issue in general, especially after running into the
>> description of the new DMABUF_MOVE_NOTIFY config option.
>
>
> Yes, that is generally considered a denial-of-service possibility, and so far
> Dave and Daniel have rejected all attempts to upstream stuff like this as far as
> I know.

Uh we have merged pretty much all arm-soc drivers without real
shrinkers. Whether that was a good idea or not is maybe a different
question - now that we do have pretty good helpers maybe we should
poke this a bit more. But then SoCs Suck (tm).

But for real gpus they do indeed all have shrinkers, and not just "pin
everything forever" model. Real gpus = stuff you might run on servers
or multi-app and all that stuff, not with a simple "we just kill all
background jobs if memory gets low" model like on android and other
such things.

> DMA-buf an pinning for scanout are the only exceptions since the 
> implementation wouldn't have been possible otherwise.
>
>>
>> Most drivers do not have a shrinker, or whether a BO is purgeable is
>> entirely controlled by the userspace (madvice).  They can be
>> categorized as "a security problem where userspace is able to pin
>> unrestricted amounts of memory".  But those drivers are normally found
>> on systems without swap.  I don't think the issue applies.
>
>
> This is completely independent of the availability of swap or not.
>
> Pinning of pages in large quantities can result in all kind of problems and 
> needs to be prevented even without swap.

Yeah you don't just kill swap, you kill a ton of other kernel services
with mass pinning. I think even the pinning of scanout buffers for
i915 from system memory is somewhat questionable (but I guess small
enough to not matter in practice).

> Otherwise you can ran into problems even with simple I/O operations for 
> example.
>
>>
>> Of the desktop GPU drivers, i915's shrinker certainly supports purging
>> to swap.  TTM is a bit hard to follow.  I can't really tell if amdgpu
>> or nouveau supports that.  virtio-gpu is more commonly found on
>> systems with swaps so I think it should follow the desktop practices?
>
>
> What we do at least in the amdgpu, radeon, i915 and nouveau is to only allow 
> it for scanout and that in turn is limited by the physical number of CRTCs on 
> the board.
>
>>
>> Truth is, the emulated virtio-gpu device always supports page moves
>> with VIRTIO_GPU_CMD_RESOURCE_{ATTACH,DETACH}_BACKING.  It is just that
>> the driver does not make use of them.  That makes this less of an
>> issue because the driver can be fixed anytime (finger crossed that the
>> emulator won't have bugs in these untested paths).  This issue becomes
>> more urgent because we are considering adding a new HW command[1]
>> where page moves will be disallowed.  We definitely don't want a HW
>> command that is inherently insecure, if BO pages pinned for the
>> lifetime is considered a security issue on desktops.
>
>
> Yeah, that's probably not such a good idea :)

Well if the pinning is just for the duration of the hw command, it's
fine, just like batch buffers. But if it's long term pinning then that
doesn't sound like a good idea. RDMA has this as their inherent hw
programming model (except if your hw is really fancy and has hw page
fault handling on the rdma nic), and they hard limit such pins to what
you can mlock (or something similar within rdma).
-Daniel

>
> Regards,
> Christian.
>
>>
>> [1] VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB
>> https://gitlab.freedesktop.org/virgl/drm-misc-next/-/blob/virtio-gpu-next/include/uapi/linux/virtio_gpu.h#L396
>
>



-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch


Re: pages pinned for BO lifetime and security

2020-07-22 Thread Daniel Vetter
On Wed, Jul 22, 2020 at 9:19 AM Christian König
 wrote:
> Am 22.07.20 um 02:22 schrieb Gurchetan Singh:
>> Of the desktop GPU drivers, i915's shrinker certainly supports purging
>> to swap.  TTM is a bit hard to follow.  I can't really tell if amdgpu
>> or nouveau supports that.  virtio-gpu is more commonly found on
>> systems with swaps so I think it should follow the desktop practices?
>
>
> What we do at least in the amdgpu, radeon, i915 and nouveau is to only allow 
> it for scanout and that in turn is limited by the physical number of CRTCs on 
> the board.

Somewhat aside, but I'm not sure the ttm shrinker really works like
that. I think there's two parts:
1. kernel thread which takes buffers and unbinds them when we're over
the ttm global limit. This is the ttm_shrink_work stuff, and it only
shrinks if the zone is over a hard limit. Below that it just leaves
buffers pinned.

2. Actual core mm shrinker, which releases buffers held in cache by
ttm_page_alloc_dma.c. But that only happens when buffers have been
unbound by the first thread, so anything below those hard limits is
not shrinkable. And iirc those hard limits are like half of system
memory or so (last time I looked through this stuff at least).

No idea why exactly things are like they are, since the first thread
already does a dma_resv_trylock, and that's enough to avoid locking
inversions when being called from 2. Or well, should be at least, for
reasonable driver design.

The only other thing I'm seeing is the global lru, but that could be
fixed by having a per-device core mm shrinker instance which directly
shrinks the per-device lru. And then we just globally balance like
with all shrinkers through the core mm "shrink everyone equally"
approach. You can even keep the separate page alloc shrinker, since
core mm always loops over all shrinkers - we're not the only ones
where shrinking one cache makes more memory available for another
cache to shrink, e.g. you can't throw out an inode without first
throwing out all the dentry pointing at them.

Another problem would be allocating memory while holding per-device
lru locks (since trylock on such a global lock in shrinkers is a
really bad idea, we know that from all the dev->struct_mutex lolz in
i915). But for ttm that's not a problem since all lru are spinlock, so
only GFP_ATOMIC allowed anyway, hence no problem.
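
A rough sketch of such a per-device shrinker with the
register_shrinker() API of this era (foo_device, its LRU and the unbind
helper are all hypothetical, not real TTM code):

  static unsigned long foo_shrink_count(struct shrinker *shrink,
                                        struct shrink_control *sc)
  {
          struct foo_device *fdev =
                  container_of(shrink, struct foo_device, shrinker);

          return READ_ONCE(fdev->shrinkable_pages); /* cheap estimate */
  }

  static unsigned long foo_shrink_scan(struct shrinker *shrink,
                                       struct shrink_control *sc)
  {
          struct foo_device *fdev =
                  container_of(shrink, struct foo_device, shrinker);
          struct foo_bo *bo, *tmp;
          unsigned long freed = 0;

          spin_lock(&fdev->lru_lock); /* spinlock: GFP_ATOMIC rules apply */
          list_for_each_entry_safe(bo, tmp, &fdev->lru, lru_node) {
                  if (!dma_resv_trylock(bo->base.resv))
                          continue; /* skip contended BOs, no inversion */
                  freed += foo_bo_unbind_pages(bo); /* hypothetical helper */
                  dma_resv_unlock(bo->base.resv);
                  if (freed >= sc->nr_to_scan)
                          break;
          }
          spin_unlock(&fdev->lru_lock);

          return freed ? freed : SHRINK_STOP;
  }

  /* at device init */
  fdev->shrinker.count_objects = foo_shrink_count;
  fdev->shrinker.scan_objects  = foo_shrink_scan;
  fdev->shrinker.seeks         = DEFAULT_SEEKS;
  register_shrinker(&fdev->shrinker);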

Adding Thomas for this ttm tangent.
-Daniel
-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch


Re: [Linaro-mm-sig] [PATCH 1/2] dma-buf.rst: Document why indefinite fences are a bad idea

2020-07-22 Thread Intel


On 2020-07-22 09:11, Daniel Vetter wrote:

On Wed, Jul 22, 2020 at 8:45 AM Thomas Hellström (Intel)
 wrote:


On 2020-07-22 00:45, Dave Airlie wrote:

On Tue, 21 Jul 2020 at 18:47, Thomas Hellström (Intel)
 wrote:

On 7/21/20 9:45 AM, Christian König wrote:

Am 21.07.20 um 09:41 schrieb Daniel Vetter:

On Mon, Jul 20, 2020 at 01:15:17PM +0200, Thomas Hellström (Intel)
wrote:

Hi,

On 7/9/20 2:33 PM, Daniel Vetter wrote:

Comes up every few years, gets somewhat tedious to discuss, let's
write this down once and for all.

What I'm not sure about is whether the text should be more explicit in
flat out mandating the amdkfd eviction fences for long running compute
workloads or workloads where userspace fencing is allowed.

Although (in my humble opinion) it might be possible to completely
untangle
kernel-introduced fences for resource management and dma-fences used
for
completion- and dependency tracking and lift a lot of restrictions
for the
dma-fences, including prohibiting infinite ones, I think this makes
sense
describing the current state.

Yeah I think a future patch needs to type up how we want to make that
happen (for some cross driver consistency) and what needs to be
considered. Some of the necessary parts are already there (with like the
preemption fences amdkfd has as an example), but I think some clear docs
on what's required from both hw, drivers and userspace would be really
good.

I'm currently writing that up, but probably still need a few days for
this.

Great! I put down some (very) initial thoughts a couple of weeks ago
building on eviction fences for various hardware complexity levels here:

https://gitlab.freedesktop.org/thomash/docs/-/blob/master/Untangling%20dma-fence%20and%20memory%20allocation.odt

We are seeing HW that has recoverable GPU page faults but only for
compute tasks, and scheduler without semaphores hw for graphics.

So a single driver may have to expose both models to userspace and
also introduces the problem of how to interoperate between the two
models on one card.

Dave.

Hmm, yes to begin with it's important to note that this is not a
replacement for new programming models or APIs, This is something that
takes place internally in drivers to mitigate many of the restrictions
that are currently imposed on dma-fence and documented in this and
previous series. It's basically the driver-private narrow completions
Jason suggested in the lockdep patches discussions implemented the same
way as eviction-fences.

The memory fence API would be local to helpers and middle-layers like
TTM, and the corresponding drivers.  The only cross-driver-like
visibility would be that the dma-buf move_notify() callback would not be
allowed to wait on dma-fences or something that depends on a dma-fence.

Because we can't preempt (on some engines at least) we already have
the requirement that cross driver buffer management can get stuck on a
dma-fence. Not even taking into account the horrors we do with
userptr, which are cross driver no matter what. Limiting move_notify
to memory fences only doesn't work, since the pte clearing might need
to wait for a dma_fence first. Hence this becomes a full end-of-batch
fence, not just a limited kernel-internal memory fence.


For non-preemptible hardware the memory fence typically *is* the 
end-of-batch fence. (Unless, as documented, there is a scheduler 
consuming sync-file dependencies in which case the memory fence wait 
needs to be able to break out of that). The key thing is not that we can 
break out of execution, but that we can break out of dependencies, since 
when we're executing, all dependencies (modulo semaphores) are already 
fulfilled. That's what's eliminating the deadlocks.




That's kinda why I think only reasonable option is to toss in the
towel and declare dma-fence to be the memory fence (and suck up all
the consequences of that decision as uapi, which is kinda where we
are), and construct something new&entirely free-wheeling for userspace
fencing. But only for engines that allow enough preempt/gpu page
faulting to make that possible. Free wheeling userspace fences/gpu
semaphores or whatever you want to call them (on windows I think it's
monitored fence) only work if you can preempt to decouple the memory
fences from your gpu command execution.

There's the in-between step of just decoupling the batchbuffer
submission prep for hw without any preempt (but a scheduler), but that
seems kinda pointless. Modern execbuf should be O(1) fastpath, with
all the allocation/mapping work pulled out ahead. vk exposes that
model directly to clients, GL drivers could use it internally too, so
I see zero value in spending lots of time engineering very tricky
kernel code just for old userspace. Much more reasonable to do that in
userspace, where we have real debuggers and no panics about security
bugs (or well, a lot less, webgl is still a thing, but at least
browsers realized you need to container that completely).


Sure, it's definitely a big chunk o

Re: [PATCH v2] fbdev: Detect integer underflow at "struct fbcon_ops"->clear_margins.

2020-07-22 Thread Daniel Vetter
On Tue, Jul 21, 2020 at 6:08 PM Greg Kroah-Hartman
 wrote:
>
> On Thu, Jul 16, 2020 at 08:27:21PM +0900, Tetsuo Handa wrote:
> > On 2020/07/16 19:00, Daniel Vetter wrote:
> > > On Thu, Jul 16, 2020 at 12:29:00AM +0900, Tetsuo Handa wrote:
> > >> On 2020/07/16 0:12, Dan Carpenter wrote:
> > >>> I've complained about integer overflows in fbdev for a long time...
> > >>>
> > >>> What I'd like to see is something like the following maybe.  I don't
> > >>> know how to get the vc_data in fbmem.c so it doesn't include your checks
> > >>> for negative.
> > >>
> > >> Yes. Like I said "Thus, I consider that we need more sanity/constraints 
> > >> checks." at
> > >> https://lore.kernel.org/lkml/b1e7dd6a-fc22-bba8-0abb-d3e779329...@i-love.sakura.ne.jp/
> > >>  ,
> > >> we want basic checks. That's a task for fbdev people who should be 
> > >> familiar with
> > >> necessary constraints.
> > >
> > > I think the worldwide supply of people who understand fbdev and willing to
> > > work on it is roughly 0. So if someone wants to fix this mess properly
> > > (which likely means adding tons of over/underflow checks at entry points,
> > > since you're never going to catch the driver bugs, there's too many and
> > > not enough people who care) they need to fix this themselves.
> >
> > But I think we can enforce a reasonable constraint which is much stricter
> > than Dan's basic_checks() (which used INT_MAX). For example, do we need
> > to accept var->{xres,yres} >= 1048576, given that "32768 rows or cols" *
> > "32 pixels per character" = 1048576 and vc_do_resize() accepts only rows
> > and cols < 32768?
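
In code, the constraint being floated above would be on the order of
(an illustrative sketch, not a posted patch):

  /* 32768 rows/cols max in vc_do_resize() x 32 px per glyph */
  #define FBCON_MAX_RES (32768u * 32u) /* 1048576 */

  if (var->xres >= FBCON_MAX_RES || var->yres >= FBCON_MAX_RES)
          return -EINVAL;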
> >
> > >
> > > Just to avoid confusion here.
> > >
> > >> Anyway, my two patches are small and low cost; can we apply these 
> > >> patches regardless
> > >> of basic checks?
> > >
> > > Which two patches where?
> >
> > [PATCH v3] vt: Reject zero-sized screen buffer size.
> >  from 
> > https://lkml.kernel.org/r/20200712111013.11881-1-penguin-ker...@i-love.sakura.ne.jp
>
> This is now in my tree.
>
> > [PATCH v2] fbdev: Detect integer underflow at "struct 
> > fbcon_ops"->clear_margins.
> >  from 
> > https://lkml.kernel.org/r/20200715015102.3814-1-penguin-ker...@i-love.sakura.ne.jp
>
> That should be taken by the fbdev maintainer, but I can take it too if
> people want.

Just missed this week's pull request train, and it feels like it's not
worth making this an exception (it's been broken forever after all), so
maybe it's best if you just add this to vt.

Acked-by: Daniel Vetter 

Also this avoids the impression that I know what's going on in fbdev
code; maybe with sufficient abandon from my side someone will pop up who
cares and fixes the bazillion syzkaller issues we seem to have
around console/vt and everything related.
-Daniel
-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch


Re: [Linaro-mm-sig] [PATCH 1/2] dma-buf.rst: Document why indefinite fences are a bad idea

2020-07-22 Thread Daniel Vetter
On Wed, Jul 22, 2020 at 10:05 AM Thomas Hellström (Intel)
 wrote:
>
>
> On 2020-07-22 09:11, Daniel Vetter wrote:
> > On Wed, Jul 22, 2020 at 8:45 AM Thomas Hellström (Intel)
> >  wrote:
> >>
> >> On 2020-07-22 00:45, Dave Airlie wrote:
> >>> On Tue, 21 Jul 2020 at 18:47, Thomas Hellström (Intel)
> >>>  wrote:
>  On 7/21/20 9:45 AM, Christian König wrote:
> > Am 21.07.20 um 09:41 schrieb Daniel Vetter:
> >> On Mon, Jul 20, 2020 at 01:15:17PM +0200, Thomas Hellström (Intel)
> >> wrote:
> >>> Hi,
> >>>
> >>> On 7/9/20 2:33 PM, Daniel Vetter wrote:
>  Comes up every few years, gets somewhat tedious to discuss, let's
>  write this down once and for all.
> 
>  What I'm not sure about is whether the text should be more explicit 
>  in
>  flat out mandating the amdkfd eviction fences for long running 
>  compute
>  workloads or workloads where userspace fencing is allowed.
> >>> Although (in my humble opinion) it might be possible to completely
> >>> untangle
> >>> kernel-introduced fences for resource management and dma-fences used
> >>> for
> >>> completion- and dependency tracking and lift a lot of restrictions
> >>> for the
> >>> dma-fences, including prohibiting infinite ones, I think this makes
> >>> sense
> >>> describing the current state.
> >> Yeah I think a future patch needs to type up how we want to make that
> >> happen (for some cross driver consistency) and what needs to be
> >> considered. Some of the necessary parts are already there (with like 
> >> the
> >> preemption fences amdkfd has as an example), but I think some clear 
> >> docs
> >> on what's required from both hw, drivers and userspace would be really
> >> good.
> > I'm currently writing that up, but probably still need a few days for
> > this.
>  Great! I put down some (very) initial thoughts a couple of weeks ago
>  building on eviction fences for various hardware complexity levels here:
> 
>  https://gitlab.freedesktop.org/thomash/docs/-/blob/master/Untangling%20dma-fence%20and%20memory%20allocation.odt
> >>> We are seeing HW that has recoverable GPU page faults but only for
> >>> compute tasks, and scheduler without semaphores hw for graphics.
> >>>
> >>> So a single driver may have to expose both models to userspace and
> >>> also introduces the problem of how to interoperate between the two
> >>> models on one card.
> >>>
> >>> Dave.
> >> Hmm, yes to begin with it's important to note that this is not a
> >> replacement for new programming models or APIs, This is something that
> >> takes place internally in drivers to mitigate many of the restrictions
> >> that are currently imposed on dma-fence and documented in this and
> >> previous series. It's basically the driver-private narrow completions
> >> Jason suggested in the lockdep patches discussions implemented the same
> >> way as eviction-fences.
> >>
> >> The memory fence API would be local to helpers and middle-layers like
> >> TTM, and the corresponding drivers.  The only cross-driver-like
> >> visibility would be that the dma-buf move_notify() callback would not be
> >> allowed to wait on dma-fences or something that depends on a dma-fence.
> > Because we can't preempt (on some engines at least) we already have
> > the requirement that cross driver buffer management can get stuck on a
> > dma-fence. Not even taking into account the horrors we do with
> > userptr, which are cross driver no matter what. Limiting move_notify
> > to memory fences only doesn't work, since the pte clearing might need
> > to wait for a dma_fence first. Hence this becomes a full end-of-batch
> > fence, not just a limited kernel-internal memory fence.
>
> For non-preemptible hardware the memory fence typically *is* the
> end-of-batch fence. (Unless, as documented, there is a scheduler
> consuming sync-file dependencies in which case the memory fence wait
> needs to be able to break out of that). The key thing is not that we can
> break out of execution, but that we can break out of dependencies, since
> when we're executing, all dependencies (modulo semaphores) are already
> fulfilled. That's what's eliminating the deadlocks.
>
> > That's kinda why I think only reasonable option is to toss in the
> > towel and declare dma-fence to be the memory fence (and suck up all
> > the consequences of that decision as uapi, which is kinda where we
> > are), and construct something new & entirely free-wheeling for userspace
> > fencing. But only for engines that allow enough preempt/gpu page
> > faulting to make that possible. Free wheeling userspace fences/gpu
> > semaphores or whatever you want to call them (on windows I think it's
> > monitored fence) only work if you can preempt to decouple the memory
> > fences from your gpu command execution.
> >
> > There's the in-between step of just decoupling the batchbuffer
> > submission p

Re: [Linaro-mm-sig] [PATCH 1/2] dma-buf.rst: Document why indefinite fences are a bad idea

2020-07-22 Thread Thomas Hellström (Intel)


On 2020-07-22 11:45, Daniel Vetter wrote:

On Wed, Jul 22, 2020 at 10:05 AM Thomas Hellström (Intel)
 wrote:


On 2020-07-22 09:11, Daniel Vetter wrote:

On Wed, Jul 22, 2020 at 8:45 AM Thomas Hellström (Intel)
 wrote:

On 2020-07-22 00:45, Dave Airlie wrote:

On Tue, 21 Jul 2020 at 18:47, Thomas Hellström (Intel)
 wrote:

On 7/21/20 9:45 AM, Christian König wrote:

On 21.07.20 at 09:41, Daniel Vetter wrote:

On Mon, Jul 20, 2020 at 01:15:17PM +0200, Thomas Hellström (Intel)
wrote:

Hi,

On 7/9/20 2:33 PM, Daniel Vetter wrote:

Comes up every few years, gets somewhat tedious to discuss, let's
write this down once and for all.

What I'm not sure about is whether the text should be more explicit in
flat out mandating the amdkfd eviction fences for long running compute
workloads or workloads where userspace fencing is allowed.

Although (in my humble opinion) it might be possible to completely
untangle
kernel-introduced fences for resource management and dma-fences used
for
completion- and dependency tracking and lift a lot of restrictions
for the
dma-fences, including prohibiting infinite ones, I think this makes
sense
describing the current state.

Yeah I think a future patch needs to type up how we want to make that
happen (for some cross driver consistency) and what needs to be
considered. Some of the necessary parts are already there (with like the
preemption fences amdkfd has as an example), but I think some clear docs
on what's required from both hw, drivers and userspace would be really
good.

I'm currently writing that up, but probably still need a few days for
this.

Great! I put down some (very) initial thoughts a couple of weeks ago
building on eviction fences for various hardware complexity levels here:

https://gitlab.freedesktop.org/thomash/docs/-/blob/master/Untangling%20dma-fence%20and%20memory%20allocation.odt

We are seeing HW that has recoverable GPU page faults but only for
compute tasks, and scheduler without semaphores hw for graphics.

So a single driver may have to expose both models to userspace and
also introduces the problem of how to interoperate between the two
models on one card.

Dave.

Hmm, yes to begin with it's important to note that this is not a
replacement for new programming models or APIs, This is something that
takes place internally in drivers to mitigate many of the restrictions
that are currently imposed on dma-fence and documented in this and
previous series. It's basically the driver-private narrow completions
Jason suggested in the lockdep patches discussions implemented the same
way as eviction-fences.

The memory fence API would be local to helpers and middle-layers like
TTM, and the corresponding drivers.  The only cross-driver-like
visibility would be that the dma-buf move_notify() callback would not be
allowed to wait on dma-fences or something that depends on a dma-fence.

Because we can't preempt (on some engines at least) we already have
the requirement that cross driver buffer management can get stuck on a
dma-fence. Not even taking into account the horrors we do with
userptr, which are cross driver no matter what. Limiting move_notify
to memory fences only doesn't work, since the pte clearing might need
to wait for a dma_fence first. Hence this becomes a full end-of-batch
fence, not just a limited kernel-internal memory fence.

For non-preemptible hardware the memory fence typically *is* the
end-of-batch fence. (Unless, as documented, there is a scheduler
consuming sync-file dependencies in which case the memory fence wait
needs to be able to break out of that). The key thing is not that we can
break out of execution, but that we can break out of dependencies, since
when we're executing, all dependencies (modulo semaphores) are already
fulfilled. That's what's eliminating the deadlocks.


That's kinda why I think only reasonable option is to toss in the
towel and declare dma-fence to be the memory fence (and suck up all
the consequences of that decision as uapi, which is kinda where we
are), and construct something new & entirely free-wheeling for userspace
fencing. But only for engines that allow enough preempt/gpu page
faulting to make that possible. Free wheeling userspace fences/gpu
semaphores or whatever you want to call them (on windows I think it's
monitored fence) only work if you can preempt to decouple the memory
fences from your gpu command execution.

There's the in-between step of just decoupling the batchbuffer
submission prep for hw without any preempt (but a scheduler), but that
seems kinda pointless. Modern execbuf should be O(1) fastpath, with
all the allocation/mapping work pulled out ahead. vk exposes that
model directly to clients, GL drivers could use it internally too, so
I see zero value in spending lots of time engineering very tricky
kernel code just for old userspace. Much more reasonable to do that in
userspace, where we have real debuggers and no panics about security
bugs (or well, a lot less, webgl is still a thing, but at 

Re: pages pinned for BO lifetime and security

2020-07-22 Thread Christian König

On 22.07.20 at 09:46, Daniel Vetter wrote:

On Wed, Jul 22, 2020 at 9:19 AM Christian König
 wrote:

On 22.07.20 at 02:22, Gurchetan Singh wrote:

Of the desktop GPU drivers, i915's shrinker certainly supports purging
to swap.  TTM is a bit hard to follow.  I can't really tell if amdgpu
or nouveau supports that.  virtio-gpu is more commonly found on
systems with swaps so I think it should follow the desktop practices?


What we do at least in the amdgpu, radeon, i915 and nouveau is to only allow it 
for scanout and that in turn is limited by the physical number of CRTCs on the 
board.

Somewhat aside, but I'm not sure the ttm shrinker really works like
that. I think there's two parts:
1. kernel thread which takes buffers and unbinds them when we're over
the ttm global limit. This is the ttm_shrink_work stuff, and it only
shrinks if the zone is over a hard limit. Below that it just leaves
buffers pinned.

2. Actual core mm shrinker, which releases buffers held in cache by
ttm_page_alloc_dma.c. But that only happens when buffers have been
unbound by the first thread, so anything below those hard limits is
not shrinkable. And iirc those hard limits are like half of system
memory or so (last time I looked through this stuff at least).

No idea why exactly things are like they are, since the first thread
already does a dma_resv_trylock, and that's enough to avoid locking
inversions when being called from 2. Or well, should be at least, for
reasonable driver design.
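
A minimal sketch of that trylock pattern (my_try_evict(), struct my_bo and
my_evict_one() are hypothetical names; dma_resv_trylock()/dma_resv_unlock()
are the real reservation API):

#include <linux/dma-resv.h>

/*
 * A shrinker must not block on locks that allocation paths may hold, so
 * it only trylocks each object and skips it on contention.
 */
static bool my_try_evict(struct my_bo *bo)
{
	if (!dma_resv_trylock(bo->resv))
		return false;	/* contended: skip it, try the next LRU entry */

	my_evict_one(bo);	/* unbind and release the backing pages */
	dma_resv_unlock(bo->resv);

	return true;
}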


Yes, that's currently a bit messy in TTM and not such a good design
overall.



The only other thing I'm seeing is the global lru, but that could be
fixed by having a per-device core mm shrinker instance which directly
shrinks the per-device lru. And then we just globally balance like
with all shrinkers through the core mm "shrink everyone equally"
approach. You can even keep the separate page alloc shrinker, since
core mm always loops over all shrinkers - we're not the only ones
where shrinking one cache makes more memory available for another
cache to shrink, e.g. you can't throw out an inode without first
throwing out all the dentry pointing at them.


My plan is to replace all this with an explicit SWAP domain for buffer 
objects.


One idea was to make the SYSTEM and SWAP domains global and express all 
this with transitions between the different domains. But having one 
shrinker per device sounds like an even better idea now.



Another problem would be allocating memory while holding per-device
lru locks (since trylock on such a global lock in shrinkers is a
really bad idea, we know that from all the dev->struct_mutex lolz in
i915). But for ttm that's not a problem since all lru are spinlock, so
only GFP_ATOMIC allowed anyway, hence no problem.


Yes, exactly.

Christian.



Adding Thomas for this ttm tangent.
-Daniel




[PULL] drm-misc-next

2020-07-22 Thread Maarten Lankhorst
drm-misc-next-2020-07-22:
drm-misc-next for v5.9:

UAPI Changes:

Cross-subsystem Changes:
- Convert panel-dsi-cm and ingenic bindings to YAML.
- Add lockdep annotations for dma-fence. \o/
- Describe why indefinite fences are a bad idea
- Update binding for rocktech jh057n00900.

Core Changes:
- Add vblank workers.
- Use spin_(un)lock_irq instead of the irqsave/restore variants in crtc code.
- Add managed vram helpers.
- Convert more logging to drm functions.
- Replace more http links with https in core and drivers.
- Cleanup to ttm iomem functions and implementation.
- Remove TTM CMA memtype as it doesn't work correctly.
- Remove TTM_MEMTYPE_FLAG_MAPPABLE for many drivers that have no
  unmappable memory resources.

Driver Changes:
- Add CRC support to nouveau, using the new vblank workers.
- Dithering and atomic state fix for nouveau.
- Fixes for Frida FRD350H54004 panel.
- Add support for OSD mode (sprite planes), IPU (scaling) and multiple
  panels/bridges to ingenic.
- Use managed vram helpers in ast.
- Assorted small fixes to ingenic, i810, mxsfb.
- Remove optional unused ttm dummy functions.
The following changes since commit 947fcfeac3295ff0961bb50803e0c4ae63cff65b:

  drm: drm_rect.h: delete duplicated word in comment (2020-07-15 14:03:02 +0200)

are available in the Git repository at:

  git://anongit.freedesktop.org/drm/drm-misc tags/drm-misc-next-2020-07-22

for you to fetch changes up to acc0c39a59ccd8161b9066265fb8798b4ee07dc9:

  dt-binding: display: Allow a single port node on rocktech,jh057n00900 
(2020-07-21 09:34:23 -0600)


drm-misc-next for v5.9:

UAPI Changes:

Cross-subsystem Changes:
- Convert panel-dsi-cm and ingenic bindings to YAML.
- Add lockdep annotations for dma-fence. \o/
- Describe why indefinite fences are a bad idea
- Update binding for rocktech jh057n00900.

Core Changes:
- Add vblank workers.
- Use spin_(un)lock_irq instead of the irqsave/restore variants in crtc code.
- Add managed vram helpers.
- Convert more logging to drm functions.
- Replace more http links with https in core and drivers.
- Cleanup to ttm iomem functions and implementation.
- Remove TTM CMA memtype as it doesn't work correctly.
- Remove TTM_MEMTYPE_FLAG_MAPPABLE for many drivers that have no
  unmappable memory resources.

Driver Changes:
- Add CRC support to nouveau, using the new vblank workers.
- Dithering and atomic state fix for nouveau.
- Fixes for Frida FRD350H54004 panel.
- Add support for OSD mode (sprite planes), IPU (scaling) and multiple
  panels/bridges to ingenic.
- Use managed vram helpers in ast.
- Assorted small fixes to ingenic, i810, mxsfb.
- Remove optional unused ttm dummy functions.


Alexander A. Klimov (4):
  drm/vboxvideo: Replace HTTP links with HTTPS ones
  drm/tidss: Replace HTTP links with HTTPS ones
  drm: Replace HTTP links with HTTPS ones
  video: fbdev: Replace HTTP links with HTTPS ones

Christian König (9):
  drm/vram-helper: stop using TTM_MEMTYPE_FLAG_MAPPABLE
  drm: remove optional dummy function from drivers using TTM
  drm/ttm: cleanup io_mem interface with nouveau
  drm/ttm: remove io_reserve_fastpath flag
  drm/ttm: cleanup coding style and implementation.
  drm/ttm: remove TTM_MEMTYPE_FLAG_CMA
  drm/vmwgfx: stop using TTM_MEMTYPE_FLAG_MAPPABLE
  drm/nouveau: stop using TTM_MEMTYPE_FLAG_MAPPABLE
  drm/qxl: stop using TTM_MEMTYPE_FLAG_MAPPABLE v2

Christophe JAILLET (1):
  drm/i810: switch from 'pci_' to 'dma_' API

Daniel Vetter (3):
  dma-fence: basic lockdep annotations
  dma-fence: prime lockdep annotations
  dma-buf.rst: Document why indefinite fences are a bad idea

Guido Günther (1):
  drm/mxsfb: Make supported modifiers explicit

Lyude Paul (15):
  drm/vblank: Register drmm cleanup action once per drm_vblank_crtc
  drm/vblank: Use spin_(un)lock_irq() in drm_crtc_vblank_off()
  drm/vblank: Add vblank works
  drm/nouveau/kms/nv140-: Don't modify depth in state during atomic commit
  drm/nouveau/kms/nv50-: Fix disabling dithering
  drm/nouveau/kms/nv140-: Track wndw mappings in nv50_head_atom
  drm/nouveau/kms/nv50-: Expose nv50_outp_atom in disp.h
  drm/nouveau/kms/nv50-: Move hard-coded object handles into header
  drm/nouveau/kms/nvd9-: Add CRC support
  drm/nouveau/kms/nvd9-: Fix disabling CRCs alongside OR reprogramming
  drm/vblank: Use spin_(un)lock_irq() in drm_crtc_vblank_reset()
  drm/vblank: Use spin_(un)lock_irq() in drm_crtc_vblank_on()
  drm/vblank: Use spin_(un)lock_irq() in drm_legacy_vblank_post_modeset()
  drm/vblank: Use spin_(un)lock_irq() in drm_queue_vblank_event()
  drm/vblank: Use spin_(un)lock_irq() in drm_crtc_queue_sequence_ioctl()

Ondrej Jirman (2):
  dt-bindings: display: Fix example in nwl-dsi.yaml
  dt-binding: display: Allow a single port node on rocktech, 

Re: [PATCH 09/20] Documentation: i2c: eliminate duplicated word

2020-07-22 Thread Wolfram Sang
On Tue, Jul 07, 2020 at 11:04:03AM -0700, Randy Dunlap wrote:
> Drop doubled word "new".
> 
> Signed-off-by: Randy Dunlap 

For the record:

Acked-by: Wolfram Sang 





[PATCH] drm/vkms: add missing drm_crtc_vblank_put to the get/put pair on flush

2020-07-22 Thread Melissa Wen
This patch adds a missing drm_crtc_vblank_put op to the pair
drm_crtc_vblank_get/put (inc/decrement counter to guarantee vblanks).

It clears the execution of the following kms_cursor_crc subtests:
1. pipe-A-cursor-[size,alpha-opaque, NxN-(on-screen, off-screen, sliding,
   random, fast-moving])] - successful when running individually.
2. pipe-A-cursor-dpms passes again
3. pipe-A-cursor-suspend also passes

The issue was initially tracked in the sequential execution of IGT
kms_cursor_crc subtest: when running the test sequence or one of its
subtests twice, the odd execs complete and the even ones get stuck in an
endless wait. In the IGT code, calling a wait_for_vblank before the start
of CRC capture prevented the busy-wait. But the problem persisted in the
pipe-A-cursor-dpms and -suspend subtests.

Checking the history, the pipe-A-cursor-dpms subtest was successful when,
in vkms_atomic_commit_tail, instead of using the flip_done op, it used
wait_for_vblanks. Another way to prevent blocking was wait_one_vblank when
enabling crtc. However, in both cases, pipe-A-cursor-suspend persisted
blocking in the 2nd start of CRC capture, which may indicate that
something got stuck in the step of CRC setup. Indeed, wait_one_vblank in
the crc setup was able to sync things and free all kms_cursor_crc
subtests.

Tracing and comparing a clean run with a blocked one:
- in a clean one, vkms_crtc_atomic_flush enables vblanks;
- when blocked, vblanks only started in the next op, vkms_crtc_atomic_enable.
Moreover, a series of vkms_vblank_simulate calls flowed out until vblanks
were disabled.
Also watching the steps of vkms_crtc_atomic_flush, when the very first
drm_crtc_vblank_get returned an error, the subtest crashed. On the other
hand, when vblank_get succeeded, the subtest completed. Finally, checking
the flush steps: it increases the counter to hold a vblank reference (get),
but there isn't an op to decrement it and release the vblank (put).

Cc: Daniel Vetter 
Cc: Rodrigo Siqueira 
Cc: Haneen Mohammed 
Signed-off-by: Melissa Wen 
---
 drivers/gpu/drm/vkms/vkms_crtc.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index ac85e17428f8..a99d6b4a92dd 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -246,6 +246,7 @@ static void vkms_crtc_atomic_flush(struct drm_crtc *crtc,
 
spin_unlock(&crtc->dev->event_lock);
 
+   drm_crtc_vblank_put(crtc);
crtc->state->event = NULL;
}
 
-- 
2.27.0



Re: pages pinned for BO lifetime and security

2020-07-22 Thread Christian König

On 22.07.20 at 09:32, Daniel Vetter wrote:

On Wed, Jul 22, 2020 at 9:19 AM Christian König
 wrote:

On 22.07.20 at 02:22, Gurchetan Singh wrote:

+Christian who added DMABUF_MOVE_NOTIFY which added the relevant blurb:

https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/dma-buf/Kconfig#n46

Currently, the user seems to be amdgpu for P2P dma-buf, and it seems to plumb
the ttm (*move_notify) callback to dma-buf.  We're not sure if it's a security issue 
occurring across DRM drivers, or one more specific to the new amdgpu use case.

On Tue, Jul 21, 2020 at 1:03 PM Chia-I Wu  wrote:

Hi list,

virtio-gpu is moving in the direction where BO pages are pinned for
the lifetime for simplicity.  I am wondering if that is considered a
security issue in general, especially after running into the
description of the new DMABUF_MOVE_NOTIFY config option.


Yes, that is generally considered a denial-of-service possibility and so far Dave 
and Daniel have rejected all tries to upstream stuff like this as far as I know.

Uh we have merged pretty much all arm-soc drivers without real
shrinkers. Whether that was a good idea or not is maybe a different
question - now that we do have pretty good helpers maybe we should
poke this a bit more. But then SoCs Suck (tm).


I was under the impression that those SoC drivers still use the GEM 
helpers which unpin stuff when it is not in use. But I might be wrong.




But for real gpus they do indeed all have shrinkers, and not just "pin
everything forever" model. Real gpus = stuff you might run on servers
or multi-app and all that stuff, not with a simple "we just kill all
background jobs if memory gets low" model like on android and other
such things.


DMA-buf and pinning for scanout are the only exceptions since the implementation 
wouldn't have been possible otherwise.


Most drivers do not have a shrinker, or whether a BO is purgeable is
entirely controlled by userspace (madvise).  They can be
categorized as "a security problem where userspace is able to pin
unrestricted amounts of memory".  But those drivers are normally found
on systems without swap.  I don't think the issue applies.


This is completely independent of whether swap is available or not.

Pinning of pages in large quantities can result in all kinds of problems and 
needs to be prevented even without swap.

Yeah you don't just kill swap, you kill a ton of other kernel services
with mass pinning. I think even the pinning of scanout buffers for
i915 from system memory is somewhat questionable (but I guess small
enough to not matter in practice).


Yeah, we had a really hard time explaining that internally as well.

Christian.


Otherwise you can run into problems even with simple I/O operations, for example.


Of the desktop GPU drivers, i915's shrinker certainly supports purging
to swap.  TTM is a bit hard to follow.  I can't really tell if amdgpu
or nouveau supports that.  virtio-gpu is more commonly found on
systems with swaps so I think it should follow the desktop practices?


What we do at least in the amdgpu, radeon, i915 and nouveau is to only allow it 
for scanout and that in turn is limited by the physical number of CRTCs on the 
board.
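
A minimal sketch of that pin-for-scanout pattern (struct my_bo, to_my_bo(),
my_bo_pin() and my_bo_unpin() are hypothetical driver helpers, not an
existing API):

#include <drm/drm_framebuffer.h>
#include <drm/drm_plane.h>

/*
 * The pin lifetime is bounded by scanout: pin in .prepare_fb, unpin in
 * .cleanup_fb, so the number of simultaneously pinned buffers is capped
 * by the number of planes/CRTCs on the board.
 */
static int my_plane_prepare_fb(struct drm_plane *plane,
			       struct drm_plane_state *new_state)
{
	if (!new_state->fb)
		return 0;

	return my_bo_pin(to_my_bo(new_state->fb->obj[0]));
}

static void my_plane_cleanup_fb(struct drm_plane *plane,
				struct drm_plane_state *old_state)
{
	if (old_state->fb)
		my_bo_unpin(to_my_bo(old_state->fb->obj[0]));
}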


Truth is, the emulated virtio-gpu device always supports page moves
with VIRTIO_GPU_CMD_RESOURCE_{ATTACH,DETACH}_BACKING.  It is just that
the driver does not make use of them.  That makes this less of an
issue because the driver can be fixed anytime (fingers crossed that the
emulator won't have bugs in these untested paths).  This issue becomes
more urgent because we are considering adding a new HW command[1]
where page moves will be disallowed.  We definitely don't want a HW
command that is inherently insecure, if BO pages pinned for the
lifetime is considered a security issue on desktops.


Yeah, that's probably not such a good idea :)

Well if the pinning is just for the duration of the hw command, it's
fine, just like batch buffers. But if it's long term pinning then that
doesn't sound like a good idea. RDMA has this as their inherent hw
programming model (except if your hw is really fancy and has hw page
fault handling on the rdma nic), and they hard limit such pins to what
you can mlock (or something similar within rdma).
-Daniel
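
For reference, a rough sketch of that RDMA-style accounting
(my_account_pinned() is a made-up name; rlimit() and mm->pinned_vm are the
real kernel interfaces):

#include <linux/mm.h>
#include <linux/sched/signal.h>

/*
 * Charge long-term pins against RLIMIT_MEMLOCK and refuse anything
 * beyond it, unless the caller has CAP_IPC_LOCK.
 */
static int my_account_pinned(struct mm_struct *mm, unsigned long npages)
{
	unsigned long lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if (atomic64_add_return(npages, &mm->pinned_vm) > lock_limit &&
	    !capable(CAP_IPC_LOCK)) {
		atomic64_sub(npages, &mm->pinned_vm);
		return -ENOMEM;	/* over the user's memlock budget */
	}

	return 0;
}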


Regards,
Christian.


[1] VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB
https://gitlab.freedesktop.org/virgl/drm-misc-next/-/blob/virtio-gpu-next/include/uapi/linux/virtio_gpu.h#L396

Re: [PATCH 06/11] drm/radeon: stop using TTM_MEMTYPE_FLAG_MAPPABLE

2020-07-22 Thread Christian König

On 22.07.20 at 07:34, Daniel Vetter wrote:

On Tue, Jul 21, 2020 at 4:46 PM Christian König
 wrote:

On 21.07.20 at 11:24, dan...@ffwll.ch wrote:

On Tue, Jul 21, 2020 at 09:32:40AM +0200, Christian König wrote:

The driver doesn't expose any not-mapable memory resources.

Signed-off-by: Christian König 
---
   drivers/gpu/drm/radeon/radeon_ttm.c | 13 -
   1 file changed, 4 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c 
b/drivers/gpu/drm/radeon/radeon_ttm.c
index 54af06df865b..b474781a0920 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -76,7 +76,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, 
uint32_t type,
  switch (type) {
  case TTM_PL_SYSTEM:
  /* System memory */
-man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+man->flags = 0;
  man->available_caching = TTM_PL_MASK_CACHING;
  man->default_caching = TTM_PL_FLAG_CACHED;
  break;
@@ -84,7 +84,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, 
uint32_t type,
  man->func = &ttm_bo_manager_func;
  man->available_caching = TTM_PL_MASK_CACHING;
  man->default_caching = TTM_PL_FLAG_CACHED;
-man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+man->flags = 0;
   #if IS_ENABLED(CONFIG_AGP)
  if (rdev->flags & RADEON_IS_AGP) {
  if (!rdev->ddev->agp) {
@@ -92,8 +92,6 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, 
uint32_t type,
(unsigned)type);
  return -EINVAL;
  }
-if (!rdev->ddev->agp->cant_use_aperture)
-man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;

There is a bunch of agp drivers (alpha, ppc, that kind of stuff) with this
flag set. And radeon.ko did at least once work on these. And your patch to
disable agp only changes the default, it doesn't rip out the code.

The key point is that the flags for AGP are the same as the ones for the
PCIe path. So no functional change at all :)

I misread the code somehow, I didn't spot the unconditional setting of
FLAG_MAPPABLE for all TTM_PL_TT, irrespective of agp or not, somehow
thought that's another case.

Reviewed-by: Daniel Vetter 


And for the amdgpu patch? Otherwise I just ping Alex for an rb.

Thanks,
Christian.




The real handling of cant_use_aperture is in radeon_ttm_io_mem_reserve().

Christian.


So not sure your assumption here is correct.
-Daniel


  man->available_caching = TTM_PL_FLAG_UNCACHED |
   TTM_PL_FLAG_WC;
  man->default_caching = TTM_PL_FLAG_WC;
@@ -103,8 +101,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, 
uint32_t type,
  case TTM_PL_VRAM:
  /* "On-card" video ram */
  man->func = &ttm_bo_manager_func;
-man->flags = TTM_MEMTYPE_FLAG_FIXED |
- TTM_MEMTYPE_FLAG_MAPPABLE;
+man->flags = TTM_MEMTYPE_FLAG_FIXED;
  man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
  man->default_caching = TTM_PL_FLAG_WC;
  break;
@@ -394,7 +391,6 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, 
bool evict,

   static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct 
ttm_mem_reg *mem)
   {
-struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
  struct radeon_device *rdev = radeon_get_rdev(bdev);

  mem->bus.addr = NULL;
@@ -402,8 +398,7 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device 
*bdev, struct ttm_mem_
  mem->bus.size = mem->num_pages << PAGE_SHIFT;
  mem->bus.base = 0;
  mem->bus.is_iomem = false;
-if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
-return -EINVAL;
+
  switch (mem->mem_type) {
  case TTM_PL_SYSTEM:
  /* system memory */
--
2.17.1







Re: [PATCH v1 1/1] drm/bridge: nwl-dsi: Drop DRM_BRIDGE_ATTACH_NO_CONNECTOR check.

2020-07-22 Thread Laurent Pinchart
Hi Guido,

Thank you for the patch.

On Sat, Jul 18, 2020 at 08:26:37PM +0200, Guido Günther wrote:
> We don't create a connector but let panel_bridge handle that so there's
> no point in rejecting DRM_BRIDGE_ATTACH_NO_CONNECTOR.
> 
> Signed-off-by: Guido Günther 

Reviewed-by: Laurent Pinchart 

> ---
>  drivers/gpu/drm/bridge/nwl-dsi.c | 5 -
>  1 file changed, 5 deletions(-)
> 
> diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c 
> b/drivers/gpu/drm/bridge/nwl-dsi.c
> index 77a79af70914..ce94f797d090 100644
> --- a/drivers/gpu/drm/bridge/nwl-dsi.c
> +++ b/drivers/gpu/drm/bridge/nwl-dsi.c
> @@ -918,11 +918,6 @@ static int nwl_dsi_bridge_attach(struct drm_bridge 
> *bridge,
>   struct drm_panel *panel;
>   int ret;
>  
> - if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
> - DRM_ERROR("Fix bridge driver to make connector optional!");
> - return -EINVAL;
> - }
> -
>   ret = drm_of_find_panel_or_bridge(dsi->dev->of_node, 1, 0, &panel,
> &panel_bridge);
>   if (ret)

-- 
Regards,

Laurent Pinchart


Re: pages pinned for BO lifetime and security

2020-07-22 Thread Daniel Vetter
On Wed, Jul 22, 2020 at 1:12 PM Christian König
 wrote:
>
> On 22.07.20 at 09:32, Daniel Vetter wrote:
> > On Wed, Jul 22, 2020 at 9:19 AM Christian König
> >  wrote:
> >> Am 22.07.20 um 02:22 schrieb Gurchetan Singh:
> >>
> >> +Christian who added DMABUF_MOVE_NOTIFY which added the relevant blurb:
> >>
> >> https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/dma-buf/Kconfig#n46
> >>
> >> Currently, the user seems to be amdgpu for P2P dma-buf, and it seems to plumb
> >> the ttm (*move_notify) callback to dma-buf.  We're not sure if it's a security 
> >> issue occurring across DRM drivers, or one more specific to the new amdgpu 
> >> use case.
> >>
> >> On Tue, Jul 21, 2020 at 1:03 PM Chia-I Wu  wrote:
> >>> Hi list,
> >>>
> >>> virtio-gpu is moving in the direction where BO pages are pinned for
> >>> the lifetime for simplicity.  I am wondering if that is considered a
> >>> security issue in general, especially after running into the
> >>> description of the new DMABUF_MOVE_NOTIFY config option.
> >>
> >> Yes, that is generally considered a denial-of-service possibility and so far 
> >> Dave and Daniel have rejected all tries to upstream stuff like this as far 
> >> as I know.
> > Uh we have merged pretty much all arm-soc drivers without real
> > shrinkers. Whether that was a good idea or not is maybe a different
> > question - now that we do have pretty good helpers maybe we should
> > poke this a bit more. But then SoCs Suck (tm).
>
> I was under the impression that those SoC drivers still use the GEM
> helpers which unpin stuff when it is not in use. But I might be wrong.

It's kinda mostly there, even some helpers for shrinking, but a)
helpers only, not all drivers use them; b) for purgeable objects only, not
generally for inactive stuff - there's no active use tracking; c) cma
helpers (ok that one is only for vc4 as the render driver) don't even
have that. I had some slow burner series to get us towards dma_resv
locking in shmem helpers and then maybe even a common shrinker helper
with some "actually kick it out now" callback, but yeah never got
there.

So maybe per-device object shrinker helper would be something neat we
could lift out of ttm (when it's happening), maybe with a simple
callback somewhere in its lru tracking. Probably best if the shrinker
lru is outright separate from anything else or it just gets messy.
-Daniel
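
A rough sketch of such a per-device shrinker (struct my_device and
my_device_evict_lru() are hypothetical; struct shrinker, register_shrinker()
and DEFAULT_SEEKS are the real core-mm interfaces):

#include <linux/shrinker.h>

static unsigned long my_shrink_count(struct shrinker *s,
				     struct shrink_control *sc)
{
	struct my_device *mydev = container_of(s, struct my_device, shrinker);

	return READ_ONCE(mydev->nr_evictable_pages);
}

static unsigned long my_shrink_scan(struct shrinker *s,
				    struct shrink_control *sc)
{
	struct my_device *mydev = container_of(s, struct my_device, shrinker);

	/* walk this device's lru, trylock and evict until enough is freed */
	return my_device_evict_lru(mydev, sc->nr_to_scan);
}

static int my_device_register_shrinker(struct my_device *mydev)
{
	mydev->shrinker.count_objects = my_shrink_count;
	mydev->shrinker.scan_objects = my_shrink_scan;
	mydev->shrinker.seeks = DEFAULT_SEEKS;

	return register_shrinker(&mydev->shrinker);
}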

> > But for real gpus they do indeed all have shrinkers, and not just "pin
> > everything forever" model. Real gpus = stuff you might run on servers
> > or multi-app and all that stuff, not with a simple "we just kill all
> > background jobs if memory gets low" model like on android and other
> > such things.
> >
> >> DMA-buf and pinning for scanout are the only exceptions since the 
> >> implementation wouldn't have been possible otherwise.
> >>
> >>> Most drivers do not have a shrinker, or whether a BO is purgeable is
> >>> entirely controlled by userspace (madvise).  They can be
> >>> categorized as "a security problem where userspace is able to pin
> >>> unrestricted amounts of memory".  But those drivers are normally found
> >>> on systems without swap.  I don't think the issue applies.
> >>
> >> This is completely independent of whether swap is available or not.
> >>
> >> Pinning of pages in large quantities can result in all kinds of problems 
> >> and needs to be prevented even without swap.
> > Yeah you don't just kill swap, you kill a ton of other kernel services
> > with mass pinning. I think even the pinning of scanout buffers for
> > i915 from system memory is somewhat questionable (but I guess small
> > enough to not matter in practice).
>
> Yeah, we had a really hard time explaining that internally as well.
>
> Christian.
>
> >> Otherwise you can run into problems even with simple I/O operations for 
> >> example.
> >>
> >>> Of the desktop GPU drivers, i915's shrinker certainly supports purging
> >>> to swap.  TTM is a bit hard to follow.  I can't really tell if amdgpu
> >>> or nouveau supports that.  virtio-gpu is more commonly found on
> >>> systems with swaps so I think it should follow the desktop practices?
> >>
> >> What we do at least in the amdgpu, radeon, i915 and nouveau is to only 
> >> allow it for scanout and that in turn is limited by the physical number of 
> >> CRTCs on the board.
> >>
> >>> Truth is, the emulated virtio-gpu device always supports page moves
> >>> with VIRTIO_GPU_CMD_RESOURCE_{ATTACH,DETACH}_BACKING.  It is just that
> >>> the driver does not make use of them.  That makes this less of an
> >>> issue because the driver can be fixed anytime (fingers crossed that the
> >>> emulator won't have bugs in these untested paths).  This issue 

Re: [Intel-gfx] [PATCH v1] drm/i915/dsi: Drop double check for ACPI companion device

2020-07-22 Thread Ville Syrjälä
On Fri, May 29, 2020 at 03:33:17PM +0300, Andy Shevchenko wrote:
> acpi_dev_get_resources() does perform the NULL pointer check against
> ACPI companion device which is given as function parameter. Thus,
> there is no need to duplicate this check in the caller.
> 
> Signed-off-by: Andy Shevchenko 

Sorry, I did look at this but apparently forgot to reply...

> ---
>  drivers/gpu/drm/i915/display/intel_dsi_vbt.c | 24 
>  1 file changed, 10 insertions(+), 14 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c 
> b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
> index 574dcfec9577..6f9e08cda964 100644
> --- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
> +++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
> @@ -426,23 +426,19 @@ static void i2c_acpi_find_adapter(struct intel_dsi 
> *intel_dsi,
>  {
>   struct drm_device *drm_dev = intel_dsi->base.base.dev;
>   struct device *dev = &drm_dev->pdev->dev;
> - struct acpi_device *acpi_dev;
> + struct acpi_device *acpi_dev = ACPI_COMPANION(dev);
>   struct list_head resource_list;
>   struct i2c_adapter_lookup lookup;
>  
> - acpi_dev = ACPI_COMPANION(dev);
> - if (acpi_dev) {
> - memset(&lookup, 0, sizeof(lookup));
> - lookup.slave_addr = slave_addr;
> - lookup.intel_dsi = intel_dsi;
> - lookup.dev_handle = acpi_device_handle(acpi_dev);
> -
> - INIT_LIST_HEAD(&resource_list);
> - acpi_dev_get_resources(acpi_dev, &resource_list,
> -i2c_adapter_lookup,
> -&lookup);
> - acpi_dev_free_resource_list(&resource_list);
> - }
> + memset(&lookup, 0, sizeof(lookup));
> + lookup.slave_addr = slave_addr;
> + lookup.intel_dsi = intel_dsi;
> + lookup.dev_handle = acpi_device_handle(acpi_dev);

struct i2c_adapter_lookup lookup = {
.slave_addr = ...
};

?

> +
> + INIT_LIST_HEAD(&resource_list);

Declare as LIST_HEAD(resource_list); ?

> + acpi_dev_get_resources(acpi_dev, &resource_list,
> +i2c_adapter_lookup, &lookup);
> + acpi_dev_free_resource_list(&resource_list);

I was very confused by this code since at first glance it appears to do
absolutely nothing. After a deeper look it looks like
i2c_adapter_lookup() magically mutates intel_dsi->i2c_bus_num.
Did I mention I hate functions with side effects? IMO it would be much
better if i2c_adapter_lookup() did what it says on the tin and just
returned the adapter number and let the caller deal with it. But
this is a pre-existing issue with the code and so not directly related
to your patch.
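
Folding the two mechanical suggestions together, the function could look
roughly like this (an untested sketch; the exact second parameter is assumed
from the patch context, and the i2c_adapter_lookup() side effect is left
untouched):

static void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
				  const u16 slave_addr)
{
	struct drm_device *drm_dev = intel_dsi->base.base.dev;
	struct device *dev = &drm_dev->pdev->dev;
	struct i2c_adapter_lookup lookup = {
		.slave_addr = slave_addr,
		.intel_dsi = intel_dsi,
		.dev_handle = acpi_device_handle(ACPI_COMPANION(dev)),
	};
	LIST_HEAD(resource_list);

	acpi_dev_get_resources(ACPI_COMPANION(dev), &resource_list,
			       i2c_adapter_lookup, &lookup);
	acpi_dev_free_resource_list(&resource_list);
}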

>  }
>  #else
>  static inline void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
> -- 
> 2.26.2
> 

-- 
Ville Syrjälä
Intel


Re: [Linaro-mm-sig] [PATCH 1/2] dma-buf.rst: Document why indefinite fences are a bad idea

2020-07-22 Thread Daniel Vetter
On Wed, Jul 22, 2020 at 12:31 PM Thomas Hellström (Intel)
 wrote:
>
>
> On 2020-07-22 11:45, Daniel Vetter wrote:
> > On Wed, Jul 22, 2020 at 10:05 AM Thomas Hellström (Intel)
> >  wrote:
> >>
> >> On 2020-07-22 09:11, Daniel Vetter wrote:
> >>> On Wed, Jul 22, 2020 at 8:45 AM Thomas Hellström (Intel)
> >>>  wrote:
>  On 2020-07-22 00:45, Dave Airlie wrote:
> > On Tue, 21 Jul 2020 at 18:47, Thomas Hellström (Intel)
> >  wrote:
> >> On 7/21/20 9:45 AM, Christian König wrote:
> >>> On 21.07.20 at 09:41, Daniel Vetter wrote:
>  On Mon, Jul 20, 2020 at 01:15:17PM +0200, Thomas Hellström (Intel)
>  wrote:
> > Hi,
> >
> > On 7/9/20 2:33 PM, Daniel Vetter wrote:
> >> Comes up every few years, gets somewhat tedious to discuss, let's
> >> write this down once and for all.
> >>
> >> What I'm not sure about is whether the text should be more 
> >> explicit in
> >> flat out mandating the amdkfd eviction fences for long running 
> >> compute
> >> workloads or workloads where userspace fencing is allowed.
> > Although (in my humble opinion) it might be possible to completely
> > untangle
> > kernel-introduced fences for resource management and dma-fences used
> > for
> > completion- and dependency tracking and lift a lot of restrictions
> > for the
> > dma-fences, including prohibiting infinite ones, I think this makes
> > sense
> > describing the current state.
>  Yeah I think a future patch needs to type up how we want to make that
>  happen (for some cross driver consistency) and what needs to be
>  considered. Some of the necessary parts are already there (with like 
>  the
>  preemption fences amdkfd has as an example), but I think some clear 
>  docs
>  on what's required from both hw, drivers and userspace would be 
>  really
>  good.
> >>> I'm currently writing that up, but probably still need a few days for
> >>> this.
> >> Great! I put down some (very) initial thoughts a couple of weeks ago
> >> building on eviction fences for various hardware complexity levels 
> >> here:
> >>
> >> https://gitlab.freedesktop.org/thomash/docs/-/blob/master/Untangling%20dma-fence%20and%20memory%20allocation.odt
> > We are seeing HW that has recoverable GPU page faults but only for
> > compute tasks, and scheduler without semaphores hw for graphics.
> >
> > So a single driver may have to expose both models to userspace and
> > also introduces the problem of how to interoperate between the two
> > models on one card.
> >
> > Dave.
>  Hmm, yes to begin with it's important to note that this is not a
>  replacement for new programming models or APIs, This is something that
>  takes place internally in drivers to mitigate many of the restrictions
>  that are currently imposed on dma-fence and documented in this and
>  previous series. It's basically the driver-private narrow completions
>  Jason suggested in the lockdep patches discussions implemented the same
>  way as eviction-fences.
> 
>  The memory fence API would be local to helpers and middle-layers like
>  TTM, and the corresponding drivers.  The only cross-driver-like
>  visibility would be that the dma-buf move_notify() callback would not be
>  allowed to wait on dma-fences or something that depends on a dma-fence.
> >>> Because we can't preempt (on some engines at least) we already have
> >>> the requirement that cross driver buffer management can get stuck on a
> >>> dma-fence. Not even taking into account the horrors we do with
> >>> userptr, which are cross driver no matter what. Limiting move_notify
> >>> to memory fences only doesn't work, since the pte clearing might need
> >>> to wait for a dma_fence first. Hence this becomes a full end-of-batch
> >>> fence, not just a limited kernel-internal memory fence.
> >> For non-preemptible hardware the memory fence typically *is* the
> >> end-of-batch fence. (Unless, as documented, there is a scheduler
> >> consuming sync-file dependencies in which case the memory fence wait
> >> needs to be able to break out of that). The key thing is not that we can
> >> break out of execution, but that we can break out of dependencies, since
> >> when we're executing, all dependencies (modulo semaphores) are already
> >> fulfilled. That's what's eliminating the deadlocks.
> >>
> >>> That's kinda why I think only reasonable option is to toss in the
> >>> towel and declare dma-fence to be the memory fence (and suck up all
> >>> the consequences of that decision as uapi, which is kinda where we
> >>> are), and construct something new & entirely free-wheeling for userspace
> >>> fencing. But only for engines that allow enough preempt/gpu page
> >>> faulting to make that possib

Re: [PATCH -next] gpu: drm: Fix spinlock vblank_time_lock use error.

2020-07-22 Thread Ville Syrjälä
On Wed, Jul 22, 2020 at 01:05:27AM +, Xu Qiang wrote:
> The drm_handle_vblank() function runs in interrupt context.
> Therefore, the vblank_time_lock spinlock is acquired
> from interrupt context.
> 
> Cc: 
> Signed-off-by: Xu Qiang 
> ---
>  drivers/gpu/drm/drm_vblank.c | 17 ++---
>  1 file changed, 10 insertions(+), 7 deletions(-)
> 
> diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
> index f402c75b9d34..4ca63ff33a43 100644
> --- a/drivers/gpu/drm/drm_vblank.c
> +++ b/drivers/gpu/drm/drm_vblank.c
> @@ -229,10 +229,11 @@ static void drm_reset_vblank_timestamp(struct 
> drm_device *dev, unsigned int pipe
>  {
>   u32 cur_vblank;
>   bool rc;
> + unsigned long irqflags;
>   ktime_t t_vblank;
>   int count = DRM_TIMESTAMP_MAXRETRIES;
>  
> - spin_lock(&dev->vblank_time_lock);
> + spin_lock_irqsave(&dev->vblank_time_lock, irqflags);

Nak. This is always called with interrupts off, so no point in wasting
time saving/restoring the flags. And it's the same situation for all the
other cases you have below.
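
In sketch form (my_irq_handler() and struct my_dev are hypothetical), the
rule being applied here is:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

/*
 * spin_lock_irqsave() is only needed when some caller may take the lock
 * with interrupts enabled. drm_handle_vblank() runs in hardirq context
 * (and under event_lock, which is taken with irqsave), so interrupts
 * are already off and a plain spin_lock() is correct and cheaper.
 */
static irqreturn_t my_irq_handler(int irq, void *arg)
{
	struct my_dev *mydev = arg;

	spin_lock(&mydev->lock);	/* IRQs are already disabled here */
	/* ... sample counters, update vblank timestamps ... */
	spin_unlock(&mydev->lock);

	return IRQ_HANDLED;
}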

>  
>   /*
>* sample the current counter to avoid random jumps
> @@ -257,7 +258,7 @@ static void drm_reset_vblank_timestamp(struct drm_device 
> *dev, unsigned int pipe
>*/
>   store_vblank(dev, pipe, 1, t_vblank, cur_vblank);
>  
> - spin_unlock(&dev->vblank_time_lock);
> + spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
>  }
>  
>  /*
> @@ -1106,11 +1107,12 @@ static int __enable_vblank(struct drm_device *dev, 
> unsigned int pipe)
>  static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe)
>  {
>   struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
> + unsigned long irqflags;
>   int ret = 0;
>  
>   assert_spin_locked(&dev->vbl_lock);
>  
> - spin_lock(&dev->vblank_time_lock);
> + spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
>  
>   if (!vblank->enabled) {
>   /*
> @@ -1136,7 +1138,7 @@ static int drm_vblank_enable(struct drm_device *dev, 
> unsigned int pipe)
>   }
>   }
>  
> - spin_unlock(&dev->vblank_time_lock);
> + spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
>  
>   return ret;
>  }
> @@ -1917,6 +1919,7 @@ bool drm_handle_vblank(struct drm_device *dev, unsigned 
> int pipe)
>  {
>   struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
>   unsigned long irqflags;
> + unsigned long irqflags_vblank;
>   bool disable_irq;
>  
>   if (drm_WARN_ON_ONCE(dev, !drm_dev_has_vblank(dev)))
> @@ -1931,18 +1934,18 @@ bool drm_handle_vblank(struct drm_device *dev, 
> unsigned int pipe)
>* vblank enable/disable, as this would cause inconsistent
>* or corrupted timestamps and vblank counts.
>*/
> - spin_lock(&dev->vblank_time_lock);
> + spin_lock_irqsave(&dev->vblank_time_lock, irqflags_vblank);
>  
>   /* Vblank irq handling disabled. Nothing to do. */
>   if (!vblank->enabled) {
> - spin_unlock(&dev->vblank_time_lock);
> + spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags_vblank);
>   spin_unlock_irqrestore(&dev->event_lock, irqflags);
>   return false;
>   }
>  
>   drm_update_vblank_count(dev, pipe, true);
>  
> - spin_unlock(&dev->vblank_time_lock);
> + spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags_vblank);
>  
>   wake_up(&vblank->queue);
>  
> -- 
> 2.25.0
> 

-- 
Ville Syrjälä
Intel


Re: [PATCH 06/11] drm/radeon: stop using TTM_MEMTYPE_FLAG_MAPPABLE

2020-07-22 Thread Daniel Vetter
On Wed, Jul 22, 2020 at 1:13 PM Christian König
 wrote:
>
> On 22.07.20 at 07:34, Daniel Vetter wrote:
> > On Tue, Jul 21, 2020 at 4:46 PM Christian König
> >  wrote:
> >> On 21.07.20 at 11:24, dan...@ffwll.ch wrote:
> >>> On Tue, Jul 21, 2020 at 09:32:40AM +0200, Christian König wrote:
>  The driver doesn't expose any not-mapable memory resources.
> 
>  Signed-off-by: Christian König 
>  ---
> drivers/gpu/drm/radeon/radeon_ttm.c | 13 -
> 1 file changed, 4 insertions(+), 9 deletions(-)
> 
>  diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c 
>  b/drivers/gpu/drm/radeon/radeon_ttm.c
>  index 54af06df865b..b474781a0920 100644
>  --- a/drivers/gpu/drm/radeon/radeon_ttm.c
>  +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
>  @@ -76,7 +76,7 @@ static int radeon_init_mem_type(struct ttm_bo_device 
>  *bdev, uint32_t type,
>    switch (type) {
>    case TTM_PL_SYSTEM:
>    /* System memory */
>  -man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
>  +man->flags = 0;
>    man->available_caching = TTM_PL_MASK_CACHING;
>    man->default_caching = TTM_PL_FLAG_CACHED;
>    break;
>  @@ -84,7 +84,7 @@ static int radeon_init_mem_type(struct ttm_bo_device 
>  *bdev, uint32_t type,
>    man->func = &ttm_bo_manager_func;
>    man->available_caching = TTM_PL_MASK_CACHING;
>    man->default_caching = TTM_PL_FLAG_CACHED;
>  -man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
>  +man->flags = 0;
> #if IS_ENABLED(CONFIG_AGP)
>    if (rdev->flags & RADEON_IS_AGP) {
>    if (!rdev->ddev->agp) {
>  @@ -92,8 +92,6 @@ static int radeon_init_mem_type(struct ttm_bo_device 
>  *bdev, uint32_t type,
>  (unsigned)type);
>    return -EINVAL;
>    }
>  -if (!rdev->ddev->agp->cant_use_aperture)
>  -man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
> >>> There is a bunch of agp drivers (alpha, ppc, that kind of stuff) with this
> >>> flag set. And radeon.ko did at least once work on these. And your patch to
> >>> disable agp only changes the default, it doesn't rip out the code.
> >> The key point is that the flags for AGP are the same as the ones for the
> >> PCIe path. So no functional change at all :)
> > I misread the code somehow, I didn't spot the unconditional setting of
> > FLAG_MAPPABLE for all TTM_PL_TT, irrespective of agp or not, somehow
> > thought that's another case.
> >
> > Reviewed-by: Daniel Vetter 
>
> And for the amdgpu patch? Otherwise I just ping Alex for an rb.

See my question over there, I'm not seeing how the code prevents mmap
for AMDGPU_PL_* domains after your patch. Once that's cleared up happy
to r-b that one and the final one too.
-Daniel

>
> Thanks,
> Christian.
>
> >
> >> The real handling of cant_use_aperture is in radeon_ttm_io_mem_reserve().
> >>
> >> Christian.
> >>
> >>> So not sure your assumption here is correct.
> >>> -Daniel
> >>>
>    man->available_caching = TTM_PL_FLAG_UNCACHED |
> TTM_PL_FLAG_WC;
>    man->default_caching = TTM_PL_FLAG_WC;
>  @@ -103,8 +101,7 @@ static int radeon_init_mem_type(struct ttm_bo_device 
>  *bdev, uint32_t type,
>    case TTM_PL_VRAM:
>    /* "On-card" video ram */
>    man->func = &ttm_bo_manager_func;
>  -man->flags = TTM_MEMTYPE_FLAG_FIXED |
>  - TTM_MEMTYPE_FLAG_MAPPABLE;
>  +man->flags = TTM_MEMTYPE_FLAG_FIXED;
>    man->available_caching = TTM_PL_FLAG_UNCACHED | 
>  TTM_PL_FLAG_WC;
>    man->default_caching = TTM_PL_FLAG_WC;
>    break;
>  @@ -394,7 +391,6 @@ static int radeon_bo_move(struct ttm_buffer_object 
>  *bo, bool evict,
> 
> static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, 
>  struct ttm_mem_reg *mem)
> {
>  -struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
>    struct radeon_device *rdev = radeon_get_rdev(bdev);
> 
>    mem->bus.addr = NULL;
>  @@ -402,8 +398,7 @@ static int radeon_ttm_io_mem_reserve(struct 
>  ttm_bo_device *bdev, struct ttm_mem_
>    mem->bus.size = mem->num_pages << PAGE_SHIFT;
>    mem->bus.base = 0;
>    mem->bus.is_iomem = false;
>  -if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
>  -return -EINVAL;
>  +
>    switch (mem->mem_type) {
>    case TTM_PL_SYSTEM:
>    /* system memory */
>  --
>  2.17.1
> 
>  

Re: [PATCH v7 2/3] drm: bridge: Add support for Cadence MHDP DPI/DP bridge

2020-07-22 Thread kernel test robot
Hi Swapnil,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on robh/for-next]
[also build test WARNING on linus/master v5.8-rc6 next-20200721]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:
https://github.com/0day-ci/linux/commits/Swapnil-Jakhade/drm-Add-support-for-Cadence-MHDP-DPI-DP-bridge-and-J721E-wrapper/20200722-154322
base:   https://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git for-next
config: x86_64-allmodconfig (attached as .config)
compiler: gcc-9 (Debian 9.3.0-14) 9.3.0
reproduce (this is a W=1 build):
# save the attached .config to linux build tree
make W=1 ARCH=x86_64 

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot 

All warnings (new ones prefixed by >>):

   drivers/gpu/drm/bridge/cdns-mhdp-core.c: In function 'cdns_mhdp_fw_activate':
>> drivers/gpu/drm/bridge/cdns-mhdp-core.c:765:10: warning: conversion from 
>> 'long unsigned int' to 'unsigned int' changes value from 
>> '18446744073709551613' to '4294967293' [-Woverflow]
 765 |   writel(~CDNS_APB_INT_MASK_SW_EVENT_INT,
   In file included from include/linux/bits.h:23,
from include/linux/bitops.h:5,
from include/linux/kernel.h:12,
from include/linux/clk.h:13,
from drivers/gpu/drm/bridge/cdns-mhdp-core.c:10:
   drivers/gpu/drm/bridge/cdns-mhdp-core.c: In function 
'cdns_mhdp_link_training_init':
   include/linux/bits.h:26:28: warning: comparison of unsigned expression < 0 
is always false [-Wtype-limits]
  26 |   __builtin_constant_p((l) > (h)), (l) > (h), 0)))
 |^
   include/linux/build_bug.h:16:62: note: in definition of macro 
'BUILD_BUG_ON_ZERO'
  16 | #define BUILD_BUG_ON_ZERO(e) ((int)(sizeof(struct { int:(-!!(e)); 
})))
 |  ^
   include/linux/bits.h:39:3: note: in expansion of macro 'GENMASK_INPUT_CHECK'
  39 |  (GENMASK_INPUT_CHECK(h, l) + __GENMASK(h, l))
 |   ^~~
   drivers/gpu/drm/bridge/cdns-mhdp-core.h:116:35: note: in expansion of macro 
'GENMASK'
 116 | #define CDNS_DP_LANE_EN_LANES(x)  GENMASK((x) - 1, 0)
 |   ^~~
   drivers/gpu/drm/bridge/cdns-mhdp-core.c:888:8: note: in expansion of macro 
'CDNS_DP_LANE_EN_LANES'
 888 |CDNS_DP_LANE_EN_LANES(mhdp->link.num_lanes));
 |^
   include/linux/bits.h:26:40: warning: comparison of unsigned expression < 0 
is always false [-Wtype-limits]
  26 |   __builtin_constant_p((l) > (h)), (l) > (h), 0)))
 |^
   include/linux/build_bug.h:16:62: note: in definition of macro 
'BUILD_BUG_ON_ZERO'
  16 | #define BUILD_BUG_ON_ZERO(e) ((int)(sizeof(struct { int:(-!!(e)); 
})))
 |  ^
   include/linux/bits.h:39:3: note: in expansion of macro 'GENMASK_INPUT_CHECK'
  39 |  (GENMASK_INPUT_CHECK(h, l) + __GENMASK(h, l))
 |   ^~~
   drivers/gpu/drm/bridge/cdns-mhdp-core.h:116:35: note: in expansion of macro 
'GENMASK'
 116 | #define CDNS_DP_LANE_EN_LANES(x)  GENMASK((x) - 1, 0)
 |   ^~~
   drivers/gpu/drm/bridge/cdns-mhdp-core.c:888:8: note: in expansion of macro 
'CDNS_DP_LANE_EN_LANES'
 888 |CDNS_DP_LANE_EN_LANES(mhdp->link.num_lanes));
 |^
   drivers/gpu/drm/bridge/cdns-mhdp-core.c: In function 
'cdns_mhdp_fill_host_caps':
   drivers/gpu/drm/bridge/cdns-mhdp-core.c:1378:2: error: implicit declaration 
of function 'phy_get_attrs'; did you mean 'vfs_getattr'? 
[-Werror=implicit-function-declaration]
1378 |  phy_get_attrs(mhdp->phy, &attrs);
 |  ^
 |  vfs_getattr
   drivers/gpu/drm/bridge/cdns-mhdp-core.c:1384:19: error: 'struct phy_attrs' 
has no member named 'max_link_rate'
1384 |  link_rate = attrs.max_link_rate;
 |   ^
   drivers/gpu/drm/bridge/cdns-mhdp-core.c: In function 'cdns_mhdp_attach':
   drivers/gpu/drm/bridge/cdns-mhdp-core.c:1709:10: warning: conversion from 
'long unsigned int' to 'unsigned int' changes value from '18446744073709551613' 
to '4294967293' [-Woverflow]
1709 |   writel(~CDNS_APB_INT_MASK_SW_EVENT_INT,
   drivers/gpu/drm/bridge/cdns-mhdp-core.c: In function 
'cdns_mhdp_validate_mode_params'
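
Two possible ways to quiet the warning classes above (untested suggestions;
the CDNS_APB_INT_MASK destination register is assumed from the driver
context):

/*
 * -Woverflow: the mask is built with BIT(), which is unsigned long, so
 * the ~ yields a 64-bit value that writel() truncates. An explicit u32
 * cast keeps the behaviour and documents the truncation:
 */
writel((u32)~CDNS_APB_INT_MASK_SW_EVENT_INT,
       mhdp->regs + CDNS_APB_INT_MASK);

/*
 * -Wtype-limits: GENMASK((x) - 1, 0) trips GENMASK_INPUT_CHECK() when x
 * is not a compile-time constant. An equivalent form without the check:
 */
#define CDNS_DP_LANE_EN_LANES(x)	(BIT(x) - 1)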

Re: [PATCH 06/11] drm/radeon: stop using TTM_MEMTYPE_FLAG_MAPPABLE

2020-07-22 Thread Christian König

On 22.07.20 at 13:42, Daniel Vetter wrote:

On Wed, Jul 22, 2020 at 1:13 PM Christian König
 wrote:

On 22.07.20 at 07:34, Daniel Vetter wrote:

On Tue, Jul 21, 2020 at 4:46 PM Christian König
 wrote:

On 21.07.20 at 11:24, dan...@ffwll.ch wrote:

On Tue, Jul 21, 2020 at 09:32:40AM +0200, Christian König wrote:

The driver doesn't expose any not-mapable memory resources.

Signed-off-by: Christian König 
---
drivers/gpu/drm/radeon/radeon_ttm.c | 13 -
1 file changed, 4 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c 
b/drivers/gpu/drm/radeon/radeon_ttm.c
index 54af06df865b..b474781a0920 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -76,7 +76,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, 
uint32_t type,
   switch (type) {
   case TTM_PL_SYSTEM:
   /* System memory */
-man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+man->flags = 0;
   man->available_caching = TTM_PL_MASK_CACHING;
   man->default_caching = TTM_PL_FLAG_CACHED;
   break;
@@ -84,7 +84,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, 
uint32_t type,
   man->func = &ttm_bo_manager_func;
   man->available_caching = TTM_PL_MASK_CACHING;
   man->default_caching = TTM_PL_FLAG_CACHED;
-man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+man->flags = 0;
#if IS_ENABLED(CONFIG_AGP)
   if (rdev->flags & RADEON_IS_AGP) {
   if (!rdev->ddev->agp) {
@@ -92,8 +92,6 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, 
uint32_t type,
 (unsigned)type);
   return -EINVAL;
   }
-if (!rdev->ddev->agp->cant_use_aperture)
-man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;

There is a bunch of agp drivers (alpha, ppc, that kind of stuff) with this
flag set. And radeon.ko did at least once work on these. And your patch to
disable agp only changes the default, it doesn't rip out the code.

The key point is that the flags for AGP are the same as the ones for the
PCIe path. So no functional change at all :)

I misread the code somehow, I didn't spot the unconditional setting of
FLAG_MAPPABLE for all TTM_PL_TT, irrespective of agp or not, somehow
thought that's another case.

Reviewed-by: Daniel Vetter 

And for the amdgpu patch? Otherwise I just ping Alex for an rb.

See my question over there, I'm not seeing how the code prevents mmap
for AMDGPU_PL_* domains after your patch. Once that's cleared up happy
to r-b that one and the final one too.


I already replied, sounds like you never got that.

Anyway see the switch just below the two lines I removed:

    switch (mem->mem_type) {
    case TTM_PL_SYSTEM:



    case TTM_PL_TT:

...

    case TTM_PL_VRAM:

...

    default:
    return -EINVAL;
    }


So again, no functional change at all.

Cheers,
Christian.


-Daniel


Thanks,
Christian.


The real handling of cant_use_aperture is in radeon_ttm_io_mem_reserve().

Christian.


So not sure your assumption here is correct.
-Daniel


   man->available_caching = TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_WC;
   man->default_caching = TTM_PL_FLAG_WC;
@@ -103,8 +101,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, 
uint32_t type,
   case TTM_PL_VRAM:
   /* "On-card" video ram */
   man->func = &ttm_bo_manager_func;
-man->flags = TTM_MEMTYPE_FLAG_FIXED |
- TTM_MEMTYPE_FLAG_MAPPABLE;
+man->flags = TTM_MEMTYPE_FLAG_FIXED;
   man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
   man->default_caching = TTM_PL_FLAG_WC;
   break;
@@ -394,7 +391,6 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, 
bool evict,

static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct 
ttm_mem_reg *mem)
{
-struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
   struct radeon_device *rdev = radeon_get_rdev(bdev);

   mem->bus.addr = NULL;
@@ -402,8 +398,7 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device 
*bdev, struct ttm_mem_
   mem->bus.size = mem->num_pages << PAGE_SHIFT;
   mem->bus.base = 0;
   mem->bus.is_iomem = false;
-if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
-return -EINVAL;
+
   switch (mem->mem_type) {
   case TTM_PL_SYSTEM:
   /* system memory */
--
2.17.1


Re: [PATCH v7 2/3] drm: bridge: Add support for Cadence MHDP DPI/DP bridge

2020-07-22 Thread Laurent Pinchart
Hi Swapnil,

Thank you for the patch.

On Wed, Jul 22, 2020 at 09:40:39AM +0200, Swapnil Jakhade wrote:
> Add a new DRM bridge driver for Cadence MHDP DPTX IP used in TI J721e SoC.
> MHDP DPTX IP is the component that complies with VESA DisplayPort (DP) and
> embedded Display Port (eDP) standards. It integrates a uCPU running the
> embedded firmware (FW), interfaced over an APB interface.
> 
> Basically, it takes a DPI stream as input and outputs it encoded in DP
> format. Currently, it supports only SST mode.
> 
> Co-developed-by: Tomi Valkeinen 
> Signed-off-by: Tomi Valkeinen 
> Co-developed-by: Jyri Sarha 
> Signed-off-by: Jyri Sarha 
> Signed-off-by: Quentin Schulz 
> Signed-off-by: Yuti Amonkar 
> Signed-off-by: Swapnil Jakhade 
> ---
>  drivers/gpu/drm/bridge/Kconfig  |   11 +
>  drivers/gpu/drm/bridge/Makefile |2 +
>  drivers/gpu/drm/bridge/cdns-mhdp-core.c | 2493 +++
>  drivers/gpu/drm/bridge/cdns-mhdp-core.h |  396 
>  4 files changed, 2902 insertions(+)
>  create mode 100644 drivers/gpu/drm/bridge/cdns-mhdp-core.c
>  create mode 100644 drivers/gpu/drm/bridge/cdns-mhdp-core.h
> 
> diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
> index 43271c21d3fc..6a4c324302a8 100644
> --- a/drivers/gpu/drm/bridge/Kconfig
> +++ b/drivers/gpu/drm/bridge/Kconfig
> @@ -27,6 +27,17 @@ config DRM_CDNS_DSI
> Support Cadence DPI to DSI bridge. This is an internal
> bridge and is meant to be directly embedded in a SoC.
>  
> +config DRM_CDNS_MHDP
> + tristate "Cadence DPI/DP bridge"
> + select DRM_KMS_HELPER
> + select DRM_PANEL_BRIDGE
> + depends on OF
> + help
> +   Support Cadence DPI to DP bridge. This is an internal
> +   bridge and is meant to be directly embedded in a SoC.
> +   It takes a DPI stream as input and outputs it encoded
> +   in DP format.
> +
>  config DRM_CHRONTEL_CH7033
>   tristate "Chrontel CH7033 Video Encoder"
>   depends on OF
> diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
> index d63d4b7e4347..0080a9f80f29 100644
> --- a/drivers/gpu/drm/bridge/Makefile
> +++ b/drivers/gpu/drm/bridge/Makefile
> @@ -1,5 +1,7 @@
>  # SPDX-License-Identifier: GPL-2.0
>  obj-$(CONFIG_DRM_CDNS_DSI) += cdns-dsi.o
> +obj-$(CONFIG_DRM_CDNS_MHDP) += cdns-mhdp.o
> +cdns-mhdp-objs := cdns-mhdp-core.o
>  obj-$(CONFIG_DRM_CHRONTEL_CH7033) += chrontel-ch7033.o
>  obj-$(CONFIG_DRM_DISPLAY_CONNECTOR) += display-connector.o
>  obj-$(CONFIG_DRM_LVDS_CODEC) += lvds-codec.o
> diff --git a/drivers/gpu/drm/bridge/cdns-mhdp-core.c 
> b/drivers/gpu/drm/bridge/cdns-mhdp-core.c
> new file mode 100644
> index ..b16c5503cef1
> --- /dev/null
> +++ b/drivers/gpu/drm/bridge/cdns-mhdp-core.c
> @@ -0,0 +1,2493 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * Cadence MHDP DP bridge driver.
> + *
> + * Copyright: 2019 Cadence Design Systems, Inc.
> + *
> + * Author: Quentin Schulz 
> + */
> +
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +
> +#include 
> +
> +#include "cdns-mhdp-core.h"
> +
> +static DECLARE_WAIT_QUEUE_HEAD(fw_load_wq);
> +
> +static int cdns_mhdp_mailbox_read(struct cdns_mhdp_device *mhdp)
> +{
> + int ret, empty;
> +
> + WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));
> +
> + ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_EMPTY,
> +  empty, !empty, MAILBOX_RETRY_US,
> +  MAILBOX_TIMEOUT_US);
> + if (ret < 0)
> + return ret;
> +
> + return readl(mhdp->regs + CDNS_MAILBOX_RX_DATA) & 0xff;
> +}
> +
> +static int cdns_mhdp_mailbox_write(struct cdns_mhdp_device *mhdp, u8 val)
> +{
> + int ret, full;
> +
> + WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));
> +
> + ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_FULL,
> +  full, !full, MAILBOX_RETRY_US,
> +  MAILBOX_TIMEOUT_US);
> + if (ret < 0)
> + return ret;
> +
> + writel(val, mhdp->regs + CDNS_MAILBOX_TX_DATA);
> +
> + return 0;
> +}
> +
> +static int cdns_mhdp_mailbox_validate_receive(struct cdns_mhdp_device *mhdp,
> +   u8 module_id, u8 opcode,
> +   u16 req_size)
> +{
> + u32 mbox_size, i;
> + u8 header[4];
> + int ret;
> +
> + /* read the header of the message */
> + for (i = 0; i < 4; i++) {
> + ret = cdns_mhdp_mailbox_read(mhdp);
> + if (ret < 0)
> + return ret;
> +
> + header[i] = ret;
> + }
> +
> + mbox_size = get_unaligned_be16(header + 2);
> +
> + if (opco

RE: [PATCH v2] io-mapping: Indicate mapping failure

2020-07-22 Thread Ruhl, Michael J
>-Original Message-
>From: Andrew Morton 
>Sent: Tuesday, July 21, 2020 5:24 PM
>To: Ruhl, Michael J 
>Cc: dri-devel@lists.freedesktop.org; Mike Rapoport ;
>Andy Shevchenko ; Chris Wilson
>; sta...@vger.kernel.org
>Subject: Re: [PATCH v2] io-mapping: Indicate mapping failure
>
>On Tue, 21 Jul 2020 21:02:44 + "Ruhl, Michael J"
> wrote:
>
>> >--- a/include/linux/io-mapping.h~io-mapping-indicate-mapping-failure-fix
>> >+++ a/include/linux/io-mapping.h
>> >@@ -107,9 +107,12 @@ io_mapping_init_wc(struct io_mapping *io
>> >   resource_size_t base,
>> >   unsigned long size)
>> > {
>> >+   iomap->iomem = ioremap_wc(base, size);
>> >+   if (!iomap->iomem)
>> >+   return NULL;
>> >+
>>
>> This does make more sense.
>>
>> I am confused by the two follow up emails I just got.
>
>One was your original patch, the other is my suggested alteration.
>
>> Shall I resubmit, or is this path (if !iomap->iomem) return NULL)
>> now in the tree.
>
>All is OK.  If my alteration is acceptable (and, preferably, tested!)
>then when the time comes, I'll fold it into the base patch, add a
>note indicating this change and shall then send it to Linus.

I am good with the change and have tested it.

Instead of the system crashing I get:

i915 :01:00.0: [drm] *ERROR* Failed to setup region(-5) type=1
i915 :01:00.0: Device initialization failed (-5)
i915: probe of :01:00.0 failed with error -5

Which is the expected error.
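
For context, the caller-visible contract after the fix is simply that
io_mapping_init_wc() reports an ioremap_wc() failure by returning NULL.
A minimal caller sketch (hypothetical, not the actual i915 code):

	struct io_mapping iomap;

	/* With the fix, a failed ioremap_wc() shows up here as NULL
	 * instead of leaving iomap.iomem pointing at garbage. */
	if (!io_mapping_init_wc(&iomap, base, size))
		return -EIO;	/* -5, matching the error above */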

If you would like this for the updated patch:

Tested-By: Michael J. Ruhl 

Thanks!

Mike

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH 06/11] drm/radeon: stop using TTM_MEMTYPE_FLAG_MAPPABLE

2020-07-22 Thread Daniel Vetter
On Wed, Jul 22, 2020 at 1:50 PM Christian König
 wrote:
>
> Am 22.07.20 um 13:42 schrieb Daniel Vetter:
> > On Wed, Jul 22, 2020 at 1:13 PM Christian König
> >  wrote:
> >> Am 22.07.20 um 07:34 schrieb Daniel Vetter:
> >>> On Tue, Jul 21, 2020 at 4:46 PM Christian König
> >>>  wrote:
>  Am 21.07.20 um 11:24 schrieb dan...@ffwll.ch:
> > On Tue, Jul 21, 2020 at 09:32:40AM +0200, Christian König wrote:
> >> The driver doesn't expose any not-mapable memory resources.
> >>
> >> Signed-off-by: Christian König 
> >> ---
> >> drivers/gpu/drm/radeon/radeon_ttm.c | 13 -
> >> 1 file changed, 4 insertions(+), 9 deletions(-)
> >>
> >> diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c 
> >> b/drivers/gpu/drm/radeon/radeon_ttm.c
> >> index 54af06df865b..b474781a0920 100644
> >> --- a/drivers/gpu/drm/radeon/radeon_ttm.c
> >> +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
> >> @@ -76,7 +76,7 @@ static int radeon_init_mem_type(struct ttm_bo_device 
> >> *bdev, uint32_t type,
> >>switch (type) {
> >>case TTM_PL_SYSTEM:
> >>/* System memory */
> >> -man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
> >> +man->flags = 0;
> >>man->available_caching = TTM_PL_MASK_CACHING;
> >>man->default_caching = TTM_PL_FLAG_CACHED;
> >>break;
> >> @@ -84,7 +84,7 @@ static int radeon_init_mem_type(struct ttm_bo_device 
> >> *bdev, uint32_t type,
> >>man->func = &ttm_bo_manager_func;
> >>man->available_caching = TTM_PL_MASK_CACHING;
> >>man->default_caching = TTM_PL_FLAG_CACHED;
> >> -man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
> >> +man->flags = 0;
> >> #if IS_ENABLED(CONFIG_AGP)
> >>if (rdev->flags & RADEON_IS_AGP) {
> >>if (!rdev->ddev->agp) {
> >> @@ -92,8 +92,6 @@ static int radeon_init_mem_type(struct ttm_bo_device 
> >> *bdev, uint32_t type,
> >>  (unsigned)type);
> >>return -EINVAL;
> >>}
> >> -if (!rdev->ddev->agp->cant_use_aperture)
> >> -man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
> > There is a bunch of agp drivers (alpha, ppc, that kind of stuff) with 
> > this
> > flag set. And radeon.ko did at least once work on these. And your patch 
> > to
> > disable agp only changes the default, it doesn't rip out the code.
>  The key point is that the flags for AGP are the same as the ones for the
>  PCIe path. So no functional change at all :)
> >>> I misread the code somehow; I didn't spot the unconditional setting of
> >>> FLAG_MAPPABLE for all TTM_PL_TT, irrespective of AGP or not, and somehow
> >>> thought that was another case.
> >>>
> >>> Reviewed-by: Daniel Vetter 
> >> And for the amdgpu patch? Otherwise I just ping Alex for an rb.
> > See my question over there, I'm not seeing how the code prevents mmap
> > for AMDGPU_PL_* domains after your patch. Once that's cleared up happy
> > to r-b that one and the final one too.
>
> I already replied, sounds like you never got that.

I got it, but I suck at reading mailing lists.

> Anyway see the switch just below the two lines I removed:
> > switch (mem->mem_type) {
> > case TTM_PL_SYSTEM:
> 
> > case TTM_PL_TT:
> ...
> > case TTM_PL_VRAM:
> ...
> > default:
> > return -EINVAL;
> > }
>
> So again, no functional change at all.

Indeed, I score another point for being blind. r-b: also on the amdgpu
and final cleanup patch.
-Daniel

>
> Cheers,
> Christian.
>
> > -Daniel
> >
> >> Thanks,
> >> Christian.
> >>
>  The real handling of cant_use_aperture is in radeon_ttm_io_mem_reserve().
> 
>  Christian.
> 
> > So not sure your assumption here is correct.
> > -Daniel
> >
> >>man->available_caching = TTM_PL_FLAG_UNCACHED |
> >> TTM_PL_FLAG_WC;
> >>man->default_caching = TTM_PL_FLAG_WC;
> >> @@ -103,8 +101,7 @@ static int radeon_init_mem_type(struct 
> >> ttm_bo_device *bdev, uint32_t type,
> >>case TTM_PL_VRAM:
> >>/* "On-card" video ram */
> >>man->func = &ttm_bo_manager_func;
> >> -man->flags = TTM_MEMTYPE_FLAG_FIXED |
> >> - TTM_MEMTYPE_FLAG_MAPPABLE;
> >> +man->flags = TTM_MEMTYPE_FLAG_FIXED;
> >>man->available_caching = TTM_PL_FLAG_UNCACHED | 
> >> TTM_PL_FLAG_WC;
> >>man->default_caching = TTM_PL_FLAG_WC;
> >>break;
> >> @@ -394,7 +391,6 @@ static int radeon_bo_move(struct 

Re: [PATCH] drm/vkms: add missing drm_crtc_vblank_put to the get/put pair on flush

2020-07-22 Thread daniel
On Wed, Jul 22, 2020 at 08:04:11AM -0300, Melissa Wen wrote:
> This patch adds a missing drm_crtc_vblank_put op to the pair
> drm_crtc_vblank_get/put (inc/decrement counter to guarantee vblanks).
> 
> It clears the execution of the following kms_cursor_crc subtests:
> 1. pipe-A-cursor-[size,alpha-opaque, NxN-(on-screen, off-screen, sliding,
>random, fast-moving])] - successful when running individually.
> 2. pipe-A-cursor-dpms passes again
> 3. pipe-A-cursor-suspend also passes
> 
> The issue was initially tracked in the sequential execution of IGT
> kms_cursor_crc subtest: when running the test sequence or one of its
> subtests twice, the odd execs complete and the pairs get stuck in an
> endless wait. In the IGT code, calling a wait_for_vblank before the start
> of CRC capture prevented the busy-wait. But the problem persisted in the
> pipe-A-cursor-dpms and -suspend subtests.
> 
> Checking the history, the pipe-A-cursor-dpms subtest was successful when,
> in vkms_atomic_commit_tail, instead of using the flip_done op, it used
> wait_for_vblanks. Another way to prevent blocking was wait_one_vblank when
> enabling crtc. However, in both cases, pipe-A-cursor-suspend persisted
> blocking in the 2nd start of CRC capture, which may indicate that
> something got stuck in the step of CRC setup. Indeed, wait_one_vblank in
> the crc setup was able to sync things and free all kms_cursor_crc
> subtests.
> 
> Tracing and comparing a clean run with a blocked one:
> - in a clean one, vkms_crtc_atomic_flush enables vblanks;
> - when blocked, only in next op, vkms_crtc_atomic_enable, the vblanks
> started. Moreover, a series of vkms_vblank_simulate flow out until
> disabling vblanks.
> Also watching the steps of vkms_crtc_atomic_flush, when the very first
> drm_crtc_vblank_get returned an error, the subtest crashed. On the other
> hand, when vblank_get succeeded, the subtest completed. Finally, checking
> the flush steps: it increases a counter to hold a vblank reference (get),
> but there isn't an op to decrement it and release the vblank (put).
> 
> Cc: Daniel Vetter 
> Cc: Rodrigo Siqueira 
> Cc: Haneen Mohammed 
> Signed-off-by: Melissa Wen 
> ---
>  drivers/gpu/drm/vkms/vkms_crtc.c | 1 +
>  1 file changed, 1 insertion(+)
> 
> diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c 
> b/drivers/gpu/drm/vkms/vkms_crtc.c
> index ac85e17428f8..a99d6b4a92dd 100644
> --- a/drivers/gpu/drm/vkms/vkms_crtc.c
> +++ b/drivers/gpu/drm/vkms/vkms_crtc.c
> @@ -246,6 +246,7 @@ static void vkms_crtc_atomic_flush(struct drm_crtc *crtc,
>  
>   spin_unlock(&crtc->dev->event_lock);
>  
> + drm_crtc_vblank_put(crtc);

Uh so I reviewed this a bit more carefully now, and I don't think this is
the correct bugfix. From the kerneldoc of drm_crtc_arm_vblank_event():

 * Caller must hold a vblank reference for the event @e acquired by a
 * drm_crtc_vblank_get(), which will be dropped when the next vblank arrives.

So when we call drm_crtc_arm_vblank_event(), the vblank_put gets called
for us. And that's the only case where we successfully acquired a vblank
interrupt reference, since on failure of drm_crtc_vblank_get() (0 indicates
success for that function, a negative error number failure) we directly
send out the event.
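
Spelled out, the pattern the kerneldoc describes looks roughly like this
(a sketch of the flush path, not the exact vkms code):

	if (crtc->state->event) {
		spin_lock(&crtc->dev->event_lock);

		if (drm_crtc_vblank_get(crtc) != 0)
			/* no vblank reference: send the event right away */
			drm_crtc_send_vblank_event(crtc, crtc->state->event);
		else
			/* the reference taken by _get() is dropped when the
			 * armed event goes out at the next vblank */
			drm_crtc_arm_vblank_event(crtc, crtc->state->event);

		spin_unlock(&crtc->dev->event_lock);

		crtc->state->event = NULL;
	}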

So something else fishy is going on, and now I'm totally confused why this
even happens.

We also have a pile of WARN_ON checks in drm_crtc_vblank_put() to make sure
we don't underflow the refcount, so I don't think it's that either (unless
this patch creates more WARNING backtraces).

But clearly it changes behaviour somehow ... can you try to figure out
what changes? Maybe print out the vblank->refcount at various points in
the driver, and maybe also trace when exactly the fake vkms vblank hrtimer
is enabled/disabled ...

I'm totally confused about what's going on here now.
-Daniel

>   crtc->state->event = NULL;
>   }
>  
> -- 
> 2.27.0
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH] drm/bridge/adv7511: set the bridge type properly

2020-07-22 Thread Laurent Pinchart
Hi Laurentiu,

Thank you for the patch.

On Mon, Jul 20, 2020 at 03:42:27PM +0300, Laurentiu Palcu wrote:
> From: Laurentiu Palcu 
> 
> After the drm_bridge_connector_init() helper function was added, the ADV
> driver was changed accordingly. However, the 'type' field of the bridge
> structure was left unset, which makes the helper function always return
> -EINVAL.
> 
> Signed-off-by: Laurentiu Palcu 

Reviewed-by: Laurent Pinchart 

> ---
> Hi,
> 
> I've hit this while trying to use this helper in the new i.MX8MQ DCSS
> driver, as suggested by Sam, and I wanted to test it with NWL MIPI_DSI and
> ADV since support is already in mainline.
> 
>  drivers/gpu/drm/bridge/adv7511/adv7511_drv.c | 1 +
>  1 file changed, 1 insertion(+)
> 
> diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c 
> b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
> index f45cdca9cce5..a0d392c338da 100644
> --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
> +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
> @@ -1283,6 +1283,7 @@ static int adv7511_probe(struct i2c_client *i2c, const 
> struct i2c_device_id *id)
>   adv7511->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
>   | DRM_BRIDGE_OP_HPD;
>   adv7511->bridge.of_node = dev->of_node;
> + adv7511->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
>  
>   drm_bridge_add(&adv7511->bridge);
>  

-- 
Regards,

Laurent Pinchart
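
For reference, the -EINVAL comes from the connector type check in
drm_bridge_connector_init(), roughly (paraphrased from
drm_bridge_connector.c):

	/* The helper takes the connector type from the bridge chain; with
	 * adv7511->bridge.type left zero-initialized the type stays
	 * DRM_MODE_CONNECTOR_Unknown (0) and initialization fails. */
	if (connector_type == DRM_MODE_CONNECTOR_Unknown) {
		kfree(bridge_connector);
		return ERR_PTR(-EINVAL);
	}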
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [Linaro-mm-sig] [PATCH 1/2] dma-buf.rst: Document why indefinite fences are a bad idea

2020-07-22 Thread Intel


On 2020-07-22 13:39, Daniel Vetter wrote:

On Wed, Jul 22, 2020 at 12:31 PM Thomas Hellström (Intel)
 wrote:


On 2020-07-22 11:45, Daniel Vetter wrote:

On Wed, Jul 22, 2020 at 10:05 AM Thomas Hellström (Intel)
 wrote:

On 2020-07-22 09:11, Daniel Vetter wrote:

On Wed, Jul 22, 2020 at 8:45 AM Thomas Hellström (Intel)
 wrote:

On 2020-07-22 00:45, Dave Airlie wrote:

On Tue, 21 Jul 2020 at 18:47, Thomas Hellström (Intel)
 wrote:

On 7/21/20 9:45 AM, Christian König wrote:

Am 21.07.20 um 09:41 schrieb Daniel Vetter:

On Mon, Jul 20, 2020 at 01:15:17PM +0200, Thomas Hellström (Intel)
wrote:

Hi,

On 7/9/20 2:33 PM, Daniel Vetter wrote:

Comes up every few years, gets somewhat tedious to discuss, let's
write this down once and for all.

What I'm not sure about is whether the text should be more explicit in
flat out mandating the amdkfd eviction fences for long running compute
workloads or workloads where userspace fencing is allowed.

Although (in my humble opinion) it might be possible to completely
untangle kernel-introduced fences for resource management and dma-fences
used for completion- and dependency tracking and lift a lot of
restrictions for the dma-fences, including prohibiting infinite ones, I
think this makes sense describing the current state.

Yeah I think a future patch needs to type up how we want to make that
happen (for some cross driver consistency) and what needs to be
considered. Some of the necessary parts are already there (with like the
preemption fences amdkfd has as an example), but I think some clear docs
on what's required from both hw, drivers and userspace would be really
good.

I'm currently writing that up, but probably still need a few days for
this.

Great! I put down some (very) initial thoughts a couple of weeks ago
building on eviction fences for various hardware complexity levels here:

https://gitlab.freedesktop.org/thomash/docs/-/blob/master/Untangling%20dma-fence%20and%20memory%20allocation.odt

We are seeing HW that has recoverable GPU page faults but only for
compute tasks, and scheduler without semaphores hw for graphics.

So a single driver may have to expose both models to userspace and
also introduces the problem of how to interoperate between the two
models on one card.

Dave.

Hmm, yes to begin with it's important to note that this is not a
replacement for new programming models or APIs, This is something that
takes place internally in drivers to mitigate many of the restrictions
that are currently imposed on dma-fence and documented in this and
previous series. It's basically the driver-private narrow completions
Jason suggested in the lockdep patches discussions implemented the same
way as eviction-fences.

The memory fence API would be local to helpers and middle-layers like
TTM, and the corresponding drivers.  The only cross-driver-like
visibility would be that the dma-buf move_notify() callback would not be
allowed to wait on dma-fences or something that depends on a dma-fence.

Because we can't preempt (on some engines at least) we already have
the requirement that cross driver buffer management can get stuck on a
dma-fence. Not even taking into account the horrors we do with
userptr, which are cross driver no matter what. Limiting move_notify
to memory fences only doesn't work, since the pte clearing might need
to wait for a dma_fence first. Hence this becomes a full end-of-batch
fence, not just a limited kernel-internal memory fence.

For non-preemptible hardware the memory fence typically *is* the
end-of-batch fence. (Unless, as documented, there is a scheduler
consuming sync-file dependencies in which case the memory fence wait
needs to be able to break out of that). The key thing is not that we can
break out of execution, but that we can break out of dependencies, since
when we're executing all dependecies (modulo semaphores) are already
fulfilled. That's what's eliminating the deadlocks.


That's kinda why I think the only reasonable option is to toss in the
towel and declare dma-fence to be the memory fence (and suck up all
the consequences of that decision as uapi, which is kinda where we
are), and construct something new&entirely free-wheeling for userspace
fencing. But only for engines that allow enough preempt/gpu page
faulting to make that possible. Free wheeling userspace fences/gpu
semaphores or whatever you want to call them (on windows I think it's
monitored fence) only work if you can preempt to decouple the memory
fences from your gpu command execution.

There's the in-between step of just decoupling the batchbuffer
submission prep for hw without any preempt (but a scheduler), but that
seems kinda pointless. Modern execbuf should be O(1) fastpath, with
all the allocation/mapping work pulled out ahead. vk exposes that
model directly to clients, GL drivers could use it internally too, so
I see zero value in spending lots of time engineering very tricky
kernel code just for old userspace. Much more reasonable to do that in
userspace, where 

[PATCH] drm/amdgpu/dc: Simplify drm_crtc_state::active checks

2020-07-22 Thread Michel Dänzer
From: Michel Dänzer 

drm_atomic_crtc_check enforces that ::active can only be true if
::enable is as well.

Signed-off-by: Michel Dänzer 
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c| 16 +++-
 1 file changed, 3 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 312c543b258f..dabef307a74f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3415,21 +3415,12 @@ static bool modeset_required(struct drm_crtc_state 
*crtc_state,
 struct dc_stream_state *new_stream,
 struct dc_stream_state *old_stream)
 {
-   if (!drm_atomic_crtc_needs_modeset(crtc_state))
-   return false;
-
-   if (!crtc_state->enable)
-   return false;
-
-   return crtc_state->active;
+   return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
 }
 
 static bool modereset_required(struct drm_crtc_state *crtc_state)
 {
-   if (!drm_atomic_crtc_needs_modeset(crtc_state))
-   return false;
-
-   return !crtc_state->enable || !crtc_state->active;
+   return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
 }
 
 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
@@ -8108,8 +8099,7 @@ static int dm_update_crtc_state(struct 
amdgpu_display_manager *dm,
 * We want to do dc stream updates that do not require a
 * full modeset below.
 */
-   if (!(enable && aconnector && new_crtc_state->enable &&
- new_crtc_state->active))
+   if (!(enable && aconnector && new_crtc_state->active))
return 0;
/*
 * Given above conditions, the dc state cannot be NULL because:
-- 
2.28.0.rc0
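
For reference, the invariant the commit message relies on is enforced by
the core in drm_atomic_crtc_check(), roughly (paraphrased from
drm_atomic.c):

	/* A CRTC state with active set but enable clear is rejected
	 * before any driver code ever sees it. */
	if (new_crtc_state->active && !new_crtc_state->enable) {
		DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n",
				 crtc->base.id, crtc->name);
		return -EINVAL;
	}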

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH v3 06/21] drm/bridge: tc358767: add drm_panel_bridge support

2020-07-22 Thread Laurent Pinchart
Hi Sam,

On Sun, Jul 19, 2020 at 03:06:56PM +0200, Sam Ravnborg wrote:
> On Sat, Jul 11, 2020 at 01:19:35AM +0300, Laurent Pinchart wrote:
> > On Fri, Jul 03, 2020 at 09:24:02PM +0200, Sam Ravnborg wrote:
> > > Prepare the bridge driver for use in a chained setup by
> > > replacing direct use of drm_panel with drm_panel_bridge support.
> > > 
> > > The bridge driver assumes the panel is optional.
> > > The relevant tests are migrated over to check for the
> > > pnale bridge to keep the same functionality.
> > 
> > s/pnale/panel/
> > 
> > > Note: the bridge panel will use the connector type from the panel.
> > > 
> > > Signed-off-by: Sam Ravnborg 
> > > Cc: Andrzej Hajda 
> > > Cc: Neil Armstrong 
> > > Cc: Laurent Pinchart 
> > > Cc: Jonas Karlman 
> > > Cc: Jernej Skrabec 
> > > ---
> > >  drivers/gpu/drm/bridge/tc358767.c | 57 +++
> > >  1 file changed, 27 insertions(+), 30 deletions(-)
> > > 
> > > diff --git a/drivers/gpu/drm/bridge/tc358767.c 
> > > b/drivers/gpu/drm/bridge/tc358767.c
> > > index c2777b226c75..08d483664258 100644
> > > --- a/drivers/gpu/drm/bridge/tc358767.c
> > > +++ b/drivers/gpu/drm/bridge/tc358767.c
> > > @@ -244,8 +244,8 @@ struct tc_data {
> > >   struct drm_dp_aux   aux;
> > >  
> > >   struct drm_bridge   bridge;
> > > + struct drm_bridge   *panel_bridge;
> > >   struct drm_connectorconnector;
> > > - struct drm_panel*panel;
> > >  
> > >   /* link settings */
> > >   struct tc_edp_link  link;
> > > @@ -1236,13 +1236,6 @@ static int tc_stream_disable(struct tc_data *tc)
> > >   return 0;
> > >  }
> > >  
> > > -static void tc_bridge_pre_enable(struct drm_bridge *bridge)
> > > -{
> > > - struct tc_data *tc = bridge_to_tc(bridge);
> > > -
> > > - drm_panel_prepare(tc->panel);
> > > -}
> > > -
> > >  static void tc_bridge_enable(struct drm_bridge *bridge)
> > >  {
> > >   struct tc_data *tc = bridge_to_tc(bridge);
> > > @@ -1266,8 +1259,6 @@ static void tc_bridge_enable(struct drm_bridge 
> > > *bridge)
> > >   tc_main_link_disable(tc);
> > >   return;
> > >   }
> > > -
> > > - drm_panel_enable(tc->panel);
> > >  }
> > >  
> > >  static void tc_bridge_disable(struct drm_bridge *bridge)
> > > @@ -1275,8 +1266,6 @@ static void tc_bridge_disable(struct drm_bridge 
> > > *bridge)
> > >   struct tc_data *tc = bridge_to_tc(bridge);
> > >   int ret;
> > >  
> > > - drm_panel_disable(tc->panel);
> > > -
> > >   ret = tc_stream_disable(tc);
> > >   if (ret < 0)
> > >   dev_err(tc->dev, "main link stream stop error: %d\n", ret);
> > > @@ -1286,13 +1275,6 @@ static void tc_bridge_disable(struct drm_bridge 
> > > *bridge)
> > >   dev_err(tc->dev, "main link disable error: %d\n", ret);
> > >  }
> > >  
> > > -static void tc_bridge_post_disable(struct drm_bridge *bridge)
> > > -{
> > > - struct tc_data *tc = bridge_to_tc(bridge);
> > > -
> > > - drm_panel_unprepare(tc->panel);
> > > -}
> > > -
> > >  static bool tc_bridge_mode_fixup(struct drm_bridge *bridge,
> > >const struct drm_display_mode *mode,
> > >struct drm_display_mode *adj)
> > > @@ -1348,9 +1330,11 @@ static int tc_connector_get_modes(struct 
> > > drm_connector *connector)
> > >   return 0;
> > >   }
> > >  
> > > - count = drm_panel_get_modes(tc->panel, connector);
> > > - if (count > 0)
> > > - return count;
> > > + if (tc->panel_bridge) {
> > > + count = drm_bridge_get_modes(tc->panel_bridge, connector);
> > > + if (count > 0)
> > > + return count;
> > > + }
> > >  
> > >   edid = drm_get_edid(connector, &tc->aux.ddc);
> > >  
> > > @@ -1378,7 +1362,7 @@ static enum drm_connector_status 
> > > tc_connector_detect(struct drm_connector *conne
> > >   int ret;
> > >  
> > >   if (tc->hpd_pin < 0) {
> > > - if (tc->panel)
> > > + if (tc->panel_bridge)
> > >   return connector_status_connected;
> > >   else
> > >   return connector_status_unknown;
> > > @@ -1413,6 +1397,13 @@ static int tc_bridge_attach(struct drm_bridge 
> > > *bridge,
> > >   struct drm_device *drm = bridge->dev;
> > >   int ret;
> > >  
> > > + if (tc->panel_bridge) {
> > > + ret = drm_bridge_attach(tc->bridge.encoder, tc->panel_bridge,
> > > + &tc->bridge, flags);
> > > + if (ret < 0)
> > > + return ret;
> > > + }
> > 
> > With this both this driver and the panel bridge driver will create a
> > connector. The simplest way to handle that is probably to pass
> > flags & ~DRM_BRIDGE_ATTACH_NO_CONNECTOR to drm_bridge_attach(). It's a
> > bit of a hack, but should go away once all users are converted to
> > !DRM_BRIDGE_ATTACH_NO_CONNECTOR.
> 
> I do not follow you here - sorry.
> 
> We have two situations:
> 
> display driver creates the connector - and passes 
> DRM_BRIDGE_ATTACH_NO_CONNECTOR.
> - bridge driver shall not create connector
> - bridge panel shall 

Re: [Linaro-mm-sig] [PATCH 1/2] dma-buf.rst: Document why indefinite fences are a bad idea

2020-07-22 Thread Daniel Vetter
On Wed, Jul 22, 2020 at 2:22 PM Thomas Hellström (Intel)
 wrote:
>
>
> On 2020-07-22 13:39, Daniel Vetter wrote:
> > On Wed, Jul 22, 2020 at 12:31 PM Thomas Hellström (Intel)
> >  wrote:
> >>
> >> On 2020-07-22 11:45, Daniel Vetter wrote:
> >>> On Wed, Jul 22, 2020 at 10:05 AM Thomas Hellström (Intel)
> >>>  wrote:
>  On 2020-07-22 09:11, Daniel Vetter wrote:
> > On Wed, Jul 22, 2020 at 8:45 AM Thomas Hellström (Intel)
> >  wrote:
> >> On 2020-07-22 00:45, Dave Airlie wrote:
> >>> On Tue, 21 Jul 2020 at 18:47, Thomas Hellström (Intel)
> >>>  wrote:
>  On 7/21/20 9:45 AM, Christian König wrote:
> > Am 21.07.20 um 09:41 schrieb Daniel Vetter:
> >> On Mon, Jul 20, 2020 at 01:15:17PM +0200, Thomas Hellström (Intel)
> >> wrote:
> >>> Hi,
> >>>
> >>> On 7/9/20 2:33 PM, Daniel Vetter wrote:
>  Comes up every few years, gets somewhat tedious to discuss, let's
>  write this down once and for all.
> 
>  What I'm not sure about is whether the text should be more 
>  explicit in
>  flat out mandating the amdkfd eviction fences for long running 
>  compute
>  workloads or workloads where userspace fencing is allowed.
> >>> Although (in my humble opinion) it might be possible to completely
> >>> untangle
> >>> kernel-introduced fences for resource management and dma-fences 
> >>> used
> >>> for
> >>> completion- and dependency tracking and lift a lot of restrictions
> >>> for the
> >>> dma-fences, including prohibiting infinite ones, I think this 
> >>> makes
> >>> sense
> >>> describing the current state.
> >> Yeah I think a future patch needs to type up how we want to make 
> >> that
> >> happen (for some cross driver consistency) and what needs to be
> >> considered. Some of the necessary parts are already there (with 
> >> like the
> >> preemption fences amdkfd has as an example), but I think some 
> >> clear docs
> >> on what's required from both hw, drivers and userspace would be 
> >> really
> >> good.
> > I'm currently writing that up, but probably still need a few days 
> > for
> > this.
>  Great! I put down some (very) initial thoughts a couple of weeks ago
>  building on eviction fences for various hardware complexity levels 
>  here:
> 
>  https://gitlab.freedesktop.org/thomash/docs/-/blob/master/Untangling%20dma-fence%20and%20memory%20allocation.odt
> >>> We are seeing HW that has recoverable GPU page faults but only for
> >>> compute tasks, and scheduler without semaphores hw for graphics.
> >>>
> >>> So a single driver may have to expose both models to userspace and
> >>> also introduces the problem of how to interoperate between the two
> >>> models on one card.
> >>>
> >>> Dave.
> >> Hmm, yes to begin with it's important to note that this is not a
> >> replacement for new programming models or APIs, This is something that
> >> takes place internally in drivers to mitigate many of the restrictions
> >> that are currently imposed on dma-fence and documented in this and
> >> previous series. It's basically the driver-private narrow completions
> >> Jason suggested in the lockdep patches discussions implemented the same
> >> way as eviction-fences.
> >>
> >> The memory fence API would be local to helpers and middle-layers like
> >> TTM, and the corresponding drivers.  The only cross-driver-like
> >> visibility would be that the dma-buf move_notify() callback would not 
> >> be
> >> allowed to wait on dma-fences or something that depends on a dma-fence.
> > Because we can't preempt (on some engines at least) we already have
> > the requirement that cross driver buffer management can get stuck on a
> > dma-fence. Not even taking into account the horrors we do with
> > userptr, which are cross driver no matter what. Limiting move_notify
> > to memory fences only doesn't work, since the pte clearing might need
> > to wait for a dma_fence first. Hence this becomes a full end-of-batch
> > fence, not just a limited kernel-internal memory fence.
>  For non-preemptible hardware the memory fence typically *is* the
>  end-of-batch fence. (Unless, as documented, there is a scheduler
>  consuming sync-file dependencies in which case the memory fence wait
>  needs to be able to break out of that). The key thing is not that we can
>  break out of execution, but that we can break out of dependencies, since
>  when we're executing all dependecies (modulo semaphores) are already
>  fulfilled. That's what's eliminating the deadlocks.
> 
> > That's kinda why I think only reasonable

[PATCH 2/2] drm/imx: imx-tve: remove redundant enable tracking

2020-07-22 Thread Philipp Zabel
The DRM core already takes care that encoder enable and disable calls
are balanced.

Signed-off-by: Philipp Zabel 
---
 drivers/gpu/drm/imx/imx-tve.c | 16 
 1 file changed, 4 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index 854f56603210..ef3c25d87d87 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -103,7 +103,6 @@ struct imx_tve {
struct drm_connector connector;
struct drm_encoder encoder;
struct device *dev;
-   bool enabled;
int mode;
int di_hsync_pin;
int di_vsync_pin;
@@ -129,12 +128,8 @@ static inline struct imx_tve *enc_to_tve(struct 
drm_encoder *e)
 
 static void tve_enable(struct imx_tve *tve)
 {
-   if (!tve->enabled) {
-   tve->enabled = true;
-   clk_prepare_enable(tve->clk);
-   regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
-  TVE_EN, TVE_EN);
-   }
+   clk_prepare_enable(tve->clk);
+   regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, TVE_EN, TVE_EN);
 
/* clear interrupt status register */
regmap_write(tve->regmap, TVE_STAT_REG, 0x);
@@ -151,11 +146,8 @@ static void tve_enable(struct imx_tve *tve)
 
 static void tve_disable(struct imx_tve *tve)
 {
-   if (tve->enabled) {
-   tve->enabled = false;
-   regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, TVE_EN, 0);
-   clk_disable_unprepare(tve->clk);
-   }
+   regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, TVE_EN, 0);
+   clk_disable_unprepare(tve->clk);
 }
 
 static int tve_setup_tvout(struct imx_tve *tve)
-- 
2.20.1

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[PATCH 1/2] drm/imx: imx-tve: use regmap fast_io spinlock

2020-07-22 Thread Philipp Zabel
Replace the custom spinlock with the fast_io spinlock provided by
regmap.

Signed-off-by: Philipp Zabel 
---
 drivers/gpu/drm/imx/imx-tve.c | 22 +-
 1 file changed, 1 insertion(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index 813bb6156a68..854f56603210 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -13,7 +13,6 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 
 #include 
@@ -104,7 +103,6 @@ struct imx_tve {
struct drm_connector connector;
struct drm_encoder encoder;
struct device *dev;
-   spinlock_t lock;/* register lock */
bool enabled;
int mode;
int di_hsync_pin;
@@ -129,22 +127,6 @@ static inline struct imx_tve *enc_to_tve(struct 
drm_encoder *e)
return container_of(e, struct imx_tve, encoder);
 }
 
-static void tve_lock(void *__tve)
-__acquires(&tve->lock)
-{
-   struct imx_tve *tve = __tve;
-
-   spin_lock(&tve->lock);
-}
-
-static void tve_unlock(void *__tve)
-__releases(&tve->lock)
-{
-   struct imx_tve *tve = __tve;
-
-   spin_unlock(&tve->lock);
-}
-
 static void tve_enable(struct imx_tve *tve)
 {
if (!tve->enabled) {
@@ -500,8 +482,7 @@ static struct regmap_config tve_regmap_config = {
 
.readable_reg = imx_tve_readable_reg,
 
-   .lock = tve_lock,
-   .unlock = tve_unlock,
+   .fast_io = true,
 
.max_register = 0xdc,
 };
@@ -544,7 +525,6 @@ static int imx_tve_bind(struct device *dev, struct device 
*master, void *data)
memset(tve, 0, sizeof(*tve));
 
tve->dev = dev;
-   spin_lock_init(&tve->lock);
 
ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0);
if (ddc_node) {
-- 
2.20.1
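
For readers unfamiliar with the flag: .fast_io makes regmap serialize
accesses with an internal spinlock instead of its default mutex, which is
exactly what the removed tve_lock()/tve_unlock() pair provided. A minimal
illustration (not the tve config itself):

	static const struct regmap_config example_config = {
		.reg_bits = 32,
		.val_bits = 32,
		.fast_io = true,	/* spinlock instead of mutex */
	};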

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH] drm/amdgpu/dc: Simplify drm_crtc_state::active checks

2020-07-22 Thread Daniel Vetter
On Wed, Jul 22, 2020 at 2:38 PM Michel Dänzer  wrote:
>
> From: Michel Dänzer 
>
> drm_atomic_crtc_check enforces that ::active can only be true if
> ::enable is as well.
>
> Signed-off-by: Michel Dänzer 

modeset vs modereset is a bit of an inglorious name choice ... since this
seems to be glue code and not part of core dc, maybe rename to
enable_required/disable_required to keep it consistent with the
wording atomic helpers use? DC also seems to use reset for a lot of
other things already (state reset, like atomic, or gpu reset like
drm/scheduler's td_r_), so I think this would also help clarity from a
DC perspective.

Patch itself is good, above just an idea for another patch on top.

Reviewed-by: Daniel Vetter 
> ---
>  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c| 16 +++-
>  1 file changed, 3 insertions(+), 13 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
> b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> index 312c543b258f..dabef307a74f 100644
> --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
> @@ -3415,21 +3415,12 @@ static bool modeset_required(struct drm_crtc_state 
> *crtc_state,
>  struct dc_stream_state *new_stream,
>  struct dc_stream_state *old_stream)
>  {
> -   if (!drm_atomic_crtc_needs_modeset(crtc_state))
> -   return false;
> -
> -   if (!crtc_state->enable)
> -   return false;
> -
> -   return crtc_state->active;
> +   return crtc_state->active && 
> drm_atomic_crtc_needs_modeset(crtc_state);
>  }
>
>  static bool modereset_required(struct drm_crtc_state *crtc_state)
>  {
> -   if (!drm_atomic_crtc_needs_modeset(crtc_state))
> -   return false;
> -
> -   return !crtc_state->enable || !crtc_state->active;
> +   return !crtc_state->active && 
> drm_atomic_crtc_needs_modeset(crtc_state);
>  }
>
>  static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
> @@ -8108,8 +8099,7 @@ static int dm_update_crtc_state(struct 
> amdgpu_display_manager *dm,
>  * We want to do dc stream updates that do not require a
>  * full modeset below.
>  */
> -   if (!(enable && aconnector && new_crtc_state->enable &&
> - new_crtc_state->active))
> +   if (!(enable && aconnector && new_crtc_state->active))
> return 0;
> /*
>  * Given above conditions, the dc state cannot be NULL because:
> --
> 2.28.0.rc0
>



-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[Bug 207383] [Regression] 5.7 amdgpu/polaris11 gpf: amdgpu_atomic_commit_tail

2020-07-22 Thread bugzilla-daemon
https://bugzilla.kernel.org/show_bug.cgi?id=207383

--- Comment #84 from Nicholas Kazlauskas (nicholas.kazlaus...@amd.com) ---
We don't manually free the dm_state from amdgpu, that should be handled by the
DRM core.

It should generally only be freed once it's no longer in use by the DRM core,
i.e. once the state has been swapped and we drop the reference on the old
state at the end of commit tail.

If DRM private objects work the same as regular DRM objects - which from my
impression they should - then they should be NULL until they've been acquired
for a new state as needed.

This turns out to be on almost every commit in our current code. I think most
commits that touch planes or CRTCs would end up doing this.

I kind of wonder if we're keeping the old dm_state pointer that was freed in
the case where it isn't duplicated and for whatever reason it isn't actually
NULL.

Based on the above discussion I guess we're probably not doing a use after free
on the dc_state itself.

There have been other bugs with private objects in the past with DRM that
didn't exist with the regular objects, so I'd almost consider finding an
alternative solution here and not keeping an old vs new dc_state, just to
avoid using them in the first place.



Re: [PATCH] drm/amdgpu/dc: Simplify drm_crtc_state::active checks

2020-07-22 Thread Kazlauskas, Nicholas

On 2020-07-22 8:51 a.m., Daniel Vetter wrote:

On Wed, Jul 22, 2020 at 2:38 PM Michel Dänzer  wrote:


From: Michel Dänzer 

drm_atomic_crtc_check enforces that ::active can only be true if
::enable is as well.

Signed-off-by: Michel Dänzer 


Looks fine to me. The check is sufficiently old that I don't mind
relying on the core for this either.


Reviewed-by: Nicholas Kazlauskas 



modeset vs modereset is a bit of an inglorious name choice ... since this
seems to be glue code and not part of core dc, maybe rename to
enable_required/disable_required to keep it consistent with the
wording atomic helpers use? DC also seems to use reset for a lot of
other things already (state reset, like atomic, or gpu reset like
drm/scheduler's td_r_), so I think this would also help clarity from a
DC perspective.

Patch itself is good, above just an idea for another patch on top.

Reviewed-by: Daniel Vetter 


That sounds like a reasonable idea to me. These are used more as a 
stream_changed / stream_removed flag, but I don't think these helpers 
really need to exist at all.


That could come as a follow up patch.

Regards,
Nicholas Kazlauskas


---
  .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c| 16 +++-
  1 file changed, 3 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 312c543b258f..dabef307a74f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3415,21 +3415,12 @@ static bool modeset_required(struct drm_crtc_state 
*crtc_state,
  struct dc_stream_state *new_stream,
  struct dc_stream_state *old_stream)
  {
-   if (!drm_atomic_crtc_needs_modeset(crtc_state))
-   return false;
-
-   if (!crtc_state->enable)
-   return false;
-
-   return crtc_state->active;
+   return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
  }

  static bool modereset_required(struct drm_crtc_state *crtc_state)
  {
-   if (!drm_atomic_crtc_needs_modeset(crtc_state))
-   return false;
-
-   return !crtc_state->enable || !crtc_state->active;
+   return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
  }

  static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
@@ -8108,8 +8099,7 @@ static int dm_update_crtc_state(struct 
amdgpu_display_manager *dm,
  * We want to do dc stream updates that do not require a
  * full modeset below.
  */
-   if (!(enable && aconnector && new_crtc_state->enable &&
- new_crtc_state->active))
+   if (!(enable && aconnector && new_crtc_state->active))
 return 0;
 /*
  * Given above conditions, the dc state cannot be NULL because:
--
2.28.0.rc0

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel






___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH v5 3/4] drm/bridge: Introduce LT9611 DSI to HDMI bridge

2020-07-22 Thread Laurent Pinchart
Hello,

On Sun, Jul 19, 2020 at 07:18:06PM +0200, Sam Ravnborg wrote:
> Hi Vinod.
> 
> Three trivial points below.
> The rest looks good.
> 
> With these fixed you can add:
> Reviewed-by: Sam Ravnborg 
> 
>   Sam
> 
> On Wed, Jul 08, 2020 at 04:05:58PM +0530, Vinod Koul wrote:
> > Lontium LT9611 is a DSI to HDMI bridge which supports two DSI ports and
> > an I2S port as inputs and an HDMI port as output.
> > 
> > Co-developed-by: Bjorn Andersson 
> > Signed-off-by: Bjorn Andersson 
> > Co-developed-by: Srinivas Kandagatla 
> > Signed-off-by: Srinivas Kandagatla 
> > Tested-by: John Stultz 
> > Signed-off-by: Vinod Koul 
> > ---
> >  drivers/gpu/drm/bridge/Kconfig  |   13 +
> >  drivers/gpu/drm/bridge/Makefile |1 +
> >  drivers/gpu/drm/bridge/lontium-lt9611.c | 1142 +++
> >  3 files changed, 1156 insertions(+)
> >  create mode 100644 drivers/gpu/drm/bridge/lontium-lt9611.c
> > 
> > +
> > +#include 
> > +#include 
> > +#include 
> > +#include 
> > +#include 
> 
> In alphabetical order. drm_probe_helper needs to be moved.
> 
> > +
> > +#define EDID_SEG_SIZE  256
> > +#define EDID_LEN   32
> > +#define EDID_LOOP  8
> > +#define KEY_DDC_ACCS_DONE 0x02
> > +#define DDC_NO_ACK 0x50
> > +
> 
> > +static void lt9611_pcr_setup(struct lt9611 *lt9611, const struct 
> > drm_display_mode *mode)
> > +{
> > +   const struct reg_sequence reg_cfg[] = {
> > +   { 0x830b, 0x01 },
> > +   { 0x830c, 0x10 },
> > +   { 0x8348, 0x00 },
> > +   { 0x8349, 0x81 },
> > +
> > +   /* stage 1 */
> > +   { 0x8321, 0x4a },
> > +   { 0x8324, 0x71 },
> > +   { 0x8325, 0x30 },
> > +   { 0x832a, 0x01 },
> > +
> > +   /* stage 2 */
> > +   { 0x834a, 0x40 },
> > +   { 0x831d, 0x10 },
> > +
> > +   /* MK limit */
> > +   { 0x832d, 0x38 },
> > +   { 0x8331, 0x08 },
> > +   };
> > +   const struct reg_sequence reg_cfg2[] = {
> > +   { 0x830b, 0x03 },
> > +   { 0x830c, 0xd0 },
> > +   { 0x8348, 0x03 },
> > +   { 0x8349, 0xe0 },
> > +   { 0x8324, 0x72 },
> > +   { 0x8325, 0x00 },
> > +   { 0x832a, 0x01 },
> > +   { 0x834a, 0x10 },
> > +   { 0x831d, 0x10 },
> > +   { 0x8326, 0x37 },
> 
> Block above is indented one tab too much.
> 
> > +static int lt9611_bridge_attach(struct drm_bridge *bridge,
> > +   enum drm_bridge_attach_flags flags)
> > +{
> > +   struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
> > +   int ret;
> > +
> > +   if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
> > +   dev_err(lt9611->dev, "Fix bridge driver to make connector 
> > optional!");
> > +   return -EINVAL;
> > +   }
>
> This should say that the display driver should be fixed.
> If a display driver expects this bridge to create the connector
> it would not work.

Actually, for new bridge drivers, connector creation should be optional
from the start. We don't want a failure in that case, the feature should
be implemented.
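
A sketch of what optional connector creation could look like in attach
(lt9611_connector_init() is a hypothetical helper name):

	static int lt9611_bridge_attach(struct drm_bridge *bridge,
					enum drm_bridge_attach_flags flags)
	{
		struct lt9611 *lt9611 = bridge_to_lt9611(bridge);

		/* display driver opted to create the connector itself */
		if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
			return 0;

		/* legacy path: the bridge creates its own connector */
		return lt9611_connector_init(bridge, lt9611);
	}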

-- 
Regards,

Laurent Pinchart
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [Linaro-mm-sig] [PATCH 1/2] dma-buf.rst: Document why indefinite fences are a bad idea

2020-07-22 Thread Intel



On 2020-07-22 14:41, Daniel Vetter wrote:


Ah I think I misunderstood which options you want to compare here. I'm
not sure how much pain fixing up "dma-fence as memory fence" really
is. That's kinda why I want a lot more testing on my annotation
patches, to figure that out. Not much feedback aside from amdgpu and
intel, and those two drivers pretty much need to sort out their memory
fence issues anyway (because of userptr and stuff like that).

The only other issues outside of these two drivers I'm aware of:
- various scheduler drivers doing allocations in the drm/scheduler
critical section. Since all arm-soc drivers have a mildly shoddy
memory model of "we just pin everything" they don't really have to
deal with this. So we might just declare arm as a platform broken and
not taint the dma-fence critical sections with fs_reclaim. Otoh we
need to fix this for drm/scheduler anyway; I think the best option would
be to have a mempool for hw fences in the scheduler itself (a rough
sketch follows after this list), and at that point fixing the other
drivers shouldn't be too onerous.

- vmwgfx doing a dma_resv in the atomic commit tail. Entirely
orthogonal to the entire memory fence discussion.
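
A rough illustration of the mempool idea from the first point above (all
names hypothetical; this is not existing scheduler code):

	/* Preallocate fence storage up front so the submission/signalling
	 * path never enters the allocator and thus never recurses into
	 * fs_reclaim. */
	mempool_t *fence_pool;

	fence_pool = mempool_create_kmalloc_pool(16, sizeof(struct my_hw_fence));
	if (!fence_pool)
		return -ENOMEM;

	/* in the dma-fence critical path: */
	struct my_hw_fence *f = mempool_alloc(fence_pool, GFP_NOWAIT);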


With vmwgfx there is another issue that is hit when the gpu signals an
error. At that point the batch might be restarted with a new meta
command buffer that needs to be allocated out of a dma pool, in the
fence critical section. That's probably a bit nasty to fix, but not
impossible.




I'm pretty sure there's more bugs, I just haven't heard from them yet.
Also due to the opt-in nature of dma-fence we can limit the scope of
what we fix fairly naturally, just don't put them where no one cares
:-) Of course that also hides general locking issues in dma_fence
signalling code, but well *shrug*.

Hmm, yes. Another potential big problem would be drivers that want to
use gpu page faults in the dma-fence critical sections with the 
batch-based programming model.


/Thomas


___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[PATCH] drm/simple_kms_helper: add drmm_simple_encoder_init()

2020-07-22 Thread Philipp Zabel
Add a drm_simple_encoder_init() variant that registers
drm_encoder_cleanup() with drmm_add_action().

Now drivers can store encoders in memory allocated with drmm_kmalloc()
after the call to drmm_mode_config_init(), without having to manually
make sure that drm_encoder_cleanup() is called before the memory is
freed.

Signed-off-by: Philipp Zabel 
---
 drivers/gpu/drm/drm_simple_kms_helper.c | 42 +
 include/drm/drm_simple_kms_helper.h |  4 +++
 2 files changed, 46 insertions(+)

diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c 
b/drivers/gpu/drm/drm_simple_kms_helper.c
index 74946690aba4..a243f00cf63d 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -9,6 +9,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -71,6 +72,47 @@ int drm_simple_encoder_init(struct drm_device *dev,
 }
 EXPORT_SYMBOL(drm_simple_encoder_init);
 
+static void drmm_encoder_cleanup(struct drm_device *dev, void *ptr)
+{
+   struct drm_encoder *encoder = ptr;
+
+   drm_encoder_cleanup(encoder);
+}
+
+/**
+ * drmm_simple_encoder_init - Initialize a preallocated encoder with
+ *basic functionality.
+ * @dev: drm device
+ * @encoder: the encoder to initialize
+ * @encoder_type: user visible type of the encoder
+ *
+ * Initialises a preallocated encoder that has no further functionality.
+ * Settings for possible CRTC and clones are left to their initial values.
+ * Cleanup is automatically handled through registering drm_encoder_cleanup()
+ * with drmm_add_action().
+ *
+ * The caller of drmm_simple_encoder_init() is responsible for allocating
+ * the encoder's memory with drmm_kzalloc() to ensure it is automatically
+ * freed after the encoder has been cleaned up.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drmm_simple_encoder_init(struct drm_device *dev,
+struct drm_encoder *encoder,
+int encoder_type)
+{
+   int ret;
+
+   ret = drm_encoder_init(dev, encoder, &drm_simple_encoder_funcs_cleanup,
+  encoder_type, NULL);
+   if (ret)
+   return ret;
+
+   return drmm_add_action_or_reset(dev, drmm_encoder_cleanup, encoder);
+}
+EXPORT_SYMBOL(drmm_simple_encoder_init);
+
 static enum drm_mode_status
 drm_simple_kms_crtc_mode_valid(struct drm_crtc *crtc,
   const struct drm_display_mode *mode)
diff --git a/include/drm/drm_simple_kms_helper.h 
b/include/drm/drm_simple_kms_helper.h
index a026375464ff..27f0915599c8 100644
--- a/include/drm/drm_simple_kms_helper.h
+++ b/include/drm/drm_simple_kms_helper.h
@@ -185,4 +185,8 @@ int drm_simple_encoder_init(struct drm_device *dev,
struct drm_encoder *encoder,
int encoder_type);
 
+int drmm_simple_encoder_init(struct drm_device *dev,
+struct drm_encoder *encoder,
+int encoder_type);
+
 #endif /* __LINUX_DRM_SIMPLE_KMS_HELPER_H */
-- 
2.20.1
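
Typical usage in a driver would then look something like this (struct
my_encoder is a placeholder):

	struct my_encoder {
		struct drm_encoder encoder;
		/* driver-private fields */
	};

	struct my_encoder *enc;
	int ret;

	/* memory and cleanup are both tied to the drm_device lifetime:
	 * no explicit drm_encoder_cleanup() or kfree() in the driver */
	enc = drmm_kzalloc(drm, sizeof(*enc), GFP_KERNEL);
	if (!enc)
		return -ENOMEM;

	ret = drmm_simple_encoder_init(drm, &enc->encoder,
				       DRM_MODE_ENCODER_TMDS);
	if (ret)
		return ret;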

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[PATCH 2/8] drm/imx: dw_hdmi-imx: use drm managed resources, switch to dw_hdmi_probe

2020-07-22 Thread Philipp Zabel
Move bridge creation into probe; during bind, only create the encoder and
attach the bridge.
Use drmm_kzalloc() to align encoder memory lifetime with the drm device,
and use drmm_add_action_or_reset() to make sure drm_encoder_cleanup() is
called before the memory is freed.

Signed-off-by: Philipp Zabel 
---
 drivers/gpu/drm/imx/dw_hdmi-imx.c | 108 ++
 1 file changed, 51 insertions(+), 57 deletions(-)

diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c 
b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index a4f178c1d9bc..b5106792725f 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -15,23 +15,32 @@
 
 #include 
 #include 
+#include 
 #include 
 #include 
+#include 
 #include 
 #include 
 
 #include "imx-drm.h"
 
+struct imx_hdmi;
+
+struct imx_hdmi_encoder {
+   struct drm_encoder encoder;
+   struct imx_hdmi *hdmi;
+};
+
 struct imx_hdmi {
struct device *dev;
-   struct drm_encoder encoder;
+   struct drm_bridge *bridge;
struct dw_hdmi *hdmi;
struct regmap *regmap;
 };
 
 static inline struct imx_hdmi *enc_to_imx_hdmi(struct drm_encoder *e)
 {
-   return container_of(e, struct imx_hdmi, encoder);
+   return container_of(e, struct imx_hdmi_encoder, encoder)->hdmi;
 }
 
 static const struct dw_hdmi_mpll_config imx_mpll_cfg[] = {
@@ -98,23 +107,6 @@ static const struct dw_hdmi_phy_config imx_phy_config[] = {
{ ~0UL,  0x, 0x, 0x}
 };
 
-static int dw_hdmi_imx_parse_dt(struct imx_hdmi *hdmi)
-{
-   struct device_node *np = hdmi->dev->of_node;
-
-   hdmi->regmap = syscon_regmap_lookup_by_phandle(np, "gpr");
-   if (IS_ERR(hdmi->regmap)) {
-   dev_err(hdmi->dev, "Unable to get gpr\n");
-   return PTR_ERR(hdmi->regmap);
-   }
-
-   return 0;
-}
-
-static void dw_hdmi_imx_encoder_disable(struct drm_encoder *encoder)
-{
-}
-
 static void dw_hdmi_imx_encoder_enable(struct drm_encoder *encoder)
 {
struct imx_hdmi *hdmi = enc_to_imx_hdmi(encoder);
@@ -140,7 +132,6 @@ static int dw_hdmi_imx_atomic_check(struct drm_encoder 
*encoder,
 
 static const struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs 
= {
.enable = dw_hdmi_imx_encoder_enable,
-   .disable= dw_hdmi_imx_encoder_disable,
.atomic_check = dw_hdmi_imx_atomic_check,
 };
 
@@ -195,68 +186,51 @@ static const struct of_device_id dw_hdmi_imx_dt_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, dw_hdmi_imx_dt_ids);
 
+static void dw_hdmi_imx_encoder_cleanup(struct drm_device *drm, void *data)
+{
+   struct drm_encoder *encoder = data;
+
+   drm_encoder_cleanup(encoder);
+}
+
 static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
void *data)
 {
-   struct platform_device *pdev = to_platform_device(dev);
-   const struct dw_hdmi_plat_data *plat_data;
-   const struct of_device_id *match;
struct drm_device *drm = data;
struct drm_encoder *encoder;
-   struct imx_hdmi *hdmi;
+   struct imx_hdmi_encoder *hdmi_encoder;
+   struct imx_hdmi *hdmi = dev_get_drvdata(dev);
int ret;
 
-   if (!pdev->dev.of_node)
-   return -ENODEV;
-
-   hdmi = dev_get_drvdata(dev);
-   memset(hdmi, 0, sizeof(*hdmi));
+   hdmi_encoder = drmm_kzalloc(drm, sizeof(*hdmi_encoder), GFP_KERNEL);
+   if (!hdmi_encoder)
+   return -ENOMEM;
 
-   match = of_match_node(dw_hdmi_imx_dt_ids, pdev->dev.of_node);
-   plat_data = match->data;
-   hdmi->dev = &pdev->dev;
-   encoder = &hdmi->encoder;
+   hdmi_encoder->hdmi = hdmi;
+   encoder = &hdmi_encoder->encoder;
 
ret = imx_drm_encoder_parse_of(drm, encoder, dev->of_node);
if (ret)
return ret;
 
-   ret = dw_hdmi_imx_parse_dt(hdmi);
-   if (ret < 0)
-   return ret;
-
drm_encoder_helper_add(encoder, &dw_hdmi_imx_encoder_helper_funcs);
drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
 
-   hdmi->hdmi = dw_hdmi_bind(pdev, encoder, plat_data);
-
-   /*
-* If dw_hdmi_bind() fails we'll never call dw_hdmi_unbind(),
-* which would have called the encoder cleanup.  Do it manually.
-*/
-   if (IS_ERR(hdmi->hdmi)) {
-   ret = PTR_ERR(hdmi->hdmi);
-   drm_encoder_cleanup(encoder);
-   }
-
-   return ret;
-}
-
-static void dw_hdmi_imx_unbind(struct device *dev, struct device *master,
-  void *data)
-{
-   struct imx_hdmi *hdmi = dev_get_drvdata(dev);
+   ret = drmm_add_action_or_reset(drm, dw_hdmi_imx_encoder_cleanup, 
encoder);
+   if (ret)
+   return ret;
 
-   dw_hdmi_unbind(hdmi->hdmi);
+   return drm_bridge_attach(encoder, hdmi->bridge, NULL, 0);
 }
 
 static const struct component_ops dw_hdmi_imx_ops = {
.bind   = dw_hdmi_imx_bind,
-   .unbind = dw_hdmi_imx_unbind,
 };
 
 stati

[PATCH 7/8] drm/imx: move call to ipu_plane_get_resources() into ipu_plane_init()

2020-07-22 Thread Philipp Zabel
Use drm managed resources to get and put IPU resources automatically.

Signed-off-by: Philipp Zabel 
---
 drivers/gpu/drm/imx/ipuv3-crtc.c  | 25 +
 drivers/gpu/drm/imx/ipuv3-plane.c | 29 -
 drivers/gpu/drm/imx/ipuv3-plane.h |  3 ---
 3 files changed, 21 insertions(+), 36 deletions(-)

diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 2256c9789fc2..b0dacbadaf52 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -385,29 +385,14 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
drm_crtc_init_with_planes(drm, crtc, &ipu_crtc->plane[0]->base, NULL,
  &ipu_crtc_funcs, NULL);
 
-   ret = ipu_plane_get_resources(ipu_crtc->plane[0]);
-   if (ret) {
-   dev_err(ipu_crtc->dev, "getting plane 0 resources failed with 
%d.\n",
-   ret);
-   goto err_put_resources;
-   }
-
/* If this crtc is using the DP, add an overlay plane */
if (pdata->dp >= 0 && pdata->dma[1] > 0) {
ipu_crtc->plane[1] = ipu_plane_init(drm, ipu, pdata->dma[1],
IPU_DP_FLOW_SYNC_FG,
drm_crtc_mask(&ipu_crtc->base),
DRM_PLANE_TYPE_OVERLAY);
-   if (IS_ERR(ipu_crtc->plane[1])) {
+   if (IS_ERR(ipu_crtc->plane[1]))
ipu_crtc->plane[1] = NULL;
-   } else {
-   ret = ipu_plane_get_resources(ipu_crtc->plane[1]);
-   if (ret) {
-   dev_err(ipu_crtc->dev, "getting plane 1 "
-   "resources failed with %d.\n", ret);
-   goto err_put_plane0_res;
-   }
-   }
}
 
ipu_crtc->irq = ipu_plane_irq(ipu_crtc->plane[0]);
@@ -422,11 +407,6 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
 
return 0;
 
-err_put_plane1_res:
-   if (ipu_crtc->plane[1])
-   ipu_plane_put_resources(ipu_crtc->plane[1]);
-err_put_plane0_res:
-   ipu_plane_put_resources(ipu_crtc->plane[0]);
 err_put_resources:
ipu_put_resources(ipu_crtc);
 
@@ -453,9 +433,6 @@ static void ipu_drm_unbind(struct device *dev, struct 
device *master,
struct ipu_crtc *ipu_crtc = dev_get_drvdata(dev);
 
ipu_put_resources(ipu_crtc);
-   if (ipu_crtc->plane[1])
-   ipu_plane_put_resources(ipu_crtc->plane[1]);
-   ipu_plane_put_resources(ipu_crtc->plane[0]);
 }
 
 static const struct component_ops ipu_crtc_ops = {
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c 
b/drivers/gpu/drm/imx/ipuv3-plane.c
index 9543e4c2907a..d7464051514f 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -143,8 +143,10 @@ drm_plane_state_to_vbo(struct drm_plane_state *state)
   fb->format->cpp[2] * x - eba;
 }
 
-void ipu_plane_put_resources(struct ipu_plane *ipu_plane)
+static void ipu_plane_put_resources(struct drm_device *dev, void *ptr)
 {
+   struct ipu_plane *ipu_plane = ptr;
+
if (!IS_ERR_OR_NULL(ipu_plane->dp))
ipu_dp_put(ipu_plane->dp);
if (!IS_ERR_OR_NULL(ipu_plane->dmfc))
@@ -155,7 +157,8 @@ void ipu_plane_put_resources(struct ipu_plane *ipu_plane)
ipu_idmac_put(ipu_plane->alpha_ch);
 }
 
-int ipu_plane_get_resources(struct ipu_plane *ipu_plane)
+static int ipu_plane_get_resources(struct drm_device *dev,
+  struct ipu_plane *ipu_plane)
 {
int ret;
int alpha_ch;
@@ -167,6 +170,10 @@ int ipu_plane_get_resources(struct ipu_plane *ipu_plane)
return ret;
}
 
+   ret = drmm_add_action_or_reset(dev, ipu_plane_put_resources, ipu_plane);
+   if (ret)
+   return ret;
+
alpha_ch = ipu_channel_alpha_channel(ipu_plane->dma);
if (alpha_ch >= 0) {
ipu_plane->alpha_ch = ipu_idmac_get(ipu_plane->ipu, alpha_ch);
@@ -182,7 +189,7 @@ int ipu_plane_get_resources(struct ipu_plane *ipu_plane)
if (IS_ERR(ipu_plane->dmfc)) {
ret = PTR_ERR(ipu_plane->dmfc);
DRM_ERROR("failed to get dmfc: ret %d\n", ret);
-   goto err_out;
+   return ret;
}
 
if (ipu_plane->dp_flow >= 0) {
@@ -190,15 +197,11 @@ int ipu_plane_get_resources(struct ipu_plane *ipu_plane)
if (IS_ERR(ipu_plane->dp)) {
ret = PTR_ERR(ipu_plane->dp);
DRM_ERROR("failed to get dp flow: %d\n", ret);
-   goto err_out;
+   return ret;
}
}
 
return 0;
-err_out:
-   ipu_plane_put_resources(ipu_plane);
-
-   return ret;
 }
 
 static bool ipu_plane_separate_alpha(struc

[PATCH 6/8] drm/imx: ipuv3-plane: use drm managed resources

2020-07-22 Thread Philipp Zabel
Use drmm_kzalloc() to align plane memory lifetime with the drm device,
and use drmm_add_action_or_reset() to make sure drm_plane_cleanup() is
called before the memory is freed. Also handle error return values of
the plane property creation functions.

Signed-off-by: Philipp Zabel 
---
 drivers/gpu/drm/imx/ipuv3-plane.c | 34 +--
 1 file changed, 19 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c 
b/drivers/gpu/drm/imx/ipuv3-plane.c
index 6776ebb3246d..9543e4c2907a 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -11,6 +11,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 
 #include 
@@ -262,16 +263,6 @@ void ipu_plane_disable_deferred(struct drm_plane *plane)
 }
 EXPORT_SYMBOL_GPL(ipu_plane_disable_deferred);
 
-static void ipu_plane_destroy(struct drm_plane *plane)
-{
-   struct ipu_plane *ipu_plane = to_ipu_plane(plane);
-
-   DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
-
-   drm_plane_cleanup(plane);
-   kfree(ipu_plane);
-}
-
 static void ipu_plane_state_reset(struct drm_plane *plane)
 {
unsigned int zpos = (plane->type == DRM_PLANE_TYPE_PRIMARY) ? 0 : 1;
@@ -336,7 +327,6 @@ static bool ipu_plane_format_mod_supported(struct drm_plane 
*plane,
 static const struct drm_plane_funcs ipu_plane_funcs = {
.update_plane   = drm_atomic_helper_update_plane,
.disable_plane  = drm_atomic_helper_disable_plane,
-   .destroy= ipu_plane_destroy,
.reset  = ipu_plane_state_reset,
.atomic_duplicate_state = ipu_plane_duplicate_state,
.atomic_destroy_state   = ipu_plane_destroy_state,
@@ -822,6 +812,13 @@ int ipu_planes_assign_pre(struct drm_device *dev,
 }
 EXPORT_SYMBOL_GPL(ipu_planes_assign_pre);
 
+static void ipu_plane_cleanup(struct drm_device *dev, void *data)
+{
+   struct ipu_plane *ipu_plane = data;
+
+   drm_plane_cleanup(&ipu_plane->base);
+}
+
 struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
 int dma, int dp, unsigned int possible_crtcs,
 enum drm_plane_type type)
@@ -834,7 +831,7 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, 
struct ipu_soc *ipu,
DRM_DEBUG_KMS("channel %d, dp flow %d, possible_crtcs=0x%x\n",
  dma, dp, possible_crtcs);
 
-   ipu_plane = kzalloc(sizeof(*ipu_plane), GFP_KERNEL);
+   ipu_plane = drmm_kzalloc(dev, sizeof(*ipu_plane), GFP_KERNEL);
if (!ipu_plane) {
DRM_ERROR("failed to allocate plane\n");
return ERR_PTR(-ENOMEM);
@@ -853,16 +850,23 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, 
struct ipu_soc *ipu,
   modifiers, type, NULL);
if (ret) {
DRM_ERROR("failed to initialize plane\n");
-   kfree(ipu_plane);
return ERR_PTR(ret);
}
 
+   ret = drmm_add_action_or_reset(dev, ipu_plane_cleanup, ipu_plane);
+   if (ret)
+   return ERR_PTR(ret);
+
drm_plane_helper_add(&ipu_plane->base, &ipu_plane_helper_funcs);
 
if (dp == IPU_DP_FLOW_SYNC_BG || dp == IPU_DP_FLOW_SYNC_FG)
-   drm_plane_create_zpos_property(&ipu_plane->base, zpos, 0, 1);
+   ret = drm_plane_create_zpos_property(&ipu_plane->base, zpos, 0,
+1);
else
-   drm_plane_create_zpos_immutable_property(&ipu_plane->base, 0);
+   ret = drm_plane_create_zpos_immutable_property(&ipu_plane->base,
+  0);
+   if (ret)
+   return ERR_PTR(ret);
 
return ipu_plane;
 }
-- 
2.20.1



[PATCH 4/8] drm/imx: imx-tve: use drm managed resources

2020-07-22 Thread Philipp Zabel
Move devres regmap, clock, and interrupt requests into probe.
Use drmm_kzalloc() to align encoder memory lifetime with the drm device,
and use drmm_add_action_or_reset() to make sure drm_encoder_cleanup() is
called before the memory is freed.

Signed-off-by: Philipp Zabel 
---
 drivers/gpu/drm/imx/imx-tve.c | 95 +--
 1 file changed, 57 insertions(+), 38 deletions(-)

diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index ef3c25d87d87..257a06f6e408 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -19,6 +19,7 @@
 
 #include 
 #include 
+#include 
 #include 
 #include 
 
@@ -99,9 +100,13 @@ enum {
TVE_MODE_VGA,
 };
 
-struct imx_tve {
+struct imx_tve_encoder {
struct drm_connector connector;
struct drm_encoder encoder;
+   struct imx_tve *tve;
+};
+
+struct imx_tve {
struct device *dev;
int mode;
int di_hsync_pin;
@@ -118,12 +123,12 @@ struct imx_tve {
 
 static inline struct imx_tve *con_to_tve(struct drm_connector *c)
 {
-   return container_of(c, struct imx_tve, connector);
+   return container_of(c, struct imx_tve_encoder, connector)->tve;
 }
 
 static inline struct imx_tve *enc_to_tve(struct drm_encoder *e)
 {
-   return container_of(e, struct imx_tve, encoder);
+   return container_of(e, struct imx_tve_encoder, encoder)->tve;
 }
 
 static void tve_enable(struct imx_tve *tve)
@@ -418,7 +423,7 @@ static int tve_clk_init(struct imx_tve *tve, void __iomem 
*base)
init.parent_names = (const char **)&tve_di_parent;
 
tve->clk_hw_di.init = &init;
-   tve->di_clk = clk_register(tve->dev, &tve->clk_hw_di);
+   tve->di_clk = devm_clk_register(tve->dev, &tve->clk_hw_di);
if (IS_ERR(tve->di_clk)) {
dev_err(tve->dev, "failed to register TVE output clock: %ld\n",
PTR_ERR(tve->di_clk));
@@ -428,31 +433,45 @@ static int tve_clk_init(struct imx_tve *tve, void __iomem 
*base)
return 0;
 }
 
-static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve)
+static void imx_tve_encoder_cleanup(struct drm_device *drm, void *ptr)
+{
+   struct drm_encoder *encoder = ptr;
+
+   drm_encoder_cleanup(encoder);
+}
+
+static int imx_tve_register(struct drm_device *drm, struct imx_tve_encoder 
*tvee)
 {
+   struct imx_tve *tve = tvee->tve;
+   struct drm_encoder *encoder = &tvee->encoder;
+   struct drm_connector *connector = &tvee->connector;
int encoder_type;
int ret;
 
encoder_type = tve->mode == TVE_MODE_VGA ?
DRM_MODE_ENCODER_DAC : DRM_MODE_ENCODER_TVDAC;
 
-   ret = imx_drm_encoder_parse_of(drm, &tve->encoder, tve->dev->of_node);
+   ret = imx_drm_encoder_parse_of(drm, encoder, tve->dev->of_node);
if (ret)
return ret;
 
-   drm_encoder_helper_add(&tve->encoder, &imx_tve_encoder_helper_funcs);
-   drm_simple_encoder_init(drm, &tve->encoder, encoder_type);
+   drm_encoder_helper_add(encoder, &imx_tve_encoder_helper_funcs);
+   ret = drm_simple_encoder_init(drm, encoder, encoder_type);
+   if (ret)
+   return ret;
 
-   drm_connector_helper_add(&tve->connector,
-   &imx_tve_connector_helper_funcs);
-   drm_connector_init_with_ddc(drm, &tve->connector,
-   &imx_tve_connector_funcs,
-   DRM_MODE_CONNECTOR_VGA,
-   tve->ddc);
+   ret = drmm_add_action_or_reset(drm, imx_tve_encoder_cleanup, encoder);
+   if (ret)
+   return ret;
 
-   drm_connector_attach_encoder(&tve->connector, &tve->encoder);
+   drm_connector_helper_add(connector, &imx_tve_connector_helper_funcs);
+   ret = drm_connector_init_with_ddc(drm, connector,
+ &imx_tve_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA, tve->ddc);
+   if (ret)
+   return ret;
 
-   return 0;
+   return drm_connector_attach_encoder(connector, encoder);
 }
 
 static void imx_tve_disable_regulator(void *data)
@@ -502,8 +521,26 @@ static const int of_get_tve_mode(struct device_node *np)
 
 static int imx_tve_bind(struct device *dev, struct device *master, void *data)
 {
-   struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = data;
+   struct imx_tve *tve = dev_get_drvdata(dev);
+   struct imx_tve_encoder *tvee;
+
+   tvee = drmm_kzalloc(drm, sizeof(*tvee), GFP_KERNEL);
+   if (!tvee)
+   return -ENOMEM;
+
+   tvee->tve = tve;
+
+   return imx_tve_register(drm, tvee);
+}
+
+static const struct component_ops imx_tve_ops = {
+   .bind   = imx_tve_bind,
+};
+
+static int imx_tve_probe(struct platform_device *pdev)
+{
+   struct device *dev = &pdev->dev;
struct device_node *np =

[PATCH 8/8] drm/imx: ipuv3-crtc: use drm managed resources

2020-07-22 Thread Philipp Zabel
Use drmm_kzalloc() to align crtc memory lifetime with the drm device,
and use drmm_add_action_or_reset() to make sure IPU resources are
released and drm_crtc_cleanup() is called before the memory is freed.

Signed-off-by: Philipp Zabel 
---
 drivers/gpu/drm/imx/ipuv3-crtc.c | 75 ++--
 1 file changed, 33 insertions(+), 42 deletions(-)

diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index b0dacbadaf52..0e2f4b30d9ba 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -20,6 +20,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 
@@ -166,7 +167,6 @@ static void ipu_disable_vblank(struct drm_crtc *crtc)
 
 static const struct drm_crtc_funcs ipu_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
-   .destroy = drm_crtc_cleanup,
.page_flip = drm_atomic_helper_page_flip,
.reset = imx_drm_crtc_reset,
.atomic_duplicate_state = imx_drm_crtc_duplicate_state,
@@ -323,37 +323,42 @@ static const struct drm_crtc_helper_funcs 
ipu_helper_funcs = {
.atomic_enable = ipu_crtc_atomic_enable,
 };
 
-static void ipu_put_resources(struct ipu_crtc *ipu_crtc)
+static void ipu_put_resources(struct drm_device *dev, void *ptr)
 {
+   struct ipu_crtc *ipu_crtc = ptr;
+
if (!IS_ERR_OR_NULL(ipu_crtc->dc))
ipu_dc_put(ipu_crtc->dc);
if (!IS_ERR_OR_NULL(ipu_crtc->di))
ipu_di_put(ipu_crtc->di);
 }
 
-static int ipu_get_resources(struct ipu_crtc *ipu_crtc,
+static int ipu_get_resources(struct drm_device *dev, struct ipu_crtc *ipu_crtc,
struct ipu_client_platformdata *pdata)
 {
struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
int ret;
 
ipu_crtc->dc = ipu_dc_get(ipu, pdata->dc);
-   if (IS_ERR(ipu_crtc->dc)) {
-   ret = PTR_ERR(ipu_crtc->dc);
-   goto err_out;
-   }
+   if (IS_ERR(ipu_crtc->dc))
+   return PTR_ERR(ipu_crtc->dc);
+
+   ret = drmm_add_action_or_reset(dev, ipu_put_resources, ipu_crtc);
+   if (ret)
+   return ret;
 
ipu_crtc->di = ipu_di_get(ipu, pdata->di);
-   if (IS_ERR(ipu_crtc->di)) {
-   ret = PTR_ERR(ipu_crtc->di);
-   goto err_out;
-   }
+   if (IS_ERR(ipu_crtc->di))
+   return PTR_ERR(ipu_crtc->di);
 
return 0;
-err_out:
-   ipu_put_resources(ipu_crtc);
+}
 
-   return ret;
+static void ipu_crtc_cleanup(struct drm_device *drm, void *ptr)
+{
+   struct drm_crtc *crtc = ptr;
+
+   drm_crtc_cleanup(crtc);
 }
 
 static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
@@ -364,7 +369,7 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
int dp = -EINVAL;
int ret;
 
-   ret = ipu_get_resources(ipu_crtc, pdata);
+   ret = ipu_get_resources(drm, ipu_crtc, pdata);
if (ret) {
dev_err(ipu_crtc->dev, "getting resources failed with %d.\n",
ret);
@@ -377,13 +382,19 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
DRM_PLANE_TYPE_PRIMARY);
if (IS_ERR(ipu_crtc->plane[0])) {
ret = PTR_ERR(ipu_crtc->plane[0]);
-   goto err_put_resources;
+   return ret;
}
 
crtc->port = pdata->of_node;
drm_crtc_helper_add(crtc, &ipu_helper_funcs);
-   drm_crtc_init_with_planes(drm, crtc, &ipu_crtc->plane[0]->base, NULL,
- &ipu_crtc_funcs, NULL);
+   ret = drm_crtc_init_with_planes(drm, crtc, &ipu_crtc->plane[0]->base,
+   NULL, &ipu_crtc_funcs, NULL);
+   if (ret)
+   return ret;
+
+   ret = drmm_add_action_or_reset(drm, ipu_crtc_cleanup, crtc);
+   if (ret)
+   return ret;
 
/* If this crtc is using the DP, add an overlay plane */
if (pdata->dp >= 0 && pdata->dma[1] > 0) {
@@ -400,17 +411,12 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
"imx_drm", ipu_crtc);
if (ret < 0) {
dev_err(ipu_crtc->dev, "irq request failed with %d.\n", ret);
-   goto err_put_plane1_res;
+   return ret;
}
/* Only enable IRQ when we actually need it to trigger work. */
disable_irq(ipu_crtc->irq);
 
return 0;
-
-err_put_resources:
-   ipu_put_resources(ipu_crtc);
-
-   return ret;
 }
 
 static int ipu_drm_bind(struct device *dev, struct device *master, void *data)
@@ -419,31 +425,22 @@ static int ipu_drm_bind(struct device *dev, struct device 
*master, void *data)
struct drm_device *drm = data;
struct ipu_crtc *ipu_crtc;
 
-   ipu_crtc = dev_get_drvdata(dev);
-   memset(ipu_crtc, 0, sizeof(*ipu_crtc));
+   ipu_crtc = drmm_kzalloc(drm, sizeof(*ipu_crtc), GFP_KERNEL);
+   if (!ipu_crtc)
+  

[PATCH 1/8] drm/imx: drop explicit drm_mode_config_cleanup

2020-07-22 Thread Philipp Zabel
Use drmm_mode_config_init() and drop the explicit calls to
drm_mode_config_cleanup().

Signed-off-by: Philipp Zabel 
---
 drivers/gpu/drm/imx/imx-drm-core.c | 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/imx/imx-drm-core.c 
b/drivers/gpu/drm/imx/imx-drm-core.c
index 3421043a558d..d10887b9b2e4 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -20,6 +20,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -222,7 +223,9 @@ static int imx_drm_bind(struct device *dev)
drm->mode_config.allow_fb_modifiers = true;
drm->mode_config.normalize_zpos = true;
 
-   drm_mode_config_init(drm);
+   ret = drmm_mode_config_init(drm);
+   if (ret)
+   return ret;
 
ret = drm_vblank_init(drm, MAX_CRTC);
if (ret)
@@ -261,7 +264,6 @@ static int imx_drm_bind(struct device *dev)
drm_kms_helper_poll_fini(drm);
component_unbind_all(drm->dev, drm);
 err_kms:
-   drm_mode_config_cleanup(drm);
drm_dev_put(drm);
 
return ret;
@@ -277,11 +279,9 @@ static void imx_drm_unbind(struct device *dev)
 
component_unbind_all(drm->dev, drm);
 
-   drm_mode_config_cleanup(drm);
+   drm_dev_put(drm);
 
dev_set_drvdata(dev, NULL);
-
-   drm_dev_put(drm);
 }
 
 static const struct component_master_ops imx_drm_ops = {
-- 
2.20.1



[PATCH 5/8] drm/imx: parallel-display: use drm managed resources

2020-07-22 Thread Philipp Zabel
Use drmm_kzalloc() to align encoder memory lifetime with the drm device,
and use drmm_add_action_or_reset() to make sure drm_encoder_cleanup() is
called before the memory is freed.

Signed-off-by: Philipp Zabel 
---
 drivers/gpu/drm/imx/parallel-display.c | 50 +-
 1 file changed, 26 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/imx/parallel-display.c 
b/drivers/gpu/drm/imx/parallel-display.c
index 8232f512b9ed..182d88d7f666 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -15,6 +15,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -28,7 +29,6 @@ struct imx_parallel_display {
struct drm_bridge bridge;
struct device *dev;
void *edid;
-   int edid_len;
u32 bus_format;
u32 bus_flags;
struct drm_display_mode mode;
@@ -259,6 +259,13 @@ static const struct drm_bridge_funcs imx_pd_bridge_funcs = 
{
.atomic_get_output_bus_fmts = imx_pd_bridge_atomic_get_output_bus_fmts,
 };
 
+static void imx_pd_encoder_cleanup(struct drm_device *drm, void *ptr)
+{
+   struct drm_encoder *encoder = ptr;
+
+   drm_encoder_cleanup(encoder);
+}
+
 static int imx_pd_register(struct drm_device *drm,
struct imx_parallel_display *imxpd)
 {
@@ -276,7 +283,13 @@ static int imx_pd_register(struct drm_device *drm,
 */
imxpd->connector.dpms = DRM_MODE_DPMS_OFF;
 
-   drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_NONE);
+   ret = drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_NONE);
+   if (ret)
+   return ret;
+
+   ret = drmm_add_action_or_reset(drm, imx_pd_encoder_cleanup, encoder);
+   if (ret)
+   return ret;
 
imxpd->bridge.funcs = &imx_pd_bridge_funcs;
drm_bridge_attach(encoder, &imxpd->bridge, NULL, 0);
@@ -310,12 +323,14 @@ static int imx_pd_bind(struct device *dev, struct device 
*master, void *data)
struct device_node *np = dev->of_node;
const u8 *edidp;
struct imx_parallel_display *imxpd;
+   int edid_len;
int ret;
u32 bus_format = 0;
const char *fmt;
 
-   imxpd = dev_get_drvdata(dev);
-   memset(imxpd, 0, sizeof(*imxpd));
+   imxpd = drmm_kzalloc(drm, sizeof(*imxpd), GFP_KERNEL);
+   if (!imxpd)
+   return -ENOMEM;
 
/* port@1 is the output port */
ret = drm_of_find_panel_or_bridge(np, 1, 0, &imxpd->panel,
@@ -323,9 +338,13 @@ static int imx_pd_bind(struct device *dev, struct device 
*master, void *data)
if (ret && ret != -ENODEV)
return ret;
 
-   edidp = of_get_property(np, "edid", &imxpd->edid_len);
-   if (edidp)
-   imxpd->edid = kmemdup(edidp, imxpd->edid_len, GFP_KERNEL);
+   edidp = of_get_property(np, "edid", &edid_len);
+   if (edidp) {
+   imxpd->edid = drmm_kmalloc(drm, edid_len, GFP_KERNEL);
+   if (!imxpd->edid)
+   return -ENOMEM;
+   memcpy(imxpd->edid, edidp, edid_len);
+   }
 
ret = of_property_read_string(np, "interface-pix-fmt", &fmt);
if (!ret) {
@@ -349,29 +368,12 @@ static int imx_pd_bind(struct device *dev, struct device 
*master, void *data)
return 0;
 }
 
-static void imx_pd_unbind(struct device *dev, struct device *master,
-   void *data)
-{
-   struct imx_parallel_display *imxpd = dev_get_drvdata(dev);
-
-   kfree(imxpd->edid);
-}
-
 static const struct component_ops imx_pd_ops = {
.bind   = imx_pd_bind,
-   .unbind = imx_pd_unbind,
 };
 
 static int imx_pd_probe(struct platform_device *pdev)
 {
-   struct imx_parallel_display *imxpd;
-
-   imxpd = devm_kzalloc(&pdev->dev, sizeof(*imxpd), GFP_KERNEL);
-   if (!imxpd)
-   return -ENOMEM;
-
-   platform_set_drvdata(pdev, imxpd);
-
return component_add(&pdev->dev, &imx_pd_ops);
 }
 
-- 
2.20.1



[PATCH 3/8] drm/imx: imx-ldb: use drm managed resources

2020-07-22 Thread Philipp Zabel
Use drmm_kzalloc() to align encoder memory lifetime with the drm device,
and use drmm_add_action_or_reset() to make sure drm_encoder_cleanup() is
called before the memory is freed.

Signed-off-by: Philipp Zabel 
---
 drivers/gpu/drm/imx/imx-ldb.c | 67 +++
 1 file changed, 36 insertions(+), 31 deletions(-)

diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index af757d1e21fe..ef26a2960db9 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -22,6 +22,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -62,7 +63,6 @@ struct imx_ldb_channel {
struct i2c_adapter *ddc;
int chno;
void *edid;
-   int edid_len;
struct drm_display_mode mode;
int mode_valid;
u32 bus_format;
@@ -408,6 +408,13 @@ static int imx_ldb_get_clk(struct imx_ldb *ldb, int chno)
return PTR_ERR_OR_ZERO(ldb->clk_pll[chno]);
 }
 
+static void imx_ldb_encoder_cleanup(struct drm_device *drm, void *data)
+{
+   struct drm_encoder *encoder = data;
+
+   drm_encoder_cleanup(encoder);
+}
+
 static int imx_ldb_register(struct drm_device *drm,
struct imx_ldb_channel *imx_ldb_ch)
 {
@@ -432,6 +439,10 @@ static int imx_ldb_register(struct drm_device *drm,
drm_encoder_helper_add(encoder, &imx_ldb_encoder_helper_funcs);
drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_LVDS);
 
+   ret = drmm_add_action_or_reset(drm, imx_ldb_encoder_cleanup, encoder);
+   if (ret)
+   return ret;
+
if (imx_ldb_ch->bridge) {
ret = drm_bridge_attach(&imx_ldb_ch->encoder,
imx_ldb_ch->bridge, NULL, 0);
@@ -536,15 +547,14 @@ static int imx_ldb_panel_ddc(struct device *dev,
}
 
if (!channel->ddc) {
+   int edid_len;
+
/* if no DDC available, fallback to hardcoded EDID */
dev_dbg(dev, "no ddc available\n");
 
-   edidp = of_get_property(child, "edid",
-   &channel->edid_len);
+   edidp = of_get_property(child, "edid", &edid_len);
if (edidp) {
-   channel->edid = kmemdup(edidp,
-   channel->edid_len,
-   GFP_KERNEL);
+   channel->edid = kmemdup(edidp, edid_len, GFP_KERNEL);
} else if (!channel->panel) {
/* fallback to display-timings node */
ret = of_get_drm_display_mode(child,
@@ -558,6 +568,19 @@ static int imx_ldb_panel_ddc(struct device *dev,
return 0;
 }
 
+static void imx_ldb_cleanup(struct drm_device *drm, void *data)
+{
+   struct imx_ldb *imx_ldb = data;
+   int i;
+
+   for (i = 0; i < 2; i++) {
+   struct imx_ldb_channel *channel = &imx_ldb->channel[i];
+
+   kfree(channel->edid);
+   i2c_put_adapter(channel->ddc);
+   }
+}
+
 static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
 {
struct drm_device *drm = data;
@@ -570,8 +593,13 @@ static int imx_ldb_bind(struct device *dev, struct device 
*master, void *data)
int ret;
int i;
 
-   imx_ldb = dev_get_drvdata(dev);
-   memset(imx_ldb, 0, sizeof(*imx_ldb));
+   imx_ldb = drmm_kzalloc(drm, sizeof(*imx_ldb), GFP_KERNEL);
+   if (!imx_ldb)
+   return -ENOMEM;
+
+   ret = drmm_add_action_or_reset(drm, imx_ldb_cleanup, imx_ldb);
+   if (ret)
+   return ret;
 
imx_ldb->regmap = syscon_regmap_lookup_by_phandle(np, "gpr");
if (IS_ERR(imx_ldb->regmap)) {
@@ -686,35 +714,12 @@ static int imx_ldb_bind(struct device *dev, struct device 
*master, void *data)
return ret;
 }
 
-static void imx_ldb_unbind(struct device *dev, struct device *master,
-   void *data)
-{
-   struct imx_ldb *imx_ldb = dev_get_drvdata(dev);
-   int i;
-
-   for (i = 0; i < 2; i++) {
-   struct imx_ldb_channel *channel = &imx_ldb->channel[i];
-
-   kfree(channel->edid);
-   i2c_put_adapter(channel->ddc);
-   }
-}
-
 static const struct component_ops imx_ldb_ops = {
.bind   = imx_ldb_bind,
-   .unbind = imx_ldb_unbind,
 };
 
 static int imx_ldb_probe(struct platform_device *pdev)
 {
-   struct imx_ldb *imx_ldb;
-
-   imx_ldb = devm_kzalloc(&pdev->dev, sizeof(*imx_ldb), GFP_KERNEL);
-   if (!imx_ldb)
-   return -ENOMEM;
-
-   platform_set_drvdata(pdev, imx_ldb);
-
return component_add(&pdev->dev, &imx_ldb_ops);
 }
 
-- 
2.20.1



[PATCH 1/2] gpu: ipu-v3: Add Rec.709 limited range support to DP

2020-07-22 Thread Philipp Zabel
Add YCbCr encoding and quantization range parameters to
ipu_dp_setup_channel() and configure the CSC DP matrix
accordingly.

Signed-off-by: Philipp Zabel 
---
 drivers/gpu/drm/imx/ipuv3-plane.c |  9 ++---
 drivers/gpu/ipu-v3/ipu-dp.c   | 25 ++---
 include/video/imx-ipu-v3.h|  2 ++
 3 files changed, 30 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c 
b/drivers/gpu/drm/imx/ipuv3-plane.c
index d7464051514f..faecba638b76 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -608,11 +608,14 @@ static void ipu_plane_atomic_update(struct drm_plane 
*plane,
ics = ipu_drm_fourcc_to_colorspace(fb->format->format);
switch (ipu_plane->dp_flow) {
case IPU_DP_FLOW_SYNC_BG:
-   ipu_dp_setup_channel(ipu_plane->dp, ics, IPUV3_COLORSPACE_RGB);
+   ipu_dp_setup_channel(ipu_plane->dp, DRM_COLOR_YCBCR_BT601,
+DRM_COLOR_YCBCR_LIMITED_RANGE, ics,
+IPUV3_COLORSPACE_RGB);
break;
case IPU_DP_FLOW_SYNC_FG:
-   ipu_dp_setup_channel(ipu_plane->dp, ics,
-   IPUV3_COLORSPACE_UNKNOWN);
+   ipu_dp_setup_channel(ipu_plane->dp, DRM_COLOR_YCBCR_BT601,
+DRM_COLOR_YCBCR_LIMITED_RANGE, ics,
+IPUV3_COLORSPACE_UNKNOWN);
break;
}
 
diff --git a/drivers/gpu/ipu-v3/ipu-dp.c b/drivers/gpu/ipu-v3/ipu-dp.c
index 8f67e985f26a..6a558205db96 100644
--- a/drivers/gpu/ipu-v3/ipu-dp.c
+++ b/drivers/gpu/ipu-v3/ipu-dp.c
@@ -10,6 +10,7 @@
 #include 
 #include 
 
+#include 
 #include 
 #include "ipu-prv.h"
 
@@ -125,6 +126,8 @@ int ipu_dp_set_window_pos(struct ipu_dp *dp, u16 x_pos, u16 
y_pos)
 EXPORT_SYMBOL_GPL(ipu_dp_set_window_pos);
 
 static void ipu_dp_csc_init(struct ipu_flow *flow,
+   enum drm_color_encoding ycbcr_enc,
+   enum drm_color_range range,
enum ipu_color_space in,
enum ipu_color_space out,
u32 place)
@@ -148,7 +151,18 @@ static void ipu_dp_csc_init(struct ipu_flow *flow,
flow->base + DP_CSC_0);
writel(0x200 | (2 << 14) | (0x200 << 16) | (2 << 30),
flow->base + DP_CSC_1);
+   } else if (ycbcr_enc == DRM_COLOR_YCBCR_BT709) {
+   /* Rec.709 limited range */
+   writel(0x095 | (0x000 << 16), flow->base + DP_CSC_A_0);
+   writel(0x0e5 | (0x095 << 16), flow->base + DP_CSC_A_1);
+   writel(0x3e5 | (0x3bc << 16), flow->base + DP_CSC_A_2);
+   writel(0x095 | (0x10e << 16), flow->base + DP_CSC_A_3);
+   writel(0x000 | (0x3e10 << 16) | (1 << 30),
+   flow->base + DP_CSC_0);
+   writel(0x09a | (1 << 14) | (0x3dbe << 16) | (1 << 30),
+   flow->base + DP_CSC_1);
} else {
+   /* BT.601 limited range */
writel(0x095 | (0x000 << 16), flow->base + DP_CSC_A_0);
writel(0x0cc | (0x095 << 16), flow->base + DP_CSC_A_1);
writel(0x3ce | (0x398 << 16), flow->base + DP_CSC_A_2);
@@ -165,6 +179,8 @@ static void ipu_dp_csc_init(struct ipu_flow *flow,
 }
 
 int ipu_dp_setup_channel(struct ipu_dp *dp,
+   enum drm_color_encoding ycbcr_enc,
+   enum drm_color_range range,
enum ipu_color_space in,
enum ipu_color_space out)
 {
@@ -183,7 +199,8 @@ int ipu_dp_setup_channel(struct ipu_dp *dp,
 * foreground and background are of same colorspace, put
 * colorspace converter after combining unit.
 */
-   ipu_dp_csc_init(flow, flow->foreground.in_cs, flow->out_cs,
+   ipu_dp_csc_init(flow, ycbcr_enc, range,
+   flow->foreground.in_cs, flow->out_cs,
DP_COM_CONF_CSC_DEF_BOTH);
} else {
if (flow->foreground.in_cs == IPUV3_COLORSPACE_UNKNOWN ||
@@ -192,10 +209,12 @@ int ipu_dp_setup_channel(struct ipu_dp *dp,
 * foreground identical to output, apply color
 * conversion on background
 */
-   ipu_dp_csc_init(flow, flow->background.in_cs,
+   ipu_dp_csc_init(flow, ycbcr_enc, range,
+   flow->background.in_cs,
flow->out_cs, DP_COM_CONF_CSC_DEF_BG);
else
-   ipu_dp_csc_init(flow, flow->foreground.in_cs,
+   ipu_dp_csc_init(flow, ycbcr_enc, range,
+   flow->foreground.in_cs,
flow->out_cs, DP_COM_CONF_CSC_DEF_FG);
  

[PATCH 2/2] drm/imx: ipuv3-plane: add color encoding and range properties

2020-07-22 Thread Philipp Zabel
Add COLOR_ENCODING and COLOR_RANGE plane properties and use them to
control the DP CSC matrix.

Signed-off-by: Philipp Zabel 
---
 drivers/gpu/drm/imx/ipuv3-plane.c | 44 +--
 1 file changed, 30 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c 
b/drivers/gpu/drm/imx/ipuv3-plane.c
index faecba638b76..270e1e21baf7 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -284,6 +284,8 @@ static void ipu_plane_state_reset(struct drm_plane *plane)
__drm_atomic_helper_plane_reset(plane, &ipu_state->base);
ipu_state->base.zpos = zpos;
ipu_state->base.normalized_zpos = zpos;
+   ipu_state->base.color_encoding = DRM_COLOR_YCBCR_BT601;
+   ipu_state->base.color_range = DRM_COLOR_YCBCR_LIMITED_RANGE;
}
 }
 
@@ -589,6 +591,25 @@ static void ipu_plane_atomic_update(struct drm_plane 
*plane,
  fb->modifier, &eba);
}
 
+   if (!old_state->fb ||
+   old_state->fb->format->format != fb->format->format ||
+   old_state->color_encoding != state->color_encoding ||
+   old_state->color_range != state->color_range) {
+   ics = ipu_drm_fourcc_to_colorspace(fb->format->format);
+   switch (ipu_plane->dp_flow) {
+   case IPU_DP_FLOW_SYNC_BG:
+   ipu_dp_setup_channel(ipu_plane->dp, state->color_encoding,
+state->color_range, ics,
+IPUV3_COLORSPACE_RGB);
+   break;
+   case IPU_DP_FLOW_SYNC_FG:
+   ipu_dp_setup_channel(ipu_plane->dp, state->color_encoding,
+state->color_range, ics,
+IPUV3_COLORSPACE_UNKNOWN);
+   break;
+   }
+   }
+
if (old_state->fb && !drm_atomic_crtc_needs_modeset(crtc_state)) {
/* nothing to do if PRE is used */
if (ipu_state->use_pre)
@@ -605,20 +626,6 @@ static void ipu_plane_atomic_update(struct drm_plane 
*plane,
return;
}
 
-   ics = ipu_drm_fourcc_to_colorspace(fb->format->format);
-   switch (ipu_plane->dp_flow) {
-   case IPU_DP_FLOW_SYNC_BG:
-   ipu_dp_setup_channel(ipu_plane->dp, DRM_COLOR_YCBCR_BT601,
-DRM_COLOR_YCBCR_LIMITED_RANGE, ics,
-IPUV3_COLORSPACE_RGB);
-   break;
-   case IPU_DP_FLOW_SYNC_FG:
-   ipu_dp_setup_channel(ipu_plane->dp, DRM_COLOR_YCBCR_BT601,
-DRM_COLOR_YCBCR_LIMITED_RANGE, ics,
-IPUV3_COLORSPACE_UNKNOWN);
-   break;
-   }
-
ipu_dmfc_config_wait4eot(ipu_plane->dmfc, drm_rect_width(dst));
 
width = drm_rect_width(&state->src) >> 16;
@@ -875,6 +882,15 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, 
struct ipu_soc *ipu,
if (ret)
return ERR_PTR(ret);
 
+   ret = drm_plane_create_color_properties(&ipu_plane->base,
+   BIT(DRM_COLOR_YCBCR_BT601) |
+   BIT(DRM_COLOR_YCBCR_BT709),
+   BIT(DRM_COLOR_YCBCR_LIMITED_RANGE),
+   DRM_COLOR_YCBCR_BT601,
+   DRM_COLOR_YCBCR_LIMITED_RANGE);
+   if (ret)
+   return ERR_PTR(ret);
+
ret = ipu_plane_get_resources(dev, ipu_plane);
if (ret) {
DRM_ERROR("failed to get %s plane resources: %pe\n",
-- 
2.20.1



Re: [PATCH 5/8] drm/imx: parallel-display: use drm managed resources

2020-07-22 Thread Philipp Zabel
On Wed, 2020-07-22 at 15:30 +0200, Philipp Zabel wrote:
[...]
> and use drmm_add_action_or_reset() to make sure drm_encoder_cleanup() is
> called before the memory is freed.
[...]
> @@ -259,6 +259,13 @@ static const struct drm_bridge_funcs imx_pd_bridge_funcs 
> = {
>   .atomic_get_output_bus_fmts = imx_pd_bridge_atomic_get_output_bus_fmts,
>  };
>  
> +static void imx_pd_encoder_cleanup(struct drm_device *drm, void *ptr)
> +{
> + struct drm_encoder *encoder = ptr;
> +
> + drm_encoder_cleanup(encoder);
> +}
> +
>  static int imx_pd_register(struct drm_device *drm,
>   struct imx_parallel_display *imxpd)
>  {
> @@ -276,7 +283,13 @@ static int imx_pd_register(struct drm_device *drm,
>*/
>   imxpd->connector.dpms = DRM_MODE_DPMS_OFF;
>  
> - drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_NONE);
> + ret = drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_NONE);
> + if (ret)
> + return ret;
> +
> + ret = drmm_add_action_or_reset(drm, imx_pd_encoder_cleanup, encoder);
> + if (ret)
> + return ret;

This is only required because this is a component driver: our
drmm_kzalloc() is called after drmm_mode_config_init(), so we can't rely
on drm_mode_config_init_release() for cleanup. That is only called after
drmres has already freed our memory.
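
For illustration, a minimal sketch of that ordering (the wrapper struct
and function names here are made up; only the drm/drmm calls are real):

#include <drm/drm_managed.h>
#include <drm/drm_simple_kms_helper.h>

struct my_encoder {			/* hypothetical wrapper */
	struct drm_encoder encoder;
};

static void my_encoder_cleanup(struct drm_device *drm, void *ptr)
{
	drm_encoder_cleanup(ptr);
}

static int my_bind(struct device *dev, struct device *master, void *data)
{
	struct drm_device *drm = data;
	struct my_encoder *enc;
	int ret;

	/*
	 * drmres releases run LIFO: this allocation is freed *before*
	 * drm_mode_config_init_release() would get to run the encoder's
	 * .destroy callback ...
	 */
	enc = drmm_kzalloc(drm, sizeof(*enc), GFP_KERNEL);
	if (!enc)
		return -ENOMEM;

	ret = drm_simple_encoder_init(drm, &enc->encoder,
				      DRM_MODE_ENCODER_NONE);
	if (ret)
		return ret;

	/*
	 * ... so an explicit action is registered after the allocation,
	 * guaranteeing drm_encoder_cleanup() runs first.
	 */
	return drmm_add_action_or_reset(drm, my_encoder_cleanup,
					&enc->encoder);
}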

regards
Philipp


Re: [PATCH] drm/vkms: add missing drm_crtc_vblank_put to the get/put pair on flush

2020-07-22 Thread Melissa Wen
On 07/22, dan...@ffwll.ch wrote:
> On Wed, Jul 22, 2020 at 08:04:11AM -0300, Melissa Wen wrote:
> > This patch adds a missing drm_crtc_vblank_put op to the pair
> > drm_crtc_vblank_get/put (inc/decrement counter to guarantee vblanks).
> > 
> > It clears the execution of the following kms_cursor_crc subtests:
> > 1. pipe-A-cursor-[size,alpha-opaque, NxN-(on-screen, off-screen, sliding,
> >random, fast-moving])] - successful when running individually.
> > 2. pipe-A-cursor-dpms passes again
> > 3. pipe-A-cursor-suspend also passes
> > 
> > The issue was initially tracked in the sequential execution of IGT
> > kms_cursor_crc subtest: when running the test sequence or one of its
> > subtests twice, the odd execs complete and the pairs get stuck in an
> > endless wait. In the IGT code, calling a wait_for_vblank before the start
> > of CRC capture prevented the busy-wait. But the problem persisted in the
> > pipe-A-cursor-dpms and -suspend subtests.
> > 
> > Checking the history, the pipe-A-cursor-dpms subtest was successful when,
> > in vkms_atomic_commit_tail, instead of using the flip_done op, it used
> > wait_for_vblanks. Another way to prevent blocking was wait_one_vblank when
> > enabling crtc. However, in both cases, pipe-A-cursor-suspend persisted
> > blocking in the 2nd start of CRC capture, which may indicate that
> > something got stuck in the step of CRC setup. Indeed, wait_one_vblank in
> > the crc setup was able to sync things and free all kms_cursor_crc
> > subtests.
> > 
> > Tracing and comparing a clean run with a blocked one:
> > - in a clean one, vkms_crtc_atomic_flush enables vblanks;
> > - when blocked, only in next op, vkms_crtc_atomic_enable, the vblanks
> > started. Moreover, a series of vkms_vblank_simulate flow out until
> > disabling vblanks.
> > Also watching the steps of vkms_crtc_atomic_flush, when the very first
> > drm_crtc_vblank_get returned an error, the subtest crashed. On the other
> > hand, when vblank_get succeeded, the subtest completed. Finally, checking
> > the flush steps: it increases counter to hold a vblank reference (get),
> > but there isn't a op to decreased it and release vblanks (put).
> > 
> > Cc: Daniel Vetter 
> > Cc: Rodrigo Siqueira 
> > Cc: Haneen Mohammed 
> > Signed-off-by: Melissa Wen 
> > ---
> >  drivers/gpu/drm/vkms/vkms_crtc.c | 1 +
> >  1 file changed, 1 insertion(+)
> > 
> > diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c 
> > b/drivers/gpu/drm/vkms/vkms_crtc.c
> > index ac85e17428f8..a99d6b4a92dd 100644
> > --- a/drivers/gpu/drm/vkms/vkms_crtc.c
> > +++ b/drivers/gpu/drm/vkms/vkms_crtc.c
> > @@ -246,6 +246,7 @@ static void vkms_crtc_atomic_flush(struct drm_crtc 
> > *crtc,
> >  
> > spin_unlock(&crtc->dev->event_lock);
> >  
> > +   drm_crtc_vblank_put(crtc);
> 
> Uh so I reviewed this a bit more carefully now, and I don't think this is
> the correct bugfix. From the kerneldoc of drm_crtc_arm_vblank_event():
> 
>  * Caller must hold a vblank reference for the event @e acquired by a
>  * drm_crtc_vblank_get(), which will be dropped when the next vblank arrives.
> 
> So when we call drm_crtc_arm_vblank_event then the vblank_put gets called
> for us. And that's the only case where we successfully acquired a vblank
> interrupt reference since on failure of drm_crtc_vblank_get (0 indicates
> success for that function, failure negative error number) we directly send
> out the event.
> 
> So something else fishy is going on, and now I'm totally confused why this
> even happens.
> 
> We also have a pile of WARN_ON checks in drm_crtc_vblank_put to make sure
> we don't underflow the refcount, so it's also not that I think (except if
> this patch creates more WARNING backtraces).
> 
> But clearly it changes behaviour somehow ... can you try to figure out
> what changes? Maybe print out the vblank->refcount at various points in
> the driver, and maybe also trace when exactly the fake vkms vblank hrtimer
> is enabled/disabled ...

:(

I can check these, but I also have other suspicions. When I place the
drm_crtc_vblank_put outside the if (at the end of flush), it not only solves
the issue of blocking on kms_cursor_crc, but the WARN_ON on kms_flip also
no longer appears (a total cleanup). Just after:

vkms_output->composer_state = to_vkms_crtc_state(crtc->state);

it looks like something is getting stuck around there.

Besides, there is a lock at atomic_begin:

  /* This lock is held across the atomic commit to block vblank timer
   * from scheduling vkms_composer_worker until the composer is updated
   */
  spin_lock_irq(&vkms_output->lock);

that is released in atomic_flush, which makes me suspect something is
missing in the composer update.
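
(For reference, the get/arm pattern from the kerneldoc quoted above looks
roughly like this in an atomic_flush; a simplified sketch, not the exact
vkms code:)

static void example_crtc_atomic_flush(struct drm_crtc *crtc,
				      struct drm_crtc_state *old_state)
{
	spin_lock_irq(&crtc->dev->event_lock);

	if (crtc->state->event) {
		if (drm_crtc_vblank_get(crtc) != 0) {
			/* no vblank available: deliver the event now */
			drm_crtc_send_vblank_event(crtc, crtc->state->event);
		} else {
			/*
			 * The reference taken by drm_crtc_vblank_get() is
			 * consumed when the next vblank fires and the event
			 * is sent; no explicit drm_crtc_vblank_put() here.
			 */
			drm_crtc_arm_vblank_event(crtc, crtc->state->event);
		}
		crtc->state->event = NULL;
	}

	spin_unlock_irq(&crtc->dev->event_lock);
}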

I'll check all these things and come back with news (hope) :)

Thanks,

Melissa
> 
> I'm totally confused about what's going on here now.
> -Daniel
> 
> > crtc->state->event = NULL;
> > }
> >  
> > -- 
> > 2.27.0
> > 
> 
> -- 
> Daniel Vetter
> Software Engineer, Intel Corporation
> http://blog.ffwll.ch

Re: [Linaro-mm-sig] [PATCH 1/2] dma-buf.rst: Document why indefinite fences are a bad idea

2020-07-22 Thread Daniel Vetter
On Wed, Jul 22, 2020 at 3:12 PM Thomas Hellström (Intel)
 wrote:
> On 2020-07-22 14:41, Daniel Vetter wrote:
> > Ah I think I misunderstood which options you want to compare here. I'm
> > not sure how much pain fixing up "dma-fence as memory fence" really
> > is. That's kinda why I want a lot more testing on my annotation
> > patches, to figure that out. Not much feedback aside from amdgpu and
> > intel, and those two drivers pretty much need to sort out their memory
> > fence issues anyway (because of userptr and stuff like that).
> >
> > The only other issues outside of these two drivers I'm aware of:
> > - various scheduler drivers doing allocations in the drm/scheduler
> > critical section. Since all arm-soc drivers have a mildly shoddy
> > memory model of "we just pin everything" they don't really have to
> > deal with this. So we might just declare arm as a platform broken and
> > not taint the dma-fence critical sections with fs_reclaim. Otoh we
> > need to fix this for drm/scheduler anyway, I think best option would
> > be to have a mempool for hw fences in the scheduler itself, and at
> > that point fixing the other drivers shouldn't be too onerous.
> >
> > - vmwgfx doing a dma_resv in the atomic commit tail. Entirely
> > orthogonal to the entire memory fence discussion.
>
> With vmwgfx there is another issue that is hit when the gpu signals an
> error. At that point the batch might be restarted with a new meta
> command buffer that needs to be allocated out of a dma pool. in the
> fence critical section. That's probably a bit nasty to fix, but not
> impossible.

Yeah reset is fun. From what I've seen this isn't any worse than the
hw allocation issue for drm/scheduler drivers, they just allocate
another hw fence with everything that drags along. So the same mempool
should be sufficient.
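
(For what it's worth, a rough sketch of what such a mempool-backed hw
fence allocation could look like; everything except the mempool and
dma_fence calls is made up:)

#include <linux/dma-fence.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>

struct my_hw_fence {			/* hypothetical */
	struct dma_fence base;
};

static mempool_t *my_fence_pool;
static DEFINE_SPINLOCK(my_fence_lock);
static u64 my_fence_context;
static u64 my_fence_seqno;

int my_sched_init(void)
{
	/* keep a reserve so run_job never has to enter reclaim */
	my_fence_pool = mempool_create_kmalloc_pool(16,
					sizeof(struct my_hw_fence));
	if (!my_fence_pool)
		return -ENOMEM;
	my_fence_context = dma_fence_context_alloc(1);
	return 0;
}

/* called from the drm/scheduler run_job critical section */
struct dma_fence *my_alloc_hw_fence(const struct dma_fence_ops *ops)
{
	struct my_hw_fence *f;

	/* GFP_NOWAIT: no fs_reclaim inside the dma-fence critical path */
	f = mempool_alloc(my_fence_pool, GFP_NOWAIT);
	if (!f)
		return NULL;

	dma_fence_init(&f->base, ops, &my_fence_lock,
		       my_fence_context, ++my_fence_seqno);
	return &f->base;
}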

The really nasty thing around reset is display interactions, because
you just can't take drm_modeset_lock. amdgpu fixed that now (at least
the modeset_lock side, not yet the memory allocations it brings
along). i915 has the same problem for gen2/3 (so really old stuff),
and we've solved that by breaking & restarting all i915 fence waits,
but that predates multi-gpu and won't work for shared fences of
course. But it's so old and predates all multi-gpu laptops that I
think wontfix is the right take.

Other drm/scheduler drivers don't have that problem since they're all
render-only, so no display driver interaction.

> > I'm pretty sure there's more bugs, I just haven't heard from them yet.
> > Also due to the opt-in nature of dma-fence we can limit the scope of
> > what we fix fairly naturally, just don't put them where no one cares
> > :-) Of course that also hides general locking issues in dma_fence
> > signalling code, but well *shrug*.
> Hmm, yes. Another potential big problem would be drivers that want to
> use gpu page faults in the dma-fence critical sections with the
> batch-based programming model.

Yeah that's a massive can of worms. But luckily there's no such driver
merged in upstream, so hopefully we can think about all the
constraints and how to best annotate&enforce this before we land any
code and have big regrets.
-Daniel



--
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch


[Bug 208657] New: protection fault rx580, Video and input freeze, audio keeps working. Related: 207383?

2020-07-22 Thread bugzilla-daemon
https://bugzilla.kernel.org/show_bug.cgi?id=208657

Bug ID: 208657
   Summary: protection fault rx580, Video and input freeze, audio
keeps working. Related: 207383?
   Product: Drivers
   Version: 2.5
Kernel Version: 5.7.7, 5.7.8
  Hardware: Intel
OS: Linux
  Tree: Mainline
Status: NEW
  Severity: normal
  Priority: P1
 Component: Video(DRI - non Intel)
  Assignee: drivers_video-...@kernel-bugs.osdl.org
  Reporter: max.k@gmail.com
Regression: No

Created attachment 290447
  --> https://bugzilla.kernel.org/attachment.cgi?id=290447&action=edit
Kernel log excerpts

Hello, I can reproduce this on my system: RX580, four monitors, x86_64, Arch Linux.



Seems related to this bug(s):
https://bugzilla.kernel.org/show_bug.cgi?id=207383



I first noticed this on 5.7.7(-arch1-1) when I updated from 5.6.14(-arch1-1).
I used 5.6.14 for two months; no such problems occurred.
Affected kernel versions: 5.7.7, 5.7.8 (probably 5.8.9 too, though it has not
happened there yet).



When I use the browser and it freezes, audio keeps playing, the same as others
have noticed. I have attached logs of multiple instances of this happening;
there are different call traces leading to this bug, which may be of use to
you.


Greetings, M.K.

-- 
You are receiving this mail because:
You are watching the assignee of the bug.


Re: [Linaro-mm-sig] [PATCH 1/2] dma-buf.rst: Document why indefinite fences are a bad idea

2020-07-22 Thread Christian König

On 22.07.20 at 16:07, Daniel Vetter wrote:

On Wed, Jul 22, 2020 at 3:12 PM Thomas Hellström (Intel)
 wrote:

On 2020-07-22 14:41, Daniel Vetter wrote:

I'm pretty sure there's more bugs, I just haven't heard from them yet.
Also due to the opt-in nature of dma-fence we can limit the scope of
what we fix fairly naturally, just don't put them where no one cares
:-) Of course that also hides general locking issues in dma_fence
signalling code, but well *shrug*.

Hmm, yes. Another potential big problem would be drivers that want to
use gpu page faults in the dma-fence critical sections with the
batch-based programming model.

Yeah that's a massive can of worms. But luckily there's no such driver
merged in upstream, so hopefully we can think about all the
constraints and how to best annotate&enforce this before we land any
code and have big regrets.


Do you want some bad news? I once made a prototype for that when Vega10
came out.


But we abandoned this approach for the batch-based approach because
of the horrible performance.


KFD is going to see that, but this is only with user queues and no 
dma_fence involved whatsoever.


Christian.


-Daniel



--
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch


Re: [PATCH] drm/amdgpu/dc: Simplify drm_crtc_state::active checks

2020-07-22 Thread Michel Dänzer
On 2020-07-22 3:10 p.m., Kazlauskas, Nicholas wrote:
> On 2020-07-22 8:51 a.m., Daniel Vetter wrote:
>> On Wed, Jul 22, 2020 at 2:38 PM Michel Dänzer  wrote:
>>>
>>> From: Michel Dänzer 
>>>
>>> drm_atomic_crtc_check enforces that ::active can only be true if
>>> ::enable is as well.
>>>
>>> Signed-off-by: Michel Dänzer 
> 
> Looks fine to me. The check is sufficiently old that I don't mind
> relying on the core for this either.
> 
> Reviewed-by: Nicholas Kazlauskas 
> 
>>
>> modeset vs modereset is a bit of an inglorious name choice ... since this
>> seems to be glue code and not part of core dc, maybe rename to
>> enable_required/disable_required to keep it consistent with the
>> wording atomic helpers use? DC also seems to use reset for a lot of
>> other things already (state reset, like atomic, or gpu reset like
>> drm/scheduler's td_r_), so I think this would also help clarity from a
>> DC perspective.
>>
>> Patch itself is good, above just an idea for another patch on top.
>>
>> Reviewed-by: Daniel Vetter 

Thanks for the reviews! I assume this will get picked up by a DC
developer or Alex/Christian.


> That sounds like a reasonable idea to me. These are used more as a
> stream_changed / stream_removed flag, but I don't think these helpers
> really need to exist at all.
> 
> That could come as a follow up patch.

Yeah, I'm leaving that to you guys. :)


-- 
Earthling Michel Dänzer   |   https://redhat.com
Libre software enthusiast | Mesa and X developer


Re: [Linaro-mm-sig] [PATCH 1/2] dma-buf.rst: Document why indefinite fences are a bad idea

2020-07-22 Thread Intel


On 2020-07-22 16:23, Christian König wrote:

On 22.07.20 at 16:07, Daniel Vetter wrote:

On Wed, Jul 22, 2020 at 3:12 PM Thomas Hellström (Intel)
 wrote:

On 2020-07-22 14:41, Daniel Vetter wrote:

I'm pretty sure there's more bugs, I just haven't heard from them yet.
Also due to the opt-in nature of dma-fence we can limit the scope of
what we fix fairly naturally, just don't put them where no one cares
:-) Of course that also hides general locking issues in dma_fence
signalling code, but well *shrug*.

Hmm, yes. Another potential big problem would be drivers that want to
use gpu page faults in the dma-fence critical sections with the
batch-based programming model.

Yeah that's a massive can of worms. But luckily there's no such driver
merged in upstream, so hopefully we can think about all the
constraints and how to best annotate&enforce this before we land any
code and have big regrets.


Do you want some bad news? I once made a prototype for that when Vega10
came out.


But we abandoned this approach for the batch-based approach
because of the horrible performance.


In the context of the previous discussion I'd consider the fact that it's
not performant in the batch-based model good news :)


Thomas




KFD is going to see that, but this is only with user queues and no 
dma_fence involved whatsoever.


Christian.


-Daniel



--
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch


Re: [Linaro-mm-sig] [PATCH 1/2] dma-buf.rst: Document why indefinite fences are a bad idea

2020-07-22 Thread Christian König

On 22.07.20 at 16:30, Thomas Hellström (Intel) wrote:


On 2020-07-22 16:23, Christian König wrote:

On 22.07.20 at 16:07, Daniel Vetter wrote:

On Wed, Jul 22, 2020 at 3:12 PM Thomas Hellström (Intel)
 wrote:

On 2020-07-22 14:41, Daniel Vetter wrote:
I'm pretty sure there's more bugs, I just haven't heard from them 
yet.

Also due to the opt-in nature of dma-fence we can limit the scope of
what we fix fairly naturally, just don't put them where no one cares
:-) Of course that also hides general locking issues in dma_fence
signalling code, but well *shrug*.

Hmm, yes. Another potential big problem would be drivers that want to
use gpu page faults in the dma-fence critical sections with the
batch-based programming model.

Yeah that's a massive can of worms. But luckily there's no such driver
merged in upstream, so hopefully we can think about all the
constraints and how to best annotate&enforce this before we land any
code and have big regrets.


Do you want some bad news? I once made a prototype for that when Vega10
came out.


But we abandoned this approach for the batch-based approach
because of the horrible performance.


In the context of the previous discussion I'd consider the fact that it's
not performant in the batch-based model good news :)


Well, Vega10 had such horrible page fault performance because it
was the first generation which enabled it.


Later hardware versions are much better, but we just didn't push for 
this feature on them any more.


But yeah, now that you mention it, we did discuss this locking problem on
tons of team calls as well.


Our solution at that time was to just not allow waiting if we do any 
allocation in the page fault handler. But this is of course not 
practical for a production environment.
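
(As an aside, the annotations from the series under discussion express
exactly that rule; a sketch, with the fence and pool variables assumed
to exist:)

	bool cookie = dma_fence_begin_signalling();

	/*
	 * Between begin/end nothing may allocate with GFP_KERNEL:
	 * direct reclaim can wait on dma_fences, and lockdep flags
	 * such an allocation as a potential deadlock.
	 */
	fence = mempool_alloc(pool, GFP_NOWAIT);	/* ok */
	/* kmalloc(size, GFP_KERNEL) here would be flagged */

	dma_fence_end_signalling(cookie);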


Christian.



Thomas




KFD is going to see that, but this is only with user queues and no 
dma_fence involved whatsoever.


Christian.


-Daniel



--
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch




[Bug 208657] protection fault rx580, Video and input freeze, audio keeps working. Related: 207383?

2020-07-22 Thread bugzilla-daemon
https://bugzilla.kernel.org/show_bug.cgi?id=208657

Alex Deucher (alexdeuc...@gmail.com) changed:

   What|Removed |Added

 CC||alexdeuc...@gmail.com

--- Comment #1 from Alex Deucher (alexdeuc...@gmail.com) ---
Looks like a duplicate of bug 207383.

-- 
You are receiving this mail because:
You are watching the assignee of the bug.


Re: [PATCH] drm/simple_kms_helper: add drmm_simple_encoder_init()

2020-07-22 Thread Thomas Zimmermann
Hi

On 22.07.20 at 15:25, Philipp Zabel wrote:
> Add a drm_simple_encoder_init() variant that registers
> drm_encoder_cleanup() with drmm_add_action().
> 
> Now drivers can store encoders in memory allocated with drmm_kmalloc()
> after the call to drmm_mode_config_init(), without having to manually
> make sure that drm_encoder_cleanup() is called before the memory is
> freed.
> 
> Signed-off-by: Philipp Zabel 
> ---
>  drivers/gpu/drm/drm_simple_kms_helper.c | 42 +
>  include/drm/drm_simple_kms_helper.h |  4 +++
>  2 files changed, 46 insertions(+)
> 
> diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c 
> b/drivers/gpu/drm/drm_simple_kms_helper.c
> index 74946690aba4..a243f00cf63d 100644
> --- a/drivers/gpu/drm/drm_simple_kms_helper.c
> +++ b/drivers/gpu/drm/drm_simple_kms_helper.c
> @@ -9,6 +9,7 @@
>  #include 
>  #include 
>  #include 
> +#include 
>  #include 
>  #include 
>  #include 
> @@ -71,6 +72,47 @@ int drm_simple_encoder_init(struct drm_device *dev,
>  }
>  EXPORT_SYMBOL(drm_simple_encoder_init);
>  
> +static void drmm_encoder_cleanup(struct drm_device *dev, void *ptr)
> +{
> + struct drm_encoder *encoder = ptr;
> +
> + drm_encoder_cleanup(encoder);
> +}

This doesn't work. DRM cleans up the encoder by invoking the destroy
callback from the encoder functions. This additional helper would
clean up the encoder a second time.

You can already embed the encoder in another structure and things should
work as expected.

Best regards
Thomas

> +
> +/**
> + * drmm_simple_encoder_init - Initialize a preallocated encoder with
> + *basic functionality.
> + * @dev: drm device
> + * @encoder: the encoder to initialize
> + * @encoder_type: user visible type of the encoder
> + *
> + * Initialises a preallocated encoder that has no further functionality.
> + * Settings for possible CRTC and clones are left to their initial values.
> + * Cleanup is automatically handled through registering drm_encoder_cleanup()
> + * with drmm_add_action().
> + *
> + * The caller of drmm_simple_encoder_init() is responsible for allocating
> + * the encoder's memory with drmm_kzalloc() to ensure it is automatically
> + * freed after the encoder has been cleaned up.
> + *
> + * Returns:
> + * Zero on success, error code on failure.
> + */
> +int drmm_simple_encoder_init(struct drm_device *dev,
> +  struct drm_encoder *encoder,
> +  int encoder_type)
> +{
> + int ret;
> +
> + ret = drm_encoder_init(dev, encoder, &drm_simple_encoder_funcs_cleanup,
> +encoder_type, NULL);
> + if (ret)
> + return ret;
> +
> + return drmm_add_action_or_reset(dev, drmm_encoder_cleanup, encoder);
> +}
> +EXPORT_SYMBOL(drmm_simple_encoder_init);
> +
>  static enum drm_mode_status
>  drm_simple_kms_crtc_mode_valid(struct drm_crtc *crtc,
>  const struct drm_display_mode *mode)
> diff --git a/include/drm/drm_simple_kms_helper.h 
> b/include/drm/drm_simple_kms_helper.h
> index a026375464ff..27f0915599c8 100644
> --- a/include/drm/drm_simple_kms_helper.h
> +++ b/include/drm/drm_simple_kms_helper.h
> @@ -185,4 +185,8 @@ int drm_simple_encoder_init(struct drm_device *dev,
>   struct drm_encoder *encoder,
>   int encoder_type);
>  
> +int drmm_simple_encoder_init(struct drm_device *dev,
> +  struct drm_encoder *encoder,
> +  int encoder_type);
> +
>  #endif /* __LINUX_DRM_SIMPLE_KMS_HELPER_H */
> 

-- 
Thomas Zimmermann
Graphics Driver Developer
SUSE Software Solutions Germany GmbH
Maxfeldstr. 5, 90409 Nürnberg, Germany
(HRB 36809, AG Nürnberg)
Geschäftsführer: Felix Imendörffer





Re: [PATCH] drm/simple_kms_helper: add drmm_simple_encoder_init()

2020-07-22 Thread Philipp Zabel
Hi Thomas,

thank you for your comment.

On Wed, 2020-07-22 at 16:43 +0200, Thomas Zimmermann wrote:
> Hi
> 
> On 22.07.20 at 15:25, Philipp Zabel wrote:
> > Add a drm_simple_encoder_init() variant that registers
> > drm_encoder_cleanup() with drmm_add_action().
> > 
> > Now drivers can store encoders in memory allocated with drmm_kmalloc()
> > after the call to drmm_mode_config_init(), without having to manually
> > make sure that drm_encoder_cleanup() is called before the memory is
> > freed.
> > 
> > Signed-off-by: Philipp Zabel 
> > ---
> >  drivers/gpu/drm/drm_simple_kms_helper.c | 42 +
> >  include/drm/drm_simple_kms_helper.h |  4 +++
> >  2 files changed, 46 insertions(+)
> > 
> > diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c 
> > b/drivers/gpu/drm/drm_simple_kms_helper.c
> > index 74946690aba4..a243f00cf63d 100644
> > --- a/drivers/gpu/drm/drm_simple_kms_helper.c
> > +++ b/drivers/gpu/drm/drm_simple_kms_helper.c
> > @@ -9,6 +9,7 @@
> >  #include 
> >  #include 
> >  #include 
> > +#include 
> >  #include 
> >  #include 
> >  #include 
> > @@ -71,6 +72,47 @@ int drm_simple_encoder_init(struct drm_device *dev,
> >  }
> >  EXPORT_SYMBOL(drm_simple_encoder_init);
> >  
> > +static void drmm_encoder_cleanup(struct drm_device *dev, void *ptr)
> > +{
> > +   struct drm_encoder *encoder = ptr;
> > +
> > +   drm_encoder_cleanup(encoder);
> > +}
> 
> This doesn't work. DRM cleans up the encoder by invoking the destroy
> callback from the encoder functions. This additional helper would
> cleanup the encoder a second time.

Indeed this would require the encoder destroy callback to be NULL.

> You can already embed the encoder in another structure and things should
> work as expected.

If the embedding structure is a component allocated with drmm_kmalloc()
after the call to drmm_mode_config_init(), the structure will already be
freed before the destroy callback is run from
drmm_mode_config_init_release().
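For illustration, a rough sketch of the teardown ordering that goes wrong,
assuming an encoder embedded in a drmm_kzalloc()'d component:

    drmm_mode_config_init(drm);                  /* release action A, first */
    priv = drmm_kzalloc(drm, sizeof(*priv), GFP_KERNEL); /* action B, second */
    drm_simple_encoder_init(drm, &priv->encoder, DRM_MODE_ENCODER_NONE);

    /*
     * drmres runs release actions in reverse order on the final
     * drm_dev_put():
     *   B: kfree(priv)                          <- encoder memory is gone
     *   A: drm_mode_config_init_release()
     *        -> encoder->funcs->destroy()       <- use after free
     */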

regards
Philipp
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH] drm/vkms: add missing drm_crtc_vblank_put to the get/put pair on flush

2020-07-22 Thread Daniel Vetter
On Wed, Jul 22, 2020 at 4:06 PM Melissa Wen  wrote:
>
> On 07/22, dan...@ffwll.ch wrote:
> > On Wed, Jul 22, 2020 at 08:04:11AM -0300, Melissa Wen wrote:
> > > This patch adds a missing drm_crtc_vblank_put op to the pair
> > > drm_crtc_vblank_get/put (inc/decrement counter to guarantee vblanks).
> > >
> > > It clears the execution of the following kms_cursor_crc subtests:
> > > 1. pipe-A-cursor-[size,alpha-opaque, NxN-(on-screen, off-screen, sliding,
> > >random, fast-moving])] - successful when running individually.
> > > 2. pipe-A-cursor-dpms passes again
> > > 3. pipe-A-cursor-suspend also passes
> > >
> > > The issue was initially tracked in the sequential execution of IGT
> > > kms_cursor_crc subtest: when running the test sequence or one of its
> > > subtests twice, the odd execs complete and the pairs get stuck in an
> > > endless wait. In the IGT code, calling a wait_for_vblank before the start
> > > of CRC capture prevented the busy-wait. But the problem persisted in the
> > > pipe-A-cursor-dpms and -suspend subtests.
> > >
> > > Checking the history, the pipe-A-cursor-dpms subtest was successful when,
> > > in vkms_atomic_commit_tail, instead of using the flip_done op, it used
> > > wait_for_vblanks. Another way to prevent blocking was wait_one_vblank when
> > > enabling crtc. However, in both cases, pipe-A-cursor-suspend persisted
> > > blocking in the 2nd start of CRC capture, which may indicate that
> > > something got stuck in the step of CRC setup. Indeed, wait_one_vblank in
> > > the crc setup was able to sync things and free all kms_cursor_crc
> > > subtests.
> > >
> > > Tracing and comparing a clean run with a blocked one:
> > > - in a clean one, vkms_crtc_atomic_flush enables vblanks;
> > > - when blocked, the vblanks only started in the next op,
> > > vkms_crtc_atomic_enable. Moreover, a series of vkms_vblank_simulate
> > > calls flows out until vblanks are disabled.
> > > Also watching the steps of vkms_crtc_atomic_flush, when the very first
> > > drm_crtc_vblank_get returned an error, the subtest crashed. On the other
> > > hand, when vblank_get succeeded, the subtest completed. Finally, checking
> > > the flush steps: it increases the counter to hold a vblank reference (get),
> > > but there isn't an op to decrease it and release vblanks (put).
> > >
> > > Cc: Daniel Vetter 
> > > Cc: Rodrigo Siqueira 
> > > Cc: Haneen Mohammed 
> > > Signed-off-by: Melissa Wen 
> > > ---
> > >  drivers/gpu/drm/vkms/vkms_crtc.c | 1 +
> > >  1 file changed, 1 insertion(+)
> > >
> > > diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c 
> > > b/drivers/gpu/drm/vkms/vkms_crtc.c
> > > index ac85e17428f8..a99d6b4a92dd 100644
> > > --- a/drivers/gpu/drm/vkms/vkms_crtc.c
> > > +++ b/drivers/gpu/drm/vkms/vkms_crtc.c
> > > @@ -246,6 +246,7 @@ static void vkms_crtc_atomic_flush(struct drm_crtc 
> > > *crtc,
> > >
> > > spin_unlock(&crtc->dev->event_lock);
> > >
> > > +   drm_crtc_vblank_put(crtc);
> >
> > Uh so I reviewed this a bit more carefully now, and I dont think this is
> > the correct bugfix. From the kerneldoc of drm_crtc_arm_vblank_event():
> >
> >  * Caller must hold a vblank reference for the event @e acquired by a
> >  * drm_crtc_vblank_get(), which will be dropped when the next vblank 
> > arrives.
> >
> > So when we call drm_crtc_arm_vblank_event then the vblank_put gets called
> > for us. And that's the only case where we successfully acquired a vblank
> > interrupt reference since on failure of drm_crtc_vblank_get (0 indicates
> > success for that function, failure negative error number) we directly send
> > out the event.
> >
> > So something else fishy is going on, and now I'm totally confused why this
> > even happens.
> >
> > We also have a pile of WARN_ON checks in drm_crtc_vblank_put to make sure
> > we don't underflow the refcount, so it's also not that I think (except if
> > this patch creates more WARNING backtraces).
> >
> > But clearly it changes behaviour somehow ... can you try to figure out
> > what changes? Maybe print out the vblank->refcount at various points in
> > the driver, and maybe also trace when exactly the fake vkms vblank hrtimer
> > is enabled/disabled ...
>
> :(
>
> I can check these, but I also have other suspicions. When I place the
> drm_crtc_vblank_put out of the if (at the end of flush), it not only solves
> the issue of blocking on kms_cursor_crc, but also the WARN_ON on kms_flip
> doesn't appear anymore (a total cleanup). Just after:
>
> vkms_output->composer_state = to_vkms_crtc_state(crtc->state);
>
> looks like there is something stuck around here.

Hm do you have the full WARNING for this? Maybe this gives me an idea
what's going wrong.

> Besides, there is a lock at atomic_begin:
>
>   /* This lock is held across the atomic commit to block vblank timer
>* from scheduling vkms_composer_worker until the composer is updated
>*/
>   spin_lock_irq(&vkms_output->lock);
>
> that seems to be released on atomic_flush and makes me suspect something
> missing.
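For reference, the pattern the quoted kerneldoc describes, as a minimal
sketch (not the actual vkms code):

    spin_lock_irq(&crtc->dev->event_lock);
    if (crtc->state->event) {
        if (drm_crtc_vblank_get(crtc) == 0)
            /* the reference is dropped for us when the vblank fires */
            drm_crtc_arm_vblank_event(crtc, crtc->state->event);
        else
            /* no vblank reference acquired, send the event directly */
            drm_crtc_send_vblank_event(crtc, crtc->state->event);
        crtc->state->event = NULL;
    }
    spin_unlock_irq(&crtc->dev->event_lock);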

Re: [PATCH] drm/amdgpu/dc: Simplify drm_crtc_state::active checks

2020-07-22 Thread Daniel Vetter
On Wed, Jul 22, 2020 at 4:25 PM Michel Dänzer  wrote:
>
> On 2020-07-22 3:10 p.m., Kazlauskas, Nicholas wrote:
> > On 2020-07-22 8:51 a.m., Daniel Vetter wrote:
> >> On Wed, Jul 22, 2020 at 2:38 PM Michel Dänzer  wrote:
> >>>
> >>> From: Michel Dänzer 
> >>>
> >>> drm_atomic_crtc_check enforces that ::active can only be true if
> >>> ::enable is as well.
> >>>
> >>> Signed-off-by: Michel Dänzer 
> >
> > Looks fine to me. The check is sufficiently old enough that I don't mind
> > relying on the core for this either.

"active implies enabled" has been a hard assumption of atomic from day
1. So should work anywhere you have atomic.
-Daniel
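Concretely, the invariant lets a check like the following drop the
redundant enable test (an illustrative before/after, not the patch hunk
itself):

    /* before */
    if (crtc_state->enable && crtc_state->active) {
        /* program the hardware */
    }

    /* after: drm_atomic_crtc_check() already rejects active && !enable */
    if (crtc_state->active) {
        /* program the hardware */
    }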

> > Reviewed-by: Nicholas Kazlauskas 
> >
> >>
> >> modeset vs modereset is a bit an inglorious name choice ... since this
> >> seems to be glue code and not part of core dc, maybe rename to
> >> enable_required/disable_required to keep it consistent with the
> >> wording atomic helpers use? DC also seems to use reset for a lot of
> >> other things already (state reset, like atomic, or gpu reset like
> >> drm/scheduler's tdr), so I think this would also help clarity from a
> >> DC perspective.
> >>
> >> Patch itself is good, above just an idea for another patch on top.
> >>
> >> Reviewed-by: Daniel Vetter 
>
> Thanks for the reviews! I assume this will get picked up by a DC
> developer or Alex/Christian.
>
>
> > That sounds like a reasonable idea to me. These are used more as a
> > stream_changed / stream_removed flag, but I don't think these helpers
> > really need to exist at all.
> >
> > That could come as a follow up patch.
>
> Yeah, I'm leaving that to you guys. :)
>
>
> --
> Earthling Michel Dänzer   |   https://redhat.com
> Libre software enthusiast | Mesa and X developer



-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH v5 0/6] Add support for GPU DDR BW scaling

2020-07-22 Thread Rob Clark
On Tue, Jul 21, 2020 at 10:30 PM Viresh Kumar  wrote:
>
> On 21-07-20, 07:28, Rob Clark wrote:
> > > With your ack, I can add the dev_pm_opp_set_bw patch to my
> > tree and merge it via msm-next -> drm-next -> linus
>
> I wanted to send it via my tree, but its okay. Pick this patch from
> linux-next and add my Ack, I will drop it after that.
>
> a8351c12c6c7 OPP: Add and export helper to set bandwidth

Thanks, I'll do that

>
> > Otherwise I can send a second later pull req that adds the final patch
> > after I've rebased to 5.9-rc1 (by which point the opp next tree will
> > have presumably been merged)
>
> The PM stuff gets pushed fairly early and so I was asking you to
> rebase just on my tree, so you could have sent the pull request right
> after the PM tree landed there instead of waiting for rc1.

I guess I should have explained that my tree gets pulled first into
drm-next, which then gets pulled by Linus.

BR,
-R

> But its fine now.
>
> --
> viresh
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH -next] dma-fence: Make symbol 'dma_fence_lockdep_map' static

2020-07-22 Thread Koenig, Christian


On 22.07.2020 at 18:04, Wei Yongjun  wrote:
The sparse tool complains as follows:

drivers/dma-buf/dma-fence.c:249:25: warning:
 symbol 'dma_fence_lockdep_map' was not declared. Should it be static?

This variable is not used outside of dma-fence.c, so this commit
marks it static.

Fixes: 5fbff813a4a3 ("dma-fence: basic lockdep annotations")
Reported-by: Hulk Robot 
Signed-off-by: Wei Yongjun 

Reviewed-by: Christian König 

---
 drivers/dma-buf/dma-fence.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index af1d8ea926b3..43624b4ee13d 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -246,7 +246,7 @@ EXPORT_SYMBOL(dma_fence_context_alloc);
  *   concerned.
  */
 #ifdef CONFIG_LOCKDEP
-struct lockdep_map dma_fence_lockdep_map = {
+static struct lockdep_map dma_fence_lockdep_map = {
 .name = "dma_fence_map"
 };



___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH] drm/amdgpu/dc: Simplify drm_crtc_state::active checks

2020-07-22 Thread Alex Deucher
On Wed, Jul 22, 2020 at 10:25 AM Michel Dänzer  wrote:
>
> On 2020-07-22 3:10 p.m., Kazlauskas, Nicholas wrote:
> > On 2020-07-22 8:51 a.m., Daniel Vetter wrote:
> >> On Wed, Jul 22, 2020 at 2:38 PM Michel Dänzer  wrote:
> >>>
> >>> From: Michel Dänzer 
> >>>
> >>> drm_atomic_crtc_check enforces that ::active can only be true if
> >>> ::enable is as well.
> >>>
> >>> Signed-off-by: Michel Dänzer 
> >
> > Looks fine to me. The check is sufficiently old enough that I don't mind
> > relying on the core for this either.
> >
> > Reviewed-by: Nicholas Kazlauskas 
> >
> >>
> >> modeset vs modereset is a bit an inglorious name choice ... since this
> >> seems to be glue code and not part of core dc, maybe rename to
> >> enable_required/disable_required to keep it consistent with the
> >> wording atomic helpers use? DC also seems to use reset for a lot of
> >> other things already (state reset, like atomic, or gpu reset like
> >> drm/scheduler's tdr), so I think this would also help clarity from a
> >> DC perspective.
> >>
> >> Patch itself is good, above just an idea for another patch on top.
> >>
> >> Reviewed-by: Daniel Vetter 
>
> Thanks for the reviews! I assume this will get picked up by a DC
> developer or Alex/Christian.

Applied.  Thanks!

Alex

>
>
> > That sounds like a reasonable idea to me. These are used more as a
> > stream_changed / stream_removed flag, but I don't think these helpers
> > really need to exist at all.
> >
> > That could come as a follow up patch.
>
> Yeah, I'm leaving that to you guys. :)
>
>
> --
> Earthling Michel Dänzer   |   https://redhat.com
> Libre software enthusiast | Mesa and X developer
> ___
> amd-gfx mailing list
> amd-...@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH v2] drm/of: Consider the state in which the ep is disabled

2020-07-22 Thread Heiko Stübner
On Tuesday, July 7, 2020, 13:25:26 CEST, Sandy Huang wrote:
> don't mask possible_crtcs if the remote endpoint is disabled.
> 
> Signed-off-by: Sandy Huang 

Reviewed-by: Heiko Stuebner 

changes in v2:
- drop additional of_node_put, as ep will be put with the next
  iteration of for_each_endpoint_of_node()


As this touches a pretty central function, is there something
to keep in mind with regard to other DRM drivers?
[question for the broader audience ;-) ]

Heiko

> ---
>  drivers/gpu/drm/drm_of.c | 3 +++
>  1 file changed, 3 insertions(+)
> 
> diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
> index fdb05fbf72a0..565f05f5f11b 100644
> --- a/drivers/gpu/drm/drm_of.c
> +++ b/drivers/gpu/drm/drm_of.c
> @@ -66,6 +66,9 @@ uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
>   uint32_t possible_crtcs = 0;
>  
>   for_each_endpoint_of_node(port, ep) {
> + if (!of_device_is_available(ep))
> + continue;
> +
>   remote_port = of_graph_get_remote_port(ep);
>   if (!remote_port) {
>   of_node_put(ep);
> 




___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: pages pinned for BO lifetime and security

2020-07-22 Thread Chia-I Wu
On Wed, Jul 22, 2020 at 4:28 AM Daniel Vetter  wrote:
>
> On Wed, Jul 22, 2020 at 1:12 PM Christian König
>  wrote:
> >
> > On 22.07.20 at 09:32, Daniel Vetter wrote:
> > > On Wed, Jul 22, 2020 at 9:19 AM Christian König
> > >  wrote:
> > >> On 22.07.20 at 02:22, Gurchetan Singh wrote:
> > >>
> > >> +Christian who added DMABUF_MOVE_NOTIFY which added the relevant blurb:
> > >>
> > >> https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/dma-buf/Kconfig#n46
> > >>
> > >> Currently, the user seems to be amdgpu for P2P dma-buf and it seems to 
> > >> plumb ttm (*move_notify) callback to dma-buf.  We're not sure if it's a 
> > >> security issue occurring across DRM drivers, or one more specific to the 
> > >> new amdgpu use case.
> > >>
> > >> On Tue, Jul 21, 2020 at 1:03 PM Chia-I Wu  wrote:
> > >>> Hi list,
> > >>>
> > >>> virtio-gpu is moving in the direction where BO pages are pinned for
> > >>> the lifetime for simplicity.  I am wondering if that is considered a
> > >>> security issue in general, especially aCan you elaborate a little bit 
> > >>> what these other problems might be?  Memory fragmentation?fter running 
> > >>> into the
> > >>> description of the new DMABUF_MOVE_NOTIFY config option.
> > >>
> > >> Yes, that is generally considered a denial of service possibility and so 
> > >> far Dave and Daniel have rejected all tries to upstream stuff like this 
> > >> as far as I know.
> > > Uh we have merged pretty much all arm-soc drivers without real
> > > shrinkers. Whether that was a good idea or not is maybe different
> > > question - now that we do have pretty good helpers maybe we should
> > > poke this a bit more. But then SoCs Suck (tm).
> >
> > I was under the impression that those SoC drivers still use the GEM
> > helpers which unpinns stuff when it is not in use. But I might be wrong.
>
> It's kinda mostly there, even some helpers for shrinking but a)
> helpers on, not all drivers use it b) for purgeable objects only, not
> generally for inactive stuff - there's no active use tracking c) cma
> helpers (ok that one is only for vc4 as the render driver) don't even
> have that. I had some slow burner series to get us towards dma_resv
> locking in shmem helpers and then maybe even a common shrinker helper
> with some "actually kick it out now" callback, but yeah never got
> there.
My quick survey of the SoC drivers also told me that they tend to
demonstrate a) or b).

About b), I was thinking maybe that's because the systems the drivers
run on are historically swap-less.  There is no place to write the
dirty pages back to and thus less incentive to support shrinking
inactive objects.

You both mentioned that the lack of swap is irrelevant (or at least
not the only factor).  Can you elaborate a little bit on that?
Shrinking inactive objects returns the pages to swap cache... hmm, I
guess that helps memory defragmentation?

>
> So maybe per-device object shrinker helper would be something neat we
> could lift out of ttm (when it's happening), maybe with a simple
> callback somewhere in it's lru tracking. Probably best if the shrinker
> lru is outright separate from anything else or it just gets messy.
> -Daniel
>
> > > But for real gpus they do indeed all have shrinkers, and not just "pin
> > > everything forever" model. Real gpus = stuff you might run on servers
> > > or multi-app and all that stuff, not with a simple "we just kill all
> > > background jobs if memory gets low" model like on android and other
> > > such things.
> > >
> > >> DMA-buf and pinning for scanout are the only exceptions since the 
> > >> implementation wouldn't have been possible otherwise.
> > >>
> > >>> Most drivers do not have a shrinker, or whether a BO is purgeable is
> > >>> entirely controlled by the userspace (madvise).  They can be
> > >>> categorized as "a security problem where userspace is able to pin
> > >>> unrestricted amounts of memory".  But those drivers are normally found
> > >>> on systems without swap.  I don't think the issue applies.
> > >>
> > >> This is completely independent of the availability of swap or not.
> > >>
> > >> Pinning of pages in large quantities can result in all kinds of problems 
> > >> and needs to be prevented even without swap.
> > > Yeah you don't just kill swap, you kill a ton of other kernel services
> > > with mass pinning. I think even the pinning of scanout buffers for
> > > i915 from system memory is somewhat questionable (but I guess small
> > > enough to not matter in practice).
> >
> > Yeah, we had a really hard time explaining that internally as well.
> >
> > Christian.
> >
> > >> Otherwise you can run into problems even with simple I/O operations, for
> > >> example.
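For context, a rough sketch of the core shrinker interface such a
per-device helper would sit on (the foo_* names and the LRU walk are
hypothetical, not a concrete proposal):

    static unsigned long foo_shrink_count(struct shrinker *s,
                                          struct shrink_control *sc)
    {
        struct foo_device *foo = container_of(s, struct foo_device, shrinker);

        /* pages that could be unpinned or purged right now */
        return atomic_long_read(&foo->shrinkable_pages);
    }

    static unsigned long foo_shrink_scan(struct shrinker *s,
                                         struct shrink_control *sc)
    {
        struct foo_device *foo = container_of(s, struct foo_device, shrinker);

        /* walk an LRU of inactive BOs, evict up to sc->nr_to_scan pages,
         * return how many were actually freed */
        return foo_evict_inactive(foo, sc->nr_to_scan);
    }

    ...
    foo->shrinker.count_objects = foo_shrink_count;
    foo->shrinker.scan_objects = foo_shrink_scan;
    foo->shrinker.seeks = DEFAULT_SEEKS;
    register_shrinker(&foo->shrinker);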

Re: [PATCH v7 2/3] drm: bridge: Add support for Cadence MHDP DPI/DP bridge

2020-07-22 Thread kernel test robot
Hi Swapnil,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on robh/for-next]
[also build test ERROR on linus/master v5.8-rc6 next-20200722]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:
https://github.com/0day-ci/linux/commits/Swapnil-Jakhade/drm-Add-support-for-Cadence-MHDP-DPI-DP-bridge-and-J721E-wrapper/20200722-154322
base:   https://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git for-next
config: x86_64-allyesconfig (attached as .config)
compiler: clang version 12.0.0 (https://github.com/llvm/llvm-project 
3d9967039d4191b77f939ddc6c6ff4275df620c2)
reproduce (this is a W=1 build):
wget 
https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O 
~/bin/make.cross
chmod +x ~/bin/make.cross
# install x86_64 cross compiling tool for clang build
# apt-get install binutils-x86-64-linux-gnu
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=x86_64 

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot 

All error/warnings (new ones prefixed by >>):

>> drivers/gpu/drm/bridge/cdns-mhdp-core.c:765:10: warning: implicit conversion 
>> from 'unsigned long' to 'unsigned int' changes value from 
>> 18446744073709551613 to 4294967293 [-Wconstant-conversion]
   writel(~CDNS_APB_INT_MASK_SW_EVENT_INT,
   ~~ ^~~
>> drivers/gpu/drm/bridge/cdns-mhdp-core.c:1378:2: error: implicit declaration 
>> of function 'phy_get_attrs' [-Werror,-Wimplicit-function-declaration]
   phy_get_attrs(mhdp->phy, &attrs);
   ^
>> drivers/gpu/drm/bridge/cdns-mhdp-core.c:1384:20: error: no member named 
>> 'max_link_rate' in 'struct phy_attrs'
   link_rate = attrs.max_link_rate;
   ~ ^
   drivers/gpu/drm/bridge/cdns-mhdp-core.c:1709:10: warning: implicit 
conversion from 'unsigned long' to 'unsigned int' changes value from 
18446744073709551613 to 4294967293 [-Wconstant-conversion]
   writel(~CDNS_APB_INT_MASK_SW_EVENT_INT,
   ~~ ^~~
   2 warnings and 2 errors generated.

vim +/phy_get_attrs +1378 drivers/gpu/drm/bridge/cdns-mhdp-core.c

  1371  
  1372  static void cdns_mhdp_fill_host_caps(struct cdns_mhdp_device *mhdp)
  1373  {
  1374  unsigned int link_rate;
  1375  struct phy_attrs attrs;
  1376  
  1377  /* Get source capabilities based on PHY attributes */
> 1378  phy_get_attrs(mhdp->phy, &attrs);
  1379  
  1380  mhdp->host.lanes_cnt = attrs.bus_width;
  1381  if (!mhdp->host.lanes_cnt)
  1382  mhdp->host.lanes_cnt = 4;
  1383  
> 1384  link_rate = attrs.max_link_rate;
  1385  if (!link_rate)
  1386  link_rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_8_1);
  1387  else
  1388  /* PHY uses Mb/s, DRM uses tens of kb/s. */
  1389  link_rate *= 100;
  1390  
  1391  mhdp->host.link_rate = link_rate;
  1392  mhdp->host.volt_swing = CDNS_VOLT_SWING(3);
  1393  mhdp->host.pre_emphasis = CDNS_PRE_EMPHASIS(3);
  1394  mhdp->host.pattern_supp = CDNS_SUPPORT_TPS(1) |
  1395CDNS_SUPPORT_TPS(2) | 
CDNS_SUPPORT_TPS(3) |
  1396CDNS_SUPPORT_TPS(4);
  1397  mhdp->host.lane_mapping = CDNS_LANE_MAPPING_NORMAL;
  1398  mhdp->host.fast_link = false;
  1399  mhdp->host.enhanced = true;
  1400  mhdp->host.scrambler = true;
  1401  mhdp->host.ssc = false;
  1402  }
  1403  

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-...@lists.01.org


___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH v2 0/5] drm: rockchip: various ports for older VOPs

2020-07-22 Thread Daniel Vetter
On Wed, Jul 22, 2020 at 8:13 PM Alex Bee  wrote:
>
> Hi,
>
> this series mainly ports existing functionality to older SoCs - most
> importantly enables alpha blending for RK3036, RK3066, RK3126 and
> RK3188.
> Besides that, it also changes the window type from DRM_PLANE_TYPE_CURSOR
> to DRM_PLANE_TYPE_OVERLAY for VOPs that have only one (1) overlay window.

This doesn't make much sense: the cursor overlay is really just a hint
for legacy ioctls that this is the overlay that should be used for
cursors. Compositors should try to use such planes as full overlays
(if they don't want to use them as a cursor). So sounds like a case of
"fix your compositor".

For atomic there's 0 difference between an overlay and a cursor (primary
plane is still treated somewhat special in the RMFB ioctl, but again
that's for backwards compat reasons with existing uapi, not because
the primary plane is different).

What does happen though is that this breaks cursor for legacy
userspace, which is probably not really what you want.
-Daniel
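For what it's worth, the plane type is only a parameter at init time; a
sketch of how a driver exposes a cursor-capable universal plane (names
abbreviated, not the rockchip code):

    ret = drm_universal_plane_init(dev, &win->base, possible_crtcs,
                                   &plane_funcs, formats, nformats,
                                   NULL /* modifiers */,
                                   DRM_PLANE_TYPE_CURSOR, NULL);
    /* legacy cursor ioctls get routed to this plane, while atomic
     * userspace is free to use it as a regular overlay */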


>
> Regards,
> Alex
>
> Changes in v2:
> - drop not yet upstreamed dsp_data_swap from RK3188 regs
> - rephrase most commit messages
>
> Alex Bee (5):
>   drm: rockchip: add scaling for RK3036 win1
>   drm: rockchip: add missing registers for RK3188
>   drm: rockchip: add alpha support for RK3036, RK3066, RK3126 and RK3188
>   drm: rockchip: set alpha_en to 0 if it is not used
>   drm: rockchip: use overlay windows as such
>
>  drivers/gpu/drm/rockchip/rockchip_drm_vop.c |  1 +
>  drivers/gpu/drm/rockchip/rockchip_vop_reg.c | 42 ++---
>  drivers/gpu/drm/rockchip/rockchip_vop_reg.h |  1 +
>  3 files changed, 38 insertions(+), 6 deletions(-)
>
> --
> 2.17.1
>


--
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: pages pinned for BO lifetime and security

2020-07-22 Thread Daniel Vetter
On Wed, Jul 22, 2020 at 9:27 PM Chia-I Wu  wrote:
>
> On Wed, Jul 22, 2020 at 4:28 AM Daniel Vetter  wrote:
> >
> > On Wed, Jul 22, 2020 at 1:12 PM Christian König
> >  wrote:
> > >
> > > On 22.07.20 at 09:32, Daniel Vetter wrote:
> > > > On Wed, Jul 22, 2020 at 9:19 AM Christian König
> > > >  wrote:
> > > >> On 22.07.20 at 02:22, Gurchetan Singh wrote:
> > > >>
> > > >> +Christian who added DMABUF_MOVE_NOTIFY which added the relevant blurb:
> > > >>
> > > >> https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/dma-buf/Kconfig#n46
> > > >>
> > > >> Currently, the user seems to be amdgpu for P2P dma-buf and it seems to 
> > > >> plumb ttm (*move_notify) callback to dma-buf.  We're not sure if it's 
> > > >> a security issue occurring across DRM drivers, or one more specific to 
> > > >> the new amdgpu use case.
> > > >>
> > > >> On Tue, Jul 21, 2020 at 1:03 PM Chia-I Wu  wrote:
> > > >>> Hi list,
> > > >>>
> > > >>> virtio-gpu is moving in the direction where BO pages are pinned for
> > > >>> the lifetime for simplicity.  I am wondering if that is considered a
> > > >>> security issue in general, especially after running into the
> > > >>> description of the new DMABUF_MOVE_NOTIFY config option.
> > > >>
> > > >> Yes, that is generally considered a denial of service possibility and so 
> > > >> far Dave and Daniel have rejected all tries to upstream stuff like 
> > > >> this as far as I know.
> > > > Uh we have merged pretty much all arm-soc drivers without real
> > > > shrinkers. Whether that was a good idea or not is maybe different
> > > > question - now that we do have pretty good helpers maybe we should
> > > > poke this a bit more. But then SoCs Suck (tm).
> > >
> > > I was under the impression that those SoC drivers still use the GEM
> > > helpers which unpinns stuff when it is not in use. But I might be wrong.
> >
> > It's kinda mostly there, even some helpers for shrinking but a)
> > helpers on, not all drivers use it b) for purgeable objects only, not
> > generally for inactive stuff - there's no active use tracking c) cma
> > helpers (ok that one is only for vc4 as the render driver) don't even
> > have that. I had some slow burner series to get us towards dma_resv
> > locking in shmem helpers and then maybe even a common shrinker helper
> > with some "actually kick it out now" callback, but yeah never got
> > there.
> My quick survey of the SoC drivers also told me that they tend to
> demonstrate a) or b).
>
> About b), I was thinking maybe that's because the systems the drivers
> run on are historically swap-less.  There is no place to write the
> dirty pages back to and thus less incentive to support shrinking
> inactive objects.
>
> You both mentioned that the lack of swap is irrelevant (or at least
> not the only factor).  Can you elaborate a little bit on that?
> Shrinking inactive objects returns the pages to swap cache... hmm, I
> guess that helps memory defragmentation?

Yup, kernel can then move it around as it sees fit.

Also, zswap is a thing nowadays, and I think used by default on android now.
-Daniel

> >
> > So maybe per-device object shrinker helper would be something neat we
> > could lift out of ttm (when it's happening), maybe with a simple
> > callback somewhere in it's lru tracking. Probably best if the shrinker
> > lru is outright separate from anything else or it just gets messy.
> > -Daniel
> >
> > > > But for real gpus they do indeed all have shrinkers, and not just "pin
> > > > everything forever" model. Real gpus = stuff you might run on servers
> > > > or multi-app and all that stuff, not with a simple "we just kill all
> > > > background jobs if memory gets low" model like on android and other
> > > > such things.
> > > >
> > > >> DMA-buf and pinning for scanout are the only exceptions since the 
> > > >> implementation wouldn't have been possible otherwise.
> > > >>
> > > >>> Most drivers do not have a shrinker, or whether a BO is purgeable is
> > > >>> entirely controlled by the userspace (madvise).  They can be
> > > >>> categorized as "a security problem where userspace is able to pin
> > > >>> unrestricted amounts of memory".  But those drivers are normally found
> > > >>> on systems without swap.  I don't think the issue applies.
> > > >>
> > > >> This is completely independent of the availability of swap or not.
> > > >>
> > > >> Pinning of pages in large quantities can result in all kinds of 
> > > >> problems and needs to be prevented even without swap.
> > > > Yeah you don't just kill swap, you kill a ton of other kernel services
> > > > with mass pinning.

Re: [PATCH 5/8] drm/imx: parallel-display: use drm managed resources

2020-07-22 Thread daniel
On Wed, Jul 22, 2020 at 04:01:53PM +0200, Philipp Zabel wrote:
> On Wed, 2020-07-22 at 15:30 +0200, Philipp Zabel wrote:
> [...]
> > and use drmm_add_action_or_reset() to make sure drm_encoder_cleanup() is
> > called before the memory is freed.
> [...]
> > @@ -259,6 +259,13 @@ static const struct drm_bridge_funcs 
> > imx_pd_bridge_funcs = {
> > .atomic_get_output_bus_fmts = imx_pd_bridge_atomic_get_output_bus_fmts,
> >  };
> >  
> > +static void imx_pd_encoder_cleanup(struct drm_device *drm, void *ptr)
> > +{
> > +   struct drm_encoder *encoder = ptr;
> > +
> > +   drm_encoder_cleanup(encoder);
> > +}
> > +
> >  static int imx_pd_register(struct drm_device *drm,
> > struct imx_parallel_display *imxpd)
> >  {
> > @@ -276,7 +283,13 @@ static int imx_pd_register(struct drm_device *drm,
> >  */
> > imxpd->connector.dpms = DRM_MODE_DPMS_OFF;
> >  
> > -   drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_NONE);
> > +   ret = drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_NONE);
> > +   if (ret)
> > +   return ret;
> > +
> > +   ret = drmm_add_action_or_reset(drm, imx_pd_encoder_cleanup, encoder);
> > +   if (ret)
> > +   return ret;
> 
> This is only required because this is a component driver: our
> drmm_kzalloc() is called after drmm_mode_config_init(), so we can't rely
> on drm_mode_config_init_release() for cleanup. That is only called after
> drmres already freed our memory.

Yeah I know about the inversion, which is why I haven't yet started typing
the mass conversion for all the drm objects. I think the explicit
drmm_add_action_or_reset is indeed the way to go, except we probably want
some helpers to wrap the allocation, drm_foo_init and adding the reset
action all into one thing (plus you can stuff the reset action into the
allocation instead of the kfree action only, even nicer that way).

But that's maybe for later, and good to have some examples in drivers
already converted over as guidance.

On the series: Acked-by: Daniel Vetter 

But way too late for solid review :-)

Cheers, Daniel

> 
> regards
> Philipp
> ___
> dri-devel mailing list
> dri-devel@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH] drm/simple_kms_helper: add drmm_simple_encoder_init()

2020-07-22 Thread daniel
On Wed, Jul 22, 2020 at 05:08:03PM +0200, Philipp Zabel wrote:
> Hi Thomas,
> 
> thank you for your comment.
> 
> On Wed, 2020-07-22 at 16:43 +0200, Thomas Zimmermann wrote:
> > Hi
> > 
> > On 22.07.20 at 15:25, Philipp Zabel wrote:
> > > Add a drm_simple_encoder_init() variant that registers
> > > drm_encoder_cleanup() with drmm_add_action().
> > > 
> > > Now drivers can store encoders in memory allocated with drmm_kmalloc()
> > > after the call to drmm_mode_config_init(), without having to manually
> > > make sure that drm_encoder_cleanup() is called before the memory is
> > > freed.
> > > 
> > > Signed-off-by: Philipp Zabel 
> > > ---
> > >  drivers/gpu/drm/drm_simple_kms_helper.c | 42 +
> > >  include/drm/drm_simple_kms_helper.h |  4 +++
> > >  2 files changed, 46 insertions(+)
> > > 
> > > diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c 
> > > b/drivers/gpu/drm/drm_simple_kms_helper.c
> > > index 74946690aba4..a243f00cf63d 100644
> > > --- a/drivers/gpu/drm/drm_simple_kms_helper.c
> > > +++ b/drivers/gpu/drm/drm_simple_kms_helper.c
> > > @@ -9,6 +9,7 @@
> > >  #include 
> > >  #include 
> > >  #include 
> > > +#include 
> > >  #include 
> > >  #include 
> > >  #include 
> > > @@ -71,6 +72,47 @@ int drm_simple_encoder_init(struct drm_device *dev,
> > >  }
> > >  EXPORT_SYMBOL(drm_simple_encoder_init);
> > >  
> > > +static void drmm_encoder_cleanup(struct drm_device *dev, void *ptr)
> > > +{
> > > + struct drm_encoder *encoder = ptr;
> > > +
> > > + drm_encoder_cleanup(encoder);
> > > +}
> > 
> > This doesn't work. DRM cleans up the encoder by invoking the destroy
> > callback from the encoder functions. This additional helper would
> > cleanup the encoder a second time.
> 
> Indeed this would require the encoder destroy callback to be NULL.

Yeah the drmm_ versions of these need to check that the ->cleanup hook is
NULL.
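Something along these lines, presumably (just a sketch):

    /* a drmm_-managed encoder must not bring its own destroy hook,
     * otherwise it would be cleaned up twice */
    if (WARN_ON(funcs && funcs->destroy))
        return -EINVAL;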

Also there's not actually a double-free, since drm_foo_cleanup removes it
from the lists, which means drm_mode_config_cleanup won't even see it. But
if the driver has some additional code in ->cleanup, that won't ever run,
so it's probably still a bug.

I also think that the drmm_foo_ wrappers should also do the allocation
(and upcasting) kinda like drmm_dev_alloc(). Otherwise we're still stuck
with tons of boilerplate.

For now I think it's ok if drivers that switch to drmm_ just copypaste,
until we're sure this is the right thing to do. And then maybe also roll
these out for all objects that stay for the entire lifetime of drm_device
(plane, crtc, encoder, plus variants). Just to make sure we're consistent
across all of them.
-Daniel
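A possible shape for such a combined helper, hand-waved (the
drmm_encoder_alloc() name and signature here are hypothetical):

    void *drmm_encoder_alloc(struct drm_device *dev, size_t size,
                             size_t offset, int encoder_type)
    {
        struct drm_encoder *encoder;
        void *container;
        int ret;

        container = drmm_kzalloc(dev, size, GFP_KERNEL);
        if (!container)
            return ERR_PTR(-ENOMEM);

        /* upcasting: the encoder lives at @offset inside the allocation */
        encoder = container + offset;
        ret = drm_encoder_init(dev, encoder, &drm_simple_encoder_funcs_cleanup,
                               encoder_type, NULL);
        if (ret)
            return ERR_PTR(ret);

        ret = drmm_add_action_or_reset(dev, drmm_encoder_cleanup, encoder);
        if (ret)
            return ERR_PTR(ret);

        return container;
    }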

> > You can already embed the encoder in another structure and things should
> > work as expected.
> 
> If the embedding structure is a component allocated with drmm_kmalloc()
> after the call to drmm_mode_config_init(), the structure will already be
> freed before the destroy callback is run from
> drmm_mode_config_init_release().
> 
> regards
> Philipp
> ___
> dri-devel mailing list
> dri-devel@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: [PATCH v5 0/6] Add support for GPU DDR BW scaling

2020-07-22 Thread Daniel Vetter
On Wed, Jul 22, 2020 at 5:47 PM Rob Clark  wrote:
>
> On Tue, Jul 21, 2020 at 10:30 PM Viresh Kumar  wrote:
> >
> > On 21-07-20, 07:28, Rob Clark wrote:
> > > With your ack, I can add the dev_pm_opp_set_bw patch to my
> > > tree and merge it via msm-next -> drm-next -> linus
> >
> > I wanted to send it via my tree, but it's okay. Pick this patch from
> > linux-next and add my Ack, I will drop it after that.
> >
> > a8351c12c6c7 OPP: Add and export helper to set bandwidth
>
> Thanks, I'll do that
>
> >
> > > Otherwise I can send a second later pull req that adds the final patch
> > > after I've rebased to 5.9-rc1 (by which point the opp next tree will
> > > have presumably been merged)
> >
> > The PM stuff gets pushed fairly early and so I was asking you to
> > rebase just on my tree, so you could have sent the pull request right
> > after the PM tree landed there instead of waiting for rc1.
>
> I guess I should have explained that my tree gets pulled first into
> drm-next, which then gets pulled by Linus.

Yeah either topic tree or acks for merging in the other branch. No
rebasing in the middle of the merge window, that's rather uncool.
-Daniel

>
> BR,
> -R
>
> > But its fine now.
> >
> > --
> > viresh
> ___
> dri-devel mailing list
> dri-devel@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel



-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[Bug 208661] New: Backlight doesn't work with both nv_backlight and acpi_video

2020-07-22 Thread bugzilla-daemon
https://bugzilla.kernel.org/show_bug.cgi?id=208661

Bug ID: 208661
   Summary: Backlight doesn't work with both nv_backlight and
acpi_video
   Product: Drivers
   Version: 2.5
Kernel Version: 5.7.0-1-amd64
  Hardware: All
OS: Linux
  Tree: Mainline
Status: NEW
  Severity: normal
  Priority: P1
 Component: Video(DRI - non Intel)
  Assignee: drivers_video-...@kernel-bugs.osdl.org
  Reporter: iknstu...@protonmail.com
Regression: No

Created attachment 290457
  --> https://bugzilla.kernel.org/attachment.cgi?id=290457&action=edit
vbios.rom

Hi,

I'm trying to get backlight working on this laptop. I am using the nouveau
driver, and kernel 5.7.
I tried a few different parameters for acpi_osi=, like the following:
 - acpi_osi=linux
 - acpi_osi=! acpi_osi="Windows 2012"
 - acpi_osi=! acpi_osi="Windows 2015"
however, none of them worked. /sys/class/backlight/*/bl_power is 0 for both
devices at boot. acpi_video0 has a max_brightness of 10 while nv_backlight has
a max brightness of 100.

I am attaching the vbios. The graphics card is nVidia GT540M.

-- 
You are receiving this mail because:
You are watching the assignee of the bug.
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[PATCH v2 2/2] drm/i915/dp: TPS4 PHY test pattern compliance support

2020-07-22 Thread Khaled Almahallawy
Adding support for TPS4 (CP2520 Pattern 3) PHY pattern source tests.

v2: uniform bit names TP4a/b/c (Manasi)

Signed-off-by: Khaled Almahallawy 
---
 drivers/gpu/drm/i915/display/intel_dp.c | 14 --
 drivers/gpu/drm/i915/i915_reg.h |  4 
 2 files changed, 16 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/display/intel_dp.c 
b/drivers/gpu/drm/i915/display/intel_dp.c
index d6295eb20b63..4b74b2ec5665 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -5371,7 +5371,7 @@ static void intel_dp_phy_pattern_update(struct intel_dp 
*intel_dp)
&intel_dp->compliance.test_data.phytest;
struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
enum pipe pipe = crtc->pipe;
-   u32 pattern_val;
+   u32 pattern_val, dp_tp_ctl;
 
switch (data->phy_pattern) {
case DP_PHY_TEST_PATTERN_NONE:
@@ -5411,7 +5411,7 @@ static void intel_dp_phy_pattern_update(struct intel_dp 
*intel_dp)
   DDI_DP_COMP_CTL_ENABLE |
   DDI_DP_COMP_CTL_CUSTOM80);
break;
-   case DP_PHY_TEST_PATTERN_CP2520:
+   case DP_PHY_TEST_PATTERN_CP2520_PAT1:
/*
 * FIXME: Ideally pattern should come from DPCD 0x24A. As
 * current firmware of DPR-100 could not set it, so hardcoding
@@ -5423,6 +5423,16 @@ static void intel_dp_phy_pattern_update(struct intel_dp 
*intel_dp)
   DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 |
   pattern_val);
break;
+   case DP_PHY_TEST_PATTERN_CP2520_PAT3:
+   DRM_DEBUG_KMS("Set TPS4 Phy Test Pattern\n");
+   intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
+   dp_tp_ctl = intel_de_read(dev_priv, 
TGL_DP_TP_CTL(pipe));
+   dp_tp_ctl &= ~DP_TP_CTL_TRAIN_PAT4_SEL_MASK;
+   dp_tp_ctl |= DP_TP_CTL_TRAIN_PAT4_SEL_TP4a;
+   dp_tp_ctl &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+   dp_tp_ctl |= DP_TP_CTL_LINK_TRAIN_PAT4;
+   intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), 
dp_tp_ctl);
+   break;
default:
WARN(1, "Invalid Phy Test Pattern\n");
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index a0d31f3bf634..c586595b9e76 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -9982,6 +9982,10 @@ enum skl_power_gate {
 #define  DP_TP_CTL_MODE_SST(0 << 27)
 #define  DP_TP_CTL_MODE_MST(1 << 27)
 #define  DP_TP_CTL_FORCE_ACT   (1 << 25)
+#define  DP_TP_CTL_TRAIN_PAT4_SEL_MASK (3 << 19)
+#define  DP_TP_CTL_TRAIN_PAT4_SEL_TP4a (0 << 19)
+#define  DP_TP_CTL_TRAIN_PAT4_SEL_TP4b (1 << 19)
+#define  DP_TP_CTL_TRAIN_PAT4_SEL_TP4c (2 << 19)
 #define  DP_TP_CTL_ENHANCED_FRAME_ENABLE   (1 << 18)
 #define  DP_TP_CTL_FDI_AUTOTRAIN   (1 << 15)
 #define  DP_TP_CTL_LINK_TRAIN_MASK (7 << 8)
-- 
2.17.1

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[PATCH v2 1/2] drm/dp: Add PHY_TEST_PATTERN CP2520 Pattern 2 and 3

2020-07-22 Thread Khaled Almahallawy
Add the missing CP2520 pattern 2 and 3 phy compliance patterns

v2: cosmetic changes

Reviewed-by: Manasi Navare  (v1)
Signed-off-by: Khaled Almahallawy 
---
 drivers/gpu/drm/drm_dp_helper.c | 2 +-
 include/drm/drm_dp_helper.h | 4 +++-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index a3c82e726057..d0fb78c6aca6 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -1583,7 +1583,7 @@ int drm_dp_get_phy_test_pattern(struct drm_dp_aux *aux,
return err;
 
break;
-   case DP_PHY_TEST_PATTERN_CP2520:
+   case DP_PHY_TEST_PATTERN_CP2520_PAT1:
err = drm_dp_dpcd_read(aux, DP_TEST_HBR2_SCRAMBLER_RESET,
   &data->hbr2_reset,
   sizeof(data->hbr2_reset));
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index e47dc22ebf50..fbf83f207b15 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -708,7 +708,9 @@
 # define DP_PHY_TEST_PATTERN_ERROR_COUNT0x2
 # define DP_PHY_TEST_PATTERN_PRBS7  0x3
 # define DP_PHY_TEST_PATTERN_80BIT_CUSTOM   0x4
-# define DP_PHY_TEST_PATTERN_CP2520 0x5
+# define DP_PHY_TEST_PATTERN_CP2520_PAT10x5
+# define DP_PHY_TEST_PATTERN_CP2520_PAT20x6
+# define DP_PHY_TEST_PATTERN_CP2520_PAT30x7
 
 #define DP_TEST_HBR2_SCRAMBLER_RESET0x24A
 #define DP_TEST_80BIT_CUSTOM_PATTERN_7_00x250
-- 
2.17.1

___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[Bug 207383] [Regression] 5.7 amdgpu/polaris11 gpf: amdgpu_atomic_commit_tail

2020-07-22 Thread bugzilla-daemon
https://bugzilla.kernel.org/show_bug.cgi?id=207383

--- Comment #85 from mn...@protonmail.com ---
(In reply to Christian König from comment #83)
> Instead of working around the bug I think we should concentrate on nailing
> the root cause.
> 
> I suggest to insert an use after free check into just that structure. In
> other words add a field "magic_number" will it with 0xdeadbeef on allocation
> and set it to zero before the kfree().
> 
> A simple BUG_ON(ptr->magic_number != 0xdeadbeef) should yield results rather
> quickly.
> 
> Then just add printk()s before the kfree() to figure out why we have this
> use after free race.

Fair point, I was just trying to confirm my hypothesis.

I realised why the test failed: adding 8 bytes of padding to the middle
made the struct size 24 bytes. Since the freelist pointer is being added
to the middle (12 bytes) and that's aligned to the nearest 8 bytes, the
pointer ended up being placed at an offset of 16 bytes (context).

After making the padding an array of 2 void* and initialising it to
{0xDEADBEEFCAFEF00D, 0x1BADF00D1BADC0DE}, the padding was eventually
corrupted while the context was left intact, and therefore there was no crash.

GDB output of dm_struct:
{
base = {state = 0x888273884c00},
padding = {0xdeadbeefcafef00d, 0x513df83afd3ad7b2},
context = 0x88824e68
}

That said, I still don't know the root cause of the bug, I'll see
if I can use KASAN or something to figure out what exactly freed
dm_state. If anyone who is more familiar with this code has any advice
for me, please let me know.
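A minimal sketch of the poisoning suggested in comment #83, assuming the
structure in question is amdgpu's dm_atomic_state:

    #define DM_STATE_MAGIC 0xdeadbeef

    struct dm_atomic_state {
        struct drm_private_state base;
        u32 magic_number;               /* debug instrumentation only */
        struct dc_state *context;
    };

    /* on allocation */
    state->magic_number = DM_STATE_MAGIC;

    /* before every use */
    BUG_ON(state->magic_number != DM_STATE_MAGIC);

    /* right before freeing */
    state->magic_number = 0;
    kfree(state);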

-- 
You are receiving this mail because:
You are watching the assignee of the bug.
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[Bug 206987] [drm] [amdgpu] Whole system crashes when the driver is in mode_support_and_system_configuration

2020-07-22 Thread bugzilla-daemon
https://bugzilla.kernel.org/show_bug.cgi?id=206987

Cyrax (ev...@hotmail.com) changed:

   What|Removed |Added

 Status|NEW |RESOLVED
 Resolution|--- |DUPLICATE

--- Comment #32 from Cyrax (ev...@hotmail.com) ---
Fix is in stable 5.7.10 kernel.

*** This bug has been marked as a duplicate of bug 207979 ***

-- 
You are receiving this mail because:
You are watching the assignee of the bug.
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


[pull] amdgpu drm-fixes-5.8

2020-07-22 Thread Alex Deucher
Hi Dave, Daniel,

Couple of fixes for 5.8.

The following changes since commit adbe8a3cae94a63e9f416795c750237a9b789124:

  Merge tag 'amd-drm-fixes-5.8-2020-07-15' of 
git://people.freedesktop.org/~agd5f/linux into drm-fixes (2020-07-17 13:29:00 
+1000)

are available in the Git repository at:

  git://people.freedesktop.org/~agd5f/linux tags/amd-drm-fixes-5.8-2020-07-22

for you to fetch changes up to 38e0c89a19fd13f28d2b4721035160a3e66e270b:

  drm/amdgpu: Fix NULL dereference in dpm sysfs handlers (2020-07-21 16:00:01 
-0400)


amd-drm-fixes-5.8-2020-07-22:

amdgpu:
- Fix crash when overclocking VegaM
- Fix possible crash when editing dpm levels


Paweł Gronowski (1):
  drm/amdgpu: Fix NULL dereference in dpm sysfs handlers

Qiu Wenbo (1):
  drm/amd/powerplay: fix a crash when overclocking Vega M

 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c  |  9 +++--
 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c | 10 ++
 2 files changed, 9 insertions(+), 10 deletions(-)
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel


Re: warning in omap_connector

2020-07-22 Thread Dave Airlie
I think I'm still seeing this.

Dave.

On Wed, 1 Jul 2020 at 01:08, Ville Syrjälä
 wrote:
>
> On Tue, Jun 30, 2020 at 05:41:32PM +0300, Laurent Pinchart wrote:
> > Hi Ville,
> >
> > On Tue, Jun 30, 2020 at 05:39:02PM +0300, Ville Syrjälä wrote:
> > > On Tue, Jun 30, 2020 at 10:19:23AM -0400, Alex Deucher wrote:
> > > > On Tue, Jun 30, 2020 at 10:15 AM Ville Syrjälä wrote:
> > > > >
> > > > > On Tue, Jun 30, 2020 at 04:33:37PM +1000, Dave Airlie wrote:
> > > > > > Hey Laurent,
> > > > > >
> > > > > > I merged drm-misc-next and noticed this, I'm not sure if it's
> > > > > > collateral damage from something else changing or I've just missed 
> > > > > > it
> > > > > > previously. 32-bit arm build.
> > > > > >
> > > > > >
> > > > > > /home/airlied/devel/kernel/dim/src/drivers/gpu/drm/omapdrm/omap_connector.c:
> > > > > > In function ‘omap_connector_mode_valid’:
> > > > > > /home/airlied/devel/kernel/dim/src/drivers/gpu/drm/omapdrm/omap_connector.c:92:9:
> > > > > > warning: braces around scalar initializer
> > > > > >   struct drm_display_mode new_mode = { { 0 } };
> > > > >
> > > > > Probably fallout from my drm_display_mode shrinkage.
> > > > >
> > > > > Going to repeat my usual "just use {} when zero initializing
> > > > > structs" recommendation. Avoids these stupid warnings, and IMO
> > > > > also conveys the meaning better since there's no ambiguity
> > > > > between zero initializing the whole struct vs. zero initializing
> > > > > just the first member.
> > > >
> > > > IIRC, LLVM and GCC treat these slightly differently.  We've generally
> > > > just moved to using memset to avoid different compiler complaints when
> > > > using these.
> > >
> > > I don't particularly like memset() since the requirement to
> > > pass the size just adds another way to screw things up. The
> > > usual 'sizeof(*thing)' makes that slightly less of an issue,
> > > but I've noticed that people often don't use that.
> > >
> > > Another issue with memset() is that you then can end up with
> > > a block of seemingly random collection of memsets()s between
> > > the variable declarations and the rest of the code. I suppose
> > > if we could declare variables anywhere we could always keep
> > > the two together so it wouldn't look so weird, but can't do
> > > that for the time being. And even with that it would still
> > > lead to less succinct code, which I generally dislike.
> >
> > I'd prefer { } over memset, assuming clang and gcc would treat it
> > correctly. Ville, I can submit a patch, unless you want to do it
> > yourself as it's a fallout from drm_display_mode shrinkage ;-)
> > (seriously speaking, not pushing you, I just want to avoid duplicating
> > work).
>
> Go ahead if you want to. I'm in middle of a bigger rebase atm
> so can't do it right this minute myself.
>
> --
> Ville Syrjälä
> Intel
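The three initialization styles under discussion, side by side
(illustrative):

    struct drm_display_mode a = { { 0 } }; /* zero-init via the first member;
                                            * warns now that the first member
                                            * is scalar */
    struct drm_display_mode b = {};        /* zero-init the whole struct,
                                            * no ambiguity, no warning */
    struct drm_display_mode c;
    memset(&c, 0, sizeof(c));              /* explicit, but the size argument
                                            * is one more thing to get wrong */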
___
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

