On 23.08.2016 08:17, Daniel Vetter wrote:
> On Mon, Aug 22, 2016 at 10:25:23PM +0200, Noralf Trønnes wrote:
>> The SimpleDRM driver binds to simple-framebuffer devices and provides a
>> DRM/KMS API. It provides only a single CRTC+encoder+connector combination
>> plus one initial mode.
>>
>> Userspace can create dumb-buffers which can be blit into the real
>> framebuffer similar to UDL. No access to the real framebuffer is allowed
>> (compared to earlier versions of this driver) to avoid security issues.
>> Furthermore, this way we can support arbitrary modes as long as we have a
>> conversion-helper.
>>
>> The driver was originally written by David Herrmann in 2014.
>> My main contribution is to make use of drm_simple_kms_helper and
>> rework the probe path to avoid use of the deprecated drm_platform_init()
>> and drm_driver.{load,unload}().
>> Additions have also been made for later changes to the Device Tree binding
>> document, like support for clocks, regulators and having the node under
>> /chosen. This code was lifted verbatim from simplefb.c.
>>
>> Cc: dh.herrmann at gmail.com
>> Cc: libv at skynet.be
>> Signed-off-by: Noralf Trønnes <noralf at tronnes.org>
>> ---
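As an illustration of the "conversion-helper" mentioned above, here is a
minimal sketch of the kind of per-line blit helper this refers to, converting
one line of an XRGB8888 dumb buffer into an RGB565 firmware framebuffer. The
function name and the format pair are assumptions for illustration only, not
taken from the patch:

#include <linux/types.h>

/* Illustrative only: blit one line of XRGB8888 into an RGB565 framebuffer. */
static void xrgb8888_to_rgb565_line(u16 *dst, const u32 *src,
                                    unsigned int pixels)
{
        unsigned int x;

        for (x = 0; x < pixels; x++) {
                u32 pix = src[x];

                /* keep the top 5/6/5 bits of each 8-bit channel */
                dst[x] = ((pix & 0x00f80000) >> 8) |
                         ((pix & 0x0000fc00) >> 5) |
                         ((pix & 0x000000f8) >> 3);
        }
}

The driver would call something like this per line when blitting a dumb
buffer into the firmware framebuffer, which is why arbitrary userspace
formats can be supported as long as such a helper exists.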
<snip>

>> +static const struct file_operations sdrm_drm_fops = {
>> +        .owner = THIS_MODULE,
>> +        .open = drm_open,
>> +        .mmap = sdrm_drm_mmap,
>> +        .poll = drm_poll,
>> +        .read = drm_read,
>> +        .unlocked_ioctl = drm_ioctl,
>> +        .release = drm_release,
>> +#ifdef CONFIG_COMPAT
>> +        .compat_ioctl = drm_compat_ioctl,
>> +#endif
>> +        .llseek = noop_llseek,
>> +};
>> +
>> +static const struct vm_operations_struct sdrm_gem_vm_ops = {
>> +        .fault = sdrm_gem_fault,
>> +        .open = drm_gem_vm_open,
>> +        .close = drm_gem_vm_close,
>> +};
>> +

<snip>

>> diff --git a/drivers/gpu/drm/simpledrm/simpledrm_gem.c
>> b/drivers/gpu/drm/simpledrm/simpledrm_gem.c
>> new file mode 100644
>> index 0000000..8cced80
>> --- /dev/null
>> +++ b/drivers/gpu/drm/simpledrm/simpledrm_gem.c
>> @@ -0,0 +1,202 @@
>> +/*
>> + * SimpleDRM firmware framebuffer driver
>> + * Copyright (c) 2012-2014 David Herrmann <dh.herrmann at gmail.com>
>> + *
>> + * This program is free software; you can redistribute it and/or modify it
>> + * under the terms of the GNU General Public License as published by the Free
>> + * Software Foundation; either version 2 of the License, or (at your option)
>> + * any later version.
>> + */
>> +
>> +#include <drm/drmP.h>
>> +#include <linux/errno.h>
>> +#include <linux/mm.h>
>> +#include <linux/slab.h>
>> +#include <linux/vmalloc.h>
>> +
>> +#include "simpledrm.h"
>> +
>> +struct sdrm_gem_object *sdrm_gem_alloc_object(struct drm_device *ddev,
>> +                                              size_t size)
>> +{
>> +        struct sdrm_gem_object *obj;
>> +
>> +        WARN_ON(!size || (size & ~PAGE_MASK) != 0);
>> +
>> +        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
>> +        if (!obj)
>> +                return NULL;
>> +
>> +        if (drm_gem_object_init(ddev, &obj->base, size)) {
>> +                kfree(obj);
>> +                return NULL;
>> +        }
>> +
>> +        return obj;
>> +}
>> +
>> +int sdrm_dumb_create(struct drm_file *dfile, struct drm_device *ddev,
>> +                     struct drm_mode_create_dumb *args)
>> +{
>> +        struct sdrm_gem_object *obj;
>> +        int r;
>> +
>> +        /* overflow checks are done by DRM core */
>> +        args->pitch = (args->bpp + 7) / 8 * args->width;
>> +        args->size = PAGE_ALIGN(args->pitch * args->height);
>> +
>> +        obj = sdrm_gem_alloc_object(ddev, args->size);
>> +        if (!obj)
>> +                return -ENOMEM;
>> +
>> +        r = drm_gem_handle_create(dfile, &obj->base, &args->handle);
>> +        if (r) {
>> +                drm_gem_object_unreference_unlocked(&obj->base);
>> +                return r;
>> +        }
>> +
>> +        /* handle owns a reference */
>> +        drm_gem_object_unreference_unlocked(&obj->base);
>> +
>> +        return 0;
>> +}
>> +
>> +int sdrm_drm_mmap(struct file *filp, struct vm_area_struct *vma)
>> +{
>> +        int ret;
>> +
>> +        ret = drm_gem_mmap(filp, vma);
>> +        if (ret)
>> +                return ret;
>> +
>> +        vma->vm_flags &= ~VM_PFNMAP;
>> +        vma->vm_flags |= VM_MIXEDMAP;
>> +        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

> Hm, this is still the hand-rolled mmap support. Did my more detailed plan
> not work, now that you've switched to more native gem objects? Doing it
> that way would allow us to remove all the hand-rolled fault handling
> (especially sdrm_gem_fault), which I think would be nice.
>
> I know, udl doesn't do it that way, not sure exactly why.

I'm really walking in the dark here.
I have deleted drm_driver.gem_vm_ops and used this function:

int sdrm_drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_gem_object *obj = NULL;
        struct drm_vma_offset_node *node;
        int ret;

        drm_vma_offset_lock_lookup(dev->vma_offset_manager);
        node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
                                                  vma->vm_pgoff,
                                                  vma_pages(vma));
        if (likely(node)) {
                obj = container_of(node, struct drm_gem_object, vma_node);
                /*
                 * When the object is being freed, after it hits 0-refcnt it
                 * proceeds to tear down the object. In the process it will
                 * attempt to remove the VMA offset and so acquire this
                 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
                 * that matches our range, we know it is in the process of being
                 * destroyed and will be freed as soon as we release the lock -
                 * so we have to check for the 0-refcnted object and treat it as
                 * invalid.
                 */
                if (!kref_get_unless_zero(&obj->refcount))
                        obj = NULL;
        }
        drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

        if (!obj)
                return -EINVAL;

        if (!drm_vma_node_is_allowed(node, filp)) {
                drm_gem_object_unreference_unlocked(obj);
                return -EACCES;
        }

        /* redirect to shmem mmap */
        vma->vm_file = obj->filp;
        vma->vm_pgoff = 0;
        ret = obj->filp->f_op->mmap(obj->filp, vma);

        drm_gem_object_unreference_unlocked(obj);

        return ret;
}

But that only partly works: using modetest I get a picture, but fbdev doesn't
come back afterwards. With drm debug turned on, the two variants are identical
up to DRM_IOCTL_MODE_DESTROY_DUMB.

The shmem mmap version:

[identical]
[   74.939660] [drm:drm_ioctl] pid=721, dev=0xe200, auth=1, DRM_IOCTL_MODE_DESTROY_DUMB

And nothing more.

Whereas the working one gives me this:

[identical]
[   70.373029] [drm:drm_ioctl] pid=721, dev=0xe200, auth=1, DRM_IOCTL_MODE_DESTROY_DUMB
[   70.393401] [drm:drm_release] open_count = 1
[   70.393429] [drm:drm_release] pid = 721, device = 0xe200, open_count = 1
[   70.393468] [drm:drm_lastclose]
[   70.393501] [drm:drm_atomic_state_init] Allocated atomic state dad61e00
[   70.393521] [drm:drm_atomic_get_plane_state] Added [PLANE:24:plane-0] dad61e40 state to dad61e00
[   70.393543] [drm:drm_atomic_get_crtc_state] Added [CRTC:25:crtc-0] dad73a00 state to dad61e00
[   70.393588] [drm:drm_atomic_set_mode_for_crtc] Set [MODE:1824x984] for CRTC state dad73a00
[   70.393604] [drm:drm_atomic_set_crtc_for_plane] Link plane state dad61e40 to [CRTC:25:crtc-0]
[   70.393619] [drm:drm_mode_object_reference] OBJ ID: 29 (1)
[   70.393629] [drm:drm_atomic_set_fb_for_plane] Set [FB:29] for plane state dad61e40
[   70.393643] [drm:drm_atomic_add_affected_connectors] Adding all current connectors for [CRTC:25:crtc-0] to dad61e00
[   70.393662] [drm:drm_mode_object_reference] OBJ ID: 23 (2)
[   70.393674] [drm:drm_atomic_get_connector_state] Added [CONNECTOR:23] dad613c0 state to dad61e00
[   70.393835] [drm:drm_mode_object_reference] OBJ ID: 23 (3)
[   70.393848] [drm:drm_atomic_set_crtc_for_connector] Link connector state dad613c0 to [CRTC:25:crtc-0]
[   70.393859] [drm:drm_atomic_check_only] checking dad61e00
[   70.393873] [drm:drm_atomic_helper_check_modeset] [CRTC:25:crtc-0] mode changed
[   70.393881] [drm:drm_atomic_helper_check_modeset] [CRTC:25:crtc-0] enable changed
[   70.403886] [drm:update_connector_routing] Updating routing for [CONNECTOR:23:Virtual-1]
[   70.403916] [drm:update_connector_routing] [CONNECTOR:23:Virtual-1] using [ENCODER:26:None-26] on [CRTC:25:crtc-0]
[   70.403926] [drm:drm_atomic_helper_check_modeset] [CRTC:25:crtc-0] active changed
[   70.403956] [drm:drm_atomic_helper_check_modeset] [CRTC:25:crtc-0] needs all connectors, enable: y, active: y
[   70.403972] [drm:drm_atomic_add_affected_connectors] Adding all current connectors for [CRTC:25:crtc-0] to dad61e00
[   70.404006] [drm:drm_atomic_commit] commiting dad61e00
[   70.404043] [drm:crtc_set_mode] modeset on [ENCODER:26:None-26]
[   70.422427] [drm:drm_atomic_helper_commit_modeset_enables] enabling [CRTC:25:crtc-0]
[   70.422465] [drm:drm_atomic_helper_commit_modeset_enables] enabling [ENCODER:26:None-26]
[   70.422490] [drm:drm_atomic_state_default_clear] Clearing atomic state dad61e00
[   70.422504] [drm:drm_mode_object_unreference] OBJ ID: 23 (4)
[   70.422519] [drm:drm_atomic_state_free] Freeing atomic state dad61e00
[   70.422532] [drm:drm_mode_object_reference] OBJ ID: 29 (2)
[   70.422546] [drm:drm_lastclose] driver lastclose completed

Noralf.

>> +
>> +        return 0;
>> +}
>> +
>> +int sdrm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
>> +{
>> +        struct drm_gem_object *gobj = vma->vm_private_data;
>> +        struct sdrm_gem_object *obj = to_sdrm_bo(gobj);
>> +        pgoff_t offset;
>> +        int ret;
>> +
>> +        if (!obj->pages)
>> +                return VM_FAULT_SIGBUS;
>> +
>> +        offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
>> +                 PAGE_SHIFT;
>> +
>> +        ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
>> +                             obj->pages[offset]);
>> +        switch (ret) {
>> +        case -EAGAIN:
>> +        case 0:
>> +        case -ERESTARTSYS:
>> +        case -EINTR:
>> +        case -EBUSY:
>> +                return VM_FAULT_NOPAGE;
>> +
>> +        case -ENOMEM:
>> +                return VM_FAULT_OOM;
>> +        }
>> +
>> +        return VM_FAULT_SIGBUS;
>> +}
>> +
>> +int sdrm_gem_get_pages(struct sdrm_gem_object *obj)
>> +{
>> +        struct page **pages;
>> +
>> +        if (obj->pages)
>> +                return 0;
>> +
>> +        pages = drm_gem_get_pages(&obj->base);
>> +        if (IS_ERR(pages))
>> +                return PTR_ERR(pages);
>> +
>> +        obj->pages = pages;
>> +
>> +        return 0;
>> +}
>> +
>> +static void sdrm_gem_put_pages(struct sdrm_gem_object *obj)
>> +{
>> +        if (!obj->pages)
>> +                return;
>> +
>> +        drm_gem_put_pages(&obj->base, obj->pages, false, false);
>> +        obj->pages = NULL;
>> +}
>> +
>> +int sdrm_gem_vmap(struct sdrm_gem_object *obj)
>> +{
>> +        int page_count = obj->base.size / PAGE_SIZE;
>> +        int ret;
>> +
>> +        if (obj->vmapping)
>> +                return 0;
>> +
>> +        ret = sdrm_gem_get_pages(obj);
>> +        if (ret)
>> +                return ret;
>> +
>> +        obj->vmapping = vmap(obj->pages, page_count, 0, PAGE_KERNEL);
>> +        if (!obj->vmapping)
>> +                return -ENOMEM;
>> +
>> +        return 0;
>> +}
>> +
>> +static void sdrm_gem_vunmap(struct sdrm_gem_object *obj)
>> +{
>> +        vunmap(obj->vmapping);
>> +        obj->vmapping = NULL;
>> +
>> +        sdrm_gem_put_pages(obj);
>> +}
>> +
>> +void sdrm_gem_free_object(struct drm_gem_object *gobj)
>> +{
>> +        struct sdrm_gem_object *obj = to_sdrm_bo(gobj);
>> +
>> +        if (obj->vmapping)
>> +                sdrm_gem_vunmap(obj);
>> +
>> +        if (obj->pages)
>> +                sdrm_gem_put_pages(obj);
>> +
>> +        drm_gem_object_release(gobj);
>> +        kfree(obj);
>> +}
>> +
>> +int sdrm_dumb_map_offset(struct drm_file *dfile, struct drm_device *ddev,
>> +                         uint32_t handle, uint64_t *offset)
>> +{
>> +        struct sdrm_gem_object *obj;
>> +        struct drm_gem_object *gobj;
>> +        int ret;
>> +
>> +        gobj = drm_gem_object_lookup(dfile, handle);
>> +        if (!gobj)
>> +                return -ENOENT;
>> +
>> +        obj = to_sdrm_bo(gobj);
>> +
>> +        ret = sdrm_gem_get_pages(obj);
>> +        if (ret)
>> +                goto out_unref;
>> +
>> +        ret = drm_gem_create_mmap_offset(gobj);
>> +        if (ret)
>> +                goto out_unref;
>> +
>> +        *offset = drm_vma_node_offset_addr(&gobj->vma_node);
>> +
>> +out_unref:
>> +        drm_gem_object_unreference_unlocked(gobj);
>> +
>> +        return ret;
>> +}
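For reference, a rough userspace sketch of the dumb-buffer path the functions
above implement: DRM_IOCTL_MODE_CREATE_DUMB ends up in sdrm_dumb_create(),
DRM_IOCTL_MODE_MAP_DUMB presumably in sdrm_dumb_map_offset() via
.dumb_map_offset, and the final mmap() on the DRM fd in
sdrm_drm_mmap()/sdrm_gem_fault(). This is generic dumb-buffer usage, not code
from the patch; the helper name, the 32 bpp format and the /dev/dri/card0
device path used by the caller are assumptions for illustration:

/*
 * Rough sketch, not from the patch: create a dumb buffer, ask for its fake
 * mmap offset and map it through the DRM fd. Error handling is minimal and
 * the include path for the drm uapi headers may differ on your system
 * (kernel headers vs. libdrm).
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <drm/drm.h>
#include <drm/drm_mode.h>

static void *map_dumb_buffer(int fd, uint32_t width, uint32_t height,
                             uint32_t *handle, uint32_t *pitch)
{
        struct drm_mode_create_dumb create;
        struct drm_mode_map_dumb map;
        void *ptr;

        memset(&create, 0, sizeof(create));
        create.width = width;
        create.height = height;
        create.bpp = 32;

        /* ends up in sdrm_dumb_create(), which fills in pitch/size/handle */
        if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
                return NULL;

        memset(&map, 0, sizeof(map));
        map.handle = create.handle;

        /* ends up in sdrm_dumb_map_offset(), returning the fake offset */
        if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
                return NULL;

        /* the mmap itself goes through sdrm_drm_mmap()/sdrm_gem_fault() */
        ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
                   fd, map.offset);
        if (ptr == MAP_FAILED)
                return NULL;

        *handle = create.handle;
        *pitch = create.pitch;
        return ptr;
}

Called on an fd from something like open("/dev/dri/card0", O_RDWR), this is
also roughly what modetest does before the DRM_IOCTL_MODE_DESTROY_DUMB calls
that show up in the debug logs above.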