Module Name:	src
Committed By:	martin
Date:		Sat Feb 3 11:15:12 UTC 2024
Modified Files:
	src/sys/external/bsd/drm2/dist/drm/i915/gem [netbsd-10]:
	    i915_gem_mman.c i915_gem_pages.c i915_gem_phys.c
	    i915_gem_region.c i915_gem_stolen.c

Log Message:
Pull up following revision(s) (requested by riastradh in ticket #559):

	sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c: revision 1.6
	sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c: revision 1.7
	sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pages.c: revision 1.7
	sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c: revision 1.9
	sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.c: revision 1.22
	sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c: revision 1.5
	sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c: revision 1.6

i915_gem_region: Reduce diff from upstream a little.

No functional change intended.  Prompted by upcoming nearby changes
related to PR kern/57833.

i915_gem: Avoid walking off the end of sg_pgs.

sg_npgs currently fails to match obj->base.size/PAGE_SIZE only due to
bugs in the construction of sg_pgs in various i915 gem object types,
which we should also fix, but let's avoid compounding it here.

Related to PR kern/57833.

i915_gem: Assert page array size.

Let's detect the bug of sg_npgs failing to match
obj->base.size/PAGE_SIZE earlier.

Related to PR kern/57833.

i915_gem_phys: Fill sg_pgs.

This is needed by i915 gem fault, which maps user virtual addresses to
those pages, and by i915 gem object destruction, which does
pmap_page_protect on the pages to remove any such user virtual
mappings.

This needs pmap_kenter_pa, rather than pmap_enter(pmap_kernel(), ...),
in order to preserve the _kernel's_ mapping of the pages after
pmap_page_protect.  But bus_dmamem_map currently uses
pmap_enter(pmap_kernel(), ...), which creates a mapping that is
removed by pmap_page_protect.  So we use a variant of bus_dmamem_map
that uses pmap_kenter_pa instead.  Perhaps bus_dmamem_map should do
this itself, but that change is riskier to pull up than this one.

PR kern/57833: kernel panic on xorg exit

i915_gem_region: Fill sg_pgs, with size/PAGE_SIZE entries.

Use sg_alloc_table_from_bus_dmamem to do this.  vm_fault_cpu and
i915_gem_object_release_mmap_offset in i915_gem_mman.c both rely on
sg_pgs being a page array, so using anything else, such as
size >> ilog2(mem->mm.chunk_size) entries, doesn't work.  They also
rely on the sg_pgs entries being initialized, which we weren't doing
before, and which sg_alloc_table_from_bus_dmamem does for us.

Related to PR kern/57833.

i915_gem_stolen: Fix memory leak.

Found while trying to address the PR 57833 class of problems.

i915_gem_stolen: Fill sg_pgs, with size/PAGE_SIZE entries.

Use sg_alloc_table_from_bus_dmamem to do this.  vm_fault_cpu and
i915_gem_object_release_mmap_offset in i915_gem_mman.c both rely on
sg_pgs being a page array, so providing a table with only one entry
doesn't work (except by accident, if the object happens to be
page-sized anyway).  They also rely on the sg_pgs entries being
initialized, which we weren't doing before, and which
sg_alloc_table_from_bus_dmamem does for us.

Related to PR kern/57833.
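For readers unfamiliar with the pmap(9) distinction drawn above, a
minimal sketch of the two calls (not taken from the patch; "va" and
"pa" are placeholder variables, and the flag values are illustrative
only):

	/*
	 * pmap_enter(pmap_kernel(), ...) creates a *managed* mapping,
	 * tracked against the physical page, so a later
	 * pmap_page_protect(pg, VM_PROT_NONE) tears it down along
	 * with any user mappings of the same page.
	 */
	(void)pmap_enter(pmap_kernel(), va, pa,
	    VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);

	/*
	 * pmap_kenter_pa creates an *unmanaged* kernel mapping that
	 * pmap_page_protect does not see, so the kernel's view of the
	 * object survives revocation of the user mappings -- hence
	 * the bus_dmamem_kmap variant in the phys diff below.
	 */
	pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);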
To generate a diff of this commit:
cvs rdiff -u -r1.21 -r1.21.4.1 \
    src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.c
cvs rdiff -u -r1.6 -r1.6.4.1 \
    src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pages.c
cvs rdiff -u -r1.8 -r1.8.4.1 \
    src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c
cvs rdiff -u -r1.4 -r1.4.4.1 \
    src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c
cvs rdiff -u -r1.5 -r1.5.4.1 \
    src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
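The common pattern that the phys, region, and stolen hunks below
converge on, sketched in isolation (dmamap creation/loading and the
error paths shown in the actual diffs are elided; st, dmat, segs, and
nsegs stand for whatever the caller already has in hand):

	/*
	 * Build the scatterlist directly from the bus_dma segments so
	 * that sg_pgs gets exactly size/PAGE_SIZE initialized entries;
	 * vm_fault_cpu and i915_gem_object_release_mmap_offset treat
	 * sg_pgs as a page array and index it page by page.
	 */
	ret = sg_alloc_table_from_bus_dmamem(st, dmat, segs, nsegs,
	    GFP_KERNEL);
	if (ret)
		goto err;
	sg = st->sgl;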
Modified files:

Index: src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.c
diff -u src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.c:1.21 src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.c:1.21.4.1
--- src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.c:1.21	Sun Dec 19 12:26:55 2021
+++ src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_mman.c	Sat Feb 3 11:15:12 2024
@@ -1,4 +1,4 @@
-/*	$NetBSD: i915_gem_mman.c,v 1.21 2021/12/19 12:26:55 riastradh Exp $	*/
+/*	$NetBSD: i915_gem_mman.c,v 1.21.4.1 2024/02/03 11:15:12 martin Exp $	*/
 
 /*
  * SPDX-License-Identifier: MIT
@@ -7,7 +7,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: i915_gem_mman.c,v 1.21 2021/12/19 12:26:55 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: i915_gem_mman.c,v 1.21.4.1 2024/02/03 11:15:12 martin Exp $");
 
 #include <linux/anon_inodes.h>
 #include <linux/mman.h>
@@ -675,7 +675,7 @@ void i915_gem_object_release_mmap_offset
 	if (!i915_gem_object_has_pages(obj))
 		return;
 
-	for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) {
+	for (i = 0; i < obj->mm.pages->sgl->sg_npgs; i++) {
 		page = obj->mm.pages->sgl->sg_pgs[i];
 		vm_page = &page->p_vmp;
 		pmap_page_protect(vm_page, VM_PROT_NONE);

Index: src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pages.c
diff -u src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pages.c:1.6 src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pages.c:1.6.4.1
--- src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pages.c:1.6	Sun Dec 19 12:00:57 2021
+++ src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_pages.c	Sat Feb 3 11:15:12 2024
@@ -1,4 +1,4 @@
-/*	$NetBSD: i915_gem_pages.c,v 1.6 2021/12/19 12:00:57 riastradh Exp $	*/
+/*	$NetBSD: i915_gem_pages.c,v 1.6.4.1 2024/02/03 11:15:12 martin Exp $	*/
 
 /*
  * SPDX-License-Identifier: MIT
@@ -7,7 +7,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: i915_gem_pages.c,v 1.6 2021/12/19 12:00:57 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: i915_gem_pages.c,v 1.6.4.1 2024/02/03 11:15:12 martin Exp $");
 
 #include "i915_drv.h"
 #include "i915_gem_object.h"
@@ -42,6 +42,18 @@ void __i915_gem_object_set_pages(struct
 	}
 
 #ifndef __NetBSD__
+	/*
+	 * Paranoia: In NetBSD, a scatterlist is just an array of
+	 * pages, not an array of segments that might be larger than
+	 * pages, so the number of entries must exactly match the size
+	 * of the object (which should also be page-aligned).
+	 *
+	 * Both vm_fault_cpu and i915_gem_object_release_mmap_offset in
+	 * i915_gem_mman.c rely on this page array as such.
+	 */
+	KASSERTMSG(pages->sgl->sg_npgs == obj->base.size >> PAGE_SHIFT,
+	    "npgs=%zu size=%zu", pages->sgl->sg_npgs, obj->base.size);
+
 	obj->mm.get_page.sg_pos = pages->sgl;
 	obj->mm.get_page.sg_idx = 0;
 #endif

Index: src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c
diff -u src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c:1.8 src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c:1.8.4.1
--- src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c:1.8	Sun Dec 19 12:45:43 2021
+++ src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_phys.c	Sat Feb 3 11:15:12 2024
@@ -1,4 +1,4 @@
-/*	$NetBSD: i915_gem_phys.c,v 1.8 2021/12/19 12:45:43 riastradh Exp $	*/
+/*	$NetBSD: i915_gem_phys.c,v 1.8.4.1 2024/02/03 11:15:12 martin Exp $	*/
 
 /*
  * SPDX-License-Identifier: MIT
@@ -7,7 +7,91 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: i915_gem_phys.c,v 1.8 2021/12/19 12:45:43 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: i915_gem_phys.c,v 1.8.4.1 2024/02/03 11:15:12 martin Exp $");
+
+#ifdef __NetBSD__
+/*
+ * Make sure this block comes before any linux includes, so we don't
+ * get mixed up by the PAGE_MASK complementation.
+ */
+
+#include <sys/bus.h>
+
+#include <uvm/uvm.h>
+#include <uvm/uvm_extern.h>
+
+#include <machine/pmap_private.h>	/* kvtopte, pmap_pte_clearbits */
+
+/*
+ * Version of bus_dmamem_map that uses pmap_kenter_pa, not pmap_enter,
+ * so that it isn't affected by pmap_page_protect on the physical
+ * address.  Adapted from sys/arch/x86/x86/bus_dma.c.
+ */
+static int
+bus_dmamem_kmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
+    size_t size, void **kvap, int flags)
+{
+	vaddr_t va;
+	bus_addr_t addr;
+	int curseg;
+	const uvm_flag_t kmflags =
+	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;
+	u_int pmapflags = PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE;
+
+	size = round_page(size);
+	if (flags & BUS_DMA_NOCACHE)
+		pmapflags |= PMAP_NOCACHE;
+
+	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
+
+	if (va == 0)
+		return ENOMEM;
+
+	*kvap = (void *)va;
+
+	for (curseg = 0; curseg < nsegs; curseg++) {
+		for (addr = segs[curseg].ds_addr;
+		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
+		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
+			if (size == 0)
+				panic("bus_dmamem_kmap: size botch");
+			pmap_kenter_pa(va, addr,
+			    VM_PROT_READ | VM_PROT_WRITE,
+			    pmapflags);
+		}
+	}
+	pmap_update(pmap_kernel());
+
+	return 0;
+}
+
+static void
+bus_dmamem_kunmap(bus_dma_tag_t t, void *kva, size_t size)
+{
+	pt_entry_t *pte, opte;
+	vaddr_t va, sva, eva;
+
+	KASSERTMSG(((uintptr_t)kva & PGOFSET) == 0, "kva=%p", kva);
+
+	size = round_page(size);
+	sva = (vaddr_t)kva;
+	eva = sva + size;
+
+	/*
+	 * mark pages cacheable again.
+	 */
+	for (va = sva; va < eva; va += PAGE_SIZE) {
+		pte = kvtopte(va);
+		opte = *pte;
+		if ((opte & PTE_PCD) != 0)
+			pmap_pte_clearbits(pte, PTE_PCD);
+	}
+	pmap_kremove((vaddr_t)kva, size);
+	pmap_update(pmap_kernel());
+	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
+}

+#endif
 
 #include <linux/highmem.h>
 #include <linux/shmem_fs.h>
@@ -65,7 +149,7 @@ static int i915_gem_object_get_pages_phy
 	if (ret)
 		return -ENOMEM;
 	KASSERT(rsegs == 1);
-	ret = -bus_dmamem_map(dmat, &obj->mm.u.phys.seg, 1,
+	ret = -bus_dmamem_kmap(dmat, &obj->mm.u.phys.seg, 1,
 	    roundup_pow_of_two(obj->base.size), &vaddr,
 	    BUS_DMA_WAITOK|BUS_DMA_COHERENT);
 	if (ret)
@@ -83,7 +167,12 @@
 	if (!st)
 		goto err_pci;
 
+#ifdef __NetBSD__
+	if (sg_alloc_table_from_bus_dmamem(st, dmat, &obj->mm.u.phys.seg, 1,
+		GFP_KERNEL))
+#else
 	if (sg_alloc_table(st, 1, GFP_KERNEL))
+#endif
 		goto err_st;
 
 	sg = st->sgl;
@@ -151,7 +240,7 @@ err_st:
 err_pci:
 #ifdef __NetBSD__
 	if (vaddr) {
-		bus_dmamem_unmap(dmat, vaddr,
+		bus_dmamem_kunmap(dmat, vaddr,
 		    roundup_pow_of_two(obj->base.size));
 	}
 	obj->mm.u.phys.kva = NULL;
@@ -225,7 +314,7 @@ i915_gem_object_put_pages_phys(struct dr
 	kfree(pages);
 
 #ifdef __NetBSD__
-	bus_dmamem_unmap(dmat, obj->mm.u.phys.kva,
+	bus_dmamem_kunmap(dmat, obj->mm.u.phys.kva,
 	    roundup_pow_of_two(obj->base.size));
 	obj->mm.u.phys.kva = NULL;
 	bus_dmamem_free(dmat, &obj->mm.u.phys.seg, 1);

Index: src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c
diff -u src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c:1.4 src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c:1.4.4.1
--- src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c:1.4	Sun Dec 19 12:10:42 2021
+++ src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_region.c	Sat Feb 3 11:15:12 2024
@@ -1,4 +1,4 @@
-/*	$NetBSD: i915_gem_region.c,v 1.4 2021/12/19 12:10:42 riastradh Exp $	*/
+/*	$NetBSD: i915_gem_region.c,v 1.4.4.1 2024/02/03 11:15:12 martin Exp $	*/
 
 // SPDX-License-Identifier: MIT
 /*
@@ -6,7 +6,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: i915_gem_region.c,v 1.4 2021/12/19 12:10:42 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: i915_gem_region.c,v 1.4.4.1 2024/02/03 11:15:12 martin Exp $");
 
 #include "intel_memory_region.h"
 #include "i915_gem_region.h"
@@ -45,10 +45,12 @@ i915_gem_object_get_pages_buddy(struct d
 	if (!st)
 		return -ENOMEM;
 
+#ifndef __NetBSD__
 	if (sg_alloc_table(st, size >> ilog2(mem->mm.chunk_size), GFP_KERNEL)) {
 		kfree(st);
 		return -ENOMEM;
 	}
+#endif
 
 	flags = I915_ALLOC_MIN_PAGE_SIZE;
 	if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
@@ -60,15 +62,15 @@
 
 	GEM_BUG_ON(list_empty(blocks));
 
-	sg = st->sgl;
 #ifdef __NetBSD__
 	__USE(prev_end);
-	__USE(sg_page_sizes);
 	bus_dma_tag_t dmat = obj->base.dev->dmat;
 	bus_dma_segment_t *segs = NULL;
 	int i = 0, nsegs = 0;
 	bool loaded = false;
 
+	sg = NULL;
+
 	list_for_each_entry(block, blocks, link) {
 		if (nsegs >= INT_MAX ||
 		    nsegs >= SIZE_MAX/sizeof(segs[0]))
@@ -85,7 +87,15 @@
 		segs[i].ds_addr = mem->region.start + offset;
 		segs[i].ds_len = block_size;
+		i++;
 	}
+	KASSERT(i == nsegs);
+
+	ret = sg_alloc_table_from_bus_dmamem(st, dmat, segs, nsegs,
+	    GFP_KERNEL);
+	if (ret)
+		goto err;
+	sg = st->sgl;
 
 	/* XXX errno NetBSD->Linux */
 	ret = -bus_dmamap_create(dmat, size, nsegs, size, 0, BUS_DMA_WAITOK,
@@ -106,8 +116,9 @@
 	kmem_free(segs, nsegs * sizeof(segs[0]));
 	segs = NULL;
 
-	__i915_gem_object_set_pages(obj, st, i915_sg_page_sizes(sg));
+	sg_page_sizes = i915_sg_page_sizes(sg);
 #else
+	sg = st->sgl;
 	st->nents = 0;
 	sg_page_sizes = 0;
 	prev_end = (resource_size_t)-1;
@@ -145,9 +156,9 @@
 	sg_page_sizes |= sg->length;
 	sg_mark_end(sg);
 	i915_sg_trim(st);
+#endif
 
 	__i915_gem_object_set_pages(obj, st, sg_page_sizes);
-#endif
 
 	return 0;
 
@@ -155,6 +166,8 @@
 err:
 	if (loaded)
 		bus_dmamap_unload(dmat, st->sgl->sg_dmamap);
+	if (sg && sg->sg_dmamap)
+		bus_dmamap_destroy(dmat, sg->sg_dmamap);
 	if (segs)
 		kmem_free(segs, nsegs * sizeof(segs[0]));
 	__intel_memory_region_put_pages_buddy(mem, blocks);

Index: src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c
diff -u src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c:1.5 src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c:1.5.4.1
--- src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c:1.5	Sun Dec 19 12:10:42 2021
+++ src/sys/external/bsd/drm2/dist/drm/i915/gem/i915_gem_stolen.c	Sat Feb 3 11:15:12 2024
@@ -1,4 +1,4 @@
-/*	$NetBSD: i915_gem_stolen.c,v 1.5 2021/12/19 12:10:42 riastradh Exp $	*/
+/*	$NetBSD: i915_gem_stolen.c,v 1.5.4.1 2024/02/03 11:15:12 martin Exp $	*/
 
 /*
  * SPDX-License-Identifier: MIT
@@ -7,7 +7,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: i915_gem_stolen.c,v 1.5 2021/12/19 12:10:42 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: i915_gem_stolen.c,v 1.5.4.1 2024/02/03 11:15:12 martin Exp $");
 
 #include <linux/errno.h>
 #include <linux/mutex.h>
@@ -506,14 +506,13 @@ i915_pages_create_for_stolen(struct drm_
 {
 	struct drm_i915_private *i915 = to_i915(dev);
 	struct sg_table *st;
+	struct scatterlist *sg;
 #ifdef __NetBSD__
 	bus_dma_tag_t dmat = i915->drm.dmat;
 	bus_dma_segment_t *seg = NULL;
 	int nseg = 0, i;
 	bool loaded = false;
 	int ret;
-#else
-	struct scatterlist *sg;
 #endif
 
 	GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm)));
@@ -527,11 +526,6 @@
 	if (st == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
-		kfree(st);
-		return ERR_PTR(-ENOMEM);
-	}
-
 #ifdef __NetBSD__
 	KASSERT((size % PAGE_SIZE) == 0);
 	nseg = size / PAGE_SIZE;
@@ -548,6 +542,17 @@
 		seg[i].ds_len = PAGE_SIZE;
 	}
 
+	sg = NULL;
+
+	ret = sg_alloc_table_from_bus_dmamem(st, dmat, seg, nseg, GFP_KERNEL);
+	if (ret) {
+		DRM_ERROR("failed to alloc sg table for stolen object: %d\n",
+		    ret);
+		ret = -ENOMEM;
+		goto out;
+	}
+	sg = st->sgl;
+
 	/* XXX errno NetBSD->Linux */
 	ret = -bus_dmamap_create(dmat, size, nseg, PAGE_SIZE, 0,
 	    BUS_DMA_WAITOK, &st->sgl->sg_dmamap);
@@ -569,14 +574,23 @@ i915_pages_create_for_stolen(struct drm_
 	}
 	loaded = true;
 
-out:	if (ret) {
+out:	kmem_free(seg, nseg * sizeof(seg[0]));
+	if (ret) {
 		if (loaded)
 			bus_dmamap_unload(dmat, st->sgl->sg_dmamap);
-		sg_free_table(st);
+		if (sg && sg->sg_dmamap)
+			bus_dmamap_destroy(dmat, sg->sg_dmamap);
+		if (sg)
+			sg_free_table(st);
 		kfree(st);
 		return ERR_PTR(ret);
 	}
 #else
+	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
+		kfree(st);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	sg = st->sgl;
 	sg->offset = 0;
 	sg->length = size;