On Sun, Feb 12, 2023 at 02:25:47PM -0500, Thomas Frohwein wrote:
> On Tue, Feb 07, 2023 at 08:51:40PM +1100, Jonathan Gray wrote:
> > [...]
> > > ...
> > > i915_resize_lmem_bar: stub
> > > panic: kernel diagnostic assertion "pdev->pci->sc_bridgetag == NULL"
> > > failed: file "/usr/src/sys/dev/pci/drm/drm_linux.c", line 1277
> > > ...
> > 
> > The vga arbiter bits need to change. There isn't an easy way to get
> > a softc to avoid having state shared by multiple inteldrm instances.
> > 
> > perhaps they can be skipped for the moment?
> 
> Thanks, this leads to a uvm_fault now:
> 
> xehp_load_dss_mask: stub
> xehp_load_dss_mask: stub
> intel_slicemask_from_xehp_dssmask: stub
> intel_slicemask_from_xehp_dssmask: stub
> i915_resize_lmem_bar: stub
> uvm_fault(0xffffffff825181f8, 0x10, 0, 1) -> e
> 
> screen photo at https://thfr.info/tmp/DG2-error-20230212.jpg
unfortunately, that isn't much help

If you boot -d and do:

w db_panic 0
c

you should be able to get a backtrace.

> 
> > 
> > Index: sys/dev/pci/drm/drm_linux.c
> > ===================================================================
> > RCS file: /cvs/src/sys/dev/pci/drm/drm_linux.c,v
> > retrieving revision 1.95
> > diff -u -p -r1.95 drm_linux.c
> > --- sys/dev/pci/drm/drm_linux.c	1 Jan 2023 01:34:34 -0000	1.95
> > +++ sys/dev/pci/drm/drm_linux.c	7 Feb 2023 09:31:55 -0000
> > @@ -1274,7 +1274,8 @@ vga_disable_bridge(struct pci_attach_arg
> >  void
> >  vga_get_uninterruptible(struct pci_dev *pdev, int rsrc)
> >  {
> > -	KASSERT(pdev->pci->sc_bridgetag == NULL);
> > +	if (pdev->pci->sc_bridgetag != NULL)
> > +		return;
> >  	pci_enumerate_bus(pdev->pci, vga_disable_bridge, NULL);
> >  }
> > 
> > @@ -1282,6 +1283,9 @@ void
> >  vga_put(struct pci_dev *pdev, int rsrc)
> >  {
> >  	pcireg_t bc;
> > +
> > +	if (pdev->pci->sc_bridgetag != NULL)
> > +		return;
> > 
> >  	if (!vga_bridge_disabled)
> >  		return;
> > Index: sys/dev/pci/drm/i915/i915_pci.c
> > ===================================================================
> > RCS file: /cvs/src/sys/dev/pci/drm/i915/i915_pci.c,v
> > retrieving revision 1.15
> > diff -u -p -r1.15 i915_pci.c
> > --- sys/dev/pci/drm/i915/i915_pci.c	25 Jan 2023 01:51:59 -0000	1.15
> > +++ sys/dev/pci/drm/i915/i915_pci.c	3 Feb 2023 01:43:02 -0000
> > @@ -1078,7 +1078,6 @@ static const struct intel_device_info dg
> >  	XE_LPD_FEATURES,
> >  	.__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
> >  		BIT(TRANSCODER_C) | BIT(TRANSCODER_D),
> > -	.require_force_probe = 1,
> >  };
> > 
> >  static const struct intel_device_info ats_m_info = {
> > Index: sys/dev/pci/drm/i915/intel_memory_region.h
> > ===================================================================
> > RCS file: /cvs/src/sys/dev/pci/drm/i915/intel_memory_region.h,v
> > retrieving revision 1.3
> > diff -u -p -r1.3 intel_memory_region.h
> > --- sys/dev/pci/drm/i915/intel_memory_region.h	1 Jan 2023 01:34:55 -0000	1.3
> > +++ sys/dev/pci/drm/i915/intel_memory_region.h	4 Feb 2023 00:59:23 -0000
> > @@ -70,8 +70,13 @@ struct intel_memory_region {
> > 
> >  	const struct intel_memory_region_ops *ops;
> > 
> > -#ifdef notyet
> > +#ifdef __linux__
> >  	struct io_mapping iomap;
> > +#else
> > +	struct vm_page *pgs;
> > +	struct agp_map *agph;
> > +	bus_space_handle_t bsh;
> > +	bus_space_tag_t bst;
> >  #endif
> >  	struct resource region;
> > 
> > Index: sys/dev/pci/drm/i915/gem/i915_gem_lmem.c
> > ===================================================================
> > RCS file: /cvs/src/sys/dev/pci/drm/i915/gem/i915_gem_lmem.c,v
> > retrieving revision 1.4
> > diff -u -p -r1.4 i915_gem_lmem.c
> > --- sys/dev/pci/drm/i915/gem/i915_gem_lmem.c	1 Jan 2023 01:34:56 -0000	1.4
> > +++ sys/dev/pci/drm/i915/gem/i915_gem_lmem.c	4 Feb 2023 00:58:16 -0000
> > @@ -15,9 +15,6 @@ i915_gem_object_lmem_io_map(struct drm_i
> >  			    unsigned long n,
> >  			    unsigned long size)
> >  {
> > -	STUB();
> > -	return NULL;
> > -#ifdef notyet
> >  	resource_size_t offset;
> > 
> >  	GEM_BUG_ON(!i915_gem_object_is_contiguous(obj));
> > @@ -25,7 +22,11 @@ i915_gem_object_lmem_io_map(struct drm_i
> >  	offset = i915_gem_object_get_dma_address(obj, n);
> >  	offset -= obj->mm.region->region.start;
> > 
> > +#ifdef __linux__
> >  	return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
> > +#else
> > +	agp_map_atomic(obj->mm.region->agph, offset, &obj->mm.region->bsh);
> > +	return bus_space_vaddr(obj->mm.region->bst, obj->mm.region->bsh);
> >  #endif
> >  }
> > 
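A note on the OpenBSD path in the i915_gem_object_lmem_io_map() hunk above: agp_map_atomic() only establishes the kernel mapping, and the hunk does not show where it is released again. A minimal sketch of the intended map/use/unmap pairing, assuming agp_unmap_atomic() from agpvar.h as the counterpart; the wrapper function below is purely illustrative and not part of the diff:

/*
 * Sketch only: expected lifetime of the atomic lmem mapping.
 * agp_unmap_atomic() and the wrapper name are assumptions; the diff
 * above only shows the map side used by i915_gem_object_lmem_io_map().
 */
static void
lmem_io_map_example(struct intel_memory_region *mem, bus_size_t offset)
{
	void *vaddr;

	/* map a window of the lmem BAR through the agp_map set up at init */
	agp_map_atomic(mem->agph, offset, &mem->bsh);
	vaddr = bus_space_vaddr(mem->bst, mem->bsh);

	/* ... access the object's backing pages through vaddr ... */

	/* drop the temporary mapping again */
	agp_unmap_atomic(mem->agph, mem->bsh);
}
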
> > Index: sys/dev/pci/drm/i915/gem/i915_gem_stolen.c
> > ===================================================================
> > RCS file: /cvs/src/sys/dev/pci/drm/i915/gem/i915_gem_stolen.c,v
> > retrieving revision 1.5
> > diff -u -p -r1.5 i915_gem_stolen.c
> > --- sys/dev/pci/drm/i915/gem/i915_gem_stolen.c	4 Feb 2023 00:07:11 -0000	1.5
> > +++ sys/dev/pci/drm/i915/gem/i915_gem_stolen.c	4 Feb 2023 01:23:34 -0000
> > @@ -774,18 +774,44 @@ static int init_stolen_lmem(struct intel
> >  	if (err)
> >  		return err;
> > 
> > -	STUB();
> > -	return -ENOSYS;
> > -#ifdef notyet
> > +#ifdef __linux__
> >  	if (mem->io_size && !io_mapping_init_wc(&mem->iomap,
> >  						mem->io_start,
> >  						mem->io_size)) {
> >  		err = -EIO;
> >  		goto err_cleanup;
> >  	}
> > +#else
> > +	if (mem->io_size) {
> > +		int i;
> > +
> > +		uvm_page_physload(atop(mem->io_start),
> > +		    atop(mem->io_start + mem->io_size),
> > +		    atop(mem->io_start),
> > +		    atop(mem->io_start + mem->io_size),
> > +		    PHYSLOAD_DEVICE);
> > +		/* array of vm pages that physload introduced. */
> > +		mem->pgs = PHYS_TO_VM_PAGE(mem->io_start);
> > +		KASSERT(mem->pgs != NULL);
> > +		/*
> > +		 * XXX mark all pages write combining so user mmaps get the
> > +		 * right bits. We really need a proper MI api for doing this,
> > +		 * but for now this allows us to use PAT where available.
> > +		 */
> > +		for (i = 0; i < atop(mem->io_size); i++)
> > +			atomic_setbits_int(&(mem->pgs[i].pg_flags),
> > +			    PG_PMAP_WC);
> > +		if (agp_init_map(mem->bst, mem->io_start,
> > +		    mem->io_size,
> > +		    BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE,
> > +		    &mem->agph))
> > +			panic("can't map lmem");
> > +	}
> > +#endif
> > 
> >  	return 0;
> > 
> > +#ifdef __linux__
> >  err_cleanup:
> >  	i915_gem_cleanup_stolen(mem->i915);
> >  	return err;
> > Index: sys/dev/pci/drm/i915/gt/intel_region_lmem.c
> > ===================================================================
> > RCS file: /cvs/src/sys/dev/pci/drm/i915/gt/intel_region_lmem.c,v
> > retrieving revision 1.2
> > diff -u -p -r1.2 intel_region_lmem.c
> > --- sys/dev/pci/drm/i915/gt/intel_region_lmem.c	1 Jan 2023 01:34:57 -0000	1.2
> > +++ sys/dev/pci/drm/i915/gt/intel_region_lmem.c	4 Feb 2023 01:31:29 -0000
> > @@ -142,15 +142,37 @@ region_lmem_release(struct intel_memory_
> >  static int
> >  region_lmem_init(struct intel_memory_region *mem)
> >  {
> > -	STUB();
> > -	return -ENOSYS;
> > -#ifdef notyet
> >  	int ret;
> > 
> > +#ifdef __linux__
> >  	if (!io_mapping_init_wc(&mem->iomap,
> >  				mem->io_start,
> >  				mem->io_size))
> >  		return -EIO;
> > +#else
> > +	int i;
> > +	uvm_page_physload(atop(mem->io_start),
> > +	    atop(mem->io_start + mem->io_size),
> > +	    atop(mem->io_start),
> > +	    atop(mem->io_start + mem->io_size),
> > +	    PHYSLOAD_DEVICE);
> > +	/* array of vm pages that physload introduced. */
> > +	mem->pgs = PHYS_TO_VM_PAGE(mem->io_start);
> > +	KASSERT(mem->pgs != NULL);
> > +	/*
> > +	 * XXX mark all pages write combining so user mmaps get the
> > +	 * right bits. We really need a proper MI api for doing this,
> > +	 * but for now this allows us to use PAT where available.
> > +	 */
> > +	for (i = 0; i < atop(mem->io_size); i++)
> > +		atomic_setbits_int(&(mem->pgs[i].pg_flags),
> > +		    PG_PMAP_WC);
> > +	if (agp_init_map(mem->i915->bst, mem->io_start,
> > +	    mem->io_size,
> > +	    BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE,
> > +	    &mem->agph))
> > +		panic("can't init lmem");
> > +#endif
> > 
> >  	ret = intel_region_ttm_init(mem);
> >  	if (ret)
> > @@ -159,10 +181,13 @@ region_lmem_init(struct intel_memory_reg
> >  	return 0;
> > 
> >  out_no_buddy:
> > +#ifdef __linux__
> >  	io_mapping_fini(&mem->iomap);
> > +#else
> > +	agp_destroy_map(mem->agph);
> > +#endif
> > 
> >  	return ret;
> > -#endif
> >  }
> > 
> >  static const struct intel_memory_region_ops intel_region_lmem_ops = {
> > 
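Both init_stolen_lmem() and region_lmem_init() now carry the same physload / write-combine / agp_init_map sequence. If that duplication becomes a maintenance burden, it could be collected into a small helper along these lines; this is only a sketch, the helper name and the -EIO returns are hypothetical, and the individual calls are taken from the hunks above:

/*
 * Hypothetical helper for the duplicated lmem mapping code in
 * init_stolen_lmem() and region_lmem_init().
 */
static int
lmem_region_map(struct intel_memory_region *mem, bus_space_tag_t bst)
{
	int i;

	/* make the BAR pages known to uvm */
	uvm_page_physload(atop(mem->io_start),
	    atop(mem->io_start + mem->io_size),
	    atop(mem->io_start),
	    atop(mem->io_start + mem->io_size),
	    PHYSLOAD_DEVICE);

	/* array of vm pages that physload introduced */
	mem->pgs = PHYS_TO_VM_PAGE(mem->io_start);
	if (mem->pgs == NULL)
		return -EIO;

	/*
	 * XXX mark all pages write combining so user mmaps get the right
	 * bits; relies on PAT where available until there is a proper MI api.
	 */
	for (i = 0; i < atop(mem->io_size); i++)
		atomic_setbits_int(&mem->pgs[i].pg_flags, PG_PMAP_WC);

	/* linear, prefetchable kernel mapping of the whole io range */
	if (agp_init_map(bst, mem->io_start, mem->io_size,
	    BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE, &mem->agph))
		return -EIO;

	return 0;
}

The bus_space tag would have to stay a parameter: the stolen path uses mem->bst while the lmem region path uses mem->i915->bst in the diff above.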