Module Name:	src
Committed By:	riastradh
Date:		Sat Dec 18 23:54:51 UTC 2021
Added Files:
	src/sys/external/bsd/drm2/dist/drm/i915: i915_dma.c
	src/sys/external/bsd/drm2/dist/include/drm: drmP.h
	    drm_gem_cma_helper.h

Log Message:
drm: Temporarily restore some local files deleted upstream.

These had local changes which needed to be distributed into other
files upstream, but that didn't happen in the merge commit.  It will
happen, and the files will be deleted, in a later commit.


To generate a diff of this commit:
cvs rdiff -u -r0 -r1.33 src/sys/external/bsd/drm2/dist/drm/i915/i915_dma.c
cvs rdiff -u -r0 -r1.43 src/sys/external/bsd/drm2/dist/include/drm/drmP.h
cvs rdiff -u -r0 -r1.8 \
    src/sys/external/bsd/drm2/dist/include/drm/drm_gem_cma_helper.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
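For reference, cvs rdiff writes its unified diff to standard output with
paths rooted at the top of the tree, so the diff can be captured to a file
and applied to a checkout with patch(1).  A usage sketch, assuming a tree
checked out at /usr/src (the output file name and the -p1 patch level are
assumptions, not part of the commit):

cvs rdiff -u -r0 -r1.33 \
    src/sys/external/bsd/drm2/dist/drm/i915/i915_dma.c > i915_dma.diff
cd /usr/src && patch -p1 < i915_dma.diff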
Added files: Index: src/sys/external/bsd/drm2/dist/drm/i915/i915_dma.c diff -u /dev/null src/sys/external/bsd/drm2/dist/drm/i915/i915_dma.c:1.33 --- /dev/null Sat Dec 18 23:54:51 2021 +++ src/sys/external/bsd/drm2/dist/drm/i915/i915_dma.c Sat Dec 18 23:54:51 2021 @@ -0,0 +1,1423 @@ +/* $NetBSD: i915_dma.c,v 1.33 2021/12/18 23:54:51 riastradh Exp $ */ + +/* i915_dma.c -- DMA support for the I915 -*- linux-c -*- + */ +/* + * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include <sys/cdefs.h> +__KERNEL_RCSID(0, "$NetBSD: i915_dma.c,v 1.33 2021/12/18 23:54:51 riastradh Exp $"); + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/async.h> +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_legacy.h> +#include "intel_drv.h" +#include <drm/i915_drm.h> +#include "i915_drv.h" +#include "i915_vgpu.h" +#include "i915_trace.h" +#include <linux/pci.h> +#include <linux/console.h> +#include <linux/vt.h> +#include <linux/vgaarb.h> +#include <linux/acpi.h> +#include <linux/pnp.h> +#include <linux/vga_switcheroo.h> +#include <linux/slab.h> +#include <acpi/video.h> +#include <linux/pm.h> +#include <linux/pm_runtime.h> +#include <linux/oom.h> + +#include <linux/nbsd-namespace.h> + +static int i915_getparam(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + drm_i915_getparam_t *param = data; + int value; + + switch (param->param) { + case I915_PARAM_IRQ_ACTIVE: + case I915_PARAM_ALLOW_BATCHBUFFER: + case I915_PARAM_LAST_DISPATCH: + /* Reject all old ums/dri params. */ + return -ENODEV; + case I915_PARAM_CHIPSET_ID: + value = dev->pdev->device; + break; + case I915_PARAM_REVISION: + value = dev->pdev->revision; + break; + case I915_PARAM_HAS_GEM: + value = 1; + break; + case I915_PARAM_NUM_FENCES_AVAIL: + value = dev_priv->num_fence_regs; + break; + case I915_PARAM_HAS_OVERLAY: + value = dev_priv->overlay ? 
1 : 0; + break; + case I915_PARAM_HAS_PAGEFLIPPING: + value = 1; + break; + case I915_PARAM_HAS_EXECBUF2: + /* depends on GEM */ + value = 1; + break; + case I915_PARAM_HAS_BSD: + value = intel_ring_initialized(&dev_priv->ring[VCS]); + break; + case I915_PARAM_HAS_BLT: + value = intel_ring_initialized(&dev_priv->ring[BCS]); + break; + case I915_PARAM_HAS_VEBOX: + value = intel_ring_initialized(&dev_priv->ring[VECS]); + break; + case I915_PARAM_HAS_BSD2: + value = intel_ring_initialized(&dev_priv->ring[VCS2]); + break; + case I915_PARAM_HAS_RELAXED_FENCING: + value = 1; + break; + case I915_PARAM_HAS_COHERENT_RINGS: + value = 1; + break; + case I915_PARAM_HAS_EXEC_CONSTANTS: + value = INTEL_INFO(dev)->gen >= 4; + break; + case I915_PARAM_HAS_RELAXED_DELTA: + value = 1; + break; + case I915_PARAM_HAS_GEN7_SOL_RESET: + value = 1; + break; + case I915_PARAM_HAS_LLC: + value = HAS_LLC(dev); + break; + case I915_PARAM_HAS_WT: + value = HAS_WT(dev); + break; + case I915_PARAM_HAS_ALIASING_PPGTT: + value = USES_PPGTT(dev); + break; + case I915_PARAM_HAS_WAIT_TIMEOUT: + value = 1; + break; + case I915_PARAM_HAS_SEMAPHORES: + value = i915_semaphore_is_enabled(dev); + break; + case I915_PARAM_HAS_PRIME_VMAP_FLUSH: + value = 1; + break; + case I915_PARAM_HAS_SECURE_BATCHES: + value = HAS_SECURE_BATCHES(dev_priv) && capable(CAP_SYS_ADMIN); + break; + case I915_PARAM_HAS_PINNED_BATCHES: + value = 1; + break; + case I915_PARAM_HAS_EXEC_NO_RELOC: + value = 1; + break; + case I915_PARAM_HAS_EXEC_HANDLE_LUT: + value = 1; + break; + case I915_PARAM_CMD_PARSER_VERSION: + value = i915_cmd_parser_get_version(dev_priv); + break; + case I915_PARAM_HAS_COHERENT_PHYS_GTT: + value = 1; + break; + case I915_PARAM_MMAP_VERSION: +#ifdef __NetBSD__ + dev_priv->quirks |= QUIRK_NETBSD_VERSION_CALLED; +#endif + value = 1; + break; + case I915_PARAM_SUBSLICE_TOTAL: + value = INTEL_INFO(dev)->subslice_total; + if (!value) + return -ENODEV; + break; + case I915_PARAM_EU_TOTAL: + value = INTEL_INFO(dev)->eu_total; + if (!value) + return -ENODEV; + break; + case I915_PARAM_HAS_GPU_RESET: + value = i915.enable_hangcheck && + intel_has_gpu_reset(dev); + break; + case I915_PARAM_HAS_RESOURCE_STREAMER: + value = HAS_RESOURCE_STREAMER(dev); + break; + default: + DRM_DEBUG("Unknown parameter %d\n", param->param); + return -EINVAL; + } + + if (copy_to_user(param->value, &value, sizeof(int))) { + DRM_ERROR("copy_to_user failed\n"); + return -EFAULT; + } + + return 0; +} + +static int i915_get_bridge_dev(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); + if (!dev_priv->bridge_dev) { + DRM_ERROR("bridge device not found\n"); + return -1; + } + return 0; +} + +#define MCHBAR_I915 0x44 +#define MCHBAR_I965 0x48 +#define MCHBAR_SIZE (4*4096) + +#define DEVEN_REG 0x54 +#define DEVEN_MCHBAR_EN (1 << 28) + +/* Allocate space for the MCH regs if needed, return nonzero on error */ +static int +intel_alloc_mchbar_resource(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int reg = INTEL_INFO(dev)->gen >= 4 ? 
MCHBAR_I965 : MCHBAR_I915; +#ifdef CONFIG_PNP + u32 temp_lo, temp_hi = 0; + u64 mchbar_addr; +#endif + int ret; + +#ifdef CONFIG_PNP + if (INTEL_INFO(dev)->gen >= 4) + pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi); + pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo); + mchbar_addr = ((u64)temp_hi << 32) | temp_lo; + + /* If ACPI doesn't have it, assume we need to allocate it ourselves */ + if (mchbar_addr && + pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) + return 0; +#endif + + /* Get some space for it */ + dev_priv->mch_res.name = "i915 MCHBAR"; + dev_priv->mch_res.flags = IORESOURCE_MEM; + ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, + &dev_priv->mch_res, + MCHBAR_SIZE, MCHBAR_SIZE, + PCIBIOS_MIN_MEM, + 0, pcibios_align_resource, + dev_priv->bridge_dev); + if (ret) { + DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret); + dev_priv->mch_res.start = 0; + return ret; + } + + if (INTEL_INFO(dev)->gen >= 4) + pci_write_config_dword(dev_priv->bridge_dev, reg + 4, + upper_32_bits(dev_priv->mch_res.start)); + + pci_write_config_dword(dev_priv->bridge_dev, reg, + lower_32_bits(dev_priv->mch_res.start)); + return 0; +} + +/* Setup MCHBAR if possible, return true if we should disable it again */ +static void +intel_setup_mchbar(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; + u32 temp; + bool enabled; + + if (IS_VALLEYVIEW(dev)) + return; + + dev_priv->mchbar_need_disable = false; + + if (IS_I915G(dev) || IS_I915GM(dev)) { + pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp); + enabled = !!(temp & DEVEN_MCHBAR_EN); + } else { + pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); + enabled = temp & 1; + } + + /* If it's already enabled, don't have to do anything */ + if (enabled) + return; + + if (intel_alloc_mchbar_resource(dev)) + return; + + dev_priv->mchbar_need_disable = true; + + /* Space is allocated or reserved, so enable it. */ + if (IS_I915G(dev) || IS_I915GM(dev)) { + pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, + temp | DEVEN_MCHBAR_EN); + } else { + pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); + pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1); + } +} + +static void +intel_teardown_mchbar(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? 
MCHBAR_I965 : MCHBAR_I915; + u32 temp; + + if (dev_priv->mchbar_need_disable) { + if (IS_I915G(dev) || IS_I915GM(dev)) { + pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp); + temp &= ~DEVEN_MCHBAR_EN; + pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp); + } else { + pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); + temp &= ~1; + pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp); + } + } + + if (dev_priv->mch_res.start) + release_resource(&dev_priv->mch_res); +} + +#ifndef __NetBSD__ /* XXX vga */ +/* true = enable decode, false = disable decoder */ +static unsigned int i915_vga_set_decode(void *cookie, bool state) +{ + struct drm_device *dev = cookie; + + intel_modeset_vga_set_state(dev, state); + if (state) + return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | + VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; + else + return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; +} + +static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; + + if (state == VGA_SWITCHEROO_ON) { + pr_info("switched on\n"); + dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; + /* i915 resume handler doesn't set to D0 */ + pci_set_power_state(dev->pdev, PCI_D0); + i915_resume_switcheroo(dev); + dev->switch_power_state = DRM_SWITCH_POWER_ON; + } else { + pr_err("switched off\n"); + dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; + i915_suspend_switcheroo(dev, pmm); + dev->switch_power_state = DRM_SWITCH_POWER_OFF; + } +} + +static bool i915_switcheroo_can_switch(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + + /* + * FIXME: open_count is protected by drm_global_mutex but that would lead to + * locking inversion with the driver load path. And the access here is + * completely racy anyway. So don't bother with locking for now. + */ + return dev->open_count == 0; +} + +static const struct vga_switcheroo_client_ops i915_switcheroo_ops = { + .set_gpu_state = i915_switcheroo_set_state, + .reprobe = NULL, + .can_switch = i915_switcheroo_can_switch, +}; +#endif + +static int i915_load_modeset_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + ret = intel_parse_bios(dev); + if (ret) + DRM_INFO("failed to find VBIOS tables\n"); + +#ifndef __NetBSD__ /* XXX vga */ + /* If we have > 1 VGA cards, then we need to arbitrate access + * to the common VGA resources. + * + * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA), + * then we do not take part in VGA arbitration and the + * vga_client_register() fails with -ENODEV. + */ + ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); + if (ret && ret != -ENODEV) + goto out; +#endif + +#ifdef __NetBSD__ + intel_register_dsm_handler(dev); +#else + intel_register_dsm_handler(); +#endif + +#ifndef __NetBSD__ /* XXX vga */ + ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false); + if (ret) + goto cleanup_vga_client; +#endif + + /* Initialise stolen first so that we may reserve preallocated + * objects for the BIOS to KMS transition. + */ + ret = i915_gem_init_stolen(dev); + if (ret) + goto cleanup_vga_switcheroo; + + intel_power_domains_init_hw(dev_priv); + + ret = intel_irq_install(dev_priv); + if (ret) + goto cleanup_gem_stolen; + + intel_setup_gmbus(dev); + + /* Important: The output setup functions called by modeset_init need + * working irqs for e.g. 
gmbus and dp aux transfers. */ + intel_modeset_init(dev); + + intel_guc_ucode_init(dev); + + ret = i915_gem_init(dev); + if (ret) + goto cleanup_irq; + + intel_modeset_gem_init(dev); + + /* Always safe in the mode setting case. */ + /* FIXME: do pre/post-mode set stuff in core KMS code */ + dev->vblank_disable_allowed = true; + if (INTEL_INFO(dev)->num_pipes == 0) + return 0; + + ret = intel_fbdev_init(dev); + if (ret) + goto cleanup_gem; + + /* Only enable hotplug handling once the fbdev is fully set up. */ + intel_hpd_init(dev_priv); + + /* + * Some ports require correctly set-up hpd registers for detection to + * work properly (leading to ghost connected connector status), e.g. VGA + * on gm45. Hence we can only set up the initial fbdev config after hpd + * irqs are fully enabled. Now we should scan for the initial config + * only once hotplug handling is enabled, but due to screwed-up locking + * around kms/fbdev init we can't protect the fbdev initial config + * scanning against hotplug events. Hence do this first and ignore the + * tiny window where we will lose hotplug notifications. + */ + async_schedule(intel_fbdev_initial_config, dev_priv); + + drm_kms_helper_poll_init(dev); + + return 0; + +cleanup_gem: + mutex_lock(&dev->struct_mutex); + i915_gem_cleanup_ringbuffer(dev); + i915_gem_context_fini(dev); + mutex_unlock(&dev->struct_mutex); +cleanup_irq: + intel_guc_ucode_fini(dev); + drm_irq_uninstall(dev); + intel_teardown_gmbus(dev); +cleanup_gem_stolen: + intel_modeset_cleanup(dev); + i915_gem_cleanup_stolen(dev); +cleanup_vga_switcheroo: +#ifndef __NetBSD__ /* XXX vga */ + vga_switcheroo_unregister_client(dev->pdev); +cleanup_vga_client: + vga_client_register(dev->pdev, NULL, NULL, NULL); +out: +#endif + return ret; +} + +#if IS_ENABLED(CONFIG_FB) +static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) +{ + struct apertures_struct *ap; + struct pci_dev *pdev = dev_priv->dev->pdev; + bool primary; + int ret; + + ap = alloc_apertures(1); + if (!ap) + return -ENOMEM; + + ap->ranges[0].base = dev_priv->gtt.mappable_base; + ap->ranges[0].size = dev_priv->gtt.mappable_end; + + primary = + pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; + + ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary); + + kfree(ap); + + return ret; +} +#else +static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) +{ + return 0; +} +#endif + +#if !defined(CONFIG_VGA_CONSOLE) +static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) +{ + return 0; +} +#elif !defined(CONFIG_DUMMY_CONSOLE) +static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) +{ + return -ENODEV; +} +#else +static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) +{ + int ret = 0; + + DRM_INFO("Replacing VGA console driver\n"); + + console_lock(); + if (con_is_bound(&vga_con)) + ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1); + if (ret == 0) { + ret = do_unregister_con_driver(&vga_con); + + /* Ignore "already unregistered". */ + if (ret == -ENODEV) + ret = 0; + } + console_unlock(); + + return ret; +} +#endif + +static void i915_dump_device_info(struct drm_i915_private *dev_priv) +{ + const struct intel_device_info *info = &dev_priv->info; + +#define PRINT_S(name) "%s" +#define SEP_EMPTY +#define PRINT_FLAG(name) info->name ? 
#name "," : "" +#define SEP_COMMA , + DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags=" + DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY), + info->gen, + dev_priv->dev->pdev->device, + dev_priv->dev->pdev->revision, + DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA)); +#undef PRINT_S +#undef SEP_EMPTY +#undef PRINT_FLAG +#undef SEP_COMMA +} + +static void cherryview_sseu_info_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_device_info *info; + u32 fuse, eu_dis; + + info = (struct intel_device_info *)&dev_priv->info; + fuse = I915_READ(CHV_FUSE_GT); + + info->slice_total = 1; + + if (!(fuse & CHV_FGT_DISABLE_SS0)) { + info->subslice_per_slice++; + eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK | + CHV_FGT_EU_DIS_SS0_R1_MASK); + info->eu_total += 8 - hweight32(eu_dis); + } + + if (!(fuse & CHV_FGT_DISABLE_SS1)) { + info->subslice_per_slice++; + eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK | + CHV_FGT_EU_DIS_SS1_R1_MASK); + info->eu_total += 8 - hweight32(eu_dis); + } + + info->subslice_total = info->subslice_per_slice; + /* + * CHV expected to always have a uniform distribution of EU + * across subslices. + */ + info->eu_per_subslice = info->subslice_total ? + info->eu_total / info->subslice_total : + 0; + /* + * CHV supports subslice power gating on devices with more than + * one subslice, and supports EU power gating on devices with + * more than one EU pair per subslice. + */ + info->has_slice_pg = 0; + info->has_subslice_pg = (info->subslice_total > 1); + info->has_eu_pg = (info->eu_per_subslice > 2); +} + +static void gen9_sseu_info_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_device_info *info; + int s_max = 3, ss_max = 4, eu_max = 8; + int s, ss; + u32 fuse2, s_enable, ss_disable, eu_disable; + u8 eu_mask = 0xff; + + info = (struct intel_device_info *)&dev_priv->info; + fuse2 = I915_READ(GEN8_FUSE2); + s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> + GEN8_F2_S_ENA_SHIFT; + ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >> + GEN9_F2_SS_DIS_SHIFT; + + info->slice_total = hweight32(s_enable); + /* + * The subslice disable field is global, i.e. it applies + * to each of the enabled slices. + */ + info->subslice_per_slice = ss_max - hweight32(ss_disable); + info->subslice_total = info->slice_total * + info->subslice_per_slice; + + /* + * Iterate through enabled slices and subslices to + * count the total enabled EU. + */ + for (s = 0; s < s_max; s++) { + if (!(s_enable & (0x1 << s))) + /* skip disabled slice */ + continue; + + eu_disable = I915_READ(GEN9_EU_DISABLE(s)); + for (ss = 0; ss < ss_max; ss++) { + int eu_per_ss; + + if (ss_disable & (0x1 << ss)) + /* skip disabled subslice */ + continue; + + eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) & + eu_mask); + + /* + * Record which subslice(s) has(have) 7 EUs. we + * can tune the hash used to spread work among + * subslices if they are unbalanced. + */ + if (eu_per_ss == 7) + info->subslice_7eu[s] |= 1 << ss; + + info->eu_total += eu_per_ss; + } + } + + /* + * SKL is expected to always have a uniform distribution + * of EU across subslices with the exception that any one + * EU in any one subslice may be fused off for die + * recovery. BXT is expected to be perfectly uniform in EU + * distribution. + */ + info->eu_per_subslice = info->subslice_total ? 
+ DIV_ROUND_UP(info->eu_total, + info->subslice_total) : 0; + /* + * SKL supports slice power gating on devices with more than + * one slice, and supports EU power gating on devices with + * more than one EU pair per subslice. BXT supports subslice + * power gating on devices with more than one subslice, and + * supports EU power gating on devices with more than one EU + * pair per subslice. + */ + info->has_slice_pg = ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && + (info->slice_total > 1)); + info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1)); + info->has_eu_pg = (info->eu_per_subslice > 2); +} + +static void broadwell_sseu_info_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_device_info *info; + const int s_max = 3, ss_max = 3, eu_max = 8; + int s, ss; + u32 fuse2, eu_disable[3], s_enable, ss_disable; + + fuse2 = I915_READ(GEN8_FUSE2); + s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT; + ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT; + + eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK; + eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) | + ((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) << + (32 - GEN8_EU_DIS0_S1_SHIFT)); + eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) | + ((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) << + (32 - GEN8_EU_DIS1_S2_SHIFT)); + + + info = (struct intel_device_info *)&dev_priv->info; + info->slice_total = hweight32(s_enable); + + /* + * The subslice disable field is global, i.e. it applies + * to each of the enabled slices. + */ + info->subslice_per_slice = ss_max - hweight32(ss_disable); + info->subslice_total = info->slice_total * info->subslice_per_slice; + + /* + * Iterate through enabled slices and subslices to + * count the total enabled EU. + */ + for (s = 0; s < s_max; s++) { + if (!(s_enable & (0x1 << s))) + /* skip disabled slice */ + continue; + + for (ss = 0; ss < ss_max; ss++) { + u32 n_disabled; + + if (ss_disable & (0x1 << ss)) + /* skip disabled subslice */ + continue; + + n_disabled = hweight8(eu_disable[s] >> (ss * eu_max)); + + /* + * Record which subslices have 7 EUs. + */ + if (eu_max - n_disabled == 7) + info->subslice_7eu[s] |= 1 << ss; + + info->eu_total += eu_max - n_disabled; + } + } + + /* + * BDW is expected to always have a uniform distribution of EU across + * subslices with the exception that any one EU in any one subslice may + * be fused off for die recovery. + */ + info->eu_per_subslice = info->subslice_total ? + DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0; + + /* + * BDW supports slice power gating on devices with more than + * one slice. + */ + info->has_slice_pg = (info->slice_total > 1); + info->has_subslice_pg = 0; + info->has_eu_pg = 0; +} + +/* + * Determine various intel_device_info fields at runtime. + * + * Use it when either: + * - it's judged too laborious to fill n static structures with the limit + * when a simple if statement does the job, + * - run-time checks (eg read fuse/strap registers) are needed. + * + * This function needs to be called: + * - after the MMIO has been setup as we are reading registers, + * - after the PCH has been detected, + * - before the first usage of the fields it can tweak. 
+ */ +static void intel_device_info_runtime_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_device_info *info; + enum pipe pipe; + + info = (struct intel_device_info *)&dev_priv->info; + + /* + * Skylake and Broxton currently don't expose the topmost plane as its + * use is exclusive with the legacy cursor and we only want to expose + * one of those, not both. Until we can safely expose the topmost plane + * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported, + * we don't expose the topmost plane at all to prevent ABI breakage + * down the line. + */ + if (IS_BROXTON(dev)) { + info->num_sprites[PIPE_A] = 2; + info->num_sprites[PIPE_B] = 2; + info->num_sprites[PIPE_C] = 1; + } else if (IS_VALLEYVIEW(dev)) + for_each_pipe(dev_priv, pipe) + info->num_sprites[pipe] = 2; + else + for_each_pipe(dev_priv, pipe) + info->num_sprites[pipe] = 1; + + if (i915.disable_display) { + DRM_INFO("Display disabled (module parameter)\n"); + info->num_pipes = 0; + } else if (info->num_pipes > 0 && + (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) && + !IS_VALLEYVIEW(dev)) { + u32 fuse_strap = I915_READ(FUSE_STRAP); + u32 sfuse_strap = I915_READ(SFUSE_STRAP); + + /* + * SFUSE_STRAP is supposed to have a bit signalling the display + * is fused off. Unfortunately it seems that, at least in + * certain cases, fused off display means that PCH display + * reads don't land anywhere. In that case, we read 0s. + * + * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK + * should be set when taking over after the firmware. + */ + if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE || + sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED || + (dev_priv->pch_type == PCH_CPT && + !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) { + DRM_INFO("Display fused off, disabling\n"); + info->num_pipes = 0; + } + } + + /* Initialize slice/subslice/EU info */ + if (IS_CHERRYVIEW(dev)) + cherryview_sseu_info_init(dev); + else if (IS_BROADWELL(dev)) + broadwell_sseu_info_init(dev); + else if (INTEL_INFO(dev)->gen >= 9) + gen9_sseu_info_init(dev); + + DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total); + DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total); + DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice); + DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total); + DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice); + DRM_DEBUG_DRIVER("has slice power gating: %s\n", + info->has_slice_pg ? "y" : "n"); + DRM_DEBUG_DRIVER("has subslice power gating: %s\n", + info->has_subslice_pg ? "y" : "n"); + DRM_DEBUG_DRIVER("has EU power gating: %s\n", + info->has_eu_pg ? 
"y" : "n"); +} + +static void intel_init_dpio(struct drm_i915_private *dev_priv) +{ + if (!IS_VALLEYVIEW(dev_priv)) + return; + + /* + * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C), + * CHV x1 PHY (DP/HDMI D) + * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C) + */ + if (IS_CHERRYVIEW(dev_priv)) { + DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2; + DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO; + } else { + DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO; + } +} + +/** + * i915_driver_load - setup chip and create an initial config + * @dev: DRM device + * @flags: startup flags + * + * The driver load routine has to do several things: + * - drive output discovery via intel_modeset_init() + * - initialize the memory manager + * - allocate initial config memory + * - setup the DRM framebuffer with the allocated memory + */ +int i915_driver_load(struct drm_device *dev, unsigned long flags) +{ + struct drm_i915_private *dev_priv; + struct intel_device_info *info, *device_info; + int ret = 0, mmio_bar, mmio_size; + uint32_t aperture_size; + + info = (struct intel_device_info *) flags; + + dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); + if (dev_priv == NULL) + return -ENOMEM; + + dev->dev_private = dev_priv; + dev_priv->dev = dev; + + /* Setup the write-once "constant" device info */ + device_info = (struct intel_device_info *)&dev_priv->info; + memcpy(device_info, info, sizeof(dev_priv->info)); + device_info->device_id = dev->pdev->device; + + spin_lock_init(&dev_priv->irq_lock); + spin_lock_init(&dev_priv->gpu_error.lock); + mutex_init(&dev_priv->backlight_lock); + spin_lock_init(&dev_priv->uncore.lock); + spin_lock_init(&dev_priv->mm.object_stat_lock); + spin_lock_init(&dev_priv->mmio_flip_lock); + mutex_init(&dev_priv->sb_lock); + mutex_init(&dev_priv->modeset_restore_lock); + mutex_init(&dev_priv->csr_lock); + mutex_init(&dev_priv->av_mutex); + + intel_pm_setup(dev); + + intel_display_crc_init(dev); + + i915_dump_device_info(dev_priv); + + /* Not all pre-production machines fall into this category, only the + * very first ones. Almost everything should work, except for maybe + * suspend/resume. And we don't implement workarounds that affect only + * pre-production machines. */ + if (IS_HSW_EARLY_SDV(dev)) + DRM_INFO("This is an early pre-production Haswell machine. " + "It may not be fully functional.\n"); + + if (i915_get_bridge_dev(dev)) { + ret = -EIO; + goto free_priv; + } + + mmio_bar = IS_GEN2(dev) ? 1 : 0; + /* Before gen4, the registers and the GTT are behind different BARs. + * However, from gen4 onwards, the registers and the GTT are shared + * in the same BAR, so we want to restrict this ioremap from + * clobbering the GTT which we want ioremap_wc instead. Fortunately, + * the register BAR remains the same size for all the earlier + * generations up to Ironlake. 
+ */ + if (info->gen < 5) + mmio_size = 512*1024; + else + mmio_size = 2*1024*1024; + + dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size); +#ifdef __NetBSD__ + if (!dev_priv->regs) + dev_priv->regs = drm_agp_borrow(dev, mmio_bar, mmio_size); +#endif + if (!dev_priv->regs) { + DRM_ERROR("failed to map registers\n"); + ret = -EIO; + goto put_bridge; + } + +#ifdef __NetBSD__ + dev_priv->regs_bst = dev_priv->dev->pdev->pd_resources[mmio_bar].bst; + dev_priv->regs_bsh = dev_priv->dev->pdev->pd_resources[mmio_bar].bsh; +#endif + + /* This must be called before any calls to HAS_PCH_* */ + intel_detect_pch(dev); + + intel_uncore_init(dev); + + /* Load CSR Firmware for SKL */ + intel_csr_ucode_init(dev); + + ret = i915_gem_gtt_init(dev); + if (ret) + goto out_freecsr; + + /* WARNING: Apparently we must kick fbdev drivers before vgacon, + * otherwise the vga fbdev driver falls over. */ + ret = i915_kick_out_firmware_fb(dev_priv); + if (ret) { + DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); + goto out_gtt; + } + + ret = i915_kick_out_vgacon(dev_priv); + if (ret) { + DRM_ERROR("failed to remove conflicting VGA console\n"); + goto out_gtt; + } + + pci_set_master(dev->pdev); + +#ifndef __NetBSD__ /* Handled in i915_gem_gtt. */ + /* overlay on gen2 is broken and can't address above 1G */ + if (IS_GEN2(dev)) + dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); + + /* 965GM sometimes incorrectly writes to hardware status page (HWS) + * using 32bit addressing, overwriting memory if HWS is located + * above 4GB. + * + * The documentation also mentions an issue with undefined + * behaviour if any general state is accessed within a page above 4GB, + * which also needs to be handled carefully. + */ + if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) + dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); +#endif + + aperture_size = dev_priv->gtt.mappable_end; + +#ifdef __NetBSD__ + dev_priv->gtt.mappable = + drm_io_mapping_create_wc(dev, dev_priv->gtt.mappable_base, + aperture_size); + /* Note: mappable_end is the size, not end paddr, of the aperture. */ + pmap_pv_track(dev_priv->gtt.mappable_base, dev_priv->gtt.mappable_end); +#else + dev_priv->gtt.mappable = + io_mapping_create_wc(dev_priv->gtt.mappable_base, + aperture_size); +#endif + if (dev_priv->gtt.mappable == NULL) { + ret = -EIO; + goto out_gtt; + } + + dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base, + aperture_size); + + /* The i915 workqueue is primarily used for batched retirement of + * requests (and thus managing bo) once the task has been completed + * by the GPU. i915_gem_retire_requests() is called directly when we + * need high-priority retirement, such as waiting for an explicit + * bo. + * + * It is also used for periodic low-priority events, such as + * idle-timers and recording error state. + * + * All tasks on the workqueue are expected to acquire the dev mutex + * so there is no point in running more than one instance of the + * workqueue at any time. Use an ordered one. 
+ */ + dev_priv->wq = alloc_ordered_workqueue("i915", 0); + if (dev_priv->wq == NULL) { + DRM_ERROR("Failed to create our workqueue.\n"); + ret = -ENOMEM; + goto out_mtrrfree; + } + + dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0); + if (dev_priv->hotplug.dp_wq == NULL) { + DRM_ERROR("Failed to create our dp workqueue.\n"); + ret = -ENOMEM; + goto out_freewq; + } + + dev_priv->gpu_error.hangcheck_wq = + alloc_ordered_workqueue("i915-hangcheck", 0); + if (dev_priv->gpu_error.hangcheck_wq == NULL) { + DRM_ERROR("Failed to create our hangcheck workqueue.\n"); + ret = -ENOMEM; + goto out_freedpwq; + } + + intel_irq_init(dev_priv); + intel_uncore_sanitize(dev); + + /* Try to make sure MCHBAR is enabled before poking at it */ + intel_setup_mchbar(dev); + intel_opregion_setup(dev); + + i915_gem_load(dev); + + /* On the 945G/GM, the chipset reports the MSI capability on the + * integrated graphics even though the support isn't actually there + * according to the published specs. It doesn't appear to function + * correctly in testing on 945G. + * This may be a side effect of MSI having been made available for PEG + * and the registers being closely associated. + * + * According to chipset errata, on the 965GM, MSI interrupts may + * be lost or delayed, but we use them anyways to avoid + * stuck interrupts on some machines. + */ + if (!IS_I945G(dev) && !IS_I945GM(dev)) + pci_enable_msi(dev->pdev); + + intel_device_info_runtime_init(dev); + + intel_init_dpio(dev_priv); + + if (INTEL_INFO(dev)->num_pipes) { + ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes); + if (ret) + goto out_gem_unload; + } + + intel_power_domains_init(dev_priv); + + ret = i915_load_modeset_init(dev); + if (ret < 0) { + DRM_ERROR("failed to init modeset\n"); + goto out_power_well; + } + + /* + * Notify a valid surface after modesetting, + * when running inside a VM. + */ + if (intel_vgpu_active(dev)) + I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY); + + i915_setup_sysfs(dev); + + if (INTEL_INFO(dev)->num_pipes) { + /* Must be done after probing outputs */ + intel_opregion_init(dev); + acpi_video_register(); + } + + if (IS_GEN5(dev)) + intel_gpu_ips_init(dev_priv); + + intel_runtime_pm_enable(dev_priv); + + i915_audio_component_init(dev_priv); + + return 0; + +out_power_well: + intel_power_domains_fini(dev_priv); + drm_vblank_cleanup(dev); +out_gem_unload: + WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier)); + unregister_shrinker(&dev_priv->mm.shrinker); + /* XXX i915_gem_unload */ + mutex_destroy(&dev_priv->fb_tracking.lock); + DRM_DESTROY_WAITQUEUE(&dev_priv->pending_flip_queue); + spin_lock_destroy(&dev_priv->pending_flip_lock); + DRM_DESTROY_WAITQUEUE(&dev_priv->gpu_error.reset_queue); + spin_lock_destroy(&dev_priv->gpu_error.reset_lock); + kmem_cache_destroy(dev_priv->requests); + kmem_cache_destroy(dev_priv->vmas); + kmem_cache_destroy(dev_priv->objects); + /* XXX end i915_gem_unload */ + + if (dev->pdev->msi_enabled) + pci_disable_msi(dev->pdev); + + intel_teardown_mchbar(dev); + pm_qos_remove_request(&dev_priv->pm_qos); + destroy_workqueue(dev_priv->gpu_error.hangcheck_wq); +out_freedpwq: + destroy_workqueue(dev_priv->hotplug.dp_wq); +out_freewq: + destroy_workqueue(dev_priv->wq); +out_mtrrfree: + arch_phys_wc_del(dev_priv->gtt.mtrr); +#ifdef __NetBSD__ + /* Note: mappable_end is the size, not end paddr, of the aperture. 
*/ + pmap_pv_untrack(dev_priv->gtt.mappable_base, + dev_priv->gtt.mappable_end); +#endif + io_mapping_free(dev_priv->gtt.mappable); +out_gtt: + i915_global_gtt_cleanup(dev); +out_freecsr: + intel_csr_ucode_fini(dev); + intel_uncore_fini(dev); + pci_iounmap(dev->pdev, dev_priv->regs); +put_bridge: + pci_dev_put(dev_priv->bridge_dev); +free_priv: + /* XXX intel_pm_fini */ + spin_lock_destroy(&dev_priv->rps.client_lock); + mutex_destroy(&dev_priv->rps.hw_lock); + /* XXX end intel_pm_fini */ + mutex_destroy(&dev_priv->av_mutex); + mutex_destroy(&dev_priv->csr_lock); + mutex_destroy(&dev_priv->modeset_restore_lock); + mutex_destroy(&dev_priv->sb_lock); + spin_lock_destroy(&dev_priv->mmio_flip_lock); + spin_lock_destroy(&dev_priv->mm.object_stat_lock); + spin_lock_destroy(&dev_priv->uncore.lock); + mutex_destroy(&dev_priv->backlight_lock); + spin_lock_destroy(&dev_priv->gpu_error.lock); + spin_lock_destroy(&dev_priv->irq_lock); + kfree(dev_priv); + return ret; +} + +int i915_driver_unload(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + i915_audio_component_cleanup(dev_priv); + + ret = i915_gem_suspend(dev); + if (ret) { + DRM_ERROR("failed to idle hardware: %d\n", ret); + return ret; + } + + intel_power_domains_fini(dev_priv); + + intel_gpu_ips_teardown(); + + i915_teardown_sysfs(dev); + + WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier)); + unregister_shrinker(&dev_priv->mm.shrinker); + + io_mapping_free(dev_priv->gtt.mappable); + arch_phys_wc_del(dev_priv->gtt.mtrr); + + acpi_video_unregister(); + + intel_fbdev_fini(dev); + + drm_vblank_cleanup(dev); + + intel_modeset_cleanup(dev); + + /* + * free the memory space allocated for the child device + * config parsed from VBT + */ + if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) { + kfree(dev_priv->vbt.child_dev); + dev_priv->vbt.child_dev = NULL; + dev_priv->vbt.child_dev_num = 0; + } + kfree(dev_priv->vbt.sdvo_lvds_vbt_mode); + dev_priv->vbt.sdvo_lvds_vbt_mode = NULL; + kfree(dev_priv->vbt.lfp_lvds_vbt_mode); + dev_priv->vbt.lfp_lvds_vbt_mode = NULL; + +#ifndef __NetBSD__ /* XXX vga */ + vga_switcheroo_unregister_client(dev->pdev); + vga_client_register(dev->pdev, NULL, NULL, NULL); +#endif + + /* Free error state after interrupts are fully disabled. */ + cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); + i915_destroy_error_state(dev); + + if (dev->pdev->msi_enabled) + pci_disable_msi(dev->pdev); + + intel_opregion_fini(dev); + + /* Flush any outstanding unpin_work. 
*/ + flush_workqueue(dev_priv->wq); + + intel_guc_ucode_fini(dev); + mutex_lock(&dev->struct_mutex); + i915_gem_cleanup_ringbuffer(dev); + i915_gem_context_fini(dev); + mutex_unlock(&dev->struct_mutex); + intel_fbc_cleanup_cfb(dev_priv); + i915_gem_cleanup_stolen(dev); + + intel_csr_ucode_fini(dev); + + intel_teardown_mchbar(dev); + + destroy_workqueue(dev_priv->hotplug.dp_wq); + destroy_workqueue(dev_priv->wq); + destroy_workqueue(dev_priv->gpu_error.hangcheck_wq); + pm_qos_remove_request(&dev_priv->pm_qos); + + i915_global_gtt_cleanup(dev); + + intel_uncore_fini(dev); + if (dev_priv->regs != NULL) + pci_iounmap(dev->pdev, dev_priv->regs); + + /* XXX i915_gem_unload */ + mutex_destroy(&dev_priv->fb_tracking.lock); + DRM_DESTROY_WAITQUEUE(&dev_priv->pending_flip_queue); + spin_lock_destroy(&dev_priv->pending_flip_lock); + DRM_DESTROY_WAITQUEUE(&dev_priv->gpu_error.reset_queue); + spin_lock_destroy(&dev_priv->gpu_error.reset_lock); + /* XXX end i915_gem_unload */ + kmem_cache_destroy(dev_priv->requests); + kmem_cache_destroy(dev_priv->vmas); + kmem_cache_destroy(dev_priv->objects); + /* XXX intel_pm_fini */ + spin_lock_destroy(&dev_priv->rps.client_lock); + mutex_destroy(&dev_priv->rps.hw_lock); + /* XXX end intel_pm_fini */ + pci_dev_put(dev_priv->bridge_dev); + kfree(dev_priv); + + return 0; +} + +int i915_driver_open(struct drm_device *dev, struct drm_file *file) +{ + int ret; + + ret = i915_gem_open(dev, file); + if (ret) + return ret; + + return 0; +} + +/** + * i915_driver_lastclose - clean up after all DRM clients have exited + * @dev: DRM device + * + * Take care of cleaning up after all DRM clients have exited. In the + * mode setting case, we want to restore the kernel's initial mode (just + * in case the last client left us in a bad state). + * + * Additionally, in the non-mode setting case, we'll tear down the GTT + * and DMA structures, since the kernel won't be using them, and clean + * up any GEM state. 
+ */ +void i915_driver_lastclose(struct drm_device *dev) +{ + intel_fbdev_restore_mode(dev); +#ifndef __NetBSD__ /* XXX vga */ + vga_switcheroo_process_delayed_switch(); +#endif +} + +void i915_driver_preclose(struct drm_device *dev, struct drm_file *file) +{ + mutex_lock(&dev->struct_mutex); + i915_gem_context_close(dev, file); + i915_gem_release(dev, file); + mutex_unlock(&dev->struct_mutex); + + intel_modeset_preclose(dev, file); +} + +void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) +{ + struct drm_i915_file_private *file_priv = file->driver_priv; + +#ifdef __NetBSD__ + spin_lock_destroy(&file_priv->mm.lock); +#endif + + if (file_priv && file_priv->bsd_ring) + file_priv->bsd_ring = NULL; + kfree(file_priv); +} + +static int +i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + return -ENODEV; +} + +const struct drm_ioctl_desc i915_ioctls[] = { + DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 
DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0), + DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW), + DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW), + DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW), +}; + +int i915_max_ioctl = ARRAY_SIZE(i915_ioctls); Index: src/sys/external/bsd/drm2/dist/include/drm/drmP.h diff -u /dev/null src/sys/external/bsd/drm2/dist/include/drm/drmP.h:1.43 --- /dev/null Sat Dec 18 23:54:51 2021 +++ src/sys/external/bsd/drm2/dist/include/drm/drmP.h Sat Dec 18 23:54:51 2021 @@ -0,0 +1,1308 @@ +/* $NetBSD: drmP.h,v 1.43 2021/12/18 23:54:51 riastradh Exp $ */ + +/* + * Internal Header for the Direct Rendering Manager + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * Copyright (c) 2009-2010, Code Aurora Forum. + * All rights reserved. + * + * Author: Rickard E. (Rik) Faith <fa...@valinux.com> + * Author: Gareth Hughes <gar...@valinux.com> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _DRM_P_H_ +#define _DRM_P_H_ + +#include <linux/agp_backend.h> +#include <linux/cdev.h> +#include <linux/dma-mapping.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/highmem.h> +#include <linux/idr.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/jiffies.h> +#include <linux/kernel.h> +#include <linux/kref.h> +#include <linux/miscdevice.h> +#include <linux/mm.h> +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/poll.h> +#include <linux/ratelimit.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/vmalloc.h> +#include <linux/workqueue.h> + +#include <asm/mman.h> +#include <asm/pgalloc.h> +#include <asm/uaccess.h> + +#include <uapi/drm/drm.h> +#include <uapi/drm/drm_mode.h> + +#ifdef __NetBSD__ +#include <drm/drm_os_netbsd.h> +#include <asm/barrier.h> +#include <asm/bug.h> +#include <linux/atomic.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/fence.h> +#include <linux/interrupt.h> +#include <linux/ktime.h> +#include <linux/module.h> +#include <linux/pm.h> +#include <linux/string.h> +#include <linux/timer.h> +#include <linux/uidgid.h> +#else +#include <drm/drm_os_linux.h> +#endif + +#include <drm/drm.h> +#include <drm/drm_agpsupport.h> +#include <drm/drm_crtc.h> +#include <drm/drm_global.h> +#include <drm/drm_hashtab.h> +#include <drm/drm_mem_util.h> +#include <drm/drm_mm.h> +#include <drm/drm_sarea.h> +#include <drm/drm_vma_manager.h> + +struct module; + +struct drm_file; +struct drm_device; +struct drm_agp_head; +struct drm_local_map; +struct drm_device_dma; +struct drm_dma_handle; +struct drm_gem_object; +struct drm_bus_irq_cookie; + +struct device_node; +struct videomode; +struct reservation_object; +struct dma_buf_attachment; + +/* + * 4 debug categories are defined: + * + * CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c, drm_memory.c, ... + * This is the category used by the DRM_DEBUG() macro. + * + * DRIVER: Used in the vendor specific part of the driver: i915, radeon, ... + * This is the category used by the DRM_DEBUG_DRIVER() macro. + * + * KMS: used in the modesetting code. + * This is the category used by the DRM_DEBUG_KMS() macro. + * + * PRIME: used in the prime code. + * This is the category used by the DRM_DEBUG_PRIME() macro. + * + * ATOMIC: used in the atomic code. + * This is the category used by the DRM_DEBUG_ATOMIC() macro. + * + * VBL: used for verbose debug message in the vblank code + * This is the category used by the DRM_DEBUG_VBL() macro. + * + * Enabling verbose debug messages is done through the drm.debug parameter, + * each category being enabled by a bit. + * + * drm.debug=0x1 will enable CORE messages + * drm.debug=0x2 will enable DRIVER messages + * drm.debug=0x3 will enable CORE and DRIVER messages + * ... 
+ * drm.debug=0x3f will enable all messages + * + * An interesting feature is that it's possible to enable verbose logging at + * run-time by echoing the debug value in its sysfs node: + * # echo 0xf > /sys/module/drm/parameters/debug + */ +#define DRM_UT_CORE 0x01 +#define DRM_UT_DRIVER 0x02 +#define DRM_UT_KMS 0x04 +#define DRM_UT_PRIME 0x08 +#define DRM_UT_ATOMIC 0x10 +#define DRM_UT_VBL 0x20 + +extern __printf(2, 3) +void drm_ut_debug_printk(const char *function_name, + const char *format, ...); +extern __printf(4, 5) +void drm_err(const char *file, int line, const char *func, const char *format, ...); + +/***********************************************************************/ +/** \name DRM template customization defaults */ +/*@{*/ + +/* driver capabilities and requirements mask */ +#define DRIVER_USE_AGP 0x1 +#define DRIVER_PCI_DMA 0x8 +#define DRIVER_SG 0x10 +#define DRIVER_HAVE_DMA 0x20 +#define DRIVER_HAVE_IRQ 0x40 +#define DRIVER_IRQ_SHARED 0x80 +#define DRIVER_GEM 0x1000 +#define DRIVER_MODESET 0x2000 +#define DRIVER_PRIME 0x4000 +#define DRIVER_RENDER 0x8000 +#define DRIVER_ATOMIC 0x10000 +#define DRIVER_KMS_LEGACY_CONTEXT 0x20000 + +/***********************************************************************/ +/** \name Macros to make printk easier */ +/*@{*/ + +#define _DRM_PRINTK(once, level, fmt, ...) \ + do { \ + printk##once(KERN_##level "[" DRM_NAME "] " fmt, \ + ##__VA_ARGS__); \ + } while (0) + +#define DRM_INFO(fmt, ...) \ + _DRM_PRINTK(, INFO, fmt, ##__VA_ARGS__) +#define DRM_NOTE(fmt, ...) \ + _DRM_PRINTK(, NOTICE, fmt, ##__VA_ARGS__) +#define DRM_WARN(fmt, ...) \ + _DRM_PRINTK(, WARNING, fmt, ##__VA_ARGS__) + +#define DRM_INFO_ONCE(fmt, ...) \ + _DRM_PRINTK(_once, INFO, fmt, ##__VA_ARGS__) +#define DRM_NOTE_ONCE(fmt, ...) \ + _DRM_PRINTK(_once, NOTICE, fmt, ##__VA_ARGS__) +#define DRM_WARN_ONCE(fmt, ...) \ + _DRM_PRINTK(_once, WARNING, fmt, ##__VA_ARGS__) + +/** + * Error output. + * + * \param fmt printf() like format string. + * \param arg arguments + */ +#define DRM_ERROR(fmt, ...) \ + drm_err(__FILE__, __LINE__, __func__, fmt, ##__VA_ARGS__) + +/** + * Rate limited error output. Like DRM_ERROR() but won't flood the log. + * + * \param fmt printf() like format string. + * \param arg arguments + */ +#define DRM_ERROR_RATELIMITED(fmt, ...) \ +({ \ + static DEFINE_RATELIMIT_STATE(_rs, \ + DEFAULT_RATELIMIT_INTERVAL, \ + DEFAULT_RATELIMIT_BURST); \ + \ + if (__ratelimit(&_rs)) \ + drm_err(__FILE__, __LINE__, __func__, fmt, ##__VA_ARGS__); \ +}) + +/** + * Debug output. + * + * \param fmt printf() like format string. + * \param arg arguments + */ +#define DRM_DEBUG(fmt, args...) \ + do { \ + if (unlikely(drm_debug & DRM_UT_CORE)) \ + drm_ut_debug_printk(__func__, fmt, ##args); \ + } while (0) + +#define DRM_DEBUG_DRIVER(fmt, args...) \ + do { \ + if (unlikely(drm_debug & DRM_UT_DRIVER)) \ + drm_ut_debug_printk(__func__, fmt, ##args); \ + } while (0) +#define DRM_DEBUG_KMS(fmt, args...) \ + do { \ + if (unlikely(drm_debug & DRM_UT_KMS)) \ + drm_ut_debug_printk(__func__, fmt, ##args); \ + } while (0) +#define DRM_DEBUG_PRIME(fmt, args...) \ + do { \ + if (unlikely(drm_debug & DRM_UT_PRIME)) \ + drm_ut_debug_printk(__func__, fmt, ##args); \ + } while (0) +#define DRM_DEBUG_ATOMIC(fmt, args...) \ + do { \ + if (unlikely(drm_debug & DRM_UT_ATOMIC)) \ + drm_ut_debug_printk(__func__, fmt, ##args); \ + } while (0) +#define DRM_DEBUG_VBL(fmt, args...) 
\ + do { \ + if (unlikely(drm_debug & DRM_UT_VBL)) \ + drm_ut_debug_printk(__func__, fmt, ##args); \ + } while (0) + +/*@}*/ + +/***********************************************************************/ +/** \name Internal types and structures */ +/*@{*/ + +#define DRM_IF_VERSION(maj, min) (maj << 16 | min) + +/** + * Ioctl function type. + * + * \param inode device inode. + * \param file_priv DRM file private pointer. + * \param cmd command. + * \param arg argument. + */ +typedef int drm_ioctl_t(struct drm_device *dev, void *data, + struct drm_file *file_priv); + +typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd, + unsigned long arg); + +#ifdef __NetBSD__ +/* XXX Kludge...is there a better way to do this? */ +#define DRM_IOCTL_NR(n) \ + (IOCBASECMD(n) &~ (IOC_DIRMASK | (IOCGROUP(n) << IOCGROUP_SHIFT))) +#define DRM_MAJOR cdevsw_lookup_major(&drm_cdevsw) +#else +#define DRM_IOCTL_NR(n) _IOC_NR(n) +#define DRM_MAJOR 226 +#endif + +#define DRM_AUTH 0x1 +#define DRM_MASTER 0x2 +#define DRM_ROOT_ONLY 0x4 +#define DRM_CONTROL_ALLOW 0x8 +#define DRM_UNLOCKED 0x10 +#define DRM_RENDER_ALLOW 0x20 + +struct drm_ioctl_desc { + unsigned int cmd; + int flags; + drm_ioctl_t *func; + const char *name; +}; + +/** + * Creates a driver or general drm_ioctl_desc array entry for the given + * ioctl, for use by drm_ioctl(). + */ + +#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \ + [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = { \ + .cmd = DRM_IOCTL_##ioctl, \ + .func = _func, \ + .flags = _flags, \ + .name = #ioctl \ + } + +/* Event queued up for userspace to read */ +struct drm_pending_event { + struct drm_event *event; + struct list_head link; + struct drm_file *file_priv; + pid_t pid; /* pid of requester, no guarantee it's valid by the time + we deliver the event, for tracing only */ + void (*destroy)(struct drm_pending_event *event); +}; + +/* initial implementation using a linked list - todo hashtab */ +struct drm_prime_file_private { + struct list_head head; + struct mutex lock; +}; + +/** File private data */ +struct drm_file { + unsigned authenticated :1; + /* Whether we're master for a minor. Protected by master_mutex */ + unsigned is_master :1; + /* true when the client has asked us to expose stereo 3D mode flags */ + unsigned stereo_allowed :1; + /* + * true if client understands CRTC primary planes and cursor planes + * in the plane list + */ + unsigned universal_planes:1; + /* true if client understands atomic properties */ + unsigned atomic:1; + /* + * This client is allowed to gain master privileges for @master. + * Protected by struct drm_device::master_mutex. + */ + unsigned allowed_master:1; + +#ifndef __NetBSD__ + struct pid *pid; + kuid_t uid; +#endif + drm_magic_t magic; + struct list_head lhead; + struct drm_minor *minor; + unsigned long lock_count; + + /** Mapping of mm object handles to object pointers. */ + struct idr object_idr; + /** Lock for synchronization of access to object_idr. */ + spinlock_t table_lock; + + struct file *filp; + void *driver_priv; + + struct drm_master *master; /* master this node is currently associated with + N.B. not always minor->master */ + /** + * fbs - List of framebuffers associated with this file. + * + * Protected by fbs_lock. Note that the fbs list holds a reference on + * the fb object to prevent it from untimely disappearing. + */ + struct list_head fbs; + struct mutex fbs_lock; + + /** User-created blob properties; this retains a reference on the + * property. 
*/ + struct list_head blobs; + +#ifdef __NetBSD__ + drm_waitqueue_t event_wait; + struct selinfo event_selq; +#else + wait_queue_head_t event_wait; +#endif + struct list_head event_list; + int event_space; + + struct drm_prime_file_private prime; +}; + +/** + * Lock data. + */ +struct drm_lock_data { + struct drm_hw_lock *hw_lock; /**< Hardware lock */ + /** Private of lock holder's file (NULL=kernel) */ + struct drm_file *file_priv; +#ifdef __NetBSD__ + drm_waitqueue_t lock_queue; /**< Queue of blocked processes */ +#else + wait_queue_head_t lock_queue; /**< Queue of blocked processes */ +#endif + unsigned long lock_time; /**< Time of last lock in jiffies */ + spinlock_t spinlock; + uint32_t kernel_waiters; + uint32_t user_waiters; + int idle_has_lock; +}; + +/** + * struct drm_master - drm master structure + * + * @refcount: Refcount for this master object. + * @minor: Link back to minor char device we are master for. Immutable. + * @unique: Unique identifier: e.g. busid. Protected by drm_global_mutex. + * @unique_len: Length of unique field. Protected by drm_global_mutex. + * @magic_map: Map of used authentication tokens. Protected by struct_mutex. + * @lock: DRI lock information. + * @driver_priv: Pointer to driver-private information. + */ +struct drm_master { + struct kref refcount; + struct drm_minor *minor; + char *unique; + int unique_len; + struct idr magic_map; + struct drm_lock_data lock; + void *driver_priv; +}; + +/* Size of ringbuffer for vblank timestamps. Just double-buffer + * in initial implementation. + */ +#define DRM_VBLANKTIME_RBSIZE 2 + +/* Flags and return codes for get_vblank_timestamp() driver function. */ +#define DRM_CALLED_FROM_VBLIRQ 1 +#define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0) +#define DRM_VBLANKTIME_IN_VBLANK (1 << 1) + +/* get_scanout_position() return flags */ +#define DRM_SCANOUTPOS_VALID (1 << 0) +#define DRM_SCANOUTPOS_IN_VBLANK (1 << 1) +#define DRM_SCANOUTPOS_ACCURATE (1 << 2) + +/** + * DRM driver structure. This structure represents the common code for + * a family of cards. There will be one drm_device for each card present + * in this family. + */ +struct drm_driver { + int (*load) (struct drm_device *, unsigned long flags); + int (*firstopen) (struct drm_device *); + int (*open) (struct drm_device *, struct drm_file *); + void (*preclose) (struct drm_device *, struct drm_file *file_priv); + void (*postclose) (struct drm_device *, struct drm_file *); + void (*lastclose) (struct drm_device *); + int (*unload) (struct drm_device *); + int (*suspend) (struct drm_device *, pm_message_t state); + int (*resume) (struct drm_device *); + int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv); + int (*dma_quiescent) (struct drm_device *); + int (*context_dtor) (struct drm_device *dev, int context); + int (*set_busid)(struct drm_device *dev, struct drm_master *master); + int (*set_unique)(struct drm_device *dev, struct drm_master *master, + struct drm_unique *); + + /** + * get_vblank_counter - get raw hardware vblank counter + * @dev: DRM device + * @pipe: counter to fetch + * + * Driver callback for fetching a raw hardware vblank counter for @crtc. + * If a device doesn't have a hardware counter, the driver can simply + * return the value of drm_vblank_count. The DRM core will account for + * missed vblank events while interrupts were disabled based on system + * timestamps. + * + * Wraparound handling and loss of events due to modesetting is dealt + * with in the DRM core code. + * + * RETURNS + * Raw vblank counter value. 
+	 */
+	u32 (*get_vblank_counter) (struct drm_device *dev, unsigned int pipe);
+
+	/**
+	 * enable_vblank - enable vblank interrupt events
+	 * @dev: DRM device
+	 * @pipe: which irq to enable
+	 *
+	 * Enable vblank interrupts for @pipe. If the device doesn't have
+	 * a hardware vblank counter, this routine should be a no-op, since
+	 * interrupts will have to stay on to keep the count accurate.
+	 *
+	 * RETURNS
+	 * Zero on success, appropriate errno if the given @pipe's vblank
+	 * interrupt cannot be enabled.
+	 */
+	int (*enable_vblank) (struct drm_device *dev, unsigned int pipe);
+
+	/**
+	 * disable_vblank - disable vblank interrupt events
+	 * @dev: DRM device
+	 * @pipe: which irq to disable
+	 *
+	 * Disable vblank interrupts for @pipe. If the device doesn't have
+	 * a hardware vblank counter, this routine should be a no-op, since
+	 * interrupts will have to stay on to keep the count accurate.
+	 */
+	void (*disable_vblank) (struct drm_device *dev, unsigned int pipe);
+
+	/**
+	 * Called by \c drm_device_is_agp. Typically used to determine if a
+	 * card is really attached to AGP or not.
+	 *
+	 * \param dev DRM device handle
+	 *
+	 * \returns
+	 * One of three values is returned depending on whether or not the
+	 * card is absolutely \b not AGP (return of 0), absolutely \b is AGP
+	 * (return of 1), or may or may not be AGP (return of 2).
+	 */
+	int (*device_is_agp) (struct drm_device *dev);
+
+	/**
+	 * Called by vblank timestamping code.
+	 *
+	 * Return the current display scanout position from a crtc, and an
+	 * optional accurate ktime_get timestamp of when the position was
+	 * measured.
+	 *
+	 * \param dev DRM device.
+	 * \param pipe Id of the crtc to query.
+	 * \param flags Flags from the caller (DRM_CALLED_FROM_VBLIRQ or 0).
+	 * \param *vpos Target location for current vertical scanout position.
+	 * \param *hpos Target location for current horizontal scanout position.
+	 * \param *stime Target location for timestamp taken immediately before
+	 *               scanout position query. Can be NULL to skip timestamp.
+	 * \param *etime Target location for timestamp taken immediately after
+	 *               scanout position query. Can be NULL to skip timestamp.
+	 * \param mode Current display timings.
+	 *
+	 * Returns vpos as a positive number while in active scanout area.
+	 * Returns vpos as a negative number inside vblank, counting the number
+	 * of scanlines to go until end of vblank, e.g., -1 means "one scanline
+	 * until start of active scanout / end of vblank."
+	 *
+	 * \return Flags, or'ed together as follows:
+	 *
+	 * DRM_SCANOUTPOS_VALID = Query successful.
+	 * DRM_SCANOUTPOS_IN_VBLANK = Inside vblank.
+	 * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
+	 * this flag means that the returned position may be offset by a
+	 * constant but unknown small number of scanlines wrt. the real scanout
+	 * position.
+	 *
+	 */
+	int (*get_scanout_position) (struct drm_device *dev, unsigned int pipe,
+				     unsigned int flags, int *vpos, int *hpos,
+				     ktime_t *stime, ktime_t *etime,
+				     const struct drm_display_mode *mode);
+
+	/**
+	 * Called by \c drm_get_last_vbltimestamp. Should return a precise
+	 * timestamp when the most recent VBLANK interval ended or will end.
+	 *
+	 * Specifically, the timestamp in @vblank_time should correspond as
+	 * closely as possible to the time when the first video scanline of
+	 * the video frame after the end of VBLANK will start scanning out,
+	 * the time immediately after end of the VBLANK interval. If the
+	 * @crtc is currently inside VBLANK, this will be a time in the future.
+	 * If the @crtc is currently scanning out a frame, this will be the
+	 * past start time of the current scanout. This is meant to adhere
+	 * to the OpenML OML_sync_control extension specification.
+	 *
+	 * \param dev DRM device handle.
+	 * \param pipe crtc for which the timestamp should be returned.
+	 * \param *max_error Maximum allowable timestamp error in nanoseconds.
+	 *                   Implementation should strive to provide timestamp
+	 *                   with an error of at most *max_error nanoseconds.
+	 *                   Returns true upper bound on error for timestamp.
+	 * \param *vblank_time Target location for returned vblank timestamp.
+	 * \param flags 0 = Defaults, no special treatment needed, or
+	 *        DRM_CALLED_FROM_VBLIRQ = Function is called from vblank
+	 *        irq handler. Some drivers need to apply some workarounds
+	 *        for gpu-specific vblank irq quirks if this flag is set.
+	 *
+	 * \returns
+	 * Zero if timestamping isn't supported in the current display mode,
+	 * or a negative number on failure. A positive status code on success,
+	 * which describes how the vblank_time timestamp was computed.
+	 */
+	int (*get_vblank_timestamp) (struct drm_device *dev, unsigned int pipe,
+				     int *max_error,
+				     struct timeval *vblank_time,
+				     unsigned flags);
+
+	/* these have to be filled in */
+
+	irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
+	void (*irq_preinstall) (struct drm_device *dev);
+	int (*irq_postinstall) (struct drm_device *dev);
+	void (*irq_uninstall) (struct drm_device *dev);
+
+#ifdef __NetBSD__
+	int (*request_irq)(struct drm_device *, int);
+	void (*free_irq)(struct drm_device *);
+#endif
+
+	/* Master routines */
+	int (*master_create)(struct drm_device *dev, struct drm_master *master);
+	void (*master_destroy)(struct drm_device *dev, struct drm_master *master);
+	/**
+	 * master_set is called whenever the minor master is set.
+	 * master_drop is called whenever the minor master is dropped.
+	 */
+
+	int (*master_set)(struct drm_device *dev, struct drm_file *file_priv,
+			  bool from_open);
+	void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv,
+			    bool from_release);
+
+	int (*debugfs_init)(struct drm_minor *minor);
+	void (*debugfs_cleanup)(struct drm_minor *minor);
+
+	/**
+	 * Driver-specific destructor for drm_gem_objects; frees the object
+	 * and any driver-private state hanging off obj->driver_private.
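+	 *
+	 * A purely illustrative sketch (foo_gem_object and to_foo_obj are
+	 * hypothetical names): the hook usually releases the backing
+	 * storage and GEM core state before freeing the wrapper:
+	 *
+	 *	static void foo_gem_free_object(struct drm_gem_object *obj)
+	 *	{
+	 *		struct foo_gem_object *fobj = to_foo_obj(obj);
+	 *
+	 *		/* drop GEM core state, then the driver wrapper */
+	 *		drm_gem_object_release(obj);
+	 *		kfree(fobj);
+	 *	}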
+ */ + void (*gem_free_object) (struct drm_gem_object *obj); + int (*gem_open_object) (struct drm_gem_object *, struct drm_file *); + void (*gem_close_object) (struct drm_gem_object *, struct drm_file *); + + /* prime: */ + /* export handle -> fd (see drm_gem_prime_handle_to_fd() helper) */ + int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv, + uint32_t handle, uint32_t flags, int *prime_fd); + /* import fd -> handle (see drm_gem_prime_fd_to_handle() helper) */ + int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv, + int prime_fd, uint32_t *handle); + /* export GEM -> dmabuf */ + struct dma_buf * (*gem_prime_export)(struct drm_device *dev, + struct drm_gem_object *obj, int flags); + /* import dmabuf -> GEM */ + struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev, + struct dma_buf *dma_buf); + /* low-level interface used by drm_gem_prime_{import,export} */ + int (*gem_prime_pin)(struct drm_gem_object *obj); + void (*gem_prime_unpin)(struct drm_gem_object *obj); + struct reservation_object * (*gem_prime_res_obj)( + struct drm_gem_object *obj); + struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj); + struct drm_gem_object *(*gem_prime_import_sg_table)( + struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sgt); + void *(*gem_prime_vmap)(struct drm_gem_object *obj); + void (*gem_prime_vunmap)(struct drm_gem_object *obj, void *vaddr); +#ifdef __NetBSD__ + int (*gem_prime_mmap)(struct drm_gem_object *obj, off_t *offp, + size_t len, int prot, int *flagsp, int *advicep, + struct uvm_object **uobjp, int *maxprotp); +#else + int (*gem_prime_mmap)(struct drm_gem_object *obj, + struct vm_area_struct *vma); +#endif + + /* vga arb irq handler */ + void (*vgaarb_irq)(struct drm_device *dev, bool state); + + /* dumb alloc support */ + int (*dumb_create)(struct drm_file *file_priv, + struct drm_device *dev, + struct drm_mode_create_dumb *args); + int (*dumb_map_offset)(struct drm_file *file_priv, + struct drm_device *dev, uint32_t handle, + uint64_t *offset); + int (*dumb_destroy)(struct drm_file *file_priv, + struct drm_device *dev, + uint32_t handle); + + /* Driver private ops for this object */ +#ifdef __NetBSD__ + int (*mmap_object)(struct drm_device *, off_t, size_t, int, + struct uvm_object **, voff_t *, struct file *); + const struct uvm_pagerops *gem_uvm_ops; +#else + const struct vm_operations_struct *gem_vm_ops; +#endif + + int major; + int minor; + int patchlevel; + const char *name; + const char *desc; + const char *date; + + u32 driver_features; + int dev_priv_size; + const struct drm_ioctl_desc *ioctls; + int num_ioctls; + const struct file_operations *fops; + +#ifdef __NetBSD__ + int (*ioctl_override)(struct file *, unsigned long, void *); +#endif + + /* List of devices hanging off this driver with stealth attach. */ + struct list_head legacy_dev_list; +}; + +enum drm_minor_type { + DRM_MINOR_LEGACY, + DRM_MINOR_CONTROL, + DRM_MINOR_RENDER, + DRM_MINOR_CNT, +}; + +#ifdef __NetBSD__ /* XXX debugfs */ +struct seq_file; +#endif + +/** + * Info file list entry. This structure represents a debugfs or proc file to + * be created by the drm core + */ +struct drm_info_list { + const char *name; /** file name */ + int (*show)(struct seq_file*, void*); /** show callback */ + u32 driver_features; /**< Required driver features for this entry */ + void *data; +}; + +/** + * debugfs node structure. This structure represents a debugfs file. 
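+ *
+ * Each node is instantiated from one &struct drm_info_list entry. As a
+ * purely illustrative sketch (foo_debugfs_list and foo_state_show are
+ * hypothetical names), a driver might define
+ *
+ *	static const struct drm_info_list foo_debugfs_list[] = {
+ *		{ "foo_state", foo_state_show, 0, NULL },
+ *	};
+ *
+ * and register it with drm_debugfs_create_files() from its debugfs_init
+ * hook; one node is created per entry.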
+ */
+struct drm_info_node {
+	struct list_head list;
+	struct drm_minor *minor;
+	const struct drm_info_list *info_ent;
+	struct dentry *dent;
+};
+
+/**
+ * DRM minor structure. This structure represents a drm minor number.
+ */
+struct drm_minor {
+	int index;			/**< Minor device number */
+	int type;			/**< Control or render */
+	struct device *kdev;		/**< Linux device */
+	struct drm_device *dev;
+
+#ifndef __NetBSD__		/* XXX debugfs */
+	struct dentry *debugfs_root;
+
+	struct list_head debugfs_list;
+	struct mutex debugfs_lock; /* Protects debugfs_list. */
+#endif
+
+	/* currently active master for this node. Protected by master_mutex */
+	struct drm_master *master;
+};
+
+
+struct drm_pending_vblank_event {
+	struct drm_pending_event base;
+	unsigned int pipe;
+	struct drm_event_vblank event;
+};
+
+struct drm_vblank_crtc {
+	struct drm_device *dev;		/* pointer to the drm_device */
+#ifdef __NetBSD__
+	drm_waitqueue_t queue;
+#else
+	wait_queue_head_t queue;	/**< VBLANK wait queue */
+#endif
+	struct timer_list disable_timer;	/* delayed disable timer */
+
+	/* vblank counter, protected by dev->vblank_time_lock for writes */
+	u32 count;
+	/* vblank timestamps, protected by dev->vblank_time_lock for writes */
+	struct timeval time[DRM_VBLANKTIME_RBSIZE];
+
+	atomic_t refcount;		/* number of users of vblank interrupts per crtc */
+	u32 last;			/* protected by dev->vbl_lock, used */
+					/* for wraparound handling */
+	u32 last_wait;			/* Last vblank seqno waited per CRTC */
+	unsigned int inmodeset;		/* Display driver is setting mode */
+	unsigned int pipe;		/* crtc index */
+	int framedur_ns;		/* frame/field duration in ns */
+	int linedur_ns;			/* line duration in ns */
+	bool enabled;			/* so we don't call enable more than
+					   once per disable */
+};
+
+/**
+ * DRM device structure. This structure represents a complete card that
+ * may contain multiple heads.
+ */
+struct drm_device {
+	struct list_head legacy_dev_list;/**< list of devices per driver for stealth attach cleanup */
+	int if_version;			/**< Highest interface version set */
+
+	/** \name Lifetime Management */
+	/*@{ */
+	struct kref ref;		/**< Object ref-count */
+	struct device *dev;		/**< Device structure of bus-device */
+	struct drm_driver *driver;	/**< DRM driver managing the device */
+	void *dev_private;		/**< DRM driver private data */
+	struct drm_minor *control;	/**< Control node */
+	struct drm_minor *primary;	/**< Primary node */
+	struct drm_minor *render;	/**< Render node */
+	atomic_t unplugged;		/**< Flag whether dev is dead */
+	struct inode *anon_inode;	/**< inode for private address-space */
+	char *unique;			/**< unique name of the device */
+	/*@} */
+
+	/** \name Locks */
+	/*@{ */
+	struct mutex struct_mutex;	/**< For others */
+	struct mutex master_mutex;	/**< For drm_minor::master and drm_file::is_master */
+	/*@} */
+
+	/** \name Usage Counters */
+	/*@{ */
+	int open_count;			/**< Outstanding files open, protected by drm_global_mutex. */
+	spinlock_t buf_lock;		/**< For drm_device::buf_use and a few other things. */
+	int buf_use;			/**< Buffers in use -- cannot alloc */
+	atomic_t buf_alloc;		/**< Buffer allocation in progress */
+	/*@} */
+
+	struct list_head filelist;
+
+	/** \name Memory management */
+	/*@{ */
+	struct list_head maplist;	/**< Linked list of regions */
+	struct drm_open_hash map_hash;	/**< User token hash table for maps */
+
+	/** \name Context handle management */
+	/*@{ */
+	struct list_head ctxlist;	/**< Linked list of context handles */
+	struct mutex ctxlist_mutex;	/**< For ctxlist */
+
+	struct idr ctx_idr;
+
+	struct list_head vmalist;	/**< List of vmas (for debugging) */
+
+	/*@} */
+
+	/** \name DMA support */
+	/*@{ */
+	struct drm_device_dma *dma;	/**< Optional pointer for DMA support */
+	/*@} */
+
+	/** \name Context support */
+	/*@{ */
+
+	__volatile__ long context_flag;	/**< Context swapping flag */
+	int last_context;		/**< Last current context */
+	/*@} */
+
+	/** \name VBLANK IRQ support */
+	/*@{ */
+	bool irq_enabled;
+	int irq;
+#ifdef __NetBSD__
+	struct drm_bus_irq_cookie *irq_cookie;
+#endif
+
+	/*
+	 * At load time, disabling the vblank interrupt won't be allowed since
+	 * old clients may not call the modeset ioctl and therefore misbehave.
+	 * Once the modeset ioctl *has* been called though, we can safely
+	 * disable them when unused.
+	 */
+	bool vblank_disable_allowed;
+
+	/*
+	 * If true, vblank interrupt will be disabled immediately when the
+	 * refcount drops to zero, as opposed to via the vblank disable
+	 * timer.
+	 * This can be set to true if the hardware has a working vblank
+	 * counter and the driver uses drm_vblank_on() and drm_vblank_off()
+	 * appropriately.
+	 */
+	bool vblank_disable_immediate;
+
+	/* array of size num_crtcs */
+	struct drm_vblank_crtc *vblank;
+
+	spinlock_t vblank_time_lock;	/**< Protects vblank count and time updates during vblank enable/disable */
+	spinlock_t vbl_lock;
+
+	u32 max_vblank_count;		/**< size of vblank counter register */
+
+	/**
+	 * List of events
+	 */
+	struct list_head vblank_event_list;
+	spinlock_t event_lock;
+
+	/*@} */
+
+	struct drm_agp_head *agp;	/**< AGP data */
+
+	struct pci_dev *pdev;		/**< PCI device structure */
+#ifdef __alpha__
+	struct pci_controller *hose;
+#endif
+
+	struct platform_device *platformdev; /**< Platform device structure */
+	struct virtio_device *virtdev;
+
+#ifdef __NetBSD__
+	bus_space_tag_t bst;
+	struct drm_bus_map *bus_maps;
+	unsigned bus_nmaps;
+	bus_dma_tag_t bus_dmat;	/* bus's full DMA tag, for internal use */
+	bus_dma_tag_t bus_dmat32; /* bus's 32-bit DMA tag */
+	bus_dma_tag_t dmat;	/* DMA tag for driver, may be subregion */
+	bool dmat_subregion_p;
+	bus_addr_t dmat_subregion_min;
+	bus_addr_t dmat_subregion_max;
+	vmem_t *cma_pool;
+#endif
+
+	struct drm_sg_mem *sg;	/**< Scatter gather memory */
+	unsigned int num_crtcs;	/**< Number of CRTCs on this device */
+
+	struct {
+		int context;
+		struct drm_hw_lock *lock;
+	} sigdata;
+
+	struct drm_local_map *agp_buffer_map;
+	unsigned int agp_buffer_token;
+
+	struct drm_mode_config mode_config;	/**< Current mode config */
+
+	/** \name GEM information */
+	/*@{ */
+	struct mutex object_name_lock;
+	struct idr object_name_idr;
+	struct drm_vma_offset_manager *vma_offset_manager;
+	/*@} */
+	int switch_power_state;
+};
+
+#define DRM_SWITCH_POWER_ON 0
+#define DRM_SWITCH_POWER_OFF 1
+#define DRM_SWITCH_POWER_CHANGING 2
+#define DRM_SWITCH_POWER_DYNAMIC_OFF 3
+
+static __inline__ int drm_core_check_feature(struct drm_device *dev,
+					     int feature)
+{
+	return ((dev->driver->driver_features & feature) ? 1 : 0);
+}
+
+static inline void drm_device_set_unplugged(struct drm_device *dev)
+{
+	smp_wmb();
+	atomic_set(&dev->unplugged, 1);
+}
+
+static inline int drm_device_is_unplugged(struct drm_device *dev)
+{
+	int ret = atomic_read(&dev->unplugged);
+	smp_rmb();
+	return ret;
+}
+
+static inline bool drm_is_render_client(const struct drm_file *file_priv)
+{
+	return file_priv->minor->type == DRM_MINOR_RENDER;
+}
+
+static inline bool drm_is_control_client(const struct drm_file *file_priv)
+{
+	return file_priv->minor->type == DRM_MINOR_CONTROL;
+}
+
+static inline bool drm_is_primary_client(const struct drm_file *file_priv)
+{
+	return file_priv->minor->type == DRM_MINOR_LEGACY;
+}
+
+/******************************************************************/
+/** \name Internal function definitions */
+/*@{*/
+
+				/* Driver support (drm_drv.h) */
+extern int drm_ioctl_permit(u32 flags, struct drm_file *file_priv);
+#ifdef __NetBSD__
+extern int drm_ioctl(struct file *, unsigned long, void *);
+extern struct spinlock drm_minor_lock;
+extern struct idr drm_minors_idr;
+#else
+extern long drm_ioctl(struct file *filp,
+		      unsigned int cmd, unsigned long arg);
+extern long drm_compat_ioctl(struct file *filp,
+			     unsigned int cmd, unsigned long arg);
+#endif
+extern bool drm_ioctl_flags(unsigned int nr, unsigned int *flags);
+
+				/* Device support (drm_fops.h) */
+#ifdef __NetBSD__
+extern int drm_open_file(struct drm_file *, void *, struct drm_minor *);
+extern void drm_close_file(struct drm_file *);
+#else
+extern int drm_open(struct inode *inode, struct file *filp);
+extern ssize_t drm_read(struct file *filp, char __user *buffer,
+			size_t count, loff_t *offset);
+extern int drm_release(struct inode *inode, struct file *filp);
+#endif
+extern int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv);
+
+				/* Mapping support (drm_vm.h) */
+#ifndef __NetBSD__
+extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
+#endif
+
+/* Misc. IOCTL support (drm_ioctl.c) */
+int drm_noop(struct drm_device *dev, void *data,
+	     struct drm_file *file_priv);
+int drm_invalid_op(struct drm_device *dev, void *data,
+		   struct drm_file *file_priv);
+
+/* Cache management (drm_cache.c) */
+void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
+#ifdef __NetBSD__		/* XXX drm clflush */
+void drm_clflush_pglist(struct pglist *);
+void drm_clflush_page(struct page *);
+void drm_clflush_virt_range(const void *, size_t);
+#else
+void drm_clflush_sg(struct sg_table *st);
+void drm_clflush_virt_range(void *addr, unsigned long length);
+#endif
+
+/*
+ * These are exported to drivers so that they can implement fencing using
+ * DMA quiescent + idle. DMA quiescent usually requires the hardware lock.
+ */ + + /* IRQ support (drm_irq.h) */ +#ifdef __NetBSD__ +extern int drm_irq_install(struct drm_device *dev); +#else +extern int drm_irq_install(struct drm_device *dev, int irq); +#endif +extern int drm_irq_uninstall(struct drm_device *dev); + +extern int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs); +extern int drm_wait_vblank(struct drm_device *dev, void *data, + struct drm_file *filp); +extern u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe); +extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc); +extern u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe, + struct timeval *vblanktime); +extern u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc, + struct timeval *vblanktime); +extern void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe, + struct drm_pending_vblank_event *e); +extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc, + struct drm_pending_vblank_event *e); +extern void drm_arm_vblank_event(struct drm_device *dev, unsigned int pipe, + struct drm_pending_vblank_event *e); +extern void drm_crtc_arm_vblank_event(struct drm_crtc *crtc, + struct drm_pending_vblank_event *e); +extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe); +extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc); +extern int drm_vblank_get(struct drm_device *dev, unsigned int pipe); +extern void drm_vblank_put(struct drm_device *dev, unsigned int pipe); +extern int drm_crtc_vblank_get(struct drm_crtc *crtc); +extern void drm_crtc_vblank_put(struct drm_crtc *crtc); +extern int drm_vblank_get_locked(struct drm_device *dev, unsigned int pipe); +extern void drm_vblank_put_locked(struct drm_device *dev, unsigned int pipe); +extern int drm_crtc_vblank_get_locked(struct drm_crtc *crtc); +extern void drm_crtc_vblank_put_locked(struct drm_crtc *crtc); +extern void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe); +extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc); +extern void drm_vblank_off(struct drm_device *dev, unsigned int pipe); +extern void drm_vblank_on(struct drm_device *dev, unsigned int pipe); +extern void drm_crtc_vblank_off(struct drm_crtc *crtc); +extern void drm_crtc_vblank_reset(struct drm_crtc *crtc); +extern void drm_crtc_vblank_on(struct drm_crtc *crtc); +extern void drm_vblank_cleanup(struct drm_device *dev); +extern u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe); + +extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, + unsigned int pipe, int *max_error, + struct timeval *vblank_time, + unsigned flags, + const struct drm_display_mode *mode); +extern void drm_calc_timestamping_constants(struct drm_crtc *crtc, + const struct drm_display_mode *mode); + +/** + * drm_crtc_vblank_waitqueue - get vblank waitqueue for the CRTC + * @crtc: which CRTC's vblank waitqueue to retrieve + * + * This function returns a pointer to the vblank waitqueue for the CRTC. + * Drivers can use this to implement vblank waits using wait_event() & co. 
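+ *
+ * As a hedged sketch (Linux side only; "last" stands for a counter
+ * snapshot taken earlier by the caller, not a name defined in this
+ * header), a driver could wait for the next vblank with:
+ *
+ *	wait_event(*drm_crtc_vblank_waitqueue(crtc),
+ *		   drm_crtc_vblank_count(crtc) != last);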
+ */ +#ifdef __NetBSD__ +static inline drm_waitqueue_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc) +#else +static inline wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc) +#endif +{ + return &crtc->dev->vblank[drm_crtc_index(crtc)].queue; +} + +/* Modesetting support */ +extern void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe); +extern void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe); + + /* Stub support (drm_stub.h) */ +extern struct drm_master *drm_master_get(struct drm_master *master); +extern void drm_master_put(struct drm_master **master); + +extern void drm_put_dev(struct drm_device *dev); +extern void drm_unplug_dev(struct drm_device *dev); +extern unsigned int drm_debug; +extern bool drm_atomic; + + /* Debugfs support */ +#if defined(CONFIG_DEBUG_FS) +extern int drm_debugfs_create_files(const struct drm_info_list *files, + int count, struct dentry *root, + struct drm_minor *minor); +extern int drm_debugfs_remove_files(const struct drm_info_list *files, + int count, struct drm_minor *minor); +#else +static inline int drm_debugfs_create_files(const struct drm_info_list *files, + int count, struct dentry *root, + struct drm_minor *minor) +{ + return 0; +} + +static inline int drm_debugfs_remove_files(const struct drm_info_list *files, + int count, struct drm_minor *minor) +{ + return 0; +} +#endif + +extern struct dma_buf *drm_gem_prime_export(struct drm_device *dev, + struct drm_gem_object *obj, + int flags); +extern int drm_gem_prime_handle_to_fd(struct drm_device *dev, + struct drm_file *file_priv, uint32_t handle, uint32_t flags, + int *prime_fd); +extern struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev, + struct dma_buf *dma_buf); +extern int drm_gem_prime_fd_to_handle(struct drm_device *dev, + struct drm_file *file_priv, int prime_fd, uint32_t *handle); +extern void drm_gem_dmabuf_release(struct dma_buf *dma_buf); + +#ifdef __NetBSD__ +extern struct sg_table *drm_prime_bus_dmamem_to_sg(bus_dma_tag_t, const bus_dma_segment_t *, int); +extern struct sg_table *drm_prime_pglist_to_sg(struct pglist *, unsigned); +extern int drm_prime_sg_to_bus_dmamem(bus_dma_tag_t, bus_dma_segment_t *, int, int *, const struct sg_table *); +extern int drm_prime_bus_dmamap_load_sgt(bus_dma_tag_t, bus_dmamap_t, struct sg_table *); +extern bus_size_t drm_prime_sg_size(struct sg_table *); +extern void drm_prime_sg_free(struct sg_table *); +extern bool drm_prime_sg_importable(bus_dma_tag_t, struct sg_table *); +#else +extern int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages, + dma_addr_t *addrs, int max_pages); +#endif +extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages); +extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg); + + +int drm_pci_set_unique(struct drm_device *dev, + struct drm_master *master, + struct drm_unique *u); +extern struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size, + size_t align); +extern void drm_pci_free(struct drm_device *dev, struct drm_dma_handle * dmah); + + /* sysfs support (drm_sysfs.c) */ +extern void drm_sysfs_hotplug_event(struct drm_device *dev); + + +struct drm_device *drm_dev_alloc(struct drm_driver *driver, + struct device *parent); +void drm_dev_ref(struct drm_device *dev); +void drm_dev_unref(struct drm_device *dev); +int drm_dev_register(struct drm_device *dev, unsigned long flags); +void drm_dev_unregister(struct drm_device *dev); +int drm_dev_set_unique(struct 
drm_device *dev, const char *fmt, ...); + +struct drm_minor *drm_minor_acquire(unsigned int minor_id); +void drm_minor_release(struct drm_minor *minor); + +#ifdef __NetBSD__ +int drm_limit_dma_space(struct drm_device *, resource_size_t, resource_size_t); +int drm_guarantee_initialized(void); +#endif + +/*@}*/ + +/* PCI section */ +static __inline__ int drm_pci_device_is_agp(struct drm_device *dev) +{ + if (dev->driver->device_is_agp != NULL) { + int err = (*dev->driver->device_is_agp) (dev); + + if (err != 2) { + return err; + } + } + + return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP); +} +void drm_pci_agp_destroy(struct drm_device *dev); + +extern int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver); +extern void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver); +#ifdef __NetBSD__ +int drm_pci_request_irq(struct drm_device *, int); +void drm_pci_free_irq(struct drm_device *); +extern int drm_pci_attach(device_t, const struct pci_attach_args *, + struct pci_dev *, struct drm_driver *, unsigned long, + struct drm_device **); +extern int drm_pci_detach(struct drm_device *, int); +#endif +#ifdef CONFIG_PCI +extern int drm_get_pci_dev(struct pci_dev *pdev, + const struct pci_device_id *ent, + struct drm_driver *driver); +extern int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master); +#else +static inline int drm_get_pci_dev(struct pci_dev *pdev, + const struct pci_device_id *ent, + struct drm_driver *driver) +{ + return -ENOSYS; +} + +static inline int drm_pci_set_busid(struct drm_device *dev, + struct drm_master *master) +{ + return -ENOSYS; +} +#endif + +#define DRM_PCIE_SPEED_25 1 +#define DRM_PCIE_SPEED_50 2 +#define DRM_PCIE_SPEED_80 4 + +extern int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *speed_mask); + +/* platform section */ +extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device); +extern int drm_platform_set_busid(struct drm_device *d, struct drm_master *m); + +/* returns true if currently okay to sleep */ +static __inline__ bool drm_can_sleep(void) +{ +#ifdef __NetBSD__ + return false; /* XXX */ +#else + if (in_atomic() || in_dbg_master() || irqs_disabled()) + return false; + return true; +#endif +} + +#ifdef __NetBSD__ + +/* XXX This is pretty kludgerific. */ + +#include <linux/io-mapping.h> + +static inline struct io_mapping * +drm_io_mapping_create_wc(struct drm_device *dev, resource_size_t addr, + unsigned long size) +{ + return bus_space_io_mapping_create_wc(dev->bst, addr, size); +} + +#endif /* defined(__NetBSD__) */ + +#ifdef __NetBSD__ +extern const struct cdevsw drm_cdevsw; +#endif + +#endif Index: src/sys/external/bsd/drm2/dist/include/drm/drm_gem_cma_helper.h diff -u /dev/null src/sys/external/bsd/drm2/dist/include/drm/drm_gem_cma_helper.h:1.8 --- /dev/null Sat Dec 18 23:54:51 2021 +++ src/sys/external/bsd/drm2/dist/include/drm/drm_gem_cma_helper.h Sat Dec 18 23:54:51 2021 @@ -0,0 +1,155 @@ +/* $NetBSD: drm_gem_cma_helper.h,v 1.8 2021/12/18 23:54:51 riastradh Exp $ */ + +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DRM_GEM_CMA_HELPER_H__ +#define __DRM_GEM_CMA_HELPER_H__ + +#include <drm/drm_file.h> +#include <drm/drm_ioctl.h> +#include <drm/drm_gem.h> + +struct drm_mode_create_dumb; + +/** + * struct drm_gem_cma_object - GEM object backed by CMA memory allocations + * @base: base GEM object + * @paddr: physical address of the backing memory + * @sgt: scatter/gather table for imported PRIME buffers. 
The table can have
+ *	more than one entry, but the entries are guaranteed to have contiguous
+ *	DMA addresses.
+ * @vaddr: kernel virtual address of the backing memory
+ */
+struct drm_gem_cma_object {
+	struct drm_gem_object base;
+#ifdef __NetBSD__
+	bus_dma_tag_t dmat;
+	bus_dma_segment_t dmasegs[1];
+	bus_size_t dmasize;
+	bus_dmamap_t dmamap;
+	vmem_t *vmem_pool;
+	vmem_addr_t vmem_addr;
+#else
+	dma_addr_t paddr;
+#endif
+	struct sg_table *sgt;
+
+	/* For objects with DMA memory allocated by GEM CMA */
+	void *vaddr;
+};
+
+#define to_drm_gem_cma_obj(gem_obj) \
+	container_of(gem_obj, struct drm_gem_cma_object, base)
+
+#ifndef CONFIG_MMU
+#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS \
+	.get_unmapped_area = drm_gem_cma_get_unmapped_area,
+#else
+#define DRM_GEM_CMA_UNMAPPED_AREA_FOPS
+#endif
+
+/**
+ * DEFINE_DRM_GEM_CMA_FOPS() - macro to generate file operations for CMA drivers
+ * @name: name for the generated structure
+ *
+ * This macro autogenerates a suitable &struct file_operations for CMA based
+ * drivers, which can be assigned to &drm_driver.fops. Note that this structure
+ * cannot be shared between drivers, because it contains a reference to the
+ * current module using THIS_MODULE.
+ *
+ * Note that the declaration is already marked as static - if you need a
+ * non-static version of this you're probably doing it wrong and will break the
+ * THIS_MODULE reference by accident.
+ */
+#define DEFINE_DRM_GEM_CMA_FOPS(name) \
+	static const struct file_operations name = {\
+		.owner		= THIS_MODULE,\
+		.open		= drm_open,\
+		.release	= drm_release,\
+		.unlocked_ioctl	= drm_ioctl,\
+		.compat_ioctl	= drm_compat_ioctl,\
+		.poll		= drm_poll,\
+		.read		= drm_read,\
+		.llseek		= noop_llseek,\
+		.mmap		= drm_gem_cma_mmap,\
+		DRM_GEM_CMA_UNMAPPED_AREA_FOPS \
+	}
+
+/* free GEM object */
+void drm_gem_cma_free_object(struct drm_gem_object *gem_obj);
+
+/* create memory region for DRM framebuffer */
+int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
+				     struct drm_device *drm,
+				     struct drm_mode_create_dumb *args);
+
+/* create memory region for DRM framebuffer */
+int drm_gem_cma_dumb_create(struct drm_file *file_priv,
+			    struct drm_device *drm,
+			    struct drm_mode_create_dumb *args);
+
+/* set vm_flags; the VM attributes can be adjusted here */
+#ifndef __NetBSD__
+int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma);
+#endif
+
+/* allocate physical memory */
+struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
+					      size_t size);
+
+#ifdef __NetBSD__
+extern const struct uvm_pagerops drm_gem_cma_uvm_ops;
+#else
+extern const struct vm_operations_struct drm_gem_cma_vm_ops;
+#endif
+
+#ifndef CONFIG_MMU
+unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
+					    unsigned long addr,
+					    unsigned long len,
+					    unsigned long pgoff,
+					    unsigned long flags);
+#endif
+
+void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent,
+			    const struct drm_gem_object *obj);
+
+struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *
+drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
+				  struct dma_buf_attachment *attach,
+				  struct sg_table *sgt);
+#ifdef __NetBSD__
+int drm_gem_cma_prime_mmap(struct drm_gem_object *, off_t *, size_t, int,
+    int *, int *, struct uvm_object **, int *);
+#else
+int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
+			   struct vm_area_struct *vma);
+#endif
+void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj);
+void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+
+struct drm_gem_object *
+drm_cma_gem_create_object_default_funcs(struct drm_device *dev, size_t size);
+
+/**
+ * DRM_GEM_CMA_VMAP_DRIVER_OPS - CMA GEM driver operations ensuring a virtual
+ *                               address on the buffer
+ *
+ * This macro provides a shortcut for setting the default GEM operations in the
+ * &drm_driver structure for drivers that also need the virtual address on
+ * imported buffers.
+ */
+#define DRM_GEM_CMA_VMAP_DRIVER_OPS \
+	.gem_create_object	= drm_cma_gem_create_object_default_funcs, \
+	.dumb_create		= drm_gem_cma_dumb_create, \
+	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd, \
+	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle, \
+	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table_vmap, \
+	.gem_prime_mmap		= drm_gem_prime_mmap
+
+struct drm_gem_object *
+drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *drm,
+				       struct dma_buf_attachment *attach,
+				       struct sg_table *sgt);
+
+#endif /* __DRM_GEM_CMA_HELPER_H__ */
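
As a usage sketch of the two macros above (a hypothetical "foo" driver on
the Linux side; none of these names are part of this header), they slot
into a driver's definitions like so:

	DEFINE_DRM_GEM_CMA_FOPS(foo_fops);

	static struct drm_driver foo_driver = {
		.driver_features = DRIVER_GEM | DRIVER_MODESET,
		.fops = &foo_fops,
		/* expands to designated initializers, so it can sit
		   directly in the initializer list */
		DRM_GEM_CMA_VMAP_DRIVER_OPS,
		.name = "foo",
	};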