Author: kib
Date: Fri Nov  1 17:38:52 2013
New Revision: 257512
URL: http://svnweb.freebsd.org/changeset/base/257512

Log:
  Add support for queued invalidation.
  
  Right now, the semaphore write is scheduled after each batch, which is
  not optimal and must be tuned.
  
  Discussed with:       alc
  Tested by:    pho
  MFC after:    1 month

Added:
  head/sys/x86/iommu/intel_qi.c   (contents, props changed)
Modified:
  head/sys/conf/files.amd64
  head/sys/conf/files.i386
  head/sys/x86/iommu/intel_ctx.c
  head/sys/x86/iommu/intel_dmar.h
  head/sys/x86/iommu/intel_drv.c
  head/sys/x86/iommu/intel_fault.c
  head/sys/x86/iommu/intel_gas.c
  head/sys/x86/iommu/intel_idpgtbl.c
  head/sys/x86/iommu/intel_reg.h
  head/sys/x86/iommu/intel_utils.c

Modified: head/sys/conf/files.amd64
==============================================================================
--- head/sys/conf/files.amd64   Fri Nov  1 17:16:44 2013        (r257511)
+++ head/sys/conf/files.amd64   Fri Nov  1 17:38:52 2013        (r257512)
@@ -537,6 +537,7 @@ x86/iommu/intel_drv.c               optional        acpi acp
 x86/iommu/intel_fault.c                optional        acpi acpi_dmar pci
 x86/iommu/intel_gas.c          optional        acpi acpi_dmar pci
 x86/iommu/intel_idpgtbl.c      optional        acpi acpi_dmar pci
+x86/iommu/intel_qi.c           optional        acpi acpi_dmar pci
 x86/iommu/intel_quirks.c       optional        acpi acpi_dmar pci
 x86/iommu/intel_utils.c                optional        acpi acpi_dmar pci
 x86/isa/atpic.c                        optional        atpic isa

Modified: head/sys/conf/files.i386
==============================================================================
--- head/sys/conf/files.i386    Fri Nov  1 17:16:44 2013        (r257511)
+++ head/sys/conf/files.i386    Fri Nov  1 17:38:52 2013        (r257512)
@@ -560,6 +560,7 @@ x86/iommu/intel_drv.c               optional acpi acp
 x86/iommu/intel_fault.c                optional acpi acpi_dmar pci
 x86/iommu/intel_gas.c          optional acpi acpi_dmar pci
 x86/iommu/intel_idpgtbl.c      optional acpi acpi_dmar pci
+x86/iommu/intel_qi.c           optional acpi acpi_dmar pci
 x86/iommu/intel_quirks.c       optional acpi acpi_dmar pci
 x86/iommu/intel_utils.c                optional acpi acpi_dmar pci
 x86/isa/atpic.c                        optional atpic

Modified: head/sys/x86/iommu/intel_ctx.c
==============================================================================
--- head/sys/x86/iommu/intel_ctx.c      Fri Nov  1 17:16:44 2013        (r257511)
+++ head/sys/x86/iommu/intel_ctx.c      Fri Nov  1 17:38:52 2013        (r257512)
@@ -385,17 +385,29 @@ dmar_get_ctx(struct dmar_unit *dmar, dev
         * negative TLB entries.
         */
        if ((dmar->hw_cap & DMAR_CAP_CM) != 0 || enable) {
-               error = dmar_inv_ctx_glob(dmar);
-               if (error == 0 &&
-                   (dmar->hw_ecap & DMAR_ECAP_DI) != 0)
-                       error = dmar_inv_iotlb_glob(dmar);
-               if (error != 0) {
-                       dmar_free_ctx_locked(dmar, ctx);
-                       TD_PINNED_ASSERT;
-                       return (NULL);
+               if (dmar->qi_enabled) {
+                       dmar_qi_invalidate_ctx_glob_locked(dmar);
+                       if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0)
+                               dmar_qi_invalidate_iotlb_glob_locked(dmar);
+               } else {
+                       error = dmar_inv_ctx_glob(dmar);
+                       if (error == 0 &&
+                           (dmar->hw_ecap & DMAR_ECAP_DI) != 0)
+                               error = dmar_inv_iotlb_glob(dmar);
+                       if (error != 0) {
+                               dmar_free_ctx_locked(dmar, ctx);
+                               TD_PINNED_ASSERT;
+                               return (NULL);
+                       }
                }
        }
-       if (enable && !rmrr_init) {
+
+       /*
+        * The dmar lock was potentially dropped between check for the
+        * empty context list and now.  Recheck the state of GCMD_TE
+        * to avoid unneeded command.
+        */
+       if (enable && !rmrr_init && (dmar->hw_gcmd & DMAR_GCMD_TE) == 0) {
                error = dmar_enable_translation(dmar);
                if (error != 0) {
                        dmar_free_ctx_locked(dmar, ctx);
@@ -469,8 +481,12 @@ dmar_free_ctx_locked(struct dmar_unit *d
        dmar_pte_clear(&ctxp->ctx1);
        ctxp->ctx2 = 0;
        dmar_inv_ctx_glob(dmar);
-       if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0)
-               dmar_inv_iotlb_glob(dmar);
+       if ((dmar->hw_ecap & DMAR_ECAP_DI) != 0) {
+               if (dmar->qi_enabled)
+                       dmar_qi_invalidate_iotlb_glob_locked(dmar);
+               else
+                       dmar_inv_iotlb_glob(dmar);
+       }
        LIST_REMOVE(ctx, link);
        DMAR_UNLOCK(dmar);
 
@@ -512,24 +528,86 @@ dmar_find_ctx_locked(struct dmar_unit *d
 }
 
 void
+dmar_ctx_free_entry(struct dmar_map_entry *entry, bool free)
+{
+       struct dmar_ctx *ctx;
+
+       ctx = entry->ctx;
+       DMAR_CTX_LOCK(ctx);
+       if ((entry->flags & DMAR_MAP_ENTRY_RMRR) != 0)
+               dmar_gas_free_region(ctx, entry);
+       else
+               dmar_gas_free_space(ctx, entry);
+       DMAR_CTX_UNLOCK(ctx);
+       if (free)
+               dmar_gas_free_entry(ctx, entry);
+       else
+               entry->flags = 0;
+}
+
+void
+dmar_ctx_unload_entry(struct dmar_map_entry *entry, bool free)
+{
+       struct dmar_unit *unit;
+
+       unit = entry->ctx->dmar;
+       if (unit->qi_enabled) {
+               DMAR_LOCK(unit);
+               dmar_qi_invalidate_locked(entry->ctx, entry->start,
+                   entry->end - entry->start, &entry->gseq);
+               if (!free)
+                       entry->flags |= DMAR_MAP_ENTRY_QI_NF;
+               TAILQ_INSERT_TAIL(&unit->tlb_flush_entries, entry, dmamap_link);
+               DMAR_UNLOCK(unit);
+       } else {
+               ctx_flush_iotlb_sync(entry->ctx, entry->start, entry->end -
+                   entry->start);
+               dmar_ctx_free_entry(entry, free);
+       }
+}
+
+void
 dmar_ctx_unload(struct dmar_ctx *ctx, struct dmar_map_entries_tailq *entries,
     bool cansleep)
 {
-       struct dmar_map_entry *entry;
+       struct dmar_unit *unit;
+       struct dmar_map_entry *entry, *entry1;
+       struct dmar_qi_genseq gseq;
        int error;
 
-       while ((entry = TAILQ_FIRST(entries)) != NULL) {
+       unit = ctx->dmar;
+
+       TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
                KASSERT((entry->flags & DMAR_MAP_ENTRY_MAP) != 0,
                    ("not mapped entry %p %p", ctx, entry));
-               TAILQ_REMOVE(entries, entry, dmamap_link);
                error = ctx_unmap_buf(ctx, entry->start, entry->end -
                    entry->start, cansleep ? DMAR_PGF_WAITOK : 0);
                KASSERT(error == 0, ("unmap %p error %d", ctx, error));
-               DMAR_CTX_LOCK(ctx);
-               dmar_gas_free_space(ctx, entry);
-               DMAR_CTX_UNLOCK(ctx);
-               dmar_gas_free_entry(ctx, entry);
+               if (!unit->qi_enabled) {
+                       ctx_flush_iotlb_sync(ctx, entry->start,
+                           entry->end - entry->start);
+                       TAILQ_REMOVE(entries, entry, dmamap_link);
+                       dmar_ctx_free_entry(entry, true);
+               }
+       }
+       if (TAILQ_EMPTY(entries))
+               return;
+
+       KASSERT(unit->qi_enabled, ("loaded entry left"));
+       DMAR_LOCK(unit);
+       TAILQ_FOREACH(entry, entries, dmamap_link) {
+               entry->gseq.gen = 0;
+               entry->gseq.seq = 0;
+               dmar_qi_invalidate_locked(ctx, entry->start, entry->end -
+                   entry->start, TAILQ_NEXT(entry, dmamap_link) == NULL ?
+                   &gseq : NULL);
+       }
+       TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
+               entry->gseq = gseq;
+               TAILQ_REMOVE(entries, entry, dmamap_link);
+               TAILQ_INSERT_TAIL(&unit->tlb_flush_entries, entry, dmamap_link);
        }
+       DMAR_UNLOCK(unit);
 }      
 
 static void

Modified: head/sys/x86/iommu/intel_dmar.h
==============================================================================
--- head/sys/x86/iommu/intel_dmar.h     Fri Nov  1 17:16:44 2013        (r257511)
+++ head/sys/x86/iommu/intel_dmar.h     Fri Nov  1 17:38:52 2013        (r257512)
@@ -37,6 +37,11 @@ typedef uint64_t dmar_haddr_t;
 /* Guest or bus address, before translation. */
 typedef uint64_t dmar_gaddr_t;
 
+struct dmar_qi_genseq {
+       u_int gen;
+       uint32_t seq;
+};
+
 struct dmar_map_entry {
        dmar_gaddr_t start;
        dmar_gaddr_t end;
@@ -48,6 +53,8 @@ struct dmar_map_entry {
        RB_ENTRY(dmar_map_entry) rb_entry;       /* Links for ctx entries */
        TAILQ_ENTRY(dmar_map_entry) unroll_link; /* Link for unroll after
                                                    dmamap_load failure */
+       struct dmar_ctx *ctx;
+       struct dmar_qi_genseq gseq;
 };
 
 RB_HEAD(dmar_gas_entries_tree, dmar_map_entry);
@@ -60,6 +67,7 @@ RB_PROTOTYPE(dmar_gas_entries_tree, dmar
 #define        DMAR_MAP_ENTRY_MAP      0x0004  /* Busdma created, linked by
                                           dmamap_link */
 #define        DMAR_MAP_ENTRY_UNMAPPED 0x0010  /* No backing pages */
+#define        DMAR_MAP_ENTRY_QI_NF    0x0020  /* qi task, do not free entry */
 #define        DMAR_MAP_ENTRY_READ     0x1000  /* Read permitted */
 #define        DMAR_MAP_ENTRY_WRITE    0x2000  /* Write permitted */
 #define        DMAR_MAP_ENTRY_SNOOP    0x4000  /* Snoop */
@@ -113,6 +121,24 @@ struct dmar_ctx {
 #define        DMAR_CTX_UNLOCK(ctx)    mtx_unlock(&(ctx)->lock)
 #define        DMAR_CTX_ASSERT_LOCKED(ctx) mtx_assert(&(ctx)->lock, MA_OWNED)
 
+struct dmar_msi_data {
+       int irq;
+       int irq_rid;
+       struct resource *irq_res;
+       void *intr_handle;
+       int (*handler)(void *);
+       int msi_data_reg;
+       int msi_addr_reg;
+       int msi_uaddr_reg;
+       void (*enable_intr)(struct dmar_unit *);
+       void (*disable_intr)(struct dmar_unit *);
+       const char *name;
+};
+
+#define        DMAR_INTR_FAULT         0
+#define        DMAR_INTR_QI            1
+#define        DMAR_INTR_TOTAL         2
+
 struct dmar_unit {
        device_t dev;
        int unit;
@@ -122,10 +148,8 @@ struct dmar_unit {
        /* Resources */
        int reg_rid;
        struct resource *regs;
-       int irq;
-       int irq_rid;
-       struct resource *irq_res;
-       void *intr_handle;
+
+       struct dmar_msi_data intrs[DMAR_INTR_TOTAL];
 
        /* Hardware registers cache */
        uint32_t hw_ver;
@@ -149,6 +173,25 @@ struct dmar_unit {
        struct task fault_task;
        struct taskqueue *fault_taskqueue;
 
+       /* QI */
+       int qi_enabled;
+       vm_offset_t inv_queue;
+       vm_size_t inv_queue_size;
+       uint32_t inv_queue_avail;
+       uint32_t inv_queue_tail;
+       volatile uint32_t inv_waitd_seq_hw; /* hw writes there on wait
+                                              descr completion */
+       uint64_t inv_waitd_seq_hw_phys;
+       uint32_t inv_waitd_seq; /* next sequence number to use for wait descr */
+       u_int inv_waitd_gen;    /* seq number generation AKA seq overflows */
+       u_int inv_seq_waiters;  /* count of waiters for seq */
+       u_int inv_queue_full;   /* informational counter */
+
+       /* Delayed freeing of map entries queue processing */
+       struct dmar_map_entries_tailq tlb_flush_entries;
+       struct task qi_task;
+       struct taskqueue *qi_taskqueue;
+
        /* Busdma delayed map load */
        struct task dmamap_load_task;
        TAILQ_HEAD(, bus_dmamap_dmar) delayed_maps;
@@ -164,6 +207,7 @@ struct dmar_unit {
 #define        DMAR_FAULT_ASSERT_LOCKED(dmar) mtx_assert(&(dmar)->fault_lock, MA_OWNED)
 
 #define        DMAR_IS_COHERENT(dmar)  (((dmar)->hw_ecap & DMAR_ECAP_C) != 0)
+#define        DMAR_HAS_QI(dmar)       (((dmar)->hw_ecap & DMAR_ECAP_QI) != 0)
 
 /* Barrier ids */
 #define        DMAR_BARRIER_RMRR       0
@@ -180,6 +224,8 @@ vm_pindex_t pglvl_max_pages(int pglvl);
 int ctx_is_sp_lvl(struct dmar_ctx *ctx, int lvl);
 dmar_gaddr_t pglvl_page_size(int total_pglvl, int lvl);
 dmar_gaddr_t ctx_page_size(struct dmar_ctx *ctx, int lvl);
+int calc_am(struct dmar_unit *unit, dmar_gaddr_t base, dmar_gaddr_t size,
+    dmar_gaddr_t *isizep);
 struct vm_page *dmar_pgalloc(vm_object_t obj, vm_pindex_t idx, int flags);
 void dmar_pgfree(vm_object_t obj, vm_pindex_t idx, int flags);
 void *dmar_map_pgtbl(vm_object_t obj, vm_pindex_t idx, int flags,
@@ -191,21 +237,33 @@ int dmar_inv_iotlb_glob(struct dmar_unit
 int dmar_flush_write_bufs(struct dmar_unit *unit);
 int dmar_enable_translation(struct dmar_unit *unit);
 int dmar_disable_translation(struct dmar_unit *unit);
-void dmar_enable_intr(struct dmar_unit *unit);
-void dmar_disable_intr(struct dmar_unit *unit);
 bool dmar_barrier_enter(struct dmar_unit *dmar, u_int barrier_id);
 void dmar_barrier_exit(struct dmar_unit *dmar, u_int barrier_id);
 
-int dmar_intr(void *arg);
+int dmar_fault_intr(void *arg);
+void dmar_enable_fault_intr(struct dmar_unit *unit);
+void dmar_disable_fault_intr(struct dmar_unit *unit);
 int dmar_init_fault_log(struct dmar_unit *unit);
 void dmar_fini_fault_log(struct dmar_unit *unit);
 
+int dmar_qi_intr(void *arg);
+void dmar_enable_qi_intr(struct dmar_unit *unit);
+void dmar_disable_qi_intr(struct dmar_unit *unit);
+int dmar_init_qi(struct dmar_unit *unit);
+void dmar_fini_qi(struct dmar_unit *unit);
+void dmar_qi_invalidate_locked(struct dmar_ctx *ctx, dmar_gaddr_t start,
+    dmar_gaddr_t size, struct dmar_qi_genseq *pseq);
+void dmar_qi_invalidate_ctx_glob_locked(struct dmar_unit *unit);
+void dmar_qi_invalidate_iotlb_glob_locked(struct dmar_unit *unit);
+
 vm_object_t ctx_get_idmap_pgtbl(struct dmar_ctx *ctx, dmar_gaddr_t maxaddr);
 void put_idmap_pgtbl(vm_object_t obj);
 int ctx_map_buf(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size,
     vm_page_t *ma, uint64_t pflags, int flags);
 int ctx_unmap_buf(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size,
     int flags);
+void ctx_flush_iotlb_sync(struct dmar_ctx *ctx, dmar_gaddr_t base,
+    dmar_gaddr_t size);
 int ctx_alloc_pgtbl(struct dmar_ctx *ctx);
 void ctx_free_pgtbl(struct dmar_ctx *ctx);
 
@@ -217,8 +275,10 @@ void dmar_free_ctx_locked(struct dmar_un
 void dmar_free_ctx(struct dmar_ctx *ctx);
 struct dmar_ctx *dmar_find_ctx_locked(struct dmar_unit *dmar, int bus,
     int slot, int func);
+void dmar_ctx_unload_entry(struct dmar_map_entry *entry, bool free);
 void dmar_ctx_unload(struct dmar_ctx *ctx,
     struct dmar_map_entries_tailq *entries, bool cansleep);
+void dmar_ctx_free_entry(struct dmar_map_entry *entry, bool free);
 
 int dmar_init_busdma(struct dmar_unit *unit);
 void dmar_fini_busdma(struct dmar_unit *unit);
@@ -231,6 +291,7 @@ void dmar_gas_free_space(struct dmar_ctx
 int dmar_gas_map(struct dmar_ctx *ctx, const struct bus_dma_tag_common *common,
     dmar_gaddr_t size, u_int eflags, u_int flags, vm_page_t *ma,
     struct dmar_map_entry **res);
+void dmar_gas_free_region(struct dmar_ctx *ctx, struct dmar_map_entry *entry);
 int dmar_gas_map_region(struct dmar_ctx *ctx, struct dmar_map_entry *entry,
     u_int eflags, u_int flags, vm_page_t *ma);
 int dmar_gas_reserve_region(struct dmar_ctx *ctx, dmar_gaddr_t start,

Modified: head/sys/x86/iommu/intel_drv.c
==============================================================================
--- head/sys/x86/iommu/intel_drv.c      Fri Nov  1 17:16:44 2013        (r257511)
+++ head/sys/x86/iommu/intel_drv.c      Fri Nov  1 17:38:52 2013        (r257512)
@@ -71,8 +71,9 @@ __FBSDID("$FreeBSD$");
 #include "pcib_if.h"
 #endif
 
-#define        DMAR_REG_RID    1
-#define        DMAR_IRQ_RID    0
+#define        DMAR_FAULT_IRQ_RID      0
+#define        DMAR_QI_IRQ_RID         1
+#define        DMAR_REG_RID            2
 
 static devclass_t dmar_devclass;
 static device_t *dmar_devs;
@@ -221,20 +222,31 @@ dmar_probe(device_t dev)
 }
 
 static void
+dmar_release_intr(device_t dev, struct dmar_unit *unit, int idx)
+{
+       struct dmar_msi_data *dmd;
+
+       dmd = &unit->intrs[idx];
+       if (dmd->irq == -1)
+               return;
+       bus_teardown_intr(dev, dmd->irq_res, dmd->intr_handle);
+       bus_release_resource(dev, SYS_RES_IRQ, dmd->irq_rid, dmd->irq_res);
+       bus_delete_resource(dev, SYS_RES_IRQ, dmd->irq_rid);
+       PCIB_RELEASE_MSIX(device_get_parent(device_get_parent(dev)),
+           dev, dmd->irq);
+       dmd->irq = -1;
+}
+
+static void
 dmar_release_resources(device_t dev, struct dmar_unit *unit)
 {
+       int i;
 
        dmar_fini_busdma(unit);
+       dmar_fini_qi(unit);
        dmar_fini_fault_log(unit);
-       if (unit->irq != -1) {
-               bus_teardown_intr(dev, unit->irq_res, unit->intr_handle);
-               bus_release_resource(dev, SYS_RES_IRQ, unit->irq_rid,
-                   unit->irq_res);
-               bus_delete_resource(dev, SYS_RES_IRQ, unit->irq_rid);
-               PCIB_RELEASE_MSIX(device_get_parent(device_get_parent(dev)),
-                   dev, unit->irq);
-               unit->irq = -1;
-       }
+       for (i = 0; i < DMAR_INTR_TOTAL; i++)
+               dmar_release_intr(dev, unit, i);
        if (unit->regs != NULL) {
                bus_deactivate_resource(dev, SYS_RES_MEMORY, unit->reg_rid,
                    unit->regs);
@@ -253,62 +265,66 @@ dmar_release_resources(device_t dev, str
 }
 
 static int
-dmar_alloc_irq(device_t dev, struct dmar_unit *unit)
+dmar_alloc_irq(device_t dev, struct dmar_unit *unit, int idx)
 {
        device_t pcib;
+       struct dmar_msi_data *dmd;
        uint64_t msi_addr;
        uint32_t msi_data;
        int error;
 
+       dmd = &unit->intrs[idx];
        pcib = device_get_parent(device_get_parent(dev)); /* Really not pcib */
-       error = PCIB_ALLOC_MSIX(pcib, dev, &unit->irq);
+       error = PCIB_ALLOC_MSIX(pcib, dev, &dmd->irq);
        if (error != 0) {
-               device_printf(dev, "cannot allocate fault interrupt, %d\n",
-                   error);
+               device_printf(dev, "cannot allocate %s interrupt, %d\n",
+                   dmd->name, error);
                goto err1;
        }
-       unit->irq_rid = DMAR_IRQ_RID;
-       error = bus_set_resource(dev, SYS_RES_IRQ, unit->irq_rid, unit->irq,
-           1);
+       error = bus_set_resource(dev, SYS_RES_IRQ, dmd->irq_rid,
+           dmd->irq, 1);
        if (error != 0) {
-               device_printf(dev, "cannot set interrupt resource, %d\n",
-                   error);
+               device_printf(dev, "cannot set %s interrupt resource, %d\n",
+                   dmd->name, error);
                goto err2;
        }
-       unit->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
-           &unit->irq_rid, RF_ACTIVE);
-       if (unit->irq_res == NULL) {
-               device_printf(dev, "cannot map fault interrupt\n");
+       dmd->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
+           &dmd->irq_rid, RF_ACTIVE);
+       if (dmd->irq_res == NULL) {
+               device_printf(dev,
+                   "cannot allocate resource for %s interrupt\n", dmd->name);
                error = ENXIO;
                goto err3;
        }
-       error = bus_setup_intr(dev, unit->irq_res, INTR_TYPE_MISC,
-           dmar_intr, NULL, unit, &unit->intr_handle);
+       error = bus_setup_intr(dev, dmd->irq_res, INTR_TYPE_MISC,
+           dmd->handler, NULL, unit, &dmd->intr_handle);
        if (error != 0) {
-               device_printf(dev, "cannot setup fault interrupt, %d\n", error);
+               device_printf(dev, "cannot setup %s interrupt, %d\n",
+                   dmd->name, error);
                goto err4;
        }
-       bus_describe_intr(dev, unit->irq_res, unit->intr_handle, "fault");
-       error = PCIB_MAP_MSI(pcib, dev, unit->irq, &msi_addr, &msi_data);
+       bus_describe_intr(dev, dmd->irq_res, dmd->intr_handle, dmd->name);
+       error = PCIB_MAP_MSI(pcib, dev, dmd->irq, &msi_addr, &msi_data);
        if (error != 0) {
-               device_printf(dev, "cannot map interrupt, %d\n", error);
+               device_printf(dev, "cannot map %s interrupt, %d\n",
+                   dmd->name, error);
                goto err5;
        }
-       dmar_write4(unit, DMAR_FEDATA_REG, msi_data);
-       dmar_write4(unit, DMAR_FEADDR_REG, msi_addr);
+       dmar_write4(unit, dmd->msi_data_reg, msi_data);
+       dmar_write4(unit, dmd->msi_addr_reg, msi_addr);
        /* Only for xAPIC mode */
-       dmar_write4(unit, DMAR_FEUADDR_REG, msi_addr >> 32);
+       dmar_write4(unit, dmd->msi_uaddr_reg, msi_addr >> 32);
        return (0);
 
 err5:
-       bus_teardown_intr(dev, unit->irq_res, unit->intr_handle);
+       bus_teardown_intr(dev, dmd->irq_res, dmd->intr_handle);
 err4:
-       bus_release_resource(dev, SYS_RES_IRQ, unit->irq_rid, unit->irq_res);
+       bus_release_resource(dev, SYS_RES_IRQ, dmd->irq_rid, dmd->irq_res);
 err3:
-       bus_delete_resource(dev, SYS_RES_IRQ, unit->irq_rid);
+       bus_delete_resource(dev, SYS_RES_IRQ, dmd->irq_rid);
 err2:
-       PCIB_RELEASE_MSIX(pcib, dev, unit->irq);
-       unit->irq = -1;
+       PCIB_RELEASE_MSIX(pcib, dev, dmd->irq);
+       dmd->irq = -1;
 err1:
        return (error);
 }
@@ -318,23 +334,31 @@ static int
 dmar_remap_intr(device_t dev, device_t child, u_int irq)
 {
        struct dmar_unit *unit;
+       struct dmar_msi_data *dmd;
        uint64_t msi_addr;
        uint32_t msi_data;
-       int error;
+       int i, error;
 
        unit = device_get_softc(dev);
-       if (irq != unit->irq)
-               return (ENOENT);
-       error = PCIB_MAP_MSI(device_get_parent(device_get_parent(dev)), dev,
-           irq, &msi_addr, &msi_data);
-       if (error != 0)
-               return (error);
-       dmar_disable_intr(unit);
-       dmar_write4(unit, DMAR_FEDATA_REG, msi_data);
-       dmar_write4(unit, DMAR_FEADDR_REG, msi_addr);
-       dmar_write4(unit, DMAR_FEUADDR_REG, msi_addr >> 32);
-       dmar_enable_intr(unit);
-       return (0);
+       for (i = 0; i < DMAR_INTR_TOTAL; i++) {
+               dmd = &unit->intrs[i];
+               if (irq == dmd->irq) {
+                       error = PCIB_MAP_MSI(device_get_parent(
+                           device_get_parent(dev)),
+                           dev, irq, &msi_addr, &msi_data);
+                       if (error != 0)
+                               return (error);
+                       DMAR_LOCK(unit);
+                       (dmd->disable_intr)(unit);
+                       dmar_write4(unit, dmd->msi_data_reg, msi_data);
+                       dmar_write4(unit, dmd->msi_addr_reg, msi_addr);
+                       dmar_write4(unit, dmd->msi_uaddr_reg, msi_addr >> 32);
+                       (dmd->enable_intr)(unit);
+                       DMAR_UNLOCK(unit);
+                       return (0);
+               }
+       }
+       return (ENOENT);
 }
 #endif
 
@@ -372,7 +396,7 @@ dmar_attach(device_t dev)
 {
        struct dmar_unit *unit;
        ACPI_DMAR_HARDWARE_UNIT *dmaru;
-       int error;
+       int i, error;
 
        unit = device_get_softc(dev);
        unit->dev = dev;
@@ -380,7 +404,6 @@ dmar_attach(device_t dev)
        dmaru = dmar_find_by_index(unit->unit);
        if (dmaru == NULL)
                return (EINVAL);
-       unit->irq = -1;
        unit->segment = dmaru->Segment;
        unit->base = dmaru->Address;
        unit->reg_rid = DMAR_REG_RID;
@@ -397,11 +420,38 @@ dmar_attach(device_t dev)
                dmar_print_caps(dev, unit, dmaru);
        dmar_quirks_post_ident(unit);
 
-       error = dmar_alloc_irq(dev, unit);
+       for (i = 0; i < DMAR_INTR_TOTAL; i++)
+               unit->intrs[i].irq = -1;
+
+       unit->intrs[DMAR_INTR_FAULT].name = "fault";
+       unit->intrs[DMAR_INTR_FAULT].irq_rid = DMAR_FAULT_IRQ_RID;
+       unit->intrs[DMAR_INTR_FAULT].handler = dmar_fault_intr;
+       unit->intrs[DMAR_INTR_FAULT].msi_data_reg = DMAR_FEDATA_REG;
+       unit->intrs[DMAR_INTR_FAULT].msi_addr_reg = DMAR_FEADDR_REG;
+       unit->intrs[DMAR_INTR_FAULT].msi_uaddr_reg = DMAR_FEUADDR_REG;
+       unit->intrs[DMAR_INTR_FAULT].enable_intr = dmar_enable_fault_intr;
+       unit->intrs[DMAR_INTR_FAULT].disable_intr = dmar_disable_fault_intr;
+       error = dmar_alloc_irq(dev, unit, DMAR_INTR_FAULT);
        if (error != 0) {
                dmar_release_resources(dev, unit);
                return (error);
        }
+       if (DMAR_HAS_QI(unit)) {
+               unit->intrs[DMAR_INTR_QI].name = "qi";
+               unit->intrs[DMAR_INTR_QI].irq_rid = DMAR_QI_IRQ_RID;
+               unit->intrs[DMAR_INTR_QI].handler = dmar_qi_intr;
+               unit->intrs[DMAR_INTR_QI].msi_data_reg = DMAR_IEDATA_REG;
+               unit->intrs[DMAR_INTR_QI].msi_addr_reg = DMAR_IEADDR_REG;
+               unit->intrs[DMAR_INTR_QI].msi_uaddr_reg = DMAR_IEUADDR_REG;
+               unit->intrs[DMAR_INTR_QI].enable_intr = dmar_enable_qi_intr;
+               unit->intrs[DMAR_INTR_QI].disable_intr = dmar_disable_qi_intr;
+               error = dmar_alloc_irq(dev, unit, DMAR_INTR_QI);
+               if (error != 0) {
+                       dmar_release_resources(dev, unit);
+                       return (error);
+               }
+       }
+
        mtx_init(&unit->lock, "dmarhw", NULL, MTX_DEF);
        unit->domids = new_unrhdr(0, dmar_nd2mask(DMAR_CAP_ND(unit->hw_cap)),
            &unit->lock);
@@ -453,6 +503,11 @@ dmar_attach(device_t dev)
                dmar_release_resources(dev, unit);
                return (error);
        }
+       error = dmar_init_qi(unit);
+       if (error != 0) {
+               dmar_release_resources(dev, unit);
+               return (error);
+       }
        error = dmar_init_busdma(unit);
        if (error != 0) {
                dmar_release_resources(dev, unit);
@@ -1058,6 +1113,33 @@ dmar_print_one(int idx, bool show_ctxs, 
                    (uintmax_t)dmar_read8(unit, frir),
                    (uintmax_t)dmar_read8(unit, frir + 8));
        }
+       if (DMAR_HAS_QI(unit)) {
+               db_printf("ied 0x%x iea 0x%x ieua 0x%x\n",
+                   dmar_read4(unit, DMAR_IEDATA_REG),
+                   dmar_read4(unit, DMAR_IEADDR_REG),
+                   dmar_read4(unit, DMAR_IEUADDR_REG));
+               if (unit->qi_enabled) {
+                       db_printf("qi is enabled: queue @0x%jx (IQA 0x%jx) "
+                           "size 0x%jx\n"
+                   "  head 0x%x tail 0x%x avail 0x%x status 0x%x ctrl 0x%x\n"
+                   "  hw compl 0x%x@%p/phys@%jx next seq 0x%x gen 0x%x\n",
+                           (uintmax_t)unit->inv_queue,
+                           (uintmax_t)dmar_read8(unit, DMAR_IQA_REG),
+                           (uintmax_t)unit->inv_queue_size,
+                           dmar_read4(unit, DMAR_IQH_REG),
+                           dmar_read4(unit, DMAR_IQT_REG),
+                           unit->inv_queue_avail,
+                           dmar_read4(unit, DMAR_ICS_REG),
+                           dmar_read4(unit, DMAR_IECTL_REG),
+                           unit->inv_waitd_seq_hw,
+                           &unit->inv_waitd_seq_hw,
+                           (uintmax_t)unit->inv_waitd_seq_hw_phys,
+                           unit->inv_waitd_seq,
+                           unit->inv_waitd_gen);
+               } else {
+                       db_printf("qi is disabled\n");
+               }
+       }
        if (show_ctxs) {
                db_printf("contexts:\n");
                LIST_FOREACH(ctx, &unit->contexts, link) {

Modified: head/sys/x86/iommu/intel_fault.c
==============================================================================
--- head/sys/x86/iommu/intel_fault.c    Fri Nov  1 17:16:44 2013        (r257511)
+++ head/sys/x86/iommu/intel_fault.c    Fri Nov  1 17:38:52 2013        (r257512)
@@ -85,7 +85,7 @@ dmar_fault_next(struct dmar_unit *unit, 
 }
 
 static void
-dmar_intr_clear(struct dmar_unit *unit, uint32_t fsts)
+dmar_fault_intr_clear(struct dmar_unit *unit, uint32_t fsts)
 {
        uint32_t clear;
 
@@ -117,7 +117,7 @@ dmar_intr_clear(struct dmar_unit *unit, 
 }
 
 int
-dmar_intr(void *arg)
+dmar_fault_intr(void *arg)
 {
        struct dmar_unit *unit;
        uint64_t fault_rec[2];
@@ -128,7 +128,7 @@ dmar_intr(void *arg)
        unit = arg;
        enqueue = false;
        fsts = dmar_read4(unit, DMAR_FSTS_REG);
-       dmar_intr_clear(unit, fsts);
+       dmar_fault_intr_clear(unit, fsts);
 
        if ((fsts & DMAR_FSTS_PPF) == 0)
                goto done;
@@ -263,9 +263,11 @@ dmar_init_fault_log(struct dmar_unit *un
        taskqueue_start_threads(&unit->fault_taskqueue, 1, PI_AV,
            "dmar%d fault taskq", unit->unit);
 
-       dmar_disable_intr(unit);
+       DMAR_LOCK(unit);
+       dmar_disable_fault_intr(unit);
        dmar_clear_faults(unit);
-       dmar_enable_intr(unit);
+       dmar_enable_fault_intr(unit);
+       DMAR_UNLOCK(unit);
 
        return (0);
 }
@@ -274,16 +276,40 @@ void
 dmar_fini_fault_log(struct dmar_unit *unit)
 {
 
-       dmar_disable_intr(unit);
+       DMAR_LOCK(unit);
+       dmar_disable_fault_intr(unit);
+       DMAR_UNLOCK(unit);
 
        if (unit->fault_taskqueue == NULL)
                return;
 
        taskqueue_drain(unit->fault_taskqueue, &unit->fault_task);
        taskqueue_free(unit->fault_taskqueue);
+       unit->fault_taskqueue = NULL;
        mtx_destroy(&unit->fault_lock);
 
        free(unit->fault_log, M_DEVBUF);
        unit->fault_log = NULL;
        unit->fault_log_head = unit->fault_log_tail = 0;
 }
+
+void
+dmar_enable_fault_intr(struct dmar_unit *unit)
+{
+       uint32_t fectl;
+
+       DMAR_ASSERT_LOCKED(unit);
+       fectl = dmar_read4(unit, DMAR_FECTL_REG);
+       fectl &= ~DMAR_FECTL_IM;
+       dmar_write4(unit, DMAR_FECTL_REG, fectl);
+}
+
+void
+dmar_disable_fault_intr(struct dmar_unit *unit)
+{
+       uint32_t fectl;
+
+       DMAR_ASSERT_LOCKED(unit);
+       fectl = dmar_read4(unit, DMAR_FECTL_REG);
+       dmar_write4(unit, DMAR_FECTL_REG, fectl | DMAR_FECTL_IM);
+}

Modified: head/sys/x86/iommu/intel_gas.c
==============================================================================
--- head/sys/x86/iommu/intel_gas.c      Fri Nov  1 17:16:44 2013        (r257511)
+++ head/sys/x86/iommu/intel_gas.c      Fri Nov  1 17:38:52 2013        (r257512)
@@ -92,8 +92,10 @@ dmar_gas_alloc_entry(struct dmar_ctx *ct
 
        res = uma_zalloc(dmar_map_entry_zone, ((flags & DMAR_PGF_WAITOK) !=
            0 ? M_WAITOK : M_NOWAIT) | M_ZERO);
-       if (res != NULL)
+       if (res != NULL) {
+               res->ctx = ctx;
                atomic_add_int(&ctx->entries_cnt, 1);
+       }
        return (res);
 }
 
@@ -101,6 +103,9 @@ void
 dmar_gas_free_entry(struct dmar_ctx *ctx, struct dmar_map_entry *entry)
 {
 
+       KASSERT(ctx == entry->ctx,
+           ("mismatched free ctx %p entry %p entry->ctx %p", ctx,
+           entry, entry->ctx));
        atomic_subtract_int(&ctx->entries_cnt, 1);
        uma_zfree(dmar_map_entry_zone, entry);
 }
@@ -170,6 +175,9 @@ dmar_gas_check_free(struct dmar_ctx *ctx
        dmar_gaddr_t v;
 
        RB_FOREACH(entry, dmar_gas_entries_tree, &ctx->rb_root) {
+               KASSERT(ctx == entry->ctx,
+                   ("mismatched free ctx %p entry %p entry->ctx %p", ctx,
+                   entry, entry->ctx));
                next = RB_NEXT(dmar_gas_entries_tree, &ctx->rb_root, entry);
                if (next == NULL) {
                        MPASS(entry->free_after == ctx->end - entry->end);
@@ -583,7 +591,7 @@ dmar_gas_free_space(struct dmar_ctx *ctx
 #endif
 }
 
-static void
+void
 dmar_gas_free_region(struct dmar_ctx *ctx, struct dmar_map_entry *entry)
 {
        struct dmar_map_entry *next, *prev;
@@ -644,10 +652,7 @@ dmar_gas_map(struct dmar_ctx *ctx, const
            ((eflags & DMAR_MAP_ENTRY_TM) != 0 ? DMAR_PTE_TM : 0),
            (flags & DMAR_GM_CANWAIT) != 0 ? DMAR_PGF_WAITOK : 0);
        if (error == ENOMEM) {
-               DMAR_CTX_LOCK(ctx);
-               dmar_gas_free_space(ctx, entry);
-               DMAR_CTX_UNLOCK(ctx);
-               dmar_gas_free_entry(ctx, entry);
+               dmar_ctx_unload_entry(entry, true);
                return (error);
        }
        KASSERT(error == 0,
@@ -689,10 +694,7 @@ dmar_gas_map_region(struct dmar_ctx *ctx
            ((eflags & DMAR_MAP_ENTRY_TM) != 0 ? DMAR_PTE_TM : 0),
            (flags & DMAR_GM_CANWAIT) != 0 ? DMAR_PGF_WAITOK : 0);
        if (error == ENOMEM) {
-               DMAR_CTX_LOCK(ctx);
-               dmar_gas_free_region(ctx, entry);
-               DMAR_CTX_UNLOCK(ctx);
-               entry->flags = 0;
+               dmar_ctx_unload_entry(entry, false);
                return (error);
        }
        KASSERT(error == 0,

Modified: head/sys/x86/iommu/intel_idpgtbl.c
==============================================================================
--- head/sys/x86/iommu/intel_idpgtbl.c  Fri Nov  1 17:16:44 2013        (r257511)
+++ head/sys/x86/iommu/intel_idpgtbl.c  Fri Nov  1 17:38:52 2013        (r257512)
@@ -67,8 +67,6 @@ __FBSDID("$FreeBSD$");
 
 static int ctx_unmap_buf_locked(struct dmar_ctx *ctx, dmar_gaddr_t base,
     dmar_gaddr_t size, int flags);
-static void ctx_flush_iotlb(struct dmar_ctx *ctx, dmar_gaddr_t base,
-    dmar_gaddr_t size, int flags);
 
 /*
  * The cache of the identity mapping page tables for the DMARs.  Using
@@ -412,7 +410,6 @@ static int
 ctx_map_buf_locked(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size,
     vm_page_t *ma, uint64_t pflags, int flags)
 {
-       struct dmar_unit *unit;
        dmar_pte_t *pte;
        struct sf_buf *sf;
        dmar_gaddr_t pg_sz, base1, size1;
@@ -482,17 +479,6 @@ ctx_map_buf_locked(struct dmar_ctx *ctx,
        }
        if (sf != NULL)
                dmar_unmap_pgtbl(sf, DMAR_IS_COHERENT(ctx->dmar));
-       DMAR_CTX_PGUNLOCK(ctx);
-       unit = ctx->dmar;
-       if ((unit->hw_cap & DMAR_CAP_CM) != 0)
-               ctx_flush_iotlb(ctx, base1, size1, flags);
-       else if ((unit->hw_cap & DMAR_CAP_RWBF) != 0) {
-               /* See 11.1 Write Buffer Flushing. */
-               DMAR_LOCK(unit);
-               dmar_flush_write_bufs(unit);
-               DMAR_UNLOCK(unit);
-       }
-
        TD_PINNED_ASSERT;
        return (0);
 }
@@ -501,6 +487,10 @@ int
 ctx_map_buf(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size,
     vm_page_t *ma, uint64_t pflags, int flags)
 {
+       struct dmar_unit *unit;
+       int error;
+
+       unit = ctx->dmar;
 
        KASSERT((ctx->flags & DMAR_CTX_IDMAP) == 0,
            ("modifying idmap pagetable ctx %p", ctx));
@@ -527,17 +517,30 @@ ctx_map_buf(struct dmar_ctx *ctx, dmar_g
            DMAR_PTE_TM)) == 0,
            ("invalid pte flags %jx", (uintmax_t)pflags));
        KASSERT((pflags & DMAR_PTE_SNP) == 0 ||
-           (ctx->dmar->hw_ecap & DMAR_ECAP_SC) != 0,
+           (unit->hw_ecap & DMAR_ECAP_SC) != 0,
            ("PTE_SNP for dmar without snoop control %p %jx",
            ctx, (uintmax_t)pflags));
        KASSERT((pflags & DMAR_PTE_TM) == 0 ||
-           (ctx->dmar->hw_ecap & DMAR_ECAP_DI) != 0,
+           (unit->hw_ecap & DMAR_ECAP_DI) != 0,
            ("PTE_TM for dmar without DIOTLB %p %jx",
            ctx, (uintmax_t)pflags));
        KASSERT((flags & ~DMAR_PGF_WAITOK) == 0, ("invalid flags %x", flags));
 
        DMAR_CTX_PGLOCK(ctx);
-       return (ctx_map_buf_locked(ctx, base, size, ma, pflags, flags));
+       error = ctx_map_buf_locked(ctx, base, size, ma, pflags, flags);
+       DMAR_CTX_PGUNLOCK(ctx);
+       if (error != 0)
+               return (error);
+
+       if ((unit->hw_cap & DMAR_CAP_CM) != 0)
+               ctx_flush_iotlb_sync(ctx, base, size);
+       else if ((unit->hw_cap & DMAR_CAP_RWBF) != 0) {
+               /* See 11.1 Write Buffer Flushing. */
+               DMAR_LOCK(unit);
+               dmar_flush_write_bufs(unit);
+               DMAR_UNLOCK(unit);
+       }
+       return (0);
 }
 
 static void ctx_unmap_clear_pte(struct dmar_ctx *ctx, dmar_gaddr_t base,
@@ -646,8 +649,6 @@ ctx_unmap_buf_locked(struct dmar_ctx *ct
        }
        if (sf != NULL)
                dmar_unmap_pgtbl(sf, DMAR_IS_COHERENT(ctx->dmar));
-       DMAR_CTX_PGUNLOCK(ctx);
-       ctx_flush_iotlb(ctx, base1, size1, flags);
        /*
         * See 11.1 Write Buffer Flushing for an explanation why RWBF
         * can be ignored there.
@@ -661,9 +662,12 @@ int
 ctx_unmap_buf(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size,
     int flags)
 {
+       int error;
 
        DMAR_CTX_PGLOCK(ctx);
-       return (ctx_unmap_buf_locked(ctx, base, size, flags));
+       error = ctx_unmap_buf_locked(ctx, base, size, flags);
+       DMAR_CTX_PGUNLOCK(ctx);
+       return (error);
 }
 
 int
@@ -730,13 +734,8 @@ ctx_wait_iotlb_flush(struct dmar_unit *u
        return (iotlbr);
 }
 
-/*
- * flags is only intended for PGF_WAITOK, to disallow queued
- * invalidation.
- */
-static void
-ctx_flush_iotlb(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size,
-    int flags)
+void
+ctx_flush_iotlb_sync(struct dmar_ctx *ctx, dmar_gaddr_t base, dmar_gaddr_t size)
 {
        struct dmar_unit *unit;
        dmar_gaddr_t isize;
@@ -744,20 +743,8 @@ ctx_flush_iotlb(struct dmar_ctx *ctx, dm
        int am, iro;
 
        unit = ctx->dmar;
-#if 0
-       if ((unit->hw_ecap & DMAR_ECAP_QI) != 0 &&
-           (flags & DMAR_PGF_WAITOK) != 0) {
-               /*
-                * XXXKIB: There, a queued invalidation interface
-                * could be used.  But since queued and registered
-                * interfaces cannot be used simultaneously, and we
-                * must use sleep-less (i.e. register) interface when
-                * DMAR_PGF_WAITOK is not specified, only register
-                * interface is suitable.
-                */
-               return;
-       }
-#endif
+       KASSERT(!unit->qi_enabled, ("dmar%d: sync iotlb flush call",
+           unit->unit));
        iro = DMAR_ECAP_IRO(unit->hw_ecap) * 16;
        DMAR_LOCK(unit);
        if ((unit->hw_cap & DMAR_CAP_PSI) == 0 || size > 2 * 1024 * 1024) {
@@ -769,13 +756,7 @@ ctx_flush_iotlb(struct dmar_ctx *ctx, dm
                    (uintmax_t)iotlbr));
        } else {
                for (; size > 0; base += isize, size -= isize) {
-                       for (am = DMAR_CAP_MAMV(unit->hw_cap);; am--) {
-                               isize = 1ULL << (am + DMAR_PAGE_SHIFT);
-                               if ((base & (isize - 1)) == 0 && size >= isize)
-                                       break;
-                               if (am == 0)
-                                       break;
-                       }
+                       am = calc_am(unit, base, size, &isize);
                        dmar_write8(unit, iro, base | am);
                        iotlbr = ctx_wait_iotlb_flush(unit,
                            DMAR_IOTLB_IIRG_PAGE | DMAR_IOTLB_DID(ctx->domain),

Added: head/sys/x86/iommu/intel_qi.c
==============================================================================
--- /dev/null   00:00:00 1970   (empty, because file is newly added)
+++ head/sys/x86/iommu/intel_qi.c       Fri Nov  1 17:38:52 2013        (r257512)
@@ -0,0 +1,414 @@
+/*-
+ * Copyright (c) 2013 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Konstantin Belousov <k...@freebsd.org>
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_acpi.h"
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/memdesc.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+#include <sys/taskqueue.h>
+#include <sys/tree.h>
+#include <machine/bus.h>
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+#include <dev/acpica/acpivar.h>
+#include <vm/vm.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
_______________________________________________
svn-src-head@freebsd.org mailing list
http://lists.freebsd.org/mailman/listinfo/svn-src-head
To unsubscribe, send any mail to "svn-src-head-unsubscr...@freebsd.org"

Reply via email to