The branch main has been updated by andrew:

URL: https://cgit.FreeBSD.org/src/commit/?id=47e073941f4e7ca6e9bde3fa65abbfcfed6bfa2b

commit 47e073941f4e7ca6e9bde3fa65abbfcfed6bfa2b
Author:     Andrew Turner <and...@freebsd.org>
AuthorDate: 2024-01-09 15:22:27 +0000
Commit:     Andrew Turner <and...@freebsd.org>
CommitDate: 2024-02-21 18:55:32 +0000

    Import the kernel parts of bhyve/arm64
    
    To support virtual machines on arm64, add the vmm code. This is based
    on earlier work by Mihai Carabas and Alexandru Elisei at University
    Politehnica of Bucharest, with further work by Mark Johnston and myself.
    
    All AArch64 CPUs should work; however, only the GICv3 interrupt
    controller is supported. Initial groundwork is in place to allow
    GICv2 support in the future. Only pure Armv8.0 virtualisation is
    supported; the Virtualization Host Extensions are not currently used.
    
    With a separate userspace patch and U-Boot port, FreeBSD guests can
    boot to multiuser mode, and the hypervisor can be tested with the
    kvm unit tests. Linux partially boots, but hangs before entering
    userspace. Other operating systems are untested.
    
    Sponsored by:   Arm Ltd
    Sponsored by:   Innovate UK
    Sponsored by:   The FreeBSD Foundation
    Sponsored by:   University Politehnica of Bucharest
    Differential Revision:  https://reviews.freebsd.org/D37428
---
 sys/arm64/include/vmm.h                  |  362 +++++
 sys/arm64/include/vmm_dev.h              |  272 ++++
 sys/arm64/include/vmm_instruction_emul.h |   83 ++
 sys/arm64/include/vmm_snapshot.h         |    1 +
 sys/arm64/vmm/arm64.h                    |  165 +++
 sys/arm64/vmm/hyp.h                      |  114 ++
 sys/arm64/vmm/io/vgic.c                  |  122 ++
 sys/arm64/vmm/io/vgic.h                  |   52 +
 sys/arm64/vmm/io/vgic_if.m               |  104 ++
 sys/arm64/vmm/io/vgic_v3.c               | 2348 ++++++++++++++++++++++++++++++
 sys/arm64/vmm/io/vgic_v3.h               |   57 +
 sys/arm64/vmm/io/vgic_v3_reg.h           |  129 ++
 sys/arm64/vmm/io/vtimer.c                |  503 +++++++
 sys/arm64/vmm/io/vtimer.h                |   85 ++
 sys/arm64/vmm/mmu.h                      |   52 +
 sys/arm64/vmm/reset.h                    |   33 +
 sys/arm64/vmm/vmm.c                      | 1803 +++++++++++++++++++++++
 sys/arm64/vmm/vmm_arm64.c                | 1337 +++++++++++++++++
 sys/arm64/vmm/vmm_call.S                 |   39 +
 sys/arm64/vmm/vmm_dev.c                  | 1054 ++++++++++++++
 sys/arm64/vmm/vmm_hyp.c                  |  735 ++++++++++
 sys/arm64/vmm/vmm_hyp_el2.S              |   39 +
 sys/arm64/vmm/vmm_hyp_exception.S        |  384 +++++
 sys/arm64/vmm/vmm_instruction_emul.c     |  102 ++
 sys/arm64/vmm/vmm_ktr.h                  |   69 +
 sys/arm64/vmm/vmm_mmu.c                  |  430 ++++++
 sys/arm64/vmm/vmm_reset.c                |  177 +++
 sys/arm64/vmm/vmm_stat.c                 |  165 +++
 sys/arm64/vmm/vmm_stat.h                 |  145 ++
 sys/conf/files.arm64                     |   33 +
 sys/conf/ldscript.arm64                  |    2 +
 sys/conf/options.arm64                   |    3 +
 sys/modules/Makefile                     |    2 +
 sys/modules/vmm/Makefile                 |   78 +-
 34 files changed, 11066 insertions(+), 13 deletions(-)

diff --git a/sys/arm64/include/vmm.h b/sys/arm64/include/vmm.h
new file mode 100644
index 000000000000..8e2c9c868635
--- /dev/null
+++ b/sys/arm64/include/vmm.h
@@ -0,0 +1,362 @@
+/*
+ * Copyright (C) 2015 Mihai Carabas <mihai.cara...@gmail.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _VMM_H_
+#define        _VMM_H_
+
+#include <sys/param.h>
+#include <sys/cpuset.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include "pte.h"
+#include "pmap.h"
+
+struct vcpu;
+
+enum vm_suspend_how {
+       VM_SUSPEND_NONE,
+       VM_SUSPEND_RESET,
+       VM_SUSPEND_POWEROFF,
+       VM_SUSPEND_HALT,
+       VM_SUSPEND_LAST
+};
+
+/*
+ * Identifiers for architecturally defined registers.
+ */
+enum vm_reg_name {
+       VM_REG_GUEST_X0 = 0,
+       VM_REG_GUEST_X1,
+       VM_REG_GUEST_X2,
+       VM_REG_GUEST_X3,
+       VM_REG_GUEST_X4,
+       VM_REG_GUEST_X5,
+       VM_REG_GUEST_X6,
+       VM_REG_GUEST_X7,
+       VM_REG_GUEST_X8,
+       VM_REG_GUEST_X9,
+       VM_REG_GUEST_X10,
+       VM_REG_GUEST_X11,
+       VM_REG_GUEST_X12,
+       VM_REG_GUEST_X13,
+       VM_REG_GUEST_X14,
+       VM_REG_GUEST_X15,
+       VM_REG_GUEST_X16,
+       VM_REG_GUEST_X17,
+       VM_REG_GUEST_X18,
+       VM_REG_GUEST_X19,
+       VM_REG_GUEST_X20,
+       VM_REG_GUEST_X21,
+       VM_REG_GUEST_X22,
+       VM_REG_GUEST_X23,
+       VM_REG_GUEST_X24,
+       VM_REG_GUEST_X25,
+       VM_REG_GUEST_X26,
+       VM_REG_GUEST_X27,
+       VM_REG_GUEST_X28,
+       VM_REG_GUEST_X29,
+       VM_REG_GUEST_LR,
+       VM_REG_GUEST_SP,
+       VM_REG_GUEST_PC,
+       VM_REG_GUEST_CPSR,
+
+       VM_REG_GUEST_SCTLR_EL1,
+       VM_REG_GUEST_TTBR0_EL1,
+       VM_REG_GUEST_TTBR1_EL1,
+       VM_REG_GUEST_TCR_EL1,
+       VM_REG_GUEST_TCR2_EL1,
+       VM_REG_LAST
+};
+
+#define        VM_INTINFO_VECTOR(info) ((info) & 0xff)
+#define        VM_INTINFO_DEL_ERRCODE  0x800
+#define        VM_INTINFO_RSVD         0x7ffff000
+#define        VM_INTINFO_VALID        0x80000000
+#define        VM_INTINFO_TYPE         0x700
+#define        VM_INTINFO_HWINTR       (0 << 8)
+#define        VM_INTINFO_NMI          (2 << 8)
+#define        VM_INTINFO_HWEXCEPTION  (3 << 8)
+#define        VM_INTINFO_SWINTR       (4 << 8)
+
+#define VM_MAX_SUFFIXLEN 15
+
+#define VM_GUEST_BASE_IPA      0x80000000UL    /* Guest kernel start ipa */
+
+#ifdef _KERNEL
+
+#define        VM_MAX_NAMELEN  32
+
+struct vm;
+struct vm_exception;
+struct vm_exit;
+struct vm_run;
+struct vm_object;
+struct vm_guest_paging;
+struct vm_vgic_descr;
+struct pmap;
+
+struct vm_eventinfo {
+       void    *rptr;          /* rendezvous cookie */
+       int     *sptr;          /* suspend cookie */
+       int     *iptr;          /* reqidle cookie */
+};
+
+int vm_create(const char *name, struct vm **retvm);
+struct vcpu *vm_alloc_vcpu(struct vm *vm, int vcpuid);
+void vm_slock_vcpus(struct vm *vm);
+void vm_unlock_vcpus(struct vm *vm);
+void vm_destroy(struct vm *vm);
+int vm_reinit(struct vm *vm);
+const char *vm_name(struct vm *vm);
+
+/*
+ * APIs that modify the guest memory map require all vcpus to be frozen.
+ */
+void vm_slock_memsegs(struct vm *vm);
+void vm_xlock_memsegs(struct vm *vm);
+void vm_unlock_memsegs(struct vm *vm);
+int vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t off,
+    size_t len, int prot, int flags);
+int vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len);
+int vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem);
+void vm_free_memseg(struct vm *vm, int ident);
+
+/*
+ * APIs that inspect the guest memory map require only a *single* vcpu to
+ * be frozen. This acts like a read lock on the guest memory map since any
+ * modification requires *all* vcpus to be frozen.
+ */
+int vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid,
+    vm_ooffset_t *segoff, size_t *len, int *prot, int *flags);
+int vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem,
+    struct vm_object **objptr);
+vm_paddr_t vmm_sysmem_maxaddr(struct vm *vm);
+void *vm_gpa_hold(struct vcpu *vcpu, vm_paddr_t gpa, size_t len,
+    int prot, void **cookie);
+void *vm_gpa_hold_global(struct vm *vm, vm_paddr_t gpa, size_t len,
+    int prot, void **cookie);
+void vm_gpa_release(void *cookie);
+bool vm_mem_allocated(struct vcpu *vcpu, vm_paddr_t gpa);
+
+int vm_gla2gpa_nofault(struct vcpu *vcpu, struct vm_guest_paging *paging,
+    uint64_t gla, int prot, uint64_t *gpa, int *is_fault);
+
+uint16_t vm_get_maxcpus(struct vm *vm);
+void vm_get_topology(struct vm *vm, uint16_t *sockets, uint16_t *cores,
+    uint16_t *threads, uint16_t *maxcpus);
+int vm_set_topology(struct vm *vm, uint16_t sockets, uint16_t cores,
+    uint16_t threads, uint16_t maxcpus);
+int vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval);
+int vm_set_register(struct vcpu *vcpu, int reg, uint64_t val);
+int vm_run(struct vcpu *vcpu);
+int vm_suspend(struct vm *vm, enum vm_suspend_how how);
+void* vm_get_cookie(struct vm *vm);
+int vcpu_vcpuid(struct vcpu *vcpu);
+void *vcpu_get_cookie(struct vcpu *vcpu);
+struct vm *vcpu_vm(struct vcpu *vcpu);
+struct vcpu *vm_vcpu(struct vm *vm, int cpu);
+int vm_get_capability(struct vcpu *vcpu, int type, int *val);
+int vm_set_capability(struct vcpu *vcpu, int type, int val);
+int vm_activate_cpu(struct vcpu *vcpu);
+int vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu);
+int vm_resume_cpu(struct vm *vm, struct vcpu *vcpu);
+int vm_inject_exception(struct vcpu *vcpu, uint64_t esr, uint64_t far);
+int vm_attach_vgic(struct vm *vm, struct vm_vgic_descr *descr);
+int vm_assert_irq(struct vm *vm, uint32_t irq);
+int vm_deassert_irq(struct vm *vm, uint32_t irq);
+int vm_raise_msi(struct vm *vm, uint64_t msg, uint64_t addr, int bus, int slot,
+    int func);
+struct vm_exit *vm_exitinfo(struct vcpu *vcpu);
+void vm_exit_suspended(struct vcpu *vcpu, uint64_t pc);
+void vm_exit_debug(struct vcpu *vcpu, uint64_t pc);
+void vm_exit_rendezvous(struct vcpu *vcpu, uint64_t pc);
+void vm_exit_astpending(struct vcpu *vcpu, uint64_t pc);
+
+cpuset_t vm_active_cpus(struct vm *vm);
+cpuset_t vm_debug_cpus(struct vm *vm);
+cpuset_t vm_suspended_cpus(struct vm *vm);
+
+static __inline bool
+virt_enabled(void)
+{
+
+       return (has_hyp());
+}
+
+static __inline int
+vcpu_rendezvous_pending(struct vm_eventinfo *info)
+{
+
+       return (*((uintptr_t *)(info->rptr)) != 0);
+}
+
+static __inline int
+vcpu_suspended(struct vm_eventinfo *info)
+{
+
+       return (*info->sptr);
+}
+
+int vcpu_debugged(struct vcpu *vcpu);
+
+enum vcpu_state {
+       VCPU_IDLE,
+       VCPU_FROZEN,
+       VCPU_RUNNING,
+       VCPU_SLEEPING,
+};
+
+int vcpu_set_state(struct vcpu *vcpu, enum vcpu_state state, bool from_idle);
+enum vcpu_state vcpu_get_state(struct vcpu *vcpu, int *hostcpu);
+
+static int __inline
+vcpu_is_running(struct vcpu *vcpu, int *hostcpu)
+{
+       return (vcpu_get_state(vcpu, hostcpu) == VCPU_RUNNING);
+}
+
+#ifdef _SYS_PROC_H_
+static int __inline
+vcpu_should_yield(struct vcpu *vcpu)
+{
+       struct thread *td;
+
+       td = curthread;
+       return (td->td_ast != 0 || td->td_owepreempt != 0);
+}
+#endif
+
+void *vcpu_stats(struct vcpu *vcpu);
+void vcpu_notify_event(struct vcpu *vcpu);
+
+enum vm_reg_name vm_segment_name(int seg_encoding);
+
+struct vm_copyinfo {
+       uint64_t        gpa;
+       size_t          len;
+       void            *hva;
+       void            *cookie;
+};
+
+#endif /* _KERNEL */
+
+#define        VM_DIR_READ     0
+#define        VM_DIR_WRITE    1
+
+#define        VM_GP_M_MASK            0x1f
+#define        VM_GP_MMU_ENABLED       (1 << 5)
+
+struct vm_guest_paging {
+       uint64_t        ttbr0_addr;
+       uint64_t        ttbr1_addr;
+       uint64_t        tcr_el1;
+       uint64_t        tcr2_el1;
+       int             flags;
+       int             padding;
+};
+
+struct vie {
+       uint8_t access_size:4, sign_extend:1, dir:1, unused:2;
+       enum vm_reg_name reg;
+};
+
+struct vre {
+       uint32_t inst_syndrome;
+       uint8_t dir:1, unused:7;
+       enum vm_reg_name reg;
+};
+
+/*
+ * Identifiers for optional vmm capabilities
+ */
+enum vm_cap_type {
+       VM_CAP_HALT_EXIT,
+       VM_CAP_MTRAP_EXIT,
+       VM_CAP_PAUSE_EXIT,
+       VM_CAP_UNRESTRICTED_GUEST,
+       VM_CAP_MAX
+};
+
+enum vm_exitcode {
+       VM_EXITCODE_BOGUS,
+       VM_EXITCODE_INST_EMUL,
+       VM_EXITCODE_REG_EMUL,
+       VM_EXITCODE_HVC,
+       VM_EXITCODE_SUSPENDED,
+       VM_EXITCODE_HYP,
+       VM_EXITCODE_WFI,
+       VM_EXITCODE_PAGING,
+       VM_EXITCODE_SMCCC,
+       VM_EXITCODE_DEBUG,
+       VM_EXITCODE_MAX
+};
+
+struct vm_exit {
+       enum vm_exitcode        exitcode;
+       int                     inst_length;
+       uint64_t                pc;
+       union {
+               /*
+                * ARM specific payload.
+                */
+               struct {
+                       uint32_t        exception_nr;
+                       uint32_t        pad;
+                       uint64_t        esr_el2;        /* Exception Syndrome Register */
+                       uint64_t        far_el2;        /* Fault Address Register */
+                       uint64_t        hpfar_el2;      /* Hypervisor IPA Fault Address Register */
+               } hyp;
+               struct {
+                       struct vre      vre;
+               } reg_emul;
+               struct {
+                       uint64_t        gpa;
+                       uint64_t        esr;
+               } paging;
+               struct {
+                       uint64_t        gpa;
+                       struct vm_guest_paging paging;
+                       struct vie      vie;
+               } inst_emul;
+
+               /*
+                * A SMCCC call, e.g. starting a core via PSCI.
+                * Further arguments can be read by asking the kernel for
+                * all register values.
+                */
+               struct {
+                       uint64_t        func_id;
+                       uint64_t        args[7];
+               } smccc_call;
+
+               struct {
+                       enum vm_suspend_how how;
+               } suspended;
+       } u;
+};
+
+#endif /* _VMM_H_ */
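
The memory-map locking rules documented in vmm.h above lend themselves to a
short kernel-side sketch. The segment id, the 64MB size, and the use of
VM_PROT_RW below are illustrative assumptions, not part of this commit:

    static int
    map_guest_ram(struct vm *vm)
    {
            int error, segid = 0;   /* hypothetical segment identifier */

            /* Exclusive lock: map modifications need every vcpu frozen. */
            vm_xlock_memsegs(vm);
            error = vm_alloc_memseg(vm, segid, 64 * 1024 * 1024, true);
            if (error == 0)
                    error = vm_mmap_memseg(vm, VM_GUEST_BASE_IPA, segid, 0,
                        64 * 1024 * 1024, VM_PROT_RW, 0);
            vm_unlock_memsegs(vm);
            return (error);
    }

Readers such as vm_mmap_getnext() instead take vm_slock_memsegs() with only a
single vcpu frozen, which is why the comment describes the scheme as a read
lock on the guest memory map.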
diff --git a/sys/arm64/include/vmm_dev.h b/sys/arm64/include/vmm_dev.h
new file mode 100644
index 000000000000..9e229665a71e
--- /dev/null
+++ b/sys/arm64/include/vmm_dev.h
@@ -0,0 +1,272 @@
+/*
+ * Copyright (C) 2015 Mihai Carabas <mihai.cara...@gmail.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef        _VMM_DEV_H_
+#define        _VMM_DEV_H_
+
+#ifdef _KERNEL
+void   vmmdev_init(void);
+int    vmmdev_cleanup(void);
+#endif
+
+struct vm_memmap {
+       vm_paddr_t      gpa;
+       int             segid;          /* memory segment */
+       vm_ooffset_t    segoff;         /* offset into memory segment */
+       size_t          len;            /* mmap length */
+       int             prot;           /* RWX */
+       int             flags;
+};
+#define        VM_MEMMAP_F_WIRED       0x01
+
+struct vm_munmap {
+       vm_paddr_t      gpa;
+       size_t          len;
+};
+
+#define        VM_MEMSEG_NAME(m)       ((m)->name[0] != '\0' ? (m)->name : NULL)
+struct vm_memseg {
+       int             segid;
+       size_t          len;
+       char            name[VM_MAX_SUFFIXLEN + 1];
+};
+
+struct vm_register {
+       int             cpuid;
+       int             regnum;         /* enum vm_reg_name */
+       uint64_t        regval;
+};
+
+struct vm_register_set {
+       int             cpuid;
+       unsigned int    count;
+       const int       *regnums;       /* enum vm_reg_name */
+       uint64_t        *regvals;
+};
+
+struct vm_run {
+       int             cpuid;
+       cpuset_t        *cpuset;        /* CPU set storage */
+       size_t          cpusetsize;
+       struct vm_exit  *vm_exit;
+};
+
+struct vm_exception {
+       int             cpuid;
+       uint64_t        esr;
+       uint64_t        far;
+};
+
+struct vm_msi {
+       uint64_t        msg;
+       uint64_t        addr;
+       int             bus;
+       int             slot;
+       int             func;
+};
+
+struct vm_capability {
+       int             cpuid;
+       enum vm_cap_type captype;
+       int             capval;
+       int             allcpus;
+};
+
+#define        MAX_VM_STATS    64
+struct vm_stats {
+       int             cpuid;                          /* in */
+       int             index;                          /* in */
+       int             num_entries;                    /* out */
+       struct timeval  tv;
+       uint64_t        statbuf[MAX_VM_STATS];
+};
+struct vm_stat_desc {
+       int             index;                          /* in */
+       char            desc[128];                      /* out */
+};
+
+struct vm_suspend {
+       enum vm_suspend_how how;
+};
+
+struct vm_gla2gpa {
+       int             vcpuid;         /* inputs */
+       int             prot;           /* PROT_READ or PROT_WRITE */
+       uint64_t        gla;
+       struct vm_guest_paging paging;
+       int             fault;          /* outputs */
+       uint64_t        gpa;
+};
+
+struct vm_activate_cpu {
+       int             vcpuid;
+};
+
+struct vm_cpuset {
+       int             which;
+       int             cpusetsize;
+       cpuset_t        *cpus;
+};
+#define        VM_ACTIVE_CPUS          0
+#define        VM_SUSPENDED_CPUS       1
+#define        VM_DEBUG_CPUS           2
+
+struct vm_vgic_version {
+       u_int version;
+       u_int flags;
+};
+
+struct vm_vgic_descr {
+       struct vm_vgic_version ver;
+       union {
+               struct {
+                       uint64_t dist_start;
+                       uint64_t dist_size;
+                       uint64_t redist_start;
+                       uint64_t redist_size;
+               } v3_regs;
+       };
+};
+
+struct vm_irq {
+       uint32_t irq;
+};
+
+struct vm_cpu_topology {
+       uint16_t        sockets;
+       uint16_t        cores;
+       uint16_t        threads;
+       uint16_t        maxcpus;
+};
+
+enum {
+       /* general routines */
+       IOCNUM_ABIVERS = 0,
+       IOCNUM_RUN = 1,
+       IOCNUM_SET_CAPABILITY = 2,
+       IOCNUM_GET_CAPABILITY = 3,
+       IOCNUM_SUSPEND = 4,
+       IOCNUM_REINIT = 5,
+
+       /* memory apis */
+       IOCNUM_GET_GPA_PMAP = 12,
+       IOCNUM_GLA2GPA_NOFAULT = 13,
+       IOCNUM_ALLOC_MEMSEG = 14,
+       IOCNUM_GET_MEMSEG = 15,
+       IOCNUM_MMAP_MEMSEG = 16,
+       IOCNUM_MMAP_GETNEXT = 17,
+       IOCNUM_MUNMAP_MEMSEG = 18,
+
+       /* register/state accessors */
+       IOCNUM_SET_REGISTER = 20,
+       IOCNUM_GET_REGISTER = 21,
+       IOCNUM_SET_REGISTER_SET = 24,
+       IOCNUM_GET_REGISTER_SET = 25,
+
+       /* statistics */
+       IOCNUM_VM_STATS = 50, 
+       IOCNUM_VM_STAT_DESC = 51,
+
+       /* CPU Topology */
+       IOCNUM_SET_TOPOLOGY = 63,
+       IOCNUM_GET_TOPOLOGY = 64,
+
+       /* interrupt injection */
+       IOCNUM_ASSERT_IRQ = 80,
+       IOCNUM_DEASSERT_IRQ = 81,
+       IOCNUM_RAISE_MSI = 82,
+       IOCNUM_INJECT_EXCEPTION = 83,
+
+       /* vm_cpuset */
+       IOCNUM_ACTIVATE_CPU = 90,
+       IOCNUM_GET_CPUSET = 91,
+       IOCNUM_SUSPEND_CPU = 92,
+       IOCNUM_RESUME_CPU = 93,
+
+       /* vm_attach_vgic */
+       IOCNUM_GET_VGIC_VERSION = 110,
+       IOCNUM_ATTACH_VGIC = 111,
+};
+
+#define        VM_RUN          \
+       _IOWR('v', IOCNUM_RUN, struct vm_run)
+#define        VM_SUSPEND      \
+       _IOW('v', IOCNUM_SUSPEND, struct vm_suspend)
+#define        VM_REINIT       \
+       _IO('v', IOCNUM_REINIT)
+#define        VM_ALLOC_MEMSEG \
+       _IOW('v', IOCNUM_ALLOC_MEMSEG, struct vm_memseg)
+#define        VM_GET_MEMSEG   \
+       _IOWR('v', IOCNUM_GET_MEMSEG, struct vm_memseg)
+#define        VM_MMAP_MEMSEG  \
+       _IOW('v', IOCNUM_MMAP_MEMSEG, struct vm_memmap)
+#define        VM_MMAP_GETNEXT \
+       _IOWR('v', IOCNUM_MMAP_GETNEXT, struct vm_memmap)
+#define        VM_MUNMAP_MEMSEG        \
+       _IOW('v', IOCNUM_MUNMAP_MEMSEG, struct vm_munmap)
+#define        VM_SET_REGISTER \
+       _IOW('v', IOCNUM_SET_REGISTER, struct vm_register)
+#define        VM_GET_REGISTER \
+       _IOWR('v', IOCNUM_GET_REGISTER, struct vm_register)
+#define        VM_SET_REGISTER_SET \
+       _IOW('v', IOCNUM_SET_REGISTER_SET, struct vm_register_set)
+#define        VM_GET_REGISTER_SET \
+       _IOWR('v', IOCNUM_GET_REGISTER_SET, struct vm_register_set)
+#define        VM_SET_CAPABILITY \
+       _IOW('v', IOCNUM_SET_CAPABILITY, struct vm_capability)
+#define        VM_GET_CAPABILITY \
+       _IOWR('v', IOCNUM_GET_CAPABILITY, struct vm_capability)
+#define        VM_STATS \
+       _IOWR('v', IOCNUM_VM_STATS, struct vm_stats)
+#define        VM_STAT_DESC \
+       _IOWR('v', IOCNUM_VM_STAT_DESC, struct vm_stat_desc)
+#define VM_ASSERT_IRQ \
+       _IOW('v', IOCNUM_ASSERT_IRQ, struct vm_irq)
+#define VM_DEASSERT_IRQ \
+       _IOW('v', IOCNUM_DEASSERT_IRQ, struct vm_irq)
+#define VM_RAISE_MSI \
+       _IOW('v', IOCNUM_RAISE_MSI, struct vm_msi)
+#define        VM_INJECT_EXCEPTION     \
+       _IOW('v', IOCNUM_INJECT_EXCEPTION, struct vm_exception)
+#define VM_SET_TOPOLOGY \
+       _IOW('v', IOCNUM_SET_TOPOLOGY, struct vm_cpu_topology)
+#define VM_GET_TOPOLOGY \
+       _IOR('v', IOCNUM_GET_TOPOLOGY, struct vm_cpu_topology)
+#define        VM_GLA2GPA_NOFAULT \
+       _IOWR('v', IOCNUM_GLA2GPA_NOFAULT, struct vm_gla2gpa)
+#define        VM_ACTIVATE_CPU \
+       _IOW('v', IOCNUM_ACTIVATE_CPU, struct vm_activate_cpu)
+#define        VM_GET_CPUS     \
+       _IOW('v', IOCNUM_GET_CPUSET, struct vm_cpuset)
+#define        VM_SUSPEND_CPU \
+       _IOW('v', IOCNUM_SUSPEND_CPU, struct vm_activate_cpu)
+#define        VM_RESUME_CPU \
+       _IOW('v', IOCNUM_RESUME_CPU, struct vm_activate_cpu)
+#define        VM_GET_VGIC_VERSION     \
+       _IOR('v', IOCNUM_GET_VGIC_VERSION, struct vm_vgic_version)
+#define        VM_ATTACH_VGIC  \
+       _IOW('v', IOCNUM_ATTACH_VGIC, struct vm_vgic_descr)
+#endif
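
Read together, these ioctls sketch the shape of the userspace run loop that
the separate userland patch will provide. A minimal sketch under stated
assumptions: the fd is presumed to come from opening the VM's control device
(on x86 bhyve this is /dev/vmm/<name>), and the exit handling shown is purely
illustrative:

    #include <sys/param.h>
    #include <sys/cpuset.h>
    #include <sys/ioctl.h>

    #include <machine/vmm.h>
    #include <machine/vmm_dev.h>

    static void
    run_vcpu0(int vmfd)
    {
            struct vm_exit vmexit;
            cpuset_t cpus;          /* storage for suspended-CPU reporting */
            struct vm_run vmrun = {
                    .cpuid = 0,
                    .cpuset = &cpus,
                    .cpusetsize = sizeof(cpus),
                    .vm_exit = &vmexit,
            };

            for (;;) {
                    if (ioctl(vmfd, VM_RUN, &vmrun) != 0)
                            break;
                    switch (vmexit.exitcode) {
                    case VM_EXITCODE_SMCCC:
                            /*
                             * E.g. a PSCI call: vmexit.u.smccc_call.func_id
                             * names it, args[] holds the first arguments.
                             */
                            break;
                    case VM_EXITCODE_SUSPENDED:
                            return; /* vmexit.u.suspended.how says why */
                    default:
                            break;
                    }
            }
    }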
diff --git a/sys/arm64/include/vmm_instruction_emul.h b/sys/arm64/include/vmm_instruction_emul.h
new file mode 100644
index 000000000000..a295f7cce127
--- /dev/null
+++ b/sys/arm64/include/vmm_instruction_emul.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2015 Mihai Carabas <mihai.cara...@gmail.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef        _VMM_INSTRUCTION_EMUL_H_
+#define        _VMM_INSTRUCTION_EMUL_H_
+
+/*
+ * Callback functions to read and write memory regions.
+ */
+typedef int (*mem_region_read_t)(struct vcpu *vcpu, uint64_t gpa,
+                                uint64_t *rval, int rsize, void *arg);
+typedef int (*mem_region_write_t)(struct vcpu *vcpu, uint64_t gpa,
+                                 uint64_t wval, int wsize, void *arg);
+
+/*
+ * Callback functions to read and write registers.
+ */
+typedef int (*reg_read_t)(struct vcpu *vcpu, uint64_t *rval, void *arg);
+typedef int (*reg_write_t)(struct vcpu *vcpu, uint64_t wval, void *arg);
+
+/*
+ * Emulate the decoded 'vie' instruction when it contains a memory operation.
+ *
+ * The callbacks 'mrr' and 'mrw' emulate reads and writes to the memory region
+ * containing 'gpa'. 'mrarg' is an opaque argument that is passed into the
+ * callback functions.
+ *
+ * 'void *vm' should be 'struct vm *' when called from kernel context and
+ * 'struct vmctx *' when called from user context.
+ *
+ */
+int vmm_emulate_instruction(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
+    struct vm_guest_paging *paging, mem_region_read_t mrr,
+    mem_region_write_t mrw, void *mrarg);
+
+/*
+ * Emulate the decoded 'vre' instruction when it contains a register access.
+ *
+ * The callbacks 'regread' and 'regwrite' emulate reads and writes to the
+ * register from 'vie'. 'regarg' is an opaque argument that is passed into the
+ * callback functions.
+ *
+ * 'void *vm' should be 'struct vm *' when called from kernel context and
+ * 'struct vmctx *' when called from user context.
+ *
+ */
+int vmm_emulate_register(struct vcpu *vcpu, struct vre *vre, reg_read_t regread,
+    reg_write_t regwrite, void *regarg);
+
+#ifdef _KERNEL
+void vm_register_reg_handler(struct vm *vm, uint64_t iss, uint64_t mask,
+    reg_read_t reg_read, reg_write_t reg_write, void *arg);
+void vm_deregister_reg_handler(struct vm *vm, uint64_t iss, uint64_t mask);
+
+void vm_register_inst_handler(struct vm *vm, uint64_t start, uint64_t size,
+    mem_region_read_t mmio_read, mem_region_write_t mmio_write);
+void vm_deregister_inst_handler(struct vm *vm, uint64_t start, uint64_t size);
+#endif
+
+#endif /* _VMM_INSTRUCTION_EMUL_H_ */
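
The registration API above pairs the callback typedefs with a guest physical
range. A minimal in-kernel sketch of a read-as-zero, write-ignored MMIO
region; the 0x09000000 IPA and the one-page size are invented for the example:

    static int
    dummy_mmio_read(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval, int rsize,
        void *arg)
    {
            *rval = 0;              /* all reads return zero */
            return (0);
    }

    static int
    dummy_mmio_write(struct vcpu *vcpu, uint64_t gpa, uint64_t wval, int wsize,
        void *arg)
    {
            return (0);             /* all writes are discarded */
    }

    static void
    dummy_mmio_attach(struct vm *vm)
    {
            vm_register_inst_handler(vm, 0x09000000UL, 0x1000,
                dummy_mmio_read, dummy_mmio_write);
    }

Accesses matching no registered handler presumably surface to userspace as
VM_EXITCODE_INST_EMUL, where vmm_emulate_instruction() can be replayed against
userspace callbacks, per the 'struct vmctx *' note in the comments.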
diff --git a/sys/arm64/include/vmm_snapshot.h b/sys/arm64/include/vmm_snapshot.h
new file mode 100644
index 000000000000..da23dbe43a4f
--- /dev/null
+++ b/sys/arm64/include/vmm_snapshot.h
@@ -0,0 +1 @@
+/* $FreeBSD$ */
diff --git a/sys/arm64/vmm/arm64.h b/sys/arm64/vmm/arm64.h
new file mode 100644
index 000000000000..43459d14e143
--- /dev/null
+++ b/sys/arm64/vmm/arm64.h
@@ -0,0 +1,165 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (C) 2015 Mihai Carabas <mihai.cara...@gmail.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#ifndef _VMM_ARM64_H_
+#define _VMM_ARM64_H_
+
+#include <machine/reg.h>
+#include <machine/hypervisor.h>
+#include <machine/pcpu.h>
+
+#include "mmu.h"
+#include "io/vgic_v3.h"
+#include "io/vtimer.h"
+
+struct vgic_v3;
+struct vgic_v3_cpu;
+
+struct hypctx {
+       struct trapframe tf;
+
+       /*
+        * EL1 control registers.
+        */
+       uint64_t        elr_el1;        /* Exception Link Register */
+       uint64_t        sp_el0;         /* Stack pointer */
+       uint64_t        tpidr_el0;      /* EL0 Software ID Register */
+       uint64_t        tpidrro_el0;    /* Read-only Thread ID Register */
+       uint64_t        tpidr_el1;      /* EL1 Software ID Register */
+       uint64_t        vbar_el1;       /* Vector Base Address Register */
+
+       uint64_t        actlr_el1;      /* Auxiliary Control Register */
+       uint64_t        afsr0_el1;      /* Auxiliary Fault Status Register 0 */
+       uint64_t        afsr1_el1;      /* Auxiliary Fault Status Register 1 */
+       uint64_t        amair_el1;      /* Auxiliary Memory Attribute Indirection Register */
+       uint64_t        contextidr_el1; /* Current Process Identifier */
+       uint64_t        cpacr_el1;      /* Architectural Feature Access Control Register */
+       uint64_t        csselr_el1;     /* Cache Size Selection Register */
+       uint64_t        esr_el1;        /* Exception Syndrome Register */
+       uint64_t        far_el1;        /* Fault Address Register */
+       uint64_t        mair_el1;       /* Memory Attribute Indirection Register */
+       uint64_t        mdccint_el1;    /* Monitor DCC Interrupt Enable Register */
+       uint64_t        mdscr_el1;      /* Monitor Debug System Control Register */
+       uint64_t        par_el1;        /* Physical Address Register */
+       uint64_t        sctlr_el1;      /* System Control Register */
+       uint64_t        tcr_el1;        /* Translation Control Register */
+       uint64_t        tcr2_el1;       /* Translation Control Register 2 */
+       uint64_t        ttbr0_el1;      /* Translation Table Base Register 0 */
+       uint64_t        ttbr1_el1;      /* Translation Table Base Register 1 */
+       uint64_t        spsr_el1;       /* Saved Program Status Register */
+
+       uint64_t        pmcr_el0;       /* Performance Monitors Control Register */
+       uint64_t        pmccntr_el0;
+       uint64_t        pmccfiltr_el0;
+       uint64_t        pmcntenset_el0;
+       uint64_t        pmintenset_el1;
+       uint64_t        pmovsset_el0;
+       uint64_t        pmselr_el0;
+       uint64_t        pmuserenr_el0;
+       uint64_t        pmevcntr_el0[31];
+       uint64_t        pmevtyper_el0[31];
+
+       uint64_t        dbgbcr_el1[16]; /* Debug Breakpoint Control Registers */
+       uint64_t        dbgbvr_el1[16]; /* Debug Breakpoint Value Registers */
+       uint64_t        dbgwcr_el1[16]; /* Debug Watchpoint Control Registers */
+       uint64_t        dbgwvr_el1[16]; /* Debug Watchpoint Value Registers */
+
+       /* EL2 control registers */
+       uint64_t        cptr_el2;       /* Architectural Feature Trap Register */
+       uint64_t        hcr_el2;        /* Hypervisor Configuration Register */
+       uint64_t        mdcr_el2;       /* Monitor Debug Configuration Register */
+       uint64_t        vpidr_el2;      /* Virtualization Processor ID Register */
+       uint64_t        vmpidr_el2;     /* Virtualization Multiprocessor ID Register */
+       uint64_t        el2_addr;       /* The address of this in el2 space */
+       struct hyp      *hyp;
+       struct vcpu     *vcpu;
+       struct {
+               uint64_t        far_el2;        /* Fault Address Register */
+               uint64_t        hpfar_el2;      /* Hypervisor IPA Fault Address Register */
+       } exit_info;
+
+       struct vtimer_cpu       vtimer_cpu;
+
+       struct vgic_v3_regs     vgic_v3_regs;
+       struct vgic_v3_cpu      *vgic_cpu;
+       bool                    has_exception;
+};
+
+struct hyp {
+       struct vm       *vm;
+       struct vtimer   vtimer;
+       uint64_t        vmid_generation;
+       uint64_t        vttbr_el2;
+       uint64_t        el2_addr;       /* The address of this in el2 space */
+       bool            vgic_attached;
+       struct vgic_v3  *vgic;
+       struct hypctx   *ctx[];
+};
+
+#define        DEFINE_VMMOPS_IFUNC(ret_type, opname, args)                     \
+       ret_type vmmops_##opname args;
+
+DEFINE_VMMOPS_IFUNC(int, modinit, (int ipinum))
+DEFINE_VMMOPS_IFUNC(int, modcleanup, (void))
+DEFINE_VMMOPS_IFUNC(void *, init, (struct vm *vm, struct pmap *pmap))
+DEFINE_VMMOPS_IFUNC(int, gla2gpa, (void *vcpui, struct vm_guest_paging *paging,
+    uint64_t gla, int prot, uint64_t *gpa, int *is_fault))
+DEFINE_VMMOPS_IFUNC(int, run, (void *vcpui, register_t pc, struct pmap *pmap,
+    struct vm_eventinfo *info))
+DEFINE_VMMOPS_IFUNC(void, cleanup, (void *vmi))
+DEFINE_VMMOPS_IFUNC(void *, vcpu_init, (void *vmi, struct vcpu *vcpu,
+    int vcpu_id))
+DEFINE_VMMOPS_IFUNC(void, vcpu_cleanup, (void *vcpui))
+DEFINE_VMMOPS_IFUNC(int, exception, (void *vcpui, uint64_t esr, uint64_t far))
+DEFINE_VMMOPS_IFUNC(int, getreg, (void *vcpui, int num, uint64_t *retval))
+DEFINE_VMMOPS_IFUNC(int, setreg, (void *vcpui, int num, uint64_t val))
+DEFINE_VMMOPS_IFUNC(int, getcap, (void *vcpui, int num, int *retval))
+DEFINE_VMMOPS_IFUNC(int, setcap, (void *vcpui, int num, int val))
+DEFINE_VMMOPS_IFUNC(struct vmspace *, vmspace_alloc, (vm_offset_t min,
+    vm_offset_t max))
+DEFINE_VMMOPS_IFUNC(void, vmspace_free, (struct vmspace *vmspace))
+#ifdef notyet
+#ifdef BHYVE_SNAPSHOT
+DEFINE_VMMOPS_IFUNC(int, snapshot, (void *vmi, struct vm_snapshot_meta *meta))
+DEFINE_VMMOPS_IFUNC(int, vcpu_snapshot, (void *vcpui,
+    struct vm_snapshot_meta *meta))
+DEFINE_VMMOPS_IFUNC(int, restore_tsc, (void *vcpui, uint64_t now))
+#endif
+#endif
+
+uint64_t       vmm_call_hyp(uint64_t, ...);
+
+#if 0
+#define        eprintf(fmt, ...)       printf("%s:%d " fmt, __func__, __LINE__, ##__VA_ARGS__)
+#else
+#define        eprintf(fmt, ...)       do {} while(0)
+#endif
+
+struct hypctx *arm64_get_active_vcpu(void);
+void raise_data_insn_abort(struct hypctx *, uint64_t, bool, int);
+
+#endif /* !_VMM_ARM64_H_ */
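
DEFINE_VMMOPS_IFUNC only declares prototypes; the vmmops_* functions are the
hooks through which the machine-independent vmm.c reaches the arm64 backend
(presumably vmm_arm64.c, per the diffstat above). For example, the getreg
entry expands to:

    int vmmops_getreg(void *vcpui, int num, uint64_t *retval);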
diff --git a/sys/arm64/vmm/hyp.h b/sys/arm64/vmm/hyp.h
new file mode 100644
index 000000000000..0b2977c73960
--- /dev/null
+++ b/sys/arm64/vmm/hyp.h
*** 10428 LINES SKIPPED ***

