This implements the pre_save and post_load methods for the in-kernel vGICv3, replacing the previous stubs. Distributor, redistributor and CPU interface state is read from and written back to the kernel through the KVM device attribute interface, and a migration blocker is registered when the host kernel does not expose the required register groups.
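For context, a rough sketch of how the common GICv3 migration code is expected to invoke these hooks is shown below; the gicv3_pre_save/gicv3_post_load names and the pre_save/post_load members of ARMGICv3CommonClass are assumptions for illustration only and are not part of this patch:

    /* Sketch only: assumed wiring in hw/intc/arm_gicv3_common.c. */
    static void gicv3_pre_save(void *opaque)
    {
        GICv3State *s = opaque;
        ARMGICv3CommonClass *c = ARM_GICV3_COMMON_GET_CLASS(s);

        if (c->pre_save) {
            c->pre_save(s);     /* KVM backend: kvm_arm_gicv3_get() */
        }
    }

    static int gicv3_post_load(void *opaque, int version_id)
    {
        GICv3State *s = opaque;
        ARMGICv3CommonClass *c = ARM_GICV3_COMMON_GET_CLASS(s);

        if (c->post_load) {
            c->post_load(s);    /* KVM backend: kvm_arm_gicv3_put() */
        }
        return 0;
    }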
Signed-off-by: Pavel Fedin <p.fe...@samsung.com>
---
 hw/intc/arm_gicv3_kvm.c | 456 +++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 452 insertions(+), 4 deletions(-)

diff --git a/hw/intc/arm_gicv3_kvm.c b/hw/intc/arm_gicv3_kvm.c
index b48f78f..ce8d2a0 100644
--- a/hw/intc/arm_gicv3_kvm.c
+++ b/hw/intc/arm_gicv3_kvm.c
@@ -21,8 +21,11 @@
 #include "hw/intc/arm_gicv3_common.h"
 #include "hw/sysbus.h"
+#include "migration/migration.h"
+#include "qemu/error-report.h"
 #include "sysemu/kvm.h"
 #include "kvm_arm.h"
+#include "gicv3_internal.h"
 #include "vgic_common.h"
 
 #ifdef DEBUG_GICV3_KVM
@@ -41,6 +44,23 @@
 #define KVM_ARM_GICV3_GET_CLASS(obj) \
     OBJECT_GET_CLASS(KVMARMGICv3Class, (obj), TYPE_KVM_ARM_GICV3)
 
+#define ICC_PMR_EL1 \
+    KVM_DEV_ARM_VGIC_SYSREG(0b11, 0b000, 0b0100, 0b0110, 0b000)
+#define ICC_BPR0_EL1 \
+    KVM_DEV_ARM_VGIC_SYSREG(0b11, 0b000, 0b1100, 0b1000, 0b011)
+#define ICC_APR0_EL1(n) \
+    KVM_DEV_ARM_VGIC_SYSREG(0b11, 0b000, 0b1100, 0b1000, 0b100 | n)
+#define ICC_APR1_EL1(n) \
+    KVM_DEV_ARM_VGIC_SYSREG(0b11, 0b000, 0b1100, 0b1001, 0b000 | n)
+#define ICC_BPR1_EL1 \
+    KVM_DEV_ARM_VGIC_SYSREG(0b11, 0b000, 0b1100, 0b1100, 0b011)
+#define ICC_CTLR_EL1 \
+    KVM_DEV_ARM_VGIC_SYSREG(0b11, 0b000, 0b1100, 0b1100, 0b100)
+#define ICC_IGRPEN0_EL1 \
+    KVM_DEV_ARM_VGIC_SYSREG(0b11, 0b000, 0b1100, 0b1100, 0b110)
+#define ICC_IGRPEN1_EL1 \
+    KVM_DEV_ARM_VGIC_SYSREG(0b11, 0b000, 0b1100, 0b1100, 0b111)
+
 typedef struct KVMARMGICv3Class {
     ARMGICv3CommonClass parent_class;
     DeviceRealize parent_realize;
@@ -54,16 +74,431 @@ static void kvm_arm_gicv3_set_irq(void *opaque, int irq, int level)
     kvm_arm_gic_set_irq(s->num_irq, irq, level);
 }
 
+#define VGIC_CPUID(cpuid) ((((cpuid) & ARM_AFF3_MASK) >> 8) | \
+                           ((cpuid) & ARM32_AFFINITY_MASK))
+#define KVM_VGIC_ATTR(reg, cpuid) \
+    ((VGIC_CPUID(cpuid) << KVM_DEV_ARM_VGIC_CPUID_SHIFT) | (reg))
+
+static inline void kvm_gicd_access(GICv3State *s, int offset, int cpu,
+                                   uint64_t *val, bool write)
+{
+    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
+                      KVM_VGIC_ATTR(offset, s->cpu[cpu].affinity_id),
+                      val, write);
+}
+
+static inline void kvm_gicr_access(GICv3State *s, int offset, int cpu,
+                                   uint64_t *val, bool write)
+{
+    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_REDIST_REGS,
+                      KVM_VGIC_ATTR(offset, s->cpu[cpu].affinity_id),
+                      val, write);
+}
+
+static inline void kvm_gicc_access(GICv3State *s, uint64_t reg, int cpu,
+                                   uint64_t *val, bool write)
+{
+    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS,
+                      KVM_VGIC_ATTR(reg, s->cpu[cpu].affinity_id),
+                      val, write);
+}
+
+/*
+ * Translate from the in-kernel field for an IRQ value to/from the qemu
+ * representation.
+ */
+typedef void (*vgic_translate_fn)(GICv3State *s, int irq, int cpu,
+                                  uint32_t *field, bool to_kernel);
+
+/* synthetic translate function used for clear/set registers to completely
+ * clear a setting using a clear-register before setting the remaining bits
+ * using a set-register */
+static void translate_clear(GICv3State *s, int irq, int cpu,
+                            uint32_t *field, bool to_kernel)
+{
+    if (to_kernel) {
+        *field = ~0;
+    } else {
+        /* does not make sense: qemu model doesn't use set/clear regs */
+        abort();
+    }
+}
+
+static void translate_enabled(GICv3State *s, int irq, int cpu,
+                              uint32_t *field, bool to_kernel)
+{
+    if (to_kernel) {
+        *field = GIC_TEST_ENABLED(irq, cpu);
+    } else {
+        GIC_REPLACE_ENABLED(irq, cpu, *field);
+    }
+}
+
+static void translate_group(GICv3State *s, int irq, int cpu,
+                            uint32_t *field, bool to_kernel)
+{
+    if (to_kernel) {
+        *field = GIC_TEST_GROUP(irq, cpu);
+    } else {
+        GIC_REPLACE_GROUP(irq, cpu, *field);
+    }
+}
+
+static void translate_trigger(GICv3State *s, int irq, int cpu,
+                              uint32_t *field, bool to_kernel)
+{
+    if (to_kernel) {
+        *field = GIC_TEST_EDGE_TRIGGER(irq, cpu) ? 2 : 0;
+    } else {
+        GIC_REPLACE_EDGE_TRIGGER(irq, cpu, *field & 2);
+    }
+}
+
+static void translate_pending(GICv3State *s, int irq, int cpu,
+                              uint32_t *field, bool to_kernel)
+{
+    if (to_kernel) {
+        *field = gic_test_pending(s, irq, cpu);
+    } else {
+        GIC_REPLACE_PENDING(irq, cpu, *field);
+        /* TODO: Capture if level-line is held high in the kernel */
+    }
+}
+
+static void translate_active(GICv3State *s, int irq, int cpu,
+                             uint32_t *field, bool to_kernel)
+{
+    if (to_kernel) {
+        *field = GIC_TEST_ACTIVE(irq, cpu);
+    } else {
+        GIC_REPLACE_ACTIVE(irq, cpu, *field);
+    }
+}
+
+static void translate_priority(GICv3State *s, int irq, int cpu,
+                               uint32_t *field, bool to_kernel)
+{
+    if (to_kernel) {
+        *field = GIC_GET_PRIORITY(irq, cpu);
+    } else {
+        GIC_SET_PRIORITY(irq, cpu, *field);
+    }
+}
+
+#define for_each_irq_reg(_irq, _max, _field_width) \
+    for (_irq = 0; _irq < _max; _irq += (32 / _field_width))
+
+/* Read a register group from the kernel VGIC */
+static void kvm_dist_get(GICv3State *s, uint32_t offset, int width,
+                         vgic_translate_fn translate_fn)
+{
+    uint64_t reg;
+    int j;
+    int irq, cpu, maxcpu;
+    uint32_t field;
+    int regsz = 32 / width; /* irqs per kernel register */
+
+    for_each_irq_reg(irq, s->num_irq, width) {
+        maxcpu = irq < GIC_INTERNAL ? s->num_cpu : 1;
+        for (cpu = 0; cpu < maxcpu; cpu++) {
+            /* In GICv3 SGI/PPIs are stored in redistributor
+             * Offsets in SGI area are the same as in distributor
+             */
+            if (irq < GIC_INTERNAL) {
+                kvm_gicr_access(s, offset + GICR_SGI_OFFSET, cpu, &reg, false);
+            } else {
+                kvm_gicd_access(s, offset, cpu, &reg, false);
+            }
+            for (j = 0; j < regsz; j++) {
+                field = extract32(reg, j * width, width);
+                translate_fn(s, irq + j, cpu, &field, false);
+            }
+        }
+        offset += 4;
+    }
+}
+
+/* Write a register group to the kernel VGIC */
+static void kvm_dist_put(GICv3State *s, uint32_t offset, int width,
+                         vgic_translate_fn translate_fn)
+{
+    uint64_t reg;
+    int j;
+    int irq, cpu, maxcpu;
+    uint32_t field;
+    int regsz = 32 / width; /* irqs per kernel register */
+
+    for_each_irq_reg(irq, s->num_irq, width) {
+        maxcpu = irq < GIC_INTERNAL ? s->num_cpu : 1;
+        for (cpu = 0; cpu < maxcpu; cpu++) {
+            reg = 0;
+            for (j = 0; j < regsz; j++) {
+                translate_fn(s, irq + j, cpu, &field, true);
+                reg = deposit32(reg, j * width, width, field);
+            }
+            /* In GICv3 SGI/PPIs are stored in redistributor
+             * Offsets in SGI area are the same as in distributor
+             */
+            if (irq < GIC_INTERNAL) {
+                kvm_gicr_access(s, offset + GICR_SGI_OFFSET, cpu, &reg, true);
+            } else {
+                kvm_gicd_access(s, offset, cpu, &reg, true);
+            }
+        }
+        offset += 4;
+    }
+}
+
+static void kvm_arm_gicv3_check(GICv3State *s)
+{
+    uint64_t reg;
+    uint32_t num_irq;
+
+    /* Sanity checking s->num_irq */
+    kvm_gicd_access(s, GICD_TYPER, 0, &reg, false);
+    num_irq = ((reg & 0x1f) + 1) * 32;
+
+    if (num_irq < s->num_irq) {
+        error_report("Model requests %u IRQs, but kernel supports max %u",
+                     s->num_irq, num_irq);
+        abort();
+    }
+
+    /* TODO: Consider checking compatibility with the IIDR ? */
+}
+
 static void kvm_arm_gicv3_put(GICv3State *s)
 {
-    /* TODO */
-    DPRINTF("Cannot put kernel gic state, no kernel interface\n");
+    uint64_t reg, redist_typer;
+    int ncpu, i;
+
+    kvm_arm_gicv3_check(s);
+
+    kvm_gicr_access(s, GICR_TYPER, 0, &redist_typer, false);
+
+    /*****************************************************************
+     * (Re)distributor State
+     */
+
+    reg = s->ctlr;
+    kvm_gicd_access(s, GICD_CTLR, 0, &reg, true);
+
+    if (redist_typer & GICR_TYPER_PLPIS) {
+        /* Set base addresses before LPIs are enabled by GICR_CTLR write */
+        for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
+            GICv3CPUState *c = &s->cpu[ncpu];
+
+            reg = c->propbaser & (GICR_PROPBASER_OUTER_CACHEABILITY_MASK |
+                                  GICR_PROPBASER_ADDR_MASK |
+                                  GICR_PROPBASER_SHAREABILITY_MASK |
+                                  GICR_PROPBASER_CACHEABILITY_MASK |
+                                  GICR_PROPBASER_IDBITS_MASK);
+            kvm_gicr_access(s, GICR_PROPBASER, ncpu, &reg, true);
+
+            reg = c->pendbaser & (GICR_PENDBASER_OUTER_CACHEABILITY_MASK |
+                                  GICR_PENDBASER_ADDR_MASK |
+                                  GICR_PENDBASER_SHAREABILITY_MASK |
+                                  GICR_PENDBASER_CACHEABILITY_MASK);
+            if (!(c->redist_ctlr & GICR_CTLR_ENABLE_LPIS)) {
+                reg |= GICR_PENDBASER_PTZ;
+            }
+            kvm_gicr_access(s, GICR_PENDBASER, ncpu, &reg, true);
+        }
+    }
+
+    for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
+        GICv3CPUState *c = &s->cpu[ncpu];
+
+        reg = c->redist_ctlr & (GICR_CTLR_ENABLE_LPIS | GICR_CTLR_DPG0 |
+                                GICR_CTLR_DPG1NS | GICR_CTLR_DPG1S);
+        kvm_gicr_access(s, GICR_CTLR, ncpu, &reg, true);
+
+        reg = c->cpu_enabled ? 0 : GICR_WAKER_ProcessorSleep;
+        kvm_gicr_access(s, GICR_WAKER, ncpu, &reg, true);
+    }
+
+    /* irq_state[n].enabled -> GICD_ISENABLERn */
+    kvm_dist_put(s, GICD_ICENABLER, 1, translate_clear);
+    kvm_dist_put(s, GICD_ISENABLER, 1, translate_enabled);
+
+    /* irq_state[n].group -> GICD_IGROUPRn */
+    kvm_dist_put(s, GICD_IGROUPR, 1, translate_group);
+
+    /* Restore targets before pending to ensure the pending state is set on
+     * the appropriate CPU interfaces in the kernel */
+
+    /* s->route[irq] -> GICD_IROUTERn */
+    for (i = GIC_INTERNAL; i < s->num_irq; i++) {
+        uint32_t offset = GICD_IROUTER + (sizeof(reg) * i);
+
+        reg = s->irq_route[i - GIC_INTERNAL];
+        kvm_gicd_access(s, offset, 0, &reg, true);
+    }
+
+    /* irq_state[n].trigger -> GICD_ICFGRn
+     * (restore configuration registers before pending IRQs so we treat
+     * level/edge correctly) */
+    kvm_dist_put(s, GICD_ICFGR, 2, translate_trigger);
+
+    /* irq_state[n].pending + irq_state[n].level -> GICD_ISPENDRn */
+    kvm_dist_put(s, GICD_ICPENDR, 1, translate_clear);
+    kvm_dist_put(s, GICD_ISPENDR, 1, translate_pending);
+
+    /* irq_state[n].active -> GICD_ISACTIVERn */
+    kvm_dist_put(s, GICD_ICACTIVER, 1, translate_clear);
+    kvm_dist_put(s, GICD_ISACTIVER, 1, translate_active);
+
+    /* s->priorityX[irq] -> ICD_IPRIORITYRn */
+    kvm_dist_put(s, GICD_IPRIORITYR, 8, translate_priority);
+
+    /*****************************************************************
+     * CPU Interface(s) State
+     */
+
+    for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
+        GICv3CPUState *c = &s->cpu[ncpu];
+
+        reg = c->ctlr[1] & (ICC_CTLR_CBPR | ICC_CTLR_EOIMODE | ICC_CTLR_PMHE);
+        kvm_gicc_access(s, ICC_CTLR_EL1, ncpu, &reg, true);
+
+        reg = gicv3_get_igrpen0(s, ncpu);
+        kvm_gicc_access(s, ICC_IGRPEN0_EL1, ncpu, &reg, true);
+
+        reg = gicv3_get_igrpen1(s, ncpu);
+        kvm_gicc_access(s, ICC_IGRPEN1_EL1, ncpu, &reg, true);
+
+        reg = c->priority_mask;
+        kvm_gicc_access(s, ICC_PMR_EL1, ncpu, &reg, true);
+
+        reg = c->bpr[0];
+        kvm_gicc_access(s, ICC_BPR0_EL1, ncpu, &reg, true);
+
+        reg = c->bpr[1];
+        kvm_gicc_access(s, ICC_BPR1_EL1, ncpu, &reg, true);
+
+        for (i = 0; i < 4; i++) {
+            reg = c->apr[i][0];
+            kvm_gicc_access(s, ICC_APR0_EL1(i), ncpu, &reg, true);
+        }
+
+        for (i = 0; i < 4; i++) {
+            reg = c->apr[i][1];
+            kvm_gicc_access(s, ICC_APR1_EL1(i), ncpu, &reg, true);
+        }
+    }
 }
 
 static void kvm_arm_gicv3_get(GICv3State *s)
 {
-    /* TODO */
-    DPRINTF("Cannot get kernel gic state, no kernel interface\n");
+    uint64_t reg, redist_typer;
+    int ncpu, i;
+
+    kvm_arm_gicv3_check(s);
+
+    kvm_gicr_access(s, GICR_TYPER, 0, &redist_typer, false);
+
+    /*****************************************************************
+     * (Re)distributor State
+     */
+
+    /* GICD_CTLR -> s->ctlr */
+    kvm_gicd_access(s, GICD_CTLR, 0, &reg, false);
+    s->ctlr = reg;
+
+    for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
+        GICv3CPUState *c = &s->cpu[ncpu];
+
+        kvm_gicr_access(s, GICR_CTLR, ncpu, &reg, false);
+        c->redist_ctlr = reg & (GICR_CTLR_ENABLE_LPIS | GICR_CTLR_DPG0 |
+                                GICR_CTLR_DPG1NS | GICR_CTLR_DPG1S);
+
+        kvm_gicr_access(s, GICR_WAKER, ncpu, &reg, false);
+        c->cpu_enabled = !(reg & GICR_WAKER_ProcessorSleep);
+    }
+
+    if (redist_typer & GICR_TYPER_PLPIS) {
+        for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
+            GICv3CPUState *c = &s->cpu[ncpu];
+
+            kvm_gicr_access(s, GICR_PROPBASER, ncpu, &reg, false);
+            c->propbaser = reg & (GICR_PROPBASER_OUTER_CACHEABILITY_MASK |
+                                  GICR_PROPBASER_ADDR_MASK |
+                                  GICR_PROPBASER_SHAREABILITY_MASK |
+                                  GICR_PROPBASER_CACHEABILITY_MASK |
+                                  GICR_PROPBASER_IDBITS_MASK);
+
+            kvm_gicr_access(s, GICR_PENDBASER, ncpu, &reg, false);
+            c->pendbaser = reg & (GICR_PENDBASER_OUTER_CACHEABILITY_MASK |
+                                  GICR_PENDBASER_ADDR_MASK |
+                                  GICR_PENDBASER_SHAREABILITY_MASK |
+                                  GICR_PENDBASER_CACHEABILITY_MASK);
+        }
+    }
+
+    /* GICD_IIDR -> ? */
+    /* kvm_gicd_access(s, GICD_IIDR, 0, &reg, false); */
+
+    /* GICD_IGROUPRn -> irq_state[n].group */
+    kvm_dist_get(s, GICD_IGROUPR, 1, translate_group);
+
+    /* GICD_ISENABLERn -> irq_state[n].enabled */
+    kvm_dist_get(s, GICD_ISENABLER, 1, translate_enabled);
+
+    /* GICD_ISPENDRn -> irq_state[n].pending + irq_state[n].level */
+    kvm_dist_get(s, GICD_ISPENDR, 1, translate_pending);
+
+    /* GICD_ISACTIVERn -> irq_state[n].active */
+    kvm_dist_get(s, GICD_ISACTIVER, 1, translate_active);
+
+    /* GICD_ICFGRn -> irq_state[n].trigger */
+    kvm_dist_get(s, GICD_ICFGR, 2, translate_trigger);
+
+    /* GICD_IPRIORITYRn -> s->priorityX[irq] */
+    kvm_dist_get(s, GICD_IPRIORITYR, 8, translate_priority);
+
+    /* GICD_IROUTERn -> s->route[irq] */
+    for (i = GIC_INTERNAL; i < s->num_irq; i++) {
+        uint32_t offset = GICD_IROUTER + (sizeof(reg) * i);
+
+        kvm_gicd_access(s, offset, 0, &reg, false);
+        s->irq_route[i - GIC_INTERNAL] = reg;
+    }
+
+    /*****************************************************************
+     * CPU Interface(s) State
+     */
+
+    for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
+        GICv3CPUState *c = &s->cpu[ncpu];
+
+        kvm_gicc_access(s, ICC_CTLR_EL1, ncpu, &reg, false);
+        c->ctlr[1] = reg & (ICC_CTLR_CBPR | ICC_CTLR_EOIMODE | ICC_CTLR_PMHE);
+
+        kvm_gicc_access(s, ICC_IGRPEN0_EL1, ncpu, &reg, false);
+        gicv3_set_igrpen0(s, ncpu, reg);
+
+        kvm_gicc_access(s, ICC_IGRPEN1_EL1, ncpu, &reg, false);
+        gicv3_set_igrpen1(s, ncpu, reg);
+
+        kvm_gicc_access(s, ICC_PMR_EL1, ncpu, &reg, false);
+        c->priority_mask = reg & ICC_PMR_PRIORITY_MASK;
+
+        kvm_gicc_access(s, ICC_BPR0_EL1, ncpu, &reg, false);
+        c->bpr[0] = reg & ICC_BPR_BINARYPOINT_MASK;
+
+        kvm_gicc_access(s, ICC_BPR1_EL1, ncpu, &reg, false);
+        c->bpr[1] = reg & ICC_BPR_BINARYPOINT_MASK;
+
+        for (i = 0; i < 4; i++) {
+            kvm_gicc_access(s, ICC_APR0_EL1(i), ncpu, &reg, false);
+            c->apr[i][0] = reg;
+        }
+
+        for (i = 0; i < 4; i++) {
+            kvm_gicc_access(s, ICC_APR1_EL1(i), ncpu, &reg, false);
+            c->apr[i][1] = reg;
+        }
+    }
 }
 
 static void kvm_arm_gicv3_reset(DeviceState *dev)
@@ -74,6 +509,12 @@ static void kvm_arm_gicv3_reset(DeviceState *dev)
     DPRINTF("Reset\n");
 
     kgc->parent_reset(dev);
+
+    if (s->migration_blocker) {
+        DPRINTF("Cannot put kernel gic state, no kernel interface\n");
+        return;
+    }
+
     kvm_arm_gicv3_put(s);
 }
 
@@ -117,6 +558,13 @@ static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp)
                             KVM_VGIC_V3_ADDR_TYPE_DIST, s->dev_fd);
     kvm_arm_register_device(&s->iomem_redist, -1, KVM_DEV_ARM_VGIC_GRP_ADDR,
                             KVM_VGIC_V3_ADDR_TYPE_REDIST, s->dev_fd);
+
+    if (!kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
+                               GICD_CTLR)) {
+        error_setg(&s->migration_blocker, "This operating system kernel does "
+                                          "not support vGICv3 migration");
+        migrate_add_blocker(s->migration_blocker);
+    }
 }
 
 static void kvm_arm_gicv3_class_init(ObjectClass *klass, void *data)
-- 
2.4.4