To be able to easily send commands to the ITS, create the respective wrapper functions, which take care of the ring buffer. The first two commands we implement provide the means to map a collection to a redistributor (i.e. a host core) and to flush the command queue (SYNC), which waits for previously issued commands to complete. Start using these commands to map one collection to each host CPU.
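For reviewers not familiar with the ITS command queue: each command is four 64-bit doublewords, software appends new commands at the byte offset held in GITS_CWRITER, the ITS advances GITS_CREADR as it executes them, and the queue is full when advancing the write offset would make it meet the read offset. As a rough, illustrative sketch (not part of this patch, reusing the macro names the patch introduces), a MAPC command is laid out like this per the GICv3 architecture spec:

    /* Sketch only: encode a MAPC command mapping collection 'col' onto 'rdbase'. */
    static void sketch_encode_mapc(uint64_t cmd[4], uint16_t col, uint64_t rdbase)
    {
        cmd[0] = GITS_CMD_MAPC;               /* command number lives in bits [7:0] of DW0 */
        cmd[1] = 0;
        cmd[2] = (col & GENMASK(15, 0)) |     /* ICID (collection ID) in bits [15:0] */
                 (rdbase & GENMASK(51, 16)) | /* RDbase: redistributor address or ID << 16 */
                 GITS_VALID_BIT;              /* bit 63: make the mapping valid */
        cmd[3] = 0;
    }

its_send_cmd_mapc() below builds exactly this layout and pushes it into the ring buffer via its_send_command().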
Signed-off-by: Andre Przywara <andre.przyw...@arm.com>
---
 xen/arch/arm/gic-v3-its.c         | 142 +++++++++++++++++++++++++++++++++++++-
 xen/arch/arm/gic-v3-lpi.c         |  20 ++++++
 xen/arch/arm/gic-v3.c             |  18 ++++-
 xen/include/asm-arm/gic_v3_defs.h |   2 +
 xen/include/asm-arm/gic_v3_its.h  |  36 ++++++++++
 5 files changed, 215 insertions(+), 3 deletions(-)

diff --git a/xen/arch/arm/gic-v3-its.c b/xen/arch/arm/gic-v3-its.c
index ad7cd2a..6578e8a 100644
--- a/xen/arch/arm/gic-v3-its.c
+++ b/xen/arch/arm/gic-v3-its.c
@@ -19,6 +19,7 @@
 #include <xen/config.h>
 #include <xen/lib.h>
 #include <xen/device_tree.h>
+#include <xen/delay.h>
 #include <xen/libfdt/libfdt.h>
 #include <xen/mm.h>
 #include <xen/sizes.h>
@@ -29,6 +30,98 @@
 
 #define ITS_CMD_QUEUE_SZ                SZ_64K
 
+#define BUFPTR_MASK                     GENMASK(19, 5)
+static int its_send_command(struct host_its *hw_its, const void *its_cmd)
+{
+    uint64_t readp, writep;
+
+    spin_lock(&hw_its->cmd_lock);
+
+    readp = readq_relaxed(hw_its->its_base + GITS_CREADR) & BUFPTR_MASK;
+    writep = readq_relaxed(hw_its->its_base + GITS_CWRITER) & BUFPTR_MASK;
+
+    if ( ((writep + ITS_CMD_SIZE) % ITS_CMD_QUEUE_SZ) == readp )
+    {
+        spin_unlock(&hw_its->cmd_lock);
+        return -EBUSY;
+    }
+
+    memcpy(hw_its->cmd_buf + writep, its_cmd, ITS_CMD_SIZE);
+    if ( hw_its->flags & HOST_ITS_FLUSH_CMD_QUEUE )
+        __flush_dcache_area(hw_its->cmd_buf + writep, ITS_CMD_SIZE);
+    else
+        dsb(ishst);
+
+    writep = (writep + ITS_CMD_SIZE) % ITS_CMD_QUEUE_SZ;
+    writeq_relaxed(writep & BUFPTR_MASK, hw_its->its_base + GITS_CWRITER);
+
+    spin_unlock(&hw_its->cmd_lock);
+
+    return 0;
+}
+
+static uint64_t encode_rdbase(struct host_its *hw_its, int cpu, uint64_t reg)
+{
+    reg &= ~GENMASK(51, 16);
+
+    reg |= gicv3_get_redist_address(cpu, hw_its->flags & HOST_ITS_USES_PTA);
+
+    return reg;
+}
+
+static int its_send_cmd_sync(struct host_its *its, int cpu)
+{
+    uint64_t cmd[4];
+
+    cmd[0] = GITS_CMD_SYNC;
+    cmd[1] = 0x00;
+    cmd[2] = encode_rdbase(its, cpu, 0x0);
+    cmd[3] = 0x00;
+
+    return its_send_command(its, cmd);
+}
+
+static int its_send_cmd_mapc(struct host_its *its, int collection_id, int cpu)
+{
+    uint64_t cmd[4];
+
+    cmd[0] = GITS_CMD_MAPC;
+    cmd[1] = 0x00;
+    cmd[2] = encode_rdbase(its, cpu, (collection_id & GENMASK(15, 0)));
+    cmd[2] |= GITS_VALID_BIT;
+    cmd[3] = 0x00;
+
+    return its_send_command(its, cmd);
+}
+
+/* Set up the (1:1) collection mapping for the given host CPU. */
+int gicv3_its_setup_collection(int cpu)
+{
+    struct host_its *its;
+    int ret;
+
+    list_for_each_entry(its, &host_its_list, entry)
+    {
+        /*
+         * This function is called on CPU0 before any ITSes have been
+         * properly initialized. Skip the collection setup in this case,
+         * it will be done explicitly for CPU0 upon initializing the ITS.
+         */
+        if ( !its->cmd_buf )
+            continue;
+
+        ret = its_send_cmd_mapc(its, cpu, cpu);
+        if ( ret )
+            return ret;
+
+        ret = its_send_cmd_sync(its, cpu);
+        if ( ret )
+            return ret;
+    }
+
+    return 0;
+}
+
 #define BASER_ATTR_MASK                                           \
         ((0x3UL << GITS_BASER_SHAREABILITY_SHIFT)        |       \
         (0x7UL << GITS_BASER_OUTER_CACHEABILITY_SHIFT)   |       \
@@ -156,18 +249,51 @@ static int its_map_baser(void __iomem *basereg, uint64_t regc, int nr_items)
     return -EINVAL;
 }
 
+/* Wait for an ITS to become quiescent (all ITS operations completed). */
+static int gicv3_its_wait_quiescent(struct host_its *hw_its)
+{
+    uint32_t reg;
+    s_time_t deadline = NOW() + MILLISECS(1000);
+
+    reg = readl_relaxed(hw_its->its_base + GITS_CTLR);
+    if ( (reg & (GITS_CTLR_QUIESCENT | GITS_CTLR_ENABLE)) == GITS_CTLR_QUIESCENT )
+        return 0;
+
+    writel_relaxed(reg & ~GITS_CTLR_ENABLE, hw_its->its_base + GITS_CTLR);
+
+    do {
+        reg = readl_relaxed(hw_its->its_base + GITS_CTLR);
+        if ( reg & GITS_CTLR_QUIESCENT )
+            return 0;
+
+        cpu_relax();
+        udelay(1);
+    } while ( NOW() <= deadline );
+
+    dprintk(XENLOG_ERR, "ITS not quiescent\n");
+    return -ETIMEDOUT;
+}
+
 static unsigned int max_its_device_bits = CONFIG_MAX_PHYS_ITS_DEVICE_BITS;
 integer_param("max_its_device_bits", max_its_device_bits);
 
 int gicv3_its_init(struct host_its *hw_its)
 {
     uint64_t reg;
-    int i;
+    int i, ret;
 
     hw_its->its_base = ioremap_nocache(hw_its->addr, hw_its->size);
     if ( !hw_its->its_base )
         return -ENOMEM;
 
+    ret = gicv3_its_wait_quiescent(hw_its);
+    if ( ret )
+        return ret;
+
+    reg = readq_relaxed(hw_its->its_base + GITS_TYPER);
+    if ( reg & GITS_TYPER_PTA )
+        hw_its->flags |= HOST_ITS_USES_PTA;
+
     for ( i = 0; i < GITS_BASER_NR_REGS; i++ )
     {
         void __iomem *basereg = hw_its->its_base + GITS_BASER0 + i * 8;
@@ -196,6 +322,20 @@ int gicv3_its_init(struct host_its *hw_its)
         return -ENOMEM;
 
     writeq_relaxed(0, hw_its->its_base + GITS_CWRITER);
 
+    /*
+     * We issue the collection mapping calls upon initialising the
+     * redistributors, which for CPU 0 happens before the ITS gets initialised
+     * here. So we skip this mapping for CPU 0 there (since the ITS is not
+     * ready), instead do it explicitly here for CPU 0.
+     */
+    ret = its_send_cmd_mapc(hw_its, smp_processor_id(), smp_processor_id());
+    if ( ret )
+        return ret;
+
+    ret = its_send_cmd_sync(hw_its, smp_processor_id());
+    if ( ret )
+        return ret;
+
     return 0;
 }
 
diff --git a/xen/arch/arm/gic-v3-lpi.c b/xen/arch/arm/gic-v3-lpi.c
index e2fc901..5911b91 100644
--- a/xen/arch/arm/gic-v3-lpi.c
+++ b/xen/arch/arm/gic-v3-lpi.c
@@ -30,11 +30,31 @@ static struct {
     unsigned int host_lpi_bits;
 } lpi_data;
 
+/* Physical redistributor address */
+static DEFINE_PER_CPU(paddr_t, redist_addr);
+/* Redistributor ID */
+static DEFINE_PER_CPU(int, redist_id);
 /* Pending table for each redistributor */
 static DEFINE_PER_CPU(void *, pending_table);
 
 #define MAX_PHYS_LPIS   (BIT_ULL(lpi_data.host_lpi_bits) - LPI_OFFSET)
 
+/* Stores this redistributor's physical address and ID in per-CPU variables. */
+void gicv3_set_redist_address(paddr_t address, int redist_id)
+{
+    this_cpu(redist_addr) = address;
+    this_cpu(redist_id) = redist_id;
+}
+
+/* Returns a redistributor's rdbase value: either its physical address or its ID. */
+uint64_t gicv3_get_redist_address(int cpu, bool use_pta)
+{
+    if ( use_pta )
+        return per_cpu(redist_addr, cpu) & GENMASK(51, 16);
+    else
+        return per_cpu(redist_id, cpu) << 16;
+}
+
 uint64_t gicv3_lpi_allocate_pendtable(void)
 {
     uint64_t reg;
diff --git a/xen/arch/arm/gic-v3.c b/xen/arch/arm/gic-v3.c
index 440c079..5f825a6 100644
--- a/xen/arch/arm/gic-v3.c
+++ b/xen/arch/arm/gic-v3.c
@@ -644,7 +644,7 @@ static int gicv3_rdist_init_lpis(void __iomem * rdist_base)
         return -ENOMEM;
     writeq_relaxed(table_reg, rdist_base + GICR_PROPBASER);
 
-    return 0;
+    return gicv3_its_setup_collection(smp_processor_id());
 }
 
 static int __init gicv3_populate_rdist(void)
@@ -692,7 +692,21 @@ static int __init gicv3_populate_rdist(void)
 
             if ( typer & GICR_TYPER_PLPIS )
             {
-                int ret;
+                paddr_t rdist_addr;
+                int procnum, ret;
+
+                rdist_addr = gicv3.rdist_regions[i].base;
+                rdist_addr += ptr - gicv3.rdist_regions[i].map_base;
+                procnum = (typer & GICR_TYPER_PROC_NUM_MASK);
+                procnum >>= GICR_TYPER_PROC_NUM_SHIFT;
+
+                /*
+                 * The ITS refers to redistributors either by their physical
+                 * address or by their ID. Determine those two values and
+                 * let the ITS code store them in per host CPU variables to
+                 * later be able to address those redistributors.
+                 */
+                gicv3_set_redist_address(rdist_addr, procnum);
 
                 ret = gicv3_rdist_init_lpis(ptr);
                 if ( ret && ret != -ENODEV )
diff --git a/xen/include/asm-arm/gic_v3_defs.h b/xen/include/asm-arm/gic_v3_defs.h
index b307322..878bae2 100644
--- a/xen/include/asm-arm/gic_v3_defs.h
+++ b/xen/include/asm-arm/gic_v3_defs.h
@@ -101,6 +101,8 @@
 #define GICR_TYPER_PLPIS             (1U << 0)
 #define GICR_TYPER_VLPIS             (1U << 1)
 #define GICR_TYPER_LAST              (1U << 4)
+#define GICR_TYPER_PROC_NUM_SHIFT    8
+#define GICR_TYPER_PROC_NUM_MASK     (0xffff << GICR_TYPER_PROC_NUM_SHIFT)
 
 /* For specifying the inner cacheability type only */
 #define GIC_BASER_CACHE_nCnB         0ULL
diff --git a/xen/include/asm-arm/gic_v3_its.h b/xen/include/asm-arm/gic_v3_its.h
index ff5572f..8288185 100644
--- a/xen/include/asm-arm/gic_v3_its.h
+++ b/xen/include/asm-arm/gic_v3_its.h
@@ -40,6 +40,9 @@
 #define GITS_CTLR_QUIESCENT             BIT(31)
 #define GITS_CTLR_ENABLE                BIT(0)
 
+#define GITS_TYPER_PTA                  BIT_ULL(19)
+#define GITS_TYPER_IDBITS_SHIFT         8
+
 #define GITS_IIDR_VALUE                 0x34c
 
 #define GITS_BASER_INDIRECT             BIT_ULL(62)
@@ -67,10 +70,27 @@
 
 #define GITS_CBASER_SIZE_MASK           0xff
 
+/* ITS command definitions */
+#define ITS_CMD_SIZE                    32
+
+#define GITS_CMD_MOVI                   0x01
+#define GITS_CMD_INT                    0x03
+#define GITS_CMD_CLEAR                  0x04
+#define GITS_CMD_SYNC                   0x05
+#define GITS_CMD_MAPD                   0x08
+#define GITS_CMD_MAPC                   0x09
+#define GITS_CMD_MAPTI                  0x0a
+#define GITS_CMD_MAPI                   0x0b
+#define GITS_CMD_INV                    0x0c
+#define GITS_CMD_INVALL                 0x0d
+#define GITS_CMD_MOVALL                 0x0e
+#define GITS_CMD_DISCARD                0x0f
+
 #ifndef __ASSEMBLY__
 #include <xen/device_tree.h>
 
 #define HOST_ITS_FLUSH_CMD_QUEUE    (1U << 0)
+#define HOST_ITS_USES_PTA           (1U << 1)
 
 /* data structure for each hardware ITS */
 struct host_its {
@@ -79,6 +99,7 @@ struct host_its {
     paddr_t addr;
     paddr_t size;
     void __iomem *its_base;
+    spinlock_t cmd_lock;
     void *cmd_buf;
     unsigned int flags;
 };
@@ -100,6 +121,13 @@ uint64_t gicv3_lpi_allocate_pendtable(void);
 int gicv3_lpi_init_host_lpis(unsigned int nr_lpis);
 int gicv3_its_init(struct host_its *hw_its);
 
+/* Store the physical address and ID for each redistributor as read from DT. */
+void gicv3_set_redist_address(paddr_t address, int redist_id);
+uint64_t gicv3_get_redist_address(int cpu, bool use_pta);
+
+/* Map a collection for this host CPU to each host ITS. */
+int gicv3_its_setup_collection(int cpu);
+
 #else
 
 static inline void gicv3_its_dt_init(const struct dt_device_node *node)
@@ -121,6 +149,14 @@ static inline int gicv3_its_init(struct host_its *hw_its)
 {
     return 0;
 }
+static inline void gicv3_set_redist_address(paddr_t address, int redist_id)
+{
+}
+static inline int gicv3_its_setup_collection(int cpu)
+{
+    return 0;
+}
+
 #endif /* CONFIG_HAS_ITS */
 
 #endif /* __ASSEMBLY__ */
-- 
2.9.0