Reviewed-by: Glenn Miles <mil...@linux.ibm.com>
On Mon, 2025-05-12 at 13:10 +1000, Nicholas Piggin wrote:
> Certain TIMA operations should only be performed when a ring is valid,
> others when the ring is invalid, and they are considered undefined if
> used incorrectly. Add checks for this condition.
>
> Signed-off-by: Nicholas Piggin <npig...@gmail.com>
> ---
> hw/intc/xive.c | 196 +++++++++++++++++++++++++-----------------
> include/hw/ppc/xive.h | 1 +
> 2 files changed, 116 insertions(+), 81 deletions(-)
>
> diff --git a/hw/intc/xive.c b/hw/intc/xive.c
> index aeca66e56e..d5bbd8f4c6 100644
> --- a/hw/intc/xive.c
> +++ b/hw/intc/xive.c
> @@ -25,6 +25,19 @@
> /*
> * XIVE Thread Interrupt Management context
> */
> +bool xive_ring_valid(XiveTCTX *tctx, uint8_t ring)
> +{
> + uint8_t cur_ring;
> +
> + for (cur_ring = ring; cur_ring <= TM_QW3_HV_PHYS;
> + cur_ring += XIVE_TM_RING_SIZE) {
> + if (!(tctx->regs[cur_ring + TM_WORD2] & 0x80)) {
> + return false;
> + }
> + }
> + return true;
> +}
> +
> bool xive_nsr_indicates_exception(uint8_t ring, uint8_t nsr)
> {
> switch (ring) {
> @@ -663,6 +676,8 @@ typedef struct XiveTmOp {
> uint8_t page_offset;
> uint32_t op_offset;
> unsigned size;
> + bool hw_ok;
> + bool sw_ok;
> void (*write_handler)(XivePresenter *xptr, XiveTCTX *tctx,
> hwaddr offset,
> uint64_t value, unsigned size);
> @@ -675,34 +690,34 @@ static const XiveTmOp xive_tm_operations[] = {
> * MMIOs below 2K : raw values and special operations without side
> * effects
> */
> - { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr,
> - NULL },
> - { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive_tm_push_os_ctx,
> - NULL },
> - { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr,
> - NULL },
> - { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push,
> - NULL },
> - { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL,
> - xive_tm_vt_poll },
> + { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, true, true,
> + xive_tm_set_os_cppr, NULL },
> + { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, true, true,
> + xive_tm_push_os_ctx, NULL },
> + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, true, true,
> + xive_tm_set_hv_cppr, NULL },
> + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, false, true,
> + xive_tm_vt_push, NULL },
> + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, true, true,
> + NULL, xive_tm_vt_poll },
>
> /* MMIOs above 2K : special operations with side effects */
> - { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL,
> - xive_tm_ack_os_reg },
> - { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending,
> - NULL },
> - { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL,
> - xive_tm_pull_os_ctx },
> - { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL,
> - xive_tm_pull_os_ctx },
> - { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL,
> - xive_tm_ack_hv_reg },
> - { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL,
> - xive_tm_pull_pool_ctx },
> - { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL,
> - xive_tm_pull_pool_ctx },
> - { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX, 1, NULL,
> - xive_tm_pull_phys_ctx },
> + { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, true, false,
> + NULL, xive_tm_ack_os_reg },
> + { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, true, false,
> + xive_tm_set_os_pending, NULL },
> + { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, true, false,
> + NULL, xive_tm_pull_os_ctx },
> + { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, true, false,
> + NULL, xive_tm_pull_os_ctx },
> + { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, true, false,
> + NULL, xive_tm_ack_hv_reg },
> + { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, true, false,
> + NULL, xive_tm_pull_pool_ctx },
> + { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, true, false,
> + NULL, xive_tm_pull_pool_ctx },
> + { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX, 1, true, false,
> + NULL, xive_tm_pull_phys_ctx },
> };
>
> static const XiveTmOp xive2_tm_operations[] = {
> @@ -710,52 +725,48 @@ static const XiveTmOp xive2_tm_operations[] = {
> * MMIOs below 2K : raw values and special operations without side
> * effects
> */
> - { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive2_tm_set_os_cppr,
> - NULL },
> - { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive2_tm_push_os_ctx,
> - NULL },
> - { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 8, xive2_tm_push_os_ctx,
> - NULL },
> - { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_LGS, 1, xive_tm_set_os_lgs,
> - NULL },
> - { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive2_tm_set_hv_cppr,
> - NULL },
> - { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push,
> - NULL },
> - { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL,
> - xive_tm_vt_poll },
> - { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_T, 1, xive2_tm_set_hv_target,
> - NULL },
> + { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, true, true,
> + xive2_tm_set_os_cppr, NULL },
> + { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, true, true,
> + xive2_tm_push_os_ctx, NULL },
> + { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 8, true, true,
> + xive2_tm_push_os_ctx, NULL },
> + { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_LGS, 1, true, true,
> + xive_tm_set_os_lgs, NULL },
> + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, true, true,
> + xive2_tm_set_hv_cppr, NULL },
> + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, true, true,
> + NULL, xive_tm_vt_poll },
> + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_T, 1, true, true,
> + xive2_tm_set_hv_target, NULL },
>
> /* MMIOs above 2K : special operations with side effects */
> - { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL,
> - xive_tm_ack_os_reg },
> - { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending,
> - NULL },
> - { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX_G2, 4, NULL,
> - xive2_tm_pull_os_ctx },
> - { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL,
> - xive2_tm_pull_os_ctx },
> - { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL,
> - xive2_tm_pull_os_ctx },
> - { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL,
> - xive_tm_ack_hv_reg },
> - { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX_G2, 4, NULL, xive2_tm_pull_pool_ctx },
> - { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL, xive2_tm_pull_pool_ctx },
> - { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL, xive2_tm_pull_pool_ctx },
> - { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX_OL, 1, xive2_tm_pull_os_ctx_ol,
> - NULL },
> - { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX_G2, 4, NULL, xive2_tm_pull_phys_ctx },
> - { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX, 1, NULL, xive2_tm_pull_phys_ctx },
> - { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX_OL, 1, xive2_tm_pull_phys_ctx_ol,
> - NULL },
> - { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_EL, 1, xive2_tm_ack_os_el,
> - NULL },
> + { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, true, false,
> + NULL, xive_tm_ack_os_reg },
> + { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX_G2, 4, true, false,
> + NULL, xive2_tm_pull_os_ctx },
> + { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, true, false,
> + NULL, xive2_tm_pull_os_ctx },
> + { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, true, false,
> + NULL, xive2_tm_pull_os_ctx },
> + { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, true, false,
> + NULL, xive_tm_ack_hv_reg },
> + { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX_G2, 4, true, false,
> + NULL, xive2_tm_pull_pool_ctx },
> + { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, true, false,
> + NULL, xive2_tm_pull_pool_ctx },
> + { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, true, false,
> + NULL, xive2_tm_pull_pool_ctx },
> + { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX_OL, 1, true, false,
> + xive2_tm_pull_os_ctx_ol, NULL },
> + { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX_G2, 4, true, false,
> + NULL, xive2_tm_pull_phys_ctx },
> + { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX, 1, true, false,
> + NULL, xive2_tm_pull_phys_ctx },
> + { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX_OL, 1, true, false,
> + xive2_tm_pull_phys_ctx_ol, NULL },
> + { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_EL, 1, true, false,
> + xive2_tm_ack_os_el, NULL },
> };
>
> static const XiveTmOp *xive_tm_find_op(XivePresenter *xptr, hwaddr offset,
> @@ -797,18 +808,28 @@ void xive_tctx_tm_write(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
> uint64_t value, unsigned size)
> {
> const XiveTmOp *xto;
> + uint8_t ring = offset & TM_RING_OFFSET;
> + bool is_valid = xive_ring_valid(tctx, ring);
> + bool hw_owned = is_valid;
>
> trace_xive_tctx_tm_write(tctx->cs->cpu_index, offset, size, value);
>
> - /*
> - * TODO: check V bit in Q[0-3]W2
> - */
> -
> /*
> * First, check for special operations in the 2K region
> */
> + xto = xive_tm_find_op(tctx->xptr, offset, size, true);
> + if (xto) {
> + if (hw_owned && !xto->hw_ok) {
> + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: undefined write to HW TIMA "
> + "@%"HWADDR_PRIx" size %d\n", offset, size);
> + }
> + if (!hw_owned && !xto->sw_ok) {
> + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: undefined write to SW TIMA "
> + "@%"HWADDR_PRIx" size %d\n", offset, size);
> + }
> + }
> +
> if (offset & TM_SPECIAL_OP) {
> - xto = xive_tm_find_op(tctx->xptr, offset, size, true);
> if (!xto) {
> qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA "
> "@%"HWADDR_PRIx" size %d\n", offset, size);
> @@ -821,7 +842,6 @@ void xive_tctx_tm_write(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
> /*
> * Then, for special operations in the region below 2K.
> */
> - xto = xive_tm_find_op(tctx->xptr, offset, size, true);
> if (xto) {
> xto->write_handler(xptr, tctx, offset, value, size);
> return;
> @@ -830,6 +850,11 @@ void xive_tctx_tm_write(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
> /*
> * Finish with raw access to the register values
> */
> + if (hw_owned) {
> + /* Store context operations are dangerous when context is valid */
> + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: undefined write to HW TIMA "
> + "@%"HWADDR_PRIx" size %d\n", offset, size);
> + }
> xive_tm_raw_write(tctx, offset, value, size);
> }
>
> @@ -837,17 +862,27 @@ uint64_t xive_tctx_tm_read(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
> unsigned size)
> {
> const XiveTmOp *xto;
> + uint8_t ring = offset & TM_RING_OFFSET;
> + bool is_valid = xive_ring_valid(tctx, ring);
> + bool hw_owned = is_valid;
> uint64_t ret;
>
> - /*
> - * TODO: check V bit in Q[0-3]W2
> - */
> + xto = xive_tm_find_op(tctx->xptr, offset, size, false);
> + if (xto) {
> + if (hw_owned && !xto->hw_ok) {
> + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: undefined read to HW TIMA "
> + "@%"HWADDR_PRIx" size %d\n", offset, size);
> + }
> + if (!hw_owned && !xto->sw_ok) {
> + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: undefined read to SW TIMA "
> + "@%"HWADDR_PRIx" size %d\n", offset, size);
> + }
> + }
>
> /*
> * First, check for special operations in the 2K region
> */
> if (offset & TM_SPECIAL_OP) {
> - xto = xive_tm_find_op(tctx->xptr, offset, size, false);
> if (!xto) {
> qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access to TIMA"
> "@%"HWADDR_PRIx" size %d\n", offset, size);
> @@ -860,7 +895,6 @@ uint64_t xive_tctx_tm_read(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
> /*
> * Then, for special operations in the region below 2K.
> */
> - xto = xive_tm_find_op(tctx->xptr, offset, size, false);
> if (xto) {
> ret = xto->read_handler(xptr, tctx, offset, size);
> goto out;
> diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h
> index 2372d1014b..b7ca8544e4 100644
> --- a/include/hw/ppc/xive.h
> +++ b/include/hw/ppc/xive.h
> @@ -365,6 +365,7 @@ static inline uint32_t xive_tctx_word2(uint8_t *ring)
> return *((uint32_t *) &ring[TM_WORD2]);
> }
>
> +bool xive_ring_valid(XiveTCTX *tctx, uint8_t ring);
> bool xive_nsr_indicates_exception(uint8_t ring, uint8_t nsr);
> bool xive_nsr_indicates_group_exception(uint8_t ring, uint8_t nsr);
> uint8_t xive_nsr_exception_ring(uint8_t ring, uint8_t nsr);