On Thu, Dec 5, 2024 at 11:35 PM Daniel Henrique Barboza
<dbarb...@ventanamicro.com> wrote:
>
> From: Tomasz Jeznach <tjezn...@rivosinc.com>
>
> Writes to RISCV_IOMMU_REG_IOCOUNTINH are handled by
> riscv_iommu_process_iocntinh_cy(), which is called from the
> riscv_iommu_mmio_write() callback via a new
> riscv_iommu_process_hpm_writes() helper.
>
> Signed-off-by: Tomasz Jeznach <tjezn...@rivosinc.com>
> Signed-off-by: Daniel Henrique Barboza <dbarb...@ventanamicro.com>

Acked-by: Alistair Francis <alistair.fran...@wdc.com>

Alistair

> ---
>  hw/riscv/riscv-iommu-hpm.c | 60 ++++++++++++++++++++++++++++++++++++++
>  hw/riscv/riscv-iommu-hpm.h |  1 +
>  hw/riscv/riscv-iommu.c     | 37 +++++++++++++++++++++++
>  3 files changed, 98 insertions(+)
>
> diff --git a/hw/riscv/riscv-iommu-hpm.c b/hw/riscv/riscv-iommu-hpm.c
> index 325088333e..70814b942d 100644
> --- a/hw/riscv/riscv-iommu-hpm.c
> +++ b/hw/riscv/riscv-iommu-hpm.c
> @@ -202,3 +202,63 @@ void riscv_iommu_hpm_timer_cb(void *priv)
>          riscv_iommu_notify(s, RISCV_IOMMU_INTR_PM);
>      }
>  }
> +
> +static void hpm_setup_timer(RISCVIOMMUState *s, uint64_t value)
> +{
> +    const uint32_t inhibit = riscv_iommu_reg_get32(
> +        s, RISCV_IOMMU_REG_IOCOUNTINH);
> +    uint64_t overflow_at, overflow_ns;
> +
> +    if (get_field(inhibit, RISCV_IOMMU_IOCOUNTINH_CY)) {
> +        return;
> +    }
> +
> +    /*
> +     * We use INT64_MAX here instead of UINT64_MAX because the cycle counter
> +     * has 63-bit precision and INT64_MAX is the maximum it can store.
> +     */
> +    if (value) {
> +        overflow_ns = INT64_MAX - value + 1;
> +    } else {
> +        overflow_ns = INT64_MAX;
> +    }
> +
> +    overflow_at = (uint64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + overflow_ns;
> +
> +    if (overflow_at > INT64_MAX) {
> +        s->irq_overflow_left = overflow_at - INT64_MAX;
> +        overflow_at = INT64_MAX;
> +    }
> +
> +    timer_mod_anticipate_ns(s->hpm_timer, overflow_at);
> +}
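
A quick note for other reviewers, since the deadline math above is
terse: get_cycles() maps 1:1 onto QEMU_CLOCK_VIRTUAL nanoseconds (the
addition above relies on this), so the timer is armed
INT64_MAX - value + 1 ns from now. A minimal host-side sketch of just
that arithmetic; ns_until_overflow() is a made-up name, not something
in this patch:

#include <stdint.h>
#include <stdio.h>

/* 63-bit counter, one tick per virtual ns: it overflows
 * INT64_MAX - value + 1 ns from now; a zeroed counter gets the
 * full 63-bit period. */
static uint64_t ns_until_overflow(uint64_t value)
{
    return value ? (uint64_t)INT64_MAX - value + 1 : (uint64_t)INT64_MAX;
}

int main(void)
{
    /* One tick below the top: the overflow timer fires in 2 ns. */
    printf("%llu\n", (unsigned long long)ns_until_overflow(INT64_MAX - 1));
    /* Freshly cleared counter: full period. */
    printf("%llu\n", (unsigned long long)ns_until_overflow(0));
    return 0;
}
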
> +
> +/* Updates the internal cycle counter state when iocntinh:CY is changed. */
> +void riscv_iommu_process_iocntinh_cy(RISCVIOMMUState *s, bool prev_cy_inh)
> +{
> +    const uint32_t inhibit = riscv_iommu_reg_get32(
> +        s, RISCV_IOMMU_REG_IOCOUNTINH);
> +
> +    /* We only need to process a CY bit toggle. */
> +    if (!!(inhibit & RISCV_IOMMU_IOCOUNTINH_CY) == prev_cy_inh) {
> +        return;
> +    }
> +
> +    if (!(inhibit & RISCV_IOMMU_IOCOUNTINH_CY)) {
> +        /*
> +         * Cycle counter is enabled. Just start the timer again and update
> +         * the clock snapshot value to point to the current time to make
> +         * sure iohpmcycles read is correct.
> +         */
> +        s->hpmcycle_prev = get_cycles();
> +        hpm_setup_timer(s, s->hpmcycle_val);
> +    } else {
> +        /*
> +         * Cycle counter is disabled. Stop the timer and update the cycle
> +         * counter to record the current value which is last programmed
> +         * value + the cycles passed so far.
> +         */
> +        s->hpmcycle_val = s->hpmcycle_val + (get_cycles() - s->hpmcycle_prev);
> +        timer_del(s->hpm_timer);
> +    }
> +}
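
The enable/disable halves above are the usual snapshot/accumulate
pattern: on enable, re-snapshot the free-running clock; on disable,
fold the elapsed delta into the stored value, so a read while counting
returns val + (now - prev). A self-contained model of that state
machine (all names here are hypothetical stand-ins, e.g. 'now' for
get_cycles(), not code from this patch):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct cycle_ctr {
    uint64_t val;   /* frozen/programmed count (s->hpmcycle_val) */
    uint64_t prev;  /* clock snapshot at last enable (s->hpmcycle_prev) */
    bool inhibited;
};

static void ctr_set_inhibit(struct cycle_ctr *c, bool inh, uint64_t now)
{
    if (c->inhibited == inh) {
        return;                   /* only a CY toggle needs work */
    }
    if (inh) {
        c->val += now - c->prev;  /* freeze: fold in elapsed cycles */
    } else {
        c->prev = now;            /* resume: restart the elapsed window */
    }
    c->inhibited = inh;
}

/* What a counter read returns: frozen value, or frozen + elapsed. */
static uint64_t ctr_read(const struct cycle_ctr *c, uint64_t now)
{
    return c->inhibited ? c->val : c->val + (now - c->prev);
}

int main(void)
{
    struct cycle_ctr c = { .val = 100, .prev = 1000, .inhibited = false };
    ctr_set_inhibit(&c, true, 1500);    /* freeze at 100 + 500 */
    assert(ctr_read(&c, 9999) == 600);  /* frozen reads ignore 'now' */
    ctr_set_inhibit(&c, false, 2000);   /* resume counting */
    assert(ctr_read(&c, 2300) == 900);  /* 600 + 300 elapsed */
    return 0;
}
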
> diff --git a/hw/riscv/riscv-iommu-hpm.h b/hw/riscv/riscv-iommu-hpm.h
> index cd896d3b7c..ee888650fb 100644
> --- a/hw/riscv/riscv-iommu-hpm.h
> +++ b/hw/riscv/riscv-iommu-hpm.h
> @@ -26,5 +26,6 @@ uint64_t riscv_iommu_hpmcycle_read(RISCVIOMMUState *s);
>  void riscv_iommu_hpm_incr_ctr(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
>                                unsigned event_id);
>  void riscv_iommu_hpm_timer_cb(void *priv);
> +void riscv_iommu_process_iocntinh_cy(RISCVIOMMUState *s, bool prev_cy_inh);
>
>  #endif
> diff --git a/hw/riscv/riscv-iommu.c b/hw/riscv/riscv-iommu.c
> index 2ec388ff3d..56ec2d6d42 100644
> --- a/hw/riscv/riscv-iommu.c
> +++ b/hw/riscv/riscv-iommu.c
> @@ -1923,6 +1923,27 @@ static void riscv_iommu_update_ipsr(RISCVIOMMUState *s, uint64_t data)
>      riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_IPSR, ipsr_set, ipsr_clr);
>  }
>
> +static void riscv_iommu_process_hpm_writes(RISCVIOMMUState *s,
> +                                           uint32_t regb,
> +                                           bool prev_cy_inh)
> +{
> +    switch (regb) {
> +    case RISCV_IOMMU_REG_IOCOUNTINH:
> +        riscv_iommu_process_iocntinh_cy(s, prev_cy_inh);
> +        break;
> +
> +    case RISCV_IOMMU_REG_IOHPMCYCLES:
> +    case RISCV_IOMMU_REG_IOHPMCYCLES + 4:
> +        /* not yet implemented */
> +        break;
> +
> +    case RISCV_IOMMU_REG_IOHPMEVT_BASE ...
> +        RISCV_IOMMU_REG_IOHPMEVT(RISCV_IOMMU_IOCOUNT_NUM) + 4:
> +        /* not yet implemented */
> +        break;
> +    }
> +}
> +
>  /*
>   * Write the resulting value of 'data' for the reg specified
>   * by 'reg_addr', after considering read-only/read-write/write-clear
> @@ -1950,6 +1971,7 @@ static MemTxResult riscv_iommu_mmio_write(void *opaque, hwaddr addr,
>      uint32_t regb = addr & ~3;
>      uint32_t busy = 0;
>      uint64_t val = 0;
> +    bool cy_inh = false;
>
>      if ((addr & (size - 1)) != 0) {
>          /* Unsupported MMIO alignment or access size */
> @@ -2017,6 +2039,15 @@ static MemTxResult riscv_iommu_mmio_write(void *opaque, hwaddr addr,
>          busy = RISCV_IOMMU_TR_REQ_CTL_GO_BUSY;
>          break;
>
> +    case RISCV_IOMMU_REG_IOCOUNTINH:
> +        if (addr != RISCV_IOMMU_REG_IOCOUNTINH) {
> +            break;
> +        }
> +        /* Store previous value of CY bit. */
> +        cy_inh = !!(riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_IOCOUNTINH) &
> +            RISCV_IOMMU_IOCOUNTINH_CY);
> +        break;
> +
>      default:
>          break;
>      }
> @@ -2035,6 +2066,12 @@ static MemTxResult riscv_iommu_mmio_write(void *opaque, hwaddr addr,
>          stl_le_p(&s->regs_rw[regb], rw | busy);
>      }
>
> +    /* Process HPM writes and update any internal state if needed. */
> +    if (regb >= RISCV_IOMMU_REG_IOCOUNTOVF &&
> +        regb <= (RISCV_IOMMU_REG_IOHPMEVT(RISCV_IOMMU_IOCOUNT_NUM) + 4)) {
> +        riscv_iommu_process_hpm_writes(s, regb, cy_inh);
> +    }
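
One more note on the gating here: every 32-bit aligned write offset
between IOCOUNTOVF and the high word of the last IOHPMEVT register is
funneled into the HPM helper after the register file has been updated.
A compile-time sketch of that window predicate; the offsets below are
illustrative placeholders, the authoritative values being the
RISCV_IOMMU_REG_* macros in riscv-iommu-bits.h:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Placeholder layout for the HPM register block. */
enum {
    REG_IOCOUNTOVF    = 0x58,   /* first register of the HPM block */
    REG_IOHPMEVT_BASE = 0x160,  /* event selectors, 64-bit each */
    IOCOUNT_NUM       = 31,
};
#define REG_IOHPMEVT(n) (REG_IOHPMEVT_BASE + (n) * 8)

/* True when a 32-bit aligned offset falls inside the HPM block,
 * including the high word of the last 64-bit event register. */
static bool is_hpm_reg(uint32_t regb)
{
    return regb >= REG_IOCOUNTOVF &&
           regb <= REG_IOHPMEVT(IOCOUNT_NUM) + 4;
}

int main(void)
{
    assert(is_hpm_reg(REG_IOCOUNTOVF));
    assert(!is_hpm_reg(REG_IOCOUNTOVF - 4));
    assert(is_hpm_reg(REG_IOHPMEVT(IOCOUNT_NUM) + 4));
    assert(!is_hpm_reg(REG_IOHPMEVT(IOCOUNT_NUM) + 8));
    return 0;
}
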
> +
>      if (process_fn) {
>          process_fn(s);
>      }
> --
> 2.47.1
>
>
