The VCPU config handles the hideleg, hedeleg, henvcfg, and hstateenX
CSR configuration of each VCPU. Factor out the VCPU config handling
into separate sources so that the configuration can be done differently
for guest HS-mode and guest VS/VU-mode.
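
For reference, the new interface and the points at which the generic
VCPU code invokes it (abbreviated sketch derived from the diff below,
not a complete listing):

    /* arch/riscv/include/asm/kvm_vcpu_config.h */
    void kvm_riscv_vcpu_config_init(struct kvm_vcpu *vcpu);        /* kvm_arch_vcpu_create() */
    void kvm_riscv_vcpu_config_guest_debug(struct kvm_vcpu *vcpu); /* set_guest_debug ioctl */
    void kvm_riscv_vcpu_config_ran_once(struct kvm_vcpu *vcpu);    /* first kvm_arch_vcpu_ioctl_run() */
    void kvm_riscv_vcpu_config_load(struct kvm_vcpu *vcpu);        /* kvm_arch_vcpu_load(), before other CSRs */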

Signed-off-by: Anup Patel <[email protected]>
---
 arch/riscv/include/asm/kvm_host.h        |  20 +----
 arch/riscv/include/asm/kvm_vcpu_config.h |  25 ++++++
 arch/riscv/kvm/Makefile                  |   1 +
 arch/riscv/kvm/main.c                    |   4 +-
 arch/riscv/kvm/vcpu.c                    |  79 ++++--------------
 arch/riscv/kvm/vcpu_config.c             | 101 +++++++++++++++++++++++
 6 files changed, 144 insertions(+), 86 deletions(-)
 create mode 100644 arch/riscv/include/asm/kvm_vcpu_config.h
 create mode 100644 arch/riscv/kvm/vcpu_config.c

diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index f3a41a1be678..11c3566318ae 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -18,6 +18,7 @@
 #include <asm/ptrace.h>
 #include <asm/kvm_tlb.h>
 #include <asm/kvm_vmid.h>
+#include <asm/kvm_vcpu_config.h>
 #include <asm/kvm_vcpu_fp.h>
 #include <asm/kvm_vcpu_insn.h>
 #include <asm/kvm_vcpu_sbi.h>
@@ -47,18 +48,6 @@
 
 #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
 
-#define KVM_HEDELEG_DEFAULT            (BIT(EXC_INST_MISALIGNED) | \
-                                        BIT(EXC_INST_ILLEGAL)     | \
-                                        BIT(EXC_BREAKPOINT)      | \
-                                        BIT(EXC_SYSCALL)         | \
-                                        BIT(EXC_INST_PAGE_FAULT) | \
-                                        BIT(EXC_LOAD_PAGE_FAULT) | \
-                                        BIT(EXC_STORE_PAGE_FAULT))
-
-#define KVM_HIDELEG_DEFAULT            (BIT(IRQ_VS_SOFT)  | \
-                                        BIT(IRQ_VS_TIMER) | \
-                                        BIT(IRQ_VS_EXT))
-
 #define KVM_DIRTY_LOG_MANUAL_CAPS      (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
                                         KVM_DIRTY_LOG_INITIALLY_SET)
 
@@ -167,13 +156,6 @@ struct kvm_vcpu_csr {
        unsigned long senvcfg;
 };
 
-struct kvm_vcpu_config {
-       u64 henvcfg;
-       u64 hstateen0;
-       unsigned long hedeleg;
-       unsigned long hideleg;
-};
-
 struct kvm_vcpu_smstateen_csr {
        unsigned long sstateen0;
 };
diff --git a/arch/riscv/include/asm/kvm_vcpu_config.h b/arch/riscv/include/asm/kvm_vcpu_config.h
new file mode 100644
index 000000000000..fcc15a0296b3
--- /dev/null
+++ b/arch/riscv/include/asm/kvm_vcpu_config.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2026 Qualcomm Technologies, Inc.
+ */
+
+#ifndef __KVM_VCPU_RISCV_CONFIG_H
+#define __KVM_VCPU_RISCV_CONFIG_H
+
+#include <linux/types.h>
+
+struct kvm_vcpu;
+
+struct kvm_vcpu_config {
+       u64 henvcfg;
+       u64 hstateen0;
+       unsigned long hedeleg;
+       unsigned long hideleg;
+};
+
+void kvm_riscv_vcpu_config_init(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_config_guest_debug(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_config_ran_once(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_config_load(struct kvm_vcpu *vcpu);
+
+#endif
diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile
index 07eab96189e7..296c2ba05089 100644
--- a/arch/riscv/kvm/Makefile
+++ b/arch/riscv/kvm/Makefile
@@ -21,6 +21,7 @@ kvm-y += mmu.o
 kvm-y += nacl.o
 kvm-y += tlb.o
 kvm-y += vcpu.o
+kvm-y += vcpu_config.o
 kvm-y += vcpu_exit.o
 kvm-y += vcpu_fp.o
 kvm-y += vcpu_insn.o
diff --git a/arch/riscv/kvm/main.c b/arch/riscv/kvm/main.c
index 45536af521f0..588a84783dff 100644
--- a/arch/riscv/kvm/main.c
+++ b/arch/riscv/kvm/main.c
@@ -41,8 +41,8 @@ int kvm_arch_enable_virtualization_cpu(void)
        if (rc)
                return rc;
 
-       csr_write(CSR_HEDELEG, KVM_HEDELEG_DEFAULT);
-       csr_write(CSR_HIDELEG, KVM_HIDELEG_DEFAULT);
+       csr_write(CSR_HEDELEG, 0);
+       csr_write(CSR_HIDELEG, 0);
 
        /* VS should access only the time counter directly. Everything else should trap */
        csr_write(CSR_HCOUNTEREN, 0x02);
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index 494e0517ca4e..62599fc002e8 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -133,11 +133,12 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
        /* Mark this VCPU never ran */
        vcpu->arch.ran_atleast_once = false;
 
-       vcpu->arch.cfg.hedeleg = KVM_HEDELEG_DEFAULT;
-       vcpu->arch.cfg.hideleg = KVM_HIDELEG_DEFAULT;
        vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
        bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX);
 
+       /* Setup VCPU config */
+       kvm_riscv_vcpu_config_init(vcpu);
+
        /* Setup ISA features available to VCPU */
        kvm_riscv_vcpu_setup_isa(vcpu);
 
@@ -530,57 +531,25 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
 {
-       if (dbg->control & KVM_GUESTDBG_ENABLE) {
+       if (dbg->control & KVM_GUESTDBG_ENABLE)
                vcpu->guest_debug = dbg->control;
-               vcpu->arch.cfg.hedeleg &= ~BIT(EXC_BREAKPOINT);
-       } else {
+       else
                vcpu->guest_debug = 0;
-               vcpu->arch.cfg.hedeleg |= BIT(EXC_BREAKPOINT);
-       }
-
+       kvm_riscv_vcpu_config_guest_debug(vcpu);
        return 0;
 }
 
-static void kvm_riscv_vcpu_setup_config(struct kvm_vcpu *vcpu)
-{
-       const unsigned long *isa = vcpu->arch.isa;
-       struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
-
-       if (riscv_isa_extension_available(isa, SVPBMT))
-               cfg->henvcfg |= ENVCFG_PBMTE;
-
-       if (riscv_isa_extension_available(isa, SSTC))
-               cfg->henvcfg |= ENVCFG_STCE;
-
-       if (riscv_isa_extension_available(isa, ZICBOM))
-               cfg->henvcfg |= (ENVCFG_CBIE | ENVCFG_CBCFE);
-
-       if (riscv_isa_extension_available(isa, ZICBOZ))
-               cfg->henvcfg |= ENVCFG_CBZE;
-
-       if (riscv_isa_extension_available(isa, SVADU) &&
-           !riscv_isa_extension_available(isa, SVADE))
-               cfg->henvcfg |= ENVCFG_ADUE;
-
-       if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
-               cfg->hstateen0 |= SMSTATEEN0_HSENVCFG;
-               if (riscv_isa_extension_available(isa, SSAIA))
-                       cfg->hstateen0 |= SMSTATEEN0_AIA_IMSIC |
-                                         SMSTATEEN0_AIA |
-                                         SMSTATEEN0_AIA_ISEL;
-               if (riscv_isa_extension_available(isa, SMSTATEEN))
-                       cfg->hstateen0 |= SMSTATEEN0_SSTATEEN0;
-       }
-
-       if (vcpu->guest_debug)
-               cfg->hedeleg &= ~BIT(EXC_BREAKPOINT);
-}
-
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
        void *nsh;
        struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
-       struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
+
+       /*
+        * Load VCPU config CSRs before other CSRs because
+        * the read/write behaviour of certain CSRs change
+        * based on VCPU config CSRs.
+        */
+       kvm_riscv_vcpu_config_load(vcpu);
 
        if (kvm_riscv_nacl_sync_csr_available()) {
                nsh = nacl_shmem();
@@ -591,18 +560,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                nacl_csr_write(nsh, CSR_VSEPC, csr->vsepc);
                nacl_csr_write(nsh, CSR_VSCAUSE, csr->vscause);
                nacl_csr_write(nsh, CSR_VSTVAL, csr->vstval);
-               nacl_csr_write(nsh, CSR_HEDELEG, cfg->hedeleg);
-               nacl_csr_write(nsh, CSR_HIDELEG, cfg->hideleg);
                nacl_csr_write(nsh, CSR_HVIP, csr->hvip);
                nacl_csr_write(nsh, CSR_VSATP, csr->vsatp);
-               nacl_csr_write(nsh, CSR_HENVCFG, cfg->henvcfg);
-               if (IS_ENABLED(CONFIG_32BIT))
-                       nacl_csr_write(nsh, CSR_HENVCFGH, cfg->henvcfg >> 32);
-               if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
-                       nacl_csr_write(nsh, CSR_HSTATEEN0, cfg->hstateen0);
-                       if (IS_ENABLED(CONFIG_32BIT))
-                               nacl_csr_write(nsh, CSR_HSTATEEN0H, cfg->hstateen0 >> 32);
-               }
        } else {
                csr_write(CSR_VSSTATUS, csr->vsstatus);
                csr_write(CSR_VSIE, csr->vsie);
@@ -611,18 +570,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                csr_write(CSR_VSEPC, csr->vsepc);
                csr_write(CSR_VSCAUSE, csr->vscause);
                csr_write(CSR_VSTVAL, csr->vstval);
-               csr_write(CSR_HEDELEG, cfg->hedeleg);
-               csr_write(CSR_HIDELEG, cfg->hideleg);
                csr_write(CSR_HVIP, csr->hvip);
                csr_write(CSR_VSATP, csr->vsatp);
-               csr_write(CSR_HENVCFG, cfg->henvcfg);
-               if (IS_ENABLED(CONFIG_32BIT))
-                       csr_write(CSR_HENVCFGH, cfg->henvcfg >> 32);
-               if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
-                       csr_write(CSR_HSTATEEN0, cfg->hstateen0);
-                       if (IS_ENABLED(CONFIG_32BIT))
-                               csr_write(CSR_HSTATEEN0H, cfg->hstateen0 >> 32);
-               }
        }
 
        kvm_riscv_mmu_update_hgatp(vcpu);
@@ -871,7 +820,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
        struct kvm_run *run = vcpu->run;
 
        if (!vcpu->arch.ran_atleast_once)
-               kvm_riscv_vcpu_setup_config(vcpu);
+               kvm_riscv_vcpu_config_ran_once(vcpu);
 
        /* Mark this VCPU ran at least once */
        vcpu->arch.ran_atleast_once = true;
diff --git a/arch/riscv/kvm/vcpu_config.c b/arch/riscv/kvm/vcpu_config.c
new file mode 100644
index 000000000000..eb7374402b07
--- /dev/null
+++ b/arch/riscv/kvm/vcpu_config.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2026 Qualcomm Technologies, Inc.
+ */
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_nacl.h>
+
+#define KVM_HEDELEG_DEFAULT    (BIT(EXC_INST_MISALIGNED) | \
+                                BIT(EXC_INST_ILLEGAL)     | \
+                                BIT(EXC_BREAKPOINT)      | \
+                                BIT(EXC_SYSCALL)         | \
+                                BIT(EXC_INST_PAGE_FAULT) | \
+                                BIT(EXC_LOAD_PAGE_FAULT) | \
+                                BIT(EXC_STORE_PAGE_FAULT))
+
+#define KVM_HIDELEG_DEFAULT    (BIT(IRQ_VS_SOFT)  | \
+                                BIT(IRQ_VS_TIMER) | \
+                                BIT(IRQ_VS_EXT))
+
+void kvm_riscv_vcpu_config_init(struct kvm_vcpu *vcpu)
+{
+       vcpu->arch.cfg.hedeleg = KVM_HEDELEG_DEFAULT;
+       vcpu->arch.cfg.hideleg = KVM_HIDELEG_DEFAULT;
+}
+
+void kvm_riscv_vcpu_config_guest_debug(struct kvm_vcpu *vcpu)
+{
+       struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
+
+       if (vcpu->guest_debug)
+               cfg->hedeleg &= ~BIT(EXC_BREAKPOINT);
+       else
+               cfg->hedeleg |= BIT(EXC_BREAKPOINT);
+}
+
+void kvm_riscv_vcpu_config_ran_once(struct kvm_vcpu *vcpu)
+{
+       const unsigned long *isa = vcpu->arch.isa;
+       struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
+
+       if (riscv_isa_extension_available(isa, SVPBMT))
+               cfg->henvcfg |= ENVCFG_PBMTE;
+
+       if (riscv_isa_extension_available(isa, SSTC))
+               cfg->henvcfg |= ENVCFG_STCE;
+
+       if (riscv_isa_extension_available(isa, ZICBOM))
+               cfg->henvcfg |= (ENVCFG_CBIE | ENVCFG_CBCFE);
+
+       if (riscv_isa_extension_available(isa, ZICBOZ))
+               cfg->henvcfg |= ENVCFG_CBZE;
+
+       if (riscv_isa_extension_available(isa, SVADU) &&
+           !riscv_isa_extension_available(isa, SVADE))
+               cfg->henvcfg |= ENVCFG_ADUE;
+
+       if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
+               cfg->hstateen0 |= SMSTATEEN0_HSENVCFG;
+               if (riscv_isa_extension_available(isa, SSAIA))
+                       cfg->hstateen0 |= SMSTATEEN0_AIA_IMSIC |
+                                         SMSTATEEN0_AIA |
+                                         SMSTATEEN0_AIA_ISEL;
+               if (riscv_isa_extension_available(isa, SMSTATEEN))
+                       cfg->hstateen0 |= SMSTATEEN0_SSTATEEN0;
+       }
+
+       if (vcpu->guest_debug)
+               cfg->hedeleg &= ~BIT(EXC_BREAKPOINT);
+}
+
+void kvm_riscv_vcpu_config_load(struct kvm_vcpu *vcpu)
+{
+       struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
+       void *nsh;
+
+       if (kvm_riscv_nacl_sync_csr_available()) {
+               nsh = nacl_shmem();
+               nacl_csr_write(nsh, CSR_HEDELEG, cfg->hedeleg);
+               nacl_csr_write(nsh, CSR_HIDELEG, cfg->hideleg);
+               nacl_csr_write(nsh, CSR_HENVCFG, cfg->henvcfg);
+               if (IS_ENABLED(CONFIG_32BIT))
+                       nacl_csr_write(nsh, CSR_HENVCFGH, cfg->henvcfg >> 32);
+               if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
+                       nacl_csr_write(nsh, CSR_HSTATEEN0, cfg->hstateen0);
+                       if (IS_ENABLED(CONFIG_32BIT))
+                               nacl_csr_write(nsh, CSR_HSTATEEN0H, cfg->hstateen0 >> 32);
+               }
+       } else {
+               csr_write(CSR_HEDELEG, cfg->hedeleg);
+               csr_write(CSR_HIDELEG, cfg->hideleg);
+               csr_write(CSR_HENVCFG, cfg->henvcfg);
+               if (IS_ENABLED(CONFIG_32BIT))
+                       csr_write(CSR_HENVCFGH, cfg->henvcfg >> 32);
+               if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
+                       csr_write(CSR_HSTATEEN0, cfg->hstateen0);
+                       if (IS_ENABLED(CONFIG_32BIT))
+                               csr_write(CSR_HSTATEEN0H, cfg->hstateen0 >> 32);
+               }
+       }
+}
-- 
2.43.0

