This reverts commit 0123456789abc ("KVM: x86: fix RSM into 64-bit
protected mode, round 2").  The previous patch achieves the same
result by treating SMBASE as a physical address: segment state can
now be read from SMRAM even after protected mode has been entered,
so stashing it beforehand is no longer necessary.
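
For reference, a minimal sketch of the resulting ordering in
rsm_load_state_64() (this just mirrors the hunks below):

	r = rsm_enter_protected_mode(ctxt, cr0, cr4);
	if (r != X86EMUL_CONTINUE)
		return r;

	/* GET_SMSTATE() now reads through a physical address, so the
	 * segment registers can be loaded directly afterwards. */
	for (i = 0; i < 6; i++) {
		r = rsm_load_seg_64(ctxt, smbase, i);
		if (r != X86EMUL_CONTINUE)
			return r;
	}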

Signed-off-by: Radim Krčmář <rkrc...@redhat.com>
---
 arch/x86/kvm/emulate.c | 37 +++++++------------------------------
 1 file changed, 7 insertions(+), 30 deletions(-)

diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 59e80e0de865..b60fed56671b 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2311,16 +2311,7 @@ static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
        return X86EMUL_CONTINUE;
 }
 
-struct rsm_stashed_seg_64 {
-       u16 selector;
-       struct desc_struct desc;
-       u32 base3;
-};
-
-static int rsm_stash_seg_64(struct x86_emulate_ctxt *ctxt,
-                           struct rsm_stashed_seg_64 *stash,
-                           u64 smbase,
-                           int n)
+static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
 {
        struct desc_struct desc;
        int offset;
@@ -2335,20 +2326,10 @@ static int rsm_stash_seg_64(struct x86_emulate_ctxt *ctxt,
        set_desc_base(&desc,      GET_SMSTATE(u32, smbase, offset + 8));
        base3 =                   GET_SMSTATE(u32, smbase, offset + 12);
 
-       stash[n].selector = selector;
-       stash[n].desc = desc;
-       stash[n].base3 = base3;
+       ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
        return X86EMUL_CONTINUE;
 }
 
-static inline void rsm_load_seg_64(struct x86_emulate_ctxt *ctxt,
-                                  struct rsm_stashed_seg_64 *stash,
-                                  int n)
-{
-       ctxt->ops->set_segment(ctxt, stash[n].selector, &stash[n].desc,
-                              stash[n].base3, n);
-}
-
 static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
                                     u64 cr0, u64 cr4)
 {
@@ -2438,7 +2419,6 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
        u32 base3;
        u16 selector;
        int i, r;
-       struct rsm_stashed_seg_64 stash[6];
 
        for (i = 0; i < 16; i++)
                *reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
@@ -2480,18 +2460,15 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
        dt.address =                GET_SMSTATE(u64, smbase, 0x7e68);
        ctxt->ops->set_gdt(ctxt, &dt);
 
-       for (i = 0; i < ARRAY_SIZE(stash); i++) {
-               r = rsm_stash_seg_64(ctxt, stash, smbase, i);
-               if (r != X86EMUL_CONTINUE)
-                       return r;
-       }
-
        r = rsm_enter_protected_mode(ctxt, cr0, cr4);
        if (r != X86EMUL_CONTINUE)
                return r;
 
-       for (i = 0; i < ARRAY_SIZE(stash); i++)
-               rsm_load_seg_64(ctxt, stash, i);
+       for (i = 0; i < 6; i++) {
+               r = rsm_load_seg_64(ctxt, smbase, i);
+               if (r != X86EMUL_CONTINUE)
+                       return r;
+       }
 
        return X86EMUL_CONTINUE;
 }
-- 
2.5.3
