On Wed, Apr 30, 2014 at 09:54:10AM -0700, Richard Henderson wrote:
> On 04/29/2014 01:38 PM, Kevin O'Connor wrote:
> >      cpu_load_eflags(env, 0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C |
> >                                DF_MASK));
> >      env->eip = 0x00008000;
> > +    cpu_x86_update_cr0(env,
> > +                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK |
> > +                                      CR0_PG_MASK));
> > +    cpu_x86_update_cr4(env, 0);
> > +    env->dr[7] = 0x00000400;
> > +    CC_OP = CC_OP_EFLAGS;
> 
> Please place the CC_OP update immediately after cpu_load_eflags.
Sure - see below.

Paolo, if this conflicts with your work, you can use the previous version
of this patch.

-Kevin

>From 12abb36264ac441444d21efa382bacc9f8b657ca Mon Sep 17 00:00:00 2001
Message-Id: <12abb36264ac441444d21efa382bacc9f8b657ca.1398894385.git.ke...@koconnor.net>
In-Reply-To: <e4fe7c47ca705cbb3d4c27643c2d2ab60bfe59dd.1398894385.git.ke...@koconnor.net>
References: <e4fe7c47ca705cbb3d4c27643c2d2ab60bfe59dd.1398894385.git.ke...@koconnor.net>
From: Kevin O'Connor <ke...@koconnor.net>
Date: Tue, 29 Apr 2014 11:19:06 -0400
Subject: [PATCH 2/4] Set eflags and cr0 prior to calling cpu_x86_load_seg_cache() in smm_helper.c.
To: qemu-devel@nongnu.org

The cpu_x86_load_seg_cache() function inspects cr0 and eflags, so make
sure all changes to eflags and cr0 are done prior to loading the segment
caches.

Signed-off-by: Kevin O'Connor <ke...@koconnor.net>
---
 target-i386/smm_helper.c | 34 +++++++++++++++++-----------------
 1 file changed, 17 insertions(+), 17 deletions(-)

diff --git a/target-i386/smm_helper.c b/target-i386/smm_helper.c
index 35901c9..5b36f3f 100644
--- a/target-i386/smm_helper.c
+++ b/target-i386/smm_helper.c
@@ -162,7 +162,14 @@ void do_smm_enter(X86CPU *cpu)
 #endif
     cpu_load_eflags(env, 0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C |
                               DF_MASK));
+    CC_OP = CC_OP_EFLAGS;
     env->eip = 0x00008000;
+    cpu_x86_update_cr0(env,
+                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK |
+                                      CR0_PG_MASK));
+    cpu_x86_update_cr4(env, 0);
+    env->dr[7] = 0x00000400;
+
     cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                            0xffffffff, 0);
     cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
@@ -170,13 +177,6 @@ void do_smm_enter(X86CPU *cpu)
     cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
     cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
     cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
-
-    cpu_x86_update_cr0(env,
-                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK |
-                                      CR0_PG_MASK));
-    cpu_x86_update_cr4(env, 0);
-    env->dr[7] = 0x00000400;
-    CC_OP = CC_OP_EFLAGS;
 }
 
 void helper_rsm(CPUX86State *env)
@@ -191,16 +191,6 @@ void helper_rsm(CPUX86State *env)
 #ifdef TARGET_X86_64
     cpu_load_efer(env, ldq_phys(cs->as, sm_state + 0x7ed0));
 
-    for (i = 0; i < 6; i++) {
-        offset = 0x7e00 + i * 16;
-        cpu_x86_load_seg_cache(env, i,
-                               lduw_phys(cs->as, sm_state + offset),
-                               ldq_phys(cs->as, sm_state + offset + 8),
-                               ldl_phys(cs->as, sm_state + offset + 4),
-                               (lduw_phys(cs->as, sm_state + offset + 2) &
-                                0xf0ff) << 8);
-    }
-
     env->gdt.base = ldq_phys(cs->as, sm_state + 0x7e68);
     env->gdt.limit = ldl_phys(cs->as, sm_state + 0x7e64);
 
@@ -238,6 +228,16 @@ void helper_rsm(CPUX86State *env)
     cpu_x86_update_cr3(env, ldl_phys(cs->as, sm_state + 0x7f50));
     cpu_x86_update_cr0(env, ldl_phys(cs->as, sm_state + 0x7f58));
 
+    for (i = 0; i < 6; i++) {
+        offset = 0x7e00 + i * 16;
+        cpu_x86_load_seg_cache(env, i,
+                               lduw_phys(cs->as, sm_state + offset),
+                               ldq_phys(cs->as, sm_state + offset + 8),
+                               ldl_phys(cs->as, sm_state + offset + 4),
+                               (lduw_phys(cs->as, sm_state + offset + 2) &
+                                0xf0ff) << 8);
+    }
+
     val = ldl_phys(cs->as, sm_state + 0x7efc); /* revision ID */
     if (val & 0x20000) {
         env->smbase = ldl_phys(cs->as, sm_state + 0x7f00) & ~0x7fff;
-- 
1.9.0
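
As background on why the ordering matters: cpu_x86_load_seg_cache() derives
hidden per-segment state from the current values of cr0 and eflags, so a
segment cache loaded before the SMM mode switch would capture stale state.
The toy program below is only a sketch of that dependency, not QEMU code;
ToyCPU, load_seg_cache() and the "addseg" flag are hypothetical stand-ins
for env, the real helper and its hidden hflags.

    /* Toy model: hidden segment state is captured from cr0/eflags at the
     * moment the segment cache is loaded, loosely mirroring what
     * cpu_x86_load_seg_cache() does.  All names here are hypothetical. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CR0_PE  (1u << 0)   /* protected-mode enable, like CR0_PE_MASK */
    #define EFL_VM  (1u << 17)  /* virtual-8086 flag, like VM_MASK */

    typedef struct {
        uint32_t cr0;
        uint32_t eflags;
        bool     cs_addseg;     /* hidden flag cached when CS is loaded */
    } ToyCPU;

    static void load_seg_cache(ToyCPU *cpu)
    {
        /* Hidden state depends on the mode bits *at load time*. */
        cpu->cs_addseg = !(cpu->cr0 & CR0_PE) || (cpu->eflags & EFL_VM);
    }

    int main(void)
    {
        /* Pre-patch ordering: load the segment cache while CR0.PE is
         * still set, then drop to real mode -- the cached flag is stale. */
        ToyCPU a = { .cr0 = CR0_PE };
        load_seg_cache(&a);
        a.cr0 &= ~CR0_PE;
        printf("old order: addseg=%d (stale)\n", a.cs_addseg);

        /* Patched ordering: update CR0 first, then load the cache. */
        ToyCPU b = { .cr0 = CR0_PE };
        b.cr0 &= ~CR0_PE;
        load_seg_cache(&b);
        printf("new order: addseg=%d\n", b.cs_addseg);
        return 0;
    }

Built with any C99 compiler, the first printf reports addseg=0 even though
the CPU has just dropped to real mode; the second reports addseg=1. That
stale-state hazard is what the patch avoids by moving the cr0/eflags
updates ahead of the cpu_x86_load_seg_cache() calls.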