The default kernel_fpu_begin() doesn't work on systems that support XMM but haven't yet enabled CR4.OSFXSR. This causes crashes when _mmx_memcpy() is called too early, because kernel_fpu_begin() resets MXCSR and LDMXCSR generates #UD while that bit is still clear.
Fix it by using kernel_fpu_begin_mask(KFPU_MMX) explicitly. This should
also be faster, since it skips both the reasonably fast LDMXCSR and the
rather slow FNINIT instructions.

Fixes: 7ad816762f9b ("x86/fpu: Reset MXCSR to default in kernel_fpu_begin()")
Reported-by: Krzysztof Mazur <krzys...@podlesie.net>
Signed-off-by: Andy Lutomirski <l...@kernel.org>
---
 arch/x86/lib/mmx_32.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
index 4321fa02e18d..daa80fa005fb 100644
--- a/arch/x86/lib/mmx_32.c
+++ b/arch/x86/lib/mmx_32.c
@@ -37,7 +37,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
 	p = to;
 	i = len >> 6; /* len/64 */
 
-	kernel_fpu_begin();
+	kernel_fpu_begin_mask(KFPU_MMX);
 
 	__asm__ __volatile__ (
 		"1: prefetch (%0)\n"		/* This set is 28 bytes */
@@ -127,7 +127,7 @@ static void fast_clear_page(void *page)
 {
 	int i;
 
-	kernel_fpu_begin();
+	kernel_fpu_begin_mask(KFPU_MMX);
 
 	__asm__ __volatile__ (
 		"  pxor %%mm0, %%mm0\n" : :
@@ -160,7 +160,7 @@ static void fast_copy_page(void *to, void *from)
 {
 	int i;
 
-	kernel_fpu_begin();
+	kernel_fpu_begin_mask(KFPU_MMX);
 
 	/*
 	 * maybe the prefetch stuff can go before the expensive fnsave...
@@ -247,7 +247,7 @@ static void fast_clear_page(void *page)
 {
 	int i;
 
-	kernel_fpu_begin();
+	kernel_fpu_begin_mask(KFPU_MMX);
 
 	__asm__ __volatile__ (
 		"  pxor %%mm0, %%mm0\n" : :
@@ -282,7 +282,7 @@ static void fast_copy_page(void *to, void *from)
 {
 	int i;
 
-	kernel_fpu_begin();
+	kernel_fpu_begin_mask(KFPU_MMX);
 
 	__asm__ __volatile__ (
 		"1: prefetch (%0)\n"
-- 
2.29.2
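
Note for reviewers who haven't followed the new API: below is a minimal,
purely illustrative C sketch of the failure mode and of what the masked
call avoids. Apart from kernel_fpu_begin(), kernel_fpu_begin_mask(),
KFPU_MMX and commit 7ad816762f9b, every name and value in it is an
assumption made for illustration, not something taken from the kernel tree.

/*
 * Illustrative sketch only, not kernel code.
 *
 * Since 7ad816762f9b, the plain kernel_fpu_begin() resets MXCSR on
 * XMM-capable CPUs.  LDMXCSR raises #UD while CR4.OSFXSR (bit 9) is
 * still clear, which is exactly the early-boot window in which
 * _mmx_memcpy() was crashing.
 */
#define MXCSR_DEFAULT_SKETCH	0x1f80	/* assumed default MXCSR value */

static void reset_mxcsr_sketch(void)
{
	unsigned int mxcsr = MXCSR_DEFAULT_SKETCH;

	/* #UD here if CR4.OSFXSR has not been set yet */
	asm volatile("ldmxcsr %0" : : "m" (mxcsr));
}

static void mmx_only_user_sketch(void)
{
	/*
	 * An MMX-only user needs neither the MXCSR reset nor FNINIT, and
	 * plain MMX instructions do not depend on CR4.OSFXSR at all, so a
	 * call like kernel_fpu_begin_mask(KFPU_MMX) can skip both and
	 * stay safe this early in boot.
	 */
	asm volatile("pxor %%mm0, %%mm0" : : : "memory");
}

In other words, the masked variant lets _mmx_memcpy() request only what
MMX code needs, which both removes the early-boot #UD and drops the
LDMXCSR/FNINIT overhead mentioned above.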