Commit-ID:  f4cb1cc18f364d761d5614eb6293cccc6647f259
Gitweb:     http://git.kernel.org/tip/f4cb1cc18f364d761d5614eb6293cccc6647f259
Author:     Fenghua Yu <fenghua...@intel.com>
AuthorDate: Sat, 16 Nov 2013 12:37:01 -0800
Committer:  H. Peter Anvin <h...@linux.intel.com>
CommitDate: Sat, 16 Nov 2013 18:00:58 -0800
x86-64, copy_user: Remove zero byte check before copy user buffer.

The rep movsb instruction handles a zero-byte copy by itself: with a
zero count it simply falls through, copying nothing. As Linus pointed
out, there is no need to check for a zero size in the kernel. Removing
this redundant check saves a few cycles in the copy user functions.

Reported-by: Linus Torvalds <torva...@linux-foundation.org>
Signed-off-by: Fenghua Yu <fenghua...@intel.com>
Link: http://lkml.kernel.org/r/1384634221-6006-1-git-send-email-fenghua...@intel.com
Signed-off-by: H. Peter Anvin <h...@linux.intel.com>
---
 arch/x86/lib/copy_user_64.S | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index a30ca15..ffe4eb9 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -236,8 +236,6 @@ ENDPROC(copy_user_generic_unrolled)
 ENTRY(copy_user_generic_string)
 	CFI_STARTPROC
 	ASM_STAC
-	andl %edx,%edx
-	jz 4f
 	cmpl $8,%edx
 	jb 2f		/* less than 8 bytes, go to byte copy loop */
 	ALIGN_DESTINATION
@@ -249,7 +247,7 @@ ENTRY(copy_user_generic_string)
 2:	movl %edx,%ecx
 3:	rep
 	movsb
-4:	xorl %eax,%eax
+	xorl %eax,%eax
 	ASM_CLAC
 	ret
 
@@ -279,12 +277,10 @@ ENDPROC(copy_user_generic_string)
 ENTRY(copy_user_enhanced_fast_string)
 	CFI_STARTPROC
 	ASM_STAC
-	andl %edx,%edx
-	jz 2f
 	movl %edx,%ecx
 1:	rep
 	movsb
-2:	xorl %eax,%eax
+	xorl %eax,%eax
 	ASM_CLAC
 	ret
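For readers who want to see the guarantee the patch relies on, here is a
minimal user-space sketch (not part of the patch; movsb_copy and the
buffer names are illustrative) showing that rep movsb with a zero count
in %rcx falls straight through without touching memory, which is why
the andl/jz guard above was redundant. It assumes an x86-64 target and
a GCC/Clang toolchain with extended inline asm.

/* Sketch only: demonstrates that `rep movsb` is a no-op when the
 * count register is zero, so no explicit zero-size guard is needed
 * before it.  Assumes x86-64 and GCC/Clang extended asm. */
#include <assert.h>
#include <stdio.h>
#include <string.h>

static void movsb_copy(void *dst, const void *src, unsigned long len)
{
	/* %rdi = dst, %rsi = src, %rcx = len, the same register setup
	 * the kernel's string copy routines use.  With len == 0 the
	 * CPU copies nothing and execution falls through. */
	asm volatile("rep movsb"
		     : "+D" (dst), "+S" (src), "+c" (len)
		     : : "memory");
}

int main(void)
{
	char dst[16] = "unchanged";
	const char src[16] = "overwritten";

	movsb_copy(dst, src, 0);		/* zero bytes: dst untouched */
	assert(strcmp(dst, "unchanged") == 0);

	movsb_copy(dst, src, sizeof(src));	/* a normal copy still works */
	assert(strcmp(dst, "overwritten") == 0);

	puts("rep movsb handled the zero-length case without a guard branch");
	return 0;
}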