Remove the pointless 'const' qualifier from non-pointer input parameters.
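
For illustration only (not part of this patch, and with a made-up function
name): on a parameter passed by value, 'const' merely constrains the callee's
local copy and is invisible to callers; the C standard ignores such top-level
qualifiers when determining function type compatibility, so the two
hypothetical declarations below denote the same function type:

	/* Equivalent prototypes: the 'const' only affects the callee's copy. */
	static int sum_range(const int start, const int end);
	static int sum_range(int start, int end);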

Remove unnecessary parentheses that only signal uncertainty about operator
precedence.
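
A minimal illustration (not kernel code): the ternary operator binds more
loosely than '<' and '-', so the outer parentheses do not change how such an
initializer parses:

	int x = -1, y = 5;
	int a = (x < 0 ? y : y - x);	/* with the extra parentheses ... */
	int b =  x < 0 ? y : y - x;	/* ... and without: both yield 5   */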

Clarify copy_xstate_to_user() description.

No change in functionality.

Cc: Andy Lutomirski <l...@kernel.org>
Cc: Borislav Petkov <b...@alien8.de>
Cc: Dave Hansen <dave.han...@linux.intel.com>
Cc: Fenghua Yu <fenghua...@intel.com>
Cc: H. Peter Anvin <h...@zytor.com>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Oleg Nesterov <o...@redhat.com>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Rik van Riel <r...@redhat.com>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Yu-cheng Yu <yu-cheng...@intel.com>
Signed-off-by: Ingo Molnar <mi...@kernel.org>
---
 arch/x86/kernel/fpu/xstate.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 0a299468510f..9647e7256179 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -927,13 +927,13 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
 static inline int
 __copy_xstate_to_kernel(void *kbuf,
                        const void *data,
-                       unsigned int pos, unsigned int count, const int start_pos, const int end_pos)
+                       unsigned int pos, unsigned int count, int start_pos, int end_pos)
 {
        if ((count == 0) || (pos < start_pos))
                return 0;
 
        if (end_pos < 0 || pos < end_pos) {
-               unsigned int copy = (end_pos < 0 ? count : min(count, end_pos - pos));
+               unsigned int copy = end_pos < 0 ? count : min(count, end_pos - pos);
 
                memcpy(kbuf + pos, data, copy);
        }
@@ -1010,13 +1010,13 @@ int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int po
 }
 
 static inline int
-__copy_xstate_to_user(void __user *ubuf, const void *data, unsigned int pos, unsigned int count, const int start_pos, const int end_pos)
+__copy_xstate_to_user(void __user *ubuf, const void *data, unsigned int pos, unsigned int count, int start_pos, int end_pos)
 {
        if ((count == 0) || (pos < start_pos))
                return 0;
 
        if (end_pos < 0 || pos < end_pos) {
-               unsigned int copy = (end_pos < 0 ? count : min(count, end_pos - pos));
+               unsigned int copy = end_pos < 0 ? count : min(count, end_pos - pos);
 
                if (__copy_to_user(ubuf + pos, data, copy))
                        return -EFAULT;
@@ -1026,7 +1026,7 @@ __copy_xstate_to_user(void __user *ubuf, const void *data, unsigned int pos, uns
 
 /*
  * Convert from kernel XSAVES compacted format to standard format and copy
- * to a ptrace buffer. It supports partial copy but pos always starts from
+ * to a user-space buffer. It supports partial copy but pos always starts from
  * zero. This is called from xstateregs_get() and there we check the CPU
  * has XSAVES.
  */
-- 
2.11.0
