On Tue, Sep 06, 2016 at 01:56:40PM -0400, Kees Cook wrote:

SNIP

> >  static __must_check __always_inline int
> > diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
> > index a939f5ed7f89..c7a22a8a157e 100644
> > --- a/fs/proc/kcore.c
> > +++ b/fs/proc/kcore.c
> > @@ -516,7 +516,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
> >                         if (kern_addr_valid(start)) {
> >                                 unsigned long n;
> >
> > -                               n = copy_to_user(buffer, (char *)start, tsz);
> > +                               n = copy_to_user_nocheck(buffer, (char *)start, tsz);
> >                                 /*
> >                                  * We cannot distinguish between fault on source
> >                                  * and fault on destination. When this happens
> 
> This patch is x86-specific (but ARCH_PROC_KCORE_TEXT is on multiple
> architectures), which I don't think we want. Instead, let's get the
> usercopy helper code centralized (Al Viro is looking at this already),
> and then we can design arch-agnostic methods to handle this.
> 
> In the meantime, how about continuing to use a bounce buffer like
> already done in the vmalloc_or_module_addr() case immediately above?

ok, sounds good.. so something like below? (untested)

thanks,
jirka


---
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index a939f5ed7f89..de07c273f725 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -515,8 +515,20 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
                } else {
                        if (kern_addr_valid(start)) {
                                unsigned long n;
+                               char *buf;
 
-                               n = copy_to_user(buffer, (char *)start, tsz);
+                               buf = kzalloc(tsz, GFP_KERNEL);
+                               if (!buf)
+                                       return -ENOMEM;
+
+                               /*
+                                * Use a bounce buffer to bypass the hardened
+                                * usercopy check on kernel text addresses.
+                                */
+                               memcpy(buf, (char *)start, tsz);
+
+                               n = copy_to_user(buffer, buf, tsz);
+                               kfree(buf);
                                /*
                                 * We cannot distinguish between fault on source
                                 * and fault on destination. When this happens
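
For reference, the existing bounce-buffer path Kees points at is the
is_vmalloc_or_module_addr() branch just above this hunk in read_kcore().
Paraphrased from fs/proc/kcore.c of the same era (a sketch, not the exact
upstream lines), it reads roughly:

	} else if (is_vmalloc_or_module_addr((void *)start)) {
		char *elf_buf;

		/*
		 * Bounce buffer: vread() copies the vmalloc/module data
		 * into elf_buf, so the later copy_to_user() only ever
		 * sees a heap address and the hardened usercopy check
		 * never fires on a kernel text/vmalloc source.
		 */
		elf_buf = kzalloc(tsz, GFP_KERNEL);
		if (!elf_buf)
			return -ENOMEM;
		vread(elf_buf, (char *)start, tsz);
		/* zero-fill the user buffer even if nothing was read */
		if (copy_to_user(buffer, elf_buf, tsz)) {
			kfree(elf_buf);
			return -EFAULT;
		}
		kfree(elf_buf);
	}

The proposed hunk above follows the same pattern for the kern_addr_valid()
case, with memcpy() standing in for vread() since the source is directly
mapped kernel memory.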
