On Tue 17-10-17 09:35:57, James Hogan wrote:
> On Tue, Oct 17, 2017 at 09:37:48AM +0200, Michal Hocko wrote:
[...]
> > This would be something like. I wanted to share elf_vm_mmap but didn't
> > find a proper place to not cause include dependency hell so I bailed out
> > to c&p.
> > ---
> > diff --git a/arch/metag/kernel/process.c b/arch/metag/kernel/process.c
> > index c4606ce743d2..b20596b4c4c2 100644
> > --- a/arch/metag/kernel/process.c
> > +++ b/arch/metag/kernel/process.c
> > @@ -378,6 +378,29 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
> >  
> >  #define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)
> >  
> > +static unsigned long elf_vm_mmap(struct file *filep, unsigned long addr,
> > +           unsigned long size, int prot, int type, unsigned long off)
> > +{
> > +   unsigned long map_addr;
> > +
> > +   /*
> > +    * If caller requests the mapping at a specific place, make sure we fail
> > +    * rather than potentially clobber an existing mapping which can have
> > +    * security consequences (e.g. smash over the stack area).
> > +    */
> > +   map_addr = vm_mmap(filep, addr, size, prot, type & ~MAP_FIXED, off);
> > +   if (BAD_ADDR(map_addr))
> > +           return map_addr;
> > +
> > +   if ((type & MAP_FIXED) && map_addr != addr) {
> > +           pr_info("Uhuuh, elf segement at %p requested but the memory is 
> > mapped already\n",
> > +                           (void*)addr);
> > +           return -EAGAIN;
> > +   }
> > +
> > +   return map_addr;
> > +}
> > +
> >  unsigned long __metag_elf_map(struct file *filep, unsigned long addr,
> >                           struct elf_phdr *eppnt, int prot, int type,
> >                           unsigned long total_size)
> > @@ -410,11 +433,11 @@ unsigned long __metag_elf_map(struct file *filep, 
> > unsigned long addr,
> >     */
> >     if (total_size) {
> >             total_size = ELF_PAGEALIGN(total_size);
> > -           map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
> > +           map_addr = elf_vm_mmap(filep, addr, total_size, prot, type, 
> > off);
> >             if (!BAD_ADDR(map_addr))
> >                     vm_munmap(map_addr+size, total_size-size);
> >     } else
> > -           map_addr = vm_mmap(filep, addr, size, prot, type, off);
> > +           map_addr = elf_vm_mmap(filep, addr, size, prot, type, off);
> >  
> >     if (!BAD_ADDR(map_addr) && tcm_tag != TCM_INVALID_TAG) {
> >             struct tcm_allocation *tcm;
> 
> Yeh that looks reasonable to me.

Thanks for double checking. I will make sure to CC you when reposting
the patch after the other concerns are sorted out.

-- 
Michal Hocko
SUSE Labs

Reply via email to