On Tue, Jan 14 2014, Robert Baldyga wrote:
> This patch adds asynchronous I/O support for FunctionFS endpoint
> files.

Thanks for doing this.  I never had enough time to finish this feature.

> @@ -343,6 +345,25 @@ static char *ffs_prepare_buffer(const char __user *buf, size_t len)
>       __attribute__((warn_unused_result, nonnull));
>  
>  
> +/*  ffs_io_data structure ***************************************************/
> +
> +struct ffs_io_data {
> +     int aio:1;
> +     int read:1;

Just make those bools.
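
I.e. something along these lines (just a sketch, rest of the fields as
you have them):

        struct ffs_io_data {
                bool aio;
                bool read;
                /* ... remaining fields unchanged ... */
        };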

> +     

Nit: trailing white-space here and in other places. ;)

> +     struct kiocb *kiocb;
> +     const struct iovec *iovec;
> +     unsigned long nr_segs;
> +     char __user *buf;
> +     size_t len;
> +     
> +     struct mm_struct *mm;
> +     struct work_struct work;
> +     
> +     struct usb_ep *ep;
> +     struct usb_request *req;
> +};
> +
>  /* Control file aka ep0 *****************************************************/
>  
>  static void ffs_ep0_complete(struct usb_ep *ep, struct usb_request *req)
> @@ -788,8 +809,51 @@ static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
>       }
>  }
>  
> -static ssize_t ffs_epfile_io(struct file *file,
> -                          char __user *buf, size_t len, int read)
> +static void ffs_user_copy_worker(struct work_struct *work)
> +{
> +     size_t len = 0;
> +     int i = 0;
> +     int ret;
> +
> +     struct ffs_io_data *io_data = container_of(work, struct ffs_io_data, work);

Nit: Over 80 characters.
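
E.g. splitting the declaration and the assignment keeps it under the
limit (just one way to do it):

        struct ffs_io_data *io_data;

        io_data = container_of(work, struct ffs_io_data, work);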

> +
> +     use_mm(io_data->mm);
> +     for (i=0; i < io_data->nr_segs; i++) {
> +             ret = copy_to_user(io_data->iovec[i].iov_base,
> +                                &io_data->buf[len],
> +                                io_data->iovec[i].iov_len);
> +             len += io_data->iovec[i].iov_len;
> +     }
> +     unuse_mm(io_data->mm);
> +
> +     aio_complete(io_data->kiocb, 0, 0);
> +
> +     kfree(io_data->iovec);
> +     kfree(io_data->buf);
> +     kfree(io_data);
> +}

> +static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
>  {
>       struct ffs_epfile *epfile = file->private_data;
>       struct ffs_ep *ep;
> @@ -825,25 +889,12 @@ first_try:
>               }
>  
>               /* Do we halt? */
> -             halt = !read == !epfile->in;
> +             halt = !io_data->read == !epfile->in;
>               if (halt && epfile->isoc) {
>                       ret = -EINVAL;
>                       goto error;
>               }
>  
> -             /* Allocate & copy */
> -             if (!halt && !data) {
> -                     data = kzalloc(len, GFP_KERNEL);
> -                     if (unlikely(!data))
> -                             return -ENOMEM;
> -
> -                     if (!read &&
> -                         unlikely(__copy_from_user(data, buf, len))) {
> -                             ret = -EFAULT;
> -                             goto error;
> -                     }
> -             }
> -
>               /* We will be using request */
>               ret = ffs_mutex_lock(&epfile->mutex,
>                                    file->f_flags & O_NONBLOCK);
> @@ -869,33 +920,86 @@ first_try:
>               spin_unlock_irq(&epfile->ffs->eps_lock);
>               ret = -EBADMSG;
>       } else {
> -             /* Fire the request */
> -             DECLARE_COMPLETION_ONSTACK(done);
> +             struct usb_request *req;
>  
> -             struct usb_request *req = ep->req;
> -             req->context  = &done;
> -             req->complete = ffs_epfile_io_complete;
> -             req->buf      = data;
> -             req->length   = len;
> +             data = kzalloc(io_data->len, GFP_KERNEL);
> +             if (unlikely(!data))
> +                     return -ENOMEM;
> +             
> +             if(io_data->aio) {
> +                     size_t len = 0;
> +                     int i;
> +                     for (i=0; !io_data->read && i < io_data->nr_segs; i++) {
> +                             if (unlikely(copy_from_user(&data[len],

The whole if-else-if-else is under a spin lock, so you cannot call
copy_from_user() there.  This is why in the original code the copying
was done before this piece of code.
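
I.e. keep the allocation and the copying where the original code had
them, before the mutex and eps_lock are taken.  Just an untested sketch
reusing your names (the AIO branch gathers the iovec into the bounce
buffer, the sync branch keeps the old __copy_from_user()):

        data = kzalloc(io_data->len, GFP_KERNEL);
        if (unlikely(!data))
                return -ENOMEM;

        if (!io_data->read) {
                if (io_data->aio) {
                        /* gather the iovec segments into one linear buffer */
                        size_t pos = 0;
                        unsigned long i;

                        for (i = 0; i < io_data->nr_segs; ++i) {
                                if (unlikely(copy_from_user(data + pos,
                                                io_data->iovec[i].iov_base,
                                                io_data->iovec[i].iov_len))) {
                                        ret = -EFAULT;
                                        goto error;
                                }
                                pos += io_data->iovec[i].iov_len;
                        }
                } else if (unlikely(__copy_from_user(data, io_data->buf,
                                                     io_data->len))) {
                        ret = -EFAULT;
                        goto error;
                }
        }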

> +                                          io_data->iovec[i].iov_base,
> +                                          io_data->iovec[i].iov_len) != 0)) {
> +                                     ret = -EFAULT;
> +                                     goto error;
> +                             }
> +                             len += io_data->iovec[i].iov_len;
> +                     }
>  
> -             ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
> +                     req = usb_ep_alloc_request(ep->ep, GFP_KERNEL);
> +                     if(unlikely(!req))
> +                             goto error;
>  
> -             spin_unlock_irq(&epfile->ffs->eps_lock);
> +                     req->buf      = data;
> +                     req->length   = io_data->len;
>  
> -             if (unlikely(ret < 0)) {
> -                     /* nop */
> -             } else if (unlikely(wait_for_completion_interruptible(&done))) {
> -                     ret = -EINTR;
> -                     usb_ep_dequeue(ep->ep, req);
> -             } else {
> -                     ret = ep->status;
> -                     if (read && ret > 0 &&
> -                         unlikely(copy_to_user(buf, data, ret)))
> +                     io_data->buf = data;
> +                     io_data->ep = ep->ep;
> +                     io_data->req = req;
> +
> +                     req->context  = io_data;
> +                     req->complete = ffs_epfile_async_io_complete;
> +
> +                     ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
> +                     if(unlikely(ret)) {
> +                             usb_ep_free_request(ep->ep, req);
> +                             goto error;
> +                     }
> +                     ret = -EIOCBQUEUED;
> +
> +                     spin_unlock_irq(&epfile->ffs->eps_lock);
> +             }
> +             else {
> +                     DECLARE_COMPLETION_ONSTACK(done);
> +
> +                     if (!io_data->read &&
> +                         unlikely(__copy_from_user(data, io_data->buf,
> +                                                   io_data->len))) {
>                               ret = -EFAULT;
> +                             goto error;
> +                     }
> +
> +                     req = ep->req;
> +                     req->buf      = data;
> +                     req->length   = io_data->len;
> +
> +                     req->context  = &done;
> +                     req->complete = ffs_epfile_io_complete;
> +             
> +                     ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
> +
> +                     spin_unlock_irq(&epfile->ffs->eps_lock);
> +
> +                     if (unlikely(ret < 0)) {
> +                             /* nop */
> +             } else if (unlikely(wait_for_completion_interruptible(&done))) {
> +                             ret = -EINTR;
> +                             usb_ep_dequeue(ep->ep, req);
> +                     } else {
> +                             ret = ep->status;
> +                             if (io_data->read && ret > 0 &&
> +                                 unlikely(copy_to_user(io_data->buf, data, ret)))
> +                                     ret = -EFAULT;
> +                     }
> +                     kfree(data);
>               }
>       }
>  
>       mutex_unlock(&epfile->mutex);
> +     return ret;
>  error:
>       kfree(data);
>       return ret;

> +static ssize_t ffs_epfile_aio_write(struct kiocb *kiocb, const struct iovec *iovec, unsigned long nr_segs, loff_t loff)
> +{
> +     struct ffs_io_data *io_data;
> +
> +     ENTER();
> +
> +     io_data = kmalloc(sizeof(struct ffs_io_data), GFP_KERNEL);

        io_data = kmalloc(sizeof(*io_data), GFP_KERNEL);
        if (!io_data)
                return -ENOMEM;

> +     io_data->aio = 1;
> +     io_data->read = 0;
> +     io_data->kiocb = kiocb;
> +     io_data->iovec = iovec;
> +     io_data->nr_segs = nr_segs;
> +     io_data->len = kiocb->ki_nbytes;
> +     io_data->mm = current->mm;
> +
> +     kiocb->private = io_data;
> +
> +     kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
> +
> +     return ffs_epfile_io(kiocb->ki_filp, io_data);
> +}
> +
> +static ssize_t ffs_epfile_aio_read(struct kiocb *kiocb, const struct iovec *iovec, unsigned long nr_segs, loff_t loff)
> +{
> +     struct ffs_io_data *io_data;
> +     struct iovec *iovec_copy;
> +
> +     ENTER();
> +
> +     iovec_copy = kmalloc(sizeof(struct iovec)*nr_segs, GFP_KERNEL);

        iovec_copy = kmalloc_array(nr_segs, sizeof(*iovec_copy), GFP_KERNEL);
        if (!iovec_copy)
                return -ENOMEM;

Besides, do we really need to make a copy of the iovec?  I haven't
checked the AIO API, but that sounds strange to me.

> +     memcpy(iovec_copy, iovec, sizeof(struct iovec)*nr_segs);
> +
> +     io_data = kmalloc(sizeof(struct ffs_io_data), GFP_KERNEL);

        io_data = kmalloc(sizeof(*io_data), GFP_KERNEL);
        if (!io_data) {
                kfree(iovec_copy);
                return -ENOMEM;
        }

> +     io_data->aio = 1;
> +     io_data->read = 1;
> +     io_data->kiocb = kiocb;
> +     io_data->iovec = iovec_copy;
> +     io_data->nr_segs = nr_segs;
> +     io_data->len = kiocb->ki_nbytes;
> +     io_data->mm = current->mm;
> +
> +     kiocb->private = io_data;
> +
> +     kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
> +
> +     return ffs_epfile_io(kiocb->ki_filp, io_data);
> +}
> +
>  static int
>  ffs_epfile_release(struct inode *inode, struct file *file)
>  {

-- 
Best regards,                                         _     _
.o. | Liege of Serenely Enlightened Majesty of      o' \,=./ `o
..o | Computer Science,  Michał “mina86” Nazarewicz    (o o)
ooo +--<m...@google.com>--<xmpp:min...@jabber.org>--ooO--(_)--Ooo--
