This patch removes the limitation of at most four active TDs on one
endpoint. We use the linked list implementation to manage all TDs,
which get added and removed by hardware_{en,de}queue.

Signed-off-by: Michael Grzeschik <m.grzesc...@pengutronix.de>
---
    Changes since v2:
     - checking for not page aligned first buffer, to add one more td
     - removed comment about multi td support from TODO list in core.c
    Changes since v1:
     - reworked ep_alloc so the td queue list is completely dynamic
     - rebased that patch on ci-for-greg

 drivers/usb/chipidea/core.c |   1 -
 drivers/usb/chipidea/udc.c  | 166 ++++++++++++++++++++++----------------------
 2 files changed, 82 insertions(+), 85 deletions(-)

diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 47b8da2..1df91e0 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -44,7 +44,6 @@
  * TODO List
  * - OTG
  * - Isochronous & Interrupt Traffic
- * - Handle requests which spawns into several TDs
  * - GET_STATUS(device) - always reports 0
  * - Gadget API (majority of optional features)
  * - Suspend & Remote Wakeup
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index f3498ad..a8504c9 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -392,16 +392,10 @@ static void vbus_work(struct work_struct *work)
  * UTIL block
  *****************************************************************************/
 
-static void setup_td_bits(struct td_node *tdnode, unsigned length)
-{
-       memset(tdnode->ptr, 0, sizeof(*tdnode->ptr));
-       tdnode->ptr->token = length << __ffs(TD_TOTAL_BYTES);
-       tdnode->ptr->token &= TD_TOTAL_BYTES;
-       tdnode->ptr->token |= TD_STATUS_ACTIVE;
-}
-
 static int add_td_to_list(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq, unsigned length, gfp_t gfp_flags)
 {
+       int i;
+       u32 temp;
        struct td_node *lastnode, *node = kzalloc(sizeof(struct td_node),
                                                  gfp_flags);
 
@@ -415,7 +409,19 @@ static int add_td_to_list(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq, unsi
                return -ENOMEM;
        }
 
-       setup_td_bits(node, length);
+       memset(node->ptr, 0, sizeof(struct ci13xxx_td));
+       node->ptr->token = length << __ffs(TD_TOTAL_BYTES);
+       node->ptr->token &= TD_TOTAL_BYTES;
+       node->ptr->token |= TD_STATUS_ACTIVE;
+
+       temp = (u32) (mReq->req.dma + mReq->req.actual);
+       if (length) {
+               node->ptr->page[0] = temp;
+               for (i = 1; i < TD_PAGE_COUNT; i++)
+                       node->ptr->page[i] = (temp + i * CI13XXX_PAGE_SIZE) & 
~TD_RESERVED_MASK;
+       }
+
+       mReq->req.actual += length;
 
        if (!list_empty(&mReq->tds)) {
                /* get the last entry */
@@ -440,9 +446,9 @@ static int add_td_to_list(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq, unsi
 static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq, gfp_t gfp_flags)
 {
        struct ci13xxx *ci = mEp->ci;
-       unsigned i;
        int ret = 0;
-       unsigned length = mReq->req.length;
+       unsigned rest = mReq->req.length;
+       int pages = TD_PAGE_COUNT;
        struct td_node *firstnode, *lastnode;
 
        /* don't queue twice */
@@ -455,19 +461,28 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq, g
        if (ret)
                return ret;
 
-       firstnode = list_first_entry(&mReq->tds,
-                       struct td_node, td);
+       /*
+        * The first buffer might not be page-aligned.
+        * In that case we have to span into one extra td.
+        */
+       if (mReq->req.dma % PAGE_SIZE)
+               pages--;
 
-       setup_td_bits(firstnode, length);
+       if (rest == 0)
+               add_td_to_list(mEp, mReq, 0, gfp_flags);
 
-       firstnode->ptr->page[0] = mReq->req.dma;
-       for (i = 1; i < TD_PAGE_COUNT; i++)
-               firstnode->ptr->page[i] =
-                       (mReq->req.dma + i * CI13XXX_PAGE_SIZE) & ~TD_RESERVED_MASK;
+       while (rest > 0) {
+               unsigned count = min(mReq->req.length - mReq->req.actual,
+                                       (unsigned)(pages * CI13XXX_PAGE_SIZE));
+               add_td_to_list(mEp, mReq, count, gfp_flags);
+               rest -= count;
+       }
 
-       if (mReq->req.zero && length && (length % mEp->ep.maxpacket == 0))
+       if (mReq->req.zero && mReq->req.length && (mReq->req.length % mEp->ep.maxpacket == 0))
                add_td_to_list(mEp, mReq, 0, gfp_flags);
 
+       firstnode = list_first_entry(&mReq->tds, struct td_node, td);
+
        lastnode = list_entry(mReq->tds.prev,
                struct td_node, td);
 
@@ -476,6 +491,7 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq, g
                lastnode->ptr->token |= TD_IOC;
        wmb();
 
+       mReq->req.actual = 0;
        if (!list_empty(&mEp->qh.queue)) {
                struct ci13xxx_req *mReqPrev;
                int n = hw_ep_bit(mEp->num, mEp->dir);
@@ -521,42 +537,53 @@ done:
 static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
 {
        u32 tmptoken;
-       struct td_node *node, *tmpnode, *firstnode;
+       struct td_node *node, *tmpnode;
+       unsigned remaining_length;
+       unsigned actual = mReq->req.length;
 
        if (mReq->req.status != -EALREADY)
                return -EINVAL;
 
-       firstnode = list_first_entry(&mReq->tds,
-               struct td_node, td);
+       mReq->req.status = 0;
 
        list_for_each_entry_safe(node, tmpnode, &mReq->tds, td) {
                tmptoken = readl(&node->ptr->token);
-               if ((TD_STATUS_ACTIVE & tmptoken) != 0)
+               if ((TD_STATUS_ACTIVE & tmptoken) != 0) {
+                       mReq->req.status = -EALREADY;
                        return -EBUSY;
-               if (node != firstnode) {
-                       dma_pool_free(mEp->td_pool, node->ptr, node->dma);
-                       list_del_init(&node->td);
-                       node->ptr = NULL;
-                       kfree(node);
                }
-       }
 
-       mReq->req.status = 0;
+               remaining_length = (tmptoken & TD_TOTAL_BYTES);
+               remaining_length >>= __ffs(TD_TOTAL_BYTES);
+               actual -= remaining_length;
 
-       usb_gadget_unmap_request(&mEp->ci->gadget, &mReq->req, mEp->dir);
+               mReq->req.status = tmptoken & TD_STATUS;
+               if ((TD_STATUS_HALTED & mReq->req.status)) {
+                       mReq->req.status = -EPIPE;
+                       break;
+               } else if ((TD_STATUS_DT_ERR & mReq->req.status)) {
+                       mReq->req.status = -EPROTO;
+                       break;
+               } else if ((TD_STATUS_TR_ERR & mReq->req.status)) {
+                       mReq->req.status = -EILSEQ;
+                       break;
+               }
 
-       mReq->req.status = tmptoken & TD_STATUS;
-       if ((TD_STATUS_HALTED & mReq->req.status) != 0)
-               mReq->req.status = -1;
-       else if ((TD_STATUS_DT_ERR & mReq->req.status) != 0)
-               mReq->req.status = -1;
-       else if ((TD_STATUS_TR_ERR & mReq->req.status) != 0)
-               mReq->req.status = -1;
+               if (remaining_length) {
+                       if (mEp->dir) {
+                               mReq->req.status = -EPROTO;
+                               break;
+                       }
+               }
+               dma_pool_free(mEp->td_pool, node->ptr, node->dma);
+               list_del_init(&node->td);
+               node->ptr = NULL;
+               kfree(node);
+       }
 
-       mReq->req.actual   = tmptoken & TD_TOTAL_BYTES;
-       mReq->req.actual >>= __ffs(TD_TOTAL_BYTES);
-       mReq->req.actual   = mReq->req.length - mReq->req.actual;
-       mReq->req.actual   = mReq->req.status ? 0 : mReq->req.actual;
+       usb_gadget_unmap_request(&mEp->ci->gadget, &mReq->req, mEp->dir);
+
+       mReq->req.actual = mReq->req.status ? 0 : actual;
 
        return mReq->req.actual;
 }
@@ -572,7 +599,7 @@ static int _ep_nuke(struct ci13xxx_ep *mEp)
 __releases(mEp->lock)
 __acquires(mEp->lock)
 {
-       struct td_node *node, *tmpnode, *firstnode;
+       struct td_node *node, *tmpnode;
        if (mEp == NULL)
                return -EINVAL;
 
@@ -585,16 +612,11 @@ __acquires(mEp->lock)
                        list_entry(mEp->qh.queue.next,
                                   struct ci13xxx_req, queue);
 
-               firstnode = list_first_entry(&mReq->tds,
-                       struct td_node, td);
-
                list_for_each_entry_safe(node, tmpnode, &mReq->tds, td) {
-                       if (node != firstnode) {
-                               dma_pool_free(mEp->td_pool, node->ptr, node->dma);
-                               list_del_init(&node->td);
-                               node->ptr = NULL;
-                               kfree(node);
-                       }
+                       dma_pool_free(mEp->td_pool, node->ptr, node->dma);
+                       list_del_init(&node->td);
+                       node->ptr = NULL;
+                       kfree(node);
                }
 
                list_del_init(&mReq->queue);
@@ -738,12 +760,6 @@ static int _ep_queue(struct usb_ep *ep, struct usb_request *req,
                goto done;
        }
 
-       if (req->length > (TD_PAGE_COUNT - 1) * CI13XXX_PAGE_SIZE) {
-               req->length = (TD_PAGE_COUNT - 1) * CI13XXX_PAGE_SIZE;
-               retval = -EMSGSIZE;
-               dev_warn(mEp->ci->dev, "request length truncated\n");
-       }
-
        trace_ci_ep_queue_req(mEp, retval);
 
        /* push request */
@@ -884,18 +900,13 @@ __acquires(mEp->lock)
        struct ci13xxx_req *mReq, *mReqTemp;
        struct ci13xxx_ep *mEpTemp = mEp;
        int retval = 0;
-       struct td_node *firstnode;
 
        list_for_each_entry_safe(mReq, mReqTemp, &mEp->qh.queue,
                        queue) {
-               firstnode = list_first_entry(&mReq->tds,
-                       struct td_node, td);
-
                retval = _hardware_dequeue(mEp, mReq);
                if (retval < 0)
                        break;
                list_del_init(&mReq->queue);
-               trace_ci_ep_complete_req(mEp, readl(&firstnode->ptr->token), retval);
                if (mReq->req.complete != NULL) {
                        spin_unlock(mEp->lock);
                        if ((mEp->type == USB_ENDPOINT_XFER_CONTROL) &&
@@ -908,8 +919,6 @@ __acquires(mEp->lock)
 
        if (retval == -EBUSY)
                retval = 0;
-       if (retval < 0)
-               trace_ci_ep_complete_req(mEp, mReq->ptr->token, retval);
 
        return retval;
 }
@@ -1210,27 +1219,14 @@ static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
 {
        struct ci13xxx_ep  *mEp  = container_of(ep, struct ci13xxx_ep, ep);
        struct ci13xxx_req *mReq = NULL;
-       struct td_node *node;
 
        if (ep == NULL)
                return NULL;
 
        mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags);
-       node = kzalloc(sizeof(struct td_node), gfp_flags);
-       if (mReq != NULL && node != NULL) {
+       if (mReq != NULL) {
                INIT_LIST_HEAD(&mReq->queue);
                INIT_LIST_HEAD(&mReq->tds);
-               INIT_LIST_HEAD(&node->td);
-
-               node->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags,
-                                          &node->dma);
-               if (node->ptr == NULL) {
-                       kfree(node);
-                       kfree(mReq);
-                       mReq = NULL;
-               } else {
-                       list_add_tail(&node->td, &mReq->tds);
-               }
        }
 
        trace_ci_ep_alloc_req(mEp, mReq == NULL);
@@ -1247,7 +1243,7 @@ static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
 {
        struct ci13xxx_ep  *mEp  = container_of(ep,  struct ci13xxx_ep, ep);
        struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
-       struct td_node *firstnode;
+       struct td_node *node, *tmpnode;
        unsigned long flags;
 
        if (ep == NULL || req == NULL) {
@@ -1259,11 +1255,13 @@ static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
 
        spin_lock_irqsave(mEp->lock, flags);
 
-       firstnode = list_first_entry(&mReq->tds,
-               struct td_node, td);
+       list_for_each_entry_safe(node, tmpnode, &mReq->tds, td) {
+               dma_pool_free(mEp->td_pool, node->ptr, node->dma);
+               list_del_init(&node->td);
+               node->ptr = NULL;
+               kfree(node);
+       }
 
-       if (firstnode->ptr)
-               dma_pool_free(mEp->td_pool, firstnode->ptr, firstnode->dma);
        kfree(mReq);
 
        trace_ci_ep_free_req(mEp, 0);
-- 
1.8.2.rc2

--
To unsubscribe from this list: send the line "unsubscribe linux-usb" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to