The ring write pointer has to be left pointing at the LINK TRB once all
the TRBs for a TD have been added, and advanced past it before the next
TRB is written.
This is currently achieved by advancing past a LINK in prepare_ring()
and, when the caller passes 'more_trbs_coming = true', conditionally in
queue_trb() (actually inc_enq()) after the TRB has been written.

Instead, just advance the pointer past a LINK TRB in queue_trb() before
writing the requested entry, and invert ring->cycle_state when the new
ring segment is the first one (i.e. the enqueue pointer has wrapped
back to the start of the ring).
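In queue_trb() this boils down to roughly the following (condensed from
the hunk below; the chain-bit/quirk handling and the special case for
the first TRB of a batch are elided):

	if (TRB_TYPE_LINK_LE32(ring->enqueue->link.control)) {
		/* Give the LINK TRB to the hardware ... */
		ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
		/* ... and step onto the next segment */
		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		/* Wrapped back to the first segment, so the producer
		 * cycle state flips.
		 */
		if (ring->enq_seg == ring->first_seg)
			ring->cycle_state ^= TRB_CYCLE;
	}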

The value saved in 'td->last_trb' is now incorrect (it could refer to
a LINK TRB). Correct this by saving 'ring->enqueue - 1' after
queue_trb() has been called for the last fragment. This is done via a
static inline function since it is very implementation specific.
It doesn't matter that td->first_trb may point to a LINK TRB; it will
be skipped.
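For reference, the helper added to xhci.h and its use in the queue_*_tx()
paths after the final fragment's queue_trb() call:

	static inline union xhci_trb *xhci_last_written_trb(struct xhci_ring *ring)
	{
		/* The last trb that contains a real request */
		return ring->enqueue - 1;
	}

	/* ... after queue_trb() for the last fragment of the TD: */
	td->last_trb = xhci_last_written_trb(ep_ring);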

Remove the now-unused more_trbs_coming parameter to queue_trb().

The code never generates a LINK TRB pointing to another LINK TRB, so
the loops in the old code are not needed. Similarly, inc_enq() has never
been called for event rings, so those checks aren't needed either.
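(Purely as an illustration, not part of this patch: since only the last
TRB of a segment is ever a LINK TRB, the 'no LINK following a LINK'
invariant could be written as a hypothetical debug check along these
lines.)

	/* Hypothetical sanity check - not added by this patch */
	static inline void xhci_check_no_adjacent_links(struct xhci_ring *ring)
	{
		struct xhci_segment *seg = ring->first_seg;

		do {
			/* A LINK TRB points at the next segment's first
			 * TRB, which must never itself be a LINK TRB.
			 */
			WARN_ON(TRB_TYPE_LINK_LE32(seg->trbs[0].link.control));
			seg = seg->next;
		} while (seg != ring->first_seg);
	}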

Signed-off-by: David Laight <david.lai...@aculab.com>
---
 drivers/usb/host/xhci-ring.c | 211 ++++++++++++-------------------------------
 drivers/usb/host/xhci.h      |   7 ++
 2 files changed, 64 insertions(+), 154 deletions(-)

diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 0d0bd7f..639704e 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -117,12 +117,6 @@ static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
                return TRB_TYPE_LINK_LE32(trb->link.control);
 }
 
-static int enqueue_is_link_trb(struct xhci_ring *ring)
-{
-       struct xhci_link_trb *link = &ring->enqueue->link;
-       return TRB_TYPE_LINK_LE32(link->control);
-}
-
 union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring)
 {
        /* Enqueue pointer can be left pointing to the link TRB,
@@ -187,81 +181,6 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
 }
 
 /*
- * See Cycle bit rules. SW is the consumer for the event ring only.
- * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
- *
- * If we've just enqueued a TRB that is in the middle of a TD (meaning the
- * chain bit is set), then set the chain bit in all the following link TRBs.
- * If we've enqueued the last TRB in a TD, make sure the following link TRBs
- * have their chain bit cleared (so that each Link TRB is a separate TD).
- *
- * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
- * set, but other sections talk about dealing with the chain bit set.  This was
- * fixed in the 0.96 specification errata, but we have to assume that all 0.95
- * xHCI hardware can't handle the chain bit being cleared on a link TRB.
- *
- * @more_trbs_coming:  Will you enqueue more TRBs before calling
- *                     prepare_transfer()?
- */
-static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
-                       bool more_trbs_coming)
-{
-       u32 chain;
-       union xhci_trb *next;
-
-       chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
-       /* If this is not event ring, there is one less usable TRB */
-       if (ring->type != TYPE_EVENT &&
-                       !last_trb(xhci, ring, ring->enq_seg, ring->enqueue))
-               ring->num_trbs_free--;
-       next = ++(ring->enqueue);
-
-       ring->enq_updates++;
-       /* Update the dequeue pointer further if that was a link TRB or we're at
-        * the end of an event ring segment (which doesn't have link TRBS)
-        */
-       while (last_trb(xhci, ring, ring->enq_seg, next)) {
-               if (ring->type != TYPE_EVENT) {
-                       /*
-                        * If the caller doesn't plan on enqueueing more
-                        * TDs before ringing the doorbell, then we
-                        * don't want to give the link TRB to the
-                        * hardware just yet.  We'll give the link TRB
-                        * back in prepare_ring() just before we enqueue
-                        * the TD at the top of the ring.
-                        */
-                       if (!chain && !more_trbs_coming)
-                               break;
-
-                       /* If we're not dealing with 0.95 hardware or
-                        * isoc rings on AMD 0.96 host,
-                        * carry over the chain bit of the previous TRB
-                        * (which may mean the chain bit is cleared).
-                        */
-                       if (!(ring->type == TYPE_ISOC &&
-                                       (xhci->quirks & XHCI_AMD_0x96_HOST))
-                                               && !xhci_link_trb_quirk(xhci)) {
-                               next->link.control &=
-                                       cpu_to_le32(~TRB_CHAIN);
-                               next->link.control |=
-                                       cpu_to_le32(chain);
-                       }
-                       /* Give this link TRB to the hardware */
-                       wmb();
-                       next->link.control ^= cpu_to_le32(TRB_CYCLE);
-
-                       /* Toggle the cycle bit after the last ring segment. */
-                       if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
-                               ring->cycle_state = (ring->cycle_state ? 0 : 1);
-                       }
-               }
-               ring->enq_seg = ring->enq_seg->next;
-               ring->enqueue = ring->enq_seg->trbs;
-               next = ring->enqueue;
-       }
-}
-
-/*
  * Check to see if there's room to enqueue num_trbs on the ring and make sure
  * enqueue pointer will not advance into dequeue segment. See rules above.
  */
@@ -2894,28 +2813,59 @@ irqreturn_t xhci_msi_irq(int irq, void *hcd)
 /*
  * Generic function for queueing a TRB on a ring.
  * The caller must have checked to make sure there's room on the ring.
- *
- * @more_trbs_coming:  Will you enqueue more TRBs before calling
- *                     prepare_transfer()?
  */
 static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
-               bool more_trbs_coming,
                u32 field1, u32 field2, u32 field3, u32 field4)
 {
-       struct xhci_generic_trb *trb;
+       union xhci_trb *trb;
+
+       trb = ring->enqueue;
+
+       /* Don't overwrite a link TRB, just advance past it */
+       if (TRB_TYPE_LINK_LE32(trb->link.control)) {
+               /* If we're not dealing with 0.95 hardware or isoc rings
+                * on AMD 0.96 host, copy the chain bit from the previous TRB.
+                * (For 0.95 the chain bit is always set.)
+                */
+               __le32 link_control = trb->link.control;
+               if (!xhci_link_trb_quirk(xhci) &&
+                               !(ring->type == TYPE_ISOC &&
+                                (xhci->quirks & XHCI_AMD_0x96_HOST))) {
+                       link_control &= cpu_to_le32(~TRB_CHAIN);
+                       link_control |= trb[-1].link.control &
+                                       cpu_to_le32(TRB_CHAIN);
+               }
+
+               if (trb != ring->enqueue_first)
+                       link_control ^= cpu_to_le32(TRB_CYCLE);
+               trb->link.control = link_control;
+
+               /* Advance to the next ring segment */
+               ring->enq_seg = ring->enq_seg->next;
+               ring->enqueue = ring->enq_seg->trbs;
+               trb = ring->enqueue;
+
+               /* Toggle the cycle bit after the last ring segment. */
+               if (ring->enq_seg == ring->first_seg)
+                       ring->cycle_state ^= TRB_CYCLE;
+       }
 
-       trb = &ring->enqueue->generic;
 
        /* Give the controller ownership of all but the first TRB */
        field4 |= ring->cycle_state;
-       if (trb == &ring->enqueue_first->generic)
+       if (trb == ring->enqueue_first)
                field4 ^= TRB_CYCLE;
 
-       trb->field[0] = cpu_to_le32(field1);
-       trb->field[1] = cpu_to_le32(field2);
-       trb->field[2] = cpu_to_le32(field3);
-       trb->field[3] = cpu_to_le32(field4);
-       inc_enq(xhci, ring, more_trbs_coming);
+       trb->generic.field[0] = cpu_to_le32(field1);
+       trb->generic.field[1] = cpu_to_le32(field2);
+       trb->generic.field[2] = cpu_to_le32(field3);
+       trb->generic.field[3] = cpu_to_le32(field4);
+
+       ring->num_trbs_free--;
+       ring->enq_updates++;
+
+       /* Leave enqueue pointing to the next entry - which might be a LINK */
+       ring->enqueue = trb + 1;
 }
 
 /*
@@ -3031,36 +2981,7 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
                }
        }
 
-       if (enqueue_is_link_trb(ep_ring)) {
-               struct xhci_ring *ring = ep_ring;
-               union xhci_trb *next;
-
-               next = ring->enqueue;
-
-               while (last_trb(xhci, ring, ring->enq_seg, next)) {
-                       /* If we're not dealing with 0.95 hardware or isoc rings
-                        * on AMD 0.96 host, clear the chain bit.
-                        */
-                       if (!xhci_link_trb_quirk(xhci) &&
-                                       !(ring->type == TYPE_ISOC &&
-                                        (xhci->quirks & XHCI_AMD_0x96_HOST)))
-                               next->link.control &= cpu_to_le32(~TRB_CHAIN);
-                       else
-                               next->link.control |= cpu_to_le32(TRB_CHAIN);
-
-                       /* If LINK follows a NOP, invert cycle */
-                       if (next != ep_ring->enqueue_first)
-                               next->link.control ^= cpu_to_le32(TRB_CYCLE);
-
-                       /* Toggle the cycle bit after the last ring segment. */
-                       if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
-                               ring->cycle_state = (ring->cycle_state ? 0 : 1);
-                       }
-                       ring->enq_seg = ring->enq_seg->next;
-                       ring->enqueue = ring->enq_seg->trbs;
-                       next = ring->enqueue;
-               }
-       }
+       /* queue_trb() will advance past a LINK TRB. */
 
        return 0;
 }
@@ -3281,7 +3202,6 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        int trb_buff_len, this_sg_len, running_total;
        unsigned int total_packet_count;
        u64 addr;
-       bool more_trbs_coming;
 
        ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
        if (!ep_ring)
@@ -3332,7 +3252,6 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                        field |= TRB_CHAIN;
                } else {
                        /* FIXME - add check for ZERO_PACKET flag before this */
-                       td->last_trb = ep_ring->enqueue;
                        field |= TRB_IOC;
                }
 
@@ -3362,11 +3281,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                        remainder |
                        TRB_INTR_TARGET(0);
 
-               if (num_trbs > 1)
-                       more_trbs_coming = true;
-               else
-                       more_trbs_coming = false;
-               queue_trb(xhci, ep_ring, more_trbs_coming,
+               queue_trb(xhci, ep_ring,
                                lower_32_bits(addr),
                                upper_32_bits(addr),
                                length_field,
@@ -3396,6 +3311,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                        trb_buff_len =
                                urb->transfer_buffer_length - running_total;
        } while (running_total < urb->transfer_buffer_length);
+       td->last_trb = xhci_last_written_trb(ep_ring);
 
        check_trb_math(urb, num_trbs, running_total);
        giveback_first_trb(xhci, slot_id, ep_index, ep_ring);
@@ -3410,7 +3326,6 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        struct urb_priv *urb_priv;
        struct xhci_td *td;
        int num_trbs;
-       bool more_trbs_coming;
        u32 field, length_field;
 
        int running_total, trb_buff_len, ret;
@@ -3473,7 +3388,6 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                        field |= TRB_CHAIN;
                } else {
                        /* FIXME - add check for ZERO_PACKET flag before this */
-                       td->last_trb = ep_ring->enqueue;
                        field |= TRB_IOC;
                }
 
@@ -3495,11 +3409,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                        remainder |
                        TRB_INTR_TARGET(0);
 
-               if (num_trbs > 1)
-                       more_trbs_coming = true;
-               else
-                       more_trbs_coming = false;
-               queue_trb(xhci, ep_ring, more_trbs_coming,
+               queue_trb(xhci, ep_ring,
                                lower_32_bits(addr),
                                upper_32_bits(addr),
                                length_field,
@@ -3513,6 +3423,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                if (trb_buff_len > TRB_MAX_BUFF_SIZE)
                        trb_buff_len = TRB_MAX_BUFF_SIZE;
        } while (running_total < urb->transfer_buffer_length);
+       td->last_trb = xhci_last_written_trb(ep_ring);
 
        check_trb_math(urb, num_trbs, running_total);
        giveback_first_trb(xhci, slot_id, ep_index, ep_ring);
@@ -3576,7 +3487,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                }
        }
 
-       queue_trb(xhci, ep_ring, true,
+       queue_trb(xhci, ep_ring,
                  setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
                  le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
                  TRB_LEN(8) | TRB_INTR_TARGET(0),
@@ -3596,29 +3507,29 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        if (urb->transfer_buffer_length > 0) {
                if (setup->bRequestType & USB_DIR_IN)
                        field |= TRB_DIR_IN;
-               queue_trb(xhci, ep_ring, true,
+               queue_trb(xhci, ep_ring,
                                lower_32_bits(urb->transfer_dma),
                                upper_32_bits(urb->transfer_dma),
                                length_field,
                                field);
        }
 
-       /* Save the DMA address of the last TRB in the TD */
-       td->last_trb = ep_ring->enqueue;
-
        /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
        /* If the device sent data, the status stage is an OUT transfer */
        if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
                field = 0;
        else
                field = TRB_DIR_IN;
-       queue_trb(xhci, ep_ring, false,
+       queue_trb(xhci, ep_ring,
                        0,
                        0,
                        TRB_INTR_TARGET(0),
                        /* Event on completion */
                        field | TRB_IOC | TRB_TYPE(TRB_STATUS));
 
+       /* Save the DMA address of the last TRB in the TD */
+       td->last_trb = xhci_last_written_trb(ep_ring);
+
        giveback_first_trb(xhci, slot_id, ep_index, ep_ring);
        return 0;
 }
@@ -3710,7 +3621,6 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        int running_total, trb_buff_len, td_len, td_remain_len, ret;
        u64 start_addr, addr;
        int i, j;
-       bool more_trbs_coming = true;
 
        ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
 
@@ -3782,7 +3692,6 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                        if (j < trbs_per_td - 1) {
                                field |= TRB_CHAIN;
                        } else {
-                               td->last_trb = ep_ring->enqueue;
                                field |= TRB_IOC;
                                if (xhci->hci_version == 0x100 &&
                                                !(xhci->quirks &
@@ -3791,12 +3700,6 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                                        if (i < num_tds - 1)
                                                field |= TRB_BEI;
                                }
-                               /*
-                                * We want to advance past LINK TRBs until the
-                                * very last TRB.
-                                */
-                               if (i == num_tds - 1)
-                                       more_trbs_coming = false;
                        }
 
                        /* Calculate TRB length */
@@ -3819,7 +3722,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                                remainder |
                                TRB_INTR_TARGET(0);
 
-                       queue_trb(xhci, ep_ring, more_trbs_coming,
+                       queue_trb(xhci, ep_ring,
                                lower_32_bits(addr),
                                upper_32_bits(addr),
                                length_field,
@@ -3829,6 +3732,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                        addr += trb_buff_len;
                        td_remain_len -= trb_buff_len;
                }
+               td->last_trb = xhci_last_written_trb(ep_ring);
        }
 
        if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
@@ -3938,8 +3842,7 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
                                        "unfailable commands failed.\n");
                return ret;
        }
-       queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
-                       field4);
+       queue_trb(xhci, xhci->cmd_ring, field1, field2, field3, field4);
        /* The 'first' TRB might be a LINK TRB... */
        giveback_first_trb(xhci, 0, 0, xhci->cmd_ring);
        return 0;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 6973c56..7c6ae8f 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1620,6 +1620,13 @@ static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci)
        return xhci->quirks & XHCI_LINK_TRB_QUIRK;
 }
 
+static inline union xhci_trb *xhci_last_written_trb(struct xhci_ring *ring)
+{
+       /* The last trb that contains a real request */
+       return ring->enqueue - 1;
+}
+
+
 /* xHCI debugging */
 void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num);
 void xhci_print_registers(struct xhci_hcd *xhci);
-- 
1.8.1.2