There is no point in splitting a 64-bit value into two 32-bit parameters
  for almost all of the calls to queue_trb().
Change the definition of the 'generic' TRB to have the named fields
  phys_addr, info_len and type_flags, following the most common usage.
Add make_immd_data() to combine two 32-bit values into the 64-bit one,
  and use it in the one place where immediate data is passed.
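
For example, a typical data-TRB call site (this one from queue_bulk_sg_tx())
  changes from:

        queue_trb(xhci, ep_ring, more_trbs_coming,
                        lower_32_bits(addr),
                        upper_32_bits(addr),
                        length_field,
                        field | TRB_TYPE(TRB_NORMAL));

  to:

        queue_trb(xhci, ep_ring, more_trbs_coming, addr, length_field,
                        field | TRB_TYPE(TRB_NORMAL));

The setup stage in xhci_queue_ctrl_tx() is the only caller passing
  immediate data; it now builds the 64-bit value with make_immd_data().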

Signed-off-by: David Laight <david.lai...@aculab.com>
---
Patch is against 3.13.0-rc8

 drivers/usb/host/xhci-dbg.c   |   8 +-
 drivers/usb/host/xhci-ring.c  | 166 ++++++++++++++++++------------------------
 drivers/usb/host/xhci-trace.h |   7 +-
 drivers/usb/host/xhci.c       |   3 +-
 drivers/usb/host/xhci.h       |  15 +++-
 5 files changed, 92 insertions(+), 107 deletions(-)

diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 73503a8..bd5f3ea 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -241,10 +241,10 @@ void xhci_print_registers(struct xhci_hcd *xhci)
 
 void xhci_print_trb_offsets(struct xhci_hcd *xhci, union xhci_trb *trb)
 {
-       int i;
-       for (i = 0; i < 4; ++i)
-               xhci_dbg(xhci, "Offset 0x%x = 0x%x\n",
-                               i*4, trb->generic.field[i]);
+       xhci_dbg(xhci, "phys_addr %#12llx, info %#10x, type %#10x\n",
+               le64_to_cpu(trb->generic.phys_addr),
+               le32_to_cpu(trb->generic.info_len),
+               le32_to_cpu(trb->generic.type_flags));
 }
 
 /**
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 53c2e29..bdd2ccd 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -214,7 +214,7 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
        union xhci_trb *next;
        unsigned long long addr;
 
-       chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
+       chain = le32_to_cpu(ring->enqueue->generic.type_flags) & TRB_CHAIN;
        /* If this is not event ring, there is one less usable TRB */
        if (ring->type != TYPE_EVENT &&
                        !last_trb(xhci, ring, ring->enq_seg, ring->enqueue))
@@ -474,7 +474,7 @@ static struct xhci_segment *find_trb_seg(
        while (cur_seg->trbs > trb ||
                        &cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
                generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
-               if (generic_trb->field[3] & cpu_to_le32(LINK_TOGGLE))
+               if (generic_trb->type_flags & cpu_to_le32(LINK_TOGGLE))
                        *cycle_state ^= 0x1;
                cur_seg = cur_seg->next;
                if (cur_seg == start_seg)
@@ -594,8 +594,8 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
        }
 
        trb = &state->new_deq_ptr->generic;
-       if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
-           (trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
+       if (TRB_TYPE_LINK_LE32(trb->type_flags) &&
+           (trb->type_flags & cpu_to_le32(LINK_TOGGLE)))
                state->new_cycle_state ^= 0x1;
        next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
 
@@ -637,16 +637,16 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
        for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
                        true;
                        next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
-               if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) {
+               if (TRB_TYPE_LINK_LE32(cur_trb->generic.type_flags)) {
                        /* Unchain any chained Link TRBs, but
                         * leave the pointers intact.
                         */
-                       cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
+                       cur_trb->generic.type_flags &= cpu_to_le32(~TRB_CHAIN);
                        /* Flip the cycle bit (link TRBs can't be the first
                         * or last TRB).
                         */
                        if (flip_cycle)
-                               cur_trb->generic.field[3] ^=
+                               cur_trb->generic.type_flags ^=
                                        cpu_to_le32(TRB_CYCLE);
                        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                        "Cancel (unchain) link TRB");
@@ -658,17 +658,16 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
                                        cur_seg,
                                        (unsigned long long)cur_seg->dma);
                } else {
-                       cur_trb->generic.field[0] = 0;
-                       cur_trb->generic.field[1] = 0;
-                       cur_trb->generic.field[2] = 0;
+                       cur_trb->generic.phys_addr = 0;
+                       cur_trb->generic.info_len = 0;
                        /* Preserve only the cycle bit of this TRB */
-                       cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
+                       cur_trb->generic.type_flags &= cpu_to_le32(TRB_CYCLE);
                        /* Flip the cycle bit except on the first or last TRB */
                        if (flip_cycle && cur_trb != cur_td->first_trb &&
                                        cur_trb != cur_td->last_trb)
-                               cur_trb->generic.field[3] ^=
+                               cur_trb->generic.type_flags ^=
                                        cpu_to_le32(TRB_CYCLE);
-                       cur_trb->generic.field[3] |= cpu_to_le32(
+                       cur_trb->generic.type_flags |= cpu_to_le32(
                                TRB_TYPE(TRB_TR_NOOP));
                        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                        "TRB to noop at offset 0x%llx",
@@ -778,7 +777,7 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
 
        struct xhci_dequeue_state deq_state;
 
-       if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
+       if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.type_flags)))) {
                virt_dev = xhci->devs[slot_id];
                if (virt_dev)
                        handle_cmd_in_cmd_wait_list(xhci, virt_dev,
@@ -791,7 +790,7 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
        }
 
        memset(&deq_state, 0, sizeof(deq_state));
-       ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
+       ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.type_flags));
        ep = &xhci->devs[slot_id]->eps[ep_index];
 
        if (list_empty(&ep->cancelled_td_list)) {
@@ -1082,8 +1081,8 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
        struct xhci_ep_ctx *ep_ctx;
        struct xhci_slot_ctx *slot_ctx;
 
-       ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
-       stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
+       ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.type_flags));
+       stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.info_len));
        dev = xhci->devs[slot_id];
 
        ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
@@ -1168,7 +1167,7 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
 {
        unsigned int ep_index;
 
-       ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
+       ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.type_flags));
        /* This command will only fail if the endpoint wasn't halted,
         * but we don't care.
         */
@@ -1267,7 +1266,7 @@ static void xhci_cmd_to_noop(struct xhci_hcd *xhci, struct xhci_cd *cur_cd)
                        cmd_trb != xhci->cmd_ring->enqueue;
                        next_trb(xhci, xhci->cmd_ring, &cur_seg, &cmd_trb)) {
                /* If the trb is link trb, continue */
-               if (TRB_TYPE_LINK_LE32(cmd_trb->generic.field[3]))
+               if (TRB_TYPE_LINK_LE32(cmd_trb->generic.type_flags))
                        continue;
 
                if (cur_cd->cmd_trb == cmd_trb) {
@@ -1280,14 +1279,13 @@ static void xhci_cmd_to_noop(struct xhci_hcd *xhci, struct xhci_cd *cur_cd)
                                        cur_cd->command, COMP_CMD_STOP);
 
                        /* get cycle state from the origin command trb */
-                       cycle_state = le32_to_cpu(cmd_trb->generic.field[3])
+                       cycle_state = le32_to_cpu(cmd_trb->generic.type_flags)
                                & TRB_CYCLE;
 
                        /* modify the command trb to NO OP command */
-                       cmd_trb->generic.field[0] = 0;
-                       cmd_trb->generic.field[1] = 0;
-                       cmd_trb->generic.field[2] = 0;
-                       cmd_trb->generic.field[3] = cpu_to_le32(
+                       cmd_trb->generic.phys_addr = 0;
+                       cmd_trb->generic.info_len = 0;
+                       cmd_trb->generic.type_flags = cpu_to_le32(
                                        TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
                        break;
                }
@@ -1550,7 +1548,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
                        return;
        }
 
-       cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
+       cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.type_flags));
        switch (cmd_type) {
        case TRB_ENABLE_SLOT:
                xhci_handle_cmd_enable_slot(xhci, slot_id, cmd_comp_code);
@@ -1569,24 +1567,24 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
                break;
        case TRB_STOP_RING:
                WARN_ON(slot_id != TRB_TO_SLOT_ID(
-                               le32_to_cpu(cmd_trb->generic.field[3])));
+                               le32_to_cpu(cmd_trb->generic.type_flags)));
                xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, event);
                break;
        case TRB_SET_DEQ:
                WARN_ON(slot_id != TRB_TO_SLOT_ID(
-                               le32_to_cpu(cmd_trb->generic.field[3])));
+                               le32_to_cpu(cmd_trb->generic.type_flags)));
                xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
                break;
        case TRB_CMD_NOOP:
                break;
        case TRB_RESET_EP:
                WARN_ON(slot_id != TRB_TO_SLOT_ID(
-                               le32_to_cpu(cmd_trb->generic.field[3])));
+                               le32_to_cpu(cmd_trb->generic.type_flags)));
                xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
                break;
        case TRB_RESET_DEV:
                WARN_ON(slot_id != TRB_TO_SLOT_ID(
-                               le32_to_cpu(cmd_trb->generic.field[3])));
+                               le32_to_cpu(cmd_trb->generic.type_flags)));
                xhci_handle_cmd_reset_dev(xhci, slot_id, event);
                break;
        case TRB_NEC_GET_FW:
@@ -1605,7 +1603,7 @@ static void handle_vendor_event(struct xhci_hcd *xhci,
 {
        u32 trb_type;
 
-       trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
+       trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.type_flags));
        xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
        if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
                handle_cmd_completion(xhci, &event->event_cmd);
@@ -1655,7 +1653,7 @@ static void handle_device_notification(struct xhci_hcd *xhci,
        u32 slot_id;
        struct usb_device *udev;
 
-       slot_id = TRB_TO_SLOT_ID(event->generic.field[3]);
+       slot_id = TRB_TO_SLOT_ID(event->generic.type_flags);
        if (!xhci->devs[slot_id]) {
                xhci_warn(xhci, "Device Notification event for "
                                "unused slot %u\n", slot_id);
@@ -1684,11 +1682,11 @@ static void handle_port_status(struct xhci_hcd *xhci,
        bool bogus_port_status = false;
 
        /* Port status change events always have a successful completion code */
-       if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) {
+       if (GET_COMP_CODE(le32_to_cpu(event->generic.info_len)) != COMP_SUCCESS) {
                xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
                xhci->error_bitmask |= 1 << 8;
        }
-       port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
+       port_id = GET_PORT_ID(le32_to_cpu(event->psc_event.port));
        xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
 
        max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
@@ -2246,11 +2244,11 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
                for (cur_trb = ep_ring->dequeue,
                     cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
                     next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
-                       if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
-                           !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
-                               len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
+                       if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.type_flags) &&
+                           !TRB_TYPE_LINK_LE32(cur_trb->generic.type_flags))
+                               len += TRB_LEN(le32_to_cpu(cur_trb->generic.info_len));
                }
-               len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
+               len += TRB_LEN(le32_to_cpu(cur_trb->generic.info_len)) -
                        EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
 
                if (trb_comp_code != COMP_STOP_INVAL) {
@@ -2379,17 +2377,17 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
                for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
                                cur_trb != event_trb;
                                next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
-                       if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
-                           !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
+                       if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.type_flags) &&
+                           !TRB_TYPE_LINK_LE32(cur_trb->generic.type_flags))
                                td->urb->actual_length +=
-                                       TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
+                                       TRB_LEN(le32_to_cpu(cur_trb->generic.info_len));
                }
                /* If the ring didn't stop on a Link or No-op TRB, add
                 * in the actual bytes transferred from the Normal TRB
                 */
                if (trb_comp_code != COMP_STOP_INVAL)
                        td->urb->actual_length +=
-                               TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
+                               TRB_LEN(le32_to_cpu(cur_trb->generic.info_len)) -
                                EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
        }
 
@@ -2666,7 +2664,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                 * corresponding TD has been cancelled. Just ignore
                 * the TD.
                 */
-               if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) {
+               if (TRB_TYPE_NOOP_LE32(event_trb->generic.type_flags)) {
                        xhci_dbg(xhci,
                                 "event_trb is a no-op TRB. Skip it\n");
                        goto cleanup;
@@ -2922,16 +2920,14 @@ irqreturn_t xhci_msi_irq(int irq, void *hcd)
  *                     prepare_transfer()?
  */
 static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
-               bool more_trbs_coming,
-               u32 field1, u32 field2, u32 field3, u32 field4)
+               bool more_trbs_coming, u64 physaddr, u32 info, u32 type)
 {
        struct xhci_generic_trb *trb;
 
        trb = &ring->enqueue->generic;
-       trb->field[0] = cpu_to_le32(field1);
-       trb->field[1] = cpu_to_le32(field2);
-       trb->field[2] = cpu_to_le32(field3);
-       trb->field[3] = cpu_to_le32(field4);
+       trb->phys_addr = cpu_to_le64(physaddr);   /* May be immediate data */
+       trb->info_len = cpu_to_le32(info);
+       trb->type_flags = cpu_to_le32(type);
        inc_enq(xhci, ring, more_trbs_coming);
 }
 
@@ -3015,10 +3011,9 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
                                        ep_ring->cycle_state);
                        ep_ring->num_trbs_free -= usable;
                        do {
-                               trb->generic.field[0] = 0;
-                               trb->generic.field[1] = 0;
-                               trb->generic.field[2] = 0;
-                               trb->generic.field[3] = nop_cmd;
+                               trb->generic.phys_addr = 0;
+                               trb->generic.info_len = 0;
+                               trb->generic.type_flags = nop_cmd;
                                trb++;
                        } while (--usable);
                        ep_ring->enqueue = trb;
@@ -3184,9 +3179,9 @@ static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
         */
        wmb();
        if (start_cycle)
-               start_trb->field[3] |= cpu_to_le32(start_cycle);
+               start_trb->type_flags |= cpu_to_le32(start_cycle);
        else
-               start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
+               start_trb->type_flags &= cpu_to_le32(~TRB_CYCLE);
        xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
 }
 
@@ -3396,10 +3391,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                        more_trbs_coming = true;
                else
                        more_trbs_coming = false;
-               queue_trb(xhci, ep_ring, more_trbs_coming,
-                               lower_32_bits(addr),
-                               upper_32_bits(addr),
-                               length_field,
+               queue_trb(xhci, ep_ring, more_trbs_coming, addr, length_field,
                                field | TRB_TYPE(TRB_NORMAL));
                --num_trbs;
                running_total += trb_buff_len;
@@ -3551,10 +3543,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                        more_trbs_coming = true;
                else
                        more_trbs_coming = false;
-               queue_trb(xhci, ep_ring, more_trbs_coming,
-                               lower_32_bits(addr),
-                               upper_32_bits(addr),
-                               length_field,
+               queue_trb(xhci, ep_ring, more_trbs_coming, addr, length_field,
                                field | TRB_TYPE(TRB_NORMAL));
                --num_trbs;
                running_total += trb_buff_len;
@@ -3641,9 +3630,9 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                }
        }
 
-       queue_trb(xhci, ep_ring, true,
+       queue_trb(xhci, ep_ring, true, make_immd_data(
                  setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
-                 le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
+                 le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16),
                  TRB_LEN(8) | TRB_INTR_TARGET(0),
                  /* Immediate data in pointer */
                  field);
@@ -3661,10 +3650,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        if (urb->transfer_buffer_length > 0) {
                if (setup->bRequestType & USB_DIR_IN)
                        field |= TRB_DIR_IN;
-               queue_trb(xhci, ep_ring, true,
-                               lower_32_bits(urb->transfer_dma),
-                               upper_32_bits(urb->transfer_dma),
-                               length_field,
+               queue_trb(xhci, ep_ring, true, urb->transfer_dma, length_field,
                                field | ep_ring->cycle_state);
        }
 
@@ -3677,10 +3663,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                field = 0;
        else
                field = TRB_DIR_IN;
-       queue_trb(xhci, ep_ring, false,
-                       0,
-                       0,
-                       TRB_INTR_TARGET(0),
+       queue_trb(xhci, ep_ring, false, 0, TRB_INTR_TARGET(0),
                        /* Event on completion */
                        field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
 
@@ -3893,11 +3876,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                                remainder |
                                TRB_INTR_TARGET(0);
 
-                       queue_trb(xhci, ep_ring, more_trbs_coming,
-                               lower_32_bits(addr),
-                               upper_32_bits(addr),
-                               length_field,
-                               field);
+                       queue_trb(xhci, ep_ring, more_trbs_coming, addr,
+                               length_field, field);
                        running_total += trb_buff_len;
 
                        addr += trb_buff_len;
@@ -4024,8 +4004,8 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
  * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
  * because the command event handler may want to resubmit a failed command.
  */
-static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
-               u32 field3, u32 field4, bool command_must_succeed)
+static int queue_command(struct xhci_hcd *xhci, u64 phys_addr, u32 len,
+               u32 type, bool command_must_succeed)
 {
        int reserved_trbs = xhci->cmd_ring_reserved_trbs;
        int ret;
@@ -4042,15 +4022,15 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
                                        "unfailable commands failed.\n");
                return ret;
        }
-       queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
-                       field4 | xhci->cmd_ring->cycle_state);
+       queue_trb(xhci, xhci->cmd_ring, false, phys_addr, len,
+                       type | xhci->cmd_ring->cycle_state);
        return 0;
 }
 
 /* Queue a slot enable or disable request on the command ring */
 int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
 {
-       return queue_command(xhci, 0, 0, 0,
+       return queue_command(xhci, 0, 0,
                        TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
 }
 
@@ -4058,22 +4038,21 @@ int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
 int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
                u32 slot_id)
 {
-       return queue_command(xhci, lower_32_bits(in_ctx_ptr),
-                       upper_32_bits(in_ctx_ptr), 0,
+       return queue_command(xhci, in_ctx_ptr, 0,
                        TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id),
                        false);
 }
 
 int xhci_queue_vendor_command(struct xhci_hcd *xhci,
-               u32 field1, u32 field2, u32 field3, u32 field4)
+               u64 phys_addr, u32 len, u32 type)
 {
-       return queue_command(xhci, field1, field2, field3, field4, false);
+       return queue_command(xhci, phys_addr, len, type, false);
 }
 
 /* Queue a reset device command TRB */
 int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
 {
-       return queue_command(xhci, 0, 0, 0,
+       return queue_command(xhci, 0, 0,
                        TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
                        false);
 }
@@ -4082,8 +4061,7 @@ int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
 int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
                u32 slot_id, bool command_must_succeed)
 {
-       return queue_command(xhci, lower_32_bits(in_ctx_ptr),
-                       upper_32_bits(in_ctx_ptr), 0,
+       return queue_command(xhci, in_ctx_ptr, 0,
                        TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
                        command_must_succeed);
 }
@@ -4092,8 +4070,7 @@ int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
 int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
                u32 slot_id, bool command_must_succeed)
 {
-       return queue_command(xhci, lower_32_bits(in_ctx_ptr),
-                       upper_32_bits(in_ctx_ptr), 0,
+       return queue_command(xhci, in_ctx_ptr, 0,
                        TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
                        command_must_succeed);
 }
@@ -4110,7 +4087,7 @@ int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
        u32 type = TRB_TYPE(TRB_STOP_RING);
        u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);
 
-       return queue_command(xhci, 0, 0, 0,
+       return queue_command(xhci, 0, 0,
                        trb_slot_id | trb_ep_index | type | trb_suspend, false);
 }
 
@@ -4144,8 +4121,7 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
        }
        ep->queued_deq_seg = deq_seg;
        ep->queued_deq_ptr = deq_ptr;
-       return queue_command(xhci, lower_32_bits(addr) | cycle_state,
-                       upper_32_bits(addr), trb_stream_id,
+       return queue_command(xhci, addr | cycle_state, trb_stream_id,
                        trb_slot_id | trb_ep_index | type, false);
 }
 
@@ -4156,6 +4132,6 @@ int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
        u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
        u32 type = TRB_TYPE(TRB_RESET_EP);
 
-       return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type,
+       return queue_command(xhci, 0, 0, trb_slot_id | trb_ep_index | type,
                        false);
 }
diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
index 20364cc..7def375 100644
--- a/drivers/usb/host/xhci-trace.h
+++ b/drivers/usb/host/xhci-trace.h
@@ -120,10 +120,9 @@ DECLARE_EVENT_CLASS(xhci_log_event,
        ),
        TP_fast_assign(
                __entry->va = trb_va;
-               __entry->dma = le64_to_cpu(((u64)ev->field[1]) << 32 |
-                                               ev->field[0]);
-               __entry->status = le32_to_cpu(ev->field[2]);
-               __entry->flags = le32_to_cpu(ev->field[3]);
+               __entry->dma = le64_to_cpu(ev->phys_addr);
+               __entry->status = le32_to_cpu(ev->info_len);
+               __entry->flags = le32_to_cpu(ev->type_flags);
                memcpy(__get_dynamic_array(trb), trb_va,
                        sizeof(struct xhci_generic_trb));
        ),
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 4265b48..b8b1938 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -632,8 +632,7 @@ int xhci_run(struct usb_hcd *hcd)
        xhci_print_ir_set(xhci, 0);
 
        if (xhci->quirks & XHCI_NEC_HOST)
-               xhci_queue_vendor_command(xhci, 0, 0, 0,
-                               TRB_TYPE(TRB_NEC_GET_FW));
+               xhci_queue_vendor_command(xhci, 0, 0, TRB_TYPE(TRB_NEC_GET_FW));
 
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "Finished xhci_run for USB2 roothub");
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 03c74b7..ca49d05 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1116,6 +1116,12 @@ struct xhci_event_cmd {
 
 
 /* Port Status Change Event TRB fields */
+struct xhci_psc_event {
+       __le32 port;
+       __le32 pad;
+       __le32 status;
+       __le32 type;
+};
 /* Port ID - bits 31:24 */
 #define GET_PORT_ID(p)         (((p) & (0xff << 24)) >> 24)
 
@@ -1159,13 +1165,18 @@ struct xhci_event_cmd {
 #define TRB_SIA                        (1<<31)
 
 struct xhci_generic_trb {
-       __le32 field[4];
+       __le64  phys_addr;      /* Can be data itself (TRB_IDT set) */
+       __le32  info_len;       /* Typically includes any length */
+       __le32  type_flags;     /* Low 16 bits are almost type-independent */
 };
 
+#define make_immd_data(lo, hi) (((u64)0 + (hi)) << 32 | (lo))
+
 union xhci_trb {
        struct xhci_link_trb            link;
        struct xhci_transfer_event      trans_event;
        struct xhci_event_cmd           event_cmd;
+       struct xhci_psc_event           psc_event;
        struct xhci_generic_trb         generic;
 };
 
@@ -1815,7 +1826,7 @@ int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id);
 int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
                u32 slot_id);
 int xhci_queue_vendor_command(struct xhci_hcd *xhci,
-               u32 field1, u32 field2, u32 field3, u32 field4);
+               u64 phys_addr, u32 len, u32 type);
 int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
                unsigned int ep_index, int suspend);
 int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
-- 
1.8.1.2


