Signed-off-by: Alistair Francis <alistair.fran...@xilinx.com>
---
There is an indentation error in this patch in the gem_transmit function. I
have written it like that to make it easier to see the changes. It is fixed
in the next patch.
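Reviewer aid, not part of the patch: the per-queue interrupt bookkeeping in the
diff boils down to "a write to a queue's ENABLE register clears bits in that
queue's mask, a write to its DISABLE register sets them, and the queue's IRQ is
asserted while (status & ~mask) is non-zero". The standalone sketch below
models only that rule; every name in it (queue_status, queue_mask,
queue_int_enable, ...) is made up for illustration and does not come from
cadence_gem.c.

/*
 * Reviewer aid only -- NOT part of the patch.  Minimal model of per-queue
 * interrupt masking: ENABLE writes unmask bits, DISABLE writes mask bits,
 * and an IRQ is asserted while (status & ~mask) != 0.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_QUEUES 8

static uint32_t queue_status[NUM_QUEUES]; /* analogue of a per-queue STATUS */
static uint32_t queue_mask[NUM_QUEUES];   /* analogue of a per-queue MASK   */

static void queue_int_enable(int q, uint32_t val)
{
    queue_mask[q] &= ~val;      /* ENABLE write unmasks the written bits */
}

static void queue_int_disable(int q, uint32_t val)
{
    queue_mask[q] |= val;       /* DISABLE write masks the written bits */
}

static bool queue_irq_asserted(int q)
{
    return (queue_status[q] & ~queue_mask[q]) != 0;
}

int main(void)
{
    /* Raise a TX-complete style event on queue 2 while it is masked. */
    queue_int_disable(2, 1u << 7);
    queue_status[2] |= 1u << 7;
    printf("masked:   irq asserted = %d\n", queue_irq_asserted(2));

    /* Unmask it, as a guest write to that queue's ENABLE register would. */
    queue_int_enable(2, 1u << 7);
    printf("unmasked: irq asserted = %d\n", queue_irq_asserted(2));
    return 0;
}

Running the sketch prints "masked: irq asserted = 0" then
"unmasked: irq asserted = 1".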
V2:
 - Use the new screening function
 - Update interrupt generation
 - Increase vmstate to 3.0

 hw/net/cadence_gem.c         | 180 ++++++++++++++++++++++++++++++++-----------
 include/hw/net/cadence_gem.h |   2 +-
 2 files changed, 135 insertions(+), 47 deletions(-)

diff --git a/hw/net/cadence_gem.c b/hw/net/cadence_gem.c
index d38bc1e..28c2ddb 100644
--- a/hw/net/cadence_gem.c
+++ b/hw/net/cadence_gem.c
@@ -142,6 +142,30 @@
 #define GEM_DESCONF6 (0x00000294/4)
 #define GEM_DESCONF7 (0x00000298/4)
 
+#define GEM_INT_Q1_STATUS (0x00000400 / 4)
+#define GEM_INT_Q1_MASK (0x00000640 / 4)
+
+#define GEM_TRANSMIT_Q1_PTR (0x00000440 / 4)
+#define GEM_TRANSMIT_Q15_PTR (GEM_TRANSMIT_Q1_PTR + 14)
+
+#define GEM_RECEIVE_Q1_PTR (0x00000480 / 4)
+#define GEM_RECEIVE_Q15_PTR (GEM_RECEIVE_Q1_PTR + 14)
+
+#define GEM_INT_Q1_ENABLE (0x00000600 / 4)
+#define GEM_INT_Q7_ENABLE (GEM_INT_Q1_ENABLE + 6)
+#define GEM_INT_Q8_ENABLE (0x00000660 / 4)
+#define GEM_INT_Q15_ENABLE (GEM_INT_Q8_ENABLE + 7)
+
+#define GEM_INT_Q1_DISABLE (0x00000620 / 4)
+#define GEM_INT_Q7_DISABLE (GEM_INT_Q1_DISABLE + 6)
+#define GEM_INT_Q8_DISABLE (0x00000680 / 4)
+#define GEM_INT_Q15_DISABLE (GEM_INT_Q8_DISABLE + 7)
+
+#define GEM_INT_Q1_MASK (0x00000640 / 4)
+#define GEM_INT_Q7_MASK (GEM_INT_Q1_MASK + 6)
+#define GEM_INT_Q8_MASK (0x000006A0 / 4)
+#define GEM_INT_Q15_MASK (GEM_INT_Q8_MASK + 7)
+
 #define GEM_SCREENING_TYPE1_REGISTER_0 (0x00000500 / 4)
 
 #define GEM_ST1R_UDP_PORT_MATCH_ENABLE (1 << 29)
@@ -316,9 +340,9 @@ static inline unsigned tx_desc_get_length(unsigned *desc)
     return desc[1] & DESC_1_LENGTH;
 }
 
-static inline void print_gem_tx_desc(unsigned *desc)
+static inline void print_gem_tx_desc(unsigned *desc, uint8_t queue)
 {
-    DB_PRINT("TXDESC:\n");
+    DB_PRINT("TXDESC (queue %" PRId8 "):\n", queue);
     DB_PRINT("bufaddr: 0x%08x\n", *desc);
     DB_PRINT("used_hw: %d\n", tx_desc_get_used(desc));
     DB_PRINT("wrap: %d\n", tx_desc_get_wrap(desc));
@@ -448,6 +472,7 @@ static void phy_update_link(CadenceGEMState *s)
 static int gem_can_receive(NetClientState *nc)
 {
     CadenceGEMState *s;
+    int i;
 
     s = qemu_get_nic_opaque(nc);
 
@@ -460,18 +485,20 @@ static int gem_can_receive(NetClientState *nc)
         return 0;
     }
 
-    if (rx_desc_get_ownership(s->rx_desc[0]) == 1) {
-        if (s->can_rx_state != 2) {
-            s->can_rx_state = 2;
-            DB_PRINT("can't receive - busy buffer descriptor 0x%x\n",
-                     s->rx_desc_addr[0]);
+    for (i = 0; i < s->num_priority_queues; i++) {
+        if (rx_desc_get_ownership(s->rx_desc[i]) == 1) {
+            if (s->can_rx_state != 2) {
+                s->can_rx_state = 2;
+                DB_PRINT("can't receive - busy buffer descriptor (q%d) 0x%x\n",
+                         i, s->rx_desc_addr[i]);
+            }
+            return 0;
         }
-        return 0;
     }
 
     if (s->can_rx_state != 0) {
         s->can_rx_state = 0;
-        DB_PRINT("can receive 0x%x\n", s->rx_desc_addr[0]);
+        DB_PRINT("can receive\n");
     }
     return 1;
 }
@@ -482,9 +509,20 @@ static int gem_can_receive(NetClientState *nc)
  */
 static void gem_update_int_status(CadenceGEMState *s)
 {
-    if (s->regs[GEM_ISR]) {
-        DB_PRINT("asserting int. (0x%08x)\n", s->regs[GEM_ISR]);
+    int i;
+
+    if (!s->num_priority_queues && s->regs[GEM_ISR]) {
+        /* No priority queues, just trigger the interrupt */
+        DB_PRINT("asserting int.\n");
         qemu_set_irq(s->irq[0], 1);
+        return;
+    }
+
+    for (i = 0; i < s->num_priority_queues; ++i) {
+        if (s->regs[GEM_INT_Q1_STATUS + i]) {
+            DB_PRINT("asserting int. (q=%d)\n", i);
+            qemu_set_irq(s->irq[i], 1);
+        }
     }
 }
 
@@ -748,17 +786,17 @@ static int get_queue_from_screen(CadenceGEMState *s, uint8_t *rxbuf_ptr)
     return 0;
 }
 
-static void gem_get_rx_desc(CadenceGEMState *s)
+static void gem_get_rx_desc(CadenceGEMState *s, int q)
 {
-    DB_PRINT("read descriptor 0x%x\n", (unsigned)s->rx_desc_addr[0]);
+    DB_PRINT("read descriptor 0x%x\n", (unsigned)s->rx_desc_addr[q]);
     /* read current descriptor */
-    cpu_physical_memory_read(s->rx_desc_addr[0], (uint8_t *)s->rx_desc[0],
-                             sizeof(s->rx_desc[0]));
+    cpu_physical_memory_read(s->rx_desc_addr[q], (uint8_t *)s->rx_desc[q],
+                             sizeof(s->rx_desc[q]));
 
     /* Descriptor owned by software ? */
-    if (rx_desc_get_ownership(s->rx_desc[0]) == 1) {
+    if (rx_desc_get_ownership(s->rx_desc[q]) == 1) {
         DB_PRINT("descriptor 0x%x owned by sw.\n",
-                 (unsigned)s->rx_desc_addr[0]);
+                 (unsigned)s->rx_desc_addr[q]);
         s->regs[GEM_RXSTATUS] |= GEM_RXSTATUS_NOBUF;
         s->regs[GEM_ISR] |= GEM_INT_RXUSED & ~(s->regs[GEM_IMR]);
         /* Handle interrupt consequences */
@@ -779,6 +817,7 @@ static ssize_t gem_receive(NetClientState *nc, const uint8_t *buf, size_t size)
     uint8_t *rxbuf_ptr;
     bool first_desc = true;
     int maf;
+    int q = 0;
 
     s = qemu_get_nic_opaque(nc);
 
@@ -857,6 +896,9 @@ static ssize_t gem_receive(NetClientState *nc, const uint8_t *buf, size_t size)
 
     DB_PRINT("config bufsize: %d packet size: %ld\n", rxbufsize, size);
 
+    /* Find which queue we are targeting */
+    q = get_queue_from_screen(s, rxbuf_ptr);
+
     while (bytes_to_copy) {
         /* Do nothing if receive is not enabled. */
         if (!gem_can_receive(nc)) {
@@ -865,10 +907,10 @@ static ssize_t gem_receive(NetClientState *nc, const uint8_t *buf, size_t size)
         }
 
         DB_PRINT("copy %d bytes to 0x%x\n", MIN(bytes_to_copy, rxbufsize),
-                rx_desc_get_buffer(s->rx_desc[0]));
+                rx_desc_get_buffer(s->rx_desc[q]));
 
         /* Copy packet data to emulated DMA buffer */
-        cpu_physical_memory_write(rx_desc_get_buffer(s->rx_desc[0]) +
+        cpu_physical_memory_write(rx_desc_get_buffer(s->rx_desc[q]) +
                                                                  rxbuf_offset,
                                   rxbuf_ptr, MIN(bytes_to_copy, rxbufsize));
         rxbuf_ptr += MIN(bytes_to_copy, rxbufsize);
@@ -876,47 +918,48 @@
 
         /* Update the descriptor.  */
         if (first_desc) {
-            rx_desc_set_sof(s->rx_desc[0]);
+            rx_desc_set_sof(s->rx_desc[q]);
             first_desc = false;
         }
         if (bytes_to_copy == 0) {
-            rx_desc_set_eof(s->rx_desc[0]);
-            rx_desc_set_length(s->rx_desc[0], size);
+            rx_desc_set_eof(s->rx_desc[q]);
+            rx_desc_set_length(s->rx_desc[q], size);
         }
-        rx_desc_set_ownership(s->rx_desc[0]);
+        rx_desc_set_ownership(s->rx_desc[q]);
 
         switch (maf) {
         case GEM_RX_PROMISCUOUS_ACCEPT:
             break;
         case GEM_RX_BROADCAST_ACCEPT:
-            rx_desc_set_broadcast(s->rx_desc[0]);
+            rx_desc_set_broadcast(s->rx_desc[q]);
            break;
         case GEM_RX_UNICAST_HASH_ACCEPT:
-            rx_desc_set_unicast_hash(s->rx_desc[0]);
+            rx_desc_set_unicast_hash(s->rx_desc[q]);
             break;
         case GEM_RX_MULTICAST_HASH_ACCEPT:
-            rx_desc_set_multicast_hash(s->rx_desc[0]);
+            rx_desc_set_multicast_hash(s->rx_desc[q]);
             break;
         case GEM_RX_REJECT:
             abort();
         default: /* SAR */
-            rx_desc_set_sar(s->rx_desc[0], maf);
+            rx_desc_set_sar(s->rx_desc[q], maf);
         }
 
         /* Descriptor write-back.  */
-        cpu_physical_memory_write(s->rx_desc_addr[0],
-                                  (uint8_t *)s->rx_desc[0],
-                                  sizeof(s->rx_desc[0]));
+        cpu_physical_memory_write(s->rx_desc_addr[q],
+                                  (uint8_t *)s->rx_desc[q],
+                                  sizeof(s->rx_desc[q]));
 
         /* Next descriptor */
-        if (rx_desc_get_wrap(s->rx_desc[0])) {
+        if (rx_desc_get_wrap(s->rx_desc[q])) {
             DB_PRINT("wrapping RX descriptor list\n");
-            s->rx_desc_addr[0] = s->regs[GEM_RXQBASE];
+            s->rx_desc_addr[q] = s->regs[GEM_RXQBASE];
         } else {
             DB_PRINT("incrementing RX descriptor list\n");
-            s->rx_desc_addr[0] += 8;
+            s->rx_desc_addr[q] += 8;
         }
-        gem_get_rx_desc(s);
+
+        gem_get_rx_desc(s, q);
     }
 
     /* Count it */
@@ -988,6 +1031,7 @@ static void gem_transmit(CadenceGEMState *s)
     uint8_t tx_packet[2048];
     uint8_t *p;
     unsigned total_bytes;
+    int8_t q;
 
     /* Do nothing if transmit is not enabled. */
     if (!(s->regs[GEM_NWCTRL] & GEM_NWCTRL_TXENA)) {
@@ -1003,8 +1047,9 @@
     p = tx_packet;
     total_bytes = 0;
 
+    for (q = s->num_priority_queues - 1; q >= 0; q--) {
     /* read current descriptor */
-    packet_desc_addr = s->tx_desc_addr[0];
+    packet_desc_addr = s->tx_desc_addr[q];
 
     DB_PRINT("read descriptor 0x%" HWADDR_PRIx "\n", packet_desc_addr);
     cpu_physical_memory_read(packet_desc_addr,
@@ -1016,7 +1061,7 @@
         if (!(s->regs[GEM_NWCTRL] & GEM_NWCTRL_TXENA)) {
             return;
         }
-        print_gem_tx_desc(desc);
+        print_gem_tx_desc(desc, q);
         /* The real hardware would eat this (and possibly crash).
          * For QEMU let's lend a helping hand.
          */
@@ -1051,22 +1096,28 @@
             /* Modify the 1st descriptor of this packet to be owned by
              * the processor.
              */
-            cpu_physical_memory_read(s->tx_desc_addr[0], (uint8_t *)desc_first,
+            cpu_physical_memory_read(s->tx_desc_addr[q], (uint8_t *)desc_first,
                                      sizeof(desc_first));
             tx_desc_set_used(desc_first);
-            cpu_physical_memory_write(s->tx_desc_addr[0], (uint8_t *)desc_first,
+            cpu_physical_memory_write(s->tx_desc_addr[q], (uint8_t *)desc_first,
                                       sizeof(desc_first));
             /* Advance the hardware current descriptor past this packet */
             if (tx_desc_get_wrap(desc)) {
-                s->tx_desc_addr[0] = s->regs[GEM_TXQBASE];
+                s->tx_desc_addr[q] = s->regs[GEM_TXQBASE];
             } else {
-                s->tx_desc_addr[0] = packet_desc_addr + 8;
+                s->tx_desc_addr[q] = packet_desc_addr + 8;
             }
-            DB_PRINT("TX descriptor next: 0x%08x\n", s->tx_desc_addr[0]);
+            DB_PRINT("TX descriptor next: 0x%08x\n", s->tx_desc_addr[q]);
 
             s->regs[GEM_TXSTATUS] |= GEM_TXSTATUS_TXCMPL;
             s->regs[GEM_ISR] |= GEM_INT_TXCMPL & ~(s->regs[GEM_IMR]);
 
+            /* Update queue interrupt status */
+            if (s->num_priority_queues) {
+                s->regs[GEM_INT_Q1_STATUS + q] |=
+                        GEM_INT_TXCMPL & ~(s->regs[GEM_INT_Q1_MASK + q]);
+            }
+
             /* Handle interrupt consequences */
             gem_update_int_status(s);
 
@@ -1108,6 +1159,7 @@
         s->regs[GEM_ISR] |= GEM_INT_TXUSED & ~(s->regs[GEM_IMR]);
         gem_update_int_status(s);
     }
+    }
 }
 
 static void gem_phy_reset(CadenceGEMState *s)
@@ -1214,7 +1266,7 @@ static uint64_t gem_read(void *opaque, hwaddr offset, unsigned size)
 {
     CadenceGEMState *s;
     uint32_t retval;
-
+    int i;
     s = (CadenceGEMState *)opaque;
 
     offset >>= 2;
@@ -1224,8 +1276,10 @@ static uint64_t gem_read(void *opaque, hwaddr offset, unsigned size)
 
     switch (offset) {
     case GEM_ISR:
-        DB_PRINT("lowering irq on ISR read\n");
-        qemu_set_irq(s->irq[0], 0);
+        DB_PRINT("lowering irqs on ISR read\n");
+        for (i = 0; i < s->num_priority_queues; ++i) {
+            qemu_set_irq(s->irq[i], 0);
+        }
         break;
     case GEM_PHYMNTNC:
         if (retval & GEM_PHYMNTNC_OP_R) {
@@ -1250,6 +1304,7 @@ static uint64_t gem_read(void *opaque, hwaddr offset, unsigned size)
     retval &= ~(s->regs_wo[offset]);
 
     DB_PRINT("0x%08x\n", retval);
+    gem_update_int_status(s);
     return retval;
 }
 
@@ -1262,6 +1317,7 @@ static void gem_write(void *opaque, hwaddr offset, uint64_t val,
 {
     CadenceGEMState *s = (CadenceGEMState *)opaque;
     uint32_t readonly;
+    int i;
 
     DB_PRINT("offset: 0x%04x write: 0x%08x ", (unsigned)offset, (unsigned)val);
     offset >>= 2;
@@ -1281,14 +1337,18 @@ static void gem_write(void *opaque, hwaddr offset, uint64_t val,
     switch (offset) {
     case GEM_NWCTRL:
         if (val & GEM_NWCTRL_RXENA) {
-            gem_get_rx_desc(s);
+            for (i = 0; i < s->num_priority_queues; ++i) {
+                gem_get_rx_desc(s, i);
+            }
         }
         if (val & GEM_NWCTRL_TXSTART) {
             gem_transmit(s);
         }
        if (!(val & GEM_NWCTRL_TXENA)) {
             /* Reset to start of Q when transmit disabled. */
-            s->tx_desc_addr[0] = s->regs[GEM_TXQBASE];
+            for (i = 0; i < s->num_priority_queues; i++) {
+                s->tx_desc_addr[i] = s->regs[GEM_TXQBASE];
+            }
         }
         if (gem_can_receive(qemu_get_queue(s->nic))) {
             qemu_flush_queued_packets(qemu_get_queue(s->nic));
@@ -1301,9 +1361,15 @@ static void gem_write(void *opaque, hwaddr offset, uint64_t val,
     case GEM_RXQBASE:
         s->rx_desc_addr[0] = val;
         break;
+    case GEM_RECEIVE_Q1_PTR ... GEM_RECEIVE_Q15_PTR:
+        s->rx_desc_addr[offset - GEM_RECEIVE_Q1_PTR + 1] = val;
+        break;
     case GEM_TXQBASE:
         s->tx_desc_addr[0] = val;
         break;
+    case GEM_TRANSMIT_Q1_PTR ... GEM_TRANSMIT_Q15_PTR:
+        s->tx_desc_addr[offset - GEM_TRANSMIT_Q1_PTR + 1] = val;
+        break;
     case GEM_RXSTATUS:
         gem_update_int_status(s);
         break;
@@ -1311,10 +1377,26 @@ static void gem_write(void *opaque, hwaddr offset, uint64_t val,
         s->regs[GEM_IMR] &= ~val;
         gem_update_int_status(s);
         break;
+    case GEM_INT_Q1_ENABLE ... GEM_INT_Q7_ENABLE:
+        s->regs[GEM_INT_Q1_MASK + offset - GEM_INT_Q1_ENABLE] &= ~val;
+        gem_update_int_status(s);
+        break;
+    case GEM_INT_Q8_ENABLE ... GEM_INT_Q15_ENABLE:
+        s->regs[GEM_INT_Q8_MASK + offset - GEM_INT_Q8_ENABLE] &= ~val;
+        gem_update_int_status(s);
+        break;
     case GEM_IDR:
         s->regs[GEM_IMR] |= val;
         gem_update_int_status(s);
         break;
+    case GEM_INT_Q1_DISABLE ... GEM_INT_Q7_DISABLE:
+        s->regs[GEM_INT_Q1_MASK + offset - GEM_INT_Q1_DISABLE] |= val;
+        gem_update_int_status(s);
+        break;
+    case GEM_INT_Q8_DISABLE ... GEM_INT_Q15_DISABLE:
+        s->regs[GEM_INT_Q8_MASK + offset - GEM_INT_Q8_DISABLE] |= val;
+        gem_update_int_status(s);
+        break;
     case GEM_SPADDR1LO:
     case GEM_SPADDR2LO:
     case GEM_SPADDR3LO:
@@ -1351,8 +1433,11 @@ static const MemoryRegionOps gem_ops = {
 
 static void gem_set_link(NetClientState *nc)
 {
+    CadenceGEMState *s = qemu_get_nic_opaque(nc);
+
     DB_PRINT("\n");
-    phy_update_link(qemu_get_nic_opaque(nc));
+    phy_update_link(s);
+    gem_update_int_status(s);
 }
 
 static NetClientInfo net_gem_info = {
@@ -1366,8 +1451,11 @@ static NetClientInfo net_gem_info = {
 static void gem_realize(DeviceState *dev, Error **errp)
 {
     CadenceGEMState *s = CADENCE_GEM(dev);
+    int i;
 
-    sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq[0]);
+    for (i = 0; i < s->num_priority_queues; ++i) {
+        sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq[i]);
+    }
 
     qemu_macaddr_default_if_unset(&s->conf.macaddr);
 
@@ -1391,8 +1479,8 @@ static void gem_init(Object *obj)
 
 static const VMStateDescription vmstate_cadence_gem = {
     .name = "cadence_gem",
-    .version_id = 2,
-    .minimum_version_id = 3,
+    .version_id = 3,
+    .minimum_version_id = 0,
     .fields = (VMStateField[]) {
         VMSTATE_UINT32_ARRAY(regs, CadenceGEMState, CADENCE_GEM_MAXREG),
         VMSTATE_UINT16_ARRAY(phy_regs, CadenceGEMState, 32),
diff --git a/include/hw/net/cadence_gem.h b/include/hw/net/cadence_gem.h
index f2f78c0..47a1661 100644
--- a/include/hw/net/cadence_gem.h
+++ b/include/hw/net/cadence_gem.h
@@ -30,7 +30,7 @@
 #include "net/net.h"
 #include "hw/sysbus.h"
 
-#define CADENCE_GEM_MAXREG (0x00000640/4) /* Last valid GEM address */
+#define CADENCE_GEM_MAXREG (0x00000800 / 4) /* Last valid GEM address */
 
 #define MAX_PRIORITY_QUEUES 8
 
-- 
2.7.4