When a PCI device lives behind an IOMMU, it should use the 'pci_dma_*'
family of functions for any transfer to or from guest memory; the
'cpu_physical_memory_*' family of functions bypasses any MMU/IOMMU in
the system entirely.
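For illustration only, the conversion pattern looks like this (a minimal
sketch, not taken from the patch; the helper name and variables are made
up, and 'dev' stands for the device's PCIDevice, e.g. PCI_DEVICE(s)):

    #include "hw/pci/pci.h"

    /* Hypothetical helper contrasting the two APIs. */
    static uint32_t read_guest_u32(PCIDevice *dev, dma_addr_t addr)
    {
        uint32_t val;

        /* Old style, wrong behind an IOMMU: targets the global
         * address_space_memory directly, bypassing any translation:
         *
         *     cpu_physical_memory_read(addr, &val, sizeof(val));
         */

        /* New style: goes through the device's DMA address space,
         * so the access is translated when an IOMMU is present. */
        pci_dma_read(dev, addr, &val, sizeof(val));
        return val;
    }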
vmxnet3 in some places was using the 'cpu_physical_memory_*' family of
functions, which works fine with the default QEMU setup where no IOMMU
is enabled, but fails once an IOMMU is enabled. This commit converts
all such instances to the 'pci_dma_*' equivalents.

Cc: Dmitry Fleytman <dmi...@daynix.com>
Cc: Jason Wang <jasow...@redhat.com>
Cc: qemu-devel@nongnu.org
Cc: Anthony Liguori <aligu...@amazon.com>
Signed-off-by: KarimAllah Ahmed <karah...@amazon.de>
---
v2: rebase on current master and fix a conflict, since some of the
    changes switching to pci_dma_* are already in master.
---
 hw/net/vmware_utils.h |  55 ++++++++------
 hw/net/vmxnet3.c      | 197 +++++++++++++++++++++++++++-----------------------
 2 files changed, 139 insertions(+), 113 deletions(-)

diff --git a/hw/net/vmware_utils.h b/hw/net/vmware_utils.h
index c0dbb2f..5500601 100644
--- a/hw/net/vmware_utils.h
+++ b/hw/net/vmware_utils.h
@@ -26,97 +26,104 @@
  *
  */
 static inline void
-vmw_shmem_read(hwaddr addr, void *buf, int len)
+vmw_shmem_read(PCIDevice *d, hwaddr addr, void *buf, int len)
 {
     VMW_SHPRN("SHMEM r: %" PRIx64 ", len: %d to %p", addr, len, buf);
-    cpu_physical_memory_read(addr, buf, len);
+    pci_dma_read(d, addr, buf, len);
 }
 
 static inline void
-vmw_shmem_write(hwaddr addr, void *buf, int len)
+vmw_shmem_write(PCIDevice *d, hwaddr addr, void *buf, int len)
 {
     VMW_SHPRN("SHMEM w: %" PRIx64 ", len: %d to %p", addr, len, buf);
-    cpu_physical_memory_write(addr, buf, len);
+    pci_dma_write(d, addr, buf, len);
 }
 
 static inline void
-vmw_shmem_rw(hwaddr addr, void *buf, int len, int is_write)
+vmw_shmem_rw(PCIDevice *d, hwaddr addr, void *buf, int len, int is_write)
 {
     VMW_SHPRN("SHMEM r/w: %" PRIx64 ", len: %d (to %p), is write: %d",
               addr, len, buf, is_write);
 
-    cpu_physical_memory_rw(addr, buf, len, is_write);
+    if (is_write)
+        pci_dma_write(d, addr, buf, len);
+    else
+        pci_dma_read(d, addr, buf, len);
 }
 
 static inline void
-vmw_shmem_set(hwaddr addr, uint8_t val, int len)
+vmw_shmem_set(PCIDevice *d, hwaddr addr, uint8_t val, int len)
 {
     int i;
     VMW_SHPRN("SHMEM set: %" PRIx64 ", len: %d (value 0x%X)", addr, len, val);
 
     for (i = 0; i < len; i++) {
-        cpu_physical_memory_write(addr + i, &val, 1);
+        pci_dma_write(d, addr + i, &val, 1);
     }
 }
 
 static inline uint32_t
-vmw_shmem_ld8(hwaddr addr)
+vmw_shmem_ld8(PCIDevice *d, hwaddr addr)
 {
-    uint8_t res = ldub_phys(&address_space_memory, addr);
+    uint8_t res;
+    pci_dma_read(d, addr, &res, 1);
     VMW_SHPRN("SHMEM load8: %" PRIx64 " (value 0x%X)", addr, res);
     return res;
 }
 
 static inline void
-vmw_shmem_st8(hwaddr addr, uint8_t value)
+vmw_shmem_st8(PCIDevice *d, hwaddr addr, uint8_t value)
 {
     VMW_SHPRN("SHMEM store8: %" PRIx64 " (value 0x%X)", addr, value);
-    stb_phys(&address_space_memory, addr, value);
+    pci_dma_write(d, addr, &value, 1);
 }
 
 static inline uint32_t
-vmw_shmem_ld16(hwaddr addr)
+vmw_shmem_ld16(PCIDevice *d, hwaddr addr)
 {
-    uint16_t res = lduw_le_phys(&address_space_memory, addr);
+    uint16_t res;
+    pci_dma_read(d, addr, &res, 2);
     VMW_SHPRN("SHMEM load16: %" PRIx64 " (value 0x%X)", addr, res);
     return res;
 }
 
 static inline void
-vmw_shmem_st16(hwaddr addr, uint16_t value)
+vmw_shmem_st16(PCIDevice *d, hwaddr addr, uint16_t value)
 {
     VMW_SHPRN("SHMEM store16: %" PRIx64 " (value 0x%X)", addr, value);
-    stw_le_phys(&address_space_memory, addr, value);
+    pci_dma_write(d, addr, &value, 2);
 }
 
 static inline uint32_t
-vmw_shmem_ld32(hwaddr addr)
+vmw_shmem_ld32(PCIDevice *d, hwaddr addr)
 {
-    uint32_t res = ldl_le_phys(&address_space_memory, addr);
+    uint32_t res;
+    pci_dma_read(d, addr, &res, 4);
     VMW_SHPRN("SHMEM load32: %" PRIx64 " (value 0x%X)", addr, res);
     return res;
 }
 
 static inline void
-vmw_shmem_st32(hwaddr addr, uint32_t value)
+vmw_shmem_st32(PCIDevice *d, hwaddr addr, uint32_t value)
 {
     VMW_SHPRN("SHMEM store32: %" PRIx64 " (value 0x%X)", addr, value);
-    stl_le_phys(&address_space_memory, addr, value);
+    pci_dma_write(d, addr, &value, 4);
 }
 
 static inline uint64_t
-vmw_shmem_ld64(hwaddr addr)
+vmw_shmem_ld64(PCIDevice *d, hwaddr addr)
 {
-    uint64_t res = ldq_le_phys(&address_space_memory, addr);
+    uint64_t res;
+    pci_dma_read(d, addr, &res, 8);
     VMW_SHPRN("SHMEM load64: %" PRIx64 " (value %" PRIx64 ")", addr, res);
     return res;
 }
 
 static inline void
-vmw_shmem_st64(hwaddr addr, uint64_t value)
+vmw_shmem_st64(PCIDevice *d, hwaddr addr, uint64_t value)
 {
     VMW_SHPRN("SHMEM store64: %" PRIx64 " (value %" PRIx64 ")", addr, value);
-    stq_le_phys(&address_space_memory, addr, value);
+    pci_dma_write(d, addr, &value, 8);
 }
 
 /* Macros for simplification of operations on array-style registers */
diff --git a/hw/net/vmxnet3.c b/hw/net/vmxnet3.c
index d978976..92236d3 100644
--- a/hw/net/vmxnet3.c
+++ b/hw/net/vmxnet3.c
@@ -74,54 +74,54 @@
 #define VMXNET3_MAX_NMSIX_INTRS   (1)
 
 /* Macros for rings descriptors access */
-#define VMXNET3_READ_TX_QUEUE_DESCR8(dpa, field) \
-    (vmw_shmem_ld8(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))
+#define VMXNET3_READ_TX_QUEUE_DESCR8(_d, dpa, field) \
+    (vmw_shmem_ld8(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))
 
-#define VMXNET3_WRITE_TX_QUEUE_DESCR8(dpa, field, value) \
-    (vmw_shmem_st8(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field, value)))
+#define VMXNET3_WRITE_TX_QUEUE_DESCR8(_d, dpa, field, value) \
+    (vmw_shmem_st8(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field, value)))
 
-#define VMXNET3_READ_TX_QUEUE_DESCR32(dpa, field) \
-    (vmw_shmem_ld32(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))
+#define VMXNET3_READ_TX_QUEUE_DESCR32(_d, dpa, field) \
+    (vmw_shmem_ld32(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))
 
-#define VMXNET3_WRITE_TX_QUEUE_DESCR32(dpa, field, value) \
-    (vmw_shmem_st32(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field), value))
+#define VMXNET3_WRITE_TX_QUEUE_DESCR32(_d, dpa, field, value) \
+    (vmw_shmem_st32(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field), value))
 
-#define VMXNET3_READ_TX_QUEUE_DESCR64(dpa, field) \
-    (vmw_shmem_ld64(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))
+#define VMXNET3_READ_TX_QUEUE_DESCR64(_d, dpa, field) \
+    (vmw_shmem_ld64(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field)))
 
-#define VMXNET3_WRITE_TX_QUEUE_DESCR64(dpa, field, value) \
-    (vmw_shmem_st64(dpa + offsetof(struct Vmxnet3_TxQueueDesc, field), value))
+#define VMXNET3_WRITE_TX_QUEUE_DESCR64(_d, dpa, field, value) \
+    (vmw_shmem_st64(_d, dpa + offsetof(struct Vmxnet3_TxQueueDesc, field), value))
 
-#define VMXNET3_READ_RX_QUEUE_DESCR64(dpa, field) \
-    (vmw_shmem_ld64(dpa + offsetof(struct Vmxnet3_RxQueueDesc, field)))
+#define VMXNET3_READ_RX_QUEUE_DESCR64(_d, dpa, field) \
+    (vmw_shmem_ld64(_d, dpa + offsetof(struct Vmxnet3_RxQueueDesc, field)))
 
-#define VMXNET3_READ_RX_QUEUE_DESCR32(dpa, field) \
-    (vmw_shmem_ld32(dpa + offsetof(struct Vmxnet3_RxQueueDesc, field)))
+#define VMXNET3_READ_RX_QUEUE_DESCR32(_d, dpa, field) \
+    (vmw_shmem_ld32(_d, dpa + offsetof(struct Vmxnet3_RxQueueDesc, field)))
 
-#define VMXNET3_WRITE_RX_QUEUE_DESCR64(dpa, field, value) \
-    (vmw_shmem_st64(dpa + offsetof(struct Vmxnet3_RxQueueDesc, field), value))
+#define VMXNET3_WRITE_RX_QUEUE_DESCR64(_d, dpa, field, value) \
+    (vmw_shmem_st64(_d, dpa + offsetof(struct Vmxnet3_RxQueueDesc, field), value))
 
-#define VMXNET3_WRITE_RX_QUEUE_DESCR8(dpa, field, value) \
-    (vmw_shmem_st8(dpa + offsetof(struct Vmxnet3_RxQueueDesc, field), value))
+#define VMXNET3_WRITE_RX_QUEUE_DESCR8(_d, dpa, field, value) \
+    (vmw_shmem_st8(_d, dpa + offsetof(struct Vmxnet3_RxQueueDesc, field), value))
 
 /* Macros for guest driver shared area access */
-#define VMXNET3_READ_DRV_SHARED64(shpa, field) \
-    (vmw_shmem_ld64(shpa + offsetof(struct Vmxnet3_DriverShared, field)))
+#define VMXNET3_READ_DRV_SHARED64(_d, shpa, field) \
+    (vmw_shmem_ld64(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field)))
 
-#define VMXNET3_READ_DRV_SHARED32(shpa, field) \
-    (vmw_shmem_ld32(shpa + offsetof(struct Vmxnet3_DriverShared, field)))
+#define VMXNET3_READ_DRV_SHARED32(_d, shpa, field) \
+    (vmw_shmem_ld32(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field)))
 
-#define VMXNET3_WRITE_DRV_SHARED32(shpa, field, val) \
-    (vmw_shmem_st32(shpa + offsetof(struct Vmxnet3_DriverShared, field), val))
+#define VMXNET3_WRITE_DRV_SHARED32(_d, shpa, field, val) \
+    (vmw_shmem_st32(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field), val))
 
-#define VMXNET3_READ_DRV_SHARED16(shpa, field) \
-    (vmw_shmem_ld16(shpa + offsetof(struct Vmxnet3_DriverShared, field)))
+#define VMXNET3_READ_DRV_SHARED16(_d, shpa, field) \
+    (vmw_shmem_ld16(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field)))
 
-#define VMXNET3_READ_DRV_SHARED8(shpa, field) \
-    (vmw_shmem_ld8(shpa + offsetof(struct Vmxnet3_DriverShared, field)))
+#define VMXNET3_READ_DRV_SHARED8(_d, shpa, field) \
+    (vmw_shmem_ld8(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field)))
 
-#define VMXNET3_READ_DRV_SHARED(shpa, field, b, l) \
-    (vmw_shmem_read(shpa + offsetof(struct Vmxnet3_DriverShared, field), b, l))
+#define VMXNET3_READ_DRV_SHARED(_d, shpa, field, b, l) \
+    (vmw_shmem_read(_d, shpa + offsetof(struct Vmxnet3_DriverShared, field), b, l))
 
 #define VMXNET_FLAG_IS_SET(field, flag) (((field) & (flag)) == (flag))
 
@@ -147,7 +147,8 @@ typedef struct {
     uint8_t gen;
 } Vmxnet3Ring;
 
-static inline void vmxnet3_ring_init(Vmxnet3Ring *ring,
+static inline void vmxnet3_ring_init(PCIDevice *d,
+                                     Vmxnet3Ring *ring,
                                      hwaddr pa,
                                      size_t size,
                                      size_t cell_size,
@@ -160,7 +161,7 @@ static inline void vmxnet3_ring_init(Vmxnet3Ring *ring,
     ring->next = 0;
 
     if (zero_region) {
-        vmw_shmem_set(pa, 0, size * cell_size);
+        vmw_shmem_set(d, pa, 0, size * cell_size);
     }
 }
 
@@ -190,14 +191,16 @@ static inline hwaddr vmxnet3_ring_curr_cell_pa(Vmxnet3Ring *ring)
     return ring->pa + ring->next * ring->cell_size;
 }
 
-static inline void vmxnet3_ring_read_curr_cell(Vmxnet3Ring *ring, void *buff)
+static inline void vmxnet3_ring_read_curr_cell(PCIDevice *d, Vmxnet3Ring *ring,
+                                               void *buff)
 {
-    vmw_shmem_read(vmxnet3_ring_curr_cell_pa(ring), buff, ring->cell_size);
+    vmw_shmem_read(d, vmxnet3_ring_curr_cell_pa(ring), buff, ring->cell_size);
 }
 
-static inline void vmxnet3_ring_write_curr_cell(Vmxnet3Ring *ring, void *buff)
+static inline void vmxnet3_ring_write_curr_cell(PCIDevice *d, Vmxnet3Ring *ring,
+                                                void *buff)
 {
-    vmw_shmem_write(vmxnet3_ring_curr_cell_pa(ring), buff, ring->cell_size);
+    vmw_shmem_write(d, vmxnet3_ring_curr_cell_pa(ring), buff, ring->cell_size);
 }
 
 static inline size_t vmxnet3_ring_curr_cell_idx(Vmxnet3Ring *ring)
@@ -456,9 +459,9 @@ vmxnet3_on_interrupt_mask_changed(VMXNET3State *s, int lidx, bool is_masked)
     vmxnet3_update_interrupt_line_state(s, lidx);
 }
 
-static bool vmxnet3_verify_driver_magic(hwaddr dshmem)
+static bool vmxnet3_verify_driver_magic(PCIDevice *d, hwaddr dshmem)
 {
-    return (VMXNET3_READ_DRV_SHARED32(dshmem, magic) == VMXNET3_REV1_MAGIC);
+    return (VMXNET3_READ_DRV_SHARED32(d, dshmem, magic) == VMXNET3_REV1_MAGIC);
 }
 
 #define VMXNET3_GET_BYTE(x, byte_num) (((x) >> (byte_num)*8) & 0xFF)
@@ -526,13 +529,14 @@ vmxnet3_dec_rx_completion_counter(VMXNET3State *s, int qidx)
 static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
 {
     struct Vmxnet3_TxCompDesc txcq_descr;
+    PCIDevice *d = PCI_DEVICE(s);
 
     VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
 
     txcq_descr.txdIdx = tx_ridx;
     txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
 
-    vmxnet3_ring_write_curr_cell(&s->txq_descr[qidx].comp_ring, &txcq_descr);
+    vmxnet3_ring_write_curr_cell(d, &s->txq_descr[qidx].comp_ring, &txcq_descr);
 
     /* Flush changes in TX descriptor before changing the counter value */
     smp_wmb();
@@ -688,13 +692,14 @@ vmxnet3_pop_next_tx_descr(VMXNET3State *s,
                           uint32_t *descr_idx)
 {
     Vmxnet3Ring *ring = &s->txq_descr[qidx].tx_ring;
+    PCIDevice *d = PCI_DEVICE(s);
 
-    vmxnet3_ring_read_curr_cell(ring, txd);
+    vmxnet3_ring_read_curr_cell(d, ring, txd);
     if (txd->gen == vmxnet3_ring_curr_gen(ring)) {
         /* Only read after generation field verification */
         smp_rmb();
         /* Re-read to be sure we got the latest version */
-        vmxnet3_ring_read_curr_cell(ring, txd);
+        vmxnet3_ring_read_curr_cell(d, ring, txd);
         VMXNET3_RING_DUMP(VMW_RIPRN, "TX", qidx, ring);
         *descr_idx = vmxnet3_ring_curr_cell_idx(ring);
         vmxnet3_inc_tx_consumption_counter(s, qidx);
@@ -782,9 +787,11 @@ static inline void
 vmxnet3_read_next_rx_descr(VMXNET3State *s, int qidx, int ridx,
                            struct Vmxnet3_RxDesc *dbuf, uint32_t *didx)
 {
+    PCIDevice *d = PCI_DEVICE(s);
+
     Vmxnet3Ring *ring = &s->rxq_descr[qidx].rx_ring[ridx];
     *didx = vmxnet3_ring_curr_cell_idx(ring);
-    vmxnet3_ring_read_curr_cell(ring, dbuf);
+    vmxnet3_ring_read_curr_cell(d, ring, dbuf);
 }
 
 static inline uint8_t
@@ -802,9 +809,8 @@ vmxnet3_pop_rxc_descr(VMXNET3State *s, int qidx, uint32_t *descr_gen)
     hwaddr daddr =
         vmxnet3_ring_curr_cell_pa(&s->rxq_descr[qidx].comp_ring);
 
-    pci_dma_read(PCI_DEVICE(s), daddr,
-                 &rxcd, sizeof(struct Vmxnet3_RxCompDesc));
-
+    pci_dma_read(PCI_DEVICE(s),
+                 daddr, &rxcd, sizeof(struct Vmxnet3_RxCompDesc));
     ring_gen = vmxnet3_ring_curr_gen(&s->rxq_descr[qidx].comp_ring);
 
     if (rxcd.gen != ring_gen) {
@@ -1058,6 +1064,7 @@ static bool
 vmxnet3_indicate_packet(VMXNET3State *s)
 {
     struct Vmxnet3_RxDesc rxd;
+    PCIDevice *d = PCI_DEVICE(s);
     bool is_head = true;
     uint32_t rxd_idx;
     uint32_t rx_ridx = 0;
@@ -1091,7 +1098,7 @@ vmxnet3_indicate_packet(VMXNET3State *s)
         }
 
         chunk_size = MIN(bytes_left, rxd.len);
-        vmxnet3_pci_dma_writev(PCI_DEVICE(s), data, bytes_copied,
+        vmxnet3_pci_dma_writev(d, data, bytes_copied,
                                le64_to_cpu(rxd.addr), chunk_size);
         bytes_copied += chunk_size;
         bytes_left -= chunk_size;
@@ -1099,7 +1106,7 @@ vmxnet3_indicate_packet(VMXNET3State *s)
         vmxnet3_dump_rx_descr(&rxd);
 
         if (ready_rxcd_pa != 0) {
-            pci_dma_write(PCI_DEVICE(s), ready_rxcd_pa, &rxcd, sizeof(rxcd));
+            pci_dma_write(d, ready_rxcd_pa, &rxcd, sizeof(rxcd));
         }
 
         memset(&rxcd, 0, sizeof(struct Vmxnet3_RxCompDesc));
@@ -1131,7 +1138,7 @@ vmxnet3_indicate_packet(VMXNET3State *s)
         rxcd.eop = 1;
         rxcd.err = (bytes_left != 0);
 
-        pci_dma_write(PCI_DEVICE(s), ready_rxcd_pa, &rxcd, sizeof(rxcd));
+        pci_dma_write(d, ready_rxcd_pa, &rxcd, sizeof(rxcd));
 
         /* Flush RX descriptor changes */
         smp_wmb();
@@ -1250,7 +1257,9 @@ static void vmxnet3_reset(VMXNET3State *s)
 
 static void vmxnet3_update_rx_mode(VMXNET3State *s)
 {
-    s->rx_mode = VMXNET3_READ_DRV_SHARED32(s->drv_shmem,
+    PCIDevice *d = PCI_DEVICE(s);
+
+    s->rx_mode = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem,
                                            devRead.rxFilterConf.rxMode);
     VMW_CFPRN("RX mode: 0x%08X", s->rx_mode);
 }
@@ -1258,9 +1267,10 @@ static void vmxnet3_update_rx_mode(VMXNET3State *s)
 static void vmxnet3_update_vlan_filters(VMXNET3State *s)
 {
     int i;
+    PCIDevice *d = PCI_DEVICE(s);
 
     /* Copy configuration from shared memory */
-    VMXNET3_READ_DRV_SHARED(s->drv_shmem,
+    VMXNET3_READ_DRV_SHARED(d, s->drv_shmem,
                             devRead.rxFilterConf.vfTable,
                             s->vlan_table,
                             sizeof(s->vlan_table));
@@ -1281,8 +1291,10 @@ static void vmxnet3_update_vlan_filters(VMXNET3State *s)
 
 static void vmxnet3_update_mcast_filters(VMXNET3State *s)
 {
+    PCIDevice *d = PCI_DEVICE(s);
+
     uint16_t list_bytes =
-        VMXNET3_READ_DRV_SHARED16(s->drv_shmem,
+        VMXNET3_READ_DRV_SHARED16(d, s->drv_shmem,
                                   devRead.rxFilterConf.mfTableLen);
 
     s->mcast_list_len = list_bytes / sizeof(s->mcast_list[0]);
@@ -1299,10 +1311,10 @@ static void vmxnet3_update_mcast_filters(VMXNET3State *s)
     } else {
         int i;
         hwaddr mcast_list_pa =
-            VMXNET3_READ_DRV_SHARED64(s->drv_shmem,
+            VMXNET3_READ_DRV_SHARED64(d, s->drv_shmem,
                                       devRead.rxFilterConf.mfTablePA);
 
-        pci_dma_read(PCI_DEVICE(s), mcast_list_pa, s->mcast_list, list_bytes);
+        pci_dma_read(d, mcast_list_pa, s->mcast_list, list_bytes);
 
         VMW_CFPRN("Current multicast list len is %d:", s->mcast_list_len);
         for (i = 0; i < s->mcast_list_len; i++) {
@@ -1328,19 +1340,20 @@ static uint32_t vmxnet3_get_interrupt_config(VMXNET3State *s)
 static void vmxnet3_fill_stats(VMXNET3State *s)
 {
     int i;
+    PCIDevice *d = PCI_DEVICE(s);
 
     if (!s->device_active)
        return;
 
     for (i = 0; i < s->txq_num; i++) {
-        pci_dma_write(PCI_DEVICE(s),
+        pci_dma_write(d,
                       s->txq_descr[i].tx_stats_pa,
                       &s->txq_descr[i].txq_stats,
                       sizeof(s->txq_descr[i].txq_stats));
     }
 
     for (i = 0; i < s->rxq_num; i++) {
-        pci_dma_write(PCI_DEVICE(s),
+        pci_dma_write(d,
                       s->rxq_descr[i].rx_stats_pa,
                       &s->rxq_descr[i].rxq_stats,
                       sizeof(s->rxq_descr[i].rxq_stats));
@@ -1350,8 +1363,9 @@ static void vmxnet3_fill_stats(VMXNET3State *s)
 static void vmxnet3_adjust_by_guest_type(VMXNET3State *s)
 {
     struct Vmxnet3_GOSInfo gos;
+    PCIDevice *d = PCI_DEVICE(s);
 
-    VMXNET3_READ_DRV_SHARED(s->drv_shmem, devRead.misc.driverInfo.gos,
+    VMXNET3_READ_DRV_SHARED(d, s->drv_shmem, devRead.misc.driverInfo.gos,
                             &gos, sizeof(gos));
     s->rx_packets_compound =
         (gos.gosType == VMXNET3_GOS_TYPE_WIN) ? false : true;
@@ -1371,13 +1385,14 @@ vmxnet3_dump_conf_descr(const char *name,
 static void vmxnet3_update_pm_state(VMXNET3State *s)
 {
     struct Vmxnet3_VariableLenConfDesc pm_descr;
+    PCIDevice *d = PCI_DEVICE(s);
 
     pm_descr.confLen =
-        VMXNET3_READ_DRV_SHARED32(s->drv_shmem, devRead.pmConfDesc.confLen);
+        VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, devRead.pmConfDesc.confLen);
     pm_descr.confVer =
-        VMXNET3_READ_DRV_SHARED32(s->drv_shmem, devRead.pmConfDesc.confVer);
+        VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, devRead.pmConfDesc.confVer);
     pm_descr.confPA =
-        VMXNET3_READ_DRV_SHARED64(s->drv_shmem, devRead.pmConfDesc.confPA);
+        VMXNET3_READ_DRV_SHARED64(d, s->drv_shmem, devRead.pmConfDesc.confPA);
 
     vmxnet3_dump_conf_descr("PM State", &pm_descr);
 }
@@ -1386,8 +1401,9 @@ static void vmxnet3_update_features(VMXNET3State *s)
 {
     uint32_t guest_features;
     int rxcso_supported;
+    PCIDevice *d = PCI_DEVICE(s);
 
-    guest_features = VMXNET3_READ_DRV_SHARED32(s->drv_shmem,
+    guest_features = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem,
                                                devRead.misc.uptFeatures);
 
     rxcso_supported = VMXNET_FLAG_IS_SET(guest_features, UPT1_F_RXCSUM);
@@ -1462,12 +1478,13 @@ static void vmxnet3_activate_device(VMXNET3State *s)
 {
     int i;
     static const uint32_t VMXNET3_DEF_TX_THRESHOLD = 1;
+    PCIDevice *d = PCI_DEVICE(s);
     hwaddr qdescr_table_pa;
     uint64_t pa;
     uint32_t size;
 
     /* Verify configuration consistency */
-    if (!vmxnet3_verify_driver_magic(s->drv_shmem)) {
+    if (!vmxnet3_verify_driver_magic(d, s->drv_shmem)) {
         VMW_ERPRN("Device configuration received from driver is invalid");
         return;
     }
@@ -1483,11 +1500,11 @@ static void vmxnet3_activate_device(VMXNET3State *s)
     vmxnet3_update_pm_state(s);
     vmxnet3_setup_rx_filtering(s);
     /* Cache fields from shared memory */
-    s->mtu = VMXNET3_READ_DRV_SHARED32(s->drv_shmem, devRead.misc.mtu);
+    s->mtu = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, devRead.misc.mtu);
     VMW_CFPRN("MTU is %u", s->mtu);
 
     s->max_rx_frags =
-        VMXNET3_READ_DRV_SHARED16(s->drv_shmem, devRead.misc.maxNumRxSG);
+        VMXNET3_READ_DRV_SHARED16(d, s->drv_shmem, devRead.misc.maxNumRxSG);
 
     if (s->max_rx_frags == 0) {
         s->max_rx_frags = 1;
@@ -1496,24 +1513,24 @@ static void vmxnet3_activate_device(VMXNET3State *s)
     VMW_CFPRN("Max RX fragments is %u", s->max_rx_frags);
 
     s->event_int_idx =
-        VMXNET3_READ_DRV_SHARED8(s->drv_shmem, devRead.intrConf.eventIntrIdx);
+        VMXNET3_READ_DRV_SHARED8(d, s->drv_shmem, devRead.intrConf.eventIntrIdx);
     assert(vmxnet3_verify_intx(s, s->event_int_idx));
     VMW_CFPRN("Events interrupt line is %u", s->event_int_idx);
 
     s->auto_int_masking =
-        VMXNET3_READ_DRV_SHARED8(s->drv_shmem, devRead.intrConf.autoMask);
+        VMXNET3_READ_DRV_SHARED8(d, s->drv_shmem, devRead.intrConf.autoMask);
     VMW_CFPRN("Automatic interrupt masking is %d", (int)s->auto_int_masking);
 
     s->txq_num =
-        VMXNET3_READ_DRV_SHARED8(s->drv_shmem, devRead.misc.numTxQueues);
+        VMXNET3_READ_DRV_SHARED8(d, s->drv_shmem, devRead.misc.numTxQueues);
     s->rxq_num =
-        VMXNET3_READ_DRV_SHARED8(s->drv_shmem, devRead.misc.numRxQueues);
+        VMXNET3_READ_DRV_SHARED8(d, s->drv_shmem, devRead.misc.numRxQueues);
 
     VMW_CFPRN("Number of TX/RX queues %u/%u", s->txq_num, s->rxq_num);
     vmxnet3_validate_queues(s);
 
     qdescr_table_pa =
-        VMXNET3_READ_DRV_SHARED64(s->drv_shmem, devRead.misc.queueDescPA);
+        VMXNET3_READ_DRV_SHARED64(d, s->drv_shmem, devRead.misc.queueDescPA);
     VMW_CFPRN("TX queues descriptors table is at 0x%" PRIx64, qdescr_table_pa);
 
     /*
@@ -1529,25 +1546,25 @@ static void vmxnet3_activate_device(VMXNET3State *s)
 
         /* Read interrupt number for this TX queue */
         s->txq_descr[i].intr_idx =
-            VMXNET3_READ_TX_QUEUE_DESCR8(qdescr_pa, conf.intrIdx);
+            VMXNET3_READ_TX_QUEUE_DESCR8(d, qdescr_pa, conf.intrIdx);
         assert(vmxnet3_verify_intx(s, s->txq_descr[i].intr_idx));
         VMW_CFPRN("TX Queue %d interrupt: %d", i, s->txq_descr[i].intr_idx);
 
         /* Read rings memory locations for TX queues */
-        pa = VMXNET3_READ_TX_QUEUE_DESCR64(qdescr_pa, conf.txRingBasePA);
-        size = VMXNET3_READ_TX_QUEUE_DESCR32(qdescr_pa, conf.txRingSize);
+        pa = VMXNET3_READ_TX_QUEUE_DESCR64(d, qdescr_pa, conf.txRingBasePA);
+        size = VMXNET3_READ_TX_QUEUE_DESCR32(d, qdescr_pa, conf.txRingSize);
 
-        vmxnet3_ring_init(&s->txq_descr[i].tx_ring, pa, size,
+        vmxnet3_ring_init(d, &s->txq_descr[i].tx_ring, pa, size,
                           sizeof(struct Vmxnet3_TxDesc), false);
         VMXNET3_RING_DUMP(VMW_CFPRN, "TX", i, &s->txq_descr[i].tx_ring);
 
         s->max_tx_frags += size;
 
         /* TXC ring */
-        pa = VMXNET3_READ_TX_QUEUE_DESCR64(qdescr_pa, conf.compRingBasePA);
-        size = VMXNET3_READ_TX_QUEUE_DESCR32(qdescr_pa, conf.compRingSize);
-        vmxnet3_ring_init(&s->txq_descr[i].comp_ring, pa, size,
+        pa = VMXNET3_READ_TX_QUEUE_DESCR64(d, qdescr_pa, conf.compRingBasePA);
+        size = VMXNET3_READ_TX_QUEUE_DESCR32(d, qdescr_pa, conf.compRingSize);
+        vmxnet3_ring_init(d, &s->txq_descr[i].comp_ring, pa, size,
                           sizeof(struct Vmxnet3_TxCompDesc), true);
 
         VMXNET3_RING_DUMP(VMW_CFPRN, "TXC", i, &s->txq_descr[i].comp_ring);
@@ -1558,7 +1575,7 @@ static void vmxnet3_activate_device(VMXNET3State *s)
                sizeof(s->txq_descr[i].txq_stats));
 
         /* Fill device-managed parameters for queues */
-        VMXNET3_WRITE_TX_QUEUE_DESCR32(qdescr_pa,
+        VMXNET3_WRITE_TX_QUEUE_DESCR32(d, qdescr_pa,
                                        ctrl.txThreshold,
                                        VMXNET3_DEF_TX_THRESHOLD);
     }
@@ -1578,7 +1595,7 @@ static void vmxnet3_activate_device(VMXNET3State *s)
 
         /* Read interrupt number for this RX queue */
         s->rxq_descr[i].intr_idx =
-            VMXNET3_READ_TX_QUEUE_DESCR8(qd_pa, conf.intrIdx);
+            VMXNET3_READ_TX_QUEUE_DESCR8(d, qd_pa, conf.intrIdx);
         assert(vmxnet3_verify_intx(s, s->rxq_descr[i].intr_idx));
 
         VMW_CFPRN("RX Queue %d interrupt: %d", i, s->rxq_descr[i].intr_idx);
@@ -1586,18 +1603,18 @@ static void vmxnet3_activate_device(VMXNET3State *s)
 
         /* Read rings memory locations */
        for (j = 0; j < VMXNET3_RX_RINGS_PER_QUEUE; j++) {
            /* RX rings */
-            pa = VMXNET3_READ_RX_QUEUE_DESCR64(qd_pa, conf.rxRingBasePA[j]);
-            size = VMXNET3_READ_RX_QUEUE_DESCR32(qd_pa, conf.rxRingSize[j]);
-            vmxnet3_ring_init(&s->rxq_descr[i].rx_ring[j], pa, size,
+            pa = VMXNET3_READ_RX_QUEUE_DESCR64(d, qd_pa, conf.rxRingBasePA[j]);
+            size = VMXNET3_READ_RX_QUEUE_DESCR32(d, qd_pa, conf.rxRingSize[j]);
+            vmxnet3_ring_init(d, &s->rxq_descr[i].rx_ring[j], pa, size,
                               sizeof(struct Vmxnet3_RxDesc), false);
             VMW_CFPRN("RX queue %d:%d: Base: %" PRIx64 ", Size: %d",
                       i, j, pa, size);
         }
 
         /* RXC ring */
-        pa = VMXNET3_READ_RX_QUEUE_DESCR64(qd_pa, conf.compRingBasePA);
-        size = VMXNET3_READ_RX_QUEUE_DESCR32(qd_pa, conf.compRingSize);
-        vmxnet3_ring_init(&s->rxq_descr[i].comp_ring, pa, size,
+        pa = VMXNET3_READ_RX_QUEUE_DESCR64(d, qd_pa, conf.compRingBasePA);
+        size = VMXNET3_READ_RX_QUEUE_DESCR32(d, qd_pa, conf.compRingSize);
+        vmxnet3_ring_init(d, &s->rxq_descr[i].comp_ring, pa, size,
                           sizeof(struct Vmxnet3_RxCompDesc), true);
         VMW_CFPRN("RXC queue %d: Base: %" PRIx64 ", Size: %d", i, pa, size);
@@ -1764,19 +1781,21 @@ static uint64_t vmxnet3_get_command_status(VMXNET3State *s)
 static void vmxnet3_set_events(VMXNET3State *s, uint32_t val)
 {
     uint32_t events;
+    PCIDevice *d = PCI_DEVICE(s);
 
     VMW_CBPRN("Setting events: 0x%x", val);
-    events = VMXNET3_READ_DRV_SHARED32(s->drv_shmem, ecr) | val;
-    VMXNET3_WRITE_DRV_SHARED32(s->drv_shmem, ecr, events);
+    events = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, ecr) | val;
+    VMXNET3_WRITE_DRV_SHARED32(d, s->drv_shmem, ecr, events);
 }
 
 static void vmxnet3_ack_events(VMXNET3State *s, uint32_t val)
 {
+    PCIDevice *d = PCI_DEVICE(s);
     uint32_t events;
 
     VMW_CBPRN("Clearing events: 0x%x", val);
-    events = VMXNET3_READ_DRV_SHARED32(s->drv_shmem, ecr) & ~val;
-    VMXNET3_WRITE_DRV_SHARED32(s->drv_shmem, ecr, events);
+    events = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, ecr) & ~val;
+    VMXNET3_WRITE_DRV_SHARED32(d, s->drv_shmem, ecr, events);
 }
 
 static void
-- 
2.8.2