     struct iovec *raw;
     uint32_t raw_frags;
@@ -59,7 +58,7 @@ struct NetTxPkt {
 };
 
 void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev,
-    uint32_t max_frags, bool has_virt_hdr)
+    uint32_t max_frags)
 {
     struct NetTxPkt *p = g_malloc0(sizeof *p);
 
@@ -71,10 +70,8 @@ void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev,
 
     p->max_payload_frags = max_frags;
     p->max_raw_frags = max_frags;
-    p->has_virt_hdr = has_virt_hdr;
     p->vec[NET_TX_PKT_VHDR_FRAG].iov_base = &p->virt_hdr;
-    p->vec[NET_TX_PKT_VHDR_FRAG].iov_len =
-        p->has_virt_hdr ? sizeof p->virt_hdr : 0;
+    p->vec[NET_TX_PKT_VHDR_FRAG].iov_len = sizeof p->virt_hdr;
     p->vec[NET_TX_PKT_L2HDR_FRAG].iov_base = &p->l2_hdr;
     p->vec[NET_TX_PKT_L3HDR_FRAG].iov_base = &p->l3_hdr;
 
@@ -617,9 +614,11 @@ static bool net_tx_pkt_do_sw_fragmentation(struct NetTxPkt *pkt,
 
 bool net_tx_pkt_send(struct NetTxPkt *pkt, NetClientState *nc)
 {
+    bool using_vnet_hdr = qemu_get_using_vnet_hdr(nc->peer);
+
     assert(pkt);
 
-    if (!pkt->has_virt_hdr &&
+    if (!using_vnet_hdr &&
         pkt->virt_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
         net_tx_pkt_do_sw_csum(pkt);
     }
@@ -636,11 +635,13 @@ bool net_tx_pkt_send(struct NetTxPkt *pkt, NetClientState *nc)
         }
     }
 
-    if (pkt->has_virt_hdr ||
+    if (using_vnet_hdr ||
         pkt->virt_hdr.gso_type == VIRTIO_NET_HDR_GSO_NONE) {
+        int index = using_vnet_hdr ?
+            NET_TX_PKT_VHDR_FRAG : NET_TX_PKT_L2HDR_FRAG;
         net_tx_pkt_fix_ip6_payload_len(pkt);
-        net_tx_pkt_sendv(pkt, nc, pkt->vec,
-                         pkt->payload_frags + NET_TX_PKT_PL_START_FRAG);
+        net_tx_pkt_sendv(pkt, nc, pkt->vec + index,
+                         pkt->payload_frags + NET_TX_PKT_PL_START_FRAG - index);
         return true;
     }
 
diff --git a/hw/net/net_tx_pkt.h b/hw/net/net_tx_pkt.h
index 2e38a5fa69..8d3faa42fb 100644
--- a/hw/net/net_tx_pkt.h
+++ b/hw/net/net_tx_pkt.h
@@ -32,10 +32,9 @@ struct NetTxPkt;
  * @pkt: packet pointer
  * @pci_dev: PCI device processing this packet
  * @max_frags: max tx ip fragments
- * @has_virt_hdr: device uses virtio header.
  */
 void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev,
-    uint32_t max_frags, bool has_virt_hdr);
+    uint32_t max_frags);
 
 /**
  * Clean all tx packet resources.
diff --git a/hw/net/vmxnet3.c b/hw/net/vmxnet3.c
index c63bbb59bd..8c3f5d6e14 100644
--- a/hw/net/vmxnet3.c
+++ b/hw/net/vmxnet3.c
@@ -1521,8 +1521,7 @@ static void vmxnet3_activate_device(VMXNET3State *s)
 
     /* Preallocate TX packet wrapper */
     VMW_CFPRN("Max TX fragments is %u", s->max_tx_frags);
-    net_tx_pkt_init(&s->tx_pkt, PCI_DEVICE(s),
-                    s->max_tx_frags, s->peer_has_vhdr);
+    net_tx_pkt_init(&s->tx_pkt, PCI_DEVICE(s), s->max_tx_frags);
     net_rx_pkt_init(&s->rx_pkt, s->peer_has_vhdr);
 
     /* Read rings memory locations for RX queues */
@@ -2402,8 +2401,7 @@ static int vmxnet3_post_load(void *opaque, int version_id)
 {
     VMXNET3State *s = opaque;
 
-    net_tx_pkt_init(&s->tx_pkt, PCI_DEVICE(s),
-                    s->max_tx_frags, s->peer_has_vhdr);
+    net_tx_pkt_init(&s->tx_pkt, PCI_DEVICE(s), s->max_tx_frags);
     net_rx_pkt_init(&s->rx_pkt, s->peer_has_vhdr);
 
     if (s->msix_used) {