The rte_pktmbuf structure no longer exists; its fields have been merged
directly into struct rte_mbuf. Update vhost_rxtx.c to use the flattened
field names and rte_pktmbuf_mtod() for data access.

Signed-off-by: Huawei Xie <huawei.xie@intel.com>
---
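Not part of the patch, just a note for reviewers: a minimal sketch of how
the old nested pkt fields map onto the flattened rte_mbuf layout after the
mbuf rework. example_mbuf_fields() is an illustrative helper name, not code
from this series; the field types shown are assumed from the post-rework
rte_mbuf in this tree.

#include <rte_mbuf.h>

/* Illustrative only: old pkt.* accessors vs. the merged rte_mbuf fields. */
static inline uint32_t
example_mbuf_fields(struct rte_mbuf *m)
{
        void *data = rte_pktmbuf_mtod(m, void *); /* was m->pkt.data     */
        uint16_t data_len = m->data_len;          /* was m->pkt.data_len */
        uint8_t nb_segs = m->nb_segs;             /* was m->pkt.nb_segs  */
        struct rte_mbuf *next = m->next;          /* was m->pkt.next     */

        (void)data; (void)data_len; (void)nb_segs; (void)next;
        return m->pkt_len;                        /* was m->pkt.pkt_len  */
}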
 lib/librte_vhost/vhost_rxtx.c | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/lib/librte_vhost/vhost_rxtx.c b/lib/librte_vhost/vhost_rxtx.c
index 81368e6..688e661 100644
--- a/lib/librte_vhost/vhost_rxtx.c
+++ b/lib/librte_vhost/vhost_rxtx.c
@@ -145,7 +145,7 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id, struct rte_mbuf **pkts,
                /* Copy mbuf data to buffer */
                /* TODO fixme for sg mbuf and the case that desc couldn't hold the mbuf data */
                rte_memcpy((void *)(uintptr_t)buff_addr,
-                       (const void *)buff->pkt.data,
+                       rte_pktmbuf_mtod(buff, const void *),
                        rte_pktmbuf_data_len(buff));
                VHOST_PRINT_PACKET(dev, (uintptr_t)buff_addr,
                        rte_pktmbuf_data_len(buff), 0);
@@ -307,7 +307,7 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint16_t res_base_idx,
                         * This current segment complete, need continue to
                         * check if the whole packet complete or not.
                         */
-                       pkt = pkt->pkt.next;
+                       pkt = pkt->next;
                        if (pkt != NULL) {
                                /*
                                 * There are more segments.
@@ -411,7 +411,7 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id, struct rte_mbuf *
                uint32_t secure_len = 0;
                uint16_t need_cnt;
                uint32_t vec_idx = 0;
-               uint32_t pkt_len = pkts[pkt_idx]->pkt.pkt_len + vq->vhost_hlen;
+               uint32_t pkt_len = pkts[pkt_idx]->pkt_len + vq->vhost_hlen;
                uint16_t i, id;

                do {
@@ -631,8 +631,8 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id, struct rte_me
                                 * while the virtio buffer in TX vring has
                                 * more data to be copied.
                                 */
-                               cur->pkt.data_len = seg_offset;
-                               m->pkt.pkt_len += seg_offset;
+                               cur->data_len = seg_offset;
+                               m->pkt_len += seg_offset;
                                /* Allocate mbuf and populate the structure. */
                                cur = rte_pktmbuf_alloc(mbuf_pool);
                                if (unlikely(cur == NULL)) {
@@ -644,7 +644,7 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id, struct rte_me
                                }

                                seg_num++;
-                               prev->pkt.next = cur;
+                               prev->next = cur;
                                prev = cur;
                                seg_offset = 0;
                                seg_avail = cur->buf_len - RTE_PKTMBUF_HEADROOM;
@@ -660,8 +660,8 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id, struct rte_me
                                                 * room to accomodate more
                                                 * data.
                                                 */
-                                               cur->pkt.data_len = seg_offset;
-                                               m->pkt.pkt_len += seg_offset;
+                                               cur->data_len = seg_offset;
+                                               m->pkt_len += seg_offset;
                                                /*
                                                 * Allocate an mbuf and
                                                 * populate the structure.
@@ -678,7 +678,7 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id, struct rte_me
                                                        break;
                                                }
                                                seg_num++;
-                                               prev->pkt.next = cur;
+                                               prev->next = cur;
                                                prev = cur;
                                                seg_offset = 0;
                                                seg_avail = cur->buf_len - RTE_PKTMBUF_HEADROOM;
@@ -697,8 +697,8 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id, struct rte_me
                                                desc->len, 0);
                                } else {
                                        /* The whole packet completes. */
-                                       cur->pkt.data_len = seg_offset;
-                                       m->pkt.pkt_len += seg_offset;
+                                       cur->data_len = seg_offset;
+                                       m->pkt_len += seg_offset;
                                        vb_avail = 0;
                                }
                        }
@@ -709,7 +709,7 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id, struct rte_me
                if (unlikely(alloc_err == 1))
                        break;

-               m->pkt.nb_segs = seg_num;
+               m->nb_segs = seg_num;

                pkts[entry_success] = m;
                vq->last_used_idx++;
-- 
1.8.1.4
