From 97ee69c836bfb08e674fd0f28d1fc7a14f2d4de0 Mon Sep 17 00:00:00 2001
From: Jens Freimann <jfreimann@redhat.com>
Date: Mon, 17 Dec 2018 22:31:34 +0100
Subject: [PATCH 05/18] net/virtio: implement Tx path for packed queues

[ upstream commit 892dc798fa9c24e6172b8bcecc9586f2f9a7a49e ]

This implements the transmit path for devices with
support for packed virtqueues.

Signed-off-by: Jens Freimann <jfreimann@redhat.com>
Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
(cherry picked from commit 892dc798fa9c24e6172b8bcecc9586f2f9a7a49e)
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
 drivers/net/virtio/virtio_ethdev.c |  56 ++++---
 drivers/net/virtio/virtio_ethdev.h |   2 +
 drivers/net/virtio/virtio_rxtx.c   | 236 ++++++++++++++++++++++++++++-
 drivers/net/virtio/virtqueue.h     |  20 ++-
 4 files changed, 292 insertions(+), 22 deletions(-)

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index ee52e3cdb..6023d6f2c 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -388,6 +388,9 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
 	if (vtpci_packed_queue(hw)) {
 		vq->avail_wrap_counter = 1;
 		vq->used_wrap_counter = 1;
+		vq->avail_used_flags =
+			VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
+			VRING_DESC_F_USED(!vq->avail_wrap_counter);
 	}
 
 	/*
@@ -495,17 +498,26 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
 		memset(txr, 0, vq_size * sizeof(*txr));
 		for (i = 0; i < vq_size; i++) {
 			struct vring_desc *start_dp = txr[i].tx_indir;
-
-			vring_desc_init_split(start_dp,
-					      RTE_DIM(txr[i].tx_indir));
+			struct vring_packed_desc *start_dp_packed =
+				txr[i].tx_indir_pq;
 
 			/* first indirect descriptor is always the tx header */
-			start_dp->addr = txvq->virtio_net_hdr_mem
-				+ i * sizeof(*txr)
-				+ offsetof(struct virtio_tx_region, tx_hdr);
-
-			start_dp->len = hw->vtnet_hdr_size;
-			start_dp->flags = VRING_DESC_F_NEXT;
+			if (vtpci_packed_queue(hw)) {
+				start_dp_packed->addr = txvq->virtio_net_hdr_mem
+					+ i * sizeof(*txr)
+					+ offsetof(struct virtio_tx_region,
+						   tx_hdr);
+				start_dp_packed->len = hw->vtnet_hdr_size;
+			} else {
+				vring_desc_init_split(start_dp,
+						      RTE_DIM(txr[i].tx_indir));
+				start_dp->addr = txvq->virtio_net_hdr_mem
+					+ i * sizeof(*txr)
+					+ offsetof(struct virtio_tx_region,
+						   tx_hdr);
+				start_dp->len = hw->vtnet_hdr_size;
+				start_dp->flags = VRING_DESC_F_NEXT;
+			}
 		}
 	}
 
@@ -1334,6 +1346,23 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
 {
 	struct virtio_hw *hw = eth_dev->data->dev_private;
 
+	if (vtpci_packed_queue(hw)) {
+		PMD_INIT_LOG(INFO,
+			"virtio: using packed ring standard Tx path on port %u",
+			eth_dev->data->port_id);
+		eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed;
+	} else {
+		if (hw->use_inorder_tx) {
+			PMD_INIT_LOG(INFO, "virtio: using inorder Tx path on port %u",
+				eth_dev->data->port_id);
+			eth_dev->tx_pkt_burst = virtio_xmit_pkts_inorder;
+		} else {
+			PMD_INIT_LOG(INFO, "virtio: using standard Tx path on port %u",
+				eth_dev->data->port_id);
+			eth_dev->tx_pkt_burst = virtio_xmit_pkts;
+		}
+	}
+
 	if (hw->use_simple_rx) {
 		PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
 			eth_dev->data->port_id);
@@ -1354,15 +1383,6 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
 		eth_dev->rx_pkt_burst = &virtio_recv_pkts;
 	}
 
-	if (hw->use_inorder_tx) {
-		PMD_INIT_LOG(INFO, "virtio: using inorder Tx path on port %u",
-			eth_dev->data->port_id);
-		eth_dev->tx_pkt_burst = virtio_xmit_pkts_inorder;
-	} else {
-		PMD_INIT_LOG(INFO, "virtio: using standard Tx path on port %u",
-			eth_dev->data->port_id);
-		eth_dev->tx_pkt_burst = virtio_xmit_pkts;
-	}
 }
 
 /* Only support 1:1 queue/interrupt mapping so far.
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index e0f80e5a4..05d355180 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -82,6 +82,8 @@ uint16_t virtio_recv_mergeable_pkts_inorder(void *rx_queue,
 
 uint16_t virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		uint16_t nb_pkts);
+uint16_t virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts);
 
 uint16_t virtio_xmit_pkts_inorder(void *tx_queue, struct rte_mbuf **tx_pkts,
 		uint16_t nb_pkts);
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index eb891433e..ab74917a8 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -88,6 +88,23 @@ vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
 	dp->next = VQ_RING_DESC_CHAIN_END;
 }
 
+static void
+vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id)
+{
+	struct vq_desc_extra *dxp;
+
+	dxp = &vq->vq_descx[id];
+	vq->vq_free_cnt += dxp->ndescs;
+
+	if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END)
+		vq->vq_desc_head_idx = id;
+	else
+		vq->vq_descx[vq->vq_desc_tail_idx].next = id;
+
+	vq->vq_desc_tail_idx = id;
+	dxp->next = VQ_RING_DESC_CHAIN_END;
+}
+
 static uint16_t
 virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
 			   uint32_t *len, uint16_t num)
@@ -165,6 +182,33 @@ virtqueue_dequeue_rx_inorder(struct virtqueue *vq,
 #endif
 
 /* Cleanup from completed transmits. */
+static void
+virtio_xmit_cleanup_packed(struct virtqueue *vq, int num)
+{
+	uint16_t used_idx, id;
+	uint16_t size = vq->vq_nentries;
+	struct vring_packed_desc *desc = vq->ring_packed.desc_packed;
+	struct vq_desc_extra *dxp;
+
+	used_idx = vq->vq_used_cons_idx;
+	while (num-- && desc_is_used(&desc[used_idx], vq)) {
+		used_idx = vq->vq_used_cons_idx;
+		id = desc[used_idx].id;
+		dxp = &vq->vq_descx[id];
+		vq->vq_used_cons_idx += dxp->ndescs;
+		if (vq->vq_used_cons_idx >= size) {
+			vq->vq_used_cons_idx -= size;
+			vq->used_wrap_counter ^= 1;
+		}
+		vq_ring_free_id_packed(vq, id);
+		if (dxp->cookie != NULL) {
+			rte_pktmbuf_free(dxp->cookie);
+			dxp->cookie = NULL;
+		}
+		used_idx = vq->vq_used_cons_idx;
+	}
+}
+
 static void
 virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
 {
@@ -456,6 +500,107 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
 	vq->vq_desc_head_idx = idx & (vq->vq_nentries - 1);
 }
 
+static inline void
+virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
+			      uint16_t needed, int can_push)
+{
+	struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
+	struct vq_desc_extra *dxp;
+	struct virtqueue *vq = txvq->vq;
+	struct vring_packed_desc *start_dp, *head_dp;
+	uint16_t idx, id, head_idx, head_flags;
+	uint16_t head_size = vq->hw->vtnet_hdr_size;
+	struct virtio_net_hdr *hdr;
+	uint16_t prev;
+
+	id = vq->vq_desc_head_idx;
+
+	dxp = &vq->vq_descx[id];
+	dxp->ndescs = needed;
+	dxp->cookie = cookie;
+
+	head_idx = vq->vq_avail_idx;
+	idx = head_idx;
+	prev = head_idx;
+	start_dp = vq->ring_packed.desc_packed;
+
+	head_dp = &vq->ring_packed.desc_packed[idx];
+	head_flags = cookie->next ? VRING_DESC_F_NEXT : 0;
+	head_flags |= vq->avail_used_flags;
+
+	if (can_push) {
+		/* prepend cannot fail, checked by caller */
+		hdr = (struct virtio_net_hdr *)
+			rte_pktmbuf_prepend(cookie, head_size);
+		/* rte_pktmbuf_prepend() counts the hdr size to the pkt length,
+		 * which is wrong. Below subtract restores correct pkt size.
+		 */
+		cookie->pkt_len -= head_size;
+
+		/* if offload disabled, it is not zeroed below, do it now */
+		if (!vq->hw->has_tx_offload) {
+			ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
+		}
+	} else {
+		/* setup first tx ring slot to point to header
+		 * stored in reserved region.
+		 */
+		start_dp[idx].addr = txvq->virtio_net_hdr_mem +
+			RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
+		start_dp[idx].len = vq->hw->vtnet_hdr_size;
+		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
+		idx++;
+		if (idx >= vq->vq_nentries) {
+			idx -= vq->vq_nentries;
+			vq->avail_wrap_counter ^= 1;
+			vq->avail_used_flags =
+				VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
+				VRING_DESC_F_USED(!vq->avail_wrap_counter);
+		}
+	}
+
+	virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
+
+	do {
+		uint16_t flags;
+
+		start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
+		start_dp[idx].len = cookie->data_len;
+		if (likely(idx != head_idx)) {
+			flags = cookie->next ? VRING_DESC_F_NEXT : 0;
+			flags |= vq->avail_used_flags;
+			start_dp[idx].flags = flags;
+		}
+		prev = idx;
+		idx++;
+		if (idx >= vq->vq_nentries) {
+			idx -= vq->vq_nentries;
+			vq->avail_wrap_counter ^= 1;
+			vq->avail_used_flags =
+				VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
+				VRING_DESC_F_USED(!vq->avail_wrap_counter);
+		}
+	} while ((cookie = cookie->next) != NULL);
+
+	start_dp[prev].id = id;
+
+	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
+
+	vq->vq_desc_head_idx = dxp->next;
+	if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
+		vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
+
+	vq->vq_avail_idx = idx;
+
+	rte_smp_wmb();
+	head_dp->flags = head_flags;
+}
+
 static inline void
 virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 		       uint16_t needed, int use_indirect, int can_push,
@@ -733,8 +878,10 @@ virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
 
 	PMD_INIT_FUNC_TRACE();
 
-	if (hw->use_inorder_tx)
-		vq->vq_ring.desc[vq->vq_nentries - 1].next = 0;
+	if (!vtpci_packed_queue(hw)) {
+		if (hw->use_inorder_tx)
+			vq->vq_ring.desc[vq->vq_nentries - 1].next = 0;
+	}
 
 	VIRTQUEUE_DUMP(vq);
 
@@ -1346,6 +1493,91 @@ virtio_recv_mergeable_pkts(void *rx_queue,
 	return nb_rx;
 }
 
+uint16_t
+virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
+			uint16_t nb_pkts)
+{
+	struct virtnet_tx *txvq = tx_queue;
+	struct virtqueue *vq = txvq->vq;
+	struct virtio_hw *hw = vq->hw;
+	uint16_t hdr_size = hw->vtnet_hdr_size;
+	uint16_t nb_tx = 0;
+	int error;
+
+	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
+		return nb_tx;
+
+	if (unlikely(nb_pkts < 1))
+		return nb_pkts;
+
+	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
+
+	if (nb_pkts > vq->vq_free_cnt)
+		virtio_xmit_cleanup_packed(vq, nb_pkts - vq->vq_free_cnt);
+
+	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+		struct rte_mbuf *txm = tx_pkts[nb_tx];
+		int can_push = 0, slots, need;
+
+		/* Do VLAN tag insertion */
+		if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
+			error = rte_vlan_insert(&txm);
+			if (unlikely(error)) {
+				rte_pktmbuf_free(txm);
+				continue;
+			}
+		}
+
+		/* optimize ring usage */
+		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
+		      vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
+			rte_mbuf_refcnt_read(txm) == 1 &&
+			RTE_MBUF_DIRECT(txm) &&
+			txm->nb_segs == 1 &&
+			rte_pktmbuf_headroom(txm) >= hdr_size &&
+			rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
+				__alignof__(struct virtio_net_hdr_mrg_rxbuf)))
+			can_push = 1;
+
+		/* How many main ring entries are needed to this Tx?
+		 * any_layout => number of segments
+		 * default    => number of segments + 1
+		 */
+		slots = txm->nb_segs + !can_push;
+		need = slots - vq->vq_free_cnt;
+
+		/* Positive value indicates it need free vring descriptors */
+		if (unlikely(need > 0)) {
+			virtio_rmb();
+			need = RTE_MIN(need, (int)nb_pkts);
+			virtio_xmit_cleanup_packed(vq, need);
+			need = slots - vq->vq_free_cnt;
+			if (unlikely(need > 0)) {
+				PMD_TX_LOG(ERR,
+					   "No free tx descriptors to transmit");
+				break;
+			}
+		}
+
+		/* Enqueue Packet buffers */
+		virtqueue_enqueue_xmit_packed(txvq, txm, slots, can_push);
+
+		txvq->stats.bytes += txm->pkt_len;
+		virtio_update_packet_stats(&txvq->stats, txm);
+	}
+
+	txvq->stats.packets += nb_tx;
+
+	if (likely(nb_tx)) {
+		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
+			virtqueue_notify(vq);
+			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
+		}
+	}
+
+	return nb_tx;
+}
+
 uint16_t
 virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index e9c35a553..b142fd488 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -247,8 +247,12 @@ struct virtio_net_hdr_mrg_rxbuf {
 #define VIRTIO_MAX_TX_INDIRECT 8
 struct virtio_tx_region {
 	struct virtio_net_hdr_mrg_rxbuf tx_hdr;
-	struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT]
-		__attribute__((__aligned__(16)));
+	union {
+		struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT]
+			__attribute__((__aligned__(16)));
+		struct vring_packed_desc tx_indir_pq[VIRTIO_MAX_TX_INDIRECT]
+			__attribute__((__aligned__(16)));
+	};
 };
 
 static inline int
@@ -380,6 +384,7 @@ virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
 #define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
 
 void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx);
+void vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t used_idx);
 void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
 			  uint16_t num);
 
@@ -418,6 +423,17 @@ virtqueue_kick_prepare(struct virtqueue *vq)
 	return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);
 }
 
+static inline int
+virtqueue_kick_prepare_packed(struct virtqueue *vq)
+{
+	uint16_t flags;
+
+	virtio_mb();
+	flags = vq->ring_packed.device_event->desc_event_flags;
+
+	return flags != RING_EVENT_FLAGS_DISABLE;
+}
+
 static inline void
 virtqueue_notify(struct virtqueue *vq)
 {
-- 
2.21.0