SOURCES/0007-net-virtio-support-packed-queue-in-send-command.patch

From d8d854a2f1814e10cf51ce88bf00b020167c772e Mon Sep 17 00:00:00 2001
From: Jens Freimann <jfreimann@redhat.com>
Date: Mon, 17 Dec 2018 22:31:36 +0100
Subject: [PATCH 07/18] net/virtio: support packed queue in send command

[ upstream commit ec194c2f189525b2fb4be5604422a28ea5f08acd ]

Use packed virtqueue format when reading and writing descriptors
to/from the ring.

Signed-off-by: Jens Freimann <jfreimann@redhat.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
(cherry picked from commit ec194c2f189525b2fb4be5604422a28ea5f08acd)
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
 drivers/net/virtio/virtio_ethdev.c | 96 ++++++++++++++++++++++++++++++
 1 file changed, 96 insertions(+)

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 4ef1da393..53773445b 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -141,6 +141,96 @@ static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
 
 struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];
 
+static struct virtio_pmd_ctrl *
+virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
+		       int *dlen, int pkt_num)
+{
+	struct virtqueue *vq = cvq->vq;
+	int head;
+	struct vring_packed_desc *desc = vq->ring_packed.desc_packed;
+	struct virtio_pmd_ctrl *result;
+	int wrap_counter;
+	uint16_t flags;
+	int sum = 0;
+	int k;
+
+	/*
+	 * Format is enforced in qemu code:
+	 * One TX packet for header;
+	 * At least one TX packet per argument;
+	 * One RX packet for ACK.
+	 */
+	head = vq->vq_avail_idx;
+	wrap_counter = vq->avail_wrap_counter;
+	desc[head].flags = VRING_DESC_F_NEXT;
+	desc[head].addr = cvq->virtio_net_hdr_mem;
+	desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
+	vq->vq_free_cnt--;
+	if (++vq->vq_avail_idx >= vq->vq_nentries) {
+		vq->vq_avail_idx -= vq->vq_nentries;
+		vq->avail_wrap_counter ^= 1;
+	}
+
+	for (k = 0; k < pkt_num; k++) {
+		desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
+			+ sizeof(struct virtio_net_ctrl_hdr)
+			+ sizeof(ctrl->status) + sizeof(uint8_t) * sum;
+		desc[vq->vq_avail_idx].len = dlen[k];
+		flags = VRING_DESC_F_NEXT;
+		sum += dlen[k];
+		vq->vq_free_cnt--;
+		flags |= VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
+			 VRING_DESC_F_USED(!vq->avail_wrap_counter);
+		desc[vq->vq_avail_idx].flags = flags;
+		rte_smp_wmb();
+		vq->vq_free_cnt--;
+		if (++vq->vq_avail_idx >= vq->vq_nentries) {
+			vq->vq_avail_idx -= vq->vq_nentries;
+			vq->avail_wrap_counter ^= 1;
+		}
+	}
+
+
+	desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
+		+ sizeof(struct virtio_net_ctrl_hdr);
+	desc[vq->vq_avail_idx].len = sizeof(ctrl->status);
+	flags = VRING_DESC_F_WRITE;
+	flags |= VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
+		 VRING_DESC_F_USED(!vq->avail_wrap_counter);
+	desc[vq->vq_avail_idx].flags = flags;
+	flags = VRING_DESC_F_NEXT;
+	flags |= VRING_DESC_F_AVAIL(wrap_counter) |
+		 VRING_DESC_F_USED(!wrap_counter);
+	desc[head].flags = flags;
+	rte_smp_wmb();
+
+	vq->vq_free_cnt--;
+	if (++vq->vq_avail_idx >= vq->vq_nentries) {
+		vq->vq_avail_idx -= vq->vq_nentries;
+		vq->avail_wrap_counter ^= 1;
+	}
+
+	virtqueue_notify(vq);
+
+	/* wait for used descriptors in virtqueue */
+	do {
+		rte_rmb();
+		usleep(100);
+	} while (!desc_is_used(&desc[head], vq));
+
+	/* now get used descriptors */
+	while (desc_is_used(&desc[vq->vq_used_cons_idx], vq)) {
+		vq->vq_free_cnt++;
+		if (++vq->vq_used_cons_idx >= vq->vq_nentries) {
+			vq->vq_used_cons_idx -= vq->vq_nentries;
+			vq->used_wrap_counter ^= 1;
+		}
+	}
+
+	result = cvq->virtio_net_hdr_mz->addr;
+	return result;
+}
+
 static int
 virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
 		int *dlen, int pkt_num)
@@ -174,6 +264,11 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
 	memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
 		sizeof(struct virtio_pmd_ctrl));
 
+	if (vtpci_packed_queue(vq->hw)) {
+		result = virtio_pq_send_command(cvq, ctrl, dlen, pkt_num);
+		goto out_unlock;
+	}
+
 	/*
 	 * Format is enforced in qemu code:
 	 * One TX packet for header;
@@ -245,6 +340,7 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
 
 	result = cvq->virtio_net_hdr_mz->addr;
 
+out_unlock:
 	rte_spinlock_unlock(&cvq->lock);
 	return result->status;
 }
-- 
2.21.0
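For readers following the descriptor handling in virtio_pq_send_command() above: a packed virtqueue signals ownership through two bits in each descriptor's flags word. The driver posts a descriptor by setting the AVAIL bit to its avail wrap counter and the USED bit to the complement (the VRING_DESC_F_AVAIL()/VRING_DESC_F_USED() lines in the patch), and the device completes it by writing the flags with both bits equal to its own wrap counter, which is what the desc_is_used() polling loop waits for. The minimal sketch below is not part of the patch and uses hypothetical demo_-prefixed names; it only illustrates that flag convention in isolation.

/*
 * Illustrative sketch (not part of the patch): how the packed-ring
 * AVAIL/USED flag bits relate to the driver's wrap counters.  The
 * demo_* names are hypothetical, not DPDK or virtio identifiers.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* In the virtio 1.1 packed-ring layout, AVAIL is bit 7 and USED is
 * bit 15 of the 16-bit descriptor flags word. */
#define DEMO_DESC_F_AVAIL (UINT16_C(1) << 7)
#define DEMO_DESC_F_USED  (UINT16_C(1) << 15)

/* Driver side: mark a descriptor available in the current wrap phase by
 * setting AVAIL to the avail wrap counter and USED to its complement. */
static uint16_t demo_mark_avail(uint16_t flags, bool avail_wrap_counter)
{
	flags &= (uint16_t)~(DEMO_DESC_F_AVAIL | DEMO_DESC_F_USED);
	if (avail_wrap_counter)
		flags |= DEMO_DESC_F_AVAIL;
	else
		flags |= DEMO_DESC_F_USED;
	return flags;
}

/* Driver side: the descriptor counts as used once AVAIL and USED are
 * equal and both match the driver's used wrap counter. */
static bool demo_desc_is_used(uint16_t flags, bool used_wrap_counter)
{
	bool avail = (flags & DEMO_DESC_F_AVAIL) != 0;
	bool used = (flags & DEMO_DESC_F_USED) != 0;

	return avail == used && used == used_wrap_counter;
}

int main(void)
{
	bool wrap = true;                       /* both counters start at 1 */
	uint16_t flags = demo_mark_avail(0, wrap);

	printf("freshly posted, used? %d\n",
	       demo_desc_is_used(flags, wrap)); /* 0: still pending */

	flags |= DEMO_DESC_F_USED;              /* device writeback: AVAIL == USED */
	printf("after device writeback, used? %d\n",
	       demo_desc_is_used(flags, wrap)); /* 1: completed */
	return 0;
}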