From: Ahmad Fatoum <ahmad@a3f.at>
To: barebox@lists.infradead.org
Cc: Ahmad Fatoum <ahmad@a3f.at>
Subject: [PATCH 5/9] virtio: ring: fix erroneous behavior around caches and MMU
Date: Sun, 28 Feb 2021 20:08:32 +0100
Message-ID: <20210228190836.1451663-5-ahmad@a3f.at>
In-Reply-To: <20210228190836.1451663-1-ahmad@a3f.at>

The code inherited from U-Boot assumes a 1:1 physical-to-virtual
mapping and relies on either disabled caches or a lot of luck. Use the
DMA API to get addresses suitable for DMA instead: the vring itself is
now allocated coherently and buffers handed to the device get streaming
DMA mappings.

This is required for proper operation on MIPS.

Signed-off-by: Ahmad Fatoum <ahmad@a3f.at>
---
 drivers/virtio/virtio_ring.c | 98 +++++++++++++++++++++++++++++++-----
 include/linux/virtio_ring.h  |  2 +
 2 files changed, 87 insertions(+), 13 deletions(-)
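
A note for reviewers less familiar with the barebox DMA API: the patch
uses both mapping styles. Below is a minimal sketch of the distinction,
built from the same calls used in the diff; the device, buffer and size
names (dev, buf, len, ring_size) are made up for illustration:

	/* Coherent mapping: the vring lives in consistent memory for its
	 * whole lifetime, so CPU stores to descriptors become visible to
	 * the device without explicit cache maintenance. */
	dma_addr_t ring_dma;
	void *ring = dma_alloc_coherent(ring_size, &ring_dma);
	/* ... ring is used for as long as the virtqueue exists ... */
	dma_free_coherent(ring, ring_dma, ring_size);

	/* Streaming mapping: a buffer is handed to the device for a single
	 * transfer; map/unmap bracket the transfer, perform any needed
	 * cache maintenance and yield the bus address the device uses. */
	dma_addr_t buf_dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buf_dma))
		return -EFAULT;
	/* ... device consumes the buffer at buf_dma ... */
	dma_unmap_single(dev, buf_dma, len, DMA_TO_DEVICE);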

diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index cac3362e7251..68180fe37da0 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -22,12 +22,45 @@
 #define vq_info(vq, fmt, ...) \
 	dev_info(&vq->vdev->dev, fmt, ##__VA_ARGS__)
 
+static inline struct device_d *vring_dma_dev(const struct virtqueue *vq)
+{
+	return vq->vdev->dev.parent;
+}
+
+/* Map one sg entry. */
+static dma_addr_t vring_map_one_sg(struct virtqueue *vq,
+				   struct virtio_sg *sg,
+				   enum dma_data_direction direction)
+{
+	return dma_map_single(vring_dma_dev(vq), sg->addr, sg->length, direction);
+}
+
+static int vring_mapping_error(struct virtqueue *vq,
+			       dma_addr_t addr)
+{
+	return dma_mapping_error(vring_dma_dev(vq), addr);
+}
+
+static void vring_unmap_one(struct virtqueue *vq,
+			    struct vring_desc *desc)
+{
+	u16 flags;
+
+	flags = virtio16_to_cpu(vq->vdev, desc->flags);
+
+	dma_unmap_single(vring_dma_dev(vq),
+		       virtio64_to_cpu(vq->vdev, desc->addr),
+		       virtio32_to_cpu(vq->vdev, desc->len),
+		       (flags & VRING_DESC_F_WRITE) ?
+		       DMA_FROM_DEVICE : DMA_TO_DEVICE);
+}
+
 int virtqueue_add(struct virtqueue *vq, struct virtio_sg *sgs[],
 		  unsigned int out_sgs, unsigned int in_sgs)
 {
 	struct vring_desc *desc;
 	unsigned int total_sg = out_sgs + in_sgs;
-	unsigned int i, n, avail, descs_used, uninitialized_var(prev);
+	unsigned int i, err_idx, n, avail, descs_used, uninitialized_var(prev);
 	int head;
 
 	WARN_ON(total_sg == 0);
@@ -53,9 +86,13 @@ int virtqueue_add(struct virtqueue *vq, struct virtio_sg *sgs[],
 
 	for (n = 0; n < out_sgs; n++) {
 		struct virtio_sg *sg = sgs[n];
+		dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
+
+		if (vring_mapping_error(vq, addr))
+			goto unmap_release;
 
 		desc[i].flags = cpu_to_virtio16(vq->vdev, VRING_DESC_F_NEXT);
-		desc[i].addr = cpu_to_virtio64(vq->vdev, (u64)(size_t)sg->addr);
+		desc[i].addr = cpu_to_virtio64(vq->vdev, addr);
 		desc[i].len = cpu_to_virtio32(vq->vdev, sg->length);
 
 		prev = i;
@@ -63,11 +100,13 @@ int virtqueue_add(struct virtqueue *vq, struct virtio_sg *sgs[],
 	}
 	for (; n < (out_sgs + in_sgs); n++) {
 		struct virtio_sg *sg = sgs[n];
+		dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
+		if (vring_mapping_error(vq, addr))
+			goto unmap_release;
 
 		desc[i].flags = cpu_to_virtio16(vq->vdev, VRING_DESC_F_NEXT |
 						VRING_DESC_F_WRITE);
-		desc[i].addr = cpu_to_virtio64(vq->vdev,
-					       (u64)(uintptr_t)sg->addr);
+		desc[i].addr = cpu_to_virtio64(vq->vdev, addr);
 		desc[i].len = cpu_to_virtio32(vq->vdev, sg->length);
 
 		prev = i;
@@ -106,6 +145,19 @@ int virtqueue_add(struct virtqueue *vq, struct virtio_sg *sgs[],
 		virtqueue_kick(vq);
 
 	return 0;
+
+unmap_release:
+	err_idx = i;
+	i = head;
+
+	for (n = 0; n < total_sg; n++) {
+		if (i == err_idx)
+			break;
+		vring_unmap_one(vq, &desc[i]);
+		i = virtio16_to_cpu(vq->vdev, desc[i].next);
+	}
+
+	return -ENOMEM;
 }
 
 static bool virtqueue_kick_prepare(struct virtqueue *vq)
@@ -149,10 +201,12 @@ static void detach_buf(struct virtqueue *vq, unsigned int head)
 	i = head;
 
 	while (vq->vring.desc[i].flags & nextflag) {
+		vring_unmap_one(vq, &vq->vring.desc[i]);
 		i = virtio16_to_cpu(vq->vdev, vq->vring.desc[i].next);
 		vq->num_free++;
 	}
 
+	vring_unmap_one(vq, &vq->vring.desc[i]);
 	vq->vring.desc[i].next = cpu_to_virtio16(vq->vdev, vq->free_head);
 	vq->free_head = head;
 
@@ -225,6 +279,8 @@ static struct virtqueue *__vring_new_virtqueue(unsigned int index,
 	vq->avail_flags_shadow = 0;
 	vq->avail_idx_shadow = 0;
 	vq->num_added = 0;
+	vq->queue_dma_addr = 0;
+	vq->queue_size_in_bytes = 0;
 	list_add_tail(&vq->list, &vdev->vqs);
 
 	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
@@ -243,12 +299,24 @@ static struct virtqueue *__vring_new_virtqueue(unsigned int index,
 	return vq;
 }
 
+static void *vring_alloc_queue(size_t size, dma_addr_t *dma_handle)
+{
+	return dma_alloc_coherent(size, dma_handle);
+}
+
+static void vring_free_queue(size_t size, void *queue, dma_addr_t dma_handle)
+{
+	dma_free_coherent(queue, dma_handle, size);
+}
+
 struct virtqueue *vring_create_virtqueue(unsigned int index, unsigned int num,
 					 unsigned int vring_align,
 					 struct virtio_device *vdev)
 {
 	struct virtqueue *vq;
 	void *queue = NULL;
+	dma_addr_t dma_addr;
+	size_t queue_size_in_bytes;
 	struct vring vring;
 
 	/* We assume num is a power of 2 */
@@ -259,7 +327,7 @@ struct virtqueue *vring_create_virtqueue(unsigned int index, unsigned int num,
 
 	/* TODO: allocate each queue chunk individually */
 	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
-		queue = memalign(PAGE_SIZE, vring_size(num, vring_align));
+		queue = vring_alloc_queue(vring_size(num, vring_align), &dma_addr);
 		if (queue)
 			break;
 	}
@@ -269,27 +337,31 @@ struct virtqueue *vring_create_virtqueue(unsigned int index, unsigned int num,
 
 	if (!queue) {
 		/* Try to get a single page. You are my only hope! */
-		queue = memalign(PAGE_SIZE, vring_size(num, vring_align));
+		queue = vring_alloc_queue(vring_size(num, vring_align), &dma_addr);
 	}
 	if (!queue)
 		return NULL;
 
-	memset(queue, 0, vring_size(num, vring_align));
+	queue_size_in_bytes = vring_size(num, vring_align);
 	vring_init(&vring, num, queue, vring_align);
 
 	vq = __vring_new_virtqueue(index, vring, vdev);
 	if (!vq) {
-		free(queue);
+		vring_free_queue(queue_size_in_bytes, queue, dma_addr);
 		return NULL;
 	}
-	vq_debug(vq, "created vring @ %p for vq with num %u\n", queue, num);
+	vq_debug(vq, "created vring @ (virt=%p, phys=%pad) for vq with num %u\n",
+		 queue, &dma_addr, num);
+
+	vq->queue_dma_addr = dma_addr;
+	vq->queue_size_in_bytes = queue_size_in_bytes;
 
 	return vq;
 }
 
 void vring_del_virtqueue(struct virtqueue *vq)
 {
-	free(vq->vring.desc);
+	vring_free_queue(vq->queue_size_in_bytes, vq->vring.desc, vq->queue_dma_addr);
 	list_del(&vq->list);
 	free(vq);
 }
@@ -301,18 +373,18 @@ unsigned int virtqueue_get_vring_size(struct virtqueue *vq)
 
 dma_addr_t virtqueue_get_desc_addr(struct virtqueue *vq)
 {
-	return (dma_addr_t)vq->vring.desc;
+	return vq->queue_dma_addr;
 }
 
 dma_addr_t virtqueue_get_avail_addr(struct virtqueue *vq)
 {
-	return (dma_addr_t)vq->vring.desc +
+	return vq->queue_dma_addr +
 	       ((char *)vq->vring.avail - (char *)vq->vring.desc);
 }
 
 dma_addr_t virtqueue_get_used_addr(struct virtqueue *vq)
 {
-	return (dma_addr_t)vq->vring.desc +
+	return vq->queue_dma_addr +
 	       ((char *)vq->vring.used - (char *)vq->vring.desc);
 }
 
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index 3c11592b09e4..c349af90ce50 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -108,6 +108,8 @@ struct virtqueue {
 	u16 last_used_idx;
 	u16 avail_flags_shadow;
 	u16 avail_idx_shadow;
+	dma_addr_t queue_dma_addr;
+	size_t queue_size_in_bytes;
 };
 
 /*
-- 
2.30.0

