mail archive of the barebox mailing list
* [PATCH v4 0/3] net: macb: fix dma usage
@ 2023-12-01 13:51 Steffen Trumtrar
  2023-12-01 13:51 ` [PATCH v4 1/3] net: macb: fix dma_alloc for rx_buffer Steffen Trumtrar
                   ` (3 more replies)
  0 siblings, 4 replies; 5+ messages in thread
From: Steffen Trumtrar @ 2023-12-01 13:51 UTC (permalink / raw)
  To: barebox; +Cc: Ahmad Fatoum

The rx_buffer is only dma_alloc'ed but never properly flushed.
Fix that.

While at it, also use proper volatile accesses instead of software barriers.

Also, redefine PKTSIZE to a sensible multiple of 64 bytes.

Signed-off-by: Steffen Trumtrar <s.trumtrar@pengutronix.de>
---
Changes in v4:
- align PKTSIZE with cacheline size
- minor style changes
- Link to v3: https://lore.barebox.org/20231129-v2023-08-0-topic-macb-v3-0-65ad6db834dc@pengutronix.de

Changes in v3:
- fix dma_unmap_single direction
- dma_map_single packet in macb_send()
- Link to v2: https://lore.barebox.org/20231129-v2023-08-0-topic-macb-v2-0-4dc2cb4d5d25@pengutronix.de

Changes in v2:
- change dma_map_single to DMA_FROM_DEVICE
- drop (unsigned long) casts in dma_sync_*
- rework writel/setbits/clearbits to keep semantics
- Link to v1: https://lore.barebox.org/20231128-v2023-08-0-topic-macb-v1-0-9faff73bc990@pengutronix.de

---
Steffen Trumtrar (3):
      net: macb: fix dma_alloc for rx_buffer
      net: macb: convert to volatile accesses
      include: net: align PKTSIZE to 64 bytes

 drivers/net/macb.c | 132 +++++++++++++++++++++++++++++------------------------
 include/net.h      |   4 +-
 2 files changed, 75 insertions(+), 61 deletions(-)
---
base-commit: 5f200dd534c848dfa5d948334b6373f0310b8f73
change-id: 20231128-v2023-08-0-topic-macb-0c13ed91179d

Best regards,
-- 
Steffen Trumtrar <s.trumtrar@pengutronix.de>

* [PATCH v4 1/3] net: macb: fix dma_alloc for rx_buffer
  2023-12-01 13:51 [PATCH v4 0/3] net: macb: fix dma usage Steffen Trumtrar
@ 2023-12-01 13:51 ` Steffen Trumtrar
  2023-12-01 13:51 ` [PATCH v4 2/3] net: macb: convert to volatile accesses Steffen Trumtrar
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 5+ messages in thread
From: Steffen Trumtrar @ 2023-12-01 13:51 UTC (permalink / raw)
  To: barebox

rx_buffer gets dma_alloc'ed but is never dma_map'ed and is therefore not
flushed before it is first used.

Map the rx_buffer when the macb is initialized and unmap it in macb_halt.

While at it, clean up the dma_alloc_coherent usage for rx_ring/tx_ring, too.
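
For illustration, the streaming-DMA lifecycle the patch adopts, condensed
from the hunks below (a sketch, not the literal code: error paths and the
descriptor ring setup are omitted, and "offset" stands in for
macb->rx_buffer_size * macb->rx_tail):

  /* at init: map the buffer once so the device sees clean memory */
  macb->rx_buffer_phys = dma_map_single(macb->dev, macb->rx_buffer,
                                        macb->rx_buffer_size * macb->rx_ring_size,
                                        DMA_FROM_DEVICE);
  if (dma_mapping_error(macb->dev, macb->rx_buffer_phys))
          return -EFAULT;

  /* per received packet: hand ownership to the CPU, consume, hand back */
  dma_sync_single_for_cpu(macb->dev, macb->rx_buffer_phys + offset,
                          length, DMA_FROM_DEVICE);
  net_receive(edev, macb->rx_buffer + offset, length);
  dma_sync_single_for_device(macb->dev, macb->rx_buffer_phys + offset,
                             length, DMA_FROM_DEVICE);

  /* at halt: release the mapping */
  dma_unmap_single(macb->dev, macb->rx_buffer_phys,
                   macb->rx_buffer_size * macb->rx_ring_size,
                   DMA_FROM_DEVICE);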

Signed-off-by: Steffen Trumtrar <s.trumtrar@pengutronix.de>
---
 drivers/net/macb.c | 83 +++++++++++++++++++++++++++++++++++-------------------
 1 file changed, 54 insertions(+), 29 deletions(-)

diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 260c1e806a..6aae8e340d 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -63,10 +63,13 @@ struct macb_device {
 	unsigned int		tx_head;
 
 	void			*rx_buffer;
+	dma_addr_t		rx_buffer_phys;
 	void			*tx_buffer;
 	void			*rx_packet_buf;
 	struct macb_dma_desc	*rx_ring;
+	dma_addr_t		rx_ring_phys;
 	struct macb_dma_desc	*tx_ring;
+	dma_addr_t		tx_ring_phys;
 	struct macb_dma_desc	*gem_q1_descs;
 
 	int			rx_buffer_size;
@@ -105,6 +108,7 @@ static int macb_send(struct eth_device *edev, void *packet,
 	int ret = 0;
 	uint64_t start;
 	unsigned int tx_head = macb->tx_head;
+	dma_addr_t packet_dma;
 
 	ctrl = MACB_BF(TX_FRMLEN, length);
 	ctrl |= MACB_BIT(TX_LAST);
@@ -116,10 +120,14 @@ static int macb_send(struct eth_device *edev, void *packet,
 		macb->tx_head++;
 	}
 
+	packet_dma = dma_map_single(macb->dev, packet, length, DMA_TO_DEVICE);
+	if (dma_mapping_error(macb->dev, packet_dma))
+		return -EFAULT;
+
 	macb->tx_ring[tx_head].ctrl = ctrl;
-	macb->tx_ring[tx_head].addr = (ulong)packet;
+	macb->tx_ring[tx_head].addr = packet_dma;
 	barrier();
-	dma_sync_single_for_device(macb->dev, (unsigned long)packet, length, DMA_TO_DEVICE);
+
 	macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE) | MACB_BIT(TSTART));
 
 	start = get_time_ns();
@@ -132,7 +140,8 @@ static int macb_send(struct eth_device *edev, void *packet,
 			break;
 		}
 	} while (!is_timeout(start, 100 * MSECOND));
-	dma_sync_single_for_cpu(macb->dev, (unsigned long)packet, length, DMA_TO_DEVICE);
+
+	dma_unmap_single(macb->dev, packet_dma, length, DMA_TO_DEVICE);
 
 	if (ctrl & MACB_BIT(TX_UNDERRUN))
 		dev_err(macb->dev, "TX underrun\n");
@@ -169,7 +178,7 @@ static void reclaim_rx_buffers(struct macb_device *macb,
 static int gem_recv(struct eth_device *edev)
 {
 	struct macb_device *macb = edev->priv;
-	void *buffer;
+	dma_addr_t buffer;
 	int length;
 	u32 status;
 
@@ -181,12 +190,12 @@ static int gem_recv(struct eth_device *edev)
 		barrier();
 		status = macb->rx_ring[macb->rx_tail].ctrl;
 		length = MACB_BFEXT(RX_FRMLEN, status);
-		buffer = macb->rx_buffer + macb->rx_buffer_size * macb->rx_tail;
-		dma_sync_single_for_cpu(macb->dev, (unsigned long)buffer, length,
-					DMA_FROM_DEVICE);
-		net_receive(edev, buffer, length);
-		dma_sync_single_for_device(macb->dev, (unsigned long)buffer, length,
-					   DMA_FROM_DEVICE);
+		buffer = macb->rx_buffer_phys + macb->rx_buffer_size * macb->rx_tail;
+		dma_sync_single_for_cpu(macb->dev, buffer, length, DMA_FROM_DEVICE);
+		net_receive(edev,
+			    macb->rx_buffer + macb->rx_buffer_size * macb->rx_tail,
+			    length);
+		dma_sync_single_for_device(macb->dev, buffer, length, DMA_FROM_DEVICE);
 		macb->rx_ring[macb->rx_tail].addr &= ~MACB_BIT(RX_USED);
 		barrier();
 
@@ -202,7 +211,7 @@ static int macb_recv(struct eth_device *edev)
 {
 	struct macb_device *macb = edev->priv;
 	unsigned int rx_tail = macb->rx_tail;
-	void *buffer;
+	dma_addr_t buffer;
 	int length;
 	int wrapped = 0;
 	u32 status;
@@ -221,7 +230,7 @@ static int macb_recv(struct eth_device *edev)
 		}
 
 		if (status & MACB_BIT(RX_EOF)) {
-			buffer = macb->rx_buffer + macb->rx_buffer_size * macb->rx_tail;
+			buffer = macb->rx_buffer_phys + macb->rx_buffer_size * macb->rx_tail;
 			length = MACB_BFEXT(RX_FRMLEN, status);
 			if (wrapped) {
 				unsigned int headlen, taillen;
@@ -229,23 +238,23 @@ static int macb_recv(struct eth_device *edev)
 				headlen = macb->rx_buffer_size * (macb->rx_ring_size
 						 - macb->rx_tail);
 				taillen = length - headlen;
-				dma_sync_single_for_cpu(macb->dev, (unsigned long)buffer,
-							headlen, DMA_FROM_DEVICE);
-				memcpy(macb->rx_packet_buf, buffer, headlen);
-				dma_sync_single_for_cpu(macb->dev, (unsigned long)macb->rx_buffer,
+				dma_sync_single_for_cpu(macb->dev, buffer, headlen, DMA_FROM_DEVICE);
+				memcpy(macb->rx_packet_buf,
+				       macb->rx_buffer + macb->rx_buffer_size * macb->rx_tail,
+				       headlen);
+				dma_sync_single_for_cpu(macb->dev, macb->rx_buffer_phys,
 							taillen, DMA_FROM_DEVICE);
 				memcpy(macb->rx_packet_buf + headlen, macb->rx_buffer, taillen);
-				dma_sync_single_for_device(macb->dev, (unsigned long)buffer,
-							headlen, DMA_FROM_DEVICE);
-				dma_sync_single_for_device(macb->dev, (unsigned long)macb->rx_buffer,
+				dma_sync_single_for_device(macb->dev, buffer, headlen, DMA_FROM_DEVICE);
+				dma_sync_single_for_device(macb->dev, macb->rx_buffer_phys,
 							taillen, DMA_FROM_DEVICE);
 				net_receive(edev, macb->rx_packet_buf, length);
 			} else {
-				dma_sync_single_for_cpu(macb->dev, (unsigned long)buffer, length,
-							DMA_FROM_DEVICE);
-				net_receive(edev, buffer, length);
-				dma_sync_single_for_device(macb->dev, (unsigned long)buffer, length,
-							DMA_FROM_DEVICE);
+				dma_sync_single_for_cpu(macb->dev, buffer, length, DMA_FROM_DEVICE);
+				net_receive(edev,
+					    macb->rx_buffer + macb->rx_buffer_size * macb->rx_tail,
+					    length);
+				dma_sync_single_for_device(macb->dev, buffer, length, DMA_FROM_DEVICE);
 			}
 			barrier();
 			if (++rx_tail >= macb->rx_ring_size)
@@ -377,7 +386,7 @@ static int gmac_init_dummy_tx_queues(struct macb_device *macb)
 	return 0;
 }
 
-static void macb_init(struct macb_device *macb)
+static int macb_init(struct macb_device *macb)
 {
 	unsigned long paddr, val = 0;
 	int i;
@@ -386,6 +395,11 @@ static void macb_init(struct macb_device *macb)
 	 * macb_halt should have been called at some point before now,
 	 * so we'll assume the controller is idle.
 	 */
+	macb->rx_buffer_phys = dma_map_single(macb->dev, macb->rx_buffer,
+					      macb->rx_buffer_size * macb->rx_ring_size,
+					      DMA_FROM_DEVICE);
+	if (dma_mapping_error(macb->dev, macb->rx_buffer_phys))
+		return -EFAULT;
 
 	/* initialize DMA descriptors */
 	paddr = (ulong)macb->rx_buffer;
@@ -442,6 +456,7 @@ static void macb_init(struct macb_device *macb)
 
 	macb_or_gem_writel(macb, USRIO, val);
 
+	return 0;
 }
 
 static void macb_halt(struct eth_device *edev)
@@ -460,6 +475,13 @@ static void macb_halt(struct eth_device *edev)
 
 	/* Disable TX and RX, and clear statistics */
 	macb_writel(macb, NCR, MACB_BIT(CLRSTAT));
+
+	dma_unmap_single(macb->dev, macb->rx_buffer_phys,
+			 macb->rx_buffer_size * macb->rx_ring_size,
+			 DMA_FROM_DEVICE);
+	free(macb->rx_buffer);
+	dma_free_coherent((void *)macb->rx_ring, macb->rx_ring_phys, RX_RING_BYTES(macb));
+	dma_free_coherent((void *)macb->tx_ring, macb->tx_ring_phys, TX_RING_BYTES);
 }
 
 static int macb_phy_read(struct mii_bus *bus, int addr, int reg)
@@ -780,6 +802,7 @@ static int macb_probe(struct device *dev)
 	const char *pclk_name, *hclk_name;
 	const struct macb_config *config = NULL;
 	u32 ncfgr;
+	int ret;
 
 	macb = xzalloc(sizeof(*macb));
 	edev = &macb->netdev;
@@ -877,7 +900,7 @@ static int macb_probe(struct device *dev)
 		clk_enable(macb->rxclk);
 
 	if (config) {
-		int ret = config->txclk_init(dev, &macb->txclk);
+		ret = config->txclk_init(dev, &macb->txclk);
 		if (ret)
 			return ret;
 	}
@@ -891,8 +914,8 @@ static int macb_probe(struct device *dev)
 
 	macb_init_rx_buffer_size(macb, PKTSIZE);
 	macb->rx_buffer = dma_alloc(macb->rx_buffer_size * macb->rx_ring_size);
-	macb->rx_ring = dma_alloc_coherent(RX_RING_BYTES(macb), DMA_ADDRESS_BROKEN);
-	macb->tx_ring = dma_alloc_coherent(TX_RING_BYTES, DMA_ADDRESS_BROKEN);
+	macb->rx_ring = dma_alloc_coherent(RX_RING_BYTES(macb), &macb->rx_ring_phys);
+	macb->tx_ring = dma_alloc_coherent(TX_RING_BYTES, &macb->tx_ring_phys);
 
 	if (macb->is_gem)
 		macb->gem_q1_descs = dma_alloc_coherent(GEM_Q1_DESC_BYTES,
@@ -907,7 +930,9 @@ static int macb_probe(struct device *dev)
 	ncfgr |= macb_dbw(macb);
 	macb_writel(macb, NCFGR, ncfgr);
 
-	macb_init(macb);
+	ret = macb_init(macb);
+	if (ret)
+		return ret;
 
 	mdiobus_register(&macb->miibus);
 	eth_register(edev);

-- 
2.40.1

* [PATCH v4 2/3] net: macb: convert to volatile accesses
  2023-12-01 13:51 [PATCH v4 0/3] net: macb: fix dma usage Steffen Trumtrar
  2023-12-01 13:51 ` [PATCH v4 1/3] net: macb: fix dma_alloc for rx_buffer Steffen Trumtrar
@ 2023-12-01 13:51 ` Steffen Trumtrar
  2023-12-01 13:51 ` [PATCH v4 3/3] include: net: align PKTSIZE to 64 bytes Steffen Trumtrar
  2023-12-05  8:04 ` [PATCH v4 0/3] net: macb: fix dma usage Sascha Hauer
  3 siblings, 0 replies; 5+ messages in thread
From: Steffen Trumtrar @ 2023-12-01 13:51 UTC (permalink / raw)
  To: barebox

Instead of reading from memory addresses directly and inserting software
barriers to ensure the compiler does not reorder loads/stores across
them, use proper volatile writel/readl accesses.
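
The transformation, condensed from the hunks below (i and status are the
driver's loop variables; the before/after pair is illustrative, not a
complete excerpt):

  /* before: plain descriptor access, ordering enforced by hand */
  macb->rx_ring[i].addr &= ~MACB_BIT(RX_USED);
  barrier();

  /* after: volatile accessors with the same semantics */
  clrbits_le32(&macb->rx_ring[i].addr, MACB_BIT(RX_USED));
  writel(MACB_BIT(TX_USED), &macb->tx_ring[i].ctrl);
  status = readl(&macb->rx_ring[i].ctrl);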

Signed-off-by: Steffen Trumtrar <s.trumtrar@pengutronix.de>
---
 drivers/net/macb.c | 53 +++++++++++++++++++++--------------------------------
 1 file changed, 21 insertions(+), 32 deletions(-)

diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 6aae8e340d..f5b2fa74dc 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -124,17 +124,14 @@ static int macb_send(struct eth_device *edev, void *packet,
 	if (dma_mapping_error(macb->dev, packet_dma))
 		return -EFAULT;
 
-	macb->tx_ring[tx_head].ctrl = ctrl;
-	macb->tx_ring[tx_head].addr = packet_dma;
-	barrier();
-
+	writel(ctrl, &macb->tx_ring[tx_head].ctrl);
+	writel(packet_dma, &macb->tx_ring[tx_head].addr);
 	macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE) | MACB_BIT(TSTART));
 
 	start = get_time_ns();
 	ret = -ETIMEDOUT;
 	do {
-		barrier();
-		ctrl = macb->tx_ring[0].ctrl;
+		ctrl = readl(&macb->tx_ring[0].ctrl);
 		if (ctrl & MACB_BIT(TX_USED)) {
 			ret = 0;
 			break;
@@ -160,18 +157,17 @@ static void reclaim_rx_buffers(struct macb_device *macb,
 
 	i = macb->rx_tail;
 	while (i > new_tail) {
-		macb->rx_ring[i].addr &= ~MACB_BIT(RX_USED);
+		clrbits_le32(&macb->rx_ring[i].addr, MACB_BIT(RX_USED));
 		i++;
 		if (i > macb->rx_ring_size)
 			i = 0;
 	}
 
 	while (i < new_tail) {
-		macb->rx_ring[i].addr &= ~MACB_BIT(RX_USED);
+		clrbits_le32(&macb->rx_ring[i].addr, MACB_BIT(RX_USED));
 		i++;
 	}
 
-	barrier();
 	macb->rx_tail = new_tail;
 }
 
@@ -183,12 +179,10 @@ static int gem_recv(struct eth_device *edev)
 	u32 status;
 
 	for (;;) {
-		barrier();
-		if (!(macb->rx_ring[macb->rx_tail].addr & MACB_BIT(RX_USED)))
+		if (!(readl(&macb->rx_ring[macb->rx_tail].addr) & MACB_BIT(RX_USED)))
 			return -1;
 
-		barrier();
-		status = macb->rx_ring[macb->rx_tail].ctrl;
+		status = readl(&macb->rx_ring[macb->rx_tail].ctrl);
 		length = MACB_BFEXT(RX_FRMLEN, status);
 		buffer = macb->rx_buffer_phys + macb->rx_buffer_size * macb->rx_tail;
 		dma_sync_single_for_cpu(macb->dev, buffer, length, DMA_FROM_DEVICE);
@@ -196,8 +190,7 @@ static int gem_recv(struct eth_device *edev)
 			    macb->rx_buffer + macb->rx_buffer_size * macb->rx_tail,
 			    length);
 		dma_sync_single_for_device(macb->dev, buffer, length, DMA_FROM_DEVICE);
-		macb->rx_ring[macb->rx_tail].addr &= ~MACB_BIT(RX_USED);
-		barrier();
+		clrbits_le32(&macb->rx_ring[macb->rx_tail].addr, MACB_BIT(RX_USED));
 
 		macb->rx_tail++;
 		if (macb->rx_tail >= macb->rx_ring_size)
@@ -217,12 +210,10 @@ static int macb_recv(struct eth_device *edev)
 	u32 status;
 
 	for (;;) {
-		barrier();
-		if (!(macb->rx_ring[rx_tail].addr & MACB_BIT(RX_USED)))
+		if (!(readl(&macb->rx_ring[rx_tail].addr) & MACB_BIT(RX_USED)))
 			return -1;
 
-		barrier();
-		status = macb->rx_ring[rx_tail].ctrl;
+		status = readl(&macb->rx_ring[rx_tail].ctrl);
 		if (status & MACB_BIT(RX_SOF)) {
 			if (rx_tail != macb->rx_tail)
 				reclaim_rx_buffers(macb, rx_tail);
@@ -256,7 +247,6 @@ static int macb_recv(struct eth_device *edev)
 					    length);
 				dma_sync_single_for_device(macb->dev, buffer, length, DMA_FROM_DEVICE);
 			}
-			barrier();
 			if (++rx_tail >= macb->rx_ring_size)
 				rx_tail = 0;
 			reclaim_rx_buffers(macb, rx_tail);
@@ -376,9 +366,9 @@ static int gmac_init_dummy_tx_queues(struct macb_device *macb)
 		if (queue_mask & (1 << i))
 			num_queues++;
 
-	macb->gem_q1_descs[0].addr = 0;
-	macb->gem_q1_descs[0].ctrl = MACB_BIT(TX_WRAP) |
-		MACB_BIT(TX_LAST) | MACB_BIT(TX_USED);
+	writel(0, &macb->gem_q1_descs[0].addr);
+	writel(MACB_BIT(TX_WRAP) | MACB_BIT(TX_LAST) | MACB_BIT(TX_USED),
+	       &macb->gem_q1_descs[0].ctrl);
 
 	for (i = 1; i < num_queues; i++)
 		gem_writel_queue_TBQP(macb, (ulong)macb->gem_q1_descs, i - 1);
@@ -404,17 +394,17 @@ static int macb_init(struct macb_device *macb)
 	/* initialize DMA descriptors */
 	paddr = (ulong)macb->rx_buffer;
 	for (i = 0; i < macb->rx_ring_size; i++) {
-		macb->rx_ring[i].addr = paddr;
-		macb->rx_ring[i].ctrl = 0;
+		writel(paddr, &macb->rx_ring[i].addr);
+		writel(0, &macb->rx_ring[i].ctrl);
 		paddr += macb->rx_buffer_size;
 	}
-	macb->rx_ring[macb->rx_ring_size - 1].addr |= MACB_BIT(RX_WRAP);
+	setbits_le32(&macb->rx_ring[macb->rx_ring_size - 1].addr, MACB_BIT(RX_WRAP));
 
 	for (i = 0; i < TX_RING_SIZE; i++) {
-		macb->tx_ring[i].addr = 0;
-		macb->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+		writel(0, &macb->tx_ring[i].addr);
+		writel(MACB_BIT(TX_USED), &macb->tx_ring[i].ctrl);
 	}
-	macb->tx_ring[TX_RING_SIZE - 1].addr |= MACB_BIT(TX_WRAP);
+	setbits_le32(&macb->tx_ring[TX_RING_SIZE - 1].addr, MACB_BIT(TX_WRAP));
 
 	macb->rx_tail = macb->tx_head = 0;
 
@@ -427,9 +417,8 @@ static int macb_init(struct macb_device *macb)
 		gmac_init_dummy_tx_queues(macb);
 
 		/* Disable the second priority rx queue */
-		macb->gem_q1_descs[1].addr = MACB_BIT(RX_USED) |
-				MACB_BIT(RX_WRAP);
-		macb->gem_q1_descs[1].ctrl = 0;
+		writel(MACB_BIT(RX_USED) | MACB_BIT(RX_WRAP), &macb->gem_q1_descs[1].addr);
+		writel(0, &macb->gem_q1_descs[1].ctrl);
 
 		gem_writel(macb, RQ1, (ulong)&macb->gem_q1_descs[1]);
 	}

-- 
2.40.1

* [PATCH v4 3/3] include: net: align PKTSIZE to 64 bytes
  2023-12-01 13:51 [PATCH v4 0/3] net: macb: fix dma usage Steffen Trumtrar
  2023-12-01 13:51 ` [PATCH v4 1/3] net: macb: fix dma_alloc for rx_buffer Steffen Trumtrar
  2023-12-01 13:51 ` [PATCH v4 2/3] net: macb: convert to volatile accesses Steffen Trumtrar
@ 2023-12-01 13:51 ` Steffen Trumtrar
  2023-12-05  8:04 ` [PATCH v4 0/3] net: macb: fix dma usage Sascha Hauer
  3 siblings, 0 replies; 5+ messages in thread
From: Steffen Trumtrar @ 2023-12-01 13:51 UTC (permalink / raw)
  To: barebox; +Cc: Ahmad Fatoum

PKTSIZE is used to allocate network packet storage.
Make it a multiple of the 64-byte cache line size so that drivers
flushing one packet buffer don't accidentally flush adjacent packets.
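
For reference, the arithmetic behind the new value (the ALIGN-style
expression below is illustration only; the patch simply redefines the
constant):

  /* old: 1518, the maximum untagged Ethernet frame, is not a
   * multiple of the 64-byte cache line */
  /* new: 1536 = 24 * 64, i.e. the frame size rounded up: */
  #define PKTSIZE ((1518 + 63) & ~63)	/* = 1536 */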

Suggested-by: Ahmad Fatoum <a.fatoum@pengutronix.de>
Signed-off-by: Steffen Trumtrar <s.trumtrar@pengutronix.de>
---
 include/net.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/include/net.h b/include/net.h
index a0ef8bee04..63109465f9 100644
--- a/include/net.h
+++ b/include/net.h
@@ -236,9 +236,9 @@ struct icmphdr {
  * Maximum packet size; used to allocate packet storage.
  * TFTP packets can be 524 bytes + IP header + ethernet header.
  * Lets be conservative, and go for 38 * 16.  (Must also be
- * a multiple of 32 bytes).
+ * a multiple of 64 bytes).
  */
-#define PKTSIZE			1518
+#define PKTSIZE			1536
 
 /**********************************************************************/
 /*

-- 
2.40.1

* Re: [PATCH v4 0/3] net: macb: fix dma usage
  2023-12-01 13:51 [PATCH v4 0/3] net: macb: fix dma usage Steffen Trumtrar
                   ` (2 preceding siblings ...)
  2023-12-01 13:51 ` [PATCH v4 3/3] include: net: align PKTSIZE to 64 bytes Steffen Trumtrar
@ 2023-12-05  8:04 ` Sascha Hauer
  3 siblings, 0 replies; 5+ messages in thread
From: Sascha Hauer @ 2023-12-05  8:04 UTC (permalink / raw)
  To: Steffen Trumtrar; +Cc: barebox, Ahmad Fatoum

On Fri, Dec 01, 2023 at 02:51:26PM +0100, Steffen Trumtrar wrote:
> The rx_buffer is only dma_alloc'ed but never properly flushed.
> Fix that.
> 
> While at it, also use proper volatile accesses instead of software barriers.
> 
> Also, redefine PKTSIZE to a sensible multiple of 64 bytes.
> 
> Signed-off-by: Steffen Trumtrar <s.trumtrar@pengutronix.de>
> ---
> Changes in v4:
> - align PKTSIZE with cacheline size
> - minor style changes
> - Link to v3: https://lore.barebox.org/20231129-v2023-08-0-topic-macb-v3-0-65ad6db834dc@pengutronix.de
> 
> Changes in v3:
> - fix dma_unmap_single direction
> - dma_map_single packet in macb_send()
> - Link to v2: https://lore.barebox.org/20231129-v2023-08-0-topic-macb-v2-0-4dc2cb4d5d25@pengutronix.de
> 
> Changes in v2:
> - change dma_map_single to DMA_FROM_DEVICE
> - drop (unsigned long) casts in dma_sync_*
> - rework writel/setbits/clearbits to keep semantics
> - Link to v1: https://lore.barebox.org/20231128-v2023-08-0-topic-macb-v1-0-9faff73bc990@pengutronix.de
> 
> ---
> Steffen Trumtrar (3):
>       net: macb: fix dma_alloc for rx_buffer
>       net: macb: convert to volatile accesses
>       include: net: align PKTSIZE to 64 bytes
> 
>  drivers/net/macb.c | 132 +++++++++++++++++++++++++++++------------------------
>  include/net.h      |   4 +-
>  2 files changed, 75 insertions(+), 61 deletions(-)

Applied, thanks

Sascha

> ---
> base-commit: 5f200dd534c848dfa5d948334b6373f0310b8f73
> change-id: 20231128-v2023-08-0-topic-macb-0c13ed91179d
> 
> Best regards,
> -- 
> Steffen Trumtrar <s.trumtrar@pengutronix.de>
> 
> 
> 

-- 
Pengutronix e.K.                           |                             |
Steuerwalder Str. 21                       | http://www.pengutronix.de/  |
31137 Hildesheim, Germany                  | Phone: +49-5121-206917-0    |
Amtsgericht Hildesheim, HRA 2686           | Fax:   +49-5121-206917-5555 |
