mail archive of the barebox mailing list
* [PATCH v3 0/2] net: macb: fix dma usage
@ 2023-11-29 11:33 Steffen Trumtrar
  2023-11-29 11:33 ` [PATCH v3 1/2] net: macb: fix dma_alloc for rx_buffer Steffen Trumtrar
  2023-11-29 11:33 ` [PATCH v3 2/2] net: macb: convert to volatile accesses Steffen Trumtrar
  0 siblings, 2 replies; 5+ messages in thread
From: Steffen Trumtrar @ 2023-11-29 11:33 UTC (permalink / raw)
  To: barebox

The rx_buffer is only dma_alloc'ed but never properly flushed.
Fix that.

While at it, also use proper volatile accesses instead of software barriers.
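
For illustration, the streaming-DMA pattern this series moves the RX path
to looks roughly like this (a minimal sketch against the barebox DMA API;
dev, buf and len are placeholders, error handling trimmed):

	dma_addr_t phys;

	/* hand the buffer to the device; performs the initial cache maintenance */
	phys = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, phys))
		return -EFAULT;

	/* ... the controller DMAs a frame into buf via phys ... */

	/* transfer ownership to the CPU before reading the frame */
	dma_sync_single_for_cpu(dev, phys, len, DMA_FROM_DEVICE);
	/* ... consume buf ... */
	/* hand the buffer back to the device for the next frame */
	dma_sync_single_for_device(dev, phys, len, DMA_FROM_DEVICE);

	/* teardown, e.g. on halt */
	dma_unmap_single(dev, phys, len, DMA_FROM_DEVICE);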

Signed-off-by: Steffen Trumtrar <s.trumtrar@pengutronix.de>
---
Changes in v3:
- fix dma_unmap_single direction
- dma_map_single packet in macb_send()
- Link to v2: https://lore.barebox.org/20231129-v2023-08-0-topic-macb-v2-0-4dc2cb4d5d25@pengutronix.de

Changes in v2:
- change dma_map_single to DMA_FROM_DEVICE
- drop (unsigned long) casts in dma_sync_*
- rework writel/setbits/clearbits to keep semantics
- Link to v1: https://lore.barebox.org/20231128-v2023-08-0-topic-macb-v1-0-9faff73bc990@pengutronix.de

---
Steffen Trumtrar (2):
      net: macb: fix dma_alloc for rx_buffer
      net: macb: convert to volatile accesses

 drivers/net/macb.c | 134 ++++++++++++++++++++++++++++++-----------------------
 1 file changed, 77 insertions(+), 57 deletions(-)
---
base-commit: 5f200dd534c848dfa5d948334b6373f0310b8f73
change-id: 20231128-v2023-08-0-topic-macb-0c13ed91179d

Best regards,
-- 
Steffen Trumtrar <s.trumtrar@pengutronix.de>





* [PATCH v3 1/2] net: macb: fix dma_alloc for rx_buffer
  2023-11-29 11:33 [PATCH v3 0/2] net: macb: fix dma usage Steffen Trumtrar
@ 2023-11-29 11:33 ` Steffen Trumtrar
  2023-11-29 11:59   ` Ahmad Fatoum
  2023-11-29 11:33 ` [PATCH v3 2/2] net: macb: convert to volatile accesses Steffen Trumtrar
  1 sibling, 1 reply; 5+ messages in thread
From: Steffen Trumtrar @ 2023-11-29 11:33 UTC (permalink / raw)
  To: barebox

rx_buffer gets dma_alloc'ed but is never dma_map'ed and is therefore not
flushed before it is first used.

Map the rx_buffer when the macb is initialized and unmap it on ether_halt.

While at it, clean up the dma_alloc_coherent'ed rx_ring/tx_ring, too.
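
In barebox, dma_alloc() returns memory that is suitably aligned for DMA
but still cached, so without a streaming mapping the controller's first
writes can be shadowed by stale cache lines. In outline, the lifecycle
this patch introduces (a sketch mirroring the hunks below):

	/* macb_init(): map the whole RX buffer area once, for the device */
	macb->rx_buffer_phys = dma_map_single(macb->dev, macb->rx_buffer,
					      macb->rx_buffer_size * macb->rx_ring_size,
					      DMA_FROM_DEVICE);

	/* macb_halt(): unmap with the same size and direction, then free */
	dma_unmap_single(macb->dev, macb->rx_buffer_phys,
			 macb->rx_buffer_size * macb->rx_ring_size,
			 DMA_FROM_DEVICE);
	free(macb->rx_buffer);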

Signed-off-by: Steffen Trumtrar <s.trumtrar@pengutronix.de>
---
 drivers/net/macb.c | 85 +++++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 58 insertions(+), 27 deletions(-)

diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 260c1e806a..d6708b190c 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -63,10 +63,13 @@ struct macb_device {
 	unsigned int		tx_head;
 
 	void			*rx_buffer;
+	dma_addr_t		rx_buffer_phys;
 	void			*tx_buffer;
 	void			*rx_packet_buf;
 	struct macb_dma_desc	*rx_ring;
+	dma_addr_t		rx_ring_phys;
 	struct macb_dma_desc	*tx_ring;
+	dma_addr_t		tx_ring_phys;
 	struct macb_dma_desc	*gem_q1_descs;
 
 	int			rx_buffer_size;
@@ -105,6 +108,7 @@ static int macb_send(struct eth_device *edev, void *packet,
 	int ret = 0;
 	uint64_t start;
 	unsigned int tx_head = macb->tx_head;
+	dma_addr_t packet_dma;
 
 	ctrl = MACB_BF(TX_FRMLEN, length);
 	ctrl |= MACB_BIT(TX_LAST);
@@ -116,10 +120,14 @@ static int macb_send(struct eth_device *edev, void *packet,
 		macb->tx_head++;
 	}
 
+	packet_dma = dma_map_single(macb->dev, packet, length, DMA_TO_DEVICE);
+	if (dma_mapping_error(macb->dev, packet_dma))
+		return -EFAULT;
+
 	macb->tx_ring[tx_head].ctrl = ctrl;
-	macb->tx_ring[tx_head].addr = (ulong)packet;
+	macb->tx_ring[tx_head].addr = packet_dma;
 	barrier();
-	dma_sync_single_for_device(macb->dev, (unsigned long)packet, length, DMA_TO_DEVICE);
+
 	macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE) | MACB_BIT(TSTART));
 
 	start = get_time_ns();
@@ -132,7 +140,8 @@ static int macb_send(struct eth_device *edev, void *packet,
 			break;
 		}
 	} while (!is_timeout(start, 100 * MSECOND));
-	dma_sync_single_for_cpu(macb->dev, (unsigned long)packet, length, DMA_TO_DEVICE);
+
+	dma_unmap_single(macb->dev, packet_dma, length, DMA_TO_DEVICE);
 
 	if (ctrl & MACB_BIT(TX_UNDERRUN))
 		dev_err(macb->dev, "TX underrun\n");
@@ -169,7 +178,6 @@ static void reclaim_rx_buffers(struct macb_device *macb,
 static int gem_recv(struct eth_device *edev)
 {
 	struct macb_device *macb = edev->priv;
-	void *buffer;
 	int length;
 	u32 status;
 
@@ -181,14 +189,15 @@ static int gem_recv(struct eth_device *edev)
 		barrier();
 		status = macb->rx_ring[macb->rx_tail].ctrl;
 		length = MACB_BFEXT(RX_FRMLEN, status);
-		buffer = macb->rx_buffer + macb->rx_buffer_size * macb->rx_tail;
-		dma_sync_single_for_cpu(macb->dev, (unsigned long)buffer, length,
-					DMA_FROM_DEVICE);
-		net_receive(edev, buffer, length);
-		dma_sync_single_for_device(macb->dev, (unsigned long)buffer, length,
-					   DMA_FROM_DEVICE);
 		macb->rx_ring[macb->rx_tail].addr &= ~MACB_BIT(RX_USED);
 		barrier();
+		dma_sync_single_for_cpu(macb->dev,
+					macb->rx_buffer_phys + macb->rx_buffer_size * macb->rx_tail,
+					length,	DMA_FROM_DEVICE);
+		net_receive(edev, macb->rx_buffer + macb->rx_buffer_size * macb->rx_tail, length);
+		dma_sync_single_for_device(macb->dev,
+					   macb->rx_buffer_phys + macb->rx_buffer_size * macb->rx_tail,
+					   length, DMA_FROM_DEVICE);
 
 		macb->rx_tail++;
 		if (macb->rx_tail >= macb->rx_ring_size)
@@ -202,7 +211,6 @@ static int macb_recv(struct eth_device *edev)
 {
 	struct macb_device *macb = edev->priv;
 	unsigned int rx_tail = macb->rx_tail;
-	void *buffer;
 	int length;
 	int wrapped = 0;
 	u32 status;
@@ -221,7 +229,6 @@ static int macb_recv(struct eth_device *edev)
 		}
 
 		if (status & MACB_BIT(RX_EOF)) {
-			buffer = macb->rx_buffer + macb->rx_buffer_size * macb->rx_tail;
 			length = MACB_BFEXT(RX_FRMLEN, status);
 			if (wrapped) {
 				unsigned int headlen, taillen;
@@ -229,23 +236,31 @@ static int macb_recv(struct eth_device *edev)
 				headlen = macb->rx_buffer_size * (macb->rx_ring_size
 						 - macb->rx_tail);
 				taillen = length - headlen;
-				dma_sync_single_for_cpu(macb->dev, (unsigned long)buffer,
+				dma_sync_single_for_cpu(macb->dev,
+							macb->rx_buffer_phys + macb->rx_buffer_size * macb->rx_tail,
 							headlen, DMA_FROM_DEVICE);
-				memcpy(macb->rx_packet_buf, buffer, headlen);
-				dma_sync_single_for_cpu(macb->dev, (unsigned long)macb->rx_buffer,
+				memcpy(macb->rx_packet_buf,
+				       macb->rx_buffer + macb->rx_buffer_size * macb->rx_tail,
+				       headlen);
+				dma_sync_single_for_cpu(macb->dev, macb->rx_buffer_phys,
 							taillen, DMA_FROM_DEVICE);
 				memcpy(macb->rx_packet_buf + headlen, macb->rx_buffer, taillen);
-				dma_sync_single_for_device(macb->dev, (unsigned long)buffer,
-							headlen, DMA_FROM_DEVICE);
-				dma_sync_single_for_device(macb->dev, (unsigned long)macb->rx_buffer,
+				dma_sync_single_for_device(macb->dev,
+							   macb->rx_buffer_phys + macb->rx_buffer_size * macb->rx_tail,
+							   headlen, DMA_FROM_DEVICE);
+				dma_sync_single_for_device(macb->dev, macb->rx_buffer_phys,
 							taillen, DMA_FROM_DEVICE);
 				net_receive(edev, macb->rx_packet_buf, length);
 			} else {
-				dma_sync_single_for_cpu(macb->dev, (unsigned long)buffer, length,
-							DMA_FROM_DEVICE);
-				net_receive(edev, buffer, length);
-				dma_sync_single_for_device(macb->dev, (unsigned long)buffer, length,
+				dma_sync_single_for_cpu(macb->dev,
+							macb->rx_buffer_phys + macb->rx_buffer_size * macb->rx_tail,
+							length,
 							DMA_FROM_DEVICE);
+				net_receive(edev, macb->rx_buffer + macb->rx_buffer_size * macb->rx_tail, length);
+				dma_sync_single_for_device(macb->dev,
+							   macb->rx_buffer_phys + macb->rx_buffer_size * macb->rx_tail,
+							   length,
+							   DMA_FROM_DEVICE);
 			}
 			barrier();
 			if (++rx_tail >= macb->rx_ring_size)
@@ -377,7 +392,7 @@ static int gmac_init_dummy_tx_queues(struct macb_device *macb)
 	return 0;
 }
 
-static void macb_init(struct macb_device *macb)
+static int macb_init(struct macb_device *macb)
 {
 	unsigned long paddr, val = 0;
 	int i;
@@ -386,6 +401,11 @@ static void macb_init(struct macb_device *macb)
 	 * macb_halt should have been called at some point before now,
 	 * so we'll assume the controller is idle.
 	 */
+	macb->rx_buffer_phys = dma_map_single(macb->dev, macb->rx_buffer,
+					      macb->rx_buffer_size * macb->rx_ring_size,
+					      DMA_FROM_DEVICE);
+	if (dma_mapping_error(macb->dev, macb->rx_buffer_phys))
+		return -EFAULT;
 
 	/* initialize DMA descriptors */
 	paddr = (ulong)macb->rx_buffer;
@@ -442,6 +462,7 @@ static void macb_init(struct macb_device *macb)
 
 	macb_or_gem_writel(macb, USRIO, val);
 
+	return 0;
 }
 
 static void macb_halt(struct eth_device *edev)
@@ -460,6 +481,13 @@ static void macb_halt(struct eth_device *edev)
 
 	/* Disable TX and RX, and clear statistics */
 	macb_writel(macb, NCR, MACB_BIT(CLRSTAT));
+
+	dma_unmap_single(macb->dev, macb->rx_buffer_phys,
+			 macb->rx_buffer_size * macb->rx_ring_size,
+			 DMA_FROM_DEVICE);
+	free(macb->rx_buffer);
+	dma_free_coherent((void *)macb->rx_ring, macb->rx_ring_phys, RX_RING_BYTES(macb));
+	dma_free_coherent((void *)macb->tx_ring, macb->tx_ring_phys, TX_RING_BYTES);
 }
 
 static int macb_phy_read(struct mii_bus *bus, int addr, int reg)
@@ -780,6 +808,7 @@ static int macb_probe(struct device *dev)
 	const char *pclk_name, *hclk_name;
 	const struct macb_config *config = NULL;
 	u32 ncfgr;
+	int ret;
 
 	macb = xzalloc(sizeof(*macb));
 	edev = &macb->netdev;
@@ -877,7 +906,7 @@ static int macb_probe(struct device *dev)
 		clk_enable(macb->rxclk);
 
 	if (config) {
-		int ret = config->txclk_init(dev, &macb->txclk);
+		ret = config->txclk_init(dev, &macb->txclk);
 		if (ret)
 			return ret;
 	}
@@ -891,8 +920,8 @@ static int macb_probe(struct device *dev)
 
 	macb_init_rx_buffer_size(macb, PKTSIZE);
 	macb->rx_buffer = dma_alloc(macb->rx_buffer_size * macb->rx_ring_size);
-	macb->rx_ring = dma_alloc_coherent(RX_RING_BYTES(macb), DMA_ADDRESS_BROKEN);
-	macb->tx_ring = dma_alloc_coherent(TX_RING_BYTES, DMA_ADDRESS_BROKEN);
+	macb->rx_ring = dma_alloc_coherent(RX_RING_BYTES(macb), &macb->rx_ring_phys);
+	macb->tx_ring = dma_alloc_coherent(TX_RING_BYTES, &macb->tx_ring_phys);
 
 	if (macb->is_gem)
 		macb->gem_q1_descs = dma_alloc_coherent(GEM_Q1_DESC_BYTES,
@@ -907,7 +936,9 @@ static int macb_probe(struct device *dev)
 	ncfgr |= macb_dbw(macb);
 	macb_writel(macb, NCFGR, ncfgr);
 
-	macb_init(macb);
+	ret = macb_init(macb);
+	if (ret)
+		return ret;
 
 	mdiobus_register(&macb->miibus);
 	eth_register(edev);

-- 
2.40.1





* [PATCH v3 2/2] net: macb: convert to volatile accesses
  2023-11-29 11:33 [PATCH v3 0/2] net: macb: fix dma usage Steffen Trumtrar
  2023-11-29 11:33 ` [PATCH v3 1/2] net: macb: fix dma_alloc for rx_buffer Steffen Trumtrar
@ 2023-11-29 11:33 ` Steffen Trumtrar
  2023-11-29 12:03   ` Ahmad Fatoum
  1 sibling, 1 reply; 5+ messages in thread
From: Steffen Trumtrar @ 2023-11-29 11:33 UTC (permalink / raw)
  To: barebox

Instead of accessing the DMA descriptors through plain loads/stores and
inserting software barriers to keep the compiler from reordering
accesses across them, use proper volatile writel/readl accesses.

Signed-off-by: Steffen Trumtrar <s.trumtrar@pengutronix.de>
---
 drivers/net/macb.c | 53 +++++++++++++++++++++--------------------------------
 1 file changed, 21 insertions(+), 32 deletions(-)

diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index d6708b190c..06ca0e9e33 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -124,17 +124,14 @@ static int macb_send(struct eth_device *edev, void *packet,
 	if (dma_mapping_error(macb->dev, packet_dma))
 		return -EFAULT;
 
-	macb->tx_ring[tx_head].ctrl = ctrl;
-	macb->tx_ring[tx_head].addr = packet_dma;
-	barrier();
-
+	writel(ctrl, &macb->tx_ring[tx_head].ctrl);
+	writel(packet_dma, &macb->tx_ring[tx_head].addr);
 	macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE) | MACB_BIT(TSTART));
 
 	start = get_time_ns();
 	ret = -ETIMEDOUT;
 	do {
-		barrier();
-		ctrl = macb->tx_ring[0].ctrl;
+		ctrl = readl(&macb->tx_ring[0].ctrl);
 		if (ctrl & MACB_BIT(TX_USED)) {
 			ret = 0;
 			break;
@@ -160,18 +157,17 @@ static void reclaim_rx_buffers(struct macb_device *macb,
 
 	i = macb->rx_tail;
 	while (i > new_tail) {
-		macb->rx_ring[i].addr &= ~MACB_BIT(RX_USED);
+		clrbits_le32(&macb->rx_ring[i].addr, MACB_BIT(RX_USED));
 		i++;
 		if (i > macb->rx_ring_size)
 			i = 0;
 	}
 
 	while (i < new_tail) {
-		macb->rx_ring[i].addr &= ~MACB_BIT(RX_USED);
+		clrbits_le32(&macb->rx_ring[i].addr, MACB_BIT(RX_USED));
 		i++;
 	}
 
-	barrier();
 	macb->rx_tail = new_tail;
 }
 
@@ -182,15 +178,11 @@ static int gem_recv(struct eth_device *edev)
 	u32 status;
 
 	for (;;) {
-		barrier();
-		if (!(macb->rx_ring[macb->rx_tail].addr & MACB_BIT(RX_USED)))
+		if (!(readl(&macb->rx_ring[macb->rx_tail].addr) & MACB_BIT(RX_USED)))
 			return -1;
 
-		barrier();
-		status = macb->rx_ring[macb->rx_tail].ctrl;
+		status = readl(&macb->rx_ring[macb->rx_tail].ctrl);
 		length = MACB_BFEXT(RX_FRMLEN, status);
-		macb->rx_ring[macb->rx_tail].addr &= ~MACB_BIT(RX_USED);
-		barrier();
 		dma_sync_single_for_cpu(macb->dev,
 					macb->rx_buffer_phys + macb->rx_buffer_size * macb->rx_tail,
 					length,	DMA_FROM_DEVICE);
@@ -198,6 +190,7 @@ static int gem_recv(struct eth_device *edev)
 		dma_sync_single_for_device(macb->dev,
 					   macb->rx_buffer_phys + macb->rx_buffer_size * macb->rx_tail,
 					   length, DMA_FROM_DEVICE);
+		clrbits_le32(&macb->rx_ring[macb->rx_tail].addr, MACB_BIT(RX_USED));
 
 		macb->rx_tail++;
 		if (macb->rx_tail >= macb->rx_ring_size)
@@ -216,12 +209,10 @@ static int macb_recv(struct eth_device *edev)
 	u32 status;
 
 	for (;;) {
-		barrier();
-		if (!(macb->rx_ring[rx_tail].addr & MACB_BIT(RX_USED)))
+		if (!(readl(&macb->rx_ring[rx_tail].addr) & MACB_BIT(RX_USED)))
 			return -1;
 
-		barrier();
-		status = macb->rx_ring[rx_tail].ctrl;
+		status = readl(&macb->rx_ring[rx_tail].ctrl);
 		if (status & MACB_BIT(RX_SOF)) {
 			if (rx_tail != macb->rx_tail)
 				reclaim_rx_buffers(macb, rx_tail);
@@ -262,7 +253,6 @@ static int macb_recv(struct eth_device *edev)
 							   length,
 							   DMA_FROM_DEVICE);
 			}
-			barrier();
 			if (++rx_tail >= macb->rx_ring_size)
 				rx_tail = 0;
 			reclaim_rx_buffers(macb, rx_tail);
@@ -382,9 +372,9 @@ static int gmac_init_dummy_tx_queues(struct macb_device *macb)
 		if (queue_mask & (1 << i))
 			num_queues++;
 
-	macb->gem_q1_descs[0].addr = 0;
-	macb->gem_q1_descs[0].ctrl = MACB_BIT(TX_WRAP) |
-		MACB_BIT(TX_LAST) | MACB_BIT(TX_USED);
+	writel(0, &macb->gem_q1_descs[0].addr);
+	writel(MACB_BIT(TX_WRAP) | MACB_BIT(TX_LAST) | MACB_BIT(TX_USED),
+	       &macb->gem_q1_descs[0].ctrl);
 
 	for (i = 1; i < num_queues; i++)
 		gem_writel_queue_TBQP(macb, (ulong)macb->gem_q1_descs, i - 1);
@@ -410,17 +400,17 @@ static int macb_init(struct macb_device *macb)
 	/* initialize DMA descriptors */
 	paddr = (ulong)macb->rx_buffer;
 	for (i = 0; i < macb->rx_ring_size; i++) {
-		macb->rx_ring[i].addr = paddr;
-		macb->rx_ring[i].ctrl = 0;
+		writel(paddr, &macb->rx_ring[i].addr);
+		writel(0, &macb->rx_ring[i].ctrl);
 		paddr += macb->rx_buffer_size;
 	}
-	macb->rx_ring[macb->rx_ring_size - 1].addr |= MACB_BIT(RX_WRAP);
+	setbits_le32(&macb->rx_ring[macb->rx_ring_size - 1].addr, MACB_BIT(RX_WRAP));
 
 	for (i = 0; i < TX_RING_SIZE; i++) {
-		macb->tx_ring[i].addr = 0;
-		macb->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+		writel(0, &macb->tx_ring[i].addr);
+		writel(MACB_BIT(TX_USED), &macb->tx_ring[i].ctrl);
 	}
-	macb->tx_ring[TX_RING_SIZE - 1].addr |= MACB_BIT(TX_WRAP);
+	setbits_le32(&macb->tx_ring[TX_RING_SIZE - 1].addr, MACB_BIT(TX_WRAP));
 
 	macb->rx_tail = macb->tx_head = 0;
 
@@ -433,9 +423,8 @@ static int macb_init(struct macb_device *macb)
 		gmac_init_dummy_tx_queues(macb);
 
 		/* Disable the second priority rx queue */
-		macb->gem_q1_descs[1].addr = MACB_BIT(RX_USED) |
-				MACB_BIT(RX_WRAP);
-		macb->gem_q1_descs[1].ctrl = 0;
+		writel(MACB_BIT(RX_USED) | MACB_BIT(RX_WRAP), &macb->gem_q1_descs[1].addr);
+		writel(0, &macb->gem_q1_descs[1].ctrl);
 
 		gem_writel(macb, RQ1, (ulong)&macb->gem_q1_descs[1]);
 	}

-- 
2.40.1





* Re: [PATCH v3 1/2] net: macb: fix dma_alloc for rx_buffer
  2023-11-29 11:33 ` [PATCH v3 1/2] net: macb: fix dma_alloc for rx_buffer Steffen Trumtrar
@ 2023-11-29 11:59   ` Ahmad Fatoum
  0 siblings, 0 replies; 5+ messages in thread
From: Ahmad Fatoum @ 2023-11-29 11:59 UTC (permalink / raw)
  To: Steffen Trumtrar, barebox

On 29.11.23 12:33, Steffen Trumtrar wrote:
> @@ -181,14 +189,15 @@ static int gem_recv(struct eth_device *edev)
>  		barrier();
>  		status = macb->rx_ring[macb->rx_tail].ctrl;
>  		length = MACB_BFEXT(RX_FRMLEN, status);
> -		buffer = macb->rx_buffer + macb->rx_buffer_size * macb->rx_tail;
> -		dma_sync_single_for_cpu(macb->dev, (unsigned long)buffer, length,
> -					DMA_FROM_DEVICE);
> -		net_receive(edev, buffer, length);
> -		dma_sync_single_for_device(macb->dev, (unsigned long)buffer, length,
> -					   DMA_FROM_DEVICE);
>  		macb->rx_ring[macb->rx_tail].addr &= ~MACB_BIT(RX_USED);
>  		barrier();

Is moving the above two lines around intentional?

> +		dma_sync_single_for_cpu(macb->dev,
> +					macb->rx_buffer_phys + macb->rx_buffer_size * macb->rx_tail,
> +					length,	DMA_FROM_DEVICE);
> +		net_receive(edev, macb->rx_buffer + macb->rx_buffer_size * macb->rx_tail, length);
> +		dma_sync_single_for_device(macb->dev,
> +					   macb->rx_buffer_phys + macb->rx_buffer_size * macb->rx_tail,
> +					   length, DMA_FROM_DEVICE);
>  
>  		macb->rx_tail++;
>  		if (macb->rx_tail >= macb->rx_ring_size)
> @@ -202,7 +211,6 @@ static int macb_recv(struct eth_device *edev)
>  {
>  	struct macb_device *macb = edev->priv;
>  	unsigned int rx_tail = macb->rx_tail;
> -	void *buffer;
>  	int length;
>  	int wrapped = 0;
>  	u32 status;
> @@ -221,7 +229,6 @@ static int macb_recv(struct eth_device *edev)
>  		}
>  
>  		if (status & MACB_BIT(RX_EOF)) {
> -			buffer = macb->rx_buffer + macb->rx_buffer_size * macb->rx_tail;

Nitpick: reworking this line would lead to less verbose changes below.


Cheers,
Ahmad

-- 
Pengutronix e.K.                           |                             |
Steuerwalder Str. 21                       | http://www.pengutronix.de/  |
31137 Hildesheim, Germany                  | Phone: +49-5121-206917-0    |
Amtsgericht Hildesheim, HRA 2686           | Fax:   +49-5121-206917-5555 |





* Re: [PATCH v3 2/2] net: macb: convert to volatile accesses
  2023-11-29 11:33 ` [PATCH v3 2/2] net: macb: convert to volatile accesses Steffen Trumtrar
@ 2023-11-29 12:03   ` Ahmad Fatoum
  0 siblings, 0 replies; 5+ messages in thread
From: Ahmad Fatoum @ 2023-11-29 12:03 UTC (permalink / raw)
  To: Steffen Trumtrar, barebox

Hi,

On 29.11.23 12:33, Steffen Trumtrar wrote:
>  		dma_sync_single_for_cpu(macb->dev,
>  					macb->rx_buffer_phys + macb->rx_buffer_size * macb->rx_tail,
>  					length,	DMA_FROM_DEVICE);
> @@ -198,6 +190,7 @@ static int gem_recv(struct eth_device *edev)
>  		dma_sync_single_for_device(macb->dev,
>  					   macb->rx_buffer_phys + macb->rx_buffer_size * macb->rx_tail,
>  					   length, DMA_FROM_DEVICE);
> +		clrbits_le32(&macb->rx_ring[macb->rx_tail].addr, MACB_BIT(RX_USED));

This is being moved around in the previous patch and moved back here, so that move
seems to have been unintentional.

By the way, did you see my comment about increasing PKTSIZE to be a multiple
of 64 bytes?

Thanks,
Ahmad

-- 
Pengutronix e.K.                           |                             |
Steuerwalder Str. 21                       | http://www.pengutronix.de/  |
31137 Hildesheim, Germany                  | Phone: +49-5121-206917-0    |
Amtsgericht Hildesheim, HRA 2686           | Fax:   +49-5121-206917-5555 |




