* [PATCH v2 0/2] net: macb: fix dma usage
From: Steffen Trumtrar @ 2023-11-29 9:54 UTC
To: barebox

The rx_buffer is only dma_alloc'ed but never properly flushed.
Fix that.
While at it, also use proper volatile accesses instead of software
barriers.
Signed-off-by: Steffen Trumtrar <s.trumtrar@pengutronix.de>
---
Changes in v2:
- change dma_map_single to DMA_FROM_DEVICE
- drop (unsigned long) casts in dma_sync_*
- rework writel/setbits/clearbits to keep semantics
- Link to v1: https://lore.barebox.org/20231128-v2023-08-0-topic-macb-v1-0-9faff73bc990@pengutronix.de
---
Steffen Trumtrar (2):
net: macb: fix dma_alloc for rx_buffer
net: macb: convert to volatile accesses
drivers/net/macb.c | 125 ++++++++++++++++++++++++++++++-----------------------
1 file changed, 70 insertions(+), 55 deletions(-)
---
base-commit: 5f200dd534c848dfa5d948334b6373f0310b8f73
change-id: 20231128-v2023-08-0-topic-macb-0c13ed91179d
Best regards,
--
Steffen Trumtrar <s.trumtrar@pengutronix.de>
* [PATCH v2 1/2] net: macb: fix dma_alloc for rx_buffer
From: Steffen Trumtrar @ 2023-11-29 9:54 UTC
To: barebox

rx_buffer gets dma_alloc'ed but is never dma_map'ed and is therefore
not flushed before it is first used.
Map the rx_buffer when the macb is initialized and unmap it in
macb_halt.
While at it, also track the physical addresses of the
dma_alloc_coherent'ed rx_ring/tx_ring and free the rings in
macb_halt, too.
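
The resulting lifecycle of the buffer is roughly the following (a
condensed sketch of the pattern in the diff below, not literal driver
code; map_len and off stand in for the sizes and offsets the driver
computes):

    /* init: hand the receive buffer over to the device */
    macb->rx_buffer_phys = dma_map_single(macb->dev, macb->rx_buffer,
                                          map_len, DMA_FROM_DEVICE);
    if (dma_mapping_error(macb->dev, macb->rx_buffer_phys))
            return -EFAULT;

    /* receive path: claim the frame for the CPU, pass it up the
     * stack, then give the buffer back to the device */
    dma_sync_single_for_cpu(macb->dev, macb->rx_buffer_phys + off,
                            length, DMA_FROM_DEVICE);
    net_receive(edev, macb->rx_buffer + off, length);
    dma_sync_single_for_device(macb->dev, macb->rx_buffer_phys + off,
                               length, DMA_FROM_DEVICE);

    /* halt: undo the mapping again */
    dma_unmap_single(macb->dev, macb->rx_buffer_phys, map_len,
                     DMA_FROM_DEVICE);
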
Signed-off-by: Steffen Trumtrar <s.trumtrar@pengutronix.de>
---
drivers/net/macb.c | 73 ++++++++++++++++++++++++++++++++++++------------------
1 file changed, 49 insertions(+), 24 deletions(-)
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 260c1e806a..55498c9e84 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -63,10 +63,13 @@ struct macb_device {
unsigned int tx_head;
void *rx_buffer;
+ dma_addr_t rx_buffer_phys;
void *tx_buffer;
void *rx_packet_buf;
struct macb_dma_desc *rx_ring;
+ dma_addr_t rx_ring_phys;
struct macb_dma_desc *tx_ring;
+ dma_addr_t tx_ring_phys;
struct macb_dma_desc *gem_q1_descs;
int rx_buffer_size;
@@ -169,7 +172,6 @@ static void reclaim_rx_buffers(struct macb_device *macb,
static int gem_recv(struct eth_device *edev)
{
struct macb_device *macb = edev->priv;
- void *buffer;
int length;
u32 status;
@@ -181,14 +183,15 @@ static int gem_recv(struct eth_device *edev)
barrier();
status = macb->rx_ring[macb->rx_tail].ctrl;
length = MACB_BFEXT(RX_FRMLEN, status);
- buffer = macb->rx_buffer + macb->rx_buffer_size * macb->rx_tail;
- dma_sync_single_for_cpu(macb->dev, (unsigned long)buffer, length,
- DMA_FROM_DEVICE);
- net_receive(edev, buffer, length);
- dma_sync_single_for_device(macb->dev, (unsigned long)buffer, length,
- DMA_FROM_DEVICE);
macb->rx_ring[macb->rx_tail].addr &= ~MACB_BIT(RX_USED);
barrier();
+ dma_sync_single_for_cpu(macb->dev,
+ macb->rx_buffer_phys + macb->rx_buffer_size * macb->rx_tail,
+ length, DMA_FROM_DEVICE);
+ net_receive(edev, macb->rx_buffer + macb->rx_buffer_size * macb->rx_tail, length);
+ dma_sync_single_for_device(macb->dev,
+ macb->rx_buffer_phys + macb->rx_buffer_size * macb->rx_tail,
+ length, DMA_FROM_DEVICE);
macb->rx_tail++;
if (macb->rx_tail >= macb->rx_ring_size)
@@ -202,7 +205,6 @@ static int macb_recv(struct eth_device *edev)
{
struct macb_device *macb = edev->priv;
unsigned int rx_tail = macb->rx_tail;
- void *buffer;
int length;
int wrapped = 0;
u32 status;
@@ -221,7 +223,6 @@ static int macb_recv(struct eth_device *edev)
}
if (status & MACB_BIT(RX_EOF)) {
- buffer = macb->rx_buffer + macb->rx_buffer_size * macb->rx_tail;
length = MACB_BFEXT(RX_FRMLEN, status);
if (wrapped) {
unsigned int headlen, taillen;
@@ -229,23 +230,31 @@ static int macb_recv(struct eth_device *edev)
headlen = macb->rx_buffer_size * (macb->rx_ring_size
- macb->rx_tail);
taillen = length - headlen;
- dma_sync_single_for_cpu(macb->dev, (unsigned long)buffer,
+ dma_sync_single_for_cpu(macb->dev,
+ macb->rx_buffer_phys + macb->rx_buffer_size * macb->rx_tail,
headlen, DMA_FROM_DEVICE);
- memcpy(macb->rx_packet_buf, buffer, headlen);
- dma_sync_single_for_cpu(macb->dev, (unsigned long)macb->rx_buffer,
+ memcpy(macb->rx_packet_buf,
+ macb->rx_buffer + macb->rx_buffer_size * macb->rx_tail,
+ headlen);
+ dma_sync_single_for_cpu(macb->dev, macb->rx_buffer_phys,
taillen, DMA_FROM_DEVICE);
memcpy(macb->rx_packet_buf + headlen, macb->rx_buffer, taillen);
- dma_sync_single_for_device(macb->dev, (unsigned long)buffer,
- headlen, DMA_FROM_DEVICE);
- dma_sync_single_for_device(macb->dev, (unsigned long)macb->rx_buffer,
+ dma_sync_single_for_device(macb->dev,
+ macb->rx_buffer_phys + macb->rx_buffer_size * macb->rx_tail,
+ headlen, DMA_FROM_DEVICE);
+ dma_sync_single_for_device(macb->dev, macb->rx_buffer_phys,
taillen, DMA_FROM_DEVICE);
net_receive(edev, macb->rx_packet_buf, length);
} else {
- dma_sync_single_for_cpu(macb->dev, (unsigned long)buffer, length,
- DMA_FROM_DEVICE);
- net_receive(edev, buffer, length);
- dma_sync_single_for_device(macb->dev, (unsigned long)buffer, length,
+ dma_sync_single_for_cpu(macb->dev,
+ macb->rx_buffer_phys + macb->rx_buffer_size * macb->rx_tail,
+ length,
DMA_FROM_DEVICE);
+ net_receive(edev, macb->rx_buffer + macb->rx_buffer_size * macb->rx_tail, length);
+ dma_sync_single_for_device(macb->dev,
+ macb->rx_buffer_phys + macb->rx_buffer_size * macb->rx_tail,
+ length,
+ DMA_FROM_DEVICE);
}
barrier();
if (++rx_tail >= macb->rx_ring_size)
@@ -377,7 +386,7 @@ static int gmac_init_dummy_tx_queues(struct macb_device *macb)
return 0;
}
-static void macb_init(struct macb_device *macb)
+static int macb_init(struct macb_device *macb)
{
unsigned long paddr, val = 0;
int i;
@@ -386,6 +395,11 @@ static void macb_init(struct macb_device *macb)
* macb_halt should have been called at some point before now,
* so we'll assume the controller is idle.
*/
+ macb->rx_buffer_phys = dma_map_single(macb->dev, macb->rx_buffer,
+ macb->rx_buffer_size * macb->rx_ring_size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(macb->dev, macb->rx_buffer_phys))
+ return -EFAULT;
/* initialize DMA descriptors */
paddr = (ulong)macb->rx_buffer;
@@ -442,6 +456,7 @@ static void macb_init(struct macb_device *macb)
macb_or_gem_writel(macb, USRIO, val);
+ return 0;
}
static void macb_halt(struct eth_device *edev)
@@ -460,6 +475,13 @@ static void macb_halt(struct eth_device *edev)
/* Disable TX and RX, and clear statistics */
macb_writel(macb, NCR, MACB_BIT(CLRSTAT));
+
+ dma_unmap_single(macb->dev, macb->rx_buffer_phys,
+ macb->rx_buffer_size * macb->rx_ring_size,
+ DMA_FROM_DEVICE);
+ free(macb->rx_buffer);
+ dma_free_coherent((void *)macb->rx_ring, macb->rx_ring_phys, RX_RING_BYTES(macb));
+ dma_free_coherent((void *)macb->tx_ring, macb->tx_ring_phys, TX_RING_BYTES);
}
static int macb_phy_read(struct mii_bus *bus, int addr, int reg)
@@ -780,6 +802,7 @@ static int macb_probe(struct device *dev)
const char *pclk_name, *hclk_name;
const struct macb_config *config = NULL;
u32 ncfgr;
+ int ret;
macb = xzalloc(sizeof(*macb));
edev = &macb->netdev;
@@ -877,7 +900,7 @@ static int macb_probe(struct device *dev)
clk_enable(macb->rxclk);
if (config) {
- int ret = config->txclk_init(dev, &macb->txclk);
+ ret = config->txclk_init(dev, &macb->txclk);
if (ret)
return ret;
}
@@ -891,8 +914,8 @@ static int macb_probe(struct device *dev)
macb_init_rx_buffer_size(macb, PKTSIZE);
macb->rx_buffer = dma_alloc(macb->rx_buffer_size * macb->rx_ring_size);
- macb->rx_ring = dma_alloc_coherent(RX_RING_BYTES(macb), DMA_ADDRESS_BROKEN);
- macb->tx_ring = dma_alloc_coherent(TX_RING_BYTES, DMA_ADDRESS_BROKEN);
+ macb->rx_ring = dma_alloc_coherent(RX_RING_BYTES(macb), &macb->rx_ring_phys);
+ macb->tx_ring = dma_alloc_coherent(TX_RING_BYTES, &macb->tx_ring_phys);
if (macb->is_gem)
macb->gem_q1_descs = dma_alloc_coherent(GEM_Q1_DESC_BYTES,
@@ -907,7 +930,9 @@ static int macb_probe(struct device *dev)
ncfgr |= macb_dbw(macb);
macb_writel(macb, NCFGR, ncfgr);
- macb_init(macb);
+ ret = macb_init(macb);
+ if (ret)
+ return ret;
mdiobus_register(&macb->miibus);
eth_register(edev);
--
2.40.1
* [PATCH v2 2/2] net: macb: convert to volatile accesses
From: Steffen Trumtrar @ 2023-11-29 9:54 UTC
To: barebox

Instead of accessing the DMA descriptors with plain loads/stores and
inserting software barriers to make sure the compiler does not move
accesses past these points, just use proper volatile writel/readl
accessors.
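
The conversion follows one pattern throughout; as a before/after
sketch (illustration only, not a hunk from the diff):

    /* before: plain descriptor accesses fenced by compiler barriers */
    barrier();
    status = macb->rx_ring[i].ctrl;
    macb->rx_ring[i].addr &= ~MACB_BIT(RX_USED);
    barrier();

    /* after: volatile accessors order each access by themselves */
    status = readl(&macb->rx_ring[i].ctrl);
    clrbits_le32(&macb->rx_ring[i].addr, MACB_BIT(RX_USED));
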
Signed-off-by: Steffen Trumtrar <s.trumtrar@pengutronix.de>
---
drivers/net/macb.c | 52 +++++++++++++++++++++-------------------------------
1 file changed, 21 insertions(+), 31 deletions(-)
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 55498c9e84..8a2d898d10 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -119,17 +119,15 @@ static int macb_send(struct eth_device *edev, void *packet,
macb->tx_head++;
}
- macb->tx_ring[tx_head].ctrl = ctrl;
- macb->tx_ring[tx_head].addr = (ulong)packet;
- barrier();
+ writel(ctrl, &macb->tx_ring[tx_head].ctrl);
+ writel((ulong)packet, &macb->tx_ring[tx_head].addr);
dma_sync_single_for_device(macb->dev, (unsigned long)packet, length, DMA_TO_DEVICE);
macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE) | MACB_BIT(TSTART));
start = get_time_ns();
ret = -ETIMEDOUT;
do {
- barrier();
- ctrl = macb->tx_ring[0].ctrl;
+ ctrl = readl(&macb->tx_ring[0].ctrl);
if (ctrl & MACB_BIT(TX_USED)) {
ret = 0;
break;
@@ -154,18 +152,17 @@ static void reclaim_rx_buffers(struct macb_device *macb,
i = macb->rx_tail;
while (i > new_tail) {
- macb->rx_ring[i].addr &= ~MACB_BIT(RX_USED);
+ clrbits_le32(&macb->rx_ring[i].addr, MACB_BIT(RX_USED));
i++;
if (i > macb->rx_ring_size)
i = 0;
}
while (i < new_tail) {
- macb->rx_ring[i].addr &= ~MACB_BIT(RX_USED);
+ clrbits_le32(&macb->rx_ring[i].addr, MACB_BIT(RX_USED));
i++;
}
- barrier();
macb->rx_tail = new_tail;
}
@@ -176,15 +173,11 @@ static int gem_recv(struct eth_device *edev)
u32 status;
for (;;) {
- barrier();
- if (!(macb->rx_ring[macb->rx_tail].addr & MACB_BIT(RX_USED)))
+ if (!(readl(&macb->rx_ring[macb->rx_tail].addr) & MACB_BIT(RX_USED)))
return -1;
- barrier();
- status = macb->rx_ring[macb->rx_tail].ctrl;
+ status = readl(&macb->rx_ring[macb->rx_tail].ctrl);
length = MACB_BFEXT(RX_FRMLEN, status);
- macb->rx_ring[macb->rx_tail].addr &= ~MACB_BIT(RX_USED);
- barrier();
dma_sync_single_for_cpu(macb->dev,
macb->rx_buffer_phys + macb->rx_buffer_size * macb->rx_tail,
length, DMA_FROM_DEVICE);
@@ -192,6 +185,7 @@ static int gem_recv(struct eth_device *edev)
dma_sync_single_for_device(macb->dev,
macb->rx_buffer_phys + macb->rx_buffer_size * macb->rx_tail,
length, DMA_FROM_DEVICE);
+ clrbits_le32(&macb->rx_ring[macb->rx_tail].addr, MACB_BIT(RX_USED));
macb->rx_tail++;
if (macb->rx_tail >= macb->rx_ring_size)
@@ -210,12 +204,10 @@ static int macb_recv(struct eth_device *edev)
u32 status;
for (;;) {
- barrier();
- if (!(macb->rx_ring[rx_tail].addr & MACB_BIT(RX_USED)))
+ if (!(readl(&macb->rx_ring[rx_tail].addr) & MACB_BIT(RX_USED)))
return -1;
- barrier();
- status = macb->rx_ring[rx_tail].ctrl;
+ status = readl(&macb->rx_ring[rx_tail].ctrl);
if (status & MACB_BIT(RX_SOF)) {
if (rx_tail != macb->rx_tail)
reclaim_rx_buffers(macb, rx_tail);
@@ -256,7 +248,6 @@ static int macb_recv(struct eth_device *edev)
length,
DMA_FROM_DEVICE);
}
- barrier();
if (++rx_tail >= macb->rx_ring_size)
rx_tail = 0;
reclaim_rx_buffers(macb, rx_tail);
@@ -376,9 +367,9 @@ static int gmac_init_dummy_tx_queues(struct macb_device *macb)
if (queue_mask & (1 << i))
num_queues++;
- macb->gem_q1_descs[0].addr = 0;
- macb->gem_q1_descs[0].ctrl = MACB_BIT(TX_WRAP) |
- MACB_BIT(TX_LAST) | MACB_BIT(TX_USED);
+ writel(0, &macb->gem_q1_descs[0].addr);
+ writel(MACB_BIT(TX_WRAP) | MACB_BIT(TX_LAST) | MACB_BIT(TX_USED),
+ &macb->gem_q1_descs[0].ctrl);
for (i = 1; i < num_queues; i++)
gem_writel_queue_TBQP(macb, (ulong)macb->gem_q1_descs, i - 1);
@@ -404,17 +395,17 @@ static int macb_init(struct macb_device *macb)
/* initialize DMA descriptors */
paddr = (ulong)macb->rx_buffer;
for (i = 0; i < macb->rx_ring_size; i++) {
- macb->rx_ring[i].addr = paddr;
- macb->rx_ring[i].ctrl = 0;
+ writel(paddr, &macb->rx_ring[i].addr);
+ writel(0, &macb->rx_ring[i].ctrl);
paddr += macb->rx_buffer_size;
}
- macb->rx_ring[macb->rx_ring_size - 1].addr |= MACB_BIT(RX_WRAP);
+ setbits_le32(&macb->rx_ring[macb->rx_ring_size - 1].addr, MACB_BIT(RX_WRAP));
for (i = 0; i < TX_RING_SIZE; i++) {
- macb->tx_ring[i].addr = 0;
- macb->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+ writel(0, &macb->tx_ring[i].addr);
+ writel(MACB_BIT(TX_USED), &macb->tx_ring[i].ctrl);
}
- macb->tx_ring[TX_RING_SIZE - 1].addr |= MACB_BIT(TX_WRAP);
+ setbits_le32(&macb->tx_ring[TX_RING_SIZE - 1].addr, MACB_BIT(TX_WRAP));
macb->rx_tail = macb->tx_head = 0;
@@ -427,9 +418,8 @@ static int macb_init(struct macb_device *macb)
gmac_init_dummy_tx_queues(macb);
/* Disable the second priority rx queue */
- macb->gem_q1_descs[1].addr = MACB_BIT(RX_USED) |
- MACB_BIT(RX_WRAP);
- macb->gem_q1_descs[1].ctrl = 0;
+ writel(MACB_BIT(RX_USED) | MACB_BIT(RX_WRAP), &macb->gem_q1_descs[1].addr);
+ writel(0, &macb->gem_q1_descs[1].ctrl);
gem_writel(macb, RQ1, (ulong)&macb->gem_q1_descs[1]);
}
--
2.40.1
* Re: [PATCH v2 2/2] net: macb: convert to volatile accesses
From: Ahmad Fatoum @ 2023-11-29 10:51 UTC
To: Steffen Trumtrar, barebox
On 29.11.23 10:54, Steffen Trumtrar wrote:
> Instead of accessing the DMA descriptors with plain loads/stores and
> inserting software barriers to make sure the compiler does not move
> accesses past these points, just use proper volatile writel/readl
> accessors.
>
> Signed-off-by: Steffen Trumtrar <s.trumtrar@pengutronix.de>
> ---
> drivers/net/macb.c | 52 +++++++++++++++++++++-------------------------------
> 1 file changed, 21 insertions(+), 31 deletions(-)
>
> diff --git a/drivers/net/macb.c b/drivers/net/macb.c
> index 55498c9e84..8a2d898d10 100644
> --- a/drivers/net/macb.c
> +++ b/drivers/net/macb.c
> @@ -119,17 +119,15 @@ static int macb_send(struct eth_device *edev, void *packet,
> macb->tx_head++;
> }
>
> - macb->tx_ring[tx_head].ctrl = ctrl;
> - macb->tx_ring[tx_head].addr = (ulong)packet;
> - barrier();
> + writel(ctrl, &macb->tx_ring[tx_head].ctrl);
> + writel((ulong)packet, &macb->tx_ring[tx_head].addr);
> dma_sync_single_for_device(macb->dev, (unsigned long)packet, length, DMA_TO_DEVICE);
> macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE) | MACB_BIT(TSTART));
>
> start = get_time_ns();
> ret = -ETIMEDOUT;
> do {
> - barrier();
> - ctrl = macb->tx_ring[0].ctrl;
> + ctrl = readl(&macb->tx_ring[0].ctrl);
> if (ctrl & MACB_BIT(TX_USED)) {
> ret = 0;
> break;
> @@ -154,18 +152,17 @@ static void reclaim_rx_buffers(struct macb_device *macb,
>
> i = macb->rx_tail;
> while (i > new_tail) {
> - macb->rx_ring[i].addr &= ~MACB_BIT(RX_USED);
> + clrbits_le32(&macb->rx_ring[i].addr, MACB_BIT(RX_USED));
> i++;
> if (i > macb->rx_ring_size)
> i = 0;
> }
>
> while (i < new_tail) {
> - macb->rx_ring[i].addr &= ~MACB_BIT(RX_USED);
> + clrbits_le32(&macb->rx_ring[i].addr, MACB_BIT(RX_USED));
> i++;
> }
>
> - barrier();
> macb->rx_tail = new_tail;
> }
>
> @@ -176,15 +173,11 @@ static int gem_recv(struct eth_device *edev)
> u32 status;
>
> for (;;) {
> - barrier();
> - if (!(macb->rx_ring[macb->rx_tail].addr & MACB_BIT(RX_USED)))
> + if (!(readl(&macb->rx_ring[macb->rx_tail].addr) & MACB_BIT(RX_USED)))
> return -1;
>
> - barrier();
> - status = macb->rx_ring[macb->rx_tail].ctrl;
> + status = readl(&macb->rx_ring[macb->rx_tail].ctrl);
> length = MACB_BFEXT(RX_FRMLEN, status);
> - macb->rx_ring[macb->rx_tail].addr &= ~MACB_BIT(RX_USED);
> - barrier();
> dma_sync_single_for_cpu(macb->dev,
> macb->rx_buffer_phys + macb->rx_buffer_size * macb->rx_tail,
> length, DMA_FROM_DEVICE);
> @@ -192,6 +185,7 @@ static int gem_recv(struct eth_device *edev)
> dma_sync_single_for_device(macb->dev,
> macb->rx_buffer_phys + macb->rx_buffer_size * macb->rx_tail,
> length, DMA_FROM_DEVICE);
> + clrbits_le32(&macb->rx_ring[macb->rx_tail].addr, MACB_BIT(RX_USED));

This should be in the previous hunk, shouldn't it?

>
> macb->rx_tail++;
> if (macb->rx_tail >= macb->rx_ring_size)
> @@ -210,12 +204,10 @@ static int macb_recv(struct eth_device *edev)
> u32 status;
>
> for (;;) {
> - barrier();
> - if (!(macb->rx_ring[rx_tail].addr & MACB_BIT(RX_USED)))
> + if (!(readl(&macb->rx_ring[rx_tail].addr) & MACB_BIT(RX_USED)))
> return -1;
>
> - barrier();
> - status = macb->rx_ring[rx_tail].ctrl;
> + status = readl(&macb->rx_ring[rx_tail].ctrl);
> if (status & MACB_BIT(RX_SOF)) {
> if (rx_tail != macb->rx_tail)
> reclaim_rx_buffers(macb, rx_tail);
> @@ -256,7 +248,6 @@ static int macb_recv(struct eth_device *edev)
> length,
> DMA_FROM_DEVICE);
> }
> - barrier();
> if (++rx_tail >= macb->rx_ring_size)
> rx_tail = 0;
> reclaim_rx_buffers(macb, rx_tail);
> @@ -376,9 +367,9 @@ static int gmac_init_dummy_tx_queues(struct macb_device *macb)
> if (queue_mask & (1 << i))
> num_queues++;
>
> - macb->gem_q1_descs[0].addr = 0;
> - macb->gem_q1_descs[0].ctrl = MACB_BIT(TX_WRAP) |
> - MACB_BIT(TX_LAST) | MACB_BIT(TX_USED);
> + writel(0, &macb->gem_q1_descs[0].addr);
> + writel(MACB_BIT(TX_WRAP) | MACB_BIT(TX_LAST) | MACB_BIT(TX_USED),
> + &macb->gem_q1_descs[0].ctrl);
>
> for (i = 1; i < num_queues; i++)
> gem_writel_queue_TBQP(macb, (ulong)macb->gem_q1_descs, i - 1);
> @@ -404,17 +395,17 @@ static int macb_init(struct macb_device *macb)
> /* initialize DMA descriptors */
> paddr = (ulong)macb->rx_buffer;
> for (i = 0; i < macb->rx_ring_size; i++) {
> - macb->rx_ring[i].addr = paddr;
> - macb->rx_ring[i].ctrl = 0;
> + writel(paddr, &macb->rx_ring[i].addr);
> + writel(0, &macb->rx_ring[i].ctrl);
> paddr += macb->rx_buffer_size;
> }
> - macb->rx_ring[macb->rx_ring_size - 1].addr |= MACB_BIT(RX_WRAP);
> + setbits_le32(&macb->rx_ring[macb->rx_ring_size - 1].addr, MACB_BIT(RX_WRAP));
>
> for (i = 0; i < TX_RING_SIZE; i++) {
> - macb->tx_ring[i].addr = 0;
> - macb->tx_ring[i].ctrl = MACB_BIT(TX_USED);
> + writel(0, &macb->tx_ring[i].addr);
> + writel(MACB_BIT(TX_USED), &macb->tx_ring[i].ctrl);
> }
> - macb->tx_ring[TX_RING_SIZE - 1].addr |= MACB_BIT(TX_WRAP);
> + setbits_le32(&macb->tx_ring[TX_RING_SIZE - 1].addr, MACB_BIT(TX_WRAP));
>
> macb->rx_tail = macb->tx_head = 0;
>
> @@ -427,9 +418,8 @@ static int macb_init(struct macb_device *macb)
> gmac_init_dummy_tx_queues(macb);
>
> /* Disable the second priority rx queue */
> - macb->gem_q1_descs[1].addr = MACB_BIT(RX_USED) |
> - MACB_BIT(RX_WRAP);
> - macb->gem_q1_descs[1].ctrl = 0;
> + writel(MACB_BIT(RX_USED) | MACB_BIT(RX_WRAP), &macb->gem_q1_descs[1].addr);
> + writel(0, &macb->gem_q1_descs[1].ctrl);
>
> gem_writel(macb, RQ1, (ulong)&macb->gem_q1_descs[1]);
> }
>
--
Pengutronix e.K. | |
Steuerwalder Str. 21 | http://www.pengutronix.de/ |
31137 Hildesheim, Germany | Phone: +49-5121-206917-0 |
Amtsgericht Hildesheim, HRA 2686 | Fax: +49-5121-206917-5555 |