This series improves network reliability.

Signed-off-by: Mikhail Kshevetskiy <mikhail.kshevetskiy@iopsys.eu>
Link: https://github.com/openwrt/openwrt/pull/20295
Signed-off-by: Robert Marko <robimarko@gmail.com>

From 352c071bc18855238565cc6417a4c15a4e24bad8 Mon Sep 17 00:00:00 2001
From: Mikhail Kshevetskiy <mikhail.kshevetskiy@iopsys.eu>
Date: Wed, 9 Jul 2025 12:28:09 +0300
Subject: [PATCH 3/5] drivers/net/airoha_eth: fix stalling in packet
 receiving

ARCH_DMA_MINALIGN is 64 for the ARMv7-A/ARMv8-A architectures, but
RX/TX descriptors are only 32 bytes long, so they may not be aligned
on an ARCH_DMA_MINALIGN boundary. On the RX path this can cause the
following problem (the descriptor-to-cache-line mapping behind it is
illustrated by the sketch after the scenario):

1) Assume that a packet has arrived and the EVEN rx descriptor has
   been updated with the incoming data. The driver will invalidate
   and check the corresponding rx descriptor.

2) Now suppose the next descriptor (ODD) has not yet completed.

   Note that every even descriptor starts on a 64-byte boundary,
   while the odd ones are NOT aligned on a 64-byte boundary.

   Inspecting the even descriptor reads the entire CPU cache line
   (64 bytes), so the next (odd) descriptor is read and stored in
   the CPU cache as well.

3) Now suppose the next packet (for the odd rx descriptor) arrives
   while the first packet is being processed. We then have new data
   in memory but stale data in the cache.

4) After packet processing (in the arht_eth_free_pkt() function) the
   driver cleans up the descriptor and puts it back into the rx queue.

   This calls flush_dcache_range() on the even descriptor, so the odd
   one is flushed as well (it is in the same cache line), and the
   stale cached data overwrites the freshly received rx descriptor.

5) We get a freeze. The next descriptor appears empty (so the driver
   keeps waiting for packets), while the hardware continues to receive
   packets into the other available descriptors. This continues until
   the last available rx descriptor is full; then the hardware freezes
   as well.

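As an illustration (not part of the original patch), a minimal
standalone C sketch of the descriptor-to-cache-line mapping, assuming
the 32-byte descriptor size and 64-byte ARCH_DMA_MINALIGN cited above:

    /* desc_sketch.c: show that even/odd descriptor pairs share one
     * cache line; constants mirror the values cited in the text. */
    #include <stdio.h>

    #define DESC_SIZE  32   /* RX/TX descriptor size */
    #define CACHELINE  64   /* ARCH_DMA_MINALIGN on ARMv7-A/ARMv8-A */

    int main(void)
    {
        for (int i = 0; i < 8; i++)
            printf("desc %d -> cache line %d%s\n",
                   i, (i * DESC_SIZE) / CACHELINE,
                   (i & 1) ? "" : " (64-byte aligned)");
        return 0;
    }

Running it shows pairs (0,1), (2,3), ... landing on the same cache
line, with only the even member starting on a 64-byte boundary, so a
cache-line invalidate or flush of one member always hits the other.
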
The problem is solved if we:
* do nothing in the even descriptor case,
* return 2 descriptors to the queue (current and previous) in the odd
  descriptor case.

If the current descriptor is even, nothing is done, so no issue
arises.

If the current descriptor is odd, then the previous descriptor is on
the same cache line. Neither descriptor (current nor previous) is in
use at that point, so no issue arises either. A minimal sketch of
this refill policy follows.

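The sketch below restates the policy with stand-in types and helpers
(struct rxq, reset_rx_desc() and publish_cpu_idx() are placeholders
for illustration, not the driver's real API):

    struct rxq { int head; int ndesc; };

    static void reset_rx_desc(struct rxq *q, int idx)   { /* refill descriptor idx */ }
    static void publish_cpu_idx(struct rxq *q, int idx) { /* hand idx back to HW */ }

    static void free_pkt_refill(struct rxq *q)
    {
        /* Odd head: both descriptors sharing the cache line are free,
         * so refill the pair and publish once. Even head: do nothing;
         * the pair is handled when its odd partner completes. */
        if (q->head & 0x01) {
            reset_rx_desc(q, q->head - 1);
            reset_rx_desc(q, q->head);
            publish_cpu_idx(q, q->head);
        }
        q->head = (q->head + 1) % q->ndesc;
    }
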
WARNING: The following restrictions on PKTBUFSRX must hold:
* PKTBUFSRX is even,
* PKTBUFSRX >= 4. Observation shows that in practice PKTBUFSRX must
  be at least 8.

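A hypothetical compile-time guard (not in the patch) could enforce
these restrictions; PKTBUFSRX normally comes from U-Boot's net.h, and
a sample value is defined here so the sketch stands alone:

    #define PKTBUFSRX 8  /* sample value; the real one comes from net.h */

    _Static_assert((PKTBUFSRX & 1) == 0, "PKTBUFSRX must be even");
    _Static_assert(PKTBUFSRX >= 8, "PKTBUFSRX must be at least 8");
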
The bug appears on the 32-bit airoha platform, but should be present
on 64-bit as well.

The code was tested on both 32-bit and 64-bit airoha boards.

Signed-off-by: Mikhail Kshevetskiy <mikhail.kshevetskiy@iopsys.eu>
---
 drivers/net/airoha_eth.c | 33 ++++++++++++++++++++++++++-------
 1 file changed, 26 insertions(+), 7 deletions(-)

diff --git a/drivers/net/airoha_eth.c b/drivers/net/airoha_eth.c
index aae6922f3c7..44d4773bc5d 100644
--- a/drivers/net/airoha_eth.c
+++ b/drivers/net/airoha_eth.c
@@ -435,13 +435,14 @@ static int airoha_fe_init(struct airoha_eth *eth)
 	return 0;
 }
 
-static void airoha_qdma_reset_rx_desc(struct airoha_queue *q, int index,
-				      uchar *rx_packet)
+static void airoha_qdma_reset_rx_desc(struct airoha_queue *q, int index)
 {
 	struct airoha_qdma_desc *desc;
+	uchar *rx_packet;
 	u32 val;
 
 	desc = &q->desc[index];
+	rx_packet = net_rx_packets[index];
 	index = (index + 1) % q->ndesc;
 
 	dma_map_single(rx_packet, PKTSIZE_ALIGN, DMA_TO_DEVICE);
@@ -463,7 +464,7 @@ static void airoha_qdma_init_rx_desc(struct airoha_queue *q)
 	int i;
 
 	for (i = 0; i < q->ndesc; i++)
-		airoha_qdma_reset_rx_desc(q, i, net_rx_packets[i]);
+		airoha_qdma_reset_rx_desc(q, i);
 }
 
 static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
@@ -1003,12 +1004,30 @@ static int arht_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
 	qid = 0;
 	q = &qdma->q_rx[qid];
 
-	dma_map_single(packet, length, DMA_TO_DEVICE);
+	/*
+	 * Due to cpu cache issue the airoha_qdma_reset_rx_desc() function
+	 * will always touch 2 descriptors placed on the same cacheline:
+	 * - if current descriptor is even, then current and next
+	 *   descriptors will be touched
+	 * - if current descriptor is odd, then current and previous
+	 *   descriptors will be touched
+	 *
+	 * Thus, to prevent possible destroying of rx queue, we should:
+	 * - do nothing in the even descriptor case,
+	 * - utilize 2 descriptors (current and previous one) in the
+	 *   odd descriptor case.
+	 *
+	 * WARNING: Observations shows that PKTBUFSRX must be even and
+	 * larger than 7 for reliable driver operations.
+	 */
+	if (q->head & 0x01) {
+		airoha_qdma_reset_rx_desc(q, q->head - 1);
+		airoha_qdma_reset_rx_desc(q, q->head);
 
-	airoha_qdma_reset_rx_desc(q, q->head, packet);
+		airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid), RX_RING_CPU_IDX_MASK,
+				FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
+	}
 
-	airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid), RX_RING_CPU_IDX_MASK,
-			FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
 	q->head = (q->head + 1) % q->ndesc;
 
 	return 0;
-- 
2.51.0