Mirror of https://github.com/hzyitc/openwrt-redmi-ax3000.git (synced 2025-12-16)
From 5b9d0813e457881f4970d330dad0e974abc30b83 Mon Sep 17 00:00:00 2001
From: hzy <hzyitc@outlook.com>
Date: Fri, 3 Mar 2023 15:44:17 +0000
Subject: [PATCH 1/3] WIP: use the standard DMA API
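
Replace the driver's open-coded ARM cache maintenance with the
standard DMA API:

 - allocate and free the Rx descriptor ring with dma_alloc_coherent()
   and dma_free_coherent() instead of kzalloc()/kfree() plus
   virt_to_phys()
 - map packet buffers with dma_map_single()/dma_map_page() and check
   the result with dma_mapping_error()
 - replace the dmac_inv_range()/dmac_flush_range_no_dsb() calls with
   dma_sync_single_for_cpu()/dma_sync_single_for_device()
 - unmap buffers with dma_unmap_single() on Tx completion and when
   cleaning up the rings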

Signed-off-by: hzy <hzyitc@outlook.com>
---
 hal/dp_ops/syn_gmac_dp/syn_dp_cfg_rx.c | 13 ++++--
 hal/dp_ops/syn_gmac_dp/syn_dp_cfg_tx.c |  2 +
 hal/dp_ops/syn_gmac_dp/syn_dp_rx.c     | 61 +++++++++++++++++++-------
 hal/dp_ops/syn_gmac_dp/syn_dp_tx.c     | 23 +++-------
 4 files changed, 63 insertions(+), 36 deletions(-)

diff --git a/hal/dp_ops/syn_gmac_dp/syn_dp_cfg_rx.c b/hal/dp_ops/syn_gmac_dp/syn_dp_cfg_rx.c
index 3153b2ef..2f23c4cb 100644
--- a/hal/dp_ops/syn_gmac_dp/syn_dp_cfg_rx.c
+++ b/hal/dp_ops/syn_gmac_dp/syn_dp_cfg_rx.c
@@ -34,13 +34,15 @@ static int syn_dp_cfg_rx_setup_desc_queue(struct syn_dp_info *dev_info)
 	/*
 	 * Allocate cacheable descriptors for Rx
 	 */
-	first_desc = kzalloc(sizeof(struct dma_desc_rx) * SYN_DP_RX_DESC_SIZE, GFP_KERNEL);
+	first_desc = dma_alloc_coherent(rx_info->dev,
+					sizeof(struct dma_desc_rx) * SYN_DP_RX_DESC_SIZE,
+					&dma_addr, GFP_KERNEL);
 	if (!first_desc) {
 		netdev_dbg(netdev, "Error in Rx Descriptor Memory allocation in Ring mode\n");
 		return -ENOMEM;
 	}
 
-	dev_info->rx_desc_dma_addr = (dma_addr_t)virt_to_phys(first_desc);
+	dev_info->rx_desc_dma_addr = dma_addr;
 	rx_info->rx_desc = first_desc;
 	syn_dp_gmac_rx_desc_init_ring(rx_info->rx_desc, SYN_DP_RX_DESC_SIZE);
 
@@ -99,6 +101,10 @@ void syn_dp_cfg_rx_cleanup_rings(struct syn_dp_info *dev_info)
 	for (i = 0; i < rx_info->busy_rx_desc_cnt; i++) {
 		rx_skb_index = (rx_skb_index + i) & SYN_DP_RX_DESC_MAX_INDEX;
 		rxdesc = rx_info->rx_desc;
+
+		dma_unmap_single(rx_info->dev, rxdesc->buffer1,
+				 rxdesc->length, DMA_FROM_DEVICE);
+
 		skb = rx_info->rx_buf_pool[rx_skb_index].skb;
 		if (unlikely(skb != NULL)) {
 			dev_kfree_skb_any(skb);
@@ -106,7 +112,8 @@ void syn_dp_cfg_rx_cleanup_rings(struct syn_dp_info *dev_info)
 		}
 	}
 
-	kfree(rx_info->rx_desc);
+	dma_free_coherent(rx_info->dev, (sizeof(struct dma_desc_rx) * SYN_DP_RX_DESC_SIZE),
+			  rx_info->rx_desc, dev_info->rx_desc_dma_addr);
 	rx_info->rx_desc = NULL;
 	dev_info->rx_desc_dma_addr = (dma_addr_t)0;
 }
diff --git a/hal/dp_ops/syn_gmac_dp/syn_dp_cfg_tx.c b/hal/dp_ops/syn_gmac_dp/syn_dp_cfg_tx.c
index bf5e19a0..284e8880 100644
--- a/hal/dp_ops/syn_gmac_dp/syn_dp_cfg_tx.c
+++ b/hal/dp_ops/syn_gmac_dp/syn_dp_cfg_tx.c
@@ -91,6 +91,8 @@ void syn_dp_cfg_tx_cleanup_rings(struct syn_dp_info *dev_info)
 		tx_skb_index = syn_dp_tx_inc_index(tx_skb_index, i);
 		txdesc = tx_info->tx_desc;
 
+		dma_unmap_single(tx_info->dev, txdesc->buffer1, txdesc->length, DMA_TO_DEVICE);
+
 		skb = tx_info->tx_buf_pool[tx_skb_index].skb;
 		if (unlikely(skb != NULL)) {
 			dev_kfree_skb_any(skb);
diff --git a/hal/dp_ops/syn_gmac_dp/syn_dp_rx.c b/hal/dp_ops/syn_gmac_dp/syn_dp_rx.c
index 1ddeb7d6..fff25538 100644
--- a/hal/dp_ops/syn_gmac_dp/syn_dp_rx.c
+++ b/hal/dp_ops/syn_gmac_dp/syn_dp_rx.c
@@ -73,17 +73,27 @@ static inline void syn_dp_rx_refill_one_desc(struct dma_desc_rx *rx_desc,
  */
 static inline void syn_dp_rx_inval_and_flush(struct syn_dp_info_rx *rx_info, uint32_t start, uint32_t end)
 {
+	int dma_size;
+
 	/*
-	 * Batched flush and invalidation of the rx descriptors
+	 * flush and invalidation of the rx descriptors
 	 */
 	if (end > start) {
-		dmac_flush_range_no_dsb((void *)&rx_info->rx_desc[start], (void *)&rx_info->rx_desc[end] + sizeof(struct dma_desc_rx));
+		dma_size = sizeof(struct dma_desc_rx) * (end - start + 1);
+		dma_sync_single_for_device(rx_info->dev,
+					   rx_info->rx_desc[start].buffer1,
+					   dma_size, DMA_FROM_DEVICE);
 	} else {
-		dmac_flush_range_no_dsb((void *)&rx_info->rx_desc[start], (void *)&rx_info->rx_desc[SYN_DP_RX_DESC_MAX_INDEX] + sizeof(struct dma_desc_rx));
-		dmac_flush_range_no_dsb((void *)&rx_info->rx_desc[0], (void *)&rx_info->rx_desc[end] + sizeof(struct dma_desc_rx));
+		dma_size = sizeof(struct dma_desc_rx) * (SYN_DP_RX_DESC_MAX_INDEX - start + 1);
+		dma_sync_single_for_device(rx_info->dev,
+					   rx_info->rx_desc[start].buffer1,
+					   dma_size, DMA_FROM_DEVICE);
+
+		dma_size = sizeof(struct dma_desc_rx) * (end + 1);
+		dma_sync_single_for_device(rx_info->dev,
+					   rx_info->rx_desc[0].buffer1,
+					   dma_size, DMA_FROM_DEVICE);
 	}
-
-	dsb(st);
 }
 
 /*
@@ -127,12 +137,15 @@ int syn_dp_rx_refill_page_mode(struct syn_dp_info_rx *rx_info)
 		/*
 		 * Get virtual address of allocated page.
 		 */
-		page_addr = page_address(pg);
-		dma_addr = (dma_addr_t)virt_to_phys(page_addr);
+		dma_addr = dma_map_page(rx_info->dev, pg, 0, rx_info->alloc_buf_len, DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(rx_info->dev, dma_addr))) {
+			dev_kfree_skb(skb);
+			netdev_dbg(netdev, "DMA mapping failed for empty buffer\n");
+			break;
+		}
 
 		skb_fill_page_desc(skb, 0, pg, 0, PAGE_SIZE);
 
-		dmac_inv_range_no_dsb(page_addr, (page_addr + PAGE_SIZE));
 		rx_refill_idx = rx_info->rx_refill_idx;
 		rx_desc = rx_info->rx_desc + rx_refill_idx;
 
@@ -181,8 +194,13 @@ int syn_dp_rx_refill(struct syn_dp_info_rx *rx_info)
 
 		skb_reserve(skb, SYN_DP_SKB_HEADROOM + NET_IP_ALIGN);
 
-		dma_addr = (dma_addr_t)virt_to_phys(skb->data);
-		dmac_inv_range_no_dsb((void *)skb->data, (void *)(skb->data + inval_len));
+		dma_addr = dma_map_single(rx_info->dev, skb->data, rx_info->alloc_buf_len, DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(rx_info->dev, dma_addr))) {
+			dev_kfree_skb(skb);
+			netdev_dbg(netdev, "DMA mapping failed for empty buffer\n");
+			break;
+		}
+
 		rx_refill_idx = rx_info->rx_refill_idx;
 		rx_desc = rx_info->rx_desc + rx_refill_idx;
 
@@ -381,6 +399,7 @@ int syn_dp_rx(struct syn_dp_info_rx *rx_info, int budget)
 	struct dma_desc_rx *rx_desc_next = NULL;
 	uint8_t *next_skb_ptr;
 	skb_frag_t *frag = NULL;
+	int dma_size;
 
 	busy = atomic_read((atomic_t *)&rx_info->busy_rx_desc_cnt);
 	if (unlikely(!busy)) {
@@ -408,10 +427,20 @@ int syn_dp_rx(struct syn_dp_info_rx *rx_info, int budget)
 	 */
 	end = syn_dp_rx_inc_index(rx_info->rx_idx, busy);
 	if (end > start) {
-		dmac_inv_range_no_dsb((void *)&rx_info->rx_desc[start], (void *)&rx_info->rx_desc[end] + sizeof(struct dma_desc_rx));
+		dma_size = sizeof(struct dma_desc_rx) * (end - start + 1);
+		dma_sync_single_for_cpu(rx_info->dev,
+					rx_info->rx_desc[start].buffer1,
+					dma_size, DMA_FROM_DEVICE);
 	} else {
-		dmac_inv_range_no_dsb((void *)&rx_info->rx_desc[start], (void *)&rx_info->rx_desc[SYN_DP_RX_DESC_MAX_INDEX] + sizeof(struct dma_desc_rx));
-		dmac_inv_range_no_dsb((void *)&rx_info->rx_desc[0], (void *)&rx_info->rx_desc[end] + sizeof(struct dma_desc_rx));
+		dma_size = sizeof(struct dma_desc_rx) * (SYN_DP_RX_DESC_MAX_INDEX - start + 1);
+		dma_sync_single_for_cpu(rx_info->dev,
+					rx_info->rx_desc[start].buffer1,
+					dma_size, DMA_FROM_DEVICE);
+
+		dma_size = sizeof(struct dma_desc_rx) * (end + 1);
+		dma_sync_single_for_cpu(rx_info->dev,
+					rx_info->rx_desc[0].buffer1,
+					dma_size, DMA_FROM_DEVICE);
 	}
 
 	dsb(st);
@@ -439,8 +468,8 @@ int syn_dp_rx(struct syn_dp_info_rx *rx_info, int budget)
 		 * speculative prefetch by CPU may have occurred.
 		 */
 		frame_length = syn_dp_gmac_get_rx_desc_frame_length(status);
-		dmac_inv_range((void *)rx_buf->map_addr_virt,
-			       (void *)(((uint8_t *)rx_buf->map_addr_virt) + frame_length));
+		dma_sync_single_for_cpu(rx_info->dev, rx_desc->buffer1,
+					frame_length, DMA_FROM_DEVICE);
 		prefetch((void *)rx_buf->map_addr_virt);
 
 		rx_next_idx = syn_dp_rx_inc_index(rx_idx, 1);
diff --git a/hal/dp_ops/syn_gmac_dp/syn_dp_tx.c b/hal/dp_ops/syn_gmac_dp/syn_dp_tx.c
index c97e252b..6d4adb3f 100644
--- a/hal/dp_ops/syn_gmac_dp/syn_dp_tx.c
+++ b/hal/dp_ops/syn_gmac_dp/syn_dp_tx.c
@@ -104,9 +104,7 @@ static inline struct dma_desc_tx *syn_dp_tx_process_nr_frags(struct syn_dp_info_
 		BUG_ON(!length);
 #endif
 
-		dma_addr = (dma_addr_t)virt_to_phys(frag_addr);
-
-		dmac_clean_range_no_dsb(frag_addr, frag_addr + length);
+		dma_addr = dma_map_single(tx_info->dev, frag_addr, length, DMA_TO_DEVICE);
 
 		*total_length += length;
 		tx_desc = syn_dp_tx_set_desc_sg(tx_info, dma_addr, length, DESC_OWN_BY_DMA);
@@ -150,8 +148,7 @@ int syn_dp_tx_nr_frags(struct syn_dp_info_tx *tx_info, struct sk_buff *skb)
 	/*
 	 * Flush the dma for non-paged skb data
 	 */
-	dma_addr = (dma_addr_t)virt_to_phys(skb->data);
-	dmac_clean_range_no_dsb((void *)skb->data, (void *)(skb->data + length));
+	dma_addr = dma_map_single(tx_info->dev, skb->data, length, DMA_TO_DEVICE);
 
 	total_len = length;
 
@@ -256,12 +253,7 @@ int syn_dp_tx_frag_list(struct syn_dp_info_tx *tx_info, struct sk_buff *skb)
 		return NETDEV_TX_BUSY;
 	}
 
-	dma_addr = (dma_addr_t)virt_to_phys(skb->data);
-
-	/*
-	 * Flush the data area of the head skb
-	 */
-	dmac_clean_range_no_dsb((void *)skb->data, (void *)(skb->data + length));
+	dma_addr = dma_map_single(tx_info->dev, skb->data, length, DMA_TO_DEVICE);
 
 	total_len = length;
 
@@ -290,9 +282,7 @@ int syn_dp_tx_frag_list(struct syn_dp_info_tx *tx_info, struct sk_buff *skb)
 		BUG_ON(!length);
 #endif
 
-		dma_addr = (dma_addr_t)virt_to_phys(iter_skb->data);
-
-		dmac_clean_range_no_dsb((void *)iter_skb->data, (void *)(iter_skb->data + length));
+		dma_addr = dma_map_single(tx_info->dev, iter_skb->data, length, DMA_TO_DEVICE);
 
 		total_len += length;
 
@@ -445,6 +435,7 @@ int syn_dp_tx_complete(struct syn_dp_info_tx *tx_info, int budget)
 			break;
 		}
 
+		dma_unmap_single(tx_info->dev, desc->buffer1, desc->length, DMA_TO_DEVICE);
 
 		if (likely(status & DESC_TX_LAST)) {
 			tx_skb_index = syn_dp_tx_comp_index_get(tx_info);
@@ -571,9 +562,7 @@ int syn_dp_tx(struct syn_dp_info_tx *tx_info, struct sk_buff *skb)
 		return NETDEV_TX_BUSY;
 	}
 
-	dma_addr = (dma_addr_t)virt_to_phys(skb->data);
-
-	dmac_clean_range_no_dsb((void *)skb->data, (void *)(skb->data + skb->len));
+	dma_addr = dma_map_single(tx_info->dev, skb->data, skb->len, DMA_TO_DEVICE);
 
 	/*
 	 * Queue packet to the GMAC rings
--
2.40.1
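
The descriptor-ring hunks above use the usual coherent-allocation
pattern: the ring is allocated once, its bus address is handed to the
MAC, and no per-access cache maintenance is needed afterwards. Below is
a minimal sketch of that pattern; the names (my_desc, my_ring,
my_ring_alloc, my_ring_free) are illustrative, not from this driver.

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>

	/* Illustrative descriptor layout; the driver's is struct dma_desc_rx. */
	struct my_desc {
		u32 status;
		u32 length;
		u32 buffer1;
		u32 buffer2;
	};

	struct my_ring {
		struct my_desc *desc;	/* CPU view of the ring */
		dma_addr_t desc_dma;	/* bus address programmed into the MAC */
	};

	static int my_ring_alloc(struct device *dev, struct my_ring *ring, int count)
	{
		/*
		 * Coherent memory needs no dmac_*()/dsb() maintenance:
		 * CPU stores to ring->desc are visible to the device as-is.
		 */
		ring->desc = dma_alloc_coherent(dev, count * sizeof(*ring->desc),
						&ring->desc_dma, GFP_KERNEL);
		return ring->desc ? 0 : -ENOMEM;
	}

	static void my_ring_free(struct device *dev, struct my_ring *ring, int count)
	{
		dma_free_coherent(dev, count * sizeof(*ring->desc),
				  ring->desc, ring->desc_dma);
		ring->desc = NULL;
		ring->desc_dma = 0;
	}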
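
The packet-buffer hunks switch to streaming mappings. A minimal sketch
of the lifecycle of one Rx buffer, again with assumed names
(my_rx_attach, my_rx_complete) rather than the driver's:

	#include <linux/dma-mapping.h>
	#include <linux/skbuff.h>

	/* Map a fresh skb data buffer and hand it to the device. */
	static dma_addr_t my_rx_attach(struct device *dev, struct sk_buff *skb,
				       unsigned int buf_len)
	{
		dma_addr_t addr = dma_map_single(dev, skb->data, buf_len,
						 DMA_FROM_DEVICE);

		/*
		 * Unlike virt_to_phys(), a streaming mapping can fail
		 * (IOMMU or swiotlb exhaustion), so always check it.
		 */
		if (dma_mapping_error(dev, addr))
			return DMA_MAPPING_ERROR;

		return addr;	/* written into the descriptor's buffer field */
	}

	/* The device wrote frame_len bytes; reclaim the buffer for the CPU. */
	static void my_rx_complete(struct device *dev, dma_addr_t addr,
				   unsigned int buf_len, unsigned int frame_len)
	{
		/*
		 * On ARM this invalidates stale cache lines over the
		 * received bytes, replacing dmac_inv_range()/dsb().
		 */
		dma_sync_single_for_cpu(dev, addr, frame_len, DMA_FROM_DEVICE);

		/* ... parse headers, build the skb ... */

		/* Size and direction must match the dma_map_single() call. */
		dma_unmap_single(dev, addr, buf_len, DMA_FROM_DEVICE);
	}

Two rules matter in this pattern: every dma_map_single() needs exactly
one matching dma_unmap_single() with the same size and direction, and
after dma_map_single() or dma_sync_single_for_device() the buffer
belongs to the device, so the CPU may not touch it again until
dma_sync_single_for_cpu() or dma_unmap_single().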