mirror of https://github.com/breeze303/nss-packages.git
synced 2025-12-16 16:57:29 +00:00

nss-dp: edma-v1: try to improve driver performance

Related commits:
  2e82c51 nss-dp: edma-v1: improve napi and irq handling for rx/tx
  92aa9ba nss-dp: edma-v1: add support for threaded napi
  75129b9 nss-dp: edma-v1: split tx/rx path to specific napi and irq
  2c27cb4 nss-dp: edma-v1: convert rx/tx store to idr implementation

commit abdc8ff534
@@ -0,0 +1,239 @@
From e80ad87476fe55f602c4e76f6b1068036b34b7a0 Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Fri, 24 Jun 2022 15:04:44 +0200
Subject: [PATCH 1/3] edma_v1: convert rx/tx_store to idr implementation

Convert rx/tx store to idr implementation to correctly scale in
preparation for support of multiqueue implementation.

Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
---
 hal/dp_ops/edma_dp/edma_v1/edma_cfg.c        | 32 +++++++++----
 hal/dp_ops/edma_dp/edma_v1/edma_data_plane.h |  8 +++-
 hal/dp_ops/edma_dp/edma_v1/edma_tx_rx.c      | 49 ++++++++++++++------
 3 files changed, 63 insertions(+), 26 deletions(-)
diff --git a/hal/dp_ops/edma_dp/edma_v1/edma_cfg.c b/hal/dp_ops/edma_dp/edma_v1/edma_cfg.c
|
||||
index fbd315a..2e98aaf 100644
|
||||
--- a/hal/dp_ops/edma_dp/edma_v1/edma_cfg.c
|
||||
+++ b/hal/dp_ops/edma_dp/edma_v1/edma_cfg.c
|
||||
@@ -72,8 +72,12 @@ static void edma_cleanup_rxfill_ring_res(struct edma_hw *ehw,
|
||||
* Get sk_buff and free it
|
||||
*/
|
||||
store_idx = rxph->opaque;
|
||||
- skb = ehw->rx_skb_store[store_idx];
|
||||
- ehw->rx_skb_store[store_idx] = NULL;
|
||||
+
|
||||
+ spin_lock_bh(&ehw->rx_skb_idr_lock);
|
||||
+ skb = idr_find(&ehw->rx_skb_idr, store_idx);
|
||||
+ idr_remove(&ehw->rx_skb_idr, store_idx);
|
||||
+ spin_unlock_bh(&ehw->rx_skb_idr_lock);
|
||||
+
|
||||
dev_kfree_skb_any(skb);
|
||||
cons_idx++;
|
||||
if (cons_idx == rxfill_ring->count)
|
||||
@@ -173,8 +177,12 @@ static void edma_cleanup_rxdesc_ring_res(struct edma_hw *ehw,
|
||||
dma_unmap_single(&pdev->dev, rxdesc_desc->buffer_addr,
|
||||
ehw->rx_alloc_size, DMA_FROM_DEVICE);
|
||||
store_idx = rxph->opaque;
|
||||
- skb = ehw->rx_skb_store[store_idx];
|
||||
- ehw->rx_skb_store[store_idx] = NULL;
|
||||
+
|
||||
+ spin_lock_bh(&ehw->rx_skb_idr_lock);
|
||||
+ skb = idr_find(&ehw->rx_skb_idr, store_idx);
|
||||
+ idr_remove(&ehw->rx_skb_idr, store_idx);
|
||||
+ spin_unlock_bh(&ehw->rx_skb_idr_lock);
|
||||
+
|
||||
dev_kfree_skb_any(skb);
|
||||
|
||||
/*
|
||||
@@ -270,8 +278,11 @@ static void edma_cleanup_txdesc_ring_res(struct edma_hw *ehw,
|
||||
while (cons_idx != prod_idx) {
|
||||
txdesc = EDMA_TXDESC_DESC(txdesc_ring, cons_idx);
|
||||
store_idx = txdesc->buffer_addr;
|
||||
- skb = ehw->tx_skb_store[store_idx];
|
||||
- ehw->tx_skb_store[store_idx] = NULL;
|
||||
+
|
||||
+ spin_lock_bh(&ehw->tx_skb_idr_lock);
|
||||
+ skb = idr_find(&ehw->tx_skb_idr, store_idx);
|
||||
+ idr_remove(&ehw->tx_skb_idr, store_idx);
|
||||
+ spin_unlock_bh(&ehw->tx_skb_idr_lock);
|
||||
|
||||
buf_len = (txdesc->word1 & EDMA_TXDESC_DATA_LENGTH_MASK) >>
|
||||
EDMA_TXDESC_DATA_LENGTH_SHIFT;
|
||||
@@ -675,10 +686,11 @@ static void edma_configure_rings(struct edma_hw *ehw)
|
||||
/*
|
||||
* Initialize the store
|
||||
*/
|
||||
- for (i = 0; i < EDMA_RING_SIZE; i++) {
|
||||
- ehw->tx_skb_store[i] = NULL;
|
||||
- ehw->rx_skb_store[i] = NULL;
|
||||
- }
|
||||
+ idr_init(&ehw->rx_skb_idr);
|
||||
+ spin_lock_init(&ehw->rx_skb_idr_lock);
|
||||
+
|
||||
+ idr_init(&ehw->tx_skb_idr);
|
||||
+ spin_lock_init(&ehw->tx_skb_idr_lock);
|
||||
|
||||
/*
|
||||
* Configure TXDESC ring
|
||||
diff --git a/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.h b/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.h
|
||||
index 0b28fe8..2bbe478 100644
|
||||
--- a/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.h
|
||||
+++ b/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.h
|
||||
@@ -44,6 +44,8 @@
|
||||
#define EDMA_RXPH_SRC_INFO_TYPE_GET(rxph) (((rxph)->src_info >> 8) & 0xf0)
|
||||
#define EDMA_RXPH_SERVICE_CODE_GET(rxph) (((rxph)->rx_pre4) & 0xff)
|
||||
|
||||
+#define EDMA_TX_IDR_MAX EDMA_RING_SIZE * EDMA_MAX_TXDESC_RINGS
|
||||
+#define EDMA_RX_IDR_MAX EDMA_RING_SIZE * EDMA_MAX_RXDESC_RINGS
|
||||
/*
|
||||
* Tx descriptor
|
||||
*/
|
||||
@@ -202,8 +204,10 @@ struct edma_hw {
|
||||
/*
|
||||
* Store for tx and rx skbs
|
||||
*/
|
||||
- struct sk_buff *rx_skb_store[EDMA_RING_SIZE];
|
||||
- struct sk_buff *tx_skb_store[EDMA_RING_SIZE];
|
||||
+ struct idr rx_skb_idr;
|
||||
+ spinlock_t rx_skb_idr_lock;
|
||||
+ struct idr tx_skb_idr;
|
||||
+ spinlock_t tx_skb_idr_lock;
|
||||
|
||||
struct edma_rxfill_ring *rxfill_ring;
|
||||
/* Rx Fill Ring, SW is producer */
|
||||
diff --git a/hal/dp_ops/edma_dp/edma_v1/edma_tx_rx.c b/hal/dp_ops/edma_dp/edma_v1/edma_tx_rx.c
|
||||
index 5780a30..8cded66 100644
|
||||
--- a/hal/dp_ops/edma_dp/edma_v1/edma_tx_rx.c
|
||||
+++ b/hal/dp_ops/edma_dp/edma_v1/edma_tx_rx.c
|
||||
@@ -37,9 +37,9 @@ int edma_alloc_rx_buffer(struct edma_hw *ehw,
|
||||
uint16_t cons, next, counter;
|
||||
struct edma_rxfill_desc *rxfill_desc;
|
||||
uint32_t reg_data = 0;
|
||||
- uint32_t store_index = 0;
|
||||
uint32_t alloc_size = ehw->rx_alloc_size;
|
||||
struct edma_rx_preheader *rxph = NULL;
|
||||
+ int store_index;
|
||||
|
||||
/*
|
||||
* Read RXFILL ring producer index
|
||||
@@ -82,12 +82,16 @@ int edma_alloc_rx_buffer(struct edma_hw *ehw,
|
||||
/*
|
||||
* Store the skb in the rx store
|
||||
*/
|
||||
- store_index = next;
|
||||
- if (ehw->rx_skb_store[store_index] != NULL) {
|
||||
+ spin_lock_bh(&ehw->rx_skb_idr_lock);
|
||||
+ store_index = idr_alloc(&ehw->rx_skb_idr,
|
||||
+ skb, 0, EDMA_RX_IDR_MAX, GFP_ATOMIC);
|
||||
+ spin_unlock_bh(&ehw->rx_skb_idr_lock);
|
||||
+
|
||||
+ if (store_index < 0) {
|
||||
dev_kfree_skb_any(skb);
|
||||
break;
|
||||
}
|
||||
- ehw->rx_skb_store[store_index] = skb;
|
||||
+
|
||||
memcpy((uint8_t *)&rxph->opaque, (uint8_t *)&store_index, 4);
|
||||
/*
|
||||
* Save buffer size in RXFILL descriptor
|
||||
@@ -106,7 +110,9 @@ int edma_alloc_rx_buffer(struct edma_hw *ehw,
|
||||
|
||||
if (!rxfill_desc->buffer_addr) {
|
||||
dev_kfree_skb_any(skb);
|
||||
- ehw->rx_skb_store[store_index] = NULL;
|
||||
+ spin_lock_bh(&ehw->rx_skb_idr_lock);
|
||||
+ idr_remove(&ehw->rx_skb_idr, store_index);
|
||||
+ spin_unlock_bh(&ehw->rx_skb_idr_lock);
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -173,15 +179,19 @@ uint32_t edma_clean_tx(struct edma_hw *ehw,
|
||||
* buffer address (opaque) of txcmpl
|
||||
*/
|
||||
store_index = txcmpl->buffer_addr;
|
||||
- skb = ehw->tx_skb_store[store_index];
|
||||
- ehw->tx_skb_store[store_index] = NULL;
|
||||
+ spin_lock_bh(&ehw->tx_skb_idr_lock);
|
||||
+ skb = idr_find(&ehw->tx_skb_idr, store_index);
|
||||
|
||||
if (unlikely(!skb)) {
|
||||
pr_warn("Invalid skb: cons_idx:%u prod_idx:%u status %x\n",
|
||||
cons_idx, prod_idx, txcmpl->status);
|
||||
+ spin_unlock_bh(&ehw->tx_skb_idr_lock);
|
||||
goto next_txcmpl_desc;
|
||||
}
|
||||
|
||||
+ idr_remove(&ehw->tx_skb_idr, store_index);
|
||||
+ spin_unlock_bh(&ehw->tx_skb_idr_lock);
|
||||
+
|
||||
len = skb_headlen(skb);
|
||||
daddr = (dma_addr_t)virt_to_phys(skb->data);
|
||||
|
||||
@@ -322,14 +332,19 @@ static uint32_t edma_clean_rx(struct edma_hw *ehw,
|
||||
DMA_FROM_DEVICE);
|
||||
|
||||
store_index = rxph->opaque;
|
||||
- skb = ehw->rx_skb_store[store_index];
|
||||
- ehw->rx_skb_store[store_index] = NULL;
|
||||
+ spin_lock_bh(&ehw->rx_skb_idr_lock);
|
||||
+ skb = idr_find(&ehw->rx_skb_idr, store_index);
|
||||
+
|
||||
if (unlikely(!skb)) {
|
||||
+ spin_unlock_bh(&ehw->rx_skb_idr_lock);
|
||||
pr_warn("WARN: empty skb reference in rx_store:%d\n",
|
||||
cons_idx);
|
||||
goto next_rx_desc;
|
||||
}
|
||||
|
||||
+ idr_remove(&ehw->rx_skb_idr, store_index);
|
||||
+ spin_unlock_bh(&ehw->rx_skb_idr_lock);
|
||||
+
|
||||
/*
|
||||
* Check src_info from Rx preheader
|
||||
*/
|
||||
@@ -539,7 +554,7 @@ enum edma_tx edma_ring_xmit(struct edma_hw *ehw,
|
||||
uint16_t buf_len;
|
||||
uint16_t hw_next_to_use, hw_next_to_clean, chk_idx;
|
||||
uint32_t data;
|
||||
- uint32_t store_index = 0;
|
||||
+ int store_index = 0;
|
||||
struct edma_tx_preheader *txph = NULL;
|
||||
|
||||
/*
|
||||
@@ -616,13 +631,16 @@ enum edma_tx edma_ring_xmit(struct edma_hw *ehw,
|
||||
/*
|
||||
* Store the skb in tx_store
|
||||
*/
|
||||
- store_index = hw_next_to_use & (txdesc_ring->count - 1);
|
||||
- if (unlikely(ehw->tx_skb_store[store_index] != NULL)) {
|
||||
+ spin_lock_bh(&ehw->tx_skb_idr_lock);
|
||||
+ store_index = idr_alloc(&ehw->tx_skb_idr,
|
||||
+ skb, 0, EDMA_RX_IDR_MAX, GFP_ATOMIC);
|
||||
+ spin_unlock_bh(&ehw->tx_skb_idr_lock);
|
||||
+
|
||||
+ if (unlikely(store_index < 0)) {
|
||||
spin_unlock_bh(&txdesc_ring->tx_lock);
|
||||
return EDMA_TX_DESC;
|
||||
}
|
||||
|
||||
- ehw->tx_skb_store[store_index] = skb;
|
||||
memcpy(skb->data, &store_index, 4);
|
||||
|
||||
/*
|
||||
@@ -645,7 +663,10 @@ enum edma_tx edma_ring_xmit(struct edma_hw *ehw,
|
||||
*/
|
||||
dev_kfree_skb_any(skb);
|
||||
|
||||
- ehw->tx_skb_store[store_index] = NULL;
|
||||
+ spin_lock_bh(&ehw->tx_skb_idr_lock);
|
||||
+ idr_remove(&ehw->tx_skb_idr, store_index);
|
||||
+ spin_unlock_bh(&ehw->tx_skb_idr_lock);
|
||||
+
|
||||
spin_unlock_bh(&txdesc_ring->tx_lock);
|
||||
return EDMA_TX_OK;
|
||||
}
|
||||
--
2.36.1

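The patch above replaces the fixed-size rx/tx skb arrays with an idr keyed by a small cookie that travels in the descriptor's opaque field. A minimal sketch of that pattern follows; struct skb_store and its helpers are illustrative names, not part of nss-dp:

#include <linux/idr.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct skb_store {
	struct idr idr;		/* maps a small integer cookie -> skb */
	spinlock_t lock;	/* idr_alloc()/idr_remove() need external locking */
};

static void skb_store_init(struct skb_store *st)
{
	idr_init(&st->idr);
	spin_lock_init(&st->lock);
}

/* Returns a cookie >= 0 to stash in the descriptor, or a negative errno. */
static int skb_store_add(struct skb_store *st, struct sk_buff *skb, int max)
{
	int cookie;

	spin_lock_bh(&st->lock);
	cookie = idr_alloc(&st->idr, skb, 0, max, GFP_ATOMIC);
	spin_unlock_bh(&st->lock);

	return cookie;
}

/* Looks up and drops the mapping in one critical section, as the patch does. */
static struct sk_buff *skb_store_del(struct skb_store *st, int cookie)
{
	struct sk_buff *skb;

	spin_lock_bh(&st->lock);
	skb = idr_find(&st->idr, cookie);
	if (skb)
		idr_remove(&st->idr, cookie);
	spin_unlock_bh(&st->lock);

	return skb;
}

idr_alloc() with GFP_ATOMIC is safe under the BH-disabling spinlock, and a negative return maps directly onto the patch's store_index < 0 checks.
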
@@ -0,0 +1,125 @@
From b7fe6524ef7e1b3579141030a784b311c913f721 Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Fri, 24 Jun 2022 16:33:07 +0200
Subject: [PATCH 1/2] edma_v1: move rxfill to specific irq handler

Move rxfill logic to specific irq handler.

Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
---
 hal/dp_ops/edma_dp/edma_v1/edma_data_plane.c |  2 +-
 hal/dp_ops/edma_dp/edma_v1/edma_data_plane.h |  1 +
 hal/dp_ops/edma_dp/edma_v1/edma_tx_rx.c      | 52 +++++++++++++-------
 3 files changed, 36 insertions(+), 19 deletions(-)
diff --git a/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.c b/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.c
|
||||
index 1d748db..65bd0db 100644
|
||||
--- a/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.c
|
||||
+++ b/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.c
|
||||
@@ -731,7 +731,7 @@ static int edma_irq_init(void)
|
||||
*/
|
||||
for (i = 0; i < edma_hw.rxfill_rings; i++) {
|
||||
err = request_irq(edma_hw.rxfill_intr[i],
|
||||
- edma_handle_irq, IRQF_SHARED,
|
||||
+ edma_rx_fill_handle_irq, IRQF_SHARED,
|
||||
"edma_rxfill", (void *)edma_hw.pdev);
|
||||
if (err) {
|
||||
pr_debug("RXFILL ring IRQ:%d request failed\n",
|
||||
diff --git a/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.h b/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.h
|
||||
index 2bbe478..d0237ba 100644
|
||||
--- a/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.h
|
||||
+++ b/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.h
|
||||
@@ -283,6 +283,7 @@ enum edma_tx edma_ring_xmit(struct edma_hw *ehw,
|
||||
uint32_t edma_clean_tx(struct edma_hw *ehw,
|
||||
struct edma_txcmpl_ring *txcmpl_ring);
|
||||
irqreturn_t edma_handle_irq(int irq, void *ctx);
|
||||
+irqreturn_t edma_rx_fill_handle_irq(int irq, void *ctx);
|
||||
irqreturn_t edma_handle_misc_irq(int irq, void *ctx);
|
||||
int edma_napi(struct napi_struct *napi, int budget);
|
||||
void edma_cleanup_rings(struct edma_hw *ehw);
|
||||
diff --git a/hal/dp_ops/edma_dp/edma_v1/edma_tx_rx.c b/hal/dp_ops/edma_dp/edma_v1/edma_tx_rx.c
|
||||
index 355fe83..374c90f 100644
|
||||
--- a/hal/dp_ops/edma_dp/edma_v1/edma_tx_rx.c
|
||||
+++ b/hal/dp_ops/edma_dp/edma_v1/edma_tx_rx.c
|
||||
@@ -740,11 +740,9 @@ irqreturn_t edma_handle_irq(int irq, void *ctx)
|
||||
uint32_t reg_data = 0;
|
||||
uint32_t rxdesc_intr_status = 0;
|
||||
uint32_t txcmpl_intr_status = 0;
|
||||
- uint32_t rxfill_intr_status = 0;
|
||||
int i;
|
||||
struct edma_txcmpl_ring *txcmpl_ring = NULL;
|
||||
struct edma_rxdesc_ring *rxdesc_ring = NULL;
|
||||
- struct edma_rxfill_ring *rxfill_ring = NULL;
|
||||
struct edma_hw *ehw = NULL;
|
||||
struct platform_device *pdev = (struct platform_device *)ctx;
|
||||
|
||||
@@ -788,9 +786,40 @@ irqreturn_t edma_handle_irq(int irq, void *ctx)
|
||||
EDMA_MASK_INT_DISABLE);
|
||||
}
|
||||
|
||||
+ if ((rxdesc_intr_status == 0) && (txcmpl_intr_status == 0))
|
||||
+ return IRQ_NONE;
|
||||
+
|
||||
+ for (i = 0; i < ehw->rxdesc_rings; i++) {
|
||||
+ rxdesc_ring = &ehw->rxdesc_ring[i];
|
||||
+ edma_reg_write(EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->id),
|
||||
+ EDMA_MASK_INT_DISABLE);
|
||||
+ }
|
||||
+
|
||||
/*
|
||||
- * Read RxFill intr status
|
||||
+ *TODO - per core NAPI
|
||||
*/
|
||||
+ if (rxdesc_intr_status || txcmpl_intr_status)
|
||||
+ if (likely(napi_schedule_prep(&ehw->napi)))
|
||||
+ __napi_schedule(&ehw->napi);
|
||||
+
|
||||
+ return IRQ_HANDLED;
|
||||
+}
|
||||
+
|
||||
+irqreturn_t edma_rx_fill_handle_irq(int irq, void *ctx)
|
||||
+{
|
||||
+ struct platform_device *pdev = (struct platform_device *)ctx;
|
||||
+ struct edma_rxfill_ring *rxfill_ring;
|
||||
+ uint32_t rxfill_intr_status;
|
||||
+ struct edma_hw *ehw;
|
||||
+ uint32_t reg_data;
|
||||
+ int i;
|
||||
+
|
||||
+ ehw = platform_get_drvdata(pdev);
|
||||
+ if (!ehw) {
|
||||
+ pr_info("Unable to retrieve platrofm data");
|
||||
+ return IRQ_HANDLED;
|
||||
+ }
|
||||
+
|
||||
for (i = 0; i < ehw->rxfill_rings; i++) {
|
||||
rxfill_ring = &ehw->rxfill_ring[i];
|
||||
reg_data = edma_reg_read(
|
||||
@@ -806,22 +835,9 @@ irqreturn_t edma_handle_irq(int irq, void *ctx)
|
||||
|
||||
}
|
||||
|
||||
- if ((rxdesc_intr_status == 0) && (txcmpl_intr_status == 0) &&
|
||||
- (rxfill_intr_status == 0))
|
||||
+ if (!rxfill_intr_status)
|
||||
return IRQ_NONE;
|
||||
|
||||
- for (i = 0; i < ehw->rxdesc_rings; i++) {
|
||||
- rxdesc_ring = &ehw->rxdesc_ring[i];
|
||||
- edma_reg_write(EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->id),
|
||||
- EDMA_MASK_INT_DISABLE);
|
||||
- }
|
||||
-
|
||||
- /*
|
||||
- *TODO - per core NAPI
|
||||
- */
|
||||
- if (rxdesc_intr_status || txcmpl_intr_status || rxfill_intr_status)
|
||||
- if (likely(napi_schedule_prep(&ehw->napi)))
|
||||
- __napi_schedule(&ehw->napi);
|
||||
-
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
+
|
||||
--
2.36.1

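The patch above gives the RXFILL interrupt its own handler so the common handler no longer has to read RXFILL status on every rx/tx interrupt. A rough sketch of the same split, with hypothetical demo_* names and register offsets:

#include <linux/interrupt.h>
#include <linux/io.h>

#define DEMO_RXFILL_INT_STAT	0x100	/* hypothetical register offsets */
#define DEMO_RXFILL_INT_MASK	0x104

struct demo_hw {
	void __iomem *base;
	int rxfill_irq;
};

static irqreturn_t demo_rxfill_irq(int irq, void *ctx)
{
	struct demo_hw *hw = ctx;
	u32 status = readl(hw->base + DEMO_RXFILL_INT_STAT);

	if (!status)
		return IRQ_NONE;	/* not ours: keeps the IRQF_SHARED line honest */

	/* mask the rxfill source; the refill itself happens in the napi path */
	writel(0, hw->base + DEMO_RXFILL_INT_MASK);
	return IRQ_HANDLED;
}

static int demo_request_rxfill_irq(struct demo_hw *hw)
{
	return request_irq(hw->rxfill_irq, demo_rxfill_irq, IRQF_SHARED,
			   "demo_rxfill", hw);
}

Returning IRQ_NONE when the status bits are clear matters on a shared line: it tells the core this handler did not own the interrupt.
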
@@ -0,0 +1,313 @@
From 0c0f9befa1ae766add49e1aa70a9028809526ad0 Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Fri, 24 Jun 2022 16:35:55 +0200
Subject: [PATCH 3/6] edma_v1: split rx and tx napi path and irq handler

Split rx and tx napi and irq handler to own handler.

Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
---
 hal/dp_ops/edma_dp/edma_v1/edma_data_plane.c |  18 ++-
 hal/dp_ops/edma_dp/edma_v1/edma_data_plane.h |  10 +-
 hal/dp_ops/edma_dp/edma_v1/edma_tx_rx.c      | 131 +++++++++++++------
 3 files changed, 109 insertions(+), 50 deletions(-)
diff --git a/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.c b/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.c
|
||||
index 65bd0db..8932f40 100644
|
||||
--- a/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.c
|
||||
+++ b/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.c
|
||||
@@ -407,7 +407,8 @@ void edma_cleanup(bool is_dp_override)
|
||||
synchronize_irq(edma_hw.misc_intr);
|
||||
free_irq(edma_hw.misc_intr, (void *)(edma_hw.pdev));
|
||||
|
||||
- netif_napi_del(&edma_hw.napi);
|
||||
+ netif_napi_del(&edma_hw.rx_napi);
|
||||
+ netif_napi_del(&edma_hw.tx_napi);
|
||||
edma_hw.napi_added = 0;
|
||||
}
|
||||
|
||||
@@ -451,7 +452,8 @@ static int edma_if_open(struct nss_dp_data_plane_ctx *dpc,
|
||||
if (edma_hw.active++ != 0)
|
||||
return NSS_DP_SUCCESS;
|
||||
|
||||
- napi_enable(&edma_hw.napi);
|
||||
+ napi_enable(&edma_hw.rx_napi);
|
||||
+ napi_enable(&edma_hw.tx_napi);
|
||||
|
||||
/*
|
||||
* Enable the interrupt masks.
|
||||
@@ -478,7 +480,8 @@ static int edma_if_close(struct nss_dp_data_plane_ctx *dpc)
|
||||
/*
|
||||
* Disable NAPI
|
||||
*/
|
||||
- napi_disable(&edma_hw.napi);
|
||||
+ napi_disable(&edma_hw.rx_napi);
|
||||
+ napi_disable(&edma_hw.tx_napi);
|
||||
return NSS_DP_SUCCESS;
|
||||
}
|
||||
|
||||
@@ -716,7 +719,7 @@ static int edma_irq_init(void)
|
||||
*/
|
||||
for (i = 0; i < edma_hw.txcmpl_rings; i++) {
|
||||
err = request_irq(edma_hw.txcmpl_intr[i],
|
||||
- edma_handle_irq, IRQF_SHARED,
|
||||
+ edma_tx_handle_irq, IRQF_SHARED,
|
||||
"edma_txcmpl", (void *)edma_hw.pdev);
|
||||
if (err) {
|
||||
pr_debug("TXCMPL ring IRQ:%d request failed\n",
|
||||
@@ -745,7 +748,7 @@ static int edma_irq_init(void)
|
||||
*/
|
||||
for (i = 0; i < edma_hw.rxdesc_rings; i++) {
|
||||
err = request_irq(edma_hw.rxdesc_intr[i],
|
||||
- edma_handle_irq, IRQF_SHARED,
|
||||
+ edma_rx_handle_irq, IRQF_SHARED,
|
||||
"edma_rxdesc", (void *)edma_hw.pdev);
|
||||
if (err) {
|
||||
pr_debug("RXDESC ring IRQ:%d request failed\n",
|
||||
@@ -836,7 +839,10 @@ static int edma_register_netdevice(struct net_device *netdev, uint32_t macid)
|
||||
* NAPI add
|
||||
*/
|
||||
if (!edma_hw.napi_added) {
|
||||
- netif_napi_add(netdev, &edma_hw.napi, edma_napi,
|
||||
+ netif_napi_add(netdev, &edma_hw.rx_napi, edma_rx_napi,
|
||||
+ NAPI_POLL_WEIGHT);
|
||||
+
|
||||
+ netif_tx_napi_add(netdev, &edma_hw.tx_napi, edma_tx_napi,
|
||||
NAPI_POLL_WEIGHT);
|
||||
/*
|
||||
* Register the interrupt handlers and enable interrupts
|
||||
diff --git a/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.h b/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.h
|
||||
index d0237ba..a45fb99 100644
|
||||
--- a/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.h
|
||||
+++ b/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.h
|
||||
@@ -172,7 +172,9 @@ enum edma_tx {
|
||||
* EDMA private data structure
|
||||
*/
|
||||
struct edma_hw {
|
||||
- struct napi_struct napi;
|
||||
+ struct napi_struct rx_napi;
|
||||
+ /* napi structure */
|
||||
+ struct napi_struct tx_napi;
|
||||
/* napi structure */
|
||||
struct net_device *netdev_arr[EDMA_MAX_GMACS];
|
||||
/* netdev for each gmac port */
|
||||
@@ -282,10 +284,12 @@ enum edma_tx edma_ring_xmit(struct edma_hw *ehw,
|
||||
struct edma_txdesc_ring *txdesc_ring);
|
||||
uint32_t edma_clean_tx(struct edma_hw *ehw,
|
||||
struct edma_txcmpl_ring *txcmpl_ring);
|
||||
-irqreturn_t edma_handle_irq(int irq, void *ctx);
|
||||
+irqreturn_t edma_tx_handle_irq(int irq, void *ctx);
|
||||
+irqreturn_t edma_rx_handle_irq(int irq, void *ctx);
|
||||
irqreturn_t edma_rx_fill_handle_irq(int irq, void *ctx);
|
||||
irqreturn_t edma_handle_misc_irq(int irq, void *ctx);
|
||||
-int edma_napi(struct napi_struct *napi, int budget);
|
||||
+int edma_rx_napi(struct napi_struct *napi, int budget);
|
||||
+int edma_tx_napi(struct napi_struct *napi, int budget);
|
||||
void edma_cleanup_rings(struct edma_hw *ehw);
|
||||
int edma_hw_init(struct edma_hw *ehw);
|
||||
#endif /* __NSS_DP_EDMA_DATAPLANE__ */
|
||||
diff --git a/hal/dp_ops/edma_dp/edma_v1/edma_tx_rx.c b/hal/dp_ops/edma_dp/edma_v1/edma_tx_rx.c
|
||||
index 7c84455..ace8140 100644
|
||||
--- a/hal/dp_ops/edma_dp/edma_v1/edma_tx_rx.c
|
||||
+++ b/hal/dp_ops/edma_dp/edma_v1/edma_tx_rx.c
|
||||
@@ -456,14 +456,12 @@ next_rx_desc:
|
||||
* edma_napi()
|
||||
* EDMA NAPI handler
|
||||
*/
|
||||
-int edma_napi(struct napi_struct *napi, int budget)
|
||||
+int edma_rx_napi(struct napi_struct *napi, int budget)
|
||||
{
|
||||
- struct edma_hw *ehw = container_of(napi, struct edma_hw, napi);
|
||||
- struct edma_txcmpl_ring *txcmpl_ring = NULL;
|
||||
+ struct edma_hw *ehw = container_of(napi, struct edma_hw, rx_napi);
|
||||
struct edma_rxdesc_ring *rxdesc_ring = NULL;
|
||||
struct edma_rxfill_ring *rxfill_ring = NULL;
|
||||
|
||||
- struct net_device *ndev;
|
||||
int work_done = 0;
|
||||
int i;
|
||||
|
||||
@@ -472,16 +470,56 @@ int edma_napi(struct napi_struct *napi, int budget)
|
||||
work_done += edma_clean_rx(ehw, budget, rxdesc_ring);
|
||||
}
|
||||
|
||||
+ /*
|
||||
+ * TODO - rework and fix the budget control
|
||||
+ */
|
||||
+ if (work_done < budget) {
|
||||
+ /*
|
||||
+ * TODO per core NAPI
|
||||
+ */
|
||||
+ napi_complete(napi);
|
||||
+
|
||||
+ /*
|
||||
+ * Set RXDESC ring interrupt mask
|
||||
+ */
|
||||
+ for (i = 0; i < ehw->rxdesc_rings; i++) {
|
||||
+ rxdesc_ring = &ehw->rxdesc_ring[i];
|
||||
+ edma_reg_write(
|
||||
+ EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->id),
|
||||
+ ehw->rxdesc_intr_mask);
|
||||
+ }
|
||||
+
|
||||
+ /*
|
||||
+ * Set RXFILL ring interrupt mask
|
||||
+ */
|
||||
+ for (i = 0; i < ehw->rxfill_rings; i++) {
|
||||
+ rxfill_ring = &ehw->rxfill_ring[i];
|
||||
+ edma_reg_write(EDMA_REG_RXFILL_INT_MASK(
|
||||
+ rxfill_ring->id),
|
||||
+ edma_hw.rxfill_intr_mask);
|
||||
+ }
|
||||
+ }
|
||||
+ return work_done;
|
||||
+}
|
||||
+
|
||||
+/*
|
||||
+ * edma_napi()
|
||||
+ * EDMA NAPI handler
|
||||
+ */
|
||||
+int edma_tx_napi(struct napi_struct *napi, int budget)
|
||||
+{
|
||||
+ struct edma_hw *ehw = container_of(napi, struct edma_hw, tx_napi);
|
||||
+ struct edma_txcmpl_ring *txcmpl_ring = NULL;
|
||||
+
|
||||
+ struct net_device *ndev;
|
||||
+ int work_done = 0;
|
||||
+ int i;
|
||||
+
|
||||
for (i = 0; i < ehw->txcmpl_rings; i++) {
|
||||
txcmpl_ring = &ehw->txcmpl_ring[i];
|
||||
edma_clean_tx(ehw, txcmpl_ring);
|
||||
}
|
||||
|
||||
- for (i = 0; i < ehw->rxfill_rings; i++) {
|
||||
- rxfill_ring = &ehw->rxfill_ring[i];
|
||||
- edma_alloc_rx_buffer(ehw, rxfill_ring);
|
||||
- }
|
||||
-
|
||||
/*
|
||||
* Resume netdev Tx queue
|
||||
*/
|
||||
@@ -508,16 +546,6 @@ int edma_napi(struct napi_struct *napi, int budget)
|
||||
*/
|
||||
napi_complete(napi);
|
||||
|
||||
- /*
|
||||
- * Set RXDESC ring interrupt mask
|
||||
- */
|
||||
- for (i = 0; i < ehw->rxdesc_rings; i++) {
|
||||
- rxdesc_ring = &ehw->rxdesc_ring[i];
|
||||
- edma_reg_write(
|
||||
- EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->id),
|
||||
- ehw->rxdesc_intr_mask);
|
||||
- }
|
||||
-
|
||||
/*
|
||||
* Set TXCMPL ring interrupt mask
|
||||
*/
|
||||
@@ -527,15 +555,6 @@ int edma_napi(struct napi_struct *napi, int budget)
|
||||
ehw->txcmpl_intr_mask);
|
||||
}
|
||||
|
||||
- /*
|
||||
- * Set RXFILL ring interrupt mask
|
||||
- */
|
||||
- for (i = 0; i < ehw->rxfill_rings; i++) {
|
||||
- rxfill_ring = &ehw->rxfill_ring[i];
|
||||
- edma_reg_write(EDMA_REG_RXFILL_INT_MASK(
|
||||
- rxfill_ring->id),
|
||||
- edma_hw.rxfill_intr_mask);
|
||||
- }
|
||||
}
|
||||
return work_done;
|
||||
}
|
||||
@@ -736,13 +755,11 @@ irqreturn_t edma_handle_misc_irq(int irq, void *ctx)
|
||||
* edma_handle_irq()
|
||||
* Process IRQ and schedule napi
|
||||
*/
|
||||
-irqreturn_t edma_handle_irq(int irq, void *ctx)
|
||||
+irqreturn_t edma_rx_handle_irq(int irq, void *ctx)
|
||||
{
|
||||
uint32_t reg_data = 0;
|
||||
uint32_t rxdesc_intr_status = 0;
|
||||
- uint32_t txcmpl_intr_status = 0;
|
||||
int i;
|
||||
- struct edma_txcmpl_ring *txcmpl_ring = NULL;
|
||||
struct edma_rxdesc_ring *rxdesc_ring = NULL;
|
||||
struct edma_hw *ehw = NULL;
|
||||
struct platform_device *pdev = (struct platform_device *)ctx;
|
||||
@@ -770,6 +787,44 @@ irqreturn_t edma_handle_irq(int irq, void *ctx)
|
||||
EDMA_MASK_INT_DISABLE);
|
||||
}
|
||||
|
||||
+ if (rxdesc_intr_status == 0)
|
||||
+ return IRQ_NONE;
|
||||
+
|
||||
+ for (i = 0; i < ehw->rxdesc_rings; i++) {
|
||||
+ rxdesc_ring = &ehw->rxdesc_ring[i];
|
||||
+ edma_reg_write(EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->id),
|
||||
+ EDMA_MASK_INT_DISABLE);
|
||||
+ }
|
||||
+
|
||||
+ /*
|
||||
+ *TODO - per core NAPI
|
||||
+ */
|
||||
+ if (rxdesc_intr_status)
|
||||
+ if (likely(napi_schedule_prep(&ehw->rx_napi)))
|
||||
+ __napi_schedule(&ehw->rx_napi);
|
||||
+
|
||||
+ return IRQ_HANDLED;
|
||||
+}
|
||||
+
|
||||
+/*
|
||||
+ * edma_handle_irq()
|
||||
+ * Process IRQ and schedule napi
|
||||
+ */
|
||||
+irqreturn_t edma_tx_handle_irq(int irq, void *ctx)
|
||||
+{
|
||||
+ uint32_t reg_data = 0;
|
||||
+ uint32_t txcmpl_intr_status = 0;
|
||||
+ int i;
|
||||
+ struct edma_txcmpl_ring *txcmpl_ring = NULL;
|
||||
+ struct edma_hw *ehw = NULL;
|
||||
+ struct platform_device *pdev = (struct platform_device *)ctx;
|
||||
+
|
||||
+ ehw = platform_get_drvdata(pdev);
|
||||
+ if (!ehw) {
|
||||
+ pr_info("Unable to retrieve platrofm data");
|
||||
+ return IRQ_HANDLED;
|
||||
+ }
|
||||
+
|
||||
/*
|
||||
* Read TxCmpl intr status
|
||||
*/
|
||||
@@ -787,21 +842,15 @@ irqreturn_t edma_handle_irq(int irq, void *ctx)
|
||||
EDMA_MASK_INT_DISABLE);
|
||||
}
|
||||
|
||||
- if ((rxdesc_intr_status == 0) && (txcmpl_intr_status == 0))
|
||||
+ if (txcmpl_intr_status == 0)
|
||||
return IRQ_NONE;
|
||||
|
||||
- for (i = 0; i < ehw->rxdesc_rings; i++) {
|
||||
- rxdesc_ring = &ehw->rxdesc_ring[i];
|
||||
- edma_reg_write(EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->id),
|
||||
- EDMA_MASK_INT_DISABLE);
|
||||
- }
|
||||
-
|
||||
/*
|
||||
*TODO - per core NAPI
|
||||
*/
|
||||
- if (rxdesc_intr_status || txcmpl_intr_status)
|
||||
- if (likely(napi_schedule_prep(&ehw->napi)))
|
||||
- __napi_schedule(&ehw->napi);
|
||||
+ if (txcmpl_intr_status)
|
||||
+ if (likely(napi_schedule_prep(&ehw->tx_napi)))
|
||||
+ __napi_schedule(&ehw->tx_napi);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
--
2.36.1

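The patch above registers separate rx and tx NAPI contexts, each with its own poll function and IRQ handler. A minimal sketch of the split, using the 4-argument netif_napi_add()/netif_tx_napi_add() forms these patches target (pre-6.1 kernels); all demo_* names are illustrative:

#include <linux/netdevice.h>

struct demo_hw {
	struct napi_struct rx_napi;
	struct napi_struct tx_napi;
};

static int demo_clean_rx(struct demo_hw *hw, int budget) { return 0; }	/* stub */
static void demo_clean_tx(struct demo_hw *hw) { }			/* stub */

static int demo_rx_poll(struct napi_struct *napi, int budget)
{
	struct demo_hw *hw = container_of(napi, struct demo_hw, rx_napi);
	int work = demo_clean_rx(hw, budget);

	if (work < budget)
		napi_complete(napi);	/* re-enable rx interrupts here */
	return work;
}

static int demo_tx_poll(struct napi_struct *napi, int budget)
{
	struct demo_hw *hw = container_of(napi, struct demo_hw, tx_napi);

	demo_clean_tx(hw);
	napi_complete(napi);		/* re-enable tx-complete interrupts here */
	return 0;
}

static void demo_napi_setup(struct net_device *ndev, struct demo_hw *hw)
{
	netif_napi_add(ndev, &hw->rx_napi, demo_rx_poll, NAPI_POLL_WEIGHT);
	netif_tx_napi_add(ndev, &hw->tx_napi, demo_tx_poll, NAPI_POLL_WEIGHT);
}

Keeping tx completion in its own netif_tx_napi_add() instance is what lets rx and tx work be scheduled independently, which is the point of the split.
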
@@ -0,0 +1,267 @@
From 21ffe52de4834569486619b93a059a7a92000827 Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Fri, 24 Jun 2022 18:07:32 +0200
Subject: [PATCH 18/21] edma_v1: move rx napi to per ring implementation

Move rx napi to per ring implementation.

Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
---
 hal/dp_ops/edma_dp/edma_v1/edma_cfg.c        |  1 +
 hal/dp_ops/edma_dp/edma_v1/edma_data_plane.c | 25 +++--
 hal/dp_ops/edma_dp/edma_v1/edma_data_plane.h |  4 +-
 hal/dp_ops/edma_dp/edma_v1/edma_tx_rx.c      | 76 +++++++-------
 4 files changed, 47 insertions(+), 59 deletions(-)
diff --git a/hal/dp_ops/edma_dp/edma_v1/edma_cfg.c b/hal/dp_ops/edma_dp/edma_v1/edma_cfg.c
|
||||
index 2e98aaf..20d055e 100644
|
||||
--- a/hal/dp_ops/edma_dp/edma_v1/edma_cfg.c
|
||||
+++ b/hal/dp_ops/edma_dp/edma_v1/edma_cfg.c
|
||||
@@ -410,6 +410,7 @@ static int edma_setup_ring_resources(struct edma_hw *ehw)
|
||||
*/
|
||||
for (i = 0; i < ehw->rxdesc_rings; i++) {
|
||||
rxdesc_ring = &ehw->rxdesc_ring[i];
|
||||
+ rxdesc_ring->ehw = ehw;
|
||||
rxdesc_ring->count = EDMA_RING_SIZE;
|
||||
rxdesc_ring->id = ehw->rxdesc_ring_start + i;
|
||||
|
||||
diff --git a/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.c b/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.c
|
||||
index 8932f40..565564a 100644
|
||||
--- a/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.c
|
||||
+++ b/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.c
|
||||
@@ -407,7 +407,9 @@ void edma_cleanup(bool is_dp_override)
|
||||
synchronize_irq(edma_hw.misc_intr);
|
||||
free_irq(edma_hw.misc_intr, (void *)(edma_hw.pdev));
|
||||
|
||||
- netif_napi_del(&edma_hw.rx_napi);
|
||||
+ for (i = 0; i < edma_hw.rxdesc_rings; i++)
|
||||
+ netif_napi_del(&edma_hw.rxdesc_ring[i].napi);
|
||||
+
|
||||
netif_napi_del(&edma_hw.tx_napi);
|
||||
edma_hw.napi_added = 0;
|
||||
}
|
||||
@@ -443,6 +445,8 @@ static int edma_if_open(struct nss_dp_data_plane_ctx *dpc,
|
||||
uint32_t tx_desc_ring, uint32_t rx_desc_ring,
|
||||
uint32_t mode)
|
||||
{
|
||||
+ int i;
|
||||
+
|
||||
if (!dpc->dev)
|
||||
return NSS_DP_FAILURE;
|
||||
|
||||
@@ -452,7 +456,9 @@ static int edma_if_open(struct nss_dp_data_plane_ctx *dpc,
|
||||
if (edma_hw.active++ != 0)
|
||||
return NSS_DP_SUCCESS;
|
||||
|
||||
- napi_enable(&edma_hw.rx_napi);
|
||||
+ for (i = 0; i < edma_hw.rxdesc_rings; i++)
|
||||
+ napi_enable(&edma_hw.rxdesc_ring[i].napi);
|
||||
+
|
||||
napi_enable(&edma_hw.tx_napi);
|
||||
|
||||
/*
|
||||
@@ -469,6 +475,8 @@ static int edma_if_open(struct nss_dp_data_plane_ctx *dpc,
|
||||
*/
|
||||
static int edma_if_close(struct nss_dp_data_plane_ctx *dpc)
|
||||
{
|
||||
+ int i;
|
||||
+
|
||||
if (--edma_hw.active != 0)
|
||||
return NSS_DP_SUCCESS;
|
||||
|
||||
@@ -480,7 +488,9 @@ static int edma_if_close(struct nss_dp_data_plane_ctx *dpc)
|
||||
/*
|
||||
* Disable NAPI
|
||||
*/
|
||||
- napi_disable(&edma_hw.rx_napi);
|
||||
+ for (i = 0; i < edma_hw.rxdesc_rings; i++)
|
||||
+ napi_disable(&edma_hw.rxdesc_ring[i].napi);
|
||||
+
|
||||
napi_disable(&edma_hw.tx_napi);
|
||||
return NSS_DP_SUCCESS;
|
||||
}
|
||||
@@ -749,7 +759,7 @@ static int edma_irq_init(void)
|
||||
for (i = 0; i < edma_hw.rxdesc_rings; i++) {
|
||||
err = request_irq(edma_hw.rxdesc_intr[i],
|
||||
edma_rx_handle_irq, IRQF_SHARED,
|
||||
- "edma_rxdesc", (void *)edma_hw.pdev);
|
||||
+ "edma_rxdesc", (void *)&edma_hw.rxdesc_ring[i]);
|
||||
if (err) {
|
||||
pr_debug("RXDESC ring IRQ:%d request failed\n",
|
||||
edma_hw.rxdesc_intr[i]);
|
||||
@@ -814,6 +824,8 @@ rx_fill_ring_intr_req_fail:
|
||||
*/
|
||||
static int edma_register_netdevice(struct net_device *netdev, uint32_t macid)
|
||||
{
|
||||
+ int i;
|
||||
+
|
||||
if (!netdev) {
|
||||
pr_info("nss_dp_edma: Invalid netdev pointer %px\n", netdev);
|
||||
return -EINVAL;
|
||||
@@ -839,8 +851,9 @@ static int edma_register_netdevice(struct net_device *netdev, uint32_t macid)
|
||||
* NAPI add
|
||||
*/
|
||||
if (!edma_hw.napi_added) {
|
||||
- netif_napi_add(netdev, &edma_hw.rx_napi, edma_rx_napi,
|
||||
- NAPI_POLL_WEIGHT);
|
||||
+ for (i = 0; i < edma_hw.rxdesc_rings; i++)
|
||||
+ netif_napi_add(netdev, &edma_hw.rxdesc_ring[i].napi, edma_rx_napi,
|
||||
+ NAPI_POLL_WEIGHT);
|
||||
|
||||
netif_tx_napi_add(netdev, &edma_hw.tx_napi, edma_tx_napi,
|
||||
NAPI_POLL_WEIGHT);
|
||||
diff --git a/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.h b/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.h
|
||||
index a45fb99..01a6453 100644
|
||||
--- a/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.h
|
||||
+++ b/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.h
|
||||
@@ -116,6 +116,8 @@ struct edma_rxfill_ring {
|
||||
* RxDesc ring
|
||||
*/
|
||||
struct edma_rxdesc_ring {
|
||||
+ struct napi_struct napi; /* napi structure */
|
||||
+ struct edma_hw *ehw;
|
||||
uint32_t id; /* RXDESC ring number */
|
||||
struct edma_rxfill_ring *rxfill; /* RXFILL ring used */
|
||||
void *desc; /* descriptor ring virtual address */
|
||||
@@ -172,8 +174,6 @@ enum edma_tx {
|
||||
* EDMA private data structure
|
||||
*/
|
||||
struct edma_hw {
|
||||
- struct napi_struct rx_napi;
|
||||
- /* napi structure */
|
||||
struct napi_struct tx_napi;
|
||||
/* napi structure */
|
||||
struct net_device *netdev_arr[EDMA_MAX_GMACS];
|
||||
diff --git a/hal/dp_ops/edma_dp/edma_v1/edma_tx_rx.c b/hal/dp_ops/edma_dp/edma_v1/edma_tx_rx.c
|
||||
index 1fb8cbf..1d2fa8a 100644
|
||||
--- a/hal/dp_ops/edma_dp/edma_v1/edma_tx_rx.c
|
||||
+++ b/hal/dp_ops/edma_dp/edma_v1/edma_tx_rx.c
|
||||
@@ -427,7 +427,7 @@ static uint32_t edma_clean_rx(struct edma_hw *ehw,
|
||||
nss_phy_tstamp_rx_buf(ndev, skb);
|
||||
else
|
||||
#if defined(NSS_DP_ENABLE_NAPI_GRO)
|
||||
- napi_gro_receive(&ehw->napi, skb);
|
||||
+ napi_gro_receive(&rxdesc_ring->napi, skb);
|
||||
#else
|
||||
netif_receive_skb(skb);
|
||||
#endif
|
||||
@@ -462,17 +462,13 @@ next_rx_desc:
|
||||
*/
|
||||
int edma_rx_napi(struct napi_struct *napi, int budget)
|
||||
{
|
||||
- struct edma_hw *ehw = container_of(napi, struct edma_hw, rx_napi);
|
||||
- struct edma_rxdesc_ring *rxdesc_ring = NULL;
|
||||
- struct edma_rxfill_ring *rxfill_ring = NULL;
|
||||
+ struct edma_rxdesc_ring *rxdesc_ring = container_of(napi, struct edma_rxdesc_ring, napi);
|
||||
+ struct edma_rxfill_ring *rxfill_ring = rxdesc_ring->rxfill;
|
||||
+ struct edma_hw *ehw = rxdesc_ring->ehw;
|
||||
|
||||
int work_done = 0;
|
||||
- int i;
|
||||
|
||||
- for (i = 0; i < ehw->rxdesc_rings; i++) {
|
||||
- rxdesc_ring = &ehw->rxdesc_ring[i];
|
||||
- work_done += edma_clean_rx(ehw, budget, rxdesc_ring);
|
||||
- }
|
||||
+ work_done += edma_clean_rx(ehw, budget, rxdesc_ring);
|
||||
|
||||
/*
|
||||
* TODO - rework and fix the budget control
|
||||
@@ -486,22 +482,15 @@ int edma_rx_napi(struct napi_struct *napi, int budget)
|
||||
/*
|
||||
* Set RXDESC ring interrupt mask
|
||||
*/
|
||||
- for (i = 0; i < ehw->rxdesc_rings; i++) {
|
||||
- rxdesc_ring = &ehw->rxdesc_ring[i];
|
||||
- edma_reg_write(
|
||||
- EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->id),
|
||||
- ehw->rxdesc_intr_mask);
|
||||
- }
|
||||
+ edma_reg_write(
|
||||
+ EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->id),
|
||||
+ ehw->rxdesc_intr_mask);
|
||||
|
||||
/*
|
||||
* Set RXFILL ring interrupt mask
|
||||
*/
|
||||
- for (i = 0; i < ehw->rxfill_rings; i++) {
|
||||
- rxfill_ring = &ehw->rxfill_ring[i];
|
||||
- edma_reg_write(EDMA_REG_RXFILL_INT_MASK(
|
||||
- rxfill_ring->id),
|
||||
- edma_hw.rxfill_intr_mask);
|
||||
- }
|
||||
+ edma_reg_write(EDMA_REG_RXFILL_INT_MASK(rxfill_ring->id),
|
||||
+ edma_hw.rxfill_intr_mask);
|
||||
}
|
||||
return work_done;
|
||||
}
|
||||
@@ -761,51 +750,36 @@ irqreturn_t edma_handle_misc_irq(int irq, void *ctx)
|
||||
*/
|
||||
irqreturn_t edma_rx_handle_irq(int irq, void *ctx)
|
||||
{
|
||||
- uint32_t reg_data = 0;
|
||||
+ struct edma_rxdesc_ring *rxdesc_ring = (struct edma_rxdesc_ring *)ctx;
|
||||
uint32_t rxdesc_intr_status = 0;
|
||||
- int i;
|
||||
- struct edma_rxdesc_ring *rxdesc_ring = NULL;
|
||||
- struct edma_hw *ehw = NULL;
|
||||
- struct platform_device *pdev = (struct platform_device *)ctx;
|
||||
-
|
||||
- ehw = platform_get_drvdata(pdev);
|
||||
- if (!ehw) {
|
||||
- pr_info("Unable to retrieve platrofm data");
|
||||
- return IRQ_HANDLED;
|
||||
- }
|
||||
+ uint32_t reg_data = 0;
|
||||
|
||||
/*
|
||||
* Read RxDesc intr status
|
||||
*/
|
||||
- for (i = 0; i < ehw->rxdesc_rings; i++) {
|
||||
- rxdesc_ring = &ehw->rxdesc_ring[i];
|
||||
- reg_data = edma_reg_read(
|
||||
- EDMA_REG_RXDESC_INT_STAT(rxdesc_ring->id));
|
||||
- rxdesc_intr_status |= reg_data &
|
||||
- EDMA_RXDESC_RING_INT_STATUS_MASK;
|
||||
+ reg_data = edma_reg_read(
|
||||
+ EDMA_REG_RXDESC_INT_STAT(rxdesc_ring->id));
|
||||
+ rxdesc_intr_status |= reg_data &
|
||||
+ EDMA_RXDESC_RING_INT_STATUS_MASK;
|
||||
|
||||
- /*
|
||||
- * Disable RxDesc intr
|
||||
- */
|
||||
- edma_reg_write(EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->id),
|
||||
- EDMA_MASK_INT_DISABLE);
|
||||
- }
|
||||
+ /*
|
||||
+ * Disable RxDesc intr
|
||||
+ */
|
||||
+ edma_reg_write(EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->id),
|
||||
+ EDMA_MASK_INT_DISABLE);
|
||||
|
||||
if (rxdesc_intr_status == 0)
|
||||
return IRQ_NONE;
|
||||
|
||||
- for (i = 0; i < ehw->rxdesc_rings; i++) {
|
||||
- rxdesc_ring = &ehw->rxdesc_ring[i];
|
||||
- edma_reg_write(EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->id),
|
||||
- EDMA_MASK_INT_DISABLE);
|
||||
- }
|
||||
+ edma_reg_write(EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->id),
|
||||
+ EDMA_MASK_INT_DISABLE);
|
||||
|
||||
/*
|
||||
*TODO - per core NAPI
|
||||
*/
|
||||
if (rxdesc_intr_status)
|
||||
- if (likely(napi_schedule_prep(&ehw->rx_napi)))
|
||||
- __napi_schedule(&ehw->rx_napi);
|
||||
+ if (likely(napi_schedule_prep(&rxdesc_ring->napi)))
|
||||
+ __napi_schedule(&rxdesc_ring->napi);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
--
2.36.1

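The patch above embeds the napi_struct in each rx descriptor ring and passes the ring, rather than the platform device, as the IRQ cookie, so both the handler and the poll function operate on a single ring. A sketch of that per-ring wiring (demo_* names are illustrative):

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct demo_rx_ring {
	struct napi_struct napi;	/* one NAPI context per ring */
	int id;
	int irq;
};

static int demo_clean_ring(struct demo_rx_ring *ring, int budget) { return 0; } /* stub */

static int demo_ring_poll(struct napi_struct *napi, int budget)
{
	struct demo_rx_ring *ring = container_of(napi, struct demo_rx_ring, napi);
	int work = demo_clean_ring(ring, budget);	/* clean only this ring */

	if (work < budget)
		napi_complete(napi);	/* then unmask only this ring's interrupt */
	return work;
}

static irqreturn_t demo_ring_irq(int irq, void *ctx)
{
	struct demo_rx_ring *ring = ctx;	/* the ring is the IRQ cookie */

	napi_schedule(&ring->napi);		/* mask + schedule in the real driver */
	return IRQ_HANDLED;
}

static int demo_ring_init(struct net_device *ndev, struct demo_rx_ring *ring)
{
	netif_napi_add(ndev, &ring->napi, demo_ring_poll, NAPI_POLL_WEIGHT);
	return request_irq(ring->irq, demo_ring_irq, IRQF_SHARED,
			   "demo_rxdesc", ring);
}
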
@@ -0,0 +1,206 @@
From de169603dcfa7a33026587c4cef9938cc6c28b1e Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Fri, 24 Jun 2022 18:25:16 +0200
Subject: [PATCH 19/21] edma_v1: move tx napi to per ring implementation

Move tx napi to per ring implementation.

Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
---
 hal/dp_ops/edma_dp/edma_v1/edma_cfg.c        |  1 +
 hal/dp_ops/edma_dp/edma_v1/edma_data_plane.c | 17 +++---
 hal/dp_ops/edma_dp/edma_v1/edma_data_plane.h |  4 +-
 hal/dp_ops/edma_dp/edma_v1/edma_tx_rx.c      | 55 +++++++-------
 4 files changed, 32 insertions(+), 45 deletions(-)
diff --git a/hal/dp_ops/edma_dp/edma_v1/edma_cfg.c b/hal/dp_ops/edma_dp/edma_v1/edma_cfg.c
|
||||
index 20d055e..6f2c082 100644
|
||||
--- a/hal/dp_ops/edma_dp/edma_v1/edma_cfg.c
|
||||
+++ b/hal/dp_ops/edma_dp/edma_v1/edma_cfg.c
|
||||
@@ -373,6 +373,7 @@ static int edma_setup_ring_resources(struct edma_hw *ehw)
|
||||
*/
|
||||
for (i = 0; i < ehw->txcmpl_rings; i++) {
|
||||
txcmpl_ring = &ehw->txcmpl_ring[i];
|
||||
+ txcmpl_ring->ehw = ehw;
|
||||
txcmpl_ring->count = EDMA_RING_SIZE;
|
||||
txcmpl_ring->id = ehw->txcmpl_ring_start + i;
|
||||
|
||||
diff --git a/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.c b/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.c
|
||||
index 565564a..49c7f8c 100644
|
||||
--- a/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.c
|
||||
+++ b/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.c
|
||||
@@ -410,7 +410,8 @@ void edma_cleanup(bool is_dp_override)
|
||||
for (i = 0; i < edma_hw.rxdesc_rings; i++)
|
||||
netif_napi_del(&edma_hw.rxdesc_ring[i].napi);
|
||||
|
||||
- netif_napi_del(&edma_hw.tx_napi);
|
||||
+ for (i = 0; i < edma_hw.txcmpl_rings; i++)
|
||||
+ netif_napi_del(&edma_hw.txcmpl_ring[i].napi);
|
||||
edma_hw.napi_added = 0;
|
||||
}
|
||||
|
||||
@@ -459,7 +460,8 @@ static int edma_if_open(struct nss_dp_data_plane_ctx *dpc,
|
||||
for (i = 0; i < edma_hw.rxdesc_rings; i++)
|
||||
napi_enable(&edma_hw.rxdesc_ring[i].napi);
|
||||
|
||||
- napi_enable(&edma_hw.tx_napi);
|
||||
+ for (i = 0; i < edma_hw.txcmpl_rings; i++)
|
||||
+ napi_enable(&edma_hw.txcmpl_ring[i].napi);
|
||||
|
||||
/*
|
||||
* Enable the interrupt masks.
|
||||
@@ -491,7 +493,9 @@ static int edma_if_close(struct nss_dp_data_plane_ctx *dpc)
|
||||
for (i = 0; i < edma_hw.rxdesc_rings; i++)
|
||||
napi_disable(&edma_hw.rxdesc_ring[i].napi);
|
||||
|
||||
- napi_disable(&edma_hw.tx_napi);
|
||||
+ for (i = 0; i < edma_hw.txcmpl_rings; i++)
|
||||
+ napi_disable(&edma_hw.txcmpl_ring[i].napi);
|
||||
+
|
||||
return NSS_DP_SUCCESS;
|
||||
}
|
||||
|
||||
@@ -730,7 +734,7 @@ static int edma_irq_init(void)
|
||||
for (i = 0; i < edma_hw.txcmpl_rings; i++) {
|
||||
err = request_irq(edma_hw.txcmpl_intr[i],
|
||||
edma_tx_handle_irq, IRQF_SHARED,
|
||||
- "edma_txcmpl", (void *)edma_hw.pdev);
|
||||
+ "edma_txcmpl", (void *)&edma_hw.txcmpl_ring[i]);
|
||||
if (err) {
|
||||
pr_debug("TXCMPL ring IRQ:%d request failed\n",
|
||||
edma_hw.txcmpl_intr[i]);
|
||||
@@ -855,8 +859,9 @@ static int edma_register_netdevice(struct net_device *netdev, uint32_t macid)
|
||||
netif_napi_add(netdev, &edma_hw.rxdesc_ring[i].napi, edma_rx_napi,
|
||||
NAPI_POLL_WEIGHT);
|
||||
|
||||
- netif_tx_napi_add(netdev, &edma_hw.tx_napi, edma_tx_napi,
|
||||
- NAPI_POLL_WEIGHT);
|
||||
+ for (i = 0; i < edma_hw.txcmpl_rings; i++)
|
||||
+ netif_tx_napi_add(netdev, &edma_hw.txcmpl_ring[i].napi, edma_tx_napi,
|
||||
+ NAPI_POLL_WEIGHT);
|
||||
/*
|
||||
* Register the interrupt handlers and enable interrupts
|
||||
*/
|
||||
diff --git a/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.h b/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.h
|
||||
index 01a6453..8ec7e35 100644
|
||||
--- a/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.h
|
||||
+++ b/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.h
|
||||
@@ -95,6 +95,8 @@ struct edma_txdesc_ring {
|
||||
* TxCmpl ring
|
||||
*/
|
||||
struct edma_txcmpl_ring {
|
||||
+ struct napi_struct napi; /* napi structure */
|
||||
+ struct edma_hw *ehw;
|
||||
uint32_t id; /* TXCMPL ring number */
|
||||
void *desc; /* descriptor ring virtual address */
|
||||
dma_addr_t dma; /* descriptor ring physical address */
|
||||
@@ -174,8 +176,6 @@ enum edma_tx {
|
||||
* EDMA private data structure
|
||||
*/
|
||||
struct edma_hw {
|
||||
- struct napi_struct tx_napi;
|
||||
- /* napi structure */
|
||||
struct net_device *netdev_arr[EDMA_MAX_GMACS];
|
||||
/* netdev for each gmac port */
|
||||
struct device_node *device_node;
|
||||
diff --git a/hal/dp_ops/edma_dp/edma_v1/edma_tx_rx.c b/hal/dp_ops/edma_dp/edma_v1/edma_tx_rx.c
|
||||
index 1d2fa8a..8221a9c 100644
|
||||
--- a/hal/dp_ops/edma_dp/edma_v1/edma_tx_rx.c
|
||||
+++ b/hal/dp_ops/edma_dp/edma_v1/edma_tx_rx.c
|
||||
@@ -501,17 +501,14 @@ int edma_rx_napi(struct napi_struct *napi, int budget)
|
||||
*/
|
||||
int edma_tx_napi(struct napi_struct *napi, int budget)
|
||||
{
|
||||
- struct edma_hw *ehw = container_of(napi, struct edma_hw, tx_napi);
|
||||
- struct edma_txcmpl_ring *txcmpl_ring = NULL;
|
||||
+ struct edma_txcmpl_ring *txcmpl_ring = container_of(napi, struct edma_txcmpl_ring, napi);
|
||||
+ struct edma_hw *ehw = txcmpl_ring->ehw;
|
||||
|
||||
struct net_device *ndev;
|
||||
int work_done = 0;
|
||||
int i;
|
||||
|
||||
- for (i = 0; i < ehw->txcmpl_rings; i++) {
|
||||
- txcmpl_ring = &ehw->txcmpl_ring[i];
|
||||
- edma_clean_tx(ehw, txcmpl_ring);
|
||||
- }
|
||||
+ edma_clean_tx(ehw, txcmpl_ring);
|
||||
|
||||
/*
|
||||
* Resume netdev Tx queue
|
||||
@@ -542,12 +539,8 @@ int edma_tx_napi(struct napi_struct *napi, int budget)
|
||||
/*
|
||||
* Set TXCMPL ring interrupt mask
|
||||
*/
|
||||
- for (i = 0; i < ehw->txcmpl_rings; i++) {
|
||||
- txcmpl_ring = &ehw->txcmpl_ring[i];
|
||||
- edma_reg_write(EDMA_REG_TX_INT_MASK(txcmpl_ring->id),
|
||||
- ehw->txcmpl_intr_mask);
|
||||
- }
|
||||
-
|
||||
+ edma_reg_write(EDMA_REG_TX_INT_MASK(txcmpl_ring->id),
|
||||
+ ehw->txcmpl_intr_mask);
|
||||
}
|
||||
return work_done;
|
||||
}
|
||||
@@ -790,35 +783,23 @@ irqreturn_t edma_rx_handle_irq(int irq, void *ctx)
|
||||
*/
|
||||
irqreturn_t edma_tx_handle_irq(int irq, void *ctx)
|
||||
{
|
||||
- uint32_t reg_data = 0;
|
||||
+ struct edma_txcmpl_ring *txcmpl_ring = (struct edma_txcmpl_ring *)ctx;
|
||||
uint32_t txcmpl_intr_status = 0;
|
||||
- int i;
|
||||
- struct edma_txcmpl_ring *txcmpl_ring = NULL;
|
||||
- struct edma_hw *ehw = NULL;
|
||||
- struct platform_device *pdev = (struct platform_device *)ctx;
|
||||
-
|
||||
- ehw = platform_get_drvdata(pdev);
|
||||
- if (!ehw) {
|
||||
- pr_info("Unable to retrieve platrofm data");
|
||||
- return IRQ_HANDLED;
|
||||
- }
|
||||
+ uint32_t reg_data = 0;
|
||||
|
||||
/*
|
||||
* Read TxCmpl intr status
|
||||
*/
|
||||
- for (i = 0; i < ehw->txcmpl_rings; i++) {
|
||||
- txcmpl_ring = &ehw->txcmpl_ring[i];
|
||||
- reg_data = edma_reg_read(
|
||||
- EDMA_REG_TX_INT_STAT(txcmpl_ring->id));
|
||||
- txcmpl_intr_status |= reg_data &
|
||||
- EDMA_TXCMPL_RING_INT_STATUS_MASK;
|
||||
+ reg_data = edma_reg_read(
|
||||
+ EDMA_REG_TX_INT_STAT(txcmpl_ring->id));
|
||||
+ txcmpl_intr_status |= reg_data &
|
||||
+ EDMA_TXCMPL_RING_INT_STATUS_MASK;
|
||||
|
||||
- /*
|
||||
- * Disable TxCmpl intr
|
||||
- */
|
||||
- edma_reg_write(EDMA_REG_TX_INT_MASK(txcmpl_ring->id),
|
||||
- EDMA_MASK_INT_DISABLE);
|
||||
- }
|
||||
+ /*
|
||||
+ * Disable TxCmpl intr
|
||||
+ */
|
||||
+ edma_reg_write(EDMA_REG_TX_INT_MASK(txcmpl_ring->id),
|
||||
+ EDMA_MASK_INT_DISABLE);
|
||||
|
||||
if (txcmpl_intr_status == 0)
|
||||
return IRQ_NONE;
|
||||
@@ -827,8 +808,8 @@ irqreturn_t edma_tx_handle_irq(int irq, void *ctx)
|
||||
*TODO - per core NAPI
|
||||
*/
|
||||
if (txcmpl_intr_status)
|
||||
- if (likely(napi_schedule_prep(&ehw->tx_napi)))
|
||||
- __napi_schedule(&ehw->tx_napi);
|
||||
+ if (likely(napi_schedule_prep(&txcmpl_ring->napi)))
|
||||
+ __napi_schedule(&txcmpl_ring->napi);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
--
2.36.1

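The tx side gets the same per-ring treatment above. One detail worth isolating is how the tx-completion poll wakes a stopped queue once completions free descriptors, roughly as sketched here (illustrative names, error paths trimmed):

#include <linux/netdevice.h>

struct demo_tx_ring {
	struct napi_struct napi;
	struct net_device *ndev;
};

static u32 demo_reap_tx(struct demo_tx_ring *ring) { return 0; }	/* stub */

static int demo_tx_poll(struct napi_struct *napi, int budget)
{
	struct demo_tx_ring *ring = container_of(napi, struct demo_tx_ring, napi);
	u32 reaped = demo_reap_tx(ring);

	/* completions freed descriptors: let the stack queue packets again */
	if (reaped && netif_queue_stopped(ring->ndev))
		netif_start_queue(ring->ndev);

	napi_complete(napi);	/* then unmask this ring's tx-complete interrupt */
	return 0;
}
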
@@ -0,0 +1,112 @@
From 0f7bf6cf6fd536cd5965d596067e469e84559761 Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Fri, 24 Jun 2022 20:04:17 +0200
Subject: [PATCH 20/21] edma_v1: add support for threaded napi

Add required changes to enable threaded napi. Also change rxfill to use
napi_alloc_skb.

Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
---
 hal/dp_ops/edma_dp/edma_v1/edma_cfg.c        |  5 -----
 hal/dp_ops/edma_dp/edma_v1/edma_data_plane.c | 12 ++++++++++--
 hal/dp_ops/edma_dp/edma_v1/edma_data_plane.h |  4 ++--
 hal/dp_ops/edma_dp/edma_v1/edma_tx_rx.c      |  7 ++++---
 4 files changed, 16 insertions(+), 12 deletions(-)
diff --git a/hal/dp_ops/edma_dp/edma_v1/edma_cfg.c b/hal/dp_ops/edma_dp/edma_v1/edma_cfg.c
|
||||
index 6f2c082..33f4297 100644
|
||||
--- a/hal/dp_ops/edma_dp/edma_v1/edma_cfg.c
|
||||
+++ b/hal/dp_ops/edma_dp/edma_v1/edma_cfg.c
|
||||
@@ -670,11 +670,6 @@ static void edma_configure_rxfill_ring(struct edma_hw *ehw,
|
||||
|
||||
data = rxfill_ring->count & EDMA_RXFILL_RING_SIZE_MASK;
|
||||
edma_reg_write(EDMA_REG_RXFILL_RING_SIZE(rxfill_ring->id), data);
|
||||
-
|
||||
- /*
|
||||
- * Alloc Rx buffers
|
||||
- */
|
||||
- edma_alloc_rx_buffer(ehw, rxfill_ring);
|
||||
}
|
||||
|
||||
/*
|
||||
diff --git a/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.c b/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.c
|
||||
index 49c7f8c..3736254 100644
|
||||
--- a/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.c
|
||||
+++ b/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.c
|
||||
@@ -457,8 +457,15 @@ static int edma_if_open(struct nss_dp_data_plane_ctx *dpc,
|
||||
if (edma_hw.active++ != 0)
|
||||
return NSS_DP_SUCCESS;
|
||||
|
||||
- for (i = 0; i < edma_hw.rxdesc_rings; i++)
|
||||
- napi_enable(&edma_hw.rxdesc_ring[i].napi);
|
||||
+ for (i = 0; i < edma_hw.rxdesc_rings; i++) {
|
||||
+ struct edma_rxdesc_ring *rxdesc_ring = &edma_hw.rxdesc_ring[i];
|
||||
+ /*
|
||||
+ * Alloc Rx buffers
|
||||
+ */
|
||||
+ edma_alloc_rx_buffer(&edma_hw, &rxdesc_ring->napi, rxdesc_ring->rxfill);
|
||||
+
|
||||
+ napi_enable(&rxdesc_ring->napi);
|
||||
+ }
|
||||
|
||||
for (i = 0; i < edma_hw.txcmpl_rings; i++)
|
||||
napi_enable(&edma_hw.txcmpl_ring[i].napi);
|
||||
@@ -899,6 +906,7 @@ static int edma_if_init(struct nss_dp_data_plane_ctx *dpc)
|
||||
* Headroom needed for Tx preheader
|
||||
*/
|
||||
netdev->needed_headroom += EDMA_TX_PREHDR_SIZE;
|
||||
+ dev_set_threaded(netdev, true);
|
||||
|
||||
return NSS_DP_SUCCESS;
|
||||
}
|
||||
diff --git a/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.h b/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.h
|
||||
index 8ec7e35..7f8a8d4 100644
|
||||
--- a/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.h
|
||||
+++ b/hal/dp_ops/edma_dp/edma_v1/edma_data_plane.h
|
||||
@@ -276,8 +276,8 @@ extern struct edma_hw edma_hw;
|
||||
uint32_t edma_reg_read(uint32_t reg_off);
|
||||
void edma_reg_write(uint32_t reg_off, uint32_t val);
|
||||
|
||||
-int edma_alloc_rx_buffer(struct edma_hw *ehw,
|
||||
- struct edma_rxfill_ring *rxfill_ring);
|
||||
+int edma_alloc_rx_buffer(struct edma_hw *ehw, struct napi_struct *napi,
|
||||
+ struct edma_rxfill_ring *rxfill_ring);
|
||||
enum edma_tx edma_ring_xmit(struct edma_hw *ehw,
|
||||
struct net_device *netdev,
|
||||
struct sk_buff *skb,
|
||||
diff --git a/hal/dp_ops/edma_dp/edma_v1/edma_tx_rx.c b/hal/dp_ops/edma_dp/edma_v1/edma_tx_rx.c
|
||||
index 8221a9c..001f883 100644
|
||||
--- a/hal/dp_ops/edma_dp/edma_v1/edma_tx_rx.c
|
||||
+++ b/hal/dp_ops/edma_dp/edma_v1/edma_tx_rx.c
|
||||
@@ -29,7 +29,8 @@
|
||||
* Alloc Rx buffers for one RxFill ring
|
||||
*/
|
||||
int edma_alloc_rx_buffer(struct edma_hw *ehw,
|
||||
- struct edma_rxfill_ring *rxfill_ring)
|
||||
+ struct napi_struct *napi,
|
||||
+ struct edma_rxfill_ring *rxfill_ring)
|
||||
{
|
||||
struct platform_device *pdev = ehw->pdev;
|
||||
struct sk_buff *skb;
|
||||
@@ -64,7 +65,7 @@ int edma_alloc_rx_buffer(struct edma_hw *ehw,
|
||||
/*
|
||||
* Allocate buffer
|
||||
*/
|
||||
- skb = dev_alloc_skb(alloc_size);
|
||||
+ skb = napi_alloc_skb(napi, alloc_size);
|
||||
if (unlikely(!skb))
|
||||
break;
|
||||
|
||||
@@ -445,7 +446,7 @@ next_rx_desc:
|
||||
work_done++;
|
||||
}
|
||||
|
||||
- edma_alloc_rx_buffer(ehw, rxdesc_ring->rxfill);
|
||||
+ edma_alloc_rx_buffer(ehw, &rxdesc_ring->napi, rxdesc_ring->rxfill);
|
||||
|
||||
/*
|
||||
* make sure the consumer index is updated
|
||||
--
2.36.1

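The patch above enables threaded NAPI via dev_set_threaded() (available since kernel 5.12) and switches the refill path to napi_alloc_skb(). A small sketch of both pieces, with illustrative helper names:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* refill one descriptor slot from (threaded) NAPI context */
static int demo_refill_one(struct napi_struct *napi, struct sk_buff **slot,
			   unsigned int len)
{
	/* napi_alloc_skb() uses the per-CPU NAPI page fragment cache, which is
	 * cheaper than dev_alloc_skb() when running inside a NAPI poll */
	struct sk_buff *skb = napi_alloc_skb(napi, len);

	if (unlikely(!skb))
		return -ENOMEM;

	*slot = skb;
	return 0;
}

/* move all NAPI polling for this netdev into kernel threads */
static int demo_enable_threaded(struct net_device *ndev)
{
	return dev_set_threaded(ndev, true);
}

With threaded NAPI each per-ring poller runs in its own kthread, so the scheduler can spread the rx and tx work of the previous patches across CPUs.
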
@@ -0,0 +1,204 @@
From 1e1543fb908e382001881d2aad7ab9d4a903c0ee Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Fri, 24 Jun 2022 22:08:25 +0200
Subject: [PATCH] edma_v1: improve handling with rx/tx irq and napi pool

Correctly disable interrupt and schedule napi instead of consuming the
entire ring and emit napi_complete.

Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
---
 hal/dp_ops/edma_dp/edma_v1/edma_tx_rx.c | 133 ++++++++----------------
 1 file changed, 46 insertions(+), 87 deletions(-)
diff --git a/hal/dp_ops/edma_dp/edma_v1/edma_tx_rx.c b/hal/dp_ops/edma_dp/edma_v1/edma_tx_rx.c
|
||||
index 2bf1467..82fb697 100644
|
||||
--- a/hal/dp_ops/edma_dp/edma_v1/edma_tx_rx.c
|
||||
+++ b/hal/dp_ops/edma_dp/edma_v1/edma_tx_rx.c
|
||||
@@ -460,35 +460,30 @@ next_rx_desc:
|
||||
int edma_rx_napi(struct napi_struct *napi, int budget)
|
||||
{
|
||||
struct edma_rxdesc_ring *rxdesc_ring = container_of(napi, struct edma_rxdesc_ring, napi);
|
||||
- struct edma_rxfill_ring *rxfill_ring = rxdesc_ring->rxfill;
|
||||
struct edma_hw *ehw = rxdesc_ring->ehw;
|
||||
+ uint32_t status;
|
||||
|
||||
int work_done = 0;
|
||||
|
||||
- work_done += edma_clean_rx(ehw, budget, rxdesc_ring);
|
||||
+ do {
|
||||
+ work_done += edma_clean_rx(ehw, budget, rxdesc_ring);
|
||||
+ if (likely(work_done >= budget)) {
|
||||
+ return work_done;
|
||||
+ }
|
||||
+
|
||||
+ status = EDMA_RXDESC_RING_INT_STATUS_MASK & edma_reg_read(
|
||||
+ EDMA_REG_RXDESC_INT_STAT(rxdesc_ring->id));
|
||||
+ } while (likely(status));
|
||||
+
|
||||
+ napi_complete(napi);
|
||||
|
||||
/*
|
||||
- * TODO - rework and fix the budget control
|
||||
+ * Set RXDESC ring interrupt mask
|
||||
*/
|
||||
- if (work_done < budget) {
|
||||
- /*
|
||||
- * TODO per core NAPI
|
||||
- */
|
||||
- napi_complete(napi);
|
||||
+ edma_reg_write(
|
||||
+ EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->id),
|
||||
+ ehw->rxdesc_intr_mask);
|
||||
|
||||
- /*
|
||||
- * Set RXDESC ring interrupt mask
|
||||
- */
|
||||
- edma_reg_write(
|
||||
- EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->id),
|
||||
- ehw->rxdesc_intr_mask);
|
||||
-
|
||||
- /*
|
||||
- * Set RXFILL ring interrupt mask
|
||||
- */
|
||||
- edma_reg_write(EDMA_REG_RXFILL_INT_MASK(rxfill_ring->id),
|
||||
- edma_hw.rxfill_intr_mask);
|
||||
- }
|
||||
return work_done;
|
||||
}
|
||||
|
||||
@@ -503,10 +498,27 @@ int edma_tx_napi(struct napi_struct *napi, int budget)
|
||||
|
||||
struct net_device *ndev;
|
||||
int work_done = 0;
|
||||
+ uint32_t status;
|
||||
int i;
|
||||
|
||||
- edma_clean_tx(ehw, txcmpl_ring);
|
||||
+ do {
|
||||
+ work_done += edma_clean_tx(ehw, txcmpl_ring);
|
||||
+ if (work_done >= budget) {
|
||||
+ goto exit;
|
||||
+ }
|
||||
+
|
||||
+ status = EDMA_TXCMPL_RING_INT_STATUS_MASK & edma_reg_read(EDMA_REG_TX_INT_STAT(txcmpl_ring->id));
|
||||
+ } while (status);
|
||||
|
||||
+ napi_complete(napi);
|
||||
+
|
||||
+ /*
|
||||
+ * Set TXCMPL ring interrupt mask
|
||||
+ */
|
||||
+ edma_reg_write(EDMA_REG_TX_INT_MASK(txcmpl_ring->id),
|
||||
+ ehw->txcmpl_intr_mask);
|
||||
+
|
||||
+exit:
|
||||
/*
|
||||
* Resume netdev Tx queue
|
||||
*/
|
||||
@@ -524,21 +536,6 @@ int edma_tx_napi(struct napi_struct *napi, int budget)
|
||||
netif_start_queue(ndev);
|
||||
}
|
||||
|
||||
- /*
|
||||
- * TODO - rework and fix the budget control
|
||||
- */
|
||||
- if (work_done < budget) {
|
||||
- /*
|
||||
- * TODO per core NAPI
|
||||
- */
|
||||
- napi_complete(napi);
|
||||
-
|
||||
- /*
|
||||
- * Set TXCMPL ring interrupt mask
|
||||
- */
|
||||
- edma_reg_write(EDMA_REG_TX_INT_MASK(txcmpl_ring->id),
|
||||
- ehw->txcmpl_intr_mask);
|
||||
- }
|
||||
return work_done;
|
||||
}
|
||||
|
||||
@@ -741,35 +738,14 @@ irqreturn_t edma_handle_misc_irq(int irq, void *ctx)
|
||||
irqreturn_t edma_rx_handle_irq(int irq, void *ctx)
|
||||
{
|
||||
struct edma_rxdesc_ring *rxdesc_ring = (struct edma_rxdesc_ring *)ctx;
|
||||
- uint32_t rxdesc_intr_status = 0;
|
||||
- uint32_t reg_data = 0;
|
||||
|
||||
- /*
|
||||
- * Read RxDesc intr status
|
||||
- */
|
||||
- reg_data = edma_reg_read(
|
||||
- EDMA_REG_RXDESC_INT_STAT(rxdesc_ring->id));
|
||||
- rxdesc_intr_status |= reg_data &
|
||||
- EDMA_RXDESC_RING_INT_STATUS_MASK;
|
||||
+ if (likely(napi_schedule_prep(&rxdesc_ring->napi))) {
|
||||
|
||||
- /*
|
||||
- * Disable RxDesc intr
|
||||
- */
|
||||
- edma_reg_write(EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->id),
|
||||
+ edma_reg_write(EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->id),
|
||||
EDMA_MASK_INT_DISABLE);
|
||||
|
||||
- if (rxdesc_intr_status == 0)
|
||||
- return IRQ_NONE;
|
||||
-
|
||||
- edma_reg_write(EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->id),
|
||||
- EDMA_MASK_INT_DISABLE);
|
||||
-
|
||||
- /*
|
||||
- *TODO - per core NAPI
|
||||
- */
|
||||
- if (rxdesc_intr_status)
|
||||
- if (likely(napi_schedule_prep(&rxdesc_ring->napi)))
|
||||
- __napi_schedule(&rxdesc_ring->napi);
|
||||
+ __napi_schedule(&rxdesc_ring->napi);
|
||||
+ }
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
@@ -781,32 +757,15 @@ irqreturn_t edma_rx_handle_irq(int irq, void *ctx)
|
||||
irqreturn_t edma_tx_handle_irq(int irq, void *ctx)
|
||||
{
|
||||
struct edma_txcmpl_ring *txcmpl_ring = (struct edma_txcmpl_ring *)ctx;
|
||||
- uint32_t txcmpl_intr_status = 0;
|
||||
- uint32_t reg_data = 0;
|
||||
|
||||
- /*
|
||||
- * Read TxCmpl intr status
|
||||
- */
|
||||
- reg_data = edma_reg_read(
|
||||
- EDMA_REG_TX_INT_STAT(txcmpl_ring->id));
|
||||
- txcmpl_intr_status |= reg_data &
|
||||
- EDMA_TXCMPL_RING_INT_STATUS_MASK;
|
||||
-
|
||||
- /*
|
||||
- * Disable TxCmpl intr
|
||||
- */
|
||||
- edma_reg_write(EDMA_REG_TX_INT_MASK(txcmpl_ring->id),
|
||||
- EDMA_MASK_INT_DISABLE);
|
||||
-
|
||||
- if (txcmpl_intr_status == 0)
|
||||
- return IRQ_NONE;
|
||||
-
|
||||
- /*
|
||||
- *TODO - per core NAPI
|
||||
- */
|
||||
- if (txcmpl_intr_status)
|
||||
- if (likely(napi_schedule_prep(&txcmpl_ring->napi)))
|
||||
- __napi_schedule(&txcmpl_ring->napi);
|
||||
+ if (likely(napi_schedule_prep(&txcmpl_ring->napi))) {
|
||||
+ /*
|
||||
+ * Disable TxCmpl intr
|
||||
+ */
|
||||
+ edma_reg_write(EDMA_REG_TX_INT_MASK(txcmpl_ring->id),
|
||||
+ EDMA_MASK_INT_DISABLE);
|
||||
+ __napi_schedule(&txcmpl_ring->napi);
|
||||
+ }
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
--
2.36.1

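The final patch above reworks the poll loops so they respect the budget: keep cleaning while the ring's status register reports pending work, bail out early when the budget is exhausted (leaving interrupts masked so NAPI polls again), and only complete and unmask once caught up. The IRQ handlers now mask and schedule only when napi_schedule_prep() succeeds. A condensed sketch of that pattern with hypothetical demo_* names and registers:

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/netdevice.h>

struct demo_ring {
	struct napi_struct napi;
	void __iomem *stat;	/* pending-work status register (hypothetical) */
	void __iomem *mask;	/* interrupt mask register (hypothetical) */
	u32 unmask_val;
};

static int demo_clean(struct demo_ring *r, int budget) { return 0; }	/* stub */

static int demo_poll(struct napi_struct *napi, int budget)
{
	struct demo_ring *r = container_of(napi, struct demo_ring, napi);
	int work = 0;

	do {
		work += demo_clean(r, budget - work);
		if (work >= budget)
			return work;	/* budget hit: stay masked, napi polls again */
	} while (readl(r->stat));	/* hardware queued more while we cleaned */

	napi_complete(napi);
	writel(r->unmask_val, r->mask);	/* caught up: re-enable the interrupt */
	return work;
}

static irqreturn_t demo_irq(int irq, void *ctx)
{
	struct demo_ring *r = ctx;

	if (likely(napi_schedule_prep(&r->napi))) {
		writel(0, r->mask);	/* mask first, the poll loop will unmask */
		__napi_schedule(&r->napi);
	}
	return IRQ_HANDLED;
}
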