Mirror of https://github.com/qosmio/nss-packages.git (synced 2025-12-19 17:51:41 +00:00)
revert again...
This commit is contained in:
parent 21b3164bf7, commit 476aeca1d4
@@ -1,581 +0,0 @@
From 12cf63f66bfe509da6d845e5c716efd99dadf01e Mon Sep 17 00:00:00 2001
From: Ansuel Smith <ansuelsmth@gmail.com>
Date: Tue, 5 Apr 2022 15:38:18 +0200
Subject: [PATCH 4/8] nss-drv: rework NSS_CORE_DMA_CACHE_MAINT ops

Rework the NSS_CORE_DMA_CACHE_MAINT ops to use the standard DMA sync ops
instead of calling the arch functions directly. This permits dropping the
hacks/patches needed for nss-drv to compile correctly on an upstream
kernel.

We drop every NSS_CORE_DMA_CACHE_MAINT use in nss_core and instead call
dma_sync_single_for_device() on the correct DMA address, computed with the
new DMA offset helpers.
We drop the sync for the IOREMAP addr and just leave the memory barrier.
We hope the nss_profiler is correctly ported.
We finally turn NSS_CORE_DMA_CACHE_MAINT into a build error, just in case
someone still tries to use it.

Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
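For context, the conversion in every hunk below follows one pattern: the old code called the ARM-private dmac_clean_range()/dmac_inv_range() on a kernel virtual address, which upstream kernels no longer expose to modules; the new code calls dma_sync_single_for_device() on a dma_addr_t built from the base DMA handle of the shared mapping plus the offset of the field being synced. A minimal sketch of that pattern, with a hypothetical example_map struct standing in for the real nss_if_mem_map layout:

	#include <linux/dma-mapping.h>
	#include <linux/stddef.h>
	#include <linux/types.h>

	/* Hypothetical stand-in for the shared HLOS<->NSS memory map. */
	struct example_map {
		uint32_t n2h_nss_index[4];
		uint32_t n2h_hlos_index[4];
	};

	/*
	 * Old style (arch-private, breaks the build on upstream kernels):
	 *	dmac_clean_range(&map->n2h_hlos_index[q],
	 *			 &map->n2h_hlos_index[q] + 1);
	 *
	 * New style: go through the DMA API, addressing the field as the
	 * mapping's dma_addr_t plus its offset, so no arch internals are used.
	 */
	static void example_flush_hlos_index(struct device *dev,
					     dma_addr_t map_dma, int q)
	{
		dma_addr_t field = map_dma
				 + offsetof(struct example_map, n2h_hlos_index)
				 + sizeof(uint32_t) * q;

		dma_sync_single_for_device(dev, field, sizeof(uint32_t),
					   DMA_TO_DEVICE);
	}

This is exactly what the n2h/h2n *_index_to_dma() helpers introduced in nss_core.h compute.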
---
 nss_core.c                    | 136 +++++++++++++++++++++++++---------
 nss_core.h                    |  41 +++++-----
 nss_hal/ipq806x/nss_hal_pvt.c |   5 +-
 nss_hal/ipq807x/nss_hal_pvt.c |   5 +-
 nss_meminfo.c                 |   5 +-
 nss_profiler.c                |   3 +-
 6 files changed, 127 insertions(+), 68 deletions(-)

--- a/nss_core.c
+++ b/nss_core.c
@@ -1414,6 +1414,8 @@ static inline void nss_core_handle_empty
 			uint32_t count, uint32_t hlos_index,
 			uint16_t mask)
 {
+	struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx;
+
 	while (count) {
 		/*
 		 * Since we only return the primary skb, we have no way to unmap
@@ -1467,7 +1469,9 @@ next:
 	n2h_desc_ring->hlos_index = hlos_index;
 	if_map->n2h_hlos_index[NSS_IF_N2H_EMPTY_BUFFER_RETURN_QUEUE] = hlos_index;

-	NSS_CORE_DMA_CACHE_MAINT((void *)&if_map->n2h_hlos_index[NSS_IF_N2H_EMPTY_BUFFER_RETURN_QUEUE], sizeof(uint32_t), DMA_TO_DEVICE);
+	dma_sync_single_for_device(nss_ctx->dev,
+		n2h_hlos_index_to_dma(mem_ctx->if_map_dma, NSS_IF_N2H_EMPTY_BUFFER_RETURN_QUEUE),
+		sizeof(uint32_t), DMA_TO_DEVICE);
 	NSS_CORE_DSB();
 }

@@ -1489,6 +1493,7 @@ static int32_t nss_core_handle_cause_que
 	struct nss_ctx_instance *nss_ctx = int_ctx->nss_ctx;
 	struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx;
 	struct nss_if_mem_map *if_map = mem_ctx->if_map;
+	int dma_size;

 	qid = nss_core_cause_to_queue(cause);

@@ -1500,7 +1505,8 @@ static int32_t nss_core_handle_cause_que
 	n2h_desc_ring = &nss_ctx->n2h_desc_ring[qid];
 	desc_if = &n2h_desc_ring->desc_ring;
 	desc_ring = desc_if->desc;
-	NSS_CORE_DMA_CACHE_MAINT((void *)&if_map->n2h_nss_index[qid], sizeof(uint32_t), DMA_FROM_DEVICE);
+	dma_sync_single_for_device(nss_ctx->dev, n2h_nss_index_to_dma(mem_ctx->if_map_dma, qid),
+		sizeof(uint32_t), DMA_FROM_DEVICE);
 	NSS_CORE_DSB();
 	nss_index = if_map->n2h_nss_index[qid];

@@ -1529,13 +1535,23 @@ static int32_t nss_core_handle_cause_que
 	start = hlos_index;
 	end = (hlos_index + count) & mask;
 	if (end > start) {
-		dmac_inv_range((void *)&desc_ring[start], (void *)&desc_ring[end] + sizeof(struct n2h_descriptor));
+		dma_size = sizeof(struct n2h_descriptor) * (end - start + 1);
+
+		dma_sync_single_for_device(nss_ctx->dev, n2h_desc_index_to_dma(if_map, qid, start),
+			dma_size, DMA_FROM_DEVICE);
 	} else {
 		/*
 		 * We have wrapped around
 		 */
-		dmac_inv_range((void *)&desc_ring[start], (void *)&desc_ring[mask] + sizeof(struct n2h_descriptor));
-		dmac_inv_range((void *)&desc_ring[0], (void *)&desc_ring[end] + sizeof(struct n2h_descriptor));
+		dma_size = sizeof(struct n2h_descriptor) * (mask - start + 1);
+
+		dma_sync_single_for_device(nss_ctx->dev, n2h_desc_index_to_dma(if_map, qid, start),
+			dma_size, DMA_FROM_DEVICE);
+
+		dma_size = sizeof(struct n2h_descriptor) * (end + 1);
+
+		dma_sync_single_for_device(nss_ctx->dev, n2h_desc_index_to_dma(if_map, qid, 0), dma_size,
+			DMA_FROM_DEVICE);
 	}

 	/*
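The hunk above is the one place where the conversion is more than a one-for-one swap: the descriptor ring is circular, so when the region to invalidate wraps past the end of the ring, the old pair of dmac_inv_range() calls becomes two dma_sync_single_for_device() calls, one for [start, mask] and one for [0, end]. A hedged sketch of the same size arithmetic on a hypothetical ring (ring_sync() is not driver code):

	#include <linux/dma-mapping.h>

	/*
	 * Sync the descriptors in [start, end] (inclusive) of a ring of
	 * (mask + 1) fixed-size entries at DMA address ring_dma, splitting
	 * the transfer when the range wraps -- mirrors the hunk above.
	 */
	static void ring_sync(struct device *dev, dma_addr_t ring_dma,
			      size_t desc_sz, u32 start, u32 end, u32 mask)
	{
		if (end > start) {
			dma_sync_single_for_device(dev, ring_dma + desc_sz * start,
						   desc_sz * (end - start + 1),
						   DMA_FROM_DEVICE);
			return;
		}

		/* Wrapped: tail of the ring first, then the head up to end. */
		dma_sync_single_for_device(dev, ring_dma + desc_sz * start,
					   desc_sz * (mask - start + 1),
					   DMA_FROM_DEVICE);
		dma_sync_single_for_device(dev, ring_dma,
					   desc_sz * (end + 1), DMA_FROM_DEVICE);
	}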
@@ -1664,7 +1680,8 @@ next:
 	n2h_desc_ring->hlos_index = hlos_index;
 	if_map->n2h_hlos_index[qid] = hlos_index;

-	NSS_CORE_DMA_CACHE_MAINT((void *)&if_map->n2h_hlos_index[qid], sizeof(uint32_t), DMA_TO_DEVICE);
+	dma_sync_single_for_device(nss_ctx->dev, n2h_hlos_index_to_dma(mem_ctx->if_map_dma, qid),
+		sizeof(uint32_t), DMA_TO_DEVICE);
 	NSS_CORE_DSB();

 	return count;
@@ -1676,10 +1693,11 @@ next:
 */
static void nss_core_init_nss(struct nss_ctx_instance *nss_ctx, struct nss_if_mem_map *if_map)
{
+	struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx;
 	struct nss_top_instance *nss_top;
 	int ret;

-	NSS_CORE_DMA_CACHE_MAINT((void *)if_map, sizeof(*if_map), DMA_FROM_DEVICE);
+	dma_sync_single_for_device(nss_ctx->dev, mem_ctx->if_map_dma, sizeof(*if_map), DMA_FROM_DEVICE);
 	NSS_CORE_DSB();

 	/*
@@ -1747,6 +1765,7 @@ static void nss_core_alloc_paged_buffers
 		uint16_t count, int16_t mask, int32_t hlos_index, uint32_t alloc_fail_count,
 		uint32_t buffer_type, uint32_t buffer_queue, uint32_t stats_index)
 {
+	struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx;
 	struct sk_buff *nbuf;
 	struct page *npage;
 	struct hlos_h2n_desc_rings *h2n_desc_ring = &nss_ctx->h2n_desc_rings[buffer_queue];
@@ -1816,7 +1835,9 @@ static void nss_core_alloc_paged_buffers
 		/*
 		 * Flush the descriptor
 		 */
-		NSS_CORE_DMA_CACHE_MAINT((void *)desc, sizeof(*desc), DMA_TO_DEVICE);
+		dma_sync_single_for_device(nss_ctx->dev,
+			h2n_desc_index_to_dma(if_map, buffer_queue, hlos_index),
+			sizeof(*desc), DMA_TO_DEVICE);

 		hlos_index = (hlos_index + 1) & (mask);
 		count--;
@@ -1830,7 +1851,8 @@ static void nss_core_alloc_paged_buffers
 	h2n_desc_ring->hlos_index = hlos_index;
 	if_map->h2n_hlos_index[buffer_queue] = hlos_index;

-	NSS_CORE_DMA_CACHE_MAINT(&if_map->h2n_hlos_index[buffer_queue], sizeof(uint32_t), DMA_TO_DEVICE);
+	dma_sync_single_for_device(nss_ctx->dev, h2n_hlos_index_to_dma(mem_ctx->if_map_dma, buffer_queue),
+		sizeof(uint32_t), DMA_TO_DEVICE);
 	NSS_CORE_DSB();

 	NSS_PKT_STATS_INC(&nss_top->stats_drv[stats_index]);
@@ -1843,7 +1865,7 @@ static void nss_core_alloc_paged_buffers
 static void nss_core_alloc_jumbo_mru_buffers(struct nss_ctx_instance *nss_ctx, struct nss_if_mem_map *if_map,
 		int jumbo_mru, uint16_t count, int16_t mask, int32_t hlos_index)
 {
-
+	struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx;
 	struct sk_buff *nbuf;
 	struct hlos_h2n_desc_rings *h2n_desc_ring = &nss_ctx->h2n_desc_rings[NSS_IF_H2N_EMPTY_BUFFER_QUEUE];
 	struct h2n_desc_if_instance *desc_if = &h2n_desc_ring->desc_ring;
@@ -1890,7 +1912,9 @@ static void nss_core_alloc_jumbo_mru_buf
 		/*
 		 * Flush the descriptor
 		 */
-		NSS_CORE_DMA_CACHE_MAINT((void *)desc, sizeof(*desc), DMA_TO_DEVICE);
+		dma_sync_single_for_device(nss_ctx->dev,
+			h2n_desc_index_to_dma(if_map, NSS_IF_H2N_EMPTY_BUFFER_QUEUE, hlos_index),
+			sizeof(*desc), DMA_TO_DEVICE);

 		hlos_index = (hlos_index + 1) & (mask);
 		count--;
@@ -1904,7 +1928,8 @@ static void nss_core_alloc_jumbo_mru_buf
 	h2n_desc_ring->hlos_index = hlos_index;
 	if_map->h2n_hlos_index[NSS_IF_H2N_EMPTY_BUFFER_QUEUE] = hlos_index;

-	NSS_CORE_DMA_CACHE_MAINT(&if_map->h2n_hlos_index[NSS_IF_H2N_EMPTY_BUFFER_QUEUE], sizeof(uint32_t), DMA_TO_DEVICE);
+	dma_sync_single_for_device(nss_ctx->dev, h2n_hlos_index_to_dma(mem_ctx->if_map_dma, NSS_IF_H2N_EMPTY_BUFFER_QUEUE),
+		sizeof(uint32_t), DMA_TO_DEVICE);
 	NSS_CORE_DSB();

 	NSS_PKT_STATS_INC(&nss_top->stats_drv[NSS_DRV_STATS_TX_EMPTY]);
@@ -1917,6 +1942,7 @@ static void nss_core_alloc_jumbo_mru_buf
 static void nss_core_alloc_max_avail_size_buffers(struct nss_ctx_instance *nss_ctx, struct nss_if_mem_map *if_map,
 		uint16_t max_buf_size, uint16_t count, int16_t mask, int32_t hlos_index)
 {
+	struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx;
 	struct hlos_h2n_desc_rings *h2n_desc_ring = &nss_ctx->h2n_desc_rings[NSS_IF_H2N_EMPTY_BUFFER_QUEUE];
 	struct h2n_desc_if_instance *desc_if = &h2n_desc_ring->desc_ring;
 	struct h2n_descriptor *desc_ring = desc_if->desc;
@@ -1924,6 +1950,7 @@ static void nss_core_alloc_max_avail_siz
 	uint16_t payload_len = max_buf_size + NET_SKB_PAD;
 	uint16_t start = hlos_index;
 	uint16_t prev_hlos_index;
+	int dma_size;

 	while (count) {
 		dma_addr_t buffer;
@@ -1976,13 +2003,26 @@ static void nss_core_alloc_max_avail_siz
 	 * Flush the descriptors, including the descriptor at prev_hlos_index.
 	 */
 	if (prev_hlos_index > start) {
-		dmac_clean_range((void *)&desc_ring[start], (void *)&desc_ring[prev_hlos_index] + sizeof(struct h2n_descriptor));
+		dma_size = sizeof(struct h2n_descriptor) * (prev_hlos_index - start + 1);
+
+		dma_sync_single_for_device(nss_ctx->dev,
+			h2n_desc_index_to_dma(if_map, NSS_IF_H2N_EMPTY_BUFFER_QUEUE, start),
+			dma_size, DMA_TO_DEVICE);
 	} else {
 		/*
 		 * We have wrapped around
 		 */
-		dmac_clean_range((void *)&desc_ring[start], (void *)&desc_ring[mask] + sizeof(struct h2n_descriptor));
-		dmac_clean_range((void *)&desc_ring[0], (void *)&desc_ring[prev_hlos_index] + sizeof(struct h2n_descriptor));
+		dma_size = sizeof(struct h2n_descriptor) * (mask - start + 1);
+
+		dma_sync_single_for_device(nss_ctx->dev,
+			h2n_desc_index_to_dma(if_map, NSS_IF_H2N_EMPTY_BUFFER_QUEUE, start),
+			dma_size, DMA_TO_DEVICE);
+
+		dma_size = sizeof(struct h2n_descriptor) * (prev_hlos_index + 1);
+
+		dma_sync_single_for_device(nss_ctx->dev,
+			h2n_desc_index_to_dma(if_map, NSS_IF_H2N_EMPTY_BUFFER_QUEUE, 0),
+			dma_size, DMA_TO_DEVICE);
 	}

 	/*
@@ -1993,7 +2033,8 @@ static void nss_core_alloc_max_avail_siz
 	h2n_desc_ring->hlos_index = hlos_index;
 	if_map->h2n_hlos_index[NSS_IF_H2N_EMPTY_BUFFER_QUEUE] = hlos_index;

-	NSS_CORE_DMA_CACHE_MAINT(&if_map->h2n_hlos_index[NSS_IF_H2N_EMPTY_BUFFER_QUEUE], sizeof(uint32_t), DMA_TO_DEVICE);
+	dma_sync_single_for_device(nss_ctx->dev, h2n_hlos_index_to_dma(mem_ctx->if_map_dma, NSS_IF_H2N_EMPTY_BUFFER_QUEUE),
+		sizeof(uint32_t), DMA_TO_DEVICE);
 	NSS_CORE_DSB();

 	NSS_PKT_STATS_INC(&nss_top->stats_drv[NSS_DRV_STATS_TX_EMPTY]);
@@ -2006,6 +2047,7 @@ static void nss_core_alloc_max_avail_siz
 static inline void nss_core_handle_empty_buffer_sos(struct nss_ctx_instance *nss_ctx,
 		struct nss_if_mem_map *if_map, uint16_t max_buf_size)
 {
+	struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx;
 	uint16_t count, size, mask;
 	int32_t nss_index, hlos_index;
 	struct hlos_h2n_desc_rings *h2n_desc_ring = &nss_ctx->h2n_desc_rings[NSS_IF_H2N_EMPTY_BUFFER_QUEUE];
@@ -2016,7 +2058,8 @@ static inline void nss_core_handle_empty
 	/*
 	 * Check how many empty buffers could be filled in queue
 	 */
-	NSS_CORE_DMA_CACHE_MAINT(&if_map->h2n_nss_index[NSS_IF_H2N_EMPTY_BUFFER_QUEUE], sizeof(uint32_t), DMA_FROM_DEVICE);
+	dma_sync_single_for_device(nss_ctx->dev, h2n_nss_index_to_dma(mem_ctx->if_map_dma, NSS_IF_H2N_EMPTY_BUFFER_QUEUE),
+		sizeof(uint32_t), DMA_FROM_DEVICE);
 	NSS_CORE_DSB();
 	nss_index = if_map->h2n_nss_index[NSS_IF_H2N_EMPTY_BUFFER_QUEUE];

@@ -2061,6 +2104,7 @@ static inline void nss_core_handle_empty
 static inline void nss_core_handle_paged_empty_buffer_sos(struct nss_ctx_instance *nss_ctx,
 		struct nss_if_mem_map *if_map, uint16_t max_buf_size)
 {
+	struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx;
 	uint16_t count, size, mask;
 	int32_t nss_index, hlos_index;
 	struct hlos_h2n_desc_rings *h2n_desc_ring = &nss_ctx->h2n_desc_rings[NSS_IF_H2N_EMPTY_PAGED_BUFFER_QUEUE];
@@ -2068,7 +2112,8 @@ static inline void nss_core_handle_paged
 	/*
 	 * Check how many empty buffers could be filled in queue
 	 */
-	NSS_CORE_DMA_CACHE_MAINT((void *)&if_map->h2n_nss_index[NSS_IF_H2N_EMPTY_PAGED_BUFFER_QUEUE], sizeof(uint32_t), DMA_FROM_DEVICE);
+	dma_sync_single_for_device(nss_ctx->dev, h2n_nss_index_to_dma(mem_ctx->if_map_dma, NSS_IF_H2N_EMPTY_PAGED_BUFFER_QUEUE),
+		sizeof(uint32_t), DMA_FROM_DEVICE);
 	NSS_CORE_DSB();
 	nss_index = if_map->h2n_nss_index[NSS_IF_H2N_EMPTY_PAGED_BUFFER_QUEUE];

@@ -2640,9 +2685,11 @@ void nss_skb_reuse(struct sk_buff *nbuf)
 * Sends one skb to NSS FW
 */
static inline int32_t nss_core_send_buffer_simple_skb(struct nss_ctx_instance *nss_ctx,
-		struct h2n_desc_if_instance *desc_if, uint32_t if_num,
-		struct sk_buff *nbuf, uint16_t hlos_index, uint16_t flags, uint8_t buffer_type, uint16_t mss)
+		struct h2n_desc_if_instance *desc_if, uint32_t if_num, struct sk_buff *nbuf,
+		uint16_t qid, uint16_t hlos_index, uint16_t flags, uint8_t buffer_type, uint16_t mss)
{
+	struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx;
+	struct nss_if_mem_map *if_map = mem_ctx->if_map;
 	struct h2n_descriptor *desc_ring = desc_if->desc;
 	struct h2n_descriptor *desc;
 	uint16_t bit_flags;
@@ -2696,7 +2743,8 @@ static inline int32_t nss_core_send_buff
 		(nss_ptr_t)nbuf, (uint16_t)(nbuf->data - nbuf->head), nbuf->len,
 		sz, (uint32_t)nbuf->priority, mss, bit_flags);

-	NSS_CORE_DMA_CACHE_MAINT((void *)desc, sizeof(*desc), DMA_TO_DEVICE);
+	dma_sync_single_for_device(nss_ctx->dev, h2n_desc_index_to_dma(if_map, qid, hlos_index),
+		sizeof(*desc), DMA_TO_DEVICE);

 	/*
 	 * We are done using the skb fields and can reuse it now
@@ -2720,7 +2768,8 @@ no_reuse:
 		(nss_ptr_t)nbuf, (uint16_t)(nbuf->data - nbuf->head), nbuf->len,
 		(uint16_t)skb_end_offset(nbuf), (uint32_t)nbuf->priority, mss, bit_flags);

-	NSS_CORE_DMA_CACHE_MAINT((void *)desc, sizeof(*desc), DMA_TO_DEVICE);
+	dma_sync_single_for_device(nss_ctx->dev, h2n_desc_index_to_dma(if_map, qid, hlos_index),
+		sizeof(*desc), DMA_TO_DEVICE);

 	NSS_PKT_STATS_INC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_TX_SIMPLE]);
 	return 1;
@@ -2734,9 +2783,11 @@ no_reuse:
 * Used to differentiate from FRAGLIST
 */
static inline int32_t nss_core_send_buffer_nr_frags(struct nss_ctx_instance *nss_ctx,
-		struct h2n_desc_if_instance *desc_if, uint32_t if_num,
-		struct sk_buff *nbuf, uint16_t hlos_index, uint16_t flags, uint8_t buffer_type, uint16_t mss)
+		struct h2n_desc_if_instance *desc_if, uint32_t if_num, struct sk_buff *nbuf,
+		uint16_t qid, uint16_t hlos_index, uint16_t flags, uint8_t buffer_type, uint16_t mss)
{
+	struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx;
+	struct nss_if_mem_map *if_map = mem_ctx->if_map;
 	struct h2n_descriptor *desc_ring = desc_if->desc;
 	struct h2n_descriptor *desc;
 	const skb_frag_t *frag;
@@ -2776,7 +2827,8 @@ static inline int32_t nss_core_send_buff
 		(nss_ptr_t)NULL, nbuf->data - nbuf->head, nbuf->len - nbuf->data_len,
 		skb_end_offset(nbuf), (uint32_t)nbuf->priority, mss, bit_flags | H2N_BIT_FLAG_FIRST_SEGMENT);

-	NSS_CORE_DMA_CACHE_MAINT((void *)desc, sizeof(*desc), DMA_TO_DEVICE);
+	dma_sync_single_for_device(nss_ctx->dev, h2n_desc_index_to_dma(if_map, qid, hlos_index),
+		sizeof(*desc), DMA_TO_DEVICE);

 	/*
 	 * Now handle rest of the fragments.
@@ -2800,7 +2852,8 @@ static inline int32_t nss_core_send_buff
 			(nss_ptr_t)NULL, 0, skb_frag_size(frag), skb_frag_size(frag),
 			nbuf->priority, mss, bit_flags);

-		NSS_CORE_DMA_CACHE_MAINT((void *)desc, sizeof(*desc), DMA_TO_DEVICE);
+		dma_sync_single_for_device(nss_ctx->dev, h2n_desc_index_to_dma(if_map, qid, hlos_index),
+			sizeof(*desc), DMA_TO_DEVICE);
 	}

 	/*
@@ -2816,7 +2869,8 @@ static inline int32_t nss_core_send_buff
 	desc->bit_flags &= ~(H2N_BIT_FLAG_DISCARD);
 	desc->opaque = (nss_ptr_t)nbuf;

-	NSS_CORE_DMA_CACHE_MAINT((void *)desc, sizeof(*desc), DMA_TO_DEVICE);
+	dma_sync_single_for_device(nss_ctx->dev, h2n_desc_index_to_dma(if_map, qid, hlos_index),
+		sizeof(*desc), DMA_TO_DEVICE);

 	NSS_PKT_STATS_INC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_TX_NR_FRAGS]);
 	return i+1;
@@ -2830,9 +2884,11 @@ static inline int32_t nss_core_send_buff
 * Used to differentiate from FRAGS
 */
static inline int32_t nss_core_send_buffer_fraglist(struct nss_ctx_instance *nss_ctx,
-		struct h2n_desc_if_instance *desc_if, uint32_t if_num,
-		struct sk_buff *nbuf, uint16_t hlos_index, uint16_t flags, uint8_t buffer_type, uint16_t mss)
+		struct h2n_desc_if_instance *desc_if, uint32_t if_num, struct sk_buff *nbuf,
+		uint16_t qid, uint16_t hlos_index, uint16_t flags, uint8_t buffer_type, uint16_t mss)
{
+	struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx;
+	struct nss_if_mem_map *if_map = mem_ctx->if_map;
 	struct h2n_descriptor *desc_ring = desc_if->desc;
 	struct h2n_descriptor *desc;
 	dma_addr_t buffer;
@@ -2871,7 +2927,8 @@ static inline int32_t nss_core_send_buff
 		(nss_ptr_t)nbuf, nbuf->data - nbuf->head, nbuf->len - nbuf->data_len,
 		skb_end_offset(nbuf), (uint32_t)nbuf->priority, mss, bit_flags | H2N_BIT_FLAG_FIRST_SEGMENT);

-	NSS_CORE_DMA_CACHE_MAINT((void *)desc, sizeof(*desc), DMA_TO_DEVICE);
+	dma_sync_single_for_device(nss_ctx->dev, h2n_desc_index_to_dma(if_map, qid, hlos_index),
+		sizeof(*desc), DMA_TO_DEVICE);

 	/*
 	 * Walk the frag_list in nbuf
@@ -2924,7 +2981,8 @@ static inline int32_t nss_core_send_buff
 			(nss_ptr_t)iter, iter->data - iter->head, iter->len - iter->data_len,
 			skb_end_offset(iter), iter->priority, mss, bit_flags);

-		NSS_CORE_DMA_CACHE_MAINT((void *)desc, sizeof(*desc), DMA_TO_DEVICE);
+		dma_sync_single_for_device(nss_ctx->dev, h2n_desc_index_to_dma(if_map, qid, hlos_index),
+			sizeof(*desc), DMA_TO_DEVICE);

 		i++;
 	}
@@ -2943,7 +3001,8 @@ static inline int32_t nss_core_send_buff
 	 * Update bit flag for last descriptor.
 	 */
 	desc->bit_flags |= H2N_BIT_FLAG_LAST_SEGMENT;
-	NSS_CORE_DMA_CACHE_MAINT((void *)desc, sizeof(*desc), DMA_TO_DEVICE);
+	dma_sync_single_for_device(nss_ctx->dev, h2n_desc_index_to_dma(if_map, qid, hlos_index),
+		sizeof(*desc), DMA_TO_DEVICE);

 	NSS_PKT_STATS_INC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_TX_FRAGLIST]);
 	return i+1;
@@ -3014,8 +3073,10 @@ int32_t nss_core_send_buffer(struct nss_
 	 * We need to work out if there's sufficent space in our transmit descriptor
 	 * ring to place all the segments of a nbuf.
 	 */
-	NSS_CORE_DMA_CACHE_MAINT((void *)&if_map->h2n_nss_index[qid], sizeof(uint32_t), DMA_FROM_DEVICE);
+	dma_sync_single_for_device(nss_ctx->dev, h2n_nss_index_to_dma(mem_ctx->if_map_dma, qid),
+		sizeof(uint32_t), DMA_FROM_DEVICE);
 	NSS_CORE_DSB();
+
 	nss_index = if_map->h2n_nss_index[qid];

 	hlos_index = h2n_desc_ring->hlos_index;
@@ -3084,13 +3145,13 @@ int32_t nss_core_send_buffer(struct nss_
 	count = 0;
 	if (likely((segments == 0) || is_bounce)) {
 		count = nss_core_send_buffer_simple_skb(nss_ctx, desc_if, if_num,
-				nbuf, hlos_index, flags, buffer_type, mss);
+				nbuf, qid, hlos_index, flags, buffer_type, mss);
 	} else if (skb_has_frag_list(nbuf)) {
 		count = nss_core_send_buffer_fraglist(nss_ctx, desc_if, if_num,
-				nbuf, hlos_index, flags, buffer_type, mss);
+				nbuf, qid, hlos_index, flags, buffer_type, mss);
 	} else {
 		count = nss_core_send_buffer_nr_frags(nss_ctx, desc_if, if_num,
-				nbuf, hlos_index, flags, buffer_type, mss);
+				nbuf, qid, hlos_index, flags, buffer_type, mss);
 	}

 	if (unlikely(count <= 0)) {
@@ -3114,7 +3175,8 @@ int32_t nss_core_send_buffer(struct nss_
 	h2n_desc_ring->hlos_index = hlos_index;
 	if_map->h2n_hlos_index[qid] = hlos_index;

-	NSS_CORE_DMA_CACHE_MAINT(&if_map->h2n_hlos_index[qid], sizeof(uint32_t), DMA_TO_DEVICE);
+	dma_sync_single_for_device(nss_ctx->dev, h2n_hlos_index_to_dma(mem_ctx->if_map_dma, qid),
+		sizeof(uint32_t), DMA_TO_DEVICE);
 	NSS_CORE_DSB();

 #ifdef CONFIG_DEBUG_KMEMLEAK
--- a/nss_core.h
+++ b/nss_core.h
@@ -100,31 +100,30 @@
 #endif

 /*
- * Cache operation
+ * DMA Offset helper
 */
-#define NSS_CORE_DSB() dsb(sy)
-#define NSS_CORE_DMA_CACHE_MAINT(start, size, dir) nss_core_dma_cache_maint(start, size, dir)
+#define n2h_desc_index_offset(_index) sizeof(struct n2h_descriptor) * (_index)
+#define h2n_desc_index_offset(_index) sizeof(struct h2n_descriptor) * (_index)
+
+#define n2h_desc_index_to_dma(_if_map_addr, _qid, _index) (_if_map_addr)->n2h_desc_if[(_qid)].desc_addr + n2h_desc_index_offset(_index)
+#define h2n_desc_index_to_dma(_if_map_addr, _qid, _index) (_if_map_addr)->h2n_desc_if[(_qid)].desc_addr + h2n_desc_index_offset(_index)
+
+#define h2n_nss_index_offset offsetof(struct nss_if_mem_map, h2n_nss_index)
+#define n2h_nss_index_offset offsetof(struct nss_if_mem_map, n2h_nss_index)
+#define h2n_hlos_index_offset offsetof(struct nss_if_mem_map, h2n_hlos_index)
+#define n2h_hlos_index_offset offsetof(struct nss_if_mem_map, n2h_hlos_index)
+
+#define h2n_nss_index_to_dma(_if_map_addr, _index) (_if_map_addr) + h2n_nss_index_offset + (sizeof(uint32_t) * (_index))
+#define n2h_nss_index_to_dma(_if_map_addr, _index) (_if_map_addr) + n2h_nss_index_offset + (sizeof(uint32_t) * (_index))
+#define h2n_hlos_index_to_dma(_if_map_addr, _index) (_if_map_addr) + h2n_hlos_index_offset + (sizeof(uint32_t) * (_index))
+#define n2h_hlos_index_to_dma(_if_map_addr, _index) (_if_map_addr) + n2h_hlos_index_offset + (sizeof(uint32_t) * (_index))

 /*
- * nss_core_dma_cache_maint()
- *	Perform the appropriate cache op based on direction
+ * Cache operation
 */
-static inline void nss_core_dma_cache_maint(void *start, uint32_t size, int direction)
-{
-	switch (direction) {
-	case DMA_FROM_DEVICE:/* invalidate only */
-		dmac_inv_range(start, start + size);
-		break;
-	case DMA_TO_DEVICE:/* writeback only */
-		dmac_clean_range(start, start + size);
-		break;
-	case DMA_BIDIRECTIONAL:/* writeback and invalidate */
-		dmac_flush_range(start, start + size);
-		break;
-	default:
-		BUG();
-	}
-}
+#define NSS_CORE_DSB() dsb(sy)
+#define NSS_CORE_DMA_CACHE_MAINT(dev, start, size, dir) BUILD_BUG_ON_MSG(1, \
+	"NSS_CORE_DMA_CACHE_MAINT is deprecated. Fix the code to use correct dma_sync_* API")

 #define NSS_DEVICE_IF_START NSS_PHYSICAL_IF_START

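Note the old NSS_CORE_DMA_CACHE_MAINT is not simply deleted: it is redefined to a BUILD_BUG_ON_MSG(1, ...), so any call site that survives the conversion fails at compile time with a clear message instead of silently doing nothing. A minimal illustration (leftover_call_site() is hypothetical, not driver code):

	#include <linux/build_bug.h>

	#define NSS_CORE_DMA_CACHE_MAINT(dev, start, size, dir) BUILD_BUG_ON_MSG(1, \
		"NSS_CORE_DMA_CACHE_MAINT is deprecated. Fix the code to use correct dma_sync_* API")

	static void leftover_call_site(void)
	{
		/* Compiling this line aborts the build with the message above. */
		NSS_CORE_DMA_CACHE_MAINT(NULL, NULL, 4, 0);
	}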
--- a/nss_hal/ipq806x/nss_hal_pvt.c
+++ b/nss_hal/ipq806x/nss_hal_pvt.c
@@ -473,10 +473,9 @@ static struct nss_platform_data *__nss_h
 	/*
 	 * Clear TCM memory used by this core
 	 */
-	for (i = 0; i < resource_size(&res_vphys) ; i += 4) {
+	for (i = 0; i < resource_size(&res_vphys) ; i += 4)
 		nss_write_32(npd->vmap, i, 0);
-		NSS_CORE_DMA_CACHE_MAINT((npd->vmap + i), 4, DMA_TO_DEVICE);
-	}
+
 	NSS_CORE_DSB();

 	/*
--- a/nss_hal/ipq807x/nss_hal_pvt.c
+++ b/nss_hal/ipq807x/nss_hal_pvt.c
@@ -255,10 +255,9 @@ static struct nss_platform_data *__nss_h
 	/*
 	 * Clear TCM memory used by this core
 	 */
-	for (i = 0; i < resource_size(&res_vphys) ; i += 4) {
+	for (i = 0; i < resource_size(&res_vphys) ; i += 4)
 		nss_write_32(npd->vmap, i, 0);
-		NSS_CORE_DMA_CACHE_MAINT((npd->vmap + i), 4, DMA_TO_DEVICE);
-	}
+
 	NSS_CORE_DSB();

 	/*
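These two hal hunks are the "drop the sync for the IOREMAP addr" part of the message: npd->vmap presumably comes from ioremap(), i.e. an uncached device mapping, so per-word cache maintenance on it was never doing anything useful; one barrier after the loop is enough. A minimal sketch of that reasoning (clear_tcm() is a hypothetical helper, using plain writel()):

	#include <linux/io.h>

	/* Zero a device TCM window that was mapped with ioremap(). */
	static void clear_tcm(void __iomem *vmap, size_t size)
	{
		size_t i;

		/* writel() stores go to device memory; no dcache lines to clean. */
		for (i = 0; i < size; i += 4)
			writel(0, vmap + i);

		wmb();	/* order the stores before the core is started */
	}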
--- a/nss_meminfo.c
+++ b/nss_meminfo.c
@@ -405,7 +405,6 @@ static bool nss_meminfo_init_block_lists
 	/*
 	 * Flush the updated meminfo request.
 	 */
-	NSS_CORE_DMA_CACHE_MAINT(r, sizeof(struct nss_meminfo_request), DMA_TO_DEVICE);
 	NSS_CORE_DSB();

 	/*
@@ -520,7 +519,7 @@ static bool nss_meminfo_configure_n2h_h2
 	 * Bring a fresh copy of if_map from memory in order to read it correctly.
 	 */
 	if_map = mem_ctx->if_map;
-	NSS_CORE_DMA_CACHE_MAINT((void *)if_map, sizeof(struct nss_if_mem_map), DMA_FROM_DEVICE);
+	dma_sync_single_for_device(nss_ctx->dev, mem_ctx->if_map_dma, sizeof(struct nss_if_mem_map), DMA_FROM_DEVICE);
 	NSS_CORE_DSB();

 	if_map->n2h_rings = NSS_N2H_RING_COUNT;
@@ -558,7 +557,7 @@ static bool nss_meminfo_configure_n2h_h2
 	/*
 	 * Flush the updated nss_if_mem_map.
 	 */
-	NSS_CORE_DMA_CACHE_MAINT((void *)if_map, sizeof(struct nss_if_mem_map), DMA_TO_DEVICE);
+	dma_sync_single_for_device(nss_ctx->dev, mem_ctx->if_map_dma, sizeof(struct nss_if_mem_map), DMA_TO_DEVICE);
 	NSS_CORE_DSB();

 	return true;
--- a/nss_profiler.c
+++ b/nss_profiler.c
@@ -153,8 +153,10 @@ void nss_profiler_release_dma(struct nss

 	ctrl = nss_ctx->meminfo_ctx.sdma_ctrl;

-	if (ctrl && ctrl->consumer[0].ring.kp)
+	if (ctrl && ctrl->consumer[0].ring.kp) {
 		kfree(ctrl->consumer[0].ring.kp);
+		ctrl->consumer[0].ring.kp = NULL;
+	}
 }
 EXPORT_SYMBOL(nss_profiler_release_dma);

@@ -198,10 +200,13 @@ EXPORT_SYMBOL(nss_profile_dma_deregister
 struct nss_profile_sdma_ctrl *nss_profile_dma_get_ctrl(struct nss_ctx_instance *nss_ctx)
 {
 	struct nss_profile_sdma_ctrl *ctrl = nss_ctx->meminfo_ctx.sdma_ctrl;
-	if (ctrl) {
-		dmac_inv_range(ctrl, &ctrl->cidx);
-		dsb(sy);
+	int size = offsetof(struct nss_profile_sdma_ctrl, cidx);
+	if (!ctrl) {
+		return ctrl;
 	}
+
+	dma_sync_single_for_device(nss_ctx->dev, (dma_addr_t) ctrl, size, DMA_FROM_DEVICE);
+	dsb(sy);
 	return ctrl;
 }
 EXPORT_SYMBOL(nss_profile_dma_get_ctrl);
@@ -233,7 +238,7 @@ void nss_profiler_notify_unregister(nss_
 {
 	nss_assert(core_id < NSS_CORE_MAX);

-	nss_core_register_handler(&nss_top_main.nss[core_id], NSS_PROFILER_INTERFACE, NULL, NULL);
+	nss_core_unregister_handler(&nss_top_main.nss[core_id], NSS_PROFILER_INTERFACE);
 	nss_top_main.profiler_callback[core_id] = NULL;
 	nss_top_main.profiler_ctx[core_id] = NULL;
 }
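One caveat in the hunk above: dma_sync_single_for_device() is given (dma_addr_t)ctrl, a casted CPU pointer, where the API expects the DMA handle returned when the buffer was mapped; this is presumably why the commit message hedges with "We hope the nss_profiler is correctly ported." The conventional shape, with hypothetical names, keeps the handle from map time:

	#include <linux/dma-mapping.h>

	struct sdma_state {
		void *ctrl;		/* CPU view of the control block */
		dma_addr_t ctrl_dma;	/* device view, saved at map time */
	};

	/* Hypothetical mapping step: keep the DMA handle next to the pointer. */
	static int sdma_map(struct device *dev, struct sdma_state *s, size_t len)
	{
		s->ctrl_dma = dma_map_single(dev, s->ctrl, len, DMA_FROM_DEVICE);
		return dma_mapping_error(dev, s->ctrl_dma);
	}

	/* Later syncs use the saved handle, never a casted pointer. */
	static void sdma_sync(struct device *dev, struct sdma_state *s, size_t len)
	{
		dma_sync_single_for_device(dev, s->ctrl_dma, len, DMA_FROM_DEVICE);
	}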
@@ -47,10 +47,18 @@
 #ifdef NSS_DRV_TLS_ENABLE
 #if defined(NSS_HAL_IPQ807x_SUPPORT) || defined(NSS_HAL_IPQ60XX_SUPPORT)
 	if (npd->tls_enabled == NSS_FEATURE_ENABLED) {
@@ -662,6 +664,7 @@ int nss_hal_probe(struct platform_device
@@ -656,12 +658,15 @@ int nss_hal_probe(struct platform_device
 	}
 #endif
 #endif
+#ifdef NSS_DRV_MIRROR_ENABLE
 	if (npd->mirror_enabled == NSS_FEATURE_ENABLED) {
 		nss_top->mirror_handler_id = nss_dev->id;
 		nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_MIRROR] = nss_dev->id;
 		nss_mirror_register_handler();
 		nss_info("%d: NSS mirror is enabled", nss_dev->id);
 	}
+#endif
+#endif

 	if (nss_ctx->id == 0) {
@@ -73,7 +81,7 @@
+#define __NSS_FW_VERSION_H
+
+#define NSS_FW_VERSION_MAJOR 11
+#define NSS_FW_VERSION_MINOR 0
+#define NSS_FW_VERSION_MINOR 4
+
+#define NSS_FW_VERSION(a,b) (((a) << 8) + (b))
+