nss-clients: backport and fixes for ipsecmgr, netlink, vxlanmgr, qdisc

backport 12.5:
* ipsecmgr
* netlink
* vxlanmgr
* qdisc

fixes:
* ipsecmgr

Signed-off-by: Sean Khan <datapronix@protonmail.com>
Sean Khan 2025-07-10 20:59:33 -04:00
parent 1c24304019
commit 274cc66de7
5 changed files with 3070 additions and 0 deletions


@@ -0,0 +1,377 @@
From fa3a58742c4721221cb9a5ab11c65b6d60b77477 Mon Sep 17 00:00:00 2001
From: Aniruddha Bhat Anemajalu <quic_aanemaja@quicinc.com>
Date: Tue, 11 Jan 2022 11:22:08 -0800
Subject: [PATCH] [qca-nss-clients] Check for qdisc before deleting the class
Do not allow deleting the class before deleting the underlying Qdisc.
Change-Id: I40f611cb1a5342ed58b4b1abcf1254d8a981a760
Signed-off-by: Aniruddha Bhat Anemajalu <quic_aanemaja@quicinc.com>
---
nss_qdisc/nss_bf.c | 14 ++++++++++----
nss_qdisc/nss_htb.c | 11 ++++++++---
nss_qdisc/nss_wrr.c | 14 ++++++++++----
3 files changed, 28 insertions(+), 11 deletions(-)
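For reference, the guard added to each of the *_delete_class() handlers in the diff below follows the same shape; a minimal sketch under hypothetical example_* names (not the driver structures), assuming the kernel's struct Qdisc and noop_qdisc definitions:

/*
 * Sketch of the new check: a class may only be deleted once nothing hangs
 * below it. A leaf class whose cl->qdisc still points at a real qdisc
 * (i.e. not the kernel's noop_qdisc placeholder) has a child qdisc
 * attached, so the delete is refused with -EBUSY.
 */
struct example_class {
	struct Qdisc *qdisc;		/* child qdisc, &noop_qdisc when none attached */
};

struct example_sched {
	struct example_class root;	/* root class, never deletable */
};

static int example_delete_class(struct example_sched *q, struct example_class *cl)
{
	if ((cl == &q->root) || (cl->qdisc != &noop_qdisc))
		return -EBUSY;

	/* ...remove from the class hash and drop the reference, as in the diff... */
	return 0;
}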
--- a/nss_qdisc/nss_bf.c
+++ b/nss_qdisc/nss_bf.c
@@ -1,9 +1,13 @@
/*
**************************************************************************
* Copyright (c) 2014-2017, 2019-2020, The Linux Foundation. All rights reserved.
+ *
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ *
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
+ *
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
@@ -309,11 +313,14 @@ static int nss_bf_delete_class(struct Qd
struct nss_qdisc *nq_child = (struct nss_qdisc *)qdisc_priv(cl->qdisc);
/*
- * Since all classes are leaf nodes in our case, we dont have to make
- * that check.
+ * If the class is the root class or has qdiscs attached, we do not
+ * support deleting it.
*/
- if (cl == &q->root)
+ if ((cl == &q->root) || (cl->qdisc != &noop_qdisc)) {
+ nss_qdisc_warning("Cannot delete bf class %x as it is the root "
+ "class or has child qdisc attached\n", cl->nq.qos_tag);
return -EBUSY;
+ }
/*
* The message to NSS should be sent to the parent of this class
@@ -327,7 +334,6 @@ static int nss_bf_delete_class(struct Qd
}
sch_tree_lock(sch);
- qdisc_reset(cl->qdisc);
qdisc_class_hash_remove(&q->clhash, &cl->cl_common);
refcnt = nss_qdisc_atomic_sub_return(&cl->nq);
sch_tree_unlock(sch);
--- a/nss_qdisc/nss_htb.c
+++ b/nss_qdisc/nss_htb.c
@@ -1,9 +1,13 @@
/*
**************************************************************************
* Copyright (c) 2014-2017, 2019-2021, The Linux Foundation. All rights reserved.
+ *
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ *
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
+ *
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
@@ -534,10 +538,12 @@ static int nss_htb_delete_class(struct Q
int refcnt;
/*
- * If the class still has child nodes, then we do not
+ * If the class still has child nodes or qdiscs, then we do not
* support deleting it.
*/
- if (cl->children) {
+ if ((cl->children) || (cl->qdisc != &noop_qdisc)) {
+ nss_qdisc_warning("Cannot delete htb class %x with child nodes "
+ "or qdisc attached\n", cl->nq.qos_tag);
return -EBUSY;
}
@@ -568,7 +574,6 @@ static int nss_htb_delete_class(struct Q
}
sch_tree_lock(sch);
- qdisc_reset(cl->qdisc);
qdisc_class_hash_remove(&q->clhash, &cl->sch_common);
/*
--- a/nss_qdisc/nss_wrr.c
+++ b/nss_qdisc/nss_wrr.c
@@ -1,9 +1,13 @@
/*
**************************************************************************
* Copyright (c) 2014-2017, 2019-2021, The Linux Foundation. All rights reserved.
+ *
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ *
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
+ *
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
@@ -418,11 +422,14 @@ static int nss_wrr_delete_class(struct Q
int refcnt;
/*
- * Since all classes are leaf nodes in our case, we dont have to make
- * that check.
+ * If the class is a root class or has a child qdisc attached
+ * we do not support deleting it.
*/
- if (cl == &q->root)
+ if ((cl == &q->root) || (cl->qdisc != &noop_qdisc)) {
+ nss_qdisc_warning("Cannot delete wrr class %x as it is the "
+ "root class or has a child qdisc attached\n", cl->nq.qos_tag);
return -EBUSY;
+ }
/*
* The message to NSS should be sent to the parent of this class
@@ -436,7 +443,6 @@ static int nss_wrr_delete_class(struct Q
}
sch_tree_lock(sch);
- qdisc_reset(cl->qdisc);
qdisc_class_hash_remove(&q->clhash, &cl->cl_common);
refcnt = nss_qdisc_atomic_sub_return(&cl->nq);
--- a/nss_qdisc/nss_ppe.c
+++ b/nss_qdisc/nss_ppe.c
@@ -1,7 +1,11 @@
/*
**************************************************************************
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ *
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Permission to use, copy, modify, and/or distribute this software for
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
@@ -28,12 +32,9 @@
/*
* Max Resources per port
*
- * Currently, we are using only one multicast queue.
* In case of Loopback port, the resources are reserved
* for qdisc functionality.
*/
-#define NSS_PPE_MCAST_QUEUE_MAX 1
-
#define NSS_PPE_LOOPBACK_L0_SP_MAX 1
#define NSS_PPE_LOOPBACK_L0_CDRR_MAX 16
#define NSS_PPE_LOOPBACK_L0_EDRR_MAX 16
@@ -126,7 +127,7 @@ static struct nss_ppe_res *nss_ppe_res_e
spin_lock_bh(&ppe_port->lock);
for (i = max; i > 0; i--) {
- res = kzalloc(sizeof(struct nss_ppe_res), GFP_KERNEL);
+ res = kzalloc(sizeof(struct nss_ppe_res), GFP_ATOMIC);
if (!res) {
nss_qdisc_error("Free queue list allocation failed for port %u\n", port);
goto fail;
@@ -275,9 +276,10 @@ int nss_ppe_port_res_alloc(void)
ppe_qdisc_port[i].base[NSS_PPE_UCAST_QUEUE] = cfg.ucastq_start;
/*
- * Even though we reserve more mcast queues in the device tree, we only use 1 in qdiscs.
+ * Even though we reserve more mcast queues in the device tree, we only use 1 in qdiscs
+ * for the default queue.
*/
- ppe_qdisc_port[i].max[NSS_PPE_MCAST_QUEUE] = NSS_PPE_MCAST_QUEUE_MAX;
+ ppe_qdisc_port[i].max[NSS_PPE_MCAST_QUEUE] = cfg.mcastq_num;
ppe_qdisc_port[i].base[NSS_PPE_MCAST_QUEUE] = cfg.mcastq_start;
ppe_qdisc_port[i].max[NSS_PPE_L0_CDRR] = cfg.l0cdrr_num;
@@ -576,6 +578,36 @@ static void nss_ppe_all_queue_enable(uin
}
/*
+ * nss_ppe_assigned_queue_enable()
+ * Enables all level L0 queues corresponding to a port in SSDK.
+ */
+static void nss_ppe_assigned_queue_enable(uint32_t port_num)
+{
+ uint32_t qid = nss_ppe_base_get(port_num, NSS_PPE_UCAST_QUEUE);
+ uint32_t mcast_qid = nss_ppe_base_get(port_num, NSS_PPE_MCAST_QUEUE);
+ struct nss_ppe_res *res;
+ struct nss_ppe_port *ppe_port = &ppe_qdisc_port[port_num];
+
+ spin_lock_bh(&ppe_port->lock);
+ res = ppe_port->res_used[NSS_PPE_UCAST_QUEUE];
+ while (res) {
+ fal_qm_enqueue_ctrl_set(0, qid + res->offset, 1);
+ fal_scheduler_dequeue_ctrl_set(0, qid + res->offset, 1);
+ res = res->next;
+ }
+
+ res = ppe_port->res_used[NSS_PPE_MCAST_QUEUE];
+ while (res) {
+ fal_qm_enqueue_ctrl_set(0, mcast_qid + res->offset, 1);
+ fal_scheduler_dequeue_ctrl_set(0, mcast_qid + res->offset, 1);
+ res = res->next;
+ }
+
+ spin_unlock_bh(&ppe_port->lock);
+ nss_qdisc_info("Enable SSDK level0 queue scheduler successful\n");
+}
+
+/*
* nss_ppe_l1_queue_scheduler_configure()
* Configures Level 1 queue scheduler in SSDK.
*/
@@ -585,11 +617,6 @@ static int nss_ppe_l1_queue_scheduler_co
uint32_t port_num = nss_ppe_port_num_get(nq);
struct nss_ppe_qdisc *npq = &nq->npq;
- if (npq->scheduler.drr_weight >= NSS_PPE_DRR_WEIGHT_MAX) {
- nss_qdisc_warning("DRR weight:%d should be less than 1024\n", npq->scheduler.drr_weight);
- return -EINVAL;
- }
-
/*
* Disable all queues and set Level 1 SSDK configuration
* We need to disable and flush the queues before
@@ -597,6 +624,15 @@ static int nss_ppe_l1_queue_scheduler_co
*/
nss_ppe_all_queue_disable(port_num);
+ if (npq->scheduler.drr_weight >= NSS_PPE_DRR_WEIGHT_MAX) {
+ /*
+ * Currently assigned queues are enabled back by
+ * caller
+ */
+ nss_qdisc_warning("DRR weight:%d should be less than 1024\n", npq->scheduler.drr_weight);
+ return -EINVAL;
+ }
+
memset(&l1cfg, 0, sizeof(l1cfg));
l1cfg.sp_id = port_num;
@@ -614,11 +650,10 @@ static int nss_ppe_l1_queue_scheduler_co
port_num, npq->l0spid, l1cfg.c_drr_id, l1cfg.c_pri, l1cfg.c_drr_wt, l1cfg.e_drr_id, l1cfg.e_pri, l1cfg.e_drr_wt, l1cfg.sp_id);
if (fal_queue_scheduler_set(0, npq->l0spid, NSS_PPE_FLOW_LEVEL - 1, port_num, &l1cfg) != 0) {
nss_qdisc_error("SSDK level1 queue scheduler configuration failed\n");
- nss_ppe_all_queue_enable(port_num);
return -EINVAL;
}
- nss_ppe_all_queue_enable(port_num);
+ nss_ppe_assigned_queue_enable(port_num);
nss_qdisc_info("SSDK level1 queue scheduler configuration successful\n");
return 0;
@@ -672,6 +707,7 @@ static int nss_ppe_l1_queue_scheduler_se
if (nss_ppe_l1_queue_scheduler_configure(nq) != 0) {
nss_qdisc_error("SSDK level1 queue scheduler configuration failed\n");
nss_ppe_l1_res_free(nq);
+ nss_ppe_assigned_queue_enable(nss_ppe_port_num_get(nq));
return -EINVAL;
}
@@ -758,11 +794,13 @@ static int nss_ppe_l0_queue_scheduler_de
port_num, npq->q.ucast_qid, l0cfg.c_drr_id, l0cfg.c_pri, l0cfg.c_drr_wt, l0cfg.e_drr_id, l0cfg.e_pri, l0cfg.e_drr_wt, l0cfg.sp_id);
if (fal_queue_scheduler_set(0, npq->q.ucast_qid, NSS_PPE_QUEUE_LEVEL - 1, port_num, &l0cfg) != 0) {
nss_qdisc_error("SSDK level0 queue scheduler configuration failed\n");
- nss_ppe_all_queue_enable(port_num);
+ nss_ppe_assigned_queue_enable(port_num);
return -EINVAL;
}
- nss_ppe_all_queue_enable(port_num);
+ /*
+ * Assigned queues are enabled after the current resource is freed.
+ */
nss_qdisc_info("SSDK level0 queue scheduler configuration successful\n");
return 0;
@@ -781,9 +819,11 @@ static int nss_ppe_l0_queue_scheduler_re
if (nss_ppe_l0_res_free(nq) != 0) {
nss_qdisc_error("Level0 scheduler resources de-allocation failed\n");
+ nss_ppe_assigned_queue_enable(nss_ppe_port_num_get(nq));
return -EINVAL;
}
+ nss_ppe_assigned_queue_enable(nss_ppe_port_num_get(nq));
nss_qdisc_info("SSDK level0 queue scheduler configuration successful\n");
return 0;
}
@@ -871,11 +911,6 @@ static int nss_ppe_l0_queue_scheduler_co
uint32_t port_num = nss_ppe_port_num_get(nq);
struct nss_ppe_qdisc *npq = &nq->npq;
- if (npq->scheduler.drr_weight >= NSS_PPE_DRR_WEIGHT_MAX) {
- nss_qdisc_warning("DRR weight:%d should be less than 1024\n", npq->scheduler.drr_weight);
- return -EINVAL;
- }
-
/*
* Disable all queues and set Level 0 SSDK configuration
* We need to disable and flush the queues before
@@ -883,6 +918,15 @@ static int nss_ppe_l0_queue_scheduler_co
*/
nss_ppe_all_queue_disable(port_num);
+ if (npq->scheduler.drr_weight >= NSS_PPE_DRR_WEIGHT_MAX) {
+ /*
+ * Currently assigned queues are enabled back by
+ * caller
+ */
+ nss_qdisc_warning("DRR weight:%d should be less than 1024\n", npq->scheduler.drr_weight);
+ return -EINVAL;
+ }
+
memset(&l0cfg, 0, sizeof(l0cfg));
l0cfg.sp_id = npq->l0spid;
l0cfg.c_drr_wt = npq->scheduler.drr_weight ? npq->scheduler.drr_weight : 1;
@@ -899,7 +943,6 @@ static int nss_ppe_l0_queue_scheduler_co
port_num, npq->q.ucast_qid, l0cfg.c_drr_id, l0cfg.c_pri, l0cfg.c_drr_wt, l0cfg.e_drr_id, l0cfg.e_pri, l0cfg.e_drr_wt, l0cfg.sp_id);
if (fal_queue_scheduler_set(0, npq->q.ucast_qid, NSS_PPE_QUEUE_LEVEL - 1, port_num, &l0cfg) != 0) {
nss_qdisc_error("SSDK level0 queue scheduler configuration failed\n");
- nss_ppe_all_queue_enable(port_num);
return -EINVAL;
}
@@ -917,12 +960,11 @@ static int nss_ppe_l0_queue_scheduler_co
port_num, npq->q.mcast_qid, l0cfg.c_drr_id, l0cfg.c_pri, l0cfg.c_drr_wt, l0cfg.e_drr_id, l0cfg.e_pri, l0cfg.e_drr_wt, l0cfg.sp_id);
if (fal_queue_scheduler_set(0, npq->q.mcast_qid, NSS_PPE_QUEUE_LEVEL - 1, port_num, &l0cfg) != 0) {
nss_qdisc_error("SSDK level0 multicast queue scheduler configuration failed\n");
- nss_ppe_all_queue_enable(port_num);
return -EINVAL;
}
}
- nss_ppe_all_queue_enable(port_num);
+ nss_ppe_assigned_queue_enable(port_num);
nss_qdisc_info("SSDK level0 queue scheduler configuration successful\n");
return 0;
@@ -955,6 +997,7 @@ static int nss_ppe_l0_queue_scheduler_se
if (nss_ppe_l0_queue_scheduler_configure(nq) != 0) {
nss_qdisc_error("SSDK level0 queue scheduler configuration failed\n");
nss_ppe_l0_res_free(nq);
+ nss_ppe_assigned_queue_enable(nss_ppe_port_num_get(nq));
return -EINVAL;
}
@@ -1381,7 +1424,7 @@ static int nss_ppe_default_conf_set(uint
*/
if (fal_port_scheduler_cfg_reset(0, port_num) != 0) {
nss_qdisc_error("SSDK reset default queue configuration failed\n");
- nss_ppe_all_queue_enable(port_num);
+ nss_ppe_assigned_queue_enable(port_num);
return -EINVAL;
}
@@ -1960,7 +2003,7 @@ void nss_ppe_all_queue_enable_hybrid(str
|| (nq->type == NSS_SHAPER_NODE_TYPE_BF)
|| (nq->type == NSS_SHAPER_NODE_TYPE_WRED)) {
uint32_t port_num = nss_ppe_port_num_get(nq);
- nss_ppe_all_queue_enable(port_num);
+ nss_ppe_assigned_queue_enable(port_num);
nss_qdisc_info("Queues in hybrid mode enabled successfully for Qdisc %px (type %d)\n", nq, nq->type);
}
}


@@ -0,0 +1,561 @@
--- a/ipsecmgr/v2.0/plugins/klips/nss_ipsec_klips.c
+++ b/ipsecmgr/v2.0/plugins/klips/nss_ipsec_klips.c
@@ -142,7 +142,6 @@ static int nss_ipsec_klips_offload_esp(s
static struct net_protocol esp_protocol = {
.handler = nss_ipsec_klips_offload_esp,
.no_policy = 1,
- .netns_ok = 1,
};
/*
@@ -300,7 +299,7 @@ static struct nss_ipsec_klips_tun *nss_i
* Read/write lock needs to taken by the caller since sa
* table is looked up here
*/
- BUG_ON(write_can_lock(&tunnel_map.lock));
+ lockdep_assert_held_write(&tunnel_map.lock);
if (!klips_dev) {
return NULL;
@@ -383,7 +382,7 @@ static struct nss_ipsec_klips_tun *nss_i
* Read/write lock needs to be taken by the caller since tunnel
* table is looked up here
*/
- BUG_ON(write_can_lock(&tunnel_map.lock));
+ lockdep_assert_held_write(&tunnel_map.lock);
for (i = 0, tun = tunnel_map.tbl; i < tunnel_map.max; i++, tun++) {
if (!tun->klips_dev) {
@@ -434,7 +433,7 @@ static struct nss_ipsec_klips_sa *nss_ip
* Read/write lock needs to taken by the caller since sa
* table is looked up here
*/
- BUG_ON(write_can_lock(&tunnel_map.lock));
+ lockdep_assert_held_write(&tunnel_map.lock);
list_for_each_entry_safe(sa, tmp, head, list) {
if (sa->sid == crypto_idx)
@@ -458,7 +457,7 @@ static void nss_ipsec_klips_sa_flush(str
* Read/write lock needs to taken by the caller since sa
* table is modified here
*/
- BUG_ON(write_can_lock(&tunnel_map.lock));
+ lockdep_assert_held_write(&tunnel_map.lock);
list_for_each_entry_safe(sa, tmp, head, list) {
list_del_init(&sa->list);
@@ -1220,7 +1219,7 @@ static void nss_ipsec_klips_register_nat
/*
* write lock is needed as we are modifying tunnel entry.
*/
- BUG_ON(write_can_lock(&tunnel_map.lock));
+ lockdep_assert_held_write(&tunnel_map.lock);
sock_hold(sk);
tun->sk_encap_rcv = udp_sk(sk)->encap_rcv;
@@ -1237,7 +1236,7 @@ static void nss_ipsec_klips_unregister_n
/*
* write lock is needed as we are modifying tunnel entry.
*/
- BUG_ON(write_can_lock(&tunnel_map.lock));
+ lockdep_assert_held_write(&tunnel_map.lock);
xchg(&udp_sk(tun->sk)->encap_rcv, tun->sk_encap_rcv);
sock_put(tun->sk);
--- a/ipsecmgr/v2.0/plugins/xfrm/nss_ipsec_xfrm.c
+++ b/ipsecmgr/v2.0/plugins/xfrm/nss_ipsec_xfrm.c
@@ -1222,6 +1222,7 @@ drop:
return -EINVAL;
}
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(5, 4, 0))
/*
* nss_ipsec_xfrm_v4_output_finish()
* This is called for non-offloaded transformations after the NF_POST routing hooks
@@ -1243,9 +1244,8 @@ static int nss_ipsec_xfrm_v4_output_fini
*/
static int nss_ipsec_xfrm_v4_extract_input(struct xfrm_state *x, struct sk_buff *skb)
{
- struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm;
-
nss_ipsec_xfrm_trace("%px: Redirect to native xfrm stack\n", skb);
+ struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm;
return drv->xsa.v4->extract_input(x, skb);
}
@@ -1257,11 +1257,12 @@ static int nss_ipsec_xfrm_v4_extract_inp
*/
static int nss_ipsec_xfrm_v4_extract_output(struct xfrm_state *x, struct sk_buff *skb)
{
- struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm;
nss_ipsec_xfrm_trace("%px: Redirect to native xfrm stack\n", skb);
+ struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm;
return drv->xsa.v4->extract_output(x, skb);
}
+#endif
/*
* nss_ipsec_xfrm_v4_transport_finish()
@@ -1360,14 +1361,14 @@ fallback:
* nss_ipsec_xfrm_esp_init_state()
* Initialize IPsec xfrm state of type ESP.
*/
-static int nss_ipsec_xfrm_esp_init_state(struct xfrm_state *x)
+static int nss_ipsec_xfrm_esp_init_state(struct xfrm_state *x, struct netlink_ext_ack *extac)
{
struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm;
struct nss_ipsec_xfrm_tunnel *tun = NULL;
struct nss_ipsec_xfrm_sa *sa = NULL;
xfrm_address_t remote = {0};
xfrm_address_t local = {0};
- struct net_device *local_dev;
+ struct net_device *local_dev = NULL;
bool new_tun = 0;
size_t ip_addr_len;
@@ -1375,7 +1376,7 @@ static int nss_ipsec_xfrm_esp_init_state
local_dev = ip_dev_find(&init_net, x->id.daddr.a4);
ip_addr_len = sizeof(local.a4);
} else {
- local_dev = ipv6_dev_find(&init_net, &x->id.daddr.in6, 1);
+ local_dev = ipv6_dev_find(&init_net, &x->id.daddr.in6, local_dev);
ip_addr_len = sizeof(local.a6);
}
@@ -1716,6 +1717,7 @@ drop:
return -EINVAL;
}
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0))
/*
* nss_ipsec_xfrm_v6_output_finish()
* This is called for non-offloaded transformations after the NF_POST routing hooks
@@ -1737,9 +1739,9 @@ static int nss_ipsec_xfrm_v6_output_fini
*/
static int nss_ipsec_xfrm_v6_extract_input(struct xfrm_state *x, struct sk_buff *skb)
{
- struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm;
nss_ipsec_xfrm_trace("%px: Redirect to native xfrm stack\n", skb);
+ struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm;
return drv->xsa.v6->extract_input(x, skb);
}
@@ -1751,11 +1753,11 @@ static int nss_ipsec_xfrm_v6_extract_inp
*/
static int nss_ipsec_xfrm_v6_extract_output(struct xfrm_state *x, struct sk_buff *skb)
{
- struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm;
-
nss_ipsec_xfrm_trace("%px: Redirect to native xfrm stack\n", skb);
+ struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm;
return drv->xsa.v6->extract_output(x, skb);
}
+#endif
/*
* nss_ipsec_xfrm_v6_transport_finish()
@@ -1783,22 +1785,25 @@ void nss_ipsec_xfrm_v6_local_error(struc
return drv->xsa.v6->local_error(skb, mtu);
}
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0))
/*
* nss_ipsec_xfrm_v6_esp_hdr_offset()
* Invoked by stack for IPv6 transport mode in encap.
* Redirect to the native version.
*/
-static int nss_ipsec_xfrm_v6_esp_hdr_offset(struct xfrm_state *x, struct sk_buff *skb, u8 **prevhdr)
+static int nss_ipsec_xfrm_v6_esp_hdr_offset(struct xfrm_state *x, struct sk_buff *skb, u8 **prevhdr)
{
- struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm;
nss_ipsec_xfrm_trace("%px: Redirect to native esp6 stack\n", skb);
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0))
- return drv->xsa.v6->type_map[IPPROTO_ESP]->hdr_offset(x, skb, prevhdr);
-#else
- return drv->xsa.v6->type_esp->hdr_offset(x, skb, prevhdr);
-#endif
+
+ struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm;
+ #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0))
+ return drv->xsa.v6->type_map[IPPROTO_ESP]->hdr_offset(x, skb, prevhdr);
+ #else
+ return drv->xsa.v6->type_esp->hdr_offset(x, skb, prevhdr);
+ #endif
}
+#endif
/*
* nss_ipsec_xfrm_esp6_rcv()
@@ -1949,7 +1954,6 @@ static void nss_ipsec_xfrm_state_delete(
nss_ipsec_xfrm_del_tun(drv, tun);
}
- return;
}
/*
@@ -2018,9 +2022,11 @@ static struct xfrm_state_afinfo xfrm_v4_
.init_temprop = nss_ipsec_xfrm_v4_init_param,
#endif
.output = nss_ipsec_xfrm_v4_output,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0))
.output_finish = nss_ipsec_xfrm_v4_output_finish,
.extract_input = nss_ipsec_xfrm_v4_extract_input,
.extract_output = nss_ipsec_xfrm_v4_extract_output,
+#endif
.transport_finish = nss_ipsec_xfrm_v4_transport_finish,
.local_error = nss_ipsec_xfrm_v4_local_error,
};
@@ -2065,7 +2071,6 @@ struct xfrm_mode xfrm_v6_mode_map[XFRM_M
* IPv4 xfrm_type ESP object.
*/
static const struct xfrm_type xfrm_v4_type = {
- .description = "NSS ESP4",
.owner = THIS_MODULE,
.proto = IPPROTO_ESP,
.flags = XFRM_TYPE_REPLAY_PROT,
@@ -2101,9 +2106,11 @@ static struct xfrm_state_afinfo xfrm_v6_
.state_sort = nss_ipsec_xfrm_v6_sort_state,
#endif
.output = nss_ipsec_xfrm_v6_output,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0))
.output_finish = nss_ipsec_xfrm_v6_output_finish,
.extract_input = nss_ipsec_xfrm_v6_extract_input,
.extract_output = nss_ipsec_xfrm_v6_extract_output,
+#endif
.transport_finish = nss_ipsec_xfrm_v6_transport_finish,
.local_error = nss_ipsec_xfrm_v6_local_error,
};
@@ -2112,7 +2119,6 @@ static struct xfrm_state_afinfo xfrm_v6_
* IPv6 xfrm_type ESP object.
*/
static const struct xfrm_type xfrm_v6_type = {
- .description = "NSS ESP6",
.owner = THIS_MODULE,
.proto = IPPROTO_ESP,
.flags = XFRM_TYPE_REPLAY_PROT,
@@ -2121,7 +2127,9 @@ static const struct xfrm_type xfrm_v6_ty
.get_mtu = nss_ipsec_xfrm_esp_get_mtu,
.input = nss_ipsec_xfrm_esp_input,
.output = nss_ipsec_xfrm_esp_output,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0))
.hdr_offset = nss_ipsec_xfrm_v6_esp_hdr_offset,
+#endif
};
/*
@@ -2207,7 +2215,6 @@ static void nss_ipsec_xfrm_restore_afinf
}
xfrm_unregister_type(base, family);
-
xfrm_state_update_afinfo(family, afinfo);
}
@@ -2292,14 +2299,10 @@ static void nss_ipsec_xfrm_override_afin
*/
int __init nss_ipsec_xfrm_init_module(void)
{
-
rwlock_init(&g_ipsec_xfrm.lock);
-
nss_ipsec_xfrm_init_tun_db(&g_ipsec_xfrm);
nss_ipsec_xfrm_init_flow_db(&g_ipsec_xfrm);
-
init_completion(&g_ipsec_xfrm.complete);
-
net_get_random_once(&g_ipsec_xfrm.hash_nonce, sizeof(g_ipsec_xfrm.hash_nonce));
/*
@@ -2327,7 +2330,6 @@ int __init nss_ipsec_xfrm_init_module(vo
nss_ipsec_xfrm_override_afinfo(&g_ipsec_xfrm, AF_INET6);
ecm_interface_ipsec_register_callbacks(&xfrm_ecm_ipsec_cb);
- ecm_notifier_register_connection_notify(&xfrm_ecm_notifier);
#if defined(NSS_L2TPV2_ENABLED)
l2tpmgr_register_ipsecmgr_callback_by_ipaddr(&xfrm_l2tp);
@@ -2336,6 +2338,7 @@ int __init nss_ipsec_xfrm_init_module(vo
/*
* Register for xfrm events
*/
+ ecm_notifier_register_connection_notify(&xfrm_ecm_notifier);
xfrm_register_km(&nss_ipsec_xfrm_mgr);
/*
@@ -2346,6 +2349,7 @@ int __init nss_ipsec_xfrm_init_module(vo
return 0;
unreg_v4_handler:
+ xfrm4_protocol_deregister(&xfrm4_proto, IPPROTO_ESP);
xfrm6_protocol_deregister(&xfrm6_proto, IPPROTO_ESP);
return -EAGAIN;
}
--- a/ipsecmgr/v2.0/plugins/xfrm/nss_ipsec_xfrm_sa.c
+++ b/ipsecmgr/v2.0/plugins/xfrm/nss_ipsec_xfrm_sa.c
@@ -181,7 +181,7 @@ static bool nss_ipsec_xfrm_sa_init_crypt
*/
static void nss_ipsec_xfrm_sa_init_tuple(struct nss_ipsec_xfrm_sa *sa, struct xfrm_state *x)
{
- struct net_device *local_dev;
+ struct net_device *local_dev = NULL;
sa->type = NSS_IPSECMGR_SA_TYPE_ENCAP;
sa->tuple.spi_index = ntohl(x->id.spi);
@@ -215,7 +215,7 @@ static void nss_ipsec_xfrm_sa_init_tuple
sa->tuple.dest_ip[2] = ntohl(x->id.daddr.a6[2]);
sa->tuple.dest_ip[3] = ntohl(x->id.daddr.a6[3]);
- local_dev = ipv6_dev_find(&init_net, (struct in6_addr *)x->id.daddr.a6, 1);
+ local_dev = ipv6_dev_find(&init_net, (struct in6_addr *)x->id.daddr.a6, local_dev);
}
/*
--- a/ipsecmgr/v2.0/plugins/xfrm/nss_ipsec_xfrm_tunnel.c
+++ b/ipsecmgr/v2.0/plugins/xfrm/nss_ipsec_xfrm_tunnel.c
@@ -130,7 +130,6 @@ err:
drop:
atomic64_inc(&drv->stats.inner_drop);
dev_kfree_skb_any(skb);
- return;
}
/*
@@ -194,7 +193,6 @@ static void nss_ipsec_xfrm_tunnel_rx_out
drop:
dev_kfree_skb_any(skb);
atomic64_inc(&drv->stats.outer_drop);
- return;
}
/*
From 2b32003b2e6225802361bc3bab12fcb3510f0327 Mon Sep 17 00:00:00 2001
From: Suhas N Bhargav <sbhargav@codeaurora.org>
Date: Thu, 30 Sep 2021 16:32:12 +0530
Subject: [PATCH] [qca-nss-clients] Fix to avoid contention b/w write locks in
ipsecmgr
This fix is needed to avoid lock contention between two
entities running in process and interrupt context.
Change-Id: I9986606b99d7642cca1c105bdf05e0ed67b66374
Signed-off-by: Suhas N Bhargav <sbhargav@codeaurora.org>
---
ipsecmgr/v2.0/nss_ipsecmgr.c | 6 +++---
ipsecmgr/v2.0/nss_ipsecmgr_flow.c | 8 ++++----
ipsecmgr/v2.0/nss_ipsecmgr_tunnel.c | 10 +++++-----
3 files changed, 12 insertions(+), 12 deletions(-)
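The conversion below is the standard fix for this kind of contention: the ipsecmgr database lock is also taken from bottom-half (softirq) context, so process-context writers must disable BHs while holding it. A minimal sketch of the pattern, using a hypothetical example_lock rather than the driver's own lock:

/*
 * If process context takes the write lock with BHs enabled and a softirq on
 * the same CPU then tries to take the same lock, the CPU deadlocks.
 * write_lock_bh() keeps bottom halves off for the critical section.
 */
static DEFINE_RWLOCK(example_lock);

static void example_process_context_update(void)
{
	write_lock_bh(&example_lock);	/* softirqs cannot preempt us here */
	/* ... modify the shared tunnel/flow database ... */
	write_unlock_bh(&example_lock);
}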
--- a/ipsecmgr/v2.0/nss_ipsecmgr.c
+++ b/ipsecmgr/v2.0/nss_ipsecmgr.c
@@ -1,6 +1,6 @@
/*
**************************************************************************
- * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
@@ -278,11 +278,11 @@ static int __init nss_ipsecmgr_init(void
*/
nss_ipsecmgr_configure(&ipsecmgr_drv->cfg_work.work);
- write_lock(&ipsecmgr_drv->lock);
+ write_lock_bh(&ipsecmgr_drv->lock);
list_add(&tun->list, &ipsecmgr_drv->tun_db);
ipsecmgr_drv->max_mtu = dev->mtu;
- write_unlock(&ipsecmgr_drv->lock);
+ write_unlock_bh(&ipsecmgr_drv->lock);
nss_ipsecmgr_info("NSS IPsec manager loaded: %s\n", NSS_CLIENT_BUILD_ID);
return 0;
--- a/ipsecmgr/v2.0/nss_ipsecmgr_flow.c
+++ b/ipsecmgr/v2.0/nss_ipsecmgr_flow.c
@@ -1,6 +1,6 @@
/*
**************************************************************************
- * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
@@ -149,10 +149,10 @@ static bool nss_ipsecmgr_flow_update_db(
hash_idx = nss_ipsecmgr_flow_tuple2hash(&flow->state.tuple, NSS_IPSECMGR_FLOW_MAX);
- write_lock(&ipsecmgr_drv->lock);
+ write_lock_bh(&ipsecmgr_drv->lock);
sa = nss_ipsecmgr_sa_find(ipsecmgr_drv->sa_db, sa_tuple);
if (!sa) {
- write_unlock(&ipsecmgr_drv->lock);
+ write_unlock_bh(&ipsecmgr_drv->lock);
nss_ipsecmgr_trace("%px: failed to find SA during flow update", flow);
return false;
}
@@ -163,7 +163,7 @@ static bool nss_ipsecmgr_flow_update_db(
*/
nss_ipsecmgr_ref_add(&flow->ref, &sa->ref);
list_add(&flow->list, &ipsecmgr_drv->flow_db[hash_idx]);
- write_unlock(&ipsecmgr_drv->lock);
+ write_unlock_bh(&ipsecmgr_drv->lock);
return true;
}
--- a/ipsecmgr/v2.0/nss_ipsecmgr_tunnel.c
+++ b/ipsecmgr/v2.0/nss_ipsecmgr_tunnel.c
@@ -1,6 +1,6 @@
/*
**************************************************************************
- * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
@@ -273,7 +273,7 @@ static void nss_ipsecmgr_tunnel_mtu_upda
uint16_t max_mtu = 0;
bool update_mtu = false;
- write_lock(&ipsecmgr_drv->lock);
+ write_lock_bh(&ipsecmgr_drv->lock);
list_for_each_entry(tun, head, list) {
if (tun->dev->mtu > max_mtu)
max_mtu = tun->dev->mtu;
@@ -284,7 +284,7 @@ static void nss_ipsecmgr_tunnel_mtu_upda
update_mtu = true;
}
- write_unlock(&ipsecmgr_drv->lock);
+ write_unlock_bh(&ipsecmgr_drv->lock);
#ifdef NSS_IPSECMGR_PPE_SUPPORT
/*
@@ -627,9 +627,9 @@ struct net_device *nss_ipsecmgr_tunnel_a
#endif
}
- write_lock(&ipsecmgr_drv->lock);
+ write_lock_bh(&ipsecmgr_drv->lock);
list_add(&tun->list, &ipsecmgr_drv->tun_db);
- write_unlock(&ipsecmgr_drv->lock);
+ write_unlock_bh(&ipsecmgr_drv->lock);
nss_ipsecmgr_tunnel_mtu(dev, skb_dev ? skb_dev->mtu : dev->mtu);
--- a/ipsecmgr/v2.0/plugins/xfrm/nss_ipsec_xfrm.c
+++ b/ipsecmgr/v2.0/plugins/xfrm/nss_ipsec_xfrm.c
@@ -1,5 +1,6 @@
/* Copyright (c) 2021, The Linux Foundation. All rights reserved.
*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
@@ -254,7 +255,7 @@ static void nss_ipsec_xfrm_flush_flow_by
for (count = NSS_IPSEC_XFRM_FLOW_DB_MAX; count--; db_head++) {
list_for_each_entry_safe(flow, tmp, db_head, list_entry) {
- if (flow->sa == sa) {
+ if (READ_ONCE(flow->sa) == sa) {
list_del_init(&flow->list_entry);
list_add(&flow->list_entry, &free_head);
}
--- a/ipsecmgr/v2.0/plugins/xfrm/nss_ipsec_xfrm_flow.c
+++ b/ipsecmgr/v2.0/plugins/xfrm/nss_ipsec_xfrm_flow.c
@@ -1,4 +1,5 @@
/* Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -50,9 +51,9 @@ static void nss_ipsec_xfrm_flow_final(st
/*
* Release reference to the parent SA.
*/
- if (flow->sa) {
- nss_ipsec_xfrm_sa_deref(flow->sa);
- flow->sa = NULL;
+ if (READ_ONCE(flow->sa)) {
+ nss_ipsec_xfrm_sa_deref(READ_ONCE(flow->sa));
+ WRITE_ONCE(flow->sa, NULL);
}
if (flow->pol) {
@@ -194,7 +195,7 @@ struct nss_ipsec_xfrm_flow *nss_ipsec_xf
*/
bool nss_ipsec_xfrm_flow_update(struct nss_ipsec_xfrm_flow *flow, struct nss_ipsec_xfrm_sa *sa)
{
- struct nss_ipsec_xfrm_sa *flow_sa = flow->sa;
+ struct nss_ipsec_xfrm_sa *flow_sa = READ_ONCE(flow->sa);
enum nss_ipsecmgr_status status;
/*
@@ -213,12 +214,14 @@ bool nss_ipsec_xfrm_flow_update(struct n
return true;
}
+ if (cmpxchg(&flow->sa, flow_sa, sa) != flow_sa) {
+ nss_ipsec_xfrm_info("%p: Flow migrated to newer SA by other CPU\n", flow);
+ return false;
+ }
- nss_ipsec_xfrm_info("%p: Flow migrated from SA %p to SA %p\n", flow, flow_sa, sa);
-
- xchg(&flow->sa, nss_ipsec_xfrm_sa_ref(sa));
+ nss_ipsec_xfrm_sa_ref(sa);
nss_ipsec_xfrm_sa_deref(flow_sa);
-
+ nss_ipsec_xfrm_info("%p: Flow migrated from SA %p to SA %p\n", flow, flow_sa, sa);
return true;
}
@@ -236,7 +239,7 @@ void nss_ipsec_xfrm_flow_dealloc(struct
atomic64_inc(&drv->stats.flow_dealloced);
- sa = flow->sa;
+ sa = READ_ONCE(flow->sa);
BUG_ON(!sa);
tun = sa->tun;
@@ -282,7 +285,7 @@ struct nss_ipsec_xfrm_flow *nss_ipsec_xf
flow->tuple.sport, flow->tuple.dport);
}
- flow->sa = nss_ipsec_xfrm_sa_ref(sa);
+ WRITE_ONCE(flow->sa, nss_ipsec_xfrm_sa_ref(sa));
status = nss_ipsecmgr_flow_add(sa->tun->dev, &flow->tuple, &sa->tuple);
if ((status != NSS_IPSECMGR_DUPLICATE_FLOW) && (status != NSS_IPSECMGR_OK)) {
--- a/ipsecmgr/v2.0/plugins/xfrm/Makefile
+++ b/ipsecmgr/v2.0/plugins/xfrm/Makefile
@@ -14,3 +14,7 @@ ccflags-y += -I$(obj)/
ccflags-y += -DNSS_IPSEC_XFRM_DEBUG_LEVEL=3
ccflags-y += -DNSS_CLIENT_BUILD_ID="$(BUILD_ID)"
ccflags-y += -Wall -Werror
+
+ifeq ($(SoC),$(filter $(SoC),ipq50xx ipq50xx_64))
+ccflags-y += -DNSS_IPSEC_XFRM_IPQ50XX
+endif
--- a/ipsecmgr/v2.0/plugins/xfrm/nss_ipsec_xfrm_sa.c
+++ b/ipsecmgr/v2.0/plugins/xfrm/nss_ipsec_xfrm_sa.c
@@ -55,13 +55,15 @@ struct nss_ipsec_xfrm_algo {
static struct nss_ipsec_xfrm_algo xfrm_algo[] = {
{.cipher_name = "cbc(aes)", .auth_name = "hmac(sha1)", .algo = NSS_IPSECMGR_ALGO_AES_CBC_SHA1_HMAC},
{.cipher_name = "cbc(des3_ede)", .auth_name = "hmac(sha1)", .algo = NSS_IPSECMGR_ALGO_3DES_CBC_SHA1_HMAC},
+#ifndef NSS_IPSEC_XFRM_IPQ50XX
{.cipher_name = "cbc(aes)", .auth_name = "hmac(md5)", .algo = NSS_IPSECMGR_ALGO_AES_CBC_MD5_HMAC},
{.cipher_name = "cbc(des3_ede)", .auth_name = "hmac(md5)", .algo = NSS_IPSECMGR_ALGO_3DES_CBC_MD5_HMAC},
{.cipher_name = "rfc4106(gcm(aes))", .auth_name = "rfc4106(gcm(aes))", .algo = NSS_IPSECMGR_ALGO_AES_GCM_GMAC_RFC4106},
{.cipher_name = "ecb(cipher_null)", .auth_name = "hmac(sha1)", .algo = NSS_IPSECMGR_ALGO_NULL_CIPHER_SHA1_HMAC},
+ {.cipher_name = "ecb(cipher_null)", .auth_name = "hmac(sha256)", .algo = NSS_IPSECMGR_ALGO_NULL_CIPHER_SHA256_HMAC},
+#endif
{.cipher_name = "cbc(aes)", .auth_name = "hmac(sha256)", .algo = NSS_IPSECMGR_ALGO_AES_CBC_SHA256_HMAC},
{.cipher_name = "cbc(des3_ede)", .auth_name = "hmac(sha256)", .algo = NSS_IPSECMGR_ALGO_3DES_CBC_SHA256_HMAC},
- {.cipher_name = "ecb(cipher_null)", .auth_name = "hmac(sha256)", .algo = NSS_IPSECMGR_ALGO_NULL_CIPHER_SHA256_HMAC},
};
/*

File diff suppressed because it is too large.


@@ -0,0 +1,357 @@
From 580a9ff682ea4e19cb30720662aeecb1ab5df859 Mon Sep 17 00:00:00 2001
From: Apoorv Gupta <apoogupt@codeaurora.org>
Date: Mon, 12 Jul 2021 18:12:43 +0530
Subject: [PATCH] [qca-nss-clients] Options not supported with VxLAN
Flows through a VxLAN tunnel should not be accelerated
if either of the following options is used:
1. RSC (route short-circuit)
2. GPE (Generic Protocol Extension)
Change-Id: I183d24925e1a99ae49a9f1f6011bb7f08eab92f2
Signed-off-by: Apoorv Gupta <apoogupt@codeaurora.org>
---
vxlanmgr/nss_vxlanmgr_tunnel.c | 22 +++++++++++++++++++---
1 file changed, 19 insertions(+), 3 deletions(-)
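A minimal sketch of how the reworked flag parser below is meant to behave (simplified; it combines the two kernel-version variants shown in the diff): tunnels configured with RSC or GPE yield an empty flag set, which tunnel creation then treats as "offload not supported" and leaves to the software path.

static uint16_t example_vxlan_flags_parse(uint32_t priv_flags)
{
	uint16_t flags = 0;

	if (priv_flags & (VXLAN_F_RSC | VXLAN_F_GPE))
		return 0;	/* unsupported options: no acceleration rule */

	if (priv_flags & VXLAN_F_GBP)
		flags |= NSS_VXLAN_RULE_FLAG_GBP_ENABLED;

	/* ... IPv4/IPv6 and UDP-checksum flags as in the diff ... */
	return flags | NSS_VXLAN_RULE_FLAG_UDP;
}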
--- a/vxlanmgr/nss_vxlanmgr_tunnel.c
+++ b/vxlanmgr/nss_vxlanmgr_tunnel.c
@@ -1,6 +1,6 @@
/*
**************************************************************************
- * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
@@ -91,18 +91,23 @@ static uint16_t nss_vxlanmgr_tunnel_flag
uint16_t flags = 0;
uint32_t priv_flags = priv->flags;
+ if (priv_flags & VXLAN_F_RSC)
+ return flags;
if (priv_flags & VXLAN_F_GBP)
flags |= NSS_VXLAN_RULE_FLAG_GBP_ENABLED;
- if (priv_flags & VXLAN_F_IPV6)
+
+ if (priv_flags & VXLAN_F_IPV6) {
flags |= NSS_VXLAN_RULE_FLAG_IPV6;
- else if (!(priv_flags & VXLAN_F_IPV6))
+ if (!(priv_flags & VXLAN_F_UDP_ZERO_CSUM6_TX))
+ flags |= NSS_VXLAN_RULE_FLAG_ENCAP_L4_CSUM_REQUIRED;
+ } else {
flags |= NSS_VXLAN_RULE_FLAG_IPV4;
+ if (priv_flags & VXLAN_F_UDP_CSUM)
+ flags |= NSS_VXLAN_RULE_FLAG_ENCAP_L4_CSUM_REQUIRED;
+ }
+
if (priv->cfg.tos == 1)
flags |= NSS_VXLAN_RULE_FLAG_INHERIT_TOS;
- if (priv_flags & VXLAN_F_UDP_CSUM)
- flags |= NSS_VXLAN_RULE_FLAG_ENCAP_L4_CSUM_REQUIRED;
- else if (!(priv_flags & VXLAN_F_UDP_ZERO_CSUM6_TX))
- flags |= NSS_VXLAN_RULE_FLAG_ENCAP_L4_CSUM_REQUIRED;
return (flags | NSS_VXLAN_RULE_FLAG_UDP);
}
@@ -113,18 +118,25 @@ static uint16_t nss_vxlanmgr_tunnel_flag
struct vxlan_config *cfg = &priv->cfg;
uint32_t priv_flags = cfg->flags;
+ if (priv_flags & VXLAN_F_RSC)
+ return flags;
+ if (priv_flags & VXLAN_F_GPE)
+ return flags;
if (priv_flags & VXLAN_F_GBP)
flags |= NSS_VXLAN_RULE_FLAG_GBP_ENABLED;
- if (priv_flags & VXLAN_F_IPV6)
+
+ if (priv_flags & VXLAN_F_IPV6) {
flags |= NSS_VXLAN_RULE_FLAG_IPV6;
- else if (!(priv_flags & VXLAN_F_IPV6))
+ if (!(priv_flags & VXLAN_F_UDP_ZERO_CSUM6_TX))
+ flags |= NSS_VXLAN_RULE_FLAG_ENCAP_L4_CSUM_REQUIRED;
+ } else {
flags |= NSS_VXLAN_RULE_FLAG_IPV4;
+ if (!(priv_flags & VXLAN_F_UDP_ZERO_CSUM_TX))
+ flags |= NSS_VXLAN_RULE_FLAG_ENCAP_L4_CSUM_REQUIRED;
+ }
+
if (cfg->tos == 1)
flags |= NSS_VXLAN_RULE_FLAG_INHERIT_TOS;
- if (priv_flags & VXLAN_F_UDP_ZERO_CSUM_TX)
- flags |= NSS_VXLAN_RULE_FLAG_ENCAP_L4_CSUM_REQUIRED;
- else if (!(priv_flags & VXLAN_F_UDP_ZERO_CSUM6_TX))
- flags |= NSS_VXLAN_RULE_FLAG_ENCAP_L4_CSUM_REQUIRED;
return (flags | NSS_VXLAN_RULE_FLAG_UDP);
}
@@ -436,7 +448,8 @@ static struct notifier_block nss_vxlanmg
/*
* nss_vxlanmgr_tunnel_inner_stats()
- * Update vxlan netdev stats with inner node stats
+ * Update vxlan netdev stats with inner node stats.
+ * Note: Reference on the netdevice is expected to be held by the caller at the time this function is called.
*/
static void nss_vxlanmgr_tunnel_inner_stats(struct nss_vxlanmgr_tun_ctx *tun_ctx, struct nss_vxlan_msg *nvm)
{
@@ -450,7 +463,6 @@ static void nss_vxlanmgr_tunnel_inner_st
stats = &nvm->msg.stats;
dev = tun_ctx->dev;
- dev_hold(dev);
netdev_stats = (struct net_device_stats *)&dev->stats;
/*
@@ -469,7 +481,6 @@ static void nss_vxlanmgr_tunnel_inner_st
u64_stats_add(&tstats->tx_bytes, stats->node_stats.tx_bytes);
u64_stats_update_end(&tstats->syncp);
netdev_stats->tx_dropped += dropped;
- dev_put(dev);
}
/*
@@ -514,7 +525,7 @@ static void nss_vxlanmgr_tunnel_outer_st
* nss_vxlanmgr_tunnel_fdb_update()
* Update vxlan fdb entries
*/
-static void nss_vxlanmgr_tunnel_fdb_update(struct nss_vxlanmgr_tun_ctx *tun_ctx, struct nss_vxlan_msg *nvm)
+static void nss_vxlanmgr_tunnel_fdb_update(struct net_device *dev, uint32_t vni, struct nss_vxlan_msg *nvm)
{
uint8_t *mac;
uint16_t i, nentries;
@@ -523,13 +534,10 @@ static void nss_vxlanmgr_tunnel_fdb_upda
db_stats = &nvm->msg.db_stats;
nentries = db_stats->cnt;
- priv = netdev_priv(tun_ctx->dev);
-
- dev_hold(tun_ctx->dev);
+ priv = netdev_priv(dev);
if (nentries > NSS_VXLAN_MACDB_ENTRIES_PER_MSG) {
- nss_vxlanmgr_warn("%px: No more than 20 entries allowed per message.\n", tun_ctx->dev);
- dev_put(tun_ctx->dev);
+ nss_vxlanmgr_warn("%px: No more than 20 entries allowed per message.\n", dev);
return;
}
@@ -539,11 +547,10 @@ static void nss_vxlanmgr_tunnel_fdb_upda
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 5, 7))
vxlan_fdb_update_mac(priv, mac);
#else
- vxlan_fdb_update_mac(priv, mac, tun_ctx->vni);
+ vxlan_fdb_update_mac(priv, mac, vni);
#endif
}
}
- dev_put(tun_ctx->dev);
}
/*
@@ -555,20 +562,29 @@ static void nss_vxlanmgr_tunnel_inner_no
struct net_device *dev = (struct net_device *)app_data;
struct nss_vxlanmgr_tun_ctx *tun_ctx;
struct nss_vxlan_msg *nvm;
+ uint32_t vni;
if (!ncm) {
nss_vxlanmgr_info("%px: NULL msg received.\n", dev);
return;
}
+ if (!dev) {
+ nss_vxlanmgr_info("%px: NULL device received.\n", dev);
+ return;
+ }
+
spin_lock_bh(&vxlan_ctx.tun_lock);
+ dev_hold(dev);
tun_ctx = nss_vxlanmgr_tunnel_ctx_dev_get(dev);
if (!tun_ctx) {
spin_unlock_bh(&vxlan_ctx.tun_lock);
nss_vxlanmgr_warn("%px: Invalid tunnel context\n", dev);
+ dev_put(dev);
return;
}
+ vni = tun_ctx->vni;
nvm = (struct nss_vxlan_msg *)ncm;
switch (nvm->cm.type) {
case NSS_VXLAN_MSG_TYPE_STATS_SYNC:
@@ -576,14 +592,24 @@ static void nss_vxlanmgr_tunnel_inner_no
nss_vxlanmgr_tun_stats_sync(tun_ctx, nvm);
break;
case NSS_VXLAN_MSG_TYPE_MACDB_STATS:
- nss_vxlanmgr_tunnel_fdb_update(tun_ctx, nvm);
nss_vxlanmgr_tun_macdb_stats_sync(tun_ctx, nvm);
- break;
- default:
+
+ /*
+ * Release the lock before updating the Linux FDB entry.
+ * This will ensure there is no deadlock when a potential
+ * MAC add event occurs at same time, which needs to hold
+ * the kernel's hash lock followed by the tunnel ctx lock.
+ */
spin_unlock_bh(&vxlan_ctx.tun_lock);
- nss_vxlanmgr_info("%px: Unknown Event from NSS", dev);
+
+ nss_vxlanmgr_tunnel_fdb_update(dev, vni, nvm);
+ dev_put(dev);
return;
+ default:
+ nss_vxlanmgr_info("%px: Unknown Event from NSS", dev);
}
+
+ dev_put(dev);
spin_unlock_bh(&vxlan_ctx.tun_lock);
}
@@ -829,7 +855,7 @@ done:
*/
int nss_vxlanmgr_tunnel_destroy(struct net_device *dev)
{
- uint32_t inner_ifnum, outer_ifnum;
+ uint32_t inner_ifnum, outer_ifnum, tun_count;
struct nss_vxlanmgr_tun_ctx *tun_ctx;
struct nss_vxlan_msg vxlanmsg;
nss_tx_status_t ret;
@@ -866,16 +892,21 @@ int nss_vxlanmgr_tunnel_destroy(struct n
nss_vxlanmgr_tun_stats_deinit(tun_ctx);
nss_vxlanmgr_tun_stats_dentry_remove(tun_ctx);
+ dev_put(tun_ctx->dev);
kfree(tun_ctx);
- if (!vxlan_ctx.tun_count) {
- /*
- * Unregister fdb notifier chain if
- * all vxlan tunnels are destroyed.
- */
+ /*
+ * Unregister fdb notifier chain if
+ * all vxlan tunnels are destroyed.
+ */
+ spin_lock_bh(&vxlan_ctx.tun_lock);
+ tun_count = vxlan_ctx.tun_count;
+ spin_unlock_bh(&vxlan_ctx.tun_lock);
+ if (!tun_count) {
vxlan_fdb_unregister_notify(&nss_vxlanmgr_tunnel_fdb_notifier);
}
- nss_vxlanmgr_info("%px: VxLAN interface count is #%d\n", dev, vxlan_ctx.tun_count);
+
+ nss_vxlanmgr_info("%px: VxLAN interface count is #%d\n", dev, tun_count);
memset(&vxlanmsg, 0, sizeof(struct nss_vxlan_msg));
ret = nss_vxlanmgr_tunnel_tx_msg_sync(vxlan_ctx.nss_ctx,
@@ -929,6 +960,7 @@ int nss_vxlanmgr_tunnel_create(struct ne
struct nss_vxlan_rule_msg *vxlan_cfg;
struct nss_ctx_instance *nss_ctx;
uint32_t inner_ifnum, outer_ifnum;
+ uint16_t parse_flags;
nss_tx_status_t ret;
spin_lock_bh(&vxlan_ctx.tun_lock);
@@ -939,7 +971,20 @@ int nss_vxlanmgr_tunnel_create(struct ne
}
spin_unlock_bh(&vxlan_ctx.tun_lock);
+ /*
+ * The reference to the dev will be released in nss_vxlanmgr_tunnel_destroy()
+ */
dev_hold(dev);
+ priv = netdev_priv(dev);
+ parse_flags = nss_vxlanmgr_tunnel_flags_parse(priv);
+
+ /*
+ * Check if the tunnel is supported.
+ */
+ if (!parse_flags) {
+ nss_vxlanmgr_warn("%px: Tunnel offload not supported\n", dev);
+ goto ctx_alloc_fail;
+ }
tun_ctx = kzalloc(sizeof(struct nss_vxlanmgr_tun_ctx), GFP_ATOMIC);
if (!tun_ctx) {
@@ -988,12 +1033,11 @@ int nss_vxlanmgr_tunnel_create(struct ne
memset(&vxlanmsg, 0, sizeof(struct nss_vxlan_msg));
vxlan_cfg = &vxlanmsg.msg.vxlan_create;
- priv = netdev_priv(dev);
vxlan_cfg->vni = vxlan_get_vni(priv);
- vxlan_cfg->tunnel_flags = nss_vxlanmgr_tunnel_flags_parse(priv);
+ vxlan_cfg->tunnel_flags = parse_flags;
vxlan_cfg->src_port_min = priv->cfg.port_min;
vxlan_cfg->src_port_max = priv->cfg.port_max;
- vxlan_cfg->dest_port = priv->cfg.dst_port;
+ vxlan_cfg->dest_port = ntohs(priv->cfg.dst_port);
vxlan_cfg->tos = priv->cfg.tos;
vxlan_cfg->ttl = (priv->cfg.ttl ? priv->cfg.ttl : IPDEFTTL);
@@ -1059,7 +1103,6 @@ int nss_vxlanmgr_tunnel_create(struct ne
spin_unlock_bh(&vxlan_ctx.tun_lock);
nss_vxlanmgr_info("%px: VxLAN interface count is #%d\n", dev, vxlan_ctx.tun_count);
- dev_put(dev);
return NOTIFY_DONE;
config_fail:
--- a/vxlanmgr/nss_vxlanmgr_tun_stats.c
+++ b/vxlanmgr/nss_vxlanmgr_tun_stats.c
@@ -89,7 +89,7 @@ static int nss_vxlanmgr_tun_stats_show(s
seq_printf(m, "\t\tflow_label = %u\n", tun_ctx->flow_label);
seq_printf(m, "\t\tsrc_port_min = %u\n", tun_ctx->src_port_min);
seq_printf(m, "\t\tsrc_port_max = %u\n", tun_ctx->src_port_max);
- seq_printf(m, "\t\tdest_port = %u\n", ntohs(tun_ctx->dest_port));
+ seq_printf(m, "\t\tdest_port = %u\n", tun_ctx->dest_port);
seq_printf(m, "\t\ttos = %u\n", tun_ctx->tos);
seq_printf(m, "\t\tttl = %u\n", tun_ctx->ttl);
@@ -173,6 +173,7 @@ void nss_vxlanmgr_tun_stats_update(uint6
/*
* nss_vxlanmgr_tun_macdb_stats_sync()
* Sync function for vxlan fdb entries
+ * Note: Reference on the netdevice is expected to be held by the caller at the time this function is called.
*/
void nss_vxlanmgr_tun_macdb_stats_sync(struct nss_vxlanmgr_tun_ctx *tun_ctx, struct nss_vxlan_msg *nvm)
{
@@ -183,11 +184,8 @@ void nss_vxlanmgr_tun_macdb_stats_sync(s
db_stats = &nvm->msg.db_stats;
nentries = db_stats->cnt;
- dev_hold(tun_ctx->dev);
-
if (nentries > NSS_VXLAN_MACDB_ENTRIES_PER_MSG) {
nss_vxlanmgr_warn("%px: No more than 20 entries allowed per message.\n", tun_ctx->dev);
- dev_put(tun_ctx->dev);
return;
}
@@ -203,7 +201,6 @@ void nss_vxlanmgr_tun_macdb_stats_sync(s
}
}
}
- dev_put(tun_ctx->dev);
}
/*
@@ -299,7 +296,7 @@ bool nss_vxlanmgr_tun_stats_dentry_creat
* nss_vxlanmgr_tun_stats_dentry_deinit()
* Cleanup the debugfs tree.
*/
-void nss_vxlanmgr_tun_stats_dentry_deinit()
+void nss_vxlanmgr_tun_stats_dentry_deinit(void)
{
debugfs_remove_recursive(vxlan_ctx.dentry);
}
@@ -308,7 +305,7 @@ void nss_vxlanmgr_tun_stats_dentry_deini
* nss_vxlanmgr_tun_stats_dentry_init()
* Create VxLAN tunnel statistics debugfs entry.
*/
-bool nss_vxlanmgr_tun_stats_dentry_init()
+bool nss_vxlanmgr_tun_stats_dentry_init(void)
{
/*
* initialize debugfs.


@@ -0,0 +1,334 @@
--- a/ipsecmgr/v2.0/plugins/klips/nss_ipsec_klips.c
+++ b/ipsecmgr/v2.0/plugins/klips/nss_ipsec_klips.c
@@ -146,7 +146,6 @@ static int nss_ipsec_klips_offload_esp(s
static struct net_protocol esp_protocol = {
.handler = nss_ipsec_klips_offload_esp,
.no_policy = 1,
- .netns_ok = 1,
};
/*
@@ -304,7 +303,7 @@ static struct nss_ipsec_klips_tun *nss_i
* Read/write lock needs to taken by the caller since sa
* table is looked up here
*/
- BUG_ON(write_can_lock(&tunnel_map.lock));
+ lockdep_assert_held_write(&tunnel_map.lock);
if (!klips_dev) {
return NULL;
@@ -387,7 +386,7 @@ static struct nss_ipsec_klips_tun *nss_i
* Read/write lock needs to be taken by the caller since tunnel
* table is looked up here
*/
- BUG_ON(write_can_lock(&tunnel_map.lock));
+ lockdep_assert_held_write(&tunnel_map.lock);
for (i = 0, tun = tunnel_map.tbl; i < tunnel_map.max; i++, tun++) {
if (!tun->klips_dev) {
@@ -507,7 +506,7 @@ static struct nss_ipsec_klips_sa *nss_ip
* Read/write lock needs to taken by the caller since sa
* table is looked up here
*/
- BUG_ON(write_can_lock(&tunnel_map.lock));
+ lockdep_assert_held_write(&tunnel_map.lock);
list_for_each_entry_safe(sa, tmp, head, list) {
if (sa->sid == crypto_idx)
@@ -531,7 +530,7 @@ static void nss_ipsec_klips_sa_flush(str
* Read/write lock needs to taken by the caller since sa
* table is modified here
*/
- BUG_ON(write_can_lock(&tunnel_map.lock));
+ lockdep_assert_held_write(&tunnel_map.lock);
list_for_each_entry_safe(sa, tmp, head, list) {
list_del_init(&sa->list);
@@ -1293,7 +1292,7 @@ static void nss_ipsec_klips_register_nat
/*
* write lock is needed as we are modifying tunnel entry.
*/
- BUG_ON(write_can_lock(&tunnel_map.lock));
+ lockdep_assert_held_write(&tunnel_map.lock);
sock_hold(sk);
tun->sk_encap_rcv = udp_sk(sk)->encap_rcv;
@@ -1310,7 +1309,7 @@ static void nss_ipsec_klips_unregister_n
/*
* write lock is needed as we are modifying tunnel entry.
*/
- BUG_ON(write_can_lock(&tunnel_map.lock));
+ lockdep_assert_held_write(&tunnel_map.lock);
xchg(&udp_sk(tun->sk)->encap_rcv, tun->sk_encap_rcv);
sock_put(tun->sk);
--- a/ipsecmgr/v2.0/plugins/xfrm/nss_ipsec_xfrm.c
+++ b/ipsecmgr/v2.0/plugins/xfrm/nss_ipsec_xfrm.c
@@ -1243,6 +1243,7 @@ drop:
return -EINVAL;
}
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(5, 4, 0))
/*
* nss_ipsec_xfrm_v4_output_finish()
* This is called for non-offloaded transformations after the NF_POST routing hooks
@@ -1264,9 +1265,8 @@ static int nss_ipsec_xfrm_v4_output_fini
*/
static int nss_ipsec_xfrm_v4_extract_input(struct xfrm_state *x, struct sk_buff *skb)
{
- struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm;
-
nss_ipsec_xfrm_trace("%px: Redirect to native xfrm stack\n", skb);
+ struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm;
return drv->xsa.v4->extract_input(x, skb);
}
@@ -1278,11 +1278,12 @@ static int nss_ipsec_xfrm_v4_extract_inp
*/
static int nss_ipsec_xfrm_v4_extract_output(struct xfrm_state *x, struct sk_buff *skb)
{
- struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm;
nss_ipsec_xfrm_trace("%px: Redirect to native xfrm stack\n", skb);
+ struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm;
return drv->xsa.v4->extract_output(x, skb);
}
+#endif
/*
* nss_ipsec_xfrm_v4_transport_finish()
@@ -1381,14 +1382,14 @@ fallback:
* nss_ipsec_xfrm_esp_init_state()
* Initialize IPsec xfrm state of type ESP.
*/
-static int nss_ipsec_xfrm_esp_init_state(struct xfrm_state *x)
+static int nss_ipsec_xfrm_esp_init_state(struct xfrm_state *x, struct netlink_ext_ack *extac)
{
struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm;
struct nss_ipsec_xfrm_tunnel *tun = NULL;
struct nss_ipsec_xfrm_sa *sa = NULL;
xfrm_address_t remote = {0};
xfrm_address_t local = {0};
- struct net_device *local_dev;
+ struct net_device *local_dev = NULL;
bool new_tun = 0;
size_t ip_addr_len;
@@ -1396,7 +1397,7 @@ static int nss_ipsec_xfrm_esp_init_state
local_dev = ip_dev_find(&init_net, x->id.daddr.a4);
ip_addr_len = sizeof(local.a4);
} else {
- local_dev = ipv6_dev_find(&init_net, &x->id.daddr.in6, 1);
+ local_dev = ipv6_dev_find(&init_net, &x->id.daddr.in6, local_dev);
ip_addr_len = sizeof(local.a6);
}
@@ -1737,6 +1738,7 @@ drop:
return -EINVAL;
}
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0))
/*
* nss_ipsec_xfrm_v6_output_finish()
* This is called for non-offloaded transformations after the NF_POST routing hooks
@@ -1758,9 +1760,9 @@ static int nss_ipsec_xfrm_v6_output_fini
*/
static int nss_ipsec_xfrm_v6_extract_input(struct xfrm_state *x, struct sk_buff *skb)
{
- struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm;
nss_ipsec_xfrm_trace("%px: Redirect to native xfrm stack\n", skb);
+ struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm;
return drv->xsa.v6->extract_input(x, skb);
}
@@ -1772,11 +1774,11 @@ static int nss_ipsec_xfrm_v6_extract_inp
*/
static int nss_ipsec_xfrm_v6_extract_output(struct xfrm_state *x, struct sk_buff *skb)
{
- struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm;
-
nss_ipsec_xfrm_trace("%px: Redirect to native xfrm stack\n", skb);
+ struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm;
return drv->xsa.v6->extract_output(x, skb);
}
+#endif
/*
* nss_ipsec_xfrm_v6_transport_finish()
@@ -1804,22 +1806,25 @@ void nss_ipsec_xfrm_v6_local_error(struc
return drv->xsa.v6->local_error(skb, mtu);
}
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0))
/*
* nss_ipsec_xfrm_v6_esp_hdr_offset()
* Invoked by stack for IPv6 transport mode in encap.
* Redirect to the native version.
*/
-static int nss_ipsec_xfrm_v6_esp_hdr_offset(struct xfrm_state *x, struct sk_buff *skb, u8 **prevhdr)
+static int nss_ipsec_xfrm_v6_esp_hdr_offset(struct xfrm_state *x, struct sk_buff *skb, u8 **prevhdr)
{
- struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm;
nss_ipsec_xfrm_trace("%px: Redirect to native esp6 stack\n", skb);
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0))
- return drv->xsa.v6->type_map[IPPROTO_ESP]->hdr_offset(x, skb, prevhdr);
-#else
- return drv->xsa.v6->type_esp->hdr_offset(x, skb, prevhdr);
-#endif
+
+ struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm;
+ #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0))
+ return drv->xsa.v6->type_map[IPPROTO_ESP]->hdr_offset(x, skb, prevhdr);
+ #else
+ return drv->xsa.v6->type_esp->hdr_offset(x, skb, prevhdr);
+ #endif
}
+#endif
/*
* nss_ipsec_xfrm_esp6_rcv()
@@ -1970,7 +1975,6 @@ static void nss_ipsec_xfrm_state_delete(
nss_ipsec_xfrm_del_tun(drv, tun);
}
- return;
}
/*
@@ -2045,9 +2049,11 @@ static struct xfrm_state_afinfo xfrm_v4_
.init_temprop = nss_ipsec_xfrm_v4_init_param,
#endif
.output = nss_ipsec_xfrm_v4_output,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0))
.output_finish = nss_ipsec_xfrm_v4_output_finish,
.extract_input = nss_ipsec_xfrm_v4_extract_input,
.extract_output = nss_ipsec_xfrm_v4_extract_output,
+#endif
.transport_finish = nss_ipsec_xfrm_v4_transport_finish,
.local_error = nss_ipsec_xfrm_v4_local_error,
};
@@ -2092,7 +2098,6 @@ struct xfrm_mode xfrm_v6_mode_map[XFRM_M
* IPv4 xfrm_type ESP object.
*/
static const struct xfrm_type xfrm_v4_type = {
- .description = "NSS ESP4",
.owner = THIS_MODULE,
.proto = IPPROTO_ESP,
.flags = XFRM_TYPE_REPLAY_PROT,
@@ -2128,9 +2133,11 @@ static struct xfrm_state_afinfo xfrm_v6_
.state_sort = nss_ipsec_xfrm_v6_sort_state,
#endif
.output = nss_ipsec_xfrm_v6_output,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0))
.output_finish = nss_ipsec_xfrm_v6_output_finish,
.extract_input = nss_ipsec_xfrm_v6_extract_input,
.extract_output = nss_ipsec_xfrm_v6_extract_output,
+#endif
.transport_finish = nss_ipsec_xfrm_v6_transport_finish,
.local_error = nss_ipsec_xfrm_v6_local_error,
};
@@ -2139,7 +2146,6 @@ static struct xfrm_state_afinfo xfrm_v6_
* IPv6 xfrm_type ESP object.
*/
static const struct xfrm_type xfrm_v6_type = {
- .description = "NSS ESP6",
.owner = THIS_MODULE,
.proto = IPPROTO_ESP,
.flags = XFRM_TYPE_REPLAY_PROT,
@@ -2148,7 +2154,9 @@ static const struct xfrm_type xfrm_v6_ty
.get_mtu = nss_ipsec_xfrm_esp_get_mtu,
.input = nss_ipsec_xfrm_esp_input,
.output = nss_ipsec_xfrm_esp_output,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0))
.hdr_offset = nss_ipsec_xfrm_v6_esp_hdr_offset,
+#endif
};
/*
@@ -2234,7 +2242,6 @@ static void nss_ipsec_xfrm_restore_afinf
}
xfrm_unregister_type(base, family);
-
xfrm_state_update_afinfo(family, afinfo);
}
@@ -2319,14 +2326,10 @@ static void nss_ipsec_xfrm_override_afin
*/
int __init nss_ipsec_xfrm_init_module(void)
{
-
rwlock_init(&g_ipsec_xfrm.lock);
-
nss_ipsec_xfrm_init_tun_db(&g_ipsec_xfrm);
nss_ipsec_xfrm_init_flow_db(&g_ipsec_xfrm);
-
init_completion(&g_ipsec_xfrm.complete);
-
net_get_random_once(&g_ipsec_xfrm.hash_nonce, sizeof(g_ipsec_xfrm.hash_nonce));
/*
@@ -2354,7 +2357,6 @@ int __init nss_ipsec_xfrm_init_module(vo
nss_ipsec_xfrm_override_afinfo(&g_ipsec_xfrm, AF_INET6);
ecm_interface_ipsec_register_callbacks(&xfrm_ecm_ipsec_cb);
- ecm_notifier_register_connection_notify(&xfrm_ecm_notifier);
#if defined(NSS_L2TPV2_ENABLED)
l2tpmgr_register_ipsecmgr_callback_by_ipaddr(&xfrm_l2tp);
@@ -2367,6 +2369,7 @@ int __init nss_ipsec_xfrm_init_module(vo
/*
* Register for xfrm events
*/
+ ecm_notifier_register_connection_notify(&xfrm_ecm_notifier);
xfrm_register_km(&nss_ipsec_xfrm_mgr);
/*
@@ -2377,6 +2380,7 @@ int __init nss_ipsec_xfrm_init_module(vo
return 0;
unreg_v4_handler:
+ xfrm4_protocol_deregister(&xfrm4_proto, IPPROTO_ESP);
xfrm6_protocol_deregister(&xfrm6_proto, IPPROTO_ESP);
return -EAGAIN;
}
--- a/ipsecmgr/v2.0/plugins/xfrm/nss_ipsec_xfrm_sa.c
+++ b/ipsecmgr/v2.0/plugins/xfrm/nss_ipsec_xfrm_sa.c
@@ -183,7 +183,7 @@ static bool nss_ipsec_xfrm_sa_init_crypt
*/
static void nss_ipsec_xfrm_sa_init_tuple(struct nss_ipsec_xfrm_sa *sa, struct xfrm_state *x)
{
- struct net_device *local_dev;
+ struct net_device *local_dev = NULL;
sa->type = NSS_IPSECMGR_SA_TYPE_ENCAP;
sa->tuple.spi_index = ntohl(x->id.spi);
@@ -217,7 +217,7 @@ static void nss_ipsec_xfrm_sa_init_tuple
sa->tuple.dest_ip[2] = ntohl(x->id.daddr.a6[2]);
sa->tuple.dest_ip[3] = ntohl(x->id.daddr.a6[3]);
- local_dev = ipv6_dev_find(&init_net, (struct in6_addr *)x->id.daddr.a6, 1);
+ local_dev = ipv6_dev_find(&init_net, (struct in6_addr *)x->id.daddr.a6, local_dev);
}
/*
--- a/ipsecmgr/v2.0/plugins/xfrm/nss_ipsec_xfrm_tunnel.c
+++ b/ipsecmgr/v2.0/plugins/xfrm/nss_ipsec_xfrm_tunnel.c
@@ -130,7 +130,6 @@ err:
drop:
atomic64_inc(&drv->stats.inner_drop);
dev_kfree_skb_any(skb);
- return;
}
/*
@@ -194,7 +193,6 @@ static void nss_ipsec_xfrm_tunnel_rx_out
drop:
dev_kfree_skb_any(skb);
atomic64_inc(&drv->stats.outer_drop);
- return;
}
/*