From 274cc66de7abec625e4df2be4f6aed04fe6e0434 Mon Sep 17 00:00:00 2001 From: Sean Khan Date: Thu, 10 Jul 2025 20:59:33 -0400 Subject: [PATCH] nss-clients: backport and fixes for ipsecmgr, netlink, vxlanmgr, qdisc backport 12.5: * ipsecmgr * netlink * vxlanmgr * qdisc fixes: * ipsecmgr Signed-off-by: Sean Khan --- .../0033-qdisc-backport-12.5.patch | 377 +++++ .../0034-ipsecmgr-backport-12.5.patch | 561 +++++++ .../0035-netlink-backport-12.5.patch | 1441 +++++++++++++++++ .../0036-vxlanmgr-backport-12.5.patch | 357 ++++ .../0033-ipsecmgr-fix-compile-error.patch | 334 ++++ 5 files changed, 3070 insertions(+) create mode 100644 qca-nss-clients/patches-11.4/0033-qdisc-backport-12.5.patch create mode 100644 qca-nss-clients/patches-11.4/0034-ipsecmgr-backport-12.5.patch create mode 100644 qca-nss-clients/patches-11.4/0035-netlink-backport-12.5.patch create mode 100644 qca-nss-clients/patches-11.4/0036-vxlanmgr-backport-12.5.patch create mode 100644 qca-nss-clients/patches/0033-ipsecmgr-fix-compile-error.patch diff --git a/qca-nss-clients/patches-11.4/0033-qdisc-backport-12.5.patch b/qca-nss-clients/patches-11.4/0033-qdisc-backport-12.5.patch new file mode 100644 index 0000000..72d3453 --- /dev/null +++ b/qca-nss-clients/patches-11.4/0033-qdisc-backport-12.5.patch @@ -0,0 +1,377 @@ +From fa3a58742c4721221cb9a5ab11c65b6d60b77477 Mon Sep 17 00:00:00 2001 +From: Aniruddha Bhat Anemajalu +Date: Tue, 11 Jan 2022 11:22:08 -0800 +Subject: [PATCH] [qca-nss-clients] Check for qdisc before deleting the class + +Do not allow deleting the class before deleting the underlying Qdisc. + +Change-Id: I40f611cb1a5342ed58b4b1abcf1254d8a981a760 +Signed-off-by: Aniruddha Bhat Anemajalu +--- + nss_qdisc/nss_bf.c | 14 ++++++++++---- + nss_qdisc/nss_htb.c | 11 ++++++++--- + nss_qdisc/nss_wrr.c | 14 ++++++++++---- + 3 files changed, 28 insertions(+), 11 deletions(-) + +--- a/nss_qdisc/nss_bf.c ++++ b/nss_qdisc/nss_bf.c +@@ -1,9 +1,13 @@ + /* + ************************************************************************** + * Copyright (c) 2014-2017, 2019-2020, The Linux Foundation. All rights reserved. ++ * ++ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. ++ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. ++ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +@@ -309,11 +313,14 @@ static int nss_bf_delete_class(struct Qd + struct nss_qdisc *nq_child = (struct nss_qdisc *)qdisc_priv(cl->qdisc); + + /* +- * Since all classes are leaf nodes in our case, we dont have to make +- * that check. ++ * If the class is the root class or has qdiscs attached, we do not ++ * support deleting it.
+ */ +- if (cl == &q->root) ++ if ((cl == &q->root) || (cl->qdisc != &noop_qdisc)) { ++ nss_qdisc_warning("Cannot delete bf class %x as it is the root " ++ "class or has child qdisc attached\n", cl->nq.qos_tag); + return -EBUSY; ++ } + + /* + * The message to NSS should be sent to the parent of this class +@@ -327,7 +334,6 @@ static int nss_bf_delete_class(struct Qd + } + + sch_tree_lock(sch); +- qdisc_reset(cl->qdisc); + qdisc_class_hash_remove(&q->clhash, &cl->cl_common); + refcnt = nss_qdisc_atomic_sub_return(&cl->nq); + sch_tree_unlock(sch); +--- a/nss_qdisc/nss_htb.c ++++ b/nss_qdisc/nss_htb.c +@@ -1,9 +1,13 @@ + /* + ************************************************************************** + * Copyright (c) 2014-2017, 2019-2021, The Linux Foundation. All rights reserved. ++ * ++ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. ++ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. ++ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +@@ -534,10 +538,12 @@ static int nss_htb_delete_class(struct Q + int refcnt; + + /* +- * If the class still has child nodes, then we do not ++ * If the class still has child nodes or qdiscs, then we do not + * support deleting it. + */ +- if (cl->children) { ++ if ((cl->children) || (cl->qdisc != &noop_qdisc)) { ++ nss_qdisc_warning("Cannot delete htb class %x with child nodes " ++ "or qdisc attached\n", cl->nq.qos_tag); + return -EBUSY; + } + +@@ -568,7 +574,6 @@ static int nss_htb_delete_class(struct Q + } + + sch_tree_lock(sch); +- qdisc_reset(cl->qdisc); + qdisc_class_hash_remove(&q->clhash, &cl->sch_common); + + /* +--- a/nss_qdisc/nss_wrr.c ++++ b/nss_qdisc/nss_wrr.c +@@ -1,9 +1,13 @@ + /* + ************************************************************************** + * Copyright (c) 2014-2017, 2019-2021, The Linux Foundation. All rights reserved. ++ * ++ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. ++ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. ++ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +@@ -418,11 +422,14 @@ static int nss_wrr_delete_class(struct Q + int refcnt; + + /* +- * Since all classes are leaf nodes in our case, we dont have to make +- * that check. ++ * If the class is a root class or has a child qdisc attached ++ * we do not support deleting it. 
+ */ +- if (cl == &q->root) ++ if ((cl == &q->root) || (cl->qdisc != &noop_qdisc)) { ++ nss_qdisc_warning("Cannot delete wrr class %x as it is the " ++ "root class or has a child qdisc attached\n", cl->nq.qos_tag); + return -EBUSY; ++ } + + /* + * The message to NSS should be sent to the parent of this class +@@ -436,7 +443,6 @@ static int nss_wrr_delete_class(struct Q + } + + sch_tree_lock(sch); +- qdisc_reset(cl->qdisc); + qdisc_class_hash_remove(&q->clhash, &cl->cl_common); + + refcnt = nss_qdisc_atomic_sub_return(&cl->nq); +--- a/nss_qdisc/nss_ppe.c ++++ b/nss_qdisc/nss_ppe.c +@@ -1,7 +1,11 @@ + /* + ************************************************************************** + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. ++ * ++ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. ++ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. ++ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +@@ -28,12 +32,9 @@ + /* + * Max Resources per port + * +- * Currently, we are using only one multicast queue. + * In case of Loopback port, the resources are reserved + * for qdisc functionality. + */ +-#define NSS_PPE_MCAST_QUEUE_MAX 1 +- + #define NSS_PPE_LOOPBACK_L0_SP_MAX 1 + #define NSS_PPE_LOOPBACK_L0_CDRR_MAX 16 + #define NSS_PPE_LOOPBACK_L0_EDRR_MAX 16 +@@ -126,7 +127,7 @@ static struct nss_ppe_res *nss_ppe_res_e + + spin_lock_bh(&ppe_port->lock); + for (i = max; i > 0; i--) { +- res = kzalloc(sizeof(struct nss_ppe_res), GFP_KERNEL); ++ res = kzalloc(sizeof(struct nss_ppe_res), GFP_ATOMIC); + if (!res) { + nss_qdisc_error("Free queue list allocation failed for port %u\n", port); + goto fail; +@@ -275,9 +276,10 @@ int nss_ppe_port_res_alloc(void) + ppe_qdisc_port[i].base[NSS_PPE_UCAST_QUEUE] = cfg.ucastq_start; + + /* +- * Even though we reserve more mcast queues in the device tree, we only use 1 in qdiscs. ++ * Even though we reserve more mcast queues in the device tree, we only use 1 in qdiscs ++ * for the default queue. + */ +- ppe_qdisc_port[i].max[NSS_PPE_MCAST_QUEUE] = NSS_PPE_MCAST_QUEUE_MAX; ++ ppe_qdisc_port[i].max[NSS_PPE_MCAST_QUEUE] = cfg.mcastq_num; + ppe_qdisc_port[i].base[NSS_PPE_MCAST_QUEUE] = cfg.mcastq_start; + + ppe_qdisc_port[i].max[NSS_PPE_L0_CDRR] = cfg.l0cdrr_num; +@@ -576,6 +578,36 @@ static void nss_ppe_all_queue_enable(uin + } + + /* ++ * nss_ppe_assigned_queue_enable() ++ * Enables the assigned level L0 queues corresponding to a port in SSDK.
++ */ ++static void nss_ppe_assigned_queue_enable(uint32_t port_num) ++{ ++ uint32_t qid = nss_ppe_base_get(port_num, NSS_PPE_UCAST_QUEUE); ++ uint32_t mcast_qid = nss_ppe_base_get(port_num, NSS_PPE_MCAST_QUEUE); ++ struct nss_ppe_res *res; ++ struct nss_ppe_port *ppe_port = &ppe_qdisc_port[port_num]; ++ ++ spin_lock_bh(&ppe_port->lock); ++ res = ppe_port->res_used[NSS_PPE_UCAST_QUEUE]; ++ while (res) { ++ fal_qm_enqueue_ctrl_set(0, qid + res->offset, 1); ++ fal_scheduler_dequeue_ctrl_set(0, qid + res->offset, 1); ++ res = res->next; ++ } ++ ++ res = ppe_port->res_used[NSS_PPE_MCAST_QUEUE]; ++ while (res) { ++ fal_qm_enqueue_ctrl_set(0, mcast_qid + res->offset, 1); ++ fal_scheduler_dequeue_ctrl_set(0, mcast_qid + res->offset, 1); ++ res = res->next; ++ } ++ ++ spin_unlock_bh(&ppe_port->lock); ++ nss_qdisc_info("Enable SSDK level0 queue scheduler successful\n"); ++} ++ ++/* + * nss_ppe_l1_queue_scheduler_configure() + * Configures Level 1 queue scheduler in SSDK. + */ +@@ -585,11 +617,6 @@ static int nss_ppe_l1_queue_scheduler_co + uint32_t port_num = nss_ppe_port_num_get(nq); + struct nss_ppe_qdisc *npq = &nq->npq; + +- if (npq->scheduler.drr_weight >= NSS_PPE_DRR_WEIGHT_MAX) { +- nss_qdisc_warning("DRR weight:%d should be less than 1024\n", npq->scheduler.drr_weight); +- return -EINVAL; +- } +- + /* + * Disable all queues and set Level 1 SSDK configuration + * We need to disable and flush the queues before +@@ -597,6 +624,15 @@ + */ + nss_ppe_all_queue_disable(port_num); + ++ if (npq->scheduler.drr_weight >= NSS_PPE_DRR_WEIGHT_MAX) { ++ /* ++ * Currently assigned queues are enabled back by ++ * caller ++ */ ++ nss_qdisc_warning("DRR weight:%d should be less than 1024\n", npq->scheduler.drr_weight); ++ return -EINVAL; ++ } ++ + memset(&l1cfg, 0, sizeof(l1cfg)); + l1cfg.sp_id = port_num; + +@@ -614,11 +650,10 @@ + port_num, npq->l0spid, l1cfg.c_drr_id, l1cfg.c_pri, l1cfg.c_drr_wt, l1cfg.e_drr_id, l1cfg.e_pri, l1cfg.e_drr_wt, l1cfg.sp_id); + if (fal_queue_scheduler_set(0, npq->l0spid, NSS_PPE_FLOW_LEVEL - 1, port_num, &l1cfg) != 0) { + nss_qdisc_error("SSDK level1 queue scheduler configuration failed\n"); +- nss_ppe_all_queue_enable(port_num); + return -EINVAL; + } + +- nss_ppe_all_queue_enable(port_num); ++ nss_ppe_assigned_queue_enable(port_num); + + nss_qdisc_info("SSDK level1 queue scheduler configuration successful\n"); + return 0; +@@ -672,6 +707,7 @@ static int nss_ppe_l1_queue_scheduler_se + if (nss_ppe_l1_queue_scheduler_configure(nq) != 0) { + nss_qdisc_error("SSDK level1 queue scheduler configuration failed\n"); + nss_ppe_l1_res_free(nq); ++ nss_ppe_assigned_queue_enable(nss_ppe_port_num_get(nq)); + return -EINVAL; + } + +@@ -758,11 +794,13 @@ static int nss_ppe_l0_queue_scheduler_de + port_num, npq->q.ucast_qid, l0cfg.c_drr_id, l0cfg.c_pri, l0cfg.c_drr_wt, l0cfg.e_drr_id, l0cfg.e_pri, l0cfg.e_drr_wt, l0cfg.sp_id); + if (fal_queue_scheduler_set(0, npq->q.ucast_qid, NSS_PPE_QUEUE_LEVEL - 1, port_num, &l0cfg) != 0) { + nss_qdisc_error("SSDK level0 queue scheduler configuration failed\n"); +- nss_ppe_all_queue_enable(port_num); ++ nss_ppe_assigned_queue_enable(port_num); + return -EINVAL; + } + +- nss_ppe_all_queue_enable(port_num); ++ /* ++ * Assigned queues are enabled after the current resource is freed.
++ */ + + nss_qdisc_info("SSDK level0 queue scheduler configuration successful\n"); + return 0; +@@ -781,9 +819,11 @@ static int nss_ppe_l0_queue_scheduler_re + + if (nss_ppe_l0_res_free(nq) != 0) { + nss_qdisc_error("Level0 scheduler resources de-allocation failed\n"); ++ nss_ppe_assigned_queue_enable(nss_ppe_port_num_get(nq)); + return -EINVAL; + } + ++ nss_ppe_assigned_queue_enable(nss_ppe_port_num_get(nq)); + nss_qdisc_info("SSDK level0 queue scheduler configuration successful\n"); + return 0; + } +@@ -871,11 +911,6 @@ static int nss_ppe_l0_queue_scheduler_co + uint32_t port_num = nss_ppe_port_num_get(nq); + struct nss_ppe_qdisc *npq = &nq->npq; + +- if (npq->scheduler.drr_weight >= NSS_PPE_DRR_WEIGHT_MAX) { +- nss_qdisc_warning("DRR weight:%d should be less than 1024\n", npq->scheduler.drr_weight); +- return -EINVAL; +- } +- + /* + * Disable all queues and set Level 0 SSDK configuration + * We need to disable and flush the queues before +@@ -883,6 +918,15 @@ static int nss_ppe_l0_queue_scheduler_co + */ + nss_ppe_all_queue_disable(port_num); + ++ if (npq->scheduler.drr_weight >= NSS_PPE_DRR_WEIGHT_MAX) { ++ /* ++ * Currently assigned queues are enabled back by ++ * caller ++ */ ++ nss_qdisc_warning("DRR weight:%d should be less than 1024\n", npq->scheduler.drr_weight); ++ return -EINVAL; ++ } ++ + memset(&l0cfg, 0, sizeof(l0cfg)); + l0cfg.sp_id = npq->l0spid; + l0cfg.c_drr_wt = npq->scheduler.drr_weight ? npq->scheduler.drr_weight : 1; +@@ -899,7 +943,6 @@ static int nss_ppe_l0_queue_scheduler_co + port_num, npq->q.ucast_qid, l0cfg.c_drr_id, l0cfg.c_pri, l0cfg.c_drr_wt, l0cfg.e_drr_id, l0cfg.e_pri, l0cfg.e_drr_wt, l0cfg.sp_id); + if (fal_queue_scheduler_set(0, npq->q.ucast_qid, NSS_PPE_QUEUE_LEVEL - 1, port_num, &l0cfg) != 0) { + nss_qdisc_error("SSDK level0 queue scheduler configuration failed\n"); +- nss_ppe_all_queue_enable(port_num); + return -EINVAL; + } + +@@ -917,12 +960,11 @@ static int nss_ppe_l0_queue_scheduler_co + port_num, npq->q.mcast_qid, l0cfg.c_drr_id, l0cfg.c_pri, l0cfg.c_drr_wt, l0cfg.e_drr_id, l0cfg.e_pri, l0cfg.e_drr_wt, l0cfg.sp_id); + if (fal_queue_scheduler_set(0, npq->q.mcast_qid, NSS_PPE_QUEUE_LEVEL - 1, port_num, &l0cfg) != 0) { + nss_qdisc_error("SSDK level0 multicast queue scheduler configuration failed\n"); +- nss_ppe_all_queue_enable(port_num); + return -EINVAL; + } + } + +- nss_ppe_all_queue_enable(port_num); ++ nss_ppe_assigned_queue_enable(port_num); + + nss_qdisc_info("SSDK level0 queue scheduler configuration successful\n"); + return 0; +@@ -955,6 +997,7 @@ static int nss_ppe_l0_queue_scheduler_se + if (nss_ppe_l0_queue_scheduler_configure(nq) != 0) { + nss_qdisc_error("SSDK level0 queue scheduler configuration failed\n"); + nss_ppe_l0_res_free(nq); ++ nss_ppe_assigned_queue_enable(nss_ppe_port_num_get(nq)); + return -EINVAL; + } + +@@ -1381,7 +1424,7 @@ static int nss_ppe_default_conf_set(uint + */ + if (fal_port_scheduler_cfg_reset(0, port_num) != 0) { + nss_qdisc_error("SSDK reset default queue configuration failed\n"); +- nss_ppe_all_queue_enable(port_num); ++ nss_ppe_assigned_queue_enable(port_num); + return -EINVAL; + } + +@@ -1960,7 +2003,7 @@ void nss_ppe_all_queue_enable_hybrid(str + || (nq->type == NSS_SHAPER_NODE_TYPE_BF) + || (nq->type == NSS_SHAPER_NODE_TYPE_WRED)) { + uint32_t port_num = nss_ppe_port_num_get(nq); +- nss_ppe_all_queue_enable(port_num); ++ nss_ppe_assigned_queue_enable(port_num); + nss_qdisc_info("Queues in hybrid mode enabled successfully for Qdisc %px (type %d)\n", nq, nq->type); + } + } diff --git 
a/qca-nss-clients/patches-11.4/0034-ipsecmgr-backport-12.5.patch b/qca-nss-clients/patches-11.4/0034-ipsecmgr-backport-12.5.patch new file mode 100644 index 0000000..e3b7a9f --- /dev/null +++ b/qca-nss-clients/patches-11.4/0034-ipsecmgr-backport-12.5.patch @@ -0,0 +1,561 @@ +--- a/ipsecmgr/v2.0/plugins/klips/nss_ipsec_klips.c ++++ b/ipsecmgr/v2.0/plugins/klips/nss_ipsec_klips.c +@@ -142,7 +142,6 @@ static int nss_ipsec_klips_offload_esp(s + static struct net_protocol esp_protocol = { + .handler = nss_ipsec_klips_offload_esp, + .no_policy = 1, +- .netns_ok = 1, + }; + + /* +@@ -300,7 +299,7 @@ static struct nss_ipsec_klips_tun *nss_i + * Read/write lock needs to taken by the caller since sa + * table is looked up here + */ +- BUG_ON(write_can_lock(&tunnel_map.lock)); ++ lockdep_assert_held_write(&tunnel_map.lock); + + if (!klips_dev) { + return NULL; +@@ -383,7 +382,7 @@ static struct nss_ipsec_klips_tun *nss_i + * Read/write lock needs to be taken by the caller since tunnel + * table is looked up here + */ +- BUG_ON(write_can_lock(&tunnel_map.lock)); ++ lockdep_assert_held_write(&tunnel_map.lock); + + for (i = 0, tun = tunnel_map.tbl; i < tunnel_map.max; i++, tun++) { + if (!tun->klips_dev) { +@@ -434,7 +433,7 @@ static struct nss_ipsec_klips_sa *nss_ip + * Read/write lock needs to taken by the caller since sa + * table is looked up here + */ +- BUG_ON(write_can_lock(&tunnel_map.lock)); ++ lockdep_assert_held_write(&tunnel_map.lock); + + list_for_each_entry_safe(sa, tmp, head, list) { + if (sa->sid == crypto_idx) +@@ -458,7 +457,7 @@ static void nss_ipsec_klips_sa_flush(str + * Read/write lock needs to taken by the caller since sa + * table is modified here + */ +- BUG_ON(write_can_lock(&tunnel_map.lock)); ++ lockdep_assert_held_write(&tunnel_map.lock); + + list_for_each_entry_safe(sa, tmp, head, list) { + list_del_init(&sa->list); +@@ -1220,7 +1219,7 @@ static void nss_ipsec_klips_register_nat + /* + * write lock is needed as we are modifying tunnel entry. + */ +- BUG_ON(write_can_lock(&tunnel_map.lock)); ++ lockdep_assert_held_write(&tunnel_map.lock); + + sock_hold(sk); + tun->sk_encap_rcv = udp_sk(sk)->encap_rcv; +@@ -1237,7 +1236,7 @@ static void nss_ipsec_klips_unregister_n + /* + * write lock is needed as we are modifying tunnel entry. 
+ */ +- BUG_ON(write_can_lock(&tunnel_map.lock)); ++ lockdep_assert_held_write(&tunnel_map.lock); + + xchg(&udp_sk(tun->sk)->encap_rcv, tun->sk_encap_rcv); + sock_put(tun->sk); +--- a/ipsecmgr/v2.0/plugins/xfrm/nss_ipsec_xfrm.c ++++ b/ipsecmgr/v2.0/plugins/xfrm/nss_ipsec_xfrm.c +@@ -1222,6 +1222,7 @@ drop: + return -EINVAL; + } + ++#if (LINUX_VERSION_CODE <= KERNEL_VERSION(5, 4, 0)) + /* + * nss_ipsec_xfrm_v4_output_finish() + * This is called for non-offloaded transformations after the NF_POST routing hooks +@@ -1243,9 +1244,8 @@ static int nss_ipsec_xfrm_v4_output_fini + */ + static int nss_ipsec_xfrm_v4_extract_input(struct xfrm_state *x, struct sk_buff *skb) + { +- struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm; +- + nss_ipsec_xfrm_trace("%px: Redirect to native xfrm stack\n", skb); ++ struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm; + return drv->xsa.v4->extract_input(x, skb); + } + +@@ -1257,11 +1257,12 @@ static int nss_ipsec_xfrm_v4_extract_inp + */ + static int nss_ipsec_xfrm_v4_extract_output(struct xfrm_state *x, struct sk_buff *skb) + { +- struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm; + + nss_ipsec_xfrm_trace("%px: Redirect to native xfrm stack\n", skb); ++ struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm; + return drv->xsa.v4->extract_output(x, skb); + } ++#endif + + /* + * nss_ipsec_xfrm_v4_transport_finish() +@@ -1360,14 +1361,14 @@ fallback: + * nss_ipsec_xfrm_esp_init_state() + * Initialize IPsec xfrm state of type ESP. + */ +-static int nss_ipsec_xfrm_esp_init_state(struct xfrm_state *x) ++static int nss_ipsec_xfrm_esp_init_state(struct xfrm_state *x, struct netlink_ext_ack *extac) + { + struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm; + struct nss_ipsec_xfrm_tunnel *tun = NULL; + struct nss_ipsec_xfrm_sa *sa = NULL; + xfrm_address_t remote = {0}; + xfrm_address_t local = {0}; +- struct net_device *local_dev; ++ struct net_device *local_dev = NULL; + bool new_tun = 0; + size_t ip_addr_len; + +@@ -1375,7 +1376,7 @@ static int nss_ipsec_xfrm_esp_init_state + local_dev = ip_dev_find(&init_net, x->id.daddr.a4); + ip_addr_len = sizeof(local.a4); + } else { +- local_dev = ipv6_dev_find(&init_net, &x->id.daddr.in6, 1); ++ local_dev = ipv6_dev_find(&init_net, &x->id.daddr.in6, local_dev); + ip_addr_len = sizeof(local.a6); + } + +@@ -1716,6 +1717,7 @@ drop: + return -EINVAL; + } + ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)) + /* + * nss_ipsec_xfrm_v6_output_finish() + * This is called for non-offloaded transformations after the NF_POST routing hooks +@@ -1737,9 +1739,9 @@ static int nss_ipsec_xfrm_v6_output_fini + */ + static int nss_ipsec_xfrm_v6_extract_input(struct xfrm_state *x, struct sk_buff *skb) + { +- struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm; + + nss_ipsec_xfrm_trace("%px: Redirect to native xfrm stack\n", skb); ++ struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm; + return drv->xsa.v6->extract_input(x, skb); + } + +@@ -1751,11 +1753,11 @@ static int nss_ipsec_xfrm_v6_extract_inp + */ + static int nss_ipsec_xfrm_v6_extract_output(struct xfrm_state *x, struct sk_buff *skb) + { +- struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm; +- + nss_ipsec_xfrm_trace("%px: Redirect to native xfrm stack\n", skb); ++ struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm; + return drv->xsa.v6->extract_output(x, skb); + } ++#endif + + /* + * nss_ipsec_xfrm_v6_transport_finish() +@@ -1783,22 +1785,25 @@ void nss_ipsec_xfrm_v6_local_error(struc + return drv->xsa.v6->local_error(skb, mtu); + } + ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)) + /* + * nss_ipsec_xfrm_v6_esp_hdr_offset() 
+ * Invoked by stack for IPv6 transport mode in encap. + * Redirect to the native version. + */ +-static int nss_ipsec_xfrm_v6_esp_hdr_offset(struct xfrm_state *x, struct sk_buff *skb, u8 **prevhdr) ++static int nss_ipsec_xfrm_v6_esp_hdr_offset(struct xfrm_state *x, struct sk_buff *skb, u8 **prevhdr) + { +- struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm; + + nss_ipsec_xfrm_trace("%px: Redirect to native esp6 stack\n", skb); +-#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) +- return drv->xsa.v6->type_map[IPPROTO_ESP]->hdr_offset(x, skb, prevhdr); +-#else +- return drv->xsa.v6->type_esp->hdr_offset(x, skb, prevhdr); +-#endif ++ ++ struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm; ++ #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) ++ return drv->xsa.v6->type_map[IPPROTO_ESP]->hdr_offset(x, skb, prevhdr); ++ #else ++ return drv->xsa.v6->type_esp->hdr_offset(x, skb, prevhdr); ++ #endif + } ++#endif + + /* + * nss_ipsec_xfrm_esp6_rcv() +@@ -1949,7 +1954,6 @@ static void nss_ipsec_xfrm_state_delete( + nss_ipsec_xfrm_del_tun(drv, tun); + } + +- return; + } + + /* +@@ -2018,9 +2022,11 @@ static struct xfrm_state_afinfo xfrm_v4_ + .init_temprop = nss_ipsec_xfrm_v4_init_param, + #endif + .output = nss_ipsec_xfrm_v4_output, ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)) + .output_finish = nss_ipsec_xfrm_v4_output_finish, + .extract_input = nss_ipsec_xfrm_v4_extract_input, + .extract_output = nss_ipsec_xfrm_v4_extract_output, ++#endif + .transport_finish = nss_ipsec_xfrm_v4_transport_finish, + .local_error = nss_ipsec_xfrm_v4_local_error, + }; +@@ -2065,7 +2071,6 @@ struct xfrm_mode xfrm_v6_mode_map[XFRM_M + * IPv4 xfrm_type ESP object. + */ + static const struct xfrm_type xfrm_v4_type = { +- .description = "NSS ESP4", + .owner = THIS_MODULE, + .proto = IPPROTO_ESP, + .flags = XFRM_TYPE_REPLAY_PROT, +@@ -2101,9 +2106,11 @@ static struct xfrm_state_afinfo xfrm_v6_ + .state_sort = nss_ipsec_xfrm_v6_sort_state, + #endif + .output = nss_ipsec_xfrm_v6_output, ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)) + .output_finish = nss_ipsec_xfrm_v6_output_finish, + .extract_input = nss_ipsec_xfrm_v6_extract_input, + .extract_output = nss_ipsec_xfrm_v6_extract_output, ++#endif + .transport_finish = nss_ipsec_xfrm_v6_transport_finish, + .local_error = nss_ipsec_xfrm_v6_local_error, + }; +@@ -2112,7 +2119,6 @@ static struct xfrm_state_afinfo xfrm_v6_ + * IPv6 xfrm_type ESP object. 
+ */ + static const struct xfrm_type xfrm_v6_type = { +- .description = "NSS ESP6", + .owner = THIS_MODULE, + .proto = IPPROTO_ESP, + .flags = XFRM_TYPE_REPLAY_PROT, +@@ -2121,7 +2127,9 @@ static const struct xfrm_type xfrm_v6_ty + .get_mtu = nss_ipsec_xfrm_esp_get_mtu, + .input = nss_ipsec_xfrm_esp_input, + .output = nss_ipsec_xfrm_esp_output, ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)) + .hdr_offset = nss_ipsec_xfrm_v6_esp_hdr_offset, ++#endif + }; + + /* +@@ -2207,7 +2215,6 @@ static void nss_ipsec_xfrm_restore_afinf + } + + xfrm_unregister_type(base, family); +- + xfrm_state_update_afinfo(family, afinfo); + } + +@@ -2292,14 +2299,10 @@ static void nss_ipsec_xfrm_override_afin + */ + int __init nss_ipsec_xfrm_init_module(void) + { +- + rwlock_init(&g_ipsec_xfrm.lock); +- + nss_ipsec_xfrm_init_tun_db(&g_ipsec_xfrm); + nss_ipsec_xfrm_init_flow_db(&g_ipsec_xfrm); +- + init_completion(&g_ipsec_xfrm.complete); +- + net_get_random_once(&g_ipsec_xfrm.hash_nonce, sizeof(g_ipsec_xfrm.hash_nonce)); + + /* +@@ -2327,7 +2330,6 @@ int __init nss_ipsec_xfrm_init_module(vo + nss_ipsec_xfrm_override_afinfo(&g_ipsec_xfrm, AF_INET6); + + ecm_interface_ipsec_register_callbacks(&xfrm_ecm_ipsec_cb); +- ecm_notifier_register_connection_notify(&xfrm_ecm_notifier); + + #if defined(NSS_L2TPV2_ENABLED) + l2tpmgr_register_ipsecmgr_callback_by_ipaddr(&xfrm_l2tp); +@@ -2336,6 +2338,7 @@ int __init nss_ipsec_xfrm_init_module(vo + /* + * Register for xfrm events + */ ++ ecm_notifier_register_connection_notify(&xfrm_ecm_notifier); + xfrm_register_km(&nss_ipsec_xfrm_mgr); + + /* +@@ -2346,6 +2349,7 @@ int __init nss_ipsec_xfrm_init_module(vo + return 0; + + unreg_v4_handler: ++ xfrm4_protocol_deregister(&xfrm4_proto, IPPROTO_ESP); + xfrm6_protocol_deregister(&xfrm6_proto, IPPROTO_ESP); + return -EAGAIN; + } +--- a/ipsecmgr/v2.0/plugins/xfrm/nss_ipsec_xfrm_sa.c ++++ b/ipsecmgr/v2.0/plugins/xfrm/nss_ipsec_xfrm_sa.c +@@ -181,7 +181,7 @@ static bool nss_ipsec_xfrm_sa_init_crypt + */ + static void nss_ipsec_xfrm_sa_init_tuple(struct nss_ipsec_xfrm_sa *sa, struct xfrm_state *x) + { +- struct net_device *local_dev; ++ struct net_device *local_dev = NULL; + + sa->type = NSS_IPSECMGR_SA_TYPE_ENCAP; + sa->tuple.spi_index = ntohl(x->id.spi); +@@ -215,7 +215,7 @@ static void nss_ipsec_xfrm_sa_init_tuple + sa->tuple.dest_ip[2] = ntohl(x->id.daddr.a6[2]); + sa->tuple.dest_ip[3] = ntohl(x->id.daddr.a6[3]); + +- local_dev = ipv6_dev_find(&init_net, (struct in6_addr *)x->id.daddr.a6, 1); ++ local_dev = ipv6_dev_find(&init_net, (struct in6_addr *)x->id.daddr.a6, local_dev); + } + + /* +--- a/ipsecmgr/v2.0/plugins/xfrm/nss_ipsec_xfrm_tunnel.c ++++ b/ipsecmgr/v2.0/plugins/xfrm/nss_ipsec_xfrm_tunnel.c +@@ -130,7 +130,6 @@ err: + drop: + atomic64_inc(&drv->stats.inner_drop); + dev_kfree_skb_any(skb); +- return; + } + + /* +@@ -194,7 +193,6 @@ static void nss_ipsec_xfrm_tunnel_rx_out + drop: + dev_kfree_skb_any(skb); + atomic64_inc(&drv->stats.outer_drop); +- return; + } + + /* +From 2b32003b2e6225802361bc3bab12fcb3510f0327 Mon Sep 17 00:00:00 2001 +From: Suhas N Bhargav +Date: Thu, 30 Sep 2021 16:32:12 +0530 +Subject: [PATCH] [qca-nss-clients] Fix to avoid contention b/w write locks in + ipsecmgr + +This fix is needed to avoid contention of locks between two +entities which are in process & interrupt context + +Change-Id: I9986606b99d7642cca1c105bdf05e0ed67b66374 +Signed-off-by: Suhas N Bhargav +--- + ipsecmgr/v2.0/nss_ipsecmgr.c | 6 +++--- + ipsecmgr/v2.0/nss_ipsecmgr_flow.c | 8 ++++---- + ipsecmgr/v2.0/nss_ipsecmgr_tunnel.c | 10 
+++++----- + 3 files changed, 12 insertions(+), 12 deletions(-) + +--- a/ipsecmgr/v2.0/nss_ipsecmgr.c ++++ b/ipsecmgr/v2.0/nss_ipsecmgr.c +@@ -1,6 +1,6 @@ + /* + ************************************************************************** +- * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. ++ * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. +@@ -278,11 +278,11 @@ static int __init nss_ipsecmgr_init(void + */ + nss_ipsecmgr_configure(&ipsecmgr_drv->cfg_work.work); + +- write_lock(&ipsecmgr_drv->lock); ++ write_lock_bh(&ipsecmgr_drv->lock); + list_add(&tun->list, &ipsecmgr_drv->tun_db); + + ipsecmgr_drv->max_mtu = dev->mtu; +- write_unlock(&ipsecmgr_drv->lock); ++ write_unlock_bh(&ipsecmgr_drv->lock); + + nss_ipsecmgr_info("NSS IPsec manager loaded: %s\n", NSS_CLIENT_BUILD_ID); + return 0; +--- a/ipsecmgr/v2.0/nss_ipsecmgr_flow.c ++++ b/ipsecmgr/v2.0/nss_ipsecmgr_flow.c +@@ -1,6 +1,6 @@ + /* + ************************************************************************** +- * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. ++ * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. +@@ -149,10 +149,10 @@ static bool nss_ipsecmgr_flow_update_db( + + hash_idx = nss_ipsecmgr_flow_tuple2hash(&flow->state.tuple, NSS_IPSECMGR_FLOW_MAX); + +- write_lock(&ipsecmgr_drv->lock); ++ write_lock_bh(&ipsecmgr_drv->lock); + sa = nss_ipsecmgr_sa_find(ipsecmgr_drv->sa_db, sa_tuple); + if (!sa) { +- write_unlock(&ipsecmgr_drv->lock); ++ write_unlock_bh(&ipsecmgr_drv->lock); + nss_ipsecmgr_trace("%px: failed to find SA during flow update", flow); + return false; + } +@@ -163,7 +163,7 @@ static bool nss_ipsecmgr_flow_update_db( + */ + nss_ipsecmgr_ref_add(&flow->ref, &sa->ref); + list_add(&flow->list, &ipsecmgr_drv->flow_db[hash_idx]); +- write_unlock(&ipsecmgr_drv->lock); ++ write_unlock_bh(&ipsecmgr_drv->lock); + return true; + } + +--- a/ipsecmgr/v2.0/nss_ipsecmgr_tunnel.c ++++ b/ipsecmgr/v2.0/nss_ipsecmgr_tunnel.c +@@ -1,6 +1,6 @@ + /* + ************************************************************************** +- * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. ++ * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. 
+@@ -273,7 +273,7 @@ static void nss_ipsecmgr_tunnel_mtu_upda + uint16_t max_mtu = 0; + bool update_mtu = false; + +- write_lock(&ipsecmgr_drv->lock); ++ write_lock_bh(&ipsecmgr_drv->lock); + list_for_each_entry(tun, head, list) { + if (tun->dev->mtu > max_mtu) + max_mtu = tun->dev->mtu; +@@ -284,7 +284,7 @@ static void nss_ipsecmgr_tunnel_mtu_upda + update_mtu = true; + } + +- write_unlock(&ipsecmgr_drv->lock); ++ write_unlock_bh(&ipsecmgr_drv->lock); + + #ifdef NSS_IPSECMGR_PPE_SUPPORT + /* +@@ -627,9 +627,9 @@ struct net_device *nss_ipsecmgr_tunnel_a + #endif + } + +- write_lock(&ipsecmgr_drv->lock); ++ write_lock_bh(&ipsecmgr_drv->lock); + list_add(&tun->list, &ipsecmgr_drv->tun_db); +- write_unlock(&ipsecmgr_drv->lock); ++ write_unlock_bh(&ipsecmgr_drv->lock); + + nss_ipsecmgr_tunnel_mtu(dev, skb_dev ? skb_dev->mtu : dev->mtu); + +--- a/ipsecmgr/v2.0/plugins/xfrm/nss_ipsec_xfrm.c ++++ b/ipsecmgr/v2.0/plugins/xfrm/nss_ipsec_xfrm.c +@@ -1,5 +1,6 @@ + /* Copyright (c) 2021, The Linux Foundation. All rights reserved. + * ++ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. +@@ -254,7 +255,7 @@ static void nss_ipsec_xfrm_flush_flow_by + + for (count = NSS_IPSEC_XFRM_FLOW_DB_MAX; count--; db_head++) { + list_for_each_entry_safe(flow, tmp, db_head, list_entry) { +- if (flow->sa == sa) { ++ if (READ_ONCE(flow->sa) == sa) { + list_del_init(&flow->list_entry); + list_add(&flow->list_entry, &free_head); + } +--- a/ipsecmgr/v2.0/plugins/xfrm/nss_ipsec_xfrm_flow.c ++++ b/ipsecmgr/v2.0/plugins/xfrm/nss_ipsec_xfrm_flow.c +@@ -1,4 +1,5 @@ + /* Copyright (c) 2021, The Linux Foundation. All rights reserved. ++ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above +@@ -50,9 +51,9 @@ static void nss_ipsec_xfrm_flow_final(st + /* + * Release reference to the parent SA. 
+ */ +- if (flow->sa) { +- nss_ipsec_xfrm_sa_deref(flow->sa); +- flow->sa = NULL; ++ if (READ_ONCE(flow->sa)) { ++ nss_ipsec_xfrm_sa_deref(READ_ONCE(flow->sa)); ++ WRITE_ONCE(flow->sa, NULL); + } + + if (flow->pol) { +@@ -194,7 +195,7 @@ struct nss_ipsec_xfrm_flow *nss_ipsec_xf + */ + bool nss_ipsec_xfrm_flow_update(struct nss_ipsec_xfrm_flow *flow, struct nss_ipsec_xfrm_sa *sa) + { +- struct nss_ipsec_xfrm_sa *flow_sa = flow->sa; ++ struct nss_ipsec_xfrm_sa *flow_sa = READ_ONCE(flow->sa); + enum nss_ipsecmgr_status status; + + /* +@@ -213,12 +214,14 @@ bool nss_ipsec_xfrm_flow_update(struct n + return true; + } + ++ if (cmpxchg(&flow->sa, flow_sa, sa) != flow_sa) { ++ nss_ipsec_xfrm_info("%p: Flow migrated to newer SA by other CPU\n", flow); ++ return false; ++ } + +- nss_ipsec_xfrm_info("%p: Flow migrated from SA %p to SA %p\n", flow, flow_sa, sa); +- +- xchg(&flow->sa, nss_ipsec_xfrm_sa_ref(sa)); ++ nss_ipsec_xfrm_sa_ref(sa); + nss_ipsec_xfrm_sa_deref(flow_sa); +- ++ nss_ipsec_xfrm_info("%p: Flow migrated from SA %p to SA %p\n", flow, flow_sa, sa); + return true; + } + +@@ -236,7 +239,7 @@ void nss_ipsec_xfrm_flow_dealloc(struct + + atomic64_inc(&drv->stats.flow_dealloced); + +- sa = flow->sa; ++ sa = READ_ONCE(flow->sa); + BUG_ON(!sa); + + tun = sa->tun; +@@ -282,7 +285,7 @@ struct nss_ipsec_xfrm_flow *nss_ipsec_xf + flow->tuple.sport, flow->tuple.dport); + } + +- flow->sa = nss_ipsec_xfrm_sa_ref(sa); ++ WRITE_ONCE(flow->sa, nss_ipsec_xfrm_sa_ref(sa)); + + status = nss_ipsecmgr_flow_add(sa->tun->dev, &flow->tuple, &sa->tuple); + if ((status != NSS_IPSECMGR_DUPLICATE_FLOW) && (status != NSS_IPSECMGR_OK)) { +--- a/ipsecmgr/v2.0/plugins/xfrm/Makefile ++++ b/ipsecmgr/v2.0/plugins/xfrm/Makefile +@@ -14,3 +14,7 @@ ccflags-y += -I$(obj)/ + ccflags-y += -DNSS_IPSEC_XFRM_DEBUG_LEVEL=3 + ccflags-y += -DNSS_CLIENT_BUILD_ID="$(BUILD_ID)" + ccflags-y += -Wall -Werror ++ ++ifeq ($(SoC),$(filter $(SoC),ipq50xx ipq50xx_64)) ++ccflags-y += -DNSS_IPSEC_XFRM_IPQ50XX ++endif +--- a/ipsecmgr/v2.0/plugins/xfrm/nss_ipsec_xfrm_sa.c ++++ b/ipsecmgr/v2.0/plugins/xfrm/nss_ipsec_xfrm_sa.c +@@ -55,13 +55,15 @@ struct nss_ipsec_xfrm_algo { + static struct nss_ipsec_xfrm_algo xfrm_algo[] = { + {.cipher_name = "cbc(aes)", .auth_name = "hmac(sha1)", .algo = NSS_IPSECMGR_ALGO_AES_CBC_SHA1_HMAC}, + {.cipher_name = "cbc(des3_ede)", .auth_name = "hmac(sha1)", .algo = NSS_IPSECMGR_ALGO_3DES_CBC_SHA1_HMAC}, ++#ifndef NSS_IPSEC_XFRM_IPQ50XX + {.cipher_name = "cbc(aes)", .auth_name = "hmac(md5)", .algo = NSS_IPSECMGR_ALGO_AES_CBC_MD5_HMAC}, + {.cipher_name = "cbc(des3_ede)", .auth_name = "hmac(md5)", .algo = NSS_IPSECMGR_ALGO_3DES_CBC_MD5_HMAC}, + {.cipher_name = "rfc4106(gcm(aes))", .auth_name = "rfc4106(gcm(aes))", .algo = NSS_IPSECMGR_ALGO_AES_GCM_GMAC_RFC4106}, + {.cipher_name = "ecb(cipher_null)", .auth_name = "hmac(sha1)", .algo = NSS_IPSECMGR_ALGO_NULL_CIPHER_SHA1_HMAC}, ++ {.cipher_name = "ecb(cipher_null)", .auth_name = "hmac(sha256)", .algo = NSS_IPSECMGR_ALGO_NULL_CIPHER_SHA256_HMAC}, ++#endif + {.cipher_name = "cbc(aes)", .auth_name = "hmac(sha256)", .algo = NSS_IPSECMGR_ALGO_AES_CBC_SHA256_HMAC}, + {.cipher_name = "cbc(des3_ede)", .auth_name = "hmac(sha256)", .algo = NSS_IPSECMGR_ALGO_3DES_CBC_SHA256_HMAC}, +- {.cipher_name = "ecb(cipher_null)", .auth_name = "hmac(sha256)", .algo = NSS_IPSECMGR_ALGO_NULL_CIPHER_SHA256_HMAC}, + }; + + /* diff --git a/qca-nss-clients/patches-11.4/0035-netlink-backport-12.5.patch b/qca-nss-clients/patches-11.4/0035-netlink-backport-12.5.patch new file mode 100644 index 
0000000..646b73a --- /dev/null +++ b/qca-nss-clients/patches-11.4/0035-netlink-backport-12.5.patch @@ -0,0 +1,1441 @@ +--- a/netlink/Makefile ++++ b/netlink/Makefile +@@ -22,6 +22,7 @@ ccflags-y += -DCONFIG_NSS_NLLSO_RX=$(str + ccflags-y += -DCONFIG_NSS_NLMAP_T=$(strip $(if $(filter $(map-t), y), 1 , 0)) + ccflags-y += -DCONFIG_NSS_NLPPPOE=$(strip $(if $(filter $(pppoe), y), 1 , 0)) + ccflags-y += -DCONFIG_NSS_NLL2TPV2=$(strip $(if $(filter $(l2tp), y), 1 , 0)) ++ccflags-y += -DCONFIG_NSS_NLQRFS=$(strip $(if $(filter $(CONFIG_NSS_NLQRFS), y), 1 , 0)) + ccflags-y += -DCONFIG_NSS_NLPPTP=$(strip $(if $(filter $(pptp), y), 1 , 0)) + ccflags-y += -DCONFIG_NSS_NLCAPWAP=${CAPWAP_ENABLED} + ccflags-y += -DCONFIG_NSS_NLIPSEC=${IPSEC_ENABLED} +@@ -83,6 +84,10 @@ qca-nss-netlink-objs += nss_nludp_st.o + endif + endif + ++ifneq (,$(filter $(CONFIG_NSS_NLQRFS), y)) ++qca-nss-netlink-objs += nss_nlqrfs.o ++endif ++ + ifneq (,$(filter $(capwapmgr), y)) + qca-nss-netlink-objs += nss_nlcapwap.o + endif +--- /dev/null ++++ b/netlink/include/nss_nlqrfs_if.h +@@ -0,0 +1,59 @@ ++/* ++ ************************************************************************** ++ * Copyright (c) 2021, The Linux Foundation. All rights reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for ++ * any purpose with or without fee is hereby granted, provided that the ++ * above copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT ++ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ ************************************************************************** ++ */ ++ ++/* ++ * @file nss_nlqrfs_if.h ++ * NSS Netlink qrfs headers ++ */ ++#ifndef __NSS_NLQRFS_IF_H ++#define __NSS_NLQRFS_IF_H ++#include "nss_qrfs.h" ++ ++/** ++ * QRFS Family ++ */ ++#define NSS_NLQRFS_FAMILY "nss_nlqrfs" ++#define NSS_NLQRFS_MCAST_GRP "nss_nlqrfs_mc" ++ ++/** ++ * struct nss_nlqrfs_rule ++ * QRFS rule structure. ++ */ ++struct nss_nlqrfs_rule { ++ struct nss_nlcmn cm; /**< Common message header. */ ++ char gmac_ifname[IFNAMSIZ]; /**< GMAC interface name. */ ++ struct nss_qrfs_flow_rule_msg msg; /**< QRFS Configure flow rule message. */ ++}; ++ ++/** ++ * nss_nlqrfs_rule_init ++ * NETLINK qrfs message init. ++ * ++ * @param[in] rule NSS Netlink QRFS rule. ++ * @param[in] type QRFS message type. ++ * ++ * @return ++ * None. 
++ */ ++static inline void nss_nlqrfs_rule_init(struct nss_nlqrfs_rule *rule, enum nss_qrfs_msg_types type) ++{ ++ nss_nlcmn_set_ver(&rule->cm, NSS_NL_VER); ++ nss_nlcmn_init_cmd(&rule->cm, sizeof(struct nss_nlqrfs_rule), type); ++} ++ ++#endif /* __NSS_NLQRFS_IF_H */ +--- a/netlink/nss_nl.c ++++ b/netlink/nss_nl.c +@@ -77,6 +77,8 @@ + #include "nss_nlpptp_if.h" + #include "nss_nludp_st.h" + #include "nss_nludp_st_if.h" ++#include "nss_nlqrfs.h" ++#include "nss_nlqrfs_if.h" + + /* + * nss_nl.c +@@ -294,7 +296,16 @@ static struct nss_nl_family family_handl + .entry = NSS_NLUDP_ST_INIT, /* init */ + .exit = NSS_NLUDP_ST_EXIT, /* exit */ + .valid = CONFIG_NSS_NLUDP_ST /* 1 or 0 */ +- } ++ }, ++ { ++ /* ++ * NSS_NLQRFS ++ */ ++ .name = NSS_NLQRFS_FAMILY, /* qrfs */ ++ .entry = NSS_NLQRFS_INIT, /* init */ ++ .exit = NSS_NLQRFS_EXIT, /* exit */ ++ .valid = CONFIG_NSS_NLQRFS /* 1 or 0 */ ++ }, + }; + + #define NSS_NL_FAMILY_HANDLER_SZ ARRAY_SIZE(family_handlers) +--- a/netlink/nss_nldtls.c ++++ b/netlink/nss_nldtls.c +@@ -1,9 +1,12 @@ + /* + ************************************************************************** +- * Copyright (c) 2015-2016,2018-2020 The Linux Foundation. All rights reserved. ++ * Copyright (c) 2015-2016,2018-2021 The Linux Foundation. All rights reserved. ++ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. ++ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. ++ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +@@ -446,7 +449,7 @@ static void nss_nldtls_data_callback(voi + * nss_nldtls_create_session() + * Create a DTLS session through dtlsmgr driver API. 
+ */ +-static struct net_device *nss_nldtls_create_session(struct nss_nldtls_rule *nl_rule, uint32_t flags) ++static struct net_device *nss_nldtls_create_session(struct nss_nldtls_rule *nl_rule) + { + struct nss_nldtls_tun_ctx *dtls_tun_data; + struct nss_dtlsmgr_config dcfg; +@@ -463,7 +466,7 @@ static struct net_device *nss_nldtls_cre + + memset(&dcfg, 0, sizeof(struct nss_dtlsmgr_config)); + algo = nl_rule->msg.create.encap.cfg.crypto.algo; +- dcfg.flags = flags | (NSS_DTLSMGR_ENCAP_METADATA | NSS_DTLSMGR_HDR_CAPWAP); ++ dcfg.flags = nl_rule->msg.create.flags | NSS_DTLSMGR_ENCAP_METADATA; + if (algo == NSS_DTLSMGR_ALGO_AES_GCM) + dcfg.flags |= NSS_DTLSMGR_CIPHER_MODE_GCM; + +@@ -605,7 +608,11 @@ static int nss_nldtls_create_ipv4_rule_e + ipv4.dest_port = nl_rule->msg.create.encap.cfg.sport; + ipv4.dest_port_xlate = nl_rule->msg.create.encap.cfg.sport; + +- ipv4.protocol = IPPROTO_UDP; ++ if (nl_rule->msg.create.flags & NSS_DTLSMGR_HDR_UDPLITE) ++ ipv4.protocol = IPPROTO_UDPLITE; ++ else ++ ipv4.protocol = IPPROTO_UDP; ++ + ipv4.in_vlan_tag[0] = NSS_NLDTLS_VLAN_INVALID; + ipv4.out_vlan_tag[0] = NSS_NLDTLS_VLAN_INVALID; + ipv4.in_vlan_tag[1] = NSS_NLDTLS_VLAN_INVALID; +@@ -613,6 +620,8 @@ static int nss_nldtls_create_ipv4_rule_e + + memcpy(&ipv4.src_mac[0], &nl_rule->msg.create.gmac_ifmac[0], sizeof(ipv4.src_mac)); + ++ dev_put(ndev); ++ + /* + * Create an ipv4 rule entry + */ +@@ -654,7 +663,11 @@ static int nss_nldtls_create_ipv6_rule_e + */ + memcpy(ipv6.src_ip, nl_rule->msg.create.encap.cfg.dip, sizeof(ipv6.src_ip)); + memcpy(ipv6.dest_ip, nl_rule->msg.create.encap.cfg.sip, sizeof(ipv6.dest_ip)); +- ipv6.protocol = IPPROTO_UDP; ++ ++ if (nl_rule->msg.create.flags & NSS_DTLSMGR_HDR_UDPLITE) ++ ipv6.protocol = IPPROTO_UDPLITE; ++ else ++ ipv6.protocol = IPPROTO_UDP; + + ipv6.in_vlan_tag[0] = NSS_NLDTLS_VLAN_INVALID; + ipv6.in_vlan_tag[1] = NSS_NLDTLS_VLAN_INVALID; +@@ -663,6 +676,8 @@ static int nss_nldtls_create_ipv6_rule_e + + memcpy(&ipv6.src_mac[0], &nl_rule->msg.create.gmac_ifmac[0], sizeof(ipv6.src_mac)); + ++ dev_put(ndev); ++ + /* + * Create an ipv6 rule entry + */ +@@ -729,7 +744,7 @@ static int nss_nldtls_ops_create_tun(str + * Create tunnel based on ip version + */ + if (nl_rule->msg.create.ip_version == NSS_NLDTLS_IP_VERS_4) { +- dtls_dev = nss_nldtls_create_session(nl_rule, NSS_NLDTLS_IPV4_SESSION); ++ dtls_dev = nss_nldtls_create_session(nl_rule); + if (!dtls_dev) { + nss_nl_error("%px: Unable to create dtls session for v4\n", skb); + return -EINVAL; +@@ -748,7 +763,7 @@ static int nss_nldtls_ops_create_tun(str + atomic_inc(&gbl_ctx.num_tun); + nss_nl_info("%px: Successfully created ipv4 dtls tunnel\n", skb); + } else { +- dtls_dev = nss_nldtls_create_session(nl_rule, NSS_DTLSMGR_HDR_IPV6); ++ dtls_dev = nss_nldtls_create_session(nl_rule); + if (!dtls_dev) { + nss_nl_error("%px: Unable to create dtls session for v6\n", skb); + return -EINVAL; +@@ -871,6 +886,7 @@ static int nss_nldtls_ops_update_config( + key_len = nl_rule->msg.update_config.config_update.crypto.cipher_key.len; + if (key_len > NSS_NLDTLS_CIPHER_KEY_MAX) { + nss_nl_error("Invalid cipher length: %u\n", key_len); ++ dev_put(dev); + return -EINVAL; + } + +@@ -878,6 +894,7 @@ static int nss_nldtls_ops_update_config( + key_len = nl_rule->msg.update_config.config_update.crypto.auth_key.len; + if (key_len > NSS_NLDTLS_AUTH_KEY_MAX) { + nss_nl_error("Invalid authentication length: %u\n", key_len); ++ dev_put(dev); + return -EINVAL; + } + +@@ -885,6 +902,7 @@ static int nss_nldtls_ops_update_config( + key_len = 
nl_rule->msg.update_config.config_update.crypto.nonce.len; + if (key_len > NSS_NLDTLS_NONCE_SIZE_MAX) { + nss_nl_error("Invalid nonce length: %u\n", key_len); ++ dev_put(dev); + return -EINVAL; + } + +--- a/netlink/nss_nlipsec.c ++++ b/netlink/nss_nlipsec.c +@@ -1,9 +1,12 @@ + /* + ************************************************************************** + * Copyright (c) 2015-2016,2018-2021 The Linux Foundation. All rights reserved. ++ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. ++ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. ++ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +@@ -225,7 +228,7 @@ static void nss_nlipsec_process_event(vo + /* + * Initialize the NETLINK common header + */ +- nss_nlipsec_rule_init(nl_rule, ev->type); ++ nss_nlipsec_rule_init(nl_rule, (enum nss_nlipsec_cmd)ev->type); + + /* + * Copy the contents of the sync message into the NETLINK message +@@ -508,6 +511,8 @@ static struct nss_nlipsec_rule *nss_nlip + dev_put(*dev); + return NULL; + } ++ ++ dev_put(*dev); + return nl_rule; + } + +--- /dev/null ++++ b/netlink/nss_nlqrfs.c +@@ -0,0 +1,203 @@ ++/* ++ ************************************************************************** ++ * Copyright (c) 2021, The Linux Foundation. All rights reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for ++ * any purpose with or without fee is hereby granted, provided that the ++ * above copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT ++ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
++ ************************************************************************** ++ */ ++ ++/* ++ * nss_nlqrfs.c ++ * NSS Netlink qrfs Handler ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include "nss_nl.h" ++#include "nss_nlcmn_if.h" ++#include "nss_nlqrfs_if.h" ++#include ++ ++/* ++ * prototypes ++ */ ++static int nss_nlqrfs_ops_add_rule(struct sk_buff *skb, struct genl_info *info); ++static int nss_nlqrfs_ops_del_rule(struct sk_buff *skb, struct genl_info *info); ++ ++/* ++ * multicast group for sending message status & events ++ */ ++static const struct genl_multicast_group nss_nlqrfs_mcgrp[] = { ++ {.name = NSS_NLQRFS_MCAST_GRP}, ++}; ++ ++/* ++ * operation table called by the generic netlink layer based on the command ++ */ ++static struct genl_ops nss_nlqrfs_ops[] = { ++ {.cmd = NSS_QRFS_MSG_FLOW_ADD, .doit = nss_nlqrfs_ops_add_rule,}, ++ {.cmd = NSS_QRFS_MSG_FLOW_DELETE, .doit = nss_nlqrfs_ops_del_rule,}, ++}; ++ ++/* ++ * qrfs family definition ++ */ ++static struct genl_family nss_nlqrfs_family = { ++#if (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 9, 0)) ++ .id = GENL_ID_GENERATE, /* Auto generate ID */ ++#endif ++ .name = NSS_NLQRFS_FAMILY, /* family name string */ ++ .hdrsize = sizeof(struct nss_nlqrfs_rule), /* NSS NETLINK qrfs rule */ ++ .version = NSS_NL_VER, /* Set it to NSS_NLQRFS version */ ++ .maxattr = NSS_QRFS_MSG_MAX, /* Maximum commands supported */ ++ .netnsok = true, ++ .pre_doit = NULL, ++ .post_doit = NULL, ++ .ops = nss_nlqrfs_ops, ++ .n_ops = ARRAY_SIZE(nss_nlqrfs_ops), ++ .mcgrps = nss_nlqrfs_mcgrp, ++ .n_mcgrps = ARRAY_SIZE(nss_nlqrfs_mcgrp) ++}; ++ ++/* ++ * nss_nlqrfs_ops_cfg_rule ++ * Handler for unconfiguring rules ++ */ ++static int nss_nlqrfs_ops_cfg_rule(struct sk_buff *skb, struct genl_info *info, bool action) ++{ ++ struct nss_nlqrfs_rule *nl_rule; ++ struct nss_qrfs_flow_rule_msg *nrm; ++ struct nss_nlcmn *nl_cm; ++ int ret = 0; ++ ++ /* ++ * Extract the message payload ++ */ ++ if (action) { ++ nl_cm = nss_nl_get_msg(&nss_nlqrfs_family, info, NSS_QRFS_MSG_FLOW_ADD); ++ nss_nl_info("add flow rule\n"); ++ } else { ++ nl_cm = nss_nl_get_msg(&nss_nlqrfs_family, info, NSS_QRFS_MSG_FLOW_DELETE); ++ nss_nl_info("delete flow rule\n"); ++ } ++ ++ if (!nl_cm) { ++ nss_nl_error("%px: Unable to extract configure rule data\n", skb); ++ return -EINVAL; ++ } ++ ++ /* ++ * Message validation required before accepting the configuration ++ */ ++ nl_rule = container_of(nl_cm, struct nss_nlqrfs_rule, cm); ++ nrm = &nl_rule->msg; ++ ++ if (nrm->ip_version == 4) { ++ nss_nl_trace("src_ip:%pl4h src_port:%u dst_ip:%pl4h dst_port:%u protocol:%u version:%u cpu:%u\n", ++ nrm->src_addr, nrm->src_port, nrm->dst_addr, nrm->dst_port, nrm->protocol, ++ nrm->ip_version, nrm->cpu); ++ } else if (nrm->ip_version == 6) { ++ nss_nl_trace("src_ip:%pl6 src_port:%u dst_ip:%pl6 dst_port:%u protocol:%u version:%u cpu:%u\n", ++ nrm->src_addr, nrm->src_port, nrm->dst_addr, nrm->dst_port, nrm->protocol, ++ nrm->ip_version, nrm->cpu); ++ } else { ++ nss_nl_trace("Unsupported IP version field\n"); ++ return -EINVAL; ++ } ++ ++ if (action) { ++ nss_qrfs_configure_flow_rule(nrm->dst_addr, nrm->src_addr, nrm->dst_port, nrm->src_port, ++ nrm->ip_version, nrm->protocol, nrm->cpu, NSS_QRFS_MSG_FLOW_ADD); ++ } else { ++ nss_qrfs_configure_flow_rule(nrm->dst_addr, nrm->src_addr, nrm->dst_port, nrm->src_port, ++ nrm->ip_version, nrm->protocol, nrm->cpu, NSS_QRFS_MSG_FLOW_DELETE); ++ } ++ ++ 
nss_nl_trace("%s flow rule finished\n", action? "add" : "delete"); ++ ++ return ret; ++} ++ ++/* ++ * nss_nlqrfs_ops_add_rule() ++ * Handler for Adding rules ++ */ ++static int nss_nlqrfs_ops_add_rule(struct sk_buff *skb, struct genl_info *info) ++{ ++ return nss_nlqrfs_ops_cfg_rule(skb, info, true); ++} ++ ++/* ++ * nss_nlqrfs_ops_del_rule() ++ * Handler for deleting rules ++ */ ++static int nss_nlqrfs_ops_del_rule(struct sk_buff *skb, struct genl_info *info) ++{ ++ return nss_nlqrfs_ops_cfg_rule(skb, info, false); ++} ++ ++/* ++ * nss_nlqrfs_exit() ++ * handler exit ++ */ ++bool nss_nlqrfs_exit(void) ++{ ++ int error; ++ ++ nss_nl_info_always("Exit NSS netlink qrfs handler\n"); ++ ++ /* ++ * unregister the ops family ++ */ ++ error = genl_unregister_family(&nss_nlqrfs_family); ++ if (error) { ++ nss_nl_info_always("unable to unregister qrfs NETLINK family\n"); ++ return false; ++ } ++ ++ return true; ++} ++ ++/* ++ * nss_nlqrfs_init() ++ * handler init ++ */ ++bool nss_nlqrfs_init(void) ++{ ++ int error; ++ ++ nss_nl_info_always("Init NSS netlink qrfs handler\n"); ++ ++ /* ++ * register Netlink ops with the family ++ */ ++ error = genl_register_family(&nss_nlqrfs_family); ++ if (error) { ++ nss_nl_info_always("Error: unable to register qrfs family\n"); ++ return false; ++ } ++ ++ return true; ++} +--- /dev/null ++++ b/netlink/nss_nlqrfs.h +@@ -0,0 +1,37 @@ ++/* ++ ************************************************************************** ++ * Copyright (c) 2021, The Linux Foundation. All rights reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for ++ * any purpose with or without fee is hereby granted, provided that the ++ * above copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ++ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ++ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT ++ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ ************************************************************************** ++ */ ++ ++/* ++ * nss_nlqrfs.h ++ * NSS Netlink qrfs API definitions ++ */ ++#ifndef __NSS_NLQRFS_H ++#define __NSS_NLQRFS_H ++ ++bool nss_nlqrfs_init(void); ++bool nss_nlqrfs_exit(void); ++ ++#if defined(CONFIG_NSS_NLQRFS) && CONFIG_NSS_NLQRFS > 0 ++#define NSS_NLQRFS_INIT nss_nlqrfs_init ++#define NSS_NLQRFS_EXIT nss_nlqrfs_exit ++#else ++#define NSS_NLQRFS_INIT 0 ++#define NSS_NLQRFS_EXIT 0 ++#endif /* !CONFIG_NSS_NLQRFS */ ++ ++#endif /* __NSS_NLQRFS_H */ +--- a/netlink/nss_nludp_st.c ++++ b/netlink/nss_nludp_st.c +@@ -1,6 +1,7 @@ + /* + ************************************************************************** + * Copyright (c) 2021, The Linux Foundation. All rights reserved. ++ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the +@@ -31,6 +32,9 @@ + #include + #include + #include ++#include ++#include ++#include + + #include + #include +@@ -40,6 +44,8 @@ + #include "nss_nludp_st_if.h" + #include "nss_nludp_st.h" + ++#define NSS_NLUDP_ST_VLAN_ID_NOT_CONFIGURED 0xFFF ++ + /* + * prototypes + */ +@@ -196,7 +202,6 @@ static int nss_nludp_st_ops_start(struct + struct nss_ctx_instance *nss_ctx; + struct nss_udp_st_msg num; + struct sk_buff *resp; +- uint32_t pid; + nss_tx_status_t status; + + /* +@@ -208,7 +213,6 @@ static int nss_nludp_st_ops_start(struct + return -EINVAL; + } + +- pid = nl_cm->pid; + /* + * Message validation required before accepting the configuration + */ +@@ -259,7 +263,6 @@ static int nss_nludp_st_ops_reset_stats( + struct nss_ctx_instance *nss_ctx; + struct nss_udp_st_msg num; + struct sk_buff *resp; +- uint32_t pid; + nss_tx_status_t status; + + /* +@@ -271,7 +274,6 @@ static int nss_nludp_st_ops_reset_stats( + return -EINVAL; + } + +- pid = nl_cm->pid; + /* + * Message validation required before accepting the configuration + */ +@@ -320,7 +322,6 @@ static int nss_nludp_st_ops_tx_destroy(s + struct nss_ctx_instance *nss_ctx; + struct nss_udp_st_msg num; + struct sk_buff *resp; +- uint32_t pid; + nss_tx_status_t status; + + /* +@@ -332,7 +333,6 @@ static int nss_nludp_st_ops_tx_destroy(s + return -EINVAL; + } + +- pid = nl_cm->pid; + /* + * Message validation required before accepting the configuration + */ +@@ -381,7 +381,6 @@ static int nss_nludp_st_ops_tx_create(st + struct nss_ctx_instance *nss_ctx; + struct nss_udp_st_msg num; + struct sk_buff *resp; +- uint32_t pid; + nss_tx_status_t status; + + /* +@@ -393,7 +392,6 @@ static int nss_nludp_st_ops_tx_create(st + return -EINVAL; + } + +- pid = nl_cm->pid; + /* + * Message validation required before accepting the configuration + */ +@@ -571,6 +569,7 @@ static struct neighbour *nss_nludp_st_ge + rt = rt6_lookup(&init_net, &daddr, NULL, 0, NULL, 0); + #endif + if (!rt) { ++ nss_nl_warn("rt6 info lookup failed\n"); + return NULL; + } + +@@ -587,21 +586,20 @@ static struct neighbour *nss_nludp_st_ge + } + dst_release(dst); + ++ nss_nl_warn("dst neigh info lookup failed\n"); + return NULL; + } + + /* + * nss_nludp_st_get_addr_hton() +- * Convert the ipv6 address from host order to network order. ++ * Convert the ipv6 address from NSS host order to Linux network order. + */ + static inline void nss_nludp_st_get_addr_hton(uint32_t src[4], uint32_t dst[4]) + { +- nss_nludp_st_swap_addr_ipv6(src, dst); +- +- dst[0] = htonl(dst[0]); +- dst[1] = htonl(dst[1]); +- dst[2] = htonl(dst[2]); +- dst[3] = htonl(dst[3]); ++ dst[0] = htonl(src[0]); ++ dst[1] = htonl(src[1]); ++ dst[2] = htonl(src[2]); ++ dst[3] = htonl(src[3]); + } + + /* +@@ -731,6 +729,277 @@ static int nss_nludp_st_destroy_ipv4_rul + } + + /* ++ * nss_nludp_st_vlan_next_dev_get_and_hold() ++ * Gets and hold the next device of the VLAN. ++ * ++ * It should be a physical ethernet interface. 
++ */ ++static struct net_device *nss_nludp_st_vlan_next_dev_get_and_hold(struct net_device *dev) ++{ ++ struct net_device *next_dev; ++ ++ next_dev = vlan_dev_next_dev(dev); ++ if (!next_dev) { ++ nss_nl_warn("%px: VLAN device's (%s) next dev is NULL\n", dev, dev->name); ++ return NULL; ++ } ++ ++ if (is_vlan_dev(next_dev) || next_dev->type != ARPHRD_ETHER) { ++ nss_nl_warn("%px: QinQ or non-ethernet VLAN master (%s) is not supported\n", dev, next_dev->name); ++ return NULL; ++ } ++ dev_hold(next_dev); ++ return next_dev; ++} ++ ++/* ++ * nss_nludp_st_ipv4_rawip_iface_config() ++ * Configure the WAN interface as RmNet for IPv4 protocol. ++ */ ++static int nss_nludp_st_ipv4_rawip_iface_config(struct net_device *dev, struct nss_ipv4_rule_create_msg *nircm) ++{ ++#ifdef NSS_NETLINK_UDP_ST_NO_RMNET_SUPPORT ++ nss_nl_warn("%px: RAWIP is disabled\n", dev); ++ return -EINVAL; ++#else ++ nircm->conn_rule.return_interface_num = nss_rmnet_rx_get_ifnum(dev); ++ memset(nircm->conn_rule.return_mac, 0, ETH_ALEN); ++ ether_addr_copy((uint8_t *)nircm->conn_rule.flow_mac, dev->dev_addr); ++ nss_nl_info("%px: Speedtest RmNet WAN interface %s is configured\n", dev, dev->name); ++ return 0; ++#endif ++} ++ ++/* ++ * nss_nludp_st_ipv4_eth_iface_config() ++ * Configure the WAN interface as Ethernet for IPv4 protocol. ++ */ ++static int nss_nludp_st_ipv4_eth_iface_config(struct net_device *dev, struct nss_ipv4_rule_create_msg *nircm) ++{ ++ if (nss_nludp_st_get_macaddr_ipv4(nircm->tuple.return_ip, (uint8_t *)&nircm->conn_rule.return_mac)) { ++ nss_nl_warn("%px: Error in updating the return MAC Address\n", dev); ++ return -EINVAL; ++ } ++ ether_addr_copy((uint8_t *)nircm->conn_rule.flow_mac, dev->dev_addr); ++ nircm->conn_rule.return_interface_num = nss_cmn_get_interface_number_by_dev(dev); ++ if (nircm->conn_rule.return_interface_num < 0) { ++ nss_nl_warn("%px: Unable to get the NSS interface number of %s\n", dev, dev->name); ++ return -EINVAL; ++ } ++ nss_nl_info("%px: Speedtest Ethernet WAN interface %s is configured\n", dev, dev->name); ++ return 0; ++} ++ ++/* ++ * nss_nludp_st_ipv4_set_vlan_tags() ++ * Sets the VLAN tags for IPv4 protocol. ++ */ ++static void nss_nludp_st_ipv4_set_vlan_tags(struct net_device *dev, struct nss_ipv4_rule_create_msg *nircm) ++{ ++ /* ++ * Get the primary VLAN info and set in the rule. ++ */ ++ uint16_t vlan_tag = vlan_dev_vlan_id(dev); ++ uint16_t vlan_tpid = ntohs(vlan_dev_vlan_proto(dev)); ++ ++ nircm->vlan_primary_rule.egress_vlan_tag = (vlan_tpid << 16) | vlan_tag; ++ nircm->vlan_primary_rule.ingress_vlan_tag = NSS_NLUDP_ST_VLAN_ID_NOT_CONFIGURED; ++ nircm->vlan_secondary_rule.egress_vlan_tag = NSS_NLUDP_ST_VLAN_ID_NOT_CONFIGURED; ++ nircm->vlan_secondary_rule.ingress_vlan_tag = NSS_NLUDP_ST_VLAN_ID_NOT_CONFIGURED; ++} ++ ++/* ++ * nss_nludp_st_ipv4_vlan_iface_config() ++ * Configure the WAN interface as VLAN for IPv4 protocol. ++ */ ++static int nss_nludp_st_ipv4_vlan_iface_config(struct net_device *dev, struct nss_ipv4_rule_create_msg *nircm) ++{ ++ struct net_device *next_dev; ++ int ret; ++ ++ next_dev = nss_nludp_st_vlan_next_dev_get_and_hold(dev); ++ if (!next_dev) { ++ nss_nl_warn("%px: Unable to get VLAN device's (%s) next dev\n", dev, dev->name); ++ return -ENODEV; ++ } ++ ++ /* ++ * Single VLAN, next_dev should be a physical device. ++ * Copy the real device's number (eth0). 
++ */ ++ nircm->conn_rule.return_interface_num = nss_cmn_get_interface_number_by_dev(next_dev); ++ if (nircm->conn_rule.return_interface_num < 0) { ++ nss_nl_warn("%px: Unable to get the NSS interface number of %s\n", dev, next_dev->name); ++ ret = -EINVAL; ++ goto fail; ++ } ++ dev_put(next_dev); ++ ++ /* ++ * Get the return IP address's MAC address. ++ */ ++ if (nss_nludp_st_get_macaddr_ipv4(nircm->tuple.return_ip, (uint8_t *)&nircm->conn_rule.return_mac)) { ++ nss_nl_warn("%px: Error in updating the return MAC Address\n", dev); ++ return -EINVAL; ++ } ++ ++ /* ++ * Get the primary VLAN info and set in the rule. ++ */ ++ nss_nludp_st_ipv4_set_vlan_tags(dev, nircm); ++ nircm->valid_flags |= NSS_IPV4_RULE_CREATE_VLAN_VALID; ++ ++ ether_addr_copy((uint8_t *)nircm->conn_rule.flow_mac, dev->dev_addr); ++ ++ /* ++ * Success. Fall through for release resources ++ */ ++ nss_nl_info("%px: Speedtest VLAN WAN interface %s is configured\n", dev, dev->name); ++ return 0; ++fail: ++ dev_put(next_dev); ++ return ret; ++} ++ ++#if defined(CONFIG_NSS_NLPPPOE) && CONFIG_NSS_NLPPPOE > 0 ++/* ++ * nss_nludp_st_pppoe_get_chan_and_info() ++ * Gets the PPPoE channel information. ++ */ ++static inline bool nss_nludp_st_pppoe_get_chan_and_info(struct net_device *dev, struct ppp_channel **ppp_chan, struct pppoe_opt *info) ++{ ++ int channel_count; ++ int channel_protocol; ++ ++ channel_count = ppp_hold_channels(dev, ppp_chan, 1); ++ if (channel_count != 1) { ++ nss_nl_warn("%px: Unable to get the channel for device: %s\n", dev, dev->name); ++ return false; ++ } ++ ++ channel_protocol = ppp_channel_get_protocol(ppp_chan[0]); ++ if (channel_protocol != PX_PROTO_OE) { ++ nss_nl_warn("%px: PPP channel protocol is not PPPoE for device: %s\n", dev, dev->name); ++ ppp_release_channels(ppp_chan, 1); ++ return false; ++ } ++ ++ if (pppoe_channel_addressing_get(ppp_chan[0], info)) { ++ nss_nl_warn("%px: Unable to get the PPPoE session information for device: %s\n", dev, dev->name); ++ ppp_release_channels(ppp_chan, 1); ++ return false; ++ } ++ ++ return true; ++} ++ ++/* ++ * nss_nludp_st_ipv4_pppoe_iface_config() ++ * Configure the WAN interface as PPPoE for IPv4 protocol. ++ */ ++static int nss_nludp_st_ipv4_pppoe_iface_config(struct net_device *dev, struct nss_ipv4_rule_create_msg *nircm) ++{ ++ struct pppoe_opt info; ++ struct ppp_channel *ppp_chan[1]; ++ int ret = -EINVAL; ++ ++ if (!nss_nludp_st_pppoe_get_chan_and_info(dev, ppp_chan, &info)) { ++ nss_nl_warn("%px: Unable to get PPPoE channel and info for device: %s\n", dev, dev->name); ++ return ret; ++ } ++ ++ /* ++ * Check if the next device is a VLAN (eth0-eth0.100-pppoe-wan ++ */ ++ if (is_vlan_dev(info.dev)) { ++ /* ++ * Next device is a VLAN device (eth0.100). ++ */ ++ struct net_device *next_dev; ++ ++ /* ++ * Check if we have a single VLAN device. ++ */ ++ next_dev = nss_nludp_st_vlan_next_dev_get_and_hold(info.dev); ++ if (!next_dev) { ++ nss_nl_warn("%px: Unable to get PPPoE's VLAN device's (%s) next dev\n", dev, info.dev->name); ++ ret = -ENODEV; ++ goto fail; ++ } ++ ++ /* ++ * PPPoE + VLAN (eth0-eth0.100-pppoe-wan) ++ * Copy the physical interface number (eth0) ++ */ ++ nircm->conn_rule.return_interface_num = nss_cmn_get_interface_number_by_dev(next_dev); ++ if (nircm->conn_rule.return_interface_num < 0) { ++ nss_nl_warn("%px: Unable to get the NSS interface number of %s\n", dev, next_dev->name); ++ dev_put(next_dev); ++ goto fail; ++ } ++ dev_put(next_dev); ++ ++ /* ++ * Get the primary VLAN info and set in the rule. 
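++ * The TPID goes in the upper 16 bits of the egress tag and the 12-bit VID
++ * in the lower bits; the ingress and secondary tags stay at 0xFFF (not configured).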
++ */ ++ nss_nludp_st_ipv4_set_vlan_tags(info.dev, nircm); ++ nircm->valid_flags |= NSS_IPV4_RULE_CREATE_VLAN_VALID; ++ } else { ++ /* ++ * PPPoE interface can be created on linux bridge, OVS bridge and LAG devices. ++ * udp_st doesn't support these hierarchies. ++ */ ++ if ((info.dev->priv_flags & (IFF_EBRIDGE | IFF_OPENVSWITCH)) ++ || ((info.dev->flags & IFF_MASTER) && (info.dev->priv_flags & IFF_BONDING))) { ++ nss_nl_warn("%px: PPPoE over bridge and LAG interfaces are not supported, dev: %s info.dev: %s\n", ++ dev, dev->name, info.dev->name); ++ goto fail; ++ } ++ ++ /* ++ * PPPoE only (eth0-pppoe-wan) ++ * Copy the physical interface number (eth0) ++ */ ++ nircm->conn_rule.return_interface_num = nss_cmn_get_interface_number_by_dev(info.dev); ++ if (nircm->conn_rule.return_interface_num < 0) { ++ nss_nl_warn("%px: Unable to get the PPPoE's physical interface's (%s) NSS interface number\n", ++ dev, info.dev->name); ++ goto fail; ++ } ++ } ++ ++ /* ++ * For PPPoE connections, the other end's MAC address is PPPoE session's remote MAC address ++ */ ++ ether_addr_copy((uint8_t *)nircm->conn_rule.return_mac, info.pa.remote); ++ ether_addr_copy((uint8_t *)nircm->conn_rule.flow_mac, info.dev->dev_addr); ++ ++ /* ++ * Check if NSS has a valid dynamic interface number for PPPoE interface. ++ * If there is not, NSS cannot handle PPPoE flows in fast path. ++ */ ++ nircm->pppoe_rule.return_if_num = nss_cmn_get_interface_number_by_dev(dev); ++ if (nircm->pppoe_rule.return_if_num < 0) { ++ nss_nl_warn("%px: Unable to get the PPPoE interfaces (%s) dynamic interface number\n", dev, dev->name); ++ goto fail; ++ } ++ nircm->pppoe_rule.return_if_exist = 1; ++ nircm->valid_flags |= NSS_IPV4_RULE_CREATE_PPPOE_VALID; ++ ++ /* ++ * Success. Fall through for release resources ++ */ ++ ret = 0; ++ nss_nl_info("%px: Speedtest PPPoE WAN interface %s is configured\n", dev, dev->name); ++fail: ++ dev_put(info.dev); ++ ppp_release_channels(ppp_chan, 1); ++ return ret; ++} ++#endif ++ ++ ++/* + * nss_nludp_st_create_ipv4_rule() + * Create a nss entry to accelerate the given IPv4 connection + */ +@@ -742,7 +1011,9 @@ static int nss_nludp_st_create_ipv4_rule + struct net_device *net_dev; + struct nss_ipv4_nexthop *nexthop; + struct sk_buff *resp; ++ struct nss_ipv4_src_mac_rule *src_mac; + nss_tx_status_t status; ++ int ret = 0; + + nss_ctx = nss_ipv4_get_mgr(); + if (!nss_ctx) { +@@ -801,8 +1072,6 @@ static int nss_nludp_st_create_ipv4_rule + * Copy over the connection rules and set the CONN_VALID flag + */ + nircm->conn_rule.flow_interface_num = NSS_UDP_ST_INTERFACE; +- nircm->conn_rule.return_interface_num = nss_cmn_get_interface_number_by_dev(net_dev); +- memcpy(nircm->conn_rule.flow_mac, net_dev->dev_addr, 6); + + /* + * Set the MTU values of the flows. 
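For reference, the egress VLAN tag built by the nss_nludp_st_ipv4_set_vlan_tags() helper above packs the TPID into the upper 16 bits and the 12-bit VID into the lower bits, with 0xFFF marking an unconfigured tag. A minimal standalone sketch of the packing (illustrative names, not part of the patch):

/*
 * Standalone sketch: how the 32-bit egress VLAN tag word is packed.
 * VLAN_ID_NOT_CONFIGURED mirrors NSS_NLUDP_ST_VLAN_ID_NOT_CONFIGURED.
 */
#include <stdint.h>
#include <stdio.h>

#define VLAN_ID_NOT_CONFIGURED 0xFFF

static uint32_t pack_egress_vlan_tag(uint16_t tpid, uint16_t vid)
{
	/* TPID in the upper 16 bits, 12-bit VID in the lower bits */
	return ((uint32_t)tpid << 16) | (vid & 0xFFF);
}

int main(void)
{
	/* 802.1Q TPID 0x8100, VLAN 100 -> 0x81000064 */
	printf("egress_vlan_tag = 0x%08x\n", pack_egress_vlan_tag(0x8100, 100));
	printf("unconfigured    = 0x%08x\n", VLAN_ID_NOT_CONFIGURED);
	return 0;
}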
+@@ -811,12 +1080,51 @@ static int nss_nludp_st_create_ipv4_rule + nircm->conn_rule.return_mtu = net_dev->mtu; + + /* +- * Update the return MAC address +- */ +- if (nss_nludp_st_get_macaddr_ipv4(nircm->tuple.return_ip, (uint8_t *)&nircm->conn_rule.return_mac)) { +- nss_nl_info("Error in Updating the Return MAC Address \n"); +- dev_put(net_dev); +- return -EINVAL; ++ * Supported WAN interface hierarchies: ++ * ++ * rmnet <-> DUT (5G interface) ++ * eth0-eth0.100 <-> DUT (single VLAN) ++ * eth0-pppoe-wan <-> DUT ( PPPoE only) ++ * eth0-eth0.100-pppoe-wan <-> DUT (PPPoE over VLAN) ++ * eth0 <-> DUT (simple physical interface) ++ */ ++ switch (net_dev->type) { ++ case ARPHRD_RAWIP: ++ ret = nss_nludp_st_ipv4_rawip_iface_config(net_dev, nircm); ++ break; ++ ++#if defined(CONFIG_NSS_NLPPPOE) && CONFIG_NSS_NLPPPOE > 0 ++ case ARPHRD_PPP: ++ ret = nss_nludp_st_ipv4_pppoe_iface_config(net_dev, nircm); ++ break; ++#endif ++ ++ case ARPHRD_ETHER: ++ /* ++ * Bridge and LAG interfaces are not supported. ++ */ ++ if ((net_dev->priv_flags & (IFF_EBRIDGE | IFF_OPENVSWITCH)) ++ || ((net_dev->flags & IFF_MASTER) && (net_dev->priv_flags & IFF_BONDING))) { ++ nss_nl_warn("%px: Bridge and LAG interfaces are not supported, dev: %s\n", skb, net_dev->name); ++ ret = -EINVAL; ++ break; ++ } ++ ++ if (is_vlan_dev(net_dev)) { ++ ret = nss_nludp_st_ipv4_vlan_iface_config(net_dev, nircm); ++ } else { ++ ret = nss_nludp_st_ipv4_eth_iface_config(net_dev, nircm); ++ } ++ break; ++ default: ++ nss_nl_warn("%px: unsupported speedtest interface: %s\n", skb, net_dev->name); ++ ret = -EINVAL; ++ break; ++ } ++ ++ ++ if (ret < 0) { ++ goto done; + } + + nircm->valid_flags |= NSS_IPV4_RULE_CREATE_CONN_VALID; +@@ -825,15 +1133,27 @@ static int nss_nludp_st_create_ipv4_rule + nexthop->flow_nexthop = nircm->conn_rule.flow_interface_num; + nexthop->return_nexthop = nircm->conn_rule.return_interface_num; + +- nss_nl_info("flow_nexthop:%d return_nexthop:%d\n", nexthop->flow_nexthop, nexthop->return_nexthop); ++ nss_nl_info("%px: flow_nexthop:%d return_nexthop:%d\n", skb, nexthop->flow_nexthop, nexthop->return_nexthop); ++ ++ /* ++ * We want acceleration engine to choose the source MAC address which we ++ * have given to it instead of using the underlying physical interface's MAC address. ++ */ ++ src_mac = &nircm->src_mac_rule; ++ ++ memcpy(src_mac->flow_src_mac, nircm->conn_rule.return_mac, ETH_ALEN); ++ memcpy(src_mac->return_src_mac, nircm->conn_rule.flow_mac, ETH_ALEN); ++ ++ nircm->valid_flags |= NSS_IPV4_RULE_CREATE_SRC_MAC_VALID; ++ src_mac->mac_valid_flags |= (NSS_IPV4_SRC_MAC_FLOW_VALID | NSS_IPV4_SRC_MAC_RETURN_VALID); + + status = nss_ipv4_tx(nss_ctx, &nim); + if (status != NSS_TX_SUCCESS) { +- nss_nl_info("%px: Create IPv4 message failed %d\n", nss_ctx, status); ++ nss_nl_info("%px: Create IPv4 message failed %d\n", skb, status); + } +- ++done: + dev_put(net_dev); +- return 0; ++ return ret; + } + + /* +@@ -868,7 +1188,7 @@ static int nss_nludp_st_destroy_ipv6_rul + } + + memset(&nim, 0, sizeof(struct nss_ipv6_msg)); +- nss_ipv6_msg_init(&nim, ++ nss_ipv6_msg_init(&nim, + NSS_IPV6_RX_INTERFACE, + NSS_IPV6_TX_DESTROY_RULE_MSG, + sizeof(struct nss_ipv6_rule_destroy_msg), +@@ -896,6 +1216,221 @@ static int nss_nludp_st_destroy_ipv6_rul + } + + /* ++ * nss_nludp_st_ipv6_rawip_iface_config() ++ * Configure the WAN interface as RmNet for IPv6 protocol. 
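++ *	Mirrors the IPv4 variant: RmNet is a RAWIP device without an L2 header,
++ *	so the return MAC is zeroed and only the flow MAC is copied.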
++ */ ++static int nss_nludp_st_ipv6_rawip_iface_config(struct net_device *dev, struct nss_ipv6_rule_create_msg *nircm) ++{ ++#ifdef NSS_NETLINK_UDP_ST_NO_RMNET_SUPPORT ++ nss_nl_warn("%px: RAWIP is disabled\n", dev); ++ return -EINVAL; ++#else ++ nircm->conn_rule.return_interface_num = nss_rmnet_rx_get_ifnum(dev); ++ memset(nircm->conn_rule.return_mac, 0, ETH_ALEN); ++ ether_addr_copy((uint8_t *)nircm->conn_rule.flow_mac, dev->dev_addr); ++ nss_nl_info("%px: Speedtest RmNet WAN interface %s is configured\n", dev, dev->name); ++ return 0; ++#endif ++} ++ ++/* ++ * nss_nludp_st_ipv6_eth_iface_config() ++ * Configure the WAN interface as Ethernet for IPv6 protocol. ++ */ ++static int nss_nludp_st_ipv6_eth_iface_config(struct net_device *dev, struct nss_ipv6_rule_create_msg *nircm) ++{ ++ if (nss_nludp_st_get_macaddr_ipv6(nircm->tuple.return_ip, (uint8_t *)&nircm->conn_rule.return_mac)) { ++ nss_nl_warn("%px: Error in updating the return MAC Address\n", dev); ++ return -EINVAL; ++ } ++ ether_addr_copy((uint8_t *)nircm->conn_rule.flow_mac, dev->dev_addr); ++ nircm->conn_rule.return_interface_num = nss_cmn_get_interface_number_by_dev(dev); ++ if (nircm->conn_rule.return_interface_num < 0) { ++ nss_nl_warn("%px: Unable to get the NSS interface number of %s\n", dev, dev->name); ++ return -EINVAL; ++ } ++ nss_nl_info("%px: Speedtest Ethernet WAN interface %s is configured\n", dev, dev->name); ++ return 0; ++} ++ ++/* ++ * nss_nludp_st_ipv6_set_vlan_tags() ++ * Sets the VLAN tags for IPv6 protocol. ++ */ ++static void nss_nludp_st_ipv6_set_vlan_tags(struct net_device *dev, struct nss_ipv6_rule_create_msg *nircm) ++{ ++ /* ++ * Get the primary VLAN info and set in the rule. ++ */ ++ uint16_t vlan_tag = vlan_dev_vlan_id(dev); ++ uint16_t vlan_tpid = ntohs(vlan_dev_vlan_proto(dev)); ++ ++ nircm->vlan_primary_rule.egress_vlan_tag = (vlan_tpid << 16) | vlan_tag; ++ nircm->vlan_primary_rule.ingress_vlan_tag = NSS_NLUDP_ST_VLAN_ID_NOT_CONFIGURED; ++ nircm->vlan_secondary_rule.egress_vlan_tag = NSS_NLUDP_ST_VLAN_ID_NOT_CONFIGURED; ++ nircm->vlan_secondary_rule.ingress_vlan_tag = NSS_NLUDP_ST_VLAN_ID_NOT_CONFIGURED; ++} ++ ++/* ++ * nss_nludp_st_ipv6_vlan_iface_config() ++ * Configure the WAN interface as VLAN for IPv6 protocol. ++ */ ++static int nss_nludp_st_ipv6_vlan_iface_config(struct net_device *dev, struct nss_ipv6_rule_create_msg *nircm) ++{ ++ struct net_device *next_dev; ++ int ret; ++ ++ next_dev = nss_nludp_st_vlan_next_dev_get_and_hold(dev); ++ if (!next_dev) { ++ nss_nl_warn("%px: Unable to get VLAN device's (%s) next dev\n", dev, dev->name); ++ return -ENODEV; ++ } ++ ++ /* ++ * Single VLAN, next_dev should be a physical device. ++ * Copy the real device's number (eth0). ++ */ ++ nircm->conn_rule.return_interface_num = nss_cmn_get_interface_number_by_dev(next_dev); ++ if (nircm->conn_rule.return_interface_num < 0) { ++ nss_nl_warn("%px: Unable to get the NSS interface number of %s\n", dev, next_dev->name); ++ ret = -EINVAL; ++ goto fail; ++ } ++ dev_put(next_dev); ++ ++ /* ++ * Get the return IP address's MAC address. ++ */ ++ if (nss_nludp_st_get_macaddr_ipv6(nircm->tuple.return_ip, (uint8_t *)&nircm->conn_rule.return_mac)) { ++ nss_nl_warn("%px: Error in updating the return MAC Address\n", dev); ++ return -EINVAL; ++ } ++ ++ /* ++ * Get the primary VLAN info and set in the rule. ++ */ ++ nss_nludp_st_ipv6_set_vlan_tags(dev, nircm); ++ nircm->valid_flags |= NSS_IPV6_RULE_CREATE_VLAN_VALID; ++ ++ ether_addr_copy((uint8_t *)nircm->conn_rule.flow_mac, dev->dev_addr); ++ ++ /* ++ * Success. 
Fall through for release resources
++ */
++ nss_nl_info("%px: Speedtest VLAN WAN interface %s is configured\n", dev, dev->name);
++ return 0;
++fail:
++ dev_put(next_dev);
++ return ret;
++}
++
++#if defined(CONFIG_NSS_NLPPPOE) && CONFIG_NSS_NLPPPOE > 0
++/*
++ * nss_nludp_st_ipv6_pppoe_iface_config()
++ *	Configure the WAN interface as PPPoE for IPv6 protocol.
++ */
++static int nss_nludp_st_ipv6_pppoe_iface_config(struct net_device *dev, struct nss_ipv6_rule_create_msg *nircm)
++{
++ struct pppoe_opt info;
++ struct ppp_channel *ppp_chan[1];
++ int ret = -EINVAL;
++
++ if (!nss_nludp_st_pppoe_get_chan_and_info(dev, ppp_chan, &info)) {
++ nss_nl_warn("%px: Unable to get PPPoE channel and info for device: %s\n", dev, dev->name);
++ return ret;
++ }
++
++ /*
++ * Check if the next device is a VLAN (eth0-eth0.100-pppoe-wan
++ */
++ if (is_vlan_dev(info.dev)) {
++ /*
++ * Next device is a VLAN device (eth0.100).
++ */
++ struct net_device *next_dev;
++
++ /*
++ * Check if we have a single VLAN device.
++ */
++ next_dev = nss_nludp_st_vlan_next_dev_get_and_hold(info.dev);
++ if (!next_dev) {
++ nss_nl_warn("%px: Unable to get PPPoE's VLAN device's (%s) next dev\n", dev, info.dev->name);
++ ret = -ENODEV;
++ goto fail;
++ }
++
++ /*
++ * PPPoE + VLAN (eth0-eth0.100-pppoe-wan)
++ * Copy the physical interface number (eth0)
++ */
++ nircm->conn_rule.return_interface_num = nss_cmn_get_interface_number_by_dev(next_dev);
++ if (nircm->conn_rule.return_interface_num < 0) {
++ nss_nl_warn("%px: Unable to get the NSS interface number of %s\n", dev, next_dev->name);
++ dev_put(next_dev);
++ goto fail;
++ }
++ dev_put(next_dev);
++
++ /*
++ * Get the primary VLAN info and set in the rule.
++ */
++ nss_nludp_st_ipv6_set_vlan_tags(info.dev, nircm);
++ nircm->valid_flags |= NSS_IPV6_RULE_CREATE_VLAN_VALID;
++ } else {
++ /*
++ * PPPoE interface can be created on linux bridge, OVS bridge and LAG devices.
++ * udp_st doesn't support these hierarchies.
++ */
++ if ((info.dev->priv_flags & (IFF_EBRIDGE | IFF_OPENVSWITCH))
++ || ((info.dev->flags & IFF_MASTER) && (info.dev->priv_flags & IFF_BONDING))) {
++ nss_nl_warn("%px: PPPoE over bridge and LAG interfaces are not supported, dev: %s info.dev: %s\n",
++ dev, dev->name, info.dev->name);
++ goto fail;
++ }
++
++ /*
++ * PPPoE only (eth0-pppoe-wan)
++ * Copy the physical interface number (eth0)
++ */
++ nircm->conn_rule.return_interface_num = nss_cmn_get_interface_number_by_dev(info.dev);
++ if (nircm->conn_rule.return_interface_num < 0) {
++ nss_nl_warn("%px: Unable to get the PPPoE's physical interface's (%s) NSS interface number\n",
++ dev, info.dev->name);
++ goto fail;
++ }
++ }
++
++ /*
++ * For PPPoE connections, the other end's MAC address is PPPoE session's remote MAC address
++ */
++ ether_addr_copy((uint8_t *)nircm->conn_rule.return_mac, info.pa.remote);
++ ether_addr_copy((uint8_t *)nircm->conn_rule.flow_mac, info.dev->dev_addr);
++
++ /*
++ * Check if NSS has a valid dynamic interface number for PPPoE interface.
++ * If there is not, NSS cannot handle PPPoE flows in fast path.
++ */
++ nircm->pppoe_rule.return_if_num = nss_cmn_get_interface_number_by_dev(dev);
++ if (nircm->pppoe_rule.return_if_num < 0) {
++ nss_nl_warn("%px: Unable to get the PPPoE interfaces (%s) dynamic interface number\n", dev, dev->name);
++ goto fail;
++ }
++ nircm->pppoe_rule.return_if_exist = 1;
++ nircm->valid_flags |= NSS_IPV6_RULE_CREATE_PPPOE_VALID;
++
++ /*
++ * Success. 
Fall through for release resources ++ */ ++ ret = 0; ++ nss_nl_info("%px: Speedtest PPPoE WAN interface %s is configured\n", dev, dev->name); ++fail: ++ dev_put(info.dev); ++ ppp_release_channels(ppp_chan, 1); ++ return ret; ++} ++#endif ++ ++/* + * nss_nludp_st_create_ipv6_rule() + * Create a nss entry to accelerate the given IPV6 connection + */ +@@ -907,7 +1442,9 @@ static int nss_nludp_st_create_ipv6_rule + struct net_device *net_dev; + struct nss_ipv6_nexthop *nexthop; + struct sk_buff *resp; ++ struct nss_ipv6_src_mac_rule *src_mac; + nss_tx_status_t status; ++ int ret = 0; + + nss_ctx = nss_ipv6_get_mgr(); + if (!nss_ctx) { +@@ -962,16 +1499,59 @@ static int nss_nludp_st_create_ipv6_rule + * Copy over the connection rules and set CONN_VALID flag + */ + nircm->conn_rule.flow_interface_num = NSS_UDP_ST_INTERFACE; +- nircm->conn_rule.return_interface_num = nss_cmn_get_interface_number_by_dev(net_dev); +- memcpy(nircm->conn_rule.flow_mac, net_dev->dev_addr, 6); + + /* +- * Update the return MAC address ++ * Set the MTU values of the flows. + */ +- if (nss_nludp_st_get_macaddr_ipv6(nircm->tuple.return_ip, (uint8_t *)&nircm->conn_rule.return_mac)) { +- nss_nl_info("Error in Updating the Return MAC Address \n"); +- dev_put(net_dev); +- return -EINVAL; ++ nircm->conn_rule.flow_mtu = NSS_NLUDP_ST_MAX_MTU; ++ nircm->conn_rule.return_mtu = net_dev->mtu; ++ ++ /* ++ * Supported WAN interface hierarchies: ++ * ++ * rmnet <-> DUT (5G interface) ++ * eth0-eth0.100 <-> DUT (single VLAN) ++ * eth0-pppoe-wan <-> DUT ( PPPoE only) ++ * eth0-eth0.100-pppoe-wan <-> DUT (PPPoE over VLAN) ++ * eth0 <-> DUT (simple physical interface) ++ */ ++ switch (net_dev->type) { ++ case ARPHRD_RAWIP: ++ ret = nss_nludp_st_ipv6_rawip_iface_config(net_dev, nircm); ++ break; ++ ++#if defined(CONFIG_NSS_NLPPPOE) && CONFIG_NSS_NLPPPOE > 0 ++ case ARPHRD_PPP: ++ ret = nss_nludp_st_ipv6_pppoe_iface_config(net_dev, nircm); ++ break; ++#endif ++ ++ case ARPHRD_ETHER: ++ /* ++ * Bridge and LAG interfaces are not supported. ++ */ ++ if ((net_dev->priv_flags & (IFF_EBRIDGE | IFF_OPENVSWITCH)) ++ || ((net_dev->flags & IFF_MASTER) && (net_dev->priv_flags & IFF_BONDING))) { ++ nss_nl_warn("%px: Bridge and LAG interfaces are not supported, dev: %s\n", skb, net_dev->name); ++ ret = -EINVAL; ++ break; ++ } ++ ++ if (is_vlan_dev(net_dev)) { ++ ret = nss_nludp_st_ipv6_vlan_iface_config(net_dev, nircm); ++ } else { ++ ret = nss_nludp_st_ipv6_eth_iface_config(net_dev, nircm); ++ } ++ break; ++ ++ default: ++ nss_nl_warn("%px: unsupported speedtest interface: %s\n", skb, net_dev->name); ++ ret = -EINVAL; ++ break; ++ } ++ ++ if (ret < 0) { ++ goto done; + } + + nircm->valid_flags |= NSS_IPV6_RULE_CREATE_CONN_VALID; +@@ -982,14 +1562,25 @@ static int nss_nludp_st_create_ipv6_rule + + nss_nl_info("flow_nexthop:%d return_nexthop:%d\n", nexthop->flow_nexthop, nexthop->return_nexthop); + ++ /* ++ * We want acceleration engine to choose the source MAC address which we ++ * have given to it instead of using the underlying physical interface's MAC address. 
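++ * Note the cross-assignment below: flow_src_mac takes the return MAC and
++ * return_src_mac takes the flow MAC.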
++ */
++ src_mac = &nircm->src_mac_rule;
++
++ memcpy(src_mac->flow_src_mac, nircm->conn_rule.return_mac, ETH_ALEN);
++ memcpy(src_mac->return_src_mac, nircm->conn_rule.flow_mac, ETH_ALEN);
++
++ nircm->valid_flags |= NSS_IPV6_RULE_CREATE_SRC_MAC_VALID;
++ src_mac->mac_valid_flags |= (NSS_IPV6_SRC_MAC_FLOW_VALID | NSS_IPV6_SRC_MAC_RETURN_VALID);
++
+ status = nss_ipv6_tx(nss_ctx, &nim);
+ if (status != NSS_TX_SUCCESS) {
+ nss_nl_info("%px: Create IPv6 message failed %d\n", nss_ctx, status);
+- return -EPERM;
+ }
+-
++done:
+ dev_put(net_dev);
+- return 0;
++ return ret;
+ }
+ 
+ /*
+@@ -1089,6 +1680,7 @@ static int nss_nludp_st_ops_uncfg_rule(s
+ {
+ struct nss_nludp_st_rule *nl_rule;
+ struct nss_nlcmn *nl_cm;
++ struct nss_udp_st_cfg *uncfg;
+ int ret = 0;
+ 
+ /*
+@@ -1104,6 +1696,15 @@ static int nss_nludp_st_ops_uncfg_rule(s
+ * Message validation required before accepting the configuration
+ */
+ nl_rule = container_of(nl_cm, struct nss_nludp_st_rule, cm);
++ uncfg = &nl_rule->num.msg.uncfg;
++
++ /*
++ * Convert the ipv6 address to NSS format.
++ */
++ if (uncfg->ip_version == NSS_UDP_ST_FLAG_IPV6) {
++ nss_nludp_st_swap_addr_ipv6(uncfg->src_ip.ip.ipv6, uncfg->src_ip.ip.ipv6);
++ nss_nludp_st_swap_addr_ipv6(uncfg->dest_ip.ip.ipv6, uncfg->dest_ip.ip.ipv6);
++ }
+ 
+ /*
+ * Unconfigure udp_st only for the transmit node.
+@@ -1118,13 +1719,13 @@ static int nss_nludp_st_ops_uncfg_rule(s
+ /*
+ * Destroy rule based on ip version
+ */
+- if (nl_rule->num.msg.uncfg.ip_version == NSS_UDP_ST_FLAG_IPV4) {
++ if (uncfg->ip_version == NSS_UDP_ST_FLAG_IPV4) {
+ ret = nss_nludp_st_destroy_ipv4_rule(skb, nl_rule);
+- } else if (nl_rule->num.msg.uncfg.ip_version == NSS_UDP_ST_FLAG_IPV6) {
++ } else if (uncfg->ip_version == NSS_UDP_ST_FLAG_IPV6) {
+ ret = nss_nludp_st_destroy_ipv6_rule(skb, nl_rule);
+ } else {
+ goto fail;
+- }
++ }
+ 
+ if (ret < 0) {
+ nss_nl_error("%px: Unable to delete a rule entry for ipv%d.\n", skb, nl_rule->num.msg.uncfg.ip_version);
+@@ -1150,6 +1751,7 @@ static int nss_nludp_st_ops_cfg_rule(str
+ {
+ struct nss_nludp_st_rule *nl_rule;
+ struct nss_nlcmn *nl_cm;
++ struct nss_udp_st_cfg *cfg;
+ int ret = 0;
+ 
+ /*
+@@ -1165,6 +1767,15 @@ static int nss_nludp_st_ops_cfg_rule(str
+ * Message validation required before accepting the configuration
+ */
+ nl_rule = container_of(nl_cm, struct nss_nludp_st_rule, cm);
++ cfg = &nl_rule->num.msg.cfg;
++
++ /*
++ * Convert the ipv6 address to NSS format.
++ */
++ if (cfg->ip_version == NSS_UDP_ST_FLAG_IPV6) {
++ nss_nludp_st_swap_addr_ipv6(cfg->src_ip.ip.ipv6, cfg->src_ip.ip.ipv6);
++ nss_nludp_st_swap_addr_ipv6(cfg->dest_ip.ip.ipv6, cfg->dest_ip.ip.ipv6);
++ }
+ 
+ /*
+ * Configures udp_st only for the transmit node. 
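With the word swap removed from nss_nludp_st_get_addr_hton() earlier in this patch, the cfg/uncfg handlers above perform the NSS-format swap themselves and the hton helper reduces to a per-word htonl(). A standalone sketch of that per-word conversion (not part of the patch):

/*
 * Standalone sketch: convert each of the four 32-bit IPv6 words
 * independently with htonl(), as the reworked helper now does.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

static void addr_hton(const uint32_t src[4], uint32_t dst[4])
{
	int i;

	for (i = 0; i < 4; i++)
		dst[i] = htonl(src[i]);
}

int main(void)
{
	uint32_t host[4] = { 0x20010db8, 0x0, 0x0, 0x1 };	/* 2001:db8::1 */
	uint32_t net[4];

	addr_hton(host, net);
	printf("word0: 0x%08x -> 0x%08x\n", host[0], net[0]);
	return 0;
}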
+@@ -1179,9 +1790,9 @@ static int nss_nludp_st_ops_cfg_rule(str + /* + * Create rule based on ip version + */ +- if (nl_rule->num.msg.cfg.ip_version == NSS_UDP_ST_FLAG_IPV4) { ++ if (cfg->ip_version == NSS_UDP_ST_FLAG_IPV4) { + ret = nss_nludp_st_create_ipv4_rule(skb, nl_rule); +- } else if (nl_rule->num.msg.cfg.ip_version == NSS_UDP_ST_FLAG_IPV6) { ++ } else if (cfg->ip_version == NSS_UDP_ST_FLAG_IPV6) { + ret = nss_nludp_st_create_ipv6_rule(skb, nl_rule); + } else { + goto fail; diff --git a/qca-nss-clients/patches-11.4/0036-vxlanmgr-backport-12.5.patch b/qca-nss-clients/patches-11.4/0036-vxlanmgr-backport-12.5.patch new file mode 100644 index 0000000..829ce4d --- /dev/null +++ b/qca-nss-clients/patches-11.4/0036-vxlanmgr-backport-12.5.patch @@ -0,0 +1,357 @@ +From 580a9ff682ea4e19cb30720662aeecb1ab5df859 Mon Sep 17 00:00:00 2001 +From: Apoorv Gupta +Date: Mon, 12 Jul 2021 18:12:43 +0530 +Subject: [PATCH] [qca-nss-clients] Options not supported with VxLAN + +Flows through VxLAN tunnel should not be accelerated +if the following options are used, + 1. RSC(route short-circuit), + 2. GPE(Generic Protocol Extension) + +Change-Id: I183d24925e1a99ae49a9f1f6011bb7f08eab92f2 +Signed-off-by: Apoorv Gupta +--- + vxlanmgr/nss_vxlanmgr_tunnel.c | 22 +++++++++++++++++++--- + 1 file changed, 19 insertions(+), 3 deletions(-) + +--- a/vxlanmgr/nss_vxlanmgr_tunnel.c ++++ b/vxlanmgr/nss_vxlanmgr_tunnel.c +@@ -1,6 +1,6 @@ + /* + ************************************************************************** +- * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. ++ * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. 
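The hunks below make nss_vxlanmgr_tunnel_flags_parse() return 0 as soon as an unsupported option (RSC, GPE) is seen, and tunnel_create treats a zero result as "do not offload". A standalone sketch of the pattern (flag values are illustrative, not the kernel's):

/*
 * Standalone sketch: early-return flag parsing. A result of 0 tells
 * the caller the tunnel cannot be offloaded.
 */
#include <stdint.h>
#include <stdio.h>

#define F_RSC	(1U << 0)	/* route short-circuit: unsupported */
#define F_GPE	(1U << 1)	/* generic protocol extension: unsupported */
#define F_GBP	(1U << 2)	/* group based policy: supported */

#define RULE_GBP	(1U << 0)
#define RULE_UDP	(1U << 1)

static uint16_t parse_flags(uint32_t priv_flags)
{
	uint16_t flags = 0;

	if (priv_flags & (F_RSC | F_GPE))
		return 0;	/* unsupported option present: refuse offload */

	if (priv_flags & F_GBP)
		flags |= RULE_GBP;

	return flags | RULE_UDP;
}

int main(void)
{
	printf("gbp only: %#x\n", parse_flags(F_GBP));
	printf("rsc set : %#x\n", parse_flags(F_RSC | F_GBP));
	return 0;
}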
+@@ -91,18 +91,23 @@ static uint16_t nss_vxlanmgr_tunnel_flag + uint16_t flags = 0; + uint32_t priv_flags = priv->flags; + ++ if (priv_flags & VXLAN_F_RSC) ++ return flags; + if (priv_flags & VXLAN_F_GBP) + flags |= NSS_VXLAN_RULE_FLAG_GBP_ENABLED; +- if (priv_flags & VXLAN_F_IPV6) ++ ++ if (priv_flags & VXLAN_F_IPV6) { + flags |= NSS_VXLAN_RULE_FLAG_IPV6; +- else if (!(priv_flags & VXLAN_F_IPV6)) ++ if (!(priv_flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ++ flags |= NSS_VXLAN_RULE_FLAG_ENCAP_L4_CSUM_REQUIRED; ++ } else { + flags |= NSS_VXLAN_RULE_FLAG_IPV4; ++ if (priv_flags & VXLAN_F_UDP_CSUM) ++ flags |= NSS_VXLAN_RULE_FLAG_ENCAP_L4_CSUM_REQUIRED; ++ } ++ + if (priv->cfg.tos == 1) + flags |= NSS_VXLAN_RULE_FLAG_INHERIT_TOS; +- if (priv_flags & VXLAN_F_UDP_CSUM) +- flags |= NSS_VXLAN_RULE_FLAG_ENCAP_L4_CSUM_REQUIRED; +- else if (!(priv_flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) +- flags |= NSS_VXLAN_RULE_FLAG_ENCAP_L4_CSUM_REQUIRED; + + return (flags | NSS_VXLAN_RULE_FLAG_UDP); + } +@@ -113,18 +118,25 @@ static uint16_t nss_vxlanmgr_tunnel_flag + struct vxlan_config *cfg = &priv->cfg; + uint32_t priv_flags = cfg->flags; + ++ if (priv_flags & VXLAN_F_RSC) ++ return flags; ++ if (priv_flags & VXLAN_F_GPE) ++ return flags; + if (priv_flags & VXLAN_F_GBP) + flags |= NSS_VXLAN_RULE_FLAG_GBP_ENABLED; +- if (priv_flags & VXLAN_F_IPV6) ++ ++ if (priv_flags & VXLAN_F_IPV6) { + flags |= NSS_VXLAN_RULE_FLAG_IPV6; +- else if (!(priv_flags & VXLAN_F_IPV6)) ++ if (!(priv_flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) ++ flags |= NSS_VXLAN_RULE_FLAG_ENCAP_L4_CSUM_REQUIRED; ++ } else { + flags |= NSS_VXLAN_RULE_FLAG_IPV4; ++ if (!(priv_flags & VXLAN_F_UDP_ZERO_CSUM_TX)) ++ flags |= NSS_VXLAN_RULE_FLAG_ENCAP_L4_CSUM_REQUIRED; ++ } ++ + if (cfg->tos == 1) + flags |= NSS_VXLAN_RULE_FLAG_INHERIT_TOS; +- if (priv_flags & VXLAN_F_UDP_ZERO_CSUM_TX) +- flags |= NSS_VXLAN_RULE_FLAG_ENCAP_L4_CSUM_REQUIRED; +- else if (!(priv_flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) +- flags |= NSS_VXLAN_RULE_FLAG_ENCAP_L4_CSUM_REQUIRED; + + return (flags | NSS_VXLAN_RULE_FLAG_UDP); + } +@@ -436,7 +448,8 @@ static struct notifier_block nss_vxlanmg + + /* + * nss_vxlanmgr_tunnel_inner_stats() +- * Update vxlan netdev stats with inner node stats ++ * Update vxlan netdev stats with inner node stats. ++ * Note: Reference on the netdevice is expected to be held by the caller at the time this function is called. 
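++ * The dev_hold()/dev_put() pair was therefore removed from this function.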
+ */ + static void nss_vxlanmgr_tunnel_inner_stats(struct nss_vxlanmgr_tun_ctx *tun_ctx, struct nss_vxlan_msg *nvm) + { +@@ -450,7 +463,6 @@ static void nss_vxlanmgr_tunnel_inner_st + stats = &nvm->msg.stats; + dev = tun_ctx->dev; + +- dev_hold(dev); + netdev_stats = (struct net_device_stats *)&dev->stats; + + /* +@@ -469,7 +481,6 @@ static void nss_vxlanmgr_tunnel_inner_st + u64_stats_add(&tstats->tx_bytes, stats->node_stats.tx_bytes); + u64_stats_update_end(&tstats->syncp); + netdev_stats->tx_dropped += dropped; +- dev_put(dev); + } + + /* +@@ -514,7 +525,7 @@ static void nss_vxlanmgr_tunnel_outer_st + * nss_vxlanmgr_tunnel_fdb_update() + * Update vxlan fdb entries + */ +-static void nss_vxlanmgr_tunnel_fdb_update(struct nss_vxlanmgr_tun_ctx *tun_ctx, struct nss_vxlan_msg *nvm) ++static void nss_vxlanmgr_tunnel_fdb_update(struct net_device *dev, uint32_t vni, struct nss_vxlan_msg *nvm) + { + uint8_t *mac; + uint16_t i, nentries; +@@ -523,13 +534,10 @@ static void nss_vxlanmgr_tunnel_fdb_upda + + db_stats = &nvm->msg.db_stats; + nentries = db_stats->cnt; +- priv = netdev_priv(tun_ctx->dev); +- +- dev_hold(tun_ctx->dev); ++ priv = netdev_priv(dev); + + if (nentries > NSS_VXLAN_MACDB_ENTRIES_PER_MSG) { +- nss_vxlanmgr_warn("%px: No more than 20 entries allowed per message.\n", tun_ctx->dev); +- dev_put(tun_ctx->dev); ++ nss_vxlanmgr_warn("%px: No more than 20 entries allowed per message.\n", dev); + return; + } + +@@ -539,11 +547,10 @@ static void nss_vxlanmgr_tunnel_fdb_upda + #if (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 5, 7)) + vxlan_fdb_update_mac(priv, mac); + #else +- vxlan_fdb_update_mac(priv, mac, tun_ctx->vni); ++ vxlan_fdb_update_mac(priv, mac, vni); + #endif + } + } +- dev_put(tun_ctx->dev); + } + + /* +@@ -555,20 +562,29 @@ static void nss_vxlanmgr_tunnel_inner_no + struct net_device *dev = (struct net_device *)app_data; + struct nss_vxlanmgr_tun_ctx *tun_ctx; + struct nss_vxlan_msg *nvm; ++ uint32_t vni; + + if (!ncm) { + nss_vxlanmgr_info("%px: NULL msg received.\n", dev); + return; + } + ++ if (!dev) { ++ nss_vxlanmgr_info("%px: NULL device received.\n", dev); ++ return; ++ } ++ + spin_lock_bh(&vxlan_ctx.tun_lock); ++ dev_hold(dev); + tun_ctx = nss_vxlanmgr_tunnel_ctx_dev_get(dev); + if (!tun_ctx) { + spin_unlock_bh(&vxlan_ctx.tun_lock); + nss_vxlanmgr_warn("%px: Invalid tunnel context\n", dev); ++ dev_put(dev); + return; + } + ++ vni = tun_ctx->vni; + nvm = (struct nss_vxlan_msg *)ncm; + switch (nvm->cm.type) { + case NSS_VXLAN_MSG_TYPE_STATS_SYNC: +@@ -576,14 +592,24 @@ static void nss_vxlanmgr_tunnel_inner_no + nss_vxlanmgr_tun_stats_sync(tun_ctx, nvm); + break; + case NSS_VXLAN_MSG_TYPE_MACDB_STATS: +- nss_vxlanmgr_tunnel_fdb_update(tun_ctx, nvm); + nss_vxlanmgr_tun_macdb_stats_sync(tun_ctx, nvm); +- break; +- default: ++ ++ /* ++ * Release the lock before updating the Linux FDB entry. ++ * This will ensure there is no deadlock when a potential ++ * MAC add event occurs at same time, which needs to hold ++ * the kernel's hash lock followed by the tunnel ctx lock. 
++ */ + spin_unlock_bh(&vxlan_ctx.tun_lock); +- nss_vxlanmgr_info("%px: Unknown Event from NSS", dev); ++ ++ nss_vxlanmgr_tunnel_fdb_update(dev, vni, nvm); ++ dev_put(dev); + return; ++ default: ++ nss_vxlanmgr_info("%px: Unknown Event from NSS", dev); + } ++ ++ dev_put(dev); + spin_unlock_bh(&vxlan_ctx.tun_lock); + } + +@@ -829,7 +855,7 @@ done: + */ + int nss_vxlanmgr_tunnel_destroy(struct net_device *dev) + { +- uint32_t inner_ifnum, outer_ifnum; ++ uint32_t inner_ifnum, outer_ifnum, tun_count; + struct nss_vxlanmgr_tun_ctx *tun_ctx; + struct nss_vxlan_msg vxlanmsg; + nss_tx_status_t ret; +@@ -866,16 +892,21 @@ int nss_vxlanmgr_tunnel_destroy(struct n + + nss_vxlanmgr_tun_stats_deinit(tun_ctx); + nss_vxlanmgr_tun_stats_dentry_remove(tun_ctx); ++ dev_put(tun_ctx->dev); + kfree(tun_ctx); + +- if (!vxlan_ctx.tun_count) { +- /* +- * Unregister fdb notifier chain if +- * all vxlan tunnels are destroyed. +- */ ++ /* ++ * Unregister fdb notifier chain if ++ * all vxlan tunnels are destroyed. ++ */ ++ spin_lock_bh(&vxlan_ctx.tun_lock); ++ tun_count = vxlan_ctx.tun_count; ++ spin_unlock_bh(&vxlan_ctx.tun_lock); ++ if (!tun_count) { + vxlan_fdb_unregister_notify(&nss_vxlanmgr_tunnel_fdb_notifier); + } +- nss_vxlanmgr_info("%px: VxLAN interface count is #%d\n", dev, vxlan_ctx.tun_count); ++ ++ nss_vxlanmgr_info("%px: VxLAN interface count is #%d\n", dev, tun_count); + + memset(&vxlanmsg, 0, sizeof(struct nss_vxlan_msg)); + ret = nss_vxlanmgr_tunnel_tx_msg_sync(vxlan_ctx.nss_ctx, +@@ -929,6 +960,7 @@ int nss_vxlanmgr_tunnel_create(struct ne + struct nss_vxlan_rule_msg *vxlan_cfg; + struct nss_ctx_instance *nss_ctx; + uint32_t inner_ifnum, outer_ifnum; ++ uint16_t parse_flags; + nss_tx_status_t ret; + + spin_lock_bh(&vxlan_ctx.tun_lock); +@@ -939,7 +971,20 @@ int nss_vxlanmgr_tunnel_create(struct ne + } + spin_unlock_bh(&vxlan_ctx.tun_lock); + ++ /* ++ * The reference to the dev will be released in nss_vxlanmgr_tunnel_destroy() ++ */ + dev_hold(dev); ++ priv = netdev_priv(dev); ++ parse_flags = nss_vxlanmgr_tunnel_flags_parse(priv); ++ ++ /* ++ * Check if the tunnel is supported. ++ */ ++ if (!parse_flags) { ++ nss_vxlanmgr_warn("%px: Tunnel offload not supported\n", dev); ++ goto ctx_alloc_fail; ++ } + + tun_ctx = kzalloc(sizeof(struct nss_vxlanmgr_tun_ctx), GFP_ATOMIC); + if (!tun_ctx) { +@@ -988,12 +1033,11 @@ int nss_vxlanmgr_tunnel_create(struct ne + memset(&vxlanmsg, 0, sizeof(struct nss_vxlan_msg)); + vxlan_cfg = &vxlanmsg.msg.vxlan_create; + +- priv = netdev_priv(dev); + vxlan_cfg->vni = vxlan_get_vni(priv); +- vxlan_cfg->tunnel_flags = nss_vxlanmgr_tunnel_flags_parse(priv); ++ vxlan_cfg->tunnel_flags = parse_flags; + vxlan_cfg->src_port_min = priv->cfg.port_min; + vxlan_cfg->src_port_max = priv->cfg.port_max; +- vxlan_cfg->dest_port = priv->cfg.dst_port; ++ vxlan_cfg->dest_port = ntohs(priv->cfg.dst_port); + vxlan_cfg->tos = priv->cfg.tos; + vxlan_cfg->ttl = (priv->cfg.ttl ? 
priv->cfg.ttl : IPDEFTTL); + +@@ -1059,7 +1103,6 @@ int nss_vxlanmgr_tunnel_create(struct ne + spin_unlock_bh(&vxlan_ctx.tun_lock); + nss_vxlanmgr_info("%px: VxLAN interface count is #%d\n", dev, vxlan_ctx.tun_count); + +- dev_put(dev); + return NOTIFY_DONE; + + config_fail: +--- a/vxlanmgr/nss_vxlanmgr_tun_stats.c ++++ b/vxlanmgr/nss_vxlanmgr_tun_stats.c +@@ -89,7 +89,7 @@ static int nss_vxlanmgr_tun_stats_show(s + seq_printf(m, "\t\tflow_label = %u\n", tun_ctx->flow_label); + seq_printf(m, "\t\tsrc_port_min = %u\n", tun_ctx->src_port_min); + seq_printf(m, "\t\tsrc_port_max = %u\n", tun_ctx->src_port_max); +- seq_printf(m, "\t\tdest_port = %u\n", ntohs(tun_ctx->dest_port)); ++ seq_printf(m, "\t\tdest_port = %u\n", tun_ctx->dest_port); + seq_printf(m, "\t\ttos = %u\n", tun_ctx->tos); + seq_printf(m, "\t\tttl = %u\n", tun_ctx->ttl); + +@@ -173,6 +173,7 @@ void nss_vxlanmgr_tun_stats_update(uint6 + /* + * nss_vxlanmgr_tun_macdb_stats_sync() + * Sync function for vxlan fdb entries ++ * Note: Reference on the netdevice is expected to be held by the caller at the time this function is called. + */ + void nss_vxlanmgr_tun_macdb_stats_sync(struct nss_vxlanmgr_tun_ctx *tun_ctx, struct nss_vxlan_msg *nvm) + { +@@ -183,11 +184,8 @@ void nss_vxlanmgr_tun_macdb_stats_sync(s + db_stats = &nvm->msg.db_stats; + nentries = db_stats->cnt; + +- dev_hold(tun_ctx->dev); +- + if (nentries > NSS_VXLAN_MACDB_ENTRIES_PER_MSG) { + nss_vxlanmgr_warn("%px: No more than 20 entries allowed per message.\n", tun_ctx->dev); +- dev_put(tun_ctx->dev); + return; + } + +@@ -203,7 +201,6 @@ void nss_vxlanmgr_tun_macdb_stats_sync(s + } + } + } +- dev_put(tun_ctx->dev); + } + + /* +@@ -299,7 +296,7 @@ bool nss_vxlanmgr_tun_stats_dentry_creat + * nss_vxlanmgr_tun_stats_dentry_deinit() + * Cleanup the debugfs tree. + */ +-void nss_vxlanmgr_tun_stats_dentry_deinit() ++void nss_vxlanmgr_tun_stats_dentry_deinit(void) + { + debugfs_remove_recursive(vxlan_ctx.dentry); + } +@@ -308,7 +305,7 @@ void nss_vxlanmgr_tun_stats_dentry_deini + * nss_vxlanmgr_tun_stats_dentry_init() + * Create VxLAN tunnel statistics debugfs entry. + */ +-bool nss_vxlanmgr_tun_stats_dentry_init() ++bool nss_vxlanmgr_tun_stats_dentry_init(void) + { + /* + * initialize debugfs. 
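The two dest_port hunks above move the byte-order conversion to rule-creation time: priv->cfg.dst_port is stored big-endian (__be16) by the kernel, so it is converted with ntohs() once and kept in host order, letting the stats formatter print tun_ctx->dest_port directly. A standalone sketch (not part of the patch):

/*
 * Standalone sketch: convert the big-endian destination port once at
 * configuration time, then use host order everywhere else.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

struct tun_cfg {
	uint16_t dst_port_be;	/* big-endian, as the vxlan config stores it */
};

int main(void)
{
	struct tun_cfg cfg = { .dst_port_be = htons(4789) };	/* IANA VxLAN port */
	uint16_t dest_port = ntohs(cfg.dst_port_be);		/* converted once */

	printf("dest_port = %u\n", dest_port);
	return 0;
}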
diff --git a/qca-nss-clients/patches/0033-ipsecmgr-fix-compile-error.patch b/qca-nss-clients/patches/0033-ipsecmgr-fix-compile-error.patch new file mode 100644 index 0000000..def9fd8 --- /dev/null +++ b/qca-nss-clients/patches/0033-ipsecmgr-fix-compile-error.patch @@ -0,0 +1,334 @@ +--- a/ipsecmgr/v2.0/plugins/klips/nss_ipsec_klips.c ++++ b/ipsecmgr/v2.0/plugins/klips/nss_ipsec_klips.c +@@ -146,7 +146,6 @@ static int nss_ipsec_klips_offload_esp(s + static struct net_protocol esp_protocol = { + .handler = nss_ipsec_klips_offload_esp, + .no_policy = 1, +- .netns_ok = 1, + }; + + /* +@@ -304,7 +303,7 @@ static struct nss_ipsec_klips_tun *nss_i + * Read/write lock needs to taken by the caller since sa + * table is looked up here + */ +- BUG_ON(write_can_lock(&tunnel_map.lock)); ++ lockdep_assert_held_write(&tunnel_map.lock); + + if (!klips_dev) { + return NULL; +@@ -387,7 +386,7 @@ static struct nss_ipsec_klips_tun *nss_i + * Read/write lock needs to be taken by the caller since tunnel + * table is looked up here + */ +- BUG_ON(write_can_lock(&tunnel_map.lock)); ++ lockdep_assert_held_write(&tunnel_map.lock); + + for (i = 0, tun = tunnel_map.tbl; i < tunnel_map.max; i++, tun++) { + if (!tun->klips_dev) { +@@ -507,7 +506,7 @@ static struct nss_ipsec_klips_sa *nss_ip + * Read/write lock needs to taken by the caller since sa + * table is looked up here + */ +- BUG_ON(write_can_lock(&tunnel_map.lock)); ++ lockdep_assert_held_write(&tunnel_map.lock); + + list_for_each_entry_safe(sa, tmp, head, list) { + if (sa->sid == crypto_idx) +@@ -531,7 +530,7 @@ static void nss_ipsec_klips_sa_flush(str + * Read/write lock needs to taken by the caller since sa + * table is modified here + */ +- BUG_ON(write_can_lock(&tunnel_map.lock)); ++ lockdep_assert_held_write(&tunnel_map.lock); + + list_for_each_entry_safe(sa, tmp, head, list) { + list_del_init(&sa->list); +@@ -1293,7 +1292,7 @@ static void nss_ipsec_klips_register_nat + /* + * write lock is needed as we are modifying tunnel entry. + */ +- BUG_ON(write_can_lock(&tunnel_map.lock)); ++ lockdep_assert_held_write(&tunnel_map.lock); + + sock_hold(sk); + tun->sk_encap_rcv = udp_sk(sk)->encap_rcv; +@@ -1310,7 +1309,7 @@ static void nss_ipsec_klips_unregister_n + /* + * write lock is needed as we are modifying tunnel entry. 
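++ * lockdep_assert_held_write() checks that the lock is write-held only when
++ * lockdep is enabled; the old BUG_ON(write_can_lock()) pattern relied on an
++ * API that newer kernels removed.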
+ */ +- BUG_ON(write_can_lock(&tunnel_map.lock)); ++ lockdep_assert_held_write(&tunnel_map.lock); + + xchg(&udp_sk(tun->sk)->encap_rcv, tun->sk_encap_rcv); + sock_put(tun->sk); +--- a/ipsecmgr/v2.0/plugins/xfrm/nss_ipsec_xfrm.c ++++ b/ipsecmgr/v2.0/plugins/xfrm/nss_ipsec_xfrm.c +@@ -1243,6 +1243,7 @@ drop: + return -EINVAL; + } + ++#if (LINUX_VERSION_CODE <= KERNEL_VERSION(5, 4, 0)) + /* + * nss_ipsec_xfrm_v4_output_finish() + * This is called for non-offloaded transformations after the NF_POST routing hooks +@@ -1264,9 +1265,8 @@ static int nss_ipsec_xfrm_v4_output_fini + */ + static int nss_ipsec_xfrm_v4_extract_input(struct xfrm_state *x, struct sk_buff *skb) + { +- struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm; +- + nss_ipsec_xfrm_trace("%px: Redirect to native xfrm stack\n", skb); ++ struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm; + return drv->xsa.v4->extract_input(x, skb); + } + +@@ -1278,11 +1278,12 @@ static int nss_ipsec_xfrm_v4_extract_inp + */ + static int nss_ipsec_xfrm_v4_extract_output(struct xfrm_state *x, struct sk_buff *skb) + { +- struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm; + + nss_ipsec_xfrm_trace("%px: Redirect to native xfrm stack\n", skb); ++ struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm; + return drv->xsa.v4->extract_output(x, skb); + } ++#endif + + /* + * nss_ipsec_xfrm_v4_transport_finish() +@@ -1381,14 +1382,14 @@ fallback: + * nss_ipsec_xfrm_esp_init_state() + * Initialize IPsec xfrm state of type ESP. + */ +-static int nss_ipsec_xfrm_esp_init_state(struct xfrm_state *x) ++static int nss_ipsec_xfrm_esp_init_state(struct xfrm_state *x, struct netlink_ext_ack *extac) + { + struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm; + struct nss_ipsec_xfrm_tunnel *tun = NULL; + struct nss_ipsec_xfrm_sa *sa = NULL; + xfrm_address_t remote = {0}; + xfrm_address_t local = {0}; +- struct net_device *local_dev; ++ struct net_device *local_dev = NULL; + bool new_tun = 0; + size_t ip_addr_len; + +@@ -1396,7 +1397,7 @@ static int nss_ipsec_xfrm_esp_init_state + local_dev = ip_dev_find(&init_net, x->id.daddr.a4); + ip_addr_len = sizeof(local.a4); + } else { +- local_dev = ipv6_dev_find(&init_net, &x->id.daddr.in6, 1); ++ local_dev = ipv6_dev_find(&init_net, &x->id.daddr.in6, local_dev); + ip_addr_len = sizeof(local.a6); + } + +@@ -1737,6 +1738,7 @@ drop: + return -EINVAL; + } + ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)) + /* + * nss_ipsec_xfrm_v6_output_finish() + * This is called for non-offloaded transformations after the NF_POST routing hooks +@@ -1758,9 +1760,9 @@ static int nss_ipsec_xfrm_v6_output_fini + */ + static int nss_ipsec_xfrm_v6_extract_input(struct xfrm_state *x, struct sk_buff *skb) + { +- struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm; + + nss_ipsec_xfrm_trace("%px: Redirect to native xfrm stack\n", skb); ++ struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm; + return drv->xsa.v6->extract_input(x, skb); + } + +@@ -1772,11 +1774,11 @@ static int nss_ipsec_xfrm_v6_extract_inp + */ + static int nss_ipsec_xfrm_v6_extract_output(struct xfrm_state *x, struct sk_buff *skb) + { +- struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm; +- + nss_ipsec_xfrm_trace("%px: Redirect to native xfrm stack\n", skb); ++ struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm; + return drv->xsa.v6->extract_output(x, skb); + } ++#endif + + /* + * nss_ipsec_xfrm_v6_transport_finish() +@@ -1804,22 +1806,25 @@ void nss_ipsec_xfrm_v6_local_error(struc + return drv->xsa.v6->local_error(skb, mtu); + } + ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)) + /* + * nss_ipsec_xfrm_v6_esp_hdr_offset() 
+ * Invoked by stack for IPv6 transport mode in encap. + * Redirect to the native version. + */ +-static int nss_ipsec_xfrm_v6_esp_hdr_offset(struct xfrm_state *x, struct sk_buff *skb, u8 **prevhdr) ++static int nss_ipsec_xfrm_v6_esp_hdr_offset(struct xfrm_state *x, struct sk_buff *skb, u8 **prevhdr) + { +- struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm; + + nss_ipsec_xfrm_trace("%px: Redirect to native esp6 stack\n", skb); +-#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) +- return drv->xsa.v6->type_map[IPPROTO_ESP]->hdr_offset(x, skb, prevhdr); +-#else +- return drv->xsa.v6->type_esp->hdr_offset(x, skb, prevhdr); +-#endif ++ ++ struct nss_ipsec_xfrm_drv *drv = &g_ipsec_xfrm; ++ #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) ++ return drv->xsa.v6->type_map[IPPROTO_ESP]->hdr_offset(x, skb, prevhdr); ++ #else ++ return drv->xsa.v6->type_esp->hdr_offset(x, skb, prevhdr); ++ #endif + } ++#endif + + /* + * nss_ipsec_xfrm_esp6_rcv() +@@ -1970,7 +1975,6 @@ static void nss_ipsec_xfrm_state_delete( + nss_ipsec_xfrm_del_tun(drv, tun); + } + +- return; + } + + /* +@@ -2045,9 +2049,11 @@ static struct xfrm_state_afinfo xfrm_v4_ + .init_temprop = nss_ipsec_xfrm_v4_init_param, + #endif + .output = nss_ipsec_xfrm_v4_output, ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)) + .output_finish = nss_ipsec_xfrm_v4_output_finish, + .extract_input = nss_ipsec_xfrm_v4_extract_input, + .extract_output = nss_ipsec_xfrm_v4_extract_output, ++#endif + .transport_finish = nss_ipsec_xfrm_v4_transport_finish, + .local_error = nss_ipsec_xfrm_v4_local_error, + }; +@@ -2092,7 +2098,6 @@ struct xfrm_mode xfrm_v6_mode_map[XFRM_M + * IPv4 xfrm_type ESP object. + */ + static const struct xfrm_type xfrm_v4_type = { +- .description = "NSS ESP4", + .owner = THIS_MODULE, + .proto = IPPROTO_ESP, + .flags = XFRM_TYPE_REPLAY_PROT, +@@ -2128,9 +2133,11 @@ static struct xfrm_state_afinfo xfrm_v6_ + .state_sort = nss_ipsec_xfrm_v6_sort_state, + #endif + .output = nss_ipsec_xfrm_v6_output, ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)) + .output_finish = nss_ipsec_xfrm_v6_output_finish, + .extract_input = nss_ipsec_xfrm_v6_extract_input, + .extract_output = nss_ipsec_xfrm_v6_extract_output, ++#endif + .transport_finish = nss_ipsec_xfrm_v6_transport_finish, + .local_error = nss_ipsec_xfrm_v6_local_error, + }; +@@ -2139,7 +2146,6 @@ static struct xfrm_state_afinfo xfrm_v6_ + * IPv6 xfrm_type ESP object. 
+ */ + static const struct xfrm_type xfrm_v6_type = { +- .description = "NSS ESP6", + .owner = THIS_MODULE, + .proto = IPPROTO_ESP, + .flags = XFRM_TYPE_REPLAY_PROT, +@@ -2148,7 +2154,9 @@ static const struct xfrm_type xfrm_v6_ty + .get_mtu = nss_ipsec_xfrm_esp_get_mtu, + .input = nss_ipsec_xfrm_esp_input, + .output = nss_ipsec_xfrm_esp_output, ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 0, 0)) + .hdr_offset = nss_ipsec_xfrm_v6_esp_hdr_offset, ++#endif + }; + + /* +@@ -2234,7 +2242,6 @@ static void nss_ipsec_xfrm_restore_afinf + } + + xfrm_unregister_type(base, family); +- + xfrm_state_update_afinfo(family, afinfo); + } + +@@ -2319,14 +2326,10 @@ static void nss_ipsec_xfrm_override_afin + */ + int __init nss_ipsec_xfrm_init_module(void) + { +- + rwlock_init(&g_ipsec_xfrm.lock); +- + nss_ipsec_xfrm_init_tun_db(&g_ipsec_xfrm); + nss_ipsec_xfrm_init_flow_db(&g_ipsec_xfrm); +- + init_completion(&g_ipsec_xfrm.complete); +- + net_get_random_once(&g_ipsec_xfrm.hash_nonce, sizeof(g_ipsec_xfrm.hash_nonce)); + + /* +@@ -2354,7 +2357,6 @@ int __init nss_ipsec_xfrm_init_module(vo + nss_ipsec_xfrm_override_afinfo(&g_ipsec_xfrm, AF_INET6); + + ecm_interface_ipsec_register_callbacks(&xfrm_ecm_ipsec_cb); +- ecm_notifier_register_connection_notify(&xfrm_ecm_notifier); + + #if defined(NSS_L2TPV2_ENABLED) + l2tpmgr_register_ipsecmgr_callback_by_ipaddr(&xfrm_l2tp); +@@ -2367,6 +2369,7 @@ int __init nss_ipsec_xfrm_init_module(vo + /* + * Register for xfrm events + */ ++ ecm_notifier_register_connection_notify(&xfrm_ecm_notifier); + xfrm_register_km(&nss_ipsec_xfrm_mgr); + + /* +@@ -2377,6 +2380,7 @@ int __init nss_ipsec_xfrm_init_module(vo + return 0; + + unreg_v4_handler: ++ xfrm4_protocol_deregister(&xfrm4_proto, IPPROTO_ESP); + xfrm6_protocol_deregister(&xfrm6_proto, IPPROTO_ESP); + return -EAGAIN; + } +--- a/ipsecmgr/v2.0/plugins/xfrm/nss_ipsec_xfrm_sa.c ++++ b/ipsecmgr/v2.0/plugins/xfrm/nss_ipsec_xfrm_sa.c +@@ -183,7 +183,7 @@ static bool nss_ipsec_xfrm_sa_init_crypt + */ + static void nss_ipsec_xfrm_sa_init_tuple(struct nss_ipsec_xfrm_sa *sa, struct xfrm_state *x) + { +- struct net_device *local_dev; ++ struct net_device *local_dev = NULL; + + sa->type = NSS_IPSECMGR_SA_TYPE_ENCAP; + sa->tuple.spi_index = ntohl(x->id.spi); +@@ -217,7 +217,7 @@ static void nss_ipsec_xfrm_sa_init_tuple + sa->tuple.dest_ip[2] = ntohl(x->id.daddr.a6[2]); + sa->tuple.dest_ip[3] = ntohl(x->id.daddr.a6[3]); + +- local_dev = ipv6_dev_find(&init_net, (struct in6_addr *)x->id.daddr.a6, 1); ++ local_dev = ipv6_dev_find(&init_net, (struct in6_addr *)x->id.daddr.a6, local_dev); + } + + /* +--- a/ipsecmgr/v2.0/plugins/xfrm/nss_ipsec_xfrm_tunnel.c ++++ b/ipsecmgr/v2.0/plugins/xfrm/nss_ipsec_xfrm_tunnel.c +@@ -130,7 +130,6 @@ err: + drop: + atomic64_inc(&drv->stats.inner_drop); + dev_kfree_skb_any(skb); +- return; + } + + /* +@@ -194,7 +193,6 @@ static void nss_ipsec_xfrm_tunnel_rx_out + drop: + dev_kfree_skb_any(skb); + atomic64_inc(&drv->stats.outer_drop); +- return; + } + + /*
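The unreg_v4_handler change above restores the usual reverse-order unwind: every handler registered before the failing step is deregistered on the error path. A generic standalone sketch of the idiom (illustrative names, not part of the patch):

/*
 * Standalone sketch: goto-based unwind ladder. Resources are released
 * newest-first; each failure jumps to the label that undoes everything
 * acquired so far.
 */
#include <stdio.h>

static int reg_a(void) { puts("reg a"); return 0; }
static int reg_b(void) { puts("reg b"); return 0; }
static int reg_c(void) { puts("reg c"); return -1; }	/* fails */
static void unreg_a(void) { puts("unreg a"); }
static void unreg_b(void) { puts("unreg b"); }

static int init_sketch(void)
{
	if (reg_a())
		return -1;
	if (reg_b())
		goto err_a;
	if (reg_c())
		goto err_b;	/* without the full chain, a and b would leak */
	return 0;

err_b:
	unreg_b();
err_a:
	unreg_a();
	return -1;
}

int main(void)
{
	return init_sketch() ? 1 : 0;
}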