nss-clients: Backport 12.5 changes

Backport some minor patches from QSDK 12.5

Signed-off-by: Sean Khan <datapronix@protonmail.com>
Sean Khan 2024-07-08 10:08:59 -04:00
parent afa7e180cd
commit f7d8d21b3d
7 changed files with 1172 additions and 1 deletion


@@ -1,7 +1,7 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=qca-nss-clients
-PKG_RELEASE:=4
+PKG_RELEASE:=5
PKG_SOURCE_URL:=https://git.codelinaro.org/clo/qsdk/oss/lklm/nss-clients.git
PKG_SOURCE_PROTO:=git


@@ -0,0 +1,303 @@
--- a/bridge/nss_bridge_mgr.c
+++ b/bridge/nss_bridge_mgr.c
@@ -1,9 +1,12 @@
/*
**************************************************************************
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
+ *
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
@@ -49,6 +52,11 @@ static bool ovs_enabled = false;
static struct nss_bridge_mgr_context br_mgr_ctx;
/*
+ * Module parameter to enable/disable FDB learning.
+ */
+static bool fdb_disabled = false;
+
+/*
* nss_bridge_mgr_create_instance()
* Create a bridge instance.
*/
@@ -415,6 +423,37 @@ static int nss_bridge_mgr_del_bond_slave
}
/*
+ * nss_bridge_mgr_bond_fdb_join()
+ * Update FDB state when a bond interface joining bridge.
+ */
+static int nss_bridge_mgr_bond_fdb_join(struct nss_bridge_pvt *b_pvt)
+{
+ /*
+ * If already other bond devices are attached to bridge,
+ * only increment bond_slave_num,
+ */
+ spin_lock(&br_mgr_ctx.lock);
+ if (b_pvt->bond_slave_num) {
+ b_pvt->bond_slave_num++;
+ spin_unlock(&br_mgr_ctx.lock);
+ return NOTIFY_DONE;
+ }
+ b_pvt->bond_slave_num = 1;
+ spin_unlock(&br_mgr_ctx.lock);
+
+ /*
+ * This is the first bond device being attached to bridge. In order to enforce Linux
+ * bond slave selection in bridge flows involving bond interfaces, we need to disable
+ * fdb learning on this bridge master to allow flow based bridging.
+ */
+ if (nss_bridge_mgr_disable_fdb_learning(b_pvt) < 0) {
+ return NOTIFY_BAD;
+ }
+
+ return NOTIFY_DONE;
+}
+
+/*
* nss_bridge_mgr_bond_master_join()
* Add a bond interface to bridge
*/
@@ -447,28 +486,7 @@ static int nss_bridge_mgr_bond_master_jo
}
}
- /*
- * If already other bond devices are attached to bridge,
- * only increment bond_slave_num,
- */
- spin_lock(&br_mgr_ctx.lock);
- if (b_pvt->bond_slave_num) {
- b_pvt->bond_slave_num++;
- spin_unlock(&br_mgr_ctx.lock);
- return NOTIFY_DONE;
- }
- spin_unlock(&br_mgr_ctx.lock);
-
- /*
- * This is the first bond device being attached to bridge. In order to enforce Linux
- * bond slave selection in bridge flows involving bond interfaces, we need to disable
- * fdb learning on this bridge master to allow flow based bridging.
- */
- if (!nss_bridge_mgr_disable_fdb_learning(b_pvt)) {
- spin_lock(&br_mgr_ctx.lock);
- b_pvt->bond_slave_num = 1;
- spin_unlock(&br_mgr_ctx.lock);
-
+ if (nss_bridge_mgr_bond_fdb_join(b_pvt) == NOTIFY_DONE) {
return NOTIFY_DONE;
}
@@ -488,6 +506,41 @@ cleanup:
}
/*
+ * nss_bridge_mgr_bond_fdb_leave()
+ * Update FDB state when a bond interface leaving bridge.
+ */
+static int nss_bridge_mgr_bond_fdb_leave(struct nss_bridge_pvt *b_pvt)
+{
+
+ nss_bridge_mgr_assert(b_pvt->bond_slave_num == 0);
+
+ /*
+ * If more than one bond devices are attached to bridge,
+ * only decrement the bond_slave_num
+ */
+ spin_lock(&br_mgr_ctx.lock);
+ if (b_pvt->bond_slave_num > 1) {
+ b_pvt->bond_slave_num--;
+ spin_unlock(&br_mgr_ctx.lock);
+ return NOTIFY_DONE;
+ }
+ b_pvt->bond_slave_num = 0;
+ spin_unlock(&br_mgr_ctx.lock);
+
+ /*
+ * The last bond interface is removed from bridge, we can switch back to FDB
+ * learning mode.
+ */
+ if (!fdb_disabled && (nss_bridge_mgr_enable_fdb_learning(b_pvt) < 0)) {
+ nss_bridge_mgr_warn("%px: Failed to enable fdb learning. fdb_disabled: %d\n", b_pvt, fdb_disabled);
+ return NOTIFY_BAD;
+ }
+
+ return NOTIFY_DONE;
+}
+
+
+/*
* nss_bridge_mgr_bond_master_leave()
* Remove a bond interface from bridge
*/
@@ -516,27 +569,7 @@ static int nss_bridge_mgr_bond_master_le
}
}
- /*
- * If more than one bond devices are attached to bridge,
- * only decrement the bond_slave_num
- */
- spin_lock(&br_mgr_ctx.lock);
- if (b_pvt->bond_slave_num > 1) {
- b_pvt->bond_slave_num--;
- spin_unlock(&br_mgr_ctx.lock);
- return NOTIFY_DONE;
- }
- spin_unlock(&br_mgr_ctx.lock);
-
- /*
- * The last bond interface is removed from bridge, we can switch back to FDB
- * learning mode.
- */
- if (!nss_bridge_mgr_enable_fdb_learning(b_pvt)) {
- spin_lock(&br_mgr_ctx.lock);
- b_pvt->bond_slave_num = 0;
- spin_unlock(&br_mgr_ctx.lock);
-
+ if (nss_bridge_mgr_bond_fdb_leave(b_pvt) == NOTIFY_DONE) {
return NOTIFY_DONE;
}
@@ -803,9 +836,10 @@ int nss_bridge_mgr_join_bridge(struct ne
}
/*
- * Add the bond_master to bridge.
+ * Update FDB state of the bridge. No need to add individual interfaces of bond to the bridge.
+ * VLAN interface verifies that all interfaces are physical so, no need to verify again.
*/
- if (nss_bridge_mgr_bond_master_join(real_dev, br) != NOTIFY_DONE) {
+ if (nss_bridge_mgr_bond_fdb_join(br) != NOTIFY_DONE) {
nss_bridge_mgr_warn("%px: Slaves of bond interface %s join bridge failed\n", br, real_dev->name);
nss_bridge_tx_leave_msg(br->ifnum, dev);
nss_vlan_mgr_leave_bridge(dev, br->vsi);
@@ -905,9 +939,10 @@ int nss_bridge_mgr_leave_bridge(struct n
}
/*
- * Remove the bond_master from bridge.
+ * Update FDB state of the bridge. No need to add individual interfaces of bond to the bridge.
+ * VLAN interface verifies that all interfaces are physical so, no need to verify again.
*/
- if (nss_bridge_mgr_bond_master_leave(real_dev, br) != NOTIFY_DONE) {
+ if (nss_bridge_mgr_bond_fdb_leave(br) != NOTIFY_DONE) {
nss_bridge_mgr_warn("%px: Slaves of bond interface %s leave bridge failed\n", br, real_dev->name);
nss_vlan_mgr_join_bridge(dev, br->vsi);
nss_bridge_tx_join_msg(br->ifnum, dev);
@@ -1017,44 +1052,45 @@ int nss_bridge_mgr_register_br(struct ne
b_pvt->dev = dev;
+#if defined(NSS_BRIDGE_MGR_PPE_SUPPORT)
+ err = ppe_vsi_alloc(NSS_BRIDGE_MGR_SWITCH_ID, &vsi_id);
+ if (err) {
+ nss_bridge_mgr_warn("%px: failed to alloc bridge vsi, error = %d\n", b_pvt, err);
+ goto fail;
+ }
+
+ b_pvt->vsi = vsi_id;
+#endif
+
ifnum = nss_dynamic_interface_alloc_node(NSS_DYNAMIC_INTERFACE_TYPE_BRIDGE);
if (ifnum < 0) {
nss_bridge_mgr_warn("%px: failed to alloc bridge di\n", b_pvt);
- nss_bridge_mgr_delete_instance(b_pvt);
- return -EFAULT;
+ goto fail_1;
}
if (!nss_bridge_register(ifnum, dev, NULL, NULL, 0, b_pvt)) {
nss_bridge_mgr_warn("%px: failed to register bridge di to NSS\n", b_pvt);
- goto fail;
+ goto fail_2;
}
#if defined(NSS_BRIDGE_MGR_PPE_SUPPORT)
- err = ppe_vsi_alloc(NSS_BRIDGE_MGR_SWITCH_ID, &vsi_id);
- if (err) {
- nss_bridge_mgr_warn("%px: failed to alloc bridge vsi, error = %d\n", b_pvt, err);
- goto fail_1;
- }
-
- b_pvt->vsi = vsi_id;
-
err = nss_bridge_tx_vsi_assign_msg(ifnum, vsi_id);
if (err != NSS_TX_SUCCESS) {
nss_bridge_mgr_warn("%px: failed to assign vsi msg, error = %d\n", b_pvt, err);
- goto fail_2;
+ goto fail_3;
}
#endif
err = nss_bridge_tx_set_mac_addr_msg(ifnum, (uint8_t *) dev->dev_addr);
if (err != NSS_TX_SUCCESS) {
nss_bridge_mgr_warn("%px: failed to set mac_addr msg, error = %d\n", b_pvt, err);
- goto fail_3;
+ goto fail_4;
}
err = nss_bridge_tx_set_mtu_msg(ifnum, dev->mtu);
if (err != NSS_TX_SUCCESS) {
nss_bridge_mgr_warn("%px: failed to set mtu msg, error = %d\n", b_pvt, err);
- goto fail_3;
+ goto fail_4;
}
/*
@@ -1076,31 +1112,35 @@ int nss_bridge_mgr_register_br(struct ne
* Disable FDB learning if OVS is enabled for
* all bridges (including Linux bridge).
*/
- if (ovs_enabled) {
+ if (ovs_enabled || fdb_disabled) {
nss_bridge_mgr_disable_fdb_learning(b_pvt);
}
#endif
return 0;
-fail_3:
+fail_4:
#if defined(NSS_BRIDGE_MGR_PPE_SUPPORT)
if (nss_bridge_tx_vsi_unassign_msg(ifnum, vsi_id) != NSS_TX_SUCCESS) {
nss_bridge_mgr_warn("%px: failed to unassign vsi\n", b_pvt);
}
-
-fail_2:
- ppe_vsi_free(NSS_BRIDGE_MGR_SWITCH_ID, vsi_id);
-
-fail_1:
+fail_3:
#endif
+
nss_bridge_unregister(ifnum);
-fail:
+fail_2:
if (nss_dynamic_interface_dealloc_node(ifnum, NSS_DYNAMIC_INTERFACE_TYPE_BRIDGE) != NSS_TX_SUCCESS) {
nss_bridge_mgr_warn("%px: failed to dealloc bridge di\n", b_pvt);
}
+fail_1:
+#if defined(NSS_BRIDGE_MGR_PPE_SUPPORT)
+ ppe_vsi_free(NSS_BRIDGE_MGR_SWITCH_ID, vsi_id);
+fail:
+#endif
+
nss_bridge_mgr_delete_instance(b_pvt);
+
return -EFAULT;
}
@@ -1626,3 +1666,6 @@ MODULE_DESCRIPTION("NSS bridge manager")
module_param(ovs_enabled, bool, 0644);
MODULE_PARM_DESC(ovs_enabled, "OVS bridge is enabled");
+
+module_param(fdb_disabled, bool, 0644);
+MODULE_PARM_DESC(fdb_disabled, "fdb learning is disabled");
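
Taken together, the fdb_disabled hunks above add a load-time knob to the bridge manager: when the module is loaded with fdb_disabled=1, FDB learning is switched off at bridge registration and is not re-enabled when the last bond slave leaves the bridge. A condensed sketch of that gating, assembled from the hunks above for readability rather than quoted verbatim from the file:

static bool fdb_disabled = false;	/* module parameter, e.g. fdb_disabled=1 at load time */

/* Bridge registration: learning is disabled when either OVS or the new knob asks for it. */
if (ovs_enabled || fdb_disabled)
	nss_bridge_mgr_disable_fdb_learning(b_pvt);

/* Last bond interface leaving the bridge: learning is restored only while the knob is off. */
if (!fdb_disabled && (nss_bridge_mgr_enable_fdb_learning(b_pvt) < 0))
	return NOTIFY_BAD;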


@@ -0,0 +1,51 @@
commit 2396944e41307a90e9159107fd225e44980a5b2f
Author: Cemil Coskun <quic_ccoskun@quicinc.com>
AuthorDate: Tue Aug 9 21:25:13 2022 -0700
Commit: Cemil Coskun <quic_ccoskun@quicinc.com>
CommitDate: Tue Aug 9 21:27:58 2022 -0700
[qca-nss-clients] In capwapmgr use source MAC address
Currently, interface MAC address is used while sending packets in capwapmgr.
Update that to use the MAC address in the rule.
Change-Id: I2ba9df7beab39a9584a1159db3a3f3c337c219aa
Signed-off-by: Cemil Coskun <quic_ccoskun@quicinc.com>
--- a/capwapmgr/nss_capwapmgr.c
+++ b/capwapmgr/nss_capwapmgr.c
@@ -1,9 +1,12 @@
/*
**************************************************************************
* Copyright (c) 2014-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ *
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
+ *
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
@@ -866,6 +869,10 @@ static nss_tx_status_t nss_capwapmgr_cre
memcpy(nircm->conn_rule.return_mac, unic->dest_mac, 6);
}
+ nircm->valid_flags |= NSS_IPV4_RULE_CREATE_SRC_MAC_VALID;
+ nircm->src_mac_rule.mac_valid_flags |=NSS_IPV4_SRC_MAC_FLOW_VALID;
+ memcpy(nircm->src_mac_rule.flow_src_mac, nircm->conn_rule.return_mac, 6);
+
/*
* Copy over the DSCP rule parameters
*/
@@ -1001,6 +1008,10 @@ static nss_tx_status_t nss_capwapmgr_cre
memcpy(nircm->conn_rule.return_mac, unic->dest_mac, 6);
nircm->valid_flags |= NSS_IPV6_RULE_CREATE_CONN_VALID;
+ nircm->valid_flags |= NSS_IPV6_RULE_CREATE_SRC_MAC_VALID;
+ nircm->src_mac_rule.mac_valid_flags |=NSS_IPV6_SRC_MAC_FLOW_VALID;
+ memcpy(nircm->src_mac_rule.flow_src_mac, nircm->conn_rule.return_mac, 6);
+
/*
* Copy over the DSCP rule parameters
*/


@@ -0,0 +1,169 @@
--- a/map/map-t/nss_connmgr_map_t.c
+++ b/map/map-t/nss_connmgr_map_t.c
@@ -531,7 +531,7 @@ static void nss_connmgr_map_t_decap_exce
/*
* nss_connmgr_map_t_encap_exception()
* Exception handler registered to NSS for handling map_t ipv4 pkts
- * Translates ipv4 packet back to ipv6 and send to nat46 device directly.
+ * Send the translated ipv4 packets to the stack directly.
*/
static void nss_connmgr_map_t_encap_exception(struct net_device *dev,
struct sk_buff *skb,
@@ -539,23 +539,7 @@ static void nss_connmgr_map_t_encap_exce
{
struct iphdr *ip4_hdr;
- struct ipv6hdr *ip6_hdr;
- uint8_t v6saddr[16], v6daddr[16];
- struct tcphdr *tcph = NULL;
- struct udphdr *udph = NULL;
- struct iphdr ip4_hdr_r;
- __be16 sport, dport;
- uint8_t nexthdr, hop_limit, tos;
- int payload_len;
- bool df_bit = false;
- uint16_t append_hdr_sz = 0;
- uint16_t identifier;
- uint32_t l4_csum, orig_csum;
- uint16_t csum;
- /*
- * Discard L2 header.
- */
skb_pull(skb, sizeof(struct ethhdr));
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
@@ -563,123 +547,24 @@ static void nss_connmgr_map_t_encap_exce
ip4_hdr = ip_hdr(skb);
skb_set_transport_header(skb, ip4_hdr->ihl * 4);
- if (ip4_hdr->protocol == IPPROTO_TCP) {
- tcph = tcp_hdr(skb);
- l4_csum = tcph->check;
- sport = tcph->source;
- dport = tcph->dest;
- } else if (ip4_hdr->protocol == IPPROTO_UDP) {
- udph = udp_hdr(skb);
- orig_csum = l4_csum = udph->check;
- sport = udph->source;
- dport = udph->dest;
- } else {
- nss_connmgr_map_t_warning("%px: Unsupported protocol, free it up\n", dev);
- dev_kfree_skb_any(skb);
- return;
- }
-
- /*
- * Undo the checksum of the IPv4 source and destinationIPv4 address.
- */
- csum = ip_compute_csum(&ip4_hdr->saddr, 2 * sizeof(ip4_hdr->saddr));
- l4_csum += ((~csum) & 0xFFFF);
-
- /*`
- * IPv6 packet is xlated to ipv4 packet by acceleration engine. But there is no ipv4 rule.
- * Call xlate_4_to_6() [ which is exported by nat46.ko ] to find original ipv6 src and ipv6 dest address.
- * These functions is designed for packets from lan to wan. Since this packet is from wan, need to call
- * this function with parameters reversed. ipv4_hdr_r is used for reversing ip addresses.
- */
- ip4_hdr_r.daddr = ip4_hdr->saddr;
- ip4_hdr_r.saddr = ip4_hdr->daddr;
-
- if (unlikely(!xlate_4_to_6(dev, &ip4_hdr_r, dport, sport, v6saddr, v6daddr))) { /* exception happened after packet got xlated */
- nss_connmgr_map_t_warning("%px: Martian ipv4 packet !!..free it. (saddr = 0x%x daddr = 0x%x sport = %d dport = %d)\n", dev,\
- ip4_hdr->saddr, ip4_hdr->daddr, sport, dport);
- dev_kfree_skb_any(skb);
- return;
- }
-
- nexthdr = ip4_hdr->protocol;
- payload_len = ntohs(ip4_hdr->tot_len) - sizeof(struct iphdr);
- hop_limit = ip4_hdr->ttl;
- tos = ip4_hdr->tos;
- identifier = ntohs(ip4_hdr->id);
-
- if (ip4_hdr->frag_off & htons(IP_DF)) {
- df_bit = true;
- } else if (map_t_flags & MAPT_FLAG_ADD_DUMMY_HDR) {
- append_hdr_sz = sizeof(struct frag_hdr);
- }
-
- if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + append_hdr_sz - sizeof(struct iphdr))) {
- nss_connmgr_map_t_warning("%px: Not enough headroom for ipv6 packet...Freeing the packet\n", dev);
- dev_kfree_skb_any(skb);
- return;
- }
-
- skb_push(skb, sizeof(struct ipv6hdr) + append_hdr_sz - sizeof(struct iphdr));
- skb_reset_network_header(skb);
- skb_reset_mac_header(skb);
-
- skb->protocol = htons(ETH_P_IPV6);
-
- ip6_hdr = ipv6_hdr(skb);
- memset(ip6_hdr, 0, sizeof(struct ipv6hdr));
-
- ip6_hdr->version = 6;
- ip6_hdr->payload_len = htons(payload_len + append_hdr_sz);
- ip6_hdr->hop_limit = hop_limit;
-
- nss_connmgr_map_t_ipv6_set_tclass(ip6_hdr, tos);
- memcpy(&ip6_hdr->daddr, v6saddr, sizeof(struct in6_addr));
- memcpy(&ip6_hdr->saddr, v6daddr, sizeof(struct in6_addr));
-
- if (unlikely(df_bit) || !(map_t_flags & MAPT_FLAG_ADD_DUMMY_HDR)) {
- ip6_hdr->nexthdr = nexthdr;
- } else {
- struct frag_hdr tmp_fh, *fh;
- const __be32 *fh_addr = skb_header_pointer(skb, sizeof(struct ipv6hdr), sizeof(struct frag_hdr), &tmp_fh);
- if (!fh_addr) {
- nss_connmgr_map_t_warning("%px: Not able to offset to frag header\n", dev);
- dev_kfree_skb_any(skb);
- return;
- }
- fh = (struct frag_hdr *)fh_addr;
- memset(fh, 0, sizeof(struct frag_hdr));
- fh->identification = htonl(identifier);
- fh->nexthdr = nexthdr;
- ip6_hdr->nexthdr = NEXTHDR_FRAGMENT;
- }
-
- skb_set_transport_header(skb, sizeof(struct ipv6hdr) + append_hdr_sz);
-
/*
- * Add the checksum of the IPv6 source and destination address.
+ * IP Header checksum is not generated yet, calculate it now.
*/
- l4_csum += ip_compute_csum(ip6_hdr->saddr.s6_addr16, 2 * sizeof(ip6_hdr->saddr));
- /*
- * Fold the 32 bits checksum to 16 bits
- */
- l4_csum = (l4_csum & 0x0000FFFF) + (l4_csum >> 16);
- l4_csum = (l4_csum & 0x0000FFFF) + (l4_csum >> 16);
-
- if (nexthdr == IPPROTO_TCP) {
- tcph->check = (uint16_t)l4_csum;
- } else {
- udph->check = (orig_csum == 0)? 0:(uint16_t)l4_csum;
- }
+ ip4_hdr->check = 0;
+ ip4_hdr->check = ip_fast_csum((unsigned char *)ip4_hdr, ip4_hdr->ihl);
+ skb->protocol = htons(ETH_P_IP);
skb->pkt_type = PACKET_HOST;
skb->skb_iif = dev->ifindex;
skb->ip_summed = CHECKSUM_NONE;
skb->dev = dev;
- nss_connmgr_map_t_trace("%p: ipv4 packet exceptioned after v6 ---> v4 xlate, created original ipv6 packet\n", dev);
- nss_connmgr_map_t_trace("%p: Calculted ipv6 params: src_addr=%pI6, dest_addr=%pI6, payload_len=%d, checksum=%x\n", dev, v6saddr, v6daddr, payload_len, l4_csum);
-
- dev_queue_xmit(skb);
+ nss_connmgr_map_t_trace("%px: ipv4 packet exceptioned after v6/v4xlat src=%pI4 dest=%pI4 proto=%d\n",
+ dev, &ip4_hdr->saddr, &ip4_hdr->daddr, ip4_hdr->protocol);
+ /*
+ * Go through Linux network stack.
+ */
+ netif_receive_skb(skb);
return;
}


@@ -0,0 +1,92 @@
--- a/match/nss_match_l2.c
+++ b/match/nss_match_l2.c
@@ -1,6 +1,7 @@
/*
*******************************************************************************
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -133,7 +134,7 @@ static int nss_match_l2_cmd_parse(char *
struct nss_ctx_instance *nss_ctx = nss_match_get_context();
int ret = 0;
uint32_t mask_val[4] = {0};
- uint32_t actions = 0, if_num = 0, setprio = 0, nexthop = 0;
+ uint32_t actions = 0, if_num = 0, setprio = NSS_MAX_NUM_PRI, nexthop = 0;
uint16_t smac[3] = {0}, dmac[3] = {0}, mask_id = 0, ethertype = 0;
uint8_t mac_addr_tmp[6];
char tmp[4];
@@ -340,22 +341,22 @@ static int nss_match_l2_cmd_parse(char *
switch(actions) {
case NSS_MATCH_ACTION_SETPRIO:
- if (nexthop || !setprio || setprio >= NSS_MAX_NUM_PRI) {
+ if (nexthop || setprio >= NSS_MAX_NUM_PRI) {
goto fail;
}
break;
case NSS_MATCH_ACTION_FORWARD:
- if (setprio || !nexthop) {
+ if (!(setprio == NSS_MAX_NUM_PRI) || !nexthop) {
goto fail;
}
break;
case NSS_MATCH_ACTION_SETPRIO | NSS_MATCH_ACTION_FORWARD:
- if (!setprio || !nexthop || setprio >= NSS_MAX_NUM_PRI) {
+ if (!nexthop || setprio >= NSS_MAX_NUM_PRI) {
goto fail;
}
break;
case NSS_MATCH_ACTION_DROP:
- if (setprio || nexthop) {
+ if (!(setprio == NSS_MAX_NUM_PRI) || nexthop) {
goto fail;
}
break;
--- a/match/nss_match_vow.c
+++ b/match/nss_match_vow.c
@@ -1,6 +1,6 @@
/*
*******************************************************************************
- * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -122,7 +122,7 @@ static int nss_match_vow_cmd_parse(char
char *token, *param, *value;
struct nss_ctx_instance *nss_ctx = nss_match_get_context();
int ret = 0;
- uint32_t actions = 0, if_num = 0, dscp = 0, outer_prio = 0, inner_prio = 0, setprio = 0, nexthop = 0;
+ uint32_t actions = 0, if_num = 0, dscp = 0, outer_prio = 0, inner_prio = 0, setprio = NSS_MAX_NUM_PRI, nexthop = 0;
uint16_t mask_id = 0;
uint32_t mask_val = 0;
@@ -301,22 +301,22 @@ static int nss_match_vow_cmd_parse(char
switch(actions) {
case NSS_MATCH_ACTION_SETPRIO:
- if (nexthop || !setprio || setprio >= NSS_MAX_NUM_PRI) {
+ if (nexthop || setprio >= NSS_MAX_NUM_PRI) {
goto fail;
}
break;
case NSS_MATCH_ACTION_FORWARD:
- if (setprio || !nexthop) {
+ if (!(setprio == NSS_MAX_NUM_PRI) || !nexthop) {
goto fail;
}
break;
case NSS_MATCH_ACTION_SETPRIO | NSS_MATCH_ACTION_FORWARD:
- if (!setprio || !nexthop || setprio >= NSS_MAX_NUM_PRI) {
+ if (!nexthop || setprio >= NSS_MAX_NUM_PRI) {
goto fail;
}
break;
case NSS_MATCH_ACTION_DROP:
- if (setprio || nexthop) {
+ if (!(setprio == NSS_MAX_NUM_PRI) || nexthop) {
goto fail;
}
break;
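
In both match files above, the setprio change swaps the old zero sentinel for NSS_MAX_NUM_PRI, so a configured priority of 0 is now accepted and setprio == NSS_MAX_NUM_PRI means "no priority supplied". A minimal illustration of the convention (for explanation only, assuming valid priorities run from 0 to NSS_MAX_NUM_PRI - 1):

uint32_t setprio = NSS_MAX_NUM_PRI;	/* sentinel: no priority parsed from the command */

/* ... the command parser may overwrite setprio with any value in 0 .. NSS_MAX_NUM_PRI - 1 ... */

if (setprio == NSS_MAX_NUM_PRI) {
	/* No priority supplied: required for FORWARD-only and DROP actions. */
} else {
	/* Priority supplied, including 0, which the old "!setprio" test used to reject. */
}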


@@ -0,0 +1,31 @@
--- a/pptp/nss_connmgr_pptp.c
+++ b/pptp/nss_connmgr_pptp.c
@@ -1,9 +1,12 @@
/*
**************************************************************************
* Copyright (c) 2015-2018, 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
+ *
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
@@ -272,13 +275,12 @@ static void nss_connmgr_pptp_event_recei
if_type = nss_dynamic_interface_get_type(nss_pptp_get_context(), tnlmsg->cm.interface);
- if (if_type == NSS_DYNAMIC_INTERFACE_TYPE_PPTP_OUTER) {
+ if ((if_type == NSS_DYNAMIC_INTERFACE_TYPE_PPTP_OUTER) && sync_stats->node_stats.rx_packets) {
ppp_update_stats(netdev,
(unsigned long)sync_stats->node_stats.rx_packets,
(unsigned long)sync_stats->node_stats.rx_bytes,
0, 0, 0, 0, 0, 0);
- } else {
-
+ } else if ((if_type == NSS_DYNAMIC_INTERFACE_TYPE_PPTP_INNER) && sync_stats->node_stats.tx_packets) {
ppp_update_stats(netdev, 0, 0,
(unsigned long)sync_stats->node_stats.tx_packets,
(unsigned long)sync_stats->node_stats.tx_bytes,


@@ -0,0 +1,525 @@
--- a/vlan/nss_vlan_mgr.c
+++ b/vlan/nss_vlan_mgr.c
@@ -1,9 +1,12 @@
/*
**************************************************************************
* Copyright (c) 2017-2018, 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
+ *
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
@@ -154,15 +157,6 @@ static struct nss_vlan_pvt *nss_vlan_mgr
return NULL;
}
-/*
- * nss_vlan_mgr_instance_deref()
- */
-static void nss_vlan_mgr_instance_deref(struct nss_vlan_pvt *v)
-{
- spin_lock(&vlan_mgr_ctx.lock);
- BUG_ON(!(--v->refs));
- spin_unlock(&vlan_mgr_ctx.lock);
-}
#ifdef NSS_VLAN_MGR_PPE_SUPPORT
/*
@@ -365,22 +359,16 @@ static void nss_vlan_mgr_port_role_event
* nss_vlan_mgr_bond_configure_ppe()
* Configure PPE for bond device
*/
-static int nss_vlan_mgr_bond_configure_ppe(struct nss_vlan_pvt *v, struct net_device *bond_dev)
+static int nss_vlan_mgr_bond_configure_ppe(struct nss_vlan_pvt *v, struct net_device *bond_dev, uint32_t vsi)
{
- uint32_t vsi;
int ret = 0;
struct net_device *slave;
int32_t port;
int vlan_mgr_bond_port_role = -1;
- if (ppe_vsi_alloc(NSS_VLAN_MGR_SWITCH_ID, &vsi)) {
- nss_vlan_mgr_warn("%s: failed to allocate VSI for bond vlan device", bond_dev->name);
- return -1;
- }
-
if (nss_vlan_tx_vsi_attach_msg(v->nss_if, vsi) != NSS_TX_SUCCESS) {
nss_vlan_mgr_warn("%s: failed to attach VSI to bond vlan interface\n", bond_dev->name);
- goto free_vsi;
+ return -1;
}
/*
@@ -393,7 +381,7 @@ static int nss_vlan_mgr_bond_configure_p
if (!NSS_VLAN_PHY_PORT_CHK(port)) {
rcu_read_unlock();
nss_vlan_mgr_warn("%s: %d is not valid physical port\n", slave->name, port);
- goto free_vsi;
+ return -1;
}
/*
@@ -409,7 +397,7 @@ static int nss_vlan_mgr_bond_configure_p
* In case the bond interface has no slaves, we do not want to proceed further
*/
if (vlan_mgr_bond_port_role == -1) {
- goto free_vsi;
+ return -1;
}
/*
@@ -436,6 +424,12 @@ static int nss_vlan_mgr_bond_configure_p
rcu_read_lock();
for_each_netdev_in_bond_rcu(bond_dev, slave) {
port = nss_cmn_get_interface_number_by_dev(slave);
+ if (!NSS_VLAN_PHY_PORT_CHK(port)) {
+ rcu_read_unlock();
+ nss_vlan_mgr_warn("%s: %d is not valid physical port\n", slave->name, port);
+ return -1;
+ }
+
ret = ppe_port_vlan_vsi_set(NSS_VLAN_MGR_SWITCH_ID, v->port[port - 1], v->ppe_svid, v->ppe_cvid, vsi);
if (ret != SW_OK) {
rcu_read_unlock();
@@ -471,6 +465,12 @@ static int nss_vlan_mgr_bond_configure_p
rcu_read_lock();
for_each_netdev_in_bond_rcu(bond_dev, slave) {
port = nss_cmn_get_interface_number_by_dev(slave);
+ if (!NSS_VLAN_PHY_PORT_CHK(port)) {
+ rcu_read_unlock();
+ nss_vlan_mgr_warn("%s: %d is not valid physical port\n", slave->name, port);
+ return -1;
+ }
+
v->eg_xlt_rule.port_bitmap |= (1 << v->port[port - 1]);
ret = fal_port_vlan_trans_adv_add(NSS_VLAN_MGR_SWITCH_ID, v->port[port - 1],
FAL_PORT_VLAN_EGRESS, &v->eg_xlt_rule, &v->eg_xlt_action);
@@ -490,6 +490,11 @@ static int nss_vlan_mgr_bond_configure_p
for_each_netdev_in_bond_rcu(bond_dev, slave) {
fal_port_qinq_role_t mode;
port = nss_cmn_get_interface_number_by_dev(slave);
+ if (!NSS_VLAN_PHY_PORT_CHK(port)) {
+ rcu_read_unlock();
+ nss_vlan_mgr_warn("%s: %d is not valid physical port\n", slave->name, port);
+ return -1;
+ }
/*
* If double tag, we should set physical port as core port
@@ -513,13 +518,18 @@ static int nss_vlan_mgr_bond_configure_p
ret = NSS_VLAN_PORT_ROLE_CHANGED;
}
- v->ppe_vsi = vsi;
return ret;
delete_egress_rule:
rcu_read_lock();
for_each_netdev_in_bond_rcu(bond_dev, slave) {
port = nss_cmn_get_interface_number_by_dev(slave);
+ if (!NSS_VLAN_PHY_PORT_CHK(port)) {
+ rcu_read_unlock();
+ nss_vlan_mgr_warn("%s: %d is not valid physical port\n", slave->name, port);
+ return -1;
+ }
+
ret = fal_port_vlan_trans_adv_del(NSS_VLAN_MGR_SWITCH_ID, v->port[port - 1],
FAL_PORT_VLAN_EGRESS,
&v->eg_xlt_rule, &v->eg_xlt_action);
@@ -533,6 +543,12 @@ delete_ingress_rule:
rcu_read_lock();
for_each_netdev_in_bond_rcu(bond_dev, slave) {
port = nss_cmn_get_interface_number_by_dev(slave);
+ if (!NSS_VLAN_PHY_PORT_CHK(port)) {
+ rcu_read_unlock();
+ nss_vlan_mgr_warn("%s: %d is not valid physical port\n", slave->name, port);
+ return -1;
+ }
+
ret = ppe_port_vlan_vsi_set(NSS_VLAN_MGR_SWITCH_ID, v->port[port - 1], v->ppe_svid, v->ppe_cvid, PPE_VSI_INVALID);
if (ret != SW_OK) {
nss_vlan_mgr_warn("%px: Failed to delete ingress translation rule for port:%d, error: %d\n", v, v->port[port - 1], ret);
@@ -545,30 +561,19 @@ detach_vsi:
nss_vlan_mgr_warn("%px: Failed to detach vsi %d\n", v, vsi);
}
-free_vsi:
- if (ppe_vsi_free(NSS_VLAN_MGR_SWITCH_ID, vsi)) {
- nss_vlan_mgr_warn("%px: Failed to free VLAN VSI\n", v);
- }
-
return -1;
}
/*
* nss_vlan_mgr_configure_ppe()
* Configure PPE for physical devices
*/
-static int nss_vlan_mgr_configure_ppe(struct nss_vlan_pvt *v, struct net_device *dev)
+static int nss_vlan_mgr_configure_ppe(struct nss_vlan_pvt *v, struct net_device *dev, uint32_t vsi)
{
- uint32_t vsi;
int ret = 0;
- if (ppe_vsi_alloc(NSS_VLAN_MGR_SWITCH_ID, &vsi)) {
- nss_vlan_mgr_warn("%s: failed to allocate VSI for vlan device", dev->name);
- return -1;
- }
-
if (nss_vlan_tx_vsi_attach_msg(v->nss_if, vsi) != NSS_TX_SUCCESS) {
nss_vlan_mgr_warn("%s: failed to attach VSI to vlan interface\n", dev->name);
- goto free_vsi;
+ return -1;
}
/*
@@ -652,7 +657,6 @@ static int nss_vlan_mgr_configure_ppe(st
ret = NSS_VLAN_PORT_ROLE_CHANGED;
}
- v->ppe_vsi = vsi;
return ret;
delete_egress_rule:
@@ -674,16 +678,119 @@ detach_vsi:
nss_vlan_mgr_warn("%px: Failed to detach vsi %d\n", v, vsi);
}
-free_vsi:
- if (ppe_vsi_free(NSS_VLAN_MGR_SWITCH_ID, vsi)) {
- nss_vlan_mgr_warn("%px: Failed to free VLAN VSI\n", v);
- }
-
return -1;
}
#endif
/*
+ * nss_vlan_mgr_instance_free()
+ * Destroy vlan instance
+ */
+static void nss_vlan_mgr_instance_free(struct nss_vlan_pvt *v)
+{
+#ifdef NSS_VLAN_MGR_PPE_SUPPORT
+ int32_t i;
+ int ret = 0;
+#endif
+
+#ifdef NSS_VLAN_MGR_PPE_SUPPORT
+ if (v->ppe_vsi) {
+ /*
+ * Detach VSI
+ */
+ if (nss_vlan_tx_vsi_detach_msg(v->nss_if, v->ppe_vsi)) {
+ nss_vlan_mgr_warn("%px: Failed to detach vsi %d\n", v, v->ppe_vsi);
+ }
+
+ /*
+ * Delete ingress vlan translation rule
+ */
+ for (i = 0; i < NSS_VLAN_PHY_PORT_MAX; i++) {
+ if (!v->port[i])
+ continue;
+ ret = ppe_port_vlan_vsi_set(NSS_VLAN_MGR_SWITCH_ID, v->port[i], v->ppe_svid, v->ppe_cvid, PPE_VSI_INVALID);
+ if (ret != SW_OK)
+ nss_vlan_mgr_warn("%px: Failed to delete old ingress translation rule, error: %d\n", v, ret);
+ }
+
+ /*
+ * Delete egress vlan translation rule
+ */
+ v->eg_xlt_rule.port_bitmap = 0;
+ for (i = 0; i < NSS_VLAN_PHY_PORT_MAX; i++) {
+ if (!v->port[i])
+ continue;
+ v->eg_xlt_rule.port_bitmap |= (1 << v->port[i]);
+ ret = fal_port_vlan_trans_adv_del(NSS_VLAN_MGR_SWITCH_ID, v->port[i],
+ FAL_PORT_VLAN_EGRESS,
+ &v->eg_xlt_rule, &v->eg_xlt_action);
+ if (ret != SW_OK) {
+ nss_vlan_mgr_warn("%px: Failed to delete vlan translation rule, error:%d\n", v, ret);
+ }
+ }
+
+ /*
+ * We will always have a VSI since this is allocated in beginning
+ * of the code.
+ */
+ if (ppe_vsi_free(NSS_VLAN_MGR_SWITCH_ID, v->ppe_vsi)) {
+ nss_vlan_mgr_warn("%px: Failed to free VLAN VSI\n", v);
+ }
+ }
+
+ /*
+ * Need to change the physical port role. While adding
+ * eth0.10.20/bond0.10.20, the role of the physical port(s) changed
+ * from EDGE to CORE. So, while removing eth0.10.20/bond0.10.20, the
+ * role of the physical port(s) should be changed from CORE to EDGE.
+ */
+ for (i = 0; i < NSS_VLAN_PHY_PORT_MAX; i++) {
+ if (v->port[i]) {
+ if (nss_vlan_mgr_calculate_new_port_role(v->port[i], i)) {
+ nss_vlan_mgr_port_role_event(v->port[i], i);
+ }
+ }
+ }
+#endif
+
+ if (v->nss_if) {
+ nss_unregister_vlan_if(v->nss_if);
+ if (nss_dynamic_interface_dealloc_node(v->nss_if, NSS_DYNAMIC_INTERFACE_TYPE_VLAN) != NSS_TX_SUCCESS)
+ nss_vlan_mgr_warn("%px: Failed to dealloc vlan dynamic interface\n", v);
+ }
+
+ kfree(v);
+}
+
+/*
+ * nss_vlan_mgr_instance_deref()
+ */
+static void nss_vlan_mgr_instance_deref(struct nss_vlan_pvt *v)
+{
+ struct nss_vlan_pvt *parent = NULL;
+ spin_lock(&vlan_mgr_ctx.lock);
+ BUG_ON(v->refs == 0);
+ v->refs--;
+
+ if (v->refs) {
+ spin_unlock(&vlan_mgr_ctx.lock);
+ return;
+ }
+
+ if (!list_empty(&v->list)) {
+ list_del(&v->list);
+ }
+
+ spin_unlock(&vlan_mgr_ctx.lock);
+
+ parent = v->parent;
+ nss_vlan_mgr_instance_free(v);
+
+ if (parent)
+ nss_vlan_mgr_instance_deref(parent);
+}
+
+/*
* nss_vlan_mgr_create_instance()
* Create vlan instance
*/
@@ -816,95 +923,6 @@ static struct nss_vlan_pvt *nss_vlan_mgr
}
/*
- * nss_vlan_mgr_instance_free()
- * Destroy vlan instance
- */
-static void nss_vlan_mgr_instance_free(struct nss_vlan_pvt *v)
-{
-#ifdef NSS_VLAN_MGR_PPE_SUPPORT
- int32_t i;
- int ret = 0;
-#endif
-
- spin_lock(&vlan_mgr_ctx.lock);
- BUG_ON(--v->refs);
- if (!list_empty(&v->list)) {
- list_del(&v->list);
- }
- spin_unlock(&vlan_mgr_ctx.lock);
-
-#ifdef NSS_VLAN_MGR_PPE_SUPPORT
- if (v->ppe_vsi) {
- /*
- * Detach VSI
- */
- if (nss_vlan_tx_vsi_detach_msg(v->nss_if, v->ppe_vsi)) {
- nss_vlan_mgr_warn("%px: Failed to detach vsi %d\n", v, v->ppe_vsi);
- }
-
- /*
- * Delete ingress vlan translation rule
- */
- for (i = 0; i < NSS_VLAN_PHY_PORT_MAX; i++) {
- if (!v->port[i])
- continue;
- ret = ppe_port_vlan_vsi_set(NSS_VLAN_MGR_SWITCH_ID, v->port[i], v->ppe_svid, v->ppe_cvid, PPE_VSI_INVALID);
- if (ret != SW_OK)
- nss_vlan_mgr_warn("%px: Failed to delete old ingress translation rule, error: %d\n", v, ret);
- }
-
- /*
- * Delete egress vlan translation rule
- */
- v->eg_xlt_rule.port_bitmap = 0;
- for (i = 0; i < NSS_VLAN_PHY_PORT_MAX; i++) {
- if (!v->port[i])
- continue;
- v->eg_xlt_rule.port_bitmap |= (1 << v->port[i]);
- ret = fal_port_vlan_trans_adv_del(NSS_VLAN_MGR_SWITCH_ID, v->port[i],
- FAL_PORT_VLAN_EGRESS,
- &v->eg_xlt_rule, &v->eg_xlt_action);
- if (ret != SW_OK) {
- nss_vlan_mgr_warn("%px: Failed to delete vlan translation rule, error:%d\n", v, ret);
- }
- }
-
- /*
- * Free PPE VSI
- */
- if (ppe_vsi_free(NSS_VLAN_MGR_SWITCH_ID, v->ppe_vsi)) {
- nss_vlan_mgr_warn("%px: Failed to free VLAN VSI\n", v);
- }
- }
-
- /*
- * Need to change the physical port role. While adding
- * eth0.10.20/bond0.10.20, the role of the physical port(s) changed
- * from EDGE to CORE. So, while removing eth0.10.20/bond0.10.20, the
- * role of the physical port(s) should be changed from CORE to EDGE.
- */
- for (i = 0; i < NSS_VLAN_PHY_PORT_MAX; i++) {
- if (v->port[i]) {
- if (nss_vlan_mgr_calculate_new_port_role(v->port[i], i)) {
- nss_vlan_mgr_port_role_event(v->port[i], i);
- }
- }
- }
-#endif
-
- if (v->nss_if) {
- nss_unregister_vlan_if(v->nss_if);
- if (nss_dynamic_interface_dealloc_node(v->nss_if, NSS_DYNAMIC_INTERFACE_TYPE_VLAN) != NSS_TX_SUCCESS)
- nss_vlan_mgr_warn("%px: Failed to dealloc vlan dynamic interface\n", v);
- }
-
- if (v->parent)
- nss_vlan_mgr_instance_deref(v->parent);
-
- kfree(v);
-}
-
-/*
* nss_vlan_mgr_changemtu_event()
*/
static int nss_vlan_mgr_changemtu_event(struct netdev_notifier_info *info)
@@ -979,6 +997,7 @@ static int nss_vlan_mgr_register_event(s
struct nss_vlan_pvt *v;
int if_num;
#ifdef NSS_VLAN_MGR_PPE_SUPPORT
+ uint32_t vsi;
int ret;
#endif
uint32_t vlan_tag;
@@ -995,19 +1014,25 @@ static int nss_vlan_mgr_register_event(s
if (!v)
return NOTIFY_DONE;
+ /*
+ * Allocate the VSI here.
+ */
+#ifdef NSS_VLAN_MGR_PPE_SUPPORT
+ if (ppe_vsi_alloc(NSS_VLAN_MGR_SWITCH_ID, &vsi)) {
+ nss_vlan_mgr_warn("%s: failed to allocate VSI for vlan device", dev->name);
+ return NOTIFY_DONE;
+ }
+#endif
+
if_num = nss_dynamic_interface_alloc_node(NSS_DYNAMIC_INTERFACE_TYPE_VLAN);
if (if_num < 0) {
nss_vlan_mgr_warn("%s: failed to alloc NSS dynamic interface\n", dev->name);
- nss_vlan_mgr_instance_free(v);
- return NOTIFY_DONE;
+ goto vsi_alloc_free;
}
if (!nss_register_vlan_if(if_num, NULL, dev, 0, v)) {
nss_vlan_mgr_warn("%s: failed to register NSS dynamic interface", dev->name);
- if (nss_dynamic_interface_dealloc_node(if_num, NSS_DYNAMIC_INTERFACE_TYPE_VLAN) != NSS_TX_SUCCESS)
- nss_vlan_mgr_warn("%px: Failed to dealloc vlan dynamic interface\n", v);
- nss_vlan_mgr_instance_free(v);
- return NOTIFY_DONE;
+ goto free_dynamic_interface;
}
v->nss_if = if_num;
@@ -1021,26 +1046,25 @@ static int nss_vlan_mgr_register_event(s
#ifdef NSS_VLAN_MGR_PPE_SUPPORT
if (!is_bond_master)
- ret = nss_vlan_mgr_configure_ppe(v, dev);
+ ret = nss_vlan_mgr_configure_ppe(v, dev, vsi);
else
- ret = nss_vlan_mgr_bond_configure_ppe(v, real_dev);
+ ret = nss_vlan_mgr_bond_configure_ppe(v, real_dev, vsi);
if (ret < 0) {
- nss_vlan_mgr_instance_free(v);
- return NOTIFY_DONE;
+ goto vlan_instance_free;
}
+
+ v->ppe_vsi = vsi;
#endif
if (nss_vlan_tx_set_mac_addr_msg(v->nss_if, v->dev_addr) != NSS_TX_SUCCESS) {
nss_vlan_mgr_warn("%s: failed to set mac_addr msg\n", dev->name);
- nss_vlan_mgr_instance_free(v);
- return NOTIFY_DONE;
+ goto vlan_instance_free;
}
if (nss_vlan_tx_set_mtu_msg(v->nss_if, v->mtu) != NSS_TX_SUCCESS) {
nss_vlan_mgr_warn("%s: failed to set mtu msg\n", dev->name);
- nss_vlan_mgr_instance_free(v);
- return NOTIFY_DONE;
+ goto vlan_instance_free;
}
vlan_tag = (v->tpid << NSS_VLAN_TPID_SHIFT | v->vid);
@@ -1049,8 +1073,7 @@ static int nss_vlan_mgr_register_event(s
(v->parent ? v->parent->nss_if : port_if),
port_if) != NSS_TX_SUCCESS) {
nss_vlan_mgr_warn("%s: failed to add vlan in nss\n", dev->name);
- nss_vlan_mgr_instance_free(v);
- return NOTIFY_DONE;
+ goto vlan_instance_free;
}
spin_lock(&vlan_mgr_ctx.lock);
@@ -1078,6 +1101,21 @@ static int nss_vlan_mgr_register_event(s
}
#endif
return NOTIFY_DONE;
+
+free_dynamic_interface:
+ if (nss_dynamic_interface_dealloc_node(if_num, NSS_DYNAMIC_INTERFACE_TYPE_VLAN) != NSS_TX_SUCCESS)
+ nss_vlan_mgr_warn("%px: Failed to dealloc vlan dynamic interface\n", v);
+
+vsi_alloc_free:
+#ifdef NSS_VLAN_MGR_PPE_SUPPORT
+ if (ppe_vsi_free(NSS_VLAN_MGR_SWITCH_ID, v->ppe_vsi)) {
+ nss_vlan_mgr_warn("%px: Failed to free VLAN VSI\n", v);
+ }
+#endif
+
+vlan_instance_free:
+ nss_vlan_mgr_instance_free(v);
+ return NOTIFY_DONE;
}
/*
@@ -1102,9 +1140,9 @@ static int nss_vlan_mgr_unregister_event
nss_vlan_mgr_instance_deref(v);
/*
- * Free instance
+ * Release reference take during register_event
*/
- nss_vlan_mgr_instance_free(v);
+ nss_vlan_mgr_instance_deref(v);
return NOTIFY_DONE;
}