--- a/nss_qdisc/Makefile +++ b/nss_qdisc/Makefile @@ -6,7 +6,7 @@ ifeq ($(SoC),$(filter $(SoC),ipq807x ipq ccflags-y += -DNSS_QDISC_PPE_SUPPORT -DNSS_QDISC_BRIDGE_SUPPORT endif -ccflags-y += -Werror +ccflags-y += -Wall -Werror obj-m += qca-nss-qdisc.o qca-nss-qdisc-objs := nss_qdisc.o \ --- a/nss_qdisc/igs/Makefile +++ b/nss_qdisc/igs/Makefile @@ -1,8 +1,7 @@ # Makefile for IGS (Ingress Shaping) ccflags-y += $(NSS_CCFLAGS) -I$(obj)/../../exports -ccflags-y += -DNSS_IGS_DEBUG_LEVEL=2 -ccflags-y += -Werror +ccflags-y += -Wall -Werror obj-m += act_nssmirred.o act_nssmirred-objs := \ --- a/nss_qdisc/igs/nss_mirred.c +++ b/nss_qdisc/igs/nss_mirred.c @@ -1,6 +1,6 @@ /* ************************************************************************** - * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all copies. @@ -24,12 +24,20 @@ static LIST_HEAD(nss_mirred_list); /* List for all nss mirred actions */ static DEFINE_SPINLOCK(nss_mirred_list_lock); /* Lock for the nss mirred list */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) +static unsigned int nss_mirred_net_id; /* NSS mirror net ID */ +static struct tc_action_ops nss_mirred_act_ops; /* NSS action mirror ops */ +#endif /* * nss_mirred_release() * Cleanup the resources for nss mirred action. */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) static void nss_mirred_release(struct tc_action *tc_act, int bind) +#else +static void nss_mirred_release(struct tc_action *tc_act) +#endif { struct nss_mirred_tcf *act = nss_mirred_get(tc_act); struct net_device *dev = rcu_dereference_protected(act->tcfm_dev, 1); @@ -75,9 +83,23 @@ static const struct nla_policy nss_mirre * Initialize the nss mirred action. */ static int nss_mirred_init(struct net *net, struct nlattr *nla, - struct nlattr *est, struct tc_action *tc_act, int ovr, - int bind) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) + struct nlattr *est, struct tc_action *tc_act, int ovr, + int bind) { +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0)) + struct nlattr *est, struct tc_action **tc_act, int ovr, + int bind, bool rtnl_held, struct tcf_proto *tp, + u32 flags, struct netlink_ext_ack *extack) +{ +#else + struct nlattr *est, struct tc_action **tc_act, + struct tcf_proto *tp, u32 flags, struct netlink_ext_ack *extack) +{ + bool bind = flags & TCA_ACT_FLAGS_BIND; +#endif + struct tc_action_net *tn = net_generic(net, nss_mirred_net_id); + u32 index; struct nlattr *arr[TC_NSS_MIRRED_MAX + 1]; struct tc_nss_mirred *parm; struct nss_mirred_tcf *act; @@ -92,7 +114,11 @@ static int nss_mirred_init(struct net *n /* * Parse and validate the user configurations. */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0)) ret = nla_parse_nested(arr, TC_NSS_MIRRED_MAX, nla, nss_mirred_policy); +#else + ret = nla_parse_nested_deprecated(arr, TC_NSS_MIRRED_MAX, nla, nss_mirred_policy, extack); +#endif if (ret < 0) { return ret; } @@ -193,6 +219,7 @@ static int nss_mirred_init(struct net *n /* * Return error if nss mirred action index is present in the hash. 
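Note on the nested-attribute parsing split above: the same nla_parse_nested() vs nla_parse_nested_deprecated() fork recurs at every policy parse in this driver. A local wrapper, illustrative only and mirroring exactly the two call forms the patch uses, would keep that fork in one place:

#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0))
#define nss_nla_parse_nested(tb, max, nla, policy, extack) \
    nla_parse_nested(tb, max, nla, policy)
#else
#define nss_nla_parse_nested(tb, max, nla, policy, extack) \
    nla_parse_nested_deprecated(tb, max, nla, policy, extack)
#endif

Callers that predate extack can simply pass NULL; the pre-5.2 branch drops the argument, matching what the patch does today.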
*/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) if (tcf_hash_check(parm->index, tc_act, bind)) { return -EEXIST; } @@ -204,7 +231,33 @@ static int nss_mirred_init(struct net *n } act = nss_mirred_get(tc_act); +#else + index = parm->index; + ret = tcf_idr_check_alloc(tn, &index, tc_act, bind); + if (ret < 0) { + return ret; + } + + if (ret && bind) { + return 0; + } + + if (!ret) { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 5, 0)) + ret = tcf_idr_create(tn, index, est, tc_act, &nss_mirred_act_ops, + bind, true); +#else + ret = tcf_idr_create(tn, index, est, tc_act, &nss_mirred_act_ops, + bind, true, 0); +#endif + if (ret) { + tcf_idr_cleanup(tn, index); + return ret; + } + } + act = nss_mirred_get(*tc_act); +#endif /* * Fill up the nss mirred tc parameters to * its local action structure. @@ -222,7 +275,9 @@ static int nss_mirred_init(struct net *n spin_lock_bh(&nss_mirred_list_lock); list_add(&act->tcfm_list, &nss_mirred_list); spin_unlock_bh(&nss_mirred_list_lock); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) tcf_hash_insert(tc_act); +#endif return ACT_P_CREATED; } @@ -234,10 +289,15 @@ static int nss_mirred_init(struct net *n static int nss_mirred_act(struct sk_buff *skb, const struct tc_action *tc_act, struct tcf_result *res) { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) struct nss_mirred_tcf *act = tc_act->priv; +#else + struct nss_mirred_tcf *act = nss_mirred_get(tc_act); +#endif struct net_device *dev; struct sk_buff *skb_new; int retval, err; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) u32 skb_tc_at = G_TC_AT(skb->tc_verd); /* @@ -247,6 +307,12 @@ static int nss_mirred_act(struct sk_buff return TC_ACT_UNSPEC; } +#else + if (!skb_at_tc_ingress(skb)) { + return TC_ACT_UNSPEC; + } +#endif + /* * Update the last use of action. */ @@ -276,9 +342,14 @@ static int nss_mirred_act(struct sk_buff skb_new->skb_iif = skb->dev->ifindex; skb_new->dev = dev; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) skb_new->tc_verd = SET_TC_FROM(skb_new->tc_verd, skb_tc_at); skb_push_rcsum(skb_new, skb->mac_len); skb_sender_cpu_clear(skb_new); +#else + skb_set_redirected(skb_new, skb_new->tc_at_ingress); + skb_push_rcsum(skb_new, skb->mac_len); +#endif err = dev_queue_xmit(skb_new); if (!err) { @@ -300,12 +371,21 @@ static int nss_mirred_dump(struct sk_buf { struct tcf_t filter; unsigned char *tail = skb_tail_pointer(skb); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) struct nss_mirred_tcf *act = tc_act->priv; +#else + struct nss_mirred_tcf *act = nss_mirred_get(tc_act); +#endif struct tc_nss_mirred opt = { .index = act->tcf_index, .action = act->tcf_action, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) .refcnt = act->tcf_refcnt - ref, .bindcnt = act->tcf_bindcnt - bind, +#else + .refcnt = refcount_read(&act->tcf_refcnt) - ref, + .bindcnt = atomic_read(&act->tcf_bindcnt) - bind, +#endif .from_ifindex = act->tcfm_from_ifindex, .to_ifindex = act->tcfm_to_ifindex, }; @@ -470,6 +550,64 @@ static int nss_mirred_device_event(struc return NOTIFY_DONE; } +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) +/* + * nss_mirred_walker + * nssmirred tcf_action walker + */ +static int nss_mirred_walker(struct net *net, struct sk_buff *skb, + struct netlink_callback *cb, int type, + const struct tc_action_ops *ops, + struct netlink_ext_ack *extack) +{ + struct tc_action_net *tn = net_generic(net, nss_mirred_net_id); + + return tcf_generic_walker(tn, skb, cb, type, ops, extack); +} + +/* + * nss_mirred_search + * nssmirred search idr function. 
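The ingress-mirror transmit path above changes in two places at 5.4: ingress detection moves from G_TC_AT(skb->tc_verd) to skb_at_tc_ingress(), and the clone is marked with skb_set_redirected() instead of SET_TC_FROM(). A condensed side-by-side sketch (the helper name is hypothetical, it uses only calls referenced by the patch, and it omits the skb_sender_cpu_clear() step kept for very old kernels):

static int nss_mirred_xmit_clone(struct sk_buff *orig, struct sk_buff *clone,
                                 struct net_device *to_dev)
{
    clone->skb_iif = orig->dev->ifindex;
    clone->dev = to_dev;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0))
    /* Old scheme: the AT bits travel in tc_verd, copied from the original skb. */
    clone->tc_verd = SET_TC_FROM(clone->tc_verd, G_TC_AT(orig->tc_verd));
#else
    /* New scheme: flag the clone as redirected from ingress. */
    skb_set_redirected(clone, clone->tc_at_ingress);
#endif
    skb_push_rcsum(clone, orig->mac_len);
    return dev_queue_xmit(clone);
}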
+ */ +static int nss_mirred_search(struct net *net, struct tc_action **a, u32 index) +{ + struct tc_action_net *tn = net_generic(net, nss_mirred_net_id); + + return tcf_idr_search(tn, a, index); +} + +/* + * nss_mirred_dev_put + * Release igs dev + */ +static void nss_mirred_dev_put(void *priv) +{ + struct net_device *dev = priv; + + dev_put(dev); +} + +/* + * nss_mirred_device + * Get the igs dev. + */ +static struct net_device *nss_mirred_device(const struct tc_action *a, tc_action_priv_destructor *destructor) +{ + struct nss_mirred_tcf *m = nss_mirred_get(a); + struct net_device *dev; + + rcu_read_lock(); + dev = rcu_dereference(m->tcfm_dev); + if (dev) { + dev_hold(dev); + *destructor = nss_mirred_dev_put; + } + rcu_read_unlock(); + + return dev; +} +#endif + /* * nss_mirred_device_notifier * nss mirred device notifier structure. @@ -482,14 +620,22 @@ static struct notifier_block nss_mirred_ * nss_mirred_act_ops * Registration structure for nss mirred action. */ -struct tc_action_ops nss_mirred_act_ops = { +static struct tc_action_ops nss_mirred_act_ops = { .kind = "nssmirred", - .type = TCA_ACT_MIRRED_NSS, .owner = THIS_MODULE, .act = nss_mirred_act, .dump = nss_mirred_dump, .cleanup = nss_mirred_release, .init = nss_mirred_init, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) + .type = TCA_ACT_MIRRED_NSS, +#else + .id = TCA_ID_MIRRED_NSS, + .walk = nss_mirred_walker, + .lookup = nss_mirred_search, + .size = sizeof(struct nss_mirred_tcf), + .get_dev = nss_mirred_device +#endif }; /* @@ -514,6 +660,52 @@ struct nf_hook_ops nss_mirred_igs_nf_ops }, }; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) +/* + * nss_mirred_init_net + * nssmirred net init function. + */ +static __net_init int nss_mirred_init_net(struct net *net) +{ + struct tc_action_net *tn = net_generic(net, nss_mirred_net_id); + nf_register_net_hooks(net, nss_mirred_igs_nf_ops, + ARRAY_SIZE(nss_mirred_igs_nf_ops)); + + return tc_action_net_init(net, tn, &nss_mirred_act_ops); +} + +/* + * nss_mirred_exit_net + * nssmirred net exit function. + */ +static void __net_exit nss_mirred_exit_net(struct net *net) +{ + nf_unregister_net_hooks(net, nss_mirred_igs_nf_ops, + ARRAY_SIZE(nss_mirred_igs_nf_ops)); +} + +/* + * nss_mirred_exit_batch_net + * nssmirred exit_batch_net function. + */ +static void __net_exit nss_mirred_exit_batch_net(struct list_head *net_list) +{ + tc_action_net_exit(net_list, nss_mirred_net_id); +} + +/* + * nss_mirred_net_ops + * Per netdevice ops. + */ +static struct pernet_operations nss_mirred_net_ops = { + .init = nss_mirred_init_net, + .exit = nss_mirred_exit_net, + .exit_batch = nss_mirred_exit_batch_net, + .id = &nss_mirred_net_id, + .size = sizeof(struct tc_action_net), +}; +#endif + /* * nss_mirred_init_module() * nssmirred init function. @@ -525,6 +717,7 @@ static int __init nss_mirred_init_module return err; } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) err = tcf_register_action(&nss_mirred_act_ops, NSS_MIRRED_TAB_MASK); if (err) { unregister_netdevice_notifier(&nss_mirred_device_notifier); @@ -538,6 +731,13 @@ static int __init nss_mirred_init_module unregister_netdevice_notifier(&nss_mirred_device_notifier); return err; } +#else + err = tcf_register_action(&nss_mirred_act_ops, &nss_mirred_net_ops); + if (err) { + unregister_netdevice_notifier(&nss_mirred_device_notifier); + return err; + } +#endif /* * Set the IGS module reference variable. 
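For context on the new >= 5.4 ->lookup()/->walk()/->get_dev() callbacks being added here: ->get_dev() hands the caller a held device together with a destructor, and the act_api core is expected to invoke that destructor once it is done. Roughly (illustrative only, the function name below is made up):

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
static void nss_mirred_get_dev_usage(const struct tc_action *a)
{
    tc_action_priv_destructor destructor;
    struct net_device *dev;

    dev = nss_mirred_device(a, &destructor);
    if (!dev)
        return;

    /* ... use dev ... */

    destructor(dev);    /* balances the dev_hold() taken under RCU */
}
#endif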
@@ -559,12 +759,16 @@ static void __exit nss_mirred_cleanup_mo */ nss_igs_module_save(&nss_mirred_act_ops, NULL); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) nf_unregister_hooks(nss_mirred_igs_nf_ops, ARRAY_SIZE(nss_mirred_igs_nf_ops)); /* * Un-register nss mirred action. */ tcf_unregister_action(&nss_mirred_act_ops); +#else + tcf_unregister_action(&nss_mirred_act_ops, &nss_mirred_net_ops); +#endif unregister_netdevice_notifier(&nss_mirred_device_notifier); } --- a/nss_qdisc/igs/nss_mirred.h +++ b/nss_qdisc/igs/nss_mirred.h @@ -1,6 +1,6 @@ /* ************************************************************************** - * Copyright (c) 2019 The Linux Foundation. All rights reserved. + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all copies. @@ -14,6 +14,7 @@ ************************************************************************** */ +#include #include #define NSS_MIRRED_TAB_MASK 7 @@ -23,7 +24,11 @@ * nss mirred internal structure. */ struct nss_mirred_tcf { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) struct tcf_common common; /* Common filter structure */ +#else + struct tc_action common; /* Common filter structure */ +#endif __u32 tcfm_to_ifindex; /* Index number of device to which * traffic will be redirected. */ @@ -40,5 +45,10 @@ struct nss_mirred_tcf { * To get the pointer of nss mirred action structure from the common * tc_action structure pointer. */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) #define nss_mirred_get(a) \ container_of(a->priv, struct nss_mirred_tcf, common) +#else +#define nss_mirred_get(a) ((struct nss_mirred_tcf *)a) +#endif + --- a/nss_qdisc/nss_bf.c +++ b/nss_qdisc/nss_bf.c @@ -68,12 +68,20 @@ static inline struct nss_bf_class_data * * nss_bf_change_class() * Configures a new class. */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) static int nss_bf_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca, unsigned long *arg) { + struct netlink_ext_ack *extack = NULL; +#else +static int nss_bf_change_class(struct Qdisc *sch, u32 classid, u32 parentid, + struct nlattr **tca, unsigned long *arg, struct netlink_ext_ack *extack) +{ +#endif struct nss_bf_sched_data *q = qdisc_priv(sch); struct nss_bf_class_data *cl = (struct nss_bf_class_data *)*arg; struct nlattr *opt = tca[TCA_OPTIONS]; + struct nlattr *tb[TCA_NSSBF_MAX + 1]; struct tc_nssbf_class_qopt *qopt; struct nss_if_msg nim_config; struct net_device *dev = qdisc_dev(sch); @@ -84,7 +92,12 @@ static int nss_bf_change_class(struct Qd return -EINVAL; } - qopt = nss_qdisc_qopt_get(opt, nss_bf_policy, TCA_NSSBF_MAX, TCA_NSSBF_CLASS_PARMS); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) + qopt = nss_qdisc_qopt_get(opt, nss_bf_policy, tb, TCA_NSSBF_MAX, TCA_NSSBF_CLASS_PARMS); +#else + qopt = nss_qdisc_qopt_get(opt, nss_bf_policy, tb, TCA_NSSBF_MAX, TCA_NSSBF_CLASS_PARMS, extack); +#endif + if (!qopt) { return -EINVAL; } @@ -111,7 +124,7 @@ static int nss_bf_change_class(struct Qd * reference count should not be 0. */ cl->qdisc = &noop_qdisc; - atomic_set(&cl->nq.refcnt, 1); + nss_qdisc_atomic_set(&cl->nq); *arg = (unsigned long)cl; nss_qdisc_info("Adding classid %u to qdisc %px hash queue %px\n", classid, sch, &q->clhash); @@ -121,7 +134,8 @@ static int nss_bf_change_class(struct Qd * that is registered to Linux. 
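A side note on the >= 5.4 nss_mirred_get() accessor introduced in nss_mirred.h above: the raw cast works because 'common' is the structure's first member. An equivalent, more self-documenting form (illustrative only, with a hypothetical name) is container_of(); the inner cast is still needed because ->act() and ->dump() hand in a const tc_action:

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
#define nss_mirred_get_alt(a) \
    container_of((struct tc_action *)(a), struct nss_mirred_tcf, common)
#endif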
Therefore we initialize the NSSBF_GROUP shaper * here. */ - if (nss_qdisc_init(sch, &cl->nq, NSS_SHAPER_NODE_TYPE_BF_GROUP, classid, accel_mode) < 0) { + if (nss_qdisc_init(sch, &cl->nq, NSS_SHAPER_NODE_TYPE_BF_GROUP, classid, accel_mode, extack) < 0) + { nss_qdisc_error("Nss init for class %u failed\n", classid); kfree(cl); return -EINVAL; @@ -260,7 +274,7 @@ static void nss_bf_destroy_class(struct /* * And now we destroy the child. */ - qdisc_destroy(cl->qdisc); + nss_qdisc_put(cl->qdisc); /* * Stop the stats polling timer and free class @@ -282,7 +296,11 @@ static void nss_bf_destroy_class(struct * nss_bf_delete_class() * Detaches a class from operation, but does not destroy it. */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0)) +static int nss_bf_delete_class(struct Qdisc *sch, unsigned long arg, struct netlink_ext_ack *extack) +#else static int nss_bf_delete_class(struct Qdisc *sch, unsigned long arg) +#endif { struct nss_bf_sched_data *q = qdisc_priv(sch); struct nss_bf_class_data *cl = (struct nss_bf_class_data *)arg; @@ -311,7 +329,7 @@ static int nss_bf_delete_class(struct Qd sch_tree_lock(sch); qdisc_reset(cl->qdisc); qdisc_class_hash_remove(&q->clhash, &cl->cl_common); - refcnt = atomic_sub_return(1, &cl->nq.refcnt); + refcnt = nss_qdisc_atomic_sub_return(&cl->nq); sch_tree_unlock(sch); if (!refcnt) { nss_qdisc_error("Reference count should not be zero for class %px\n", cl); @@ -324,8 +342,13 @@ static int nss_bf_delete_class(struct Qd * nss_bf_graft_class() * Replaces the qdisc attached to the provided class. */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) static int nss_bf_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, struct Qdisc **old) +#else +static int nss_bf_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, + struct Qdisc **old, struct netlink_ext_ack *extack) +#endif { struct nss_bf_sched_data *q = qdisc_priv(sch); struct nss_bf_class_data *cl = (struct nss_bf_class_data *)arg; @@ -415,6 +438,7 @@ static void nss_bf_qlen_notify(struct Qd */ } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) /* * nss_bf_get_class() * Fetches the class pointer if provided the classid. @@ -444,10 +468,24 @@ static void nss_bf_put_class(struct Qdis * We are safe to destroy the qdisc if the reference count * goes down to 0. */ - if (atomic_sub_return(1, &cl->nq.refcnt) == 0) { + if (nss_qdisc_atomic_sub_return(&cl->nq) == 0) { nss_bf_destroy_class(sch, cl); } } +#else +/* + * nss_bf_search_class() + * Fetches the class pointer if provided the classid. 
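nss_qdisc_put() above replaces the direct qdisc_destroy() call when tearing a class down. Its definition is outside these hunks, but since nss_prio_destroy() later in this patch open-codes the same split at 4.20, it is presumably a thin wrapper over the rename; a minimal sketch, assuming that is all it does:

static inline void nss_qdisc_put(struct Qdisc *qdisc)
{
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0))
    qdisc_destroy(qdisc);
#else
    qdisc_put(qdisc);
#endif
}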
+ */ +static unsigned long nss_bf_search_class(struct Qdisc *sch, u32 classid) +{ + struct nss_bf_class_data *cl = nss_bf_find_class(classid, sch); + + nss_qdisc_info("Get bf class %px - class match = %px\n", sch, cl); + + return (unsigned long)cl; +} +#endif /* * nss_bf_dump_class() @@ -475,7 +513,7 @@ static int nss_bf_dump_class(struct Qdis tcm->tcm_handle = cl->cl_common.classid; tcm->tcm_info = cl->qdisc->handle; - opts = nla_nest_start(skb, TCA_OPTIONS); + opts = nss_qdisc_nla_nest_start(skb, TCA_OPTIONS); if (opts == NULL || nla_put(skb, TCA_NSSBF_CLASS_PARMS, sizeof(qopt), &qopt)) { goto nla_put_failure; } @@ -495,7 +533,7 @@ static int nss_bf_dump_class_stats(struc { struct nss_qdisc *nq = (struct nss_qdisc *)arg; - if (nss_qdisc_gnet_stats_copy_basic(d, &nq->bstats) < 0 || + if (nss_qdisc_gnet_stats_copy_basic(sch, d, &nq->bstats) < 0 || nss_qdisc_gnet_stats_copy_queue(d, &nq->qstats) < 0) { return -1; } @@ -538,9 +576,15 @@ static void nss_bf_walk(struct Qdisc *sc * nss_bf_change_qdisc() * Can be used to configure a nssbf qdisc. */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) static int nss_bf_change_qdisc(struct Qdisc *sch, struct nlattr *opt) +#else +static int nss_bf_change_qdisc(struct Qdisc *sch, struct nlattr *opt, + struct netlink_ext_ack *extack) +#endif { struct nss_bf_sched_data *q = qdisc_priv(sch); + struct nlattr *tb[TCA_NSSBF_MAX + 1]; struct tc_nssbf_qopt *qopt; /* @@ -563,7 +607,11 @@ static int nss_bf_change_qdisc(struct Qd /* * If it is not NULL, parse to get qopt. */ - qopt = nss_qdisc_qopt_get(opt, nss_bf_policy, TCA_NSSBF_MAX, TCA_NSSBF_QDISC_PARMS); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) + qopt = nss_qdisc_qopt_get(opt, nss_bf_policy, tb, TCA_NSSBF_MAX, TCA_NSSBF_QDISC_PARMS); +#else + qopt = nss_qdisc_qopt_get(opt, nss_bf_policy, tb, TCA_NSSBF_MAX, TCA_NSSBF_QDISC_PARMS, extack); +#endif if (!qopt) { return -EINVAL; } @@ -643,7 +691,7 @@ static void nss_bf_destroy_qdisc(struct * Reduce refcnt by 1 before destroying. This is to * ensure that polling of stat stops properly. */ - atomic_sub(1, &cl->nq.refcnt); + nss_qdisc_atomic_sub(&cl->nq); /* * Detach class before destroying it. We dont check for noop qdisc here @@ -684,9 +732,17 @@ static void nss_bf_destroy_qdisc(struct * nss_bf_init_qdisc() * Initializes the nssbf qdisc. 
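nss_qdisc_nla_nest_start(), substituted for nla_nest_start() throughout the dump paths above, is also defined outside these hunks. Assuming it is a straight compatibility shim, it would look like the sketch below: from 5.2 onward nla_nest_start() sets NLA_F_NESTED on the container, and the old behaviour lives in nla_nest_start_noflag().

static inline struct nlattr *nss_qdisc_nla_nest_start(struct sk_buff *skb, int attrtype)
{
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0))
    return nla_nest_start(skb, attrtype);
#else
    return nla_nest_start_noflag(skb, attrtype);
#endif
}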
*/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) static int nss_bf_init_qdisc(struct Qdisc *sch, struct nlattr *opt) { + struct netlink_ext_ack *extack = NULL; +#else +static int nss_bf_init_qdisc(struct Qdisc *sch, struct nlattr *opt, + struct netlink_ext_ack *extack) +{ +#endif struct nss_bf_sched_data *q = qdisc_priv(sch); + struct nlattr *tb[TCA_NSSBF_MAX + 1]; struct tc_nssbf_qopt *qopt; int err; unsigned int accel_mode; @@ -710,7 +766,11 @@ static int nss_bf_init_qdisc(struct Qdis if (!opt) { accel_mode = TCA_NSS_ACCEL_MODE_NSS_FW; } else { - qopt = nss_qdisc_qopt_get(opt, nss_bf_policy, TCA_NSSBF_MAX, TCA_NSSBF_QDISC_PARMS); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) + qopt = nss_qdisc_qopt_get(opt, nss_bf_policy, tb, TCA_NSSBF_MAX, TCA_NSSBF_QDISC_PARMS); +#else + qopt = nss_qdisc_qopt_get(opt, nss_bf_policy, tb, TCA_NSSBF_MAX, TCA_NSSBF_QDISC_PARMS, extack); +#endif if (!qopt) { return -EINVAL; } @@ -720,7 +780,7 @@ static int nss_bf_init_qdisc(struct Qdis /* * Initialize the NSSBF shaper in NSS */ - if (nss_qdisc_init(sch, &q->nq, NSS_SHAPER_NODE_TYPE_BF, 0, accel_mode) < 0) { + if (nss_qdisc_init(sch, &q->nq, NSS_SHAPER_NODE_TYPE_BF, 0, accel_mode, extack) < 0) { return -EINVAL; } @@ -729,7 +789,11 @@ static int nss_bf_init_qdisc(struct Qdis /* * Tune nss_bf parameters. */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) if (nss_bf_change_qdisc(sch, opt) < 0) { +#else + if (nss_bf_change_qdisc(sch, opt, extack) < 0) { +#endif nss_qdisc_destroy(&q->nq); return -EINVAL; } @@ -756,7 +820,7 @@ static int nss_bf_dump_qdisc(struct Qdis qopt.defcls = q->defcls; qopt.accel_mode = nss_qdisc_accel_mode_get(&q->nq); - opts = nla_nest_start(skb, TCA_OPTIONS); + opts = nss_qdisc_nla_nest_start(skb, TCA_OPTIONS); if (!opts || nla_put(skb, TCA_NSSBF_QDISC_PARMS, sizeof(qopt), &qopt)) { goto nla_put_failure; } @@ -772,9 +836,18 @@ nla_put_failure: * nss_bf_enqueue() * Enqueues a skb to nssbf qdisc. */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) static int nss_bf_enqueue(struct sk_buff *skb, struct Qdisc *sch) +#else +static int nss_bf_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) +#endif { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) return nss_qdisc_enqueue(skb, sch); +#else + return nss_qdisc_enqueue(skb, sch, to_free); +#endif } /* @@ -786,6 +859,7 @@ static struct sk_buff *nss_bf_dequeue(st return nss_qdisc_dequeue(sch); } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) /* * nss_bf_drop() * Drops a single skb from linux queue, if not empty. 
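The 4.8 ->enqueue() signature change (the extra to_free list, plus the removal of the ->drop() op) is repeated verbatim in every scheduler touched by this patch. Purely as a suggestion, a pair of local macros built from the two signatures the patch already uses would shrink that boilerplate:

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0))
#define NSS_QDISC_ENQUEUE_ARGS struct sk_buff *skb, struct Qdisc *sch
#define NSS_QDISC_ENQUEUE_CALL(skb, sch, to_free) nss_qdisc_enqueue(skb, sch)
#else
#define NSS_QDISC_ENQUEUE_ARGS struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free
#define NSS_QDISC_ENQUEUE_CALL(skb, sch, to_free) nss_qdisc_enqueue(skb, sch, to_free)
#endif

Each per-qdisc enqueue would then collapse to: static int nss_bf_enqueue(NSS_QDISC_ENQUEUE_ARGS) { return NSS_QDISC_ENQUEUE_CALL(skb, sch, to_free); }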
@@ -797,6 +871,7 @@ static unsigned int nss_bf_drop(struct Q printk("In bf drop\n"); return nss_qdisc_drop(sch); } +#endif /* * Registration structure for nssbf class @@ -807,9 +882,17 @@ const struct Qdisc_class_ops nss_bf_clas .graft = nss_bf_graft_class, .leaf = nss_bf_leaf_class, .qlen_notify = nss_bf_qlen_notify, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) .get = nss_bf_get_class, .put = nss_bf_put_class, +#else + .find = nss_bf_search_class, +#endif +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) .tcf_chain = nss_qdisc_tcf_chain, +#else + .tcf_block = nss_qdisc_tcf_block, +#endif .bind_tcf = nss_qdisc_tcf_bind, .unbind_tcf = nss_qdisc_tcf_unbind, .dump = nss_bf_dump_class, @@ -830,7 +913,9 @@ struct Qdisc_ops nss_bf_qdisc_ops __read .enqueue = nss_bf_enqueue, .dequeue = nss_bf_dequeue, .peek = qdisc_peek_dequeued, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) .drop = nss_bf_drop, +#endif .cl_ops = &nss_bf_class_ops, .priv_size = sizeof(struct nss_bf_sched_data), .owner = THIS_MODULE --- a/nss_qdisc/nss_blackhole.c +++ b/nss_qdisc/nss_blackhole.c @@ -1,6 +1,6 @@ /* ************************************************************************** - * Copyright (c) 2014, 2016-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2014, 2016-2017, 2020, The Linux Foundation. All rights reserved. * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all copies. @@ -35,9 +35,18 @@ static struct nla_policy nss_blackhole_p * nss_blackhole_enqueue() * Enqueue API for nss blackhole qdisc. */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) static int nss_blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch) +#else +static int nss_blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) +#endif { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) return nss_qdisc_enqueue(skb, sch); +#else + return nss_qdisc_enqueue(skb, sch, to_free); +#endif } /* @@ -49,6 +58,7 @@ static struct sk_buff *nss_blackhole_deq return nss_qdisc_dequeue(sch); } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) /* * nss_blackhole_drop() * The following function drops a packet from HLOS queue. @@ -60,6 +70,7 @@ static unsigned int nss_blackhole_drop(s nss_qdisc_info("qdisc %x dropping\n", sch->handle); return nss_qdisc_drop(sch); } +#endif /* * nss_blackhole_reset() @@ -92,9 +103,15 @@ static void nss_blackhole_destroy(struct * nss_blackhole_change() * Function call used to configure the parameters of the nss blackhole qdisc. 
*/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) static int nss_blackhole_change(struct Qdisc *sch, struct nlattr *opt) +#else +static int nss_blackhole_change(struct Qdisc *sch, struct nlattr *opt, + struct netlink_ext_ack *extack) +#endif { struct nss_blackhole_sched_data *q; + struct nlattr *tb[TCA_NSSBLACKHOLE_MAX + 1]; struct tc_nssblackhole_qopt *qopt; struct nss_if_msg nim; @@ -102,7 +119,11 @@ static int nss_blackhole_change(struct Q return 0; } - qopt = nss_qdisc_qopt_get(opt, nss_blackhole_policy, TCA_NSSBLACKHOLE_MAX, TCA_NSSBLACKHOLE_PARMS); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) + qopt = nss_qdisc_qopt_get(opt, nss_blackhole_policy, tb, TCA_NSSBLACKHOLE_MAX, TCA_NSSBLACKHOLE_PARMS); +#else + qopt = nss_qdisc_qopt_get(opt, nss_blackhole_policy, tb, TCA_NSSBLACKHOLE_MAX, TCA_NSSBLACKHOLE_PARMS, extack); +#endif if (!qopt) { return -EINVAL; } @@ -154,9 +175,17 @@ static int nss_blackhole_change(struct Q * nss_blackhole_init() * Initializes a nss blackhole qdisc. */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) static int nss_blackhole_init(struct Qdisc *sch, struct nlattr *opt) { + struct netlink_ext_ack *extack = NULL; +#else +static int nss_blackhole_init(struct Qdisc *sch, struct nlattr *opt, + struct netlink_ext_ack *extack) +{ +#endif struct nss_qdisc *nq = qdisc_priv(sch); + struct nlattr *tb[TCA_NSSBLACKHOLE_MAX + 1]; struct tc_nssblackhole_qopt *qopt; unsigned int accel_mode; @@ -166,7 +195,11 @@ static int nss_blackhole_init(struct Qdi if (!opt) { accel_mode = TCA_NSS_ACCEL_MODE_PPE; } else { - qopt = nss_qdisc_qopt_get(opt, nss_blackhole_policy, TCA_NSSBLACKHOLE_MAX, TCA_NSSBLACKHOLE_PARMS); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) + qopt = nss_qdisc_qopt_get(opt, nss_blackhole_policy, tb, TCA_NSSBLACKHOLE_MAX, TCA_NSSBLACKHOLE_PARMS); +#else + qopt = nss_qdisc_qopt_get(opt, nss_blackhole_policy, tb, TCA_NSSBLACKHOLE_MAX, TCA_NSSBLACKHOLE_PARMS, extack); +#endif if (!qopt) { return -EINVAL; } @@ -176,12 +209,17 @@ static int nss_blackhole_init(struct Qdi nss_qdisc_info("qdisc %x initializing\n", sch->handle); nss_blackhole_reset(sch); - if (nss_qdisc_init(sch, nq, NSS_SHAPER_NODE_TYPE_FIFO, 0, accel_mode) < 0) { + if (nss_qdisc_init(sch, nq, NSS_SHAPER_NODE_TYPE_FIFO, 0, accel_mode, extack) < 0) + { return -EINVAL; } nss_qdisc_info("qdisc %x initialized with parent %x\n", sch->handle, sch->parent); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) if (nss_blackhole_change(sch, opt) < 0) { +#else + if (nss_blackhole_change(sch, opt, extack) < 0) { +#endif nss_qdisc_destroy(nq); return -EINVAL; } @@ -214,7 +252,7 @@ static int nss_blackhole_dump(struct Qdi opt.set_default = q->set_default; opt.accel_mode = nss_qdisc_accel_mode_get(&q->nq); - opts = nla_nest_start(skb, TCA_OPTIONS); + opts = nss_qdisc_nla_nest_start(skb, TCA_OPTIONS); if (opts == NULL) { goto nla_put_failure; } @@ -251,7 +289,9 @@ struct Qdisc_ops nss_blackhole_qdisc_ops .enqueue = nss_blackhole_enqueue, .dequeue = nss_blackhole_dequeue, .peek = nss_blackhole_peek, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) .drop = nss_blackhole_drop, +#endif .init = nss_blackhole_init, .reset = nss_blackhole_reset, .destroy = nss_blackhole_destroy, --- a/nss_qdisc/nss_codel.c +++ b/nss_qdisc/nss_codel.c @@ -1,6 +1,6 @@ /* ************************************************************************** - * Copyright (c) 2014, 2016-2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2014, 2016-2018, 2020, The Linux Foundation. All rights reserved. 
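The extack plumbing pattern used by the *_init()/*_change() entry points above, and repeated for every qdisc that follows, is worth calling out: on pre-4.16 kernels, which do not pass a netlink_ext_ack into these callbacks, a NULL local is declared so the shared helpers can keep a single signature. Schematically (hypothetical function name):

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0))
static int nss_example_init(struct Qdisc *sch, struct nlattr *opt)
{
    struct netlink_ext_ack *extack = NULL;    /* no extack before 4.16 */
#else
static int nss_example_init(struct Qdisc *sch, struct nlattr *opt,
                            struct netlink_ext_ack *extack)
{
#endif
    /* ... the body can now pass 'extack' through unconditionally ... */
    return 0;
}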
* Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all copies. @@ -76,9 +76,18 @@ static struct nla_policy nss_codel_polic * nss_codel_enqueue() * Enqueue a packet into nss_codel queue in NSS firmware (bounce). */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) static int nss_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) +#else +static int nss_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) +#endif { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) return nss_qdisc_enqueue(skb, sch); +#else + return nss_qdisc_enqueue(skb, sch, to_free); +#endif } /* @@ -90,6 +99,7 @@ static struct sk_buff *nss_codel_dequeue return nss_qdisc_dequeue(sch); } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) /* * nss_codel_drop() * Drops a packet from the bounce complete queue. @@ -100,6 +110,7 @@ static unsigned int nss_codel_drop(struc { return nss_qdisc_drop(sch); } +#endif /* * nss_codel_reset() @@ -234,9 +245,15 @@ static int nss_codel_mem_sz_get(struct Q * nss_codel_change() * Used to configure the nss_codel queue in NSS firmware. */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) static int nss_codel_change(struct Qdisc *sch, struct nlattr *opt) +#else +static int nss_codel_change(struct Qdisc *sch, struct nlattr *opt, + struct netlink_ext_ack *extack) +#endif { struct nss_codel_sched_data *q = qdisc_priv(sch); + struct nlattr *tb[TCA_NSSCODEL_MAX + 1]; struct tc_nsscodel_qopt *qopt; struct nss_if_msg nim; struct net_device *dev = qdisc_dev(sch); @@ -245,7 +262,11 @@ static int nss_codel_change(struct Qdisc struct nss_shaper_node_config *config; bool free_flow_queue = true; - qopt = nss_qdisc_qopt_get(opt, nss_codel_policy, TCA_NSSCODEL_MAX, TCA_NSSCODEL_PARMS); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) + qopt = nss_qdisc_qopt_get(opt, nss_codel_policy, tb, TCA_NSSCODEL_MAX, TCA_NSSCODEL_PARMS); +#else + qopt = nss_qdisc_qopt_get(opt, nss_codel_policy, tb, TCA_NSSCODEL_MAX, TCA_NSSCODEL_PARMS, extack); +#endif if (!qopt) { return -EINVAL; } @@ -381,16 +402,28 @@ fail: * nss_codel_init() * Initializes the nss_codel qdisc. 
*/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) static int nss_codel_init(struct Qdisc *sch, struct nlattr *opt) { + struct netlink_ext_ack *extack = NULL; +#else +static int nss_codel_init(struct Qdisc *sch, struct nlattr *opt, + struct netlink_ext_ack *extack) +{ +#endif struct nss_qdisc *nq = qdisc_priv(sch); + struct nlattr *tb[TCA_NSSCODEL_MAX + 1]; struct tc_nsscodel_qopt *qopt; if (!opt) { return -EINVAL; } - qopt = nss_qdisc_qopt_get(opt, nss_codel_policy, TCA_NSSCODEL_MAX, TCA_NSSCODEL_PARMS); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) + qopt = nss_qdisc_qopt_get(opt, nss_codel_policy, tb, TCA_NSSCODEL_MAX, TCA_NSSCODEL_PARMS); +#else + qopt = nss_qdisc_qopt_get(opt, nss_codel_policy, tb, TCA_NSSCODEL_MAX, TCA_NSSCODEL_PARMS, extack); +#endif if (!qopt) { return -EINVAL; } @@ -404,7 +437,8 @@ static int nss_codel_init(struct Qdisc * nss_qdisc_register_configure_callback(nq, nss_codel_configure_callback); nss_qdisc_register_stats_callback(nq, nss_codel_stats_callback); - if (nss_qdisc_init(sch, nq, NSS_SHAPER_NODE_TYPE_CODEL, 0, qopt->accel_mode) < 0) { + if (nss_qdisc_init(sch, nq, NSS_SHAPER_NODE_TYPE_CODEL, 0, qopt->accel_mode, extack) < 0) + { return -EINVAL; } @@ -412,7 +446,11 @@ static int nss_codel_init(struct Qdisc * return -EINVAL; } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) if (nss_codel_change(sch, opt) < 0) { +#else + if (nss_codel_change(sch, opt, extack) < 0) { +#endif nss_qdisc_destroy(nq); return -EINVAL; } @@ -451,7 +489,7 @@ static int nss_codel_dump(struct Qdisc * opt.flows = q->flows; opt.ecn = q->ecn; - opts = nla_nest_start(skb, TCA_OPTIONS); + opts = nss_qdisc_nla_nest_start(skb, TCA_OPTIONS); if (opts == NULL) { goto nla_put_failure; } @@ -511,7 +549,9 @@ struct Qdisc_ops nss_codel_qdisc_ops __r .enqueue = nss_codel_enqueue, .dequeue = nss_codel_dequeue, .peek = nss_codel_peek, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) .drop = nss_codel_drop, +#endif .init = nss_codel_init, .reset = nss_codel_reset, .destroy = nss_codel_destroy, @@ -530,7 +570,9 @@ struct Qdisc_ops nss_fq_codel_qdisc_ops .enqueue = nss_codel_enqueue, .dequeue = nss_codel_dequeue, .peek = nss_codel_peek, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) .drop = nss_codel_drop, +#endif .init = nss_codel_init, .reset = nss_codel_reset, .destroy = nss_codel_destroy, --- a/nss_qdisc/nss_fifo.c +++ b/nss_qdisc/nss_fifo.c @@ -29,9 +29,18 @@ static struct nla_policy nss_fifo_policy [TCA_NSSFIFO_PARMS] = { .len = sizeof(struct tc_nssfifo_qopt) }, }; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) static int nss_fifo_enqueue(struct sk_buff *skb, struct Qdisc *sch) +#else +static int nss_fifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) +#endif { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) return nss_qdisc_enqueue(skb, sch); +#else + return nss_qdisc_enqueue(skb, sch, to_free); +#endif } static struct sk_buff *nss_fifo_dequeue(struct Qdisc *sch) @@ -39,11 +48,13 @@ static struct sk_buff *nss_fifo_dequeue( return nss_qdisc_dequeue(sch); } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) static unsigned int nss_fifo_drop(struct Qdisc *sch) { nss_qdisc_info("nss_fifo dropping"); return nss_qdisc_drop(sch); } +#endif static void nss_fifo_reset(struct Qdisc *sch) { @@ -64,8 +75,14 @@ static void nss_fifo_destroy(struct Qdis nss_qdisc_info("nss_fifo destroyed"); } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) static int nss_fifo_params_validate_and_save(struct Qdisc *sch, struct nlattr *opt) +#else +static int 
nss_fifo_params_validate_and_save(struct Qdisc *sch, struct nlattr *opt, + struct netlink_ext_ack *extack) +#endif { + struct nlattr *tb[TCA_NSSFIFO_MAX + 1]; struct tc_nssfifo_qopt *qopt; struct nss_fifo_sched_data *q = qdisc_priv(sch); bool is_bfifo = (sch->ops == &nss_bfifo_qdisc_ops); @@ -74,7 +91,11 @@ static int nss_fifo_params_validate_and_ return -EINVAL; } - qopt = nss_qdisc_qopt_get(opt, nss_fifo_policy, TCA_NSSFIFO_MAX, TCA_NSSFIFO_PARMS); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) + qopt = nss_qdisc_qopt_get(opt, nss_fifo_policy, tb, TCA_NSSFIFO_MAX, TCA_NSSFIFO_PARMS); +#else + qopt = nss_qdisc_qopt_get(opt, nss_fifo_policy, tb, TCA_NSSFIFO_MAX, TCA_NSSFIFO_PARMS, extack); +#endif if (!qopt) { nss_qdisc_warning("Invalid input to fifo %x", sch->handle); return -EINVAL; @@ -101,7 +122,11 @@ static int nss_fifo_params_validate_and_ } #if defined(NSS_QDISC_PPE_SUPPORT) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) static int nss_fifo_ppe_change(struct Qdisc *sch, struct nlattr *opt) +#else +static int nss_fifo_ppe_change(struct Qdisc *sch, struct nlattr *opt, struct netlink_ext_ack *extack) +#endif { struct nss_fifo_sched_data *q = qdisc_priv(sch); struct nss_qdisc *nq = &q->nq; @@ -150,28 +175,45 @@ fail: /* * Fallback to nss qdisc if PPE Qdisc configuration failed at init time. */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) if (nss_ppe_fallback_to_nss(&q->nq, opt) < 0) { - nss_qdisc_warning("nss_fifo %x fallback to nss failed\n", sch->handle); +#else + if (nss_ppe_fallback_to_nss(&q->nq, opt, extack) < 0) { +#endif + nss_qdisc_warning("nss_fifo %x fallback to nss failed\n", sch->handle); return -EINVAL; } return 0; } #endif +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) static int nss_fifo_change(struct Qdisc *sch, struct nlattr *opt) +#else +static int nss_fifo_change(struct Qdisc *sch, struct nlattr *opt, + struct netlink_ext_ack *extack) +#endif { struct nss_fifo_sched_data *q = qdisc_priv(sch); struct nss_qdisc *nq = &q->nq; struct nss_if_msg nim; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) if (nss_fifo_params_validate_and_save(sch, opt) < 0) { +#else + if (nss_fifo_params_validate_and_save(sch, opt, extack) < 0) { +#endif nss_qdisc_warning("nss_fifo %px params validate and save failed\n", sch); return -EINVAL; } #if defined(NSS_QDISC_PPE_SUPPORT) if (nq->mode == NSS_QDISC_MODE_PPE) { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) if (nss_fifo_ppe_change(sch, opt) < 0) { +#else + if (nss_fifo_ppe_change(sch, opt, extack) < 0) { +#endif nss_qdisc_warning("nss_fifo %px params validate and save failed\n", sch); return -EINVAL; } @@ -208,9 +250,17 @@ static int nss_fifo_change(struct Qdisc return 0; } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) static int nss_fifo_init(struct Qdisc *sch, struct nlattr *opt) { + struct netlink_ext_ack *extack = NULL; +#else +static int nss_fifo_init(struct Qdisc *sch, struct nlattr *opt, + struct netlink_ext_ack *extack) +{ +#endif struct nss_qdisc *nq = qdisc_priv(sch); + struct nlattr *tb[TCA_NSSFIFO_MAX + 1]; struct tc_nssfifo_qopt *qopt; if (!opt) { @@ -220,19 +270,28 @@ static int nss_fifo_init(struct Qdisc *s nss_qdisc_info("Initializing Fifo - type %d\n", NSS_SHAPER_NODE_TYPE_FIFO); nss_fifo_reset(sch); - qopt = nss_qdisc_qopt_get(opt, nss_fifo_policy, TCA_NSSFIFO_MAX, TCA_NSSFIFO_PARMS); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) + qopt = nss_qdisc_qopt_get(opt, nss_fifo_policy, tb, TCA_NSSFIFO_MAX, TCA_NSSFIFO_PARMS); +#else + qopt = nss_qdisc_qopt_get(opt, 
nss_fifo_policy, tb, TCA_NSSFIFO_MAX, TCA_NSSFIFO_PARMS, extack); +#endif if (!qopt) { nss_qdisc_warning("Invalid input to fifo %x", sch->handle); return -EINVAL; } - if (nss_qdisc_init(sch, nq, NSS_SHAPER_NODE_TYPE_FIFO, 0, qopt->accel_mode) < 0) { + if (nss_qdisc_init(sch, nq, NSS_SHAPER_NODE_TYPE_FIFO, 0, qopt->accel_mode, extack) < 0) + { nss_qdisc_warning("Fifo %x init failed", sch->handle); return -EINVAL; } nss_qdisc_info("NSS fifo initialized - handle %x parent %x\n", sch->handle, sch->parent); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) if (nss_fifo_change(sch, opt) < 0) { +#else + if (nss_fifo_change(sch, opt, extack) < 0) { +#endif nss_qdisc_destroy(nq); return -EINVAL; } @@ -262,7 +321,8 @@ static int nss_fifo_dump(struct Qdisc *s opt.set_default = q->set_default; opt.accel_mode = nss_qdisc_accel_mode_get(&q->nq); - opts = nla_nest_start(skb, TCA_OPTIONS); + opts = nss_qdisc_nla_nest_start(skb, TCA_OPTIONS); + if (opts == NULL) { goto nla_put_failure; } @@ -290,7 +350,9 @@ struct Qdisc_ops nss_pfifo_qdisc_ops __r .enqueue = nss_fifo_enqueue, .dequeue = nss_fifo_dequeue, .peek = nss_fifo_peek, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) .drop = nss_fifo_drop, +#endif .init = nss_fifo_init, .reset = nss_fifo_reset, .destroy = nss_fifo_destroy, @@ -305,7 +367,9 @@ struct Qdisc_ops nss_bfifo_qdisc_ops __r .enqueue = nss_fifo_enqueue, .dequeue = nss_fifo_dequeue, .peek = nss_fifo_peek, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) .drop = nss_fifo_drop, +#endif .init = nss_fifo_init, .reset = nss_fifo_reset, .destroy = nss_fifo_destroy, --- a/nss_qdisc/nss_htb.c +++ b/nss_qdisc/nss_htb.c @@ -83,10 +83,16 @@ static inline struct nss_htb_class_data * nss_htb_class_params_validate_and_save() * Validates and saves the qdisc configuration parameters. */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) static int nss_htb_class_params_validate_and_save(struct Qdisc *sch, struct nlattr **tca, struct nss_htb_param *param) +#else +static int nss_htb_class_params_validate_and_save(struct Qdisc *sch, struct nlattr **tca, + struct nss_htb_param *param, struct netlink_ext_ack *extack) +#endif { struct nlattr *opt = tca[TCA_OPTIONS]; + struct nlattr *tb[TCA_NSSHTB_MAX + 1]; struct tc_nsshtb_class_qopt *qopt; struct nss_htb_sched_data *q = qdisc_priv(sch); struct net_device *dev = qdisc_dev(sch); @@ -99,7 +105,11 @@ static int nss_htb_class_params_validate return -EINVAL; } - qopt = nss_qdisc_qopt_get(opt, nss_htb_policy, TCA_NSSHTB_MAX, TCA_NSSHTB_CLASS_PARMS); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) + qopt = nss_qdisc_qopt_get(opt, nss_htb_policy, tb, TCA_NSSHTB_MAX, TCA_NSSHTB_CLASS_PARMS); +#else + qopt = nss_qdisc_qopt_get(opt, nss_htb_policy, tb, TCA_NSSHTB_MAX, TCA_NSSHTB_CLASS_PARMS, extack); +#endif if (!qopt) { return -EINVAL; } @@ -219,7 +229,7 @@ static struct nss_htb_class_data *nss_ht * reference count should not be 0. */ cl->qdisc = &noop_qdisc; - atomic_set(&cl->nq.refcnt, 1); + nss_qdisc_atomic_set(&cl->nq); return cl; } @@ -266,9 +276,16 @@ static int nss_htb_ppe_change_class(stru * nss_htb_change_class() * Configures a new class. 
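The nss_qdisc_atomic_set()/nss_qdisc_atomic_sub_return()/nss_qdisc_atomic_sub() helpers that replace the open-coded atomic_set()/atomic_sub_return()/atomic_sub() calls on nq->refcnt are defined elsewhere; judging by the code they replace, they are thin wrappers like the sketch below (the real versions may instead switch nq->refcnt to refcount_t on newer kernels):

static inline void nss_qdisc_atomic_set(struct nss_qdisc *nq)
{
    atomic_set(&nq->refcnt, 1);
}

static inline int nss_qdisc_atomic_sub_return(struct nss_qdisc *nq)
{
    return atomic_sub_return(1, &nq->refcnt);
}

static inline void nss_qdisc_atomic_sub(struct nss_qdisc *nq)
{
    atomic_sub(1, &nq->refcnt);
}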
*/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) static int nss_htb_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca, unsigned long *arg) { + struct netlink_ext_ack *extack = NULL; +#else +static int nss_htb_change_class(struct Qdisc *sch, u32 classid, u32 parentid, + struct nlattr **tca, unsigned long *arg, struct netlink_ext_ack *extack) +{ +#endif struct nss_htb_sched_data *q = qdisc_priv(sch); struct nss_htb_class_data *cl = (struct nss_htb_class_data *)*arg; struct nss_htb_class_data *parent; @@ -282,7 +299,11 @@ static int nss_htb_change_class(struct Q nss_qdisc_trace("configuring htb class %x of qdisc %x\n", classid, sch->handle); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) if (nss_htb_class_params_validate_and_save(sch, tca, ¶m) < 0) { +#else + if (nss_htb_class_params_validate_and_save(sch, tca, ¶m, extack) < 0) { +#endif nss_qdisc_warning("validation of configuration parameters for htb class %x failed\n", sch->handle); return -EINVAL; @@ -332,7 +353,8 @@ static int nss_htb_change_class(struct Q * here. */ cl->nq.parent = nq_parent; - if (nss_qdisc_init(sch, &cl->nq, NSS_SHAPER_NODE_TYPE_HTB_GROUP, classid, accel_mode) < 0) { + if (nss_qdisc_init(sch, &cl->nq, NSS_SHAPER_NODE_TYPE_HTB_GROUP, classid, accel_mode, extack) < 0) + { nss_qdisc_error("nss_init for htb class %x failed\n", classid); goto failure; } @@ -478,7 +500,7 @@ static void nss_htb_destroy_class(struct /* * And now we destroy the child. */ - qdisc_destroy(cl->qdisc); + nss_qdisc_put(cl->qdisc); /* * Stop the stats polling timer and free class @@ -500,7 +522,11 @@ static void nss_htb_destroy_class(struct * nss_htb_delete_class() * Detaches a class from operation, but does not destroy it. */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0)) +static int nss_htb_delete_class(struct Qdisc *sch, unsigned long arg, struct netlink_ext_ack *extack) +#else static int nss_htb_delete_class(struct Qdisc *sch, unsigned long arg) +#endif { struct nss_htb_sched_data *q = qdisc_priv(sch); struct nss_htb_class_data *cl = (struct nss_htb_class_data *)arg; @@ -550,7 +576,7 @@ static int nss_htb_delete_class(struct Q * We simply deduct refcnt and return. */ if (!cl->parent) { - refcnt = atomic_sub_return(1, &cl->nq.refcnt); + refcnt = nss_qdisc_atomic_sub_return(&cl->nq); sch_tree_unlock(sch); return 0; } @@ -567,7 +593,7 @@ static int nss_htb_delete_class(struct Q /* * Decrement refcnt and return */ - refcnt = atomic_sub_return(1, &cl->nq.refcnt); + refcnt = nss_qdisc_atomic_sub_return(&cl->nq); sch_tree_unlock(sch); return 0; @@ -577,7 +603,12 @@ static int nss_htb_delete_class(struct Q * nss_htb_graft_class() * Replaces the qdisc attached to the provided class. */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) static int nss_htb_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, struct Qdisc **old) +#else +static int nss_htb_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, struct Qdisc **old, + struct netlink_ext_ack *extack) +#endif { struct nss_htb_class_data *cl = (struct nss_htb_class_data *)arg; struct nss_if_msg nim_detach; @@ -664,6 +695,7 @@ static void nss_htb_qlen_notify(struct Q */ } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) /* * nss_htb_get_class() * Fetches the class pointer if provided the classid. @@ -695,10 +727,22 @@ static void nss_htb_put_class(struct Qdi * We are safe to destroy the qdisc if the reference count * goes down to 0. 
*/ - if (atomic_sub_return(1, &cl->nq.refcnt) == 0) { + if (nss_qdisc_atomic_sub_return(&cl->nq) == 0) { nss_htb_destroy_class(sch, cl); } } +#else +/* + * nss_htb_search_class() + * Fetches the class pointer if provided the classid. + */ +static unsigned long nss_htb_search_class(struct Qdisc *sch, u32 classid) +{ + struct nss_htb_class_data *cl = nss_htb_find_class(classid, sch); + + return (unsigned long)cl; +} +#endif /* * nss_htb_dump_class() @@ -728,8 +772,7 @@ static int nss_htb_dump_class(struct Qdi tcm->tcm_handle = cl->sch_common.classid; tcm->tcm_info = cl->qdisc->handle; - opts = nla_nest_start(skb, TCA_OPTIONS); - + opts = nss_qdisc_nla_nest_start(skb, TCA_OPTIONS); if (opts == NULL || nla_put(skb, TCA_NSSHTB_CLASS_PARMS, sizeof(qopt), &qopt)) { goto nla_put_failure; } @@ -750,7 +793,7 @@ static int nss_htb_dump_class_stats(stru { struct nss_qdisc *nq = (struct nss_qdisc *)arg; - if (nss_qdisc_gnet_stats_copy_basic(d, &nq->bstats) < 0 || + if (nss_qdisc_gnet_stats_copy_basic(sch, d, &nq->bstats) < 0 || nss_qdisc_gnet_stats_copy_queue(d, &nq->qstats) < 0) { nss_qdisc_error("htb class %x stats dump failed\n", nq->qos_tag); return -1; @@ -795,9 +838,15 @@ static void nss_htb_walk(struct Qdisc *s * nss_htb_change_qdisc() * Can be used to configure a htb qdisc. */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) static int nss_htb_change_qdisc(struct Qdisc *sch, struct nlattr *opt) +#else +static int nss_htb_change_qdisc(struct Qdisc *sch, struct nlattr *opt, + struct netlink_ext_ack *extack) +#endif { struct nss_htb_sched_data *q = qdisc_priv(sch); + struct nlattr *tb[TCA_NSSHTB_MAX + 1]; struct tc_nsshtb_qopt *qopt; /* @@ -820,7 +869,11 @@ static int nss_htb_change_qdisc(struct Q /* * If it is not NULL, parse to get qopt. */ - qopt = nss_qdisc_qopt_get(opt, nss_htb_policy, TCA_NSSHTB_MAX, TCA_NSSHTB_QDISC_PARMS); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) + qopt = nss_qdisc_qopt_get(opt, nss_htb_policy, tb, TCA_NSSHTB_MAX, TCA_NSSHTB_QDISC_PARMS); +#else + qopt = nss_qdisc_qopt_get(opt, nss_htb_policy, tb, TCA_NSSHTB_MAX, TCA_NSSHTB_QDISC_PARMS, extack); +#endif if (!qopt) { return -EINVAL; } @@ -895,7 +948,7 @@ static void nss_htb_destroy_qdisc(struct * Reduce refcnt by 1 before destroying. This is to * ensure that polling of stat stops properly. */ - atomic_sub(1, &cl->nq.refcnt); + nss_qdisc_atomic_sub(&cl->nq); /* * We are not root class. Therefore we reduce the children count @@ -945,9 +998,17 @@ static void nss_htb_destroy_qdisc(struct * nss_htb_init_qdisc() * Initializes the htb qdisc. 
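nss_qdisc_gnet_stats_copy_basic() now takes the Qdisc as its first argument in the dump-stats paths above. The wrapper itself is not shown; the likely reason is that newer gnet_stats_copy_basic() variants need per-qdisc context (such as the root running seqcount). A rough sketch of what such a wrapper could look like on a 5.4-era kernel, assuming the bstats field is a gnet_stats_basic_packed:

static inline int nss_qdisc_gnet_stats_copy_basic(struct Qdisc *sch,
                                                  struct gnet_dump *d,
                                                  struct gnet_stats_basic_packed *b)
{
    return gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, NULL, b);
}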
*/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) static int nss_htb_init_qdisc(struct Qdisc *sch, struct nlattr *opt) { + struct netlink_ext_ack *extack = NULL; +#else +static int nss_htb_init_qdisc(struct Qdisc *sch, struct nlattr *opt, + struct netlink_ext_ack *extack) +{ +#endif struct nss_htb_sched_data *q = qdisc_priv(sch); + struct nlattr *tb[TCA_NSSHTB_MAX + 1]; struct tc_nsshtb_qopt *qopt; int err; unsigned int accel_mode; @@ -964,7 +1025,11 @@ static int nss_htb_init_qdisc(struct Qdi if (!opt) { accel_mode = TCA_NSS_ACCEL_MODE_PPE; } else { - qopt = nss_qdisc_qopt_get(opt, nss_htb_policy, TCA_NSSHTB_MAX, TCA_NSSHTB_QDISC_PARMS); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) + qopt = nss_qdisc_qopt_get(opt, nss_htb_policy, tb, TCA_NSSHTB_MAX, TCA_NSSHTB_QDISC_PARMS); +#else + qopt = nss_qdisc_qopt_get(opt, nss_htb_policy, tb, TCA_NSSHTB_MAX, TCA_NSSHTB_QDISC_PARMS, extack); +#endif if (!qopt) { return -EINVAL; } @@ -977,7 +1042,7 @@ static int nss_htb_init_qdisc(struct Qdi /* * Initialize the NSSHTB shaper in NSS */ - if (nss_qdisc_init(sch, &q->nq, NSS_SHAPER_NODE_TYPE_HTB, 0, accel_mode) < 0) { + if (nss_qdisc_init(sch, &q->nq, NSS_SHAPER_NODE_TYPE_HTB, 0, accel_mode, extack) < 0) { nss_qdisc_error("failed to initialize htb qdisc %x in nss", sch->handle); return -EINVAL; } @@ -987,7 +1052,11 @@ static int nss_htb_init_qdisc(struct Qdi /* * Tune HTB parameters */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) if (nss_htb_change_qdisc(sch, opt) < 0) { +#else + if (nss_htb_change_qdisc(sch, opt, extack) < 0) { +#endif nss_qdisc_destroy(&q->nq); return -EINVAL; } @@ -1016,7 +1085,8 @@ static int nss_htb_dump_qdisc(struct Qdi qopt.accel_mode = nss_qdisc_accel_mode_get(&q->nq); nss_qdisc_info("r2q = %u accel_mode = %u", qopt.r2q, qopt.accel_mode); - opts = nla_nest_start(skb, TCA_OPTIONS); + + opts = nss_qdisc_nla_nest_start(skb, TCA_OPTIONS); if (!opts || nla_put(skb, TCA_NSSHTB_QDISC_PARMS, sizeof(qopt), &qopt)) { goto nla_put_failure; } @@ -1032,9 +1102,18 @@ static int nss_htb_dump_qdisc(struct Qdi * nss_htb_enqueue() * Enqueues a skb to htb qdisc. */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) static int nss_htb_enqueue(struct sk_buff *skb, struct Qdisc *sch) +#else +static int nss_htb_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) +#endif { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) return nss_qdisc_enqueue(skb, sch); +#else + return nss_qdisc_enqueue(skb, sch, to_free); +#endif } /* @@ -1046,6 +1125,7 @@ static struct sk_buff *nss_htb_dequeue(s return nss_qdisc_dequeue(sch); } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) /* * nss_htb_drop() * Drops a single skb from linux queue, if not empty. 
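One thing the new extack argument to nss_qdisc_init() and the init/change callbacks enables, although the patch does not use it yet, is returning a readable error to tc instead of a bare -EINVAL. For example (message text hypothetical), the !qopt failure above could become:

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0))
    if (!qopt) {
        NL_SET_ERR_MSG_MOD(extack, "invalid nsshtb qdisc parameters");
        return -EINVAL;
    }
#endif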
@@ -1057,6 +1137,7 @@ static unsigned int nss_htb_drop(struct nss_qdisc_trace("drop called on htb qdisc %x\n", sch->handle); return nss_qdisc_drop(sch); } +#endif /* * Registration structure for htb class @@ -1067,9 +1148,17 @@ const struct Qdisc_class_ops nss_htb_cla .graft = nss_htb_graft_class, .leaf = nss_htb_leaf_class, .qlen_notify = nss_htb_qlen_notify, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) .get = nss_htb_get_class, .put = nss_htb_put_class, +#else + .find = nss_htb_search_class, +#endif +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) .tcf_chain = nss_qdisc_tcf_chain, +#else + .tcf_block = nss_qdisc_tcf_block, +#endif .bind_tcf = nss_qdisc_tcf_bind, .unbind_tcf = nss_qdisc_tcf_unbind, .dump = nss_htb_dump_class, @@ -1090,7 +1179,9 @@ struct Qdisc_ops nss_htb_qdisc_ops __rea .enqueue = nss_htb_enqueue, .dequeue = nss_htb_dequeue, .peek = qdisc_peek_dequeued, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) .drop = nss_htb_drop, +#endif .cl_ops = &nss_htb_class_ops, .priv_size = sizeof(struct nss_htb_sched_data), .owner = THIS_MODULE --- a/nss_qdisc/nss_ppe.c +++ b/nss_qdisc/nss_ppe.c @@ -1773,7 +1773,7 @@ int nss_ppe_set_parent(struct Qdisc *sch struct net_device *dev = qdisc_dev(sch); struct nss_qdisc *parent_nq = NULL; struct Qdisc *parent_qdisc = NULL; - unsigned long parent_class; + unsigned long parent_class = 0; /* * PPE Qdisc cannot be attached to NSS Qdisc. @@ -1812,8 +1812,11 @@ int nss_ppe_set_parent(struct Qdisc *sch return NSS_PPE_QDISC_PARENT_NOT_EXISTING; } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) parent_class = parent_qdisc->ops->cl_ops->get(parent_qdisc, parent); - +#else + parent_class = parent_qdisc->ops->cl_ops->find(parent_qdisc, parent); +#endif if (!parent_class) { nq->parent = NULL; nss_qdisc_info("HW qdisc/class %px cannot be attached to non-existing class %x\n", nq->qdisc, parent); @@ -1822,7 +1825,9 @@ int nss_ppe_set_parent(struct Qdisc *sch } nq->parent = (struct nss_qdisc *)parent_class; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) parent_qdisc->ops->cl_ops->put(parent_qdisc, parent_class); +#endif } } @@ -2200,14 +2205,22 @@ fail: * nss_ppe_fallback_to_nss() * Calls the initialization of NSS Qdisc when PPE initialization fails. */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) int nss_ppe_fallback_to_nss(struct nss_qdisc *nq, struct nlattr *opt) +#else +int nss_ppe_fallback_to_nss(struct nss_qdisc *nq, struct nlattr *opt, struct netlink_ext_ack *extack) +#endif { nss_qdisc_destroy(nq); memset(&nq->npq, 0, sizeof(struct nss_ppe_qdisc)); nq->ppe_init_failed = true; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) if (nq->qdisc->ops->init(nq->qdisc, opt) < 0) { +#else + if (nq->qdisc->ops->init(nq->qdisc, opt, extack) < 0) { +#endif nss_qdisc_warning("Fallback to NSS Qdisc failed.\n"); return -EINVAL; } --- a/nss_qdisc/nss_ppe.h +++ b/nss_qdisc/nss_ppe.h @@ -269,7 +269,11 @@ extern int nss_ppe_configure(struct nss_ * nss_ppe_fallback_to_nss() * Calls the initialization of NSS Qdisc when PPE initialization fails. */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) extern int nss_ppe_fallback_to_nss(struct nss_qdisc *nq, struct nlattr *opt); +#else +extern int nss_ppe_fallback_to_nss(struct nss_qdisc *nq, struct nlattr *opt, struct netlink_ext_ack *extack); +#endif /* * nss_ppe_destroy() --- a/nss_qdisc/nss_prio.c +++ b/nss_qdisc/nss_prio.c @@ -37,9 +37,18 @@ static struct nla_policy nss_prio_policy * nss_prio_enqueue() * Enqueues a skb to nssprio qdisc. 
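The nss_ppe_set_parent() change above is a good illustration of the 4.14 class-ops shift this patch tracks everywhere: the old get()/put() pair took and released a reference on the class, while the newer find() returns it without taking one, which is why the matching put() disappears. In outline (hypothetical helper name, mirroring the parent lookup done in nss_ppe.c):

static struct nss_qdisc *nss_example_parent_lookup(struct Qdisc *sch, u32 classid)
{
    unsigned long cl;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0))
    cl = sch->ops->cl_ops->get(sch, classid);    /* takes a reference */
    if (cl)
        sch->ops->cl_ops->put(sch, cl);          /* release it once done */
#else
    cl = sch->ops->cl_ops->find(sch, classid);   /* no reference taken */
#endif

    return (struct nss_qdisc *)cl;
}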
*/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) static int nss_prio_enqueue(struct sk_buff *skb, struct Qdisc *sch) +#else +static int nss_prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) +#endif { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) return nss_qdisc_enqueue(skb, sch); +#else + return nss_qdisc_enqueue(skb, sch, to_free); +#endif } /* @@ -51,6 +60,7 @@ static struct sk_buff *nss_prio_dequeue( return nss_qdisc_dequeue(sch); } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) /* * nss_prio_drop() * Drops a single skb from linux queue, if not empty. @@ -61,6 +71,7 @@ static unsigned int nss_prio_drop(struct { return nss_qdisc_drop(sch); } +#endif /* * nss_prio_peek() @@ -117,7 +128,11 @@ static void nss_prio_destroy(struct Qdis /* * We can now destroy it */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0)) qdisc_destroy(q->queues[i]); +#else + qdisc_put(q->queues[i]); +#endif } /* @@ -157,8 +172,14 @@ static int nss_prio_get_max_bands(struct * nss_prio_change() * Function call to configure the nssprio parameters */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) static int nss_prio_change(struct Qdisc *sch, struct nlattr *opt) +#else +static int nss_prio_change(struct Qdisc *sch, struct nlattr *opt, + struct netlink_ext_ack *extack) +#endif { + struct nlattr *tb[TCA_NSSPRIO_MAX + 1]; struct nss_prio_sched_data *q; struct tc_nssprio_qopt *qopt; @@ -180,7 +201,11 @@ static int nss_prio_change(struct Qdisc return 0; } - qopt = nss_qdisc_qopt_get(opt, nss_prio_policy, TCA_NSSPRIO_MAX, TCA_NSSPRIO_PARMS); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) + qopt = nss_qdisc_qopt_get(opt, nss_prio_policy, tb, TCA_NSSPRIO_MAX, TCA_NSSPRIO_PARMS); +#else + qopt = nss_qdisc_qopt_get(opt, nss_prio_policy, tb, TCA_NSSPRIO_MAX, TCA_NSSPRIO_PARMS, extack); +#endif if (!qopt) { return -EINVAL; } @@ -209,9 +234,17 @@ static int nss_prio_change(struct Qdisc * nss_prio_init() * Initializes the nssprio qdisc */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) static int nss_prio_init(struct Qdisc *sch, struct nlattr *opt) { + struct netlink_ext_ack *extack = NULL; +#else +static int nss_prio_init(struct Qdisc *sch, struct nlattr *opt, + struct netlink_ext_ack *extack) +{ +#endif struct nss_prio_sched_data *q = qdisc_priv(sch); + struct nlattr *tb[TCA_NSSPRIO_MAX + 1]; struct tc_nssprio_qopt *qopt; int i; unsigned int accel_mode; @@ -223,21 +256,30 @@ static int nss_prio_init(struct Qdisc *s if (!opt) { accel_mode = TCA_NSS_ACCEL_MODE_PPE; } else { - qopt = nss_qdisc_qopt_get(opt, nss_prio_policy, TCA_NSSPRIO_MAX, TCA_NSSPRIO_PARMS); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) + qopt = nss_qdisc_qopt_get(opt, nss_prio_policy, tb, TCA_NSSPRIO_MAX, TCA_NSSPRIO_PARMS); +#else + qopt = nss_qdisc_qopt_get(opt, nss_prio_policy, tb, TCA_NSSPRIO_MAX, TCA_NSSPRIO_PARMS, extack); +#endif if (!qopt) { return -EINVAL; } accel_mode = qopt->accel_mode; } - if (nss_qdisc_init(sch, &q->nq, NSS_SHAPER_NODE_TYPE_PRIO, 0, accel_mode) < 0) { + if (nss_qdisc_init(sch, &q->nq, NSS_SHAPER_NODE_TYPE_PRIO, 0, accel_mode, extack) < 0) + { return -EINVAL; } nss_qdisc_info("Nssprio initialized - handle %x parent %x\n", sch->handle, sch->parent); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) if (nss_prio_change(sch, opt) < 0) { +#else + if (nss_prio_change(sch, opt, extack) < 0) { +#endif nss_qdisc_destroy(&q->nq); return -EINVAL; } @@ -263,7 +305,7 @@ static int nss_prio_dump(struct Qdisc *s qopt.bands = q->bands; qopt.accel_mode = 
nss_qdisc_accel_mode_get(&q->nq); - opts = nla_nest_start(skb, TCA_OPTIONS); + opts = nss_qdisc_nla_nest_start(skb, TCA_OPTIONS); if (opts == NULL || nla_put(skb, TCA_NSSPRIO_PARMS, sizeof(qopt), &qopt)) { goto nla_put_failure; } @@ -279,8 +321,14 @@ nla_put_failure: * nss_prio_graft() * Replaces existing child qdisc with the new qdisc that is passed. */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) static int nss_prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, struct Qdisc **old) +#else +static int nss_prio_graft(struct Qdisc *sch, unsigned long arg, + struct Qdisc *new, struct Qdisc **old, + struct netlink_ext_ack *extack) +#endif { struct nss_prio_sched_data *q = qdisc_priv(sch); struct nss_qdisc *nq_new = qdisc_priv(new); @@ -365,6 +413,7 @@ static struct Qdisc *nss_prio_leaf(struc return q->queues[band]; } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) /* * nss_prio_get() * Returns the band if provided the classid. @@ -390,6 +439,24 @@ static void nss_prio_put(struct Qdisc *s { nss_qdisc_info("Inside prio put\n"); } +#else +/* + * nss_prio_search() + * Returns the band if provided the classid. + */ +static unsigned long nss_prio_search(struct Qdisc *sch, u32 classid) +{ + struct nss_prio_sched_data *q = qdisc_priv(sch); + unsigned long band = TC_H_MIN(classid); + + nss_qdisc_info("Inside get. Handle - %x Classid - %x Band %lu Available band %u\n", sch->handle, classid, band, q->bands); + + if (band > q->bands) + return 0; + + return band; +} +#endif /* * nss_prio_walk() @@ -446,7 +513,7 @@ static int nss_prio_dump_class_stats(str cl_q = q->queues[cl - 1]; cl_q->qstats.qlen = cl_q->q.qlen; - if (nss_qdisc_gnet_stats_copy_basic(d, &cl_q->bstats) < 0 || + if (nss_qdisc_gnet_stats_copy_basic(sch, d, &cl_q->bstats) < 0 || nss_qdisc_gnet_stats_copy_queue(d, &cl_q->qstats) < 0) return -1; @@ -460,9 +527,17 @@ static int nss_prio_dump_class_stats(str const struct Qdisc_class_ops nss_prio_class_ops = { .graft = nss_prio_graft, .leaf = nss_prio_leaf, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) .get = nss_prio_get, .put = nss_prio_put, +#else + .find = nss_prio_search, +#endif +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) .tcf_chain = nss_qdisc_tcf_chain, +#else + .tcf_block = nss_qdisc_tcf_block, +#endif .bind_tcf = nss_qdisc_tcf_bind, .unbind_tcf = nss_qdisc_tcf_unbind, .walk = nss_prio_walk, @@ -481,7 +556,9 @@ struct Qdisc_ops nss_prio_qdisc_ops __re .enqueue = nss_prio_enqueue, .dequeue = nss_prio_dequeue, .peek = nss_prio_peek, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) .drop = nss_prio_drop, +#endif .init = nss_prio_init, .reset = nss_prio_reset, .destroy = nss_prio_destroy, --- a/nss_qdisc/nss_qdisc.c +++ b/nss_qdisc/nss_qdisc.c @@ -1,6 +1,6 @@ /* ************************************************************************** - * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved. * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all copies. @@ -927,7 +927,11 @@ static inline void nss_qdisc_add_to_tail * We do not use the qdisc_enqueue_tail() API here in order * to prevent stats from getting updated by the API. 
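The .tcf_chain/.tcf_block split in the class-ops tables above ties into the filter-attach API change: before this driver's 5.4 cut-off the op hands back a tcf_proto chain pointer, afterwards a tcf_block (and carries an extack). The two wrappers live in the common nss_qdisc code; their presumed prototypes, for reference only:

#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0))
extern struct tcf_proto __rcu **nss_qdisc_tcf_chain(struct Qdisc *sch, unsigned long arg);
#else
extern struct tcf_block *nss_qdisc_tcf_block(struct Qdisc *sch, unsigned long cl,
                                             struct netlink_ext_ack *extack);
#endif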
*/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) __skb_queue_tail(&sch->q, skb); +#else + __qdisc_enqueue_tail(skb, &sch->q); +#endif spin_unlock_bh(&nq->bounce_protection_lock); }; @@ -942,7 +946,11 @@ static inline void nss_qdisc_add_to_tail * We do not use the qdisc_enqueue_tail() API here in order * to prevent stats from getting updated by the API. */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) __skb_queue_tail(&sch->q, skb); +#else + __qdisc_enqueue_tail(skb, &sch->q); +#endif }; /* @@ -964,10 +972,12 @@ static inline struct sk_buff *nss_qdisc_ * We use __skb_dequeue() to ensure that * stats don't get updated twice. */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)) skb = __skb_dequeue(&sch->q); - +#else + skb = __qdisc_dequeue_head(&sch->q); +#endif spin_unlock_bh(&nq->bounce_protection_lock); - return skb; }; @@ -981,7 +991,11 @@ static inline struct sk_buff *nss_qdisc_ * We use __skb_dequeue() to ensure that * stats don't get updated twice. */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)) return __skb_dequeue(&sch->q); +#else + return __qdisc_dequeue_head(&sch->q); +#endif }; /* @@ -1059,24 +1073,33 @@ struct Qdisc *nss_qdisc_replace(struct Q * nss_qdisc_qopt_get() * Extracts qopt from opt. */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) +void *nss_qdisc_qopt_get(struct nlattr *opt, struct nla_policy *policy, + struct nlattr *tb[], uint32_t tca_max, uint32_t tca_params) +#else void *nss_qdisc_qopt_get(struct nlattr *opt, struct nla_policy *policy, - uint32_t tca_max, uint32_t tca_params) + struct nlattr *tb[], uint32_t tca_max, uint32_t tca_params, struct netlink_ext_ack *extack) +#endif { - struct nlattr *na[tca_max + 1]; int err; if (!opt) { return NULL; } - err = nla_parse_nested(na, tca_max, opt, policy); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) + err = nla_parse_nested(tb, tca_max, opt, policy); +#else + err = nla_parse_nested_deprecated(tb, tca_max, opt, policy, extack); +#endif + if (err < 0) return NULL; - if (na[tca_params] == NULL) + if (tb[tca_params] == NULL) return NULL; - return nla_data(na[tca_params]); + return nla_data(tb[tca_params]); } /* @@ -1102,16 +1125,17 @@ struct sk_buff *nss_qdisc_peek(struct Qd struct sk_buff *skb; if (!nq->is_virtual) { - skb = skb_peek(&sch->q); + skb = qdisc_peek_head(sch); } else { spin_lock_bh(&nq->bounce_protection_lock); - skb = skb_peek(&sch->q); + skb = qdisc_peek_head(sch); spin_unlock_bh(&nq->bounce_protection_lock); } return skb; } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) /* * nss_qdisc_drop() * Called to drop the packet at the head of queue @@ -1134,6 +1158,7 @@ unsigned int nss_qdisc_drop(struct Qdisc return ret; } +#endif /* * nss_qdisc_reset() @@ -1181,7 +1206,11 @@ static bool nss_qdisc_iterate_fl(struct return 0; } - status = tc_classify(skb, tcf, &res, false); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0)) + status = tcf_classify(skb, tcf, &res, false); +#else + status = tcf_classify(skb, NULL, tcf, &res, false); +#endif if ((status == TC_ACT_STOLEN) || (status == TC_ACT_QUEUED)) { return 1; } @@ -1203,7 +1232,11 @@ static bool nss_qdisc_iterate_fl(struct * nss_qdisc_enqueue() * Generic enqueue call for enqueuing packets into NSS for shaping */ -int nss_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) +extern int nss_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch) +#else +extern int nss_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) +#endif { struct nss_qdisc *nq = 
qdisc_priv(sch); nss_tx_status_t status; @@ -1263,11 +1296,18 @@ int nss_qdisc_enqueue(struct sk_buff *sk /* * Skip the shaping of already shaped packets. */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) if (skb->tc_verd & TC_NCLS_NSS) { skb->tc_verd = CLR_TC_NCLS_NSS(skb->tc_verd); nss_qdisc_mark_and_schedule(nq->qdisc, skb); return NET_XMIT_SUCCESS; } +#else + if (skb_skip_tc_classify_offload(skb)) { + nss_qdisc_mark_and_schedule(nq->qdisc, skb); + return NET_XMIT_SUCCESS; + } +#endif if (!nq->is_virtual) { /* @@ -1316,12 +1356,15 @@ enqueue_drop: * We were unable to transmit the packet for bridge shaping. * We therefore drop it. */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) kfree_skb(skb); spin_lock_bh(&nq->lock); sch->qstats.drops++; spin_unlock_bh(&nq->lock); - +#else + qdisc_drop(skb, sch, to_free); +#endif return NET_XMIT_DROP; } @@ -1882,7 +1925,12 @@ void nss_qdisc_destroy(struct nss_qdisc /* * Destroy any attached filter over qdisc. */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) tcf_destroy_chain(&nq->filter_list); +#else + tcf_block_put(nq->block); +#endif + #if defined(NSS_QDISC_PPE_SUPPORT) if (nq->mode == NSS_QDISC_MODE_PPE) { nss_ppe_destroy(nq); @@ -1960,12 +2008,19 @@ void nss_qdisc_destroy(struct nss_qdisc } /* - * nss_qdisc_init() + * __nss_qdisc_init() * Initializes a shaper in NSS, based on the position of this qdisc (child or root) * and if its a normal interface or a bridge interface. */ -int nss_qdisc_init(struct Qdisc *sch, struct nss_qdisc *nq, nss_shaper_node_type_t type, uint32_t classid, uint32_t accel_mode) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) +int __nss_qdisc_init(struct Qdisc *sch, struct nss_qdisc *nq, nss_shaper_node_type_t type, uint32_t classid, uint32_t accel_mode) +{ +#else +int __nss_qdisc_init(struct Qdisc *sch, struct nss_qdisc *nq, nss_shaper_node_type_t type, uint32_t classid, uint32_t accel_mode, + struct netlink_ext_ack *extack) { + int err; +#endif struct Qdisc *root; u32 parent; nss_tx_status_t rc; @@ -1978,7 +2033,6 @@ int nss_qdisc_init(struct Qdisc *sch, st bool mode_ppe = false; #endif bool igs_put = false; - if (accel_mode >= TCA_NSS_ACCEL_MODE_MAX) { nss_qdisc_warning("Qdisc %px (type %d) accel_mode:%u should be < %u\n", sch, nq->type, accel_mode, TCA_NSS_ACCEL_MODE_MAX); @@ -2037,8 +2091,9 @@ int nss_qdisc_init(struct Qdisc *sch, st /* * Initialize filter list. */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) RCU_INIT_POINTER(nq->filter_list, NULL); - +#endif /* * If we are a class, then classid is used as the qos tag. * Else the qdisc handle will be used as the qos tag. @@ -2073,6 +2128,25 @@ int nss_qdisc_init(struct Qdisc *sch, st * or on a net device that is represented by a virtual NSS interface (e.g. WIFI) */ dev = qdisc_dev(sch); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) + /* + * Currently filter addition is only supported over IFB interfaces. + * Therefore, perform tcf block allocation (which is used for storing + * filter list) only if the input net device is an IFB device. + */ + if (netif_is_ifb_dev(dev)) { + err = tcf_block_get(&nq->block, &nq->filter_list, sch, extack); + if (err) { + nss_qdisc_error("%px: Unable to initialize tcf_block\n", &nq->block); + return -1; + } + } else { + RCU_INIT_POINTER(nq->filter_list, NULL); + nq->block = NULL; + } +#endif + nss_qdisc_info("Qdisc %px (type %d) init dev: %px\n", nq->qdisc, nq->type, dev); /* @@ -2098,6 +2172,8 @@ int nss_qdisc_init(struct Qdisc *sch, st * This is to prevent mixing NSS and PPE qdisc with linux qdisc. 
*/ if ((parent != TC_H_ROOT) && (root->ops->owner != THIS_MODULE)) { + nss_qdisc_warning("parent (%d) and TC_H_ROOT (%d)\n", parent, TC_H_ROOT); + nss_qdisc_warning("root->ops->owner (%px) and THIS_MODULE (%px)\n", root->ops->owner, THIS_MODULE); nss_qdisc_warning("NSS qdisc %px (type %d) used along with non-nss qdiscs," " or the interface is currently down", nq->qdisc, nq->type); } @@ -2394,7 +2470,7 @@ int nss_qdisc_init(struct Qdisc *sch, st if (igs_put) { nss_igs_module_put(); } - nss_qdisc_error("init for qdisc %x timedout!\n", nq->qos_tag); + nss_qdisc_error("init for qdisc %x timed out!\n", nq->qos_tag); return -1; } @@ -2465,6 +2541,20 @@ init_fail: } /* + * nss_qdisc_init() + * Initializes the nss qdisc based on the position of the qdisc (child or root) + */ +int nss_qdisc_init(struct Qdisc *sch, struct nss_qdisc *nq, nss_shaper_node_type_t type, uint32_t classid, + uint32_t accel_mode, void *extack) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) + return __nss_qdisc_init(sch, nq, type, classid, accel_mode); +#else + return __nss_qdisc_init(sch, nq, type, classid, accel_mode, extack); +#endif +} + +/* * nss_qdisc_basic_stats_callback() * Invoked after getting basic stats */ @@ -2476,7 +2566,11 @@ static void nss_qdisc_basic_stats_callba struct gnet_stats_basic_packed *bstats; struct gnet_stats_queue *qstats; struct nss_shaper_node_stats_response *response; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)) atomic_t *refcnt; +#else + refcount_t *refcnt; +#endif if (nim->cm.response != NSS_CMN_RESPONSE_ACK) { nss_qdisc_warning("Qdisc %px (type %d): Receive stats FAILED - " @@ -2539,7 +2633,11 @@ static void nss_qdisc_basic_stats_callba * All access to nq fields below do not need lock protection. They * do not get manipulated on different thread contexts. */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)) if (atomic_read(refcnt) == 0) { +#else + if (refcount_read(refcnt) == 0) { +#endif atomic_sub(1, &nq->pending_stat_requests); wake_up(&nq->wait_queue); return; @@ -2561,9 +2659,18 @@ static void nss_qdisc_basic_stats_callba * nss_qdisc_get_stats_timer_callback() * Invoked periodically to get updated stats */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)) static void nss_qdisc_get_stats_timer_callback(unsigned long int data) +#else +static void nss_qdisc_get_stats_timer_callback(struct timer_list *tm) +#endif { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)) struct nss_qdisc *nq = (struct nss_qdisc *)data; +#else + struct nss_qdisc *nq = from_timer(nq, tm, stats_get_timer); +#endif + nss_tx_status_t rc; struct nss_if_msg nim; int msg_type; @@ -2610,9 +2717,14 @@ void nss_qdisc_start_basic_stats_polling return; } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)) init_timer(&nq->stats_get_timer); nq->stats_get_timer.function = nss_qdisc_get_stats_timer_callback; nq->stats_get_timer.data = (unsigned long)nq; +#else + timer_setup(&nq->stats_get_timer, nss_qdisc_get_stats_timer_callback, 0); +#endif + nq->stats_get_timer.expires = jiffies + HZ; atomic_set(&nq->pending_stat_requests, 1); add_timer(&nq->stats_get_timer); @@ -2650,13 +2762,15 @@ void nss_qdisc_stop_basic_stats_polling( * nss_qdisc_gnet_stats_copy_basic() * Wrapper around gnet_stats_copy_basic() */ -int nss_qdisc_gnet_stats_copy_basic(struct gnet_dump *d, +int nss_qdisc_gnet_stats_copy_basic(struct Qdisc *sch, struct gnet_dump *d, struct gnet_stats_basic_packed *b) { #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 0)) return gnet_stats_copy_basic(d, b); -#else +#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) return
gnet_stats_copy_basic(d, NULL, b); +#else + return gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, NULL, b); #endif } @@ -2695,10 +2809,8 @@ static int nss_qdisc_if_event_cb(struct switch (event) { case NETDEV_BR_JOIN: - nss_qdisc_info("Reveived NETDEV_BR_JOIN on interface %s\n", - dev->name); case NETDEV_BR_LEAVE: - nss_qdisc_info("Reveived NETDEV_BR_LEAVE on interface %s\n", + nss_qdisc_info("Received NETDEV_BR_JOIN/NETDEV_BR_LEAVE on interface %s\n", dev->name); br = nss_qdisc_get_dev_master(dev); if_num = nss_cmn_get_interface_number(nss_qdisc_ctx, dev); @@ -2754,6 +2866,7 @@ static int nss_qdisc_if_event_cb(struct return NOTIFY_DONE; } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) /* * nss_qdisc_tcf_chain() * Return the filter list of qdisc. @@ -2778,8 +2891,29 @@ struct tcf_proto __rcu **nss_qdisc_tcf_c if (nq->is_root) { return &(nq->filter_list); } + + return NULL; +} +#else +/* + * nss_qdisc_tcf_block() + * Return the block containing chain of qdisc. + */ +struct tcf_block *nss_qdisc_tcf_block(struct Qdisc *sch, unsigned long cl, struct netlink_ext_ack *extack) +{ + struct nss_qdisc *nq = qdisc_priv(sch); + + /* + * Currently, support is available only for tc filter iterations + * at root qdisc. + */ + if (nq->is_root) { + return nq->block; + } + return NULL; } +#endif /* * nss_qdisc_tcf_bind() --- a/nss_qdisc/nss_qdisc.h +++ b/nss_qdisc/nss_qdisc.h @@ -1,6 +1,6 @@ /* ************************************************************************** - * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved. + * Copyright (c) 2014-2018, 2020 The Linux Foundation. All rights reserved. * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all copies. 
@@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -27,6 +28,9 @@ #include #include #include +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) +#include +#endif #if defined(NSS_QDISC_PPE_SUPPORT) #include "nss_ppe.h" @@ -186,7 +190,11 @@ struct nss_qdisc { */ struct gnet_stats_basic_packed bstats; /* Basic class statistics */ struct gnet_stats_queue qstats; /* Qstats for use by classes */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)) atomic_t refcnt; /* Reference count for class use */ +#else + refcount_t refcnt; /* Reference count for class use */ +#endif struct timer_list stats_get_timer; /* Timer used to poll for stats */ atomic_t pending_stat_requests; /* Number of pending stats responses */ wait_queue_head_t wait_queue; /* Wait queue used to wait on responses from the NSS */ @@ -201,6 +209,9 @@ struct nss_qdisc { */ #endif struct tcf_proto __rcu *filter_list; /* Filter list */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) + struct tcf_block *block; /* Tcf block used to store the filter list on 5.4+ kernels */ +#endif }; /* @@ -238,11 +249,81 @@ enum nss_qdisc_hybrid_mode { }; /* + * nss_qdisc_nla_nest_start() + * Returns the container attribute + */ +static inline struct nlattr *nss_qdisc_nla_nest_start(struct sk_buff *skb, int attrtype) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) + return nla_nest_start(skb, attrtype); +#else + return nla_nest_start_noflag(skb, attrtype); +#endif +} + +/* + * nss_qdisc_atomic_sub() + * Atomically decrements the ref count by 1 + */ +static inline void nss_qdisc_atomic_sub(struct nss_qdisc *nq) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)) + atomic_sub(1, &nq->refcnt); +#else + atomic_sub(1, &nq->refcnt.refs); +#endif +} + +/* + * nss_qdisc_atomic_sub_return() + * Atomically decrements the ref count by 1 and returns the new ref count + */ +static inline int nss_qdisc_atomic_sub_return(struct nss_qdisc *nq) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)) + return atomic_sub_return(1, &nq->refcnt); +#else + return atomic_sub_return(1, &nq->refcnt.refs); +#endif +} + +/* + * nss_qdisc_atomic_set() + * Atomically sets the ref count to 1 + */ +static inline void nss_qdisc_atomic_set(struct nss_qdisc *nq) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)) + atomic_set(&nq->refcnt, 1); +#else + refcount_set(&nq->refcnt, 1); +#endif +} + +/* + * nss_qdisc_put() + * Destroys the qdisc + */ +static inline void nss_qdisc_put(struct Qdisc *sch) +{ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0)) + qdisc_destroy(sch); +#else + qdisc_put(sch); +#endif +} + +/* * nss_qdisc_qopt_get() * Extracts qopt from opt.
*/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) extern void *nss_qdisc_qopt_get(struct nlattr *opt, struct nla_policy *policy, - uint32_t tca_max, uint32_t tca_params); + struct nlattr *tb[], uint32_t tca_max, uint32_t tca_params); +#else +extern void *nss_qdisc_qopt_get(struct nlattr *opt, struct nla_policy *policy, + struct nlattr *tb[], uint32_t tca_max, uint32_t tca_params, struct netlink_ext_ack *extack); +#endif /* * nss_qdisc_mode_get() @@ -256,11 +337,13 @@ extern uint8_t nss_qdisc_accel_mode_get( */ extern struct sk_buff *nss_qdisc_peek(struct Qdisc *sch); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) /* * nss_qdisc_drop() * Called to drop the packet at the head of queue */ extern unsigned int nss_qdisc_drop(struct Qdisc *sch); +#endif /* * nss_qdisc_reset() @@ -272,7 +355,11 @@ extern void nss_qdisc_reset(struct Qdisc * nss_qdisc_enqueue() * Generic enqueue call for enqueuing packets into NSS for shaping */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) extern int nss_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch); +#else +extern int nss_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free); +#endif /* * nss_qdisc_dequeue() @@ -338,7 +425,8 @@ extern void nss_qdisc_destroy(struct nss * Initializes a shaper in NSS, based on the position of this qdisc (child or root) * and if its a normal interface or a bridge interface. */ -extern int nss_qdisc_init(struct Qdisc *sch, struct nss_qdisc *nq, nss_shaper_node_type_t type, uint32_t classid, uint32_t accel_mode); +extern int nss_qdisc_init(struct Qdisc *sch, struct nss_qdisc *nq, nss_shaper_node_type_t type, uint32_t classid, uint32_t accel_mode, + void *extack); /* * nss_qdisc_start_basic_stats_polling() @@ -356,8 +444,8 @@ extern void nss_qdisc_stop_basic_stats_p * nss_qdisc_gnet_stats_copy_basic() * Wrapper around gnet_stats_copy_basic() */ -extern int nss_qdisc_gnet_stats_copy_basic(struct gnet_dump *d, - struct gnet_stats_basic_packed *b); +extern int nss_qdisc_gnet_stats_copy_basic(struct Qdisc *sch, + struct gnet_dump *d, struct gnet_stats_basic_packed *b); /* * nss_qdisc_gnet_stats_copy_queue() @@ -373,11 +461,19 @@ extern int nss_qdisc_gnet_stats_copy_que extern struct Qdisc *nss_qdisc_replace(struct Qdisc *sch, struct Qdisc *new, struct Qdisc **pold); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) /* * nss_qdisc_tcf_chain() * Return the filter list of qdisc. */ extern struct tcf_proto __rcu **nss_qdisc_tcf_chain(struct Qdisc *sch, unsigned long arg); +#else +/* + * nss_qdisc_tcf_block() + * Return the block containing chain of qdisc. 
+ */ +extern struct tcf_block *nss_qdisc_tcf_block(struct Qdisc *sch, unsigned long cl, struct netlink_ext_ack *extack); +#endif /* * nss_qdisc_tcf_bind() --- a/nss_qdisc/nss_tbl.c +++ b/nss_qdisc/nss_tbl.c @@ -29,9 +29,18 @@ static struct nla_policy nss_tbl_policy[ [TCA_NSSTBL_PARMS] = { .len = sizeof(struct tc_nsstbl_qopt) }, }; +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) static int nss_tbl_enqueue(struct sk_buff *skb, struct Qdisc *sch) +#else +static int nss_tbl_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) +#endif { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) return nss_qdisc_enqueue(skb, sch); +#else + return nss_qdisc_enqueue(skb, sch, to_free); +#endif } static struct sk_buff *nss_tbl_dequeue(struct Qdisc *sch) @@ -39,10 +48,12 @@ static struct sk_buff *nss_tbl_dequeue(s return nss_qdisc_dequeue(sch); } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) static unsigned int nss_tbl_drop(struct Qdisc *sch) { return nss_qdisc_drop(sch); } +#endif static struct sk_buff *nss_tbl_peek(struct Qdisc *sch) { @@ -77,7 +88,7 @@ static void nss_tbl_destroy(struct Qdisc /* * Now we can destroy our child qdisc */ - qdisc_destroy(q->qdisc); + nss_qdisc_put(q->qdisc); /* * Stop the polling of basic stats and destroy qdisc. @@ -87,7 +98,11 @@ static void nss_tbl_destroy(struct Qdisc } #if defined(NSS_QDISC_PPE_SUPPORT) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) static int nss_tbl_ppe_change(struct Qdisc *sch, struct nlattr *opt) +#else +static int nss_tbl_ppe_change(struct Qdisc *sch, struct nlattr *opt, struct netlink_ext_ack *extack) +#endif { struct nss_tbl_sched_data *q = qdisc_priv(sch); struct nss_qdisc *nq = &q->nq; @@ -123,8 +138,12 @@ fail: /* * PPE qdisc config failed, try to initialize in NSS. 
*/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) if (nss_ppe_fallback_to_nss(nq, opt)) { - nss_qdisc_warning("nss_tbl %x fallback to nss failed\n", sch->handle); +#else + if (nss_ppe_fallback_to_nss(nq, opt, extack)) { +#endif + nss_qdisc_warning("nss_tbl %x fallback to nss failed\n", sch->handle); return -EINVAL; } @@ -132,9 +151,15 @@ fail: } #endif +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) static int nss_tbl_change(struct Qdisc *sch, struct nlattr *opt) +#else +static int nss_tbl_change(struct Qdisc *sch, struct nlattr *opt, + struct netlink_ext_ack *extack) +#endif { struct nss_tbl_sched_data *q = qdisc_priv(sch); + struct nlattr *tb[TCA_NSSTBL_MAX + 1]; struct tc_nsstbl_qopt *qopt; struct nss_if_msg nim; struct net_device *dev = qdisc_dev(sch); @@ -143,7 +168,11 @@ static int nss_tbl_change(struct Qdisc * return -EINVAL; } - qopt = nss_qdisc_qopt_get(opt, nss_tbl_policy, TCA_NSSTBL_MAX, TCA_NSSTBL_PARMS); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) + qopt = nss_qdisc_qopt_get(opt, nss_tbl_policy, tb, TCA_NSSTBL_MAX, TCA_NSSTBL_PARMS); +#else + qopt = nss_qdisc_qopt_get(opt, nss_tbl_policy, tb, TCA_NSSTBL_MAX, TCA_NSSTBL_PARMS, extack); +#endif if (!qopt) { return -EINVAL; } @@ -178,7 +207,11 @@ static int nss_tbl_change(struct Qdisc * #if defined(NSS_QDISC_PPE_SUPPORT) if (q->nq.mode == NSS_QDISC_MODE_PPE) { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) if (nss_tbl_ppe_change(sch, opt) < 0) { +#else + if (nss_tbl_ppe_change(sch, opt, extack) < 0) { +#endif nss_qdisc_warning("nss_tbl %x SSDK scheduler config failed\n", sch->handle); return -EINVAL; } @@ -216,9 +249,17 @@ static int nss_tbl_change(struct Qdisc * return 0; } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) static int nss_tbl_init(struct Qdisc *sch, struct nlattr *opt) { + struct netlink_ext_ack *extack = NULL; +#else +static int nss_tbl_init(struct Qdisc *sch, struct nlattr *opt, + struct netlink_ext_ack *extack) +{ +#endif struct nss_tbl_sched_data *q = qdisc_priv(sch); + struct nlattr *tb[TCA_NSSTBL_MAX + 1]; struct tc_nsstbl_qopt *qopt; if (!opt) { @@ -227,15 +268,25 @@ static int nss_tbl_init(struct Qdisc *sc q->qdisc = &noop_qdisc; - qopt = nss_qdisc_qopt_get(opt, nss_tbl_policy, TCA_NSSTBL_MAX, TCA_NSSTBL_PARMS); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) + qopt = nss_qdisc_qopt_get(opt, nss_tbl_policy, tb, TCA_NSSTBL_MAX, TCA_NSSTBL_PARMS); +#else + qopt = nss_qdisc_qopt_get(opt, nss_tbl_policy, tb, TCA_NSSTBL_MAX, TCA_NSSTBL_PARMS, extack); +#endif if (!qopt) { return -EINVAL; } - if (nss_qdisc_init(sch, &q->nq, NSS_SHAPER_NODE_TYPE_TBL, 0, qopt->accel_mode) < 0) + if (nss_qdisc_init(sch, &q->nq, NSS_SHAPER_NODE_TYPE_TBL, 0, qopt->accel_mode, extack) < 0) + { return -EINVAL; + } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) if (nss_tbl_change(sch, opt) < 0) { +#else + if (nss_tbl_change(sch, opt, extack) < 0) { +#endif nss_qdisc_info("Failed to configure tbl\n"); nss_qdisc_destroy(&q->nq); return -EINVAL; @@ -262,7 +313,8 @@ static int nss_tbl_dump(struct Qdisc *sc opt.accel_mode = nss_qdisc_accel_mode_get(&q->nq); nss_qdisc_info("Nsstbl dumping"); - opts = nla_nest_start(skb, TCA_OPTIONS); + + opts = nss_qdisc_nla_nest_start(skb, TCA_OPTIONS); if (opts == NULL || nla_put(skb, TCA_NSSTBL_PARMS, sizeof(opt), &opt)) { goto nla_put_failure; } @@ -286,8 +338,13 @@ static int nss_tbl_dump_class(struct Qdi return 0; } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) static int nss_tbl_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, struct Qdisc 
**old) +#else +static int nss_tbl_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, + struct Qdisc **old, struct netlink_ext_ack *extack) +#endif { struct nss_tbl_sched_data *q = qdisc_priv(sch); struct nss_qdisc *nq_new = (struct nss_qdisc *)qdisc_priv(new); @@ -339,6 +396,7 @@ static struct Qdisc *nss_tbl_leaf(struct return q->qdisc; } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) static unsigned long nss_tbl_get(struct Qdisc *sch, u32 classid) { return 1; @@ -347,6 +405,12 @@ static unsigned long nss_tbl_get(struct static void nss_tbl_put(struct Qdisc *sch, unsigned long arg) { } +#else +static unsigned long nss_tbl_search(struct Qdisc *sch, u32 classid) +{ + return 1; +} +#endif static void nss_tbl_walk(struct Qdisc *sch, struct qdisc_walker *walker) { @@ -364,9 +428,17 @@ static void nss_tbl_walk(struct Qdisc *s const struct Qdisc_class_ops nss_tbl_class_ops = { .graft = nss_tbl_graft, .leaf = nss_tbl_leaf, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) .get = nss_tbl_get, .put = nss_tbl_put, +#else + .find = nss_tbl_search, +#endif +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) .tcf_chain = nss_qdisc_tcf_chain, +#else + .tcf_block = nss_qdisc_tcf_block, +#endif .bind_tcf = nss_qdisc_tcf_bind, .unbind_tcf = nss_qdisc_tcf_unbind, .walk = nss_tbl_walk, @@ -381,7 +453,9 @@ struct Qdisc_ops nss_tbl_qdisc_ops __rea .enqueue = nss_tbl_enqueue, .dequeue = nss_tbl_dequeue, .peek = nss_tbl_peek, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) .drop = nss_tbl_drop, +#endif .init = nss_tbl_init, .reset = nss_tbl_reset, .destroy = nss_tbl_destroy, --- a/nss_qdisc/nss_wred.c +++ b/nss_qdisc/nss_wred.c @@ -55,9 +55,18 @@ static struct nla_policy nss_wred_policy * nss_wred_enqueue() * Enqueue API for nsswred qdisc */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) static int nss_wred_enqueue(struct sk_buff *skb, struct Qdisc *sch) +#else +static int nss_wred_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) +#endif { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) return nss_qdisc_enqueue(skb, sch); +#else + return nss_qdisc_enqueue(skb, sch, to_free); +#endif } /* @@ -69,6 +78,7 @@ static struct sk_buff *nss_wred_dequeue( return nss_qdisc_dequeue(sch); } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) /* * nss_wred_drop() * Drops a packet from HLOS queue. @@ -78,6 +88,7 @@ static unsigned int nss_wred_drop(struct nss_qdisc_info("nsswred dropping"); return nss_qdisc_drop(sch); } +#endif /* * nss_wred_reset() @@ -111,7 +122,11 @@ static void nss_wred_destroy(struct Qdis * nss_wred_ppe_change() * Function call to configure the nssred parameters for ppe qdisc. */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) static int nss_wred_ppe_change(struct Qdisc *sch, struct nlattr *opt) +#else +static int nss_wred_ppe_change(struct Qdisc *sch, struct nlattr *opt, struct netlink_ext_ack *extack) +#endif { struct nss_wred_sched_data *q = qdisc_priv(sch); struct nss_qdisc *nq = &q->nq; @@ -159,8 +174,12 @@ fail: /* * Fallback to nss qdisc if PPE Qdisc configuration failed at init time. 
*/ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) if (nss_ppe_fallback_to_nss(&q->nq, opt) < 0) { - nss_qdisc_warning("nss_wred %x fallback to nss failed\n", sch->handle); +#else + if (nss_ppe_fallback_to_nss(&q->nq, opt, extack) < 0) { +#endif + nss_qdisc_warning("nss_wred %x fallback to nss failed\n", sch->handle); return -EINVAL; } return 0; @@ -171,9 +190,15 @@ fail: * nss_wred_change() * Function call to configure the nsswred parameters */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) static int nss_wred_change(struct Qdisc *sch, struct nlattr *opt) +#else +static int nss_wred_change(struct Qdisc *sch, struct nlattr *opt, + struct netlink_ext_ack *extack) +#endif { struct nss_wred_sched_data *q = qdisc_priv(sch); + struct nlattr *tb[TCA_NSSWRED_MAX + 1]; struct tc_nsswred_qopt *qopt; struct nss_if_msg nim; @@ -181,7 +206,11 @@ static int nss_wred_change(struct Qdisc return -EINVAL; } - qopt = nss_qdisc_qopt_get(opt, nss_wred_policy, TCA_NSSWRED_MAX, TCA_NSSWRED_PARMS); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) + qopt = nss_qdisc_qopt_get(opt, nss_wred_policy, tb, TCA_NSSWRED_MAX, TCA_NSSWRED_PARMS); +#else + qopt = nss_qdisc_qopt_get(opt, nss_wred_policy, tb, TCA_NSSWRED_MAX, TCA_NSSWRED_PARMS, extack); +#endif if (!qopt) { return -EINVAL; } @@ -253,7 +282,11 @@ static int nss_wred_change(struct Qdisc #if defined(NSS_QDISC_PPE_SUPPORT) if (q->nq.mode == NSS_QDISC_MODE_PPE) { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) if (nss_wred_ppe_change(sch, opt) < 0) { +#else + if (nss_wred_ppe_change(sch, opt, extack) < 0) { +#endif nss_qdisc_warning("nss_wred %px params validate and save failed\n", sch); return -EINVAL; } @@ -298,16 +331,28 @@ static int nss_wred_change(struct Qdisc * nss_wred_init() * Init the nsswred qdisc */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) static int nss_wred_init(struct Qdisc *sch, struct nlattr *opt) { + struct netlink_ext_ack *extack = NULL; +#else +static int nss_wred_init(struct Qdisc *sch, struct nlattr *opt, + struct netlink_ext_ack *extack) +{ +#endif struct nss_qdisc *nq = qdisc_priv(sch); + struct nlattr *tb[TCA_NSSWRED_MAX + 1]; struct tc_nsswred_qopt *qopt; if (opt == NULL) { return -EINVAL; } - qopt = nss_qdisc_qopt_get(opt, nss_wred_policy, TCA_NSSWRED_MAX, TCA_NSSWRED_PARMS); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) + qopt = nss_qdisc_qopt_get(opt, nss_wred_policy, tb, TCA_NSSWRED_MAX, TCA_NSSWRED_PARMS); +#else + qopt = nss_qdisc_qopt_get(opt, nss_wred_policy, tb, TCA_NSSWRED_MAX, TCA_NSSWRED_PARMS, extack); +#endif if (!qopt) { return -EINVAL; } @@ -315,11 +360,17 @@ static int nss_wred_init(struct Qdisc *s nss_qdisc_info("Initializing Wred - type %d\n", NSS_SHAPER_NODE_TYPE_WRED); nss_wred_reset(sch); - if (nss_qdisc_init(sch, nq, NSS_SHAPER_NODE_TYPE_WRED, 0, qopt->accel_mode) < 0) + if (nss_qdisc_init(sch, nq, NSS_SHAPER_NODE_TYPE_WRED, 0, qopt->accel_mode, extack) < 0) + { return -EINVAL; + } nss_qdisc_info("NSS wred initialized - handle %x parent %x\n", sch->handle, sch->parent); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) if (nss_wred_change(sch, opt) < 0) { +#else + if (nss_wred_change(sch, opt, extack) < 0) { +#endif nss_qdisc_destroy(nq); return -EINVAL; } @@ -374,7 +425,7 @@ static int nss_wred_dump(struct Qdisc *s opt.set_default = q->set_default; opt.accel_mode = nss_qdisc_accel_mode_get(&q->nq); - opts = nla_nest_start(skb, TCA_OPTIONS); + opts = nss_qdisc_nla_nest_start(skb, TCA_OPTIONS); if (opts == NULL || nla_put(skb, TCA_NSSWRED_PARMS, sizeof(opt), &opt)) { goto 
nla_put_failure; } @@ -405,7 +456,9 @@ struct Qdisc_ops nss_red_qdisc_ops __rea .enqueue = nss_wred_enqueue, .dequeue = nss_wred_dequeue, .peek = nss_wred_peek, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) .drop = nss_wred_drop, +#endif .init = nss_wred_init, .reset = nss_wred_reset, .destroy = nss_wred_destroy, @@ -423,7 +476,9 @@ struct Qdisc_ops nss_wred_qdisc_ops __re .enqueue = nss_wred_enqueue, .dequeue = nss_wred_dequeue, .peek = nss_wred_peek, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) .drop = nss_wred_drop, +#endif .init = nss_wred_init, .reset = nss_wred_reset, .destroy = nss_wred_destroy, --- a/nss_qdisc/nss_wrr.c +++ b/nss_qdisc/nss_wrr.c @@ -84,7 +84,7 @@ static void nss_wrr_destroy_class(struct /* * And now we destroy the child. */ - qdisc_destroy(cl->qdisc); + nss_qdisc_put(cl->qdisc); /* * Stop the stats polling timer and free class @@ -106,9 +106,15 @@ static void nss_wrr_destroy_class(struct * nss_wrr_class_params_validate_and_save() * Validates and saves the class configuration parameters. */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) static int nss_wrr_class_params_validate_and_save(struct Qdisc *sch, struct nlattr **tca, uint32_t *quantum) +#else +static int nss_wrr_class_params_validate_and_save(struct Qdisc *sch, struct nlattr **tca, + uint32_t *quantum, struct netlink_ext_ack *extack) +#endif { + struct nlattr *tb[TCA_NSSWRR_MAX + 1]; struct nlattr *opt = tca[TCA_OPTIONS]; struct tc_nsswrr_class_qopt *qopt; struct net_device *dev = qdisc_dev(sch); @@ -123,7 +129,11 @@ static int nss_wrr_class_params_validate return -EINVAL; } - qopt = nss_qdisc_qopt_get(opt, nss_wrr_policy, TCA_NSSWRR_MAX, TCA_NSSWRR_CLASS_PARMS); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) + qopt = nss_qdisc_qopt_get(opt, nss_wrr_policy, tb, TCA_NSSWRR_MAX, TCA_NSSWRR_CLASS_PARMS); +#else + qopt = nss_qdisc_qopt_get(opt, nss_wrr_policy, tb, TCA_NSSWRR_MAX, TCA_NSSWRR_CLASS_PARMS, extack); +#endif if (!qopt) { return -EINVAL; } @@ -218,9 +228,16 @@ static int nss_wrr_ppe_change_class(stru } #endif +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) static int nss_wrr_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca, unsigned long *arg) { + struct netlink_ext_ack *extack = NULL; +#else +static int nss_wrr_change_class(struct Qdisc *sch, u32 classid, u32 parentid, + struct nlattr **tca, unsigned long *arg, struct netlink_ext_ack *extack) +{ +#endif struct nss_wrr_sched_data *q = qdisc_priv(sch); struct nss_wrr_class_data *cl = (struct nss_wrr_class_data *)*arg; struct nss_if_msg nim_config; @@ -230,7 +247,11 @@ static int nss_wrr_change_class(struct Q nss_qdisc_info("Changing nss_wrr class %u\n", classid); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) if (nss_wrr_class_params_validate_and_save(sch, tca, &quantum) < 0) { +#else + if (nss_wrr_class_params_validate_and_save(sch, tca, &quantum, extack) < 0) { +#endif nss_qdisc_warning("validation of configuration parameters for wrr class %x failed\n", sch->handle); return -EINVAL; @@ -275,7 +296,7 @@ static int nss_wrr_change_class(struct Q * reference count should not be 0. */ cl->qdisc = &noop_qdisc; - atomic_set(&cl->nq.refcnt, 1); + nss_qdisc_atomic_set(&cl->nq); *arg = (unsigned long)cl; nss_qdisc_info("Adding classid %u to qdisc %px hash queue %px\n", classid, sch, &q->clhash); @@ -286,7 +307,8 @@ static int nss_wrr_change_class(struct Q * here. 
*/ cl->nq.parent = &q->nq; - if (nss_qdisc_init(sch, &cl->nq, NSS_SHAPER_NODE_TYPE_WRR_GROUP, classid, accel_mode) < 0) { + if (nss_qdisc_init(sch, &cl->nq, NSS_SHAPER_NODE_TYPE_WRR_GROUP, classid, accel_mode, extack) < 0) + { nss_qdisc_error("Nss init for class %u failed\n", classid); return -EINVAL; } @@ -384,7 +406,11 @@ failure: return -EINVAL; } +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0)) +static int nss_wrr_delete_class(struct Qdisc *sch, unsigned long arg, struct netlink_ext_ack *extack) +#else static int nss_wrr_delete_class(struct Qdisc *sch, unsigned long arg) +#endif { struct nss_wrr_sched_data *q = qdisc_priv(sch); struct nss_wrr_class_data *cl = (struct nss_wrr_class_data *)arg; @@ -412,7 +438,9 @@ static int nss_wrr_delete_class(struct Q sch_tree_lock(sch); qdisc_reset(cl->qdisc); qdisc_class_hash_remove(&q->clhash, &cl->cl_common); - refcnt = atomic_sub_return(1, &cl->nq.refcnt); + + refcnt = nss_qdisc_atomic_sub_return(&cl->nq); + sch_tree_unlock(sch); if (!refcnt) { nss_qdisc_error("Reference count should not be zero for class %px\n", cl); @@ -421,8 +449,13 @@ static int nss_wrr_delete_class(struct Q return 0; } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) static int nss_wrr_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, struct Qdisc **old) +#else +static int nss_wrr_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, + struct Qdisc **old, struct netlink_ext_ack *extack) +#endif { struct nss_wrr_sched_data *q = qdisc_priv(sch); struct nss_wrr_class_data *cl = (struct nss_wrr_class_data *)arg; @@ -504,6 +537,7 @@ static void nss_wrr_qlen_notify(struct Q */ } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) static unsigned long nss_wrr_get_class(struct Qdisc *sch, u32 classid) { struct nss_wrr_class_data *cl = nss_wrr_find_class(classid, sch); @@ -526,10 +560,24 @@ static void nss_wrr_put_class(struct Qdi * We are safe to destroy the qdisc if the reference count * goes down to 0. 
*/ - if (atomic_sub_return(1, &cl->nq.refcnt) == 0) { + if (nss_qdisc_atomic_sub_return(&cl->nq) == 0) { nss_wrr_destroy_class(sch, cl); } } +#else +static unsigned long nss_wrr_search_class(struct Qdisc *sch, u32 classid) +{ + struct nss_wrr_class_data *cl = nss_wrr_find_class(classid, sch); + + nss_qdisc_info("Get nss_wrr class %px - class match = %px\n", sch, cl); + + if (cl != NULL) { + atomic_add(1, &cl->nq.refcnt.refs); + } + + return (unsigned long)cl; +} +#endif static int nss_wrr_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb, struct tcmsg *tcm) @@ -550,7 +598,7 @@ static int nss_wrr_dump_class(struct Qdi tcm->tcm_handle = cl->cl_common.classid; tcm->tcm_info = cl->qdisc->handle; - opts = nla_nest_start(skb, TCA_OPTIONS); + opts = nss_qdisc_nla_nest_start(skb, TCA_OPTIONS); if (opts == NULL || nla_put(skb, TCA_NSSWRR_CLASS_PARMS, sizeof(qopt), &qopt)) { goto nla_put_failure; } @@ -565,7 +613,7 @@ static int nss_wrr_dump_class_stats(stru { struct nss_qdisc *nq = (struct nss_qdisc *)arg; - if (nss_qdisc_gnet_stats_copy_basic(d, &nq->bstats) < 0 || + if (nss_qdisc_gnet_stats_copy_basic(sch, d, &nq->bstats) < 0 || nss_qdisc_gnet_stats_copy_queue(d, &nq->qstats) < 0) { return -1; } @@ -600,9 +648,17 @@ static void nss_wrr_walk(struct Qdisc *s } } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) static int nss_wrr_init_qdisc(struct Qdisc *sch, struct nlattr *opt) { + struct netlink_ext_ack *extack = NULL; +#else +static int nss_wrr_init_qdisc(struct Qdisc *sch, struct nlattr *opt, + struct netlink_ext_ack *extack) +{ +#endif struct nss_wrr_sched_data *q = qdisc_priv(sch); + struct nlattr *tb[TCA_NSSWRR_MAX + 1]; int err; struct nss_if_msg nim; struct tc_nsswrr_qopt *qopt; @@ -620,7 +676,11 @@ static int nss_wrr_init_qdisc(struct Qdi qdisc_class_hash_insert(&q->clhash, &q->root.cl_common); qdisc_class_hash_grow(sch, &q->clhash); - qopt = nss_qdisc_qopt_get(opt, nss_wrr_policy, TCA_NSSWRR_MAX, TCA_NSSWRR_QDISC_PARMS); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) + qopt = nss_qdisc_qopt_get(opt, nss_wrr_policy, tb, TCA_NSSWRR_MAX, TCA_NSSWRR_QDISC_PARMS); +#else + qopt = nss_qdisc_qopt_get(opt, nss_wrr_policy, tb, TCA_NSSWRR_MAX, TCA_NSSWRR_QDISC_PARMS, extack); +#endif if (!qopt) { nss_qdisc_warning("Failed to parse input"); return -EINVAL; @@ -629,7 +689,7 @@ static int nss_wrr_init_qdisc(struct Qdi /* * Initialize the NSSWRR shaper in NSS */ - if (nss_qdisc_init(sch, &q->nq, NSS_SHAPER_NODE_TYPE_WRR, 0, qopt->accel_mode) < 0) { + if (nss_qdisc_init(sch, &q->nq, NSS_SHAPER_NODE_TYPE_WRR, 0, qopt->accel_mode, extack) < 0) { nss_qdisc_warning("Failed init nss_wrr qdisc"); return -EINVAL; } @@ -669,8 +729,14 @@ static int nss_wrr_init_qdisc(struct Qdi return 0; } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) static int nss_wrr_change_qdisc(struct Qdisc *sch, struct nlattr *opt) +#else +static int nss_wrr_change_qdisc(struct Qdisc *sch, struct nlattr *opt, + struct netlink_ext_ack *extack) +#endif { + struct nlattr *tb[TCA_NSSWRR_MAX + 1]; struct nss_wrr_sched_data *q; struct tc_nsswrr_qopt *qopt; @@ -680,7 +746,11 @@ static int nss_wrr_change_qdisc(struct Q return -EINVAL; } - qopt = nss_qdisc_qopt_get(opt, nss_wrr_policy, TCA_NSSWRR_MAX, TCA_NSSWRR_QDISC_PARMS); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)) + qopt = nss_qdisc_qopt_get(opt, nss_wrr_policy, tb, TCA_NSSWRR_MAX, TCA_NSSWRR_QDISC_PARMS); +#else + qopt = nss_qdisc_qopt_get(opt, nss_wrr_policy, tb, TCA_NSSWRR_MAX, TCA_NSSWRR_QDISC_PARMS, extack); +#endif if (!qopt) { return -EINVAL; 
} @@ -743,7 +813,7 @@ static void nss_wrr_destroy_qdisc(struct * Reduce refcnt by 1 before destroying. This is to * ensure that polling of stat stops properly. */ - atomic_sub(1, &cl->nq.refcnt); + nss_qdisc_atomic_sub(&cl->nq); /* * Detach class before destroying it. We dont check for noop qdisc here @@ -794,7 +864,7 @@ static int nss_wrr_dump_qdisc(struct Qdi opt.accel_mode = nss_qdisc_accel_mode_get(&q->nq); - opts = nla_nest_start(skb, TCA_OPTIONS); + opts = nss_qdisc_nla_nest_start(skb, TCA_OPTIONS); if (opts == NULL) { goto nla_put_failure; } @@ -809,9 +879,18 @@ nla_put_failure: return -EMSGSIZE; } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) static int nss_wrr_enqueue(struct sk_buff *skb, struct Qdisc *sch) +#else +static int nss_wrr_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) +#endif { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) return nss_qdisc_enqueue(skb, sch); +#else + return nss_qdisc_enqueue(skb, sch, to_free); +#endif } static struct sk_buff *nss_wrr_dequeue(struct Qdisc *sch) @@ -819,11 +898,13 @@ static struct sk_buff *nss_wrr_dequeue(s return nss_qdisc_dequeue(sch); } +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) static unsigned int nss_wrr_drop(struct Qdisc *sch) { nss_qdisc_info("Nsswrr drop\n"); return nss_qdisc_drop(sch); } +#endif const struct Qdisc_class_ops nss_wrr_class_ops = { .change = nss_wrr_change_class, @@ -831,9 +912,17 @@ const struct Qdisc_class_ops nss_wrr_cla .graft = nss_wrr_graft_class, .leaf = nss_wrr_leaf_class, .qlen_notify = nss_wrr_qlen_notify, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) .get = nss_wrr_get_class, .put = nss_wrr_put_class, +#else + .find = nss_wrr_search_class, +#endif +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) .tcf_chain = nss_qdisc_tcf_chain, +#else + .tcf_block = nss_qdisc_tcf_block, +#endif .bind_tcf = nss_qdisc_tcf_bind, .unbind_tcf = nss_qdisc_tcf_unbind, .dump = nss_wrr_dump_class, @@ -851,7 +940,9 @@ struct Qdisc_ops nss_wrr_qdisc_ops __rea .enqueue = nss_wrr_enqueue, .dequeue = nss_wrr_dequeue, .peek = qdisc_peek_dequeued, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) .drop = nss_wrr_drop, +#endif .cl_ops = &nss_wrr_class_ops, .priv_size = sizeof(struct nss_wrr_sched_data), .owner = THIS_MODULE @@ -863,9 +954,17 @@ const struct Qdisc_class_ops nss_wfq_cla .graft = nss_wrr_graft_class, .leaf = nss_wrr_leaf_class, .qlen_notify = nss_wrr_qlen_notify, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) .get = nss_wrr_get_class, .put = nss_wrr_put_class, +#else + .find = nss_wrr_search_class, +#endif +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) .tcf_chain = nss_qdisc_tcf_chain, +#else + .tcf_block = nss_qdisc_tcf_block, +#endif .bind_tcf = nss_qdisc_tcf_bind, .unbind_tcf = nss_qdisc_tcf_unbind, .dump = nss_wrr_dump_class, @@ -883,7 +982,9 @@ struct Qdisc_ops nss_wfq_qdisc_ops __rea .enqueue = nss_wrr_enqueue, .dequeue = nss_wrr_dequeue, .peek = qdisc_peek_dequeued, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)) .drop = nss_wrr_drop, +#endif .cl_ops = &nss_wrr_class_ops, .priv_size = sizeof(struct nss_wrr_sched_data), .owner = THIS_MODULE