/*
* net/sched/act_api.c Packet action API.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Author: Jamal Hadi Salim
*
*
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/netlink.h>
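
/* Resolve an action's "goto chain" destination: when the verdict is
 * TC_ACT_GOTO_CHAIN, the low bits of tcfa_action carry the target chain
 * index. Taking a reference on that chain keeps it alive for as long as
 * the action points at it.
 */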
static int tcf_action_goto_chain_init(struct tc_action *a, struct tcf_proto *tp)
{
u32 chain_index = a->tcfa_action & TC_ACT_EXT_VAL_MASK;
if (!tp)
return -EINVAL;
a->goto_chain = tcf_chain_get_by_act(tp->chain->block, chain_index);
if (!a->goto_chain)
return -ENOMEM;
return 0;
}
static void tcf_action_goto_chain_fini(struct tc_action *a)
{
tcf_chain_put_by_act(a->goto_chain);
}
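
/* Steer the classification result to the head of the target chain's
 * filter list; the caller then continues classification from there.
 */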
static void tcf_action_goto_chain_exec(const struct tc_action *a,
struct tcf_result *res)
{
const struct tcf_chain *chain = a->goto_chain;
res->goto_tp = rcu_dereference_bh(chain->filter_chain);
}
static void tcf_free_cookie_rcu(struct rcu_head *p)
{
struct tc_cookie *cookie = container_of(p, struct tc_cookie, rcu);
kfree(cookie->data);
kfree(cookie);
}
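
/* Publish a new user cookie and free the old one only after an RCU grace
 * period, so readers dereferencing the cookie under rcu_read_lock() never
 * see it half-freed.
 */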
static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
struct tc_cookie *new_cookie)
{
struct tc_cookie *old;
old = xchg((__force struct tc_cookie **)old_cookie, new_cookie);
if (old)
call_rcu(&old->rcu, tcf_free_cookie_rcu);
}
/* XXX: For standalone actions, we don't need an extra RCU grace period here,
 * because actions are always connected to filters and filters are already
 * destroyed in RCU callbacks, so after an RCU grace period actions are
 * already disconnected from filters. Readers can no longer find us then.
 */
static void free_tcf(struct tc_action *p)
{
free_percpu(p->cpu_bstats);
free_percpu(p->cpu_bstats_hw);
free_percpu(p->cpu_qstats);
tcf_set_action_cookie(&p->act_cookie, NULL);
if (p->goto_chain)
tcf_action_goto_chain_fini(p);
kfree(p);
}
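
/* Type-specific teardown followed by release of the generic part; called
 * once the last reference to the action has been dropped.
 */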
static void tcf_action_cleanup(struct tc_action *p)
{
if (p->ops->cleanup)
p->ops->cleanup(p);
gen_kill_estimator(&p->tcfa_rate_est);
free_tcf(p);
}
static int __tcf_action_put(struct tc_action *p, bool bind)
{
struct tcf_idrinfo *idrinfo = p->idrinfo;
if (refcount_dec_and_mutex_lock(&p->tcfa_refcnt, &idrinfo->lock)) {
if (bind)
atomic_dec(&p->tcfa_bindcnt);
idr_remove(&idrinfo->action_idr, p->tcfa_index);
mutex_unlock(&idrinfo->lock);
tcf_action_cleanup(p);
return 1;
}
if (bind)
atomic_dec(&p->tcfa_bindcnt);
return 0;
}
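
/* Returns ACT_P_DELETED when the last reference was dropped and the action
 * destroyed, 0 when references remain, or -EPERM when a strict release is
 * attempted on an action that classifiers are still bound to.
 */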
int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
{
int ret = 0;
	/* Release with strict==1 and bind==0 is only called through the act API
	 * interface (classifiers always bind). The only case when an action
	 * with a positive reference count and a zero bind count can exist is
	 * when it was also created with the act API (unbinding the last
	 * classifier will destroy the action if it was created by a
	 * classifier). So the only case when the bind count can change after
	 * the initial check is when an unbound action is destroyed by the act
	 * API while a classifier concurrently binds to an action with the same
	 * id. This results either in creation of a new action (same behavior
	 * as before), or in reuse of the existing action if the concurrent
	 * process increments the reference count before the action is deleted.
	 * Both scenarios are acceptable.
	 */
if (p) {
if (!bind && strict && atomic_read(&p->tcfa_bindcnt) > 0)
return -EPERM;
if (__tcf_action_put(p, bind))
ret = ACT_P_DELETED;
}
return ret;
}
EXPORT_SYMBOL(__tcf_idr_release);
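
/* Upper bound on the netlink attribute space common to all action kinds:
 * kind name, optional user cookie, basic and queue stats, and timestamps.
 * The result is used to size the reply skb before an action is dumped.
 */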
static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
{
struct tc_cookie *act_cookie;
u32 cookie_len = 0;
rcu_read_lock();
act_cookie = rcu_dereference(act->act_cookie);
if (act_cookie)
cookie_len = nla_total_size(act_cookie->len);
rcu_read_unlock();
return nla_total_size(0) /* action number nested */
+ nla_total_size(IFNAMSIZ) /* TCA_ACT_KIND */
+ cookie_len /* TCA_ACT_COOKIE */
+ nla_total_size(0) /* TCA_ACT_STATS nested */
/* TCA_STATS_BASIC */
+ nla_total_size_64bit(sizeof(struct gnet_stats_basic))
/* TCA_STATS_QUEUE */
+ nla_total_size_64bit(sizeof(struct gnet_stats_queue))
+ nla_total_size(0) /* TCA_OPTIONS nested */
+ nla_total_size(sizeof(struct tcf_t)); /* TCA_GACT_TM */
}
static size_t tcf_action_full_attrs_size(size_t sz)
{
return NLMSG_HDRLEN /* struct nlmsghdr */
+ sizeof(struct tcamsg)
+ nla_total_size(0) /* TCA_ACT_TAB nested */
+ sz;
}
static size_t tcf_action_fill_size(const struct tc_action *act)
{
size_t sz = tcf_action_shared_attrs_size(act);
if (act->ops->get_fill_size)
return act->ops->get_fill_size(act) + sz;
return sz;
}
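
/* Dump one batch of actions from the idr into @skb. cb->args[0] carries
 * the resume index across netlink dump calls, cb->args[2] the dump flags
 * and cb->args[3] an optional "active since" jiffies filter. Unless
 * TCA_FLAG_LARGE_DUMP_ON is set, at most TCA_ACT_MAX_PRIO actions are
 * emitted per call.
 */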
static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
struct netlink_callback *cb)
{
int err = 0, index = -1, s_i = 0, n_i = 0;
u32 act_flags = cb->args[2];
unsigned long jiffy_since = cb->args[3];
struct nlattr *nest;
struct idr *idr = &idrinfo->action_idr;
struct tc_action *p;
unsigned long id = 1;
mutex_lock(&idrinfo->lock);
s_i = cb->args[0];
idr_for_each_entry_ul(idr, p, id) {
index++;
if (index < s_i)
continue;
if (jiffy_since &&
time_after(jiffy_since,
(unsigned long)p->tcfa_tm.lastuse))
continue;
nest = nla_nest_start(skb, n_i);
if (!nest) {
index--;
goto nla_put_failure;
}
err = tcf_action_dump_1(skb, p, 0, 0);
if (err < 0) {
index--;
nlmsg_trim(skb, nest);
goto done;
}
nla_nest_end(skb, nest);
n_i++;
if (!(act_flags & TCA_FLAG_LARGE_DUMP_ON) &&
n_i >= TCA_ACT_MAX_PRIO)
goto done;
}
done:
if (index >= 0)
cb->args[0] = index + 1;
mutex_unlock(&idrinfo->lock);
if (n_i) {
if (act_flags & TCA_FLAG_LARGE_DUMP_ON)
cb->args[1] = n_i;
}
return n_i;
nla_put_failure:
nla_nest_cancel(skb, nest);
goto done;
}
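
/* "Unsafe" release variant: the caller must already hold idrinfo->lock
 * and is responsible for dropping the module reference when
 * ACT_P_DELETED is returned.
 */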
static int tcf_idr_release_unsafe(struct tc_action *p)
{
if (atomic_read(&p->tcfa_bindcnt) > 0)
return -EPERM;
if (refcount_dec_and_test(&p->tcfa_refcnt)) {
idr_remove(&p->idrinfo->action_idr, p->tcfa_index);
tcf_action_cleanup(p);
return ACT_P_DELETED;
}
return 0;
}
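
/* Flush all actions of one kind: walk the idr, release every action that
 * is not bound by a classifier, and report the number of deleted entries
 * via TCA_FCNT.
 */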
static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
const struct tc_action_ops *ops)
{
struct nlattr *nest;
int n_i = 0;
int ret = -EINVAL;
struct idr *idr = &idrinfo->action_idr;
struct tc_action *p;
unsigned long id = 1;
nest = nla_nest_start(skb, 0);
if (nest == NULL)
goto nla_put_failure;
if (nla_put_string(skb, TCA_KIND, ops->kind))
goto nla_put_failure;
mutex_lock(&idrinfo->lock);
idr_for_each_entry_ul(idr, p, id) {
ret = tcf_idr_release_unsafe(p);
if (ret == ACT_P_DELETED) {
module_put(ops->owner);
n_i++;
} else if (ret < 0) {
mutex_unlock(&idrinfo->lock);
goto nla_put_failure;
}
}
mutex_unlock(&idrinfo->lock);
if (nla_put_u32(skb, TCA_FCNT, n_i))
goto nla_put_failure;
nla_nest_end(skb, nest);
return n_i;
nla_put_failure:
nla_nest_cancel(skb, nest);
return ret;
}
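
/* Dispatch an RTM_GETACTION dump or an RTM_DELACTION flush for one action
 * kind. A per-netns action module typically delegates its .walk callback
 * straight to this helper; a minimal sketch (the "foo" names are
 * hypothetical):
 *
 *	static int tcf_foo_walker(struct net *net, struct sk_buff *skb,
 *				  struct netlink_callback *cb, int type,
 *				  const struct tc_action_ops *ops,
 *				  struct netlink_ext_ack *extack)
 *	{
 *		struct tc_action_net *tn = net_generic(net, foo_net_id);
 *
 *		return tcf_generic_walker(tn, skb, cb, type, ops, extack);
 *	}
 */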
int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
struct netlink_callback *cb, int type,
const struct tc_action_ops *ops,
struct netlink_ext_ack *extack)
{
struct tcf_idrinfo *idrinfo = tn->idrinfo;
	if (type == RTM_DELACTION) {
		return tcf_del_walker(idrinfo, skb, ops);
	} else if (type == RTM_GETACTION) {
		return tcf_dump_walker(idrinfo, skb, cb);
	} else {
		WARN(1, "tcf_generic_walker: unknown command %d\n", type);
		NL_SET_ERR_MSG(extack, "tcf_generic_walker: unknown command");
		return -EINVAL;
	}
}