mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git/
synced 2026-04-01 22:37:41 -04:00
Merge tag 'nf-26-04-01' of git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf
Pablo Neira Ayuso says:
====================
Netfilter fixes for net
The following patchset contains Netfilter fixes for net. Note that most
of the bugs fixed here are more than five years old, so the size of this
PR does not reflect an increase in recent regressions.
1) Flowtable hardware offload support in IPv6 can lead to an out-of-bounds
write when populating the rule action array in combination with
double-tagged vlan. Bump the maximum number of actions from 16 to 24 and
check that this limit is never exceeded, otherwise bail out. This bug
stems from the original flowtable hardware offload support.
2) nfnetlink_log does not include the netlink header size of the trailing
NLMSG_DONE message when calculating the skb size. From Florian Westphal.
3) Reject names in xt_cgroup and xt_rateest extensions which are not
nul-terminated. Also from Florian.
4) Use nla_strcmp in ipset lookup by set name, since IPSET_ATTR_NAME and
IPSET_ATTR_NAMEREF are of NLA_STRING type. From Florian Westphal.
5) When unregistering conntrack helpers, pass the helper that is going
away so the expectation cleanup is done accordingly, otherwise a
use-after-free is possible when accessing expectations that refer to the
helper that is gone. From Qi Tang.
6) Zero the expectation NAT fields when they are not set, to avoid leaking
kernel memory through the expectation netlink dump. Also from Qi Tang.
7) Use the master conntrack helper when creating expectations via
ctnetlink, ignoring the helper suggested through CTA_EXPECT_HELP_NAME.
This addresses a possible read of kernel memory past the expectation
object boundary.
8) Fix incorrect hash bucket release logic in ipset when the bucket is
logically empty: the bucket could be shrunk to size 0, which leads to
an out-of-bounds write on subsequent element additions.
From Yifan Wu.
9) Allow only x_tables extensions that explicitly declare NFPROTO_ARP
support to be used with the arp family. This avoids incorrect hook
number validation due to the non-overlapping arp and inet hook number
definitions.
10) Reject the immediate NF_QUEUE verdict in nf_tables. The userspace
nft tool always uses the nft_queue expression for queueing.
This also ensures the verdict cannot be used from the arp family,
which does not support it.
* tag 'nf-26-04-01' of git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf:
netfilter: nf_tables: reject immediate NF_QUEUE verdict
netfilter: x_tables: restrict xt_check_match/xt_check_target extensions for NFPROTO_ARP
netfilter: ipset: drop logically empty buckets in mtype_del
netfilter: ctnetlink: ignore explicit helper on new expectations
netfilter: ctnetlink: zero expect NAT fields when CTA_EXPECT_NAT absent
netfilter: nf_conntrack_helper: pass helper to expect cleanup
netfilter: ipset: use nla_strcmp for IPSET_ATTR_NAME attr
netfilter: x_tables: ensure names are nul-terminated
netfilter: nfnetlink_log: account for netlink header size
netfilter: flowtable: strictly check for maximum number of actions
====================
Link: https://patch.msgid.link/20260401103646.1015423-1-pablo@netfilter.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
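
As context for item 1 above, the core of the flowtable fix in the diff below is a
bounded action-entry allocator: flow_action_entry_next() now returns NULL once
NF_FLOW_RULE_ACTION_MAX (raised from 16 to 24) entries have been allocated, and
every caller propagates an error instead of writing past the array. A condensed
sketch of that pattern, restated from the patch that follows:

#define NF_FLOW_RULE_ACTION_MAX	24

/* Hand out the next slot in the flow rule's action array, or NULL when the
 * array is full.  Callers bail out (e.g. with -E2BIG) on NULL instead of
 * writing one entry past the end of the array.
 */
static inline struct flow_action_entry *
flow_action_entry_next(struct nf_flow_rule *flow_rule)
{
	int i;

	if (unlikely(flow_rule->rule->action.num_entries >= NF_FLOW_RULE_ACTION_MAX))
		return NULL;

	i = flow_rule->rule->action.num_entries++;

	return &flow_rule->rule->action.entries[i];
}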
@@ -309,7 +309,7 @@ enum {
 /* register and unregister set references */
 extern ip_set_id_t ip_set_get_byname(struct net *net,
-				     const char *name, struct ip_set **set);
+				     const struct nlattr *name, struct ip_set **set);
 extern void ip_set_put_byindex(struct net *net, ip_set_id_t index);
 extern void ip_set_name_byindex(struct net *net, ip_set_id_t index, char *name);
 extern ip_set_id_t ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index);

@@ -821,7 +821,7 @@ EXPORT_SYMBOL_GPL(ip_set_del);
  *
  */
 ip_set_id_t
-ip_set_get_byname(struct net *net, const char *name, struct ip_set **set)
+ip_set_get_byname(struct net *net, const struct nlattr *name, struct ip_set **set)
 {
 	ip_set_id_t i, index = IPSET_INVALID_ID;
 	struct ip_set *s;

@@ -830,7 +830,7 @@ ip_set_get_byname(struct net *net, const char *name, struct ip_set **set)
 	rcu_read_lock();
 	for (i = 0; i < inst->ip_set_max; i++) {
 		s = rcu_dereference(inst->ip_set_list)[i];
-		if (s && STRNCMP(s->name, name)) {
+		if (s && nla_strcmp(name, s->name) == 0) {
 			__ip_set_get(s);
 			index = i;
 			*set = s;
@@ -1098,7 +1098,7 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
 			if (!test_bit(i, n->used))
 				k++;
 		}
-		if (n->pos == 0 && k == 0) {
+		if (k == n->pos) {
 			t->hregion[r].ext_size -= ext_size(n->size, dsize);
 			rcu_assign_pointer(hbucket(t, key), NULL);
 			kfree_rcu(n, rcu);
@@ -367,7 +367,7 @@ list_set_uadt(struct ip_set *set, struct nlattr *tb[],
 	ret = ip_set_get_extensions(set, tb, &ext);
 	if (ret)
 		return ret;
-	e.id = ip_set_get_byname(map->net, nla_data(tb[IPSET_ATTR_NAME]), &s);
+	e.id = ip_set_get_byname(map->net, tb[IPSET_ATTR_NAME], &s);
 	if (e.id == IPSET_INVALID_ID)
 		return -IPSET_ERR_NAME;
 	/* "Loop detection" */

@@ -389,7 +389,7 @@ list_set_uadt(struct ip_set *set, struct nlattr *tb[],

 	if (tb[IPSET_ATTR_NAMEREF]) {
 		e.refid = ip_set_get_byname(map->net,
-					    nla_data(tb[IPSET_ATTR_NAMEREF]),
+					    tb[IPSET_ATTR_NAMEREF],
 					    &s);
 		if (e.refid == IPSET_INVALID_ID) {
 			ret = -IPSET_ERR_NAMEREF;
@@ -415,7 +415,7 @@ void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
 	 */
 	synchronize_rcu();

-	nf_ct_expect_iterate_destroy(expect_iter_me, NULL);
+	nf_ct_expect_iterate_destroy(expect_iter_me, me);
 	nf_ct_iterate_destroy(unhelp, me);

 	/* nf_ct_iterate_destroy() does an unconditional synchronize_rcu() as
@@ -2636,7 +2636,6 @@ static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {

 static struct nf_conntrack_expect *
 ctnetlink_alloc_expect(const struct nlattr *const cda[], struct nf_conn *ct,
-		       struct nf_conntrack_helper *helper,
 		       struct nf_conntrack_tuple *tuple,
 		       struct nf_conntrack_tuple *mask);

@@ -2865,7 +2864,6 @@ ctnetlink_glue_attach_expect(const struct nlattr *attr, struct nf_conn *ct,
 {
 	struct nlattr *cda[CTA_EXPECT_MAX+1];
 	struct nf_conntrack_tuple tuple, mask;
-	struct nf_conntrack_helper *helper = NULL;
 	struct nf_conntrack_expect *exp;
 	int err;

@@ -2879,17 +2877,8 @@ ctnetlink_glue_attach_expect(const struct nlattr *attr, struct nf_conn *ct,
 	if (err < 0)
 		return err;

-	if (cda[CTA_EXPECT_HELP_NAME]) {
-		const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
-
-		helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
-						    nf_ct_protonum(ct));
-		if (helper == NULL)
-			return -EOPNOTSUPP;
-	}
-
 	exp = ctnetlink_alloc_expect((const struct nlattr * const *)cda, ct,
-				     helper, &tuple, &mask);
+				     &tuple, &mask);
 	if (IS_ERR(exp))
 		return PTR_ERR(exp);

@@ -3528,11 +3517,11 @@ ctnetlink_parse_expect_nat(const struct nlattr *attr,

 static struct nf_conntrack_expect *
 ctnetlink_alloc_expect(const struct nlattr * const cda[], struct nf_conn *ct,
-		       struct nf_conntrack_helper *helper,
 		       struct nf_conntrack_tuple *tuple,
 		       struct nf_conntrack_tuple *mask)
 {
 	struct net *net = read_pnet(&ct->ct_net);
+	struct nf_conntrack_helper *helper;
 	struct nf_conntrack_expect *exp;
 	struct nf_conn_help *help;
 	u32 class = 0;

@@ -3542,7 +3531,11 @@ ctnetlink_alloc_expect(const struct nlattr * const cda[], struct nf_conn *ct,
 	if (!help)
 		return ERR_PTR(-EOPNOTSUPP);

-	if (cda[CTA_EXPECT_CLASS] && helper) {
+	helper = rcu_dereference(help->helper);
+	if (!helper)
+		return ERR_PTR(-EOPNOTSUPP);
+
+	if (cda[CTA_EXPECT_CLASS]) {
 		class = ntohl(nla_get_be32(cda[CTA_EXPECT_CLASS]));
 		if (class > helper->expect_class_max)
 			return ERR_PTR(-EINVAL);

@@ -3576,8 +3569,6 @@ ctnetlink_alloc_expect(const struct nlattr * const cda[], struct nf_conn *ct,
 #ifdef CONFIG_NF_CONNTRACK_ZONES
 	exp->zone = ct->zone;
 #endif
-	if (!helper)
-		helper = rcu_dereference(help->helper);
 	rcu_assign_pointer(exp->helper, helper);
 	exp->tuple = *tuple;
 	exp->mask.src.u3 = mask->src.u3;

@@ -3588,6 +3579,12 @@ ctnetlink_alloc_expect(const struct nlattr * const cda[], struct nf_conn *ct,
 						 exp, nf_ct_l3num(ct));
 		if (err < 0)
 			goto err_out;
+#if IS_ENABLED(CONFIG_NF_NAT)
+	} else {
+		memset(&exp->saved_addr, 0, sizeof(exp->saved_addr));
+		memset(&exp->saved_proto, 0, sizeof(exp->saved_proto));
+		exp->dir = 0;
+#endif
 	}
 	return exp;
 err_out:

@@ -3603,7 +3600,6 @@ ctnetlink_create_expect(struct net *net,
 {
 	struct nf_conntrack_tuple tuple, mask, master_tuple;
 	struct nf_conntrack_tuple_hash *h = NULL;
-	struct nf_conntrack_helper *helper = NULL;
 	struct nf_conntrack_expect *exp;
 	struct nf_conn *ct;
 	int err;

@@ -3629,33 +3625,7 @@ ctnetlink_create_expect(struct net *net,
 	ct = nf_ct_tuplehash_to_ctrack(h);

 	rcu_read_lock();
-	if (cda[CTA_EXPECT_HELP_NAME]) {
-		const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
-
-		helper = __nf_conntrack_helper_find(helpname, u3,
-						    nf_ct_protonum(ct));
-		if (helper == NULL) {
-			rcu_read_unlock();
-#ifdef CONFIG_MODULES
-			if (request_module("nfct-helper-%s", helpname) < 0) {
-				err = -EOPNOTSUPP;
-				goto err_ct;
-			}
-			rcu_read_lock();
-			helper = __nf_conntrack_helper_find(helpname, u3,
-							    nf_ct_protonum(ct));
-			if (helper) {
-				err = -EAGAIN;
-				goto err_rcu;
-			}
-			rcu_read_unlock();
-#endif
-			err = -EOPNOTSUPP;
-			goto err_ct;
-		}
-	}
-
-	exp = ctnetlink_alloc_expect(cda, ct, helper, &tuple, &mask);
+	exp = ctnetlink_alloc_expect(cda, ct, &tuple, &mask);
 	if (IS_ERR(exp)) {
 		err = PTR_ERR(exp);
 		goto err_rcu;

@@ -3665,8 +3635,8 @@ ctnetlink_create_expect(struct net *net,
 	nf_ct_expect_put(exp);
 err_rcu:
 	rcu_read_unlock();
 err_ct:
 	nf_ct_put(ct);

 	return err;
 }
@@ -14,6 +14,8 @@
 #include <net/netfilter/nf_conntrack_core.h>
 #include <net/netfilter/nf_conntrack_tuple.h>

+#define NF_FLOW_RULE_ACTION_MAX	24
+
 static struct workqueue_struct *nf_flow_offload_add_wq;
 static struct workqueue_struct *nf_flow_offload_del_wq;
 static struct workqueue_struct *nf_flow_offload_stats_wq;

@@ -216,7 +218,12 @@ static void flow_offload_mangle(struct flow_action_entry *entry,
 static inline struct flow_action_entry *
 flow_action_entry_next(struct nf_flow_rule *flow_rule)
 {
-	int i = flow_rule->rule->action.num_entries++;
+	int i;
+
+	if (unlikely(flow_rule->rule->action.num_entries >= NF_FLOW_RULE_ACTION_MAX))
+		return NULL;
+
+	i = flow_rule->rule->action.num_entries++;

 	return &flow_rule->rule->action.entries[i];
 }

@@ -234,6 +241,9 @@ static int flow_offload_eth_src(struct net *net,
 	u32 mask, val;
 	u16 val16;

+	if (!entry0 || !entry1)
+		return -E2BIG;
+
 	this_tuple = &flow->tuplehash[dir].tuple;

 	switch (this_tuple->xmit_type) {

@@ -284,6 +294,9 @@ static int flow_offload_eth_dst(struct net *net,
 	u8 nud_state;
 	u16 val16;

+	if (!entry0 || !entry1)
+		return -E2BIG;
+
 	this_tuple = &flow->tuplehash[dir].tuple;

 	switch (this_tuple->xmit_type) {

@@ -325,16 +338,19 @@ static int flow_offload_eth_dst(struct net *net,
 	return 0;
 }

-static void flow_offload_ipv4_snat(struct net *net,
-				   const struct flow_offload *flow,
-				   enum flow_offload_tuple_dir dir,
-				   struct nf_flow_rule *flow_rule)
+static int flow_offload_ipv4_snat(struct net *net,
+				  const struct flow_offload *flow,
+				  enum flow_offload_tuple_dir dir,
+				  struct nf_flow_rule *flow_rule)
 {
 	struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
 	u32 mask = ~htonl(0xffffffff);
 	__be32 addr;
 	u32 offset;

+	if (!entry)
+		return -E2BIG;
+
 	switch (dir) {
 	case FLOW_OFFLOAD_DIR_ORIGINAL:
 		addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;

@@ -345,23 +361,27 @@ static void flow_offload_ipv4_snat(struct net *net,
 		offset = offsetof(struct iphdr, daddr);
 		break;
 	default:
-		return;
+		return -EOPNOTSUPP;
 	}

 	flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP4, offset,
 			    &addr, &mask);
+	return 0;
 }

-static void flow_offload_ipv4_dnat(struct net *net,
-				   const struct flow_offload *flow,
-				   enum flow_offload_tuple_dir dir,
-				   struct nf_flow_rule *flow_rule)
+static int flow_offload_ipv4_dnat(struct net *net,
+				  const struct flow_offload *flow,
+				  enum flow_offload_tuple_dir dir,
+				  struct nf_flow_rule *flow_rule)
 {
 	struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
 	u32 mask = ~htonl(0xffffffff);
 	__be32 addr;
 	u32 offset;

+	if (!entry)
+		return -E2BIG;
+
 	switch (dir) {
 	case FLOW_OFFLOAD_DIR_ORIGINAL:
 		addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;

@@ -372,14 +392,15 @@ static void flow_offload_ipv4_dnat(struct net *net,
 		offset = offsetof(struct iphdr, saddr);
 		break;
 	default:
-		return;
+		return -EOPNOTSUPP;
 	}

 	flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP4, offset,
 			    &addr, &mask);
+	return 0;
 }

-static void flow_offload_ipv6_mangle(struct nf_flow_rule *flow_rule,
+static int flow_offload_ipv6_mangle(struct nf_flow_rule *flow_rule,
 				     unsigned int offset,
 				     const __be32 *addr, const __be32 *mask)
 {

@@ -388,15 +409,20 @@ static void flow_offload_ipv6_mangle(struct nf_flow_rule *flow_rule,

 	for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++) {
 		entry = flow_action_entry_next(flow_rule);
+		if (!entry)
+			return -E2BIG;
+
 		flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
 				    offset + i * sizeof(u32), &addr[i], mask);
 	}
+
+	return 0;
 }

-static void flow_offload_ipv6_snat(struct net *net,
-				   const struct flow_offload *flow,
-				   enum flow_offload_tuple_dir dir,
-				   struct nf_flow_rule *flow_rule)
+static int flow_offload_ipv6_snat(struct net *net,
+				  const struct flow_offload *flow,
+				  enum flow_offload_tuple_dir dir,
+				  struct nf_flow_rule *flow_rule)
 {
 	u32 mask = ~htonl(0xffffffff);
 	const __be32 *addr;

@@ -412,16 +438,16 @@ static void flow_offload_ipv6_snat(struct net *net,
 		offset = offsetof(struct ipv6hdr, daddr);
 		break;
 	default:
-		return;
+		return -EOPNOTSUPP;
 	}

-	flow_offload_ipv6_mangle(flow_rule, offset, addr, &mask);
+	return flow_offload_ipv6_mangle(flow_rule, offset, addr, &mask);
 }

-static void flow_offload_ipv6_dnat(struct net *net,
-				   const struct flow_offload *flow,
-				   enum flow_offload_tuple_dir dir,
-				   struct nf_flow_rule *flow_rule)
+static int flow_offload_ipv6_dnat(struct net *net,
+				  const struct flow_offload *flow,
+				  enum flow_offload_tuple_dir dir,
+				  struct nf_flow_rule *flow_rule)
 {
 	u32 mask = ~htonl(0xffffffff);
 	const __be32 *addr;

@@ -437,10 +463,10 @@ static void flow_offload_ipv6_dnat(struct net *net,
 		offset = offsetof(struct ipv6hdr, saddr);
 		break;
 	default:
-		return;
+		return -EOPNOTSUPP;
 	}

-	flow_offload_ipv6_mangle(flow_rule, offset, addr, &mask);
+	return flow_offload_ipv6_mangle(flow_rule, offset, addr, &mask);
 }

 static int flow_offload_l4proto(const struct flow_offload *flow)

@@ -462,15 +488,18 @@ static int flow_offload_l4proto(const struct flow_offload *flow)
 	return type;
 }

-static void flow_offload_port_snat(struct net *net,
-				   const struct flow_offload *flow,
-				   enum flow_offload_tuple_dir dir,
-				   struct nf_flow_rule *flow_rule)
+static int flow_offload_port_snat(struct net *net,
+				  const struct flow_offload *flow,
+				  enum flow_offload_tuple_dir dir,
+				  struct nf_flow_rule *flow_rule)
 {
 	struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
 	u32 mask, port;
 	u32 offset;

+	if (!entry)
+		return -E2BIG;
+
 	switch (dir) {
 	case FLOW_OFFLOAD_DIR_ORIGINAL:
 		port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port);

@@ -485,22 +514,26 @@ static void flow_offload_port_snat(struct net *net,
 		mask = ~htonl(0xffff);
 		break;
 	default:
-		return;
+		return -EOPNOTSUPP;
 	}

 	flow_offload_mangle(entry, flow_offload_l4proto(flow), offset,
 			    &port, &mask);
+	return 0;
 }

-static void flow_offload_port_dnat(struct net *net,
-				   const struct flow_offload *flow,
-				   enum flow_offload_tuple_dir dir,
-				   struct nf_flow_rule *flow_rule)
+static int flow_offload_port_dnat(struct net *net,
+				  const struct flow_offload *flow,
+				  enum flow_offload_tuple_dir dir,
+				  struct nf_flow_rule *flow_rule)
 {
 	struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
 	u32 mask, port;
 	u32 offset;

+	if (!entry)
+		return -E2BIG;
+
 	switch (dir) {
 	case FLOW_OFFLOAD_DIR_ORIGINAL:
 		port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port);

@@ -515,20 +548,24 @@ static void flow_offload_port_dnat(struct net *net,
 		mask = ~htonl(0xffff0000);
 		break;
 	default:
-		return;
+		return -EOPNOTSUPP;
 	}

 	flow_offload_mangle(entry, flow_offload_l4proto(flow), offset,
 			    &port, &mask);
+	return 0;
 }

-static void flow_offload_ipv4_checksum(struct net *net,
-				       const struct flow_offload *flow,
-				       struct nf_flow_rule *flow_rule)
+static int flow_offload_ipv4_checksum(struct net *net,
+				      const struct flow_offload *flow,
+				      struct nf_flow_rule *flow_rule)
 {
 	u8 protonum = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.l4proto;
 	struct flow_action_entry *entry = flow_action_entry_next(flow_rule);

+	if (!entry)
+		return -E2BIG;
+
 	entry->id = FLOW_ACTION_CSUM;
 	entry->csum_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR;

@@ -540,12 +577,14 @@ static void flow_offload_ipv4_checksum(struct net *net,
 		entry->csum_flags |= TCA_CSUM_UPDATE_FLAG_UDP;
 		break;
 	}
+
+	return 0;
 }

-static void flow_offload_redirect(struct net *net,
-				  const struct flow_offload *flow,
-				  enum flow_offload_tuple_dir dir,
-				  struct nf_flow_rule *flow_rule)
+static int flow_offload_redirect(struct net *net,
+				 const struct flow_offload *flow,
+				 enum flow_offload_tuple_dir dir,
+				 struct nf_flow_rule *flow_rule)
 {
 	const struct flow_offload_tuple *this_tuple, *other_tuple;
 	struct flow_action_entry *entry;

@@ -563,21 +602,28 @@ static void flow_offload_redirect(struct net *net,
 		ifindex = other_tuple->iifidx;
 		break;
 	default:
-		return;
+		return -EOPNOTSUPP;
 	}

 	dev = dev_get_by_index(net, ifindex);
 	if (!dev)
-		return;
+		return -ENODEV;

 	entry = flow_action_entry_next(flow_rule);
+	if (!entry) {
+		dev_put(dev);
+		return -E2BIG;
+	}
+
 	entry->id = FLOW_ACTION_REDIRECT;
 	entry->dev = dev;
+
+	return 0;
 }

-static void flow_offload_encap_tunnel(const struct flow_offload *flow,
-				      enum flow_offload_tuple_dir dir,
-				      struct nf_flow_rule *flow_rule)
+static int flow_offload_encap_tunnel(const struct flow_offload *flow,
+				     enum flow_offload_tuple_dir dir,
+				     struct nf_flow_rule *flow_rule)
 {
 	const struct flow_offload_tuple *this_tuple;
 	struct flow_action_entry *entry;

@@ -585,7 +631,7 @@ static void flow_offload_encap_tunnel(const struct flow_offload *flow,

 	this_tuple = &flow->tuplehash[dir].tuple;
 	if (this_tuple->xmit_type == FLOW_OFFLOAD_XMIT_DIRECT)
-		return;
+		return 0;

 	dst = this_tuple->dst_cache;
 	if (dst && dst->lwtstate) {

@@ -594,15 +640,19 @@ static void flow_offload_encap_tunnel(const struct flow_offload *flow,
 		tun_info = lwt_tun_info(dst->lwtstate);
 		if (tun_info && (tun_info->mode & IP_TUNNEL_INFO_TX)) {
 			entry = flow_action_entry_next(flow_rule);
+			if (!entry)
+				return -E2BIG;
 			entry->id = FLOW_ACTION_TUNNEL_ENCAP;
 			entry->tunnel = tun_info;
 		}
 	}
+
+	return 0;
 }

-static void flow_offload_decap_tunnel(const struct flow_offload *flow,
-				      enum flow_offload_tuple_dir dir,
-				      struct nf_flow_rule *flow_rule)
+static int flow_offload_decap_tunnel(const struct flow_offload *flow,
+				     enum flow_offload_tuple_dir dir,
+				     struct nf_flow_rule *flow_rule)
 {
 	const struct flow_offload_tuple *other_tuple;
 	struct flow_action_entry *entry;

@@ -610,7 +660,7 @@ static void flow_offload_decap_tunnel(const struct flow_offload *flow,

 	other_tuple = &flow->tuplehash[!dir].tuple;
 	if (other_tuple->xmit_type == FLOW_OFFLOAD_XMIT_DIRECT)
-		return;
+		return 0;

 	dst = other_tuple->dst_cache;
 	if (dst && dst->lwtstate) {

@@ -619,9 +669,13 @@ static void flow_offload_decap_tunnel(const struct flow_offload *flow,
 		tun_info = lwt_tun_info(dst->lwtstate);
 		if (tun_info && (tun_info->mode & IP_TUNNEL_INFO_TX)) {
 			entry = flow_action_entry_next(flow_rule);
+			if (!entry)
+				return -E2BIG;
 			entry->id = FLOW_ACTION_TUNNEL_DECAP;
 		}
 	}
+
+	return 0;
 }

 static int

@@ -633,8 +687,9 @@ nf_flow_rule_route_common(struct net *net, const struct flow_offload *flow,
 	const struct flow_offload_tuple *tuple;
 	int i;

-	flow_offload_decap_tunnel(flow, dir, flow_rule);
-	flow_offload_encap_tunnel(flow, dir, flow_rule);
+	if (flow_offload_decap_tunnel(flow, dir, flow_rule) < 0 ||
+	    flow_offload_encap_tunnel(flow, dir, flow_rule) < 0)
+		return -1;

 	if (flow_offload_eth_src(net, flow, dir, flow_rule) < 0 ||
 	    flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)

@@ -650,6 +705,8 @@ nf_flow_rule_route_common(struct net *net, const struct flow_offload *flow,

 		if (tuple->encap[i].proto == htons(ETH_P_8021Q)) {
 			entry = flow_action_entry_next(flow_rule);
+			if (!entry)
+				return -1;
 			entry->id = FLOW_ACTION_VLAN_POP;
 		}
 	}

@@ -663,6 +720,8 @@ nf_flow_rule_route_common(struct net *net, const struct flow_offload *flow,
 			continue;

 		entry = flow_action_entry_next(flow_rule);
+		if (!entry)
+			return -1;

 		switch (other_tuple->encap[i].proto) {
 		case htons(ETH_P_PPP_SES):

@@ -688,18 +747,22 @@ int nf_flow_rule_route_ipv4(struct net *net, struct flow_offload *flow,
 		return -1;

 	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
-		flow_offload_ipv4_snat(net, flow, dir, flow_rule);
-		flow_offload_port_snat(net, flow, dir, flow_rule);
+		if (flow_offload_ipv4_snat(net, flow, dir, flow_rule) < 0 ||
+		    flow_offload_port_snat(net, flow, dir, flow_rule) < 0)
+			return -1;
 	}
 	if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
-		flow_offload_ipv4_dnat(net, flow, dir, flow_rule);
-		flow_offload_port_dnat(net, flow, dir, flow_rule);
+		if (flow_offload_ipv4_dnat(net, flow, dir, flow_rule) < 0 ||
+		    flow_offload_port_dnat(net, flow, dir, flow_rule) < 0)
+			return -1;
 	}
 	if (test_bit(NF_FLOW_SNAT, &flow->flags) ||
 	    test_bit(NF_FLOW_DNAT, &flow->flags))
-		flow_offload_ipv4_checksum(net, flow, flow_rule);
+		if (flow_offload_ipv4_checksum(net, flow, flow_rule) < 0)
+			return -1;

-	flow_offload_redirect(net, flow, dir, flow_rule);
+	if (flow_offload_redirect(net, flow, dir, flow_rule) < 0)
+		return -1;

 	return 0;
 }

@@ -713,22 +776,23 @@ int nf_flow_rule_route_ipv6(struct net *net, struct flow_offload *flow,
 		return -1;

 	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
-		flow_offload_ipv6_snat(net, flow, dir, flow_rule);
-		flow_offload_port_snat(net, flow, dir, flow_rule);
+		if (flow_offload_ipv6_snat(net, flow, dir, flow_rule) < 0 ||
+		    flow_offload_port_snat(net, flow, dir, flow_rule) < 0)
+			return -1;
 	}
 	if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
-		flow_offload_ipv6_dnat(net, flow, dir, flow_rule);
-		flow_offload_port_dnat(net, flow, dir, flow_rule);
+		if (flow_offload_ipv6_dnat(net, flow, dir, flow_rule) < 0 ||
+		    flow_offload_port_dnat(net, flow, dir, flow_rule) < 0)
+			return -1;
 	}

-	flow_offload_redirect(net, flow, dir, flow_rule);
+	if (flow_offload_redirect(net, flow, dir, flow_rule) < 0)
+		return -1;

 	return 0;
 }
 EXPORT_SYMBOL_GPL(nf_flow_rule_route_ipv6);

-#define NF_FLOW_RULE_ACTION_MAX	16
-
 static struct nf_flow_rule *
 nf_flow_offload_rule_alloc(struct net *net,
 			   const struct flow_offload_work *offload,
@@ -11667,8 +11667,6 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
 	switch (data->verdict.code) {
 	case NF_ACCEPT:
 	case NF_DROP:
-	case NF_QUEUE:
 		break;
 	case NFT_CONTINUE:
 	case NFT_BREAK:
 	case NFT_RETURN:

@@ -11703,6 +11701,11 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,

 		data->verdict.chain = chain;
 		break;
+	case NF_QUEUE:
+		/* The nft_queue expression is used for this purpose, an
+		 * immediate NF_QUEUE verdict should not ever be seen here.
+		 */
+		fallthrough;
 	default:
 		return -EINVAL;
 	}
@@ -726,7 +726,7 @@ nfulnl_log_packet(struct net *net,
 		+ nla_total_size(plen) /* prefix */
 		+ nla_total_size(sizeof(struct nfulnl_msg_packet_hw))
 		+ nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp))
-		+ nla_total_size(sizeof(struct nfgenmsg)); /* NLMSG_DONE */
+		+ nlmsg_total_size(sizeof(struct nfgenmsg)); /* NLMSG_DONE */

 	if (in && skb_mac_header_was_set(skb)) {
 		size += nla_total_size(skb->dev->hard_header_len)
@@ -501,6 +501,17 @@ int xt_check_match(struct xt_mtchk_param *par,
 				    par->match->table, par->table);
 		return -EINVAL;
 	}

+	/* NFPROTO_UNSPEC implies NF_INET_* hooks which do not overlap with
+	 * NF_ARP_IN,OUT,FORWARD, allow explicit extensions with NFPROTO_ARP
+	 * support.
+	 */
+	if (par->family == NFPROTO_ARP &&
+	    par->match->family != NFPROTO_ARP) {
+		pr_info_ratelimited("%s_tables: %s match: not valid for this family\n",
+				    xt_prefix[par->family], par->match->name);
+		return -EINVAL;
+	}
+
 	if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
 		char used[64], allow[64];

@@ -1016,6 +1027,18 @@ int xt_check_target(struct xt_tgchk_param *par,
 				     par->target->table, par->table);
 		return -EINVAL;
 	}

+	/* NFPROTO_UNSPEC implies NF_INET_* hooks which do not overlap with
+	 * NF_ARP_IN,OUT,FORWARD, allow explicit extensions with NFPROTO_ARP
+	 * support.
+	 */
+	if (par->family == NFPROTO_ARP &&
+	    par->target->family != NFPROTO_ARP) {
+		pr_info_ratelimited("%s_tables: %s target: not valid for this family\n",
+				    xt_prefix[par->family], par->target->name);
+		return -EINVAL;
+	}
+
 	if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
 		char used[64], allow[64];
@@ -65,6 +65,9 @@ static int cgroup_mt_check_v1(const struct xt_mtchk_param *par)

 	info->priv = NULL;
 	if (info->has_path) {
+		if (strnlen(info->path, sizeof(info->path)) >= sizeof(info->path))
+			return -ENAMETOOLONG;
+
 		cgrp = cgroup_get_from_path(info->path);
 		if (IS_ERR(cgrp)) {
 			pr_info_ratelimited("invalid path, errno=%ld\n",

@@ -102,6 +105,9 @@ static int cgroup_mt_check_v2(const struct xt_mtchk_param *par)

 	info->priv = NULL;
 	if (info->has_path) {
+		if (strnlen(info->path, sizeof(info->path)) >= sizeof(info->path))
+			return -ENAMETOOLONG;
+
 		cgrp = cgroup_get_from_path(info->path);
 		if (IS_ERR(cgrp)) {
 			pr_info_ratelimited("invalid path, errno=%ld\n",
@@ -91,6 +91,11 @@ static int xt_rateest_mt_checkentry(const struct xt_mtchk_param *par)
 		goto err1;
 	}

+	if (strnlen(info->name1, sizeof(info->name1)) >= sizeof(info->name1))
+		return -ENAMETOOLONG;
+	if (strnlen(info->name2, sizeof(info->name2)) >= sizeof(info->name2))
+		return -ENAMETOOLONG;
+
 	ret = -ENOENT;
 	est1 = xt_rateest_lookup(par->net, info->name1);
 	if (!est1)