Merge git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf
Pablo Neira Ayuso says:

====================
Netfilter fixes for net

1) Check for interval validity in all concatenation fields in
   nft_set_pipapo, from Stefano Brivio.

2) Missing preemption disabled in conntrack and flowtable stat
   updates, from Xin Long.

3) Fix compilation warning when CONFIG_NF_CONNTRACK_MARK=n.

Except for 3), which is a bug introduced by a recent fix in 6.1-rc,
everything else has been broken for several releases.

* git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf:
  netfilter: ctnetlink: fix compilation warning after data race fixes in ct mark
  netfilter: conntrack: fix using __this_cpu_add in preemptible
  netfilter: flowtable_offload: fix using __this_cpu_add in preemptible
  netfilter: nft_set_pipapo: Actually validate intervals in fields after the first one
====================

Link: https://lore.kernel.org/r/20221130121934.1125-1-pablo@netfilter.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
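For fix 2), the conntrack and flowtable hunks below switch the per-CPU statistics helpers to their _ATOMIC variants. A minimal sketch of the distinction, assuming the macro bodies in include/net/netfilter/nf_conntrack.h still look roughly like this (exact definitions may differ):

/* The plain helper uses __this_cpu_inc(), which is only safe when the
 * caller already runs with preemption disabled; with CONFIG_DEBUG_PREEMPT
 * it otherwise splats "BUG: using __this_cpu_add() in preemptible".
 */
#define NF_CT_STAT_INC(net, count)        __this_cpu_inc((net)->ct.stat->count)

/* The _ATOMIC variant uses this_cpu_inc(), which is safe to call from
 * preemptible (process) context because the increment itself is made
 * preemption-safe.
 */
#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)

nf_conntrack_hash_check_insert() and flow_offload_queue_work() can run in process context with preemption enabled, hence the switch; the NF_FLOW_TABLE_STAT_INC* macros follow the same pattern.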
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -891,7 +891,7 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
 	zone = nf_ct_zone(ct);
 
 	if (!nf_ct_ext_valid_pre(ct->ext)) {
-		NF_CT_STAT_INC(net, insert_failed);
+		NF_CT_STAT_INC_ATOMIC(net, insert_failed);
 		return -ETIMEDOUT;
 	}
 
@@ -938,7 +938,7 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
 
 	if (!nf_ct_ext_valid_post(ct->ext)) {
 		nf_ct_kill(ct);
-		NF_CT_STAT_INC(net, drop);
+		NF_CT_STAT_INC_ATOMIC(net, drop);
 		return -ETIMEDOUT;
 	}
 
@@ -1275,7 +1275,7 @@ chaintoolong:
 	 */
 	if (!nf_ct_ext_valid_post(ct->ext)) {
 		nf_ct_kill(ct);
-		NF_CT_STAT_INC(net, drop);
+		NF_CT_STAT_INC_ATOMIC(net, drop);
 		return NF_DROP;
 	}
 
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -328,8 +328,13 @@ nla_put_failure:
 }
 
 #ifdef CONFIG_NF_CONNTRACK_MARK
-static int ctnetlink_dump_mark(struct sk_buff *skb, u32 mark)
+static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
 {
+	u32 mark = READ_ONCE(ct->mark);
+
+	if (!mark)
+		return 0;
+
 	if (nla_put_be32(skb, CTA_MARK, htonl(mark)))
 		goto nla_put_failure;
 	return 0;
@@ -543,7 +548,7 @@ static int ctnetlink_dump_extinfo(struct sk_buff *skb,
 static int ctnetlink_dump_info(struct sk_buff *skb, struct nf_conn *ct)
 {
 	if (ctnetlink_dump_status(skb, ct) < 0 ||
-	    ctnetlink_dump_mark(skb, READ_ONCE(ct->mark)) < 0 ||
+	    ctnetlink_dump_mark(skb, ct) < 0 ||
 	    ctnetlink_dump_secctx(skb, ct) < 0 ||
 	    ctnetlink_dump_id(skb, ct) < 0 ||
 	    ctnetlink_dump_use(skb, ct) < 0 ||
@@ -722,7 +727,6 @@ ctnetlink_conntrack_event(unsigned int events, const struct nf_ct_event *item)
 	struct sk_buff *skb;
 	unsigned int type;
 	unsigned int flags = 0, group;
-	u32 mark;
 	int err;
 
 	if (events & (1 << IPCT_DESTROY)) {
@@ -827,9 +831,8 @@ ctnetlink_conntrack_event(unsigned int events, const struct nf_ct_event *item)
 	}
 
 #ifdef CONFIG_NF_CONNTRACK_MARK
-	mark = READ_ONCE(ct->mark);
-	if ((events & (1 << IPCT_MARK) || mark) &&
-	    ctnetlink_dump_mark(skb, mark) < 0)
+	if (events & (1 << IPCT_MARK) &&
+	    ctnetlink_dump_mark(skb, ct) < 0)
 		goto nla_put_failure;
 #endif
 	nlmsg_end(skb, nlh);
@@ -2671,7 +2674,6 @@ static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct)
 {
 	const struct nf_conntrack_zone *zone;
 	struct nlattr *nest_parms;
-	u32 mark;
 
 	zone = nf_ct_zone(ct);
 
@@ -2733,8 +2735,7 @@ static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct)
 		goto nla_put_failure;
 
 #ifdef CONFIG_NF_CONNTRACK_MARK
-	mark = READ_ONCE(ct->mark);
-	if (mark && ctnetlink_dump_mark(skb, mark) < 0)
+	if (ctnetlink_dump_mark(skb, ct) < 0)
 		goto nla_put_failure;
 #endif
 	if (ctnetlink_dump_labels(skb, ct) < 0)
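For fix 3), the earlier ct->mark data race fixes had left a local u32 mark in several ctnetlink functions that is only consumed under CONFIG_NF_CONNTRACK_MARK, so builds with that option disabled warned about the unused local. The hunks above push the READ_ONCE() into ctnetlink_dump_mark() and drop those locals. Assembled from the hunks, the reworked helper looks roughly like this; the nla_put_failure label and the CONFIG_NF_CONNTRACK_MARK=n stub are assumptions about context not shown in this diff:

#ifdef CONFIG_NF_CONNTRACK_MARK
static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
{
	u32 mark = READ_ONCE(ct->mark);	/* single annotated read, kept inside the #ifdef */

	if (!mark)			/* zero mark: emit no CTA_MARK attribute */
		return 0;

	if (nla_put_be32(skb, CTA_MARK, htonl(mark)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}
#else
#define ctnetlink_dump_mark(skb, ct) (0)	/* assumed stub when the option is off */
#endif

Because the zero check now lives inside the helper, callers such as ctnetlink_dump_info() and __ctnetlink_glue_build() can pass the conntrack entry unconditionally, which is what removes the config-dependent locals and the warning.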
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -997,13 +997,13 @@ static void flow_offload_queue_work(struct flow_offload_work *offload)
 	struct net *net = read_pnet(&offload->flowtable->net);
 
 	if (offload->cmd == FLOW_CLS_REPLACE) {
-		NF_FLOW_TABLE_STAT_INC(net, count_wq_add);
+		NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count_wq_add);
 		queue_work(nf_flow_offload_add_wq, &offload->work);
 	} else if (offload->cmd == FLOW_CLS_DESTROY) {
-		NF_FLOW_TABLE_STAT_INC(net, count_wq_del);
+		NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count_wq_del);
 		queue_work(nf_flow_offload_del_wq, &offload->work);
 	} else {
-		NF_FLOW_TABLE_STAT_INC(net, count_wq_stats);
+		NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count_wq_stats);
 		queue_work(nf_flow_offload_stats_wq, &offload->work);
 	}
 }
--- a/net/netfilter/nft_set_pipapo.c
+++ b/net/netfilter/nft_set_pipapo.c
@@ -1162,6 +1162,7 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
 	struct nft_pipapo_match *m = priv->clone;
 	u8 genmask = nft_genmask_next(net);
 	struct nft_pipapo_field *f;
+	const u8 *start_p, *end_p;
 	int i, bsize_max, err = 0;
 
 	if (nft_set_ext_exists(ext, NFT_SET_EXT_KEY_END))
@@ -1202,9 +1203,9 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
 	}
 
 	/* Validate */
+	start_p = start;
+	end_p = end;
 	nft_pipapo_for_each_field(f, i, m) {
-		const u8 *start_p = start, *end_p = end;
-
 		if (f->rules >= (unsigned long)NFT_PIPAPO_RULE0_MAX)
 			return -ENOSPC;
 
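For fix 1), the declaration of start_p/end_p is hoisted out of the validation loop so the pointers keep advancing across fields; previously they were reset to the start of the keys on every iteration, so only the first field of a concatenation was ever range-checked. The body of the loop that uses the pointers is not shown in the hunk above; based on the upstream code it presumably continues along these lines (the memcmp length and the NFT_PIPAPO_GROUPS_* helpers are taken from nft_set_pipapo and may differ in detail):

	nft_pipapo_for_each_field(f, i, m) {
		if (f->rules >= (unsigned long)NFT_PIPAPO_RULE0_MAX)
			return -ENOSPC;

		/* Reject ranges whose start key is greater than their end key,
		 * comparing only this field's slice of the concatenated keys.
		 */
		if (memcmp(start_p, end_p,
			   f->groups / NFT_PIPAPO_GROUPS_PER_BYTE(f)) > 0)
			return -EINVAL;

		/* Advance past this field so the next iteration validates the
		 * next field of the concatenation instead of the first again.
		 */
		start_p += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
		end_p += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
	}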