xref: /openbmc/linux/net/sched/act_skbmod.c (revision 2025cf9e)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_skbmod.c  skb data modifier
 *
 * Copyright (c) 2016 Jamal Hadi Salim <jhs@mojatatu.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

#include <linux/tc_act/tc_skbmod.h>
#include <net/tc_act/tc_skbmod.h>

static unsigned int skbmod_net_id;
static struct tc_action_ops act_skbmod_ops;

#define MAX_EDIT_LEN ETH_HLEN
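/* Datapath handler: runs per packet from the TC action core. It makes the
 * first MAX_EDIT_LEN bytes of the skb writable, then applies whichever
 * rewrites are enabled in the RCU-protected parameter block (destination
 * MAC, source MAC, ethertype, or MAC swap) and returns the configured
 * action verdict.
 */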
static int tcf_skbmod_act(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res)
{
	struct tcf_skbmod *d = to_skbmod(a);
	int action;
	struct tcf_skbmod_params *p;
	u64 flags;
	int err;

	tcf_lastuse_update(&d->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb);

	/* XXX: if you are going to edit fields beyond the Ethernet header
	 * (e.g. when adding IP header replacement or VLAN swap),
	 * MAX_EDIT_LEN needs to change accordingly.
	 */
	err = skb_ensure_writable(skb, MAX_EDIT_LEN);
	if (unlikely(err)) /* best policy is to drop on the floor */
		goto drop;

	action = READ_ONCE(d->tcf_action);
	if (unlikely(action == TC_ACT_SHOT))
		goto drop;

	p = rcu_dereference_bh(d->skbmod_p);
	flags = p->flags;
	if (flags & SKBMOD_F_DMAC)
		ether_addr_copy(eth_hdr(skb)->h_dest, p->eth_dst);
	if (flags & SKBMOD_F_SMAC)
		ether_addr_copy(eth_hdr(skb)->h_source, p->eth_src);
	if (flags & SKBMOD_F_ETYPE)
		eth_hdr(skb)->h_proto = p->eth_type;

	if (flags & SKBMOD_F_SWAPMAC) {
		u16 tmpaddr[ETH_ALEN / 2]; /* ether_addr_copy() requirement */
		/* XXX: I am sure we can come up with more efficient swapping */
		ether_addr_copy((u8 *)tmpaddr, eth_hdr(skb)->h_dest);
		ether_addr_copy(eth_hdr(skb)->h_dest, eth_hdr(skb)->h_source);
		ether_addr_copy(eth_hdr(skb)->h_source, (u8 *)tmpaddr);
	}

	return action;

drop:
	qstats_overlimit_inc(this_cpu_ptr(d->common.cpu_qstats));
	return TC_ACT_SHOT;
}
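
/* Illustrative userspace usage (not part of this file): with iproute2 the
 * action is typically attached to a filter, for example:
 *
 *   tc filter add dev eth0 parent 1: protocol ip matchall \
 *      action skbmod set dmac 02:12:13:14:15:16 set etype 0x88A8
 *
 * The device name, handle and addresses above are placeholders; see
 * tc-skbmod(8) for the exact syntax supported by your tc version.
 */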

static const struct nla_policy skbmod_policy[TCA_SKBMOD_MAX + 1] = {
	[TCA_SKBMOD_PARMS]		= { .len = sizeof(struct tc_skbmod) },
	[TCA_SKBMOD_DMAC]		= { .len = ETH_ALEN },
	[TCA_SKBMOD_SMAC]		= { .len = ETH_ALEN },
	[TCA_SKBMOD_ETYPE]		= { .type = NLA_U16 },
};

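/* Control path: called when an action instance is created or replaced via
 * netlink. It validates the TCA_SKBMOD_* attributes against skbmod_policy,
 * allocates or looks up the action in the per-netns IDR, builds a fresh
 * parameter block and publishes it with rcu_assign_pointer(), freeing any
 * previous block via kfree_rcu() once readers are done.
 */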
static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   int ovr, int bind, bool rtnl_held,
			   struct tcf_proto *tp,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, skbmod_net_id);
	struct nlattr *tb[TCA_SKBMOD_MAX + 1];
	struct tcf_skbmod_params *p, *p_old;
	struct tcf_chain *goto_ch = NULL;
	struct tc_skbmod *parm;
	struct tcf_skbmod *d;
	bool exists = false;
	u8 *daddr = NULL;
	u8 *saddr = NULL;
	u16 eth_type = 0;
	u32 lflags = 0;
	int ret = 0, err;

	if (!nla)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_SKBMOD_MAX, nla,
					  skbmod_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[TCA_SKBMOD_PARMS])
		return -EINVAL;

	if (tb[TCA_SKBMOD_DMAC]) {
		daddr = nla_data(tb[TCA_SKBMOD_DMAC]);
		lflags |= SKBMOD_F_DMAC;
	}

	if (tb[TCA_SKBMOD_SMAC]) {
		saddr = nla_data(tb[TCA_SKBMOD_SMAC]);
		lflags |= SKBMOD_F_SMAC;
	}

	if (tb[TCA_SKBMOD_ETYPE]) {
		eth_type = nla_get_u16(tb[TCA_SKBMOD_ETYPE]);
		lflags |= SKBMOD_F_ETYPE;
	}

	parm = nla_data(tb[TCA_SKBMOD_PARMS]);
	if (parm->flags & SKBMOD_F_SWAPMAC)
		lflags = SKBMOD_F_SWAPMAC;

	err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	if (!lflags) {
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, parm->index);
		return -EINVAL;
	}

	if (!exists) {
		ret = tcf_idr_create(tn, parm->index, est, a,
				     &act_skbmod_ops, bind, true);
		if (ret) {
			tcf_idr_cleanup(tn, parm->index);
			return ret;
		}

		ret = ACT_P_CREATED;
	} else if (!ovr) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	d = to_skbmod(*a);

	p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL);
	if (unlikely(!p)) {
		err = -ENOMEM;
		goto put_chain;
	}

	p->flags = lflags;

	if (ovr)
		spin_lock_bh(&d->tcf_lock);
	/* Protected by tcf_lock if overwriting existing action. */
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	p_old = rcu_dereference_protected(d->skbmod_p, 1);

	if (lflags & SKBMOD_F_DMAC)
		ether_addr_copy(p->eth_dst, daddr);
	if (lflags & SKBMOD_F_SMAC)
		ether_addr_copy(p->eth_src, saddr);
	if (lflags & SKBMOD_F_ETYPE)
		p->eth_type = htons(eth_type);

	rcu_assign_pointer(d->skbmod_p, p);
	if (ovr)
		spin_unlock_bh(&d->tcf_lock);

	if (p_old)
		kfree_rcu(p_old, rcu);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	if (ret == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);
	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

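/* Called when the action instance is destroyed; releases the parameter
 * block after an RCU grace period so concurrent datapath readers remain
 * safe.
 */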
static void tcf_skbmod_cleanup(struct tc_action *a)
{
	struct tcf_skbmod *d = to_skbmod(a);
	struct tcf_skbmod_params *p;

	p = rcu_dereference_protected(d->skbmod_p, 1);
	if (p)
		kfree_rcu(p, rcu);
}

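/* Dump the current configuration back to userspace as TCA_SKBMOD_*
 * attributes. tcf_lock is held so the parameter block cannot be swapped
 * out while it is being serialized.
 */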
static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
			   int bind, int ref)
{
	struct tcf_skbmod *d = to_skbmod(a);
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_skbmod_params *p;
	struct tc_skbmod opt = {
		.index   = d->tcf_index,
		.refcnt  = refcount_read(&d->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&d->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&d->tcf_lock);
	opt.action = d->tcf_action;
	p = rcu_dereference_protected(d->skbmod_p,
				      lockdep_is_held(&d->tcf_lock));
	opt.flags  = p->flags;
	if (nla_put(skb, TCA_SKBMOD_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if ((p->flags & SKBMOD_F_DMAC) &&
	    nla_put(skb, TCA_SKBMOD_DMAC, ETH_ALEN, p->eth_dst))
		goto nla_put_failure;
	if ((p->flags & SKBMOD_F_SMAC) &&
	    nla_put(skb, TCA_SKBMOD_SMAC, ETH_ALEN, p->eth_src))
		goto nla_put_failure;
	if ((p->flags & SKBMOD_F_ETYPE) &&
	    nla_put_u16(skb, TCA_SKBMOD_ETYPE, ntohs(p->eth_type)))
		goto nla_put_failure;

	tcf_tm_dump(&t, &d->tcf_tm);
	if (nla_put_64bit(skb, TCA_SKBMOD_TM, sizeof(t), &t, TCA_SKBMOD_PAD))
		goto nla_put_failure;

	spin_unlock_bh(&d->tcf_lock);
	return skb->len;
nla_put_failure:
	spin_unlock_bh(&d->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_skbmod_walker(struct net *net, struct sk_buff *skb,
			     struct netlink_callback *cb, int type,
			     const struct tc_action_ops *ops,
			     struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, skbmod_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_skbmod_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, skbmod_net_id);

	return tcf_idr_search(tn, a, index);
}

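/* Callback table registered with the TC action core: .act is the per-packet
 * datapath hook, while .init, .dump, .cleanup, .walk and .lookup service the
 * netlink control path.
 */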
static struct tc_action_ops act_skbmod_ops = {
	.kind		=	"skbmod",
	.id		=	TCA_ACT_SKBMOD,
	.owner		=	THIS_MODULE,
	.act		=	tcf_skbmod_act,
	.dump		=	tcf_skbmod_dump,
	.init		=	tcf_skbmod_init,
	.cleanup	=	tcf_skbmod_cleanup,
	.walk		=	tcf_skbmod_walker,
	.lookup		=	tcf_skbmod_search,
	.size		=	sizeof(struct tcf_skbmod),
};

static __net_init int skbmod_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, skbmod_net_id);

	return tc_action_net_init(tn, &act_skbmod_ops);
}

static void __net_exit skbmod_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, skbmod_net_id);
}

static struct pernet_operations skbmod_net_ops = {
	.init = skbmod_init_net,
	.exit_batch = skbmod_exit_net,
	.id   = &skbmod_net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Jamal Hadi Salim, <jhs@mojatatu.com>");
MODULE_DESCRIPTION("SKB data mod-ing");
MODULE_LICENSE("GPL");

static int __init skbmod_init_module(void)
{
	return tcf_register_action(&act_skbmod_ops, &skbmod_net_ops);
}

static void __exit skbmod_cleanup_module(void)
{
	tcf_unregister_action(&act_skbmod_ops, &skbmod_net_ops);
}

module_init(skbmod_init_module);
module_exit(skbmod_cleanup_module);