/* xref: /openbmc/linux/net/8021q/vlan_core.c (revision e0f6d1a5) */
// SPDX-License-Identifier: GPL-2.0
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include <linux/export.h>
#include "vlan.h"

bool vlan_do_receive(struct sk_buff **skbp)
{
	struct sk_buff *skb = *skbp;
	__be16 vlan_proto = skb->vlan_proto;
	u16 vlan_id = skb_vlan_tag_get_id(skb);
	struct net_device *vlan_dev;
	struct vlan_pcpu_stats *rx_stats;

	vlan_dev = vlan_find_dev(skb->dev, vlan_proto, vlan_id);
	if (!vlan_dev)
		return false;

	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return false;

	if (unlikely(!(vlan_dev->flags & IFF_UP))) {
		kfree_skb(skb);
		*skbp = NULL;
		return false;
	}

	skb->dev = vlan_dev;
	if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) {
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly. */
		if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, vlan_dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
	}

	if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR) &&
	    !netif_is_macvlan_port(vlan_dev) &&
	    !netif_is_bridge_port(vlan_dev)) {
		unsigned int offset = skb->data - skb_mac_header(skb);

		/*
		 * vlan_insert_inner_tag() expects skb->data to point at the
		 * mac header, so move skb->data there before calling it and
		 * restore the original position afterwards.
		 */
		skb_push(skb, offset);
		skb = *skbp = vlan_insert_inner_tag(skb, skb->vlan_proto,
						    skb->vlan_tci, skb->mac_len);
		if (!skb)
			return false;
		skb_pull(skb, offset + VLAN_HLEN);
		skb_reset_mac_len(skb);
	}

	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
	skb->vlan_tci = 0;

	rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;
	if (skb->pkt_type == PACKET_MULTICAST)
		rx_stats->rx_multicast++;
	u64_stats_update_end(&rx_stats->syncp);

	return true;
}
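
/*
 * Usage sketch (not part of the original file): vlan_do_receive() takes a
 * pointer to the skb pointer because it may replace or free the skb.  The
 * core receive path calls it roughly like this (paraphrased, assumed from
 * net/core/dev.c):
 *
 *	if (skb_vlan_tag_present(skb)) {
 *		if (vlan_do_receive(&skb))
 *			goto another_round;	// skb->dev is now the VLAN device
 *		else if (unlikely(!skb))
 *			goto out;		// skb was dropped or consumed
 *	}
 */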

/* Must be invoked with rcu_read_lock. */
struct net_device *__vlan_find_dev_deep_rcu(struct net_device *dev,
					__be16 vlan_proto, u16 vlan_id)
{
	struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info);

	if (vlan_info) {
		return vlan_group_get_device(&vlan_info->grp,
					     vlan_proto, vlan_id);
	} else {
		/*
		 * Lower devices of master uppers (bonding, team) do not have
		 * a grp assigned to themselves; the grp is assigned to the
		 * upper device instead, so look up through the master.
		 */
		struct net_device *upper_dev;

		upper_dev = netdev_master_upper_dev_get_rcu(dev);
		if (upper_dev)
			return __vlan_find_dev_deep_rcu(upper_dev,
						    vlan_proto, vlan_id);
	}

	return NULL;
}
EXPORT_SYMBOL(__vlan_find_dev_deep_rcu);
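
/*
 * Minimal usage sketch (illustrative, not from this file): the caller must
 * hold rcu_read_lock() around both the lookup and any use of the returned
 * device, since no reference is taken on it:
 *
 *	rcu_read_lock();
 *	vlan_dev = __vlan_find_dev_deep_rcu(real_dev, htons(ETH_P_8021Q), 100);
 *	if (vlan_dev)
 *		mtu = READ_ONCE(vlan_dev->mtu);	// example of short, non-sleeping use
 *	rcu_read_unlock();
 */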

struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	struct net_device *ret = vlan_dev_priv(dev)->real_dev;

	while (is_vlan_dev(ret))
		ret = vlan_dev_priv(ret)->real_dev;

	return ret;
}
EXPORT_SYMBOL(vlan_dev_real_dev);

u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	return vlan_dev_priv(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);

__be16 vlan_dev_vlan_proto(const struct net_device *dev)
{
	return vlan_dev_priv(dev)->vlan_proto;
}
EXPORT_SYMBOL(vlan_dev_vlan_proto);
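
/*
 * Illustrative sketch (assumption, not part of this file): given an arbitrary
 * net_device, a caller can unwrap a possibly nested VLAN device down to the
 * underlying device and read its tagging parameters:
 *
 *	if (is_vlan_dev(dev)) {
 *		struct net_device *lower = vlan_dev_real_dev(dev);
 *		u16 vid = vlan_dev_vlan_id(dev);
 *		__be16 proto = vlan_dev_vlan_proto(dev);
 *
 *		netdev_dbg(dev, "VLAN %u (proto 0x%04x) on %s\n",
 *			   vid, ntohs(proto), lower->name);
 *	}
 */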

/*
 * vlan info and vid list
 */

static void vlan_group_free(struct vlan_group *grp)
{
	int i, j;

	for (i = 0; i < VLAN_PROTO_NUM; i++)
		for (j = 0; j < VLAN_GROUP_ARRAY_SPLIT_PARTS; j++)
			kfree(grp->vlan_devices_arrays[i][j]);
}

static void vlan_info_free(struct vlan_info *vlan_info)
{
	vlan_group_free(&vlan_info->grp);
	kfree(vlan_info);
}

static void vlan_info_rcu_free(struct rcu_head *rcu)
{
	vlan_info_free(container_of(rcu, struct vlan_info, rcu));
}

static struct vlan_info *vlan_info_alloc(struct net_device *dev)
{
	struct vlan_info *vlan_info;

	vlan_info = kzalloc(sizeof(struct vlan_info), GFP_KERNEL);
	if (!vlan_info)
		return NULL;

	vlan_info->real_dev = dev;
	INIT_LIST_HEAD(&vlan_info->vid_list);
	return vlan_info;
}

struct vlan_vid_info {
	struct list_head list;
	__be16 proto;
	u16 vid;
	int refcount;
};

static bool vlan_hw_filter_capable(const struct net_device *dev, __be16 proto)
{
	if (proto == htons(ETH_P_8021Q) &&
	    dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
		return true;
	if (proto == htons(ETH_P_8021AD) &&
	    dev->features & NETIF_F_HW_VLAN_STAG_FILTER)
		return true;
	return false;
}
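
/*
 * Hedged example (hypothetical driver, for illustration only): a NIC driver
 * opts in to hardware VLAN filtering by advertising the matching feature
 * bit, typically at probe time:
 *
 *	netdev->features    |= NETIF_F_HW_VLAN_CTAG_FILTER;
 *	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
 *
 * Only with the bit set does vlan_hw_filter_capable() return true for
 * 802.1Q, and only then are the driver's ndo_vlan_rx_add_vid() and
 * ndo_vlan_rx_kill_vid() callbacks invoked by the helpers below.
 */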

static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info,
					       __be16 proto, u16 vid)
{
	struct vlan_vid_info *vid_info;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		if (vid_info->proto == proto && vid_info->vid == vid)
			return vid_info;
	}
	return NULL;
}

static struct vlan_vid_info *vlan_vid_info_alloc(__be16 proto, u16 vid)
{
	struct vlan_vid_info *vid_info;

	vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL);
	if (!vid_info)
		return NULL;
	vid_info->proto = proto;
	vid_info->vid = vid;

	return vid_info;
}

static int vlan_add_rx_filter_info(struct net_device *dev, __be16 proto, u16 vid)
{
	if (!vlan_hw_filter_capable(dev, proto))
		return 0;

	if (netif_device_present(dev))
		return dev->netdev_ops->ndo_vlan_rx_add_vid(dev, proto, vid);
	else
		return -ENODEV;
}
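
/*
 * Sketch of the driver side (hypothetical "foo" driver, illustration only):
 * the ndo_vlan_rx_add_vid() callback invoked above is expected to program
 * the VID into the NIC's receive filter and return 0 or a negative errno:
 *
 *	static int foo_vlan_rx_add_vid(struct net_device *dev,
 *				       __be16 proto, u16 vid)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		return foo_hw_vlan_filter_set(priv, ntohs(proto), vid, true);
 *	}
 */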

static int vlan_kill_rx_filter_info(struct net_device *dev, __be16 proto, u16 vid)
{
	if (!vlan_hw_filter_capable(dev, proto))
		return 0;

	if (netif_device_present(dev))
		return dev->netdev_ops->ndo_vlan_rx_kill_vid(dev, proto, vid);
	else
		return -ENODEV;
}

int vlan_filter_push_vids(struct vlan_info *vlan_info, __be16 proto)
{
	struct net_device *real_dev = vlan_info->real_dev;
	struct vlan_vid_info *vlan_vid_info;
	int err;

	list_for_each_entry(vlan_vid_info, &vlan_info->vid_list, list) {
		if (vlan_vid_info->proto == proto) {
			err = vlan_add_rx_filter_info(real_dev, proto,
						      vlan_vid_info->vid);
			if (err)
				goto unwind;
		}
	}

	return 0;

unwind:
	list_for_each_entry_continue_reverse(vlan_vid_info,
					     &vlan_info->vid_list, list) {
		if (vlan_vid_info->proto == proto)
			vlan_kill_rx_filter_info(real_dev, proto,
						 vlan_vid_info->vid);
	}

	return err;
}
EXPORT_SYMBOL(vlan_filter_push_vids);
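
/*
 * The unwind label above relies on list_for_each_entry_continue_reverse() to
 * roll back only the entries that were already programmed before the failure.
 * A generic sketch of the idiom (illustrative, with hypothetical do_one() and
 * undo_one() helpers):
 *
 *	list_for_each_entry(pos, head, list) {
 *		err = do_one(pos);
 *		if (err)
 *			goto unwind;
 *	}
 *	return 0;
 *
 * unwind:
 *	list_for_each_entry_continue_reverse(pos, head, list)
 *		undo_one(pos);
 *	return err;
 */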

void vlan_filter_drop_vids(struct vlan_info *vlan_info, __be16 proto)
{
	struct vlan_vid_info *vlan_vid_info;

	list_for_each_entry(vlan_vid_info, &vlan_info->vid_list, list)
		if (vlan_vid_info->proto == proto)
			vlan_kill_rx_filter_info(vlan_info->real_dev,
						 vlan_vid_info->proto,
						 vlan_vid_info->vid);
}
EXPORT_SYMBOL(vlan_filter_drop_vids);

static int __vlan_vid_add(struct vlan_info *vlan_info, __be16 proto, u16 vid,
			  struct vlan_vid_info **pvid_info)
{
	struct net_device *dev = vlan_info->real_dev;
	struct vlan_vid_info *vid_info;
	int err;

	vid_info = vlan_vid_info_alloc(proto, vid);
	if (!vid_info)
		return -ENOMEM;

	err = vlan_add_rx_filter_info(dev, proto, vid);
	if (err) {
		kfree(vid_info);
		return err;
	}

	list_add(&vid_info->list, &vlan_info->vid_list);
	vlan_info->nr_vids++;
	*pvid_info = vid_info;
	return 0;
}

int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;
	bool vlan_info_created = false;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info) {
		vlan_info = vlan_info_alloc(dev);
		if (!vlan_info)
			return -ENOMEM;
		vlan_info_created = true;
	}
	vid_info = vlan_vid_info_get(vlan_info, proto, vid);
	if (!vid_info) {
		err = __vlan_vid_add(vlan_info, proto, vid, &vid_info);
		if (err)
			goto out_free_vlan_info;
	}
	vid_info->refcount++;

	if (vlan_info_created)
		rcu_assign_pointer(dev->vlan_info, vlan_info);

	return 0;

out_free_vlan_info:
	if (vlan_info_created)
		kfree(vlan_info);
	return err;
}
EXPORT_SYMBOL(vlan_vid_add);
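
/*
 * Hedged usage sketch (hypothetical "foo" stacked driver): upper devices that
 * sit on top of another netdev commonly forward VID registrations to their
 * lower device from their own ndo_vlan_rx_add_vid() callback, which runs
 * under RTNL as vlan_vid_add() requires:
 *
 *	static int foo_vlan_rx_add_vid(struct net_device *dev,
 *				       __be16 proto, u16 vid)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		return vlan_vid_add(priv->lowerdev, proto, vid);
 *	}
 */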

static void __vlan_vid_del(struct vlan_info *vlan_info,
			   struct vlan_vid_info *vid_info)
{
	struct net_device *dev = vlan_info->real_dev;
	__be16 proto = vid_info->proto;
	u16 vid = vid_info->vid;
	int err;

	err = vlan_kill_rx_filter_info(dev, proto, vid);
	if (err)
		pr_warn("failed to kill vid %04x/%d for device %s\n",
			proto, vid, dev->name);

	list_del(&vid_info->list);
	kfree(vid_info);
	vlan_info->nr_vids--;
}

void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return;

	vid_info = vlan_vid_info_get(vlan_info, proto, vid);
	if (!vid_info)
		return;
	vid_info->refcount--;
	if (vid_info->refcount == 0) {
		__vlan_vid_del(vlan_info, vid_info);
		if (vlan_info->nr_vids == 0) {
			RCU_INIT_POINTER(dev->vlan_info, NULL);
			call_rcu(&vlan_info->rcu, vlan_info_rcu_free);
		}
	}
}
EXPORT_SYMBOL(vlan_vid_del);
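
/*
 * Each successful vlan_vid_add() must eventually be balanced by a matching
 * vlan_vid_del(); only when the refcount reaches zero is the VID removed
 * from the hardware filter, and only when the last VID is gone is vlan_info
 * freed (via RCU).  The matching callback for the sketch above would look
 * like this (hypothetical, illustration only):
 *
 *	static int foo_vlan_rx_kill_vid(struct net_device *dev,
 *					__be16 proto, u16 vid)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		vlan_vid_del(priv->lowerdev, proto, vid);
 *		return 0;
 *	}
 */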

int vlan_vids_add_by_dev(struct net_device *dev,
			 const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
		return 0;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		err = vlan_vid_add(dev, vid_info->proto, vid_info->vid);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	list_for_each_entry_continue_reverse(vid_info,
					     &vlan_info->vid_list,
					     list) {
		vlan_vid_del(dev, vid_info->proto, vid_info->vid);
	}

	return err;
}
EXPORT_SYMBOL(vlan_vids_add_by_dev);
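
/*
 * Hedged example: aggregating drivers (bonding/team-like) use this pair when
 * a lower device joins or leaves the aggregate, so every VID already known on
 * the upper device is mirrored onto the new lower device.  Sketch, with the
 * rest of the enslave/release paths omitted:
 *
 *	err = vlan_vids_add_by_dev(slave_dev, master_dev);
 *	if (err)
 *		goto err_unslave;
 *	...
 *	vlan_vids_del_by_dev(slave_dev, master_dev);	// on release or error
 */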

void vlan_vids_del_by_dev(struct net_device *dev,
			  const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
		return;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list)
		vlan_vid_del(dev, vid_info->proto, vid_info->vid);
}
EXPORT_SYMBOL(vlan_vids_del_by_dev);

bool vlan_uses_dev(const struct net_device *dev)
{
	struct vlan_info *vlan_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return false;
	return vlan_info->grp.nr_vlan_devs ? true : false;
}
EXPORT_SYMBOL(vlan_uses_dev);
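
/*
 * Hedged example: a caller that holds RTNL can use vlan_uses_dev() to refuse
 * an operation while VLAN devices are still stacked on a port (hypothetical
 * check, illustration only):
 *
 *	ASSERT_RTNL();
 *	if (vlan_uses_dev(port_dev))
 *		return -EBUSY;
 */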
429