// SPDX-License-Identifier: GPL-2.0
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include <linux/export.h>
#include "vlan.h"

bool vlan_do_receive(struct sk_buff **skbp)
{
	struct sk_buff *skb = *skbp;
	__be16 vlan_proto = skb->vlan_proto;
	u16 vlan_id = skb_vlan_tag_get_id(skb);
	struct net_device *vlan_dev;
	struct vlan_pcpu_stats *rx_stats;

	vlan_dev = vlan_find_dev(skb->dev, vlan_proto, vlan_id);
	if (!vlan_dev)
		return false;

	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return false;

	if (unlikely(!(vlan_dev->flags & IFF_UP))) {
		kfree_skb(skb);
		*skbp = NULL;
		return false;
	}

	skb->dev = vlan_dev;
	if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) {
		/* Our lower layer thinks this is not local; let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly. */
		if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, vlan_dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
	}

	if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR) &&
	    !netif_is_macvlan_port(vlan_dev) &&
	    !netif_is_bridge_port(vlan_dev)) {
		unsigned int offset = skb->data - skb_mac_header(skb);

		/*
		 * vlan_insert_inner_tag() expects skb->data to point at the
		 * mac header, so move skb->data there before calling it and
		 * restore the original position afterwards.
		 */
		skb_push(skb, offset);
		skb = *skbp = vlan_insert_inner_tag(skb, skb->vlan_proto,
						    skb->vlan_tci, skb->mac_len);
		if (!skb)
			return false;
		skb_pull(skb, offset + VLAN_HLEN);
		skb_reset_mac_len(skb);
	}

	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
	__vlan_hwaccel_clear_tag(skb);

	rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;
	if (skb->pkt_type == PACKET_MULTICAST)
		rx_stats->rx_multicast++;
	u64_stats_update_end(&rx_stats->syncp);

	return true;
}

/* Must be invoked with rcu_read_lock. */
struct net_device *__vlan_find_dev_deep_rcu(struct net_device *dev,
					    __be16 vlan_proto, u16 vlan_id)
{
	struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info);

	if (vlan_info) {
		return vlan_group_get_device(&vlan_info->grp,
					     vlan_proto, vlan_id);
	} else {
		/*
		 * Lower devices of master uppers (bonding, team) do not have
		 * a grp assigned to themselves; the grp is assigned to the
		 * upper device instead.
		 */
		struct net_device *upper_dev;

		upper_dev = netdev_master_upper_dev_get_rcu(dev);
		if (upper_dev)
			return __vlan_find_dev_deep_rcu(upper_dev,
							vlan_proto, vlan_id);
	}

	return NULL;
}
EXPORT_SYMBOL(__vlan_find_dev_deep_rcu);

struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	struct net_device *ret = vlan_dev_priv(dev)->real_dev;

	while (is_vlan_dev(ret))
		ret = vlan_dev_priv(ret)->real_dev;

	return ret;
}
EXPORT_SYMBOL(vlan_dev_real_dev);

u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	return vlan_dev_priv(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);

__be16 vlan_dev_vlan_proto(const struct net_device *dev)
{
	return vlan_dev_priv(dev)->vlan_proto;
}
EXPORT_SYMBOL(vlan_dev_vlan_proto);

/*
 * vlan info and vid list
 */

static void vlan_group_free(struct vlan_group *grp)
{
	int i, j;

	for (i = 0; i < VLAN_PROTO_NUM; i++)
		for (j = 0; j < VLAN_GROUP_ARRAY_SPLIT_PARTS; j++)
			kfree(grp->vlan_devices_arrays[i][j]);
}

static void vlan_info_free(struct vlan_info *vlan_info)
{
	vlan_group_free(&vlan_info->grp);
	kfree(vlan_info);
}

static void vlan_info_rcu_free(struct rcu_head *rcu)
{
	vlan_info_free(container_of(rcu, struct vlan_info, rcu));
}

static struct vlan_info *vlan_info_alloc(struct net_device *dev)
{
	struct vlan_info *vlan_info;

	vlan_info = kzalloc(sizeof(struct vlan_info), GFP_KERNEL);
	if (!vlan_info)
		return NULL;

	vlan_info->real_dev = dev;
	INIT_LIST_HEAD(&vlan_info->vid_list);
	return vlan_info;
}

struct vlan_vid_info {
	struct list_head list;
	__be16 proto;
	u16 vid;
	int refcount;
};

static bool vlan_hw_filter_capable(const struct net_device *dev, __be16 proto)
{
	if (proto == htons(ETH_P_8021Q) &&
	    dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
		return true;
	if (proto == htons(ETH_P_8021AD) &&
	    dev->features & NETIF_F_HW_VLAN_STAG_FILTER)
		return true;
	return false;
}

static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info,
					       __be16 proto, u16 vid)
{
	struct vlan_vid_info *vid_info;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		if (vid_info->proto == proto && vid_info->vid == vid)
			return vid_info;
	}
	return NULL;
}

static struct vlan_vid_info *vlan_vid_info_alloc(__be16 proto, u16 vid)
{
	struct vlan_vid_info *vid_info;

	vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL);
	if (!vid_info)
		return NULL;
	vid_info->proto = proto;
	vid_info->vid = vid;

	return vid_info;
}

static int vlan_add_rx_filter_info(struct net_device *dev, __be16 proto, u16 vid)
{
	if (!vlan_hw_filter_capable(dev, proto))
		return 0;

	if (netif_device_present(dev))
		return dev->netdev_ops->ndo_vlan_rx_add_vid(dev, proto, vid);
	else
		return -ENODEV;
}

static int vlan_kill_rx_filter_info(struct net_device *dev, __be16 proto, u16 vid)
{
	if (!vlan_hw_filter_capable(dev, proto))
		return 0;

	if (netif_device_present(dev))
		return dev->netdev_ops->ndo_vlan_rx_kill_vid(dev, proto, vid);
	else
		return -ENODEV;
}

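/**
 * vlan_for_each - iterate over every VLAN id configured on a device
 * @dev: real device whose VLAN ids are walked
 * @action: callback invoked once per id; a non-zero return value stops
 *	the walk and is propagated back to the caller
 * @arg: opaque pointer passed through to @action
 *
 * Must be called with rtnl_lock held. A minimal, purely illustrative
 * (hypothetical) callback that counts the configured ids might look like:
 *
 *	static int count_vids(struct net_device *vdev, int vid, void *arg)
 *	{
 *		(*(unsigned int *)arg)++;
 *		return 0;
 *	}
 */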
int vlan_for_each(struct net_device *dev,
		  int (*action)(struct net_device *dev, int vid, void *arg),
		  void *arg)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;
	struct net_device *vdev;
	int ret;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return 0;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		vdev = vlan_group_get_device(&vlan_info->grp, vid_info->proto,
					     vid_info->vid);
		ret = action(vdev, vid_info->vid, arg);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(vlan_for_each);

int vlan_filter_push_vids(struct vlan_info *vlan_info, __be16 proto)
{
	struct net_device *real_dev = vlan_info->real_dev;
	struct vlan_vid_info *vlan_vid_info;
	int err;

	list_for_each_entry(vlan_vid_info, &vlan_info->vid_list, list) {
		if (vlan_vid_info->proto == proto) {
			err = vlan_add_rx_filter_info(real_dev, proto,
						      vlan_vid_info->vid);
			if (err)
				goto unwind;
		}
	}

	return 0;

unwind:
	list_for_each_entry_continue_reverse(vlan_vid_info,
					     &vlan_info->vid_list, list) {
		if (vlan_vid_info->proto == proto)
			vlan_kill_rx_filter_info(real_dev, proto,
						 vlan_vid_info->vid);
	}

	return err;
}
EXPORT_SYMBOL(vlan_filter_push_vids);

void vlan_filter_drop_vids(struct vlan_info *vlan_info, __be16 proto)
{
	struct vlan_vid_info *vlan_vid_info;

	list_for_each_entry(vlan_vid_info, &vlan_info->vid_list, list)
		if (vlan_vid_info->proto == proto)
			vlan_kill_rx_filter_info(vlan_info->real_dev,
						 vlan_vid_info->proto,
						 vlan_vid_info->vid);
}
EXPORT_SYMBOL(vlan_filter_drop_vids);

static int __vlan_vid_add(struct vlan_info *vlan_info, __be16 proto, u16 vid,
			  struct vlan_vid_info **pvid_info)
{
	struct net_device *dev = vlan_info->real_dev;
	struct vlan_vid_info *vid_info;
	int err;

	vid_info = vlan_vid_info_alloc(proto, vid);
	if (!vid_info)
		return -ENOMEM;

	err = vlan_add_rx_filter_info(dev, proto, vid);
	if (err) {
		kfree(vid_info);
		return err;
	}

	list_add(&vid_info->list, &vlan_info->vid_list);
	vlan_info->nr_vids++;
	*pvid_info = vid_info;
	return 0;
}

int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;
	bool vlan_info_created = false;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info) {
		vlan_info = vlan_info_alloc(dev);
		if (!vlan_info)
			return -ENOMEM;
		vlan_info_created = true;
	}
	vid_info = vlan_vid_info_get(vlan_info, proto, vid);
	if (!vid_info) {
		err = __vlan_vid_add(vlan_info, proto, vid, &vid_info);
		if (err)
			goto out_free_vlan_info;
	}
	vid_info->refcount++;

	if (vlan_info_created)
		rcu_assign_pointer(dev->vlan_info, vlan_info);

	return 0;

out_free_vlan_info:
	if (vlan_info_created)
		kfree(vlan_info);
	return err;
}
EXPORT_SYMBOL(vlan_vid_add);

static void __vlan_vid_del(struct vlan_info *vlan_info,
			   struct vlan_vid_info *vid_info)
{
	struct net_device *dev = vlan_info->real_dev;
	__be16 proto = vid_info->proto;
	u16 vid = vid_info->vid;
	int err;

	err = vlan_kill_rx_filter_info(dev, proto, vid);
	if (err && dev->reg_state != NETREG_UNREGISTERING)
		netdev_warn(dev, "failed to kill vid %04x/%d\n", proto, vid);

	list_del(&vid_info->list);
	kfree(vid_info);
	vlan_info->nr_vids--;
}

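/**
 * vlan_vid_del - drop one reference on a VLAN id on a real device
 * @dev: real device the id was added to
 * @proto: VLAN protocol, in network byte order
 * @vid: VLAN id
 *
 * Undoes a prior vlan_vid_add(). When the last reference to an id is
 * dropped, the hardware filter entry is released; when the last id on
 * @dev goes away, the vlan_info itself is freed after an RCU grace
 * period. Must be called with rtnl_lock held.
 */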
void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return;

	vid_info = vlan_vid_info_get(vlan_info, proto, vid);
	if (!vid_info)
		return;
	vid_info->refcount--;
	if (vid_info->refcount == 0) {
		__vlan_vid_del(vlan_info, vid_info);
		if (vlan_info->nr_vids == 0) {
			RCU_INIT_POINTER(dev->vlan_info, NULL);
			call_rcu(&vlan_info->rcu, vlan_info_rcu_free);
		}
	}
}
EXPORT_SYMBOL(vlan_vid_del);

int vlan_vids_add_by_dev(struct net_device *dev,
			 const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
		return 0;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		err = vlan_vid_add(dev, vid_info->proto, vid_info->vid);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	list_for_each_entry_continue_reverse(vid_info,
					     &vlan_info->vid_list,
					     list) {
		vlan_vid_del(dev, vid_info->proto, vid_info->vid);
	}

	return err;
}
EXPORT_SYMBOL(vlan_vids_add_by_dev);

void vlan_vids_del_by_dev(struct net_device *dev,
			  const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
		return;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list)
		vlan_vid_del(dev, vid_info->proto, vid_info->vid);
}
EXPORT_SYMBOL(vlan_vids_del_by_dev);

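/**
 * vlan_uses_dev - test whether any VLAN device is stacked on a device
 * @dev: device to test
 *
 * Returns true if at least one VLAN device has been registered on top
 * of @dev. Must be called with rtnl_lock held.
 */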
bool vlan_uses_dev(const struct net_device *dev)
{
	struct vlan_info *vlan_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return false;
	return vlan_info->grp.nr_vlan_devs ? true : false;
}
EXPORT_SYMBOL(vlan_uses_dev);

static struct sk_buff *vlan_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	const struct packet_offload *ptype;
	unsigned int hlen, off_vlan;
	struct sk_buff *pp = NULL;
	struct vlan_hdr *vhdr;
	struct sk_buff *p;
	__be16 type;
	int flush = 1;

	off_vlan = skb_gro_offset(skb);
	hlen = off_vlan + sizeof(*vhdr);
	vhdr = skb_gro_header_fast(skb, off_vlan);
	if (skb_gro_header_hard(skb, hlen)) {
		vhdr = skb_gro_header_slow(skb, hlen, off_vlan);
		if (unlikely(!vhdr))
			goto out;
	}

	type = vhdr->h_vlan_encapsulated_proto;

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (!ptype)
		goto out_unlock;

	flush = 0;

	list_for_each_entry(p, head, list) {
		struct vlan_hdr *vhdr2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		vhdr2 = (struct vlan_hdr *)(p->data + off_vlan);
		if (compare_vlan_header(vhdr, vhdr2))
			NAPI_GRO_CB(p)->same_flow = 0;
	}

	skb_gro_pull(skb, sizeof(*vhdr));
	skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);

out_unlock:
	rcu_read_unlock();
out:
	skb_gro_flush_final(skb, pp, flush);

	return pp;
}

static int vlan_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + nhoff);
	__be16 type = vhdr->h_vlan_encapsulated_proto;
	struct packet_offload *ptype;
	int err = -ENOENT;

	rcu_read_lock();
	ptype = gro_find_complete_by_type(type);
	if (ptype)
		err = ptype->callbacks.gro_complete(skb, nhoff + sizeof(*vhdr));

	rcu_read_unlock();
	return err;
}

static struct packet_offload vlan_packet_offloads[] __read_mostly = {
	{
		.type = cpu_to_be16(ETH_P_8021Q),
		.priority = 10,
		.callbacks = {
			.gro_receive = vlan_gro_receive,
			.gro_complete = vlan_gro_complete,
		},
	},
	{
		.type = cpu_to_be16(ETH_P_8021AD),
		.priority = 10,
		.callbacks = {
			.gro_receive = vlan_gro_receive,
			.gro_complete = vlan_gro_complete,
		},
	},
};

static int __init vlan_offload_init(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
		dev_add_offload(&vlan_packet_offloads[i]);

	return 0;
}

fs_initcall(vlan_offload_init);