#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include <linux/export.h>
#include "vlan.h"

/*
 * Deliver the skb to the VLAN device matching skb->vlan_proto/vlan_id, if
 * one is configured on skb->dev.  Returns true when skb->dev has been
 * switched to the VLAN device and the caller should reprocess the packet;
 * returns false (with *skbp possibly set to NULL if the skb was dropped)
 * when the packet is not for a known VLAN.
 */
bool vlan_do_receive(struct sk_buff **skbp)
{
	struct sk_buff *skb = *skbp;
	__be16 vlan_proto = skb->vlan_proto;
	u16 vlan_id = skb_vlan_tag_get_id(skb);
	struct net_device *vlan_dev;
	struct vlan_pcpu_stats *rx_stats;

	vlan_dev = vlan_find_dev(skb->dev, vlan_proto, vlan_id);
	if (!vlan_dev)
		return false;

	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return false;

	if (unlikely(!(vlan_dev->flags & IFF_UP))) {
		kfree_skb(skb);
		*skbp = NULL;
		return false;
	}

	skb->dev = vlan_dev;
	if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) {
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly. */
		if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, vlan_dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
	}

	if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR) &&
	    !netif_is_macvlan_port(vlan_dev) &&
	    !netif_is_bridge_port(vlan_dev)) {
		unsigned int offset = skb->data - skb_mac_header(skb);

		/*
		 * vlan_insert_tag() expects skb->data to point at the mac
		 * header, so move skb->data there before calling it and
		 * restore the original position afterwards.
		 */
		skb_push(skb, offset);
		skb = *skbp = vlan_insert_tag(skb, skb->vlan_proto,
					      skb->vlan_tci);
		if (!skb)
			return false;
		skb_pull(skb, offset + VLAN_HLEN);
		skb_reset_mac_len(skb);
	}

	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
	skb->vlan_tci = 0;

	rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;
	if (skb->pkt_type == PACKET_MULTICAST)
		rx_stats->rx_multicast++;
	u64_stats_update_end(&rx_stats->syncp);

	return true;
}

/* Must be invoked with rcu_read_lock held. */
struct net_device *__vlan_find_dev_deep_rcu(struct net_device *dev,
					    __be16 vlan_proto, u16 vlan_id)
{
	struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info);

	if (vlan_info) {
		return vlan_group_get_device(&vlan_info->grp,
					     vlan_proto, vlan_id);
	} else {
		/*
		 * Lower devices of master uppers (bonding, team) do not have
		 * a grp assigned to themselves; the grp is assigned to the
		 * upper device instead.
		 */
		struct net_device *upper_dev;

		upper_dev = netdev_master_upper_dev_get_rcu(dev);
		if (upper_dev)
			return __vlan_find_dev_deep_rcu(upper_dev,
							vlan_proto, vlan_id);
	}

	return NULL;
}
EXPORT_SYMBOL(__vlan_find_dev_deep_rcu);

struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	struct net_device *ret = vlan_dev_priv(dev)->real_dev;

	while (is_vlan_dev(ret))
		ret = vlan_dev_priv(ret)->real_dev;

	return ret;
}
EXPORT_SYMBOL(vlan_dev_real_dev);

u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	return vlan_dev_priv(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);

__be16 vlan_dev_vlan_proto(const struct net_device *dev)
{
	return vlan_dev_priv(dev)->vlan_proto;
}
EXPORT_SYMBOL(vlan_dev_vlan_proto);

/*
 * vlan info and vid list
 */

static void vlan_group_free(struct vlan_group *grp)
{
	int i, j;

	for (i = 0; i < VLAN_PROTO_NUM; i++)
		for (j = 0; j < VLAN_GROUP_ARRAY_SPLIT_PARTS; j++)
			kfree(grp->vlan_devices_arrays[i][j]);
}

static void vlan_info_free(struct vlan_info *vlan_info)
{
	vlan_group_free(&vlan_info->grp);
	kfree(vlan_info);
}

static void vlan_info_rcu_free(struct rcu_head *rcu)
{
	vlan_info_free(container_of(rcu, struct vlan_info, rcu));
}

static struct vlan_info *vlan_info_alloc(struct net_device *dev)
{
	struct vlan_info *vlan_info;

	vlan_info = kzalloc(sizeof(struct vlan_info), GFP_KERNEL);
	if (!vlan_info)
		return NULL;

	vlan_info->real_dev = dev;
	INIT_LIST_HEAD(&vlan_info->vid_list);
	return vlan_info;
}

struct vlan_vid_info {
	struct list_head list;
	__be16 proto;
	u16 vid;
	int refcount;
};

static bool vlan_hw_filter_capable(const struct net_device *dev,
				   const struct vlan_vid_info *vid_info)
{
	if (vid_info->proto == htons(ETH_P_8021Q) &&
	    dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
		return true;
	if (vid_info->proto == htons(ETH_P_8021AD) &&
	    dev->features & NETIF_F_HW_VLAN_STAG_FILTER)
		return true;
	return false;
}

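/*
 * Illustration only, not part of the original file: the
 * vlan_hw_filter_capable() check above is what routes __vlan_vid_add() and
 * __vlan_vid_del() below into the driver.  A driver advertising
 * NETIF_F_HW_VLAN_CTAG_FILTER is expected to implement the matching ndo
 * callbacks, roughly as in this hypothetical sketch (the foo_* names and
 * struct foo_priv are made up):
 *
 *	static int foo_vlan_rx_add_vid(struct net_device *dev,
 *				       __be16 proto, u16 vid)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		// mark the VID and reprogram the HW filter table
 *		set_bit(vid, priv->active_vlans);
 *		return foo_hw_sync_vlan_filter(priv);
 *	}
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		.ndo_vlan_rx_add_vid	= foo_vlan_rx_add_vid,
 *		.ndo_vlan_rx_kill_vid	= foo_vlan_rx_kill_vid,
 *	};
 *
 * Returning nonzero from ndo_vlan_rx_add_vid makes __vlan_vid_add() below
 * fail the whole vlan_vid_add() call, so drivers should only report errors
 * they cannot recover from.
 */
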
static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info,
					       __be16 proto, u16 vid)
{
	struct vlan_vid_info *vid_info;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		if (vid_info->proto == proto && vid_info->vid == vid)
			return vid_info;
	}
	return NULL;
}

static struct vlan_vid_info *vlan_vid_info_alloc(__be16 proto, u16 vid)
{
	struct vlan_vid_info *vid_info;

	vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL);
	if (!vid_info)
		return NULL;
	vid_info->proto = proto;
	vid_info->vid = vid;

	return vid_info;
}

static int __vlan_vid_add(struct vlan_info *vlan_info, __be16 proto, u16 vid,
			  struct vlan_vid_info **pvid_info)
{
	struct net_device *dev = vlan_info->real_dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	struct vlan_vid_info *vid_info;
	int err;

	vid_info = vlan_vid_info_alloc(proto, vid);
	if (!vid_info)
		return -ENOMEM;

	if (vlan_hw_filter_capable(dev, vid_info)) {
		if (netif_device_present(dev))
			err = ops->ndo_vlan_rx_add_vid(dev, proto, vid);
		else
			err = -ENODEV;
		if (err) {
			kfree(vid_info);
			return err;
		}
	}
	list_add(&vid_info->list, &vlan_info->vid_list);
	vlan_info->nr_vids++;
	*pvid_info = vid_info;
	return 0;
}

int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;
	bool vlan_info_created = false;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info) {
		vlan_info = vlan_info_alloc(dev);
		if (!vlan_info)
			return -ENOMEM;
		vlan_info_created = true;
	}
	vid_info = vlan_vid_info_get(vlan_info, proto, vid);
	if (!vid_info) {
		err = __vlan_vid_add(vlan_info, proto, vid, &vid_info);
		if (err)
			goto out_free_vlan_info;
	}
	vid_info->refcount++;

	if (vlan_info_created)
		rcu_assign_pointer(dev->vlan_info, vlan_info);

	return 0;

out_free_vlan_info:
	if (vlan_info_created)
		kfree(vlan_info);
	return err;
}
EXPORT_SYMBOL(vlan_vid_add);

static void __vlan_vid_del(struct vlan_info *vlan_info,
			   struct vlan_vid_info *vid_info)
{
	struct net_device *dev = vlan_info->real_dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	__be16 proto = vid_info->proto;
	u16 vid = vid_info->vid;
	int err;

	if (vlan_hw_filter_capable(dev, vid_info)) {
		if (netif_device_present(dev))
			err = ops->ndo_vlan_rx_kill_vid(dev, proto, vid);
		else
			err = -ENODEV;
		if (err) {
			pr_warn("failed to kill vid %04x/%d for device %s\n",
				ntohs(proto), vid, dev->name);
		}
	}
	list_del(&vid_info->list);
	kfree(vid_info);
	vlan_info->nr_vids--;
}

void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return;

	vid_info = vlan_vid_info_get(vlan_info, proto, vid);
	if (!vid_info)
		return;
	vid_info->refcount--;
	if (vid_info->refcount == 0) {
		__vlan_vid_del(vlan_info, vid_info);
		if (vlan_info->nr_vids == 0) {
			RCU_INIT_POINTER(dev->vlan_info, NULL);
			call_rcu(&vlan_info->rcu, vlan_info_rcu_free);
		}
	}
}
EXPORT_SYMBOL(vlan_vid_del);

int vlan_vids_add_by_dev(struct net_device *dev,
			 const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
		return 0;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		err = vlan_vid_add(dev, vid_info->proto, vid_info->vid);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	list_for_each_entry_continue_reverse(vid_info,
					     &vlan_info->vid_list,
					     list) {
		vlan_vid_del(dev, vid_info->proto, vid_info->vid);
	}

	return err;
}
EXPORT_SYMBOL(vlan_vids_add_by_dev);

void vlan_vids_del_by_dev(struct net_device *dev,
			  const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
		return;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list)
		vlan_vid_del(dev, vid_info->proto, vid_info->vid);
}
EXPORT_SYMBOL(vlan_vids_del_by_dev);

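/*
 * Illustration only, not part of the original file: the vlan_vids_*_by_dev()
 * helpers above exist for stacked "master" devices (bonding, team) that must
 * mirror the master's VLAN filter onto each lower device.  A hypothetical
 * enslave path (foo_enslave() is made up; the helpers require RTNL):
 *
 *	static int foo_enslave(struct net_device *master,
 *			       struct net_device *slave)
 *	{
 *		int err;
 *
 *		ASSERT_RTNL();
 *
 *		// replay every VID already configured on the master
 *		err = vlan_vids_add_by_dev(slave, master);
 *		if (err)
 *			return err;
 *		...
 *		return 0;
 *	}
 *
 * The release path calls vlan_vids_del_by_dev(slave, master) to drop the
 * references again; the refcounting in vlan_vid_add()/vlan_vid_del() keeps
 * a VID alive while any user (VLAN device or master) still needs it.
 */
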
bool vlan_uses_dev(const struct net_device *dev)
{
	struct vlan_info *vlan_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return false;
	return vlan_info->grp.nr_vlan_devs ? true : false;
}
EXPORT_SYMBOL(vlan_uses_dev);
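
/*
 * Illustration only, not part of the original file: vlan_do_receive() at the
 * top of this file is consumed by the core RX path.  A simplified sketch of
 * the call site in __netif_receive_skb_core() (not a verbatim copy of
 * net/core/dev.c):
 *
 *	if (skb_vlan_tag_present(skb)) {
 *		...
 *		if (vlan_do_receive(&skb))
 *			goto another_round;	// re-demux on the VLAN device
 *		else if (unlikely(!skb))
 *			goto out;		// dropped (e.g. VLAN dev down)
 *	}
 *
 * On a true return, skb->dev points at the VLAN device and the tag has been
 * cleared, so the packet makes another pass through protocol demux as if it
 * had arrived on the VLAN interface itself.
 */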