#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include <linux/export.h>
#include "vlan.h"

bool vlan_do_receive(struct sk_buff **skbp)
{
	struct sk_buff *skb = *skbp;
	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
	struct net_device *vlan_dev;
	struct vlan_pcpu_stats *rx_stats;

	vlan_dev = vlan_find_dev(skb->dev, vlan_id);
	if (!vlan_dev)
		return false;

	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return false;

	skb->dev = vlan_dev;
	if (skb->pkt_type == PACKET_OTHERHOST) {
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly. */
		if (ether_addr_equal(eth_hdr(skb)->h_dest, vlan_dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
	}

	if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
		unsigned int offset = skb->data - skb_mac_header(skb);

		/*
		 * vlan_insert_tag() expects skb->data to point at the mac
		 * header, so move skb->data there before calling it and
		 * restore the original position afterwards.
		 */
		skb_push(skb, offset);
		skb = *skbp = vlan_insert_tag(skb, skb->vlan_tci);
		if (!skb)
			return false;
		skb_pull(skb, offset + VLAN_HLEN);
		skb_reset_mac_len(skb);
	}

	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
	skb->vlan_tci = 0;

	rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;
	if (skb->pkt_type == PACKET_MULTICAST)
		rx_stats->rx_multicast++;
	u64_stats_update_end(&rx_stats->syncp);

	return true;
}
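
/*
 * Caller sketch (illustrative, not part of this file): the core RX path
 * hands tagged packets to vlan_do_receive() roughly the way
 * __netif_receive_skb() does in this kernel generation.  On success the
 * skb has been retargeted at the VLAN device and is reprocessed; a NULL
 * skb means skb_share_check() failed and the packet is gone.
 *
 *	if (vlan_tx_tag_present(skb)) {
 *		if (vlan_do_receive(&skb))
 *			goto another_round;	(redo RX with skb->dev = vlan dev)
 *		else if (unlikely(!skb))
 *			goto out;		(clone failed, packet dropped)
 *	}
 */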

/* Must be invoked with rcu_read_lock or with RTNL. */
struct net_device *__vlan_find_dev_deep(struct net_device *real_dev,
					u16 vlan_id)
{
	struct vlan_info *vlan_info = rcu_dereference_rtnl(real_dev->vlan_info);

	if (vlan_info) {
		return vlan_group_get_device(&vlan_info->grp, vlan_id);
	} else {
		/*
		 * Bonding slaves do not have a grp assigned to themselves;
		 * the grp is assigned to the bonding master instead.
		 */
		if (netif_is_bond_slave(real_dev))
			return __vlan_find_dev_deep(real_dev->master, vlan_id);
	}

	return NULL;
}
EXPORT_SYMBOL(__vlan_find_dev_deep);

struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	return vlan_dev_priv(dev)->real_dev;
}
EXPORT_SYMBOL(vlan_dev_real_dev);

u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	return vlan_dev_priv(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);

static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
{
	if (skb_cow(skb, skb_headroom(skb)) < 0)
		return NULL;
	memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
	skb->mac_header += VLAN_HLEN;
	return skb;
}

struct sk_buff *vlan_untag(struct sk_buff *skb)
{
	struct vlan_hdr *vhdr;
	u16 vlan_tci;

	if (unlikely(vlan_tx_tag_present(skb))) {
		/* vlan_tci is already set up, so leave this for another time */
		return skb;
	}

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto err_free;

	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		goto err_free;

	vhdr = (struct vlan_hdr *) skb->data;
	vlan_tci = ntohs(vhdr->h_vlan_TCI);
	__vlan_hwaccel_put_tag(skb, vlan_tci);

	skb_pull_rcsum(skb, VLAN_HLEN);
	vlan_set_encap_proto(skb, vhdr);

	skb = vlan_reorder_header(skb);
	if (unlikely(!skb))
		goto err_free;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	return skb;

err_free:
	kfree_skb(skb);
	return NULL;
}
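
/*
 * Caller sketch (illustrative, not part of this file): for hardware that
 * does not strip the tag itself, the RX path software-untags 802.1Q
 * frames before protocol demux, along these lines:
 *
 *	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) &&
 *	    !vlan_tx_tag_present(skb)) {
 *		skb = vlan_untag(skb);
 *		if (unlikely(!skb))
 *			goto out;	(allocation failure, frame dropped)
 *	}
 */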

/*
 * vlan info and vid list
 */

static void vlan_group_free(struct vlan_group *grp)
{
	int i;

	for (i = 0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++)
		kfree(grp->vlan_devices_arrays[i]);
}

static void vlan_info_free(struct vlan_info *vlan_info)
{
	vlan_group_free(&vlan_info->grp);
	kfree(vlan_info);
}

static void vlan_info_rcu_free(struct rcu_head *rcu)
{
	vlan_info_free(container_of(rcu, struct vlan_info, rcu));
}

static struct vlan_info *vlan_info_alloc(struct net_device *dev)
{
	struct vlan_info *vlan_info;

	vlan_info = kzalloc(sizeof(struct vlan_info), GFP_KERNEL);
	if (!vlan_info)
		return NULL;

	vlan_info->real_dev = dev;
	INIT_LIST_HEAD(&vlan_info->vid_list);
	return vlan_info;
}

struct vlan_vid_info {
	struct list_head list;
	unsigned short vid;
	int refcount;
};

static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info,
					       unsigned short vid)
{
	struct vlan_vid_info *vid_info;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		if (vid_info->vid == vid)
			return vid_info;
	}
	return NULL;
}

static struct vlan_vid_info *vlan_vid_info_alloc(unsigned short vid)
{
	struct vlan_vid_info *vid_info;

	vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL);
	if (!vid_info)
		return NULL;
	vid_info->vid = vid;

	return vid_info;
}

static int __vlan_vid_add(struct vlan_info *vlan_info, unsigned short vid,
			  struct vlan_vid_info **pvid_info)
{
	struct net_device *dev = vlan_info->real_dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	struct vlan_vid_info *vid_info;
	int err;

	vid_info = vlan_vid_info_alloc(vid);
	if (!vid_info)
		return -ENOMEM;

	if ((dev->features & NETIF_F_HW_VLAN_FILTER) &&
	    ops->ndo_vlan_rx_add_vid) {
		err = ops->ndo_vlan_rx_add_vid(dev, vid);
		if (err) {
			kfree(vid_info);
			return err;
		}
	}
	list_add(&vid_info->list, &vlan_info->vid_list);
	vlan_info->nr_vids++;
	*pvid_info = vid_info;
	return 0;
}

int vlan_vid_add(struct net_device *dev, unsigned short vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;
	bool vlan_info_created = false;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info) {
		vlan_info = vlan_info_alloc(dev);
		if (!vlan_info)
			return -ENOMEM;
		vlan_info_created = true;
	}
	vid_info = vlan_vid_info_get(vlan_info, vid);
	if (!vid_info) {
		err = __vlan_vid_add(vlan_info, vid, &vid_info);
		if (err)
			goto out_free_vlan_info;
	}
	vid_info->refcount++;

	if (vlan_info_created)
		rcu_assign_pointer(dev->vlan_info, vlan_info);

	return 0;

out_free_vlan_info:
	if (vlan_info_created)
		kfree(vlan_info);
	return err;
}
EXPORT_SYMBOL(vlan_vid_add);

static void __vlan_vid_del(struct vlan_info *vlan_info,
			   struct vlan_vid_info *vid_info)
{
	struct net_device *dev = vlan_info->real_dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	unsigned short vid = vid_info->vid;
	int err;

	if ((dev->features & NETIF_F_HW_VLAN_FILTER) &&
	    ops->ndo_vlan_rx_kill_vid) {
		err = ops->ndo_vlan_rx_kill_vid(dev, vid);
		if (err) {
			pr_warn("failed to kill vid %d for device %s\n",
				vid, dev->name);
		}
	}
	list_del(&vid_info->list);
	kfree(vid_info);
	vlan_info->nr_vids--;
}

void vlan_vid_del(struct net_device *dev, unsigned short vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return;

	vid_info = vlan_vid_info_get(vlan_info, vid);
	if (!vid_info)
		return;
	vid_info->refcount--;
	if (vid_info->refcount == 0) {
		__vlan_vid_del(vlan_info, vid_info);
		if (vlan_info->nr_vids == 0) {
			RCU_INIT_POINTER(dev->vlan_info, NULL);
			call_rcu(&vlan_info->rcu, vlan_info_rcu_free);
		}
	}
}
EXPORT_SYMBOL(vlan_vid_del);

int vlan_vids_add_by_dev(struct net_device *dev,
			 const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
		return 0;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		err = vlan_vid_add(dev, vid_info->vid);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	list_for_each_entry_continue_reverse(vid_info,
					     &vlan_info->vid_list,
					     list) {
		vlan_vid_del(dev, vid_info->vid);
	}

	return err;
}
EXPORT_SYMBOL(vlan_vids_add_by_dev);

void vlan_vids_del_by_dev(struct net_device *dev,
			  const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
		return;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list)
		vlan_vid_del(dev, vid_info->vid);
}
EXPORT_SYMBOL(vlan_vids_del_by_dev);
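
/*
 * Usage sketch (illustrative, not part of this file): a stacking driver
 * such as bonding copies its VLAN filter entries onto a newly enslaved
 * lower device under RTNL, and removes them again on release.  The
 * "bond_dev"/"slave_dev" names are hypothetical.
 *
 *	ASSERT_RTNL();
 *	err = vlan_vids_add_by_dev(slave_dev, bond_dev);
 *	if (err)
 *		return err;	(nothing to unwind, the call unwinds itself)
 *	...
 *	vlan_vids_del_by_dev(slave_dev, bond_dev);
 */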

bool vlan_uses_dev(const struct net_device *dev)
{
	struct vlan_info *vlan_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return false;
	return vlan_info->grp.nr_vlan_devs ? true : false;
}
EXPORT_SYMBOL(vlan_uses_dev);
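
/*
 * Usage sketch (illustrative, not part of this file): a lower driver
 * holding RTNL can use vlan_uses_dev() to refuse an operation while
 * VLAN devices are still stacked on top of it.  The -EBUSY policy shown
 * is an assumption, not something this file mandates.
 *
 *	ASSERT_RTNL();
 *	if (vlan_uses_dev(dev))
 *		return -EBUSY;
 */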