#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include <linux/export.h>
#include "vlan.h"

/*
 * vlan_do_receive - deliver a tagged skb to the matching vlan device
 *
 * Looks up the vlan device for the VID in skb->vlan_tci on top of skb->dev.
 * Returns true if skb->dev was re-targeted to a vlan device (the caller
 * should then re-run protocol demultiplexing), false otherwise. *skbp may
 * be replaced, or set to NULL on allocation failure.
 */
bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
{
	struct sk_buff *skb = *skbp;
	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
	struct net_device *vlan_dev;
	struct vlan_pcpu_stats *rx_stats;

	vlan_dev = vlan_find_dev(skb->dev, vlan_id);
	if (!vlan_dev) {
		/* Only the last call to vlan_do_receive() should change
		 * pkt_type to PACKET_OTHERHOST
		 */
		if (vlan_id && last_handler)
			skb->pkt_type = PACKET_OTHERHOST;
		return false;
	}

	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return false;

	skb->dev = vlan_dev;
	if (skb->pkt_type == PACKET_OTHERHOST) {
		/* Our lower layer thinks this is not local; let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly. */
		if (!compare_ether_addr(eth_hdr(skb)->h_dest,
					vlan_dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
	}

	if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
		unsigned int offset = skb->data - skb_mac_header(skb);

		/*
		 * vlan_insert_tag() expects skb->data to point at the mac
		 * header, so move skb->data there before calling it and
		 * restore the original position afterwards.
		 */
		skb_push(skb, offset);
		skb = *skbp = vlan_insert_tag(skb, skb->vlan_tci);
		if (!skb)
			return false;
		skb_pull(skb, offset + VLAN_HLEN);
		skb_reset_mac_len(skb);
	}

	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
	skb->vlan_tci = 0;

	rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;
	if (skb->pkt_type == PACKET_MULTICAST)
		rx_stats->rx_multicast++;
	u64_stats_update_end(&rx_stats->syncp);

	return true;
}

/* Must be invoked with rcu_read_lock() or with RTNL held. */
struct net_device *__vlan_find_dev_deep(struct net_device *real_dev,
					u16 vlan_id)
{
	struct vlan_info *vlan_info = rcu_dereference_rtnl(real_dev->vlan_info);

	if (vlan_info) {
		return vlan_group_get_device(&vlan_info->grp, vlan_id);
	} else {
		/*
		 * Bonding slaves do not have a grp assigned to themselves;
		 * the grp is assigned to the bonding master instead.
		 */
		if (netif_is_bond_slave(real_dev))
			return __vlan_find_dev_deep(real_dev->master, vlan_id);
	}

	return NULL;
}
EXPORT_SYMBOL(__vlan_find_dev_deep);
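/*
 * Illustrative sketch, not part of the original file: resolving a
 * (real_dev, vid) pair to its vlan net_device under the RCU read lock,
 * as __vlan_find_dev_deep() requires. The function name is hypothetical
 * and marked __maybe_unused since nothing in this file calls it.
 */
static struct net_device * __maybe_unused
example_lookup_vlan_dev(struct net_device *real_dev, u16 vid)
{
	struct net_device *vlan_dev;

	rcu_read_lock();
	vlan_dev = __vlan_find_dev_deep(real_dev, vid);
	if (vlan_dev)
		dev_hold(vlan_dev);	/* keep it usable past the RCU section */
	rcu_read_unlock();
	return vlan_dev;	/* caller must dev_put() when done */
}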
struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	return vlan_dev_priv(dev)->real_dev;
}
EXPORT_SYMBOL(vlan_dev_real_dev);

u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	return vlan_dev_priv(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);

/* Move the MAC addresses forward over the 4-byte VLAN header so the
 * frame header becomes plain Ethernet again.
 */
static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
{
	if (skb_cow(skb, skb_headroom(skb)) < 0)
		return NULL;
	memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
	skb->mac_header += VLAN_HLEN;
	skb_reset_mac_len(skb);
	return skb;
}

/*
 * vlan_untag - move a software-tagged 802.1Q header into skb->vlan_tci
 *
 * For devices that do not strip the VLAN header in hardware: copy the TCI
 * out of the packet into skb->vlan_tci and remove the header from the
 * data, so the rest of the stack sees an untagged frame. Returns NULL
 * (and frees the skb) on failure.
 */
struct sk_buff *vlan_untag(struct sk_buff *skb)
{
	struct vlan_hdr *vhdr;
	u16 vlan_tci;

	if (unlikely(vlan_tx_tag_present(skb))) {
		/* vlan_tci is already set-up so leave this for another time */
		return skb;
	}

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto err_free;

	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		goto err_free;

	vhdr = (struct vlan_hdr *) skb->data;
	vlan_tci = ntohs(vhdr->h_vlan_TCI);
	__vlan_hwaccel_put_tag(skb, vlan_tci);

	skb_pull_rcsum(skb, VLAN_HLEN);
	vlan_set_encap_proto(skb, vhdr);

	skb = vlan_reorder_header(skb);
	if (unlikely(!skb))
		goto err_free;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	return skb;

err_free:
	kfree_skb(skb);
	return NULL;
}
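/*
 * Illustrative sketch, not part of the original file: how a receive path
 * is expected to use vlan_untag(). If the NIC did not strip the tag in
 * hardware, the 802.1Q header still sits in the packet data and must be
 * pulled into skb->vlan_tci before protocol demultiplexing. Hypothetical
 * helper, marked __maybe_unused.
 */
static struct sk_buff * __maybe_unused
example_normalize_vlan(struct sk_buff *skb)
{
	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) &&
	    !vlan_tx_tag_present(skb)) {
		skb = vlan_untag(skb);	/* frees skb and returns NULL on error */
		if (unlikely(!skb))
			return NULL;
	}
	return skb;
}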
/*
 * vlan info and vid list
 */

static void vlan_group_free(struct vlan_group *grp)
{
	int i;

	for (i = 0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++)
		kfree(grp->vlan_devices_arrays[i]);
}

static void vlan_info_free(struct vlan_info *vlan_info)
{
	vlan_group_free(&vlan_info->grp);
	kfree(vlan_info);
}

static void vlan_info_rcu_free(struct rcu_head *rcu)
{
	vlan_info_free(container_of(rcu, struct vlan_info, rcu));
}

static struct vlan_info *vlan_info_alloc(struct net_device *dev)
{
	struct vlan_info *vlan_info;

	vlan_info = kzalloc(sizeof(struct vlan_info), GFP_KERNEL);
	if (!vlan_info)
		return NULL;

	vlan_info->real_dev = dev;
	INIT_LIST_HEAD(&vlan_info->vid_list);
	return vlan_info;
}

struct vlan_vid_info {
	struct list_head list;
	unsigned short vid;
	int refcount;
};

static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info,
					       unsigned short vid)
{
	struct vlan_vid_info *vid_info;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		if (vid_info->vid == vid)
			return vid_info;
	}
	return NULL;
}

static struct vlan_vid_info *vlan_vid_info_alloc(unsigned short vid)
{
	struct vlan_vid_info *vid_info;

	vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL);
	if (!vid_info)
		return NULL;
	vid_info->vid = vid;

	return vid_info;
}

static int __vlan_vid_add(struct vlan_info *vlan_info, unsigned short vid,
			  struct vlan_vid_info **pvid_info)
{
	struct net_device *dev = vlan_info->real_dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	struct vlan_vid_info *vid_info;
	int err;

	vid_info = vlan_vid_info_alloc(vid);
	if (!vid_info)
		return -ENOMEM;

	if ((dev->features & NETIF_F_HW_VLAN_FILTER) &&
	    ops->ndo_vlan_rx_add_vid) {
		err = ops->ndo_vlan_rx_add_vid(dev, vid);
		if (err) {
			kfree(vid_info);
			return err;
		}
	}
	list_add(&vid_info->list, &vlan_info->vid_list);
	vlan_info->nr_vids++;
	*pvid_info = vid_info;
	return 0;
}

int vlan_vid_add(struct net_device *dev, unsigned short vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;
	bool vlan_info_created = false;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info) {
		vlan_info = vlan_info_alloc(dev);
		if (!vlan_info)
			return -ENOMEM;
		vlan_info_created = true;
	}
	vid_info = vlan_vid_info_get(vlan_info, vid);
	if (!vid_info) {
		err = __vlan_vid_add(vlan_info, vid, &vid_info);
		if (err)
			goto out_free_vlan_info;
	}
	vid_info->refcount++;

	if (vlan_info_created)
		rcu_assign_pointer(dev->vlan_info, vlan_info);

	return 0;

out_free_vlan_info:
	if (vlan_info_created)
		kfree(vlan_info);
	return err;
}
EXPORT_SYMBOL(vlan_vid_add);

static void __vlan_vid_del(struct vlan_info *vlan_info,
			   struct vlan_vid_info *vid_info)
{
	struct net_device *dev = vlan_info->real_dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	unsigned short vid = vid_info->vid;
	int err;

	if ((dev->features & NETIF_F_HW_VLAN_FILTER) &&
	    ops->ndo_vlan_rx_kill_vid) {
		err = ops->ndo_vlan_rx_kill_vid(dev, vid);
		if (err) {
			pr_warn("failed to kill vid %d for device %s\n",
				vid, dev->name);
		}
	}
	list_del(&vid_info->list);
	kfree(vid_info);
	vlan_info->nr_vids--;
}

void vlan_vid_del(struct net_device *dev, unsigned short vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return;

	vid_info = vlan_vid_info_get(vlan_info, vid);
	if (!vid_info)
		return;
	vid_info->refcount--;
	if (vid_info->refcount == 0) {
		__vlan_vid_del(vlan_info, vid_info);
		if (vlan_info->nr_vids == 0) {
			RCU_INIT_POINTER(dev->vlan_info, NULL);
			call_rcu(&vlan_info->rcu, vlan_info_rcu_free);
		}
	}
}
EXPORT_SYMBOL(vlan_vid_del);

int vlan_vids_add_by_dev(struct net_device *dev,
			 const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
		return 0;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		err = vlan_vid_add(dev, vid_info->vid);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	list_for_each_entry_continue_reverse(vid_info,
					     &vlan_info->vid_list,
					     list) {
		vlan_vid_del(dev, vid_info->vid);
	}

	return err;
}
EXPORT_SYMBOL(vlan_vids_add_by_dev);

void vlan_vids_del_by_dev(struct net_device *dev,
			  const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
		return;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list)
		vlan_vid_del(dev, vid_info->vid);
}
EXPORT_SYMBOL(vlan_vids_del_by_dev);
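/*
 * Illustrative sketch, not part of the original file: a stacked driver
 * (bonding-style) mirroring the VLAN filter entries of its upper device
 * onto a newly attached lower device, and releasing them on teardown.
 * vlan_vid_add()/vlan_vid_del() refcount each VID, so adds and deletes
 * must stay symmetric. Hypothetical names, marked __maybe_unused.
 */
static int __maybe_unused example_enslave(struct net_device *lower,
					  const struct net_device *upper)
{
	ASSERT_RTNL();
	/* Copy every VID the upper device filters onto the new lower dev. */
	return vlan_vids_add_by_dev(lower, upper);
}

static void __maybe_unused example_release(struct net_device *lower,
					   const struct net_device *upper)
{
	ASSERT_RTNL();
	/* Drop one reference per VID; hardware filters vanish at zero. */
	vlan_vids_del_by_dev(lower, upper);
}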