#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include "vlan.h"

/* VLAN rx hw acceleration helper.  This acts like netif_{rx,receive_skb}().
 *
 * Drivers with hardware VLAN tag extraction call this with the stripped
 * tag in @vlan_tci; @polling selects netif_receive_skb() (NAPI context)
 * vs netif_rx().  Returns a NET_RX_* verdict.
 */
int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
		      u16 vlan_tci, int polling)
{
	/* If netpoll consumed the packet, report it as dropped. */
	if (netpoll_rx(skb))
		return NET_RX_DROP;

	/* On a bonded slave, mark frames the bonding policy wants
	 * ignored instead of dropping them outright here. */
	if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
		skb->deliver_no_wcard = 1;

	/* Record the arrival interface, stash the tag in the skb, and
	 * retarget the skb at the VLAN net_device for this VID. */
	skb->skb_iif = skb->dev->ifindex;
	__vlan_hwaccel_put_tag(skb, vlan_tci);
	skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);

	/* No VLAN device configured for this VID: drop the frame. */
	if (!skb->dev)
		goto drop;

	return (polling ? netif_receive_skb(skb) : netif_rx(skb));

drop:
	/* _any variant: callers may be in hard-irq context. */
	dev_kfree_skb_any(skb);
	return NET_RX_DROP;
}
EXPORT_SYMBOL(__vlan_hwaccel_rx);

/* Finish hw-accelerated VLAN receive on the VLAN device: deliver the
 * frame to taps on the real device, map the tag's priority bits into
 * skb->priority, consume the tag, bump per-cpu stats and fix up
 * pkt_type for a VLAN MAC that differs from the real device's.
 * Expects skb->dev to already be the VLAN device.  Always returns 0.
 */
int vlan_hwaccel_do_receive(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct vlan_rx_stats *rx_stats;

	/* Point skb->dev at the underlying device just long enough for
	 * the tap delivery below, then restore it. */
	skb->dev = vlan_dev_info(dev)->real_dev;
	netif_nit_deliver(skb);

	skb->dev = dev;
	skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci);
	skb->vlan_tci = 0;

	/* NOTE(review): smp_processor_id() here relies on the rx path
	 * running with preemption disabled — confirm for all callers. */
	rx_stats = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats,
			       smp_processor_id());

	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;

	switch (skb->pkt_type) {
	case PACKET_BROADCAST:
		break;
	case PACKET_MULTICAST:
		rx_stats->multicast++;
		break;
	case PACKET_OTHERHOST:
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly.
		 */
		if (!compare_ether_addr(eth_hdr(skb)->h_dest,
					dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
		break;
	}
	return 0;
}

/* Return the underlying (real) device of a VLAN device. */
struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	return vlan_dev_info(dev)->real_dev;
}
EXPORT_SYMBOL(vlan_dev_real_dev);

/* Return the 802.1Q VLAN ID of a VLAN device. */
u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	return vlan_dev_info(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);

/* Common GRO front end: same tag/retarget steps as __vlan_hwaccel_rx
 * (minus the netpoll check, handled by the callers), then prime the
 * same_flow/flush state of each held GRO skb before handing off to
 * dev_gro_receive().  On an unconfigured VID this returns GRO_DROP
 * WITHOUT freeing the skb — the caller owns it on that path.
 */
static gro_result_t
vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
		unsigned int vlan_tci, struct sk_buff *skb)
{
	struct sk_buff *p;

	if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
		skb->deliver_no_wcard = 1;

	skb->skb_iif = skb->dev->ifindex;
	__vlan_hwaccel_put_tag(skb, vlan_tci);
	skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);

	if (!skb->dev)
		goto drop;

	/* A held skb can only merge with this one if it is for the same
	 * VLAN device and its Ethernet header matches. */
	for (p = napi->gro_list; p; p = p->next) {
		NAPI_GRO_CB(p)->same_flow =
			p->dev == skb->dev && !compare_ether_header(
				skb_mac_header(p), skb_gro_mac_header(skb));
		NAPI_GRO_CB(p)->flush = 0;
	}

	return dev_gro_receive(napi, skb);

drop:
	return GRO_DROP;
}

/* GRO entry point for hw-accelerated VLAN skbs.  Falls back to the
 * non-GRO receive path when netpoll is active on the device, mapping
 * its NET_RX_* result onto a gro_result_t.
 */
gro_result_t vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
			      unsigned int vlan_tci, struct sk_buff *skb)
{
	if (netpoll_rx_on(skb))
		return vlan_hwaccel_receive_skb(skb, grp, vlan_tci)
			? GRO_DROP : GRO_NORMAL;

	skb_gro_reset_offset(skb);

	return napi_skb_finish(vlan_gro_common(napi, grp, vlan_tci, skb), skb);
}
EXPORT_SYMBOL(vlan_gro_receive);

/* GRO entry point for drivers using napi->frags (paged rx buffers):
 * the skb is built from napi_frags_skb(); otherwise mirrors
 * vlan_gro_receive(), including the netpoll fallback, which must set
 * skb->protocol itself since napi_frags_finish() is bypassed.
 */
gro_result_t vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
			    unsigned int vlan_tci)
{
	struct sk_buff *skb = napi_frags_skb(napi);

	if (!skb)
		return GRO_DROP;

	if (netpoll_rx_on(skb)) {
		skb->protocol = eth_type_trans(skb, skb->dev);
		return vlan_hwaccel_receive_skb(skb, grp, vlan_tci)
			? GRO_DROP : GRO_NORMAL;
	}

	return napi_frags_finish(napi, skb,
				 vlan_gro_common(napi, grp, vlan_tci, skb));
}
EXPORT_SYMBOL(vlan_gro_frags);