drivers/net/ifb.c: e7246e122aaa99ebbb8ad7da80f35a20577bd8af (old) -> a5135bcfba7345031df45e02cd150a45add47cf8 (new)
/* drivers/net/ifb.c:

    The purpose of this driver is to provide a device that allows
    for sharing of resources:

    1) qdiscs/policies that are per device as opposed to system wide.
    ifb allows for a device which can be redirected to thus providing
    an impression of sharing.
--- 64 unchanged lines hidden ---

    if (!skb) {
        if (!__netif_tx_trylock(txq))
            goto resched;
        skb_queue_splice_tail_init(&txp->rq, &txp->tq);
        __netif_tx_unlock(txq);
    }

    while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
-        u32 from = G_TC_FROM(skb->tc_verd);
+        u32 from = skb->tc_from;

-        skb->tc_verd = 0;
+        skb_reset_tc(skb);
        skb->tc_skip_classify = 1;

        u64_stats_update_begin(&txp->tsync);
        txp->tx_packets++;
        txp->tx_bytes += skb->len;
        u64_stats_update_end(&txp->tsync);

        rcu_read_lock();
--- 142 unchanged lines hidden ---

234 netif_keep_dst(dev);
235 eth_hw_addr_random(dev);
236 dev->destructor = ifb_dev_free;
237}
238
239static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
240{
241 struct ifb_dev_private *dp = netdev_priv(dev);
-    u32 from = G_TC_FROM(skb->tc_verd);
    struct ifb_q_private *txp = dp->tx_private + skb_get_queue_mapping(skb);

    u64_stats_update_begin(&txp->rsync);
    txp->rx_packets++;
    txp->rx_bytes += skb->len;
    u64_stats_update_end(&txp->rsync);

-    if (!(from & (AT_INGRESS|AT_EGRESS)) || !skb->skb_iif) {
+    if (skb->tc_from == AT_STACK || !skb->skb_iif) {
        dev_kfree_skb(skb);
        dev->stats.rx_dropped++;
        return NETDEV_TX_OK;
    }

    if (skb_queue_len(&txp->rq) >= dev->tx_queue_len)
        netif_tx_stop_queue(netdev_get_tx_queue(dev, txp->txqnum));
--- 102 unchanged lines hidden ---
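In the ifb_xmit() hunk above, the drop test is rewritten from !(from & (AT_INGRESS|AT_EGRESS)) to skb->tc_from == AT_STACK: a packet that was not redirected from ingress or egress came straight from the stack, so ifb frees it. Assuming the old uapi encoding (AT_STACK 0x0, AT_INGRESS 0x1, AT_EGRESS 0x2), the two predicates are equivalent; the standalone sketch below checks this and is a model, not kernel code.

#include <assert.h>
#include <stdio.h>

/* AT_* values mirror the old uapi <linux/pkt_cls.h> encoding */
#define AT_STACK   0x0
#define AT_INGRESS 0x1
#define AT_EGRESS  0x2

int main(void)
{
    unsigned int from;

    /* every value the two-bit "from" state can take, 0..3 */
    for (from = 0; from <= (AT_INGRESS | AT_EGRESS); from++) {
        int old_drop = !(from & (AT_INGRESS | AT_EGRESS));
        int new_drop = (from == AT_STACK);

        assert(old_drop == new_drop);
    }
    printf("old and new drop conditions agree\n");
    return 0;
}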