drivers/net/ethernet/freescale/gianfar.c: diff between commits a4feee89ce4590c7a4aead49ca5a4853dc6ea5dc (old) and d55398ba81139bc826a8c2417a01280e99f08cf3 (new)
1/* drivers/net/ethernet/freescale/gianfar.c
2 *
3 * Gianfar Ethernet Driver
4 * This driver is designed for the non-CPM ethernet controllers
5 * on the 85xx and 83xx family of integrated processors
6 * Based on 8260_io/fcc_enet.c
7 *
8 * Author: Andy Fleming

--- 151 unchanged lines hidden ---

160 u32 lstatus;
161
162 bdp->bufPtr = buf;
163
164 lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
165 if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
166 lstatus |= BD_LFLAG(RXBD_WRAP);
167
-168 	eieio();
+168 	gfar_wmb();
169
170 bdp->lstatus = lstatus;
171}
172
173static int gfar_init_bds(struct net_device *ndev)
174{
175 struct gfar_private *priv = netdev_priv(ndev);
176 struct gfar_priv_tx_q *tx_queue = NULL;

--- 2189 unchanged lines hidden ---

2366 *
2367 * The lock also protects us from gfar_error(), which can modify
2368 * regs->tstat and thus retrigger the transfers, which is why we
2369 * also must grab the lock before setting ready bit for the first
2370 * to be transmitted BD.
2371 */
2372 spin_lock_irqsave(&tx_queue->txlock, flags);
2373
-2374 	/* The powerpc-specific eieio() is used, as wmb() has too strong
-2375 	 * semantics (it requires synchronization between cacheable and
-2376 	 * uncacheable mappings, which eieio doesn't provide and which we
-2377 	 * don't need), thus requiring a more expensive sync instruction. At
-2378 	 * some point, the set of architecture-independent barrier functions
-2379 	 * should be expanded to include weaker barriers.
-2380 	 */
-2381 	eieio();
+2374 	gfar_wmb();
2375
2376 	txbdp_start->lstatus = lstatus;
2377
-2385 	eieio(); /* force lstatus write before tx_skbuff */
+2378 	gfar_wmb(); /* force lstatus write before tx_skbuff */
2379
2380 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
2381
2382 /* Update the current skb pointer to the next entry we will use
2383 * (wrapping if necessary)
2384 */
2385 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
2386 TX_RING_MOD_MASK(tx_queue->tx_ring_size);
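
The index update above relies on the ring size being a power of two, so a
bitwise AND can stand in for a modulo. A hypothetical expansion of
TX_RING_MOD_MASK(), assuming the conventional (size - 1) definition:

	/* Assumed definition, for illustration only: with tx_ring_size a
	 * power of two (say 256), the mask is 0xff, so
	 *   (255 + 1) & 0xff == 0    -> wraps back to the ring start
	 *   ( 10 + 1) & 0xff == 11   -> plain increment
	 */
	#define TX_RING_MOD_MASK(size) ((size) - 1)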

--- 1097 unchanged lines hidden ---
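
Both hunks make the same substitution: the open-coded, powerpc-only
eieio() calls (and the comment justifying them) are replaced by a
gfar_wmb() helper, centralizing the barrier choice so that non-PPC builds
fall back to the generic wmb(). A sketch of how such a helper could look,
assuming it lives in gianfar.h; the PPC branch preserves exactly the
rationale given in the deleted comment:

	static inline void gfar_wmb(void)
	{
	#if defined(CONFIG_PPC)
		/* eieio() orders the descriptor stores without the costlier
		 * sync that wmb() implies on powerpc (synchronization between
		 * cacheable and uncacheable mappings, not needed here).
		 */
		eieio();
	#else
		wmb();	/* order BD field writes before the lstatus store */
	#endif
	}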