Lines matching refs:mp — references to the per-port private pointer `mp` (struct mv643xx_eth_private *) throughout the mv643xx_eth driver.

419 static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
421 return readl(mp->shared->base + offset);
424 static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset)
426 return readl(mp->base + offset);
429 static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
431 writel(data, mp->shared->base + offset);
434 static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data)
436 writel(data, mp->base + offset);
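
The four accessors above split register access into a controller-wide window (mp->shared->base, used by rdl/wrl) and a per-port window (mp->base, used by rdlp/wrlp); the probe path later derives the per-port window as shared base + 0x0400 + (port << 10) (line 3132). Below is a minimal user-space sketch of the same windowing, with a plain array standing in for the ioremap()ed MMIO region and array indexing standing in for readl()/writel():

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the driver's iomem pointers; in the kernel these are
     * ioremap()ed MMIO windows accessed with readl()/writel(). */
    struct shared_regs { volatile uint32_t *base; };
    struct port_priv   { struct shared_regs *shared; volatile uint32_t *base; };

    /* Controller-wide registers: mp->shared->base + offset. */
    static uint32_t rdl(struct port_priv *mp, int offset)
    {
        return mp->shared->base[offset / 4];
    }
    static void wrl(struct port_priv *mp, int offset, uint32_t data)
    {
        mp->shared->base[offset / 4] = data;
    }

    /* Per-port registers: mp->base + offset. */
    static uint32_t rdlp(struct port_priv *mp, int offset)
    {
        return mp->base[offset / 4];
    }
    static void wrlp(struct port_priv *mp, int offset, uint32_t data)
    {
        mp->base[offset / 4] = data;
    }

    int main(void)
    {
        static uint32_t regs[2048];                 /* fake register file */
        struct shared_regs sh = { .base = regs };
        struct port_priv mp = {
            .shared = &sh,
            .base   = regs + (0x0400 + (0 << 10)) / 4,   /* per-port window, port 0 */
        };

        wrlp(&mp, 0x48, 0x1);                       /* write via the port window   */
        wrl(&mp, 0x44c, 0x2);                       /* write via the shared window */
        /* The same words are visible through either window, offset by 0x400. */
        printf("rdl(0x448)=%#x rdlp(0x4c)=%#x\n",
               (unsigned)rdl(&mp, 0x448), (unsigned)rdlp(&mp, 0x4c));
        return 0;
    }
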
453 struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
454 wrlp(mp, RXQ_COMMAND, 1 << rxq->index);
459 struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
462 wrlp(mp, RXQ_COMMAND, mask << 8);
463 while (rdlp(mp, RXQ_COMMAND) & mask)
469 struct mv643xx_eth_private *mp = txq_to_mp(txq);
474 wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr);
479 struct mv643xx_eth_private *mp = txq_to_mp(txq);
480 wrlp(mp, TXQ_COMMAND, 1 << txq->index);
485 struct mv643xx_eth_private *mp = txq_to_mp(txq);
488 wrlp(mp, TXQ_COMMAND, mask << 8);
489 while (rdlp(mp, TXQ_COMMAND) & mask)
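
rxq_enable()/txq_enable() write the queue's bit into the low byte of the RXQ/TXQ command register, and the disable paths write the same mask shifted into bits 8-15 and then spin until the low enable bit drops. The sketch below models that bit protocol in plain C; the immediate self-clearing in queue_disable() is a stand-in for the hardware draining the queue, which the driver waits for with the while (rdlp(...) & mask) loops shown above:

    #include <stdio.h>

    /* Assumed RXQ/TXQ command register protocol, per the lines above:
     *   bits 0..7  - write 1 to enable queue N,
     *   bits 8..15 - write 1 to request that queue N be disabled;
     * the hardware clears the enable bit once the queue has stopped. */
    static unsigned int queue_command;

    static void queue_enable(int index)
    {
        queue_command |= 1u << index;           /* wrlp(mp, *_COMMAND, 1 << index) */
    }

    static void queue_disable(int index)
    {
        unsigned int mask = 1u << index;

        queue_command |= mask << 8;             /* wrlp(mp, *_COMMAND, mask << 8) */
        /* The driver polls: while (rdlp(mp, *_COMMAND) & mask) ;
         * this model "drains" the queue immediately instead.      */
        queue_command &= ~(mask | (mask << 8));
    }

    int main(void)
    {
        queue_enable(3);
        printf("after enable:  %#x\n", queue_command);
        queue_disable(3);
        printf("after disable: %#x\n", queue_command);
        return 0;
    }
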
495 struct mv643xx_eth_private *mp = txq_to_mp(txq);
496 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
508 struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
509 struct net_device_stats *stats = &mp->dev->stats;
533 dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr,
538 mp->work_rx_refill |= 1 << rxq->index;
570 skb->protocol = eth_type_trans(skb, mp->dev);
572 napi_gro_receive(&mp->napi, skb);
582 netdev_err(mp->dev,
593 mp->work_rx &= ~(1 << rxq->index);
600 struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
610 skb = netdev_alloc_skb(mp->dev, mp->skb_size);
613 mp->oom = 1;
630 rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
648 mp->work_rx_refill &= ~(1 << rxq->index);
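
rxq_process() and rxq_refill() (lines 508-648) communicate through per-queue bitmasks on the private struct: a bit set in work_rx means the queue has frames to process, a bit in work_rx_refill means its ring needs new buffers, and mp->oom pauses refilling when netdev_alloc_skb() fails. A tiny model of just that bookkeeping, with hypothetical helper names:

    #include <stdio.h>

    /* Per-queue work bits as kept on mv643xx_eth_private: bit N set means
     * queue N needs the corresponding kind of service.                    */
    struct eth_work {
        unsigned int work_rx;          /* queues with received frames       */
        unsigned int work_rx_refill;   /* queues whose ring needs refilling */
        int oom;                       /* set when buffer allocation failed */
    };

    /* Hypothetical helpers mirroring the updates seen in rxq_process()/rxq_refill(). */
    static void mark_refill_needed(struct eth_work *w, int queue)
    {
        w->work_rx_refill |= 1u << queue;    /* mp->work_rx_refill |= 1 << rxq->index    */
    }

    static void mark_ring_full(struct eth_work *w, int queue)
    {
        w->work_rx_refill &= ~(1u << queue); /* mp->work_rx_refill &= ~(1 << rxq->index) */
    }

    int main(void)
    {
        struct eth_work w = { 0 };

        mark_refill_needed(&w, 0);
        printf("refill pending: %#x\n", w.work_rx_refill);
        mark_ring_full(&w, 0);
        printf("refill pending: %#x\n", w.work_rx_refill);
        return 0;
    }
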
670 static int skb_tx_csum(struct mv643xx_eth_private *mp, struct sk_buff *skb,
686 if (length - hdr_len > mp->shared->tx_csum_limit ||
778 struct mv643xx_eth_private *mp = txq_to_mp(txq);
790 ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_csum, length);
821 struct mv643xx_eth_private *mp = txq_to_mp(txq);
877 mp->work_tx_end &= ~(1 << txq->index);
893 struct mv643xx_eth_private *mp = txq_to_mp(txq);
923 desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
932 struct mv643xx_eth_private *mp = txq_to_mp(txq);
949 ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_sts, skb->len);
970 desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data,
982 mp->work_tx_end &= ~(1 << txq->index);
995 struct mv643xx_eth_private *mp = netdev_priv(dev);
1001 txq = mp->txq + queue;
1034 struct mv643xx_eth_private *mp = txq_to_mp(txq);
1035 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
1041 if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index))
1044 hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index));
1054 mp->work_tx_end &= ~(1 << txq->index);
1059 struct mv643xx_eth_private *mp = txq_to_mp(txq);
1060 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
1094 dma_unmap_page(mp->dev->dev.parent,
1099 dma_unmap_single(mp->dev->dev.parent,
1113 netdev_info(mp->dev, "tx error\n");
1114 mp->dev->stats.tx_errors++;
1122 mp->work_tx &= ~(1 << txq->index);
1133 static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
1139 token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
1143 mtu = (mp->dev->mtu + 255) >> 8;
1151 switch (mp->shared->tx_bw_control) {
1153 wrlp(mp, TX_BW_RATE, token_rate);
1154 wrlp(mp, TX_BW_MTU, mtu);
1155 wrlp(mp, TX_BW_BURST, bucket_size);
1158 wrlp(mp, TX_BW_RATE_MOVED, token_rate);
1159 wrlp(mp, TX_BW_MTU_MOVED, mtu);
1160 wrlp(mp, TX_BW_BURST_MOVED, bucket_size);
1167 struct mv643xx_eth_private *mp = txq_to_mp(txq);
1171 token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
1179 wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14);
1180 wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate);
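
tx_set_rate() and txq_set_rate() translate a requested rate in bit/s into token-bucket units tied to the SDMA clock (token_rate = ((rate / 1000) * 64) / (t_clk / 1000)), and the per-port path also programs an MTU field in 256-byte units ((mtu + 255) >> 8, line 1143). The clamping of token_rate and bucket_size is not visible in the matched lines and is left out of this standalone version of the arithmetic:

    #include <stdio.h>

    /* Rate-shaping arithmetic from tx_set_rate()/txq_set_rate(): bit/s to
     * token-bucket ticks relative to t_clk, and MTU to 256-byte units.
     * The driver's upper clamps are omitted here.                        */
    static unsigned int tokens_per_tick(unsigned int rate_bps, unsigned int t_clk_hz)
    {
        return ((rate_bps / 1000) * 64) / (t_clk_hz / 1000);
    }

    static unsigned int mtu_in_256byte_units(unsigned int mtu)
    {
        return (mtu + 255) >> 8;        /* round up to a whole 256-byte unit */
    }

    int main(void)
    {
        /* Example: 1 Gbit/s shaping with the 133 MHz default t_clk. */
        printf("token_rate = %u\n", tokens_per_tick(1000000000u, 133000000u));
        printf("mtu field  = %u\n", mtu_in_256byte_units(1500));
        return 0;
    }
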
1185 struct mv643xx_eth_private *mp = txq_to_mp(txq);
1193 switch (mp->shared->tx_bw_control) {
1203 val = rdlp(mp, off);
1205 wrlp(mp, off, val);
1213 struct mv643xx_eth_private *mp = netdev_priv(dev);
1214 u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
1248 wrlp(mp, PORT_SERIAL_CONTROL, pscr);
1254 struct mv643xx_eth_private *mp = netdev_priv(dev);
1261 for (i = 0; i < mp->txq_count; i++) {
1262 struct tx_queue *txq = mp->txq + i;
1276 static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
1278 return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
1281 static void mib_counters_clear(struct mv643xx_eth_private *mp)
1286 mib_read(mp, i);
1289 rdlp(mp, RX_DISCARD_FRAME_CNT);
1290 rdlp(mp, RX_OVERRUN_FRAME_CNT);
1293 static void mib_counters_update(struct mv643xx_eth_private *mp)
1295 struct mib_counters *p = &mp->mib_counters;
1297 spin_lock_bh(&mp->mib_counters_lock);
1298 p->good_octets_received += mib_read(mp, 0x00);
1299 p->bad_octets_received += mib_read(mp, 0x08);
1300 p->internal_mac_transmit_err += mib_read(mp, 0x0c);
1301 p->good_frames_received += mib_read(mp, 0x10);
1302 p->bad_frames_received += mib_read(mp, 0x14);
1303 p->broadcast_frames_received += mib_read(mp, 0x18);
1304 p->multicast_frames_received += mib_read(mp, 0x1c);
1305 p->frames_64_octets += mib_read(mp, 0x20);
1306 p->frames_65_to_127_octets += mib_read(mp, 0x24);
1307 p->frames_128_to_255_octets += mib_read(mp, 0x28);
1308 p->frames_256_to_511_octets += mib_read(mp, 0x2c);
1309 p->frames_512_to_1023_octets += mib_read(mp, 0x30);
1310 p->frames_1024_to_max_octets += mib_read(mp, 0x34);
1311 p->good_octets_sent += mib_read(mp, 0x38);
1312 p->good_frames_sent += mib_read(mp, 0x40);
1313 p->excessive_collision += mib_read(mp, 0x44);
1314 p->multicast_frames_sent += mib_read(mp, 0x48);
1315 p->broadcast_frames_sent += mib_read(mp, 0x4c);
1316 p->unrec_mac_control_received += mib_read(mp, 0x50);
1317 p->fc_sent += mib_read(mp, 0x54);
1318 p->good_fc_received += mib_read(mp, 0x58);
1319 p->bad_fc_received += mib_read(mp, 0x5c);
1320 p->undersize_received += mib_read(mp, 0x60);
1321 p->fragments_received += mib_read(mp, 0x64);
1322 p->oversize_received += mib_read(mp, 0x68);
1323 p->jabber_received += mib_read(mp, 0x6c);
1324 p->mac_receive_error += mib_read(mp, 0x70);
1325 p->bad_crc_event += mib_read(mp, 0x74);
1326 p->collision += mib_read(mp, 0x78);
1327 p->late_collision += mib_read(mp, 0x7c);
1329 p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT);
1330 p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT);
1331 spin_unlock_bh(&mp->mib_counters_lock);
1336 struct mv643xx_eth_private *mp = from_timer(mp, t, mib_counters_timer);
1337 mib_counters_update(mp);
1338 mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
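
mib_counters_update() folds the hardware MIB registers into a software copy under mib_counters_lock, and the timer re-arms itself every 30 seconds; mib_counters_clear() just reads each counter once to throw away stale values, which only makes sense if the counters clear on read (an assumption here, but consistent with those lines). A compact model of that accumulate-on-read pattern, using a fake register bank in place of rdl()/MIB_COUNTERS():

    #include <stdint.h>
    #include <stdio.h>

    #define MIB_REGS 32
    static uint32_t fake_mib[MIB_REGS];     /* stand-in for the MMIO counter bank */

    /* Assumed clear-on-read behaviour: returning the value zeroes the counter. */
    static uint32_t mib_read(int index)
    {
        uint32_t val = fake_mib[index];

        fake_mib[index] = 0;
        return val;
    }

    struct mib_counters {
        uint64_t good_octets_received;
        uint64_t good_frames_received;
    };

    /* In the driver this runs under spin_lock_bh(&mp->mib_counters_lock) and
     * walks the whole register map (offsets 0x00..0x7c); two counters are
     * enough to show the accumulation.                                      */
    static void mib_counters_update(struct mib_counters *p)
    {
        p->good_octets_received += mib_read(0x00 / 4);
        p->good_frames_received += mib_read(0x10 / 4);
    }

    int main(void)
    {
        struct mib_counters c = { 0 };

        fake_mib[0x00 / 4] = 1234;
        fake_mib[0x10 / 4] = 7;
        mib_counters_update(&c);
        mib_counters_update(&c);            /* second pass adds nothing */
        printf("octets=%llu frames=%llu\n",
               (unsigned long long)c.good_octets_received,
               (unsigned long long)c.good_frames_received);
        return 0;
    }
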
1354 static unsigned int get_rx_coal(struct mv643xx_eth_private *mp)
1356 u32 val = rdlp(mp, SDMA_CONFIG);
1359 if (mp->shared->extended_rx_coal_limit)
1365 temp += mp->t_clk / 2;
1366 do_div(temp, mp->t_clk);
1371 static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
1376 temp = (u64)usec * mp->t_clk;
1380 val = rdlp(mp, SDMA_CONFIG);
1381 if (mp->shared->extended_rx_coal_limit) {
1393 wrlp(mp, SDMA_CONFIG, val);
1396 static unsigned int get_tx_coal(struct mv643xx_eth_private *mp)
1400 temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4;
1402 temp += mp->t_clk / 2;
1403 do_div(temp, mp->t_clk);
1408 static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
1412 temp = (u64)usec * mp->t_clk;
1419 wrlp(mp, TX_FIFO_URGENT_THRESHOLD, temp << 4);
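
set_rx_coal()/get_rx_coal() and the TX variants convert between microseconds and t_clk cycles. The matched lines show "(u64)usec * mp->t_clk" on the way in and "temp += mp->t_clk / 2; do_div(temp, mp->t_clk)" (round to nearest) on the way out; the factor of 1,000,000 in the sketch below is the assumed microsecond scaling, since that step is not among the matched lines:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t coal_usec_to_cycles(unsigned int usec, unsigned int t_clk)
    {
        uint64_t temp = (uint64_t)usec * t_clk;

        return temp / 1000000;              /* cycles the hardware will count */
    }

    static unsigned int coal_cycles_to_usec(uint64_t cycles, unsigned int t_clk)
    {
        uint64_t temp = cycles * 1000000;

        temp += t_clk / 2;                  /* round to the nearest microsecond */
        return (unsigned int)(temp / t_clk);
    }

    int main(void)
    {
        unsigned int t_clk = 133000000;     /* default t_clk used by the probe path */
        uint64_t cycles = coal_usec_to_cycles(250, t_clk);

        printf("250 usec -> %llu cycles -> %u usec\n",
               (unsigned long long)cycles, coal_cycles_to_usec(cycles, t_clk));
        return 0;
    }
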
1483 mv643xx_eth_get_link_ksettings_phy(struct mv643xx_eth_private *mp,
1486 struct net_device *dev = mp->dev;
1502 mv643xx_eth_get_link_ksettings_phyless(struct mv643xx_eth_private *mp,
1508 port_status = rdlp(mp, PORT_STATUS);
1570 struct mv643xx_eth_private *mp = netdev_priv(dev);
1573 return mv643xx_eth_get_link_ksettings_phy(mp, cmd);
1575 return mv643xx_eth_get_link_ksettings_phyless(mp, cmd);
1620 struct mv643xx_eth_private *mp = netdev_priv(dev);
1622 ec->rx_coalesce_usecs = get_rx_coal(mp);
1623 ec->tx_coalesce_usecs = get_tx_coal(mp);
1633 struct mv643xx_eth_private *mp = netdev_priv(dev);
1635 set_rx_coal(mp, ec->rx_coalesce_usecs);
1636 set_tx_coal(mp, ec->tx_coalesce_usecs);
1646 struct mv643xx_eth_private *mp = netdev_priv(dev);
1651 er->rx_pending = mp->rx_ring_size;
1652 er->tx_pending = mp->tx_ring_size;
1660 struct mv643xx_eth_private *mp = netdev_priv(dev);
1665 mp->rx_ring_size = min(er->rx_pending, 4096U);
1666 mp->tx_ring_size = clamp_t(unsigned int, er->tx_pending,
1668 if (mp->tx_ring_size != er->tx_pending)
1670 mp->tx_ring_size, er->tx_pending);
1688 struct mv643xx_eth_private *mp = netdev_priv(dev);
1691 wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000);
1714 struct mv643xx_eth_private *mp = netdev_priv(dev);
1718 mib_counters_update(mp);
1727 p = ((void *)mp->dev) + stat->netdev_off;
1729 p = ((void *)mp) + stat->mp_off;
1765 static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
1767 unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH);
1768 unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW);
1778 static void uc_addr_set(struct mv643xx_eth_private *mp, const u8 *addr)
1780 wrlp(mp, MAC_ADDR_HIGH,
1782 wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]);
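
uc_addr_set()/uc_addr_get() split the six-byte MAC address across two registers: bytes 4-5 go into the low 16 bits of MAC_ADDR_LOW ((addr[4] << 8) | addr[5], shown above) and bytes 0-3 into MAC_ADDR_HIGH; the exact HIGH byte order below is an assumption, since that expression sits on an unmatched continuation line. A round-trip sketch of the packing:

    #include <stdint.h>
    #include <stdio.h>

    /* MAC address packing as assumed for MAC_ADDR_HIGH / MAC_ADDR_LOW:
     * HIGH = addr[0..3], LOW = (addr[4] << 8) | addr[5].               */
    static void mac_to_regs(const uint8_t *addr, uint32_t *hi, uint32_t *lo)
    {
        *hi = ((uint32_t)addr[0] << 24) | ((uint32_t)addr[1] << 16) |
              ((uint32_t)addr[2] << 8)  | addr[3];
        *lo = ((uint32_t)addr[4] << 8) | addr[5];
    }

    static void regs_to_mac(uint32_t hi, uint32_t lo, uint8_t *addr)
    {
        addr[0] = hi >> 24; addr[1] = hi >> 16; addr[2] = hi >> 8; addr[3] = hi;
        addr[4] = lo >> 8;  addr[5] = lo;
    }

    int main(void)
    {
        uint8_t mac[6] = { 0x00, 0x50, 0x43, 0x12, 0x34, 0x56 }, out[6];
        uint32_t hi, lo;

        mac_to_regs(mac, &hi, &lo);
        regs_to_mac(hi, lo, out);
        printf("HIGH=%08x LOW=%08x -> %02x:%02x:%02x:%02x:%02x:%02x\n",
               (unsigned)hi, (unsigned)lo,
               out[0], out[1], out[2], out[3], out[4], out[5]);
        return 0;
    }
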
1808 struct mv643xx_eth_private *mp = netdev_priv(dev);
1813 uc_addr_set(mp, dev->dev_addr);
1815 port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE;
1824 int off = UNICAST_TABLE(mp->port_num) + i;
1838 wrl(mp, off, v);
1841 wrlp(mp, PORT_CONFIG, port_config);
1864 struct mv643xx_eth_private *mp = netdev_priv(dev);
1896 wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
1898 wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
1907 wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
1909 wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
1938 static int rxq_init(struct mv643xx_eth_private *mp, int index)
1940 struct rx_queue *rxq = mp->rxq + index;
1947 rxq->rx_ring_size = mp->rx_ring_size;
1955 if (index == 0 && size <= mp->rx_desc_sram_size) {
1956 rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
1957 mp->rx_desc_sram_size);
1958 rxq->rx_desc_dma = mp->rx_desc_sram_addr;
1960 rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
1966 netdev_err(mp->dev,
1994 if (index == 0 && size <= mp->rx_desc_sram_size)
1997 dma_free_coherent(mp->dev->dev.parent, size,
2007 struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
2020 netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n",
2025 rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
2028 dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size,
2034 static int txq_init(struct mv643xx_eth_private *mp, int index)
2036 struct tx_queue *txq = mp->txq + index;
2044 txq->tx_ring_size = mp->tx_ring_size;
2059 if (index == 0 && size <= mp->tx_desc_sram_size) {
2060 txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
2061 mp->tx_desc_sram_size);
2062 txq->tx_desc_dma = mp->tx_desc_sram_addr;
2064 txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
2070 netdev_err(mp->dev,
2100 txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent,
2114 if (index == 0 && size <= mp->tx_desc_sram_size)
2117 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
2124 struct mv643xx_eth_private *mp = txq_to_mp(txq);
2132 txq->tx_desc_area_size <= mp->tx_desc_sram_size)
2135 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
2140 dma_free_coherent(mp->dev->dev.parent,
2147 static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp)
2152 int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask;
2159 int_cause_ext = rdlp(mp, INT_CAUSE_EXT);
2163 wrlp(mp, INT_CAUSE, ~int_cause);
2164 mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) &
2165 ~(rdlp(mp, TXQ_COMMAND) & 0xff);
2166 mp->work_rx |= (int_cause & INT_RX) >> 2;
2171 wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext);
2173 mp->work_link = 1;
2174 mp->work_tx |= int_cause_ext & INT_EXT_TX;
2183 struct mv643xx_eth_private *mp = netdev_priv(dev);
2185 if (unlikely(!mv643xx_eth_collect_events(mp)))
2188 wrlp(mp, INT_MASK, 0);
2189 napi_schedule(&mp->napi);
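
mv643xx_eth_collect_events() converts interrupt cause bits into the work_* bitmasks: RX cause bits shift down by 2 to become per-queue bits in work_rx, and TX-end cause bits shift down by 19 and are masked against queues whose TXQ_COMMAND enable bit is still set; the IRQ handler then masks INT_MASK and schedules NAPI. The sketch below models only the shift-and-mask step, and the INT_RX / INT_TX_END values are placeholders, not the driver's real masks:

    #include <stdio.h>

    #define INT_RX      0x000003fcu   /* assumed: RX queue cause bits */
    #define INT_TX_END  0x07f80000u   /* assumed: TX-end cause bits   */

    struct work_bits { unsigned int work_rx, work_tx_end; };

    /* Simplified mv643xx_eth_collect_events(): fold cause bits into per-queue
     * work bits, counting only TX queues no longer enabled in TXQ_COMMAND.   */
    static void collect_events(struct work_bits *w, unsigned int int_cause,
                               unsigned int txq_command)
    {
        w->work_rx     |= (int_cause & INT_RX) >> 2;
        w->work_tx_end |= ((int_cause & INT_TX_END) >> 19) & ~(txq_command & 0xff);
    }

    int main(void)
    {
        struct work_bits w = { 0, 0 };

        /* RX event on queue 0 (cause bit 2) and TX-end on queue 1 (cause bit 20),
         * with TX queue 1 already stopped in TXQ_COMMAND.                        */
        collect_events(&w, (1u << 2) | (1u << 20), 0x00);
        printf("work_rx=%#x work_tx_end=%#x\n", w.work_rx, w.work_tx_end);
        return 0;
    }
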
2194 static void handle_link_event(struct mv643xx_eth_private *mp)
2196 struct net_device *dev = mp->dev;
2202 port_status = rdlp(mp, PORT_STATUS);
2211 for (i = 0; i < mp->txq_count; i++) {
2212 struct tx_queue *txq = mp->txq + i;
2247 struct mv643xx_eth_private *mp;
2250 mp = container_of(napi, struct mv643xx_eth_private, napi);
2252 if (unlikely(mp->oom)) {
2253 mp->oom = 0;
2254 del_timer(&mp->rx_oom);
2263 if (mp->work_link) {
2264 mp->work_link = 0;
2265 handle_link_event(mp);
2270 queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx;
2271 if (likely(!mp->oom))
2272 queue_mask |= mp->work_rx_refill;
2275 if (mv643xx_eth_collect_events(mp))
2287 if (mp->work_tx_end & queue_mask) {
2288 txq_kick(mp->txq + queue);
2289 } else if (mp->work_tx & queue_mask) {
2290 work_done += txq_reclaim(mp->txq + queue, work_tbd, 0);
2291 txq_maybe_wake(mp->txq + queue);
2292 } else if (mp->work_rx & queue_mask) {
2293 work_done += rxq_process(mp->rxq + queue, work_tbd);
2294 } else if (!mp->oom && (mp->work_rx_refill & queue_mask)) {
2295 work_done += rxq_refill(mp->rxq + queue, work_tbd);
2302 if (mp->oom)
2303 mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
2305 wrlp(mp, INT_MASK, mp->int_mask);
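
Inside mv643xx_eth_poll() each pending queue is serviced in a fixed priority order: TX-end handling (txq_kick) first, then TX reclaim plus txq_maybe_wake, then RX processing, and finally RX refill, which is skipped while mp->oom is set; once the budget is not exhausted the interrupt mask is restored (line 2305). A stripped-down model of that dispatch, with print statements standing in for the real handlers:

    #include <stdio.h>

    struct work_bits { unsigned int tx_end, tx, rx, rx_refill; int oom; };

    /* Priority order taken from the if/else chain in mv643xx_eth_poll(). */
    static void service_queue(struct work_bits *w, int queue)
    {
        unsigned int queue_mask = 1u << queue;

        if (w->tx_end & queue_mask)
            printf("queue %d: txq_kick()\n", queue);
        else if (w->tx & queue_mask)
            printf("queue %d: txq_reclaim() + txq_maybe_wake()\n", queue);
        else if (w->rx & queue_mask)
            printf("queue %d: rxq_process()\n", queue);
        else if (!w->oom && (w->rx_refill & queue_mask))
            printf("queue %d: rxq_refill()\n", queue);
    }

    int main(void)
    {
        struct work_bits w = { .tx_end = 0x1, .rx = 0x2, .rx_refill = 0x2, .oom = 0 };

        service_queue(&w, 0);   /* TX end wins on queue 0        */
        service_queue(&w, 1);   /* RX processing wins on queue 1 */
        return 0;
    }
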
2313 struct mv643xx_eth_private *mp = from_timer(mp, t, rx_oom);
2315 napi_schedule(&mp->napi);
2318 static void port_start(struct mv643xx_eth_private *mp)
2320 struct net_device *dev = mp->dev;
2340 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
2343 wrlp(mp, PORT_SERIAL_CONTROL, pscr);
2348 wrlp(mp, PORT_SERIAL_CONTROL, pscr);
2353 tx_set_rate(mp, 1000000000, 16777216);
2354 for (i = 0; i < mp->txq_count; i++) {
2355 struct tx_queue *txq = mp->txq + i;
2367 mv643xx_eth_set_features(mp->dev, mp->dev->features);
2372 wrlp(mp, PORT_CONFIG_EXT, 0x00000000);
2377 mv643xx_eth_program_unicast_filter(mp->dev);
2382 for (i = 0; i < mp->rxq_count; i++) {
2383 struct rx_queue *rxq = mp->rxq + i;
2388 wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr);
2394 static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp)
2404 skb_size = mp->dev->mtu + 36;
2411 mp->skb_size = (skb_size + 7) & ~7;
2419 mp->skb_size += SKB_DMA_REALIGN;
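
mv643xx_eth_recalc_skb_size() sizes receive buffers as MTU plus 36 bytes of overhead, rounds the result up to a multiple of 8 ((skb_size + 7) & ~7), and then adds SKB_DMA_REALIGN headroom. A worked version of that arithmetic, with SKB_DMA_REALIGN set to a placeholder value rather than the kernel's definition:

    #include <stdio.h>

    #define SKB_DMA_REALIGN 8   /* placeholder, not necessarily the kernel's value */

    static unsigned int recalc_skb_size(unsigned int mtu)
    {
        unsigned int skb_size = mtu + 36;        /* MTU plus link-layer overhead   */

        skb_size = (skb_size + 7) & ~7u;         /* round up to an 8-byte multiple */
        return skb_size + SKB_DMA_REALIGN;       /* extra room for DMA alignment   */
    }

    int main(void)
    {
        printf("mtu 1500 -> skb_size %u\n", recalc_skb_size(1500));
        printf("mtu 9000 -> skb_size %u\n", recalc_skb_size(9000));
        return 0;
    }
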
2424 struct mv643xx_eth_private *mp = netdev_priv(dev);
2428 wrlp(mp, INT_CAUSE, 0);
2429 wrlp(mp, INT_CAUSE_EXT, 0);
2430 rdlp(mp, INT_CAUSE_EXT);
2439 mv643xx_eth_recalc_skb_size(mp);
2441 napi_enable(&mp->napi);
2443 mp->int_mask = INT_EXT;
2445 for (i = 0; i < mp->rxq_count; i++) {
2446 err = rxq_init(mp, i);
2449 rxq_deinit(mp->rxq + i);
2453 rxq_refill(mp->rxq + i, INT_MAX);
2454 mp->int_mask |= INT_RX_0 << i;
2457 if (mp->oom) {
2458 mp->rx_oom.expires = jiffies + (HZ / 10);
2459 add_timer(&mp->rx_oom);
2462 for (i = 0; i < mp->txq_count; i++) {
2463 err = txq_init(mp, i);
2466 txq_deinit(mp->txq + i);
2469 mp->int_mask |= INT_TX_END_0 << i;
2472 add_timer(&mp->mib_counters_timer);
2473 port_start(mp);
2475 wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
2476 wrlp(mp, INT_MASK, mp->int_mask);
2482 for (i = 0; i < mp->rxq_count; i++)
2483 rxq_deinit(mp->rxq + i);
2485 napi_disable(&mp->napi);
2491 static void port_reset(struct mv643xx_eth_private *mp)
2496 for (i = 0; i < mp->rxq_count; i++)
2497 rxq_disable(mp->rxq + i);
2498 for (i = 0; i < mp->txq_count; i++)
2499 txq_disable(mp->txq + i);
2502 u32 ps = rdlp(mp, PORT_STATUS);
2510 data = rdlp(mp, PORT_SERIAL_CONTROL);
2514 wrlp(mp, PORT_SERIAL_CONTROL, data);
2519 struct mv643xx_eth_private *mp = netdev_priv(dev);
2522 wrlp(mp, INT_MASK_EXT, 0x00000000);
2523 wrlp(mp, INT_MASK, 0x00000000);
2524 rdlp(mp, INT_MASK);
2526 napi_disable(&mp->napi);
2528 del_timer_sync(&mp->rx_oom);
2535 port_reset(mp);
2537 mib_counters_update(mp);
2538 del_timer_sync(&mp->mib_counters_timer);
2540 for (i = 0; i < mp->rxq_count; i++)
2541 rxq_deinit(mp->rxq + i);
2542 for (i = 0; i < mp->txq_count; i++)
2543 txq_deinit(mp->txq + i);
2563 struct mv643xx_eth_private *mp = netdev_priv(dev);
2566 mv643xx_eth_recalc_skb_size(mp);
2567 tx_set_rate(mp, 1000000000, 16777216);
2589 struct mv643xx_eth_private *mp;
2591 mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
2592 if (netif_running(mp->dev)) {
2593 netif_tx_stop_all_queues(mp->dev);
2594 port_reset(mp);
2595 port_start(mp);
2596 netif_tx_wake_all_queues(mp->dev);
2602 struct mv643xx_eth_private *mp = netdev_priv(dev);
2606 schedule_work(&mp->tx_timeout_task);
2612 struct mv643xx_eth_private *mp = netdev_priv(dev);
2614 wrlp(mp, INT_MASK, 0x00000000);
2615 rdlp(mp, INT_MASK);
2619 wrlp(mp, INT_MASK, mp->int_mask);
2924 static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
2926 int addr_shift = 5 * mp->port_num;
2929 data = rdl(mp, PHY_ADDR);
2932 wrl(mp, PHY_ADDR, data);
2935 static int phy_addr_get(struct mv643xx_eth_private *mp)
2939 data = rdl(mp, PHY_ADDR);
2941 return (data >> (5 * mp->port_num)) & 0x1f;
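
phy_addr_set()/phy_addr_get() treat the shared PHY_ADDR register as an array of 5-bit fields, one per port, with port N's PHY address at bit offset 5 * N and masked with 0x1f; the read-modify-write in the middle of phy_addr_set() is not among the matched lines and is assumed below. A self-contained sketch of reading and updating one field:

    #include <stdint.h>
    #include <stdio.h>

    /* PHY_ADDR layout: one 5-bit PHY address per port, port N occupying
     * bits [5*N + 4 : 5*N].                                             */
    static int phy_addr_get(uint32_t phy_addr_reg, int port)
    {
        return (phy_addr_reg >> (5 * port)) & 0x1f;
    }

    static uint32_t phy_addr_set(uint32_t phy_addr_reg, int port, int addr)
    {
        int shift = 5 * port;

        phy_addr_reg &= ~(0x1fu << shift);              /* clear the old field */
        phy_addr_reg |= (uint32_t)(addr & 0x1f) << shift;
        return phy_addr_reg;
    }

    int main(void)
    {
        uint32_t reg = 0;

        reg = phy_addr_set(reg, 1, 0x08);               /* port 1 -> PHY address 8 */
        printf("reg=%#x port1=%d\n", (unsigned)reg, phy_addr_get(reg, 1));
        return 0;
    }
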
2944 static void set_params(struct mv643xx_eth_private *mp,
2947 struct net_device *dev = mp->dev;
2955 uc_addr_get(mp, addr);
2959 mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
2961 mp->rx_ring_size = pd->rx_queue_size;
2962 mp->rx_desc_sram_addr = pd->rx_sram_addr;
2963 mp->rx_desc_sram_size = pd->rx_sram_size;
2965 mp->rxq_count = pd->rx_queue_count ? : 1;
2971 mp->tx_ring_size = clamp_t(unsigned int, tx_ring_size,
2973 if (mp->tx_ring_size != tx_ring_size)
2975 mp->tx_ring_size, tx_ring_size);
2977 mp->tx_desc_sram_addr = pd->tx_sram_addr;
2978 mp->tx_desc_sram_size = pd->tx_sram_size;
2980 mp->txq_count = pd->tx_queue_count ? : 1;
2983 static int get_phy_mode(struct mv643xx_eth_private *mp)
2985 struct device *dev = mp->dev->dev.parent;
3000 static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
3010 start = phy_addr_get(mp) & 0x1f;
3025 phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link,
3026 get_phy_mode(mp));
3028 phy_addr_set(mp, addr);
3036 static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
3038 struct net_device *dev = mp->dev;
3057 static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
3059 struct net_device *dev = mp->dev;
3062 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
3065 wrlp(mp, PORT_SERIAL_CONTROL, pscr);
3083 wrlp(mp, PORT_SERIAL_CONTROL, pscr);
3106 struct mv643xx_eth_private *mp;
3128 mp = netdev_priv(dev);
3129 platform_set_drvdata(pdev, mp);
3131 mp->shared = platform_get_drvdata(pd->shared);
3132 mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10);
3133 mp->port_num = pd->port_number;
3135 mp->dev = dev;
3139 psc1r = rdlp(mp, PORT_SERIAL_CONTROL1);
3174 wrlp(mp, PORT_SERIAL_CONTROL1, psc1r);
3181 mp->t_clk = 133000000;
3182 mp->clk = devm_clk_get(&pdev->dev, NULL);
3183 if (!IS_ERR(mp->clk)) {
3184 clk_prepare_enable(mp->clk);
3185 mp->t_clk = clk_get_rate(mp->clk);
3186 } else if (!IS_ERR(mp->shared->clk)) {
3187 mp->t_clk = clk_get_rate(mp->shared->clk);
3190 set_params(mp, pd);
3191 netif_set_real_num_tx_queues(dev, mp->txq_count);
3192 netif_set_real_num_rx_queues(dev, mp->rxq_count);
3196 phydev = of_phy_connect(mp->dev, pd->phy_node,
3198 get_phy_mode(mp));
3202 phy_addr_set(mp, phydev->mdio.addr);
3204 phydev = phy_scan(mp, pd->phy_addr);
3209 phy_init(mp, pd->speed, pd->duplex);
3220 init_pscr(mp, pd->speed, pd->duplex);
3223 mib_counters_clear(mp);
3225 timer_setup(&mp->mib_counters_timer, mib_counters_timer_wrapper, 0);
3226 mp->mib_counters_timer.expires = jiffies + 30 * HZ;
3228 spin_lock_init(&mp->mib_counters_lock);
3230 INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);
3232 netif_napi_add(dev, &mp->napi, mv643xx_eth_poll);
3234 timer_setup(&mp->rx_oom, oom_timer_wrapper, 0);
3262 if (mp->shared->win_protect)
3263 wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);
3267 wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE);
3269 set_rx_coal(mp, 250);
3270 set_tx_coal(mp, 0);
3277 mp->port_num, dev->dev_addr);
3279 if (mp->tx_desc_sram_size > 0)
3285 if (!IS_ERR(mp->clk))
3286 clk_disable_unprepare(mp->clk);
3294 struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
3295 struct net_device *dev = mp->dev;
3297 unregister_netdev(mp->dev);
3300 cancel_work_sync(&mp->tx_timeout_task);
3302 if (!IS_ERR(mp->clk))
3303 clk_disable_unprepare(mp->clk);
3305 free_netdev(mp->dev);
3312 struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
3315 wrlp(mp, INT_MASK, 0);
3316 rdlp(mp, INT_MASK);
3318 if (netif_running(mp->dev))
3319 port_reset(mp);