Lines Matching +full:num +full:- +full:txq
1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Copyright(c) 2008 - 2009 Atheros Corporation. All rights reserved.
6 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
14 * atl1c_pci_tbl - PCI Device ID Table
98 if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c) { in atl1c_pcie_patch()
108 if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10) { in atl1c_pcie_patch()
121 if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d) { in atl1c_pcie_patch()
133 * atl1c_reset_pcie - reset PCIE module
139 struct pci_dev *pdev = hw->adapter->pdev; in atl1c_reset_pcie()
153 /* wol sts read-clear */ in atl1c_reset_pcie()
185 * atl1c_irq_enable - Enable default interrupt generation settings
190 if (likely(atomic_dec_and_test(&adapter->irq_sem))) { in atl1c_irq_enable()
191 AT_WRITE_REG(&adapter->hw, REG_ISR, 0x7FFFFFFF); in atl1c_irq_enable()
192 AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask); in atl1c_irq_enable()
193 AT_WRITE_FLUSH(&adapter->hw); in atl1c_irq_enable()
198 * atl1c_irq_disable - Mask off interrupt generation on the NIC
203 atomic_inc(&adapter->irq_sem); in atl1c_irq_disable()
204 AT_WRITE_REG(&adapter->hw, REG_IMR, 0); in atl1c_irq_disable()
205 AT_WRITE_REG(&adapter->hw, REG_ISR, ISR_DIS_INT); in atl1c_irq_disable()
206 AT_WRITE_FLUSH(&adapter->hw); in atl1c_irq_disable()
207 synchronize_irq(adapter->pdev->irq); in atl1c_irq_disable()
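/* Note: irq_sem is a nesting counter for the two helpers above.
 * atl1c_irq_disable() increments it, clears IMR and waits for any running
 * handler via synchronize_irq(); atl1c_irq_enable() only re-arms IMR once
 * atomic_dec_and_test() brings the count back to zero, so paired
 * disable/enable calls nest safely.
 */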
211 * atl1c_wait_until_idle - wait up to AT_HW_MAX_IDLE_DELAY reads
229 * atl1c_phy_config - Timer Call-back
236 struct atl1c_hw *hw = &adapter->hw; in atl1c_phy_config()
239 spin_lock_irqsave(&adapter->mdio_lock, flags); in atl1c_phy_config()
241 spin_unlock_irqrestore(&adapter->mdio_lock, flags); in atl1c_phy_config()
248 clear_bit(__AT_RESETTING, &adapter->flags); in atl1c_reinit_locked()
253 struct atl1c_hw *hw = &adapter->hw; in atl1c_check_link_status()
254 struct net_device *netdev = adapter->netdev; in atl1c_check_link_status()
255 struct pci_dev *pdev = adapter->pdev; in atl1c_check_link_status()
261 spin_lock_irqsave(&adapter->mdio_lock, flags); in atl1c_check_link_status()
263 spin_unlock_irqrestore(&adapter->mdio_lock, flags); in atl1c_check_link_status()
268 hw->hibernate = true; in atl1c_check_link_status()
271 dev_warn(&pdev->dev, "reset mac failed\n"); in atl1c_check_link_status()
278 hw->hibernate = false; in atl1c_check_link_status()
279 spin_lock_irqsave(&adapter->mdio_lock, flags); in atl1c_check_link_status()
281 spin_unlock_irqrestore(&adapter->mdio_lock, flags); in atl1c_check_link_status()
285 if (adapter->link_speed != speed || in atl1c_check_link_status()
286 adapter->link_duplex != duplex) { in atl1c_check_link_status()
287 adapter->link_speed = speed; in atl1c_check_link_status()
288 adapter->link_duplex = duplex; in atl1c_check_link_status()
293 dev_info(&pdev->dev, in atl1c_check_link_status()
295 atl1c_driver_name, netdev->name, in atl1c_check_link_status()
296 adapter->link_speed, in atl1c_check_link_status()
297 adapter->link_duplex == FULL_DUPLEX ? in atl1c_check_link_status()
307 struct net_device *netdev = adapter->netdev; in atl1c_link_chg_event()
308 struct pci_dev *pdev = adapter->pdev; in atl1c_link_chg_event()
311 spin_lock(&adapter->mdio_lock); in atl1c_link_chg_event()
312 link = atl1c_get_link_status(&adapter->hw); in atl1c_link_chg_event()
313 spin_unlock(&adapter->mdio_lock); in atl1c_link_chg_event()
320 dev_info(&pdev->dev, in atl1c_link_chg_event()
322 atl1c_driver_name, netdev->name); in atl1c_link_chg_event()
323 adapter->link_speed = SPEED_0; in atl1c_link_chg_event()
327 set_bit(ATL1C_WORK_EVENT_LINK_CHANGE, &adapter->work_event); in atl1c_link_chg_event()
328 schedule_work(&adapter->common_task); in atl1c_link_chg_event()
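/* Pattern note: the link-change interrupt path above does no real work in
 * atomic context; it only records a bit in adapter->work_event and
 * schedules common_task.  atl1c_common_task() below then picks the bits up
 * with test_and_clear_bit() in process context, where resetting the MAC or
 * rechecking the link is allowed to sleep.
 */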
337 netdev = adapter->netdev; in atl1c_common_task()
339 if (test_bit(__AT_DOWN, &adapter->flags)) in atl1c_common_task()
342 if (test_and_clear_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event)) { in atl1c_common_task()
350 &adapter->work_event)) { in atl1c_common_task()
360 del_timer_sync(&adapter->phy_config_timer); in atl1c_del_timer()
365 * atl1c_tx_timeout - Respond to a Tx Hang
374 set_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event); in atl1c_tx_timeout()
375 schedule_work(&adapter->common_task); in atl1c_tx_timeout()
379 * atl1c_set_multi - Multicast and Promiscuous mode set
385 * promiscuous mode, and all-multi behavior.
390 struct atl1c_hw *hw = &adapter->hw; in atl1c_set_multi()
398 if (netdev->flags & IFF_PROMISC) { in atl1c_set_multi()
400 } else if (netdev->flags & IFF_ALLMULTI) { in atl1c_set_multi()
415 hash_value = atl1c_hash_mc_addr(hw, ha->addr); in atl1c_set_multi()
435 struct pci_dev *pdev = adapter->pdev; in atl1c_vlan_mode()
439 dev_dbg(&pdev->dev, "atl1c_vlan_mode\n"); in atl1c_vlan_mode()
442 AT_READ_REG(&adapter->hw, REG_MAC_CTRL, &mac_ctrl_data); in atl1c_vlan_mode()
444 AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data); in atl1c_vlan_mode()
450 struct pci_dev *pdev = adapter->pdev; in atl1c_restore_vlan()
453 dev_dbg(&pdev->dev, "atl1c_restore_vlan\n"); in atl1c_restore_vlan()
454 atl1c_vlan_mode(adapter->netdev, adapter->netdev->features); in atl1c_restore_vlan()
458 * atl1c_set_mac_addr - Change the Ethernet Address of the NIC
469 if (!is_valid_ether_addr(addr->sa_data)) in atl1c_set_mac_addr()
470 return -EADDRNOTAVAIL; in atl1c_set_mac_addr()
473 return -EBUSY; in atl1c_set_mac_addr()
475 eth_hw_addr_set(netdev, addr->sa_data); in atl1c_set_mac_addr()
476 memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); in atl1c_set_mac_addr()
478 atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.mac_addr); in atl1c_set_mac_addr()
486 int mtu = dev->mtu; in atl1c_set_rxbufsize()
488 adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ? in atl1c_set_rxbufsize()
496 struct atl1c_hw *hw = &adapter->hw; in atl1c_fix_features()
507 if (hw->nic_type != athr_mt) { in atl1c_fix_features()
508 if (netdev->mtu > MAX_TSO_FRAME_SIZE) in atl1c_fix_features()
518 netdev_features_t changed = netdev->features ^ features; in atl1c_set_features()
529 struct atl1c_hw *hw = &adapter->hw; in atl1c_set_max_mtu()
531 switch (hw->nic_type) { in atl1c_set_max_mtu()
536 netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - in atl1c_set_max_mtu()
540 netdev->max_mtu = 9500; in atl1c_set_max_mtu()
544 netdev->max_mtu = ETH_DATA_LEN; in atl1c_set_max_mtu()
550 * atl1c_change_mtu - Change the Maximum Transfer Unit
562 while (test_and_set_bit(__AT_RESETTING, &adapter->flags)) in atl1c_change_mtu()
564 netdev->mtu = new_mtu; in atl1c_change_mtu()
565 adapter->hw.max_frame_size = new_mtu; in atl1c_change_mtu()
570 clear_bit(__AT_RESETTING, &adapter->flags); in atl1c_change_mtu()
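/* __AT_RESETTING serves as an ownership flag for reconfiguration paths:
 * atl1c_change_mtu() loops on test_and_set_bit() until it owns the bit,
 * applies the new MTU and frame-size settings, and clears the bit when
 * finished (atl1c_reinit_locked() above clears it the same way).
 */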
583 atl1c_read_phy_reg(&adapter->hw, reg_num, &result); in atl1c_mdio_read()
592 atl1c_write_phy_reg(&adapter->hw, reg_num, val); in atl1c_mdio_write()
599 struct pci_dev *pdev = adapter->pdev; in atl1c_mii_ioctl()
605 return -EINVAL; in atl1c_mii_ioctl()
607 spin_lock_irqsave(&adapter->mdio_lock, flags); in atl1c_mii_ioctl()
610 data->phy_id = 0; in atl1c_mii_ioctl()
614 if (atl1c_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, in atl1c_mii_ioctl()
615 &data->val_out)) { in atl1c_mii_ioctl()
616 retval = -EIO; in atl1c_mii_ioctl()
622 if (data->reg_num & ~(0x1F)) { in atl1c_mii_ioctl()
623 retval = -EFAULT; in atl1c_mii_ioctl()
627 dev_dbg(&pdev->dev, "<atl1c_mii_ioctl> write %x %x", in atl1c_mii_ioctl()
628 data->reg_num, data->val_in); in atl1c_mii_ioctl()
629 if (atl1c_write_phy_reg(&adapter->hw, in atl1c_mii_ioctl()
630 data->reg_num, data->val_in)) { in atl1c_mii_ioctl()
631 retval = -EIO; in atl1c_mii_ioctl()
637 retval = -EOPNOTSUPP; in atl1c_mii_ioctl()
641 spin_unlock_irqrestore(&adapter->mdio_lock, flags); in atl1c_mii_ioctl()
653 return -EOPNOTSUPP; in atl1c_ioctl()
658 * atl1c_alloc_queues - Allocate memory for all rings
670 switch (pdev->device) { in atl1c_get_mac_type()
696 hw->ctrl_flags = ATL1C_INTR_MODRT_ENABLE | in atl1c_setup_mac_funcs()
698 hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT | in atl1c_setup_mac_funcs()
700 hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON; in atl1c_setup_mac_funcs()
702 if (hw->nic_type == athr_l1c || in atl1c_setup_mac_funcs()
703 hw->nic_type == athr_l1d || in atl1c_setup_mac_funcs()
704 hw->nic_type == athr_l1d_2) in atl1c_setup_mac_funcs()
705 hw->link_cap_flags |= ATL1C_LINK_CAP_1000M; in atl1c_setup_mac_funcs()
742 struct pci_dev *pdev = hw->adapter->pdev; in atl1c_patch_assign()
746 hw->msi_lnkpatch = false; in atl1c_patch_assign()
749 if (plats[i].pci_did == hw->device_id && in atl1c_patch_assign()
750 plats[i].pci_revid == hw->revision_id && in atl1c_patch_assign()
751 plats[i].subsystem_vid == hw->subsystem_vendor_id && in atl1c_patch_assign()
752 plats[i].subsystem_did == hw->subsystem_id) { in atl1c_patch_assign()
754 hw->msi_lnkpatch = true; in atl1c_patch_assign()
759 if (hw->device_id == PCI_DEVICE_ID_ATHEROS_L2C_B2 && in atl1c_patch_assign()
760 hw->revision_id == L2CB_V21) { in atl1c_patch_assign()
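/* The plats[] table above appears to be a quirk list keyed on the full PCI
 * identity (device ID, revision, subsystem vendor/device); a match sets
 * hw->msi_lnkpatch, and L2C_B2 rev L2CB_V21 parts get an extra special
 * case of their own.
 */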
772 * atl1c_sw_init - Initialize general software structures (struct atl1c_adapter)
781 struct atl1c_hw *hw = &adapter->hw; in atl1c_sw_init()
782 struct pci_dev *pdev = adapter->pdev; in atl1c_sw_init()
786 adapter->wol = 0; in atl1c_sw_init()
787 device_set_wakeup_enable(&pdev->dev, false); in atl1c_sw_init()
788 adapter->link_speed = SPEED_0; in atl1c_sw_init()
789 adapter->link_duplex = FULL_DUPLEX; in atl1c_sw_init()
790 adapter->tpd_ring[0].count = 1024; in atl1c_sw_init()
791 adapter->rfd_ring[0].count = 512; in atl1c_sw_init()
793 hw->vendor_id = pdev->vendor; in atl1c_sw_init()
794 hw->device_id = pdev->device; in atl1c_sw_init()
795 hw->subsystem_vendor_id = pdev->subsystem_vendor; in atl1c_sw_init()
796 hw->subsystem_id = pdev->subsystem_device; in atl1c_sw_init()
798 hw->revision_id = revision & 0xFF; in atl1c_sw_init()
800 hw->hibernate = true; in atl1c_sw_init()
801 hw->media_type = MEDIA_TYPE_AUTO_SENSOR; in atl1c_sw_init()
803 dev_err(&pdev->dev, "set mac function pointers failed\n"); in atl1c_sw_init()
804 return -1; in atl1c_sw_init()
808 hw->intr_mask = IMR_NORMAL_MASK; in atl1c_sw_init()
809 for (i = 0; i < adapter->tx_queue_count; ++i) in atl1c_sw_init()
810 hw->intr_mask |= atl1c_qregs[i].tx_isr; in atl1c_sw_init()
811 for (i = 0; i < adapter->rx_queue_count; ++i) in atl1c_sw_init()
812 hw->intr_mask |= atl1c_qregs[i].rx_isr; in atl1c_sw_init()
813 hw->phy_configured = false; in atl1c_sw_init()
814 hw->preamble_len = 7; in atl1c_sw_init()
815 hw->max_frame_size = adapter->netdev->mtu; in atl1c_sw_init()
816 hw->autoneg_advertised = ADVERTISED_Autoneg; in atl1c_sw_init()
817 hw->indirect_tab = 0xE4E4E4E4; in atl1c_sw_init()
818 hw->base_cpu = 0; in atl1c_sw_init()
820 hw->ict = 50000; /* 100ms */ in atl1c_sw_init()
821 hw->smb_timer = 200000; /* 400ms */ in atl1c_sw_init()
822 hw->rx_imt = 200; in atl1c_sw_init()
823 hw->tx_imt = 1000; in atl1c_sw_init()
825 hw->tpd_burst = 5; in atl1c_sw_init()
826 hw->rfd_burst = 8; in atl1c_sw_init()
827 hw->dma_order = atl1c_dma_ord_out; in atl1c_sw_init()
828 hw->dmar_block = atl1c_dma_req_1024; in atl1c_sw_init()
831 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); in atl1c_sw_init()
832 return -ENOMEM; in atl1c_sw_init()
835 atl1c_set_rxbufsize(adapter, adapter->netdev); in atl1c_sw_init()
836 atomic_set(&adapter->irq_sem, 1); in atl1c_sw_init()
837 spin_lock_init(&adapter->mdio_lock); in atl1c_sw_init()
838 spin_lock_init(&adapter->hw.intr_mask_lock); in atl1c_sw_init()
839 set_bit(__AT_DOWN, &adapter->flags); in atl1c_sw_init()
848 if (buffer_info->flags & ATL1C_BUFFER_FREE) in atl1c_clean_buffer()
850 if (buffer_info->dma) { in atl1c_clean_buffer()
851 if (buffer_info->flags & ATL1C_PCIMAP_FROMDEVICE) in atl1c_clean_buffer()
856 if (buffer_info->flags & ATL1C_PCIMAP_SINGLE) in atl1c_clean_buffer()
857 dma_unmap_single(&pdev->dev, buffer_info->dma, in atl1c_clean_buffer()
858 buffer_info->length, pci_direction); in atl1c_clean_buffer()
859 else if (buffer_info->flags & ATL1C_PCIMAP_PAGE) in atl1c_clean_buffer()
860 dma_unmap_page(&pdev->dev, buffer_info->dma, in atl1c_clean_buffer()
861 buffer_info->length, pci_direction); in atl1c_clean_buffer()
863 if (buffer_info->skb) in atl1c_clean_buffer()
864 dev_consume_skb_any(buffer_info->skb); in atl1c_clean_buffer()
865 buffer_info->dma = 0; in atl1c_clean_buffer()
866 buffer_info->skb = NULL; in atl1c_clean_buffer()
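/* Cleanup note: the flags stored in buffer_info at map time drive the
 * unmap above.  ATL1C_PCIMAP_SINGLE vs ATL1C_PCIMAP_PAGE selects
 * dma_unmap_single() or dma_unmap_page(), and the direction flag
 * (ATL1C_PCIMAP_FROMDEVICE) selects the DMA direction, so every mapping is
 * released exactly the way it was created.
 */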
870 * atl1c_clean_tx_ring - Free Tx-skb
877 struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[queue]; in atl1c_clean_tx_ring()
879 struct pci_dev *pdev = adapter->pdev; in atl1c_clean_tx_ring()
882 ring_count = tpd_ring->count; in atl1c_clean_tx_ring()
884 buffer_info = &tpd_ring->buffer_info[index]; in atl1c_clean_tx_ring()
888 netdev_tx_reset_queue(netdev_get_tx_queue(adapter->netdev, queue)); in atl1c_clean_tx_ring()
890 /* Zero out Tx-buffers */ in atl1c_clean_tx_ring()
891 memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) * in atl1c_clean_tx_ring()
893 atomic_set(&tpd_ring->next_to_clean, 0); in atl1c_clean_tx_ring()
894 tpd_ring->next_to_use = 0; in atl1c_clean_tx_ring()
898 * atl1c_clean_rx_ring - Free rx-reservation skbs
904 struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[queue]; in atl1c_clean_rx_ring()
905 struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[queue]; in atl1c_clean_rx_ring()
907 struct pci_dev *pdev = adapter->pdev; in atl1c_clean_rx_ring()
910 for (j = 0; j < rfd_ring->count; j++) { in atl1c_clean_rx_ring()
911 buffer_info = &rfd_ring->buffer_info[j]; in atl1c_clean_rx_ring()
915 memset(rfd_ring->desc, 0, rfd_ring->size); in atl1c_clean_rx_ring()
916 rfd_ring->next_to_clean = 0; in atl1c_clean_rx_ring()
917 rfd_ring->next_to_use = 0; in atl1c_clean_rx_ring()
918 rrd_ring->next_to_use = 0; in atl1c_clean_rx_ring()
919 rrd_ring->next_to_clean = 0; in atl1c_clean_rx_ring()
927 struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring; in atl1c_init_ring_ptrs()
928 struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring; in atl1c_init_ring_ptrs()
929 struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring; in atl1c_init_ring_ptrs()
933 for (i = 0; i < adapter->tx_queue_count; i++) { in atl1c_init_ring_ptrs()
937 for (j = 0; j < tpd_ring->count; j++) in atl1c_init_ring_ptrs()
941 for (i = 0; i < adapter->rx_queue_count; i++) { in atl1c_init_ring_ptrs()
954 * atl1c_free_ring_resources - Free Tx / RX descriptor Resources
961 struct pci_dev *pdev = adapter->pdev; in atl1c_free_ring_resources()
963 dma_free_coherent(&pdev->dev, adapter->ring_header.size, in atl1c_free_ring_resources()
964 adapter->ring_header.desc, adapter->ring_header.dma); in atl1c_free_ring_resources()
965 adapter->ring_header.desc = NULL; in atl1c_free_ring_resources()
970 if (adapter->tpd_ring[0].buffer_info) { in atl1c_free_ring_resources()
971 kfree(adapter->tpd_ring[0].buffer_info); in atl1c_free_ring_resources()
972 adapter->tpd_ring[0].buffer_info = NULL; in atl1c_free_ring_resources()
977 * atl1c_setup_ring_resources - allocate Tx / RX descriptor resources
984 struct pci_dev *pdev = adapter->pdev; in atl1c_setup_ring_resources()
985 struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring; in atl1c_setup_ring_resources()
986 struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring; in atl1c_setup_ring_resources()
987 struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring; in atl1c_setup_ring_resources()
988 struct atl1c_ring_header *ring_header = &adapter->ring_header; in atl1c_setup_ring_resources()
989 int tqc = adapter->tx_queue_count; in atl1c_setup_ring_resources()
990 int rqc = adapter->rx_queue_count; in atl1c_setup_ring_resources()
1005 size = sizeof(struct atl1c_buffer) * (tpd_ring->count * tqc + in atl1c_setup_ring_resources()
1006 rfd_ring->count * rqc); in atl1c_setup_ring_resources()
1007 tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL); in atl1c_setup_ring_resources()
1008 if (unlikely(!tpd_ring->buffer_info)) in atl1c_setup_ring_resources()
1013 tpd_ring[i].num = i; in atl1c_setup_ring_resources()
1014 tpd_ring[i].buffer_info = (tpd_ring->buffer_info + count); in atl1c_setup_ring_resources()
1020 rrd_ring[i].num = i; in atl1c_setup_ring_resources()
1023 rfd_ring[i].buffer_info = (tpd_ring->buffer_info + count); in atl1c_setup_ring_resources()
1024 count += rfd_ring->count; in atl1c_setup_ring_resources()
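/* A single kzalloc() above backs the buffer_info arrays of every TX and RX
 * queue; 'count' is a running index handing each tpd_ring[i]/rfd_ring[i]
 * its slice of that shared allocation.
 */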
1032 ring_header->size = in atl1c_setup_ring_resources()
1033 sizeof(struct atl1c_tpd_desc) * tpd_ring->count * tqc + in atl1c_setup_ring_resources()
1034 sizeof(struct atl1c_rx_free_desc) * rfd_ring->count * rqc + in atl1c_setup_ring_resources()
1035 sizeof(struct atl1c_recv_ret_status) * rfd_ring->count * rqc + in atl1c_setup_ring_resources()
1038 ring_header->desc = dma_alloc_coherent(&pdev->dev, ring_header->size, in atl1c_setup_ring_resources()
1039 &ring_header->dma, GFP_KERNEL); in atl1c_setup_ring_resources()
1040 if (unlikely(!ring_header->desc)) { in atl1c_setup_ring_resources()
1041 dev_err(&pdev->dev, "could not get memory for DMA buffer\n"); in atl1c_setup_ring_resources()
1046 tpd_ring[0].dma = roundup(ring_header->dma, 8); in atl1c_setup_ring_resources()
1047 offset = tpd_ring[0].dma - ring_header->dma; in atl1c_setup_ring_resources()
1049 tpd_ring[i].dma = ring_header->dma + offset; in atl1c_setup_ring_resources()
1050 tpd_ring[i].desc = (u8 *)ring_header->desc + offset; in atl1c_setup_ring_resources()
1057 rfd_ring[i].dma = ring_header->dma + offset; in atl1c_setup_ring_resources()
1058 rfd_ring[i].desc = (u8 *)ring_header->desc + offset; in atl1c_setup_ring_resources()
1064 rrd_ring[i].dma = ring_header->dma + offset; in atl1c_setup_ring_resources()
1065 rrd_ring[i].desc = (u8 *)ring_header->desc + offset; in atl1c_setup_ring_resources()
1074 kfree(tpd_ring->buffer_info); in atl1c_setup_ring_resources()
1075 return -ENOMEM; in atl1c_setup_ring_resources()
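/* Illustrative sketch (not driver code): the lines above carve one coherent
 * DMA block into per-queue TPD, RFD and RRD descriptor arrays.  The base is
 * rounded up to 8 bytes and each ring then starts at a running byte offset.
 * Descriptor sizes and queue counts below are made-up placeholders (only
 * the tpd/rfd counts come from atl1c_sw_init() above); the offset
 * arithmetic is what mirrors the listing.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t roundup8(uint64_t x)
{
	return (x + 7) & ~(uint64_t)7;
}

int main(void)
{
	const unsigned tqc = 2, rqc = 2;		/* queue counts (assumed) */
	const unsigned tpd_count = 1024, rfd_count = 512;
	const unsigned tpd_sz = 16, rfd_sz = 8, rrd_sz = 16;	/* placeholders */
	uint64_t dma_base = 0x12345674ULL;	/* pretend ring_header->dma */
	uint64_t offset = roundup8(dma_base) - dma_base;
	unsigned i;

	for (i = 0; i < tqc; i++, offset += (uint64_t)tpd_sz * tpd_count)
		printf("tpd[%u].dma = base + %llu\n", i, (unsigned long long)offset);
	for (i = 0; i < rqc; i++, offset += (uint64_t)rfd_sz * rfd_count)
		printf("rfd[%u].dma = base + %llu\n", i, (unsigned long long)offset);
	for (i = 0; i < rqc; i++, offset += (uint64_t)rrd_sz * rfd_count)
		printf("rrd[%u].dma = base + %llu\n", i, (unsigned long long)offset);
	return 0;
}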
1080 struct atl1c_hw *hw = &adapter->hw; in atl1c_configure_des_ring()
1081 struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring; in atl1c_configure_des_ring()
1082 struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring; in atl1c_configure_des_ring()
1083 struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring; in atl1c_configure_des_ring()
1085 int tx_queue_count = adapter->tx_queue_count; in atl1c_configure_des_ring()
1104 (u32)((rfd_ring->dma & AT_DMA_HI_ADDR_MASK) >> 32)); in atl1c_configure_des_ring()
1105 for (i = 0; i < adapter->rx_queue_count; i++) { in atl1c_configure_des_ring()
1111 rfd_ring->count & RFD_RING_SIZE_MASK); in atl1c_configure_des_ring()
1113 adapter->rx_buffer_len & RX_BUF_SIZE_MASK); in atl1c_configure_des_ring()
1116 for (i = 0; i < adapter->rx_queue_count; i++) { in atl1c_configure_des_ring()
1121 (rrd_ring->count & RRD_RING_SIZE_MASK)); in atl1c_configure_des_ring()
1123 if (hw->nic_type == athr_l2c_b) { in atl1c_configure_des_ring()
1139 struct atl1c_hw *hw = &adapter->hw; in atl1c_configure_tx()
1147 max_pay_load = pcie_get_readrq(adapter->pdev) >> 8; in atl1c_configure_tx()
1148 hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block); in atl1c_configure_tx()
1150 * if the BIOS has changed the dma-read-max-length to an invalid value, in atl1c_configure_tx()
1153 if (hw->dmar_block < DEVICE_CTRL_MAXRRS_MIN) { in atl1c_configure_tx()
1154 pcie_set_readrq(adapter->pdev, 128 << DEVICE_CTRL_MAXRRS_MIN); in atl1c_configure_tx()
1155 hw->dmar_block = DEVICE_CTRL_MAXRRS_MIN; in atl1c_configure_tx()
1158 hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2 ? in atl1c_configure_tx()
1166 struct atl1c_hw *hw = &adapter->hw; in atl1c_configure_rx()
1169 rxq_ctrl_data = (hw->rfd_burst & RXQ_RFD_BURST_NUM_MASK) << in atl1c_configure_rx()
1172 if (hw->ctrl_flags & ATL1C_RX_IPV6_CHKSUM) in atl1c_configure_rx()
1176 if (hw->nic_type != athr_l1d_2 && (hw->device_id & 1) != 0) in atl1c_configure_rx()
1185 struct atl1c_hw *hw = &adapter->hw; in atl1c_configure_dma()
1190 FIELDX(DMA_CTRL_RREQ_BLEN, hw->dmar_block) | in atl1c_configure_dma()
1199 * hw - Struct containing variables accessed by shared code
1226 struct atl1c_hw *hw = &adapter->hw; in atl1c_start_mac()
1227 u32 mac, txq, rxq; in atl1c_start_mac() local
1229 hw->mac_duplex = adapter->link_duplex == FULL_DUPLEX; in atl1c_start_mac()
1230 hw->mac_speed = adapter->link_speed == SPEED_1000 ? in atl1c_start_mac()
1233 AT_READ_REG(hw, REG_TXQ_CTRL, &txq); in atl1c_start_mac()
1237 txq |= TXQ_CTRL_EN; in atl1c_start_mac()
1244 if (hw->mac_duplex) in atl1c_start_mac()
1248 mac = FIELD_SETX(mac, MAC_CTRL_SPEED, hw->mac_speed); in atl1c_start_mac()
1249 mac = FIELD_SETX(mac, MAC_CTRL_PRMLEN, hw->preamble_len); in atl1c_start_mac()
1251 AT_WRITE_REG(hw, REG_TXQ_CTRL, txq); in atl1c_start_mac()
1258 * hw - Struct containing variables accessed by shared code
1263 struct atl1c_adapter *adapter = hw->adapter; in atl1c_reset_mac()
1264 struct pci_dev *pdev = adapter->pdev; in atl1c_reset_mac()
1271 * the current PCI configuration. The global reset bit is self- in atl1c_reset_mac()
1283 dev_err(&pdev->dev, in atl1c_reset_mac()
1286 return -1; in atl1c_reset_mac()
1296 switch (hw->nic_type) { in atl1c_reset_mac()
1316 u16 ctrl_flags = hw->ctrl_flags; in atl1c_disable_l0s_l1()
1318 hw->ctrl_flags &= ~(ATL1C_ASPM_L0S_SUPPORT | ATL1C_ASPM_L1_SUPPORT); in atl1c_disable_l0s_l1()
1320 hw->ctrl_flags = ctrl_flags; in atl1c_disable_l0s_l1()
1337 if (hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) { in atl1c_set_aspm()
1345 link_l1_timer = hw->nic_type == athr_l2c_b ? in atl1c_set_aspm()
1354 if ((hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT) && link_speed != SPEED_0) in atl1c_set_aspm()
1356 if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT) in atl1c_set_aspm()
1360 if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d || in atl1c_set_aspm()
1361 hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) { in atl1c_set_aspm()
1373 if (link_speed == SPEED_0 || hw->nic_type == athr_l2c_b) in atl1c_set_aspm()
1400 * atl1c_configure_mac - Configure Transmit&Receive Unit after Reset
1407 struct atl1c_hw *hw = &adapter->hw; in atl1c_configure_mac()
1422 * waiting the retrigger-timer interval for software to acknowledge the interrupt. in atl1c_configure_mac()
1426 if (hw->ctrl_flags & ATL1C_CLK_GATING_EN) { in atl1c_configure_mac()
1427 if (hw->nic_type == athr_l2c_b) in atl1c_configure_mac()
1434 hw->ict & INT_RETRIG_TIMER_MASK); in atl1c_configure_mac()
1438 if (hw->ctrl_flags & ATL1C_INTR_MODRT_ENABLE) { in atl1c_configure_mac()
1439 intr_modrt_data = (hw->tx_imt & IRQ_MODRT_TIMER_MASK) << in atl1c_configure_mac()
1441 intr_modrt_data |= (hw->rx_imt & IRQ_MODRT_TIMER_MASK) << in atl1c_configure_mac()
1448 if (hw->ctrl_flags & ATL1C_INTR_CLEAR_ON_READ) in atl1c_configure_mac()
1455 hw->smb_timer & SMB_STAT_TIMER_MASK); in atl1c_configure_mac()
1458 AT_WRITE_REG(hw, REG_MTU, hw->max_frame_size + ETH_HLEN + in atl1c_configure_mac()
1470 struct net_device *netdev = adapter->netdev; in atl1c_configure()
1471 int num; in atl1c_configure() local
1474 if (adapter->hw.nic_type == athr_mt) { in atl1c_configure()
1477 AT_READ_REG(&adapter->hw, REG_MT_MODE, &mode); in atl1c_configure()
1478 if (adapter->rx_queue_count == 4) in atl1c_configure()
1482 AT_WRITE_REG(&adapter->hw, REG_MT_MODE, mode); in atl1c_configure()
1489 for (i = 0; i < adapter->rx_queue_count; ++i) { in atl1c_configure()
1490 num = atl1c_alloc_rx_buffer(adapter, i, false); in atl1c_configure()
1491 if (unlikely(num == 0)) in atl1c_configure()
1492 return -ENOMEM; in atl1c_configure()
1496 return -EIO; in atl1c_configure()
1509 stats_item = &adapter->hw_stats.rx_ok; in atl1c_update_hw_stats()
1511 AT_READ_REG(&adapter->hw, hw_reg_addr, &data); in atl1c_update_hw_stats()
1518 stats_item = &adapter->hw_stats.tx_ok; in atl1c_update_hw_stats()
1520 AT_READ_REG(&adapter->hw, hw_reg_addr, &data); in atl1c_update_hw_stats()
1528 * atl1c_get_stats - Get System Network Statistics
1537 struct atl1c_hw_stats *hw_stats = &adapter->hw_stats; in atl1c_get_stats()
1538 struct net_device_stats *net_stats = &netdev->stats; in atl1c_get_stats()
1541 net_stats->rx_bytes = hw_stats->rx_byte_cnt; in atl1c_get_stats()
1542 net_stats->tx_bytes = hw_stats->tx_byte_cnt; in atl1c_get_stats()
1543 net_stats->multicast = hw_stats->rx_mcast; in atl1c_get_stats()
1544 net_stats->collisions = hw_stats->tx_1_col + in atl1c_get_stats()
1545 hw_stats->tx_2_col + in atl1c_get_stats()
1546 hw_stats->tx_late_col + in atl1c_get_stats()
1547 hw_stats->tx_abort_col; in atl1c_get_stats()
1549 net_stats->rx_errors = hw_stats->rx_frag + in atl1c_get_stats()
1550 hw_stats->rx_fcs_err + in atl1c_get_stats()
1551 hw_stats->rx_len_err + in atl1c_get_stats()
1552 hw_stats->rx_sz_ov + in atl1c_get_stats()
1553 hw_stats->rx_rrd_ov + in atl1c_get_stats()
1554 hw_stats->rx_align_err + in atl1c_get_stats()
1555 hw_stats->rx_rxf_ov; in atl1c_get_stats()
1557 net_stats->rx_fifo_errors = hw_stats->rx_rxf_ov; in atl1c_get_stats()
1558 net_stats->rx_length_errors = hw_stats->rx_len_err; in atl1c_get_stats()
1559 net_stats->rx_crc_errors = hw_stats->rx_fcs_err; in atl1c_get_stats()
1560 net_stats->rx_frame_errors = hw_stats->rx_align_err; in atl1c_get_stats()
1561 net_stats->rx_dropped = hw_stats->rx_rrd_ov; in atl1c_get_stats()
1563 net_stats->tx_errors = hw_stats->tx_late_col + in atl1c_get_stats()
1564 hw_stats->tx_abort_col + in atl1c_get_stats()
1565 hw_stats->tx_underrun + in atl1c_get_stats()
1566 hw_stats->tx_trunc; in atl1c_get_stats()
1568 net_stats->tx_fifo_errors = hw_stats->tx_underrun; in atl1c_get_stats()
1569 net_stats->tx_aborted_errors = hw_stats->tx_abort_col; in atl1c_get_stats()
1570 net_stats->tx_window_errors = hw_stats->tx_late_col; in atl1c_get_stats()
1572 net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors; in atl1c_get_stats()
1573 net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors; in atl1c_get_stats()
1582 spin_lock(&adapter->mdio_lock); in atl1c_clear_phy_int()
1583 atl1c_read_phy_reg(&adapter->hw, MII_ISR, &phy_data); in atl1c_clear_phy_int()
1584 spin_unlock(&adapter->mdio_lock); in atl1c_clear_phy_int()
1591 struct atl1c_adapter *adapter = tpd_ring->adapter; in atl1c_clean_tx()
1592 struct netdev_queue *txq = in atl1c_clean_tx() local
1593 netdev_get_tx_queue(napi->dev, tpd_ring->num); in atl1c_clean_tx()
1595 struct pci_dev *pdev = adapter->pdev; in atl1c_clean_tx()
1596 u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean); in atl1c_clean_tx()
1601 AT_READ_REGW(&adapter->hw, atl1c_qregs[tpd_ring->num].tpd_cons, in atl1c_clean_tx()
1605 buffer_info = &tpd_ring->buffer_info[next_to_clean]; in atl1c_clean_tx()
1606 if (buffer_info->skb) { in atl1c_clean_tx()
1607 total_bytes += buffer_info->skb->len; in atl1c_clean_tx()
1611 if (++next_to_clean == tpd_ring->count) in atl1c_clean_tx()
1613 atomic_set(&tpd_ring->next_to_clean, next_to_clean); in atl1c_clean_tx()
1616 netdev_tx_completed_queue(txq, total_packets, total_bytes); in atl1c_clean_tx()
1618 if (netif_tx_queue_stopped(txq) && netif_carrier_ok(adapter->netdev)) in atl1c_clean_tx()
1619 netif_tx_wake_queue(txq); in atl1c_clean_tx()
1623 spin_lock_irqsave(&adapter->hw.intr_mask_lock, flags); in atl1c_clean_tx()
1624 adapter->hw.intr_mask |= atl1c_qregs[tpd_ring->num].tx_isr; in atl1c_clean_tx()
1625 AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask); in atl1c_clean_tx()
1626 spin_unlock_irqrestore(&adapter->hw.intr_mask_lock, flags); in atl1c_clean_tx()
1634 struct atl1c_hw *hw = &adapter->hw; in atl1c_intr_rx_tx()
1638 spin_lock(&hw->intr_mask_lock); in atl1c_intr_rx_tx()
1639 intr_mask = hw->intr_mask; in atl1c_intr_rx_tx()
1640 for (i = 0; i < adapter->rx_queue_count; ++i) { in atl1c_intr_rx_tx()
1643 if (napi_schedule_prep(&adapter->rrd_ring[i].napi)) { in atl1c_intr_rx_tx()
1645 __napi_schedule(&adapter->rrd_ring[i].napi); in atl1c_intr_rx_tx()
1648 for (i = 0; i < adapter->tx_queue_count; ++i) { in atl1c_intr_rx_tx()
1651 if (napi_schedule_prep(&adapter->tpd_ring[i].napi)) { in atl1c_intr_rx_tx()
1653 __napi_schedule(&adapter->tpd_ring[i].napi); in atl1c_intr_rx_tx()
1657 if (hw->intr_mask != intr_mask) { in atl1c_intr_rx_tx()
1658 hw->intr_mask = intr_mask; in atl1c_intr_rx_tx()
1659 AT_WRITE_REG(hw, REG_IMR, hw->intr_mask); in atl1c_intr_rx_tx()
1661 spin_unlock(&hw->intr_mask_lock); in atl1c_intr_rx_tx()
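/* Interrupt/NAPI handshake: for each queue whose ISR bit fired, the code
 * above removes that queue's bit from hw->intr_mask (masking further
 * interrupts for it) and schedules the queue's NAPI instance; the poll
 * side (atl1c_clean_tx()/atl1c_clean_rx()) ORs the bit back into intr_mask
 * and rewrites REG_IMR once its work is done.
 */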
1665 * atl1c_intr - Interrupt Handler
1673 struct pci_dev *pdev = adapter->pdev; in atl1c_intr()
1674 struct atl1c_hw *hw = &adapter->hw; in atl1c_intr()
1682 status = reg_data & hw->intr_mask; in atl1c_intr()
1701 dev_err(&pdev->dev, in atl1c_intr()
1705 set_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event); in atl1c_intr()
1706 schedule_work(&adapter->common_task); in atl1c_intr()
1712 dev_warn(&pdev->dev, in atl1c_intr()
1718 netdev->stats.tx_carrier_errors++; in atl1c_intr()
1723 } while (--max_ints > 0); in atl1c_intr()
1724 /* re-enable Interrupt*/ in atl1c_intr()
1725 AT_WRITE_REG(&adapter->hw, REG_ISR, 0); in atl1c_intr()
1732 if (adapter->hw.nic_type == athr_mt) { in atl1c_rx_checksum()
1733 if (prrs->word3 & RRS_MT_PROT_ID_TCPUDP) in atl1c_rx_checksum()
1734 skb->ip_summed = CHECKSUM_UNNECESSARY; in atl1c_rx_checksum()
1748 struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[queue]; in atl1c_alloc_rx_buffer()
1749 struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[queue]; in atl1c_alloc_rx_buffer()
1750 struct pci_dev *pdev = adapter->pdev; in atl1c_alloc_rx_buffer()
1759 next_next = rfd_next_to_use = rfd_ring->next_to_use; in atl1c_alloc_rx_buffer()
1760 if (++next_next == rfd_ring->count) in atl1c_alloc_rx_buffer()
1762 buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; in atl1c_alloc_rx_buffer()
1763 next_info = &rfd_ring->buffer_info[next_next]; in atl1c_alloc_rx_buffer()
1765 while (next_info->flags & ATL1C_BUFFER_FREE) { in atl1c_alloc_rx_buffer()
1777 skb = napi_alloc_skb(&rrd_ring->napi, adapter->rx_buffer_len + 64); in atl1c_alloc_rx_buffer()
1779 skb = netdev_alloc_skb(adapter->netdev, adapter->rx_buffer_len + 64); in atl1c_alloc_rx_buffer()
1782 dev_warn(&pdev->dev, "alloc rx buffer failed\n"); in atl1c_alloc_rx_buffer()
1786 if (((unsigned long)skb->data & 0xfff) == 0xfc0) in atl1c_alloc_rx_buffer()
1794 vir_addr = skb->data; in atl1c_alloc_rx_buffer()
1796 buffer_info->skb = skb; in atl1c_alloc_rx_buffer()
1797 buffer_info->length = adapter->rx_buffer_len; in atl1c_alloc_rx_buffer()
1798 mapping = dma_map_single(&pdev->dev, vir_addr, in atl1c_alloc_rx_buffer()
1799 buffer_info->length, DMA_FROM_DEVICE); in atl1c_alloc_rx_buffer()
1800 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) { in atl1c_alloc_rx_buffer()
1802 buffer_info->skb = NULL; in atl1c_alloc_rx_buffer()
1803 buffer_info->length = 0; in atl1c_alloc_rx_buffer()
1805 netif_warn(adapter, rx_err, adapter->netdev, "RX dma_map_single failed"); in atl1c_alloc_rx_buffer()
1808 buffer_info->dma = mapping; in atl1c_alloc_rx_buffer()
1811 rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma); in atl1c_alloc_rx_buffer()
1813 if (++next_next == rfd_ring->count) in atl1c_alloc_rx_buffer()
1815 buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; in atl1c_alloc_rx_buffer()
1816 next_info = &rfd_ring->buffer_info[next_next]; in atl1c_alloc_rx_buffer()
1823 rfd_ring->next_to_use = rfd_next_to_use; in atl1c_alloc_rx_buffer()
1824 AT_WRITE_REG(&adapter->hw, atl1c_qregs[queue].rfd_prod, in atl1c_alloc_rx_buffer()
1825 rfd_ring->next_to_use & MB_RFDX_PROD_IDX_MASK); in atl1c_alloc_rx_buffer()
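/* RX refill: starting at rfd_ring->next_to_use, the loop above walks
 * buffer_info slots still marked ATL1C_BUFFER_FREE, attaches a freshly
 * allocated skb (rx_buffer_len plus 64 bytes of slack), maps it
 * DMA_FROM_DEVICE and stores the bus address in the matching RFD
 * descriptor; the new producer index is then published to the hardware
 * through the per-queue rfd_prod mailbox register.
 */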
1832 struct atl1c_recv_ret_status *rrs, u16 num) in atl1c_clean_rrd() argument
1836 for (i = 0; i < num; i++, rrs = ATL1C_RRD_DESC(rrd_ring, in atl1c_clean_rrd()
1837 rrd_ring->next_to_clean)) { in atl1c_clean_rrd()
1838 rrs->word3 &= ~RRS_RXD_UPDATED; in atl1c_clean_rrd()
1839 if (++rrd_ring->next_to_clean == rrd_ring->count) in atl1c_clean_rrd()
1840 rrd_ring->next_to_clean = 0; in atl1c_clean_rrd()
1845 struct atl1c_recv_ret_status *rrs, u16 num) in atl1c_clean_rfd() argument
1849 struct atl1c_buffer *buffer_info = rfd_ring->buffer_info; in atl1c_clean_rfd()
1851 rfd_index = (rrs->word0 >> RRS_RX_RFD_INDEX_SHIFT) & in atl1c_clean_rfd()
1853 for (i = 0; i < num; i++) { in atl1c_clean_rfd()
1857 if (++rfd_index == rfd_ring->count) in atl1c_clean_rfd()
1860 rfd_ring->next_to_clean = rfd_index; in atl1c_clean_rfd()
1864 * atl1c_clean_rx - NAPI Rx polling callback
1872 struct atl1c_adapter *adapter = rrd_ring->adapter; in atl1c_clean_rx()
1875 struct pci_dev *pdev = adapter->pdev; in atl1c_clean_rx()
1876 struct net_device *netdev = adapter->netdev; in atl1c_clean_rx()
1877 struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[rrd_ring->num]; in atl1c_clean_rx()
1885 if (!netif_carrier_ok(adapter->netdev)) in atl1c_clean_rx()
1891 rrs = ATL1C_RRD_DESC(rrd_ring, rrd_ring->next_to_clean); in atl1c_clean_rx()
1892 if (likely(RRS_RXD_IS_VALID(rrs->word3))) { in atl1c_clean_rx()
1893 rfd_num = (rrs->word0 >> RRS_RX_RFD_CNT_SHIFT) & in atl1c_clean_rx()
1898 dev_warn(&pdev->dev, in atl1c_clean_rx()
1906 if (rrs->word3 & (RRS_RX_ERR_SUM | RRS_802_3_LEN_ERR)) { in atl1c_clean_rx()
1909 dev_warn(&pdev->dev, in atl1c_clean_rx()
1911 rrs->word3); in atl1c_clean_rx()
1915 length = le16_to_cpu((rrs->word3 >> RRS_PKT_SIZE_SHIFT) & in atl1c_clean_rx()
1919 rfd_index = (rrs->word0 >> RRS_RX_RFD_INDEX_SHIFT) & in atl1c_clean_rx()
1921 buffer_info = &rfd_ring->buffer_info[rfd_index]; in atl1c_clean_rx()
1922 dma_unmap_single(&pdev->dev, buffer_info->dma, in atl1c_clean_rx()
1923 buffer_info->length, DMA_FROM_DEVICE); in atl1c_clean_rx()
1924 skb = buffer_info->skb; in atl1c_clean_rx()
1928 dev_warn(&pdev->dev, in atl1c_clean_rx()
1933 skb_put(skb, length - ETH_FCS_LEN); in atl1c_clean_rx()
1934 skb->protocol = eth_type_trans(skb, netdev); in atl1c_clean_rx()
1936 if (rrs->word3 & RRS_VLAN_INS) { in atl1c_clean_rx()
1939 AT_TAG_TO_VLAN(rrs->vlan_tag, vlan); in atl1c_clean_rx()
1948 atl1c_alloc_rx_buffer(adapter, rrd_ring->num, true); in atl1c_clean_rx()
1953 spin_lock_irqsave(&adapter->hw.intr_mask_lock, flags); in atl1c_clean_rx()
1954 adapter->hw.intr_mask |= atl1c_qregs[rrd_ring->num].rx_isr; in atl1c_clean_rx()
1955 AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask); in atl1c_clean_rx()
1956 spin_unlock_irqrestore(&adapter->hw.intr_mask_lock, flags); in atl1c_clean_rx()
1964 * Polling 'interrupt' - used by things like netconsole to send skbs
1965 * without having to re-enable interrupts. It's not called while
1972 disable_irq(adapter->pdev->irq); in atl1c_netpoll()
1973 atl1c_intr(adapter->pdev->irq, netdev); in atl1c_netpoll()
1974 enable_irq(adapter->pdev->irq); in atl1c_netpoll()
1980 struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[queue]; in atl1c_tpd_avail()
1984 next_to_clean = atomic_read(&tpd_ring->next_to_clean); in atl1c_tpd_avail()
1985 next_to_use = tpd_ring->next_to_use; in atl1c_tpd_avail()
1988 (next_to_clean - next_to_use - 1) : in atl1c_tpd_avail()
1989 (tpd_ring->count + next_to_clean - next_to_use - 1); in atl1c_tpd_avail()
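/* Illustrative sketch (not driver code): atl1c_tpd_avail() above is plain
 * circular-buffer arithmetic.  With one slot always kept unused, the number
 * of free TPDs is (next_to_clean - next_to_use - 1) modulo the ring size.
 * Standalone version with assumed index values:
 */
#include <stdio.h>

static unsigned tpd_avail(unsigned count, unsigned next_to_clean,
			  unsigned next_to_use)
{
	return next_to_clean > next_to_use ?
		next_to_clean - next_to_use - 1 :
		count + next_to_clean - next_to_use - 1;
}

int main(void)
{
	/* a ring of 1024 descriptors, the default tpd_ring count above */
	printf("%u\n", tpd_avail(1024, 0, 0));		/* empty ring -> 1023 */
	printf("%u\n", tpd_avail(1024, 10, 9));		/* full ring  -> 0    */
	printf("%u\n", tpd_avail(1024, 100, 900));	/* wrapped    -> 223  */
	return 0;
}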
2000 struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[queue]; in atl1c_get_tpd()
2004 next_to_use = tpd_ring->next_to_use; in atl1c_get_tpd()
2005 if (++tpd_ring->next_to_use == tpd_ring->count) in atl1c_get_tpd()
2006 tpd_ring->next_to_use = 0; in atl1c_get_tpd()
2015 struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring; in atl1c_get_tx_buffer()
2017 return &tpd_ring->buffer_info[tpd - in atl1c_get_tx_buffer()
2018 (struct atl1c_tpd_desc *)tpd_ring->desc]; in atl1c_get_tx_buffer()
2027 tpd_req = skb_shinfo(skb)->nr_frags + 1; in atl1c_cal_tpd_req()
2033 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) in atl1c_cal_tpd_req()
2044 struct pci_dev *pdev = adapter->pdev; in atl1c_tso_csum()
2056 offload_type = skb_shinfo(skb)->gso_type; in atl1c_tso_csum()
2059 real_len = (((unsigned char *)ip_hdr(skb) - skb->data) in atl1c_tso_csum()
2060 + ntohs(ip_hdr(skb)->tot_len)); in atl1c_tso_csum()
2062 if (real_len < skb->len) { in atl1c_tso_csum()
2069 if (unlikely(skb->len == hdr_len)) { in atl1c_tso_csum()
2072 dev_warn(&pdev->dev, in atl1c_tso_csum()
2076 ip_hdr(skb)->check = 0; in atl1c_tso_csum()
2077 tcp_hdr(skb)->check = ~csum_tcpudp_magic( in atl1c_tso_csum()
2078 ip_hdr(skb)->saddr, in atl1c_tso_csum()
2079 ip_hdr(skb)->daddr, in atl1c_tso_csum()
2081 (*tpd)->word1 |= 1 << TPD_IPV4_PACKET_SHIFT; in atl1c_tso_csum()
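/* IPv4 TSO prep: ip_hdr->check is zeroed and tcp_hdr->check is seeded with
 * a pseudo-header checksum over the addresses (csum_tcpudp_magic()), as is
 * usual for TSO so the NIC can fill in per-segment checksums; the real_len
 * comparison above guards against frames carrying trailing padding.
 */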
2091 ipv6_hdr(skb)->payload_len = 0; in atl1c_tso_csum()
2094 if (unlikely(skb->len == hdr_len)) { in atl1c_tso_csum()
2097 dev_warn(&pdev->dev, in atl1c_tso_csum()
2103 etpd->word1 |= 1 << TPD_LSO_EN_SHIFT; in atl1c_tso_csum()
2104 etpd->word1 |= 1 << TPD_LSO_VER_SHIFT; in atl1c_tso_csum()
2105 etpd->pkt_len = cpu_to_le32(skb->len); in atl1c_tso_csum()
2106 (*tpd)->word1 |= 1 << TPD_LSO_VER_SHIFT; in atl1c_tso_csum()
2109 (*tpd)->word1 |= 1 << TPD_LSO_EN_SHIFT; in atl1c_tso_csum()
2110 (*tpd)->word1 |= (skb_transport_offset(skb) & TPD_TCPHDR_OFFSET_MASK) << in atl1c_tso_csum()
2112 (*tpd)->word1 |= (skb_shinfo(skb)->gso_size & TPD_MSS_MASK) << in atl1c_tso_csum()
2118 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { in atl1c_tso_csum()
2124 dev_err(&adapter->pdev->dev, in atl1c_tso_csum()
2126 return -1; in atl1c_tso_csum()
2128 css = cso + skb->csum_offset; in atl1c_tso_csum()
2130 (*tpd)->word1 |= ((cso >> 1) & TPD_PLOADOFFSET_MASK) << in atl1c_tso_csum()
2132 (*tpd)->word1 |= ((css >> 1) & TPD_CCSUM_OFFSET_MASK) << in atl1c_tso_csum()
2134 (*tpd)->word1 |= 1 << TPD_CCSUM_EN_SHIFT; in atl1c_tso_csum()
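/* Plain checksum offload: the TPD carries the checksum start offset (cso)
 * and the offset where the result is stored (css = cso + skb->csum_offset),
 * both encoded in 2-byte units (hence the ">> 1"), plus the TPD_CCSUM_EN
 * enable bit.
 */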
2144 struct atl1c_tpd_ring *tpd_ring = &adpt->tpd_ring[queue]; in atl1c_tx_rollback()
2149 first_index = first_tpd - (struct atl1c_tpd_desc *)tpd_ring->desc; in atl1c_tx_rollback()
2151 while (index != tpd_ring->next_to_use) { in atl1c_tx_rollback()
2153 buffer_info = &tpd_ring->buffer_info[index]; in atl1c_tx_rollback()
2154 atl1c_clean_buffer(adpt->pdev, buffer_info); in atl1c_tx_rollback()
2156 if (++index == tpd_ring->count) in atl1c_tx_rollback()
2159 tpd_ring->next_to_use = first_index; in atl1c_tx_rollback()
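/* Rollback on mapping failure: starting from the failed packet's first
 * TPD, every descriptor up to next_to_use is released via
 * atl1c_clean_buffer() and next_to_use is wound back to first_index, so a
 * partially mapped packet is never handed to the hardware.
 */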
2176 nr_frags = skb_shinfo(skb)->nr_frags; in atl1c_tx_map()
2177 tso = (tpd->word1 >> TPD_LSO_EN_SHIFT) & TPD_LSO_EN_MASK; in atl1c_tx_map()
2185 buffer_info->length = map_len; in atl1c_tx_map()
2186 buffer_info->dma = dma_map_single(&adapter->pdev->dev, in atl1c_tx_map()
2187 skb->data, hdr_len, in atl1c_tx_map()
2189 if (unlikely(dma_mapping_error(&adapter->pdev->dev, buffer_info->dma))) in atl1c_tx_map()
2195 use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma); in atl1c_tx_map()
2196 use_tpd->buffer_len = cpu_to_le16(buffer_info->length); in atl1c_tx_map()
2209 buffer_info->length = buf_len - mapped_len; in atl1c_tx_map()
2210 buffer_info->dma = in atl1c_tx_map()
2211 dma_map_single(&adapter->pdev->dev, in atl1c_tx_map()
2212 skb->data + mapped_len, in atl1c_tx_map()
2213 buffer_info->length, DMA_TO_DEVICE); in atl1c_tx_map()
2214 if (unlikely(dma_mapping_error(&adapter->pdev->dev, buffer_info->dma))) in atl1c_tx_map()
2220 use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma); in atl1c_tx_map()
2221 use_tpd->buffer_len = cpu_to_le16(buffer_info->length); in atl1c_tx_map()
2225 skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; in atl1c_tx_map()
2231 buffer_info->length = skb_frag_size(frag); in atl1c_tx_map()
2232 buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev, in atl1c_tx_map()
2234 buffer_info->length, in atl1c_tx_map()
2236 if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) in atl1c_tx_map()
2242 use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma); in atl1c_tx_map()
2243 use_tpd->buffer_len = cpu_to_le16(buffer_info->length); in atl1c_tx_map()
2247 use_tpd->word1 |= 1 << TPD_EOP_SHIFT; in atl1c_tx_map()
2250 buffer_info->skb = skb; in atl1c_tx_map()
2255 buffer_info->dma = 0; in atl1c_tx_map()
2256 buffer_info->length = 0; in atl1c_tx_map()
2257 return -1; in atl1c_tx_map()
2262 struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[queue]; in atl1c_tx_queue()
2264 AT_WRITE_REGW(&adapter->hw, atl1c_qregs[queue].tpd_prod, in atl1c_tx_queue()
2265 tpd_ring->next_to_use); in atl1c_tx_queue()
2273 struct netdev_queue *txq = netdev_get_tx_queue(netdev, queue); in atl1c_xmit_frame() local
2277 if (test_bit(__AT_DOWN, &adapter->flags)) { in atl1c_xmit_frame()
2287 netif_tx_stop_queue(txq); in atl1c_xmit_frame()
2306 tpd->word1 |= 1 << TPD_INS_VTAG_SHIFT; in atl1c_xmit_frame()
2307 tpd->vlan_tag = tag; in atl1c_xmit_frame()
2311 tpd->word1 |= 1 << TPD_ETH_TYPE_SHIFT; /* Ethernet frame */ in atl1c_xmit_frame()
2314 netif_info(adapter, tx_done, adapter->netdev, in atl1c_xmit_frame()
2315 "tx-skb dropped due to dma error\n"); in atl1c_xmit_frame()
2322 if (__netdev_tx_sent_queue(txq, skb->len, more)) in atl1c_xmit_frame()
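/* BQL bookkeeping: __netdev_tx_sent_queue() here accounts the bytes queued
 * to the ring and is balanced by netdev_tx_completed_queue() in
 * atl1c_clean_tx(); the queue is stopped above when TPD space runs short
 * and woken again from the TX completion path once descriptors are
 * reclaimed.
 */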
2331 struct net_device *netdev = adapter->netdev; in atl1c_free_irq()
2333 free_irq(adapter->pdev->irq, netdev); in atl1c_free_irq()
2335 if (adapter->have_msi) in atl1c_free_irq()
2336 pci_disable_msi(adapter->pdev); in atl1c_free_irq()
2341 struct pci_dev *pdev = adapter->pdev; in atl1c_request_irq()
2342 struct net_device *netdev = adapter->netdev; in atl1c_request_irq()
2346 adapter->have_msi = true; in atl1c_request_irq()
2347 err = pci_enable_msi(adapter->pdev); in atl1c_request_irq()
2350 dev_err(&pdev->dev, in atl1c_request_irq()
2353 adapter->have_msi = false; in atl1c_request_irq()
2356 if (!adapter->have_msi) in atl1c_request_irq()
2358 err = request_irq(adapter->pdev->irq, atl1c_intr, flags, in atl1c_request_irq()
2359 netdev->name, netdev); in atl1c_request_irq()
2362 dev_err(&pdev->dev, in atl1c_request_irq()
2365 if (adapter->have_msi) in atl1c_request_irq()
2366 pci_disable_msi(adapter->pdev); in atl1c_request_irq()
2370 dev_dbg(&pdev->dev, "atl1c_request_irq OK\n"); in atl1c_request_irq()
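/* IRQ setup strategy: MSI is attempted first; if pci_enable_msi() fails,
 * the driver falls back to the legacy INTx line, and a request_irq()
 * failure disables any MSI that was enabled before bailing out.
 */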
2378 /* release tx-pending skbs and reset tx/rx ring index */ in atl1c_reset_dma_ring()
2379 for (i = 0; i < adapter->tx_queue_count; ++i) in atl1c_reset_dma_ring()
2381 for (i = 0; i < adapter->rx_queue_count; ++i) in atl1c_reset_dma_ring()
2387 struct net_device *netdev = adapter->netdev; in atl1c_up()
2402 clear_bit(__AT_DOWN, &adapter->flags); in atl1c_up()
2403 for (i = 0; i < adapter->tx_queue_count; ++i) in atl1c_up()
2404 napi_enable(&adapter->tpd_ring[i].napi); in atl1c_up()
2405 for (i = 0; i < adapter->rx_queue_count; ++i) in atl1c_up()
2406 napi_enable(&adapter->rrd_ring[i].napi); in atl1c_up()
2412 for (i = 0; i < adapter->rx_queue_count; ++i) in atl1c_up()
2419 struct net_device *netdev = adapter->netdev; in atl1c_down()
2423 adapter->work_event = 0; /* clear all event */ in atl1c_down()
2426 set_bit(__AT_DOWN, &adapter->flags); in atl1c_down()
2428 for (i = 0; i < adapter->tx_queue_count; ++i) in atl1c_down()
2429 napi_disable(&adapter->tpd_ring[i].napi); in atl1c_down()
2430 for (i = 0; i < adapter->rx_queue_count; ++i) in atl1c_down()
2431 napi_disable(&adapter->rrd_ring[i].napi); in atl1c_down()
2435 atl1c_disable_l0s_l1(&adapter->hw); in atl1c_down()
2437 atl1c_reset_mac(&adapter->hw); in atl1c_down()
2440 adapter->link_speed = SPEED_0; in atl1c_down()
2441 adapter->link_duplex = -1; in atl1c_down()
2446 * atl1c_open - Called when a network interface is made active
2463 if (test_bit(__AT_TESTING, &adapter->flags)) in atl1c_open()
2464 return -EBUSY; in atl1c_open()
2480 atl1c_reset_mac(&adapter->hw); in atl1c_open()
2485 * atl1c_close - Disables a network interface
2490 * The close entry point is called when an interface is de-activated
2499 WARN_ON(test_bit(__AT_RESETTING, &adapter->flags)); in atl1c_close()
2500 set_bit(__AT_DOWN, &adapter->flags); in atl1c_close()
2501 cancel_work_sync(&adapter->common_task); in atl1c_close()
2511 struct atl1c_hw *hw = &adapter->hw; in atl1c_suspend()
2512 u32 wufc = adapter->wol; in atl1c_suspend()
2516 WARN_ON(test_bit(__AT_RESETTING, &adapter->flags)); in atl1c_suspend()
2536 AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0); in atl1c_resume()
2537 atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE); in atl1c_resume()
2539 atl1c_phy_reset(&adapter->hw); in atl1c_resume()
2540 atl1c_reset_mac(&adapter->hw); in atl1c_resume()
2541 atl1c_phy_init(&adapter->hw); in atl1c_resume()
2556 atl1c_suspend(&pdev->dev); in atl1c_shutdown()
2557 pci_wake_from_d3(pdev, adapter->wol); in atl1c_shutdown()
2581 SET_NETDEV_DEV(netdev, &pdev->dev); in atl1c_init_netdev()
2584 netdev->netdev_ops = &atl1c_netdev_ops; in atl1c_init_netdev()
2585 netdev->watchdog_timeo = AT_TX_WATCHDOG; in atl1c_init_netdev()
2586 netdev->min_mtu = ETH_ZLEN - (ETH_HLEN + VLAN_HLEN); in atl1c_init_netdev()
2590 netdev->hw_features = NETIF_F_SG | in atl1c_init_netdev()
2595 netdev->features = netdev->hw_features | in atl1c_init_netdev()
2601 * atl1c_probe - Device Initialization Routine
2625 return dev_err_probe(&pdev->dev, err, "cannot enable PCI device\n"); in atl1c_probe()
2628 * The atl1c chip can DMA to 64-bit addresses, but it uses a single in atl1c_probe()
2632 * Supporting 64-bit DMA on this hardware is more trouble than it's in atl1c_probe()
2633 * worth. It is far easier to limit to 32-bit DMA than update in atl1c_probe()
2635 * fixed-high-32-bit system. in atl1c_probe()
2637 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); in atl1c_probe()
2639 dev_err(&pdev->dev, "No usable DMA configuration,aborting\n"); in atl1c_probe()
2645 dev_err(&pdev->dev, "cannot obtain PCI resources\n"); in atl1c_probe()
2653 err = -EIO; in atl1c_probe()
2654 dev_err(&pdev->dev, "cannot map device registers\n"); in atl1c_probe()
2664 err = -ENOMEM; in atl1c_probe()
2670 dev_err(&pdev->dev, "init netdevice failed\n"); in atl1c_probe()
2674 adapter->bd_number = cards_found; in atl1c_probe()
2675 adapter->netdev = netdev; in atl1c_probe()
2676 adapter->pdev = pdev; in atl1c_probe()
2677 adapter->hw.adapter = adapter; in atl1c_probe()
2678 adapter->hw.nic_type = nic_type; in atl1c_probe()
2679 adapter->msg_enable = netif_msg_init(-1, atl1c_default_msg); in atl1c_probe()
2680 adapter->hw.hw_addr = hw_addr; in atl1c_probe()
2681 adapter->tx_queue_count = queue_count; in atl1c_probe()
2682 adapter->rx_queue_count = queue_count; in atl1c_probe()
2685 adapter->mii.dev = netdev; in atl1c_probe()
2686 adapter->mii.mdio_read = atl1c_mdio_read; in atl1c_probe()
2687 adapter->mii.mdio_write = atl1c_mdio_write; in atl1c_probe()
2688 adapter->mii.phy_id_mask = 0x1f; in atl1c_probe()
2689 adapter->mii.reg_num_mask = MDIO_CTRL_REG_MASK; in atl1c_probe()
2691 for (i = 0; i < adapter->rx_queue_count; ++i) in atl1c_probe()
2692 netif_napi_add(netdev, &adapter->rrd_ring[i].napi, in atl1c_probe()
2694 for (i = 0; i < adapter->tx_queue_count; ++i) in atl1c_probe()
2695 netif_napi_add_tx(netdev, &adapter->tpd_ring[i].napi, in atl1c_probe()
2697 timer_setup(&adapter->phy_config_timer, atl1c_phy_config, 0); in atl1c_probe()
2701 dev_err(&pdev->dev, "net device private data init failed\n"); in atl1c_probe()
2707 atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE); in atl1c_probe()
2710 atl1c_phy_reset(&adapter->hw); in atl1c_probe()
2712 err = atl1c_reset_mac(&adapter->hw); in atl1c_probe()
2714 err = -EIO; in atl1c_probe()
2720 err = atl1c_phy_init(&adapter->hw); in atl1c_probe()
2722 err = -EIO; in atl1c_probe()
2725 if (atl1c_read_mac_addr(&adapter->hw)) { in atl1c_probe()
2727 netdev->addr_assign_type = NET_ADDR_RANDOM; in atl1c_probe()
2729 eth_hw_addr_set(netdev, adapter->hw.mac_addr); in atl1c_probe()
2731 dev_dbg(&pdev->dev, "mac address : %pM\n", in atl1c_probe()
2732 adapter->hw.mac_addr); in atl1c_probe()
2734 atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.mac_addr); in atl1c_probe()
2735 INIT_WORK(&adapter->common_task, atl1c_common_task); in atl1c_probe()
2736 adapter->work_event = 0; in atl1c_probe()
2739 dev_err(&pdev->dev, "register netdevice failed\n"); in atl1c_probe()
2762 * atl1c_remove - Device Removal Routine
2767 * Hot-Plug event, or because the driver is going to be removed from
2777 atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.perm_mac_addr); in atl1c_remove()
2778 atl1c_phy_disable(&adapter->hw); in atl1c_remove()
2780 iounmap(adapter->hw.hw_addr); in atl1c_remove()
2788 * atl1c_io_error_detected - called when PCI error is detected
2816 * atl1c_io_slot_reset - called after the pci bus has been reset.
2819 * Restart the card from scratch, as if from a cold-boot. Implementation
2820 * resembles the first-half of the e1000_resume routine.
2829 dev_err(&pdev->dev, in atl1c_io_slot_reset()
2830 "Cannot re-enable PCI device after reset\n"); in atl1c_io_slot_reset()
2838 atl1c_reset_mac(&adapter->hw); in atl1c_io_slot_reset()
2844 * atl1c_io_resume - called when traffic can start flowing again.
2849 * second-half of the atl1c_resume routine.
2859 dev_err(&pdev->dev, in atl1c_io_resume()