Lines matching full:core (QEMU igb emulation, hw/net/igb_core.c)
2 * Core code for QEMU igb emulation
66 IGBCore *core; member
94 igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
97 static void igb_raise_interrupts(IGBCore *core, size_t index, uint32_t causes);
98 static void igb_reset(IGBCore *core, bool sw);
101 igb_raise_legacy_irq(IGBCore *core) in igb_raise_legacy_irq() argument
104 e1000x_inc_reg_if_not_full(core->mac, IAC); in igb_raise_legacy_irq()
105 pci_set_irq(core->owner, 1); in igb_raise_legacy_irq()
109 igb_lower_legacy_irq(IGBCore *core) in igb_lower_legacy_irq() argument
112 pci_set_irq(core->owner, 0); in igb_lower_legacy_irq()
115 static void igb_msix_notify(IGBCore *core, unsigned int cause) in igb_msix_notify() argument
117 PCIDevice *dev = core->owner; in igb_msix_notify()
123 if (vfn < pcie_sriov_num_vfs(core->owner)) { in igb_msix_notify()
124 dev = pcie_sriov_get_vf_at_index(core->owner, vfn); in igb_msix_notify()
137 trace_e1000e_irq_icr_clear_eiac(core->mac[EICR], core->mac[EIAC]); in igb_msix_notify()
138 effective_eiac = core->mac[EIAC] & BIT(cause); in igb_msix_notify()
139 core->mac[EICR] &= ~effective_eiac; in igb_msix_notify()
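The igb_msix_notify() hits above show the two interesting steps: the cause index may resolve to an SR-IOV VF's own MSI-X table, and after delivery any EICR bit that is also set in EIAC is auto-cleared. A minimal sketch of that auto-clear, assuming plain uint32_t register storage:

    #include <stdint.h>

    /* Auto-clear step modelled on igb_msix_notify(): only causes marked
     * in EIAC are cleared from EICR once the MSI-X message is sent. */
    static void eicr_autoclear(uint32_t *eicr, uint32_t eiac, unsigned cause)
    {
        uint32_t effective_eiac = eiac & (1u << cause);
        *eicr &= ~effective_eiac;
    }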
145 int64_t delay_ns = (int64_t) timer->core->mac[timer->delay_reg] * in igb_intrmgr_rearm_timer()
167 int idx = timer - &timer->core->eitr[0]; in igb_intrmgr_on_msix_throttling_timer()
172 igb_msix_notify(timer->core, idx); in igb_intrmgr_on_msix_throttling_timer()
176 igb_intrmgr_initialize_all_timers(IGBCore *core, bool create) in igb_intrmgr_initialize_all_timers() argument
181 core->eitr[i].core = core; in igb_intrmgr_initialize_all_timers()
182 core->eitr[i].delay_reg = EITR0 + i; in igb_intrmgr_initialize_all_timers()
183 core->eitr[i].delay_resolution_ns = E1000_INTR_DELAY_NS_RES; in igb_intrmgr_initialize_all_timers()
191 core->eitr[i].timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, in igb_intrmgr_initialize_all_timers()
193 &core->eitr[i]); in igb_intrmgr_initialize_all_timers()
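igb_intrmgr_initialize_all_timers() wires one throttling timer per EITR register, and the rearm hit above scales the register value by a fixed resolution to get the rearm delay. A sketch, assuming the same multiply:

    #include <stdint.h>

    /* Rearm delay as computed by igb_intrmgr_rearm_timer(): EITRn holds
     * an interval count, delay_resolution_ns converts it to nanoseconds. */
    static int64_t eitr_delay_ns(uint32_t eitr_value, int64_t resolution_ns)
    {
        return (int64_t)eitr_value * resolution_ns;
    }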
198 igb_intrmgr_resume(IGBCore *core) in igb_intrmgr_resume() argument
203 igb_intmgr_timer_resume(&core->eitr[i]); in igb_intrmgr_resume()
208 igb_intrmgr_reset(IGBCore *core) in igb_intrmgr_reset() argument
213 if (core->eitr[i].running) { in igb_intrmgr_reset()
214 timer_del(core->eitr[i].timer); in igb_intrmgr_reset()
215 igb_intrmgr_on_msix_throttling_timer(&core->eitr[i]); in igb_intrmgr_reset()
221 igb_intrmgr_pci_unint(IGBCore *core) in igb_intrmgr_pci_unint() argument
226 timer_free(core->eitr[i].timer); in igb_intrmgr_pci_unint()
231 igb_intrmgr_pci_realize(IGBCore *core) in igb_intrmgr_pci_realize() argument
233 igb_intrmgr_initialize_all_timers(core, true); in igb_intrmgr_pci_realize()
237 igb_rx_csum_enabled(IGBCore *core) in igb_rx_csum_enabled() argument
239 return (core->mac[RXCSUM] & E1000_RXCSUM_PCSD) ? false : true; in igb_rx_csum_enabled()
243 igb_rx_use_legacy_descriptor(IGBCore *core) in igb_rx_use_legacy_descriptor() argument
262 igb_rx_queue_desctyp_get(IGBCore *core, const E1000ERingInfo *r) in igb_rx_queue_desctyp_get() argument
264 return core->mac[E1000_SRRCTL(r->idx) >> 2] & E1000_SRRCTL_DESCTYPE_MASK; in igb_rx_queue_desctyp_get()
268 igb_rx_use_ps_descriptor(IGBCore *core, const E1000ERingInfo *r) in igb_rx_use_ps_descriptor() argument
270 uint32_t desctyp = igb_rx_queue_desctyp_get(core, r); in igb_rx_use_ps_descriptor()
276 igb_rss_enabled(IGBCore *core) in igb_rss_enabled() argument
278 return (core->mac[MRQC] & 3) == E1000_MRQC_ENABLE_RSS_MQ && in igb_rss_enabled()
279 !igb_rx_csum_enabled(core) && in igb_rss_enabled()
280 !igb_rx_use_legacy_descriptor(core); in igb_rss_enabled()
291 igb_rss_get_hash_type(IGBCore *core, struct NetRxPkt *pkt) in igb_rss_get_hash_type() argument
296 assert(igb_rss_enabled(core)); in igb_rss_get_hash_type()
301 trace_e1000e_rx_rss_ip4(l4hdr_proto, core->mac[MRQC], in igb_rss_get_hash_type()
302 E1000_MRQC_EN_TCPIPV4(core->mac[MRQC]), in igb_rss_get_hash_type()
303 E1000_MRQC_EN_IPV4(core->mac[MRQC])); in igb_rss_get_hash_type()
306 E1000_MRQC_EN_TCPIPV4(core->mac[MRQC])) { in igb_rss_get_hash_type()
311 (core->mac[MRQC] & E1000_MRQC_RSS_FIELD_IPV4_UDP)) { in igb_rss_get_hash_type()
315 if (E1000_MRQC_EN_IPV4(core->mac[MRQC])) { in igb_rss_get_hash_type()
321 bool ex_dis = core->mac[RFCTL] & E1000_RFCTL_IPV6_EX_DIS; in igb_rss_get_hash_type()
322 bool new_ex_dis = core->mac[RFCTL] & E1000_RFCTL_NEW_IPV6_EXT_DIS; in igb_rss_get_hash_type()
331 trace_e1000e_rx_rss_ip6_rfctl(core->mac[RFCTL]); in igb_rss_get_hash_type()
336 core->mac[MRQC], in igb_rss_get_hash_type()
337 E1000_MRQC_EN_TCPIPV6EX(core->mac[MRQC]), in igb_rss_get_hash_type()
338 E1000_MRQC_EN_IPV6EX(core->mac[MRQC]), in igb_rss_get_hash_type()
339 E1000_MRQC_EN_IPV6(core->mac[MRQC])); in igb_rss_get_hash_type()
346 E1000_MRQC_EN_TCPIPV6EX(core->mac[MRQC])) { in igb_rss_get_hash_type()
351 (core->mac[MRQC] & E1000_MRQC_RSS_FIELD_IPV6_UDP)) { in igb_rss_get_hash_type()
355 if (E1000_MRQC_EN_IPV6EX(core->mac[MRQC])) { in igb_rss_get_hash_type()
361 if (E1000_MRQC_EN_IPV6(core->mac[MRQC])) { in igb_rss_get_hash_type()
371 igb_rss_calc_hash(IGBCore *core, struct NetRxPkt *pkt, E1000E_RSSInfo *info) in igb_rss_calc_hash() argument
375 assert(igb_rss_enabled(core)); in igb_rss_calc_hash()
403 return net_rx_pkt_calc_rss_hash(pkt, type, (uint8_t *) &core->mac[RSSRK]); in igb_rss_calc_hash()
407 igb_rss_parse_packet(IGBCore *core, struct NetRxPkt *pkt, bool tx, in igb_rss_parse_packet() argument
412 if (tx || !igb_rss_enabled(core)) { in igb_rss_parse_packet()
423 info->type = igb_rss_get_hash_type(core, pkt); in igb_rss_parse_packet()
433 info->hash = igb_rss_calc_hash(core, pkt, info); in igb_rss_parse_packet()
434 info->queue = E1000_RSS_QUEUE(&core->mac[RETA], info->hash); in igb_rss_parse_packet()
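igb_rss_parse_packet() finishes by hashing the packet and indexing the RETA redirection table via E1000_RSS_QUEUE(). A hedged sketch of that lookup; the 128-entry table and 3-bit queue field are assumptions about the layout, not something the listing shows:

    #include <stdint.h>

    /* RSS queue selection: low hash bits index the redirection table,
     * whose entries name one of igb's RX queues.  Sizes are assumed. */
    static unsigned rss_queue(const uint8_t reta[128], uint32_t hash)
    {
        return reta[hash & 127] & 0x7;
    }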
438 igb_tx_insert_vlan(IGBCore *core, uint16_t qn, struct igb_tx *tx, in igb_tx_insert_vlan() argument
441 if (core->mac[MRQC] & 1) { in igb_tx_insert_vlan()
444 if (core->mac[VMVIR0 + pool] & E1000_VMVIR_VLANA_DEFAULT) { in igb_tx_insert_vlan()
447 vlan = core->mac[VMVIR0 + pool] & 0xffff; in igb_tx_insert_vlan()
448 } else if (core->mac[VMVIR0 + pool] & E1000_VMVIR_VLANA_NEVER) { in igb_tx_insert_vlan()
455 core->mac[VET] & 0xffff); in igb_tx_insert_vlan()
460 igb_setup_tx_offloads(IGBCore *core, struct igb_tx *tx) in igb_setup_tx_offloads() argument
471 e1000x_inc_reg_if_not_full(core->mac, TSCTC); in igb_setup_tx_offloads()
489 static void igb_tx_pkt_mac_callback(void *core, in igb_tx_pkt_mac_callback() argument
495 igb_receive_internal(core, virt_iov, virt_iovcnt, true, NULL); in igb_tx_pkt_mac_callback()
507 igb_receive_internal(context->core, virt_iov, virt_iovcnt, true, in igb_tx_pkt_vmdq_callback()
511 if (context->core->has_vnet) { in igb_tx_pkt_vmdq_callback()
520 static bool igb_tx_pkt_switch(IGBCore *core, struct igb_tx *tx, in igb_tx_pkt_switch() argument
526 if (!(core->mac[MRQC] & 1)) { in igb_tx_pkt_switch()
531 if (!(core->mac[DTXSWC] & E1000_DTXSWC_VMDQ_LOOPBACK_EN)) { in igb_tx_pkt_switch()
535 context.core = core; in igb_tx_pkt_switch()
546 igb_tx_pkt_send(IGBCore *core, struct igb_tx *tx, int queue_index) in igb_tx_pkt_send() argument
548 int target_queue = MIN(core->max_queue_num, queue_index); in igb_tx_pkt_send()
549 NetClientState *queue = qemu_get_subqueue(core->owner_nic, target_queue); in igb_tx_pkt_send()
551 if (!igb_setup_tx_offloads(core, tx)) { in igb_tx_pkt_send()
557 if ((core->phy[MII_BMCR] & MII_BMCR_LOOPBACK) || in igb_tx_pkt_send()
558 ((core->mac[RCTL] & E1000_RCTL_LBM_MAC) == E1000_RCTL_LBM_MAC)) { in igb_tx_pkt_send()
560 igb_tx_pkt_mac_callback, core); in igb_tx_pkt_send()
562 return igb_tx_pkt_switch(core, tx, queue); in igb_tx_pkt_send()
567 igb_on_tx_done_update_stats(IGBCore *core, struct NetTxPkt *tx_pkt, int qn) in igb_on_tx_done_update_stats() argument
574 e1000x_increase_size_stats(core->mac, PTCregs, tot_len); in igb_on_tx_done_update_stats()
575 e1000x_inc_reg_if_not_full(core->mac, TPT); in igb_on_tx_done_update_stats()
576 e1000x_grow_8reg_if_not_full(core->mac, TOTL, tot_len); in igb_on_tx_done_update_stats()
580 e1000x_inc_reg_if_not_full(core->mac, BPTC); in igb_on_tx_done_update_stats()
583 e1000x_inc_reg_if_not_full(core->mac, MPTC); in igb_on_tx_done_update_stats()
591 e1000x_inc_reg_if_not_full(core->mac, GPTC); in igb_on_tx_done_update_stats()
592 e1000x_grow_8reg_if_not_full(core->mac, GOTCL, tot_len); in igb_on_tx_done_update_stats()
594 if (core->mac[MRQC] & 1) { in igb_on_tx_done_update_stats()
597 core->mac[PVFGOTC0 + (pool * 64)] += tot_len; in igb_on_tx_done_update_stats()
598 core->mac[PVFGPTC0 + (pool * 64)]++; in igb_on_tx_done_update_stats()
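The per-pool counters touched at the end of igb_on_tx_done_update_stats() sit in register blocks strided 64 words apart, one block per VMDq pool; a sketch of that update:

    #include <stdint.h>

    /* Per-pool TX statistics, after igb_on_tx_done_update_stats():
     * pool n's PVFGOTC/PVFGPTC live n * 64 words past the pool-0
     * registers. */
    static void pool_tx_stats(uint32_t *mac, unsigned pvfgotc0,
                              unsigned pvfgptc0, unsigned pool, uint32_t len)
    {
        mac[pvfgotc0 + pool * 64] += len;  /* good octets transmitted */
        mac[pvfgptc0 + pool * 64]++;       /* good packets transmitted */
    }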
603 igb_process_tx_desc(IGBCore *core, in igb_process_tx_desc() argument
659 igb_tx_insert_vlan(core, queue_index, tx, in igb_process_tx_desc()
664 (core->mac[TSYNCTXCTL] & E1000_TSYNCTXCTL_ENABLED) && in igb_process_tx_desc()
665 !(core->mac[TSYNCTXCTL] & E1000_TSYNCTXCTL_VALID)) { in igb_process_tx_desc()
666 core->mac[TSYNCTXCTL] |= E1000_TSYNCTXCTL_VALID; in igb_process_tx_desc()
667 e1000x_timestamp(core->mac, core->timadj, TXSTMPL, TXSTMPH); in igb_process_tx_desc()
670 if (igb_tx_pkt_send(core, tx, queue_index)) { in igb_process_tx_desc()
671 igb_on_tx_done_update_stats(core, tx->tx_pkt, queue_index); in igb_process_tx_desc()
681 static uint32_t igb_tx_wb_eic(IGBCore *core, int queue_idx) in igb_tx_wb_eic() argument
686 ent = (core->mac[IVAR0 + n / 4] >> (8 * (n % 4))) & 0xff; in igb_tx_wb_eic()
691 static uint32_t igb_rx_wb_eic(IGBCore *core, int queue_idx) in igb_rx_wb_eic() argument
696 ent = (core->mac[IVAR0 + n / 4] >> (8 * (n % 4))) & 0xff; in igb_rx_wb_eic()
702 igb_ring_empty(IGBCore *core, const E1000ERingInfo *r) in igb_ring_empty() argument
704 return core->mac[r->dh] == core->mac[r->dt] || in igb_ring_empty()
705 core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN; in igb_ring_empty()
709 igb_ring_base(IGBCore *core, const E1000ERingInfo *r) in igb_ring_base() argument
711 uint64_t bah = core->mac[r->dbah]; in igb_ring_base()
712 uint64_t bal = core->mac[r->dbal]; in igb_ring_base()
718 igb_ring_head_descr(IGBCore *core, const E1000ERingInfo *r) in igb_ring_head_descr() argument
720 return igb_ring_base(core, r) + E1000_RING_DESC_LEN * core->mac[r->dh]; in igb_ring_head_descr()
724 igb_ring_advance(IGBCore *core, const E1000ERingInfo *r, uint32_t count) in igb_ring_advance() argument
726 core->mac[r->dh] += count; in igb_ring_advance()
728 if (core->mac[r->dh] * E1000_RING_DESC_LEN >= core->mac[r->dlen]) { in igb_ring_advance()
729 core->mac[r->dh] = 0; in igb_ring_advance()
734 igb_ring_free_descr_num(IGBCore *core, const E1000ERingInfo *r) in igb_ring_free_descr_num() argument
736 trace_e1000e_ring_free_space(r->idx, core->mac[r->dlen], in igb_ring_free_descr_num()
737 core->mac[r->dh], core->mac[r->dt]); in igb_ring_free_descr_num()
739 if (core->mac[r->dh] <= core->mac[r->dt]) { in igb_ring_free_descr_num()
740 return core->mac[r->dt] - core->mac[r->dh]; in igb_ring_free_descr_num()
743 if (core->mac[r->dh] > core->mac[r->dt]) { in igb_ring_free_descr_num()
744 return core->mac[r->dlen] / E1000_RING_DESC_LEN + in igb_ring_free_descr_num()
745 core->mac[r->dt] - core->mac[r->dh]; in igb_ring_free_descr_num()
752 igb_ring_enabled(IGBCore *core, const E1000ERingInfo *r) in igb_ring_enabled() argument
754 return core->mac[r->dlen] > 0; in igb_ring_enabled()
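The ring helpers above implement the usual head/tail arithmetic; igb_ring_free_descr_num() is the interesting one, handling the wrapped case. A compilable restatement, assuming 16-byte descriptors:

    #include <stdint.h>

    #define RING_DESC_LEN 16u   /* assumption: E1000_RING_DESC_LEN is 16 */

    /* Free descriptors between head and tail, with wraparound, as in
     * igb_ring_free_descr_num(). */
    static uint32_t ring_free_descr_num(uint32_t dh, uint32_t dt,
                                        uint32_t dlen_bytes)
    {
        if (dh <= dt) {
            return dt - dh;
        }
        return dlen_bytes / RING_DESC_LEN + dt - dh;
    }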
769 igb_tx_ring_init(IGBCore *core, IGB_TxRing *txr, int idx) in igb_tx_ring_init() argument
793 txr->tx = &core->tx[idx]; in igb_tx_ring_init()
801 igb_rx_ring_init(IGBCore *core, E1000E_RxRing *rxr, int idx) in igb_rx_ring_init() argument
828 igb_txdesc_writeback(IGBCore *core, dma_addr_t base, in igb_txdesc_writeback() argument
836 tdwba = core->mac[E1000_TDWBAL(txi->idx) >> 2]; in igb_txdesc_writeback()
837 tdwba |= (uint64_t)core->mac[E1000_TDWBAH(txi->idx) >> 2] << 32; in igb_txdesc_writeback()
843 d = pcie_sriov_get_vf_at_index(core->owner, txi->idx % 8); in igb_txdesc_writeback()
845 d = core->owner; in igb_txdesc_writeback()
849 uint32_t buffer = cpu_to_le32(core->mac[txi->dh]); in igb_txdesc_writeback()
859 return igb_tx_wb_eic(core, txi->idx); in igb_txdesc_writeback()
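igb_txdesc_writeback() assembles a TX head write-back address from the TDWBAL/TDWBAH pair; when the feature is enabled the head pointer is DMA-written there instead of updating the descriptor status in place. The address assembly, with the enable convention (low TDWBAL bit) stated as an assumption:

    #include <stdint.h>
    #include <stdbool.h>

    /* TDWBA assembly as in igb_txdesc_writeback(); bit 0 of TDWBAL is
     * assumed to be the head write-back enable flag. */
    static bool tdwba(uint32_t tdwbal, uint32_t tdwbah, uint64_t *addr)
    {
        uint64_t v = (uint64_t)tdwbal | ((uint64_t)tdwbah << 32);
        *addr = v & ~(uint64_t)1;
        return v & 1;
    }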
863 igb_tx_enabled(IGBCore *core, const E1000ERingInfo *txi) in igb_tx_enabled() argument
865 bool vmdq = core->mac[MRQC] & 1; in igb_tx_enabled()
869 return (core->mac[TCTL] & E1000_TCTL_EN) && in igb_tx_enabled()
870 (!vmdq || core->mac[VFTE] & BIT(pool)) && in igb_tx_enabled()
871 (core->mac[TXDCTL0 + (qn * 16)] & E1000_TXDCTL_QUEUE_ENABLE); in igb_tx_enabled()
875 igb_start_xmit(IGBCore *core, const IGB_TxRing *txr) in igb_start_xmit() argument
883 if (!igb_tx_enabled(core, txi)) { in igb_start_xmit()
888 d = pcie_sriov_get_vf_at_index(core->owner, txi->idx % 8); in igb_start_xmit()
890 d = core->owner; in igb_start_xmit()
893 while (!igb_ring_empty(core, txi)) { in igb_start_xmit()
894 base = igb_ring_head_descr(core, txi); in igb_start_xmit()
901 igb_process_tx_desc(core, d, txr->tx, &desc, txi->idx); in igb_start_xmit()
902 igb_ring_advance(core, txi, 1); in igb_start_xmit()
903 eic |= igb_txdesc_writeback(core, base, &desc, txi); in igb_start_xmit()
907 igb_raise_interrupts(core, EICR, eic); in igb_start_xmit()
908 igb_raise_interrupts(core, ICR, E1000_ICR_TXDW); in igb_start_xmit()
915 igb_rxbufsize(IGBCore *core, const E1000ERingInfo *r) in igb_rxbufsize() argument
917 uint32_t srrctl = core->mac[E1000_SRRCTL(r->idx) >> 2]; in igb_rxbufsize()
923 return e1000x_rxbufsize(core->mac[RCTL]); in igb_rxbufsize()
927 igb_has_rxbufs(IGBCore *core, const E1000ERingInfo *r, size_t total_size) in igb_has_rxbufs() argument
929 uint32_t bufs = igb_ring_free_descr_num(core, r); in igb_has_rxbufs()
930 uint32_t bufsize = igb_rxbufsize(core, r); in igb_has_rxbufs()
934 return total_size <= bufs / (core->rx_desc_len / E1000_MIN_RX_DESC_LEN) * in igb_has_rxbufs()
939 igb_rxhdrbufsize(IGBCore *core, const E1000ERingInfo *r) in igb_rxhdrbufsize() argument
941 uint32_t srrctl = core->mac[E1000_SRRCTL(r->idx) >> 2]; in igb_rxhdrbufsize()
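igb_rxbufsize() and igb_rxhdrbufsize() both decode per-queue SRRCTL fields, falling back to the legacy RCTL-derived size when the packet-buffer field is zero. A sketch with the field layout (7-bit BSIZEPKT in 1 KiB units) stated as an assumption:

    #include <stdint.h>

    /* Per-queue RX buffer size, after igb_rxbufsize(): SRRCTL.BSIZEPKT
     * in 1 KiB units when nonzero, otherwise the legacy RCTL size.
     * Field width and shift are assumptions. */
    static uint32_t rx_bufsize(uint32_t srrctl, uint32_t legacy_size)
    {
        uint32_t bsizepkt = srrctl & 0x7f;
        return bsizepkt ? bsizepkt << 10 : legacy_size;
    }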
947 igb_start_recv(IGBCore *core) in igb_start_recv() argument
953 for (i = 0; i <= core->max_queue_num; i++) { in igb_start_recv()
954 qemu_flush_queued_packets(qemu_get_subqueue(core->owner_nic, i)); in igb_start_recv()
959 igb_can_receive(IGBCore *core) in igb_can_receive() argument
963 if (!e1000x_rx_ready(core->owner, core->mac)) { in igb_can_receive()
969 if (!(core->mac[RXDCTL0 + (i * 16)] & E1000_RXDCTL_QUEUE_ENABLE)) { in igb_can_receive()
973 igb_rx_ring_init(core, &rxr, i); in igb_can_receive()
974 if (igb_ring_enabled(core, rxr.i) && igb_has_rxbufs(core, rxr.i, 1)) { in igb_can_receive()
985 igb_receive(IGBCore *core, const uint8_t *buf, size_t size) in igb_receive() argument
992 return igb_receive_iov(core, &iov, 1); in igb_receive()
996 igb_rx_l3_cso_enabled(IGBCore *core) in igb_rx_l3_cso_enabled() argument
998 return !!(core->mac[RXCSUM] & E1000_RXCSUM_IPOFLD); in igb_rx_l3_cso_enabled()
1002 igb_rx_l4_cso_enabled(IGBCore *core) in igb_rx_l4_cso_enabled() argument
1004 return !!(core->mac[RXCSUM] & E1000_RXCSUM_TUOFLD); in igb_rx_l4_cso_enabled()
1007 static bool igb_rx_is_oversized(IGBCore *core, const struct eth_header *ehdr, in igb_rx_is_oversized() argument
1016 static uint16_t igb_receive_assign(IGBCore *core, const struct iovec *iov, in igb_receive_assign() argument
1024 uint32_t f, ra[2], *macp, rctl = core->mac[RCTL]; in igb_receive_assign()
1040 if (core->mac[CTRL_EXT] & BIT(26)) { in igb_receive_assign()
1041 if (be16_to_cpu(ehdr->h_proto) == core->mac[VET] >> 16 && in igb_receive_assign()
1042 be16_to_cpu(l2_header->vlan[0].h_proto) == (core->mac[VET] & 0xffff)) { in igb_receive_assign()
1046 if (be16_to_cpu(ehdr->h_proto) == (core->mac[VET] & 0xffff)) { in igb_receive_assign()
1051 lpe = !!(core->mac[RCTL] & E1000_RCTL_LPE); in igb_receive_assign()
1052 rlpml = core->mac[RLPML]; in igb_receive_assign()
1053 if (!(core->mac[RCTL] & E1000_RCTL_SBP) && in igb_receive_assign()
1054 igb_rx_is_oversized(core, ehdr, size, vlan_num, lpe, rlpml)) { in igb_receive_assign()
1060 if ((core->mac[ETQF0 + *etqf] & E1000_ETQF_FILTER_ENABLE) && in igb_receive_assign()
1061 be16_to_cpu(ehdr->h_proto) == (core->mac[ETQF0 + *etqf] & E1000_ETQF_ETYPE_MASK)) { in igb_receive_assign()
1062 if ((core->mac[ETQF0 + *etqf] & E1000_ETQF_1588) && in igb_receive_assign()
1063 (core->mac[TSYNCRXCTL] & E1000_TSYNCRXCTL_ENABLED) && in igb_receive_assign()
1064 !(core->mac[TSYNCRXCTL] & E1000_TSYNCRXCTL_VALID) && in igb_receive_assign()
1067 ptp2.message_id_transport_specific == ((core->mac[TSYNCRXCFG] >> 8) & 255)) { in igb_receive_assign()
1068 e1000x_timestamp(core->mac, core->timadj, RXSTMPL, RXSTMPH); in igb_receive_assign()
1070 core->mac[TSYNCRXCTL] |= E1000_TSYNCRXCTL_VALID; in igb_receive_assign()
1071 core->mac[RXSATRL] = le32_to_cpu(ptp2.source_uuid_lo); in igb_receive_assign()
1072 core->mac[RXSATRH] = le16_to_cpu(ptp2.source_uuid_hi) | in igb_receive_assign()
1080 !e1000x_rx_vlan_filter(core->mac, l2_header->vlan + vlan_num - 1)) { in igb_receive_assign()
1084 if (core->mac[MRQC] & 1) { in igb_receive_assign()
1087 if (core->mac[VMOLR0 + i] & E1000_VMOLR_BAM) { in igb_receive_assign()
1092 for (macp = core->mac + RA; macp < core->mac + RA + 32; macp += 2) { in igb_receive_assign()
1103 for (macp = core->mac + RA2; macp < core->mac + RA2 + 16; macp += 2) { in igb_receive_assign()
1115 macp = core->mac + (is_multicast_ether_addr(ehdr->h_dest) ? MTA : UTA); in igb_receive_assign()
1121 if (core->mac[VMOLR0 + i] & E1000_VMOLR_ROMPE) { in igb_receive_assign()
1131 if (e1000x_vlan_rx_filter_enabled(core->mac)) { in igb_receive_assign()
1138 if ((core->mac[VLVF0 + i] & E1000_VLVF_VLANID_MASK) == vid && in igb_receive_assign()
1139 (core->mac[VLVF0 + i] & E1000_VLVF_VLANID_ENABLE)) { in igb_receive_assign()
1140 uint32_t poolsel = core->mac[VLVF0 + i] & E1000_VLVF_POOLSEL_MASK; in igb_receive_assign()
1146 if (core->mac[VMOLR0 + i] & E1000_VMOLR_AUPE) { in igb_receive_assign()
1156 !(core->mac[VT_CTL] & E1000_VT_CTL_DISABLE_DEF_POOL)) { in igb_receive_assign()
1157 uint32_t def_pl = core->mac[VT_CTL] & E1000_VT_CTL_DEFAULT_POOL_MASK; in igb_receive_assign()
1161 queues &= core->mac[VFRE]; in igb_receive_assign()
1164 lpe = !!(core->mac[VMOLR0 + i] & E1000_VMOLR_LPE); in igb_receive_assign()
1165 rlpml = core->mac[VMOLR0 + i] & E1000_VMOLR_RLPML_MASK; in igb_receive_assign()
1167 igb_rx_is_oversized(core, ehdr, size, vlan_num, in igb_receive_assign()
1175 e1000x_inc_reg_if_not_full(core->mac, ROC); in igb_receive_assign()
1181 igb_rss_parse_packet(core, core->rx_pkt, in igb_receive_assign()
1187 (core->mac[VMOLR0 + i] & E1000_VMOLR_RSSE)) { in igb_receive_assign()
1195 bool accepted = e1000x_rx_group_filter(core->mac, ehdr); in igb_receive_assign()
1197 for (macp = core->mac + RA2; macp < core->mac + RA2 + 16; macp += 2) { in igb_receive_assign()
1204 trace_e1000x_rx_flt_ucast_match((int)(macp - core->mac - RA2) / 2, in igb_receive_assign()
1214 igb_rss_parse_packet(core, core->rx_pkt, false, rss_info); in igb_receive_assign()
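igb_receive_assign() is the RX filter pipeline: ethertype/ETQF and PTP checks, MAC and VLAN filters, then (in VMDq mode) a bitmap of candidate pools that is finally masked with VFRE before RSS picks the queue within the pool. The bitmap idea in miniature, with the filter stages reduced to placeholder inputs:

    #include <stdint.h>
    #include <stdbool.h>

    /* Pool selection skeleton after igb_receive_assign(): every filter
     * stage can only narrow the candidate-pool bitmap, and VFRE gates
     * which pools may receive at all. */
    static uint32_t assign_pools(uint32_t mac_match, uint32_t vlan_match,
                                 bool vmdq, uint32_t vfre)
    {
        uint32_t pools = mac_match & vlan_match;
        return vmdq ? pools & vfre : pools;
    }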
1223 igb_read_lgcy_rx_descr(IGBCore *core, struct e1000_rx_desc *desc, in igb_read_lgcy_rx_descr() argument
1230 igb_read_adv_rx_single_buf_descr(IGBCore *core, union e1000_adv_rx_desc *desc, in igb_read_adv_rx_single_buf_descr() argument
1237 igb_read_adv_rx_split_buf_descr(IGBCore *core, union e1000_adv_rx_desc *desc, in igb_read_adv_rx_split_buf_descr() argument
1273 igb_read_rx_descr(IGBCore *core, in igb_read_rx_descr() argument
1280 if (igb_rx_use_legacy_descriptor(core)) { in igb_read_rx_descr()
1281 igb_read_lgcy_rx_descr(core, &desc->legacy, &pdma_st->ba[1]); in igb_read_rx_descr()
1287 if (igb_rx_use_ps_descriptor(core, r)) { in igb_read_rx_descr()
1288 igb_read_adv_rx_split_buf_descr(core, &desc->adv, &pdma_st->ba[0]); in igb_read_rx_descr()
1293 desc_type = igb_rx_queue_desctyp_get(core, r); in igb_read_rx_descr()
1299 igb_read_adv_rx_single_buf_descr(core, &desc->adv, &pdma_st->ba[1]); in igb_read_rx_descr()
1304 igb_verify_csum_in_sw(IGBCore *core, in igb_verify_csum_in_sw() argument
1312 if (igb_rx_l3_cso_enabled(core)) { in igb_verify_csum_in_sw()
1323 if (!igb_rx_l4_cso_enabled(core)) { in igb_verify_csum_in_sw()
1342 igb_build_rx_metadata_common(IGBCore *core, in igb_build_rx_metadata_common() argument
1372 if (hasip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_XSUM_DIS)) { in igb_build_rx_metadata_common()
1382 igb_verify_csum_in_sw(core, pkt, status_flags, l4hdr_proto); in igb_build_rx_metadata_common()
1386 if (igb_rx_l3_cso_enabled(core)) { in igb_build_rx_metadata_common()
1392 if (igb_rx_l4_cso_enabled(core)) { in igb_build_rx_metadata_common()
1424 igb_write_lgcy_rx_descr(IGBCore *core, struct e1000_rx_desc *desc, in igb_write_lgcy_rx_descr() argument
1435 igb_build_rx_metadata_common(core, pkt, pkt != NULL, in igb_write_lgcy_rx_descr()
1444 igb_rx_ps_descriptor_split_always(IGBCore *core, const E1000ERingInfo *r) in igb_rx_ps_descriptor_split_always() argument
1446 uint32_t desctyp = igb_rx_queue_desctyp_get(core, r); in igb_rx_ps_descriptor_split_always()
1451 igb_rx_desc_get_packet_type(IGBCore *core, struct NetRxPkt *pkt, uint16_t etqf) in igb_rx_desc_get_packet_type() argument
1464 if (hasip6 && !(core->mac[RFCTL] & E1000_RFCTL_IPV6_DIS)) { in igb_rx_desc_get_packet_type()
1492 igb_write_adv_rx_descr(IGBCore *core, union e1000_adv_rx_desc *desc, in igb_write_adv_rx_descr() argument
1505 igb_build_rx_metadata_common(core, pkt, eop, in igb_write_adv_rx_descr()
1515 if ((core->mac[RXCSUM] & E1000_RXCSUM_PCSD) != 0) { in igb_write_adv_rx_descr()
1533 pkt_type = igb_rx_desc_get_packet_type(core, pkt, etqf); in igb_write_adv_rx_descr()
1540 igb_write_adv_ps_rx_descr(IGBCore *core, in igb_write_adv_ps_rx_descr() argument
1558 igb_write_adv_rx_descr(core, desc, pkt, rss_info, etqf, ts, pkt_len); in igb_write_adv_ps_rx_descr()
1570 igb_write_rx_descr(IGBCore *core, in igb_write_rx_descr() argument
1579 if (igb_rx_use_legacy_descriptor(core)) { in igb_write_rx_descr()
1580 igb_write_lgcy_rx_descr(core, &desc->legacy, pkt, rss_info, in igb_write_rx_descr()
1582 } else if (igb_rx_use_ps_descriptor(core, r)) { in igb_write_rx_descr()
1583 igb_write_adv_ps_rx_descr(core, &desc->adv, pkt, rss_info, r, etqf, ts, in igb_write_rx_descr()
1586 igb_write_adv_rx_descr(core, &desc->adv, pkt, rss_info, in igb_write_rx_descr()
1592 igb_pci_dma_write_rx_desc(IGBCore *core, PCIDevice *dev, dma_addr_t addr, in igb_pci_dma_write_rx_desc() argument
1595 if (igb_rx_use_legacy_descriptor(core)) { in igb_pci_dma_write_rx_desc()
1624 igb_update_rx_stats(IGBCore *core, const E1000ERingInfo *rxi, in igb_update_rx_stats() argument
1627 eth_pkt_types_e pkt_type = net_rx_pkt_get_packet_type(core->rx_pkt); in igb_update_rx_stats()
1628 e1000x_update_rx_total_stats(core->mac, pkt_type, pkt_size, pkt_fcs_size); in igb_update_rx_stats()
1630 if (core->mac[MRQC] & 1) { in igb_update_rx_stats()
1633 core->mac[PVFGORC0 + (pool * 64)] += pkt_size + 4; in igb_update_rx_stats()
1634 core->mac[PVFGPRC0 + (pool * 64)]++; in igb_update_rx_stats()
1636 core->mac[PVFMPRC0 + (pool * 64)]++; in igb_update_rx_stats()
1642 igb_rx_descr_threshold_hit(IGBCore *core, const E1000ERingInfo *rxi) in igb_rx_descr_threshold_hit() argument
1644 return igb_ring_free_descr_num(core, rxi) == in igb_rx_descr_threshold_hit()
1645 ((core->mac[E1000_SRRCTL(rxi->idx) >> 2] >> 20) & 31) * 16; in igb_rx_descr_threshold_hit()
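igb_rx_descr_threshold_hit() decodes SRRCTL.RDMTS (read here as bits 24:20, in units of 16 descriptors) and fires when the free count drops to exactly that level:

    #include <stdint.h>
    #include <stdbool.h>

    /* Write-back threshold test from igb_rx_descr_threshold_hit(). */
    static bool rx_descr_threshold_hit(uint32_t free_descr, uint32_t srrctl)
    {
        return free_descr == ((srrctl >> 20) & 31) * 16;
    }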
1649 igb_do_ps(IGBCore *core, in igb_do_ps() argument
1661 if (!igb_rx_use_ps_descriptor(core, r)) { in igb_do_ps()
1666 bheader_size = igb_rxhdrbufsize(core, r); in igb_do_ps()
1667 split_always = igb_rx_ps_descriptor_split_always(core, r); in igb_do_ps()
1685 if (fragment && (core->mac[RFCTL] & E1000_RFCTL_IPFRSP_DIS)) { in igb_do_ps()
1732 igb_write_hdr_frag_to_rx_buffers(IGBCore *core, in igb_write_hdr_frag_to_rx_buffers() argument
1748 igb_write_header_to_rx_buffers(IGBCore *core, in igb_write_header_to_rx_buffers() argument
1768 igb_write_hdr_frag_to_rx_buffers(core, d, pdma_st, in igb_write_header_to_rx_buffers()
1786 igb_write_payload_frag_to_rx_buffers(IGBCore *core, in igb_write_payload_frag_to_rx_buffers() argument
1824 igb_write_payload_to_rx_buffers(IGBCore *core, in igb_write_payload_to_rx_buffers() argument
1836 igb_write_payload_frag_to_rx_buffers(core, d, in igb_write_payload_to_rx_buffers()
1852 igb_write_payload_frag_to_rx_buffers(core, d, in igb_write_payload_to_rx_buffers()
1855 e1000x_fcs_len(core->mac)); in igb_write_payload_to_rx_buffers()
1860 igb_write_to_rx_buffers(IGBCore *core, in igb_write_to_rx_buffers() argument
1884 igb_write_header_to_rx_buffers(core, pkt, d, pdma_st, &copy_size); in igb_write_to_rx_buffers()
1889 igb_write_payload_to_rx_buffers(core, pkt, d, pdma_st, &copy_size); in igb_write_to_rx_buffers()
1893 igb_write_packet_to_guest(IGBCore *core, struct NetRxPkt *pkt, in igb_write_packet_to_guest() argument
1907 pdma_st.total_size = pdma_st.size + e1000x_fcs_len(core->mac); in igb_write_packet_to_guest()
1910 rx_desc_len = core->rx_desc_len; in igb_write_packet_to_guest()
1911 pdma_st.rx_desc_packet_buf_size = igb_rxbufsize(core, rxi); in igb_write_packet_to_guest()
1912 pdma_st.rx_desc_header_buf_size = igb_rxhdrbufsize(core, rxi); in igb_write_packet_to_guest()
1914 d = pcie_sriov_get_vf_at_index(core->owner, rxi->idx % 8); in igb_write_packet_to_guest()
1916 d = core->owner; in igb_write_packet_to_guest()
1919 pdma_st.do_ps = igb_do_ps(core, rxi, pkt, &pdma_st); in igb_write_packet_to_guest()
1925 if (igb_ring_empty(core, rxi)) { in igb_write_packet_to_guest()
1929 base = igb_ring_head_descr(core, rxi); in igb_write_packet_to_guest()
1933 igb_read_rx_descr(core, &desc, &pdma_st, rxi); in igb_write_packet_to_guest()
1935 igb_write_to_rx_buffers(core, pkt, d, &pdma_st); in igb_write_packet_to_guest()
1941 igb_write_rx_descr(core, &desc, in igb_write_packet_to_guest()
1947 igb_pci_dma_write_rx_desc(core, d, base, &desc, rx_desc_len); in igb_write_packet_to_guest()
1948 igb_ring_advance(core, rxi, rx_desc_len / E1000_MIN_RX_DESC_LEN); in igb_write_packet_to_guest()
1951 igb_update_rx_stats(core, rxi, pdma_st.size, pdma_st.total_size); in igb_write_packet_to_guest()
1955 igb_rx_strip_vlan(IGBCore *core, const E1000ERingInfo *rxi) in igb_rx_strip_vlan() argument
1957 if (core->mac[MRQC] & 1) { in igb_rx_strip_vlan()
1960 return (net_rx_pkt_get_packet_type(core->rx_pkt) == ETH_PKT_MCAST) ? in igb_rx_strip_vlan()
1961 core->mac[RPLOLR] & E1000_RPLOLR_STRVLAN : in igb_rx_strip_vlan()
1962 core->mac[VMOLR0 + pool] & E1000_VMOLR_STRVLAN; in igb_rx_strip_vlan()
1965 return e1000x_vlan_enabled(core->mac); in igb_rx_strip_vlan()
1969 igb_rx_fix_l4_csum(IGBCore *core, struct NetRxPkt *pkt) in igb_rx_fix_l4_csum() argument
1979 igb_receive_iov(IGBCore *core, const struct iovec *iov, int iovcnt) in igb_receive_iov() argument
1981 return igb_receive_internal(core, iov, iovcnt, core->has_vnet, NULL); in igb_receive_iov()
1985 igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt, in igb_receive_internal() argument
2012 if (!e1000x_hw_rx_enabled(core->mac)) { in igb_receive_internal()
2018 net_rx_pkt_set_vhdr_iovec(core->rx_pkt, iov, iovcnt); in igb_receive_internal()
2021 net_rx_pkt_unset_vhdr(core->rx_pkt); in igb_receive_internal()
2031 e1000x_inc_reg_if_not_full(core->mac, RUC); in igb_receive_internal()
2041 net_rx_pkt_set_packet_type(core->rx_pkt, in igb_receive_internal()
2043 net_rx_pkt_set_protocols(core->rx_pkt, iov, iovcnt, iov_ofs); in igb_receive_internal()
2045 queues = igb_receive_assign(core, iov, iovcnt, iov_ofs, in igb_receive_internal()
2055 !(core->mac[RXDCTL0 + (i * 16)] & E1000_RXDCTL_QUEUE_ENABLE)) { in igb_receive_internal()
2059 igb_rx_ring_init(core, &rxr, i); in igb_receive_internal()
2061 if (!igb_rx_strip_vlan(core, rxr.i)) { in igb_receive_internal()
2063 } else if (core->mac[CTRL_EXT] & BIT(26)) { in igb_receive_internal()
2069 net_rx_pkt_attach_iovec_ex(core->rx_pkt, iov, iovcnt, iov_ofs, in igb_receive_internal()
2071 core->mac[VET] & 0xffff, in igb_receive_internal()
2072 core->mac[VET] >> 16); in igb_receive_internal()
2074 total_size = net_rx_pkt_get_total_len(core->rx_pkt) + in igb_receive_internal()
2075 e1000x_fcs_len(core->mac); in igb_receive_internal()
2077 if (!igb_has_rxbufs(core, rxr.i, total_size)) { in igb_receive_internal()
2085 igb_rx_fix_l4_csum(core, core->rx_pkt); in igb_receive_internal()
2086 igb_write_packet_to_guest(core, core->rx_pkt, &rxr, &rss_info, etqf, ts); in igb_receive_internal()
2089 if (igb_rx_descr_threshold_hit(core, rxr.i)) { in igb_receive_internal()
2093 ecauses |= igb_rx_wb_eic(core, rxr.i->idx); in igb_receive_internal()
2099 igb_raise_interrupts(core, EICR, ecauses); in igb_receive_internal()
2100 igb_raise_interrupts(core, ICR, causes); in igb_receive_internal()
2106 igb_have_autoneg(IGBCore *core) in igb_have_autoneg() argument
2108 return core->phy[MII_BMCR] & MII_BMCR_AUTOEN; in igb_have_autoneg()
2111 static void igb_update_flowctl_status(IGBCore *core) in igb_update_flowctl_status() argument
2113 if (igb_have_autoneg(core) && core->phy[MII_BMSR] & MII_BMSR_AN_COMP) { in igb_update_flowctl_status()
2115 core->mac[CTRL] |= E1000_CTRL_TFCE | E1000_CTRL_RFCE; in igb_update_flowctl_status()
2122 igb_link_down(IGBCore *core) in igb_link_down() argument
2124 e1000x_update_regs_on_link_down(core->mac, core->phy); in igb_link_down()
2125 igb_update_flowctl_status(core); in igb_link_down()
2129 igb_set_phy_ctrl(IGBCore *core, uint16_t val) in igb_set_phy_ctrl() argument
2132 core->phy[MII_BMCR] = val & ~(0x3f | MII_BMCR_RESET | MII_BMCR_ANRESTART); in igb_set_phy_ctrl()
2134 if ((val & MII_BMCR_ANRESTART) && igb_have_autoneg(core)) { in igb_set_phy_ctrl()
2135 e1000x_restart_autoneg(core->mac, core->phy, core->autoneg_timer); in igb_set_phy_ctrl()
2139 void igb_core_set_link_status(IGBCore *core) in igb_core_set_link_status() argument
2141 NetClientState *nc = qemu_get_queue(core->owner_nic); in igb_core_set_link_status()
2142 uint32_t old_status = core->mac[STATUS]; in igb_core_set_link_status()
2147 e1000x_update_regs_on_link_down(core->mac, core->phy); in igb_core_set_link_status()
2149 if (igb_have_autoneg(core) && in igb_core_set_link_status()
2150 !(core->phy[MII_BMSR] & MII_BMSR_AN_COMP)) { in igb_core_set_link_status()
2151 e1000x_restart_autoneg(core->mac, core->phy, in igb_core_set_link_status()
2152 core->autoneg_timer); in igb_core_set_link_status()
2154 e1000x_update_regs_on_link_up(core->mac, core->phy); in igb_core_set_link_status()
2155 igb_start_recv(core); in igb_core_set_link_status()
2159 if (core->mac[STATUS] != old_status) { in igb_core_set_link_status()
2160 igb_raise_interrupts(core, ICR, E1000_ICR_LSC); in igb_core_set_link_status()
2165 igb_set_ctrl(IGBCore *core, int index, uint32_t val) in igb_set_ctrl() argument
2170 core->mac[CTRL] = val & ~E1000_CTRL_RST; in igb_set_ctrl()
2171 core->mac[CTRL_DUP] = core->mac[CTRL]; in igb_set_ctrl()
2183 igb_reset(core, true); in igb_set_ctrl()
2188 core->mac[STATUS] |= E1000_STATUS_PHYRA; in igb_set_ctrl()
2193 igb_set_rfctl(IGBCore *core, int index, uint32_t val) in igb_set_rfctl() argument
2209 core->mac[RFCTL] = val; in igb_set_rfctl()
2213 igb_calc_rxdesclen(IGBCore *core) in igb_calc_rxdesclen() argument
2215 if (igb_rx_use_legacy_descriptor(core)) { in igb_calc_rxdesclen()
2216 core->rx_desc_len = sizeof(struct e1000_rx_desc); in igb_calc_rxdesclen()
2218 core->rx_desc_len = sizeof(union e1000_adv_rx_desc); in igb_calc_rxdesclen()
2220 trace_e1000e_rx_desc_len(core->rx_desc_len); in igb_calc_rxdesclen()
2224 igb_set_rx_control(IGBCore *core, int index, uint32_t val) in igb_set_rx_control() argument
2226 core->mac[RCTL] = val; in igb_set_rx_control()
2227 trace_e1000e_rx_set_rctl(core->mac[RCTL]); in igb_set_rx_control()
2235 igb_calc_rxdesclen(core); in igb_set_rx_control()
2236 igb_start_recv(core); in igb_set_rx_control()
2249 if (timer->core->mac[timer->delay_reg] != 0) { in igb_postpone_interrupt()
2257 igb_eitr_should_postpone(IGBCore *core, int idx) in igb_eitr_should_postpone() argument
2259 return igb_postpone_interrupt(&core->eitr[idx]); in igb_eitr_should_postpone()
2262 static void igb_send_msix(IGBCore *core, uint32_t causes) in igb_send_msix() argument
2267 if ((causes & BIT(vector)) && !igb_eitr_should_postpone(core, vector)) { in igb_send_msix()
2270 igb_msix_notify(core, vector); in igb_send_msix()
2276 igb_fix_icr_asserted(IGBCore *core) in igb_fix_icr_asserted() argument
2278 core->mac[ICR] &= ~E1000_ICR_ASSERTED; in igb_fix_icr_asserted()
2279 if (core->mac[ICR]) { in igb_fix_icr_asserted()
2280 core->mac[ICR] |= E1000_ICR_ASSERTED; in igb_fix_icr_asserted()
2283 trace_e1000e_irq_fix_icr_asserted(core->mac[ICR]); in igb_fix_icr_asserted()
2286 static void igb_raise_interrupts(IGBCore *core, size_t index, uint32_t causes) in igb_raise_interrupts() argument
2288 uint32_t old_causes = core->mac[ICR] & core->mac[IMS]; in igb_raise_interrupts()
2289 uint32_t old_ecauses = core->mac[EICR] & core->mac[EIMS]; in igb_raise_interrupts()
2295 core->mac[index], core->mac[index] | causes); in igb_raise_interrupts()
2297 core->mac[index] |= causes; in igb_raise_interrupts()
2299 if (core->mac[GPIE] & E1000_GPIE_MSIX_MODE) { in igb_raise_interrupts()
2300 raised_causes = core->mac[ICR] & core->mac[IMS] & ~old_causes; in igb_raise_interrupts()
2303 int_alloc = core->mac[IVAR_MISC] & 0xff; in igb_raise_interrupts()
2305 core->mac[EICR] |= BIT(int_alloc & 0x1f); in igb_raise_interrupts()
2310 int_alloc = (core->mac[IVAR_MISC] >> 8) & 0xff; in igb_raise_interrupts()
2312 core->mac[EICR] |= BIT(int_alloc & 0x1f); in igb_raise_interrupts()
2316 raised_ecauses = core->mac[EICR] & core->mac[EIMS] & ~old_ecauses; in igb_raise_interrupts()
2321 igb_send_msix(core, raised_ecauses); in igb_raise_interrupts()
2323 igb_fix_icr_asserted(core); in igb_raise_interrupts()
2325 raised_causes = core->mac[ICR] & core->mac[IMS] & ~old_causes; in igb_raise_interrupts()
2330 core->mac[EICR] |= (raised_causes & E1000_ICR_DRSTA) | E1000_EICR_OTHER; in igb_raise_interrupts()
2332 if (msix_enabled(core->owner)) { in igb_raise_interrupts()
2334 msix_notify(core->owner, 0); in igb_raise_interrupts()
2335 } else if (msi_enabled(core->owner)) { in igb_raise_interrupts()
2337 msi_notify(core->owner, 0); in igb_raise_interrupts()
2339 igb_raise_legacy_irq(core); in igb_raise_interrupts()
2344 static void igb_lower_interrupts(IGBCore *core, size_t index, uint32_t causes) in igb_lower_interrupts() argument
2347 core->mac[index], core->mac[index] & ~causes); in igb_lower_interrupts()
2349 core->mac[index] &= ~causes; in igb_lower_interrupts()
2351 trace_e1000e_irq_pending_interrupts(core->mac[ICR] & core->mac[IMS], in igb_lower_interrupts()
2352 core->mac[ICR], core->mac[IMS]); in igb_lower_interrupts()
2354 if (!(core->mac[ICR] & core->mac[IMS]) && in igb_lower_interrupts()
2355 !(core->mac[GPIE] & E1000_GPIE_MSIX_MODE)) { in igb_lower_interrupts()
2356 core->mac[EICR] &= ~E1000_EICR_OTHER; in igb_lower_interrupts()
2358 if (!msix_enabled(core->owner) && !msi_enabled(core->owner)) { in igb_lower_interrupts()
2359 igb_lower_legacy_irq(core); in igb_lower_interrupts()
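igb_raise_interrupts()/igb_lower_interrupts() centralize cause delivery. The fallback order when GPIE.MSIX_MODE is off, reduced to a sketch (the notify calls are left as comments since they need the full device state):

    #include <stdint.h>
    #include <stdbool.h>

    /* Delivery fallback from igb_raise_interrupts(): MSI-X vector 0,
     * then MSI, then the legacy INTx line. */
    static void deliver_legacy_path(uint32_t raised, bool msix_en, bool msi_en)
    {
        if (!raised) {
            return;
        }
        if (msix_en) {
            /* msix_notify(core->owner, 0); */
        } else if (msi_en) {
            /* msi_notify(core->owner, 0); */
        } else {
            /* igb_raise_legacy_irq(core); */
        }
    }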
2364 static void igb_set_eics(IGBCore *core, int index, uint32_t val) in igb_set_eics() argument
2366 bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE); in igb_set_eics()
2370 igb_raise_interrupts(core, EICR, val & mask); in igb_set_eics()
2373 static void igb_set_eims(IGBCore *core, int index, uint32_t val) in igb_set_eims() argument
2375 bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE); in igb_set_eims()
2379 igb_raise_interrupts(core, EIMS, val & mask); in igb_set_eims()
2382 static void mailbox_interrupt_to_vf(IGBCore *core, uint16_t vfn) in mailbox_interrupt_to_vf() argument
2384 uint32_t ent = core->mac[VTIVAR_MISC + vfn]; in mailbox_interrupt_to_vf()
2389 igb_raise_interrupts(core, EICR, causes); in mailbox_interrupt_to_vf()
2393 static void mailbox_interrupt_to_pf(IGBCore *core) in mailbox_interrupt_to_pf() argument
2395 igb_raise_interrupts(core, ICR, E1000_ICR_VMMB); in mailbox_interrupt_to_pf()
2398 static void igb_set_pfmailbox(IGBCore *core, int index, uint32_t val) in igb_set_pfmailbox() argument
2405 core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_PFSTS; in igb_set_pfmailbox()
2406 mailbox_interrupt_to_vf(core, vfn); in igb_set_pfmailbox()
2410 core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_PFACK; in igb_set_pfmailbox()
2411 mailbox_interrupt_to_vf(core, vfn); in igb_set_pfmailbox()
2416 if (!(core->mac[index] & E1000_P2VMAILBOX_VFU)) { in igb_set_pfmailbox()
2417 core->mac[index] |= E1000_P2VMAILBOX_PFU; in igb_set_pfmailbox()
2418 core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_PFU; in igb_set_pfmailbox()
2421 core->mac[index] &= ~E1000_P2VMAILBOX_PFU; in igb_set_pfmailbox()
2422 core->mac[V2PMAILBOX0 + vfn] &= ~E1000_V2PMAILBOX_PFU; in igb_set_pfmailbox()
2426 core->mac[V2PMAILBOX0 + vfn] &= ~E1000_V2PMAILBOX_VFU; in igb_set_pfmailbox()
2427 core->mac[MBVFICR] &= ~((E1000_MBVFICR_VFACK_VF1 << vfn) | in igb_set_pfmailbox()
2432 static void igb_set_vfmailbox(IGBCore *core, int index, uint32_t val) in igb_set_vfmailbox() argument
2439 core->mac[MBVFICR] |= E1000_MBVFICR_VFREQ_VF1 << vfn; in igb_set_vfmailbox()
2440 mailbox_interrupt_to_pf(core); in igb_set_vfmailbox()
2444 core->mac[MBVFICR] |= E1000_MBVFICR_VFACK_VF1 << vfn; in igb_set_vfmailbox()
2445 mailbox_interrupt_to_pf(core); in igb_set_vfmailbox()
2450 if (!(core->mac[index] & E1000_V2PMAILBOX_PFU)) { in igb_set_vfmailbox()
2451 core->mac[index] |= E1000_V2PMAILBOX_VFU; in igb_set_vfmailbox()
2452 core->mac[P2VMAILBOX0 + vfn] |= E1000_P2VMAILBOX_VFU; in igb_set_vfmailbox()
2455 core->mac[index] &= ~E1000_V2PMAILBOX_VFU; in igb_set_vfmailbox()
2456 core->mac[P2VMAILBOX0 + vfn] &= ~E1000_P2VMAILBOX_VFU; in igb_set_vfmailbox()
2460 void igb_core_vf_reset(IGBCore *core, uint16_t vfn) in igb_core_vf_reset() argument
2468 core->mac[RXDCTL0 + (qn0 * 16)] &= ~E1000_RXDCTL_QUEUE_ENABLE; in igb_core_vf_reset()
2469 core->mac[RXDCTL0 + (qn1 * 16)] &= ~E1000_RXDCTL_QUEUE_ENABLE; in igb_core_vf_reset()
2470 core->mac[TXDCTL0 + (qn0 * 16)] &= ~E1000_TXDCTL_QUEUE_ENABLE; in igb_core_vf_reset()
2471 core->mac[TXDCTL0 + (qn1 * 16)] &= ~E1000_TXDCTL_QUEUE_ENABLE; in igb_core_vf_reset()
2472 core->mac[VFRE] &= ~BIT(vfn); in igb_core_vf_reset()
2473 core->mac[VFTE] &= ~BIT(vfn); in igb_core_vf_reset()
2475 core->mac[VFLRE] |= BIT(vfn); in igb_core_vf_reset()
2477 mailbox_interrupt_to_pf(core); in igb_core_vf_reset()
2480 static void igb_w1c(IGBCore *core, int index, uint32_t val) in igb_w1c() argument
2482 core->mac[index] &= ~val; in igb_w1c()
2485 static void igb_set_eimc(IGBCore *core, int index, uint32_t val) in igb_set_eimc() argument
2487 bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE); in igb_set_eimc()
2493 igb_lower_interrupts(core, EIMS, val & mask); in igb_set_eimc()
2496 static void igb_set_eiac(IGBCore *core, int index, uint32_t val) in igb_set_eiac() argument
2498 bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE); in igb_set_eiac()
2507 core->mac[EIAC] |= (val & E1000_EICR_MSIX_MASK); in igb_set_eiac()
2511 static void igb_set_eiam(IGBCore *core, int index, uint32_t val) in igb_set_eiam() argument
2513 bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE); in igb_set_eiam()
2519 core->mac[EIAM] |= in igb_set_eiam()
2525 static void igb_set_eicr(IGBCore *core, int index, uint32_t val) in igb_set_eicr() argument
2527 bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE); in igb_set_eicr()
2536 igb_lower_interrupts(core, EICR, val & mask); in igb_set_eicr()
2539 static void igb_set_vtctrl(IGBCore *core, int index, uint32_t val) in igb_set_vtctrl() argument
2545 igb_core_vf_reset(core, vfn); in igb_set_vtctrl()
2549 static void igb_set_vteics(IGBCore *core, int index, uint32_t val) in igb_set_vteics() argument
2553 core->mac[index] = val; in igb_set_vteics()
2554 igb_set_eics(core, EICS, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM)); in igb_set_vteics()
2557 static void igb_set_vteims(IGBCore *core, int index, uint32_t val) in igb_set_vteims() argument
2561 core->mac[index] = val; in igb_set_vteims()
2562 igb_set_eims(core, EIMS, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM)); in igb_set_vteims()
2565 static void igb_set_vteimc(IGBCore *core, int index, uint32_t val) in igb_set_vteimc() argument
2569 core->mac[index] = val; in igb_set_vteimc()
2570 igb_set_eimc(core, EIMC, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM)); in igb_set_vteimc()
2573 static void igb_set_vteiac(IGBCore *core, int index, uint32_t val) in igb_set_vteiac() argument
2577 core->mac[index] = val; in igb_set_vteiac()
2578 igb_set_eiac(core, EIAC, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM)); in igb_set_vteiac()
2581 static void igb_set_vteiam(IGBCore *core, int index, uint32_t val) in igb_set_vteiam() argument
2585 core->mac[index] = val; in igb_set_vteiam()
2586 igb_set_eiam(core, EIAM, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM)); in igb_set_vteiam()
2589 static void igb_set_vteicr(IGBCore *core, int index, uint32_t val) in igb_set_vteicr() argument
2593 core->mac[index] = val; in igb_set_vteicr()
2594 igb_set_eicr(core, EICR, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM)); in igb_set_vteicr()
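The igb_set_vte*() writers translate a VF's private view of its interrupt vectors into the shared EICR/EIMS bit space; the shift in the listing puts VF 0's vectors at the top. Restated, with IGBVF_MSIX_VEC_NUM assumed to be 3:

    #include <stdint.h>

    #define IGBVF_MSIX_VEC_NUM 3  /* assumption, consistent with the shifts */

    /* VF cause relocation from the igb_set_vte*() writers. */
    static uint32_t vf_causes_to_pf(uint32_t val, unsigned vfn)
    {
        return (val & 0x7u) << (22 - vfn * IGBVF_MSIX_VEC_NUM);
    }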
2597 static void igb_set_vtivar(IGBCore *core, int index, uint32_t val) in igb_set_vtivar() argument
2604 core->mac[index] = val; in igb_set_vtivar()
2610 core->mac[IVAR0 + n / 4] |= ent << 8 * (n % 4); in igb_set_vtivar()
2618 core->mac[IVAR0 + n / 4] |= ent << 8 * (n % 4); in igb_set_vtivar()
2629 IGBCore *core = opaque; in igb_autoneg_timer() local
2630 if (!qemu_get_queue(core->owner_nic)->link_down) { in igb_autoneg_timer()
2631 e1000x_update_regs_on_autoneg_done(core->mac, core->phy); in igb_autoneg_timer()
2632 igb_start_recv(core); in igb_autoneg_timer()
2634 igb_update_flowctl_status(core); in igb_autoneg_timer()
2636 igb_raise_interrupts(core, ICR, E1000_ICR_LSC); in igb_autoneg_timer()
2670 igb_phy_reg_write(IGBCore *core, uint32_t addr, uint16_t data) in igb_phy_reg_write() argument
2675 igb_set_phy_ctrl(core, data); in igb_phy_reg_write()
2677 core->phy[addr] = data; in igb_phy_reg_write()
2682 igb_set_mdic(IGBCore *core, int index, uint32_t val) in igb_set_mdic() argument
2688 val = core->mac[MDIC] | E1000_MDIC_ERROR; in igb_set_mdic()
2694 val = (val ^ data) | core->phy[addr]; in igb_set_mdic()
2703 igb_phy_reg_write(core, addr, data); in igb_set_mdic()
2706 core->mac[MDIC] = val | E1000_MDIC_READY; in igb_set_mdic()
2709 igb_raise_interrupts(core, ICR, E1000_ICR_MDAC); in igb_set_mdic()
2714 igb_set_rdt(IGBCore *core, int index, uint32_t val) in igb_set_rdt() argument
2716 core->mac[index] = val & 0xffff; in igb_set_rdt()
2718 igb_start_recv(core); in igb_set_rdt()
2722 igb_set_status(IGBCore *core, int index, uint32_t val) in igb_set_status() argument
2725 core->mac[index] &= ~E1000_STATUS_PHYRA; in igb_set_status()
2730 igb_set_ctrlext(IGBCore *core, int index, uint32_t val) in igb_set_ctrlext() argument
2738 core->mac[CTRL_EXT] = val; in igb_set_ctrlext()
2740 if (core->mac[CTRL_EXT] & E1000_CTRL_EXT_PFRSTD) { in igb_set_ctrlext()
2742 core->mac[V2PMAILBOX0 + vfn] &= ~E1000_V2PMAILBOX_RSTI; in igb_set_ctrlext()
2743 core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_RSTD; in igb_set_ctrlext()
2749 igb_set_pbaclr(IGBCore *core, int index, uint32_t val) in igb_set_pbaclr() argument
2753 core->mac[PBACLR] = val & E1000_PBACLR_VALID_MASK; in igb_set_pbaclr()
2755 if (!msix_enabled(core->owner)) { in igb_set_pbaclr()
2760 if (core->mac[PBACLR] & BIT(i)) { in igb_set_pbaclr()
2761 msix_clr_pending(core->owner, i); in igb_set_pbaclr()
2767 igb_set_fcrth(IGBCore *core, int index, uint32_t val) in igb_set_fcrth() argument
2769 core->mac[FCRTH] = val & 0xFFF8; in igb_set_fcrth()
2773 igb_set_fcrtl(IGBCore *core, int index, uint32_t val) in igb_set_fcrtl() argument
2775 core->mac[FCRTL] = val & 0x8000FFF8; in igb_set_fcrtl()
2780 igb_set_##num##bit(IGBCore *core, int index, uint32_t val) \
2782 core->mac[index] = val & (BIT(num) - 1); \
2790 igb_set_dlen(IGBCore *core, int index, uint32_t val) in igb_set_dlen() argument
2792 core->mac[index] = val & 0xffff0; in igb_set_dlen()
2796 igb_set_dbal(IGBCore *core, int index, uint32_t val) in igb_set_dbal() argument
2798 core->mac[index] = val & E1000_XDBAL_MASK; in igb_set_dbal()
2802 igb_set_tdt(IGBCore *core, int index, uint32_t val) in igb_set_tdt() argument
2807 core->mac[index] = val & 0xffff; in igb_set_tdt()
2809 igb_tx_ring_init(core, &txr, qn); in igb_set_tdt()
2810 igb_start_xmit(core, &txr); in igb_set_tdt()
2814 igb_set_ics(IGBCore *core, int index, uint32_t val) in igb_set_ics() argument
2817 igb_raise_interrupts(core, ICR, val); in igb_set_ics()
2821 igb_set_imc(IGBCore *core, int index, uint32_t val) in igb_set_imc() argument
2824 igb_lower_interrupts(core, IMS, val); in igb_set_imc()
2828 igb_set_ims(IGBCore *core, int index, uint32_t val) in igb_set_ims() argument
2830 igb_raise_interrupts(core, IMS, val & 0x77D4FBFD); in igb_set_ims()
2833 static void igb_nsicr(IGBCore *core) in igb_nsicr() argument
2840 if ((core->mac[GPIE] & E1000_GPIE_NSICR) || in igb_nsicr()
2841 (core->mac[IMS] && (core->mac[ICR] & E1000_ICR_INT_ASSERTED))) { in igb_nsicr()
2842 igb_lower_interrupts(core, IMS, core->mac[IAM]); in igb_nsicr()
2846 static void igb_set_icr(IGBCore *core, int index, uint32_t val) in igb_set_icr() argument
2848 igb_nsicr(core); in igb_set_icr()
2849 igb_lower_interrupts(core, ICR, val); in igb_set_icr()
2853 igb_mac_readreg(IGBCore *core, int index) in igb_mac_readreg() argument
2855 return core->mac[index]; in igb_mac_readreg()
2859 igb_mac_ics_read(IGBCore *core, int index) in igb_mac_ics_read() argument
2861 trace_e1000e_irq_read_ics(core->mac[ICS]); in igb_mac_ics_read()
2862 return core->mac[ICS]; in igb_mac_ics_read()
2866 igb_mac_ims_read(IGBCore *core, int index) in igb_mac_ims_read() argument
2868 trace_e1000e_irq_read_ims(core->mac[IMS]); in igb_mac_ims_read()
2869 return core->mac[IMS]; in igb_mac_ims_read()
2873 igb_mac_swsm_read(IGBCore *core, int index) in igb_mac_swsm_read() argument
2875 uint32_t val = core->mac[SWSM]; in igb_mac_swsm_read()
2876 core->mac[SWSM] = val | E1000_SWSM_SMBI; in igb_mac_swsm_read()
2881 igb_mac_eitr_read(IGBCore *core, int index) in igb_mac_eitr_read() argument
2883 return core->eitr_guest_value[index - EITR0]; in igb_mac_eitr_read()
2886 static uint32_t igb_mac_vfmailbox_read(IGBCore *core, int index) in igb_mac_vfmailbox_read() argument
2888 uint32_t val = core->mac[index]; in igb_mac_vfmailbox_read()
2890 core->mac[index] &= ~(E1000_V2PMAILBOX_PFSTS | E1000_V2PMAILBOX_PFACK | in igb_mac_vfmailbox_read()
2897 igb_mac_icr_read(IGBCore *core, int index) in igb_mac_icr_read() argument
2899 uint32_t ret = core->mac[ICR]; in igb_mac_icr_read()
2901 if (core->mac[GPIE] & E1000_GPIE_NSICR) { in igb_mac_icr_read()
2903 igb_lower_interrupts(core, ICR, 0xffffffff); in igb_mac_icr_read()
2904 } else if (core->mac[IMS] == 0) { in igb_mac_icr_read()
2906 igb_lower_interrupts(core, ICR, 0xffffffff); in igb_mac_icr_read()
2907 } else if (core->mac[ICR] & E1000_ICR_INT_ASSERTED) { in igb_mac_icr_read()
2908 igb_lower_interrupts(core, ICR, 0xffffffff); in igb_mac_icr_read()
2909 } else if (!msix_enabled(core->owner)) { in igb_mac_icr_read()
2911 igb_lower_interrupts(core, ICR, 0xffffffff); in igb_mac_icr_read()
2914 igb_nsicr(core); in igb_mac_icr_read()
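igb_mac_icr_read() implements ICR's read-to-clear behaviour: the read returns the pending causes, clears them under any of the four listed conditions, and finishes with the IAM auto-mask via igb_nsicr(). Condensed (clearing via igb_lower_interrupts() also touches EICR, omitted here):

    #include <stdint.h>
    #include <stdbool.h>

    /* Read-to-clear policy condensed from igb_mac_icr_read(). */
    static uint32_t icr_read(uint32_t *icr, uint32_t ims, bool nsicr,
                             bool int_asserted, bool msix_en)
    {
        uint32_t ret = *icr;
        if (nsicr || ims == 0 || int_asserted || !msix_en) {
            *icr = 0;   /* igb_lower_interrupts(core, ICR, 0xffffffff) */
        }
        return ret;
    }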
2919 igb_mac_read_clr4(IGBCore *core, int index) in igb_mac_read_clr4() argument
2921 uint32_t ret = core->mac[index]; in igb_mac_read_clr4()
2923 core->mac[index] = 0; in igb_mac_read_clr4()
2928 igb_mac_read_clr8(IGBCore *core, int index) in igb_mac_read_clr8() argument
2930 uint32_t ret = core->mac[index]; in igb_mac_read_clr8()
2932 core->mac[index] = 0; in igb_mac_read_clr8()
2933 core->mac[index - 1] = 0; in igb_mac_read_clr8()
2938 igb_get_ctrl(IGBCore *core, int index) in igb_get_ctrl() argument
2940 uint32_t val = core->mac[CTRL]; in igb_get_ctrl()
2953 static uint32_t igb_get_status(IGBCore *core, int index) in igb_get_status() argument
2955 uint32_t res = core->mac[STATUS]; in igb_get_status()
2956 uint16_t num_vfs = pcie_sriov_num_vfs(core->owner); in igb_get_status()
2958 if (core->mac[CTRL] & E1000_CTRL_FRCDPX) { in igb_get_status()
2959 res |= (core->mac[CTRL] & E1000_CTRL_FD) ? E1000_STATUS_FD : 0; in igb_get_status()
2964 if ((core->mac[CTRL] & E1000_CTRL_FRCSPD) || in igb_get_status()
2965 (core->mac[CTRL_EXT] & E1000_CTRL_EXT_SPD_BYPS)) { in igb_get_status()
2966 switch (core->mac[CTRL] & E1000_CTRL_SPD_SEL) { in igb_get_status()
2987 if (!(core->mac[CTRL] & E1000_CTRL_GIO_MASTER_DISABLE)) { in igb_get_status()
2995 igb_mac_writereg(IGBCore *core, int index, uint32_t val) in igb_mac_writereg() argument
2997 core->mac[index] = val; in igb_mac_writereg()
3001 igb_mac_setmacaddr(IGBCore *core, int index, uint32_t val) in igb_mac_setmacaddr() argument
3005 core->mac[index] = val; in igb_mac_setmacaddr()
3007 macaddr[0] = cpu_to_le32(core->mac[RA]); in igb_mac_setmacaddr()
3008 macaddr[1] = cpu_to_le32(core->mac[RA + 1]); in igb_mac_setmacaddr()
3009 qemu_format_nic_info_str(qemu_get_queue(core->owner_nic), in igb_mac_setmacaddr()
3016 igb_set_eecd(IGBCore *core, int index, uint32_t val) in igb_set_eecd() argument
3022 core->mac[EECD] = (core->mac[EECD] & ro_bits) | (val & ~ro_bits); in igb_set_eecd()
3026 igb_set_eerd(IGBCore *core, int index, uint32_t val) in igb_set_eerd() argument
3033 data = core->eeprom[addr]; in igb_set_eerd()
3037 core->mac[EERD] = flags | in igb_set_eerd()
3043 igb_set_eitr(IGBCore *core, int index, uint32_t val) in igb_set_eitr() argument
3049 core->eitr_guest_value[eitr_num] = val & ~E1000_EITR_CNT_IGNR; in igb_set_eitr()
3050 core->mac[index] = val & 0x7FFE; in igb_set_eitr()
3054 igb_update_rx_offloads(IGBCore *core) in igb_update_rx_offloads() argument
3056 int cso_state = igb_rx_l4_cso_enabled(core); in igb_update_rx_offloads()
3060 if (core->has_vnet) { in igb_update_rx_offloads()
3061 qemu_set_offload(qemu_get_queue(core->owner_nic)->peer, in igb_update_rx_offloads()
3067 igb_set_rxcsum(IGBCore *core, int index, uint32_t val) in igb_set_rxcsum() argument
3069 core->mac[RXCSUM] = val; in igb_set_rxcsum()
3070 igb_update_rx_offloads(core); in igb_set_rxcsum()
3074 igb_set_gcr(IGBCore *core, int index, uint32_t val) in igb_set_gcr() argument
3076 uint32_t ro_bits = core->mac[GCR] & E1000_GCR_RO_BITS; in igb_set_gcr()
3077 core->mac[GCR] = (val & ~E1000_GCR_RO_BITS) | ro_bits; in igb_set_gcr()
3080 static uint32_t igb_get_systiml(IGBCore *core, int index) in igb_get_systiml() argument
3082 e1000x_timestamp(core->mac, core->timadj, SYSTIML, SYSTIMH); in igb_get_systiml()
3083 return core->mac[SYSTIML]; in igb_get_systiml()
3086 static uint32_t igb_get_rxsatrh(IGBCore *core, int index) in igb_get_rxsatrh() argument
3088 core->mac[TSYNCRXCTL] &= ~E1000_TSYNCRXCTL_VALID; in igb_get_rxsatrh()
3089 return core->mac[RXSATRH]; in igb_get_rxsatrh()
3092 static uint32_t igb_get_txstmph(IGBCore *core, int index) in igb_get_txstmph() argument
3094 core->mac[TSYNCTXCTL] &= ~E1000_TSYNCTXCTL_VALID; in igb_get_txstmph()
3095 return core->mac[TXSTMPH]; in igb_get_txstmph()
3098 static void igb_set_timinca(IGBCore *core, int index, uint32_t val) in igb_set_timinca() argument
3100 e1000x_set_timinca(core->mac, &core->timadj, val); in igb_set_timinca()
3103 static void igb_set_timadjh(IGBCore *core, int index, uint32_t val) in igb_set_timadjh() argument
3105 core->mac[TIMADJH] = val; in igb_set_timadjh()
3106 core->timadj += core->mac[TIMADJL] | ((int64_t)core->mac[TIMADJH] << 32); in igb_set_timadjh()
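igb_set_timadjh() commits the two-register time adjustment in one step: TIMADJL is latched first, and the write to TIMADJH applies the combined 64-bit value to the running adjustment:

    #include <stdint.h>

    /* Combined adjustment as applied by igb_set_timadjh(). */
    static void apply_timadj(int64_t *timadj, uint32_t timadjl,
                             uint32_t timadjh)
    {
        *timadj += (int64_t)timadjl | ((int64_t)timadjh << 32);
    }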
4237 igb_core_write(IGBCore *core, hwaddr addr, uint64_t val, unsigned size) in igb_core_write() argument
4246 igb_macreg_writeops[index](core, index, val); in igb_core_write()
4255 igb_core_read(IGBCore *core, hwaddr addr, unsigned size) in igb_core_read() argument
4264 val = igb_macreg_readops[index](core, index); in igb_core_read()
4274 igb_autoneg_resume(IGBCore *core) in igb_autoneg_resume() argument
4276 if (igb_have_autoneg(core) && in igb_autoneg_resume()
4277 !(core->phy[MII_BMSR] & MII_BMSR_AN_COMP)) { in igb_autoneg_resume()
4278 qemu_get_queue(core->owner_nic)->link_down = false; in igb_autoneg_resume()
4279 timer_mod(core->autoneg_timer, in igb_autoneg_resume()
4285 igb_core_pci_realize(IGBCore *core, in igb_core_pci_realize() argument
4292 core->autoneg_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, in igb_core_pci_realize()
4293 igb_autoneg_timer, core); in igb_core_pci_realize()
4294 igb_intrmgr_pci_realize(core); in igb_core_pci_realize()
4297 net_tx_pkt_init(&core->tx[i].tx_pkt, E1000E_MAX_TX_FRAGS); in igb_core_pci_realize()
4300 net_rx_pkt_init(&core->rx_pkt); in igb_core_pci_realize()
4302 e1000x_core_prepare_eeprom(core->eeprom, in igb_core_pci_realize()
4305 PCI_DEVICE_GET_CLASS(core->owner)->device_id, in igb_core_pci_realize()
4307 igb_update_rx_offloads(core); in igb_core_pci_realize()
4311 igb_core_pci_uninit(IGBCore *core) in igb_core_pci_uninit() argument
4315 timer_free(core->autoneg_timer); in igb_core_pci_uninit()
4317 igb_intrmgr_pci_unint(core); in igb_core_pci_uninit()
4320 net_tx_pkt_uninit(core->tx[i].tx_pkt); in igb_core_pci_uninit()
4323 net_rx_pkt_uninit(core->rx_pkt); in igb_core_pci_uninit()
4462 static void igb_reset(IGBCore *core, bool sw) in igb_reset() argument
4467 timer_del(core->autoneg_timer); in igb_reset()
4469 igb_intrmgr_reset(core); in igb_reset()
4471 memset(core->phy, 0, sizeof core->phy); in igb_reset()
4472 memcpy(core->phy, igb_phy_reg_init, sizeof igb_phy_reg_init); in igb_reset()
4481 core->mac[i] = i < ARRAY_SIZE(igb_mac_reg_init) ? in igb_reset()
4485 if (qemu_get_queue(core->owner_nic)->link_down) { in igb_reset()
4486 igb_link_down(core); in igb_reset()
4489 e1000x_reset_mac_addr(core->owner_nic, core->mac, core->permanent_mac); in igb_reset()
4493 core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_RSTI; in igb_reset()
4496 for (i = 0; i < ARRAY_SIZE(core->tx); i++) { in igb_reset()
4497 tx = &core->tx[i]; in igb_reset()
4505 igb_core_reset(IGBCore *core) in igb_core_reset() argument
4507 igb_reset(core, false); in igb_core_reset()
4510 void igb_core_pre_save(IGBCore *core) in igb_core_pre_save() argument
4513 NetClientState *nc = qemu_get_queue(core->owner_nic); in igb_core_pre_save()
4520 if (nc->link_down && igb_have_autoneg(core)) { in igb_core_pre_save()
4521 core->phy[MII_BMSR] |= MII_BMSR_AN_COMP; in igb_core_pre_save()
4522 igb_update_flowctl_status(core); in igb_core_pre_save()
4525 for (i = 0; i < ARRAY_SIZE(core->tx); i++) { in igb_core_pre_save()
4526 if (net_tx_pkt_has_fragments(core->tx[i].tx_pkt)) { in igb_core_pre_save()
4527 core->tx[i].skip_cp = true; in igb_core_pre_save()
4533 igb_core_post_load(IGBCore *core) in igb_core_post_load() argument
4535 NetClientState *nc = qemu_get_queue(core->owner_nic); in igb_core_post_load()
4539 * to link status bit in core.mac[STATUS]. in igb_core_post_load()
4541 nc->link_down = (core->mac[STATUS] & E1000_STATUS_LU) == 0; in igb_core_post_load()
4547 igb_intrmgr_resume(core); in igb_core_post_load()
4548 igb_autoneg_resume(core); in igb_core_post_load()
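After migration, igb_core_post_load() re-derives the backend's link state from the device's own STATUS.LU bit rather than trusting the peer, so guest-visible state stays authoritative:

    #include <stdint.h>
    #include <stdbool.h>

    #define E1000_STATUS_LU 0x2u  /* link-up status bit */

    /* Link state derivation from igb_core_post_load(). */
    static bool link_down_from_status(uint32_t status)
    {
        return (status & E1000_STATUS_LU) == 0;
    }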