/*
 * Core code for QEMU igb emulation
 *
 * Datasheet:
 * https://www.intel.com/content/dam/www/public/us/en/documents/datasheets/82576eg-gbe-datasheet.pdf
 *
 * Copyright (c) 2020-2023 Red Hat, Inc.
 * Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com)
 * Developed by Daynix Computing LTD (http://www.daynix.com)
 *
 * Authors:
 * Akihiko Odaki <akihiko.odaki@daynix.com>
 * Gal Hammer <gal.hammer@sap.com>
 * Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
 * Dmitry Fleytman <dmitry@daynix.com>
 * Leonid Bloch <leonid@daynix.com>
 * Yan Vugenfirer <yan@daynix.com>
 *
 * Based on work done by:
 * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
 * Copyright (c) 2008 Qumranet
 * Based on work done by:
 * Copyright (c) 2007 Dan Aloni
 * Copyright (c) 2004 Antony T Curtis
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "net/net.h"
#include "net/tap.h"
#include "hw/net/mii.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "sysemu/runstate.h"

#include "net_tx_pkt.h"
#include "net_rx_pkt.h"

#include "igb_common.h"
#include "e1000x_common.h"
#include "igb_core.h"

#include "trace.h"

#define E1000E_MAX_TX_FRAGS (64)

union e1000_rx_desc_union {
    struct e1000_rx_desc legacy;
    union e1000_adv_rx_desc adv;
};

typedef struct IGBTxPktVmdqCallbackContext {
    IGBCore *core;
    NetClientState *nc;
} IGBTxPktVmdqCallbackContext;

typedef struct L2Header {
    struct eth_header eth;
    struct vlan_header vlan[2];
} L2Header;

typedef struct PTP2 {
    uint8_t message_id_transport_specific;
    uint8_t version_ptp;
    uint16_t message_length;
    uint8_t subdomain_number;
    uint8_t reserved0;
    uint16_t flags;
    uint64_t correction;
    uint8_t reserved1[5];
    uint8_t source_communication_technology;
    uint32_t source_uuid_lo;
    uint16_t source_uuid_hi;
    uint16_t source_port_id;
    uint16_t sequence_id;
    uint8_t control;
    uint8_t log_message_period;
} PTP2;

static ssize_t
igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
                     bool has_vnet, bool *external_tx);

static void igb_raise_interrupts(IGBCore *core, size_t index, uint32_t causes);
static void igb_reset(IGBCore *core, bool sw);

static inline void
igb_raise_legacy_irq(IGBCore *core)
{
    trace_e1000e_irq_legacy_notify(true);
    e1000x_inc_reg_if_not_full(core->mac, IAC);
    pci_set_irq(core->owner, 1);
}

static inline void
igb_lower_legacy_irq(IGBCore *core)
{
    trace_e1000e_irq_legacy_notify(false);
    pci_set_irq(core->owner, 0);
}

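/*
 * Deliver the MSI-X vector behind an EICR cause bit. The cause is first
 * mapped onto a VF (each VF owns IGBVF_MSIX_VEC_NUM vectors); causes that
 * do not fall into the range of an enabled VF are delivered through the
 * PF's own vectors. Causes with their EIAC bit set are auto-cleared from
 * EICR after delivery.
 */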
static void igb_msix_notify(IGBCore *core, unsigned int cause)
{
    PCIDevice *dev = core->owner;
    uint16_t vfn;
    uint32_t effective_eiac;
    unsigned int vector;

    vfn = 8 - (cause + 2) / IGBVF_MSIX_VEC_NUM;
    if (vfn < pcie_sriov_num_vfs(core->owner)) {
        dev = pcie_sriov_get_vf_at_index(core->owner, vfn);
        assert(dev);
        vector = (cause + 2) % IGBVF_MSIX_VEC_NUM;
    } else if (cause >= IGB_MSIX_VEC_NUM) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "igb: Tried to use vector unavailable for PF");
        return;
    } else {
        vector = cause;
    }

    msix_notify(dev, vector);

    trace_e1000e_irq_icr_clear_eiac(core->mac[EICR], core->mac[EIAC]);
    effective_eiac = core->mac[EIAC] & BIT(cause);
    core->mac[EICR] &= ~effective_eiac;
}

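/*
 * Interrupt moderation: each EITR register programs a throttling
 * interval, modelled as a QEMU_CLOCK_VIRTUAL timer that postpones
 * delivery of the corresponding MSI-X vector until it expires.
 */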
static inline void
igb_intrmgr_rearm_timer(IGBIntrDelayTimer *timer)
{
    int64_t delay_ns = (int64_t) timer->core->mac[timer->delay_reg] *
                       timer->delay_resolution_ns;

    trace_e1000e_irq_rearm_timer(timer->delay_reg << 2, delay_ns);

    timer_mod(timer->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + delay_ns);

    timer->running = true;
}

static void
igb_intmgr_timer_resume(IGBIntrDelayTimer *timer)
{
    if (timer->running) {
        igb_intrmgr_rearm_timer(timer);
    }
}

static void
igb_intrmgr_on_msix_throttling_timer(void *opaque)
{
    IGBIntrDelayTimer *timer = opaque;
    int idx = timer - &timer->core->eitr[0];

    timer->running = false;

    trace_e1000e_irq_msix_notify_postponed_vec(idx);
    igb_msix_notify(timer->core, idx);
}

static void
igb_intrmgr_initialize_all_timers(IGBCore *core, bool create)
{
    int i;

    for (i = 0; i < IGB_INTR_NUM; i++) {
        core->eitr[i].core = core;
        core->eitr[i].delay_reg = EITR0 + i;
        core->eitr[i].delay_resolution_ns = E1000_INTR_DELAY_NS_RES;
    }

    if (!create) {
        return;
    }

    for (i = 0; i < IGB_INTR_NUM; i++) {
        core->eitr[i].timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                           igb_intrmgr_on_msix_throttling_timer,
                                           &core->eitr[i]);
    }
}

static void
igb_intrmgr_resume(IGBCore *core)
{
    int i;

    for (i = 0; i < IGB_INTR_NUM; i++) {
        igb_intmgr_timer_resume(&core->eitr[i]);
    }
}

static void
igb_intrmgr_reset(IGBCore *core)
{
    int i;

    for (i = 0; i < IGB_INTR_NUM; i++) {
        if (core->eitr[i].running) {
            timer_del(core->eitr[i].timer);
            igb_intrmgr_on_msix_throttling_timer(&core->eitr[i]);
        }
    }
}

static void
igb_intrmgr_pci_unint(IGBCore *core)
{
    int i;

    for (i = 0; i < IGB_INTR_NUM; i++) {
        timer_free(core->eitr[i].timer);
    }
}

static void
igb_intrmgr_pci_realize(IGBCore *core)
{
    igb_intrmgr_initialize_all_timers(core, true);
}

static inline bool
igb_rx_csum_enabled(IGBCore *core)
{
    return (core->mac[RXCSUM] & E1000_RXCSUM_PCSD) ? false : true;
}

static inline bool
igb_rx_use_legacy_descriptor(IGBCore *core)
{
    /*
     * TODO: If SRRCTL[n],DESCTYPE = 000b, the 82576 uses the legacy Rx
     * descriptor.
     */
    return false;
}

typedef struct E1000ERingInfo {
    int dbah;
    int dbal;
    int dlen;
    int dh;
    int dt;
    int idx;
} E1000ERingInfo;

static uint32_t
igb_rx_queue_desctyp_get(IGBCore *core, const E1000ERingInfo *r)
{
    return core->mac[E1000_SRRCTL(r->idx) >> 2] & E1000_SRRCTL_DESCTYPE_MASK;
}

static bool
igb_rx_use_ps_descriptor(IGBCore *core, const E1000ERingInfo *r)
{
    uint32_t desctyp = igb_rx_queue_desctyp_get(core, r);
    return desctyp == E1000_SRRCTL_DESCTYPE_HDR_SPLIT ||
           desctyp == E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
}

static inline bool
igb_rss_enabled(IGBCore *core)
{
    return (core->mac[MRQC] & 3) == E1000_MRQC_ENABLE_RSS_MQ &&
           !igb_rx_csum_enabled(core) &&
           !igb_rx_use_legacy_descriptor(core);
}

typedef struct E1000E_RSSInfo_st {
    bool enabled;
    uint32_t hash;
    uint32_t queue;
    uint32_t type;
} E1000E_RSSInfo;

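/*
 * Pick the RSS hash function for a packet from the protocols it carries
 * and the hash types enabled in MRQC; IPv6 hashing is additionally gated
 * by the extension-header policy bits in RFCTL.
 */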
static uint32_t
igb_rss_get_hash_type(IGBCore *core, struct NetRxPkt *pkt)
{
    bool hasip4, hasip6;
    EthL4HdrProto l4hdr_proto;

    assert(igb_rss_enabled(core));

    net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);

    if (hasip4) {
        trace_e1000e_rx_rss_ip4(l4hdr_proto, core->mac[MRQC],
                                E1000_MRQC_EN_TCPIPV4(core->mac[MRQC]),
                                E1000_MRQC_EN_IPV4(core->mac[MRQC]));

        if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP &&
            E1000_MRQC_EN_TCPIPV4(core->mac[MRQC])) {
            return E1000_MRQ_RSS_TYPE_IPV4TCP;
        }

        if (l4hdr_proto == ETH_L4_HDR_PROTO_UDP &&
            (core->mac[MRQC] & E1000_MRQC_RSS_FIELD_IPV4_UDP)) {
            return E1000_MRQ_RSS_TYPE_IPV4UDP;
        }

        if (E1000_MRQC_EN_IPV4(core->mac[MRQC])) {
            return E1000_MRQ_RSS_TYPE_IPV4;
        }
    } else if (hasip6) {
        eth_ip6_hdr_info *ip6info = net_rx_pkt_get_ip6_info(pkt);

        bool ex_dis = core->mac[RFCTL] & E1000_RFCTL_IPV6_EX_DIS;
        bool new_ex_dis = core->mac[RFCTL] & E1000_RFCTL_NEW_IPV6_EXT_DIS;

        /*
         * Following two traces must not be combined because resulting
         * event will have 11 arguments totally and some trace backends
         * (at least "ust") have limitation of maximum 10 arguments per
         * event. Events with more arguments fail to compile for
         * backends like these.
         */
        trace_e1000e_rx_rss_ip6_rfctl(core->mac[RFCTL]);
        trace_e1000e_rx_rss_ip6(ex_dis, new_ex_dis, l4hdr_proto,
                                ip6info->has_ext_hdrs,
                                ip6info->rss_ex_dst_valid,
                                ip6info->rss_ex_src_valid,
                                core->mac[MRQC],
                                E1000_MRQC_EN_TCPIPV6EX(core->mac[MRQC]),
                                E1000_MRQC_EN_IPV6EX(core->mac[MRQC]),
                                E1000_MRQC_EN_IPV6(core->mac[MRQC]));

        if ((!ex_dis || !ip6info->has_ext_hdrs) &&
            (!new_ex_dis || !(ip6info->rss_ex_dst_valid ||
                              ip6info->rss_ex_src_valid))) {

            if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP &&
                E1000_MRQC_EN_TCPIPV6EX(core->mac[MRQC])) {
                return E1000_MRQ_RSS_TYPE_IPV6TCPEX;
            }

            if (l4hdr_proto == ETH_L4_HDR_PROTO_UDP &&
                (core->mac[MRQC] & E1000_MRQC_RSS_FIELD_IPV6_UDP)) {
                return E1000_MRQ_RSS_TYPE_IPV6UDP;
            }

            if (E1000_MRQC_EN_IPV6EX(core->mac[MRQC])) {
                return E1000_MRQ_RSS_TYPE_IPV6EX;
            }

        }

        if (E1000_MRQC_EN_IPV6(core->mac[MRQC])) {
            return E1000_MRQ_RSS_TYPE_IPV6;
        }

    }

    return E1000_MRQ_RSS_TYPE_NONE;
}

static uint32_t
igb_rss_calc_hash(IGBCore *core, struct NetRxPkt *pkt, E1000E_RSSInfo *info)
{
    NetRxPktRssType type;

    assert(igb_rss_enabled(core));

    switch (info->type) {
    case E1000_MRQ_RSS_TYPE_IPV4:
        type = NetPktRssIpV4;
        break;
    case E1000_MRQ_RSS_TYPE_IPV4TCP:
        type = NetPktRssIpV4Tcp;
        break;
    case E1000_MRQ_RSS_TYPE_IPV6TCPEX:
        type = NetPktRssIpV6TcpEx;
        break;
    case E1000_MRQ_RSS_TYPE_IPV6:
        type = NetPktRssIpV6;
        break;
    case E1000_MRQ_RSS_TYPE_IPV6EX:
        type = NetPktRssIpV6Ex;
        break;
    case E1000_MRQ_RSS_TYPE_IPV4UDP:
        type = NetPktRssIpV4Udp;
        break;
    case E1000_MRQ_RSS_TYPE_IPV6UDP:
        type = NetPktRssIpV6Udp;
        break;
    default:
        assert(false);
        return 0;
    }

    return net_rx_pkt_calc_rss_hash(pkt, type, (uint8_t *) &core->mac[RSSRK]);
}

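/*
 * Fill an E1000E_RSSInfo for the packet: the hash type, the hash itself
 * (keyed with RSSRK) and the destination queue picked from the RETA
 * redirection table. RSS is skipped for locally switched (tx) traffic.
 */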
static void
igb_rss_parse_packet(IGBCore *core, struct NetRxPkt *pkt, bool tx,
                     E1000E_RSSInfo *info)
{
    trace_e1000e_rx_rss_started();

    if (tx || !igb_rss_enabled(core)) {
        info->enabled = false;
        info->hash = 0;
        info->queue = 0;
        info->type = 0;
        trace_e1000e_rx_rss_disabled();
        return;
    }

    info->enabled = true;

    info->type = igb_rss_get_hash_type(core, pkt);

    trace_e1000e_rx_rss_type(info->type);

    if (info->type == E1000_MRQ_RSS_TYPE_NONE) {
        info->hash = 0;
        info->queue = 0;
        return;
    }

    info->hash = igb_rss_calc_hash(core, pkt, info);
    info->queue = E1000_RSS_QUEUE(&core->mac[RETA], info->hash);
}

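/*
 * Apply the VLAN insertion policy for a transmitted packet. In VMDq mode
 * the pool's VMVIR register may force insertion of the default VLAN tag
 * or forbid tagging entirely, overriding the descriptor's VLE request.
 */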
static void
igb_tx_insert_vlan(IGBCore *core, uint16_t qn, struct igb_tx *tx,
                   uint16_t vlan, bool insert_vlan)
{
    if (core->mac[MRQC] & 1) {
        uint16_t pool = qn % IGB_NUM_VM_POOLS;

        if (core->mac[VMVIR0 + pool] & E1000_VMVIR_VLANA_DEFAULT) {
            /* always insert default VLAN */
            insert_vlan = true;
            vlan = core->mac[VMVIR0 + pool] & 0xffff;
        } else if (core->mac[VMVIR0 + pool] & E1000_VMVIR_VLANA_NEVER) {
            insert_vlan = false;
        }
    }

    if (insert_vlan) {
        net_tx_pkt_setup_vlan_header_ex(tx->tx_pkt, vlan,
                                        core->mac[VET] & 0xffff);
    }
}

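/*
 * Apply the offloads requested by the first data descriptor: build the
 * virtio-net header for TSO (using the MSS from the selected context
 * descriptor), or update L4 (TCP/UDP/SCTP) and IP header checksums.
 */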
static bool
igb_setup_tx_offloads(IGBCore *core, struct igb_tx *tx)
{
    uint32_t idx = (tx->first_olinfo_status >> 4) & 1;

    if (tx->first_cmd_type_len & E1000_ADVTXD_DCMD_TSE) {
        uint32_t mss = tx->ctx[idx].mss_l4len_idx >> E1000_ADVTXD_MSS_SHIFT;
        if (!net_tx_pkt_build_vheader(tx->tx_pkt, true, true, mss)) {
            return false;
        }

        net_tx_pkt_update_ip_checksums(tx->tx_pkt);
        e1000x_inc_reg_if_not_full(core->mac, TSCTC);
        return true;
    }

    if ((tx->first_olinfo_status & E1000_ADVTXD_POTS_TXSM) &&
        !((tx->ctx[idx].type_tucmd_mlhl & E1000_ADVTXD_TUCMD_L4T_SCTP) ?
          net_tx_pkt_update_sctp_checksum(tx->tx_pkt) :
          net_tx_pkt_build_vheader(tx->tx_pkt, false, true, 0))) {
        return false;
    }

    if (tx->first_olinfo_status & E1000_ADVTXD_POTS_IXSM) {
        net_tx_pkt_update_ip_hdr_checksum(tx->tx_pkt);
    }

    return true;
}

static void igb_tx_pkt_mac_callback(void *core,
                                    const struct iovec *iov,
                                    int iovcnt,
                                    const struct iovec *virt_iov,
                                    int virt_iovcnt)
{
    igb_receive_internal(core, virt_iov, virt_iovcnt, true, NULL);
}

static void igb_tx_pkt_vmdq_callback(void *opaque,
                                     const struct iovec *iov,
                                     int iovcnt,
                                     const struct iovec *virt_iov,
                                     int virt_iovcnt)
{
    IGBTxPktVmdqCallbackContext *context = opaque;
    bool external_tx;

    igb_receive_internal(context->core, virt_iov, virt_iovcnt, true,
                         &external_tx);

    if (external_tx) {
        if (context->core->has_vnet) {
            qemu_sendv_packet(context->nc, virt_iov, virt_iovcnt);
        } else {
            qemu_sendv_packet(context->nc, iov, iovcnt);
        }
    }
}

/* TX Packets Switching (7.10.3.6) */
static bool igb_tx_pkt_switch(IGBCore *core, struct igb_tx *tx,
                              NetClientState *nc)
{
    IGBTxPktVmdqCallbackContext context;

    /* TX switching is only used to serve VM to VM traffic. */
    if (!(core->mac[MRQC] & 1)) {
        goto send_out;
    }

    /* TX switching requires DTXSWC.Loopback_en bit enabled. */
    if (!(core->mac[DTXSWC] & E1000_DTXSWC_VMDQ_LOOPBACK_EN)) {
        goto send_out;
    }

    context.core = core;
    context.nc = nc;

    return net_tx_pkt_send_custom(tx->tx_pkt, false,
                                  igb_tx_pkt_vmdq_callback, &context);

send_out:
    return net_tx_pkt_send(tx->tx_pkt, nc);
}

static bool
igb_tx_pkt_send(IGBCore *core, struct igb_tx *tx, int queue_index)
{
    int target_queue = MIN(core->max_queue_num, queue_index);
    NetClientState *queue = qemu_get_subqueue(core->owner_nic, target_queue);

    if (!igb_setup_tx_offloads(core, tx)) {
        return false;
    }

    net_tx_pkt_dump(tx->tx_pkt);

    if ((core->phy[MII_BMCR] & MII_BMCR_LOOPBACK) ||
        ((core->mac[RCTL] & E1000_RCTL_LBM_MAC) == E1000_RCTL_LBM_MAC)) {
        return net_tx_pkt_send_custom(tx->tx_pkt, false,
                                      igb_tx_pkt_mac_callback, core);
    } else {
        return igb_tx_pkt_switch(core, tx, queue);
    }
}

static void
igb_on_tx_done_update_stats(IGBCore *core, struct NetTxPkt *tx_pkt, int qn)
{
    static const int PTCregs[6] = { PTC64, PTC127, PTC255, PTC511,
                                    PTC1023, PTC1522 };

    size_t tot_len = net_tx_pkt_get_total_len(tx_pkt) + 4;

    e1000x_increase_size_stats(core->mac, PTCregs, tot_len);
    e1000x_inc_reg_if_not_full(core->mac, TPT);
    e1000x_grow_8reg_if_not_full(core->mac, TOTL, tot_len);

    switch (net_tx_pkt_get_packet_type(tx_pkt)) {
    case ETH_PKT_BCAST:
        e1000x_inc_reg_if_not_full(core->mac, BPTC);
        break;
    case ETH_PKT_MCAST:
        e1000x_inc_reg_if_not_full(core->mac, MPTC);
        break;
    case ETH_PKT_UCAST:
        break;
    default:
        g_assert_not_reached();
    }

    e1000x_inc_reg_if_not_full(core->mac, GPTC);
    e1000x_grow_8reg_if_not_full(core->mac, GOTCL, tot_len);

    if (core->mac[MRQC] & 1) {
        uint16_t pool = qn % IGB_NUM_VM_POOLS;

        core->mac[PVFGOTC0 + (pool * 64)] += tot_len;
        core->mac[PVFGPTC0 + (pool * 64)]++;
    }
}

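/*
 * Consume one advanced Tx descriptor: context descriptors are cached in
 * tx->ctx[], data descriptors add buffer fragments to the packet, and on
 * EOP the assembled packet is parsed, optionally VLAN-tagged and
 * timestamped, then handed to igb_tx_pkt_send().
 */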
static void
igb_process_tx_desc(IGBCore *core,
                    PCIDevice *dev,
                    struct igb_tx *tx,
                    union e1000_adv_tx_desc *tx_desc,
                    int queue_index)
{
    struct e1000_adv_tx_context_desc *tx_ctx_desc;
    uint32_t cmd_type_len;
    uint32_t idx;
    uint64_t buffer_addr;
    uint16_t length;

    cmd_type_len = le32_to_cpu(tx_desc->read.cmd_type_len);

    if (cmd_type_len & E1000_ADVTXD_DCMD_DEXT) {
        if ((cmd_type_len & E1000_ADVTXD_DTYP_DATA) ==
            E1000_ADVTXD_DTYP_DATA) {
            /* advanced transmit data descriptor */
            if (tx->first) {
                tx->first_cmd_type_len = cmd_type_len;
                tx->first_olinfo_status = le32_to_cpu(tx_desc->read.olinfo_status);
                tx->first = false;
            }
        } else if ((cmd_type_len & E1000_ADVTXD_DTYP_CTXT) ==
                   E1000_ADVTXD_DTYP_CTXT) {
            /* advanced transmit context descriptor */
            tx_ctx_desc = (struct e1000_adv_tx_context_desc *)tx_desc;
            idx = (le32_to_cpu(tx_ctx_desc->mss_l4len_idx) >> 4) & 1;
            tx->ctx[idx].vlan_macip_lens = le32_to_cpu(tx_ctx_desc->vlan_macip_lens);
            tx->ctx[idx].seqnum_seed = le32_to_cpu(tx_ctx_desc->seqnum_seed);
            tx->ctx[idx].type_tucmd_mlhl = le32_to_cpu(tx_ctx_desc->type_tucmd_mlhl);
            tx->ctx[idx].mss_l4len_idx = le32_to_cpu(tx_ctx_desc->mss_l4len_idx);
            return;
        } else {
            /* unknown descriptor type */
            return;
        }
    } else {
        /* legacy descriptor */

        /* TODO: Implement a support for legacy descriptors (7.2.2.1). */
    }

    buffer_addr = le64_to_cpu(tx_desc->read.buffer_addr);
    length = cmd_type_len & 0xFFFF;

    if (!tx->skip_cp) {
        if (!net_tx_pkt_add_raw_fragment_pci(tx->tx_pkt, dev,
                                             buffer_addr, length)) {
            tx->skip_cp = true;
        }
    }

    if (cmd_type_len & E1000_TXD_CMD_EOP) {
        if (!tx->skip_cp && net_tx_pkt_parse(tx->tx_pkt)) {
            idx = (tx->first_olinfo_status >> 4) & 1;
            igb_tx_insert_vlan(core, queue_index, tx,
                tx->ctx[idx].vlan_macip_lens >> IGB_TX_FLAGS_VLAN_SHIFT,
                !!(tx->first_cmd_type_len & E1000_TXD_CMD_VLE));

            if ((tx->first_cmd_type_len & E1000_ADVTXD_MAC_TSTAMP) &&
                (core->mac[TSYNCTXCTL] & E1000_TSYNCTXCTL_ENABLED) &&
                !(core->mac[TSYNCTXCTL] & E1000_TSYNCTXCTL_VALID)) {
                core->mac[TSYNCTXCTL] |= E1000_TSYNCTXCTL_VALID;
                e1000x_timestamp(core->mac, core->timadj, TXSTMPL, TXSTMPH);
            }

            if (igb_tx_pkt_send(core, tx, queue_index)) {
                igb_on_tx_done_update_stats(core, tx->tx_pkt, queue_index);
            }
        }

        tx->first = true;
        tx->skip_cp = false;
        net_tx_pkt_reset(tx->tx_pkt, net_tx_pkt_unmap_frag_pci, dev);
    }
}

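/* Look up the EICR bit assigned to a queue's Tx/Rx interrupt via IVAR. */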
static uint32_t igb_tx_wb_eic(IGBCore *core, int queue_idx)
{
    uint32_t n, ent = 0;

    n = igb_ivar_entry_tx(queue_idx);
    ent = (core->mac[IVAR0 + n / 4] >> (8 * (n % 4))) & 0xff;

    return (ent & E1000_IVAR_VALID) ? BIT(ent & 0x1f) : 0;
}

static uint32_t igb_rx_wb_eic(IGBCore *core, int queue_idx)
{
    uint32_t n, ent = 0;

    n = igb_ivar_entry_rx(queue_idx);
    ent = (core->mac[IVAR0 + n / 4] >> (8 * (n % 4))) & 0xff;

    return (ent & E1000_IVAR_VALID) ? BIT(ent & 0x1f) : 0;
}

static inline bool
igb_ring_empty(IGBCore *core, const E1000ERingInfo *r)
{
    return core->mac[r->dh] == core->mac[r->dt] ||
           core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}

static inline uint64_t
igb_ring_base(IGBCore *core, const E1000ERingInfo *r)
{
    uint64_t bah = core->mac[r->dbah];
    uint64_t bal = core->mac[r->dbal];

    return (bah << 32) + bal;
}

static inline uint64_t
igb_ring_head_descr(IGBCore *core, const E1000ERingInfo *r)
{
    return igb_ring_base(core, r) + E1000_RING_DESC_LEN * core->mac[r->dh];
}

static inline void
igb_ring_advance(IGBCore *core, const E1000ERingInfo *r, uint32_t count)
{
    core->mac[r->dh] += count;

    if (core->mac[r->dh] * E1000_RING_DESC_LEN >= core->mac[r->dlen]) {
        core->mac[r->dh] = 0;
    }
}

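/*
 * Number of descriptors the guest has made available to the device,
 * i.e. the head-to-tail distance with wrap-around over the ring length.
 */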
static inline uint32_t
igb_ring_free_descr_num(IGBCore *core, const E1000ERingInfo *r)
{
    trace_e1000e_ring_free_space(r->idx, core->mac[r->dlen],
                                 core->mac[r->dh], core->mac[r->dt]);

    if (core->mac[r->dh] <= core->mac[r->dt]) {
        return core->mac[r->dt] - core->mac[r->dh];
    }

    if (core->mac[r->dh] > core->mac[r->dt]) {
        return core->mac[r->dlen] / E1000_RING_DESC_LEN +
               core->mac[r->dt] - core->mac[r->dh];
    }

    g_assert_not_reached();
    return 0;
}

static inline bool
igb_ring_enabled(IGBCore *core, const E1000ERingInfo *r)
{
    return core->mac[r->dlen] > 0;
}

typedef struct IGB_TxRing_st {
    const E1000ERingInfo *i;
    struct igb_tx *tx;
} IGB_TxRing;

static inline int
igb_mq_queue_idx(int base_reg_idx, int reg_idx)
{
    return (reg_idx - base_reg_idx) / 16;
}

static inline void
igb_tx_ring_init(IGBCore *core, IGB_TxRing *txr, int idx)
{
    static const E1000ERingInfo i[IGB_NUM_QUEUES] = {
        { TDBAH0, TDBAL0, TDLEN0, TDH0, TDT0, 0 },
        { TDBAH1, TDBAL1, TDLEN1, TDH1, TDT1, 1 },
        { TDBAH2, TDBAL2, TDLEN2, TDH2, TDT2, 2 },
        { TDBAH3, TDBAL3, TDLEN3, TDH3, TDT3, 3 },
        { TDBAH4, TDBAL4, TDLEN4, TDH4, TDT4, 4 },
        { TDBAH5, TDBAL5, TDLEN5, TDH5, TDT5, 5 },
        { TDBAH6, TDBAL6, TDLEN6, TDH6, TDT6, 6 },
        { TDBAH7, TDBAL7, TDLEN7, TDH7, TDT7, 7 },
        { TDBAH8, TDBAL8, TDLEN8, TDH8, TDT8, 8 },
        { TDBAH9, TDBAL9, TDLEN9, TDH9, TDT9, 9 },
        { TDBAH10, TDBAL10, TDLEN10, TDH10, TDT10, 10 },
        { TDBAH11, TDBAL11, TDLEN11, TDH11, TDT11, 11 },
        { TDBAH12, TDBAL12, TDLEN12, TDH12, TDT12, 12 },
        { TDBAH13, TDBAL13, TDLEN13, TDH13, TDT13, 13 },
        { TDBAH14, TDBAL14, TDLEN14, TDH14, TDT14, 14 },
        { TDBAH15, TDBAL15, TDLEN15, TDH15, TDT15, 15 }
    };

    assert(idx < ARRAY_SIZE(i));

    txr->i = &i[idx];
    txr->tx = &core->tx[idx];
}

typedef struct E1000E_RxRing_st {
    const E1000ERingInfo *i;
} E1000E_RxRing;

static inline void
igb_rx_ring_init(IGBCore *core, E1000E_RxRing *rxr, int idx)
{
    static const E1000ERingInfo i[IGB_NUM_QUEUES] = {
        { RDBAH0, RDBAL0, RDLEN0, RDH0, RDT0, 0 },
        { RDBAH1, RDBAL1, RDLEN1, RDH1, RDT1, 1 },
        { RDBAH2, RDBAL2, RDLEN2, RDH2, RDT2, 2 },
        { RDBAH3, RDBAL3, RDLEN3, RDH3, RDT3, 3 },
        { RDBAH4, RDBAL4, RDLEN4, RDH4, RDT4, 4 },
        { RDBAH5, RDBAL5, RDLEN5, RDH5, RDT5, 5 },
        { RDBAH6, RDBAL6, RDLEN6, RDH6, RDT6, 6 },
        { RDBAH7, RDBAL7, RDLEN7, RDH7, RDT7, 7 },
        { RDBAH8, RDBAL8, RDLEN8, RDH8, RDT8, 8 },
        { RDBAH9, RDBAL9, RDLEN9, RDH9, RDT9, 9 },
        { RDBAH10, RDBAL10, RDLEN10, RDH10, RDT10, 10 },
        { RDBAH11, RDBAL11, RDLEN11, RDH11, RDT11, 11 },
        { RDBAH12, RDBAL12, RDLEN12, RDH12, RDT12, 12 },
        { RDBAH13, RDBAL13, RDLEN13, RDH13, RDT13, 13 },
        { RDBAH14, RDBAL14, RDLEN14, RDH14, RDT14, 14 },
        { RDBAH15, RDBAL15, RDLEN15, RDH15, RDT15, 15 }
    };

    assert(idx < ARRAY_SIZE(i));

    rxr->i = &i[idx];
}

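/*
 * Write back a completed Tx descriptor when RS is set: either DMA the
 * new head value to the TDWBA address (head write-back enabled) or set
 * the DD bit in the descriptor's status field in guest memory. Returns
 * the EICR bits to raise for this queue.
 */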
static uint32_t
igb_txdesc_writeback(IGBCore *core, dma_addr_t base,
                     union e1000_adv_tx_desc *tx_desc,
                     const E1000ERingInfo *txi)
{
    PCIDevice *d;
    uint32_t cmd_type_len = le32_to_cpu(tx_desc->read.cmd_type_len);
    uint64_t tdwba;

    tdwba = core->mac[E1000_TDWBAL(txi->idx) >> 2];
    tdwba |= (uint64_t)core->mac[E1000_TDWBAH(txi->idx) >> 2] << 32;

    if (!(cmd_type_len & E1000_TXD_CMD_RS)) {
        return 0;
    }

    d = pcie_sriov_get_vf_at_index(core->owner, txi->idx % 8);
    if (!d) {
        d = core->owner;
    }

    if (tdwba & 1) {
        uint32_t buffer = cpu_to_le32(core->mac[txi->dh]);
        pci_dma_write(d, tdwba & ~3, &buffer, sizeof(buffer));
    } else {
        uint32_t status = le32_to_cpu(tx_desc->wb.status) | E1000_TXD_STAT_DD;

        tx_desc->wb.status = cpu_to_le32(status);
        pci_dma_write(d, base + offsetof(union e1000_adv_tx_desc, wb),
                      &tx_desc->wb, sizeof(tx_desc->wb));
    }

    return igb_tx_wb_eic(core, txi->idx);
}

static inline bool
igb_tx_enabled(IGBCore *core, const E1000ERingInfo *txi)
{
    bool vmdq = core->mac[MRQC] & 1;
    uint16_t qn = txi->idx;
    uint16_t pool = qn % IGB_NUM_VM_POOLS;

    return (core->mac[TCTL] & E1000_TCTL_EN) &&
           (!vmdq || core->mac[VFTE] & BIT(pool)) &&
           (core->mac[TXDCTL0 + (qn * 16)] & E1000_TXDCTL_QUEUE_ENABLE);
}

static void
igb_start_xmit(IGBCore *core, const IGB_TxRing *txr)
{
    PCIDevice *d;
    dma_addr_t base;
    union e1000_adv_tx_desc desc;
    const E1000ERingInfo *txi = txr->i;
    uint32_t eic = 0;

    if (!igb_tx_enabled(core, txi)) {
        trace_e1000e_tx_disabled();
        return;
    }

    d = pcie_sriov_get_vf_at_index(core->owner, txi->idx % 8);
    if (!d) {
        d = core->owner;
    }

    while (!igb_ring_empty(core, txi)) {
        base = igb_ring_head_descr(core, txi);

        pci_dma_read(d, base, &desc, sizeof(desc));

        trace_e1000e_tx_descr((void *)(intptr_t)desc.read.buffer_addr,
                              desc.read.cmd_type_len, desc.wb.status);

        igb_process_tx_desc(core, d, txr->tx, &desc, txi->idx);
        igb_ring_advance(core, txi, 1);
        eic |= igb_txdesc_writeback(core, base, &desc, txi);
    }

    if (eic) {
        igb_raise_interrupts(core, EICR, eic);
        igb_raise_interrupts(core, ICR, E1000_ICR_TXDW);
    }

    net_tx_pkt_reset(txr->tx->tx_pkt, net_tx_pkt_unmap_frag_pci, d);
}

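/*
 * Rx buffer sizing: a non-zero SRRCTL.BSIZEPKT (in 1 KB units) takes
 * precedence, otherwise fall back to the legacy RCTL-derived size.
 */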
static uint32_t
igb_rxbufsize(IGBCore *core, const E1000ERingInfo *r)
{
    uint32_t srrctl = core->mac[E1000_SRRCTL(r->idx) >> 2];
    uint32_t bsizepkt = srrctl & E1000_SRRCTL_BSIZEPKT_MASK;
    if (bsizepkt) {
        return bsizepkt << E1000_SRRCTL_BSIZEPKT_SHIFT;
    }

    return e1000x_rxbufsize(core->mac[RCTL]);
}

static bool
igb_has_rxbufs(IGBCore *core, const E1000ERingInfo *r, size_t total_size)
{
    uint32_t bufs = igb_ring_free_descr_num(core, r);
    uint32_t bufsize = igb_rxbufsize(core, r);

    trace_e1000e_rx_has_buffers(r->idx, bufs, total_size, bufsize);

    return total_size <= bufs / (core->rx_desc_len / E1000_MIN_RX_DESC_LEN) *
                         bufsize;
}

static uint32_t
igb_rxhdrbufsize(IGBCore *core, const E1000ERingInfo *r)
{
    uint32_t srrctl = core->mac[E1000_SRRCTL(r->idx) >> 2];
    return (srrctl & E1000_SRRCTL_BSIZEHDRSIZE_MASK) >>
           E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
}

void
igb_start_recv(IGBCore *core)
{
    int i;

    trace_e1000e_rx_start_recv();

    for (i = 0; i <= core->max_queue_num; i++) {
        qemu_flush_queued_packets(qemu_get_subqueue(core->owner_nic, i));
    }
}

bool
igb_can_receive(IGBCore *core)
{
    int i;

    if (!e1000x_rx_ready(core->owner, core->mac)) {
        return false;
    }

    for (i = 0; i < IGB_NUM_QUEUES; i++) {
        E1000E_RxRing rxr;
        if (!(core->mac[RXDCTL0 + (i * 16)] & E1000_RXDCTL_QUEUE_ENABLE)) {
            continue;
        }

        igb_rx_ring_init(core, &rxr, i);
        if (igb_ring_enabled(core, rxr.i) && igb_has_rxbufs(core, rxr.i, 1)) {
            trace_e1000e_rx_can_recv();
            return true;
        }
    }

    trace_e1000e_rx_can_recv_rings_full();
    return false;
}

ssize_t
igb_receive(IGBCore *core, const uint8_t *buf, size_t size)
{
    const struct iovec iov = {
        .iov_base = (uint8_t *)buf,
        .iov_len = size
    };

    return igb_receive_iov(core, &iov, 1);
}

static inline bool
igb_rx_l3_cso_enabled(IGBCore *core)
{
    return !!(core->mac[RXCSUM] & E1000_RXCSUM_IPOFLD);
}

static inline bool
igb_rx_l4_cso_enabled(IGBCore *core)
{
    return !!(core->mac[RXCSUM] & E1000_RXCSUM_TUOFLD);
}

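/*
 * Long-packet check, also used per pool in VMDq mode: with LPE set the
 * limit is RLPML, otherwise the standard MTU plus L2 header(s) applies.
 */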
static bool igb_rx_is_oversized(IGBCore *core, const struct eth_header *ehdr,
                                size_t size, size_t vlan_num,
                                bool lpe, uint16_t rlpml)
{
    size_t vlan_header_size = sizeof(struct vlan_header) * vlan_num;
    size_t header_size = sizeof(struct eth_header) + vlan_header_size;
    return lpe ? size + ETH_FCS_LEN > rlpml : size > header_size + ETH_MTU;
}

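/*
 * Decide which Rx queues should receive the packet: run L2/VLAN and
 * ethertype filtering (including PTP timestamp capture), then either the
 * VMDq pool-selection logic (MRQC bit 0 set) or the plain group filter,
 * and finally RSS to pick a queue within the pool. Returns a bitmap of
 * target queues; an empty bitmap drops the packet.
 */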
static uint16_t igb_receive_assign(IGBCore *core, const struct iovec *iov,
                                   size_t iovcnt, size_t iov_ofs,
                                   const L2Header *l2_header, size_t size,
                                   E1000E_RSSInfo *rss_info,
                                   uint16_t *etqf, bool *ts, bool *external_tx)
{
    static const int ta_shift[] = { 4, 3, 2, 0 };
    const struct eth_header *ehdr = &l2_header->eth;
    uint32_t f, ra[2], *macp, rctl = core->mac[RCTL];
    uint16_t queues = 0;
    uint16_t oversized = 0;
    size_t vlan_num = 0;
    PTP2 ptp2;
    bool lpe;
    uint16_t rlpml;
    int i;

    memset(rss_info, 0, sizeof(E1000E_RSSInfo));
    *ts = false;

    if (external_tx) {
        *external_tx = true;
    }

    if (core->mac[CTRL_EXT] & BIT(26)) {
        if (be16_to_cpu(ehdr->h_proto) == core->mac[VET] >> 16 &&
            be16_to_cpu(l2_header->vlan[0].h_proto) == (core->mac[VET] & 0xffff)) {
            vlan_num = 2;
        }
    } else {
        if (be16_to_cpu(ehdr->h_proto) == (core->mac[VET] & 0xffff)) {
            vlan_num = 1;
        }
    }

    lpe = !!(core->mac[RCTL] & E1000_RCTL_LPE);
    rlpml = core->mac[RLPML];
    if (!(core->mac[RCTL] & E1000_RCTL_SBP) &&
        igb_rx_is_oversized(core, ehdr, size, vlan_num, lpe, rlpml)) {
        trace_e1000x_rx_oversized(size);
        return queues;
    }

    for (*etqf = 0; *etqf < 8; (*etqf)++) {
        if ((core->mac[ETQF0 + *etqf] & E1000_ETQF_FILTER_ENABLE) &&
            be16_to_cpu(ehdr->h_proto) == (core->mac[ETQF0 + *etqf] & E1000_ETQF_ETYPE_MASK)) {
            if ((core->mac[ETQF0 + *etqf] & E1000_ETQF_1588) &&
                (core->mac[TSYNCRXCTL] & E1000_TSYNCRXCTL_ENABLED) &&
                !(core->mac[TSYNCRXCTL] & E1000_TSYNCRXCTL_VALID) &&
                iov_to_buf(iov, iovcnt, iov_ofs + ETH_HLEN, &ptp2, sizeof(ptp2)) >= sizeof(ptp2) &&
                (ptp2.version_ptp & 15) == 2 &&
                ptp2.message_id_transport_specific == ((core->mac[TSYNCRXCFG] >> 8) & 255)) {
                e1000x_timestamp(core->mac, core->timadj, RXSTMPL, RXSTMPH);
                *ts = true;
                core->mac[TSYNCRXCTL] |= E1000_TSYNCRXCTL_VALID;
                core->mac[RXSATRL] = le32_to_cpu(ptp2.source_uuid_lo);
                core->mac[RXSATRH] = le16_to_cpu(ptp2.source_uuid_hi) |
                                     (le16_to_cpu(ptp2.sequence_id) << 16);
            }
            break;
        }
    }

    if (vlan_num &&
        !e1000x_rx_vlan_filter(core->mac, l2_header->vlan + vlan_num - 1)) {
        return queues;
    }

    if (core->mac[MRQC] & 1) {
        if (is_broadcast_ether_addr(ehdr->h_dest)) {
            for (i = 0; i < IGB_NUM_VM_POOLS; i++) {
                if (core->mac[VMOLR0 + i] & E1000_VMOLR_BAM) {
                    queues |= BIT(i);
                }
            }
        } else {
            for (macp = core->mac + RA; macp < core->mac + RA + 32; macp += 2) {
                if (!(macp[1] & E1000_RAH_AV)) {
                    continue;
                }
                ra[0] = cpu_to_le32(macp[0]);
                ra[1] = cpu_to_le32(macp[1]);
                if (!memcmp(ehdr->h_dest, (uint8_t *)ra, ETH_ALEN)) {
                    queues |= (macp[1] & E1000_RAH_POOL_MASK) / E1000_RAH_POOL_1;
                }
            }

            for (macp = core->mac + RA2; macp < core->mac + RA2 + 16; macp += 2) {
                if (!(macp[1] & E1000_RAH_AV)) {
                    continue;
                }
                ra[0] = cpu_to_le32(macp[0]);
                ra[1] = cpu_to_le32(macp[1]);
                if (!memcmp(ehdr->h_dest, (uint8_t *)ra, ETH_ALEN)) {
                    queues |= (macp[1] & E1000_RAH_POOL_MASK) / E1000_RAH_POOL_1;
                }
            }

            if (!queues) {
                macp = core->mac + (is_multicast_ether_addr(ehdr->h_dest) ? MTA : UTA);

                f = ta_shift[(rctl >> E1000_RCTL_MO_SHIFT) & 3];
                f = (((ehdr->h_dest[5] << 8) | ehdr->h_dest[4]) >> f) & 0xfff;
                if (macp[f >> 5] & (1 << (f & 0x1f))) {
                    for (i = 0; i < IGB_NUM_VM_POOLS; i++) {
                        if (core->mac[VMOLR0 + i] & E1000_VMOLR_ROMPE) {
                            queues |= BIT(i);
                        }
                    }
                }
            } else if (is_unicast_ether_addr(ehdr->h_dest) && external_tx) {
                *external_tx = false;
            }
        }

        if (e1000x_vlan_rx_filter_enabled(core->mac)) {
            uint16_t mask = 0;

            if (vlan_num) {
                uint16_t vid = be16_to_cpu(l2_header->vlan[vlan_num - 1].h_tci) & VLAN_VID_MASK;

                for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
                    if ((core->mac[VLVF0 + i] & E1000_VLVF_VLANID_MASK) == vid &&
                        (core->mac[VLVF0 + i] & E1000_VLVF_VLANID_ENABLE)) {
                        uint32_t poolsel = core->mac[VLVF0 + i] & E1000_VLVF_POOLSEL_MASK;
                        mask |= poolsel >> E1000_VLVF_POOLSEL_SHIFT;
                    }
                }
            } else {
                for (i = 0; i < IGB_NUM_VM_POOLS; i++) {
                    if (core->mac[VMOLR0 + i] & E1000_VMOLR_AUPE) {
                        mask |= BIT(i);
                    }
                }
            }

            queues &= mask;
        }

        if (is_unicast_ether_addr(ehdr->h_dest) && !queues && !external_tx &&
            !(core->mac[VT_CTL] & E1000_VT_CTL_DISABLE_DEF_POOL)) {
            uint32_t def_pl = core->mac[VT_CTL] & E1000_VT_CTL_DEFAULT_POOL_MASK;
            queues = BIT(def_pl >> E1000_VT_CTL_DEFAULT_POOL_SHIFT);
        }

        queues &= core->mac[VFRE];
        if (queues) {
            for (i = 0; i < IGB_NUM_VM_POOLS; i++) {
                lpe = !!(core->mac[VMOLR0 + i] & E1000_VMOLR_LPE);
                rlpml = core->mac[VMOLR0 + i] & E1000_VMOLR_RLPML_MASK;
                if ((queues & BIT(i)) &&
                    igb_rx_is_oversized(core, ehdr, size, vlan_num,
                                        lpe, rlpml)) {
                    oversized |= BIT(i);
                }
            }
            /* 8.19.37 increment ROC if packet is oversized for all queues */
            if (oversized == queues) {
                trace_e1000x_rx_oversized(size);
                e1000x_inc_reg_if_not_full(core->mac, ROC);
            }
            queues &= ~oversized;
        }

        if (queues) {
            igb_rss_parse_packet(core, core->rx_pkt,
                                 external_tx != NULL, rss_info);
            /* Sec 8.26.1: PQn = VFn + VQn*8 */
            if (rss_info->queue & 1) {
                for (i = 0; i < IGB_NUM_VM_POOLS; i++) {
                    if ((queues & BIT(i)) &&
                        (core->mac[VMOLR0 + i] & E1000_VMOLR_RSSE)) {
                        queues |= BIT(i + IGB_NUM_VM_POOLS);
                        queues &= ~BIT(i);
                    }
                }
            }
        }
    } else {
        bool accepted = e1000x_rx_group_filter(core->mac, ehdr);
        if (!accepted) {
            for (macp = core->mac + RA2; macp < core->mac + RA2 + 16; macp += 2) {
                if (!(macp[1] & E1000_RAH_AV)) {
                    continue;
                }
                ra[0] = cpu_to_le32(macp[0]);
                ra[1] = cpu_to_le32(macp[1]);
                if (!memcmp(ehdr->h_dest, (uint8_t *)ra, ETH_ALEN)) {
                    trace_e1000x_rx_flt_ucast_match((int)(macp - core->mac - RA2) / 2,
                                                    MAC_ARG(ehdr->h_dest));

                    accepted = true;
                    break;
                }
            }
        }

        if (accepted) {
            igb_rss_parse_packet(core, core->rx_pkt, false, rss_info);
            queues = BIT(rss_info->queue);
        }
    }

    return queues;
}

static inline void
igb_read_lgcy_rx_descr(IGBCore *core, struct e1000_rx_desc *desc,
                       hwaddr *buff_addr)
{
    *buff_addr = le64_to_cpu(desc->buffer_addr);
}

static inline void
igb_read_adv_rx_single_buf_descr(IGBCore *core, union e1000_adv_rx_desc *desc,
                                 hwaddr *buff_addr)
{
    *buff_addr = le64_to_cpu(desc->read.pkt_addr);
}

static inline void
igb_read_adv_rx_split_buf_descr(IGBCore *core, union e1000_adv_rx_desc *desc,
                                hwaddr *buff_addr)
{
    buff_addr[0] = le64_to_cpu(desc->read.hdr_addr);
    buff_addr[1] = le64_to_cpu(desc->read.pkt_addr);
}

typedef struct IGBBAState {
    uint16_t written[IGB_MAX_PS_BUFFERS];
    uint8_t cur_idx;
} IGBBAState;

typedef struct IGBSplitDescriptorData {
    bool sph;
    bool hbo;
    size_t hdr_len;
} IGBSplitDescriptorData;

typedef struct IGBPacketRxDMAState {
    size_t size;
    size_t total_size;
    size_t ps_hdr_len;
    size_t desc_size;
    size_t desc_offset;
    uint32_t rx_desc_packet_buf_size;
    uint32_t rx_desc_header_buf_size;
    struct iovec *iov;
    size_t iov_ofs;
    bool do_ps;
    bool is_first;
    IGBBAState bastate;
    hwaddr ba[IGB_MAX_PS_BUFFERS];
    IGBSplitDescriptorData ps_desc_data;
} IGBPacketRxDMAState;

static inline void
igb_read_rx_descr(IGBCore *core,
                  union e1000_rx_desc_union *desc,
                  IGBPacketRxDMAState *pdma_st,
                  const E1000ERingInfo *r)
{
    uint32_t desc_type;

    if (igb_rx_use_legacy_descriptor(core)) {
        igb_read_lgcy_rx_descr(core, &desc->legacy, &pdma_st->ba[1]);
        pdma_st->ba[0] = 0;
        return;
    }

    /* advanced header split descriptor */
    if (igb_rx_use_ps_descriptor(core, r)) {
        igb_read_adv_rx_split_buf_descr(core, &desc->adv, &pdma_st->ba[0]);
        return;
    }

    /* descriptor replication modes not supported */
    desc_type = igb_rx_queue_desctyp_get(core, r);
    if (desc_type != E1000_SRRCTL_DESCTYPE_ADV_ONEBUF) {
        trace_igb_wrn_rx_desc_modes_not_supp(desc_type);
    }

    /* advanced single buffer descriptor */
    igb_read_adv_rx_single_buf_descr(core, &desc->adv, &pdma_st->ba[1]);
    pdma_st->ba[0] = 0;
}

static void
igb_verify_csum_in_sw(IGBCore *core,
                      struct NetRxPkt *pkt,
                      uint32_t *status_flags,
                      EthL4HdrProto l4hdr_proto)
{
    bool csum_valid;
    uint32_t csum_error;

    if (igb_rx_l3_cso_enabled(core)) {
        if (!net_rx_pkt_validate_l3_csum(pkt, &csum_valid)) {
            trace_e1000e_rx_metadata_l3_csum_validation_failed();
        } else {
            csum_error = csum_valid ? 0 : E1000_RXDEXT_STATERR_IPE;
            *status_flags |= E1000_RXD_STAT_IPCS | csum_error;
        }
    } else {
        trace_e1000e_rx_metadata_l3_cso_disabled();
    }

    if (!igb_rx_l4_cso_enabled(core)) {
        trace_e1000e_rx_metadata_l4_cso_disabled();
        return;
    }

    if (!net_rx_pkt_validate_l4_csum(pkt, &csum_valid)) {
        trace_e1000e_rx_metadata_l4_csum_validation_failed();
        return;
    }

    csum_error = csum_valid ? 0 : E1000_RXDEXT_STATERR_TCPE;
    *status_flags |= E1000_RXD_STAT_TCPCS | csum_error;

    if (l4hdr_proto == ETH_L4_HDR_PROTO_UDP) {
        *status_flags |= E1000_RXD_STAT_UDPCS;
    }
}

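/*
 * Fill the status/error flags and VLAN tag common to legacy and advanced
 * Rx descriptors. Checksum state is taken from the virtio-net header
 * when it carries valid info, otherwise verified in software.
 */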
static void
igb_build_rx_metadata_common(IGBCore *core,
                             struct NetRxPkt *pkt,
                             bool is_eop,
                             uint32_t *status_flags,
                             uint16_t *vlan_tag)
{
    struct virtio_net_hdr *vhdr;
    bool hasip4, hasip6, csum_valid;
    EthL4HdrProto l4hdr_proto;

    *status_flags = E1000_RXD_STAT_DD;

    /* No additional metadata needed for non-EOP descriptors */
    if (!is_eop) {
        goto func_exit;
    }

    *status_flags |= E1000_RXD_STAT_EOP;

    net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);
    trace_e1000e_rx_metadata_protocols(hasip4, hasip6, l4hdr_proto);

    /* VLAN state */
    if (net_rx_pkt_is_vlan_stripped(pkt)) {
        *status_flags |= E1000_RXD_STAT_VP;
        *vlan_tag = cpu_to_le16(net_rx_pkt_get_vlan_tag(pkt));
        trace_e1000e_rx_metadata_vlan(*vlan_tag);
    }

    /* RX CSO information */
    if (hasip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_XSUM_DIS)) {
        trace_e1000e_rx_metadata_ipv6_sum_disabled();
        goto func_exit;
    }

    vhdr = net_rx_pkt_get_vhdr(pkt);

    if (!(vhdr->flags & VIRTIO_NET_HDR_F_DATA_VALID) &&
        !(vhdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) {
        trace_e1000e_rx_metadata_virthdr_no_csum_info();
        igb_verify_csum_in_sw(core, pkt, status_flags, l4hdr_proto);
        goto func_exit;
    }

    if (igb_rx_l3_cso_enabled(core)) {
        *status_flags |= hasip4 ? E1000_RXD_STAT_IPCS : 0;
    } else {
        trace_e1000e_rx_metadata_l3_cso_disabled();
    }

    if (igb_rx_l4_cso_enabled(core)) {
        switch (l4hdr_proto) {
        case ETH_L4_HDR_PROTO_SCTP:
            if (!net_rx_pkt_validate_l4_csum(pkt, &csum_valid)) {
                trace_e1000e_rx_metadata_l4_csum_validation_failed();
                goto func_exit;
            }
            if (!csum_valid) {
                *status_flags |= E1000_RXDEXT_STATERR_TCPE;
            }
            /* fall through */
        case ETH_L4_HDR_PROTO_TCP:
            *status_flags |= E1000_RXD_STAT_TCPCS;
            break;

        case ETH_L4_HDR_PROTO_UDP:
            *status_flags |= E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS;
            break;

        default:
            break;
        }
    } else {
        trace_e1000e_rx_metadata_l4_cso_disabled();
    }

func_exit:
    trace_e1000e_rx_metadata_status_flags(*status_flags);
    *status_flags = cpu_to_le32(*status_flags);
}

static inline void
igb_write_lgcy_rx_descr(IGBCore *core, struct e1000_rx_desc *desc,
                        struct NetRxPkt *pkt,
                        const E1000E_RSSInfo *rss_info,
                        uint16_t length)
{
    uint32_t status_flags;

    assert(!rss_info->enabled);

    memset(desc, 0, sizeof(*desc));
    desc->length = cpu_to_le16(length);
    igb_build_rx_metadata_common(core, pkt, pkt != NULL,
                                 &status_flags,
                                 &desc->special);

    desc->errors = (uint8_t) (le32_to_cpu(status_flags) >> 24);
    desc->status = (uint8_t) le32_to_cpu(status_flags);
}

static bool
igb_rx_ps_descriptor_split_always(IGBCore *core, const E1000ERingInfo *r)
{
    uint32_t desctyp = igb_rx_queue_desctyp_get(core, r);
    return desctyp == E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
}

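/*
 * Encode the advanced Rx descriptor's packet-type field: a matched
 * ethertype filter is reported as BIT(11) | filter index, otherwise the
 * field is assembled from the detected L3/L4 protocols.
 */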
static uint16_t
igb_rx_desc_get_packet_type(IGBCore *core, struct NetRxPkt *pkt, uint16_t etqf)
{
    uint16_t pkt_type;
    bool hasip4, hasip6;
    EthL4HdrProto l4hdr_proto;

    if (etqf < 8) {
        pkt_type = BIT(11) | etqf;
        return pkt_type;
    }

    net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);

    if (hasip6 && !(core->mac[RFCTL] & E1000_RFCTL_IPV6_DIS)) {
        eth_ip6_hdr_info *ip6hdr_info = net_rx_pkt_get_ip6_info(pkt);
        pkt_type = ip6hdr_info->has_ext_hdrs ? E1000_ADVRXD_PKT_IP6E :
                                               E1000_ADVRXD_PKT_IP6;
    } else if (hasip4) {
        pkt_type = E1000_ADVRXD_PKT_IP4;
    } else {
        pkt_type = 0;
    }

    switch (l4hdr_proto) {
    case ETH_L4_HDR_PROTO_TCP:
        pkt_type |= E1000_ADVRXD_PKT_TCP;
        break;
    case ETH_L4_HDR_PROTO_UDP:
        pkt_type |= E1000_ADVRXD_PKT_UDP;
        break;
    case ETH_L4_HDR_PROTO_SCTP:
        pkt_type |= E1000_ADVRXD_PKT_SCTP;
        break;
    default:
        break;
    }

    return pkt_type;
}

static inline void
igb_write_adv_rx_descr(IGBCore *core, union e1000_adv_rx_desc *desc,
                       struct NetRxPkt *pkt,
                       const E1000E_RSSInfo *rss_info, uint16_t etqf, bool ts,
                       uint16_t length)
{
    bool hasip4, hasip6;
    EthL4HdrProto l4hdr_proto;
    uint16_t rss_type = 0, pkt_type;
    bool eop = (pkt != NULL);
    uint32_t adv_desc_status_error = 0;
    memset(&desc->wb, 0, sizeof(desc->wb));

    desc->wb.upper.length = cpu_to_le16(length);
    igb_build_rx_metadata_common(core, pkt, eop,
                                 &desc->wb.upper.status_error,
                                 &desc->wb.upper.vlan);

    if (!eop) {
        return;
    }

    net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);

    if ((core->mac[RXCSUM] & E1000_RXCSUM_PCSD) != 0) {
        if (rss_info->enabled) {
            desc->wb.lower.hi_dword.rss = cpu_to_le32(rss_info->hash);
            rss_type = rss_info->type;
            trace_igb_rx_metadata_rss(desc->wb.lower.hi_dword.rss, rss_type);
        }
    } else if (hasip4) {
        adv_desc_status_error |= E1000_RXD_STAT_IPIDV;
        desc->wb.lower.hi_dword.csum_ip.ip_id =
            cpu_to_le16(net_rx_pkt_get_ip_id(pkt));
        trace_e1000e_rx_metadata_ip_id(
            desc->wb.lower.hi_dword.csum_ip.ip_id);
    }

    if (ts) {
        adv_desc_status_error |= BIT(16);
    }

    pkt_type = igb_rx_desc_get_packet_type(core, pkt, etqf);
    trace_e1000e_rx_metadata_pkt_type(pkt_type);
    desc->wb.lower.lo_dword.pkt_info = cpu_to_le16(rss_type | (pkt_type << 4));
    desc->wb.upper.status_error |= cpu_to_le32(adv_desc_status_error);
}

static inline void
igb_write_adv_ps_rx_descr(IGBCore *core,
                          union e1000_adv_rx_desc *desc,
                          struct NetRxPkt *pkt,
                          const E1000E_RSSInfo *rss_info,
                          const E1000ERingInfo *r,
                          uint16_t etqf,
                          bool ts,
                          IGBPacketRxDMAState *pdma_st)
{
    size_t pkt_len;
    uint16_t hdr_info = 0;

    if (pdma_st->do_ps) {
        pkt_len = pdma_st->bastate.written[1];
    } else {
        pkt_len = pdma_st->bastate.written[0] + pdma_st->bastate.written[1];
    }

    igb_write_adv_rx_descr(core, desc, pkt, rss_info, etqf, ts, pkt_len);

    hdr_info = (pdma_st->ps_desc_data.hdr_len << E1000_ADVRXD_HDR_LEN_OFFSET) &
               E1000_ADVRXD_ADV_HDR_LEN_MASK;
    hdr_info |= pdma_st->ps_desc_data.sph ? E1000_ADVRXD_HDR_SPH : 0;
    desc->wb.lower.lo_dword.hdr_info = cpu_to_le16(hdr_info);

    desc->wb.upper.status_error |= cpu_to_le32(
        pdma_st->ps_desc_data.hbo ? E1000_ADVRXD_ST_ERR_HBO_OFFSET : 0);
}

static inline void
igb_write_rx_descr(IGBCore *core,
                   union e1000_rx_desc_union *desc,
                   struct NetRxPkt *pkt,
                   const E1000E_RSSInfo *rss_info,
                   uint16_t etqf,
                   bool ts,
                   IGBPacketRxDMAState *pdma_st,
                   const E1000ERingInfo *r)
{
    if (igb_rx_use_legacy_descriptor(core)) {
        igb_write_lgcy_rx_descr(core, &desc->legacy, pkt, rss_info,
                                pdma_st->bastate.written[1]);
    } else if (igb_rx_use_ps_descriptor(core, r)) {
        igb_write_adv_ps_rx_descr(core, &desc->adv, pkt, rss_info, r, etqf, ts,
                                  pdma_st);
    } else {
        igb_write_adv_rx_descr(core, &desc->adv, pkt, rss_info,
                               etqf, ts, pdma_st->bastate.written[1]);
    }
}

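/*
 * Write a completed Rx descriptor back to guest memory so that the DD
 * bit becomes visible only after the rest of the descriptor: DD is
 * cleared for the bulk write and then set with a second, smaller write.
 */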
static inline void
igb_pci_dma_write_rx_desc(IGBCore *core, PCIDevice *dev, dma_addr_t addr,
                          union e1000_rx_desc_union *desc, dma_addr_t len)
{
    if (igb_rx_use_legacy_descriptor(core)) {
        struct e1000_rx_desc *d = &desc->legacy;
        size_t offset = offsetof(struct e1000_rx_desc, status);
        uint8_t status = d->status;

        d->status &= ~E1000_RXD_STAT_DD;
        pci_dma_write(dev, addr, desc, len);

        if (status & E1000_RXD_STAT_DD) {
            d->status = status;
            pci_dma_write(dev, addr + offset, &status, sizeof(status));
        }
    } else {
        union e1000_adv_rx_desc *d = &desc->adv;
        size_t offset =
            offsetof(union e1000_adv_rx_desc, wb.upper.status_error);
        uint32_t status = d->wb.upper.status_error;

        d->wb.upper.status_error &= ~E1000_RXD_STAT_DD;
        pci_dma_write(dev, addr, desc, len);

        if (status & E1000_RXD_STAT_DD) {
            d->wb.upper.status_error = status;
            pci_dma_write(dev, addr + offset, &status, sizeof(status));
        }
    }
}

static void
igb_update_rx_stats(IGBCore *core, const E1000ERingInfo *rxi,
                    size_t pkt_size, size_t pkt_fcs_size)
{
    eth_pkt_types_e pkt_type = net_rx_pkt_get_packet_type(core->rx_pkt);
    e1000x_update_rx_total_stats(core->mac, pkt_type, pkt_size, pkt_fcs_size);

    if (core->mac[MRQC] & 1) {
        uint16_t pool = rxi->idx % IGB_NUM_VM_POOLS;

        core->mac[PVFGORC0 + (pool * 64)] += pkt_size + 4;
        core->mac[PVFGPRC0 + (pool * 64)]++;
        if (pkt_type == ETH_PKT_MCAST) {
            core->mac[PVFMPRC0 + (pool * 64)]++;
        }
    }
}

static inline bool
igb_rx_descr_threshold_hit(IGBCore *core, const E1000ERingInfo *rxi)
{
    return igb_ring_free_descr_num(core, rxi) ==
           ((core->mac[E1000_SRRCTL(rxi->idx) >> 2] >> 20) & 31) * 16;
}

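/*
 * Decide whether header split applies to the packet and compute the
 * length of the header to land in buffer 0: the L5 offset for
 * unfragmented TCP/UDP, the L4 offset otherwise. "Split always" queues
 * still split at the header buffer size when the header cannot be
 * parsed or does not fit.
 */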
1650 static bool
igb_do_ps(IGBCore * core,const E1000ERingInfo * r,struct NetRxPkt * pkt,IGBPacketRxDMAState * pdma_st)1651 igb_do_ps(IGBCore *core,
1652 const E1000ERingInfo *r,
1653 struct NetRxPkt *pkt,
1654 IGBPacketRxDMAState *pdma_st)
1655 {
1656 bool hasip4, hasip6;
1657 EthL4HdrProto l4hdr_proto;
1658 bool fragment;
1659 bool split_always;
1660 size_t bheader_size;
1661 size_t total_pkt_len;
1662
1663 if (!igb_rx_use_ps_descriptor(core, r)) {
1664 return false;
1665 }
1666
1667 total_pkt_len = net_rx_pkt_get_total_len(pkt);
1668 bheader_size = igb_rxhdrbufsize(core, r);
1669 split_always = igb_rx_ps_descriptor_split_always(core, r);
1670 if (split_always && total_pkt_len <= bheader_size) {
1671 pdma_st->ps_hdr_len = total_pkt_len;
1672 pdma_st->ps_desc_data.hdr_len = total_pkt_len;
1673 return true;
1674 }
1675
1676 net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);
1677
1678 if (hasip4) {
1679 fragment = net_rx_pkt_get_ip4_info(pkt)->fragment;
1680 } else if (hasip6) {
1681 fragment = net_rx_pkt_get_ip6_info(pkt)->fragment;
1682 } else {
1683 pdma_st->ps_desc_data.hdr_len = bheader_size;
1684 goto header_not_handled;
1685 }
1686
1687 if (fragment && (core->mac[RFCTL] & E1000_RFCTL_IPFRSP_DIS)) {
1688 pdma_st->ps_desc_data.hdr_len = bheader_size;
1689 goto header_not_handled;
1690 }
1691
1692 /* no header splitting for SCTP */
1693 if (!fragment && (l4hdr_proto == ETH_L4_HDR_PROTO_UDP ||
1694 l4hdr_proto == ETH_L4_HDR_PROTO_TCP)) {
1695 pdma_st->ps_hdr_len = net_rx_pkt_get_l5_hdr_offset(pkt);
1696 } else {
1697 pdma_st->ps_hdr_len = net_rx_pkt_get_l4_hdr_offset(pkt);
1698 }
1699
1700 pdma_st->ps_desc_data.sph = true;
1701 pdma_st->ps_desc_data.hdr_len = pdma_st->ps_hdr_len;
1702
1703 if (pdma_st->ps_hdr_len > bheader_size) {
1704 pdma_st->ps_desc_data.hbo = true;
1705 goto header_not_handled;
1706 }
1707
1708 return true;
1709
1710 header_not_handled:
1711 if (split_always) {
1712 pdma_st->ps_hdr_len = bheader_size;
1713 return true;
1714 }
1715
1716 return false;
1717 }
1718
1719 static void
igb_truncate_to_descriptor_size(IGBPacketRxDMAState * pdma_st,size_t * size)1720 igb_truncate_to_descriptor_size(IGBPacketRxDMAState *pdma_st, size_t *size)
1721 {
1722 if (pdma_st->do_ps && pdma_st->is_first) {
1723 if (*size > pdma_st->rx_desc_packet_buf_size + pdma_st->ps_hdr_len) {
1724 *size = pdma_st->rx_desc_packet_buf_size + pdma_st->ps_hdr_len;
1725 }
1726 } else {
1727 if (*size > pdma_st->rx_desc_packet_buf_size) {
1728 *size = pdma_st->rx_desc_packet_buf_size;
1729 }
1730 }
1731 }
1732
1733 static inline void
igb_write_hdr_frag_to_rx_buffers(IGBCore * core,PCIDevice * d,IGBPacketRxDMAState * pdma_st,const char * data,dma_addr_t data_len)1734 igb_write_hdr_frag_to_rx_buffers(IGBCore *core,
1735 PCIDevice *d,
1736 IGBPacketRxDMAState *pdma_st,
1737 const char *data,
1738 dma_addr_t data_len)
1739 {
1740 assert(data_len <= pdma_st->rx_desc_header_buf_size -
1741 pdma_st->bastate.written[0]);
1742 pci_dma_write(d,
1743 pdma_st->ba[0] + pdma_st->bastate.written[0],
1744 data, data_len);
1745 pdma_st->bastate.written[0] += data_len;
1746 pdma_st->bastate.cur_idx = 1;
1747 }
1748
1749 static void
igb_write_header_to_rx_buffers(IGBCore * core,struct NetRxPkt * pkt,PCIDevice * d,IGBPacketRxDMAState * pdma_st,size_t * copy_size)1750 igb_write_header_to_rx_buffers(IGBCore *core,
1751 struct NetRxPkt *pkt,
1752 PCIDevice *d,
1753 IGBPacketRxDMAState *pdma_st,
1754 size_t *copy_size)
1755 {
1756 size_t iov_copy;
1757 size_t ps_hdr_copied = 0;
1758
1759 if (!pdma_st->is_first) {
1760 /* Leave buffer 0 of each descriptor except first */
1761 /* empty */
1762 pdma_st->bastate.cur_idx = 1;
1763 return;
1764 }
1765
1766 do {
1767 iov_copy = MIN(pdma_st->ps_hdr_len - ps_hdr_copied,
1768 pdma_st->iov->iov_len - pdma_st->iov_ofs);
1769
1770 igb_write_hdr_frag_to_rx_buffers(core, d, pdma_st,
1771 pdma_st->iov->iov_base,
1772 iov_copy);
1773
1774 *copy_size -= iov_copy;
1775 ps_hdr_copied += iov_copy;
1776
1777 pdma_st->iov_ofs += iov_copy;
1778 if (pdma_st->iov_ofs == pdma_st->iov->iov_len) {
1779 pdma_st->iov++;
1780 pdma_st->iov_ofs = 0;
1781 }
1782 } while (ps_hdr_copied < pdma_st->ps_hdr_len);
1783
1784 pdma_st->is_first = false;
1785 }
1786
1787 static void
igb_write_payload_frag_to_rx_buffers(IGBCore * core,PCIDevice * d,IGBPacketRxDMAState * pdma_st,const char * data,dma_addr_t data_len)1788 igb_write_payload_frag_to_rx_buffers(IGBCore *core,
1789 PCIDevice *d,
1790 IGBPacketRxDMAState *pdma_st,
1791 const char *data,
1792 dma_addr_t data_len)
1793 {
1794 while (data_len > 0) {
1795 assert(pdma_st->bastate.cur_idx < IGB_MAX_PS_BUFFERS);
1796
1797 uint32_t cur_buf_bytes_left =
1798 pdma_st->rx_desc_packet_buf_size -
1799 pdma_st->bastate.written[pdma_st->bastate.cur_idx];
1800 uint32_t bytes_to_write = MIN(data_len, cur_buf_bytes_left);
1801
1802 trace_igb_rx_desc_buff_write(
1803 pdma_st->bastate.cur_idx,
1804 pdma_st->ba[pdma_st->bastate.cur_idx],
1805 pdma_st->bastate.written[pdma_st->bastate.cur_idx],
1806 data,
1807 bytes_to_write);
1808
1809 pci_dma_write(d,
1810 pdma_st->ba[pdma_st->bastate.cur_idx] +
1811 pdma_st->bastate.written[pdma_st->bastate.cur_idx],
1812 data, bytes_to_write);
1813
1814 pdma_st->bastate.written[pdma_st->bastate.cur_idx] += bytes_to_write;
1815 data += bytes_to_write;
1816 data_len -= bytes_to_write;
1817
1818 if (pdma_st->bastate.written[pdma_st->bastate.cur_idx] ==
1819 pdma_st->rx_desc_packet_buf_size) {
1820 pdma_st->bastate.cur_idx++;
1821 }
1822 }
1823 }
1824
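/*
 * Drain the remaining iovec data into the descriptor's packet buffers and,
 * on the last descriptor of the packet, append zeroes in place of the
 * Ethernet FCS if the guest is configured to receive it.
 */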
1825 static void
1826 igb_write_payload_to_rx_buffers(IGBCore *core,
1827 struct NetRxPkt *pkt,
1828 PCIDevice *d,
1829 IGBPacketRxDMAState *pdma_st,
1830 size_t *copy_size)
1831 {
1832 static const uint32_t fcs_pad;
1833 size_t iov_copy;
1834
1835 /* Copy packet payload */
1836 while (*copy_size) {
1837 iov_copy = MIN(*copy_size, pdma_st->iov->iov_len - pdma_st->iov_ofs);
1838 igb_write_payload_frag_to_rx_buffers(core, d,
1839 pdma_st,
1840 pdma_st->iov->iov_base +
1841 pdma_st->iov_ofs,
1842 iov_copy);
1843
1844 *copy_size -= iov_copy;
1845 pdma_st->iov_ofs += iov_copy;
1846 if (pdma_st->iov_ofs == pdma_st->iov->iov_len) {
1847 pdma_st->iov++;
1848 pdma_st->iov_ofs = 0;
1849 }
1850 }
1851
1852 if (pdma_st->desc_offset + pdma_st->desc_size >= pdma_st->total_size) {
1853 /* Simulate FCS checksum presence in the last descriptor */
1854 igb_write_payload_frag_to_rx_buffers(core, d,
1855 pdma_st,
1856 (const char *) &fcs_pad,
1857 e1000x_fcs_len(core->mac));
1858 }
1859 }
1860
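/*
 * DMA the part of the packet that belongs to the current descriptor:
 * skip null-address descriptors, size the chunk, then write the header
 * (packet-split mode only) followed by the payload.
 */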
1861 static void
1862 igb_write_to_rx_buffers(IGBCore *core,
1863 struct NetRxPkt *pkt,
1864 PCIDevice *d,
1865 IGBPacketRxDMAState *pdma_st)
1866 {
1867 size_t copy_size;
1868
1869 if (!(pdma_st->ba)[1] || (pdma_st->do_ps && !(pdma_st->ba[0]))) {
1870 /* As per Intel docs, skip descriptors with a null buffer address */
1871 trace_e1000e_rx_null_descriptor();
1872 return;
1873 }
1874
1875 if (pdma_st->desc_offset >= pdma_st->size) {
1876 return;
1877 }
1878
1879 pdma_st->desc_size = pdma_st->total_size - pdma_st->desc_offset;
1880 igb_truncate_to_descriptor_size(pdma_st, &pdma_st->desc_size);
1881 copy_size = pdma_st->size - pdma_st->desc_offset;
1882 igb_truncate_to_descriptor_size(pdma_st, &copy_size);
1883
1884 /* For PS mode copy the packet header first */
1885 if (pdma_st->do_ps) {
1886 igb_write_header_to_rx_buffers(core, pkt, d, pdma_st, &copy_size);
1887 } else {
1888 pdma_st->bastate.cur_idx = 1;
1889 }
1890
1891 igb_write_payload_to_rx_buffers(core, pkt, d, pdma_st, &copy_size);
1892 }
1893
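/*
 * Main Rx delivery loop: fetch one ring descriptor at a time, copy the
 * next chunk of the packet into its buffers, write the completed
 * descriptor back and advance the ring head until the whole packet
 * (including the simulated FCS) has been transferred.
 */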
1894 static void
1895 igb_write_packet_to_guest(IGBCore *core, struct NetRxPkt *pkt,
1896 const E1000E_RxRing *rxr,
1897 const E1000E_RSSInfo *rss_info,
1898 uint16_t etqf, bool ts)
1899 {
1900 PCIDevice *d;
1901 dma_addr_t base;
1902 union e1000_rx_desc_union desc;
1903 const E1000ERingInfo *rxi;
1904 size_t rx_desc_len;
1905
1906 IGBPacketRxDMAState pdma_st = {0};
1907 pdma_st.is_first = true;
1908 pdma_st.size = net_rx_pkt_get_total_len(pkt);
1909 pdma_st.total_size = pdma_st.size + e1000x_fcs_len(core->mac);
1910
1911 rxi = rxr->i;
1912 rx_desc_len = core->rx_desc_len;
1913 pdma_st.rx_desc_packet_buf_size = igb_rxbufsize(core, rxi);
1914 pdma_st.rx_desc_header_buf_size = igb_rxhdrbufsize(core, rxi);
1915 pdma_st.iov = net_rx_pkt_get_iovec(pkt);
1916 d = pcie_sriov_get_vf_at_index(core->owner, rxi->idx % 8);
1917 if (!d) {
1918 d = core->owner;
1919 }
1920
1921 pdma_st.do_ps = igb_do_ps(core, rxi, pkt, &pdma_st);
1922
1923 do {
1924 memset(&pdma_st.bastate, 0, sizeof(IGBBAState));
1925 bool is_last = false;
1926
1927 if (igb_ring_empty(core, rxi)) {
1928 return;
1929 }
1930
1931 base = igb_ring_head_descr(core, rxi);
1932 pci_dma_read(d, base, &desc, rx_desc_len);
1933 trace_e1000e_rx_descr(rxi->idx, base, rx_desc_len);
1934
1935 igb_read_rx_descr(core, &desc, &pdma_st, rxi);
1936
1937 igb_write_to_rx_buffers(core, pkt, d, &pdma_st);
1938 pdma_st.desc_offset += pdma_st.desc_size;
1939 if (pdma_st.desc_offset >= pdma_st.total_size) {
1940 is_last = true;
1941 }
1942
1943 igb_write_rx_descr(core, &desc,
1944 is_last ? pkt : NULL,
1945 rss_info,
1946 etqf, ts,
1947 &pdma_st,
1948 rxi);
1949 igb_pci_dma_write_rx_desc(core, d, base, &desc, rx_desc_len);
1950 igb_ring_advance(core, rxi, rx_desc_len / E1000_MIN_RX_DESC_LEN);
1951 } while (pdma_st.desc_offset < pdma_st.total_size);
1952
1953 igb_update_rx_stats(core, rxi, pdma_st.size, pdma_st.total_size);
1954 }
1955
1956 static bool
1957 igb_rx_strip_vlan(IGBCore *core, const E1000ERingInfo *rxi)
1958 {
1959 if (core->mac[MRQC] & 1) {
1960 uint16_t pool = rxi->idx % IGB_NUM_VM_POOLS;
1961 /* Sec 7.10.3.8: CTRL.VME is ignored, only VMOLR/RPLOLR is used */
1962 return (net_rx_pkt_get_packet_type(core->rx_pkt) == ETH_PKT_MCAST) ?
1963 core->mac[RPLOLR] & E1000_RPLOLR_STRVLAN :
1964 core->mac[VMOLR0 + pool] & E1000_VMOLR_STRVLAN;
1965 }
1966
1967 return e1000x_vlan_enabled(core->mac);
1968 }
1969
1970 static inline void
1971 igb_rx_fix_l4_csum(IGBCore *core, struct NetRxPkt *pkt)
1972 {
1973 struct virtio_net_hdr *vhdr = net_rx_pkt_get_vhdr(pkt);
1974
1975 if (vhdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1976 net_rx_pkt_fix_l4_csum(pkt);
1977 }
1978 }
1979
1980 ssize_t
1981 igb_receive_iov(IGBCore *core, const struct iovec *iov, int iovcnt)
1982 {
1983 return igb_receive_internal(core, iov, iovcnt, core->has_vnet, NULL);
1984 }
1985
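/*
 * Common receive path: strip the optional virtio-net header, pad runt
 * frames to ETH_ZLEN, classify the packet, then deliver it to every
 * enabled queue selected by igb_receive_assign(), accumulating the
 * legacy and extended interrupt causes to raise at the end.
 */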
1986 static ssize_t
1987 igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
1988 bool has_vnet, bool *external_tx)
1989 {
1990 uint16_t queues = 0;
1991 uint32_t causes = 0;
1992 uint32_t ecauses = 0;
1993 union {
1994 L2Header l2_header;
1995 uint8_t octets[ETH_ZLEN];
1996 } buf;
1997 struct iovec min_iov;
1998 size_t size, orig_size;
1999 size_t iov_ofs = 0;
2000 E1000E_RxRing rxr;
2001 E1000E_RSSInfo rss_info;
2002 uint16_t etqf;
2003 bool ts;
2004 size_t total_size;
2005 int strip_vlan_index;
2006 int i;
2007
2008 trace_e1000e_rx_receive_iov(iovcnt);
2009
2010 if (external_tx) {
2011 *external_tx = true;
2012 }
2013
2014 if (!e1000x_hw_rx_enabled(core->mac)) {
2015 return -1;
2016 }
2017
2018 /* Pull virtio header in */
2019 if (has_vnet) {
2020 net_rx_pkt_set_vhdr_iovec(core->rx_pkt, iov, iovcnt);
2021 iov_ofs = sizeof(struct virtio_net_hdr);
2022 } else {
2023 net_rx_pkt_unset_vhdr(core->rx_pkt);
2024 }
2025
2026 orig_size = iov_size(iov, iovcnt);
2027 size = orig_size - iov_ofs;
2028
2029 /* Pad to minimum Ethernet frame length */
2030 if (size < sizeof(buf)) {
2031 iov_to_buf(iov, iovcnt, iov_ofs, &buf, size);
2032 memset(&buf.octets[size], 0, sizeof(buf) - size);
2033 e1000x_inc_reg_if_not_full(core->mac, RUC);
2034 min_iov.iov_base = &buf;
2035 min_iov.iov_len = size = sizeof(buf);
2036 iovcnt = 1;
2037 iov = &min_iov;
2038 iov_ofs = 0;
2039 } else {
2040 iov_to_buf(iov, iovcnt, iov_ofs, &buf, sizeof(buf.l2_header));
2041 }
2042
2043 net_rx_pkt_set_packet_type(core->rx_pkt,
2044 get_eth_packet_type(&buf.l2_header.eth));
2045 net_rx_pkt_set_protocols(core->rx_pkt, iov, iovcnt, iov_ofs);
2046
2047 queues = igb_receive_assign(core, iov, iovcnt, iov_ofs,
2048 &buf.l2_header, size,
2049 &rss_info, &etqf, &ts, external_tx);
2050 if (!queues) {
2051 trace_e1000e_rx_flt_dropped();
2052 return orig_size;
2053 }
2054
2055 for (i = 0; i < IGB_NUM_QUEUES; i++) {
2056 if (!(queues & BIT(i)) ||
2057 !(core->mac[RXDCTL0 + (i * 16)] & E1000_RXDCTL_QUEUE_ENABLE)) {
2058 continue;
2059 }
2060
2061 igb_rx_ring_init(core, &rxr, i);
2062
2063 if (!igb_rx_strip_vlan(core, rxr.i)) {
2064 strip_vlan_index = -1;
2065 } else if (core->mac[CTRL_EXT] & BIT(26)) {
2066 strip_vlan_index = 1;
2067 } else {
2068 strip_vlan_index = 0;
2069 }
2070
2071 net_rx_pkt_attach_iovec_ex(core->rx_pkt, iov, iovcnt, iov_ofs,
2072 strip_vlan_index,
2073 core->mac[VET] & 0xffff,
2074 core->mac[VET] >> 16);
2075
2076 total_size = net_rx_pkt_get_total_len(core->rx_pkt) +
2077 e1000x_fcs_len(core->mac);
2078
2079 if (!igb_has_rxbufs(core, rxr.i, total_size)) {
2080 causes |= E1000_ICS_RXO;
2081 trace_e1000e_rx_not_written_to_guest(rxr.i->idx);
2082 continue;
2083 }
2084
2085 causes |= E1000_ICR_RXDW;
2086
2087 igb_rx_fix_l4_csum(core, core->rx_pkt);
2088 igb_write_packet_to_guest(core, core->rx_pkt, &rxr, &rss_info, etqf, ts);
2089
2090 /* Check if receive descriptor minimum threshold hit */
2091 if (igb_rx_descr_threshold_hit(core, rxr.i)) {
2092 causes |= E1000_ICS_RXDMT0;
2093 }
2094
2095 ecauses |= igb_rx_wb_eic(core, rxr.i->idx);
2096
2097 trace_e1000e_rx_written_to_guest(rxr.i->idx);
2098 }
2099
2100 trace_e1000e_rx_interrupt_set(causes);
2101 igb_raise_interrupts(core, EICR, ecauses);
2102 igb_raise_interrupts(core, ICR, causes);
2103
2104 return orig_size;
2105 }
2106
2107 static inline bool
2108 igb_have_autoneg(IGBCore *core)
2109 {
2110 return core->phy[MII_BMCR] & MII_BMCR_AUTOEN;
2111 }
2112
2113 static void igb_update_flowctl_status(IGBCore *core)
2114 {
2115 if (igb_have_autoneg(core) && core->phy[MII_BMSR] & MII_BMSR_AN_COMP) {
2116 trace_e1000e_link_autoneg_flowctl(true);
2117 core->mac[CTRL] |= E1000_CTRL_TFCE | E1000_CTRL_RFCE;
2118 } else {
2119 trace_e1000e_link_autoneg_flowctl(false);
2120 }
2121 }
2122
2123 static inline void
2124 igb_link_down(IGBCore *core)
2125 {
2126 e1000x_update_regs_on_link_down(core->mac, core->phy);
2127 igb_update_flowctl_status(core);
2128 }
2129
2130 static inline void
2131 igb_set_phy_ctrl(IGBCore *core, uint16_t val)
2132 {
2133 /* bits 0-5 reserved; MII_BMCR_[ANRESTART,RESET] are self clearing */
2134 core->phy[MII_BMCR] = val & ~(0x3f | MII_BMCR_RESET | MII_BMCR_ANRESTART);
2135
2136 if ((val & MII_BMCR_ANRESTART) && igb_have_autoneg(core)) {
2137 e1000x_restart_autoneg(core->mac, core->phy, core->autoneg_timer);
2138 }
2139 }
2140
2141 void igb_core_set_link_status(IGBCore *core)
2142 {
2143 NetClientState *nc = qemu_get_queue(core->owner_nic);
2144 uint32_t old_status = core->mac[STATUS];
2145
2146 trace_e1000e_link_status_changed(nc->link_down ? false : true);
2147
2148 if (nc->link_down) {
2149 e1000x_update_regs_on_link_down(core->mac, core->phy);
2150 } else {
2151 if (igb_have_autoneg(core) &&
2152 !(core->phy[MII_BMSR] & MII_BMSR_AN_COMP)) {
2153 e1000x_restart_autoneg(core->mac, core->phy,
2154 core->autoneg_timer);
2155 } else {
2156 e1000x_update_regs_on_link_up(core->mac, core->phy);
2157 igb_start_recv(core);
2158 }
2159 }
2160
2161 if (core->mac[STATUS] != old_status) {
2162 igb_raise_interrupts(core, ICR, E1000_ICR_LSC);
2163 }
2164 }
2165
2166 static void
2167 igb_set_ctrl(IGBCore *core, int index, uint32_t val)
2168 {
2169 trace_e1000e_core_ctrl_write(index, val);
2170
2171 /* RST is self clearing */
2172 core->mac[CTRL] = val & ~E1000_CTRL_RST;
2173 core->mac[CTRL_DUP] = core->mac[CTRL];
2174
2175 trace_e1000e_link_set_params(
2176 !!(val & E1000_CTRL_ASDE),
2177 (val & E1000_CTRL_SPD_SEL) >> E1000_CTRL_SPD_SHIFT,
2178 !!(val & E1000_CTRL_FRCSPD),
2179 !!(val & E1000_CTRL_FRCDPX),
2180 !!(val & E1000_CTRL_RFCE),
2181 !!(val & E1000_CTRL_TFCE));
2182
2183 if (val & E1000_CTRL_RST) {
2184 trace_e1000e_core_ctrl_sw_reset();
2185 igb_reset(core, true);
2186 }
2187
2188 if (val & E1000_CTRL_PHY_RST) {
2189 trace_e1000e_core_ctrl_phy_reset();
2190 core->mac[STATUS] |= E1000_STATUS_PHYRA;
2191 }
2192 }
2193
2194 static void
2195 igb_set_rfctl(IGBCore *core, int index, uint32_t val)
2196 {
2197 trace_e1000e_rx_set_rfctl(val);
2198
2199 if (!(val & E1000_RFCTL_ISCSI_DIS)) {
2200 trace_e1000e_wrn_iscsi_filtering_not_supported();
2201 }
2202
2203 if (!(val & E1000_RFCTL_NFSW_DIS)) {
2204 trace_e1000e_wrn_nfsw_filtering_not_supported();
2205 }
2206
2207 if (!(val & E1000_RFCTL_NFSR_DIS)) {
2208 trace_e1000e_wrn_nfsr_filtering_not_supported();
2209 }
2210
2211 core->mac[RFCTL] = val;
2212 }
2213
2214 static void
2215 igb_calc_rxdesclen(IGBCore *core)
2216 {
2217 if (igb_rx_use_legacy_descriptor(core)) {
2218 core->rx_desc_len = sizeof(struct e1000_rx_desc);
2219 } else {
2220 core->rx_desc_len = sizeof(union e1000_adv_rx_desc);
2221 }
2222 trace_e1000e_rx_desc_len(core->rx_desc_len);
2223 }
2224
2225 static void
2226 igb_set_rx_control(IGBCore *core, int index, uint32_t val)
2227 {
2228 core->mac[RCTL] = val;
2229 trace_e1000e_rx_set_rctl(core->mac[RCTL]);
2230
2231 if (val & E1000_RCTL_DTYP_MASK) {
2232 qemu_log_mask(LOG_GUEST_ERROR,
2233 "igb: RCTL.DTYP must be zero for compatibility");
2234 }
2235
2236 if (val & E1000_RCTL_EN) {
2237 igb_calc_rxdesclen(core);
2238 igb_start_recv(core);
2239 }
2240 }
2241
2242 static inline bool
2243 igb_postpone_interrupt(IGBIntrDelayTimer *timer)
2244 {
2245 if (timer->running) {
2246 trace_e1000e_irq_postponed_by_xitr(timer->delay_reg << 2);
2247
2248 return true;
2249 }
2250
2251 if (timer->core->mac[timer->delay_reg] != 0) {
2252 igb_intrmgr_rearm_timer(timer);
2253 }
2254
2255 return false;
2256 }
2257
2258 static inline bool
2259 igb_eitr_should_postpone(IGBCore *core, int idx)
2260 {
2261 return igb_postpone_interrupt(&core->eitr[idx]);
2262 }
2263
2264 static void igb_send_msix(IGBCore *core, uint32_t causes)
2265 {
2266 int vector;
2267
2268 for (vector = 0; vector < IGB_INTR_NUM; ++vector) {
2269 if ((causes & BIT(vector)) && !igb_eitr_should_postpone(core, vector)) {
2270
2271 trace_e1000e_irq_msix_notify_vec(vector);
2272 igb_msix_notify(core, vector);
2273 }
2274 }
2275 }
2276
2277 static inline void
2278 igb_fix_icr_asserted(IGBCore *core)
2279 {
2280 core->mac[ICR] &= ~E1000_ICR_ASSERTED;
2281 if (core->mac[ICR]) {
2282 core->mac[ICR] |= E1000_ICR_ASSERTED;
2283 }
2284
2285 trace_e1000e_irq_fix_icr_asserted(core->mac[ICR]);
2286 }
2287
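/*
 * Record new interrupt causes and notify the guest. With GPIE.MSIX_MODE
 * set, ICR causes are first translated into an EICR vector via IVAR_MISC;
 * otherwise the asserted ICR is signalled through MSI-X vector 0, MSI or
 * the legacy INTx pin, whichever is enabled.
 */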
2288 static void igb_raise_interrupts(IGBCore *core, size_t index, uint32_t causes)
2289 {
2290 uint32_t old_causes = core->mac[ICR] & core->mac[IMS];
2291 uint32_t old_ecauses = core->mac[EICR] & core->mac[EIMS];
2292 uint32_t raised_causes;
2293 uint32_t raised_ecauses;
2294 uint32_t int_alloc;
2295
2296 trace_e1000e_irq_set(index << 2,
2297 core->mac[index], core->mac[index] | causes);
2298
2299 core->mac[index] |= causes;
2300
2301 if (core->mac[GPIE] & E1000_GPIE_MSIX_MODE) {
2302 raised_causes = core->mac[ICR] & core->mac[IMS] & ~old_causes;
2303
2304 if (raised_causes & E1000_ICR_DRSTA) {
2305 int_alloc = core->mac[IVAR_MISC] & 0xff;
2306 if (int_alloc & E1000_IVAR_VALID) {
2307 core->mac[EICR] |= BIT(int_alloc & 0x1f);
2308 }
2309 }
2310 /* Check if other bits (excluding the TCP Timer) are enabled. */
2311 if (raised_causes & ~E1000_ICR_DRSTA) {
2312 int_alloc = (core->mac[IVAR_MISC] >> 8) & 0xff;
2313 if (int_alloc & E1000_IVAR_VALID) {
2314 core->mac[EICR] |= BIT(int_alloc & 0x1f);
2315 }
2316 }
2317
2318 raised_ecauses = core->mac[EICR] & core->mac[EIMS] & ~old_ecauses;
2319 if (!raised_ecauses) {
2320 return;
2321 }
2322
2323 igb_send_msix(core, raised_ecauses);
2324 } else {
2325 igb_fix_icr_asserted(core);
2326
2327 raised_causes = core->mac[ICR] & core->mac[IMS] & ~old_causes;
2328 if (!raised_causes) {
2329 return;
2330 }
2331
2332 core->mac[EICR] |= (raised_causes & E1000_ICR_DRSTA) | E1000_EICR_OTHER;
2333
2334 if (msix_enabled(core->owner)) {
2335 trace_e1000e_irq_msix_notify_vec(0);
2336 msix_notify(core->owner, 0);
2337 } else if (msi_enabled(core->owner)) {
2338 trace_e1000e_irq_msi_notify(raised_causes);
2339 msi_notify(core->owner, 0);
2340 } else {
2341 igb_raise_legacy_irq(core);
2342 }
2343 }
2344 }
2345
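/*
 * Clear interrupt causes and release the legacy INTx line once no
 * unmasked cause remains pending.
 */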
2346 static void igb_lower_interrupts(IGBCore *core, size_t index, uint32_t causes)
2347 {
2348 trace_e1000e_irq_clear(index << 2,
2349 core->mac[index], core->mac[index] & ~causes);
2350
2351 core->mac[index] &= ~causes;
2352
2353 trace_e1000e_irq_pending_interrupts(core->mac[ICR] & core->mac[IMS],
2354 core->mac[ICR], core->mac[IMS]);
2355
2356 if (!(core->mac[ICR] & core->mac[IMS]) &&
2357 !(core->mac[GPIE] & E1000_GPIE_MSIX_MODE)) {
2358 core->mac[EICR] &= ~E1000_EICR_OTHER;
2359
2360 if (!msix_enabled(core->owner) && !msi_enabled(core->owner)) {
2361 igb_lower_legacy_irq(core);
2362 }
2363 }
2364 }
2365
2366 static void igb_set_eics(IGBCore *core, int index, uint32_t val)
2367 {
2368 bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE);
2369 uint32_t mask = msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK;
2370
2371 trace_igb_irq_write_eics(val, msix);
2372 igb_raise_interrupts(core, EICR, val & mask);
2373 }
2374
2375 static void igb_set_eims(IGBCore *core, int index, uint32_t val)
2376 {
2377 bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE);
2378 uint32_t mask = msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK;
2379
2380 trace_igb_irq_write_eims(val, msix);
2381 igb_raise_interrupts(core, EIMS, val & mask);
2382 }
2383
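/*
 * Raise a mailbox interrupt for a VF: if the VTIVAR_MISC entry is valid,
 * its vector bits are shifted into that VF's slice of EICR
 * (IGBVF_MSIX_VEC_NUM vectors per VF, anchored at bit 22 for VF 0).
 */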
2384 static void mailbox_interrupt_to_vf(IGBCore *core, uint16_t vfn)
2385 {
2386 uint32_t ent = core->mac[VTIVAR_MISC + vfn];
2387 uint32_t causes;
2388
2389 if ((ent & E1000_IVAR_VALID)) {
2390 causes = (ent & 0x3) << (22 - vfn * IGBVF_MSIX_VEC_NUM);
2391 igb_raise_interrupts(core, EICR, causes);
2392 }
2393 }
2394
2395 static void mailbox_interrupt_to_pf(IGBCore *core)
2396 {
2397 igb_raise_interrupts(core, ICR, E1000_ICR_VMMB);
2398 }
2399
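/*
 * PF side of the PF<->VF mailbox: posting STS/ACK notifies the peer VF
 * (raising its mailbox interrupt), PFU/VFU arbitrate buffer ownership,
 * and RVFU force-releases a VF's claim.
 */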
2400 static void igb_set_pfmailbox(IGBCore *core, int index, uint32_t val)
2401 {
2402 uint16_t vfn = index - P2VMAILBOX0;
2403
2404 trace_igb_set_pfmailbox(vfn, val);
2405
2406 if (val & E1000_P2VMAILBOX_STS) {
2407 core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_PFSTS;
2408 mailbox_interrupt_to_vf(core, vfn);
2409 }
2410
2411 if (val & E1000_P2VMAILBOX_ACK) {
2412 core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_PFACK;
2413 mailbox_interrupt_to_vf(core, vfn);
2414 }
2415
2416 /* Buffer Taken by PF (can be set only if the VFU is cleared). */
2417 if (val & E1000_P2VMAILBOX_PFU) {
2418 if (!(core->mac[index] & E1000_P2VMAILBOX_VFU)) {
2419 core->mac[index] |= E1000_P2VMAILBOX_PFU;
2420 core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_PFU;
2421 }
2422 } else {
2423 core->mac[index] &= ~E1000_P2VMAILBOX_PFU;
2424 core->mac[V2PMAILBOX0 + vfn] &= ~E1000_V2PMAILBOX_PFU;
2425 }
2426
2427 if (val & E1000_P2VMAILBOX_RVFU) {
2428 core->mac[V2PMAILBOX0 + vfn] &= ~E1000_V2PMAILBOX_VFU;
2429 core->mac[MBVFICR] &= ~((E1000_MBVFICR_VFACK_VF1 << vfn) |
2430 (E1000_MBVFICR_VFREQ_VF1 << vfn));
2431 }
2432 }
2433
2434 static void igb_set_vfmailbox(IGBCore *core, int index, uint32_t val)
2435 {
2436 uint16_t vfn = index - V2PMAILBOX0;
2437
2438 trace_igb_set_vfmailbox(vfn, val);
2439
2440 if (val & E1000_V2PMAILBOX_REQ) {
2441 core->mac[MBVFICR] |= E1000_MBVFICR_VFREQ_VF1 << vfn;
2442 mailbox_interrupt_to_pf(core);
2443 }
2444
2445 if (val & E1000_V2PMAILBOX_ACK) {
2446 core->mac[MBVFICR] |= E1000_MBVFICR_VFACK_VF1 << vfn;
2447 mailbox_interrupt_to_pf(core);
2448 }
2449
2450 /* Buffer Taken by VF (can be set only if the PFU is cleared). */
2451 if (val & E1000_V2PMAILBOX_VFU) {
2452 if (!(core->mac[index] & E1000_V2PMAILBOX_PFU)) {
2453 core->mac[index] |= E1000_V2PMAILBOX_VFU;
2454 core->mac[P2VMAILBOX0 + vfn] |= E1000_P2VMAILBOX_VFU;
2455 }
2456 } else {
2457 core->mac[index] &= ~E1000_V2PMAILBOX_VFU;
2458 core->mac[P2VMAILBOX0 + vfn] &= ~E1000_P2VMAILBOX_VFU;
2459 }
2460 }
2461
2462 void igb_core_vf_reset(IGBCore *core, uint16_t vfn)
2463 {
2464 uint16_t qn0 = vfn;
2465 uint16_t qn1 = vfn + IGB_NUM_VM_POOLS;
2466
2467 trace_igb_core_vf_reset(vfn);
2468
2469 /* Disable Rx and Tx for the VF */
2470 core->mac[RXDCTL0 + (qn0 * 16)] &= ~E1000_RXDCTL_QUEUE_ENABLE;
2471 core->mac[RXDCTL0 + (qn1 * 16)] &= ~E1000_RXDCTL_QUEUE_ENABLE;
2472 core->mac[TXDCTL0 + (qn0 * 16)] &= ~E1000_TXDCTL_QUEUE_ENABLE;
2473 core->mac[TXDCTL0 + (qn1 * 16)] &= ~E1000_TXDCTL_QUEUE_ENABLE;
2474 core->mac[VFRE] &= ~BIT(vfn);
2475 core->mac[VFTE] &= ~BIT(vfn);
2476 /* indicate VF reset to PF */
2477 core->mac[VFLRE] |= BIT(vfn);
2478 /* VFLRE and mailbox use the same interrupt cause */
2479 mailbox_interrupt_to_pf(core);
2480 }
2481
2482 static void igb_w1c(IGBCore *core, int index, uint32_t val)
2483 {
2484 core->mac[index] &= ~val;
2485 }
2486
2487 static void igb_set_eimc(IGBCore *core, int index, uint32_t val)
2488 {
2489 bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE);
2490 uint32_t mask = msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK;
2491
2492 trace_igb_irq_write_eimc(val, msix);
2493
2494 /* Interrupts are disabled via a write to EIMC and reflected in EIMS. */
2495 igb_lower_interrupts(core, EIMS, val & mask);
2496 }
2497
2498 static void igb_set_eiac(IGBCore *core, int index, uint32_t val)
2499 {
2500 bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE);
2501
2502 if (msix) {
2503 trace_igb_irq_write_eiac(val);
2504
2505 /*
2506 * TODO: When using IOV, the bits that correspond to MSI-X vectors
2507 * that are assigned to a VF are read-only.
2508 */
2509 core->mac[EIAC] |= (val & E1000_EICR_MSIX_MASK);
2510 }
2511 }
2512
2513 static void igb_set_eiam(IGBCore *core, int index, uint32_t val)
2514 {
2515 bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE);
2516
2517 /*
2518 * TODO: When using IOV, the bits that correspond to MSI-X vectors that
2519 * are assigned to a VF are read-only.
2520 */
2521 core->mac[EIAM] |=
2522 ~(val & (msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK));
2523
2524 trace_igb_irq_write_eiam(val, msix);
2525 }
2526
2527 static void igb_set_eicr(IGBCore *core, int index, uint32_t val)
2528 {
2529 bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE);
2530
2531 /*
2532 * TODO: In IOV mode, only bit zero of this vector is available for the PF
2533 * function.
2534 */
2535 uint32_t mask = msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK;
2536
2537 trace_igb_irq_write_eicr(val, msix);
2538 igb_lower_interrupts(core, EICR, val & mask);
2539 }
2540
2541 static void igb_set_vtctrl(IGBCore *core, int index, uint32_t val)
2542 {
2543 uint16_t vfn;
2544
2545 if (val & E1000_CTRL_RST) {
2546 vfn = (index - PVTCTRL0) / 0x40;
2547 igb_core_vf_reset(core, vfn);
2548 }
2549 }
2550
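/*
 * The PVT* registers below are the per-VF aliases of the extended
 * interrupt registers (one copy per VF). Each write is folded into the
 * corresponding PF register with the VF's vector bits shifted into its
 * slice of the 32-bit cause space.
 */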
2551 static void igb_set_vteics(IGBCore *core, int index, uint32_t val)
2552 {
2553 uint16_t vfn = (index - PVTEICS0) / 0x40;
2554
2555 core->mac[index] = val;
2556 igb_set_eics(core, EICS, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM));
2557 }
2558
2559 static void igb_set_vteims(IGBCore *core, int index, uint32_t val)
2560 {
2561 uint16_t vfn = (index - PVTEIMS0) / 0x40;
2562
2563 core->mac[index] = val;
2564 igb_set_eims(core, EIMS, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM));
2565 }
2566
2567 static void igb_set_vteimc(IGBCore *core, int index, uint32_t val)
2568 {
2569 uint16_t vfn = (index - PVTEIMC0) / 0x40;
2570
2571 core->mac[index] = val;
2572 igb_set_eimc(core, EIMC, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM));
2573 }
2574
2575 static void igb_set_vteiac(IGBCore *core, int index, uint32_t val)
2576 {
2577 uint16_t vfn = (index - PVTEIAC0) / 0x40;
2578
2579 core->mac[index] = val;
2580 igb_set_eiac(core, EIAC, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM));
2581 }
2582
2583 static void igb_set_vteiam(IGBCore *core, int index, uint32_t val)
2584 {
2585 uint16_t vfn = (index - PVTEIAM0) / 0x40;
2586
2587 core->mac[index] = val;
2588 igb_set_eiam(core, EIAM, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM));
2589 }
2590
2591 static void igb_set_vteicr(IGBCore *core, int index, uint32_t val)
2592 {
2593 uint16_t vfn = (index - PVTEICR0) / 0x40;
2594
2595 core->mac[index] = val;
2596 igb_set_eicr(core, EICR, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM));
2597 }
2598
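/*
 * Writes to a VF's VTIVAR are mirrored into the PF IVAR table: the VF's
 * relative vector number is rebased onto the absolute MSI-X vector range
 * assigned to that VF before being stored in the Rx#0/Tx#0 entries.
 */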
2599 static void igb_set_vtivar(IGBCore *core, int index, uint32_t val)
2600 {
2601 uint16_t vfn = (index - VTIVAR);
2602 uint16_t qn = vfn;
2603 uint8_t ent;
2604 int n;
2605
2606 core->mac[index] = val;
2607
2608 /* Get assigned vector associated with queue Rx#0. */
2609 if ((val & E1000_IVAR_VALID)) {
2610 n = igb_ivar_entry_rx(qn);
2611 ent = E1000_IVAR_VALID | (24 - vfn * IGBVF_MSIX_VEC_NUM - (2 - (val & 0x7)));
2612 core->mac[IVAR0 + n / 4] |= ent << 8 * (n % 4);
2613 }
2614
2615 /* Get assigned vector associated with queue Tx#0 */
2616 ent = val >> 8;
2617 if ((ent & E1000_IVAR_VALID)) {
2618 n = igb_ivar_entry_tx(qn);
2619 ent = E1000_IVAR_VALID | (24 - vfn * IGBVF_MSIX_VEC_NUM - (2 - (ent & 0x7)));
2620 core->mac[IVAR0 + n / 4] |= ent << 8 * (n % 4);
2621 }
2622
2623 /*
2624 * Ignoring assigned vectors associated with queues Rx#1 and Tx#1 for now.
2625 */
2626 }
2627
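/*
 * Deferred autonegotiation completion: when the timer fires with the
 * link up, finish autoneg, restart reception and signal LSC to the guest.
 */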
2628 static inline void
2629 igb_autoneg_timer(void *opaque)
2630 {
2631 IGBCore *core = opaque;
2632 if (!qemu_get_queue(core->owner_nic)->link_down) {
2633 e1000x_update_regs_on_autoneg_done(core->mac, core->phy);
2634 igb_start_recv(core);
2635
2636 igb_update_flowctl_status(core);
2637 /* signal link status change to the guest */
2638 igb_raise_interrupts(core, ICR, E1000_ICR_LSC);
2639 }
2640 }
2641
2642 static inline uint16_t
2643 igb_get_reg_index_with_offset(const uint16_t *mac_reg_access, hwaddr addr)
2644 {
2645 uint16_t index = (addr & 0x1ffff) >> 2;
2646 return index + (mac_reg_access[index] & 0xfffe);
2647 }
2648
2649 static const char igb_phy_regcap[MAX_PHY_REG_ADDRESS + 1] = {
2650 [MII_BMCR] = PHY_RW,
2651 [MII_BMSR] = PHY_R,
2652 [MII_PHYID1] = PHY_R,
2653 [MII_PHYID2] = PHY_R,
2654 [MII_ANAR] = PHY_RW,
2655 [MII_ANLPAR] = PHY_R,
2656 [MII_ANER] = PHY_R,
2657 [MII_ANNP] = PHY_RW,
2658 [MII_ANLPRNP] = PHY_R,
2659 [MII_CTRL1000] = PHY_RW,
2660 [MII_STAT1000] = PHY_R,
2661 [MII_EXTSTAT] = PHY_R,
2662
2663 [IGP01E1000_PHY_PORT_CONFIG] = PHY_RW,
2664 [IGP01E1000_PHY_PORT_STATUS] = PHY_R,
2665 [IGP01E1000_PHY_PORT_CTRL] = PHY_RW,
2666 [IGP01E1000_PHY_LINK_HEALTH] = PHY_R,
2667 [IGP02E1000_PHY_POWER_MGMT] = PHY_RW,
2668 [IGP01E1000_PHY_PAGE_SELECT] = PHY_W
2669 };
2670
2671 static void
2672 igb_phy_reg_write(IGBCore *core, uint32_t addr, uint16_t data)
2673 {
2674 assert(addr <= MAX_PHY_REG_ADDRESS);
2675
2676 if (addr == MII_BMCR) {
2677 igb_set_phy_ctrl(core, data);
2678 } else {
2679 core->phy[addr] = data;
2680 }
2681 }
2682
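/*
 * Emulate an MDIO transaction through the MDIC register: decode the PHY
 * and register addresses, perform the read or write against the emulated
 * PHY, then set READY (and ERROR where applicable) and optionally raise
 * the MDAC interrupt.
 */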
2683 static void
2684 igb_set_mdic(IGBCore *core, int index, uint32_t val)
2685 {
2686 uint32_t data = val & E1000_MDIC_DATA_MASK;
2687 uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
2688
2689 if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) { /* phy # */
2690 val = core->mac[MDIC] | E1000_MDIC_ERROR;
2691 } else if (val & E1000_MDIC_OP_READ) {
2692 if (!(igb_phy_regcap[addr] & PHY_R)) {
2693 trace_igb_core_mdic_read_unhandled(addr);
2694 val |= E1000_MDIC_ERROR;
2695 } else {
2696 val = (val ^ data) | core->phy[addr];
2697 trace_igb_core_mdic_read(addr, val);
2698 }
2699 } else if (val & E1000_MDIC_OP_WRITE) {
2700 if (!(igb_phy_regcap[addr] & PHY_W)) {
2701 trace_igb_core_mdic_write_unhandled(addr);
2702 val |= E1000_MDIC_ERROR;
2703 } else {
2704 trace_igb_core_mdic_write(addr, data);
2705 igb_phy_reg_write(core, addr, data);
2706 }
2707 }
2708 core->mac[MDIC] = val | E1000_MDIC_READY;
2709
2710 if (val & E1000_MDIC_INT_EN) {
2711 igb_raise_interrupts(core, ICR, E1000_ICR_MDAC);
2712 }
2713 }
2714
2715 static void
2716 igb_set_rdt(IGBCore *core, int index, uint32_t val)
2717 {
2718 core->mac[index] = val & 0xffff;
2719 trace_e1000e_rx_set_rdt(igb_mq_queue_idx(RDT0, index), val);
2720 igb_start_recv(core);
2721 }
2722
2723 static void
2724 igb_set_status(IGBCore *core, int index, uint32_t val)
2725 {
2726 if ((val & E1000_STATUS_PHYRA) == 0) {
2727 core->mac[index] &= ~E1000_STATUS_PHYRA;
2728 }
2729 }
2730
2731 static void
2732 igb_set_ctrlext(IGBCore *core, int index, uint32_t val)
2733 {
2734 trace_igb_link_set_ext_params(!!(val & E1000_CTRL_EXT_ASDCHK),
2735 !!(val & E1000_CTRL_EXT_SPD_BYPS),
2736 !!(val & E1000_CTRL_EXT_PFRSTD));
2737
2738 /* Zero self-clearing bits */
2739 val &= ~(E1000_CTRL_EXT_ASDCHK | E1000_CTRL_EXT_EE_RST);
2740 core->mac[CTRL_EXT] = val;
2741
2742 if (core->mac[CTRL_EXT] & E1000_CTRL_EXT_PFRSTD) {
2743 for (int vfn = 0; vfn < IGB_MAX_VF_FUNCTIONS; vfn++) {
2744 core->mac[V2PMAILBOX0 + vfn] &= ~E1000_V2PMAILBOX_RSTI;
2745 core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_RSTD;
2746 }
2747 }
2748 }
2749
2750 static void
2751 igb_set_pbaclr(IGBCore *core, int index, uint32_t val)
2752 {
2753 int i;
2754
2755 core->mac[PBACLR] = val & E1000_PBACLR_VALID_MASK;
2756
2757 if (!msix_enabled(core->owner)) {
2758 return;
2759 }
2760
2761 for (i = 0; i < IGB_INTR_NUM; i++) {
2762 if (core->mac[PBACLR] & BIT(i)) {
2763 msix_clr_pending(core->owner, i);
2764 }
2765 }
2766 }
2767
2768 static void
2769 igb_set_fcrth(IGBCore *core, int index, uint32_t val)
2770 {
2771 core->mac[FCRTH] = val & 0xFFF8;
2772 }
2773
2774 static void
2775 igb_set_fcrtl(IGBCore *core, int index, uint32_t val)
2776 {
2777 core->mac[FCRTL] = val & 0x8000FFF8;
2778 }
2779
2780 #define IGB_LOW_BITS_SET_FUNC(num) \
2781 static void \
2782 igb_set_##num##bit(IGBCore *core, int index, uint32_t val) \
2783 { \
2784 core->mac[index] = val & (BIT(num) - 1); \
2785 }
2786
2787 IGB_LOW_BITS_SET_FUNC(4)
2788 IGB_LOW_BITS_SET_FUNC(13)
2789 IGB_LOW_BITS_SET_FUNC(16)
2790
2791 static void
2792 igb_set_dlen(IGBCore *core, int index, uint32_t val)
2793 {
2794 core->mac[index] = val & 0xffff0;
2795 }
2796
2797 static void
2798 igb_set_dbal(IGBCore *core, int index, uint32_t val)
2799 {
2800 core->mac[index] = val & E1000_XDBAL_MASK;
2801 }
2802
2803 static void
2804 igb_set_tdt(IGBCore *core, int index, uint32_t val)
2805 {
2806 IGB_TxRing txr;
2807 int qn = igb_mq_queue_idx(TDT0, index);
2808
2809 core->mac[index] = val & 0xffff;
2810
2811 igb_tx_ring_init(core, &txr, qn);
2812 igb_start_xmit(core, &txr);
2813 }
2814
2815 static void
2816 igb_set_ics(IGBCore *core, int index, uint32_t val)
2817 {
2818 trace_e1000e_irq_write_ics(val);
2819 igb_raise_interrupts(core, ICR, val);
2820 }
2821
2822 static void
2823 igb_set_imc(IGBCore *core, int index, uint32_t val)
2824 {
2825 trace_e1000e_irq_ims_clear_set_imc(val);
2826 igb_lower_interrupts(core, IMS, val);
2827 }
2828
2829 static void
2830 igb_set_ims(IGBCore *core, int index, uint32_t val)
2831 {
2832 igb_raise_interrupts(core, IMS, val & 0x77D4FBFD);
2833 }
2834
2835 static void igb_nsicr(IGBCore *core)
2836 {
2837 /*
2838 * If GPIE.NSICR = 0, then the clear of IMS will occur only if at
2839 * least one bit is set in the IMS and there is a true interrupt as
2840 * reflected in ICR.INTA.
2841 */
2842 if ((core->mac[GPIE] & E1000_GPIE_NSICR) ||
2843 (core->mac[IMS] && (core->mac[ICR] & E1000_ICR_INT_ASSERTED))) {
2844 igb_lower_interrupts(core, IMS, core->mac[IAM]);
2845 }
2846 }
2847
2848 static void igb_set_icr(IGBCore *core, int index, uint32_t val)
2849 {
2850 igb_nsicr(core);
2851 igb_lower_interrupts(core, ICR, val);
2852 }
2853
2854 static uint32_t
2855 igb_mac_readreg(IGBCore *core, int index)
2856 {
2857 return core->mac[index];
2858 }
2859
2860 static uint32_t
2861 igb_mac_ics_read(IGBCore *core, int index)
2862 {
2863 trace_e1000e_irq_read_ics(core->mac[ICS]);
2864 return core->mac[ICS];
2865 }
2866
2867 static uint32_t
2868 igb_mac_ims_read(IGBCore *core, int index)
2869 {
2870 trace_e1000e_irq_read_ims(core->mac[IMS]);
2871 return core->mac[IMS];
2872 }
2873
2874 static uint32_t
2875 igb_mac_swsm_read(IGBCore *core, int index)
2876 {
2877 uint32_t val = core->mac[SWSM];
2878 core->mac[SWSM] = val | E1000_SWSM_SMBI;
2879 return val;
2880 }
2881
2882 static uint32_t
2883 igb_mac_eitr_read(IGBCore *core, int index)
2884 {
2885 return core->eitr_guest_value[index - EITR0];
2886 }
2887
2888 static uint32_t igb_mac_vfmailbox_read(IGBCore *core, int index)
2889 {
2890 uint32_t val = core->mac[index];
2891
2892 core->mac[index] &= ~(E1000_V2PMAILBOX_PFSTS | E1000_V2PMAILBOX_PFACK |
2893 E1000_V2PMAILBOX_RSTD);
2894
2895 return val;
2896 }
2897
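/*
 * ICR is cleared on read in most cases; with MSI-X enabled, a
 * non-asserted ICR survives the read unless GPIE.NSICR is set or IMS is
 * empty, as detailed below.
 */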
2898 static uint32_t
2899 igb_mac_icr_read(IGBCore *core, int index)
2900 {
2901 uint32_t ret = core->mac[ICR];
2902
2903 if (core->mac[GPIE] & E1000_GPIE_NSICR) {
2904 trace_igb_irq_icr_clear_gpie_nsicr();
2905 igb_lower_interrupts(core, ICR, 0xffffffff);
2906 } else if (core->mac[IMS] == 0) {
2907 trace_e1000e_irq_icr_clear_zero_ims();
2908 igb_lower_interrupts(core, ICR, 0xffffffff);
2909 } else if (core->mac[ICR] & E1000_ICR_INT_ASSERTED) {
2910 igb_lower_interrupts(core, ICR, 0xffffffff);
2911 } else if (!msix_enabled(core->owner)) {
2912 trace_e1000e_irq_icr_clear_nonmsix_icr_read();
2913 igb_lower_interrupts(core, ICR, 0xffffffff);
2914 }
2915
2916 igb_nsicr(core);
2917 return ret;
2918 }
2919
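/*
 * Statistics counters are clear-on-read; 64-bit counters are read via
 * their high half, which zeroes both halves (igb_mac_read_clr8).
 */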
2920 static uint32_t
2921 igb_mac_read_clr4(IGBCore *core, int index)
2922 {
2923 uint32_t ret = core->mac[index];
2924
2925 core->mac[index] = 0;
2926 return ret;
2927 }
2928
2929 static uint32_t
2930 igb_mac_read_clr8(IGBCore *core, int index)
2931 {
2932 uint32_t ret = core->mac[index];
2933
2934 core->mac[index] = 0;
2935 core->mac[index - 1] = 0;
2936 return ret;
2937 }
2938
2939 static uint32_t
2940 igb_get_ctrl(IGBCore *core, int index)
2941 {
2942 uint32_t val = core->mac[CTRL];
2943
2944 trace_e1000e_link_read_params(
2945 !!(val & E1000_CTRL_ASDE),
2946 (val & E1000_CTRL_SPD_SEL) >> E1000_CTRL_SPD_SHIFT,
2947 !!(val & E1000_CTRL_FRCSPD),
2948 !!(val & E1000_CTRL_FRCDPX),
2949 !!(val & E1000_CTRL_RFCE),
2950 !!(val & E1000_CTRL_TFCE));
2951
2952 return val;
2953 }
2954
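/*
 * STATUS is synthesized on read: duplex and speed reflect the forced
 * CTRL settings (defaulting to full-duplex 1000 Mb/s), and the SR-IOV
 * fields report the number of currently enabled VFs.
 */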
2955 static uint32_t igb_get_status(IGBCore *core, int index)
2956 {
2957 uint32_t res = core->mac[STATUS];
2958 uint16_t num_vfs = pcie_sriov_num_vfs(core->owner);
2959
2960 if (core->mac[CTRL] & E1000_CTRL_FRCDPX) {
2961 res |= (core->mac[CTRL] & E1000_CTRL_FD) ? E1000_STATUS_FD : 0;
2962 } else {
2963 res |= E1000_STATUS_FD;
2964 }
2965
2966 if ((core->mac[CTRL] & E1000_CTRL_FRCSPD) ||
2967 (core->mac[CTRL_EXT] & E1000_CTRL_EXT_SPD_BYPS)) {
2968 switch (core->mac[CTRL] & E1000_CTRL_SPD_SEL) {
2969 case E1000_CTRL_SPD_10:
2970 res |= E1000_STATUS_SPEED_10;
2971 break;
2972 case E1000_CTRL_SPD_100:
2973 res |= E1000_STATUS_SPEED_100;
2974 break;
2975 case E1000_CTRL_SPD_1000:
2976 default:
2977 res |= E1000_STATUS_SPEED_1000;
2978 break;
2979 }
2980 } else {
2981 res |= E1000_STATUS_SPEED_1000;
2982 }
2983
2984 if (num_vfs) {
2985 res |= num_vfs << E1000_STATUS_NUM_VFS_SHIFT;
2986 res |= E1000_STATUS_IOV_MODE;
2987 }
2988
2989 if (!(core->mac[CTRL] & E1000_CTRL_GIO_MASTER_DISABLE)) {
2990 res |= E1000_STATUS_GIO_MASTER_ENABLE;
2991 }
2992
2993 return res;
2994 }
2995
2996 static void
2997 igb_mac_writereg(IGBCore *core, int index, uint32_t val)
2998 {
2999 core->mac[index] = val;
3000 }
3001
3002 static void
3003 igb_mac_setmacaddr(IGBCore *core, int index, uint32_t val)
3004 {
3005 uint32_t macaddr[2];
3006
3007 core->mac[index] = val;
3008
3009 macaddr[0] = cpu_to_le32(core->mac[RA]);
3010 macaddr[1] = cpu_to_le32(core->mac[RA + 1]);
3011 qemu_format_nic_info_str(qemu_get_queue(core->owner_nic),
3012 (uint8_t *) macaddr);
3013
3014 trace_e1000e_mac_set_sw(MAC_ARG(macaddr));
3015 }
3016
3017 static void
3018 igb_set_eecd(IGBCore *core, int index, uint32_t val)
3019 {
3020 static const uint32_t ro_bits = E1000_EECD_PRES |
3021 E1000_EECD_AUTO_RD |
3022 E1000_EECD_SIZE_EX_MASK;
3023
3024 core->mac[EECD] = (core->mac[EECD] & ro_bits) | (val & ~ro_bits);
3025 }
3026
3027 static void
3028 igb_set_eerd(IGBCore *core, int index, uint32_t val)
3029 {
3030 uint32_t addr = (val >> E1000_EERW_ADDR_SHIFT) & E1000_EERW_ADDR_MASK;
3031 uint32_t flags = 0;
3032 uint32_t data = 0;
3033
3034 if ((addr < IGB_EEPROM_SIZE) && (val & E1000_EERW_START)) {
3035 data = core->eeprom[addr];
3036 flags = E1000_EERW_DONE;
3037 }
3038
3039 core->mac[EERD] = flags |
3040 (addr << E1000_EERW_ADDR_SHIFT) |
3041 (data << E1000_EERW_DATA_SHIFT);
3042 }
3043
3044 static void
3045 igb_set_eitr(IGBCore *core, int index, uint32_t val)
3046 {
3047 uint32_t eitr_num = index - EITR0;
3048
3049 trace_igb_irq_eitr_set(eitr_num, val);
3050
3051 core->eitr_guest_value[eitr_num] = val & ~E1000_EITR_CNT_IGNR;
3052 core->mac[index] = val & 0x7FFE;
3053 }
3054
3055 static void
3056 igb_update_rx_offloads(IGBCore *core)
3057 {
3058 int cso_state = igb_rx_l4_cso_enabled(core);
3059
3060 trace_e1000e_rx_set_cso(cso_state);
3061
3062 if (core->has_vnet) {
3063 qemu_set_offload(qemu_get_queue(core->owner_nic)->peer,
3064 cso_state, 0, 0, 0, 0, 0, 0);
3065 }
3066 }
3067
3068 static void
3069 igb_set_rxcsum(IGBCore *core, int index, uint32_t val)
3070 {
3071 core->mac[RXCSUM] = val;
3072 igb_update_rx_offloads(core);
3073 }
3074
3075 static void
3076 igb_set_gcr(IGBCore *core, int index, uint32_t val)
3077 {
3078 uint32_t ro_bits = core->mac[GCR] & E1000_GCR_RO_BITS;
3079 core->mac[GCR] = (val & ~E1000_GCR_RO_BITS) | ro_bits;
3080 }
3081
3082 static uint32_t igb_get_systiml(IGBCore *core, int index)
3083 {
3084 e1000x_timestamp(core->mac, core->timadj, SYSTIML, SYSTIMH);
3085 return core->mac[SYSTIML];
3086 }
3087
3088 static uint32_t igb_get_rxsatrh(IGBCore *core, int index)
3089 {
3090 core->mac[TSYNCRXCTL] &= ~E1000_TSYNCRXCTL_VALID;
3091 return core->mac[RXSATRH];
3092 }
3093
3094 static uint32_t igb_get_txstmph(IGBCore *core, int index)
3095 {
3096 core->mac[TSYNCTXCTL] &= ~E1000_TSYNCTXCTL_VALID;
3097 return core->mac[TXSTMPH];
3098 }
3099
3100 static void igb_set_timinca(IGBCore *core, int index, uint32_t val)
3101 {
3102 e1000x_set_timinca(core->mac, &core->timadj, val);
3103 }
3104
3105 static void igb_set_timadjh(IGBCore *core, int index, uint32_t val)
3106 {
3107 core->mac[TIMADJH] = val;
3108 core->timadj += core->mac[TIMADJL] | ((int64_t)core->mac[TIMADJH] << 32);
3109 }
3110
3111 #define igb_getreg(x) [x] = igb_mac_readreg
3112 typedef uint32_t (*readops)(IGBCore *, int);
3113 static const readops igb_macreg_readops[] = {
3114 igb_getreg(WUFC),
3115 igb_getreg(MANC),
3116 igb_getreg(TOTL),
3117 igb_getreg(RDT0),
3118 igb_getreg(RDT1),
3119 igb_getreg(RDT2),
3120 igb_getreg(RDT3),
3121 igb_getreg(RDT4),
3122 igb_getreg(RDT5),
3123 igb_getreg(RDT6),
3124 igb_getreg(RDT7),
3125 igb_getreg(RDT8),
3126 igb_getreg(RDT9),
3127 igb_getreg(RDT10),
3128 igb_getreg(RDT11),
3129 igb_getreg(RDT12),
3130 igb_getreg(RDT13),
3131 igb_getreg(RDT14),
3132 igb_getreg(RDT15),
3133 igb_getreg(RDBAH0),
3134 igb_getreg(RDBAH1),
3135 igb_getreg(RDBAH2),
3136 igb_getreg(RDBAH3),
3137 igb_getreg(RDBAH4),
3138 igb_getreg(RDBAH5),
3139 igb_getreg(RDBAH6),
3140 igb_getreg(RDBAH7),
3141 igb_getreg(RDBAH8),
3142 igb_getreg(RDBAH9),
3143 igb_getreg(RDBAH10),
3144 igb_getreg(RDBAH11),
3145 igb_getreg(RDBAH12),
3146 igb_getreg(RDBAH13),
3147 igb_getreg(RDBAH14),
3148 igb_getreg(RDBAH15),
3149 igb_getreg(TDBAL0),
3150 igb_getreg(TDBAL1),
3151 igb_getreg(TDBAL2),
3152 igb_getreg(TDBAL3),
3153 igb_getreg(TDBAL4),
3154 igb_getreg(TDBAL5),
3155 igb_getreg(TDBAL6),
3156 igb_getreg(TDBAL7),
3157 igb_getreg(TDBAL8),
3158 igb_getreg(TDBAL9),
3159 igb_getreg(TDBAL10),
3160 igb_getreg(TDBAL11),
3161 igb_getreg(TDBAL12),
3162 igb_getreg(TDBAL13),
3163 igb_getreg(TDBAL14),
3164 igb_getreg(TDBAL15),
3165 igb_getreg(RDLEN0),
3166 igb_getreg(RDLEN1),
3167 igb_getreg(RDLEN2),
3168 igb_getreg(RDLEN3),
3169 igb_getreg(RDLEN4),
3170 igb_getreg(RDLEN5),
3171 igb_getreg(RDLEN6),
3172 igb_getreg(RDLEN7),
3173 igb_getreg(RDLEN8),
3174 igb_getreg(RDLEN9),
3175 igb_getreg(RDLEN10),
3176 igb_getreg(RDLEN11),
3177 igb_getreg(RDLEN12),
3178 igb_getreg(RDLEN13),
3179 igb_getreg(RDLEN14),
3180 igb_getreg(RDLEN15),
3181 igb_getreg(SRRCTL0),
3182 igb_getreg(SRRCTL1),
3183 igb_getreg(SRRCTL2),
3184 igb_getreg(SRRCTL3),
3185 igb_getreg(SRRCTL4),
3186 igb_getreg(SRRCTL5),
3187 igb_getreg(SRRCTL6),
3188 igb_getreg(SRRCTL7),
3189 igb_getreg(SRRCTL8),
3190 igb_getreg(SRRCTL9),
3191 igb_getreg(SRRCTL10),
3192 igb_getreg(SRRCTL11),
3193 igb_getreg(SRRCTL12),
3194 igb_getreg(SRRCTL13),
3195 igb_getreg(SRRCTL14),
3196 igb_getreg(SRRCTL15),
3197 igb_getreg(LATECOL),
3198 igb_getreg(XONTXC),
3199 igb_getreg(TDFH),
3200 igb_getreg(TDFT),
3201 igb_getreg(TDFHS),
3202 igb_getreg(TDFTS),
3203 igb_getreg(TDFPC),
3204 igb_getreg(WUS),
3205 igb_getreg(RDFH),
3206 igb_getreg(RDFT),
3207 igb_getreg(RDFHS),
3208 igb_getreg(RDFTS),
3209 igb_getreg(RDFPC),
3210 igb_getreg(GORCL),
3211 igb_getreg(MGTPRC),
3212 igb_getreg(EERD),
3213 igb_getreg(EIAC),
3214 igb_getreg(MANC2H),
3215 igb_getreg(RXCSUM),
3216 igb_getreg(GSCL_3),
3217 igb_getreg(GSCN_2),
3218 igb_getreg(FCAH),
3219 igb_getreg(FCRTH),
3220 igb_getreg(FLOP),
3221 igb_getreg(RXSTMPH),
3222 igb_getreg(TXSTMPL),
3223 igb_getreg(TIMADJL),
3224 igb_getreg(RDH0),
3225 igb_getreg(RDH1),
3226 igb_getreg(RDH2),
3227 igb_getreg(RDH3),
3228 igb_getreg(RDH4),
3229 igb_getreg(RDH5),
3230 igb_getreg(RDH6),
3231 igb_getreg(RDH7),
3232 igb_getreg(RDH8),
3233 igb_getreg(RDH9),
3234 igb_getreg(RDH10),
3235 igb_getreg(RDH11),
3236 igb_getreg(RDH12),
3237 igb_getreg(RDH13),
3238 igb_getreg(RDH14),
3239 igb_getreg(RDH15),
3240 igb_getreg(TDT0),
3241 igb_getreg(TDT1),
3242 igb_getreg(TDT2),
3243 igb_getreg(TDT3),
3244 igb_getreg(TDT4),
3245 igb_getreg(TDT5),
3246 igb_getreg(TDT6),
3247 igb_getreg(TDT7),
3248 igb_getreg(TDT8),
3249 igb_getreg(TDT9),
3250 igb_getreg(TDT10),
3251 igb_getreg(TDT11),
3252 igb_getreg(TDT12),
3253 igb_getreg(TDT13),
3254 igb_getreg(TDT14),
3255 igb_getreg(TDT15),
3256 igb_getreg(TNCRS),
3257 igb_getreg(RJC),
3258 igb_getreg(IAM),
3259 igb_getreg(GSCL_2),
3260 igb_getreg(TIPG),
3261 igb_getreg(FLMNGCTL),
3262 igb_getreg(FLMNGCNT),
3263 igb_getreg(TSYNCTXCTL),
3264 igb_getreg(EEMNGDATA),
3265 igb_getreg(CTRL_EXT),
3266 igb_getreg(SYSTIMH),
3267 igb_getreg(EEMNGCTL),
3268 igb_getreg(FLMNGDATA),
3269 igb_getreg(TSYNCRXCTL),
3270 igb_getreg(LEDCTL),
3271 igb_getreg(TCTL),
3272 igb_getreg(TCTL_EXT),
3273 igb_getreg(DTXCTL),
3274 igb_getreg(RXPBS),
3275 igb_getreg(TDH0),
3276 igb_getreg(TDH1),
3277 igb_getreg(TDH2),
3278 igb_getreg(TDH3),
3279 igb_getreg(TDH4),
3280 igb_getreg(TDH5),
3281 igb_getreg(TDH6),
3282 igb_getreg(TDH7),
3283 igb_getreg(TDH8),
3284 igb_getreg(TDH9),
3285 igb_getreg(TDH10),
3286 igb_getreg(TDH11),
3287 igb_getreg(TDH12),
3288 igb_getreg(TDH13),
3289 igb_getreg(TDH14),
3290 igb_getreg(TDH15),
3291 igb_getreg(ECOL),
3292 igb_getreg(DC),
3293 igb_getreg(RLEC),
3294 igb_getreg(XOFFTXC),
3295 igb_getreg(RFC),
3296 igb_getreg(RNBC),
3297 igb_getreg(MGTPTC),
3298 igb_getreg(TIMINCA),
3299 igb_getreg(FACTPS),
3300 igb_getreg(GSCL_1),
3301 igb_getreg(GSCN_0),
3302 igb_getreg(PBACLR),
3303 igb_getreg(FCTTV),
3304 igb_getreg(RXSATRL),
3305 igb_getreg(TORL),
3306 igb_getreg(TDLEN0),
3307 igb_getreg(TDLEN1),
3308 igb_getreg(TDLEN2),
3309 igb_getreg(TDLEN3),
3310 igb_getreg(TDLEN4),
3311 igb_getreg(TDLEN5),
3312 igb_getreg(TDLEN6),
3313 igb_getreg(TDLEN7),
3314 igb_getreg(TDLEN8),
3315 igb_getreg(TDLEN9),
3316 igb_getreg(TDLEN10),
3317 igb_getreg(TDLEN11),
3318 igb_getreg(TDLEN12),
3319 igb_getreg(TDLEN13),
3320 igb_getreg(TDLEN14),
3321 igb_getreg(TDLEN15),
3322 igb_getreg(MCC),
3323 igb_getreg(WUC),
3324 igb_getreg(EECD),
3325 igb_getreg(FCRTV),
3326 igb_getreg(TXDCTL0),
3327 igb_getreg(TXDCTL1),
3328 igb_getreg(TXDCTL2),
3329 igb_getreg(TXDCTL3),
3330 igb_getreg(TXDCTL4),
3331 igb_getreg(TXDCTL5),
3332 igb_getreg(TXDCTL6),
3333 igb_getreg(TXDCTL7),
3334 igb_getreg(TXDCTL8),
3335 igb_getreg(TXDCTL9),
3336 igb_getreg(TXDCTL10),
3337 igb_getreg(TXDCTL11),
3338 igb_getreg(TXDCTL12),
3339 igb_getreg(TXDCTL13),
3340 igb_getreg(TXDCTL14),
3341 igb_getreg(TXDCTL15),
3342 igb_getreg(TXCTL0),
3343 igb_getreg(TXCTL1),
3344 igb_getreg(TXCTL2),
3345 igb_getreg(TXCTL3),
3346 igb_getreg(TXCTL4),
3347 igb_getreg(TXCTL5),
3348 igb_getreg(TXCTL6),
3349 igb_getreg(TXCTL7),
3350 igb_getreg(TXCTL8),
3351 igb_getreg(TXCTL9),
3352 igb_getreg(TXCTL10),
3353 igb_getreg(TXCTL11),
3354 igb_getreg(TXCTL12),
3355 igb_getreg(TXCTL13),
3356 igb_getreg(TXCTL14),
3357 igb_getreg(TXCTL15),
3358 igb_getreg(TDWBAL0),
3359 igb_getreg(TDWBAL1),
3360 igb_getreg(TDWBAL2),
3361 igb_getreg(TDWBAL3),
3362 igb_getreg(TDWBAL4),
3363 igb_getreg(TDWBAL5),
3364 igb_getreg(TDWBAL6),
3365 igb_getreg(TDWBAL7),
3366 igb_getreg(TDWBAL8),
3367 igb_getreg(TDWBAL9),
3368 igb_getreg(TDWBAL10),
3369 igb_getreg(TDWBAL11),
3370 igb_getreg(TDWBAL12),
3371 igb_getreg(TDWBAL13),
3372 igb_getreg(TDWBAL14),
3373 igb_getreg(TDWBAL15),
3374 igb_getreg(TDWBAH0),
3375 igb_getreg(TDWBAH1),
3376 igb_getreg(TDWBAH2),
3377 igb_getreg(TDWBAH3),
3378 igb_getreg(TDWBAH4),
3379 igb_getreg(TDWBAH5),
3380 igb_getreg(TDWBAH6),
3381 igb_getreg(TDWBAH7),
3382 igb_getreg(TDWBAH8),
3383 igb_getreg(TDWBAH9),
3384 igb_getreg(TDWBAH10),
3385 igb_getreg(TDWBAH11),
3386 igb_getreg(TDWBAH12),
3387 igb_getreg(TDWBAH13),
3388 igb_getreg(TDWBAH14),
3389 igb_getreg(TDWBAH15),
3390 igb_getreg(PVTCTRL0),
3391 igb_getreg(PVTCTRL1),
3392 igb_getreg(PVTCTRL2),
3393 igb_getreg(PVTCTRL3),
3394 igb_getreg(PVTCTRL4),
3395 igb_getreg(PVTCTRL5),
3396 igb_getreg(PVTCTRL6),
3397 igb_getreg(PVTCTRL7),
3398 igb_getreg(PVTEIMS0),
3399 igb_getreg(PVTEIMS1),
3400 igb_getreg(PVTEIMS2),
3401 igb_getreg(PVTEIMS3),
3402 igb_getreg(PVTEIMS4),
3403 igb_getreg(PVTEIMS5),
3404 igb_getreg(PVTEIMS6),
3405 igb_getreg(PVTEIMS7),
3406 igb_getreg(PVTEIAC0),
3407 igb_getreg(PVTEIAC1),
3408 igb_getreg(PVTEIAC2),
3409 igb_getreg(PVTEIAC3),
3410 igb_getreg(PVTEIAC4),
3411 igb_getreg(PVTEIAC5),
3412 igb_getreg(PVTEIAC6),
3413 igb_getreg(PVTEIAC7),
3414 igb_getreg(PVTEIAM0),
3415 igb_getreg(PVTEIAM1),
3416 igb_getreg(PVTEIAM2),
3417 igb_getreg(PVTEIAM3),
3418 igb_getreg(PVTEIAM4),
3419 igb_getreg(PVTEIAM5),
3420 igb_getreg(PVTEIAM6),
3421 igb_getreg(PVTEIAM7),
3422 igb_getreg(PVFGPRC0),
3423 igb_getreg(PVFGPRC1),
3424 igb_getreg(PVFGPRC2),
3425 igb_getreg(PVFGPRC3),
3426 igb_getreg(PVFGPRC4),
3427 igb_getreg(PVFGPRC5),
3428 igb_getreg(PVFGPRC6),
3429 igb_getreg(PVFGPRC7),
3430 igb_getreg(PVFGPTC0),
3431 igb_getreg(PVFGPTC1),
3432 igb_getreg(PVFGPTC2),
3433 igb_getreg(PVFGPTC3),
3434 igb_getreg(PVFGPTC4),
3435 igb_getreg(PVFGPTC5),
3436 igb_getreg(PVFGPTC6),
3437 igb_getreg(PVFGPTC7),
3438 igb_getreg(PVFGORC0),
3439 igb_getreg(PVFGORC1),
3440 igb_getreg(PVFGORC2),
3441 igb_getreg(PVFGORC3),
3442 igb_getreg(PVFGORC4),
3443 igb_getreg(PVFGORC5),
3444 igb_getreg(PVFGORC6),
3445 igb_getreg(PVFGORC7),
3446 igb_getreg(PVFGOTC0),
3447 igb_getreg(PVFGOTC1),
3448 igb_getreg(PVFGOTC2),
3449 igb_getreg(PVFGOTC3),
3450 igb_getreg(PVFGOTC4),
3451 igb_getreg(PVFGOTC5),
3452 igb_getreg(PVFGOTC6),
3453 igb_getreg(PVFGOTC7),
3454 igb_getreg(PVFMPRC0),
3455 igb_getreg(PVFMPRC1),
3456 igb_getreg(PVFMPRC2),
3457 igb_getreg(PVFMPRC3),
3458 igb_getreg(PVFMPRC4),
3459 igb_getreg(PVFMPRC5),
3460 igb_getreg(PVFMPRC6),
3461 igb_getreg(PVFMPRC7),
3462 igb_getreg(PVFGPRLBC0),
3463 igb_getreg(PVFGPRLBC1),
3464 igb_getreg(PVFGPRLBC2),
3465 igb_getreg(PVFGPRLBC3),
3466 igb_getreg(PVFGPRLBC4),
3467 igb_getreg(PVFGPRLBC5),
3468 igb_getreg(PVFGPRLBC6),
3469 igb_getreg(PVFGPRLBC7),
3470 igb_getreg(PVFGPTLBC0),
3471 igb_getreg(PVFGPTLBC1),
3472 igb_getreg(PVFGPTLBC2),
3473 igb_getreg(PVFGPTLBC3),
3474 igb_getreg(PVFGPTLBC4),
3475 igb_getreg(PVFGPTLBC5),
3476 igb_getreg(PVFGPTLBC6),
3477 igb_getreg(PVFGPTLBC7),
3478 igb_getreg(PVFGORLBC0),
3479 igb_getreg(PVFGORLBC1),
3480 igb_getreg(PVFGORLBC2),
3481 igb_getreg(PVFGORLBC3),
3482 igb_getreg(PVFGORLBC4),
3483 igb_getreg(PVFGORLBC5),
3484 igb_getreg(PVFGORLBC6),
3485 igb_getreg(PVFGORLBC7),
3486 igb_getreg(PVFGOTLBC0),
3487 igb_getreg(PVFGOTLBC1),
3488 igb_getreg(PVFGOTLBC2),
3489 igb_getreg(PVFGOTLBC3),
3490 igb_getreg(PVFGOTLBC4),
3491 igb_getreg(PVFGOTLBC5),
3492 igb_getreg(PVFGOTLBC6),
3493 igb_getreg(PVFGOTLBC7),
3494 igb_getreg(RCTL),
3495 igb_getreg(MDIC),
3496 igb_getreg(FCRUC),
3497 igb_getreg(VET),
3498 igb_getreg(RDBAL0),
3499 igb_getreg(RDBAL1),
3500 igb_getreg(RDBAL2),
3501 igb_getreg(RDBAL3),
3502 igb_getreg(RDBAL4),
3503 igb_getreg(RDBAL5),
3504 igb_getreg(RDBAL6),
3505 igb_getreg(RDBAL7),
3506 igb_getreg(RDBAL8),
3507 igb_getreg(RDBAL9),
3508 igb_getreg(RDBAL10),
3509 igb_getreg(RDBAL11),
3510 igb_getreg(RDBAL12),
3511 igb_getreg(RDBAL13),
3512 igb_getreg(RDBAL14),
3513 igb_getreg(RDBAL15),
3514 igb_getreg(TDBAH0),
3515 igb_getreg(TDBAH1),
3516 igb_getreg(TDBAH2),
3517 igb_getreg(TDBAH3),
3518 igb_getreg(TDBAH4),
3519 igb_getreg(TDBAH5),
3520 igb_getreg(TDBAH6),
3521 igb_getreg(TDBAH7),
3522 igb_getreg(TDBAH8),
3523 igb_getreg(TDBAH9),
3524 igb_getreg(TDBAH10),
3525 igb_getreg(TDBAH11),
3526 igb_getreg(TDBAH12),
3527 igb_getreg(TDBAH13),
3528 igb_getreg(TDBAH14),
3529 igb_getreg(TDBAH15),
3530 igb_getreg(SCC),
3531 igb_getreg(COLC),
3532 igb_getreg(XOFFRXC),
3533 igb_getreg(IPAV),
3534 igb_getreg(GOTCL),
3535 igb_getreg(MGTPDC),
3536 igb_getreg(GCR),
3537 igb_getreg(MFVAL),
3538 igb_getreg(FUNCTAG),
3539 igb_getreg(GSCL_4),
3540 igb_getreg(GSCN_3),
3541 igb_getreg(MRQC),
3542 igb_getreg(FCT),
3543 igb_getreg(FLA),
3544 igb_getreg(RXDCTL0),
3545 igb_getreg(RXDCTL1),
3546 igb_getreg(RXDCTL2),
3547 igb_getreg(RXDCTL3),
3548 igb_getreg(RXDCTL4),
3549 igb_getreg(RXDCTL5),
    igb_getreg(RXDCTL6),
    igb_getreg(RXDCTL7),
    igb_getreg(RXDCTL8),
    igb_getreg(RXDCTL9),
    igb_getreg(RXDCTL10),
    igb_getreg(RXDCTL11),
    igb_getreg(RXDCTL12),
    igb_getreg(RXDCTL13),
    igb_getreg(RXDCTL14),
    igb_getreg(RXDCTL15),
    igb_getreg(RXSTMPL),
    igb_getreg(TIMADJH),
    igb_getreg(FCRTL),
    igb_getreg(XONRXC),
    igb_getreg(RFCTL),
    igb_getreg(GSCN_1),
    igb_getreg(FCAL),
    igb_getreg(GPIE),
    igb_getreg(TXPBS),
    igb_getreg(RLPML),

    [TOTH] = igb_mac_read_clr8,
    [GOTCH] = igb_mac_read_clr8,
    [PRC64] = igb_mac_read_clr4,
    [PRC255] = igb_mac_read_clr4,
    [PRC1023] = igb_mac_read_clr4,
    [PTC64] = igb_mac_read_clr4,
    [PTC255] = igb_mac_read_clr4,
    [PTC1023] = igb_mac_read_clr4,
    [GPRC] = igb_mac_read_clr4,
    [TPT] = igb_mac_read_clr4,
    [RUC] = igb_mac_read_clr4,
    [BPRC] = igb_mac_read_clr4,
    [MPTC] = igb_mac_read_clr4,
    [IAC] = igb_mac_read_clr4,
    [ICR] = igb_mac_icr_read,
    [STATUS] = igb_get_status,
    [ICS] = igb_mac_ics_read,
    /*
     * 8.8.10: Reading the IMC register returns the value of the IMS register.
     */
    [IMC] = igb_mac_ims_read,
    [TORH] = igb_mac_read_clr8,
    [GORCH] = igb_mac_read_clr8,
    [PRC127] = igb_mac_read_clr4,
    [PRC511] = igb_mac_read_clr4,
    [PRC1522] = igb_mac_read_clr4,
    [PTC127] = igb_mac_read_clr4,
    [PTC511] = igb_mac_read_clr4,
    [PTC1522] = igb_mac_read_clr4,
    [GPTC] = igb_mac_read_clr4,
    [TPR] = igb_mac_read_clr4,
    [ROC] = igb_mac_read_clr4,
    [MPRC] = igb_mac_read_clr4,
    [BPTC] = igb_mac_read_clr4,
    [TSCTC] = igb_mac_read_clr4,
    [CTRL] = igb_get_ctrl,
    [SWSM] = igb_mac_swsm_read,
    [IMS] = igb_mac_ims_read,
    [SYSTIML] = igb_get_systiml,
    [RXSATRH] = igb_get_rxsatrh,
    [TXSTMPH] = igb_get_txstmph,

    [CRCERRS ... MPC] = igb_mac_readreg,
    [IP6AT ... IP6AT + 3] = igb_mac_readreg,
    [IP4AT ... IP4AT + 6] = igb_mac_readreg,
    [RA ... RA + 31] = igb_mac_readreg,
    [RA2 ... RA2 + 31] = igb_mac_readreg,
    [WUPM ... WUPM + 31] = igb_mac_readreg,
    [MTA ... MTA + E1000_MC_TBL_SIZE - 1] = igb_mac_readreg,
    [VFTA ... VFTA + E1000_VLAN_FILTER_TBL_SIZE - 1] = igb_mac_readreg,
    [FFMT ... FFMT + 254] = igb_mac_readreg,
    [MDEF ... MDEF + 7] = igb_mac_readreg,
    [FTFT ... FTFT + 254] = igb_mac_readreg,
    [RETA ... RETA + 31] = igb_mac_readreg,
    [RSSRK ... RSSRK + 9] = igb_mac_readreg,
    [MAVTV0 ... MAVTV3] = igb_mac_readreg,
    [EITR0 ... EITR0 + IGB_INTR_NUM - 1] = igb_mac_eitr_read,
    [PVTEICR0] = igb_mac_read_clr4,
    [PVTEICR1] = igb_mac_read_clr4,
    [PVTEICR2] = igb_mac_read_clr4,
    [PVTEICR3] = igb_mac_read_clr4,
    [PVTEICR4] = igb_mac_read_clr4,
    [PVTEICR5] = igb_mac_read_clr4,
    [PVTEICR6] = igb_mac_read_clr4,
    [PVTEICR7] = igb_mac_read_clr4,

    /* IGB specific: */
    [FWSM] = igb_mac_readreg,
    [SW_FW_SYNC] = igb_mac_readreg,
    [HTCBDPC] = igb_mac_read_clr4,
    [EICR] = igb_mac_read_clr4,
    [EIMS] = igb_mac_readreg,
    [EIAM] = igb_mac_readreg,
    [IVAR0 ... IVAR0 + 7] = igb_mac_readreg,
    igb_getreg(IVAR_MISC),
    igb_getreg(TSYNCRXCFG),
    [ETQF0 ... ETQF0 + 7] = igb_mac_readreg,
    igb_getreg(VT_CTL),
    [P2VMAILBOX0 ... P2VMAILBOX7] = igb_mac_readreg,
    [V2PMAILBOX0 ... V2PMAILBOX7] = igb_mac_vfmailbox_read,
    igb_getreg(MBVFICR),
    [VMBMEM0 ... VMBMEM0 + 127] = igb_mac_readreg,
    igb_getreg(MBVFIMR),
    igb_getreg(VFLRE),
    igb_getreg(VFRE),
    igb_getreg(VFTE),
    igb_getreg(QDE),
    igb_getreg(DTXSWC),
    igb_getreg(RPLOLR),
    [VLVF0 ... VLVF0 + E1000_VLVF_ARRAY_SIZE - 1] = igb_mac_readreg,
    [VMVIR0 ... VMVIR7] = igb_mac_readreg,
    [VMOLR0 ... VMOLR7] = igb_mac_readreg,
    [WVBR] = igb_mac_read_clr4,
    [RQDPC0] = igb_mac_read_clr4,
    [RQDPC1] = igb_mac_read_clr4,
    [RQDPC2] = igb_mac_read_clr4,
    [RQDPC3] = igb_mac_read_clr4,
    [RQDPC4] = igb_mac_read_clr4,
    [RQDPC5] = igb_mac_read_clr4,
    [RQDPC6] = igb_mac_read_clr4,
    [RQDPC7] = igb_mac_read_clr4,
    [RQDPC8] = igb_mac_read_clr4,
    [RQDPC9] = igb_mac_read_clr4,
    [RQDPC10] = igb_mac_read_clr4,
    [RQDPC11] = igb_mac_read_clr4,
    [RQDPC12] = igb_mac_read_clr4,
    [RQDPC13] = igb_mac_read_clr4,
    [RQDPC14] = igb_mac_read_clr4,
    [RQDPC15] = igb_mac_read_clr4,
    [VTIVAR ... VTIVAR + 7] = igb_mac_readreg,
    [VTIVAR_MISC ... VTIVAR_MISC + 7] = igb_mac_readreg,
};
enum { IGB_NREADOPS = ARRAY_SIZE(igb_macreg_readops) };

#define igb_putreg(x) [x] = igb_mac_writereg
typedef void (*writeops)(IGBCore *, int, uint32_t);
static const writeops igb_macreg_writeops[] = {
    igb_putreg(SWSM),
    igb_putreg(WUFC),
    igb_putreg(RDBAH0),
    igb_putreg(RDBAH1),
    igb_putreg(RDBAH2),
    igb_putreg(RDBAH3),
    igb_putreg(RDBAH4),
    igb_putreg(RDBAH5),
    igb_putreg(RDBAH6),
    igb_putreg(RDBAH7),
    igb_putreg(RDBAH8),
    igb_putreg(RDBAH9),
    igb_putreg(RDBAH10),
    igb_putreg(RDBAH11),
    igb_putreg(RDBAH12),
    igb_putreg(RDBAH13),
    igb_putreg(RDBAH14),
    igb_putreg(RDBAH15),
    igb_putreg(SRRCTL0),
    igb_putreg(SRRCTL1),
    igb_putreg(SRRCTL2),
    igb_putreg(SRRCTL3),
    igb_putreg(SRRCTL4),
    igb_putreg(SRRCTL5),
    igb_putreg(SRRCTL6),
    igb_putreg(SRRCTL7),
    igb_putreg(SRRCTL8),
    igb_putreg(SRRCTL9),
    igb_putreg(SRRCTL10),
    igb_putreg(SRRCTL11),
    igb_putreg(SRRCTL12),
    igb_putreg(SRRCTL13),
    igb_putreg(SRRCTL14),
    igb_putreg(SRRCTL15),
    igb_putreg(RXDCTL0),
    igb_putreg(RXDCTL1),
    igb_putreg(RXDCTL2),
    igb_putreg(RXDCTL3),
    igb_putreg(RXDCTL4),
    igb_putreg(RXDCTL5),
    igb_putreg(RXDCTL6),
    igb_putreg(RXDCTL7),
    igb_putreg(RXDCTL8),
    igb_putreg(RXDCTL9),
    igb_putreg(RXDCTL10),
    igb_putreg(RXDCTL11),
    igb_putreg(RXDCTL12),
    igb_putreg(RXDCTL13),
    igb_putreg(RXDCTL14),
    igb_putreg(RXDCTL15),
    igb_putreg(LEDCTL),
    igb_putreg(TCTL),
    igb_putreg(TCTL_EXT),
    igb_putreg(DTXCTL),
    igb_putreg(RXPBS),
    igb_putreg(RQDPC0),
    igb_putreg(FCAL),
    igb_putreg(FCRUC),
    igb_putreg(WUC),
    igb_putreg(WUS),
    igb_putreg(IPAV),
    igb_putreg(TDBAH0),
    igb_putreg(TDBAH1),
    igb_putreg(TDBAH2),
    igb_putreg(TDBAH3),
    igb_putreg(TDBAH4),
    igb_putreg(TDBAH5),
    igb_putreg(TDBAH6),
    igb_putreg(TDBAH7),
    igb_putreg(TDBAH8),
    igb_putreg(TDBAH9),
    igb_putreg(TDBAH10),
    igb_putreg(TDBAH11),
    igb_putreg(TDBAH12),
    igb_putreg(TDBAH13),
    igb_putreg(TDBAH14),
    igb_putreg(TDBAH15),
    igb_putreg(IAM),
    igb_putreg(MANC),
    igb_putreg(MANC2H),
    igb_putreg(MFVAL),
    igb_putreg(FACTPS),
    igb_putreg(FUNCTAG),
    igb_putreg(GSCL_1),
    igb_putreg(GSCL_2),
    igb_putreg(GSCL_3),
    igb_putreg(GSCL_4),
    igb_putreg(GSCN_0),
    igb_putreg(GSCN_1),
    igb_putreg(GSCN_2),
    igb_putreg(GSCN_3),
    igb_putreg(MRQC),
    igb_putreg(FLOP),
    igb_putreg(FLA),
    igb_putreg(TXDCTL0),
    igb_putreg(TXDCTL1),
    igb_putreg(TXDCTL2),
    igb_putreg(TXDCTL3),
    igb_putreg(TXDCTL4),
    igb_putreg(TXDCTL5),
    igb_putreg(TXDCTL6),
    igb_putreg(TXDCTL7),
    igb_putreg(TXDCTL8),
    igb_putreg(TXDCTL9),
    igb_putreg(TXDCTL10),
    igb_putreg(TXDCTL11),
    igb_putreg(TXDCTL12),
    igb_putreg(TXDCTL13),
    igb_putreg(TXDCTL14),
    igb_putreg(TXDCTL15),
    igb_putreg(TXCTL0),
    igb_putreg(TXCTL1),
    igb_putreg(TXCTL2),
    igb_putreg(TXCTL3),
    igb_putreg(TXCTL4),
    igb_putreg(TXCTL5),
    igb_putreg(TXCTL6),
    igb_putreg(TXCTL7),
    igb_putreg(TXCTL8),
    igb_putreg(TXCTL9),
    igb_putreg(TXCTL10),
    igb_putreg(TXCTL11),
    igb_putreg(TXCTL12),
    igb_putreg(TXCTL13),
    igb_putreg(TXCTL14),
    igb_putreg(TXCTL15),
    igb_putreg(TDWBAL0),
    igb_putreg(TDWBAL1),
    igb_putreg(TDWBAL2),
    igb_putreg(TDWBAL3),
    igb_putreg(TDWBAL4),
    igb_putreg(TDWBAL5),
    igb_putreg(TDWBAL6),
    igb_putreg(TDWBAL7),
    igb_putreg(TDWBAL8),
    igb_putreg(TDWBAL9),
    igb_putreg(TDWBAL10),
    igb_putreg(TDWBAL11),
    igb_putreg(TDWBAL12),
    igb_putreg(TDWBAL13),
    igb_putreg(TDWBAL14),
    igb_putreg(TDWBAL15),
    igb_putreg(TDWBAH0),
    igb_putreg(TDWBAH1),
    igb_putreg(TDWBAH2),
    igb_putreg(TDWBAH3),
    igb_putreg(TDWBAH4),
    igb_putreg(TDWBAH5),
    igb_putreg(TDWBAH6),
    igb_putreg(TDWBAH7),
    igb_putreg(TDWBAH8),
    igb_putreg(TDWBAH9),
    igb_putreg(TDWBAH10),
    igb_putreg(TDWBAH11),
    igb_putreg(TDWBAH12),
    igb_putreg(TDWBAH13),
    igb_putreg(TDWBAH14),
    igb_putreg(TDWBAH15),
    igb_putreg(TIPG),
    igb_putreg(RXSTMPH),
    igb_putreg(RXSTMPL),
    igb_putreg(RXSATRL),
    igb_putreg(RXSATRH),
    igb_putreg(TXSTMPL),
    igb_putreg(TXSTMPH),
    igb_putreg(SYSTIML),
    igb_putreg(SYSTIMH),
    igb_putreg(TIMADJL),
    igb_putreg(TSYNCRXCTL),
    igb_putreg(TSYNCTXCTL),
    igb_putreg(EEMNGCTL),
    igb_putreg(GPIE),
    igb_putreg(TXPBS),
    igb_putreg(RLPML),
    igb_putreg(VET),

    [TDH0] = igb_set_16bit,
    [TDH1] = igb_set_16bit,
    [TDH2] = igb_set_16bit,
    [TDH3] = igb_set_16bit,
    [TDH4] = igb_set_16bit,
    [TDH5] = igb_set_16bit,
    [TDH6] = igb_set_16bit,
    [TDH7] = igb_set_16bit,
    [TDH8] = igb_set_16bit,
    [TDH9] = igb_set_16bit,
    [TDH10] = igb_set_16bit,
    [TDH11] = igb_set_16bit,
    [TDH12] = igb_set_16bit,
    [TDH13] = igb_set_16bit,
    [TDH14] = igb_set_16bit,
    [TDH15] = igb_set_16bit,
    [TDT0] = igb_set_tdt,
    [TDT1] = igb_set_tdt,
    [TDT2] = igb_set_tdt,
    [TDT3] = igb_set_tdt,
    [TDT4] = igb_set_tdt,
    [TDT5] = igb_set_tdt,
    [TDT6] = igb_set_tdt,
    [TDT7] = igb_set_tdt,
    [TDT8] = igb_set_tdt,
    [TDT9] = igb_set_tdt,
    [TDT10] = igb_set_tdt,
    [TDT11] = igb_set_tdt,
    [TDT12] = igb_set_tdt,
    [TDT13] = igb_set_tdt,
    [TDT14] = igb_set_tdt,
    [TDT15] = igb_set_tdt,
    [MDIC] = igb_set_mdic,
    [ICS] = igb_set_ics,
    [RDH0] = igb_set_16bit,
    [RDH1] = igb_set_16bit,
    [RDH2] = igb_set_16bit,
    [RDH3] = igb_set_16bit,
    [RDH4] = igb_set_16bit,
    [RDH5] = igb_set_16bit,
    [RDH6] = igb_set_16bit,
    [RDH7] = igb_set_16bit,
    [RDH8] = igb_set_16bit,
    [RDH9] = igb_set_16bit,
    [RDH10] = igb_set_16bit,
    [RDH11] = igb_set_16bit,
    [RDH12] = igb_set_16bit,
    [RDH13] = igb_set_16bit,
    [RDH14] = igb_set_16bit,
    [RDH15] = igb_set_16bit,
    [RDT0] = igb_set_rdt,
    [RDT1] = igb_set_rdt,
    [RDT2] = igb_set_rdt,
    [RDT3] = igb_set_rdt,
    [RDT4] = igb_set_rdt,
    [RDT5] = igb_set_rdt,
    [RDT6] = igb_set_rdt,
    [RDT7] = igb_set_rdt,
    [RDT8] = igb_set_rdt,
    [RDT9] = igb_set_rdt,
    [RDT10] = igb_set_rdt,
    [RDT11] = igb_set_rdt,
    [RDT12] = igb_set_rdt,
    [RDT13] = igb_set_rdt,
    [RDT14] = igb_set_rdt,
    [RDT15] = igb_set_rdt,
    [IMC] = igb_set_imc,
    [IMS] = igb_set_ims,
    [ICR] = igb_set_icr,
    [EECD] = igb_set_eecd,
    [RCTL] = igb_set_rx_control,
    [CTRL] = igb_set_ctrl,
    [EERD] = igb_set_eerd,
    [TDFH] = igb_set_13bit,
    [TDFT] = igb_set_13bit,
    [TDFHS] = igb_set_13bit,
    [TDFTS] = igb_set_13bit,
    [TDFPC] = igb_set_13bit,
    [RDFH] = igb_set_13bit,
    [RDFT] = igb_set_13bit,
    [RDFHS] = igb_set_13bit,
    [RDFTS] = igb_set_13bit,
    [RDFPC] = igb_set_13bit,
    [GCR] = igb_set_gcr,
    [RXCSUM] = igb_set_rxcsum,
    [TDLEN0] = igb_set_dlen,
    [TDLEN1] = igb_set_dlen,
    [TDLEN2] = igb_set_dlen,
    [TDLEN3] = igb_set_dlen,
    [TDLEN4] = igb_set_dlen,
    [TDLEN5] = igb_set_dlen,
    [TDLEN6] = igb_set_dlen,
    [TDLEN7] = igb_set_dlen,
    [TDLEN8] = igb_set_dlen,
    [TDLEN9] = igb_set_dlen,
    [TDLEN10] = igb_set_dlen,
    [TDLEN11] = igb_set_dlen,
    [TDLEN12] = igb_set_dlen,
    [TDLEN13] = igb_set_dlen,
    [TDLEN14] = igb_set_dlen,
    [TDLEN15] = igb_set_dlen,
    [RDLEN0] = igb_set_dlen,
    [RDLEN1] = igb_set_dlen,
    [RDLEN2] = igb_set_dlen,
    [RDLEN3] = igb_set_dlen,
    [RDLEN4] = igb_set_dlen,
    [RDLEN5] = igb_set_dlen,
    [RDLEN6] = igb_set_dlen,
    [RDLEN7] = igb_set_dlen,
    [RDLEN8] = igb_set_dlen,
    [RDLEN9] = igb_set_dlen,
    [RDLEN10] = igb_set_dlen,
    [RDLEN11] = igb_set_dlen,
    [RDLEN12] = igb_set_dlen,
    [RDLEN13] = igb_set_dlen,
    [RDLEN14] = igb_set_dlen,
    [RDLEN15] = igb_set_dlen,
    [TDBAL0] = igb_set_dbal,
    [TDBAL1] = igb_set_dbal,
    [TDBAL2] = igb_set_dbal,
    [TDBAL3] = igb_set_dbal,
    [TDBAL4] = igb_set_dbal,
    [TDBAL5] = igb_set_dbal,
    [TDBAL6] = igb_set_dbal,
    [TDBAL7] = igb_set_dbal,
    [TDBAL8] = igb_set_dbal,
    [TDBAL9] = igb_set_dbal,
    [TDBAL10] = igb_set_dbal,
    [TDBAL11] = igb_set_dbal,
    [TDBAL12] = igb_set_dbal,
    [TDBAL13] = igb_set_dbal,
    [TDBAL14] = igb_set_dbal,
    [TDBAL15] = igb_set_dbal,
    [RDBAL0] = igb_set_dbal,
    [RDBAL1] = igb_set_dbal,
    [RDBAL2] = igb_set_dbal,
    [RDBAL3] = igb_set_dbal,
    [RDBAL4] = igb_set_dbal,
    [RDBAL5] = igb_set_dbal,
    [RDBAL6] = igb_set_dbal,
    [RDBAL7] = igb_set_dbal,
    [RDBAL8] = igb_set_dbal,
    [RDBAL9] = igb_set_dbal,
    [RDBAL10] = igb_set_dbal,
    [RDBAL11] = igb_set_dbal,
    [RDBAL12] = igb_set_dbal,
    [RDBAL13] = igb_set_dbal,
    [RDBAL14] = igb_set_dbal,
    [RDBAL15] = igb_set_dbal,
    [STATUS] = igb_set_status,
    [PBACLR] = igb_set_pbaclr,
    [CTRL_EXT] = igb_set_ctrlext,
    [FCAH] = igb_set_16bit,
    [FCT] = igb_set_16bit,
    [FCTTV] = igb_set_16bit,
    [FCRTV] = igb_set_16bit,
    [FCRTH] = igb_set_fcrth,
    [FCRTL] = igb_set_fcrtl,
    [CTRL_DUP] = igb_set_ctrl,
    [RFCTL] = igb_set_rfctl,
    [TIMINCA] = igb_set_timinca,
    [TIMADJH] = igb_set_timadjh,

    [IP6AT ... IP6AT + 3] = igb_mac_writereg,
    [IP4AT ... IP4AT + 6] = igb_mac_writereg,
    [RA] = igb_mac_writereg,
    [RA + 1] = igb_mac_setmacaddr,
    [RA + 2 ... RA + 31] = igb_mac_writereg,
    [RA2 ... RA2 + 31] = igb_mac_writereg,
    [WUPM ... WUPM + 31] = igb_mac_writereg,
    [MTA ... MTA + E1000_MC_TBL_SIZE - 1] = igb_mac_writereg,
    [VFTA ... VFTA + E1000_VLAN_FILTER_TBL_SIZE - 1] = igb_mac_writereg,
    [FFMT ... FFMT + 254] = igb_set_4bit,
    [MDEF ... MDEF + 7] = igb_mac_writereg,
    [FTFT ... FTFT + 254] = igb_mac_writereg,
    [RETA ... RETA + 31] = igb_mac_writereg,
    [RSSRK ... RSSRK + 9] = igb_mac_writereg,
    [MAVTV0 ... MAVTV3] = igb_mac_writereg,
    [EITR0 ... EITR0 + IGB_INTR_NUM - 1] = igb_set_eitr,

    /* IGB specific: */
    [FWSM] = igb_mac_writereg,
    [SW_FW_SYNC] = igb_mac_writereg,
    [EICR] = igb_set_eicr,
    [EICS] = igb_set_eics,
    [EIAC] = igb_set_eiac,
    [EIAM] = igb_set_eiam,
    [EIMC] = igb_set_eimc,
    [EIMS] = igb_set_eims,
    [IVAR0 ... IVAR0 + 7] = igb_mac_writereg,
    igb_putreg(IVAR_MISC),
    igb_putreg(TSYNCRXCFG),
    [ETQF0 ... ETQF0 + 7] = igb_mac_writereg,
    igb_putreg(VT_CTL),
    [P2VMAILBOX0 ... P2VMAILBOX7] = igb_set_pfmailbox,
    [V2PMAILBOX0 ... V2PMAILBOX7] = igb_set_vfmailbox,
    [MBVFICR] = igb_w1c,
    [VMBMEM0 ... VMBMEM0 + 127] = igb_mac_writereg,
    igb_putreg(MBVFIMR),
    [VFLRE] = igb_w1c,
    igb_putreg(VFRE),
    igb_putreg(VFTE),
    igb_putreg(QDE),
    igb_putreg(DTXSWC),
    igb_putreg(RPLOLR),
    [VLVF0 ... VLVF0 + E1000_VLVF_ARRAY_SIZE - 1] = igb_mac_writereg,
    [VMVIR0 ... VMVIR7] = igb_mac_writereg,
    [VMOLR0 ... VMOLR7] = igb_mac_writereg,
    [UTA ... UTA + E1000_MC_TBL_SIZE - 1] = igb_mac_writereg,
    [PVTCTRL0] = igb_set_vtctrl,
    [PVTCTRL1] = igb_set_vtctrl,
    [PVTCTRL2] = igb_set_vtctrl,
    [PVTCTRL3] = igb_set_vtctrl,
    [PVTCTRL4] = igb_set_vtctrl,
    [PVTCTRL5] = igb_set_vtctrl,
    [PVTCTRL6] = igb_set_vtctrl,
    [PVTCTRL7] = igb_set_vtctrl,
    [PVTEICS0] = igb_set_vteics,
    [PVTEICS1] = igb_set_vteics,
    [PVTEICS2] = igb_set_vteics,
    [PVTEICS3] = igb_set_vteics,
    [PVTEICS4] = igb_set_vteics,
    [PVTEICS5] = igb_set_vteics,
    [PVTEICS6] = igb_set_vteics,
    [PVTEICS7] = igb_set_vteics,
    [PVTEIMS0] = igb_set_vteims,
    [PVTEIMS1] = igb_set_vteims,
    [PVTEIMS2] = igb_set_vteims,
    [PVTEIMS3] = igb_set_vteims,
    [PVTEIMS4] = igb_set_vteims,
    [PVTEIMS5] = igb_set_vteims,
    [PVTEIMS6] = igb_set_vteims,
    [PVTEIMS7] = igb_set_vteims,
    [PVTEIMC0] = igb_set_vteimc,
    [PVTEIMC1] = igb_set_vteimc,
    [PVTEIMC2] = igb_set_vteimc,
    [PVTEIMC3] = igb_set_vteimc,
    [PVTEIMC4] = igb_set_vteimc,
    [PVTEIMC5] = igb_set_vteimc,
    [PVTEIMC6] = igb_set_vteimc,
    [PVTEIMC7] = igb_set_vteimc,
    [PVTEIAC0] = igb_set_vteiac,
    [PVTEIAC1] = igb_set_vteiac,
    [PVTEIAC2] = igb_set_vteiac,
    [PVTEIAC3] = igb_set_vteiac,
    [PVTEIAC4] = igb_set_vteiac,
    [PVTEIAC5] = igb_set_vteiac,
    [PVTEIAC6] = igb_set_vteiac,
    [PVTEIAC7] = igb_set_vteiac,
    [PVTEIAM0] = igb_set_vteiam,
    [PVTEIAM1] = igb_set_vteiam,
    [PVTEIAM2] = igb_set_vteiam,
    [PVTEIAM3] = igb_set_vteiam,
    [PVTEIAM4] = igb_set_vteiam,
    [PVTEIAM5] = igb_set_vteiam,
    [PVTEIAM6] = igb_set_vteiam,
    [PVTEIAM7] = igb_set_vteiam,
    [PVTEICR0] = igb_set_vteicr,
    [PVTEICR1] = igb_set_vteicr,
    [PVTEICR2] = igb_set_vteicr,
    [PVTEICR3] = igb_set_vteicr,
    [PVTEICR4] = igb_set_vteicr,
    [PVTEICR5] = igb_set_vteicr,
    [PVTEICR6] = igb_set_vteicr,
    [PVTEICR7] = igb_set_vteicr,
    [VTIVAR ... VTIVAR + 7] = igb_set_vtivar,
    [VTIVAR_MISC ... VTIVAR_MISC + 7] = igb_mac_writereg
};
enum { IGB_NWRITEOPS = ARRAY_SIZE(igb_macreg_writeops) };

enum { MAC_ACCESS_PARTIAL = 1 };

/*
 * The array below combines the alias offsets of the index values for
 * the MAC registers that have aliases with an indication of not fully
 * implemented registers (lowest bit). This combination is possible
 * because all of the offsets are even. A sketch of how an entry is
 * consumed follows the array.
 */
static const uint16_t mac_reg_access[E1000E_MAC_SIZE] = {
    /* Alias index offsets */
    [FCRTL_A] = 0x07fe,
    [RDFH_A] = 0xe904, [RDFT_A] = 0xe904,
    [TDFH_A] = 0xed00, [TDFT_A] = 0xed00,
    [RA_A ... RA_A + 31] = 0x14f0,
    [VFTA_A ... VFTA_A + E1000_VLAN_FILTER_TBL_SIZE - 1] = 0x1400,

    [RDBAL0_A] = 0x2600,
    [RDBAH0_A] = 0x2600,
    [RDLEN0_A] = 0x2600,
    [SRRCTL0_A] = 0x2600,
    [RDH0_A] = 0x2600,
    [RDT0_A] = 0x2600,
    [RXDCTL0_A] = 0x2600,
    [RXCTL0_A] = 0x2600,
    [RQDPC0_A] = 0x2600,
    [RDBAL1_A] = 0x25D0,
    [RDBAL2_A] = 0x25A0,
    [RDBAL3_A] = 0x2570,
    [RDBAH1_A] = 0x25D0,
    [RDBAH2_A] = 0x25A0,
    [RDBAH3_A] = 0x2570,
    [RDLEN1_A] = 0x25D0,
    [RDLEN2_A] = 0x25A0,
    [RDLEN3_A] = 0x2570,
    [SRRCTL1_A] = 0x25D0,
    [SRRCTL2_A] = 0x25A0,
    [SRRCTL3_A] = 0x2570,
    [RDH1_A] = 0x25D0,
    [RDH2_A] = 0x25A0,
    [RDH3_A] = 0x2570,
    [RDT1_A] = 0x25D0,
    [RDT2_A] = 0x25A0,
    [RDT3_A] = 0x2570,
    [RXDCTL1_A] = 0x25D0,
    [RXDCTL2_A] = 0x25A0,
    [RXDCTL3_A] = 0x2570,
    [RXCTL1_A] = 0x25D0,
    [RXCTL2_A] = 0x25A0,
    [RXCTL3_A] = 0x2570,
    [RQDPC1_A] = 0x25D0,
    [RQDPC2_A] = 0x25A0,
    [RQDPC3_A] = 0x2570,
    [TDBAL0_A] = 0x2A00,
    [TDBAH0_A] = 0x2A00,
    [TDLEN0_A] = 0x2A00,
    [TDH0_A] = 0x2A00,
    [TDT0_A] = 0x2A00,
    [TXCTL0_A] = 0x2A00,
    [TDWBAL0_A] = 0x2A00,
    [TDWBAH0_A] = 0x2A00,
    [TDBAL1_A] = 0x29D0,
    [TDBAL2_A] = 0x29A0,
    [TDBAL3_A] = 0x2970,
    [TDBAH1_A] = 0x29D0,
    [TDBAH2_A] = 0x29A0,
    [TDBAH3_A] = 0x2970,
    [TDLEN1_A] = 0x29D0,
    [TDLEN2_A] = 0x29A0,
    [TDLEN3_A] = 0x2970,
    [TDH1_A] = 0x29D0,
    [TDH2_A] = 0x29A0,
    [TDH3_A] = 0x2970,
    [TDT1_A] = 0x29D0,
    [TDT2_A] = 0x29A0,
    [TDT3_A] = 0x2970,
    [TXDCTL0_A] = 0x2A00,
    [TXDCTL1_A] = 0x29D0,
    [TXDCTL2_A] = 0x29A0,
    [TXDCTL3_A] = 0x2970,
    [TXCTL1_A] = 0x29D0,
    [TXCTL2_A] = 0x29A0,
    [TXCTL3_A] = 0x2970,
    [TDWBAL1_A] = 0x29D0,
    [TDWBAL2_A] = 0x29A0,
    [TDWBAL3_A] = 0x2970,
    [TDWBAH1_A] = 0x29D0,
    [TDWBAH2_A] = 0x29A0,
    [TDWBAH3_A] = 0x2970,

    /* Access options */
    [RDFH] = MAC_ACCESS_PARTIAL, [RDFT] = MAC_ACCESS_PARTIAL,
    [RDFHS] = MAC_ACCESS_PARTIAL, [RDFTS] = MAC_ACCESS_PARTIAL,
    [RDFPC] = MAC_ACCESS_PARTIAL,
    [TDFH] = MAC_ACCESS_PARTIAL, [TDFT] = MAC_ACCESS_PARTIAL,
    [TDFHS] = MAC_ACCESS_PARTIAL, [TDFTS] = MAC_ACCESS_PARTIAL,
    [TDFPC] = MAC_ACCESS_PARTIAL, [EECD] = MAC_ACCESS_PARTIAL,
    [FLA] = MAC_ACCESS_PARTIAL,
    [FCAL] = MAC_ACCESS_PARTIAL, [FCAH] = MAC_ACCESS_PARTIAL,
    [FCT] = MAC_ACCESS_PARTIAL, [FCTTV] = MAC_ACCESS_PARTIAL,
    [FCRTV] = MAC_ACCESS_PARTIAL, [FCRTL] = MAC_ACCESS_PARTIAL,
    [FCRTH] = MAC_ACCESS_PARTIAL,
    [MAVTV0 ... MAVTV3] = MAC_ACCESS_PARTIAL
};

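/*
 * Illustrative sketch only, assuming the igb_get_reg_index_with_offset()
 * helper used below resolves an access roughly as:
 *
 *     index = (addr & 0x1ffff) >> 2;
 *     index += mac_reg_access[index] & ~MAC_ACCESS_PARTIAL;
 *
 * i.e. the even alias offset is added to the alias index to reach the
 * real register, while the low bit stays reserved for the
 * MAC_ACCESS_PARTIAL flag. For example, a write to RDT1_A would then
 * land on igb_set_rdt for RDT1, 0x25D0 register slots (0x9740 bytes)
 * above the alias.
 */

/*
 * MMIO dispatch: registers are 32 bits wide at 4-byte strides, so
 * "index << 2" in the traces below recovers the byte offset of the
 * access. Writes to read-only registers and accesses to unimplemented
 * registers are traced and otherwise ignored; unimplemented reads
 * return 0.
 */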
void
igb_core_write(IGBCore *core, hwaddr addr, uint64_t val, unsigned size)
{
    uint16_t index = igb_get_reg_index_with_offset(mac_reg_access, addr);

    if (index < IGB_NWRITEOPS && igb_macreg_writeops[index]) {
        if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) {
            trace_e1000e_wrn_regs_write_trivial(index << 2);
        }
        trace_e1000e_core_write(index << 2, size, val);
        igb_macreg_writeops[index](core, index, val);
    } else if (index < IGB_NREADOPS && igb_macreg_readops[index]) {
        trace_e1000e_wrn_regs_write_ro(index << 2, size, val);
    } else {
        trace_e1000e_wrn_regs_write_unknown(index << 2, size, val);
    }
}

uint64_t
igb_core_read(IGBCore *core, hwaddr addr, unsigned size)
{
    uint64_t val;
    uint16_t index = igb_get_reg_index_with_offset(mac_reg_access, addr);

    if (index < IGB_NREADOPS && igb_macreg_readops[index]) {
        if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) {
            trace_e1000e_wrn_regs_read_trivial(index << 2);
        }
        val = igb_macreg_readops[index](core, index);
        trace_e1000e_core_read(index << 2, size, val);
        return val;
    } else {
        trace_e1000e_wrn_regs_read_unknown(index << 2, size);
    }
    return 0;
}

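/*
 * Resume an auto-negotiation that is still in flight, e.g. after an
 * incoming migration: if autoneg is enabled but has not completed yet,
 * bring the link back up and re-arm the completion timer.
 */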
static void
igb_autoneg_resume(IGBCore *core)
{
    if (igb_have_autoneg(core) &&
        !(core->phy[MII_BMSR] & MII_BMSR_AN_COMP)) {
        qemu_get_queue(core->owner_nic)->link_down = false;
        timer_mod(core->autoneg_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
    }
}

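/*
 * One-time initialisation at PCI realize time: the auto-negotiation
 * timer, interrupt-manager state, per-queue TX packet buffers, the
 * shared RX packet buffer, and the EEPROM image (seeded from
 * eeprom_templ and macaddr).
 */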
void
igb_core_pci_realize(IGBCore *core,
                     const uint16_t *eeprom_templ,
                     uint32_t eeprom_size,
                     const uint8_t *macaddr)
{
    int i;

    core->autoneg_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                       igb_autoneg_timer, core);
    igb_intrmgr_pci_realize(core);

    for (i = 0; i < IGB_NUM_QUEUES; i++) {
        net_tx_pkt_init(&core->tx[i].tx_pkt, E1000E_MAX_TX_FRAGS);
    }

    net_rx_pkt_init(&core->rx_pkt);

    e1000x_core_prepare_eeprom(core->eeprom,
                               eeprom_templ,
                               eeprom_size,
                               PCI_DEVICE_GET_CLASS(core->owner)->device_id,
                               macaddr);
    igb_update_rx_offloads(core);
}

void
igb_core_pci_uninit(IGBCore *core)
{
    int i;

    timer_free(core->autoneg_timer);

    igb_intrmgr_pci_unint(core);

    for (i = 0; i < IGB_NUM_QUEUES; i++) {
        net_tx_pkt_uninit(core->tx[i].tx_pkt);
    }

    net_rx_pkt_uninit(core->rx_pkt);
}

static const uint16_t
igb_phy_reg_init[] = {
    [MII_BMCR] = MII_BMCR_SPEED1000 |
                 MII_BMCR_FD |
                 MII_BMCR_AUTOEN,

    [MII_BMSR] = MII_BMSR_EXTCAP |
                 MII_BMSR_LINK_ST |
                 MII_BMSR_AUTONEG |
                 MII_BMSR_MFPS |
                 MII_BMSR_EXTSTAT |
                 MII_BMSR_10T_HD |
                 MII_BMSR_10T_FD |
                 MII_BMSR_100TX_HD |
                 MII_BMSR_100TX_FD,

    [MII_PHYID1] = IGP03E1000_E_PHY_ID >> 16,
    [MII_PHYID2] = (IGP03E1000_E_PHY_ID & 0xfff0) | 1,
    [MII_ANAR] = MII_ANAR_CSMACD | MII_ANAR_10 |
                 MII_ANAR_10FD | MII_ANAR_TX |
                 MII_ANAR_TXFD | MII_ANAR_PAUSE |
                 MII_ANAR_PAUSE_ASYM,
    [MII_ANLPAR] = MII_ANLPAR_10 | MII_ANLPAR_10FD |
                   MII_ANLPAR_TX | MII_ANLPAR_TXFD |
                   MII_ANLPAR_T4 | MII_ANLPAR_PAUSE,
    [MII_ANER] = MII_ANER_NP | MII_ANER_NWAY,
    [MII_ANNP] = 0x1 | MII_ANNP_MP,
    [MII_CTRL1000] = MII_CTRL1000_HALF | MII_CTRL1000_FULL |
                     MII_CTRL1000_PORT | MII_CTRL1000_MASTER,
    [MII_STAT1000] = MII_STAT1000_HALF | MII_STAT1000_FULL |
                     MII_STAT1000_ROK | MII_STAT1000_LOK,
    [MII_EXTSTAT] = MII_EXTSTAT_1000T_HD | MII_EXTSTAT_1000T_FD,

    [IGP01E1000_PHY_PORT_CONFIG] = BIT(5) | BIT(8),
    [IGP01E1000_PHY_PORT_STATUS] = IGP01E1000_PSSR_SPEED_1000MBPS,
    [IGP02E1000_PHY_POWER_MGMT] = BIT(0) | BIT(3) | IGP02E1000_PM_D3_LPLU |
                                  IGP01E1000_PSCFR_SMART_SPEED
};

static const uint32_t igb_mac_reg_init[] = {
    [LEDCTL] = 2 | (3 << 8) | BIT(15) | (6 << 16) | (7 << 24),
    [EEMNGCTL] = BIT(31),
    [TXDCTL0] = E1000_TXDCTL_QUEUE_ENABLE,
    [RXDCTL0] = E1000_RXDCTL_QUEUE_ENABLE | (1 << 16),
    [RXDCTL1] = 1 << 16,
    [RXDCTL2] = 1 << 16,
    [RXDCTL3] = 1 << 16,
    [RXDCTL4] = 1 << 16,
    [RXDCTL5] = 1 << 16,
    [RXDCTL6] = 1 << 16,
    [RXDCTL7] = 1 << 16,
    [RXDCTL8] = 1 << 16,
    [RXDCTL9] = 1 << 16,
    [RXDCTL10] = 1 << 16,
    [RXDCTL11] = 1 << 16,
    [RXDCTL12] = 1 << 16,
    [RXDCTL13] = 1 << 16,
    [RXDCTL14] = 1 << 16,
    [RXDCTL15] = 1 << 16,
    [TIPG] = 0x08 | (0x04 << 10) | (0x06 << 20),
    [CTRL] = E1000_CTRL_FD | E1000_CTRL_LRST | E1000_CTRL_SPD_1000 |
             E1000_CTRL_ADVD3WUC,
    [STATUS] = E1000_STATUS_PHYRA | BIT(31),
    [EECD] = E1000_EECD_FWE_DIS | E1000_EECD_PRES |
             (2 << E1000_EECD_SIZE_EX_SHIFT),
    [GCR] = E1000_L0S_ADJUST |
            E1000_GCR_CMPL_TMOUT_RESEND |
            E1000_GCR_CAP_VER2 |
            E1000_L1_ENTRY_LATENCY_MSB |
            E1000_L1_ENTRY_LATENCY_LSB,
    [RXCSUM] = E1000_RXCSUM_IPOFLD | E1000_RXCSUM_TUOFLD,
    [TXPBS] = 0x28,
    [RXPBS] = 0x40,
    [TCTL] = E1000_TCTL_PSP | (0xF << E1000_CT_SHIFT) |
             (0x40 << E1000_COLD_SHIFT) | (0x1 << 26) | (0xA << 28),
    [TCTL_EXT] = 0x40 | (0x42 << 10),
    [DTXCTL] = E1000_DTXCTL_8023LL | E1000_DTXCTL_SPOOF_INT,
    [VET] = ETH_P_VLAN | (ETH_P_VLAN << 16),

    [V2PMAILBOX0 ... V2PMAILBOX0 + IGB_MAX_VF_FUNCTIONS - 1] = E1000_V2PMAILBOX_RSTI,
    [MBVFIMR] = 0xFF,
    [VFRE] = 0xFF,
    [VFTE] = 0xFF,
    [VMOLR0 ... VMOLR0 + 7] = 0x2600 | E1000_VMOLR_STRCRC,
    [RPLOLR] = E1000_RPLOLR_STRCRC,
    [RLPML] = 0x2600,
    [TXCTL0] = E1000_DCA_TXCTRL_DATA_RRO_EN |
               E1000_DCA_TXCTRL_TX_WB_RO_EN |
               E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL1] = E1000_DCA_TXCTRL_DATA_RRO_EN |
               E1000_DCA_TXCTRL_TX_WB_RO_EN |
               E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL2] = E1000_DCA_TXCTRL_DATA_RRO_EN |
               E1000_DCA_TXCTRL_TX_WB_RO_EN |
               E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL3] = E1000_DCA_TXCTRL_DATA_RRO_EN |
               E1000_DCA_TXCTRL_TX_WB_RO_EN |
               E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL4] = E1000_DCA_TXCTRL_DATA_RRO_EN |
               E1000_DCA_TXCTRL_TX_WB_RO_EN |
               E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL5] = E1000_DCA_TXCTRL_DATA_RRO_EN |
               E1000_DCA_TXCTRL_TX_WB_RO_EN |
               E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL6] = E1000_DCA_TXCTRL_DATA_RRO_EN |
               E1000_DCA_TXCTRL_TX_WB_RO_EN |
               E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL7] = E1000_DCA_TXCTRL_DATA_RRO_EN |
               E1000_DCA_TXCTRL_TX_WB_RO_EN |
               E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL8] = E1000_DCA_TXCTRL_DATA_RRO_EN |
               E1000_DCA_TXCTRL_TX_WB_RO_EN |
               E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL9] = E1000_DCA_TXCTRL_DATA_RRO_EN |
               E1000_DCA_TXCTRL_TX_WB_RO_EN |
               E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL10] = E1000_DCA_TXCTRL_DATA_RRO_EN |
                E1000_DCA_TXCTRL_TX_WB_RO_EN |
                E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL11] = E1000_DCA_TXCTRL_DATA_RRO_EN |
                E1000_DCA_TXCTRL_TX_WB_RO_EN |
                E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL12] = E1000_DCA_TXCTRL_DATA_RRO_EN |
                E1000_DCA_TXCTRL_TX_WB_RO_EN |
                E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL13] = E1000_DCA_TXCTRL_DATA_RRO_EN |
                E1000_DCA_TXCTRL_TX_WB_RO_EN |
                E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL14] = E1000_DCA_TXCTRL_DATA_RRO_EN |
                E1000_DCA_TXCTRL_TX_WB_RO_EN |
                E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL15] = E1000_DCA_TXCTRL_DATA_RRO_EN |
                E1000_DCA_TXCTRL_TX_WB_RO_EN |
                E1000_DCA_TXCTRL_DESC_RRO_EN,
};

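/*
 * Reset the device. On a software-initiated reset (sw == true) the
 * packet buffer size registers (RXPBS/TXPBS) and the EITR interrupt
 * moderation registers are preserved; everything else returns to its
 * initial value.
 */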
static void igb_reset(IGBCore *core, bool sw)
{
    struct igb_tx *tx;
    int i;

    timer_del(core->autoneg_timer);

    igb_intrmgr_reset(core);

    memset(core->phy, 0, sizeof core->phy);
    memcpy(core->phy, igb_phy_reg_init, sizeof igb_phy_reg_init);

    for (i = 0; i < E1000E_MAC_SIZE; i++) {
        if (sw &&
            (i == RXPBS || i == TXPBS ||
             (i >= EITR0 && i < EITR0 + IGB_INTR_NUM))) {
            continue;
        }

        core->mac[i] = i < ARRAY_SIZE(igb_mac_reg_init) ?
                       igb_mac_reg_init[i] : 0;
    }

    if (qemu_get_queue(core->owner_nic)->link_down) {
        igb_link_down(core);
    }

    e1000x_reset_mac_addr(core->owner_nic, core->mac, core->permanent_mac);

    for (int vfn = 0; vfn < IGB_MAX_VF_FUNCTIONS; vfn++) {
        /* Set RSTI, so the VF can tell that a PF reset is in progress */
        core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_RSTI;
    }

    for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
        tx = &core->tx[i];
        memset(tx->ctx, 0, sizeof(tx->ctx));
        tx->first = true;
        tx->skip_cp = false;
    }
}

void
igb_core_reset(IGBCore *core)
{
    igb_reset(core, false);
}

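/*
 * Called before migration. A TX packet that is only partially
 * assembled from its descriptors cannot be migrated, so flag it
 * (skip_cp) to be dropped when the remaining descriptors are
 * processed on the destination.
 */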
void igb_core_pre_save(IGBCore *core)
{
    int i;
    NetClientState *nc = qemu_get_queue(core->owner_nic);

    /*
     * If link is down and auto-negotiation is supported and ongoing,
     * complete auto-negotiation immediately. This allows us to look
     * at MII_BMSR_AN_COMP to infer link status on load.
     */
    if (nc->link_down && igb_have_autoneg(core)) {
        core->phy[MII_BMSR] |= MII_BMSR_AN_COMP;
        igb_update_flowctl_status(core);
    }

    for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
        if (net_tx_pkt_has_fragments(core->tx[i].tx_pkt)) {
            core->tx[i].skip_cp = true;
        }
    }
}

int
igb_core_post_load(IGBCore *core)
{
    NetClientState *nc = qemu_get_queue(core->owner_nic);

    /*
     * nc.link_down can't be migrated, so infer link_down according
     * to the link status bit in core.mac[STATUS].
     */
    nc->link_down = (core->mac[STATUS] & E1000_STATUS_LU) == 0;

    /*
     * We need to restart the intrmgr timers, as an older version of
     * QEMU might have stopped them before migration.
     */
    igb_intrmgr_resume(core);
    igb_autoneg_resume(core);

    return 0;
}
