/*
 * Core code for QEMU e1000e emulation
 *
 * Software developer's manuals:
 * http://www.intel.com/content/dam/doc/datasheet/82574l-gbe-controller-datasheet.pdf
 *
 * Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com)
 * Developed by Daynix Computing LTD (http://www.daynix.com)
 *
 * Authors:
 * Dmitry Fleytman <dmitry@daynix.com>
 * Leonid Bloch <leonid@daynix.com>
 * Yan Vugenfirer <yan@daynix.com>
 *
 * Based on work done by:
 * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
 * Copyright (c) 2008 Qumranet
 * Based on work done by:
 * Copyright (c) 2007 Dan Aloni
 * Copyright (c) 2004 Antony T Curtis
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "net/net.h"
#include "net/tap.h"
#include "hw/net/mii.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "sysemu/runstate.h"

#include "net_tx_pkt.h"
#include "net_rx_pkt.h"

#include "e1000_common.h"
#include "e1000x_common.h"
#include "e1000e_core.h"

#include "trace.h"

/* No more than 7813 interrupts per second according to spec 10.2.4.2 */
#define E1000E_MIN_XITR (500)

#define E1000E_MAX_TX_FRAGS (64)

union e1000_rx_desc_union {
    struct e1000_rx_desc legacy;
    union e1000_rx_desc_extended extended;
    union e1000_rx_desc_packet_split packet_split;
};

static ssize_t
e1000e_receive_internal(E1000ECore *core, const struct iovec *iov, int iovcnt,
                        bool has_vnet);

static inline void
e1000e_set_interrupt_cause(E1000ECore *core, uint32_t val);

static void e1000e_reset(E1000ECore *core, bool sw);

static inline void
e1000e_process_ts_option(E1000ECore *core, struct e1000_tx_desc *dp)
{
    if (le32_to_cpu(dp->upper.data) & E1000_TXD_EXTCMD_TSTAMP) {
        trace_e1000e_wrn_no_ts_support();
    }
}

static inline void
e1000e_process_snap_option(E1000ECore *core, uint32_t cmd_and_length)
{
    if (cmd_and_length & E1000_TXD_CMD_SNAP) {
        trace_e1000e_wrn_no_snap_support();
    }
}

static inline void
e1000e_raise_legacy_irq(E1000ECore *core)
{
    trace_e1000e_irq_legacy_notify(true);
    e1000x_inc_reg_if_not_full(core->mac, IAC);
    pci_set_irq(core->owner, 1);
}

static inline void
e1000e_lower_legacy_irq(E1000ECore *core)
{
    trace_e1000e_irq_legacy_notify(false);
    pci_set_irq(core->owner, 0);
}

static inline void
e1000e_intrmgr_rearm_timer(E1000IntrDelayTimer *timer)
{
    int64_t delay_ns = (int64_t) timer->core->mac[timer->delay_reg] *
                       timer->delay_resolution_ns;

    trace_e1000e_irq_rearm_timer(timer->delay_reg << 2, delay_ns);

    timer_mod(timer->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + delay_ns);

    timer->running = true;
}

static void
e1000e_intmgr_timer_resume(E1000IntrDelayTimer *timer)
{
    if (timer->running) {
        e1000e_intrmgr_rearm_timer(timer);
    }
}

static void
e1000e_intmgr_timer_pause(E1000IntrDelayTimer *timer)
{
    if (timer->running) {
        timer_del(timer->timer);
    }
}

static inline void
e1000e_intrmgr_stop_timer(E1000IntrDelayTimer *timer)
{
    if (timer->running) {
        timer_del(timer->timer);
        timer->running = false;
    }
}

static inline void
e1000e_intrmgr_fire_delayed_interrupts(E1000ECore *core)
{
    trace_e1000e_irq_fire_delayed_interrupts();
    e1000e_set_interrupt_cause(core, 0);
}

static void
e1000e_intrmgr_on_timer(void *opaque)
{
    E1000IntrDelayTimer *timer = opaque;

    trace_e1000e_irq_throttling_timer(timer->delay_reg << 2);

    timer->running = false;
    e1000e_intrmgr_fire_delayed_interrupts(timer->core);
}

static void
e1000e_intrmgr_on_throttling_timer(void *opaque)
{
    E1000IntrDelayTimer *timer = opaque;

    timer->running = false;

    if (msi_enabled(timer->core->owner)) {
        trace_e1000e_irq_msi_notify_postponed();
        /* Clear msi_causes_pending to fire MSI eventually */
        timer->core->msi_causes_pending = 0;
        e1000e_set_interrupt_cause(timer->core, 0);
    } else {
        trace_e1000e_irq_legacy_notify_postponed();
        e1000e_set_interrupt_cause(timer->core, 0);
    }
}

static void
e1000e_intrmgr_on_msix_throttling_timer(void *opaque)
{
    E1000IntrDelayTimer *timer = opaque;
    int idx = timer - &timer->core->eitr[0];

    timer->running = false;

    trace_e1000e_irq_msix_notify_postponed_vec(idx);
    msix_notify(timer->core->owner, idx);
}

static void
e1000e_intrmgr_initialize_all_timers(E1000ECore *core, bool create)
{
    int i;

    core->radv.delay_reg = RADV;
    core->rdtr.delay_reg = RDTR;
    core->raid.delay_reg = RAID;
    core->tadv.delay_reg = TADV;
    core->tidv.delay_reg = TIDV;

    core->radv.delay_resolution_ns = E1000_INTR_DELAY_NS_RES;
    core->rdtr.delay_resolution_ns = E1000_INTR_DELAY_NS_RES;
    core->raid.delay_resolution_ns = E1000_INTR_DELAY_NS_RES;
    core->tadv.delay_resolution_ns = E1000_INTR_DELAY_NS_RES;
    core->tidv.delay_resolution_ns = E1000_INTR_DELAY_NS_RES;

    core->radv.core = core;
    core->rdtr.core = core;
    core->raid.core = core;
    core->tadv.core = core;
    core->tidv.core = core;

    core->itr.core = core;
    core->itr.delay_reg = ITR;
    core->itr.delay_resolution_ns = E1000_INTR_THROTTLING_NS_RES;

    for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
        core->eitr[i].core = core;
        core->eitr[i].delay_reg = EITR + i;
        core->eitr[i].delay_resolution_ns = E1000_INTR_THROTTLING_NS_RES;
    }

    if (!create) {
        return;
    }

    core->radv.timer =
        timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->radv);
    core->rdtr.timer =
        timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->rdtr);
    core->raid.timer =
        timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->raid);

    core->tadv.timer =
        timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->tadv);
    core->tidv.timer =
        timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->tidv);

    core->itr.timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                   e1000e_intrmgr_on_throttling_timer,
                                   &core->itr);

    for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
        core->eitr[i].timer =
            timer_new_ns(QEMU_CLOCK_VIRTUAL,
                         e1000e_intrmgr_on_msix_throttling_timer,
                         &core->eitr[i]);
    }
}

static inline void
e1000e_intrmgr_stop_delay_timers(E1000ECore *core)
{
    e1000e_intrmgr_stop_timer(&core->radv);
    e1000e_intrmgr_stop_timer(&core->rdtr);
    e1000e_intrmgr_stop_timer(&core->raid);
    e1000e_intrmgr_stop_timer(&core->tidv);
    e1000e_intrmgr_stop_timer(&core->tadv);
}

static bool
e1000e_intrmgr_delay_rx_causes(E1000ECore *core, uint32_t *causes)
{
    uint32_t delayable_causes;
    uint32_t rdtr = core->mac[RDTR];
    uint32_t radv = core->mac[RADV];
    uint32_t raid = core->mac[RAID];

    if (msix_enabled(core->owner)) {
        return false;
    }

    delayable_causes = E1000_ICR_RXQ0 |
                       E1000_ICR_RXQ1 |
                       E1000_ICR_RXT0;

    if (!(core->mac[RFCTL] & E1000_RFCTL_ACK_DIS)) {
        delayable_causes |= E1000_ICR_ACK;
    }

    /* Clean up all causes that may be delayed */
    core->delayed_causes |= *causes & delayable_causes;
    *causes &= ~delayable_causes;

    /*
     * Check if delayed RX interrupts disabled by client
     * or if there are causes that cannot be delayed
     */
    if ((rdtr == 0) || (*causes != 0)) {
        return false;
    }

    /*
     * Check if delayed RX ACK interrupts disabled by client
     * and there is an ACK packet received
     */
    if ((raid == 0) && (core->delayed_causes & E1000_ICR_ACK)) {
        return false;
    }

    /* All causes delayed */
    e1000e_intrmgr_rearm_timer(&core->rdtr);

    if (!core->radv.running && (radv != 0)) {
        e1000e_intrmgr_rearm_timer(&core->radv);
    }

    if (!core->raid.running && (core->delayed_causes & E1000_ICR_ACK)) {
        e1000e_intrmgr_rearm_timer(&core->raid);
    }

    return true;
}

static bool
e1000e_intrmgr_delay_tx_causes(E1000ECore *core, uint32_t *causes)
{
    static const uint32_t delayable_causes = E1000_ICR_TXQ0 |
                                             E1000_ICR_TXQ1 |
                                             E1000_ICR_TXQE |
                                             E1000_ICR_TXDW;

    if (msix_enabled(core->owner)) {
        return false;
    }

    /* Clean up all causes that may be delayed */
    core->delayed_causes |= *causes & delayable_causes;
    *causes &= ~delayable_causes;

    /* If there are causes that cannot be delayed */
    if (*causes != 0) {
        return false;
    }

    /* All causes delayed */
    e1000e_intrmgr_rearm_timer(&core->tidv);

    if (!core->tadv.running && (core->mac[TADV] != 0)) {
        e1000e_intrmgr_rearm_timer(&core->tadv);
    }

    return true;
}

static uint32_t
e1000e_intmgr_collect_delayed_causes(E1000ECore *core)
{
    uint32_t res;

    if (msix_enabled(core->owner)) {
        assert(core->delayed_causes == 0);
        return 0;
    }

    res = core->delayed_causes;
    core->delayed_causes = 0;

    e1000e_intrmgr_stop_delay_timers(core);

    return res;
}

static void
e1000e_intrmgr_fire_all_timers(E1000ECore *core)
{
    int i;
    uint32_t val = e1000e_intmgr_collect_delayed_causes(core);

    trace_e1000e_irq_adding_delayed_causes(val, core->mac[ICR]);
    core->mac[ICR] |= val;

    if (core->itr.running) {
        timer_del(core->itr.timer);
        e1000e_intrmgr_on_throttling_timer(&core->itr);
    }

    for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
        if (core->eitr[i].running) {
            timer_del(core->eitr[i].timer);
            e1000e_intrmgr_on_msix_throttling_timer(&core->eitr[i]);
        }
    }
}

static void
e1000e_intrmgr_resume(E1000ECore *core)
{
    int i;

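    /* Re-arm only the timers that were still marked running when paused. */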
    e1000e_intmgr_timer_resume(&core->radv);
    e1000e_intmgr_timer_resume(&core->rdtr);
    e1000e_intmgr_timer_resume(&core->raid);
    e1000e_intmgr_timer_resume(&core->tidv);
    e1000e_intmgr_timer_resume(&core->tadv);

    e1000e_intmgr_timer_resume(&core->itr);

    for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
        e1000e_intmgr_timer_resume(&core->eitr[i]);
    }
}

static void
e1000e_intrmgr_pause(E1000ECore *core)
{
    int i;

    e1000e_intmgr_timer_pause(&core->radv);
    e1000e_intmgr_timer_pause(&core->rdtr);
    e1000e_intmgr_timer_pause(&core->raid);
    e1000e_intmgr_timer_pause(&core->tidv);
    e1000e_intmgr_timer_pause(&core->tadv);

    e1000e_intmgr_timer_pause(&core->itr);

    for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
        e1000e_intmgr_timer_pause(&core->eitr[i]);
    }
}

static void
e1000e_intrmgr_reset(E1000ECore *core)
{
    int i;

    core->delayed_causes = 0;

    e1000e_intrmgr_stop_delay_timers(core);

    e1000e_intrmgr_stop_timer(&core->itr);

    for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
        e1000e_intrmgr_stop_timer(&core->eitr[i]);
    }
}

static void
e1000e_intrmgr_pci_unint(E1000ECore *core)
{
    int i;

    timer_free(core->radv.timer);
    timer_free(core->rdtr.timer);
    timer_free(core->raid.timer);

    timer_free(core->tadv.timer);
    timer_free(core->tidv.timer);

    timer_free(core->itr.timer);

    for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
        timer_free(core->eitr[i].timer);
    }
}

static void
e1000e_intrmgr_pci_realize(E1000ECore *core)
{
    e1000e_intrmgr_initialize_all_timers(core, true);
}

static inline bool
e1000e_rx_csum_enabled(E1000ECore *core)
{
    return (core->mac[RXCSUM] & E1000_RXCSUM_PCSD) ? false : true;
}

static inline bool
e1000e_rx_use_legacy_descriptor(E1000ECore *core)
{
    return (core->mac[RFCTL] & E1000_RFCTL_EXTEN) ? false : true;
}

static inline bool
e1000e_rx_use_ps_descriptor(E1000ECore *core)
{
    return !e1000e_rx_use_legacy_descriptor(core) &&
           (core->mac[RCTL] & E1000_RCTL_DTYP_PS);
}

static inline bool
e1000e_rss_enabled(E1000ECore *core)
{
    return E1000_MRQC_ENABLED(core->mac[MRQC]) &&
           !e1000e_rx_csum_enabled(core) &&
           !e1000e_rx_use_legacy_descriptor(core);
}

typedef struct E1000E_RSSInfo_st {
    bool enabled;
    uint32_t hash;
    uint32_t queue;
    uint32_t type;
} E1000E_RSSInfo;

static uint32_t
e1000e_rss_get_hash_type(E1000ECore *core, struct NetRxPkt *pkt)
{
    bool hasip4, hasip6;
    EthL4HdrProto l4hdr_proto;

    assert(e1000e_rss_enabled(core));

    net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);

    if (hasip4) {
        trace_e1000e_rx_rss_ip4(l4hdr_proto, core->mac[MRQC],
                                E1000_MRQC_EN_TCPIPV4(core->mac[MRQC]),
                                E1000_MRQC_EN_IPV4(core->mac[MRQC]));

        if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP &&
            E1000_MRQC_EN_TCPIPV4(core->mac[MRQC])) {
            return E1000_MRQ_RSS_TYPE_IPV4TCP;
        }

        if (E1000_MRQC_EN_IPV4(core->mac[MRQC])) {
            return E1000_MRQ_RSS_TYPE_IPV4;
        }
    } else if (hasip6) {
        eth_ip6_hdr_info *ip6info = net_rx_pkt_get_ip6_info(pkt);

        bool ex_dis = core->mac[RFCTL] & E1000_RFCTL_IPV6_EX_DIS;
        bool new_ex_dis = core->mac[RFCTL] & E1000_RFCTL_NEW_IPV6_EXT_DIS;

        /*
         * The following two traces must not be combined, because the
         * resulting event would have 11 arguments in total, and some trace
         * backends (at least "ust") are limited to 10 arguments per event.
         * Events with more arguments fail to compile for such backends.
         */
        trace_e1000e_rx_rss_ip6_rfctl(core->mac[RFCTL]);
        trace_e1000e_rx_rss_ip6(ex_dis, new_ex_dis, l4hdr_proto,
                                ip6info->has_ext_hdrs,
                                ip6info->rss_ex_dst_valid,
                                ip6info->rss_ex_src_valid,
                                core->mac[MRQC],
                                E1000_MRQC_EN_TCPIPV6EX(core->mac[MRQC]),
                                E1000_MRQC_EN_IPV6EX(core->mac[MRQC]),
                                E1000_MRQC_EN_IPV6(core->mac[MRQC]));

        if ((!ex_dis || !ip6info->has_ext_hdrs) &&
            (!new_ex_dis || !(ip6info->rss_ex_dst_valid ||
                              ip6info->rss_ex_src_valid))) {

            if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP &&
                E1000_MRQC_EN_TCPIPV6EX(core->mac[MRQC])) {
                return E1000_MRQ_RSS_TYPE_IPV6TCPEX;
            }

            if (E1000_MRQC_EN_IPV6EX(core->mac[MRQC])) {
                return E1000_MRQ_RSS_TYPE_IPV6EX;
            }

        }

        if (E1000_MRQC_EN_IPV6(core->mac[MRQC])) {
            return E1000_MRQ_RSS_TYPE_IPV6;
        }

    }

    return E1000_MRQ_RSS_TYPE_NONE;
}

static uint32_t
e1000e_rss_calc_hash(E1000ECore *core,
                     struct NetRxPkt *pkt,
                     E1000E_RSSInfo *info)
{
    NetRxPktRssType type;

    assert(e1000e_rss_enabled(core));

    switch (info->type) {
    case E1000_MRQ_RSS_TYPE_IPV4:
        type = NetPktRssIpV4;
        break;
    case E1000_MRQ_RSS_TYPE_IPV4TCP:
        type = NetPktRssIpV4Tcp;
        break;
    case E1000_MRQ_RSS_TYPE_IPV6TCPEX:
        type = NetPktRssIpV6TcpEx;
        break;
    case E1000_MRQ_RSS_TYPE_IPV6:
        type = NetPktRssIpV6;
        break;
    case E1000_MRQ_RSS_TYPE_IPV6EX:
        type = NetPktRssIpV6Ex;
        break;
    default:
        assert(false);
        return 0;
    }

    return net_rx_pkt_calc_rss_hash(pkt, type, (uint8_t *) &core->mac[RSSRK]);
}

static void
e1000e_rss_parse_packet(E1000ECore *core,
                        struct NetRxPkt *pkt,
                        E1000E_RSSInfo *info)
{
    trace_e1000e_rx_rss_started();

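    /* With RSS disabled, report a zero hash and steer every packet to queue 0. */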
608 if (!e1000e_rss_enabled(core)) { 609 info->enabled = false; 610 info->hash = 0; 611 info->queue = 0; 612 info->type = 0; 613 trace_e1000e_rx_rss_disabled(); 614 return; 615 } 616 617 info->enabled = true; 618 619 info->type = e1000e_rss_get_hash_type(core, pkt); 620 621 trace_e1000e_rx_rss_type(info->type); 622 623 if (info->type == E1000_MRQ_RSS_TYPE_NONE) { 624 info->hash = 0; 625 info->queue = 0; 626 return; 627 } 628 629 info->hash = e1000e_rss_calc_hash(core, pkt, info); 630 info->queue = E1000_RSS_QUEUE(&core->mac[RETA], info->hash); 631 } 632 633 static bool 634 e1000e_setup_tx_offloads(E1000ECore *core, struct e1000e_tx *tx) 635 { 636 if (tx->props.tse && tx->cptse) { 637 if (!net_tx_pkt_build_vheader(tx->tx_pkt, true, true, tx->props.mss)) { 638 return false; 639 } 640 641 net_tx_pkt_update_ip_checksums(tx->tx_pkt); 642 e1000x_inc_reg_if_not_full(core->mac, TSCTC); 643 return true; 644 } 645 646 if (tx->sum_needed & E1000_TXD_POPTS_TXSM) { 647 if (!net_tx_pkt_build_vheader(tx->tx_pkt, false, true, 0)) { 648 return false; 649 } 650 } 651 652 if (tx->sum_needed & E1000_TXD_POPTS_IXSM) { 653 net_tx_pkt_update_ip_hdr_checksum(tx->tx_pkt); 654 } 655 656 return true; 657 } 658 659 static void e1000e_tx_pkt_callback(void *core, 660 const struct iovec *iov, 661 int iovcnt, 662 const struct iovec *virt_iov, 663 int virt_iovcnt) 664 { 665 e1000e_receive_internal(core, virt_iov, virt_iovcnt, true); 666 } 667 668 static bool 669 e1000e_tx_pkt_send(E1000ECore *core, struct e1000e_tx *tx, int queue_index) 670 { 671 int target_queue = MIN(core->max_queue_num, queue_index); 672 NetClientState *queue = qemu_get_subqueue(core->owner_nic, target_queue); 673 674 if (!e1000e_setup_tx_offloads(core, tx)) { 675 return false; 676 } 677 678 net_tx_pkt_dump(tx->tx_pkt); 679 680 if ((core->phy[0][MII_BMCR] & MII_BMCR_LOOPBACK) || 681 ((core->mac[RCTL] & E1000_RCTL_LBM_MAC) == E1000_RCTL_LBM_MAC)) { 682 return net_tx_pkt_send_custom(tx->tx_pkt, false, 683 e1000e_tx_pkt_callback, core); 684 } else { 685 return net_tx_pkt_send(tx->tx_pkt, queue); 686 } 687 } 688 689 static void 690 e1000e_on_tx_done_update_stats(E1000ECore *core, struct NetTxPkt *tx_pkt) 691 { 692 static const int PTCregs[6] = { PTC64, PTC127, PTC255, PTC511, 693 PTC1023, PTC1522 }; 694 695 size_t tot_len = net_tx_pkt_get_total_len(tx_pkt) + 4; 696 697 e1000x_increase_size_stats(core->mac, PTCregs, tot_len); 698 e1000x_inc_reg_if_not_full(core->mac, TPT); 699 e1000x_grow_8reg_if_not_full(core->mac, TOTL, tot_len); 700 701 switch (net_tx_pkt_get_packet_type(tx_pkt)) { 702 case ETH_PKT_BCAST: 703 e1000x_inc_reg_if_not_full(core->mac, BPTC); 704 break; 705 case ETH_PKT_MCAST: 706 e1000x_inc_reg_if_not_full(core->mac, MPTC); 707 break; 708 case ETH_PKT_UCAST: 709 break; 710 default: 711 g_assert_not_reached(); 712 } 713 714 e1000x_inc_reg_if_not_full(core->mac, GPTC); 715 e1000x_grow_8reg_if_not_full(core->mac, GOTCL, tot_len); 716 } 717 718 static void 719 e1000e_process_tx_desc(E1000ECore *core, 720 struct e1000e_tx *tx, 721 struct e1000_tx_desc *dp, 722 int queue_index) 723 { 724 uint32_t txd_lower = le32_to_cpu(dp->lower.data); 725 uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D); 726 unsigned int split_size = txd_lower & 0xffff; 727 uint64_t addr; 728 struct e1000_context_desc *xp = (struct e1000_context_desc *)dp; 729 bool eop = txd_lower & E1000_TXD_CMD_EOP; 730 731 if (dtype == E1000_TXD_CMD_DEXT) { /* context descriptor */ 732 e1000x_read_tx_ctx_descr(xp, &tx->props); 733 e1000e_process_snap_option(core, 
le32_to_cpu(xp->cmd_and_length)); 734 return; 735 } else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) { 736 /* data descriptor */ 737 tx->sum_needed = le32_to_cpu(dp->upper.data) >> 8; 738 tx->cptse = (txd_lower & E1000_TXD_CMD_TSE) ? 1 : 0; 739 e1000e_process_ts_option(core, dp); 740 } else { 741 /* legacy descriptor */ 742 e1000e_process_ts_option(core, dp); 743 tx->cptse = 0; 744 } 745 746 addr = le64_to_cpu(dp->buffer_addr); 747 748 if (!tx->skip_cp) { 749 if (!net_tx_pkt_add_raw_fragment_pci(tx->tx_pkt, core->owner, 750 addr, split_size)) { 751 tx->skip_cp = true; 752 } 753 } 754 755 if (eop) { 756 if (!tx->skip_cp && net_tx_pkt_parse(tx->tx_pkt)) { 757 if (e1000x_vlan_enabled(core->mac) && 758 e1000x_is_vlan_txd(txd_lower)) { 759 net_tx_pkt_setup_vlan_header_ex(tx->tx_pkt, 760 le16_to_cpu(dp->upper.fields.special), core->mac[VET]); 761 } 762 if (e1000e_tx_pkt_send(core, tx, queue_index)) { 763 e1000e_on_tx_done_update_stats(core, tx->tx_pkt); 764 } 765 } 766 767 tx->skip_cp = false; 768 net_tx_pkt_reset(tx->tx_pkt, net_tx_pkt_unmap_frag_pci, core->owner); 769 770 tx->sum_needed = 0; 771 tx->cptse = 0; 772 } 773 } 774 775 static inline uint32_t 776 e1000e_tx_wb_interrupt_cause(E1000ECore *core, int queue_idx) 777 { 778 if (!msix_enabled(core->owner)) { 779 return E1000_ICR_TXDW; 780 } 781 782 return (queue_idx == 0) ? E1000_ICR_TXQ0 : E1000_ICR_TXQ1; 783 } 784 785 static inline uint32_t 786 e1000e_rx_wb_interrupt_cause(E1000ECore *core, int queue_idx, 787 bool min_threshold_hit) 788 { 789 if (!msix_enabled(core->owner)) { 790 return E1000_ICS_RXT0 | (min_threshold_hit ? E1000_ICS_RXDMT0 : 0); 791 } 792 793 return (queue_idx == 0) ? E1000_ICR_RXQ0 : E1000_ICR_RXQ1; 794 } 795 796 static uint32_t 797 e1000e_txdesc_writeback(E1000ECore *core, dma_addr_t base, 798 struct e1000_tx_desc *dp, bool *ide, int queue_idx) 799 { 800 uint32_t txd_upper, txd_lower = le32_to_cpu(dp->lower.data); 801 802 if (!(txd_lower & E1000_TXD_CMD_RS) && 803 !(core->mac[IVAR] & E1000_IVAR_TX_INT_EVERY_WB)) { 804 return 0; 805 } 806 807 *ide = (txd_lower & E1000_TXD_CMD_IDE) ? 
true : false; 808 809 txd_upper = le32_to_cpu(dp->upper.data) | E1000_TXD_STAT_DD; 810 811 dp->upper.data = cpu_to_le32(txd_upper); 812 pci_dma_write(core->owner, base + ((char *)&dp->upper - (char *)dp), 813 &dp->upper, sizeof(dp->upper)); 814 return e1000e_tx_wb_interrupt_cause(core, queue_idx); 815 } 816 817 typedef struct E1000E_RingInfo_st { 818 int dbah; 819 int dbal; 820 int dlen; 821 int dh; 822 int dt; 823 int idx; 824 } E1000E_RingInfo; 825 826 static inline bool 827 e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r) 828 { 829 return core->mac[r->dh] == core->mac[r->dt] || 830 core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN; 831 } 832 833 static inline uint64_t 834 e1000e_ring_base(E1000ECore *core, const E1000E_RingInfo *r) 835 { 836 uint64_t bah = core->mac[r->dbah]; 837 uint64_t bal = core->mac[r->dbal]; 838 839 return (bah << 32) + bal; 840 } 841 842 static inline uint64_t 843 e1000e_ring_head_descr(E1000ECore *core, const E1000E_RingInfo *r) 844 { 845 return e1000e_ring_base(core, r) + E1000_RING_DESC_LEN * core->mac[r->dh]; 846 } 847 848 static inline void 849 e1000e_ring_advance(E1000ECore *core, const E1000E_RingInfo *r, uint32_t count) 850 { 851 core->mac[r->dh] += count; 852 853 if (core->mac[r->dh] * E1000_RING_DESC_LEN >= core->mac[r->dlen]) { 854 core->mac[r->dh] = 0; 855 } 856 } 857 858 static inline uint32_t 859 e1000e_ring_free_descr_num(E1000ECore *core, const E1000E_RingInfo *r) 860 { 861 trace_e1000e_ring_free_space(r->idx, core->mac[r->dlen], 862 core->mac[r->dh], core->mac[r->dt]); 863 864 if (core->mac[r->dh] <= core->mac[r->dt]) { 865 return core->mac[r->dt] - core->mac[r->dh]; 866 } 867 868 if (core->mac[r->dh] > core->mac[r->dt]) { 869 return core->mac[r->dlen] / E1000_RING_DESC_LEN + 870 core->mac[r->dt] - core->mac[r->dh]; 871 } 872 873 g_assert_not_reached(); 874 return 0; 875 } 876 877 static inline bool 878 e1000e_ring_enabled(E1000ECore *core, const E1000E_RingInfo *r) 879 { 880 return core->mac[r->dlen] > 0; 881 } 882 883 static inline uint32_t 884 e1000e_ring_len(E1000ECore *core, const E1000E_RingInfo *r) 885 { 886 return core->mac[r->dlen]; 887 } 888 889 typedef struct E1000E_TxRing_st { 890 const E1000E_RingInfo *i; 891 struct e1000e_tx *tx; 892 } E1000E_TxRing; 893 894 static inline int 895 e1000e_mq_queue_idx(int base_reg_idx, int reg_idx) 896 { 897 return (reg_idx - base_reg_idx) / (0x100 >> 2); 898 } 899 900 static inline void 901 e1000e_tx_ring_init(E1000ECore *core, E1000E_TxRing *txr, int idx) 902 { 903 static const E1000E_RingInfo i[E1000E_NUM_QUEUES] = { 904 { TDBAH, TDBAL, TDLEN, TDH, TDT, 0 }, 905 { TDBAH1, TDBAL1, TDLEN1, TDH1, TDT1, 1 } 906 }; 907 908 assert(idx < ARRAY_SIZE(i)); 909 910 txr->i = &i[idx]; 911 txr->tx = &core->tx[idx]; 912 } 913 914 typedef struct E1000E_RxRing_st { 915 const E1000E_RingInfo *i; 916 } E1000E_RxRing; 917 918 static inline void 919 e1000e_rx_ring_init(E1000ECore *core, E1000E_RxRing *rxr, int idx) 920 { 921 static const E1000E_RingInfo i[E1000E_NUM_QUEUES] = { 922 { RDBAH0, RDBAL0, RDLEN0, RDH0, RDT0, 0 }, 923 { RDBAH1, RDBAL1, RDLEN1, RDH1, RDT1, 1 } 924 }; 925 926 assert(idx < ARRAY_SIZE(i)); 927 928 rxr->i = &i[idx]; 929 } 930 931 static void 932 e1000e_start_xmit(E1000ECore *core, const E1000E_TxRing *txr) 933 { 934 dma_addr_t base; 935 struct e1000_tx_desc desc; 936 bool ide = false; 937 const E1000E_RingInfo *txi = txr->i; 938 uint32_t cause = E1000_ICS_TXQE; 939 940 if (!(core->mac[TCTL] & E1000_TCTL_EN)) { 941 trace_e1000e_tx_disabled(); 942 return; 943 } 944 945 while 
(!e1000e_ring_empty(core, txi)) { 946 base = e1000e_ring_head_descr(core, txi); 947 948 pci_dma_read(core->owner, base, &desc, sizeof(desc)); 949 950 trace_e1000e_tx_descr((void *)(intptr_t)desc.buffer_addr, 951 desc.lower.data, desc.upper.data); 952 953 e1000e_process_tx_desc(core, txr->tx, &desc, txi->idx); 954 cause |= e1000e_txdesc_writeback(core, base, &desc, &ide, txi->idx); 955 956 e1000e_ring_advance(core, txi, 1); 957 } 958 959 if (!ide || !e1000e_intrmgr_delay_tx_causes(core, &cause)) { 960 e1000e_set_interrupt_cause(core, cause); 961 } 962 963 net_tx_pkt_reset(txr->tx->tx_pkt, net_tx_pkt_unmap_frag_pci, core->owner); 964 } 965 966 static bool 967 e1000e_has_rxbufs(E1000ECore *core, const E1000E_RingInfo *r, 968 size_t total_size) 969 { 970 uint32_t bufs = e1000e_ring_free_descr_num(core, r); 971 972 trace_e1000e_rx_has_buffers(r->idx, bufs, total_size, 973 core->rx_desc_buf_size); 974 975 return total_size <= bufs / (core->rx_desc_len / E1000_MIN_RX_DESC_LEN) * 976 core->rx_desc_buf_size; 977 } 978 979 void 980 e1000e_start_recv(E1000ECore *core) 981 { 982 int i; 983 984 trace_e1000e_rx_start_recv(); 985 986 for (i = 0; i <= core->max_queue_num; i++) { 987 qemu_flush_queued_packets(qemu_get_subqueue(core->owner_nic, i)); 988 } 989 } 990 991 bool 992 e1000e_can_receive(E1000ECore *core) 993 { 994 int i; 995 996 if (!e1000x_rx_ready(core->owner, core->mac)) { 997 return false; 998 } 999 1000 for (i = 0; i < E1000E_NUM_QUEUES; i++) { 1001 E1000E_RxRing rxr; 1002 1003 e1000e_rx_ring_init(core, &rxr, i); 1004 if (e1000e_ring_enabled(core, rxr.i) && 1005 e1000e_has_rxbufs(core, rxr.i, 1)) { 1006 trace_e1000e_rx_can_recv(); 1007 return true; 1008 } 1009 } 1010 1011 trace_e1000e_rx_can_recv_rings_full(); 1012 return false; 1013 } 1014 1015 ssize_t 1016 e1000e_receive(E1000ECore *core, const uint8_t *buf, size_t size) 1017 { 1018 const struct iovec iov = { 1019 .iov_base = (uint8_t *)buf, 1020 .iov_len = size 1021 }; 1022 1023 return e1000e_receive_iov(core, &iov, 1); 1024 } 1025 1026 static inline bool 1027 e1000e_rx_l3_cso_enabled(E1000ECore *core) 1028 { 1029 return !!(core->mac[RXCSUM] & E1000_RXCSUM_IPOFLD); 1030 } 1031 1032 static inline bool 1033 e1000e_rx_l4_cso_enabled(E1000ECore *core) 1034 { 1035 return !!(core->mac[RXCSUM] & E1000_RXCSUM_TUOFLD); 1036 } 1037 1038 static bool 1039 e1000e_receive_filter(E1000ECore *core, const void *buf) 1040 { 1041 return (!e1000x_is_vlan_packet(buf, core->mac[VET]) || 1042 e1000x_rx_vlan_filter(core->mac, PKT_GET_VLAN_HDR(buf))) && 1043 e1000x_rx_group_filter(core->mac, buf); 1044 } 1045 1046 static inline void 1047 e1000e_read_lgcy_rx_descr(E1000ECore *core, struct e1000_rx_desc *desc, 1048 hwaddr *buff_addr) 1049 { 1050 *buff_addr = le64_to_cpu(desc->buffer_addr); 1051 } 1052 1053 static inline void 1054 e1000e_read_ext_rx_descr(E1000ECore *core, union e1000_rx_desc_extended *desc, 1055 hwaddr *buff_addr) 1056 { 1057 *buff_addr = le64_to_cpu(desc->read.buffer_addr); 1058 } 1059 1060 static inline void 1061 e1000e_read_ps_rx_descr(E1000ECore *core, 1062 union e1000_rx_desc_packet_split *desc, 1063 hwaddr buff_addr[MAX_PS_BUFFERS]) 1064 { 1065 int i; 1066 1067 for (i = 0; i < MAX_PS_BUFFERS; i++) { 1068 buff_addr[i] = le64_to_cpu(desc->read.buffer_addr[i]); 1069 } 1070 1071 trace_e1000e_rx_desc_ps_read(buff_addr[0], buff_addr[1], 1072 buff_addr[2], buff_addr[3]); 1073 } 1074 1075 static inline void 1076 e1000e_read_rx_descr(E1000ECore *core, union e1000_rx_desc_union *desc, 1077 hwaddr buff_addr[MAX_PS_BUFFERS]) 1078 { 1079 if 
(e1000e_rx_use_legacy_descriptor(core)) { 1080 e1000e_read_lgcy_rx_descr(core, &desc->legacy, &buff_addr[0]); 1081 buff_addr[1] = buff_addr[2] = buff_addr[3] = 0; 1082 } else { 1083 if (core->mac[RCTL] & E1000_RCTL_DTYP_PS) { 1084 e1000e_read_ps_rx_descr(core, &desc->packet_split, buff_addr); 1085 } else { 1086 e1000e_read_ext_rx_descr(core, &desc->extended, &buff_addr[0]); 1087 buff_addr[1] = buff_addr[2] = buff_addr[3] = 0; 1088 } 1089 } 1090 } 1091 1092 static void 1093 e1000e_verify_csum_in_sw(E1000ECore *core, 1094 struct NetRxPkt *pkt, 1095 uint32_t *status_flags, 1096 EthL4HdrProto l4hdr_proto) 1097 { 1098 bool csum_valid; 1099 uint32_t csum_error; 1100 1101 if (e1000e_rx_l3_cso_enabled(core)) { 1102 if (!net_rx_pkt_validate_l3_csum(pkt, &csum_valid)) { 1103 trace_e1000e_rx_metadata_l3_csum_validation_failed(); 1104 } else { 1105 csum_error = csum_valid ? 0 : E1000_RXDEXT_STATERR_IPE; 1106 *status_flags |= E1000_RXD_STAT_IPCS | csum_error; 1107 } 1108 } else { 1109 trace_e1000e_rx_metadata_l3_cso_disabled(); 1110 } 1111 1112 if (!e1000e_rx_l4_cso_enabled(core)) { 1113 trace_e1000e_rx_metadata_l4_cso_disabled(); 1114 return; 1115 } 1116 1117 if (l4hdr_proto != ETH_L4_HDR_PROTO_TCP && 1118 l4hdr_proto != ETH_L4_HDR_PROTO_UDP) { 1119 return; 1120 } 1121 1122 if (!net_rx_pkt_validate_l4_csum(pkt, &csum_valid)) { 1123 trace_e1000e_rx_metadata_l4_csum_validation_failed(); 1124 return; 1125 } 1126 1127 csum_error = csum_valid ? 0 : E1000_RXDEXT_STATERR_TCPE; 1128 *status_flags |= E1000_RXD_STAT_TCPCS | csum_error; 1129 1130 if (l4hdr_proto == ETH_L4_HDR_PROTO_UDP) { 1131 *status_flags |= E1000_RXD_STAT_UDPCS; 1132 } 1133 } 1134 1135 static inline bool 1136 e1000e_is_tcp_ack(E1000ECore *core, struct NetRxPkt *rx_pkt) 1137 { 1138 if (!net_rx_pkt_is_tcp_ack(rx_pkt)) { 1139 return false; 1140 } 1141 1142 if (core->mac[RFCTL] & E1000_RFCTL_ACK_DATA_DIS) { 1143 return !net_rx_pkt_has_tcp_data(rx_pkt); 1144 } 1145 1146 return true; 1147 } 1148 1149 static void 1150 e1000e_build_rx_metadata(E1000ECore *core, 1151 struct NetRxPkt *pkt, 1152 bool is_eop, 1153 const E1000E_RSSInfo *rss_info, 1154 uint32_t *rss, uint32_t *mrq, 1155 uint32_t *status_flags, 1156 uint16_t *ip_id, 1157 uint16_t *vlan_tag) 1158 { 1159 struct virtio_net_hdr *vhdr; 1160 bool hasip4, hasip6; 1161 EthL4HdrProto l4hdr_proto; 1162 uint32_t pkt_type; 1163 1164 *status_flags = E1000_RXD_STAT_DD; 1165 1166 /* No additional metadata needed for non-EOP descriptors */ 1167 if (!is_eop) { 1168 goto func_exit; 1169 } 1170 1171 *status_flags |= E1000_RXD_STAT_EOP; 1172 1173 net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto); 1174 trace_e1000e_rx_metadata_protocols(hasip4, hasip6, l4hdr_proto); 1175 1176 /* VLAN state */ 1177 if (net_rx_pkt_is_vlan_stripped(pkt)) { 1178 *status_flags |= E1000_RXD_STAT_VP; 1179 *vlan_tag = cpu_to_le16(net_rx_pkt_get_vlan_tag(pkt)); 1180 trace_e1000e_rx_metadata_vlan(*vlan_tag); 1181 } 1182 1183 /* Packet parsing results */ 1184 if ((core->mac[RXCSUM] & E1000_RXCSUM_PCSD) != 0) { 1185 if (rss_info->enabled) { 1186 *rss = cpu_to_le32(rss_info->hash); 1187 *mrq = cpu_to_le32(rss_info->type | (rss_info->queue << 8)); 1188 trace_e1000e_rx_metadata_rss(*rss, *mrq); 1189 } 1190 } else if (hasip4) { 1191 *status_flags |= E1000_RXD_STAT_IPIDV; 1192 *ip_id = cpu_to_le16(net_rx_pkt_get_ip_id(pkt)); 1193 trace_e1000e_rx_metadata_ip_id(*ip_id); 1194 } 1195 1196 if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP && e1000e_is_tcp_ack(core, pkt)) { 1197 *status_flags |= E1000_RXD_STAT_ACK; 1198 
trace_e1000e_rx_metadata_ack(); 1199 } 1200 1201 if (hasip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_DIS)) { 1202 trace_e1000e_rx_metadata_ipv6_filtering_disabled(); 1203 pkt_type = E1000_RXD_PKT_MAC; 1204 } else if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP || 1205 l4hdr_proto == ETH_L4_HDR_PROTO_UDP) { 1206 pkt_type = hasip4 ? E1000_RXD_PKT_IP4_XDP : E1000_RXD_PKT_IP6_XDP; 1207 } else if (hasip4 || hasip6) { 1208 pkt_type = hasip4 ? E1000_RXD_PKT_IP4 : E1000_RXD_PKT_IP6; 1209 } else { 1210 pkt_type = E1000_RXD_PKT_MAC; 1211 } 1212 1213 *status_flags |= E1000_RXD_PKT_TYPE(pkt_type); 1214 trace_e1000e_rx_metadata_pkt_type(pkt_type); 1215 1216 /* RX CSO information */ 1217 if (hasip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_XSUM_DIS)) { 1218 trace_e1000e_rx_metadata_ipv6_sum_disabled(); 1219 goto func_exit; 1220 } 1221 1222 vhdr = net_rx_pkt_get_vhdr(pkt); 1223 1224 if (!(vhdr->flags & VIRTIO_NET_HDR_F_DATA_VALID) && 1225 !(vhdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) { 1226 trace_e1000e_rx_metadata_virthdr_no_csum_info(); 1227 e1000e_verify_csum_in_sw(core, pkt, status_flags, l4hdr_proto); 1228 goto func_exit; 1229 } 1230 1231 if (e1000e_rx_l3_cso_enabled(core)) { 1232 *status_flags |= hasip4 ? E1000_RXD_STAT_IPCS : 0; 1233 } else { 1234 trace_e1000e_rx_metadata_l3_cso_disabled(); 1235 } 1236 1237 if (e1000e_rx_l4_cso_enabled(core)) { 1238 switch (l4hdr_proto) { 1239 case ETH_L4_HDR_PROTO_TCP: 1240 *status_flags |= E1000_RXD_STAT_TCPCS; 1241 break; 1242 1243 case ETH_L4_HDR_PROTO_UDP: 1244 *status_flags |= E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS; 1245 break; 1246 1247 default: 1248 break; 1249 } 1250 } else { 1251 trace_e1000e_rx_metadata_l4_cso_disabled(); 1252 } 1253 1254 func_exit: 1255 trace_e1000e_rx_metadata_status_flags(*status_flags); 1256 *status_flags = cpu_to_le32(*status_flags); 1257 } 1258 1259 static inline void 1260 e1000e_write_lgcy_rx_descr(E1000ECore *core, struct e1000_rx_desc *desc, 1261 struct NetRxPkt *pkt, 1262 const E1000E_RSSInfo *rss_info, 1263 uint16_t length) 1264 { 1265 uint32_t status_flags, rss, mrq; 1266 uint16_t ip_id; 1267 1268 assert(!rss_info->enabled); 1269 1270 desc->length = cpu_to_le16(length); 1271 desc->csum = 0; 1272 1273 e1000e_build_rx_metadata(core, pkt, pkt != NULL, 1274 rss_info, 1275 &rss, &mrq, 1276 &status_flags, &ip_id, 1277 &desc->special); 1278 desc->errors = (uint8_t) (le32_to_cpu(status_flags) >> 24); 1279 desc->status = (uint8_t) le32_to_cpu(status_flags); 1280 } 1281 1282 static inline void 1283 e1000e_write_ext_rx_descr(E1000ECore *core, union e1000_rx_desc_extended *desc, 1284 struct NetRxPkt *pkt, 1285 const E1000E_RSSInfo *rss_info, 1286 uint16_t length) 1287 { 1288 memset(&desc->wb, 0, sizeof(desc->wb)); 1289 1290 desc->wb.upper.length = cpu_to_le16(length); 1291 1292 e1000e_build_rx_metadata(core, pkt, pkt != NULL, 1293 rss_info, 1294 &desc->wb.lower.hi_dword.rss, 1295 &desc->wb.lower.mrq, 1296 &desc->wb.upper.status_error, 1297 &desc->wb.lower.hi_dword.csum_ip.ip_id, 1298 &desc->wb.upper.vlan); 1299 } 1300 1301 static inline void 1302 e1000e_write_ps_rx_descr(E1000ECore *core, 1303 union e1000_rx_desc_packet_split *desc, 1304 struct NetRxPkt *pkt, 1305 const E1000E_RSSInfo *rss_info, 1306 size_t ps_hdr_len, 1307 uint16_t(*written)[MAX_PS_BUFFERS]) 1308 { 1309 int i; 1310 1311 memset(&desc->wb, 0, sizeof(desc->wb)); 1312 1313 desc->wb.middle.length0 = cpu_to_le16((*written)[0]); 1314 1315 for (i = 0; i < PS_PAGE_BUFFERS; i++) { 1316 desc->wb.upper.length[i] = cpu_to_le16((*written)[i + 1]); 1317 } 1318 1319 
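    /* Buffer lengths are recorded above; now fill in the write-back metadata. */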
e1000e_build_rx_metadata(core, pkt, pkt != NULL, 1320 rss_info, 1321 &desc->wb.lower.hi_dword.rss, 1322 &desc->wb.lower.mrq, 1323 &desc->wb.middle.status_error, 1324 &desc->wb.lower.hi_dword.csum_ip.ip_id, 1325 &desc->wb.middle.vlan); 1326 1327 desc->wb.upper.header_status = 1328 cpu_to_le16(ps_hdr_len | (ps_hdr_len ? E1000_RXDPS_HDRSTAT_HDRSP : 0)); 1329 1330 trace_e1000e_rx_desc_ps_write((*written)[0], (*written)[1], 1331 (*written)[2], (*written)[3]); 1332 } 1333 1334 static inline void 1335 e1000e_write_rx_descr(E1000ECore *core, union e1000_rx_desc_union *desc, 1336 struct NetRxPkt *pkt, const E1000E_RSSInfo *rss_info, 1337 size_t ps_hdr_len, uint16_t(*written)[MAX_PS_BUFFERS]) 1338 { 1339 if (e1000e_rx_use_legacy_descriptor(core)) { 1340 assert(ps_hdr_len == 0); 1341 e1000e_write_lgcy_rx_descr(core, &desc->legacy, pkt, rss_info, 1342 (*written)[0]); 1343 } else { 1344 if (core->mac[RCTL] & E1000_RCTL_DTYP_PS) { 1345 e1000e_write_ps_rx_descr(core, &desc->packet_split, pkt, rss_info, 1346 ps_hdr_len, written); 1347 } else { 1348 assert(ps_hdr_len == 0); 1349 e1000e_write_ext_rx_descr(core, &desc->extended, pkt, rss_info, 1350 (*written)[0]); 1351 } 1352 } 1353 } 1354 1355 static inline void 1356 e1000e_pci_dma_write_rx_desc(E1000ECore *core, dma_addr_t addr, 1357 union e1000_rx_desc_union *desc, dma_addr_t len) 1358 { 1359 PCIDevice *dev = core->owner; 1360 1361 if (e1000e_rx_use_legacy_descriptor(core)) { 1362 struct e1000_rx_desc *d = &desc->legacy; 1363 size_t offset = offsetof(struct e1000_rx_desc, status); 1364 uint8_t status = d->status; 1365 1366 d->status &= ~E1000_RXD_STAT_DD; 1367 pci_dma_write(dev, addr, desc, len); 1368 1369 if (status & E1000_RXD_STAT_DD) { 1370 d->status = status; 1371 pci_dma_write(dev, addr + offset, &status, sizeof(status)); 1372 } 1373 } else { 1374 if (core->mac[RCTL] & E1000_RCTL_DTYP_PS) { 1375 union e1000_rx_desc_packet_split *d = &desc->packet_split; 1376 size_t offset = offsetof(union e1000_rx_desc_packet_split, 1377 wb.middle.status_error); 1378 uint32_t status = d->wb.middle.status_error; 1379 1380 d->wb.middle.status_error &= ~E1000_RXD_STAT_DD; 1381 pci_dma_write(dev, addr, desc, len); 1382 1383 if (status & E1000_RXD_STAT_DD) { 1384 d->wb.middle.status_error = status; 1385 pci_dma_write(dev, addr + offset, &status, sizeof(status)); 1386 } 1387 } else { 1388 union e1000_rx_desc_extended *d = &desc->extended; 1389 size_t offset = offsetof(union e1000_rx_desc_extended, 1390 wb.upper.status_error); 1391 uint32_t status = d->wb.upper.status_error; 1392 1393 d->wb.upper.status_error &= ~E1000_RXD_STAT_DD; 1394 pci_dma_write(dev, addr, desc, len); 1395 1396 if (status & E1000_RXD_STAT_DD) { 1397 d->wb.upper.status_error = status; 1398 pci_dma_write(dev, addr + offset, &status, sizeof(status)); 1399 } 1400 } 1401 } 1402 } 1403 1404 typedef struct e1000e_ba_state_st { 1405 uint16_t written[MAX_PS_BUFFERS]; 1406 uint8_t cur_idx; 1407 } e1000e_ba_state; 1408 1409 static inline void 1410 e1000e_write_hdr_to_rx_buffers(E1000ECore *core, 1411 hwaddr ba[MAX_PS_BUFFERS], 1412 e1000e_ba_state *bastate, 1413 const char *data, 1414 dma_addr_t data_len) 1415 { 1416 assert(data_len <= core->rxbuf_sizes[0] - bastate->written[0]); 1417 1418 pci_dma_write(core->owner, ba[0] + bastate->written[0], data, data_len); 1419 bastate->written[0] += data_len; 1420 1421 bastate->cur_idx = 1; 1422 } 1423 1424 static void 1425 e1000e_write_to_rx_buffers(E1000ECore *core, 1426 hwaddr ba[MAX_PS_BUFFERS], 1427 e1000e_ba_state *bastate, 1428 const char *data, 1429 dma_addr_t 
data_len) 1430 { 1431 while (data_len > 0) { 1432 uint32_t cur_buf_len = core->rxbuf_sizes[bastate->cur_idx]; 1433 uint32_t cur_buf_bytes_left = cur_buf_len - 1434 bastate->written[bastate->cur_idx]; 1435 uint32_t bytes_to_write = MIN(data_len, cur_buf_bytes_left); 1436 1437 trace_e1000e_rx_desc_buff_write(bastate->cur_idx, 1438 ba[bastate->cur_idx], 1439 bastate->written[bastate->cur_idx], 1440 data, 1441 bytes_to_write); 1442 1443 pci_dma_write(core->owner, 1444 ba[bastate->cur_idx] + bastate->written[bastate->cur_idx], 1445 data, bytes_to_write); 1446 1447 bastate->written[bastate->cur_idx] += bytes_to_write; 1448 data += bytes_to_write; 1449 data_len -= bytes_to_write; 1450 1451 if (bastate->written[bastate->cur_idx] == cur_buf_len) { 1452 bastate->cur_idx++; 1453 } 1454 1455 assert(bastate->cur_idx < MAX_PS_BUFFERS); 1456 } 1457 } 1458 1459 static void 1460 e1000e_update_rx_stats(E1000ECore *core, size_t pkt_size, size_t pkt_fcs_size) 1461 { 1462 eth_pkt_types_e pkt_type = net_rx_pkt_get_packet_type(core->rx_pkt); 1463 e1000x_update_rx_total_stats(core->mac, pkt_type, pkt_size, pkt_fcs_size); 1464 } 1465 1466 static inline bool 1467 e1000e_rx_descr_threshold_hit(E1000ECore *core, const E1000E_RingInfo *rxi) 1468 { 1469 return e1000e_ring_free_descr_num(core, rxi) == 1470 e1000e_ring_len(core, rxi) >> core->rxbuf_min_shift; 1471 } 1472 1473 static bool 1474 e1000e_do_ps(E1000ECore *core, struct NetRxPkt *pkt, size_t *hdr_len) 1475 { 1476 bool hasip4, hasip6; 1477 EthL4HdrProto l4hdr_proto; 1478 bool fragment; 1479 1480 if (!e1000e_rx_use_ps_descriptor(core)) { 1481 return false; 1482 } 1483 1484 net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto); 1485 1486 if (hasip4) { 1487 fragment = net_rx_pkt_get_ip4_info(pkt)->fragment; 1488 } else if (hasip6) { 1489 fragment = net_rx_pkt_get_ip6_info(pkt)->fragment; 1490 } else { 1491 return false; 1492 } 1493 1494 if (fragment && (core->mac[RFCTL] & E1000_RFCTL_IPFRSP_DIS)) { 1495 return false; 1496 } 1497 1498 if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP || 1499 l4hdr_proto == ETH_L4_HDR_PROTO_UDP) { 1500 *hdr_len = net_rx_pkt_get_l5_hdr_offset(pkt); 1501 } else { 1502 *hdr_len = net_rx_pkt_get_l4_hdr_offset(pkt); 1503 } 1504 1505 if ((*hdr_len > core->rxbuf_sizes[0]) || 1506 (*hdr_len > net_rx_pkt_get_total_len(pkt))) { 1507 return false; 1508 } 1509 1510 return true; 1511 } 1512 1513 static void 1514 e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt, 1515 const E1000E_RxRing *rxr, 1516 const E1000E_RSSInfo *rss_info) 1517 { 1518 PCIDevice *d = core->owner; 1519 dma_addr_t base; 1520 union e1000_rx_desc_union desc; 1521 size_t desc_size; 1522 size_t desc_offset = 0; 1523 size_t iov_ofs = 0; 1524 1525 struct iovec *iov = net_rx_pkt_get_iovec(pkt); 1526 size_t size = net_rx_pkt_get_total_len(pkt); 1527 size_t total_size = size + e1000x_fcs_len(core->mac); 1528 const E1000E_RingInfo *rxi; 1529 size_t ps_hdr_len = 0; 1530 bool do_ps = e1000e_do_ps(core, pkt, &ps_hdr_len); 1531 bool is_first = true; 1532 1533 rxi = rxr->i; 1534 1535 do { 1536 hwaddr ba[MAX_PS_BUFFERS]; 1537 e1000e_ba_state bastate = { { 0 } }; 1538 bool is_last = false; 1539 1540 desc_size = total_size - desc_offset; 1541 1542 if (desc_size > core->rx_desc_buf_size) { 1543 desc_size = core->rx_desc_buf_size; 1544 } 1545 1546 if (e1000e_ring_empty(core, rxi)) { 1547 return; 1548 } 1549 1550 base = e1000e_ring_head_descr(core, rxi); 1551 1552 pci_dma_read(d, base, &desc, core->rx_desc_len); 1553 1554 trace_e1000e_rx_descr(rxi->idx, base, core->rx_desc_len); 
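        /* Extract the guest buffer address(es) advertised by this descriptor. */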
1555 1556 e1000e_read_rx_descr(core, &desc, ba); 1557 1558 if (ba[0]) { 1559 if (desc_offset < size) { 1560 static const uint32_t fcs_pad; 1561 size_t iov_copy; 1562 size_t copy_size = size - desc_offset; 1563 if (copy_size > core->rx_desc_buf_size) { 1564 copy_size = core->rx_desc_buf_size; 1565 } 1566 1567 /* For PS mode copy the packet header first */ 1568 if (do_ps) { 1569 if (is_first) { 1570 size_t ps_hdr_copied = 0; 1571 do { 1572 iov_copy = MIN(ps_hdr_len - ps_hdr_copied, 1573 iov->iov_len - iov_ofs); 1574 1575 e1000e_write_hdr_to_rx_buffers(core, ba, &bastate, 1576 iov->iov_base, iov_copy); 1577 1578 copy_size -= iov_copy; 1579 ps_hdr_copied += iov_copy; 1580 1581 iov_ofs += iov_copy; 1582 if (iov_ofs == iov->iov_len) { 1583 iov++; 1584 iov_ofs = 0; 1585 } 1586 } while (ps_hdr_copied < ps_hdr_len); 1587 1588 is_first = false; 1589 } else { 1590 /* Leave buffer 0 of each descriptor except first */ 1591 /* empty as per spec 7.1.5.1 */ 1592 e1000e_write_hdr_to_rx_buffers(core, ba, &bastate, 1593 NULL, 0); 1594 } 1595 } 1596 1597 /* Copy packet payload */ 1598 while (copy_size) { 1599 iov_copy = MIN(copy_size, iov->iov_len - iov_ofs); 1600 1601 e1000e_write_to_rx_buffers(core, ba, &bastate, 1602 iov->iov_base + iov_ofs, iov_copy); 1603 1604 copy_size -= iov_copy; 1605 iov_ofs += iov_copy; 1606 if (iov_ofs == iov->iov_len) { 1607 iov++; 1608 iov_ofs = 0; 1609 } 1610 } 1611 1612 if (desc_offset + desc_size >= total_size) { 1613 /* Simulate FCS checksum presence in the last descriptor */ 1614 e1000e_write_to_rx_buffers(core, ba, &bastate, 1615 (const char *) &fcs_pad, e1000x_fcs_len(core->mac)); 1616 } 1617 } 1618 } else { /* as per intel docs; skip descriptors with null buf addr */ 1619 trace_e1000e_rx_null_descriptor(); 1620 } 1621 desc_offset += desc_size; 1622 if (desc_offset >= total_size) { 1623 is_last = true; 1624 } 1625 1626 e1000e_write_rx_descr(core, &desc, is_last ? core->rx_pkt : NULL, 1627 rss_info, do_ps ? 
ps_hdr_len : 0, &bastate.written); 1628 e1000e_pci_dma_write_rx_desc(core, base, &desc, core->rx_desc_len); 1629 1630 e1000e_ring_advance(core, rxi, 1631 core->rx_desc_len / E1000_MIN_RX_DESC_LEN); 1632 1633 } while (desc_offset < total_size); 1634 1635 e1000e_update_rx_stats(core, size, total_size); 1636 } 1637 1638 static inline void 1639 e1000e_rx_fix_l4_csum(E1000ECore *core, struct NetRxPkt *pkt) 1640 { 1641 struct virtio_net_hdr *vhdr = net_rx_pkt_get_vhdr(pkt); 1642 1643 if (vhdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { 1644 net_rx_pkt_fix_l4_csum(pkt); 1645 } 1646 } 1647 1648 ssize_t 1649 e1000e_receive_iov(E1000ECore *core, const struct iovec *iov, int iovcnt) 1650 { 1651 return e1000e_receive_internal(core, iov, iovcnt, core->has_vnet); 1652 } 1653 1654 static ssize_t 1655 e1000e_receive_internal(E1000ECore *core, const struct iovec *iov, int iovcnt, 1656 bool has_vnet) 1657 { 1658 uint32_t causes = 0; 1659 uint8_t buf[ETH_ZLEN]; 1660 struct iovec min_iov; 1661 size_t size, orig_size; 1662 size_t iov_ofs = 0; 1663 E1000E_RxRing rxr; 1664 E1000E_RSSInfo rss_info; 1665 size_t total_size; 1666 ssize_t retval; 1667 bool rdmts_hit; 1668 1669 trace_e1000e_rx_receive_iov(iovcnt); 1670 1671 if (!e1000x_hw_rx_enabled(core->mac)) { 1672 return -1; 1673 } 1674 1675 /* Pull virtio header in */ 1676 if (has_vnet) { 1677 net_rx_pkt_set_vhdr_iovec(core->rx_pkt, iov, iovcnt); 1678 iov_ofs = sizeof(struct virtio_net_hdr); 1679 } else { 1680 net_rx_pkt_unset_vhdr(core->rx_pkt); 1681 } 1682 1683 orig_size = iov_size(iov, iovcnt); 1684 size = orig_size - iov_ofs; 1685 1686 /* Pad to minimum Ethernet frame length */ 1687 if (size < sizeof(buf)) { 1688 iov_to_buf(iov, iovcnt, iov_ofs, buf, size); 1689 memset(&buf[size], 0, sizeof(buf) - size); 1690 e1000x_inc_reg_if_not_full(core->mac, RUC); 1691 min_iov.iov_base = buf; 1692 min_iov.iov_len = size = sizeof(buf); 1693 iovcnt = 1; 1694 iov = &min_iov; 1695 iov_ofs = 0; 1696 } else { 1697 iov_to_buf(iov, iovcnt, iov_ofs, buf, ETH_HLEN + 4); 1698 } 1699 1700 /* Discard oversized packets if !LPE and !SBP. */ 1701 if (e1000x_is_oversized(core->mac, size)) { 1702 return orig_size; 1703 } 1704 1705 net_rx_pkt_set_packet_type(core->rx_pkt, 1706 get_eth_packet_type(PKT_GET_ETH_HDR(buf))); 1707 1708 if (!e1000e_receive_filter(core, buf)) { 1709 trace_e1000e_rx_flt_dropped(); 1710 return orig_size; 1711 } 1712 1713 net_rx_pkt_attach_iovec_ex(core->rx_pkt, iov, iovcnt, iov_ofs, 1714 e1000x_vlan_enabled(core->mac) ? 
0 : -1, 1715 core->mac[VET], 0); 1716 1717 e1000e_rss_parse_packet(core, core->rx_pkt, &rss_info); 1718 e1000e_rx_ring_init(core, &rxr, rss_info.queue); 1719 1720 total_size = net_rx_pkt_get_total_len(core->rx_pkt) + 1721 e1000x_fcs_len(core->mac); 1722 1723 if (e1000e_has_rxbufs(core, rxr.i, total_size)) { 1724 e1000e_rx_fix_l4_csum(core, core->rx_pkt); 1725 1726 e1000e_write_packet_to_guest(core, core->rx_pkt, &rxr, &rss_info); 1727 1728 retval = orig_size; 1729 1730 /* Perform small receive detection (RSRPD) */ 1731 if (total_size < core->mac[RSRPD]) { 1732 causes |= E1000_ICS_SRPD; 1733 } 1734 1735 /* Perform ACK receive detection */ 1736 if (!(core->mac[RFCTL] & E1000_RFCTL_ACK_DIS) && 1737 (e1000e_is_tcp_ack(core, core->rx_pkt))) { 1738 causes |= E1000_ICS_ACK; 1739 } 1740 1741 /* Check if receive descriptor minimum threshold hit */ 1742 rdmts_hit = e1000e_rx_descr_threshold_hit(core, rxr.i); 1743 causes |= e1000e_rx_wb_interrupt_cause(core, rxr.i->idx, rdmts_hit); 1744 1745 trace_e1000e_rx_written_to_guest(rxr.i->idx); 1746 } else { 1747 causes |= E1000_ICS_RXO; 1748 retval = 0; 1749 1750 trace_e1000e_rx_not_written_to_guest(rxr.i->idx); 1751 } 1752 1753 if (!e1000e_intrmgr_delay_rx_causes(core, &causes)) { 1754 trace_e1000e_rx_interrupt_set(causes); 1755 e1000e_set_interrupt_cause(core, causes); 1756 } else { 1757 trace_e1000e_rx_interrupt_delayed(causes); 1758 } 1759 1760 return retval; 1761 } 1762 1763 static inline bool 1764 e1000e_have_autoneg(E1000ECore *core) 1765 { 1766 return core->phy[0][MII_BMCR] & MII_BMCR_AUTOEN; 1767 } 1768 1769 static void e1000e_update_flowctl_status(E1000ECore *core) 1770 { 1771 if (e1000e_have_autoneg(core) && 1772 core->phy[0][MII_BMSR] & MII_BMSR_AN_COMP) { 1773 trace_e1000e_link_autoneg_flowctl(true); 1774 core->mac[CTRL] |= E1000_CTRL_TFCE | E1000_CTRL_RFCE; 1775 } else { 1776 trace_e1000e_link_autoneg_flowctl(false); 1777 } 1778 } 1779 1780 static inline void 1781 e1000e_link_down(E1000ECore *core) 1782 { 1783 e1000x_update_regs_on_link_down(core->mac, core->phy[0]); 1784 e1000e_update_flowctl_status(core); 1785 } 1786 1787 static inline void 1788 e1000e_set_phy_ctrl(E1000ECore *core, int index, uint16_t val) 1789 { 1790 /* bits 0-5 reserved; MII_BMCR_[ANRESTART,RESET] are self clearing */ 1791 core->phy[0][MII_BMCR] = val & ~(0x3f | 1792 MII_BMCR_RESET | 1793 MII_BMCR_ANRESTART); 1794 1795 if ((val & MII_BMCR_ANRESTART) && 1796 e1000e_have_autoneg(core)) { 1797 e1000x_restart_autoneg(core->mac, core->phy[0], core->autoneg_timer); 1798 } 1799 } 1800 1801 static void 1802 e1000e_set_phy_oem_bits(E1000ECore *core, int index, uint16_t val) 1803 { 1804 core->phy[0][PHY_OEM_BITS] = val & ~BIT(10); 1805 1806 if (val & BIT(10)) { 1807 e1000x_restart_autoneg(core->mac, core->phy[0], core->autoneg_timer); 1808 } 1809 } 1810 1811 static void 1812 e1000e_set_phy_page(E1000ECore *core, int index, uint16_t val) 1813 { 1814 core->phy[0][PHY_PAGE] = val & PHY_PAGE_RW_MASK; 1815 } 1816 1817 void 1818 e1000e_core_set_link_status(E1000ECore *core) 1819 { 1820 NetClientState *nc = qemu_get_queue(core->owner_nic); 1821 uint32_t old_status = core->mac[STATUS]; 1822 1823 trace_e1000e_link_status_changed(nc->link_down ? 
false : true); 1824 1825 if (nc->link_down) { 1826 e1000x_update_regs_on_link_down(core->mac, core->phy[0]); 1827 } else { 1828 if (e1000e_have_autoneg(core) && 1829 !(core->phy[0][MII_BMSR] & MII_BMSR_AN_COMP)) { 1830 e1000x_restart_autoneg(core->mac, core->phy[0], 1831 core->autoneg_timer); 1832 } else { 1833 e1000x_update_regs_on_link_up(core->mac, core->phy[0]); 1834 e1000e_start_recv(core); 1835 } 1836 } 1837 1838 if (core->mac[STATUS] != old_status) { 1839 e1000e_set_interrupt_cause(core, E1000_ICR_LSC); 1840 } 1841 } 1842 1843 static void 1844 e1000e_set_ctrl(E1000ECore *core, int index, uint32_t val) 1845 { 1846 trace_e1000e_core_ctrl_write(index, val); 1847 1848 /* RST is self clearing */ 1849 core->mac[CTRL] = val & ~E1000_CTRL_RST; 1850 core->mac[CTRL_DUP] = core->mac[CTRL]; 1851 1852 trace_e1000e_link_set_params( 1853 !!(val & E1000_CTRL_ASDE), 1854 (val & E1000_CTRL_SPD_SEL) >> E1000_CTRL_SPD_SHIFT, 1855 !!(val & E1000_CTRL_FRCSPD), 1856 !!(val & E1000_CTRL_FRCDPX), 1857 !!(val & E1000_CTRL_RFCE), 1858 !!(val & E1000_CTRL_TFCE)); 1859 1860 if (val & E1000_CTRL_RST) { 1861 trace_e1000e_core_ctrl_sw_reset(); 1862 e1000e_reset(core, true); 1863 } 1864 1865 if (val & E1000_CTRL_PHY_RST) { 1866 trace_e1000e_core_ctrl_phy_reset(); 1867 core->mac[STATUS] |= E1000_STATUS_PHYRA; 1868 } 1869 } 1870 1871 static void 1872 e1000e_set_rfctl(E1000ECore *core, int index, uint32_t val) 1873 { 1874 trace_e1000e_rx_set_rfctl(val); 1875 1876 if (!(val & E1000_RFCTL_ISCSI_DIS)) { 1877 trace_e1000e_wrn_iscsi_filtering_not_supported(); 1878 } 1879 1880 if (!(val & E1000_RFCTL_NFSW_DIS)) { 1881 trace_e1000e_wrn_nfsw_filtering_not_supported(); 1882 } 1883 1884 if (!(val & E1000_RFCTL_NFSR_DIS)) { 1885 trace_e1000e_wrn_nfsr_filtering_not_supported(); 1886 } 1887 1888 core->mac[RFCTL] = val; 1889 } 1890 1891 static void 1892 e1000e_calc_per_desc_buf_size(E1000ECore *core) 1893 { 1894 int i; 1895 core->rx_desc_buf_size = 0; 1896 1897 for (i = 0; i < ARRAY_SIZE(core->rxbuf_sizes); i++) { 1898 core->rx_desc_buf_size += core->rxbuf_sizes[i]; 1899 } 1900 } 1901 1902 static void 1903 e1000e_parse_rxbufsize(E1000ECore *core) 1904 { 1905 uint32_t rctl = core->mac[RCTL]; 1906 1907 memset(core->rxbuf_sizes, 0, sizeof(core->rxbuf_sizes)); 1908 1909 if (rctl & E1000_RCTL_DTYP_MASK) { 1910 uint32_t bsize; 1911 1912 bsize = core->mac[PSRCTL] & E1000_PSRCTL_BSIZE0_MASK; 1913 core->rxbuf_sizes[0] = (bsize >> E1000_PSRCTL_BSIZE0_SHIFT) * 128; 1914 1915 bsize = core->mac[PSRCTL] & E1000_PSRCTL_BSIZE1_MASK; 1916 core->rxbuf_sizes[1] = (bsize >> E1000_PSRCTL_BSIZE1_SHIFT) * 1024; 1917 1918 bsize = core->mac[PSRCTL] & E1000_PSRCTL_BSIZE2_MASK; 1919 core->rxbuf_sizes[2] = (bsize >> E1000_PSRCTL_BSIZE2_SHIFT) * 1024; 1920 1921 bsize = core->mac[PSRCTL] & E1000_PSRCTL_BSIZE3_MASK; 1922 core->rxbuf_sizes[3] = (bsize >> E1000_PSRCTL_BSIZE3_SHIFT) * 1024; 1923 } else if (rctl & E1000_RCTL_FLXBUF_MASK) { 1924 int flxbuf = rctl & E1000_RCTL_FLXBUF_MASK; 1925 core->rxbuf_sizes[0] = (flxbuf >> E1000_RCTL_FLXBUF_SHIFT) * 1024; 1926 } else { 1927 core->rxbuf_sizes[0] = e1000x_rxbufsize(rctl); 1928 } 1929 1930 trace_e1000e_rx_desc_buff_sizes(core->rxbuf_sizes[0], core->rxbuf_sizes[1], 1931 core->rxbuf_sizes[2], core->rxbuf_sizes[3]); 1932 1933 e1000e_calc_per_desc_buf_size(core); 1934 } 1935 1936 static void 1937 e1000e_calc_rxdesclen(E1000ECore *core) 1938 { 1939 if (e1000e_rx_use_legacy_descriptor(core)) { 1940 core->rx_desc_len = sizeof(struct e1000_rx_desc); 1941 } else { 1942 if (core->mac[RCTL] & E1000_RCTL_DTYP_PS) { 1943 
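            /* Packet-split descriptors are the largest of the three formats. */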
core->rx_desc_len = sizeof(union e1000_rx_desc_packet_split); 1944 } else { 1945 core->rx_desc_len = sizeof(union e1000_rx_desc_extended); 1946 } 1947 } 1948 trace_e1000e_rx_desc_len(core->rx_desc_len); 1949 } 1950 1951 static void 1952 e1000e_set_rx_control(E1000ECore *core, int index, uint32_t val) 1953 { 1954 core->mac[RCTL] = val; 1955 trace_e1000e_rx_set_rctl(core->mac[RCTL]); 1956 1957 if (val & E1000_RCTL_EN) { 1958 e1000e_parse_rxbufsize(core); 1959 e1000e_calc_rxdesclen(core); 1960 core->rxbuf_min_shift = ((val / E1000_RCTL_RDMTS_QUAT) & 3) + 1 + 1961 E1000_RING_DESC_LEN_SHIFT; 1962 1963 e1000e_start_recv(core); 1964 } 1965 } 1966 1967 static 1968 void(*e1000e_phyreg_writeops[E1000E_PHY_PAGES][E1000E_PHY_PAGE_SIZE]) 1969 (E1000ECore *, int, uint16_t) = { 1970 [0] = { 1971 [MII_BMCR] = e1000e_set_phy_ctrl, 1972 [PHY_PAGE] = e1000e_set_phy_page, 1973 [PHY_OEM_BITS] = e1000e_set_phy_oem_bits 1974 } 1975 }; 1976 1977 static inline void 1978 e1000e_clear_ims_bits(E1000ECore *core, uint32_t bits) 1979 { 1980 trace_e1000e_irq_clear_ims(bits, core->mac[IMS], core->mac[IMS] & ~bits); 1981 core->mac[IMS] &= ~bits; 1982 } 1983 1984 static inline bool 1985 e1000e_postpone_interrupt(E1000IntrDelayTimer *timer) 1986 { 1987 if (timer->running) { 1988 trace_e1000e_irq_postponed_by_xitr(timer->delay_reg << 2); 1989 1990 return true; 1991 } 1992 1993 if (timer->core->mac[timer->delay_reg] != 0) { 1994 e1000e_intrmgr_rearm_timer(timer); 1995 } 1996 1997 return false; 1998 } 1999 2000 static inline bool 2001 e1000e_itr_should_postpone(E1000ECore *core) 2002 { 2003 return e1000e_postpone_interrupt(&core->itr); 2004 } 2005 2006 static inline bool 2007 e1000e_eitr_should_postpone(E1000ECore *core, int idx) 2008 { 2009 return e1000e_postpone_interrupt(&core->eitr[idx]); 2010 } 2011 2012 static void 2013 e1000e_msix_notify_one(E1000ECore *core, uint32_t cause, uint32_t int_cfg) 2014 { 2015 uint32_t effective_eiac; 2016 2017 if (E1000_IVAR_ENTRY_VALID(int_cfg)) { 2018 uint32_t vec = E1000_IVAR_ENTRY_VEC(int_cfg); 2019 if (vec < E1000E_MSIX_VEC_NUM) { 2020 if (!e1000e_eitr_should_postpone(core, vec)) { 2021 trace_e1000e_irq_msix_notify_vec(vec); 2022 msix_notify(core->owner, vec); 2023 } 2024 } else { 2025 trace_e1000e_wrn_msix_vec_wrong(cause, int_cfg); 2026 } 2027 } else { 2028 trace_e1000e_wrn_msix_invalid(cause, int_cfg); 2029 } 2030 2031 if (core->mac[CTRL_EXT] & E1000_CTRL_EXT_EIAME) { 2032 trace_e1000e_irq_iam_clear_eiame(core->mac[IAM], cause); 2033 core->mac[IAM] &= ~cause; 2034 } 2035 2036 trace_e1000e_irq_icr_clear_eiac(core->mac[ICR], core->mac[EIAC]); 2037 2038 effective_eiac = core->mac[EIAC] & cause; 2039 2040 core->mac[ICR] &= ~effective_eiac; 2041 core->msi_causes_pending &= ~effective_eiac; 2042 2043 if (!(core->mac[CTRL_EXT] & E1000_CTRL_EXT_IAME)) { 2044 core->mac[IMS] &= ~effective_eiac; 2045 } 2046 } 2047 2048 static void 2049 e1000e_msix_notify(E1000ECore *core, uint32_t causes) 2050 { 2051 if (causes & E1000_ICR_RXQ0) { 2052 e1000e_msix_notify_one(core, E1000_ICR_RXQ0, 2053 E1000_IVAR_RXQ0(core->mac[IVAR])); 2054 } 2055 2056 if (causes & E1000_ICR_RXQ1) { 2057 e1000e_msix_notify_one(core, E1000_ICR_RXQ1, 2058 E1000_IVAR_RXQ1(core->mac[IVAR])); 2059 } 2060 2061 if (causes & E1000_ICR_TXQ0) { 2062 e1000e_msix_notify_one(core, E1000_ICR_TXQ0, 2063 E1000_IVAR_TXQ0(core->mac[IVAR])); 2064 } 2065 2066 if (causes & E1000_ICR_TXQ1) { 2067 e1000e_msix_notify_one(core, E1000_ICR_TXQ1, 2068 E1000_IVAR_TXQ1(core->mac[IVAR])); 2069 } 2070 2071 if (causes & E1000_ICR_OTHER) { 2072 
e1000e_msix_notify_one(core, E1000_ICR_OTHER, 2073 E1000_IVAR_OTHER(core->mac[IVAR])); 2074 } 2075 } 2076 2077 static void 2078 e1000e_msix_clear_one(E1000ECore *core, uint32_t cause, uint32_t int_cfg) 2079 { 2080 if (E1000_IVAR_ENTRY_VALID(int_cfg)) { 2081 uint32_t vec = E1000_IVAR_ENTRY_VEC(int_cfg); 2082 if (vec < E1000E_MSIX_VEC_NUM) { 2083 trace_e1000e_irq_msix_pending_clearing(cause, int_cfg, vec); 2084 msix_clr_pending(core->owner, vec); 2085 } else { 2086 trace_e1000e_wrn_msix_vec_wrong(cause, int_cfg); 2087 } 2088 } else { 2089 trace_e1000e_wrn_msix_invalid(cause, int_cfg); 2090 } 2091 } 2092 2093 static void 2094 e1000e_msix_clear(E1000ECore *core, uint32_t causes) 2095 { 2096 if (causes & E1000_ICR_RXQ0) { 2097 e1000e_msix_clear_one(core, E1000_ICR_RXQ0, 2098 E1000_IVAR_RXQ0(core->mac[IVAR])); 2099 } 2100 2101 if (causes & E1000_ICR_RXQ1) { 2102 e1000e_msix_clear_one(core, E1000_ICR_RXQ1, 2103 E1000_IVAR_RXQ1(core->mac[IVAR])); 2104 } 2105 2106 if (causes & E1000_ICR_TXQ0) { 2107 e1000e_msix_clear_one(core, E1000_ICR_TXQ0, 2108 E1000_IVAR_TXQ0(core->mac[IVAR])); 2109 } 2110 2111 if (causes & E1000_ICR_TXQ1) { 2112 e1000e_msix_clear_one(core, E1000_ICR_TXQ1, 2113 E1000_IVAR_TXQ1(core->mac[IVAR])); 2114 } 2115 2116 if (causes & E1000_ICR_OTHER) { 2117 e1000e_msix_clear_one(core, E1000_ICR_OTHER, 2118 E1000_IVAR_OTHER(core->mac[IVAR])); 2119 } 2120 } 2121 2122 static inline void 2123 e1000e_fix_icr_asserted(E1000ECore *core) 2124 { 2125 core->mac[ICR] &= ~E1000_ICR_ASSERTED; 2126 if (core->mac[ICR]) { 2127 core->mac[ICR] |= E1000_ICR_ASSERTED; 2128 } 2129 2130 trace_e1000e_irq_fix_icr_asserted(core->mac[ICR]); 2131 } 2132 2133 static void 2134 e1000e_send_msi(E1000ECore *core, bool msix) 2135 { 2136 uint32_t causes = core->mac[ICR] & core->mac[IMS] & ~E1000_ICR_ASSERTED; 2137 2138 core->msi_causes_pending &= causes; 2139 causes ^= core->msi_causes_pending; 2140 if (causes == 0) { 2141 return; 2142 } 2143 core->msi_causes_pending |= causes; 2144 2145 if (msix) { 2146 e1000e_msix_notify(core, causes); 2147 } else { 2148 if (!e1000e_itr_should_postpone(core)) { 2149 trace_e1000e_irq_msi_notify(causes); 2150 msi_notify(core->owner, 0); 2151 } 2152 } 2153 } 2154 2155 static void 2156 e1000e_update_interrupt_state(E1000ECore *core) 2157 { 2158 bool interrupts_pending; 2159 bool is_msix = msix_enabled(core->owner); 2160 2161 /* Set ICR[OTHER] for MSI-X */ 2162 if (is_msix) { 2163 if (core->mac[ICR] & E1000_ICR_OTHER_CAUSES) { 2164 core->mac[ICR] |= E1000_ICR_OTHER; 2165 trace_e1000e_irq_add_msi_other(core->mac[ICR]); 2166 } 2167 } 2168 2169 e1000e_fix_icr_asserted(core); 2170 2171 /* 2172 * Make sure ICR and ICS registers have the same value. 2173 * The spec says that the ICS register is write-only. However in practice, 2174 * on real hardware ICS is readable, and for reads it has the same value as 2175 * ICR (except that ICS does not have the clear on read behaviour of ICR). 2176 * 2177 * The VxWorks PRO/1000 driver uses this behaviour. 2178 */ 2179 core->mac[ICS] = core->mac[ICR]; 2180 2181 interrupts_pending = (core->mac[IMS] & core->mac[ICR]) ? 
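/* an interrupt is pending only while at least one unmasked cause bit is set */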
true : false; 2182 if (!interrupts_pending) { 2183 core->msi_causes_pending = 0; 2184 } 2185 2186 trace_e1000e_irq_pending_interrupts(core->mac[ICR] & core->mac[IMS], 2187 core->mac[ICR], core->mac[IMS]); 2188 2189 if (is_msix || msi_enabled(core->owner)) { 2190 if (interrupts_pending) { 2191 e1000e_send_msi(core, is_msix); 2192 } 2193 } else { 2194 if (interrupts_pending) { 2195 if (!e1000e_itr_should_postpone(core)) { 2196 e1000e_raise_legacy_irq(core); 2197 } 2198 } else { 2199 e1000e_lower_legacy_irq(core); 2200 } 2201 } 2202 } 2203 2204 static void 2205 e1000e_set_interrupt_cause(E1000ECore *core, uint32_t val) 2206 { 2207 trace_e1000e_irq_set_cause_entry(val, core->mac[ICR]); 2208 2209 val |= e1000e_intmgr_collect_delayed_causes(core); 2210 core->mac[ICR] |= val; 2211 2212 trace_e1000e_irq_set_cause_exit(val, core->mac[ICR]); 2213 2214 e1000e_update_interrupt_state(core); 2215 } 2216 2217 static inline void 2218 e1000e_autoneg_timer(void *opaque) 2219 { 2220 E1000ECore *core = opaque; 2221 if (!qemu_get_queue(core->owner_nic)->link_down) { 2222 e1000x_update_regs_on_autoneg_done(core->mac, core->phy[0]); 2223 e1000e_start_recv(core); 2224 2225 e1000e_update_flowctl_status(core); 2226 /* signal link status change to the guest */ 2227 e1000e_set_interrupt_cause(core, E1000_ICR_LSC); 2228 } 2229 } 2230 2231 static inline uint16_t 2232 e1000e_get_reg_index_with_offset(const uint16_t *mac_reg_access, hwaddr addr) 2233 { 2234 uint16_t index = (addr & 0x1ffff) >> 2; 2235 return index + (mac_reg_access[index] & 0xfffe); 2236 } 2237 2238 static const char e1000e_phy_regcap[E1000E_PHY_PAGES][0x20] = { 2239 [0] = { 2240 [MII_BMCR] = PHY_ANYPAGE | PHY_RW, 2241 [MII_BMSR] = PHY_ANYPAGE | PHY_R, 2242 [MII_PHYID1] = PHY_ANYPAGE | PHY_R, 2243 [MII_PHYID2] = PHY_ANYPAGE | PHY_R, 2244 [MII_ANAR] = PHY_ANYPAGE | PHY_RW, 2245 [MII_ANLPAR] = PHY_ANYPAGE | PHY_R, 2246 [MII_ANER] = PHY_ANYPAGE | PHY_R, 2247 [MII_ANNP] = PHY_ANYPAGE | PHY_RW, 2248 [MII_ANLPRNP] = PHY_ANYPAGE | PHY_R, 2249 [MII_CTRL1000] = PHY_ANYPAGE | PHY_RW, 2250 [MII_STAT1000] = PHY_ANYPAGE | PHY_R, 2251 [MII_EXTSTAT] = PHY_ANYPAGE | PHY_R, 2252 [PHY_PAGE] = PHY_ANYPAGE | PHY_RW, 2253 2254 [PHY_COPPER_CTRL1] = PHY_RW, 2255 [PHY_COPPER_STAT1] = PHY_R, 2256 [PHY_COPPER_CTRL3] = PHY_RW, 2257 [PHY_RX_ERR_CNTR] = PHY_R, 2258 [PHY_OEM_BITS] = PHY_RW, 2259 [PHY_BIAS_1] = PHY_RW, 2260 [PHY_BIAS_2] = PHY_RW, 2261 [PHY_COPPER_INT_ENABLE] = PHY_RW, 2262 [PHY_COPPER_STAT2] = PHY_R, 2263 [PHY_COPPER_CTRL2] = PHY_RW 2264 }, 2265 [2] = { 2266 [PHY_MAC_CTRL1] = PHY_RW, 2267 [PHY_MAC_INT_ENABLE] = PHY_RW, 2268 [PHY_MAC_STAT] = PHY_R, 2269 [PHY_MAC_CTRL2] = PHY_RW 2270 }, 2271 [3] = { 2272 [PHY_LED_03_FUNC_CTRL1] = PHY_RW, 2273 [PHY_LED_03_POL_CTRL] = PHY_RW, 2274 [PHY_LED_TIMER_CTRL] = PHY_RW, 2275 [PHY_LED_45_CTRL] = PHY_RW 2276 }, 2277 [5] = { 2278 [PHY_1000T_SKEW] = PHY_R, 2279 [PHY_1000T_SWAP] = PHY_R 2280 }, 2281 [6] = { 2282 [PHY_CRC_COUNTERS] = PHY_R 2283 } 2284 }; 2285 2286 static bool 2287 e1000e_phy_reg_check_cap(E1000ECore *core, uint32_t addr, 2288 char cap, uint8_t *page) 2289 { 2290 *page = 2291 (e1000e_phy_regcap[0][addr] & PHY_ANYPAGE) ? 
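/* registers flagged PHY_ANYPAGE resolve to page 0, regardless of the current PHY_PAGE selection */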
0 2292 : core->phy[0][PHY_PAGE]; 2293 2294 if (*page >= E1000E_PHY_PAGES) { 2295 return false; 2296 } 2297 2298 return e1000e_phy_regcap[*page][addr] & cap; 2299 } 2300 2301 static void 2302 e1000e_phy_reg_write(E1000ECore *core, uint8_t page, 2303 uint32_t addr, uint16_t data) 2304 { 2305 assert(page < E1000E_PHY_PAGES); 2306 assert(addr < E1000E_PHY_PAGE_SIZE); 2307 2308 if (e1000e_phyreg_writeops[page][addr]) { 2309 e1000e_phyreg_writeops[page][addr](core, addr, data); 2310 } else { 2311 core->phy[page][addr] = data; 2312 } 2313 } 2314 2315 static void 2316 e1000e_set_mdic(E1000ECore *core, int index, uint32_t val) 2317 { 2318 uint32_t data = val & E1000_MDIC_DATA_MASK; 2319 uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT); 2320 uint8_t page; 2321 2322 if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) { /* phy # */ 2323 val = core->mac[MDIC] | E1000_MDIC_ERROR; 2324 } else if (val & E1000_MDIC_OP_READ) { 2325 if (!e1000e_phy_reg_check_cap(core, addr, PHY_R, &page)) { 2326 trace_e1000e_core_mdic_read_unhandled(page, addr); 2327 val |= E1000_MDIC_ERROR; 2328 } else { 2329 val = (val ^ data) | core->phy[page][addr]; 2330 trace_e1000e_core_mdic_read(page, addr, val); 2331 } 2332 } else if (val & E1000_MDIC_OP_WRITE) { 2333 if (!e1000e_phy_reg_check_cap(core, addr, PHY_W, &page)) { 2334 trace_e1000e_core_mdic_write_unhandled(page, addr); 2335 val |= E1000_MDIC_ERROR; 2336 } else { 2337 trace_e1000e_core_mdic_write(page, addr, data); 2338 e1000e_phy_reg_write(core, page, addr, data); 2339 } 2340 } 2341 core->mac[MDIC] = val | E1000_MDIC_READY; 2342 2343 if (val & E1000_MDIC_INT_EN) { 2344 e1000e_set_interrupt_cause(core, E1000_ICR_MDAC); 2345 } 2346 } 2347 2348 static void 2349 e1000e_set_rdt(E1000ECore *core, int index, uint32_t val) 2350 { 2351 core->mac[index] = val & 0xffff; 2352 trace_e1000e_rx_set_rdt(e1000e_mq_queue_idx(RDT0, index), val); 2353 e1000e_start_recv(core); 2354 } 2355 2356 static void 2357 e1000e_set_status(E1000ECore *core, int index, uint32_t val) 2358 { 2359 if ((val & E1000_STATUS_PHYRA) == 0) { 2360 core->mac[index] &= ~E1000_STATUS_PHYRA; 2361 } 2362 } 2363 2364 static void 2365 e1000e_set_ctrlext(E1000ECore *core, int index, uint32_t val) 2366 { 2367 trace_e1000e_link_set_ext_params(!!(val & E1000_CTRL_EXT_ASDCHK), 2368 !!(val & E1000_CTRL_EXT_SPD_BYPS)); 2369 2370 /* Zero self-clearing bits */ 2371 val &= ~(E1000_CTRL_EXT_ASDCHK | E1000_CTRL_EXT_EE_RST); 2372 core->mac[CTRL_EXT] = val; 2373 } 2374 2375 static void 2376 e1000e_set_pbaclr(E1000ECore *core, int index, uint32_t val) 2377 { 2378 int i; 2379 2380 core->mac[PBACLR] = val & E1000_PBACLR_VALID_MASK; 2381 2382 if (!msix_enabled(core->owner)) { 2383 return; 2384 } 2385 2386 for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) { 2387 if (core->mac[PBACLR] & BIT(i)) { 2388 msix_clr_pending(core->owner, i); 2389 } 2390 } 2391 } 2392 2393 static void 2394 e1000e_set_fcrth(E1000ECore *core, int index, uint32_t val) 2395 { 2396 core->mac[FCRTH] = val & 0xFFF8; 2397 } 2398 2399 static void 2400 e1000e_set_fcrtl(E1000ECore *core, int index, uint32_t val) 2401 { 2402 core->mac[FCRTL] = val & 0x8000FFF8; 2403 } 2404 2405 #define E1000E_LOW_BITS_SET_FUNC(num) \ 2406 static void \ 2407 e1000e_set_##num##bit(E1000ECore *core, int index, uint32_t val) \ 2408 { \ 2409 core->mac[index] = val & (BIT(num) - 1); \ 2410 } 2411 2412 E1000E_LOW_BITS_SET_FUNC(4) 2413 E1000E_LOW_BITS_SET_FUNC(6) 2414 E1000E_LOW_BITS_SET_FUNC(11) 2415 E1000E_LOW_BITS_SET_FUNC(12) 2416 E1000E_LOW_BITS_SET_FUNC(13) 2417 
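/*
 * The e1000e_set_<N>bit() helpers generated by E1000E_LOW_BITS_SET_FUNC()
 * keep only the low N bits of the value written by the guest.
 */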
E1000E_LOW_BITS_SET_FUNC(16)

static void
e1000e_set_vet(E1000ECore *core, int index, uint32_t val)
{
    core->mac[VET] = val & 0xffff;
    trace_e1000e_vlan_vet(core->mac[VET]);
}

static void
e1000e_set_dlen(E1000ECore *core, int index, uint32_t val)
{
    core->mac[index] = val & E1000_XDLEN_MASK;
}

static void
e1000e_set_dbal(E1000ECore *core, int index, uint32_t val)
{
    core->mac[index] = val & E1000_XDBAL_MASK;
}

static void
e1000e_set_tctl(E1000ECore *core, int index, uint32_t val)
{
    E1000E_TxRing txr;
    core->mac[index] = val;

    if (core->mac[TARC0] & E1000_TARC_ENABLE) {
        e1000e_tx_ring_init(core, &txr, 0);
        e1000e_start_xmit(core, &txr);
    }

    if (core->mac[TARC1] & E1000_TARC_ENABLE) {
        e1000e_tx_ring_init(core, &txr, 1);
        e1000e_start_xmit(core, &txr);
    }
}

static void
e1000e_set_tdt(E1000ECore *core, int index, uint32_t val)
{
    E1000E_TxRing txr;
    int qidx = e1000e_mq_queue_idx(TDT, index);
    uint32_t tarc_reg = (qidx == 0) ? TARC0 : TARC1;

    core->mac[index] = val & 0xffff;

    if (core->mac[tarc_reg] & E1000_TARC_ENABLE) {
        e1000e_tx_ring_init(core, &txr, qidx);
        e1000e_start_xmit(core, &txr);
    }
}

static void
e1000e_set_ics(E1000ECore *core, int index, uint32_t val)
{
    trace_e1000e_irq_write_ics(val);
    e1000e_set_interrupt_cause(core, val);
}

static void
e1000e_set_icr(E1000ECore *core, int index, uint32_t val)
{
    uint32_t icr = 0;
    if ((core->mac[ICR] & E1000_ICR_ASSERTED) &&
        (core->mac[CTRL_EXT] & E1000_CTRL_EXT_IAME)) {
        trace_e1000e_irq_icr_process_iame();
        e1000e_clear_ims_bits(core, core->mac[IAM]);
    }

    icr = core->mac[ICR] & ~val;
    /*
     * The Windows driver expects the "receive overrun" bit and other
     * ones to be cleared when the "Other" bit (#24) is cleared.
     */
    icr = (val & E1000_ICR_OTHER) ?
(icr & ~E1000_ICR_OTHER_CAUSES) : icr; 2493 trace_e1000e_irq_icr_write(val, core->mac[ICR], icr); 2494 core->mac[ICR] = icr; 2495 e1000e_update_interrupt_state(core); 2496 } 2497 2498 static void 2499 e1000e_set_imc(E1000ECore *core, int index, uint32_t val) 2500 { 2501 trace_e1000e_irq_ims_clear_set_imc(val); 2502 e1000e_clear_ims_bits(core, val); 2503 e1000e_update_interrupt_state(core); 2504 } 2505 2506 static void 2507 e1000e_set_ims(E1000ECore *core, int index, uint32_t val) 2508 { 2509 static const uint32_t ims_ext_mask = 2510 E1000_IMS_RXQ0 | E1000_IMS_RXQ1 | 2511 E1000_IMS_TXQ0 | E1000_IMS_TXQ1 | 2512 E1000_IMS_OTHER; 2513 2514 static const uint32_t ims_valid_mask = 2515 E1000_IMS_TXDW | E1000_IMS_TXQE | E1000_IMS_LSC | 2516 E1000_IMS_RXDMT0 | E1000_IMS_RXO | E1000_IMS_RXT0 | 2517 E1000_IMS_MDAC | E1000_IMS_TXD_LOW | E1000_IMS_SRPD | 2518 E1000_IMS_ACK | E1000_IMS_MNG | E1000_IMS_RXQ0 | 2519 E1000_IMS_RXQ1 | E1000_IMS_TXQ0 | E1000_IMS_TXQ1 | 2520 E1000_IMS_OTHER; 2521 2522 uint32_t valid_val = val & ims_valid_mask; 2523 2524 trace_e1000e_irq_set_ims(val, core->mac[IMS], core->mac[IMS] | valid_val); 2525 core->mac[IMS] |= valid_val; 2526 2527 if ((valid_val & ims_ext_mask) && 2528 (core->mac[CTRL_EXT] & E1000_CTRL_EXT_PBA_CLR) && 2529 msix_enabled(core->owner)) { 2530 e1000e_msix_clear(core, valid_val); 2531 } 2532 2533 if ((valid_val == ims_valid_mask) && 2534 (core->mac[CTRL_EXT] & E1000_CTRL_EXT_INT_TIMERS_CLEAR_ENA)) { 2535 trace_e1000e_irq_fire_all_timers(val); 2536 e1000e_intrmgr_fire_all_timers(core); 2537 } 2538 2539 e1000e_update_interrupt_state(core); 2540 } 2541 2542 static void 2543 e1000e_set_rdtr(E1000ECore *core, int index, uint32_t val) 2544 { 2545 e1000e_set_16bit(core, index, val); 2546 2547 if ((val & E1000_RDTR_FPD) && (core->rdtr.running)) { 2548 trace_e1000e_irq_rdtr_fpd_running(); 2549 e1000e_intrmgr_fire_delayed_interrupts(core); 2550 } else { 2551 trace_e1000e_irq_rdtr_fpd_not_running(); 2552 } 2553 } 2554 2555 static void 2556 e1000e_set_tidv(E1000ECore *core, int index, uint32_t val) 2557 { 2558 e1000e_set_16bit(core, index, val); 2559 2560 if ((val & E1000_TIDV_FPD) && (core->tidv.running)) { 2561 trace_e1000e_irq_tidv_fpd_running(); 2562 e1000e_intrmgr_fire_delayed_interrupts(core); 2563 } else { 2564 trace_e1000e_irq_tidv_fpd_not_running(); 2565 } 2566 } 2567 2568 static uint32_t 2569 e1000e_mac_readreg(E1000ECore *core, int index) 2570 { 2571 return core->mac[index]; 2572 } 2573 2574 static uint32_t 2575 e1000e_mac_ics_read(E1000ECore *core, int index) 2576 { 2577 trace_e1000e_irq_read_ics(core->mac[ICS]); 2578 return core->mac[ICS]; 2579 } 2580 2581 static uint32_t 2582 e1000e_mac_ims_read(E1000ECore *core, int index) 2583 { 2584 trace_e1000e_irq_read_ims(core->mac[IMS]); 2585 return core->mac[IMS]; 2586 } 2587 2588 static uint32_t 2589 e1000e_mac_swsm_read(E1000ECore *core, int index) 2590 { 2591 uint32_t val = core->mac[SWSM]; 2592 core->mac[SWSM] = val | E1000_SWSM_SMBI; 2593 return val; 2594 } 2595 2596 static uint32_t 2597 e1000e_mac_itr_read(E1000ECore *core, int index) 2598 { 2599 return core->itr_guest_value; 2600 } 2601 2602 static uint32_t 2603 e1000e_mac_eitr_read(E1000ECore *core, int index) 2604 { 2605 return core->eitr_guest_value[index - EITR]; 2606 } 2607 2608 static uint32_t 2609 e1000e_mac_icr_read(E1000ECore *core, int index) 2610 { 2611 uint32_t ret = core->mac[ICR]; 2612 trace_e1000e_irq_icr_read_entry(ret); 2613 2614 if (core->mac[IMS] == 0) { 2615 trace_e1000e_irq_icr_clear_zero_ims(); 2616 core->mac[ICR] = 0; 2617 } 2618 2619 
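/*
 * Without MSI-X the whole ICR is cleared on read; if IAME is armed and an
 * interrupt is currently asserted, the code below additionally clears ICR
 * and masks the IAM bits in IMS.
 */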
if (!msix_enabled(core->owner)) { 2620 trace_e1000e_irq_icr_clear_nonmsix_icr_read(); 2621 core->mac[ICR] = 0; 2622 } 2623 2624 if ((core->mac[ICR] & E1000_ICR_ASSERTED) && 2625 (core->mac[CTRL_EXT] & E1000_CTRL_EXT_IAME)) { 2626 trace_e1000e_irq_icr_clear_iame(); 2627 core->mac[ICR] = 0; 2628 trace_e1000e_irq_icr_process_iame(); 2629 e1000e_clear_ims_bits(core, core->mac[IAM]); 2630 } 2631 2632 trace_e1000e_irq_icr_read_exit(core->mac[ICR]); 2633 e1000e_update_interrupt_state(core); 2634 return ret; 2635 } 2636 2637 static uint32_t 2638 e1000e_mac_read_clr4(E1000ECore *core, int index) 2639 { 2640 uint32_t ret = core->mac[index]; 2641 2642 core->mac[index] = 0; 2643 return ret; 2644 } 2645 2646 static uint32_t 2647 e1000e_mac_read_clr8(E1000ECore *core, int index) 2648 { 2649 uint32_t ret = core->mac[index]; 2650 2651 core->mac[index] = 0; 2652 core->mac[index - 1] = 0; 2653 return ret; 2654 } 2655 2656 static uint32_t 2657 e1000e_get_ctrl(E1000ECore *core, int index) 2658 { 2659 uint32_t val = core->mac[CTRL]; 2660 2661 trace_e1000e_link_read_params( 2662 !!(val & E1000_CTRL_ASDE), 2663 (val & E1000_CTRL_SPD_SEL) >> E1000_CTRL_SPD_SHIFT, 2664 !!(val & E1000_CTRL_FRCSPD), 2665 !!(val & E1000_CTRL_FRCDPX), 2666 !!(val & E1000_CTRL_RFCE), 2667 !!(val & E1000_CTRL_TFCE)); 2668 2669 return val; 2670 } 2671 2672 static uint32_t 2673 e1000e_get_status(E1000ECore *core, int index) 2674 { 2675 uint32_t res = core->mac[STATUS]; 2676 2677 if (!(core->mac[CTRL] & E1000_CTRL_GIO_MASTER_DISABLE)) { 2678 res |= E1000_STATUS_GIO_MASTER_ENABLE; 2679 } 2680 2681 if (core->mac[CTRL] & E1000_CTRL_FRCDPX) { 2682 res |= (core->mac[CTRL] & E1000_CTRL_FD) ? E1000_STATUS_FD : 0; 2683 } else { 2684 res |= E1000_STATUS_FD; 2685 } 2686 2687 if ((core->mac[CTRL] & E1000_CTRL_FRCSPD) || 2688 (core->mac[CTRL_EXT] & E1000_CTRL_EXT_SPD_BYPS)) { 2689 switch (core->mac[CTRL] & E1000_CTRL_SPD_SEL) { 2690 case E1000_CTRL_SPD_10: 2691 res |= E1000_STATUS_SPEED_10; 2692 break; 2693 case E1000_CTRL_SPD_100: 2694 res |= E1000_STATUS_SPEED_100; 2695 break; 2696 case E1000_CTRL_SPD_1000: 2697 default: 2698 res |= E1000_STATUS_SPEED_1000; 2699 break; 2700 } 2701 } else { 2702 res |= E1000_STATUS_SPEED_1000; 2703 } 2704 2705 trace_e1000e_link_status( 2706 !!(res & E1000_STATUS_LU), 2707 !!(res & E1000_STATUS_FD), 2708 (res & E1000_STATUS_SPEED_MASK) >> E1000_STATUS_SPEED_SHIFT, 2709 (res & E1000_STATUS_ASDV) >> E1000_STATUS_ASDV_SHIFT); 2710 2711 return res; 2712 } 2713 2714 static uint32_t 2715 e1000e_get_tarc(E1000ECore *core, int index) 2716 { 2717 return core->mac[index] & ((BIT(11) - 1) | 2718 BIT(27) | 2719 BIT(28) | 2720 BIT(29) | 2721 BIT(30)); 2722 } 2723 2724 static void 2725 e1000e_mac_writereg(E1000ECore *core, int index, uint32_t val) 2726 { 2727 core->mac[index] = val; 2728 } 2729 2730 static void 2731 e1000e_mac_setmacaddr(E1000ECore *core, int index, uint32_t val) 2732 { 2733 uint32_t macaddr[2]; 2734 2735 core->mac[index] = val; 2736 2737 macaddr[0] = cpu_to_le32(core->mac[RA]); 2738 macaddr[1] = cpu_to_le32(core->mac[RA + 1]); 2739 qemu_format_nic_info_str(qemu_get_queue(core->owner_nic), 2740 (uint8_t *) macaddr); 2741 2742 trace_e1000e_mac_set_sw(MAC_ARG(macaddr)); 2743 } 2744 2745 static void 2746 e1000e_set_eecd(E1000ECore *core, int index, uint32_t val) 2747 { 2748 static const uint32_t ro_bits = E1000_EECD_PRES | 2749 E1000_EECD_AUTO_RD | 2750 E1000_EECD_SIZE_EX_MASK; 2751 2752 core->mac[EECD] = (core->mac[EECD] & ro_bits) | (val & ~ro_bits); 2753 } 2754 2755 static void 2756 e1000e_set_eerd(E1000ECore 
*core, int index, uint32_t val) 2757 { 2758 uint32_t addr = (val >> E1000_EERW_ADDR_SHIFT) & E1000_EERW_ADDR_MASK; 2759 uint32_t flags = 0; 2760 uint32_t data = 0; 2761 2762 if ((addr < E1000E_EEPROM_SIZE) && (val & E1000_EERW_START)) { 2763 data = core->eeprom[addr]; 2764 flags = E1000_EERW_DONE; 2765 } 2766 2767 core->mac[EERD] = flags | 2768 (addr << E1000_EERW_ADDR_SHIFT) | 2769 (data << E1000_EERW_DATA_SHIFT); 2770 } 2771 2772 static void 2773 e1000e_set_eewr(E1000ECore *core, int index, uint32_t val) 2774 { 2775 uint32_t addr = (val >> E1000_EERW_ADDR_SHIFT) & E1000_EERW_ADDR_MASK; 2776 uint32_t data = (val >> E1000_EERW_DATA_SHIFT) & E1000_EERW_DATA_MASK; 2777 uint32_t flags = 0; 2778 2779 if ((addr < E1000E_EEPROM_SIZE) && (val & E1000_EERW_START)) { 2780 core->eeprom[addr] = data; 2781 flags = E1000_EERW_DONE; 2782 } 2783 2784 core->mac[EERD] = flags | 2785 (addr << E1000_EERW_ADDR_SHIFT) | 2786 (data << E1000_EERW_DATA_SHIFT); 2787 } 2788 2789 static void 2790 e1000e_set_rxdctl(E1000ECore *core, int index, uint32_t val) 2791 { 2792 core->mac[RXDCTL] = core->mac[RXDCTL1] = val; 2793 } 2794 2795 static void 2796 e1000e_set_itr(E1000ECore *core, int index, uint32_t val) 2797 { 2798 uint32_t interval = val & 0xffff; 2799 2800 trace_e1000e_irq_itr_set(val); 2801 2802 core->itr_guest_value = interval; 2803 core->mac[index] = MAX(interval, E1000E_MIN_XITR); 2804 } 2805 2806 static void 2807 e1000e_set_eitr(E1000ECore *core, int index, uint32_t val) 2808 { 2809 uint32_t interval = val & 0xffff; 2810 uint32_t eitr_num = index - EITR; 2811 2812 trace_e1000e_irq_eitr_set(eitr_num, val); 2813 2814 core->eitr_guest_value[eitr_num] = interval; 2815 core->mac[index] = MAX(interval, E1000E_MIN_XITR); 2816 } 2817 2818 static void 2819 e1000e_set_psrctl(E1000ECore *core, int index, uint32_t val) 2820 { 2821 if (core->mac[RCTL] & E1000_RCTL_DTYP_MASK) { 2822 2823 if ((val & E1000_PSRCTL_BSIZE0_MASK) == 0) { 2824 qemu_log_mask(LOG_GUEST_ERROR, 2825 "e1000e: PSRCTL.BSIZE0 cannot be zero"); 2826 return; 2827 } 2828 2829 if ((val & E1000_PSRCTL_BSIZE1_MASK) == 0) { 2830 qemu_log_mask(LOG_GUEST_ERROR, 2831 "e1000e: PSRCTL.BSIZE1 cannot be zero"); 2832 return; 2833 } 2834 } 2835 2836 core->mac[PSRCTL] = val; 2837 } 2838 2839 static void 2840 e1000e_update_rx_offloads(E1000ECore *core) 2841 { 2842 int cso_state = e1000e_rx_l4_cso_enabled(core); 2843 2844 trace_e1000e_rx_set_cso(cso_state); 2845 2846 if (core->has_vnet) { 2847 qemu_set_offload(qemu_get_queue(core->owner_nic)->peer, 2848 cso_state, 0, 0, 0, 0); 2849 } 2850 } 2851 2852 static void 2853 e1000e_set_rxcsum(E1000ECore *core, int index, uint32_t val) 2854 { 2855 core->mac[RXCSUM] = val; 2856 e1000e_update_rx_offloads(core); 2857 } 2858 2859 static void 2860 e1000e_set_gcr(E1000ECore *core, int index, uint32_t val) 2861 { 2862 uint32_t ro_bits = core->mac[GCR] & E1000_GCR_RO_BITS; 2863 core->mac[GCR] = (val & ~E1000_GCR_RO_BITS) | ro_bits; 2864 } 2865 2866 static uint32_t e1000e_get_systiml(E1000ECore *core, int index) 2867 { 2868 e1000x_timestamp(core->mac, core->timadj, SYSTIML, SYSTIMH); 2869 return core->mac[SYSTIML]; 2870 } 2871 2872 static uint32_t e1000e_get_rxsatrh(E1000ECore *core, int index) 2873 { 2874 core->mac[TSYNCRXCTL] &= ~E1000_TSYNCRXCTL_VALID; 2875 return core->mac[RXSATRH]; 2876 } 2877 2878 static uint32_t e1000e_get_txstmph(E1000ECore *core, int index) 2879 { 2880 core->mac[TSYNCTXCTL] &= ~E1000_TSYNCTXCTL_VALID; 2881 return core->mac[TXSTMPH]; 2882 } 2883 2884 static void e1000e_set_timinca(E1000ECore *core, int index, 
uint32_t val) 2885 { 2886 e1000x_set_timinca(core->mac, &core->timadj, val); 2887 } 2888 2889 static void e1000e_set_timadjh(E1000ECore *core, int index, uint32_t val) 2890 { 2891 core->mac[TIMADJH] = val; 2892 core->timadj += core->mac[TIMADJL] | ((int64_t)core->mac[TIMADJH] << 32); 2893 } 2894 2895 #define e1000e_getreg(x) [x] = e1000e_mac_readreg 2896 typedef uint32_t (*readops)(E1000ECore *, int); 2897 static const readops e1000e_macreg_readops[] = { 2898 e1000e_getreg(PBA), 2899 e1000e_getreg(WUFC), 2900 e1000e_getreg(MANC), 2901 e1000e_getreg(TOTL), 2902 e1000e_getreg(RDT0), 2903 e1000e_getreg(RDBAH0), 2904 e1000e_getreg(TDBAL1), 2905 e1000e_getreg(RDLEN0), 2906 e1000e_getreg(RDH1), 2907 e1000e_getreg(LATECOL), 2908 e1000e_getreg(SEQEC), 2909 e1000e_getreg(XONTXC), 2910 e1000e_getreg(AIT), 2911 e1000e_getreg(TDFH), 2912 e1000e_getreg(TDFT), 2913 e1000e_getreg(TDFHS), 2914 e1000e_getreg(TDFTS), 2915 e1000e_getreg(TDFPC), 2916 e1000e_getreg(WUS), 2917 e1000e_getreg(PBS), 2918 e1000e_getreg(RDFH), 2919 e1000e_getreg(RDFT), 2920 e1000e_getreg(RDFHS), 2921 e1000e_getreg(RDFTS), 2922 e1000e_getreg(RDFPC), 2923 e1000e_getreg(GORCL), 2924 e1000e_getreg(MGTPRC), 2925 e1000e_getreg(EERD), 2926 e1000e_getreg(EIAC), 2927 e1000e_getreg(PSRCTL), 2928 e1000e_getreg(MANC2H), 2929 e1000e_getreg(RXCSUM), 2930 e1000e_getreg(GSCL_3), 2931 e1000e_getreg(GSCN_2), 2932 e1000e_getreg(RSRPD), 2933 e1000e_getreg(RDBAL1), 2934 e1000e_getreg(FCAH), 2935 e1000e_getreg(FCRTH), 2936 e1000e_getreg(FLOP), 2937 e1000e_getreg(FLASHT), 2938 e1000e_getreg(RXSTMPH), 2939 e1000e_getreg(TXSTMPL), 2940 e1000e_getreg(TIMADJL), 2941 e1000e_getreg(TXDCTL), 2942 e1000e_getreg(RDH0), 2943 e1000e_getreg(TDT1), 2944 e1000e_getreg(TNCRS), 2945 e1000e_getreg(RJC), 2946 e1000e_getreg(IAM), 2947 e1000e_getreg(GSCL_2), 2948 e1000e_getreg(RDBAH1), 2949 e1000e_getreg(FLSWDATA), 2950 e1000e_getreg(TIPG), 2951 e1000e_getreg(FLMNGCTL), 2952 e1000e_getreg(FLMNGCNT), 2953 e1000e_getreg(TSYNCTXCTL), 2954 e1000e_getreg(EXTCNF_SIZE), 2955 e1000e_getreg(EXTCNF_CTRL), 2956 e1000e_getreg(EEMNGDATA), 2957 e1000e_getreg(CTRL_EXT), 2958 e1000e_getreg(SYSTIMH), 2959 e1000e_getreg(EEMNGCTL), 2960 e1000e_getreg(FLMNGDATA), 2961 e1000e_getreg(TSYNCRXCTL), 2962 e1000e_getreg(TDH), 2963 e1000e_getreg(LEDCTL), 2964 e1000e_getreg(TCTL), 2965 e1000e_getreg(TDBAL), 2966 e1000e_getreg(TDLEN), 2967 e1000e_getreg(TDH1), 2968 e1000e_getreg(RADV), 2969 e1000e_getreg(ECOL), 2970 e1000e_getreg(DC), 2971 e1000e_getreg(RLEC), 2972 e1000e_getreg(XOFFTXC), 2973 e1000e_getreg(RFC), 2974 e1000e_getreg(RNBC), 2975 e1000e_getreg(MGTPTC), 2976 e1000e_getreg(TIMINCA), 2977 e1000e_getreg(RXCFGL), 2978 e1000e_getreg(MFUTP01), 2979 e1000e_getreg(FACTPS), 2980 e1000e_getreg(GSCL_1), 2981 e1000e_getreg(GSCN_0), 2982 e1000e_getreg(GCR2), 2983 e1000e_getreg(RDT1), 2984 e1000e_getreg(PBACLR), 2985 e1000e_getreg(FCTTV), 2986 e1000e_getreg(EEWR), 2987 e1000e_getreg(FLSWCTL), 2988 e1000e_getreg(RXDCTL1), 2989 e1000e_getreg(RXSATRL), 2990 e1000e_getreg(RXUDP), 2991 e1000e_getreg(TORL), 2992 e1000e_getreg(TDLEN1), 2993 e1000e_getreg(MCC), 2994 e1000e_getreg(WUC), 2995 e1000e_getreg(EECD), 2996 e1000e_getreg(MFUTP23), 2997 e1000e_getreg(RAID), 2998 e1000e_getreg(FCRTV), 2999 e1000e_getreg(TXDCTL1), 3000 e1000e_getreg(RCTL), 3001 e1000e_getreg(TDT), 3002 e1000e_getreg(MDIC), 3003 e1000e_getreg(FCRUC), 3004 e1000e_getreg(VET), 3005 e1000e_getreg(RDBAL0), 3006 e1000e_getreg(TDBAH1), 3007 e1000e_getreg(RDTR), 3008 e1000e_getreg(SCC), 3009 e1000e_getreg(COLC), 3010 e1000e_getreg(CEXTERR), 3011 
e1000e_getreg(XOFFRXC), 3012 e1000e_getreg(IPAV), 3013 e1000e_getreg(GOTCL), 3014 e1000e_getreg(MGTPDC), 3015 e1000e_getreg(GCR), 3016 e1000e_getreg(IVAR), 3017 e1000e_getreg(POEMB), 3018 e1000e_getreg(MFVAL), 3019 e1000e_getreg(FUNCTAG), 3020 e1000e_getreg(GSCL_4), 3021 e1000e_getreg(GSCN_3), 3022 e1000e_getreg(MRQC), 3023 e1000e_getreg(RDLEN1), 3024 e1000e_getreg(FCT), 3025 e1000e_getreg(FLA), 3026 e1000e_getreg(FLOL), 3027 e1000e_getreg(RXDCTL), 3028 e1000e_getreg(RXSTMPL), 3029 e1000e_getreg(TIMADJH), 3030 e1000e_getreg(FCRTL), 3031 e1000e_getreg(TDBAH), 3032 e1000e_getreg(TADV), 3033 e1000e_getreg(XONRXC), 3034 e1000e_getreg(TSCTFC), 3035 e1000e_getreg(RFCTL), 3036 e1000e_getreg(GSCN_1), 3037 e1000e_getreg(FCAL), 3038 e1000e_getreg(FLSWCNT), 3039 3040 [TOTH] = e1000e_mac_read_clr8, 3041 [GOTCH] = e1000e_mac_read_clr8, 3042 [PRC64] = e1000e_mac_read_clr4, 3043 [PRC255] = e1000e_mac_read_clr4, 3044 [PRC1023] = e1000e_mac_read_clr4, 3045 [PTC64] = e1000e_mac_read_clr4, 3046 [PTC255] = e1000e_mac_read_clr4, 3047 [PTC1023] = e1000e_mac_read_clr4, 3048 [GPRC] = e1000e_mac_read_clr4, 3049 [TPT] = e1000e_mac_read_clr4, 3050 [RUC] = e1000e_mac_read_clr4, 3051 [BPRC] = e1000e_mac_read_clr4, 3052 [MPTC] = e1000e_mac_read_clr4, 3053 [IAC] = e1000e_mac_read_clr4, 3054 [ICR] = e1000e_mac_icr_read, 3055 [STATUS] = e1000e_get_status, 3056 [TARC0] = e1000e_get_tarc, 3057 [ICS] = e1000e_mac_ics_read, 3058 [TORH] = e1000e_mac_read_clr8, 3059 [GORCH] = e1000e_mac_read_clr8, 3060 [PRC127] = e1000e_mac_read_clr4, 3061 [PRC511] = e1000e_mac_read_clr4, 3062 [PRC1522] = e1000e_mac_read_clr4, 3063 [PTC127] = e1000e_mac_read_clr4, 3064 [PTC511] = e1000e_mac_read_clr4, 3065 [PTC1522] = e1000e_mac_read_clr4, 3066 [GPTC] = e1000e_mac_read_clr4, 3067 [TPR] = e1000e_mac_read_clr4, 3068 [ROC] = e1000e_mac_read_clr4, 3069 [MPRC] = e1000e_mac_read_clr4, 3070 [BPTC] = e1000e_mac_read_clr4, 3071 [TSCTC] = e1000e_mac_read_clr4, 3072 [ITR] = e1000e_mac_itr_read, 3073 [CTRL] = e1000e_get_ctrl, 3074 [TARC1] = e1000e_get_tarc, 3075 [SWSM] = e1000e_mac_swsm_read, 3076 [IMS] = e1000e_mac_ims_read, 3077 [SYSTIML] = e1000e_get_systiml, 3078 [RXSATRH] = e1000e_get_rxsatrh, 3079 [TXSTMPH] = e1000e_get_txstmph, 3080 3081 [CRCERRS ... MPC] = e1000e_mac_readreg, 3082 [IP6AT ... IP6AT + 3] = e1000e_mac_readreg, 3083 [IP4AT ... IP4AT + 6] = e1000e_mac_readreg, 3084 [RA ... RA + 31] = e1000e_mac_readreg, 3085 [WUPM ... WUPM + 31] = e1000e_mac_readreg, 3086 [MTA ... MTA + E1000_MC_TBL_SIZE - 1] = e1000e_mac_readreg, 3087 [VFTA ... VFTA + E1000_VLAN_FILTER_TBL_SIZE - 1] = e1000e_mac_readreg, 3088 [FFMT ... FFMT + 254] = e1000e_mac_readreg, 3089 [FFVT ... FFVT + 254] = e1000e_mac_readreg, 3090 [MDEF ... MDEF + 7] = e1000e_mac_readreg, 3091 [FFLT ... FFLT + 10] = e1000e_mac_readreg, 3092 [FTFT ... FTFT + 254] = e1000e_mac_readreg, 3093 [PBM ... PBM + 10239] = e1000e_mac_readreg, 3094 [RETA ... RETA + 31] = e1000e_mac_readreg, 3095 [RSSRK ... RSSRK + 31] = e1000e_mac_readreg, 3096 [MAVTV0 ... 
MAVTV3] = e1000e_mac_readreg, 3097 [EITR...EITR + E1000E_MSIX_VEC_NUM - 1] = e1000e_mac_eitr_read 3098 }; 3099 enum { E1000E_NREADOPS = ARRAY_SIZE(e1000e_macreg_readops) }; 3100 3101 #define e1000e_putreg(x) [x] = e1000e_mac_writereg 3102 typedef void (*writeops)(E1000ECore *, int, uint32_t); 3103 static const writeops e1000e_macreg_writeops[] = { 3104 e1000e_putreg(PBA), 3105 e1000e_putreg(SWSM), 3106 e1000e_putreg(WUFC), 3107 e1000e_putreg(RDBAH1), 3108 e1000e_putreg(TDBAH), 3109 e1000e_putreg(TXDCTL), 3110 e1000e_putreg(RDBAH0), 3111 e1000e_putreg(LEDCTL), 3112 e1000e_putreg(FCAL), 3113 e1000e_putreg(FCRUC), 3114 e1000e_putreg(WUC), 3115 e1000e_putreg(WUS), 3116 e1000e_putreg(IPAV), 3117 e1000e_putreg(TDBAH1), 3118 e1000e_putreg(IAM), 3119 e1000e_putreg(EIAC), 3120 e1000e_putreg(IVAR), 3121 e1000e_putreg(TARC0), 3122 e1000e_putreg(TARC1), 3123 e1000e_putreg(FLSWDATA), 3124 e1000e_putreg(POEMB), 3125 e1000e_putreg(MFUTP01), 3126 e1000e_putreg(MFUTP23), 3127 e1000e_putreg(MANC), 3128 e1000e_putreg(MANC2H), 3129 e1000e_putreg(MFVAL), 3130 e1000e_putreg(EXTCNF_CTRL), 3131 e1000e_putreg(FACTPS), 3132 e1000e_putreg(FUNCTAG), 3133 e1000e_putreg(GSCL_1), 3134 e1000e_putreg(GSCL_2), 3135 e1000e_putreg(GSCL_3), 3136 e1000e_putreg(GSCL_4), 3137 e1000e_putreg(GSCN_0), 3138 e1000e_putreg(GSCN_1), 3139 e1000e_putreg(GSCN_2), 3140 e1000e_putreg(GSCN_3), 3141 e1000e_putreg(GCR2), 3142 e1000e_putreg(MRQC), 3143 e1000e_putreg(FLOP), 3144 e1000e_putreg(FLOL), 3145 e1000e_putreg(FLSWCTL), 3146 e1000e_putreg(FLSWCNT), 3147 e1000e_putreg(FLA), 3148 e1000e_putreg(RXDCTL1), 3149 e1000e_putreg(TXDCTL1), 3150 e1000e_putreg(TIPG), 3151 e1000e_putreg(RXSTMPH), 3152 e1000e_putreg(RXSTMPL), 3153 e1000e_putreg(RXSATRL), 3154 e1000e_putreg(RXSATRH), 3155 e1000e_putreg(TXSTMPL), 3156 e1000e_putreg(TXSTMPH), 3157 e1000e_putreg(SYSTIML), 3158 e1000e_putreg(SYSTIMH), 3159 e1000e_putreg(TIMADJL), 3160 e1000e_putreg(RXUDP), 3161 e1000e_putreg(RXCFGL), 3162 e1000e_putreg(TSYNCRXCTL), 3163 e1000e_putreg(TSYNCTXCTL), 3164 e1000e_putreg(EXTCNF_SIZE), 3165 e1000e_putreg(EEMNGCTL), 3166 e1000e_putreg(RA), 3167 3168 [TDH1] = e1000e_set_16bit, 3169 [TDT1] = e1000e_set_tdt, 3170 [TCTL] = e1000e_set_tctl, 3171 [TDT] = e1000e_set_tdt, 3172 [MDIC] = e1000e_set_mdic, 3173 [ICS] = e1000e_set_ics, 3174 [TDH] = e1000e_set_16bit, 3175 [RDH0] = e1000e_set_16bit, 3176 [RDT0] = e1000e_set_rdt, 3177 [IMC] = e1000e_set_imc, 3178 [IMS] = e1000e_set_ims, 3179 [ICR] = e1000e_set_icr, 3180 [EECD] = e1000e_set_eecd, 3181 [RCTL] = e1000e_set_rx_control, 3182 [CTRL] = e1000e_set_ctrl, 3183 [RDTR] = e1000e_set_rdtr, 3184 [RADV] = e1000e_set_16bit, 3185 [TADV] = e1000e_set_16bit, 3186 [ITR] = e1000e_set_itr, 3187 [EERD] = e1000e_set_eerd, 3188 [AIT] = e1000e_set_16bit, 3189 [TDFH] = e1000e_set_13bit, 3190 [TDFT] = e1000e_set_13bit, 3191 [TDFHS] = e1000e_set_13bit, 3192 [TDFTS] = e1000e_set_13bit, 3193 [TDFPC] = e1000e_set_13bit, 3194 [RDFH] = e1000e_set_13bit, 3195 [RDFHS] = e1000e_set_13bit, 3196 [RDFT] = e1000e_set_13bit, 3197 [RDFTS] = e1000e_set_13bit, 3198 [RDFPC] = e1000e_set_13bit, 3199 [PBS] = e1000e_set_6bit, 3200 [GCR] = e1000e_set_gcr, 3201 [PSRCTL] = e1000e_set_psrctl, 3202 [RXCSUM] = e1000e_set_rxcsum, 3203 [RAID] = e1000e_set_16bit, 3204 [RSRPD] = e1000e_set_12bit, 3205 [TIDV] = e1000e_set_tidv, 3206 [TDLEN1] = e1000e_set_dlen, 3207 [TDLEN] = e1000e_set_dlen, 3208 [RDLEN0] = e1000e_set_dlen, 3209 [RDLEN1] = e1000e_set_dlen, 3210 [TDBAL] = e1000e_set_dbal, 3211 [TDBAL1] = e1000e_set_dbal, 3212 [RDBAL0] = e1000e_set_dbal, 3213 [RDBAL1] = 
e1000e_set_dbal, 3214 [RDH1] = e1000e_set_16bit, 3215 [RDT1] = e1000e_set_rdt, 3216 [STATUS] = e1000e_set_status, 3217 [PBACLR] = e1000e_set_pbaclr, 3218 [CTRL_EXT] = e1000e_set_ctrlext, 3219 [FCAH] = e1000e_set_16bit, 3220 [FCT] = e1000e_set_16bit, 3221 [FCTTV] = e1000e_set_16bit, 3222 [FCRTV] = e1000e_set_16bit, 3223 [FCRTH] = e1000e_set_fcrth, 3224 [FCRTL] = e1000e_set_fcrtl, 3225 [VET] = e1000e_set_vet, 3226 [RXDCTL] = e1000e_set_rxdctl, 3227 [FLASHT] = e1000e_set_16bit, 3228 [EEWR] = e1000e_set_eewr, 3229 [CTRL_DUP] = e1000e_set_ctrl, 3230 [RFCTL] = e1000e_set_rfctl, 3231 [RA + 1] = e1000e_mac_setmacaddr, 3232 [TIMINCA] = e1000e_set_timinca, 3233 [TIMADJH] = e1000e_set_timadjh, 3234 3235 [IP6AT ... IP6AT + 3] = e1000e_mac_writereg, 3236 [IP4AT ... IP4AT + 6] = e1000e_mac_writereg, 3237 [RA + 2 ... RA + 31] = e1000e_mac_writereg, 3238 [WUPM ... WUPM + 31] = e1000e_mac_writereg, 3239 [MTA ... MTA + E1000_MC_TBL_SIZE - 1] = e1000e_mac_writereg, 3240 [VFTA ... VFTA + E1000_VLAN_FILTER_TBL_SIZE - 1] = e1000e_mac_writereg, 3241 [FFMT ... FFMT + 254] = e1000e_set_4bit, 3242 [FFVT ... FFVT + 254] = e1000e_mac_writereg, 3243 [PBM ... PBM + 10239] = e1000e_mac_writereg, 3244 [MDEF ... MDEF + 7] = e1000e_mac_writereg, 3245 [FFLT ... FFLT + 10] = e1000e_set_11bit, 3246 [FTFT ... FTFT + 254] = e1000e_mac_writereg, 3247 [RETA ... RETA + 31] = e1000e_mac_writereg, 3248 [RSSRK ... RSSRK + 31] = e1000e_mac_writereg, 3249 [MAVTV0 ... MAVTV3] = e1000e_mac_writereg, 3250 [EITR...EITR + E1000E_MSIX_VEC_NUM - 1] = e1000e_set_eitr 3251 }; 3252 enum { E1000E_NWRITEOPS = ARRAY_SIZE(e1000e_macreg_writeops) }; 3253 3254 enum { MAC_ACCESS_PARTIAL = 1 }; 3255 3256 /* 3257 * The array below combines alias offsets of the index values for the 3258 * MAC registers that have aliases, with the indication of not fully 3259 * implemented registers (lowest bit). This combination is possible 3260 * because all of the offsets are even. 3261 */ 3262 static const uint16_t mac_reg_access[E1000E_MAC_SIZE] = { 3263 /* Alias index offsets */ 3264 [FCRTL_A] = 0x07fe, [FCRTH_A] = 0x0802, 3265 [RDH0_A] = 0x09bc, [RDT0_A] = 0x09bc, [RDTR_A] = 0x09c6, 3266 [RDFH_A] = 0xe904, [RDFT_A] = 0xe904, 3267 [TDH_A] = 0x0cf8, [TDT_A] = 0x0cf8, [TIDV_A] = 0x0cf8, 3268 [TDFH_A] = 0xed00, [TDFT_A] = 0xed00, 3269 [RA_A ... RA_A + 31] = 0x14f0, 3270 [VFTA_A ... VFTA_A + E1000_VLAN_FILTER_TBL_SIZE - 1] = 0x1400, 3271 [RDBAL0_A ... RDLEN0_A] = 0x09bc, 3272 [TDBAL_A ... TDLEN_A] = 0x0cf8, 3273 /* Access options */ 3274 [RDFH] = MAC_ACCESS_PARTIAL, [RDFT] = MAC_ACCESS_PARTIAL, 3275 [RDFHS] = MAC_ACCESS_PARTIAL, [RDFTS] = MAC_ACCESS_PARTIAL, 3276 [RDFPC] = MAC_ACCESS_PARTIAL, 3277 [TDFH] = MAC_ACCESS_PARTIAL, [TDFT] = MAC_ACCESS_PARTIAL, 3278 [TDFHS] = MAC_ACCESS_PARTIAL, [TDFTS] = MAC_ACCESS_PARTIAL, 3279 [TDFPC] = MAC_ACCESS_PARTIAL, [EECD] = MAC_ACCESS_PARTIAL, 3280 [PBM] = MAC_ACCESS_PARTIAL, [FLA] = MAC_ACCESS_PARTIAL, 3281 [FCAL] = MAC_ACCESS_PARTIAL, [FCAH] = MAC_ACCESS_PARTIAL, 3282 [FCT] = MAC_ACCESS_PARTIAL, [FCTTV] = MAC_ACCESS_PARTIAL, 3283 [FCRTV] = MAC_ACCESS_PARTIAL, [FCRTL] = MAC_ACCESS_PARTIAL, 3284 [FCRTH] = MAC_ACCESS_PARTIAL, [TXDCTL] = MAC_ACCESS_PARTIAL, 3285 [TXDCTL1] = MAC_ACCESS_PARTIAL, 3286 [MAVTV0 ... 
MAVTV3] = MAC_ACCESS_PARTIAL 3287 }; 3288 3289 void 3290 e1000e_core_write(E1000ECore *core, hwaddr addr, uint64_t val, unsigned size) 3291 { 3292 uint16_t index = e1000e_get_reg_index_with_offset(mac_reg_access, addr); 3293 3294 if (index < E1000E_NWRITEOPS && e1000e_macreg_writeops[index]) { 3295 if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) { 3296 trace_e1000e_wrn_regs_write_trivial(index << 2); 3297 } 3298 trace_e1000e_core_write(index << 2, size, val); 3299 e1000e_macreg_writeops[index](core, index, val); 3300 } else if (index < E1000E_NREADOPS && e1000e_macreg_readops[index]) { 3301 trace_e1000e_wrn_regs_write_ro(index << 2, size, val); 3302 } else { 3303 trace_e1000e_wrn_regs_write_unknown(index << 2, size, val); 3304 } 3305 } 3306 3307 uint64_t 3308 e1000e_core_read(E1000ECore *core, hwaddr addr, unsigned size) 3309 { 3310 uint64_t val; 3311 uint16_t index = e1000e_get_reg_index_with_offset(mac_reg_access, addr); 3312 3313 if (index < E1000E_NREADOPS && e1000e_macreg_readops[index]) { 3314 if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) { 3315 trace_e1000e_wrn_regs_read_trivial(index << 2); 3316 } 3317 val = e1000e_macreg_readops[index](core, index); 3318 trace_e1000e_core_read(index << 2, size, val); 3319 return val; 3320 } else { 3321 trace_e1000e_wrn_regs_read_unknown(index << 2, size); 3322 } 3323 return 0; 3324 } 3325 3326 static inline void 3327 e1000e_autoneg_pause(E1000ECore *core) 3328 { 3329 timer_del(core->autoneg_timer); 3330 } 3331 3332 static void 3333 e1000e_autoneg_resume(E1000ECore *core) 3334 { 3335 if (e1000e_have_autoneg(core) && 3336 !(core->phy[0][MII_BMSR] & MII_BMSR_AN_COMP)) { 3337 qemu_get_queue(core->owner_nic)->link_down = false; 3338 timer_mod(core->autoneg_timer, 3339 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500); 3340 } 3341 } 3342 3343 static void 3344 e1000e_vm_state_change(void *opaque, bool running, RunState state) 3345 { 3346 E1000ECore *core = opaque; 3347 3348 if (running) { 3349 trace_e1000e_vm_state_running(); 3350 e1000e_intrmgr_resume(core); 3351 e1000e_autoneg_resume(core); 3352 } else { 3353 trace_e1000e_vm_state_stopped(); 3354 e1000e_autoneg_pause(core); 3355 e1000e_intrmgr_pause(core); 3356 } 3357 } 3358 3359 void 3360 e1000e_core_pci_realize(E1000ECore *core, 3361 const uint16_t *eeprom_templ, 3362 uint32_t eeprom_size, 3363 const uint8_t *macaddr) 3364 { 3365 int i; 3366 3367 core->autoneg_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, 3368 e1000e_autoneg_timer, core); 3369 e1000e_intrmgr_pci_realize(core); 3370 3371 core->vmstate = 3372 qemu_add_vm_change_state_handler(e1000e_vm_state_change, core); 3373 3374 for (i = 0; i < E1000E_NUM_QUEUES; i++) { 3375 net_tx_pkt_init(&core->tx[i].tx_pkt, E1000E_MAX_TX_FRAGS); 3376 } 3377 3378 net_rx_pkt_init(&core->rx_pkt); 3379 3380 e1000x_core_prepare_eeprom(core->eeprom, 3381 eeprom_templ, 3382 eeprom_size, 3383 PCI_DEVICE_GET_CLASS(core->owner)->device_id, 3384 macaddr); 3385 e1000e_update_rx_offloads(core); 3386 } 3387 3388 void 3389 e1000e_core_pci_uninit(E1000ECore *core) 3390 { 3391 int i; 3392 3393 timer_free(core->autoneg_timer); 3394 3395 e1000e_intrmgr_pci_unint(core); 3396 3397 qemu_del_vm_change_state_handler(core->vmstate); 3398 3399 for (i = 0; i < E1000E_NUM_QUEUES; i++) { 3400 net_tx_pkt_uninit(core->tx[i].tx_pkt); 3401 } 3402 3403 net_rx_pkt_uninit(core->rx_pkt); 3404 } 3405 3406 static const uint16_t 3407 e1000e_phy_reg_init[E1000E_PHY_PAGES][E1000E_PHY_PAGE_SIZE] = { 3408 [0] = { 3409 [MII_BMCR] = MII_BMCR_SPEED1000 | 3410 MII_BMCR_FD | 3411 MII_BMCR_AUTOEN, 3412 3413 [MII_BMSR] 
= MII_BMSR_EXTCAP | 3414 MII_BMSR_LINK_ST | 3415 MII_BMSR_AUTONEG | 3416 MII_BMSR_MFPS | 3417 MII_BMSR_EXTSTAT | 3418 MII_BMSR_10T_HD | 3419 MII_BMSR_10T_FD | 3420 MII_BMSR_100TX_HD | 3421 MII_BMSR_100TX_FD, 3422 3423 [MII_PHYID1] = 0x141, 3424 [MII_PHYID2] = E1000_PHY_ID2_82574x, 3425 [MII_ANAR] = MII_ANAR_CSMACD | MII_ANAR_10 | 3426 MII_ANAR_10FD | MII_ANAR_TX | 3427 MII_ANAR_TXFD | MII_ANAR_PAUSE | 3428 MII_ANAR_PAUSE_ASYM, 3429 [MII_ANLPAR] = MII_ANLPAR_10 | MII_ANLPAR_10FD | 3430 MII_ANLPAR_TX | MII_ANLPAR_TXFD | 3431 MII_ANLPAR_T4 | MII_ANLPAR_PAUSE, 3432 [MII_ANER] = MII_ANER_NP | MII_ANER_NWAY, 3433 [MII_ANNP] = 1 | MII_ANNP_MP, 3434 [MII_CTRL1000] = MII_CTRL1000_HALF | MII_CTRL1000_FULL | 3435 MII_CTRL1000_PORT | MII_CTRL1000_MASTER, 3436 [MII_STAT1000] = MII_STAT1000_HALF | MII_STAT1000_FULL | 3437 MII_STAT1000_ROK | MII_STAT1000_LOK, 3438 [MII_EXTSTAT] = MII_EXTSTAT_1000T_HD | MII_EXTSTAT_1000T_FD, 3439 3440 [PHY_COPPER_CTRL1] = BIT(5) | BIT(6) | BIT(8) | BIT(9) | 3441 BIT(12) | BIT(13), 3442 [PHY_COPPER_STAT1] = BIT(3) | BIT(10) | BIT(11) | BIT(13) | BIT(15) 3443 }, 3444 [2] = { 3445 [PHY_MAC_CTRL1] = BIT(3) | BIT(7), 3446 [PHY_MAC_CTRL2] = BIT(1) | BIT(2) | BIT(6) | BIT(12) 3447 }, 3448 [3] = { 3449 [PHY_LED_TIMER_CTRL] = BIT(0) | BIT(2) | BIT(14) 3450 } 3451 }; 3452 3453 static const uint32_t e1000e_mac_reg_init[] = { 3454 [PBA] = 0x00140014, 3455 [LEDCTL] = BIT(1) | BIT(8) | BIT(9) | BIT(15) | BIT(17) | BIT(18), 3456 [EXTCNF_CTRL] = BIT(3), 3457 [EEMNGCTL] = BIT(31), 3458 [FLASHT] = 0x2, 3459 [FLSWCTL] = BIT(30) | BIT(31), 3460 [FLOL] = BIT(0), 3461 [RXDCTL] = BIT(16), 3462 [RXDCTL1] = BIT(16), 3463 [TIPG] = 0x8 | (0x8 << 10) | (0x6 << 20), 3464 [RXCFGL] = 0x88F7, 3465 [RXUDP] = 0x319, 3466 [CTRL] = E1000_CTRL_FD | E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 | 3467 E1000_CTRL_SPD_1000 | E1000_CTRL_SLU | 3468 E1000_CTRL_ADVD3WUC, 3469 [STATUS] = E1000_STATUS_ASDV_1000 | E1000_STATUS_LU, 3470 [PSRCTL] = (2 << E1000_PSRCTL_BSIZE0_SHIFT) | 3471 (4 << E1000_PSRCTL_BSIZE1_SHIFT) | 3472 (4 << E1000_PSRCTL_BSIZE2_SHIFT), 3473 [TARC0] = 0x3 | E1000_TARC_ENABLE, 3474 [TARC1] = 0x3 | E1000_TARC_ENABLE, 3475 [EECD] = E1000_EECD_AUTO_RD | E1000_EECD_PRES, 3476 [EERD] = E1000_EERW_DONE, 3477 [EEWR] = E1000_EERW_DONE, 3478 [GCR] = E1000_L0S_ADJUST | 3479 E1000_L1_ENTRY_LATENCY_MSB | 3480 E1000_L1_ENTRY_LATENCY_LSB, 3481 [TDFH] = 0x600, 3482 [TDFT] = 0x600, 3483 [TDFHS] = 0x600, 3484 [TDFTS] = 0x600, 3485 [POEMB] = 0x30D, 3486 [PBS] = 0x028, 3487 [MANC] = E1000_MANC_DIS_IP_CHK_ARP, 3488 [FACTPS] = E1000_FACTPS_LAN0_ON | 0x20000000, 3489 [SWSM] = 1, 3490 [RXCSUM] = E1000_RXCSUM_IPOFLD | E1000_RXCSUM_TUOFLD, 3491 [ITR] = E1000E_MIN_XITR, 3492 [EITR...EITR + E1000E_MSIX_VEC_NUM - 1] = E1000E_MIN_XITR, 3493 }; 3494 3495 static void e1000e_reset(E1000ECore *core, bool sw) 3496 { 3497 int i; 3498 3499 timer_del(core->autoneg_timer); 3500 3501 e1000e_intrmgr_reset(core); 3502 3503 memset(core->phy, 0, sizeof core->phy); 3504 memcpy(core->phy, e1000e_phy_reg_init, sizeof e1000e_phy_reg_init); 3505 3506 for (i = 0; i < E1000E_MAC_SIZE; i++) { 3507 if (sw && (i == PBA || i == PBS || i == FLA)) { 3508 continue; 3509 } 3510 3511 core->mac[i] = i < ARRAY_SIZE(e1000e_mac_reg_init) ? 
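/* registers not covered by e1000e_mac_reg_init[] are reset to zero */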
                       e1000e_mac_reg_init[i] : 0;
    }

    core->rxbuf_min_shift = 1 + E1000_RING_DESC_LEN_SHIFT;

    if (qemu_get_queue(core->owner_nic)->link_down) {
        e1000e_link_down(core);
    }

    e1000x_reset_mac_addr(core->owner_nic, core->mac, core->permanent_mac);

    for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
        memset(&core->tx[i].props, 0, sizeof(core->tx[i].props));
        core->tx[i].skip_cp = false;
    }
}

void
e1000e_core_reset(E1000ECore *core)
{
    e1000e_reset(core, false);
}

void e1000e_core_pre_save(E1000ECore *core)
{
    int i;
    NetClientState *nc = qemu_get_queue(core->owner_nic);

    /*
     * If link is down and auto-negotiation is supported and ongoing,
     * complete auto-negotiation immediately. This allows us to look
     * at MII_BMSR_AN_COMP to infer link status on load.
     */
    if (nc->link_down && e1000e_have_autoneg(core)) {
        core->phy[0][MII_BMSR] |= MII_BMSR_AN_COMP;
        e1000e_update_flowctl_status(core);
    }

    for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
        if (net_tx_pkt_has_fragments(core->tx[i].tx_pkt)) {
            core->tx[i].skip_cp = true;
        }
    }
}

int
e1000e_core_post_load(E1000ECore *core)
{
    NetClientState *nc = qemu_get_queue(core->owner_nic);

    /*
     * nc.link_down can't be migrated, so infer link_down from the
     * link status bit in core.mac[STATUS].
     */
    nc->link_down = (core->mac[STATUS] & E1000_STATUS_LU) == 0;

    return 0;
}