/*
 * Core code for QEMU e1000e emulation
 *
 * Software developer's manuals:
 * http://www.intel.com/content/dam/doc/datasheet/82574l-gbe-controller-datasheet.pdf
 *
 * Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com)
 * Developed by Daynix Computing LTD (http://www.daynix.com)
 *
 * Authors:
 * Dmitry Fleytman <dmitry@daynix.com>
 * Leonid Bloch <leonid@daynix.com>
 * Yan Vugenfirer <yan@daynix.com>
 *
 * Based on work done by:
 * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
 * Copyright (c) 2008 Qumranet
 * Based on work done by:
 * Copyright (c) 2007 Dan Aloni
 * Copyright (c) 2004 Antony T Curtis
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "net/net.h"
#include "net/tap.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "sysemu/runstate.h"

#include "net_tx_pkt.h"
#include "net_rx_pkt.h"

#include "e1000x_common.h"
#include "e1000e_core.h"

#include "trace.h"

/* No more than 7813 interrupts per second according to spec 10.2.4.2 */
#define E1000E_MIN_XITR     (500)

#define E1000E_MAX_TX_FRAGS (64)

static inline void
e1000e_set_interrupt_cause(E1000ECore *core, uint32_t val);

static inline void
e1000e_process_ts_option(E1000ECore *core, struct e1000_tx_desc *dp)
{
    if (le32_to_cpu(dp->upper.data) & E1000_TXD_EXTCMD_TSTAMP) {
        trace_e1000e_wrn_no_ts_support();
    }
}

static inline void
e1000e_process_snap_option(E1000ECore *core, uint32_t cmd_and_length)
{
    if (cmd_and_length & E1000_TXD_CMD_SNAP) {
        trace_e1000e_wrn_no_snap_support();
    }
}

static inline void
e1000e_raise_legacy_irq(E1000ECore *core)
{
    trace_e1000e_irq_legacy_notify(true);
    e1000x_inc_reg_if_not_full(core->mac, IAC);
    pci_set_irq(core->owner, 1);
}

static inline void
e1000e_lower_legacy_irq(E1000ECore *core)
{
    trace_e1000e_irq_legacy_notify(false);
    pci_set_irq(core->owner, 0);
}

static inline void
e1000e_intrmgr_rearm_timer(E1000IntrDelayTimer *timer)
{
    int64_t delay_ns = (int64_t) timer->core->mac[timer->delay_reg] *
                       timer->delay_resolution_ns;

    trace_e1000e_irq_rearm_timer(timer->delay_reg << 2, delay_ns);

    timer_mod(timer->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + delay_ns);

    timer->running = true;
}

static void
e1000e_intmgr_timer_resume(E1000IntrDelayTimer *timer)
{
    if (timer->running) {
        e1000e_intrmgr_rearm_timer(timer);
    }
}

static void
e1000e_intmgr_timer_pause(E1000IntrDelayTimer *timer)
{
    if (timer->running) {
        timer_del(timer->timer);
    }
}

static inline void
e1000e_intrmgr_stop_timer(E1000IntrDelayTimer *timer)
{
    if (timer->running) {
        timer_del(timer->timer);
        timer->running = false;
    }
}

static inline void
e1000e_intrmgr_fire_delayed_interrupts(E1000ECore *core)
{
    trace_e1000e_irq_fire_delayed_interrupts();
    e1000e_set_interrupt_cause(core, 0);
}

static void
e1000e_intrmgr_on_timer(void *opaque)
{
    E1000IntrDelayTimer *timer = opaque;

    trace_e1000e_irq_throttling_timer(timer->delay_reg << 2);

    timer->running = false;
    e1000e_intrmgr_fire_delayed_interrupts(timer->core);
}

static void
e1000e_intrmgr_on_throttling_timer(void *opaque)
{
    E1000IntrDelayTimer *timer = opaque;

    assert(!msix_enabled(timer->core->owner));

    timer->running = false;

    if (!timer->core->itr_intr_pending) {
        trace_e1000e_irq_throttling_no_pending_interrupts();
        return;
    }

    if (msi_enabled(timer->core->owner)) {
        trace_e1000e_irq_msi_notify_postponed();
        /* Clear msi_causes_pending to fire MSI eventually */
        timer->core->msi_causes_pending = 0;
        e1000e_set_interrupt_cause(timer->core, 0);
    } else {
        trace_e1000e_irq_legacy_notify_postponed();
        e1000e_set_interrupt_cause(timer->core, 0);
    }
}

static void
e1000e_intrmgr_on_msix_throttling_timer(void *opaque)
{
    E1000IntrDelayTimer *timer = opaque;
    int idx = timer - &timer->core->eitr[0];

    assert(msix_enabled(timer->core->owner));

    timer->running = false;

    if (!timer->core->eitr_intr_pending[idx]) {
        trace_e1000e_irq_throttling_no_pending_vec(idx);
        return;
    }

    trace_e1000e_irq_msix_notify_postponed_vec(idx);
    msix_notify(timer->core->owner, idx);
}

static void
e1000e_intrmgr_initialize_all_timers(E1000ECore *core, bool create)
{
    int i;

    core->radv.delay_reg = RADV;
    core->rdtr.delay_reg = RDTR;
    core->raid.delay_reg = RAID;
    core->tadv.delay_reg = TADV;
    core->tidv.delay_reg = TIDV;

    core->radv.delay_resolution_ns = E1000_INTR_DELAY_NS_RES;
    core->rdtr.delay_resolution_ns = E1000_INTR_DELAY_NS_RES;
    core->raid.delay_resolution_ns = E1000_INTR_DELAY_NS_RES;
    core->tadv.delay_resolution_ns = E1000_INTR_DELAY_NS_RES;
    core->tidv.delay_resolution_ns = E1000_INTR_DELAY_NS_RES;

    core->radv.core = core;
    core->rdtr.core = core;
    core->raid.core = core;
    core->tadv.core = core;
    core->tidv.core = core;

    core->itr.core = core;
    core->itr.delay_reg = ITR;
    core->itr.delay_resolution_ns = E1000_INTR_THROTTLING_NS_RES;

    for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
        core->eitr[i].core = core;
        core->eitr[i].delay_reg = EITR + i;
        core->eitr[i].delay_resolution_ns = E1000_INTR_THROTTLING_NS_RES;
    }

    if (!create) {
        return;
    }

    core->radv.timer =
        timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->radv);
    core->rdtr.timer =
        timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->rdtr);
    core->raid.timer =
        timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->raid);

    core->tadv.timer =
        timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->tadv);
    core->tidv.timer =
        timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->tidv);

    core->itr.timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                   e1000e_intrmgr_on_throttling_timer,
                                   &core->itr);

    for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
        core->eitr[i].timer =
            timer_new_ns(QEMU_CLOCK_VIRTUAL,
                         e1000e_intrmgr_on_msix_throttling_timer,
                         &core->eitr[i]);
    }
}
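
/*
 * RX/TX interrupt delaying: when MSI-X is not in use, causes the guest
 * allowed to be moderated (via RDTR/RADV/RAID and TIDV/TADV) accumulate
 * in core->delayed_causes and are only merged into ICR when one of the
 * delay timers fires; e1000e_set_interrupt_cause(core, 0) flushes them
 * through e1000e_intmgr_collect_delayed_causes().
 */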

static inline void
e1000e_intrmgr_stop_delay_timers(E1000ECore *core)
{
    e1000e_intrmgr_stop_timer(&core->radv);
    e1000e_intrmgr_stop_timer(&core->rdtr);
    e1000e_intrmgr_stop_timer(&core->raid);
    e1000e_intrmgr_stop_timer(&core->tidv);
    e1000e_intrmgr_stop_timer(&core->tadv);
}

static bool
e1000e_intrmgr_delay_rx_causes(E1000ECore *core, uint32_t *causes)
{
    uint32_t delayable_causes;
    uint32_t rdtr = core->mac[RDTR];
    uint32_t radv = core->mac[RADV];
    uint32_t raid = core->mac[RAID];

    if (msix_enabled(core->owner)) {
        return false;
    }

    delayable_causes = E1000_ICR_RXQ0 |
                       E1000_ICR_RXQ1 |
                       E1000_ICR_RXT0;

    if (!(core->mac[RFCTL] & E1000_RFCTL_ACK_DIS)) {
        delayable_causes |= E1000_ICR_ACK;
    }

    /* Clean up all causes that may be delayed */
    core->delayed_causes |= *causes & delayable_causes;
    *causes &= ~delayable_causes;

    /*
     * Check if delayed RX interrupts disabled by client
     * or if there are causes that cannot be delayed
     */
    if ((rdtr == 0) || (*causes != 0)) {
        return false;
    }

    /*
     * Check if delayed RX ACK interrupts disabled by client
     * and there is an ACK packet received
     */
    if ((raid == 0) && (core->delayed_causes & E1000_ICR_ACK)) {
        return false;
    }

    /* All causes delayed */
    e1000e_intrmgr_rearm_timer(&core->rdtr);

    if (!core->radv.running && (radv != 0)) {
        e1000e_intrmgr_rearm_timer(&core->radv);
    }

    if (!core->raid.running && (core->delayed_causes & E1000_ICR_ACK)) {
        e1000e_intrmgr_rearm_timer(&core->raid);
    }

    return true;
}

static bool
e1000e_intrmgr_delay_tx_causes(E1000ECore *core, uint32_t *causes)
{
    static const uint32_t delayable_causes = E1000_ICR_TXQ0 |
                                             E1000_ICR_TXQ1 |
                                             E1000_ICR_TXQE |
                                             E1000_ICR_TXDW;

    if (msix_enabled(core->owner)) {
        return false;
    }

    /* Clean up all causes that may be delayed */
    core->delayed_causes |= *causes & delayable_causes;
    *causes &= ~delayable_causes;

    /* If there are causes that cannot be delayed */
    if (*causes != 0) {
        return false;
    }

    /* All causes delayed */
    e1000e_intrmgr_rearm_timer(&core->tidv);

    if (!core->tadv.running && (core->mac[TADV] != 0)) {
        e1000e_intrmgr_rearm_timer(&core->tadv);
    }

    return true;
}

static uint32_t
e1000e_intmgr_collect_delayed_causes(E1000ECore *core)
{
    uint32_t res;

    if (msix_enabled(core->owner)) {
        assert(core->delayed_causes == 0);
        return 0;
    }

    res = core->delayed_causes;
    core->delayed_causes = 0;

    e1000e_intrmgr_stop_delay_timers(core);

    return res;
}

static void
e1000e_intrmgr_fire_all_timers(E1000ECore *core)
{
    int i;
    uint32_t val = e1000e_intmgr_collect_delayed_causes(core);

    trace_e1000e_irq_adding_delayed_causes(val, core->mac[ICR]);
    core->mac[ICR] |= val;

    if (core->itr.running) {
        timer_del(core->itr.timer);
        e1000e_intrmgr_on_throttling_timer(&core->itr);
    }

    for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
        if (core->eitr[i].running) {
            timer_del(core->eitr[i].timer);
            e1000e_intrmgr_on_msix_throttling_timer(&core->eitr[i]);
        }
    }
}

static void
e1000e_intrmgr_resume(E1000ECore *core)
{
    int i;

    e1000e_intmgr_timer_resume(&core->radv);
    e1000e_intmgr_timer_resume(&core->rdtr);
    e1000e_intmgr_timer_resume(&core->raid);
    e1000e_intmgr_timer_resume(&core->tidv);
    e1000e_intmgr_timer_resume(&core->tadv);

    e1000e_intmgr_timer_resume(&core->itr);

    for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
        e1000e_intmgr_timer_resume(&core->eitr[i]);
    }
}

static void
e1000e_intrmgr_pause(E1000ECore *core)
{
    int i;

    e1000e_intmgr_timer_pause(&core->radv);
    e1000e_intmgr_timer_pause(&core->rdtr);
    e1000e_intmgr_timer_pause(&core->raid);
    e1000e_intmgr_timer_pause(&core->tidv);
    e1000e_intmgr_timer_pause(&core->tadv);

    e1000e_intmgr_timer_pause(&core->itr);

    for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
        e1000e_intmgr_timer_pause(&core->eitr[i]);
    }
}

static void
e1000e_intrmgr_reset(E1000ECore *core)
{
    int i;

    core->delayed_causes = 0;

    e1000e_intrmgr_stop_delay_timers(core);

    e1000e_intrmgr_stop_timer(&core->itr);

    for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
        e1000e_intrmgr_stop_timer(&core->eitr[i]);
    }
}

static void
e1000e_intrmgr_pci_unint(E1000ECore *core)
{
    int i;

    timer_free(core->radv.timer);
    timer_free(core->rdtr.timer);
    timer_free(core->raid.timer);

    timer_free(core->tadv.timer);
    timer_free(core->tidv.timer);

    timer_free(core->itr.timer);

    for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
        timer_free(core->eitr[i].timer);
    }
}

static void
e1000e_intrmgr_pci_realize(E1000ECore *core)
{
    e1000e_intrmgr_initialize_all_timers(core, true);
}

static inline bool
e1000e_rx_csum_enabled(E1000ECore *core)
{
    return (core->mac[RXCSUM] & E1000_RXCSUM_PCSD) ? false : true;
}

static inline bool
e1000e_rx_use_legacy_descriptor(E1000ECore *core)
{
    return (core->mac[RFCTL] & E1000_RFCTL_EXTEN) ? false : true;
}
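
/*
 * RSS processing: a packet is hashed and dispatched to one of the RX
 * queues only when MRQC enables multiple queues, RXCSUM.PCSD is set and
 * extended (non-legacy) descriptors are in use (see e1000e_rss_enabled()
 * below); otherwise everything lands on queue 0.
 */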

static inline bool
e1000e_rx_use_ps_descriptor(E1000ECore *core)
{
    return !e1000e_rx_use_legacy_descriptor(core) &&
           (core->mac[RCTL] & E1000_RCTL_DTYP_PS);
}

static inline bool
e1000e_rss_enabled(E1000ECore *core)
{
    return E1000_MRQC_ENABLED(core->mac[MRQC]) &&
           !e1000e_rx_csum_enabled(core) &&
           !e1000e_rx_use_legacy_descriptor(core);
}

typedef struct E1000E_RSSInfo_st {
    bool enabled;
    uint32_t hash;
    uint32_t queue;
    uint32_t type;
} E1000E_RSSInfo;

static uint32_t
e1000e_rss_get_hash_type(E1000ECore *core, struct NetRxPkt *pkt)
{
    bool isip4, isip6, isudp, istcp;

    assert(e1000e_rss_enabled(core));

    net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);

    if (isip4) {
        bool fragment = net_rx_pkt_get_ip4_info(pkt)->fragment;

        trace_e1000e_rx_rss_ip4(fragment, istcp, core->mac[MRQC],
                                E1000_MRQC_EN_TCPIPV4(core->mac[MRQC]),
                                E1000_MRQC_EN_IPV4(core->mac[MRQC]));

        if (!fragment && istcp && E1000_MRQC_EN_TCPIPV4(core->mac[MRQC])) {
            return E1000_MRQ_RSS_TYPE_IPV4TCP;
        }

        if (E1000_MRQC_EN_IPV4(core->mac[MRQC])) {
            return E1000_MRQ_RSS_TYPE_IPV4;
        }
    } else if (isip6) {
        eth_ip6_hdr_info *ip6info = net_rx_pkt_get_ip6_info(pkt);

        bool ex_dis = core->mac[RFCTL] & E1000_RFCTL_IPV6_EX_DIS;
        bool new_ex_dis = core->mac[RFCTL] & E1000_RFCTL_NEW_IPV6_EXT_DIS;

        /*
         * The following two traces must not be combined because the
         * resulting event would have 11 arguments in total and some trace
         * backends (at least "ust") have a limit of 10 arguments per
         * event. Events with more arguments fail to compile for backends
         * like these.
         */
        trace_e1000e_rx_rss_ip6_rfctl(core->mac[RFCTL]);
        trace_e1000e_rx_rss_ip6(ex_dis, new_ex_dis, istcp,
                                ip6info->has_ext_hdrs,
                                ip6info->rss_ex_dst_valid,
                                ip6info->rss_ex_src_valid,
                                core->mac[MRQC],
                                E1000_MRQC_EN_TCPIPV6(core->mac[MRQC]),
                                E1000_MRQC_EN_IPV6EX(core->mac[MRQC]),
                                E1000_MRQC_EN_IPV6(core->mac[MRQC]));

        if ((!ex_dis || !ip6info->has_ext_hdrs) &&
            (!new_ex_dis || !(ip6info->rss_ex_dst_valid ||
                              ip6info->rss_ex_src_valid))) {

            if (istcp && !ip6info->fragment &&
                E1000_MRQC_EN_TCPIPV6(core->mac[MRQC])) {
                return E1000_MRQ_RSS_TYPE_IPV6TCP;
            }

            if (E1000_MRQC_EN_IPV6EX(core->mac[MRQC])) {
                return E1000_MRQ_RSS_TYPE_IPV6EX;
            }

        }

        if (E1000_MRQC_EN_IPV6(core->mac[MRQC])) {
            return E1000_MRQ_RSS_TYPE_IPV6;
        }

    }

    return E1000_MRQ_RSS_TYPE_NONE;
}

static uint32_t
e1000e_rss_calc_hash(E1000ECore *core,
                     struct NetRxPkt *pkt,
                     E1000E_RSSInfo *info)
{
    NetRxPktRssType type;

    assert(e1000e_rss_enabled(core));

    switch (info->type) {
    case E1000_MRQ_RSS_TYPE_IPV4:
        type = NetPktRssIpV4;
        break;
    case E1000_MRQ_RSS_TYPE_IPV4TCP:
        type = NetPktRssIpV4Tcp;
        break;
    case E1000_MRQ_RSS_TYPE_IPV6TCP:
        type = NetPktRssIpV6TcpEx;
        break;
    case E1000_MRQ_RSS_TYPE_IPV6:
        type = NetPktRssIpV6;
        break;
    case E1000_MRQ_RSS_TYPE_IPV6EX:
        type = NetPktRssIpV6Ex;
        break;
    default:
        assert(false);
        return 0;
    }

    return net_rx_pkt_calc_rss_hash(pkt, type, (uint8_t *) &core->mac[RSSRK]);
}

static void
e1000e_rss_parse_packet(E1000ECore *core,
                        struct NetRxPkt *pkt,
                        E1000E_RSSInfo *info)
{
    trace_e1000e_rx_rss_started();

    if (!e1000e_rss_enabled(core)) {
        info->enabled = false;
        info->hash = 0;
        info->queue = 0;
        info->type = 0;
        trace_e1000e_rx_rss_disabled();
        return;
    }

    info->enabled = true;

    info->type = e1000e_rss_get_hash_type(core, pkt);

    trace_e1000e_rx_rss_type(info->type);

    if (info->type == E1000_MRQ_RSS_TYPE_NONE) {
        info->hash = 0;
        info->queue = 0;
        return;
    }

    info->hash = e1000e_rss_calc_hash(core, pkt, info);
    info->queue = E1000_RSS_QUEUE(&core->mac[RETA], info->hash);
}

static void
e1000e_setup_tx_offloads(E1000ECore *core, struct e1000e_tx *tx)
{
    if (tx->props.tse && tx->cptse) {
        net_tx_pkt_build_vheader(tx->tx_pkt, true, true, tx->props.mss);
        net_tx_pkt_update_ip_checksums(tx->tx_pkt);
        e1000x_inc_reg_if_not_full(core->mac, TSCTC);
        return;
    }

    if (tx->sum_needed & E1000_TXD_POPTS_TXSM) {
        net_tx_pkt_build_vheader(tx->tx_pkt, false, true, 0);
    }

    if (tx->sum_needed & E1000_TXD_POPTS_IXSM) {
        net_tx_pkt_update_ip_hdr_checksum(tx->tx_pkt);
    }
}

static bool
e1000e_tx_pkt_send(E1000ECore *core, struct e1000e_tx *tx, int queue_index)
{
    int target_queue = MIN(core->max_queue_num, queue_index);
    NetClientState *queue = qemu_get_subqueue(core->owner_nic, target_queue);

    e1000e_setup_tx_offloads(core, tx);

    net_tx_pkt_dump(tx->tx_pkt);

    if ((core->phy[0][PHY_CTRL] & MII_CR_LOOPBACK) ||
        ((core->mac[RCTL] & E1000_RCTL_LBM_MAC) == E1000_RCTL_LBM_MAC)) {
        return net_tx_pkt_send_loopback(tx->tx_pkt, queue);
    } else {
        return net_tx_pkt_send(tx->tx_pkt, queue);
    }
}

static void
e1000e_on_tx_done_update_stats(E1000ECore *core, struct NetTxPkt *tx_pkt)
{
    static const int PTCregs[6] = { PTC64, PTC127, PTC255, PTC511,
                                    PTC1023, PTC1522 };

    size_t tot_len = net_tx_pkt_get_total_len(tx_pkt);

    e1000x_increase_size_stats(core->mac, PTCregs, tot_len);
    e1000x_inc_reg_if_not_full(core->mac, TPT);
    e1000x_grow_8reg_if_not_full(core->mac, TOTL, tot_len);

    switch (net_tx_pkt_get_packet_type(tx_pkt)) {
    case ETH_PKT_BCAST:
        e1000x_inc_reg_if_not_full(core->mac, BPTC);
        break;
    case ETH_PKT_MCAST:
        e1000x_inc_reg_if_not_full(core->mac, MPTC);
        break;
    case ETH_PKT_UCAST:
        break;
    default:
        g_assert_not_reached();
    }

    core->mac[GPTC] = core->mac[TPT];
    core->mac[GOTCL] = core->mac[TOTL];
    core->mac[GOTCH] = core->mac[TOTH];
}
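
/*
 * A TX descriptor is one of three kinds, distinguished by the DEXT/DTYP
 * bits: a context descriptor (offload parameters only), an extended data
 * descriptor, or a legacy descriptor. Data fragments accumulate in
 * tx->tx_pkt until a descriptor with EOP set completes the packet.
 */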

static void
e1000e_process_tx_desc(E1000ECore *core,
                       struct e1000e_tx *tx,
                       struct e1000_tx_desc *dp,
                       int queue_index)
{
    uint32_t txd_lower = le32_to_cpu(dp->lower.data);
    uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D);
    unsigned int split_size = txd_lower & 0xffff;
    uint64_t addr;
    struct e1000_context_desc *xp = (struct e1000_context_desc *)dp;
    bool eop = txd_lower & E1000_TXD_CMD_EOP;

    if (dtype == E1000_TXD_CMD_DEXT) { /* context descriptor */
        e1000x_read_tx_ctx_descr(xp, &tx->props);
        e1000e_process_snap_option(core, le32_to_cpu(xp->cmd_and_length));
        return;
    } else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) {
        /* data descriptor */
        tx->sum_needed = le32_to_cpu(dp->upper.data) >> 8;
        tx->cptse = (txd_lower & E1000_TXD_CMD_TSE) ? 1 : 0;
        e1000e_process_ts_option(core, dp);
    } else {
        /* legacy descriptor */
        e1000e_process_ts_option(core, dp);
        tx->cptse = 0;
    }

    addr = le64_to_cpu(dp->buffer_addr);

    if (!tx->skip_cp) {
        if (!net_tx_pkt_add_raw_fragment(tx->tx_pkt, addr, split_size)) {
            tx->skip_cp = true;
        }
    }

    if (eop) {
        if (!tx->skip_cp && net_tx_pkt_parse(tx->tx_pkt)) {
            if (e1000x_vlan_enabled(core->mac) &&
                e1000x_is_vlan_txd(txd_lower)) {
                net_tx_pkt_setup_vlan_header_ex(tx->tx_pkt,
                    le16_to_cpu(dp->upper.fields.special), core->mac[VET]);
            }
            if (e1000e_tx_pkt_send(core, tx, queue_index)) {
                e1000e_on_tx_done_update_stats(core, tx->tx_pkt);
            }
        }

        tx->skip_cp = false;
        net_tx_pkt_reset(tx->tx_pkt);

        tx->sum_needed = 0;
        tx->cptse = 0;
    }
}

static inline uint32_t
e1000e_tx_wb_interrupt_cause(E1000ECore *core, int queue_idx)
{
    if (!msix_enabled(core->owner)) {
        return E1000_ICR_TXDW;
    }

    return (queue_idx == 0) ? E1000_ICR_TXQ0 : E1000_ICR_TXQ1;
}

static inline uint32_t
e1000e_rx_wb_interrupt_cause(E1000ECore *core, int queue_idx,
                             bool min_threshold_hit)
{
    if (!msix_enabled(core->owner)) {
        return E1000_ICS_RXT0 | (min_threshold_hit ? E1000_ICS_RXDMT0 : 0);
    }

    return (queue_idx == 0) ? E1000_ICR_RXQ0 : E1000_ICR_RXQ1;
}
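
/*
 * Descriptor writeback: a descriptor is written back with DD set only
 * when it requested status reporting (RS bit) or IVAR forces an
 * interrupt on every writeback. The returned cause (TXDW, or a per-queue
 * TXQ cause under MSI-X) is raised by the caller, possibly delayed when
 * the descriptor carried the IDE bit.
 */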

static uint32_t
e1000e_txdesc_writeback(E1000ECore *core, dma_addr_t base,
                        struct e1000_tx_desc *dp, bool *ide, int queue_idx)
{
    uint32_t txd_upper, txd_lower = le32_to_cpu(dp->lower.data);

    if (!(txd_lower & E1000_TXD_CMD_RS) &&
        !(core->mac[IVAR] & E1000_IVAR_TX_INT_EVERY_WB)) {
        return 0;
    }

    *ide = (txd_lower & E1000_TXD_CMD_IDE) ? true : false;

    txd_upper = le32_to_cpu(dp->upper.data) | E1000_TXD_STAT_DD;

    dp->upper.data = cpu_to_le32(txd_upper);
    pci_dma_write(core->owner, base + ((char *)&dp->upper - (char *)dp),
                  &dp->upper, sizeof(dp->upper));
    return e1000e_tx_wb_interrupt_cause(core, queue_idx);
}

typedef struct E1000E_RingInfo_st {
    int dbah;
    int dbal;
    int dlen;
    int dh;
    int dt;
    int idx;
} E1000E_RingInfo;

static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
    return core->mac[r->dh] == core->mac[r->dt] ||
           core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}

static inline uint64_t
e1000e_ring_base(E1000ECore *core, const E1000E_RingInfo *r)
{
    uint64_t bah = core->mac[r->dbah];
    uint64_t bal = core->mac[r->dbal];

    return (bah << 32) + bal;
}

static inline uint64_t
e1000e_ring_head_descr(E1000ECore *core, const E1000E_RingInfo *r)
{
    return e1000e_ring_base(core, r) + E1000_RING_DESC_LEN * core->mac[r->dh];
}

static inline void
e1000e_ring_advance(E1000ECore *core, const E1000E_RingInfo *r, uint32_t count)
{
    core->mac[r->dh] += count;

    if (core->mac[r->dh] * E1000_RING_DESC_LEN >= core->mac[r->dlen]) {
        core->mac[r->dh] = 0;
    }
}

static inline uint32_t
e1000e_ring_free_descr_num(E1000ECore *core, const E1000E_RingInfo *r)
{
    trace_e1000e_ring_free_space(r->idx, core->mac[r->dlen],
                                 core->mac[r->dh], core->mac[r->dt]);

    if (core->mac[r->dh] <= core->mac[r->dt]) {
        return core->mac[r->dt] - core->mac[r->dh];
    }

    if (core->mac[r->dh] > core->mac[r->dt]) {
        return core->mac[r->dlen] / E1000_RING_DESC_LEN +
               core->mac[r->dt] - core->mac[r->dh];
    }

    g_assert_not_reached();
    return 0;
}

static inline bool
e1000e_ring_enabled(E1000ECore *core, const E1000E_RingInfo *r)
{
    return core->mac[r->dlen] > 0;
}

static inline uint32_t
e1000e_ring_len(E1000ECore *core, const E1000E_RingInfo *r)
{
    return core->mac[r->dlen];
}

typedef struct E1000E_TxRing_st {
    const E1000E_RingInfo *i;
    struct e1000e_tx *tx;
} E1000E_TxRing;

static inline int
e1000e_mq_queue_idx(int base_reg_idx, int reg_idx)
{
    return (reg_idx - base_reg_idx) / (0x100 >> 2);
}

static inline void
e1000e_tx_ring_init(E1000ECore *core, E1000E_TxRing *txr, int idx)
{
    static const E1000E_RingInfo i[E1000E_NUM_QUEUES] = {
        { TDBAH,  TDBAL,  TDLEN,  TDH,  TDT,  0 },
        { TDBAH1, TDBAL1, TDLEN1, TDH1, TDT1, 1 }
    };

    assert(idx < ARRAY_SIZE(i));

    txr->i  = &i[idx];
    txr->tx = &core->tx[idx];
}

typedef struct E1000E_RxRing_st {
    const E1000E_RingInfo *i;
} E1000E_RxRing;

static inline void
e1000e_rx_ring_init(E1000ECore *core, E1000E_RxRing *rxr, int idx)
{
    static const E1000E_RingInfo i[E1000E_NUM_QUEUES] = {
        { RDBAH0, RDBAL0, RDLEN0, RDH0, RDT0, 0 },
        { RDBAH1, RDBAL1, RDLEN1, RDH1, RDT1, 1 }
    };

    assert(idx < ARRAY_SIZE(i));

    rxr->i = &i[idx];
}
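
/*
 * Transmit path: walk the descriptor ring from head to tail, DMA-read
 * and process each descriptor, write back its status, and finally raise
 * the accumulated causes unless the last written-back descriptor asked
 * for interrupt delay (IDE) and the TIDV/TADV timers could absorb them.
 */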

static void
e1000e_start_xmit(E1000ECore *core, const E1000E_TxRing *txr)
{
    dma_addr_t base;
    struct e1000_tx_desc desc;
    bool ide = false;
    const E1000E_RingInfo *txi = txr->i;
    uint32_t cause = E1000_ICS_TXQE;

    if (!(core->mac[TCTL] & E1000_TCTL_EN)) {
        trace_e1000e_tx_disabled();
        return;
    }

    while (!e1000e_ring_empty(core, txi)) {
        base = e1000e_ring_head_descr(core, txi);

        pci_dma_read(core->owner, base, &desc, sizeof(desc));

        trace_e1000e_tx_descr((void *)(intptr_t)desc.buffer_addr,
                              desc.lower.data, desc.upper.data);

        e1000e_process_tx_desc(core, txr->tx, &desc, txi->idx);
        cause |= e1000e_txdesc_writeback(core, base, &desc, &ide, txi->idx);

        e1000e_ring_advance(core, txi, 1);
    }

    if (!ide || !e1000e_intrmgr_delay_tx_causes(core, &cause)) {
        e1000e_set_interrupt_cause(core, cause);
    }
}

static bool
e1000e_has_rxbufs(E1000ECore *core, const E1000E_RingInfo *r,
                  size_t total_size)
{
    uint32_t bufs = e1000e_ring_free_descr_num(core, r);

    trace_e1000e_rx_has_buffers(r->idx, bufs, total_size,
                                core->rx_desc_buf_size);

    return total_size <= bufs / (core->rx_desc_len / E1000_MIN_RX_DESC_LEN) *
                         core->rx_desc_buf_size;
}

void
e1000e_start_recv(E1000ECore *core)
{
    int i;

    trace_e1000e_rx_start_recv();

    for (i = 0; i <= core->max_queue_num; i++) {
        qemu_flush_queued_packets(qemu_get_subqueue(core->owner_nic, i));
    }
}

bool
e1000e_can_receive(E1000ECore *core)
{
    int i;

    if (!e1000x_rx_ready(core->owner, core->mac)) {
        return false;
    }

    for (i = 0; i < E1000E_NUM_QUEUES; i++) {
        E1000E_RxRing rxr;

        e1000e_rx_ring_init(core, &rxr, i);
        if (e1000e_ring_enabled(core, rxr.i) &&
            e1000e_has_rxbufs(core, rxr.i, 1)) {
            trace_e1000e_rx_can_recv();
            return true;
        }
    }

    trace_e1000e_rx_can_recv_rings_full();
    return false;
}

ssize_t
e1000e_receive(E1000ECore *core, const uint8_t *buf, size_t size)
{
    const struct iovec iov = {
        .iov_base = (uint8_t *)buf,
        .iov_len = size
    };

    return e1000e_receive_iov(core, &iov, 1);
}

static inline bool
e1000e_rx_l3_cso_enabled(E1000ECore *core)
{
    return !!(core->mac[RXCSUM] & E1000_RXCSUM_IPOFLD);
}

static inline bool
e1000e_rx_l4_cso_enabled(E1000ECore *core)
{
    return !!(core->mac[RXCSUM] & E1000_RXCSUM_TUOFLD);
}

static bool
e1000e_receive_filter(E1000ECore *core, const uint8_t *buf, int size)
{
    uint32_t rctl = core->mac[RCTL];

    if (e1000x_is_vlan_packet(buf, core->mac[VET]) &&
        e1000x_vlan_rx_filter_enabled(core->mac)) {
        uint16_t vid = lduw_be_p(buf + 14);
        uint32_t vfta = ldl_le_p((uint32_t *)(core->mac + VFTA) +
                                 ((vid >> 5) & 0x7f));
        if ((vfta & (1 << (vid & 0x1f))) == 0) {
            trace_e1000e_rx_flt_vlan_mismatch(vid);
            return false;
        } else {
            trace_e1000e_rx_flt_vlan_match(vid);
        }
    }

    switch (net_rx_pkt_get_packet_type(core->rx_pkt)) {
    case ETH_PKT_UCAST:
        if (rctl & E1000_RCTL_UPE) {
            return true; /* promiscuous ucast */
        }
        break;

    case ETH_PKT_BCAST:
        if (rctl & E1000_RCTL_BAM) {
            return true; /* broadcast enabled */
        }
        break;

    case ETH_PKT_MCAST:
        if (rctl & E1000_RCTL_MPE) {
            return true; /* promiscuous mcast */
        }
        break;

    default:
        g_assert_not_reached();
    }

    return e1000x_rx_group_filter(core->mac, buf);
}
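
/*
 * RX descriptor parsing: depending on RFCTL.EXTEN and RCTL.DTYP the ring
 * holds legacy, extended or packet-split descriptors. The helpers below
 * extract up to MAX_PS_BUFFERS buffer addresses into a common array so
 * the rest of the RX path is format-agnostic.
 */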

static inline void
e1000e_read_lgcy_rx_descr(E1000ECore *core, uint8_t *desc, hwaddr *buff_addr)
{
    struct e1000_rx_desc *d = (struct e1000_rx_desc *) desc;
    *buff_addr = le64_to_cpu(d->buffer_addr);
}

static inline void
e1000e_read_ext_rx_descr(E1000ECore *core, uint8_t *desc, hwaddr *buff_addr)
{
    union e1000_rx_desc_extended *d = (union e1000_rx_desc_extended *) desc;
    *buff_addr = le64_to_cpu(d->read.buffer_addr);
}

static inline void
e1000e_read_ps_rx_descr(E1000ECore *core, uint8_t *desc,
                        hwaddr (*buff_addr)[MAX_PS_BUFFERS])
{
    int i;
    union e1000_rx_desc_packet_split *d =
        (union e1000_rx_desc_packet_split *) desc;

    for (i = 0; i < MAX_PS_BUFFERS; i++) {
        (*buff_addr)[i] = le64_to_cpu(d->read.buffer_addr[i]);
    }

    trace_e1000e_rx_desc_ps_read((*buff_addr)[0], (*buff_addr)[1],
                                 (*buff_addr)[2], (*buff_addr)[3]);
}

static inline void
e1000e_read_rx_descr(E1000ECore *core, uint8_t *desc,
                     hwaddr (*buff_addr)[MAX_PS_BUFFERS])
{
    if (e1000e_rx_use_legacy_descriptor(core)) {
        e1000e_read_lgcy_rx_descr(core, desc, &(*buff_addr)[0]);
        (*buff_addr)[1] = (*buff_addr)[2] = (*buff_addr)[3] = 0;
    } else {
        if (core->mac[RCTL] & E1000_RCTL_DTYP_PS) {
            e1000e_read_ps_rx_descr(core, desc, buff_addr);
        } else {
            e1000e_read_ext_rx_descr(core, desc, &(*buff_addr)[0]);
            (*buff_addr)[1] = (*buff_addr)[2] = (*buff_addr)[3] = 0;
        }
    }
}

static void
e1000e_verify_csum_in_sw(E1000ECore *core,
                         struct NetRxPkt *pkt,
                         uint32_t *status_flags,
                         bool istcp, bool isudp)
{
    bool csum_valid;
    uint32_t csum_error;

    if (e1000e_rx_l3_cso_enabled(core)) {
        if (!net_rx_pkt_validate_l3_csum(pkt, &csum_valid)) {
            trace_e1000e_rx_metadata_l3_csum_validation_failed();
        } else {
            csum_error = csum_valid ? 0 : E1000_RXDEXT_STATERR_IPE;
            *status_flags |= E1000_RXD_STAT_IPCS | csum_error;
        }
    } else {
        trace_e1000e_rx_metadata_l3_cso_disabled();
    }

    if (!e1000e_rx_l4_cso_enabled(core)) {
        trace_e1000e_rx_metadata_l4_cso_disabled();
        return;
    }

    if (!net_rx_pkt_validate_l4_csum(pkt, &csum_valid)) {
        trace_e1000e_rx_metadata_l4_csum_validation_failed();
        return;
    }

    csum_error = csum_valid ? 0 : E1000_RXDEXT_STATERR_TCPE;

    if (istcp) {
        *status_flags |= E1000_RXD_STAT_TCPCS |
                         csum_error;
    } else if (isudp) {
        *status_flags |= E1000_RXD_STAT_TCPCS |
                         E1000_RXD_STAT_UDPCS |
                         csum_error;
    }
}
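
/*
 * Pure-ACK detection, used both for the ACK interrupt cause and for the
 * ACK delay timer (RAID): with RFCTL.ACK_DATA_DIS set only data-less
 * ACK segments qualify.
 */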

static inline bool
e1000e_is_tcp_ack(E1000ECore *core, struct NetRxPkt *rx_pkt)
{
    if (!net_rx_pkt_is_tcp_ack(rx_pkt)) {
        return false;
    }

    if (core->mac[RFCTL] & E1000_RFCTL_ACK_DATA_DIS) {
        return !net_rx_pkt_has_tcp_data(rx_pkt);
    }

    return true;
}

static void
e1000e_build_rx_metadata(E1000ECore *core,
                         struct NetRxPkt *pkt,
                         bool is_eop,
                         const E1000E_RSSInfo *rss_info,
                         uint32_t *rss, uint32_t *mrq,
                         uint32_t *status_flags,
                         uint16_t *ip_id,
                         uint16_t *vlan_tag)
{
    struct virtio_net_hdr *vhdr;
    bool isip4, isip6, istcp, isudp;
    uint32_t pkt_type;

    *status_flags = E1000_RXD_STAT_DD;

    /* No additional metadata needed for non-EOP descriptors */
    if (!is_eop) {
        goto func_exit;
    }

    *status_flags |= E1000_RXD_STAT_EOP;

    net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);
    trace_e1000e_rx_metadata_protocols(isip4, isip6, isudp, istcp);

    /* VLAN state */
    if (net_rx_pkt_is_vlan_stripped(pkt)) {
        *status_flags |= E1000_RXD_STAT_VP;
        *vlan_tag = cpu_to_le16(net_rx_pkt_get_vlan_tag(pkt));
        trace_e1000e_rx_metadata_vlan(*vlan_tag);
    }

    /* Packet parsing results */
    if ((core->mac[RXCSUM] & E1000_RXCSUM_PCSD) != 0) {
        if (rss_info->enabled) {
            *rss = cpu_to_le32(rss_info->hash);
            *mrq = cpu_to_le32(rss_info->type | (rss_info->queue << 8));
            trace_e1000e_rx_metadata_rss(*rss, *mrq);
        }
    } else if (isip4) {
        *status_flags |= E1000_RXD_STAT_IPIDV;
        *ip_id = cpu_to_le16(net_rx_pkt_get_ip_id(pkt));
        trace_e1000e_rx_metadata_ip_id(*ip_id);
    }

    if (istcp && e1000e_is_tcp_ack(core, pkt)) {
        *status_flags |= E1000_RXD_STAT_ACK;
        trace_e1000e_rx_metadata_ack();
    }

    if (isip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_DIS)) {
        trace_e1000e_rx_metadata_ipv6_filtering_disabled();
        pkt_type = E1000_RXD_PKT_MAC;
    } else if (istcp || isudp) {
        pkt_type = isip4 ? E1000_RXD_PKT_IP4_XDP : E1000_RXD_PKT_IP6_XDP;
    } else if (isip4 || isip6) {
        pkt_type = isip4 ? E1000_RXD_PKT_IP4 : E1000_RXD_PKT_IP6;
    } else {
        pkt_type = E1000_RXD_PKT_MAC;
    }

    *status_flags |= E1000_RXD_PKT_TYPE(pkt_type);
    trace_e1000e_rx_metadata_pkt_type(pkt_type);

    /* RX CSO information */
    if (isip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_XSUM_DIS)) {
        trace_e1000e_rx_metadata_ipv6_sum_disabled();
        goto func_exit;
    }

    if (!net_rx_pkt_has_virt_hdr(pkt)) {
        trace_e1000e_rx_metadata_no_virthdr();
        e1000e_verify_csum_in_sw(core, pkt, status_flags, istcp, isudp);
        goto func_exit;
    }

    vhdr = net_rx_pkt_get_vhdr(pkt);

    if (!(vhdr->flags & VIRTIO_NET_HDR_F_DATA_VALID) &&
        !(vhdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) {
        trace_e1000e_rx_metadata_virthdr_no_csum_info();
        e1000e_verify_csum_in_sw(core, pkt, status_flags, istcp, isudp);
        goto func_exit;
    }

    if (e1000e_rx_l3_cso_enabled(core)) {
        *status_flags |= isip4 ? E1000_RXD_STAT_IPCS : 0;
    } else {
        trace_e1000e_rx_metadata_l3_cso_disabled();
    }

    if (e1000e_rx_l4_cso_enabled(core)) {
        if (istcp) {
            *status_flags |= E1000_RXD_STAT_TCPCS;
        } else if (isudp) {
            *status_flags |= E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS;
        }
    } else {
        trace_e1000e_rx_metadata_l4_cso_disabled();
    }

    trace_e1000e_rx_metadata_status_flags(*status_flags);

func_exit:
    *status_flags = cpu_to_le32(*status_flags);
}
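
/*
 * Descriptor writeback helpers: each RX descriptor format has its own
 * layout for the length, checksum, RSS hash and VLAN fields, but all of
 * them are filled from the common metadata computed above.
 */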

static inline void
e1000e_write_lgcy_rx_descr(E1000ECore *core, uint8_t *desc,
                           struct NetRxPkt *pkt,
                           const E1000E_RSSInfo *rss_info,
                           uint16_t length)
{
    uint32_t status_flags, rss, mrq;
    uint16_t ip_id;

    struct e1000_rx_desc *d = (struct e1000_rx_desc *) desc;

    assert(!rss_info->enabled);

    d->length = cpu_to_le16(length);
    d->csum = 0;

    e1000e_build_rx_metadata(core, pkt, pkt != NULL,
                             rss_info,
                             &rss, &mrq,
                             &status_flags, &ip_id,
                             &d->special);
    d->errors = (uint8_t) (le32_to_cpu(status_flags) >> 24);
    d->status = (uint8_t) le32_to_cpu(status_flags);
}

static inline void
e1000e_write_ext_rx_descr(E1000ECore *core, uint8_t *desc,
                          struct NetRxPkt *pkt,
                          const E1000E_RSSInfo *rss_info,
                          uint16_t length)
{
    union e1000_rx_desc_extended *d = (union e1000_rx_desc_extended *) desc;

    memset(&d->wb, 0, sizeof(d->wb));

    d->wb.upper.length = cpu_to_le16(length);

    e1000e_build_rx_metadata(core, pkt, pkt != NULL,
                             rss_info,
                             &d->wb.lower.hi_dword.rss,
                             &d->wb.lower.mrq,
                             &d->wb.upper.status_error,
                             &d->wb.lower.hi_dword.csum_ip.ip_id,
                             &d->wb.upper.vlan);
}

static inline void
e1000e_write_ps_rx_descr(E1000ECore *core, uint8_t *desc,
                         struct NetRxPkt *pkt,
                         const E1000E_RSSInfo *rss_info,
                         size_t ps_hdr_len,
                         uint16_t(*written)[MAX_PS_BUFFERS])
{
    int i;
    union e1000_rx_desc_packet_split *d =
        (union e1000_rx_desc_packet_split *) desc;

    memset(&d->wb, 0, sizeof(d->wb));

    d->wb.middle.length0 = cpu_to_le16((*written)[0]);

    for (i = 0; i < PS_PAGE_BUFFERS; i++) {
        d->wb.upper.length[i] = cpu_to_le16((*written)[i + 1]);
    }

    e1000e_build_rx_metadata(core, pkt, pkt != NULL,
                             rss_info,
                             &d->wb.lower.hi_dword.rss,
                             &d->wb.lower.mrq,
                             &d->wb.middle.status_error,
                             &d->wb.lower.hi_dword.csum_ip.ip_id,
                             &d->wb.middle.vlan);

    d->wb.upper.header_status =
        cpu_to_le16(ps_hdr_len | (ps_hdr_len ? E1000_RXDPS_HDRSTAT_HDRSP : 0));

    trace_e1000e_rx_desc_ps_write((*written)[0], (*written)[1],
                                  (*written)[2], (*written)[3]);
}

static inline void
e1000e_write_rx_descr(E1000ECore *core, uint8_t *desc,
                      struct NetRxPkt *pkt, const E1000E_RSSInfo *rss_info,
                      size_t ps_hdr_len, uint16_t(*written)[MAX_PS_BUFFERS])
{
    if (e1000e_rx_use_legacy_descriptor(core)) {
        assert(ps_hdr_len == 0);
        e1000e_write_lgcy_rx_descr(core, desc, pkt, rss_info, (*written)[0]);
    } else {
        if (core->mac[RCTL] & E1000_RCTL_DTYP_PS) {
            e1000e_write_ps_rx_descr(core, desc, pkt, rss_info,
                                     ps_hdr_len, written);
        } else {
            assert(ps_hdr_len == 0);
            e1000e_write_ext_rx_descr(core, desc, pkt, rss_info,
                                      (*written)[0]);
        }
    }
}

static inline void
e1000e_pci_dma_write_rx_desc(E1000ECore *core, dma_addr_t addr,
                             uint8_t *desc, dma_addr_t len)
{
    PCIDevice *dev = core->owner;

    if (e1000e_rx_use_legacy_descriptor(core)) {
        struct e1000_rx_desc *d = (struct e1000_rx_desc *) desc;
        size_t offset = offsetof(struct e1000_rx_desc, status);
        uint8_t status = d->status;

        d->status &= ~E1000_RXD_STAT_DD;
        pci_dma_write(dev, addr, desc, len);

        if (status & E1000_RXD_STAT_DD) {
            d->status = status;
            pci_dma_write(dev, addr + offset, &status, sizeof(status));
        }
    } else {
        if (core->mac[RCTL] & E1000_RCTL_DTYP_PS) {
            union e1000_rx_desc_packet_split *d =
                (union e1000_rx_desc_packet_split *) desc;
            size_t offset = offsetof(union e1000_rx_desc_packet_split,
                                     wb.middle.status_error);
            uint32_t status = d->wb.middle.status_error;

            d->wb.middle.status_error &= ~E1000_RXD_STAT_DD;
            pci_dma_write(dev, addr, desc, len);

            if (status & E1000_RXD_STAT_DD) {
                d->wb.middle.status_error = status;
                pci_dma_write(dev, addr + offset, &status, sizeof(status));
            }
        } else {
            union e1000_rx_desc_extended *d =
                (union e1000_rx_desc_extended *) desc;
            size_t offset = offsetof(union e1000_rx_desc_extended,
                                     wb.upper.status_error);
            uint32_t status = d->wb.upper.status_error;

            d->wb.upper.status_error &= ~E1000_RXD_STAT_DD;
            pci_dma_write(dev, addr, desc, len);

            if (status & E1000_RXD_STAT_DD) {
                d->wb.upper.status_error = status;
                pci_dma_write(dev, addr + offset, &status, sizeof(status));
            }
        }
    }
}

typedef struct e1000e_ba_state_st {
    uint16_t written[MAX_PS_BUFFERS];
    uint8_t cur_idx;
} e1000e_ba_state;

static inline void
e1000e_write_hdr_to_rx_buffers(E1000ECore *core,
                               hwaddr (*ba)[MAX_PS_BUFFERS],
                               e1000e_ba_state *bastate,
                               const char *data,
                               dma_addr_t data_len)
{
    assert(data_len <= core->rxbuf_sizes[0] - bastate->written[0]);

    pci_dma_write(core->owner, (*ba)[0] + bastate->written[0], data, data_len);
    bastate->written[0] += data_len;

    bastate->cur_idx = 1;
}
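
/*
 * Payload scattering: data is spread across the (up to MAX_PS_BUFFERS)
 * buffers of the current descriptor, moving to the next buffer whenever
 * the current one fills up. In packet-split mode buffer 0 is reserved
 * for the header, hence cur_idx starts at 1 after the header write.
 */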

static void
e1000e_write_to_rx_buffers(E1000ECore *core,
                           hwaddr (*ba)[MAX_PS_BUFFERS],
                           e1000e_ba_state *bastate,
                           const char *data,
                           dma_addr_t data_len)
{
    while (data_len > 0) {
        uint32_t cur_buf_len = core->rxbuf_sizes[bastate->cur_idx];
        uint32_t cur_buf_bytes_left = cur_buf_len -
                                      bastate->written[bastate->cur_idx];
        uint32_t bytes_to_write = MIN(data_len, cur_buf_bytes_left);

        trace_e1000e_rx_desc_buff_write(bastate->cur_idx,
                                        (*ba)[bastate->cur_idx],
                                        bastate->written[bastate->cur_idx],
                                        data,
                                        bytes_to_write);

        pci_dma_write(core->owner,
            (*ba)[bastate->cur_idx] + bastate->written[bastate->cur_idx],
            data, bytes_to_write);

        bastate->written[bastate->cur_idx] += bytes_to_write;
        data += bytes_to_write;
        data_len -= bytes_to_write;

        if (bastate->written[bastate->cur_idx] == cur_buf_len) {
            bastate->cur_idx++;
        }

        assert(bastate->cur_idx < MAX_PS_BUFFERS);
    }
}

static void
e1000e_update_rx_stats(E1000ECore *core,
                       size_t data_size,
                       size_t data_fcs_size)
{
    e1000x_update_rx_total_stats(core->mac, data_size, data_fcs_size);

    switch (net_rx_pkt_get_packet_type(core->rx_pkt)) {
    case ETH_PKT_BCAST:
        e1000x_inc_reg_if_not_full(core->mac, BPRC);
        break;

    case ETH_PKT_MCAST:
        e1000x_inc_reg_if_not_full(core->mac, MPRC);
        break;

    default:
        break;
    }
}

static inline bool
e1000e_rx_descr_threshold_hit(E1000ECore *core, const E1000E_RingInfo *rxi)
{
    return e1000e_ring_free_descr_num(core, rxi) ==
           e1000e_ring_len(core, rxi) >> core->rxbuf_min_shift;
}

static bool
e1000e_do_ps(E1000ECore *core, struct NetRxPkt *pkt, size_t *hdr_len)
{
    bool isip4, isip6, isudp, istcp;
    bool fragment;

    if (!e1000e_rx_use_ps_descriptor(core)) {
        return false;
    }

    net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);

    if (isip4) {
        fragment = net_rx_pkt_get_ip4_info(pkt)->fragment;
    } else if (isip6) {
        fragment = net_rx_pkt_get_ip6_info(pkt)->fragment;
    } else {
        return false;
    }

    if (fragment && (core->mac[RFCTL] & E1000_RFCTL_IPFRSP_DIS)) {
        return false;
    }

    if (!fragment && (isudp || istcp)) {
        *hdr_len = net_rx_pkt_get_l5_hdr_offset(pkt);
    } else {
        *hdr_len = net_rx_pkt_get_l4_hdr_offset(pkt);
    }

    if ((*hdr_len > core->rxbuf_sizes[0]) ||
        (*hdr_len > net_rx_pkt_get_total_len(pkt))) {
        return false;
    }

    return true;
}
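
/*
 * Main RX DMA loop: the packet (plus a zeroed FCS placeholder when the
 * guest asked to keep the CRC) is split across as many descriptors as
 * needed; each descriptor is written back as it completes and the head
 * pointer advances, with EOP metadata only produced for the final one.
 */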

static void
e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
                             const E1000E_RxRing *rxr,
                             const E1000E_RSSInfo *rss_info)
{
    PCIDevice *d = core->owner;
    dma_addr_t base;
    uint8_t desc[E1000_MAX_RX_DESC_LEN];
    size_t desc_size;
    size_t desc_offset = 0;
    size_t iov_ofs = 0;

    struct iovec *iov = net_rx_pkt_get_iovec(pkt);
    size_t size = net_rx_pkt_get_total_len(pkt);
    size_t total_size = size + e1000x_fcs_len(core->mac);
    const E1000E_RingInfo *rxi;
    size_t ps_hdr_len = 0;
    bool do_ps = e1000e_do_ps(core, pkt, &ps_hdr_len);
    bool is_first = true;

    rxi = rxr->i;

    do {
        hwaddr ba[MAX_PS_BUFFERS];
        e1000e_ba_state bastate = { { 0 } };
        bool is_last = false;

        desc_size = total_size - desc_offset;

        if (desc_size > core->rx_desc_buf_size) {
            desc_size = core->rx_desc_buf_size;
        }

        if (e1000e_ring_empty(core, rxi)) {
            return;
        }

        base = e1000e_ring_head_descr(core, rxi);

        pci_dma_read(d, base, &desc, core->rx_desc_len);

        trace_e1000e_rx_descr(rxi->idx, base, core->rx_desc_len);

        e1000e_read_rx_descr(core, desc, &ba);

        if (ba[0]) {
            if (desc_offset < size) {
                static const uint32_t fcs_pad;
                size_t iov_copy;
                size_t copy_size = size - desc_offset;
                if (copy_size > core->rx_desc_buf_size) {
                    copy_size = core->rx_desc_buf_size;
                }

                /* For PS mode copy the packet header first */
                if (do_ps) {
                    if (is_first) {
                        size_t ps_hdr_copied = 0;
                        do {
                            iov_copy = MIN(ps_hdr_len - ps_hdr_copied,
                                           iov->iov_len - iov_ofs);

                            e1000e_write_hdr_to_rx_buffers(core, &ba, &bastate,
                                                       iov->iov_base + iov_ofs,
                                                       iov_copy);

                            copy_size -= iov_copy;
                            ps_hdr_copied += iov_copy;

                            iov_ofs += iov_copy;
                            if (iov_ofs == iov->iov_len) {
                                iov++;
                                iov_ofs = 0;
                            }
                        } while (ps_hdr_copied < ps_hdr_len);

                        is_first = false;
                    } else {
                        /*
                         * Leave buffer 0 of each descriptor except first
                         * empty as per spec 7.1.5.1
                         */
                        e1000e_write_hdr_to_rx_buffers(core, &ba, &bastate,
                                                       NULL, 0);
                    }
                }

                /* Copy packet payload */
                while (copy_size) {
                    iov_copy = MIN(copy_size, iov->iov_len - iov_ofs);

                    e1000e_write_to_rx_buffers(core, &ba, &bastate,
                                            iov->iov_base + iov_ofs, iov_copy);

                    copy_size -= iov_copy;
                    iov_ofs += iov_copy;
                    if (iov_ofs == iov->iov_len) {
                        iov++;
                        iov_ofs = 0;
                    }
                }

                if (desc_offset + desc_size >= total_size) {
                    /* Simulate FCS checksum presence in the last descriptor */
                    e1000e_write_to_rx_buffers(core, &ba, &bastate,
                          (const char *) &fcs_pad, e1000x_fcs_len(core->mac));
                }
            }
        } else { /* as per intel docs; skip descriptors with null buf addr */
            trace_e1000e_rx_null_descriptor();
        }
        desc_offset += desc_size;
        if (desc_offset >= total_size) {
            is_last = true;
        }

        e1000e_write_rx_descr(core, desc, is_last ? core->rx_pkt : NULL,
                           rss_info, do_ps ? ps_hdr_len : 0, &bastate.written);
        e1000e_pci_dma_write_rx_desc(core, base, desc, core->rx_desc_len);

        e1000e_ring_advance(core, rxi,
                            core->rx_desc_len / E1000_MIN_RX_DESC_LEN);

    } while (desc_offset < total_size);

    e1000e_update_rx_stats(core, size, total_size);
}

static inline void
e1000e_rx_fix_l4_csum(E1000ECore *core, struct NetRxPkt *pkt)
{
    if (net_rx_pkt_has_virt_hdr(pkt)) {
        struct virtio_net_hdr *vhdr = net_rx_pkt_get_vhdr(pkt);

        if (vhdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
            net_rx_pkt_fix_l4_csum(pkt);
        }
    }
}
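
/*
 * Device entry point for received frames: pads runts to the minimum
 * Ethernet frame size, applies VLAN/unicast/multicast filtering, picks
 * the target queue via RSS, copies the frame to guest memory and raises
 * (or delays) the resulting interrupt causes.
 */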

/* Min. octets in an ethernet frame sans FCS */
#define MIN_BUF_SIZE 60

ssize_t
e1000e_receive_iov(E1000ECore *core, const struct iovec *iov, int iovcnt)
{
    static const int maximum_ethernet_hdr_len = (14 + 4);

    uint32_t n = 0;
    uint8_t min_buf[MIN_BUF_SIZE];
    struct iovec min_iov;
    uint8_t *filter_buf;
    size_t size, orig_size;
    size_t iov_ofs = 0;
    E1000E_RxRing rxr;
    E1000E_RSSInfo rss_info;
    size_t total_size;
    ssize_t retval;
    bool rdmts_hit;

    trace_e1000e_rx_receive_iov(iovcnt);

    if (!e1000x_hw_rx_enabled(core->mac)) {
        return -1;
    }

    /* Pull virtio header in */
    if (core->has_vnet) {
        net_rx_pkt_set_vhdr_iovec(core->rx_pkt, iov, iovcnt);
        iov_ofs = sizeof(struct virtio_net_hdr);
    }

    filter_buf = iov->iov_base + iov_ofs;
    orig_size = iov_size(iov, iovcnt);
    size = orig_size - iov_ofs;

    /* Pad to minimum Ethernet frame length */
    if (size < sizeof(min_buf)) {
        iov_to_buf(iov, iovcnt, iov_ofs, min_buf, size);
        memset(&min_buf[size], 0, sizeof(min_buf) - size);
        e1000x_inc_reg_if_not_full(core->mac, RUC);
        min_iov.iov_base = filter_buf = min_buf;
        min_iov.iov_len = size = sizeof(min_buf);
        iovcnt = 1;
        iov = &min_iov;
        iov_ofs = 0;
    } else if (iov->iov_len < maximum_ethernet_hdr_len) {
        /* This is very unlikely, but may happen. */
        iov_to_buf(iov, iovcnt, iov_ofs, min_buf, maximum_ethernet_hdr_len);
        filter_buf = min_buf;
    }

    /* Discard oversized packets if !LPE and !SBP. */
    if (e1000x_is_oversized(core->mac, size)) {
        return orig_size;
    }

    net_rx_pkt_set_packet_type(core->rx_pkt,
                               get_eth_packet_type(PKT_GET_ETH_HDR(filter_buf)));

    if (!e1000e_receive_filter(core, filter_buf, size)) {
        trace_e1000e_rx_flt_dropped();
        return orig_size;
    }

    net_rx_pkt_attach_iovec_ex(core->rx_pkt, iov, iovcnt, iov_ofs,
                               e1000x_vlan_enabled(core->mac), core->mac[VET]);

    e1000e_rss_parse_packet(core, core->rx_pkt, &rss_info);
    e1000e_rx_ring_init(core, &rxr, rss_info.queue);

    trace_e1000e_rx_rss_dispatched_to_queue(rxr.i->idx);

    total_size = net_rx_pkt_get_total_len(core->rx_pkt) +
        e1000x_fcs_len(core->mac);

    if (e1000e_has_rxbufs(core, rxr.i, total_size)) {
        e1000e_rx_fix_l4_csum(core, core->rx_pkt);

        e1000e_write_packet_to_guest(core, core->rx_pkt, &rxr, &rss_info);

        retval = orig_size;

        /* Perform small receive detection (RSRPD) */
        if (total_size < core->mac[RSRPD]) {
            n |= E1000_ICS_SRPD;
        }

        /* Perform ACK receive detection */
        if (!(core->mac[RFCTL] & E1000_RFCTL_ACK_DIS) &&
            (e1000e_is_tcp_ack(core, core->rx_pkt))) {
            n |= E1000_ICS_ACK;
        }

        /* Check if receive descriptor minimum threshold hit */
        rdmts_hit = e1000e_rx_descr_threshold_hit(core, rxr.i);
        n |= e1000e_rx_wb_interrupt_cause(core, rxr.i->idx, rdmts_hit);

        trace_e1000e_rx_written_to_guest(n);
    } else {
        n |= E1000_ICS_RXO;
        retval = 0;

        trace_e1000e_rx_not_written_to_guest(n);
    }

    if (!e1000e_intrmgr_delay_rx_causes(core, &n)) {
        trace_e1000e_rx_interrupt_set(n);
        e1000e_set_interrupt_cause(core, n);
    } else {
        trace_e1000e_rx_interrupt_delayed(n);
    }

    return retval;
}

static inline bool
e1000e_have_autoneg(E1000ECore *core)
{
    return core->phy[0][PHY_CTRL] & MII_CR_AUTO_NEG_EN;
}

static void e1000e_update_flowctl_status(E1000ECore *core)
{
    if (e1000e_have_autoneg(core) &&
        core->phy[0][PHY_STATUS] & MII_SR_AUTONEG_COMPLETE) {
        trace_e1000e_link_autoneg_flowctl(true);
        core->mac[CTRL] |= E1000_CTRL_TFCE | E1000_CTRL_RFCE;
    } else {
        trace_e1000e_link_autoneg_flowctl(false);
    }
}

static inline void
e1000e_link_down(E1000ECore *core)
{
    e1000x_update_regs_on_link_down(core->mac, core->phy[0]);
    e1000e_update_flowctl_status(core);
}

static inline void
e1000e_set_phy_ctrl(E1000ECore *core, int index, uint16_t val)
{
    /* bits 0-5 reserved; MII_CR_[RESTART_AUTO_NEG,RESET] are self clearing */
    core->phy[0][PHY_CTRL] = val & ~(0x3f |
                                     MII_CR_RESET |
                                     MII_CR_RESTART_AUTO_NEG);

    if ((val & MII_CR_RESTART_AUTO_NEG) &&
        e1000e_have_autoneg(core)) {
        e1000x_restart_autoneg(core->mac, core->phy[0], core->autoneg_timer);
    }
}

static void
e1000e_set_phy_oem_bits(E1000ECore *core, int index, uint16_t val)
{
    core->phy[0][PHY_OEM_BITS] = val & ~BIT(10);

    if (val & BIT(10)) {
        e1000x_restart_autoneg(core->mac, core->phy[0], core->autoneg_timer);
    }
}

static void
e1000e_set_phy_page(E1000ECore *core, int index, uint16_t val)
{
    core->phy[0][PHY_PAGE] = val & PHY_PAGE_RW_MASK;
}

void
e1000e_core_set_link_status(E1000ECore *core)
{
    NetClientState *nc = qemu_get_queue(core->owner_nic);
    uint32_t old_status = core->mac[STATUS];

    trace_e1000e_link_status_changed(nc->link_down ? false : true);

    if (nc->link_down) {
        e1000x_update_regs_on_link_down(core->mac, core->phy[0]);
    } else {
        if (e1000e_have_autoneg(core) &&
            !(core->phy[0][PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) {
            e1000x_restart_autoneg(core->mac, core->phy[0],
                                   core->autoneg_timer);
        } else {
            e1000x_update_regs_on_link_up(core->mac, core->phy[0]);
            e1000e_start_recv(core);
        }
    }

    if (core->mac[STATUS] != old_status) {
        e1000e_set_interrupt_cause(core, E1000_ICR_LSC);
    }
}

static void
e1000e_set_ctrl(E1000ECore *core, int index, uint32_t val)
{
    trace_e1000e_core_ctrl_write(index, val);

    /* RST is self clearing */
    core->mac[CTRL] = val & ~E1000_CTRL_RST;
    core->mac[CTRL_DUP] = core->mac[CTRL];

    trace_e1000e_link_set_params(
        !!(val & E1000_CTRL_ASDE),
        (val & E1000_CTRL_SPD_SEL) >> E1000_CTRL_SPD_SHIFT,
        !!(val & E1000_CTRL_FRCSPD),
        !!(val & E1000_CTRL_FRCDPX),
        !!(val & E1000_CTRL_RFCE),
        !!(val & E1000_CTRL_TFCE));

    if (val & E1000_CTRL_RST) {
        trace_e1000e_core_ctrl_sw_reset();
        e1000x_reset_mac_addr(core->owner_nic, core->mac, core->permanent_mac);
    }

    if (val & E1000_CTRL_PHY_RST) {
        trace_e1000e_core_ctrl_phy_reset();
        core->mac[STATUS] |= E1000_STATUS_PHYRA;
    }
}

static void
e1000e_set_rfctl(E1000ECore *core, int index, uint32_t val)
{
    trace_e1000e_rx_set_rfctl(val);

    if (!(val & E1000_RFCTL_ISCSI_DIS)) {
        trace_e1000e_wrn_iscsi_filtering_not_supported();
    }

    if (!(val & E1000_RFCTL_NFSW_DIS)) {
        trace_e1000e_wrn_nfsw_filtering_not_supported();
    }

    if (!(val & E1000_RFCTL_NFSR_DIS)) {
        trace_e1000e_wrn_nfsr_filtering_not_supported();
    }

    core->mac[RFCTL] = val;
}
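
/*
 * Per-descriptor RX buffer sizes come from PSRCTL in packet-split mode,
 * from RCTL.FLXBUF when a flexible size is selected, and from the
 * classic RCTL.BSIZE encoding otherwise; rx_desc_buf_size caches the
 * total usable space per descriptor.
 */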

static void
e1000e_calc_per_desc_buf_size(E1000ECore *core)
{
    int i;
    core->rx_desc_buf_size = 0;

    for (i = 0; i < ARRAY_SIZE(core->rxbuf_sizes); i++) {
        core->rx_desc_buf_size += core->rxbuf_sizes[i];
    }
}

static void
e1000e_parse_rxbufsize(E1000ECore *core)
{
    uint32_t rctl = core->mac[RCTL];

    memset(core->rxbuf_sizes, 0, sizeof(core->rxbuf_sizes));

    if (rctl & E1000_RCTL_DTYP_MASK) {
        uint32_t bsize;

        bsize = core->mac[PSRCTL] & E1000_PSRCTL_BSIZE0_MASK;
        core->rxbuf_sizes[0] = (bsize >> E1000_PSRCTL_BSIZE0_SHIFT) * 128;

        bsize = core->mac[PSRCTL] & E1000_PSRCTL_BSIZE1_MASK;
        core->rxbuf_sizes[1] = (bsize >> E1000_PSRCTL_BSIZE1_SHIFT) * 1024;

        bsize = core->mac[PSRCTL] & E1000_PSRCTL_BSIZE2_MASK;
        core->rxbuf_sizes[2] = (bsize >> E1000_PSRCTL_BSIZE2_SHIFT) * 1024;

        bsize = core->mac[PSRCTL] & E1000_PSRCTL_BSIZE3_MASK;
        core->rxbuf_sizes[3] = (bsize >> E1000_PSRCTL_BSIZE3_SHIFT) * 1024;
    } else if (rctl & E1000_RCTL_FLXBUF_MASK) {
        int flxbuf = rctl & E1000_RCTL_FLXBUF_MASK;
        core->rxbuf_sizes[0] = (flxbuf >> E1000_RCTL_FLXBUF_SHIFT) * 1024;
    } else {
        core->rxbuf_sizes[0] = e1000x_rxbufsize(rctl);
    }

    trace_e1000e_rx_desc_buff_sizes(core->rxbuf_sizes[0], core->rxbuf_sizes[1],
                                    core->rxbuf_sizes[2], core->rxbuf_sizes[3]);

    e1000e_calc_per_desc_buf_size(core);
}

static void
e1000e_calc_rxdesclen(E1000ECore *core)
{
    if (e1000e_rx_use_legacy_descriptor(core)) {
        core->rx_desc_len = sizeof(struct e1000_rx_desc);
    } else {
        if (core->mac[RCTL] & E1000_RCTL_DTYP_PS) {
            core->rx_desc_len = sizeof(union e1000_rx_desc_packet_split);
        } else {
            core->rx_desc_len = sizeof(union e1000_rx_desc_extended);
        }
    }
    trace_e1000e_rx_desc_len(core->rx_desc_len);
}

static void
e1000e_set_rx_control(E1000ECore *core, int index, uint32_t val)
{
    core->mac[RCTL] = val;
    trace_e1000e_rx_set_rctl(core->mac[RCTL]);

    if (val & E1000_RCTL_EN) {
        e1000e_parse_rxbufsize(core);
        e1000e_calc_rxdesclen(core);
        core->rxbuf_min_shift = ((val / E1000_RCTL_RDMTS_QUAT) & 3) + 1 +
                                E1000_RING_DESC_LEN_SHIFT;

        e1000e_start_recv(core);
    }
}

static
void(*e1000e_phyreg_writeops[E1000E_PHY_PAGES][E1000E_PHY_PAGE_SIZE])
(E1000ECore *, int, uint16_t) = {
    [0] = {
        [PHY_CTRL]     = e1000e_set_phy_ctrl,
        [PHY_PAGE]     = e1000e_set_phy_page,
        [PHY_OEM_BITS] = e1000e_set_phy_oem_bits
    }
};

static inline void
e1000e_clear_ims_bits(E1000ECore *core, uint32_t bits)
{
    trace_e1000e_irq_clear_ims(bits, core->mac[IMS], core->mac[IMS] & ~bits);
    core->mac[IMS] &= ~bits;
}

static inline bool
e1000e_postpone_interrupt(bool *interrupt_pending,
                          E1000IntrDelayTimer *timer)
{
    if (timer->running) {
        trace_e1000e_irq_postponed_by_xitr(timer->delay_reg << 2);

        *interrupt_pending = true;
        return true;
    }

    if (timer->core->mac[timer->delay_reg] != 0) {
        e1000e_intrmgr_rearm_timer(timer);
    }

    return false;
}

static inline bool
e1000e_itr_should_postpone(E1000ECore *core)
{
    return e1000e_postpone_interrupt(&core->itr_intr_pending, &core->itr);
}

static inline bool
e1000e_eitr_should_postpone(E1000ECore *core, int idx)
{
    return e1000e_postpone_interrupt(&core->eitr_intr_pending[idx],
                                     &core->eitr[idx]);
}
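
/*
 * MSI-X delivery: the IVAR register maps each cause to a vector, and
 * EIAC/EIAME/IAM control which ICR/IMS bits are auto-cleared once the
 * vector has been signalled (subject to per-vector EITR throttling).
 */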

static void
e1000e_msix_notify_one(E1000ECore *core, uint32_t cause, uint32_t int_cfg)
{
    uint32_t effective_eiac;

    if (E1000_IVAR_ENTRY_VALID(int_cfg)) {
        uint32_t vec = E1000_IVAR_ENTRY_VEC(int_cfg);
        if (vec < E1000E_MSIX_VEC_NUM) {
            if (!e1000e_eitr_should_postpone(core, vec)) {
                trace_e1000e_irq_msix_notify_vec(vec);
                msix_notify(core->owner, vec);
            }
        } else {
            trace_e1000e_wrn_msix_vec_wrong(cause, int_cfg);
        }
    } else {
        trace_e1000e_wrn_msix_invalid(cause, int_cfg);
    }

    if (core->mac[CTRL_EXT] & E1000_CTRL_EXT_EIAME) {
        trace_e1000e_irq_iam_clear_eiame(core->mac[IAM], cause);
        core->mac[IAM] &= ~cause;
    }

    trace_e1000e_irq_icr_clear_eiac(core->mac[ICR], core->mac[EIAC]);

    effective_eiac = core->mac[EIAC] & cause;

    core->mac[ICR] &= ~effective_eiac;
    core->msi_causes_pending &= ~effective_eiac;

    if (!(core->mac[CTRL_EXT] & E1000_CTRL_EXT_IAME)) {
        core->mac[IMS] &= ~effective_eiac;
    }
}

static void
e1000e_msix_notify(E1000ECore *core, uint32_t causes)
{
    if (causes & E1000_ICR_RXQ0) {
        e1000e_msix_notify_one(core, E1000_ICR_RXQ0,
                               E1000_IVAR_RXQ0(core->mac[IVAR]));
    }

    if (causes & E1000_ICR_RXQ1) {
        e1000e_msix_notify_one(core, E1000_ICR_RXQ1,
                               E1000_IVAR_RXQ1(core->mac[IVAR]));
    }

    if (causes & E1000_ICR_TXQ0) {
        e1000e_msix_notify_one(core, E1000_ICR_TXQ0,
                               E1000_IVAR_TXQ0(core->mac[IVAR]));
    }

    if (causes & E1000_ICR_TXQ1) {
        e1000e_msix_notify_one(core, E1000_ICR_TXQ1,
                               E1000_IVAR_TXQ1(core->mac[IVAR]));
    }

    if (causes & E1000_ICR_OTHER) {
        e1000e_msix_notify_one(core, E1000_ICR_OTHER,
                               E1000_IVAR_OTHER(core->mac[IVAR]));
    }
}

static void
e1000e_msix_clear_one(E1000ECore *core, uint32_t cause, uint32_t int_cfg)
{
    if (E1000_IVAR_ENTRY_VALID(int_cfg)) {
        uint32_t vec = E1000_IVAR_ENTRY_VEC(int_cfg);
        if (vec < E1000E_MSIX_VEC_NUM) {
            trace_e1000e_irq_msix_pending_clearing(cause, int_cfg, vec);
            msix_clr_pending(core->owner, vec);
        } else {
            trace_e1000e_wrn_msix_vec_wrong(cause, int_cfg);
        }
    } else {
        trace_e1000e_wrn_msix_invalid(cause, int_cfg);
    }
}

static void
e1000e_msix_clear(E1000ECore *core, uint32_t causes)
{
    if (causes & E1000_ICR_RXQ0) {
        e1000e_msix_clear_one(core, E1000_ICR_RXQ0,
                              E1000_IVAR_RXQ0(core->mac[IVAR]));
    }

    if (causes & E1000_ICR_RXQ1) {
        e1000e_msix_clear_one(core, E1000_ICR_RXQ1,
                              E1000_IVAR_RXQ1(core->mac[IVAR]));
    }

    if (causes & E1000_ICR_TXQ0) {
        e1000e_msix_clear_one(core, E1000_ICR_TXQ0,
                              E1000_IVAR_TXQ0(core->mac[IVAR]));
    }

    if (causes & E1000_ICR_TXQ1) {
        e1000e_msix_clear_one(core, E1000_ICR_TXQ1,
                              E1000_IVAR_TXQ1(core->mac[IVAR]));
    }

    if (causes & E1000_ICR_OTHER) {
        e1000e_msix_clear_one(core, E1000_ICR_OTHER,
                              E1000_IVAR_OTHER(core->mac[IVAR]));
    }
}

static inline void
e1000e_fix_icr_asserted(E1000ECore *core)
{
    core->mac[ICR] &= ~E1000_ICR_ASSERTED;
    if (core->mac[ICR]) {
        core->mac[ICR] |= E1000_ICR_ASSERTED;
    }

    trace_e1000e_irq_fix_icr_asserted(core->mac[ICR]);
}
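
/*
 * msi_causes_pending tracks causes already signalled via MSI/MSI-X but
 * not yet acknowledged by the guest, so a cause is re-delivered only
 * after it has been cleared from ICR.
 */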
static void
e1000e_send_msi(E1000ECore *core, bool msix)
{
    uint32_t causes = core->mac[ICR] & core->mac[IMS] & ~E1000_ICR_ASSERTED;

    core->msi_causes_pending &= causes;
    causes ^= core->msi_causes_pending;
    if (causes == 0) {
        return;
    }
    core->msi_causes_pending |= causes;

    if (msix) {
        e1000e_msix_notify(core, causes);
    } else {
        if (!e1000e_itr_should_postpone(core)) {
            trace_e1000e_irq_msi_notify(causes);
            msi_notify(core->owner, 0);
        }
    }
}

static void
e1000e_update_interrupt_state(E1000ECore *core)
{
    bool interrupts_pending;
    bool is_msix = msix_enabled(core->owner);

    /* Set ICR[OTHER] for MSI-X */
    if (is_msix) {
        if (core->mac[ICR] & E1000_ICR_OTHER_CAUSES) {
            core->mac[ICR] |= E1000_ICR_OTHER;
            trace_e1000e_irq_add_msi_other(core->mac[ICR]);
        }
    }

    e1000e_fix_icr_asserted(core);

    /*
     * Make sure ICR and ICS registers have the same value.
     * The spec says that the ICS register is write-only.  However in practice,
     * on real hardware ICS is readable, and for reads it has the same value as
     * ICR (except that ICS does not have the clear on read behaviour of ICR).
     *
     * The VxWorks PRO/1000 driver uses this behaviour.
     */
    core->mac[ICS] = core->mac[ICR];

    interrupts_pending = (core->mac[IMS] & core->mac[ICR]) ? true : false;
    if (!interrupts_pending) {
        core->msi_causes_pending = 0;
    }

    trace_e1000e_irq_pending_interrupts(core->mac[ICR] & core->mac[IMS],
                                        core->mac[ICR], core->mac[IMS]);

    if (is_msix || msi_enabled(core->owner)) {
        if (interrupts_pending) {
            e1000e_send_msi(core, is_msix);
        }
    } else {
        if (interrupts_pending) {
            if (!e1000e_itr_should_postpone(core)) {
                e1000e_raise_legacy_irq(core);
            }
        } else {
            e1000e_lower_legacy_irq(core);
        }
    }
}

static void
e1000e_set_interrupt_cause(E1000ECore *core, uint32_t val)
{
    trace_e1000e_irq_set_cause_entry(val, core->mac[ICR]);

    val |= e1000e_intmgr_collect_delayed_causes(core);
    core->mac[ICR] |= val;

    trace_e1000e_irq_set_cause_exit(val, core->mac[ICR]);

    e1000e_update_interrupt_state(core);
}

static inline void
e1000e_autoneg_timer(void *opaque)
{
    E1000ECore *core = opaque;
    if (!qemu_get_queue(core->owner_nic)->link_down) {
        e1000x_update_regs_on_autoneg_done(core->mac, core->phy[0]);
        e1000e_start_recv(core);

        e1000e_update_flowctl_status(core);
        /* signal link status change to the guest */
        e1000e_set_interrupt_cause(core, E1000_ICR_LSC);
    }
}

static inline uint16_t
e1000e_get_reg_index_with_offset(const uint16_t *mac_reg_access, hwaddr addr)
{
    uint16_t index = (addr & 0x1ffff) >> 2;
    return index + (mac_reg_access[index] & 0xfffe);
}
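/*
 * Worked example for e1000e_get_reg_index_with_offset() (added, values
 * illustrative): a non-aliased register has a zero entry (or just the
 * MAC_ACCESS_PARTIAL flag) in mac_reg_access[], so "& 0xfffe" masks the
 * flag bit and the index is returned unchanged; an alias slot holds an
 * even displacement that, added to the alias index, yields the index of
 * the canonical register.
 */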
static const char e1000e_phy_regcap[E1000E_PHY_PAGES][0x20] = {
    [0] = {
        [PHY_CTRL] = PHY_ANYPAGE | PHY_RW,
        [PHY_STATUS] = PHY_ANYPAGE | PHY_R,
        [PHY_ID1] = PHY_ANYPAGE | PHY_R,
        [PHY_ID2] = PHY_ANYPAGE | PHY_R,
        [PHY_AUTONEG_ADV] = PHY_ANYPAGE | PHY_RW,
        [PHY_LP_ABILITY] = PHY_ANYPAGE | PHY_R,
        [PHY_AUTONEG_EXP] = PHY_ANYPAGE | PHY_R,
        [PHY_NEXT_PAGE_TX] = PHY_ANYPAGE | PHY_RW,
        [PHY_LP_NEXT_PAGE] = PHY_ANYPAGE | PHY_R,
        [PHY_1000T_CTRL] = PHY_ANYPAGE | PHY_RW,
        [PHY_1000T_STATUS] = PHY_ANYPAGE | PHY_R,
        [PHY_EXT_STATUS] = PHY_ANYPAGE | PHY_R,
        [PHY_PAGE] = PHY_ANYPAGE | PHY_RW,

        [PHY_COPPER_CTRL1] = PHY_RW,
        [PHY_COPPER_STAT1] = PHY_R,
        [PHY_COPPER_CTRL3] = PHY_RW,
        [PHY_RX_ERR_CNTR] = PHY_R,
        [PHY_OEM_BITS] = PHY_RW,
        [PHY_BIAS_1] = PHY_RW,
        [PHY_BIAS_2] = PHY_RW,
        [PHY_COPPER_INT_ENABLE] = PHY_RW,
        [PHY_COPPER_STAT2] = PHY_R,
        [PHY_COPPER_CTRL2] = PHY_RW
    },
    [2] = {
        [PHY_MAC_CTRL1] = PHY_RW,
        [PHY_MAC_INT_ENABLE] = PHY_RW,
        [PHY_MAC_STAT] = PHY_R,
        [PHY_MAC_CTRL2] = PHY_RW
    },
    [3] = {
        [PHY_LED_03_FUNC_CTRL1] = PHY_RW,
        [PHY_LED_03_POL_CTRL] = PHY_RW,
        [PHY_LED_TIMER_CTRL] = PHY_RW,
        [PHY_LED_45_CTRL] = PHY_RW
    },
    [5] = {
        [PHY_1000T_SKEW] = PHY_R,
        [PHY_1000T_SWAP] = PHY_R
    },
    [6] = {
        [PHY_CRC_COUNTERS] = PHY_R
    }
};

static bool
e1000e_phy_reg_check_cap(E1000ECore *core, uint32_t addr,
                         char cap, uint8_t *page)
{
    *page =
        (e1000e_phy_regcap[0][addr] & PHY_ANYPAGE) ? 0
                                                   : core->phy[0][PHY_PAGE];

    if (*page >= E1000E_PHY_PAGES) {
        return false;
    }

    return e1000e_phy_regcap[*page][addr] & cap;
}

static void
e1000e_phy_reg_write(E1000ECore *core, uint8_t page,
                     uint32_t addr, uint16_t data)
{
    assert(page < E1000E_PHY_PAGES);
    assert(addr < E1000E_PHY_PAGE_SIZE);

    if (e1000e_phyreg_writeops[page][addr]) {
        e1000e_phyreg_writeops[page][addr](core, addr, data);
    } else {
        core->phy[page][addr] = data;
    }
}
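/*
 * Example MDIC transaction (added, illustrative): to read PHY_ID1
 * (register 2) of PHY #1 the guest would write
 *
 *     MDIC = E1000_MDIC_OP_READ
 *            | (1 << E1000_MDIC_PHY_SHIFT)
 *            | (2 << E1000_MDIC_REG_SHIFT);
 *
 * and poll MDIC until E1000_MDIC_READY is set.  e1000e_set_mdic()
 * below completes the whole operation synchronously, optionally
 * raising E1000_ICR_MDAC when E1000_MDIC_INT_EN is set.
 */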
static void
e1000e_set_mdic(E1000ECore *core, int index, uint32_t val)
{
    uint32_t data = val & E1000_MDIC_DATA_MASK;
    uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
    uint8_t page;

    if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) { /* phy # */
        val = core->mac[MDIC] | E1000_MDIC_ERROR;
    } else if (val & E1000_MDIC_OP_READ) {
        if (!e1000e_phy_reg_check_cap(core, addr, PHY_R, &page)) {
            trace_e1000e_core_mdic_read_unhandled(page, addr);
            val |= E1000_MDIC_ERROR;
        } else {
            val = (val ^ data) | core->phy[page][addr];
            trace_e1000e_core_mdic_read(page, addr, val);
        }
    } else if (val & E1000_MDIC_OP_WRITE) {
        if (!e1000e_phy_reg_check_cap(core, addr, PHY_W, &page)) {
            trace_e1000e_core_mdic_write_unhandled(page, addr);
            val |= E1000_MDIC_ERROR;
        } else {
            trace_e1000e_core_mdic_write(page, addr, data);
            e1000e_phy_reg_write(core, page, addr, data);
        }
    }
    core->mac[MDIC] = val | E1000_MDIC_READY;

    if (val & E1000_MDIC_INT_EN) {
        e1000e_set_interrupt_cause(core, E1000_ICR_MDAC);
    }
}

static void
e1000e_set_rdt(E1000ECore *core, int index, uint32_t val)
{
    core->mac[index] = val & 0xffff;
    trace_e1000e_rx_set_rdt(e1000e_mq_queue_idx(RDT0, index), val);
    e1000e_start_recv(core);
}

static void
e1000e_set_status(E1000ECore *core, int index, uint32_t val)
{
    if ((val & E1000_STATUS_PHYRA) == 0) {
        core->mac[index] &= ~E1000_STATUS_PHYRA;
    }
}

static void
e1000e_set_ctrlext(E1000ECore *core, int index, uint32_t val)
{
    trace_e1000e_link_set_ext_params(!!(val & E1000_CTRL_EXT_ASDCHK),
                                     !!(val & E1000_CTRL_EXT_SPD_BYPS));

    /* Zero self-clearing bits */
    val &= ~(E1000_CTRL_EXT_ASDCHK | E1000_CTRL_EXT_EE_RST);
    core->mac[CTRL_EXT] = val;
}

static void
e1000e_set_pbaclr(E1000ECore *core, int index, uint32_t val)
{
    int i;

    core->mac[PBACLR] = val & E1000_PBACLR_VALID_MASK;

    if (!msix_enabled(core->owner)) {
        return;
    }

    for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
        if (core->mac[PBACLR] & BIT(i)) {
            msix_clr_pending(core->owner, i);
        }
    }
}

static void
e1000e_set_fcrth(E1000ECore *core, int index, uint32_t val)
{
    core->mac[FCRTH] = val & 0xFFF8;
}

static void
e1000e_set_fcrtl(E1000ECore *core, int index, uint32_t val)
{
    core->mac[FCRTL] = val & 0x8000FFF8;
}

static inline void
e1000e_set_16bit(E1000ECore *core, int index, uint32_t val)
{
    core->mac[index] = val & 0xffff;
}

static void
e1000e_set_12bit(E1000ECore *core, int index, uint32_t val)
{
    core->mac[index] = val & 0xfff;
}

static void
e1000e_set_vet(E1000ECore *core, int index, uint32_t val)
{
    core->mac[VET] = val & 0xffff;
    trace_e1000e_vlan_vet(core->mac[VET]);
}

static void
e1000e_set_dlen(E1000ECore *core, int index, uint32_t val)
{
    core->mac[index] = val & E1000_XDLEN_MASK;
}

static void
e1000e_set_dbal(E1000ECore *core, int index, uint32_t val)
{
    core->mac[index] = val & E1000_XDBAL_MASK;
}

static void
e1000e_set_tctl(E1000ECore *core, int index, uint32_t val)
{
    E1000E_TxRing txr;
    core->mac[index] = val;

    if (core->mac[TARC0] & E1000_TARC_ENABLE) {
        e1000e_tx_ring_init(core, &txr, 0);
        e1000e_start_xmit(core, &txr);
    }

    if (core->mac[TARC1] & E1000_TARC_ENABLE) {
        e1000e_tx_ring_init(core, &txr, 1);
        e1000e_start_xmit(core, &txr);
    }
}

static void
e1000e_set_tdt(E1000ECore *core, int index, uint32_t val)
{
    E1000E_TxRing txr;
    int qidx = e1000e_mq_queue_idx(TDT, index);
    uint32_t tarc_reg = (qidx == 0) ? TARC0 : TARC1;

    core->mac[index] = val & 0xffff;

    if (core->mac[tarc_reg] & E1000_TARC_ENABLE) {
        e1000e_tx_ring_init(core, &txr, qidx);
        e1000e_start_xmit(core, &txr);
    }
}

static void
e1000e_set_ics(E1000ECore *core, int index, uint32_t val)
{
    trace_e1000e_irq_write_ics(val);
    e1000e_set_interrupt_cause(core, val);
}
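/*
 * ICR write semantics (added summary): ICR is write-one-to-clear, so
 * e1000e_set_icr() below keeps "old & ~val".  Two quirks sit on top of
 * that: clearing the "Other" bit also clears every individual cause
 * aggregated by it, and with CTRL_EXT.IAME set a write that finds ICR
 * asserted additionally auto-masks the bits named in IAM.
 */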
static void
e1000e_set_icr(E1000ECore *core, int index, uint32_t val)
{
    uint32_t icr = 0;
    if ((core->mac[ICR] & E1000_ICR_ASSERTED) &&
        (core->mac[CTRL_EXT] & E1000_CTRL_EXT_IAME)) {
        trace_e1000e_irq_icr_process_iame();
        e1000e_clear_ims_bits(core, core->mac[IAM]);
    }

    icr = core->mac[ICR] & ~val;
    /*
     * The Windows driver expects the "receive overrun" bit and other
     * ones to be cleared when the "Other" bit (#24) is cleared.
     */
    icr = (val & E1000_ICR_OTHER) ? (icr & ~E1000_ICR_OTHER_CAUSES) : icr;
    trace_e1000e_irq_icr_write(val, core->mac[ICR], icr);
    core->mac[ICR] = icr;
    e1000e_update_interrupt_state(core);
}

static void
e1000e_set_imc(E1000ECore *core, int index, uint32_t val)
{
    trace_e1000e_irq_ims_clear_set_imc(val);
    e1000e_clear_ims_bits(core, val);
    e1000e_update_interrupt_state(core);
}

static void
e1000e_set_ims(E1000ECore *core, int index, uint32_t val)
{
    static const uint32_t ims_ext_mask =
        E1000_IMS_RXQ0 | E1000_IMS_RXQ1 |
        E1000_IMS_TXQ0 | E1000_IMS_TXQ1 |
        E1000_IMS_OTHER;

    static const uint32_t ims_valid_mask =
        E1000_IMS_TXDW | E1000_IMS_TXQE | E1000_IMS_LSC |
        E1000_IMS_RXDMT0 | E1000_IMS_RXO | E1000_IMS_RXT0 |
        E1000_IMS_MDAC | E1000_IMS_TXD_LOW | E1000_IMS_SRPD |
        E1000_IMS_ACK | E1000_IMS_MNG | E1000_IMS_RXQ0 |
        E1000_IMS_RXQ1 | E1000_IMS_TXQ0 | E1000_IMS_TXQ1 |
        E1000_IMS_OTHER;

    uint32_t valid_val = val & ims_valid_mask;

    trace_e1000e_irq_set_ims(val, core->mac[IMS], core->mac[IMS] | valid_val);
    core->mac[IMS] |= valid_val;

    if ((valid_val & ims_ext_mask) &&
        (core->mac[CTRL_EXT] & E1000_CTRL_EXT_PBA_CLR) &&
        msix_enabled(core->owner)) {
        e1000e_msix_clear(core, valid_val);
    }

    if ((valid_val == ims_valid_mask) &&
        (core->mac[CTRL_EXT] & E1000_CTRL_EXT_INT_TIMERS_CLEAR_ENA)) {
        trace_e1000e_irq_fire_all_timers(val);
        e1000e_intrmgr_fire_all_timers(core);
    }

    e1000e_update_interrupt_state(core);
}

static void
e1000e_set_rdtr(E1000ECore *core, int index, uint32_t val)
{
    e1000e_set_16bit(core, index, val);

    if ((val & E1000_RDTR_FPD) && (core->rdtr.running)) {
        trace_e1000e_irq_rdtr_fpd_running();
        e1000e_intrmgr_fire_delayed_interrupts(core);
    } else {
        trace_e1000e_irq_rdtr_fpd_not_running();
    }
}

static void
e1000e_set_tidv(E1000ECore *core, int index, uint32_t val)
{
    e1000e_set_16bit(core, index, val);

    if ((val & E1000_TIDV_FPD) && (core->tidv.running)) {
        trace_e1000e_irq_tidv_fpd_running();
        e1000e_intrmgr_fire_delayed_interrupts(core);
    } else {
        trace_e1000e_irq_tidv_fpd_not_running();
    }
}

static uint32_t
e1000e_mac_readreg(E1000ECore *core, int index)
{
    return core->mac[index];
}

static uint32_t
e1000e_mac_ics_read(E1000ECore *core, int index)
{
    trace_e1000e_irq_read_ics(core->mac[ICS]);
    return core->mac[ICS];
}

static uint32_t
e1000e_mac_ims_read(E1000ECore *core, int index)
{
    trace_e1000e_irq_read_ims(core->mac[IMS]);
    return core->mac[IMS];
}

#define E1000E_LOW_BITS_READ_FUNC(num)                      \
    static uint32_t                                         \
    e1000e_mac_low##num##_read(E1000ECore *core, int index) \
    {                                                       \
        return core->mac[index] & (BIT(num) - 1);           \
    }                                                       \

#define E1000E_LOW_BITS_READ(num) \
    e1000e_mac_low##num##_read

E1000E_LOW_BITS_READ_FUNC(4);
E1000E_LOW_BITS_READ_FUNC(6);
E1000E_LOW_BITS_READ_FUNC(11);
E1000E_LOW_BITS_READ_FUNC(13);
E1000E_LOW_BITS_READ_FUNC(16);
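/*
 * For reference, E1000E_LOW_BITS_READ_FUNC(13) above expands to:
 *
 *     static uint32_t
 *     e1000e_mac_low13_read(E1000ECore *core, int index)
 *     {
 *         return core->mac[index] & (BIT(13) - 1);
 *     }
 *
 * i.e. a reader that exposes only bits [12:0] of the stored value.
 */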
static uint32_t
e1000e_mac_swsm_read(E1000ECore *core, int index)
{
    uint32_t val = core->mac[SWSM];
    core->mac[SWSM] = val | 1;
    return val;
}

static uint32_t
e1000e_mac_itr_read(E1000ECore *core, int index)
{
    return core->itr_guest_value;
}

static uint32_t
e1000e_mac_eitr_read(E1000ECore *core, int index)
{
    return core->eitr_guest_value[index - EITR];
}

static uint32_t
e1000e_mac_icr_read(E1000ECore *core, int index)
{
    uint32_t ret = core->mac[ICR];
    trace_e1000e_irq_icr_read_entry(ret);

    if (core->mac[IMS] == 0) {
        trace_e1000e_irq_icr_clear_zero_ims();
        core->mac[ICR] = 0;
    }

    if (!msix_enabled(core->owner)) {
        trace_e1000e_irq_icr_clear_nonmsix_icr_read();
        core->mac[ICR] = 0;
    }

    if ((core->mac[ICR] & E1000_ICR_ASSERTED) &&
        (core->mac[CTRL_EXT] & E1000_CTRL_EXT_IAME)) {
        trace_e1000e_irq_icr_clear_iame();
        core->mac[ICR] = 0;
        trace_e1000e_irq_icr_process_iame();
        e1000e_clear_ims_bits(core, core->mac[IAM]);
    }

    trace_e1000e_irq_icr_read_exit(core->mac[ICR]);
    e1000e_update_interrupt_state(core);
    return ret;
}

static uint32_t
e1000e_mac_read_clr4(E1000ECore *core, int index)
{
    uint32_t ret = core->mac[index];

    core->mac[index] = 0;
    return ret;
}

static uint32_t
e1000e_mac_read_clr8(E1000ECore *core, int index)
{
    uint32_t ret = core->mac[index];

    core->mac[index] = 0;
    core->mac[index - 1] = 0;
    return ret;
}

static uint32_t
e1000e_get_ctrl(E1000ECore *core, int index)
{
    uint32_t val = core->mac[CTRL];

    trace_e1000e_link_read_params(
        !!(val & E1000_CTRL_ASDE),
        (val & E1000_CTRL_SPD_SEL) >> E1000_CTRL_SPD_SHIFT,
        !!(val & E1000_CTRL_FRCSPD),
        !!(val & E1000_CTRL_FRCDPX),
        !!(val & E1000_CTRL_RFCE),
        !!(val & E1000_CTRL_TFCE));

    return val;
}
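/*
 * Added note: e1000e_get_status() below synthesizes much of the read
 * value instead of returning core->mac[STATUS] verbatim; duplex and
 * speed are derived from CTRL whenever the guest forces them (FRCDPX,
 * FRCSPD or CTRL_EXT.SPD_BYPS), and the GIO master status bit mirrors
 * the GIO_MASTER_DISABLE control bit.
 */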
static uint32_t
e1000e_get_status(E1000ECore *core, int index)
{
    uint32_t res = core->mac[STATUS];

    if (!(core->mac[CTRL] & E1000_CTRL_GIO_MASTER_DISABLE)) {
        res |= E1000_STATUS_GIO_MASTER_ENABLE;
    }

    if (core->mac[CTRL] & E1000_CTRL_FRCDPX) {
        res |= (core->mac[CTRL] & E1000_CTRL_FD) ? E1000_STATUS_FD : 0;
    } else {
        res |= E1000_STATUS_FD;
    }

    if ((core->mac[CTRL] & E1000_CTRL_FRCSPD) ||
        (core->mac[CTRL_EXT] & E1000_CTRL_EXT_SPD_BYPS)) {
        switch (core->mac[CTRL] & E1000_CTRL_SPD_SEL) {
        case E1000_CTRL_SPD_10:
            res |= E1000_STATUS_SPEED_10;
            break;
        case E1000_CTRL_SPD_100:
            res |= E1000_STATUS_SPEED_100;
            break;
        case E1000_CTRL_SPD_1000:
        default:
            res |= E1000_STATUS_SPEED_1000;
            break;
        }
    } else {
        res |= E1000_STATUS_SPEED_1000;
    }

    trace_e1000e_link_status(
        !!(res & E1000_STATUS_LU),
        !!(res & E1000_STATUS_FD),
        (res & E1000_STATUS_SPEED_MASK) >> E1000_STATUS_SPEED_SHIFT,
        (res & E1000_STATUS_ASDV) >> E1000_STATUS_ASDV_SHIFT);

    return res;
}

static uint32_t
e1000e_get_tarc(E1000ECore *core, int index)
{
    return core->mac[index] & ((BIT(11) - 1) |
                               BIT(27) |
                               BIT(28) |
                               BIT(29) |
                               BIT(30));
}

static void
e1000e_mac_writereg(E1000ECore *core, int index, uint32_t val)
{
    core->mac[index] = val;
}

static void
e1000e_mac_setmacaddr(E1000ECore *core, int index, uint32_t val)
{
    uint32_t macaddr[2];

    core->mac[index] = val;

    macaddr[0] = cpu_to_le32(core->mac[RA]);
    macaddr[1] = cpu_to_le32(core->mac[RA + 1]);
    qemu_format_nic_info_str(qemu_get_queue(core->owner_nic),
                             (uint8_t *) macaddr);

    trace_e1000e_mac_set_sw(MAC_ARG(macaddr));
}

static void
e1000e_set_eecd(E1000ECore *core, int index, uint32_t val)
{
    static const uint32_t ro_bits = E1000_EECD_PRES |
                                    E1000_EECD_AUTO_RD |
                                    E1000_EECD_SIZE_EX_MASK;

    core->mac[EECD] = (core->mac[EECD] & ro_bits) | (val & ~ro_bits);
}

static void
e1000e_set_eerd(E1000ECore *core, int index, uint32_t val)
{
    uint32_t addr = (val >> E1000_EERW_ADDR_SHIFT) & E1000_EERW_ADDR_MASK;
    uint32_t flags = 0;
    uint32_t data = 0;

    if ((addr < E1000E_EEPROM_SIZE) && (val & E1000_EERW_START)) {
        data = core->eeprom[addr];
        flags = E1000_EERW_DONE;
    }

    core->mac[EERD] = flags |
                      (addr << E1000_EERW_ADDR_SHIFT) |
                      (data << E1000_EERW_DATA_SHIFT);
}

static void
e1000e_set_eewr(E1000ECore *core, int index, uint32_t val)
{
    uint32_t addr = (val >> E1000_EERW_ADDR_SHIFT) & E1000_EERW_ADDR_MASK;
    uint32_t data = (val >> E1000_EERW_DATA_SHIFT) & E1000_EERW_DATA_MASK;
    uint32_t flags = 0;

    if ((addr < E1000E_EEPROM_SIZE) && (val & E1000_EERW_START)) {
        core->eeprom[addr] = data;
        flags = E1000_EERW_DONE;
    }

    core->mac[EERD] = flags |
                      (addr << E1000_EERW_ADDR_SHIFT) |
                      (data << E1000_EERW_DATA_SHIFT);
}

static void
e1000e_set_rxdctl(E1000ECore *core, int index, uint32_t val)
{
    core->mac[RXDCTL] = core->mac[RXDCTL1] = val;
}

static void
e1000e_set_itr(E1000ECore *core, int index, uint32_t val)
{
    uint32_t interval = val & 0xffff;

    trace_e1000e_irq_itr_set(val);

    core->itr_guest_value = interval;
    core->mac[index] = MAX(interval, E1000E_MIN_XITR);
}
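/*
 * Added note: the interval the guest programs is preserved verbatim in
 * itr_guest_value/eitr_guest_value so that reads return exactly what
 * was written (see e1000e_mac_itr_read()/e1000e_mac_eitr_read()),
 * while the operational copy in core->mac[] is clamped to
 * E1000E_MIN_XITR.
 */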
static void
e1000e_set_eitr(E1000ECore *core, int index, uint32_t val)
{
    uint32_t interval = val & 0xffff;
    uint32_t eitr_num = index - EITR;

    trace_e1000e_irq_eitr_set(eitr_num, val);

    core->eitr_guest_value[eitr_num] = interval;
    core->mac[index] = MAX(interval, E1000E_MIN_XITR);
}

static void
e1000e_set_psrctl(E1000ECore *core, int index, uint32_t val)
{
    if (core->mac[RCTL] & E1000_RCTL_DTYP_MASK) {

        if ((val & E1000_PSRCTL_BSIZE0_MASK) == 0) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "e1000e: PSRCTL.BSIZE0 cannot be zero");
            return;
        }

        if ((val & E1000_PSRCTL_BSIZE1_MASK) == 0) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "e1000e: PSRCTL.BSIZE1 cannot be zero");
            return;
        }
    }

    core->mac[PSRCTL] = val;
}

static void
e1000e_update_rx_offloads(E1000ECore *core)
{
    int cso_state = e1000e_rx_l4_cso_enabled(core);

    trace_e1000e_rx_set_cso(cso_state);

    if (core->has_vnet) {
        qemu_set_offload(qemu_get_queue(core->owner_nic)->peer,
                         cso_state, 0, 0, 0, 0);
    }
}

static void
e1000e_set_rxcsum(E1000ECore *core, int index, uint32_t val)
{
    core->mac[RXCSUM] = val;
    e1000e_update_rx_offloads(core);
}

static void
e1000e_set_gcr(E1000ECore *core, int index, uint32_t val)
{
    uint32_t ro_bits = core->mac[GCR] & E1000_GCR_RO_BITS;
    core->mac[GCR] = (val & ~E1000_GCR_RO_BITS) | ro_bits;
}
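/*
 * Register dispatch (added overview): the two tables below map a
 * register index (BAR offset / 4) to its read and write handlers using
 * C99 designated initializers, so ranged entries such as
 * [RA ... RA + 31] cover whole register files at once.  A NULL slot
 * means the access is unhandled and only traced by
 * e1000e_core_read()/e1000e_core_write().
 */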
#define e1000e_getreg(x)    [x] = e1000e_mac_readreg
typedef uint32_t (*readops)(E1000ECore *, int);
static const readops e1000e_macreg_readops[] = {
    e1000e_getreg(PBA),
    e1000e_getreg(WUFC),
    e1000e_getreg(MANC),
    e1000e_getreg(TOTL),
    e1000e_getreg(RDT0),
    e1000e_getreg(RDBAH0),
    e1000e_getreg(TDBAL1),
    e1000e_getreg(RDLEN0),
    e1000e_getreg(RDH1),
    e1000e_getreg(LATECOL),
    e1000e_getreg(SEQEC),
    e1000e_getreg(XONTXC),
    e1000e_getreg(WUS),
    e1000e_getreg(GORCL),
    e1000e_getreg(MGTPRC),
    e1000e_getreg(EERD),
    e1000e_getreg(EIAC),
    e1000e_getreg(PSRCTL),
    e1000e_getreg(MANC2H),
    e1000e_getreg(RXCSUM),
    e1000e_getreg(GSCL_3),
    e1000e_getreg(GSCN_2),
    e1000e_getreg(RSRPD),
    e1000e_getreg(RDBAL1),
    e1000e_getreg(FCAH),
    e1000e_getreg(FCRTH),
    e1000e_getreg(FLOP),
    e1000e_getreg(FLASHT),
    e1000e_getreg(RXSTMPH),
    e1000e_getreg(TXSTMPL),
    e1000e_getreg(TIMADJL),
    e1000e_getreg(TXDCTL),
    e1000e_getreg(RDH0),
    e1000e_getreg(TDT1),
    e1000e_getreg(TNCRS),
    e1000e_getreg(RJC),
    e1000e_getreg(IAM),
    e1000e_getreg(GSCL_2),
    e1000e_getreg(RDBAH1),
    e1000e_getreg(FLSWDATA),
    e1000e_getreg(RXSATRH),
    e1000e_getreg(TIPG),
    e1000e_getreg(FLMNGCTL),
    e1000e_getreg(FLMNGCNT),
    e1000e_getreg(TSYNCTXCTL),
    e1000e_getreg(EXTCNF_SIZE),
    e1000e_getreg(EXTCNF_CTRL),
    e1000e_getreg(EEMNGDATA),
    e1000e_getreg(CTRL_EXT),
    e1000e_getreg(SYSTIMH),
    e1000e_getreg(EEMNGCTL),
    e1000e_getreg(FLMNGDATA),
    e1000e_getreg(TSYNCRXCTL),
    e1000e_getreg(TDH),
    e1000e_getreg(LEDCTL),
    e1000e_getreg(TCTL),
    e1000e_getreg(TDBAL),
    e1000e_getreg(TDLEN),
    e1000e_getreg(TDH1),
    e1000e_getreg(RADV),
    e1000e_getreg(ECOL),
    e1000e_getreg(DC),
    e1000e_getreg(RLEC),
    e1000e_getreg(XOFFTXC),
    e1000e_getreg(RFC),
    e1000e_getreg(RNBC),
    e1000e_getreg(MGTPTC),
    e1000e_getreg(TIMINCA),
    e1000e_getreg(RXCFGL),
    e1000e_getreg(MFUTP01),
    e1000e_getreg(FACTPS),
    e1000e_getreg(GSCL_1),
    e1000e_getreg(GSCN_0),
    e1000e_getreg(GCR2),
    e1000e_getreg(RDT1),
    e1000e_getreg(PBACLR),
    e1000e_getreg(FCTTV),
    e1000e_getreg(EEWR),
    e1000e_getreg(FLSWCTL),
    e1000e_getreg(RXDCTL1),
    e1000e_getreg(RXSATRL),
    e1000e_getreg(SYSTIML),
    e1000e_getreg(RXUDP),
    e1000e_getreg(TORL),
    e1000e_getreg(TDLEN1),
    e1000e_getreg(MCC),
    e1000e_getreg(WUC),
    e1000e_getreg(EECD),
    e1000e_getreg(MFUTP23),
    e1000e_getreg(RAID),
    e1000e_getreg(FCRTV),
    e1000e_getreg(TXDCTL1),
    e1000e_getreg(RCTL),
    e1000e_getreg(TDT),
    e1000e_getreg(MDIC),
    e1000e_getreg(FCRUC),
    e1000e_getreg(VET),
    e1000e_getreg(RDBAL0),
    e1000e_getreg(TDBAH1),
    e1000e_getreg(RDTR),
    e1000e_getreg(SCC),
    e1000e_getreg(COLC),
    e1000e_getreg(CEXTERR),
    e1000e_getreg(XOFFRXC),
    e1000e_getreg(IPAV),
    e1000e_getreg(GOTCL),
    e1000e_getreg(MGTPDC),
    e1000e_getreg(GCR),
    e1000e_getreg(IVAR),
    e1000e_getreg(POEMB),
    e1000e_getreg(MFVAL),
    e1000e_getreg(FUNCTAG),
    e1000e_getreg(GSCL_4),
    e1000e_getreg(GSCN_3),
    e1000e_getreg(MRQC),
    e1000e_getreg(RDLEN1),
    e1000e_getreg(FCT),
    e1000e_getreg(FLA),
    e1000e_getreg(FLOL),
    e1000e_getreg(RXDCTL),
    e1000e_getreg(RXSTMPL),
    e1000e_getreg(TXSTMPH),
    e1000e_getreg(TIMADJH),
    e1000e_getreg(FCRTL),
    e1000e_getreg(TDBAH),
    e1000e_getreg(TADV),
    e1000e_getreg(XONRXC),
    e1000e_getreg(TSCTFC),
    e1000e_getreg(RFCTL),
    e1000e_getreg(GSCN_1),
    e1000e_getreg(FCAL),
    e1000e_getreg(FLSWCNT),

    [TOTH] = e1000e_mac_read_clr8,
    [GOTCH] = e1000e_mac_read_clr8,
    [PRC64] = e1000e_mac_read_clr4,
    [PRC255] = e1000e_mac_read_clr4,
    [PRC1023] = e1000e_mac_read_clr4,
    [PTC64] = e1000e_mac_read_clr4,
    [PTC255] = e1000e_mac_read_clr4,
    [PTC1023] = e1000e_mac_read_clr4,
    [GPRC] = e1000e_mac_read_clr4,
    [TPT] = e1000e_mac_read_clr4,
    [RUC] = e1000e_mac_read_clr4,
    [BPRC] = e1000e_mac_read_clr4,
    [MPTC] = e1000e_mac_read_clr4,
    [IAC] = e1000e_mac_read_clr4,
    [ICR] = e1000e_mac_icr_read,
    [RDFH] = E1000E_LOW_BITS_READ(13),
    [RDFHS] = E1000E_LOW_BITS_READ(13),
    [RDFPC] = E1000E_LOW_BITS_READ(13),
    [TDFH] = E1000E_LOW_BITS_READ(13),
    [TDFHS] = E1000E_LOW_BITS_READ(13),
    [STATUS] = e1000e_get_status,
    [TARC0] = e1000e_get_tarc,
    [PBS] = E1000E_LOW_BITS_READ(6),
    [ICS] = e1000e_mac_ics_read,
    [AIT] = E1000E_LOW_BITS_READ(16),
    [TORH] = e1000e_mac_read_clr8,
    [GORCH] = e1000e_mac_read_clr8,
    [PRC127] = e1000e_mac_read_clr4,
    [PRC511] = e1000e_mac_read_clr4,
    [PRC1522] = e1000e_mac_read_clr4,
    [PTC127] = e1000e_mac_read_clr4,
    [PTC511] = e1000e_mac_read_clr4,
    [PTC1522] = e1000e_mac_read_clr4,
    [GPTC] = e1000e_mac_read_clr4,
    [TPR] = e1000e_mac_read_clr4,
    [ROC] = e1000e_mac_read_clr4,
    [MPRC] = e1000e_mac_read_clr4,
    [BPTC] = e1000e_mac_read_clr4,
    [TSCTC] = e1000e_mac_read_clr4,
    [ITR] = e1000e_mac_itr_read,
    [RDFT] = E1000E_LOW_BITS_READ(13),
    [RDFTS] = E1000E_LOW_BITS_READ(13),
    [TDFPC] = E1000E_LOW_BITS_READ(13),
    [TDFT] = E1000E_LOW_BITS_READ(13),
    [TDFTS] = E1000E_LOW_BITS_READ(13),
    [CTRL] = e1000e_get_ctrl,
    [TARC1] = e1000e_get_tarc,
    [SWSM] = e1000e_mac_swsm_read,
    [IMS] = e1000e_mac_ims_read,

    [CRCERRS ... MPC] = e1000e_mac_readreg,
    [IP6AT ... IP6AT + 3] = e1000e_mac_readreg,
    [IP4AT ... IP4AT + 6] = e1000e_mac_readreg,
    [RA ... RA + 31] = e1000e_mac_readreg,
    [WUPM ... WUPM + 31] = e1000e_mac_readreg,
    [MTA ... MTA + 127] = e1000e_mac_readreg,
    [VFTA ... VFTA + 127] = e1000e_mac_readreg,
    [FFMT ... FFMT + 254] = E1000E_LOW_BITS_READ(4),
    [FFVT ... FFVT + 254] = e1000e_mac_readreg,
    [MDEF ... MDEF + 7] = e1000e_mac_readreg,
    [FFLT ... FFLT + 10] = E1000E_LOW_BITS_READ(11),
    [FTFT ... FTFT + 254] = e1000e_mac_readreg,
    [PBM ... PBM + 10239] = e1000e_mac_readreg,
    [RETA ... RETA + 31] = e1000e_mac_readreg,
    [RSSRK ... RSSRK + 31] = e1000e_mac_readreg,
    [MAVTV0 ... MAVTV3] = e1000e_mac_readreg,
    [EITR...EITR + E1000E_MSIX_VEC_NUM - 1] = e1000e_mac_eitr_read
};
enum { E1000E_NREADOPS = ARRAY_SIZE(e1000e_macreg_readops) };

#define e1000e_putreg(x)    [x] = e1000e_mac_writereg
typedef void (*writeops)(E1000ECore *, int, uint32_t);
static const writeops e1000e_macreg_writeops[] = {
    e1000e_putreg(PBA),
    e1000e_putreg(SWSM),
    e1000e_putreg(WUFC),
    e1000e_putreg(RDBAH1),
    e1000e_putreg(TDBAH),
    e1000e_putreg(TXDCTL),
    e1000e_putreg(RDBAH0),
    e1000e_putreg(LEDCTL),
    e1000e_putreg(FCAL),
    e1000e_putreg(FCRUC),
    e1000e_putreg(AIT),
    e1000e_putreg(TDFH),
    e1000e_putreg(TDFT),
    e1000e_putreg(TDFHS),
    e1000e_putreg(TDFTS),
    e1000e_putreg(TDFPC),
    e1000e_putreg(WUC),
    e1000e_putreg(WUS),
    e1000e_putreg(RDFH),
    e1000e_putreg(RDFT),
    e1000e_putreg(RDFHS),
    e1000e_putreg(RDFTS),
    e1000e_putreg(RDFPC),
    e1000e_putreg(IPAV),
    e1000e_putreg(TDBAH1),
    e1000e_putreg(TIMINCA),
    e1000e_putreg(IAM),
    e1000e_putreg(EIAC),
    e1000e_putreg(IVAR),
    e1000e_putreg(TARC0),
    e1000e_putreg(TARC1),
    e1000e_putreg(FLSWDATA),
    e1000e_putreg(POEMB),
    e1000e_putreg(PBS),
    e1000e_putreg(MFUTP01),
    e1000e_putreg(MFUTP23),
    e1000e_putreg(MANC),
    e1000e_putreg(MANC2H),
    e1000e_putreg(MFVAL),
    e1000e_putreg(EXTCNF_CTRL),
    e1000e_putreg(FACTPS),
    e1000e_putreg(FUNCTAG),
    e1000e_putreg(GSCL_1),
    e1000e_putreg(GSCL_2),
    e1000e_putreg(GSCL_3),
    e1000e_putreg(GSCL_4),
    e1000e_putreg(GSCN_0),
    e1000e_putreg(GSCN_1),
    e1000e_putreg(GSCN_2),
    e1000e_putreg(GSCN_3),
    e1000e_putreg(GCR2),
    e1000e_putreg(MRQC),
    e1000e_putreg(FLOP),
    e1000e_putreg(FLOL),
    e1000e_putreg(FLSWCTL),
    e1000e_putreg(FLSWCNT),
    e1000e_putreg(FLA),
    e1000e_putreg(RXDCTL1),
    e1000e_putreg(TXDCTL1),
    e1000e_putreg(TIPG),
    e1000e_putreg(RXSTMPH),
    e1000e_putreg(RXSTMPL),
    e1000e_putreg(RXSATRL),
    e1000e_putreg(RXSATRH),
    e1000e_putreg(TXSTMPL),
    e1000e_putreg(TXSTMPH),
    e1000e_putreg(SYSTIML),
    e1000e_putreg(SYSTIMH),
    e1000e_putreg(TIMADJL),
    e1000e_putreg(TIMADJH),
    e1000e_putreg(RXUDP),
    e1000e_putreg(RXCFGL),
    e1000e_putreg(TSYNCRXCTL),
    e1000e_putreg(TSYNCTXCTL),
    e1000e_putreg(EXTCNF_SIZE),
    e1000e_putreg(EEMNGCTL),
    e1000e_putreg(RA),

    [TDH1] = e1000e_set_16bit,
    [TDT1] = e1000e_set_tdt,
    [TCTL] = e1000e_set_tctl,
    [TDT] = e1000e_set_tdt,
    [MDIC] = e1000e_set_mdic,
    [ICS] = e1000e_set_ics,
    [TDH] = e1000e_set_16bit,
    [RDH0] = e1000e_set_16bit,
    [RDT0] = e1000e_set_rdt,
    [IMC] = e1000e_set_imc,
    [IMS] = e1000e_set_ims,
    [ICR] = e1000e_set_icr,
    [EECD] = e1000e_set_eecd,
    [RCTL] = e1000e_set_rx_control,
    [CTRL] = e1000e_set_ctrl,
    [RDTR] = e1000e_set_rdtr,
    [RADV] = e1000e_set_16bit,
    [TADV] = e1000e_set_16bit,
    [ITR] = e1000e_set_itr,
    [EERD] = e1000e_set_eerd,
    [GCR] = e1000e_set_gcr,
    [PSRCTL] = e1000e_set_psrctl,
    [RXCSUM] = e1000e_set_rxcsum,
    [RAID] = e1000e_set_16bit,
    [RSRPD] = e1000e_set_12bit,
    [TIDV] = e1000e_set_tidv,
    [TDLEN1] = e1000e_set_dlen,
    [TDLEN] = e1000e_set_dlen,
    [RDLEN0] = e1000e_set_dlen,
    [RDLEN1] = e1000e_set_dlen,
    [TDBAL] = e1000e_set_dbal,
    [TDBAL1] = e1000e_set_dbal,
    [RDBAL0] = e1000e_set_dbal,
    [RDBAL1] = e1000e_set_dbal,
    [RDH1] = e1000e_set_16bit,
    [RDT1] = e1000e_set_rdt,
    [STATUS] = e1000e_set_status,
    [PBACLR] = e1000e_set_pbaclr,
    [CTRL_EXT] = e1000e_set_ctrlext,
    [FCAH] = e1000e_set_16bit,
    [FCT] = e1000e_set_16bit,
    [FCTTV] = e1000e_set_16bit,
    [FCRTV] = e1000e_set_16bit,
    [FCRTH] = e1000e_set_fcrth,
    [FCRTL] = e1000e_set_fcrtl,
    [VET] = e1000e_set_vet,
    [RXDCTL] = e1000e_set_rxdctl,
    [FLASHT] = e1000e_set_16bit,
    [EEWR] = e1000e_set_eewr,
    [CTRL_DUP] = e1000e_set_ctrl,
    [RFCTL] = e1000e_set_rfctl,
    [RA + 1] = e1000e_mac_setmacaddr,

    [IP6AT ... IP6AT + 3] = e1000e_mac_writereg,
    [IP4AT ... IP4AT + 6] = e1000e_mac_writereg,
    [RA + 2 ... RA + 31] = e1000e_mac_writereg,
    [WUPM ... WUPM + 31] = e1000e_mac_writereg,
    [MTA ... MTA + 127] = e1000e_mac_writereg,
    [VFTA ... VFTA + 127] = e1000e_mac_writereg,
    [FFMT ... FFMT + 254] = e1000e_mac_writereg,
    [FFVT ... FFVT + 254] = e1000e_mac_writereg,
    [PBM ... PBM + 10239] = e1000e_mac_writereg,
    [MDEF ... MDEF + 7] = e1000e_mac_writereg,
    [FFLT ... FFLT + 10] = e1000e_mac_writereg,
    [FTFT ... FTFT + 254] = e1000e_mac_writereg,
    [RETA ... RETA + 31] = e1000e_mac_writereg,
    [RSSRK ... RSSRK + 31] = e1000e_mac_writereg,
    [MAVTV0 ... MAVTV3] = e1000e_mac_writereg,
    [EITR...EITR + E1000E_MSIX_VEC_NUM - 1] = e1000e_set_eitr
};
enum { E1000E_NWRITEOPS = ARRAY_SIZE(e1000e_macreg_writeops) };

enum { MAC_ACCESS_PARTIAL = 1 };
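/*
 * Worked example for the alias table below (added, offsets
 * illustrative): if RDT0 lives at offset 0x2818 (index 0xa06) and its
 * legacy alias at offset 0x128 (index 0x4a), then the entry
 * [RDT0_A] = 0x09bc makes e1000e_get_reg_index_with_offset() return
 * 0x4a + 0x9bc = 0xa06, so both offsets reach the same
 * e1000e_set_rdt() handler.
 */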
/*
 * The array below combines alias offsets of the index values for the
 * MAC registers that have aliases, with the indication of not fully
 * implemented registers (lowest bit). This combination is possible
 * because all of the offsets are even.
 */
static const uint16_t mac_reg_access[E1000E_MAC_SIZE] = {
    /* Alias index offsets */
    [FCRTL_A] = 0x07fe, [FCRTH_A] = 0x0802,
    [RDH0_A] = 0x09bc, [RDT0_A] = 0x09bc, [RDTR_A] = 0x09c6,
    [RDFH_A] = 0xe904, [RDFT_A] = 0xe904,
    [TDH_A] = 0x0cf8, [TDT_A] = 0x0cf8, [TIDV_A] = 0x0cf8,
    [TDFH_A] = 0xed00, [TDFT_A] = 0xed00,
    [RA_A ... RA_A + 31] = 0x14f0,
    [VFTA_A ... VFTA_A + 127] = 0x1400,
    [RDBAL0_A ... RDLEN0_A] = 0x09bc,
    [TDBAL_A ... TDLEN_A] = 0x0cf8,
    /* Access options */
    [RDFH] = MAC_ACCESS_PARTIAL, [RDFT] = MAC_ACCESS_PARTIAL,
    [RDFHS] = MAC_ACCESS_PARTIAL, [RDFTS] = MAC_ACCESS_PARTIAL,
    [RDFPC] = MAC_ACCESS_PARTIAL,
    [TDFH] = MAC_ACCESS_PARTIAL, [TDFT] = MAC_ACCESS_PARTIAL,
    [TDFHS] = MAC_ACCESS_PARTIAL, [TDFTS] = MAC_ACCESS_PARTIAL,
    [TDFPC] = MAC_ACCESS_PARTIAL, [EECD] = MAC_ACCESS_PARTIAL,
    [PBM] = MAC_ACCESS_PARTIAL, [FLA] = MAC_ACCESS_PARTIAL,
    [FCAL] = MAC_ACCESS_PARTIAL, [FCAH] = MAC_ACCESS_PARTIAL,
    [FCT] = MAC_ACCESS_PARTIAL, [FCTTV] = MAC_ACCESS_PARTIAL,
    [FCRTV] = MAC_ACCESS_PARTIAL, [FCRTL] = MAC_ACCESS_PARTIAL,
    [FCRTH] = MAC_ACCESS_PARTIAL, [TXDCTL] = MAC_ACCESS_PARTIAL,
    [TXDCTL1] = MAC_ACCESS_PARTIAL,
    [MAVTV0 ... MAVTV3] = MAC_ACCESS_PARTIAL
};

void
e1000e_core_write(E1000ECore *core, hwaddr addr, uint64_t val, unsigned size)
{
    uint16_t index = e1000e_get_reg_index_with_offset(mac_reg_access, addr);

    if (index < E1000E_NWRITEOPS && e1000e_macreg_writeops[index]) {
        if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) {
            trace_e1000e_wrn_regs_write_trivial(index << 2);
        }
        trace_e1000e_core_write(index << 2, size, val);
        e1000e_macreg_writeops[index](core, index, val);
    } else if (index < E1000E_NREADOPS && e1000e_macreg_readops[index]) {
        trace_e1000e_wrn_regs_write_ro(index << 2, size, val);
    } else {
        trace_e1000e_wrn_regs_write_unknown(index << 2, size, val);
    }
}

uint64_t
e1000e_core_read(E1000ECore *core, hwaddr addr, unsigned size)
{
    uint64_t val;
    uint16_t index = e1000e_get_reg_index_with_offset(mac_reg_access, addr);

    if (index < E1000E_NREADOPS && e1000e_macreg_readops[index]) {
        if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) {
            trace_e1000e_wrn_regs_read_trivial(index << 2);
        }
        val = e1000e_macreg_readops[index](core, index);
        trace_e1000e_core_read(index << 2, size, val);
        return val;
    } else {
        trace_e1000e_wrn_regs_read_unknown(index << 2, size);
    }
    return 0;
}
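/*
 * Added summary for the MMIO entry points above: a write that matches
 * only a read handler is treated as read-only and dropped with a
 * trace; accesses matching neither table are traced as unknown, and
 * unknown reads return 0.
 */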
static inline void
e1000e_autoneg_pause(E1000ECore *core)
{
    timer_del(core->autoneg_timer);
}

static void
e1000e_autoneg_resume(E1000ECore *core)
{
    if (e1000e_have_autoneg(core) &&
        !(core->phy[0][PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) {
        qemu_get_queue(core->owner_nic)->link_down = false;
        timer_mod(core->autoneg_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
    }
}

static void
e1000e_vm_state_change(void *opaque, bool running, RunState state)
{
    E1000ECore *core = opaque;

    if (running) {
        trace_e1000e_vm_state_running();
        e1000e_intrmgr_resume(core);
        e1000e_autoneg_resume(core);
    } else {
        trace_e1000e_vm_state_stopped();
        e1000e_autoneg_pause(core);
        e1000e_intrmgr_pause(core);
    }
}

void
e1000e_core_pci_realize(E1000ECore *core,
                        const uint16_t *eeprom_templ,
                        uint32_t eeprom_size,
                        const uint8_t *macaddr)
{
    int i;

    core->autoneg_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                       e1000e_autoneg_timer, core);
    e1000e_intrmgr_pci_realize(core);

    core->vmstate =
        qemu_add_vm_change_state_handler(e1000e_vm_state_change, core);

    for (i = 0; i < E1000E_NUM_QUEUES; i++) {
        net_tx_pkt_init(&core->tx[i].tx_pkt, core->owner,
                        E1000E_MAX_TX_FRAGS, core->has_vnet);
    }

    net_rx_pkt_init(&core->rx_pkt, core->has_vnet);

    e1000x_core_prepare_eeprom(core->eeprom,
                               eeprom_templ,
                               eeprom_size,
                               PCI_DEVICE_GET_CLASS(core->owner)->device_id,
                               macaddr);
    e1000e_update_rx_offloads(core);
}

void
e1000e_core_pci_uninit(E1000ECore *core)
{
    int i;

    timer_free(core->autoneg_timer);

    e1000e_intrmgr_pci_unint(core);

    qemu_del_vm_change_state_handler(core->vmstate);

    for (i = 0; i < E1000E_NUM_QUEUES; i++) {
        net_tx_pkt_reset(core->tx[i].tx_pkt);
        net_tx_pkt_uninit(core->tx[i].tx_pkt);
    }

    net_rx_pkt_uninit(core->rx_pkt);
}

static const uint16_t
e1000e_phy_reg_init[E1000E_PHY_PAGES][E1000E_PHY_PAGE_SIZE] = {
    [0] = {
        [PHY_CTRL] = MII_CR_SPEED_SELECT_MSB |
                     MII_CR_FULL_DUPLEX |
                     MII_CR_AUTO_NEG_EN,

        [PHY_STATUS] = MII_SR_EXTENDED_CAPS |
                       MII_SR_LINK_STATUS |
                       MII_SR_AUTONEG_CAPS |
                       MII_SR_PREAMBLE_SUPPRESS |
                       MII_SR_EXTENDED_STATUS |
                       MII_SR_10T_HD_CAPS |
                       MII_SR_10T_FD_CAPS |
                       MII_SR_100X_HD_CAPS |
                       MII_SR_100X_FD_CAPS,

        [PHY_ID1] = 0x141,
        [PHY_ID2] = E1000_PHY_ID2_82574x,
        [PHY_AUTONEG_ADV] = 0xde1,
        [PHY_LP_ABILITY] = 0x7e0,
        [PHY_AUTONEG_EXP] = BIT(2),
        [PHY_NEXT_PAGE_TX] = BIT(0) | BIT(13),
        [PHY_1000T_CTRL] = BIT(8) | BIT(9) | BIT(10) | BIT(11),
        [PHY_1000T_STATUS] = 0x3c00,
        [PHY_EXT_STATUS] = BIT(12) | BIT(13),

        [PHY_COPPER_CTRL1] = BIT(5) | BIT(6) | BIT(8) | BIT(9) |
                             BIT(12) | BIT(13),
        [PHY_COPPER_STAT1] = BIT(3) | BIT(10) | BIT(11) | BIT(13) | BIT(15)
    },
    [2] = {
        [PHY_MAC_CTRL1] = BIT(3) | BIT(7),
        [PHY_MAC_CTRL2] = BIT(1) | BIT(2) | BIT(6) | BIT(12)
    },
    [3] = {
        [PHY_LED_TIMER_CTRL] = BIT(0) | BIT(2) | BIT(14)
    }
};

static const uint32_t e1000e_mac_reg_init[] = {
    [PBA] = 0x00140014,
    [LEDCTL] = BIT(1) | BIT(8) | BIT(9) | BIT(15) | BIT(17) | BIT(18),
    [EXTCNF_CTRL] = BIT(3),
    [EEMNGCTL] = BIT(31),
    [FLASHT] = 0x2,
    [FLSWCTL] = BIT(30) | BIT(31),
    [FLOL] = BIT(0),
    [RXDCTL] = BIT(16),
    [RXDCTL1] = BIT(16),
    [TIPG] = 0x8 | (0x8 << 10) | (0x6 << 20),
    [RXCFGL] = 0x88F7,
    [RXUDP] = 0x319,
    [CTRL] = E1000_CTRL_FD | E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 |
             E1000_CTRL_SPD_1000 | E1000_CTRL_SLU |
             E1000_CTRL_ADVD3WUC,
    [STATUS] = E1000_STATUS_ASDV_1000 | E1000_STATUS_LU,
    [PSRCTL] = (2 << E1000_PSRCTL_BSIZE0_SHIFT) |
               (4 << E1000_PSRCTL_BSIZE1_SHIFT) |
               (4 << E1000_PSRCTL_BSIZE2_SHIFT),
    [TARC0] = 0x3 | E1000_TARC_ENABLE,
    [TARC1] = 0x3 | E1000_TARC_ENABLE,
    [EECD] = E1000_EECD_AUTO_RD | E1000_EECD_PRES,
    [EERD] = E1000_EERW_DONE,
    [EEWR] = E1000_EERW_DONE,
    [GCR] = E1000_L0S_ADJUST |
            E1000_L1_ENTRY_LATENCY_MSB |
            E1000_L1_ENTRY_LATENCY_LSB,
    [TDFH] = 0x600,
    [TDFT] = 0x600,
    [TDFHS] = 0x600,
    [TDFTS] = 0x600,
    [POEMB] = 0x30D,
    [PBS] = 0x028,
    [MANC] = E1000_MANC_DIS_IP_CHK_ARP,
    [FACTPS] = E1000_FACTPS_LAN0_ON | 0x20000000,
    [SWSM] = 1,
    [RXCSUM] = E1000_RXCSUM_IPOFLD | E1000_RXCSUM_TUOFLD,
    [ITR] = E1000E_MIN_XITR,
    [EITR...EITR + E1000E_MSIX_VEC_NUM - 1] = E1000E_MIN_XITR,
};
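/*
 * Added note: e1000e_core_reset() below zeroes both register files and
 * then reloads them from the init tables above, so anything not listed
 * there resets to zero.  The EEPROM contents are not touched by reset,
 * and the permanent MAC address is written back into the receive
 * address registers by e1000x_reset_mac_addr().
 */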
void
e1000e_core_reset(E1000ECore *core)
{
    int i;

    timer_del(core->autoneg_timer);

    e1000e_intrmgr_reset(core);

    memset(core->phy, 0, sizeof core->phy);
    memmove(core->phy, e1000e_phy_reg_init, sizeof e1000e_phy_reg_init);
    memset(core->mac, 0, sizeof core->mac);
    memmove(core->mac, e1000e_mac_reg_init, sizeof e1000e_mac_reg_init);

    core->rxbuf_min_shift = 1 + E1000_RING_DESC_LEN_SHIFT;

    if (qemu_get_queue(core->owner_nic)->link_down) {
        e1000e_link_down(core);
    }

    e1000x_reset_mac_addr(core->owner_nic, core->mac, core->permanent_mac);

    for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
        net_tx_pkt_reset(core->tx[i].tx_pkt);
        memset(&core->tx[i].props, 0, sizeof(core->tx[i].props));
        core->tx[i].skip_cp = false;
    }
}

void e1000e_core_pre_save(E1000ECore *core)
{
    int i;
    NetClientState *nc = qemu_get_queue(core->owner_nic);

    /*
     * If link is down and auto-negotiation is supported and ongoing,
     * complete auto-negotiation immediately. This allows us to look
     * at MII_SR_AUTONEG_COMPLETE to infer link status on load.
     */
    if (nc->link_down && e1000e_have_autoneg(core)) {
        core->phy[0][PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
        e1000e_update_flowctl_status(core);
    }

    for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
        if (net_tx_pkt_has_fragments(core->tx[i].tx_pkt)) {
            core->tx[i].skip_cp = true;
        }
    }
}

int
e1000e_core_post_load(E1000ECore *core)
{
    NetClientState *nc = qemu_get_queue(core->owner_nic);

    /*
     * nc.link_down can't be migrated, so infer link_down according
     * to link status bit in core.mac[STATUS].
     */
    nc->link_down = (core->mac[STATUS] & E1000_STATUS_LU) == 0;

    return 0;
}