/*
 * Core code for QEMU e1000e emulation
 *
 * Software developer's manuals:
 * http://www.intel.com/content/dam/doc/datasheet/82574l-gbe-controller-datasheet.pdf
 *
 * Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com)
 * Developed by Daynix Computing LTD (http://www.daynix.com)
 *
 * Authors:
 * Dmitry Fleytman <dmitry@daynix.com>
 * Leonid Bloch <leonid@daynix.com>
 * Yan Vugenfirer <yan@daynix.com>
 *
 * Based on work done by:
 * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
 * Copyright (c) 2008 Qumranet
 * Based on work done by:
 * Copyright (c) 2007 Dan Aloni
 * Copyright (c) 2004 Antony T Curtis
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "net/net.h"
#include "net/tap.h"
#include "hw/net/mii.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "sysemu/runstate.h"

#include "net_tx_pkt.h"
#include "net_rx_pkt.h"

#include "e1000_common.h"
#include "e1000x_common.h"
#include "e1000e_core.h"

#include "trace.h"

/* No more than 7813 interrupts per second according to spec 10.2.4.2 */
#define E1000E_MIN_XITR     (500)

#define E1000E_MAX_TX_FRAGS (64)

union e1000_rx_desc_union {
    struct e1000_rx_desc legacy;
    union e1000_rx_desc_extended extended;
    union e1000_rx_desc_packet_split packet_split;
};

static ssize_t
e1000e_receive_internal(E1000ECore *core, const struct iovec *iov, int iovcnt,
                        bool has_vnet);

static inline void
e1000e_set_interrupt_cause(E1000ECore *core, uint32_t val);

static void e1000e_reset(E1000ECore *core, bool sw);

static inline void
e1000e_process_ts_option(E1000ECore *core, struct e1000_tx_desc *dp)
{
    if (le32_to_cpu(dp->upper.data) & E1000_TXD_EXTCMD_TSTAMP) {
        trace_e1000e_wrn_no_ts_support();
    }
}

static inline void
e1000e_process_snap_option(E1000ECore *core, uint32_t cmd_and_length)
{
    if (cmd_and_length & E1000_TXD_CMD_SNAP) {
        trace_e1000e_wrn_no_snap_support();
    }
}

static inline void
e1000e_raise_legacy_irq(E1000ECore *core)
{
    trace_e1000e_irq_legacy_notify(true);
    e1000x_inc_reg_if_not_full(core->mac, IAC);
    pci_set_irq(core->owner, 1);
}

static inline void
e1000e_lower_legacy_irq(E1000ECore *core)
{
    trace_e1000e_irq_legacy_notify(false);
    pci_set_irq(core->owner, 0);
}

static inline void
e1000e_intrmgr_rearm_timer(E1000IntrDelayTimer *timer)
{
    int64_t delay_ns = (int64_t) timer->core->mac[timer->delay_reg] *
                       timer->delay_resolution_ns;

    trace_e1000e_irq_rearm_timer(timer->delay_reg << 2, delay_ns);

    timer_mod(timer->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + delay_ns);

    timer->running = true;
}

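/*
 * The helpers below suspend and re-arm interrupt delay timers.
 * Pausing only stops the QEMU timer while keeping timer->running set,
 * so a later resume re-arms exactly the timers that were pending;
 * a full stop clears the running flag as well.
 */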
static void
e1000e_intmgr_timer_resume(E1000IntrDelayTimer *timer)
{
    if (timer->running) {
        e1000e_intrmgr_rearm_timer(timer);
    }
}

static void
e1000e_intmgr_timer_pause(E1000IntrDelayTimer *timer)
{
    if (timer->running) {
        timer_del(timer->timer);
    }
}

static inline void
e1000e_intrmgr_stop_timer(E1000IntrDelayTimer *timer)
{
    if (timer->running) {
        timer_del(timer->timer);
        timer->running = false;
    }
}

static inline void
e1000e_intrmgr_fire_delayed_interrupts(E1000ECore *core)
{
    trace_e1000e_irq_fire_delayed_interrupts();
    e1000e_set_interrupt_cause(core, 0);
}

static void
e1000e_intrmgr_on_timer(void *opaque)
{
    E1000IntrDelayTimer *timer = opaque;

    trace_e1000e_irq_throttling_timer(timer->delay_reg << 2);

    timer->running = false;
    e1000e_intrmgr_fire_delayed_interrupts(timer->core);
}

static void
e1000e_intrmgr_on_throttling_timer(void *opaque)
{
    E1000IntrDelayTimer *timer = opaque;

    timer->running = false;

    if (msi_enabled(timer->core->owner)) {
        trace_e1000e_irq_msi_notify_postponed();
        /* Clear msi_causes_pending to fire MSI eventually */
        timer->core->msi_causes_pending = 0;
        e1000e_set_interrupt_cause(timer->core, 0);
    } else {
        trace_e1000e_irq_legacy_notify_postponed();
        e1000e_set_interrupt_cause(timer->core, 0);
    }
}

static void
e1000e_intrmgr_on_msix_throttling_timer(void *opaque)
{
    E1000IntrDelayTimer *timer = opaque;
    int idx = timer - &timer->core->eitr[0];

    timer->running = false;

    trace_e1000e_irq_msix_notify_postponed_vec(idx);
    msix_notify(timer->core->owner, idx);
}

static void
e1000e_intrmgr_initialize_all_timers(E1000ECore *core, bool create)
{
    int i;

    core->radv.delay_reg = RADV;
    core->rdtr.delay_reg = RDTR;
    core->raid.delay_reg = RAID;
    core->tadv.delay_reg = TADV;
    core->tidv.delay_reg = TIDV;

    core->radv.delay_resolution_ns = E1000_INTR_DELAY_NS_RES;
    core->rdtr.delay_resolution_ns = E1000_INTR_DELAY_NS_RES;
    core->raid.delay_resolution_ns = E1000_INTR_DELAY_NS_RES;
    core->tadv.delay_resolution_ns = E1000_INTR_DELAY_NS_RES;
    core->tidv.delay_resolution_ns = E1000_INTR_DELAY_NS_RES;

    core->radv.core = core;
    core->rdtr.core = core;
    core->raid.core = core;
    core->tadv.core = core;
    core->tidv.core = core;

    core->itr.core = core;
    core->itr.delay_reg = ITR;
    core->itr.delay_resolution_ns = E1000_INTR_THROTTLING_NS_RES;

    for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
        core->eitr[i].core = core;
        core->eitr[i].delay_reg = EITR + i;
        core->eitr[i].delay_resolution_ns = E1000_INTR_THROTTLING_NS_RES;
    }

    if (!create) {
        return;
    }

    core->radv.timer =
        timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->radv);
    core->rdtr.timer =
        timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->rdtr);
    core->raid.timer =
        timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->raid);

    core->tadv.timer =
        timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->tadv);
    core->tidv.timer =
        timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->tidv);

    core->itr.timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                   e1000e_intrmgr_on_throttling_timer,
                                   &core->itr);

    for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
        core->eitr[i].timer =
            timer_new_ns(QEMU_CLOCK_VIRTUAL,
                         e1000e_intrmgr_on_msix_throttling_timer,
                         &core->eitr[i]);
    }
}

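/*
 * Interrupt moderation state: RDTR/TIDV delay an interrupt after each
 * received/sent packet, RADV/TADV bound the total delay, and RAID
 * applies a shorter delay for TCP ACKs.  The helpers below decide
 * whether a freshly raised cause may be delayed, parking it in
 * core->delayed_causes until one of these timers fires.
 */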
static inline void
e1000e_intrmgr_stop_delay_timers(E1000ECore *core)
{
    e1000e_intrmgr_stop_timer(&core->radv);
    e1000e_intrmgr_stop_timer(&core->rdtr);
    e1000e_intrmgr_stop_timer(&core->raid);
    e1000e_intrmgr_stop_timer(&core->tidv);
    e1000e_intrmgr_stop_timer(&core->tadv);
}

static bool
e1000e_intrmgr_delay_rx_causes(E1000ECore *core, uint32_t *causes)
{
    uint32_t delayable_causes;
    uint32_t rdtr = core->mac[RDTR];
    uint32_t radv = core->mac[RADV];
    uint32_t raid = core->mac[RAID];

    if (msix_enabled(core->owner)) {
        return false;
    }

    delayable_causes = E1000_ICR_RXQ0 |
                       E1000_ICR_RXQ1 |
                       E1000_ICR_RXT0;

    if (!(core->mac[RFCTL] & E1000_RFCTL_ACK_DIS)) {
        delayable_causes |= E1000_ICR_ACK;
    }

    /* Clean up all causes that may be delayed */
    core->delayed_causes |= *causes & delayable_causes;
    *causes &= ~delayable_causes;

    /*
     * Check if delayed RX interrupts are disabled by the client
     * or if there are causes that cannot be delayed
     */
    if ((rdtr == 0) || (*causes != 0)) {
        return false;
    }

    /*
     * Check if delayed RX ACK interrupts are disabled by the client
     * and an ACK packet has been received
     */
    if ((raid == 0) && (core->delayed_causes & E1000_ICR_ACK)) {
        return false;
    }

    /* All causes delayed */
    e1000e_intrmgr_rearm_timer(&core->rdtr);

    if (!core->radv.running && (radv != 0)) {
        e1000e_intrmgr_rearm_timer(&core->radv);
    }

    if (!core->raid.running && (core->delayed_causes & E1000_ICR_ACK)) {
        e1000e_intrmgr_rearm_timer(&core->raid);
    }

    return true;
}

static bool
e1000e_intrmgr_delay_tx_causes(E1000ECore *core, uint32_t *causes)
{
    static const uint32_t delayable_causes = E1000_ICR_TXQ0 |
                                             E1000_ICR_TXQ1 |
                                             E1000_ICR_TXQE |
                                             E1000_ICR_TXDW;

    if (msix_enabled(core->owner)) {
        return false;
    }

    /* Clean up all causes that may be delayed */
    core->delayed_causes |= *causes & delayable_causes;
    *causes &= ~delayable_causes;

    /* If there are causes that cannot be delayed */
    if (*causes != 0) {
        return false;
    }

    /* All causes delayed */
    e1000e_intrmgr_rearm_timer(&core->tidv);

    if (!core->tadv.running && (core->mac[TADV] != 0)) {
        e1000e_intrmgr_rearm_timer(&core->tadv);
    }

    return true;
}

static uint32_t
e1000e_intmgr_collect_delayed_causes(E1000ECore *core)
{
    uint32_t res;

    if (msix_enabled(core->owner)) {
        assert(core->delayed_causes == 0);
        return 0;
    }

    res = core->delayed_causes;
    core->delayed_causes = 0;

    e1000e_intrmgr_stop_delay_timers(core);

    return res;
}

static void
e1000e_intrmgr_fire_all_timers(E1000ECore *core)
{
    int i;
    uint32_t val = e1000e_intmgr_collect_delayed_causes(core);

    trace_e1000e_irq_adding_delayed_causes(val, core->mac[ICR]);
    core->mac[ICR] |= val;

    if (core->itr.running) {
        timer_del(core->itr.timer);
        e1000e_intrmgr_on_throttling_timer(&core->itr);
    }

    for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
        if (core->eitr[i].running) {
            timer_del(core->eitr[i].timer);
            e1000e_intrmgr_on_msix_throttling_timer(&core->eitr[i]);
        }
    }
}

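/*
 * Pause/resume are presumably driven by VM run-state changes: pending
 * delay and throttling timers are stopped while the VM is halted and
 * re-armed with their full intervals once it runs again.
 */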
static void
e1000e_intrmgr_resume(E1000ECore *core)
{
    int i;

    e1000e_intmgr_timer_resume(&core->radv);
    e1000e_intmgr_timer_resume(&core->rdtr);
    e1000e_intmgr_timer_resume(&core->raid);
    e1000e_intmgr_timer_resume(&core->tidv);
    e1000e_intmgr_timer_resume(&core->tadv);

    e1000e_intmgr_timer_resume(&core->itr);

    for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
        e1000e_intmgr_timer_resume(&core->eitr[i]);
    }
}

static void
e1000e_intrmgr_pause(E1000ECore *core)
{
    int i;

    e1000e_intmgr_timer_pause(&core->radv);
    e1000e_intmgr_timer_pause(&core->rdtr);
    e1000e_intmgr_timer_pause(&core->raid);
    e1000e_intmgr_timer_pause(&core->tidv);
    e1000e_intmgr_timer_pause(&core->tadv);

    e1000e_intmgr_timer_pause(&core->itr);

    for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
        e1000e_intmgr_timer_pause(&core->eitr[i]);
    }
}

static void
e1000e_intrmgr_reset(E1000ECore *core)
{
    int i;

    core->delayed_causes = 0;

    e1000e_intrmgr_stop_delay_timers(core);

    e1000e_intrmgr_stop_timer(&core->itr);

    for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
        e1000e_intrmgr_stop_timer(&core->eitr[i]);
    }
}

static void
e1000e_intrmgr_pci_unint(E1000ECore *core)
{
    int i;

    timer_free(core->radv.timer);
    timer_free(core->rdtr.timer);
    timer_free(core->raid.timer);

    timer_free(core->tadv.timer);
    timer_free(core->tidv.timer);

    timer_free(core->itr.timer);

    for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
        timer_free(core->eitr[i].timer);
    }
}

static void
e1000e_intrmgr_pci_realize(E1000ECore *core)
{
    e1000e_intrmgr_initialize_all_timers(core, true);
}

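/*
 * RX descriptor format and RSS availability are coupled: RXCSUM.PCSD
 * selects RSS hash reporting instead of checksum reporting in the
 * descriptor write-back, and RSS additionally requires the extended
 * (non-legacy) descriptor format enabled via RFCTL.EXTEN.
 */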
static inline bool
e1000e_rx_csum_enabled(E1000ECore *core)
{
    return (core->mac[RXCSUM] & E1000_RXCSUM_PCSD) ? false : true;
}

static inline bool
e1000e_rx_use_legacy_descriptor(E1000ECore *core)
{
    return (core->mac[RFCTL] & E1000_RFCTL_EXTEN) ? false : true;
}

static inline bool
e1000e_rx_use_ps_descriptor(E1000ECore *core)
{
    return !e1000e_rx_use_legacy_descriptor(core) &&
           (core->mac[RCTL] & E1000_RCTL_DTYP_PS);
}

static inline bool
e1000e_rss_enabled(E1000ECore *core)
{
    return E1000_MRQC_ENABLED(core->mac[MRQC]) &&
           !e1000e_rx_csum_enabled(core) &&
           !e1000e_rx_use_legacy_descriptor(core);
}

typedef struct E1000E_RSSInfo_st {
    bool enabled;
    uint32_t hash;
    uint32_t queue;
    uint32_t type;
} E1000E_RSSInfo;

static uint32_t
e1000e_rss_get_hash_type(E1000ECore *core, struct NetRxPkt *pkt)
{
    bool hasip4, hasip6;
    EthL4HdrProto l4hdr_proto;

    assert(e1000e_rss_enabled(core));

    net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);

    if (hasip4) {
        trace_e1000e_rx_rss_ip4(l4hdr_proto, core->mac[MRQC],
                                E1000_MRQC_EN_TCPIPV4(core->mac[MRQC]),
                                E1000_MRQC_EN_IPV4(core->mac[MRQC]));

        if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP &&
            E1000_MRQC_EN_TCPIPV4(core->mac[MRQC])) {
            return E1000_MRQ_RSS_TYPE_IPV4TCP;
        }

        if (E1000_MRQC_EN_IPV4(core->mac[MRQC])) {
            return E1000_MRQ_RSS_TYPE_IPV4;
        }
    } else if (hasip6) {
        eth_ip6_hdr_info *ip6info = net_rx_pkt_get_ip6_info(pkt);

        bool ex_dis = core->mac[RFCTL] & E1000_RFCTL_IPV6_EX_DIS;
        bool new_ex_dis = core->mac[RFCTL] & E1000_RFCTL_NEW_IPV6_EXT_DIS;

        /*
         * The following two traces must not be combined because the
         * resulting event would have 11 arguments in total, and some
         * trace backends (at least "ust") are limited to a maximum of
         * 10 arguments per event.  Events with more arguments fail to
         * compile for backends like these.
         */
        trace_e1000e_rx_rss_ip6_rfctl(core->mac[RFCTL]);
        trace_e1000e_rx_rss_ip6(ex_dis, new_ex_dis, l4hdr_proto,
                                ip6info->has_ext_hdrs,
                                ip6info->rss_ex_dst_valid,
                                ip6info->rss_ex_src_valid,
                                core->mac[MRQC],
                                E1000_MRQC_EN_TCPIPV6(core->mac[MRQC]),
                                E1000_MRQC_EN_IPV6EX(core->mac[MRQC]),
                                E1000_MRQC_EN_IPV6(core->mac[MRQC]));

        if ((!ex_dis || !ip6info->has_ext_hdrs) &&
            (!new_ex_dis || !(ip6info->rss_ex_dst_valid ||
                              ip6info->rss_ex_src_valid))) {

            if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP &&
                E1000_MRQC_EN_TCPIPV6(core->mac[MRQC])) {
                return E1000_MRQ_RSS_TYPE_IPV6TCP;
            }

            if (E1000_MRQC_EN_IPV6EX(core->mac[MRQC])) {
                return E1000_MRQ_RSS_TYPE_IPV6EX;
            }

        }

        if (E1000_MRQC_EN_IPV6(core->mac[MRQC])) {
            return E1000_MRQ_RSS_TYPE_IPV6;
        }

    }

    return E1000_MRQ_RSS_TYPE_NONE;
}

static uint32_t
e1000e_rss_calc_hash(E1000ECore *core,
                     struct NetRxPkt *pkt,
                     E1000E_RSSInfo *info)
{
    NetRxPktRssType type;

    assert(e1000e_rss_enabled(core));

    switch (info->type) {
    case E1000_MRQ_RSS_TYPE_IPV4:
        type = NetPktRssIpV4;
        break;
    case E1000_MRQ_RSS_TYPE_IPV4TCP:
        type = NetPktRssIpV4Tcp;
        break;
    case E1000_MRQ_RSS_TYPE_IPV6TCP:
        type = NetPktRssIpV6TcpEx;
        break;
    case E1000_MRQ_RSS_TYPE_IPV6:
        type = NetPktRssIpV6;
        break;
    case E1000_MRQ_RSS_TYPE_IPV6EX:
        type = NetPktRssIpV6Ex;
        break;
    default:
        assert(false);
        return 0;
    }

    return net_rx_pkt_calc_rss_hash(pkt, type, (uint8_t *) &core->mac[RSSRK]);
}

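/*
 * Queue selection: the low 7 bits of the Toeplitz hash index the
 * 128-entry redirection table (RETA), and the selected entry picks one
 * of the two RX queues (via the E1000_RSS_QUEUE() macro).  E.g. a hash
 * of 0x1a3f is looked up at RETA entry 0x3f.
 */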
static void
e1000e_rss_parse_packet(E1000ECore *core,
                        struct NetRxPkt *pkt,
                        E1000E_RSSInfo *info)
{
    trace_e1000e_rx_rss_started();

    if (!e1000e_rss_enabled(core)) {
        info->enabled = false;
        info->hash = 0;
        info->queue = 0;
        info->type = 0;
        trace_e1000e_rx_rss_disabled();
        return;
    }

    info->enabled = true;

    info->type = e1000e_rss_get_hash_type(core, pkt);

    trace_e1000e_rx_rss_type(info->type);

    if (info->type == E1000_MRQ_RSS_TYPE_NONE) {
        info->hash = 0;
        info->queue = 0;
        return;
    }

    info->hash = e1000e_rss_calc_hash(core, pkt, info);
    info->queue = E1000_RSS_QUEUE(&core->mac[RETA], info->hash);
}

static bool
e1000e_setup_tx_offloads(E1000ECore *core, struct e1000e_tx *tx)
{
    if (tx->props.tse && tx->cptse) {
        if (!net_tx_pkt_build_vheader(tx->tx_pkt, true, true, tx->props.mss)) {
            return false;
        }

        net_tx_pkt_update_ip_checksums(tx->tx_pkt);
        e1000x_inc_reg_if_not_full(core->mac, TSCTC);
        return true;
    }

    if (tx->sum_needed & E1000_TXD_POPTS_TXSM) {
        if (!net_tx_pkt_build_vheader(tx->tx_pkt, false, true, 0)) {
            return false;
        }
    }

    if (tx->sum_needed & E1000_TXD_POPTS_IXSM) {
        net_tx_pkt_update_ip_hdr_checksum(tx->tx_pkt);
    }

    return true;
}

static void e1000e_tx_pkt_callback(void *core,
                                   const struct iovec *iov,
                                   int iovcnt,
                                   const struct iovec *virt_iov,
                                   int virt_iovcnt)
{
    e1000e_receive_internal(core, virt_iov, virt_iovcnt, true);
}

static bool
e1000e_tx_pkt_send(E1000ECore *core, struct e1000e_tx *tx, int queue_index)
{
    int target_queue = MIN(core->max_queue_num, queue_index);
    NetClientState *queue = qemu_get_subqueue(core->owner_nic, target_queue);

    if (!e1000e_setup_tx_offloads(core, tx)) {
        return false;
    }

    net_tx_pkt_dump(tx->tx_pkt);

    if ((core->phy[0][MII_BMCR] & MII_BMCR_LOOPBACK) ||
        ((core->mac[RCTL] & E1000_RCTL_LBM_MAC) == E1000_RCTL_LBM_MAC)) {
        return net_tx_pkt_send_custom(tx->tx_pkt, false,
                                      e1000e_tx_pkt_callback, core);
    } else {
        return net_tx_pkt_send(tx->tx_pkt, queue);
    }
}

static void
e1000e_on_tx_done_update_stats(E1000ECore *core, struct NetTxPkt *tx_pkt)
{
    static const int PTCregs[6] = { PTC64, PTC127, PTC255, PTC511,
                                    PTC1023, PTC1522 };

    size_t tot_len = net_tx_pkt_get_total_len(tx_pkt) + 4;

    e1000x_increase_size_stats(core->mac, PTCregs, tot_len);
    e1000x_inc_reg_if_not_full(core->mac, TPT);
    e1000x_grow_8reg_if_not_full(core->mac, TOTL, tot_len);

    switch (net_tx_pkt_get_packet_type(tx_pkt)) {
    case ETH_PKT_BCAST:
        e1000x_inc_reg_if_not_full(core->mac, BPTC);
        break;
    case ETH_PKT_MCAST:
        e1000x_inc_reg_if_not_full(core->mac, MPTC);
        break;
    case ETH_PKT_UCAST:
        break;
    default:
        g_assert_not_reached();
    }

    e1000x_inc_reg_if_not_full(core->mac, GPTC);
    e1000x_grow_8reg_if_not_full(core->mac, GOTCL, tot_len);
}

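/*
 * A TX descriptor is one of three kinds: a context descriptor (DEXT
 * set, DTYP_D clear) that only latches offload parameters into
 * tx->props, a data descriptor (DEXT and DTYP_D set) carrying a
 * payload fragment, or a legacy descriptor.  Fragments accumulate in
 * tx->tx_pkt until a descriptor with EOP set completes the packet.
 */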
static void
e1000e_process_tx_desc(E1000ECore *core,
                       struct e1000e_tx *tx,
                       struct e1000_tx_desc *dp,
                       int queue_index)
{
    uint32_t txd_lower = le32_to_cpu(dp->lower.data);
    uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D);
    unsigned int split_size = txd_lower & 0xffff;
    uint64_t addr;
    struct e1000_context_desc *xp = (struct e1000_context_desc *)dp;
    bool eop = txd_lower & E1000_TXD_CMD_EOP;

    if (dtype == E1000_TXD_CMD_DEXT) { /* context descriptor */
        e1000x_read_tx_ctx_descr(xp, &tx->props);
        e1000e_process_snap_option(core, le32_to_cpu(xp->cmd_and_length));
        return;
    } else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) {
        /* data descriptor */
        tx->sum_needed = le32_to_cpu(dp->upper.data) >> 8;
        tx->cptse = (txd_lower & E1000_TXD_CMD_TSE) ? 1 : 0;
        e1000e_process_ts_option(core, dp);
    } else {
        /* legacy descriptor */
        e1000e_process_ts_option(core, dp);
        tx->cptse = 0;
    }

    addr = le64_to_cpu(dp->buffer_addr);

    if (!tx->skip_cp) {
        if (!net_tx_pkt_add_raw_fragment(tx->tx_pkt, addr, split_size)) {
            tx->skip_cp = true;
        }
    }

    if (eop) {
        if (!tx->skip_cp && net_tx_pkt_parse(tx->tx_pkt)) {
            if (e1000x_vlan_enabled(core->mac) &&
                e1000x_is_vlan_txd(txd_lower)) {
                net_tx_pkt_setup_vlan_header_ex(tx->tx_pkt,
                    le16_to_cpu(dp->upper.fields.special), core->mac[VET]);
            }
            if (e1000e_tx_pkt_send(core, tx, queue_index)) {
                e1000e_on_tx_done_update_stats(core, tx->tx_pkt);
            }
        }

        tx->skip_cp = false;
        net_tx_pkt_reset(tx->tx_pkt, core->owner);

        tx->sum_needed = 0;
        tx->cptse = 0;
    }
}

static inline uint32_t
e1000e_tx_wb_interrupt_cause(E1000ECore *core, int queue_idx)
{
    if (!msix_enabled(core->owner)) {
        return E1000_ICR_TXDW;
    }

    return (queue_idx == 0) ? E1000_ICR_TXQ0 : E1000_ICR_TXQ1;
}

static inline uint32_t
e1000e_rx_wb_interrupt_cause(E1000ECore *core, int queue_idx,
                             bool min_threshold_hit)
{
    if (!msix_enabled(core->owner)) {
        return E1000_ICS_RXT0 | (min_threshold_hit ? E1000_ICS_RXDMT0 : 0);
    }

    return (queue_idx == 0) ? E1000_ICR_RXQ0 : E1000_ICR_RXQ1;
}

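/*
 * TX descriptor write-back: performed only when the descriptor
 * requested it (RS set) or IVAR forces an interrupt on every
 * write-back.  Only the upper dword, with DD set, is written back
 * to guest memory.
 */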
static uint32_t
e1000e_txdesc_writeback(E1000ECore *core, dma_addr_t base,
                        struct e1000_tx_desc *dp, bool *ide, int queue_idx)
{
    uint32_t txd_upper, txd_lower = le32_to_cpu(dp->lower.data);

    if (!(txd_lower & E1000_TXD_CMD_RS) &&
        !(core->mac[IVAR] & E1000_IVAR_TX_INT_EVERY_WB)) {
        return 0;
    }

    *ide = (txd_lower & E1000_TXD_CMD_IDE) ? true : false;

    txd_upper = le32_to_cpu(dp->upper.data) | E1000_TXD_STAT_DD;

    dp->upper.data = cpu_to_le32(txd_upper);
    pci_dma_write(core->owner, base + ((char *)&dp->upper - (char *)dp),
                  &dp->upper, sizeof(dp->upper));
    return e1000e_tx_wb_interrupt_cause(core, queue_idx);
}

typedef struct E1000E_RingInfo_st {
    int dbah;   /* descriptor base address high register index */
    int dbal;   /* descriptor base address low register index */
    int dlen;   /* descriptor ring length register index */
    int dh;     /* head register index */
    int dt;     /* tail register index */
    int idx;    /* queue index */
} E1000E_RingInfo;

static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
    return core->mac[r->dh] == core->mac[r->dt] ||
           core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}

static inline uint64_t
e1000e_ring_base(E1000ECore *core, const E1000E_RingInfo *r)
{
    uint64_t bah = core->mac[r->dbah];
    uint64_t bal = core->mac[r->dbal];

    return (bah << 32) + bal;
}

static inline uint64_t
e1000e_ring_head_descr(E1000ECore *core, const E1000E_RingInfo *r)
{
    return e1000e_ring_base(core, r) + E1000_RING_DESC_LEN * core->mac[r->dh];
}

static inline void
e1000e_ring_advance(E1000ECore *core, const E1000E_RingInfo *r, uint32_t count)
{
    core->mac[r->dh] += count;

    if (core->mac[r->dh] * E1000_RING_DESC_LEN >= core->mac[r->dlen]) {
        core->mac[r->dh] = 0;
    }
}

static inline uint32_t
e1000e_ring_free_descr_num(E1000ECore *core, const E1000E_RingInfo *r)
{
    trace_e1000e_ring_free_space(r->idx, core->mac[r->dlen],
                                 core->mac[r->dh], core->mac[r->dt]);

    if (core->mac[r->dh] <= core->mac[r->dt]) {
        return core->mac[r->dt] - core->mac[r->dh];
    }

    if (core->mac[r->dh] > core->mac[r->dt]) {
        return core->mac[r->dlen] / E1000_RING_DESC_LEN +
               core->mac[r->dt] - core->mac[r->dh];
    }

    g_assert_not_reached();
    return 0;
}

static inline bool
e1000e_ring_enabled(E1000ECore *core, const E1000E_RingInfo *r)
{
    return core->mac[r->dlen] > 0;
}

static inline uint32_t
e1000e_ring_len(E1000ECore *core, const E1000E_RingInfo *r)
{
    return core->mac[r->dlen];
}

typedef struct E1000E_TxRing_st {
    const E1000E_RingInfo *i;
    struct e1000e_tx *tx;
} E1000E_TxRing;

static inline int
e1000e_mq_queue_idx(int base_reg_idx, int reg_idx)
{
    return (reg_idx - base_reg_idx) / (0x100 >> 2);
}

static inline void
e1000e_tx_ring_init(E1000ECore *core, E1000E_TxRing *txr, int idx)
{
    static const E1000E_RingInfo i[E1000E_NUM_QUEUES] = {
        { TDBAH,  TDBAL,  TDLEN,  TDH,  TDT,  0 },
        { TDBAH1, TDBAL1, TDLEN1, TDH1, TDT1, 1 }
    };

    assert(idx < ARRAY_SIZE(i));

    txr->i = &i[idx];
    txr->tx = &core->tx[idx];
}

typedef struct E1000E_RxRing_st {
    const E1000E_RingInfo *i;
} E1000E_RxRing;

static inline void
e1000e_rx_ring_init(E1000ECore *core, E1000E_RxRing *rxr, int idx)
{
    static const E1000E_RingInfo i[E1000E_NUM_QUEUES] = {
        { RDBAH0, RDBAL0, RDLEN0, RDH0, RDT0, 0 },
        { RDBAH1, RDBAL1, RDLEN1, RDH1, RDT1, 1 }
    };

    assert(idx < ARRAY_SIZE(i));

    rxr->i = &i[idx];
}

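/*
 * Ring arithmetic example for e1000e_ring_free_descr_num() above:
 * with DLEN = 4096 (256 16-byte descriptors), DH = 250 and DT = 10,
 * the head has wrapped past the tail, so the free count is
 * 4096 / 16 + 10 - 250 = 16 descriptors.
 */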
static void
e1000e_start_xmit(E1000ECore *core, const E1000E_TxRing *txr)
{
    dma_addr_t base;
    struct e1000_tx_desc desc;
    bool ide = false;
    const E1000E_RingInfo *txi = txr->i;
    uint32_t cause = E1000_ICS_TXQE;

    if (!(core->mac[TCTL] & E1000_TCTL_EN)) {
        trace_e1000e_tx_disabled();
        return;
    }

    while (!e1000e_ring_empty(core, txi)) {
        base = e1000e_ring_head_descr(core, txi);

        pci_dma_read(core->owner, base, &desc, sizeof(desc));

        trace_e1000e_tx_descr((void *)(intptr_t)desc.buffer_addr,
                              desc.lower.data, desc.upper.data);

        e1000e_process_tx_desc(core, txr->tx, &desc, txi->idx);
        cause |= e1000e_txdesc_writeback(core, base, &desc, &ide, txi->idx);

        e1000e_ring_advance(core, txi, 1);
    }

    if (!ide || !e1000e_intrmgr_delay_tx_causes(core, &cause)) {
        e1000e_set_interrupt_cause(core, cause);
    }
}

static bool
e1000e_has_rxbufs(E1000ECore *core, const E1000E_RingInfo *r,
                  size_t total_size)
{
    uint32_t bufs = e1000e_ring_free_descr_num(core, r);

    trace_e1000e_rx_has_buffers(r->idx, bufs, total_size,
                                core->rx_desc_buf_size);

    return total_size <= bufs / (core->rx_desc_len / E1000_MIN_RX_DESC_LEN) *
                         core->rx_desc_buf_size;
}

void
e1000e_start_recv(E1000ECore *core)
{
    int i;

    trace_e1000e_rx_start_recv();

    for (i = 0; i <= core->max_queue_num; i++) {
        qemu_flush_queued_packets(qemu_get_subqueue(core->owner_nic, i));
    }
}

bool
e1000e_can_receive(E1000ECore *core)
{
    int i;

    if (!e1000x_rx_ready(core->owner, core->mac)) {
        return false;
    }

    for (i = 0; i < E1000E_NUM_QUEUES; i++) {
        E1000E_RxRing rxr;

        e1000e_rx_ring_init(core, &rxr, i);
        if (e1000e_ring_enabled(core, rxr.i) &&
            e1000e_has_rxbufs(core, rxr.i, 1)) {
            trace_e1000e_rx_can_recv();
            return true;
        }
    }

    trace_e1000e_rx_can_recv_rings_full();
    return false;
}

ssize_t
e1000e_receive(E1000ECore *core, const uint8_t *buf, size_t size)
{
    const struct iovec iov = {
        .iov_base = (uint8_t *)buf,
        .iov_len = size
    };

    return e1000e_receive_iov(core, &iov, 1);
}

static inline bool
e1000e_rx_l3_cso_enabled(E1000ECore *core)
{
    return !!(core->mac[RXCSUM] & E1000_RXCSUM_IPOFLD);
}

static inline bool
e1000e_rx_l4_cso_enabled(E1000ECore *core)
{
    return !!(core->mac[RXCSUM] & E1000_RXCSUM_TUOFLD);
}

static bool
e1000e_receive_filter(E1000ECore *core, const uint8_t *buf, int size)
{
    uint32_t rctl = core->mac[RCTL];

    if (e1000x_is_vlan_packet(buf, core->mac[VET]) &&
        e1000x_vlan_rx_filter_enabled(core->mac)) {
        uint16_t vid = lduw_be_p(&PKT_GET_VLAN_HDR(buf)->h_tci);
        uint32_t vfta =
            ldl_le_p((uint32_t *)(core->mac + VFTA) +
                     ((vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK));
        if ((vfta & (1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK))) == 0) {
            trace_e1000e_rx_flt_vlan_mismatch(vid);
            return false;
        } else {
            trace_e1000e_rx_flt_vlan_match(vid);
        }
    }

    switch (net_rx_pkt_get_packet_type(core->rx_pkt)) {
    case ETH_PKT_UCAST:
        if (rctl & E1000_RCTL_UPE) {
            return true; /* promiscuous ucast */
        }
        break;

    case ETH_PKT_BCAST:
        if (rctl & E1000_RCTL_BAM) {
            return true; /* broadcast enabled */
        }
        break;

    case ETH_PKT_MCAST:
        if (rctl & E1000_RCTL_MPE) {
            return true; /* promiscuous mcast */
        }
        break;

    default:
        g_assert_not_reached();
    }

    return e1000x_rx_group_filter(core->mac, buf);
}

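/*
 * Descriptor read helpers: extract the guest buffer address(es) from
 * whichever RX descriptor format is currently configured.  Packet
 * split descriptors carry up to MAX_PS_BUFFERS addresses; the other
 * formats use buffer 0 only.
 */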
static inline void
e1000e_read_lgcy_rx_descr(E1000ECore *core, struct e1000_rx_desc *desc,
                          hwaddr *buff_addr)
{
    *buff_addr = le64_to_cpu(desc->buffer_addr);
}

static inline void
e1000e_read_ext_rx_descr(E1000ECore *core, union e1000_rx_desc_extended *desc,
                         hwaddr *buff_addr)
{
    *buff_addr = le64_to_cpu(desc->read.buffer_addr);
}

static inline void
e1000e_read_ps_rx_descr(E1000ECore *core,
                        union e1000_rx_desc_packet_split *desc,
                        hwaddr buff_addr[MAX_PS_BUFFERS])
{
    int i;

    for (i = 0; i < MAX_PS_BUFFERS; i++) {
        buff_addr[i] = le64_to_cpu(desc->read.buffer_addr[i]);
    }

    trace_e1000e_rx_desc_ps_read(buff_addr[0], buff_addr[1],
                                 buff_addr[2], buff_addr[3]);
}

static inline void
e1000e_read_rx_descr(E1000ECore *core, union e1000_rx_desc_union *desc,
                     hwaddr buff_addr[MAX_PS_BUFFERS])
{
    if (e1000e_rx_use_legacy_descriptor(core)) {
        e1000e_read_lgcy_rx_descr(core, &desc->legacy, &buff_addr[0]);
        buff_addr[1] = buff_addr[2] = buff_addr[3] = 0;
    } else {
        if (core->mac[RCTL] & E1000_RCTL_DTYP_PS) {
            e1000e_read_ps_rx_descr(core, &desc->packet_split, buff_addr);
        } else {
            e1000e_read_ext_rx_descr(core, &desc->extended, &buff_addr[0]);
            buff_addr[1] = buff_addr[2] = buff_addr[3] = 0;
        }
    }
}

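/*
 * Software checksum verification is the fallback used when a packet
 * arrives without validated checksum state in its virtio-net header;
 * the result feeds the IPCS/TCPCS status bits and IPE/TCPE error bits
 * of the descriptor write-back.
 */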
static void
e1000e_verify_csum_in_sw(E1000ECore *core,
                         struct NetRxPkt *pkt,
                         uint32_t *status_flags,
                         EthL4HdrProto l4hdr_proto)
{
    bool csum_valid;
    uint32_t csum_error;

    if (e1000e_rx_l3_cso_enabled(core)) {
        if (!net_rx_pkt_validate_l3_csum(pkt, &csum_valid)) {
            trace_e1000e_rx_metadata_l3_csum_validation_failed();
        } else {
            csum_error = csum_valid ? 0 : E1000_RXDEXT_STATERR_IPE;
            *status_flags |= E1000_RXD_STAT_IPCS | csum_error;
        }
    } else {
        trace_e1000e_rx_metadata_l3_cso_disabled();
    }

    if (!e1000e_rx_l4_cso_enabled(core)) {
        trace_e1000e_rx_metadata_l4_cso_disabled();
        return;
    }

    if (!net_rx_pkt_validate_l4_csum(pkt, &csum_valid)) {
        trace_e1000e_rx_metadata_l4_csum_validation_failed();
        return;
    }

    csum_error = csum_valid ? 0 : E1000_RXDEXT_STATERR_TCPE;
    *status_flags |= E1000_RXD_STAT_TCPCS | csum_error;

    if (l4hdr_proto == ETH_L4_HDR_PROTO_UDP) {
        *status_flags |= E1000_RXD_STAT_UDPCS;
    }
}

static inline bool
e1000e_is_tcp_ack(E1000ECore *core, struct NetRxPkt *rx_pkt)
{
    if (!net_rx_pkt_is_tcp_ack(rx_pkt)) {
        return false;
    }

    if (core->mac[RFCTL] & E1000_RFCTL_ACK_DATA_DIS) {
        return !net_rx_pkt_has_tcp_data(rx_pkt);
    }

    return true;
}

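/*
 * Build the write-back metadata shared by all RX descriptor formats:
 * DD/EOP status, VLAN tag, RSS hash or IP identification (mutually
 * exclusive, selected by RXCSUM.PCSD), packet type, and checksum
 * offload status bits.
 */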
static void
e1000e_build_rx_metadata(E1000ECore *core,
                         struct NetRxPkt *pkt,
                         bool is_eop,
                         const E1000E_RSSInfo *rss_info,
                         uint32_t *rss, uint32_t *mrq,
                         uint32_t *status_flags,
                         uint16_t *ip_id,
                         uint16_t *vlan_tag)
{
    struct virtio_net_hdr *vhdr;
    bool hasip4, hasip6;
    EthL4HdrProto l4hdr_proto;
    uint32_t pkt_type;

    *status_flags = E1000_RXD_STAT_DD;

    /* No additional metadata needed for non-EOP descriptors */
    if (!is_eop) {
        goto func_exit;
    }

    *status_flags |= E1000_RXD_STAT_EOP;

    net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);
    trace_e1000e_rx_metadata_protocols(hasip4, hasip6, l4hdr_proto);

    /* VLAN state */
    if (net_rx_pkt_is_vlan_stripped(pkt)) {
        *status_flags |= E1000_RXD_STAT_VP;
        *vlan_tag = cpu_to_le16(net_rx_pkt_get_vlan_tag(pkt));
        trace_e1000e_rx_metadata_vlan(*vlan_tag);
    }

    /* Packet parsing results */
    if ((core->mac[RXCSUM] & E1000_RXCSUM_PCSD) != 0) {
        if (rss_info->enabled) {
            *rss = cpu_to_le32(rss_info->hash);
            *mrq = cpu_to_le32(rss_info->type | (rss_info->queue << 8));
            trace_e1000e_rx_metadata_rss(*rss, *mrq);
        }
    } else if (hasip4) {
        *status_flags |= E1000_RXD_STAT_IPIDV;
        *ip_id = cpu_to_le16(net_rx_pkt_get_ip_id(pkt));
        trace_e1000e_rx_metadata_ip_id(*ip_id);
    }

    if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP && e1000e_is_tcp_ack(core, pkt)) {
        *status_flags |= E1000_RXD_STAT_ACK;
        trace_e1000e_rx_metadata_ack();
    }

    if (hasip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_DIS)) {
        trace_e1000e_rx_metadata_ipv6_filtering_disabled();
        pkt_type = E1000_RXD_PKT_MAC;
    } else if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP ||
               l4hdr_proto == ETH_L4_HDR_PROTO_UDP) {
        pkt_type = hasip4 ? E1000_RXD_PKT_IP4_XDP : E1000_RXD_PKT_IP6_XDP;
    } else if (hasip4 || hasip6) {
        pkt_type = hasip4 ? E1000_RXD_PKT_IP4 : E1000_RXD_PKT_IP6;
    } else {
        pkt_type = E1000_RXD_PKT_MAC;
    }

    *status_flags |= E1000_RXD_PKT_TYPE(pkt_type);
    trace_e1000e_rx_metadata_pkt_type(pkt_type);

    /* RX CSO information */
    if (hasip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_XSUM_DIS)) {
        trace_e1000e_rx_metadata_ipv6_sum_disabled();
        goto func_exit;
    }

    vhdr = net_rx_pkt_get_vhdr(pkt);

    if (!(vhdr->flags & VIRTIO_NET_HDR_F_DATA_VALID) &&
        !(vhdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) {
        trace_e1000e_rx_metadata_virthdr_no_csum_info();
        e1000e_verify_csum_in_sw(core, pkt, status_flags, l4hdr_proto);
        goto func_exit;
    }

    if (e1000e_rx_l3_cso_enabled(core)) {
        *status_flags |= hasip4 ? E1000_RXD_STAT_IPCS : 0;
    } else {
        trace_e1000e_rx_metadata_l3_cso_disabled();
    }

    if (e1000e_rx_l4_cso_enabled(core)) {
        switch (l4hdr_proto) {
        case ETH_L4_HDR_PROTO_TCP:
            *status_flags |= E1000_RXD_STAT_TCPCS;
            break;

        case ETH_L4_HDR_PROTO_UDP:
            *status_flags |= E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS;
            break;

        default:
            break;
        }
    } else {
        trace_e1000e_rx_metadata_l4_cso_disabled();
    }

    trace_e1000e_rx_metadata_status_flags(*status_flags);

func_exit:
    *status_flags = cpu_to_le32(*status_flags);
}

static inline void
e1000e_write_lgcy_rx_descr(E1000ECore *core, struct e1000_rx_desc *desc,
                           struct NetRxPkt *pkt,
                           const E1000E_RSSInfo *rss_info,
                           uint16_t length)
{
    uint32_t status_flags, rss, mrq;
    uint16_t ip_id;

    assert(!rss_info->enabled);

    desc->length = cpu_to_le16(length);
    desc->csum = 0;

    e1000e_build_rx_metadata(core, pkt, pkt != NULL,
                             rss_info,
                             &rss, &mrq,
                             &status_flags, &ip_id,
                             &desc->special);
    desc->errors = (uint8_t) (le32_to_cpu(status_flags) >> 24);
    desc->status = (uint8_t) le32_to_cpu(status_flags);
}

static inline void
e1000e_write_ext_rx_descr(E1000ECore *core, union e1000_rx_desc_extended *desc,
                          struct NetRxPkt *pkt,
                          const E1000E_RSSInfo *rss_info,
                          uint16_t length)
{
    memset(&desc->wb, 0, sizeof(desc->wb));

    desc->wb.upper.length = cpu_to_le16(length);

    e1000e_build_rx_metadata(core, pkt, pkt != NULL,
                             rss_info,
                             &desc->wb.lower.hi_dword.rss,
                             &desc->wb.lower.mrq,
                             &desc->wb.upper.status_error,
                             &desc->wb.lower.hi_dword.csum_ip.ip_id,
                             &desc->wb.upper.vlan);
}

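/*
 * Packet split write-back reports the header length in length0 and
 * the per-page payload lengths separately; HDRSP is set only when a
 * header was actually split out.
 */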
static inline void
e1000e_write_ps_rx_descr(E1000ECore *core,
                         union e1000_rx_desc_packet_split *desc,
                         struct NetRxPkt *pkt,
                         const E1000E_RSSInfo *rss_info,
                         size_t ps_hdr_len,
                         uint16_t(*written)[MAX_PS_BUFFERS])
{
    int i;

    memset(&desc->wb, 0, sizeof(desc->wb));

    desc->wb.middle.length0 = cpu_to_le16((*written)[0]);

    for (i = 0; i < PS_PAGE_BUFFERS; i++) {
        desc->wb.upper.length[i] = cpu_to_le16((*written)[i + 1]);
    }

    e1000e_build_rx_metadata(core, pkt, pkt != NULL,
                             rss_info,
                             &desc->wb.lower.hi_dword.rss,
                             &desc->wb.lower.mrq,
                             &desc->wb.middle.status_error,
                             &desc->wb.lower.hi_dword.csum_ip.ip_id,
                             &desc->wb.middle.vlan);

    desc->wb.upper.header_status =
        cpu_to_le16(ps_hdr_len | (ps_hdr_len ? E1000_RXDPS_HDRSTAT_HDRSP : 0));

    trace_e1000e_rx_desc_ps_write((*written)[0], (*written)[1],
                                  (*written)[2], (*written)[3]);
}

static inline void
e1000e_write_rx_descr(E1000ECore *core, union e1000_rx_desc_union *desc,
                      struct NetRxPkt *pkt, const E1000E_RSSInfo *rss_info,
                      size_t ps_hdr_len, uint16_t(*written)[MAX_PS_BUFFERS])
{
    if (e1000e_rx_use_legacy_descriptor(core)) {
        assert(ps_hdr_len == 0);
        e1000e_write_lgcy_rx_descr(core, &desc->legacy, pkt, rss_info,
                                   (*written)[0]);
    } else {
        if (core->mac[RCTL] & E1000_RCTL_DTYP_PS) {
            e1000e_write_ps_rx_descr(core, &desc->packet_split, pkt, rss_info,
                                     ps_hdr_len, written);
        } else {
            assert(ps_hdr_len == 0);
            e1000e_write_ext_rx_descr(core, &desc->extended, pkt, rss_info,
                                      (*written)[0]);
        }
    }
}

/*
 * Write the descriptor back with DD cleared first, then set DD with a
 * separate write of the status field, so the guest never observes DD
 * before the rest of the write-back has landed.
 */
static inline void
e1000e_pci_dma_write_rx_desc(E1000ECore *core, dma_addr_t addr,
                             union e1000_rx_desc_union *desc, dma_addr_t len)
{
    PCIDevice *dev = core->owner;

    if (e1000e_rx_use_legacy_descriptor(core)) {
        struct e1000_rx_desc *d = &desc->legacy;
        size_t offset = offsetof(struct e1000_rx_desc, status);
        uint8_t status = d->status;

        d->status &= ~E1000_RXD_STAT_DD;
        pci_dma_write(dev, addr, desc, len);

        if (status & E1000_RXD_STAT_DD) {
            d->status = status;
            pci_dma_write(dev, addr + offset, &status, sizeof(status));
        }
    } else {
        if (core->mac[RCTL] & E1000_RCTL_DTYP_PS) {
            union e1000_rx_desc_packet_split *d = &desc->packet_split;
            size_t offset = offsetof(union e1000_rx_desc_packet_split,
                                     wb.middle.status_error);
            uint32_t status = d->wb.middle.status_error;

            d->wb.middle.status_error &= ~E1000_RXD_STAT_DD;
            pci_dma_write(dev, addr, desc, len);

            if (status & E1000_RXD_STAT_DD) {
                d->wb.middle.status_error = status;
                pci_dma_write(dev, addr + offset, &status, sizeof(status));
            }
        } else {
            union e1000_rx_desc_extended *d = &desc->extended;
            size_t offset = offsetof(union e1000_rx_desc_extended,
                                     wb.upper.status_error);
            uint32_t status = d->wb.upper.status_error;

            d->wb.upper.status_error &= ~E1000_RXD_STAT_DD;
            pci_dma_write(dev, addr, desc, len);

            if (status & E1000_RXD_STAT_DD) {
                d->wb.upper.status_error = status;
                pci_dma_write(dev, addr + offset, &status, sizeof(status));
            }
        }
    }
}

typedef struct e1000e_ba_state_st {
    uint16_t written[MAX_PS_BUFFERS];
    uint8_t cur_idx;
} e1000e_ba_state;

static inline void
e1000e_write_hdr_to_rx_buffers(E1000ECore *core,
                               hwaddr ba[MAX_PS_BUFFERS],
                               e1000e_ba_state *bastate,
                               const char *data,
                               dma_addr_t data_len)
{
    assert(data_len <= core->rxbuf_sizes[0] - bastate->written[0]);

    pci_dma_write(core->owner, ba[0] + bastate->written[0], data, data_len);
    bastate->written[0] += data_len;

    bastate->cur_idx = 1;
}

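/*
 * Payload scatter loop: fills the current guest buffer, then moves to
 * the next address in ba[] once the configured per-buffer size is
 * exhausted.  With non-split descriptors only ba[0] is ever used.
 */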
static void
e1000e_write_to_rx_buffers(E1000ECore *core,
                           hwaddr ba[MAX_PS_BUFFERS],
                           e1000e_ba_state *bastate,
                           const char *data,
                           dma_addr_t data_len)
{
    while (data_len > 0) {
        uint32_t cur_buf_len = core->rxbuf_sizes[bastate->cur_idx];
        uint32_t cur_buf_bytes_left = cur_buf_len -
                                      bastate->written[bastate->cur_idx];
        uint32_t bytes_to_write = MIN(data_len, cur_buf_bytes_left);

        trace_e1000e_rx_desc_buff_write(bastate->cur_idx,
                                        ba[bastate->cur_idx],
                                        bastate->written[bastate->cur_idx],
                                        data,
                                        bytes_to_write);

        pci_dma_write(core->owner,
                      ba[bastate->cur_idx] + bastate->written[bastate->cur_idx],
                      data, bytes_to_write);

        bastate->written[bastate->cur_idx] += bytes_to_write;
        data += bytes_to_write;
        data_len -= bytes_to_write;

        if (bastate->written[bastate->cur_idx] == cur_buf_len) {
            bastate->cur_idx++;
        }

        assert(bastate->cur_idx < MAX_PS_BUFFERS);
    }
}

static void
e1000e_update_rx_stats(E1000ECore *core,
                       size_t data_size,
                       size_t data_fcs_size)
{
    e1000x_update_rx_total_stats(core->mac, data_size, data_fcs_size);

    switch (net_rx_pkt_get_packet_type(core->rx_pkt)) {
    case ETH_PKT_BCAST:
        e1000x_inc_reg_if_not_full(core->mac, BPRC);
        break;

    case ETH_PKT_MCAST:
        e1000x_inc_reg_if_not_full(core->mac, MPRC);
        break;

    default:
        break;
    }
}

static inline bool
e1000e_rx_descr_threshold_hit(E1000ECore *core, const E1000E_RingInfo *rxi)
{
    return e1000e_ring_free_descr_num(core, rxi) ==
           e1000e_ring_len(core, rxi) >> core->rxbuf_min_shift;
}

static bool
e1000e_do_ps(E1000ECore *core, struct NetRxPkt *pkt, size_t *hdr_len)
{
    bool hasip4, hasip6;
    EthL4HdrProto l4hdr_proto;
    bool fragment;

    if (!e1000e_rx_use_ps_descriptor(core)) {
        return false;
    }

    net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);

    if (hasip4) {
        fragment = net_rx_pkt_get_ip4_info(pkt)->fragment;
    } else if (hasip6) {
        fragment = net_rx_pkt_get_ip6_info(pkt)->fragment;
    } else {
        return false;
    }

    if (fragment && (core->mac[RFCTL] & E1000_RFCTL_IPFRSP_DIS)) {
        return false;
    }

    if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP ||
        l4hdr_proto == ETH_L4_HDR_PROTO_UDP) {
        *hdr_len = net_rx_pkt_get_l5_hdr_offset(pkt);
    } else {
        *hdr_len = net_rx_pkt_get_l4_hdr_offset(pkt);
    }

    if ((*hdr_len > core->rxbuf_sizes[0]) ||
        (*hdr_len > net_rx_pkt_get_total_len(pkt))) {
        return false;
    }

    return true;
}

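/*
 * Main RX DMA loop: a packet may span several descriptors.  Each
 * iteration consumes one descriptor, copies up to rx_desc_buf_size
 * bytes of packet data (plus simulated FCS bytes at the end), writes
 * the descriptor back and advances the ring head.
 */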
static void
e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
                             const E1000E_RxRing *rxr,
                             const E1000E_RSSInfo *rss_info)
{
    PCIDevice *d = core->owner;
    dma_addr_t base;
    union e1000_rx_desc_union desc;
    size_t desc_size;
    size_t desc_offset = 0;
    size_t iov_ofs = 0;

    struct iovec *iov = net_rx_pkt_get_iovec(pkt);
    size_t size = net_rx_pkt_get_total_len(pkt);
    size_t total_size = size + e1000x_fcs_len(core->mac);
    const E1000E_RingInfo *rxi;
    size_t ps_hdr_len = 0;
    bool do_ps = e1000e_do_ps(core, pkt, &ps_hdr_len);
    bool is_first = true;

    rxi = rxr->i;

    do {
        hwaddr ba[MAX_PS_BUFFERS];
        e1000e_ba_state bastate = { { 0 } };
        bool is_last = false;

        desc_size = total_size - desc_offset;

        if (desc_size > core->rx_desc_buf_size) {
            desc_size = core->rx_desc_buf_size;
        }

        if (e1000e_ring_empty(core, rxi)) {
            return;
        }

        base = e1000e_ring_head_descr(core, rxi);

        pci_dma_read(d, base, &desc, core->rx_desc_len);

        trace_e1000e_rx_descr(rxi->idx, base, core->rx_desc_len);

        e1000e_read_rx_descr(core, &desc, ba);

        if (ba[0]) {
            if (desc_offset < size) {
                static const uint32_t fcs_pad;
                size_t iov_copy;
                size_t copy_size = size - desc_offset;
                if (copy_size > core->rx_desc_buf_size) {
                    copy_size = core->rx_desc_buf_size;
                }

                /* For PS mode copy the packet header first */
                if (do_ps) {
                    if (is_first) {
                        size_t ps_hdr_copied = 0;
                        do {
                            iov_copy = MIN(ps_hdr_len - ps_hdr_copied,
                                           iov->iov_len - iov_ofs);

                            e1000e_write_hdr_to_rx_buffers(core, ba, &bastate,
                                                           iov->iov_base,
                                                           iov_copy);

                            copy_size -= iov_copy;
                            ps_hdr_copied += iov_copy;

                            iov_ofs += iov_copy;
                            if (iov_ofs == iov->iov_len) {
                                iov++;
                                iov_ofs = 0;
                            }
                        } while (ps_hdr_copied < ps_hdr_len);

                        is_first = false;
                    } else {
                        /*
                         * Leave buffer 0 of each descriptor except the
                         * first one empty, as per spec 7.1.5.1
                         */
                        e1000e_write_hdr_to_rx_buffers(core, ba, &bastate,
                                                       NULL, 0);
                    }
                }

                /* Copy packet payload */
                while (copy_size) {
                    iov_copy = MIN(copy_size, iov->iov_len - iov_ofs);

                    e1000e_write_to_rx_buffers(core, ba, &bastate,
                                               iov->iov_base + iov_ofs,
                                               iov_copy);

                    copy_size -= iov_copy;
                    iov_ofs += iov_copy;
                    if (iov_ofs == iov->iov_len) {
                        iov++;
                        iov_ofs = 0;
                    }
                }

                if (desc_offset + desc_size >= total_size) {
                    /* Simulate FCS checksum presence in the last descriptor */
                    e1000e_write_to_rx_buffers(core, ba, &bastate,
                          (const char *) &fcs_pad, e1000x_fcs_len(core->mac));
                }
            }
        } else {
            /* As per Intel docs, skip descriptors with a null buffer address */
            trace_e1000e_rx_null_descriptor();
        }

        desc_offset += desc_size;
        if (desc_offset >= total_size) {
            is_last = true;
        }

        e1000e_write_rx_descr(core, &desc, is_last ? core->rx_pkt : NULL,
                              rss_info, do_ps ? ps_hdr_len : 0,
                              &bastate.written);
        e1000e_pci_dma_write_rx_desc(core, base, &desc, core->rx_desc_len);

        e1000e_ring_advance(core, rxi,
                            core->rx_desc_len / E1000_MIN_RX_DESC_LEN);

    } while (desc_offset < total_size);

    e1000e_update_rx_stats(core, size, total_size);
}

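/*
 * If the sender deferred the L4 checksum (the virtio-net header has
 * NEEDS_CSUM set), compute it now so the guest never sees a packet
 * with an unfinished checksum.
 */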
static inline void
e1000e_rx_fix_l4_csum(E1000ECore *core, struct NetRxPkt *pkt)
{
    struct virtio_net_hdr *vhdr = net_rx_pkt_get_vhdr(pkt);

    if (vhdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
        net_rx_pkt_fix_l4_csum(pkt);
    }
}

ssize_t
e1000e_receive_iov(E1000ECore *core, const struct iovec *iov, int iovcnt)
{
    return e1000e_receive_internal(core, iov, iovcnt, core->has_vnet);
}

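/*
 * Common receive path for the plain-buffer and iovec entry points as
 * well as for MAC loopback frames re-injected from the TX side.  The
 * flow is: strip the optional virtio-net header, pad runt frames,
 * apply the RX filters, pick a queue via RSS, then DMA the packet
 * into that queue's descriptor ring.
 */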
static ssize_t
e1000e_receive_internal(E1000ECore *core, const struct iovec *iov, int iovcnt,
                        bool has_vnet)
{
    static const int maximum_ethernet_hdr_len = (ETH_HLEN + 4);

    uint32_t n = 0;
    uint8_t min_buf[ETH_ZLEN];
    struct iovec min_iov;
    uint8_t *filter_buf;
    size_t size, orig_size;
    size_t iov_ofs = 0;
    E1000E_RxRing rxr;
    E1000E_RSSInfo rss_info;
    size_t total_size;
    ssize_t retval;
    bool rdmts_hit;

    trace_e1000e_rx_receive_iov(iovcnt);

    if (!e1000x_hw_rx_enabled(core->mac)) {
        return -1;
    }

    /* Pull virtio header in */
    if (has_vnet) {
        net_rx_pkt_set_vhdr_iovec(core->rx_pkt, iov, iovcnt);
        iov_ofs = sizeof(struct virtio_net_hdr);
    } else {
        net_rx_pkt_unset_vhdr(core->rx_pkt);
    }

    filter_buf = iov->iov_base + iov_ofs;
    orig_size = iov_size(iov, iovcnt);
    size = orig_size - iov_ofs;

    /* Pad to minimum Ethernet frame length */
    if (size < sizeof(min_buf)) {
        iov_to_buf(iov, iovcnt, iov_ofs, min_buf, size);
        memset(&min_buf[size], 0, sizeof(min_buf) - size);
        e1000x_inc_reg_if_not_full(core->mac, RUC);
        min_iov.iov_base = filter_buf = min_buf;
        min_iov.iov_len = size = sizeof(min_buf);
        iovcnt = 1;
        iov = &min_iov;
        iov_ofs = 0;
    } else if (iov->iov_len < maximum_ethernet_hdr_len) {
        /* This is very unlikely, but may happen. */
        iov_to_buf(iov, iovcnt, iov_ofs, min_buf, maximum_ethernet_hdr_len);
        filter_buf = min_buf;
    }

    /* Discard oversized packets if !LPE and !SBP. */
    if (e1000x_is_oversized(core->mac, size)) {
        return orig_size;
    }

    net_rx_pkt_set_packet_type(core->rx_pkt,
        get_eth_packet_type(PKT_GET_ETH_HDR(filter_buf)));

    if (!e1000e_receive_filter(core, filter_buf, size)) {
        trace_e1000e_rx_flt_dropped();
        return orig_size;
    }

    net_rx_pkt_attach_iovec_ex(core->rx_pkt, iov, iovcnt, iov_ofs,
                               e1000x_vlan_enabled(core->mac), core->mac[VET]);

    e1000e_rss_parse_packet(core, core->rx_pkt, &rss_info);
    e1000e_rx_ring_init(core, &rxr, rss_info.queue);

    total_size = net_rx_pkt_get_total_len(core->rx_pkt) +
        e1000x_fcs_len(core->mac);

    if (e1000e_has_rxbufs(core, rxr.i, total_size)) {
        e1000e_rx_fix_l4_csum(core, core->rx_pkt);

        e1000e_write_packet_to_guest(core, core->rx_pkt, &rxr, &rss_info);

        retval = orig_size;

        /* Perform small receive detection (RSRPD) */
        if (total_size < core->mac[RSRPD]) {
            n |= E1000_ICS_SRPD;
        }

        /* Perform ACK receive detection */
        if (!(core->mac[RFCTL] & E1000_RFCTL_ACK_DIS) &&
            (e1000e_is_tcp_ack(core, core->rx_pkt))) {
            n |= E1000_ICS_ACK;
        }

        /* Check if receive descriptor minimum threshold hit */
        rdmts_hit = e1000e_rx_descr_threshold_hit(core, rxr.i);
        n |= e1000e_rx_wb_interrupt_cause(core, rxr.i->idx, rdmts_hit);

        trace_e1000e_rx_written_to_guest(rxr.i->idx);
    } else {
        n |= E1000_ICS_RXO;
        retval = 0;

        trace_e1000e_rx_not_written_to_guest(rxr.i->idx);
    }

    if (!e1000e_intrmgr_delay_rx_causes(core, &n)) {
        trace_e1000e_rx_interrupt_set(n);
        e1000e_set_interrupt_cause(core, n);
    } else {
        trace_e1000e_rx_interrupt_delayed(n);
    }

    return retval;
}

static inline bool
e1000e_have_autoneg(E1000ECore *core)
{
    return core->phy[0][MII_BMCR] & MII_BMCR_AUTOEN;
}

static void e1000e_update_flowctl_status(E1000ECore *core)
{
    if (e1000e_have_autoneg(core) &&
        core->phy[0][MII_BMSR] & MII_BMSR_AN_COMP) {
        trace_e1000e_link_autoneg_flowctl(true);
        core->mac[CTRL] |= E1000_CTRL_TFCE | E1000_CTRL_RFCE;
    } else {
        trace_e1000e_link_autoneg_flowctl(false);
    }
}

static inline void
e1000e_link_down(E1000ECore *core)
{
    e1000x_update_regs_on_link_down(core->mac, core->phy[0]);
    e1000e_update_flowctl_status(core);
}

static inline void
e1000e_set_phy_ctrl(E1000ECore *core, int index, uint16_t val)
{
    /* bits 0-5 reserved; MII_BMCR_[ANRESTART,RESET] are self clearing */
    core->phy[0][MII_BMCR] = val & ~(0x3f |
                                     MII_BMCR_RESET |
                                     MII_BMCR_ANRESTART);

    if ((val & MII_BMCR_ANRESTART) &&
        e1000e_have_autoneg(core)) {
        e1000x_restart_autoneg(core->mac, core->phy[0], core->autoneg_timer);
    }
}

static void
e1000e_set_phy_oem_bits(E1000ECore *core, int index, uint16_t val)
{
    core->phy[0][PHY_OEM_BITS] = val & ~BIT(10);

    if (val & BIT(10)) {
        e1000x_restart_autoneg(core->mac, core->phy[0], core->autoneg_timer);
    }
}

static void
e1000e_set_phy_page(E1000ECore *core, int index, uint16_t val)
{
    core->phy[0][PHY_PAGE] = val & PHY_PAGE_RW_MASK;
}

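/*
 * Link status handling: a link-down event updates the registers right
 * away, while link-up goes through the autonegotiation timer first
 * (when autoneg is enabled), mirroring how the real PHY behaves.
 */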
void
e1000e_core_set_link_status(E1000ECore *core)
{
    NetClientState *nc = qemu_get_queue(core->owner_nic);
    uint32_t old_status = core->mac[STATUS];

    trace_e1000e_link_status_changed(nc->link_down ? false : true);

    if (nc->link_down) {
        e1000x_update_regs_on_link_down(core->mac, core->phy[0]);
    } else {
        if (e1000e_have_autoneg(core) &&
            !(core->phy[0][MII_BMSR] & MII_BMSR_AN_COMP)) {
            e1000x_restart_autoneg(core->mac, core->phy[0],
                                   core->autoneg_timer);
        } else {
            e1000x_update_regs_on_link_up(core->mac, core->phy[0]);
            e1000e_start_recv(core);
        }
    }

    if (core->mac[STATUS] != old_status) {
        e1000e_set_interrupt_cause(core, E1000_ICR_LSC);
    }
}

static void
e1000e_set_ctrl(E1000ECore *core, int index, uint32_t val)
{
    trace_e1000e_core_ctrl_write(index, val);

    /* RST is self clearing */
    core->mac[CTRL] = val & ~E1000_CTRL_RST;
    core->mac[CTRL_DUP] = core->mac[CTRL];

    trace_e1000e_link_set_params(
        !!(val & E1000_CTRL_ASDE),
        (val & E1000_CTRL_SPD_SEL) >> E1000_CTRL_SPD_SHIFT,
        !!(val & E1000_CTRL_FRCSPD),
        !!(val & E1000_CTRL_FRCDPX),
        !!(val & E1000_CTRL_RFCE),
        !!(val & E1000_CTRL_TFCE));

    if (val & E1000_CTRL_RST) {
        trace_e1000e_core_ctrl_sw_reset();
        e1000e_reset(core, true);
    }

    if (val & E1000_CTRL_PHY_RST) {
        trace_e1000e_core_ctrl_phy_reset();
        core->mac[STATUS] |= E1000_STATUS_PHYRA;
    }
}

static void
e1000e_set_rfctl(E1000ECore *core, int index, uint32_t val)
{
    trace_e1000e_rx_set_rfctl(val);

    if (!(val & E1000_RFCTL_ISCSI_DIS)) {
        trace_e1000e_wrn_iscsi_filtering_not_supported();
    }

    if (!(val & E1000_RFCTL_NFSW_DIS)) {
        trace_e1000e_wrn_nfsw_filtering_not_supported();
    }

    if (!(val & E1000_RFCTL_NFSR_DIS)) {
        trace_e1000e_wrn_nfsr_filtering_not_supported();
    }

    core->mac[RFCTL] = val;
}

static void
e1000e_calc_per_desc_buf_size(E1000ECore *core)
{
    int i;
    core->rx_desc_buf_size = 0;

    for (i = 0; i < ARRAY_SIZE(core->rxbuf_sizes); i++) {
        core->rx_desc_buf_size += core->rxbuf_sizes[i];
    }
}

static void
e1000e_parse_rxbufsize(E1000ECore *core)
{
    uint32_t rctl = core->mac[RCTL];

    memset(core->rxbuf_sizes, 0, sizeof(core->rxbuf_sizes));

    if (rctl & E1000_RCTL_DTYP_MASK) {
        uint32_t bsize;

        bsize = core->mac[PSRCTL] & E1000_PSRCTL_BSIZE0_MASK;
        core->rxbuf_sizes[0] = (bsize >> E1000_PSRCTL_BSIZE0_SHIFT) * 128;

        bsize = core->mac[PSRCTL] & E1000_PSRCTL_BSIZE1_MASK;
        core->rxbuf_sizes[1] = (bsize >> E1000_PSRCTL_BSIZE1_SHIFT) * 1024;

        bsize = core->mac[PSRCTL] & E1000_PSRCTL_BSIZE2_MASK;
        core->rxbuf_sizes[2] = (bsize >> E1000_PSRCTL_BSIZE2_SHIFT) * 1024;

        bsize = core->mac[PSRCTL] & E1000_PSRCTL_BSIZE3_MASK;
        core->rxbuf_sizes[3] = (bsize >> E1000_PSRCTL_BSIZE3_SHIFT) * 1024;
    } else if (rctl & E1000_RCTL_FLXBUF_MASK) {
        int flxbuf = rctl & E1000_RCTL_FLXBUF_MASK;
        core->rxbuf_sizes[0] = (flxbuf >> E1000_RCTL_FLXBUF_SHIFT) * 1024;
    } else {
        core->rxbuf_sizes[0] = e1000x_rxbufsize(rctl);
    }

    trace_e1000e_rx_desc_buff_sizes(core->rxbuf_sizes[0], core->rxbuf_sizes[1],
                                    core->rxbuf_sizes[2], core->rxbuf_sizes[3]);

    e1000e_calc_per_desc_buf_size(core);
}

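/*
 * Buffer sizing example for e1000e_parse_rxbufsize() above: in packet
 * split mode PSRCTL.BSIZE0 counts in 128-byte units while BSIZE1..3
 * count in 1 KiB units, so BSIZE0 = 2 and BSIZE1 = 4 yield a 256-byte
 * header buffer plus a 4 KiB payload buffer per descriptor.
 */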
static void
e1000e_calc_rxdesclen(E1000ECore *core)
{
    if (e1000e_rx_use_legacy_descriptor(core)) {
        core->rx_desc_len = sizeof(struct e1000_rx_desc);
    } else {
        if (core->mac[RCTL] & E1000_RCTL_DTYP_PS) {
            core->rx_desc_len = sizeof(union e1000_rx_desc_packet_split);
        } else {
            core->rx_desc_len = sizeof(union e1000_rx_desc_extended);
        }
    }
    trace_e1000e_rx_desc_len(core->rx_desc_len);
}

static void
e1000e_set_rx_control(E1000ECore *core, int index, uint32_t val)
{
    core->mac[RCTL] = val;
    trace_e1000e_rx_set_rctl(core->mac[RCTL]);

    if (val & E1000_RCTL_EN) {
        e1000e_parse_rxbufsize(core);
        e1000e_calc_rxdesclen(core);
        core->rxbuf_min_shift = ((val / E1000_RCTL_RDMTS_QUAT) & 3) + 1 +
                                E1000_RING_DESC_LEN_SHIFT;

        e1000e_start_recv(core);
    }
}

static
void(*e1000e_phyreg_writeops[E1000E_PHY_PAGES][E1000E_PHY_PAGE_SIZE])
(E1000ECore *, int, uint16_t) = {
    [0] = {
        [MII_BMCR]     = e1000e_set_phy_ctrl,
        [PHY_PAGE]     = e1000e_set_phy_page,
        [PHY_OEM_BITS] = e1000e_set_phy_oem_bits
    }
};

static inline void
e1000e_clear_ims_bits(E1000ECore *core, uint32_t bits)
{
    trace_e1000e_irq_clear_ims(bits, core->mac[IMS], core->mac[IMS] & ~bits);
    core->mac[IMS] &= ~bits;
}

static inline bool
e1000e_postpone_interrupt(E1000IntrDelayTimer *timer)
{
    if (timer->running) {
        trace_e1000e_irq_postponed_by_xitr(timer->delay_reg << 2);

        return true;
    }

    if (timer->core->mac[timer->delay_reg] != 0) {
        e1000e_intrmgr_rearm_timer(timer);
    }

    return false;
}

static inline bool
e1000e_itr_should_postpone(E1000ECore *core)
{
    return e1000e_postpone_interrupt(&core->itr);
}

static inline bool
e1000e_eitr_should_postpone(E1000ECore *core, int idx)
{
    return e1000e_postpone_interrupt(&core->eitr[idx]);
}

static void
e1000e_msix_notify_one(E1000ECore *core, uint32_t cause, uint32_t int_cfg)
{
    uint32_t effective_eiac;

    if (E1000_IVAR_ENTRY_VALID(int_cfg)) {
        uint32_t vec = E1000_IVAR_ENTRY_VEC(int_cfg);
        if (vec < E1000E_MSIX_VEC_NUM) {
            if (!e1000e_eitr_should_postpone(core, vec)) {
                trace_e1000e_irq_msix_notify_vec(vec);
                msix_notify(core->owner, vec);
            }
        } else {
            trace_e1000e_wrn_msix_vec_wrong(cause, int_cfg);
        }
    } else {
        trace_e1000e_wrn_msix_invalid(cause, int_cfg);
    }

    if (core->mac[CTRL_EXT] & E1000_CTRL_EXT_EIAME) {
        trace_e1000e_irq_iam_clear_eiame(core->mac[IAM], cause);
        core->mac[IAM] &= ~cause;
    }

    trace_e1000e_irq_icr_clear_eiac(core->mac[ICR], core->mac[EIAC]);

    effective_eiac = core->mac[EIAC] & cause;

    core->mac[ICR] &= ~effective_eiac;
    core->msi_causes_pending &= ~effective_eiac;

    if (!(core->mac[CTRL_EXT] & E1000_CTRL_EXT_IAME)) {
        core->mac[IMS] &= ~effective_eiac;
    }
}

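/*
 * MSI-X cause fan-out: each interrupt cause is routed to the vector
 * programmed in the matching IVAR entry, and delivery of each vector
 * is throttled independently by its EITR timer.
 */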
    if (causes & E1000_ICR_OTHER) {
        e1000e_msix_notify_one(core, E1000_ICR_OTHER,
                               E1000_IVAR_OTHER(core->mac[IVAR]));
    }
}

static void
e1000e_msix_clear_one(E1000ECore *core, uint32_t cause, uint32_t int_cfg)
{
    if (E1000_IVAR_ENTRY_VALID(int_cfg)) {
        uint32_t vec = E1000_IVAR_ENTRY_VEC(int_cfg);
        if (vec < E1000E_MSIX_VEC_NUM) {
            trace_e1000e_irq_msix_pending_clearing(cause, int_cfg, vec);
            msix_clr_pending(core->owner, vec);
        } else {
            trace_e1000e_wrn_msix_vec_wrong(cause, int_cfg);
        }
    } else {
        trace_e1000e_wrn_msix_invalid(cause, int_cfg);
    }
}

static void
e1000e_msix_clear(E1000ECore *core, uint32_t causes)
{
    if (causes & E1000_ICR_RXQ0) {
        e1000e_msix_clear_one(core, E1000_ICR_RXQ0,
                              E1000_IVAR_RXQ0(core->mac[IVAR]));
    }

    if (causes & E1000_ICR_RXQ1) {
        e1000e_msix_clear_one(core, E1000_ICR_RXQ1,
                              E1000_IVAR_RXQ1(core->mac[IVAR]));
    }

    if (causes & E1000_ICR_TXQ0) {
        e1000e_msix_clear_one(core, E1000_ICR_TXQ0,
                              E1000_IVAR_TXQ0(core->mac[IVAR]));
    }

    if (causes & E1000_ICR_TXQ1) {
        e1000e_msix_clear_one(core, E1000_ICR_TXQ1,
                              E1000_IVAR_TXQ1(core->mac[IVAR]));
    }

    if (causes & E1000_ICR_OTHER) {
        e1000e_msix_clear_one(core, E1000_ICR_OTHER,
                              E1000_IVAR_OTHER(core->mac[IVAR]));
    }
}

static inline void
e1000e_fix_icr_asserted(E1000ECore *core)
{
    core->mac[ICR] &= ~E1000_ICR_ASSERTED;
    if (core->mac[ICR]) {
        core->mac[ICR] |= E1000_ICR_ASSERTED;
    }

    trace_e1000e_irq_fix_icr_asserted(core->mac[ICR]);
}

static void
e1000e_send_msi(E1000ECore *core, bool msix)
{
    uint32_t causes = core->mac[ICR] & core->mac[IMS] & ~E1000_ICR_ASSERTED;

    core->msi_causes_pending &= causes;
    causes ^= core->msi_causes_pending;
    if (causes == 0) {
        return;
    }
    core->msi_causes_pending |= causes;

    if (msix) {
        e1000e_msix_notify(core, causes);
    } else {
        if (!e1000e_itr_should_postpone(core)) {
            trace_e1000e_irq_msi_notify(causes);
            msi_notify(core->owner, 0);
        }
    }
}

static void
e1000e_update_interrupt_state(E1000ECore *core)
{
    bool interrupts_pending;
    bool is_msix = msix_enabled(core->owner);

    /* Set ICR[OTHER] for MSI-X */
    if (is_msix) {
        if (core->mac[ICR] & E1000_ICR_OTHER_CAUSES) {
            core->mac[ICR] |= E1000_ICR_OTHER;
            trace_e1000e_irq_add_msi_other(core->mac[ICR]);
        }
    }

    e1000e_fix_icr_asserted(core);

    /*
     * Make sure ICR and ICS registers have the same value.
     * The spec says that the ICS register is write-only.  However, in
     * practice, on real hardware ICS is readable, and for reads it has
     * the same value as ICR (except that ICS does not have the
     * clear-on-read behaviour of ICR).
     *
     * The VxWorks PRO/1000 driver uses this behaviour.
     */
    core->mac[ICS] = core->mac[ICR];

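    /*
     * msi_causes_pending records causes that have already been posted as
     * MSI/MSI-X so that e1000e_send_msi() does not post them again; once
     * no enabled cause remains set, the record can be dropped.
     */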
    interrupts_pending = (core->mac[IMS] & core->mac[ICR]) != 0;
    if (!interrupts_pending) {
        core->msi_causes_pending = 0;
    }

    trace_e1000e_irq_pending_interrupts(core->mac[ICR] & core->mac[IMS],
                                        core->mac[ICR], core->mac[IMS]);

    if (is_msix || msi_enabled(core->owner)) {
        if (interrupts_pending) {
            e1000e_send_msi(core, is_msix);
        }
    } else {
        if (interrupts_pending) {
            if (!e1000e_itr_should_postpone(core)) {
                e1000e_raise_legacy_irq(core);
            }
        } else {
            e1000e_lower_legacy_irq(core);
        }
    }
}

static void
e1000e_set_interrupt_cause(E1000ECore *core, uint32_t val)
{
    trace_e1000e_irq_set_cause_entry(val, core->mac[ICR]);

    val |= e1000e_intmgr_collect_delayed_causes(core);
    core->mac[ICR] |= val;

    trace_e1000e_irq_set_cause_exit(val, core->mac[ICR]);

    e1000e_update_interrupt_state(core);
}

static inline void
e1000e_autoneg_timer(void *opaque)
{
    E1000ECore *core = opaque;
    if (!qemu_get_queue(core->owner_nic)->link_down) {
        e1000x_update_regs_on_autoneg_done(core->mac, core->phy[0]);
        e1000e_start_recv(core);

        e1000e_update_flowctl_status(core);
        /* signal link status change to the guest */
        e1000e_set_interrupt_cause(core, E1000_ICR_LSC);
    }
}

static inline uint16_t
e1000e_get_reg_index_with_offset(const uint16_t *mac_reg_access, hwaddr addr)
{
    uint16_t index = (addr & 0x1ffff) >> 2;
    return index + (mac_reg_access[index] & 0xfffe);
}

static const char e1000e_phy_regcap[E1000E_PHY_PAGES][0x20] = {
    [0] = {
        [MII_BMCR]              = PHY_ANYPAGE | PHY_RW,
        [MII_BMSR]              = PHY_ANYPAGE | PHY_R,
        [MII_PHYID1]            = PHY_ANYPAGE | PHY_R,
        [MII_PHYID2]            = PHY_ANYPAGE | PHY_R,
        [MII_ANAR]              = PHY_ANYPAGE | PHY_RW,
        [MII_ANLPAR]            = PHY_ANYPAGE | PHY_R,
        [MII_ANER]              = PHY_ANYPAGE | PHY_R,
        [MII_ANNP]              = PHY_ANYPAGE | PHY_RW,
        [MII_ANLPRNP]           = PHY_ANYPAGE | PHY_R,
        [MII_CTRL1000]          = PHY_ANYPAGE | PHY_RW,
        [MII_STAT1000]          = PHY_ANYPAGE | PHY_R,
        [MII_EXTSTAT]           = PHY_ANYPAGE | PHY_R,
        [PHY_PAGE]              = PHY_ANYPAGE | PHY_RW,

        [PHY_COPPER_CTRL1]      = PHY_RW,
        [PHY_COPPER_STAT1]      = PHY_R,
        [PHY_COPPER_CTRL3]      = PHY_RW,
        [PHY_RX_ERR_CNTR]       = PHY_R,
        [PHY_OEM_BITS]          = PHY_RW,
        [PHY_BIAS_1]            = PHY_RW,
        [PHY_BIAS_2]            = PHY_RW,
        [PHY_COPPER_INT_ENABLE] = PHY_RW,
        [PHY_COPPER_STAT2]      = PHY_R,
        [PHY_COPPER_CTRL2]      = PHY_RW
    },
    [2] = {
        [PHY_MAC_CTRL1]         = PHY_RW,
        [PHY_MAC_INT_ENABLE]    = PHY_RW,
        [PHY_MAC_STAT]          = PHY_R,
        [PHY_MAC_CTRL2]         = PHY_RW
    },
    [3] = {
        [PHY_LED_03_FUNC_CTRL1] = PHY_RW,
        [PHY_LED_03_POL_CTRL]   = PHY_RW,
        [PHY_LED_TIMER_CTRL]    = PHY_RW,
        [PHY_LED_45_CTRL]       = PHY_RW
    },
    [5] = {
        [PHY_1000T_SKEW]        = PHY_R,
        [PHY_1000T_SWAP]        = PHY_R
    },
    [6] = {
        [PHY_CRC_COUNTERS]      = PHY_R
    }
};

static bool
e1000e_phy_reg_check_cap(E1000ECore *core, uint32_t addr,
                         char cap, uint8_t *page)
{
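    /*
     * Registers marked PHY_ANYPAGE are identical on every page and are
     * described by the page 0 table; everything else is looked up on the
     * page currently selected by PHY_PAGE.
     */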
    *page = (e1000e_phy_regcap[0][addr] & PHY_ANYPAGE) ? 0
                                                       : core->phy[0][PHY_PAGE];

    if (*page >= E1000E_PHY_PAGES) {
        return false;
    }

    return e1000e_phy_regcap[*page][addr] & cap;
}

static void
e1000e_phy_reg_write(E1000ECore *core, uint8_t page,
                     uint32_t addr, uint16_t data)
{
    assert(page < E1000E_PHY_PAGES);
    assert(addr < E1000E_PHY_PAGE_SIZE);

    if (e1000e_phyreg_writeops[page][addr]) {
        e1000e_phyreg_writeops[page][addr](core, addr, data);
    } else {
        core->phy[page][addr] = data;
    }
}

static void
e1000e_set_mdic(E1000ECore *core, int index, uint32_t val)
{
    uint32_t data = val & E1000_MDIC_DATA_MASK;
    uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
    uint8_t page;

    if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) { /* phy # */
        val = core->mac[MDIC] | E1000_MDIC_ERROR;
    } else if (val & E1000_MDIC_OP_READ) {
        if (!e1000e_phy_reg_check_cap(core, addr, PHY_R, &page)) {
            trace_e1000e_core_mdic_read_unhandled(page, addr);
            val |= E1000_MDIC_ERROR;
        } else {
            val = (val ^ data) | core->phy[page][addr];
            trace_e1000e_core_mdic_read(page, addr, val);
        }
    } else if (val & E1000_MDIC_OP_WRITE) {
        if (!e1000e_phy_reg_check_cap(core, addr, PHY_W, &page)) {
            trace_e1000e_core_mdic_write_unhandled(page, addr);
            val |= E1000_MDIC_ERROR;
        } else {
            trace_e1000e_core_mdic_write(page, addr, data);
            e1000e_phy_reg_write(core, page, addr, data);
        }
    }
    core->mac[MDIC] = val | E1000_MDIC_READY;

    if (val & E1000_MDIC_INT_EN) {
        e1000e_set_interrupt_cause(core, E1000_ICR_MDAC);
    }
}

static void
e1000e_set_rdt(E1000ECore *core, int index, uint32_t val)
{
    core->mac[index] = val & 0xffff;
    trace_e1000e_rx_set_rdt(e1000e_mq_queue_idx(RDT0, index), val);
    e1000e_start_recv(core);
}

static void
e1000e_set_status(E1000ECore *core, int index, uint32_t val)
{
    if ((val & E1000_STATUS_PHYRA) == 0) {
        core->mac[index] &= ~E1000_STATUS_PHYRA;
    }
}

static void
e1000e_set_ctrlext(E1000ECore *core, int index, uint32_t val)
{
    trace_e1000e_link_set_ext_params(!!(val & E1000_CTRL_EXT_ASDCHK),
                                     !!(val & E1000_CTRL_EXT_SPD_BYPS));

    /* Zero self-clearing bits */
    val &= ~(E1000_CTRL_EXT_ASDCHK | E1000_CTRL_EXT_EE_RST);
    core->mac[CTRL_EXT] = val;
}

static void
e1000e_set_pbaclr(E1000ECore *core, int index, uint32_t val)
{
    int i;

    core->mac[PBACLR] = val & E1000_PBACLR_VALID_MASK;

    if (!msix_enabled(core->owner)) {
        return;
    }

    for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
        if (core->mac[PBACLR] & BIT(i)) {
            msix_clr_pending(core->owner, i);
        }
    }
}

static void
e1000e_set_fcrth(E1000ECore *core, int index, uint32_t val)
{
    core->mac[FCRTH] = val & 0xFFF8;
}

static void
e1000e_set_fcrtl(E1000ECore *core, int index, uint32_t val)
{
    core->mac[FCRTL] = val & 0x8000FFF8;
}
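/*
 * Setters for registers that implement only their low <num> bits;
 * the remaining bits are discarded on write.
 */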
#define E1000E_LOW_BITS_SET_FUNC(num)                                \
    static void                                                      \
    e1000e_set_##num##bit(E1000ECore *core, int index, uint32_t val) \
    {                                                                \
        core->mac[index] = val & (BIT(num) - 1);                     \
    }

E1000E_LOW_BITS_SET_FUNC(4)
E1000E_LOW_BITS_SET_FUNC(6)
E1000E_LOW_BITS_SET_FUNC(11)
E1000E_LOW_BITS_SET_FUNC(12)
E1000E_LOW_BITS_SET_FUNC(13)
E1000E_LOW_BITS_SET_FUNC(16)

static void
e1000e_set_vet(E1000ECore *core, int index, uint32_t val)
{
    core->mac[VET] = val & 0xffff;
    trace_e1000e_vlan_vet(core->mac[VET]);
}

static void
e1000e_set_dlen(E1000ECore *core, int index, uint32_t val)
{
    core->mac[index] = val & E1000_XDLEN_MASK;
}

static void
e1000e_set_dbal(E1000ECore *core, int index, uint32_t val)
{
    core->mac[index] = val & E1000_XDBAL_MASK;
}

static void
e1000e_set_tctl(E1000ECore *core, int index, uint32_t val)
{
    E1000E_TxRing txr;
    core->mac[index] = val;

    if (core->mac[TARC0] & E1000_TARC_ENABLE) {
        e1000e_tx_ring_init(core, &txr, 0);
        e1000e_start_xmit(core, &txr);
    }

    if (core->mac[TARC1] & E1000_TARC_ENABLE) {
        e1000e_tx_ring_init(core, &txr, 1);
        e1000e_start_xmit(core, &txr);
    }
}

static void
e1000e_set_tdt(E1000ECore *core, int index, uint32_t val)
{
    E1000E_TxRing txr;
    int qidx = e1000e_mq_queue_idx(TDT, index);
    uint32_t tarc_reg = (qidx == 0) ? TARC0 : TARC1;

    core->mac[index] = val & 0xffff;

    if (core->mac[tarc_reg] & E1000_TARC_ENABLE) {
        e1000e_tx_ring_init(core, &txr, qidx);
        e1000e_start_xmit(core, &txr);
    }
}

static void
e1000e_set_ics(E1000ECore *core, int index, uint32_t val)
{
    trace_e1000e_irq_write_ics(val);
    e1000e_set_interrupt_cause(core, val);
}
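/*
 * ICR is write-1-to-clear.  When IAME is enabled and an interrupt is
 * currently asserted, a write additionally auto-masks the IAM bits in IMS.
 */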
static void
e1000e_set_icr(E1000ECore *core, int index, uint32_t val)
{
    uint32_t icr = 0;
    if ((core->mac[ICR] & E1000_ICR_ASSERTED) &&
        (core->mac[CTRL_EXT] & E1000_CTRL_EXT_IAME)) {
        trace_e1000e_irq_icr_process_iame();
        e1000e_clear_ims_bits(core, core->mac[IAM]);
    }

    icr = core->mac[ICR] & ~val;
    /*
     * The Windows driver expects the "receive overrun" bit and other
     * bits to be cleared when the "Other" bit (#24) is cleared.
     */
    icr = (val & E1000_ICR_OTHER) ? (icr & ~E1000_ICR_OTHER_CAUSES) : icr;
    trace_e1000e_irq_icr_write(val, core->mac[ICR], icr);
    core->mac[ICR] = icr;
    e1000e_update_interrupt_state(core);
}

static void
e1000e_set_imc(E1000ECore *core, int index, uint32_t val)
{
    trace_e1000e_irq_ims_clear_set_imc(val);
    e1000e_clear_ims_bits(core, val);
    e1000e_update_interrupt_state(core);
}

static void
e1000e_set_ims(E1000ECore *core, int index, uint32_t val)
{
    static const uint32_t ims_ext_mask =
        E1000_IMS_RXQ0 | E1000_IMS_RXQ1 |
        E1000_IMS_TXQ0 | E1000_IMS_TXQ1 |
        E1000_IMS_OTHER;

    static const uint32_t ims_valid_mask =
        E1000_IMS_TXDW | E1000_IMS_TXQE | E1000_IMS_LSC |
        E1000_IMS_RXDMT0 | E1000_IMS_RXO | E1000_IMS_RXT0 |
        E1000_IMS_MDAC | E1000_IMS_TXD_LOW | E1000_IMS_SRPD |
        E1000_IMS_ACK | E1000_IMS_MNG | E1000_IMS_RXQ0 |
        E1000_IMS_RXQ1 | E1000_IMS_TXQ0 | E1000_IMS_TXQ1 |
        E1000_IMS_OTHER;

    uint32_t valid_val = val & ims_valid_mask;

    trace_e1000e_irq_set_ims(val, core->mac[IMS], core->mac[IMS] | valid_val);
    core->mac[IMS] |= valid_val;

    if ((valid_val & ims_ext_mask) &&
        (core->mac[CTRL_EXT] & E1000_CTRL_EXT_PBA_CLR) &&
        msix_enabled(core->owner)) {
        e1000e_msix_clear(core, valid_val);
    }

    if ((valid_val == ims_valid_mask) &&
        (core->mac[CTRL_EXT] & E1000_CTRL_EXT_INT_TIMERS_CLEAR_ENA)) {
        trace_e1000e_irq_fire_all_timers(val);
        e1000e_intrmgr_fire_all_timers(core);
    }

    e1000e_update_interrupt_state(core);
}

static void
e1000e_set_rdtr(E1000ECore *core, int index, uint32_t val)
{
    e1000e_set_16bit(core, index, val);

    if ((val & E1000_RDTR_FPD) && (core->rdtr.running)) {
        trace_e1000e_irq_rdtr_fpd_running();
        e1000e_intrmgr_fire_delayed_interrupts(core);
    } else {
        trace_e1000e_irq_rdtr_fpd_not_running();
    }
}

static void
e1000e_set_tidv(E1000ECore *core, int index, uint32_t val)
{
    e1000e_set_16bit(core, index, val);

    if ((val & E1000_TIDV_FPD) && (core->tidv.running)) {
        trace_e1000e_irq_tidv_fpd_running();
        e1000e_intrmgr_fire_delayed_interrupts(core);
    } else {
        trace_e1000e_irq_tidv_fpd_not_running();
    }
}

static uint32_t
e1000e_mac_readreg(E1000ECore *core, int index)
{
    return core->mac[index];
}

static uint32_t
e1000e_mac_ics_read(E1000ECore *core, int index)
{
    trace_e1000e_irq_read_ics(core->mac[ICS]);
    return core->mac[ICS];
}

static uint32_t
e1000e_mac_ims_read(E1000ECore *core, int index)
{
    trace_e1000e_irq_read_ims(core->mac[IMS]);
    return core->mac[IMS];
}

static uint32_t
e1000e_mac_swsm_read(E1000ECore *core, int index)
{
    uint32_t val = core->mac[SWSM];
    core->mac[SWSM] = val | E1000_SWSM_SMBI;
    return val;
}

static uint32_t
e1000e_mac_itr_read(E1000ECore *core, int index)
{
    return core->itr_guest_value;
}

static uint32_t
e1000e_mac_eitr_read(E1000ECore *core, int index)
{
    return core->eitr_guest_value[index - EITR];
}

static uint32_t
e1000e_mac_icr_read(E1000ECore *core, int index)
{
    uint32_t ret = core->mac[ICR];
    trace_e1000e_irq_icr_read_entry(ret);

    if (core->mac[IMS] == 0) {
        trace_e1000e_irq_icr_clear_zero_ims();
        core->mac[ICR] = 0;
    }

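    /* Without MSI-X, ICR behaves as clear-on-read. */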
    if (!msix_enabled(core->owner)) {
        trace_e1000e_irq_icr_clear_nonmsix_icr_read();
        core->mac[ICR] = 0;
    }

    if ((core->mac[ICR] & E1000_ICR_ASSERTED) &&
        (core->mac[CTRL_EXT] & E1000_CTRL_EXT_IAME)) {
        trace_e1000e_irq_icr_clear_iame();
        core->mac[ICR] = 0;
        trace_e1000e_irq_icr_process_iame();
        e1000e_clear_ims_bits(core, core->mac[IAM]);
    }

    trace_e1000e_irq_icr_read_exit(core->mac[ICR]);
    e1000e_update_interrupt_state(core);
    return ret;
}

static uint32_t
e1000e_mac_read_clr4(E1000ECore *core, int index)
{
    uint32_t ret = core->mac[index];

    core->mac[index] = 0;
    return ret;
}

static uint32_t
e1000e_mac_read_clr8(E1000ECore *core, int index)
{
    uint32_t ret = core->mac[index];

    core->mac[index] = 0;
    core->mac[index - 1] = 0;
    return ret;
}

static uint32_t
e1000e_get_ctrl(E1000ECore *core, int index)
{
    uint32_t val = core->mac[CTRL];

    trace_e1000e_link_read_params(
        !!(val & E1000_CTRL_ASDE),
        (val & E1000_CTRL_SPD_SEL) >> E1000_CTRL_SPD_SHIFT,
        !!(val & E1000_CTRL_FRCSPD),
        !!(val & E1000_CTRL_FRCDPX),
        !!(val & E1000_CTRL_RFCE),
        !!(val & E1000_CTRL_TFCE));

    return val;
}

static uint32_t
e1000e_get_status(E1000ECore *core, int index)
{
    uint32_t res = core->mac[STATUS];

    if (!(core->mac[CTRL] & E1000_CTRL_GIO_MASTER_DISABLE)) {
        res |= E1000_STATUS_GIO_MASTER_ENABLE;
    }

    if (core->mac[CTRL] & E1000_CTRL_FRCDPX) {
        res |= (core->mac[CTRL] & E1000_CTRL_FD) ? E1000_STATUS_FD : 0;
    } else {
        res |= E1000_STATUS_FD;
    }

    if ((core->mac[CTRL] & E1000_CTRL_FRCSPD) ||
        (core->mac[CTRL_EXT] & E1000_CTRL_EXT_SPD_BYPS)) {
        switch (core->mac[CTRL] & E1000_CTRL_SPD_SEL) {
        case E1000_CTRL_SPD_10:
            res |= E1000_STATUS_SPEED_10;
            break;
        case E1000_CTRL_SPD_100:
            res |= E1000_STATUS_SPEED_100;
            break;
        case E1000_CTRL_SPD_1000:
        default:
            res |= E1000_STATUS_SPEED_1000;
            break;
        }
    } else {
        res |= E1000_STATUS_SPEED_1000;
    }

    trace_e1000e_link_status(
        !!(res & E1000_STATUS_LU),
        !!(res & E1000_STATUS_FD),
        (res & E1000_STATUS_SPEED_MASK) >> E1000_STATUS_SPEED_SHIFT,
        (res & E1000_STATUS_ASDV) >> E1000_STATUS_ASDV_SHIFT);

    return res;
}

static uint32_t
e1000e_get_tarc(E1000ECore *core, int index)
{
    return core->mac[index] & ((BIT(11) - 1) |
                               BIT(27)       |
                               BIT(28)       |
                               BIT(29)       |
                               BIT(30));
}

static void
e1000e_mac_writereg(E1000ECore *core, int index, uint32_t val)
{
    core->mac[index] = val;
}

static void
e1000e_mac_setmacaddr(E1000ECore *core, int index, uint32_t val)
{
    uint32_t macaddr[2];

    core->mac[index] = val;

    macaddr[0] = cpu_to_le32(core->mac[RA]);
    macaddr[1] = cpu_to_le32(core->mac[RA + 1]);
    qemu_format_nic_info_str(qemu_get_queue(core->owner_nic),
                             (uint8_t *) macaddr);

    trace_e1000e_mac_set_sw(MAC_ARG(macaddr));
}

static void
e1000e_set_eecd(E1000ECore *core, int index, uint32_t val)
{
    static const uint32_t ro_bits = E1000_EECD_PRES |
                                    E1000_EECD_AUTO_RD |
                                    E1000_EECD_SIZE_EX_MASK;

    core->mac[EECD] = (core->mac[EECD] & ro_bits) | (val & ~ro_bits);
}
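/*
 * EEPROM access through EERD/EEWR completes instantly in this model:
 * the DONE flag is reported together with the result, with no delay.
 */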
static void
e1000e_set_eerd(E1000ECore *core, int index, uint32_t val)
{
    uint32_t addr = (val >> E1000_EERW_ADDR_SHIFT) & E1000_EERW_ADDR_MASK;
    uint32_t flags = 0;
    uint32_t data = 0;

    if ((addr < E1000E_EEPROM_SIZE) && (val & E1000_EERW_START)) {
        data = core->eeprom[addr];
        flags = E1000_EERW_DONE;
    }

    core->mac[EERD] = flags |
                      (addr << E1000_EERW_ADDR_SHIFT) |
                      (data << E1000_EERW_DATA_SHIFT);
}

static void
e1000e_set_eewr(E1000ECore *core, int index, uint32_t val)
{
    uint32_t addr = (val >> E1000_EERW_ADDR_SHIFT) & E1000_EERW_ADDR_MASK;
    uint32_t data = (val >> E1000_EERW_DATA_SHIFT) & E1000_EERW_DATA_MASK;
    uint32_t flags = 0;

    if ((addr < E1000E_EEPROM_SIZE) && (val & E1000_EERW_START)) {
        core->eeprom[addr] = data;
        flags = E1000_EERW_DONE;
    }

    core->mac[EERD] = flags |
                      (addr << E1000_EERW_ADDR_SHIFT) |
                      (data << E1000_EERW_DATA_SHIFT);
}

static void
e1000e_set_rxdctl(E1000ECore *core, int index, uint32_t val)
{
    core->mac[RXDCTL] = core->mac[RXDCTL1] = val;
}

static void
e1000e_set_itr(E1000ECore *core, int index, uint32_t val)
{
    uint32_t interval = val & 0xffff;

    trace_e1000e_irq_itr_set(val);

    core->itr_guest_value = interval;
    core->mac[index] = MAX(interval, E1000E_MIN_XITR);
}

static void
e1000e_set_eitr(E1000ECore *core, int index, uint32_t val)
{
    uint32_t interval = val & 0xffff;
    uint32_t eitr_num = index - EITR;

    trace_e1000e_irq_eitr_set(eitr_num, val);

    core->eitr_guest_value[eitr_num] = interval;
    core->mac[index] = MAX(interval, E1000E_MIN_XITR);
}

static void
e1000e_set_psrctl(E1000ECore *core, int index, uint32_t val)
{
    if (core->mac[RCTL] & E1000_RCTL_DTYP_MASK) {
        if ((val & E1000_PSRCTL_BSIZE0_MASK) == 0) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "e1000e: PSRCTL.BSIZE0 cannot be zero");
            return;
        }

        if ((val & E1000_PSRCTL_BSIZE1_MASK) == 0) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "e1000e: PSRCTL.BSIZE1 cannot be zero");
            return;
        }
    }

    core->mac[PSRCTL] = val;
}

static void
e1000e_update_rx_offloads(E1000ECore *core)
{
    int cso_state = e1000e_rx_l4_cso_enabled(core);

    trace_e1000e_rx_set_cso(cso_state);

    if (core->has_vnet) {
        qemu_set_offload(qemu_get_queue(core->owner_nic)->peer,
                         cso_state, 0, 0, 0, 0);
    }
}

static void
e1000e_set_rxcsum(E1000ECore *core, int index, uint32_t val)
{
    core->mac[RXCSUM] = val;
    e1000e_update_rx_offloads(core);
}

static void
e1000e_set_gcr(E1000ECore *core, int index, uint32_t val)
{
    uint32_t ro_bits = core->mac[GCR] & E1000_GCR_RO_BITS;
    core->mac[GCR] = (val & ~E1000_GCR_RO_BITS) | ro_bits;
}

static uint32_t e1000e_get_systiml(E1000ECore *core, int index)
{
    e1000x_timestamp(core->mac, core->timadj, SYSTIML, SYSTIMH);
    return core->mac[SYSTIML];
}

static uint32_t e1000e_get_rxsatrh(E1000ECore *core, int index)
{
    core->mac[TSYNCRXCTL] &= ~E1000_TSYNCRXCTL_VALID;
    return core->mac[RXSATRH];
}

static uint32_t e1000e_get_txstmph(E1000ECore *core, int index)
{
    core->mac[TSYNCTXCTL] &= ~E1000_TSYNCTXCTL_VALID;
    return core->mac[TXSTMPH];
}
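/*
 * SYSTIM adjustment: TIMINCA programs the clock increment, while writing
 * TIMADJH applies the combined TIMADJH:TIMADJL value as a signed 64-bit
 * offset to the timestamp clock.
 */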
static void e1000e_set_timinca(E1000ECore *core, int index, uint32_t val)
{
    e1000x_set_timinca(core->mac, &core->timadj, val);
}

static void e1000e_set_timadjh(E1000ECore *core, int index, uint32_t val)
{
    core->mac[TIMADJH] = val;
    core->timadj += core->mac[TIMADJL] | ((int64_t)core->mac[TIMADJH] << 32);
}

#define e1000e_getreg(x)    [x] = e1000e_mac_readreg
typedef uint32_t (*readops)(E1000ECore *, int);
static const readops e1000e_macreg_readops[] = {
    e1000e_getreg(PBA),
    e1000e_getreg(WUFC),
    e1000e_getreg(MANC),
    e1000e_getreg(TOTL),
    e1000e_getreg(RDT0),
    e1000e_getreg(RDBAH0),
    e1000e_getreg(TDBAL1),
    e1000e_getreg(RDLEN0),
    e1000e_getreg(RDH1),
    e1000e_getreg(LATECOL),
    e1000e_getreg(SEQEC),
    e1000e_getreg(XONTXC),
    e1000e_getreg(AIT),
    e1000e_getreg(TDFH),
    e1000e_getreg(TDFT),
    e1000e_getreg(TDFHS),
    e1000e_getreg(TDFTS),
    e1000e_getreg(TDFPC),
    e1000e_getreg(WUS),
    e1000e_getreg(PBS),
    e1000e_getreg(RDFH),
    e1000e_getreg(RDFT),
    e1000e_getreg(RDFHS),
    e1000e_getreg(RDFTS),
    e1000e_getreg(RDFPC),
    e1000e_getreg(GORCL),
    e1000e_getreg(MGTPRC),
    e1000e_getreg(EERD),
    e1000e_getreg(EIAC),
    e1000e_getreg(PSRCTL),
    e1000e_getreg(MANC2H),
    e1000e_getreg(RXCSUM),
    e1000e_getreg(GSCL_3),
    e1000e_getreg(GSCN_2),
    e1000e_getreg(RSRPD),
    e1000e_getreg(RDBAL1),
    e1000e_getreg(FCAH),
    e1000e_getreg(FCRTH),
    e1000e_getreg(FLOP),
    e1000e_getreg(FLASHT),
    e1000e_getreg(RXSTMPH),
    e1000e_getreg(TXSTMPL),
    e1000e_getreg(TIMADJL),
    e1000e_getreg(TXDCTL),
    e1000e_getreg(RDH0),
    e1000e_getreg(TDT1),
    e1000e_getreg(TNCRS),
    e1000e_getreg(RJC),
    e1000e_getreg(IAM),
    e1000e_getreg(GSCL_2),
    e1000e_getreg(RDBAH1),
    e1000e_getreg(FLSWDATA),
    e1000e_getreg(TIPG),
    e1000e_getreg(FLMNGCTL),
    e1000e_getreg(FLMNGCNT),
    e1000e_getreg(TSYNCTXCTL),
    e1000e_getreg(EXTCNF_SIZE),
    e1000e_getreg(EXTCNF_CTRL),
    e1000e_getreg(EEMNGDATA),
    e1000e_getreg(CTRL_EXT),
    e1000e_getreg(SYSTIMH),
    e1000e_getreg(EEMNGCTL),
    e1000e_getreg(FLMNGDATA),
    e1000e_getreg(TSYNCRXCTL),
    e1000e_getreg(TDH),
    e1000e_getreg(LEDCTL),
    e1000e_getreg(TCTL),
    e1000e_getreg(TDBAL),
    e1000e_getreg(TDLEN),
    e1000e_getreg(TDH1),
    e1000e_getreg(RADV),
    e1000e_getreg(ECOL),
    e1000e_getreg(DC),
    e1000e_getreg(RLEC),
    e1000e_getreg(XOFFTXC),
    e1000e_getreg(RFC),
    e1000e_getreg(RNBC),
    e1000e_getreg(MGTPTC),
    e1000e_getreg(TIMINCA),
    e1000e_getreg(RXCFGL),
    e1000e_getreg(MFUTP01),
    e1000e_getreg(FACTPS),
    e1000e_getreg(GSCL_1),
    e1000e_getreg(GSCN_0),
    e1000e_getreg(GCR2),
    e1000e_getreg(RDT1),
    e1000e_getreg(PBACLR),
    e1000e_getreg(FCTTV),
    e1000e_getreg(EEWR),
    e1000e_getreg(FLSWCTL),
    e1000e_getreg(RXDCTL1),
    e1000e_getreg(RXSATRL),
    e1000e_getreg(RXUDP),
    e1000e_getreg(TORL),
    e1000e_getreg(TDLEN1),
    e1000e_getreg(MCC),
    e1000e_getreg(WUC),
    e1000e_getreg(EECD),
    e1000e_getreg(MFUTP23),
    e1000e_getreg(RAID),
    e1000e_getreg(FCRTV),
    e1000e_getreg(TXDCTL1),
    e1000e_getreg(RCTL),
    e1000e_getreg(TDT),
    e1000e_getreg(MDIC),
    e1000e_getreg(FCRUC),
    e1000e_getreg(VET),
    e1000e_getreg(RDBAL0),
    e1000e_getreg(TDBAH1),
    e1000e_getreg(RDTR),
    e1000e_getreg(SCC),
    e1000e_getreg(COLC),
    e1000e_getreg(CEXTERR),
    e1000e_getreg(XOFFRXC),
    e1000e_getreg(IPAV),
    e1000e_getreg(GOTCL),
    e1000e_getreg(MGTPDC),
    e1000e_getreg(GCR),
    e1000e_getreg(IVAR),
    e1000e_getreg(POEMB),
    e1000e_getreg(MFVAL),
    e1000e_getreg(FUNCTAG),
    e1000e_getreg(GSCL_4),
    e1000e_getreg(GSCN_3),
    e1000e_getreg(MRQC),
    e1000e_getreg(RDLEN1),
    e1000e_getreg(FCT),
    e1000e_getreg(FLA),
    e1000e_getreg(FLOL),
    e1000e_getreg(RXDCTL),
    e1000e_getreg(RXSTMPL),
    e1000e_getreg(TIMADJH),
    e1000e_getreg(FCRTL),
    e1000e_getreg(TDBAH),
    e1000e_getreg(TADV),
    e1000e_getreg(XONRXC),
    e1000e_getreg(TSCTFC),
    e1000e_getreg(RFCTL),
    e1000e_getreg(GSCN_1),
    e1000e_getreg(FCAL),
    e1000e_getreg(FLSWCNT),

    [TOTH]    = e1000e_mac_read_clr8,
    [GOTCH]   = e1000e_mac_read_clr8,
    [PRC64]   = e1000e_mac_read_clr4,
    [PRC255]  = e1000e_mac_read_clr4,
    [PRC1023] = e1000e_mac_read_clr4,
    [PTC64]   = e1000e_mac_read_clr4,
    [PTC255]  = e1000e_mac_read_clr4,
    [PTC1023] = e1000e_mac_read_clr4,
    [GPRC]    = e1000e_mac_read_clr4,
    [TPT]     = e1000e_mac_read_clr4,
    [RUC]     = e1000e_mac_read_clr4,
    [BPRC]    = e1000e_mac_read_clr4,
    [MPTC]    = e1000e_mac_read_clr4,
    [IAC]     = e1000e_mac_read_clr4,
    [ICR]     = e1000e_mac_icr_read,
    [STATUS]  = e1000e_get_status,
    [TARC0]   = e1000e_get_tarc,
    [ICS]     = e1000e_mac_ics_read,
    [TORH]    = e1000e_mac_read_clr8,
    [GORCH]   = e1000e_mac_read_clr8,
    [PRC127]  = e1000e_mac_read_clr4,
    [PRC511]  = e1000e_mac_read_clr4,
    [PRC1522] = e1000e_mac_read_clr4,
    [PTC127]  = e1000e_mac_read_clr4,
    [PTC511]  = e1000e_mac_read_clr4,
    [PTC1522] = e1000e_mac_read_clr4,
    [GPTC]    = e1000e_mac_read_clr4,
    [TPR]     = e1000e_mac_read_clr4,
    [ROC]     = e1000e_mac_read_clr4,
    [MPRC]    = e1000e_mac_read_clr4,
    [BPTC]    = e1000e_mac_read_clr4,
    [TSCTC]   = e1000e_mac_read_clr4,
    [ITR]     = e1000e_mac_itr_read,
    [CTRL]    = e1000e_get_ctrl,
    [TARC1]   = e1000e_get_tarc,
    [SWSM]    = e1000e_mac_swsm_read,
    [IMS]     = e1000e_mac_ims_read,
    [SYSTIML] = e1000e_get_systiml,
    [RXSATRH] = e1000e_get_rxsatrh,
    [TXSTMPH] = e1000e_get_txstmph,

    [CRCERRS ... MPC]      = e1000e_mac_readreg,
    [IP6AT ... IP6AT + 3]  = e1000e_mac_readreg,
    [IP4AT ... IP4AT + 6]  = e1000e_mac_readreg,
    [RA ... RA + 31]       = e1000e_mac_readreg,
    [WUPM ... WUPM + 31]   = e1000e_mac_readreg,
    [MTA ... MTA + E1000_MC_TBL_SIZE - 1] = e1000e_mac_readreg,
    [VFTA ... VFTA + E1000_VLAN_FILTER_TBL_SIZE - 1] = e1000e_mac_readreg,
    [FFMT ... FFMT + 254]  = e1000e_mac_readreg,
    [FFVT ... FFVT + 254]  = e1000e_mac_readreg,
    [MDEF ... MDEF + 7]    = e1000e_mac_readreg,
    [FFLT ... FFLT + 10]   = e1000e_mac_readreg,
    [FTFT ... FTFT + 254]  = e1000e_mac_readreg,
    [PBM ... PBM + 10239]  = e1000e_mac_readreg,
    [RETA ... RETA + 31]   = e1000e_mac_readreg,
    [RSSRK ... RSSRK + 31] = e1000e_mac_readreg,
    [MAVTV0 ... MAVTV3]    = e1000e_mac_readreg,
    [EITR ... EITR + E1000E_MSIX_VEC_NUM - 1] = e1000e_mac_eitr_read
};
enum { E1000E_NREADOPS = ARRAY_SIZE(e1000e_macreg_readops) };

#define e1000e_putreg(x)    [x] = e1000e_mac_writereg
typedef void (*writeops)(E1000ECore *, int, uint32_t);
static const writeops e1000e_macreg_writeops[] = {
    e1000e_putreg(PBA),
    e1000e_putreg(SWSM),
    e1000e_putreg(WUFC),
    e1000e_putreg(RDBAH1),
    e1000e_putreg(TDBAH),
    e1000e_putreg(TXDCTL),
    e1000e_putreg(RDBAH0),
    e1000e_putreg(LEDCTL),
    e1000e_putreg(FCAL),
    e1000e_putreg(FCRUC),
    e1000e_putreg(WUC),
    e1000e_putreg(WUS),
    e1000e_putreg(IPAV),
    e1000e_putreg(TDBAH1),
    e1000e_putreg(IAM),
    e1000e_putreg(EIAC),
    e1000e_putreg(IVAR),
    e1000e_putreg(TARC0),
    e1000e_putreg(TARC1),
    e1000e_putreg(FLSWDATA),
    e1000e_putreg(POEMB),
    e1000e_putreg(MFUTP01),
    e1000e_putreg(MFUTP23),
    e1000e_putreg(MANC),
    e1000e_putreg(MANC2H),
    e1000e_putreg(MFVAL),
    e1000e_putreg(EXTCNF_CTRL),
    e1000e_putreg(FACTPS),
    e1000e_putreg(FUNCTAG),
    e1000e_putreg(GSCL_1),
    e1000e_putreg(GSCL_2),
    e1000e_putreg(GSCL_3),
    e1000e_putreg(GSCL_4),
    e1000e_putreg(GSCN_0),
    e1000e_putreg(GSCN_1),
    e1000e_putreg(GSCN_2),
    e1000e_putreg(GSCN_3),
    e1000e_putreg(GCR2),
    e1000e_putreg(MRQC),
    e1000e_putreg(FLOP),
    e1000e_putreg(FLOL),
    e1000e_putreg(FLSWCTL),
    e1000e_putreg(FLSWCNT),
    e1000e_putreg(FLA),
    e1000e_putreg(RXDCTL1),
    e1000e_putreg(TXDCTL1),
    e1000e_putreg(TIPG),
    e1000e_putreg(RXSTMPH),
    e1000e_putreg(RXSTMPL),
    e1000e_putreg(RXSATRL),
    e1000e_putreg(RXSATRH),
    e1000e_putreg(TXSTMPL),
    e1000e_putreg(TXSTMPH),
    e1000e_putreg(SYSTIML),
    e1000e_putreg(SYSTIMH),
    e1000e_putreg(TIMADJL),
    e1000e_putreg(RXUDP),
    e1000e_putreg(RXCFGL),
    e1000e_putreg(TSYNCRXCTL),
    e1000e_putreg(TSYNCTXCTL),
    e1000e_putreg(EXTCNF_SIZE),
    e1000e_putreg(EEMNGCTL),
    e1000e_putreg(RA),

    [TDH1]     = e1000e_set_16bit,
    [TDT1]     = e1000e_set_tdt,
    [TCTL]     = e1000e_set_tctl,
    [TDT]      = e1000e_set_tdt,
    [MDIC]     = e1000e_set_mdic,
    [ICS]      = e1000e_set_ics,
    [TDH]      = e1000e_set_16bit,
    [RDH0]     = e1000e_set_16bit,
    [RDT0]     = e1000e_set_rdt,
    [IMC]      = e1000e_set_imc,
    [IMS]      = e1000e_set_ims,
    [ICR]      = e1000e_set_icr,
    [EECD]     = e1000e_set_eecd,
    [RCTL]     = e1000e_set_rx_control,
    [CTRL]     = e1000e_set_ctrl,
    [RDTR]     = e1000e_set_rdtr,
    [RADV]     = e1000e_set_16bit,
    [TADV]     = e1000e_set_16bit,
    [ITR]      = e1000e_set_itr,
    [EERD]     = e1000e_set_eerd,
    [AIT]      = e1000e_set_16bit,
    [TDFH]     = e1000e_set_13bit,
    [TDFT]     = e1000e_set_13bit,
    [TDFHS]    = e1000e_set_13bit,
    [TDFTS]    = e1000e_set_13bit,
    [TDFPC]    = e1000e_set_13bit,
    [RDFH]     = e1000e_set_13bit,
    [RDFHS]    = e1000e_set_13bit,
    [RDFT]     = e1000e_set_13bit,
    [RDFTS]    = e1000e_set_13bit,
    [RDFPC]    = e1000e_set_13bit,
    [PBS]      = e1000e_set_6bit,
    [GCR]      = e1000e_set_gcr,
    [PSRCTL]   = e1000e_set_psrctl,
    [RXCSUM]   = e1000e_set_rxcsum,
    [RAID]     = e1000e_set_16bit,
    [RSRPD]    = e1000e_set_12bit,
    [TIDV]     = e1000e_set_tidv,
    [TDLEN1]   = e1000e_set_dlen,
    [TDLEN]    = e1000e_set_dlen,
    [RDLEN0]   = e1000e_set_dlen,
    [RDLEN1]   = e1000e_set_dlen,
    [TDBAL]    = e1000e_set_dbal,
    [TDBAL1]   = e1000e_set_dbal,
    [RDBAL0]   = e1000e_set_dbal,
    [RDBAL1]   = e1000e_set_dbal,
    [RDH1]     = e1000e_set_16bit,
    [RDT1]     = e1000e_set_rdt,
    [STATUS]   = e1000e_set_status,
    [PBACLR]   = e1000e_set_pbaclr,
    [CTRL_EXT] = e1000e_set_ctrlext,
    [FCAH]     = e1000e_set_16bit,
    [FCT]      = e1000e_set_16bit,
    [FCTTV]    = e1000e_set_16bit,
    [FCRTV]    = e1000e_set_16bit,
    [FCRTH]    = e1000e_set_fcrth,
    [FCRTL]    = e1000e_set_fcrtl,
    [VET]      = e1000e_set_vet,
    [RXDCTL]   = e1000e_set_rxdctl,
    [FLASHT]   = e1000e_set_16bit,
    [EEWR]     = e1000e_set_eewr,
    [CTRL_DUP] = e1000e_set_ctrl,
    [RFCTL]    = e1000e_set_rfctl,
    [RA + 1]   = e1000e_mac_setmacaddr,
    [TIMINCA]  = e1000e_set_timinca,
    [TIMADJH]  = e1000e_set_timadjh,

    [IP6AT ... IP6AT + 3]    = e1000e_mac_writereg,
    [IP4AT ... IP4AT + 6]    = e1000e_mac_writereg,
    [RA + 2 ... RA + 31]     = e1000e_mac_writereg,
    [WUPM ... WUPM + 31]     = e1000e_mac_writereg,
    [MTA ... MTA + E1000_MC_TBL_SIZE - 1] = e1000e_mac_writereg,
    [VFTA ... VFTA + E1000_VLAN_FILTER_TBL_SIZE - 1] = e1000e_mac_writereg,
    [FFMT ... FFMT + 254]    = e1000e_set_4bit,
    [FFVT ... FFVT + 254]    = e1000e_mac_writereg,
    [PBM ... PBM + 10239]    = e1000e_mac_writereg,
    [MDEF ... MDEF + 7]      = e1000e_mac_writereg,
    [FFLT ... FFLT + 10]     = e1000e_set_11bit,
    [FTFT ... FTFT + 254]    = e1000e_mac_writereg,
    [RETA ... RETA + 31]     = e1000e_mac_writereg,
    [RSSRK ... RSSRK + 31]   = e1000e_mac_writereg,
    [MAVTV0 ... MAVTV3]      = e1000e_mac_writereg,
    [EITR ... EITR + E1000E_MSIX_VEC_NUM - 1] = e1000e_set_eitr
};
enum { E1000E_NWRITEOPS = ARRAY_SIZE(e1000e_macreg_writeops) };

enum { MAC_ACCESS_PARTIAL = 1 };

/*
 * The array below combines, for each MAC register index, the offset to
 * its alias (for registers that have aliases) with a flag in the lowest
 * bit marking registers that are not fully implemented.  Packing both
 * into one value is possible because all alias offsets are even.
 */
static const uint16_t mac_reg_access[E1000E_MAC_SIZE] = {
    /* Alias index offsets */
    [FCRTL_A] = 0x07fe, [FCRTH_A] = 0x0802,
    [RDH0_A]  = 0x09bc, [RDT0_A] = 0x09bc, [RDTR_A] = 0x09c6,
    [RDFH_A]  = 0xe904, [RDFT_A] = 0xe904,
    [TDH_A]   = 0x0cf8, [TDT_A]  = 0x0cf8, [TIDV_A] = 0x0cf8,
    [TDFH_A]  = 0xed00, [TDFT_A] = 0xed00,
    [RA_A ... RA_A + 31]    = 0x14f0,
    [VFTA_A ... VFTA_A + E1000_VLAN_FILTER_TBL_SIZE - 1] = 0x1400,
    [RDBAL0_A ... RDLEN0_A] = 0x09bc,
    [TDBAL_A ... TDLEN_A]   = 0x0cf8,
    /* Access options */
    [RDFH]  = MAC_ACCESS_PARTIAL, [RDFT]   = MAC_ACCESS_PARTIAL,
    [RDFHS] = MAC_ACCESS_PARTIAL, [RDFTS]  = MAC_ACCESS_PARTIAL,
    [RDFPC] = MAC_ACCESS_PARTIAL,
    [TDFH]  = MAC_ACCESS_PARTIAL, [TDFT]   = MAC_ACCESS_PARTIAL,
    [TDFHS] = MAC_ACCESS_PARTIAL, [TDFTS]  = MAC_ACCESS_PARTIAL,
    [TDFPC] = MAC_ACCESS_PARTIAL, [EECD]   = MAC_ACCESS_PARTIAL,
    [PBM]   = MAC_ACCESS_PARTIAL, [FLA]    = MAC_ACCESS_PARTIAL,
    [FCAL]  = MAC_ACCESS_PARTIAL, [FCAH]   = MAC_ACCESS_PARTIAL,
    [FCT]   = MAC_ACCESS_PARTIAL, [FCTTV]  = MAC_ACCESS_PARTIAL,
    [FCRTV] = MAC_ACCESS_PARTIAL, [FCRTL]  = MAC_ACCESS_PARTIAL,
    [FCRTH] = MAC_ACCESS_PARTIAL, [TXDCTL] = MAC_ACCESS_PARTIAL,
    [TXDCTL1] = MAC_ACCESS_PARTIAL,
    [MAVTV0 ... MAVTV3] = MAC_ACCESS_PARTIAL
};
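/*
 * MMIO accessors: the incoming address is translated to a register index,
 * alias offsets from mac_reg_access are applied, and the per-register
 * handler (if any) is invoked.
 */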
void
e1000e_core_write(E1000ECore *core, hwaddr addr, uint64_t val, unsigned size)
{
    uint16_t index = e1000e_get_reg_index_with_offset(mac_reg_access, addr);

    if (index < E1000E_NWRITEOPS && e1000e_macreg_writeops[index]) {
        if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) {
            trace_e1000e_wrn_regs_write_trivial(index << 2);
        }
        trace_e1000e_core_write(index << 2, size, val);
        e1000e_macreg_writeops[index](core, index, val);
    } else if (index < E1000E_NREADOPS && e1000e_macreg_readops[index]) {
        trace_e1000e_wrn_regs_write_ro(index << 2, size, val);
    } else {
        trace_e1000e_wrn_regs_write_unknown(index << 2, size, val);
    }
}

uint64_t
e1000e_core_read(E1000ECore *core, hwaddr addr, unsigned size)
{
    uint64_t val;
    uint16_t index = e1000e_get_reg_index_with_offset(mac_reg_access, addr);

    if (index < E1000E_NREADOPS && e1000e_macreg_readops[index]) {
        if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) {
            trace_e1000e_wrn_regs_read_trivial(index << 2);
        }
        val = e1000e_macreg_readops[index](core, index);
        trace_e1000e_core_read(index << 2, size, val);
        return val;
    } else {
        trace_e1000e_wrn_regs_read_unknown(index << 2, size);
    }
    return 0;
}

static inline void
e1000e_autoneg_pause(E1000ECore *core)
{
    timer_del(core->autoneg_timer);
}

static void
e1000e_autoneg_resume(E1000ECore *core)
{
    if (e1000e_have_autoneg(core) &&
        !(core->phy[0][MII_BMSR] & MII_BMSR_AN_COMP)) {
        qemu_get_queue(core->owner_nic)->link_down = false;
        timer_mod(core->autoneg_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
    }
}

static void
e1000e_vm_state_change(void *opaque, bool running, RunState state)
{
    E1000ECore *core = opaque;

    if (running) {
        trace_e1000e_vm_state_running();
        e1000e_intrmgr_resume(core);
        e1000e_autoneg_resume(core);
    } else {
        trace_e1000e_vm_state_stopped();
        e1000e_autoneg_pause(core);
        e1000e_intrmgr_pause(core);
    }
}

void
e1000e_core_pci_realize(E1000ECore *core,
                        const uint16_t *eeprom_templ,
                        uint32_t eeprom_size,
                        const uint8_t *macaddr)
{
    int i;

    core->autoneg_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                       e1000e_autoneg_timer, core);
    e1000e_intrmgr_pci_realize(core);

    core->vmstate =
        qemu_add_vm_change_state_handler(e1000e_vm_state_change, core);

    for (i = 0; i < E1000E_NUM_QUEUES; i++) {
        net_tx_pkt_init(&core->tx[i].tx_pkt, core->owner, E1000E_MAX_TX_FRAGS);
    }

    net_rx_pkt_init(&core->rx_pkt);

    e1000x_core_prepare_eeprom(core->eeprom,
                               eeprom_templ,
                               eeprom_size,
                               PCI_DEVICE_GET_CLASS(core->owner)->device_id,
                               macaddr);
    e1000e_update_rx_offloads(core);
}

void
e1000e_core_pci_uninit(E1000ECore *core)
{
    int i;

    timer_free(core->autoneg_timer);

    e1000e_intrmgr_pci_unint(core);

    qemu_del_vm_change_state_handler(core->vmstate);

    for (i = 0; i < E1000E_NUM_QUEUES; i++) {
        net_tx_pkt_reset(core->tx[i].tx_pkt, core->owner);
        net_tx_pkt_uninit(core->tx[i].tx_pkt);
    }

    net_rx_pkt_uninit(core->rx_pkt);
}
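/*
 * Power-on default values for the emulated 82574 PHY registers,
 * loaded into core->phy on every reset.
 */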
static const uint16_t
e1000e_phy_reg_init[E1000E_PHY_PAGES][E1000E_PHY_PAGE_SIZE] = {
    [0] = {
        [MII_BMCR] = MII_BMCR_SPEED1000 |
                     MII_BMCR_FD |
                     MII_BMCR_AUTOEN,

        [MII_BMSR] = MII_BMSR_EXTCAP |
                     MII_BMSR_LINK_ST |
                     MII_BMSR_AUTONEG |
                     MII_BMSR_MFPS |
                     MII_BMSR_EXTSTAT |
                     MII_BMSR_10T_HD |
                     MII_BMSR_10T_FD |
                     MII_BMSR_100TX_HD |
                     MII_BMSR_100TX_FD,

        [MII_PHYID1]   = 0x141,
        [MII_PHYID2]   = E1000_PHY_ID2_82574x,
        [MII_ANAR]     = MII_ANAR_CSMACD | MII_ANAR_10 |
                         MII_ANAR_10FD | MII_ANAR_TX |
                         MII_ANAR_TXFD | MII_ANAR_PAUSE |
                         MII_ANAR_PAUSE_ASYM,
        [MII_ANLPAR]   = MII_ANLPAR_10 | MII_ANLPAR_10FD |
                         MII_ANLPAR_TX | MII_ANLPAR_TXFD |
                         MII_ANLPAR_T4 | MII_ANLPAR_PAUSE,
        [MII_ANER]     = MII_ANER_NP | MII_ANER_NWAY,
        [MII_ANNP]     = 1 | MII_ANNP_MP,
        [MII_CTRL1000] = MII_CTRL1000_HALF | MII_CTRL1000_FULL |
                         MII_CTRL1000_PORT | MII_CTRL1000_MASTER,
        [MII_STAT1000] = MII_STAT1000_HALF | MII_STAT1000_FULL |
                         MII_STAT1000_ROK | MII_STAT1000_LOK,
        [MII_EXTSTAT]  = MII_EXTSTAT_1000T_HD | MII_EXTSTAT_1000T_FD,

        [PHY_COPPER_CTRL1] = BIT(5) | BIT(6) | BIT(8) | BIT(9) |
                             BIT(12) | BIT(13),
        [PHY_COPPER_STAT1] = BIT(3) | BIT(10) | BIT(11) | BIT(13) | BIT(15)
    },
    [2] = {
        [PHY_MAC_CTRL1] = BIT(3) | BIT(7),
        [PHY_MAC_CTRL2] = BIT(1) | BIT(2) | BIT(6) | BIT(12)
    },
    [3] = {
        [PHY_LED_TIMER_CTRL] = BIT(0) | BIT(2) | BIT(14)
    }
};

static const uint32_t e1000e_mac_reg_init[] = {
    [PBA]          = 0x00140014,
    [LEDCTL]       = BIT(1) | BIT(8) | BIT(9) | BIT(15) | BIT(17) | BIT(18),
    [EXTCNF_CTRL]  = BIT(3),
    [EEMNGCTL]     = BIT(31),
    [FLASHT]       = 0x2,
    [FLSWCTL]      = BIT(30) | BIT(31),
    [FLOL]         = BIT(0),
    [RXDCTL]       = BIT(16),
    [RXDCTL1]      = BIT(16),
    [TIPG]         = 0x8 | (0x8 << 10) | (0x6 << 20),
    [RXCFGL]       = 0x88F7,
    [RXUDP]        = 0x319,
    [CTRL]         = E1000_CTRL_FD | E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 |
                     E1000_CTRL_SPD_1000 | E1000_CTRL_SLU |
                     E1000_CTRL_ADVD3WUC,
    [STATUS]       = E1000_STATUS_ASDV_1000 | E1000_STATUS_LU,
    [PSRCTL]       = (2 << E1000_PSRCTL_BSIZE0_SHIFT) |
                     (4 << E1000_PSRCTL_BSIZE1_SHIFT) |
                     (4 << E1000_PSRCTL_BSIZE2_SHIFT),
    [TARC0]        = 0x3 | E1000_TARC_ENABLE,
    [TARC1]        = 0x3 | E1000_TARC_ENABLE,
    [EECD]         = E1000_EECD_AUTO_RD | E1000_EECD_PRES,
    [EERD]         = E1000_EERW_DONE,
    [EEWR]         = E1000_EERW_DONE,
    [GCR]          = E1000_L0S_ADJUST |
                     E1000_L1_ENTRY_LATENCY_MSB |
                     E1000_L1_ENTRY_LATENCY_LSB,
    [TDFH]         = 0x600,
    [TDFT]         = 0x600,
    [TDFHS]        = 0x600,
    [TDFTS]        = 0x600,
    [POEMB]        = 0x30D,
    [PBS]          = 0x028,
    [MANC]         = E1000_MANC_DIS_IP_CHK_ARP,
    [FACTPS]       = E1000_FACTPS_LAN0_ON | 0x20000000,
    [SWSM]         = 1,
    [RXCSUM]       = E1000_RXCSUM_IPOFLD | E1000_RXCSUM_TUOFLD,
    [ITR]          = E1000E_MIN_XITR,
    [EITR ... EITR + E1000E_MSIX_VEC_NUM - 1] = E1000E_MIN_XITR,
};
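/*
 * Device reset.  A software-initiated reset (sw == true) preserves the
 * PBA, PBS and FLA registers; everything else is reinitialized.
 */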
static void e1000e_reset(E1000ECore *core, bool sw)
{
    int i;

    timer_del(core->autoneg_timer);

    e1000e_intrmgr_reset(core);

    memset(core->phy, 0, sizeof core->phy);
    memcpy(core->phy, e1000e_phy_reg_init, sizeof e1000e_phy_reg_init);

    for (i = 0; i < E1000E_MAC_SIZE; i++) {
        if (sw && (i == PBA || i == PBS || i == FLA)) {
            continue;
        }

        core->mac[i] = i < ARRAY_SIZE(e1000e_mac_reg_init) ?
                       e1000e_mac_reg_init[i] : 0;
    }

    core->rxbuf_min_shift = 1 + E1000_RING_DESC_LEN_SHIFT;

    if (qemu_get_queue(core->owner_nic)->link_down) {
        e1000e_link_down(core);
    }

    e1000x_reset_mac_addr(core->owner_nic, core->mac, core->permanent_mac);

    for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
        net_tx_pkt_reset(core->tx[i].tx_pkt, core->owner);
        memset(&core->tx[i].props, 0, sizeof(core->tx[i].props));
        core->tx[i].skip_cp = false;
    }
}

void
e1000e_core_reset(E1000ECore *core)
{
    e1000e_reset(core, false);
}

void e1000e_core_pre_save(E1000ECore *core)
{
    int i;
    NetClientState *nc = qemu_get_queue(core->owner_nic);

    /*
     * If link is down and auto-negotiation is supported and ongoing,
     * complete auto-negotiation immediately.  This allows us to look
     * at MII_BMSR_AN_COMP to infer link status on load.
     */
    if (nc->link_down && e1000e_have_autoneg(core)) {
        core->phy[0][MII_BMSR] |= MII_BMSR_AN_COMP;
        e1000e_update_flowctl_status(core);
    }

    for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
        if (net_tx_pkt_has_fragments(core->tx[i].tx_pkt)) {
            core->tx[i].skip_cp = true;
        }
    }
}

int
e1000e_core_post_load(E1000ECore *core)
{
    NetClientState *nc = qemu_get_queue(core->owner_nic);

    /*
     * nc.link_down can't be migrated, so infer it from the link status
     * bit in core.mac[STATUS].
     */
    nc->link_down = (core->mac[STATUS] & E1000_STATUS_LU) == 0;

    return 0;
}