/*
 * Core code for QEMU e1000e emulation
 *
 * Software developer's manuals:
 * http://www.intel.com/content/dam/doc/datasheet/82574l-gbe-controller-datasheet.pdf
 *
 * Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com)
 * Developed by Daynix Computing LTD (http://www.daynix.com)
 *
 * Authors:
 * Dmitry Fleytman <dmitry@daynix.com>
 * Leonid Bloch <leonid@daynix.com>
 * Yan Vugenfirer <yan@daynix.com>
 *
 * Based on work done by:
 * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
 * Copyright (c) 2008 Qumranet
 * Based on work done by:
 * Copyright (c) 2007 Dan Aloni
 * Copyright (c) 2004 Antony T Curtis
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "sysemu/sysemu.h"
#include "net/net.h"
#include "net/tap.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"

#include "net_tx_pkt.h"
#include "net_rx_pkt.h"

#include "e1000x_common.h"
#include "e1000e_core.h"

#include "trace.h"

#define E1000E_MIN_XITR     (500) /* No more than 7813 interrupts per
                                     second according to spec 10.2.4.2 */
#define E1000E_MAX_TX_FRAGS (64)

static void
e1000e_set_interrupt_cause(E1000ECore *core, uint32_t val);

static inline void
e1000e_process_ts_option(E1000ECore *core, struct e1000_tx_desc *dp)
{
    if (le32_to_cpu(dp->upper.data) & E1000_TXD_EXTCMD_TSTAMP) {
        trace_e1000e_wrn_no_ts_support();
    }
}

static inline void
e1000e_process_snap_option(E1000ECore *core, uint32_t cmd_and_length)
{
    if (cmd_and_length & E1000_TXD_CMD_SNAP) {
        trace_e1000e_wrn_no_snap_support();
    }
}

static inline void
e1000e_raise_legacy_irq(E1000ECore *core)
{
    trace_e1000e_irq_legacy_notify(true);
    e1000x_inc_reg_if_not_full(core->mac, IAC);
    pci_set_irq(core->owner, 1);
}

static inline void
e1000e_lower_legacy_irq(E1000ECore *core)
{
    trace_e1000e_irq_legacy_notify(false);
    pci_set_irq(core->owner, 0);
}

static inline void
e1000e_intrmgr_rearm_timer(E1000IntrDelayTimer *timer)
{
    int64_t delay_ns = (int64_t) timer->core->mac[timer->delay_reg] *
                       timer->delay_resolution_ns;

    trace_e1000e_irq_rearm_timer(timer->delay_reg << 2, delay_ns);

    timer_mod(timer->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + delay_ns);

    timer->running = true;
}

static void
e1000e_intmgr_timer_resume(E1000IntrDelayTimer *timer)
{
    if (timer->running) {
        e1000e_intrmgr_rearm_timer(timer);
    }
}

static void
e1000e_intmgr_timer_pause(E1000IntrDelayTimer *timer)
{
    if (timer->running) {
        timer_del(timer->timer);
    }
}

static inline void
e1000e_intrmgr_stop_timer(E1000IntrDelayTimer *timer)
{
    if (timer->running) {
        timer_del(timer->timer);
        timer->running = false;
    }
}

static inline void
e1000e_intrmgr_fire_delayed_interrupts(E1000ECore *core)
{
    trace_e1000e_irq_fire_delayed_interrupts();
    e1000e_set_interrupt_cause(core, 0);
}

static void
e1000e_intrmgr_on_timer(void *opaque)
{
    E1000IntrDelayTimer *timer = opaque;

    trace_e1000e_irq_throttling_timer(timer->delay_reg << 2);

    timer->running = false;
    e1000e_intrmgr_fire_delayed_interrupts(timer->core);
}

static void
e1000e_intrmgr_on_throttling_timer(void *opaque)
{
    E1000IntrDelayTimer *timer = opaque;

    assert(!msix_enabled(timer->core->owner));

    timer->running = false;

    if (!timer->core->itr_intr_pending) {
        trace_e1000e_irq_throttling_no_pending_interrupts();
        return;
    }

    if (msi_enabled(timer->core->owner)) {
        trace_e1000e_irq_msi_notify_postponed();
        e1000e_set_interrupt_cause(timer->core, 0);
    } else {
        trace_e1000e_irq_legacy_notify_postponed();
        e1000e_set_interrupt_cause(timer->core, 0);
    }
}

static void
e1000e_intrmgr_on_msix_throttling_timer(void *opaque)
{
    E1000IntrDelayTimer *timer = opaque;
    int idx = timer - &timer->core->eitr[0];

    assert(msix_enabled(timer->core->owner));

    timer->running = false;

    if (!timer->core->eitr_intr_pending[idx]) {
        trace_e1000e_irq_throttling_no_pending_vec(idx);
        return;
    }

    trace_e1000e_irq_msix_notify_postponed_vec(idx);
    msix_notify(timer->core->owner, idx);
}

static void
e1000e_intrmgr_initialize_all_timers(E1000ECore *core, bool create)
{
    int i;

    core->radv.delay_reg = RADV;
    core->rdtr.delay_reg = RDTR;
    core->raid.delay_reg = RAID;
    core->tadv.delay_reg = TADV;
    core->tidv.delay_reg = TIDV;

    core->radv.delay_resolution_ns = E1000_INTR_DELAY_NS_RES;
    core->rdtr.delay_resolution_ns = E1000_INTR_DELAY_NS_RES;
    core->raid.delay_resolution_ns = E1000_INTR_DELAY_NS_RES;
    core->tadv.delay_resolution_ns = E1000_INTR_DELAY_NS_RES;
    core->tidv.delay_resolution_ns = E1000_INTR_DELAY_NS_RES;

    core->radv.core = core;
    core->rdtr.core = core;
    core->raid.core = core;
    core->tadv.core = core;
    core->tidv.core = core;

    core->itr.core = core;
    core->itr.delay_reg = ITR;
    core->itr.delay_resolution_ns = E1000_INTR_THROTTLING_NS_RES;

    for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
        core->eitr[i].core = core;
        core->eitr[i].delay_reg = EITR + i;
        core->eitr[i].delay_resolution_ns = E1000_INTR_THROTTLING_NS_RES;
    }

    if (!create) {
        return;
    }

    core->radv.timer =
        timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->radv);
    core->rdtr.timer =
        timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->rdtr);
    core->raid.timer =
        timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->raid);

    core->tadv.timer =
        timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->tadv);
    core->tidv.timer =
        timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->tidv);

    core->itr.timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                   e1000e_intrmgr_on_throttling_timer,
                                   &core->itr);

    for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
        core->eitr[i].timer =
            timer_new_ns(QEMU_CLOCK_VIRTUAL,
                         e1000e_intrmgr_on_msix_throttling_timer,
                         &core->eitr[i]);
    }
}

static inline void
e1000e_intrmgr_stop_delay_timers(E1000ECore *core)
{
    e1000e_intrmgr_stop_timer(&core->radv);
    e1000e_intrmgr_stop_timer(&core->rdtr);
    e1000e_intrmgr_stop_timer(&core->raid);
    e1000e_intrmgr_stop_timer(&core->tidv);
    e1000e_intrmgr_stop_timer(&core->tadv);
}

static bool
e1000e_intrmgr_delay_rx_causes(E1000ECore *core, uint32_t *causes)
{
    uint32_t delayable_causes;
    uint32_t rdtr = core->mac[RDTR];
    uint32_t radv = core->mac[RADV];
    uint32_t raid = core->mac[RAID];

    if (msix_enabled(core->owner)) {
        return false;
    }

    delayable_causes = E1000_ICR_RXQ0 |
                       E1000_ICR_RXQ1 |
                       E1000_ICR_RXT0;

    if (!(core->mac[RFCTL] & E1000_RFCTL_ACK_DIS)) {
        delayable_causes |= E1000_ICR_ACK;
    }

    /* Clean up all causes that may be delayed */
    core->delayed_causes |= *causes & delayable_causes;
    *causes &= ~delayable_causes;

    /* Check if delayed RX interrupts disabled by client
       or if there are causes that cannot be delayed */
    if ((rdtr == 0) || (*causes != 0)) {
        return false;
    }

    /* Check if delayed RX ACK interrupts disabled by client
       and there is an ACK packet received */
    if ((raid == 0) && (core->delayed_causes & E1000_ICR_ACK)) {
        return false;
    }

    /* All causes delayed */
    e1000e_intrmgr_rearm_timer(&core->rdtr);

    if (!core->radv.running && (radv != 0)) {
        e1000e_intrmgr_rearm_timer(&core->radv);
    }

    if (!core->raid.running && (core->delayed_causes & E1000_ICR_ACK)) {
        e1000e_intrmgr_rearm_timer(&core->raid);
    }

    return true;
}

static bool
e1000e_intrmgr_delay_tx_causes(E1000ECore *core, uint32_t *causes)
{
    static const uint32_t delayable_causes = E1000_ICR_TXQ0 |
                                             E1000_ICR_TXQ1 |
                                             E1000_ICR_TXQE |
                                             E1000_ICR_TXDW;

    if (msix_enabled(core->owner)) {
        return false;
    }

    /* Clean up all causes that may be delayed */
    core->delayed_causes |= *causes & delayable_causes;
    *causes &= ~delayable_causes;

    /* If there are causes that cannot be delayed */
    if (*causes != 0) {
        return false;
    }

    /* All causes delayed */
    e1000e_intrmgr_rearm_timer(&core->tidv);

    if (!core->tadv.running && (core->mac[TADV] != 0)) {
        e1000e_intrmgr_rearm_timer(&core->tadv);
    }

    return true;
}

static uint32_t
e1000e_intmgr_collect_delayed_causes(E1000ECore *core)
{
    uint32_t res;

    if (msix_enabled(core->owner)) {
        assert(core->delayed_causes == 0);
        return 0;
    }

    res = core->delayed_causes;
    core->delayed_causes = 0;

    e1000e_intrmgr_stop_delay_timers(core);

    return res;
}

static void
e1000e_intrmgr_fire_all_timers(E1000ECore *core)
{
    int i;
    uint32_t val = e1000e_intmgr_collect_delayed_causes(core);

    trace_e1000e_irq_adding_delayed_causes(val, core->mac[ICR]);
    core->mac[ICR] |= val;

    if (core->itr.running) {
        timer_del(core->itr.timer);
        e1000e_intrmgr_on_throttling_timer(&core->itr);
    }

    for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
        if (core->eitr[i].running) {
            timer_del(core->eitr[i].timer);
            e1000e_intrmgr_on_msix_throttling_timer(&core->eitr[i]);
        }
    }
}

static void
e1000e_intrmgr_resume(E1000ECore *core)
{
    int i;

    e1000e_intmgr_timer_resume(&core->radv);
    e1000e_intmgr_timer_resume(&core->rdtr);
    e1000e_intmgr_timer_resume(&core->raid);
    e1000e_intmgr_timer_resume(&core->tidv);
    e1000e_intmgr_timer_resume(&core->tadv);

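    /* Also resume the ITR and per-vector EITR interrupt throttling timers */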
e1000e_intmgr_timer_resume(&core->itr); 391 392 for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) { 393 e1000e_intmgr_timer_resume(&core->eitr[i]); 394 } 395 } 396 397 static void 398 e1000e_intrmgr_pause(E1000ECore *core) 399 { 400 int i; 401 402 e1000e_intmgr_timer_pause(&core->radv); 403 e1000e_intmgr_timer_pause(&core->rdtr); 404 e1000e_intmgr_timer_pause(&core->raid); 405 e1000e_intmgr_timer_pause(&core->tidv); 406 e1000e_intmgr_timer_pause(&core->tadv); 407 408 e1000e_intmgr_timer_pause(&core->itr); 409 410 for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) { 411 e1000e_intmgr_timer_pause(&core->eitr[i]); 412 } 413 } 414 415 static void 416 e1000e_intrmgr_reset(E1000ECore *core) 417 { 418 int i; 419 420 core->delayed_causes = 0; 421 422 e1000e_intrmgr_stop_delay_timers(core); 423 424 e1000e_intrmgr_stop_timer(&core->itr); 425 426 for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) { 427 e1000e_intrmgr_stop_timer(&core->eitr[i]); 428 } 429 } 430 431 static void 432 e1000e_intrmgr_pci_unint(E1000ECore *core) 433 { 434 int i; 435 436 timer_del(core->radv.timer); 437 timer_free(core->radv.timer); 438 timer_del(core->rdtr.timer); 439 timer_free(core->rdtr.timer); 440 timer_del(core->raid.timer); 441 timer_free(core->raid.timer); 442 443 timer_del(core->tadv.timer); 444 timer_free(core->tadv.timer); 445 timer_del(core->tidv.timer); 446 timer_free(core->tidv.timer); 447 448 timer_del(core->itr.timer); 449 timer_free(core->itr.timer); 450 451 for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) { 452 timer_del(core->eitr[i].timer); 453 timer_free(core->eitr[i].timer); 454 } 455 } 456 457 static void 458 e1000e_intrmgr_pci_realize(E1000ECore *core) 459 { 460 e1000e_intrmgr_initialize_all_timers(core, true); 461 } 462 463 static inline bool 464 e1000e_rx_csum_enabled(E1000ECore *core) 465 { 466 return (core->mac[RXCSUM] & E1000_RXCSUM_PCSD) ? false : true; 467 } 468 469 static inline bool 470 e1000e_rx_use_legacy_descriptor(E1000ECore *core) 471 { 472 return (core->mac[RFCTL] & E1000_RFCTL_EXTEN) ? 
        false : true;
}

static inline bool
e1000e_rx_use_ps_descriptor(E1000ECore *core)
{
    return !e1000e_rx_use_legacy_descriptor(core) &&
           (core->mac[RCTL] & E1000_RCTL_DTYP_PS);
}

static inline bool
e1000e_rss_enabled(E1000ECore *core)
{
    return E1000_MRQC_ENABLED(core->mac[MRQC]) &&
           !e1000e_rx_csum_enabled(core) &&
           !e1000e_rx_use_legacy_descriptor(core);
}

typedef struct E1000E_RSSInfo_st {
    bool enabled;
    uint32_t hash;
    uint32_t queue;
    uint32_t type;
} E1000E_RSSInfo;

static uint32_t
e1000e_rss_get_hash_type(E1000ECore *core, struct NetRxPkt *pkt)
{
    bool isip4, isip6, isudp, istcp;

    assert(e1000e_rss_enabled(core));

    net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);

    if (isip4) {
        bool fragment = net_rx_pkt_get_ip4_info(pkt)->fragment;

        trace_e1000e_rx_rss_ip4(fragment, istcp, core->mac[MRQC],
                                E1000_MRQC_EN_TCPIPV4(core->mac[MRQC]),
                                E1000_MRQC_EN_IPV4(core->mac[MRQC]));

        if (!fragment && istcp && E1000_MRQC_EN_TCPIPV4(core->mac[MRQC])) {
            return E1000_MRQ_RSS_TYPE_IPV4TCP;
        }

        if (E1000_MRQC_EN_IPV4(core->mac[MRQC])) {
            return E1000_MRQ_RSS_TYPE_IPV4;
        }
    } else if (isip6) {
        eth_ip6_hdr_info *ip6info = net_rx_pkt_get_ip6_info(pkt);

        bool ex_dis = core->mac[RFCTL] & E1000_RFCTL_IPV6_EX_DIS;
        bool new_ex_dis = core->mac[RFCTL] & E1000_RFCTL_NEW_IPV6_EXT_DIS;

        /*
         * The following two traces must not be combined because the
         * resulting event would have 11 arguments in total, and some trace
         * backends (at least "ust") are limited to a maximum of 10
         * arguments per event. Events with more arguments fail to compile
         * for such backends.
         */
        trace_e1000e_rx_rss_ip6_rfctl(core->mac[RFCTL]);
        trace_e1000e_rx_rss_ip6(ex_dis, new_ex_dis, istcp,
                                ip6info->has_ext_hdrs,
                                ip6info->rss_ex_dst_valid,
                                ip6info->rss_ex_src_valid,
                                core->mac[MRQC],
                                E1000_MRQC_EN_TCPIPV6(core->mac[MRQC]),
                                E1000_MRQC_EN_IPV6EX(core->mac[MRQC]),
                                E1000_MRQC_EN_IPV6(core->mac[MRQC]));

        if ((!ex_dis || !ip6info->has_ext_hdrs) &&
            (!new_ex_dis || !(ip6info->rss_ex_dst_valid ||
                              ip6info->rss_ex_src_valid))) {

            if (istcp && !ip6info->fragment &&
                E1000_MRQC_EN_TCPIPV6(core->mac[MRQC])) {
                return E1000_MRQ_RSS_TYPE_IPV6TCP;
            }

            if (E1000_MRQC_EN_IPV6EX(core->mac[MRQC])) {
                return E1000_MRQ_RSS_TYPE_IPV6EX;
            }

        }

        if (E1000_MRQC_EN_IPV6(core->mac[MRQC])) {
            return E1000_MRQ_RSS_TYPE_IPV6;
        }

    }

    return E1000_MRQ_RSS_TYPE_NONE;
}

static uint32_t
e1000e_rss_calc_hash(E1000ECore *core,
                     struct NetRxPkt *pkt,
                     E1000E_RSSInfo *info)
{
    NetRxPktRssType type;

    assert(e1000e_rss_enabled(core));

    switch (info->type) {
    case E1000_MRQ_RSS_TYPE_IPV4:
        type = NetPktRssIpV4;
        break;
    case E1000_MRQ_RSS_TYPE_IPV4TCP:
        type = NetPktRssIpV4Tcp;
        break;
    case E1000_MRQ_RSS_TYPE_IPV6TCP:
        type = NetPktRssIpV6Tcp;
        break;
    case E1000_MRQ_RSS_TYPE_IPV6:
        type = NetPktRssIpV6;
        break;
    case E1000_MRQ_RSS_TYPE_IPV6EX:
        type = NetPktRssIpV6Ex;
        break;
    default:
        assert(false);
        return 0;
    }

    return net_rx_pkt_calc_rss_hash(pkt, type, (uint8_t *) &core->mac[RSSRK]);
}

static void
e1000e_rss_parse_packet(E1000ECore *core,
                        struct NetRxPkt *pkt,
                        E1000E_RSSInfo *info)
{
    trace_e1000e_rx_rss_started();

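    /* With RSS disabled, every packet is routed to the default queue (0) */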
607 if (!e1000e_rss_enabled(core)) { 608 info->enabled = false; 609 info->hash = 0; 610 info->queue = 0; 611 info->type = 0; 612 trace_e1000e_rx_rss_disabled(); 613 return; 614 } 615 616 info->enabled = true; 617 618 info->type = e1000e_rss_get_hash_type(core, pkt); 619 620 trace_e1000e_rx_rss_type(info->type); 621 622 if (info->type == E1000_MRQ_RSS_TYPE_NONE) { 623 info->hash = 0; 624 info->queue = 0; 625 return; 626 } 627 628 info->hash = e1000e_rss_calc_hash(core, pkt, info); 629 info->queue = E1000_RSS_QUEUE(&core->mac[RETA], info->hash); 630 } 631 632 static void 633 e1000e_setup_tx_offloads(E1000ECore *core, struct e1000e_tx *tx) 634 { 635 if (tx->props.tse && tx->props.cptse) { 636 net_tx_pkt_build_vheader(tx->tx_pkt, true, true, tx->props.mss); 637 net_tx_pkt_update_ip_checksums(tx->tx_pkt); 638 e1000x_inc_reg_if_not_full(core->mac, TSCTC); 639 return; 640 } 641 642 if (tx->props.sum_needed & E1000_TXD_POPTS_TXSM) { 643 net_tx_pkt_build_vheader(tx->tx_pkt, false, true, 0); 644 } 645 646 if (tx->props.sum_needed & E1000_TXD_POPTS_IXSM) { 647 net_tx_pkt_update_ip_hdr_checksum(tx->tx_pkt); 648 } 649 } 650 651 static bool 652 e1000e_tx_pkt_send(E1000ECore *core, struct e1000e_tx *tx, int queue_index) 653 { 654 int target_queue = MIN(core->max_queue_num, queue_index); 655 NetClientState *queue = qemu_get_subqueue(core->owner_nic, target_queue); 656 657 e1000e_setup_tx_offloads(core, tx); 658 659 net_tx_pkt_dump(tx->tx_pkt); 660 661 if ((core->phy[0][PHY_CTRL] & MII_CR_LOOPBACK) || 662 ((core->mac[RCTL] & E1000_RCTL_LBM_MAC) == E1000_RCTL_LBM_MAC)) { 663 return net_tx_pkt_send_loopback(tx->tx_pkt, queue); 664 } else { 665 return net_tx_pkt_send(tx->tx_pkt, queue); 666 } 667 } 668 669 static void 670 e1000e_on_tx_done_update_stats(E1000ECore *core, struct NetTxPkt *tx_pkt) 671 { 672 static const int PTCregs[6] = { PTC64, PTC127, PTC255, PTC511, 673 PTC1023, PTC1522 }; 674 675 size_t tot_len = net_tx_pkt_get_total_len(tx_pkt); 676 677 e1000x_increase_size_stats(core->mac, PTCregs, tot_len); 678 e1000x_inc_reg_if_not_full(core->mac, TPT); 679 e1000x_grow_8reg_if_not_full(core->mac, TOTL, tot_len); 680 681 switch (net_tx_pkt_get_packet_type(tx_pkt)) { 682 case ETH_PKT_BCAST: 683 e1000x_inc_reg_if_not_full(core->mac, BPTC); 684 break; 685 case ETH_PKT_MCAST: 686 e1000x_inc_reg_if_not_full(core->mac, MPTC); 687 break; 688 case ETH_PKT_UCAST: 689 break; 690 default: 691 g_assert_not_reached(); 692 } 693 694 core->mac[GPTC] = core->mac[TPT]; 695 core->mac[GOTCL] = core->mac[TOTL]; 696 core->mac[GOTCH] = core->mac[TOTH]; 697 } 698 699 static void 700 e1000e_process_tx_desc(E1000ECore *core, 701 struct e1000e_tx *tx, 702 struct e1000_tx_desc *dp, 703 int queue_index) 704 { 705 uint32_t txd_lower = le32_to_cpu(dp->lower.data); 706 uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D); 707 unsigned int split_size = txd_lower & 0xffff; 708 uint64_t addr; 709 struct e1000_context_desc *xp = (struct e1000_context_desc *)dp; 710 bool eop = txd_lower & E1000_TXD_CMD_EOP; 711 712 if (dtype == E1000_TXD_CMD_DEXT) { /* context descriptor */ 713 e1000x_read_tx_ctx_descr(xp, &tx->props); 714 e1000e_process_snap_option(core, le32_to_cpu(xp->cmd_and_length)); 715 return; 716 } else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) { 717 /* data descriptor */ 718 tx->props.sum_needed = le32_to_cpu(dp->upper.data) >> 8; 719 tx->props.cptse = (txd_lower & E1000_TXD_CMD_TSE) ? 
1 : 0; 720 e1000e_process_ts_option(core, dp); 721 } else { 722 /* legacy descriptor */ 723 e1000e_process_ts_option(core, dp); 724 tx->props.cptse = 0; 725 } 726 727 addr = le64_to_cpu(dp->buffer_addr); 728 729 if (!tx->skip_cp) { 730 if (!net_tx_pkt_add_raw_fragment(tx->tx_pkt, addr, split_size)) { 731 tx->skip_cp = true; 732 } 733 } 734 735 if (eop) { 736 if (!tx->skip_cp && net_tx_pkt_parse(tx->tx_pkt)) { 737 if (e1000x_vlan_enabled(core->mac) && 738 e1000x_is_vlan_txd(txd_lower)) { 739 net_tx_pkt_setup_vlan_header_ex(tx->tx_pkt, 740 le16_to_cpu(dp->upper.fields.special), core->vet); 741 } 742 if (e1000e_tx_pkt_send(core, tx, queue_index)) { 743 e1000e_on_tx_done_update_stats(core, tx->tx_pkt); 744 } 745 } 746 747 tx->skip_cp = false; 748 net_tx_pkt_reset(tx->tx_pkt); 749 750 tx->props.sum_needed = 0; 751 tx->props.cptse = 0; 752 } 753 } 754 755 static inline uint32_t 756 e1000e_tx_wb_interrupt_cause(E1000ECore *core, int queue_idx) 757 { 758 if (!msix_enabled(core->owner)) { 759 return E1000_ICR_TXDW; 760 } 761 762 return (queue_idx == 0) ? E1000_ICR_TXQ0 : E1000_ICR_TXQ1; 763 } 764 765 static inline uint32_t 766 e1000e_rx_wb_interrupt_cause(E1000ECore *core, int queue_idx, 767 bool min_threshold_hit) 768 { 769 if (!msix_enabled(core->owner)) { 770 return E1000_ICS_RXT0 | (min_threshold_hit ? E1000_ICS_RXDMT0 : 0); 771 } 772 773 return (queue_idx == 0) ? E1000_ICR_RXQ0 : E1000_ICR_RXQ1; 774 } 775 776 static uint32_t 777 e1000e_txdesc_writeback(E1000ECore *core, dma_addr_t base, 778 struct e1000_tx_desc *dp, bool *ide, int queue_idx) 779 { 780 uint32_t txd_upper, txd_lower = le32_to_cpu(dp->lower.data); 781 782 if (!(txd_lower & E1000_TXD_CMD_RS) && 783 !(core->mac[IVAR] & E1000_IVAR_TX_INT_EVERY_WB)) { 784 return 0; 785 } 786 787 *ide = (txd_lower & E1000_TXD_CMD_IDE) ? 
true : false; 788 789 txd_upper = le32_to_cpu(dp->upper.data) | E1000_TXD_STAT_DD; 790 791 dp->upper.data = cpu_to_le32(txd_upper); 792 pci_dma_write(core->owner, base + ((char *)&dp->upper - (char *)dp), 793 &dp->upper, sizeof(dp->upper)); 794 return e1000e_tx_wb_interrupt_cause(core, queue_idx); 795 } 796 797 typedef struct E1000E_RingInfo_st { 798 int dbah; 799 int dbal; 800 int dlen; 801 int dh; 802 int dt; 803 int idx; 804 } E1000E_RingInfo; 805 806 static inline bool 807 e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r) 808 { 809 return core->mac[r->dh] == core->mac[r->dt]; 810 } 811 812 static inline uint64_t 813 e1000e_ring_base(E1000ECore *core, const E1000E_RingInfo *r) 814 { 815 uint64_t bah = core->mac[r->dbah]; 816 uint64_t bal = core->mac[r->dbal]; 817 818 return (bah << 32) + bal; 819 } 820 821 static inline uint64_t 822 e1000e_ring_head_descr(E1000ECore *core, const E1000E_RingInfo *r) 823 { 824 return e1000e_ring_base(core, r) + E1000_RING_DESC_LEN * core->mac[r->dh]; 825 } 826 827 static inline void 828 e1000e_ring_advance(E1000ECore *core, const E1000E_RingInfo *r, uint32_t count) 829 { 830 core->mac[r->dh] += count; 831 832 if (core->mac[r->dh] * E1000_RING_DESC_LEN >= core->mac[r->dlen]) { 833 core->mac[r->dh] = 0; 834 } 835 } 836 837 static inline uint32_t 838 e1000e_ring_free_descr_num(E1000ECore *core, const E1000E_RingInfo *r) 839 { 840 trace_e1000e_ring_free_space(r->idx, core->mac[r->dlen], 841 core->mac[r->dh], core->mac[r->dt]); 842 843 if (core->mac[r->dh] <= core->mac[r->dt]) { 844 return core->mac[r->dt] - core->mac[r->dh]; 845 } 846 847 if (core->mac[r->dh] > core->mac[r->dt]) { 848 return core->mac[r->dlen] / E1000_RING_DESC_LEN + 849 core->mac[r->dt] - core->mac[r->dh]; 850 } 851 852 g_assert_not_reached(); 853 return 0; 854 } 855 856 static inline bool 857 e1000e_ring_enabled(E1000ECore *core, const E1000E_RingInfo *r) 858 { 859 return core->mac[r->dlen] > 0; 860 } 861 862 static inline uint32_t 863 e1000e_ring_len(E1000ECore *core, const E1000E_RingInfo *r) 864 { 865 return core->mac[r->dlen]; 866 } 867 868 typedef struct E1000E_TxRing_st { 869 const E1000E_RingInfo *i; 870 struct e1000e_tx *tx; 871 } E1000E_TxRing; 872 873 static inline int 874 e1000e_mq_queue_idx(int base_reg_idx, int reg_idx) 875 { 876 return (reg_idx - base_reg_idx) / (0x100 >> 2); 877 } 878 879 static inline void 880 e1000e_tx_ring_init(E1000ECore *core, E1000E_TxRing *txr, int idx) 881 { 882 static const E1000E_RingInfo i[E1000E_NUM_QUEUES] = { 883 { TDBAH, TDBAL, TDLEN, TDH, TDT, 0 }, 884 { TDBAH1, TDBAL1, TDLEN1, TDH1, TDT1, 1 } 885 }; 886 887 assert(idx < ARRAY_SIZE(i)); 888 889 txr->i = &i[idx]; 890 txr->tx = &core->tx[idx]; 891 } 892 893 typedef struct E1000E_RxRing_st { 894 const E1000E_RingInfo *i; 895 } E1000E_RxRing; 896 897 static inline void 898 e1000e_rx_ring_init(E1000ECore *core, E1000E_RxRing *rxr, int idx) 899 { 900 static const E1000E_RingInfo i[E1000E_NUM_QUEUES] = { 901 { RDBAH0, RDBAL0, RDLEN0, RDH0, RDT0, 0 }, 902 { RDBAH1, RDBAL1, RDLEN1, RDH1, RDT1, 1 } 903 }; 904 905 assert(idx < ARRAY_SIZE(i)); 906 907 rxr->i = &i[idx]; 908 } 909 910 static void 911 e1000e_start_xmit(E1000ECore *core, const E1000E_TxRing *txr) 912 { 913 dma_addr_t base; 914 struct e1000_tx_desc desc; 915 bool ide = false; 916 const E1000E_RingInfo *txi = txr->i; 917 uint32_t cause = E1000_ICS_TXQE; 918 919 if (!(core->mac[TCTL] & E1000_TCTL_EN)) { 920 trace_e1000e_tx_disabled(); 921 return; 922 } 923 924 while (!e1000e_ring_empty(core, txi)) { 925 base = 
e1000e_ring_head_descr(core, txi); 926 927 pci_dma_read(core->owner, base, &desc, sizeof(desc)); 928 929 trace_e1000e_tx_descr((void *)(intptr_t)desc.buffer_addr, 930 desc.lower.data, desc.upper.data); 931 932 e1000e_process_tx_desc(core, txr->tx, &desc, txi->idx); 933 cause |= e1000e_txdesc_writeback(core, base, &desc, &ide, txi->idx); 934 935 e1000e_ring_advance(core, txi, 1); 936 } 937 938 if (!ide || !e1000e_intrmgr_delay_tx_causes(core, &cause)) { 939 e1000e_set_interrupt_cause(core, cause); 940 } 941 } 942 943 static bool 944 e1000e_has_rxbufs(E1000ECore *core, const E1000E_RingInfo *r, 945 size_t total_size) 946 { 947 uint32_t bufs = e1000e_ring_free_descr_num(core, r); 948 949 trace_e1000e_rx_has_buffers(r->idx, bufs, total_size, 950 core->rx_desc_buf_size); 951 952 return total_size <= bufs / (core->rx_desc_len / E1000_MIN_RX_DESC_LEN) * 953 core->rx_desc_buf_size; 954 } 955 956 static inline void 957 e1000e_start_recv(E1000ECore *core) 958 { 959 int i; 960 961 trace_e1000e_rx_start_recv(); 962 963 for (i = 0; i <= core->max_queue_num; i++) { 964 qemu_flush_queued_packets(qemu_get_subqueue(core->owner_nic, i)); 965 } 966 } 967 968 int 969 e1000e_can_receive(E1000ECore *core) 970 { 971 int i; 972 973 if (!e1000x_rx_ready(core->owner, core->mac)) { 974 return false; 975 } 976 977 for (i = 0; i < E1000E_NUM_QUEUES; i++) { 978 E1000E_RxRing rxr; 979 980 e1000e_rx_ring_init(core, &rxr, i); 981 if (e1000e_ring_enabled(core, rxr.i) && 982 e1000e_has_rxbufs(core, rxr.i, 1)) { 983 trace_e1000e_rx_can_recv(); 984 return true; 985 } 986 } 987 988 trace_e1000e_rx_can_recv_rings_full(); 989 return false; 990 } 991 992 ssize_t 993 e1000e_receive(E1000ECore *core, const uint8_t *buf, size_t size) 994 { 995 const struct iovec iov = { 996 .iov_base = (uint8_t *)buf, 997 .iov_len = size 998 }; 999 1000 return e1000e_receive_iov(core, &iov, 1); 1001 } 1002 1003 static inline bool 1004 e1000e_rx_l3_cso_enabled(E1000ECore *core) 1005 { 1006 return !!(core->mac[RXCSUM] & E1000_RXCSUM_IPOFLD); 1007 } 1008 1009 static inline bool 1010 e1000e_rx_l4_cso_enabled(E1000ECore *core) 1011 { 1012 return !!(core->mac[RXCSUM] & E1000_RXCSUM_TUOFLD); 1013 } 1014 1015 static bool 1016 e1000e_receive_filter(E1000ECore *core, const uint8_t *buf, int size) 1017 { 1018 uint32_t rctl = core->mac[RCTL]; 1019 1020 if (e1000x_is_vlan_packet(buf, core->vet) && 1021 e1000x_vlan_rx_filter_enabled(core->mac)) { 1022 uint16_t vid = lduw_be_p(buf + 14); 1023 uint32_t vfta = ldl_le_p((uint32_t *)(core->mac + VFTA) + 1024 ((vid >> 5) & 0x7f)); 1025 if ((vfta & (1 << (vid & 0x1f))) == 0) { 1026 trace_e1000e_rx_flt_vlan_mismatch(vid); 1027 return false; 1028 } else { 1029 trace_e1000e_rx_flt_vlan_match(vid); 1030 } 1031 } 1032 1033 switch (net_rx_pkt_get_packet_type(core->rx_pkt)) { 1034 case ETH_PKT_UCAST: 1035 if (rctl & E1000_RCTL_UPE) { 1036 return true; /* promiscuous ucast */ 1037 } 1038 break; 1039 1040 case ETH_PKT_BCAST: 1041 if (rctl & E1000_RCTL_BAM) { 1042 return true; /* broadcast enabled */ 1043 } 1044 break; 1045 1046 case ETH_PKT_MCAST: 1047 if (rctl & E1000_RCTL_MPE) { 1048 return true; /* promiscuous mcast */ 1049 } 1050 break; 1051 1052 default: 1053 g_assert_not_reached(); 1054 } 1055 1056 return e1000x_rx_group_filter(core->mac, buf); 1057 } 1058 1059 static inline void 1060 e1000e_read_lgcy_rx_descr(E1000ECore *core, uint8_t *desc, hwaddr *buff_addr) 1061 { 1062 struct e1000_rx_desc *d = (struct e1000_rx_desc *) desc; 1063 *buff_addr = le64_to_cpu(d->buffer_addr); 1064 } 1065 1066 static inline void 1067 
e1000e_read_ext_rx_descr(E1000ECore *core, uint8_t *desc, hwaddr *buff_addr) 1068 { 1069 union e1000_rx_desc_extended *d = (union e1000_rx_desc_extended *) desc; 1070 *buff_addr = le64_to_cpu(d->read.buffer_addr); 1071 } 1072 1073 static inline void 1074 e1000e_read_ps_rx_descr(E1000ECore *core, uint8_t *desc, 1075 hwaddr (*buff_addr)[MAX_PS_BUFFERS]) 1076 { 1077 int i; 1078 union e1000_rx_desc_packet_split *d = 1079 (union e1000_rx_desc_packet_split *) desc; 1080 1081 for (i = 0; i < MAX_PS_BUFFERS; i++) { 1082 (*buff_addr)[i] = le64_to_cpu(d->read.buffer_addr[i]); 1083 } 1084 1085 trace_e1000e_rx_desc_ps_read((*buff_addr)[0], (*buff_addr)[1], 1086 (*buff_addr)[2], (*buff_addr)[3]); 1087 } 1088 1089 static inline void 1090 e1000e_read_rx_descr(E1000ECore *core, uint8_t *desc, 1091 hwaddr (*buff_addr)[MAX_PS_BUFFERS]) 1092 { 1093 if (e1000e_rx_use_legacy_descriptor(core)) { 1094 e1000e_read_lgcy_rx_descr(core, desc, &(*buff_addr)[0]); 1095 (*buff_addr)[1] = (*buff_addr)[2] = (*buff_addr)[3] = 0; 1096 } else { 1097 if (core->mac[RCTL] & E1000_RCTL_DTYP_PS) { 1098 e1000e_read_ps_rx_descr(core, desc, buff_addr); 1099 } else { 1100 e1000e_read_ext_rx_descr(core, desc, &(*buff_addr)[0]); 1101 (*buff_addr)[1] = (*buff_addr)[2] = (*buff_addr)[3] = 0; 1102 } 1103 } 1104 } 1105 1106 static void 1107 e1000e_verify_csum_in_sw(E1000ECore *core, 1108 struct NetRxPkt *pkt, 1109 uint32_t *status_flags, 1110 bool istcp, bool isudp) 1111 { 1112 bool csum_valid; 1113 uint32_t csum_error; 1114 1115 if (e1000e_rx_l3_cso_enabled(core)) { 1116 if (!net_rx_pkt_validate_l3_csum(pkt, &csum_valid)) { 1117 trace_e1000e_rx_metadata_l3_csum_validation_failed(); 1118 } else { 1119 csum_error = csum_valid ? 0 : E1000_RXDEXT_STATERR_IPE; 1120 *status_flags |= E1000_RXD_STAT_IPCS | csum_error; 1121 } 1122 } else { 1123 trace_e1000e_rx_metadata_l3_cso_disabled(); 1124 } 1125 1126 if (!e1000e_rx_l4_cso_enabled(core)) { 1127 trace_e1000e_rx_metadata_l4_cso_disabled(); 1128 return; 1129 } 1130 1131 if (!net_rx_pkt_validate_l4_csum(pkt, &csum_valid)) { 1132 trace_e1000e_rx_metadata_l4_csum_validation_failed(); 1133 return; 1134 } 1135 1136 csum_error = csum_valid ? 
0 : E1000_RXDEXT_STATERR_TCPE; 1137 1138 if (istcp) { 1139 *status_flags |= E1000_RXD_STAT_TCPCS | 1140 csum_error; 1141 } else if (isudp) { 1142 *status_flags |= E1000_RXD_STAT_TCPCS | 1143 E1000_RXD_STAT_UDPCS | 1144 csum_error; 1145 } 1146 } 1147 1148 static inline bool 1149 e1000e_is_tcp_ack(E1000ECore *core, struct NetRxPkt *rx_pkt) 1150 { 1151 if (!net_rx_pkt_is_tcp_ack(rx_pkt)) { 1152 return false; 1153 } 1154 1155 if (core->mac[RFCTL] & E1000_RFCTL_ACK_DATA_DIS) { 1156 return !net_rx_pkt_has_tcp_data(rx_pkt); 1157 } 1158 1159 return true; 1160 } 1161 1162 static void 1163 e1000e_build_rx_metadata(E1000ECore *core, 1164 struct NetRxPkt *pkt, 1165 bool is_eop, 1166 const E1000E_RSSInfo *rss_info, 1167 uint32_t *rss, uint32_t *mrq, 1168 uint32_t *status_flags, 1169 uint16_t *ip_id, 1170 uint16_t *vlan_tag) 1171 { 1172 struct virtio_net_hdr *vhdr; 1173 bool isip4, isip6, istcp, isudp; 1174 uint32_t pkt_type; 1175 1176 *status_flags = E1000_RXD_STAT_DD; 1177 1178 /* No additional metadata needed for non-EOP descriptors */ 1179 if (!is_eop) { 1180 goto func_exit; 1181 } 1182 1183 *status_flags |= E1000_RXD_STAT_EOP; 1184 1185 net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp); 1186 trace_e1000e_rx_metadata_protocols(isip4, isip6, isudp, istcp); 1187 1188 /* VLAN state */ 1189 if (net_rx_pkt_is_vlan_stripped(pkt)) { 1190 *status_flags |= E1000_RXD_STAT_VP; 1191 *vlan_tag = cpu_to_le16(net_rx_pkt_get_vlan_tag(pkt)); 1192 trace_e1000e_rx_metadata_vlan(*vlan_tag); 1193 } 1194 1195 /* Packet parsing results */ 1196 if ((core->mac[RXCSUM] & E1000_RXCSUM_PCSD) != 0) { 1197 if (rss_info->enabled) { 1198 *rss = cpu_to_le32(rss_info->hash); 1199 *mrq = cpu_to_le32(rss_info->type | (rss_info->queue << 8)); 1200 trace_e1000e_rx_metadata_rss(*rss, *mrq); 1201 } 1202 } else if (isip4) { 1203 *status_flags |= E1000_RXD_STAT_IPIDV; 1204 *ip_id = cpu_to_le16(net_rx_pkt_get_ip_id(pkt)); 1205 trace_e1000e_rx_metadata_ip_id(*ip_id); 1206 } 1207 1208 if (istcp && e1000e_is_tcp_ack(core, pkt)) { 1209 *status_flags |= E1000_RXD_STAT_ACK; 1210 trace_e1000e_rx_metadata_ack(); 1211 } 1212 1213 if (isip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_DIS)) { 1214 trace_e1000e_rx_metadata_ipv6_filtering_disabled(); 1215 pkt_type = E1000_RXD_PKT_MAC; 1216 } else if (istcp || isudp) { 1217 pkt_type = isip4 ? E1000_RXD_PKT_IP4_XDP : E1000_RXD_PKT_IP6_XDP; 1218 } else if (isip4 || isip6) { 1219 pkt_type = isip4 ? E1000_RXD_PKT_IP4 : E1000_RXD_PKT_IP6; 1220 } else { 1221 pkt_type = E1000_RXD_PKT_MAC; 1222 } 1223 1224 *status_flags |= E1000_RXD_PKT_TYPE(pkt_type); 1225 trace_e1000e_rx_metadata_pkt_type(pkt_type); 1226 1227 /* RX CSO information */ 1228 if (isip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_XSUM_DIS)) { 1229 trace_e1000e_rx_metadata_ipv6_sum_disabled(); 1230 goto func_exit; 1231 } 1232 1233 if (!net_rx_pkt_has_virt_hdr(pkt)) { 1234 trace_e1000e_rx_metadata_no_virthdr(); 1235 e1000e_verify_csum_in_sw(core, pkt, status_flags, istcp, isudp); 1236 goto func_exit; 1237 } 1238 1239 vhdr = net_rx_pkt_get_vhdr(pkt); 1240 1241 if (!(vhdr->flags & VIRTIO_NET_HDR_F_DATA_VALID) && 1242 !(vhdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) { 1243 trace_e1000e_rx_metadata_virthdr_no_csum_info(); 1244 e1000e_verify_csum_in_sw(core, pkt, status_flags, istcp, isudp); 1245 goto func_exit; 1246 } 1247 1248 if (e1000e_rx_l3_cso_enabled(core)) { 1249 *status_flags |= isip4 ? 
E1000_RXD_STAT_IPCS : 0; 1250 } else { 1251 trace_e1000e_rx_metadata_l3_cso_disabled(); 1252 } 1253 1254 if (e1000e_rx_l4_cso_enabled(core)) { 1255 if (istcp) { 1256 *status_flags |= E1000_RXD_STAT_TCPCS; 1257 } else if (isudp) { 1258 *status_flags |= E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS; 1259 } 1260 } else { 1261 trace_e1000e_rx_metadata_l4_cso_disabled(); 1262 } 1263 1264 trace_e1000e_rx_metadata_status_flags(*status_flags); 1265 1266 func_exit: 1267 *status_flags = cpu_to_le32(*status_flags); 1268 } 1269 1270 static inline void 1271 e1000e_write_lgcy_rx_descr(E1000ECore *core, uint8_t *desc, 1272 struct NetRxPkt *pkt, 1273 const E1000E_RSSInfo *rss_info, 1274 uint16_t length) 1275 { 1276 uint32_t status_flags, rss, mrq; 1277 uint16_t ip_id; 1278 1279 struct e1000_rx_desc *d = (struct e1000_rx_desc *) desc; 1280 1281 memset(d, 0, sizeof(*d)); 1282 1283 assert(!rss_info->enabled); 1284 1285 d->length = cpu_to_le16(length); 1286 1287 e1000e_build_rx_metadata(core, pkt, pkt != NULL, 1288 rss_info, 1289 &rss, &mrq, 1290 &status_flags, &ip_id, 1291 &d->special); 1292 d->errors = (uint8_t) (le32_to_cpu(status_flags) >> 24); 1293 d->status = (uint8_t) le32_to_cpu(status_flags); 1294 } 1295 1296 static inline void 1297 e1000e_write_ext_rx_descr(E1000ECore *core, uint8_t *desc, 1298 struct NetRxPkt *pkt, 1299 const E1000E_RSSInfo *rss_info, 1300 uint16_t length) 1301 { 1302 union e1000_rx_desc_extended *d = (union e1000_rx_desc_extended *) desc; 1303 1304 memset(d, 0, sizeof(*d)); 1305 1306 d->wb.upper.length = cpu_to_le16(length); 1307 1308 e1000e_build_rx_metadata(core, pkt, pkt != NULL, 1309 rss_info, 1310 &d->wb.lower.hi_dword.rss, 1311 &d->wb.lower.mrq, 1312 &d->wb.upper.status_error, 1313 &d->wb.lower.hi_dword.csum_ip.ip_id, 1314 &d->wb.upper.vlan); 1315 } 1316 1317 static inline void 1318 e1000e_write_ps_rx_descr(E1000ECore *core, uint8_t *desc, 1319 struct NetRxPkt *pkt, 1320 const E1000E_RSSInfo *rss_info, 1321 size_t ps_hdr_len, 1322 uint16_t(*written)[MAX_PS_BUFFERS]) 1323 { 1324 int i; 1325 union e1000_rx_desc_packet_split *d = 1326 (union e1000_rx_desc_packet_split *) desc; 1327 1328 memset(d, 0, sizeof(*d)); 1329 1330 d->wb.middle.length0 = cpu_to_le16((*written)[0]); 1331 1332 for (i = 0; i < PS_PAGE_BUFFERS; i++) { 1333 d->wb.upper.length[i] = cpu_to_le16((*written)[i + 1]); 1334 } 1335 1336 e1000e_build_rx_metadata(core, pkt, pkt != NULL, 1337 rss_info, 1338 &d->wb.lower.hi_dword.rss, 1339 &d->wb.lower.mrq, 1340 &d->wb.middle.status_error, 1341 &d->wb.lower.hi_dword.csum_ip.ip_id, 1342 &d->wb.middle.vlan); 1343 1344 d->wb.upper.header_status = 1345 cpu_to_le16(ps_hdr_len | (ps_hdr_len ? 
E1000_RXDPS_HDRSTAT_HDRSP : 0)); 1346 1347 trace_e1000e_rx_desc_ps_write((*written)[0], (*written)[1], 1348 (*written)[2], (*written)[3]); 1349 } 1350 1351 static inline void 1352 e1000e_write_rx_descr(E1000ECore *core, uint8_t *desc, 1353 struct NetRxPkt *pkt, const E1000E_RSSInfo *rss_info, 1354 size_t ps_hdr_len, uint16_t(*written)[MAX_PS_BUFFERS]) 1355 { 1356 if (e1000e_rx_use_legacy_descriptor(core)) { 1357 assert(ps_hdr_len == 0); 1358 e1000e_write_lgcy_rx_descr(core, desc, pkt, rss_info, (*written)[0]); 1359 } else { 1360 if (core->mac[RCTL] & E1000_RCTL_DTYP_PS) { 1361 e1000e_write_ps_rx_descr(core, desc, pkt, rss_info, 1362 ps_hdr_len, written); 1363 } else { 1364 assert(ps_hdr_len == 0); 1365 e1000e_write_ext_rx_descr(core, desc, pkt, rss_info, 1366 (*written)[0]); 1367 } 1368 } 1369 } 1370 1371 typedef struct e1000e_ba_state_st { 1372 uint16_t written[MAX_PS_BUFFERS]; 1373 uint8_t cur_idx; 1374 } e1000e_ba_state; 1375 1376 static inline void 1377 e1000e_write_hdr_to_rx_buffers(E1000ECore *core, 1378 hwaddr (*ba)[MAX_PS_BUFFERS], 1379 e1000e_ba_state *bastate, 1380 const char *data, 1381 dma_addr_t data_len) 1382 { 1383 assert(data_len <= core->rxbuf_sizes[0] - bastate->written[0]); 1384 1385 pci_dma_write(core->owner, (*ba)[0] + bastate->written[0], data, data_len); 1386 bastate->written[0] += data_len; 1387 1388 bastate->cur_idx = 1; 1389 } 1390 1391 static void 1392 e1000e_write_to_rx_buffers(E1000ECore *core, 1393 hwaddr (*ba)[MAX_PS_BUFFERS], 1394 e1000e_ba_state *bastate, 1395 const char *data, 1396 dma_addr_t data_len) 1397 { 1398 while (data_len > 0) { 1399 uint32_t cur_buf_len = core->rxbuf_sizes[bastate->cur_idx]; 1400 uint32_t cur_buf_bytes_left = cur_buf_len - 1401 bastate->written[bastate->cur_idx]; 1402 uint32_t bytes_to_write = MIN(data_len, cur_buf_bytes_left); 1403 1404 trace_e1000e_rx_desc_buff_write(bastate->cur_idx, 1405 (*ba)[bastate->cur_idx], 1406 bastate->written[bastate->cur_idx], 1407 data, 1408 bytes_to_write); 1409 1410 pci_dma_write(core->owner, 1411 (*ba)[bastate->cur_idx] + bastate->written[bastate->cur_idx], 1412 data, bytes_to_write); 1413 1414 bastate->written[bastate->cur_idx] += bytes_to_write; 1415 data += bytes_to_write; 1416 data_len -= bytes_to_write; 1417 1418 if (bastate->written[bastate->cur_idx] == cur_buf_len) { 1419 bastate->cur_idx++; 1420 } 1421 1422 assert(bastate->cur_idx < MAX_PS_BUFFERS); 1423 } 1424 } 1425 1426 static void 1427 e1000e_update_rx_stats(E1000ECore *core, 1428 size_t data_size, 1429 size_t data_fcs_size) 1430 { 1431 e1000x_update_rx_total_stats(core->mac, data_size, data_fcs_size); 1432 1433 switch (net_rx_pkt_get_packet_type(core->rx_pkt)) { 1434 case ETH_PKT_BCAST: 1435 e1000x_inc_reg_if_not_full(core->mac, BPRC); 1436 break; 1437 1438 case ETH_PKT_MCAST: 1439 e1000x_inc_reg_if_not_full(core->mac, MPRC); 1440 break; 1441 1442 default: 1443 break; 1444 } 1445 } 1446 1447 static inline bool 1448 e1000e_rx_descr_threshold_hit(E1000ECore *core, const E1000E_RingInfo *rxi) 1449 { 1450 return e1000e_ring_free_descr_num(core, rxi) == 1451 e1000e_ring_len(core, rxi) >> core->rxbuf_min_shift; 1452 } 1453 1454 static bool 1455 e1000e_do_ps(E1000ECore *core, struct NetRxPkt *pkt, size_t *hdr_len) 1456 { 1457 bool isip4, isip6, isudp, istcp; 1458 bool fragment; 1459 1460 if (!e1000e_rx_use_ps_descriptor(core)) { 1461 return false; 1462 } 1463 1464 net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp); 1465 1466 if (isip4) { 1467 fragment = net_rx_pkt_get_ip4_info(pkt)->fragment; 1468 } else if (isip6) { 1469 fragment 
= net_rx_pkt_get_ip6_info(pkt)->fragment; 1470 } else { 1471 return false; 1472 } 1473 1474 if (fragment && (core->mac[RFCTL] & E1000_RFCTL_IPFRSP_DIS)) { 1475 return false; 1476 } 1477 1478 if (!fragment && (isudp || istcp)) { 1479 *hdr_len = net_rx_pkt_get_l5_hdr_offset(pkt); 1480 } else { 1481 *hdr_len = net_rx_pkt_get_l4_hdr_offset(pkt); 1482 } 1483 1484 if ((*hdr_len > core->rxbuf_sizes[0]) || 1485 (*hdr_len > net_rx_pkt_get_total_len(pkt))) { 1486 return false; 1487 } 1488 1489 return true; 1490 } 1491 1492 static void 1493 e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt, 1494 const E1000E_RxRing *rxr, 1495 const E1000E_RSSInfo *rss_info) 1496 { 1497 PCIDevice *d = core->owner; 1498 dma_addr_t base; 1499 uint8_t desc[E1000_MAX_RX_DESC_LEN]; 1500 size_t desc_size; 1501 size_t desc_offset = 0; 1502 size_t iov_ofs = 0; 1503 1504 struct iovec *iov = net_rx_pkt_get_iovec(pkt); 1505 size_t size = net_rx_pkt_get_total_len(pkt); 1506 size_t total_size = size + e1000x_fcs_len(core->mac); 1507 const E1000E_RingInfo *rxi; 1508 size_t ps_hdr_len = 0; 1509 bool do_ps = e1000e_do_ps(core, pkt, &ps_hdr_len); 1510 1511 rxi = rxr->i; 1512 1513 do { 1514 hwaddr ba[MAX_PS_BUFFERS]; 1515 e1000e_ba_state bastate = { { 0 } }; 1516 bool is_last = false; 1517 bool is_first = true; 1518 1519 desc_size = total_size - desc_offset; 1520 1521 if (desc_size > core->rx_desc_buf_size) { 1522 desc_size = core->rx_desc_buf_size; 1523 } 1524 1525 base = e1000e_ring_head_descr(core, rxi); 1526 1527 pci_dma_read(d, base, &desc, core->rx_desc_len); 1528 1529 trace_e1000e_rx_descr(rxi->idx, base, core->rx_desc_len); 1530 1531 e1000e_read_rx_descr(core, desc, &ba); 1532 1533 if (ba[0]) { 1534 if (desc_offset < size) { 1535 static const uint32_t fcs_pad; 1536 size_t iov_copy; 1537 size_t copy_size = size - desc_offset; 1538 if (copy_size > core->rx_desc_buf_size) { 1539 copy_size = core->rx_desc_buf_size; 1540 } 1541 1542 /* For PS mode copy the packet header first */ 1543 if (do_ps) { 1544 if (is_first) { 1545 size_t ps_hdr_copied = 0; 1546 do { 1547 iov_copy = MIN(ps_hdr_len - ps_hdr_copied, 1548 iov->iov_len - iov_ofs); 1549 1550 e1000e_write_hdr_to_rx_buffers(core, &ba, &bastate, 1551 iov->iov_base, iov_copy); 1552 1553 copy_size -= iov_copy; 1554 ps_hdr_copied += iov_copy; 1555 1556 iov_ofs += iov_copy; 1557 if (iov_ofs == iov->iov_len) { 1558 iov++; 1559 iov_ofs = 0; 1560 } 1561 } while (ps_hdr_copied < ps_hdr_len); 1562 1563 is_first = false; 1564 } else { 1565 /* Leave buffer 0 of each descriptor except first */ 1566 /* empty as per spec 7.1.5.1 */ 1567 e1000e_write_hdr_to_rx_buffers(core, &ba, &bastate, 1568 NULL, 0); 1569 } 1570 } 1571 1572 /* Copy packet payload */ 1573 while (copy_size) { 1574 iov_copy = MIN(copy_size, iov->iov_len - iov_ofs); 1575 1576 e1000e_write_to_rx_buffers(core, &ba, &bastate, 1577 iov->iov_base + iov_ofs, iov_copy); 1578 1579 copy_size -= iov_copy; 1580 iov_ofs += iov_copy; 1581 if (iov_ofs == iov->iov_len) { 1582 iov++; 1583 iov_ofs = 0; 1584 } 1585 } 1586 1587 if (desc_offset + desc_size >= total_size) { 1588 /* Simulate FCS checksum presence in the last descriptor */ 1589 e1000e_write_to_rx_buffers(core, &ba, &bastate, 1590 (const char *) &fcs_pad, e1000x_fcs_len(core->mac)); 1591 } 1592 } 1593 desc_offset += desc_size; 1594 if (desc_offset >= total_size) { 1595 is_last = true; 1596 } 1597 } else { /* as per intel docs; skip descriptors with null buf addr */ 1598 trace_e1000e_rx_null_descriptor(); 1599 } 1600 1601 e1000e_write_rx_descr(core, desc, is_last ? 
core->rx_pkt : NULL, 1602 rss_info, do_ps ? ps_hdr_len : 0, &bastate.written); 1603 pci_dma_write(d, base, &desc, core->rx_desc_len); 1604 1605 e1000e_ring_advance(core, rxi, 1606 core->rx_desc_len / E1000_MIN_RX_DESC_LEN); 1607 1608 } while (desc_offset < total_size); 1609 1610 e1000e_update_rx_stats(core, size, total_size); 1611 } 1612 1613 static inline void 1614 e1000e_rx_fix_l4_csum(E1000ECore *core, struct NetRxPkt *pkt) 1615 { 1616 if (net_rx_pkt_has_virt_hdr(pkt)) { 1617 struct virtio_net_hdr *vhdr = net_rx_pkt_get_vhdr(pkt); 1618 1619 if (vhdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { 1620 net_rx_pkt_fix_l4_csum(pkt); 1621 } 1622 } 1623 } 1624 1625 ssize_t 1626 e1000e_receive_iov(E1000ECore *core, const struct iovec *iov, int iovcnt) 1627 { 1628 static const int maximum_ethernet_hdr_len = (14 + 4); 1629 /* Min. octets in an ethernet frame sans FCS */ 1630 static const int min_buf_size = 60; 1631 1632 uint32_t n = 0; 1633 uint8_t min_buf[min_buf_size]; 1634 struct iovec min_iov; 1635 uint8_t *filter_buf; 1636 size_t size, orig_size; 1637 size_t iov_ofs = 0; 1638 E1000E_RxRing rxr; 1639 E1000E_RSSInfo rss_info; 1640 size_t total_size; 1641 ssize_t retval; 1642 bool rdmts_hit; 1643 1644 trace_e1000e_rx_receive_iov(iovcnt); 1645 1646 if (!e1000x_hw_rx_enabled(core->mac)) { 1647 return -1; 1648 } 1649 1650 /* Pull virtio header in */ 1651 if (core->has_vnet) { 1652 net_rx_pkt_set_vhdr_iovec(core->rx_pkt, iov, iovcnt); 1653 iov_ofs = sizeof(struct virtio_net_hdr); 1654 } 1655 1656 filter_buf = iov->iov_base + iov_ofs; 1657 orig_size = iov_size(iov, iovcnt); 1658 size = orig_size - iov_ofs; 1659 1660 /* Pad to minimum Ethernet frame length */ 1661 if (size < sizeof(min_buf)) { 1662 iov_to_buf(iov, iovcnt, iov_ofs, min_buf, size); 1663 memset(&min_buf[size], 0, sizeof(min_buf) - size); 1664 e1000x_inc_reg_if_not_full(core->mac, RUC); 1665 min_iov.iov_base = filter_buf = min_buf; 1666 min_iov.iov_len = size = sizeof(min_buf); 1667 iovcnt = 1; 1668 iov = &min_iov; 1669 iov_ofs = 0; 1670 } else if (iov->iov_len < maximum_ethernet_hdr_len) { 1671 /* This is very unlikely, but may happen. */ 1672 iov_to_buf(iov, iovcnt, iov_ofs, min_buf, maximum_ethernet_hdr_len); 1673 filter_buf = min_buf; 1674 } 1675 1676 /* Discard oversized packets if !LPE and !SBP. 
*/ 1677 if (e1000x_is_oversized(core->mac, size)) { 1678 return orig_size; 1679 } 1680 1681 net_rx_pkt_set_packet_type(core->rx_pkt, 1682 get_eth_packet_type(PKT_GET_ETH_HDR(filter_buf))); 1683 1684 if (!e1000e_receive_filter(core, filter_buf, size)) { 1685 trace_e1000e_rx_flt_dropped(); 1686 return orig_size; 1687 } 1688 1689 net_rx_pkt_attach_iovec_ex(core->rx_pkt, iov, iovcnt, iov_ofs, 1690 e1000x_vlan_enabled(core->mac), core->vet); 1691 1692 e1000e_rss_parse_packet(core, core->rx_pkt, &rss_info); 1693 e1000e_rx_ring_init(core, &rxr, rss_info.queue); 1694 1695 trace_e1000e_rx_rss_dispatched_to_queue(rxr.i->idx); 1696 1697 total_size = net_rx_pkt_get_total_len(core->rx_pkt) + 1698 e1000x_fcs_len(core->mac); 1699 1700 if (e1000e_has_rxbufs(core, rxr.i, total_size)) { 1701 e1000e_rx_fix_l4_csum(core, core->rx_pkt); 1702 1703 e1000e_write_packet_to_guest(core, core->rx_pkt, &rxr, &rss_info); 1704 1705 retval = orig_size; 1706 1707 /* Perform small receive detection (RSRPD) */ 1708 if (total_size < core->mac[RSRPD]) { 1709 n |= E1000_ICS_SRPD; 1710 } 1711 1712 /* Perform ACK receive detection */ 1713 if (e1000e_is_tcp_ack(core, core->rx_pkt)) { 1714 n |= E1000_ICS_ACK; 1715 } 1716 1717 /* Check if receive descriptor minimum threshold hit */ 1718 rdmts_hit = e1000e_rx_descr_threshold_hit(core, rxr.i); 1719 n |= e1000e_rx_wb_interrupt_cause(core, rxr.i->idx, rdmts_hit); 1720 1721 trace_e1000e_rx_written_to_guest(n); 1722 } else { 1723 n |= E1000_ICS_RXO; 1724 retval = 0; 1725 1726 trace_e1000e_rx_not_written_to_guest(n); 1727 } 1728 1729 if (!e1000e_intrmgr_delay_rx_causes(core, &n)) { 1730 trace_e1000e_rx_interrupt_set(n); 1731 e1000e_set_interrupt_cause(core, n); 1732 } else { 1733 trace_e1000e_rx_interrupt_delayed(n); 1734 } 1735 1736 return retval; 1737 } 1738 1739 static inline bool 1740 e1000e_have_autoneg(E1000ECore *core) 1741 { 1742 return core->phy[0][PHY_CTRL] & MII_CR_AUTO_NEG_EN; 1743 } 1744 1745 static void e1000e_update_flowctl_status(E1000ECore *core) 1746 { 1747 if (e1000e_have_autoneg(core) && 1748 core->phy[0][PHY_STATUS] & MII_SR_AUTONEG_COMPLETE) { 1749 trace_e1000e_link_autoneg_flowctl(true); 1750 core->mac[CTRL] |= E1000_CTRL_TFCE | E1000_CTRL_RFCE; 1751 } else { 1752 trace_e1000e_link_autoneg_flowctl(false); 1753 } 1754 } 1755 1756 static inline void 1757 e1000e_link_down(E1000ECore *core) 1758 { 1759 e1000x_update_regs_on_link_down(core->mac, core->phy[0]); 1760 e1000e_update_flowctl_status(core); 1761 } 1762 1763 static inline void 1764 e1000e_set_phy_ctrl(E1000ECore *core, int index, uint16_t val) 1765 { 1766 /* bits 0-5 reserved; MII_CR_[RESTART_AUTO_NEG,RESET] are self clearing */ 1767 core->phy[0][PHY_CTRL] = val & ~(0x3f | 1768 MII_CR_RESET | 1769 MII_CR_RESTART_AUTO_NEG); 1770 1771 if ((val & MII_CR_RESTART_AUTO_NEG) && 1772 e1000e_have_autoneg(core)) { 1773 e1000x_restart_autoneg(core->mac, core->phy[0], core->autoneg_timer); 1774 } 1775 } 1776 1777 static void 1778 e1000e_set_phy_oem_bits(E1000ECore *core, int index, uint16_t val) 1779 { 1780 core->phy[0][PHY_OEM_BITS] = val & ~BIT(10); 1781 1782 if (val & BIT(10)) { 1783 e1000x_restart_autoneg(core->mac, core->phy[0], core->autoneg_timer); 1784 } 1785 } 1786 1787 static void 1788 e1000e_set_phy_page(E1000ECore *core, int index, uint16_t val) 1789 { 1790 core->phy[0][PHY_PAGE] = val & PHY_PAGE_RW_MASK; 1791 } 1792 1793 void 1794 e1000e_core_set_link_status(E1000ECore *core) 1795 { 1796 NetClientState *nc = qemu_get_queue(core->owner_nic); 1797 uint32_t old_status = core->mac[STATUS]; 1798 1799 
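    /* Mirror the backend link state into MAC/PHY registers; raise LSC on change */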
trace_e1000e_link_status_changed(nc->link_down ? false : true); 1800 1801 if (nc->link_down) { 1802 e1000x_update_regs_on_link_down(core->mac, core->phy[0]); 1803 } else { 1804 if (e1000e_have_autoneg(core) && 1805 !(core->phy[0][PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) { 1806 e1000x_restart_autoneg(core->mac, core->phy[0], 1807 core->autoneg_timer); 1808 } else { 1809 e1000x_update_regs_on_link_up(core->mac, core->phy[0]); 1810 } 1811 } 1812 1813 if (core->mac[STATUS] != old_status) { 1814 e1000e_set_interrupt_cause(core, E1000_ICR_LSC); 1815 } 1816 } 1817 1818 static void 1819 e1000e_set_ctrl(E1000ECore *core, int index, uint32_t val) 1820 { 1821 trace_e1000e_core_ctrl_write(index, val); 1822 1823 /* RST is self clearing */ 1824 core->mac[CTRL] = val & ~E1000_CTRL_RST; 1825 core->mac[CTRL_DUP] = core->mac[CTRL]; 1826 1827 trace_e1000e_link_set_params( 1828 !!(val & E1000_CTRL_ASDE), 1829 (val & E1000_CTRL_SPD_SEL) >> E1000_CTRL_SPD_SHIFT, 1830 !!(val & E1000_CTRL_FRCSPD), 1831 !!(val & E1000_CTRL_FRCDPX), 1832 !!(val & E1000_CTRL_RFCE), 1833 !!(val & E1000_CTRL_TFCE)); 1834 1835 if (val & E1000_CTRL_RST) { 1836 trace_e1000e_core_ctrl_sw_reset(); 1837 e1000x_reset_mac_addr(core->owner_nic, core->mac, core->permanent_mac); 1838 } 1839 1840 if (val & E1000_CTRL_PHY_RST) { 1841 trace_e1000e_core_ctrl_phy_reset(); 1842 core->mac[STATUS] |= E1000_STATUS_PHYRA; 1843 } 1844 } 1845 1846 static void 1847 e1000e_set_rfctl(E1000ECore *core, int index, uint32_t val) 1848 { 1849 trace_e1000e_rx_set_rfctl(val); 1850 1851 if (!(val & E1000_RFCTL_ISCSI_DIS)) { 1852 trace_e1000e_wrn_iscsi_filtering_not_supported(); 1853 } 1854 1855 if (!(val & E1000_RFCTL_NFSW_DIS)) { 1856 trace_e1000e_wrn_nfsw_filtering_not_supported(); 1857 } 1858 1859 if (!(val & E1000_RFCTL_NFSR_DIS)) { 1860 trace_e1000e_wrn_nfsr_filtering_not_supported(); 1861 } 1862 1863 core->mac[RFCTL] = val; 1864 } 1865 1866 static void 1867 e1000e_calc_per_desc_buf_size(E1000ECore *core) 1868 { 1869 int i; 1870 core->rx_desc_buf_size = 0; 1871 1872 for (i = 0; i < ARRAY_SIZE(core->rxbuf_sizes); i++) { 1873 core->rx_desc_buf_size += core->rxbuf_sizes[i]; 1874 } 1875 } 1876 1877 static void 1878 e1000e_parse_rxbufsize(E1000ECore *core) 1879 { 1880 uint32_t rctl = core->mac[RCTL]; 1881 1882 memset(core->rxbuf_sizes, 0, sizeof(core->rxbuf_sizes)); 1883 1884 if (rctl & E1000_RCTL_DTYP_MASK) { 1885 uint32_t bsize; 1886 1887 bsize = core->mac[PSRCTL] & E1000_PSRCTL_BSIZE0_MASK; 1888 core->rxbuf_sizes[0] = (bsize >> E1000_PSRCTL_BSIZE0_SHIFT) * 128; 1889 1890 bsize = core->mac[PSRCTL] & E1000_PSRCTL_BSIZE1_MASK; 1891 core->rxbuf_sizes[1] = (bsize >> E1000_PSRCTL_BSIZE1_SHIFT) * 1024; 1892 1893 bsize = core->mac[PSRCTL] & E1000_PSRCTL_BSIZE2_MASK; 1894 core->rxbuf_sizes[2] = (bsize >> E1000_PSRCTL_BSIZE2_SHIFT) * 1024; 1895 1896 bsize = core->mac[PSRCTL] & E1000_PSRCTL_BSIZE3_MASK; 1897 core->rxbuf_sizes[3] = (bsize >> E1000_PSRCTL_BSIZE3_SHIFT) * 1024; 1898 } else if (rctl & E1000_RCTL_FLXBUF_MASK) { 1899 int flxbuf = rctl & E1000_RCTL_FLXBUF_MASK; 1900 core->rxbuf_sizes[0] = (flxbuf >> E1000_RCTL_FLXBUF_SHIFT) * 1024; 1901 } else { 1902 core->rxbuf_sizes[0] = e1000x_rxbufsize(rctl); 1903 } 1904 1905 trace_e1000e_rx_desc_buff_sizes(core->rxbuf_sizes[0], core->rxbuf_sizes[1], 1906 core->rxbuf_sizes[2], core->rxbuf_sizes[3]); 1907 1908 e1000e_calc_per_desc_buf_size(core); 1909 } 1910 1911 static void 1912 e1000e_calc_rxdesclen(E1000ECore *core) 1913 { 1914 if (e1000e_rx_use_legacy_descriptor(core)) { 1915 core->rx_desc_len = sizeof(struct e1000_rx_desc); 
1916 } else { 1917 if (core->mac[RCTL] & E1000_RCTL_DTYP_PS) { 1918 core->rx_desc_len = sizeof(union e1000_rx_desc_packet_split); 1919 } else { 1920 core->rx_desc_len = sizeof(union e1000_rx_desc_extended); 1921 } 1922 } 1923 trace_e1000e_rx_desc_len(core->rx_desc_len); 1924 } 1925 1926 static void 1927 e1000e_set_rx_control(E1000ECore *core, int index, uint32_t val) 1928 { 1929 core->mac[RCTL] = val; 1930 trace_e1000e_rx_set_rctl(core->mac[RCTL]); 1931 1932 if (val & E1000_RCTL_EN) { 1933 e1000e_parse_rxbufsize(core); 1934 e1000e_calc_rxdesclen(core); 1935 core->rxbuf_min_shift = ((val / E1000_RCTL_RDMTS_QUAT) & 3) + 1 + 1936 E1000_RING_DESC_LEN_SHIFT; 1937 1938 e1000e_start_recv(core); 1939 } 1940 } 1941 1942 static 1943 void(*e1000e_phyreg_writeops[E1000E_PHY_PAGES][E1000E_PHY_PAGE_SIZE]) 1944 (E1000ECore *, int, uint16_t) = { 1945 [0] = { 1946 [PHY_CTRL] = e1000e_set_phy_ctrl, 1947 [PHY_PAGE] = e1000e_set_phy_page, 1948 [PHY_OEM_BITS] = e1000e_set_phy_oem_bits 1949 } 1950 }; 1951 1952 static inline void 1953 e1000e_clear_ims_bits(E1000ECore *core, uint32_t bits) 1954 { 1955 trace_e1000e_irq_clear_ims(bits, core->mac[IMS], core->mac[IMS] & ~bits); 1956 core->mac[IMS] &= ~bits; 1957 } 1958 1959 static inline bool 1960 e1000e_postpone_interrupt(bool *interrupt_pending, 1961 E1000IntrDelayTimer *timer) 1962 { 1963 if (timer->running) { 1964 trace_e1000e_irq_postponed_by_xitr(timer->delay_reg << 2); 1965 1966 *interrupt_pending = true; 1967 return true; 1968 } 1969 1970 if (timer->core->mac[timer->delay_reg] != 0) { 1971 e1000e_intrmgr_rearm_timer(timer); 1972 } 1973 1974 return false; 1975 } 1976 1977 static inline bool 1978 e1000e_itr_should_postpone(E1000ECore *core) 1979 { 1980 return e1000e_postpone_interrupt(&core->itr_intr_pending, &core->itr); 1981 } 1982 1983 static inline bool 1984 e1000e_eitr_should_postpone(E1000ECore *core, int idx) 1985 { 1986 return e1000e_postpone_interrupt(&core->eitr_intr_pending[idx], 1987 &core->eitr[idx]); 1988 } 1989 1990 static void 1991 e1000e_msix_notify_one(E1000ECore *core, uint32_t cause, uint32_t int_cfg) 1992 { 1993 uint32_t effective_eiac; 1994 1995 if (E1000_IVAR_ENTRY_VALID(int_cfg)) { 1996 uint32_t vec = E1000_IVAR_ENTRY_VEC(int_cfg); 1997 if (vec < E1000E_MSIX_VEC_NUM) { 1998 if (!e1000e_eitr_should_postpone(core, vec)) { 1999 trace_e1000e_irq_msix_notify_vec(vec); 2000 msix_notify(core->owner, vec); 2001 } 2002 } else { 2003 trace_e1000e_wrn_msix_vec_wrong(cause, int_cfg); 2004 } 2005 } else { 2006 trace_e1000e_wrn_msix_invalid(cause, int_cfg); 2007 } 2008 2009 if (core->mac[CTRL_EXT] & E1000_CTRL_EXT_EIAME) { 2010 trace_e1000e_irq_ims_clear_eiame(core->mac[IAM], cause); 2011 e1000e_clear_ims_bits(core, core->mac[IAM] & cause); 2012 } 2013 2014 trace_e1000e_irq_icr_clear_eiac(core->mac[ICR], core->mac[EIAC]); 2015 2016 if (core->mac[EIAC] & E1000_ICR_OTHER) { 2017 effective_eiac = (core->mac[EIAC] & E1000_EIAC_MASK) | 2018 E1000_ICR_OTHER_CAUSES; 2019 } else { 2020 effective_eiac = core->mac[EIAC] & E1000_EIAC_MASK; 2021 } 2022 core->mac[ICR] &= ~effective_eiac; 2023 } 2024 2025 static void 2026 e1000e_msix_notify(E1000ECore *core, uint32_t causes) 2027 { 2028 if (causes & E1000_ICR_RXQ0) { 2029 e1000e_msix_notify_one(core, E1000_ICR_RXQ0, 2030 E1000_IVAR_RXQ0(core->mac[IVAR])); 2031 } 2032 2033 if (causes & E1000_ICR_RXQ1) { 2034 e1000e_msix_notify_one(core, E1000_ICR_RXQ1, 2035 E1000_IVAR_RXQ1(core->mac[IVAR])); 2036 } 2037 2038 if (causes & E1000_ICR_TXQ0) { 2039 e1000e_msix_notify_one(core, E1000_ICR_TXQ0, 2040 
E1000_IVAR_TXQ0(core->mac[IVAR])); 2041 } 2042 2043 if (causes & E1000_ICR_TXQ1) { 2044 e1000e_msix_notify_one(core, E1000_ICR_TXQ1, 2045 E1000_IVAR_TXQ1(core->mac[IVAR])); 2046 } 2047 2048 if (causes & E1000_ICR_OTHER) { 2049 e1000e_msix_notify_one(core, E1000_ICR_OTHER, 2050 E1000_IVAR_OTHER(core->mac[IVAR])); 2051 } 2052 } 2053 2054 static void 2055 e1000e_msix_clear_one(E1000ECore *core, uint32_t cause, uint32_t int_cfg) 2056 { 2057 if (E1000_IVAR_ENTRY_VALID(int_cfg)) { 2058 uint32_t vec = E1000_IVAR_ENTRY_VEC(int_cfg); 2059 if (vec < E1000E_MSIX_VEC_NUM) { 2060 trace_e1000e_irq_msix_pending_clearing(cause, int_cfg, vec); 2061 msix_clr_pending(core->owner, vec); 2062 } else { 2063 trace_e1000e_wrn_msix_vec_wrong(cause, int_cfg); 2064 } 2065 } else { 2066 trace_e1000e_wrn_msix_invalid(cause, int_cfg); 2067 } 2068 } 2069 2070 static void 2071 e1000e_msix_clear(E1000ECore *core, uint32_t causes) 2072 { 2073 if (causes & E1000_ICR_RXQ0) { 2074 e1000e_msix_clear_one(core, E1000_ICR_RXQ0, 2075 E1000_IVAR_RXQ0(core->mac[IVAR])); 2076 } 2077 2078 if (causes & E1000_ICR_RXQ1) { 2079 e1000e_msix_clear_one(core, E1000_ICR_RXQ1, 2080 E1000_IVAR_RXQ1(core->mac[IVAR])); 2081 } 2082 2083 if (causes & E1000_ICR_TXQ0) { 2084 e1000e_msix_clear_one(core, E1000_ICR_TXQ0, 2085 E1000_IVAR_TXQ0(core->mac[IVAR])); 2086 } 2087 2088 if (causes & E1000_ICR_TXQ1) { 2089 e1000e_msix_clear_one(core, E1000_ICR_TXQ1, 2090 E1000_IVAR_TXQ1(core->mac[IVAR])); 2091 } 2092 2093 if (causes & E1000_ICR_OTHER) { 2094 e1000e_msix_clear_one(core, E1000_ICR_OTHER, 2095 E1000_IVAR_OTHER(core->mac[IVAR])); 2096 } 2097 } 2098 2099 static inline void 2100 e1000e_fix_icr_asserted(E1000ECore *core) 2101 { 2102 core->mac[ICR] &= ~E1000_ICR_ASSERTED; 2103 if (core->mac[ICR]) { 2104 core->mac[ICR] |= E1000_ICR_ASSERTED; 2105 } 2106 2107 trace_e1000e_irq_fix_icr_asserted(core->mac[ICR]); 2108 } 2109 2110 static void 2111 e1000e_send_msi(E1000ECore *core, bool msix) 2112 { 2113 uint32_t causes = core->mac[ICR] & core->mac[IMS] & ~E1000_ICR_ASSERTED; 2114 2115 if (msix) { 2116 e1000e_msix_notify(core, causes); 2117 } else { 2118 if (!e1000e_itr_should_postpone(core)) { 2119 trace_e1000e_irq_msi_notify(causes); 2120 msi_notify(core->owner, 0); 2121 } 2122 } 2123 } 2124 2125 static void 2126 e1000e_update_interrupt_state(E1000ECore *core) 2127 { 2128 bool interrupts_pending; 2129 bool is_msix = msix_enabled(core->owner); 2130 2131 /* Set ICR[OTHER] for MSI-X */ 2132 if (is_msix) { 2133 if (core->mac[ICR] & core->mac[IMS] & E1000_ICR_OTHER_CAUSES) { 2134 core->mac[ICR] |= E1000_ICR_OTHER; 2135 trace_e1000e_irq_add_msi_other(core->mac[ICR]); 2136 } 2137 } 2138 2139 e1000e_fix_icr_asserted(core); 2140 2141 /* 2142 * Make sure ICR and ICS registers have the same value. 2143 * The spec says that the ICS register is write-only. However in practice, 2144 * on real hardware ICS is readable, and for reads it has the same value as 2145 * ICR (except that ICS does not have the clear on read behaviour of ICR). 2146 * 2147 * The VxWorks PRO/1000 driver uses this behaviour. 2148 */ 2149 core->mac[ICS] = core->mac[ICR]; 2150 2151 interrupts_pending = (core->mac[IMS] & core->mac[ICR]) ? 
static void
e1000e_update_interrupt_state(E1000ECore *core)
{
    bool interrupts_pending;
    bool is_msix = msix_enabled(core->owner);

    /* Set ICR[OTHER] for MSI-X */
    if (is_msix) {
        if (core->mac[ICR] & core->mac[IMS] & E1000_ICR_OTHER_CAUSES) {
            core->mac[ICR] |= E1000_ICR_OTHER;
            trace_e1000e_irq_add_msi_other(core->mac[ICR]);
        }
    }

    e1000e_fix_icr_asserted(core);

    /*
     * Make sure ICR and ICS registers have the same value.
     * The spec says that the ICS register is write-only. However in practice,
     * on real hardware ICS is readable, and for reads it has the same value as
     * ICR (except that ICS does not have the clear on read behaviour of ICR).
     *
     * The VxWorks PRO/1000 driver uses this behaviour.
     */
    core->mac[ICS] = core->mac[ICR];

    interrupts_pending = (core->mac[IMS] & core->mac[ICR]) ? true : false;

    trace_e1000e_irq_pending_interrupts(core->mac[ICR] & core->mac[IMS],
                                        core->mac[ICR], core->mac[IMS]);

    if (is_msix || msi_enabled(core->owner)) {
        if (interrupts_pending) {
            e1000e_send_msi(core, is_msix);
        }
    } else {
        if (interrupts_pending) {
            if (!e1000e_itr_should_postpone(core)) {
                e1000e_raise_legacy_irq(core);
            }
        } else {
            e1000e_lower_legacy_irq(core);
        }
    }
}

static inline void
e1000e_set_interrupt_cause(E1000ECore *core, uint32_t val)
{
    trace_e1000e_irq_set_cause_entry(val, core->mac[ICR]);

    val |= e1000e_intmgr_collect_delayed_causes(core);
    core->mac[ICR] |= val;

    trace_e1000e_irq_set_cause_exit(val, core->mac[ICR]);

    e1000e_update_interrupt_state(core);
}

static inline void
e1000e_autoneg_timer(void *opaque)
{
    E1000ECore *core = opaque;
    if (!qemu_get_queue(core->owner_nic)->link_down) {
        e1000x_update_regs_on_autoneg_done(core->mac, core->phy[0]);
        e1000e_update_flowctl_status(core);
        /* signal link status change to the guest */
        e1000e_set_interrupt_cause(core, E1000_ICR_LSC);
    }
}

static inline uint16_t
e1000e_get_reg_index_with_offset(const uint16_t *mac_reg_access, hwaddr addr)
{
    uint16_t index = (addr & 0x1ffff) >> 2;
    return index + (mac_reg_access[index] & 0xfffe);
}

static const char e1000e_phy_regcap[E1000E_PHY_PAGES][0x20] = {
    [0] = {
        [PHY_CTRL]          = PHY_ANYPAGE | PHY_RW,
        [PHY_STATUS]        = PHY_ANYPAGE | PHY_R,
        [PHY_ID1]           = PHY_ANYPAGE | PHY_R,
        [PHY_ID2]           = PHY_ANYPAGE | PHY_R,
        [PHY_AUTONEG_ADV]   = PHY_ANYPAGE | PHY_RW,
        [PHY_LP_ABILITY]    = PHY_ANYPAGE | PHY_R,
        [PHY_AUTONEG_EXP]   = PHY_ANYPAGE | PHY_R,
        [PHY_NEXT_PAGE_TX]  = PHY_ANYPAGE | PHY_RW,
        [PHY_LP_NEXT_PAGE]  = PHY_ANYPAGE | PHY_R,
        [PHY_1000T_CTRL]    = PHY_ANYPAGE | PHY_RW,
        [PHY_1000T_STATUS]  = PHY_ANYPAGE | PHY_R,
        [PHY_EXT_STATUS]    = PHY_ANYPAGE | PHY_R,
        [PHY_PAGE]          = PHY_ANYPAGE | PHY_RW,

        [PHY_COPPER_CTRL1]      = PHY_RW,
        [PHY_COPPER_STAT1]      = PHY_R,
        [PHY_COPPER_CTRL3]      = PHY_RW,
        [PHY_RX_ERR_CNTR]       = PHY_R,
        [PHY_OEM_BITS]          = PHY_RW,
        [PHY_BIAS_1]            = PHY_RW,
        [PHY_BIAS_2]            = PHY_RW,
        [PHY_COPPER_INT_ENABLE] = PHY_RW,
        [PHY_COPPER_STAT2]      = PHY_R,
        [PHY_COPPER_CTRL2]      = PHY_RW
    },
    [2] = {
        [PHY_MAC_CTRL1]      = PHY_RW,
        [PHY_MAC_INT_ENABLE] = PHY_RW,
        [PHY_MAC_STAT]       = PHY_R,
        [PHY_MAC_CTRL2]      = PHY_RW
    },
    [3] = {
        [PHY_LED_03_FUNC_CTRL1] = PHY_RW,
        [PHY_LED_03_POL_CTRL]   = PHY_RW,
        [PHY_LED_TIMER_CTRL]    = PHY_RW,
        [PHY_LED_45_CTRL]       = PHY_RW
    },
    [5] = {
        [PHY_1000T_SKEW] = PHY_R,
        [PHY_1000T_SWAP] = PHY_R
    },
    [6] = {
        [PHY_CRC_COUNTERS] = PHY_R
    }
};
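/*
 * The emulated PHY exposes its registers in pages selected through
 * PHY_PAGE. Entries flagged PHY_ANYPAGE in page 0 of e1000e_phy_regcap
 * are reachable regardless of the currently selected page; all other
 * accesses are resolved against the page the guest selected last.
 */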
static bool
e1000e_phy_reg_check_cap(E1000ECore *core, uint32_t addr,
                         char cap, uint8_t *page)
{
    *page =
        (e1000e_phy_regcap[0][addr] & PHY_ANYPAGE) ? 0
                                                   : core->phy[0][PHY_PAGE];

    if (*page >= E1000E_PHY_PAGES) {
        return false;
    }

    return e1000e_phy_regcap[*page][addr] & cap;
}

static void
e1000e_phy_reg_write(E1000ECore *core, uint8_t page,
                     uint32_t addr, uint16_t data)
{
    assert(page < E1000E_PHY_PAGES);
    assert(addr < E1000E_PHY_PAGE_SIZE);

    if (e1000e_phyreg_writeops[page][addr]) {
        e1000e_phyreg_writeops[page][addr](core, addr, data);
    } else {
        core->phy[page][addr] = data;
    }
}
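/*
 * MDIC is the MAC/PHY management interface: a single 32-bit write encodes
 * PHY number, register address, opcode and (for writes) data. Only PHY #1
 * is emulated; read results are merged into the data field, completion is
 * signalled with E1000_MDIC_READY and, if requested, an MDAC interrupt.
 */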
static void
e1000e_set_mdic(E1000ECore *core, int index, uint32_t val)
{
    uint32_t data = val & E1000_MDIC_DATA_MASK;
    uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
    uint8_t page;

    if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) { /* phy # */
        val = core->mac[MDIC] | E1000_MDIC_ERROR;
    } else if (val & E1000_MDIC_OP_READ) {
        if (!e1000e_phy_reg_check_cap(core, addr, PHY_R, &page)) {
            trace_e1000e_core_mdic_read_unhandled(page, addr);
            val |= E1000_MDIC_ERROR;
        } else {
            val = (val ^ data) | core->phy[page][addr];
            trace_e1000e_core_mdic_read(page, addr, val);
        }
    } else if (val & E1000_MDIC_OP_WRITE) {
        if (!e1000e_phy_reg_check_cap(core, addr, PHY_W, &page)) {
            trace_e1000e_core_mdic_write_unhandled(page, addr);
            val |= E1000_MDIC_ERROR;
        } else {
            trace_e1000e_core_mdic_write(page, addr, data);
            e1000e_phy_reg_write(core, page, addr, data);
        }
    }
    core->mac[MDIC] = val | E1000_MDIC_READY;

    if (val & E1000_MDIC_INT_EN) {
        e1000e_set_interrupt_cause(core, E1000_ICR_MDAC);
    }
}

static void
e1000e_set_rdt(E1000ECore *core, int index, uint32_t val)
{
    core->mac[index] = val & 0xffff;
    trace_e1000e_rx_set_rdt(e1000e_mq_queue_idx(RDT0, index), val);
    e1000e_start_recv(core);
}

static void
e1000e_set_status(E1000ECore *core, int index, uint32_t val)
{
    if ((val & E1000_STATUS_PHYRA) == 0) {
        core->mac[index] &= ~E1000_STATUS_PHYRA;
    }
}

static void
e1000e_set_ctrlext(E1000ECore *core, int index, uint32_t val)
{
    trace_e1000e_link_set_ext_params(!!(val & E1000_CTRL_EXT_ASDCHK),
                                     !!(val & E1000_CTRL_EXT_SPD_BYPS));

    /* Zero self-clearing bits */
    val &= ~(E1000_CTRL_EXT_ASDCHK | E1000_CTRL_EXT_EE_RST);
    core->mac[CTRL_EXT] = val;
}

static void
e1000e_set_pbaclr(E1000ECore *core, int index, uint32_t val)
{
    int i;

    core->mac[PBACLR] = val & E1000_PBACLR_VALID_MASK;

    /* Pending-bit clearing is only meaningful while MSI-X is enabled */
    if (!msix_enabled(core->owner)) {
        return;
    }

    for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
        if (core->mac[PBACLR] & BIT(i)) {
            msix_clr_pending(core->owner, i);
        }
    }
}

static void
e1000e_set_fcrth(E1000ECore *core, int index, uint32_t val)
{
    core->mac[FCRTH] = val & 0xFFF8;
}

static void
e1000e_set_fcrtl(E1000ECore *core, int index, uint32_t val)
{
    core->mac[FCRTL] = val & 0x8000FFF8;
}

static inline void
e1000e_set_16bit(E1000ECore *core, int index, uint32_t val)
{
    core->mac[index] = val & 0xffff;
}

static void
e1000e_set_12bit(E1000ECore *core, int index, uint32_t val)
{
    core->mac[index] = val & 0xfff;
}

static void
e1000e_set_vet(E1000ECore *core, int index, uint32_t val)
{
    core->mac[VET] = val & 0xffff;
    core->vet = le16_to_cpu(core->mac[VET]);
    trace_e1000e_vlan_vet(core->vet);
}

static void
e1000e_set_dlen(E1000ECore *core, int index, uint32_t val)
{
    core->mac[index] = val & E1000_XDLEN_MASK;
}

static void
e1000e_set_dbal(E1000ECore *core, int index, uint32_t val)
{
    core->mac[index] = val & E1000_XDBAL_MASK;
}

static void
e1000e_set_tctl(E1000ECore *core, int index, uint32_t val)
{
    E1000E_TxRing txr;
    core->mac[index] = val;

    if (core->mac[TARC0] & E1000_TARC_ENABLE) {
        e1000e_tx_ring_init(core, &txr, 0);
        e1000e_start_xmit(core, &txr);
    }

    if (core->mac[TARC1] & E1000_TARC_ENABLE) {
        e1000e_tx_ring_init(core, &txr, 1);
        e1000e_start_xmit(core, &txr);
    }
}

static void
e1000e_set_tdt(E1000ECore *core, int index, uint32_t val)
{
    E1000E_TxRing txr;
    int qidx = e1000e_mq_queue_idx(TDT, index);
    uint32_t tarc_reg = (qidx == 0) ? TARC0 : TARC1;

    core->mac[index] = val & 0xffff;

    if (core->mac[tarc_reg] & E1000_TARC_ENABLE) {
        e1000e_tx_ring_init(core, &txr, qidx);
        e1000e_start_xmit(core, &txr);
    }
}

static void
e1000e_set_ics(E1000ECore *core, int index, uint32_t val)
{
    trace_e1000e_irq_write_ics(val);
    e1000e_set_interrupt_cause(core, val);
}

static void
e1000e_set_icr(E1000ECore *core, int index, uint32_t val)
{
    if ((core->mac[ICR] & E1000_ICR_ASSERTED) &&
        (core->mac[CTRL_EXT] & E1000_CTRL_EXT_IAME)) {
        trace_e1000e_irq_icr_process_iame();
        e1000e_clear_ims_bits(core, core->mac[IAM]);
    }

    trace_e1000e_irq_icr_write(val, core->mac[ICR], core->mac[ICR] & ~val);
    core->mac[ICR] &= ~val;
    e1000e_update_interrupt_state(core);
}

static void
e1000e_set_imc(E1000ECore *core, int index, uint32_t val)
{
    trace_e1000e_irq_ims_clear_set_imc(val);
    e1000e_clear_ims_bits(core, val);
    e1000e_update_interrupt_state(core);
}

static void
e1000e_set_ims(E1000ECore *core, int index, uint32_t val)
{
    static const uint32_t ims_ext_mask =
        E1000_IMS_RXQ0 | E1000_IMS_RXQ1 |
        E1000_IMS_TXQ0 | E1000_IMS_TXQ1 |
        E1000_IMS_OTHER;

    static const uint32_t ims_valid_mask =
        E1000_IMS_TXDW   | E1000_IMS_TXQE    | E1000_IMS_LSC  |
        E1000_IMS_RXDMT0 | E1000_IMS_RXO     | E1000_IMS_RXT0 |
        E1000_IMS_MDAC   | E1000_IMS_TXD_LOW | E1000_IMS_SRPD |
        E1000_IMS_ACK    | E1000_IMS_MNG     | E1000_IMS_RXQ0 |
        E1000_IMS_RXQ1   | E1000_IMS_TXQ0    | E1000_IMS_TXQ1 |
        E1000_IMS_OTHER;

    uint32_t valid_val = val & ims_valid_mask;

    trace_e1000e_irq_set_ims(val, core->mac[IMS], core->mac[IMS] | valid_val);
    core->mac[IMS] |= valid_val;

    if ((valid_val & ims_ext_mask) &&
        (core->mac[CTRL_EXT] & E1000_CTRL_EXT_PBA_CLR) &&
        msix_enabled(core->owner)) {
        e1000e_msix_clear(core, valid_val);
    }

    if ((valid_val == ims_valid_mask) &&
        (core->mac[CTRL_EXT] & E1000_CTRL_EXT_INT_TIMERS_CLEAR_ENA)) {
        trace_e1000e_irq_fire_all_timers(val);
        e1000e_intrmgr_fire_all_timers(core);
    }

    e1000e_update_interrupt_state(core);
}
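/*
 * RDTR and TIDV writes store the new delay value; in addition, setting
 * the FPD (flush partial descriptors) bit while the corresponding delay
 * timer is running makes the postponed interrupt fire immediately.
 */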
static void
e1000e_set_rdtr(E1000ECore *core, int index, uint32_t val)
{
    e1000e_set_16bit(core, index, val);

    if ((val & E1000_RDTR_FPD) && (core->rdtr.running)) {
        trace_e1000e_irq_rdtr_fpd_running();
        e1000e_intrmgr_fire_delayed_interrupts(core);
    } else {
        trace_e1000e_irq_rdtr_fpd_not_running();
    }
}

static void
e1000e_set_tidv(E1000ECore *core, int index, uint32_t val)
{
    e1000e_set_16bit(core, index, val);

    if ((val & E1000_TIDV_FPD) && (core->tidv.running)) {
        trace_e1000e_irq_tidv_fpd_running();
        e1000e_intrmgr_fire_delayed_interrupts(core);
    } else {
        trace_e1000e_irq_tidv_fpd_not_running();
    }
}

static uint32_t
e1000e_mac_readreg(E1000ECore *core, int index)
{
    return core->mac[index];
}

static uint32_t
e1000e_mac_ics_read(E1000ECore *core, int index)
{
    trace_e1000e_irq_read_ics(core->mac[ICS]);
    return core->mac[ICS];
}

static uint32_t
e1000e_mac_ims_read(E1000ECore *core, int index)
{
    trace_e1000e_irq_read_ims(core->mac[IMS]);
    return core->mac[IMS];
}

#define E1000E_LOW_BITS_READ_FUNC(num)                      \
    static uint32_t                                         \
    e1000e_mac_low##num##_read(E1000ECore *core, int index) \
    {                                                       \
        return core->mac[index] & (BIT(num) - 1);           \
    }                                                       \

#define E1000E_LOW_BITS_READ(num)  \
    e1000e_mac_low##num##_read

E1000E_LOW_BITS_READ_FUNC(4);
E1000E_LOW_BITS_READ_FUNC(6);
E1000E_LOW_BITS_READ_FUNC(11);
E1000E_LOW_BITS_READ_FUNC(13);
E1000E_LOW_BITS_READ_FUNC(16);

static uint32_t
e1000e_mac_swsm_read(E1000ECore *core, int index)
{
    uint32_t val = core->mac[SWSM];
    core->mac[SWSM] = val | 1;
    return val;
}

static uint32_t
e1000e_mac_itr_read(E1000ECore *core, int index)
{
    return core->itr_guest_value;
}

static uint32_t
e1000e_mac_eitr_read(E1000ECore *core, int index)
{
    return core->eitr_guest_value[index - EITR];
}

static uint32_t
e1000e_mac_icr_read(E1000ECore *core, int index)
{
    uint32_t ret = core->mac[ICR];
    trace_e1000e_irq_icr_read_entry(ret);

    if (core->mac[IMS] == 0) {
        trace_e1000e_irq_icr_clear_zero_ims();
        core->mac[ICR] = 0;
    }

    if ((core->mac[ICR] & E1000_ICR_ASSERTED) &&
        (core->mac[CTRL_EXT] & E1000_CTRL_EXT_IAME)) {
        trace_e1000e_irq_icr_clear_iame();
        core->mac[ICR] = 0;
        trace_e1000e_irq_icr_process_iame();
        e1000e_clear_ims_bits(core, core->mac[IAM]);
    }

    trace_e1000e_irq_icr_read_exit(core->mac[ICR]);
    e1000e_update_interrupt_state(core);
    return ret;
}

static uint32_t
e1000e_mac_read_clr4(E1000ECore *core, int index)
{
    uint32_t ret = core->mac[index];

    core->mac[index] = 0;
    return ret;
}

static uint32_t
e1000e_mac_read_clr8(E1000ECore *core, int index)
{
    uint32_t ret = core->mac[index];

    core->mac[index] = 0;
    core->mac[index - 1] = 0;
    return ret;
}

static uint32_t
e1000e_get_ctrl(E1000ECore *core, int index)
{
    uint32_t val = core->mac[CTRL];

    trace_e1000e_link_read_params(
        !!(val & E1000_CTRL_ASDE),
        (val & E1000_CTRL_SPD_SEL) >> E1000_CTRL_SPD_SHIFT,
        !!(val & E1000_CTRL_FRCSPD),
        !!(val & E1000_CTRL_FRCDPX),
        !!(val & E1000_CTRL_RFCE),
        !!(val & E1000_CTRL_TFCE));

    return val;
}
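/*
 * STATUS reads are partly synthesized: GIO master enable mirrors the
 * inverse of CTRL.GIO_MASTER_DISABLE, and duplex/speed are derived from
 * CTRL whenever the guest forces them (FRCDPX, FRCSPD or speed bypass),
 * defaulting to full duplex at 1000 Mb/s otherwise.
 */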
static uint32_t
e1000e_get_status(E1000ECore *core, int index)
{
    uint32_t res = core->mac[STATUS];

    if (!(core->mac[CTRL] & E1000_CTRL_GIO_MASTER_DISABLE)) {
        res |= E1000_STATUS_GIO_MASTER_ENABLE;
    }

    if (core->mac[CTRL] & E1000_CTRL_FRCDPX) {
        res |= (core->mac[CTRL] & E1000_CTRL_FD) ? E1000_STATUS_FD : 0;
    } else {
        res |= E1000_STATUS_FD;
    }

    if ((core->mac[CTRL] & E1000_CTRL_FRCSPD) ||
        (core->mac[CTRL_EXT] & E1000_CTRL_EXT_SPD_BYPS)) {
        switch (core->mac[CTRL] & E1000_CTRL_SPD_SEL) {
        case E1000_CTRL_SPD_10:
            res |= E1000_STATUS_SPEED_10;
            break;
        case E1000_CTRL_SPD_100:
            res |= E1000_STATUS_SPEED_100;
            break;
        case E1000_CTRL_SPD_1000:
        default:
            res |= E1000_STATUS_SPEED_1000;
            break;
        }
    } else {
        res |= E1000_STATUS_SPEED_1000;
    }

    trace_e1000e_link_status(
        !!(res & E1000_STATUS_LU),
        !!(res & E1000_STATUS_FD),
        (res & E1000_STATUS_SPEED_MASK) >> E1000_STATUS_SPEED_SHIFT,
        (res & E1000_STATUS_ASDV) >> E1000_STATUS_ASDV_SHIFT);

    return res;
}

static uint32_t
e1000e_get_tarc(E1000ECore *core, int index)
{
    return core->mac[index] & ((BIT(11) - 1) |
                               BIT(27)       |
                               BIT(28)       |
                               BIT(29)       |
                               BIT(30));
}

static void
e1000e_mac_writereg(E1000ECore *core, int index, uint32_t val)
{
    core->mac[index] = val;
}

static void
e1000e_mac_setmacaddr(E1000ECore *core, int index, uint32_t val)
{
    uint32_t macaddr[2];

    core->mac[index] = val;

    macaddr[0] = cpu_to_le32(core->mac[RA]);
    macaddr[1] = cpu_to_le32(core->mac[RA + 1]);
    qemu_format_nic_info_str(qemu_get_queue(core->owner_nic),
                             (uint8_t *) macaddr);

    trace_e1000e_mac_set_sw(MAC_ARG(macaddr));
}

static void
e1000e_set_eecd(E1000ECore *core, int index, uint32_t val)
{
    static const uint32_t ro_bits = E1000_EECD_PRES          |
                                    E1000_EECD_AUTO_RD       |
                                    E1000_EECD_SIZE_EX_MASK;

    core->mac[EECD] = (core->mac[EECD] & ro_bits) | (val & ~ro_bits);
}

static void
e1000e_set_eerd(E1000ECore *core, int index, uint32_t val)
{
    uint32_t addr = (val >> E1000_EERW_ADDR_SHIFT) & E1000_EERW_ADDR_MASK;
    uint32_t flags = 0;
    uint32_t data = 0;

    if ((addr < E1000E_EEPROM_SIZE) && (val & E1000_EERW_START)) {
        data = core->eeprom[addr];
        flags = E1000_EERW_DONE;
    }

    core->mac[EERD] = flags |
                      (addr << E1000_EERW_ADDR_SHIFT) |
                      (data << E1000_EERW_DATA_SHIFT);
}

static void
e1000e_set_eewr(E1000ECore *core, int index, uint32_t val)
{
    uint32_t addr = (val >> E1000_EERW_ADDR_SHIFT) & E1000_EERW_ADDR_MASK;
    uint32_t data = (val >> E1000_EERW_DATA_SHIFT) & E1000_EERW_DATA_MASK;
    uint32_t flags = 0;

    if ((addr < E1000E_EEPROM_SIZE) && (val & E1000_EERW_START)) {
        core->eeprom[addr] = data;
        flags = E1000_EERW_DONE;
    }

    /* Completion status must be reported back through EEWR itself */
    core->mac[EEWR] = flags |
                      (addr << E1000_EERW_ADDR_SHIFT) |
                      (data << E1000_EERW_DATA_SHIFT);
}
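/*
 * ITR/EITR writes keep two copies of the interval: the verbatim guest
 * value for readback (see e1000e_mac_itr_read()/e1000e_mac_eitr_read())
 * and an operational copy in core->mac[] clamped to E1000E_MIN_XITR to
 * bound the emulated interrupt rate.
 */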
static void
e1000e_set_itr(E1000ECore *core, int index, uint32_t val)
{
    uint32_t interval = val & 0xffff;

    trace_e1000e_irq_itr_set(val);

    core->itr_guest_value = interval;
    core->mac[index] = MAX(interval, E1000E_MIN_XITR);
}

static void
e1000e_set_eitr(E1000ECore *core, int index, uint32_t val)
{
    uint32_t interval = val & 0xffff;
    uint32_t eitr_num = index - EITR;

    trace_e1000e_irq_eitr_set(eitr_num, val);

    core->eitr_guest_value[eitr_num] = interval;
    core->mac[index] = MAX(interval, E1000E_MIN_XITR);
}

static void
e1000e_set_psrctl(E1000ECore *core, int index, uint32_t val)
{
    if ((val & E1000_PSRCTL_BSIZE0_MASK) == 0) {
        hw_error("e1000e: PSRCTL.BSIZE0 cannot be zero");
    }

    if ((val & E1000_PSRCTL_BSIZE1_MASK) == 0) {
        hw_error("e1000e: PSRCTL.BSIZE1 cannot be zero");
    }

    core->mac[PSRCTL] = val;
}

static void
e1000e_update_rx_offloads(E1000ECore *core)
{
    int cso_state = e1000e_rx_l4_cso_enabled(core);

    trace_e1000e_rx_set_cso(cso_state);

    if (core->has_vnet) {
        qemu_set_offload(qemu_get_queue(core->owner_nic)->peer,
                         cso_state, 0, 0, 0, 0);
    }
}

static void
e1000e_set_rxcsum(E1000ECore *core, int index, uint32_t val)
{
    core->mac[RXCSUM] = val;
    e1000e_update_rx_offloads(core);
}

static void
e1000e_set_gcr(E1000ECore *core, int index, uint32_t val)
{
    uint32_t ro_bits = core->mac[GCR] & E1000_GCR_RO_BITS;
    core->mac[GCR] = (val & ~E1000_GCR_RO_BITS) | ro_bits;
}
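/*
 * MMIO read dispatch table, indexed by register index (offset >> 2).
 * Most entries are plain reads through e1000e_mac_readreg(); statistics
 * counters clear on read (e1000e_mac_read_clr4()/_clr8()), FIFO head/tail
 * registers expose only their low bits, and a few registers (ICR, STATUS,
 * CTRL, TARC0/1, SWSM, ...) use dedicated handlers.
 */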
#define e1000e_getreg(x)    [x] = e1000e_mac_readreg
static uint32_t (*e1000e_macreg_readops[])(E1000ECore *, int) = {
    e1000e_getreg(PBA),
    e1000e_getreg(WUFC),
    e1000e_getreg(MANC),
    e1000e_getreg(TOTL),
    e1000e_getreg(RDT0),
    e1000e_getreg(RDBAH0),
    e1000e_getreg(TDBAL1),
    e1000e_getreg(RDLEN0),
    e1000e_getreg(RDH1),
    e1000e_getreg(LATECOL),
    e1000e_getreg(SEC),
    e1000e_getreg(XONTXC),
    e1000e_getreg(WUS),
    e1000e_getreg(GORCL),
    e1000e_getreg(MGTPRC),
    e1000e_getreg(EERD),
    e1000e_getreg(EIAC),
    e1000e_getreg(PSRCTL),
    e1000e_getreg(MANC2H),
    e1000e_getreg(RXCSUM),
    e1000e_getreg(GSCL_3),
    e1000e_getreg(GSCN_2),
    e1000e_getreg(RSRPD),
    e1000e_getreg(RDBAL1),
    e1000e_getreg(FCAH),
    e1000e_getreg(FCRTH),
    e1000e_getreg(FLOP),
    e1000e_getreg(FLASHT),
    e1000e_getreg(RXSTMPH),
    e1000e_getreg(TXSTMPL),
    e1000e_getreg(TIMADJL),
    e1000e_getreg(TXDCTL),
    e1000e_getreg(RDH0),
    e1000e_getreg(TDT1),
    e1000e_getreg(TNCRS),
    e1000e_getreg(RJC),
    e1000e_getreg(IAM),
    e1000e_getreg(GSCL_2),
    e1000e_getreg(RDBAH1),
    e1000e_getreg(FLSWDATA),
    e1000e_getreg(RXSATRH),
    e1000e_getreg(TIPG),
    e1000e_getreg(FLMNGCTL),
    e1000e_getreg(FLMNGCNT),
    e1000e_getreg(TSYNCTXCTL),
    e1000e_getreg(EXTCNF_SIZE),
    e1000e_getreg(EXTCNF_CTRL),
    e1000e_getreg(EEMNGDATA),
    e1000e_getreg(CTRL_EXT),
    e1000e_getreg(SYSTIMH),
    e1000e_getreg(EEMNGCTL),
    e1000e_getreg(FLMNGDATA),
    e1000e_getreg(TSYNCRXCTL),
    e1000e_getreg(TDH),
    e1000e_getreg(LEDCTL),
    e1000e_getreg(STATUS),
    e1000e_getreg(TCTL),
    e1000e_getreg(TDBAL),
    e1000e_getreg(TDLEN),
    e1000e_getreg(TDH1),
    e1000e_getreg(RADV),
    e1000e_getreg(ECOL),
    e1000e_getreg(DC),
    e1000e_getreg(RLEC),
    e1000e_getreg(XOFFTXC),
    e1000e_getreg(RFC),
    e1000e_getreg(RNBC),
    e1000e_getreg(MGTPTC),
    e1000e_getreg(TIMINCA),
    e1000e_getreg(RXCFGL),
    e1000e_getreg(MFUTP01),
    e1000e_getreg(FACTPS),
    e1000e_getreg(GSCL_1),
    e1000e_getreg(GSCN_0),
    e1000e_getreg(GCR2),
    e1000e_getreg(RDT1),
    e1000e_getreg(PBACLR),
    e1000e_getreg(FCTTV),
    e1000e_getreg(EEWR),
    e1000e_getreg(FLSWCTL),
    e1000e_getreg(RXDCTL1),
    e1000e_getreg(RXSATRL),
    e1000e_getreg(SYSTIML),
    e1000e_getreg(RXUDP),
    e1000e_getreg(TORL),
    e1000e_getreg(TDLEN1),
    e1000e_getreg(MCC),
    e1000e_getreg(WUC),
    e1000e_getreg(EECD),
    e1000e_getreg(MFUTP23),
    e1000e_getreg(RAID),
    e1000e_getreg(FCRTV),
    e1000e_getreg(TXDCTL1),
    e1000e_getreg(RCTL),
    e1000e_getreg(TDT),
    e1000e_getreg(MDIC),
    e1000e_getreg(FCRUC),
    e1000e_getreg(VET),
    e1000e_getreg(RDBAL0),
    e1000e_getreg(TDBAH1),
    e1000e_getreg(RDTR),
    e1000e_getreg(SCC),
    e1000e_getreg(COLC),
    e1000e_getreg(CEXTERR),
    e1000e_getreg(XOFFRXC),
    e1000e_getreg(IPAV),
    e1000e_getreg(GOTCL),
    e1000e_getreg(MGTPDC),
    e1000e_getreg(GCR),
    e1000e_getreg(IVAR),
    e1000e_getreg(POEMB),
    e1000e_getreg(MFVAL),
    e1000e_getreg(FUNCTAG),
    e1000e_getreg(GSCL_4),
    e1000e_getreg(GSCN_3),
    e1000e_getreg(MRQC),
    e1000e_getreg(RDLEN1),
    e1000e_getreg(FCT),
    e1000e_getreg(FLA),
    e1000e_getreg(FLOL),
    e1000e_getreg(RXDCTL),
    e1000e_getreg(RXSTMPL),
    e1000e_getreg(TXSTMPH),
    e1000e_getreg(TIMADJH),
    e1000e_getreg(FCRTL),
    e1000e_getreg(TDBAH),
    e1000e_getreg(TADV),
    e1000e_getreg(XONRXC),
    e1000e_getreg(TSCTFC),
    e1000e_getreg(RFCTL),
    e1000e_getreg(GSCN_1),
    e1000e_getreg(FCAL),
    e1000e_getreg(FLSWCNT),

    [TOTH]    = e1000e_mac_read_clr8,
    [GOTCH]   = e1000e_mac_read_clr8,
    [PRC64]   = e1000e_mac_read_clr4,
    [PRC255]  = e1000e_mac_read_clr4,
    [PRC1023] = e1000e_mac_read_clr4,
    [PTC64]   = e1000e_mac_read_clr4,
    [PTC255]  = e1000e_mac_read_clr4,
    [PTC1023] = e1000e_mac_read_clr4,
    [GPRC]    = e1000e_mac_read_clr4,
    [TPT]     = e1000e_mac_read_clr4,
    [RUC]     = e1000e_mac_read_clr4,
    [BPRC]    = e1000e_mac_read_clr4,
    [MPTC]    = e1000e_mac_read_clr4,
    [IAC]     = e1000e_mac_read_clr4,
    [ICR]     = e1000e_mac_icr_read,
    [RDFH]    = E1000E_LOW_BITS_READ(13),
    [RDFHS]   = E1000E_LOW_BITS_READ(13),
    [RDFPC]   = E1000E_LOW_BITS_READ(13),
    [TDFH]    = E1000E_LOW_BITS_READ(13),
    [TDFHS]   = E1000E_LOW_BITS_READ(13),
    [STATUS]  = e1000e_get_status,
    [TARC0]   = e1000e_get_tarc,
    [PBS]     = E1000E_LOW_BITS_READ(6),
    [ICS]     = e1000e_mac_ics_read,
    [AIT]     = E1000E_LOW_BITS_READ(16),
    [TORH]    = e1000e_mac_read_clr8,
    [GORCH]   = e1000e_mac_read_clr8,
    [PRC127]  = e1000e_mac_read_clr4,
    [PRC511]  = e1000e_mac_read_clr4,
    [PRC1522] = e1000e_mac_read_clr4,
    [PTC127]  = e1000e_mac_read_clr4,
    [PTC511]  = e1000e_mac_read_clr4,
    [PTC1522] = e1000e_mac_read_clr4,
    [GPTC]    = e1000e_mac_read_clr4,
    [TPR]     = e1000e_mac_read_clr4,
    [ROC]     = e1000e_mac_read_clr4,
    [MPRC]    = e1000e_mac_read_clr4,
    [BPTC]    = e1000e_mac_read_clr4,
    [TSCTC]   = e1000e_mac_read_clr4,
    [ITR]     = e1000e_mac_itr_read,
    [RDFT]    = E1000E_LOW_BITS_READ(13),
    [RDFTS]   = E1000E_LOW_BITS_READ(13),
    [TDFPC]   = E1000E_LOW_BITS_READ(13),
    [TDFT]    = E1000E_LOW_BITS_READ(13),
    [TDFTS]   = E1000E_LOW_BITS_READ(13),
    [CTRL]    = e1000e_get_ctrl,
    [TARC1]   = e1000e_get_tarc,
    [SWSM]    = e1000e_mac_swsm_read,
    [IMS]     = e1000e_mac_ims_read,

    [CRCERRS ... MPC]      = e1000e_mac_readreg,
    [IP6AT ... IP6AT + 3]  = e1000e_mac_readreg,
    [IP4AT ... IP4AT + 6]  = e1000e_mac_readreg,
    [RA ... RA + 31]       = e1000e_mac_readreg,
    [WUPM ... WUPM + 31]   = e1000e_mac_readreg,
    [MTA ... MTA + 127]    = e1000e_mac_readreg,
    [VFTA ... VFTA + 127]  = e1000e_mac_readreg,
    [FFMT ... FFMT + 254]  = E1000E_LOW_BITS_READ(4),
    [FFVT ... FFVT + 254]  = e1000e_mac_readreg,
    [MDEF ... MDEF + 7]    = e1000e_mac_readreg,
    [FFLT ... FFLT + 10]   = E1000E_LOW_BITS_READ(11),
    [FTFT ... FTFT + 254]  = e1000e_mac_readreg,
    [PBM ... PBM + 10239]  = e1000e_mac_readreg,
    [RETA ... RETA + 31]   = e1000e_mac_readreg,
    [RSSRK ... RSSRK + 31] = e1000e_mac_readreg,
    [MAVTV0 ... MAVTV3]    = e1000e_mac_readreg,
    [EITR...EITR + E1000E_MSIX_VEC_NUM - 1] = e1000e_mac_eitr_read
};
enum { E1000E_NREADOPS = ARRAY_SIZE(e1000e_macreg_readops) };
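/*
 * MMIO write dispatch table. Registers wired to e1000e_putreg() accept
 * the written value unmodified; the dedicated handlers below implement
 * side effects such as kicking transmission on tail writes (TDT/TDT1)
 * or re-evaluating the interrupt state (ICS, ICR, IMS, IMC).
 */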
#define e1000e_putreg(x)    [x] = e1000e_mac_writereg
static void (*e1000e_macreg_writeops[])(E1000ECore *, int, uint32_t) = {
    e1000e_putreg(PBA),
    e1000e_putreg(SWSM),
    e1000e_putreg(WUFC),
    e1000e_putreg(RDBAH1),
    e1000e_putreg(TDBAH),
    e1000e_putreg(TXDCTL),
    e1000e_putreg(RDBAH0),
    e1000e_putreg(LEDCTL),
    e1000e_putreg(FCAL),
    e1000e_putreg(FCRUC),
    e1000e_putreg(AIT),
    e1000e_putreg(TDFH),
    e1000e_putreg(TDFT),
    e1000e_putreg(TDFHS),
    e1000e_putreg(TDFTS),
    e1000e_putreg(TDFPC),
    e1000e_putreg(WUC),
    e1000e_putreg(WUS),
    e1000e_putreg(RDFH),
    e1000e_putreg(RDFT),
    e1000e_putreg(RDFHS),
    e1000e_putreg(RDFTS),
    e1000e_putreg(RDFPC),
    e1000e_putreg(IPAV),
    e1000e_putreg(TDBAH1),
    e1000e_putreg(TIMINCA),
    e1000e_putreg(IAM),
    e1000e_putreg(EIAC),
    e1000e_putreg(IVAR),
    e1000e_putreg(TARC0),
    e1000e_putreg(TARC1),
    e1000e_putreg(FLSWDATA),
    e1000e_putreg(POEMB),
    e1000e_putreg(PBS),
    e1000e_putreg(MFUTP01),
    e1000e_putreg(MFUTP23),
    e1000e_putreg(MANC),
    e1000e_putreg(MANC2H),
    e1000e_putreg(MFVAL),
    e1000e_putreg(EXTCNF_CTRL),
    e1000e_putreg(FACTPS),
    e1000e_putreg(FUNCTAG),
    e1000e_putreg(GSCL_1),
    e1000e_putreg(GSCL_2),
    e1000e_putreg(GSCL_3),
    e1000e_putreg(GSCL_4),
    e1000e_putreg(GSCN_0),
    e1000e_putreg(GSCN_1),
    e1000e_putreg(GSCN_2),
    e1000e_putreg(GSCN_3),
    e1000e_putreg(GCR2),
    e1000e_putreg(MRQC),
    e1000e_putreg(FLOP),
    e1000e_putreg(FLOL),
    e1000e_putreg(FLSWCTL),
    e1000e_putreg(FLSWCNT),
    e1000e_putreg(FLA),
    e1000e_putreg(RXDCTL1),
    e1000e_putreg(TXDCTL1),
    e1000e_putreg(TIPG),
    e1000e_putreg(RXSTMPH),
    e1000e_putreg(RXSTMPL),
    e1000e_putreg(RXSATRL),
    e1000e_putreg(RXSATRH),
    e1000e_putreg(TXSTMPL),
    e1000e_putreg(TXSTMPH),
    e1000e_putreg(SYSTIML),
    e1000e_putreg(SYSTIMH),
    e1000e_putreg(TIMADJL),
    e1000e_putreg(TIMADJH),
    e1000e_putreg(RXUDP),
    e1000e_putreg(RXCFGL),
    e1000e_putreg(TSYNCRXCTL),
    e1000e_putreg(TSYNCTXCTL),
    e1000e_putreg(FLSWDATA),
    e1000e_putreg(EXTCNF_SIZE),
    e1000e_putreg(EEMNGCTL),
    e1000e_putreg(RA),

    [TDH1]     = e1000e_set_16bit,
    [TDT1]     = e1000e_set_tdt,
    [TCTL]     = e1000e_set_tctl,
    [TDT]      = e1000e_set_tdt,
    [MDIC]     = e1000e_set_mdic,
    [ICS]      = e1000e_set_ics,
    [TDH]      = e1000e_set_16bit,
    [RDH0]     = e1000e_set_16bit,
    [RDT0]     = e1000e_set_rdt,
    [IMC]      = e1000e_set_imc,
    [IMS]      = e1000e_set_ims,
    [ICR]      = e1000e_set_icr,
    [EECD]     = e1000e_set_eecd,
    [RCTL]     = e1000e_set_rx_control,
    [CTRL]     = e1000e_set_ctrl,
    [RDTR]     = e1000e_set_rdtr,
    [RADV]     = e1000e_set_16bit,
    [TADV]     = e1000e_set_16bit,
    [ITR]      = e1000e_set_itr,
    [EERD]     = e1000e_set_eerd,
    [GCR]      = e1000e_set_gcr,
    [PSRCTL]   = e1000e_set_psrctl,
    [RXCSUM]   = e1000e_set_rxcsum,
    [RAID]     = e1000e_set_16bit,
    [RSRPD]    = e1000e_set_12bit,
    [TIDV]     = e1000e_set_tidv,
    [TDLEN1]   = e1000e_set_dlen,
    [TDLEN]    = e1000e_set_dlen,
    [RDLEN0]   = e1000e_set_dlen,
    [RDLEN1]   = e1000e_set_dlen,
    [TDBAL]    = e1000e_set_dbal,
    [TDBAL1]   = e1000e_set_dbal,
    [RDBAL0]   = e1000e_set_dbal,
    [RDBAL1]   = e1000e_set_dbal,
    [RDH1]     = e1000e_set_16bit,
    [RDT1]     = e1000e_set_rdt,
    [STATUS]   = e1000e_set_status,
    [PBACLR]   = e1000e_set_pbaclr,
    [CTRL_EXT] = e1000e_set_ctrlext,
    [FCAH]     = e1000e_set_16bit,
    [FCT]      = e1000e_set_16bit,
    [FCTTV]    = e1000e_set_16bit,
    [FCRTV]    = e1000e_set_16bit,
    [FCRTH]    = e1000e_set_fcrth,
    [FCRTL]    = e1000e_set_fcrtl,
    [VET]      = e1000e_set_vet,
    [RXDCTL]   = e1000e_set_rxdctl,
    [FLASHT]   = e1000e_set_16bit,
    [EEWR]     = e1000e_set_eewr,
    [CTRL_DUP] = e1000e_set_ctrl,
    [RFCTL]    = e1000e_set_rfctl,
    [RA + 1]   = e1000e_mac_setmacaddr,

    [IP6AT ... IP6AT + 3]  = e1000e_mac_writereg,
    [IP4AT ... IP4AT + 6]  = e1000e_mac_writereg,
    [RA + 2 ... RA + 31]   = e1000e_mac_writereg,
    [WUPM ... WUPM + 31]   = e1000e_mac_writereg,
    [MTA ... MTA + 127]    = e1000e_mac_writereg,
    [VFTA ... VFTA + 127]  = e1000e_mac_writereg,
    [FFMT ... FFMT + 254]  = e1000e_mac_writereg,
    [FFVT ... FFVT + 254]  = e1000e_mac_writereg,
    [PBM ... PBM + 10239]  = e1000e_mac_writereg,
    [MDEF ... MDEF + 7]    = e1000e_mac_writereg,
    [FFLT ... FFLT + 10]   = e1000e_mac_writereg,
    [FTFT ... FTFT + 254]  = e1000e_mac_writereg,
    [RETA ... RETA + 31]   = e1000e_mac_writereg,
    [RSSRK ... RSSRK + 31] = e1000e_mac_writereg,
    [MAVTV0 ... MAVTV3]    = e1000e_mac_writereg,
    [EITR...EITR + E1000E_MSIX_VEC_NUM - 1] = e1000e_set_eitr
};
enum { E1000E_NWRITEOPS = ARRAY_SIZE(e1000e_macreg_writeops) };
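/*
 * Worked example for the table below: an access to a legacy alias such
 * as RDT0_A decodes to the alias index first; then
 * e1000e_get_reg_index_with_offset() adds the even offset stored at that
 * index, so the access lands on the primary register (RDT0) and runs its
 * regular handler. The lowest bit stays free to mark registers whose
 * emulation is only partial.
 */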
enum { MAC_ACCESS_PARTIAL = 1 };

/* The array below combines alias offsets of the index values for the
 * MAC registers that have aliases, with the indication of not fully
 * implemented registers (lowest bit). This combination is possible
 * because all of the offsets are even. */
static const uint16_t mac_reg_access[E1000E_MAC_SIZE] = {
    /* Alias index offsets */
    [FCRTL_A] = 0x07fe, [FCRTH_A] = 0x0802,
    [RDH0_A]  = 0x09bc, [RDT0_A]  = 0x09bc, [RDTR_A] = 0x09c6,
    [RDFH_A]  = 0xe904, [RDFT_A]  = 0xe904,
    [TDH_A]   = 0x0cf8, [TDT_A]   = 0x0cf8, [TIDV_A] = 0x0cf8,
    [TDFH_A]  = 0xed00, [TDFT_A]  = 0xed00,
    [RA_A ... RA_A + 31]      = 0x14f0,
    [VFTA_A ... VFTA_A + 127] = 0x1400,
    [RDBAL0_A ... RDLEN0_A]   = 0x09bc,
    [TDBAL_A ... TDLEN_A]     = 0x0cf8,
    /* Access options */
    [RDFH]  = MAC_ACCESS_PARTIAL,    [RDFT]  = MAC_ACCESS_PARTIAL,
    [RDFHS] = MAC_ACCESS_PARTIAL,    [RDFTS] = MAC_ACCESS_PARTIAL,
    [RDFPC] = MAC_ACCESS_PARTIAL,
    [TDFH]  = MAC_ACCESS_PARTIAL,    [TDFT]  = MAC_ACCESS_PARTIAL,
    [TDFHS] = MAC_ACCESS_PARTIAL,    [TDFTS] = MAC_ACCESS_PARTIAL,
    [TDFPC] = MAC_ACCESS_PARTIAL,    [EECD]  = MAC_ACCESS_PARTIAL,
    [PBM]   = MAC_ACCESS_PARTIAL,    [FLA]   = MAC_ACCESS_PARTIAL,
    [FCAL]  = MAC_ACCESS_PARTIAL,    [FCAH]  = MAC_ACCESS_PARTIAL,
    [FCT]   = MAC_ACCESS_PARTIAL,    [FCTTV] = MAC_ACCESS_PARTIAL,
    [FCRTV] = MAC_ACCESS_PARTIAL,    [FCRTL] = MAC_ACCESS_PARTIAL,
    [FCRTH] = MAC_ACCESS_PARTIAL,    [TXDCTL] = MAC_ACCESS_PARTIAL,
    [TXDCTL1] = MAC_ACCESS_PARTIAL,
    [MAVTV0 ... MAVTV3] = MAC_ACCESS_PARTIAL
};
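/*
 * MMIO entry points. The register index is decoded (and de-aliased)
 * first; writes to read-only registers and accesses to unimplemented
 * offsets are not performed, only traced.
 */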
void
e1000e_core_write(E1000ECore *core, hwaddr addr, uint64_t val, unsigned size)
{
    uint16_t index = e1000e_get_reg_index_with_offset(mac_reg_access, addr);

    if (index < E1000E_NWRITEOPS && e1000e_macreg_writeops[index]) {
        if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) {
            trace_e1000e_wrn_regs_write_trivial(index << 2);
        }
        trace_e1000e_core_write(index << 2, size, val);
        e1000e_macreg_writeops[index](core, index, val);
    } else if (index < E1000E_NREADOPS && e1000e_macreg_readops[index]) {
        trace_e1000e_wrn_regs_write_ro(index << 2, size, val);
    } else {
        trace_e1000e_wrn_regs_write_unknown(index << 2, size, val);
    }
}

uint64_t
e1000e_core_read(E1000ECore *core, hwaddr addr, unsigned size)
{
    uint64_t val;
    uint16_t index = e1000e_get_reg_index_with_offset(mac_reg_access, addr);

    if (index < E1000E_NREADOPS && e1000e_macreg_readops[index]) {
        if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) {
            trace_e1000e_wrn_regs_read_trivial(index << 2);
        }
        val = e1000e_macreg_readops[index](core, index);
        trace_e1000e_core_read(index << 2, size, val);
        return val;
    } else {
        trace_e1000e_wrn_regs_read_unknown(index << 2, size);
    }
    return 0;
}

static inline void
e1000e_autoneg_pause(E1000ECore *core)
{
    timer_del(core->autoneg_timer);
}

static void
e1000e_autoneg_resume(E1000ECore *core)
{
    if (e1000e_have_autoneg(core) &&
        !(core->phy[0][PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) {
        qemu_get_queue(core->owner_nic)->link_down = false;
        timer_mod(core->autoneg_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
    }
}

static void
e1000e_vm_state_change(void *opaque, int running, RunState state)
{
    E1000ECore *core = opaque;

    if (running) {
        trace_e1000e_vm_state_running();
        e1000e_intrmgr_resume(core);
        e1000e_autoneg_resume(core);
    } else {
        trace_e1000e_vm_state_stopped();
        e1000e_autoneg_pause(core);
        e1000e_intrmgr_pause(core);
    }
}
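/*
 * Realize wires up the moving parts of the core: the auto-negotiation
 * timer, the interrupt delay/throttling timers, a VM state change
 * handler that pauses and resumes both across VM stop/start, the TX/RX
 * packet abstractions and the EEPROM contents.
 */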
void
e1000e_core_pci_realize(E1000ECore     *core,
                        const uint16_t *eeprom_templ,
                        uint32_t        eeprom_size,
                        const uint8_t  *macaddr)
{
    int i;

    core->autoneg_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                       e1000e_autoneg_timer, core);
    e1000e_intrmgr_pci_realize(core);

    core->vmstate =
        qemu_add_vm_change_state_handler(e1000e_vm_state_change, core);

    for (i = 0; i < E1000E_NUM_QUEUES; i++) {
        net_tx_pkt_init(&core->tx[i].tx_pkt, core->owner,
                        E1000E_MAX_TX_FRAGS, core->has_vnet);
    }

    net_rx_pkt_init(&core->rx_pkt, core->has_vnet);

    e1000x_core_prepare_eeprom(core->eeprom,
                               eeprom_templ,
                               eeprom_size,
                               PCI_DEVICE_GET_CLASS(core->owner)->device_id,
                               macaddr);
    e1000e_update_rx_offloads(core);
}

void
e1000e_core_pci_uninit(E1000ECore *core)
{
    int i;

    timer_del(core->autoneg_timer);
    timer_free(core->autoneg_timer);

    e1000e_intrmgr_pci_unint(core);

    qemu_del_vm_change_state_handler(core->vmstate);

    for (i = 0; i < E1000E_NUM_QUEUES; i++) {
        net_tx_pkt_reset(core->tx[i].tx_pkt);
        net_tx_pkt_uninit(core->tx[i].tx_pkt);
    }

    net_rx_pkt_uninit(core->rx_pkt);
}

static const uint16_t
e1000e_phy_reg_init[E1000E_PHY_PAGES][E1000E_PHY_PAGE_SIZE] = {
    [0] = {
        [PHY_CTRL] =   MII_CR_SPEED_SELECT_MSB  |
                       MII_CR_FULL_DUPLEX       |
                       MII_CR_AUTO_NEG_EN,

        [PHY_STATUS] = MII_SR_EXTENDED_CAPS     |
                       MII_SR_LINK_STATUS       |
                       MII_SR_AUTONEG_CAPS      |
                       MII_SR_PREAMBLE_SUPPRESS |
                       MII_SR_EXTENDED_STATUS   |
                       MII_SR_10T_HD_CAPS       |
                       MII_SR_10T_FD_CAPS       |
                       MII_SR_100X_HD_CAPS      |
                       MII_SR_100X_FD_CAPS,

        [PHY_ID1]            = 0x141,
        [PHY_ID2]            = E1000_PHY_ID2_82574x,
        [PHY_AUTONEG_ADV]    = 0xde1,
        [PHY_LP_ABILITY]     = 0x7e0,
        [PHY_AUTONEG_EXP]    = BIT(2),
        [PHY_NEXT_PAGE_TX]   = BIT(0) | BIT(13),
        [PHY_1000T_CTRL]     = BIT(8) | BIT(9) | BIT(10) | BIT(11),
        [PHY_1000T_STATUS]   = 0x3c00,
        [PHY_EXT_STATUS]     = BIT(12) | BIT(13),

        [PHY_COPPER_CTRL1]   = BIT(5) | BIT(6) | BIT(8) | BIT(9) |
                               BIT(12) | BIT(13),
        [PHY_COPPER_STAT1]   = BIT(3) | BIT(10) | BIT(11) | BIT(13) | BIT(15)
    },
    [2] = {
        [PHY_MAC_CTRL1] = BIT(3) | BIT(7),
        [PHY_MAC_CTRL2] = BIT(1) | BIT(2) | BIT(6) | BIT(12)
    },
    [3] = {
        [PHY_LED_TIMER_CTRL] = BIT(0) | BIT(2) | BIT(14)
    }
};

static const uint32_t e1000e_mac_reg_init[] = {
    [PBA]           = 0x00140014,
    [LEDCTL]        = BIT(1) | BIT(8) | BIT(9) | BIT(15) | BIT(17) | BIT(18),
    [EXTCNF_CTRL]   = BIT(3),
    [EEMNGCTL]      = BIT(31),
    [FLASHT]        = 0x2,
    [FLSWCTL]       = BIT(30) | BIT(31),
    [FLOL]          = BIT(0),
    [RXDCTL]        = BIT(16),
    [RXDCTL1]       = BIT(16),
    [TIPG]          = 0x8 | (0x8 << 10) | (0x6 << 20),
    [RXCFGL]        = 0x88F7,
    [RXUDP]         = 0x319,
    [CTRL]          = E1000_CTRL_FD | E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 |
                      E1000_CTRL_SPD_1000 | E1000_CTRL_SLU |
                      E1000_CTRL_ADVD3WUC,
    [STATUS]        = E1000_STATUS_ASDV_1000 | E1000_STATUS_LU,
    [PSRCTL]        = (2 << E1000_PSRCTL_BSIZE0_SHIFT) |
                      (4 << E1000_PSRCTL_BSIZE1_SHIFT) |
                      (4 << E1000_PSRCTL_BSIZE2_SHIFT),
    [TARC0]         = 0x3 | E1000_TARC_ENABLE,
    [TARC1]         = 0x3 | E1000_TARC_ENABLE,
    [EECD]          = E1000_EECD_AUTO_RD | E1000_EECD_PRES,
    [EERD]          = E1000_EERW_DONE,
    [EEWR]          = E1000_EERW_DONE,
    [GCR]           = E1000_L0S_ADJUST |
                      E1000_L1_ENTRY_LATENCY_MSB |
                      E1000_L1_ENTRY_LATENCY_LSB,
    [TDFH]          = 0x600,
    [TDFT]          = 0x600,
    [TDFHS]         = 0x600,
    [TDFTS]         = 0x600,
    [POEMB]         = 0x30D,
    [PBS]           = 0x028,
    [MANC]          = E1000_MANC_DIS_IP_CHK_ARP,
    [FACTPS]        = E1000_FACTPS_LAN0_ON | 0x20000000,
    [SWSM]          = 1,
    [RXCSUM]        = E1000_RXCSUM_IPOFLD | E1000_RXCSUM_TUOFLD,
    [ITR]           = E1000E_MIN_XITR,
    [EITR...EITR + E1000E_MSIX_VEC_NUM - 1] = E1000E_MIN_XITR,
};
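/*
 * Reset wipes the PHY and MAC register files and re-seeds them from the
 * init tables above, resets the receive buffer threshold, re-syncs the
 * link state with the network backend and drops any partially sent
 * packets.
 */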
void
e1000e_core_reset(E1000ECore *core)
{
    int i;

    timer_del(core->autoneg_timer);

    e1000e_intrmgr_reset(core);

    memset(core->phy, 0, sizeof core->phy);
    memmove(core->phy, e1000e_phy_reg_init, sizeof e1000e_phy_reg_init);
    memset(core->mac, 0, sizeof core->mac);
    memmove(core->mac, e1000e_mac_reg_init, sizeof e1000e_mac_reg_init);

    core->rxbuf_min_shift = 1 + E1000_RING_DESC_LEN_SHIFT;

    if (qemu_get_queue(core->owner_nic)->link_down) {
        e1000e_link_down(core);
    }

    e1000x_reset_mac_addr(core->owner_nic, core->mac, core->permanent_mac);

    for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
        net_tx_pkt_reset(core->tx[i].tx_pkt);
        memset(&core->tx[i].props, 0, sizeof(core->tx[i].props));
        core->tx[i].skip_cp = false;
    }
}

void e1000e_core_pre_save(E1000ECore *core)
{
    int i;
    NetClientState *nc = qemu_get_queue(core->owner_nic);

    /*
     * If link is down and auto-negotiation is supported and ongoing,
     * complete auto-negotiation immediately. This allows us to look
     * at MII_SR_AUTONEG_COMPLETE to infer link status on load.
     */
    if (nc->link_down && e1000e_have_autoneg(core)) {
        core->phy[0][PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
        e1000e_update_flowctl_status(core);
    }

    for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
        if (net_tx_pkt_has_fragments(core->tx[i].tx_pkt)) {
            core->tx[i].skip_cp = true;
        }
    }
}

int
e1000e_core_post_load(E1000ECore *core)
{
    NetClientState *nc = qemu_get_queue(core->owner_nic);

    /* nc.link_down can't be migrated, so infer link_down according
     * to link status bit in core.mac[STATUS].
     */
    nc->link_down = (core->mac[STATUS] & E1000_STATUS_LU) == 0;

    return 0;
}