/*
 * i.MX Fast Ethernet Controller emulation.
 *
 * Copyright (c) 2013 Jean-Christophe Dubois. <jcd@tribudubois.net>
 *
 * Based on Coldfire Fast Ethernet Controller emulation.
 *
 * Copyright (c) 2007 CodeSourcery.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "hw/irq.h"
#include "hw/net/imx_fec.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "sysemu/dma.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "net/checksum.h"
#include "net/eth.h"

/* For crc32 */
#include <zlib.h>

#ifndef DEBUG_IMX_FEC
#define DEBUG_IMX_FEC 0
#endif

#define FEC_PRINTF(fmt, args...) \
    do { \
        if (DEBUG_IMX_FEC) { \
            fprintf(stderr, "[%s]%s: " fmt, TYPE_IMX_FEC, \
                    __func__, ##args); \
        } \
    } while (0)

#ifndef DEBUG_IMX_PHY
#define DEBUG_IMX_PHY 0
#endif

#define PHY_PRINTF(fmt, args...) \
    do { \
        if (DEBUG_IMX_PHY) { \
            fprintf(stderr, "[%s.phy]%s: " fmt, TYPE_IMX_FEC, \
                    __func__, ##args); \
        } \
    } while (0)

#define IMX_MAX_DESC 1024

static const char *imx_default_reg_name(IMXFECState *s, uint32_t index)
{
    static char tmp[20];
    sprintf(tmp, "index %d", index);
    return tmp;
}

static const char *imx_fec_reg_name(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_FRBR:
        return "FRBR";
    case ENET_FRSR:
        return "FRSR";
    case ENET_MIIGSK_CFGR:
        return "MIIGSK_CFGR";
    case ENET_MIIGSK_ENR:
        return "MIIGSK_ENR";
    default:
        return imx_default_reg_name(s, index);
    }
}

static const char *imx_enet_reg_name(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_RSFL:
        return "RSFL";
    case ENET_RSEM:
        return "RSEM";
    case ENET_RAEM:
        return "RAEM";
    case ENET_RAFL:
        return "RAFL";
    case ENET_TSEM:
        return "TSEM";
    case ENET_TAEM:
        return "TAEM";
    case ENET_TAFL:
        return "TAFL";
    case ENET_TIPG:
        return "TIPG";
    case ENET_FTRL:
        return "FTRL";
    case ENET_TACC:
        return "TACC";
    case ENET_RACC:
        return "RACC";
    case ENET_ATCR:
        return "ATCR";
    case ENET_ATVR:
        return "ATVR";
    case ENET_ATOFF:
        return "ATOFF";
    case ENET_ATPER:
        return "ATPER";
    case ENET_ATCOR:
        return "ATCOR";
    case ENET_ATINC:
        return "ATINC";
    case ENET_ATSTMP:
        return "ATSTMP";
    case ENET_TGSR:
        return "TGSR";
    case ENET_TCSR0:
        return "TCSR0";
    case ENET_TCCR0:
        return "TCCR0";
    case ENET_TCSR1:
        return "TCSR1";
    case ENET_TCCR1:
        return "TCCR1";
    case ENET_TCSR2:
        return "TCSR2";
    case ENET_TCCR2:
        return "TCCR2";
    case ENET_TCSR3:
        return "TCSR3";
    case ENET_TCCR3:
        return "TCCR3";
    default:
        return imx_default_reg_name(s, index);
    }
}

static const char *imx_eth_reg_name(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_EIR:
        return "EIR";
    case ENET_EIMR:
        return "EIMR";
    case ENET_RDAR:
        return "RDAR";
    case ENET_TDAR:
        return "TDAR";
    case ENET_ECR:
        return "ECR";
    case ENET_MMFR:
        return "MMFR";
    case ENET_MSCR:
        return "MSCR";
    case ENET_MIBC:
        return "MIBC";
    case ENET_RCR:
        return "RCR";
    case ENET_TCR:
        return "TCR";
    case ENET_PALR:
        return "PALR";
    case ENET_PAUR:
        return "PAUR";
    case ENET_OPD:
        return "OPD";
    case ENET_IAUR:
        return "IAUR";
    case ENET_IALR:
        return "IALR";
    case ENET_GAUR:
        return "GAUR";
    case ENET_GALR:
        return "GALR";
    case ENET_TFWR:
        return "TFWR";
    case ENET_RDSR:
        return "RDSR";
    case ENET_TDSR:
        return "TDSR";
    case ENET_MRBR:
        return "MRBR";
    default:
        if (s->is_fec) {
            return imx_fec_reg_name(s, index);
        } else {
            return imx_enet_reg_name(s, index);
        }
    }
}

/*
 * Versions of this device with more than one TX descriptor save the
 * 2nd and 3rd descriptors in a subsection, to maintain migration
 * compatibility with previous versions of the device that only
 * supported a single descriptor.
 */
static bool imx_eth_is_multi_tx_ring(void *opaque)
{
    IMXFECState *s = IMX_FEC(opaque);

    return s->tx_ring_num > 1;
}

static const VMStateDescription vmstate_imx_eth_txdescs = {
    .name = "imx.fec/txdescs",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = imx_eth_is_multi_tx_ring,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(tx_descriptor[1], IMXFECState),
        VMSTATE_UINT32(tx_descriptor[2], IMXFECState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_imx_eth = {
    .name = TYPE_IMX_FEC,
    .version_id = 2,
    .minimum_version_id = 2,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, IMXFECState, ENET_MAX),
        VMSTATE_UINT32(rx_descriptor, IMXFECState),
        VMSTATE_UINT32(tx_descriptor[0], IMXFECState),
        VMSTATE_UINT32(phy_status, IMXFECState),
        VMSTATE_UINT32(phy_control, IMXFECState),
        VMSTATE_UINT32(phy_advertise, IMXFECState),
        VMSTATE_UINT32(phy_int, IMXFECState),
        VMSTATE_UINT32(phy_int_mask, IMXFECState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_imx_eth_txdescs,
        NULL
    },
};

#define PHY_INT_ENERGYON            (1 << 7)
#define PHY_INT_AUTONEG_COMPLETE    (1 << 6)
#define PHY_INT_FAULT               (1 << 5)
#define PHY_INT_DOWN                (1 << 4)
#define PHY_INT_AUTONEG_LP          (1 << 3)
#define PHY_INT_PARFAULT            (1 << 2)
#define PHY_INT_AUTONEG_PAGE        (1 << 1)

static void imx_eth_update(IMXFECState *s);

/*
 * The MII phy could raise a GPIO to the processor which in turn
 * could be handled as an interrupt by the OS.
 * For now we don't handle any GPIO/interrupt line, so the OS will
 * have to poll for the PHY status.
 */
static void phy_update_irq(IMXFECState *s)
{
    imx_eth_update(s);
}

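/*
 * Note on the PHY bit masks used below: the values follow the standard
 * IEEE 802.3 MII register layout. In the Basic Status register (reg 1),
 * 0x0024 is "Link Status" (bit 2) together with "Auto-Negotiation
 * Complete" (bit 5), which is why both are set or cleared as a single
 * mask whenever the link state changes.
 */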
static void phy_update_link(IMXFECState *s)
{
    /* Autonegotiation status mirrors link status. */
    if (qemu_get_queue(s->nic)->link_down) {
        PHY_PRINTF("link is down\n");
        s->phy_status &= ~0x0024;
        s->phy_int |= PHY_INT_DOWN;
    } else {
        PHY_PRINTF("link is up\n");
        s->phy_status |= 0x0024;
        s->phy_int |= PHY_INT_ENERGYON;
        s->phy_int |= PHY_INT_AUTONEG_COMPLETE;
    }
    phy_update_irq(s);
}

static void imx_eth_set_link(NetClientState *nc)
{
    phy_update_link(IMX_FEC(qemu_get_nic_opaque(nc)));
}

static void phy_reset(IMXFECState *s)
{
    s->phy_status = 0x7809;
    s->phy_control = 0x3000;
    s->phy_advertise = 0x01e1;
    s->phy_int_mask = 0;
    s->phy_int = 0;
    phy_update_link(s);
}

static uint32_t do_phy_read(IMXFECState *s, int reg)
{
    uint32_t val;

    if (reg > 31) {
        /* we only advertise one phy */
        return 0;
    }

    switch (reg) {
    case 0:     /* Basic Control */
        val = s->phy_control;
        break;
    case 1:     /* Basic Status */
        val = s->phy_status;
        break;
    case 2:     /* ID1 */
        val = 0x0007;
        break;
    case 3:     /* ID2 */
        val = 0xc0d1;
        break;
    case 4:     /* Auto-neg advertisement */
        val = s->phy_advertise;
        break;
    case 5:     /* Auto-neg Link Partner Ability */
        val = 0x0f71;
        break;
    case 6:     /* Auto-neg Expansion */
        val = 1;
        break;
    case 29:    /* Interrupt source */
        val = s->phy_int;
        s->phy_int = 0;
        phy_update_irq(s);
        break;
    case 30:    /* Interrupt mask */
        val = s->phy_int_mask;
        break;
    case 17:
    case 18:
    case 27:
    case 31:
        qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n",
                      TYPE_IMX_FEC, __func__, reg);
        val = 0;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
                      TYPE_IMX_FEC, __func__, reg);
        val = 0;
        break;
    }

    PHY_PRINTF("read 0x%04x @ %d\n", val, reg);

    return val;
}

static void do_phy_write(IMXFECState *s, int reg, uint32_t val)
{
    PHY_PRINTF("write 0x%04x @ %d\n", val, reg);

    if (reg > 31) {
        /* we only advertise one phy */
        return;
    }

    switch (reg) {
    case 0:     /* Basic Control */
        if (val & 0x8000) {
            phy_reset(s);
        } else {
            s->phy_control = val & 0x7980;
            /* Complete autonegotiation immediately. */
            if (val & 0x1000) {
                s->phy_status |= 0x0020;
            }
        }
        break;
    case 4:     /* Auto-neg advertisement */
        s->phy_advertise = (val & 0x2d7f) | 0x80;
        break;
    case 30:    /* Interrupt mask */
        s->phy_int_mask = val & 0xff;
        phy_update_irq(s);
        break;
    case 17:
    case 18:
    case 27:
    case 31:
        qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n",
                      TYPE_IMX_FEC, __func__, reg);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
                      TYPE_IMX_FEC, __func__, reg);
        break;
    }
}

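/*
 * Buffer descriptors live in guest memory and are accessed directly
 * through the system DMA address space. The FEC variant uses the legacy
 * descriptor layout (IMXFECBufDesc), while the ENET variant uses the
 * enhanced layout (IMXENETBufDesc) with the additional option/status
 * fields consumed by the TX and RX paths below.
 */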
static void imx_fec_read_bd(IMXFECBufDesc *bd, dma_addr_t addr)
{
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd));
}

static void imx_fec_write_bd(IMXFECBufDesc *bd, dma_addr_t addr)
{
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd));
}

static void imx_enet_read_bd(IMXENETBufDesc *bd, dma_addr_t addr)
{
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd));
}

static void imx_enet_write_bd(IMXENETBufDesc *bd, dma_addr_t addr)
{
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd));
}

static void imx_eth_update(IMXFECState *s)
{
    /*
     * Previous versions of qemu had the ENET_INT_MAC and ENET_INT_TS_TIMER
     * interrupts swapped. This worked with older versions of Linux (4.14
     * and older) since Linux associated both interrupt lines with Ethernet
     * MAC interrupts. Specifically,
     * - Linux 4.15 and later have separate interrupt handlers for the MAC and
     *   timer interrupts. Those versions of Linux fail with versions of QEMU
     *   with swapped interrupt assignments.
     * - In Linux 4.14, both interrupt lines were registered with the Ethernet
     *   MAC interrupt handler. As a result, all versions of qemu happen to
     *   work, though that is accidental.
     * - In Linux 4.9 and older, the timer interrupt was registered directly
     *   with the Ethernet MAC interrupt handler. The MAC interrupt was
     *   redirected to a GPIO interrupt to work around erratum ERR006687.
     *   This was implemented using the SOC's IOMUX block. In qemu, this GPIO
     *   interrupt never fired since IOMUX is currently not supported in qemu.
     *   Linux instead received MAC interrupts on the timer interrupt.
     *   As a result, qemu versions with the swapped interrupt assignment work,
     *   albeit accidentally, but qemu versions with the correct interrupt
     *   assignment fail.
     *
     * To ensure that all versions of Linux work, generate ENET_INT_MAC
     * interrupts on both interrupt lines. This should be changed if and when
     * qemu supports IOMUX.
     */
    if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] &
        (ENET_INT_MAC | ENET_INT_TS_TIMER)) {
        qemu_set_irq(s->irq[1], 1);
    } else {
        qemu_set_irq(s->irq[1], 0);
    }

    if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] & ENET_INT_MAC) {
        qemu_set_irq(s->irq[0], 1);
    } else {
        qemu_set_irq(s->irq[0], 0);
    }
}

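/*
 * Legacy (FEC) transmit path: walk the TX ring starting at
 * tx_descriptor[0], gather buffers into s->frame until a descriptor with
 * the L (last) flag is seen, then hand the frame to the network backend.
 * Each processed descriptor has its R (ready) flag cleared and is written
 * back; the W (wrap) flag sends us back to the ring base held in ENET_TDSR.
 */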
static void imx_fec_do_tx(IMXFECState *s)
{
    int frame_size = 0, descnt = 0;
    uint8_t *ptr = s->frame;
    uint32_t addr = s->tx_descriptor[0];

    while (descnt++ < IMX_MAX_DESC) {
        IMXFECBufDesc bd;
        int len;

        imx_fec_read_bd(&bd, addr);
        FEC_PRINTF("tx_bd %x flags %04x len %d data %08x\n",
                   addr, bd.flags, bd.length, bd.data);
        if ((bd.flags & ENET_BD_R) == 0) {
            /* Ran out of descriptors to transmit. */
            FEC_PRINTF("tx_bd ran out of descriptors to transmit\n");
            break;
        }
        len = bd.length;
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        }
        dma_memory_read(&address_space_memory, bd.data, ptr, len);
        ptr += len;
        frame_size += len;
        if (bd.flags & ENET_BD_L) {
            /* Last buffer in frame. */
            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            ptr = s->frame;
            frame_size = 0;
            s->regs[ENET_EIR] |= ENET_INT_TXF;
        }
        s->regs[ENET_EIR] |= ENET_INT_TXB;
        bd.flags &= ~ENET_BD_R;
        /* Write back the modified descriptor. */
        imx_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[ENET_TDSR];
        } else {
            addr += sizeof(bd);
        }
    }

    s->tx_descriptor[0] = addr;

    imx_eth_update(s);
}

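/*
 * Enhanced (ENET) transmit path. The ENET block exposes up to three TX
 * rings, kicked through TDAR, TDAR1 and TDAR2 respectively; the register
 * index selects the ring, its interrupt bits and its ring base register.
 * Enhanced descriptors also carry per-frame offload options: PINS asks
 * for protocol (TCP/UDP) checksum insertion and IINS for IP header
 * checksum insertion, which this model only performs for IPv4 frames.
 */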
static void imx_enet_do_tx(IMXFECState *s, uint32_t index)
{
    int frame_size = 0, descnt = 0;
    uint8_t *ptr = s->frame;
    uint32_t addr, int_txb, int_txf, tdsr;
    size_t ring;

    switch (index) {
    case ENET_TDAR:
        ring = 0;
        int_txb = ENET_INT_TXB;
        int_txf = ENET_INT_TXF;
        tdsr = ENET_TDSR;
        break;
    case ENET_TDAR1:
        ring = 1;
        int_txb = ENET_INT_TXB1;
        int_txf = ENET_INT_TXF1;
        tdsr = ENET_TDSR1;
        break;
    case ENET_TDAR2:
        ring = 2;
        int_txb = ENET_INT_TXB2;
        int_txf = ENET_INT_TXF2;
        tdsr = ENET_TDSR2;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bogus value for index %x\n",
                      __func__, index);
        abort();
        break;
    }

    addr = s->tx_descriptor[ring];

    while (descnt++ < IMX_MAX_DESC) {
        IMXENETBufDesc bd;
        int len;

        imx_enet_read_bd(&bd, addr);
        FEC_PRINTF("tx_bd %x flags %04x len %d data %08x option %04x "
                   "status %04x\n", addr, bd.flags, bd.length, bd.data,
                   bd.option, bd.status);
        if ((bd.flags & ENET_BD_R) == 0) {
            /* Ran out of descriptors to transmit. */
            break;
        }
        len = bd.length;
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        }
        dma_memory_read(&address_space_memory, bd.data, ptr, len);
        ptr += len;
        frame_size += len;
        if (bd.flags & ENET_BD_L) {
            if (bd.option & ENET_BD_PINS) {
                struct ip_header *ip_hd = PKT_GET_IP_HDR(s->frame);
                if (IP_HEADER_VERSION(ip_hd) == 4) {
                    net_checksum_calculate(s->frame, frame_size);
                }
            }
            if (bd.option & ENET_BD_IINS) {
                struct ip_header *ip_hd = PKT_GET_IP_HDR(s->frame);
                /* We compute checksum only for IPv4 frames */
                if (IP_HEADER_VERSION(ip_hd) == 4) {
                    uint16_t csum;
                    ip_hd->ip_sum = 0;
                    csum = net_raw_checksum((uint8_t *)ip_hd, sizeof(*ip_hd));
                    ip_hd->ip_sum = cpu_to_be16(csum);
                }
            }
            /* Last buffer in frame. */
            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            ptr = s->frame;
            frame_size = 0;
            if (bd.option & ENET_BD_TX_INT) {
                s->regs[ENET_EIR] |= int_txf;
            }
            /* Indicate that we've updated the last buffer descriptor. */
            bd.last_buffer = ENET_BD_BDU;
        }
        if (bd.option & ENET_BD_TX_INT) {
            s->regs[ENET_EIR] |= int_txb;
        }
        bd.flags &= ~ENET_BD_R;
        /* Write back the modified descriptor. */
        imx_enet_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[tdsr];
        } else {
            addr += sizeof(bd);
        }
    }

    s->tx_descriptor[ring] = addr;

    imx_eth_update(s);
}

static void imx_eth_do_tx(IMXFECState *s, uint32_t index)
{
    if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
        imx_enet_do_tx(s, index);
    } else {
        imx_fec_do_tx(s);
    }
}

static void imx_eth_enable_rx(IMXFECState *s, bool flush)
{
    IMXFECBufDesc bd;

    imx_fec_read_bd(&bd, s->rx_descriptor);

    s->regs[ENET_RDAR] = (bd.flags & ENET_BD_E) ? ENET_RDAR_RDAR : 0;

    if (!s->regs[ENET_RDAR]) {
        FEC_PRINTF("RX buffer full\n");
    } else if (flush) {
        qemu_flush_queued_packets(qemu_get_queue(s->nic));
    }
}

static void imx_eth_reset(DeviceState *d)
{
    IMXFECState *s = IMX_FEC(d);

    /* Reset the Device */
    memset(s->regs, 0, sizeof(s->regs));
    s->regs[ENET_ECR] = 0xf0000000;
    s->regs[ENET_MIBC] = 0xc0000000;
    s->regs[ENET_RCR] = 0x05ee0001;
    s->regs[ENET_OPD] = 0x00010000;

    s->regs[ENET_PALR] = (s->conf.macaddr.a[0] << 24)
                         | (s->conf.macaddr.a[1] << 16)
                         | (s->conf.macaddr.a[2] << 8)
                         | s->conf.macaddr.a[3];
    s->regs[ENET_PAUR] = (s->conf.macaddr.a[4] << 24)
                         | (s->conf.macaddr.a[5] << 16)
                         | 0x8808;

    if (s->is_fec) {
        s->regs[ENET_FRBR] = 0x00000600;
        s->regs[ENET_FRSR] = 0x00000500;
        s->regs[ENET_MIIGSK_ENR] = 0x00000006;
    } else {
        s->regs[ENET_RAEM] = 0x00000004;
        s->regs[ENET_RAFL] = 0x00000004;
        s->regs[ENET_TAEM] = 0x00000004;
        s->regs[ENET_TAFL] = 0x00000008;
        s->regs[ENET_TIPG] = 0x0000000c;
        s->regs[ENET_FTRL] = 0x000007ff;
        s->regs[ENET_ATPER] = 0x3b9aca00;
    }

    s->rx_descriptor = 0;
    memset(s->tx_descriptor, 0, sizeof(s->tx_descriptor));

    /* We also reset the PHY */
    phy_reset(s);
}

static uint32_t imx_default_read(IMXFECState *s, uint32_t index)
{
    qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%"
                  PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
    return 0;
}

static uint32_t imx_fec_read(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_FRBR:
    case ENET_FRSR:
    case ENET_MIIGSK_CFGR:
    case ENET_MIIGSK_ENR:
        return s->regs[index];
    default:
        return imx_default_read(s, index);
    }
}

static uint32_t imx_enet_read(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_RSFL:
    case ENET_RSEM:
    case ENET_RAEM:
    case ENET_RAFL:
    case ENET_TSEM:
    case ENET_TAEM:
    case ENET_TAFL:
    case ENET_TIPG:
    case ENET_FTRL:
    case ENET_TACC:
    case ENET_RACC:
    case ENET_ATCR:
    case ENET_ATVR:
    case ENET_ATOFF:
    case ENET_ATPER:
    case ENET_ATCOR:
    case ENET_ATINC:
    case ENET_ATSTMP:
    case ENET_TGSR:
    case ENET_TCSR0:
    case ENET_TCCR0:
    case ENET_TCSR1:
    case ENET_TCCR1:
    case ENET_TCSR2:
    case ENET_TCCR2:
    case ENET_TCSR3:
    case ENET_TCCR3:
        return s->regs[index];
    default:
        return imx_default_read(s, index);
    }
}

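/*
 * All registers are 32 bits wide, so the MMIO offset is converted to a
 * register index with offset >> 2. Registers common to both the FEC and
 * ENET variants are handled directly in imx_eth_read()/imx_eth_write();
 * everything else is dispatched to the variant-specific helpers above
 * based on s->is_fec.
 */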
static uint64_t imx_eth_read(void *opaque, hwaddr offset, unsigned size)
{
    uint32_t value = 0;
    IMXFECState *s = IMX_FEC(opaque);
    uint32_t index = offset >> 2;

    switch (index) {
    case ENET_EIR:
    case ENET_EIMR:
    case ENET_RDAR:
    case ENET_TDAR:
    case ENET_ECR:
    case ENET_MMFR:
    case ENET_MSCR:
    case ENET_MIBC:
    case ENET_RCR:
    case ENET_TCR:
    case ENET_PALR:
    case ENET_PAUR:
    case ENET_OPD:
    case ENET_IAUR:
    case ENET_IALR:
    case ENET_GAUR:
    case ENET_GALR:
    case ENET_TFWR:
    case ENET_RDSR:
    case ENET_TDSR:
    case ENET_MRBR:
        value = s->regs[index];
        break;
    default:
        if (s->is_fec) {
            value = imx_fec_read(s, index);
        } else {
            value = imx_enet_read(s, index);
        }
        break;
    }

    FEC_PRINTF("reg[%s] => 0x%" PRIx32 "\n", imx_eth_reg_name(s, index),
               value);

    return value;
}

static void imx_default_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad address at offset 0x%"
                  PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
    return;
}

static void imx_fec_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    switch (index) {
    case ENET_FRBR:
        /* FRBR is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register FRBR is read only\n",
                      TYPE_IMX_FEC, __func__);
        break;
    case ENET_FRSR:
        s->regs[index] = (value & 0x000003fc) | 0x00000400;
        break;
    case ENET_MIIGSK_CFGR:
        s->regs[index] = value & 0x00000053;
        break;
    case ENET_MIIGSK_ENR:
        s->regs[index] = (value & 0x00000002) ? 0x00000006 : 0;
        break;
    default:
        imx_default_write(s, index, value);
        break;
    }
}

static void imx_enet_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    switch (index) {
    case ENET_RSFL:
    case ENET_RSEM:
    case ENET_RAEM:
    case ENET_RAFL:
    case ENET_TSEM:
    case ENET_TAEM:
    case ENET_TAFL:
        s->regs[index] = value & 0x000001ff;
        break;
    case ENET_TIPG:
        s->regs[index] = value & 0x0000001f;
        break;
    case ENET_FTRL:
        s->regs[index] = value & 0x00003fff;
        break;
    case ENET_TACC:
        s->regs[index] = value & 0x00000019;
        break;
    case ENET_RACC:
        s->regs[index] = value & 0x000000C7;
        break;
    case ENET_ATCR:
        s->regs[index] = value & 0x00002a9d;
        break;
    case ENET_ATVR:
    case ENET_ATOFF:
    case ENET_ATPER:
        s->regs[index] = value;
        break;
    case ENET_ATSTMP:
        /* ATSTMP is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register ATSTMP is read only\n",
                      TYPE_IMX_FEC, __func__);
        break;
    case ENET_ATCOR:
        s->regs[index] = value & 0x7fffffff;
        break;
    case ENET_ATINC:
        s->regs[index] = value & 0x00007f7f;
        break;
    case ENET_TGSR:
        /* implement clear timer flag */
        s->regs[index] &= ~(value & 0x0000000f); /* all bits W1C */
        break;
    case ENET_TCSR0:
    case ENET_TCSR1:
    case ENET_TCSR2:
    case ENET_TCSR3:
        s->regs[index] &= ~(value & 0x00000080); /* W1C bits */
        s->regs[index] &= ~0x0000007d; /* writable fields */
        s->regs[index] |= (value & 0x0000007d);
        break;
    case ENET_TCCR0:
    case ENET_TCCR1:
    case ENET_TCCR2:
    case ENET_TCCR3:
        s->regs[index] = value;
        break;
    default:
        imx_default_write(s, index, value);
        break;
    }
}

static void imx_eth_write(void *opaque, hwaddr offset, uint64_t value,
                          unsigned size)
{
    IMXFECState *s = IMX_FEC(opaque);
    const bool single_tx_ring = !imx_eth_is_multi_tx_ring(s);
    uint32_t index = offset >> 2;

    FEC_PRINTF("reg[%s] <= 0x%" PRIx32 "\n", imx_eth_reg_name(s, index),
               (uint32_t)value);

    switch (index) {
    case ENET_EIR:
        s->regs[index] &= ~value;
        break;
    case ENET_EIMR:
        s->regs[index] = value;
        break;
    case ENET_RDAR:
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            if (!s->regs[index]) {
                imx_eth_enable_rx(s, true);
            }
        } else {
            s->regs[index] = 0;
        }
        break;
    case ENET_TDAR1:
    case ENET_TDAR2:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDAR2 or TDAR1\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }
        /* fall through */
    case ENET_TDAR:
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            s->regs[index] = ENET_TDAR_TDAR;
            imx_eth_do_tx(s, index);
        }
        s->regs[index] = 0;
        break;
    case ENET_ECR:
        if (value & ENET_ECR_RESET) {
            return imx_eth_reset(DEVICE(s));
        }
        s->regs[index] = value;
        if ((s->regs[index] & ENET_ECR_ETHEREN) == 0) {
            s->regs[ENET_RDAR] = 0;
            s->rx_descriptor = s->regs[ENET_RDSR];
            s->regs[ENET_TDAR] = 0;
            s->regs[ENET_TDAR1] = 0;
            s->regs[ENET_TDAR2] = 0;
            s->tx_descriptor[0] = s->regs[ENET_TDSR];
            s->tx_descriptor[1] = s->regs[ENET_TDSR1];
            s->tx_descriptor[2] = s->regs[ENET_TDSR2];
        }
        break;
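    /*
     * MMFR holds an MDIO management frame. This model only looks at the
     * fields it needs: bit 29 distinguishes a read (set) from a write
     * (clear), bits 27:18 carry the PHY and register address (passed on
     * as a single value, so only PHY address 0 reaches the emulated
     * registers 0..31), and bits 15:0 carry the data. The transaction
     * completes immediately and ENET_INT_MII is raised right away.
     */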
    case ENET_MMFR:
        s->regs[index] = value;
        if (extract32(value, 29, 1)) {
            /* This is a read operation */
            s->regs[ENET_MMFR] = deposit32(s->regs[ENET_MMFR], 0, 16,
                                           do_phy_read(s,
                                                       extract32(value,
                                                                 18, 10)));
        } else {
            /* This is a write operation */
            do_phy_write(s, extract32(value, 18, 10), extract32(value, 0, 16));
        }
        /* raise the interrupt as the PHY operation is done */
        s->regs[ENET_EIR] |= ENET_INT_MII;
        break;
    case ENET_MSCR:
        s->regs[index] = value & 0xfe;
        break;
    case ENET_MIBC:
        /* TODO: Implement MIB. */
        s->regs[index] = (value & 0x80000000) ? 0xc0000000 : 0;
        break;
    case ENET_RCR:
        s->regs[index] = value & 0x07ff003f;
        /* TODO: Implement LOOP mode. */
        break;
    case ENET_TCR:
        /* We transmit immediately, so raise GRA immediately. */
        s->regs[index] = value;
        if (value & 1) {
            s->regs[ENET_EIR] |= ENET_INT_GRA;
        }
        break;
    case ENET_PALR:
        s->regs[index] = value;
        s->conf.macaddr.a[0] = value >> 24;
        s->conf.macaddr.a[1] = value >> 16;
        s->conf.macaddr.a[2] = value >> 8;
        s->conf.macaddr.a[3] = value;
        break;
    case ENET_PAUR:
        s->regs[index] = (value | 0x0000ffff) & 0xffff8808;
        s->conf.macaddr.a[4] = value >> 24;
        s->conf.macaddr.a[5] = value >> 16;
        break;
    case ENET_OPD:
        s->regs[index] = (value & 0x0000ffff) | 0x00010000;
        break;
    case ENET_IAUR:
    case ENET_IALR:
    case ENET_GAUR:
    case ENET_GALR:
        /* TODO: implement MAC hash filtering. */
        break;
    case ENET_TFWR:
        if (s->is_fec) {
            s->regs[index] = value & 0x3;
        } else {
            s->regs[index] = value & 0x13f;
        }
        break;
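    /*
     * RDSR/TDSR hold the RX and TX descriptor ring base addresses. The
     * FEC variant clears the low two address bits and the ENET variant
     * clears the low three, which also matches the masking applied to
     * the TDSR1/TDSR2 ring bases below.
     */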
    case ENET_RDSR:
        if (s->is_fec) {
            s->regs[index] = value & ~3;
        } else {
            s->regs[index] = value & ~7;
        }
        s->rx_descriptor = s->regs[index];
        break;
    case ENET_TDSR:
        if (s->is_fec) {
            s->regs[index] = value & ~3;
        } else {
            s->regs[index] = value & ~7;
        }
        s->tx_descriptor[0] = s->regs[index];
        break;
    case ENET_TDSR1:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR1\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }

        s->regs[index] = value & ~7;
        s->tx_descriptor[1] = s->regs[index];
        break;
    case ENET_TDSR2:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR2\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }

        s->regs[index] = value & ~7;
        s->tx_descriptor[2] = s->regs[index];
        break;
    case ENET_MRBR:
        s->regs[index] = value & 0x00003ff0;
        break;
    default:
        if (s->is_fec) {
            imx_fec_write(s, index, value);
        } else {
            imx_enet_write(s, index, value);
        }
        return;
    }

    imx_eth_update(s);
}

static bool imx_eth_can_receive(NetClientState *nc)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    FEC_PRINTF("\n");

    return !!s->regs[ENET_RDAR];
}

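/*
 * Legacy (FEC) receive path: the incoming frame is copied into guest
 * memory across one or more RX descriptors of at most ENET_MRBR bytes
 * each, with a CRC-32 (computed here with zlib's crc32()) appended as
 * the final four bytes. Oversized frames are truncated and flagged with
 * TR/LG; frames above the limit held in the upper half of ENET_RCR only
 * get the LG flag.
 */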
static ssize_t imx_fec_receive(NetClientState *nc, const uint8_t *buf,
                               size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
    IMXFECBufDesc bd;
    uint32_t flags = 0;
    uint32_t addr;
    uint32_t crc;
    uint32_t buf_addr;
    uint8_t *crc_ptr;
    unsigned int buf_len;
    size_t size = len;

    FEC_PRINTF("len %d\n", (int)size);

    if (!s->regs[ENET_RDAR]) {
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
                      TYPE_IMX_FEC, __func__);
        return 0;
    }

    /* 4 bytes for the CRC. */
    size += 4;
    crc = cpu_to_be32(crc32(~0, buf, size));
    crc_ptr = (uint8_t *) &crc;

    /* Huge frames are truncated. */
    if (size > ENET_MAX_FRAME_SIZE) {
        size = ENET_MAX_FRAME_SIZE;
        flags |= ENET_BD_TR | ENET_BD_LG;
    }

    /* Frames larger than the user limit just set error flags. */
    if (size > (s->regs[ENET_RCR] >> 16)) {
        flags |= ENET_BD_LG;
    }

    addr = s->rx_descriptor;
    while (size > 0) {
        imx_fec_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_E) == 0) {
            /* No descriptors available. Bail out. */
            /*
             * FIXME: This is wrong. We should probably either
             * save the remainder for when more RX buffers are
             * available, or flag an error.
             */
            qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
                          TYPE_IMX_FEC, __func__);
            break;
        }
        buf_len = (size <= s->regs[ENET_MRBR]) ? size : s->regs[ENET_MRBR];
        bd.length = buf_len;
        size -= buf_len;

        FEC_PRINTF("rx_bd 0x%x length %d\n", addr, bd.length);

        /* The last 4 bytes are the CRC. */
        if (size < 4) {
            buf_len += size - 4;
        }
        buf_addr = bd.data;
        dma_memory_write(&address_space_memory, buf_addr, buf, buf_len);
        buf += buf_len;
        if (size < 4) {
            dma_memory_write(&address_space_memory, buf_addr + buf_len,
                             crc_ptr, 4 - size);
            crc_ptr += 4 - size;
        }
        bd.flags &= ~ENET_BD_E;
        if (size == 0) {
            /* Last buffer in frame. */
            bd.flags |= flags | ENET_BD_L;
            FEC_PRINTF("rx frame flags %04x\n", bd.flags);
            s->regs[ENET_EIR] |= ENET_INT_RXF;
        } else {
            s->regs[ENET_EIR] |= ENET_INT_RXB;
        }
        imx_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[ENET_RDSR];
        } else {
            addr += sizeof(bd);
        }
    }
    s->rx_descriptor = addr;
    imx_eth_enable_rx(s, false);
    imx_eth_update(s);
    return len;
}

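/*
 * Enhanced (ENET) receive path. It differs from the FEC path in a few
 * ways visible below: truncation uses the ENET_FTRL limit, the SHIFT16
 * option in ENET_RACC inserts two bytes of padding so the IP payload
 * ends up 4-byte aligned, RXF/RXB interrupts are gated by the per
 * descriptor RX_INT option, and the BDU bit marks the last descriptor
 * as updated.
 */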
static ssize_t imx_enet_receive(NetClientState *nc, const uint8_t *buf,
                                size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
    IMXENETBufDesc bd;
    uint32_t flags = 0;
    uint32_t addr;
    uint32_t crc;
    uint32_t buf_addr;
    uint8_t *crc_ptr;
    unsigned int buf_len;
    size_t size = len;
    bool shift16 = s->regs[ENET_RACC] & ENET_RACC_SHIFT16;

    FEC_PRINTF("len %d\n", (int)size);

    if (!s->regs[ENET_RDAR]) {
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
                      TYPE_IMX_FEC, __func__);
        return 0;
    }

    /* 4 bytes for the CRC. */
    size += 4;
    crc = cpu_to_be32(crc32(~0, buf, size));
    crc_ptr = (uint8_t *) &crc;

    if (shift16) {
        size += 2;
    }

    /* Huge frames are truncated. */
    if (size > s->regs[ENET_FTRL]) {
        size = s->regs[ENET_FTRL];
        flags |= ENET_BD_TR | ENET_BD_LG;
    }

    /* Frames larger than the user limit just set error flags. */
    if (size > (s->regs[ENET_RCR] >> 16)) {
        flags |= ENET_BD_LG;
    }

    addr = s->rx_descriptor;
    while (size > 0) {
        imx_enet_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_E) == 0) {
            /* No descriptors available. Bail out. */
            /*
             * FIXME: This is wrong. We should probably either
             * save the remainder for when more RX buffers are
             * available, or flag an error.
             */
            qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
                          TYPE_IMX_FEC, __func__);
            break;
        }
        buf_len = MIN(size, s->regs[ENET_MRBR]);
        bd.length = buf_len;
        size -= buf_len;

        FEC_PRINTF("rx_bd 0x%x length %d\n", addr, bd.length);

        /* The last 4 bytes are the CRC. */
        if (size < 4) {
            buf_len += size - 4;
        }
        buf_addr = bd.data;

        if (shift16) {
            /*
             * If SHIFT16 bit of ENETx_RACC register is set we need to
             * align the payload to 4-byte boundary.
             */
            const uint8_t zeros[2] = { 0 };

            dma_memory_write(&address_space_memory, buf_addr,
                             zeros, sizeof(zeros));

            buf_addr += sizeof(zeros);
            buf_len -= sizeof(zeros);

            /* We only do this once per Ethernet frame */
            shift16 = false;
        }

        dma_memory_write(&address_space_memory, buf_addr, buf, buf_len);
        buf += buf_len;
        if (size < 4) {
            dma_memory_write(&address_space_memory, buf_addr + buf_len,
                             crc_ptr, 4 - size);
            crc_ptr += 4 - size;
        }
        bd.flags &= ~ENET_BD_E;
        if (size == 0) {
            /* Last buffer in frame. */
            bd.flags |= flags | ENET_BD_L;
            FEC_PRINTF("rx frame flags %04x\n", bd.flags);
            /* Indicate that we've updated the last buffer descriptor. */
            bd.last_buffer = ENET_BD_BDU;
            if (bd.option & ENET_BD_RX_INT) {
                s->regs[ENET_EIR] |= ENET_INT_RXF;
            }
        } else {
            if (bd.option & ENET_BD_RX_INT) {
                s->regs[ENET_EIR] |= ENET_INT_RXB;
            }
        }
        imx_enet_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[ENET_RDSR];
        } else {
            addr += sizeof(bd);
        }
    }
    s->rx_descriptor = addr;
    imx_eth_enable_rx(s, false);
    imx_eth_update(s);
    return len;
}

static ssize_t imx_eth_receive(NetClientState *nc, const uint8_t *buf,
                               size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
        return imx_enet_receive(nc, buf, len);
    } else {
        return imx_fec_receive(nc, buf, len);
    }
}

static const MemoryRegionOps imx_eth_ops = {
    .read = imx_eth_read,
    .write = imx_eth_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void imx_eth_cleanup(NetClientState *nc)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    s->nic = NULL;
}

static NetClientInfo imx_eth_net_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = imx_eth_can_receive,
    .receive = imx_eth_receive,
    .cleanup = imx_eth_cleanup,
    .link_status_changed = imx_eth_set_link,
};

static void imx_eth_realize(DeviceState *dev, Error **errp)
{
    IMXFECState *s = IMX_FEC(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

    memory_region_init_io(&s->iomem, OBJECT(dev), &imx_eth_ops, s,
                          TYPE_IMX_FEC, FSL_IMX25_FEC_SIZE);
    sysbus_init_mmio(sbd, &s->iomem);
    sysbus_init_irq(sbd, &s->irq[0]);
    sysbus_init_irq(sbd, &s->irq[1]);

    qemu_macaddr_default_if_unset(&s->conf.macaddr);

    s->nic = qemu_new_nic(&imx_eth_net_info, &s->conf,
                          object_get_typename(OBJECT(dev)),
                          DEVICE(dev)->id, s);

    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}

static Property imx_eth_properties[] = {
    DEFINE_NIC_PROPERTIES(IMXFECState, conf),
    DEFINE_PROP_UINT32("tx-ring-num", IMXFECState, tx_ring_num, 1),
    DEFINE_PROP_END_OF_LIST(),
};

static void imx_eth_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->vmsd = &vmstate_imx_eth;
    dc->reset = imx_eth_reset;
    device_class_set_props(dc, imx_eth_properties);
    dc->realize = imx_eth_realize;
    dc->desc = "i.MX FEC/ENET Ethernet Controller";
}

static void imx_fec_init(Object *obj)
{
    IMXFECState *s = IMX_FEC(obj);

    s->is_fec = true;
}

static void imx_enet_init(Object *obj)
{
    IMXFECState *s = IMX_FEC(obj);

    s->is_fec = false;
}

static const TypeInfo imx_fec_info = {
    .name = TYPE_IMX_FEC,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(IMXFECState),
    .instance_init = imx_fec_init,
    .class_init = imx_eth_class_init,
};

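/*
 * TYPE_IMX_ENET reuses the FEC implementation: it inherits from
 * TYPE_IMX_FEC and only flips is_fec in its instance_init, which makes
 * the code above take the ENET-specific register and descriptor paths.
 */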
static const TypeInfo imx_enet_info = {
    .name = TYPE_IMX_ENET,
    .parent = TYPE_IMX_FEC,
    .instance_init = imx_enet_init,
};

static void imx_eth_register_types(void)
{
    type_register_static(&imx_fec_info);
    type_register_static(&imx_enet_info);
}

type_init(imx_eth_register_types)