/*
 * i.MX Fast Ethernet Controller emulation.
 *
 * Copyright (c) 2013 Jean-Christophe Dubois. <jcd@tribudubois.net>
 *
 * Based on Coldfire Fast Ethernet Controller emulation.
 *
 * Copyright (c) 2007 CodeSourcery.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "hw/irq.h"
#include "hw/net/imx_fec.h"
#include "migration/vmstate.h"
#include "sysemu/dma.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "net/checksum.h"
#include "net/eth.h"

/* For crc32 */
#include <zlib.h>

#ifndef DEBUG_IMX_FEC
#define DEBUG_IMX_FEC 0
#endif

#define FEC_PRINTF(fmt, args...) \
    do { \
        if (DEBUG_IMX_FEC) { \
            fprintf(stderr, "[%s]%s: " fmt , TYPE_IMX_FEC, \
                    __func__, ##args); \
        } \
    } while (0)

#ifndef DEBUG_IMX_PHY
#define DEBUG_IMX_PHY 0
#endif

#define PHY_PRINTF(fmt, args...) \
    do { \
        if (DEBUG_IMX_PHY) { \
            fprintf(stderr, "[%s.phy]%s: " fmt , TYPE_IMX_FEC, \
                    __func__, ##args); \
        } \
    } while (0)

#define IMX_MAX_DESC 1024

static const char *imx_default_reg_name(IMXFECState *s, uint32_t index)
{
    static char tmp[20];
    sprintf(tmp, "index %d", index);
    return tmp;
}

static const char *imx_fec_reg_name(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_FRBR:
        return "FRBR";
    case ENET_FRSR:
        return "FRSR";
    case ENET_MIIGSK_CFGR:
        return "MIIGSK_CFGR";
    case ENET_MIIGSK_ENR:
        return "MIIGSK_ENR";
    default:
        return imx_default_reg_name(s, index);
    }
}

static const char *imx_enet_reg_name(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_RSFL:
        return "RSFL";
    case ENET_RSEM:
        return "RSEM";
    case ENET_RAEM:
        return "RAEM";
    case ENET_RAFL:
        return "RAFL";
    case ENET_TSEM:
        return "TSEM";
    case ENET_TAEM:
        return "TAEM";
    case ENET_TAFL:
        return "TAFL";
    case ENET_TIPG:
        return "TIPG";
    case ENET_FTRL:
        return "FTRL";
    case ENET_TACC:
        return "TACC";
    case ENET_RACC:
        return "RACC";
    case ENET_ATCR:
        return "ATCR";
    case ENET_ATVR:
        return "ATVR";
    case ENET_ATOFF:
        return "ATOFF";
    case ENET_ATPER:
        return "ATPER";
    case ENET_ATCOR:
        return "ATCOR";
    case ENET_ATINC:
        return "ATINC";
    case ENET_ATSTMP:
        return "ATSTMP";
    case ENET_TGSR:
        return "TGSR";
    case ENET_TCSR0:
        return "TCSR0";
    case ENET_TCCR0:
        return "TCCR0";
    case ENET_TCSR1:
        return "TCSR1";
    case ENET_TCCR1:
        return "TCCR1";
    case ENET_TCSR2:
        return "TCSR2";
    case ENET_TCCR2:
        return "TCCR2";
    case ENET_TCSR3:
        return "TCSR3";
    case ENET_TCCR3:
        return "TCCR3";
    default:
        return imx_default_reg_name(s, index);
    }
}

static const char *imx_eth_reg_name(IMXFECState *s, uint32_t index)
{
    switch (index) {
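    /* Registers common to both the FEC and the ENET variant of the block. */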
    case ENET_EIR:
        return "EIR";
    case ENET_EIMR:
        return "EIMR";
    case ENET_RDAR:
        return "RDAR";
    case ENET_TDAR:
        return "TDAR";
    case ENET_ECR:
        return "ECR";
    case ENET_MMFR:
        return "MMFR";
    case ENET_MSCR:
        return "MSCR";
    case ENET_MIBC:
        return "MIBC";
    case ENET_RCR:
        return "RCR";
    case ENET_TCR:
        return "TCR";
    case ENET_PALR:
        return "PALR";
    case ENET_PAUR:
        return "PAUR";
    case ENET_OPD:
        return "OPD";
    case ENET_IAUR:
        return "IAUR";
    case ENET_IALR:
        return "IALR";
    case ENET_GAUR:
        return "GAUR";
    case ENET_GALR:
        return "GALR";
    case ENET_TFWR:
        return "TFWR";
    case ENET_RDSR:
        return "RDSR";
    case ENET_TDSR:
        return "TDSR";
    case ENET_MRBR:
        return "MRBR";
    default:
        if (s->is_fec) {
            return imx_fec_reg_name(s, index);
        } else {
            return imx_enet_reg_name(s, index);
        }
    }
}

/*
 * Versions of this device with more than one TX descriptor save the
 * 2nd and 3rd descriptors in a subsection, to maintain migration
 * compatibility with previous versions of the device that only
 * supported a single descriptor.
 */
static bool imx_eth_is_multi_tx_ring(void *opaque)
{
    IMXFECState *s = IMX_FEC(opaque);

    return s->tx_ring_num > 1;
}

static const VMStateDescription vmstate_imx_eth_txdescs = {
    .name = "imx.fec/txdescs",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = imx_eth_is_multi_tx_ring,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(tx_descriptor[1], IMXFECState),
        VMSTATE_UINT32(tx_descriptor[2], IMXFECState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_imx_eth = {
    .name = TYPE_IMX_FEC,
    .version_id = 2,
    .minimum_version_id = 2,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, IMXFECState, ENET_MAX),
        VMSTATE_UINT32(rx_descriptor, IMXFECState),
        VMSTATE_UINT32(tx_descriptor[0], IMXFECState),
        VMSTATE_UINT32(phy_status, IMXFECState),
        VMSTATE_UINT32(phy_control, IMXFECState),
        VMSTATE_UINT32(phy_advertise, IMXFECState),
        VMSTATE_UINT32(phy_int, IMXFECState),
        VMSTATE_UINT32(phy_int_mask, IMXFECState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_imx_eth_txdescs,
        NULL
    },
};

#define PHY_INT_ENERGYON            (1 << 7)
#define PHY_INT_AUTONEG_COMPLETE    (1 << 6)
#define PHY_INT_FAULT               (1 << 5)
#define PHY_INT_DOWN                (1 << 4)
#define PHY_INT_AUTONEG_LP          (1 << 3)
#define PHY_INT_PARFAULT            (1 << 2)
#define PHY_INT_AUTONEG_PAGE        (1 << 1)

static void imx_eth_update(IMXFECState *s);

/*
 * The MII phy could raise a GPIO to the processor which in turn
 * could be handled as an interrupt by the OS.
 * For now we don't handle any GPIO/interrupt line, so the OS will
 * have to poll for the PHY status.
 */
static void phy_update_irq(IMXFECState *s)
{
    imx_eth_update(s);
}

static void phy_update_link(IMXFECState *s)
{
    /* Autonegotiation status mirrors link status. */
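    /* BMSR bit 2 (0x0004) is link status, bit 5 (0x0020) autoneg complete. */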
    if (qemu_get_queue(s->nic)->link_down) {
        PHY_PRINTF("link is down\n");
        s->phy_status &= ~0x0024;
        s->phy_int |= PHY_INT_DOWN;
    } else {
        PHY_PRINTF("link is up\n");
        s->phy_status |= 0x0024;
        s->phy_int |= PHY_INT_ENERGYON;
        s->phy_int |= PHY_INT_AUTONEG_COMPLETE;
    }
    phy_update_irq(s);
}

static void imx_eth_set_link(NetClientState *nc)
{
    phy_update_link(IMX_FEC(qemu_get_nic_opaque(nc)));
}

static void phy_reset(IMXFECState *s)
{
    s->phy_status = 0x7809;
    s->phy_control = 0x3000;
    s->phy_advertise = 0x01e1;
    s->phy_int_mask = 0;
    s->phy_int = 0;
    phy_update_link(s);
}

static uint32_t do_phy_read(IMXFECState *s, int reg)
{
    uint32_t val;

    if (reg > 31) {
        /* we only advertise one phy */
        return 0;
    }

    switch (reg) {
    case 0: /* Basic Control */
        val = s->phy_control;
        break;
    case 1: /* Basic Status */
        val = s->phy_status;
        break;
    case 2: /* ID1 */
        val = 0x0007;
        break;
    case 3: /* ID2 */
        val = 0xc0d1;
        break;
    case 4: /* Auto-neg advertisement */
        val = s->phy_advertise;
        break;
    case 5: /* Auto-neg Link Partner Ability */
        val = 0x0f71;
        break;
    case 6: /* Auto-neg Expansion */
        val = 1;
        break;
    case 29: /* Interrupt source. */
        val = s->phy_int;
        s->phy_int = 0;
        phy_update_irq(s);
        break;
    case 30: /* Interrupt mask */
        val = s->phy_int_mask;
        break;
    case 17:
    case 18:
    case 27:
    case 31:
        qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n",
                      TYPE_IMX_FEC, __func__, reg);
        val = 0;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
                      TYPE_IMX_FEC, __func__, reg);
        val = 0;
        break;
    }

    PHY_PRINTF("read 0x%04x @ %d\n", val, reg);

    return val;
}

static void do_phy_write(IMXFECState *s, int reg, uint32_t val)
{
    PHY_PRINTF("write 0x%04x @ %d\n", val, reg);

    if (reg > 31) {
        /* we only advertise one phy */
        return;
    }

    switch (reg) {
    case 0: /* Basic Control */
        if (val & 0x8000) {
            phy_reset(s);
        } else {
            s->phy_control = val & 0x7980;
            /* Complete autonegotiation immediately. */
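            /*
             * BMCR bit 12 (0x1000) enables autonegotiation; BMSR bit 5
             * (0x0020) reports it as complete.
             */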
            if (val & 0x1000) {
                s->phy_status |= 0x0020;
            }
        }
        break;
    case 4: /* Auto-neg advertisement */
        s->phy_advertise = (val & 0x2d7f) | 0x80;
        break;
    case 30: /* Interrupt mask */
        s->phy_int_mask = val & 0xff;
        phy_update_irq(s);
        break;
    case 17:
    case 18:
    case 27:
    case 31:
        qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n",
                      TYPE_IMX_FEC, __func__, reg);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
                      TYPE_IMX_FEC, __func__, reg);
        break;
    }
}

static void imx_fec_read_bd(IMXFECBufDesc *bd, dma_addr_t addr)
{
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd));
}

static void imx_fec_write_bd(IMXFECBufDesc *bd, dma_addr_t addr)
{
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd));
}

static void imx_enet_read_bd(IMXENETBufDesc *bd, dma_addr_t addr)
{
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd));
}

static void imx_enet_write_bd(IMXENETBufDesc *bd, dma_addr_t addr)
{
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd));
}

static void imx_eth_update(IMXFECState *s)
{
    /*
     * Previous versions of qemu had the ENET_INT_MAC and ENET_INT_TS_TIMER
     * interrupts swapped. This worked with older versions of Linux (4.14
     * and older) since Linux associated both interrupt lines with Ethernet
     * MAC interrupts. Specifically,
     * - Linux 4.15 and later have separate interrupt handlers for the MAC and
     *   timer interrupts. Those versions of Linux fail with versions of QEMU
     *   with swapped interrupt assignments.
     * - In linux 4.14, both interrupt lines were registered with the Ethernet
     *   MAC interrupt handler. As a result, all versions of qemu happen to
     *   work, though that is accidental.
     * - In Linux 4.9 and older, the timer interrupt was registered directly
     *   with the Ethernet MAC interrupt handler. The MAC interrupt was
     *   redirected to a GPIO interrupt to work around erratum ERR006687.
     *   This was implemented using the SOC's IOMUX block. In qemu, this GPIO
     *   interrupt never fired since IOMUX is currently not supported in qemu.
     *   Linux instead received MAC interrupts on the timer interrupt.
     *   As a result, qemu versions with the swapped interrupt assignment work,
     *   albeit accidentally, but qemu versions with the correct interrupt
     *   assignment fail.
     *
     * To ensure that all versions of Linux work, generate ENET_INT_MAC
     * interrupts on both interrupt lines. This should be changed if and when
     * qemu supports IOMUX.
     */
    if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] &
        (ENET_INT_MAC | ENET_INT_TS_TIMER)) {
        qemu_set_irq(s->irq[1], 1);
    } else {
        qemu_set_irq(s->irq[1], 0);
    }

    if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] & ENET_INT_MAC) {
        qemu_set_irq(s->irq[0], 1);
    } else {
        qemu_set_irq(s->irq[0], 0);
    }
}

static void imx_fec_do_tx(IMXFECState *s)
{
    int frame_size = 0, descnt = 0;
    uint8_t *ptr = s->frame;
    uint32_t addr = s->tx_descriptor[0];

    while (descnt++ < IMX_MAX_DESC) {
        IMXFECBufDesc bd;
        int len;

        imx_fec_read_bd(&bd, addr);
        FEC_PRINTF("tx_bd %x flags %04x len %d data %08x\n",
                   addr, bd.flags, bd.length, bd.data);
        if ((bd.flags & ENET_BD_R) == 0) {
            /* Run out of descriptors to transmit. */
            FEC_PRINTF("tx_bd ran out of descriptors to transmit\n");
            break;
        }
        len = bd.length;
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        }
        dma_memory_read(&address_space_memory, bd.data, ptr, len);
        ptr += len;
        frame_size += len;
        if (bd.flags & ENET_BD_L) {
            /* Last buffer in frame. */
            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            ptr = s->frame;
            frame_size = 0;
            s->regs[ENET_EIR] |= ENET_INT_TXF;
        }
        s->regs[ENET_EIR] |= ENET_INT_TXB;
        bd.flags &= ~ENET_BD_R;
        /* Write back the modified descriptor. */
        imx_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[ENET_TDSR];
        } else {
            addr += sizeof(bd);
        }
    }

    s->tx_descriptor[0] = addr;

    imx_eth_update(s);
}

static void imx_enet_do_tx(IMXFECState *s, uint32_t index)
{
    int frame_size = 0, descnt = 0;

    uint8_t *ptr = s->frame;
    uint32_t addr, int_txb, int_txf, tdsr;
    size_t ring;

    switch (index) {
    case ENET_TDAR:
        ring = 0;
        int_txb = ENET_INT_TXB;
        int_txf = ENET_INT_TXF;
        tdsr = ENET_TDSR;
        break;
    case ENET_TDAR1:
        ring = 1;
        int_txb = ENET_INT_TXB1;
        int_txf = ENET_INT_TXF1;
        tdsr = ENET_TDSR1;
        break;
    case ENET_TDAR2:
        ring = 2;
        int_txb = ENET_INT_TXB2;
        int_txf = ENET_INT_TXF2;
        tdsr = ENET_TDSR2;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bogus value for index %x\n",
                      __func__, index);
        abort();
        break;
    }

    addr = s->tx_descriptor[ring];

    while (descnt++ < IMX_MAX_DESC) {
        IMXENETBufDesc bd;
        int len;

        imx_enet_read_bd(&bd, addr);
        FEC_PRINTF("tx_bd %x flags %04x len %d data %08x option %04x "
                   "status %04x\n", addr, bd.flags, bd.length, bd.data,
                   bd.option, bd.status);
        if ((bd.flags & ENET_BD_R) == 0) {
            /* Run out of descriptors to transmit. */
            break;
        }
        len = bd.length;
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        }
        dma_memory_read(&address_space_memory, bd.data, ptr, len);
        ptr += len;
        frame_size += len;
        if (bd.flags & ENET_BD_L) {
            if (bd.option & ENET_BD_PINS) {
                struct ip_header *ip_hd = PKT_GET_IP_HDR(s->frame);
                if (IP_HEADER_VERSION(ip_hd) == 4) {
                    net_checksum_calculate(s->frame, frame_size);
                }
            }
            if (bd.option & ENET_BD_IINS) {
                struct ip_header *ip_hd = PKT_GET_IP_HDR(s->frame);
                /* We compute checksum only for IPv4 frames */
                if (IP_HEADER_VERSION(ip_hd) == 4) {
                    uint16_t csum;
                    ip_hd->ip_sum = 0;
                    csum = net_raw_checksum((uint8_t *)ip_hd, sizeof(*ip_hd));
                    ip_hd->ip_sum = cpu_to_be16(csum);
                }
            }
            /* Last buffer in frame. */

            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            ptr = s->frame;

            frame_size = 0;
            if (bd.option & ENET_BD_TX_INT) {
                s->regs[ENET_EIR] |= int_txf;
            }
        }
        if (bd.option & ENET_BD_TX_INT) {
            s->regs[ENET_EIR] |= int_txb;
        }
        bd.flags &= ~ENET_BD_R;
        /* Write back the modified descriptor. */
        imx_enet_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
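        /* ENET_BD_W (wrap) sends us back to the start of the ring. */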
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[tdsr];
        } else {
            addr += sizeof(bd);
        }
    }

    s->tx_descriptor[ring] = addr;

    imx_eth_update(s);
}

static void imx_eth_do_tx(IMXFECState *s, uint32_t index)
{
    if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
        imx_enet_do_tx(s, index);
    } else {
        imx_fec_do_tx(s);
    }
}

static void imx_eth_enable_rx(IMXFECState *s, bool flush)
{
    IMXFECBufDesc bd;

    imx_fec_read_bd(&bd, s->rx_descriptor);

    s->regs[ENET_RDAR] = (bd.flags & ENET_BD_E) ? ENET_RDAR_RDAR : 0;

    if (!s->regs[ENET_RDAR]) {
        FEC_PRINTF("RX buffer full\n");
    } else if (flush) {
        qemu_flush_queued_packets(qemu_get_queue(s->nic));
    }
}

static void imx_eth_reset(DeviceState *d)
{
    IMXFECState *s = IMX_FEC(d);

    /* Reset the Device */
    memset(s->regs, 0, sizeof(s->regs));
    s->regs[ENET_ECR] = 0xf0000000;
    s->regs[ENET_MIBC] = 0xc0000000;
    s->regs[ENET_RCR] = 0x05ee0001;
    s->regs[ENET_OPD] = 0x00010000;

    s->regs[ENET_PALR] = (s->conf.macaddr.a[0] << 24)
                         | (s->conf.macaddr.a[1] << 16)
                         | (s->conf.macaddr.a[2] << 8)
                         | s->conf.macaddr.a[3];
    s->regs[ENET_PAUR] = (s->conf.macaddr.a[4] << 24)
                         | (s->conf.macaddr.a[5] << 16)
                         | 0x8808;

    if (s->is_fec) {
        s->regs[ENET_FRBR] = 0x00000600;
        s->regs[ENET_FRSR] = 0x00000500;
        s->regs[ENET_MIIGSK_ENR] = 0x00000006;
    } else {
        s->regs[ENET_RAEM] = 0x00000004;
        s->regs[ENET_RAFL] = 0x00000004;
        s->regs[ENET_TAEM] = 0x00000004;
        s->regs[ENET_TAFL] = 0x00000008;
        s->regs[ENET_TIPG] = 0x0000000c;
        s->regs[ENET_FTRL] = 0x000007ff;
        s->regs[ENET_ATPER] = 0x3b9aca00;
    }

    s->rx_descriptor = 0;
    memset(s->tx_descriptor, 0, sizeof(s->tx_descriptor));

    /* We also reset the PHY */
    phy_reset(s);
}

static uint32_t imx_default_read(IMXFECState *s, uint32_t index)
{
    qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%"
                  PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
    return 0;
}

static uint32_t imx_fec_read(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_FRBR:
    case ENET_FRSR:
    case ENET_MIIGSK_CFGR:
    case ENET_MIIGSK_ENR:
        return s->regs[index];
    default:
        return imx_default_read(s, index);
    }
}

static uint32_t imx_enet_read(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_RSFL:
    case ENET_RSEM:
    case ENET_RAEM:
    case ENET_RAFL:
    case ENET_TSEM:
    case ENET_TAEM:
    case ENET_TAFL:
    case ENET_TIPG:
    case ENET_FTRL:
    case ENET_TACC:
    case ENET_RACC:
    case ENET_ATCR:
    case ENET_ATVR:
    case ENET_ATOFF:
    case ENET_ATPER:
    case ENET_ATCOR:
    case ENET_ATINC:
    case ENET_ATSTMP:
    case ENET_TGSR:
    case ENET_TCSR0:
    case ENET_TCCR0:
    case ENET_TCSR1:
    case ENET_TCCR1:
    case ENET_TCSR2:
    case ENET_TCCR2:
    case ENET_TCSR3:
    case ENET_TCCR3:
        return s->regs[index];
    default:
        return imx_default_read(s, index);
    }
}

static uint64_t imx_eth_read(void *opaque, hwaddr offset, unsigned size)
{
    uint32_t value = 0;
    IMXFECState *s = IMX_FEC(opaque);
    uint32_t index = offset >> 2;

    switch (index) {
    case ENET_EIR:
    case ENET_EIMR:
    case ENET_RDAR:
    case ENET_TDAR:
    case ENET_ECR:
    case ENET_MMFR:
    case ENET_MSCR:
    case ENET_MIBC:
    case ENET_RCR:
    case ENET_TCR:
    case ENET_PALR:
    case ENET_PAUR:
    case ENET_OPD:
    case ENET_IAUR:
    case ENET_IALR:
    case ENET_GAUR:
    case ENET_GALR:
    case ENET_TFWR:
    case ENET_RDSR:
    case ENET_TDSR:
    case ENET_MRBR:
        value = s->regs[index];
        break;
    default:
        if (s->is_fec) {
            value = imx_fec_read(s, index);
        } else {
            value = imx_enet_read(s, index);
        }
        break;
    }

    FEC_PRINTF("reg[%s] => 0x%" PRIx32 "\n", imx_eth_reg_name(s, index),
               value);

    return value;
}

static void imx_default_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad address at offset 0x%"
                  PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
    return;
}

static void imx_fec_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    switch (index) {
    case ENET_FRBR:
        /* FRBR is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register FRBR is read only\n",
                      TYPE_IMX_FEC, __func__);
        break;
    case ENET_FRSR:
        s->regs[index] = (value & 0x000003fc) | 0x00000400;
        break;
    case ENET_MIIGSK_CFGR:
        s->regs[index] = value & 0x00000053;
        break;
    case ENET_MIIGSK_ENR:
        s->regs[index] = (value & 0x00000002) ? 0x00000006 : 0;
        break;
    default:
        imx_default_write(s, index, value);
        break;
    }
}

static void imx_enet_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    switch (index) {
    case ENET_RSFL:
    case ENET_RSEM:
    case ENET_RAEM:
    case ENET_RAFL:
    case ENET_TSEM:
    case ENET_TAEM:
    case ENET_TAFL:
        s->regs[index] = value & 0x000001ff;
        break;
    case ENET_TIPG:
        s->regs[index] = value & 0x0000001f;
        break;
    case ENET_FTRL:
        s->regs[index] = value & 0x00003fff;
        break;
    case ENET_TACC:
        s->regs[index] = value & 0x00000019;
        break;
    case ENET_RACC:
        s->regs[index] = value & 0x000000C7;
        break;
    case ENET_ATCR:
        s->regs[index] = value & 0x00002a9d;
        break;
    case ENET_ATVR:
    case ENET_ATOFF:
    case ENET_ATPER:
        s->regs[index] = value;
        break;
    case ENET_ATSTMP:
        /* ATSTMP is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register ATSTMP is read only\n",
                      TYPE_IMX_FEC, __func__);
        break;
    case ENET_ATCOR:
        s->regs[index] = value & 0x7fffffff;
        break;
    case ENET_ATINC:
        s->regs[index] = value & 0x00007f7f;
        break;
    case ENET_TGSR:
        /* implement clear timer flag */
        value = value & 0x0000000f;
        break;
    case ENET_TCSR0:
    case ENET_TCSR1:
    case ENET_TCSR2:
    case ENET_TCSR3:
        value = value & 0x000000fd;
        break;
    case ENET_TCCR0:
    case ENET_TCCR1:
    case ENET_TCCR2:
    case ENET_TCCR3:
        s->regs[index] = value;
        break;
    default:
        imx_default_write(s, index, value);
        break;
    }
}

static void imx_eth_write(void *opaque, hwaddr offset, uint64_t value,
                          unsigned size)
{
    IMXFECState *s = IMX_FEC(opaque);
    const bool single_tx_ring = !imx_eth_is_multi_tx_ring(s);
    uint32_t index = offset >> 2;

    FEC_PRINTF("reg[%s] <= 0x%" PRIx32 "\n", imx_eth_reg_name(s, index),
               (uint32_t)value);

    switch (index) {
    case ENET_EIR:
        s->regs[index] &= ~value;
        break;
    case ENET_EIMR:
        s->regs[index] = value;
        break;
    case ENET_RDAR:
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            if (!s->regs[index]) {
                imx_eth_enable_rx(s, true);
            }
        } else {
            s->regs[index] = 0;
        }
        break;
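    /*
     * Writing a TDAR register kicks the transmitter for the matching ring;
     * TDAR1/TDAR2 only exist on the multi-ring (ENET) variant.
     */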
    case ENET_TDAR1: /* FALLTHROUGH */
    case ENET_TDAR2: /* FALLTHROUGH */
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDAR2 or TDAR1\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }
    case ENET_TDAR: /* FALLTHROUGH */
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            s->regs[index] = ENET_TDAR_TDAR;
            imx_eth_do_tx(s, index);
        }
        s->regs[index] = 0;
        break;
    case ENET_ECR:
        if (value & ENET_ECR_RESET) {
            return imx_eth_reset(DEVICE(s));
        }
        s->regs[index] = value;
        if ((s->regs[index] & ENET_ECR_ETHEREN) == 0) {
            s->regs[ENET_RDAR] = 0;
            s->rx_descriptor = s->regs[ENET_RDSR];
            s->regs[ENET_TDAR] = 0;
            s->regs[ENET_TDAR1] = 0;
            s->regs[ENET_TDAR2] = 0;
            s->tx_descriptor[0] = s->regs[ENET_TDSR];
            s->tx_descriptor[1] = s->regs[ENET_TDSR1];
            s->tx_descriptor[2] = s->regs[ENET_TDSR2];
        }
        break;
    case ENET_MMFR:
        s->regs[index] = value;
        if (extract32(value, 29, 1)) {
            /* This is a read operation */
            s->regs[ENET_MMFR] = deposit32(s->regs[ENET_MMFR], 0, 16,
                                           do_phy_read(s,
                                                       extract32(value,
                                                                 18, 10)));
        } else {
            /* This is a write operation */
            do_phy_write(s, extract32(value, 18, 10), extract32(value, 0, 16));
        }
        /* raise the interrupt as the PHY operation is done */
        s->regs[ENET_EIR] |= ENET_INT_MII;
        break;
    case ENET_MSCR:
        s->regs[index] = value & 0xfe;
        break;
    case ENET_MIBC:
        /* TODO: Implement MIB. */
        s->regs[index] = (value & 0x80000000) ? 0xc0000000 : 0;
        break;
    case ENET_RCR:
        s->regs[index] = value & 0x07ff003f;
        /* TODO: Implement LOOP mode. */
        break;
    case ENET_TCR:
        /* We transmit immediately, so raise GRA immediately. */
        s->regs[index] = value;
        if (value & 1) {
            s->regs[ENET_EIR] |= ENET_INT_GRA;
        }
        break;
    case ENET_PALR:
        s->regs[index] = value;
        s->conf.macaddr.a[0] = value >> 24;
        s->conf.macaddr.a[1] = value >> 16;
        s->conf.macaddr.a[2] = value >> 8;
        s->conf.macaddr.a[3] = value;
        break;
    case ENET_PAUR:
        s->regs[index] = (value | 0x0000ffff) & 0xffff8808;
        s->conf.macaddr.a[4] = value >> 24;
        s->conf.macaddr.a[5] = value >> 16;
        break;
    case ENET_OPD:
        s->regs[index] = (value & 0x0000ffff) | 0x00010000;
        break;
    case ENET_IAUR:
    case ENET_IALR:
    case ENET_GAUR:
    case ENET_GALR:
        /* TODO: implement MAC hash filtering. */
        break;
    case ENET_TFWR:
        if (s->is_fec) {
            s->regs[index] = value & 0x3;
        } else {
            s->regs[index] = value & 0x13f;
        }
        break;
    case ENET_RDSR:
        if (s->is_fec) {
            s->regs[index] = value & ~3;
        } else {
            s->regs[index] = value & ~7;
        }
        s->rx_descriptor = s->regs[index];
        break;
    case ENET_TDSR:
        if (s->is_fec) {
            s->regs[index] = value & ~3;
        } else {
            s->regs[index] = value & ~7;
        }
        s->tx_descriptor[0] = s->regs[index];
        break;
    case ENET_TDSR1:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR1\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }

        s->regs[index] = value & ~7;
        s->tx_descriptor[1] = s->regs[index];
        break;
    case ENET_TDSR2:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR2\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }

        s->regs[index] = value & ~7;
        s->tx_descriptor[2] = s->regs[index];
        break;
    case ENET_MRBR:
        s->regs[index] = value & 0x00003ff0;
        break;
    default:
        if (s->is_fec) {
            imx_fec_write(s, index, value);
        } else {
            imx_enet_write(s, index, value);
        }
        return;
    }

    imx_eth_update(s);
}

static int imx_eth_can_receive(NetClientState *nc)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    FEC_PRINTF("\n");

    return !!s->regs[ENET_RDAR];
}

static ssize_t imx_fec_receive(NetClientState *nc, const uint8_t *buf,
                               size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
    IMXFECBufDesc bd;
    uint32_t flags = 0;
    uint32_t addr;
    uint32_t crc;
    uint32_t buf_addr;
    uint8_t *crc_ptr;
    unsigned int buf_len;
    size_t size = len;

    FEC_PRINTF("len %d\n", (int)size);

    if (!s->regs[ENET_RDAR]) {
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
                      TYPE_IMX_FEC, __func__);
        return 0;
    }

    /* 4 bytes for the CRC. */
    size += 4;
    crc = cpu_to_be32(crc32(~0, buf, size));
    crc_ptr = (uint8_t *) &crc;

    /* Huge frames are truncated. */
    if (size > ENET_MAX_FRAME_SIZE) {
        size = ENET_MAX_FRAME_SIZE;
        flags |= ENET_BD_TR | ENET_BD_LG;
    }

    /* Frames larger than the user limit just set error flags. */
    if (size > (s->regs[ENET_RCR] >> 16)) {
        flags |= ENET_BD_LG;
    }

    addr = s->rx_descriptor;
    while (size > 0) {
        imx_fec_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_E) == 0) {
            /* No descriptors available. Bail out. */
            /*
             * FIXME: This is wrong. We should probably either
             * save the remainder for when more RX buffers are
             * available, or flag an error.
             */
            qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
                          TYPE_IMX_FEC, __func__);
            break;
        }
        buf_len = (size <= s->regs[ENET_MRBR]) ? size : s->regs[ENET_MRBR];
        bd.length = buf_len;
        size -= buf_len;

        FEC_PRINTF("rx_bd 0x%x length %d\n", addr, bd.length);

        /* The last 4 bytes are the CRC. */
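        /*
         * Once fewer than 4 bytes remain, the tail of this buffer holds CRC
         * rather than payload: shorten the data copy and write the CRC below.
         */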
        if (size < 4) {
            buf_len += size - 4;
        }
        buf_addr = bd.data;
        dma_memory_write(&address_space_memory, buf_addr, buf, buf_len);
        buf += buf_len;
        if (size < 4) {
            dma_memory_write(&address_space_memory, buf_addr + buf_len,
                             crc_ptr, 4 - size);
            crc_ptr += 4 - size;
        }
        bd.flags &= ~ENET_BD_E;
        if (size == 0) {
            /* Last buffer in frame. */
            bd.flags |= flags | ENET_BD_L;
            FEC_PRINTF("rx frame flags %04x\n", bd.flags);
            s->regs[ENET_EIR] |= ENET_INT_RXF;
        } else {
            s->regs[ENET_EIR] |= ENET_INT_RXB;
        }
        imx_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[ENET_RDSR];
        } else {
            addr += sizeof(bd);
        }
    }
    s->rx_descriptor = addr;
    imx_eth_enable_rx(s, false);
    imx_eth_update(s);
    return len;
}

static ssize_t imx_enet_receive(NetClientState *nc, const uint8_t *buf,
                                size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
    IMXENETBufDesc bd;
    uint32_t flags = 0;
    uint32_t addr;
    uint32_t crc;
    uint32_t buf_addr;
    uint8_t *crc_ptr;
    unsigned int buf_len;
    size_t size = len;
    bool shift16 = s->regs[ENET_RACC] & ENET_RACC_SHIFT16;

    FEC_PRINTF("len %d\n", (int)size);

    if (!s->regs[ENET_RDAR]) {
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
                      TYPE_IMX_FEC, __func__);
        return 0;
    }

    /* 4 bytes for the CRC. */
    size += 4;
    crc = cpu_to_be32(crc32(~0, buf, size));
    crc_ptr = (uint8_t *) &crc;

    if (shift16) {
        size += 2;
    }

    /* Huge frames are truncated. */
    if (size > s->regs[ENET_FTRL]) {
        size = s->regs[ENET_FTRL];
        flags |= ENET_BD_TR | ENET_BD_LG;
    }

    /* Frames larger than the user limit just set error flags. */
    if (size > (s->regs[ENET_RCR] >> 16)) {
        flags |= ENET_BD_LG;
    }

    addr = s->rx_descriptor;
    while (size > 0) {
        imx_enet_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_E) == 0) {
            /* No descriptors available. Bail out. */
            /*
             * FIXME: This is wrong. We should probably either
             * save the remainder for when more RX buffers are
             * available, or flag an error.
             */
            qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
                          TYPE_IMX_FEC, __func__);
            break;
        }
        buf_len = MIN(size, s->regs[ENET_MRBR]);
        bd.length = buf_len;
        size -= buf_len;

        FEC_PRINTF("rx_bd 0x%x length %d\n", addr, bd.length);

        /* The last 4 bytes are the CRC. */
        if (size < 4) {
            buf_len += size - 4;
        }
        buf_addr = bd.data;

        if (shift16) {
            /*
             * If SHIFT16 bit of ENETx_RACC register is set we need to
             * align the payload to 4-byte boundary.
             */
            const uint8_t zeros[2] = { 0 };

            dma_memory_write(&address_space_memory, buf_addr,
                             zeros, sizeof(zeros));

            buf_addr += sizeof(zeros);
            buf_len -= sizeof(zeros);

            /* We only do this once per Ethernet frame */
            shift16 = false;
        }

        dma_memory_write(&address_space_memory, buf_addr, buf, buf_len);
        buf += buf_len;
        if (size < 4) {
            dma_memory_write(&address_space_memory, buf_addr + buf_len,
                             crc_ptr, 4 - size);
            crc_ptr += 4 - size;
        }
        bd.flags &= ~ENET_BD_E;
        if (size == 0) {
            /* Last buffer in frame. */
            bd.flags |= flags | ENET_BD_L;
            FEC_PRINTF("rx frame flags %04x\n", bd.flags);
            if (bd.option & ENET_BD_RX_INT) {
                s->regs[ENET_EIR] |= ENET_INT_RXF;
            }
        } else {
            if (bd.option & ENET_BD_RX_INT) {
                s->regs[ENET_EIR] |= ENET_INT_RXB;
            }
        }
        imx_enet_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[ENET_RDSR];
        } else {
            addr += sizeof(bd);
        }
    }
    s->rx_descriptor = addr;
    imx_eth_enable_rx(s, false);
    imx_eth_update(s);
    return len;
}

static ssize_t imx_eth_receive(NetClientState *nc, const uint8_t *buf,
                               size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
        return imx_enet_receive(nc, buf, len);
    } else {
        return imx_fec_receive(nc, buf, len);
    }
}

static const MemoryRegionOps imx_eth_ops = {
    .read = imx_eth_read,
    .write = imx_eth_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void imx_eth_cleanup(NetClientState *nc)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    s->nic = NULL;
}

static NetClientInfo imx_eth_net_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = imx_eth_can_receive,
    .receive = imx_eth_receive,
    .cleanup = imx_eth_cleanup,
    .link_status_changed = imx_eth_set_link,
};


static void imx_eth_realize(DeviceState *dev, Error **errp)
{
    IMXFECState *s = IMX_FEC(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

    memory_region_init_io(&s->iomem, OBJECT(dev), &imx_eth_ops, s,
                          TYPE_IMX_FEC, FSL_IMX25_FEC_SIZE);
    sysbus_init_mmio(sbd, &s->iomem);
    sysbus_init_irq(sbd, &s->irq[0]);
    sysbus_init_irq(sbd, &s->irq[1]);

    qemu_macaddr_default_if_unset(&s->conf.macaddr);

    s->nic = qemu_new_nic(&imx_eth_net_info, &s->conf,
                          object_get_typename(OBJECT(dev)),
                          DEVICE(dev)->id, s);

    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}

static Property imx_eth_properties[] = {
    DEFINE_NIC_PROPERTIES(IMXFECState, conf),
    DEFINE_PROP_UINT32("tx-ring-num", IMXFECState, tx_ring_num, 1),
    DEFINE_PROP_END_OF_LIST(),
};

static void imx_eth_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->vmsd = &vmstate_imx_eth;
    dc->reset = imx_eth_reset;
    dc->props = imx_eth_properties;
    dc->realize = imx_eth_realize;
    dc->desc = "i.MX FEC/ENET Ethernet Controller";
}

static void imx_fec_init(Object *obj)
{
    IMXFECState *s = IMX_FEC(obj);

    s->is_fec = true;
}

static void imx_enet_init(Object *obj)
{
    IMXFECState *s = IMX_FEC(obj);

    s->is_fec = false;
}

static const TypeInfo imx_fec_info = {
    .name = TYPE_IMX_FEC,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(IMXFECState),
    .instance_init = imx_fec_init,
    .class_init = imx_eth_class_init,
};

static const TypeInfo imx_enet_info = {
    .name = TYPE_IMX_ENET,
    .parent = TYPE_IMX_FEC,
    .instance_init = imx_enet_init,
};

static void imx_eth_register_types(void)
{
    type_register_static(&imx_fec_info);
    type_register_static(&imx_enet_info);
}

type_init(imx_eth_register_types)