/*
 * ColdFire Fast Ethernet Controller emulation.
 *
 * Copyright (c) 2007 CodeSourcery.
 *
 * This code is licensed under the GPL
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "hw/irq.h"
#include "net/net.h"
#include "qemu/module.h"
#include "hw/m68k/mcf.h"
#include "hw/m68k/mcf_fec.h"
#include "hw/net/mii.h"
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
/* For crc32 */
#include <zlib.h>

//#define DEBUG_FEC 1

#ifdef DEBUG_FEC
#define DPRINTF(fmt, ...) \
    do { printf("mcf_fec: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) do {} while (0)
#endif

#define FEC_MAX_DESC 1024
#define FEC_MAX_FRAME_SIZE 2032
#define FEC_MIB_SIZE 64

struct mcf_fec_state {
    SysBusDevice parent_obj;

    MemoryRegion iomem;
    qemu_irq irq[FEC_NUM_IRQ];
    NICState *nic;
    NICConf conf;
    uint32_t irq_state;
    uint32_t eir;
    uint32_t eimr;
    int rx_enabled;
    uint32_t rx_descriptor;
    uint32_t tx_descriptor;
    uint32_t ecr;
    uint32_t mmfr;
    uint32_t mscr;
    uint32_t rcr;
    uint32_t tcr;
    uint32_t tfwr;
    uint32_t rfsr;
    uint32_t erdsr;
    uint32_t etdsr;
    uint32_t emrbr;
    uint32_t mib[FEC_MIB_SIZE];
};

#define FEC_INT_HB   0x80000000
#define FEC_INT_BABR 0x40000000
#define FEC_INT_BABT 0x20000000
#define FEC_INT_GRA  0x10000000
#define FEC_INT_TXF  0x08000000
#define FEC_INT_TXB  0x04000000
#define FEC_INT_RXF  0x02000000
#define FEC_INT_RXB  0x01000000
#define FEC_INT_MII  0x00800000
#define FEC_INT_EB   0x00400000
#define FEC_INT_LC   0x00200000
#define FEC_INT_RL   0x00100000
#define FEC_INT_UN   0x00080000

#define FEC_EN 2
#define FEC_RESET 1

/* Map interrupt flags onto IRQ lines. */
static const uint32_t mcf_fec_irq_map[FEC_NUM_IRQ] = {
    FEC_INT_TXF,
    FEC_INT_TXB,
    FEC_INT_UN,
    FEC_INT_RL,
    FEC_INT_RXF,
    FEC_INT_RXB,
    FEC_INT_MII,
    FEC_INT_LC,
    FEC_INT_HB,
    FEC_INT_GRA,
    FEC_INT_EB,
    FEC_INT_BABT,
    FEC_INT_BABR
};

/* Buffer Descriptor.  */
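/*
 * Descriptors are 8 bytes each and stored big-endian in guest memory:
 * a 16-bit flags word, a 16-bit buffer length and a 32-bit buffer
 * pointer.  The RX "empty" and TX "ready" bits share bit 15
 * (FEC_BD_E/FEC_BD_R below), and FEC_BD_W wraps the ring back to
 * ERDSR (RX) or ETDSR (TX).
 */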
typedef struct {
    uint16_t flags;
    uint16_t length;
    uint32_t data;
} mcf_fec_bd;

#define FEC_BD_R   0x8000
#define FEC_BD_E   0x8000
#define FEC_BD_O1  0x4000
#define FEC_BD_W   0x2000
#define FEC_BD_O2  0x1000
#define FEC_BD_L   0x0800
#define FEC_BD_TC  0x0400
#define FEC_BD_ABC 0x0200
#define FEC_BD_M   0x0100
#define FEC_BD_BC  0x0080
#define FEC_BD_MC  0x0040
#define FEC_BD_LG  0x0020
#define FEC_BD_NO  0x0010
#define FEC_BD_CR  0x0004
#define FEC_BD_OV  0x0002
#define FEC_BD_TR  0x0001

#define MIB_RMON_T_DROP        0
#define MIB_RMON_T_PACKETS     1
#define MIB_RMON_T_BC_PKT      2
#define MIB_RMON_T_MC_PKT      3
#define MIB_RMON_T_CRC_ALIGN   4
#define MIB_RMON_T_UNDERSIZE   5
#define MIB_RMON_T_OVERSIZE    6
#define MIB_RMON_T_FRAG        7
#define MIB_RMON_T_JAB         8
#define MIB_RMON_T_COL         9
#define MIB_RMON_T_P64         10
#define MIB_RMON_T_P65TO127    11
#define MIB_RMON_T_P128TO255   12
#define MIB_RMON_T_P256TO511   13
#define MIB_RMON_T_P512TO1023  14
#define MIB_RMON_T_P1024TO2047 15
#define MIB_RMON_T_P_GTE2048   16
#define MIB_RMON_T_OCTETS      17
#define MIB_IEEE_T_DROP        18
#define MIB_IEEE_T_FRAME_OK    19
#define MIB_IEEE_T_1COL        20
#define MIB_IEEE_T_MCOL        21
#define MIB_IEEE_T_DEF         22
#define MIB_IEEE_T_LCOL        23
#define MIB_IEEE_T_EXCOL       24
#define MIB_IEEE_T_MACERR      25
#define MIB_IEEE_T_CSERR       26
#define MIB_IEEE_T_SQE         27
#define MIB_IEEE_T_FDXFC       28
#define MIB_IEEE_T_OCTETS_OK   29

#define MIB_RMON_R_DROP        32
#define MIB_RMON_R_PACKETS     33
#define MIB_RMON_R_BC_PKT      34
#define MIB_RMON_R_MC_PKT      35
#define MIB_RMON_R_CRC_ALIGN   36
#define MIB_RMON_R_UNDERSIZE   37
#define MIB_RMON_R_OVERSIZE    38
#define MIB_RMON_R_FRAG        39
#define MIB_RMON_R_JAB         40
#define MIB_RMON_R_RESVD_0     41
#define MIB_RMON_R_P64         42
#define MIB_RMON_R_P65TO127    43
#define MIB_RMON_R_P128TO255   44
#define MIB_RMON_R_P256TO511   45
#define MIB_RMON_R_P512TO1023  46
#define MIB_RMON_R_P1024TO2047 47
#define MIB_RMON_R_P_GTE2048   48
#define MIB_RMON_R_OCTETS      49
#define MIB_IEEE_R_DROP        50
#define MIB_IEEE_R_FRAME_OK    51
#define MIB_IEEE_R_CRC         52
#define MIB_IEEE_R_ALIGN       53
#define MIB_IEEE_R_MACERR      54
#define MIB_IEEE_R_FDXFC       55
#define MIB_IEEE_R_OCTETS_OK   56

static void mcf_fec_read_bd(mcf_fec_bd *bd, uint32_t addr)
{
    cpu_physical_memory_read(addr, bd, sizeof(*bd));
    be16_to_cpus(&bd->flags);
    be16_to_cpus(&bd->length);
    be32_to_cpus(&bd->data);
}

static void mcf_fec_write_bd(mcf_fec_bd *bd, uint32_t addr)
{
    mcf_fec_bd tmp;
    tmp.flags = cpu_to_be16(bd->flags);
    tmp.length = cpu_to_be16(bd->length);
    tmp.data = cpu_to_be32(bd->data);
    cpu_physical_memory_write(addr, &tmp, sizeof(tmp));
}

static void mcf_fec_update(mcf_fec_state *s)
{
    uint32_t active;
    uint32_t changed;
    uint32_t mask;
    int i;

    active = s->eir & s->eimr;
    changed = active ^ s->irq_state;
    for (i = 0; i < FEC_NUM_IRQ; i++) {
        mask = mcf_fec_irq_map[i];
        if (changed & mask) {
            DPRINTF("IRQ %d = %d\n", i, (active & mask) != 0);
            qemu_set_irq(s->irq[i], (active & mask) != 0);
        }
    }
    s->irq_state = active;
}

static void mcf_fec_tx_stats(mcf_fec_state *s, int size)
{
    s->mib[MIB_RMON_T_PACKETS]++;
    s->mib[MIB_RMON_T_OCTETS] += size;
    if (size < 64) {
        s->mib[MIB_RMON_T_FRAG]++;
    } else if (size == 64) {
        s->mib[MIB_RMON_T_P64]++;
    } else if (size < 128) {
        s->mib[MIB_RMON_T_P65TO127]++;
    } else if (size < 256) {
        s->mib[MIB_RMON_T_P128TO255]++;
    } else if (size < 512) {
        s->mib[MIB_RMON_T_P256TO511]++;
    } else if (size < 1024) {
        s->mib[MIB_RMON_T_P512TO1023]++;
    } else if (size < 2048) {
        s->mib[MIB_RMON_T_P1024TO2047]++;
    } else {
        s->mib[MIB_RMON_T_P_GTE2048]++;
    }
    s->mib[MIB_IEEE_T_FRAME_OK]++;
    s->mib[MIB_IEEE_T_OCTETS_OK] += size;
}

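/*
 * Transmit path: walk the TX descriptor ring starting at tx_descriptor,
 * gathering up to FEC_MAX_FRAME_SIZE bytes until a descriptor with
 * FEC_BD_L (last-in-frame) is seen, then hand the assembled frame to
 * the net layer.  Each consumed descriptor has its ready bit cleared
 * and raises TXB; a completed frame additionally raises TXF.
 */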
static void mcf_fec_do_tx(mcf_fec_state *s)
{
    uint32_t addr;
    mcf_fec_bd bd;
    int frame_size;
    int len, descnt = 0;
    uint8_t frame[FEC_MAX_FRAME_SIZE];
    uint8_t *ptr;

    DPRINTF("do_tx\n");
    ptr = frame;
    frame_size = 0;
    addr = s->tx_descriptor;
    while (descnt++ < FEC_MAX_DESC) {
        mcf_fec_read_bd(&bd, addr);
        DPRINTF("tx_bd %x flags %04x len %d data %08x\n",
                addr, bd.flags, bd.length, bd.data);
        if ((bd.flags & FEC_BD_R) == 0) {
            /* Run out of descriptors to transmit. */
            break;
        }
        len = bd.length;
        if (frame_size + len > FEC_MAX_FRAME_SIZE) {
            len = FEC_MAX_FRAME_SIZE - frame_size;
            s->eir |= FEC_INT_BABT;
        }
        cpu_physical_memory_read(bd.data, ptr, len);
        ptr += len;
        frame_size += len;
        if (bd.flags & FEC_BD_L) {
            /* Last buffer in frame. */
            DPRINTF("Sending packet\n");
            qemu_send_packet(qemu_get_queue(s->nic), frame, frame_size);
            mcf_fec_tx_stats(s, frame_size);
            ptr = frame;
            frame_size = 0;
            s->eir |= FEC_INT_TXF;
        }
        s->eir |= FEC_INT_TXB;
        bd.flags &= ~FEC_BD_R;
        /* Write back the modified descriptor. */
        mcf_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & FEC_BD_W) != 0) {
            addr = s->etdsr;
        } else {
            addr += 8;
        }
    }
    s->tx_descriptor = addr;
}

static void mcf_fec_enable_rx(mcf_fec_state *s)
{
    NetClientState *nc = qemu_get_queue(s->nic);
    mcf_fec_bd bd;

    mcf_fec_read_bd(&bd, s->rx_descriptor);
    s->rx_enabled = ((bd.flags & FEC_BD_E) != 0);
    if (s->rx_enabled) {
        qemu_flush_queued_packets(nc);
    }
}

static void mcf_fec_reset(DeviceState *dev)
{
    mcf_fec_state *s = MCF_FEC_NET(dev);

    s->eir = 0;
    s->eimr = 0;
    s->rx_enabled = 0;
    s->ecr = 0;
    s->mscr = 0;
    s->rcr = 0x05ee0001;
    s->tcr = 0;
    s->tfwr = 0;
    s->rfsr = 0x500;
}

#define MMFR_WRITE_OP   (1 << 28)
#define MMFR_READ_OP    (2 << 28)
#define MMFR_PHYADDR(v) (((v) >> 23) & 0x1f)
#define MMFR_REGNUM(v)  (((v) >> 18) & 0x1f)

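/*
 * MDIO accesses are emulated as a single fixed PHY (a DP83848-like
 * device at PHY address 1) that always reports 100 Mbit/s full duplex,
 * autonegotiation complete and link up.  Write operations are simply
 * echoed back through MMFR; reads from any other PHY address return
 * 0xffff in the data field.
 */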
static uint64_t mcf_fec_read_mdio(mcf_fec_state *s)
{
    uint64_t v;

    if (s->mmfr & MMFR_WRITE_OP) {
        return s->mmfr;
    }
    if (MMFR_PHYADDR(s->mmfr) != 1) {
        return s->mmfr |= 0xffff;
    }

    switch (MMFR_REGNUM(s->mmfr)) {
    case MII_BMCR:
        v = MII_BMCR_SPEED | MII_BMCR_AUTOEN | MII_BMCR_FD;
        break;
    case MII_BMSR:
        v = MII_BMSR_100TX_FD | MII_BMSR_100TX_HD | MII_BMSR_10T_FD |
            MII_BMSR_10T_HD | MII_BMSR_MFPS | MII_BMSR_AN_COMP |
            MII_BMSR_AUTONEG | MII_BMSR_LINK_ST;
        break;
    case MII_PHYID1:
        v = DP83848_PHYID1;
        break;
    case MII_PHYID2:
        v = DP83848_PHYID2;
        break;
    case MII_ANAR:
        v = MII_ANAR_TXFD | MII_ANAR_TX | MII_ANAR_10FD |
            MII_ANAR_10 | MII_ANAR_CSMACD;
        break;
    case MII_ANLPAR:
        v = MII_ANLPAR_ACK | MII_ANLPAR_TXFD | MII_ANLPAR_TX |
            MII_ANLPAR_10FD | MII_ANLPAR_10 | MII_ANLPAR_CSMACD;
        break;
    default:
        v = 0xffff;
        break;
    }
    s->mmfr = (s->mmfr & ~0xffff) | v;
    return s->mmfr;
}

static uint64_t mcf_fec_read(void *opaque, hwaddr addr,
                             unsigned size)
{
    mcf_fec_state *s = (mcf_fec_state *)opaque;
    switch (addr & 0x3ff) {
    case 0x004: return s->eir;
    case 0x008: return s->eimr;
    case 0x010: return s->rx_enabled ? (1 << 24) : 0; /* RDAR */
    case 0x014: return 0; /* TDAR */
    case 0x024: return s->ecr;
    case 0x040: return mcf_fec_read_mdio(s);
    case 0x044: return s->mscr;
    case 0x064: return 0; /* MIBC */
    case 0x084: return s->rcr;
    case 0x0c4: return s->tcr;
    case 0x0e4: /* PALR */
        return (s->conf.macaddr.a[0] << 24) | (s->conf.macaddr.a[1] << 16)
              | (s->conf.macaddr.a[2] << 8) | s->conf.macaddr.a[3];
    case 0x0e8: /* PAUR */
        return (s->conf.macaddr.a[4] << 24)
              | (s->conf.macaddr.a[5] << 16) | 0x8808;
    case 0x0ec: return 0x10000; /* OPD */
    case 0x118: return 0;
    case 0x11c: return 0;
    case 0x120: return 0;
    case 0x124: return 0;
    case 0x144: return s->tfwr;
    case 0x14c: return 0x600;
    case 0x150: return s->rfsr;
    case 0x180: return s->erdsr;
    case 0x184: return s->etdsr;
    case 0x188: return s->emrbr;
    case 0x200 ... 0x2e0: return s->mib[(addr & 0x1ff) / 4];
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad address 0x%" HWADDR_PRIX "\n",
                      __func__, addr);
        return 0;
    }
}

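/*
 * Register writes.  Note the side effects: EIR is write-one-to-clear,
 * RDAR re-arms reception, TDAR kicks the transmitter immediately, and
 * setting the RESET bit in ECR resets the whole controller.  The
 * interrupt lines are re-evaluated after every write.
 */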
static void mcf_fec_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned size)
{
    mcf_fec_state *s = (mcf_fec_state *)opaque;
    switch (addr & 0x3ff) {
    case 0x004:
        s->eir &= ~value;
        break;
    case 0x008:
        s->eimr = value;
        break;
    case 0x010: /* RDAR */
        if ((s->ecr & FEC_EN) && !s->rx_enabled) {
            DPRINTF("RX enable\n");
            mcf_fec_enable_rx(s);
        }
        break;
    case 0x014: /* TDAR */
        if (s->ecr & FEC_EN) {
            mcf_fec_do_tx(s);
        }
        break;
    case 0x024:
        s->ecr = value;
        if (value & FEC_RESET) {
            DPRINTF("Reset\n");
            mcf_fec_reset(opaque);
        }
        if ((s->ecr & FEC_EN) == 0) {
            s->rx_enabled = 0;
        }
        break;
    case 0x040:
        s->mmfr = value;
        s->eir |= FEC_INT_MII;
        break;
    case 0x044:
        s->mscr = value & 0xfe;
        break;
    case 0x064:
        /* TODO: Implement MIB.  */
        break;
    case 0x084:
        s->rcr = value & 0x07ff003f;
        /* TODO: Implement LOOP mode.  */
        break;
    case 0x0c4: /* TCR */
        /* We transmit immediately, so raise GRA immediately.  */
        s->tcr = value;
        if (value & 1) {
            s->eir |= FEC_INT_GRA;
        }
        break;
    case 0x0e4: /* PALR */
        s->conf.macaddr.a[0] = value >> 24;
        s->conf.macaddr.a[1] = value >> 16;
        s->conf.macaddr.a[2] = value >> 8;
        s->conf.macaddr.a[3] = value;
        break;
    case 0x0e8: /* PAUR */
        s->conf.macaddr.a[4] = value >> 24;
        s->conf.macaddr.a[5] = value >> 16;
        break;
    case 0x0ec:
        /* OPD */
        break;
    case 0x118:
    case 0x11c:
    case 0x120:
    case 0x124:
        /* TODO: implement MAC hash filtering.  */
        break;
    case 0x144:
        s->tfwr = value & 3;
        break;
    case 0x14c:
        /* FRBR writes ignored.  */
        break;
    case 0x150:
        s->rfsr = (value & 0x3fc) | 0x400;
        break;
    case 0x180:
        s->erdsr = value & ~3;
        s->rx_descriptor = s->erdsr;
        break;
    case 0x184:
        s->etdsr = value & ~3;
        s->tx_descriptor = s->etdsr;
        break;
    case 0x188:
        s->emrbr = value > 0 ? value & 0x7F0 : 0x7F0;
        break;
    case 0x200 ... 0x2e0:
        s->mib[(addr & 0x1ff) / 4] = value;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad address 0x%" HWADDR_PRIX "\n",
                      __func__, addr);
        return;
    }
    mcf_fec_update(s);
}

static void mcf_fec_rx_stats(mcf_fec_state *s, int size)
{
    s->mib[MIB_RMON_R_PACKETS]++;
    s->mib[MIB_RMON_R_OCTETS] += size;
    if (size < 64) {
        s->mib[MIB_RMON_R_FRAG]++;
    } else if (size == 64) {
        s->mib[MIB_RMON_R_P64]++;
    } else if (size < 128) {
        s->mib[MIB_RMON_R_P65TO127]++;
    } else if (size < 256) {
        s->mib[MIB_RMON_R_P128TO255]++;
    } else if (size < 512) {
        s->mib[MIB_RMON_R_P256TO511]++;
    } else if (size < 1024) {
        s->mib[MIB_RMON_R_P512TO1023]++;
    } else if (size < 2048) {
        s->mib[MIB_RMON_R_P1024TO2047]++;
    } else {
        s->mib[MIB_RMON_R_P_GTE2048]++;
    }
    s->mib[MIB_IEEE_R_FRAME_OK]++;
    s->mib[MIB_IEEE_R_OCTETS_OK] += size;
}

static int mcf_fec_have_receive_space(mcf_fec_state *s, size_t want)
{
    mcf_fec_bd bd;
    uint32_t addr;

    /* Walk descriptor list to determine if we have enough buffer */
    addr = s->rx_descriptor;
    while (want > 0) {
        mcf_fec_read_bd(&bd, addr);
        if ((bd.flags & FEC_BD_E) == 0) {
            return 0;
        }
        if (want < s->emrbr) {
            return 1;
        }
        want -= s->emrbr;
        /* Advance to the next descriptor. */
        if ((bd.flags & FEC_BD_W) != 0) {
            addr = s->erdsr;
        } else {
            addr += 8;
        }
    }
    return 0;
}

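/*
 * Receive path: a 4-byte CRC32 is appended to the incoming frame, the
 * result is scattered across EMRBR-sized receive buffers, and the last
 * descriptor of the frame gets FEC_BD_L plus any error flags
 * (truncated/too-long).  RXB is raised per buffer and RXF per frame.
 */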
static ssize_t mcf_fec_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    mcf_fec_state *s = qemu_get_nic_opaque(nc);
    mcf_fec_bd bd;
    uint32_t flags = 0;
    uint32_t addr;
    uint32_t crc;
    uint32_t buf_addr;
    uint8_t *crc_ptr;
    unsigned int buf_len;
    size_t retsize;

    DPRINTF("do_rx len %d\n", size);
    if (!s->rx_enabled) {
        return -1;
    }
    /* 4 bytes for the CRC.  */
    size += 4;
    crc = cpu_to_be32(crc32(~0, buf, size));
    crc_ptr = (uint8_t *)&crc;
    /* Huge frames are truncated.  */
    if (size > FEC_MAX_FRAME_SIZE) {
        size = FEC_MAX_FRAME_SIZE;
        flags |= FEC_BD_TR | FEC_BD_LG;
    }
    /* Frames larger than the user limit just set error flags.  */
    if (size > (s->rcr >> 16)) {
        flags |= FEC_BD_LG;
    }
    /* Check if we have enough space in current descriptors */
    if (!mcf_fec_have_receive_space(s, size)) {
        return 0;
    }
    addr = s->rx_descriptor;
    retsize = size;
    while (size > 0) {
        mcf_fec_read_bd(&bd, addr);
        buf_len = (size <= s->emrbr) ? size : s->emrbr;
        bd.length = buf_len;
        size -= buf_len;
        DPRINTF("rx_bd %x length %d\n", addr, bd.length);
        /* The last 4 bytes are the CRC.  */
        if (size < 4) {
            buf_len += size - 4;
        }
        buf_addr = bd.data;
        cpu_physical_memory_write(buf_addr, buf, buf_len);
        buf += buf_len;
        if (size < 4) {
            cpu_physical_memory_write(buf_addr + buf_len, crc_ptr, 4 - size);
            crc_ptr += 4 - size;
        }
        bd.flags &= ~FEC_BD_E;
        if (size == 0) {
            /* Last buffer in frame.  */
            bd.flags |= flags | FEC_BD_L;
            DPRINTF("rx frame flags %04x\n", bd.flags);
            s->eir |= FEC_INT_RXF;
        } else {
            s->eir |= FEC_INT_RXB;
        }
        mcf_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor.  */
        if ((bd.flags & FEC_BD_W) != 0) {
            addr = s->erdsr;
        } else {
            addr += 8;
        }
    }
    s->rx_descriptor = addr;
    mcf_fec_rx_stats(s, retsize);
    mcf_fec_enable_rx(s);
    mcf_fec_update(s);
    return retsize;
}

static const MemoryRegionOps mcf_fec_ops = {
    .read = mcf_fec_read,
    .write = mcf_fec_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static NetClientInfo net_mcf_fec_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .receive = mcf_fec_receive,
};

static void mcf_fec_realize(DeviceState *dev, Error **errp)
{
    mcf_fec_state *s = MCF_FEC_NET(dev);

    s->nic = qemu_new_nic(&net_mcf_fec_info, &s->conf,
                          object_get_typename(OBJECT(dev)), dev->id,
                          &dev->mem_reentrancy_guard, s);
    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}

static void mcf_fec_instance_init(Object *obj)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
    mcf_fec_state *s = MCF_FEC_NET(obj);
    int i;

    memory_region_init_io(&s->iomem, obj, &mcf_fec_ops, s, "fec", 0x400);
    sysbus_init_mmio(sbd, &s->iomem);
    for (i = 0; i < FEC_NUM_IRQ; i++) {
        sysbus_init_irq(sbd, &s->irq[i]);
    }
}

static Property mcf_fec_properties[] = {
    DEFINE_NIC_PROPERTIES(mcf_fec_state, conf),
    DEFINE_PROP_END_OF_LIST(),
};

static void mcf_fec_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    dc->realize = mcf_fec_realize;
    dc->desc = "MCF Fast Ethernet Controller network device";
    device_class_set_legacy_reset(dc, mcf_fec_reset);
    device_class_set_props(dc, mcf_fec_properties);
}

static const TypeInfo mcf_fec_info = {
    .name = TYPE_MCF_FEC_NET,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(mcf_fec_state),
    .instance_init = mcf_fec_instance_init,
    .class_init = mcf_fec_class_init,
};

static void mcf_fec_register_types(void)
{
    type_register_static(&mcf_fec_info);
}

type_init(mcf_fec_register_types)