/*
 * ColdFire Fast Ethernet Controller emulation.
 *
 * Copyright (c) 2007 CodeSourcery.
 *
 * This code is licensed under the GPL
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "hw/irq.h"
#include "net/net.h"
#include "qemu/module.h"
#include "hw/m68k/mcf.h"
#include "hw/m68k/mcf_fec.h"
#include "hw/net/mii.h"
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include <zlib.h> /* for crc32 */

//#define DEBUG_FEC 1

#ifdef DEBUG_FEC
#define DPRINTF(fmt, ...) \
    do { printf("mcf_fec: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) do {} while (0)
#endif

#define FEC_MAX_DESC 1024
#define FEC_MAX_FRAME_SIZE 2032
#define FEC_MIB_SIZE 64

struct mcf_fec_state {
    SysBusDevice parent_obj;

    MemoryRegion iomem;
    qemu_irq irq[FEC_NUM_IRQ];
    NICState *nic;
    NICConf conf;
    uint32_t irq_state;
    uint32_t eir;
    uint32_t eimr;
    int rx_enabled;
    uint32_t rx_descriptor;
    uint32_t tx_descriptor;
    uint32_t ecr;
    uint32_t mmfr;
    uint32_t mscr;
    uint32_t rcr;
    uint32_t tcr;
    uint32_t tfwr;
    uint32_t rfsr;
    uint32_t erdsr;
    uint32_t etdsr;
    uint32_t emrbr;
    uint32_t mib[FEC_MIB_SIZE];
};

#define FEC_INT_HB   0x80000000
#define FEC_INT_BABR 0x40000000
#define FEC_INT_BABT 0x20000000
#define FEC_INT_GRA  0x10000000
#define FEC_INT_TXF  0x08000000
#define FEC_INT_TXB  0x04000000
#define FEC_INT_RXF  0x02000000
#define FEC_INT_RXB  0x01000000
#define FEC_INT_MII  0x00800000
#define FEC_INT_EB   0x00400000
#define FEC_INT_LC   0x00200000
#define FEC_INT_RL   0x00100000
#define FEC_INT_UN   0x00080000

#define FEC_EN    2
#define FEC_RESET 1

/* Map interrupt flags onto IRQ lines. */
static const uint32_t mcf_fec_irq_map[FEC_NUM_IRQ] = {
    FEC_INT_TXF,
    FEC_INT_TXB,
    FEC_INT_UN,
    FEC_INT_RL,
    FEC_INT_RXF,
    FEC_INT_RXB,
    FEC_INT_MII,
    FEC_INT_LC,
    FEC_INT_HB,
    FEC_INT_GRA,
    FEC_INT_EB,
    FEC_INT_BABT,
    FEC_INT_BABR
};
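/*
 * Note on the descriptor model used below (summary added for readability):
 * TX and RX buffer descriptors are 8 bytes in guest memory (16-bit flags,
 * 16-bit length, 32-bit buffer pointer), stored big-endian.  The emulation
 * walks the rings starting at ETDSR/ERDSR, consuming descriptors while
 * FEC_BD_R (TX "ready") or FEC_BD_E (RX "empty") is set, and wraps back to
 * the ring base when a descriptor has FEC_BD_W set.  Refer to the ColdFire
 * FEC reference manual for the authoritative bit definitions; this comment
 * only describes what this file implements.
 */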
/* Buffer Descriptor.  */
typedef struct {
    uint16_t flags;
    uint16_t length;
    uint32_t data;
} mcf_fec_bd;

#define FEC_BD_R    0x8000
#define FEC_BD_E    0x8000
#define FEC_BD_O1   0x4000
#define FEC_BD_W    0x2000
#define FEC_BD_O2   0x1000
#define FEC_BD_L    0x0800
#define FEC_BD_TC   0x0400
#define FEC_BD_ABC  0x0200
#define FEC_BD_M    0x0100
#define FEC_BD_BC   0x0080
#define FEC_BD_MC   0x0040
#define FEC_BD_LG   0x0020
#define FEC_BD_NO   0x0010
#define FEC_BD_CR   0x0004
#define FEC_BD_OV   0x0002
#define FEC_BD_TR   0x0001

#define MIB_RMON_T_DROP         0
#define MIB_RMON_T_PACKETS      1
#define MIB_RMON_T_BC_PKT       2
#define MIB_RMON_T_MC_PKT       3
#define MIB_RMON_T_CRC_ALIGN    4
#define MIB_RMON_T_UNDERSIZE    5
#define MIB_RMON_T_OVERSIZE     6
#define MIB_RMON_T_FRAG         7
#define MIB_RMON_T_JAB          8
#define MIB_RMON_T_COL          9
#define MIB_RMON_T_P64          10
#define MIB_RMON_T_P65TO127     11
#define MIB_RMON_T_P128TO255    12
#define MIB_RMON_T_P256TO511    13
#define MIB_RMON_T_P512TO1023   14
#define MIB_RMON_T_P1024TO2047  15
#define MIB_RMON_T_P_GTE2048    16
#define MIB_RMON_T_OCTETS       17
#define MIB_IEEE_T_DROP         18
#define MIB_IEEE_T_FRAME_OK     19
#define MIB_IEEE_T_1COL         20
#define MIB_IEEE_T_MCOL         21
#define MIB_IEEE_T_DEF          22
#define MIB_IEEE_T_LCOL         23
#define MIB_IEEE_T_EXCOL        24
#define MIB_IEEE_T_MACERR       25
#define MIB_IEEE_T_CSERR        26
#define MIB_IEEE_T_SQE          27
#define MIB_IEEE_T_FDXFC        28
#define MIB_IEEE_T_OCTETS_OK    29

#define MIB_RMON_R_DROP         32
#define MIB_RMON_R_PACKETS      33
#define MIB_RMON_R_BC_PKT       34
#define MIB_RMON_R_MC_PKT       35
#define MIB_RMON_R_CRC_ALIGN    36
#define MIB_RMON_R_UNDERSIZE    37
#define MIB_RMON_R_OVERSIZE     38
#define MIB_RMON_R_FRAG         39
#define MIB_RMON_R_JAB          40
#define MIB_RMON_R_RESVD_0      41
#define MIB_RMON_R_P64          42
#define MIB_RMON_R_P65TO127     43
#define MIB_RMON_R_P128TO255    44
#define MIB_RMON_R_P256TO511    45
#define MIB_RMON_R_P512TO1023   46
#define MIB_RMON_R_P1024TO2047  47
#define MIB_RMON_R_P_GTE2048    48
#define MIB_RMON_R_OCTETS       49
#define MIB_IEEE_R_DROP         50
#define MIB_IEEE_R_FRAME_OK     51
#define MIB_IEEE_R_CRC          52
#define MIB_IEEE_R_ALIGN        53
#define MIB_IEEE_R_MACERR       54
#define MIB_IEEE_R_FDXFC        55
#define MIB_IEEE_R_OCTETS_OK    56

static void mcf_fec_read_bd(mcf_fec_bd *bd, uint32_t addr)
{
    cpu_physical_memory_read(addr, bd, sizeof(*bd));
    be16_to_cpus(&bd->flags);
    be16_to_cpus(&bd->length);
    be32_to_cpus(&bd->data);
}

static void mcf_fec_write_bd(mcf_fec_bd *bd, uint32_t addr)
{
    mcf_fec_bd tmp;
    tmp.flags = cpu_to_be16(bd->flags);
    tmp.length = cpu_to_be16(bd->length);
    tmp.data = cpu_to_be32(bd->data);
    cpu_physical_memory_write(addr, &tmp, sizeof(tmp));
}

static void mcf_fec_update(mcf_fec_state *s)
{
    uint32_t active;
    uint32_t changed;
    uint32_t mask;
    int i;

    active = s->eir & s->eimr;
    changed = active ^ s->irq_state;
    for (i = 0; i < FEC_NUM_IRQ; i++) {
        mask = mcf_fec_irq_map[i];
        if (changed & mask) {
            DPRINTF("IRQ %d = %d\n", i, (active & mask) != 0);
            qemu_set_irq(s->irq[i], (active & mask) != 0);
        }
    }
    s->irq_state = active;
}
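/*
 * Transmit path (summary added for readability): a TDAR write triggers
 * mcf_fec_do_tx(), which walks the TX ring from tx_descriptor, gathers
 * buffers into a single frame until it sees FEC_BD_L, hands the frame to
 * qemu_send_packet(), then clears the ready bit and writes the descriptor
 * back.  Frames larger than FEC_MAX_FRAME_SIZE are truncated and flagged
 * with FEC_INT_BABT.  This mirrors the code below rather than the full
 * hardware behaviour: there is no real wire, so transmission is immediate.
 */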
static void mcf_fec_tx_stats(mcf_fec_state *s, int size)
{
    s->mib[MIB_RMON_T_PACKETS]++;
    s->mib[MIB_RMON_T_OCTETS] += size;
    if (size < 64) {
        s->mib[MIB_RMON_T_FRAG]++;
    } else if (size == 64) {
        s->mib[MIB_RMON_T_P64]++;
    } else if (size < 128) {
        s->mib[MIB_RMON_T_P65TO127]++;
    } else if (size < 256) {
        s->mib[MIB_RMON_T_P128TO255]++;
    } else if (size < 512) {
        s->mib[MIB_RMON_T_P256TO511]++;
    } else if (size < 1024) {
        s->mib[MIB_RMON_T_P512TO1023]++;
    } else if (size < 2048) {
        s->mib[MIB_RMON_T_P1024TO2047]++;
    } else {
        s->mib[MIB_RMON_T_P_GTE2048]++;
    }
    s->mib[MIB_IEEE_T_FRAME_OK]++;
    s->mib[MIB_IEEE_T_OCTETS_OK] += size;
}

static void mcf_fec_do_tx(mcf_fec_state *s)
{
    uint32_t addr;
    mcf_fec_bd bd;
    int frame_size;
    int len, descnt = 0;
    uint8_t frame[FEC_MAX_FRAME_SIZE];
    uint8_t *ptr;

    DPRINTF("do_tx\n");
    ptr = frame;
    frame_size = 0;
    addr = s->tx_descriptor;
    while (descnt++ < FEC_MAX_DESC) {
        mcf_fec_read_bd(&bd, addr);
        DPRINTF("tx_bd %x flags %04x len %d data %08x\n",
                addr, bd.flags, bd.length, bd.data);
        if ((bd.flags & FEC_BD_R) == 0) {
            /* Run out of descriptors to transmit. */
            break;
        }
        len = bd.length;
        if (frame_size + len > FEC_MAX_FRAME_SIZE) {
            len = FEC_MAX_FRAME_SIZE - frame_size;
            s->eir |= FEC_INT_BABT;
        }
        cpu_physical_memory_read(bd.data, ptr, len);
        ptr += len;
        frame_size += len;
        if (bd.flags & FEC_BD_L) {
            /* Last buffer in frame. */
            DPRINTF("Sending packet\n");
            qemu_send_packet(qemu_get_queue(s->nic), frame, frame_size);
            mcf_fec_tx_stats(s, frame_size);
            ptr = frame;
            frame_size = 0;
            s->eir |= FEC_INT_TXF;
        }
        s->eir |= FEC_INT_TXB;
        bd.flags &= ~FEC_BD_R;
        /* Write back the modified descriptor. */
        mcf_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & FEC_BD_W) != 0) {
            addr = s->etdsr;
        } else {
            addr += 8;
        }
    }
    s->tx_descriptor = addr;
}

static void mcf_fec_enable_rx(mcf_fec_state *s)
{
    NetClientState *nc = qemu_get_queue(s->nic);
    mcf_fec_bd bd;

    mcf_fec_read_bd(&bd, s->rx_descriptor);
    s->rx_enabled = ((bd.flags & FEC_BD_E) != 0);
    if (s->rx_enabled) {
        qemu_flush_queued_packets(nc);
    }
}

static void mcf_fec_reset(DeviceState *dev)
{
    mcf_fec_state *s = MCF_FEC_NET(dev);

    s->eir = 0;
    s->eimr = 0;
    s->rx_enabled = 0;
    s->ecr = 0;
    s->mscr = 0;
    s->rcr = 0x05ee0001;
    s->tcr = 0;
    s->tfwr = 0;
    s->rfsr = 0x500;
}
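/*
 * MDIO access (summary added for readability): the guest programs a
 * management frame into MMFR; bits 28-29 select the read/write opcode,
 * bits 23-27 the PHY address, bits 18-22 the register number, and the
 * low 16 bits carry the data.  The model below answers for a single PHY
 * at address 1 with fixed DP83848-style ID and status registers; reads of
 * any other PHY address or of an unknown register return all-ones in the
 * data field.  This describes only what mcf_fec_read_mdio() implements,
 * not a complete MDIO state machine.
 */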
#define MMFR_WRITE_OP   (1 << 28)
#define MMFR_READ_OP    (2 << 28)
#define MMFR_PHYADDR(v) (((v) >> 23) & 0x1f)
#define MMFR_REGNUM(v)  (((v) >> 18) & 0x1f)

static uint64_t mcf_fec_read_mdio(mcf_fec_state *s)
{
    uint64_t v;

    if (s->mmfr & MMFR_WRITE_OP) {
        return s->mmfr;
    }
    if (MMFR_PHYADDR(s->mmfr) != 1) {
        return s->mmfr |= 0xffff;
    }

    switch (MMFR_REGNUM(s->mmfr)) {
    case MII_BMCR:
        v = MII_BMCR_SPEED | MII_BMCR_AUTOEN | MII_BMCR_FD;
        break;
    case MII_BMSR:
        v = MII_BMSR_100TX_FD | MII_BMSR_100TX_HD | MII_BMSR_10T_FD |
            MII_BMSR_10T_HD | MII_BMSR_MFPS | MII_BMSR_AN_COMP |
            MII_BMSR_AUTONEG | MII_BMSR_LINK_ST;
        break;
    case MII_PHYID1:
        v = DP83848_PHYID1;
        break;
    case MII_PHYID2:
        v = DP83848_PHYID2;
        break;
    case MII_ANAR:
        v = MII_ANAR_TXFD | MII_ANAR_TX | MII_ANAR_10FD |
            MII_ANAR_10 | MII_ANAR_CSMACD;
        break;
    case MII_ANLPAR:
        v = MII_ANLPAR_ACK | MII_ANLPAR_TXFD | MII_ANLPAR_TX |
            MII_ANLPAR_10FD | MII_ANLPAR_10 | MII_ANLPAR_CSMACD;
        break;
    default:
        v = 0xffff;
        break;
    }
    s->mmfr = (s->mmfr & ~0xffff) | v;
    return s->mmfr;
}

static uint64_t mcf_fec_read(void *opaque, hwaddr addr,
                             unsigned size)
{
    mcf_fec_state *s = (mcf_fec_state *)opaque;
    switch (addr & 0x3ff) {
    case 0x004: return s->eir;
    case 0x008: return s->eimr;
    case 0x010: return s->rx_enabled ? (1 << 24) : 0; /* RDAR */
    case 0x014: return 0; /* TDAR */
    case 0x024: return s->ecr;
    case 0x040: return mcf_fec_read_mdio(s);
    case 0x044: return s->mscr;
    case 0x064: return 0; /* MIBC */
    case 0x084: return s->rcr;
    case 0x0c4: return s->tcr;
    case 0x0e4: /* PALR */
        return (s->conf.macaddr.a[0] << 24) | (s->conf.macaddr.a[1] << 16)
               | (s->conf.macaddr.a[2] << 8) | s->conf.macaddr.a[3];
    case 0x0e8: /* PAUR */
        return (s->conf.macaddr.a[4] << 24) | (s->conf.macaddr.a[5] << 16) | 0x8808;
    case 0x0ec: return 0x10000; /* OPD */
    case 0x118: return 0;
    case 0x11c: return 0;
    case 0x120: return 0;
    case 0x124: return 0;
    case 0x144: return s->tfwr;
    case 0x14c: return 0x600;
    case 0x150: return s->rfsr;
    case 0x180: return s->erdsr;
    case 0x184: return s->etdsr;
    case 0x188: return s->emrbr;
    case 0x200 ... 0x2e0: return s->mib[(addr & 0x1ff) / 4];
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad address 0x%" HWADDR_PRIX "\n",
                      __func__, addr);
        return 0;
    }
}

static void mcf_fec_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned size)
{
    mcf_fec_state *s = (mcf_fec_state *)opaque;
    switch (addr & 0x3ff) {
    case 0x004:
        s->eir &= ~value;
        break;
    case 0x008:
        s->eimr = value;
        break;
    case 0x010: /* RDAR */
        if ((s->ecr & FEC_EN) && !s->rx_enabled) {
            DPRINTF("RX enable\n");
            mcf_fec_enable_rx(s);
        }
        break;
    case 0x014: /* TDAR */
        if (s->ecr & FEC_EN) {
            mcf_fec_do_tx(s);
        }
        break;
    case 0x024:
        s->ecr = value;
        if (value & FEC_RESET) {
            DPRINTF("Reset\n");
            mcf_fec_reset(opaque);
        }
        if ((s->ecr & FEC_EN) == 0) {
            s->rx_enabled = 0;
        }
        break;
    case 0x040:
        s->mmfr = value;
        s->eir |= FEC_INT_MII;
        break;
    case 0x044:
        s->mscr = value & 0xfe;
        break;
    case 0x064:
        /* TODO: Implement MIB. */
        break;
    case 0x084:
        s->rcr = value & 0x07ff003f;
        /* TODO: Implement LOOP mode. */
        break;
    case 0x0c4: /* TCR */
        /* We transmit immediately, so raise GRA immediately. */
        s->tcr = value;
        if (value & 1) {
            s->eir |= FEC_INT_GRA;
        }
        break;
    case 0x0e4: /* PALR */
        s->conf.macaddr.a[0] = value >> 24;
        s->conf.macaddr.a[1] = value >> 16;
        s->conf.macaddr.a[2] = value >> 8;
        s->conf.macaddr.a[3] = value;
        break;
    case 0x0e8: /* PAUR */
        s->conf.macaddr.a[4] = value >> 24;
        s->conf.macaddr.a[5] = value >> 16;
        break;
    case 0x0ec:
        /* OPD */
        break;
    case 0x118:
    case 0x11c:
    case 0x120:
    case 0x124:
        /* TODO: implement MAC hash filtering. */
        break;
    case 0x144:
        s->tfwr = value & 3;
        break;
    case 0x14c:
        /* FRBR writes ignored. */
        break;
    case 0x150:
        s->rfsr = (value & 0x3fc) | 0x400;
        break;
    case 0x180:
        s->erdsr = value & ~3;
        s->rx_descriptor = s->erdsr;
        break;
    case 0x184:
        s->etdsr = value & ~3;
        s->tx_descriptor = s->etdsr;
        break;
    case 0x188:
        s->emrbr = value > 0 ? value & 0x7F0 : 0x7F0;
        break;
    case 0x200 ... 0x2e0:
        s->mib[(addr & 0x1ff) / 4] = value;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad address 0x%" HWADDR_PRIX "\n",
                      __func__, addr);
        return;
    }
    mcf_fec_update(s);
}
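/*
 * Receive path (summary added for readability): mcf_fec_receive() appends
 * a 4-byte FCS, checks via mcf_fec_have_receive_space() that enough empty
 * descriptors (FEC_BD_E) are available for the whole frame, then scatters
 * the data into EMRBR-sized buffers, setting FEC_BD_L plus any error flags
 * on the final descriptor and raising the RXB/RXF interrupts.  Oversized
 * frames are truncated and marked FEC_BD_TR/FEC_BD_LG.  Again, this only
 * summarises the code that follows.
 */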
static void mcf_fec_rx_stats(mcf_fec_state *s, int size)
{
    s->mib[MIB_RMON_R_PACKETS]++;
    s->mib[MIB_RMON_R_OCTETS] += size;
    if (size < 64) {
        s->mib[MIB_RMON_R_FRAG]++;
    } else if (size == 64) {
        s->mib[MIB_RMON_R_P64]++;
    } else if (size < 128) {
        s->mib[MIB_RMON_R_P65TO127]++;
    } else if (size < 256) {
        s->mib[MIB_RMON_R_P128TO255]++;
    } else if (size < 512) {
        s->mib[MIB_RMON_R_P256TO511]++;
    } else if (size < 1024) {
        s->mib[MIB_RMON_R_P512TO1023]++;
    } else if (size < 2048) {
        s->mib[MIB_RMON_R_P1024TO2047]++;
    } else {
        s->mib[MIB_RMON_R_P_GTE2048]++;
    }
    s->mib[MIB_IEEE_R_FRAME_OK]++;
    s->mib[MIB_IEEE_R_OCTETS_OK] += size;
}

static int mcf_fec_have_receive_space(mcf_fec_state *s, size_t want)
{
    mcf_fec_bd bd;
    uint32_t addr;

    /* Walk descriptor list to determine if we have enough buffer */
    addr = s->rx_descriptor;
    while (want > 0) {
        mcf_fec_read_bd(&bd, addr);
        if ((bd.flags & FEC_BD_E) == 0) {
            return 0;
        }
        if (want < s->emrbr) {
            return 1;
        }
        want -= s->emrbr;
        /* Advance to the next descriptor. */
        if ((bd.flags & FEC_BD_W) != 0) {
            addr = s->erdsr;
        } else {
            addr += 8;
        }
    }
    return 0;
}

static ssize_t mcf_fec_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    mcf_fec_state *s = qemu_get_nic_opaque(nc);
    mcf_fec_bd bd;
    uint32_t flags = 0;
    uint32_t addr;
    uint32_t crc;
    uint32_t buf_addr;
    uint8_t *crc_ptr;
    unsigned int buf_len;
    size_t retsize;

    DPRINTF("do_rx len %zu\n", size);
    if (!s->rx_enabled) {
        return -1;
    }
    /* 4 bytes for the CRC. */
    size += 4;
    crc = cpu_to_be32(crc32(~0, buf, size));
    crc_ptr = (uint8_t *)&crc;
    /* Huge frames are truncated. */
    if (size > FEC_MAX_FRAME_SIZE) {
        size = FEC_MAX_FRAME_SIZE;
        flags |= FEC_BD_TR | FEC_BD_LG;
    }
    /* Frames larger than the user limit just set error flags. */
    if (size > (s->rcr >> 16)) {
        flags |= FEC_BD_LG;
    }
    /* Check if we have enough space in current descriptors */
    if (!mcf_fec_have_receive_space(s, size)) {
        return 0;
    }
    addr = s->rx_descriptor;
    retsize = size;
    while (size > 0) {
        mcf_fec_read_bd(&bd, addr);
        buf_len = (size <= s->emrbr) ? size : s->emrbr;
        bd.length = buf_len;
        size -= buf_len;
        DPRINTF("rx_bd %x length %d\n", addr, bd.length);
        /* The last 4 bytes are the CRC. */
        if (size < 4) {
            buf_len += size - 4;
        }
        buf_addr = bd.data;
        cpu_physical_memory_write(buf_addr, buf, buf_len);
        buf += buf_len;
        if (size < 4) {
            cpu_physical_memory_write(buf_addr + buf_len, crc_ptr, 4 - size);
            crc_ptr += 4 - size;
        }
        bd.flags &= ~FEC_BD_E;
        if (size == 0) {
            /* Last buffer in frame. */
            bd.flags |= flags | FEC_BD_L;
            DPRINTF("rx frame flags %04x\n", bd.flags);
            s->eir |= FEC_INT_RXF;
        } else {
            s->eir |= FEC_INT_RXB;
        }
        mcf_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & FEC_BD_W) != 0) {
            addr = s->erdsr;
        } else {
            addr += 8;
        }
    }
    s->rx_descriptor = addr;
    mcf_fec_rx_stats(s, retsize);
    mcf_fec_enable_rx(s);
    mcf_fec_update(s);
    return retsize;
}
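/*
 * What follows is the QOM/qdev glue: the MMIO region covers a 0x400-byte
 * register window, one sysbus IRQ is exported per entry in mcf_fec_irq_map,
 * and realize() creates the NIC backend bound to the mcf_fec_receive handler
 * above.  Board code (e.g. the mcf5208 machine) is expected to map the
 * region and wire the IRQ lines.
 */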
static const MemoryRegionOps mcf_fec_ops = {
    .read = mcf_fec_read,
    .write = mcf_fec_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static NetClientInfo net_mcf_fec_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .receive = mcf_fec_receive,
};

static void mcf_fec_realize(DeviceState *dev, Error **errp)
{
    mcf_fec_state *s = MCF_FEC_NET(dev);

    s->nic = qemu_new_nic(&net_mcf_fec_info, &s->conf,
                          object_get_typename(OBJECT(dev)), dev->id,
                          &dev->mem_reentrancy_guard, s);
    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}

static void mcf_fec_instance_init(Object *obj)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
    mcf_fec_state *s = MCF_FEC_NET(obj);
    int i;

    memory_region_init_io(&s->iomem, obj, &mcf_fec_ops, s, "fec", 0x400);
    sysbus_init_mmio(sbd, &s->iomem);
    for (i = 0; i < FEC_NUM_IRQ; i++) {
        sysbus_init_irq(sbd, &s->irq[i]);
    }
}

static Property mcf_fec_properties[] = {
    DEFINE_NIC_PROPERTIES(mcf_fec_state, conf),
    DEFINE_PROP_END_OF_LIST(),
};

static void mcf_fec_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);

    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    dc->realize = mcf_fec_realize;
    dc->desc = "MCF Fast Ethernet Controller network device";
    device_class_set_legacy_reset(dc, mcf_fec_reset);
    device_class_set_props(dc, mcf_fec_properties);
}

static const TypeInfo mcf_fec_info = {
    .name          = TYPE_MCF_FEC_NET,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(mcf_fec_state),
    .instance_init = mcf_fec_instance_init,
    .class_init    = mcf_fec_class_init,
};

static void mcf_fec_register_types(void)
{
    type_register_static(&mcf_fec_info);
}

type_init(mcf_fec_register_types)