/*
 * ColdFire Fast Ethernet Controller emulation.
 *
 * Copyright (c) 2007 CodeSourcery.
 *
 * This code is licensed under the GPL
 */

#include "qemu/osdep.h"
#include "hw/hw.h"
#include "net/net.h"
#include "hw/m68k/mcf.h"
#include "hw/net/mii.h"
/* For crc32 */
#include <zlib.h>
#include "exec/address-spaces.h"

//#define DEBUG_FEC 1

#ifdef DEBUG_FEC
#define DPRINTF(fmt, ...) \
    do { printf("mcf_fec: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) do {} while (0)
#endif

#define FEC_MAX_DESC 1024
#define FEC_MAX_FRAME_SIZE 2032

typedef struct {
    MemoryRegion *sysmem;
    MemoryRegion iomem;
    qemu_irq *irq;
    NICState *nic;
    NICConf conf;
    uint32_t irq_state;
    uint32_t eir;
    uint32_t eimr;
    int rx_enabled;
    uint32_t rx_descriptor;
    uint32_t tx_descriptor;
    uint32_t ecr;
    uint32_t mmfr;
    uint32_t mscr;
    uint32_t rcr;
    uint32_t tcr;
    uint32_t tfwr;
    uint32_t rfsr;
    uint32_t erdsr;
    uint32_t etdsr;
    uint32_t emrbr;
} mcf_fec_state;

#define FEC_INT_HB   0x80000000
#define FEC_INT_BABR 0x40000000
#define FEC_INT_BABT 0x20000000
#define FEC_INT_GRA  0x10000000
#define FEC_INT_TXF  0x08000000
#define FEC_INT_TXB  0x04000000
#define FEC_INT_RXF  0x02000000
#define FEC_INT_RXB  0x01000000
#define FEC_INT_MII  0x00800000
#define FEC_INT_EB   0x00400000
#define FEC_INT_LC   0x00200000
#define FEC_INT_RL   0x00100000
#define FEC_INT_UN   0x00080000

#define FEC_EN 2
#define FEC_RESET 1

/* Map interrupt flags onto IRQ lines.  */
#define FEC_NUM_IRQ 13
static const uint32_t mcf_fec_irq_map[FEC_NUM_IRQ] = {
    FEC_INT_TXF,
    FEC_INT_TXB,
    FEC_INT_UN,
    FEC_INT_RL,
    FEC_INT_RXF,
    FEC_INT_RXB,
    FEC_INT_MII,
    FEC_INT_LC,
    FEC_INT_HB,
    FEC_INT_GRA,
    FEC_INT_EB,
    FEC_INT_BABT,
    FEC_INT_BABR
};

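/*
 * The FEC exchanges frames with software through rings of 8-byte buffer
 * descriptors in guest memory: a flags word, a buffer length and a buffer
 * address.  Software hands a descriptor to the controller by setting the
 * E (receive) or R (transmit) bit; a descriptor with the W bit set is the
 * last one in the ring and wraps back to the base programmed in ERDSR/ETDSR.
 */
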
/* Buffer Descriptor.  */
typedef struct {
    uint16_t flags;
    uint16_t length;
    uint32_t data;
} mcf_fec_bd;

#define FEC_BD_R    0x8000
#define FEC_BD_E    0x8000
#define FEC_BD_O1   0x4000
#define FEC_BD_W    0x2000
#define FEC_BD_O2   0x1000
#define FEC_BD_L    0x0800
#define FEC_BD_TC   0x0400
#define FEC_BD_ABC  0x0200
#define FEC_BD_M    0x0100
#define FEC_BD_BC   0x0080
#define FEC_BD_MC   0x0040
#define FEC_BD_LG   0x0020
#define FEC_BD_NO   0x0010
#define FEC_BD_CR   0x0004
#define FEC_BD_OV   0x0002
#define FEC_BD_TR   0x0001

static void mcf_fec_read_bd(mcf_fec_bd *bd, uint32_t addr)
{
    cpu_physical_memory_read(addr, bd, sizeof(*bd));
    be16_to_cpus(&bd->flags);
    be16_to_cpus(&bd->length);
    be32_to_cpus(&bd->data);
}

static void mcf_fec_write_bd(mcf_fec_bd *bd, uint32_t addr)
{
    mcf_fec_bd tmp;
    tmp.flags = cpu_to_be16(bd->flags);
    tmp.length = cpu_to_be16(bd->length);
    tmp.data = cpu_to_be32(bd->data);
    cpu_physical_memory_write(addr, &tmp, sizeof(tmp));
}

static void mcf_fec_update(mcf_fec_state *s)
{
    uint32_t active;
    uint32_t changed;
    uint32_t mask;
    int i;

    active = s->eir & s->eimr;
    changed = active ^ s->irq_state;
    for (i = 0; i < FEC_NUM_IRQ; i++) {
        mask = mcf_fec_irq_map[i];
        if (changed & mask) {
            DPRINTF("IRQ %d = %d\n", i, (active & mask) != 0);
            qemu_set_irq(s->irq[i], (active & mask) != 0);
        }
    }
    s->irq_state = active;
}

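/*
 * Walk the transmit descriptor ring, gathering buffers into a single frame
 * until a descriptor with the L (last) bit is reached, then hand the frame
 * to the network backend.  Ownership (the R bit) is returned to software on
 * each descriptor as it is consumed.
 */
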
static void mcf_fec_do_tx(mcf_fec_state *s)
{
    uint32_t addr;
    mcf_fec_bd bd;
    int frame_size;
    int len, descnt = 0;
    uint8_t frame[FEC_MAX_FRAME_SIZE];
    uint8_t *ptr;

    DPRINTF("do_tx\n");
    ptr = frame;
    frame_size = 0;
    addr = s->tx_descriptor;
    while (descnt++ < FEC_MAX_DESC) {
        mcf_fec_read_bd(&bd, addr);
        DPRINTF("tx_bd %x flags %04x len %d data %08x\n",
                addr, bd.flags, bd.length, bd.data);
        if ((bd.flags & FEC_BD_R) == 0) {
            /* Run out of descriptors to transmit.  */
            break;
        }
        len = bd.length;
        if (frame_size + len > FEC_MAX_FRAME_SIZE) {
            len = FEC_MAX_FRAME_SIZE - frame_size;
            s->eir |= FEC_INT_BABT;
        }
        cpu_physical_memory_read(bd.data, ptr, len);
        ptr += len;
        frame_size += len;
        if (bd.flags & FEC_BD_L) {
            /* Last buffer in frame.  */
            DPRINTF("Sending packet\n");
            qemu_send_packet(qemu_get_queue(s->nic), frame, frame_size);
            ptr = frame;
            frame_size = 0;
            s->eir |= FEC_INT_TXF;
        }
        s->eir |= FEC_INT_TXB;
        bd.flags &= ~FEC_BD_R;
        /* Write back the modified descriptor.  */
        mcf_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor.  */
        if ((bd.flags & FEC_BD_W) != 0) {
            addr = s->etdsr;
        } else {
            addr += 8;
        }
    }
    s->tx_descriptor = addr;
}

static void mcf_fec_enable_rx(mcf_fec_state *s)
{
    NetClientState *nc = qemu_get_queue(s->nic);
    mcf_fec_bd bd;

    mcf_fec_read_bd(&bd, s->rx_descriptor);
    s->rx_enabled = ((bd.flags & FEC_BD_E) != 0);
    if (s->rx_enabled) {
        qemu_flush_queued_packets(nc);
    }
}

static void mcf_fec_reset(mcf_fec_state *s)
{
    s->eir = 0;
    s->eimr = 0;
    s->rx_enabled = 0;
    s->ecr = 0;
    s->mscr = 0;
    s->rcr = 0x05ee0001;
    s->tcr = 0;
    s->tfwr = 0;
    s->rfsr = 0x500;
}

#define MMFR_WRITE_OP   (1 << 28)
#define MMFR_READ_OP    (2 << 28)
#define MMFR_PHYADDR(v) (((v) >> 23) & 0x1f)
#define MMFR_REGNUM(v)  (((v) >> 18) & 0x1f)

static uint64_t mcf_fec_read_mdio(mcf_fec_state *s)
{
    uint64_t v;

    if (s->mmfr & MMFR_WRITE_OP)
        return s->mmfr;
    if (MMFR_PHYADDR(s->mmfr) != 1)
        return s->mmfr |= 0xffff;

    switch (MMFR_REGNUM(s->mmfr)) {
    case MII_BMCR:
        v = MII_BMCR_SPEED | MII_BMCR_AUTOEN | MII_BMCR_FD;
        break;
    case MII_BMSR:
        v = MII_BMSR_100TX_FD | MII_BMSR_100TX_HD | MII_BMSR_10T_FD |
            MII_BMSR_10T_HD | MII_BMSR_MFPS | MII_BMSR_AN_COMP |
            MII_BMSR_AUTONEG | MII_BMSR_LINK_ST;
        break;
    case MII_PHYID1:
        v = DP83848_PHYID1;
        break;
    case MII_PHYID2:
        v = DP83848_PHYID2;
        break;
    case MII_ANAR:
        v = MII_ANAR_TXFD | MII_ANAR_TX | MII_ANAR_10FD |
            MII_ANAR_10 | MII_ANAR_CSMACD;
        break;
    case MII_ANLPAR:
        v = MII_ANLPAR_ACK | MII_ANLPAR_TXFD | MII_ANLPAR_TX |
            MII_ANLPAR_10FD | MII_ANLPAR_10 | MII_ANLPAR_CSMACD;
        break;
    default:
        v = 0xffff;
        break;
    }
    s->mmfr = (s->mmfr & ~0xffff) | v;
    return s->mmfr;
}

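/*
 * MMIO read handler for the 0x400-byte register window.  MDIO accesses go
 * through the emulated DP83848 PHY above (which only answers at PHY address
 * 1 and always reports 100TX full duplex, link up, autonegotiation done).
 * Registers that are not modelled (MIB control, hash filter registers, FRBR)
 * read back as fixed values.
 */
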
static uint64_t mcf_fec_read(void *opaque, hwaddr addr,
                             unsigned size)
{
    mcf_fec_state *s = (mcf_fec_state *)opaque;
    switch (addr & 0x3ff) {
    case 0x004: return s->eir;
    case 0x008: return s->eimr;
    case 0x010: return s->rx_enabled ? (1 << 24) : 0; /* RDAR */
    case 0x014: return 0; /* TDAR */
    case 0x024: return s->ecr;
    case 0x040: return mcf_fec_read_mdio(s);
    case 0x044: return s->mscr;
    case 0x064: return 0; /* MIBC */
    case 0x084: return s->rcr;
    case 0x0c4: return s->tcr;
    case 0x0e4: /* PALR */
        return (s->conf.macaddr.a[0] << 24) | (s->conf.macaddr.a[1] << 16)
              | (s->conf.macaddr.a[2] << 8) | s->conf.macaddr.a[3];
    case 0x0e8: /* PAUR */
        return (s->conf.macaddr.a[4] << 24) | (s->conf.macaddr.a[5] << 16) | 0x8808;
    case 0x0ec: return 0x10000; /* OPD */
    case 0x118: return 0;
    case 0x11c: return 0;
    case 0x120: return 0;
    case 0x124: return 0;
    case 0x144: return s->tfwr;
    case 0x14c: return 0x600;
    case 0x150: return s->rfsr;
    case 0x180: return s->erdsr;
    case 0x184: return s->etdsr;
    case 0x188: return s->emrbr;
    default:
        hw_error("mcf_fec_read: Bad address 0x%x\n", (int)addr);
        return 0;
    }
}

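/*
 * MMIO write handler.  EIR is write-one-to-clear, RDAR and TDAR act as
 * doorbells that kick the receive and transmit paths, and setting the RESET
 * bit in ECR reinitialises the device.  Interrupt lines are re-evaluated
 * after every write.
 */
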
static void mcf_fec_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned size)
{
    mcf_fec_state *s = (mcf_fec_state *)opaque;
    switch (addr & 0x3ff) {
    case 0x004:
        s->eir &= ~value;
        break;
    case 0x008:
        s->eimr = value;
        break;
    case 0x010: /* RDAR */
        if ((s->ecr & FEC_EN) && !s->rx_enabled) {
            DPRINTF("RX enable\n");
            mcf_fec_enable_rx(s);
        }
        break;
    case 0x014: /* TDAR */
        if (s->ecr & FEC_EN) {
            mcf_fec_do_tx(s);
        }
        break;
    case 0x024:
        s->ecr = value;
        if (value & FEC_RESET) {
            DPRINTF("Reset\n");
            mcf_fec_reset(s);
        }
        if ((s->ecr & FEC_EN) == 0) {
            s->rx_enabled = 0;
        }
        break;
    case 0x040:
        s->mmfr = value;
        s->eir |= FEC_INT_MII;
        break;
    case 0x044:
        s->mscr = value & 0xfe;
        break;
    case 0x064:
        /* TODO: Implement MIB.  */
        break;
    case 0x084:
        s->rcr = value & 0x07ff003f;
        /* TODO: Implement LOOP mode.  */
        break;
    case 0x0c4: /* TCR */
        /* We transmit immediately, so raise GRA immediately.  */
        s->tcr = value;
        if (value & 1)
            s->eir |= FEC_INT_GRA;
        break;
    case 0x0e4: /* PALR */
        s->conf.macaddr.a[0] = value >> 24;
        s->conf.macaddr.a[1] = value >> 16;
        s->conf.macaddr.a[2] = value >> 8;
        s->conf.macaddr.a[3] = value;
        break;
    case 0x0e8: /* PAUR */
        s->conf.macaddr.a[4] = value >> 24;
        s->conf.macaddr.a[5] = value >> 16;
        break;
    case 0x0ec:
        /* OPD */
        break;
    case 0x118:
    case 0x11c:
    case 0x120:
    case 0x124:
        /* TODO: implement MAC hash filtering.  */
        break;
    case 0x144:
        s->tfwr = value & 3;
        break;
    case 0x14c:
        /* FRBR writes ignored.  */
        break;
    case 0x150:
        s->rfsr = (value & 0x3fc) | 0x400;
        break;
    case 0x180:
        s->erdsr = value & ~3;
        s->rx_descriptor = s->erdsr;
        break;
    case 0x184:
        s->etdsr = value & ~3;
        s->tx_descriptor = s->etdsr;
        break;
    case 0x188:
        s->emrbr = value & 0x7f0;
        break;
    default:
        hw_error("mcf_fec_write Bad address 0x%x\n", (int)addr);
    }
    mcf_fec_update(s);
}

static int mcf_fec_have_receive_space(mcf_fec_state *s, size_t want)
{
    mcf_fec_bd bd;
    uint32_t addr;

    /* Walk descriptor list to determine if we have enough buffer */
    addr = s->rx_descriptor;
    while (want > 0) {
        mcf_fec_read_bd(&bd, addr);
        if ((bd.flags & FEC_BD_E) == 0) {
            return 0;
        }
        if (want < s->emrbr) {
            return 1;
        }
        want -= s->emrbr;
        /* Advance to the next descriptor.  */
        if ((bd.flags & FEC_BD_W) != 0) {
            addr = s->erdsr;
        } else {
            addr += 8;
        }
    }
    return 0;
}

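/*
 * Receive a frame from the network backend.  The frame plus a trailing CRC
 * is copied into the receive ring, split across buffers of EMRBR bytes; the
 * final descriptor of the frame gets the L bit together with any truncation
 * or length error flags.  Returning 0 asks the backend to queue the packet
 * until descriptors become available again.
 */
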
static ssize_t mcf_fec_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    mcf_fec_state *s = qemu_get_nic_opaque(nc);
    mcf_fec_bd bd;
    uint32_t flags = 0;
    uint32_t addr;
    uint32_t crc;
    uint32_t buf_addr;
    uint8_t *crc_ptr;
    unsigned int buf_len;
    size_t retsize;

    DPRINTF("do_rx len %zu\n", size);
    if (!s->rx_enabled) {
        return -1;
    }
    /* 4 bytes for the CRC.  */
    size += 4;
    crc = cpu_to_be32(crc32(~0, buf, size));
    crc_ptr = (uint8_t *)&crc;
    /* Huge frames are truncated.  */
    if (size > FEC_MAX_FRAME_SIZE) {
        size = FEC_MAX_FRAME_SIZE;
        flags |= FEC_BD_TR | FEC_BD_LG;
    }
    /* Frames larger than the user limit just set error flags.  */
    if (size > (s->rcr >> 16)) {
        flags |= FEC_BD_LG;
    }
    /* Check if we have enough space in current descriptors */
    if (!mcf_fec_have_receive_space(s, size)) {
        return 0;
    }
    addr = s->rx_descriptor;
    retsize = size;
    while (size > 0) {
        mcf_fec_read_bd(&bd, addr);
        buf_len = (size <= s->emrbr) ? size : s->emrbr;
        bd.length = buf_len;
        size -= buf_len;
        DPRINTF("rx_bd %x length %d\n", addr, bd.length);
        /* The last 4 bytes are the CRC.  */
        if (size < 4)
            buf_len += size - 4;
        buf_addr = bd.data;
        cpu_physical_memory_write(buf_addr, buf, buf_len);
        buf += buf_len;
        if (size < 4) {
            cpu_physical_memory_write(buf_addr + buf_len, crc_ptr, 4 - size);
            crc_ptr += 4 - size;
        }
        bd.flags &= ~FEC_BD_E;
        if (size == 0) {
            /* Last buffer in frame.  */
            bd.flags |= flags | FEC_BD_L;
            DPRINTF("rx frame flags %04x\n", bd.flags);
            s->eir |= FEC_INT_RXF;
        } else {
            s->eir |= FEC_INT_RXB;
        }
        mcf_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor.  */
        if ((bd.flags & FEC_BD_W) != 0) {
            addr = s->erdsr;
        } else {
            addr += 8;
        }
    }
    s->rx_descriptor = addr;
    mcf_fec_enable_rx(s);
    mcf_fec_update(s);
    return retsize;
}

static const MemoryRegionOps mcf_fec_ops = {
    .read = mcf_fec_read,
    .write = mcf_fec_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static NetClientInfo net_mcf_fec_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .receive = mcf_fec_receive,
};

void mcf_fec_init(MemoryRegion *sysmem, NICInfo *nd,
                  hwaddr base, qemu_irq *irq)
{
    mcf_fec_state *s;

    qemu_check_nic_model(nd, "mcf_fec");

    s = (mcf_fec_state *)g_malloc0(sizeof(mcf_fec_state));
    s->sysmem = sysmem;
    s->irq = irq;

    memory_region_init_io(&s->iomem, NULL, &mcf_fec_ops, s, "fec", 0x400);
    memory_region_add_subregion(sysmem, base, &s->iomem);

    s->conf.macaddr = nd->macaddr;
    s->conf.peers.ncs[0] = nd->netdev;

    s->nic = qemu_new_nic(&net_mcf_fec_info, &s->conf, nd->model, nd->name, s);

    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}
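
/*
 * For illustration only: a ColdFire board model would typically instantiate
 * the controller along the lines of
 *
 *     mcf_fec_init(get_system_memory(), &nd_table[0], fec_base, pic + fec_irq);
 *
 * where fec_base and fec_irq are placeholders for the board-specific MMIO
 * base address and the first of the FEC_NUM_IRQ interrupt lines; the exact
 * values are not defined in this file.
 */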