/*
 * ColdFire Fast Ethernet Controller emulation.
 *
 * Copyright (c) 2007 CodeSourcery.
 *
 * This code is licensed under the GPL
 */
#include "hw/hw.h"
#include "net/net.h"
#include "hw/m68k/mcf.h"
#include "hw/net/mii.h"
/* For crc32 */
#include <zlib.h>
#include "exec/address-spaces.h"

//#define DEBUG_FEC 1

#ifdef DEBUG_FEC
#define DPRINTF(fmt, ...) \
    do { printf("mcf_fec: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) do {} while (0)
#endif

#define FEC_MAX_FRAME_SIZE 2032

typedef struct {
    MemoryRegion *sysmem;
    MemoryRegion iomem;
    qemu_irq *irq;
    NICState *nic;
    NICConf conf;
    uint32_t irq_state;
    uint32_t eir;
    uint32_t eimr;
    int rx_enabled;
    uint32_t rx_descriptor;
    uint32_t tx_descriptor;
    uint32_t ecr;
    uint32_t mmfr;
    uint32_t mscr;
    uint32_t rcr;
    uint32_t tcr;
    uint32_t tfwr;
    uint32_t rfsr;
    uint32_t erdsr;
    uint32_t etdsr;
    uint32_t emrbr;
} mcf_fec_state;

#define FEC_INT_HB   0x80000000
#define FEC_INT_BABR 0x40000000
#define FEC_INT_BABT 0x20000000
#define FEC_INT_GRA  0x10000000
#define FEC_INT_TXF  0x08000000
#define FEC_INT_TXB  0x04000000
#define FEC_INT_RXF  0x02000000
#define FEC_INT_RXB  0x01000000
#define FEC_INT_MII  0x00800000
#define FEC_INT_EB   0x00400000
#define FEC_INT_LC   0x00200000
#define FEC_INT_RL   0x00100000
#define FEC_INT_UN   0x00080000

#define FEC_EN    2
#define FEC_RESET 1

/* Map interrupt flags onto IRQ lines.  */
#define FEC_NUM_IRQ 13
static const uint32_t mcf_fec_irq_map[FEC_NUM_IRQ] = {
    FEC_INT_TXF,
    FEC_INT_TXB,
    FEC_INT_UN,
    FEC_INT_RL,
    FEC_INT_RXF,
    FEC_INT_RXB,
    FEC_INT_MII,
    FEC_INT_LC,
    FEC_INT_HB,
    FEC_INT_GRA,
    FEC_INT_EB,
    FEC_INT_BABT,
    FEC_INT_BABR
};

/* Buffer Descriptor.  */
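/*
 * Descriptors live in guest memory in big-endian byte order; the helpers
 * below byte-swap them on access.  FEC_BD_R (transmit ready) and FEC_BD_E
 * (receive empty) share bit 15 because a given descriptor ring is only
 * ever used for one direction.
 */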
typedef struct {
    uint16_t flags;
    uint16_t length;
    uint32_t data;
} mcf_fec_bd;

#define FEC_BD_R   0x8000
#define FEC_BD_E   0x8000
#define FEC_BD_O1  0x4000
#define FEC_BD_W   0x2000
#define FEC_BD_O2  0x1000
#define FEC_BD_L   0x0800
#define FEC_BD_TC  0x0400
#define FEC_BD_ABC 0x0200
#define FEC_BD_M   0x0100
#define FEC_BD_BC  0x0080
#define FEC_BD_MC  0x0040
#define FEC_BD_LG  0x0020
#define FEC_BD_NO  0x0010
#define FEC_BD_CR  0x0004
#define FEC_BD_OV  0x0002
#define FEC_BD_TR  0x0001

static void mcf_fec_read_bd(mcf_fec_bd *bd, uint32_t addr)
{
    cpu_physical_memory_read(addr, bd, sizeof(*bd));
    be16_to_cpus(&bd->flags);
    be16_to_cpus(&bd->length);
    be32_to_cpus(&bd->data);
}

static void mcf_fec_write_bd(mcf_fec_bd *bd, uint32_t addr)
{
    mcf_fec_bd tmp;
    tmp.flags = cpu_to_be16(bd->flags);
    tmp.length = cpu_to_be16(bd->length);
    tmp.data = cpu_to_be32(bd->data);
    cpu_physical_memory_write(addr, &tmp, sizeof(tmp));
}

/* Update the IRQ lines to match the currently pending and enabled events.  */
static void mcf_fec_update(mcf_fec_state *s)
{
    uint32_t active;
    uint32_t changed;
    uint32_t mask;
    int i;

    active = s->eir & s->eimr;
    changed = active ^ s->irq_state;
    for (i = 0; i < FEC_NUM_IRQ; i++) {
        mask = mcf_fec_irq_map[i];
        if (changed & mask) {
            DPRINTF("IRQ %d = %d\n", i, (active & mask) != 0);
            qemu_set_irq(s->irq[i], (active & mask) != 0);
        }
    }
    s->irq_state = active;
}

static void mcf_fec_do_tx(mcf_fec_state *s)
{
    uint32_t addr;
    mcf_fec_bd bd;
    int frame_size;
    int len;
    uint8_t frame[FEC_MAX_FRAME_SIZE];
    uint8_t *ptr;

    DPRINTF("do_tx\n");
    ptr = frame;
    frame_size = 0;
    addr = s->tx_descriptor;
    while (1) {
        mcf_fec_read_bd(&bd, addr);
        DPRINTF("tx_bd %x flags %04x len %d data %08x\n",
                addr, bd.flags, bd.length, bd.data);
        if ((bd.flags & FEC_BD_R) == 0) {
            /* Ran out of descriptors to transmit.  */
            break;
        }
        len = bd.length;
        if (frame_size + len > FEC_MAX_FRAME_SIZE) {
            len = FEC_MAX_FRAME_SIZE - frame_size;
            s->eir |= FEC_INT_BABT;
        }
        cpu_physical_memory_read(bd.data, ptr, len);
        ptr += len;
        frame_size += len;
        if (bd.flags & FEC_BD_L) {
            /* Last buffer in frame; send the whole accumulated frame.  */
            DPRINTF("Sending packet\n");
            qemu_send_packet(qemu_get_queue(s->nic), frame, frame_size);
            ptr = frame;
            frame_size = 0;
            s->eir |= FEC_INT_TXF;
        }
        s->eir |= FEC_INT_TXB;
        bd.flags &= ~FEC_BD_R;
        /* Write back the modified descriptor.  */
        mcf_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor.  */
        if ((bd.flags & FEC_BD_W) != 0) {
            addr = s->etdsr;
        } else {
            addr += 8;
        }
    }
    s->tx_descriptor = addr;
}

static void mcf_fec_enable_rx(mcf_fec_state *s)
{
    NetClientState *nc = qemu_get_queue(s->nic);
    mcf_fec_bd bd;

    mcf_fec_read_bd(&bd, s->rx_descriptor);
    s->rx_enabled = ((bd.flags & FEC_BD_E) != 0);
    if (s->rx_enabled) {
        qemu_flush_queued_packets(nc);
    }
}

static void mcf_fec_reset(mcf_fec_state *s)
{
    s->eir = 0;
    s->eimr = 0;
    s->rx_enabled = 0;
    s->ecr = 0;
    s->mscr = 0;
    s->rcr = 0x05ee0001;
    s->tcr = 0;
    s->tfwr = 0;
    s->rfsr = 0x500;
}

#define MMFR_WRITE_OP   (1 << 28)
#define MMFR_READ_OP    (2 << 28)
#define MMFR_PHYADDR(v) (((v) >> 23) & 0x1f)
#define MMFR_REGNUM(v)  (((v) >> 18) & 0x1f)

/* Emulate a DP83848 PHY at MII address 1; reads of any other address
   return all-ones, as if no PHY were attached.  */
static uint64_t mcf_fec_read_mdio(mcf_fec_state *s)
{
    uint64_t v;

    if (s->mmfr & MMFR_WRITE_OP) {
        return s->mmfr;
    }
    if (MMFR_PHYADDR(s->mmfr) != 1) {
        return s->mmfr |= 0xffff;
    }

    switch (MMFR_REGNUM(s->mmfr)) {
    case MII_BMCR:
        v = MII_BMCR_SPEED | MII_BMCR_AUTOEN | MII_BMCR_FD;
        break;
    case MII_BMSR:
        v = MII_BMSR_100TX_FD | MII_BMSR_100TX_HD | MII_BMSR_10T_FD |
            MII_BMSR_10T_HD | MII_BMSR_MFPS | MII_BMSR_AN_COMP |
            MII_BMSR_AUTONEG | MII_BMSR_LINK_ST;
        break;
    case MII_PHYID1:
        v = DP83848_PHYID1;
        break;
    case MII_PHYID2:
        v = DP83848_PHYID2;
        break;
    case MII_ANAR:
        v = MII_ANAR_TXFD | MII_ANAR_TX | MII_ANAR_10FD |
            MII_ANAR_10 | MII_ANAR_CSMACD;
        break;
    case MII_ANLPAR:
        v = MII_ANLPAR_ACK | MII_ANLPAR_TXFD | MII_ANLPAR_TX |
            MII_ANLPAR_10FD | MII_ANLPAR_10 | MII_ANLPAR_CSMACD;
        break;
    default:
        v = 0xffff;
        break;
    }
    s->mmfr = (s->mmfr & ~0xffff) | v;
    return s->mmfr;
}

static uint64_t mcf_fec_read(void *opaque, hwaddr addr,
                             unsigned size)
{
    mcf_fec_state *s = (mcf_fec_state *)opaque;
    switch (addr & 0x3ff) {
    case 0x004: return s->eir;
    case 0x008: return s->eimr;
    case 0x010: return s->rx_enabled ? (1 << 24) : 0; /* RDAR */
    case 0x014: return 0; /* TDAR */
    case 0x024: return s->ecr;
    case 0x040: return mcf_fec_read_mdio(s);
    case 0x044: return s->mscr;
    case 0x064: return 0; /* MIBC */
    case 0x084: return s->rcr;
    case 0x0c4: return s->tcr;
    case 0x0e4: /* PALR */
        return (s->conf.macaddr.a[0] << 24) | (s->conf.macaddr.a[1] << 16)
               | (s->conf.macaddr.a[2] << 8) | s->conf.macaddr.a[3];
        break;
    case 0x0e8: /* PAUR */
        return (s->conf.macaddr.a[4] << 24) | (s->conf.macaddr.a[5] << 16) | 0x8808;
    case 0x0ec: return 0x10000; /* OPD */
    case 0x118: return 0;
    case 0x11c: return 0;
    case 0x120: return 0;
    case 0x124: return 0;
    case 0x144: return s->tfwr;
    case 0x14c: return 0x600;
    case 0x150: return s->rfsr;
    case 0x180: return s->erdsr;
    case 0x184: return s->etdsr;
    case 0x188: return s->emrbr;
    default:
        hw_error("mcf_fec_read: Bad address 0x%x\n", (int)addr);
        return 0;
    }
}

static void mcf_fec_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned size)
{
    mcf_fec_state *s = (mcf_fec_state *)opaque;
    switch (addr & 0x3ff) {
    case 0x004:
        s->eir &= ~value;
        break;
    case 0x008:
        s->eimr = value;
        break;
    case 0x010: /* RDAR */
        if ((s->ecr & FEC_EN) && !s->rx_enabled) {
            DPRINTF("RX enable\n");
            mcf_fec_enable_rx(s);
        }
        break;
    case 0x014: /* TDAR */
        if (s->ecr & FEC_EN) {
            mcf_fec_do_tx(s);
        }
        break;
    case 0x024:
        s->ecr = value;
        if (value & FEC_RESET) {
            DPRINTF("Reset\n");
            mcf_fec_reset(s);
        }
        if ((s->ecr & FEC_EN) == 0) {
            s->rx_enabled = 0;
        }
        break;
    case 0x040:
        s->mmfr = value;
        s->eir |= FEC_INT_MII;
        break;
    case 0x044:
        s->mscr = value & 0xfe;
        break;
    case 0x064:
        /* TODO: Implement MIB.  */
        break;
    case 0x084:
        s->rcr = value & 0x07ff003f;
        /* TODO: Implement LOOP mode.  */
        break;
    case 0x0c4: /* TCR */
        /* We transmit immediately, so raise GRA immediately.  */
        s->tcr = value;
        if (value & 1) {
            s->eir |= FEC_INT_GRA;
        }
        break;
    case 0x0e4: /* PALR */
        s->conf.macaddr.a[0] = value >> 24;
        s->conf.macaddr.a[1] = value >> 16;
        s->conf.macaddr.a[2] = value >> 8;
        s->conf.macaddr.a[3] = value;
        break;
    case 0x0e8: /* PAUR */
        s->conf.macaddr.a[4] = value >> 24;
        s->conf.macaddr.a[5] = value >> 16;
        break;
    case 0x0ec:
        /* OPD */
        break;
    case 0x118:
    case 0x11c:
    case 0x120:
    case 0x124:
        /* TODO: implement MAC hash filtering.  */
        break;
    case 0x144:
        s->tfwr = value & 3;
        break;
    case 0x14c:
        /* FRBR writes ignored.  */
        break;
    case 0x150:
        s->rfsr = (value & 0x3fc) | 0x400;
        break;
    case 0x180:
        s->erdsr = value & ~3;
        s->rx_descriptor = s->erdsr;
        break;
    case 0x184:
        s->etdsr = value & ~3;
        s->tx_descriptor = s->etdsr;
        break;
    case 0x188:
        s->emrbr = value & 0x7f0;
        break;
    default:
        hw_error("mcf_fec_write: Bad address 0x%x\n", (int)addr);
    }
    mcf_fec_update(s);
}

static int mcf_fec_have_receive_space(mcf_fec_state *s, size_t want)
{
    mcf_fec_bd bd;
    uint32_t addr;

    /* Walk the descriptor list to determine if we have enough buffer space.  */
    addr = s->rx_descriptor;
    while (want > 0) {
        mcf_fec_read_bd(&bd, addr);
        if ((bd.flags & FEC_BD_E) == 0) {
            return 0;
        }
        if (want < s->emrbr) {
            return 1;
        }
        want -= s->emrbr;
        /* Advance to the next descriptor.  */
        if ((bd.flags & FEC_BD_W) != 0) {
            addr = s->erdsr;
        } else {
            addr += 8;
        }
    }
    return 0;
}

static ssize_t mcf_fec_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    mcf_fec_state *s = qemu_get_nic_opaque(nc);
    mcf_fec_bd bd;
    uint32_t flags = 0;
    uint32_t addr;
    uint32_t crc;
    uint32_t buf_addr;
    uint8_t *crc_ptr;
    unsigned int buf_len;
    size_t retsize;

    DPRINTF("do_rx len %zu\n", size);
    if (!s->rx_enabled) {
        return -1;
    }
    /* 4 bytes for the CRC.  */
    size += 4;
    crc = cpu_to_be32(crc32(~0, buf, size));
    crc_ptr = (uint8_t *)&crc;
    /* Huge frames are truncated.  */
    if (size > FEC_MAX_FRAME_SIZE) {
        size = FEC_MAX_FRAME_SIZE;
        flags |= FEC_BD_TR | FEC_BD_LG;
    }
    /* Frames larger than the user limit just set error flags.  */
    if (size > (s->rcr >> 16)) {
        flags |= FEC_BD_LG;
    }
    /* Check if we have enough space in the current descriptors.  */
    if (!mcf_fec_have_receive_space(s, size)) {
        return 0;
    }
    addr = s->rx_descriptor;
    retsize = size;
    while (size > 0) {
        mcf_fec_read_bd(&bd, addr);
        buf_len = (size <= s->emrbr) ? size : s->emrbr;
        bd.length = buf_len;
        size -= buf_len;
        DPRINTF("rx_bd %x length %d\n", addr, bd.length);
        /* The last 4 bytes are the CRC.  */
        if (size < 4) {
            buf_len += size - 4;
        }
        buf_addr = bd.data;
        cpu_physical_memory_write(buf_addr, buf, buf_len);
        buf += buf_len;
        if (size < 4) {
            cpu_physical_memory_write(buf_addr + buf_len, crc_ptr, 4 - size);
            crc_ptr += 4 - size;
        }
        bd.flags &= ~FEC_BD_E;
        if (size == 0) {
            /* Last buffer in frame.  */
            bd.flags |= flags | FEC_BD_L;
            DPRINTF("rx frame flags %04x\n", bd.flags);
            s->eir |= FEC_INT_RXF;
        } else {
            s->eir |= FEC_INT_RXB;
        }
        mcf_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor.  */
        if ((bd.flags & FEC_BD_W) != 0) {
            addr = s->erdsr;
        } else {
            addr += 8;
        }
    }
    s->rx_descriptor = addr;
    mcf_fec_enable_rx(s);
    mcf_fec_update(s);
    return retsize;
}

static const MemoryRegionOps mcf_fec_ops = {
    .read = mcf_fec_read,
    .write = mcf_fec_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static NetClientInfo net_mcf_fec_info = {
    .type = NET_CLIENT_OPTIONS_KIND_NIC,
    .size = sizeof(NICState),
    .receive = mcf_fec_receive,
};

void mcf_fec_init(MemoryRegion *sysmem, NICInfo *nd,
                  hwaddr base, qemu_irq *irq)
{
    mcf_fec_state *s;

    qemu_check_nic_model(nd, "mcf_fec");

    s = (mcf_fec_state *)g_malloc0(sizeof(mcf_fec_state));
    s->sysmem = sysmem;
    s->irq = irq;

    memory_region_init_io(&s->iomem, NULL, &mcf_fec_ops, s, "fec", 0x400);
    memory_region_add_subregion(sysmem, base, &s->iomem);

    s->conf.macaddr = nd->macaddr;
    s->conf.peers.ncs[0] = nd->netdev;

    s->nic = qemu_new_nic(&net_mcf_fec_info, &s->conf, nd->model, nd->name, s);

    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}
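
/*
 * Typical board wiring (illustrative sketch only; fec_base_addr and
 * fec_first_irq below are placeholders, not values from a real board model):
 *
 *     mcf_fec_init(address_space_mem, &nd_table[0],
 *                  fec_base_addr, pic + fec_first_irq);
 *
 * The irq argument must point at FEC_NUM_IRQ consecutive qemu_irq lines,
 * one for each entry of mcf_fec_irq_map[].
 */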