/* SPDX-License-Identifier: GPL-2.0-or-later */
/*------------------------------------------------------------------------
 . smc91x.h - macros for SMSC's 91C9x/91C1xx single-chip Ethernet device.
 .
 . Copyright (C) 1996 by Erik Stahlman
 . Copyright (C) 2001 Standard Microsystems Corporation
 .	Developed by Simple Network Magic Corporation
 . Copyright (C) 2003 Monta Vista Software, Inc.
 .	Unified SMC91x driver by Nicolas Pitre
 .
 .
 . Information contained in this file was obtained from the LAN91C111
 . manual from SMC.  To get a copy, if you really want one, you can find
 . information under www.smsc.com.
 .
 . Authors
 .	Erik Stahlman		<erik@vt.edu>
 .	Daris A Nevil		<dnevil@snmc.com>
 .	Nicolas Pitre		<nico@fluxnic.net>
 .
 ---------------------------------------------------------------------------*/
#ifndef _SMC91X_H_
#define _SMC91X_H_

#include <linux/dmaengine.h>
#include <linux/smc91x.h>

/*
 * Any 16-bit access is performed with two 8-bit accesses if the hardware
 * can't do it directly. Most registers are 16-bit so those are mandatory.
 */
#define SMC_outw_b(x, a, r)						\
	do {								\
		unsigned int __val16 = (x);				\
		unsigned int __reg = (r);				\
		SMC_outb(__val16, a, __reg);				\
		SMC_outb(__val16 >> 8, a, __reg + (1 << SMC_IO_SHIFT));	\
	} while (0)

#define SMC_inw_b(a, r)							\
	({								\
		unsigned int __val16;					\
		unsigned int __reg = r;					\
		__val16 = SMC_inb(a, __reg);				\
		__val16 |= SMC_inb(a, __reg + (1 << SMC_IO_SHIFT)) << 8; \
		__val16;						\
	})

/*
 * Define your architecture specific bus configuration parameters here.
 */

#if defined(CONFIG_ARM)

#include <asm/mach-types.h>

/* Now the bus width is specified in the platform data
 * pretend here to support all I/O access types
 */
#define SMC_CAN_USE_8BIT	1
#define SMC_CAN_USE_16BIT	1
#define SMC_CAN_USE_32BIT	1
#define SMC_NOWAIT		1

#define SMC_IO_SHIFT		(lp->io_shift)

#define SMC_inb(a, r)		readb((a) + (r))
#define SMC_inw(a, r)							\
	({								\
		unsigned int __smc_r = r;				\
		SMC_16BIT(lp) ? readw((a) + __smc_r) :			\
		SMC_8BIT(lp) ? SMC_inw_b(a, __smc_r) :			\
		({ BUG(); 0; });					\
	})

#define SMC_inl(a, r)		readl((a) + (r))
#define SMC_outb(v, a, r)	writeb(v, (a) + (r))
#define SMC_outw(lp, v, a, r)						\
	do {								\
		unsigned int __v = v, __smc_r = r;			\
		if (SMC_16BIT(lp))					\
			__SMC_outw(lp, __v, a, __smc_r);		\
		else if (SMC_8BIT(lp))					\
			SMC_outw_b(__v, a, __smc_r);			\
		else							\
			BUG();						\
	} while (0)

#define SMC_outl(v, a, r)	writel(v, (a) + (r))
#define SMC_insb(a, r, p, l)	readsb((a) + (r), p, l)
#define SMC_outsb(a, r, p, l)	writesb((a) + (r), p, l)
#define SMC_insw(a, r, p, l)	readsw((a) + (r), p, l)
#define SMC_outsw(a, r, p, l)	writesw((a) + (r), p, l)
#define SMC_insl(a, r, p, l)	readsl((a) + (r), p, l)
#define SMC_outsl(a, r, p, l)	writesl((a) + (r), p, l)
#define SMC_IRQ_FLAGS		(-1)	/* from resource */

/* We actually can't write halfwords properly if not word aligned */
static inline void _SMC_outw_align4(u16 val, void __iomem *ioaddr, int reg,
				    bool use_align4_workaround)
{
	if (use_align4_workaround) {
		unsigned int v = val << 16;
		v |= readl(ioaddr + (reg & ~2)) & 0xffff;
		writel(v, ioaddr + (reg & ~2));
	} else {
		writew(val, ioaddr + reg);
	}
}

#define __SMC_outw(lp, v, a, r)						\
	_SMC_outw_align4((v), (a), (r),					\
			 IS_BUILTIN(CONFIG_ARCH_PXA) && ((r) & 2) &&	\
			 (lp)->cfg.pxa_u16_align4)


#elif defined(CONFIG_SH_SH4202_MICRODEV)

#define SMC_CAN_USE_8BIT	0
#define SMC_CAN_USE_16BIT	1
#define SMC_CAN_USE_32BIT	0

#define SMC_inb(a, r)		inb((a) + (r) - 0xa0000000)
#define SMC_inw(a, r)		inw((a) + (r) - 0xa0000000)
#define SMC_inl(a, r)		inl((a) + (r) - 0xa0000000)
#define SMC_outb(v, a, r)	outb(v, (a) + (r) - 0xa0000000)
#define SMC_outw(lp, v, a, r)	outw(v, (a) + (r) - 0xa0000000)
#define SMC_outl(v, a, r)	outl(v, (a) + (r) - 0xa0000000)
#define SMC_insl(a, r, p, l)	insl((a) + (r) - 0xa0000000, p, l)
#define SMC_outsl(a, r, p, l)	outsl((a) + (r) - 0xa0000000, p, l)
#define SMC_insw(a, r, p, l)	insw((a) + (r) - 0xa0000000, p, l)
#define SMC_outsw(a, r, p, l)	outsw((a) + (r) - 0xa0000000, p, l)

#define SMC_IRQ_FLAGS		(0)

#elif defined(CONFIG_ATARI)

#define SMC_CAN_USE_8BIT	1
#define SMC_CAN_USE_16BIT	1
#define SMC_CAN_USE_32BIT	1
#define SMC_NOWAIT		1

#define SMC_inb(a, r)		readb((a) + (r))
#define SMC_inw(a, r)		readw((a) + (r))
#define SMC_inl(a, r)		readl((a) + (r))
#define SMC_outb(v, a, r)	writeb(v, (a) + (r))
#define SMC_outw(lp, v, a, r)	writew(v, (a) + (r))
#define SMC_outl(v, a, r)	writel(v, (a) + (r))
#define SMC_insw(a, r, p, l)	readsw((a) + (r), p, l)
#define SMC_outsw(a, r, p, l)	writesw((a) + (r), p, l)
#define SMC_insl(a, r, p, l)	readsl((a) + (r), p, l)
#define SMC_outsl(a, r, p, l)	writesl((a) + (r), p, l)

#define RPC_LSA_DEFAULT		RPC_LED_100_10
#define RPC_LSB_DEFAULT		RPC_LED_TX_RX

#elif defined(CONFIG_COLDFIRE)

#define SMC_CAN_USE_8BIT	0
#define SMC_CAN_USE_16BIT	1
#define SMC_CAN_USE_32BIT	0
#define SMC_NOWAIT		1

static inline void mcf_insw(void *a, unsigned char *p, int l)
{
	u16 *wp = (u16 *) p;
	while (l-- > 0)
		*wp++ = readw(a);
}

static inline void mcf_outsw(void *a, unsigned char *p, int l)
{
	u16 *wp = (u16 *) p;
	while (l-- > 0)
		writew(*wp++, a);
}

#define SMC_inw(a, r)		_swapw(readw((a) + (r)))
#define SMC_outw(lp, v, a, r)	writew(_swapw(v), (a) + (r))
#define SMC_insw(a, r, p, l)	mcf_insw(a + r, p, l)
#define SMC_outsw(a, r, p, l)	mcf_outsw(a + r, p, l)

#define SMC_IRQ_FLAGS		0

#else

/*
 * Default configuration
 */

#define SMC_CAN_USE_8BIT	1
#define SMC_CAN_USE_16BIT	1
#define SMC_CAN_USE_32BIT	1
#define SMC_NOWAIT		1

#define SMC_IO_SHIFT		(lp->io_shift)

#define SMC_inb(a, r)		ioread8((a) + (r))
#define SMC_inw(a, r)		ioread16((a) + (r))
#define SMC_inl(a, r)		ioread32((a) + (r))
#define SMC_outb(v, a, r)	iowrite8(v, (a) + (r))
#define SMC_outw(lp, v, a, r)	iowrite16(v, (a) + (r))
#define SMC_outl(v, a, r)	iowrite32(v, (a) + (r))
#define SMC_insw(a, r, p, l)	ioread16_rep((a) + (r), p, l)
#define SMC_outsw(a, r, p, l)	iowrite16_rep((a) + (r), p, l)
#define SMC_insl(a, r, p, l)	ioread32_rep((a) + (r), p, l)
#define SMC_outsl(a, r, p, l)	iowrite32_rep((a) + (r), p, l)

#define RPC_LSA_DEFAULT		RPC_LED_100_10
#define RPC_LSB_DEFAULT		RPC_LED_TX_RX

#endif


/* store this information for the driver.. */
struct smc_local {
	/*
	 * If I have to wait until memory is available to send a
	 * packet, I will store the skbuff here, until I get the
	 * desired memory.  Then, I'll send it out and free it.
	 */
	struct sk_buff *pending_tx_skb;
	struct tasklet_struct tx_task;

	struct gpio_desc *power_gpio;
	struct gpio_desc *reset_gpio;

	/* version/revision of the SMC91x chip */
	int	version;

	/* Contains the current active transmission mode */
	int	tcr_cur_mode;

	/* Contains the current active receive mode */
	int	rcr_cur_mode;

	/* Contains the current active receive/phy mode */
	int	rpc_cur_mode;
	int	ctl_rfduplx;
	int	ctl_rspeed;

	u32	msg_enable;
	u32	phy_type;
	struct mii_if_info mii;

	/* work queue */
	struct work_struct phy_configure;
	struct net_device *dev;
	int	work_pending;

	spinlock_t lock;

#ifdef CONFIG_ARCH_PXA
	/* DMA needs the physical address of the chip */
	u_long physaddr;
	struct device *device;
#endif
	struct dma_chan *dma_chan;
	void __iomem *base;
	void __iomem *datacs;

	/* the low address lines on some platforms aren't connected... */
	int	io_shift;
	/* on some platforms a u16 write must be 4-bytes aligned */
	bool	half_word_align4;

	struct smc91x_platdata cfg;
};

#define SMC_8BIT(p)	((p)->cfg.flags & SMC91X_USE_8BIT)
#define SMC_16BIT(p)	((p)->cfg.flags & SMC91X_USE_16BIT)
#define SMC_32BIT(p)	((p)->cfg.flags & SMC91X_USE_32BIT)
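
/*
 * Illustrative sketch only, not part of the original header: the three
 * flag tests above key off the smc91x_platdata handed in by the platform
 * (flags defined in <linux/smc91x.h>).  A board wired for 16-bit,
 * no-wait-state access might describe itself roughly as below; the
 * variable name is made up for the example.
 */
#if 0	/* example only, never compiled */
static struct smc91x_platdata example_smc91x_pdata = {
	.flags	= SMC91X_USE_16BIT | SMC91X_NOWAIT,
};
#endif
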
#ifdef CONFIG_ARCH_PXA
/*
 * Let's use the DMA engine on the XScale PXA2xx for RX packets. This is
 * always happening in irq context so no need to worry about races.  TX is
 * different and probably not worth it for that reason, and not as critical
 * as RX which can overrun memory and lose packets.
 */
#include <linux/dma-mapping.h>

#ifdef SMC_insl
#undef SMC_insl
#define SMC_insl(a, r, p, l)						\
	smc_pxa_dma_insl(a, lp, r, dev->dma, p, l)
static inline void
smc_pxa_dma_inpump(struct smc_local *lp, u_char *buf, int len)
{
	dma_addr_t dmabuf;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	enum dma_status status;
	struct dma_tx_state state;

	dmabuf = dma_map_single(lp->device, buf, len, DMA_FROM_DEVICE);
	tx = dmaengine_prep_slave_single(lp->dma_chan, dmabuf, len,
					 DMA_DEV_TO_MEM, 0);
	if (tx) {
		cookie = dmaengine_submit(tx);
		dma_async_issue_pending(lp->dma_chan);
		do {
			status = dmaengine_tx_status(lp->dma_chan, cookie,
						     &state);
			cpu_relax();
		} while (status != DMA_COMPLETE && status != DMA_ERROR &&
			 state.residue);
		dmaengine_terminate_all(lp->dma_chan);
	}
	dma_unmap_single(lp->device, dmabuf, len, DMA_FROM_DEVICE);
}

static inline void
smc_pxa_dma_insl(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
		 u_char *buf, int len)
{
	struct dma_slave_config config;
	int ret;

	/* fallback if no DMA available */
	if (!lp->dma_chan) {
		readsl(ioaddr + reg, buf, len);
		return;
	}

	/* 64 bit alignment is required for memory to memory DMA */
	if ((long)buf & 4) {
		*((u32 *)buf) = SMC_inl(ioaddr, reg);
		buf += 4;
		len--;
	}

	memset(&config, 0, sizeof(config));
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.src_addr = lp->physaddr + reg;
	config.dst_addr = lp->physaddr + reg;
	config.src_maxburst = 32;
	config.dst_maxburst = 32;
	ret = dmaengine_slave_config(lp->dma_chan, &config);
	if (ret) {
		dev_err(lp->device, "dma channel configuration failed: %d\n",
			ret);
		return;
	}

	len *= 4;
	smc_pxa_dma_inpump(lp, buf, len);
}
#endif

#ifdef SMC_insw
#undef SMC_insw
#define SMC_insw(a, r, p, l)						\
	smc_pxa_dma_insw(a, lp, r, dev->dma, p, l)
static inline void
smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
		 u_char *buf, int len)
{
	struct dma_slave_config config;
	int ret;

	/* fallback if no DMA available */
	if (!lp->dma_chan) {
		readsw(ioaddr + reg, buf, len);
		return;
	}

	/* 64 bit alignment is required for memory to memory DMA */
	while ((long)buf & 6) {
		*((u16 *)buf) = SMC_inw(ioaddr, reg);
		buf += 2;
		len--;
	}

	memset(&config, 0, sizeof(config));
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	config.src_addr = lp->physaddr + reg;
	config.dst_addr = lp->physaddr + reg;
	config.src_maxburst = 32;
	config.dst_maxburst = 32;
	ret = dmaengine_slave_config(lp->dma_chan, &config);
	if (ret) {
		dev_err(lp->device, "dma channel configuration failed: %d\n",
			ret);
		return;
	}

	len *= 2;
	smc_pxa_dma_inpump(lp, buf, len);
}
#endif

#endif  /* CONFIG_ARCH_PXA */


/*
 * Everything a particular hardware setup needs should have been defined
 * at this point.  Add stubs for the undefined cases, mainly to avoid
 * compilation warnings since they'll be optimized away, or to prevent buggy
 * use of them.
 */

#if ! SMC_CAN_USE_32BIT
#define SMC_inl(ioaddr, reg)		({ BUG(); 0; })
#define SMC_outl(x, ioaddr, reg)	BUG()
#define SMC_insl(a, r, p, l)		BUG()
#define SMC_outsl(a, r, p, l)		BUG()
#endif

#if !defined(SMC_insl) || !defined(SMC_outsl)
#define SMC_insl(a, r, p, l)		BUG()
#define SMC_outsl(a, r, p, l)		BUG()
#endif

#if ! SMC_CAN_USE_16BIT

#define SMC_outw(lp, x, ioaddr, reg)	SMC_outw_b(x, ioaddr, reg)
#define SMC_inw(ioaddr, reg)		SMC_inw_b(ioaddr, reg)
#define SMC_insw(a, r, p, l)		BUG()
#define SMC_outsw(a, r, p, l)		BUG()

#endif

#if !defined(SMC_insw) || !defined(SMC_outsw)
#define SMC_insw(a, r, p, l)		BUG()
#define SMC_outsw(a, r, p, l)		BUG()
#endif

#if ! SMC_CAN_USE_8BIT
#undef SMC_inb
#define SMC_inb(ioaddr, reg)		({ BUG(); 0; })
#undef SMC_outb
#define SMC_outb(x, ioaddr, reg)	BUG()
#define SMC_insb(a, r, p, l)		BUG()
#define SMC_outsb(a, r, p, l)		BUG()
#endif

#if !defined(SMC_insb) || !defined(SMC_outsb)
#define SMC_insb(a, r, p, l)		BUG()
#define SMC_outsb(a, r, p, l)		BUG()
#endif

#ifndef SMC_CAN_USE_DATACS
#define SMC_CAN_USE_DATACS	0
#endif

#ifndef SMC_IO_SHIFT
#define SMC_IO_SHIFT	0
#endif

#ifndef SMC_IRQ_FLAGS
#define SMC_IRQ_FLAGS		IRQF_TRIGGER_RISING
#endif

#ifndef SMC_INTERRUPT_PREAMBLE
#define SMC_INTERRUPT_PREAMBLE
#endif


/* Because of bank switching, the LAN91x uses only 16 I/O ports */
#define SMC_IO_EXTENT	(16 << SMC_IO_SHIFT)
#define SMC_DATA_EXTENT	(4)

/*
 . Bank Select Register:
 .
 .	yyyy yyyy 0000 00xx
 .	xx		= bank number
 .	yyyy yyyy	= 0x33, for identification purposes.
*/
#define BANK_SELECT		(14 << SMC_IO_SHIFT)
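
/*
 * Illustrative helper, not used by the driver: per the layout above, a
 * Bank Select readback should carry the 0x33 signature in its high byte,
 * which is the same signature the SMC_DEBUG build of SMC_REG() below
 * compares against.  The function name is made up for this example.
 */
static inline bool smc91x_example_bsr_looks_sane(unsigned int bsr)
{
	return (bsr & 0xff00) == 0x3300;
}
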

// Transmit Control Register
/* BANK 0  */
#define TCR_REG(lp)	SMC_REG(lp, 0x0000, 0)
#define TCR_ENABLE	0x0001	// When 1 we can transmit
#define TCR_LOOP	0x0002	// Controls output pin LBK
#define TCR_FORCOL	0x0004	// When 1 will force a collision
#define TCR_PAD_EN	0x0080	// When 1 will pad tx frames < 64 bytes w/0
#define TCR_NOCRC	0x0100	// When 1 will not append CRC to tx frames
#define TCR_MON_CSN	0x0400	// When 1 tx monitors carrier
#define TCR_FDUPLX	0x0800	// When 1 enables full duplex operation
#define TCR_STP_SQET	0x1000	// When 1 stops tx if Signal Quality Error
#define TCR_EPH_LOOP	0x2000	// When 1 enables EPH block loopback
#define TCR_SWFDUP	0x8000	// When 1 enables Switched Full Duplex mode

#define TCR_CLEAR	0	/* do NOTHING */
/* the default settings for the TCR register : */
#define TCR_DEFAULT	(TCR_ENABLE | TCR_PAD_EN)


// EPH Status Register
/* BANK 0  */
#define EPH_STATUS_REG(lp)	SMC_REG(lp, 0x0002, 0)
#define ES_TX_SUC	0x0001	// Last TX was successful
#define ES_SNGL_COL	0x0002	// Single collision detected for last tx
#define ES_MUL_COL	0x0004	// Multiple collisions detected for last tx
#define ES_LTX_MULT	0x0008	// Last tx was a multicast
#define ES_16COL	0x0010	// 16 Collisions Reached
#define ES_SQET		0x0020	// Signal Quality Error Test
#define ES_LTXBRD	0x0040	// Last tx was a broadcast
#define ES_TXDEFR	0x0080	// Transmit Deferred
#define ES_LATCOL	0x0200	// Late collision detected on last tx
#define ES_LOSTCARR	0x0400	// Lost Carrier Sense
#define ES_EXC_DEF	0x0800	// Excessive Deferral
#define ES_CTR_ROL	0x1000	// Counter Roll Over indication
#define ES_LINK_OK	0x4000	// Driven by inverted value of nLNK pin
#define ES_TXUNRN	0x8000	// Tx Underrun


// Receive Control Register
/* BANK 0  */
#define RCR_REG(lp)	SMC_REG(lp, 0x0004, 0)
#define RCR_RX_ABORT	0x0001	// Set if a rx frame was aborted
#define RCR_PRMS	0x0002	// Enable promiscuous mode
#define RCR_ALMUL	0x0004	// When set accepts all multicast frames
#define RCR_RXEN	0x0100	// IFF this is set, we can receive packets
#define RCR_STRIP_CRC	0x0200	// When set strips CRC from rx packets
#define RCR_ABORT_ENB	0x0200	// When set will abort rx on collision
#define RCR_FILT_CAR	0x0400	// When set filters leading 12 bits of carrier
#define RCR_SOFTRST	0x8000	// resets the chip

/* the normal settings for the RCR register : */
#define RCR_DEFAULT	(RCR_STRIP_CRC | RCR_RXEN)
#define RCR_CLEAR	0x0	// set it to a base state


// Counter Register
/* BANK 0  */
#define COUNTER_REG(lp)	SMC_REG(lp, 0x0006, 0)


// Memory Information Register
/* BANK 0  */
#define MIR_REG(lp)	SMC_REG(lp, 0x0008, 0)


// Receive/Phy Control Register
/* BANK 0  */
#define RPC_REG(lp)	SMC_REG(lp, 0x000A, 0)
#define RPC_SPEED	0x2000	// When 1 PHY is in 100Mbps mode.
#define RPC_DPLX	0x1000	// When 1 PHY is in Full-Duplex Mode
#define RPC_ANEG	0x0800	// When 1 PHY is in Auto-Negotiate Mode
#define RPC_LSXA_SHFT	5	// Bits to shift LS2A,LS1A,LS0A to lsb
#define RPC_LSXB_SHFT	2	// Bits to get LS2B,LS1B,LS0B to lsb

#ifndef RPC_LSA_DEFAULT
#define RPC_LSA_DEFAULT	RPC_LED_100
#endif
#ifndef RPC_LSB_DEFAULT
#define RPC_LSB_DEFAULT	RPC_LED_FD
#endif

#define RPC_DEFAULT	(RPC_ANEG | RPC_SPEED | RPC_DPLX)


/* Bank 0 0x0C is reserved */

// Bank Select Register
/* All Banks */
#define BSR_REG		0x000E


// Configuration Reg
/* BANK 1 */
#define CONFIG_REG(lp)	SMC_REG(lp, 0x0000, 1)
#define CONFIG_EXT_PHY	0x0200	// 1=external MII, 0=internal Phy
#define CONFIG_GPCNTRL	0x0400	// Inverse value drives pin nCNTRL
#define CONFIG_NO_WAIT	0x1000	// When 1 no extra wait states on ISA bus
#define CONFIG_EPH_POWER_EN 0x8000 // When 0 EPH is placed into low power mode.

// Default is powered-up, Internal Phy, Wait States, and pin nCNTRL=low
#define CONFIG_DEFAULT	(CONFIG_EPH_POWER_EN)


// Base Address Register
/* BANK 1 */
#define BASE_REG(lp)	SMC_REG(lp, 0x0002, 1)


// Individual Address Registers
/* BANK 1 */
#define ADDR0_REG(lp)	SMC_REG(lp, 0x0004, 1)
#define ADDR1_REG(lp)	SMC_REG(lp, 0x0006, 1)
#define ADDR2_REG(lp)	SMC_REG(lp, 0x0008, 1)


// General Purpose Register
/* BANK 1 */
#define GP_REG(lp)	SMC_REG(lp, 0x000A, 1)


// Control Register
/* BANK 1 */
#define CTL_REG(lp)	SMC_REG(lp, 0x000C, 1)
#define CTL_RCV_BAD	0x4000 // When 1 bad CRC packets are received
#define CTL_AUTO_RELEASE 0x0800 // When 1 tx pages are released automatically
#define CTL_LE_ENABLE	0x0080 // When 1 enables Link Error interrupt
#define CTL_CR_ENABLE	0x0040 // When 1 enables Counter Rollover interrupt
#define CTL_TE_ENABLE	0x0020 // When 1 enables Transmit Error interrupt
#define CTL_EEPROM_SELECT 0x0004 // Controls EEPROM reload & store
#define CTL_RELOAD	0x0002 // When set reads EEPROM into registers
#define CTL_STORE	0x0001 // When set stores registers into EEPROM


// MMU Command Register
/* BANK 2 */
#define MMU_CMD_REG(lp)	SMC_REG(lp, 0x0000, 2)
#define MC_BUSY		1	// When 1 the last release has not completed
#define MC_NOP		(0<<5)	// No Op
#define MC_ALLOC	(1<<5)	// OR with number of 256 byte packets
#define MC_RESET	(2<<5)	// Reset MMU to initial state
#define MC_REMOVE	(3<<5)	// Remove the current rx packet
#define MC_RELEASE	(4<<5)	// Remove and release the current rx packet
#define MC_FREEPKT	(5<<5)	// Release packet in PNR register
#define MC_ENQUEUE	(6<<5)	// Enqueue the packet for transmit
#define MC_RSTTXFIFO	(7<<5)	// Reset the TX FIFOs


// Packet Number Register
/* BANK 2 */
#define PN_REG(lp)	SMC_REG(lp, 0x0002, 2)


// Allocation Result Register
/* BANK 2 */
#define AR_REG(lp)	SMC_REG(lp, 0x0003, 2)
#define AR_FAILED	0x80	// Allocation Failed


// TX FIFO Ports Register
/* BANK 2 */
#define TXFIFO_REG(lp)	SMC_REG(lp, 0x0004, 2)
#define TXFIFO_TEMPTY	0x80	// TX FIFO Empty

// RX FIFO Ports Register
/* BANK 2 */
#define RXFIFO_REG(lp)	SMC_REG(lp, 0x0005, 2)
#define RXFIFO_REMPTY	0x80	// RX FIFO Empty

#define FIFO_REG(lp)	SMC_REG(lp, 0x0004, 2)

// Pointer Register
/* BANK 2 */
#define PTR_REG(lp)	SMC_REG(lp, 0x0006, 2)
#define PTR_RCV		0x8000 // 1=Receive area, 0=Transmit area
#define PTR_AUTOINC	0x4000 // Auto increment the pointer on each access
#define PTR_READ	0x2000 // When 1 the operation is a read


// Data Register
/* BANK 2 */
#define DATA_REG(lp)	SMC_REG(lp, 0x0008, 2)


// Interrupt Status/Acknowledge Register
/* BANK 2 */
#define INT_REG(lp)	SMC_REG(lp, 0x000C, 2)


// Interrupt Mask Register
/* BANK 2 */
#define IM_REG(lp)	SMC_REG(lp, 0x000D, 2)
#define IM_MDINT	0x80 // PHY MI Register 18 Interrupt
#define IM_ERCV_INT	0x40 // Early Receive Interrupt
#define IM_EPH_INT	0x20 // Set by Ethernet Protocol Handler section
#define IM_RX_OVRN_INT	0x10 // Set by Receiver Overruns
#define IM_ALLOC_INT	0x08 // Set when allocation request is completed
#define IM_TX_EMPTY_INT	0x04 // Set if the TX FIFO goes empty
#define IM_TX_INT	0x02 // Transmit Interrupt
#define IM_RCV_INT	0x01 // Receive Interrupt

// Multicast Table Registers
/* BANK 3 */
#define MCAST_REG1(lp)	SMC_REG(lp, 0x0000, 3)
#define MCAST_REG2(lp)	SMC_REG(lp, 0x0002, 3)
#define MCAST_REG3(lp)	SMC_REG(lp, 0x0004, 3)
#define MCAST_REG4(lp)	SMC_REG(lp, 0x0006, 3)


// Management Interface Register (MII)
/* BANK 3 */
#define MII_REG(lp)	SMC_REG(lp, 0x0008, 3)
#define MII_MSK_CRS100	0x4000 // Disables CRS100 detection during tx half dup
#define MII_MDOE	0x0008 // MII Output Enable
#define MII_MCLK	0x0004 // MII Clock, pin MDCLK
#define MII_MDI		0x0002 // MII Input, pin MDI
#define MII_MDO		0x0001 // MII Output, pin MDO


// Revision Register
/* BANK 3 */
/* ( hi: chip id   low: rev # ) */
#define REV_REG(lp)	SMC_REG(lp, 0x000A, 3)


// Early RCV Register
/* BANK 3 */
/* this is NOT on SMC9192 */
#define ERCV_REG(lp)	SMC_REG(lp, 0x000C, 3)
#define ERCV_RCV_DISCRD	0x0080 // When 1 discards a packet being received
#define ERCV_THRESHOLD	0x001F // ERCV Threshold Mask


// External Register
/* BANK 7 */
#define EXT_REG(lp)	SMC_REG(lp, 0x0000, 7)


#define CHIP_9192	3
#define CHIP_9194	4
#define CHIP_9195	5
#define CHIP_9196	6
#define CHIP_91100	7
#define CHIP_91100FD	8
#define CHIP_91111FD	9

static const char * chip_ids[ 16 ] =  {
	NULL, NULL, NULL,
	/* 3 */ "SMC91C90/91C92",
	/* 4 */ "SMC91C94",
	/* 5 */ "SMC91C95",
	/* 6 */ "SMC91C96",
	/* 7 */ "SMC91C100",
	/* 8 */ "SMC91C100FD",
	/* 9 */ "SMC91C11xFD",
	NULL, NULL, NULL,
	NULL, NULL, NULL};


/*
 . Receive status bits
*/
#define RS_ALGNERR	0x8000
#define RS_BRODCAST	0x4000
#define RS_BADCRC	0x2000
#define RS_ODDFRAME	0x1000
#define RS_TOOLONG	0x0800
#define RS_TOOSHORT	0x0400
#define RS_MULTICAST	0x0001
#define RS_ERRORS	(RS_ALGNERR | RS_BADCRC | RS_TOOLONG | RS_TOOSHORT)


/*
 * PHY IDs
 *  LAN83C183 == LAN91C111 Internal PHY
 */
#define PHY_LAN83C183	0x0016f840
#define PHY_LAN83C180	0x02821c50

/*
 * PHY Register Addresses (LAN91C111 Internal PHY)
 *
 * Generic PHY registers can be found in <linux/mii.h>
 *
 * These phy registers are specific to our on-board phy.
 */

// PHY Configuration Register 1
#define PHY_CFG1_REG		0x10
#define PHY_CFG1_LNKDIS		0x8000	// 1=Rx Link Detect Function disabled
#define PHY_CFG1_XMTDIS		0x4000	// 1=TP Transmitter Disabled
#define PHY_CFG1_XMTPDN		0x2000	// 1=TP Transmitter Powered Down
#define PHY_CFG1_BYPSCR		0x0400	// 1=Bypass scrambler/descrambler
#define PHY_CFG1_UNSCDS		0x0200	// 1=Unscramble Idle Reception Disable
#define PHY_CFG1_EQLZR		0x0100	// 1=Rx Equalizer Disabled
#define PHY_CFG1_CABLE		0x0080	// 1=STP(150ohm), 0=UTP(100ohm)
#define PHY_CFG1_RLVL0		0x0040	// 1=Rx Squelch level reduced by 4.5db
#define PHY_CFG1_TLVL_SHIFT	2	// Transmit Output Level Adjust
#define PHY_CFG1_TLVL_MASK	0x003C
#define PHY_CFG1_TRF_MASK	0x0003	// Transmitter Rise/Fall time


// PHY Configuration Register 2
#define PHY_CFG2_REG		0x11
#define PHY_CFG2_APOLDIS	0x0020	// 1=Auto Polarity Correction disabled
#define PHY_CFG2_JABDIS		0x0010	// 1=Jabber disabled
#define PHY_CFG2_MREG		0x0008	// 1=Multiple register access (MII mgt)
#define PHY_CFG2_INTMDIO	0x0004	// 1=Interrupt signaled with MDIO pulse

// PHY Status Output (and Interrupt status) Register
#define PHY_INT_REG		0x12	// Status Output (Interrupt Status)
#define PHY_INT_INT		0x8000	// 1=bits have changed since last read
#define PHY_INT_LNKFAIL		0x4000	// 1=Link Not detected
#define PHY_INT_LOSSSYNC	0x2000	// 1=Descrambler has lost sync
#define PHY_INT_CWRD		0x1000	// 1=Invalid 4B5B code detected on rx
#define PHY_INT_SSD		0x0800	// 1=No Start Of Stream detected on rx
#define PHY_INT_ESD		0x0400	// 1=No End Of Stream detected on rx
#define PHY_INT_RPOL		0x0200	// 1=Reverse Polarity detected
#define PHY_INT_JAB		0x0100	// 1=Jabber detected
#define PHY_INT_SPDDET		0x0080	// 1=100Base-TX mode, 0=10Base-T mode
#define PHY_INT_DPLXDET		0x0040	// 1=Device in Full Duplex

// PHY Interrupt/Status Mask Register
#define PHY_MASK_REG		0x13	// Interrupt Mask
// Uses the same bit definitions as PHY_INT_REG


/*
 * SMC91C96 ethernet config and status registers.
 * These are in the "attribute" space.
 */
#define ECOR			0x8000
#define ECOR_RESET		0x80
#define ECOR_LEVEL_IRQ		0x40
#define ECOR_WR_ATTRIB		0x04
#define ECOR_ENABLE		0x01

#define ECSR			0x8002
#define ECSR_IOIS8		0x20
#define ECSR_PWRDWN		0x04
#define ECSR_INT		0x02

#define ATTRIB_SIZE		((64*1024) << SMC_IO_SHIFT)


/*
 * Macros to abstract register access according to the data bus
 * capabilities.  Please use those and not the in/out primitives.
 * Note: the following macros do *not* select the bank -- this must
 * be done separately as needed in the main code.  The SMC_REG() macro
 * only uses the bank argument for debugging purposes (when enabled).
 *
 * Note: despite inline functions being safer, everything leading to this
 * should preferably be macros to let BUG() display the line number in
 * the core source code since we're interested in the top call site
 * not in any inline function location.
 */

#if SMC_DEBUG > 0
#define SMC_REG(lp, reg, bank)						\
	({								\
		int __b = SMC_CURRENT_BANK(lp);				\
		if (unlikely((__b & ~0xf0) != (0x3300 | bank))) {	\
			pr_err("%s: bank reg screwed (0x%04x)\n",	\
			       CARDNAME, __b);				\
			BUG();						\
		}							\
		reg<<SMC_IO_SHIFT;					\
	})
#else
#define SMC_REG(lp, reg, bank)	(reg<<SMC_IO_SHIFT)
#endif

/*
 * Hack Alert: Some setups just can't write 8 or 16 bits reliably when not
 * aligned to a 32 bit boundary.  I tell you that does exist!
 * Fortunately the affected register accesses can be easily worked around
 * since we can write zeroes to the preceding 16 bits without adverse
 * effects and use a 32-bit access.
 *
 * Enforce it on any 32-bit capable setup for now.
 */
#define SMC_MUST_ALIGN_WRITE(lp)	SMC_32BIT(lp)

#define SMC_GET_PN(lp)							\
	(SMC_8BIT(lp)	? (SMC_inb(ioaddr, PN_REG(lp)))			\
			: (SMC_inw(ioaddr, PN_REG(lp)) & 0xFF))

#define SMC_SET_PN(lp, x)						\
	do {								\
		if (SMC_MUST_ALIGN_WRITE(lp))				\
			SMC_outl((x)<<16, ioaddr, SMC_REG(lp, 0, 2));	\
		else if (SMC_8BIT(lp))					\
			SMC_outb(x, ioaddr, PN_REG(lp));		\
		else							\
			SMC_outw(lp, x, ioaddr, PN_REG(lp));		\
	} while (0)

#define SMC_GET_AR(lp)							\
	(SMC_8BIT(lp)	? (SMC_inb(ioaddr, AR_REG(lp)))			\
			: (SMC_inw(ioaddr, PN_REG(lp)) >> 8))

#define SMC_GET_TXFIFO(lp)						\
	(SMC_8BIT(lp)	? (SMC_inb(ioaddr, TXFIFO_REG(lp)))		\
			: (SMC_inw(ioaddr, TXFIFO_REG(lp)) & 0xFF))

#define SMC_GET_RXFIFO(lp)						\
	(SMC_8BIT(lp)	? (SMC_inb(ioaddr, RXFIFO_REG(lp)))		\
			: (SMC_inw(ioaddr, TXFIFO_REG(lp)) >> 8))

#define SMC_GET_INT(lp)							\
	(SMC_8BIT(lp)	? (SMC_inb(ioaddr, INT_REG(lp)))		\
			: (SMC_inw(ioaddr, INT_REG(lp)) & 0xFF))

#define SMC_ACK_INT(lp, x)						\
	do {								\
		if (SMC_8BIT(lp))					\
			SMC_outb(x, ioaddr, INT_REG(lp));		\
		else {							\
			unsigned long __flags;				\
			int __mask;					\
			local_irq_save(__flags);			\
			__mask = SMC_inw(ioaddr, INT_REG(lp)) & ~0xff;	\
			SMC_outw(lp, __mask | (x), ioaddr, INT_REG(lp)); \
			local_irq_restore(__flags);			\
		}							\
	} while (0)

#define SMC_GET_INT_MASK(lp)						\
	(SMC_8BIT(lp)	? (SMC_inb(ioaddr, IM_REG(lp)))			\
			: (SMC_inw(ioaddr, INT_REG(lp)) >> 8))

#define SMC_SET_INT_MASK(lp, x)						\
	do {								\
		if (SMC_8BIT(lp))					\
			SMC_outb(x, ioaddr, IM_REG(lp));		\
		else							\
			SMC_outw(lp, (x) << 8, ioaddr, INT_REG(lp));	\
	} while (0)

#define SMC_CURRENT_BANK(lp)	SMC_inw(ioaddr, BANK_SELECT)

#define SMC_SELECT_BANK(lp, x)						\
	do {								\
		if (SMC_MUST_ALIGN_WRITE(lp))				\
			SMC_outl((x)<<16, ioaddr, 12<<SMC_IO_SHIFT);	\
		else							\
			SMC_outw(lp, x, ioaddr, BANK_SELECT);		\
	} while (0)
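
/*
 * Illustrative sketch, not used by the driver: as the note above the
 * SMC_REG() macro says, none of the accessors here switch banks on their
 * own, so callers pair them with SMC_SELECT_BANK() themselves.  The
 * helper name below is made up for this example; it just records the
 * previous Bank Select value before switching.
 */
static inline unsigned int smc91x_example_switch_bank(struct smc_local *lp,
						      void __iomem *ioaddr,
						      int bank)
{
	unsigned int old_bsr = SMC_CURRENT_BANK(lp);	/* 0x33xx signature */

	SMC_SELECT_BANK(lp, bank);
	return old_bsr;
}
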
#define SMC_GET_BASE(lp)	SMC_inw(ioaddr, BASE_REG(lp))

#define SMC_SET_BASE(lp, x)	SMC_outw(lp, x, ioaddr, BASE_REG(lp))

#define SMC_GET_CONFIG(lp)	SMC_inw(ioaddr, CONFIG_REG(lp))

#define SMC_SET_CONFIG(lp, x)	SMC_outw(lp, x, ioaddr, CONFIG_REG(lp))

#define SMC_GET_COUNTER(lp)	SMC_inw(ioaddr, COUNTER_REG(lp))

#define SMC_GET_CTL(lp)		SMC_inw(ioaddr, CTL_REG(lp))

#define SMC_SET_CTL(lp, x)	SMC_outw(lp, x, ioaddr, CTL_REG(lp))

#define SMC_GET_MII(lp)		SMC_inw(ioaddr, MII_REG(lp))

#define SMC_GET_GP(lp)		SMC_inw(ioaddr, GP_REG(lp))

#define SMC_SET_GP(lp, x)						\
	do {								\
		if (SMC_MUST_ALIGN_WRITE(lp))				\
			SMC_outl((x)<<16, ioaddr, SMC_REG(lp, 8, 1));	\
		else							\
			SMC_outw(lp, x, ioaddr, GP_REG(lp));		\
	} while (0)

#define SMC_SET_MII(lp, x)	SMC_outw(lp, x, ioaddr, MII_REG(lp))

#define SMC_GET_MIR(lp)		SMC_inw(ioaddr, MIR_REG(lp))

#define SMC_SET_MIR(lp, x)	SMC_outw(lp, x, ioaddr, MIR_REG(lp))

#define SMC_GET_MMU_CMD(lp)	SMC_inw(ioaddr, MMU_CMD_REG(lp))

#define SMC_SET_MMU_CMD(lp, x)	SMC_outw(lp, x, ioaddr, MMU_CMD_REG(lp))

#define SMC_GET_FIFO(lp)	SMC_inw(ioaddr, FIFO_REG(lp))

#define SMC_GET_PTR(lp)		SMC_inw(ioaddr, PTR_REG(lp))

#define SMC_SET_PTR(lp, x)						\
	do {								\
		if (SMC_MUST_ALIGN_WRITE(lp))				\
			SMC_outl((x)<<16, ioaddr, SMC_REG(lp, 4, 2));	\
		else							\
			SMC_outw(lp, x, ioaddr, PTR_REG(lp));		\
	} while (0)

#define SMC_GET_EPH_STATUS(lp)	SMC_inw(ioaddr, EPH_STATUS_REG(lp))

#define SMC_GET_RCR(lp)		SMC_inw(ioaddr, RCR_REG(lp))

#define SMC_SET_RCR(lp, x)	SMC_outw(lp, x, ioaddr, RCR_REG(lp))

#define SMC_GET_REV(lp)		SMC_inw(ioaddr, REV_REG(lp))

#define SMC_GET_RPC(lp)		SMC_inw(ioaddr, RPC_REG(lp))

#define SMC_SET_RPC(lp, x)						\
	do {								\
		if (SMC_MUST_ALIGN_WRITE(lp))				\
			SMC_outl((x)<<16, ioaddr, SMC_REG(lp, 8, 0));	\
		else							\
			SMC_outw(lp, x, ioaddr, RPC_REG(lp));		\
	} while (0)

#define SMC_GET_TCR(lp)		SMC_inw(ioaddr, TCR_REG(lp))

#define SMC_SET_TCR(lp, x)	SMC_outw(lp, x, ioaddr, TCR_REG(lp))

#ifndef SMC_GET_MAC_ADDR
#define SMC_GET_MAC_ADDR(lp, addr)					\
	do {								\
		unsigned int __v;					\
		__v = SMC_inw(ioaddr, ADDR0_REG(lp));			\
		addr[0] = __v; addr[1] = __v >> 8;			\
		__v = SMC_inw(ioaddr, ADDR1_REG(lp));			\
		addr[2] = __v; addr[3] = __v >> 8;			\
		__v = SMC_inw(ioaddr, ADDR2_REG(lp));			\
		addr[4] = __v; addr[5] = __v >> 8;			\
	} while (0)
#endif

#define SMC_SET_MAC_ADDR(lp, addr)					\
	do {								\
		SMC_outw(lp, addr[0] | (addr[1] << 8), ioaddr, ADDR0_REG(lp)); \
		SMC_outw(lp, addr[2] | (addr[3] << 8), ioaddr, ADDR1_REG(lp)); \
		SMC_outw(lp, addr[4] | (addr[5] << 8), ioaddr, ADDR2_REG(lp)); \
	} while (0)

#define SMC_SET_MCAST(lp, x)						\
	do {								\
		const unsigned char *mt = (x);				\
		SMC_outw(lp, mt[0] | (mt[1] << 8), ioaddr, MCAST_REG1(lp)); \
		SMC_outw(lp, mt[2] | (mt[3] << 8), ioaddr, MCAST_REG2(lp)); \
		SMC_outw(lp, mt[4] | (mt[5] << 8), ioaddr, MCAST_REG3(lp)); \
		SMC_outw(lp, mt[6] | (mt[7] << 8), ioaddr, MCAST_REG4(lp)); \
	} while (0)

#define SMC_PUT_PKT_HDR(lp, status, length)				\
	do {								\
		if (SMC_32BIT(lp))					\
			SMC_outl((status) | (length)<<16, ioaddr,	\
				 DATA_REG(lp));				\
		else {							\
			SMC_outw(lp, status, ioaddr, DATA_REG(lp));	\
			SMC_outw(lp, length, ioaddr, DATA_REG(lp));	\
		}							\
	} while (0)

#define SMC_GET_PKT_HDR(lp, status, length)				\
	do {								\
		if (SMC_32BIT(lp)) {					\
			unsigned int __val = SMC_inl(ioaddr, DATA_REG(lp)); \
			(status) = __val & 0xffff;			\
			(length) = __val >> 16;				\
		} else {						\
			(status) = SMC_inw(ioaddr, DATA_REG(lp));	\
			(length) = SMC_inw(ioaddr, DATA_REG(lp));	\
		}							\
	} while (0)

#define SMC_PUSH_DATA(lp, p, l)						\
	do {								\
		if (SMC_32BIT(lp)) {					\
			void *__ptr = (p);				\
			int __len = (l);				\
			void __iomem *__ioaddr = ioaddr;		\
			if (__len >= 2 && (unsigned long)__ptr & 2) {	\
				__len -= 2;				\
				SMC_outsw(ioaddr, DATA_REG(lp), __ptr, 1); \
				__ptr += 2;				\
			}						\
			if (SMC_CAN_USE_DATACS && lp->datacs)		\
				__ioaddr = lp->datacs;			\
			SMC_outsl(__ioaddr, DATA_REG(lp), __ptr, __len>>2); \
			if (__len & 2) {				\
				__ptr += (__len & ~3);			\
				SMC_outsw(ioaddr, DATA_REG(lp), __ptr, 1); \
			}						\
		} else if (SMC_16BIT(lp))				\
			SMC_outsw(ioaddr, DATA_REG(lp), p, (l) >> 1);	\
		else if (SMC_8BIT(lp))					\
			SMC_outsb(ioaddr, DATA_REG(lp), p, l);		\
	} while (0)

#define SMC_PULL_DATA(lp, p, l)						\
	do {								\
		if (SMC_32BIT(lp)) {					\
			void *__ptr = (p);				\
			int __len = (l);				\
			void __iomem *__ioaddr = ioaddr;		\
			if ((unsigned long)__ptr & 2) {			\
				/*					\
				 * We want 32bit alignment here.	\
				 * Since some buses perform a full	\
				 * 32bit fetch even for 16bit data	\
				 * we can't use SMC_inw() here.		\
				 * Back both source (on-chip) and	\
				 * destination pointers up by 2 bytes.	\
				 * This is possible since the call to	\
				 * SMC_GET_PKT_HDR() already advanced	\
				 * the source pointer by 4 bytes, and	\
				 * the skb_reserve(skb, 2) advanced	\
				 * the destination pointer by 2 bytes.	\
				 */					\
				__ptr -= 2;				\
				__len += 2;				\
				SMC_SET_PTR(lp,				\
					2|PTR_READ|PTR_RCV|PTR_AUTOINC); \
			}						\
			if (SMC_CAN_USE_DATACS && lp->datacs)		\
				__ioaddr = lp->datacs;			\
			__len += 2;					\
			SMC_insl(__ioaddr, DATA_REG(lp), __ptr, __len>>2); \
		} else if (SMC_16BIT(lp))				\
			SMC_insw(ioaddr, DATA_REG(lp), p, (l) >> 1);	\
		else if (SMC_8BIT(lp))					\
			SMC_insb(ioaddr, DATA_REG(lp), p, l);		\
	} while (0)

#endif  /* _SMC91X_H_ */