1 /* 2 * ks8842.c timberdale KS8842 ethernet driver 3 * Copyright (c) 2009 Intel Corporation 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License version 2 as 7 * published by the Free Software Foundation. 8 * 9 * This program is distributed in the hope that it will be useful, 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * GNU General Public License for more details. 13 * 14 * You should have received a copy of the GNU General Public License 15 * along with this program; if not, write to the Free Software 16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 17 */ 18 19 /* Supports: 20 * The Micrel KS8842 behind the timberdale FPGA 21 * The genuine Micrel KS8841/42 device with ISA 16/32bit bus interface 22 */ 23 24 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 25 26 #include <linux/interrupt.h> 27 #include <linux/kernel.h> 28 #include <linux/module.h> 29 #include <linux/platform_device.h> 30 #include <linux/netdevice.h> 31 #include <linux/etherdevice.h> 32 #include <linux/ethtool.h> 33 #include <linux/ks8842.h> 34 #include <linux/dmaengine.h> 35 #include <linux/dma-mapping.h> 36 #include <linux/scatterlist.h> 37 38 #define DRV_NAME "ks8842" 39 40 /* Timberdale specific Registers */ 41 #define REG_TIMB_RST 0x1c 42 #define REG_TIMB_FIFO 0x20 43 #define REG_TIMB_ISR 0x24 44 #define REG_TIMB_IER 0x28 45 #define REG_TIMB_IAR 0x2C 46 #define REQ_TIMB_DMA_RESUME 0x30 47 48 /* KS8842 registers */ 49 50 #define REG_SELECT_BANK 0x0e 51 52 /* bank 0 registers */ 53 #define REG_QRFCR 0x04 54 55 /* bank 2 registers */ 56 #define REG_MARL 0x00 57 #define REG_MARM 0x02 58 #define REG_MARH 0x04 59 60 /* bank 3 registers */ 61 #define REG_GRR 0x06 62 63 /* bank 16 registers */ 64 #define REG_TXCR 0x00 65 #define REG_TXSR 0x02 66 #define REG_RXCR 0x04 67 #define REG_TXMIR 0x08 68 #define REG_RXMIR 0x0A 69 70 /* bank 17 registers */ 71 #define REG_TXQCR 0x00 72 #define REG_RXQCR 0x02 73 #define REG_TXFDPR 0x04 74 #define REG_RXFDPR 0x06 75 #define REG_QMU_DATA_LO 0x08 76 #define REG_QMU_DATA_HI 0x0A 77 78 /* bank 18 registers */ 79 #define REG_IER 0x00 80 #define IRQ_LINK_CHANGE 0x8000 81 #define IRQ_TX 0x4000 82 #define IRQ_RX 0x2000 83 #define IRQ_RX_OVERRUN 0x0800 84 #define IRQ_TX_STOPPED 0x0200 85 #define IRQ_RX_STOPPED 0x0100 86 #define IRQ_RX_ERROR 0x0080 87 #define ENABLED_IRQS (IRQ_LINK_CHANGE | IRQ_TX | IRQ_RX | IRQ_RX_STOPPED | \ 88 IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR) 89 /* When running via timberdale in DMA mode, the RX interrupt should be 90 enabled in the KS8842, but not in the FPGA IP, since the IP handles 91 RX DMA internally. 92 TX interrupts are not needed it is handled by the FPGA the driver is 93 notified via DMA callbacks. 94 */ 95 #define ENABLED_IRQS_DMA_IP (IRQ_LINK_CHANGE | IRQ_RX_STOPPED | \ 96 IRQ_TX_STOPPED | IRQ_RX_OVERRUN | IRQ_RX_ERROR) 97 #define ENABLED_IRQS_DMA (ENABLED_IRQS_DMA_IP | IRQ_RX) 98 #define REG_ISR 0x02 99 #define REG_RXSR 0x04 100 #define RXSR_VALID 0x8000 101 #define RXSR_BROADCAST 0x80 102 #define RXSR_MULTICAST 0x40 103 #define RXSR_UNICAST 0x20 104 #define RXSR_FRAMETYPE 0x08 105 #define RXSR_TOO_LONG 0x04 106 #define RXSR_RUNT 0x02 107 #define RXSR_CRC_ERROR 0x01 108 #define RXSR_ERROR (RXSR_TOO_LONG | RXSR_RUNT | RXSR_CRC_ERROR) 109 110 /* bank 32 registers */ 111 #define REG_SW_ID_AND_ENABLE 0x00 112 #define REG_SGCR1 0x02 113 #define REG_SGCR2 0x04 114 #define REG_SGCR3 0x06 115 116 /* bank 39 registers */ 117 #define REG_MACAR1 0x00 118 #define REG_MACAR2 0x02 119 #define REG_MACAR3 0x04 120 121 /* bank 45 registers */ 122 #define REG_P1MBCR 0x00 123 #define REG_P1MBSR 0x02 124 125 /* bank 46 registers */ 126 #define REG_P2MBCR 0x00 127 #define REG_P2MBSR 0x02 128 129 /* bank 48 registers */ 130 #define REG_P1CR2 0x02 131 132 /* bank 49 registers */ 133 #define REG_P1CR4 0x02 134 #define REG_P1SR 0x04 135 136 /* flags passed by platform_device for configuration */ 137 #define MICREL_KS884X 0x01 /* 0=Timeberdale(FPGA), 1=Micrel */ 138 #define KS884X_16BIT 0x02 /* 1=16bit, 0=32bit */ 139 140 #define DMA_BUFFER_SIZE 2048 141 142 struct ks8842_tx_dma_ctl { 143 struct dma_chan *chan; 144 struct dma_async_tx_descriptor *adesc; 145 void *buf; 146 struct scatterlist sg; 147 int channel; 148 }; 149 150 struct ks8842_rx_dma_ctl { 151 struct dma_chan *chan; 152 struct dma_async_tx_descriptor *adesc; 153 struct sk_buff *skb; 154 struct scatterlist sg; 155 struct tasklet_struct tasklet; 156 int channel; 157 }; 158 159 #define KS8842_USE_DMA(adapter) (((adapter)->dma_tx.channel != -1) && \ 160 ((adapter)->dma_rx.channel != -1)) 161 162 struct ks8842_adapter { 163 void __iomem *hw_addr; 164 int irq; 165 unsigned long conf_flags; /* copy of platform_device config */ 166 struct tasklet_struct tasklet; 167 spinlock_t lock; /* spinlock to be interrupt safe */ 168 struct work_struct timeout_work; 169 struct net_device *netdev; 170 struct device *dev; 171 struct ks8842_tx_dma_ctl dma_tx; 172 struct ks8842_rx_dma_ctl dma_rx; 173 }; 174 175 static void ks8842_dma_rx_cb(void *data); 176 static void ks8842_dma_tx_cb(void *data); 177 178 static inline void ks8842_resume_dma(struct ks8842_adapter *adapter) 179 { 180 iowrite32(1, adapter->hw_addr + REQ_TIMB_DMA_RESUME); 181 } 182 183 static inline void ks8842_select_bank(struct ks8842_adapter *adapter, u16 bank) 184 { 185 iowrite16(bank, adapter->hw_addr + REG_SELECT_BANK); 186 } 187 188 static inline void ks8842_write8(struct ks8842_adapter *adapter, u16 bank, 189 u8 value, int offset) 190 { 191 ks8842_select_bank(adapter, bank); 192 iowrite8(value, adapter->hw_addr + offset); 193 } 194 195 static inline void ks8842_write16(struct ks8842_adapter *adapter, u16 bank, 196 u16 value, int offset) 197 { 198 ks8842_select_bank(adapter, bank); 199 iowrite16(value, adapter->hw_addr + offset); 200 } 201 202 static inline void ks8842_enable_bits(struct ks8842_adapter *adapter, u16 bank, 203 u16 bits, int offset) 204 { 205 u16 reg; 206 ks8842_select_bank(adapter, bank); 207 reg = ioread16(adapter->hw_addr + offset); 208 reg |= bits; 209 iowrite16(reg, adapter->hw_addr + offset); 210 } 211 212 static inline void ks8842_clear_bits(struct ks8842_adapter *adapter, u16 bank, 213 u16 bits, int offset) 214 { 215 u16 reg; 216 ks8842_select_bank(adapter, bank); 217 reg = ioread16(adapter->hw_addr + offset); 218 reg &= ~bits; 219 iowrite16(reg, adapter->hw_addr + offset); 220 } 221 222 static inline void ks8842_write32(struct ks8842_adapter *adapter, u16 bank, 223 u32 value, int offset) 224 { 225 ks8842_select_bank(adapter, bank); 226 iowrite32(value, adapter->hw_addr + offset); 227 } 228 229 static inline u8 ks8842_read8(struct ks8842_adapter *adapter, u16 bank, 230 int offset) 231 { 232 ks8842_select_bank(adapter, bank); 233 return ioread8(adapter->hw_addr + offset); 234 } 235 236 static inline u16 ks8842_read16(struct ks8842_adapter *adapter, u16 bank, 237 int offset) 238 { 239 ks8842_select_bank(adapter, bank); 240 return ioread16(adapter->hw_addr + offset); 241 } 242 243 static inline u32 ks8842_read32(struct ks8842_adapter *adapter, u16 bank, 244 int offset) 245 { 246 ks8842_select_bank(adapter, bank); 247 return ioread32(adapter->hw_addr + offset); 248 } 249 250 static void ks8842_reset(struct ks8842_adapter *adapter) 251 { 252 if (adapter->conf_flags & MICREL_KS884X) { 253 ks8842_write16(adapter, 3, 1, REG_GRR); 254 msleep(10); 255 iowrite16(0, adapter->hw_addr + REG_GRR); 256 } else { 257 /* The KS8842 goes haywire when doing softare reset 258 * a work around in the timberdale IP is implemented to 259 * do a hardware reset instead 260 ks8842_write16(adapter, 3, 1, REG_GRR); 261 msleep(10); 262 iowrite16(0, adapter->hw_addr + REG_GRR); 263 */ 264 iowrite32(0x1, adapter->hw_addr + REG_TIMB_RST); 265 msleep(20); 266 } 267 } 268 269 static void ks8842_update_link_status(struct net_device *netdev, 270 struct ks8842_adapter *adapter) 271 { 272 /* check the status of the link */ 273 if (ks8842_read16(adapter, 45, REG_P1MBSR) & 0x4) { 274 netif_carrier_on(netdev); 275 netif_wake_queue(netdev); 276 } else { 277 netif_stop_queue(netdev); 278 netif_carrier_off(netdev); 279 } 280 } 281 282 static void ks8842_enable_tx(struct ks8842_adapter *adapter) 283 { 284 ks8842_enable_bits(adapter, 16, 0x01, REG_TXCR); 285 } 286 287 static void ks8842_disable_tx(struct ks8842_adapter *adapter) 288 { 289 ks8842_clear_bits(adapter, 16, 0x01, REG_TXCR); 290 } 291 292 static void ks8842_enable_rx(struct ks8842_adapter *adapter) 293 { 294 ks8842_enable_bits(adapter, 16, 0x01, REG_RXCR); 295 } 296 297 static void ks8842_disable_rx(struct ks8842_adapter *adapter) 298 { 299 ks8842_clear_bits(adapter, 16, 0x01, REG_RXCR); 300 } 301 302 static void ks8842_reset_hw(struct ks8842_adapter *adapter) 303 { 304 /* reset the HW */ 305 ks8842_reset(adapter); 306 307 /* Enable QMU Transmit flow control / transmit padding / Transmit CRC */ 308 ks8842_write16(adapter, 16, 0x000E, REG_TXCR); 309 310 /* enable the receiver, uni + multi + broadcast + flow ctrl 311 + crc strip */ 312 ks8842_write16(adapter, 16, 0x8 | 0x20 | 0x40 | 0x80 | 0x400, 313 REG_RXCR); 314 315 /* TX frame pointer autoincrement */ 316 ks8842_write16(adapter, 17, 0x4000, REG_TXFDPR); 317 318 /* RX frame pointer autoincrement */ 319 ks8842_write16(adapter, 17, 0x4000, REG_RXFDPR); 320 321 /* RX 2 kb high watermark */ 322 ks8842_write16(adapter, 0, 0x1000, REG_QRFCR); 323 324 /* aggressive back off in half duplex */ 325 ks8842_enable_bits(adapter, 32, 1 << 8, REG_SGCR1); 326 327 /* enable no excessive collison drop */ 328 ks8842_enable_bits(adapter, 32, 1 << 3, REG_SGCR2); 329 330 /* Enable port 1 force flow control / back pressure / transmit / recv */ 331 ks8842_write16(adapter, 48, 0x1E07, REG_P1CR2); 332 333 /* restart port auto-negotiation */ 334 ks8842_enable_bits(adapter, 49, 1 << 13, REG_P1CR4); 335 336 /* Enable the transmitter */ 337 ks8842_enable_tx(adapter); 338 339 /* Enable the receiver */ 340 ks8842_enable_rx(adapter); 341 342 /* clear all interrupts */ 343 ks8842_write16(adapter, 18, 0xffff, REG_ISR); 344 345 /* enable interrupts */ 346 if (KS8842_USE_DMA(adapter)) { 347 /* When running in DMA Mode the RX interrupt is not enabled in 348 timberdale because RX data is received by DMA callbacks 349 it must still be enabled in the KS8842 because it indicates 350 to timberdale when there is RX data for it's DMA FIFOs */ 351 iowrite16(ENABLED_IRQS_DMA_IP, adapter->hw_addr + REG_TIMB_IER); 352 ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER); 353 } else { 354 if (!(adapter->conf_flags & MICREL_KS884X)) 355 iowrite16(ENABLED_IRQS, 356 adapter->hw_addr + REG_TIMB_IER); 357 ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER); 358 } 359 /* enable the switch */ 360 ks8842_write16(adapter, 32, 0x1, REG_SW_ID_AND_ENABLE); 361 } 362 363 static void ks8842_read_mac_addr(struct ks8842_adapter *adapter, u8 *dest) 364 { 365 int i; 366 u16 mac; 367 368 for (i = 0; i < ETH_ALEN; i++) 369 dest[ETH_ALEN - i - 1] = ks8842_read8(adapter, 2, REG_MARL + i); 370 371 if (adapter->conf_flags & MICREL_KS884X) { 372 /* 373 the sequence of saving mac addr between MAC and Switch is 374 different. 375 */ 376 377 mac = ks8842_read16(adapter, 2, REG_MARL); 378 ks8842_write16(adapter, 39, mac, REG_MACAR3); 379 mac = ks8842_read16(adapter, 2, REG_MARM); 380 ks8842_write16(adapter, 39, mac, REG_MACAR2); 381 mac = ks8842_read16(adapter, 2, REG_MARH); 382 ks8842_write16(adapter, 39, mac, REG_MACAR1); 383 } else { 384 385 /* make sure the switch port uses the same MAC as the QMU */ 386 mac = ks8842_read16(adapter, 2, REG_MARL); 387 ks8842_write16(adapter, 39, mac, REG_MACAR1); 388 mac = ks8842_read16(adapter, 2, REG_MARM); 389 ks8842_write16(adapter, 39, mac, REG_MACAR2); 390 mac = ks8842_read16(adapter, 2, REG_MARH); 391 ks8842_write16(adapter, 39, mac, REG_MACAR3); 392 } 393 } 394 395 static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, u8 *mac) 396 { 397 unsigned long flags; 398 unsigned i; 399 400 spin_lock_irqsave(&adapter->lock, flags); 401 for (i = 0; i < ETH_ALEN; i++) { 402 ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i); 403 if (!(adapter->conf_flags & MICREL_KS884X)) 404 ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1], 405 REG_MACAR1 + i); 406 } 407 408 if (adapter->conf_flags & MICREL_KS884X) { 409 /* 410 the sequence of saving mac addr between MAC and Switch is 411 different. 412 */ 413 414 u16 mac; 415 416 mac = ks8842_read16(adapter, 2, REG_MARL); 417 ks8842_write16(adapter, 39, mac, REG_MACAR3); 418 mac = ks8842_read16(adapter, 2, REG_MARM); 419 ks8842_write16(adapter, 39, mac, REG_MACAR2); 420 mac = ks8842_read16(adapter, 2, REG_MARH); 421 ks8842_write16(adapter, 39, mac, REG_MACAR1); 422 } 423 spin_unlock_irqrestore(&adapter->lock, flags); 424 } 425 426 static inline u16 ks8842_tx_fifo_space(struct ks8842_adapter *adapter) 427 { 428 return ks8842_read16(adapter, 16, REG_TXMIR) & 0x1fff; 429 } 430 431 static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev) 432 { 433 struct ks8842_adapter *adapter = netdev_priv(netdev); 434 struct ks8842_tx_dma_ctl *ctl = &adapter->dma_tx; 435 u8 *buf = ctl->buf; 436 437 if (ctl->adesc) { 438 netdev_dbg(netdev, "%s: TX ongoing\n", __func__); 439 /* transfer ongoing */ 440 return NETDEV_TX_BUSY; 441 } 442 443 sg_dma_len(&ctl->sg) = skb->len + sizeof(u32); 444 445 /* copy data to the TX buffer */ 446 /* the control word, enable IRQ, port 1 and the length */ 447 *buf++ = 0x00; 448 *buf++ = 0x01; /* Port 1 */ 449 *buf++ = skb->len & 0xff; 450 *buf++ = (skb->len >> 8) & 0xff; 451 skb_copy_from_linear_data(skb, buf, skb->len); 452 453 dma_sync_single_range_for_device(adapter->dev, 454 sg_dma_address(&ctl->sg), 0, sg_dma_len(&ctl->sg), 455 DMA_TO_DEVICE); 456 457 /* make sure the length is a multiple of 4 */ 458 if (sg_dma_len(&ctl->sg) % 4) 459 sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4; 460 461 ctl->adesc = dmaengine_prep_slave_sg(ctl->chan, 462 &ctl->sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT); 463 if (!ctl->adesc) 464 return NETDEV_TX_BUSY; 465 466 ctl->adesc->callback_param = netdev; 467 ctl->adesc->callback = ks8842_dma_tx_cb; 468 ctl->adesc->tx_submit(ctl->adesc); 469 470 netdev->stats.tx_bytes += skb->len; 471 472 dev_kfree_skb(skb); 473 474 return NETDEV_TX_OK; 475 } 476 477 static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev) 478 { 479 struct ks8842_adapter *adapter = netdev_priv(netdev); 480 int len = skb->len; 481 482 netdev_dbg(netdev, "%s: len %u head %p data %p tail %p end %p\n", 483 __func__, skb->len, skb->head, skb->data, 484 skb_tail_pointer(skb), skb_end_pointer(skb)); 485 486 /* check FIFO buffer space, we need space for CRC and command bits */ 487 if (ks8842_tx_fifo_space(adapter) < len + 8) 488 return NETDEV_TX_BUSY; 489 490 if (adapter->conf_flags & KS884X_16BIT) { 491 u16 *ptr16 = (u16 *)skb->data; 492 ks8842_write16(adapter, 17, 0x8000 | 0x100, REG_QMU_DATA_LO); 493 ks8842_write16(adapter, 17, (u16)len, REG_QMU_DATA_HI); 494 netdev->stats.tx_bytes += len; 495 496 /* copy buffer */ 497 while (len > 0) { 498 iowrite16(*ptr16++, adapter->hw_addr + REG_QMU_DATA_LO); 499 iowrite16(*ptr16++, adapter->hw_addr + REG_QMU_DATA_HI); 500 len -= sizeof(u32); 501 } 502 } else { 503 504 u32 *ptr = (u32 *)skb->data; 505 u32 ctrl; 506 /* the control word, enable IRQ, port 1 and the length */ 507 ctrl = 0x8000 | 0x100 | (len << 16); 508 ks8842_write32(adapter, 17, ctrl, REG_QMU_DATA_LO); 509 510 netdev->stats.tx_bytes += len; 511 512 /* copy buffer */ 513 while (len > 0) { 514 iowrite32(*ptr, adapter->hw_addr + REG_QMU_DATA_LO); 515 len -= sizeof(u32); 516 ptr++; 517 } 518 } 519 520 /* enqueue packet */ 521 ks8842_write16(adapter, 17, 1, REG_TXQCR); 522 523 dev_kfree_skb(skb); 524 525 return NETDEV_TX_OK; 526 } 527 528 static void ks8842_update_rx_err_counters(struct net_device *netdev, u32 status) 529 { 530 netdev_dbg(netdev, "RX error, status: %x\n", status); 531 532 netdev->stats.rx_errors++; 533 if (status & RXSR_TOO_LONG) 534 netdev->stats.rx_length_errors++; 535 if (status & RXSR_CRC_ERROR) 536 netdev->stats.rx_crc_errors++; 537 if (status & RXSR_RUNT) 538 netdev->stats.rx_frame_errors++; 539 } 540 541 static void ks8842_update_rx_counters(struct net_device *netdev, u32 status, 542 int len) 543 { 544 netdev_dbg(netdev, "RX packet, len: %d\n", len); 545 546 netdev->stats.rx_packets++; 547 netdev->stats.rx_bytes += len; 548 if (status & RXSR_MULTICAST) 549 netdev->stats.multicast++; 550 } 551 552 static int __ks8842_start_new_rx_dma(struct net_device *netdev) 553 { 554 struct ks8842_adapter *adapter = netdev_priv(netdev); 555 struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx; 556 struct scatterlist *sg = &ctl->sg; 557 int err; 558 559 ctl->skb = netdev_alloc_skb(netdev, DMA_BUFFER_SIZE); 560 if (ctl->skb) { 561 sg_init_table(sg, 1); 562 sg_dma_address(sg) = dma_map_single(adapter->dev, 563 ctl->skb->data, DMA_BUFFER_SIZE, DMA_FROM_DEVICE); 564 if (dma_mapping_error(adapter->dev, sg_dma_address(sg))) { 565 err = -ENOMEM; 566 sg_dma_address(sg) = 0; 567 goto out; 568 } 569 570 sg_dma_len(sg) = DMA_BUFFER_SIZE; 571 572 ctl->adesc = dmaengine_prep_slave_sg(ctl->chan, 573 sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT); 574 575 if (!ctl->adesc) { 576 err = -ENOMEM; 577 goto out; 578 } 579 580 ctl->adesc->callback_param = netdev; 581 ctl->adesc->callback = ks8842_dma_rx_cb; 582 ctl->adesc->tx_submit(ctl->adesc); 583 } else { 584 err = -ENOMEM; 585 sg_dma_address(sg) = 0; 586 goto out; 587 } 588 589 return 0; 590 out: 591 if (sg_dma_address(sg)) 592 dma_unmap_single(adapter->dev, sg_dma_address(sg), 593 DMA_BUFFER_SIZE, DMA_FROM_DEVICE); 594 sg_dma_address(sg) = 0; 595 if (ctl->skb) 596 dev_kfree_skb(ctl->skb); 597 598 ctl->skb = NULL; 599 600 printk(KERN_ERR DRV_NAME": Failed to start RX DMA: %d\n", err); 601 return err; 602 } 603 604 static void ks8842_rx_frame_dma_tasklet(unsigned long arg) 605 { 606 struct net_device *netdev = (struct net_device *)arg; 607 struct ks8842_adapter *adapter = netdev_priv(netdev); 608 struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx; 609 struct sk_buff *skb = ctl->skb; 610 dma_addr_t addr = sg_dma_address(&ctl->sg); 611 u32 status; 612 613 ctl->adesc = NULL; 614 615 /* kick next transfer going */ 616 __ks8842_start_new_rx_dma(netdev); 617 618 /* now handle the data we got */ 619 dma_unmap_single(adapter->dev, addr, DMA_BUFFER_SIZE, DMA_FROM_DEVICE); 620 621 status = *((u32 *)skb->data); 622 623 netdev_dbg(netdev, "%s - rx_data: status: %x\n", 624 __func__, status & 0xffff); 625 626 /* check the status */ 627 if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) { 628 int len = (status >> 16) & 0x7ff; 629 630 ks8842_update_rx_counters(netdev, status, len); 631 632 /* reserve 4 bytes which is the status word */ 633 skb_reserve(skb, 4); 634 skb_put(skb, len); 635 636 skb->protocol = eth_type_trans(skb, netdev); 637 netif_rx(skb); 638 } else { 639 ks8842_update_rx_err_counters(netdev, status); 640 dev_kfree_skb(skb); 641 } 642 } 643 644 static void ks8842_rx_frame(struct net_device *netdev, 645 struct ks8842_adapter *adapter) 646 { 647 u32 status; 648 int len; 649 650 if (adapter->conf_flags & KS884X_16BIT) { 651 status = ks8842_read16(adapter, 17, REG_QMU_DATA_LO); 652 len = ks8842_read16(adapter, 17, REG_QMU_DATA_HI); 653 netdev_dbg(netdev, "%s - rx_data: status: %x\n", 654 __func__, status); 655 } else { 656 status = ks8842_read32(adapter, 17, REG_QMU_DATA_LO); 657 len = (status >> 16) & 0x7ff; 658 status &= 0xffff; 659 netdev_dbg(netdev, "%s - rx_data: status: %x\n", 660 __func__, status); 661 } 662 663 /* check the status */ 664 if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) { 665 struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len + 3); 666 667 if (skb) { 668 669 ks8842_update_rx_counters(netdev, status, len); 670 671 if (adapter->conf_flags & KS884X_16BIT) { 672 u16 *data16 = skb_put(skb, len); 673 ks8842_select_bank(adapter, 17); 674 while (len > 0) { 675 *data16++ = ioread16(adapter->hw_addr + 676 REG_QMU_DATA_LO); 677 *data16++ = ioread16(adapter->hw_addr + 678 REG_QMU_DATA_HI); 679 len -= sizeof(u32); 680 } 681 } else { 682 u32 *data = skb_put(skb, len); 683 684 ks8842_select_bank(adapter, 17); 685 while (len > 0) { 686 *data++ = ioread32(adapter->hw_addr + 687 REG_QMU_DATA_LO); 688 len -= sizeof(u32); 689 } 690 } 691 skb->protocol = eth_type_trans(skb, netdev); 692 netif_rx(skb); 693 } else 694 netdev->stats.rx_dropped++; 695 } else 696 ks8842_update_rx_err_counters(netdev, status); 697 698 /* set high watermark to 3K */ 699 ks8842_clear_bits(adapter, 0, 1 << 12, REG_QRFCR); 700 701 /* release the frame */ 702 ks8842_write16(adapter, 17, 0x01, REG_RXQCR); 703 704 /* set high watermark to 2K */ 705 ks8842_enable_bits(adapter, 0, 1 << 12, REG_QRFCR); 706 } 707 708 static void ks8842_handle_rx(struct net_device *netdev, 709 struct ks8842_adapter *adapter) 710 { 711 u16 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff; 712 netdev_dbg(netdev, "%s Entry - rx_data: %d\n", __func__, rx_data); 713 while (rx_data) { 714 ks8842_rx_frame(netdev, adapter); 715 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff; 716 } 717 } 718 719 static void ks8842_handle_tx(struct net_device *netdev, 720 struct ks8842_adapter *adapter) 721 { 722 u16 sr = ks8842_read16(adapter, 16, REG_TXSR); 723 netdev_dbg(netdev, "%s - entry, sr: %x\n", __func__, sr); 724 netdev->stats.tx_packets++; 725 if (netif_queue_stopped(netdev)) 726 netif_wake_queue(netdev); 727 } 728 729 static void ks8842_handle_rx_overrun(struct net_device *netdev, 730 struct ks8842_adapter *adapter) 731 { 732 netdev_dbg(netdev, "%s: entry\n", __func__); 733 netdev->stats.rx_errors++; 734 netdev->stats.rx_fifo_errors++; 735 } 736 737 static void ks8842_tasklet(unsigned long arg) 738 { 739 struct net_device *netdev = (struct net_device *)arg; 740 struct ks8842_adapter *adapter = netdev_priv(netdev); 741 u16 isr; 742 unsigned long flags; 743 u16 entry_bank; 744 745 /* read current bank to be able to set it back */ 746 spin_lock_irqsave(&adapter->lock, flags); 747 entry_bank = ioread16(adapter->hw_addr + REG_SELECT_BANK); 748 spin_unlock_irqrestore(&adapter->lock, flags); 749 750 isr = ks8842_read16(adapter, 18, REG_ISR); 751 netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr); 752 753 /* when running in DMA mode, do not ack RX interrupts, it is handled 754 internally by timberdale, otherwise it's DMA FIFO:s would stop 755 */ 756 if (KS8842_USE_DMA(adapter)) 757 isr &= ~IRQ_RX; 758 759 /* Ack */ 760 ks8842_write16(adapter, 18, isr, REG_ISR); 761 762 if (!(adapter->conf_flags & MICREL_KS884X)) 763 /* Ack in the timberdale IP as well */ 764 iowrite32(0x1, adapter->hw_addr + REG_TIMB_IAR); 765 766 if (!netif_running(netdev)) 767 return; 768 769 if (isr & IRQ_LINK_CHANGE) 770 ks8842_update_link_status(netdev, adapter); 771 772 /* should not get IRQ_RX when running DMA mode */ 773 if (isr & (IRQ_RX | IRQ_RX_ERROR) && !KS8842_USE_DMA(adapter)) 774 ks8842_handle_rx(netdev, adapter); 775 776 /* should only happen when in PIO mode */ 777 if (isr & IRQ_TX) 778 ks8842_handle_tx(netdev, adapter); 779 780 if (isr & IRQ_RX_OVERRUN) 781 ks8842_handle_rx_overrun(netdev, adapter); 782 783 if (isr & IRQ_TX_STOPPED) { 784 ks8842_disable_tx(adapter); 785 ks8842_enable_tx(adapter); 786 } 787 788 if (isr & IRQ_RX_STOPPED) { 789 ks8842_disable_rx(adapter); 790 ks8842_enable_rx(adapter); 791 } 792 793 /* re-enable interrupts, put back the bank selection register */ 794 spin_lock_irqsave(&adapter->lock, flags); 795 if (KS8842_USE_DMA(adapter)) 796 ks8842_write16(adapter, 18, ENABLED_IRQS_DMA, REG_IER); 797 else 798 ks8842_write16(adapter, 18, ENABLED_IRQS, REG_IER); 799 iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK); 800 801 /* Make sure timberdale continues DMA operations, they are stopped while 802 we are handling the ks8842 because we might change bank */ 803 if (KS8842_USE_DMA(adapter)) 804 ks8842_resume_dma(adapter); 805 806 spin_unlock_irqrestore(&adapter->lock, flags); 807 } 808 809 static irqreturn_t ks8842_irq(int irq, void *devid) 810 { 811 struct net_device *netdev = devid; 812 struct ks8842_adapter *adapter = netdev_priv(netdev); 813 u16 isr; 814 u16 entry_bank = ioread16(adapter->hw_addr + REG_SELECT_BANK); 815 irqreturn_t ret = IRQ_NONE; 816 817 isr = ks8842_read16(adapter, 18, REG_ISR); 818 netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr); 819 820 if (isr) { 821 if (KS8842_USE_DMA(adapter)) 822 /* disable all but RX IRQ, since the FPGA relies on it*/ 823 ks8842_write16(adapter, 18, IRQ_RX, REG_IER); 824 else 825 /* disable IRQ */ 826 ks8842_write16(adapter, 18, 0x00, REG_IER); 827 828 /* schedule tasklet */ 829 tasklet_schedule(&adapter->tasklet); 830 831 ret = IRQ_HANDLED; 832 } 833 834 iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK); 835 836 /* After an interrupt, tell timberdale to continue DMA operations. 837 DMA is disabled while we are handling the ks8842 because we might 838 change bank */ 839 ks8842_resume_dma(adapter); 840 841 return ret; 842 } 843 844 static void ks8842_dma_rx_cb(void *data) 845 { 846 struct net_device *netdev = data; 847 struct ks8842_adapter *adapter = netdev_priv(netdev); 848 849 netdev_dbg(netdev, "RX DMA finished\n"); 850 /* schedule tasklet */ 851 if (adapter->dma_rx.adesc) 852 tasklet_schedule(&adapter->dma_rx.tasklet); 853 } 854 855 static void ks8842_dma_tx_cb(void *data) 856 { 857 struct net_device *netdev = data; 858 struct ks8842_adapter *adapter = netdev_priv(netdev); 859 struct ks8842_tx_dma_ctl *ctl = &adapter->dma_tx; 860 861 netdev_dbg(netdev, "TX DMA finished\n"); 862 863 if (!ctl->adesc) 864 return; 865 866 netdev->stats.tx_packets++; 867 ctl->adesc = NULL; 868 869 if (netif_queue_stopped(netdev)) 870 netif_wake_queue(netdev); 871 } 872 873 static void ks8842_stop_dma(struct ks8842_adapter *adapter) 874 { 875 struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx; 876 struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx; 877 878 tx_ctl->adesc = NULL; 879 if (tx_ctl->chan) 880 dmaengine_terminate_all(tx_ctl->chan); 881 882 rx_ctl->adesc = NULL; 883 if (rx_ctl->chan) 884 dmaengine_terminate_all(rx_ctl->chan); 885 886 if (sg_dma_address(&rx_ctl->sg)) 887 dma_unmap_single(adapter->dev, sg_dma_address(&rx_ctl->sg), 888 DMA_BUFFER_SIZE, DMA_FROM_DEVICE); 889 sg_dma_address(&rx_ctl->sg) = 0; 890 891 dev_kfree_skb(rx_ctl->skb); 892 rx_ctl->skb = NULL; 893 } 894 895 static void ks8842_dealloc_dma_bufs(struct ks8842_adapter *adapter) 896 { 897 struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx; 898 struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx; 899 900 ks8842_stop_dma(adapter); 901 902 if (tx_ctl->chan) 903 dma_release_channel(tx_ctl->chan); 904 tx_ctl->chan = NULL; 905 906 if (rx_ctl->chan) 907 dma_release_channel(rx_ctl->chan); 908 rx_ctl->chan = NULL; 909 910 tasklet_kill(&rx_ctl->tasklet); 911 912 if (sg_dma_address(&tx_ctl->sg)) 913 dma_unmap_single(adapter->dev, sg_dma_address(&tx_ctl->sg), 914 DMA_BUFFER_SIZE, DMA_TO_DEVICE); 915 sg_dma_address(&tx_ctl->sg) = 0; 916 917 kfree(tx_ctl->buf); 918 tx_ctl->buf = NULL; 919 } 920 921 static bool ks8842_dma_filter_fn(struct dma_chan *chan, void *filter_param) 922 { 923 return chan->chan_id == (long)filter_param; 924 } 925 926 static int ks8842_alloc_dma_bufs(struct net_device *netdev) 927 { 928 struct ks8842_adapter *adapter = netdev_priv(netdev); 929 struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx; 930 struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx; 931 int err; 932 933 dma_cap_mask_t mask; 934 935 dma_cap_zero(mask); 936 dma_cap_set(DMA_SLAVE, mask); 937 dma_cap_set(DMA_PRIVATE, mask); 938 939 sg_init_table(&tx_ctl->sg, 1); 940 941 tx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn, 942 (void *)(long)tx_ctl->channel); 943 if (!tx_ctl->chan) { 944 err = -ENODEV; 945 goto err; 946 } 947 948 /* allocate DMA buffer */ 949 tx_ctl->buf = kmalloc(DMA_BUFFER_SIZE, GFP_KERNEL); 950 if (!tx_ctl->buf) { 951 err = -ENOMEM; 952 goto err; 953 } 954 955 sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev, 956 tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE); 957 if (dma_mapping_error(adapter->dev, sg_dma_address(&tx_ctl->sg))) { 958 err = -ENOMEM; 959 sg_dma_address(&tx_ctl->sg) = 0; 960 goto err; 961 } 962 963 rx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn, 964 (void *)(long)rx_ctl->channel); 965 if (!rx_ctl->chan) { 966 err = -ENODEV; 967 goto err; 968 } 969 970 tasklet_init(&rx_ctl->tasklet, ks8842_rx_frame_dma_tasklet, 971 (unsigned long)netdev); 972 973 return 0; 974 err: 975 ks8842_dealloc_dma_bufs(adapter); 976 return err; 977 } 978 979 /* Netdevice operations */ 980 981 static int ks8842_open(struct net_device *netdev) 982 { 983 struct ks8842_adapter *adapter = netdev_priv(netdev); 984 int err; 985 986 netdev_dbg(netdev, "%s - entry\n", __func__); 987 988 if (KS8842_USE_DMA(adapter)) { 989 err = ks8842_alloc_dma_bufs(netdev); 990 991 if (!err) { 992 /* start RX dma */ 993 err = __ks8842_start_new_rx_dma(netdev); 994 if (err) 995 ks8842_dealloc_dma_bufs(adapter); 996 } 997 998 if (err) { 999 printk(KERN_WARNING DRV_NAME 1000 ": Failed to initiate DMA, running PIO\n"); 1001 ks8842_dealloc_dma_bufs(adapter); 1002 adapter->dma_rx.channel = -1; 1003 adapter->dma_tx.channel = -1; 1004 } 1005 } 1006 1007 /* reset the HW */ 1008 ks8842_reset_hw(adapter); 1009 1010 ks8842_write_mac_addr(adapter, netdev->dev_addr); 1011 1012 ks8842_update_link_status(netdev, adapter); 1013 1014 err = request_irq(adapter->irq, ks8842_irq, IRQF_SHARED, DRV_NAME, 1015 netdev); 1016 if (err) { 1017 pr_err("Failed to request IRQ: %d: %d\n", adapter->irq, err); 1018 return err; 1019 } 1020 1021 return 0; 1022 } 1023 1024 static int ks8842_close(struct net_device *netdev) 1025 { 1026 struct ks8842_adapter *adapter = netdev_priv(netdev); 1027 1028 netdev_dbg(netdev, "%s - entry\n", __func__); 1029 1030 cancel_work_sync(&adapter->timeout_work); 1031 1032 if (KS8842_USE_DMA(adapter)) 1033 ks8842_dealloc_dma_bufs(adapter); 1034 1035 /* free the irq */ 1036 free_irq(adapter->irq, netdev); 1037 1038 /* disable the switch */ 1039 ks8842_write16(adapter, 32, 0x0, REG_SW_ID_AND_ENABLE); 1040 1041 return 0; 1042 } 1043 1044 static netdev_tx_t ks8842_xmit_frame(struct sk_buff *skb, 1045 struct net_device *netdev) 1046 { 1047 int ret; 1048 struct ks8842_adapter *adapter = netdev_priv(netdev); 1049 1050 netdev_dbg(netdev, "%s: entry\n", __func__); 1051 1052 if (KS8842_USE_DMA(adapter)) { 1053 unsigned long flags; 1054 ret = ks8842_tx_frame_dma(skb, netdev); 1055 /* for now only allow one transfer at the time */ 1056 spin_lock_irqsave(&adapter->lock, flags); 1057 if (adapter->dma_tx.adesc) 1058 netif_stop_queue(netdev); 1059 spin_unlock_irqrestore(&adapter->lock, flags); 1060 return ret; 1061 } 1062 1063 ret = ks8842_tx_frame(skb, netdev); 1064 1065 if (ks8842_tx_fifo_space(adapter) < netdev->mtu + 8) 1066 netif_stop_queue(netdev); 1067 1068 return ret; 1069 } 1070 1071 static int ks8842_set_mac(struct net_device *netdev, void *p) 1072 { 1073 struct ks8842_adapter *adapter = netdev_priv(netdev); 1074 struct sockaddr *addr = p; 1075 char *mac = (u8 *)addr->sa_data; 1076 1077 netdev_dbg(netdev, "%s: entry\n", __func__); 1078 1079 if (!is_valid_ether_addr(addr->sa_data)) 1080 return -EADDRNOTAVAIL; 1081 1082 memcpy(netdev->dev_addr, mac, netdev->addr_len); 1083 1084 ks8842_write_mac_addr(adapter, mac); 1085 return 0; 1086 } 1087 1088 static void ks8842_tx_timeout_work(struct work_struct *work) 1089 { 1090 struct ks8842_adapter *adapter = 1091 container_of(work, struct ks8842_adapter, timeout_work); 1092 struct net_device *netdev = adapter->netdev; 1093 unsigned long flags; 1094 1095 netdev_dbg(netdev, "%s: entry\n", __func__); 1096 1097 spin_lock_irqsave(&adapter->lock, flags); 1098 1099 if (KS8842_USE_DMA(adapter)) 1100 ks8842_stop_dma(adapter); 1101 1102 /* disable interrupts */ 1103 ks8842_write16(adapter, 18, 0, REG_IER); 1104 ks8842_write16(adapter, 18, 0xFFFF, REG_ISR); 1105 1106 netif_stop_queue(netdev); 1107 1108 spin_unlock_irqrestore(&adapter->lock, flags); 1109 1110 ks8842_reset_hw(adapter); 1111 1112 ks8842_write_mac_addr(adapter, netdev->dev_addr); 1113 1114 ks8842_update_link_status(netdev, adapter); 1115 1116 if (KS8842_USE_DMA(adapter)) 1117 __ks8842_start_new_rx_dma(netdev); 1118 } 1119 1120 static void ks8842_tx_timeout(struct net_device *netdev) 1121 { 1122 struct ks8842_adapter *adapter = netdev_priv(netdev); 1123 1124 netdev_dbg(netdev, "%s: entry\n", __func__); 1125 1126 schedule_work(&adapter->timeout_work); 1127 } 1128 1129 static const struct net_device_ops ks8842_netdev_ops = { 1130 .ndo_open = ks8842_open, 1131 .ndo_stop = ks8842_close, 1132 .ndo_start_xmit = ks8842_xmit_frame, 1133 .ndo_set_mac_address = ks8842_set_mac, 1134 .ndo_tx_timeout = ks8842_tx_timeout, 1135 .ndo_validate_addr = eth_validate_addr 1136 }; 1137 1138 static const struct ethtool_ops ks8842_ethtool_ops = { 1139 .get_link = ethtool_op_get_link, 1140 }; 1141 1142 static int ks8842_probe(struct platform_device *pdev) 1143 { 1144 int err = -ENOMEM; 1145 struct resource *iomem; 1146 struct net_device *netdev; 1147 struct ks8842_adapter *adapter; 1148 struct ks8842_platform_data *pdata = dev_get_platdata(&pdev->dev); 1149 u16 id; 1150 unsigned i; 1151 1152 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1153 if (!request_mem_region(iomem->start, resource_size(iomem), DRV_NAME)) 1154 goto err_mem_region; 1155 1156 netdev = alloc_etherdev(sizeof(struct ks8842_adapter)); 1157 if (!netdev) 1158 goto err_alloc_etherdev; 1159 1160 SET_NETDEV_DEV(netdev, &pdev->dev); 1161 1162 adapter = netdev_priv(netdev); 1163 adapter->netdev = netdev; 1164 INIT_WORK(&adapter->timeout_work, ks8842_tx_timeout_work); 1165 adapter->hw_addr = ioremap(iomem->start, resource_size(iomem)); 1166 adapter->conf_flags = iomem->flags; 1167 1168 if (!adapter->hw_addr) 1169 goto err_ioremap; 1170 1171 adapter->irq = platform_get_irq(pdev, 0); 1172 if (adapter->irq < 0) { 1173 err = adapter->irq; 1174 goto err_get_irq; 1175 } 1176 1177 adapter->dev = (pdev->dev.parent) ? pdev->dev.parent : &pdev->dev; 1178 1179 /* DMA is only supported when accessed via timberdale */ 1180 if (!(adapter->conf_flags & MICREL_KS884X) && pdata && 1181 (pdata->tx_dma_channel != -1) && 1182 (pdata->rx_dma_channel != -1)) { 1183 adapter->dma_rx.channel = pdata->rx_dma_channel; 1184 adapter->dma_tx.channel = pdata->tx_dma_channel; 1185 } else { 1186 adapter->dma_rx.channel = -1; 1187 adapter->dma_tx.channel = -1; 1188 } 1189 1190 tasklet_init(&adapter->tasklet, ks8842_tasklet, (unsigned long)netdev); 1191 spin_lock_init(&adapter->lock); 1192 1193 netdev->netdev_ops = &ks8842_netdev_ops; 1194 netdev->ethtool_ops = &ks8842_ethtool_ops; 1195 1196 /* Check if a mac address was given */ 1197 i = netdev->addr_len; 1198 if (pdata) { 1199 for (i = 0; i < netdev->addr_len; i++) 1200 if (pdata->macaddr[i] != 0) 1201 break; 1202 1203 if (i < netdev->addr_len) 1204 /* an address was passed, use it */ 1205 memcpy(netdev->dev_addr, pdata->macaddr, 1206 netdev->addr_len); 1207 } 1208 1209 if (i == netdev->addr_len) { 1210 ks8842_read_mac_addr(adapter, netdev->dev_addr); 1211 1212 if (!is_valid_ether_addr(netdev->dev_addr)) 1213 eth_hw_addr_random(netdev); 1214 } 1215 1216 id = ks8842_read16(adapter, 32, REG_SW_ID_AND_ENABLE); 1217 1218 strcpy(netdev->name, "eth%d"); 1219 err = register_netdev(netdev); 1220 if (err) 1221 goto err_register; 1222 1223 platform_set_drvdata(pdev, netdev); 1224 1225 pr_info("Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n", 1226 (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7); 1227 1228 return 0; 1229 1230 err_register: 1231 err_get_irq: 1232 iounmap(adapter->hw_addr); 1233 err_ioremap: 1234 free_netdev(netdev); 1235 err_alloc_etherdev: 1236 release_mem_region(iomem->start, resource_size(iomem)); 1237 err_mem_region: 1238 return err; 1239 } 1240 1241 static int ks8842_remove(struct platform_device *pdev) 1242 { 1243 struct net_device *netdev = platform_get_drvdata(pdev); 1244 struct ks8842_adapter *adapter = netdev_priv(netdev); 1245 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1246 1247 unregister_netdev(netdev); 1248 tasklet_kill(&adapter->tasklet); 1249 iounmap(adapter->hw_addr); 1250 free_netdev(netdev); 1251 release_mem_region(iomem->start, resource_size(iomem)); 1252 return 0; 1253 } 1254 1255 1256 static struct platform_driver ks8842_platform_driver = { 1257 .driver = { 1258 .name = DRV_NAME, 1259 }, 1260 .probe = ks8842_probe, 1261 .remove = ks8842_remove, 1262 }; 1263 1264 module_platform_driver(ks8842_platform_driver); 1265 1266 MODULE_DESCRIPTION("Timberdale KS8842 ethernet driver"); 1267 MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>"); 1268 MODULE_LICENSE("GPL v2"); 1269 MODULE_ALIAS("platform:ks8842"); 1270 1271