/*
 * nosy - Snoop mode driver for TI PCILynx 1394 controllers
 * Copyright (C) 2002-2007 Kristian Høgsberg
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/sched.h> /* required for linux/wait.h */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/time64.h>
#include <linux/timex.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/dma-mapping.h>
#include <linux/atomic.h>
#include <asm/byteorder.h>

#include "nosy.h"
#include "nosy-user.h"

#define TCODE_PHY_PACKET		0x10
#define PCI_DEVICE_ID_TI_PCILYNX	0x8000

static char driver_name[] = KBUILD_MODNAME;

/* this is the physical layout of a PCL, its size is 128 bytes */
struct pcl {
	__le32 next;
	__le32 async_error_next;
	u32 user_data;
	__le32 pcl_status;
	__le32 remaining_transfer_count;
	__le32 next_data_buffer;
	struct {
		__le32 control;
		__le32 pointer;
	} buffer[13];
};

struct packet {
	unsigned int length;
	char data[0];
};

struct packet_buffer {
	char *data;
	size_t capacity;
	long total_packet_count, lost_packet_count;
	atomic_t size;
	struct packet *head, *tail;
	wait_queue_head_t wait;
};

struct pcilynx {
	struct pci_dev *pci_device;
	__iomem char *registers;

	struct pcl *rcv_start_pcl, *rcv_pcl;
	__le32 *rcv_buffer;

	dma_addr_t rcv_start_pcl_bus, rcv_pcl_bus, rcv_buffer_bus;

	spinlock_t client_list_lock;
	struct list_head client_list;

	struct miscdevice misc;
	struct list_head link;
	struct kref kref;
};

static inline struct pcilynx *
lynx_get(struct pcilynx *lynx)
{
	kref_get(&lynx->kref);

	return lynx;
}

static void
lynx_release(struct kref *kref)
{
	kfree(container_of(kref, struct pcilynx, kref));
}

static inline void
lynx_put(struct pcilynx *lynx)
{
	kref_put(&lynx->kref, lynx_release);
}

struct client {
	struct pcilynx *lynx;
	u32 tcode_mask;
	struct packet_buffer buffer;
	struct list_head link;
};

static DEFINE_MUTEX(card_mutex);
static LIST_HEAD(card_list);

static int
packet_buffer_init(struct packet_buffer *buffer, size_t capacity)
{
	buffer->data = kmalloc(capacity, GFP_KERNEL);
	if (buffer->data == NULL)
		return -ENOMEM;
	buffer->head = (struct packet *) buffer->data;
	buffer->tail = (struct packet *) buffer->data;
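	/* An empty ring is head == tail with size == 0; the interrupt
	 * path appends at tail via packet_buffer_put() while the
	 * reader consumes from head in packet_buffer_get(). */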
	buffer->capacity = capacity;
	buffer->lost_packet_count = 0;
	atomic_set(&buffer->size, 0);
	init_waitqueue_head(&buffer->wait);

	return 0;
}

static void
packet_buffer_destroy(struct packet_buffer *buffer)
{
	kfree(buffer->data);
}

static int
packet_buffer_get(struct client *client, char __user *data, size_t user_length)
{
	struct packet_buffer *buffer = &client->buffer;
	size_t length;
	char *end;

	if (wait_event_interruptible(buffer->wait,
				     atomic_read(&buffer->size) > 0) ||
	    list_empty(&client->lynx->link))
		return -ERESTARTSYS;

	if (atomic_read(&buffer->size) == 0)
		return -ENODEV;

	/* FIXME: Check length <= user_length. */

	end = buffer->data + buffer->capacity;
	length = buffer->head->length;

	if (&buffer->head->data[length] < end) {
		if (copy_to_user(data, buffer->head->data, length))
			return -EFAULT;
		buffer->head = (struct packet *) &buffer->head->data[length];
	} else {
		size_t split = end - buffer->head->data;

		if (copy_to_user(data, buffer->head->data, split))
			return -EFAULT;
		if (copy_to_user(data + split, buffer->data, length - split))
			return -EFAULT;
		buffer->head = (struct packet *) &buffer->data[length - split];
	}

	/*
	 * Decrease buffer->size as the last thing, since this is what
	 * keeps the interrupt from overwriting the packet we are
	 * retrieving from the buffer.
	 */
	atomic_sub(sizeof(struct packet) + length, &buffer->size);

	return length;
}

static void
packet_buffer_put(struct packet_buffer *buffer, void *data, size_t length)
{
	char *end;

	buffer->total_packet_count++;

	if (buffer->capacity <
	    atomic_read(&buffer->size) + sizeof(struct packet) + length) {
		buffer->lost_packet_count++;
		return;
	}

	end = buffer->data + buffer->capacity;
	buffer->tail->length = length;

	if (&buffer->tail->data[length] < end) {
		memcpy(buffer->tail->data, data, length);
		buffer->tail = (struct packet *) &buffer->tail->data[length];
	} else {
		size_t split = end - buffer->tail->data;

		memcpy(buffer->tail->data, data, split);
		memcpy(buffer->data, data + split, length - split);
		buffer->tail = (struct packet *) &buffer->data[length - split];
	}

	/* Finally, adjust buffer size and wake up userspace reader. */

	atomic_add(sizeof(struct packet) + length, &buffer->size);
	wake_up_interruptible(&buffer->wait);
}

static inline void
reg_write(struct pcilynx *lynx, int offset, u32 data)
{
	writel(data, lynx->registers + offset);
}

static inline u32
reg_read(struct pcilynx *lynx, int offset)
{
	return readl(lynx->registers + offset);
}

static inline void
reg_set_bits(struct pcilynx *lynx, int offset, u32 mask)
{
	reg_write(lynx, offset, (reg_read(lynx, offset) | mask));
}

/*
 * Maybe the pcl programs could be set up to just append data instead
 * of using a whole packet.
 */
static inline void
run_pcl(struct pcilynx *lynx, dma_addr_t pcl_bus,
	int dmachan)
{
	reg_write(lynx, DMA0_CURRENT_PCL + dmachan * 0x20, pcl_bus);
	reg_write(lynx, DMA0_CHAN_CTRL + dmachan * 0x20,
		  DMA_CHAN_CTRL_ENABLE | DMA_CHAN_CTRL_LINK);
}

static int
set_phy_reg(struct pcilynx *lynx, int addr, int val)
{
	if (addr > 15) {
		dev_err(&lynx->pci_device->dev,
			"PHY register address %d out of range\n", addr);
		return -1;
	}
	if (val > 0xff) {
		dev_err(&lynx->pci_device->dev,
			"PHY register value %d out of range\n", val);
		return -1;
	}
	reg_write(lynx, LINK_PHY, LINK_PHY_WRITE |
		  LINK_PHY_ADDR(addr) | LINK_PHY_WDATA(val));

	return 0;
}

static int
nosy_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	struct client *client;
	struct pcilynx *tmp, *lynx = NULL;

	mutex_lock(&card_mutex);
	list_for_each_entry(tmp, &card_list, link)
		if (tmp->misc.minor == minor) {
			lynx = lynx_get(tmp);
			break;
		}
	mutex_unlock(&card_mutex);
	if (lynx == NULL)
		return -ENODEV;

	client = kmalloc(sizeof *client, GFP_KERNEL);
	if (client == NULL)
		goto fail;

	client->tcode_mask = ~0;
	client->lynx = lynx;
	INIT_LIST_HEAD(&client->link);

	if (packet_buffer_init(&client->buffer, 128 * 1024) < 0)
		goto fail;

	file->private_data = client;

	return nonseekable_open(inode, file);
fail:
	kfree(client);
	lynx_put(lynx);

	return -ENOMEM;
}

static int
nosy_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
	struct pcilynx *lynx = client->lynx;

	spin_lock_irq(&lynx->client_list_lock);
	list_del_init(&client->link);
	spin_unlock_irq(&lynx->client_list_lock);

	packet_buffer_destroy(&client->buffer);
	kfree(client);
	lynx_put(lynx);

	return 0;
}

static unsigned int
nosy_poll(struct file *file, poll_table *pt)
{
	struct client *client = file->private_data;
	unsigned int ret = 0;

	poll_wait(file, &client->buffer.wait, pt);

	if (atomic_read(&client->buffer.size) > 0)
		ret = POLLIN | POLLRDNORM;

	if (list_empty(&client->lynx->link))
		ret |= POLLHUP;

	return ret;
}

static ssize_t
nosy_read(struct file *file, char __user *buffer, size_t count, loff_t *offset)
{
	struct client *client = file->private_data;

	return packet_buffer_get(client, buffer, count);
}

static long
nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct client *client = file->private_data;
	spinlock_t *client_list_lock = &client->lynx->client_list_lock;
	struct nosy_stats stats;

	switch (cmd) {
	case NOSY_IOC_GET_STATS:
		spin_lock_irq(client_list_lock);
		stats.total_packet_count = client->buffer.total_packet_count;
		stats.lost_packet_count = client->buffer.lost_packet_count;
		spin_unlock_irq(client_list_lock);

		if (copy_to_user((void __user *) arg, &stats, sizeof stats))
			return -EFAULT;
		else
			return 0;

	case NOSY_IOC_START:
		spin_lock_irq(client_list_lock);
		list_add_tail(&client->link, &client->lynx->client_list);
		spin_unlock_irq(client_list_lock);

		return 0;

	case NOSY_IOC_STOP:
		spin_lock_irq(client_list_lock);
		list_del_init(&client->link);
		spin_unlock_irq(client_list_lock);

		return 0;

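	/* arg is a bitmask of IEEE 1394 transaction codes to snoop;
	 * bit TCODE_PHY_PACKET (0x10) selects PHY packets, see
	 * packet_irq_handler(). */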
	case NOSY_IOC_FILTER:
		spin_lock_irq(client_list_lock);
		client->tcode_mask = arg;
		spin_unlock_irq(client_list_lock);

		return 0;

	default:
		return -EINVAL;
		/* Flush buffer, configure filter. */
	}
}

static const struct file_operations nosy_ops = {
	.owner = THIS_MODULE,
	.read = nosy_read,
	.unlocked_ioctl = nosy_ioctl,
	.poll = nosy_poll,
	.open = nosy_open,
	.release = nosy_release,
};

#define PHY_PACKET_SIZE 12 /* 1 payload, 1 inverse, 1 ack = 3 quadlets */

static void
packet_irq_handler(struct pcilynx *lynx)
{
	struct client *client;
	u32 tcode_mask, tcode, timestamp;
	size_t length;
	struct timespec64 ts64;

	/* FIXME: Also report rcv_speed. */

	length = __le32_to_cpu(lynx->rcv_pcl->pcl_status) & 0x00001fff;
	tcode = __le32_to_cpu(lynx->rcv_buffer[1]) >> 4 & 0xf;

	ktime_get_real_ts64(&ts64);
	timestamp = ts64.tv_nsec / NSEC_PER_USEC;
	lynx->rcv_buffer[0] = (__force __le32)timestamp;

	if (length == PHY_PACKET_SIZE)
		tcode_mask = 1 << TCODE_PHY_PACKET;
	else
		tcode_mask = 1 << tcode;

	spin_lock(&lynx->client_list_lock);

	list_for_each_entry(client, &lynx->client_list, link)
		if (client->tcode_mask & tcode_mask)
			packet_buffer_put(&client->buffer,
					  lynx->rcv_buffer, length + 4);

	spin_unlock(&lynx->client_list_lock);
}

static void
bus_reset_irq_handler(struct pcilynx *lynx)
{
	struct client *client;
	struct timespec64 ts64;
	u32 timestamp;

	ktime_get_real_ts64(&ts64);
	timestamp = ts64.tv_nsec / NSEC_PER_USEC;

	spin_lock(&lynx->client_list_lock);

	list_for_each_entry(client, &lynx->client_list, link)
		packet_buffer_put(&client->buffer, &timestamp, 4);

	spin_unlock(&lynx->client_list_lock);
}

static irqreturn_t
irq_handler(int irq, void *device)
{
	struct pcilynx *lynx = device;
	u32 pci_int_status;

	pci_int_status = reg_read(lynx, PCI_INT_STATUS);

	if (pci_int_status == ~0)
		/* Card was ejected. */
		return IRQ_NONE;

	if ((pci_int_status & PCI_INT_INT_PEND) == 0)
		/* Not our interrupt, bail out quickly. */
		return IRQ_NONE;

	if ((pci_int_status & PCI_INT_P1394_INT) != 0) {
		u32 link_int_status;

		link_int_status = reg_read(lynx, LINK_INT_STATUS);
		reg_write(lynx, LINK_INT_STATUS, link_int_status);

		if ((link_int_status & LINK_INT_PHY_BUSRESET) > 0)
			bus_reset_irq_handler(lynx);
	}

	/* Clear the PCI_INT_STATUS register only after clearing the
	 * LINK_INT_STATUS register; otherwise the PCI_INT_P1394 will
	 * be set again immediately. */

	reg_write(lynx, PCI_INT_STATUS, pci_int_status);

	if ((pci_int_status & PCI_INT_DMA0_HLT) > 0) {
		packet_irq_handler(lynx);
		run_pcl(lynx, lynx->rcv_start_pcl_bus, 0);
	}

	return IRQ_HANDLED;
}

static void
remove_card(struct pci_dev *dev)
{
	struct pcilynx *lynx = pci_get_drvdata(dev);
	struct client *client;

	mutex_lock(&card_mutex);
	list_del_init(&lynx->link);
	misc_deregister(&lynx->misc);
	mutex_unlock(&card_mutex);

	reg_write(lynx, PCI_INT_ENABLE, 0);
	free_irq(lynx->pci_device->irq, lynx);

	spin_lock_irq(&lynx->client_list_lock);
	list_for_each_entry(client, &lynx->client_list, link)
		wake_up_interruptible(&client->buffer.wait);
	spin_unlock_irq(&lynx->client_list_lock);

	pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
			    lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus);
	pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
			    lynx->rcv_pcl, lynx->rcv_pcl_bus);
	pci_free_consistent(lynx->pci_device, PAGE_SIZE,
			    lynx->rcv_buffer, lynx->rcv_buffer_bus);

	iounmap(lynx->registers);
	pci_disable_device(dev);
	lynx_put(lynx);
}

#define RCV_BUFFER_SIZE (16 * 1024)

static int
add_card(struct pci_dev *dev, const struct pci_device_id *unused)
{
	struct pcilynx *lynx;
	u32 p, end;
	int ret, i;

	if (pci_set_dma_mask(dev, DMA_BIT_MASK(32))) {
		dev_err(&dev->dev,
			"DMA address limits not supported for PCILynx hardware\n");
		return -ENXIO;
	}
	if (pci_enable_device(dev)) {
		dev_err(&dev->dev, "Failed to enable PCILynx hardware\n");
		return -ENXIO;
	}
	pci_set_master(dev);

	lynx = kzalloc(sizeof *lynx, GFP_KERNEL);
	if (lynx == NULL) {
		dev_err(&dev->dev, "Failed to allocate control structure\n");
		ret = -ENOMEM;
		goto fail_disable;
	}
	lynx->pci_device = dev;
	pci_set_drvdata(dev, lynx);

	spin_lock_init(&lynx->client_list_lock);
	INIT_LIST_HEAD(&lynx->client_list);
	kref_init(&lynx->kref);

	lynx->registers = ioremap_nocache(pci_resource_start(dev, 0),
					  PCILYNX_MAX_REGISTER);

	lynx->rcv_start_pcl = pci_alloc_consistent(lynx->pci_device,
				sizeof(struct pcl), &lynx->rcv_start_pcl_bus);
	lynx->rcv_pcl = pci_alloc_consistent(lynx->pci_device,
				sizeof(struct pcl), &lynx->rcv_pcl_bus);
	lynx->rcv_buffer = pci_alloc_consistent(lynx->pci_device,
				RCV_BUFFER_SIZE, &lynx->rcv_buffer_bus);
	if (lynx->rcv_start_pcl == NULL ||
	    lynx->rcv_pcl == NULL ||
	    lynx->rcv_buffer == NULL) {
		dev_err(&dev->dev, "Failed to allocate receive buffer\n");
		ret = -ENOMEM;
		goto fail_deallocate;
	}
	lynx->rcv_start_pcl->next = cpu_to_le32(lynx->rcv_pcl_bus);
	lynx->rcv_pcl->next = cpu_to_le32(PCL_NEXT_INVALID);
	lynx->rcv_pcl->async_error_next = cpu_to_le32(PCL_NEXT_INVALID);

	lynx->rcv_pcl->buffer[0].control =
		cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2044);
	lynx->rcv_pcl->buffer[0].pointer =
		cpu_to_le32(lynx->rcv_buffer_bus + 4);
	p = lynx->rcv_buffer_bus + 2048;
	end = lynx->rcv_buffer_bus + RCV_BUFFER_SIZE;
	for (i = 1; p < end; i++, p += 2048) {
		lynx->rcv_pcl->buffer[i].control =
			cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2048);
		lynx->rcv_pcl->buffer[i].pointer = cpu_to_le32(p);
	}
	lynx->rcv_pcl->buffer[i - 1].control |= cpu_to_le32(PCL_LAST_BUFF);

	reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
	/* Fix buggy cards with autoboot pin not tied low: */
	reg_write(lynx, DMA0_CHAN_CTRL, 0);
	reg_write(lynx, DMA_GLOBAL_REGISTER, 0x00 << 24);

#if 0
	/* now, looking for PHY register set */
	if ((get_phy_reg(lynx, 2) & 0xe0) == 0xe0) {
		lynx->phyic.reg_1394a = 1;
		PRINT(KERN_INFO, lynx->id,
		      "found 1394a conform PHY (using extended register set)");
		lynx->phyic.vendor = get_phy_vendorid(lynx);
		lynx->phyic.product = get_phy_productid(lynx);
	} else {
		lynx->phyic.reg_1394a = 0;
		PRINT(KERN_INFO, lynx->id, "found old 1394 PHY");
	}
#endif

	/* Setup the general receive FIFO max size. */
	reg_write(lynx, FIFO_SIZES, 255);

	reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_DMA_ALL);

	reg_write(lynx, LINK_INT_ENABLE,
		  LINK_INT_PHY_TIME_OUT | LINK_INT_PHY_REG_RCVD |
		  LINK_INT_PHY_BUSRESET | LINK_INT_IT_STUCK |
		  LINK_INT_AT_STUCK | LINK_INT_SNTRJ |
		  LINK_INT_TC_ERR | LINK_INT_GRF_OVER_FLOW |
		  LINK_INT_ITF_UNDER_FLOW | LINK_INT_ATF_UNDER_FLOW);

	/* Disable the L flag in self ID packets. */
	set_phy_reg(lynx, 4, 0);

	/* Put this baby into snoop mode */
	reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_SNOOP_ENABLE);

	run_pcl(lynx, lynx->rcv_start_pcl_bus, 0);

	if (request_irq(dev->irq, irq_handler, IRQF_SHARED,
			driver_name, lynx)) {
		dev_err(&dev->dev,
			"Failed to allocate shared interrupt %d\n", dev->irq);
		ret = -EIO;
		goto fail_deallocate;
	}

	lynx->misc.parent = &dev->dev;
	lynx->misc.minor = MISC_DYNAMIC_MINOR;
	lynx->misc.name = "nosy";
	lynx->misc.fops = &nosy_ops;

	mutex_lock(&card_mutex);
	ret = misc_register(&lynx->misc);
	if (ret) {
		dev_err(&dev->dev, "Failed to register misc char device\n");
		mutex_unlock(&card_mutex);
		goto fail_free_irq;
	}
	list_add_tail(&lynx->link, &card_list);
	mutex_unlock(&card_mutex);

	dev_info(&dev->dev,
		 "Initialized PCILynx IEEE1394 card, irq=%d\n", dev->irq);

	return 0;

fail_free_irq:
	reg_write(lynx, PCI_INT_ENABLE, 0);
	free_irq(lynx->pci_device->irq, lynx);

fail_deallocate:
	if (lynx->rcv_start_pcl)
		pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
				    lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus);
	if (lynx->rcv_pcl)
		pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
				    lynx->rcv_pcl, lynx->rcv_pcl_bus);
	if (lynx->rcv_buffer)
		pci_free_consistent(lynx->pci_device, PAGE_SIZE,
				    lynx->rcv_buffer, lynx->rcv_buffer_bus);
	iounmap(lynx->registers);
	kfree(lynx);

fail_disable:
	pci_disable_device(dev);

	return ret;
}

static struct pci_device_id pci_table[] = {
	{
		.vendor = PCI_VENDOR_ID_TI,
		.device = PCI_DEVICE_ID_TI_PCILYNX,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{ }	/* Terminating entry */
};

MODULE_DEVICE_TABLE(pci, pci_table);

static struct pci_driver lynx_pci_driver = {
	.name = driver_name,
	.id_table = pci_table,
	.probe = add_card,
	.remove = remove_card,
};

module_pci_driver(lynx_pci_driver);

MODULE_AUTHOR("Kristian Hoegsberg");
MODULE_DESCRIPTION("Snoop mode driver for TI pcilynx 1394 controllers");
MODULE_LICENSE("GPL");