// SPDX-License-Identifier: GPL-2.0-only
/*
 * Mediated virtual PCI serial host device driver
 *
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *     Author: Neo Jia <cjia@nvidia.com>
 *             Kirti Wankhede <kwankhede@nvidia.com>
 *
 * Sample driver that creates mdev device that simulates serial port over PCI
 * card.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/uuid.h>
#include <linux/vfio.h>
#include <linux/iommu.h>
#include <linux/sysfs.h>
#include <linux/ctype.h>
#include <linux/file.h>
#include <linux/mdev.h>
#include <linux/pci.h>
#include <linux/serial.h>
#include <uapi/linux/serial_reg.h>
#include <linux/eventfd.h>

/*
 * #defines
 */

#define VERSION_STRING	"0.1"
#define DRIVER_AUTHOR	"NVIDIA Corporation"

#define MTTY_CLASS_NAME	"mtty"

#define MTTY_NAME	"mtty"

#define MTTY_STRING_LEN	16

#define MTTY_CONFIG_SPACE_SIZE	0xff
#define MTTY_IO_BAR_SIZE	0x8
#define MTTY_MMIO_BAR_SIZE	0x100000

#define STORE_LE16(addr, val)	(*(u16 *)addr = val)
#define STORE_LE32(addr, val)	(*(u32 *)addr = val)

#define MAX_FIFO_SIZE	16

#define CIRCULAR_BUF_INC_IDX(idx)	(idx = (idx + 1) & (MAX_FIFO_SIZE - 1))

#define MTTY_VFIO_PCI_OFFSET_SHIFT	40

#define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off)	(off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
#define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index)	\
				((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
#define MTTY_VFIO_PCI_OFFSET_MASK	\
				(((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
#define MAX_MTTYS	24

/*
 * Global Structures
 */

static struct mtty_dev {
	dev_t		vd_devt;
	struct class	*vd_class;
	struct cdev	vd_cdev;
	struct idr	vd_idr;
	struct device	dev;
} mtty_dev;

struct mdev_region_info {
	u64 start;
	u64 phys_start;
	u32 size;
	u64 vfio_offset;
};

#if defined(DEBUG_REGS)
static const char *wr_reg[] = {
	"TX",
	"IER",
	"FCR",
	"LCR",
	"MCR",
	"LSR",
	"MSR",
	"SCR"
};

static const char *rd_reg[] = {
	"RX",
	"IER",
	"IIR",
	"LCR",
	"MCR",
	"LSR",
	"MSR",
	"SCR"
};
#endif

/* loop back buffer */
struct rxtx {
	u8 fifo[MAX_FIFO_SIZE];
	u8 head, tail;
	u8 count;
};

struct serial_port {
	u8 uart_reg[8];		/* 8 registers */
	struct rxtx rxtx;	/* loop back buffer */
	bool dlab;
	bool overrun;
	u16 divisor;
	u8 fcr;			/* FIFO control register */
	u8 max_fifo_size;
	u8 intr_trigger_level;	/* interrupt trigger level */
};

/* State of each mdev device */
struct mdev_state {
	struct vfio_device vdev;
	int irq_fd;
	struct eventfd_ctx *intx_evtfd;
	struct eventfd_ctx *msi_evtfd;
	int irq_index;
	u8 *vconfig;
	struct mutex ops_lock;
	struct mdev_device *mdev;
	struct mdev_region_info region_info[VFIO_PCI_NUM_REGIONS];
	u32 bar_mask[VFIO_PCI_NUM_REGIONS];
	struct list_head next;
	struct serial_port s[2];
	struct mutex rxtx_lock;
	struct vfio_device_info dev_info;
	int nr_ports;
};

static atomic_t mdev_avail_ports = ATOMIC_INIT(MAX_MTTYS);

static const struct file_operations vd_fops = {
	.owner = THIS_MODULE,
};

static const struct vfio_device_ops mtty_dev_ops;
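
/*
 * Worked example of the region offset encoding above (illustration only):
 * with MTTY_VFIO_PCI_OFFSET_SHIFT = 40, a userspace access at
 * pos = ((u64)VFIO_PCI_CONFIG_REGION_INDEX << 40) | 0x3c decodes to
 * MTTY_VFIO_PCI_OFFSET_TO_INDEX(pos) == VFIO_PCI_CONFIG_REGION_INDEX
 * (the emulated PCI config space) and an in-region offset of
 * (pos & MTTY_VFIO_PCI_OFFSET_MASK) == 0x3c, the Interrupt Line register.
 */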

/* function prototypes */

static int mtty_trigger_interrupt(struct mdev_state *mdev_state);

/* Helper functions */

static void dump_buffer(u8 *buf, uint32_t count)
{
#if defined(DEBUG)
	int i;

	pr_info("Buffer:\n");
	for (i = 0; i < count; i++) {
		pr_info("%2x ", *(buf + i));
		if ((i + 1) % 16 == 0)
			pr_info("\n");
	}
#endif
}

static void mtty_create_config_space(struct mdev_state *mdev_state)
{
	/* PCI dev ID */
	STORE_LE32((u32 *) &mdev_state->vconfig[0x0], 0x32534348);

	/* Control: I/O+, Mem-, BusMaster- */
	STORE_LE16((u16 *) &mdev_state->vconfig[0x4], 0x0001);

	/* Status: capabilities list absent */
	STORE_LE16((u16 *) &mdev_state->vconfig[0x6], 0x0200);

	/* Rev ID */
	mdev_state->vconfig[0x8] = 0x10;

	/* programming interface class : 16550-compatible serial controller */
	mdev_state->vconfig[0x9] = 0x02;

	/* Sub class : 00 */
	mdev_state->vconfig[0xa] = 0x00;

	/* Base class : Simple Communication controllers */
	mdev_state->vconfig[0xb] = 0x07;

	/* base address registers */
	/* BAR0: IO space */
	STORE_LE32((u32 *) &mdev_state->vconfig[0x10], 0x000001);
	mdev_state->bar_mask[0] = ~(MTTY_IO_BAR_SIZE) + 1;

	if (mdev_state->nr_ports == 2) {
		/* BAR1: IO space */
		STORE_LE32((u32 *) &mdev_state->vconfig[0x14], 0x000001);
		mdev_state->bar_mask[1] = ~(MTTY_IO_BAR_SIZE) + 1;
	}

	/* Subsystem ID */
	STORE_LE32((u32 *) &mdev_state->vconfig[0x2c], 0x32534348);

	mdev_state->vconfig[0x34] = 0x00;	/* Cap Ptr */
	mdev_state->vconfig[0x3d] = 0x01;	/* interrupt pin (INTA#) */

	/* Vendor specific data */
	mdev_state->vconfig[0x40] = 0x23;
	mdev_state->vconfig[0x43] = 0x80;
	mdev_state->vconfig[0x44] = 0x23;
	mdev_state->vconfig[0x48] = 0x23;
	mdev_state->vconfig[0x4c] = 0x23;

	/* 0x60..0x6e spell "PCI Serial/UART" in ASCII */
	mdev_state->vconfig[0x60] = 0x50;
	mdev_state->vconfig[0x61] = 0x43;
	mdev_state->vconfig[0x62] = 0x49;
	mdev_state->vconfig[0x63] = 0x20;
	mdev_state->vconfig[0x64] = 0x53;
	mdev_state->vconfig[0x65] = 0x65;
	mdev_state->vconfig[0x66] = 0x72;
	mdev_state->vconfig[0x67] = 0x69;
	mdev_state->vconfig[0x68] = 0x61;
	mdev_state->vconfig[0x69] = 0x6c;
	mdev_state->vconfig[0x6a] = 0x2f;
	mdev_state->vconfig[0x6b] = 0x55;
	mdev_state->vconfig[0x6c] = 0x41;
	mdev_state->vconfig[0x6d] = 0x52;
	mdev_state->vconfig[0x6e] = 0x54;
}
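
/*
 * Note on the BAR handling below (a sketch of the standard PCI sizing
 * protocol, not driver-specific): the guest writes 0xffffffff to a BAR,
 * reads it back, and derives the size from the returned mask.  Here
 * bar_mask[] was set to ~(MTTY_IO_BAR_SIZE) + 1 = 0xfffffff8, so the
 * read-back is 0xfffffff9 (low bit = I/O space indicator), from which
 * the guest computes ~(0xfffffff9 & ~0x3) + 1 = 8 bytes.
 */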

static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
				 u8 *buf, u32 count)
{
	u32 cfg_addr, bar_mask, bar_index = 0;

	switch (offset) {
	case 0x04: /* device control */
	case 0x06: /* device status */
		/* do nothing */
		break;
	case 0x3c:  /* interrupt line */
		mdev_state->vconfig[0x3c] = buf[0];
		break;
	case 0x3d:
		/*
		 * Interrupt Pin is hardwired to INTA.
		 * This field is write protected by hardware
		 */
		break;
	case 0x10:  /* BAR0 */
	case 0x14:  /* BAR1 */
		if (offset == 0x10)
			bar_index = 0;
		else if (offset == 0x14)
			bar_index = 1;

		if ((mdev_state->nr_ports == 1) && (bar_index == 1)) {
			STORE_LE32(&mdev_state->vconfig[offset], 0);
			break;
		}

		cfg_addr = *(u32 *)buf;
		pr_info("BAR%d addr 0x%x\n", bar_index, cfg_addr);

		if (cfg_addr == 0xffffffff) {
			bar_mask = mdev_state->bar_mask[bar_index];
			cfg_addr = (cfg_addr & bar_mask);
		}

		cfg_addr |= (mdev_state->vconfig[offset] & 0x3ul);
		STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
		break;
	case 0x18:  /* BAR2 */
	case 0x1c:  /* BAR3 */
	case 0x20:  /* BAR4 */
		STORE_LE32(&mdev_state->vconfig[offset], 0);
		break;
	default:
		pr_info("PCI config write @0x%x of %d bytes not handled\n",
			offset, count);
		break;
	}
}
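
/*
 * The two handlers below implement the loop back: a byte the guest writes
 * to UART_TX is pushed into the port's rxtx FIFO and handed back when the
 * guest later reads UART_RX, so each virtual port echoes its own output.
 */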

static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state,
			     u16 offset, u8 *buf, u32 count)
{
	u8 data = *buf;

	/* Handle data written by guest */
	switch (offset) {
	case UART_TX:
		/* if DLAB set, data is LSB of divisor */
		if (mdev_state->s[index].dlab) {
			mdev_state->s[index].divisor |= data;
			break;
		}

		mutex_lock(&mdev_state->rxtx_lock);

		/* save in TX buffer */
		if (mdev_state->s[index].rxtx.count <
				mdev_state->s[index].max_fifo_size) {
			mdev_state->s[index].rxtx.fifo[
					mdev_state->s[index].rxtx.head] = data;
			mdev_state->s[index].rxtx.count++;
			CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.head);
			mdev_state->s[index].overrun = false;

			/*
			 * Trigger interrupt if receive data interrupt is
			 * enabled and fifo reached trigger level
			 */
			if ((mdev_state->s[index].uart_reg[UART_IER] &
						UART_IER_RDI) &&
			    (mdev_state->s[index].rxtx.count ==
					mdev_state->s[index].intr_trigger_level)) {
				/* trigger interrupt */
#if defined(DEBUG_INTR)
				pr_err("Serial port %d: Fifo level trigger\n",
					index);
#endif
				mtty_trigger_interrupt(mdev_state);
			}
		} else {
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: Buffer Overflow\n", index);
#endif
			mdev_state->s[index].overrun = true;

			/*
			 * Trigger interrupt if receiver line status interrupt
			 * is enabled
			 */
			if (mdev_state->s[index].uart_reg[UART_IER] &
							UART_IER_RLSI)
				mtty_trigger_interrupt(mdev_state);
		}
		mutex_unlock(&mdev_state->rxtx_lock);
		break;

	case UART_IER:
		/* if DLAB set, data is MSB of divisor */
		if (mdev_state->s[index].dlab)
			mdev_state->s[index].divisor |= (u16)data << 8;
		else {
			mdev_state->s[index].uart_reg[offset] = data;
			mutex_lock(&mdev_state->rxtx_lock);
			if ((data & UART_IER_THRI) &&
			    (mdev_state->s[index].rxtx.head ==
					mdev_state->s[index].rxtx.tail)) {
#if defined(DEBUG_INTR)
				pr_err("Serial port %d: IER_THRI write\n",
					index);
#endif
				mtty_trigger_interrupt(mdev_state);
			}

			mutex_unlock(&mdev_state->rxtx_lock);
		}

		break;

	case UART_FCR:
		mdev_state->s[index].fcr = data;

		mutex_lock(&mdev_state->rxtx_lock);
		if (data & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT)) {
			/* clear loop back FIFO */
			mdev_state->s[index].rxtx.count = 0;
			mdev_state->s[index].rxtx.head = 0;
			mdev_state->s[index].rxtx.tail = 0;
		}
		mutex_unlock(&mdev_state->rxtx_lock);

		switch (data & UART_FCR_TRIGGER_MASK) {
		case UART_FCR_TRIGGER_1:
			mdev_state->s[index].intr_trigger_level = 1;
			break;

		case UART_FCR_TRIGGER_4:
			mdev_state->s[index].intr_trigger_level = 4;
			break;

		case UART_FCR_TRIGGER_8:
			mdev_state->s[index].intr_trigger_level = 8;
			break;

		case UART_FCR_TRIGGER_14:
			mdev_state->s[index].intr_trigger_level = 14;
			break;
		}

		/*
		 * Set trigger level to 1 otherwise or implement timer with
		 * timeout of 4 characters and on expiring that timer set
		 * Receive data timeout in IIR register
		 */
		mdev_state->s[index].intr_trigger_level = 1;
		if (data & UART_FCR_ENABLE_FIFO)
			mdev_state->s[index].max_fifo_size = MAX_FIFO_SIZE;
		else {
			mdev_state->s[index].max_fifo_size = 1;
			mdev_state->s[index].intr_trigger_level = 1;
		}

		break;

	case UART_LCR:
		if (data & UART_LCR_DLAB) {
			mdev_state->s[index].dlab = true;
			mdev_state->s[index].divisor = 0;
		} else
			mdev_state->s[index].dlab = false;

		mdev_state->s[index].uart_reg[offset] = data;
		break;

	case UART_MCR:
		mdev_state->s[index].uart_reg[offset] = data;

		if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
				(data & UART_MCR_OUT2)) {
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: MCR_OUT2 write\n", index);
#endif
			mtty_trigger_interrupt(mdev_state);
		}

		if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
				(data & (UART_MCR_RTS | UART_MCR_DTR))) {
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: MCR RTS/DTR write\n", index);
#endif
			mtty_trigger_interrupt(mdev_state);
		}
		break;

	case UART_LSR:
	case UART_MSR:
		/* do nothing */
		break;

	case UART_SCR:
		mdev_state->s[index].uart_reg[offset] = data;
		break;

	default:
		break;
	}
}
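
/*
 * Worked IIR example (illustration only): with UART_IER_RDI enabled,
 * trigger level 1 and one byte queued in the FIFO, the read handler
 * below returns UART_IIR_RDI (0x04) with bits 6-7 set, i.e. 0xc4;
 * bit 0 stays clear, meaning an interrupt is pending.
 */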

static void handle_bar_read(unsigned int index, struct mdev_state *mdev_state,
			    u16 offset, u8 *buf, u32 count)
{
	/* Handle read requests by guest */
	switch (offset) {
	case UART_RX:
		/* if DLAB set, data is LSB of divisor */
		if (mdev_state->s[index].dlab) {
			*buf = (u8)mdev_state->s[index].divisor;
			break;
		}

		mutex_lock(&mdev_state->rxtx_lock);
		/* return data in tx buffer */
		if (mdev_state->s[index].rxtx.head !=
				mdev_state->s[index].rxtx.tail) {
			*buf = mdev_state->s[index].rxtx.fifo[
					mdev_state->s[index].rxtx.tail];
			mdev_state->s[index].rxtx.count--;
			CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.tail);
		}

		if (mdev_state->s[index].rxtx.head ==
				mdev_state->s[index].rxtx.tail) {
			/*
			 * Trigger interrupt if tx buffer empty interrupt is
			 * enabled and fifo is empty
			 */
#if defined(DEBUG_INTR)
			pr_err("Serial port %d: Buffer Empty\n", index);
#endif
			if (mdev_state->s[index].uart_reg[UART_IER] &
							UART_IER_THRI)
				mtty_trigger_interrupt(mdev_state);
		}
		mutex_unlock(&mdev_state->rxtx_lock);

		break;

	case UART_IER:
		if (mdev_state->s[index].dlab) {
			*buf = (u8)(mdev_state->s[index].divisor >> 8);
			break;
		}
		*buf = mdev_state->s[index].uart_reg[offset] & 0x0f;
		break;

	case UART_IIR:
	{
		u8 ier = mdev_state->s[index].uart_reg[UART_IER];
		*buf = 0;

		mutex_lock(&mdev_state->rxtx_lock);
		/* Interrupt priority 1: Parity, overrun, framing or break */
		if ((ier & UART_IER_RLSI) && mdev_state->s[index].overrun)
			*buf |= UART_IIR_RLSI;

		/* Interrupt priority 2: FIFO trigger level reached */
		if ((ier & UART_IER_RDI) &&
		    (mdev_state->s[index].rxtx.count >=
				mdev_state->s[index].intr_trigger_level))
			*buf |= UART_IIR_RDI;

		/* Interrupt priority 3: transmitter holding register empty */
		if ((ier & UART_IER_THRI) &&
		    (mdev_state->s[index].rxtx.head ==
				mdev_state->s[index].rxtx.tail))
			*buf |= UART_IIR_THRI;

		/* Interrupt priority 4: Modem status: CTS, DSR, RI or DCD */
		if ((ier & UART_IER_MSI) &&
		    (mdev_state->s[index].uart_reg[UART_MCR] &
				(UART_MCR_RTS | UART_MCR_DTR)))
			*buf |= UART_IIR_MSI;

		/* bit0: 0=> interrupt pending, 1=> no interrupt is pending */
		if (*buf == 0)
			*buf = UART_IIR_NO_INT;

		/* set bit 6 & 7 to be 16550 compatible */
		*buf |= 0xC0;
		mutex_unlock(&mdev_state->rxtx_lock);
	}
	break;

	case UART_LCR:
	case UART_MCR:
		*buf = mdev_state->s[index].uart_reg[offset];
		break;

	case UART_LSR:
	{
		u8 lsr = 0;

		mutex_lock(&mdev_state->rxtx_lock);
		/* at least one char in FIFO */
		if (mdev_state->s[index].rxtx.head !=
				mdev_state->s[index].rxtx.tail)
			lsr |= UART_LSR_DR;

		/* if FIFO overrun */
		if (mdev_state->s[index].overrun)
			lsr |= UART_LSR_OE;

		/* transmit FIFO empty and transmitter empty */
		if (mdev_state->s[index].rxtx.head ==
				mdev_state->s[index].rxtx.tail)
			lsr |= UART_LSR_TEMT | UART_LSR_THRE;

		mutex_unlock(&mdev_state->rxtx_lock);
		*buf = lsr;
		break;
	}
	case UART_MSR:
		*buf = UART_MSR_DSR | UART_MSR_DDSR | UART_MSR_DCD;

		mutex_lock(&mdev_state->rxtx_lock);
		/* if AFE is 1 and the FIFO has space, set CTS bit */
		if (mdev_state->s[index].uart_reg[UART_MCR] &
						UART_MCR_AFE) {
			if (mdev_state->s[index].rxtx.count <
					mdev_state->s[index].max_fifo_size)
				*buf |= UART_MSR_CTS | UART_MSR_DCTS;
		} else
			*buf |= UART_MSR_CTS | UART_MSR_DCTS;
		mutex_unlock(&mdev_state->rxtx_lock);

		break;

	case UART_SCR:
		*buf = mdev_state->s[index].uart_reg[offset];
		break;

	default:
		break;
	}
}

static void mdev_read_base(struct mdev_state *mdev_state)
{
	int index, pos;
	u32 start_lo, start_hi;
	u32 mem_type;

	pos = PCI_BASE_ADDRESS_0;

	for (index = 0; index <= VFIO_PCI_BAR5_REGION_INDEX; index++) {

		if (!mdev_state->region_info[index].size)
			continue;

		start_lo = (*(u32 *)(mdev_state->vconfig + pos)) &
			PCI_BASE_ADDRESS_MEM_MASK;
		mem_type = (*(u32 *)(mdev_state->vconfig + pos)) &
			PCI_BASE_ADDRESS_MEM_TYPE_MASK;

		switch (mem_type) {
		case PCI_BASE_ADDRESS_MEM_TYPE_64:
			start_hi = (*(u32 *)(mdev_state->vconfig + pos + 4));
			pos += 4;
			break;
		case PCI_BASE_ADDRESS_MEM_TYPE_32:
		case PCI_BASE_ADDRESS_MEM_TYPE_1M:
			/* 1M mem BAR treated as 32-bit BAR */
		default:
			/* mem unknown type treated as 32-bit BAR */
			start_hi = 0;
			break;
		}
		pos += 4;
		mdev_state->region_info[index].start = ((u64)start_hi << 32) |
							start_lo;
	}
}
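
/*
 * mdev_access() below is the single dispatch point for all config and
 * BAR accesses: it splits the VFIO offset into (region index, offset)
 * using the MTTY_VFIO_PCI_OFFSET_* macros and, for BAR accesses, lazily
 * caches the guest-programmed base addresses via mdev_read_base().
 */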

static ssize_t mdev_access(struct mdev_state *mdev_state, u8 *buf, size_t count,
			   loff_t pos, bool is_write)
{
	unsigned int index;
	loff_t offset;
	int ret = 0;

	if (!buf)
		return -EINVAL;

	mutex_lock(&mdev_state->ops_lock);

	index = MTTY_VFIO_PCI_OFFSET_TO_INDEX(pos);
	offset = pos & MTTY_VFIO_PCI_OFFSET_MASK;
	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:

#if defined(DEBUG)
		pr_info("%s: PCI config space %s at offset 0x%llx\n",
			__func__, is_write ? "write" : "read", offset);
#endif
		if (is_write) {
			dump_buffer(buf, count);
			handle_pci_cfg_write(mdev_state, offset, buf, count);
		} else {
			memcpy(buf, (mdev_state->vconfig + offset), count);
			dump_buffer(buf, count);
		}

		break;

	case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
		if (!mdev_state->region_info[index].start)
			mdev_read_base(mdev_state);

		if (is_write) {
			dump_buffer(buf, count);

#if defined(DEBUG_REGS)
			pr_info("%s: BAR%d WR @0x%llx %s val:0x%02x dlab:%d\n",
				__func__, index, offset, wr_reg[offset],
				*buf, mdev_state->s[index].dlab);
#endif
			handle_bar_write(index, mdev_state, offset, buf, count);
		} else {
			handle_bar_read(index, mdev_state, offset, buf, count);
			dump_buffer(buf, count);

#if defined(DEBUG_REGS)
			pr_info("%s: BAR%d RD @0x%llx %s val:0x%02x dlab:%d\n",
				__func__, index, offset, rd_reg[offset],
				*buf, mdev_state->s[index].dlab);
#endif
		}
		break;

	default:
		ret = -1;
		goto accessfailed;
	}

	ret = count;


accessfailed:
	mutex_unlock(&mdev_state->ops_lock);

	return ret;
}

static int mtty_probe(struct mdev_device *mdev)
{
	struct mdev_state *mdev_state;
	int nr_ports = mdev_get_type_group_id(mdev) + 1;
	int avail_ports = atomic_read(&mdev_avail_ports);
	int ret;

	do {
		if (avail_ports < nr_ports)
			return -ENOSPC;
	} while (!atomic_try_cmpxchg(&mdev_avail_ports,
				     &avail_ports, avail_ports - nr_ports));

	mdev_state = kzalloc(sizeof(struct mdev_state), GFP_KERNEL);
	if (mdev_state == NULL) {
		ret = -ENOMEM;
		goto err_nr_ports;
	}

	vfio_init_group_dev(&mdev_state->vdev, &mdev->dev, &mtty_dev_ops);

	mdev_state->nr_ports = nr_ports;
	mdev_state->irq_index = -1;
	mdev_state->s[0].max_fifo_size = MAX_FIFO_SIZE;
	mdev_state->s[1].max_fifo_size = MAX_FIFO_SIZE;
	mutex_init(&mdev_state->rxtx_lock);
	mdev_state->vconfig = kzalloc(MTTY_CONFIG_SPACE_SIZE, GFP_KERNEL);

	if (mdev_state->vconfig == NULL) {
		ret = -ENOMEM;
		goto err_state;
	}

	mutex_init(&mdev_state->ops_lock);
	mdev_state->mdev = mdev;

	mtty_create_config_space(mdev_state);

	ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev);
	if (ret)
		goto err_vconfig;
	dev_set_drvdata(&mdev->dev, mdev_state);
	return 0;

err_vconfig:
	kfree(mdev_state->vconfig);
err_state:
	vfio_uninit_group_dev(&mdev_state->vdev);
	kfree(mdev_state);
err_nr_ports:
	atomic_add(nr_ports, &mdev_avail_ports);
	return ret;
}

static void mtty_remove(struct mdev_device *mdev)
{
	struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev);
	int nr_ports = mdev_state->nr_ports;

	vfio_unregister_group_dev(&mdev_state->vdev);

	kfree(mdev_state->vconfig);
	vfio_uninit_group_dev(&mdev_state->vdev);
	kfree(mdev_state);
	atomic_add(nr_ports, &mdev_avail_ports);
}

static int mtty_reset(struct mdev_state *mdev_state)
{
	pr_info("%s: called\n", __func__);

	return 0;
}
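
/*
 * The read/write handlers below split a transfer into naturally aligned
 * 4-, 2- and 1-byte accesses.  For example (illustration only), a 6-byte
 * read at offset 0 becomes one 4-byte access at offset 0 followed by one
 * 2-byte access at offset 4.
 */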

static ssize_t mtty_read(struct vfio_device *vdev, char __user *buf,
			 size_t count, loff_t *ppos)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 2;
		} else {
			u8 val;

			ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;

read_err:
	return -EFAULT;
}

static ssize_t mtty_write(struct vfio_device *vdev, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 1;
		}
		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;
write_err:
	return -EFAULT;
}
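
/*
 * Userspace sketch of arming the INTx eventfd that mtty_set_irqs() below
 * consumes (assumed typical VFIO usage, not part of this driver):
 *
 *	char buf[sizeof(struct vfio_irq_set) + sizeof(int)];
 *	struct vfio_irq_set *irq_set = (struct vfio_irq_set *)buf;
 *
 *	irq_set->argsz = sizeof(buf);
 *	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
 *			 VFIO_IRQ_SET_ACTION_TRIGGER;
 *	irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
 *	irq_set->start = 0;
 *	irq_set->count = 1;
 *	*(int *)&irq_set->data = eventfd(0, 0);
 *	ioctl(device_fd, VFIO_DEVICE_SET_IRQS, irq_set);
 */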

static int mtty_set_irqs(struct mdev_state *mdev_state, uint32_t flags,
			 unsigned int index, unsigned int start,
			 unsigned int count, void *data)
{
	int ret = 0;

	mutex_lock(&mdev_state->ops_lock);
	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
		{
			if (flags & VFIO_IRQ_SET_DATA_NONE) {
				pr_info("%s: disable INTx\n", __func__);
				if (mdev_state->intx_evtfd)
					eventfd_ctx_put(mdev_state->intx_evtfd);
				break;
			}

			if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
				int fd = *(int *)data;

				if (fd > 0) {
					struct eventfd_ctx *evt;

					evt = eventfd_ctx_fdget(fd);
					if (IS_ERR(evt)) {
						ret = PTR_ERR(evt);
						break;
					}
					mdev_state->intx_evtfd = evt;
					mdev_state->irq_fd = fd;
					mdev_state->irq_index = index;
					break;
				}
			}
			break;
		}
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			if (flags & VFIO_IRQ_SET_DATA_NONE) {
				if (mdev_state->msi_evtfd)
					eventfd_ctx_put(mdev_state->msi_evtfd);
				pr_info("%s: disable MSI\n", __func__);
				mdev_state->irq_index = VFIO_PCI_INTX_IRQ_INDEX;
				break;
			}
			if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
				int fd = *(int *)data;
				struct eventfd_ctx *evt;

				if (fd <= 0)
					break;

				if (mdev_state->msi_evtfd)
					break;

				evt = eventfd_ctx_fdget(fd);
				if (IS_ERR(evt)) {
					ret = PTR_ERR(evt);
					break;
				}
				mdev_state->msi_evtfd = evt;
				mdev_state->irq_fd = fd;
				mdev_state->irq_index = index;
			}
			break;
		}
		break;
	case VFIO_PCI_MSIX_IRQ_INDEX:
		pr_info("%s: MSIX_IRQ\n", __func__);
		break;
	case VFIO_PCI_ERR_IRQ_INDEX:
		pr_info("%s: ERR_IRQ\n", __func__);
		break;
	case VFIO_PCI_REQ_IRQ_INDEX:
		pr_info("%s: REQ_IRQ\n", __func__);
		break;
	}

	mutex_unlock(&mdev_state->ops_lock);
	return ret;
}

static int mtty_trigger_interrupt(struct mdev_state *mdev_state)
{
	int ret = -1;

	if ((mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX) &&
	    (!mdev_state->msi_evtfd))
		return -EINVAL;
	else if ((mdev_state->irq_index == VFIO_PCI_INTX_IRQ_INDEX) &&
		 (!mdev_state->intx_evtfd)) {
		pr_info("%s: Intr eventfd not found\n", __func__);
		return -EINVAL;
	}

	if (mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX)
		ret = eventfd_signal(mdev_state->msi_evtfd, 1);
	else
		ret = eventfd_signal(mdev_state->intx_evtfd, 1);

#if defined(DEBUG_INTR)
	pr_info("Intx triggered\n");
#endif
	if (ret != 1)
		pr_err("%s: eventfd signal failed (%d)\n", __func__, ret);

	return ret;
}

static int mtty_get_region_info(struct mdev_state *mdev_state,
				struct vfio_region_info *region_info,
				u16 *cap_type_id, void **cap_type)
{
	unsigned int size = 0;
	u32 bar_index;

	bar_index = region_info->index;
	if (bar_index >= VFIO_PCI_NUM_REGIONS)
		return -EINVAL;

	mutex_lock(&mdev_state->ops_lock);

	switch (bar_index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		size = MTTY_CONFIG_SPACE_SIZE;
		break;
	case VFIO_PCI_BAR0_REGION_INDEX:
		size = MTTY_IO_BAR_SIZE;
		break;
	case VFIO_PCI_BAR1_REGION_INDEX:
		if (mdev_state->nr_ports == 2)
			size = MTTY_IO_BAR_SIZE;
		break;
	default:
		size = 0;
		break;
	}

	mdev_state->region_info[bar_index].size = size;
	mdev_state->region_info[bar_index].vfio_offset =
		MTTY_VFIO_PCI_INDEX_TO_OFFSET(bar_index);

	region_info->size = size;
	region_info->offset = MTTY_VFIO_PCI_INDEX_TO_OFFSET(bar_index);
	region_info->flags = VFIO_REGION_INFO_FLAG_READ |
		VFIO_REGION_INFO_FLAG_WRITE;
	mutex_unlock(&mdev_state->ops_lock);
	return 0;
}

static int mtty_get_irq_info(struct vfio_irq_info *irq_info)
{
	switch (irq_info->index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
	case VFIO_PCI_MSI_IRQ_INDEX:
	case VFIO_PCI_REQ_IRQ_INDEX:
		break;

	default:
		return -EINVAL;
	}

	irq_info->flags = VFIO_IRQ_INFO_EVENTFD;
	irq_info->count = 1;

	if (irq_info->index == VFIO_PCI_INTX_IRQ_INDEX)
		irq_info->flags |= (VFIO_IRQ_INFO_MASKABLE |
				VFIO_IRQ_INFO_AUTOMASKED);
	else
		irq_info->flags |= VFIO_IRQ_INFO_NORESIZE;

	return 0;
}

static int mtty_get_device_info(struct vfio_device_info *dev_info)
{
	dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
	dev_info->num_regions = VFIO_PCI_NUM_REGIONS;
	dev_info->num_irqs = VFIO_PCI_NUM_IRQS;

	return 0;
}
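
/*
 * The ioctl handler below follows the usual VFIO argsz convention:
 * userspace sets info.argsz to the size it allocated, the driver copies
 * in only the fields it knows (minsz), rejects argsz < minsz, and copies
 * minsz bytes back out.
 */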

static long mtty_ioctl(struct vfio_device *vdev, unsigned int cmd,
		       unsigned long arg)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	int ret = 0;
	unsigned long minsz;

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
	{
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = mtty_get_device_info(&info);
		if (ret)
			return ret;

		memcpy(&mdev_state->dev_info, &info, sizeof(info));

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_DEVICE_GET_REGION_INFO:
	{
		struct vfio_region_info info;
		u16 cap_type_id = 0;
		void *cap_type = NULL;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = mtty_get_region_info(mdev_state, &info, &cap_type_id,
					   &cap_type);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}

	case VFIO_DEVICE_GET_IRQ_INFO:
	{
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if ((info.argsz < minsz) ||
		    (info.index >= mdev_state->dev_info.num_irqs))
			return -EINVAL;

		ret = mtty_get_irq_info(&info);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_DEVICE_SET_IRQS:
	{
		struct vfio_irq_set hdr;
		u8 *data = NULL, *ptr = NULL;
		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		ret = vfio_set_irqs_validate_and_prepare(&hdr,
						mdev_state->dev_info.num_irqs,
						VFIO_PCI_NUM_IRQS,
						&data_size);
		if (ret)
			return ret;

		if (data_size) {
			ptr = data = memdup_user((void __user *)(arg + minsz),
						 data_size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		ret = mtty_set_irqs(mdev_state, hdr.flags, hdr.index, hdr.start,
				    hdr.count, data);

		kfree(ptr);
		return ret;
	}
	case VFIO_DEVICE_RESET:
		return mtty_reset(mdev_state);
	}
	return -ENOTTY;
}

static ssize_t
sample_mtty_dev_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	return sprintf(buf, "This is phy device\n");
}

static DEVICE_ATTR_RO(sample_mtty_dev);

static struct attribute *mtty_dev_attrs[] = {
	&dev_attr_sample_mtty_dev.attr,
	NULL,
};

static const struct attribute_group mtty_dev_group = {
	.name  = "mtty_dev",
	.attrs = mtty_dev_attrs,
};

static const struct attribute_group *mtty_dev_groups[] = {
	&mtty_dev_group,
	NULL,
};

static ssize_t
sample_mdev_dev_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	if (mdev_from_dev(dev))
		return sprintf(buf, "This is MDEV %s\n", dev_name(dev));

	return sprintf(buf, "\n");
}

static DEVICE_ATTR_RO(sample_mdev_dev);
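
/*
 * The "vendor" attribute group below is attached to every mdev instance
 * through mtty_driver.driver.dev_groups, so (assuming the default sysfs
 * layout) it should appear as
 * /sys/bus/mdev/devices/<uuid>/vendor/sample_mdev_dev.
 */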

static struct attribute *mdev_dev_attrs[] = {
	&dev_attr_sample_mdev_dev.attr,
	NULL,
};

static const struct attribute_group mdev_dev_group = {
	.name  = "vendor",
	.attrs = mdev_dev_attrs,
};

static const struct attribute_group *mdev_dev_groups[] = {
	&mdev_dev_group,
	NULL,
};

static ssize_t name_show(struct mdev_type *mtype,
			 struct mdev_type_attribute *attr, char *buf)
{
	static const char *name_str[2] = { "Single port serial",
					   "Dual port serial" };

	return sysfs_emit(buf, "%s\n",
			  name_str[mtype_get_type_group_id(mtype)]);
}

static MDEV_TYPE_ATTR_RO(name);

static ssize_t available_instances_show(struct mdev_type *mtype,
					struct mdev_type_attribute *attr,
					char *buf)
{
	unsigned int ports = mtype_get_type_group_id(mtype) + 1;

	return sprintf(buf, "%d\n", atomic_read(&mdev_avail_ports) / ports);
}

static MDEV_TYPE_ATTR_RO(available_instances);

static ssize_t device_api_show(struct mdev_type *mtype,
			       struct mdev_type_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
}

static MDEV_TYPE_ATTR_RO(device_api);

static struct attribute *mdev_types_attrs[] = {
	&mdev_type_attr_name.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_available_instances.attr,
	NULL,
};

static struct attribute_group mdev_type_group1 = {
	.name  = "1",
	.attrs = mdev_types_attrs,
};

static struct attribute_group mdev_type_group2 = {
	.name  = "2",
	.attrs = mdev_types_attrs,
};

static struct attribute_group *mdev_type_groups[] = {
	&mdev_type_group1,
	&mdev_type_group2,
	NULL,
};

static const struct vfio_device_ops mtty_dev_ops = {
	.name = "vfio-mtty",
	.read = mtty_read,
	.write = mtty_write,
	.ioctl = mtty_ioctl,
};

static struct mdev_driver mtty_driver = {
	.driver = {
		.name = "mtty",
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
		.dev_groups = mdev_dev_groups,
	},
	.probe = mtty_probe,
	.remove = mtty_remove,
};

static const struct mdev_parent_ops mdev_fops = {
	.owner			= THIS_MODULE,
	.device_driver		= &mtty_driver,
	.dev_attr_groups	= mtty_dev_groups,
	.supported_type_groups	= mdev_type_groups,
};

static void mtty_device_release(struct device *dev)
{
	dev_dbg(dev, "mtty: released\n");
}
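
/*
 * Typical usage once the module is loaded (from the mtty documentation;
 * type "mtty-2" is the dual port variant, "mtty-1" the single port one):
 *
 *	UUID=$(uuidgen)
 *	echo $UUID > \
 *	  /sys/devices/virtual/mtty/mtty/mdev_supported_types/mtty-2/create
 */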

static int __init mtty_dev_init(void)
{
	int ret = 0;

	pr_info("mtty_dev: %s\n", __func__);

	memset(&mtty_dev, 0, sizeof(mtty_dev));

	idr_init(&mtty_dev.vd_idr);

	ret = alloc_chrdev_region(&mtty_dev.vd_devt, 0, MINORMASK + 1,
				  MTTY_NAME);

	if (ret < 0) {
		pr_err("Error: failed to register mtty_dev, err:%d\n", ret);
		return ret;
	}

	cdev_init(&mtty_dev.vd_cdev, &vd_fops);
	cdev_add(&mtty_dev.vd_cdev, mtty_dev.vd_devt, MINORMASK + 1);

	pr_info("major_number:%d\n", MAJOR(mtty_dev.vd_devt));

	ret = mdev_register_driver(&mtty_driver);
	if (ret)
		goto err_cdev;

	mtty_dev.vd_class = class_create(THIS_MODULE, MTTY_CLASS_NAME);

	if (IS_ERR(mtty_dev.vd_class)) {
		pr_err("Error: failed to register mtty_dev class\n");
		ret = PTR_ERR(mtty_dev.vd_class);
		goto err_driver;
	}

	mtty_dev.dev.class = mtty_dev.vd_class;
	mtty_dev.dev.release = mtty_device_release;
	dev_set_name(&mtty_dev.dev, "%s", MTTY_NAME);

	ret = device_register(&mtty_dev.dev);
	if (ret)
		goto err_class;

	ret = mdev_register_device(&mtty_dev.dev, &mdev_fops);
	if (ret)
		goto err_device;
	return 0;

err_device:
	device_unregister(&mtty_dev.dev);
err_class:
	class_destroy(mtty_dev.vd_class);
err_driver:
	mdev_unregister_driver(&mtty_driver);
err_cdev:
	cdev_del(&mtty_dev.vd_cdev);
	unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK + 1);
	return ret;
}

static void __exit mtty_dev_exit(void)
{
	mtty_dev.dev.bus = NULL;
	mdev_unregister_device(&mtty_dev.dev);

	device_unregister(&mtty_dev.dev);
	idr_destroy(&mtty_dev.vd_idr);
	mdev_unregister_driver(&mtty_driver);
	cdev_del(&mtty_dev.vd_cdev);
	unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK + 1);
	class_destroy(mtty_dev.vd_class);
	mtty_dev.vd_class = NULL;
	pr_info("mtty_dev: Unloaded!\n");
}

module_init(mtty_dev_init)
module_exit(mtty_dev_exit)

MODULE_LICENSE("GPL v2");
MODULE_INFO(supported, "Test driver that simulates a serial port over PCI");
MODULE_VERSION(VERSION_STRING);
MODULE_AUTHOR(DRIVER_AUTHOR);