1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Mediated virtual PCI serial host device driver 4 * 5 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 6 * Author: Neo Jia <cjia@nvidia.com> 7 * Kirti Wankhede <kwankhede@nvidia.com> 8 * 9 * Sample driver that creates mdev device that simulates serial port over PCI 10 * card. 11 */ 12 13 #include <linux/init.h> 14 #include <linux/module.h> 15 #include <linux/kernel.h> 16 #include <linux/fs.h> 17 #include <linux/poll.h> 18 #include <linux/slab.h> 19 #include <linux/cdev.h> 20 #include <linux/sched.h> 21 #include <linux/wait.h> 22 #include <linux/vfio.h> 23 #include <linux/iommu.h> 24 #include <linux/sysfs.h> 25 #include <linux/ctype.h> 26 #include <linux/file.h> 27 #include <linux/mdev.h> 28 #include <linux/pci.h> 29 #include <linux/serial.h> 30 #include <uapi/linux/serial_reg.h> 31 #include <linux/eventfd.h> 32 /* 33 * #defines 34 */ 35 36 #define VERSION_STRING "0.1" 37 #define DRIVER_AUTHOR "NVIDIA Corporation" 38 39 #define MTTY_CLASS_NAME "mtty" 40 41 #define MTTY_NAME "mtty" 42 43 #define MTTY_STRING_LEN 16 44 45 #define MTTY_CONFIG_SPACE_SIZE 0xff 46 #define MTTY_IO_BAR_SIZE 0x8 47 #define MTTY_MMIO_BAR_SIZE 0x100000 48 49 #define STORE_LE16(addr, val) (*(u16 *)addr = val) 50 #define STORE_LE32(addr, val) (*(u32 *)addr = val) 51 52 #define MAX_FIFO_SIZE 16 53 54 #define CIRCULAR_BUF_INC_IDX(idx) (idx = (idx + 1) & (MAX_FIFO_SIZE - 1)) 55 56 #define MTTY_VFIO_PCI_OFFSET_SHIFT 40 57 58 #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off) (off >> MTTY_VFIO_PCI_OFFSET_SHIFT) 59 #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \ 60 ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT) 61 #define MTTY_VFIO_PCI_OFFSET_MASK \ 62 (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1) 63 #define MAX_MTTYS 24 64 65 /* 66 * Global Structures 67 */ 68 69 static struct mtty_dev { 70 dev_t vd_devt; 71 struct class *vd_class; 72 struct cdev vd_cdev; 73 struct idr vd_idr; 74 struct device dev; 75 struct mdev_parent parent; 76 } mtty_dev; 77 78 struct mdev_region_info { 79 u64 start; 80 u64 phys_start; 81 u32 size; 82 u64 vfio_offset; 83 }; 84 85 #if defined(DEBUG_REGS) 86 static const char *wr_reg[] = { 87 "TX", 88 "IER", 89 "FCR", 90 "LCR", 91 "MCR", 92 "LSR", 93 "MSR", 94 "SCR" 95 }; 96 97 static const char *rd_reg[] = { 98 "RX", 99 "IER", 100 "IIR", 101 "LCR", 102 "MCR", 103 "LSR", 104 "MSR", 105 "SCR" 106 }; 107 #endif 108 109 /* loop back buffer */ 110 struct rxtx { 111 u8 fifo[MAX_FIFO_SIZE]; 112 u8 head, tail; 113 u8 count; 114 }; 115 116 struct serial_port { 117 u8 uart_reg[8]; /* 8 registers */ 118 struct rxtx rxtx; /* loop back buffer */ 119 bool dlab; 120 bool overrun; 121 u16 divisor; 122 u8 fcr; /* FIFO control register */ 123 u8 max_fifo_size; 124 u8 intr_trigger_level; /* interrupt trigger level */ 125 }; 126 127 /* State of each mdev device */ 128 struct mdev_state { 129 struct vfio_device vdev; 130 struct eventfd_ctx *intx_evtfd; 131 struct eventfd_ctx *msi_evtfd; 132 int irq_index; 133 u8 *vconfig; 134 struct mutex ops_lock; 135 struct mdev_device *mdev; 136 struct mdev_region_info region_info[VFIO_PCI_NUM_REGIONS]; 137 u32 bar_mask[VFIO_PCI_NUM_REGIONS]; 138 struct list_head next; 139 struct serial_port s[2]; 140 struct mutex rxtx_lock; 141 struct vfio_device_info dev_info; 142 int nr_ports; 143 u8 intx_mask:1; 144 }; 145 146 static struct mtty_type { 147 struct mdev_type type; 148 int nr_ports; 149 } mtty_types[2] = { 150 { .nr_ports = 1, .type.sysfs_name = "1", 151 .type.pretty_name = "Single port serial" }, 152 { .nr_ports = 2, .type.sysfs_name = "2", 153 .type.pretty_name = "Dual port serial" }, 154 }; 155 156 static struct mdev_type *mtty_mdev_types[] = { 157 &mtty_types[0].type, 158 &mtty_types[1].type, 159 }; 160 161 static atomic_t mdev_avail_ports = ATOMIC_INIT(MAX_MTTYS); 162 163 static const struct file_operations vd_fops = { 164 .owner = THIS_MODULE, 165 }; 166 167 static const struct vfio_device_ops mtty_dev_ops; 168 169 /* Helper functions */ 170 171 static void dump_buffer(u8 *buf, uint32_t count) 172 { 173 #if defined(DEBUG) 174 int i; 175 176 pr_info("Buffer:\n"); 177 for (i = 0; i < count; i++) { 178 pr_info("%2x ", *(buf + i)); 179 if ((i + 1) % 16 == 0) 180 pr_info("\n"); 181 } 182 #endif 183 } 184 185 static bool is_intx(struct mdev_state *mdev_state) 186 { 187 return mdev_state->irq_index == VFIO_PCI_INTX_IRQ_INDEX; 188 } 189 190 static bool is_msi(struct mdev_state *mdev_state) 191 { 192 return mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX; 193 } 194 195 static bool is_noirq(struct mdev_state *mdev_state) 196 { 197 return !is_intx(mdev_state) && !is_msi(mdev_state); 198 } 199 200 static void mtty_trigger_interrupt(struct mdev_state *mdev_state) 201 { 202 lockdep_assert_held(&mdev_state->ops_lock); 203 204 if (is_msi(mdev_state)) { 205 if (mdev_state->msi_evtfd) 206 eventfd_signal(mdev_state->msi_evtfd, 1); 207 } else if (is_intx(mdev_state)) { 208 if (mdev_state->intx_evtfd && !mdev_state->intx_mask) { 209 eventfd_signal(mdev_state->intx_evtfd, 1); 210 mdev_state->intx_mask = true; 211 } 212 } 213 } 214 215 static void mtty_create_config_space(struct mdev_state *mdev_state) 216 { 217 /* PCI dev ID */ 218 STORE_LE32((u32 *) &mdev_state->vconfig[0x0], 0x32534348); 219 220 /* Control: I/O+, Mem-, BusMaster- */ 221 STORE_LE16((u16 *) &mdev_state->vconfig[0x4], 0x0001); 222 223 /* Status: capabilities list absent */ 224 STORE_LE16((u16 *) &mdev_state->vconfig[0x6], 0x0200); 225 226 /* Rev ID */ 227 mdev_state->vconfig[0x8] = 0x10; 228 229 /* programming interface class : 16550-compatible serial controller */ 230 mdev_state->vconfig[0x9] = 0x02; 231 232 /* Sub class : 00 */ 233 mdev_state->vconfig[0xa] = 0x00; 234 235 /* Base class : Simple Communication controllers */ 236 mdev_state->vconfig[0xb] = 0x07; 237 238 /* base address registers */ 239 /* BAR0: IO space */ 240 STORE_LE32((u32 *) &mdev_state->vconfig[0x10], 0x000001); 241 mdev_state->bar_mask[0] = ~(MTTY_IO_BAR_SIZE) + 1; 242 243 if (mdev_state->nr_ports == 2) { 244 /* BAR1: IO space */ 245 STORE_LE32((u32 *) &mdev_state->vconfig[0x14], 0x000001); 246 mdev_state->bar_mask[1] = ~(MTTY_IO_BAR_SIZE) + 1; 247 } 248 249 /* Subsystem ID */ 250 STORE_LE32((u32 *) &mdev_state->vconfig[0x2c], 0x32534348); 251 252 mdev_state->vconfig[0x34] = 0x00; /* Cap Ptr */ 253 mdev_state->vconfig[0x3d] = 0x01; /* interrupt pin (INTA#) */ 254 255 /* Vendor specific data */ 256 mdev_state->vconfig[0x40] = 0x23; 257 mdev_state->vconfig[0x43] = 0x80; 258 mdev_state->vconfig[0x44] = 0x23; 259 mdev_state->vconfig[0x48] = 0x23; 260 mdev_state->vconfig[0x4c] = 0x23; 261 262 mdev_state->vconfig[0x60] = 0x50; 263 mdev_state->vconfig[0x61] = 0x43; 264 mdev_state->vconfig[0x62] = 0x49; 265 mdev_state->vconfig[0x63] = 0x20; 266 mdev_state->vconfig[0x64] = 0x53; 267 mdev_state->vconfig[0x65] = 0x65; 268 mdev_state->vconfig[0x66] = 0x72; 269 mdev_state->vconfig[0x67] = 0x69; 270 mdev_state->vconfig[0x68] = 0x61; 271 mdev_state->vconfig[0x69] = 0x6c; 272 mdev_state->vconfig[0x6a] = 0x2f; 273 mdev_state->vconfig[0x6b] = 0x55; 274 mdev_state->vconfig[0x6c] = 0x41; 275 mdev_state->vconfig[0x6d] = 0x52; 276 mdev_state->vconfig[0x6e] = 0x54; 277 } 278 279 static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset, 280 u8 *buf, u32 count) 281 { 282 u32 cfg_addr, bar_mask, bar_index = 0; 283 284 switch (offset) { 285 case 0x04: /* device control */ 286 case 0x06: /* device status */ 287 /* do nothing */ 288 break; 289 case 0x3c: /* interrupt line */ 290 mdev_state->vconfig[0x3c] = buf[0]; 291 break; 292 case 0x3d: 293 /* 294 * Interrupt Pin is hardwired to INTA. 295 * This field is write protected by hardware 296 */ 297 break; 298 case 0x10: /* BAR0 */ 299 case 0x14: /* BAR1 */ 300 if (offset == 0x10) 301 bar_index = 0; 302 else if (offset == 0x14) 303 bar_index = 1; 304 305 if ((mdev_state->nr_ports == 1) && (bar_index == 1)) { 306 STORE_LE32(&mdev_state->vconfig[offset], 0); 307 break; 308 } 309 310 cfg_addr = *(u32 *)buf; 311 pr_info("BAR%d addr 0x%x\n", bar_index, cfg_addr); 312 313 if (cfg_addr == 0xffffffff) { 314 bar_mask = mdev_state->bar_mask[bar_index]; 315 cfg_addr = (cfg_addr & bar_mask); 316 } 317 318 cfg_addr |= (mdev_state->vconfig[offset] & 0x3ul); 319 STORE_LE32(&mdev_state->vconfig[offset], cfg_addr); 320 break; 321 case 0x18: /* BAR2 */ 322 case 0x1c: /* BAR3 */ 323 case 0x20: /* BAR4 */ 324 STORE_LE32(&mdev_state->vconfig[offset], 0); 325 break; 326 default: 327 pr_info("PCI config write @0x%x of %d bytes not handled\n", 328 offset, count); 329 break; 330 } 331 } 332 333 static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state, 334 u16 offset, u8 *buf, u32 count) 335 { 336 u8 data = *buf; 337 338 /* Handle data written by guest */ 339 switch (offset) { 340 case UART_TX: 341 /* if DLAB set, data is LSB of divisor */ 342 if (mdev_state->s[index].dlab) { 343 mdev_state->s[index].divisor |= data; 344 break; 345 } 346 347 mutex_lock(&mdev_state->rxtx_lock); 348 349 /* save in TX buffer */ 350 if (mdev_state->s[index].rxtx.count < 351 mdev_state->s[index].max_fifo_size) { 352 mdev_state->s[index].rxtx.fifo[ 353 mdev_state->s[index].rxtx.head] = data; 354 mdev_state->s[index].rxtx.count++; 355 CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.head); 356 mdev_state->s[index].overrun = false; 357 358 /* 359 * Trigger interrupt if receive data interrupt is 360 * enabled and fifo reached trigger level 361 */ 362 if ((mdev_state->s[index].uart_reg[UART_IER] & 363 UART_IER_RDI) && 364 (mdev_state->s[index].rxtx.count == 365 mdev_state->s[index].intr_trigger_level)) { 366 /* trigger interrupt */ 367 #if defined(DEBUG_INTR) 368 pr_err("Serial port %d: Fifo level trigger\n", 369 index); 370 #endif 371 mtty_trigger_interrupt(mdev_state); 372 } 373 } else { 374 #if defined(DEBUG_INTR) 375 pr_err("Serial port %d: Buffer Overflow\n", index); 376 #endif 377 mdev_state->s[index].overrun = true; 378 379 /* 380 * Trigger interrupt if receiver line status interrupt 381 * is enabled 382 */ 383 if (mdev_state->s[index].uart_reg[UART_IER] & 384 UART_IER_RLSI) 385 mtty_trigger_interrupt(mdev_state); 386 } 387 mutex_unlock(&mdev_state->rxtx_lock); 388 break; 389 390 case UART_IER: 391 /* if DLAB set, data is MSB of divisor */ 392 if (mdev_state->s[index].dlab) 393 mdev_state->s[index].divisor |= (u16)data << 8; 394 else { 395 mdev_state->s[index].uart_reg[offset] = data; 396 mutex_lock(&mdev_state->rxtx_lock); 397 if ((data & UART_IER_THRI) && 398 (mdev_state->s[index].rxtx.head == 399 mdev_state->s[index].rxtx.tail)) { 400 #if defined(DEBUG_INTR) 401 pr_err("Serial port %d: IER_THRI write\n", 402 index); 403 #endif 404 mtty_trigger_interrupt(mdev_state); 405 } 406 407 mutex_unlock(&mdev_state->rxtx_lock); 408 } 409 410 break; 411 412 case UART_FCR: 413 mdev_state->s[index].fcr = data; 414 415 mutex_lock(&mdev_state->rxtx_lock); 416 if (data & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT)) { 417 /* clear loop back FIFO */ 418 mdev_state->s[index].rxtx.count = 0; 419 mdev_state->s[index].rxtx.head = 0; 420 mdev_state->s[index].rxtx.tail = 0; 421 } 422 mutex_unlock(&mdev_state->rxtx_lock); 423 424 switch (data & UART_FCR_TRIGGER_MASK) { 425 case UART_FCR_TRIGGER_1: 426 mdev_state->s[index].intr_trigger_level = 1; 427 break; 428 429 case UART_FCR_TRIGGER_4: 430 mdev_state->s[index].intr_trigger_level = 4; 431 break; 432 433 case UART_FCR_TRIGGER_8: 434 mdev_state->s[index].intr_trigger_level = 8; 435 break; 436 437 case UART_FCR_TRIGGER_14: 438 mdev_state->s[index].intr_trigger_level = 14; 439 break; 440 } 441 442 /* 443 * Set trigger level to 1 otherwise or implement timer with 444 * timeout of 4 characters and on expiring that timer set 445 * Recevice data timeout in IIR register 446 */ 447 mdev_state->s[index].intr_trigger_level = 1; 448 if (data & UART_FCR_ENABLE_FIFO) 449 mdev_state->s[index].max_fifo_size = MAX_FIFO_SIZE; 450 else { 451 mdev_state->s[index].max_fifo_size = 1; 452 mdev_state->s[index].intr_trigger_level = 1; 453 } 454 455 break; 456 457 case UART_LCR: 458 if (data & UART_LCR_DLAB) { 459 mdev_state->s[index].dlab = true; 460 mdev_state->s[index].divisor = 0; 461 } else 462 mdev_state->s[index].dlab = false; 463 464 mdev_state->s[index].uart_reg[offset] = data; 465 break; 466 467 case UART_MCR: 468 mdev_state->s[index].uart_reg[offset] = data; 469 470 if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) && 471 (data & UART_MCR_OUT2)) { 472 #if defined(DEBUG_INTR) 473 pr_err("Serial port %d: MCR_OUT2 write\n", index); 474 #endif 475 mtty_trigger_interrupt(mdev_state); 476 } 477 478 if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) && 479 (data & (UART_MCR_RTS | UART_MCR_DTR))) { 480 #if defined(DEBUG_INTR) 481 pr_err("Serial port %d: MCR RTS/DTR write\n", index); 482 #endif 483 mtty_trigger_interrupt(mdev_state); 484 } 485 break; 486 487 case UART_LSR: 488 case UART_MSR: 489 /* do nothing */ 490 break; 491 492 case UART_SCR: 493 mdev_state->s[index].uart_reg[offset] = data; 494 break; 495 496 default: 497 break; 498 } 499 } 500 501 static void handle_bar_read(unsigned int index, struct mdev_state *mdev_state, 502 u16 offset, u8 *buf, u32 count) 503 { 504 /* Handle read requests by guest */ 505 switch (offset) { 506 case UART_RX: 507 /* if DLAB set, data is LSB of divisor */ 508 if (mdev_state->s[index].dlab) { 509 *buf = (u8)mdev_state->s[index].divisor; 510 break; 511 } 512 513 mutex_lock(&mdev_state->rxtx_lock); 514 /* return data in tx buffer */ 515 if (mdev_state->s[index].rxtx.head != 516 mdev_state->s[index].rxtx.tail) { 517 *buf = mdev_state->s[index].rxtx.fifo[ 518 mdev_state->s[index].rxtx.tail]; 519 mdev_state->s[index].rxtx.count--; 520 CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.tail); 521 } 522 523 if (mdev_state->s[index].rxtx.head == 524 mdev_state->s[index].rxtx.tail) { 525 /* 526 * Trigger interrupt if tx buffer empty interrupt is 527 * enabled and fifo is empty 528 */ 529 #if defined(DEBUG_INTR) 530 pr_err("Serial port %d: Buffer Empty\n", index); 531 #endif 532 if (mdev_state->s[index].uart_reg[UART_IER] & 533 UART_IER_THRI) 534 mtty_trigger_interrupt(mdev_state); 535 } 536 mutex_unlock(&mdev_state->rxtx_lock); 537 538 break; 539 540 case UART_IER: 541 if (mdev_state->s[index].dlab) { 542 *buf = (u8)(mdev_state->s[index].divisor >> 8); 543 break; 544 } 545 *buf = mdev_state->s[index].uart_reg[offset] & 0x0f; 546 break; 547 548 case UART_IIR: 549 { 550 u8 ier = mdev_state->s[index].uart_reg[UART_IER]; 551 *buf = 0; 552 553 mutex_lock(&mdev_state->rxtx_lock); 554 /* Interrupt priority 1: Parity, overrun, framing or break */ 555 if ((ier & UART_IER_RLSI) && mdev_state->s[index].overrun) 556 *buf |= UART_IIR_RLSI; 557 558 /* Interrupt priority 2: Fifo trigger level reached */ 559 if ((ier & UART_IER_RDI) && 560 (mdev_state->s[index].rxtx.count >= 561 mdev_state->s[index].intr_trigger_level)) 562 *buf |= UART_IIR_RDI; 563 564 /* Interrupt priotiry 3: transmitter holding register empty */ 565 if ((ier & UART_IER_THRI) && 566 (mdev_state->s[index].rxtx.head == 567 mdev_state->s[index].rxtx.tail)) 568 *buf |= UART_IIR_THRI; 569 570 /* Interrupt priotiry 4: Modem status: CTS, DSR, RI or DCD */ 571 if ((ier & UART_IER_MSI) && 572 (mdev_state->s[index].uart_reg[UART_MCR] & 573 (UART_MCR_RTS | UART_MCR_DTR))) 574 *buf |= UART_IIR_MSI; 575 576 /* bit0: 0=> interrupt pending, 1=> no interrupt is pending */ 577 if (*buf == 0) 578 *buf = UART_IIR_NO_INT; 579 580 /* set bit 6 & 7 to be 16550 compatible */ 581 *buf |= 0xC0; 582 mutex_unlock(&mdev_state->rxtx_lock); 583 } 584 break; 585 586 case UART_LCR: 587 case UART_MCR: 588 *buf = mdev_state->s[index].uart_reg[offset]; 589 break; 590 591 case UART_LSR: 592 { 593 u8 lsr = 0; 594 595 mutex_lock(&mdev_state->rxtx_lock); 596 /* atleast one char in FIFO */ 597 if (mdev_state->s[index].rxtx.head != 598 mdev_state->s[index].rxtx.tail) 599 lsr |= UART_LSR_DR; 600 601 /* if FIFO overrun */ 602 if (mdev_state->s[index].overrun) 603 lsr |= UART_LSR_OE; 604 605 /* transmit FIFO empty and tramsitter empty */ 606 if (mdev_state->s[index].rxtx.head == 607 mdev_state->s[index].rxtx.tail) 608 lsr |= UART_LSR_TEMT | UART_LSR_THRE; 609 610 mutex_unlock(&mdev_state->rxtx_lock); 611 *buf = lsr; 612 break; 613 } 614 case UART_MSR: 615 *buf = UART_MSR_DSR | UART_MSR_DDSR | UART_MSR_DCD; 616 617 mutex_lock(&mdev_state->rxtx_lock); 618 /* if AFE is 1 and FIFO have space, set CTS bit */ 619 if (mdev_state->s[index].uart_reg[UART_MCR] & 620 UART_MCR_AFE) { 621 if (mdev_state->s[index].rxtx.count < 622 mdev_state->s[index].max_fifo_size) 623 *buf |= UART_MSR_CTS | UART_MSR_DCTS; 624 } else 625 *buf |= UART_MSR_CTS | UART_MSR_DCTS; 626 mutex_unlock(&mdev_state->rxtx_lock); 627 628 break; 629 630 case UART_SCR: 631 *buf = mdev_state->s[index].uart_reg[offset]; 632 break; 633 634 default: 635 break; 636 } 637 } 638 639 static void mdev_read_base(struct mdev_state *mdev_state) 640 { 641 int index, pos; 642 u32 start_lo, start_hi; 643 u32 mem_type; 644 645 pos = PCI_BASE_ADDRESS_0; 646 647 for (index = 0; index <= VFIO_PCI_BAR5_REGION_INDEX; index++) { 648 649 if (!mdev_state->region_info[index].size) 650 continue; 651 652 start_lo = (*(u32 *)(mdev_state->vconfig + pos)) & 653 PCI_BASE_ADDRESS_MEM_MASK; 654 mem_type = (*(u32 *)(mdev_state->vconfig + pos)) & 655 PCI_BASE_ADDRESS_MEM_TYPE_MASK; 656 657 switch (mem_type) { 658 case PCI_BASE_ADDRESS_MEM_TYPE_64: 659 start_hi = (*(u32 *)(mdev_state->vconfig + pos + 4)); 660 pos += 4; 661 break; 662 case PCI_BASE_ADDRESS_MEM_TYPE_32: 663 case PCI_BASE_ADDRESS_MEM_TYPE_1M: 664 /* 1M mem BAR treated as 32-bit BAR */ 665 default: 666 /* mem unknown type treated as 32-bit BAR */ 667 start_hi = 0; 668 break; 669 } 670 pos += 4; 671 mdev_state->region_info[index].start = ((u64)start_hi << 32) | 672 start_lo; 673 } 674 } 675 676 static ssize_t mdev_access(struct mdev_state *mdev_state, u8 *buf, size_t count, 677 loff_t pos, bool is_write) 678 { 679 unsigned int index; 680 loff_t offset; 681 int ret = 0; 682 683 if (!buf) 684 return -EINVAL; 685 686 mutex_lock(&mdev_state->ops_lock); 687 688 index = MTTY_VFIO_PCI_OFFSET_TO_INDEX(pos); 689 offset = pos & MTTY_VFIO_PCI_OFFSET_MASK; 690 switch (index) { 691 case VFIO_PCI_CONFIG_REGION_INDEX: 692 693 #if defined(DEBUG) 694 pr_info("%s: PCI config space %s at offset 0x%llx\n", 695 __func__, is_write ? "write" : "read", offset); 696 #endif 697 if (is_write) { 698 dump_buffer(buf, count); 699 handle_pci_cfg_write(mdev_state, offset, buf, count); 700 } else { 701 memcpy(buf, (mdev_state->vconfig + offset), count); 702 dump_buffer(buf, count); 703 } 704 705 break; 706 707 case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX: 708 if (!mdev_state->region_info[index].start) 709 mdev_read_base(mdev_state); 710 711 if (is_write) { 712 dump_buffer(buf, count); 713 714 #if defined(DEBUG_REGS) 715 pr_info("%s: BAR%d WR @0x%llx %s val:0x%02x dlab:%d\n", 716 __func__, index, offset, wr_reg[offset], 717 *buf, mdev_state->s[index].dlab); 718 #endif 719 handle_bar_write(index, mdev_state, offset, buf, count); 720 } else { 721 handle_bar_read(index, mdev_state, offset, buf, count); 722 dump_buffer(buf, count); 723 724 #if defined(DEBUG_REGS) 725 pr_info("%s: BAR%d RD @0x%llx %s val:0x%02x dlab:%d\n", 726 __func__, index, offset, rd_reg[offset], 727 *buf, mdev_state->s[index].dlab); 728 #endif 729 } 730 break; 731 732 default: 733 ret = -1; 734 goto accessfailed; 735 } 736 737 ret = count; 738 739 740 accessfailed: 741 mutex_unlock(&mdev_state->ops_lock); 742 743 return ret; 744 } 745 746 static int mtty_init_dev(struct vfio_device *vdev) 747 { 748 struct mdev_state *mdev_state = 749 container_of(vdev, struct mdev_state, vdev); 750 struct mdev_device *mdev = to_mdev_device(vdev->dev); 751 struct mtty_type *type = 752 container_of(mdev->type, struct mtty_type, type); 753 int avail_ports = atomic_read(&mdev_avail_ports); 754 int ret; 755 756 do { 757 if (avail_ports < type->nr_ports) 758 return -ENOSPC; 759 } while (!atomic_try_cmpxchg(&mdev_avail_ports, 760 &avail_ports, 761 avail_ports - type->nr_ports)); 762 763 mdev_state->nr_ports = type->nr_ports; 764 mdev_state->irq_index = -1; 765 mdev_state->s[0].max_fifo_size = MAX_FIFO_SIZE; 766 mdev_state->s[1].max_fifo_size = MAX_FIFO_SIZE; 767 mutex_init(&mdev_state->rxtx_lock); 768 769 mdev_state->vconfig = kzalloc(MTTY_CONFIG_SPACE_SIZE, GFP_KERNEL); 770 if (!mdev_state->vconfig) { 771 ret = -ENOMEM; 772 goto err_nr_ports; 773 } 774 775 mutex_init(&mdev_state->ops_lock); 776 mdev_state->mdev = mdev; 777 mtty_create_config_space(mdev_state); 778 return 0; 779 780 err_nr_ports: 781 atomic_add(type->nr_ports, &mdev_avail_ports); 782 return ret; 783 } 784 785 static int mtty_probe(struct mdev_device *mdev) 786 { 787 struct mdev_state *mdev_state; 788 int ret; 789 790 mdev_state = vfio_alloc_device(mdev_state, vdev, &mdev->dev, 791 &mtty_dev_ops); 792 if (IS_ERR(mdev_state)) 793 return PTR_ERR(mdev_state); 794 795 ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev); 796 if (ret) 797 goto err_put_vdev; 798 dev_set_drvdata(&mdev->dev, mdev_state); 799 return 0; 800 801 err_put_vdev: 802 vfio_put_device(&mdev_state->vdev); 803 return ret; 804 } 805 806 static void mtty_release_dev(struct vfio_device *vdev) 807 { 808 struct mdev_state *mdev_state = 809 container_of(vdev, struct mdev_state, vdev); 810 811 atomic_add(mdev_state->nr_ports, &mdev_avail_ports); 812 kfree(mdev_state->vconfig); 813 } 814 815 static void mtty_remove(struct mdev_device *mdev) 816 { 817 struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev); 818 819 vfio_unregister_group_dev(&mdev_state->vdev); 820 vfio_put_device(&mdev_state->vdev); 821 } 822 823 static int mtty_reset(struct mdev_state *mdev_state) 824 { 825 pr_info("%s: called\n", __func__); 826 827 return 0; 828 } 829 830 static ssize_t mtty_read(struct vfio_device *vdev, char __user *buf, 831 size_t count, loff_t *ppos) 832 { 833 struct mdev_state *mdev_state = 834 container_of(vdev, struct mdev_state, vdev); 835 unsigned int done = 0; 836 int ret; 837 838 while (count) { 839 size_t filled; 840 841 if (count >= 4 && !(*ppos % 4)) { 842 u32 val; 843 844 ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val), 845 *ppos, false); 846 if (ret <= 0) 847 goto read_err; 848 849 if (copy_to_user(buf, &val, sizeof(val))) 850 goto read_err; 851 852 filled = 4; 853 } else if (count >= 2 && !(*ppos % 2)) { 854 u16 val; 855 856 ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val), 857 *ppos, false); 858 if (ret <= 0) 859 goto read_err; 860 861 if (copy_to_user(buf, &val, sizeof(val))) 862 goto read_err; 863 864 filled = 2; 865 } else { 866 u8 val; 867 868 ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val), 869 *ppos, false); 870 if (ret <= 0) 871 goto read_err; 872 873 if (copy_to_user(buf, &val, sizeof(val))) 874 goto read_err; 875 876 filled = 1; 877 } 878 879 count -= filled; 880 done += filled; 881 *ppos += filled; 882 buf += filled; 883 } 884 885 return done; 886 887 read_err: 888 return -EFAULT; 889 } 890 891 static ssize_t mtty_write(struct vfio_device *vdev, const char __user *buf, 892 size_t count, loff_t *ppos) 893 { 894 struct mdev_state *mdev_state = 895 container_of(vdev, struct mdev_state, vdev); 896 unsigned int done = 0; 897 int ret; 898 899 while (count) { 900 size_t filled; 901 902 if (count >= 4 && !(*ppos % 4)) { 903 u32 val; 904 905 if (copy_from_user(&val, buf, sizeof(val))) 906 goto write_err; 907 908 ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val), 909 *ppos, true); 910 if (ret <= 0) 911 goto write_err; 912 913 filled = 4; 914 } else if (count >= 2 && !(*ppos % 2)) { 915 u16 val; 916 917 if (copy_from_user(&val, buf, sizeof(val))) 918 goto write_err; 919 920 ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val), 921 *ppos, true); 922 if (ret <= 0) 923 goto write_err; 924 925 filled = 2; 926 } else { 927 u8 val; 928 929 if (copy_from_user(&val, buf, sizeof(val))) 930 goto write_err; 931 932 ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val), 933 *ppos, true); 934 if (ret <= 0) 935 goto write_err; 936 937 filled = 1; 938 } 939 count -= filled; 940 done += filled; 941 *ppos += filled; 942 buf += filled; 943 } 944 945 return done; 946 write_err: 947 return -EFAULT; 948 } 949 950 static void mtty_disable_intx(struct mdev_state *mdev_state) 951 { 952 if (mdev_state->intx_evtfd) { 953 eventfd_ctx_put(mdev_state->intx_evtfd); 954 mdev_state->intx_evtfd = NULL; 955 mdev_state->intx_mask = false; 956 mdev_state->irq_index = -1; 957 } 958 } 959 960 static void mtty_disable_msi(struct mdev_state *mdev_state) 961 { 962 if (mdev_state->msi_evtfd) { 963 eventfd_ctx_put(mdev_state->msi_evtfd); 964 mdev_state->msi_evtfd = NULL; 965 mdev_state->irq_index = -1; 966 } 967 } 968 969 static int mtty_set_irqs(struct mdev_state *mdev_state, uint32_t flags, 970 unsigned int index, unsigned int start, 971 unsigned int count, void *data) 972 { 973 int ret = 0; 974 975 mutex_lock(&mdev_state->ops_lock); 976 switch (index) { 977 case VFIO_PCI_INTX_IRQ_INDEX: 978 switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { 979 case VFIO_IRQ_SET_ACTION_MASK: 980 if (!is_intx(mdev_state) || start != 0 || count != 1) { 981 ret = -EINVAL; 982 break; 983 } 984 985 if (flags & VFIO_IRQ_SET_DATA_NONE) { 986 mdev_state->intx_mask = true; 987 } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { 988 uint8_t mask = *(uint8_t *)data; 989 990 if (mask) 991 mdev_state->intx_mask = true; 992 } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { 993 ret = -ENOTTY; /* No support for mask fd */ 994 } 995 break; 996 case VFIO_IRQ_SET_ACTION_UNMASK: 997 if (!is_intx(mdev_state) || start != 0 || count != 1) { 998 ret = -EINVAL; 999 break; 1000 } 1001 1002 if (flags & VFIO_IRQ_SET_DATA_NONE) { 1003 mdev_state->intx_mask = false; 1004 } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { 1005 uint8_t mask = *(uint8_t *)data; 1006 1007 if (mask) 1008 mdev_state->intx_mask = false; 1009 } else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { 1010 ret = -ENOTTY; /* No support for unmask fd */ 1011 } 1012 break; 1013 case VFIO_IRQ_SET_ACTION_TRIGGER: 1014 if (is_intx(mdev_state) && !count && 1015 (flags & VFIO_IRQ_SET_DATA_NONE)) { 1016 mtty_disable_intx(mdev_state); 1017 break; 1018 } 1019 1020 if (!(is_intx(mdev_state) || is_noirq(mdev_state)) || 1021 start != 0 || count != 1) { 1022 ret = -EINVAL; 1023 break; 1024 } 1025 1026 if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { 1027 int fd = *(int *)data; 1028 struct eventfd_ctx *evt; 1029 1030 mtty_disable_intx(mdev_state); 1031 1032 if (fd < 0) 1033 break; 1034 1035 evt = eventfd_ctx_fdget(fd); 1036 if (IS_ERR(evt)) { 1037 ret = PTR_ERR(evt); 1038 break; 1039 } 1040 mdev_state->intx_evtfd = evt; 1041 mdev_state->irq_index = index; 1042 break; 1043 } 1044 1045 if (!is_intx(mdev_state)) { 1046 ret = -EINVAL; 1047 break; 1048 } 1049 1050 if (flags & VFIO_IRQ_SET_DATA_NONE) { 1051 mtty_trigger_interrupt(mdev_state); 1052 } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { 1053 uint8_t trigger = *(uint8_t *)data; 1054 1055 if (trigger) 1056 mtty_trigger_interrupt(mdev_state); 1057 } 1058 break; 1059 } 1060 break; 1061 case VFIO_PCI_MSI_IRQ_INDEX: 1062 switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { 1063 case VFIO_IRQ_SET_ACTION_MASK: 1064 case VFIO_IRQ_SET_ACTION_UNMASK: 1065 ret = -ENOTTY; 1066 break; 1067 case VFIO_IRQ_SET_ACTION_TRIGGER: 1068 if (is_msi(mdev_state) && !count && 1069 (flags & VFIO_IRQ_SET_DATA_NONE)) { 1070 mtty_disable_msi(mdev_state); 1071 break; 1072 } 1073 1074 if (!(is_msi(mdev_state) || is_noirq(mdev_state)) || 1075 start != 0 || count != 1) { 1076 ret = -EINVAL; 1077 break; 1078 } 1079 1080 if (flags & VFIO_IRQ_SET_DATA_EVENTFD) { 1081 int fd = *(int *)data; 1082 struct eventfd_ctx *evt; 1083 1084 mtty_disable_msi(mdev_state); 1085 1086 if (fd < 0) 1087 break; 1088 1089 evt = eventfd_ctx_fdget(fd); 1090 if (IS_ERR(evt)) { 1091 ret = PTR_ERR(evt); 1092 break; 1093 } 1094 mdev_state->msi_evtfd = evt; 1095 mdev_state->irq_index = index; 1096 break; 1097 } 1098 1099 if (!is_msi(mdev_state)) { 1100 ret = -EINVAL; 1101 break; 1102 } 1103 1104 if (flags & VFIO_IRQ_SET_DATA_NONE) { 1105 mtty_trigger_interrupt(mdev_state); 1106 } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { 1107 uint8_t trigger = *(uint8_t *)data; 1108 1109 if (trigger) 1110 mtty_trigger_interrupt(mdev_state); 1111 } 1112 break; 1113 } 1114 break; 1115 case VFIO_PCI_MSIX_IRQ_INDEX: 1116 dev_dbg(mdev_state->vdev.dev, "%s: MSIX_IRQ\n", __func__); 1117 ret = -ENOTTY; 1118 break; 1119 case VFIO_PCI_ERR_IRQ_INDEX: 1120 dev_dbg(mdev_state->vdev.dev, "%s: ERR_IRQ\n", __func__); 1121 ret = -ENOTTY; 1122 break; 1123 case VFIO_PCI_REQ_IRQ_INDEX: 1124 dev_dbg(mdev_state->vdev.dev, "%s: REQ_IRQ\n", __func__); 1125 ret = -ENOTTY; 1126 break; 1127 } 1128 1129 mutex_unlock(&mdev_state->ops_lock); 1130 return ret; 1131 } 1132 1133 static int mtty_get_region_info(struct mdev_state *mdev_state, 1134 struct vfio_region_info *region_info, 1135 u16 *cap_type_id, void **cap_type) 1136 { 1137 unsigned int size = 0; 1138 u32 bar_index; 1139 1140 bar_index = region_info->index; 1141 if (bar_index >= VFIO_PCI_NUM_REGIONS) 1142 return -EINVAL; 1143 1144 mutex_lock(&mdev_state->ops_lock); 1145 1146 switch (bar_index) { 1147 case VFIO_PCI_CONFIG_REGION_INDEX: 1148 size = MTTY_CONFIG_SPACE_SIZE; 1149 break; 1150 case VFIO_PCI_BAR0_REGION_INDEX: 1151 size = MTTY_IO_BAR_SIZE; 1152 break; 1153 case VFIO_PCI_BAR1_REGION_INDEX: 1154 if (mdev_state->nr_ports == 2) 1155 size = MTTY_IO_BAR_SIZE; 1156 break; 1157 default: 1158 size = 0; 1159 break; 1160 } 1161 1162 mdev_state->region_info[bar_index].size = size; 1163 mdev_state->region_info[bar_index].vfio_offset = 1164 MTTY_VFIO_PCI_INDEX_TO_OFFSET(bar_index); 1165 1166 region_info->size = size; 1167 region_info->offset = MTTY_VFIO_PCI_INDEX_TO_OFFSET(bar_index); 1168 region_info->flags = VFIO_REGION_INFO_FLAG_READ | 1169 VFIO_REGION_INFO_FLAG_WRITE; 1170 mutex_unlock(&mdev_state->ops_lock); 1171 return 0; 1172 } 1173 1174 static int mtty_get_irq_info(struct vfio_irq_info *irq_info) 1175 { 1176 if (irq_info->index != VFIO_PCI_INTX_IRQ_INDEX && 1177 irq_info->index != VFIO_PCI_MSI_IRQ_INDEX) 1178 return -EINVAL; 1179 1180 irq_info->flags = VFIO_IRQ_INFO_EVENTFD; 1181 irq_info->count = 1; 1182 1183 if (irq_info->index == VFIO_PCI_INTX_IRQ_INDEX) 1184 irq_info->flags |= VFIO_IRQ_INFO_MASKABLE | 1185 VFIO_IRQ_INFO_AUTOMASKED; 1186 else 1187 irq_info->flags |= VFIO_IRQ_INFO_NORESIZE; 1188 1189 return 0; 1190 } 1191 1192 static int mtty_get_device_info(struct vfio_device_info *dev_info) 1193 { 1194 dev_info->flags = VFIO_DEVICE_FLAGS_PCI; 1195 dev_info->num_regions = VFIO_PCI_NUM_REGIONS; 1196 dev_info->num_irqs = VFIO_PCI_NUM_IRQS; 1197 1198 return 0; 1199 } 1200 1201 static long mtty_ioctl(struct vfio_device *vdev, unsigned int cmd, 1202 unsigned long arg) 1203 { 1204 struct mdev_state *mdev_state = 1205 container_of(vdev, struct mdev_state, vdev); 1206 int ret = 0; 1207 unsigned long minsz; 1208 1209 switch (cmd) { 1210 case VFIO_DEVICE_GET_INFO: 1211 { 1212 struct vfio_device_info info; 1213 1214 minsz = offsetofend(struct vfio_device_info, num_irqs); 1215 1216 if (copy_from_user(&info, (void __user *)arg, minsz)) 1217 return -EFAULT; 1218 1219 if (info.argsz < minsz) 1220 return -EINVAL; 1221 1222 ret = mtty_get_device_info(&info); 1223 if (ret) 1224 return ret; 1225 1226 memcpy(&mdev_state->dev_info, &info, sizeof(info)); 1227 1228 if (copy_to_user((void __user *)arg, &info, minsz)) 1229 return -EFAULT; 1230 1231 return 0; 1232 } 1233 case VFIO_DEVICE_GET_REGION_INFO: 1234 { 1235 struct vfio_region_info info; 1236 u16 cap_type_id = 0; 1237 void *cap_type = NULL; 1238 1239 minsz = offsetofend(struct vfio_region_info, offset); 1240 1241 if (copy_from_user(&info, (void __user *)arg, minsz)) 1242 return -EFAULT; 1243 1244 if (info.argsz < minsz) 1245 return -EINVAL; 1246 1247 ret = mtty_get_region_info(mdev_state, &info, &cap_type_id, 1248 &cap_type); 1249 if (ret) 1250 return ret; 1251 1252 if (copy_to_user((void __user *)arg, &info, minsz)) 1253 return -EFAULT; 1254 1255 return 0; 1256 } 1257 1258 case VFIO_DEVICE_GET_IRQ_INFO: 1259 { 1260 struct vfio_irq_info info; 1261 1262 minsz = offsetofend(struct vfio_irq_info, count); 1263 1264 if (copy_from_user(&info, (void __user *)arg, minsz)) 1265 return -EFAULT; 1266 1267 if ((info.argsz < minsz) || 1268 (info.index >= mdev_state->dev_info.num_irqs)) 1269 return -EINVAL; 1270 1271 ret = mtty_get_irq_info(&info); 1272 if (ret) 1273 return ret; 1274 1275 if (copy_to_user((void __user *)arg, &info, minsz)) 1276 return -EFAULT; 1277 1278 return 0; 1279 } 1280 case VFIO_DEVICE_SET_IRQS: 1281 { 1282 struct vfio_irq_set hdr; 1283 u8 *data = NULL, *ptr = NULL; 1284 size_t data_size = 0; 1285 1286 minsz = offsetofend(struct vfio_irq_set, count); 1287 1288 if (copy_from_user(&hdr, (void __user *)arg, minsz)) 1289 return -EFAULT; 1290 1291 ret = vfio_set_irqs_validate_and_prepare(&hdr, 1292 mdev_state->dev_info.num_irqs, 1293 VFIO_PCI_NUM_IRQS, 1294 &data_size); 1295 if (ret) 1296 return ret; 1297 1298 if (data_size) { 1299 ptr = data = memdup_user((void __user *)(arg + minsz), 1300 data_size); 1301 if (IS_ERR(data)) 1302 return PTR_ERR(data); 1303 } 1304 1305 ret = mtty_set_irqs(mdev_state, hdr.flags, hdr.index, hdr.start, 1306 hdr.count, data); 1307 1308 kfree(ptr); 1309 return ret; 1310 } 1311 case VFIO_DEVICE_RESET: 1312 return mtty_reset(mdev_state); 1313 } 1314 return -ENOTTY; 1315 } 1316 1317 static ssize_t 1318 sample_mdev_dev_show(struct device *dev, struct device_attribute *attr, 1319 char *buf) 1320 { 1321 return sprintf(buf, "This is MDEV %s\n", dev_name(dev)); 1322 } 1323 1324 static DEVICE_ATTR_RO(sample_mdev_dev); 1325 1326 static struct attribute *mdev_dev_attrs[] = { 1327 &dev_attr_sample_mdev_dev.attr, 1328 NULL, 1329 }; 1330 1331 static const struct attribute_group mdev_dev_group = { 1332 .name = "vendor", 1333 .attrs = mdev_dev_attrs, 1334 }; 1335 1336 static const struct attribute_group *mdev_dev_groups[] = { 1337 &mdev_dev_group, 1338 NULL, 1339 }; 1340 1341 static unsigned int mtty_get_available(struct mdev_type *mtype) 1342 { 1343 struct mtty_type *type = container_of(mtype, struct mtty_type, type); 1344 1345 return atomic_read(&mdev_avail_ports) / type->nr_ports; 1346 } 1347 1348 static void mtty_close(struct vfio_device *vdev) 1349 { 1350 struct mdev_state *mdev_state = 1351 container_of(vdev, struct mdev_state, vdev); 1352 1353 mtty_disable_intx(mdev_state); 1354 mtty_disable_msi(mdev_state); 1355 } 1356 1357 static const struct vfio_device_ops mtty_dev_ops = { 1358 .name = "vfio-mtty", 1359 .init = mtty_init_dev, 1360 .release = mtty_release_dev, 1361 .read = mtty_read, 1362 .write = mtty_write, 1363 .ioctl = mtty_ioctl, 1364 .bind_iommufd = vfio_iommufd_emulated_bind, 1365 .unbind_iommufd = vfio_iommufd_emulated_unbind, 1366 .attach_ioas = vfio_iommufd_emulated_attach_ioas, 1367 .detach_ioas = vfio_iommufd_emulated_detach_ioas, 1368 .close_device = mtty_close, 1369 }; 1370 1371 static struct mdev_driver mtty_driver = { 1372 .device_api = VFIO_DEVICE_API_PCI_STRING, 1373 .driver = { 1374 .name = "mtty", 1375 .owner = THIS_MODULE, 1376 .mod_name = KBUILD_MODNAME, 1377 .dev_groups = mdev_dev_groups, 1378 }, 1379 .probe = mtty_probe, 1380 .remove = mtty_remove, 1381 .get_available = mtty_get_available, 1382 }; 1383 1384 static void mtty_device_release(struct device *dev) 1385 { 1386 dev_dbg(dev, "mtty: released\n"); 1387 } 1388 1389 static int __init mtty_dev_init(void) 1390 { 1391 int ret = 0; 1392 1393 pr_info("mtty_dev: %s\n", __func__); 1394 1395 memset(&mtty_dev, 0, sizeof(mtty_dev)); 1396 1397 idr_init(&mtty_dev.vd_idr); 1398 1399 ret = alloc_chrdev_region(&mtty_dev.vd_devt, 0, MINORMASK + 1, 1400 MTTY_NAME); 1401 1402 if (ret < 0) { 1403 pr_err("Error: failed to register mtty_dev, err:%d\n", ret); 1404 return ret; 1405 } 1406 1407 cdev_init(&mtty_dev.vd_cdev, &vd_fops); 1408 cdev_add(&mtty_dev.vd_cdev, mtty_dev.vd_devt, MINORMASK + 1); 1409 1410 pr_info("major_number:%d\n", MAJOR(mtty_dev.vd_devt)); 1411 1412 ret = mdev_register_driver(&mtty_driver); 1413 if (ret) 1414 goto err_cdev; 1415 1416 mtty_dev.vd_class = class_create(MTTY_CLASS_NAME); 1417 1418 if (IS_ERR(mtty_dev.vd_class)) { 1419 pr_err("Error: failed to register mtty_dev class\n"); 1420 ret = PTR_ERR(mtty_dev.vd_class); 1421 goto err_driver; 1422 } 1423 1424 mtty_dev.dev.class = mtty_dev.vd_class; 1425 mtty_dev.dev.release = mtty_device_release; 1426 dev_set_name(&mtty_dev.dev, "%s", MTTY_NAME); 1427 1428 ret = device_register(&mtty_dev.dev); 1429 if (ret) 1430 goto err_put; 1431 1432 ret = mdev_register_parent(&mtty_dev.parent, &mtty_dev.dev, 1433 &mtty_driver, mtty_mdev_types, 1434 ARRAY_SIZE(mtty_mdev_types)); 1435 if (ret) 1436 goto err_device; 1437 return 0; 1438 1439 err_device: 1440 device_del(&mtty_dev.dev); 1441 err_put: 1442 put_device(&mtty_dev.dev); 1443 class_destroy(mtty_dev.vd_class); 1444 err_driver: 1445 mdev_unregister_driver(&mtty_driver); 1446 err_cdev: 1447 cdev_del(&mtty_dev.vd_cdev); 1448 unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK + 1); 1449 return ret; 1450 } 1451 1452 static void __exit mtty_dev_exit(void) 1453 { 1454 mtty_dev.dev.bus = NULL; 1455 mdev_unregister_parent(&mtty_dev.parent); 1456 1457 device_unregister(&mtty_dev.dev); 1458 idr_destroy(&mtty_dev.vd_idr); 1459 mdev_unregister_driver(&mtty_driver); 1460 cdev_del(&mtty_dev.vd_cdev); 1461 unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK + 1); 1462 class_destroy(mtty_dev.vd_class); 1463 mtty_dev.vd_class = NULL; 1464 pr_info("mtty_dev: Unloaded!\n"); 1465 } 1466 1467 module_init(mtty_dev_init) 1468 module_exit(mtty_dev_exit) 1469 1470 MODULE_LICENSE("GPL v2"); 1471 MODULE_INFO(supported, "Test driver that simulate serial port over PCI"); 1472 MODULE_VERSION(VERSION_STRING); 1473 MODULE_AUTHOR(DRIVER_AUTHOR); 1474