/**
 * IBM Accelerator Family 'GenWQE'
 *
 * (C) Copyright IBM Corp. 2013
 *
 * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
 * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
 * Author: Michael Jung <mijung@gmx.net>
 * Author: Michael Ruettger <michael@ibmra.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

/*
 * Character device representation of the GenWQE device. This allows
 * user-space applications to communicate with the card.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/atomic.h>

#include "card_base.h"
#include "card_ddcb.h"

static int genwqe_open_files(struct genwqe_dev *cd)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&cd->file_lock, flags);
	rc = list_empty(&cd->file_list);
	spin_unlock_irqrestore(&cd->file_lock, flags);
	return !rc;
}

static void genwqe_add_file(struct genwqe_dev *cd, struct genwqe_file *cfile)
{
	unsigned long flags;

	cfile->owner = current;
	spin_lock_irqsave(&cd->file_lock, flags);
	list_add(&cfile->list, &cd->file_list);
	spin_unlock_irqrestore(&cd->file_lock, flags);
}

static int genwqe_del_file(struct genwqe_dev *cd, struct genwqe_file *cfile)
{
	unsigned long flags;

	spin_lock_irqsave(&cd->file_lock, flags);
	list_del(&cfile->list);
	spin_unlock_irqrestore(&cd->file_lock, flags);

	return 0;
}

static void genwqe_add_pin(struct genwqe_file *cfile, struct dma_mapping *m)
{
	unsigned long flags;

	spin_lock_irqsave(&cfile->pin_lock, flags);
	list_add(&m->pin_list, &cfile->pin_list);
	spin_unlock_irqrestore(&cfile->pin_lock, flags);
}

static int genwqe_del_pin(struct genwqe_file *cfile, struct dma_mapping *m)
{
	unsigned long flags;

	spin_lock_irqsave(&cfile->pin_lock, flags);
	list_del(&m->pin_list);
	spin_unlock_irqrestore(&cfile->pin_lock, flags);

	return 0;
}

/**
 * genwqe_search_pin() - Search for the mapping for a userspace address
 * @cfile:	Descriptor of opened file
 * @u_addr:	User virtual address
 * @size:	Size of buffer
 * @virt_addr:	Kernel virtual address to be updated
 *
 * Return: Pointer to the corresponding mapping; NULL if not found
 */
static struct dma_mapping *genwqe_search_pin(struct genwqe_file *cfile,
					     unsigned long u_addr,
					     unsigned int size,
					     void **virt_addr)
{
	unsigned long flags;
	struct dma_mapping *m;

	spin_lock_irqsave(&cfile->pin_lock, flags);

	list_for_each_entry(m, &cfile->pin_list, pin_list) {
		if ((((u64)m->u_vaddr) <= (u_addr)) &&
		    (((u64)m->u_vaddr + m->size) >= (u_addr + size))) {

			if (virt_addr)
				*virt_addr = m->k_vaddr +
					(u_addr - (u64)m->u_vaddr);

			spin_unlock_irqrestore(&cfile->pin_lock, flags);
			return m;
		}
	}
	spin_unlock_irqrestore(&cfile->pin_lock, flags);
	return NULL;
}

static void __genwqe_add_mapping(struct genwqe_file *cfile,
				 struct dma_mapping *dma_map)
{
	unsigned long flags;

	spin_lock_irqsave(&cfile->map_lock, flags);
	list_add(&dma_map->card_list, &cfile->map_list);
	spin_unlock_irqrestore(&cfile->map_lock, flags);
}

static void __genwqe_del_mapping(struct genwqe_file *cfile,
				 struct dma_mapping *dma_map)
{
	unsigned long flags;

	spin_lock_irqsave(&cfile->map_lock, flags);
	list_del(&dma_map->card_list);
	spin_unlock_irqrestore(&cfile->map_lock, flags);
}


/**
 * __genwqe_search_mapping() - Search for the mapping for a userspace address
 * @cfile:	descriptor of opened file
 * @u_addr:	user virtual address
 * @size:	size of buffer
 * @dma_addr:	DMA address to be updated
 * @virt_addr:	kernel virtual address to be updated
 *
 * Return: Pointer to the corresponding mapping; NULL if not found
 */
static struct dma_mapping *__genwqe_search_mapping(struct genwqe_file *cfile,
						   unsigned long u_addr,
						   unsigned int size,
						   dma_addr_t *dma_addr,
						   void **virt_addr)
{
	unsigned long flags;
	struct dma_mapping *m;
	struct pci_dev *pci_dev = cfile->cd->pci_dev;

	spin_lock_irqsave(&cfile->map_lock, flags);
	list_for_each_entry(m, &cfile->map_list, card_list) {

		if ((((u64)m->u_vaddr) <= (u_addr)) &&
		    (((u64)m->u_vaddr + m->size) >= (u_addr + size))) {

			/* match found: current is as expected and
			   addr is in range */
			if (dma_addr)
				*dma_addr = m->dma_addr +
					(u_addr - (u64)m->u_vaddr);

			if (virt_addr)
				*virt_addr = m->k_vaddr +
					(u_addr - (u64)m->u_vaddr);

			spin_unlock_irqrestore(&cfile->map_lock, flags);
			return m;
		}
	}
	spin_unlock_irqrestore(&cfile->map_lock, flags);

	dev_err(&pci_dev->dev,
		"[%s] Entry not found: u_addr=%lx, size=%x\n",
		__func__, u_addr, size);

	return NULL;
}

static void genwqe_remove_mappings(struct genwqe_file *cfile)
{
	int i = 0;
	struct list_head *node, *next;
	struct dma_mapping *dma_map;
	struct genwqe_dev *cd = cfile->cd;
	struct pci_dev *pci_dev = cfile->cd->pci_dev;

	list_for_each_safe(node, next, &cfile->map_list) {
		dma_map = list_entry(node, struct dma_mapping, card_list);

		list_del_init(&dma_map->card_list);

		/*
		 * This is really a bug, because those things should
		 * have been already tidied up.
		 *
		 * GENWQE_MAPPING_RAW should have been removed via munmap().
		 * GENWQE_MAPPING_SGL_TEMP should be removed by tidy up code.
		 */
		dev_err(&pci_dev->dev,
			"[%s] %d. cleanup mapping: u_vaddr=%p u_kaddr=%016lx dma_addr=%lx\n",
			__func__, i++, dma_map->u_vaddr,
			(unsigned long)dma_map->k_vaddr,
			(unsigned long)dma_map->dma_addr);

		if (dma_map->type == GENWQE_MAPPING_RAW) {
			/* we allocated this dynamically */
			__genwqe_free_consistent(cd, dma_map->size,
						 dma_map->k_vaddr,
						 dma_map->dma_addr);
			kfree(dma_map);
		} else if (dma_map->type == GENWQE_MAPPING_SGL_TEMP) {
			/* we use dma_map statically from the request */
			genwqe_user_vunmap(cd, dma_map);
		}
	}
}

static void genwqe_remove_pinnings(struct genwqe_file *cfile)
{
	struct list_head *node, *next;
	struct dma_mapping *dma_map;
	struct genwqe_dev *cd = cfile->cd;

	list_for_each_safe(node, next, &cfile->pin_list) {
		dma_map = list_entry(node, struct dma_mapping, pin_list);

		/*
		 * This is not a bug, because a killed process might
		 * not call the unpin ioctl, which is supposed to free
		 * the resources.
		 *
		 * Pinnings are dynamically allocated and need to be
		 * deleted.
		 */
		list_del_init(&dma_map->pin_list);
		genwqe_user_vunmap(cd, dma_map);
		kfree(dma_map);
	}
}

/**
 * genwqe_kill_fasync() - Send signal to all processes with open GenWQE files
 *
 * E.g. genwqe_kill_fasync(cd, SIGIO);
 */
static int genwqe_kill_fasync(struct genwqe_dev *cd, int sig)
{
	unsigned int files = 0;
	unsigned long flags;
	struct genwqe_file *cfile;

	spin_lock_irqsave(&cd->file_lock, flags);
	list_for_each_entry(cfile, &cd->file_list, list) {
		if (cfile->async_queue)
			kill_fasync(&cfile->async_queue, sig, POLL_HUP);
		files++;
	}
	spin_unlock_irqrestore(&cd->file_lock, flags);
	return files;
}

static int genwqe_force_sig(struct genwqe_dev *cd, int sig)
{
	unsigned int files = 0;
	unsigned long flags;
	struct genwqe_file *cfile;

	spin_lock_irqsave(&cd->file_lock, flags);
	list_for_each_entry(cfile, &cd->file_list, list) {
		force_sig(sig, cfile->owner);
		files++;
	}
	spin_unlock_irqrestore(&cd->file_lock, flags);
	return files;
}

/**
 * genwqe_open() - file open
 * @inode:	file system information
 * @filp:	file handle
 *
 * This function is executed whenever an application calls
 * open("/dev/genwqe",..).
 *
 * Return: 0 if successful or <0 if errors
 */
static int genwqe_open(struct inode *inode, struct file *filp)
{
	struct genwqe_dev *cd;
	struct genwqe_file *cfile;

	cfile = kzalloc(sizeof(*cfile), GFP_KERNEL);
	if (cfile == NULL)
		return -ENOMEM;

	cd = container_of(inode->i_cdev, struct genwqe_dev, cdev_genwqe);
	cfile->cd = cd;
	cfile->filp = filp;
	cfile->client = NULL;

	spin_lock_init(&cfile->map_lock);  /* list of raw memory allocations */
	INIT_LIST_HEAD(&cfile->map_list);

	spin_lock_init(&cfile->pin_lock);  /* list of user pinned memory */
	INIT_LIST_HEAD(&cfile->pin_list);

	filp->private_data = cfile;

	genwqe_add_file(cd, cfile);
	return 0;
}

/**
 * genwqe_fasync() - Setup process to receive SIGIO.
 * @fd:	  file descriptor
 * @filp: file handle
 * @mode: file mode
 *
 * Sending a signal works as follows:
 *
 *	if (cdev->async_queue)
 *		kill_fasync(&cdev->async_queue, SIGIO, POLL_IN);
 *
 * Some devices also implement asynchronous notification to indicate
 * when the device can be written; in this case, of course,
 * kill_fasync must be called with a mode of POLL_OUT.
 */
static int genwqe_fasync(int fd, struct file *filp, int mode)
{
	struct genwqe_file *cdev = (struct genwqe_file *)filp->private_data;

	return fasync_helper(fd, filp, mode, &cdev->async_queue);
}


/**
 * genwqe_release() - file close
 * @inode:	file system information
 * @filp:	file handle
 *
 * This function is executed whenever an application calls 'close(fd_genwqe)'
 *
 * Return: always 0
 */
static int genwqe_release(struct inode *inode, struct file *filp)
{
	struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
	struct genwqe_dev *cd = cfile->cd;

	/* there must be no entries in these lists! */
	genwqe_remove_mappings(cfile);
	genwqe_remove_pinnings(cfile);

	/* remove this filp from the asynchronously notified filp's */
	genwqe_fasync(-1, filp, 0);

	/*
	 * For this to work we must not release cd while this cfile is
	 * not yet released, otherwise the list entry becomes invalid,
	 * because the list itself gets reinstantiated!
	 */
	genwqe_del_file(cd, cfile);
	kfree(cfile);
	return 0;
}

static void genwqe_vma_open(struct vm_area_struct *vma)
{
	/* nothing ... */
}

/**
 * genwqe_vma_close() - Called each time the vma is unmapped
 *
 * Free memory which got allocated by GenWQE mmap().
 */
static void genwqe_vma_close(struct vm_area_struct *vma)
{
	unsigned long vsize = vma->vm_end - vma->vm_start;
	struct inode *inode = file_inode(vma->vm_file);
	struct dma_mapping *dma_map;
	struct genwqe_dev *cd = container_of(inode->i_cdev, struct genwqe_dev,
					    cdev_genwqe);
	struct pci_dev *pci_dev = cd->pci_dev;
	dma_addr_t d_addr = 0;
	struct genwqe_file *cfile = vma->vm_private_data;

	dma_map = __genwqe_search_mapping(cfile, vma->vm_start, vsize,
					 &d_addr, NULL);
	if (dma_map == NULL) {
		dev_err(&pci_dev->dev,
			" [%s] err: mapping not found: v=%lx, p=%lx s=%lx\n",
			__func__, vma->vm_start, vma->vm_pgoff << PAGE_SHIFT,
			vsize);
		return;
	}
	__genwqe_del_mapping(cfile, dma_map);
	__genwqe_free_consistent(cd, dma_map->size, dma_map->k_vaddr,
				 dma_map->dma_addr);
	kfree(dma_map);
}

static const struct vm_operations_struct genwqe_vma_ops = {
	.open	= genwqe_vma_open,
	.close	= genwqe_vma_close,
};

/**
 * genwqe_mmap() - Provide contiguous buffers to userspace
 *
 * We use mmap() to allocate contiguous buffers used for DMA
 * transfers. After the buffer is allocated we remap it to user-space
 * and remember a reference to our dma_mapping data structure, where
 * we store the associated DMA address and allocated size.
 *
 * When we receive a DDCB execution request with the ATS bits set to
 * plain buffer, we look up our dma_mapping list to find the
 * corresponding DMA address for the associated user-space address.
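 *
 * A minimal user-space sketch of that flow (the O_RDWR mode and the
 * 2 MiB mapping size are illustrative assumptions, not requirements
 * of this driver; the node name follows the device_create() pattern
 * used further below):
 *
 *	int fd = open("/dev/genwqe0_card", O_RDWR);
 *	void *buf = mmap(NULL, 2 * 1024 * 1024, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *
 * The returned buffer is backed by one physically contiguous DMA
 * allocation and can later be referenced by plain-buffer ATS entries
 * of a DDCB.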
 */
static int genwqe_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int rc;
	unsigned long pfn, vsize = vma->vm_end - vma->vm_start;
	struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
	struct genwqe_dev *cd = cfile->cd;
	struct dma_mapping *dma_map;

	if (vsize == 0)
		return -EINVAL;

	if (get_order(vsize) > MAX_ORDER)
		return -ENOMEM;

	dma_map = kzalloc(sizeof(struct dma_mapping), GFP_KERNEL);
	if (dma_map == NULL)
		return -ENOMEM;

	genwqe_mapping_init(dma_map, GENWQE_MAPPING_RAW);
	dma_map->u_vaddr = (void *)vma->vm_start;
	dma_map->size = vsize;
	dma_map->nr_pages = DIV_ROUND_UP(vsize, PAGE_SIZE);
	dma_map->k_vaddr = __genwqe_alloc_consistent(cd, vsize,
						     &dma_map->dma_addr);
	if (dma_map->k_vaddr == NULL) {
		rc = -ENOMEM;
		goto free_dma_map;
	}

	if (capable(CAP_SYS_ADMIN) && (vsize > sizeof(dma_addr_t)))
		*(dma_addr_t *)dma_map->k_vaddr = dma_map->dma_addr;

	pfn = virt_to_phys(dma_map->k_vaddr) >> PAGE_SHIFT;
	rc = remap_pfn_range(vma,
			     vma->vm_start,
			     pfn,
			     vsize,
			     vma->vm_page_prot);
	if (rc != 0) {
		rc = -EFAULT;
		goto free_dma_mem;
	}

	vma->vm_private_data = cfile;
	vma->vm_ops = &genwqe_vma_ops;
	__genwqe_add_mapping(cfile, dma_map);

	return 0;

 free_dma_mem:
	__genwqe_free_consistent(cd, dma_map->size,
				 dma_map->k_vaddr,
				 dma_map->dma_addr);
 free_dma_map:
	kfree(dma_map);
	return rc;
}

/**
 * do_flash_update() - Execute flash update (write image or CVPD)
 * @cfile:	descriptor of opened file
 * @load:	details about image load
 *
 * Return: 0 if successful
 */

#define FLASH_BLOCK 0x40000	/* we use 256k blocks */

static int do_flash_update(struct genwqe_file *cfile,
			   struct genwqe_bitstream *load)
{
	int rc = 0;
	int blocks_to_flash;
	dma_addr_t dma_addr;
	u64 flash = 0;
	size_t tocopy = 0;
	u8 __user *buf;
	u8 *xbuf;
	u32 crc;
	u8 cmdopts;
	struct genwqe_dev *cd = cfile->cd;
	struct file *filp = cfile->filp;
	struct pci_dev *pci_dev = cd->pci_dev;

	if ((load->size & 0x3) != 0)
		return -EINVAL;

	if (((unsigned long)(load->data_addr) & ~PAGE_MASK) != 0)
		return -EINVAL;

	/* FIXME Bits have changed for new service layer! */
	switch ((char)load->partition) {
	case '0':
		cmdopts = 0x14;
		break;		/* download/erase_first/part_0 */
	case '1':
		cmdopts = 0x1C;
		break;		/* download/erase_first/part_1 */
	case 'v':
		cmdopts = 0x0C;
		break;		/* download/erase_first/vpd */
	default:
		return -EINVAL;
	}

	buf = (u8 __user *)load->data_addr;
	xbuf = __genwqe_alloc_consistent(cd, FLASH_BLOCK, &dma_addr);
	if (xbuf == NULL)
		return -ENOMEM;

	blocks_to_flash = load->size / FLASH_BLOCK;
	while (load->size) {
		struct genwqe_ddcb_cmd *req;

		/*
		 * We must be 4 byte aligned. Buffer must be 0 appended
		 * to have defined values when calculating CRC.
		 */
		tocopy = min_t(size_t, load->size, FLASH_BLOCK);

		rc = copy_from_user(xbuf, buf, tocopy);
		if (rc) {
			rc = -EFAULT;
			goto free_buffer;
		}
		crc = genwqe_crc32(xbuf, tocopy, 0xffffffff);

		dev_dbg(&pci_dev->dev,
			"[%s] DMA: %lx CRC: %08x SZ: %ld %d\n",
			__func__, (unsigned long)dma_addr, crc, tocopy,
			blocks_to_flash);

		/* prepare DDCB for SLU process */
		req = ddcb_requ_alloc();
		if (req == NULL) {
			rc = -ENOMEM;
			goto free_buffer;
		}

		req->cmd = SLCMD_MOVE_FLASH;
		req->cmdopts = cmdopts;

		/* prepare invariant values */
		if (genwqe_get_slu_id(cd) <= 0x2) {
			*(__be64 *)&req->__asiv[0]  = cpu_to_be64(dma_addr);
			*(__be64 *)&req->__asiv[8]  = cpu_to_be64(tocopy);
			*(__be64 *)&req->__asiv[16] = cpu_to_be64(flash);
			*(__be32 *)&req->__asiv[24] = cpu_to_be32(0);
			req->__asiv[24]		    = load->uid;
			*(__be32 *)&req->__asiv[28] = cpu_to_be32(crc);

			/* for simulation only */
			*(__be64 *)&req->__asiv[88] = cpu_to_be64(load->slu_id);
			*(__be64 *)&req->__asiv[96] = cpu_to_be64(load->app_id);
			req->asiv_length = 32; /* bytes included in crc calc */
		} else {	/* setup DDCB for ATS architecture */
			*(__be64 *)&req->asiv[0]  = cpu_to_be64(dma_addr);
			*(__be32 *)&req->asiv[8]  = cpu_to_be32(tocopy);
			*(__be32 *)&req->asiv[12] = cpu_to_be32(0); /* resvd */
			*(__be64 *)&req->asiv[16] = cpu_to_be64(flash);
			*(__be32 *)&req->asiv[24] = cpu_to_be32(load->uid<<24);
			*(__be32 *)&req->asiv[28] = cpu_to_be32(crc);

			/* for simulation only */
			*(__be64 *)&req->asiv[80] = cpu_to_be64(load->slu_id);
			*(__be64 *)&req->asiv[88] = cpu_to_be64(load->app_id);

			/* Rd only */
			req->ats = 0x4ULL << 44;
			req->asiv_length = 40; /* bytes included in crc calc */
		}
		req->asv_length = 8;

		/* For Genwqe5 we get back the calculated CRC */
		*(u64 *)&req->asv[0] = 0ULL;			/* 0x80 */

		rc = __genwqe_execute_raw_ddcb(cd, req, filp->f_flags);

		load->retc = req->retc;
		load->attn = req->attn;
		load->progress = req->progress;

		if (rc < 0) {
			ddcb_requ_free(req);
			goto free_buffer;
		}

		if (req->retc != DDCB_RETC_COMPLETE) {
			rc = -EIO;
			ddcb_requ_free(req);
			goto free_buffer;
		}

		load->size -= tocopy;
		flash += tocopy;
		buf += tocopy;
		blocks_to_flash--;
		ddcb_requ_free(req);
	}

 free_buffer:
	__genwqe_free_consistent(cd, FLASH_BLOCK, xbuf, dma_addr);
	return rc;
}

static int do_flash_read(struct genwqe_file *cfile,
			 struct genwqe_bitstream *load)
{
	int rc, blocks_to_flash;
	dma_addr_t dma_addr;
	u64 flash = 0;
	size_t tocopy = 0;
	u8 __user *buf;
	u8 *xbuf;
	u8 cmdopts;
	struct genwqe_dev *cd = cfile->cd;
	struct file *filp = cfile->filp;
	struct pci_dev *pci_dev = cd->pci_dev;
	struct genwqe_ddcb_cmd *cmd;

	if ((load->size & 0x3) != 0)
		return -EINVAL;

	if (((unsigned long)(load->data_addr) & ~PAGE_MASK) != 0)
		return -EINVAL;

	/* FIXME Bits have changed for new service layer! */
	switch ((char)load->partition) {
	case '0':
		cmdopts = 0x12;
		break;		/* upload/part_0 */
	case '1':
		cmdopts = 0x1A;
		break;		/* upload/part_1 */
	case 'v':
		cmdopts = 0x0A;
		break;		/* upload/vpd */
	default:
		return -EINVAL;
	}

	buf = (u8 __user *)load->data_addr;
	xbuf = __genwqe_alloc_consistent(cd, FLASH_BLOCK, &dma_addr);
	if (xbuf == NULL)
		return -ENOMEM;

	blocks_to_flash = load->size / FLASH_BLOCK;
	while (load->size) {
		/*
		 * We must be 4 byte aligned. Buffer must be 0 appended
		 * to have defined values when calculating CRC.
		 */
		tocopy = min_t(size_t, load->size, FLASH_BLOCK);

		dev_dbg(&pci_dev->dev,
			"[%s] DMA: %lx SZ: %ld %d\n",
			__func__, (unsigned long)dma_addr, tocopy,
			blocks_to_flash);

		/* prepare DDCB for SLU process */
		cmd = ddcb_requ_alloc();
		if (cmd == NULL) {
			rc = -ENOMEM;
			goto free_buffer;
		}
		cmd->cmd = SLCMD_MOVE_FLASH;
		cmd->cmdopts = cmdopts;

		/* prepare invariant values */
		if (genwqe_get_slu_id(cd) <= 0x2) {
			*(__be64 *)&cmd->__asiv[0]  = cpu_to_be64(dma_addr);
			*(__be64 *)&cmd->__asiv[8]  = cpu_to_be64(tocopy);
			*(__be64 *)&cmd->__asiv[16] = cpu_to_be64(flash);
			*(__be32 *)&cmd->__asiv[24] = cpu_to_be32(0);
			cmd->__asiv[24] = load->uid;
			*(__be32 *)&cmd->__asiv[28] = cpu_to_be32(0); /* CRC */
			cmd->asiv_length = 32; /* bytes included in crc calc */
		} else {	/* setup DDCB for ATS architecture */
			*(__be64 *)&cmd->asiv[0]  = cpu_to_be64(dma_addr);
			*(__be32 *)&cmd->asiv[8]  = cpu_to_be32(tocopy);
			*(__be32 *)&cmd->asiv[12] = cpu_to_be32(0); /* resvd */
			*(__be64 *)&cmd->asiv[16] = cpu_to_be64(flash);
			*(__be32 *)&cmd->asiv[24] = cpu_to_be32(load->uid<<24);
			*(__be32 *)&cmd->asiv[28] = cpu_to_be32(0); /* CRC */

			/* rd/wr */
			cmd->ats = 0x5ULL << 44;
			cmd->asiv_length = 40; /* bytes included in crc calc */
		}
		cmd->asv_length = 8;

		/* we only get back the calculated CRC */
		*(u64 *)&cmd->asv[0] = 0ULL;			/* 0x80 */

		rc = __genwqe_execute_raw_ddcb(cd, cmd, filp->f_flags);

		load->retc = cmd->retc;
		load->attn = cmd->attn;
		load->progress = cmd->progress;

		if ((rc < 0) && (rc != -EBADMSG)) {
			ddcb_requ_free(cmd);
			goto free_buffer;
		}

		rc = copy_to_user(buf, xbuf, tocopy);
		if (rc) {
			rc = -EFAULT;
			ddcb_requ_free(cmd);
			goto free_buffer;
		}

		/* We know that we can get retc 0x104 with CRC err */
		if (((cmd->retc == DDCB_RETC_FAULT) &&
		     (cmd->attn != 0x02)) ||  /* Normally ignore CRC error */
		    ((cmd->retc == DDCB_RETC_COMPLETE) &&
		     (cmd->attn != 0x00))) {  /* Everything was fine */
			rc = -EIO;
			ddcb_requ_free(cmd);
			goto free_buffer;
		}

		load->size -= tocopy;
		flash += tocopy;
		buf += tocopy;
		blocks_to_flash--;
		ddcb_requ_free(cmd);
	}
	rc = 0;

 free_buffer:
	__genwqe_free_consistent(cd, FLASH_BLOCK, xbuf, dma_addr);
	return rc;
}

static int genwqe_pin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
{
	int rc;
	struct genwqe_dev *cd = cfile->cd;
	struct pci_dev *pci_dev = cfile->cd->pci_dev;
	struct dma_mapping *dma_map;
	unsigned long map_addr;
	unsigned long map_size;

	if ((m->addr == 0x0) || (m->size == 0))
		return -EINVAL;

	map_addr = (m->addr & PAGE_MASK);
	map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);

	dma_map = kzalloc(sizeof(struct dma_mapping), GFP_KERNEL);
	if (dma_map == NULL)
		return -ENOMEM;

	genwqe_mapping_init(dma_map, GENWQE_MAPPING_SGL_PINNED);
	rc = genwqe_user_vmap(cd, dma_map, (void *)map_addr, map_size);
	if (rc != 0) {
		dev_err(&pci_dev->dev,
			"[%s] genwqe_user_vmap rc=%d\n", __func__, rc);
		kfree(dma_map);
		return rc;
	}

	genwqe_add_pin(cfile, dma_map);
	return 0;
}

static int genwqe_unpin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
{
	struct genwqe_dev *cd = cfile->cd;
	struct dma_mapping *dma_map;
	unsigned long map_addr;
	unsigned long map_size;

	if (m->addr == 0x0)
		return -EINVAL;

	map_addr = (m->addr & PAGE_MASK);
	map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);

	dma_map = genwqe_search_pin(cfile, map_addr, map_size, NULL);
	if (dma_map == NULL)
		return -ENOENT;

	genwqe_del_pin(cfile, dma_map);
	genwqe_user_vunmap(cd, dma_map);
	kfree(dma_map);
	return 0;
}

/**
 * ddcb_cmd_cleanup() - Remove dynamically created fixup entries
 *
 * Only if there are any. Pinnings are not removed.
 */
static int ddcb_cmd_cleanup(struct genwqe_file *cfile, struct ddcb_requ *req)
{
	unsigned int i;
	struct dma_mapping *dma_map;
	struct genwqe_dev *cd = cfile->cd;

	for (i = 0; i < DDCB_FIXUPS; i++) {
		dma_map = &req->dma_mappings[i];

		if (dma_mapping_used(dma_map)) {
			__genwqe_del_mapping(cfile, dma_map);
			genwqe_user_vunmap(cd, dma_map);
		}
		if (req->sgls[i].sgl != NULL)
			genwqe_free_sync_sgl(cd, &req->sgls[i]);
	}
	return 0;
}

/**
 * ddcb_cmd_fixups() - Establish DMA fixups/sglists for user memory references
 *
 * Before the DDCB gets executed we need to handle the fixups. We
 * replace the user-space addresses with DMA addresses or do
 * additional setup work e.g. generating a scatter-gather list which
 * is used to describe the memory referred to in the fixup.
 */
static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
{
	int rc;
	unsigned int asiv_offs, i;
	struct genwqe_dev *cd = cfile->cd;
	struct genwqe_ddcb_cmd *cmd = &req->cmd;
	struct dma_mapping *m;

	for (i = 0, asiv_offs = 0x00; asiv_offs <= 0x58;
	     i++, asiv_offs += 0x08) {

		u64 u_addr;
		dma_addr_t d_addr;
		u32 u_size = 0;
		u64 ats_flags;

		ats_flags = ATS_GET_FLAGS(cmd->ats, asiv_offs);

		switch (ats_flags) {

		case ATS_TYPE_DATA:
			break;	/* nothing to do here */

		case ATS_TYPE_FLAT_RDWR:
		case ATS_TYPE_FLAT_RD: {
			u_addr = be64_to_cpu(*((__be64 *)&cmd->
					       asiv[asiv_offs]));
			u_size = be32_to_cpu(*((__be32 *)&cmd->
					       asiv[asiv_offs + 0x08]));

			/*
			 * No data available. Ignore u_addr in this
			 * case and set addr to 0. Hardware must not
			 * fetch the buffer.
			 */
			if (u_size == 0x0) {
				*((__be64 *)&cmd->asiv[asiv_offs]) =
							cpu_to_be64(0x0);
				break;
			}

			m = __genwqe_search_mapping(cfile, u_addr, u_size,
						    &d_addr, NULL);
			if (m == NULL) {
				rc = -EFAULT;
				goto err_out;
			}

			*((__be64 *)&cmd->asiv[asiv_offs]) =
							cpu_to_be64(d_addr);
			break;
		}

		case ATS_TYPE_SGL_RDWR:
		case ATS_TYPE_SGL_RD: {
			int page_offs;

			u_addr = be64_to_cpu(*((__be64 *)
					       &cmd->asiv[asiv_offs]));
			u_size = be32_to_cpu(*((__be32 *)
					       &cmd->asiv[asiv_offs + 0x08]));

			/*
			 * No data available. Ignore u_addr in this
			 * case and set addr to 0. Hardware must not
			 * fetch the empty sgl.
			 */
			if (u_size == 0x0) {
				*((__be64 *)&cmd->asiv[asiv_offs]) =
							cpu_to_be64(0x0);
				break;
			}

			m = genwqe_search_pin(cfile, u_addr, u_size, NULL);
			if (m != NULL) {
				page_offs = (u_addr -
					     (u64)m->u_vaddr)/PAGE_SIZE;
			} else {
				m = &req->dma_mappings[i];

				genwqe_mapping_init(m,
						    GENWQE_MAPPING_SGL_TEMP);

				if (ats_flags == ATS_TYPE_SGL_RD)
					m->write = 0;

				rc = genwqe_user_vmap(cd, m, (void *)u_addr,
						      u_size);
				if (rc != 0)
					goto err_out;

				__genwqe_add_mapping(cfile, m);
				page_offs = 0;
			}

			/* create genwqe style scatter gather list */
			rc = genwqe_alloc_sync_sgl(cd, &req->sgls[i],
						   (void __user *)u_addr,
						   u_size, m->write);
			if (rc != 0)
				goto err_out;

			genwqe_setup_sgl(cd, &req->sgls[i],
					 &m->dma_list[page_offs]);

			*((__be64 *)&cmd->asiv[asiv_offs]) =
				cpu_to_be64(req->sgls[i].sgl_dma_addr);

			break;
		}
		default:
			rc = -EINVAL;
			goto err_out;
		}
	}
	return 0;

 err_out:
	ddcb_cmd_cleanup(cfile, req);
	return rc;
}

/**
 * genwqe_execute_ddcb() - Execute DDCB using userspace address fixups
 *
 * The code will build up the translation tables or look up the
 * contiguous memory allocation table to find the right translations
 * and DMA addresses.
 */
static int genwqe_execute_ddcb(struct genwqe_file *cfile,
			       struct genwqe_ddcb_cmd *cmd)
{
	int rc;
	struct genwqe_dev *cd = cfile->cd;
	struct file *filp = cfile->filp;
	struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd);

	rc = ddcb_cmd_fixups(cfile, req);
	if (rc != 0)
		return rc;

	rc = __genwqe_execute_raw_ddcb(cd, cmd, filp->f_flags);
	ddcb_cmd_cleanup(cfile, req);
	return rc;
}

static int do_execute_ddcb(struct genwqe_file *cfile,
			   unsigned long arg, int raw)
{
	int rc;
	struct genwqe_ddcb_cmd *cmd;
	struct genwqe_dev *cd = cfile->cd;
	struct file *filp = cfile->filp;

	cmd = ddcb_requ_alloc();
	if (cmd == NULL)
		return -ENOMEM;

	if (copy_from_user(cmd, (void __user *)arg, sizeof(*cmd))) {
		ddcb_requ_free(cmd);
		return -EFAULT;
	}

	if (!raw)
		rc = genwqe_execute_ddcb(cfile, cmd);
	else
		rc = __genwqe_execute_raw_ddcb(cd, cmd, filp->f_flags);

	/* Copy back only the modified fields. Do not copy ASIV
	 * back since the copy got modified by the driver.
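	 * (The ASIV is the trailing member of struct genwqe_ddcb_cmd, so
	 * shortening the copy by DDCB_ASIV_LENGTH skips exactly that block
	 * while retc, attn, progress and the ASV still reach user space.)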
	 */
	if (copy_to_user((void __user *)arg, cmd,
			 sizeof(*cmd) - DDCB_ASIV_LENGTH)) {
		ddcb_requ_free(cmd);
		return -EFAULT;
	}

	ddcb_requ_free(cmd);
	return rc;
}

/**
 * genwqe_ioctl() - IO control
 * @filp:       file handle
 * @cmd:        command identifier (passed from user)
 * @arg:        argument (passed from user)
 *
 * Return: 0 on success
 */
static long genwqe_ioctl(struct file *filp, unsigned int cmd,
			 unsigned long arg)
{
	int rc = 0;
	struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
	struct genwqe_dev *cd = cfile->cd;
	struct pci_dev *pci_dev = cd->pci_dev;
	struct genwqe_reg_io __user *io;
	u64 val;
	u32 reg_offs;

	/* Return -EIO if card hit EEH */
	if (pci_channel_offline(pci_dev))
		return -EIO;

	if (_IOC_TYPE(cmd) != GENWQE_IOC_CODE)
		return -EINVAL;

	switch (cmd) {

	case GENWQE_GET_CARD_STATE:
		put_user(cd->card_state, (enum genwqe_card_state __user *)arg);
		return 0;

		/* Register access */
	case GENWQE_READ_REG64: {
		io = (struct genwqe_reg_io __user *)arg;

		if (get_user(reg_offs, &io->num))
			return -EFAULT;

		if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x7))
			return -EINVAL;

		val = __genwqe_readq(cd, reg_offs);
		put_user(val, &io->val64);
		return 0;
	}

	case GENWQE_WRITE_REG64: {
		io = (struct genwqe_reg_io __user *)arg;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
			return -EPERM;

		if (get_user(reg_offs, &io->num))
			return -EFAULT;

		if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x7))
			return -EINVAL;

		if (get_user(val, &io->val64))
			return -EFAULT;

		__genwqe_writeq(cd, reg_offs, val);
		return 0;
	}

	case GENWQE_READ_REG32: {
		io = (struct genwqe_reg_io __user *)arg;

		if (get_user(reg_offs, &io->num))
			return -EFAULT;

		if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x3))
			return -EINVAL;

		val = __genwqe_readl(cd, reg_offs);
		put_user(val, &io->val64);
		return 0;
	}

	case GENWQE_WRITE_REG32: {
		io = (struct genwqe_reg_io __user *)arg;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
			return -EPERM;

		if (get_user(reg_offs, &io->num))
			return -EFAULT;

		if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x3))
			return -EINVAL;

		if (get_user(val, &io->val64))
			return -EFAULT;

		__genwqe_writel(cd, reg_offs, val);
		return 0;
	}

		/* Flash update/reading */
	case GENWQE_SLU_UPDATE: {
		struct genwqe_bitstream load;

		if (!genwqe_is_privileged(cd))
			return -EPERM;

		if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
			return -EPERM;

		if (copy_from_user(&load, (void __user *)arg,
				   sizeof(load)))
			return -EFAULT;

		rc = do_flash_update(cfile, &load);

		if (copy_to_user((void __user *)arg, &load, sizeof(load)))
			return -EFAULT;

		return rc;
	}

	case GENWQE_SLU_READ: {
		struct genwqe_bitstream load;

		if (!genwqe_is_privileged(cd))
			return -EPERM;

		if (genwqe_flash_readback_fails(cd))
			return -ENOSPC;	/* known to fail for old versions */

		if (copy_from_user(&load, (void __user *)arg, sizeof(load)))
			return -EFAULT;

		rc = do_flash_read(cfile, &load);

		if (copy_to_user((void __user *)arg, &load, sizeof(load)))
			return -EFAULT;

		return rc;
	}

		/* memory pinning and unpinning */
	case GENWQE_PIN_MEM: {
		struct genwqe_mem m;

		if (copy_from_user(&m, (void __user *)arg, sizeof(m)))
			return -EFAULT;

		return genwqe_pin_mem(cfile, &m);
	}

	case GENWQE_UNPIN_MEM: {
		struct genwqe_mem m;

		if (copy_from_user(&m, (void __user *)arg, sizeof(m)))
			return -EFAULT;

		return genwqe_unpin_mem(cfile, &m);
	}

		/* launch a DDCB and wait for completion */
	case GENWQE_EXECUTE_DDCB:
		return do_execute_ddcb(cfile, arg, 0);

	case GENWQE_EXECUTE_RAW_DDCB: {

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		return do_execute_ddcb(cfile, arg, 1);
	}

	default:
		return -EINVAL;
	}

	return rc;
}

#if defined(CONFIG_COMPAT)
/**
 * genwqe_compat_ioctl() - Compatibility ioctl
 *
 * Called whenever a 32-bit process running under a 64-bit kernel
 * performs an ioctl on /dev/genwqe<n>_card.
 *
 * @filp:        file pointer.
 * @cmd:         command.
 * @arg:         user argument.
 * Return:       zero on success or negative number on failure.
 */
static long genwqe_compat_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	return genwqe_ioctl(filp, cmd, arg);
}
#endif /* defined(CONFIG_COMPAT) */

static const struct file_operations genwqe_fops = {
	.owner		= THIS_MODULE,
	.open		= genwqe_open,
	.fasync		= genwqe_fasync,
	.mmap		= genwqe_mmap,
	.unlocked_ioctl	= genwqe_ioctl,
#if defined(CONFIG_COMPAT)
	.compat_ioctl	= genwqe_compat_ioctl,
#endif
	.release	= genwqe_release,
};

static int genwqe_device_initialized(struct genwqe_dev *cd)
{
	return cd->dev != NULL;
}

/**
 * genwqe_device_create() - Create and configure genwqe char device
 * @cd:      genwqe device descriptor
 *
 * This function must be called before we create any more genwqe
 * character devices, because it allocates the major and minor
 * numbers which are supposed to be used by the client drivers.
 */
int genwqe_device_create(struct genwqe_dev *cd)
{
	int rc;
	struct pci_dev *pci_dev = cd->pci_dev;

	/*
	 * Here starts the individual setup per client. It must
	 * initialize its own cdev data structure with its own fops.
	 * The appropriate devnum needs to be created. The ranges must
	 * not overlap.
	 */
	rc = alloc_chrdev_region(&cd->devnum_genwqe, 0,
				 GENWQE_MAX_MINOR, GENWQE_DEVNAME);
	if (rc < 0) {
		dev_err(&pci_dev->dev, "err: alloc_chrdev_region failed\n");
		goto err_dev;
	}

	cdev_init(&cd->cdev_genwqe, &genwqe_fops);
	cd->cdev_genwqe.owner = THIS_MODULE;

	rc = cdev_add(&cd->cdev_genwqe, cd->devnum_genwqe, 1);
	if (rc < 0) {
		dev_err(&pci_dev->dev, "err: cdev_add failed\n");
		goto err_add;
	}

	/*
	 * Finally the device in /dev/... must be created. The rule is
	 * to use card%d_clientname for each created device.
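	 * For this driver that means the first card shows up as
	 * /dev/genwqe0_card (GENWQE_DEVNAME "%u_card", see the
	 * device_create_with_groups() call below).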
1297 */ 1298 cd->dev = device_create_with_groups(cd->class_genwqe, 1299 &cd->pci_dev->dev, 1300 cd->devnum_genwqe, cd, 1301 genwqe_attribute_groups, 1302 GENWQE_DEVNAME "%u_card", 1303 cd->card_idx); 1304 if (IS_ERR(cd->dev)) { 1305 rc = PTR_ERR(cd->dev); 1306 goto err_cdev; 1307 } 1308 1309 rc = genwqe_init_debugfs(cd); 1310 if (rc != 0) 1311 goto err_debugfs; 1312 1313 return 0; 1314 1315 err_debugfs: 1316 device_destroy(cd->class_genwqe, cd->devnum_genwqe); 1317 err_cdev: 1318 cdev_del(&cd->cdev_genwqe); 1319 err_add: 1320 unregister_chrdev_region(cd->devnum_genwqe, GENWQE_MAX_MINOR); 1321 err_dev: 1322 cd->dev = NULL; 1323 return rc; 1324 } 1325 1326 static int genwqe_inform_and_stop_processes(struct genwqe_dev *cd) 1327 { 1328 int rc; 1329 unsigned int i; 1330 struct pci_dev *pci_dev = cd->pci_dev; 1331 1332 if (!genwqe_open_files(cd)) 1333 return 0; 1334 1335 dev_warn(&pci_dev->dev, "[%s] send SIGIO and wait ...\n", __func__); 1336 1337 rc = genwqe_kill_fasync(cd, SIGIO); 1338 if (rc > 0) { 1339 /* give kill_timeout seconds to close file descriptors ... */ 1340 for (i = 0; (i < GENWQE_KILL_TIMEOUT) && 1341 genwqe_open_files(cd); i++) { 1342 dev_info(&pci_dev->dev, " %d sec ...", i); 1343 1344 cond_resched(); 1345 msleep(1000); 1346 } 1347 1348 /* if no open files we can safely continue, else ... */ 1349 if (!genwqe_open_files(cd)) 1350 return 0; 1351 1352 dev_warn(&pci_dev->dev, 1353 "[%s] send SIGKILL and wait ...\n", __func__); 1354 1355 rc = genwqe_force_sig(cd, SIGKILL); /* force terminate */ 1356 if (rc) { 1357 /* Give kill_timout more seconds to end processes */ 1358 for (i = 0; (i < GENWQE_KILL_TIMEOUT) && 1359 genwqe_open_files(cd); i++) { 1360 dev_warn(&pci_dev->dev, " %d sec ...", i); 1361 1362 cond_resched(); 1363 msleep(1000); 1364 } 1365 } 1366 } 1367 return 0; 1368 } 1369 1370 /** 1371 * genwqe_device_remove() - Remove genwqe's char device 1372 * 1373 * This function must be called after the client devices are removed 1374 * because it will free the major/minor number range for the genwqe 1375 * drivers. 1376 * 1377 * This function must be robust enough to be called twice. 1378 */ 1379 int genwqe_device_remove(struct genwqe_dev *cd) 1380 { 1381 int rc; 1382 struct pci_dev *pci_dev = cd->pci_dev; 1383 1384 if (!genwqe_device_initialized(cd)) 1385 return 1; 1386 1387 genwqe_inform_and_stop_processes(cd); 1388 1389 /* 1390 * We currently do wait until all filedescriptors are 1391 * closed. This leads to a problem when we abort the 1392 * application which will decrease this reference from 1393 * 1/unused to 0/illegal and not from 2/used 1/empty. 1394 */ 1395 rc = kref_read(&cd->cdev_genwqe.kobj.kref); 1396 if (rc != 1) { 1397 dev_err(&pci_dev->dev, 1398 "[%s] err: cdev_genwqe...refcount=%d\n", __func__, rc); 1399 panic("Fatal err: cannot free resources with pending references!"); 1400 } 1401 1402 genqwe_exit_debugfs(cd); 1403 device_destroy(cd->class_genwqe, cd->devnum_genwqe); 1404 cdev_del(&cd->cdev_genwqe); 1405 unregister_chrdev_region(cd->devnum_genwqe, GENWQE_MAX_MINOR); 1406 cd->dev = NULL; 1407 1408 return 0; 1409 } 1410