/**
 * IBM Accelerator Family 'GenWQE'
 *
 * (C) Copyright IBM Corp. 2013
 *
 * Author: Frank Haverkamp <haver@linux.vnet.ibm.com>
 * Author: Joerg-Stephan Vogt <jsvogt@de.ibm.com>
 * Author: Michael Jung <mijung@de.ibm.com>
 * Author: Michael Ruettger <michael@ibmra.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * Character device representation of the GenWQE device. This allows
 * user-space applications to communicate with the card.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/atomic.h>

#include "card_base.h"
#include "card_ddcb.h"

static int genwqe_open_files(struct genwqe_dev *cd)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&cd->file_lock, flags);
	rc = list_empty(&cd->file_list);
	spin_unlock_irqrestore(&cd->file_lock, flags);
	return !rc;
}

static void genwqe_add_file(struct genwqe_dev *cd, struct genwqe_file *cfile)
{
	unsigned long flags;

	cfile->owner = current;
	spin_lock_irqsave(&cd->file_lock, flags);
	list_add(&cfile->list, &cd->file_list);
	spin_unlock_irqrestore(&cd->file_lock, flags);
}

static int genwqe_del_file(struct genwqe_dev *cd, struct genwqe_file *cfile)
{
	unsigned long flags;

	spin_lock_irqsave(&cd->file_lock, flags);
	list_del(&cfile->list);
	spin_unlock_irqrestore(&cd->file_lock, flags);

	return 0;
}

static void genwqe_add_pin(struct genwqe_file *cfile, struct dma_mapping *m)
{
	unsigned long flags;

	spin_lock_irqsave(&cfile->pin_lock, flags);
	list_add(&m->pin_list, &cfile->pin_list);
	spin_unlock_irqrestore(&cfile->pin_lock, flags);
}

static int genwqe_del_pin(struct genwqe_file *cfile, struct dma_mapping *m)
{
	unsigned long flags;

	spin_lock_irqsave(&cfile->pin_lock, flags);
	list_del(&m->pin_list);
	spin_unlock_irqrestore(&cfile->pin_lock, flags);

	return 0;
}

/**
 * genwqe_search_pin() - Search for the mapping for a userspace address
 * @cfile:	Descriptor of opened file
 * @u_addr:	User virtual address
 * @size:	Size of buffer
 * @virt_addr:	Virtual address to be updated
 *
 * Return: Pointer to the corresponding mapping; NULL if not found
 */
static struct dma_mapping *genwqe_search_pin(struct genwqe_file *cfile,
					     unsigned long u_addr,
					     unsigned int size,
					     void **virt_addr)
{
	unsigned long flags;
	struct dma_mapping *m;

	spin_lock_irqsave(&cfile->pin_lock, flags);

	list_for_each_entry(m, &cfile->pin_list, pin_list) {
		if ((((u64)m->u_vaddr) <= (u_addr)) &&
		    (((u64)m->u_vaddr + m->size) >= (u_addr + size))) {

			if (virt_addr)
				*virt_addr = m->k_vaddr +
					(u_addr - (u64)m->u_vaddr);

			spin_unlock_irqrestore(&cfile->pin_lock, flags);
			return m;
		}
	}
	spin_unlock_irqrestore(&cfile->pin_lock, flags);
	return NULL;
}

static void __genwqe_add_mapping(struct genwqe_file *cfile,
				 struct dma_mapping *dma_map)
{
	unsigned long flags;

	spin_lock_irqsave(&cfile->map_lock, flags);
	list_add(&dma_map->card_list, &cfile->map_list);
	spin_unlock_irqrestore(&cfile->map_lock, flags);
}

static void __genwqe_del_mapping(struct genwqe_file *cfile,
				 struct dma_mapping *dma_map)
{
	unsigned long flags;

	spin_lock_irqsave(&cfile->map_lock, flags);
	list_del(&dma_map->card_list);
	spin_unlock_irqrestore(&cfile->map_lock, flags);
}


/**
 * __genwqe_search_mapping() - Search for the mapping for a userspace address
 * @cfile:	descriptor of opened file
 * @u_addr:	user virtual address
 * @size:	size of buffer
 * @dma_addr:	DMA address to be updated
 * @virt_addr:	virtual address to be updated
 *
 * Return: Pointer to the corresponding mapping; NULL if not found
 */
static struct dma_mapping *__genwqe_search_mapping(struct genwqe_file *cfile,
						   unsigned long u_addr,
						   unsigned int size,
						   dma_addr_t *dma_addr,
						   void **virt_addr)
{
	unsigned long flags;
	struct dma_mapping *m;
	struct pci_dev *pci_dev = cfile->cd->pci_dev;

	spin_lock_irqsave(&cfile->map_lock, flags);
	list_for_each_entry(m, &cfile->map_list, card_list) {

		if ((((u64)m->u_vaddr) <= (u_addr)) &&
		    (((u64)m->u_vaddr + m->size) >= (u_addr + size))) {

			/* match found: current is as expected and
			   addr is in range */
			if (dma_addr)
				*dma_addr = m->dma_addr +
					(u_addr - (u64)m->u_vaddr);

			if (virt_addr)
				*virt_addr = m->k_vaddr +
					(u_addr - (u64)m->u_vaddr);

			spin_unlock_irqrestore(&cfile->map_lock, flags);
			return m;
		}
	}
	spin_unlock_irqrestore(&cfile->map_lock, flags);

	dev_err(&pci_dev->dev,
		"[%s] Entry not found: u_addr=%lx, size=%x\n",
		__func__, u_addr, size);

	return NULL;
}

static void genwqe_remove_mappings(struct genwqe_file *cfile)
{
	int i = 0;
	struct list_head *node, *next;
	struct dma_mapping *dma_map;
	struct genwqe_dev *cd = cfile->cd;
	struct pci_dev *pci_dev = cfile->cd->pci_dev;

	list_for_each_safe(node, next, &cfile->map_list) {
		dma_map = list_entry(node, struct dma_mapping, card_list);

		list_del_init(&dma_map->card_list);

		/*
		 * This is really a bug, because those things should
		 * have been already tidied up.
		 *
		 * GENWQE_MAPPING_RAW should have been removed via munmap().
		 * GENWQE_MAPPING_SGL_TEMP should be removed by tidy up code.
		 */
		dev_err(&pci_dev->dev,
			"[%s] %d. cleanup mapping: u_vaddr=%p "
			"u_kaddr=%016lx dma_addr=%lx\n", __func__, i++,
			dma_map->u_vaddr, (unsigned long)dma_map->k_vaddr,
			(unsigned long)dma_map->dma_addr);

		if (dma_map->type == GENWQE_MAPPING_RAW) {
			/* we allocated this dynamically */
			__genwqe_free_consistent(cd, dma_map->size,
						 dma_map->k_vaddr,
						 dma_map->dma_addr);
			kfree(dma_map);
		} else if (dma_map->type == GENWQE_MAPPING_SGL_TEMP) {
			/* we use dma_map statically from the request */
			genwqe_user_vunmap(cd, dma_map, NULL);
		}
	}
}

static void genwqe_remove_pinnings(struct genwqe_file *cfile)
{
	struct list_head *node, *next;
	struct dma_mapping *dma_map;
	struct genwqe_dev *cd = cfile->cd;

	list_for_each_safe(node, next, &cfile->pin_list) {
		dma_map = list_entry(node, struct dma_mapping, pin_list);

		/*
		 * This is not a bug, because a killed process might
		 * not call the unpin ioctl, which is supposed to free
		 * the resources.
		 *
		 * Pinnings are dynamically allocated and need to be
		 * deleted.
		 */
		list_del_init(&dma_map->pin_list);
		genwqe_user_vunmap(cd, dma_map, NULL);
		kfree(dma_map);
	}
}

/**
 * genwqe_kill_fasync() - Send signal to all processes with open GenWQE files
 *
 * E.g. genwqe_kill_fasync(cd, SIGIO);
 */
static int genwqe_kill_fasync(struct genwqe_dev *cd, int sig)
{
	unsigned int files = 0;
	unsigned long flags;
	struct genwqe_file *cfile;

	spin_lock_irqsave(&cd->file_lock, flags);
	list_for_each_entry(cfile, &cd->file_list, list) {
		if (cfile->async_queue)
			kill_fasync(&cfile->async_queue, sig, POLL_HUP);
		files++;
	}
	spin_unlock_irqrestore(&cd->file_lock, flags);
	return files;
}

static int genwqe_force_sig(struct genwqe_dev *cd, int sig)
{
	unsigned int files = 0;
	unsigned long flags;
	struct genwqe_file *cfile;

	spin_lock_irqsave(&cd->file_lock, flags);
	list_for_each_entry(cfile, &cd->file_list, list) {
		force_sig(sig, cfile->owner);
		files++;
	}
	spin_unlock_irqrestore(&cd->file_lock, flags);
	return files;
}

/**
 * genwqe_open() - file open
 * @inode:	file system information
 * @filp:	file handle
 *
 * This function is executed whenever an application calls
 * open("/dev/genwqe",..).
 *
 * Return: 0 if successful or <0 if errors
 */
static int genwqe_open(struct inode *inode, struct file *filp)
{
	struct genwqe_dev *cd;
	struct genwqe_file *cfile;
	struct pci_dev *pci_dev;

	cfile = kzalloc(sizeof(*cfile), GFP_KERNEL);
	if (cfile == NULL)
		return -ENOMEM;

	cd = container_of(inode->i_cdev, struct genwqe_dev, cdev_genwqe);
	pci_dev = cd->pci_dev;
	cfile->cd = cd;
	cfile->filp = filp;
	cfile->client = NULL;

	spin_lock_init(&cfile->map_lock);  /* list of raw memory allocations */
	INIT_LIST_HEAD(&cfile->map_list);

	spin_lock_init(&cfile->pin_lock);  /* list of user pinned memory */
	INIT_LIST_HEAD(&cfile->pin_list);

	filp->private_data = cfile;

	genwqe_add_file(cd, cfile);
	return 0;
}

/**
 * genwqe_fasync() - Setup process to receive SIGIO.
 * @fd:	  file descriptor
 * @filp: file handle
 * @mode: file mode
 *
 * Sending a signal works as follows:
 *
 *	if (cdev->async_queue)
 *		kill_fasync(&cdev->async_queue, SIGIO, POLL_IN);
 *
 * Some devices also implement asynchronous notification to indicate
 * when the device can be written; in this case, of course,
 * kill_fasync must be called with a mode of POLL_OUT.
 */
static int genwqe_fasync(int fd, struct file *filp, int mode)
{
	struct genwqe_file *cdev = (struct genwqe_file *)filp->private_data;
	return fasync_helper(fd, filp, mode, &cdev->async_queue);
}
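
/*
 * Illustrative user-space counterpart (not part of this driver): a
 * minimal sketch of how an application could register for the SIGIO
 * notification described above. The device node name and the handler
 * name are placeholders for illustration only.
 *
 *	signal(SIGIO, my_sigio_handler);
 *	fd = open("/dev/genwqe0_card", O_RDWR);
 *	fcntl(fd, F_SETOWN, getpid());
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | FASYNC);
 *
 * Setting FASYNC ends up in genwqe_fasync() via fasync_helper(), and
 * genwqe_kill_fasync() can then deliver the signal to this process.
 */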

/**
 * genwqe_release() - file close
 * @inode:	file system information
 * @filp:	file handle
 *
 * This function is executed whenever an application calls 'close(fd_genwqe)'
 *
 * Return: always 0
 */
static int genwqe_release(struct inode *inode, struct file *filp)
{
	struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
	struct genwqe_dev *cd = cfile->cd;

	/* there must be no entries in these lists! */
	genwqe_remove_mappings(cfile);
	genwqe_remove_pinnings(cfile);

	/* remove this filp from the asynchronously notified filp's */
	genwqe_fasync(-1, filp, 0);

	/*
	 * For this to work we must not release cd when this cfile is
	 * not yet released, otherwise the list entry is invalid,
	 * because the list itself gets reinstantiated!
	 */
	genwqe_del_file(cd, cfile);
	kfree(cfile);
	return 0;
}

static void genwqe_vma_open(struct vm_area_struct *vma)
{
	/* nothing ... */
}

/**
 * genwqe_vma_close() - Called each time when vma is unmapped
 *
 * Free memory which got allocated by GenWQE mmap().
 */
static void genwqe_vma_close(struct vm_area_struct *vma)
{
	unsigned long vsize = vma->vm_end - vma->vm_start;
	struct inode *inode = vma->vm_file->f_dentry->d_inode;
	struct dma_mapping *dma_map;
	struct genwqe_dev *cd = container_of(inode->i_cdev, struct genwqe_dev,
					     cdev_genwqe);
	struct pci_dev *pci_dev = cd->pci_dev;
	dma_addr_t d_addr = 0;
	struct genwqe_file *cfile = vma->vm_private_data;

	dma_map = __genwqe_search_mapping(cfile, vma->vm_start, vsize,
					  &d_addr, NULL);
	if (dma_map == NULL) {
		dev_err(&pci_dev->dev,
			" [%s] err: mapping not found: v=%lx, p=%lx s=%lx\n",
			__func__, vma->vm_start, vma->vm_pgoff << PAGE_SHIFT,
			vsize);
		return;
	}
	__genwqe_del_mapping(cfile, dma_map);
	__genwqe_free_consistent(cd, dma_map->size, dma_map->k_vaddr,
				 dma_map->dma_addr);
	kfree(dma_map);
}

static struct vm_operations_struct genwqe_vma_ops = {
	.open	= genwqe_vma_open,
	.close	= genwqe_vma_close,
};

/**
 * genwqe_mmap() - Provide contiguous buffers to userspace
 *
 * We use mmap() to allocate contiguous buffers used for DMA
 * transfers. After the buffer is allocated we remap it to user-space
 * and remember a reference to our dma_mapping data structure, where
 * we store the associated DMA address and allocated size.
 *
 * When we receive a DDCB execution request with the ATS bits set to
 * plain buffer, we lookup our dma_mapping list to find the
 * corresponding DMA address for the associated user-space address.
 */
static int genwqe_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int rc;
	unsigned long pfn, vsize = vma->vm_end - vma->vm_start;
	struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
	struct genwqe_dev *cd = cfile->cd;
	struct dma_mapping *dma_map;

	if (vsize == 0)
		return -EINVAL;

	if (get_order(vsize) > MAX_ORDER)
		return -ENOMEM;

	dma_map = kzalloc(sizeof(struct dma_mapping), GFP_ATOMIC);
	if (dma_map == NULL)
		return -ENOMEM;

	genwqe_mapping_init(dma_map, GENWQE_MAPPING_RAW);
	dma_map->u_vaddr = (void *)vma->vm_start;
	dma_map->size = vsize;
	dma_map->nr_pages = DIV_ROUND_UP(vsize, PAGE_SIZE);
	dma_map->k_vaddr = __genwqe_alloc_consistent(cd, vsize,
						     &dma_map->dma_addr);
	if (dma_map->k_vaddr == NULL) {
		rc = -ENOMEM;
		goto free_dma_map;
	}

	if (capable(CAP_SYS_ADMIN) && (vsize > sizeof(dma_addr_t)))
		*(dma_addr_t *)dma_map->k_vaddr = dma_map->dma_addr;

	pfn = virt_to_phys(dma_map->k_vaddr) >> PAGE_SHIFT;
	rc = remap_pfn_range(vma,
			     vma->vm_start,
			     pfn,
			     vsize,
			     vma->vm_page_prot);
	if (rc != 0) {
		rc = -EFAULT;
		goto free_dma_mem;
	}

	vma->vm_private_data = cfile;
	vma->vm_ops = &genwqe_vma_ops;
	__genwqe_add_mapping(cfile, dma_map);

	return 0;

 free_dma_mem:
	__genwqe_free_consistent(cd, dma_map->size,
				 dma_map->k_vaddr,
				 dma_map->dma_addr);
 free_dma_map:
	kfree(dma_map);
	return rc;
}
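
/*
 * Illustrative user-space sketch (not part of this driver): one way an
 * application might obtain a DMA-capable buffer through the mmap()
 * interface described above. The device node name and the buffer size
 * are assumptions for illustration only.
 *
 *	int fd = open("/dev/genwqe0_card", O_RDWR);
 *	void *buf = mmap(NULL, 2 * 1024 * 1024, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *	if (buf != MAP_FAILED) {
 *		... use buf as source/target of DDCB transfers ...
 *		munmap(buf, 2 * 1024 * 1024);
 *	}
 *	close(fd);
 *
 * munmap() triggers genwqe_vma_close(), which releases the backing
 * DMA memory again.
 */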

/**
 * do_flash_update() - Execute flash update (write image or CVPD)
 * @cfile:	descriptor of opened file
 * @load:	details about image load
 *
 * Return: 0 if successful
 */

#define FLASH_BLOCK	0x40000		/* we use 256k blocks */

static int do_flash_update(struct genwqe_file *cfile,
			   struct genwqe_bitstream *load)
{
	int rc = 0;
	int blocks_to_flash;
	dma_addr_t dma_addr;
	u64 flash = 0;
	size_t tocopy = 0;
	u8 __user *buf;
	u8 *xbuf;
	u32 crc;
	u8 cmdopts;
	struct genwqe_dev *cd = cfile->cd;
	struct pci_dev *pci_dev = cd->pci_dev;

	if ((load->size & 0x3) != 0)
		return -EINVAL;

	if (((unsigned long)(load->data_addr) & ~PAGE_MASK) != 0)
		return -EINVAL;

	/* FIXME Bits have changed for new service layer! */
	switch ((char)load->partition) {
	case '0':
		cmdopts = 0x14;
		break;		/* download/erase_first/part_0 */
	case '1':
		cmdopts = 0x1C;
		break;		/* download/erase_first/part_1 */
	case 'v':		/* cmdopts = 0x0c (VPD) */
	default:
		return -EINVAL;
	}

	buf = (u8 __user *)load->data_addr;
	xbuf = __genwqe_alloc_consistent(cd, FLASH_BLOCK, &dma_addr);
	if (xbuf == NULL)
		return -ENOMEM;

	blocks_to_flash = load->size / FLASH_BLOCK;
	while (load->size) {
		struct genwqe_ddcb_cmd *req;

		/*
		 * We must be 4 byte aligned. Buffer must be 0 appended
		 * to have defined values when calculating CRC.
		 */
		tocopy = min_t(size_t, load->size, FLASH_BLOCK);

		rc = copy_from_user(xbuf, buf, tocopy);
		if (rc) {
			rc = -EFAULT;
			goto free_buffer;
		}
		crc = genwqe_crc32(xbuf, tocopy, 0xffffffff);

		dev_dbg(&pci_dev->dev,
			"[%s] DMA: %lx CRC: %08x SZ: %ld %d\n",
			__func__, (unsigned long)dma_addr, crc, tocopy,
			blocks_to_flash);

		/* prepare DDCB for SLU process */
		req = ddcb_requ_alloc();
		if (req == NULL) {
			rc = -ENOMEM;
			goto free_buffer;
		}

		req->cmd = SLCMD_MOVE_FLASH;
		req->cmdopts = cmdopts;

		/* prepare invariant values */
		if (genwqe_get_slu_id(cd) <= 0x2) {
			*(__be64 *)&req->__asiv[0]  = cpu_to_be64(dma_addr);
			*(__be64 *)&req->__asiv[8]  = cpu_to_be64(tocopy);
			*(__be64 *)&req->__asiv[16] = cpu_to_be64(flash);
			*(__be32 *)&req->__asiv[24] = cpu_to_be32(0);
			req->__asiv[24] = load->uid;
			*(__be32 *)&req->__asiv[28] = cpu_to_be32(crc);

			/* for simulation only */
			*(__be64 *)&req->__asiv[88] = cpu_to_be64(load->slu_id);
			*(__be64 *)&req->__asiv[96] = cpu_to_be64(load->app_id);
			req->asiv_length = 32; /* bytes included in crc calc */
		} else {	/* setup DDCB for ATS architecture */
			*(__be64 *)&req->asiv[0]  = cpu_to_be64(dma_addr);
			*(__be32 *)&req->asiv[8]  = cpu_to_be32(tocopy);
			*(__be32 *)&req->asiv[12] = cpu_to_be32(0); /* resvd */
			*(__be64 *)&req->asiv[16] = cpu_to_be64(flash);
			*(__be32 *)&req->asiv[24] = cpu_to_be32(load->uid<<24);
			*(__be32 *)&req->asiv[28] = cpu_to_be32(crc);

			/* for simulation only */
			*(__be64 *)&req->asiv[80] = cpu_to_be64(load->slu_id);
			*(__be64 *)&req->asiv[88] = cpu_to_be64(load->app_id);

			/* Rd only */
			req->ats = 0x4ULL << 44;
			req->asiv_length = 40; /* bytes included in crc calc */
		}
		req->asv_length = 8;

		/* For Genwqe5 we get back the calculated CRC */
		*(u64 *)&req->asv[0] = 0ULL;			/* 0x80 */

		rc = __genwqe_execute_raw_ddcb(cd, req);

		load->retc = req->retc;
		load->attn = req->attn;
		load->progress = req->progress;

		if (rc < 0) {
			ddcb_requ_free(req);
			goto free_buffer;
		}

		if (req->retc != DDCB_RETC_COMPLETE) {
			rc = -EIO;
			ddcb_requ_free(req);
			goto free_buffer;
		}

		load->size -= tocopy;
		flash += tocopy;
		buf += tocopy;
		blocks_to_flash--;
		ddcb_requ_free(req);
	}

 free_buffer:
	__genwqe_free_consistent(cd, FLASH_BLOCK, xbuf, dma_addr);
	return rc;
}
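
/*
 * Illustrative user-space sketch (not part of this driver): invoking
 * the flash update through the GENWQE_SLU_UPDATE ioctl. Only fields
 * that do_flash_update() consumes are shown; do_flash_update() expects
 * data_addr to be page aligned and size to be a multiple of 4 bytes.
 * image, image_size, card_fd and the partition character are
 * assumptions for illustration.
 *
 *	struct genwqe_bitstream load = {
 *		.data_addr = (__u64)(unsigned long)image,
 *		.size	   = image_size,
 *		.partition = '0',
 *	};
 *	rc = ioctl(card_fd, GENWQE_SLU_UPDATE, &load);
 *
 * On return, load.retc, load.attn and load.progress carry the status
 * of the last DDCB that was executed.
 */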

static int do_flash_read(struct genwqe_file *cfile,
			 struct genwqe_bitstream *load)
{
	int rc, blocks_to_flash;
	dma_addr_t dma_addr;
	u64 flash = 0;
	size_t tocopy = 0;
	u8 __user *buf;
	u8 *xbuf;
	u8 cmdopts;
	struct genwqe_dev *cd = cfile->cd;
	struct pci_dev *pci_dev = cd->pci_dev;
	struct genwqe_ddcb_cmd *cmd;

	if ((load->size & 0x3) != 0)
		return -EINVAL;

	if (((unsigned long)(load->data_addr) & ~PAGE_MASK) != 0)
		return -EINVAL;

	/* FIXME Bits have changed for new service layer! */
	switch ((char)load->partition) {
	case '0':
		cmdopts = 0x12;
		break;		/* upload/part_0 */
	case '1':
		cmdopts = 0x1A;
		break;		/* upload/part_1 */
	case 'v':
	default:
		return -EINVAL;
	}

	buf = (u8 __user *)load->data_addr;
	xbuf = __genwqe_alloc_consistent(cd, FLASH_BLOCK, &dma_addr);
	if (xbuf == NULL)
		return -ENOMEM;

	blocks_to_flash = load->size / FLASH_BLOCK;
	while (load->size) {
		/*
		 * We must be 4 byte aligned. Buffer must be 0 appended
		 * to have defined values when calculating CRC.
		 */
		tocopy = min_t(size_t, load->size, FLASH_BLOCK);

		dev_dbg(&pci_dev->dev,
			"[%s] DMA: %lx SZ: %ld %d\n",
			__func__, (unsigned long)dma_addr, tocopy,
			blocks_to_flash);

		/* prepare DDCB for SLU process */
		cmd = ddcb_requ_alloc();
		if (cmd == NULL) {
			rc = -ENOMEM;
			goto free_buffer;
		}
		cmd->cmd = SLCMD_MOVE_FLASH;
		cmd->cmdopts = cmdopts;

		/* prepare invariant values */
		if (genwqe_get_slu_id(cd) <= 0x2) {
			*(__be64 *)&cmd->__asiv[0]  = cpu_to_be64(dma_addr);
			*(__be64 *)&cmd->__asiv[8]  = cpu_to_be64(tocopy);
			*(__be64 *)&cmd->__asiv[16] = cpu_to_be64(flash);
			*(__be32 *)&cmd->__asiv[24] = cpu_to_be32(0);
			cmd->__asiv[24] = load->uid;
			*(__be32 *)&cmd->__asiv[28] = cpu_to_be32(0) /* CRC */;
			cmd->asiv_length = 32; /* bytes included in crc calc */
		} else {	/* setup DDCB for ATS architecture */
			*(__be64 *)&cmd->asiv[0]  = cpu_to_be64(dma_addr);
			*(__be32 *)&cmd->asiv[8]  = cpu_to_be32(tocopy);
			*(__be32 *)&cmd->asiv[12] = cpu_to_be32(0); /* resvd */
			*(__be64 *)&cmd->asiv[16] = cpu_to_be64(flash);
			*(__be32 *)&cmd->asiv[24] = cpu_to_be32(load->uid<<24);
			*(__be32 *)&cmd->asiv[28] = cpu_to_be32(0); /* CRC */

			/* rd/wr */
			cmd->ats = 0x5ULL << 44;
			cmd->asiv_length = 40; /* bytes included in crc calc */
		}
		cmd->asv_length = 8;

		/* we only get back the calculated CRC */
		*(u64 *)&cmd->asv[0] = 0ULL;			/* 0x80 */

		rc = __genwqe_execute_raw_ddcb(cd, cmd);

		load->retc = cmd->retc;
		load->attn = cmd->attn;
		load->progress = cmd->progress;

		if ((rc < 0) && (rc != -EBADMSG)) {
			ddcb_requ_free(cmd);
			goto free_buffer;
		}

		rc = copy_to_user(buf, xbuf, tocopy);
		if (rc) {
			rc = -EFAULT;
			ddcb_requ_free(cmd);
			goto free_buffer;
		}

		/* We know that we can get retc 0x104 with CRC err */
		if (((cmd->retc == DDCB_RETC_FAULT) &&
		     (cmd->attn != 0x02)) ||  /* Normally ignore CRC error */
		    ((cmd->retc == DDCB_RETC_COMPLETE) &&
		     (cmd->attn != 0x00))) {  /* Everything was fine */
			rc = -EIO;
			ddcb_requ_free(cmd);
			goto free_buffer;
		}

		load->size -= tocopy;
		flash += tocopy;
		buf += tocopy;
		blocks_to_flash--;
		ddcb_requ_free(cmd);
	}
	rc = 0;

 free_buffer:
	__genwqe_free_consistent(cd, FLASH_BLOCK, xbuf, dma_addr);
	return rc;
}

static int genwqe_pin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
{
	int rc;
	struct genwqe_dev *cd = cfile->cd;
	struct pci_dev *pci_dev = cfile->cd->pci_dev;
	struct dma_mapping *dma_map;
	unsigned long map_addr;
	unsigned long map_size;

	if ((m->addr == 0x0) || (m->size == 0))
		return -EINVAL;

	map_addr = (m->addr & PAGE_MASK);
	map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);

	dma_map = kzalloc(sizeof(struct dma_mapping), GFP_ATOMIC);
	if (dma_map == NULL)
		return -ENOMEM;

	genwqe_mapping_init(dma_map, GENWQE_MAPPING_SGL_PINNED);
	rc = genwqe_user_vmap(cd, dma_map, (void *)map_addr, map_size, NULL);
	if (rc != 0) {
		dev_err(&pci_dev->dev,
			"[%s] genwqe_user_vmap rc=%d\n", __func__, rc);
		kfree(dma_map);	/* do not leak the mapping on error */
		return rc;
	}

	genwqe_add_pin(cfile, dma_map);
	return 0;
}

static int genwqe_unpin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
{
	struct genwqe_dev *cd = cfile->cd;
	struct dma_mapping *dma_map;
	unsigned long map_addr;
	unsigned long map_size;

	if (m->addr == 0x0)
		return -EINVAL;

	map_addr = (m->addr & PAGE_MASK);
	map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);

	dma_map = genwqe_search_pin(cfile, map_addr, map_size, NULL);
	if (dma_map == NULL)
		return -ENOENT;

	genwqe_del_pin(cfile, dma_map);
	genwqe_user_vunmap(cd, dma_map, NULL);
	kfree(dma_map);
	return 0;
}
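
/*
 * Illustrative user-space sketch (not part of this driver): pinning a
 * buffer once so that subsequent DDCB executions can reuse the mapping
 * instead of mapping/unmapping it per request. Only the struct
 * genwqe_mem fields used by genwqe_pin_mem() are shown; buf, buf_size
 * and card_fd are placeholders.
 *
 *	struct genwqe_mem m = {
 *		.addr = (__u64)(unsigned long)buf,
 *		.size = buf_size,
 *	};
 *	if (ioctl(card_fd, GENWQE_PIN_MEM, &m) == 0) {
 *		... execute DDCBs referencing buf ...
 *		ioctl(card_fd, GENWQE_UNPIN_MEM, &m);
 *	}
 */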

/**
 * ddcb_cmd_cleanup() - Remove dynamically created fixup entries
 *
 * Only if there are any. Pinnings are not removed.
 */
static int ddcb_cmd_cleanup(struct genwqe_file *cfile, struct ddcb_requ *req)
{
	unsigned int i;
	struct dma_mapping *dma_map;
	struct genwqe_dev *cd = cfile->cd;

	for (i = 0; i < DDCB_FIXUPS; i++) {
		dma_map = &req->dma_mappings[i];

		if (dma_mapping_used(dma_map)) {
			__genwqe_del_mapping(cfile, dma_map);
			genwqe_user_vunmap(cd, dma_map, req);
		}
		if (req->sgl[i] != NULL) {
			genwqe_free_sgl(cd, req->sgl[i],
					req->sgl_dma_addr[i],
					req->sgl_size[i]);
			req->sgl[i] = NULL;
			req->sgl_dma_addr[i] = 0x0;
			req->sgl_size[i] = 0;
		}

	}
	return 0;
}

/**
 * ddcb_cmd_fixups() - Establish DMA fixups/sglists for user memory references
 *
 * Before the DDCB gets executed we need to handle the fixups. We
 * replace the user-space addresses with DMA addresses or do
 * additional setup work e.g. generating a scatter-gather list which
 * is used to describe the memory referred to in the fixup.
 */
static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
{
	int rc;
	unsigned int asiv_offs, i;
	struct genwqe_dev *cd = cfile->cd;
	struct genwqe_ddcb_cmd *cmd = &req->cmd;
	struct dma_mapping *m;
	const char *type = "UNKNOWN";

	for (i = 0, asiv_offs = 0x00; asiv_offs <= 0x58;
	     i++, asiv_offs += 0x08) {

		u64 u_addr;
		dma_addr_t d_addr;
		u32 u_size = 0;
		u64 ats_flags;

		ats_flags = ATS_GET_FLAGS(cmd->ats, asiv_offs);

		switch (ats_flags) {

		case ATS_TYPE_DATA:
			break;	/* nothing to do here */

		case ATS_TYPE_FLAT_RDWR:
		case ATS_TYPE_FLAT_RD: {
			u_addr = be64_to_cpu(*((__be64 *)&cmd->
					       asiv[asiv_offs]));
			u_size = be32_to_cpu(*((__be32 *)&cmd->
					       asiv[asiv_offs + 0x08]));

			/*
			 * No data available. Ignore u_addr in this
			 * case and set addr to 0. Hardware must not
			 * fetch the buffer.
			 */
			if (u_size == 0x0) {
				*((__be64 *)&cmd->asiv[asiv_offs]) =
					cpu_to_be64(0x0);
				break;
			}

			m = __genwqe_search_mapping(cfile, u_addr, u_size,
						    &d_addr, NULL);
			if (m == NULL) {
				rc = -EFAULT;
				goto err_out;
			}

			*((__be64 *)&cmd->asiv[asiv_offs]) =
				cpu_to_be64(d_addr);
			break;
		}

		case ATS_TYPE_SGL_RDWR:
		case ATS_TYPE_SGL_RD: {
			int page_offs, nr_pages, offs;

			u_addr = be64_to_cpu(*((__be64 *)
					       &cmd->asiv[asiv_offs]));
			u_size = be32_to_cpu(*((__be32 *)
					       &cmd->asiv[asiv_offs + 0x08]));

			/*
			 * No data available. Ignore u_addr in this
			 * case and set addr to 0. Hardware must not
			 * fetch the empty sgl.
			 */
			if (u_size == 0x0) {
				*((__be64 *)&cmd->asiv[asiv_offs]) =
					cpu_to_be64(0x0);
				break;
			}

			m = genwqe_search_pin(cfile, u_addr, u_size, NULL);
			if (m != NULL) {
				type = "PINNING";
				page_offs = (u_addr -
					     (u64)m->u_vaddr)/PAGE_SIZE;
			} else {
				type = "MAPPING";
				m = &req->dma_mappings[i];

				genwqe_mapping_init(m,
						    GENWQE_MAPPING_SGL_TEMP);
				rc = genwqe_user_vmap(cd, m, (void *)u_addr,
						      u_size, req);
				if (rc != 0)
					goto err_out;

				__genwqe_add_mapping(cfile, m);
				page_offs = 0;
			}

			offs = offset_in_page(u_addr);
			nr_pages = DIV_ROUND_UP(offs + u_size, PAGE_SIZE);

			/* create genwqe style scatter gather list */
			req->sgl[i] = genwqe_alloc_sgl(cd, m->nr_pages,
						       &req->sgl_dma_addr[i],
						       &req->sgl_size[i]);
			if (req->sgl[i] == NULL) {
				rc = -ENOMEM;
				goto err_out;
			}
			genwqe_setup_sgl(cd, offs, u_size,
					 req->sgl[i],
					 req->sgl_dma_addr[i],
					 req->sgl_size[i],
					 m->dma_list,
					 page_offs,
					 nr_pages);

			*((__be64 *)&cmd->asiv[asiv_offs]) =
				cpu_to_be64(req->sgl_dma_addr[i]);

			break;
		}
		default:
			rc = -EINVAL;
			goto err_out;
		}
	}
	return 0;

 err_out:
	ddcb_cmd_cleanup(cfile, req);
	return rc;
}

/**
 * genwqe_execute_ddcb() - Execute DDCB using userspace address fixups
 *
 * The code will build up the translation tables or lookup the
 * contiguous memory allocation table to find the right translations
 * and DMA addresses.
 */
static int genwqe_execute_ddcb(struct genwqe_file *cfile,
			       struct genwqe_ddcb_cmd *cmd)
{
	int rc;
	struct genwqe_dev *cd = cfile->cd;
	struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd);

	rc = ddcb_cmd_fixups(cfile, req);
	if (rc != 0)
		return rc;

	rc = __genwqe_execute_raw_ddcb(cd, cmd);
	ddcb_cmd_cleanup(cfile, req);
	return rc;
}

static int do_execute_ddcb(struct genwqe_file *cfile,
			   unsigned long arg, int raw)
{
	int rc;
	struct genwqe_ddcb_cmd *cmd;
	struct ddcb_requ *req;
	struct genwqe_dev *cd = cfile->cd;

	cmd = ddcb_requ_alloc();
	if (cmd == NULL)
		return -ENOMEM;

	req = container_of(cmd, struct ddcb_requ, cmd);

	if (copy_from_user(cmd, (void __user *)arg, sizeof(*cmd))) {
		ddcb_requ_free(cmd);
		return -EFAULT;
	}

	if (!raw)
		rc = genwqe_execute_ddcb(cfile, cmd);
	else
		rc = __genwqe_execute_raw_ddcb(cd, cmd);

	/*
	 * Copy back only the modified fields. Do not copy ASIV
	 * back since the copy got modified by the driver.
	 */
	if (copy_to_user((void __user *)arg, cmd,
			 sizeof(*cmd) - DDCB_ASIV_LENGTH)) {
		ddcb_requ_free(cmd);
		return -EFAULT;
	}

	ddcb_requ_free(cmd);
	return rc;
}

/**
 * genwqe_ioctl() - IO control
 * @filp:	file handle
 * @cmd:	command identifier (passed from user)
 * @arg:	argument (passed from user)
 *
 * Return: 0 on success
 */
static long genwqe_ioctl(struct file *filp, unsigned int cmd,
			 unsigned long arg)
{
	int rc = 0;
	struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
	struct genwqe_dev *cd = cfile->cd;
	struct genwqe_reg_io __user *io;
	u64 val;
	u32 reg_offs;

	if (_IOC_TYPE(cmd) != GENWQE_IOC_CODE)
		return -EINVAL;

	switch (cmd) {

	case GENWQE_GET_CARD_STATE:
		put_user(cd->card_state, (enum genwqe_card_state __user *)arg);
		return 0;

		/* Register access */
	case GENWQE_READ_REG64: {
		io = (struct genwqe_reg_io __user *)arg;

		if (get_user(reg_offs, &io->num))
			return -EFAULT;

		if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x7))
			return -EINVAL;

		val = __genwqe_readq(cd, reg_offs);
		put_user(val, &io->val64);
		return 0;
	}

	case GENWQE_WRITE_REG64: {
		io = (struct genwqe_reg_io __user *)arg;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
			return -EPERM;

		if (get_user(reg_offs, &io->num))
			return -EFAULT;

		if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x7))
			return -EINVAL;

		if (get_user(val, &io->val64))
			return -EFAULT;

		__genwqe_writeq(cd, reg_offs, val);
		return 0;
	}

	case GENWQE_READ_REG32: {
		io = (struct genwqe_reg_io __user *)arg;

		if (get_user(reg_offs, &io->num))
			return -EFAULT;

		if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x3))
			return -EINVAL;

		val = __genwqe_readl(cd, reg_offs);
		put_user(val, &io->val64);
		return 0;
	}

	case GENWQE_WRITE_REG32: {
		io = (struct genwqe_reg_io __user *)arg;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
			return -EPERM;

		if (get_user(reg_offs, &io->num))
			return -EFAULT;

		if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x3))
			return -EINVAL;

		if (get_user(val, &io->val64))
			return -EFAULT;

		__genwqe_writel(cd, reg_offs, val);
		return 0;
	}

		/* Flash update/reading */
	case GENWQE_SLU_UPDATE: {
		struct genwqe_bitstream load;

		if (!genwqe_is_privileged(cd))
			return -EPERM;

		if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
			return -EPERM;

		if (copy_from_user(&load, (void __user *)arg,
				   sizeof(load)))
			return -EFAULT;

		rc = do_flash_update(cfile, &load);

		if (copy_to_user((void __user *)arg, &load, sizeof(load)))
			return -EFAULT;

		return rc;
	}

	case GENWQE_SLU_READ: {
		struct genwqe_bitstream load;

		if (!genwqe_is_privileged(cd))
			return -EPERM;

		if (genwqe_flash_readback_fails(cd))
			return -ENOSPC;	/* known to fail for old versions */

		if (copy_from_user(&load, (void __user *)arg, sizeof(load)))
			return -EFAULT;

		rc = do_flash_read(cfile, &load);

		if (copy_to_user((void __user *)arg, &load, sizeof(load)))
			return -EFAULT;

		return rc;
	}

		/* memory pinning and unpinning */
	case GENWQE_PIN_MEM: {
		struct genwqe_mem m;

		if (copy_from_user(&m, (void __user *)arg, sizeof(m)))
			return -EFAULT;

		return genwqe_pin_mem(cfile, &m);
	}

	case GENWQE_UNPIN_MEM: {
		struct genwqe_mem m;

		if (copy_from_user(&m, (void __user *)arg, sizeof(m)))
			return -EFAULT;

		return genwqe_unpin_mem(cfile, &m);
	}

		/* launch a DDCB and wait for completion */
	case GENWQE_EXECUTE_DDCB:
		return do_execute_ddcb(cfile, arg, 0);

	case GENWQE_EXECUTE_RAW_DDCB: {

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		return do_execute_ddcb(cfile, arg, 1);
	}

	default:
		return -EINVAL;
	}

	return rc;
}

#if defined(CONFIG_COMPAT)
/**
 * genwqe_compat_ioctl() - Compatibility ioctl
 *
 * Called whenever a 32-bit process running under a 64-bit kernel
 * performs an ioctl on /dev/genwqe<n>_card.
 *
 * @filp:	file pointer.
 * @cmd:	command.
 * @arg:	user argument.
 * Return:	zero on success or negative number on failure.
 */
static long genwqe_compat_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	return genwqe_ioctl(filp, cmd, arg);
}
#endif /* defined(CONFIG_COMPAT) */

static const struct file_operations genwqe_fops = {
	.owner		= THIS_MODULE,
	.open		= genwqe_open,
	.fasync		= genwqe_fasync,
	.mmap		= genwqe_mmap,
	.unlocked_ioctl	= genwqe_ioctl,
#if defined(CONFIG_COMPAT)
	.compat_ioctl	= genwqe_compat_ioctl,
#endif
	.release	= genwqe_release,
};

static int genwqe_device_initialized(struct genwqe_dev *cd)
{
	return cd->dev != NULL;
}

/**
 * genwqe_device_create() - Create and configure genwqe char device
 * @cd:	genwqe device descriptor
 *
 * This function must be called before we create any more genwqe
 * character devices, because it is allocating the major and minor
 * number which are supposed to be used by the client drivers.
 */
int genwqe_device_create(struct genwqe_dev *cd)
{
	int rc;
	struct pci_dev *pci_dev = cd->pci_dev;

	/*
	 * Here starts the individual setup per client. It must
	 * initialize its own cdev data structure with its own fops.
	 * The appropriate devnum needs to be created. The ranges must
	 * not overlap.
	 */
	rc = alloc_chrdev_region(&cd->devnum_genwqe, 0,
				 GENWQE_MAX_MINOR, GENWQE_DEVNAME);
	if (rc < 0) {
		dev_err(&pci_dev->dev, "err: alloc_chrdev_region failed\n");
		goto err_dev;
	}

	cdev_init(&cd->cdev_genwqe, &genwqe_fops);
	cd->cdev_genwqe.owner = THIS_MODULE;

	rc = cdev_add(&cd->cdev_genwqe, cd->devnum_genwqe, 1);
	if (rc < 0) {
		dev_err(&pci_dev->dev, "err: cdev_add failed\n");
		goto err_add;
	}

	/*
	 * Finally the device in /dev/... must be created. The rule is
	 * to use card%d_clientname for each created device.
	 */
	cd->dev = device_create_with_groups(cd->class_genwqe,
					    &cd->pci_dev->dev,
					    cd->devnum_genwqe, cd,
					    genwqe_attribute_groups,
					    GENWQE_DEVNAME "%u_card",
					    cd->card_idx);
	if (IS_ERR(cd->dev)) {
		rc = PTR_ERR(cd->dev);
		goto err_cdev;
	}

	rc = genwqe_init_debugfs(cd);
	if (rc != 0)
		goto err_debugfs;

	return 0;

 err_debugfs:
	device_destroy(cd->class_genwqe, cd->devnum_genwqe);
 err_cdev:
	cdev_del(&cd->cdev_genwqe);
 err_add:
	unregister_chrdev_region(cd->devnum_genwqe, GENWQE_MAX_MINOR);
 err_dev:
	cd->dev = NULL;
	return rc;
}

static int genwqe_inform_and_stop_processes(struct genwqe_dev *cd)
{
	int rc;
	unsigned int i;
	struct pci_dev *pci_dev = cd->pci_dev;

	if (!genwqe_open_files(cd))
		return 0;

	dev_warn(&pci_dev->dev, "[%s] send SIGIO and wait ...\n", __func__);

	rc = genwqe_kill_fasync(cd, SIGIO);
	if (rc > 0) {
		/* give kill_timeout seconds to close file descriptors ... */
		for (i = 0; (i < genwqe_kill_timeout) &&
			     genwqe_open_files(cd); i++) {
			dev_info(&pci_dev->dev, "  %d sec ...", i);

			cond_resched();
			msleep(1000);
		}

		/* if no open files we can safely continue, else ... */
		if (!genwqe_open_files(cd))
			return 0;

		dev_warn(&pci_dev->dev,
			 "[%s] send SIGKILL and wait ...\n", __func__);

		rc = genwqe_force_sig(cd, SIGKILL); /* force terminate */
		if (rc) {
			/* Give kill_timeout more seconds to end processes */
			for (i = 0; (i < genwqe_kill_timeout) &&
				     genwqe_open_files(cd); i++) {
				dev_warn(&pci_dev->dev, "  %d sec ...", i);

				cond_resched();
				msleep(1000);
			}
		}
	}
	return 0;
}

/**
 * genwqe_device_remove() - Remove genwqe's char device
 *
 * This function must be called after the client devices are removed
 * because it will free the major/minor number range for the genwqe
 * drivers.
 *
 * This function must be robust enough to be called twice.
 */
int genwqe_device_remove(struct genwqe_dev *cd)
{
	int rc;
	struct pci_dev *pci_dev = cd->pci_dev;

	if (!genwqe_device_initialized(cd))
		return 1;

	genwqe_inform_and_stop_processes(cd);

	/*
	 * We currently do wait until all filedescriptors are
	 * closed. This leads to a problem when we abort the
	 * application which will decrease this reference from
	 * 1/unused to 0/illegal and not from 2/used 1/empty.
	 */
	rc = atomic_read(&cd->cdev_genwqe.kobj.kref.refcount);
	if (rc != 1) {
		dev_err(&pci_dev->dev,
			"[%s] err: cdev_genwqe...refcount=%d\n", __func__, rc);
		panic("Fatal err: cannot free resources with pending references!");
	}

	genqwe_exit_debugfs(cd);
	device_destroy(cd->class_genwqe, cd->devnum_genwqe);
	cdev_del(&cd->cdev_genwqe);
	unregister_chrdev_region(cd->devnum_genwqe, GENWQE_MAX_MINOR);
	cd->dev = NULL;

	return 0;
}