/*
 * CXL Flash Device Driver
 *
 * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *	       Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2018 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/file.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>

#include <misc/ocxl.h>

#include <uapi/misc/cxl.h>

#include "backend.h"
#include "ocxl_hw.h"

/*
 * Pseudo-filesystem to allocate inodes.
 */

#define OCXLFLASH_FS_MAGIC	0x1697698f

static int ocxlflash_fs_cnt;
static struct vfsmount *ocxlflash_vfs_mount;

static const struct dentry_operations ocxlflash_fs_dops = {
	.d_dname	= simple_dname,
};

/**
 * ocxlflash_fs_mount() - mount the pseudo-filesystem
 * @fs_type:	File system type.
 * @flags:	Flags for the filesystem.
 * @dev_name:	Device name associated with the filesystem.
 * @data:	Data pointer.
 *
 * Return: pointer to the directory entry structure
 */
static struct dentry *ocxlflash_fs_mount(struct file_system_type *fs_type,
					 int flags, const char *dev_name,
					 void *data)
{
	return mount_pseudo(fs_type, "ocxlflash:", NULL, &ocxlflash_fs_dops,
			    OCXLFLASH_FS_MAGIC);
}

static struct file_system_type ocxlflash_fs_type = {
	.name		= "ocxlflash",
	.owner		= THIS_MODULE,
	.mount		= ocxlflash_fs_mount,
	.kill_sb	= kill_anon_super,
};

/**
 * ocxlflash_release_mapping() - release the memory mapping
 * @ctx:	Context whose mapping is to be released.
 */
static void ocxlflash_release_mapping(struct ocxlflash_context *ctx)
{
	if (ctx->mapping)
		simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
	ctx->mapping = NULL;
}
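/*
 * Note: a private pseudo-filesystem is used here, rather than anon_inode
 * fds, so that every adapter context gets its own inode and address_space.
 * All anon_inode files share a single inode, which would make it impossible
 * to track the per-context mapping saved in ctx->mapping. This mirrors the
 * approach taken by the cxl driver.
 */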
/**
 * ocxlflash_getfile() - allocate pseudo filesystem, inode, and the file
 * @dev:	Generic device of the host.
 * @name:	Name of the pseudo filesystem.
 * @fops:	File operations.
 * @priv:	Private data.
 * @flags:	Flags for the file.
 *
 * Return: pointer to the file on success, ERR_PTR on failure
 */
static struct file *ocxlflash_getfile(struct device *dev, const char *name,
				      const struct file_operations *fops,
				      void *priv, int flags)
{
	struct qstr this;
	struct path path;
	struct file *file;
	struct inode *inode = NULL;
	int rc;

	if (fops->owner && !try_module_get(fops->owner)) {
		dev_err(dev, "%s: Owner does not exist\n", __func__);
		rc = -ENOENT;
		goto err1;
	}

	rc = simple_pin_fs(&ocxlflash_fs_type, &ocxlflash_vfs_mount,
			   &ocxlflash_fs_cnt);
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: Cannot mount ocxlflash pseudofs rc=%d\n",
			__func__, rc);
		goto err2;
	}

	inode = alloc_anon_inode(ocxlflash_vfs_mount->mnt_sb);
	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		dev_err(dev, "%s: alloc_anon_inode failed rc=%d\n",
			__func__, rc);
		goto err3;
	}

	this.name = name;
	this.len = strlen(name);
	this.hash = 0;
	path.dentry = d_alloc_pseudo(ocxlflash_vfs_mount->mnt_sb, &this);
	if (!path.dentry) {
		dev_err(dev, "%s: d_alloc_pseudo failed\n", __func__);
		rc = -ENOMEM;
		goto err4;
	}

	path.mnt = mntget(ocxlflash_vfs_mount);
	d_instantiate(path.dentry, inode);

	file = alloc_file(&path, OPEN_FMODE(flags), fops);
	if (IS_ERR(file)) {
		rc = PTR_ERR(file);
		dev_err(dev, "%s: alloc_file failed rc=%d\n",
			__func__, rc);
		goto err5;
	}

	file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
	file->private_data = priv;
out:
	return file;
err5:
	/*
	 * d_instantiate() handed the inode reference over to the dentry,
	 * so dropping the path is sufficient here; an additional iput()
	 * would over-release the inode.
	 */
	path_put(&path);
	goto err3;
err4:
	iput(inode);
err3:
	simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
err2:
	module_put(fops->owner);
err1:
	file = ERR_PTR(rc);
	goto out;
}

/**
 * ocxlflash_psa_map() - map the process specific MMIO space
 * @ctx_cookie:	Adapter context for which the mapping needs to be done.
 *
 * Return: MMIO pointer of the mapped region
 */
static void __iomem *ocxlflash_psa_map(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct device *dev = ctx->hw_afu->dev;

	mutex_lock(&ctx->state_mutex);
	if (ctx->state != STARTED) {
		dev_err(dev, "%s: Context not started, state=%d\n", __func__,
			ctx->state);
		mutex_unlock(&ctx->state_mutex);
		return NULL;
	}
	mutex_unlock(&ctx->state_mutex);

	return ioremap(ctx->psn_phys, ctx->psn_size);
}

/**
 * ocxlflash_psa_unmap() - unmap the process specific MMIO space
 * @addr:	MMIO pointer to unmap.
 */
static void ocxlflash_psa_unmap(void __iomem *addr)
{
	iounmap(addr);
}

/**
 * ocxlflash_process_element() - get process element of the adapter context
 * @ctx_cookie:	Adapter context associated with the process element.
 *
 * Return: process element of the adapter context
 */
static int ocxlflash_process_element(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;

	return ctx->pe;
}
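/*
 * Each per-context AFU interrupt carries two pieces of state that are
 * populated in afu_map_irq() below: a Linux virtual IRQ mapped from the
 * hardware IRQ handed out by the OCXL link, and an ioremap() of the
 * interrupt's trigger page, writes to which raise the interrupt.
 */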
/**
 * afu_map_irq() - map the interrupt of the adapter context
 * @flags:	Flags.
 * @ctx:	Adapter context.
 * @num:	Per-context AFU interrupt number.
 * @handler:	Interrupt handler to register.
 * @cookie:	Interrupt handler private data.
 * @name:	Name of the interrupt.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_map_irq(u64 flags, struct ocxlflash_context *ctx, int num,
		       irq_handler_t handler, void *cookie, char *name)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	struct ocxlflash_irqs *irq;
	void __iomem *vtrig;
	u32 virq;
	int rc = 0;

	if (num < 0 || num >= ctx->num_irqs) {
		dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
		rc = -ENOENT;
		goto out;
	}

	irq = &ctx->irqs[num];
	virq = irq_create_mapping(NULL, irq->hwirq);
	if (unlikely(!virq)) {
		dev_err(dev, "%s: irq_create_mapping failed\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	rc = request_irq(virq, handler, 0, name, cookie);
	if (unlikely(rc)) {
		dev_err(dev, "%s: request_irq failed rc=%d\n", __func__, rc);
		goto err1;
	}

	vtrig = ioremap(irq->ptrig, PAGE_SIZE);
	if (unlikely(!vtrig)) {
		dev_err(dev, "%s: Trigger page mapping failed\n", __func__);
		rc = -ENOMEM;
		goto err2;
	}

	irq->virq = virq;
	irq->vtrig = vtrig;
out:
	return rc;
err2:
	free_irq(virq, cookie);
err1:
	irq_dispose_mapping(virq);
	goto out;
}

/**
 * ocxlflash_map_afu_irq() - map the interrupt of the adapter context
 * @ctx_cookie:	Adapter context.
 * @num:	Per-context AFU interrupt number.
 * @handler:	Interrupt handler to register.
 * @cookie:	Interrupt handler private data.
 * @name:	Name of the interrupt.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_map_afu_irq(void *ctx_cookie, int num,
				 irq_handler_t handler, void *cookie,
				 char *name)
{
	return afu_map_irq(0, ctx_cookie, num, handler, cookie, name);
}

/**
 * afu_unmap_irq() - unmap the interrupt
 * @flags:	Flags.
 * @ctx:	Adapter context.
 * @num:	Per-context AFU interrupt number.
 * @cookie:	Interrupt handler private data.
 */
static void afu_unmap_irq(u64 flags, struct ocxlflash_context *ctx, int num,
			  void *cookie)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	struct ocxlflash_irqs *irq;

	if (num < 0 || num >= ctx->num_irqs) {
		dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
		return;
	}

	irq = &ctx->irqs[num];
	if (irq->vtrig)
		iounmap(irq->vtrig);

	if (irq_find_mapping(NULL, irq->hwirq)) {
		free_irq(irq->virq, cookie);
		irq_dispose_mapping(irq->virq);
	}

	memset(irq, 0, sizeof(*irq));
}

/**
 * ocxlflash_unmap_afu_irq() - unmap the interrupt
 * @ctx_cookie:	Adapter context.
 * @num:	Per-context AFU interrupt number.
 * @cookie:	Interrupt handler private data.
 */
static void ocxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie)
{
	return afu_unmap_irq(0, ctx_cookie, num, cookie);
}
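/*
 * The object handle returned below is the kernel virtual address of the
 * interrupt's trigger page mapping. It is handed to the AFU, which is
 * expected to raise the interrupt by targeting a write at that effective
 * address.
 */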
/**
 * ocxlflash_get_irq_objhndl() - get the object handle for an interrupt
 * @ctx_cookie:	Context associated with the interrupt.
 * @irq:	Interrupt number.
 *
 * Return: effective address of the mapped region
 */
static u64 ocxlflash_get_irq_objhndl(void *ctx_cookie, int irq)
{
	struct ocxlflash_context *ctx = ctx_cookie;

	if (irq < 0 || irq >= ctx->num_irqs)
		return 0;

	return (__force u64)ctx->irqs[irq].vtrig;
}

/**
 * ocxlflash_xsl_fault() - callback when translation error is triggered
 * @data:	Private data provided at callback registration, the context.
 * @addr:	Address that triggered the error.
 * @dsisr:	Value of dsisr register.
 */
static void ocxlflash_xsl_fault(void *data, u64 addr, u64 dsisr)
{
	struct ocxlflash_context *ctx = data;

	spin_lock(&ctx->slock);
	ctx->fault_addr = addr;
	ctx->fault_dsisr = dsisr;
	ctx->pending_fault = true;
	spin_unlock(&ctx->slock);

	wake_up_all(&ctx->wq);
}

/**
 * start_context() - local routine to start a context
 * @ctx:	Adapter context to be started.
 *
 * Assign the context specific MMIO space, add and enable the PE.
 *
 * Return: 0 on success, -errno on failure
 */
static int start_context(struct ocxlflash_context *ctx)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct ocxl_afu_config *acfg = &afu->acfg;
	void *link_token = afu->link_token;
	struct device *dev = afu->dev;
	bool master = ctx->master;
	struct mm_struct *mm;
	int rc = 0;
	u32 pid;

	mutex_lock(&ctx->state_mutex);
	if (ctx->state != OPENED) {
		dev_err(dev, "%s: Context state invalid, state=%d\n",
			__func__, ctx->state);
		rc = -EINVAL;
		goto out;
	}

	if (master) {
		ctx->psn_size = acfg->global_mmio_size;
		ctx->psn_phys = afu->gmmio_phys;
	} else {
		ctx->psn_size = acfg->pp_mmio_stride;
		ctx->psn_phys = afu->ppmmio_phys + (ctx->pe * ctx->psn_size);
	}

	/* pid and mm not set for master contexts */
	if (master) {
		pid = 0;
		mm = NULL;
	} else {
		pid = current->mm->context.id;
		mm = current->mm;
	}

	rc = ocxl_link_add_pe(link_token, ctx->pe, pid, 0, 0, mm,
			      ocxlflash_xsl_fault, ctx);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_link_add_pe failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	ctx->state = STARTED;
out:
	mutex_unlock(&ctx->state_mutex);
	return rc;
}

/**
 * ocxlflash_start_context() - start a kernel context
 * @ctx_cookie:	Adapter context to be started.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_start_context(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;

	return start_context(ctx);
}
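/*
 * Context lifecycle: contexts are created in the OPENED state
 * (ocxlflash_dev_context_init), move to STARTED once their PE has been
 * added to the OCXL link (start_context), and end up CLOSED when stopped
 * or released. Transitions are serialized by ctx->state_mutex.
 */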
/**
 * ocxlflash_stop_context() - stop a context
 * @ctx_cookie:	Adapter context to be stopped.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_stop_context(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct ocxl_afu_config *acfg = &afu->acfg;
	struct pci_dev *pdev = afu->pdev;
	struct device *dev = afu->dev;
	enum ocxlflash_ctx_state state;
	int rc = 0;

	mutex_lock(&ctx->state_mutex);
	state = ctx->state;
	ctx->state = CLOSED;
	mutex_unlock(&ctx->state_mutex);
	if (state != STARTED)
		goto out;

	rc = ocxl_config_terminate_pasid(pdev, acfg->dvsec_afu_control_pos,
					 ctx->pe);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_terminate_pasid failed rc=%d\n",
			__func__, rc);
		/* If EBUSY, PE could be referenced in future by the AFU */
		if (rc == -EBUSY)
			goto out;
	}

	rc = ocxl_link_remove_pe(afu->link_token, ctx->pe);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_link_remove_pe failed rc=%d\n",
			__func__, rc);
		goto out;
	}
out:
	return rc;
}

/**
 * ocxlflash_afu_reset() - reset the AFU
 * @ctx_cookie:	Adapter context.
 *
 * Return: 0 (always; reset is not yet implemented)
 */
static int ocxlflash_afu_reset(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct device *dev = ctx->hw_afu->dev;

	/* Pending implementation from OCXL transport services */
	dev_err_once(dev, "%s: afu_reset() fop not supported\n", __func__);

	/* Silently return success until it is implemented */
	return 0;
}

/**
 * ocxlflash_set_master() - sets the context as master
 * @ctx_cookie:	Adapter context to set as master.
 */
static void ocxlflash_set_master(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;

	ctx->master = true;
}

/**
 * ocxlflash_get_context() - obtains the context associated with the host
 * @pdev:	PCI device associated with the host.
 * @afu_cookie:	Hardware AFU associated with the host.
 *
 * Return: returns the pointer to host adapter context
 */
static void *ocxlflash_get_context(struct pci_dev *pdev, void *afu_cookie)
{
	struct ocxl_hw_afu *afu = afu_cookie;

	return afu->ocxl_ctx;
}
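/*
 * The context's process element (ctx->pe) doubles as its PASID: it is
 * allocated from the AFU-wide IDR below and bounded by afu->max_pasid,
 * which ocxlflash_config_afu() derives from the PASID width advertised
 * in the AFU configuration record.
 */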
/**
 * ocxlflash_dev_context_init() - allocate and initialize an adapter context
 * @pdev:	PCI device associated with the host.
 * @afu_cookie:	Hardware AFU associated with the host.
 *
 * Return: returns the adapter context on success, ERR_PTR on failure
 */
static void *ocxlflash_dev_context_init(struct pci_dev *pdev, void *afu_cookie)
{
	struct ocxl_hw_afu *afu = afu_cookie;
	struct device *dev = afu->dev;
	struct ocxlflash_context *ctx;
	int rc;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (unlikely(!ctx)) {
		dev_err(dev, "%s: Context allocation failed\n", __func__);
		rc = -ENOMEM;
		goto err1;
	}

	idr_preload(GFP_KERNEL);
	rc = idr_alloc(&afu->idr, ctx, 0, afu->max_pasid, GFP_NOWAIT);
	idr_preload_end();
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: idr_alloc failed rc=%d\n", __func__, rc);
		goto err2;
	}

	spin_lock_init(&ctx->slock);
	init_waitqueue_head(&ctx->wq);
	mutex_init(&ctx->state_mutex);

	ctx->state = OPENED;
	ctx->pe = rc;
	ctx->master = false;
	ctx->mapping = NULL;
	ctx->hw_afu = afu;
	ctx->irq_bitmap = 0;
	ctx->pending_irq = false;
	ctx->pending_fault = false;
out:
	return ctx;
err2:
	kfree(ctx);
err1:
	ctx = ERR_PTR(rc);
	goto out;
}

/**
 * ocxlflash_release_context() - releases an adapter context
 * @ctx_cookie:	Adapter context to be released.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_release_context(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct device *dev;
	int rc = 0;

	if (!ctx)
		goto out;

	dev = ctx->hw_afu->dev;
	mutex_lock(&ctx->state_mutex);
	if (ctx->state >= STARTED) {
		dev_err(dev, "%s: Context in use, state=%d\n", __func__,
			ctx->state);
		mutex_unlock(&ctx->state_mutex);
		rc = -EBUSY;
		goto out;
	}
	mutex_unlock(&ctx->state_mutex);

	idr_remove(&ctx->hw_afu->idr, ctx->pe);
	ocxlflash_release_mapping(ctx);
	kfree(ctx);
out:
	return rc;
}

/**
 * ocxlflash_perst_reloads_same_image() - sets the image reload policy
 * @afu_cookie:	Hardware AFU associated with the host.
 * @image:	Whether to load the same image on PERST.
 */
static void ocxlflash_perst_reloads_same_image(void *afu_cookie, bool image)
{
	struct ocxl_hw_afu *afu = afu_cookie;

	afu->perst_same_image = image;
}

/**
 * ocxlflash_read_adapter_vpd() - reads the adapter VPD
 * @pdev:	PCI device associated with the host.
 * @buf:	Buffer to get the VPD data.
 * @count:	Size of buffer (maximum bytes that can be read).
 *
 * Return: size of VPD on success, -errno on failure
 */
static ssize_t ocxlflash_read_adapter_vpd(struct pci_dev *pdev, void *buf,
					  size_t count)
{
	return pci_read_vpd(pdev, 0, count, buf);
}

/**
 * free_afu_irqs() - internal service to free interrupts
 * @ctx:	Adapter context.
 */
static void free_afu_irqs(struct ocxlflash_context *ctx)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	int i;

	if (!ctx->irqs) {
		dev_err(dev, "%s: Interrupts not allocated\n", __func__);
		return;
	}

	for (i = ctx->num_irqs - 1; i >= 0; i--)
		ocxl_link_free_irq(afu->link_token, ctx->irqs[i].hwirq);

	kfree(ctx->irqs);
	ctx->irqs = NULL;
}
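/*
 * Interrupt sources for a context are obtained from the OCXL link:
 * ocxl_link_irq_alloc() hands back a hardware IRQ number along with the
 * physical address of its trigger page, both of which are stashed in
 * ctx->irqs[] for afu_map_irq() to consume later. alloc_afu_irqs() and
 * free_afu_irqs() must therefore always be used as a pair.
 */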
/**
 * alloc_afu_irqs() - internal service to allocate interrupts
 * @ctx:	Context associated with the request.
 * @num:	Number of interrupts requested.
 *
 * Return: 0 on success, -errno on failure
 */
static int alloc_afu_irqs(struct ocxlflash_context *ctx, int num)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	struct ocxlflash_irqs *irqs;
	u64 addr;
	int rc = 0;
	int hwirq;
	int i;

	if (ctx->irqs) {
		dev_err(dev, "%s: Interrupts already allocated\n", __func__);
		rc = -EEXIST;
		goto out;
	}

	if (num > OCXL_MAX_IRQS) {
		dev_err(dev, "%s: Too many interrupts num=%d\n", __func__, num);
		rc = -EINVAL;
		goto out;
	}

	irqs = kcalloc(num, sizeof(*irqs), GFP_KERNEL);
	if (unlikely(!irqs)) {
		dev_err(dev, "%s: Context irqs allocation failed\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num; i++) {
		rc = ocxl_link_irq_alloc(afu->link_token, &hwirq, &addr);
		if (unlikely(rc)) {
			dev_err(dev, "%s: ocxl_link_irq_alloc failed rc=%d\n",
				__func__, rc);
			goto err;
		}

		irqs[i].hwirq = hwirq;
		irqs[i].ptrig = addr;
	}

	ctx->irqs = irqs;
	ctx->num_irqs = num;
out:
	return rc;
err:
	for (i = i - 1; i >= 0; i--)
		ocxl_link_free_irq(afu->link_token, irqs[i].hwirq);
	kfree(irqs);
	goto out;
}

/**
 * ocxlflash_allocate_afu_irqs() - allocates the requested number of interrupts
 * @ctx_cookie:	Context associated with the request.
 * @num:	Number of interrupts requested.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_allocate_afu_irqs(void *ctx_cookie, int num)
{
	return alloc_afu_irqs(ctx_cookie, num);
}

/**
 * ocxlflash_free_afu_irqs() - frees the interrupts of an adapter context
 * @ctx_cookie:	Adapter context.
 */
static void ocxlflash_free_afu_irqs(void *ctx_cookie)
{
	free_afu_irqs(ctx_cookie);
}

/**
 * ocxlflash_unconfig_afu() - unconfigure the AFU
 * @afu:	AFU associated with the host.
 */
static void ocxlflash_unconfig_afu(struct ocxl_hw_afu *afu)
{
	if (afu->gmmio_virt) {
		iounmap(afu->gmmio_virt);
		afu->gmmio_virt = NULL;
	}
}

/**
 * ocxlflash_destroy_afu() - destroy the AFU structure
 * @afu_cookie:	AFU to be freed.
 */
static void ocxlflash_destroy_afu(void *afu_cookie)
{
	struct ocxl_hw_afu *afu = afu_cookie;
	int pos;

	if (!afu)
		return;

	ocxlflash_release_context(afu->ocxl_ctx);
	idr_destroy(&afu->idr);

	/* Disable the AFU */
	pos = afu->acfg.dvsec_afu_control_pos;
	ocxl_config_set_afu_state(afu->pdev, pos, 0);

	ocxlflash_unconfig_afu(afu);
	kfree(afu);
}
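/*
 * Host bring-up happens in two stages. ocxlflash_config_fn() handles the
 * function-wide pieces: reading the function DVSEC, assigning the acTag
 * range, setting up the OCXL link and configuring the transaction layer.
 * ocxlflash_config_afu() then configures the (single) AFU within that
 * function and maps its MMIO regions.
 */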
/**
 * ocxlflash_config_fn() - configure the host function
 * @pdev:	PCI device associated with the host.
 * @afu:	AFU associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_config_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
{
	struct ocxl_fn_config *fcfg = &afu->fcfg;
	struct device *dev = &pdev->dev;
	u16 base, enabled, supported;
	int rc = 0;

	/* Read DVSEC config of the function */
	rc = ocxl_config_read_function(pdev, fcfg);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_read_function failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	/* Check if function has AFUs defined, only 1 per function supported */
	if (fcfg->max_afu_index >= 0) {
		afu->is_present = true;
		if (fcfg->max_afu_index != 0)
			dev_warn(dev, "%s: Unexpected AFU index value %d\n",
				 __func__, fcfg->max_afu_index);
	}

	rc = ocxl_config_get_actag_info(pdev, &base, &enabled, &supported);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_get_actag_info failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	afu->fn_actag_base = base;
	afu->fn_actag_enabled = enabled;

	ocxl_config_set_actag(pdev, fcfg->dvsec_function_pos, base, enabled);
	dev_dbg(dev, "%s: Function acTag range base=%u enabled=%u\n",
		__func__, base, enabled);

	rc = ocxl_link_setup(pdev, 0, &afu->link_token);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_link_setup failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	rc = ocxl_config_set_TL(pdev, fcfg->dvsec_tl_pos);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_set_TL failed rc=%d\n",
			__func__, rc);
		goto err;
	}
out:
	return rc;
err:
	ocxl_link_release(pdev, afu->link_token);
	goto out;
}

/**
 * ocxlflash_unconfig_fn() - unconfigure the host function
 * @pdev:	PCI device associated with the host.
 * @afu:	AFU associated with the host.
 */
static void ocxlflash_unconfig_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
{
	ocxl_link_release(pdev, afu->link_token);
}

/**
 * ocxlflash_map_mmio() - map the AFU MMIO space
 * @afu:	AFU associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_map_mmio(struct ocxl_hw_afu *afu)
{
	struct ocxl_afu_config *acfg = &afu->acfg;
	struct pci_dev *pdev = afu->pdev;
	struct device *dev = afu->dev;
	phys_addr_t gmmio, ppmmio;
	int rc = 0;

	rc = pci_request_region(pdev, acfg->global_mmio_bar, "ocxlflash");
	if (unlikely(rc)) {
		dev_err(dev, "%s: pci_request_region for global failed rc=%d\n",
			__func__, rc);
		goto out;
	}
	gmmio = pci_resource_start(pdev, acfg->global_mmio_bar);
	gmmio += acfg->global_mmio_offset;

	rc = pci_request_region(pdev, acfg->pp_mmio_bar, "ocxlflash");
	if (unlikely(rc)) {
		dev_err(dev, "%s: pci_request_region for pp bar failed rc=%d\n",
			__func__, rc);
		goto err1;
	}
	ppmmio = pci_resource_start(pdev, acfg->pp_mmio_bar);
	ppmmio += acfg->pp_mmio_offset;

	afu->gmmio_virt = ioremap(gmmio, acfg->global_mmio_size);
	if (unlikely(!afu->gmmio_virt)) {
		dev_err(dev, "%s: MMIO mapping failed\n", __func__);
		rc = -ENOMEM;
		goto err2;
	}

	afu->gmmio_phys = gmmio;
	afu->ppmmio_phys = ppmmio;
out:
	return rc;
err2:
	pci_release_region(pdev, acfg->pp_mmio_bar);
err1:
	pci_release_region(pdev, acfg->global_mmio_bar);
	goto out;
}
/**
 * ocxlflash_config_afu() - configure the host AFU
 * @pdev:	PCI device associated with the host.
 * @afu:	AFU associated with the host.
 *
 * Must be called _after_ host function configuration.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_config_afu(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
{
	struct ocxl_afu_config *acfg = &afu->acfg;
	struct ocxl_fn_config *fcfg = &afu->fcfg;
	struct device *dev = &pdev->dev;
	int count;
	int base;
	int pos;
	int rc = 0;

	/* This HW AFU function does not have any AFUs defined */
	if (!afu->is_present)
		goto out;

	/* Read AFU config at index 0 */
	rc = ocxl_config_read_afu(pdev, fcfg, acfg, 0);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_read_afu failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	/* Only one AFU per function is supported, so actag_base is same */
	base = afu->fn_actag_base;
	count = min_t(int, acfg->actag_supported, afu->fn_actag_enabled);
	pos = acfg->dvsec_afu_control_pos;

	ocxl_config_set_afu_actag(pdev, pos, base, count);
	dev_dbg(dev, "%s: acTag base=%d enabled=%d\n", __func__, base, count);
	afu->afu_actag_base = base;
	afu->afu_actag_enabled = count;
	afu->max_pasid = 1 << acfg->pasid_supported_log;

	ocxl_config_set_afu_pasid(pdev, pos, 0, acfg->pasid_supported_log);

	rc = ocxlflash_map_mmio(afu);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxlflash_map_mmio failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	/* Enable the AFU */
	ocxl_config_set_afu_state(pdev, acfg->dvsec_afu_control_pos, 1);
out:
	return rc;
}

/**
 * ocxlflash_create_afu() - create the AFU for OCXL
 * @pdev:	PCI device associated with the host.
 *
 * Return: AFU on success, NULL on failure
 */
static void *ocxlflash_create_afu(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ocxlflash_context *ctx;
	struct ocxl_hw_afu *afu;
	int rc;

	afu = kzalloc(sizeof(*afu), GFP_KERNEL);
	if (unlikely(!afu)) {
		dev_err(dev, "%s: HW AFU allocation failed\n", __func__);
		goto out;
	}

	afu->pdev = pdev;
	afu->dev = dev;
	idr_init(&afu->idr);

	rc = ocxlflash_config_fn(pdev, afu);
	if (unlikely(rc)) {
		dev_err(dev, "%s: Function configuration failed rc=%d\n",
			__func__, rc);
		goto err1;
	}

	rc = ocxlflash_config_afu(pdev, afu);
	if (unlikely(rc)) {
		dev_err(dev, "%s: AFU configuration failed rc=%d\n",
			__func__, rc);
		goto err2;
	}

	ctx = ocxlflash_dev_context_init(pdev, afu);
	if (IS_ERR(ctx)) {
		rc = PTR_ERR(ctx);
		dev_err(dev, "%s: ocxlflash_dev_context_init failed rc=%d\n",
			__func__, rc);
		goto err3;
	}

	afu->ocxl_ctx = ctx;
out:
	return afu;
err3:
	ocxlflash_unconfig_afu(afu);
err2:
	ocxlflash_unconfig_fn(pdev, afu);
err1:
	idr_destroy(&afu->idr);
	kfree(afu);
	afu = NULL;
	goto out;
}

/**
 * ctx_event_pending() - check for any event pending on the context
 * @ctx:	Context to be checked.
 *
 * Return: true if there is an event pending, false if none pending
 */
static inline bool ctx_event_pending(struct ocxlflash_context *ctx)
{
	if (ctx->pending_irq || ctx->pending_fault)
		return true;

	return false;
}
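/*
 * The file operations below implement the user-facing event interface,
 * matching the uapi established by the cxl driver (see uapi/misc/cxl.h):
 * poll() wakes when an AFU interrupt or a translation fault is pending,
 * and read() delivers the corresponding struct cxl_event.
 */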
/**
 * afu_poll() - poll the AFU for events on the context
 * @file:	File associated with the adapter context.
 * @poll:	Poll structure from the user.
 *
 * Return: poll mask
 */
static unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
{
	struct ocxlflash_context *ctx = file->private_data;
	struct device *dev = ctx->hw_afu->dev;
	ulong lock_flags;
	int mask = 0;

	poll_wait(file, &ctx->wq, poll);

	spin_lock_irqsave(&ctx->slock, lock_flags);
	if (ctx_event_pending(ctx))
		mask |= POLLIN | POLLRDNORM;
	else if (ctx->state == CLOSED)
		mask |= POLLERR;
	spin_unlock_irqrestore(&ctx->slock, lock_flags);

	dev_dbg(dev, "%s: Poll wait completed for pe %i mask %i\n",
		__func__, ctx->pe, mask);

	return mask;
}

/**
 * afu_read() - perform a read on the context for any event
 * @file:	File associated with the adapter context.
 * @buf:	Buffer to receive the data.
 * @count:	Size of buffer (maximum bytes that can be read).
 * @off:	Offset.
 *
 * Return: size of the data read on success, -errno on failure
 */
static ssize_t afu_read(struct file *file, char __user *buf, size_t count,
			loff_t *off)
{
	struct ocxlflash_context *ctx = file->private_data;
	struct device *dev = ctx->hw_afu->dev;
	struct cxl_event event;
	ulong lock_flags;
	ssize_t esize;
	ssize_t rc;
	int bit;
	DEFINE_WAIT(event_wait);

	if (*off != 0) {
		dev_err(dev, "%s: Non-zero offset not supported, off=%lld\n",
			__func__, *off);
		rc = -EINVAL;
		goto out;
	}

	spin_lock_irqsave(&ctx->slock, lock_flags);

	for (;;) {
		prepare_to_wait(&ctx->wq, &event_wait, TASK_INTERRUPTIBLE);

		if (ctx_event_pending(ctx) || (ctx->state == CLOSED))
			break;

		if (file->f_flags & O_NONBLOCK) {
			dev_err(dev, "%s: File cannot be blocked on I/O\n",
				__func__);
			rc = -EAGAIN;
			goto err;
		}

		if (signal_pending(current)) {
			dev_err(dev, "%s: Signal pending on the process\n",
				__func__);
			rc = -ERESTARTSYS;
			goto err;
		}

		spin_unlock_irqrestore(&ctx->slock, lock_flags);
		schedule();
		spin_lock_irqsave(&ctx->slock, lock_flags);
	}

	finish_wait(&ctx->wq, &event_wait);

	memset(&event, 0, sizeof(event));
	event.header.process_element = ctx->pe;
	event.header.size = sizeof(struct cxl_event_header);
	if (ctx->pending_irq) {
		esize = sizeof(struct cxl_event_afu_interrupt);
		event.header.size += esize;
		event.header.type = CXL_EVENT_AFU_INTERRUPT;

		bit = find_first_bit(&ctx->irq_bitmap, ctx->num_irqs);
		clear_bit(bit, &ctx->irq_bitmap);
		event.irq.irq = bit + 1;
		if (bitmap_empty(&ctx->irq_bitmap, ctx->num_irqs))
			ctx->pending_irq = false;
	} else if (ctx->pending_fault) {
		event.header.size += sizeof(struct cxl_event_data_storage);
		event.header.type = CXL_EVENT_DATA_STORAGE;
		event.fault.addr = ctx->fault_addr;
		event.fault.dsisr = ctx->fault_dsisr;
		ctx->pending_fault = false;
	}

	spin_unlock_irqrestore(&ctx->slock, lock_flags);

	if (copy_to_user(buf, &event, event.header.size)) {
		dev_err(dev, "%s: copy_to_user failed\n", __func__);
		rc = -EFAULT;
		goto out;
	}

	rc = event.header.size;
out:
	return rc;
err:
	finish_wait(&ctx->wq, &event_wait);
	spin_unlock_irqrestore(&ctx->slock, lock_flags);
	goto out;
}
/**
 * afu_release() - release and free the context
 * @inode:	File inode pointer.
 * @file:	File associated with the context.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_release(struct inode *inode, struct file *file)
{
	struct ocxlflash_context *ctx = file->private_data;
	int i;

	/* Unmap and free the interrupts associated with the context */
	for (i = ctx->num_irqs - 1; i >= 0; i--)
		afu_unmap_irq(0, ctx, i, ctx);
	free_afu_irqs(ctx);

	return ocxlflash_release_context(ctx);
}

/**
 * ocxlflash_mmap_fault() - mmap fault handler
 * @vmf:	VM fault associated with current fault.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ocxlflash_context *ctx = vma->vm_file->private_data;
	struct device *dev = ctx->hw_afu->dev;
	u64 mmio_area, offset;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= ctx->psn_size)
		return VM_FAULT_SIGBUS;

	mutex_lock(&ctx->state_mutex);
	if (ctx->state != STARTED) {
		dev_err(dev, "%s: Context not started, state=%d\n",
			__func__, ctx->state);
		mutex_unlock(&ctx->state_mutex);
		return VM_FAULT_SIGBUS;
	}
	mutex_unlock(&ctx->state_mutex);

	mmio_area = ctx->psn_phys;
	mmio_area += offset;

	vm_insert_pfn(vma, vmf->address, mmio_area >> PAGE_SHIFT);
	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct ocxlflash_vmops = {
	.fault = ocxlflash_mmap_fault,
};

/**
 * afu_mmap() - map the fault handler operations
 * @file:	File associated with the context.
 * @vma:	VM area associated with mapping.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ocxlflash_context *ctx = file->private_data;

	if ((vma_pages(vma) + vma->vm_pgoff) >
	    (ctx->psn_size >> PAGE_SHIFT))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &ocxlflash_vmops;
	return 0;
}

static const struct file_operations ocxl_afu_fops = {
	.owner		= THIS_MODULE,
	.poll		= afu_poll,
	.read		= afu_read,
	.release	= afu_release,
	.mmap		= afu_mmap,
};

#define PATCH_FOPS(NAME)						\
	do { if (!fops->NAME) fops->NAME = ocxl_afu_fops.NAME; } while (0)
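/*
 * Callers of ocxlflash_get_fd() may supply a partially filled fops table;
 * PATCH_FOPS() backfills any entry that was left NULL with the default
 * from ocxl_afu_fops, so a caller only has to override the operations it
 * actually cares about.
 */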
/**
 * ocxlflash_get_fd() - get file descriptor for an adapter context
 * @ctx_cookie:	Adapter context.
 * @fops:	File operations to be associated.
 * @fd:		File descriptor to be returned back.
 *
 * Return: pointer to the file on success, ERR_PTR on failure
 */
static struct file *ocxlflash_get_fd(void *ctx_cookie,
				     struct file_operations *fops, int *fd)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct device *dev = ctx->hw_afu->dev;
	struct file *file;
	int flags, fdtmp;
	int rc = 0;
	char *name = NULL;

	/* Only allow one fd per context */
	if (ctx->mapping) {
		dev_err(dev, "%s: Context is already mapped to an fd\n",
			__func__);
		rc = -EEXIST;
		goto err1;
	}

	flags = O_RDWR | O_CLOEXEC;

	/* This code is similar to anon_inode_getfd() */
	rc = get_unused_fd_flags(flags);
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: get_unused_fd_flags failed rc=%d\n",
			__func__, rc);
		goto err1;
	}
	fdtmp = rc;

	/* Patch the file ops that are not defined */
	if (fops) {
		PATCH_FOPS(poll);
		PATCH_FOPS(read);
		PATCH_FOPS(release);
		PATCH_FOPS(mmap);
	} else {
		/* Use default ops */
		fops = (struct file_operations *)&ocxl_afu_fops;
	}

	name = kasprintf(GFP_KERNEL, "ocxlflash:%d", ctx->pe);
	if (unlikely(!name)) {
		rc = -ENOMEM;
		goto err2;
	}

	file = ocxlflash_getfile(dev, name, fops, ctx, flags);
	kfree(name);
	if (IS_ERR(file)) {
		rc = PTR_ERR(file);
		dev_err(dev, "%s: ocxlflash_getfile failed rc=%d\n",
			__func__, rc);
		goto err2;
	}

	ctx->mapping = file->f_mapping;
	*fd = fdtmp;
out:
	return file;
err2:
	put_unused_fd(fdtmp);
err1:
	file = ERR_PTR(rc);
	goto out;
}

/**
 * ocxlflash_fops_get_context() - get the context associated with the file
 * @file:	File associated with the adapter context.
 *
 * Return: pointer to the context
 */
static void *ocxlflash_fops_get_context(struct file *file)
{
	return file->private_data;
}

/**
 * ocxlflash_afu_irq() - interrupt handler for user contexts
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the context.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t ocxlflash_afu_irq(int irq, void *data)
{
	struct ocxlflash_context *ctx = data;
	struct device *dev = ctx->hw_afu->dev;
	int i;

	dev_dbg(dev, "%s: Interrupt raised for pe %i virq %i\n",
		__func__, ctx->pe, irq);

	for (i = 0; i < ctx->num_irqs; i++) {
		if (ctx->irqs[i].virq == irq)
			break;
	}
	if (unlikely(i >= ctx->num_irqs)) {
		dev_err(dev, "%s: Received AFU IRQ out of range\n", __func__);
		goto out;
	}

	spin_lock(&ctx->slock);
	/* Bit i maps to user-visible interrupt i + 1 (see afu_read()) */
	set_bit(i, &ctx->irq_bitmap);
	ctx->pending_irq = true;
	spin_unlock(&ctx->slock);

	wake_up_all(&ctx->wq);
out:
	return IRQ_HANDLED;
}
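/*
 * Note on interrupt numbering: ctx->irqs[] and ctx->irq_bitmap are
 * 0-based, while the cxl uapi reports AFU interrupts to userspace
 * 1-based; the conversion happens in afu_read() (event.irq.irq = bit + 1).
 *
 * ocxlflash_start_work() below performs the full user-context bring-up:
 * allocate the interrupt sources, wire each one to ocxlflash_afu_irq(),
 * then add the PE to the link via start_context().
 */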
/**
 * ocxlflash_start_work() - start a user context
 * @ctx_cookie:	Context to be started.
 * @num_irqs:	Number of interrupts requested.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_start_work(void *ctx_cookie, u64 num_irqs)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	char *name;
	int rc = 0;
	int i;

	rc = alloc_afu_irqs(ctx, num_irqs);
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: alloc_afu_irqs failed rc=%d\n", __func__, rc);
		goto out;
	}

	for (i = 0; i < num_irqs; i++) {
		name = kasprintf(GFP_KERNEL, "ocxlflash-%s-pe%i-%i",
				 dev_name(dev), ctx->pe, i);
		rc = afu_map_irq(0, ctx, i, ocxlflash_afu_irq, ctx, name);
		kfree(name);
		if (unlikely(rc < 0)) {
			dev_err(dev, "%s: afu_map_irq failed rc=%d\n",
				__func__, rc);
			goto err;
		}
	}

	rc = start_context(ctx);
	if (unlikely(rc)) {
		dev_err(dev, "%s: start_context failed rc=%d\n", __func__, rc);
		goto err;
	}
out:
	return rc;
err:
	for (i = i - 1; i >= 0; i--)
		afu_unmap_irq(0, ctx, i, ctx);
	free_afu_irqs(ctx);
	goto out;
}

/**
 * ocxlflash_fd_mmap() - mmap handler for adapter file descriptor
 * @file:	File installed with adapter file descriptor.
 * @vma:	VM area associated with mapping.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_fd_mmap(struct file *file, struct vm_area_struct *vma)
{
	return afu_mmap(file, vma);
}

/**
 * ocxlflash_fd_release() - release the context associated with the file
 * @inode:	File inode pointer.
 * @file:	File associated with the adapter context.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_fd_release(struct inode *inode, struct file *file)
{
	return afu_release(inode, file);
}

/* Backend ops to ocxlflash services */
const struct cxlflash_backend_ops cxlflash_ocxl_ops = {
	.module			= THIS_MODULE,
	.psa_map		= ocxlflash_psa_map,
	.psa_unmap		= ocxlflash_psa_unmap,
	.process_element	= ocxlflash_process_element,
	.map_afu_irq		= ocxlflash_map_afu_irq,
	.unmap_afu_irq		= ocxlflash_unmap_afu_irq,
	.get_irq_objhndl	= ocxlflash_get_irq_objhndl,
	.start_context		= ocxlflash_start_context,
	.stop_context		= ocxlflash_stop_context,
	.afu_reset		= ocxlflash_afu_reset,
	.set_master		= ocxlflash_set_master,
	.get_context		= ocxlflash_get_context,
	.dev_context_init	= ocxlflash_dev_context_init,
	.release_context	= ocxlflash_release_context,
	.perst_reloads_same_image = ocxlflash_perst_reloads_same_image,
	.read_adapter_vpd	= ocxlflash_read_adapter_vpd,
	.allocate_afu_irqs	= ocxlflash_allocate_afu_irqs,
	.free_afu_irqs		= ocxlflash_free_afu_irqs,
	.create_afu		= ocxlflash_create_afu,
	.destroy_afu		= ocxlflash_destroy_afu,
	.get_fd			= ocxlflash_get_fd,
	.fops_get_context	= ocxlflash_fops_get_context,
	.start_work		= ocxlflash_start_work,
	.fd_mmap		= ocxlflash_fd_mmap,
	.fd_release		= ocxlflash_fd_release,
};
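/*
 * Usage sketch (hypothetical consumer, for illustration only): the
 * cxlflash core is expected to drive this backend exclusively through
 * the ops table above, along the lines of:
 *
 *	afu = cxlflash_ocxl_ops.create_afu(pdev);
 *	ctx = cxlflash_ocxl_ops.get_context(pdev, afu);
 *	cxlflash_ocxl_ops.set_master(ctx);
 *	rc  = cxlflash_ocxl_ops.start_context(ctx);
 *	...
 *	cxlflash_ocxl_ops.stop_context(ctx);
 *	cxlflash_ocxl_ops.destroy_afu(afu);
 */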