/*
 * CXL Flash Device Driver
 *
 * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *	       Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2018 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/file.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>

#include <misc/ocxl.h>

#include <uapi/misc/cxl.h>

#include "backend.h"
#include "ocxl_hw.h"

/*
 * Pseudo-filesystem to allocate inodes.
 */

#define OCXLFLASH_FS_MAGIC	0x1697698f

static int ocxlflash_fs_cnt;
static struct vfsmount *ocxlflash_vfs_mount;

static const struct dentry_operations ocxlflash_fs_dops = {
	.d_dname	= simple_dname,
};

/*
 * ocxlflash_fs_mount() - mount the pseudo-filesystem
 * @fs_type:	File system type.
 * @flags:	Flags for the filesystem.
 * @dev_name:	Device name associated with the filesystem.
 * @data:	Data pointer.
 *
 * Return: pointer to the directory entry structure
 */
static struct dentry *ocxlflash_fs_mount(struct file_system_type *fs_type,
					 int flags, const char *dev_name,
					 void *data)
{
	return mount_pseudo(fs_type, "ocxlflash:", NULL, &ocxlflash_fs_dops,
			    OCXLFLASH_FS_MAGIC);
}

static struct file_system_type ocxlflash_fs_type = {
	.name		= "ocxlflash",
	.owner		= THIS_MODULE,
	.mount		= ocxlflash_fs_mount,
	.kill_sb	= kill_anon_super,
};

/*
 * ocxlflash_release_mapping() - release the memory mapping
 * @ctx:	Context whose mapping is to be released.
 */
static void ocxlflash_release_mapping(struct ocxlflash_context *ctx)
{
	if (ctx->mapping)
		simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
	ctx->mapping = NULL;
}
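/*
 * Note on the pseudo-filesystem: every adapter context fd is backed by an
 * anonymous inode from this private mount rather than from anon_inodefs,
 * so that each context can own a distinct address_space (ctx->mapping) for
 * its mmap. ocxlflash_getfile() below pins the mount via simple_pin_fs();
 * the pin is dropped either on the getfile error paths or, once the fd
 * exists, by ocxlflash_release_mapping() when the context is released.
 */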
/*
 * ocxlflash_getfile() - allocate pseudo filesystem, inode, and the file
 * @dev:	Generic device of the host.
 * @name:	Name of the pseudo filesystem.
 * @fops:	File operations.
 * @priv:	Private data.
 * @flags:	Flags for the file.
 *
 * Return: pointer to the file on success, ERR_PTR on failure
 */
static struct file *ocxlflash_getfile(struct device *dev, const char *name,
				      const struct file_operations *fops,
				      void *priv, int flags)
{
	struct qstr this;
	struct path path;
	struct file *file;
	struct inode *inode = NULL;
	int rc;

	if (fops->owner && !try_module_get(fops->owner)) {
		dev_err(dev, "%s: Owner does not exist\n", __func__);
		rc = -ENOENT;
		goto err1;
	}

	rc = simple_pin_fs(&ocxlflash_fs_type, &ocxlflash_vfs_mount,
			   &ocxlflash_fs_cnt);
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: Cannot mount ocxlflash pseudofs rc=%d\n",
			__func__, rc);
		goto err2;
	}

	inode = alloc_anon_inode(ocxlflash_vfs_mount->mnt_sb);
	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		dev_err(dev, "%s: alloc_anon_inode failed rc=%d\n",
			__func__, rc);
		goto err3;
	}

	this.name = name;
	this.len = strlen(name);
	this.hash = 0;
	path.dentry = d_alloc_pseudo(ocxlflash_vfs_mount->mnt_sb, &this);
	if (!path.dentry) {
		dev_err(dev, "%s: d_alloc_pseudo failed\n", __func__);
		rc = -ENOMEM;
		goto err4;
	}

	path.mnt = mntget(ocxlflash_vfs_mount);
	d_instantiate(path.dentry, inode);

	file = alloc_file(&path, OPEN_FMODE(flags), fops);
	if (IS_ERR(file)) {
		rc = PTR_ERR(file);
		dev_err(dev, "%s: alloc_file failed rc=%d\n",
			__func__, rc);
		path_put(&path);
		goto err3;
	}

	file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
	file->private_data = priv;
out:
	return file;
err4:
	iput(inode);
err3:
	simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
err2:
	module_put(fops->owner);
err1:
	file = ERR_PTR(rc);
	goto out;
}

/**
 * ocxlflash_psa_map() - map the process specific MMIO space
 * @ctx_cookie:	Adapter context for which the mapping needs to be done.
 *
 * Return: MMIO pointer of the mapped region
 */
static void __iomem *ocxlflash_psa_map(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct device *dev = ctx->hw_afu->dev;

	mutex_lock(&ctx->state_mutex);
	if (ctx->state != STARTED) {
		dev_err(dev, "%s: Context not started, state=%d\n", __func__,
			ctx->state);
		mutex_unlock(&ctx->state_mutex);
		return NULL;
	}
	mutex_unlock(&ctx->state_mutex);

	return ioremap(ctx->psn_phys, ctx->psn_size);
}

/**
 * ocxlflash_psa_unmap() - unmap the process specific MMIO space
 * @addr:	MMIO pointer to unmap.
 */
static void ocxlflash_psa_unmap(void __iomem *addr)
{
	iounmap(addr);
}

/**
 * ocxlflash_process_element() - get process element of the adapter context
 * @ctx_cookie:	Adapter context associated with the process element.
 *
 * Return: process element of the adapter context
 */
static int ocxlflash_process_element(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;

	return ctx->pe;
}
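/*
 * Note that ocxlflash_psa_map() only samples the context state: the mutex
 * is dropped before ioremap(), so nothing in this file prevents the context
 * from being stopped while the mapping is created. The cxlflash core is
 * expected to serialize psa_map/psa_unmap against context teardown.
 */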
/**
 * afu_map_irq() - map the interrupt of the adapter context
 * @flags:	Flags.
 * @ctx:	Adapter context.
 * @num:	Per-context AFU interrupt number.
 * @handler:	Interrupt handler to register.
 * @cookie:	Interrupt handler private data.
 * @name:	Name of the interrupt.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_map_irq(u64 flags, struct ocxlflash_context *ctx, int num,
		       irq_handler_t handler, void *cookie, char *name)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	struct ocxlflash_irqs *irq;
	void __iomem *vtrig;
	u32 virq;
	int rc = 0;

	if (num < 0 || num >= ctx->num_irqs) {
		dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
		rc = -ENOENT;
		goto out;
	}

	irq = &ctx->irqs[num];
	virq = irq_create_mapping(NULL, irq->hwirq);
	if (unlikely(!virq)) {
		dev_err(dev, "%s: irq_create_mapping failed\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	rc = request_irq(virq, handler, 0, name, cookie);
	if (unlikely(rc)) {
		dev_err(dev, "%s: request_irq failed rc=%d\n", __func__, rc);
		goto err1;
	}

	vtrig = ioremap(irq->ptrig, PAGE_SIZE);
	if (unlikely(!vtrig)) {
		dev_err(dev, "%s: Trigger page mapping failed\n", __func__);
		rc = -ENOMEM;
		goto err2;
	}

	irq->virq = virq;
	irq->vtrig = vtrig;
out:
	return rc;
err2:
	free_irq(virq, cookie);
err1:
	irq_dispose_mapping(virq);
	goto out;
}

/**
 * ocxlflash_map_afu_irq() - map the interrupt of the adapter context
 * @ctx_cookie:	Adapter context.
 * @num:	Per-context AFU interrupt number.
 * @handler:	Interrupt handler to register.
 * @cookie:	Interrupt handler private data.
 * @name:	Name of the interrupt.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_map_afu_irq(void *ctx_cookie, int num,
				 irq_handler_t handler, void *cookie,
				 char *name)
{
	return afu_map_irq(0, ctx_cookie, num, handler, cookie, name);
}

/**
 * afu_unmap_irq() - unmap the interrupt
 * @flags:	Flags.
 * @ctx:	Adapter context.
 * @num:	Per-context AFU interrupt number.
 * @cookie:	Interrupt handler private data.
 */
static void afu_unmap_irq(u64 flags, struct ocxlflash_context *ctx, int num,
			  void *cookie)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	struct ocxlflash_irqs *irq;

	if (num < 0 || num >= ctx->num_irqs) {
		dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
		return;
	}

	irq = &ctx->irqs[num];
	if (irq->vtrig)
		iounmap(irq->vtrig);

	if (irq_find_mapping(NULL, irq->hwirq)) {
		free_irq(irq->virq, cookie);
		irq_dispose_mapping(irq->virq);
	}

	memset(irq, 0, sizeof(*irq));
}

/**
 * ocxlflash_unmap_afu_irq() - unmap the interrupt
 * @ctx_cookie:	Adapter context.
 * @num:	Per-context AFU interrupt number.
 * @cookie:	Interrupt handler private data.
 */
static void ocxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie)
{
	return afu_unmap_irq(0, ctx_cookie, num, cookie);
}
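/*
 * Each AFU interrupt allocated through ocxl_link_irq_alloc() comes with a
 * physical trigger page (ptrig): an MMIO write to that page fires the
 * interrupt. afu_map_irq() keeps a kernel mapping of the page in irq->vtrig,
 * and ocxlflash_get_irq_objhndl() below hands that address out as the
 * object handle the AFU uses to raise the interrupt.
 */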
/**
 * ocxlflash_get_irq_objhndl() - get the object handle for an interrupt
 * @ctx_cookie:	Context associated with the interrupt.
 * @irq:	Interrupt number.
 *
 * Return: effective address of the mapped region
 */
static u64 ocxlflash_get_irq_objhndl(void *ctx_cookie, int irq)
{
	struct ocxlflash_context *ctx = ctx_cookie;

	if (irq < 0 || irq >= ctx->num_irqs)
		return 0;

	return (__force u64)ctx->irqs[irq].vtrig;
}

/**
 * ocxlflash_xsl_fault() - callback when translation error is triggered
 * @data:	Private data provided at callback registration, the context.
 * @addr:	Address that triggered the error.
 * @dsisr:	Value of dsisr register.
 */
static void ocxlflash_xsl_fault(void *data, u64 addr, u64 dsisr)
{
	struct ocxlflash_context *ctx = data;

	spin_lock(&ctx->slock);
	ctx->fault_addr = addr;
	ctx->fault_dsisr = dsisr;
	ctx->pending_fault = true;
	spin_unlock(&ctx->slock);

	wake_up_all(&ctx->wq);
}

/**
 * start_context() - local routine to start a context
 * @ctx:	Adapter context to be started.
 *
 * Assign the context specific MMIO space, add and enable the PE.
 *
 * Return: 0 on success, -errno on failure
 */
static int start_context(struct ocxlflash_context *ctx)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct ocxl_afu_config *acfg = &afu->acfg;
	void *link_token = afu->link_token;
	struct device *dev = afu->dev;
	bool master = ctx->master;
	struct mm_struct *mm;
	int rc = 0;
	u32 pid;

	mutex_lock(&ctx->state_mutex);
	if (ctx->state != OPENED) {
		dev_err(dev, "%s: Context state invalid, state=%d\n",
			__func__, ctx->state);
		rc = -EINVAL;
		goto out;
	}

	if (master) {
		ctx->psn_size = acfg->global_mmio_size;
		ctx->psn_phys = afu->gmmio_phys;
	} else {
		ctx->psn_size = acfg->pp_mmio_stride;
		ctx->psn_phys = afu->ppmmio_phys + (ctx->pe * ctx->psn_size);
	}

	/* pid and mm not set for master contexts */
	if (master) {
		pid = 0;
		mm = NULL;
	} else {
		pid = current->mm->context.id;
		mm = current->mm;
	}

	rc = ocxl_link_add_pe(link_token, ctx->pe, pid, 0, 0, mm,
			      ocxlflash_xsl_fault, ctx);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_link_add_pe failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	ctx->state = STARTED;
out:
	mutex_unlock(&ctx->state_mutex);
	return rc;
}

/**
 * ocxlflash_start_context() - start a kernel context
 * @ctx_cookie:	Adapter context to be started.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_start_context(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;

	return start_context(ctx);
}
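/*
 * Master contexts differ from user contexts in two ways, both visible in
 * start_context() above: they are given the AFU global MMIO space instead
 * of a pp_mmio_stride sized slice of the per-process area indexed by their
 * process element, and they pass a zero pid and NULL mm to
 * ocxl_link_add_pe() since no user address space is attached to them.
 */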
/**
 * ocxlflash_stop_context() - stop a context
 * @ctx_cookie:	Adapter context to be stopped.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_stop_context(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct ocxl_afu_config *acfg = &afu->acfg;
	struct pci_dev *pdev = afu->pdev;
	struct device *dev = afu->dev;
	enum ocxlflash_ctx_state state;
	int rc = 0;

	mutex_lock(&ctx->state_mutex);
	state = ctx->state;
	ctx->state = CLOSED;
	mutex_unlock(&ctx->state_mutex);
	if (state != STARTED)
		goto out;

	rc = ocxl_config_terminate_pasid(pdev, acfg->dvsec_afu_control_pos,
					 ctx->pe);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_terminate_pasid failed rc=%d\n",
			__func__, rc);
		/* If EBUSY, PE could be referenced in future by the AFU */
		if (rc == -EBUSY)
			goto out;
	}

	rc = ocxl_link_remove_pe(afu->link_token, ctx->pe);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_link_remove_pe failed rc=%d\n",
			__func__, rc);
		goto out;
	}
out:
	return rc;
}

/**
 * ocxlflash_afu_reset() - reset the AFU
 * @ctx_cookie:	Adapter context.
 *
 * Return: 0 on success
 */
static int ocxlflash_afu_reset(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct device *dev = ctx->hw_afu->dev;

	/* Pending implementation from OCXL transport services */
	dev_err_once(dev, "%s: afu_reset() fop not supported\n", __func__);

	/* Silently return success until it is implemented */
	return 0;
}

/**
 * ocxlflash_set_master() - sets the context as master
 * @ctx_cookie:	Adapter context to set as master.
 */
static void ocxlflash_set_master(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;

	ctx->master = true;
}

/**
 * ocxlflash_get_context() - obtains the context associated with the host
 * @pdev:	PCI device associated with the host.
 * @afu_cookie:	Hardware AFU associated with the host.
 *
 * Return: pointer to the host adapter context
 */
static void *ocxlflash_get_context(struct pci_dev *pdev, void *afu_cookie)
{
	struct ocxl_hw_afu *afu = afu_cookie;

	return afu->ocxl_ctx;
}
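/*
 * Context lifecycle: a context is created OPENED by
 * ocxlflash_dev_context_init() below, becomes STARTED once its process
 * element is added to the link, and is marked CLOSED by
 * ocxlflash_stop_context() above. Transitions are serialized by
 * ctx->state_mutex, and only a context that never started (or has been
 * stopped) may be released.
 */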
/**
 * ocxlflash_dev_context_init() - allocate and initialize an adapter context
 * @pdev:	PCI device associated with the host.
 * @afu_cookie:	Hardware AFU associated with the host.
 *
 * Return: adapter context on success, ERR_PTR on failure
 */
static void *ocxlflash_dev_context_init(struct pci_dev *pdev, void *afu_cookie)
{
	struct ocxl_hw_afu *afu = afu_cookie;
	struct device *dev = afu->dev;
	struct ocxlflash_context *ctx;
	int rc;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (unlikely(!ctx)) {
		dev_err(dev, "%s: Context allocation failed\n", __func__);
		rc = -ENOMEM;
		goto err1;
	}

	idr_preload(GFP_KERNEL);
	rc = idr_alloc(&afu->idr, ctx, 0, afu->max_pasid, GFP_NOWAIT);
	idr_preload_end();
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: idr_alloc failed rc=%d\n", __func__, rc);
		goto err2;
	}

	spin_lock_init(&ctx->slock);
	init_waitqueue_head(&ctx->wq);
	mutex_init(&ctx->state_mutex);

	ctx->state = OPENED;
	ctx->pe = rc;
	ctx->master = false;
	ctx->mapping = NULL;
	ctx->hw_afu = afu;
	ctx->irq_bitmap = 0;
	ctx->pending_irq = false;
	ctx->pending_fault = false;
out:
	return ctx;
err2:
	kfree(ctx);
err1:
	ctx = ERR_PTR(rc);
	goto out;
}

/**
 * ocxlflash_release_context() - releases an adapter context
 * @ctx_cookie:	Adapter context to be released.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_release_context(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct device *dev;
	int rc = 0;

	if (!ctx)
		goto out;

	dev = ctx->hw_afu->dev;
	mutex_lock(&ctx->state_mutex);
	if (ctx->state >= STARTED) {
		dev_err(dev, "%s: Context in use, state=%d\n", __func__,
			ctx->state);
		mutex_unlock(&ctx->state_mutex);
		rc = -EBUSY;
		goto out;
	}
	mutex_unlock(&ctx->state_mutex);

	idr_remove(&ctx->hw_afu->idr, ctx->pe);
	ocxlflash_release_mapping(ctx);
	kfree(ctx);
out:
	return rc;
}

/**
 * ocxlflash_perst_reloads_same_image() - sets the image reload policy
 * @afu_cookie:	Hardware AFU associated with the host.
 * @image:	Whether to load the same image on PERST.
 */
static void ocxlflash_perst_reloads_same_image(void *afu_cookie, bool image)
{
	struct ocxl_hw_afu *afu = afu_cookie;

	afu->perst_same_image = image;
}

/**
 * ocxlflash_read_adapter_vpd() - reads the adapter VPD
 * @pdev:	PCI device associated with the host.
 * @buf:	Buffer to get the VPD data.
 * @count:	Size of buffer (maximum bytes that can be read).
 *
 * Return: size of VPD on success, -errno on failure
 */
static ssize_t ocxlflash_read_adapter_vpd(struct pci_dev *pdev, void *buf,
					  size_t count)
{
	return pci_read_vpd(pdev, 0, count, buf);
}

/**
 * free_afu_irqs() - internal service to free interrupts
 * @ctx:	Adapter context.
 */
static void free_afu_irqs(struct ocxlflash_context *ctx)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	int i;

	if (!ctx->irqs) {
		dev_err(dev, "%s: Interrupts not allocated\n", __func__);
		return;
	}

	/* Last valid entry is num_irqs - 1; do not read past the array */
	for (i = ctx->num_irqs - 1; i >= 0; i--)
		ocxl_link_free_irq(afu->link_token, ctx->irqs[i].hwirq);

	kfree(ctx->irqs);
	ctx->irqs = NULL;
}
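/*
 * AFU interrupt bookkeeping: alloc_afu_irqs() below reserves hardware irqs
 * and their trigger pages from the OCXL link, afu_map_irq() later wires a
 * handler to each, and teardown happens in the reverse order (unmap, then
 * free), as done in afu_release().
 */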
/**
 * alloc_afu_irqs() - internal service to allocate interrupts
 * @ctx:	Context associated with the request.
 * @num:	Number of interrupts requested.
 *
 * Return: 0 on success, -errno on failure
 */
static int alloc_afu_irqs(struct ocxlflash_context *ctx, int num)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	struct ocxlflash_irqs *irqs;
	u64 addr;
	int rc = 0;
	int hwirq;
	int i;

	if (ctx->irqs) {
		dev_err(dev, "%s: Interrupts already allocated\n", __func__);
		rc = -EEXIST;
		goto out;
	}

	if (num > OCXL_MAX_IRQS) {
		dev_err(dev, "%s: Too many interrupts num=%d\n", __func__, num);
		rc = -EINVAL;
		goto out;
	}

	irqs = kcalloc(num, sizeof(*irqs), GFP_KERNEL);
	if (unlikely(!irqs)) {
		dev_err(dev, "%s: Context irqs allocation failed\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num; i++) {
		rc = ocxl_link_irq_alloc(afu->link_token, &hwirq, &addr);
		if (unlikely(rc)) {
			dev_err(dev, "%s: ocxl_link_irq_alloc failed rc=%d\n",
				__func__, rc);
			goto err;
		}

		irqs[i].hwirq = hwirq;
		irqs[i].ptrig = addr;
	}

	ctx->irqs = irqs;
	ctx->num_irqs = num;
out:
	return rc;
err:
	for (i = i - 1; i >= 0; i--)
		ocxl_link_free_irq(afu->link_token, irqs[i].hwirq);
	kfree(irqs);
	goto out;
}

/**
 * ocxlflash_allocate_afu_irqs() - allocates the requested number of interrupts
 * @ctx_cookie:	Context associated with the request.
 * @num:	Number of interrupts requested.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_allocate_afu_irqs(void *ctx_cookie, int num)
{
	return alloc_afu_irqs(ctx_cookie, num);
}

/**
 * ocxlflash_free_afu_irqs() - frees the interrupts of an adapter context
 * @ctx_cookie:	Adapter context.
 */
static void ocxlflash_free_afu_irqs(void *ctx_cookie)
{
	free_afu_irqs(ctx_cookie);
}

/**
 * ocxlflash_unconfig_afu() - unconfigure the AFU
 * @afu:	AFU associated with the host.
 */
static void ocxlflash_unconfig_afu(struct ocxl_hw_afu *afu)
{
	if (afu->gmmio_virt) {
		iounmap(afu->gmmio_virt);
		afu->gmmio_virt = NULL;
	}
}

/**
 * ocxlflash_destroy_afu() - destroy the AFU structure
 * @afu_cookie:	AFU to be freed.
 */
static void ocxlflash_destroy_afu(void *afu_cookie)
{
	struct ocxl_hw_afu *afu = afu_cookie;
	int pos;

	if (!afu)
		return;

	ocxlflash_release_context(afu->ocxl_ctx);
	idr_destroy(&afu->idr);

	/* Disable the AFU */
	pos = afu->acfg.dvsec_afu_control_pos;
	ocxl_config_set_afu_state(afu->pdev, pos, 0);

	ocxlflash_unconfig_afu(afu);
	kfree(afu);
}
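/*
 * Host initialization happens in two stages: ocxlflash_config_fn() below
 * sets up the PCI function (DVSEC discovery, acTag assignment, OCXL link
 * setup and transaction layer configuration), after which
 * ocxlflash_config_afu() configures the lone AFU and maps its MMIO spaces.
 * ocxlflash_create_afu() drives the whole sequence.
 */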
/**
 * ocxlflash_config_fn() - configure the host function
 * @pdev:	PCI device associated with the host.
 * @afu:	AFU associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_config_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
{
	struct ocxl_fn_config *fcfg = &afu->fcfg;
	struct device *dev = &pdev->dev;
	u16 base, enabled, supported;
	int rc = 0;

	/* Read DVSEC config of the function */
	rc = ocxl_config_read_function(pdev, fcfg);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_read_function failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	/* Check if function has AFUs defined, only 1 per function supported */
	if (fcfg->max_afu_index >= 0) {
		afu->is_present = true;
		if (fcfg->max_afu_index != 0)
			dev_warn(dev, "%s: Unexpected AFU index value %d\n",
				 __func__, fcfg->max_afu_index);
	}

	rc = ocxl_config_get_actag_info(pdev, &base, &enabled, &supported);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_get_actag_info failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	afu->fn_actag_base = base;
	afu->fn_actag_enabled = enabled;

	ocxl_config_set_actag(pdev, fcfg->dvsec_function_pos, base, enabled);
	dev_dbg(dev, "%s: Function acTag range base=%u enabled=%u\n",
		__func__, base, enabled);

	rc = ocxl_link_setup(pdev, 0, &afu->link_token);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_link_setup failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	rc = ocxl_config_set_TL(pdev, fcfg->dvsec_tl_pos);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_set_TL failed rc=%d\n",
			__func__, rc);
		goto err;
	}
out:
	return rc;
err:
	ocxl_link_release(pdev, afu->link_token);
	goto out;
}

/**
 * ocxlflash_unconfig_fn() - unconfigure the host function
 * @pdev:	PCI device associated with the host.
 * @afu:	AFU associated with the host.
 */
static void ocxlflash_unconfig_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
{
	ocxl_link_release(pdev, afu->link_token);
}

/**
 * ocxlflash_map_mmio() - map the AFU MMIO space
 * @afu:	AFU associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_map_mmio(struct ocxl_hw_afu *afu)
{
	struct ocxl_afu_config *acfg = &afu->acfg;
	struct pci_dev *pdev = afu->pdev;
	struct device *dev = afu->dev;
	phys_addr_t gmmio, ppmmio;
	int rc = 0;

	rc = pci_request_region(pdev, acfg->global_mmio_bar, "ocxlflash");
	if (unlikely(rc)) {
		dev_err(dev, "%s: pci_request_region for global failed rc=%d\n",
			__func__, rc);
		goto out;
	}
	gmmio = pci_resource_start(pdev, acfg->global_mmio_bar);
	gmmio += acfg->global_mmio_offset;

	rc = pci_request_region(pdev, acfg->pp_mmio_bar, "ocxlflash");
	if (unlikely(rc)) {
		dev_err(dev, "%s: pci_request_region for pp bar failed rc=%d\n",
			__func__, rc);
		goto err1;
	}
	ppmmio = pci_resource_start(pdev, acfg->pp_mmio_bar);
	ppmmio += acfg->pp_mmio_offset;

	afu->gmmio_virt = ioremap(gmmio, acfg->global_mmio_size);
	if (unlikely(!afu->gmmio_virt)) {
		dev_err(dev, "%s: MMIO mapping failed\n", __func__);
		rc = -ENOMEM;
		goto err2;
	}

	afu->gmmio_phys = gmmio;
	afu->ppmmio_phys = ppmmio;
out:
	return rc;
err2:
	pci_release_region(pdev, acfg->pp_mmio_bar);
err1:
	pci_release_region(pdev, acfg->global_mmio_bar);
	goto out;
}
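/*
 * Note that ocxlflash_map_mmio() only ioremaps the global MMIO space; the
 * per-process MMIO base (ppmmio_phys) is recorded as a physical address and
 * carved into per-context slices later, in start_context(), with each
 * context mapping its own slice via ocxlflash_psa_map() or the mmap fault
 * handler.
 */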
/**
 * ocxlflash_config_afu() - configure the host AFU
 * @pdev:	PCI device associated with the host.
 * @afu:	AFU associated with the host.
 *
 * Must be called _after_ host function configuration.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_config_afu(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
{
	struct ocxl_afu_config *acfg = &afu->acfg;
	struct ocxl_fn_config *fcfg = &afu->fcfg;
	struct device *dev = &pdev->dev;
	int count;
	int base;
	int pos;
	int rc = 0;

	/* This HW AFU function does not have any AFUs defined */
	if (!afu->is_present)
		goto out;

	/* Read AFU config at index 0 */
	rc = ocxl_config_read_afu(pdev, fcfg, acfg, 0);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_read_afu failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	/* Only one AFU per function is supported, so actag_base is same */
	base = afu->fn_actag_base;
	count = min_t(int, acfg->actag_supported, afu->fn_actag_enabled);
	pos = acfg->dvsec_afu_control_pos;

	ocxl_config_set_afu_actag(pdev, pos, base, count);
	dev_dbg(dev, "%s: acTag base=%d enabled=%d\n", __func__, base, count);
	afu->afu_actag_base = base;
	afu->afu_actag_enabled = count;
	afu->max_pasid = 1 << acfg->pasid_supported_log;

	ocxl_config_set_afu_pasid(pdev, pos, 0, acfg->pasid_supported_log);

	rc = ocxlflash_map_mmio(afu);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxlflash_map_mmio failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	/* Enable the AFU */
	ocxl_config_set_afu_state(pdev, acfg->dvsec_afu_control_pos, 1);
out:
	return rc;
}

/**
 * ocxlflash_create_afu() - create the AFU for OCXL
 * @pdev:	PCI device associated with the host.
 *
 * Return: AFU on success, NULL on failure
 */
static void *ocxlflash_create_afu(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ocxlflash_context *ctx;
	struct ocxl_hw_afu *afu;
	int rc;

	afu = kzalloc(sizeof(*afu), GFP_KERNEL);
	if (unlikely(!afu)) {
		dev_err(dev, "%s: HW AFU allocation failed\n", __func__);
		goto out;
	}

	afu->pdev = pdev;
	afu->dev = dev;
	idr_init(&afu->idr);

	rc = ocxlflash_config_fn(pdev, afu);
	if (unlikely(rc)) {
		dev_err(dev, "%s: Function configuration failed rc=%d\n",
			__func__, rc);
		goto err1;
	}

	rc = ocxlflash_config_afu(pdev, afu);
	if (unlikely(rc)) {
		dev_err(dev, "%s: AFU configuration failed rc=%d\n",
			__func__, rc);
		goto err2;
	}

	ctx = ocxlflash_dev_context_init(pdev, afu);
	if (IS_ERR(ctx)) {
		rc = PTR_ERR(ctx);
		dev_err(dev, "%s: ocxlflash_dev_context_init failed rc=%d\n",
			__func__, rc);
		goto err3;
	}

	afu->ocxl_ctx = ctx;
out:
	return afu;
err3:
	ocxlflash_unconfig_afu(afu);
err2:
	ocxlflash_unconfig_fn(pdev, afu);
err1:
	idr_destroy(&afu->idr);
	kfree(afu);
	afu = NULL;
	goto out;
}

/**
 * ctx_event_pending() - check for any event pending on the context
 * @ctx:	Context to be checked.
 *
 * Return: true if there is an event pending, false if none pending
 */
static inline bool ctx_event_pending(struct ocxlflash_context *ctx)
{
	if (ctx->pending_irq || ctx->pending_fault)
		return true;

	return false;
}
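/*
 * The adapter fd surfaces two event sources to its owner: AFU interrupts
 * (ocxlflash_afu_irq()) and translation faults (ocxlflash_xsl_fault()).
 * Both record their payload under ctx->slock, set the pending flag checked
 * by ctx_event_pending() above and wake ctx->wq, which the poll and read
 * file operations below consume.
 */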
/**
 * afu_poll() - poll the AFU for events on the context
 * @file:	File associated with the adapter context.
 * @poll:	Poll structure from the user.
 *
 * Return: poll mask
 */
static unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
{
	struct ocxlflash_context *ctx = file->private_data;
	struct device *dev = ctx->hw_afu->dev;
	ulong lock_flags;
	int mask = 0;

	poll_wait(file, &ctx->wq, poll);

	spin_lock_irqsave(&ctx->slock, lock_flags);
	if (ctx_event_pending(ctx))
		mask |= POLLIN | POLLRDNORM;
	else if (ctx->state == CLOSED)
		mask |= POLLERR;
	spin_unlock_irqrestore(&ctx->slock, lock_flags);

	dev_dbg(dev, "%s: Poll wait completed for pe %i mask %i\n",
		__func__, ctx->pe, mask);

	return mask;
}

/**
 * afu_read() - perform a read on the context for any event
 * @file:	File associated with the adapter context.
 * @buf:	Buffer to receive the data.
 * @count:	Size of buffer (maximum bytes that can be read).
 * @off:	Offset.
 *
 * Return: size of the data read on success, -errno on failure
 */
static ssize_t afu_read(struct file *file, char __user *buf, size_t count,
			loff_t *off)
{
	struct ocxlflash_context *ctx = file->private_data;
	struct device *dev = ctx->hw_afu->dev;
	struct cxl_event event;
	ulong lock_flags;
	ssize_t esize;
	ssize_t rc;
	int bit;
	DEFINE_WAIT(event_wait);

	if (*off != 0) {
		dev_err(dev, "%s: Non-zero offset not supported, off=%lld\n",
			__func__, *off);
		rc = -EINVAL;
		goto out;
	}

	spin_lock_irqsave(&ctx->slock, lock_flags);

	for (;;) {
		prepare_to_wait(&ctx->wq, &event_wait, TASK_INTERRUPTIBLE);

		if (ctx_event_pending(ctx) || (ctx->state == CLOSED))
			break;

		if (file->f_flags & O_NONBLOCK) {
			dev_err(dev, "%s: File cannot be blocked on I/O\n",
				__func__);
			rc = -EAGAIN;
			goto err;
		}

		if (signal_pending(current)) {
			dev_err(dev, "%s: Signal pending on the process\n",
				__func__);
			rc = -ERESTARTSYS;
			goto err;
		}

		spin_unlock_irqrestore(&ctx->slock, lock_flags);
		schedule();
		spin_lock_irqsave(&ctx->slock, lock_flags);
	}

	finish_wait(&ctx->wq, &event_wait);

	memset(&event, 0, sizeof(event));
	event.header.process_element = ctx->pe;
	event.header.size = sizeof(struct cxl_event_header);
	if (ctx->pending_irq) {
		esize = sizeof(struct cxl_event_afu_interrupt);
		event.header.size += esize;
		event.header.type = CXL_EVENT_AFU_INTERRUPT;

		bit = find_first_bit(&ctx->irq_bitmap, ctx->num_irqs);
		clear_bit(bit, &ctx->irq_bitmap);
		event.irq.irq = bit + 1;
		if (bitmap_empty(&ctx->irq_bitmap, ctx->num_irqs))
			ctx->pending_irq = false;
	} else if (ctx->pending_fault) {
		event.header.size += sizeof(struct cxl_event_data_storage);
		event.header.type = CXL_EVENT_DATA_STORAGE;
		event.fault.addr = ctx->fault_addr;
		event.fault.dsisr = ctx->fault_dsisr;
		ctx->pending_fault = false;
	}

	spin_unlock_irqrestore(&ctx->slock, lock_flags);

	if (copy_to_user(buf, &event, event.header.size)) {
		dev_err(dev, "%s: copy_to_user failed\n", __func__);
		rc = -EFAULT;
		goto out;
	}

	rc = event.header.size;
out:
	return rc;
err:
	finish_wait(&ctx->wq, &event_wait);
	spin_unlock_irqrestore(&ctx->slock, lock_flags);
	goto out;
}
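/*
 * Userspace consumes these events with a plain read() of a struct cxl_event
 * (uapi/misc/cxl.h). A minimal, illustrative consumer (not part of this
 * driver; handle_irq() and handle_fault() are hypothetical application
 * hooks) could look like:
 *
 *	struct cxl_event event;
 *	ssize_t len = read(ctx_fd, &event, sizeof(event));
 *
 *	if (len >= (ssize_t)sizeof(event.header)) {
 *		switch (event.header.type) {
 *		case CXL_EVENT_AFU_INTERRUPT:
 *			handle_irq(event.irq.irq);
 *			break;
 *		case CXL_EVENT_DATA_STORAGE:
 *			handle_fault(event.fault.addr, event.fault.dsisr);
 *			break;
 *		}
 *	}
 */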
/**
 * afu_release() - release and free the context
 * @inode:	File inode pointer.
 * @file:	File associated with the context.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_release(struct inode *inode, struct file *file)
{
	struct ocxlflash_context *ctx = file->private_data;
	int i;

	/* Unmap and free the interrupts associated with the context */
	for (i = ctx->num_irqs - 1; i >= 0; i--)
		afu_unmap_irq(0, ctx, i, ctx);
	free_afu_irqs(ctx);

	return ocxlflash_release_context(ctx);
}

/**
 * ocxlflash_mmap_fault() - mmap fault handler
 * @vmf:	VM fault associated with current fault.
 *
 * Return: VM_FAULT_NOPAGE on success, VM_FAULT_SIGBUS on failure
 */
static int ocxlflash_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ocxlflash_context *ctx = vma->vm_file->private_data;
	struct device *dev = ctx->hw_afu->dev;
	u64 mmio_area, offset;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= ctx->psn_size)
		return VM_FAULT_SIGBUS;

	mutex_lock(&ctx->state_mutex);
	if (ctx->state != STARTED) {
		dev_err(dev, "%s: Context not started, state=%d\n",
			__func__, ctx->state);
		mutex_unlock(&ctx->state_mutex);
		return VM_FAULT_SIGBUS;
	}
	mutex_unlock(&ctx->state_mutex);

	mmio_area = ctx->psn_phys;
	mmio_area += offset;

	vm_insert_pfn(vma, vmf->address, mmio_area >> PAGE_SHIFT);
	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct ocxlflash_vmops = {
	.fault = ocxlflash_mmap_fault,
};

/**
 * afu_mmap() - map the fault handler operations
 * @file:	File associated with the context.
 * @vma:	VM area associated with mapping.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ocxlflash_context *ctx = file->private_data;

	if ((vma_pages(vma) + vma->vm_pgoff) >
	    (ctx->psn_size >> PAGE_SHIFT))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &ocxlflash_vmops;
	return 0;
}

static const struct file_operations ocxl_afu_fops = {
	.owner		= THIS_MODULE,
	.poll		= afu_poll,
	.read		= afu_read,
	.release	= afu_release,
	.mmap		= afu_mmap,
};

#define PATCH_FOPS(NAME)						\
	do { if (!fops->NAME) fops->NAME = ocxl_afu_fops.NAME; } while (0)
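/*
 * PATCH_FOPS() fills in only the file operations the caller left NULL, so a
 * caller of ocxlflash_get_fd() can override individual fops (for example,
 * its own release handler) while inheriting the defaults above for the
 * rest.
 */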
/**
 * ocxlflash_get_fd() - get file descriptor for an adapter context
 * @ctx_cookie:	Adapter context.
 * @fops:	File operations to be associated.
 * @fd:	File descriptor to be returned to the caller.
 *
 * Return: pointer to the file on success, ERR_PTR on failure
 */
static struct file *ocxlflash_get_fd(void *ctx_cookie,
				     struct file_operations *fops, int *fd)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct device *dev = ctx->hw_afu->dev;
	struct file *file;
	int flags, fdtmp;
	int rc = 0;
	char *name = NULL;

	/* Only allow one fd per context */
	if (ctx->mapping) {
		dev_err(dev, "%s: Context is already mapped to an fd\n",
			__func__);
		rc = -EEXIST;
		goto err1;
	}

	flags = O_RDWR | O_CLOEXEC;

	/* This code is similar to anon_inode_getfd() */
	rc = get_unused_fd_flags(flags);
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: get_unused_fd_flags failed rc=%d\n",
			__func__, rc);
		goto err1;
	}
	fdtmp = rc;

	/* Patch the file ops that are not defined */
	if (fops) {
		PATCH_FOPS(poll);
		PATCH_FOPS(read);
		PATCH_FOPS(release);
		PATCH_FOPS(mmap);
	} else /* Use default ops */
		fops = (struct file_operations *)&ocxl_afu_fops;

	name = kasprintf(GFP_KERNEL, "ocxlflash:%d", ctx->pe);
	file = ocxlflash_getfile(dev, name, fops, ctx, flags);
	kfree(name);
	if (IS_ERR(file)) {
		rc = PTR_ERR(file);
		dev_err(dev, "%s: ocxlflash_getfile failed rc=%d\n",
			__func__, rc);
		goto err2;
	}

	ctx->mapping = file->f_mapping;
	*fd = fdtmp;
out:
	return file;
err2:
	put_unused_fd(fdtmp);
err1:
	file = ERR_PTR(rc);
	goto out;
}

/**
 * ocxlflash_fops_get_context() - get the context associated with the file
 * @file:	File associated with the adapter context.
 *
 * Return: pointer to the context
 */
static void *ocxlflash_fops_get_context(struct file *file)
{
	return file->private_data;
}

/**
 * ocxlflash_afu_irq() - interrupt handler for user contexts
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the context.
 *
 * Return: always returns IRQ_HANDLED
 */
static irqreturn_t ocxlflash_afu_irq(int irq, void *data)
{
	struct ocxlflash_context *ctx = data;
	struct device *dev = ctx->hw_afu->dev;
	int i;

	dev_dbg(dev, "%s: Interrupt raised for pe %i virq %i\n",
		__func__, ctx->pe, irq);

	for (i = 0; i < ctx->num_irqs; i++) {
		if (ctx->irqs[i].virq == irq)
			break;
	}
	if (unlikely(i >= ctx->num_irqs)) {
		dev_err(dev, "%s: Received AFU IRQ out of range\n", __func__);
		goto out;
	}

	spin_lock(&ctx->slock);
	set_bit(i, &ctx->irq_bitmap);
	ctx->pending_irq = true;
	spin_unlock(&ctx->slock);

	wake_up_all(&ctx->wq);
out:
	return IRQ_HANDLED;
}
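/*
 * Interrupt numbering: the bitmap is indexed by the 0-based position of the
 * interrupt in ctx->irqs[], while the number reported to userspace in
 * afu_read() is bit + 1, matching the 1-based numbering of the CXL event
 * ABI. Setting bit i here (rather than i - 1, which is undefined for the
 * first interrupt) keeps the two sides consistent.
 */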
/**
 * ocxlflash_start_work() - start a user context
 * @ctx_cookie:	Context to be started.
 * @num_irqs:	Number of interrupts requested.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_start_work(void *ctx_cookie, u64 num_irqs)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	char *name;
	int rc = 0;
	int i;

	rc = alloc_afu_irqs(ctx, num_irqs);
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: alloc_afu_irqs failed rc=%d\n", __func__, rc);
		goto out;
	}

	for (i = 0; i < num_irqs; i++) {
		name = kasprintf(GFP_KERNEL, "ocxlflash-%s-pe%i-%i",
				 dev_name(dev), ctx->pe, i);
		rc = afu_map_irq(0, ctx, i, ocxlflash_afu_irq, ctx, name);
		kfree(name);
		if (unlikely(rc < 0)) {
			dev_err(dev, "%s: afu_map_irq failed rc=%d\n",
				__func__, rc);
			goto err;
		}
	}

	rc = start_context(ctx);
	if (unlikely(rc)) {
		dev_err(dev, "%s: start_context failed rc=%d\n", __func__, rc);
		goto err;
	}
out:
	return rc;
err:
	for (i = i - 1; i >= 0; i--)
		afu_unmap_irq(0, ctx, i, ctx);
	free_afu_irqs(ctx);
	goto out;
}

/**
 * ocxlflash_fd_mmap() - mmap handler for adapter file descriptor
 * @file:	File installed with adapter file descriptor.
 * @vma:	VM area associated with mapping.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_fd_mmap(struct file *file, struct vm_area_struct *vma)
{
	return afu_mmap(file, vma);
}

/**
 * ocxlflash_fd_release() - release the context associated with the file
 * @inode:	File inode pointer.
 * @file:	File associated with the adapter context.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_fd_release(struct inode *inode, struct file *file)
{
	return afu_release(inode, file);
}

/* Backend ops to ocxlflash services */
const struct cxlflash_backend_ops cxlflash_ocxl_ops = {
	.module			= THIS_MODULE,
	.psa_map		= ocxlflash_psa_map,
	.psa_unmap		= ocxlflash_psa_unmap,
	.process_element	= ocxlflash_process_element,
	.map_afu_irq		= ocxlflash_map_afu_irq,
	.unmap_afu_irq		= ocxlflash_unmap_afu_irq,
	.get_irq_objhndl	= ocxlflash_get_irq_objhndl,
	.start_context		= ocxlflash_start_context,
	.stop_context		= ocxlflash_stop_context,
	.afu_reset		= ocxlflash_afu_reset,
	.set_master		= ocxlflash_set_master,
	.get_context		= ocxlflash_get_context,
	.dev_context_init	= ocxlflash_dev_context_init,
	.release_context	= ocxlflash_release_context,
	.perst_reloads_same_image = ocxlflash_perst_reloads_same_image,
	.read_adapter_vpd	= ocxlflash_read_adapter_vpd,
	.allocate_afu_irqs	= ocxlflash_allocate_afu_irqs,
	.free_afu_irqs		= ocxlflash_free_afu_irqs,
	.create_afu		= ocxlflash_create_afu,
	.destroy_afu		= ocxlflash_destroy_afu,
	.get_fd			= ocxlflash_get_fd,
	.fops_get_context	= ocxlflash_fops_get_context,
	.start_work		= ocxlflash_start_work,
	.fd_mmap		= ocxlflash_fd_mmap,
	.fd_release		= ocxlflash_fd_release,
};