/*
 * CXL Flash Device Driver
 *
 * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *	       Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2018 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/file.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>

#include <misc/ocxl.h>

#include <uapi/misc/cxl.h>

#include "backend.h"
#include "ocxl_hw.h"

/*
 * Pseudo-filesystem to allocate inodes.
 */

#define OCXLFLASH_FS_MAGIC	0x1697698f

static int ocxlflash_fs_cnt;
static struct vfsmount *ocxlflash_vfs_mount;

static const struct dentry_operations ocxlflash_fs_dops = {
	.d_dname	= simple_dname,
};

/*
 * ocxlflash_fs_mount() - mount the pseudo-filesystem
 * @fs_type:	File system type.
 * @flags:	Flags for the filesystem.
 * @dev_name:	Device name associated with the filesystem.
 * @data:	Data pointer.
 *
 * Return: pointer to the directory entry structure
 */
static struct dentry *ocxlflash_fs_mount(struct file_system_type *fs_type,
					 int flags, const char *dev_name,
					 void *data)
{
	return mount_pseudo(fs_type, "ocxlflash:", NULL, &ocxlflash_fs_dops,
			    OCXLFLASH_FS_MAGIC);
}

static struct file_system_type ocxlflash_fs_type = {
	.name		= "ocxlflash",
	.owner		= THIS_MODULE,
	.mount		= ocxlflash_fs_mount,
	.kill_sb	= kill_anon_super,
};

/*
 * ocxlflash_release_mapping() - release the memory mapping
 * @ctx:	Context whose mapping is to be released.
 */
static void ocxlflash_release_mapping(struct ocxlflash_context *ctx)
{
	if (ctx->mapping)
		simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
	ctx->mapping = NULL;
}
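/*
 * Note on refcounting (for orientation): every context file created on
 * this pseudo-filesystem holds a pin on the mount. ocxlflash_getfile()
 * below takes the pin via simple_pin_fs() and ocxlflash_release_mapping()
 * drops it via simple_release_fs(), so the superblock stays alive for as
 * long as at least one adapter context file exists.
 */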
/*
 * ocxlflash_getfile() - allocate pseudo filesystem, inode, and the file
 * @dev:	Generic device of the host.
 * @name:	Name of the pseudo filesystem.
 * @fops:	File operations.
 * @priv:	Private data.
 * @flags:	Flags for the file.
 *
 * Return: pointer to the file on success, ERR_PTR on failure
 */
static struct file *ocxlflash_getfile(struct device *dev, const char *name,
				      const struct file_operations *fops,
				      void *priv, int flags)
{
	struct file *file;
	struct inode *inode;
	int rc;

	if (fops->owner && !try_module_get(fops->owner)) {
		dev_err(dev, "%s: Owner does not exist\n", __func__);
		rc = -ENOENT;
		goto err1;
	}

	rc = simple_pin_fs(&ocxlflash_fs_type, &ocxlflash_vfs_mount,
			   &ocxlflash_fs_cnt);
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: Cannot mount ocxlflash pseudofs rc=%d\n",
			__func__, rc);
		goto err2;
	}

	inode = alloc_anon_inode(ocxlflash_vfs_mount->mnt_sb);
	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		dev_err(dev, "%s: alloc_anon_inode failed rc=%d\n",
			__func__, rc);
		goto err3;
	}

	file = alloc_file_pseudo(inode, ocxlflash_vfs_mount, name,
				 flags & (O_ACCMODE | O_NONBLOCK), fops);
	if (IS_ERR(file)) {
		rc = PTR_ERR(file);
		dev_err(dev, "%s: alloc_file failed rc=%d\n",
			__func__, rc);
		goto err4;
	}

	file->private_data = priv;
out:
	return file;
err4:
	iput(inode);
err3:
	simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
err2:
	module_put(fops->owner);
err1:
	file = ERR_PTR(rc);
	goto out;
}

/**
 * ocxlflash_psa_map() - map the process specific MMIO space
 * @ctx_cookie:	Adapter context for which the mapping needs to be done.
 *
 * Return: MMIO pointer of the mapped region
 */
static void __iomem *ocxlflash_psa_map(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct device *dev = ctx->hw_afu->dev;

	mutex_lock(&ctx->state_mutex);
	if (ctx->state != STARTED) {
		dev_err(dev, "%s: Context not started, state=%d\n", __func__,
			ctx->state);
		mutex_unlock(&ctx->state_mutex);
		return NULL;
	}
	mutex_unlock(&ctx->state_mutex);

	return ioremap(ctx->psn_phys, ctx->psn_size);
}

/**
 * ocxlflash_psa_unmap() - unmap the process specific MMIO space
 * @addr:	MMIO pointer to unmap.
 */
static void ocxlflash_psa_unmap(void __iomem *addr)
{
	iounmap(addr);
}

/**
 * ocxlflash_process_element() - get process element of the adapter context
 * @ctx_cookie:	Adapter context associated with the process element.
 *
 * Return: process element of the adapter context
 */
static int ocxlflash_process_element(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;

	return ctx->pe;
}
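/*
 * Per-context AFU interrupt model (summary of the helpers below): each
 * interrupt is allocated by ocxl_link_irq_alloc() as a hardware IRQ plus
 * a trigger page address (hwirq/ptrig). Mapping an interrupt then means
 * wiring the hwirq into the Linux IRQ domain (irq_create_mapping() +
 * request_irq()) and ioremap()ing the trigger page; the resulting vtrig
 * is what ocxlflash_get_irq_objhndl() later hands out as the interrupt's
 * object handle.
 */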
/**
 * afu_map_irq() - map the interrupt of the adapter context
 * @flags:	Flags.
 * @ctx:	Adapter context.
 * @num:	Per-context AFU interrupt number.
 * @handler:	Interrupt handler to register.
 * @cookie:	Interrupt handler private data.
 * @name:	Name of the interrupt.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_map_irq(u64 flags, struct ocxlflash_context *ctx, int num,
		       irq_handler_t handler, void *cookie, char *name)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	struct ocxlflash_irqs *irq;
	void __iomem *vtrig;
	u32 virq;
	int rc = 0;

	if (num < 0 || num >= ctx->num_irqs) {
		dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
		rc = -ENOENT;
		goto out;
	}

	irq = &ctx->irqs[num];
	virq = irq_create_mapping(NULL, irq->hwirq);
	if (unlikely(!virq)) {
		dev_err(dev, "%s: irq_create_mapping failed\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	rc = request_irq(virq, handler, 0, name, cookie);
	if (unlikely(rc)) {
		dev_err(dev, "%s: request_irq failed rc=%d\n", __func__, rc);
		goto err1;
	}

	vtrig = ioremap(irq->ptrig, PAGE_SIZE);
	if (unlikely(!vtrig)) {
		dev_err(dev, "%s: Trigger page mapping failed\n", __func__);
		rc = -ENOMEM;
		goto err2;
	}

	irq->virq = virq;
	irq->vtrig = vtrig;
out:
	return rc;
err2:
	free_irq(virq, cookie);
err1:
	irq_dispose_mapping(virq);
	goto out;
}

/**
 * ocxlflash_map_afu_irq() - map the interrupt of the adapter context
 * @ctx_cookie:	Adapter context.
 * @num:	Per-context AFU interrupt number.
 * @handler:	Interrupt handler to register.
 * @cookie:	Interrupt handler private data.
 * @name:	Name of the interrupt.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_map_afu_irq(void *ctx_cookie, int num,
				 irq_handler_t handler, void *cookie,
				 char *name)
{
	return afu_map_irq(0, ctx_cookie, num, handler, cookie, name);
}

/**
 * afu_unmap_irq() - unmap the interrupt
 * @flags:	Flags.
 * @ctx:	Adapter context.
 * @num:	Per-context AFU interrupt number.
 * @cookie:	Interrupt handler private data.
 */
static void afu_unmap_irq(u64 flags, struct ocxlflash_context *ctx, int num,
			  void *cookie)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	struct ocxlflash_irqs *irq;

	if (num < 0 || num >= ctx->num_irqs) {
		dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
		return;
	}

	irq = &ctx->irqs[num];
	if (irq->vtrig)
		iounmap(irq->vtrig);

	if (irq_find_mapping(NULL, irq->hwirq)) {
		free_irq(irq->virq, cookie);
		irq_dispose_mapping(irq->virq);
	}

	memset(irq, 0, sizeof(*irq));
}

/**
 * ocxlflash_unmap_afu_irq() - unmap the interrupt
 * @ctx_cookie:	Adapter context.
 * @num:	Per-context AFU interrupt number.
 * @cookie:	Interrupt handler private data.
 */
static void ocxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie)
{
	return afu_unmap_irq(0, ctx_cookie, num, cookie);
}
/**
 * ocxlflash_get_irq_objhndl() - get the object handle for an interrupt
 * @ctx_cookie:	Context associated with the interrupt.
 * @irq:	Interrupt number.
 *
 * Return: effective address of the mapped region
 */
static u64 ocxlflash_get_irq_objhndl(void *ctx_cookie, int irq)
{
	struct ocxlflash_context *ctx = ctx_cookie;

	if (irq < 0 || irq >= ctx->num_irqs)
		return 0;

	return (__force u64)ctx->irqs[irq].vtrig;
}

/**
 * ocxlflash_xsl_fault() - callback when translation error is triggered
 * @data:	Private data provided at callback registration, the context.
 * @addr:	Address that triggered the error.
 * @dsisr:	Value of dsisr register.
 */
static void ocxlflash_xsl_fault(void *data, u64 addr, u64 dsisr)
{
	struct ocxlflash_context *ctx = data;

	spin_lock(&ctx->slock);
	ctx->fault_addr = addr;
	ctx->fault_dsisr = dsisr;
	ctx->pending_fault = true;
	spin_unlock(&ctx->slock);

	wake_up_all(&ctx->wq);
}

/**
 * start_context() - local routine to start a context
 * @ctx:	Adapter context to be started.
 *
 * Assign the context specific MMIO space, add and enable the PE.
 *
 * Return: 0 on success, -errno on failure
 */
static int start_context(struct ocxlflash_context *ctx)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct ocxl_afu_config *acfg = &afu->acfg;
	void *link_token = afu->link_token;
	struct device *dev = afu->dev;
	bool master = ctx->master;
	struct mm_struct *mm;
	int rc = 0;
	u32 pid;

	mutex_lock(&ctx->state_mutex);
	if (ctx->state != OPENED) {
		dev_err(dev, "%s: Context state invalid, state=%d\n",
			__func__, ctx->state);
		rc = -EINVAL;
		goto out;
	}

	if (master) {
		ctx->psn_size = acfg->global_mmio_size;
		ctx->psn_phys = afu->gmmio_phys;
	} else {
		ctx->psn_size = acfg->pp_mmio_stride;
		ctx->psn_phys = afu->ppmmio_phys + (ctx->pe * ctx->psn_size);
	}

	/* pid and mm not set for master contexts */
	if (master) {
		pid = 0;
		mm = NULL;
	} else {
		pid = current->mm->context.id;
		mm = current->mm;
	}

	rc = ocxl_link_add_pe(link_token, ctx->pe, pid, 0, 0, mm,
			      ocxlflash_xsl_fault, ctx);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_link_add_pe failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	ctx->state = STARTED;
out:
	mutex_unlock(&ctx->state_mutex);
	return rc;
}

/**
 * ocxlflash_start_context() - start a kernel context
 * @ctx_cookie:	Adapter context to be started.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_start_context(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;

	return start_context(ctx);
}
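/*
 * Context lifecycle (for orientation): a context is created in the OPENED
 * state by ocxlflash_dev_context_init(), moves to STARTED once its process
 * element has been added to the link in start_context(), and is marked
 * CLOSED when stopped. MMIO mapping (ocxlflash_psa_map()) and mmap page
 * faults are only honored while in the STARTED state.
 */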
/**
 * ocxlflash_stop_context() - stop a context
 * @ctx_cookie:	Adapter context to be stopped.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_stop_context(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct ocxl_afu_config *acfg = &afu->acfg;
	struct pci_dev *pdev = afu->pdev;
	struct device *dev = afu->dev;
	enum ocxlflash_ctx_state state;
	int rc = 0;

	mutex_lock(&ctx->state_mutex);
	state = ctx->state;
	ctx->state = CLOSED;
	mutex_unlock(&ctx->state_mutex);
	if (state != STARTED)
		goto out;

	rc = ocxl_config_terminate_pasid(pdev, acfg->dvsec_afu_control_pos,
					 ctx->pe);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_terminate_pasid failed rc=%d\n",
			__func__, rc);
		/* If EBUSY, PE could be referenced in future by the AFU */
		if (rc == -EBUSY)
			goto out;
	}

	rc = ocxl_link_remove_pe(afu->link_token, ctx->pe);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_link_remove_pe failed rc=%d\n",
			__func__, rc);
		goto out;
	}
out:
	return rc;
}

/**
 * ocxlflash_afu_reset() - reset the AFU
 * @ctx_cookie:	Adapter context.
 *
 * Return: 0 always (reset is not yet provided by the OCXL transport services)
 */
static int ocxlflash_afu_reset(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct device *dev = ctx->hw_afu->dev;

	/* Pending implementation from OCXL transport services */
	dev_err_once(dev, "%s: afu_reset() fop not supported\n", __func__);

	/* Silently return success until it is implemented */
	return 0;
}

/**
 * ocxlflash_set_master() - sets the context as master
 * @ctx_cookie:	Adapter context to set as master.
 */
static void ocxlflash_set_master(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;

	ctx->master = true;
}

/**
 * ocxlflash_get_context() - obtains the context associated with the host
 * @pdev:	PCI device associated with the host.
 * @afu_cookie:	Hardware AFU associated with the host.
 *
 * Return: pointer to the host adapter context
 */
static void *ocxlflash_get_context(struct pci_dev *pdev, void *afu_cookie)
{
	struct ocxl_hw_afu *afu = afu_cookie;

	return afu->ocxl_ctx;
}
/**
 * ocxlflash_dev_context_init() - allocate and initialize an adapter context
 * @pdev:	PCI device associated with the host.
 * @afu_cookie:	Hardware AFU associated with the host.
 *
 * Return: adapter context on success, ERR_PTR on failure
 */
static void *ocxlflash_dev_context_init(struct pci_dev *pdev, void *afu_cookie)
{
	struct ocxl_hw_afu *afu = afu_cookie;
	struct device *dev = afu->dev;
	struct ocxlflash_context *ctx;
	int rc;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (unlikely(!ctx)) {
		dev_err(dev, "%s: Context allocation failed\n", __func__);
		rc = -ENOMEM;
		goto err1;
	}

	idr_preload(GFP_KERNEL);
	rc = idr_alloc(&afu->idr, ctx, 0, afu->max_pasid, GFP_NOWAIT);
	idr_preload_end();
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: idr_alloc failed rc=%d\n", __func__, rc);
		goto err2;
	}

	spin_lock_init(&ctx->slock);
	init_waitqueue_head(&ctx->wq);
	mutex_init(&ctx->state_mutex);

	ctx->state = OPENED;
	ctx->pe = rc;
	ctx->master = false;
	ctx->mapping = NULL;
	ctx->hw_afu = afu;
	ctx->irq_bitmap = 0;
	ctx->pending_irq = false;
	ctx->pending_fault = false;
out:
	return ctx;
err2:
	kfree(ctx);
err1:
	ctx = ERR_PTR(rc);
	goto out;
}

/**
 * ocxlflash_release_context() - releases an adapter context
 * @ctx_cookie:	Adapter context to be released.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_release_context(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct device *dev;
	int rc = 0;

	if (!ctx)
		goto out;

	dev = ctx->hw_afu->dev;
	mutex_lock(&ctx->state_mutex);
	if (ctx->state >= STARTED) {
		dev_err(dev, "%s: Context in use, state=%d\n", __func__,
			ctx->state);
		mutex_unlock(&ctx->state_mutex);
		rc = -EBUSY;
		goto out;
	}
	mutex_unlock(&ctx->state_mutex);

	idr_remove(&ctx->hw_afu->idr, ctx->pe);
	ocxlflash_release_mapping(ctx);
	kfree(ctx);
out:
	return rc;
}

/**
 * ocxlflash_perst_reloads_same_image() - sets the image reload policy
 * @afu_cookie:	Hardware AFU associated with the host.
 * @image:	Whether to load the same image on PERST.
 */
static void ocxlflash_perst_reloads_same_image(void *afu_cookie, bool image)
{
	struct ocxl_hw_afu *afu = afu_cookie;

	afu->perst_same_image = image;
}

/**
 * ocxlflash_read_adapter_vpd() - reads the adapter VPD
 * @pdev:	PCI device associated with the host.
 * @buf:	Buffer to get the VPD data.
 * @count:	Size of buffer (maximum bytes that can be read).
 *
 * Return: size of VPD on success, -errno on failure
 */
static ssize_t ocxlflash_read_adapter_vpd(struct pci_dev *pdev, void *buf,
					  size_t count)
{
	return pci_read_vpd(pdev, 0, count, buf);
}

/**
 * free_afu_irqs() - internal service to free interrupts
 * @ctx:	Adapter context.
 */
static void free_afu_irqs(struct ocxlflash_context *ctx)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	int i;

	if (!ctx->irqs) {
		dev_err(dev, "%s: Interrupts not allocated\n", __func__);
		return;
	}

	/* Walk in reverse; only num_irqs entries were allocated */
	for (i = ctx->num_irqs - 1; i >= 0; i--)
		ocxl_link_free_irq(afu->link_token, ctx->irqs[i].hwirq);

	kfree(ctx->irqs);
	ctx->irqs = NULL;
}
/**
 * alloc_afu_irqs() - internal service to allocate interrupts
 * @ctx:	Context associated with the request.
 * @num:	Number of interrupts requested.
 *
 * Return: 0 on success, -errno on failure
 */
static int alloc_afu_irqs(struct ocxlflash_context *ctx, int num)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	struct ocxlflash_irqs *irqs;
	u64 addr;
	int rc = 0;
	int hwirq;
	int i;

	if (ctx->irqs) {
		dev_err(dev, "%s: Interrupts already allocated\n", __func__);
		rc = -EEXIST;
		goto out;
	}

	if (num > OCXL_MAX_IRQS) {
		dev_err(dev, "%s: Too many interrupts num=%d\n", __func__, num);
		rc = -EINVAL;
		goto out;
	}

	irqs = kcalloc(num, sizeof(*irqs), GFP_KERNEL);
	if (unlikely(!irqs)) {
		dev_err(dev, "%s: Context irqs allocation failed\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num; i++) {
		rc = ocxl_link_irq_alloc(afu->link_token, &hwirq, &addr);
		if (unlikely(rc)) {
			dev_err(dev, "%s: ocxl_link_irq_alloc failed rc=%d\n",
				__func__, rc);
			goto err;
		}

		irqs[i].hwirq = hwirq;
		irqs[i].ptrig = addr;
	}

	ctx->irqs = irqs;
	ctx->num_irqs = num;
out:
	return rc;
err:
	for (i = i-1; i >= 0; i--)
		ocxl_link_free_irq(afu->link_token, irqs[i].hwirq);
	kfree(irqs);
	goto out;
}

/**
 * ocxlflash_allocate_afu_irqs() - allocates the requested number of interrupts
 * @ctx_cookie:	Context associated with the request.
 * @num:	Number of interrupts requested.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_allocate_afu_irqs(void *ctx_cookie, int num)
{
	return alloc_afu_irqs(ctx_cookie, num);
}

/**
 * ocxlflash_free_afu_irqs() - frees the interrupts of an adapter context
 * @ctx_cookie:	Adapter context.
 */
static void ocxlflash_free_afu_irqs(void *ctx_cookie)
{
	free_afu_irqs(ctx_cookie);
}

/**
 * ocxlflash_unconfig_afu() - unconfigure the AFU
 * @afu:	AFU associated with the host.
 */
static void ocxlflash_unconfig_afu(struct ocxl_hw_afu *afu)
{
	if (afu->gmmio_virt) {
		iounmap(afu->gmmio_virt);
		afu->gmmio_virt = NULL;
	}
}

/**
 * ocxlflash_destroy_afu() - destroy the AFU structure
 * @afu_cookie:	AFU to be freed.
 */
static void ocxlflash_destroy_afu(void *afu_cookie)
{
	struct ocxl_hw_afu *afu = afu_cookie;
	int pos;

	if (!afu)
		return;

	ocxlflash_release_context(afu->ocxl_ctx);
	idr_destroy(&afu->idr);

	/* Disable the AFU */
	pos = afu->acfg.dvsec_afu_control_pos;
	ocxl_config_set_afu_state(afu->pdev, pos, 0);

	ocxlflash_unconfig_afu(afu);
	kfree(afu);
}
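/*
 * Probe-time ordering (see ocxlflash_create_afu() further below): the
 * function-level DVSEC must be configured first, since
 * ocxlflash_config_afu() consumes the acTag range and link token that
 * ocxlflash_config_fn() establishes. Teardown runs in the reverse order.
 */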
/**
 * ocxlflash_config_fn() - configure the host function
 * @pdev:	PCI device associated with the host.
 * @afu:	AFU associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_config_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
{
	struct ocxl_fn_config *fcfg = &afu->fcfg;
	struct device *dev = &pdev->dev;
	u16 base, enabled, supported;
	int rc = 0;

	/* Read DVSEC config of the function */
	rc = ocxl_config_read_function(pdev, fcfg);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_read_function failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	/* Check if function has AFUs defined, only 1 per function supported */
	if (fcfg->max_afu_index >= 0) {
		afu->is_present = true;
		if (fcfg->max_afu_index != 0)
			dev_warn(dev, "%s: Unexpected AFU index value %d\n",
				 __func__, fcfg->max_afu_index);
	}

	rc = ocxl_config_get_actag_info(pdev, &base, &enabled, &supported);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_get_actag_info failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	afu->fn_actag_base = base;
	afu->fn_actag_enabled = enabled;

	ocxl_config_set_actag(pdev, fcfg->dvsec_function_pos, base, enabled);
	dev_dbg(dev, "%s: Function acTag range base=%u enabled=%u\n",
		__func__, base, enabled);

	rc = ocxl_link_setup(pdev, 0, &afu->link_token);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_link_setup failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	rc = ocxl_config_set_TL(pdev, fcfg->dvsec_tl_pos);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_set_TL failed rc=%d\n",
			__func__, rc);
		goto err;
	}
out:
	return rc;
err:
	ocxl_link_release(pdev, afu->link_token);
	goto out;
}

/**
 * ocxlflash_unconfig_fn() - unconfigure the host function
 * @pdev:	PCI device associated with the host.
 * @afu:	AFU associated with the host.
 */
static void ocxlflash_unconfig_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
{
	ocxl_link_release(pdev, afu->link_token);
}

/**
 * ocxlflash_map_mmio() - map the AFU MMIO space
 * @afu:	AFU associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_map_mmio(struct ocxl_hw_afu *afu)
{
	struct ocxl_afu_config *acfg = &afu->acfg;
	struct pci_dev *pdev = afu->pdev;
	struct device *dev = afu->dev;
	phys_addr_t gmmio, ppmmio;
	int rc = 0;

	rc = pci_request_region(pdev, acfg->global_mmio_bar, "ocxlflash");
	if (unlikely(rc)) {
		dev_err(dev, "%s: pci_request_region for global failed rc=%d\n",
			__func__, rc);
		goto out;
	}
	gmmio = pci_resource_start(pdev, acfg->global_mmio_bar);
	gmmio += acfg->global_mmio_offset;

	rc = pci_request_region(pdev, acfg->pp_mmio_bar, "ocxlflash");
	if (unlikely(rc)) {
		dev_err(dev, "%s: pci_request_region for pp bar failed rc=%d\n",
			__func__, rc);
		goto err1;
	}
	ppmmio = pci_resource_start(pdev, acfg->pp_mmio_bar);
	ppmmio += acfg->pp_mmio_offset;

	afu->gmmio_virt = ioremap(gmmio, acfg->global_mmio_size);
	if (unlikely(!afu->gmmio_virt)) {
		dev_err(dev, "%s: MMIO mapping failed\n", __func__);
		rc = -ENOMEM;
		goto err2;
	}

	afu->gmmio_phys = gmmio;
	afu->ppmmio_phys = ppmmio;
out:
	return rc;
err2:
	pci_release_region(pdev, acfg->pp_mmio_bar);
err1:
	pci_release_region(pdev, acfg->global_mmio_bar);
	goto out;
}
/**
 * ocxlflash_config_afu() - configure the host AFU
 * @pdev:	PCI device associated with the host.
 * @afu:	AFU associated with the host.
 *
 * Must be called _after_ host function configuration.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_config_afu(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
{
	struct ocxl_afu_config *acfg = &afu->acfg;
	struct ocxl_fn_config *fcfg = &afu->fcfg;
	struct device *dev = &pdev->dev;
	int count;
	int base;
	int pos;
	int rc = 0;

	/* This HW AFU function does not have any AFUs defined */
	if (!afu->is_present)
		goto out;

	/* Read AFU config at index 0 */
	rc = ocxl_config_read_afu(pdev, fcfg, acfg, 0);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_read_afu failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	/* Only one AFU per function is supported, so actag_base is same */
	base = afu->fn_actag_base;
	count = min_t(int, acfg->actag_supported, afu->fn_actag_enabled);
	pos = acfg->dvsec_afu_control_pos;

	ocxl_config_set_afu_actag(pdev, pos, base, count);
	dev_dbg(dev, "%s: acTag base=%d enabled=%d\n", __func__, base, count);
	afu->afu_actag_base = base;
	afu->afu_actag_enabled = count;
	afu->max_pasid = 1 << acfg->pasid_supported_log;

	ocxl_config_set_afu_pasid(pdev, pos, 0, acfg->pasid_supported_log);

	rc = ocxlflash_map_mmio(afu);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxlflash_map_mmio failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	/* Enable the AFU */
	ocxl_config_set_afu_state(pdev, acfg->dvsec_afu_control_pos, 1);
out:
	return rc;
}

/**
 * ocxlflash_create_afu() - create the AFU for OCXL
 * @pdev:	PCI device associated with the host.
 *
 * Return: AFU on success, NULL on failure
 */
static void *ocxlflash_create_afu(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ocxlflash_context *ctx;
	struct ocxl_hw_afu *afu;
	int rc;

	afu = kzalloc(sizeof(*afu), GFP_KERNEL);
	if (unlikely(!afu)) {
		dev_err(dev, "%s: HW AFU allocation failed\n", __func__);
		goto out;
	}

	afu->pdev = pdev;
	afu->dev = dev;
	idr_init(&afu->idr);

	rc = ocxlflash_config_fn(pdev, afu);
	if (unlikely(rc)) {
		dev_err(dev, "%s: Function configuration failed rc=%d\n",
			__func__, rc);
		goto err1;
	}

	rc = ocxlflash_config_afu(pdev, afu);
	if (unlikely(rc)) {
		dev_err(dev, "%s: AFU configuration failed rc=%d\n",
			__func__, rc);
		goto err2;
	}

	ctx = ocxlflash_dev_context_init(pdev, afu);
	if (IS_ERR(ctx)) {
		rc = PTR_ERR(ctx);
		dev_err(dev, "%s: ocxlflash_dev_context_init failed rc=%d\n",
			__func__, rc);
		goto err3;
	}

	afu->ocxl_ctx = ctx;
out:
	return afu;
err3:
	ocxlflash_unconfig_afu(afu);
err2:
	ocxlflash_unconfig_fn(pdev, afu);
err1:
	idr_destroy(&afu->idr);
	kfree(afu);
	afu = NULL;
	goto out;
}

/**
 * ctx_event_pending() - check for any event pending on the context
 * @ctx:	Context to be checked.
 *
 * Return: true if there is an event pending, false if none pending
 */
static inline bool ctx_event_pending(struct ocxlflash_context *ctx)
{
	if (ctx->pending_irq || ctx->pending_fault)
		return true;

	return false;
}
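/*
 * Illustration only: a userspace consumer of the context fd (obtained via
 * ocxlflash_get_fd()) would typically wait for events as sketched below;
 * ctx_fd and handle_afu_irq() are hypothetical names, the event layout
 * comes from uapi/misc/cxl.h as used by afu_read():
 *
 *	struct pollfd pfd = { .fd = ctx_fd, .events = POLLIN };
 *	struct cxl_event event;
 *
 *	while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *		if (read(ctx_fd, &event, sizeof(event)) <= 0)
 *			break;
 *		if (event.header.type == CXL_EVENT_AFU_INTERRUPT)
 *			handle_afu_irq(event.irq.irq);	// 1-based number
 *	}
 */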
/**
 * afu_poll() - poll the AFU for events on the context
 * @file:	File associated with the adapter context.
 * @poll:	Poll structure from the user.
 *
 * Return: poll mask
 */
static unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
{
	struct ocxlflash_context *ctx = file->private_data;
	struct device *dev = ctx->hw_afu->dev;
	ulong lock_flags;
	int mask = 0;

	poll_wait(file, &ctx->wq, poll);

	spin_lock_irqsave(&ctx->slock, lock_flags);
	if (ctx_event_pending(ctx))
		mask |= POLLIN | POLLRDNORM;
	else if (ctx->state == CLOSED)
		mask |= POLLERR;
	spin_unlock_irqrestore(&ctx->slock, lock_flags);

	dev_dbg(dev, "%s: Poll wait completed for pe %i mask %i\n",
		__func__, ctx->pe, mask);

	return mask;
}

/**
 * afu_read() - perform a read on the context for any event
 * @file:	File associated with the adapter context.
 * @buf:	Buffer to receive the data.
 * @count:	Size of buffer (maximum bytes that can be read).
 * @off:	Offset.
 *
 * Return: size of the data read on success, -errno on failure
 */
static ssize_t afu_read(struct file *file, char __user *buf, size_t count,
			loff_t *off)
{
	struct ocxlflash_context *ctx = file->private_data;
	struct device *dev = ctx->hw_afu->dev;
	struct cxl_event event;
	ulong lock_flags;
	ssize_t esize;
	ssize_t rc;
	int bit;
	DEFINE_WAIT(event_wait);

	if (*off != 0) {
		dev_err(dev, "%s: Non-zero offset not supported, off=%lld\n",
			__func__, *off);
		rc = -EINVAL;
		goto out;
	}

	spin_lock_irqsave(&ctx->slock, lock_flags);

	for (;;) {
		prepare_to_wait(&ctx->wq, &event_wait, TASK_INTERRUPTIBLE);

		if (ctx_event_pending(ctx) || (ctx->state == CLOSED))
			break;

		if (file->f_flags & O_NONBLOCK) {
			dev_err(dev, "%s: File cannot be blocked on I/O\n",
				__func__);
			rc = -EAGAIN;
			goto err;
		}

		if (signal_pending(current)) {
			dev_err(dev, "%s: Signal pending on the process\n",
				__func__);
			rc = -ERESTARTSYS;
			goto err;
		}

		spin_unlock_irqrestore(&ctx->slock, lock_flags);
		schedule();
		spin_lock_irqsave(&ctx->slock, lock_flags);
	}

	finish_wait(&ctx->wq, &event_wait);

	memset(&event, 0, sizeof(event));
	event.header.process_element = ctx->pe;
	event.header.size = sizeof(struct cxl_event_header);
	if (ctx->pending_irq) {
		esize = sizeof(struct cxl_event_afu_interrupt);
		event.header.size += esize;
		event.header.type = CXL_EVENT_AFU_INTERRUPT;

		bit = find_first_bit(&ctx->irq_bitmap, ctx->num_irqs);
		clear_bit(bit, &ctx->irq_bitmap);
		event.irq.irq = bit + 1;
		if (bitmap_empty(&ctx->irq_bitmap, ctx->num_irqs))
			ctx->pending_irq = false;
	} else if (ctx->pending_fault) {
		event.header.size += sizeof(struct cxl_event_data_storage);
		event.header.type = CXL_EVENT_DATA_STORAGE;
		event.fault.addr = ctx->fault_addr;
		event.fault.dsisr = ctx->fault_dsisr;
		ctx->pending_fault = false;
	}

	spin_unlock_irqrestore(&ctx->slock, lock_flags);

	if (copy_to_user(buf, &event, event.header.size)) {
		dev_err(dev, "%s: copy_to_user failed\n", __func__);
		rc = -EFAULT;
		goto out;
	}

	rc = event.header.size;
out:
	return rc;
err:
	finish_wait(&ctx->wq, &event_wait);
	spin_unlock_irqrestore(&ctx->slock, lock_flags);
	goto out;
}
/**
 * afu_release() - release and free the context
 * @inode:	File inode pointer.
 * @file:	File associated with the context.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_release(struct inode *inode, struct file *file)
{
	struct ocxlflash_context *ctx = file->private_data;
	int i;

	/* Unmap and free the interrupts associated with the context */
	for (i = ctx->num_irqs - 1; i >= 0; i--)
		afu_unmap_irq(0, ctx, i, ctx);
	free_afu_irqs(ctx);

	return ocxlflash_release_context(ctx);
}

/**
 * ocxlflash_mmap_fault() - mmap fault handler
 * @vmf:	VM fault associated with current fault.
 *
 * Return: 0 on success, -errno on failure
 */
static vm_fault_t ocxlflash_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ocxlflash_context *ctx = vma->vm_file->private_data;
	struct device *dev = ctx->hw_afu->dev;
	u64 mmio_area, offset;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= ctx->psn_size)
		return VM_FAULT_SIGBUS;

	mutex_lock(&ctx->state_mutex);
	if (ctx->state != STARTED) {
		dev_err(dev, "%s: Context not started, state=%d\n",
			__func__, ctx->state);
		mutex_unlock(&ctx->state_mutex);
		return VM_FAULT_SIGBUS;
	}
	mutex_unlock(&ctx->state_mutex);

	mmio_area = ctx->psn_phys;
	mmio_area += offset;

	return vmf_insert_pfn(vma, vmf->address, mmio_area >> PAGE_SHIFT);
}

static const struct vm_operations_struct ocxlflash_vmops = {
	.fault = ocxlflash_mmap_fault,
};

/**
 * afu_mmap() - map the fault handler operations
 * @file:	File associated with the context.
 * @vma:	VM area associated with mapping.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ocxlflash_context *ctx = file->private_data;

	if ((vma_pages(vma) + vma->vm_pgoff) >
	    (ctx->psn_size >> PAGE_SHIFT))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &ocxlflash_vmops;
	return 0;
}

static const struct file_operations ocxl_afu_fops = {
	.owner		= THIS_MODULE,
	.poll		= afu_poll,
	.read		= afu_read,
	.release	= afu_release,
	.mmap		= afu_mmap,
};

#define PATCH_FOPS(NAME)						\
	do { if (!fops->NAME) fops->NAME = ocxl_afu_fops.NAME; } while (0)
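/*
 * For reference, PATCH_FOPS(poll) expands to:
 *
 *	do { if (!fops->poll) fops->poll = ocxl_afu_fops.poll; } while (0)
 *
 * i.e. any file operation the caller leaves NULL is backfilled with the
 * default from ocxl_afu_fops.
 */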
/**
 * ocxlflash_get_fd() - get file descriptor for an adapter context
 * @ctx_cookie:	Adapter context.
 * @fops:	File operations to be associated.
 * @fd:		File descriptor to be returned back.
 *
 * Return: pointer to the file on success, ERR_PTR on failure
 */
static struct file *ocxlflash_get_fd(void *ctx_cookie,
				     struct file_operations *fops, int *fd)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct device *dev = ctx->hw_afu->dev;
	struct file *file;
	int flags, fdtmp;
	int rc = 0;
	char *name = NULL;

	/* Only allow one fd per context */
	if (ctx->mapping) {
		dev_err(dev, "%s: Context is already mapped to an fd\n",
			__func__);
		rc = -EEXIST;
		goto err1;
	}

	flags = O_RDWR | O_CLOEXEC;

	/* This code is similar to anon_inode_getfd() */
	rc = get_unused_fd_flags(flags);
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: get_unused_fd_flags failed rc=%d\n",
			__func__, rc);
		goto err1;
	}
	fdtmp = rc;

	/* Patch the file ops that are not defined */
	if (fops) {
		PATCH_FOPS(poll);
		PATCH_FOPS(read);
		PATCH_FOPS(release);
		PATCH_FOPS(mmap);
	} else /* Use default ops */
		fops = (struct file_operations *)&ocxl_afu_fops;

	name = kasprintf(GFP_KERNEL, "ocxlflash:%d", ctx->pe);
	file = ocxlflash_getfile(dev, name, fops, ctx, flags);
	kfree(name);
	if (IS_ERR(file)) {
		rc = PTR_ERR(file);
		dev_err(dev, "%s: ocxlflash_getfile failed rc=%d\n",
			__func__, rc);
		goto err2;
	}

	ctx->mapping = file->f_mapping;
	*fd = fdtmp;
out:
	return file;
err2:
	put_unused_fd(fdtmp);
err1:
	file = ERR_PTR(rc);
	goto out;
}

/**
 * ocxlflash_fops_get_context() - get the context associated with the file
 * @file:	File associated with the adapter context.
 *
 * Return: pointer to the context
 */
static void *ocxlflash_fops_get_context(struct file *file)
{
	return file->private_data;
}

/**
 * ocxlflash_afu_irq() - interrupt handler for user contexts
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the context.
 *
 * Return: Always returns IRQ_HANDLED.
 */
static irqreturn_t ocxlflash_afu_irq(int irq, void *data)
{
	struct ocxlflash_context *ctx = data;
	struct device *dev = ctx->hw_afu->dev;
	int i;

	dev_dbg(dev, "%s: Interrupt raised for pe %i virq %i\n",
		__func__, ctx->pe, irq);

	for (i = 0; i < ctx->num_irqs; i++) {
		if (ctx->irqs[i].virq == irq)
			break;
	}
	if (unlikely(i >= ctx->num_irqs)) {
		dev_err(dev, "%s: Received AFU IRQ out of range\n", __func__);
		goto out;
	}

	spin_lock(&ctx->slock);
	/* Bitmap is 0-based; afu_read() reports this to users as bit + 1 */
	set_bit(i, &ctx->irq_bitmap);
	ctx->pending_irq = true;
	spin_unlock(&ctx->slock);

	wake_up_all(&ctx->wq);
out:
	return IRQ_HANDLED;
}
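/*
 * ocxlflash_start_work() below ties the pieces together for a user
 * context: allocate the per-context interrupts, map each one to the
 * handler above, then start the context. On any failure the interrupts
 * mapped so far are unwound before the allocation itself is freed.
 */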
/**
 * ocxlflash_start_work() - start a user context
 * @ctx_cookie:	Context to be started.
 * @num_irqs:	Number of interrupts requested.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_start_work(void *ctx_cookie, u64 num_irqs)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	char *name;
	int rc = 0;
	int i;

	rc = alloc_afu_irqs(ctx, num_irqs);
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: alloc_afu_irqs failed rc=%d\n", __func__, rc);
		goto out;
	}

	for (i = 0; i < num_irqs; i++) {
		name = kasprintf(GFP_KERNEL, "ocxlflash-%s-pe%i-%i",
				 dev_name(dev), ctx->pe, i);
		rc = afu_map_irq(0, ctx, i, ocxlflash_afu_irq, ctx, name);
		kfree(name);
		if (unlikely(rc < 0)) {
			dev_err(dev, "%s: afu_map_irq failed rc=%d\n",
				__func__, rc);
			goto err;
		}
	}

	rc = start_context(ctx);
	if (unlikely(rc)) {
		dev_err(dev, "%s: start_context failed rc=%d\n", __func__, rc);
		goto err;
	}
out:
	return rc;
err:
	for (i = i-1; i >= 0; i--)
		afu_unmap_irq(0, ctx, i, ctx);
	free_afu_irqs(ctx);
	goto out;
}

/**
 * ocxlflash_fd_mmap() - mmap handler for adapter file descriptor
 * @file:	File installed with adapter file descriptor.
 * @vma:	VM area associated with mapping.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_fd_mmap(struct file *file, struct vm_area_struct *vma)
{
	return afu_mmap(file, vma);
}

/**
 * ocxlflash_fd_release() - release the context associated with the file
 * @inode:	File inode pointer.
 * @file:	File associated with the adapter context.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_fd_release(struct inode *inode, struct file *file)
{
	return afu_release(inode, file);
}

/* Backend ops to ocxlflash services */
const struct cxlflash_backend_ops cxlflash_ocxl_ops = {
	.module			= THIS_MODULE,
	.psa_map		= ocxlflash_psa_map,
	.psa_unmap		= ocxlflash_psa_unmap,
	.process_element	= ocxlflash_process_element,
	.map_afu_irq		= ocxlflash_map_afu_irq,
	.unmap_afu_irq		= ocxlflash_unmap_afu_irq,
	.get_irq_objhndl	= ocxlflash_get_irq_objhndl,
	.start_context		= ocxlflash_start_context,
	.stop_context		= ocxlflash_stop_context,
	.afu_reset		= ocxlflash_afu_reset,
	.set_master		= ocxlflash_set_master,
	.get_context		= ocxlflash_get_context,
	.dev_context_init	= ocxlflash_dev_context_init,
	.release_context	= ocxlflash_release_context,
	.perst_reloads_same_image = ocxlflash_perst_reloads_same_image,
	.read_adapter_vpd	= ocxlflash_read_adapter_vpd,
	.allocate_afu_irqs	= ocxlflash_allocate_afu_irqs,
	.free_afu_irqs		= ocxlflash_free_afu_irqs,
	.create_afu		= ocxlflash_create_afu,
	.destroy_afu		= ocxlflash_destroy_afu,
	.get_fd			= ocxlflash_get_fd,
	.fops_get_context	= ocxlflash_fops_get_context,
	.start_work		= ocxlflash_start_work,
	.fd_mmap		= ocxlflash_fd_mmap,
	.fd_release		= ocxlflash_fd_release,
};
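/*
 * A minimal usage sketch, assuming the cxlflash core selects this ops
 * table for OCXL-capable devices (variable names here are illustrative,
 * not the core's actual code):
 *
 *	const struct cxlflash_backend_ops *ops = &cxlflash_ocxl_ops;
 *	void *afu = ops->create_afu(pdev);
 *	void *ctx = ops->get_context(pdev, afu);
 *	...
 *	ops->destroy_afu(afu);
 */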