xref: /openbmc/linux/drivers/scsi/cxlflash/ocxl_hw.c (revision 02a9c6ee)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CXL Flash Device Driver
 *
 * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *             Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2018 IBM Corporation
 */

#include <linux/file.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>
#include <linux/interrupt.h>
#include <asm/xive.h>
#include <misc/ocxl.h>

#include <uapi/misc/cxl.h>

#include "backend.h"
#include "ocxl_hw.h"

/*
 * Pseudo-filesystem to allocate inodes.
 */

#define OCXLFLASH_FS_MAGIC      0x1697698f

static int ocxlflash_fs_cnt;
static struct vfsmount *ocxlflash_vfs_mount;

static int ocxlflash_fs_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, OCXLFLASH_FS_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type ocxlflash_fs_type = {
	.name		= "ocxlflash",
	.owner		= THIS_MODULE,
	.init_fs_context = ocxlflash_fs_init_fs_context,
	.kill_sb	= kill_anon_super,
};

/**
 * ocxlflash_release_mapping() - release the memory mapping
 * @ctx:	Context whose mapping is to be released.
 */
static void ocxlflash_release_mapping(struct ocxlflash_context *ctx)
{
	if (ctx->mapping)
		simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
	ctx->mapping = NULL;
}

/**
 * ocxlflash_getfile() - allocate pseudo filesystem, inode, and the file
 * @dev:	Generic device of the host.
 * @name:	Name of the file to be created.
 * @fops:	File operations.
 * @priv:	Private data.
 * @flags:	Flags for the file.
 *
 * Return: pointer to the file on success, ERR_PTR on failure
 */
static struct file *ocxlflash_getfile(struct device *dev, const char *name,
				      const struct file_operations *fops,
				      void *priv, int flags)
{
	struct file *file;
	struct inode *inode;
	int rc;

	if (fops->owner && !try_module_get(fops->owner)) {
		dev_err(dev, "%s: Owner does not exist\n", __func__);
		rc = -ENOENT;
		goto err1;
	}

	rc = simple_pin_fs(&ocxlflash_fs_type, &ocxlflash_vfs_mount,
			   &ocxlflash_fs_cnt);
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: Cannot mount ocxlflash pseudofs rc=%d\n",
			__func__, rc);
		goto err2;
	}

	inode = alloc_anon_inode(ocxlflash_vfs_mount->mnt_sb);
	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		dev_err(dev, "%s: alloc_anon_inode failed rc=%d\n",
			__func__, rc);
		goto err3;
	}

	file = alloc_file_pseudo(inode, ocxlflash_vfs_mount, name,
				 flags & (O_ACCMODE | O_NONBLOCK), fops);
	if (IS_ERR(file)) {
		rc = PTR_ERR(file);
		dev_err(dev, "%s: alloc_file failed rc=%d\n",
			__func__, rc);
		goto err4;
	}

	file->private_data = priv;
out:
	return file;
err4:
	iput(inode);
err3:
	simple_release_fs(&ocxlflash_vfs_mount, &ocxlflash_fs_cnt);
err2:
	module_put(fops->owner);
err1:
	file = ERR_PTR(rc);
	goto out;
}
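
/*
 * Illustrative usage sketch (not part of the driver): ocxlflash_get_fd()
 * below is the only caller today. A successful call leaves the pseudo
 * filesystem pinned; that pin is dropped later by
 * ocxlflash_release_mapping() when the owning context is released.
 *
 *	file = ocxlflash_getfile(dev, name, fops, ctx, O_RDWR | O_CLOEXEC);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);	// fs pin and inode already undone
 */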

/**
 * ocxlflash_psa_map() - map the process specific MMIO space
 * @ctx_cookie:	Adapter context for which the mapping needs to be done.
 *
 * Return: MMIO pointer of the mapped region
 */
static void __iomem *ocxlflash_psa_map(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct device *dev = ctx->hw_afu->dev;

	mutex_lock(&ctx->state_mutex);
	if (ctx->state != STARTED) {
		dev_err(dev, "%s: Context not started, state=%d\n", __func__,
			ctx->state);
		mutex_unlock(&ctx->state_mutex);
		return NULL;
	}
	mutex_unlock(&ctx->state_mutex);

	return ioremap(ctx->psn_phys, ctx->psn_size);
}

/**
 * ocxlflash_psa_unmap() - unmap the process specific MMIO space
 * @addr:	MMIO pointer to unmap.
 */
static void ocxlflash_psa_unmap(void __iomem *addr)
{
	iounmap(addr);
}
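
/*
 * Illustrative pairing sketch (reg_offset and val are hypothetical):
 * psa_map() succeeds only on a STARTED context and every mapping must be
 * balanced with psa_unmap().
 *
 *	void __iomem *psa = ocxlflash_psa_map(ctx);
 *
 *	if (!psa)
 *		return -ENOMEM;
 *	writeq_be(val, psa + reg_offset);	// MMIO access into the PSA
 *	ocxlflash_psa_unmap(psa);
 */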

/**
 * ocxlflash_process_element() - get process element of the adapter context
 * @ctx_cookie:	Adapter context associated with the process element.
 *
 * Return: process element of the adapter context
 */
static int ocxlflash_process_element(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;

	return ctx->pe;
}

/**
 * afu_map_irq() - map the interrupt of the adapter context
 * @flags:	Flags.
 * @ctx:	Adapter context.
 * @num:	Per-context AFU interrupt number.
 * @handler:	Interrupt handler to register.
 * @cookie:	Interrupt handler private data.
 * @name:	Name of the interrupt.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_map_irq(u64 flags, struct ocxlflash_context *ctx, int num,
		       irq_handler_t handler, void *cookie, char *name)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	struct ocxlflash_irqs *irq;
	struct xive_irq_data *xd;
	u32 virq;
	int rc = 0;

	if (num < 0 || num >= ctx->num_irqs) {
		dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
		rc = -ENOENT;
		goto out;
	}

	irq = &ctx->irqs[num];
	virq = irq_create_mapping(NULL, irq->hwirq);
	if (unlikely(!virq)) {
		dev_err(dev, "%s: irq_create_mapping failed\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	rc = request_irq(virq, handler, 0, name, cookie);
	if (unlikely(rc)) {
		dev_err(dev, "%s: request_irq failed rc=%d\n", __func__, rc);
		goto err1;
	}

	xd = irq_get_handler_data(virq);
	if (unlikely(!xd)) {
		dev_err(dev, "%s: Can't get interrupt data\n", __func__);
		rc = -ENXIO;
		goto err2;
	}

	irq->virq = virq;
	irq->vtrig = xd->trig_mmio;
out:
	return rc;
err2:
	free_irq(virq, cookie);
err1:
	irq_dispose_mapping(virq);
	goto out;
}

/**
 * ocxlflash_map_afu_irq() - map the interrupt of the adapter context
 * @ctx_cookie:	Adapter context.
 * @num:	Per-context AFU interrupt number.
 * @handler:	Interrupt handler to register.
 * @cookie:	Interrupt handler private data.
 * @name:	Name of the interrupt.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_map_afu_irq(void *ctx_cookie, int num,
				 irq_handler_t handler, void *cookie,
				 char *name)
{
	return afu_map_irq(0, ctx_cookie, num, handler, cookie, name);
}

/**
 * afu_unmap_irq() - unmap the interrupt
 * @flags:	Flags.
 * @ctx:	Adapter context.
 * @num:	Per-context AFU interrupt number.
 * @cookie:	Interrupt handler private data.
 */
static void afu_unmap_irq(u64 flags, struct ocxlflash_context *ctx, int num,
			  void *cookie)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	struct ocxlflash_irqs *irq;

	if (num < 0 || num >= ctx->num_irqs) {
		dev_err(dev, "%s: Interrupt %d not allocated\n", __func__, num);
		return;
	}

	irq = &ctx->irqs[num];

	if (irq_find_mapping(NULL, irq->hwirq)) {
		free_irq(irq->virq, cookie);
		irq_dispose_mapping(irq->virq);
	}

	memset(irq, 0, sizeof(*irq));
}

/**
 * ocxlflash_unmap_afu_irq() - unmap the interrupt
 * @ctx_cookie:	Adapter context.
 * @num:	Per-context AFU interrupt number.
 * @cookie:	Interrupt handler private data.
 */
static void ocxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie)
{
	afu_unmap_irq(0, ctx_cookie, num, cookie);
}
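
/*
 * Illustrative interrupt lifecycle sketch (my_handler is hypothetical):
 * interrupts are reserved with alloc_afu_irqs(), wired up one at a time
 * with afu_map_irq(), and torn down in the reverse order.
 *
 *	rc = alloc_afu_irqs(ctx, 1);				// reserve hwirq
 *	rc = afu_map_irq(0, ctx, 0, my_handler, ctx, name);	// virq+handler
 *	...
 *	afu_unmap_irq(0, ctx, 0, ctx);				// free virq
 *	free_afu_irqs(ctx);					// release hwirq
 */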

/**
 * ocxlflash_get_irq_objhndl() - get the object handle for an interrupt
 * @ctx_cookie:	Context associated with the interrupt.
 * @irq:	Interrupt number.
 *
 * Return: effective address of the mapped region
 */
static u64 ocxlflash_get_irq_objhndl(void *ctx_cookie, int irq)
{
	struct ocxlflash_context *ctx = ctx_cookie;

	if (irq < 0 || irq >= ctx->num_irqs)
		return 0;

	return (__force u64)ctx->irqs[irq].vtrig;
}

/**
 * ocxlflash_xsl_fault() - callback when translation error is triggered
 * @data:	Private data provided at callback registration, the context.
 * @addr:	Address that triggered the error.
 * @dsisr:	Value of dsisr register.
 */
static void ocxlflash_xsl_fault(void *data, u64 addr, u64 dsisr)
{
	struct ocxlflash_context *ctx = data;

	spin_lock(&ctx->slock);
	ctx->fault_addr = addr;
	ctx->fault_dsisr = dsisr;
	ctx->pending_fault = true;
	spin_unlock(&ctx->slock);

	wake_up_all(&ctx->wq);
}

/**
 * start_context() - local routine to start a context
 * @ctx:	Adapter context to be started.
 *
 * Assign the context specific MMIO space, add and enable the PE.
 *
 * Return: 0 on success, -errno on failure
 */
static int start_context(struct ocxlflash_context *ctx)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct ocxl_afu_config *acfg = &afu->acfg;
	void *link_token = afu->link_token;
	struct device *dev = afu->dev;
	bool master = ctx->master;
	struct mm_struct *mm;
	int rc = 0;
	u32 pid;

	mutex_lock(&ctx->state_mutex);
	if (ctx->state != OPENED) {
		dev_err(dev, "%s: Context state invalid, state=%d\n",
			__func__, ctx->state);
		rc = -EINVAL;
		goto out;
	}

	if (master) {
		ctx->psn_size = acfg->global_mmio_size;
		ctx->psn_phys = afu->gmmio_phys;
	} else {
		ctx->psn_size = acfg->pp_mmio_stride;
		ctx->psn_phys = afu->ppmmio_phys + (ctx->pe * ctx->psn_size);
	}

	/* pid and mm not set for master contexts */
	if (master) {
		pid = 0;
		mm = NULL;
	} else {
		pid = current->mm->context.id;
		mm = current->mm;
	}

	rc = ocxl_link_add_pe(link_token, ctx->pe, pid, 0, 0, mm,
			      ocxlflash_xsl_fault, ctx);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_link_add_pe failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	ctx->state = STARTED;
out:
	mutex_unlock(&ctx->state_mutex);
	return rc;
}

/**
 * ocxlflash_start_context() - start a kernel context
 * @ctx_cookie:	Adapter context to be started.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_start_context(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;

	return start_context(ctx);
}

/**
 * ocxlflash_stop_context() - stop a context
 * @ctx_cookie:	Adapter context to be stopped.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_stop_context(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct ocxl_afu_config *acfg = &afu->acfg;
	struct pci_dev *pdev = afu->pdev;
	struct device *dev = afu->dev;
	enum ocxlflash_ctx_state state;
	int rc = 0;

	mutex_lock(&ctx->state_mutex);
	state = ctx->state;
	ctx->state = CLOSED;
	mutex_unlock(&ctx->state_mutex);
	if (state != STARTED)
		goto out;

	rc = ocxl_config_terminate_pasid(pdev, acfg->dvsec_afu_control_pos,
					 ctx->pe);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_terminate_pasid failed rc=%d\n",
			__func__, rc);
		/* If EBUSY, PE could be referenced in future by the AFU */
		if (rc == -EBUSY)
			goto out;
	}

	rc = ocxl_link_remove_pe(afu->link_token, ctx->pe);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_link_remove_pe failed rc=%d\n",
			__func__, rc);
		goto out;
	}
out:
	return rc;
}
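
/*
 * Illustrative note on the PSA math in start_context(): a slave context
 * gets a stride-sized window at
 *
 *	psn_phys = afu->ppmmio_phys + (ctx->pe * acfg->pp_mmio_stride)
 *
 * e.g. with a hypothetical 64KB stride, PE 3 lands at ppmmio_phys + 0x30000.
 */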

/**
 * ocxlflash_afu_reset() - reset the AFU
 * @ctx_cookie:	Adapter context.
 *
 * Return: 0 always, until reset support is available from OCXL
 */
static int ocxlflash_afu_reset(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct device *dev = ctx->hw_afu->dev;

	/* Pending implementation from OCXL transport services */
	dev_err_once(dev, "%s: afu_reset() fop not supported\n", __func__);

	/* Silently return success until it is implemented */
	return 0;
}

/**
 * ocxlflash_set_master() - sets the context as master
 * @ctx_cookie:	Adapter context to set as master.
 */
static void ocxlflash_set_master(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;

	ctx->master = true;
}

/**
 * ocxlflash_get_context() - obtains the context associated with the host
 * @pdev:	PCI device associated with the host.
 * @afu_cookie:	Hardware AFU associated with the host.
 *
 * Return: pointer to the host adapter context
 */
static void *ocxlflash_get_context(struct pci_dev *pdev, void *afu_cookie)
{
	struct ocxl_hw_afu *afu = afu_cookie;

	return afu->ocxl_ctx;
}

/**
 * ocxlflash_dev_context_init() - allocate and initialize an adapter context
 * @pdev:	PCI device associated with the host.
 * @afu_cookie:	Hardware AFU associated with the host.
 *
 * Return: adapter context on success, ERR_PTR on failure
 */
static void *ocxlflash_dev_context_init(struct pci_dev *pdev, void *afu_cookie)
{
	struct ocxl_hw_afu *afu = afu_cookie;
	struct device *dev = afu->dev;
	struct ocxlflash_context *ctx;
	int rc;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (unlikely(!ctx)) {
		dev_err(dev, "%s: Context allocation failed\n", __func__);
		rc = -ENOMEM;
		goto err1;
	}

	idr_preload(GFP_KERNEL);
	rc = idr_alloc(&afu->idr, ctx, 0, afu->max_pasid, GFP_NOWAIT);
	idr_preload_end();
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: idr_alloc failed rc=%d\n", __func__, rc);
		goto err2;
	}

	spin_lock_init(&ctx->slock);
	init_waitqueue_head(&ctx->wq);
	mutex_init(&ctx->state_mutex);

	ctx->state = OPENED;
	ctx->pe = rc;
	ctx->master = false;
	ctx->mapping = NULL;
	ctx->hw_afu = afu;
	ctx->irq_bitmap = 0;
	ctx->pending_irq = false;
	ctx->pending_fault = false;
out:
	return ctx;
err2:
	kfree(ctx);
err1:
	ctx = ERR_PTR(rc);
	goto out;
}

/**
 * ocxlflash_release_context() - releases an adapter context
 * @ctx_cookie:	Adapter context to be released.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_release_context(void *ctx_cookie)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct device *dev;
	int rc = 0;

	if (!ctx)
		goto out;

	dev = ctx->hw_afu->dev;
	mutex_lock(&ctx->state_mutex);
	if (ctx->state >= STARTED) {
		dev_err(dev, "%s: Context in use, state=%d\n", __func__,
			ctx->state);
		mutex_unlock(&ctx->state_mutex);
		rc = -EBUSY;
		goto out;
	}
	mutex_unlock(&ctx->state_mutex);

	idr_remove(&ctx->hw_afu->idr, ctx->pe);
	ocxlflash_release_mapping(ctx);
	kfree(ctx);
out:
	return rc;
}

/**
 * ocxlflash_perst_reloads_same_image() - sets the image reload policy
 * @afu_cookie:	Hardware AFU associated with the host.
 * @image:	Whether to load the same image on PERST.
 */
static void ocxlflash_perst_reloads_same_image(void *afu_cookie, bool image)
{
	struct ocxl_hw_afu *afu = afu_cookie;

	afu->perst_same_image = image;
}

/**
 * ocxlflash_read_adapter_vpd() - reads the adapter VPD
 * @pdev:	PCI device associated with the host.
 * @buf:	Buffer to get the VPD data.
 * @count:	Size of buffer (maximum bytes that can be read).
 *
 * Return: size of VPD on success, -errno on failure
 */
static ssize_t ocxlflash_read_adapter_vpd(struct pci_dev *pdev, void *buf,
					  size_t count)
{
	return pci_read_vpd(pdev, 0, count, buf);
}
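
/*
 * Illustrative caller sketch (buffer size is hypothetical): the cxlflash
 * core invokes this backend op to fetch the adapter VPD during probe.
 *
 *	char vpd[256];
 *	ssize_t sz = cfg->ops->read_adapter_vpd(pdev, vpd, sizeof(vpd));
 *
 *	if (sz < 0)
 *		return sz;	// -errno propagated from pci_read_vpd()
 */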

/**
 * free_afu_irqs() - internal service to free interrupts
 * @ctx:	Adapter context.
 */
static void free_afu_irqs(struct ocxlflash_context *ctx)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	int i;

	if (!ctx->irqs) {
		dev_err(dev, "%s: Interrupts not allocated\n", __func__);
		return;
	}

	for (i = ctx->num_irqs - 1; i >= 0; i--)
		ocxl_link_free_irq(afu->link_token, ctx->irqs[i].hwirq);

	kfree(ctx->irqs);
	ctx->irqs = NULL;
}

/**
 * alloc_afu_irqs() - internal service to allocate interrupts
 * @ctx:	Context associated with the request.
 * @num:	Number of interrupts requested.
 *
 * Return: 0 on success, -errno on failure
 */
static int alloc_afu_irqs(struct ocxlflash_context *ctx, int num)
{
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	struct ocxlflash_irqs *irqs;
	int rc = 0;
	int hwirq;
	int i;

	if (ctx->irqs) {
		dev_err(dev, "%s: Interrupts already allocated\n", __func__);
		rc = -EEXIST;
		goto out;
	}

	if (num > OCXL_MAX_IRQS) {
		dev_err(dev, "%s: Too many interrupts num=%d\n", __func__, num);
		rc = -EINVAL;
		goto out;
	}

	irqs = kcalloc(num, sizeof(*irqs), GFP_KERNEL);
	if (unlikely(!irqs)) {
		dev_err(dev, "%s: Context irqs allocation failed\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num; i++) {
		rc = ocxl_link_irq_alloc(afu->link_token, &hwirq);
		if (unlikely(rc)) {
			dev_err(dev, "%s: ocxl_link_irq_alloc failed rc=%d\n",
				__func__, rc);
			goto err;
		}

		irqs[i].hwirq = hwirq;
	}

	ctx->irqs = irqs;
	ctx->num_irqs = num;
out:
	return rc;
err:
	for (i = i - 1; i >= 0; i--)
		ocxl_link_free_irq(afu->link_token, irqs[i].hwirq);
	kfree(irqs);
	goto out;
}
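
/*
 * Illustrative unwind note for alloc_afu_irqs(): if ocxl_link_irq_alloc()
 * fails at index i, only entries 0..i-1 hold valid hwirqs, so the error
 * path walks back from i-1; a failure at i=2 frees irqs[1], then irqs[0],
 * before kfree(irqs).
 */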

/**
 * ocxlflash_allocate_afu_irqs() - allocates the requested number of interrupts
 * @ctx_cookie:	Context associated with the request.
 * @num:	Number of interrupts requested.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_allocate_afu_irqs(void *ctx_cookie, int num)
{
	return alloc_afu_irqs(ctx_cookie, num);
}

/**
 * ocxlflash_free_afu_irqs() - frees the interrupts of an adapter context
 * @ctx_cookie:	Adapter context.
 */
static void ocxlflash_free_afu_irqs(void *ctx_cookie)
{
	free_afu_irqs(ctx_cookie);
}

/**
 * ocxlflash_unconfig_afu() - unconfigure the AFU
 * @afu: AFU associated with the host.
 */
static void ocxlflash_unconfig_afu(struct ocxl_hw_afu *afu)
{
	if (afu->gmmio_virt) {
		iounmap(afu->gmmio_virt);
		afu->gmmio_virt = NULL;
	}
}

/**
 * ocxlflash_destroy_afu() - destroy the AFU structure
 * @afu_cookie:	AFU to be freed.
 */
static void ocxlflash_destroy_afu(void *afu_cookie)
{
	struct ocxl_hw_afu *afu = afu_cookie;
	int pos;

	if (!afu)
		return;

	ocxlflash_release_context(afu->ocxl_ctx);
	idr_destroy(&afu->idr);

	/* Disable the AFU */
	pos = afu->acfg.dvsec_afu_control_pos;
	ocxl_config_set_afu_state(afu->pdev, pos, 0);

	ocxlflash_unconfig_afu(afu);
	kfree(afu);
}

/**
 * ocxlflash_config_fn() - configure the host function
 * @pdev:	PCI device associated with the host.
 * @afu:	AFU associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_config_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
{
	struct ocxl_fn_config *fcfg = &afu->fcfg;
	struct device *dev = &pdev->dev;
	u16 base, enabled, supported;
	int rc = 0;

	/* Read DVSEC config of the function */
	rc = ocxl_config_read_function(pdev, fcfg);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_read_function failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	/* Check if function has AFUs defined, only 1 per function supported */
	if (fcfg->max_afu_index >= 0) {
		afu->is_present = true;
		if (fcfg->max_afu_index != 0)
			dev_warn(dev, "%s: Unexpected AFU index value %d\n",
				 __func__, fcfg->max_afu_index);
	}

	rc = ocxl_config_get_actag_info(pdev, &base, &enabled, &supported);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_get_actag_info failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	afu->fn_actag_base = base;
	afu->fn_actag_enabled = enabled;

	ocxl_config_set_actag(pdev, fcfg->dvsec_function_pos, base, enabled);
	dev_dbg(dev, "%s: Function acTag range base=%u enabled=%u\n",
		__func__, base, enabled);

	rc = ocxl_link_setup(pdev, 0, &afu->link_token);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_link_setup failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	rc = ocxl_config_set_TL(pdev, fcfg->dvsec_tl_pos);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_set_TL failed rc=%d\n",
			__func__, rc);
		goto err;
	}
out:
	return rc;
err:
	ocxl_link_release(pdev, afu->link_token);
	goto out;
}

/**
 * ocxlflash_unconfig_fn() - unconfigure the host function
 * @pdev:	PCI device associated with the host.
 * @afu:	AFU associated with the host.
 */
static void ocxlflash_unconfig_fn(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
{
	ocxl_link_release(pdev, afu->link_token);
}

/**
 * ocxlflash_map_mmio() - map the AFU MMIO space
 * @afu: AFU associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_map_mmio(struct ocxl_hw_afu *afu)
{
	struct ocxl_afu_config *acfg = &afu->acfg;
	struct pci_dev *pdev = afu->pdev;
	struct device *dev = afu->dev;
	phys_addr_t gmmio, ppmmio;
	int rc = 0;

	rc = pci_request_region(pdev, acfg->global_mmio_bar, "ocxlflash");
	if (unlikely(rc)) {
		dev_err(dev, "%s: pci_request_region for global failed rc=%d\n",
			__func__, rc);
		goto out;
	}
	gmmio = pci_resource_start(pdev, acfg->global_mmio_bar);
	gmmio += acfg->global_mmio_offset;

	rc = pci_request_region(pdev, acfg->pp_mmio_bar, "ocxlflash");
	if (unlikely(rc)) {
		dev_err(dev, "%s: pci_request_region for pp bar failed rc=%d\n",
			__func__, rc);
		goto err1;
	}
	ppmmio = pci_resource_start(pdev, acfg->pp_mmio_bar);
	ppmmio += acfg->pp_mmio_offset;

	afu->gmmio_virt = ioremap(gmmio, acfg->global_mmio_size);
	if (unlikely(!afu->gmmio_virt)) {
		dev_err(dev, "%s: MMIO mapping failed\n", __func__);
		rc = -ENOMEM;
		goto err2;
	}

	afu->gmmio_phys = gmmio;
	afu->ppmmio_phys = ppmmio;
out:
	return rc;
err2:
	pci_release_region(pdev, acfg->pp_mmio_bar);
err1:
	pci_release_region(pdev, acfg->global_mmio_bar);
	goto out;
}
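
/*
 * Illustrative BAR math for the mapping above (BAR numbers and offsets are
 * hypothetical): with global_mmio_bar=0 and pp_mmio_bar=2, both at offset 0,
 *
 *	gmmio  = pci_resource_start(pdev, 0);	// ioremap'd for the driver
 *	ppmmio = pci_resource_start(pdev, 2);	// kept physical; mapped
 *						// per-context by psa_map()
 */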

/**
 * ocxlflash_config_afu() - configure the host AFU
 * @pdev:	PCI device associated with the host.
 * @afu:	AFU associated with the host.
 *
 * Must be called _after_ host function configuration.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_config_afu(struct pci_dev *pdev, struct ocxl_hw_afu *afu)
{
	struct ocxl_afu_config *acfg = &afu->acfg;
	struct ocxl_fn_config *fcfg = &afu->fcfg;
	struct device *dev = &pdev->dev;
	int count;
	int base;
	int pos;
	int rc = 0;

	/* This HW AFU function does not have any AFUs defined */
	if (!afu->is_present)
		goto out;

	/* Read AFU config at index 0 */
	rc = ocxl_config_read_afu(pdev, fcfg, acfg, 0);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxl_config_read_afu failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	/* Only one AFU per function is supported, so actag_base is same */
	base = afu->fn_actag_base;
	count = min_t(int, acfg->actag_supported, afu->fn_actag_enabled);
	pos = acfg->dvsec_afu_control_pos;

	ocxl_config_set_afu_actag(pdev, pos, base, count);
	dev_dbg(dev, "%s: acTag base=%d enabled=%d\n", __func__, base, count);
	afu->afu_actag_base = base;
	afu->afu_actag_enabled = count;
	afu->max_pasid = 1 << acfg->pasid_supported_log;

	ocxl_config_set_afu_pasid(pdev, pos, 0, acfg->pasid_supported_log);

	rc = ocxlflash_map_mmio(afu);
	if (unlikely(rc)) {
		dev_err(dev, "%s: ocxlflash_map_mmio failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	/* Enable the AFU */
	ocxl_config_set_afu_state(pdev, acfg->dvsec_afu_control_pos, 1);
out:
	return rc;
}
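
/*
 * Illustrative PASID arithmetic (value is hypothetical): with
 * pasid_supported_log = 9, max_pasid = 1 << 9 = 512, so idr_alloc() in
 * ocxlflash_dev_context_init() hands out process elements 0..511.
 */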

/**
 * ocxlflash_create_afu() - create the AFU for OCXL
 * @pdev:	PCI device associated with the host.
 *
 * Return: AFU on success, NULL on failure
 */
static void *ocxlflash_create_afu(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ocxlflash_context *ctx;
	struct ocxl_hw_afu *afu;
	int rc;

	afu = kzalloc(sizeof(*afu), GFP_KERNEL);
	if (unlikely(!afu)) {
		dev_err(dev, "%s: HW AFU allocation failed\n", __func__);
		goto out;
	}

	afu->pdev = pdev;
	afu->dev = dev;
	idr_init(&afu->idr);

	rc = ocxlflash_config_fn(pdev, afu);
	if (unlikely(rc)) {
		dev_err(dev, "%s: Function configuration failed rc=%d\n",
			__func__, rc);
		goto err1;
	}

	rc = ocxlflash_config_afu(pdev, afu);
	if (unlikely(rc)) {
		dev_err(dev, "%s: AFU configuration failed rc=%d\n",
			__func__, rc);
		goto err2;
	}

	ctx = ocxlflash_dev_context_init(pdev, afu);
	if (IS_ERR(ctx)) {
		rc = PTR_ERR(ctx);
		dev_err(dev, "%s: ocxlflash_dev_context_init failed rc=%d\n",
			__func__, rc);
		goto err3;
	}

	afu->ocxl_ctx = ctx;
out:
	return afu;
err3:
	ocxlflash_unconfig_afu(afu);
err2:
	ocxlflash_unconfig_fn(pdev, afu);
err1:
	idr_destroy(&afu->idr);
	kfree(afu);
	afu = NULL;
	goto out;
}

/**
 * ctx_event_pending() - check for any event pending on the context
 * @ctx:	Context to be checked.
 *
 * Return: true if there is an event pending, false if none pending
 */
static inline bool ctx_event_pending(struct ocxlflash_context *ctx)
{
	if (ctx->pending_irq || ctx->pending_fault)
		return true;

	return false;
}

/**
 * afu_poll() - poll the AFU for events on the context
 * @file:	File associated with the adapter context.
 * @poll:	Poll structure from the user.
 *
 * Return: poll mask
 */
static unsigned int afu_poll(struct file *file, struct poll_table_struct *poll)
{
	struct ocxlflash_context *ctx = file->private_data;
	struct device *dev = ctx->hw_afu->dev;
	ulong lock_flags;
	int mask = 0;

	poll_wait(file, &ctx->wq, poll);

	spin_lock_irqsave(&ctx->slock, lock_flags);
	if (ctx_event_pending(ctx))
		mask |= POLLIN | POLLRDNORM;
	else if (ctx->state == CLOSED)
		mask |= POLLERR;
	spin_unlock_irqrestore(&ctx->slock, lock_flags);

	dev_dbg(dev, "%s: Poll wait completed for pe %i mask %i\n",
		__func__, ctx->pe, mask);

	return mask;
}
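
/*
 * Illustrative userspace view of the poll op (sketch, assuming ctx_fd was
 * obtained through the DK_CXLFLASH_ATTACH ioctl):
 *
 *	struct pollfd pfd = { .fd = ctx_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		read(ctx_fd, &event, sizeof(event));	// fetch a cxl_event
 */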

/**
 * afu_read() - perform a read on the context for any event
 * @file:	File associated with the adapter context.
 * @buf:	Buffer to receive the data.
 * @count:	Size of buffer (maximum bytes that can be read).
 * @off:	Offset.
 *
 * Return: size of the data read on success, -errno on failure
 */
static ssize_t afu_read(struct file *file, char __user *buf, size_t count,
			loff_t *off)
{
	struct ocxlflash_context *ctx = file->private_data;
	struct device *dev = ctx->hw_afu->dev;
	struct cxl_event event;
	ulong lock_flags;
	ssize_t esize;
	ssize_t rc;
	int bit;
	DEFINE_WAIT(event_wait);

	if (*off != 0) {
		dev_err(dev, "%s: Non-zero offset not supported, off=%lld\n",
			__func__, *off);
		rc = -EINVAL;
		goto out;
	}

	spin_lock_irqsave(&ctx->slock, lock_flags);

	for (;;) {
		prepare_to_wait(&ctx->wq, &event_wait, TASK_INTERRUPTIBLE);

		if (ctx_event_pending(ctx) || (ctx->state == CLOSED))
			break;

		if (file->f_flags & O_NONBLOCK) {
			dev_err(dev, "%s: File cannot be blocked on I/O\n",
				__func__);
			rc = -EAGAIN;
			goto err;
		}

		if (signal_pending(current)) {
			dev_err(dev, "%s: Signal pending on the process\n",
				__func__);
			rc = -ERESTARTSYS;
			goto err;
		}

		spin_unlock_irqrestore(&ctx->slock, lock_flags);
		schedule();
		spin_lock_irqsave(&ctx->slock, lock_flags);
	}

	finish_wait(&ctx->wq, &event_wait);

	memset(&event, 0, sizeof(event));
	event.header.process_element = ctx->pe;
	event.header.size = sizeof(struct cxl_event_header);
	if (ctx->pending_irq) {
		esize = sizeof(struct cxl_event_afu_interrupt);
		event.header.size += esize;
		event.header.type = CXL_EVENT_AFU_INTERRUPT;

		bit = find_first_bit(&ctx->irq_bitmap, ctx->num_irqs);
		clear_bit(bit, &ctx->irq_bitmap);
		event.irq.irq = bit + 1;
		if (bitmap_empty(&ctx->irq_bitmap, ctx->num_irqs))
			ctx->pending_irq = false;
	} else if (ctx->pending_fault) {
		event.header.size += sizeof(struct cxl_event_data_storage);
		event.header.type = CXL_EVENT_DATA_STORAGE;
		event.fault.addr = ctx->fault_addr;
		event.fault.dsisr = ctx->fault_dsisr;
		ctx->pending_fault = false;
	}

	spin_unlock_irqrestore(&ctx->slock, lock_flags);

	if (copy_to_user(buf, &event, event.header.size)) {
		dev_err(dev, "%s: copy_to_user failed\n", __func__);
		rc = -EFAULT;
		goto out;
	}

	rc = event.header.size;
out:
	return rc;
err:
	finish_wait(&ctx->wq, &event_wait);
	spin_unlock_irqrestore(&ctx->slock, lock_flags);
	goto out;
}
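
/*
 * Illustrative userspace decode of the event stream (sketch; handle_irq()
 * and handle_fault() are hypothetical): each read() returns a single
 * struct cxl_event whose header describes the payload.
 *
 *	struct cxl_event ev;
 *
 *	if (read(ctx_fd, &ev, sizeof(ev)) > 0) {
 *		if (ev.header.type == CXL_EVENT_AFU_INTERRUPT)
 *			handle_irq(ev.irq.irq);		// 1-based AFU irq
 *		else if (ev.header.type == CXL_EVENT_DATA_STORAGE)
 *			handle_fault(ev.fault.addr);	// translation fault
 *	}
 */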

/**
 * afu_release() - release and free the context
 * @inode:	File inode pointer.
 * @file:	File associated with the context.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_release(struct inode *inode, struct file *file)
{
	struct ocxlflash_context *ctx = file->private_data;
	int i;

	/* Unmap and free the interrupts associated with the context */
	for (i = ctx->num_irqs - 1; i >= 0; i--)
		afu_unmap_irq(0, ctx, i, ctx);
	free_afu_irqs(ctx);

	return ocxlflash_release_context(ctx);
}

/**
 * ocxlflash_mmap_fault() - mmap fault handler
 * @vmf:	VM fault associated with current fault.
 *
 * Return: VM_FAULT_NOPAGE on success, VM_FAULT_SIGBUS on failure
 */
static vm_fault_t ocxlflash_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ocxlflash_context *ctx = vma->vm_file->private_data;
	struct device *dev = ctx->hw_afu->dev;
	u64 mmio_area, offset;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= ctx->psn_size)
		return VM_FAULT_SIGBUS;

	mutex_lock(&ctx->state_mutex);
	if (ctx->state != STARTED) {
		dev_err(dev, "%s: Context not started, state=%d\n",
			__func__, ctx->state);
		mutex_unlock(&ctx->state_mutex);
		return VM_FAULT_SIGBUS;
	}
	mutex_unlock(&ctx->state_mutex);

	mmio_area = ctx->psn_phys;
	mmio_area += offset;

	return vmf_insert_pfn(vma, vmf->address, mmio_area >> PAGE_SHIFT);
}

static const struct vm_operations_struct ocxlflash_vmops = {
	.fault = ocxlflash_mmap_fault,
};

/**
 * afu_mmap() - map the fault handler operations
 * @file:	File associated with the context.
 * @vma:	VM area associated with mapping.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ocxlflash_context *ctx = file->private_data;

	if ((vma_pages(vma) + vma->vm_pgoff) >
	    (ctx->psn_size >> PAGE_SHIFT))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &ocxlflash_vmops;
	return 0;
}
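
/*
 * Illustrative userspace mapping of the context PSA (sketch): the fault
 * handler above inserts MMIO pfns on demand, so a plain mmap() of the
 * context fd suffices.
 *
 *	void *psa = mmap(NULL, psn_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, ctx_fd, 0);
 *	if (psa == MAP_FAILED)
 *		return -1;	// e.g. requested range exceeds the PSA
 */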

static const struct file_operations ocxl_afu_fops = {
	.owner		= THIS_MODULE,
	.poll		= afu_poll,
	.read		= afu_read,
	.release	= afu_release,
	.mmap		= afu_mmap,
};

#define PATCH_FOPS(NAME)						\
	do { if (!fops->NAME) fops->NAME = ocxl_afu_fops.NAME; } while (0)

/**
 * ocxlflash_get_fd() - get file descriptor for an adapter context
 * @ctx_cookie:	Adapter context.
 * @fops:	File operations to be associated.
 * @fd:		File descriptor to be returned back.
 *
 * Return: pointer to the file on success, ERR_PTR on failure
 */
static struct file *ocxlflash_get_fd(void *ctx_cookie,
				     struct file_operations *fops, int *fd)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct device *dev = ctx->hw_afu->dev;
	struct file *file;
	int flags, fdtmp;
	int rc = 0;
	char *name = NULL;

	/* Only allow one fd per context */
	if (ctx->mapping) {
		dev_err(dev, "%s: Context is already mapped to an fd\n",
			__func__);
		rc = -EEXIST;
		goto err1;
	}

	flags = O_RDWR | O_CLOEXEC;

	/* This code is similar to anon_inode_getfd() */
	rc = get_unused_fd_flags(flags);
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: get_unused_fd_flags failed rc=%d\n",
			__func__, rc);
		goto err1;
	}
	fdtmp = rc;

	/* Patch the file ops that are not defined */
	if (fops) {
		PATCH_FOPS(poll);
		PATCH_FOPS(read);
		PATCH_FOPS(release);
		PATCH_FOPS(mmap);
	} else /* Use default ops */
		fops = (struct file_operations *)&ocxl_afu_fops;

	name = kasprintf(GFP_KERNEL, "ocxlflash:%d", ctx->pe);
	file = ocxlflash_getfile(dev, name, fops, ctx, flags);
	kfree(name);
	if (IS_ERR(file)) {
		rc = PTR_ERR(file);
		dev_err(dev, "%s: ocxlflash_getfile failed rc=%d\n",
			__func__, rc);
		goto err2;
	}

	ctx->mapping = file->f_mapping;
	*fd = fdtmp;
out:
	return file;
err2:
	put_unused_fd(fdtmp);
err1:
	file = ERR_PTR(rc);
	goto out;
}
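
/*
 * Illustrative caller sketch: the cxlflash core pairs this op with
 * fd_install() once the context is ready to be exposed, roughly:
 *
 *	file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *	fd_install(fd, file);	// fd becomes live for userspace
 */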

/**
 * ocxlflash_fops_get_context() - get the context associated with the file
 * @file:	File associated with the adapter context.
 *
 * Return: pointer to the context
 */
static void *ocxlflash_fops_get_context(struct file *file)
{
	return file->private_data;
}

/**
 * ocxlflash_afu_irq() - interrupt handler for user contexts
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the context.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t ocxlflash_afu_irq(int irq, void *data)
{
	struct ocxlflash_context *ctx = data;
	struct device *dev = ctx->hw_afu->dev;
	int i;

	dev_dbg(dev, "%s: Interrupt raised for pe %i virq %i\n",
		__func__, ctx->pe, irq);

	for (i = 0; i < ctx->num_irqs; i++) {
		if (ctx->irqs[i].virq == irq)
			break;
	}
	if (unlikely(i >= ctx->num_irqs)) {
		dev_err(dev, "%s: Received AFU IRQ out of range\n", __func__);
		goto out;
	}

	spin_lock(&ctx->slock);
	/*
	 * The bitmap is indexed by the 0-based context interrupt number;
	 * afu_read() translates it to the 1-based number reported to the
	 * user. Using i - 1 here would underflow for interrupt 0.
	 */
	set_bit(i, &ctx->irq_bitmap);
	ctx->pending_irq = true;
	spin_unlock(&ctx->slock);

	wake_up_all(&ctx->wq);
out:
	return IRQ_HANDLED;
}

/**
 * ocxlflash_start_work() - start a user context
 * @ctx_cookie:	Context to be started.
 * @num_irqs:	Number of interrupts requested.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_start_work(void *ctx_cookie, u64 num_irqs)
{
	struct ocxlflash_context *ctx = ctx_cookie;
	struct ocxl_hw_afu *afu = ctx->hw_afu;
	struct device *dev = afu->dev;
	char *name;
	int rc = 0;
	int i;

	rc = alloc_afu_irqs(ctx, num_irqs);
	if (unlikely(rc < 0)) {
		dev_err(dev, "%s: alloc_afu_irqs failed rc=%d\n", __func__, rc);
		goto out;
	}

	for (i = 0; i < num_irqs; i++) {
		name = kasprintf(GFP_KERNEL, "ocxlflash-%s-pe%i-%i",
				 dev_name(dev), ctx->pe, i);
		rc = afu_map_irq(0, ctx, i, ocxlflash_afu_irq, ctx, name);
		kfree(name);
		if (unlikely(rc < 0)) {
			dev_err(dev, "%s: afu_map_irq failed rc=%d\n",
				__func__, rc);
			goto err;
		}
	}

	rc = start_context(ctx);
	if (unlikely(rc)) {
		dev_err(dev, "%s: start_context failed rc=%d\n", __func__, rc);
		goto err;
	}
out:
	return rc;
err:
	for (i = i - 1; i >= 0; i--)
		afu_unmap_irq(0, ctx, i, ctx);
	free_afu_irqs(ctx);
	goto out;
}

/**
 * ocxlflash_fd_mmap() - mmap handler for adapter file descriptor
 * @file:	File installed with adapter file descriptor.
 * @vma:	VM area associated with mapping.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_fd_mmap(struct file *file, struct vm_area_struct *vma)
{
	return afu_mmap(file, vma);
}

/**
 * ocxlflash_fd_release() - release the context associated with the file
 * @inode:	File inode pointer.
 * @file:	File associated with the adapter context.
 *
 * Return: 0 on success, -errno on failure
 */
static int ocxlflash_fd_release(struct inode *inode, struct file *file)
{
	return afu_release(inode, file);
}

/* Backend ops to ocxlflash services */
const struct cxlflash_backend_ops cxlflash_ocxl_ops = {
	.module			= THIS_MODULE,
	.psa_map		= ocxlflash_psa_map,
	.psa_unmap		= ocxlflash_psa_unmap,
	.process_element	= ocxlflash_process_element,
	.map_afu_irq		= ocxlflash_map_afu_irq,
	.unmap_afu_irq		= ocxlflash_unmap_afu_irq,
	.get_irq_objhndl	= ocxlflash_get_irq_objhndl,
	.start_context		= ocxlflash_start_context,
	.stop_context		= ocxlflash_stop_context,
	.afu_reset		= ocxlflash_afu_reset,
	.set_master		= ocxlflash_set_master,
	.get_context		= ocxlflash_get_context,
	.dev_context_init	= ocxlflash_dev_context_init,
	.release_context	= ocxlflash_release_context,
	.perst_reloads_same_image = ocxlflash_perst_reloads_same_image,
	.read_adapter_vpd	= ocxlflash_read_adapter_vpd,
	.allocate_afu_irqs	= ocxlflash_allocate_afu_irqs,
	.free_afu_irqs		= ocxlflash_free_afu_irqs,
	.create_afu		= ocxlflash_create_afu,
	.destroy_afu		= ocxlflash_destroy_afu,
	.get_fd			= ocxlflash_get_fd,
	.fops_get_context	= ocxlflash_fops_get_context,
	.start_work		= ocxlflash_start_work,
	.fd_mmap		= ocxlflash_fd_mmap,
	.fd_release		= ocxlflash_fd_release,
};