xref: /openbmc/linux/drivers/scsi/cxlflash/main.c (revision 8730046c)
1 /*
2  * CXL Flash Device Driver
3  *
4  * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
5  *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
6  *
7  * Copyright (C) 2015 IBM Corporation
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License
11  * as published by the Free Software Foundation; either version
12  * 2 of the License, or (at your option) any later version.
13  */
14 
15 #include <linux/delay.h>
16 #include <linux/list.h>
17 #include <linux/module.h>
18 #include <linux/pci.h>
19 
20 #include <asm/unaligned.h>
21 
22 #include <misc/cxl.h>
23 
24 #include <scsi/scsi_cmnd.h>
25 #include <scsi/scsi_host.h>
26 #include <uapi/scsi/cxlflash_ioctl.h>
27 
28 #include "main.h"
29 #include "sislite.h"
30 #include "common.h"
31 
32 MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
33 MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
34 MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
35 MODULE_LICENSE("GPL");
36 
37 /**
38  * process_cmd_err() - command error handler
39  * @cmd:	AFU command that experienced the error.
40  * @scp:	SCSI command associated with the AFU command in error.
41  *
42  * Translates error bits from AFU command to SCSI command results.
43  */
44 static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
45 {
46 	struct sisl_ioarcb *ioarcb;
47 	struct sisl_ioasa *ioasa;
48 	u32 resid;
49 
50 	if (unlikely(!cmd))
51 		return;
52 
53 	ioarcb = &(cmd->rcb);
54 	ioasa = &(cmd->sa);
55 
56 	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
57 		resid = ioasa->resid;
58 		scsi_set_resid(scp, resid);
59 		pr_debug("%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
60 			 __func__, cmd, scp, resid);
61 	}
62 
63 	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
64 		pr_debug("%s: cmd overrun cmd = %p scp = %p\n",
65 			 __func__, cmd, scp);
66 		scp->result = (DID_ERROR << 16);
67 	}
68 
69 	pr_debug("%s: cmd failed afu_rc=%d scsi_rc=%d fc_rc=%d "
70 		 "afu_extra=0x%X, scsi_extra=0x%X, fc_extra=0x%X\n",
71 		 __func__, ioasa->rc.afu_rc, ioasa->rc.scsi_rc,
72 		 ioasa->rc.fc_rc, ioasa->afu_extra, ioasa->scsi_extra,
73 		 ioasa->fc_extra);
74 
75 	if (ioasa->rc.scsi_rc) {
76 		/* We have a SCSI status */
77 		if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
78 			memcpy(scp->sense_buffer, ioasa->sense_data,
79 			       SISL_SENSE_DATA_LEN);
80 			scp->result = ioasa->rc.scsi_rc;
81 		} else
82 			scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
83 	}
84 
85 	/*
86 	 * We encountered an error. Set scp->result based on nature
87 	 * of error.
88 	 */
89 	if (ioasa->rc.fc_rc) {
90 		/* We have an FC status */
91 		switch (ioasa->rc.fc_rc) {
92 		case SISL_FC_RC_LINKDOWN:
93 			scp->result = (DID_REQUEUE << 16);
94 			break;
95 		case SISL_FC_RC_RESID:
96 			/* This indicates an FCP resid underrun */
97 			if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
98 				/* If the SISL_RC_FLAGS_OVERRUN flag was set,
99 				 * then we will handle this error else where.
100 				 * then we will handle this error elsewhere.
101 				 * If not, we must handle it here.
102 				 */
103 				scp->result = (DID_ERROR << 16);
104 			}
105 			break;
106 		case SISL_FC_RC_RESIDERR:
107 			/* Resid mismatch between adapter and device */
108 		case SISL_FC_RC_TGTABORT:
109 		case SISL_FC_RC_ABORTOK:
110 		case SISL_FC_RC_ABORTFAIL:
111 		case SISL_FC_RC_NOLOGI:
112 		case SISL_FC_RC_ABORTPEND:
113 		case SISL_FC_RC_WRABORTPEND:
114 		case SISL_FC_RC_NOEXP:
115 		case SISL_FC_RC_INUSE:
116 			scp->result = (DID_ERROR << 16);
117 			break;
118 		}
119 	}
120 
121 	if (ioasa->rc.afu_rc) {
122 		/* We have an AFU error */
123 		switch (ioasa->rc.afu_rc) {
124 		case SISL_AFU_RC_NO_CHANNELS:
125 			scp->result = (DID_NO_CONNECT << 16);
126 			break;
127 		case SISL_AFU_RC_DATA_DMA_ERR:
128 			switch (ioasa->afu_extra) {
129 			case SISL_AFU_DMA_ERR_PAGE_IN:
130 				/* Retry */
131 				scp->result = (DID_IMM_RETRY << 16);
132 				break;
133 			case SISL_AFU_DMA_ERR_INVALID_EA:
134 			default:
135 				scp->result = (DID_ERROR << 16);
136 			}
137 			break;
138 		case SISL_AFU_RC_OUT_OF_DATA_BUFS:
139 			/* Retry */
140 			scp->result = (DID_ALLOC_FAILURE << 16);
141 			break;
142 		default:
143 			scp->result = (DID_ERROR << 16);
144 		}
145 	}
146 }
147 
148 /**
149  * cmd_complete() - command completion handler
150  * @cmd:	AFU command that has completed.
151  *
152  * Hands a command that has either completed or timed out back to the SCSI
153  * stack when an associated SCSI command exists (cmd->scp populated). For
154  * internal commands, signals the waiter via the command's completion event.
155  */
156 static void cmd_complete(struct afu_cmd *cmd)
157 {
158 	struct scsi_cmnd *scp;
159 	ulong lock_flags;
160 	struct afu *afu = cmd->parent;
161 	struct cxlflash_cfg *cfg = afu->parent;
162 	bool cmd_is_tmf;
163 
164 	if (cmd->scp) {
165 		scp = cmd->scp;
166 		if (unlikely(cmd->sa.ioasc))
167 			process_cmd_err(cmd, scp);
168 		else
169 			scp->result = (DID_OK << 16);
170 
171 		cmd_is_tmf = cmd->cmd_tmf;
172 
173 		pr_debug_ratelimited("%s: calling scsi_done scp=%p result=%X "
174 				     "ioasc=%d\n", __func__, scp, scp->result,
175 				     cmd->sa.ioasc);
176 
177 		scsi_dma_unmap(scp);
178 		scp->scsi_done(scp);
179 
180 		if (cmd_is_tmf) {
181 			spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
182 			cfg->tmf_active = false;
183 			wake_up_all_locked(&cfg->tmf_waitq);
184 			spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
185 		}
186 	} else
187 		complete(&cmd->cevent);
188 }
189 
190 /**
191  * context_reset_ioarrin() - reset command owner context via IOARRIN register
192  * @cmd:	AFU command that timed out.
193  */
194 static void context_reset_ioarrin(struct afu_cmd *cmd)
195 {
196 	int nretry = 0;
197 	u64 rrin = 0x1;
198 	struct afu *afu = cmd->parent;
199 	struct cxlflash_cfg *cfg = afu->parent;
200 	struct device *dev = &cfg->dev->dev;
201 
202 	pr_debug("%s: cmd=%p\n", __func__, cmd);
203 
204 	writeq_be(rrin, &afu->host_map->ioarrin);
205 	do {
206 		rrin = readq_be(&afu->host_map->ioarrin);
207 		if (rrin != 0x1)
208 			break;
209 		/* Double delay each time */
210 		udelay(1 << nretry);
211 	} while (nretry++ < MC_ROOM_RETRY_CNT);
212 
213 	dev_dbg(dev, "%s: returning rrin=0x%016llX nretry=%d\n",
214 		__func__, rrin, nretry);
215 }
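
/*
 * Note on the polling loop above (illustrative math): the per-poll delay
 * doubles on each pass (1us, 2us, 4us, ...), so with MC_ROOM_RETRY_CNT
 * retries the total busy-wait is bounded by roughly
 * 2^(MC_ROOM_RETRY_CNT + 1) microseconds before the wait for the reset
 * acknowledgment is abandoned.
 */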
216 
217 /**
218  * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
219  * @afu:	AFU associated with the host.
220  * @cmd:	AFU command to send.
221  *
222  * Return:
223  *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
224  */
225 static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
226 {
227 	struct cxlflash_cfg *cfg = afu->parent;
228 	struct device *dev = &cfg->dev->dev;
229 	int rc = 0;
230 	s64 room;
231 	ulong lock_flags;
232 
233 	/*
234 	 * To avoid the performance penalty of MMIO, spread the update of
235 	 * 'room' over multiple commands.
236 	 */
237 	spin_lock_irqsave(&afu->rrin_slock, lock_flags);
238 	if (--afu->room < 0) {
239 		room = readq_be(&afu->host_map->cmd_room);
240 		if (room <= 0) {
241 			dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
242 					    "0x%02X, room=0x%016llX\n",
243 					    __func__, cmd->rcb.cdb[0], room);
244 			afu->room = 0;
245 			rc = SCSI_MLQUEUE_HOST_BUSY;
246 			goto out;
247 		}
248 		afu->room = room - 1;
249 	}
250 
251 	writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
252 out:
253 	spin_unlock_irqrestore(&afu->rrin_slock, lock_flags);
254 	pr_devel("%s: cmd=%p len=%d ea=%p rc=%d\n", __func__, cmd,
255 		 cmd->rcb.data_len, (void *)cmd->rcb.data_ea, rc);
256 	return rc;
257 }
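
/*
 * Worked example of the amortization above (illustrative): if the MMIO
 * read of cmd_room returns 16, afu->room is primed such that the next 15
 * submissions decrement only the cached count; a new readq_be() is paid
 * only by the submission that drives the cached count negative.
 */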
258 
259 /**
260  * wait_resp() - polls for a response or timeout to a sent AFU command
261  * @afu:	AFU associated with the host.
262  * @cmd:	AFU command that was sent.
263  *
264  * Return:
265  *	0 on success, -1 on timeout/error
266  */
267 static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
268 {
269 	int rc = 0;
270 	ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);
271 
272 	timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
273 	if (!timeout) {
274 		afu->context_reset(cmd);
275 		rc = -1;
276 	}
277 
278 	if (unlikely(cmd->sa.ioasc != 0)) {
279 		pr_err("%s: CMD 0x%X failed, IOASC: flags 0x%X, afu_rc 0x%X, "
280 		       "scsi_rc 0x%X, fc_rc 0x%X\n", __func__, cmd->rcb.cdb[0],
281 		       cmd->sa.rc.flags, cmd->sa.rc.afu_rc, cmd->sa.rc.scsi_rc,
282 		       cmd->sa.rc.fc_rc);
283 		rc = -1;
284 	}
285 
286 	return rc;
287 }
288 
289 /**
290  * send_tmf() - sends a Task Management Function (TMF)
291  * @afu:	AFU to checkout from.
292  * @scp:	SCSI command from stack.
293  * @tmfcmd:	TMF command to send.
294  *
295  * Return:
296  *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
297  */
298 static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
299 {
300 	u32 port_sel = scp->device->channel + 1;
301 	struct Scsi_Host *host = scp->device->host;
302 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
303 	struct afu_cmd *cmd = sc_to_afucz(scp);
304 	struct device *dev = &cfg->dev->dev;
305 	ulong lock_flags;
306 	int rc = 0;
307 	ulong to;
308 
309 	/* When a Task Management Function is active, do not send another */
310 	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
311 	if (cfg->tmf_active)
312 		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
313 						  !cfg->tmf_active,
314 						  cfg->tmf_slock);
315 	cfg->tmf_active = true;
316 	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
317 
318 	cmd->scp = scp;
319 	cmd->parent = afu;
320 	cmd->cmd_tmf = true;
321 
322 	cmd->rcb.ctx_id = afu->ctx_hndl;
323 	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
324 	cmd->rcb.port_sel = port_sel;
325 	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
326 	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
327 			      SISL_REQ_FLAGS_SUP_UNDERRUN |
328 			      SISL_REQ_FLAGS_TMF_CMD);
329 	memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));
330 
331 	rc = afu->send_cmd(afu, cmd);
332 	if (unlikely(rc)) {
333 		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
334 		cfg->tmf_active = false;
335 		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
336 		goto out;
337 	}
338 
339 	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
340 	to = msecs_to_jiffies(5000);
341 	to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
342 						       !cfg->tmf_active,
343 						       cfg->tmf_slock,
344 						       to);
345 	if (!to) {
346 		cfg->tmf_active = false;
347 		dev_err(dev, "%s: TMF timed out!\n", __func__);
348 		rc = -1;
349 	}
350 	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
351 out:
352 	return rc;
353 }
354 
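/**
 * afu_unmap() - kref release handler that unmaps the AFU MMIO space
 * @ref:	kref embedded in the AFU whose reference count dropped to zero.
 *
 * Invoked via kref_put() when the last reference to the mapping is released.
 */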
355 static void afu_unmap(struct kref *ref)
356 {
357 	struct afu *afu = container_of(ref, struct afu, mapcount);
358 
359 	if (likely(afu->afu_map)) {
360 		cxl_psa_unmap((void __iomem *)afu->afu_map);
361 		afu->afu_map = NULL;
362 	}
363 }
364 
365 /**
366  * cxlflash_driver_info() - information handler for this host driver
367  * @host:	SCSI host associated with device.
368  *
369  * Return: A string describing the device.
370  */
371 static const char *cxlflash_driver_info(struct Scsi_Host *host)
372 {
373 	return CXLFLASH_ADAPTER_NAME;
374 }
375 
376 /**
377  * cxlflash_queuecommand() - sends a mid-layer request
378  * @host:	SCSI host associated with device.
379  * @scp:	SCSI command to send.
380  *
381  * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
382  */
383 static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
384 {
385 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
386 	struct afu *afu = cfg->afu;
387 	struct device *dev = &cfg->dev->dev;
388 	struct afu_cmd *cmd = sc_to_afucz(scp);
389 	struct scatterlist *sg = scsi_sglist(scp);
390 	u32 port_sel = scp->device->channel + 1;
391 	u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
392 	ulong lock_flags;
393 	int nseg = 0;
394 	int rc = 0;
395 	int kref_got = 0;
396 
397 	dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
398 			    "cdb=(%08X-%08X-%08X-%08X)\n",
399 			    __func__, scp, host->host_no, scp->device->channel,
400 			    scp->device->id, scp->device->lun,
401 			    get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
402 			    get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
403 			    get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
404 			    get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
405 
406 	/*
407 	 * If a Task Management Function is active, wait for it to complete
408 	 * before continuing with regular commands.
409 	 */
410 	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
411 	if (cfg->tmf_active) {
412 		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
413 		rc = SCSI_MLQUEUE_HOST_BUSY;
414 		goto out;
415 	}
416 	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
417 
418 	switch (cfg->state) {
419 	case STATE_RESET:
420 		dev_dbg_ratelimited(dev, "%s: device is in reset!\n", __func__);
421 		rc = SCSI_MLQUEUE_HOST_BUSY;
422 		goto out;
423 	case STATE_FAILTERM:
424 		dev_dbg_ratelimited(dev, "%s: device has failed!\n", __func__);
425 		scp->result = (DID_NO_CONNECT << 16);
426 		scp->scsi_done(scp);
427 		rc = 0;
428 		goto out;
429 	default:
430 		break;
431 	}
432 
433 	kref_get(&cfg->afu->mapcount);
434 	kref_got = 1;
435 
436 	if (likely(sg)) {
437 		nseg = scsi_dma_map(scp);
438 		if (unlikely(nseg < 0)) {
439 			dev_err(dev, "%s: Fail DMA map!\n", __func__);
440 			rc = SCSI_MLQUEUE_HOST_BUSY;
441 			goto out;
442 		}
443 
444 		cmd->rcb.data_len = sg_dma_len(sg);
445 		cmd->rcb.data_ea = sg_dma_address(sg);
446 	}
447 
448 	cmd->scp = scp;
449 	cmd->parent = afu;
450 
451 	cmd->rcb.ctx_id = afu->ctx_hndl;
452 	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
453 	cmd->rcb.port_sel = port_sel;
454 	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
455 
456 	if (scp->sc_data_direction == DMA_TO_DEVICE)
457 		req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
458 
459 	cmd->rcb.req_flags = req_flags;
460 	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));
461 
462 	rc = afu->send_cmd(afu, cmd);
463 	if (unlikely(rc))
464 		scsi_dma_unmap(scp);
465 out:
466 	if (kref_got)
467 		kref_put(&afu->mapcount, afu_unmap);
468 	pr_devel("%s: returning rc=%d\n", __func__, rc);
469 	return rc;
470 }
471 
472 /**
473  * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
474  * @cfg:	Internal structure associated with the host.
475  */
476 static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
477 {
478 	struct pci_dev *pdev = cfg->dev;
479 
480 	if (pci_channel_offline(pdev))
481 		wait_event_timeout(cfg->reset_waitq,
482 				   !pci_channel_offline(pdev),
483 				   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
484 }
485 
486 /**
487  * free_mem() - free memory associated with the AFU
488  * @cfg:	Internal structure associated with the host.
489  */
490 static void free_mem(struct cxlflash_cfg *cfg)
491 {
492 	struct afu *afu = cfg->afu;
493 
494 	if (cfg->afu) {
495 		free_pages((ulong)afu, get_order(sizeof(struct afu)));
496 		cfg->afu = NULL;
497 	}
498 }
499 
500 /**
501  * stop_afu() - stops the AFU command timers and unmaps the MMIO space
502  * @cfg:	Internal structure associated with the host.
503  *
504  * Safe to call with AFU in a partially allocated/initialized state.
505  *
506  * Waits for any active internal AFU commands to time out and then unmaps
507  * the MMIO space.
508  */
509 static void stop_afu(struct cxlflash_cfg *cfg)
510 {
511 	struct afu *afu = cfg->afu;
512 
513 	if (likely(afu)) {
514 		while (atomic_read(&afu->cmds_active))
515 			ssleep(1);
516 		if (likely(afu->afu_map)) {
517 			cxl_psa_unmap((void __iomem *)afu->afu_map);
518 			afu->afu_map = NULL;
519 		}
520 		kref_put(&afu->mapcount, afu_unmap);
521 	}
522 }
523 
524 /**
525  * term_intr() - disables all AFU interrupts
526  * @cfg:	Internal structure associated with the host.
527  * @level:	Depth of allocation, where to begin waterfall tear down.
528  *
529  * Safe to call with AFU/MC in partially allocated/initialized state.
530  */
531 static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level)
532 {
533 	struct afu *afu = cfg->afu;
534 	struct device *dev = &cfg->dev->dev;
535 
536 	if (!afu || !cfg->mcctx) {
537 		dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
538 		return;
539 	}
540 
541 	switch (level) {
542 	case UNMAP_THREE:
543 		cxl_unmap_afu_irq(cfg->mcctx, 3, afu);
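		/* fall through */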
544 	case UNMAP_TWO:
545 		cxl_unmap_afu_irq(cfg->mcctx, 2, afu);
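		/* fall through */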
546 	case UNMAP_ONE:
547 		cxl_unmap_afu_irq(cfg->mcctx, 1, afu);
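		/* fall through */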
548 	case FREE_IRQ:
549 		cxl_free_afu_irqs(cfg->mcctx);
550 		/* fall through */
551 	case UNDO_NOOP:
552 		/* No action required */
553 		break;
554 	}
555 }
556 
557 /**
558  * term_mc() - terminates the master context
559  * @cfg:	Internal structure associated with the host.
561  *
562  * Safe to call with AFU/MC in partially allocated/initialized state.
563  */
564 static void term_mc(struct cxlflash_cfg *cfg)
565 {
566 	int rc = 0;
567 	struct afu *afu = cfg->afu;
568 	struct device *dev = &cfg->dev->dev;
569 
570 	if (!afu || !cfg->mcctx) {
571 		dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
572 		return;
573 	}
574 
575 	rc = cxl_stop_context(cfg->mcctx);
576 	WARN_ON(rc);
577 	cfg->mcctx = NULL;
578 }
579 
580 /**
581  * term_afu() - terminates the AFU
582  * @cfg:	Internal structure associated with the host.
583  *
584  * Safe to call with AFU/MC in partially allocated/initialized state.
585  */
586 static void term_afu(struct cxlflash_cfg *cfg)
587 {
588 	/*
589 	 * Tear down is carefully orchestrated to ensure
590 	 * no interrupts can come in when the problem state
591 	 * area is unmapped.
592 	 *
593 	 * 1) Disable all AFU interrupts
594 	 * 2) Unmap the problem state area
595 	 * 3) Stop the master context
596 	 */
597 	term_intr(cfg, UNMAP_THREE);
598 	if (cfg->afu)
599 		stop_afu(cfg);
600 
601 	term_mc(cfg);
602 
603 	pr_debug("%s: returning\n", __func__);
604 }
605 
606 /**
607  * notify_shutdown() - notifies device of pending shutdown
608  * @cfg:	Internal structure associated with the host.
609  * @wait:	Whether to wait for shutdown processing to complete.
610  *
611  * This function will notify the AFU that the adapter is being shutdown
612  * and will wait for shutdown processing to complete if wait is true.
613  * This notification should flush pending I/Os to the device and halt
614  * further I/Os until the next AFU reset is issued and device restarted.
615  */
616 static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
617 {
618 	struct afu *afu = cfg->afu;
619 	struct device *dev = &cfg->dev->dev;
620 	struct sisl_global_map __iomem *global;
621 	struct dev_dependent_vals *ddv;
622 	u64 reg, status;
623 	int i, retry_cnt = 0;
624 
625 	ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
626 	if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
627 		return;
628 
629 	if (!afu || !afu->afu_map) {
630 		dev_dbg(dev, "%s: The problem state area is not mapped\n",
631 			__func__);
632 		return;
633 	}
634 
635 	global = &afu->afu_map->global;
636 
637 	/* Notify AFU */
638 	for (i = 0; i < NUM_FC_PORTS; i++) {
639 		reg = readq_be(&global->fc_regs[i][FC_CONFIG2 / 8]);
640 		reg |= SISL_FC_SHUTDOWN_NORMAL;
641 		writeq_be(reg, &global->fc_regs[i][FC_CONFIG2 / 8]);
642 	}
643 
644 	if (!wait)
645 		return;
646 
647 	/* Wait up to 1.5 seconds for shutdown processing to complete */
648 	for (i = 0; i < NUM_FC_PORTS; i++) {
649 		retry_cnt = 0;
650 		while (true) {
651 			status = readq_be(&global->fc_regs[i][FC_STATUS / 8]);
652 			if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
653 				break;
654 			if (++retry_cnt >= MC_RETRY_CNT) {
655 				dev_dbg(dev, "%s: port %d shutdown processing "
656 					"not yet completed\n", __func__, i);
657 				break;
658 			}
659 			msleep(100 * retry_cnt);
660 		}
661 	}
662 }
663 
664 /**
665  * cxlflash_remove() - PCI entry point to tear down host
666  * @pdev:	PCI device associated with the host.
667  *
668  * Safe to use as a cleanup in partially allocated/initialized state.
669  */
670 static void cxlflash_remove(struct pci_dev *pdev)
671 {
672 	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
673 	ulong lock_flags;
674 
675 	if (!pci_is_enabled(pdev)) {
676 		pr_debug("%s: Device is disabled\n", __func__);
677 		return;
678 	}
679 
680 	/* If a Task Management Function is active, wait for it to complete
681 	 * before continuing with remove.
682 	 */
683 	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
684 	if (cfg->tmf_active)
685 		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
686 						  !cfg->tmf_active,
687 						  cfg->tmf_slock);
688 	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
689 
690 	/* Notify AFU and wait for shutdown processing to complete */
691 	notify_shutdown(cfg, true);
692 
693 	cfg->state = STATE_FAILTERM;
694 	cxlflash_stop_term_user_contexts(cfg);
695 
696 	switch (cfg->init_state) {
697 	case INIT_STATE_SCSI:
698 		cxlflash_term_local_luns(cfg);
699 		scsi_remove_host(cfg->host);
700 		/* fall through */
701 	case INIT_STATE_AFU:
702 		cancel_work_sync(&cfg->work_q);
703 		term_afu(cfg);
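		/* fall through */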
704 	case INIT_STATE_PCI:
705 		pci_disable_device(pdev);
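		/* fall through */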
706 	case INIT_STATE_NONE:
707 		free_mem(cfg);
708 		scsi_host_put(cfg->host);
709 		break;
710 	}
711 
712 	pr_debug("%s: returning\n", __func__);
713 }
714 
715 /**
716  * alloc_mem() - allocates the AFU and its command pool
717  * @cfg:	Internal structure associated with the host.
718  *
719  * A partially allocated state remains on failure.
720  *
721  * Return:
722  *	0 on success
723  *	-ENOMEM on failure to allocate memory
724  */
725 static int alloc_mem(struct cxlflash_cfg *cfg)
726 {
727 	int rc = 0;
728 	struct device *dev = &cfg->dev->dev;
729 
730 	/* AFU is ~12k, i.e. only one 64k page or up to four 4k pages */
731 	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
732 					    get_order(sizeof(struct afu)));
733 	if (unlikely(!cfg->afu)) {
734 		dev_err(dev, "%s: cannot get %d free pages\n",
735 			__func__, get_order(sizeof(struct afu)));
736 		rc = -ENOMEM;
737 		goto out;
738 	}
739 	cfg->afu->parent = cfg;
740 	cfg->afu->afu_map = NULL;
741 out:
742 	return rc;
743 }
744 
745 /**
746  * init_pci() - initializes the host as a PCI device
747  * @cfg:	Internal structure associated with the host.
748  *
749  * Return: 0 on success, -errno on failure
750  */
751 static int init_pci(struct cxlflash_cfg *cfg)
752 {
753 	struct pci_dev *pdev = cfg->dev;
754 	int rc = 0;
755 
756 	rc = pci_enable_device(pdev);
757 	if (rc || pci_channel_offline(pdev)) {
758 		if (pci_channel_offline(pdev)) {
759 			cxlflash_wait_for_pci_err_recovery(cfg);
760 			rc = pci_enable_device(pdev);
761 		}
762 
763 		if (rc) {
764 			dev_err(&pdev->dev, "%s: Cannot enable adapter\n",
765 				__func__);
766 			cxlflash_wait_for_pci_err_recovery(cfg);
767 			goto out;
768 		}
769 	}
770 
771 out:
772 	pr_debug("%s: returning rc=%d\n", __func__, rc);
773 	return rc;
774 }
775 
776 /**
777  * init_scsi() - adds the host to the SCSI stack and kicks off host scan
778  * @cfg:	Internal structure associated with the host.
779  *
780  * Return: 0 on success, -errno on failure
781  */
782 static int init_scsi(struct cxlflash_cfg *cfg)
783 {
784 	struct pci_dev *pdev = cfg->dev;
785 	int rc = 0;
786 
787 	rc = scsi_add_host(cfg->host, &pdev->dev);
788 	if (rc) {
789 		dev_err(&pdev->dev, "%s: scsi_add_host failed (rc=%d)\n",
790 			__func__, rc);
791 		goto out;
792 	}
793 
794 	scsi_scan_host(cfg->host);
795 
796 out:
797 	pr_debug("%s: returning rc=%d\n", __func__, rc);
798 	return rc;
799 }
800 
801 /**
802  * set_port_online() - transitions the specified host FC port to online state
803  * @fc_regs:	Top of MMIO region defined for specified port.
804  *
805  * The provided MMIO region must be mapped prior to call. Online state means
806  * that the FC link layer has synced, completed the handshaking process, and
807  * is ready for login to start.
808  */
809 static void set_port_online(__be64 __iomem *fc_regs)
810 {
811 	u64 cmdcfg;
812 
813 	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
814 	cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE);	/* clear OFF_LINE */
815 	cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);	/* set ON_LINE */
816 	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
817 }
818 
819 /**
820  * set_port_offline() - transitions the specified host FC port to offline state
821  * @fc_regs:	Top of MMIO region defined for specified port.
822  *
823  * The provided MMIO region must be mapped prior to call.
824  */
825 static void set_port_offline(__be64 __iomem *fc_regs)
826 {
827 	u64 cmdcfg;
828 
829 	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
830 	cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);	/* clear ON_LINE */
831 	cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);	/* set OFF_LINE */
832 	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
833 }
834 
835 /**
836  * wait_port_online() - waits for the specified host FC port to come online
837  * @fc_regs:	Top of MMIO region defined for specified port.
838  * @delay_us:	Number of microseconds to delay between reading port status.
839  * @nretry:	Number of cycles to retry reading port status.
840  *
841  * The provided MMIO region must be mapped prior to call. This will timeout
842  * when the cable is not plugged in.
843  *
844  * Return:
845  *	TRUE (1) when the specified port is online
846  *	FALSE (0) when the specified port fails to come online after timeout
847  *	-EINVAL when @delay_us is less than 1000
848  */
849 static int wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
850 {
851 	u64 status;
852 
853 	if (delay_us < 1000) {
854 		pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
855 		return -EINVAL;
856 	}
857 
858 	do {
859 		msleep(delay_us / 1000);
860 		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
861 		if (status == U64_MAX)
862 			nretry /= 2;
863 	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
864 		 nretry--);
865 
866 	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
867 }
868 
869 /**
870  * wait_port_offline() - waits for the specified host FC port to go offline
871  * @fc_regs:	Top of MMIO region defined for specified port.
872  * @delay_us:	Number of microseconds to delay between reading port status.
873  * @nretry:	Number of cycles to retry reading port status.
874  *
875  * The provided MMIO region must be mapped prior to call.
876  *
877  * Return:
878  *	TRUE (1) when the specified port is offline
879  *	FALSE (0) when the specified port fails to go offline after timeout
880  *	-EINVAL when @delay_us is less than 1000
881  */
882 static int wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
883 {
884 	u64 status;
885 
886 	if (delay_us < 1000) {
887 		pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
888 		return -EINVAL;
889 	}
890 
891 	do {
892 		msleep(delay_us / 1000);
893 		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
894 		if (status == U64_MAX)
895 			nretry /= 2;
896 	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
897 		 nretry--);
898 
899 	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
900 }
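
/*
 * Note for the two wait loops above: a status of U64_MAX (all 1s) usually
 * means the MMIO read itself failed (e.g. while EEH recovery is in
 * progress), so the remaining retry budget is halved rather than spent in
 * full against an unresponsive register.
 */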
901 
902 /**
903  * afu_set_wwpn() - configures the WWPN for the specified host FC port
904  * @afu:	AFU associated with the host that owns the specified FC port.
905  * @port:	Port number being configured.
906  * @fc_regs:	Top of MMIO region defined for specified port.
907  * @wwpn:	The world-wide-port-number previously discovered for port.
908  *
909  * The provided MMIO region must be mapped prior to call. As part of the
910  * sequence to configure the WWPN, the port is toggled offline and then back
911  * online. This toggling action can cause this routine to delay up to a few
912  * seconds. When configured to use the internal LUN feature of the AFU, a
913  * failure to come online is overridden.
914  */
915 static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
916 			 u64 wwpn)
917 {
918 	set_port_offline(fc_regs);
919 	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
920 			       FC_PORT_STATUS_RETRY_CNT)) {
921 		pr_debug("%s: wait on port %d to go offline timed out\n",
922 			 __func__, port);
923 	}
924 
925 	writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);
926 
927 	set_port_online(fc_regs);
928 	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
929 			      FC_PORT_STATUS_RETRY_CNT)) {
930 		pr_debug("%s: wait on port %d to go online timed out\n",
931 			 __func__, port);
932 	}
933 }
934 
935 /**
936  * afu_link_reset() - resets the specified host FC port
937  * @afu:	AFU associated with the host that owns the specified FC port.
938  * @port:	Port number being configured.
939  * @fc_regs:	Top of MMIO region defined for specified port.
940  *
941  * The provided MMIO region must be mapped prior to call. The sequence to
942  * reset the port involves toggling it offline and then back online. This
943  * action can cause this routine to delay up to a few seconds. An effort
944  * is made to maintain the link with the device by switching the host to
945  * use the alternate port exclusively while the reset takes place. A
946  * failure of the port to come back online is logged but not treated as fatal.
947  */
948 static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
949 {
950 	u64 port_sel;
951 
952 	/* first switch the AFU to the other links, if any */
953 	port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
954 	port_sel &= ~(1ULL << port);
955 	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
956 	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
957 
958 	set_port_offline(fc_regs);
959 	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
960 			       FC_PORT_STATUS_RETRY_CNT))
961 		pr_err("%s: wait on port %d to go offline timed out\n",
962 		       __func__, port);
963 
964 	set_port_online(fc_regs);
965 	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
966 			      FC_PORT_STATUS_RETRY_CNT))
967 		pr_err("%s: wait on port %d to go online timed out\n",
968 		       __func__, port);
969 
970 	/* switch back to include this port */
971 	port_sel |= (1ULL << port);
972 	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
973 	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
974 
975 	pr_debug("%s: returning port_sel=%lld\n", __func__, port_sel);
976 }
977 
978 /*
979  * Asynchronous interrupt information table
980  */
981 static const struct asyc_intr_info ainfo[] = {
982 	{SISL_ASTATUS_FC0_OTHER, "other error", 0, CLR_FC_ERROR | LINK_RESET},
983 	{SISL_ASTATUS_FC0_LOGO, "target initiated LOGO", 0, 0},
984 	{SISL_ASTATUS_FC0_CRC_T, "CRC threshold exceeded", 0, LINK_RESET},
985 	{SISL_ASTATUS_FC0_LOGI_R, "login timed out, retrying", 0, LINK_RESET},
986 	{SISL_ASTATUS_FC0_LOGI_F, "login failed", 0, CLR_FC_ERROR},
987 	{SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, SCAN_HOST},
988 	{SISL_ASTATUS_FC0_LINK_DN, "link down", 0, 0},
989 	{SISL_ASTATUS_FC0_LINK_UP, "link up", 0, 0},
990 	{SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
991 	{SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
992 	{SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
993 	{SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, LINK_RESET},
994 	{SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
995 	{SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, SCAN_HOST},
996 	{SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
997 	{SISL_ASTATUS_FC1_LINK_UP, "link up", 1, 0},
998 	{0x0, "", 0, 0}		/* terminator */
999 };
1000 
1001 /**
1002  * find_ainfo() - locates and returns asynchronous interrupt information
1003  * @status:	Status code set by AFU on error.
1004  *
1005  * Return: The located information or NULL when the status code is invalid.
1006  */
1007 static const struct asyc_intr_info *find_ainfo(u64 status)
1008 {
1009 	const struct asyc_intr_info *info;
1010 
1011 	for (info = &ainfo[0]; info->status; info++)
1012 		if (info->status == status)
1013 			return info;
1014 
1015 	return NULL;
1016 }
1017 
1018 /**
1019  * afu_err_intr_init() - clears and initializes the AFU for error interrupts
1020  * @afu:	AFU associated with the host.
1021  */
1022 static void afu_err_intr_init(struct afu *afu)
1023 {
1024 	int i;
1025 	u64 reg;
1026 
1027 	/* global async interrupts: AFU clears afu_ctrl on context exit
1028 	 * if async interrupts were sent to that context. This prevents
1029 	 * the AFU from sending further async interrupts when there is
1030 	 * nobody to receive them.
1031 	 */
1033 
1034 	/* mask all */
1035 	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
1036 	/* set LISN# to send and point to master context */
1037 	reg = ((u64) (((afu->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);
1038 
1039 	if (afu->internal_lun)
1040 		reg |= 1;	/* Bit 63 indicates local lun */
1041 	writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
1042 	/* clear all */
1043 	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
1044 	/* unmask bits that are of interest */
1045 	/* note: afu can send an interrupt after this step */
1046 	writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
1047 	/* clear again in case a bit came on after previous clear but before */
1048 	/* unmask */
1049 	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
1050 
1051 	/* Clear/Set internal lun bits */
1052 	reg = readq_be(&afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);
1053 	reg &= SISL_FC_INTERNAL_MASK;
1054 	if (afu->internal_lun)
1055 		reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
1056 	writeq_be(reg, &afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);
1057 
1058 	/* now clear FC errors */
1059 	for (i = 0; i < NUM_FC_PORTS; i++) {
1060 		writeq_be(0xFFFFFFFFU,
1061 			  &afu->afu_map->global.fc_regs[i][FC_ERROR / 8]);
1062 		writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRCAP / 8]);
1063 	}
1064 
1065 	/* sync interrupts for master's IOARRIN write */
1066 	/* note that unlike asyncs, there can be no pending sync interrupts */
1067 	/* at this time (this is a fresh context and master has not written */
1068 	/* IOARRIN yet), so there is nothing to clear. */
1069 
1070 	/* set LISN#, it is always sent to the context that wrote IOARRIN */
1071 	writeq_be(SISL_MSI_SYNC_ERROR, &afu->host_map->ctx_ctrl);
1072 	writeq_be(SISL_ISTATUS_MASK, &afu->host_map->intr_mask);
1073 }
1074 
1075 /**
1076  * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
1077  * @irq:	Interrupt number.
1078  * @data:	Private data provided at interrupt registration, the AFU.
1079  *
1080  * Return: Always return IRQ_HANDLED.
1081  */
1082 static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
1083 {
1084 	struct afu *afu = (struct afu *)data;
1085 	u64 reg;
1086 	u64 reg_unmasked;
1087 
1088 	reg = readq_be(&afu->host_map->intr_status);
1089 	reg_unmasked = (reg & SISL_ISTATUS_UNMASK);
1090 
1091 	if (reg_unmasked == 0UL) {
1092 		pr_err("%s: %llX: spurious interrupt, intr_status %016llX\n",
1093 		       __func__, (u64)afu, reg);
1094 		goto cxlflash_sync_err_irq_exit;
1095 	}
1096 
1097 	pr_err("%s: %llX: unexpected interrupt, intr_status %016llX\n",
1098 	       __func__, (u64)afu, reg);
1099 
1100 	writeq_be(reg_unmasked, &afu->host_map->intr_clear);
1101 
1102 cxlflash_sync_err_irq_exit:
1103 	pr_debug("%s: returning rc=%d\n", __func__, IRQ_HANDLED);
1104 	return IRQ_HANDLED;
1105 }
1106 
1107 /**
1108  * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
1109  * @irq:	Interrupt number.
1110  * @data:	Private data provided at interrupt registration, the AFU.
1111  *
1112  * Return: Always return IRQ_HANDLED.
1113  */
1114 static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
1115 {
1116 	struct afu *afu = (struct afu *)data;
1117 	struct afu_cmd *cmd;
1118 	bool toggle = afu->toggle;
1119 	u64 entry,
1120 	    *hrrq_start = afu->hrrq_start,
1121 	    *hrrq_end = afu->hrrq_end,
1122 	    *hrrq_curr = afu->hrrq_curr;
1123 
1124 	/* Process however many RRQ entries that are ready */
1125 	while (true) {
1126 		entry = *hrrq_curr;
1127 
1128 		if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
1129 			break;
1130 
1131 		cmd = (struct afu_cmd *)(entry & ~SISL_RESP_HANDLE_T_BIT);
1132 		cmd_complete(cmd);
1133 
1134 		/* Advance to next entry or wrap and flip the toggle bit */
1135 		if (hrrq_curr < hrrq_end)
1136 			hrrq_curr++;
1137 		else {
1138 			hrrq_curr = hrrq_start;
1139 			toggle ^= SISL_RESP_HANDLE_T_BIT;
1140 		}
1141 	}
1142 
1143 	afu->hrrq_curr = hrrq_curr;
1144 	afu->toggle = toggle;
1145 
1146 	return IRQ_HANDLED;
1147 }
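
/*
 * Note on the toggle scheme above: the AFU stamps each posted RRQ entry
 * with a toggle (T) bit that flips on every wrap of the queue. An entry
 * whose T bit does not match the driver's expected value is stale (left
 * over from the previous lap), which is what terminates the loop without
 * any producer/consumer index exchange.
 */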
1148 
1149 /**
1150  * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
1151  * @irq:	Interrupt number.
1152  * @data:	Private data provided at interrupt registration, the AFU.
1153  *
1154  * Return: Always return IRQ_HANDLED.
1155  */
1156 static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
1157 {
1158 	struct afu *afu = (struct afu *)data;
1159 	struct cxlflash_cfg *cfg = afu->parent;
1160 	struct device *dev = &cfg->dev->dev;
1161 	u64 reg_unmasked;
1162 	const struct asyc_intr_info *info;
1163 	struct sisl_global_map __iomem *global = &afu->afu_map->global;
1164 	u64 reg;
1165 	u8 port;
1166 	int i;
1167 
1168 	reg = readq_be(&global->regs.aintr_status);
1169 	reg_unmasked = (reg & SISL_ASTATUS_UNMASK);
1170 
1171 	if (reg_unmasked == 0) {
1172 		dev_err(dev, "%s: spurious interrupt, aintr_status 0x%016llX\n",
1173 			__func__, reg);
1174 		goto out;
1175 	}
1176 
1177 	/* FYI, it is 'okay' to clear AFU status before FC_ERROR */
1178 	writeq_be(reg_unmasked, &global->regs.aintr_clear);
1179 
1180 	/* Check each bit that is on */
1181 	for (i = 0; reg_unmasked; i++, reg_unmasked = (reg_unmasked >> 1)) {
1182 		info = find_ainfo(1ULL << i);
1183 		if (((reg_unmasked & 0x1) == 0) || !info)
1184 			continue;
1185 
1186 		port = info->port;
1187 
1188 		dev_err(dev, "%s: FC Port %d -> %s, fc_status 0x%08llX\n",
1189 			__func__, port, info->desc,
1190 		       readq_be(&global->fc_regs[port][FC_STATUS / 8]));
1191 
1192 		/*
1193 		 * Do link reset first, some OTHER errors will set FC_ERROR
1194 		 * again if cleared before or w/o a reset
1195 		 */
1196 		if (info->action & LINK_RESET) {
1197 			dev_err(dev, "%s: FC Port %d: resetting link\n",
1198 				__func__, port);
1199 			cfg->lr_state = LINK_RESET_REQUIRED;
1200 			cfg->lr_port = port;
1201 			kref_get(&cfg->afu->mapcount);
1202 			schedule_work(&cfg->work_q);
1203 		}
1204 
1205 		if (info->action & CLR_FC_ERROR) {
1206 			reg = readq_be(&global->fc_regs[port][FC_ERROR / 8]);
1207 
1208 			/*
1209 			 * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
1210 			 * should be the same and tracing one is sufficient.
1211 			 */
1212 
1213 			dev_err(dev, "%s: fc %d: clearing fc_error 0x%08llX\n",
1214 				__func__, port, reg);
1215 
1216 			writeq_be(reg, &global->fc_regs[port][FC_ERROR / 8]);
1217 			writeq_be(0, &global->fc_regs[port][FC_ERRCAP / 8]);
1218 		}
1219 
1220 		if (info->action & SCAN_HOST) {
1221 			atomic_inc(&cfg->scan_host_needed);
1222 			kref_get(&cfg->afu->mapcount);
1223 			schedule_work(&cfg->work_q);
1224 		}
1225 	}
1226 
1227 out:
1228 	dev_dbg(dev, "%s: returning IRQ_HANDLED, afu=%p\n", __func__, afu);
1229 	return IRQ_HANDLED;
1230 }
1231 
1232 /**
1233  * start_context() - starts the master context
1234  * @cfg:	Internal structure associated with the host.
1235  *
1236  * Return: A success or failure value from CXL services.
1237  */
1238 static int start_context(struct cxlflash_cfg *cfg)
1239 {
1240 	int rc = 0;
1241 
1242 	rc = cxl_start_context(cfg->mcctx,
1243 			       cfg->afu->work.work_element_descriptor,
1244 			       NULL);
1245 
1246 	pr_debug("%s: returning rc=%d\n", __func__, rc);
1247 	return rc;
1248 }
1249 
1250 /**
1251  * read_vpd() - obtains the WWPNs from VPD
1252  * @cfg:	Internal structure associated with the host.
1253  * @wwpn:	Array of size NUM_FC_PORTS to pass back WWPNs
1254  *
1255  * Return: 0 on success, -errno on failure
1256  */
1257 static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
1258 {
1259 	struct pci_dev *dev = cfg->dev;
1260 	int rc = 0;
1261 	int ro_start, ro_size, i, j, k;
1262 	ssize_t vpd_size;
1263 	char vpd_data[CXLFLASH_VPD_LEN];
1264 	char tmp_buf[WWPN_BUF_LEN] = { 0 };
1265 	char *wwpn_vpd_tags[NUM_FC_PORTS] = { "V5", "V6" };
1266 
1267 	/* Get the VPD data from the device */
1268 	vpd_size = cxl_read_adapter_vpd(dev, vpd_data, sizeof(vpd_data));
1269 	if (unlikely(vpd_size <= 0)) {
1270 		dev_err(&dev->dev, "%s: Unable to read VPD (size = %ld)\n",
1271 		       __func__, vpd_size);
1272 		rc = -ENODEV;
1273 		goto out;
1274 	}
1275 
1276 	/* Get the read only section offset */
1277 	ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
1278 				    PCI_VPD_LRDT_RO_DATA);
1279 	if (unlikely(ro_start < 0)) {
1280 		dev_err(&dev->dev, "%s: VPD Read-only data not found\n",
1281 			__func__);
1282 		rc = -ENODEV;
1283 		goto out;
1284 	}
1285 
1286 	/* Get the read only section size, cap when extends beyond read VPD */
1287 	ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
1288 	j = ro_size;
1289 	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
1290 	if (unlikely((i + j) > vpd_size)) {
1291 		pr_debug("%s: Might need to read more VPD (%d > %ld)\n",
1292 			 __func__, (i + j), vpd_size);
1293 		ro_size = vpd_size - i;
1294 	}
1295 
1296 	/*
1297 	 * Find the offset of the WWPN tag within the read only
1298 	 * VPD data and validate the found field (partials are
1299 	 * no good to us). Convert the ASCII data to an integer
1300 	 * value. Note that we must copy to a temporary buffer
1301 	 * because the conversion service requires that the ASCII
1302 	 * string be terminated.
1303 	 */
1304 	for (k = 0; k < NUM_FC_PORTS; k++) {
1305 		j = ro_size;
1306 		i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
1307 
1308 		i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
1309 		if (unlikely(i < 0)) {
1310 			dev_err(&dev->dev, "%s: Port %d WWPN not found "
1311 				"in VPD\n", __func__, k);
1312 			rc = -ENODEV;
1313 			goto out;
1314 		}
1315 
1316 		j = pci_vpd_info_field_size(&vpd_data[i]);
1317 		i += PCI_VPD_INFO_FLD_HDR_SIZE;
1318 		if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
1319 			dev_err(&dev->dev, "%s: Port %d WWPN incomplete or "
1320 				"VPD corrupt\n",
1321 			       __func__, k);
1322 			rc = -ENODEV;
1323 			goto out;
1324 		}
1325 
1326 		memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
1327 		rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
1328 		if (unlikely(rc)) {
1329 			dev_err(&dev->dev, "%s: Fail to convert port %d WWPN "
1330 				"to integer\n", __func__, k);
1331 			rc = -ENODEV;
1332 			goto out;
1333 		}
1334 	}
1335 
1336 out:
1337 	pr_debug("%s: returning rc=%d\n", __func__, rc);
1338 	return rc;
1339 }
1340 
1341 /**
1342  * init_pcr() - initialize the provisioning and control registers
1343  * @cfg:	Internal structure associated with the host.
1344  *
1345  * Also sets up fast access to the mapped registers and initializes AFU
1346  * command fields that never change.
1347  */
1348 static void init_pcr(struct cxlflash_cfg *cfg)
1349 {
1350 	struct afu *afu = cfg->afu;
1351 	struct sisl_ctrl_map __iomem *ctrl_map;
1352 	int i;
1353 
1354 	for (i = 0; i < MAX_CONTEXT; i++) {
1355 		ctrl_map = &afu->afu_map->ctrls[i].ctrl;
1356 		/* Disrupt any clients that could be running */
1357 		/* e.g. clients that survived a master restart */
1358 		writeq_be(0, &ctrl_map->rht_start);
1359 		writeq_be(0, &ctrl_map->rht_cnt_id);
1360 		writeq_be(0, &ctrl_map->ctx_cap);
1361 	}
1362 
1363 	/* Copy frequently used fields into afu */
1364 	afu->ctx_hndl = (u16) cxl_process_element(cfg->mcctx);
1365 	afu->host_map = &afu->afu_map->hosts[afu->ctx_hndl].host;
1366 	afu->ctrl_map = &afu->afu_map->ctrls[afu->ctx_hndl].ctrl;
1367 
1368 	/* Program the Endian Control for the master context */
1369 	writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl);
1370 }
1371 
1372 /**
1373  * init_global() - initialize AFU global registers
1374  * @cfg:	Internal structure associated with the host.
1375  */
1376 static int init_global(struct cxlflash_cfg *cfg)
1377 {
1378 	struct afu *afu = cfg->afu;
1379 	struct device *dev = &cfg->dev->dev;
1380 	u64 wwpn[NUM_FC_PORTS];	/* wwpn of AFU ports */
1381 	int i = 0, num_ports = 0;
1382 	int rc = 0;
1383 	u64 reg;
1384 
1385 	rc = read_vpd(cfg, &wwpn[0]);
1386 	if (rc) {
1387 		dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
1388 		goto out;
1389 	}
1390 
1391 	pr_debug("%s: wwpn0=0x%llX wwpn1=0x%llX\n", __func__, wwpn[0], wwpn[1]);
1392 
1393 	/* Set up RRQ in AFU for master issued cmds */
1394 	writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start);
1395 	writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end);
1396 
1397 	/* AFU configuration */
1398 	reg = readq_be(&afu->afu_map->global.regs.afu_config);
1399 	reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
1400 	/* enable all auto retry options and control endianness */
1401 	/* leave others at default: */
1402 	/* CTX_CAP write protected, mbox_r does not clear on read and */
1403 	/* checker on if dual afu */
1404 	writeq_be(reg, &afu->afu_map->global.regs.afu_config);
1405 
1406 	/* Global port select: select either port */
1407 	if (afu->internal_lun) {
1408 		/* Only use port 0 */
1409 		writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
1410 		num_ports = NUM_FC_PORTS - 1;
1411 	} else {
1412 		writeq_be(BOTH_PORTS, &afu->afu_map->global.regs.afu_port_sel);
1413 		num_ports = NUM_FC_PORTS;
1414 	}
1415 
1416 	for (i = 0; i < num_ports; i++) {
1417 		/* Unmask all errors (but they are still masked at AFU) */
1418 		writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRMSK / 8]);
1419 		/* Clear CRC error cnt & set a threshold */
1420 		(void)readq_be(&afu->afu_map->global.
1421 			       fc_regs[i][FC_CNT_CRCERR / 8]);
1422 		writeq_be(MC_CRC_THRESH, &afu->afu_map->global.fc_regs[i]
1423 			  [FC_CRC_THRESH / 8]);
1424 
1425 		/* Set WWPNs. If already programmed, wwpn[i] is 0 */
1426 		if (wwpn[i] != 0)
1427 			afu_set_wwpn(afu, i,
1428 				     &afu->afu_map->global.fc_regs[i][0],
1429 				     wwpn[i]);
1430 		/* Programming WWPN back to back causes additional
1431 		 * offline/online transitions and a PLOGI
1432 		 */
1433 		msleep(100);
1434 	}
1435 
1436 	/* Set up master's own CTX_CAP to allow real mode, host translation */
1437 	/* tables, afu cmds and read/write GSCSI cmds. */
1438 	/* First, unlock ctx_cap write by reading mbox */
1439 	(void)readq_be(&afu->ctrl_map->mbox_r);	/* unlock ctx_cap */
1440 	writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
1441 		   SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
1442 		   SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
1443 		  &afu->ctrl_map->ctx_cap);
1444 	/* Initialize heartbeat */
1445 	afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
1446 
1447 out:
1448 	return rc;
1449 }
1450 
1451 /**
1452  * start_afu() - initializes and starts the AFU
1453  * @cfg:	Internal structure associated with the host.
1454  */
1455 static int start_afu(struct cxlflash_cfg *cfg)
1456 {
1457 	struct afu *afu = cfg->afu;
1458 	int rc = 0;
1459 
1460 	init_pcr(cfg);
1461 
1462 	/* After an AFU reset, RRQ entries are stale, clear them */
1463 	memset(&afu->rrq_entry, 0, sizeof(afu->rrq_entry));
1464 
1465 	/* Initialize RRQ pointers */
1466 	afu->hrrq_start = &afu->rrq_entry[0];
1467 	afu->hrrq_end = &afu->rrq_entry[NUM_RRQ_ENTRY - 1];
1468 	afu->hrrq_curr = afu->hrrq_start;
1469 	afu->toggle = 1;
1470 
1471 	rc = init_global(cfg);
1472 
1473 	pr_debug("%s: returning rc=%d\n", __func__, rc);
1474 	return rc;
1475 }
1476 
1477 /**
1478  * init_intr() - setup interrupt handlers for the master context
1479  * @cfg:	Internal structure associated with the host.
1480  *
1481  * Return: UNDO_NOOP on success, the undo level of the failed step on failure
1482  */
1483 static enum undo_level init_intr(struct cxlflash_cfg *cfg,
1484 				 struct cxl_context *ctx)
1485 {
1486 	struct afu *afu = cfg->afu;
1487 	struct device *dev = &cfg->dev->dev;
1488 	int rc = 0;
1489 	enum undo_level level = UNDO_NOOP;
1490 
1491 	rc = cxl_allocate_afu_irqs(ctx, 3);
1492 	if (unlikely(rc)) {
1493 		dev_err(dev, "%s: call to allocate_afu_irqs failed rc=%d!\n",
1494 			__func__, rc);
1495 		level = UNDO_NOOP;
1496 		goto out;
1497 	}
1498 
1499 	rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, afu,
1500 			     "SISL_MSI_SYNC_ERROR");
1501 	if (unlikely(rc <= 0)) {
1502 		dev_err(dev, "%s: IRQ 1 (SISL_MSI_SYNC_ERROR) map failed!\n",
1503 			__func__);
1504 		level = FREE_IRQ;
1505 		goto out;
1506 	}
1507 
1508 	rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, afu,
1509 			     "SISL_MSI_RRQ_UPDATED");
1510 	if (unlikely(rc <= 0)) {
1511 		dev_err(dev, "%s: IRQ 2 (SISL_MSI_RRQ_UPDATED) map failed!\n",
1512 			__func__);
1513 		level = UNMAP_ONE;
1514 		goto out;
1515 	}
1516 
1517 	rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, afu,
1518 			     "SISL_MSI_ASYNC_ERROR");
1519 	if (unlikely(rc <= 0)) {
1520 		dev_err(dev, "%s: IRQ 3 (SISL_MSI_ASYNC_ERROR) map failed!\n",
1521 			__func__);
1522 		level = UNMAP_TWO;
1523 		goto out;
1524 	}
1525 out:
1526 	return level;
1527 }
1528 
1529 /**
1530  * init_mc() - create and register as the master context
1531  * @cfg:	Internal structure associated with the host.
1532  *
1533  * Return: 0 on success, -errno on failure
1534  */
1535 static int init_mc(struct cxlflash_cfg *cfg)
1536 {
1537 	struct cxl_context *ctx;
1538 	struct device *dev = &cfg->dev->dev;
1539 	int rc = 0;
1540 	enum undo_level level;
1541 
1542 	ctx = cxl_get_context(cfg->dev);
1543 	if (unlikely(!ctx)) {
1544 		rc = -ENOMEM;
1545 		goto ret;
1546 	}
1547 	cfg->mcctx = ctx;
1548 
1549 	/* Set it up as a master with the CXL */
1550 	cxl_set_master(ctx);
1551 
1552 	/* During initialization reset the AFU to start from a clean slate */
1553 	rc = cxl_afu_reset(cfg->mcctx);
1554 	if (unlikely(rc)) {
1555 		dev_err(dev, "%s: initial AFU reset failed rc=%d\n",
1556 			__func__, rc);
1557 		goto ret;
1558 	}
1559 
1560 	level = init_intr(cfg, ctx);
1561 	if (unlikely(level)) {
1562 		dev_err(dev, "%s: setting up interrupts failed level=%d\n",
1563 			__func__, level);
		rc = -EIO;	/* init_intr() conveys no errno; -EIO is an assumed generic */
1564 		goto out;
1565 	}
1566 
1567 	/* This performs the equivalent of the CXL_IOCTL_START_WORK.
1568 	 * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
1569 	 * element (pe) that is embedded in the context (ctx)
1570 	 */
1571 	rc = start_context(cfg);
1572 	if (unlikely(rc)) {
1573 		dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
1574 		level = UNMAP_THREE;
1575 		goto out;
1576 	}
1577 ret:
1578 	pr_debug("%s: returning rc=%d\n", __func__, rc);
1579 	return rc;
1580 out:
1581 	term_intr(cfg, level);
1582 	goto ret;
1583 }
1584 
1585 /**
1586  * init_afu() - setup as master context and start AFU
1587  * @cfg:	Internal structure associated with the host.
1588  *
1589  * This routine is a higher level of control for configuring the
1590  * AFU on probe and reset paths.
1591  *
1592  * Return: 0 on success, -errno on failure
1593  */
1594 static int init_afu(struct cxlflash_cfg *cfg)
1595 {
1596 	u64 reg;
1597 	int rc = 0;
1598 	struct afu *afu = cfg->afu;
1599 	struct device *dev = &cfg->dev->dev;
1600 
1601 	cxl_perst_reloads_same_image(cfg->cxl_afu, true);
1602 
1603 	rc = init_mc(cfg);
1604 	if (rc) {
1605 		dev_err(dev, "%s: call to init_mc failed, rc=%d!\n",
1606 			__func__, rc);
1607 		goto out;
1608 	}
1609 
1610 	/* Map the entire MMIO space of the AFU */
1611 	afu->afu_map = cxl_psa_map(cfg->mcctx);
1612 	if (!afu->afu_map) {
1613 		dev_err(dev, "%s: call to cxl_psa_map failed!\n", __func__);
1614 		rc = -ENOMEM;
1615 		goto err1;
1616 	}
1617 	kref_init(&afu->mapcount);
1618 
1619 	/* No byte reverse on reading afu_version, else the string will be backwards */
1620 	reg = readq(&afu->afu_map->global.regs.afu_version);
1621 	memcpy(afu->version, &reg, sizeof(reg));
1622 	afu->interface_version =
1623 	    readq_be(&afu->afu_map->global.regs.interface_version);
1624 	if ((afu->interface_version + 1) == 0) {
1625 		pr_err("Back level AFU, please upgrade. AFU version %s "
1626 		       "interface version 0x%llx\n", afu->version,
1627 		       afu->interface_version);
1628 		rc = -EINVAL;
1629 		goto err2;
1630 	}
1631 
1632 	afu->send_cmd = send_cmd_ioarrin;
1633 	afu->context_reset = context_reset_ioarrin;
1634 
1635 	pr_debug("%s: afu version %s, interface version 0x%llX\n", __func__,
1636 		 afu->version, afu->interface_version);
1637 
1638 	rc = start_afu(cfg);
1639 	if (rc) {
1640 		dev_err(dev, "%s: call to start_afu failed, rc=%d!\n",
1641 			__func__, rc);
1642 		goto err2;
1643 	}
1644 
1645 	afu_err_intr_init(cfg->afu);
1646 	spin_lock_init(&afu->rrin_slock);
1647 	afu->room = readq_be(&afu->host_map->cmd_room);
1648 
1649 	/* Restore the LUN mappings */
1650 	cxlflash_restore_luntable(cfg);
1651 out:
1652 	pr_debug("%s: returning rc=%d\n", __func__, rc);
1653 	return rc;
1654 
1655 err2:
1656 	kref_put(&afu->mapcount, afu_unmap);
1657 err1:
1658 	term_intr(cfg, UNMAP_THREE);
1659 	term_mc(cfg);
1660 	goto out;
1661 }
1662 
1663 /**
1664  * cxlflash_afu_sync() - builds and sends an AFU sync command
1665  * @afu:	AFU associated with the host.
1666  * @ctx_hndl_u:	Identifies context requesting sync.
1667  * @res_hndl_u:	Identifies resource requesting sync.
1668  * @mode:	Type of sync to issue (lightweight, heavyweight, global).
1669  *
1670  * The AFU can only take 1 sync command at a time. This routine enforces this
1671  * limitation by using a mutex to provide exclusive access to the AFU during
1672  * the sync. This design point requires that calling threads not be in interrupt
1673  * context due to the possibility of sleeping during concurrent sync operations.
1674  *
1675  * AFU sync operations are only necessary and allowed when the device is
1676  * operating normally. When not operating normally, sync requests can occur as
1677  * part of cleaning up resources associated with an adapter prior to removal.
1678  * In this scenario, these requests are simply ignored (safe due to the AFU
1679  * going away).
1680  *
1681  * Return:
1682  *	0 on success
1683  *	-1 on failure
1684  */
1685 int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
1686 		      res_hndl_t res_hndl_u, u8 mode)
1687 {
1688 	struct cxlflash_cfg *cfg = afu->parent;
1689 	struct device *dev = &cfg->dev->dev;
1690 	struct afu_cmd *cmd = NULL;
1691 	char *buf = NULL;
1692 	int rc = 0;
1693 	static DEFINE_MUTEX(sync_active);
1694 
1695 	if (cfg->state != STATE_NORMAL) {
1696 		pr_debug("%s: Sync not required! (%u)\n", __func__, cfg->state);
1697 		return 0;
1698 	}
1699 
1700 	mutex_lock(&sync_active);
1701 	atomic_inc(&afu->cmds_active);
1702 	buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
1703 	if (unlikely(!buf)) {
1704 		dev_err(dev, "%s: no memory for command\n", __func__);
1705 		rc = -1;
1706 		goto out;
1707 	}
1708 
1709 	cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
1710 	init_completion(&cmd->cevent);
1711 	cmd->parent = afu;
1712 
1713 	pr_debug("%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);
1714 
1715 	cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
1716 	cmd->rcb.ctx_id = afu->ctx_hndl;
1717 	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
1718 	cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT;
1719 
1720 	cmd->rcb.cdb[0] = 0xC0;	/* AFU Sync */
1721 	cmd->rcb.cdb[1] = mode;
1722 
1723 	/* The cdb is aligned, no unaligned accessors required */
1724 	*((__be16 *)&cmd->rcb.cdb[2]) = cpu_to_be16(ctx_hndl_u);
1725 	*((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u);
1726 
1727 	rc = afu->send_cmd(afu, cmd);
1728 	if (unlikely(rc))
1729 		goto out;
1730 
1731 	rc = wait_resp(afu, cmd);
1732 	if (unlikely(rc))
1733 		rc = -1;
1734 out:
1735 	atomic_dec(&afu->cmds_active);
1736 	mutex_unlock(&sync_active);
1737 	kfree(buf);
1738 	pr_debug("%s: returning rc=%d\n", __func__, rc);
1739 	return rc;
1740 }
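
/*
 * Example usage (as in afu_link_reset() above): a global sync is issued
 * after reprogramming the port selection mask:
 *
 *	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
 *	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
 */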
1741 
1742 /**
1743  * afu_reset() - resets the AFU
1744  * @cfg:	Internal structure associated with the host.
1745  *
1746  * Return: 0 on success, -errno on failure
1747  */
1748 static int afu_reset(struct cxlflash_cfg *cfg)
1749 {
1750 	int rc = 0;
1751 	/* Stop the context before the reset. Since the context is
1752 	 * no longer available, restart it after the reset completes.
1753 	 */
1754 
1755 	term_afu(cfg);
1756 
1757 	rc = init_afu(cfg);
1758 
1759 	pr_debug("%s: returning rc=%d\n", __func__, rc);
1760 	return rc;
1761 }
1762 
1763 /**
1764  * drain_ioctls() - wait until all currently executing ioctls have completed
1765  * @cfg:	Internal structure associated with the host.
1766  *
1767  * Obtain write access to read/write semaphore that wraps ioctl
1768  * handling to 'drain' ioctls currently executing.
1769  */
1770 static void drain_ioctls(struct cxlflash_cfg *cfg)
1771 {
1772 	down_write(&cfg->ioctl_rwsem);
1773 	up_write(&cfg->ioctl_rwsem);
1774 }
1775 
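/*
 * A minimal sketch of the read side this drain pairs with, assuming the
 * ioctl handlers wrap their work as shown (handle_ioctl() is hypothetical;
 * see the actual ioctl entry points for the real flow):
 *
 *	down_read(&cfg->ioctl_rwsem);
 *	rc = handle_ioctl(...);
 *	up_read(&cfg->ioctl_rwsem);
 *
 * Once every reader has released the semaphore, the writer in drain_ioctls()
 * acquires and drops it, guaranteeing no ioctl remains in flight.
 */
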
1776 /**
1777  * cxlflash_eh_device_reset_handler() - reset a single LUN
1778  * @scp:	SCSI command from stack identifying the LUN to be reset.
1779  *
1780  * Return:
1781  *	SUCCESS as defined in scsi/scsi.h
1782  *	FAILED as defined in scsi/scsi.h
1783  */
1784 static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
1785 {
1786 	int rc = SUCCESS;
1787 	struct Scsi_Host *host = scp->device->host;
1788 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
1789 	struct afu *afu = cfg->afu;
1790 	int rcr = 0;
1791 
1792 	pr_debug("%s: (scp=%p) %d/%d/%d/%llu cdb=(%08X-%08X-%08X-%08X)\n",
1793 		 __func__, scp, host->host_no,
1794 		 scp->device->channel, scp->device->id,
1795 		 scp->device->lun,
1796 		 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
1797 		 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
1798 		 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
1799 		 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
1800 
1801 retry:
1802 	switch (cfg->state) {
1803 	case STATE_NORMAL:
1804 		rcr = send_tmf(afu, scp, TMF_LUN_RESET);
1805 		if (unlikely(rcr))
1806 			rc = FAILED;
1807 		break;
1808 	case STATE_RESET:
1809 		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
1810 		goto retry;
1811 	default:
1812 		rc = FAILED;
1813 		break;
1814 	}
1815 
1816 	pr_debug("%s: returning rc=%d\n", __func__, rc);
1817 	return rc;
1818 }
1819 
1820 /**
1821  * cxlflash_eh_host_reset_handler() - reset the host adapter
1822  * @scp:	SCSI command from stack identifying host.
1823  *
1824  * Following a reset, the state is evaluated again in case an EEH occurred
1825  * during the reset. In such a scenario, the host reset will either wait
1826  * until the EEH recovery is complete, or return success or failure based
1827  * upon the current device state.
1828  *
1829  * Return:
1830  *	SUCCESS as defined in scsi/scsi.h
1831  *	FAILED as defined in scsi/scsi.h
1832  */
1833 static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
1834 {
1835 	int rc = SUCCESS;
1836 	int rcr = 0;
1837 	struct Scsi_Host *host = scp->device->host;
1838 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
1839 
1840 	pr_debug("%s: (scp=%p) %d/%d/%d/%llu cdb=(%08X-%08X-%08X-%08X)\n",
1841 		 __func__, scp, host->host_no,
1842 		 scp->device->channel, scp->device->id,
1843 		 scp->device->lun,
1844 		 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
1845 		 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
1846 		 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
1847 		 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
1848 
1849 	switch (cfg->state) {
1850 	case STATE_NORMAL:
1851 		cfg->state = STATE_RESET;
1852 		drain_ioctls(cfg);
1853 		cxlflash_mark_contexts_error(cfg);
1854 		rcr = afu_reset(cfg);
1855 		if (rcr) {
1856 			rc = FAILED;
1857 			cfg->state = STATE_FAILTERM;
1858 		} else
1859 			cfg->state = STATE_NORMAL;
1860 		wake_up_all(&cfg->reset_waitq);
1861 		ssleep(1);
1862 		/* fall through */
1863 	case STATE_RESET:
1864 		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
1865 		if (cfg->state == STATE_NORMAL)
1866 			break;
1867 		/* fall through */
1868 	default:
1869 		rc = FAILED;
1870 		break;
1871 	}
1872 
1873 	pr_debug("%s: returning rc=%d\n", __func__, rc);
1874 	return rc;
1875 }
1876 
1877 /**
1878  * cxlflash_change_queue_depth() - change the queue depth for the device
1879  * @sdev:	SCSI device destined for queue depth change.
1880  * @qdepth:	Requested queue depth value to set.
1881  *
1882  * The requested queue depth is capped to the maximum supported value.
1883  *
1884  * Return: The actual queue depth set.
1885  */
1886 static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
1887 {
1888 
1889 	if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
1890 		qdepth = CXLFLASH_MAX_CMDS_PER_LUN;
1891 
1892 	scsi_change_queue_depth(sdev, qdepth);
1893 	return sdev->queue_depth;
1894 }
1895 
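/*
 * For example (illustrative), a request beyond the supported maximum is
 * silently capped:
 *
 *	depth = cxlflash_change_queue_depth(sdev,
 *					    CXLFLASH_MAX_CMDS_PER_LUN + 8);
 *
 * after which depth equals CXLFLASH_MAX_CMDS_PER_LUN.
 */
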
1896 /**
1897  * cxlflash_show_port_status() - queries and presents the current port status
1898  * @port:	Desired port for status reporting.
1899  * @afu:	AFU owning the specified port.
1900  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
1901  *
1902  * Return: The size of the ASCII string returned in @buf.
1903  */
1904 static ssize_t cxlflash_show_port_status(u32 port, struct afu *afu, char *buf)
1905 {
1906 	char *disp_status;
1907 	u64 status;
1908 	__be64 __iomem *fc_regs;
1909 
1910 	if (port >= NUM_FC_PORTS)
1911 		return 0;
1912 
1913 	fc_regs = &afu->afu_map->global.fc_regs[port][0];
1914 	status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
1915 	status &= FC_MTIP_STATUS_MASK;
1916 
1917 	if (status == FC_MTIP_STATUS_ONLINE)
1918 		disp_status = "online";
1919 	else if (status == FC_MTIP_STATUS_OFFLINE)
1920 		disp_status = "offline";
1921 	else
1922 		disp_status = "unknown";
1923 
1924 	return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
1925 }
1926 
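/*
 * Read through the port0/port1 host attributes defined further below, this
 * reports one of "online", "offline" or "unknown", e.g. (path illustrative):
 *
 *	# cat /sys/class/scsi_host/host0/port0
 *	online
 */
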
1927 /**
1928  * port0_show() - queries and presents the current status of port 0
1929  * @dev:	Generic device associated with the host owning the port.
1930  * @attr:	Device attribute representing the port.
1931  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
1932  *
1933  * Return: The size of the ASCII string returned in @buf.
1934  */
1935 static ssize_t port0_show(struct device *dev,
1936 			  struct device_attribute *attr,
1937 			  char *buf)
1938 {
1939 	struct Scsi_Host *shost = class_to_shost(dev);
1940 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
1941 	struct afu *afu = cfg->afu;
1942 
1943 	return cxlflash_show_port_status(0, afu, buf);
1944 }
1945 
1946 /**
1947  * port1_show() - queries and presents the current status of port 1
1948  * @dev:	Generic device associated with the host owning the port.
1949  * @attr:	Device attribute representing the port.
1950  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
1951  *
1952  * Return: The size of the ASCII string returned in @buf.
1953  */
1954 static ssize_t port1_show(struct device *dev,
1955 			  struct device_attribute *attr,
1956 			  char *buf)
1957 {
1958 	struct Scsi_Host *shost = class_to_shost(dev);
1959 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
1960 	struct afu *afu = cfg->afu;
1961 
1962 	return cxlflash_show_port_status(1, afu, buf);
1963 }
1964 
1965 /**
1966  * lun_mode_show() - presents the current LUN mode of the host
1967  * @dev:	Generic device associated with the host.
1968  * @attr:	Device attribute representing the LUN mode.
1969  * @buf:	Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
1970  *
1971  * Return: The size of the ASCII string returned in @buf.
1972  */
1973 static ssize_t lun_mode_show(struct device *dev,
1974 			     struct device_attribute *attr, char *buf)
1975 {
1976 	struct Scsi_Host *shost = class_to_shost(dev);
1977 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
1978 	struct afu *afu = cfg->afu;
1979 
1980 	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
1981 }
1982 
1983 /**
1984  * lun_mode_store() - sets the LUN mode of the host
1985  * @dev:	Generic device associated with the host.
1986  * @attr:	Device attribute representing the LUN mode.
1987  * @buf:	Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
1988  * @count:	Length of data residing in @buf.
1989  *
1990  * The CXL Flash AFU supports a dummy LUN mode where the external
1991  * links and storage are not required. Space on the FPGA is used
1992  * to create 1 or 2 small LUNs which are presented to the system
1993  * as if they were a normal storage device. This feature is useful
1994  * during development and also provides manufacturing with a way
1995  * to test the AFU without an actual device.
1996  *
1997  * 0 = external LUN[s] (default)
1998  * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
1999  * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
2000  * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
2001  * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
2002  *
2003  * Return: The number of bytes consumed from @buf (always @count).
2004  */
2005 static ssize_t lun_mode_store(struct device *dev,
2006 			      struct device_attribute *attr,
2007 			      const char *buf, size_t count)
2008 {
2009 	struct Scsi_Host *shost = class_to_shost(dev);
2010 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
2011 	struct afu *afu = cfg->afu;
2012 	int rc;
2013 	u32 lun_mode;
2014 
2015 	rc = kstrtouint(buf, 10, &lun_mode);
2016 	if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
2017 		afu->internal_lun = lun_mode;
2018 
2019 		/*
2020 		 * When configured for internal LUN, there is only one channel
2021 		 * (channel 0); otherwise there are two (the default).
2022 		 */
2023 		if (afu->internal_lun)
2024 			shost->max_channel = 0;
2025 		else
2026 			shost->max_channel = NUM_FC_PORTS - 1;
2027 
2028 		afu_reset(cfg);
2029 		scsi_scan_host(cfg->host);
2030 	}
2031 
2032 	return count;
2033 }
2034 
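/*
 * From user space the LUN mode is selected through the lun_mode host
 * attribute defined further below, e.g. (path illustrative) to enable a
 * single internal LUN with 512B blocks:
 *
 *	# echo 1 > /sys/class/scsi_host/host0/lun_mode
 */
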
2035 /**
2036  * ioctl_version_show() - presents the current ioctl version of the host
2037  * @dev:	Generic device associated with the host.
2038  * @attr:	Device attribute representing the ioctl version.
2039  * @buf:	Buffer of length PAGE_SIZE to report back the ioctl version.
2040  *
2041  * Return: The size of the ASCII string returned in @buf.
2042  */
2043 static ssize_t ioctl_version_show(struct device *dev,
2044 				  struct device_attribute *attr, char *buf)
2045 {
2046 	return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0);
2047 }
2048 
2049 /**
2050  * cxlflash_show_port_lun_table() - queries and presents the port LUN table
2051  * @port:	Desired port for status reporting.
2052  * @afu:	AFU owning the specified port.
2053  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2054  *
2055  * Return: The size of the ASCII string returned in @buf.
2056  */
2057 static ssize_t cxlflash_show_port_lun_table(u32 port,
2058 					    struct afu *afu,
2059 					    char *buf)
2060 {
2061 	int i;
2062 	ssize_t bytes = 0;
2063 	__be64 __iomem *fc_port;
2064 
2065 	if (port >= NUM_FC_PORTS)
2066 		return 0;
2067 
2068 	fc_port = &afu->afu_map->global.fc_port[port][0];
2069 
2070 	for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
2071 		bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
2072 				   "%03d: %016llX\n", i, readq_be(&fc_port[i]));
2073 	return bytes;
2074 }
2075 
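/*
 * Sample of the ASCII table emitted above (entry values are illustrative):
 *
 *	000: 8000000000000001
 *	001: 0000000000000000
 *	...
 */
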
2076 /**
2077  * port0_lun_table_show() - presents the current LUN table of port 0
2078  * @dev:	Generic device associated with the host owning the port.
2079  * @attr:	Device attribute representing the port.
2080  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2081  *
2082  * Return: The size of the ASCII string returned in @buf.
2083  */
2084 static ssize_t port0_lun_table_show(struct device *dev,
2085 				    struct device_attribute *attr,
2086 				    char *buf)
2087 {
2088 	struct Scsi_Host *shost = class_to_shost(dev);
2089 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
2090 	struct afu *afu = cfg->afu;
2091 
2092 	return cxlflash_show_port_lun_table(0, afu, buf);
2093 }
2094 
2095 /**
2096  * port1_lun_table_show() - presents the current LUN table of port 1
2097  * @dev:	Generic device associated with the host owning the port.
2098  * @attr:	Device attribute representing the port.
2099  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2100  *
2101  * Return: The size of the ASCII string returned in @buf.
2102  */
2103 static ssize_t port1_lun_table_show(struct device *dev,
2104 				    struct device_attribute *attr,
2105 				    char *buf)
2106 {
2107 	struct Scsi_Host *shost = class_to_shost(dev);
2108 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
2109 	struct afu *afu = cfg->afu;
2110 
2111 	return cxlflash_show_port_lun_table(1, afu, buf);
2112 }
2113 
2114 /**
2115  * mode_show() - presents the current mode of the device
2116  * @dev:	Generic device associated with the device.
2117  * @attr:	Device attribute representing the device mode.
2118  * @buf:	Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
2119  *
2120  * Return: The size of the ASCII string returned in @buf.
2121  */
2122 static ssize_t mode_show(struct device *dev,
2123 			 struct device_attribute *attr, char *buf)
2124 {
2125 	struct scsi_device *sdev = to_scsi_device(dev);
2126 
2127 	return scnprintf(buf, PAGE_SIZE, "%s\n",
2128 			 sdev->hostdata ? "superpipe" : "legacy");
2129 }
2130 
2131 /*
2132  * Host attributes
2133  */
2134 static DEVICE_ATTR_RO(port0);
2135 static DEVICE_ATTR_RO(port1);
2136 static DEVICE_ATTR_RW(lun_mode);
2137 static DEVICE_ATTR_RO(ioctl_version);
2138 static DEVICE_ATTR_RO(port0_lun_table);
2139 static DEVICE_ATTR_RO(port1_lun_table);
2140 
2141 static struct device_attribute *cxlflash_host_attrs[] = {
2142 	&dev_attr_port0,
2143 	&dev_attr_port1,
2144 	&dev_attr_lun_mode,
2145 	&dev_attr_ioctl_version,
2146 	&dev_attr_port0_lun_table,
2147 	&dev_attr_port1_lun_table,
2148 	NULL
2149 };
2150 
2151 /*
2152  * Device attributes
2153  */
2154 static DEVICE_ATTR_RO(mode);
2155 
2156 static struct device_attribute *cxlflash_dev_attrs[] = {
2157 	&dev_attr_mode,
2158 	NULL
2159 };
2160 
2161 /*
2162  * Host template
2163  */
2164 static struct scsi_host_template driver_template = {
2165 	.module = THIS_MODULE,
2166 	.name = CXLFLASH_ADAPTER_NAME,
2167 	.info = cxlflash_driver_info,
2168 	.ioctl = cxlflash_ioctl,
2169 	.proc_name = CXLFLASH_NAME,
2170 	.queuecommand = cxlflash_queuecommand,
2171 	.eh_device_reset_handler = cxlflash_eh_device_reset_handler,
2172 	.eh_host_reset_handler = cxlflash_eh_host_reset_handler,
2173 	.change_queue_depth = cxlflash_change_queue_depth,
2174 	.cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
2175 	.can_queue = CXLFLASH_MAX_CMDS,
2176 	.cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
2177 	.this_id = -1,
2178 	.sg_tablesize = 1,	/* No scatter gather support */
2179 	.max_sectors = CXLFLASH_MAX_SECTORS,
2180 	.use_clustering = ENABLE_CLUSTERING,
2181 	.shost_attrs = cxlflash_host_attrs,
2182 	.sdev_attrs = cxlflash_dev_attrs,
2183 };
2184 
2185 /*
2186  * Device dependent values
2187  */
2188 static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
2189 					0ULL };
2190 static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
2191 					CXLFLASH_NOTIFY_SHUTDOWN };
2192 
2193 /*
2194  * PCI device binding table
2195  */
2196 static struct pci_device_id cxlflash_pci_table[] = {
2197 	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
2198 	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
2199 	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
2200 	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
2201 	{}
2202 };
2203 
2204 MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
2205 
2206 /**
2207  * cxlflash_worker_thread() - work thread handler for the AFU
2208  * @work:	Work structure contained within cxlflash associated with host.
2209  *
2210  * Handles the following events:
2211  * - Link reset, which cannot be performed in interrupt context since it
2212  *   can block for up to a few seconds
2213  * - Host rescan
2214  */
2215 static void cxlflash_worker_thread(struct work_struct *work)
2216 {
2217 	struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
2218 						work_q);
2219 	struct afu *afu = cfg->afu;
2220 	struct device *dev = &cfg->dev->dev;
2221 	int port;
2222 	ulong lock_flags;
2223 
2224 	/* Avoid MMIO if the device has failed */
2225 
2226 	if (cfg->state != STATE_NORMAL)
2227 		return;
2228 
2229 	spin_lock_irqsave(cfg->host->host_lock, lock_flags);
2230 
2231 	if (cfg->lr_state == LINK_RESET_REQUIRED) {
2232 		port = cfg->lr_port;
2233 		if (port < 0)
2234 			dev_err(dev, "%s: invalid port index %d\n",
2235 				__func__, port);
2236 		else {
2237 			spin_unlock_irqrestore(cfg->host->host_lock,
2238 					       lock_flags);
2239 
2240 			/* The reset can block... */
2241 			afu_link_reset(afu, port,
2242 				       &afu->afu_map->global.fc_regs[port][0]);
2243 			spin_lock_irqsave(cfg->host->host_lock, lock_flags);
2244 		}
2245 
2246 		cfg->lr_state = LINK_RESET_COMPLETE;
2247 	}
2248 
2249 	spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
2250 
2251 	if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
2252 		scsi_scan_host(cfg->host);
2253 	kref_put(&afu->mapcount, afu_unmap);
2254 }
2255 
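/*
 * The work above is queued from contexts that cannot block. A sketch,
 * mirroring the interrupt paths elsewhere in this driver, after noting
 * that a link reset is required:
 *
 *	cfg->lr_state = LINK_RESET_REQUIRED;
 *	cfg->lr_port = port;
 *	schedule_work(&cfg->work_q);
 */
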
2256 /**
2257  * cxlflash_probe() - PCI entry point to add host
2258  * @pdev:	PCI device associated with the host.
2259  * @dev_id:	PCI device id associated with device.
2260  *
2261  * Return: 0 on success, -errno on failure
2262  */
2263 static int cxlflash_probe(struct pci_dev *pdev,
2264 			  const struct pci_device_id *dev_id)
2265 {
2266 	struct Scsi_Host *host;
2267 	struct cxlflash_cfg *cfg = NULL;
2268 	struct dev_dependent_vals *ddv;
2269 	int rc = 0;
2270 
2271 	dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
2272 		__func__, pdev->irq);
2273 
2274 	ddv = (struct dev_dependent_vals *)dev_id->driver_data;
2275 	driver_template.max_sectors = ddv->max_sectors;
2276 
2277 	host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
2278 	if (!host) {
2279 		dev_err(&pdev->dev, "%s: call to scsi_host_alloc failed!\n",
2280 			__func__);
2281 		rc = -ENOMEM;
2282 		goto out;
2283 	}
2284 
2285 	host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
2286 	host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
2287 	host->max_channel = NUM_FC_PORTS - 1;
2288 	host->unique_id = host->host_no;
2289 	host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
2290 
2291 	cfg = (struct cxlflash_cfg *)host->hostdata;
2292 	cfg->host = host;
2293 	rc = alloc_mem(cfg);
2294 	if (rc) {
2295 		dev_err(&pdev->dev, "%s: call to alloc_mem failed!\n",
2296 			__func__);
2297 		rc = -ENOMEM;
2298 		scsi_host_put(cfg->host);
2299 		goto out;
2300 	}
2301 
2302 	cfg->init_state = INIT_STATE_NONE;
2303 	cfg->dev = pdev;
2304 	cfg->cxl_fops = cxlflash_cxl_fops;
2305 
2306 	/*
2307 	 * The promoted LUNs move to the top of the LUN table. The rest stay
2308 	 * on the bottom half. The bottom half grows from the end
2309 	 * (index = 255), whereas the top half grows from the beginning
2310 	 * (index = 0).
2311 	 */
2312 	cfg->promote_lun_index  = 0;
2313 	cfg->last_lun_index[0] = CXLFLASH_NUM_VLUNS/2 - 1;
2314 	cfg->last_lun_index[1] = CXLFLASH_NUM_VLUNS/2 - 1;
2315 
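	/*
	 * Illustration: promote_lun_index advances upward (0, 1, 2, ...) as
	 * LUNs are promoted, while each port's last_lun_index recedes
	 * downward from CXLFLASH_NUM_VLUNS/2 - 1 as ordinary LUNs are added.
	 */
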
2316 	cfg->dev_id = (struct pci_device_id *)dev_id;
2317 
2318 	init_waitqueue_head(&cfg->tmf_waitq);
2319 	init_waitqueue_head(&cfg->reset_waitq);
2320 
2321 	INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
2322 	cfg->lr_state = LINK_RESET_INVALID;
2323 	cfg->lr_port = -1;
2324 	spin_lock_init(&cfg->tmf_slock);
2325 	mutex_init(&cfg->ctx_tbl_list_mutex);
2326 	mutex_init(&cfg->ctx_recovery_mutex);
2327 	init_rwsem(&cfg->ioctl_rwsem);
2328 	INIT_LIST_HEAD(&cfg->ctx_err_recovery);
2329 	INIT_LIST_HEAD(&cfg->lluns);
2330 
2331 	pci_set_drvdata(pdev, cfg);
2332 
2333 	cfg->cxl_afu = cxl_pci_to_afu(pdev);
2334 
2335 	rc = init_pci(cfg);
2336 	if (rc) {
2337 		dev_err(&pdev->dev, "%s: call to init_pci failed rc=%d!\n",
2338 			__func__, rc);
2339 		goto out_remove;
2340 	}
2341 	cfg->init_state = INIT_STATE_PCI;
2342 
2343 	rc = init_afu(cfg);
2344 	if (rc) {
2345 		dev_err(&pdev->dev, "%s: call to init_afu failed rc=%d!\n",
2346 			__func__, rc);
2347 		goto out_remove;
2348 	}
2349 	cfg->init_state = INIT_STATE_AFU;
2350 
2351 	rc = init_scsi(cfg);
2352 	if (rc) {
2353 		dev_err(&pdev->dev, "%s: call to init_scsi failed rc=%d!\n",
2354 			__func__, rc);
2355 		goto out_remove;
2356 	}
2357 	cfg->init_state = INIT_STATE_SCSI;
2358 
2359 out:
2360 	pr_debug("%s: returning rc=%d\n", __func__, rc);
2361 	return rc;
2362 
2363 out_remove:
2364 	cxlflash_remove(pdev);
2365 	goto out;
2366 }
2367 
2368 /**
2369  * cxlflash_pci_error_detected() - called when a PCI error is detected
2370  * @pdev:	PCI device struct.
2371  * @state:	PCI channel state.
2372  *
2373  * When an EEH occurs during an active reset, wait until the reset is
2374  * complete and then take action based upon the device state.
2375  *
2376  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
2377  */
2378 static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
2379 						    pci_channel_state_t state)
2380 {
2381 	int rc = 0;
2382 	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
2383 	struct device *dev = &cfg->dev->dev;
2384 
2385 	dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);
2386 
2387 	switch (state) {
2388 	case pci_channel_io_frozen:
2389 		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2390 		if (cfg->state == STATE_FAILTERM)
2391 			return PCI_ERS_RESULT_DISCONNECT;
2392 
2393 		cfg->state = STATE_RESET;
2394 		scsi_block_requests(cfg->host);
2395 		drain_ioctls(cfg);
2396 		rc = cxlflash_mark_contexts_error(cfg);
2397 		if (unlikely(rc))
2398 			dev_err(dev, "%s: Failed to mark user contexts!(%d)\n",
2399 				__func__, rc);
2400 		term_afu(cfg);
2401 		return PCI_ERS_RESULT_NEED_RESET;
2402 	case pci_channel_io_perm_failure:
2403 		cfg->state = STATE_FAILTERM;
2404 		wake_up_all(&cfg->reset_waitq);
2405 		scsi_unblock_requests(cfg->host);
2406 		return PCI_ERS_RESULT_DISCONNECT;
2407 	default:
2408 		break;
2409 	}
2410 	return PCI_ERS_RESULT_NEED_RESET;
2411 }
2412 
2413 /**
2414  * cxlflash_pci_slot_reset() - called when PCI slot has been reset
2415  * @pdev:	PCI device struct.
2416  *
2417  * This routine is called by the pci error recovery code after the PCI
2418  * slot has been reset, just before we should resume normal operations.
2419  *
2420  * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
2421  */
2422 static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
2423 {
2424 	int rc = 0;
2425 	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
2426 	struct device *dev = &cfg->dev->dev;
2427 
2428 	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
2429 
2430 	rc = init_afu(cfg);
2431 	if (unlikely(rc)) {
2432 		dev_err(dev, "%s: EEH recovery failed! (%d)\n", __func__, rc);
2433 		return PCI_ERS_RESULT_DISCONNECT;
2434 	}
2435 
2436 	return PCI_ERS_RESULT_RECOVERED;
2437 }
2438 
2439 /**
2440  * cxlflash_pci_resume() - called when normal operation can resume
2441  * @pdev:	PCI device struct
2442  */
2443 static void cxlflash_pci_resume(struct pci_dev *pdev)
2444 {
2445 	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
2446 	struct device *dev = &cfg->dev->dev;
2447 
2448 	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
2449 
2450 	cfg->state = STATE_NORMAL;
2451 	wake_up_all(&cfg->reset_waitq);
2452 	scsi_unblock_requests(cfg->host);
2453 }
2454 
2455 static const struct pci_error_handlers cxlflash_err_handler = {
2456 	.error_detected = cxlflash_pci_error_detected,
2457 	.slot_reset = cxlflash_pci_slot_reset,
2458 	.resume = cxlflash_pci_resume,
2459 };
2460 
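/*
 * Typical EEH flow for a recoverable error, assuming the PCI core drives
 * the callbacks in the usual order: error_detected() returns
 * PCI_ERS_RESULT_NEED_RESET, the core resets the slot, slot_reset()
 * rebuilds the AFU via init_afu(), and resume() restores STATE_NORMAL and
 * unblocks queued SCSI requests.
 */
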
2461 /*
2462  * PCI device structure
2463  */
2464 static struct pci_driver cxlflash_driver = {
2465 	.name = CXLFLASH_NAME,
2466 	.id_table = cxlflash_pci_table,
2467 	.probe = cxlflash_probe,
2468 	.remove = cxlflash_remove,
2469 	.shutdown = cxlflash_remove,
2470 	.err_handler = &cxlflash_err_handler,
2471 };
2472 
2473 /**
2474  * init_cxlflash() - module entry point
2475  *
2476  * Return: 0 on success, -errno on failure
2477  */
2478 static int __init init_cxlflash(void)
2479 {
2480 	pr_info("%s: %s\n", __func__, CXLFLASH_ADAPTER_NAME);
2481 
2482 	cxlflash_list_init();
2483 
2484 	return pci_register_driver(&cxlflash_driver);
2485 }
2486 
2487 /**
2488  * exit_cxlflash() - module exit point
2489  */
2490 static void __exit exit_cxlflash(void)
2491 {
2492 	cxlflash_term_global_luns();
2493 	cxlflash_free_errpage();
2494 
2495 	pci_unregister_driver(&cxlflash_driver);
2496 }
2497 
2498 module_init(init_cxlflash);
2499 module_exit(exit_cxlflash);
2500