xref: /openbmc/linux/drivers/scsi/cxlflash/main.c (revision de2bdb3d)
1 /*
2  * CXL Flash Device Driver
3  *
4  * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
5  *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
6  *
7  * Copyright (C) 2015 IBM Corporation
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License
11  * as published by the Free Software Foundation; either version
12  * 2 of the License, or (at your option) any later version.
13  */
14 
15 #include <linux/delay.h>
16 #include <linux/list.h>
17 #include <linux/module.h>
18 #include <linux/pci.h>
19 
20 #include <asm/unaligned.h>
21 
22 #include <misc/cxl.h>
23 
24 #include <scsi/scsi_cmnd.h>
25 #include <scsi/scsi_host.h>
26 #include <uapi/scsi/cxlflash_ioctl.h>
27 
28 #include "main.h"
29 #include "sislite.h"
30 #include "common.h"
31 
32 MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
33 MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
34 MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
35 MODULE_LICENSE("GPL");
36 
37 /**
38  * cmd_checkout() - checks out an AFU command
39  * @afu:	AFU to checkout from.
40  *
41  * Commands are checked out in a round-robin fashion. Note that since
42  * the command pool is larger than the hardware queue, the majority of
43  * times we will only loop once or twice before getting a command. The
44  * buffer and CDB within the command are initialized (zeroed) prior to
45  * returning.
46  *
47  * Return: The checked out command or NULL when the command pool is empty.
48  */
49 static struct afu_cmd *cmd_checkout(struct afu *afu)
50 {
51 	int k, dec = CXLFLASH_NUM_CMDS;
52 	struct afu_cmd *cmd;
53 
54 	while (dec--) {
55 		k = (afu->cmd_couts++ & (CXLFLASH_NUM_CMDS - 1));
56 
57 		cmd = &afu->cmd[k];
58 
59 		if (!atomic_dec_if_positive(&cmd->free)) {
60 			pr_devel("%s: returning found index=%d cmd=%p\n",
61 				 __func__, cmd->slot, cmd);
62 			memset(cmd->buf, 0, CMD_BUFSIZE);
63 			memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
64 			return cmd;
65 		}
66 	}
67 
68 	return NULL;
69 }
70 
71 /**
72  * cmd_checkin() - checks in an AFU command
73  * @cmd:	AFU command to checkin.
74  *
75  * Safe to pass commands that have already been checked in. Several
76  * internal tracking fields are reset as part of the checkin. Note
77  * that these are intentionally reset prior to toggling the free bit
78  * to avoid clobbering values in the event that the command is checked
79  * out right away.
80  */
81 static void cmd_checkin(struct afu_cmd *cmd)
82 {
83 	cmd->rcb.scp = NULL;
84 	cmd->rcb.timeout = 0;
85 	cmd->sa.ioasc = 0;
86 	cmd->cmd_tmf = false;
87 	cmd->sa.host_use[0] = 0; /* clears both completion and retry bytes */
88 
89 	if (unlikely(atomic_inc_return(&cmd->free) != 1)) {
90 		pr_err("%s: Freeing cmd (%d) that is not in use!\n",
91 		       __func__, cmd->slot);
92 		return;
93 	}
94 
95 	pr_devel("%s: released cmd %p index=%d\n", __func__, cmd, cmd->slot);
96 }
97 
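/*
 * Illustrative pairing (not driver code): internal users bracket a
 * command's lifetime with these helpers, e.g.:
 *
 *	struct afu_cmd *cmd = cmd_checkout(afu);
 *	if (unlikely(!cmd))
 *		return SCSI_MLQUEUE_HOST_BUSY;
 *	(fill in cmd->rcb, send the command, wait for completion)
 *	cmd_checkin(cmd);	(on paths where completion does not check in)
 */
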
98 /**
99  * process_cmd_err() - command error handler
100  * @cmd:	AFU command that experienced the error.
101  * @scp:	SCSI command associated with the AFU command in error.
102  *
103  * Translates error bits from AFU command to SCSI command results.
104  */
105 static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
106 {
107 	struct sisl_ioarcb *ioarcb;
108 	struct sisl_ioasa *ioasa;
109 	u32 resid;
110 
111 	if (unlikely(!cmd))
112 		return;
113 
114 	ioarcb = &(cmd->rcb);
115 	ioasa = &(cmd->sa);
116 
117 	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
118 		resid = ioasa->resid;
119 		scsi_set_resid(scp, resid);
120 		pr_debug("%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
121 			 __func__, cmd, scp, resid);
122 	}
123 
124 	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
125 		pr_debug("%s: cmd overrun cmd = %p scp = %p\n",
126 			 __func__, cmd, scp);
127 		scp->result = (DID_ERROR << 16);
128 	}
129 
130 	pr_debug("%s: cmd failed afu_rc=%d scsi_rc=%d fc_rc=%d "
131 		 "afu_extra=0x%X, scsi_extra=0x%X, fc_extra=0x%X\n",
132 		 __func__, ioasa->rc.afu_rc, ioasa->rc.scsi_rc,
133 		 ioasa->rc.fc_rc, ioasa->afu_extra, ioasa->scsi_extra,
134 		 ioasa->fc_extra);
135 
136 	if (ioasa->rc.scsi_rc) {
137 		/* We have a SCSI status */
138 		if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
139 			memcpy(scp->sense_buffer, ioasa->sense_data,
140 			       SISL_SENSE_DATA_LEN);
141 			scp->result = ioasa->rc.scsi_rc;
142 		} else
143 			scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
144 	}
145 
146 	/*
147 	 * We encountered an error. Set scp->result based on nature
148 	 * of error.
149 	 */
150 	if (ioasa->rc.fc_rc) {
151 		/* We have an FC status */
152 		switch (ioasa->rc.fc_rc) {
153 		case SISL_FC_RC_LINKDOWN:
154 			scp->result = (DID_REQUEUE << 16);
155 			break;
156 		case SISL_FC_RC_RESID:
157 			/* This indicates an FCP resid underrun */
158 			if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
159 				/* If the SISL_RC_FLAGS_OVERRUN flag was set,
160 				 * then this error is handled elsewhere. If
161 				 * not, it must be handled here; that case is
162 				 * probably an AFU bug.
163 				 */
164 				scp->result = (DID_ERROR << 16);
165 			}
166 			break;
167 		case SISL_FC_RC_RESIDERR:
168 			/* Resid mismatch between adapter and device */
169 		case SISL_FC_RC_TGTABORT:
170 		case SISL_FC_RC_ABORTOK:
171 		case SISL_FC_RC_ABORTFAIL:
172 		case SISL_FC_RC_NOLOGI:
173 		case SISL_FC_RC_ABORTPEND:
174 		case SISL_FC_RC_WRABORTPEND:
175 		case SISL_FC_RC_NOEXP:
176 		case SISL_FC_RC_INUSE:
177 			scp->result = (DID_ERROR << 16);
178 			break;
179 		}
180 	}
181 
182 	if (ioasa->rc.afu_rc) {
183 		/* We have an AFU error */
184 		switch (ioasa->rc.afu_rc) {
185 		case SISL_AFU_RC_NO_CHANNELS:
186 			scp->result = (DID_NO_CONNECT << 16);
187 			break;
188 		case SISL_AFU_RC_DATA_DMA_ERR:
189 			switch (ioasa->afu_extra) {
190 			case SISL_AFU_DMA_ERR_PAGE_IN:
191 				/* Retry */
192 				scp->result = (DID_IMM_RETRY << 16);
193 				break;
194 			case SISL_AFU_DMA_ERR_INVALID_EA:
195 			default:
196 				scp->result = (DID_ERROR << 16);
197 			}
198 			break;
199 		case SISL_AFU_RC_OUT_OF_DATA_BUFS:
200 			/* Retry */
201 			scp->result = (DID_ALLOC_FAILURE << 16);
202 			break;
203 		default:
204 			scp->result = (DID_ERROR << 16);
205 		}
206 	}
207 }
208 
209 /**
210  * cmd_complete() - command completion handler
211  * @cmd:	AFU command that has completed.
212  *
213  * Prepares and submits a command that has either completed or timed out
214  * to the SCSI stack. Checks the AFU command back into the command pool
215  * for non-internal (rcb.scp populated) commands.
216  */
217 static void cmd_complete(struct afu_cmd *cmd)
218 {
219 	struct scsi_cmnd *scp;
220 	ulong lock_flags;
221 	struct afu *afu = cmd->parent;
222 	struct cxlflash_cfg *cfg = afu->parent;
223 	bool cmd_is_tmf;
224 
225 	spin_lock_irqsave(&cmd->slock, lock_flags);
226 	cmd->sa.host_use_b[0] |= B_DONE;
227 	spin_unlock_irqrestore(&cmd->slock, lock_flags);
228 
229 	if (cmd->rcb.scp) {
230 		scp = cmd->rcb.scp;
231 		if (unlikely(cmd->sa.ioasc))
232 			process_cmd_err(cmd, scp);
233 		else
234 			scp->result = (DID_OK << 16);
235 
236 		cmd_is_tmf = cmd->cmd_tmf;
237 		pr_debug_ratelimited("%s: calling scsi_done scp=%p result=%X "
238 				     "ioasc=%d\n", __func__, scp, scp->result,
239 				     cmd->sa.ioasc);
240 
241 		cmd_checkin(cmd); /* Don't use cmd after here */
242 
243 		scsi_dma_unmap(scp);
244 		scp->scsi_done(scp);
245 
246 		if (cmd_is_tmf) {
247 			spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
248 			cfg->tmf_active = false;
249 			wake_up_all_locked(&cfg->tmf_waitq);
250 			spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
251 		}
252 	} else
253 		complete(&cmd->cevent);
254 }
255 
256 /**
257  * context_reset() - timeout handler for AFU commands
258  * @cmd:	AFU command that timed out.
259  *
260  * Sends a reset to the AFU.
261  */
262 static void context_reset(struct afu_cmd *cmd)
263 {
264 	int nretry = 0;
265 	u64 rrin = 0x1;
266 	u64 room = 0;
267 	struct afu *afu = cmd->parent;
268 	ulong lock_flags;
269 
270 	pr_debug("%s: cmd=%p\n", __func__, cmd);
271 
272 	spin_lock_irqsave(&cmd->slock, lock_flags);
273 
274 	/* Already completed? */
275 	if (cmd->sa.host_use_b[0] & B_DONE) {
276 		spin_unlock_irqrestore(&cmd->slock, lock_flags);
277 		return;
278 	}
279 
280 	cmd->sa.host_use_b[0] |= (B_DONE | B_ERROR | B_TIMEOUT);
281 	spin_unlock_irqrestore(&cmd->slock, lock_flags);
282 
283 	/*
284 	 * We really want to send this reset at all costs, so spread
285 	 * out wait time on successive retries for available room.
286 	 */
287 	do {
288 		room = readq_be(&afu->host_map->cmd_room);
289 		atomic64_set(&afu->room, room);
290 		if (room)
291 			goto write_rrin;
292 		udelay(1 << nretry);
293 	} while (nretry++ < MC_ROOM_RETRY_CNT);
294 
295 	pr_err("%s: no cmd_room to send reset\n", __func__);
296 	return;
297 
298 write_rrin:
299 	nretry = 0;
300 	writeq_be(rrin, &afu->host_map->ioarrin);
301 	do {
302 		rrin = readq_be(&afu->host_map->ioarrin);
303 		if (rrin != 0x1)
304 			break;
305 		/* Double delay each time */
306 		udelay(1 << nretry);
307 	} while (nretry++ < MC_ROOM_RETRY_CNT);
308 }
309 
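/*
 * Worked example (illustrative): each retry above waits 1 << nretry
 * microseconds, so the waits form a geometric series. Assuming
 * MC_ROOM_RETRY_CNT is 10, the loop spends at most
 *
 *	1 + 2 + 4 + ... microseconds, i.e. on the order of 2 ms,
 *
 * polling for command room before the reset attempt is abandoned.
 */
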
310 /**
311  * send_cmd() - sends an AFU command
312  * @afu:	AFU associated with the host.
313  * @cmd:	AFU command to send.
314  *
315  * Return:
316  *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
317  */
318 static int send_cmd(struct afu *afu, struct afu_cmd *cmd)
319 {
320 	struct cxlflash_cfg *cfg = afu->parent;
321 	struct device *dev = &cfg->dev->dev;
322 	int nretry = 0;
323 	int rc = 0;
324 	u64 room;
325 	long newval;
326 
327 	/*
328 	 * This routine is used by critical users such an AFU sync and to
329 	 * send a task management function (TMF). Thus we want to retry a
330 	 * bit before returning an error. To avoid the performance penalty
331 	 * of MMIO, we spread the update of 'room' over multiple commands.
332 	 */
333 retry:
334 	newval = atomic64_dec_if_positive(&afu->room);
335 	if (!newval) {
336 		do {
337 			room = readq_be(&afu->host_map->cmd_room);
338 			atomic64_set(&afu->room, room);
339 			if (room)
340 				goto write_ioarrin;
341 			udelay(1 << nretry);
342 		} while (nretry++ < MC_ROOM_RETRY_CNT);
343 
344 		dev_err(dev, "%s: no cmd_room to send 0x%X\n",
345 		       __func__, cmd->rcb.cdb[0]);
346 
347 		goto no_room;
348 	} else if (unlikely(newval < 0)) {
349 		/* This should be rare. i.e. Only if two threads race and
350 		/* This should be rare, i.e. only if two threads race and
351 		 * decrement before the MMIO read is done. In this case,
352 		 * afu->room.
353 		 */
354 		if (nretry++ < MC_ROOM_RETRY_CNT) {
355 			udelay(1 << nretry);
356 			goto retry;
357 		}
358 
359 		goto no_room;
360 	}
361 
362 write_ioarrin:
363 	writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
364 out:
365 	pr_devel("%s: cmd=%p len=%d ea=%p rc=%d\n", __func__, cmd,
366 		 cmd->rcb.data_len, (void *)cmd->rcb.data_ea, rc);
367 	return rc;
368 
369 no_room:
370 	afu->read_room = true;
371 	kref_get(&cfg->afu->mapcount);
372 	schedule_work(&cfg->work_q);
373 	rc = SCSI_MLQUEUE_HOST_BUSY;
374 	goto out;
375 }
376 
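/*
 * Illustrative summary (not driver code) of the 'room' cache used above,
 * where cmd_room is treated as an MMIO credit counter:
 *
 *	newval > 0:  a cached credit was consumed; send with no MMIO read
 *	newval == 0: the last cached credit was consumed; refresh the
 *		     cache from the cmd_room register before sending
 *	newval < 0:  the cache was empty (a race was lost); back off and
 *		     retry, benefiting from another thread's refresh
 */
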
377 /**
378  * wait_resp() - polls for a response or timeout to a sent AFU command
379  * @afu:	AFU associated with the host.
380  * @cmd:	AFU command that was sent.
381  */
382 static void wait_resp(struct afu *afu, struct afu_cmd *cmd)
383 {
384 	ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);
385 
386 	timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
387 	if (!timeout)
388 		context_reset(cmd);
389 
390 	if (unlikely(cmd->sa.ioasc != 0))
391 		pr_err("%s: CMD 0x%X failed, IOASC: flags 0x%X, afu_rc 0x%X, "
392 		       "scsi_rc 0x%X, fc_rc 0x%X\n", __func__, cmd->rcb.cdb[0],
393 		       cmd->sa.rc.flags, cmd->sa.rc.afu_rc, cmd->sa.rc.scsi_rc,
394 		       cmd->sa.rc.fc_rc);
395 }
396 
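/*
 * Worked example (illustrative): rcb.timeout is expressed in seconds and
 * the wait above allows twice that budget. For a command issued with a
 * 5 second timeout:
 *
 *	timeout = msecs_to_jiffies(5 * 2 * 1000)	(10 seconds)
 *
 * and only after those 10 seconds pass without a completion is
 * context_reset() invoked.
 */
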
397 /**
398  * send_tmf() - sends a Task Management Function (TMF)
399  * @afu:	AFU to checkout from.
400  * @scp:	SCSI command from stack.
401  * @tmfcmd:	TMF command to send.
402  *
403  * Return:
404  *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
405  */
406 static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
407 {
408 	struct afu_cmd *cmd;
409 
410 	u32 port_sel = scp->device->channel + 1;
411 	short lflag = 0;
412 	struct Scsi_Host *host = scp->device->host;
413 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
414 	struct device *dev = &cfg->dev->dev;
415 	ulong lock_flags;
416 	int rc = 0;
417 	ulong to;
418 
419 	cmd = cmd_checkout(afu);
420 	if (unlikely(!cmd)) {
421 		dev_err(dev, "%s: could not get a free command\n", __func__);
422 		rc = SCSI_MLQUEUE_HOST_BUSY;
423 		goto out;
424 	}
425 
426 	/* When Task Management Function is active do not send another */
427 	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
428 	if (cfg->tmf_active)
429 		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
430 						  !cfg->tmf_active,
431 						  cfg->tmf_slock);
432 	cfg->tmf_active = true;
433 	cmd->cmd_tmf = true;
434 	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
435 
436 	cmd->rcb.ctx_id = afu->ctx_hndl;
437 	cmd->rcb.port_sel = port_sel;
438 	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
439 
440 	lflag = SISL_REQ_FLAGS_TMF_CMD;
441 
442 	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
443 			      SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);
444 
445 	/* Stash the scp in the reserved field, for reuse during interrupt */
446 	cmd->rcb.scp = scp;
447 
448 	/* Copy the CDB from the cmd passed in */
449 	memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));
450 
451 	/* Send the command */
452 	rc = send_cmd(afu, cmd);
453 	if (unlikely(rc)) {
454 		cmd_checkin(cmd);
455 		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
456 		cfg->tmf_active = false;
457 		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
458 		goto out;
459 	}
460 
461 	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
462 	to = msecs_to_jiffies(5000);
463 	to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
464 						       !cfg->tmf_active,
465 						       cfg->tmf_slock,
466 						       to);
467 	if (!to) {
468 		cfg->tmf_active = false;
469 		dev_err(dev, "%s: TMF timed out!\n", __func__);
470 		rc = -1;
471 	}
472 	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
473 out:
474 	return rc;
475 }
476 
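/*
 * Illustrative call (not shown in this section): the SCSI reset handlers
 * use this helper roughly as follows, assuming the TMF_LUN_RESET opcode
 * defined in main.h:
 *
 *	rc = send_tmf(afu, scp, TMF_LUN_RESET);
 */
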
477 static void afu_unmap(struct kref *ref)
478 {
479 	struct afu *afu = container_of(ref, struct afu, mapcount);
480 
481 	if (likely(afu->afu_map)) {
482 		cxl_psa_unmap((void __iomem *)afu->afu_map);
483 		afu->afu_map = NULL;
484 	}
485 }
486 
487 /**
488  * cxlflash_driver_info() - information handler for this host driver
489  * @host:	SCSI host associated with device.
490  *
491  * Return: A string describing the device.
492  */
493 static const char *cxlflash_driver_info(struct Scsi_Host *host)
494 {
495 	return CXLFLASH_ADAPTER_NAME;
496 }
497 
498 /**
499  * cxlflash_queuecommand() - sends a mid-layer request
500  * @host:	SCSI host associated with device.
501  * @scp:	SCSI command to send.
502  *
503  * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
504  */
505 static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
506 {
507 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
508 	struct afu *afu = cfg->afu;
509 	struct device *dev = &cfg->dev->dev;
510 	struct afu_cmd *cmd;
511 	u32 port_sel = scp->device->channel + 1;
512 	int nseg, i, ncount;
513 	struct scatterlist *sg;
514 	ulong lock_flags;
515 	short lflag = 0;
516 	int rc = 0;
517 	int kref_got = 0;
518 
519 	dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
520 			    "cdb=(%08X-%08X-%08X-%08X)\n",
521 			    __func__, scp, host->host_no, scp->device->channel,
522 			    scp->device->id, scp->device->lun,
523 			    get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
524 			    get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
525 			    get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
526 			    get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
527 
528 	/*
529 	 * If a Task Management Function is active, wait for it to complete
530 	 * before continuing with regular commands.
531 	 */
532 	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
533 	if (cfg->tmf_active) {
534 		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
535 		rc = SCSI_MLQUEUE_HOST_BUSY;
536 		goto out;
537 	}
538 	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
539 
540 	switch (cfg->state) {
541 	case STATE_RESET:
542 		dev_dbg_ratelimited(dev, "%s: device is in reset!\n", __func__);
543 		rc = SCSI_MLQUEUE_HOST_BUSY;
544 		goto out;
545 	case STATE_FAILTERM:
546 		dev_dbg_ratelimited(dev, "%s: device has failed!\n", __func__);
547 		scp->result = (DID_NO_CONNECT << 16);
548 		scp->scsi_done(scp);
549 		rc = 0;
550 		goto out;
551 	default:
552 		break;
553 	}
554 
555 	cmd = cmd_checkout(afu);
556 	if (unlikely(!cmd)) {
557 		dev_err(dev, "%s: could not get a free command\n", __func__);
558 		rc = SCSI_MLQUEUE_HOST_BUSY;
559 		goto out;
560 	}
561 
562 	kref_get(&cfg->afu->mapcount);
563 	kref_got = 1;
564 
565 	cmd->rcb.ctx_id = afu->ctx_hndl;
566 	cmd->rcb.port_sel = port_sel;
567 	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
568 
569 	if (scp->sc_data_direction == DMA_TO_DEVICE)
570 		lflag = SISL_REQ_FLAGS_HOST_WRITE;
571 	else
572 		lflag = SISL_REQ_FLAGS_HOST_READ;
573 
574 	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
575 			      SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);
576 
577 	/* Stash the scp in the reserved field, for reuse during interrupt */
578 	cmd->rcb.scp = scp;
579 
580 	nseg = scsi_dma_map(scp);
581 	if (unlikely(nseg < 0)) {
582 		dev_err(dev, "%s: Failed to DMA map! nseg=%d\n",
583 			__func__, nseg);
		cmd_checkin(cmd); /* Release cmd; it was never sent */
584 		rc = SCSI_MLQUEUE_HOST_BUSY;
585 		goto out;
586 	}
587 
588 	ncount = scsi_sg_count(scp);
589 	scsi_for_each_sg(scp, sg, ncount, i) {
590 		cmd->rcb.data_len = sg_dma_len(sg);
591 		cmd->rcb.data_ea = sg_dma_address(sg);
592 	}
593 
594 	/* Copy the CDB from the scsi_cmnd passed in */
595 	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));
596 
597 	/* Send the command */
598 	rc = send_cmd(afu, cmd);
599 	if (unlikely(rc)) {
600 		cmd_checkin(cmd);
601 		scsi_dma_unmap(scp);
602 	}
603 
604 out:
605 	if (kref_got)
606 		kref_put(&afu->mapcount, afu_unmap);
607 	pr_devel("%s: returning rc=%d\n", __func__, rc);
608 	return rc;
609 }
610 
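/*
 * Note (illustrative): the scsi_for_each_sg() loop above keeps only the
 * last element's address/length because the RCB describes a single data
 * extent; this is safe assuming the host template elsewhere in this file
 * caps sg_tablesize at 1, so ncount never exceeds one.
 */
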
611 /**
612  * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
613  * @cfg:	Internal structure associated with the host.
614  */
615 static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
616 {
617 	struct pci_dev *pdev = cfg->dev;
618 
619 	if (pci_channel_offline(pdev))
620 		wait_event_timeout(cfg->reset_waitq,
621 				   !pci_channel_offline(pdev),
622 				   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
623 }
624 
625 /**
626  * free_mem() - free memory associated with the AFU
627  * @cfg:	Internal structure associated with the host.
628  */
629 static void free_mem(struct cxlflash_cfg *cfg)
630 {
631 	int i;
632 	char *buf = NULL;
633 	struct afu *afu = cfg->afu;
634 
635 	if (cfg->afu) {
636 		for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
637 			buf = afu->cmd[i].buf;
638 			if (!((u64)buf & (PAGE_SIZE - 1)))
639 				free_page((ulong)buf);
640 		}
641 
642 		free_pages((ulong)afu, get_order(sizeof(struct afu)));
643 		cfg->afu = NULL;
644 	}
645 }
646 
647 /**
648  * stop_afu() - stops the AFU command timers and unmaps the MMIO space
649  * @cfg:	Internal structure associated with the host.
650  *
651  * Safe to call with AFU in a partially allocated/initialized state.
652  *
653  * Cleans up all state associated with the command queue, and unmaps
654  * the MMIO space.
655  *
656  *  - complete() will take care of commands we initiated (they'll be checked
657  *  in as part of the cleanup that occurs after the completion)
658  *
659  *  - cmd_checkin() will take care of entries that we did not initiate and that
660  *  have not (and will not) complete because they are sitting on a [now stale]
661  *  hardware queue
662  */
663 static void stop_afu(struct cxlflash_cfg *cfg)
664 {
665 	int i;
666 	struct afu *afu = cfg->afu;
667 	struct afu_cmd *cmd;
668 
669 	if (likely(afu)) {
670 		for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
671 			cmd = &afu->cmd[i];
672 			complete(&cmd->cevent);
673 			if (!atomic_read(&cmd->free))
674 				cmd_checkin(cmd);
675 		}
676 
677 		if (likely(afu->afu_map)) {
678 			cxl_psa_unmap((void __iomem *)afu->afu_map);
679 			afu->afu_map = NULL;
680 		}
681 		kref_put(&afu->mapcount, afu_unmap);
682 	}
683 }
684 
685 /**
686  * term_intr() - disables all AFU interrupts
687  * @cfg:	Internal structure associated with the host.
688  * @level:	Depth of allocation, where to begin waterfall tear down.
689  *
690  * Safe to call with AFU/MC in partially allocated/initialized state.
691  */
692 static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level)
693 {
694 	struct afu *afu = cfg->afu;
695 	struct device *dev = &cfg->dev->dev;
696 
697 	if (!afu || !cfg->mcctx) {
698 		dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
699 		return;
700 	}
701 
702 	switch (level) {
703 	case UNMAP_THREE:
704 		cxl_unmap_afu_irq(cfg->mcctx, 3, afu);
705 	case UNMAP_TWO:
706 		cxl_unmap_afu_irq(cfg->mcctx, 2, afu);
707 	case UNMAP_ONE:
708 		cxl_unmap_afu_irq(cfg->mcctx, 1, afu);
709 	case FREE_IRQ:
710 		cxl_free_afu_irqs(cfg->mcctx);
711 		/* fall through */
712 	case UNDO_NOOP:
713 		/* No action required */
714 		break;
715 	}
716 }
717 
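/*
 * Illustrative example: the undo level records how far allocation got,
 * so a caller that failed after mapping two of the three interrupts
 * unwinds with
 *
 *	term_intr(cfg, UNMAP_TWO);
 *
 * which unmaps IRQ 2, then IRQ 1, then frees the interrupt allocations
 * via the fall-through cases above.
 */
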
718 /**
719  * term_mc() - terminates the master context
720  * @cfg:	Internal structure associated with the host.
722  *
723  * Safe to call with AFU/MC in partially allocated/initialized state.
724  */
725 static void term_mc(struct cxlflash_cfg *cfg)
726 {
727 	int rc = 0;
728 	struct afu *afu = cfg->afu;
729 	struct device *dev = &cfg->dev->dev;
730 
731 	if (!afu || !cfg->mcctx) {
732 		dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
733 		return;
734 	}
735 
736 	rc = cxl_stop_context(cfg->mcctx);
737 	WARN_ON(rc);
738 	cfg->mcctx = NULL;
739 }
740 
741 /**
742  * term_afu() - terminates the AFU
743  * @cfg:	Internal structure associated with the host.
744  *
745  * Safe to call with AFU/MC in partially allocated/initialized state.
746  */
747 static void term_afu(struct cxlflash_cfg *cfg)
748 {
749 	/*
750 	 * Tear down is carefully orchestrated to ensure
751 	 * no interrupts can come in when the problem state
752 	 * area is unmapped.
753 	 *
754 	 * 1) Disable all AFU interrupts
755 	 * 2) Unmap the problem state area
756 	 * 3) Stop the master context
757 	 */
758 	term_intr(cfg, UNMAP_THREE);
759 	if (cfg->afu)
760 		stop_afu(cfg);
761 
762 	term_mc(cfg);
763 
764 	pr_debug("%s: returning\n", __func__);
765 }
766 
767 /**
768  * notify_shutdown() - notifies device of pending shutdown
769  * @cfg:	Internal structure associated with the host.
770  * @wait:	Whether to wait for shutdown processing to complete.
771  *
772  * This function will notify the AFU that the adapter is being shutdown
773  * and will wait for shutdown processing to complete if wait is true.
774  * This notification should flush pending I/Os to the device and halt
775  * further I/Os until the next AFU reset is issued and device restarted.
776  */
777 static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
778 {
779 	struct afu *afu = cfg->afu;
780 	struct device *dev = &cfg->dev->dev;
781 	struct sisl_global_map __iomem *global;
782 	struct dev_dependent_vals *ddv;
783 	u64 reg, status;
784 	int i, retry_cnt = 0;
785 
786 	ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
787 	if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
788 		return;
789 
790 	if (!afu || !afu->afu_map) {
791 		dev_dbg(dev, "%s: The problem state area is not mapped\n",
792 			__func__);
793 		return;
794 	}
795 
796 	global = &afu->afu_map->global;
797 
798 	/* Notify AFU */
799 	for (i = 0; i < NUM_FC_PORTS; i++) {
800 		reg = readq_be(&global->fc_regs[i][FC_CONFIG2 / 8]);
801 		reg |= SISL_FC_SHUTDOWN_NORMAL;
802 		writeq_be(reg, &global->fc_regs[i][FC_CONFIG2 / 8]);
803 	}
804 
805 	if (!wait)
806 		return;
807 
808 	/* Wait up to 1.5 seconds for shutdown processing to complete */
809 	for (i = 0; i < NUM_FC_PORTS; i++) {
810 		retry_cnt = 0;
811 		while (true) {
812 			status = readq_be(&global->fc_regs[i][FC_STATUS / 8]);
813 			if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
814 				break;
815 			if (++retry_cnt >= MC_RETRY_CNT) {
816 				dev_dbg(dev, "%s: port %d shutdown processing "
817 					"not yet completed\n", __func__, i);
818 				break;
819 			}
820 			msleep(100 * retry_cnt);
821 		}
822 	}
823 }
824 
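/*
 * Worked example (illustrative): the poll above sleeps 100 ms * retry_cnt
 * between status reads, so assuming MC_RETRY_CNT is 5 each port sleeps
 * at most
 *
 *	100 + 200 + 300 + 400 ms = 1 second
 *
 * before the wait is abandoned with a debug message.
 */
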
825 /**
826  * cxlflash_remove() - PCI entry point to tear down host
827  * @pdev:	PCI device associated with the host.
828  *
829  * Safe to use as a cleanup in partially allocated/initialized state.
830  */
831 static void cxlflash_remove(struct pci_dev *pdev)
832 {
833 	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
834 	ulong lock_flags;
835 
836 	if (!pci_is_enabled(pdev)) {
837 		pr_debug("%s: Device is disabled\n", __func__);
838 		return;
839 	}
840 
841 	/* If a Task Management Function is active, wait for it to complete
842 	 * before continuing with remove.
843 	 */
844 	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
845 	if (cfg->tmf_active)
846 		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
847 						  !cfg->tmf_active,
848 						  cfg->tmf_slock);
849 	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
850 
851 	/* Notify AFU and wait for shutdown processing to complete */
852 	notify_shutdown(cfg, true);
853 
854 	cfg->state = STATE_FAILTERM;
855 	cxlflash_stop_term_user_contexts(cfg);
856 
857 	switch (cfg->init_state) {
858 	case INIT_STATE_SCSI:
859 		cxlflash_term_local_luns(cfg);
860 		scsi_remove_host(cfg->host);
861 		/* fall through */
862 	case INIT_STATE_AFU:
863 		cancel_work_sync(&cfg->work_q);
864 		term_afu(cfg);
865 	case INIT_STATE_PCI:
866 		pci_disable_device(pdev);
867 	case INIT_STATE_NONE:
868 		free_mem(cfg);
869 		scsi_host_put(cfg->host);
870 		break;
871 	}
872 
873 	pr_debug("%s: returning\n", __func__);
874 }
875 
876 /**
877  * alloc_mem() - allocates the AFU and its command pool
878  * @cfg:	Internal structure associated with the host.
879  *
880  * A partially allocated state remains on failure.
881  *
882  * Return:
883  *	0 on success
884  *	-ENOMEM on failure to allocate memory
885  */
886 static int alloc_mem(struct cxlflash_cfg *cfg)
887 {
888 	int rc = 0;
889 	int i;
890 	char *buf = NULL;
891 	struct device *dev = &cfg->dev->dev;
892 
893 	/* AFU is ~12k, i.e. only one 64k page or up to four 4k pages */
894 	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
895 					    get_order(sizeof(struct afu)));
896 	if (unlikely(!cfg->afu)) {
897 		dev_err(dev, "%s: cannot get %d free pages\n",
898 			__func__, get_order(sizeof(struct afu)));
899 		rc = -ENOMEM;
900 		goto out;
901 	}
902 	cfg->afu->parent = cfg;
903 	cfg->afu->afu_map = NULL;
904 
905 	for (i = 0; i < CXLFLASH_NUM_CMDS; buf += CMD_BUFSIZE, i++) {
906 		if (!((u64)buf & (PAGE_SIZE - 1))) {
907 			buf = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
908 			if (unlikely(!buf)) {
909 				dev_err(dev,
910 					"%s: Failed to allocate command buffers!\n",
911 				       __func__);
912 				rc = -ENOMEM;
913 				free_mem(cfg);
914 				goto out;
915 			}
916 		}
917 
918 		cfg->afu->cmd[i].buf = buf;
919 		atomic_set(&cfg->afu->cmd[i].free, 1);
920 		cfg->afu->cmd[i].slot = i;
921 	}
922 
923 out:
924 	return rc;
925 }
926 
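/*
 * Worked example (illustrative): command buffers are carved out of whole
 * pages, with a fresh page allocated whenever 'buf' crosses a page
 * boundary. Assuming CMD_BUFSIZE is 4K, a 64K-page system packs sixteen
 * buffers per page while a 4K-page system allocates one page per buffer;
 * the matching alignment test in free_mem() then frees each page exactly
 * once.
 */
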
927 /**
928  * init_pci() - initializes the host as a PCI device
929  * @cfg:	Internal structure associated with the host.
930  *
931  * Return: 0 on success, -errno on failure
932  */
933 static int init_pci(struct cxlflash_cfg *cfg)
934 {
935 	struct pci_dev *pdev = cfg->dev;
936 	int rc = 0;
937 
938 	rc = pci_enable_device(pdev);
939 	if (rc || pci_channel_offline(pdev)) {
940 		if (pci_channel_offline(pdev)) {
941 			cxlflash_wait_for_pci_err_recovery(cfg);
942 			rc = pci_enable_device(pdev);
943 		}
944 
945 		if (rc) {
946 			dev_err(&pdev->dev, "%s: Cannot enable adapter\n",
947 				__func__);
948 			cxlflash_wait_for_pci_err_recovery(cfg);
949 			goto out;
950 		}
951 	}
952 
953 out:
954 	pr_debug("%s: returning rc=%d\n", __func__, rc);
955 	return rc;
956 }
957 
958 /**
959  * init_scsi() - adds the host to the SCSI stack and kicks off host scan
960  * @cfg:	Internal structure associated with the host.
961  *
962  * Return: 0 on success, -errno on failure
963  */
964 static int init_scsi(struct cxlflash_cfg *cfg)
965 {
966 	struct pci_dev *pdev = cfg->dev;
967 	int rc = 0;
968 
969 	rc = scsi_add_host(cfg->host, &pdev->dev);
970 	if (rc) {
971 		dev_err(&pdev->dev, "%s: scsi_add_host failed (rc=%d)\n",
972 			__func__, rc);
973 		goto out;
974 	}
975 
976 	scsi_scan_host(cfg->host);
977 
978 out:
979 	pr_debug("%s: returning rc=%d\n", __func__, rc);
980 	return rc;
981 }
982 
983 /**
984  * set_port_online() - transitions the specified host FC port to online state
985  * @fc_regs:	Top of MMIO region defined for specified port.
986  *
987  * The provided MMIO region must be mapped prior to call. Online state means
988  * that the FC link layer has synced, completed the handshaking process, and
989  * is ready for login to start.
990  */
991 static void set_port_online(__be64 __iomem *fc_regs)
992 {
993 	u64 cmdcfg;
994 
995 	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
996 	cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE);	/* clear OFF_LINE */
997 	cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);	/* set ON_LINE */
998 	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
999 }
1000 
1001 /**
1002  * set_port_offline() - transitions the specified host FC port to offline state
1003  * @fc_regs:	Top of MMIO region defined for specified port.
1004  *
1005  * The provided MMIO region must be mapped prior to call.
1006  */
1007 static void set_port_offline(__be64 __iomem *fc_regs)
1008 {
1009 	u64 cmdcfg;
1010 
1011 	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
1012 	cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);	/* clear ON_LINE */
1013 	cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);	/* set OFF_LINE */
1014 	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
1015 }
1016 
1017 /**
1018  * wait_port_online() - waits for the specified host FC port to come online
1019  * @fc_regs:	Top of MMIO region defined for specified port.
1020  * @delay_us:	Number of microseconds to delay between reading port status.
1021  * @nretry:	Number of cycles to retry reading port status.
1022  *
1023  * The provided MMIO region must be mapped prior to call. This will time out
1024  * when the cable is not plugged in.
1025  *
1026  * Return:
1027  *	TRUE (1) when the specified port is online
1028  *	FALSE (0) when the specified port fails to come online after timeout
1029  *	-EINVAL when @delay_us is less than 1000
1030  */
1031 static int wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
1032 {
1033 	u64 status;
1034 
1035 	if (delay_us < 1000) {
1036 		pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
1037 		return -EINVAL;
1038 	}
1039 
1040 	do {
1041 		msleep(delay_us / 1000);
1042 		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
1043 		if (status == U64_MAX)
1044 			nretry /= 2;
1045 	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
1046 		 nretry--);
1047 
1048 	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
1049 }
1050 
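/*
 * Worked example (illustrative): assuming the values used by callers in
 * this file, FC_PORT_STATUS_RETRY_INTERVAL_US of 100000 (100 ms) and
 * FC_PORT_STATUS_RETRY_CNT of 500 bound the wait at roughly
 *
 *	500 * 100 ms = 50 seconds,
 *
 * with the remaining budget halved each time a read returns all FFs
 * (U64_MAX), i.e. when the hardware is not responding at all.
 */
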
1051 /**
1052  * wait_port_offline() - waits for the specified host FC port to go offline
1053  * @fc_regs:	Top of MMIO region defined for specified port.
1054  * @delay_us:	Number of microseconds to delay between reading port status.
1055  * @nretry:	Number of cycles to retry reading port status.
1056  *
1057  * The provided MMIO region must be mapped prior to call.
1058  *
1059  * Return:
1060  *	TRUE (1) when the specified port is offline
1061  *	FALSE (0) when the specified port fails to go offline after timeout
1062  *	-EINVAL when @delay_us is less than 1000
1063  */
1064 static int wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
1065 {
1066 	u64 status;
1067 
1068 	if (delay_us < 1000) {
1069 		pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
1070 		return -EINVAL;
1071 	}
1072 
1073 	do {
1074 		msleep(delay_us / 1000);
1075 		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
1076 		if (status == U64_MAX)
1077 			nretry /= 2;
1078 	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
1079 		 nretry--);
1080 
1081 	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
1082 }
1083 
1084 /**
1085  * afu_set_wwpn() - configures the WWPN for the specified host FC port
1086  * @afu:	AFU associated with the host that owns the specified FC port.
1087  * @port:	Port number being configured.
1088  * @fc_regs:	Top of MMIO region defined for specified port.
1089  * @wwpn:	The world-wide-port-number previously discovered for port.
1090  *
1091  * The provided MMIO region must be mapped prior to call. As part of the
1092  * sequence to configure the WWPN, the port is toggled offline and then back
1093  * online. This toggling action can cause this routine to delay up to a few
1094  * seconds. When configured to use the internal LUN feature of the AFU, a
1095  * failure to come online is overridden.
1096  */
1097 static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
1098 			 u64 wwpn)
1099 {
1100 	set_port_offline(fc_regs);
1101 	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1102 			       FC_PORT_STATUS_RETRY_CNT)) {
1103 		pr_debug("%s: wait on port %d to go offline timed out\n",
1104 			 __func__, port);
1105 	}
1106 
1107 	writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);
1108 
1109 	set_port_online(fc_regs);
1110 	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1111 			      FC_PORT_STATUS_RETRY_CNT)) {
1112 		pr_debug("%s: wait on port %d to go online timed out\n",
1113 			 __func__, port);
1114 	}
1115 }
1116 
1117 /**
1118  * afu_link_reset() - resets the specified host FC port
1119  * @afu:	AFU associated with the host that owns the specified FC port.
1120  * @port:	Port number being configured.
1121  * @fc_regs:	Top of MMIO region defined for specified port.
1122  *
1123  * The provided MMIO region must be mapped prior to call. The sequence to
1124  * reset the port involves toggling it offline and then back online. This
1125  * action can cause this routine to delay up to a few seconds. An effort
1126  * is made to maintain the link with the device by switching the host to
1127  * use the alternate port exclusively while the reset takes place.
1129  */
1130 static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
1131 {
1132 	u64 port_sel;
1133 
1134 	/* first switch the AFU to the other links, if any */
1135 	port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
1136 	port_sel &= ~(1ULL << port);
1137 	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
1138 	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
1139 
1140 	set_port_offline(fc_regs);
1141 	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1142 			       FC_PORT_STATUS_RETRY_CNT))
1143 		pr_err("%s: wait on port %d to go offline timed out\n",
1144 		       __func__, port);
1145 
1146 	set_port_online(fc_regs);
1147 	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1148 			      FC_PORT_STATUS_RETRY_CNT))
1149 		pr_err("%s: wait on port %d to go online timed out\n",
1150 		       __func__, port);
1151 
1152 	/* switch back to include this port */
1153 	port_sel |= (1ULL << port);
1154 	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
1155 	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
1156 
1157 	pr_debug("%s: returning port_sel=%lld\n", __func__, port_sel);
1158 }
1159 
1160 /*
1161  * Asynchronous interrupt information table
1162  */
1163 static const struct asyc_intr_info ainfo[] = {
1164 	{SISL_ASTATUS_FC0_OTHER, "other error", 0, CLR_FC_ERROR | LINK_RESET},
1165 	{SISL_ASTATUS_FC0_LOGO, "target initiated LOGO", 0, 0},
1166 	{SISL_ASTATUS_FC0_CRC_T, "CRC threshold exceeded", 0, LINK_RESET},
1167 	{SISL_ASTATUS_FC0_LOGI_R, "login timed out, retrying", 0, LINK_RESET},
1168 	{SISL_ASTATUS_FC0_LOGI_F, "login failed", 0, CLR_FC_ERROR},
1169 	{SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, SCAN_HOST},
1170 	{SISL_ASTATUS_FC0_LINK_DN, "link down", 0, 0},
1171 	{SISL_ASTATUS_FC0_LINK_UP, "link up", 0, 0},
1172 	{SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
1173 	{SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
1174 	{SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
1175 	{SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, LINK_RESET},
1176 	{SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
1177 	{SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, SCAN_HOST},
1178 	{SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
1179 	{SISL_ASTATUS_FC1_LINK_UP, "link up", 1, 0},
1180 	{0x0, "", 0, 0}		/* terminator */
1181 };
1182 
1183 /**
1184  * find_ainfo() - locates and returns asynchronous interrupt information
1185  * @status:	Status code set by AFU on error.
1186  *
1187  * Return: The located information or NULL when the status code is invalid.
1188  */
1189 static const struct asyc_intr_info *find_ainfo(u64 status)
1190 {
1191 	const struct asyc_intr_info *info;
1192 
1193 	for (info = &ainfo[0]; info->status; info++)
1194 		if (info->status == status)
1195 			return info;
1196 
1197 	return NULL;
1198 }
1199 
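/*
 * Illustrative lookup: status codes are single bits, so callers probe
 * one bit at a time, e.g.
 *
 *	info = find_ainfo(SISL_ASTATUS_FC0_LINK_UP);
 *
 * returns the "link up" entry for port 0 from the table above.
 */
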
1200 /**
1201  * afu_err_intr_init() - clears and initializes the AFU for error interrupts
1202  * @afu:	AFU associated with the host.
1203  */
1204 static void afu_err_intr_init(struct afu *afu)
1205 {
1206 	int i;
1207 	u64 reg;
1208 
1209 	/* Global async interrupts: the AFU clears afu_ctrl on context exit
1210 	 * if async interrupts were sent to that context. This prevents
1211 	 * the AFU from sending further async interrupts when there is
1212 	 * nobody to receive them.
1213 	 */
1215 
1216 	/* mask all */
1217 	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
1218 	/* set LISN# to send and point to master context */
1219 	reg = ((u64) (((afu->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);
1220 
1221 	if (afu->internal_lun)
1222 		reg |= 1;	/* Bit 63 indicates local lun */
1223 	writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
1224 	/* clear all */
1225 	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
1226 	/* unmask bits that are of interest */
1227 	/* note: afu can send an interrupt after this step */
1228 	writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
1229 	/* clear again in case a bit came on after previous clear but before */
1230 	/* unmask */
1231 	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
1232 
1233 	/* Clear/Set internal lun bits */
1234 	reg = readq_be(&afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);
1235 	reg &= SISL_FC_INTERNAL_MASK;
1236 	if (afu->internal_lun)
1237 		reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
1238 	writeq_be(reg, &afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);
1239 
1240 	/* now clear FC errors */
1241 	for (i = 0; i < NUM_FC_PORTS; i++) {
1242 		writeq_be(0xFFFFFFFFU,
1243 			  &afu->afu_map->global.fc_regs[i][FC_ERROR / 8]);
1244 		writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRCAP / 8]);
1245 	}
1246 
1247 	/* sync interrupts for master's IOARRIN write */
1248 	/* note that unlike asyncs, there can be no pending sync interrupts */
1249 	/* at this time (this is a fresh context and master has not written */
1250 	/* IOARRIN yet), so there is nothing to clear. */
1251 
1252 	/* set LISN#, it is always sent to the context that wrote IOARRIN */
1253 	writeq_be(SISL_MSI_SYNC_ERROR, &afu->host_map->ctx_ctrl);
1254 	writeq_be(SISL_ISTATUS_MASK, &afu->host_map->intr_mask);
1255 }
1256 
1257 /**
1258  * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
1259  * @irq:	Interrupt number.
1260  * @data:	Private data provided at interrupt registration, the AFU.
1261  *
1262  * Return: Always return IRQ_HANDLED.
1263  */
1264 static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
1265 {
1266 	struct afu *afu = (struct afu *)data;
1267 	u64 reg;
1268 	u64 reg_unmasked;
1269 
1270 	reg = readq_be(&afu->host_map->intr_status);
1271 	reg_unmasked = (reg & SISL_ISTATUS_UNMASK);
1272 
1273 	if (reg_unmasked == 0UL) {
1274 		pr_err("%s: %llX: spurious interrupt, intr_status %016llX\n",
1275 		       __func__, (u64)afu, reg);
1276 		goto cxlflash_sync_err_irq_exit;
1277 	}
1278 
1279 	pr_err("%s: %llX: unexpected interrupt, intr_status %016llX\n",
1280 	       __func__, (u64)afu, reg);
1281 
1282 	writeq_be(reg_unmasked, &afu->host_map->intr_clear);
1283 
1284 cxlflash_sync_err_irq_exit:
1285 	pr_debug("%s: returning rc=%d\n", __func__, IRQ_HANDLED);
1286 	return IRQ_HANDLED;
1287 }
1288 
1289 /**
1290  * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
1291  * @irq:	Interrupt number.
1292  * @data:	Private data provided at interrupt registration, the AFU.
1293  *
1294  * Return: Always return IRQ_HANDLED.
1295  */
1296 static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
1297 {
1298 	struct afu *afu = (struct afu *)data;
1299 	struct afu_cmd *cmd;
1300 	bool toggle = afu->toggle;
1301 	u64 entry,
1302 	    *hrrq_start = afu->hrrq_start,
1303 	    *hrrq_end = afu->hrrq_end,
1304 	    *hrrq_curr = afu->hrrq_curr;
1305 
1306 	/* Process however many RRQ entries that are ready */
1307 	while (true) {
1308 		entry = *hrrq_curr;
1309 
1310 		if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
1311 			break;
1312 
1313 		cmd = (struct afu_cmd *)(entry & ~SISL_RESP_HANDLE_T_BIT);
1314 		cmd_complete(cmd);
1315 
1316 		/* Advance to next entry or wrap and flip the toggle bit */
1317 		if (hrrq_curr < hrrq_end)
1318 			hrrq_curr++;
1319 		else {
1320 			hrrq_curr = hrrq_start;
1321 			toggle ^= SISL_RESP_HANDLE_T_BIT;
1322 		}
1323 	}
1324 
1325 	afu->hrrq_curr = hrrq_curr;
1326 	afu->toggle = toggle;
1327 
1328 	return IRQ_HANDLED;
1329 }
1330 
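/*
 * Illustrative sketch of the RRQ entry format consumed above: each
 * 64-bit entry is the address of the completed afu_cmd with the toggle
 * folded into the low-order bit, so an entry is fresh only while its
 * toggle bit matches the software copy:
 *
 *	entry = *hrrq_curr;
 *	if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
 *		break;	(stale entry from the previous pass)
 *	cmd = (struct afu_cmd *)(entry & ~SISL_RESP_HANDLE_T_BIT);
 *
 * The toggle flips on every queue wrap, letting the handler recognize
 * new completions without a producer index.
 */
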
1331 /**
1332  * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
1333  * @irq:	Interrupt number.
1334  * @data:	Private data provided at interrupt registration, the AFU.
1335  *
1336  * Return: Always return IRQ_HANDLED.
1337  */
1338 static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
1339 {
1340 	struct afu *afu = (struct afu *)data;
1341 	struct cxlflash_cfg *cfg = afu->parent;
1342 	struct device *dev = &cfg->dev->dev;
1343 	u64 reg_unmasked;
1344 	const struct asyc_intr_info *info;
1345 	struct sisl_global_map __iomem *global = &afu->afu_map->global;
1346 	u64 reg;
1347 	u8 port;
1348 	int i;
1349 
1350 	reg = readq_be(&global->regs.aintr_status);
1351 	reg_unmasked = (reg & SISL_ASTATUS_UNMASK);
1352 
1353 	if (reg_unmasked == 0) {
1354 		dev_err(dev, "%s: spurious interrupt, aintr_status 0x%016llX\n",
1355 			__func__, reg);
1356 		goto out;
1357 	}
1358 
1359 	/* FYI, it is 'okay' to clear AFU status before FC_ERROR */
1360 	writeq_be(reg_unmasked, &global->regs.aintr_clear);
1361 
1362 	/* Check each bit that is on */
1363 	for (i = 0; reg_unmasked; i++, reg_unmasked = (reg_unmasked >> 1)) {
1364 		info = find_ainfo(1ULL << i);
1365 		if (((reg_unmasked & 0x1) == 0) || !info)
1366 			continue;
1367 
1368 		port = info->port;
1369 
1370 		dev_err(dev, "%s: FC Port %d -> %s, fc_status 0x%08llX\n",
1371 			__func__, port, info->desc,
1372 		       readq_be(&global->fc_regs[port][FC_STATUS / 8]));
1373 
1374 		/*
1375 		 * Do link reset first, some OTHER errors will set FC_ERROR
1376 		 * again if cleared before or w/o a reset
1377 		 */
1378 		if (info->action & LINK_RESET) {
1379 			dev_err(dev, "%s: FC Port %d: resetting link\n",
1380 				__func__, port);
1381 			cfg->lr_state = LINK_RESET_REQUIRED;
1382 			cfg->lr_port = port;
1383 			kref_get(&cfg->afu->mapcount);
1384 			schedule_work(&cfg->work_q);
1385 		}
1386 
1387 		if (info->action & CLR_FC_ERROR) {
1388 			reg = readq_be(&global->fc_regs[port][FC_ERROR / 8]);
1389 
1390 			/*
1391 			 * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
1392 			 * should be the same and tracing one is sufficient.
1393 			 */
1394 
1395 			dev_err(dev, "%s: fc %d: clearing fc_error 0x%08llX\n",
1396 				__func__, port, reg);
1397 
1398 			writeq_be(reg, &global->fc_regs[port][FC_ERROR / 8]);
1399 			writeq_be(0, &global->fc_regs[port][FC_ERRCAP / 8]);
1400 		}
1401 
1402 		if (info->action & SCAN_HOST) {
1403 			atomic_inc(&cfg->scan_host_needed);
1404 			kref_get(&cfg->afu->mapcount);
1405 			schedule_work(&cfg->work_q);
1406 		}
1407 	}
1408 
1409 out:
1410 	dev_dbg(dev, "%s: returning IRQ_HANDLED, afu=%p\n", __func__, afu);
1411 	return IRQ_HANDLED;
1412 }
1413 
1414 /**
1415  * start_context() - starts the master context
1416  * @cfg:	Internal structure associated with the host.
1417  *
1418  * Return: A success or failure value from CXL services.
1419  */
1420 static int start_context(struct cxlflash_cfg *cfg)
1421 {
1422 	int rc = 0;
1423 
1424 	rc = cxl_start_context(cfg->mcctx,
1425 			       cfg->afu->work.work_element_descriptor,
1426 			       NULL);
1427 
1428 	pr_debug("%s: returning rc=%d\n", __func__, rc);
1429 	return rc;
1430 }
1431 
1432 /**
1433  * read_vpd() - obtains the WWPNs from VPD
1434  * @cfg:	Internal structure associated with the host.
1435  * @wwpn:	Array of size NUM_FC_PORTS to pass back WWPNs
1436  *
1437  * Return: 0 on success, -errno on failure
1438  */
1439 static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
1440 {
1441 	struct pci_dev *dev = cfg->dev;
1442 	int rc = 0;
1443 	int ro_start, ro_size, i, j, k;
1444 	ssize_t vpd_size;
1445 	char vpd_data[CXLFLASH_VPD_LEN];
1446 	char tmp_buf[WWPN_BUF_LEN] = { 0 };
1447 	char *wwpn_vpd_tags[NUM_FC_PORTS] = { "V5", "V6" };
1448 
1449 	/* Get the VPD data from the device */
1450 	vpd_size = cxl_read_adapter_vpd(dev, vpd_data, sizeof(vpd_data));
1451 	if (unlikely(vpd_size <= 0)) {
1452 		dev_err(&dev->dev, "%s: Unable to read VPD (size = %ld)\n",
1453 		       __func__, vpd_size);
1454 		rc = -ENODEV;
1455 		goto out;
1456 	}
1457 
1458 	/* Get the read only section offset */
1459 	ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
1460 				    PCI_VPD_LRDT_RO_DATA);
1461 	if (unlikely(ro_start < 0)) {
1462 		dev_err(&dev->dev, "%s: VPD Read-only data not found\n",
1463 			__func__);
1464 		rc = -ENODEV;
1465 		goto out;
1466 	}
1467 
1468 	/* Get the read only section size, cap when extends beyond read VPD */
1469 	ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
1470 	j = ro_size;
1471 	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
1472 	if (unlikely((i + j) > vpd_size)) {
1473 		pr_debug("%s: Might need to read more VPD (%d > %ld)\n",
1474 			 __func__, (i + j), vpd_size);
1475 		ro_size = vpd_size - i;
1476 	}
1477 
1478 	/*
1479 	 * Find the offset of the WWPN tag within the read only
1480 	 * VPD data and validate the found field (partials are
1481 	 * no good to us). Convert the ASCII data to an integer
1482 	 * value. Note that we must copy to a temporary buffer
1483 	 * because the conversion service requires that the ASCII
1484 	 * string be terminated.
1485 	 */
1486 	for (k = 0; k < NUM_FC_PORTS; k++) {
1487 		j = ro_size;
1488 		i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
1489 
1490 		i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
1491 		if (unlikely(i < 0)) {
1492 			dev_err(&dev->dev, "%s: Port %d WWPN not found "
1493 				"in VPD\n", __func__, k);
1494 			rc = -ENODEV;
1495 			goto out;
1496 		}
1497 
1498 		j = pci_vpd_info_field_size(&vpd_data[i]);
1499 		i += PCI_VPD_INFO_FLD_HDR_SIZE;
1500 		if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
1501 			dev_err(&dev->dev, "%s: Port %d WWPN incomplete or "
1502 				"VPD corrupt\n",
1503 			       __func__, k);
1504 			rc = -ENODEV;
1505 			goto out;
1506 		}
1507 
1508 		memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
1509 		rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
1510 		if (unlikely(rc)) {
1511 			dev_err(&dev->dev, "%s: Fail to convert port %d WWPN "
1512 				"to integer\n", __func__, k);
1513 			rc = -ENODEV;
1514 			goto out;
1515 		}
1516 	}
1517 
1518 out:
1519 	pr_debug("%s: returning rc=%d\n", __func__, rc);
1520 	return rc;
1521 }
1522 
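/*
 * Illustrative VPD layout assumed above (values hypothetical): the WWPNs
 * are keywords inside the read-only large resource data tag, e.g.
 *
 *	[RO section] ... "V5" <len 16> "5005076Bxxxxxxxx"
 *	             ... "V6" <len 16> "5005076Byyyyyyyy"
 *
 * Each 16-character ASCII field lands in the zeroed tmp_buf (and is thus
 * NUL-terminated) before kstrtoul() converts it, with WWPN_LEN (16)
 * doubling as the base argument.
 */
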
1523 /**
1524  * init_pcr() - initialize the provisioning and control registers
1525  * @cfg:	Internal structure associated with the host.
1526  *
1527  * Also sets up fast access to the mapped registers and initializes AFU
1528  * command fields that never change.
1529  */
1530 static void init_pcr(struct cxlflash_cfg *cfg)
1531 {
1532 	struct afu *afu = cfg->afu;
1533 	struct sisl_ctrl_map __iomem *ctrl_map;
1534 	int i;
1535 
1536 	for (i = 0; i < MAX_CONTEXT; i++) {
1537 		ctrl_map = &afu->afu_map->ctrls[i].ctrl;
1538 		/* Disrupt any clients that could be running */
1539 		/* e.g. clients that survived a master restart */
1540 		writeq_be(0, &ctrl_map->rht_start);
1541 		writeq_be(0, &ctrl_map->rht_cnt_id);
1542 		writeq_be(0, &ctrl_map->ctx_cap);
1543 	}
1544 
1545 	/* Copy frequently used fields into afu */
1546 	afu->ctx_hndl = (u16) cxl_process_element(cfg->mcctx);
1547 	afu->host_map = &afu->afu_map->hosts[afu->ctx_hndl].host;
1548 	afu->ctrl_map = &afu->afu_map->ctrls[afu->ctx_hndl].ctrl;
1549 
1550 	/* Program the Endian Control for the master context */
1551 	writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl);
1552 
1553 	/* Initialize cmd fields that never change */
1554 	for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
1555 		afu->cmd[i].rcb.ctx_id = afu->ctx_hndl;
1556 		afu->cmd[i].rcb.msi = SISL_MSI_RRQ_UPDATED;
1557 		afu->cmd[i].rcb.rrq = 0x0;
1558 	}
1559 }
1560 
1561 /**
1562  * init_global() - initialize AFU global registers
1563  * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
1564  */
1565 static int init_global(struct cxlflash_cfg *cfg)
1566 {
1567 	struct afu *afu = cfg->afu;
1568 	struct device *dev = &cfg->dev->dev;
1569 	u64 wwpn[NUM_FC_PORTS];	/* wwpn of AFU ports */
1570 	int i = 0, num_ports = 0;
1571 	int rc = 0;
1572 	u64 reg;
1573 
1574 	rc = read_vpd(cfg, &wwpn[0]);
1575 	if (rc) {
1576 		dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
1577 		goto out;
1578 	}
1579 
1580 	pr_debug("%s: wwpn0=0x%llX wwpn1=0x%llX\n", __func__, wwpn[0], wwpn[1]);
1581 
1582 	/* Set up RRQ in AFU for master issued cmds */
1583 	writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start);
1584 	writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end);
1585 
1586 	/* AFU configuration */
1587 	reg = readq_be(&afu->afu_map->global.regs.afu_config);
1588 	reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
1589 	/* enable all auto retry options and control endianness */
1590 	/* leave others at default: */
1591 	/* CTX_CAP write protected, mbox_r does not clear on read and */
1592 	/* checker on if dual afu */
1593 	writeq_be(reg, &afu->afu_map->global.regs.afu_config);
1594 
1595 	/* Global port select: select either port */
1596 	if (afu->internal_lun) {
1597 		/* Only use port 0 */
1598 		writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
1599 		num_ports = NUM_FC_PORTS - 1;
1600 	} else {
1601 		writeq_be(BOTH_PORTS, &afu->afu_map->global.regs.afu_port_sel);
1602 		num_ports = NUM_FC_PORTS;
1603 	}
1604 
1605 	for (i = 0; i < num_ports; i++) {
1606 		/* Unmask all errors (but they are still masked at AFU) */
1607 		writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRMSK / 8]);
1608 		/* Clear CRC error cnt & set a threshold */
1609 		(void)readq_be(&afu->afu_map->global.
1610 			       fc_regs[i][FC_CNT_CRCERR / 8]);
1611 		writeq_be(MC_CRC_THRESH, &afu->afu_map->global.fc_regs[i]
1612 			  [FC_CRC_THRESH / 8]);
1613 
1614 		/* Set WWPNs. If already programmed, wwpn[i] is 0 */
1615 		if (wwpn[i] != 0)
1616 			afu_set_wwpn(afu, i,
1617 				     &afu->afu_map->global.fc_regs[i][0],
1618 				     wwpn[i]);
1619 		/* Programming WWPN back to back causes additional
1620 		 * offline/online transitions and a PLOGI
1621 		 */
1622 		msleep(100);
1623 	}
1624 
1625 	/* Set up master's own CTX_CAP to allow real mode, host translation */
1626 	/* tables, afu cmds and read/write GSCSI cmds. */
1627 	/* First, unlock ctx_cap write by reading mbox */
1628 	(void)readq_be(&afu->ctrl_map->mbox_r);	/* unlock ctx_cap */
1629 	writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
1630 		   SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
1631 		   SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
1632 		  &afu->ctrl_map->ctx_cap);
1633 	/* Initialize heartbeat */
1634 	afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
1635 
1636 out:
1637 	return rc;
1638 }
1639 
1640 /**
1641  * start_afu() - initializes and starts the AFU
1642  * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
1643  */
1644 static int start_afu(struct cxlflash_cfg *cfg)
1645 {
1646 	struct afu *afu = cfg->afu;
1647 	struct afu_cmd *cmd;
1648 
1649 	int i = 0;
1650 	int rc = 0;
1651 
1652 	for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
1653 		cmd = &afu->cmd[i];
1654 
1655 		init_completion(&cmd->cevent);
1656 		spin_lock_init(&cmd->slock);
1657 		cmd->parent = afu;
1658 	}
1659 
1660 	init_pcr(cfg);
1661 
1662 	/* After an AFU reset, RRQ entries are stale, clear them */
1663 	memset(&afu->rrq_entry, 0, sizeof(afu->rrq_entry));
1664 
1665 	/* Initialize RRQ pointers */
1666 	afu->hrrq_start = &afu->rrq_entry[0];
1667 	afu->hrrq_end = &afu->rrq_entry[NUM_RRQ_ENTRY - 1];
1668 	afu->hrrq_curr = afu->hrrq_start;
1669 	afu->toggle = 1;
1670 
1671 	rc = init_global(cfg);
1672 
1673 	pr_debug("%s: returning rc=%d\n", __func__, rc);
1674 	return rc;
1675 }
1676 
1677 /**
1678  * init_intr() - setup interrupt handlers for the master context
1679  * @cfg:	Internal structure associated with the host.
 * @ctx:	Context to attach the interrupt handlers to.
1680  *
1681  * Return: UNDO_NOOP on success, otherwise the level from which to unwind
1682  */
1683 static enum undo_level init_intr(struct cxlflash_cfg *cfg,
1684 				 struct cxl_context *ctx)
1685 {
1686 	struct afu *afu = cfg->afu;
1687 	struct device *dev = &cfg->dev->dev;
1688 	int rc = 0;
1689 	enum undo_level level = UNDO_NOOP;
1690 
1691 	rc = cxl_allocate_afu_irqs(ctx, 3);
1692 	if (unlikely(rc)) {
1693 		dev_err(dev, "%s: call to allocate_afu_irqs failed rc=%d!\n",
1694 			__func__, rc);
1695 		level = UNDO_NOOP;
1696 		goto out;
1697 	}
1698 
1699 	rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, afu,
1700 			     "SISL_MSI_SYNC_ERROR");
1701 	if (unlikely(rc <= 0)) {
1702 		dev_err(dev, "%s: IRQ 1 (SISL_MSI_SYNC_ERROR) map failed!\n",
1703 			__func__);
1704 		level = FREE_IRQ;
1705 		goto out;
1706 	}
1707 
1708 	rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, afu,
1709 			     "SISL_MSI_RRQ_UPDATED");
1710 	if (unlikely(rc <= 0)) {
1711 		dev_err(dev, "%s: IRQ 2 (SISL_MSI_RRQ_UPDATED) map failed!\n",
1712 			__func__);
1713 		level = UNMAP_ONE;
1714 		goto out;
1715 	}
1716 
1717 	rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, afu,
1718 			     "SISL_MSI_ASYNC_ERROR");
1719 	if (unlikely(rc <= 0)) {
1720 		dev_err(dev, "%s: IRQ 3 (SISL_MSI_ASYNC_ERROR) map failed!\n",
1721 			__func__);
1722 		level = UNMAP_TWO;
1723 		goto out;
1724 	}
1725 out:
1726 	return level;
1727 }
1728 
1729 /**
1730  * init_mc() - create and register as the master context
1731  * @cfg:	Internal structure associated with the host.
1732  *
1733  * Return: 0 on success, -errno on failure
1734  */
1735 static int init_mc(struct cxlflash_cfg *cfg)
1736 {
1737 	struct cxl_context *ctx;
1738 	struct device *dev = &cfg->dev->dev;
1739 	int rc = 0;
1740 	enum undo_level level;
1741 
1742 	ctx = cxl_get_context(cfg->dev);
1743 	if (unlikely(!ctx)) {
1744 		rc = -ENOMEM;
1745 		goto ret;
1746 	}
1747 	cfg->mcctx = ctx;
1748 
1749 	/* Set it up as a master with the CXL */
1750 	cxl_set_master(ctx);
1751 
1752 	/* During initialization reset the AFU to start from a clean slate */
1753 	rc = cxl_afu_reset(cfg->mcctx);
1754 	if (unlikely(rc)) {
1755 		dev_err(dev, "%s: initial AFU reset failed rc=%d\n",
1756 			__func__, rc);
1757 		goto ret;
1758 	}
1759 
1760 	level = init_intr(cfg, ctx);
1761 	if (unlikely(level)) {
1762 		dev_err(dev, "%s: setting up interrupts failed rc=%d\n",
1763 			__func__, rc);
1764 		goto out;
1765 	}
1766 
1767 	/* This performs the equivalent of the CXL_IOCTL_START_WORK.
1768 	 * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
1769 	 * element (pe) that is embedded in the context (ctx)
1770 	 */
1771 	rc = start_context(cfg);
1772 	if (unlikely(rc)) {
1773 		dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
1774 		level = UNMAP_THREE;
1775 		goto out;
1776 	}
1777 ret:
1778 	pr_debug("%s: returning rc=%d\n", __func__, rc);
1779 	return rc;
1780 out:
1781 	term_intr(cfg, level);
1782 	goto ret;
1783 }
1784 
1785 /**
1786  * init_afu() - setup as master context and start AFU
1787  * @cfg:	Internal structure associated with the host.
1788  *
1789  * This routine is a higher level of control for configuring the
1790  * AFU on probe and reset paths.
1791  *
1792  * Return: 0 on success, -errno on failure
1793  */
1794 static int init_afu(struct cxlflash_cfg *cfg)
1795 {
1796 	u64 reg;
1797 	int rc = 0;
1798 	struct afu *afu = cfg->afu;
1799 	struct device *dev = &cfg->dev->dev;
1800 
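	/*
	 * Advise the CXL layer that a PERST reloads the same AFU image so
	 * that recovery can reuse the existing configuration.
	 */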
1801 	cxl_perst_reloads_same_image(cfg->cxl_afu, true);
1802 
1803 	rc = init_mc(cfg);
1804 	if (rc) {
1805 		dev_err(dev, "%s: call to init_mc failed, rc=%d!\n",
1806 			__func__, rc);
1807 		goto out;
1808 	}
1809 
1810 	/* Map the entire MMIO space of the AFU */
1811 	afu->afu_map = cxl_psa_map(cfg->mcctx);
1812 	if (!afu->afu_map) {
1813 		dev_err(dev, "%s: call to cxl_psa_map failed!\n", __func__);
1814 		rc = -ENOMEM;
1815 		goto err1;
1816 	}
1817 	kref_init(&afu->mapcount);
1818 
1819 	/* Read afu_version without byte reversal, else the string is backwards */
1820 	reg = readq(&afu->afu_map->global.regs.afu_version);
1821 	memcpy(afu->version, &reg, sizeof(reg));
1822 	afu->interface_version =
1823 	    readq_be(&afu->afu_map->global.regs.interface_version);
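	/* An interface version of all ones (~0ULL) denotes a back level AFU */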
1824 	if ((afu->interface_version + 1) == 0) {
1825 		pr_err("Back level AFU, please upgrade. AFU version %s "
1826 		       "interface version 0x%llx\n", afu->version,
1827 		       afu->interface_version);
1828 		rc = -EINVAL;
1829 		goto err2;
1830 	}
1831 
1832 	pr_debug("%s: afu version %s, interface version 0x%llX\n", __func__,
1833 		 afu->version, afu->interface_version);
1834 
1835 	rc = start_afu(cfg);
1836 	if (rc) {
1837 		dev_err(dev, "%s: call to start_afu failed, rc=%d!\n",
1838 			__func__, rc);
1839 		goto err2;
1840 	}
1841 
1842 	afu_err_intr_init(cfg->afu);
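	/* Prime the cached command room with the AFU's current value */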
1843 	atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));
1844 
1845 	/* Restore the LUN mappings */
1846 	cxlflash_restore_luntable(cfg);
1847 out:
1848 	pr_debug("%s: returning rc=%d\n", __func__, rc);
1849 	return rc;
1850 
1851 err2:
1852 	kref_put(&afu->mapcount, afu_unmap);
1853 err1:
1854 	term_intr(cfg, UNMAP_THREE);
1855 	term_mc(cfg);
1856 	goto out;
1857 }
1858 
1859 /**
1860  * cxlflash_afu_sync() - builds and sends an AFU sync command
1861  * @afu:	AFU associated with the host.
1862  * @ctx_hndl_u:	Identifies context requesting sync.
1863  * @res_hndl_u:	Identifies resource requesting sync.
1864  * @mode:	Type of sync to issue (lightweight, heavyweight, global).
1865  *
1866  * The AFU can only take 1 sync command at a time. This routine enforces this
1867  * limitation by using a mutex to provide exclusive access to the AFU during
1868  * the sync. This design point requires calling threads to not be on interrupt
1869  * the sync. This design requires that calling threads not be in interrupt
1870  * context, as they may sleep while waiting on concurrent sync operations.
1871  * AFU sync operations are only necessary and allowed when the device is
1872  * operating normally. When not operating normally, sync requests can occur as
1873  * part of cleaning up resources associated with an adapter prior to removal.
1874  * In this scenario, these requests are simply ignored (safe due to the AFU
1875  * going away).
1876  *
1877  * Return:
1878  *	0 on success
1879  *	-1 on failure
1880  */
1881 int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
1882 		      res_hndl_t res_hndl_u, u8 mode)
1883 {
1884 	struct cxlflash_cfg *cfg = afu->parent;
1885 	struct device *dev = &cfg->dev->dev;
1886 	struct afu_cmd *cmd = NULL;
1887 	int rc = 0;
1888 	int retry_cnt = 0;
1889 	static DEFINE_MUTEX(sync_active);
1890 
1891 	if (cfg->state != STATE_NORMAL) {
1892 		pr_debug("%s: Sync not required! (%u)\n", __func__, cfg->state);
1893 		return 0;
1894 	}
1895 
1896 	mutex_lock(&sync_active);
1897 retry:
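	/* If the pool is exhausted, retry with a linearly increasing delay */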
1898 	cmd = cmd_checkout(afu);
1899 	if (unlikely(!cmd)) {
1900 		retry_cnt++;
1901 		usleep_range(1000 * retry_cnt, 2000 * retry_cnt);
1902 		if (retry_cnt < MC_RETRY_CNT)
1903 			goto retry;
1904 		dev_err(dev, "%s: could not get a free command\n", __func__);
1905 		rc = -1;
1906 		goto out;
1907 	}
1908 
1909 	pr_debug("%s: afu=%p cmd=%p ctx=%d\n", __func__, afu, cmd, ctx_hndl_u);
1910 
1911 	memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
1912 
1913 	cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
1914 	cmd->rcb.port_sel = 0x0;	/* NA */
1915 	cmd->rcb.lun_id = 0x0;	/* NA */
1916 	cmd->rcb.data_len = 0x0;
1917 	cmd->rcb.data_ea = 0x0;
1918 	cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT;
1919 
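	/* AFU sync CDB: opcode 0xC0, mode, BE16 ctx handle, BE32 res handle */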
1920 	cmd->rcb.cdb[0] = 0xC0;	/* AFU Sync */
1921 	cmd->rcb.cdb[1] = mode;
1922 
1923 	/* The cdb is aligned, no unaligned accessors required */
1924 	*((__be16 *)&cmd->rcb.cdb[2]) = cpu_to_be16(ctx_hndl_u);
1925 	*((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u);
1926 
1927 	rc = send_cmd(afu, cmd);
1928 	if (unlikely(rc))
1929 		goto out;
1930 
1931 	wait_resp(afu, cmd);
1932 
1933 	/* B_ERROR is set on timeout by the context reset in wait_resp() */
1934 	if (unlikely((cmd->sa.ioasc != 0) ||
1935 		     (cmd->sa.host_use_b[0] & B_ERROR)))
1936 		rc = -1;
1937 out:
1938 	mutex_unlock(&sync_active);
1939 	if (cmd)
1940 		cmd_checkin(cmd);
1941 	pr_debug("%s: returning rc=%d\n", __func__, rc);
1942 	return rc;
1943 }
1944 
1945 /**
1946  * afu_reset() - resets the AFU
1947  * @cfg:	Internal structure associated with the host.
1948  *
1949  * Return: 0 on success, -errno on failure
1950  */
1951 static int afu_reset(struct cxlflash_cfg *cfg)
1952 {
1953 	int rc = 0;
1954 	/*
	 * Stop the context before the reset. Once stopped, the context is
1955 	 * no longer available, so it is restarted after the reset completes.
1956 	 */
1957 
1958 	term_afu(cfg);
1959 
1960 	rc = init_afu(cfg);
1961 
1962 	pr_debug("%s: returning rc=%d\n", __func__, rc);
1963 	return rc;
1964 }
1965 
1966 /**
1967  * drain_ioctls() - wait until all currently executing ioctls have completed
1968  * @cfg:	Internal structure associated with the host.
1969  *
1970  * Obtain write access to read/write semaphore that wraps ioctl
1971  * handling to 'drain' ioctls currently executing.
1972  */
1973 static void drain_ioctls(struct cxlflash_cfg *cfg)
1974 {
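	/*
	 * Ioctls hold the semaphore for read; acquiring it for write thus
	 * waits until all active ioctls have completed.
	 */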
1975 	down_write(&cfg->ioctl_rwsem);
1976 	up_write(&cfg->ioctl_rwsem);
1977 }
1978 
1979 /**
1980  * cxlflash_eh_device_reset_handler() - reset a single LUN
1981  * @scp:	SCSI command to send.
1982  *
1983  * Return:
1984  *	SUCCESS as defined in scsi/scsi.h
1985  *	FAILED as defined in scsi/scsi.h
1986  */
1987 static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
1988 {
1989 	int rc = SUCCESS;
1990 	struct Scsi_Host *host = scp->device->host;
1991 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
1992 	struct afu *afu = cfg->afu;
1993 	int rcr = 0;
1994 
1995 	pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
1996 		 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
1997 		 host->host_no, scp->device->channel,
1998 		 scp->device->id, scp->device->lun,
1999 		 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
2000 		 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
2001 		 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
2002 		 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
2003 
2004 retry:
2005 	switch (cfg->state) {
2006 	case STATE_NORMAL:
2007 		rcr = send_tmf(afu, scp, TMF_LUN_RESET);
2008 		if (unlikely(rcr))
2009 			rc = FAILED;
2010 		break;
2011 	case STATE_RESET:
2012 		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2013 		goto retry;
2014 	default:
2015 		rc = FAILED;
2016 		break;
2017 	}
2018 
2019 	pr_debug("%s: returning rc=%d\n", __func__, rc);
2020 	return rc;
2021 }
2022 
2023 /**
2024  * cxlflash_eh_host_reset_handler() - reset the host adapter
2025  * @scp:	SCSI command from stack identifying host.
2026  *
2027  * Following a reset, the state is evaluated again in case an EEH occurred
2028  * during the reset. In such a scenario, the host reset waits until the EEH
2029  * recovery is complete and then returns success or failure based upon the
2030  * current device state.
2031  *
2032  * Return:
2033  *	SUCCESS as defined in scsi/scsi.h
2034  *	FAILED as defined in scsi/scsi.h
2035  */
2036 static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
2037 {
2038 	int rc = SUCCESS;
2039 	int rcr = 0;
2040 	struct Scsi_Host *host = scp->device->host;
2041 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
2042 
2043 	pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
2044 		 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
2045 		 host->host_no, scp->device->channel,
2046 		 scp->device->id, scp->device->lun,
2047 		 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
2048 		 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
2049 		 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
2050 		 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
2051 
2052 	switch (cfg->state) {
2053 	case STATE_NORMAL:
2054 		cfg->state = STATE_RESET;
2055 		drain_ioctls(cfg);
2056 		cxlflash_mark_contexts_error(cfg);
2057 		rcr = afu_reset(cfg);
2058 		if (rcr) {
2059 			rc = FAILED;
2060 			cfg->state = STATE_FAILTERM;
2061 		} else
2062 			cfg->state = STATE_NORMAL;
2063 		wake_up_all(&cfg->reset_waitq);
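		/* Give a pending EEH a chance to be processed before rechecking */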
2064 		ssleep(1);
2065 		/* fall through */
2066 	case STATE_RESET:
2067 		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2068 		if (cfg->state == STATE_NORMAL)
2069 			break;
2070 		/* fall through */
2071 	default:
2072 		rc = FAILED;
2073 		break;
2074 	}
2075 
2076 	pr_debug("%s: returning rc=%d\n", __func__, rc);
2077 	return rc;
2078 }
2079 
2080 /**
2081  * cxlflash_change_queue_depth() - change the queue depth for the device
2082  * @sdev:	SCSI device destined for queue depth change.
2083  * @qdepth:	Requested queue depth value to set.
2084  *
2085  * The requested queue depth is capped to the maximum supported value.
2086  *
2087  * Return: The actual queue depth set.
2088  */
2089 static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
2090 {
2092 	if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
2093 		qdepth = CXLFLASH_MAX_CMDS_PER_LUN;
2094 
2095 	scsi_change_queue_depth(sdev, qdepth);
2096 	return sdev->queue_depth;
2097 }
2098 
2099 /**
2100  * cxlflash_show_port_status() - queries and presents the current port status
2101  * @port:	Desired port for status reporting.
2102  * @afu:	AFU owning the specified port.
2103  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2104  *
2105  * Return: The size of the ASCII string returned in @buf.
2106  */
2107 static ssize_t cxlflash_show_port_status(u32 port, struct afu *afu, char *buf)
2108 {
2109 	char *disp_status;
2110 	u64 status;
2111 	__be64 __iomem *fc_regs;
2112 
2113 	if (port >= NUM_FC_PORTS)
2114 		return 0;
2115 
2116 	fc_regs = &afu->afu_map->global.fc_regs[port][0];
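	/* FC register offsets are in bytes; divide by 8 to index the array */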
2117 	status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
2118 	status &= FC_MTIP_STATUS_MASK;
2119 
2120 	if (status == FC_MTIP_STATUS_ONLINE)
2121 		disp_status = "online";
2122 	else if (status == FC_MTIP_STATUS_OFFLINE)
2123 		disp_status = "offline";
2124 	else
2125 		disp_status = "unknown";
2126 
2127 	return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
2128 }
2129 
2130 /**
2131  * port0_show() - queries and presents the current status of port 0
2132  * @dev:	Generic device associated with the host owning the port.
2133  * @attr:	Device attribute representing the port.
2134  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2135  *
2136  * Return: The size of the ASCII string returned in @buf.
2137  */
2138 static ssize_t port0_show(struct device *dev,
2139 			  struct device_attribute *attr,
2140 			  char *buf)
2141 {
2142 	struct Scsi_Host *shost = class_to_shost(dev);
2143 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
2144 	struct afu *afu = cfg->afu;
2145 
2146 	return cxlflash_show_port_status(0, afu, buf);
2147 }
2148 
2149 /**
2150  * port1_show() - queries and presents the current status of port 1
2151  * @dev:	Generic device associated with the host owning the port.
2152  * @attr:	Device attribute representing the port.
2153  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2154  *
2155  * Return: The size of the ASCII string returned in @buf.
2156  */
2157 static ssize_t port1_show(struct device *dev,
2158 			  struct device_attribute *attr,
2159 			  char *buf)
2160 {
2161 	struct Scsi_Host *shost = class_to_shost(dev);
2162 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
2163 	struct afu *afu = cfg->afu;
2164 
2165 	return cxlflash_show_port_status(1, afu, buf);
2166 }
2167 
2168 /**
2169  * lun_mode_show() - presents the current LUN mode of the host
2170  * @dev:	Generic device associated with the host.
2171  * @attr:	Device attribute representing the LUN mode.
2172  * @buf:	Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
2173  *
2174  * Return: The size of the ASCII string returned in @buf.
2175  */
2176 static ssize_t lun_mode_show(struct device *dev,
2177 			     struct device_attribute *attr, char *buf)
2178 {
2179 	struct Scsi_Host *shost = class_to_shost(dev);
2180 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
2181 	struct afu *afu = cfg->afu;
2182 
2183 	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
2184 }
2185 
2186 /**
2187  * lun_mode_store() - sets the LUN mode of the host
2188  * @dev:	Generic device associated with the host.
2189  * @attr:	Device attribute representing the LUN mode.
2190  * @buf:	Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
2191  * @count:	Length of data residing in @buf.
2192  *
2193  * The CXL Flash AFU supports a dummy LUN mode where the external
2194  * links and storage are not required. Space on the FPGA is used
2195  * to create 1 or 2 small LUNs which are presented to the system
2196  * as if they were a normal storage device. This feature is useful
2197  * during development and also provides manufacturing with a way
2198  * to test the AFU without an actual device.
2199  *
2200  * 0 = external LUN[s] (default)
2201  * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
2202  * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
2203  * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
2204  * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
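 *
 * For example, assuming SCSI host number N, a mode is selected with:
 * echo 1 > /sys/class/scsi_host/hostN/lun_mode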
2205  *
2206  * Return: The number of bytes consumed from @buf.
2207  */
2208 static ssize_t lun_mode_store(struct device *dev,
2209 			      struct device_attribute *attr,
2210 			      const char *buf, size_t count)
2211 {
2212 	struct Scsi_Host *shost = class_to_shost(dev);
2213 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
2214 	struct afu *afu = cfg->afu;
2215 	int rc;
2216 	u32 lun_mode;
2217 
2218 	rc = kstrtouint(buf, 10, &lun_mode);
2219 	if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
2220 		afu->internal_lun = lun_mode;
2221 
2222 		/*
2223 		 * When configured for internal LUN, there is only one channel,
2224 		 * channel number 0, else there will be 2 (default).
2225 		 */
2226 		if (afu->internal_lun)
2227 			shost->max_channel = 0;
2228 		else
2229 			shost->max_channel = NUM_FC_PORTS - 1;
2230 
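		/* Reset the AFU so the new mode takes effect, then rescan */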
2231 		afu_reset(cfg);
2232 		scsi_scan_host(cfg->host);
2233 	}
2234 
2235 	return count;
2236 }
2237 
2238 /**
2239  * ioctl_version_show() - presents the current ioctl version of the host
2240  * @dev:	Generic device associated with the host.
2241  * @attr:	Device attribute representing the ioctl version.
2242  * @buf:	Buffer of length PAGE_SIZE to report back the ioctl version.
2243  *
2244  * Return: The size of the ASCII string returned in @buf.
2245  */
2246 static ssize_t ioctl_version_show(struct device *dev,
2247 				  struct device_attribute *attr, char *buf)
2248 {
2249 	return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0);
2250 }
2251 
2252 /**
2253  * cxlflash_show_port_lun_table() - queries and presents the port LUN table
2254  * @port:	Desired port for status reporting.
2255  * @afu:	AFU owning the specified port.
2256  * @buf:	Buffer of length PAGE_SIZE to report back the LUN table in ASCII.
2257  *
2258  * Return: The size of the ASCII string returned in @buf.
2259  */
2260 static ssize_t cxlflash_show_port_lun_table(u32 port,
2261 					    struct afu *afu,
2262 					    char *buf)
2263 {
2264 	int i;
2265 	ssize_t bytes = 0;
2266 	__be64 __iomem *fc_port;
2267 
2268 	if (port >= NUM_FC_PORTS)
2269 		return 0;
2270 
2271 	fc_port = &afu->afu_map->global.fc_port[port][0];
2272 
2273 	for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
2274 		bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
2275 				   "%03d: %016llX\n", i, readq_be(&fc_port[i]));
2276 	return bytes;
2277 }
2278 
2279 /**
2280  * port0_lun_table_show() - presents the current LUN table of port 0
2281  * @dev:	Generic device associated with the host owning the port.
2282  * @attr:	Device attribute representing the port.
2283  * @buf:	Buffer of length PAGE_SIZE to report back the LUN table in ASCII.
2284  *
2285  * Return: The size of the ASCII string returned in @buf.
2286  */
2287 static ssize_t port0_lun_table_show(struct device *dev,
2288 				    struct device_attribute *attr,
2289 				    char *buf)
2290 {
2291 	struct Scsi_Host *shost = class_to_shost(dev);
2292 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
2293 	struct afu *afu = cfg->afu;
2294 
2295 	return cxlflash_show_port_lun_table(0, afu, buf);
2296 }
2297 
2298 /**
2299  * port1_lun_table_show() - presents the current LUN table of port 1
2300  * @dev:	Generic device associated with the host owning the port.
2301  * @attr:	Device attribute representing the port.
2302  * @buf:	Buffer of length PAGE_SIZE to report back the LUN table in ASCII.
2303  *
2304  * Return: The size of the ASCII string returned in @buf.
2305  */
2306 static ssize_t port1_lun_table_show(struct device *dev,
2307 				    struct device_attribute *attr,
2308 				    char *buf)
2309 {
2310 	struct Scsi_Host *shost = class_to_shost(dev);
2311 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
2312 	struct afu *afu = cfg->afu;
2313 
2314 	return cxlflash_show_port_lun_table(1, afu, buf);
2315 }
2316 
2317 /**
2318  * mode_show() - presents the current mode of the device
2319  * @dev:	Generic device associated with the device.
2320  * @attr:	Device attribute representing the device mode.
2321  * @buf:	Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
2322  *
2323  * Return: The size of the ASCII string returned in @buf.
2324  */
2325 static ssize_t mode_show(struct device *dev,
2326 			 struct device_attribute *attr, char *buf)
2327 {
2328 	struct scsi_device *sdev = to_scsi_device(dev);
2329 
2330 	return scnprintf(buf, PAGE_SIZE, "%s\n",
2331 			 sdev->hostdata ? "superpipe" : "legacy");
2332 }
2333 
2334 /*
2335  * Host attributes
2336  */
2337 static DEVICE_ATTR_RO(port0);
2338 static DEVICE_ATTR_RO(port1);
2339 static DEVICE_ATTR_RW(lun_mode);
2340 static DEVICE_ATTR_RO(ioctl_version);
2341 static DEVICE_ATTR_RO(port0_lun_table);
2342 static DEVICE_ATTR_RO(port1_lun_table);
2343 
2344 static struct device_attribute *cxlflash_host_attrs[] = {
2345 	&dev_attr_port0,
2346 	&dev_attr_port1,
2347 	&dev_attr_lun_mode,
2348 	&dev_attr_ioctl_version,
2349 	&dev_attr_port0_lun_table,
2350 	&dev_attr_port1_lun_table,
2351 	NULL
2352 };
2353 
2354 /*
2355  * Device attributes
2356  */
2357 static DEVICE_ATTR_RO(mode);
2358 
2359 static struct device_attribute *cxlflash_dev_attrs[] = {
2360 	&dev_attr_mode,
2361 	NULL
2362 };
2363 
2364 /*
2365  * Host template
2366  */
2367 static struct scsi_host_template driver_template = {
2368 	.module = THIS_MODULE,
2369 	.name = CXLFLASH_ADAPTER_NAME,
2370 	.info = cxlflash_driver_info,
2371 	.ioctl = cxlflash_ioctl,
2372 	.proc_name = CXLFLASH_NAME,
2373 	.queuecommand = cxlflash_queuecommand,
2374 	.eh_device_reset_handler = cxlflash_eh_device_reset_handler,
2375 	.eh_host_reset_handler = cxlflash_eh_host_reset_handler,
2376 	.change_queue_depth = cxlflash_change_queue_depth,
2377 	.cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
2378 	.can_queue = CXLFLASH_MAX_CMDS,
2379 	.this_id = -1,
2380 	.sg_tablesize = SG_NONE,	/* No scatter gather support */
2381 	.max_sectors = CXLFLASH_MAX_SECTORS,
2382 	.use_clustering = ENABLE_CLUSTERING,
2383 	.shost_attrs = cxlflash_host_attrs,
2384 	.sdev_attrs = cxlflash_dev_attrs,
2385 };
2386 
2387 /*
2388  * Device dependent values
2389  */
2390 static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
2391 					0ULL };
2392 static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
2393 					CXLFLASH_NOTIFY_SHUTDOWN };
2394 
2395 /*
2396  * PCI device binding table
2397  */
2398 static struct pci_device_id cxlflash_pci_table[] = {
2399 	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
2400 	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
2401 	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
2402 	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
2403 	{}
2404 };
2405 
2406 MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
2407 
2408 /**
2409  * cxlflash_worker_thread() - work thread handler for the AFU
2410  * @work:	Work structure contained within cxlflash associated with host.
2411  *
2412  * Handles the following events:
2413  * - Link reset, which cannot be performed in interrupt context as it can
2414  *   block for up to a few seconds
2415  * - Read AFU command room
2416  * - Rescan the host
2417  */
2418 static void cxlflash_worker_thread(struct work_struct *work)
2419 {
2420 	struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
2421 						work_q);
2422 	struct afu *afu = cfg->afu;
2423 	struct device *dev = &cfg->dev->dev;
2424 	int port;
2425 	ulong lock_flags;
2426 
2427 	/* Avoid MMIO if the device has failed */
2429 	if (cfg->state != STATE_NORMAL)
2430 		return;
2431 
2432 	spin_lock_irqsave(cfg->host->host_lock, lock_flags);
2433 
2434 	if (cfg->lr_state == LINK_RESET_REQUIRED) {
2435 		port = cfg->lr_port;
2436 		if (port < 0)
2437 			dev_err(dev, "%s: invalid port index %d\n",
2438 				__func__, port);
2439 		else {
2440 			spin_unlock_irqrestore(cfg->host->host_lock,
2441 					       lock_flags);
2442 
2443 			/* The reset can block... */
2444 			afu_link_reset(afu, port,
2445 				       &afu->afu_map->global.fc_regs[port][0]);
2446 			spin_lock_irqsave(cfg->host->host_lock, lock_flags);
2447 		}
2448 
2449 		cfg->lr_state = LINK_RESET_COMPLETE;
2450 	}
2451 
2452 	if (afu->read_room) {
2453 		atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));
2454 		afu->read_room = false;
2455 	}
2456 
2457 	spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
2458 
2459 	if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
2460 		scsi_scan_host(cfg->host);
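	/* Drop the AFU map reference taken when this work was scheduled */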
2461 	kref_put(&afu->mapcount, afu_unmap);
2462 }
2463 
2464 /**
2465  * cxlflash_probe() - PCI entry point to add host
2466  * @pdev:	PCI device associated with the host.
2467  * @dev_id:	PCI device id associated with device.
2468  *
2469  * Return: 0 on success, -errno on failure
2470  */
2471 static int cxlflash_probe(struct pci_dev *pdev,
2472 			  const struct pci_device_id *dev_id)
2473 {
2474 	struct Scsi_Host *host;
2475 	struct cxlflash_cfg *cfg = NULL;
2476 	struct dev_dependent_vals *ddv;
2477 	int rc = 0;
2478 
2479 	dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
2480 		__func__, pdev->irq);
2481 
2482 	ddv = (struct dev_dependent_vals *)dev_id->driver_data;
2483 	driver_template.max_sectors = ddv->max_sectors;
2484 
2485 	host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
2486 	if (!host) {
2487 		dev_err(&pdev->dev, "%s: call to scsi_host_alloc failed!\n",
2488 			__func__);
2489 		rc = -ENOMEM;
2490 		goto out;
2491 	}
2492 
2493 	host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
2494 	host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
2495 	host->max_channel = NUM_FC_PORTS - 1;
2496 	host->unique_id = host->host_no;
2497 	host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
2498 
2499 	cfg = (struct cxlflash_cfg *)host->hostdata;
2500 	cfg->host = host;
2501 	rc = alloc_mem(cfg);
2502 	if (rc) {
2503 		dev_err(&pdev->dev, "%s: call to alloc_mem failed!\n",
2504 			__func__);
2505 		rc = -ENOMEM;
2506 		scsi_host_put(cfg->host);
2507 		goto out;
2508 	}
2509 
2510 	cfg->init_state = INIT_STATE_NONE;
2511 	cfg->dev = pdev;
2512 	cfg->cxl_fops = cxlflash_cxl_fops;
2513 
2514 	/*
2515 	 * The promoted LUNs move to the top of the LUN table. The rest stay
2516 	 * on the bottom half. The bottom half grows from the end
2517 	 * (index = 255), whereas the top half grows from the beginning
2518 	 * (index = 0).
2519 	 */
2520 	cfg->promote_lun_index  = 0;
2521 	cfg->last_lun_index[0] = CXLFLASH_NUM_VLUNS/2 - 1;
2522 	cfg->last_lun_index[1] = CXLFLASH_NUM_VLUNS/2 - 1;
2523 
2524 	cfg->dev_id = (struct pci_device_id *)dev_id;
2525 
2526 	init_waitqueue_head(&cfg->tmf_waitq);
2527 	init_waitqueue_head(&cfg->reset_waitq);
2528 
2529 	INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
2530 	cfg->lr_state = LINK_RESET_INVALID;
2531 	cfg->lr_port = -1;
2532 	spin_lock_init(&cfg->tmf_slock);
2533 	mutex_init(&cfg->ctx_tbl_list_mutex);
2534 	mutex_init(&cfg->ctx_recovery_mutex);
2535 	init_rwsem(&cfg->ioctl_rwsem);
2536 	INIT_LIST_HEAD(&cfg->ctx_err_recovery);
2537 	INIT_LIST_HEAD(&cfg->lluns);
2538 
2539 	pci_set_drvdata(pdev, cfg);
2540 
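	/* Cache the CXL AFU handle for PERST and master context setup */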
2541 	cfg->cxl_afu = cxl_pci_to_afu(pdev);
2542 
2543 	rc = init_pci(cfg);
2544 	if (rc) {
2545 		dev_err(&pdev->dev, "%s: call to init_pci failed rc=%d!\n",
2546 			__func__, rc);
2547 		goto out_remove;
2548 	}
2549 	cfg->init_state = INIT_STATE_PCI;
2550 
2551 	rc = init_afu(cfg);
2552 	if (rc) {
2553 		dev_err(&pdev->dev, "%s: call to init_afu failed rc=%d!\n",
2554 			__func__, rc);
2555 		goto out_remove;
2556 	}
2557 	cfg->init_state = INIT_STATE_AFU;
2558 
2559 	rc = init_scsi(cfg);
2560 	if (rc) {
2561 		dev_err(&pdev->dev, "%s: call to init_scsi failed rc=%d!\n",
2562 			__func__, rc);
2563 		goto out_remove;
2564 	}
2565 	cfg->init_state = INIT_STATE_SCSI;
2566 
2567 out:
2568 	pr_debug("%s: returning rc=%d\n", __func__, rc);
2569 	return rc;
2570 
2571 out_remove:
2572 	cxlflash_remove(pdev);
2573 	goto out;
2574 }
2575 
2576 /**
2577  * cxlflash_pci_error_detected() - called when a PCI error is detected
2578  * @pdev:	PCI device struct.
2579  * @state:	PCI channel state.
2580  *
2581  * When an EEH occurs during an active reset, wait until the reset is
2582  * complete and then take action based upon the device state.
2583  *
2584  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
2585  */
2586 static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
2587 						    pci_channel_state_t state)
2588 {
2589 	int rc = 0;
2590 	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
2591 	struct device *dev = &cfg->dev->dev;
2592 
2593 	dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);
2594 
2595 	switch (state) {
2596 	case pci_channel_io_frozen:
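		/* Once any prior reset completes, quiesce and tear down the AFU */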
2597 		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2598 		if (cfg->state == STATE_FAILTERM)
2599 			return PCI_ERS_RESULT_DISCONNECT;
2600 
2601 		cfg->state = STATE_RESET;
2602 		scsi_block_requests(cfg->host);
2603 		drain_ioctls(cfg);
2604 		rc = cxlflash_mark_contexts_error(cfg);
2605 		if (unlikely(rc))
2606 			dev_err(dev, "%s: Failed to mark user contexts!(%d)\n",
2607 				__func__, rc);
2608 		term_afu(cfg);
2609 		return PCI_ERS_RESULT_NEED_RESET;
2610 	case pci_channel_io_perm_failure:
2611 		cfg->state = STATE_FAILTERM;
2612 		wake_up_all(&cfg->reset_waitq);
2613 		scsi_unblock_requests(cfg->host);
2614 		return PCI_ERS_RESULT_DISCONNECT;
2615 	default:
2616 		break;
2617 	}
2618 	return PCI_ERS_RESULT_NEED_RESET;
2619 }
2620 
2621 /**
2622  * cxlflash_pci_slot_reset() - called when PCI slot has been reset
2623  * @pdev:	PCI device struct.
2624  *
2625  * This routine is called by the pci error recovery code after the PCI
2626  * slot has been reset, just before we should resume normal operations.
2627  *
2628  * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
2629  */
2630 static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
2631 {
2632 	int rc = 0;
2633 	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
2634 	struct device *dev = &cfg->dev->dev;
2635 
2636 	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
2637 
2638 	rc = init_afu(cfg);
2639 	if (unlikely(rc)) {
2640 		dev_err(dev, "%s: EEH recovery failed! (%d)\n", __func__, rc);
2641 		return PCI_ERS_RESULT_DISCONNECT;
2642 	}
2643 
2644 	return PCI_ERS_RESULT_RECOVERED;
2645 }
2646 
2647 /**
2648  * cxlflash_pci_resume() - called when normal operation can resume
2649  * @pdev:	PCI device struct
2650  */
2651 static void cxlflash_pci_resume(struct pci_dev *pdev)
2652 {
2653 	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
2654 	struct device *dev = &cfg->dev->dev;
2655 
2656 	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
2657 
2658 	cfg->state = STATE_NORMAL;
2659 	wake_up_all(&cfg->reset_waitq);
2660 	scsi_unblock_requests(cfg->host);
2661 }
2662 
2663 static const struct pci_error_handlers cxlflash_err_handler = {
2664 	.error_detected = cxlflash_pci_error_detected,
2665 	.slot_reset = cxlflash_pci_slot_reset,
2666 	.resume = cxlflash_pci_resume,
2667 };
2668 
2669 /*
2670  * PCI device structure
2671  */
2672 static struct pci_driver cxlflash_driver = {
2673 	.name = CXLFLASH_NAME,
2674 	.id_table = cxlflash_pci_table,
2675 	.probe = cxlflash_probe,
2676 	.remove = cxlflash_remove,
2677 	.shutdown = cxlflash_remove,
2678 	.err_handler = &cxlflash_err_handler,
2679 };
2680 
2681 /**
2682  * init_cxlflash() - module entry point
2683  *
2684  * Return: 0 on success, -errno on failure
2685  */
2686 static int __init init_cxlflash(void)
2687 {
2688 	pr_info("%s: %s\n", __func__, CXLFLASH_ADAPTER_NAME);
2689 
2690 	cxlflash_list_init();
2691 
2692 	return pci_register_driver(&cxlflash_driver);
2693 }
2694 
2695 /**
2696  * exit_cxlflash() - module exit point
2697  */
2698 static void __exit exit_cxlflash(void)
2699 {
2700 	cxlflash_term_global_luns();
2701 	cxlflash_free_errpage();
2702 
2703 	pci_unregister_driver(&cxlflash_driver);
2704 }
2705 
2706 module_init(init_cxlflash);
2707 module_exit(exit_cxlflash);
2708