/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/unaligned.h>

#include <misc/cxl.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "main.h"
#include "sislite.h"
#include "common.h"

MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");

/**
 * process_cmd_err() - command error handler
 * @cmd:	AFU command that experienced the error.
 * @scp:	SCSI command associated with the AFU command in error.
 *
 * Translates error bits from AFU command to SCSI command results.
 */
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
	struct afu *afu;
	struct cxlflash_cfg *cfg;
	struct device *dev;
	struct sisl_ioarcb *ioarcb;
	struct sisl_ioasa *ioasa;
	u32 resid;

	/* Validate the command before dereferencing any of its fields */
	if (unlikely(!cmd))
		return;

	afu = cmd->parent;
	cfg = afu->parent;
	dev = &cfg->dev->dev;
	ioarcb = &(cmd->rcb);
	ioasa = &(cmd->sa);

	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
		resid = ioasa->resid;
		scsi_set_resid(scp, resid);
		dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
			__func__, cmd, scp, resid);
	}

	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
		dev_dbg(dev, "%s: cmd overrun cmd = %p scp = %p\n",
			__func__, cmd, scp);
		scp->result = (DID_ERROR << 16);
	}

	dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x "
		"afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__,
		ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc,
		ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra);

	if (ioasa->rc.scsi_rc) {
		/* We have a SCSI status */
		if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
			memcpy(scp->sense_buffer, ioasa->sense_data,
			       SISL_SENSE_DATA_LEN);
			scp->result = ioasa->rc.scsi_rc;
		} else {
			scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
		}
	}

	/*
	 * We encountered an error. Set scp->result based on the nature
	 * of the error. Note that the FC and AFU checks that follow can
	 * overwrite a result already derived from the SCSI status above.
	 */
	if (ioasa->rc.fc_rc) {
		/* We have an FC status */
		switch (ioasa->rc.fc_rc) {
		case SISL_FC_RC_LINKDOWN:
			scp->result = (DID_REQUEUE << 16);
			break;
		case SISL_FC_RC_RESID:
			/* This indicates an FCP resid underrun */
			if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
				/* If the SISL_RC_FLAGS_OVERRUN flag was set,
				 * then we will handle this error elsewhere.
				 * If not, then we must handle it here.
				 * This is probably an AFU bug.
				 */
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_FC_RC_RESIDERR:
			/* Resid mismatch between adapter and device */
		case SISL_FC_RC_TGTABORT:
		case SISL_FC_RC_ABORTOK:
		case SISL_FC_RC_ABORTFAIL:
		case SISL_FC_RC_NOLOGI:
		case SISL_FC_RC_ABORTPEND:
		case SISL_FC_RC_WRABORTPEND:
		case SISL_FC_RC_NOEXP:
		case SISL_FC_RC_INUSE:
			scp->result = (DID_ERROR << 16);
			break;
		}
	}

	if (ioasa->rc.afu_rc) {
		/* We have an AFU error */
		switch (ioasa->rc.afu_rc) {
		case SISL_AFU_RC_NO_CHANNELS:
			scp->result = (DID_NO_CONNECT << 16);
			break;
		case SISL_AFU_RC_DATA_DMA_ERR:
			switch (ioasa->afu_extra) {
			case SISL_AFU_DMA_ERR_PAGE_IN:
				/* Retry */
				scp->result = (DID_IMM_RETRY << 16);
				break;
			case SISL_AFU_DMA_ERR_INVALID_EA:
			default:
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_AFU_RC_OUT_OF_DATA_BUFS:
			/* Retry */
			scp->result = (DID_ALLOC_FAILURE << 16);
			break;
		default:
			scp->result = (DID_ERROR << 16);
		}
	}
}

/**
 * cmd_complete() - command completion handler
 * @cmd:	AFU command that has completed.
 *
 * For commands issued on behalf of the SCSI stack (cmd->scp populated),
 * posts the completion status to the midlayer and, when the command was
 * a TMF, clears the TMF-active state. For internal commands, signals the
 * waiter via the command's completion event.
 */
static void cmd_complete(struct afu_cmd *cmd)
{
	struct scsi_cmnd *scp;
	ulong lock_flags;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
	bool cmd_is_tmf;

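	/* Remove the command from the hardware queue's pending list */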
	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	list_del(&cmd->list);
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);

	if (cmd->scp) {
		scp = cmd->scp;
		if (unlikely(cmd->sa.ioasc))
			process_cmd_err(cmd, scp);
		else
			scp->result = (DID_OK << 16);

		cmd_is_tmf = cmd->cmd_tmf;

		dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
				    __func__, scp, scp->result, cmd->sa.ioasc);

		scp->scsi_done(scp);

		if (cmd_is_tmf) {
			spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
			cfg->tmf_active = false;
			wake_up_all_locked(&cfg->tmf_waitq);
			spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		}
	} else {
		complete(&cmd->cevent);
	}
}

/**
 * flush_pending_cmds() - flush all pending commands on this hardware queue
 * @hwq:	Hardware queue to flush.
 *
 * The hardware send queue lock associated with this hardware queue must be
 * held when calling this routine.
 */
static void flush_pending_cmds(struct hwq *hwq)
{
	struct afu_cmd *cmd, *tmp;
	struct scsi_cmnd *scp;

	list_for_each_entry_safe(cmd, tmp, &hwq->pending_cmds, list) {
		/* Bypass command when on a doneq, cmd_complete() will handle */
		if (!list_empty(&cmd->queue))
			continue;

		list_del(&cmd->list);

		if (cmd->scp) {
			scp = cmd->scp;
			scp->result = (DID_IMM_RETRY << 16);
			scp->scsi_done(scp);
		} else {
			cmd->cmd_aborted = true;
			complete(&cmd->cevent);
		}
	}
}

/**
 * context_reset() - reset context via specified register
 * @hwq:	Hardware queue owning the context to be reset.
 * @reset_reg:	MMIO register to perform reset.
 *
 * When the reset is successful, the SISLite specification guarantees that
 * the AFU has aborted all currently pending I/O. Accordingly, these commands
 * must be flushed.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset(struct hwq *hwq, __be64 __iomem *reset_reg)
{
	struct cxlflash_cfg *cfg = hwq->afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = -ETIMEDOUT;
	int nretry = 0;
	u64 val = 0x1;
	ulong lock_flags;

	dev_dbg(dev, "%s: hwq=%p\n", __func__, hwq);

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);

	writeq_be(val, reset_reg);
	do {
		val = readq_be(reset_reg);
		if ((val & 0x1) == 0x0) {
			rc = 0;
			break;
		}

		/* Double delay each time */
		udelay(1 << nretry);
	} while (nretry++ < MC_ROOM_RETRY_CNT);
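
	/*
	 * Note: with the doubling delay above, the worst case busy-wait
	 * before giving up is roughly the sum of 2^k microseconds for
	 * k = 0..MC_ROOM_RETRY_CNT, i.e. on the order of
	 * 2^(MC_ROOM_RETRY_CNT + 1) microseconds.
	 */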

	if (!rc)
		flush_pending_cmds(hwq);

	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);

	dev_dbg(dev, "%s: returning rc=%d, val=%016llx nretry=%d\n",
		__func__, rc, val, nretry);
	return rc;
}

/**
 * context_reset_ioarrin() - reset context via IOARRIN register
 * @hwq:	Hardware queue owning the context to be reset.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset_ioarrin(struct hwq *hwq)
{
	return context_reset(hwq, &hwq->host_map->ioarrin);
}

/**
 * context_reset_sq() - reset context via SQ_CONTEXT_RESET register
 * @hwq:	Hardware queue owning the context to be reset.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset_sq(struct hwq *hwq)
{
	return context_reset(hwq, &hwq->host_map->sq_ctx_reset);
}

/**
 * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
	int rc = 0;
	s64 room;
	ulong lock_flags;

	/*
	 * To avoid the performance penalty of MMIO, spread the update of
	 * 'room' over multiple commands.
	 */
	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	if (--hwq->room < 0) {
		room = readq_be(&hwq->host_map->cmd_room);
		if (room <= 0) {
			dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
					    "0x%02X, room=0x%016llX\n",
					    __func__, cmd->rcb.cdb[0], room);
			hwq->room = 0;
			rc = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}
		hwq->room = room - 1;
	}

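	/*
	 * A send credit is held at this point. Queue the command on the
	 * pending list before ringing the IOARRIN doorbell so that a
	 * completion or a flush can always locate it.
	 */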
	list_add(&cmd->list, &hwq->pending_cmds);
	writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin);
out:
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
	dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n", __func__,
		cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
	return rc;
}

/**
 * send_cmd_sq() - sends an AFU command via SQ ring
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
	int rc = 0;
	int newval;
	ulong lock_flags;

	newval = atomic_dec_if_positive(&hwq->hsq_credits);
	if (newval <= 0) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	cmd->rcb.ioasa = &cmd->sa;

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);

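	/*
	 * Copy the IOARCB into the current SQ entry, then advance the
	 * current pointer, wrapping back to the start of the ring when
	 * the end is reached.
	 */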
	*hwq->hsq_curr = cmd->rcb;
	if (hwq->hsq_curr < hwq->hsq_end)
		hwq->hsq_curr++;
	else
		hwq->hsq_curr = hwq->hsq_start;

	list_add(&cmd->list, &hwq->pending_cmds);
	writeq_be((u64)hwq->hsq_curr, &hwq->host_map->sq_tail);

	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
out:
	dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
		"head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
		cmd->rcb.data_ea, cmd->rcb.ioasa, rc, hwq->hsq_curr,
		readq_be(&hwq->host_map->sq_head),
		readq_be(&hwq->host_map->sq_tail));
	return rc;
}

/**
 * wait_resp() - waits for a response or timeout to a sent AFU command
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command that was sent.
 *
 * Return: 0 on success, -errno on failure
 */
static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);

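	/*
	 * Allow up to twice the command's own timeout (rcb.timeout is in
	 * seconds) before giving up on the completion locally.
	 */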
	timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
	if (!timeout)
		rc = -ETIMEDOUT;

	if (cmd->cmd_aborted)
		rc = -EAGAIN;

	if (unlikely(cmd->sa.ioasc != 0)) {
		dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
			__func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
		rc = -EIO;
	}

	return rc;
}

/**
 * cmd_to_target_hwq() - selects a target hardware queue for a SCSI command
 * @host:	SCSI host associated with device.
 * @scp:	SCSI command to send.
 * @afu:	AFU associated with the host.
 *
 * Hashes a command based upon the hardware queue mode.
 *
 * Return: Trusted index of target hardware queue
 */
static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp,
			     struct afu *afu)
{
	u32 tag;
	u32 hwq = 0;

	if (afu->num_hwqs == 1)
		return 0;

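	/*
	 * HWQ_MODE_RR round-robins across the queues, HWQ_MODE_TAG derives
	 * the queue from the block layer's unique tag, and HWQ_MODE_CPU
	 * hashes on the submitting CPU.
	 */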
	switch (afu->hwq_mode) {
	case HWQ_MODE_RR:
		hwq = afu->hwq_rr_count++ % afu->num_hwqs;
		break;
	case HWQ_MODE_TAG:
		tag = blk_mq_unique_tag(scp->request);
		hwq = blk_mq_unique_tag_to_hwq(tag);
		break;
	case HWQ_MODE_CPU:
		hwq = smp_processor_id() % afu->num_hwqs;
		break;
	default:
		WARN_ON_ONCE(1);
	}

	return hwq;
}

/**
 * send_tmf() - sends a Task Management Function (TMF)
 * @afu:	AFU associated with the host.
 * @scp:	SCSI command from stack.
 * @tmfcmd:	TMF command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
{
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct afu_cmd *cmd = sc_to_afucz(scp);
	struct device *dev = &cfg->dev->dev;
	int hwq_index = cmd_to_target_hwq(host, scp, afu);
	struct hwq *hwq = get_hwq(afu, hwq_index);
	ulong lock_flags;
	int rc = 0;
	ulong to;

	/* When a Task Management Function is active, do not send another */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	cfg->tmf_active = true;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	cmd->scp = scp;
	cmd->parent = afu;
	cmd->cmd_tmf = true;
	cmd->hwq_index = hwq_index;

	cmd->rcb.ctx_id = hwq->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN |
			      SISL_REQ_FLAGS_TMF_CMD);
	memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));

	rc = afu->send_cmd(afu, cmd);
	if (unlikely(rc)) {
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		goto out;
	}

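	/* Wait up to 5 seconds for the TMF to complete (tmf_active cleared) */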
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	to = msecs_to_jiffies(5000);
	to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
						       !cfg->tmf_active,
						       cfg->tmf_slock,
						       to);
	if (!to) {
		cfg->tmf_active = false;
		dev_err(dev, "%s: TMF timed out\n", __func__);
		rc = -1;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
out:
	return rc;
}

/**
 * cxlflash_driver_info() - information handler for this host driver
 * @host:	SCSI host associated with device.
 *
 * Return: A string describing the device.
 */
static const char *cxlflash_driver_info(struct Scsi_Host *host)
{
	return CXLFLASH_ADAPTER_NAME;
}

/**
 * cxlflash_queuecommand() - sends a mid-layer request
 * @host:	SCSI host associated with device.
 * @scp:	SCSI command to send.
 *
 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd = sc_to_afucz(scp);
	struct scatterlist *sg = scsi_sglist(scp);
	int hwq_index = cmd_to_target_hwq(host, scp, afu);
	struct hwq *hwq = get_hwq(afu, hwq_index);
	u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
	ulong lock_flags;
	int rc = 0;

	dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
			    "cdb=(%08x-%08x-%08x-%08x)\n",
			    __func__, scp, host->host_no, scp->device->channel,
			    scp->device->id, scp->device->lun,
			    get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	/*
	 * If a Task Management Function is active, wait for it to complete
	 * before continuing with regular commands.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active) {
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	switch (cfg->state) {
	case STATE_PROBING:
	case STATE_PROBED:
	case STATE_RESET:
		dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	case STATE_FAILTERM:
		dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
		scp->result = (DID_NO_CONNECT << 16);
		scp->scsi_done(scp);
		rc = 0;
		goto out;
	default:
		break;
	}

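	/*
	 * Only the first scatter-gather element is used below; the host
	 * template presumably caps commands at a single SG entry, so the
	 * data buffer is always virtually contiguous.
	 */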
	if (likely(sg)) {
		cmd->rcb.data_len = sg->length;
		cmd->rcb.data_ea = (uintptr_t)sg_virt(sg);
	}

	cmd->scp = scp;
	cmd->parent = afu;
	cmd->hwq_index = hwq_index;

	cmd->rcb.ctx_id = hwq->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		req_flags |= SISL_REQ_FLAGS_HOST_WRITE;

	cmd->rcb.req_flags = req_flags;
	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));

	rc = afu->send_cmd(afu, cmd);
out:
	return rc;
}

/**
 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;

	if (pci_channel_offline(pdev))
		wait_event_timeout(cfg->reset_waitq,
				   !pci_channel_offline(pdev),
				   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
}

/**
 * free_mem() - free memory associated with the AFU
 * @cfg:	Internal structure associated with the host.
 */
static void free_mem(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;

	if (cfg->afu) {
		free_pages((ulong)afu, get_order(sizeof(struct afu)));
		cfg->afu = NULL;
	}
}

/**
 * cxlflash_reset_sync() - synchronizing point for asynchronous resets
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_reset_sync(struct cxlflash_cfg *cfg)
{
	if (cfg->async_reset_cookie == 0)
		return;

	/* Wait until all async calls prior to this cookie have completed */
	async_synchronize_cookie(cfg->async_reset_cookie + 1);
	cfg->async_reset_cookie = 0;
}

/**
 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU in a partially allocated/initialized state.
 *
 * Cancels scheduled worker threads, waits for any active internal AFU
 * commands to time out, disables IRQ polling and then unmaps the MMIO space.
 */
static void stop_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct hwq *hwq;
	int i;

	cancel_work_sync(&cfg->work_q);
	if (!current_is_async())
		cxlflash_reset_sync(cfg);

	if (likely(afu)) {
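		/* Wait for any active internal AFU commands to drain */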
		while (atomic_read(&afu->cmds_active))
			ssleep(1);

		if (afu_is_irqpoll_enabled(afu)) {
			for (i = 0; i < afu->num_hwqs; i++) {
				hwq = get_hwq(afu, i);

				irq_poll_disable(&hwq->irqpoll);
			}
		}

		if (likely(afu->afu_map)) {
			cxl_psa_unmap((void __iomem *)afu->afu_map);
			afu->afu_map = NULL;
		}
	}
}

/**
 * term_intr() - disables all AFU interrupts
 * @cfg:	Internal structure associated with the host.
 * @level:	Depth of allocation, where to begin waterfall tear down.
 * @index:	Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
		      u32 index)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;

	if (!afu) {
		dev_err(dev, "%s: returning with NULL afu\n", __func__);
		return;
	}

	hwq = get_hwq(afu, index);

	if (!hwq->ctx) {
		dev_err(dev, "%s: returning with NULL MC\n", __func__);
		return;
	}

	switch (level) {
	case UNMAP_THREE:
		/* SISL_MSI_ASYNC_ERROR is set up only for the primary HWQ */
		if (index == PRIMARY_HWQ)
			cxl_unmap_afu_irq(hwq->ctx, 3, hwq);
		/* fall through */
	case UNMAP_TWO:
		cxl_unmap_afu_irq(hwq->ctx, 2, hwq);
		/* fall through */
	case UNMAP_ONE:
		cxl_unmap_afu_irq(hwq->ctx, 1, hwq);
		/* fall through */
	case FREE_IRQ:
		cxl_free_afu_irqs(hwq->ctx);
		/* fall through */
	case UNDO_NOOP:
		/* No action required */
		break;
	}
}

/**
 * term_mc() - terminates the master context
 * @cfg:	Internal structure associated with the host.
 * @index:	Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_mc(struct cxlflash_cfg *cfg, u32 index)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;
	ulong lock_flags;

	if (!afu) {
		dev_err(dev, "%s: returning with NULL afu\n", __func__);
		return;
	}

	hwq = get_hwq(afu, index);

	if (!hwq->ctx) {
		dev_err(dev, "%s: returning with NULL MC\n", __func__);
		return;
	}

	WARN_ON(cxl_stop_context(hwq->ctx));
	if (index != PRIMARY_HWQ)
		WARN_ON(cxl_release_context(hwq->ctx));
	hwq->ctx = NULL;

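	/* With the context stopped, flush any commands still pending */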
	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	flush_pending_cmds(hwq);
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
}

/**
 * term_afu() - terminates the AFU
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_afu(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int k;

	/*
	 * Tear down is carefully orchestrated to ensure
	 * no interrupts can come in when the problem state
	 * area is unmapped.
	 *
	 * 1) Disable all AFU interrupts for each master
	 * 2) Unmap the problem state area
	 * 3) Stop each master context
	 */
	if (cfg->afu) {
		for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
			term_intr(cfg, UNMAP_THREE, k);

		stop_afu(cfg);

		for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
			term_mc(cfg, k);
	}

	dev_dbg(dev, "%s: returning\n", __func__);
}

/**
 * notify_shutdown() - notifies device of pending shutdown
 * @cfg:	Internal structure associated with the host.
 * @wait:	Whether to wait for shutdown processing to complete.
 *
 * This function will notify the AFU that the adapter is being shut down
 * and will wait for shutdown processing to complete if wait is true.
 * This notification should flush pending I/Os to the device and halt
 * further I/Os until the next AFU reset is issued and device restarted.
 */
static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct dev_dependent_vals *ddv;
	__be64 __iomem *fc_port_regs;
	u64 reg, status;
	int i, retry_cnt = 0;

	ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
	if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
		return;

	if (!afu || !afu->afu_map) {
		dev_dbg(dev, "%s: Problem state area not mapped\n", __func__);
		return;
	}

	/* Notify AFU */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);

		reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
		reg |= SISL_FC_SHUTDOWN_NORMAL;
		writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
	}

	if (!wait)
		return;

	/* Wait up to 1.5 seconds for shutdown processing to complete */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);
		retry_cnt = 0;

		while (true) {
			status = readq_be(&fc_port_regs[FC_STATUS / 8]);
			if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
				break;
			if (++retry_cnt >= MC_RETRY_CNT) {
				dev_dbg(dev, "%s: port %d shutdown processing "
					"not yet completed\n", __func__, i);
				break;
			}
			msleep(100 * retry_cnt);
		}
	}
}

/**
 * cxlflash_remove() - PCI entry point to tear down host
 * @pdev:	PCI device associated with the host.
 *
 * Safe to use as a cleanup in partially allocated/initialized state. Note that
 * the reset_waitq is flushed as part of the stop/termination of user contexts.
 */
static void cxlflash_remove(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	ulong lock_flags;

	if (!pci_is_enabled(pdev)) {
		dev_dbg(dev, "%s: Device is disabled\n", __func__);
		return;
	}

	/*
	 * If a Task Management Function is active, wait for it to complete
	 * before continuing with remove.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	/* Notify AFU and wait for shutdown processing to complete */
	notify_shutdown(cfg, true);

	cfg->state = STATE_FAILTERM;
	cxlflash_stop_term_user_contexts(cfg);

	switch (cfg->init_state) {
	case INIT_STATE_SCSI:
		cxlflash_term_local_luns(cfg);
		scsi_remove_host(cfg->host);
		/* fall through */
	case INIT_STATE_AFU:
		term_afu(cfg);
		/* fall through */
	case INIT_STATE_PCI:
		pci_disable_device(pdev);
		/* fall through */
	case INIT_STATE_NONE:
		free_mem(cfg);
		scsi_host_put(cfg->host);
		break;
	}

	dev_dbg(dev, "%s: returning\n", __func__);
}

/**
 * alloc_mem() - allocates the AFU and its command pool
 * @cfg:	Internal structure associated with the host.
 *
 * A partially allocated state remains on failure.
 *
 * Return:
 *	0 on success
 *	-ENOMEM on failure to allocate memory
 */
static int alloc_mem(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	struct device *dev = &cfg->dev->dev;

	/* AFU is ~28k, i.e. only one 64k page or up to seven 4k pages */
	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					    get_order(sizeof(struct afu)));
	if (unlikely(!cfg->afu)) {
		dev_err(dev, "%s: cannot get %d free pages\n",
			__func__, get_order(sizeof(struct afu)));
		rc = -ENOMEM;
		goto out;
	}
	cfg->afu->parent = cfg;
	cfg->afu->desired_hwqs = CXLFLASH_DEF_HWQS;
	cfg->afu->afu_map = NULL;
out:
	return rc;
}

/**
 * init_pci() - initializes the host as a PCI device
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_pci(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = pci_enable_device(pdev);
	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			cxlflash_wait_for_pci_err_recovery(cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(dev, "%s: Cannot enable adapter\n", __func__);
			cxlflash_wait_for_pci_err_recovery(cfg);
			goto out;
		}
	}

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_scsi(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = scsi_add_host(cfg->host, &pdev->dev);
	if (rc) {
		dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc);
		goto out;
	}

	scsi_scan_host(cfg->host);

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * set_port_online() - transitions the specified host FC port to online state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. Online state means
 * that the FC link layer has synced, completed the handshaking process, and
 * is ready for login to start.
 */
static void set_port_online(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE);	/* clear OFF_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);	/* set ON_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * set_port_offline() - transitions the specified host FC port to offline state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call.
 */
static void set_port_offline(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);	/* clear ON_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);	/* set OFF_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * wait_port_online() - waits for the specified host FC port to come online
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call. This will time out
 * when the cable is not plugged in.
 *
 * Return:
 *	TRUE (1) when the specified port is online
 *	FALSE (0) when the specified port fails to come online after timeout
 */
static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	WARN_ON(delay_us < 1000);

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
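		/*
		 * An all-ones readback typically means the MMIO space is
		 * unavailable (e.g. error recovery in progress); cut the
		 * remaining retries in half rather than waiting out the
		 * full count.
		 */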
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
}

1067c21e0bbfSMatthew R. Ochs /**
1068c21e0bbfSMatthew R. Ochs  * wait_port_offline() - waits for the specified host FC port to go offline
1069c21e0bbfSMatthew R. Ochs  * @fc_regs:	Top of MMIO region defined for specified port.
1070c21e0bbfSMatthew R. Ochs  * @delay_us:	Number of microseconds to delay between reading port status.
1071c21e0bbfSMatthew R. Ochs  * @nretry:	Number of cycles to retry reading port status.
1072c21e0bbfSMatthew R. Ochs  *
1073c21e0bbfSMatthew R. Ochs  * The provided MMIO region must be mapped prior to call.
1074c21e0bbfSMatthew R. Ochs  *
1075c21e0bbfSMatthew R. Ochs  * Return:
1076c21e0bbfSMatthew R. Ochs  *	TRUE (1) when the specified port is offline
1077c21e0bbfSMatthew R. Ochs  *	FALSE (0) when the specified port fails to go offline after timeout
1078c21e0bbfSMatthew R. Ochs  */
1079fb67d44dSMatthew R. Ochs static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
1080c21e0bbfSMatthew R. Ochs {
1081c21e0bbfSMatthew R. Ochs 	u64 status;
1082c21e0bbfSMatthew R. Ochs 
1083fb67d44dSMatthew R. Ochs 	WARN_ON(delay_us < 1000);
1084c21e0bbfSMatthew R. Ochs 
1085c21e0bbfSMatthew R. Ochs 	do {
1086c21e0bbfSMatthew R. Ochs 		msleep(delay_us / 1000);
1087c21e0bbfSMatthew R. Ochs 		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
108805dab432SMatthew R. Ochs 		if (status == U64_MAX)
108905dab432SMatthew R. Ochs 			nretry /= 2;
1090c21e0bbfSMatthew R. Ochs 	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
1091c21e0bbfSMatthew R. Ochs 		 nretry--);
1092c21e0bbfSMatthew R. Ochs 
1093c21e0bbfSMatthew R. Ochs 	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
1094c21e0bbfSMatthew R. Ochs }
1095c21e0bbfSMatthew R. Ochs 
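/*
 * Illustrative timing note: each wait routine above polls once per
 * delay_us microseconds for at most nretry cycles, bounding a transition
 * at roughly delay_us * nretry / 1000 milliseconds (less once MMIO read
 * failures halve the remaining budget). With a hypothetical 5000us
 * interval and 100 retries that is ~500ms per transition, hence the
 * "up to a few seconds" figure quoted for the offline/online toggles
 * below.
 */
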
1096c21e0bbfSMatthew R. Ochs /**
1097c21e0bbfSMatthew R. Ochs  * afu_set_wwpn() - configures the WWPN for the specified host FC port
1098c21e0bbfSMatthew R. Ochs  * @afu:	AFU associated with the host that owns the specified FC port.
1099c21e0bbfSMatthew R. Ochs  * @port:	Port number being configured.
1100c21e0bbfSMatthew R. Ochs  * @fc_regs:	Top of MMIO region defined for specified port.
1101c21e0bbfSMatthew R. Ochs  * @wwpn:	The world-wide-port-number previously discovered for port.
1102c21e0bbfSMatthew R. Ochs  *
1103c21e0bbfSMatthew R. Ochs  * The provided MMIO region must be mapped prior to call. As part of the
1104c21e0bbfSMatthew R. Ochs  * sequence to configure the WWPN, the port is toggled offline and then back
1105c21e0bbfSMatthew R. Ochs  * online. This toggling action can cause this routine to delay up to a few
1106c21e0bbfSMatthew R. Ochs  * seconds. When configured to use the internal LUN feature of the AFU, a
1107c21e0bbfSMatthew R. Ochs  * failure to come online is overridden.
1108c21e0bbfSMatthew R. Ochs  */
1109f8013261SMatthew R. Ochs static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
11101786f4a0SMatthew R. Ochs 			 u64 wwpn)
1111c21e0bbfSMatthew R. Ochs {
1112fb67d44dSMatthew R. Ochs 	struct cxlflash_cfg *cfg = afu->parent;
1113fb67d44dSMatthew R. Ochs 	struct device *dev = &cfg->dev->dev;
1114fb67d44dSMatthew R. Ochs 
1115c21e0bbfSMatthew R. Ochs 	set_port_offline(fc_regs);
1116c21e0bbfSMatthew R. Ochs 	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1117c21e0bbfSMatthew R. Ochs 			       FC_PORT_STATUS_RETRY_CNT)) {
1118fb67d44dSMatthew R. Ochs 		dev_dbg(dev, "%s: wait on port %d to go offline timed out\n",
1119c21e0bbfSMatthew R. Ochs 			__func__, port);
1120c21e0bbfSMatthew R. Ochs 	}
1121c21e0bbfSMatthew R. Ochs 
1122c21e0bbfSMatthew R. Ochs 	writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);
1123c21e0bbfSMatthew R. Ochs 
1124c21e0bbfSMatthew R. Ochs 	set_port_online(fc_regs);
1125c21e0bbfSMatthew R. Ochs 	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1126c21e0bbfSMatthew R. Ochs 			      FC_PORT_STATUS_RETRY_CNT)) {
1127fb67d44dSMatthew R. Ochs 		dev_dbg(dev, "%s: wait on port %d to go online timed out\n",
1128c21e0bbfSMatthew R. Ochs 			__func__, port);
1129c21e0bbfSMatthew R. Ochs 	}
1130c21e0bbfSMatthew R. Ochs }
1131c21e0bbfSMatthew R. Ochs 
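/*
 * Hypothetical usage (not part of the driver): a caller re-programming
 * every discovered WWPN would loop over the ports, e.g.:
 *
 *	for (i = 0; i < cfg->num_fc_ports; i++)
 *		if (wwpn[i] != 0)
 *			afu_set_wwpn(afu, i, get_fc_port_regs(cfg, i),
 *				     wwpn[i]);
 *
 * init_global() below does essentially this, skipping ports whose WWPN is
 * 0 (already programmed).
 */
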
1132c21e0bbfSMatthew R. Ochs /**
1133c21e0bbfSMatthew R. Ochs  * afu_link_reset() - resets the specified host FC port
1134c21e0bbfSMatthew R. Ochs  * @afu:	AFU associated with the host that owns the specified FC port.
1135c21e0bbfSMatthew R. Ochs  * @port:	Port number being configured.
1136c21e0bbfSMatthew R. Ochs  * @fc_regs:	Top of MMIO region defined for specified port.
1137c21e0bbfSMatthew R. Ochs  *
1138c21e0bbfSMatthew R. Ochs  * The provided MMIO region must be mapped prior to call. The sequence to
1139c21e0bbfSMatthew R. Ochs  * reset the port involves toggling it offline and then back online. This
1140c21e0bbfSMatthew R. Ochs  * action can cause this routine to delay up to a few seconds. An effort
1141c21e0bbfSMatthew R. Ochs  * is made to maintain the link with the device by switching the host to
1142c21e0bbfSMatthew R. Ochs  * use the alternate port exclusively while the reset takes place.
1144c21e0bbfSMatthew R. Ochs  */
11451786f4a0SMatthew R. Ochs static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
1146c21e0bbfSMatthew R. Ochs {
1147fb67d44dSMatthew R. Ochs 	struct cxlflash_cfg *cfg = afu->parent;
1148fb67d44dSMatthew R. Ochs 	struct device *dev = &cfg->dev->dev;
1149c21e0bbfSMatthew R. Ochs 	u64 port_sel;
1150c21e0bbfSMatthew R. Ochs 
1151c21e0bbfSMatthew R. Ochs 	/* first switch the AFU to the other links, if any */
1152c21e0bbfSMatthew R. Ochs 	port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
11534da74db0SDan Carpenter 	port_sel &= ~(1ULL << port);
1154c21e0bbfSMatthew R. Ochs 	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
1155c21e0bbfSMatthew R. Ochs 	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
1156c21e0bbfSMatthew R. Ochs 
1157c21e0bbfSMatthew R. Ochs 	set_port_offline(fc_regs);
1158c21e0bbfSMatthew R. Ochs 	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1159c21e0bbfSMatthew R. Ochs 			       FC_PORT_STATUS_RETRY_CNT))
1160fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: wait on port %d to go offline timed out\n",
1161c21e0bbfSMatthew R. Ochs 			__func__, port);
1162c21e0bbfSMatthew R. Ochs 
1163c21e0bbfSMatthew R. Ochs 	set_port_online(fc_regs);
1164c21e0bbfSMatthew R. Ochs 	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1165c21e0bbfSMatthew R. Ochs 			      FC_PORT_STATUS_RETRY_CNT))
1166fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: wait on port %d to go online timed out\n",
1167c21e0bbfSMatthew R. Ochs 			__func__, port);
1168c21e0bbfSMatthew R. Ochs 
1169c21e0bbfSMatthew R. Ochs 	/* switch back to include this port */
11704da74db0SDan Carpenter 	port_sel |= (1ULL << port);
1171c21e0bbfSMatthew R. Ochs 	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
1172c21e0bbfSMatthew R. Ochs 	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
1173c21e0bbfSMatthew R. Ochs 
1174fb67d44dSMatthew R. Ochs 	dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel);
1175c21e0bbfSMatthew R. Ochs }
1176c21e0bbfSMatthew R. Ochs 
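/*
 * Illustrative note: afu_port_sel is a bitmask with one bit per port, so
 * on a hypothetical 4-port AFU with all ports selected (port_sel = 0xf),
 * resetting port 2 transitions the mask as:
 *
 *	0xf & ~(1ULL << 2) = 0xb	(port 2 excluded during reset)
 *	0xb |  (1ULL << 2) = 0xf	(port 2 restored afterwards)
 *
 * with a cxlflash_afu_sync() after each write so the AFU observes the new
 * selection before the port is toggled.
 */
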
1177c21e0bbfSMatthew R. Ochs /**
1178c21e0bbfSMatthew R. Ochs  * afu_err_intr_init() - clears and initializes the AFU for error interrupts
1179c21e0bbfSMatthew R. Ochs  * @afu:	AFU associated with the host.
1180c21e0bbfSMatthew R. Ochs  */
1181c21e0bbfSMatthew R. Ochs static void afu_err_intr_init(struct afu *afu)
1182c21e0bbfSMatthew R. Ochs {
118378ae028eSMatthew R. Ochs 	struct cxlflash_cfg *cfg = afu->parent;
11840aa14887SMatthew R. Ochs 	__be64 __iomem *fc_port_regs;
1185c21e0bbfSMatthew R. Ochs 	int i;
1186bfc0bab1SUma Krishnan 	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
1187c21e0bbfSMatthew R. Ochs 	u64 reg;
1188c21e0bbfSMatthew R. Ochs 
1189c21e0bbfSMatthew R. Ochs 	/* Global async interrupts: AFU clears afu_ctrl on context exit
1190c21e0bbfSMatthew R. Ochs 	 * if async interrupts were sent to that context. This prevents
1191c21e0bbfSMatthew R. Ochs 	 * the AFU from sending further async interrupts when there is
1192c21e0bbfSMatthew R. Ochs 	 * nobody to receive them.
1193c21e0bbfSMatthew R. Ochs 	 */
1195c21e0bbfSMatthew R. Ochs 
1196c21e0bbfSMatthew R. Ochs 	/* mask all */
1197c21e0bbfSMatthew R. Ochs 	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
1198bfc0bab1SUma Krishnan 	/* set LISN# to send and point to primary master context */
1199bfc0bab1SUma Krishnan 	reg = ((u64) (((hwq->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);
1200c21e0bbfSMatthew R. Ochs 
1201c21e0bbfSMatthew R. Ochs 	if (afu->internal_lun)
1202c21e0bbfSMatthew R. Ochs 		reg |= 1;	/* Bit 63 indicates local lun */
1203c21e0bbfSMatthew R. Ochs 	writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
1204c21e0bbfSMatthew R. Ochs 	/* clear all */
1205c21e0bbfSMatthew R. Ochs 	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
1206c21e0bbfSMatthew R. Ochs 	/* unmask bits that are of interest */
1207c21e0bbfSMatthew R. Ochs 	/* note: afu can send an interrupt after this step */
1208c21e0bbfSMatthew R. Ochs 	writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
1209c21e0bbfSMatthew R. Ochs 	/* clear again in case a bit came on after previous clear but before */
1210c21e0bbfSMatthew R. Ochs 	/* unmask */
1211c21e0bbfSMatthew R. Ochs 	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
1212c21e0bbfSMatthew R. Ochs 
1213c21e0bbfSMatthew R. Ochs 	/* Clear/Set internal lun bits */
12140aa14887SMatthew R. Ochs 	fc_port_regs = get_fc_port_regs(cfg, 0);
12150aa14887SMatthew R. Ochs 	reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
1216c21e0bbfSMatthew R. Ochs 	reg &= SISL_FC_INTERNAL_MASK;
1217c21e0bbfSMatthew R. Ochs 	if (afu->internal_lun)
1218c21e0bbfSMatthew R. Ochs 		reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
12190aa14887SMatthew R. Ochs 	writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
1220c21e0bbfSMatthew R. Ochs 
1221c21e0bbfSMatthew R. Ochs 	/* now clear FC errors */
122278ae028eSMatthew R. Ochs 	for (i = 0; i < cfg->num_fc_ports; i++) {
12230aa14887SMatthew R. Ochs 		fc_port_regs = get_fc_port_regs(cfg, i);
12240aa14887SMatthew R. Ochs 
12250aa14887SMatthew R. Ochs 		writeq_be(0xFFFFFFFFU, &fc_port_regs[FC_ERROR / 8]);
12260aa14887SMatthew R. Ochs 		writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
1227c21e0bbfSMatthew R. Ochs 	}
1228c21e0bbfSMatthew R. Ochs 
1229c21e0bbfSMatthew R. Ochs 	/* sync interrupts for master's IOARRIN write */
1230c21e0bbfSMatthew R. Ochs 	/* note that unlike asyncs, there can be no pending sync interrupts */
1231c21e0bbfSMatthew R. Ochs 	/* at this time (this is a fresh context and master has not written */
1232c21e0bbfSMatthew R. Ochs 	/* IOARRIN yet), so there is nothing to clear. */
1233c21e0bbfSMatthew R. Ochs 
1234c21e0bbfSMatthew R. Ochs 	/* set LISN#, it is always sent to the context that wrote IOARRIN */
12353065267aSMatthew R. Ochs 	for (i = 0; i < afu->num_hwqs; i++) {
1236bfc0bab1SUma Krishnan 		hwq = get_hwq(afu, i);
1237bfc0bab1SUma Krishnan 
1238bfc0bab1SUma Krishnan 		writeq_be(SISL_MSI_SYNC_ERROR, &hwq->host_map->ctx_ctrl);
1239bfc0bab1SUma Krishnan 		writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask);
1240bfc0bab1SUma Krishnan 	}
1241c21e0bbfSMatthew R. Ochs }
1242c21e0bbfSMatthew R. Ochs 
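/*
 * Illustrative note: the afu_ctrl value built above packs the routing
 * information into the high-order bits:
 *
 *	reg = ((ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR) << 40
 *
 * i.e. the context handle lands in bits 63:48, the LISN in bits 47:40,
 * and bit 0 separately flags the internal LUN. These field positions are
 * inferred from the expression itself, not from the SISLite specification.
 */
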
1243c21e0bbfSMatthew R. Ochs /**
1244c21e0bbfSMatthew R. Ochs  * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
1245c21e0bbfSMatthew R. Ochs  * @irq:	Interrupt number.
1246c21e0bbfSMatthew R. Ochs  * @data:	Private data provided at interrupt registration, the hardware queue.
1247c21e0bbfSMatthew R. Ochs  *
1248c21e0bbfSMatthew R. Ochs  * Return: Always return IRQ_HANDLED.
1249c21e0bbfSMatthew R. Ochs  */
1250c21e0bbfSMatthew R. Ochs static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
1251c21e0bbfSMatthew R. Ochs {
1252bfc0bab1SUma Krishnan 	struct hwq *hwq = (struct hwq *)data;
1253bfc0bab1SUma Krishnan 	struct cxlflash_cfg *cfg = hwq->afu->parent;
1254fb67d44dSMatthew R. Ochs 	struct device *dev = &cfg->dev->dev;
1255c21e0bbfSMatthew R. Ochs 	u64 reg;
1256c21e0bbfSMatthew R. Ochs 	u64 reg_unmasked;
1257c21e0bbfSMatthew R. Ochs 
1258bfc0bab1SUma Krishnan 	reg = readq_be(&hwq->host_map->intr_status);
1259c21e0bbfSMatthew R. Ochs 	reg_unmasked = (reg & SISL_ISTATUS_UNMASK);
1260c21e0bbfSMatthew R. Ochs 
1261c21e0bbfSMatthew R. Ochs 	if (reg_unmasked == 0UL) {
1262fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n",
1263fb67d44dSMatthew R. Ochs 			__func__, reg);
1264c21e0bbfSMatthew R. Ochs 		goto cxlflash_sync_err_irq_exit;
1265c21e0bbfSMatthew R. Ochs 	}
1266c21e0bbfSMatthew R. Ochs 
1267fb67d44dSMatthew R. Ochs 	dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n",
1268fb67d44dSMatthew R. Ochs 		__func__, reg);
1269c21e0bbfSMatthew R. Ochs 
1270bfc0bab1SUma Krishnan 	writeq_be(reg_unmasked, &hwq->host_map->intr_clear);
1271c21e0bbfSMatthew R. Ochs 
1272c21e0bbfSMatthew R. Ochs cxlflash_sync_err_irq_exit:
1273c21e0bbfSMatthew R. Ochs 	return IRQ_HANDLED;
1274c21e0bbfSMatthew R. Ochs }
1275c21e0bbfSMatthew R. Ochs 
1276c21e0bbfSMatthew R. Ochs /**
127776a6ebbeSMatthew R. Ochs  * process_hrrq() - process the read-response queue
127876a6ebbeSMatthew R. Ochs  * @hwq:	Hardware queue associated with the host.
1279f918b4a8SMatthew R. Ochs  * @doneq:	Queue of commands harvested from the RRQ.
1280cba06e6dSMatthew R. Ochs  * @budget:	Threshold of RRQ entries to process.
1281f918b4a8SMatthew R. Ochs  *
1282f918b4a8SMatthew R. Ochs  * This routine must be called with the RRQ spin lock held and interrupts disabled.
1283c21e0bbfSMatthew R. Ochs  *
128476a6ebbeSMatthew R. Ochs  * Return: The number of entries processed.
1285c21e0bbfSMatthew R. Ochs  */
1286bfc0bab1SUma Krishnan static int process_hrrq(struct hwq *hwq, struct list_head *doneq, int budget)
1287c21e0bbfSMatthew R. Ochs {
1288bfc0bab1SUma Krishnan 	struct afu *afu = hwq->afu;
1289c21e0bbfSMatthew R. Ochs 	struct afu_cmd *cmd;
1290696d0b0cSMatthew R. Ochs 	struct sisl_ioasa *ioasa;
1291696d0b0cSMatthew R. Ochs 	struct sisl_ioarcb *ioarcb;
1292bfc0bab1SUma Krishnan 	bool toggle = hwq->toggle;
129376a6ebbeSMatthew R. Ochs 	int num_hrrq = 0;
1294c21e0bbfSMatthew R. Ochs 	u64 entry,
1295bfc0bab1SUma Krishnan 	    *hrrq_start = hwq->hrrq_start,
1296bfc0bab1SUma Krishnan 	    *hrrq_end = hwq->hrrq_end,
1297bfc0bab1SUma Krishnan 	    *hrrq_curr = hwq->hrrq_curr;
1298c21e0bbfSMatthew R. Ochs 
1299cba06e6dSMatthew R. Ochs 	/* Process ready RRQ entries up to the specified budget (if any) */
1300c21e0bbfSMatthew R. Ochs 	while (true) {
1301c21e0bbfSMatthew R. Ochs 		entry = *hrrq_curr;
1302c21e0bbfSMatthew R. Ochs 
1303c21e0bbfSMatthew R. Ochs 		if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
1304c21e0bbfSMatthew R. Ochs 			break;
1305c21e0bbfSMatthew R. Ochs 
1306696d0b0cSMatthew R. Ochs 		entry &= ~SISL_RESP_HANDLE_T_BIT;
1307696d0b0cSMatthew R. Ochs 
1308696d0b0cSMatthew R. Ochs 		if (afu_is_sq_cmd_mode(afu)) {
1309696d0b0cSMatthew R. Ochs 			ioasa = (struct sisl_ioasa *)entry;
1310696d0b0cSMatthew R. Ochs 			cmd = container_of(ioasa, struct afu_cmd, sa);
1311696d0b0cSMatthew R. Ochs 		} else {
1312696d0b0cSMatthew R. Ochs 			ioarcb = (struct sisl_ioarcb *)entry;
1313696d0b0cSMatthew R. Ochs 			cmd = container_of(ioarcb, struct afu_cmd, rcb);
1314696d0b0cSMatthew R. Ochs 		}
1315696d0b0cSMatthew R. Ochs 
1316f918b4a8SMatthew R. Ochs 		list_add_tail(&cmd->queue, doneq);
1317c21e0bbfSMatthew R. Ochs 
1318c21e0bbfSMatthew R. Ochs 		/* Advance to next entry or wrap and flip the toggle bit */
1319c21e0bbfSMatthew R. Ochs 		if (hrrq_curr < hrrq_end)
1320c21e0bbfSMatthew R. Ochs 			hrrq_curr++;
1321c21e0bbfSMatthew R. Ochs 		else {
1322c21e0bbfSMatthew R. Ochs 			hrrq_curr = hrrq_start;
1323c21e0bbfSMatthew R. Ochs 			toggle ^= SISL_RESP_HANDLE_T_BIT;
1324c21e0bbfSMatthew R. Ochs 		}
1325696d0b0cSMatthew R. Ochs 
1326bfc0bab1SUma Krishnan 		atomic_inc(&hwq->hsq_credits);
132776a6ebbeSMatthew R. Ochs 		num_hrrq++;
1328cba06e6dSMatthew R. Ochs 
1329cba06e6dSMatthew R. Ochs 		if (budget > 0 && num_hrrq >= budget)
1330cba06e6dSMatthew R. Ochs 			break;
1331c21e0bbfSMatthew R. Ochs 	}
1332c21e0bbfSMatthew R. Ochs 
1333bfc0bab1SUma Krishnan 	hwq->hrrq_curr = hrrq_curr;
1334bfc0bab1SUma Krishnan 	hwq->toggle = toggle;
1335c21e0bbfSMatthew R. Ochs 
133676a6ebbeSMatthew R. Ochs 	return num_hrrq;
133776a6ebbeSMatthew R. Ochs }
133876a6ebbeSMatthew R. Ochs 
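/*
 * Illustrative sketch (simplified, not part of the driver): the toggle
 * bit is a generation marker that lets the consumer detect new entries
 * without a shared producer index - the producer writes each entry with
 * the current generation's toggle value and flips it on every wrap. In
 * miniature, assuming the same convention:
 */
static int __maybe_unused example_consume(u64 *ring, int nentries, u64 toggle)
{
	int i, n = 0;

	for (i = 0; i < nentries; i++) {
		u64 entry = ring[i];

		/* An entry is ready only when its toggle matches ours */
		if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
			break;

		/* Real code decodes entry & ~SISL_RESP_HANDLE_T_BIT here */
		n++;
	}

	return n;
}
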
133976a6ebbeSMatthew R. Ochs /**
1340f918b4a8SMatthew R. Ochs  * process_cmd_doneq() - process a queue of harvested RRQ commands
1341f918b4a8SMatthew R. Ochs  * @doneq:	Queue of completed commands.
1342f918b4a8SMatthew R. Ochs  *
1343f918b4a8SMatthew R. Ochs  * Note that upon return the queue can no longer be trusted.
1344f918b4a8SMatthew R. Ochs  */
1345f918b4a8SMatthew R. Ochs static void process_cmd_doneq(struct list_head *doneq)
1346f918b4a8SMatthew R. Ochs {
1347f918b4a8SMatthew R. Ochs 	struct afu_cmd *cmd, *tmp;
1348f918b4a8SMatthew R. Ochs 
1349f918b4a8SMatthew R. Ochs 	WARN_ON(list_empty(doneq));
1350f918b4a8SMatthew R. Ochs 
1351f918b4a8SMatthew R. Ochs 	list_for_each_entry_safe(cmd, tmp, doneq, queue)
1352f918b4a8SMatthew R. Ochs 		cmd_complete(cmd);
1353f918b4a8SMatthew R. Ochs }
1354f918b4a8SMatthew R. Ochs 
1355f918b4a8SMatthew R. Ochs /**
1356cba06e6dSMatthew R. Ochs  * cxlflash_irqpoll() - IRQ polling handler for the read-response queue
1357cba06e6dSMatthew R. Ochs  * @irqpoll:	IRQ poll structure associated with queue to poll.
1358cba06e6dSMatthew R. Ochs  * @budget:	Threshold of RRQ entries to process per poll.
1359cba06e6dSMatthew R. Ochs  *
1360cba06e6dSMatthew R. Ochs  * Return: The number of entries processed.
1361cba06e6dSMatthew R. Ochs  */
1362cba06e6dSMatthew R. Ochs static int cxlflash_irqpoll(struct irq_poll *irqpoll, int budget)
1363cba06e6dSMatthew R. Ochs {
1364bfc0bab1SUma Krishnan 	struct hwq *hwq = container_of(irqpoll, struct hwq, irqpoll);
1365cba06e6dSMatthew R. Ochs 	unsigned long hrrq_flags;
1366cba06e6dSMatthew R. Ochs 	LIST_HEAD(doneq);
1367cba06e6dSMatthew R. Ochs 	int num_entries = 0;
1368cba06e6dSMatthew R. Ochs 
1369bfc0bab1SUma Krishnan 	spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);
1370cba06e6dSMatthew R. Ochs 
1371bfc0bab1SUma Krishnan 	num_entries = process_hrrq(hwq, &doneq, budget);
1372cba06e6dSMatthew R. Ochs 	if (num_entries < budget)
1373cba06e6dSMatthew R. Ochs 		irq_poll_complete(irqpoll);
1374cba06e6dSMatthew R. Ochs 
1375bfc0bab1SUma Krishnan 	spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
1376cba06e6dSMatthew R. Ochs 
1377cba06e6dSMatthew R. Ochs 	process_cmd_doneq(&doneq);
1378cba06e6dSMatthew R. Ochs 	return num_entries;
1379cba06e6dSMatthew R. Ochs }
1380cba06e6dSMatthew R. Ochs 
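/*
 * Illustrative note: this follows the standard irq_poll contract - the
 * handler consumes at most 'budget' entries and calls irq_poll_complete()
 * only when fewer than 'budget' were found, signaling that the queue is
 * drained and the poll instance may be retired until rescheduled.
 */
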
1381cba06e6dSMatthew R. Ochs /**
138276a6ebbeSMatthew R. Ochs  * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
138376a6ebbeSMatthew R. Ochs  * @irq:	Interrupt number.
138476a6ebbeSMatthew R. Ochs  * @data:	Private data provided at interrupt registration, the hardware queue.
138576a6ebbeSMatthew R. Ochs  *
1386f918b4a8SMatthew R. Ochs  * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found.
138776a6ebbeSMatthew R. Ochs  */
138876a6ebbeSMatthew R. Ochs static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
138976a6ebbeSMatthew R. Ochs {
1390bfc0bab1SUma Krishnan 	struct hwq *hwq = (struct hwq *)data;
1391bfc0bab1SUma Krishnan 	struct afu *afu = hwq->afu;
1392f918b4a8SMatthew R. Ochs 	unsigned long hrrq_flags;
1393f918b4a8SMatthew R. Ochs 	LIST_HEAD(doneq);
1394f918b4a8SMatthew R. Ochs 	int num_entries = 0;
139576a6ebbeSMatthew R. Ochs 
1396bfc0bab1SUma Krishnan 	spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);
1397cba06e6dSMatthew R. Ochs 
1398cba06e6dSMatthew R. Ochs 	if (afu_is_irqpoll_enabled(afu)) {
1399bfc0bab1SUma Krishnan 		irq_poll_sched(&hwq->irqpoll);
1400bfc0bab1SUma Krishnan 		spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
1401cba06e6dSMatthew R. Ochs 		return IRQ_HANDLED;
1402cba06e6dSMatthew R. Ochs 	}
1403cba06e6dSMatthew R. Ochs 
1404bfc0bab1SUma Krishnan 	num_entries = process_hrrq(hwq, &doneq, -1);
1405bfc0bab1SUma Krishnan 	spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
1406f918b4a8SMatthew R. Ochs 
1407f918b4a8SMatthew R. Ochs 	if (num_entries == 0)
1408f918b4a8SMatthew R. Ochs 		return IRQ_NONE;
1409f918b4a8SMatthew R. Ochs 
1410f918b4a8SMatthew R. Ochs 	process_cmd_doneq(&doneq);
1411c21e0bbfSMatthew R. Ochs 	return IRQ_HANDLED;
1412c21e0bbfSMatthew R. Ochs }
1413c21e0bbfSMatthew R. Ochs 
1414e2ef33faSMatthew R. Ochs /*
1415e2ef33faSMatthew R. Ochs  * Asynchronous interrupt information table
1416e2ef33faSMatthew R. Ochs  *
1417e2ef33faSMatthew R. Ochs  * NOTE:
1418e2ef33faSMatthew R. Ochs  *	- Order matters here as this array is indexed by bit position.
1419e2ef33faSMatthew R. Ochs  *
1420e2ef33faSMatthew R. Ochs  *	- The checkpatch script considers the BUILD_SISL_ASTATUS_FC_PORT macro
1421e2ef33faSMatthew R. Ochs  *	  as complex and complains due to a lack of parentheses/braces.
1422e2ef33faSMatthew R. Ochs  */
1423e2ef33faSMatthew R. Ochs #define ASTATUS_FC(_a, _b, _c, _d)					 \
1424e2ef33faSMatthew R. Ochs 	{ SISL_ASTATUS_FC##_a##_##_b, _c, _a, (_d) }
1425e2ef33faSMatthew R. Ochs 
1426e2ef33faSMatthew R. Ochs #define BUILD_SISL_ASTATUS_FC_PORT(_a)					 \
1427e2ef33faSMatthew R. Ochs 	ASTATUS_FC(_a, LINK_UP, "link up", 0),				 \
1428e2ef33faSMatthew R. Ochs 	ASTATUS_FC(_a, LINK_DN, "link down", 0),			 \
1429e2ef33faSMatthew R. Ochs 	ASTATUS_FC(_a, LOGI_S, "login succeeded", SCAN_HOST),		 \
1430e2ef33faSMatthew R. Ochs 	ASTATUS_FC(_a, LOGI_F, "login failed", CLR_FC_ERROR),		 \
1431e2ef33faSMatthew R. Ochs 	ASTATUS_FC(_a, LOGI_R, "login timed out, retrying", LINK_RESET), \
1432e2ef33faSMatthew R. Ochs 	ASTATUS_FC(_a, CRC_T, "CRC threshold exceeded", LINK_RESET),	 \
1433e2ef33faSMatthew R. Ochs 	ASTATUS_FC(_a, LOGO, "target initiated LOGO", 0),		 \
1434e2ef33faSMatthew R. Ochs 	ASTATUS_FC(_a, OTHER, "other error", CLR_FC_ERROR | LINK_RESET)
1435e2ef33faSMatthew R. Ochs 
1436e2ef33faSMatthew R. Ochs static const struct asyc_intr_info ainfo[] = {
1437e2ef33faSMatthew R. Ochs 	BUILD_SISL_ASTATUS_FC_PORT(1),
1438e2ef33faSMatthew R. Ochs 	BUILD_SISL_ASTATUS_FC_PORT(0),
1439e2ef33faSMatthew R. Ochs 	BUILD_SISL_ASTATUS_FC_PORT(3),
1440e2ef33faSMatthew R. Ochs 	BUILD_SISL_ASTATUS_FC_PORT(2)
1441e2ef33faSMatthew R. Ochs };
1442e2ef33faSMatthew R. Ochs 
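/*
 * Illustrative expansion: for port 1 / LINK_UP the builder macro yields
 *
 *	ASTATUS_FC(1, LINK_UP, "link up", 0)
 *	  => { SISL_ASTATUS_FC1_LINK_UP, "link up", 1, (0) }
 *
 * so each entry records the status bit, a description, the port number
 * and an action mask, and table order must match the bit layout of
 * aintr_status because the handler below indexes it by bit position.
 */
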
1443c21e0bbfSMatthew R. Ochs /**
1444c21e0bbfSMatthew R. Ochs  * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
1445c21e0bbfSMatthew R. Ochs  * @irq:	Interrupt number.
1446c21e0bbfSMatthew R. Ochs  * @data:	Private data provided at interrupt registration, the hardware queue.
1447c21e0bbfSMatthew R. Ochs  *
1448c21e0bbfSMatthew R. Ochs  * Return: Always return IRQ_HANDLED.
1449c21e0bbfSMatthew R. Ochs  */
1450c21e0bbfSMatthew R. Ochs static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
1451c21e0bbfSMatthew R. Ochs {
1452bfc0bab1SUma Krishnan 	struct hwq *hwq = (struct hwq *)data;
1453bfc0bab1SUma Krishnan 	struct afu *afu = hwq->afu;
14544392ba49SMatthew R. Ochs 	struct cxlflash_cfg *cfg = afu->parent;
14554392ba49SMatthew R. Ochs 	struct device *dev = &cfg->dev->dev;
1456c21e0bbfSMatthew R. Ochs 	const struct asyc_intr_info *info;
14571786f4a0SMatthew R. Ochs 	struct sisl_global_map __iomem *global = &afu->afu_map->global;
14580aa14887SMatthew R. Ochs 	__be64 __iomem *fc_port_regs;
1459e2ef33faSMatthew R. Ochs 	u64 reg_unmasked;
1460c21e0bbfSMatthew R. Ochs 	u64 reg;
1461e2ef33faSMatthew R. Ochs 	u64 bit;
1462c21e0bbfSMatthew R. Ochs 	u8 port;
1463c21e0bbfSMatthew R. Ochs 
1464c21e0bbfSMatthew R. Ochs 	reg = readq_be(&global->regs.aintr_status);
1465c21e0bbfSMatthew R. Ochs 	reg_unmasked = (reg & SISL_ASTATUS_UNMASK);
1466c21e0bbfSMatthew R. Ochs 
1467e2ef33faSMatthew R. Ochs 	if (unlikely(reg_unmasked == 0)) {
1468fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n",
1469c21e0bbfSMatthew R. Ochs 			__func__, reg);
1470c21e0bbfSMatthew R. Ochs 		goto out;
1471c21e0bbfSMatthew R. Ochs 	}
1472c21e0bbfSMatthew R. Ochs 
1473f15fbf8dSMatthew R. Ochs 	/* FYI, it is 'okay' to clear AFU status before FC_ERROR */
1474c21e0bbfSMatthew R. Ochs 	writeq_be(reg_unmasked, &global->regs.aintr_clear);
1475c21e0bbfSMatthew R. Ochs 
1476f15fbf8dSMatthew R. Ochs 	/* Check each bit that is on */
1477e2ef33faSMatthew R. Ochs 	for_each_set_bit(bit, (ulong *)&reg_unmasked, BITS_PER_LONG) {
1478e2ef33faSMatthew R. Ochs 		if (unlikely(bit >= ARRAY_SIZE(ainfo))) {
1479e2ef33faSMatthew R. Ochs 			WARN_ON_ONCE(1);
1480c21e0bbfSMatthew R. Ochs 			continue;
1481e2ef33faSMatthew R. Ochs 		}
1482e2ef33faSMatthew R. Ochs 
1483e2ef33faSMatthew R. Ochs 		info = &ainfo[bit];
1484e2ef33faSMatthew R. Ochs 		if (unlikely(info->status != 1ULL << bit)) {
1485e2ef33faSMatthew R. Ochs 			WARN_ON_ONCE(1);
1486e2ef33faSMatthew R. Ochs 			continue;
1487e2ef33faSMatthew R. Ochs 		}
1488c21e0bbfSMatthew R. Ochs 
1489c21e0bbfSMatthew R. Ochs 		port = info->port;
14900aa14887SMatthew R. Ochs 		fc_port_regs = get_fc_port_regs(cfg, port);
1491c21e0bbfSMatthew R. Ochs 
1492fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n",
1493c21e0bbfSMatthew R. Ochs 			__func__, port, info->desc,
14940aa14887SMatthew R. Ochs 		       readq_be(&fc_port_regs[FC_STATUS / 8]));
1495c21e0bbfSMatthew R. Ochs 
1496c21e0bbfSMatthew R. Ochs 		/*
1497f15fbf8dSMatthew R. Ochs 		 * Do link reset first, some OTHER errors will set FC_ERROR
1498c21e0bbfSMatthew R. Ochs 		 * again if cleared before or w/o a reset
1499c21e0bbfSMatthew R. Ochs 		 */
1500c21e0bbfSMatthew R. Ochs 		if (info->action & LINK_RESET) {
15014392ba49SMatthew R. Ochs 			dev_err(dev, "%s: FC Port %d: resetting link\n",
1502c21e0bbfSMatthew R. Ochs 				__func__, port);
1503c21e0bbfSMatthew R. Ochs 			cfg->lr_state = LINK_RESET_REQUIRED;
1504c21e0bbfSMatthew R. Ochs 			cfg->lr_port = port;
1505c21e0bbfSMatthew R. Ochs 			schedule_work(&cfg->work_q);
1506c21e0bbfSMatthew R. Ochs 		}
1507c21e0bbfSMatthew R. Ochs 
1508c21e0bbfSMatthew R. Ochs 		if (info->action & CLR_FC_ERROR) {
15090aa14887SMatthew R. Ochs 			reg = readq_be(&fc_port_regs[FC_ERROR / 8]);
1510c21e0bbfSMatthew R. Ochs 
1511c21e0bbfSMatthew R. Ochs 			/*
1512f15fbf8dSMatthew R. Ochs 			 * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
1513c21e0bbfSMatthew R. Ochs 			 * should be the same and tracing one is sufficient.
1514c21e0bbfSMatthew R. Ochs 			 */
1515c21e0bbfSMatthew R. Ochs 
1516fb67d44dSMatthew R. Ochs 			dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n",
1517c21e0bbfSMatthew R. Ochs 				__func__, port, reg);
1518c21e0bbfSMatthew R. Ochs 
15190aa14887SMatthew R. Ochs 			writeq_be(reg, &fc_port_regs[FC_ERROR / 8]);
15200aa14887SMatthew R. Ochs 			writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
1521c21e0bbfSMatthew R. Ochs 		}
1522ef51074aSMatthew R. Ochs 
1523ef51074aSMatthew R. Ochs 		if (info->action & SCAN_HOST) {
1524ef51074aSMatthew R. Ochs 			atomic_inc(&cfg->scan_host_needed);
1525ef51074aSMatthew R. Ochs 			schedule_work(&cfg->work_q);
1526ef51074aSMatthew R. Ochs 		}
1527c21e0bbfSMatthew R. Ochs 	}
1528c21e0bbfSMatthew R. Ochs 
1529c21e0bbfSMatthew R. Ochs out:
1530c21e0bbfSMatthew R. Ochs 	return IRQ_HANDLED;
1531c21e0bbfSMatthew R. Ochs }
1532c21e0bbfSMatthew R. Ochs 
1533c21e0bbfSMatthew R. Ochs /**
1534c21e0bbfSMatthew R. Ochs  * start_context() - starts the master context
15351284fb0cSMatthew R. Ochs  * @cfg:	Internal structure associated with the host.
1536bfc0bab1SUma Krishnan  * @index:	Index of the hardware queue.
1537c21e0bbfSMatthew R. Ochs  *
1538c21e0bbfSMatthew R. Ochs  * Return: A success or failure value from CXL services.
1539c21e0bbfSMatthew R. Ochs  */
1540bfc0bab1SUma Krishnan static int start_context(struct cxlflash_cfg *cfg, u32 index)
1541c21e0bbfSMatthew R. Ochs {
1542fb67d44dSMatthew R. Ochs 	struct device *dev = &cfg->dev->dev;
1543bfc0bab1SUma Krishnan 	struct hwq *hwq = get_hwq(cfg->afu, index);
1544c21e0bbfSMatthew R. Ochs 	int rc = 0;
1545c21e0bbfSMatthew R. Ochs 
1546bfc0bab1SUma Krishnan 	rc = cxl_start_context(hwq->ctx,
1547bfc0bab1SUma Krishnan 			       hwq->work.work_element_descriptor,
1548c21e0bbfSMatthew R. Ochs 			       NULL);
1549c21e0bbfSMatthew R. Ochs 
1550fb67d44dSMatthew R. Ochs 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1551c21e0bbfSMatthew R. Ochs 	return rc;
1552c21e0bbfSMatthew R. Ochs }
1553c21e0bbfSMatthew R. Ochs 
1554c21e0bbfSMatthew R. Ochs /**
1555c21e0bbfSMatthew R. Ochs  * read_vpd() - obtains the WWPNs from VPD
15561284fb0cSMatthew R. Ochs  * @cfg:	Internal structure associated with the host.
155778ae028eSMatthew R. Ochs  * @wwpn:	Array of size MAX_FC_PORTS to pass back WWPNs
1558c21e0bbfSMatthew R. Ochs  *
15591284fb0cSMatthew R. Ochs  * Return: 0 on success, -errno on failure
1560c21e0bbfSMatthew R. Ochs  */
1561c21e0bbfSMatthew R. Ochs static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
1562c21e0bbfSMatthew R. Ochs {
1563fb67d44dSMatthew R. Ochs 	struct device *dev = &cfg->dev->dev;
1564fb67d44dSMatthew R. Ochs 	struct pci_dev *pdev = cfg->dev;
1565c21e0bbfSMatthew R. Ochs 	int rc = 0;
1566c21e0bbfSMatthew R. Ochs 	int ro_start, ro_size, i, j, k;
1567c21e0bbfSMatthew R. Ochs 	ssize_t vpd_size;
1568c21e0bbfSMatthew R. Ochs 	char vpd_data[CXLFLASH_VPD_LEN];
1569c21e0bbfSMatthew R. Ochs 	char tmp_buf[WWPN_BUF_LEN] = { 0 };
15701cd7fabcSMatthew R. Ochs 	char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };
1571c21e0bbfSMatthew R. Ochs 
1572c21e0bbfSMatthew R. Ochs 	/* Get the VPD data from the device */
1573fb67d44dSMatthew R. Ochs 	vpd_size = cxl_read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
1574c21e0bbfSMatthew R. Ochs 	if (unlikely(vpd_size <= 0)) {
1575fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
1576c21e0bbfSMatthew R. Ochs 			__func__, vpd_size);
1577c21e0bbfSMatthew R. Ochs 		rc = -ENODEV;
1578c21e0bbfSMatthew R. Ochs 		goto out;
1579c21e0bbfSMatthew R. Ochs 	}
1580c21e0bbfSMatthew R. Ochs 
1581c21e0bbfSMatthew R. Ochs 	/* Get the read only section offset */
1582c21e0bbfSMatthew R. Ochs 	ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
1583c21e0bbfSMatthew R. Ochs 				    PCI_VPD_LRDT_RO_DATA);
1584c21e0bbfSMatthew R. Ochs 	if (unlikely(ro_start < 0)) {
1585fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: VPD Read-only data not found\n", __func__);
1586c21e0bbfSMatthew R. Ochs 		rc = -ENODEV;
1587c21e0bbfSMatthew R. Ochs 		goto out;
1588c21e0bbfSMatthew R. Ochs 	}
1589c21e0bbfSMatthew R. Ochs 
1590c21e0bbfSMatthew R. Ochs 	/* Get the read only section size, cap when extends beyond read VPD */
1591c21e0bbfSMatthew R. Ochs 	ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
1592c21e0bbfSMatthew R. Ochs 	j = ro_size;
1593c21e0bbfSMatthew R. Ochs 	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
1594c21e0bbfSMatthew R. Ochs 	if (unlikely((i + j) > vpd_size)) {
1595fb67d44dSMatthew R. Ochs 		dev_dbg(dev, "%s: Might need to read more VPD (%d > %ld)\n",
1596c21e0bbfSMatthew R. Ochs 			__func__, (i + j), vpd_size);
1597c21e0bbfSMatthew R. Ochs 		ro_size = vpd_size - i;
1598c21e0bbfSMatthew R. Ochs 	}
1599c21e0bbfSMatthew R. Ochs 
1600c21e0bbfSMatthew R. Ochs 	/*
1601c21e0bbfSMatthew R. Ochs 	 * Find the offset of the WWPN tag within the read only
1602c21e0bbfSMatthew R. Ochs 	 * VPD data and validate the found field (partials are
1603c21e0bbfSMatthew R. Ochs 	 * no good to us). Convert the ASCII data to an integer
1604c21e0bbfSMatthew R. Ochs 	 * value. Note that we must copy to a temporary buffer
1605c21e0bbfSMatthew R. Ochs 	 * because the conversion service requires that the ASCII
1606c21e0bbfSMatthew R. Ochs 	 * string be terminated.
1607c21e0bbfSMatthew R. Ochs 	 */
160878ae028eSMatthew R. Ochs 	for (k = 0; k < cfg->num_fc_ports; k++) {
1609c21e0bbfSMatthew R. Ochs 		j = ro_size;
1610c21e0bbfSMatthew R. Ochs 		i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
1611c21e0bbfSMatthew R. Ochs 
1612c21e0bbfSMatthew R. Ochs 		i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
1613c21e0bbfSMatthew R. Ochs 		if (unlikely(i < 0)) {
1614fb67d44dSMatthew R. Ochs 			dev_err(dev, "%s: Port %d WWPN not found in VPD\n",
1615fb67d44dSMatthew R. Ochs 				__func__, k);
1616c21e0bbfSMatthew R. Ochs 			rc = -ENODEV;
1617c21e0bbfSMatthew R. Ochs 			goto out;
1618c21e0bbfSMatthew R. Ochs 		}
1619c21e0bbfSMatthew R. Ochs 
1620c21e0bbfSMatthew R. Ochs 		j = pci_vpd_info_field_size(&vpd_data[i]);
1621c21e0bbfSMatthew R. Ochs 		i += PCI_VPD_INFO_FLD_HDR_SIZE;
1622c21e0bbfSMatthew R. Ochs 		if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
1623fb67d44dSMatthew R. Ochs 			dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n",
1624c21e0bbfSMatthew R. Ochs 				__func__, k);
1625c21e0bbfSMatthew R. Ochs 			rc = -ENODEV;
1626c21e0bbfSMatthew R. Ochs 			goto out;
1627c21e0bbfSMatthew R. Ochs 		}
1628c21e0bbfSMatthew R. Ochs 
1629c21e0bbfSMatthew R. Ochs 		memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
1630c21e0bbfSMatthew R. Ochs 		rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
1631c21e0bbfSMatthew R. Ochs 		if (unlikely(rc)) {
1632fb67d44dSMatthew R. Ochs 			dev_err(dev, "%s: WWPN conversion failed for port %d\n",
1633fb67d44dSMatthew R. Ochs 				__func__, k);
1634c21e0bbfSMatthew R. Ochs 			rc = -ENODEV;
1635c21e0bbfSMatthew R. Ochs 			goto out;
1636c21e0bbfSMatthew R. Ochs 		}
163778ae028eSMatthew R. Ochs 
163878ae028eSMatthew R. Ochs 		dev_dbg(dev, "%s: wwpn%d=%016llx\n", __func__, k, wwpn[k]);
1639c21e0bbfSMatthew R. Ochs 	}
1640c21e0bbfSMatthew R. Ochs 
1641c21e0bbfSMatthew R. Ochs out:
1642fb67d44dSMatthew R. Ochs 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1643c21e0bbfSMatthew R. Ochs 	return rc;
1644c21e0bbfSMatthew R. Ochs }
1645c21e0bbfSMatthew R. Ochs 
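/*
 * Illustrative note: a VPD info field is a 2-byte keyword, a 1-byte
 * length, then the data, so a hypothetical "V5" record carrying a
 * 16-character WWPN looks like:
 *
 *	'V' '5' 0x10 '5' '0' '0' '5' '0' '7' ...
 *
 * which is why the loop above insists on j == WWPN_LEN, copies the ASCII
 * into a NUL-terminated buffer, and converts with kstrtoul() using
 * WWPN_LEN (assumed to be 16, conveniently doubling as the hex base).
 */
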
1646c21e0bbfSMatthew R. Ochs /**
1647c21e0bbfSMatthew R. Ochs  * init_pcr() - initialize the provisioning and control registers
16481284fb0cSMatthew R. Ochs  * @cfg:	Internal structure associated with the host.
1649c21e0bbfSMatthew R. Ochs  *
1650c21e0bbfSMatthew R. Ochs  * Also sets up fast access to the mapped registers and initializes AFU
1651c21e0bbfSMatthew R. Ochs  * command fields that never change.
1652c21e0bbfSMatthew R. Ochs  */
165315305514SMatthew R. Ochs static void init_pcr(struct cxlflash_cfg *cfg)
1654c21e0bbfSMatthew R. Ochs {
1655c21e0bbfSMatthew R. Ochs 	struct afu *afu = cfg->afu;
16561786f4a0SMatthew R. Ochs 	struct sisl_ctrl_map __iomem *ctrl_map;
1657bfc0bab1SUma Krishnan 	struct hwq *hwq;
1658c21e0bbfSMatthew R. Ochs 	int i;
1659c21e0bbfSMatthew R. Ochs 
1660c21e0bbfSMatthew R. Ochs 	for (i = 0; i < MAX_CONTEXT; i++) {
1661c21e0bbfSMatthew R. Ochs 		ctrl_map = &afu->afu_map->ctrls[i].ctrl;
1662f15fbf8dSMatthew R. Ochs 		/* Disrupt any clients that could be running */
1663c21e0bbfSMatthew R. Ochs 		/* e.g. clients that survived a master restart */
1664c21e0bbfSMatthew R. Ochs 		writeq_be(0, &ctrl_map->rht_start);
1665c21e0bbfSMatthew R. Ochs 		writeq_be(0, &ctrl_map->rht_cnt_id);
1666c21e0bbfSMatthew R. Ochs 		writeq_be(0, &ctrl_map->ctx_cap);
1667c21e0bbfSMatthew R. Ochs 	}
1668c21e0bbfSMatthew R. Ochs 
1669bfc0bab1SUma Krishnan 	/* Copy frequently used fields into hwq */
16703065267aSMatthew R. Ochs 	for (i = 0; i < afu->num_hwqs; i++) {
1671bfc0bab1SUma Krishnan 		hwq = get_hwq(afu, i);
1672bfc0bab1SUma Krishnan 
1673bfc0bab1SUma Krishnan 		hwq->ctx_hndl = (u16) cxl_process_element(hwq->ctx);
1674bfc0bab1SUma Krishnan 		hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host;
1675bfc0bab1SUma Krishnan 		hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl;
1676c21e0bbfSMatthew R. Ochs 
1677c21e0bbfSMatthew R. Ochs 		/* Program the Endian Control for the master context */
1678bfc0bab1SUma Krishnan 		writeq_be(SISL_ENDIAN_CTRL, &hwq->host_map->endian_ctrl);
1679bfc0bab1SUma Krishnan 	}
1680c21e0bbfSMatthew R. Ochs }
1681c21e0bbfSMatthew R. Ochs 
1682c21e0bbfSMatthew R. Ochs /**
1683c21e0bbfSMatthew R. Ochs  * init_global() - initialize AFU global registers
16841284fb0cSMatthew R. Ochs  * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
1685c21e0bbfSMatthew R. Ochs  */
168615305514SMatthew R. Ochs static int init_global(struct cxlflash_cfg *cfg)
1687c21e0bbfSMatthew R. Ochs {
1688c21e0bbfSMatthew R. Ochs 	struct afu *afu = cfg->afu;
16894392ba49SMatthew R. Ochs 	struct device *dev = &cfg->dev->dev;
1690bfc0bab1SUma Krishnan 	struct hwq *hwq;
1691bfc0bab1SUma Krishnan 	struct sisl_host_map __iomem *hmap;
16920aa14887SMatthew R. Ochs 	__be64 __iomem *fc_port_regs;
169378ae028eSMatthew R. Ochs 	u64 wwpn[MAX_FC_PORTS];	/* wwpn of AFU ports */
1694c21e0bbfSMatthew R. Ochs 	int i = 0, num_ports = 0;
1695c21e0bbfSMatthew R. Ochs 	int rc = 0;
1696c21e0bbfSMatthew R. Ochs 	u64 reg;
1697c21e0bbfSMatthew R. Ochs 
1698c21e0bbfSMatthew R. Ochs 	rc = read_vpd(cfg, &wwpn[0]);
1699c21e0bbfSMatthew R. Ochs 	if (rc) {
17004392ba49SMatthew R. Ochs 		dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
1701c21e0bbfSMatthew R. Ochs 		goto out;
1702c21e0bbfSMatthew R. Ochs 	}
1703c21e0bbfSMatthew R. Ochs 
1704bfc0bab1SUma Krishnan 	/* Set up RRQ and SQ in HWQ for master issued cmds */
17053065267aSMatthew R. Ochs 	for (i = 0; i < afu->num_hwqs; i++) {
1706bfc0bab1SUma Krishnan 		hwq = get_hwq(afu, i);
1707bfc0bab1SUma Krishnan 		hmap = hwq->host_map;
1708bfc0bab1SUma Krishnan 
1709bfc0bab1SUma Krishnan 		writeq_be((u64) hwq->hrrq_start, &hmap->rrq_start);
1710bfc0bab1SUma Krishnan 		writeq_be((u64) hwq->hrrq_end, &hmap->rrq_end);
1711c21e0bbfSMatthew R. Ochs 
1712696d0b0cSMatthew R. Ochs 		if (afu_is_sq_cmd_mode(afu)) {
1713bfc0bab1SUma Krishnan 			writeq_be((u64)hwq->hsq_start, &hmap->sq_start);
1714bfc0bab1SUma Krishnan 			writeq_be((u64)hwq->hsq_end, &hmap->sq_end);
1715bfc0bab1SUma Krishnan 		}
1716696d0b0cSMatthew R. Ochs 	}
1717696d0b0cSMatthew R. Ochs 
1718c21e0bbfSMatthew R. Ochs 	/* AFU configuration */
1719c21e0bbfSMatthew R. Ochs 	reg = readq_be(&afu->afu_map->global.regs.afu_config);
1720c21e0bbfSMatthew R. Ochs 	reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
1721c21e0bbfSMatthew R. Ochs 	/* enable all auto retry options and control endianness */
1722c21e0bbfSMatthew R. Ochs 	/* leave others at default: */
1723c21e0bbfSMatthew R. Ochs 	/* CTX_CAP write protected, mbox_r does not clear on read and */
1724c21e0bbfSMatthew R. Ochs 	/* checker on if dual afu */
1725c21e0bbfSMatthew R. Ochs 	writeq_be(reg, &afu->afu_map->global.regs.afu_config);
1726c21e0bbfSMatthew R. Ochs 
1727f15fbf8dSMatthew R. Ochs 	/* Global port select: select either port */
1728c21e0bbfSMatthew R. Ochs 	if (afu->internal_lun) {
1729f15fbf8dSMatthew R. Ochs 		/* Only use port 0 */
1730c21e0bbfSMatthew R. Ochs 		writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
173178ae028eSMatthew R. Ochs 		num_ports = 0;
1732c21e0bbfSMatthew R. Ochs 	} else {
17338fa4f177SMatthew R. Ochs 		writeq_be(PORT_MASK(cfg->num_fc_ports),
17348fa4f177SMatthew R. Ochs 			  &afu->afu_map->global.regs.afu_port_sel);
173578ae028eSMatthew R. Ochs 		num_ports = cfg->num_fc_ports;
1736c21e0bbfSMatthew R. Ochs 	}
1737c21e0bbfSMatthew R. Ochs 
1738c21e0bbfSMatthew R. Ochs 	for (i = 0; i < num_ports; i++) {
17390aa14887SMatthew R. Ochs 		fc_port_regs = get_fc_port_regs(cfg, i);
17400aa14887SMatthew R. Ochs 
1741f15fbf8dSMatthew R. Ochs 		/* Unmask all errors (but they are still masked at AFU) */
17420aa14887SMatthew R. Ochs 		writeq_be(0, &fc_port_regs[FC_ERRMSK / 8]);
1743f15fbf8dSMatthew R. Ochs 		/* Clear CRC error cnt & set a threshold */
17440aa14887SMatthew R. Ochs 		(void)readq_be(&fc_port_regs[FC_CNT_CRCERR / 8]);
17450aa14887SMatthew R. Ochs 		writeq_be(MC_CRC_THRESH, &fc_port_regs[FC_CRC_THRESH / 8]);
1746c21e0bbfSMatthew R. Ochs 
1747f15fbf8dSMatthew R. Ochs 		/* Set WWPNs. If already programmed, wwpn[i] is 0 */
1748f8013261SMatthew R. Ochs 		if (wwpn[i] != 0)
17490aa14887SMatthew R. Ochs 			afu_set_wwpn(afu, i, &fc_port_regs[0], wwpn[i]);
1750c21e0bbfSMatthew R. Ochs 		/* Programming WWPN back to back causes additional
1751c21e0bbfSMatthew R. Ochs 		 * offline/online transitions and a PLOGI
1752c21e0bbfSMatthew R. Ochs 		 */
1753c21e0bbfSMatthew R. Ochs 		msleep(100);
1754c21e0bbfSMatthew R. Ochs 	}
1755c21e0bbfSMatthew R. Ochs 
1756f15fbf8dSMatthew R. Ochs 	/* Set up master's own CTX_CAP to allow real mode, host translation */
1757f15fbf8dSMatthew R. Ochs 	/* tables, afu cmds and read/write GSCSI cmds. */
1758c21e0bbfSMatthew R. Ochs 	/* First, unlock ctx_cap write by reading mbox */
17593065267aSMatthew R. Ochs 	for (i = 0; i < afu->num_hwqs; i++) {
1760bfc0bab1SUma Krishnan 		hwq = get_hwq(afu, i);
1761bfc0bab1SUma Krishnan 
1762bfc0bab1SUma Krishnan 		(void)readq_be(&hwq->ctrl_map->mbox_r);	/* unlock ctx_cap */
1763c21e0bbfSMatthew R. Ochs 		writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
1764c21e0bbfSMatthew R. Ochs 			SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
1765c21e0bbfSMatthew R. Ochs 			SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
1766bfc0bab1SUma Krishnan 			&hwq->ctrl_map->ctx_cap);
1767bfc0bab1SUma Krishnan 	}
1768f15fbf8dSMatthew R. Ochs 	/* Initialize heartbeat */
1769c21e0bbfSMatthew R. Ochs 	afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
1770c21e0bbfSMatthew R. Ochs out:
1771c21e0bbfSMatthew R. Ochs 	return rc;
1772c21e0bbfSMatthew R. Ochs }
1773c21e0bbfSMatthew R. Ochs 
1774c21e0bbfSMatthew R. Ochs /**
1775c21e0bbfSMatthew R. Ochs  * start_afu() - initializes and starts the AFU
17761284fb0cSMatthew R. Ochs  * @cfg:	Internal structure associated with the host.
1777c21e0bbfSMatthew R. Ochs  */
1778c21e0bbfSMatthew R. Ochs static int start_afu(struct cxlflash_cfg *cfg)
1779c21e0bbfSMatthew R. Ochs {
1780c21e0bbfSMatthew R. Ochs 	struct afu *afu = cfg->afu;
1781fb67d44dSMatthew R. Ochs 	struct device *dev = &cfg->dev->dev;
1782bfc0bab1SUma Krishnan 	struct hwq *hwq;
1783c21e0bbfSMatthew R. Ochs 	int rc = 0;
1784bfc0bab1SUma Krishnan 	int i;
1785c21e0bbfSMatthew R. Ochs 
1786c21e0bbfSMatthew R. Ochs 	init_pcr(cfg);
1787c21e0bbfSMatthew R. Ochs 
1788bfc0bab1SUma Krishnan 	/* Initialize each HWQ */
17893065267aSMatthew R. Ochs 	for (i = 0; i < afu->num_hwqs; i++) {
1790bfc0bab1SUma Krishnan 		hwq = get_hwq(afu, i);
1791bfc0bab1SUma Krishnan 
1792bfc0bab1SUma Krishnan 		/* After an AFU reset, RRQ entries are stale, clear them */
1793bfc0bab1SUma Krishnan 		memset(&hwq->rrq_entry, 0, sizeof(hwq->rrq_entry));
1794bfc0bab1SUma Krishnan 
1795bfc0bab1SUma Krishnan 		/* Initialize RRQ pointers */
1796bfc0bab1SUma Krishnan 		hwq->hrrq_start = &hwq->rrq_entry[0];
1797bfc0bab1SUma Krishnan 		hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1];
1798bfc0bab1SUma Krishnan 		hwq->hrrq_curr = hwq->hrrq_start;
1799bfc0bab1SUma Krishnan 		hwq->toggle = 1;
180066ea9bccSUma Krishnan 
180166ea9bccSUma Krishnan 		/* Initialize spin locks */
1802bfc0bab1SUma Krishnan 		spin_lock_init(&hwq->hrrq_slock);
180366ea9bccSUma Krishnan 		spin_lock_init(&hwq->hsq_slock);
1804c21e0bbfSMatthew R. Ochs 
1805696d0b0cSMatthew R. Ochs 		/* Initialize SQ */
1806696d0b0cSMatthew R. Ochs 		if (afu_is_sq_cmd_mode(afu)) {
1807bfc0bab1SUma Krishnan 			memset(&hwq->sq, 0, sizeof(hwq->sq));
1808bfc0bab1SUma Krishnan 			hwq->hsq_start = &hwq->sq[0];
1809bfc0bab1SUma Krishnan 			hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1];
1810bfc0bab1SUma Krishnan 			hwq->hsq_curr = hwq->hsq_start;
1811696d0b0cSMatthew R. Ochs 
1812bfc0bab1SUma Krishnan 			atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1);
1813696d0b0cSMatthew R. Ochs 		}
1814696d0b0cSMatthew R. Ochs 
1815cba06e6dSMatthew R. Ochs 		/* Initialize IRQ poll */
1816cba06e6dSMatthew R. Ochs 		if (afu_is_irqpoll_enabled(afu))
1817bfc0bab1SUma Krishnan 			irq_poll_init(&hwq->irqpoll, afu->irqpoll_weight,
1818cba06e6dSMatthew R. Ochs 				      cxlflash_irqpoll);
1819cba06e6dSMatthew R. Ochs 
1820bfc0bab1SUma Krishnan 	}
1821bfc0bab1SUma Krishnan 
1822c21e0bbfSMatthew R. Ochs 	rc = init_global(cfg);
1823c21e0bbfSMatthew R. Ochs 
1824fb67d44dSMatthew R. Ochs 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1825c21e0bbfSMatthew R. Ochs 	return rc;
1826c21e0bbfSMatthew R. Ochs }
1827c21e0bbfSMatthew R. Ochs 
1828c21e0bbfSMatthew R. Ochs /**
18299526f360SManoj N. Kumar  * init_intr() - setup interrupt handlers for the master context
18301284fb0cSMatthew R. Ochs  * @cfg:	Internal structure associated with the host.
1831bfc0bab1SUma Krishnan  * @hwq:	Hardware queue to initialize.
1832c21e0bbfSMatthew R. Ochs  *
18331284fb0cSMatthew R. Ochs  * Return: UNDO_NOOP on success, or the level of initialization to unwind on failure
1834c21e0bbfSMatthew R. Ochs  */
18359526f360SManoj N. Kumar static enum undo_level init_intr(struct cxlflash_cfg *cfg,
1836bfc0bab1SUma Krishnan 				 struct hwq *hwq)
1837c21e0bbfSMatthew R. Ochs {
18389526f360SManoj N. Kumar 	struct device *dev = &cfg->dev->dev;
1839bfc0bab1SUma Krishnan 	struct cxl_context *ctx = hwq->ctx;
1840c21e0bbfSMatthew R. Ochs 	int rc = 0;
18419526f360SManoj N. Kumar 	enum undo_level level = UNDO_NOOP;
1842bfc0bab1SUma Krishnan 	bool is_primary_hwq = (hwq->index == PRIMARY_HWQ);
1843bfc0bab1SUma Krishnan 	int num_irqs = is_primary_hwq ? 3 : 2;
1844c21e0bbfSMatthew R. Ochs 
1845bfc0bab1SUma Krishnan 	rc = cxl_allocate_afu_irqs(ctx, num_irqs);
1846c21e0bbfSMatthew R. Ochs 	if (unlikely(rc)) {
1847fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
1848c21e0bbfSMatthew R. Ochs 			__func__, rc);
18499526f360SManoj N. Kumar 		level = UNDO_NOOP;
1850c21e0bbfSMatthew R. Ochs 		goto out;
1851c21e0bbfSMatthew R. Ochs 	}
1852c21e0bbfSMatthew R. Ochs 
1853bfc0bab1SUma Krishnan 	rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
1854c21e0bbfSMatthew R. Ochs 			     "SISL_MSI_SYNC_ERROR");
1855c21e0bbfSMatthew R. Ochs 	if (unlikely(rc <= 0)) {
1856fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
1857c21e0bbfSMatthew R. Ochs 		level = FREE_IRQ;
1858c21e0bbfSMatthew R. Ochs 		goto out;
1859c21e0bbfSMatthew R. Ochs 	}
1860c21e0bbfSMatthew R. Ochs 
1861bfc0bab1SUma Krishnan 	rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
1862c21e0bbfSMatthew R. Ochs 			     "SISL_MSI_RRQ_UPDATED");
1863c21e0bbfSMatthew R. Ochs 	if (unlikely(rc <= 0)) {
1864fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
1865c21e0bbfSMatthew R. Ochs 		level = UNMAP_ONE;
1866c21e0bbfSMatthew R. Ochs 		goto out;
1867c21e0bbfSMatthew R. Ochs 	}
1868c21e0bbfSMatthew R. Ochs 
1869bfc0bab1SUma Krishnan 	/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
1870bfc0bab1SUma Krishnan 	if (!is_primary_hwq)
1871bfc0bab1SUma Krishnan 		goto out;
1872bfc0bab1SUma Krishnan 
1873bfc0bab1SUma Krishnan 	rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
1874c21e0bbfSMatthew R. Ochs 			     "SISL_MSI_ASYNC_ERROR");
1875c21e0bbfSMatthew R. Ochs 	if (unlikely(rc <= 0)) {
1876fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
1877c21e0bbfSMatthew R. Ochs 		level = UNMAP_TWO;
1878c21e0bbfSMatthew R. Ochs 		goto out;
1879c21e0bbfSMatthew R. Ochs 	}
18809526f360SManoj N. Kumar out:
18819526f360SManoj N. Kumar 	return level;
18829526f360SManoj N. Kumar }
1883c21e0bbfSMatthew R. Ochs 
18849526f360SManoj N. Kumar /**
18859526f360SManoj N. Kumar  * init_mc() - create and register as the master context
18869526f360SManoj N. Kumar  * @cfg:	Internal structure associated with the host.
1887bfc0bab1SUma Krishnan  * @index:	HWQ index of the master context.
18889526f360SManoj N. Kumar  *
18899526f360SManoj N. Kumar  * Return: 0 on success, -errno on failure
18909526f360SManoj N. Kumar  */
1891bfc0bab1SUma Krishnan static int init_mc(struct cxlflash_cfg *cfg, u32 index)
18929526f360SManoj N. Kumar {
18939526f360SManoj N. Kumar 	struct cxl_context *ctx;
18949526f360SManoj N. Kumar 	struct device *dev = &cfg->dev->dev;
1895bfc0bab1SUma Krishnan 	struct hwq *hwq = get_hwq(cfg->afu, index);
18969526f360SManoj N. Kumar 	int rc = 0;
18979526f360SManoj N. Kumar 	enum undo_level level;
18989526f360SManoj N. Kumar 
1899bfc0bab1SUma Krishnan 	hwq->afu = cfg->afu;
1900bfc0bab1SUma Krishnan 	hwq->index = index;
1901a002bf83SUma Krishnan 	INIT_LIST_HEAD(&hwq->pending_cmds);
1902bfc0bab1SUma Krishnan 
1903bfc0bab1SUma Krishnan 	if (index == PRIMARY_HWQ)
19049526f360SManoj N. Kumar 		ctx = cxl_get_context(cfg->dev);
1905bfc0bab1SUma Krishnan 	else
1906bfc0bab1SUma Krishnan 		ctx = cxl_dev_context_init(cfg->dev);
19079526f360SManoj N. Kumar 	if (unlikely(!ctx)) {
19089526f360SManoj N. Kumar 		rc = -ENOMEM;
1909bfc0bab1SUma Krishnan 		goto err1;
19109526f360SManoj N. Kumar 	}
1911bfc0bab1SUma Krishnan 
1912bfc0bab1SUma Krishnan 	WARN_ON(hwq->ctx);
1913bfc0bab1SUma Krishnan 	hwq->ctx = ctx;
19149526f360SManoj N. Kumar 
19159526f360SManoj N. Kumar 	/* Set it up as a master with the CXL */
19169526f360SManoj N. Kumar 	cxl_set_master(ctx);
19179526f360SManoj N. Kumar 
1918bfc0bab1SUma Krishnan 	/* Reset AFU when initializing primary context */
1919bfc0bab1SUma Krishnan 	if (index == PRIMARY_HWQ) {
1920bfc0bab1SUma Krishnan 		rc = cxl_afu_reset(ctx);
19219526f360SManoj N. Kumar 		if (unlikely(rc)) {
1922bfc0bab1SUma Krishnan 			dev_err(dev, "%s: AFU reset failed rc=%d\n",
1923bfc0bab1SUma Krishnan 				      __func__, rc);
1924bfc0bab1SUma Krishnan 			goto err1;
1925bfc0bab1SUma Krishnan 		}
19269526f360SManoj N. Kumar 	}
19279526f360SManoj N. Kumar 
1928bfc0bab1SUma Krishnan 	level = init_intr(cfg, hwq);
19299526f360SManoj N. Kumar 	if (unlikely(level)) {
1930fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: interrupt init failed level=%d\n", __func__, level);
		rc = -ENODEV;
1931bfc0bab1SUma Krishnan 		goto err2;
19329526f360SManoj N. Kumar 	}
1933c21e0bbfSMatthew R. Ochs 
1934c21e0bbfSMatthew R. Ochs 	/* This performs the equivalent of the CXL_IOCTL_START_WORK.
1935c21e0bbfSMatthew R. Ochs 	 * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
1936c21e0bbfSMatthew R. Ochs 	 * element (pe) that is embedded in the context (ctx)
1937c21e0bbfSMatthew R. Ochs 	 * element (pe) that is embedded in the context (ctx).
1938bfc0bab1SUma Krishnan 	rc = start_context(cfg, index);
1939c21e0bbfSMatthew R. Ochs 	if (unlikely(rc)) {
1940c21e0bbfSMatthew R. Ochs 		dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
1941c21e0bbfSMatthew R. Ochs 		level = UNMAP_THREE;
1942bfc0bab1SUma Krishnan 		goto err2;
1943c21e0bbfSMatthew R. Ochs 	}
1944bfc0bab1SUma Krishnan 
1945bfc0bab1SUma Krishnan out:
1946fb67d44dSMatthew R. Ochs 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1947c21e0bbfSMatthew R. Ochs 	return rc;
1948bfc0bab1SUma Krishnan err2:
1949bfc0bab1SUma Krishnan 	term_intr(cfg, level, index);
1950bfc0bab1SUma Krishnan 	if (index != PRIMARY_HWQ)
1951bfc0bab1SUma Krishnan 		cxl_release_context(ctx);
1952bfc0bab1SUma Krishnan err1:
1953bfc0bab1SUma Krishnan 	hwq->ctx = NULL;
1954bfc0bab1SUma Krishnan 	goto out;
1955c21e0bbfSMatthew R. Ochs }
1956c21e0bbfSMatthew R. Ochs 
1957c21e0bbfSMatthew R. Ochs /**
195856518072SMatthew R. Ochs  * get_num_afu_ports() - determines and configures the number of AFU ports
195956518072SMatthew R. Ochs  * @cfg:	Internal structure associated with the host.
196056518072SMatthew R. Ochs  *
196156518072SMatthew R. Ochs  * This routine determines the number of AFU ports by converting the global
196256518072SMatthew R. Ochs  * port selection mask. The converted value is only valid following an AFU
196356518072SMatthew R. Ochs  * reset (explicit or power-on). This routine must be invoked shortly after
196456518072SMatthew R. Ochs  * mapping as other routines are dependent on the number of ports during the
196556518072SMatthew R. Ochs  * initialization sequence.
196656518072SMatthew R. Ochs  *
196756518072SMatthew R. Ochs  * To support legacy AFUs that might not have reflected an initial global
196856518072SMatthew R. Ochs  * port mask (value read is 0), default to the number of ports originally
196956518072SMatthew R. Ochs  * supported by the cxlflash driver (2) before hardware with other port
197056518072SMatthew R. Ochs  * offerings was introduced.
197156518072SMatthew R. Ochs  */
197256518072SMatthew R. Ochs static void get_num_afu_ports(struct cxlflash_cfg *cfg)
197356518072SMatthew R. Ochs {
197456518072SMatthew R. Ochs 	struct afu *afu = cfg->afu;
197556518072SMatthew R. Ochs 	struct device *dev = &cfg->dev->dev;
197656518072SMatthew R. Ochs 	u64 port_mask;
197756518072SMatthew R. Ochs 	int num_fc_ports = LEGACY_FC_PORTS;
197856518072SMatthew R. Ochs 
197956518072SMatthew R. Ochs 	port_mask = readq_be(&afu->afu_map->global.regs.afu_port_sel);
198056518072SMatthew R. Ochs 	if (port_mask != 0ULL)
198156518072SMatthew R. Ochs 		num_fc_ports = min(ilog2(port_mask) + 1, MAX_FC_PORTS);
198256518072SMatthew R. Ochs 
198356518072SMatthew R. Ochs 	dev_dbg(dev, "%s: port_mask=%016llx num_fc_ports=%d\n",
198456518072SMatthew R. Ochs 		__func__, port_mask, num_fc_ports);
198556518072SMatthew R. Ochs 
198656518072SMatthew R. Ochs 	cfg->num_fc_ports = num_fc_ports;
198756518072SMatthew R. Ochs 	cfg->host->max_channel = PORTNUM2CHAN(num_fc_ports);
198856518072SMatthew R. Ochs }
198956518072SMatthew R. Ochs 
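/*
 * Illustrative note: the count is recovered from the highest set bit of
 * the select mask, capped at MAX_FC_PORTS:
 *
 *	port_mask = 0x3 -> ilog2(0x3) + 1 = 2 ports
 *	port_mask = 0xf -> ilog2(0xf) + 1 = 4 ports
 *	port_mask = 0x0 -> LEGACY_FC_PORTS (pre-mask AFUs)
 */
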
199056518072SMatthew R. Ochs /**
1991c21e0bbfSMatthew R. Ochs  * init_afu() - setup as master context and start AFU
19921284fb0cSMatthew R. Ochs  * @cfg:	Internal structure associated with the host.
1993c21e0bbfSMatthew R. Ochs  *
1994c21e0bbfSMatthew R. Ochs  * This routine is a higher level of control for configuring the
1995c21e0bbfSMatthew R. Ochs  * AFU on probe and reset paths.
1996c21e0bbfSMatthew R. Ochs  *
19971284fb0cSMatthew R. Ochs  * Return: 0 on success, -errno on failure
1998c21e0bbfSMatthew R. Ochs  */
1999c21e0bbfSMatthew R. Ochs static int init_afu(struct cxlflash_cfg *cfg)
2000c21e0bbfSMatthew R. Ochs {
2001c21e0bbfSMatthew R. Ochs 	u64 reg;
2002c21e0bbfSMatthew R. Ochs 	int rc = 0;
2003c21e0bbfSMatthew R. Ochs 	struct afu *afu = cfg->afu;
2004c21e0bbfSMatthew R. Ochs 	struct device *dev = &cfg->dev->dev;
2005bfc0bab1SUma Krishnan 	struct hwq *hwq;
2006bfc0bab1SUma Krishnan 	int i;
2007c21e0bbfSMatthew R. Ochs 
20085cdac81aSMatthew R. Ochs 	cxl_perst_reloads_same_image(cfg->cxl_afu, true);
20095cdac81aSMatthew R. Ochs 
20103065267aSMatthew R. Ochs 	afu->num_hwqs = afu->desired_hwqs;
20113065267aSMatthew R. Ochs 	for (i = 0; i < afu->num_hwqs; i++) {
2012bfc0bab1SUma Krishnan 		rc = init_mc(cfg, i);
2013c21e0bbfSMatthew R. Ochs 		if (rc) {
2014bfc0bab1SUma Krishnan 			dev_err(dev, "%s: init_mc failed rc=%d index=%d\n",
2015bfc0bab1SUma Krishnan 				__func__, rc, i);
2016bfc0bab1SUma Krishnan 			goto err1;
2017bfc0bab1SUma Krishnan 		}
2018c21e0bbfSMatthew R. Ochs 	}
2019c21e0bbfSMatthew R. Ochs 
2020bfc0bab1SUma Krishnan 	/* Map the entire MMIO space of the AFU using the first context */
2021bfc0bab1SUma Krishnan 	hwq = get_hwq(afu, PRIMARY_HWQ);
2022bfc0bab1SUma Krishnan 	afu->afu_map = cxl_psa_map(hwq->ctx);
2023c21e0bbfSMatthew R. Ochs 	if (!afu->afu_map) {
2024fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: cxl_psa_map failed\n", __func__);
2025ee3491baSMatthew R. Ochs 		rc = -ENOMEM;
2026c21e0bbfSMatthew R. Ochs 		goto err1;
2027c21e0bbfSMatthew R. Ochs 	}
2028c21e0bbfSMatthew R. Ochs 
2029e5ce067bSMatthew R. Ochs 	/* No byte reverse on reading afu_version or string will be backwards */
2030e5ce067bSMatthew R. Ochs 	reg = readq(&afu->afu_map->global.regs.afu_version);
2031e5ce067bSMatthew R. Ochs 	memcpy(afu->version, &reg, sizeof(reg));
2032c21e0bbfSMatthew R. Ochs 	afu->interface_version =
2033c21e0bbfSMatthew R. Ochs 	    readq_be(&afu->afu_map->global.regs.interface_version);
2034e5ce067bSMatthew R. Ochs 	if ((afu->interface_version + 1) == 0) {
2035fb67d44dSMatthew R. Ochs 		dev_err(dev, "Back level AFU, please upgrade. AFU version %s "
2036fb67d44dSMatthew R. Ochs 			"interface version %016llx\n", afu->version,
2037e5ce067bSMatthew R. Ochs 		       afu->interface_version);
2038e5ce067bSMatthew R. Ochs 		rc = -EINVAL;
20390df5bef7SUma Krishnan 		goto err1;
2040ee3491baSMatthew R. Ochs 	}
2041ee3491baSMatthew R. Ochs 
2042696d0b0cSMatthew R. Ochs 	if (afu_is_sq_cmd_mode(afu)) {
2043696d0b0cSMatthew R. Ochs 		afu->send_cmd = send_cmd_sq;
2044696d0b0cSMatthew R. Ochs 		afu->context_reset = context_reset_sq;
2045696d0b0cSMatthew R. Ochs 	} else {
204648b4be36SMatthew R. Ochs 		afu->send_cmd = send_cmd_ioarrin;
204748b4be36SMatthew R. Ochs 		afu->context_reset = context_reset_ioarrin;
2048696d0b0cSMatthew R. Ochs 	}
204948b4be36SMatthew R. Ochs 
2050fb67d44dSMatthew R. Ochs 	dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__,
2051ee3491baSMatthew R. Ochs 		afu->version, afu->interface_version);
2052c21e0bbfSMatthew R. Ochs 
205356518072SMatthew R. Ochs 	get_num_afu_ports(cfg);
205456518072SMatthew R. Ochs 
2055c21e0bbfSMatthew R. Ochs 	rc = start_afu(cfg);
2056c21e0bbfSMatthew R. Ochs 	if (rc) {
2057fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc);
20580df5bef7SUma Krishnan 		goto err1;
2059c21e0bbfSMatthew R. Ochs 	}
2060c21e0bbfSMatthew R. Ochs 
2061c21e0bbfSMatthew R. Ochs 	afu_err_intr_init(cfg->afu);
20623065267aSMatthew R. Ochs 	for (i = 0; i < afu->num_hwqs; i++) {
2063bfc0bab1SUma Krishnan 		hwq = get_hwq(afu, i);
2064bfc0bab1SUma Krishnan 
2065bfc0bab1SUma Krishnan 		hwq->room = readq_be(&hwq->host_map->cmd_room);
2066bfc0bab1SUma Krishnan 	}
2067c21e0bbfSMatthew R. Ochs 
20682cb79266SMatthew R. Ochs 	/* Restore the LUN mappings */
20692cb79266SMatthew R. Ochs 	cxlflash_restore_luntable(cfg);
2070ee3491baSMatthew R. Ochs out:
2071fb67d44dSMatthew R. Ochs 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2072c21e0bbfSMatthew R. Ochs 	return rc;
2073ee3491baSMatthew R. Ochs 
2074ee3491baSMatthew R. Ochs err1:
20753065267aSMatthew R. Ochs 	for (i = afu->num_hwqs - 1; i >= 0; i--) {
2076bfc0bab1SUma Krishnan 		term_intr(cfg, UNMAP_THREE, i);
2077bfc0bab1SUma Krishnan 		term_mc(cfg, i);
2078bfc0bab1SUma Krishnan 	}
2079ee3491baSMatthew R. Ochs 	goto out;
2080c21e0bbfSMatthew R. Ochs }
2081c21e0bbfSMatthew R. Ochs 
2082c21e0bbfSMatthew R. Ochs /**
20830b09e711SUma Krishnan  * afu_reset() - resets the AFU
20840b09e711SUma Krishnan  * @cfg:	Internal structure associated with the host.
20850b09e711SUma Krishnan  *
20860b09e711SUma Krishnan  * Return: 0 on success, -errno on failure
20870b09e711SUma Krishnan  */
20880b09e711SUma Krishnan static int afu_reset(struct cxlflash_cfg *cfg)
20890b09e711SUma Krishnan {
20900b09e711SUma Krishnan 	struct device *dev = &cfg->dev->dev;
20910b09e711SUma Krishnan 	int rc = 0;
20920b09e711SUma Krishnan 
20930b09e711SUma Krishnan 	/* Stop the context before the reset. Since the context is
20940b09e711SUma Krishnan 	 * no longer available, restart it after the reset is complete.
20950b09e711SUma Krishnan 	 */
20960b09e711SUma Krishnan 	term_afu(cfg);
20970b09e711SUma Krishnan 
20980b09e711SUma Krishnan 	rc = init_afu(cfg);
20990b09e711SUma Krishnan 
21000b09e711SUma Krishnan 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
21010b09e711SUma Krishnan 	return rc;
21020b09e711SUma Krishnan }
21030b09e711SUma Krishnan 
21040b09e711SUma Krishnan /**
21050b09e711SUma Krishnan  * drain_ioctls() - wait until all currently executing ioctls have completed
21060b09e711SUma Krishnan  * @cfg:	Internal structure associated with the host.
21070b09e711SUma Krishnan  *
21080b09e711SUma Krishnan  * Obtain write access to the read/write semaphore that wraps ioctl
21090b09e711SUma Krishnan  * handling to 'drain' ioctls currently executing.
21100b09e711SUma Krishnan  */
21110b09e711SUma Krishnan static void drain_ioctls(struct cxlflash_cfg *cfg)
21120b09e711SUma Krishnan {
21130b09e711SUma Krishnan 	down_write(&cfg->ioctl_rwsem);
21140b09e711SUma Krishnan 	up_write(&cfg->ioctl_rwsem);
21150b09e711SUma Krishnan }
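
/*
 * Illustrative sketch (not part of this listing): drain_ioctls() relies on
 * every ioctl path holding the read side of ioctl_rwsem for the duration
 * of the ioctl, along the lines of
 *
 *	down_read(&cfg->ioctl_rwsem);
 *	rc = do_ioctl_work(cfg, ...);	// hypothetical helper
 *	up_read(&cfg->ioctl_rwsem);
 *
 * Only once all such readers have released the semaphore is down_write()
 * granted; the immediate up_write() then simply lets new ioctls proceed.
 */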
21160b09e711SUma Krishnan 
21170b09e711SUma Krishnan /**
21180b09e711SUma Krishnan  * cxlflash_async_reset_host() - asynchronous host reset handler
21190b09e711SUma Krishnan  * @data:	Private data provided while scheduling reset.
21200b09e711SUma Krishnan  * @cookie:	Cookie that can be used for checkpointing.
21210b09e711SUma Krishnan  */
21220b09e711SUma Krishnan static void cxlflash_async_reset_host(void *data, async_cookie_t cookie)
21230b09e711SUma Krishnan {
21240b09e711SUma Krishnan 	struct cxlflash_cfg *cfg = data;
21250b09e711SUma Krishnan 	struct device *dev = &cfg->dev->dev;
21260b09e711SUma Krishnan 	int rc = 0;
21270b09e711SUma Krishnan 
21280b09e711SUma Krishnan 	if (cfg->state != STATE_RESET) {
21290b09e711SUma Krishnan 		dev_dbg(dev, "%s: Not performing a reset, state=%d\n",
21300b09e711SUma Krishnan 			__func__, cfg->state);
21310b09e711SUma Krishnan 		goto out;
21320b09e711SUma Krishnan 	}
21330b09e711SUma Krishnan 
21340b09e711SUma Krishnan 	drain_ioctls(cfg);
21350b09e711SUma Krishnan 	cxlflash_mark_contexts_error(cfg);
21360b09e711SUma Krishnan 	rc = afu_reset(cfg);
21370b09e711SUma Krishnan 	if (rc)
21380b09e711SUma Krishnan 		cfg->state = STATE_FAILTERM;
21390b09e711SUma Krishnan 	else
21400b09e711SUma Krishnan 		cfg->state = STATE_NORMAL;
21410b09e711SUma Krishnan 	wake_up_all(&cfg->reset_waitq);
21420b09e711SUma Krishnan 
21430b09e711SUma Krishnan out:
21440b09e711SUma Krishnan 	scsi_unblock_requests(cfg->host);
21450b09e711SUma Krishnan }
21460b09e711SUma Krishnan 
21470b09e711SUma Krishnan /**
21480b09e711SUma Krishnan  * cxlflash_schedule_async_reset() - schedule an asynchronous host reset
21490b09e711SUma Krishnan  * @cfg:	Internal structure associated with the host.
21500b09e711SUma Krishnan  */
21510b09e711SUma Krishnan static void cxlflash_schedule_async_reset(struct cxlflash_cfg *cfg)
21520b09e711SUma Krishnan {
21530b09e711SUma Krishnan 	struct device *dev = &cfg->dev->dev;
21540b09e711SUma Krishnan 
21550b09e711SUma Krishnan 	if (cfg->state != STATE_NORMAL) {
21560b09e711SUma Krishnan 		dev_dbg(dev, "%s: Not performing reset, state=%d\n",
21570b09e711SUma Krishnan 			__func__, cfg->state);
21580b09e711SUma Krishnan 		return;
21590b09e711SUma Krishnan 	}
21600b09e711SUma Krishnan 
21610b09e711SUma Krishnan 	cfg->state = STATE_RESET;
21620b09e711SUma Krishnan 	scsi_block_requests(cfg->host);
21630b09e711SUma Krishnan 	cfg->async_reset_cookie = async_schedule(cxlflash_async_reset_host,
21640b09e711SUma Krishnan 						 cfg);
21650b09e711SUma Krishnan }
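
/*
 * Illustrative note (assumption, not shown in this listing): the cookie
 * returned by async_schedule() can later be used to wait for the reset to
 * finish before the host is torn down, e.g.
 *
 *	async_synchronize_cookie(cfg->async_reset_cookie + 1);
 *
 * async_synchronize_cookie() waits for all async work scheduled prior to
 * the given cookie, so passing cookie + 1 includes the reset itself.
 */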
21660b09e711SUma Krishnan 
21670b09e711SUma Krishnan /**
2168c21e0bbfSMatthew R. Ochs  * cxlflash_afu_sync() - builds and sends an AFU sync command
2169c21e0bbfSMatthew R. Ochs  * @afu:	AFU associated with the host.
2170c21e0bbfSMatthew R. Ochs  * @ctx_hndl_u:	Identifies context requesting sync.
2171c21e0bbfSMatthew R. Ochs  * @res_hndl_u:	Identifies resource requesting sync.
2172c21e0bbfSMatthew R. Ochs  * @mode:	Type of sync to issue (lightweight, heavyweight, global).
2173c21e0bbfSMatthew R. Ochs  *
2174c21e0bbfSMatthew R. Ochs  * The AFU can only take 1 sync command at a time. This routine enforces this
2175f15fbf8dSMatthew R. Ochs  * limitation by using a mutex to provide exclusive access to the AFU during
2176c21e0bbfSMatthew R. Ochs  * the sync. This design point requires calling threads to not be on interrupt
2177c21e0bbfSMatthew R. Ochs  * context due to the possibility of sleeping during concurrent sync operations.
2178c21e0bbfSMatthew R. Ochs  *
21795cdac81aSMatthew R. Ochs  * AFU sync operations are only necessary and allowed when the device is
21805cdac81aSMatthew R. Ochs  * operating normally. When not operating normally, sync requests can occur as
21815cdac81aSMatthew R. Ochs  * part of cleaning up resources associated with an adapter prior to removal.
21825cdac81aSMatthew R. Ochs  * In this scenario, these requests are simply ignored (safe due to the AFU
21835cdac81aSMatthew R. Ochs  * going away).
21845cdac81aSMatthew R. Ochs  *
2185c21e0bbfSMatthew R. Ochs  * Return:
2186539d890cSUma Krishnan  *	0 on success, -errno on failure
2187c21e0bbfSMatthew R. Ochs  */
2188c21e0bbfSMatthew R. Ochs int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
2189c21e0bbfSMatthew R. Ochs 		      res_hndl_t res_hndl_u, u8 mode)
2190c21e0bbfSMatthew R. Ochs {
21915cdac81aSMatthew R. Ochs 	struct cxlflash_cfg *cfg = afu->parent;
21924392ba49SMatthew R. Ochs 	struct device *dev = &cfg->dev->dev;
2193c21e0bbfSMatthew R. Ochs 	struct afu_cmd *cmd = NULL;
2194bfc0bab1SUma Krishnan 	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
2195350bb478SMatthew R. Ochs 	char *buf = NULL;
2196c21e0bbfSMatthew R. Ochs 	int rc = 0;
2197a96851d3SUma Krishnan 	int nretry = 0;
2198c21e0bbfSMatthew R. Ochs 	static DEFINE_MUTEX(sync_active);
2199c21e0bbfSMatthew R. Ochs 
22005cdac81aSMatthew R. Ochs 	if (cfg->state != STATE_NORMAL) {
2201fb67d44dSMatthew R. Ochs 		dev_dbg(dev, "%s: Sync not required, state=%u\n",
2202fb67d44dSMatthew R. Ochs 			__func__, cfg->state);
22035cdac81aSMatthew R. Ochs 		return 0;
22045cdac81aSMatthew R. Ochs 	}
22055cdac81aSMatthew R. Ochs 
2206c21e0bbfSMatthew R. Ochs 	mutex_lock(&sync_active);
2207de01283bSMatthew R. Ochs 	atomic_inc(&afu->cmds_active);
2208a1ea04b3SUma Krishnan 	buf = kmalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
2209350bb478SMatthew R. Ochs 	if (unlikely(!buf)) {
2210350bb478SMatthew R. Ochs 		dev_err(dev, "%s: no memory for command\n", __func__);
2211539d890cSUma Krishnan 		rc = -ENOMEM;
2212c21e0bbfSMatthew R. Ochs 		goto out;
2213c21e0bbfSMatthew R. Ochs 	}
2214c21e0bbfSMatthew R. Ochs 
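	/*
	 * buf was allocated with __alignof__(*cmd) - 1 bytes of slack, so
	 * rounding the pointer up to the next __alignof__(*cmd) boundary
	 * is guaranteed to leave sizeof(*cmd) usable bytes.
	 */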
2215350bb478SMatthew R. Ochs 	cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
2216a96851d3SUma Krishnan 
2217a96851d3SUma Krishnan retry:
2218a1ea04b3SUma Krishnan 	memset(cmd, 0, sizeof(*cmd));
2219a1ea04b3SUma Krishnan 	INIT_LIST_HEAD(&cmd->queue);
2220350bb478SMatthew R. Ochs 	init_completion(&cmd->cevent);
2221350bb478SMatthew R. Ochs 	cmd->parent = afu;
2222bfc0bab1SUma Krishnan 	cmd->hwq_index = hwq->index;
2223350bb478SMatthew R. Ochs 
2224a96851d3SUma Krishnan 	dev_dbg(dev, "%s: afu=%p cmd=%p ctx=%d nretry=%d\n",
2225a96851d3SUma Krishnan 		__func__, afu, cmd, ctx_hndl_u, nretry);
2226c21e0bbfSMatthew R. Ochs 
2227c21e0bbfSMatthew R. Ochs 	cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
2228bfc0bab1SUma Krishnan 	cmd->rcb.ctx_id = hwq->ctx_hndl;
2229350bb478SMatthew R. Ochs 	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
2230c21e0bbfSMatthew R. Ochs 	cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT;
2231c21e0bbfSMatthew R. Ochs 
2232c21e0bbfSMatthew R. Ochs 	cmd->rcb.cdb[0] = 0xC0;	/* AFU Sync */
2233c21e0bbfSMatthew R. Ochs 	cmd->rcb.cdb[1] = mode;
2234c21e0bbfSMatthew R. Ochs 
2235c21e0bbfSMatthew R. Ochs 	/* The cdb is aligned, no unaligned accessors required */
22361786f4a0SMatthew R. Ochs 	*((__be16 *)&cmd->rcb.cdb[2]) = cpu_to_be16(ctx_hndl_u);
22371786f4a0SMatthew R. Ochs 	*((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u);
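	/*
	 * Resulting AFU sync CDB layout (big endian):
	 *   byte 0    - 0xC0 (AFU sync opcode)
	 *   byte 1    - sync mode
	 *   bytes 2-3 - context handle
	 *   bytes 4-7 - resource handle
	 */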
2238c21e0bbfSMatthew R. Ochs 
223948b4be36SMatthew R. Ochs 	rc = afu->send_cmd(afu, cmd);
2240539d890cSUma Krishnan 	if (unlikely(rc)) {
2241539d890cSUma Krishnan 		rc = -ENOBUFS;
2242c21e0bbfSMatthew R. Ochs 		goto out;
2243539d890cSUma Krishnan 	}
2244c21e0bbfSMatthew R. Ochs 
22459ba848acSMatthew R. Ochs 	rc = wait_resp(afu, cmd);
2246a1ea04b3SUma Krishnan 	switch (rc) {
2247a1ea04b3SUma Krishnan 	case -ETIMEDOUT:
2248a96851d3SUma Krishnan 		rc = afu->context_reset(hwq);
2249a1ea04b3SUma Krishnan 		if (rc) {
22500b09e711SUma Krishnan 			cxlflash_schedule_async_reset(cfg);
2251a1ea04b3SUma Krishnan 			break;
2252a1ea04b3SUma Krishnan 		}
2253a1ea04b3SUma Krishnan 		/* fall through to retry */
2254a1ea04b3SUma Krishnan 	case -EAGAIN:
2255a1ea04b3SUma Krishnan 		if (++nretry < 2)
2256a1ea04b3SUma Krishnan 			goto retry;
2257a1ea04b3SUma Krishnan 		/* fall through to exit */
2258a1ea04b3SUma Krishnan 	default:
2259a1ea04b3SUma Krishnan 		break;
2260a96851d3SUma Krishnan 	}
2261a96851d3SUma Krishnan 
2262c21e0bbfSMatthew R. Ochs out:
2263de01283bSMatthew R. Ochs 	atomic_dec(&afu->cmds_active);
2264c21e0bbfSMatthew R. Ochs 	mutex_unlock(&sync_active);
2265350bb478SMatthew R. Ochs 	kfree(buf);
2266fb67d44dSMatthew R. Ochs 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2267c21e0bbfSMatthew R. Ochs 	return rc;
2268c21e0bbfSMatthew R. Ochs }
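
/*
 * Illustrative sketch (not from this listing): superpipe callers issue
 * syncs along the lines of
 *
 *	rc = cxlflash_afu_sync(afu, ctxid, res_hndl, AFU_LW_SYNC);
 *	if (unlikely(rc))
 *		...
 *
 * where ctxid/res_hndl are the caller's handles and AFU_LW_SYNC,
 * AFU_HW_SYNC and AFU_GSYNC select the lightweight, heavyweight or
 * global sync mode (mode names assumed from sislite.h).
 */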
2269c21e0bbfSMatthew R. Ochs 
2270c21e0bbfSMatthew R. Ochs /**
22717c4c41f1SUma Krishnan  * cxlflash_eh_abort_handler() - abort a SCSI command
22727c4c41f1SUma Krishnan  * @scp:	SCSI command to abort.
22737c4c41f1SUma Krishnan  *
22747c4c41f1SUma Krishnan  * CXL Flash devices do not support a single command abort. Reset the context
22757c4c41f1SUma Krishnan  * as per SISLite specification. Flush any pending commands in the hardware
22767c4c41f1SUma Krishnan  * queue before the reset.
22777c4c41f1SUma Krishnan  *
22787c4c41f1SUma Krishnan  * Return: SUCCESS/FAILED as defined in scsi/scsi.h
22797c4c41f1SUma Krishnan  */
22807c4c41f1SUma Krishnan static int cxlflash_eh_abort_handler(struct scsi_cmnd *scp)
22817c4c41f1SUma Krishnan {
22827c4c41f1SUma Krishnan 	int rc = FAILED;
22837c4c41f1SUma Krishnan 	struct Scsi_Host *host = scp->device->host;
22847c4c41f1SUma Krishnan 	struct cxlflash_cfg *cfg = shost_priv(host);
22857c4c41f1SUma Krishnan 	struct afu_cmd *cmd = sc_to_afuc(scp);
22867c4c41f1SUma Krishnan 	struct device *dev = &cfg->dev->dev;
22877c4c41f1SUma Krishnan 	struct afu *afu = cfg->afu;
22887c4c41f1SUma Krishnan 	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
22897c4c41f1SUma Krishnan 
22907c4c41f1SUma Krishnan 	dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
22917c4c41f1SUma Krishnan 		"cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
22927c4c41f1SUma Krishnan 		scp->device->channel, scp->device->id, scp->device->lun,
22937c4c41f1SUma Krishnan 		get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
22947c4c41f1SUma Krishnan 		get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
22957c4c41f1SUma Krishnan 		get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
22967c4c41f1SUma Krishnan 		get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
22977c4c41f1SUma Krishnan 
22987c4c41f1SUma Krishnan 	/* When the state is not normal, another reset/reload is in progress.
22997c4c41f1SUma Krishnan 	 * Return FAILED and the mid-layer will invoke the host reset handler.
23007c4c41f1SUma Krishnan 	 */
23017c4c41f1SUma Krishnan 	if (cfg->state != STATE_NORMAL) {
23027c4c41f1SUma Krishnan 		dev_dbg(dev, "%s: Invalid state for abort, state=%d\n",
23037c4c41f1SUma Krishnan 			__func__, cfg->state);
23047c4c41f1SUma Krishnan 		goto out;
23057c4c41f1SUma Krishnan 	}
23067c4c41f1SUma Krishnan 
23077c4c41f1SUma Krishnan 	rc = afu->context_reset(hwq);
23087c4c41f1SUma Krishnan 	if (unlikely(rc))
23097c4c41f1SUma Krishnan 		goto out;
23107c4c41f1SUma Krishnan 
23117c4c41f1SUma Krishnan 	rc = SUCCESS;
23127c4c41f1SUma Krishnan 
23137c4c41f1SUma Krishnan out:
23147c4c41f1SUma Krishnan 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
23157c4c41f1SUma Krishnan 	return rc;
23167c4c41f1SUma Krishnan }
23177c4c41f1SUma Krishnan 
23187c4c41f1SUma Krishnan /**
231915305514SMatthew R. Ochs  * cxlflash_eh_device_reset_handler() - reset a single LUN
232015305514SMatthew R. Ochs  * @scp:	SCSI command from stack identifying the LUN to reset.
232115305514SMatthew R. Ochs  *
232215305514SMatthew R. Ochs  * Return:
232315305514SMatthew R. Ochs  *	SUCCESS as defined in scsi/scsi.h
232415305514SMatthew R. Ochs  *	FAILED as defined in scsi/scsi.h
232515305514SMatthew R. Ochs  */
232615305514SMatthew R. Ochs static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
232715305514SMatthew R. Ochs {
232815305514SMatthew R. Ochs 	int rc = SUCCESS;
232915305514SMatthew R. Ochs 	struct Scsi_Host *host = scp->device->host;
2330fb67d44dSMatthew R. Ochs 	struct cxlflash_cfg *cfg = shost_priv(host);
2331fb67d44dSMatthew R. Ochs 	struct device *dev = &cfg->dev->dev;
233215305514SMatthew R. Ochs 	struct afu *afu = cfg->afu;
233315305514SMatthew R. Ochs 	int rcr = 0;
233415305514SMatthew R. Ochs 
2335fb67d44dSMatthew R. Ochs 	dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
2336fb67d44dSMatthew R. Ochs 		"cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
2337fb67d44dSMatthew R. Ochs 		scp->device->channel, scp->device->id, scp->device->lun,
233815305514SMatthew R. Ochs 		get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
233915305514SMatthew R. Ochs 		get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
234015305514SMatthew R. Ochs 		get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
234115305514SMatthew R. Ochs 		get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
234215305514SMatthew R. Ochs 
2343ed486daaSMatthew R. Ochs retry:
234415305514SMatthew R. Ochs 	switch (cfg->state) {
234515305514SMatthew R. Ochs 	case STATE_NORMAL:
234615305514SMatthew R. Ochs 		rcr = send_tmf(afu, scp, TMF_LUN_RESET);
234715305514SMatthew R. Ochs 		if (unlikely(rcr))
234815305514SMatthew R. Ochs 			rc = FAILED;
234915305514SMatthew R. Ochs 		break;
235015305514SMatthew R. Ochs 	case STATE_RESET:
235115305514SMatthew R. Ochs 		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2352ed486daaSMatthew R. Ochs 		goto retry;
235315305514SMatthew R. Ochs 	default:
235415305514SMatthew R. Ochs 		rc = FAILED;
235515305514SMatthew R. Ochs 		break;
235615305514SMatthew R. Ochs 	}
235715305514SMatthew R. Ochs 
2358fb67d44dSMatthew R. Ochs 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
235915305514SMatthew R. Ochs 	return rc;
236015305514SMatthew R. Ochs }
236115305514SMatthew R. Ochs 
236215305514SMatthew R. Ochs /**
236315305514SMatthew R. Ochs  * cxlflash_eh_host_reset_handler() - reset the host adapter
236415305514SMatthew R. Ochs  * @scp:	SCSI command from stack identifying host.
236515305514SMatthew R. Ochs  *
23661d3324c3SMatthew R. Ochs  * Following a reset, the state is evaluated again in case an EEH occurred
23671d3324c3SMatthew R. Ochs  * during the reset. In such a scenario, the host reset will either yield
23681d3324c3SMatthew R. Ochs  * until the EEH recovery is complete or return success or failure based
23691d3324c3SMatthew R. Ochs  * upon the current device state.
23701d3324c3SMatthew R. Ochs  *
237115305514SMatthew R. Ochs  * Return:
237215305514SMatthew R. Ochs  *	SUCCESS as defined in scsi/scsi.h
237315305514SMatthew R. Ochs  *	FAILED as defined in scsi/scsi.h
237415305514SMatthew R. Ochs  */
237515305514SMatthew R. Ochs static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
237615305514SMatthew R. Ochs {
237715305514SMatthew R. Ochs 	int rc = SUCCESS;
237815305514SMatthew R. Ochs 	int rcr = 0;
237915305514SMatthew R. Ochs 	struct Scsi_Host *host = scp->device->host;
2380fb67d44dSMatthew R. Ochs 	struct cxlflash_cfg *cfg = shost_priv(host);
2381fb67d44dSMatthew R. Ochs 	struct device *dev = &cfg->dev->dev;
238215305514SMatthew R. Ochs 
2383fb67d44dSMatthew R. Ochs 	dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
2384fb67d44dSMatthew R. Ochs 		"cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
2385fb67d44dSMatthew R. Ochs 		scp->device->channel, scp->device->id, scp->device->lun,
238615305514SMatthew R. Ochs 		get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
238715305514SMatthew R. Ochs 		get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
238815305514SMatthew R. Ochs 		get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
238915305514SMatthew R. Ochs 		get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
239015305514SMatthew R. Ochs 
239115305514SMatthew R. Ochs 	switch (cfg->state) {
239215305514SMatthew R. Ochs 	case STATE_NORMAL:
239315305514SMatthew R. Ochs 		cfg->state = STATE_RESET;
2394f411396dSManoj N. Kumar 		drain_ioctls(cfg);
239515305514SMatthew R. Ochs 		cxlflash_mark_contexts_error(cfg);
239615305514SMatthew R. Ochs 		rcr = afu_reset(cfg);
239715305514SMatthew R. Ochs 		if (rcr) {
239815305514SMatthew R. Ochs 			rc = FAILED;
239915305514SMatthew R. Ochs 			cfg->state = STATE_FAILTERM;
240015305514SMatthew R. Ochs 		} else
240115305514SMatthew R. Ochs 			cfg->state = STATE_NORMAL;
240215305514SMatthew R. Ochs 		wake_up_all(&cfg->reset_waitq);
24031d3324c3SMatthew R. Ochs 		ssleep(1);
24041d3324c3SMatthew R. Ochs 		/* fall through */
240515305514SMatthew R. Ochs 	case STATE_RESET:
240615305514SMatthew R. Ochs 		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
240715305514SMatthew R. Ochs 		if (cfg->state == STATE_NORMAL)
240815305514SMatthew R. Ochs 			break;
240915305514SMatthew R. Ochs 		/* fall through */
241015305514SMatthew R. Ochs 	default:
241115305514SMatthew R. Ochs 		rc = FAILED;
241215305514SMatthew R. Ochs 		break;
241315305514SMatthew R. Ochs 	}
241415305514SMatthew R. Ochs 
2415fb67d44dSMatthew R. Ochs 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
241615305514SMatthew R. Ochs 	return rc;
241715305514SMatthew R. Ochs }
241815305514SMatthew R. Ochs 
241915305514SMatthew R. Ochs /**
242015305514SMatthew R. Ochs  * cxlflash_change_queue_depth() - change the queue depth for the device
242115305514SMatthew R. Ochs  * @sdev:	SCSI device destined for queue depth change.
242215305514SMatthew R. Ochs  * @qdepth:	Requested queue depth value to set.
242315305514SMatthew R. Ochs  *
242415305514SMatthew R. Ochs  * The requested queue depth is capped to the maximum supported value.
242515305514SMatthew R. Ochs  *
242615305514SMatthew R. Ochs  * Return: The actual queue depth set.
242715305514SMatthew R. Ochs  */
242815305514SMatthew R. Ochs static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
242915305514SMatthew R. Ochs {
243115305514SMatthew R. Ochs 	if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
243215305514SMatthew R. Ochs 		qdepth = CXLFLASH_MAX_CMDS_PER_LUN;
243315305514SMatthew R. Ochs 
243415305514SMatthew R. Ochs 	scsi_change_queue_depth(sdev, qdepth);
243515305514SMatthew R. Ochs 	return sdev->queue_depth;
243615305514SMatthew R. Ochs }
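
/*
 * Usage sketch (standard SCSI sysfs knob; exact path assumed): writes to a
 * device's queue_depth attribute funnel into this handler, e.g.
 *
 *	echo 16 > /sys/bus/scsi/devices/<h:c:t:l>/queue_depth
 *
 * Values above CXLFLASH_MAX_CMDS_PER_LUN are silently capped.
 */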
243715305514SMatthew R. Ochs 
243815305514SMatthew R. Ochs /**
243915305514SMatthew R. Ochs  * cxlflash_show_port_status() - queries and presents the current port status
2440e0f01a21SMatthew R. Ochs  * @port:	Desired port for status reporting.
24413b225cd3SMatthew R. Ochs  * @cfg:	Internal structure associated with the host.
244215305514SMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
244315305514SMatthew R. Ochs  *
244478ae028eSMatthew R. Ochs  * Return: The size of the ASCII string returned in @buf or -EINVAL.
244515305514SMatthew R. Ochs  */
24463b225cd3SMatthew R. Ochs static ssize_t cxlflash_show_port_status(u32 port,
24473b225cd3SMatthew R. Ochs 					 struct cxlflash_cfg *cfg,
24483b225cd3SMatthew R. Ochs 					 char *buf)
244915305514SMatthew R. Ochs {
245078ae028eSMatthew R. Ochs 	struct device *dev = &cfg->dev->dev;
245115305514SMatthew R. Ochs 	char *disp_status;
245215305514SMatthew R. Ochs 	u64 status;
24530aa14887SMatthew R. Ochs 	__be64 __iomem *fc_port_regs;
245415305514SMatthew R. Ochs 
245578ae028eSMatthew R. Ochs 	WARN_ON(port >= MAX_FC_PORTS);
245678ae028eSMatthew R. Ochs 
245778ae028eSMatthew R. Ochs 	if (port >= cfg->num_fc_ports) {
245878ae028eSMatthew R. Ochs 		dev_info(dev, "%s: Port %d not supported on this card.\n",
245978ae028eSMatthew R. Ochs 			__func__, port);
246078ae028eSMatthew R. Ochs 		return -EINVAL;
246178ae028eSMatthew R. Ochs 	}
246215305514SMatthew R. Ochs 
24630aa14887SMatthew R. Ochs 	fc_port_regs = get_fc_port_regs(cfg, port);
24640aa14887SMatthew R. Ochs 	status = readq_be(&fc_port_regs[FC_MTIP_STATUS / 8]);
2465e0f01a21SMatthew R. Ochs 	status &= FC_MTIP_STATUS_MASK;
246615305514SMatthew R. Ochs 
246715305514SMatthew R. Ochs 	if (status == FC_MTIP_STATUS_ONLINE)
246815305514SMatthew R. Ochs 		disp_status = "online";
246915305514SMatthew R. Ochs 	else if (status == FC_MTIP_STATUS_OFFLINE)
247015305514SMatthew R. Ochs 		disp_status = "offline";
247115305514SMatthew R. Ochs 	else
247215305514SMatthew R. Ochs 		disp_status = "unknown";
247315305514SMatthew R. Ochs 
2474e0f01a21SMatthew R. Ochs 	return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
247515305514SMatthew R. Ochs }
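
/*
 * Usage sketch (assumed sysfs path): each portN host attribute reports the
 * link state of one FC port, e.g.
 *
 *	$ cat /sys/class/scsi_host/host<n>/port0
 *	online
 *
 * Reading a port beyond cfg->num_fc_ports fails with -EINVAL.
 */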
247615305514SMatthew R. Ochs 
247715305514SMatthew R. Ochs /**
2478e0f01a21SMatthew R. Ochs  * port0_show() - queries and presents the current status of port 0
2479e0f01a21SMatthew R. Ochs  * @dev:	Generic device associated with the host owning the port.
2480e0f01a21SMatthew R. Ochs  * @attr:	Device attribute representing the port.
2481e0f01a21SMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2482e0f01a21SMatthew R. Ochs  *
2483e0f01a21SMatthew R. Ochs  * Return: The size of the ASCII string returned in @buf.
2484e0f01a21SMatthew R. Ochs  */
2485e0f01a21SMatthew R. Ochs static ssize_t port0_show(struct device *dev,
2486e0f01a21SMatthew R. Ochs 			  struct device_attribute *attr,
2487e0f01a21SMatthew R. Ochs 			  char *buf)
2488e0f01a21SMatthew R. Ochs {
2489fb67d44dSMatthew R. Ochs 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2490e0f01a21SMatthew R. Ochs 
24913b225cd3SMatthew R. Ochs 	return cxlflash_show_port_status(0, cfg, buf);
2492e0f01a21SMatthew R. Ochs }
2493e0f01a21SMatthew R. Ochs 
2494e0f01a21SMatthew R. Ochs /**
2495e0f01a21SMatthew R. Ochs  * port1_show() - queries and presents the current status of port 1
2496e0f01a21SMatthew R. Ochs  * @dev:	Generic device associated with the host owning the port.
2497e0f01a21SMatthew R. Ochs  * @attr:	Device attribute representing the port.
2498e0f01a21SMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2499e0f01a21SMatthew R. Ochs  *
2500e0f01a21SMatthew R. Ochs  * Return: The size of the ASCII string returned in @buf.
2501e0f01a21SMatthew R. Ochs  */
2502e0f01a21SMatthew R. Ochs static ssize_t port1_show(struct device *dev,
2503e0f01a21SMatthew R. Ochs 			  struct device_attribute *attr,
2504e0f01a21SMatthew R. Ochs 			  char *buf)
2505e0f01a21SMatthew R. Ochs {
2506fb67d44dSMatthew R. Ochs 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2507e0f01a21SMatthew R. Ochs 
25083b225cd3SMatthew R. Ochs 	return cxlflash_show_port_status(1, cfg, buf);
2509e0f01a21SMatthew R. Ochs }
2510e0f01a21SMatthew R. Ochs 
2511e0f01a21SMatthew R. Ochs /**
25121cd7fabcSMatthew R. Ochs  * port2_show() - queries and presents the current status of port 2
25131cd7fabcSMatthew R. Ochs  * @dev:	Generic device associated with the host owning the port.
25141cd7fabcSMatthew R. Ochs  * @attr:	Device attribute representing the port.
25151cd7fabcSMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
25161cd7fabcSMatthew R. Ochs  *
25171cd7fabcSMatthew R. Ochs  * Return: The size of the ASCII string returned in @buf.
25181cd7fabcSMatthew R. Ochs  */
25191cd7fabcSMatthew R. Ochs static ssize_t port2_show(struct device *dev,
25201cd7fabcSMatthew R. Ochs 			  struct device_attribute *attr,
25211cd7fabcSMatthew R. Ochs 			  char *buf)
25221cd7fabcSMatthew R. Ochs {
25231cd7fabcSMatthew R. Ochs 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
25241cd7fabcSMatthew R. Ochs 
25251cd7fabcSMatthew R. Ochs 	return cxlflash_show_port_status(2, cfg, buf);
25261cd7fabcSMatthew R. Ochs }
25271cd7fabcSMatthew R. Ochs 
25281cd7fabcSMatthew R. Ochs /**
25291cd7fabcSMatthew R. Ochs  * port3_show() - queries and presents the current status of port 3
25301cd7fabcSMatthew R. Ochs  * @dev:	Generic device associated with the host owning the port.
25311cd7fabcSMatthew R. Ochs  * @attr:	Device attribute representing the port.
25321cd7fabcSMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
25331cd7fabcSMatthew R. Ochs  *
25341cd7fabcSMatthew R. Ochs  * Return: The size of the ASCII string returned in @buf.
25351cd7fabcSMatthew R. Ochs  */
25361cd7fabcSMatthew R. Ochs static ssize_t port3_show(struct device *dev,
25371cd7fabcSMatthew R. Ochs 			  struct device_attribute *attr,
25381cd7fabcSMatthew R. Ochs 			  char *buf)
25391cd7fabcSMatthew R. Ochs {
25401cd7fabcSMatthew R. Ochs 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
25411cd7fabcSMatthew R. Ochs 
25421cd7fabcSMatthew R. Ochs 	return cxlflash_show_port_status(3, cfg, buf);
25431cd7fabcSMatthew R. Ochs }
25441cd7fabcSMatthew R. Ochs 
25451cd7fabcSMatthew R. Ochs /**
2546e0f01a21SMatthew R. Ochs  * lun_mode_show() - presents the current LUN mode of the host
254715305514SMatthew R. Ochs  * @dev:	Generic device associated with the host.
2548e0f01a21SMatthew R. Ochs  * @attr:	Device attribute representing the LUN mode.
254915305514SMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
255015305514SMatthew R. Ochs  *
255115305514SMatthew R. Ochs  * Return: The size of the ASCII string returned in @buf.
255215305514SMatthew R. Ochs  */
2553e0f01a21SMatthew R. Ochs static ssize_t lun_mode_show(struct device *dev,
255415305514SMatthew R. Ochs 			     struct device_attribute *attr, char *buf)
255515305514SMatthew R. Ochs {
2556fb67d44dSMatthew R. Ochs 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
255715305514SMatthew R. Ochs 	struct afu *afu = cfg->afu;
255815305514SMatthew R. Ochs 
2559e0f01a21SMatthew R. Ochs 	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
256015305514SMatthew R. Ochs }
256115305514SMatthew R. Ochs 
256215305514SMatthew R. Ochs /**
2563e0f01a21SMatthew R. Ochs  * lun_mode_store() - sets the LUN mode of the host
256415305514SMatthew R. Ochs  * @dev:	Generic device associated with the host.
2565e0f01a21SMatthew R. Ochs  * @attr:	Device attribute representing the LUN mode.
256615305514SMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
256715305514SMatthew R. Ochs  * @count:	Length of data residing in @buf.
256815305514SMatthew R. Ochs  *
256915305514SMatthew R. Ochs  * The CXL Flash AFU supports a dummy LUN mode where the external
257015305514SMatthew R. Ochs  * links and storage are not required. Space on the FPGA is used
257115305514SMatthew R. Ochs  * to create 1 or 2 small LUNs which are presented to the system
257215305514SMatthew R. Ochs  * as if they were a normal storage device. This feature is useful
257315305514SMatthew R. Ochs  * during development and also provides manufacturing with a way
257415305514SMatthew R. Ochs  * to test the AFU without an actual device.
257515305514SMatthew R. Ochs  *
257615305514SMatthew R. Ochs  * 0 = external LUN[s] (default)
257715305514SMatthew R. Ochs  * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
257815305514SMatthew R. Ochs  * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
257915305514SMatthew R. Ochs  * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
258015305514SMatthew R. Ochs  * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
258115305514SMatthew R. Ochs  *
258215305514SMatthew R. Ochs  * Return: The number of bytes consumed from @buf (i.e. @count).
258315305514SMatthew R. Ochs  */
2584e0f01a21SMatthew R. Ochs static ssize_t lun_mode_store(struct device *dev,
258515305514SMatthew R. Ochs 			      struct device_attribute *attr,
258615305514SMatthew R. Ochs 			      const char *buf, size_t count)
258715305514SMatthew R. Ochs {
258815305514SMatthew R. Ochs 	struct Scsi_Host *shost = class_to_shost(dev);
2589fb67d44dSMatthew R. Ochs 	struct cxlflash_cfg *cfg = shost_priv(shost);
259015305514SMatthew R. Ochs 	struct afu *afu = cfg->afu;
259115305514SMatthew R. Ochs 	int rc;
259215305514SMatthew R. Ochs 	u32 lun_mode;
259315305514SMatthew R. Ochs 
259415305514SMatthew R. Ochs 	rc = kstrtouint(buf, 10, &lun_mode);
259515305514SMatthew R. Ochs 	if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
259615305514SMatthew R. Ochs 		afu->internal_lun = lun_mode;
2597603ecce9SManoj N. Kumar 
2598603ecce9SManoj N. Kumar 		/*
2599603ecce9SManoj N. Kumar 		 * When configured for internal LUN, there is only one channel,
260078ae028eSMatthew R. Ochs 		 * channel number 0, else there will be one less than the number
260178ae028eSMatthew R. Ochs 		 * of fc ports for this card.
2602603ecce9SManoj N. Kumar 		 */
2603603ecce9SManoj N. Kumar 		if (afu->internal_lun)
2604603ecce9SManoj N. Kumar 			shost->max_channel = 0;
2605603ecce9SManoj N. Kumar 		else
26068fa4f177SMatthew R. Ochs 			shost->max_channel = PORTNUM2CHAN(cfg->num_fc_ports);
2607603ecce9SManoj N. Kumar 
260815305514SMatthew R. Ochs 		afu_reset(cfg);
260915305514SMatthew R. Ochs 		scsi_scan_host(cfg->host);
261015305514SMatthew R. Ochs 	}
261115305514SMatthew R. Ochs 
261215305514SMatthew R. Ochs 	return count;
261315305514SMatthew R. Ochs }
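
/*
 * Usage sketch (assumed sysfs path): switching to a single internal 512B
 * LUN and back to external LUNs, each write triggering an AFU reset and a
 * host rescan:
 *
 *	echo 1 > /sys/class/scsi_host/host<n>/lun_mode
 *	echo 0 > /sys/class/scsi_host/host<n>/lun_mode
 */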
261415305514SMatthew R. Ochs 
261515305514SMatthew R. Ochs /**
2616e0f01a21SMatthew R. Ochs  * ioctl_version_show() - presents the current ioctl version of the host
261715305514SMatthew R. Ochs  * @dev:	Generic device associated with the host.
261815305514SMatthew R. Ochs  * @attr:	Device attribute representing the ioctl version.
261915305514SMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE to report back the ioctl version.
262015305514SMatthew R. Ochs  *
262115305514SMatthew R. Ochs  * Return: The size of the ASCII string returned in @buf.
262215305514SMatthew R. Ochs  */
2623e0f01a21SMatthew R. Ochs static ssize_t ioctl_version_show(struct device *dev,
2624e0f01a21SMatthew R. Ochs 				  struct device_attribute *attr, char *buf)
262515305514SMatthew R. Ochs {
262615305514SMatthew R. Ochs 	return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0);
262715305514SMatthew R. Ochs }
262815305514SMatthew R. Ochs 
262915305514SMatthew R. Ochs /**
2630e0f01a21SMatthew R. Ochs  * cxlflash_show_port_lun_table() - queries and presents the port LUN table
2631e0f01a21SMatthew R. Ochs  * @port:	Desired port for status reporting.
26323b225cd3SMatthew R. Ochs  * @cfg:	Internal structure associated with the host.
2633e0f01a21SMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2634e0f01a21SMatthew R. Ochs  *
263578ae028eSMatthew R. Ochs  * Return: The size of the ASCII string returned in @buf or -EINVAL.
2636e0f01a21SMatthew R. Ochs  */
2637e0f01a21SMatthew R. Ochs static ssize_t cxlflash_show_port_lun_table(u32 port,
26383b225cd3SMatthew R. Ochs 					    struct cxlflash_cfg *cfg,
2639e0f01a21SMatthew R. Ochs 					    char *buf)
2640e0f01a21SMatthew R. Ochs {
264178ae028eSMatthew R. Ochs 	struct device *dev = &cfg->dev->dev;
26420aa14887SMatthew R. Ochs 	__be64 __iomem *fc_port_luns;
2643e0f01a21SMatthew R. Ochs 	int i;
2644e0f01a21SMatthew R. Ochs 	ssize_t bytes = 0;
2645e0f01a21SMatthew R. Ochs 
264678ae028eSMatthew R. Ochs 	WARN_ON(port >= MAX_FC_PORTS);
264778ae028eSMatthew R. Ochs 
264878ae028eSMatthew R. Ochs 	if (port >= cfg->num_fc_ports) {
264978ae028eSMatthew R. Ochs 		dev_info(dev, "%s: Port %d not supported on this card.\n",
265078ae028eSMatthew R. Ochs 			__func__, port);
265178ae028eSMatthew R. Ochs 		return -EINVAL;
265278ae028eSMatthew R. Ochs 	}
2653e0f01a21SMatthew R. Ochs 
26540aa14887SMatthew R. Ochs 	fc_port_luns = get_fc_port_luns(cfg, port);
2655e0f01a21SMatthew R. Ochs 
2656e0f01a21SMatthew R. Ochs 	for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
2657e0f01a21SMatthew R. Ochs 		bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
26580aa14887SMatthew R. Ochs 				   "%03d: %016llx\n",
26590aa14887SMatthew R. Ochs 				   i, readq_be(&fc_port_luns[i]));
2660e0f01a21SMatthew R. Ochs 	return bytes;
2661e0f01a21SMatthew R. Ochs }
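
/*
 * Example of the resulting output (LUN ID values illustrative): one line
 * per LUN table entry, the index followed by the 64-bit LUN ID:
 *
 *	000: 4000400100000000
 *	001: 0000000000000000
 *	...
 */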
2662e0f01a21SMatthew R. Ochs 
2663e0f01a21SMatthew R. Ochs /**
2664e0f01a21SMatthew R. Ochs  * port0_lun_table_show() - presents the current LUN table of port 0
2665e0f01a21SMatthew R. Ochs  * @dev:	Generic device associated with the host owning the port.
2666e0f01a21SMatthew R. Ochs  * @attr:	Device attribute representing the port.
2667e0f01a21SMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2668e0f01a21SMatthew R. Ochs  *
2669e0f01a21SMatthew R. Ochs  * Return: The size of the ASCII string returned in @buf.
2670e0f01a21SMatthew R. Ochs  */
2671e0f01a21SMatthew R. Ochs static ssize_t port0_lun_table_show(struct device *dev,
2672e0f01a21SMatthew R. Ochs 				    struct device_attribute *attr,
2673e0f01a21SMatthew R. Ochs 				    char *buf)
2674e0f01a21SMatthew R. Ochs {
2675fb67d44dSMatthew R. Ochs 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2676e0f01a21SMatthew R. Ochs 
26773b225cd3SMatthew R. Ochs 	return cxlflash_show_port_lun_table(0, cfg, buf);
2678e0f01a21SMatthew R. Ochs }
2679e0f01a21SMatthew R. Ochs 
2680e0f01a21SMatthew R. Ochs /**
2681e0f01a21SMatthew R. Ochs  * port1_lun_table_show() - presents the current LUN table of port 1
2682e0f01a21SMatthew R. Ochs  * @dev:	Generic device associated with the host owning the port.
2683e0f01a21SMatthew R. Ochs  * @attr:	Device attribute representing the port.
2684e0f01a21SMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2685e0f01a21SMatthew R. Ochs  *
2686e0f01a21SMatthew R. Ochs  * Return: The size of the ASCII string returned in @buf.
2687e0f01a21SMatthew R. Ochs  */
2688e0f01a21SMatthew R. Ochs static ssize_t port1_lun_table_show(struct device *dev,
2689e0f01a21SMatthew R. Ochs 				    struct device_attribute *attr,
2690e0f01a21SMatthew R. Ochs 				    char *buf)
2691e0f01a21SMatthew R. Ochs {
2692fb67d44dSMatthew R. Ochs 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2693e0f01a21SMatthew R. Ochs 
26943b225cd3SMatthew R. Ochs 	return cxlflash_show_port_lun_table(1, cfg, buf);
2695e0f01a21SMatthew R. Ochs }
2696e0f01a21SMatthew R. Ochs 
2697e0f01a21SMatthew R. Ochs /**
26981cd7fabcSMatthew R. Ochs  * port2_lun_table_show() - presents the current LUN table of port 2
26991cd7fabcSMatthew R. Ochs  * @dev:	Generic device associated with the host owning the port.
27001cd7fabcSMatthew R. Ochs  * @attr:	Device attribute representing the port.
27011cd7fabcSMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
27021cd7fabcSMatthew R. Ochs  *
27031cd7fabcSMatthew R. Ochs  * Return: The size of the ASCII string returned in @buf.
27041cd7fabcSMatthew R. Ochs  */
27051cd7fabcSMatthew R. Ochs static ssize_t port2_lun_table_show(struct device *dev,
27061cd7fabcSMatthew R. Ochs 				    struct device_attribute *attr,
27071cd7fabcSMatthew R. Ochs 				    char *buf)
27081cd7fabcSMatthew R. Ochs {
27091cd7fabcSMatthew R. Ochs 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
27101cd7fabcSMatthew R. Ochs 
27111cd7fabcSMatthew R. Ochs 	return cxlflash_show_port_lun_table(2, cfg, buf);
27121cd7fabcSMatthew R. Ochs }
27131cd7fabcSMatthew R. Ochs 
27141cd7fabcSMatthew R. Ochs /**
27151cd7fabcSMatthew R. Ochs  * port3_lun_table_show() - presents the current LUN table of port 3
27161cd7fabcSMatthew R. Ochs  * @dev:	Generic device associated with the host owning the port.
27171cd7fabcSMatthew R. Ochs  * @attr:	Device attribute representing the port.
27181cd7fabcSMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
27191cd7fabcSMatthew R. Ochs  *
27201cd7fabcSMatthew R. Ochs  * Return: The size of the ASCII string returned in @buf.
27211cd7fabcSMatthew R. Ochs  */
27221cd7fabcSMatthew R. Ochs static ssize_t port3_lun_table_show(struct device *dev,
27231cd7fabcSMatthew R. Ochs 				    struct device_attribute *attr,
27241cd7fabcSMatthew R. Ochs 				    char *buf)
27251cd7fabcSMatthew R. Ochs {
27261cd7fabcSMatthew R. Ochs 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
27271cd7fabcSMatthew R. Ochs 
27281cd7fabcSMatthew R. Ochs 	return cxlflash_show_port_lun_table(3, cfg, buf);
27291cd7fabcSMatthew R. Ochs }
27301cd7fabcSMatthew R. Ochs 
27311cd7fabcSMatthew R. Ochs /**
2732cba06e6dSMatthew R. Ochs  * irqpoll_weight_show() - presents the current IRQ poll weight for the host
2733cba06e6dSMatthew R. Ochs  * @dev:	Generic device associated with the host.
2734cba06e6dSMatthew R. Ochs  * @attr:	Device attribute representing the IRQ poll weight.
2735cba06e6dSMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE to report back the current IRQ poll
2736cba06e6dSMatthew R. Ochs  *		weight in ASCII.
2737cba06e6dSMatthew R. Ochs  *
2738cba06e6dSMatthew R. Ochs  * An IRQ poll weight of 0 indicates polling is disabled.
2739cba06e6dSMatthew R. Ochs  *
2740cba06e6dSMatthew R. Ochs  * Return: The size of the ASCII string returned in @buf.
2741cba06e6dSMatthew R. Ochs  */
2742cba06e6dSMatthew R. Ochs static ssize_t irqpoll_weight_show(struct device *dev,
2743cba06e6dSMatthew R. Ochs 				   struct device_attribute *attr, char *buf)
2744cba06e6dSMatthew R. Ochs {
2745cba06e6dSMatthew R. Ochs 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2746cba06e6dSMatthew R. Ochs 	struct afu *afu = cfg->afu;
2747cba06e6dSMatthew R. Ochs 
2748cba06e6dSMatthew R. Ochs 	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->irqpoll_weight);
2749cba06e6dSMatthew R. Ochs }
2750cba06e6dSMatthew R. Ochs 
2751cba06e6dSMatthew R. Ochs /**
2752cba06e6dSMatthew R. Ochs  * irqpoll_weight_store() - sets the current IRQ poll weight for the host
2753cba06e6dSMatthew R. Ochs  * @dev:	Generic device associated with the host.
2754cba06e6dSMatthew R. Ochs  * @attr:	Device attribute representing the IRQ poll weight.
2755cba06e6dSMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE containing the desired IRQ poll
2756cba06e6dSMatthew R. Ochs  *		weight in ASCII.
2757cba06e6dSMatthew R. Ochs  * @count:	Length of data residing in @buf.
2758cba06e6dSMatthew R. Ochs  *
2759cba06e6dSMatthew R. Ochs  * An IRQ poll weight of 0 indicates polling is disabled.
2760cba06e6dSMatthew R. Ochs  *
2761cba06e6dSMatthew R. Ochs  * Return: @count on success, -EINVAL on failure.
2762cba06e6dSMatthew R. Ochs  */
2763cba06e6dSMatthew R. Ochs static ssize_t irqpoll_weight_store(struct device *dev,
2764cba06e6dSMatthew R. Ochs 				    struct device_attribute *attr,
2765cba06e6dSMatthew R. Ochs 				    const char *buf, size_t count)
2766cba06e6dSMatthew R. Ochs {
2767cba06e6dSMatthew R. Ochs 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2768cba06e6dSMatthew R. Ochs 	struct device *cfgdev = &cfg->dev->dev;
2769cba06e6dSMatthew R. Ochs 	struct afu *afu = cfg->afu;
2770bfc0bab1SUma Krishnan 	struct hwq *hwq;
2771cba06e6dSMatthew R. Ochs 	u32 weight;
2772bfc0bab1SUma Krishnan 	int rc, i;
2773cba06e6dSMatthew R. Ochs 
2774cba06e6dSMatthew R. Ochs 	rc = kstrtouint(buf, 10, &weight);
2775cba06e6dSMatthew R. Ochs 	if (rc)
2776cba06e6dSMatthew R. Ochs 		return -EINVAL;
2777cba06e6dSMatthew R. Ochs 
2778cba06e6dSMatthew R. Ochs 	if (weight > 256) {
2779cba06e6dSMatthew R. Ochs 		dev_info(cfgdev,
2780cba06e6dSMatthew R. Ochs 			 "Invalid IRQ poll weight. It must be 256 or less.\n");
2781cba06e6dSMatthew R. Ochs 		return -EINVAL;
2782cba06e6dSMatthew R. Ochs 	}
2783cba06e6dSMatthew R. Ochs 
2784cba06e6dSMatthew R. Ochs 	if (weight == afu->irqpoll_weight) {
2785cba06e6dSMatthew R. Ochs 		dev_info(cfgdev,
2786cba06e6dSMatthew R. Ochs 			 "IRQ poll weight unchanged from current value.\n");
2787cba06e6dSMatthew R. Ochs 		return -EINVAL;
2788cba06e6dSMatthew R. Ochs 	}
2789cba06e6dSMatthew R. Ochs 
2790bfc0bab1SUma Krishnan 	if (afu_is_irqpoll_enabled(afu)) {
27913065267aSMatthew R. Ochs 		for (i = 0; i < afu->num_hwqs; i++) {
2792bfc0bab1SUma Krishnan 			hwq = get_hwq(afu, i);
2793bfc0bab1SUma Krishnan 
2794bfc0bab1SUma Krishnan 			irq_poll_disable(&hwq->irqpoll);
2795bfc0bab1SUma Krishnan 		}
2796bfc0bab1SUma Krishnan 	}
2797cba06e6dSMatthew R. Ochs 
2798cba06e6dSMatthew R. Ochs 	afu->irqpoll_weight = weight;
2799cba06e6dSMatthew R. Ochs 
2800bfc0bab1SUma Krishnan 	if (weight > 0) {
28013065267aSMatthew R. Ochs 		for (i = 0; i < afu->num_hwqs; i++) {
2802bfc0bab1SUma Krishnan 			hwq = get_hwq(afu, i);
2803bfc0bab1SUma Krishnan 
2804bfc0bab1SUma Krishnan 			irq_poll_init(&hwq->irqpoll, weight, cxlflash_irqpoll);
2805bfc0bab1SUma Krishnan 		}
2806bfc0bab1SUma Krishnan 	}
2807cba06e6dSMatthew R. Ochs 
2808cba06e6dSMatthew R. Ochs 	return count;
2809cba06e6dSMatthew R. Ochs }
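
/*
 * Usage sketch (assumed sysfs path): enable IRQ polling with a budget of
 * 64 completions per poll cycle, or disable polling again:
 *
 *	echo 64 > /sys/class/scsi_host/host<n>/irqpoll_weight
 *	echo 0  > /sys/class/scsi_host/host<n>/irqpoll_weight
 */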
2810cba06e6dSMatthew R. Ochs 
2811cba06e6dSMatthew R. Ochs /**
28123065267aSMatthew R. Ochs  * num_hwqs_show() - presents the number of hardware queues for the host
28133065267aSMatthew R. Ochs  * @dev:	Generic device associated with the host.
28143065267aSMatthew R. Ochs  * @attr:	Device attribute representing the number of hardware queues.
28153065267aSMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE to report back the number of hardware
28163065267aSMatthew R. Ochs  *		queues in ASCII.
28173065267aSMatthew R. Ochs  *
28183065267aSMatthew R. Ochs  * Return: The size of the ASCII string returned in @buf.
28193065267aSMatthew R. Ochs  */
28203065267aSMatthew R. Ochs static ssize_t num_hwqs_show(struct device *dev,
28213065267aSMatthew R. Ochs 			     struct device_attribute *attr, char *buf)
28223065267aSMatthew R. Ochs {
28233065267aSMatthew R. Ochs 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
28243065267aSMatthew R. Ochs 	struct afu *afu = cfg->afu;
28253065267aSMatthew R. Ochs 
28263065267aSMatthew R. Ochs 	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->num_hwqs);
28273065267aSMatthew R. Ochs }
28283065267aSMatthew R. Ochs 
28293065267aSMatthew R. Ochs /**
28303065267aSMatthew R. Ochs  * num_hwqs_store() - sets the number of hardware queues for the host
28313065267aSMatthew R. Ochs  * @dev:	Generic device associated with the host.
28323065267aSMatthew R. Ochs  * @attr:	Device attribute representing the number of hardware queues.
28333065267aSMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE containing the number of hardware
28343065267aSMatthew R. Ochs  *		queues in ASCII.
28353065267aSMatthew R. Ochs  * @count:	Length of data residing in @buf.
28363065267aSMatthew R. Ochs  *
28373065267aSMatthew R. Ochs  * n > 0: num_hwqs = n
28383065267aSMatthew R. Ochs  * n = 0: num_hwqs = num_online_cpus()
28393065267aSMatthew R. Ochs  * n < 0: num_hwqs = num_online_cpus() / abs(n)
28403065267aSMatthew R. Ochs  *
28413065267aSMatthew R. Ochs  * Return: @count on success, -EINVAL on failure.
28423065267aSMatthew R. Ochs  */
28433065267aSMatthew R. Ochs static ssize_t num_hwqs_store(struct device *dev,
28443065267aSMatthew R. Ochs 			      struct device_attribute *attr,
28453065267aSMatthew R. Ochs 			      const char *buf, size_t count)
28463065267aSMatthew R. Ochs {
28473065267aSMatthew R. Ochs 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
28483065267aSMatthew R. Ochs 	struct afu *afu = cfg->afu;
28493065267aSMatthew R. Ochs 	int rc;
28503065267aSMatthew R. Ochs 	int nhwqs, num_hwqs;
28513065267aSMatthew R. Ochs 
28523065267aSMatthew R. Ochs 	rc = kstrtoint(buf, 10, &nhwqs);
28533065267aSMatthew R. Ochs 	if (rc)
28543065267aSMatthew R. Ochs 		return -EINVAL;
28553065267aSMatthew R. Ochs 
28563065267aSMatthew R. Ochs 	if (nhwqs >= 1)
28573065267aSMatthew R. Ochs 		num_hwqs = nhwqs;
28583065267aSMatthew R. Ochs 	else if (nhwqs == 0)
28593065267aSMatthew R. Ochs 		num_hwqs = num_online_cpus();
28603065267aSMatthew R. Ochs 	else
28613065267aSMatthew R. Ochs 		num_hwqs = num_online_cpus() / abs(nhwqs);
28623065267aSMatthew R. Ochs 
28633065267aSMatthew R. Ochs 	afu->desired_hwqs = min(num_hwqs, CXLFLASH_MAX_HWQS);
28643065267aSMatthew R. Ochs 	WARN_ON_ONCE(afu->desired_hwqs == 0);
28653065267aSMatthew R. Ochs 
28663065267aSMatthew R. Ochs retry:
28673065267aSMatthew R. Ochs 	switch (cfg->state) {
28683065267aSMatthew R. Ochs 	case STATE_NORMAL:
28693065267aSMatthew R. Ochs 		cfg->state = STATE_RESET;
28703065267aSMatthew R. Ochs 		drain_ioctls(cfg);
28713065267aSMatthew R. Ochs 		cxlflash_mark_contexts_error(cfg);
28723065267aSMatthew R. Ochs 		rc = afu_reset(cfg);
28733065267aSMatthew R. Ochs 		if (rc)
28743065267aSMatthew R. Ochs 			cfg->state = STATE_FAILTERM;
28753065267aSMatthew R. Ochs 		else
28763065267aSMatthew R. Ochs 			cfg->state = STATE_NORMAL;
28773065267aSMatthew R. Ochs 		wake_up_all(&cfg->reset_waitq);
28783065267aSMatthew R. Ochs 		break;
28793065267aSMatthew R. Ochs 	case STATE_RESET:
28803065267aSMatthew R. Ochs 		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
28813065267aSMatthew R. Ochs 		if (cfg->state == STATE_NORMAL)
28823065267aSMatthew R. Ochs 			goto retry;
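		/* else, fall through */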
28833065267aSMatthew R. Ochs 	default:
28843065267aSMatthew R. Ochs 		/* Ideally should not happen */
28853065267aSMatthew R. Ochs 		dev_err(dev, "%s: Device is not ready, state=%d\n",
28863065267aSMatthew R. Ochs 			__func__, cfg->state);
28873065267aSMatthew R. Ochs 		break;
28883065267aSMatthew R. Ochs 	}
28893065267aSMatthew R. Ochs 
28903065267aSMatthew R. Ochs 	return count;
28913065267aSMatthew R. Ochs }
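
/*
 * Usage sketch (assumed sysfs path): with 16 online CPUs, the following
 * writes yield 4, 16 and 8 hardware queues respectively, each taking
 * effect through a full AFU reset:
 *
 *	echo 4  > /sys/class/scsi_host/host<n>/num_hwqs
 *	echo 0  > /sys/class/scsi_host/host<n>/num_hwqs
 *	echo -2 > /sys/class/scsi_host/host<n>/num_hwqs
 */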
28923065267aSMatthew R. Ochs 
28931dd0c0e4SMatthew R. Ochs static const char *hwq_mode_name[MAX_HWQ_MODE] = { "rr", "tag", "cpu" };
28941dd0c0e4SMatthew R. Ochs 
28951dd0c0e4SMatthew R. Ochs /**
28961dd0c0e4SMatthew R. Ochs  * hwq_mode_show() - presents the HWQ steering mode for the host
28971dd0c0e4SMatthew R. Ochs  * @dev:	Generic device associated with the host.
28981dd0c0e4SMatthew R. Ochs  * @attr:	Device attribute representing the HWQ steering mode.
28991dd0c0e4SMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE to report back the HWQ steering mode
29001dd0c0e4SMatthew R. Ochs  *		as a character string.
29011dd0c0e4SMatthew R. Ochs  *
29021dd0c0e4SMatthew R. Ochs  * Return: The size of the ASCII string returned in @buf.
29031dd0c0e4SMatthew R. Ochs  */
29041dd0c0e4SMatthew R. Ochs static ssize_t hwq_mode_show(struct device *dev,
29051dd0c0e4SMatthew R. Ochs 			     struct device_attribute *attr, char *buf)
29061dd0c0e4SMatthew R. Ochs {
29071dd0c0e4SMatthew R. Ochs 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
29081dd0c0e4SMatthew R. Ochs 	struct afu *afu = cfg->afu;
29091dd0c0e4SMatthew R. Ochs 
29101dd0c0e4SMatthew R. Ochs 	return scnprintf(buf, PAGE_SIZE, "%s\n", hwq_mode_name[afu->hwq_mode]);
29111dd0c0e4SMatthew R. Ochs }
29121dd0c0e4SMatthew R. Ochs 
29131dd0c0e4SMatthew R. Ochs /**
29141dd0c0e4SMatthew R. Ochs  * hwq_mode_store() - sets the HWQ steering mode for the host
29151dd0c0e4SMatthew R. Ochs  * @dev:	Generic device associated with the host.
29161dd0c0e4SMatthew R. Ochs  * @attr:	Device attribute representing the HWQ steering mode.
29171dd0c0e4SMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE containing the HWQ steering mode
29181dd0c0e4SMatthew R. Ochs  *		as a character string.
29191dd0c0e4SMatthew R. Ochs  * @count:	Length of data residing in @buf.
29201dd0c0e4SMatthew R. Ochs  *
29211dd0c0e4SMatthew R. Ochs  * rr = Round-Robin
29221dd0c0e4SMatthew R. Ochs  * tag = Block MQ Tagging
29231dd0c0e4SMatthew R. Ochs  * cpu = CPU Affinity
29241dd0c0e4SMatthew R. Ochs  *
29251dd0c0e4SMatthew R. Ochs  * Return: @count on success, -EINVAL on failure.
29261dd0c0e4SMatthew R. Ochs  */
29271dd0c0e4SMatthew R. Ochs static ssize_t hwq_mode_store(struct device *dev,
29281dd0c0e4SMatthew R. Ochs 			      struct device_attribute *attr,
29291dd0c0e4SMatthew R. Ochs 			      const char *buf, size_t count)
29301dd0c0e4SMatthew R. Ochs {
29311dd0c0e4SMatthew R. Ochs 	struct Scsi_Host *shost = class_to_shost(dev);
29321dd0c0e4SMatthew R. Ochs 	struct cxlflash_cfg *cfg = shost_priv(shost);
29331dd0c0e4SMatthew R. Ochs 	struct device *cfgdev = &cfg->dev->dev;
29341dd0c0e4SMatthew R. Ochs 	struct afu *afu = cfg->afu;
29351dd0c0e4SMatthew R. Ochs 	int i;
29361dd0c0e4SMatthew R. Ochs 	u32 mode = MAX_HWQ_MODE;
29371dd0c0e4SMatthew R. Ochs 
29381dd0c0e4SMatthew R. Ochs 	for (i = 0; i < MAX_HWQ_MODE; i++) {
29391dd0c0e4SMatthew R. Ochs 		if (!strncmp(hwq_mode_name[i], buf, strlen(hwq_mode_name[i]))) {
29401dd0c0e4SMatthew R. Ochs 			mode = i;
29411dd0c0e4SMatthew R. Ochs 			break;
29421dd0c0e4SMatthew R. Ochs 		}
29431dd0c0e4SMatthew R. Ochs 	}
29441dd0c0e4SMatthew R. Ochs 
29451dd0c0e4SMatthew R. Ochs 	if (mode >= MAX_HWQ_MODE) {
29461dd0c0e4SMatthew R. Ochs 		dev_info(cfgdev, "Invalid HWQ steering mode.\n");
29471dd0c0e4SMatthew R. Ochs 		return -EINVAL;
29481dd0c0e4SMatthew R. Ochs 	}
29491dd0c0e4SMatthew R. Ochs 
29501dd0c0e4SMatthew R. Ochs 	if ((mode == HWQ_MODE_TAG) && !shost_use_blk_mq(shost)) {
29511dd0c0e4SMatthew R. Ochs 		dev_info(cfgdev, "SCSI-MQ is not enabled, use a different "
29521dd0c0e4SMatthew R. Ochs 			 "HWQ steering mode.\n");
29531dd0c0e4SMatthew R. Ochs 		return -EINVAL;
29541dd0c0e4SMatthew R. Ochs 	}
29551dd0c0e4SMatthew R. Ochs 
29561dd0c0e4SMatthew R. Ochs 	afu->hwq_mode = mode;
29571dd0c0e4SMatthew R. Ochs 
29581dd0c0e4SMatthew R. Ochs 	return count;
29591dd0c0e4SMatthew R. Ochs }
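
/*
 * Usage sketch (assumed sysfs path): steer commands by submitting CPU
 * instead of the default round-robin:
 *
 *	echo cpu > /sys/class/scsi_host/host<n>/hwq_mode
 *
 * "tag" is only accepted when the host is running with SCSI-MQ enabled.
 */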
29601dd0c0e4SMatthew R. Ochs 
29613065267aSMatthew R. Ochs /**
2962e0f01a21SMatthew R. Ochs  * mode_show() - presents the current mode of the device
296315305514SMatthew R. Ochs  * @dev:	Generic device associated with the device.
296415305514SMatthew R. Ochs  * @attr:	Device attribute representing the device mode.
296515305514SMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
296615305514SMatthew R. Ochs  *
296715305514SMatthew R. Ochs  * Return: The size of the ASCII string returned in @buf.
296815305514SMatthew R. Ochs  */
2969e0f01a21SMatthew R. Ochs static ssize_t mode_show(struct device *dev,
297015305514SMatthew R. Ochs 			 struct device_attribute *attr, char *buf)
297115305514SMatthew R. Ochs {
297215305514SMatthew R. Ochs 	struct scsi_device *sdev = to_scsi_device(dev);
297315305514SMatthew R. Ochs 
2974e0f01a21SMatthew R. Ochs 	return scnprintf(buf, PAGE_SIZE, "%s\n",
297515305514SMatthew R. Ochs 			 sdev->hostdata ? "superpipe" : "legacy");
297615305514SMatthew R. Ochs }
297715305514SMatthew R. Ochs 
297815305514SMatthew R. Ochs /*
297915305514SMatthew R. Ochs  * Host attributes
298015305514SMatthew R. Ochs  */
2981e0f01a21SMatthew R. Ochs static DEVICE_ATTR_RO(port0);
2982e0f01a21SMatthew R. Ochs static DEVICE_ATTR_RO(port1);
29831cd7fabcSMatthew R. Ochs static DEVICE_ATTR_RO(port2);
29841cd7fabcSMatthew R. Ochs static DEVICE_ATTR_RO(port3);
2985e0f01a21SMatthew R. Ochs static DEVICE_ATTR_RW(lun_mode);
2986e0f01a21SMatthew R. Ochs static DEVICE_ATTR_RO(ioctl_version);
2987e0f01a21SMatthew R. Ochs static DEVICE_ATTR_RO(port0_lun_table);
2988e0f01a21SMatthew R. Ochs static DEVICE_ATTR_RO(port1_lun_table);
29891cd7fabcSMatthew R. Ochs static DEVICE_ATTR_RO(port2_lun_table);
29901cd7fabcSMatthew R. Ochs static DEVICE_ATTR_RO(port3_lun_table);
2991cba06e6dSMatthew R. Ochs static DEVICE_ATTR_RW(irqpoll_weight);
29923065267aSMatthew R. Ochs static DEVICE_ATTR_RW(num_hwqs);
29931dd0c0e4SMatthew R. Ochs static DEVICE_ATTR_RW(hwq_mode);
299415305514SMatthew R. Ochs 
299515305514SMatthew R. Ochs static struct device_attribute *cxlflash_host_attrs[] = {
299615305514SMatthew R. Ochs 	&dev_attr_port0,
299715305514SMatthew R. Ochs 	&dev_attr_port1,
29981cd7fabcSMatthew R. Ochs 	&dev_attr_port2,
29991cd7fabcSMatthew R. Ochs 	&dev_attr_port3,
300015305514SMatthew R. Ochs 	&dev_attr_lun_mode,
300115305514SMatthew R. Ochs 	&dev_attr_ioctl_version,
3002e0f01a21SMatthew R. Ochs 	&dev_attr_port0_lun_table,
3003e0f01a21SMatthew R. Ochs 	&dev_attr_port1_lun_table,
30041cd7fabcSMatthew R. Ochs 	&dev_attr_port2_lun_table,
30051cd7fabcSMatthew R. Ochs 	&dev_attr_port3_lun_table,
3006cba06e6dSMatthew R. Ochs 	&dev_attr_irqpoll_weight,
30073065267aSMatthew R. Ochs 	&dev_attr_num_hwqs,
30081dd0c0e4SMatthew R. Ochs 	&dev_attr_hwq_mode,
300915305514SMatthew R. Ochs 	NULL
301015305514SMatthew R. Ochs };
301115305514SMatthew R. Ochs 
301215305514SMatthew R. Ochs /*
301315305514SMatthew R. Ochs  * Device attributes
301415305514SMatthew R. Ochs  */
3015e0f01a21SMatthew R. Ochs static DEVICE_ATTR_RO(mode);
301615305514SMatthew R. Ochs 
301715305514SMatthew R. Ochs static struct device_attribute *cxlflash_dev_attrs[] = {
301815305514SMatthew R. Ochs 	&dev_attr_mode,
301915305514SMatthew R. Ochs 	NULL
302015305514SMatthew R. Ochs };
302115305514SMatthew R. Ochs 
302215305514SMatthew R. Ochs /*
302315305514SMatthew R. Ochs  * Host template
302415305514SMatthew R. Ochs  */
302515305514SMatthew R. Ochs static struct scsi_host_template driver_template = {
302615305514SMatthew R. Ochs 	.module = THIS_MODULE,
302715305514SMatthew R. Ochs 	.name = CXLFLASH_ADAPTER_NAME,
302815305514SMatthew R. Ochs 	.info = cxlflash_driver_info,
302915305514SMatthew R. Ochs 	.ioctl = cxlflash_ioctl,
303015305514SMatthew R. Ochs 	.proc_name = CXLFLASH_NAME,
303115305514SMatthew R. Ochs 	.queuecommand = cxlflash_queuecommand,
30327c4c41f1SUma Krishnan 	.eh_abort_handler = cxlflash_eh_abort_handler,
303315305514SMatthew R. Ochs 	.eh_device_reset_handler = cxlflash_eh_device_reset_handler,
303415305514SMatthew R. Ochs 	.eh_host_reset_handler = cxlflash_eh_host_reset_handler,
303515305514SMatthew R. Ochs 	.change_queue_depth = cxlflash_change_queue_depth,
303683430833SManoj N. Kumar 	.cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
303715305514SMatthew R. Ochs 	.can_queue = CXLFLASH_MAX_CMDS,
30385fbb96c8SMatthew R. Ochs 	.cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
303915305514SMatthew R. Ochs 	.this_id = -1,
304068ab2d76SUma Krishnan 	.sg_tablesize = 1,	/* No scatter gather support */
304115305514SMatthew R. Ochs 	.max_sectors = CXLFLASH_MAX_SECTORS,
304215305514SMatthew R. Ochs 	.use_clustering = ENABLE_CLUSTERING,
304315305514SMatthew R. Ochs 	.shost_attrs = cxlflash_host_attrs,
304415305514SMatthew R. Ochs 	.sdev_attrs = cxlflash_dev_attrs,
304515305514SMatthew R. Ochs };
304615305514SMatthew R. Ochs 
304715305514SMatthew R. Ochs /*
304815305514SMatthew R. Ochs  * Device dependent values
304915305514SMatthew R. Ochs  */
305096e1b660SUma Krishnan static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
305196e1b660SUma Krishnan 					0ULL };
305296e1b660SUma Krishnan static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
3053704c4b0dSUma Krishnan 					CXLFLASH_NOTIFY_SHUTDOWN };
305494344520SMatthew R. Ochs static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
305594344520SMatthew R. Ochs 					CXLFLASH_NOTIFY_SHUTDOWN };
305615305514SMatthew R. Ochs 
305715305514SMatthew R. Ochs /*
305815305514SMatthew R. Ochs  * PCI device binding table
305915305514SMatthew R. Ochs  */
306015305514SMatthew R. Ochs static struct pci_device_id cxlflash_pci_table[] = {
306115305514SMatthew R. Ochs 	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
306215305514SMatthew R. Ochs 	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
3063a2746fb1SManoj Kumar 	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
3064a2746fb1SManoj Kumar 	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
306594344520SMatthew R. Ochs 	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD,
306694344520SMatthew R. Ochs 	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals},
306715305514SMatthew R. Ochs 	{}
306815305514SMatthew R. Ochs };
306915305514SMatthew R. Ochs 
307015305514SMatthew R. Ochs MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
307115305514SMatthew R. Ochs 
307215305514SMatthew R. Ochs /**
3073c21e0bbfSMatthew R. Ochs  * cxlflash_worker_thread() - work thread handler for the AFU
3074c21e0bbfSMatthew R. Ochs  * @work:	Work structure contained within the cxlflash config associated with the host.
3075c21e0bbfSMatthew R. Ochs  *
3076c21e0bbfSMatthew R. Ochs  * Handles the following events:
3077c21e0bbfSMatthew R. Ochs  * - Link reset, which cannot be performed in interrupt context because
3078c21e0bbfSMatthew R. Ochs  *   it can block for up to a few seconds
3079ef51074aSMatthew R. Ochs  * - Rescan the host
3080c21e0bbfSMatthew R. Ochs  */
3081c21e0bbfSMatthew R. Ochs static void cxlflash_worker_thread(struct work_struct *work)
3082c21e0bbfSMatthew R. Ochs {
30835cdac81aSMatthew R. Ochs 	struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
30845cdac81aSMatthew R. Ochs 						work_q);
3085c21e0bbfSMatthew R. Ochs 	struct afu *afu = cfg->afu;
30864392ba49SMatthew R. Ochs 	struct device *dev = &cfg->dev->dev;
30870aa14887SMatthew R. Ochs 	__be64 __iomem *fc_port_regs;
3088c21e0bbfSMatthew R. Ochs 	int port;
3089c21e0bbfSMatthew R. Ochs 	ulong lock_flags;
3090c21e0bbfSMatthew R. Ochs 
30915cdac81aSMatthew R. Ochs 	/* Avoid MMIO if the device has failed */
30935cdac81aSMatthew R. Ochs 	if (cfg->state != STATE_NORMAL)
30945cdac81aSMatthew R. Ochs 		return;
30955cdac81aSMatthew R. Ochs 
3096c21e0bbfSMatthew R. Ochs 	spin_lock_irqsave(cfg->host->host_lock, lock_flags);
3097c21e0bbfSMatthew R. Ochs 
3098c21e0bbfSMatthew R. Ochs 	if (cfg->lr_state == LINK_RESET_REQUIRED) {
3099c21e0bbfSMatthew R. Ochs 		port = cfg->lr_port;
3100c21e0bbfSMatthew R. Ochs 		if (port < 0) {
31014392ba49SMatthew R. Ochs 			dev_err(dev, "%s: invalid port index %d\n",
31024392ba49SMatthew R. Ochs 				__func__, port);
3103c21e0bbfSMatthew R. Ochs 		} else {
3104c21e0bbfSMatthew R. Ochs 			spin_unlock_irqrestore(cfg->host->host_lock,
3105c21e0bbfSMatthew R. Ochs 					       lock_flags);
3106c21e0bbfSMatthew R. Ochs 
3107c21e0bbfSMatthew R. Ochs 			/* The reset can block... */
31080aa14887SMatthew R. Ochs 			fc_port_regs = get_fc_port_regs(cfg, port);
31090aa14887SMatthew R. Ochs 			afu_link_reset(afu, port, fc_port_regs);
3110c21e0bbfSMatthew R. Ochs 			spin_lock_irqsave(cfg->host->host_lock, lock_flags);
3111c21e0bbfSMatthew R. Ochs 		}
3112c21e0bbfSMatthew R. Ochs 
3113c21e0bbfSMatthew R. Ochs 		cfg->lr_state = LINK_RESET_COMPLETE;
3114c21e0bbfSMatthew R. Ochs 	}
3115c21e0bbfSMatthew R. Ochs 
3116c21e0bbfSMatthew R. Ochs 	spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
3117ef51074aSMatthew R. Ochs 
3118ef51074aSMatthew R. Ochs 	if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
3119ef51074aSMatthew R. Ochs 		scsi_scan_host(cfg->host);
3120c21e0bbfSMatthew R. Ochs }
3121c21e0bbfSMatthew R. Ochs 
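/*
 * Editorial sketch (not part of the original source): producers elsewhere
 * in the driver arm this handler and kick the workqueue roughly as
 * follows, updating the link-reset bookkeeping under the host lock:
 *
 *	cfg->lr_state = LINK_RESET_REQUIRED;
 *	cfg->lr_port = port;
 *	schedule_work(&cfg->work_q);
 *
 * The blocking afu_link_reset() then runs in process context instead of
 * the interrupt handler that observed the link event.
 */
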
3122c21e0bbfSMatthew R. Ochs /**
3123c21e0bbfSMatthew R. Ochs  * cxlflash_probe() - PCI entry point to add host
3124c21e0bbfSMatthew R. Ochs  * @pdev:	PCI device associated with the host.
3125c21e0bbfSMatthew R. Ochs  * @dev_id:	PCI device id associated with device.
3126c21e0bbfSMatthew R. Ochs  *
3127323e3342SMatthew R. Ochs  * The device will initially start out in a 'probing' state and
3128323e3342SMatthew R. Ochs  * transition to the 'normal' state at the end of a successful
3129323e3342SMatthew R. Ochs  * probe. Should an EEH event occur during probe, the notification
3130323e3342SMatthew R. Ochs  * thread (error_detected()) will wait until the probe handler
3131323e3342SMatthew R. Ochs  * is nearly complete. At that time, the device will be moved to
3132323e3342SMatthew R. Ochs  * a 'probed' state and the EEH thread woken up to drive the slot
3133323e3342SMatthew R. Ochs  * reset and recovery (device moves to 'normal' state). Meanwhile,
3134323e3342SMatthew R. Ochs  * the probe will be allowed to exit successfully.
3135323e3342SMatthew R. Ochs  *
31361284fb0cSMatthew R. Ochs  * Return: 0 on success, -errno on failure
3137c21e0bbfSMatthew R. Ochs  */
3138c21e0bbfSMatthew R. Ochs static int cxlflash_probe(struct pci_dev *pdev,
3139c21e0bbfSMatthew R. Ochs 			  const struct pci_device_id *dev_id)
3140c21e0bbfSMatthew R. Ochs {
3141c21e0bbfSMatthew R. Ochs 	struct Scsi_Host *host;
3142c21e0bbfSMatthew R. Ochs 	struct cxlflash_cfg *cfg = NULL;
3143fb67d44dSMatthew R. Ochs 	struct device *dev = &pdev->dev;
3144c21e0bbfSMatthew R. Ochs 	struct dev_dependent_vals *ddv;
3145c21e0bbfSMatthew R. Ochs 	int rc = 0;
314678ae028eSMatthew R. Ochs 	int k;
3147c21e0bbfSMatthew R. Ochs 
3148c21e0bbfSMatthew R. Ochs 	dev_dbg(dev, "%s: Found CXLFLASH with IRQ: %d\n",
3149c21e0bbfSMatthew R. Ochs 		__func__, pdev->irq);
3150c21e0bbfSMatthew R. Ochs 
3151c21e0bbfSMatthew R. Ochs 	ddv = (struct dev_dependent_vals *)dev_id->driver_data;
3152c21e0bbfSMatthew R. Ochs 	driver_template.max_sectors = ddv->max_sectors;
3153c21e0bbfSMatthew R. Ochs 
3154c21e0bbfSMatthew R. Ochs 	host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
3155c21e0bbfSMatthew R. Ochs 	if (!host) {
3156fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: scsi_host_alloc failed\n", __func__);
3157c21e0bbfSMatthew R. Ochs 		rc = -ENOMEM;
3158c21e0bbfSMatthew R. Ochs 		goto out;
3159c21e0bbfSMatthew R. Ochs 	}
3160c21e0bbfSMatthew R. Ochs 
3161c21e0bbfSMatthew R. Ochs 	host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
3162c21e0bbfSMatthew R. Ochs 	host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
3163c21e0bbfSMatthew R. Ochs 	host->unique_id = host->host_no;
3164c21e0bbfSMatthew R. Ochs 	host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
3165c21e0bbfSMatthew R. Ochs 
3166fb67d44dSMatthew R. Ochs 	cfg = shost_priv(host);
3167c21e0bbfSMatthew R. Ochs 	cfg->host = host;
3168c21e0bbfSMatthew R. Ochs 	rc = alloc_mem(cfg);
3169c21e0bbfSMatthew R. Ochs 	if (rc) {
3170fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: alloc_mem failed\n", __func__);
3171c21e0bbfSMatthew R. Ochs 		rc = -ENOMEM;
31728b5b1e87SMatthew R. Ochs 		scsi_host_put(cfg->host);
3173c21e0bbfSMatthew R. Ochs 		goto out;
3174c21e0bbfSMatthew R. Ochs 	}
3175c21e0bbfSMatthew R. Ochs 
3176c21e0bbfSMatthew R. Ochs 	cfg->init_state = INIT_STATE_NONE;
3177c21e0bbfSMatthew R. Ochs 	cfg->dev = pdev;
317817ead26fSMatthew R. Ochs 	cfg->cxl_fops = cxlflash_cxl_fops;
31792cb79266SMatthew R. Ochs 
31802cb79266SMatthew R. Ochs 	/*
318178ae028eSMatthew R. Ochs 	 * Promoted LUNs move to the top of the LUN table. The rest stay in
318278ae028eSMatthew R. Ochs 	 * the bottom half. The bottom half grows from the end (index = 255),
318378ae028eSMatthew R. Ochs 	 * whereas the top half grows from the beginning (index = 0).
318478ae028eSMatthew R. Ochs 	 *
318578ae028eSMatthew R. Ochs 	 * Initialize the last LUN index for all possible ports.
31862cb79266SMatthew R. Ochs 	 */
31872cb79266SMatthew R. Ochs 	cfg->promote_lun_index = 0;
318878ae028eSMatthew R. Ochs 
318978ae028eSMatthew R. Ochs 	for (k = 0; k < MAX_FC_PORTS; k++)
319078ae028eSMatthew R. Ochs 		cfg->last_lun_index[k] = CXLFLASH_NUM_VLUNS/2 - 1;
31912cb79266SMatthew R. Ochs 
3192c21e0bbfSMatthew R. Ochs 	cfg->dev_id = (struct pci_device_id *)dev_id;
3193c21e0bbfSMatthew R. Ochs 
3194c21e0bbfSMatthew R. Ochs 	init_waitqueue_head(&cfg->tmf_waitq);
3195439e85c1SMatthew R. Ochs 	init_waitqueue_head(&cfg->reset_waitq);
3196c21e0bbfSMatthew R. Ochs 
3197c21e0bbfSMatthew R. Ochs 	INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
3198c21e0bbfSMatthew R. Ochs 	cfg->lr_state = LINK_RESET_INVALID;
3199c21e0bbfSMatthew R. Ochs 	cfg->lr_port = -1;
32000d73122cSMatthew R. Ochs 	spin_lock_init(&cfg->tmf_slock);
320165be2c79SMatthew R. Ochs 	mutex_init(&cfg->ctx_tbl_list_mutex);
320265be2c79SMatthew R. Ochs 	mutex_init(&cfg->ctx_recovery_mutex);
32030a27ae51SMatthew R. Ochs 	init_rwsem(&cfg->ioctl_rwsem);
320465be2c79SMatthew R. Ochs 	INIT_LIST_HEAD(&cfg->ctx_err_recovery);
320565be2c79SMatthew R. Ochs 	INIT_LIST_HEAD(&cfg->lluns);
3206c21e0bbfSMatthew R. Ochs 
3207c21e0bbfSMatthew R. Ochs 	pci_set_drvdata(pdev, cfg);
3208c21e0bbfSMatthew R. Ochs 
3209c21e0bbfSMatthew R. Ochs 	cfg->cxl_afu = cxl_pci_to_afu(pdev);
3210c21e0bbfSMatthew R. Ochs 
3211c21e0bbfSMatthew R. Ochs 	rc = init_pci(cfg);
3212c21e0bbfSMatthew R. Ochs 	if (rc) {
3213fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc);
3214c21e0bbfSMatthew R. Ochs 		goto out_remove;
3215c21e0bbfSMatthew R. Ochs 	}
3216c21e0bbfSMatthew R. Ochs 	cfg->init_state = INIT_STATE_PCI;
3217c21e0bbfSMatthew R. Ochs 
3218c21e0bbfSMatthew R. Ochs 	rc = init_afu(cfg);
3219323e3342SMatthew R. Ochs 	if (rc && !wq_has_sleeper(&cfg->reset_waitq)) {
3220fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);
3221c21e0bbfSMatthew R. Ochs 		goto out_remove;
3222c21e0bbfSMatthew R. Ochs 	}
3223c21e0bbfSMatthew R. Ochs 	cfg->init_state = INIT_STATE_AFU;
3224c21e0bbfSMatthew R. Ochs 
3225c21e0bbfSMatthew R. Ochs 	rc = init_scsi(cfg);
3226c21e0bbfSMatthew R. Ochs 	if (rc) {
3227fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc);
3228c21e0bbfSMatthew R. Ochs 		goto out_remove;
3229c21e0bbfSMatthew R. Ochs 	}
3230c21e0bbfSMatthew R. Ochs 	cfg->init_state = INIT_STATE_SCSI;
3231c21e0bbfSMatthew R. Ochs 
3232323e3342SMatthew R. Ochs 	if (wq_has_sleeper(&cfg->reset_waitq)) {
3233323e3342SMatthew R. Ochs 		cfg->state = STATE_PROBED;
3234323e3342SMatthew R. Ochs 		wake_up_all(&cfg->reset_waitq);
3235323e3342SMatthew R. Ochs 	} else {
3236323e3342SMatthew R. Ochs 		cfg->state = STATE_NORMAL;
3236323e3342SMatthew R. Ochs 	}
3237c21e0bbfSMatthew R. Ochs out:
3238fb67d44dSMatthew R. Ochs 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3239c21e0bbfSMatthew R. Ochs 	return rc;
3240c21e0bbfSMatthew R. Ochs 
3241c21e0bbfSMatthew R. Ochs out_remove:
3242c21e0bbfSMatthew R. Ochs 	cxlflash_remove(pdev);
3243c21e0bbfSMatthew R. Ochs 	goto out;
3244c21e0bbfSMatthew R. Ochs }
3245c21e0bbfSMatthew R. Ochs 
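/*
 * Editorial note: cfg->init_state advances NONE -> PCI -> AFU -> SCSI in
 * the probe above so that cxlflash_remove(), reached via out_remove, can
 * unwind only the stages that actually completed. A minimal sketch of the
 * fall-through pattern such teardown typically uses (stage bodies elided):
 *
 *	switch (cfg->init_state) {
 *	case INIT_STATE_SCSI:
 *		...		// undo init_scsi()
 *	case INIT_STATE_AFU:
 *		...		// undo init_afu()
 *	case INIT_STATE_PCI:
 *		...		// undo init_pci()
 *	case INIT_STATE_NONE:
 *		break;
 *	}
 */
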
32465cdac81aSMatthew R. Ochs /**
32475cdac81aSMatthew R. Ochs  * cxlflash_pci_error_detected() - called when a PCI error is detected
32485cdac81aSMatthew R. Ochs  * @pdev:	PCI device struct.
32495cdac81aSMatthew R. Ochs  * @state:	PCI channel state.
32505cdac81aSMatthew R. Ochs  *
32511d3324c3SMatthew R. Ochs  * When an EEH occurs during an active reset, wait until the reset is
32521d3324c3SMatthew R. Ochs  * complete and then take action based upon the device state.
32531d3324c3SMatthew R. Ochs  *
32545cdac81aSMatthew R. Ochs  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
32555cdac81aSMatthew R. Ochs  */
32565cdac81aSMatthew R. Ochs static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
32575cdac81aSMatthew R. Ochs 						    pci_channel_state_t state)
32585cdac81aSMatthew R. Ochs {
325965be2c79SMatthew R. Ochs 	int rc = 0;
32605cdac81aSMatthew R. Ochs 	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
32615cdac81aSMatthew R. Ochs 	struct device *dev = &cfg->dev->dev;
32625cdac81aSMatthew R. Ochs 
32635cdac81aSMatthew R. Ochs 	dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);
32645cdac81aSMatthew R. Ochs 
32655cdac81aSMatthew R. Ochs 	switch (state) {
32665cdac81aSMatthew R. Ochs 	case pci_channel_io_frozen:
3267323e3342SMatthew R. Ochs 		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
3268323e3342SMatthew R. Ochs 					     cfg->state != STATE_PROBING);
32691d3324c3SMatthew R. Ochs 		if (cfg->state == STATE_FAILTERM)
32701d3324c3SMatthew R. Ochs 			return PCI_ERS_RESULT_DISCONNECT;
32711d3324c3SMatthew R. Ochs 
3272439e85c1SMatthew R. Ochs 		cfg->state = STATE_RESET;
32735cdac81aSMatthew R. Ochs 		scsi_block_requests(cfg->host);
32740a27ae51SMatthew R. Ochs 		drain_ioctls(cfg);
327565be2c79SMatthew R. Ochs 		rc = cxlflash_mark_contexts_error(cfg);
327665be2c79SMatthew R. Ochs 		if (unlikely(rc))
3277fb67d44dSMatthew R. Ochs 			dev_err(dev, "%s: Failed to mark user contexts rc=%d\n",
327865be2c79SMatthew R. Ochs 				__func__, rc);
32799526f360SManoj N. Kumar 		term_afu(cfg);
32805cdac81aSMatthew R. Ochs 		return PCI_ERS_RESULT_NEED_RESET;
32815cdac81aSMatthew R. Ochs 	case pci_channel_io_perm_failure:
32825cdac81aSMatthew R. Ochs 		cfg->state = STATE_FAILTERM;
3283439e85c1SMatthew R. Ochs 		wake_up_all(&cfg->reset_waitq);
32845cdac81aSMatthew R. Ochs 		scsi_unblock_requests(cfg->host);
32855cdac81aSMatthew R. Ochs 		return PCI_ERS_RESULT_DISCONNECT;
32865cdac81aSMatthew R. Ochs 	default:
32875cdac81aSMatthew R. Ochs 		break;
32885cdac81aSMatthew R. Ochs 	}
32895cdac81aSMatthew R. Ochs 	return PCI_ERS_RESULT_NEED_RESET;
32905cdac81aSMatthew R. Ochs }
32915cdac81aSMatthew R. Ochs 
32925cdac81aSMatthew R. Ochs /**
32935cdac81aSMatthew R. Ochs  * cxlflash_pci_slot_reset() - called when PCI slot has been reset
32945cdac81aSMatthew R. Ochs  * @pdev:	PCI device struct.
32955cdac81aSMatthew R. Ochs  *
32965cdac81aSMatthew R. Ochs  * This routine is called by the PCI error recovery code after the PCI
32975cdac81aSMatthew R. Ochs  * slot has been reset, just before we should resume normal operations.
32985cdac81aSMatthew R. Ochs  *
32995cdac81aSMatthew R. Ochs  * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
33005cdac81aSMatthew R. Ochs  */
33015cdac81aSMatthew R. Ochs static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
33025cdac81aSMatthew R. Ochs {
33035cdac81aSMatthew R. Ochs 	int rc = 0;
33045cdac81aSMatthew R. Ochs 	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
33055cdac81aSMatthew R. Ochs 	struct device *dev = &cfg->dev->dev;
33065cdac81aSMatthew R. Ochs 
33075cdac81aSMatthew R. Ochs 	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
33085cdac81aSMatthew R. Ochs 
33095cdac81aSMatthew R. Ochs 	rc = init_afu(cfg);
33105cdac81aSMatthew R. Ochs 	if (unlikely(rc)) {
3311fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc);
33125cdac81aSMatthew R. Ochs 		return PCI_ERS_RESULT_DISCONNECT;
33135cdac81aSMatthew R. Ochs 	}
33145cdac81aSMatthew R. Ochs 
33155cdac81aSMatthew R. Ochs 	return PCI_ERS_RESULT_RECOVERED;
33165cdac81aSMatthew R. Ochs }
33175cdac81aSMatthew R. Ochs 
33185cdac81aSMatthew R. Ochs /**
33195cdac81aSMatthew R. Ochs  * cxlflash_pci_resume() - called when normal operation can resume
33205cdac81aSMatthew R. Ochs  * @pdev:	PCI device struct
33215cdac81aSMatthew R. Ochs  */
33225cdac81aSMatthew R. Ochs static void cxlflash_pci_resume(struct pci_dev *pdev)
33235cdac81aSMatthew R. Ochs {
33245cdac81aSMatthew R. Ochs 	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
33255cdac81aSMatthew R. Ochs 	struct device *dev = &cfg->dev->dev;
33265cdac81aSMatthew R. Ochs 
33275cdac81aSMatthew R. Ochs 	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
33285cdac81aSMatthew R. Ochs 
33295cdac81aSMatthew R. Ochs 	cfg->state = STATE_NORMAL;
3330439e85c1SMatthew R. Ochs 	wake_up_all(&cfg->reset_waitq);
33315cdac81aSMatthew R. Ochs 	scsi_unblock_requests(cfg->host);
33325cdac81aSMatthew R. Ochs }
33335cdac81aSMatthew R. Ochs 
33345cdac81aSMatthew R. Ochs static const struct pci_error_handlers cxlflash_err_handler = {
33355cdac81aSMatthew R. Ochs 	.error_detected = cxlflash_pci_error_detected,
33365cdac81aSMatthew R. Ochs 	.slot_reset = cxlflash_pci_slot_reset,
33375cdac81aSMatthew R. Ochs 	.resume = cxlflash_pci_resume,
33385cdac81aSMatthew R. Ochs };
33395cdac81aSMatthew R. Ochs 
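/*
 * Editorial note: for a frozen channel, the EEH core drives these hooks in
 * order: error_detected() quiesces the host and returns
 * PCI_ERS_RESULT_NEED_RESET, slot_reset() re-runs init_afu() against the
 * freshly reset card, and resume() restores STATE_NORMAL and unblocks any
 * queued requests.
 */
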
3340c21e0bbfSMatthew R. Ochs /*
3341c21e0bbfSMatthew R. Ochs  * PCI device structure
3342c21e0bbfSMatthew R. Ochs  */
3343c21e0bbfSMatthew R. Ochs static struct pci_driver cxlflash_driver = {
3344c21e0bbfSMatthew R. Ochs 	.name = CXLFLASH_NAME,
3345c21e0bbfSMatthew R. Ochs 	.id_table = cxlflash_pci_table,
3346c21e0bbfSMatthew R. Ochs 	.probe = cxlflash_probe,
3347c21e0bbfSMatthew R. Ochs 	.remove = cxlflash_remove,
3348babf985dSUma Krishnan 	.shutdown = cxlflash_remove,
33495cdac81aSMatthew R. Ochs 	.err_handler = &cxlflash_err_handler,
3350c21e0bbfSMatthew R. Ochs };
3351c21e0bbfSMatthew R. Ochs 
3352c21e0bbfSMatthew R. Ochs /**
3353c21e0bbfSMatthew R. Ochs  * init_cxlflash() - module entry point
3354c21e0bbfSMatthew R. Ochs  *
33551284fb0cSMatthew R. Ochs  * Return: 0 on success, -errno on failure
3356c21e0bbfSMatthew R. Ochs  */
3357c21e0bbfSMatthew R. Ochs static int __init init_cxlflash(void)
3358c21e0bbfSMatthew R. Ochs {
3359cd41e18dSMatthew R. Ochs 	check_sizes();
336065be2c79SMatthew R. Ochs 	cxlflash_list_init();
336165be2c79SMatthew R. Ochs 
3362c21e0bbfSMatthew R. Ochs 	return pci_register_driver(&cxlflash_driver);
3363c21e0bbfSMatthew R. Ochs }
3364c21e0bbfSMatthew R. Ochs 
3365c21e0bbfSMatthew R. Ochs /**
3366c21e0bbfSMatthew R. Ochs  * exit_cxlflash() - module exit point
3367c21e0bbfSMatthew R. Ochs  */
3368c21e0bbfSMatthew R. Ochs static void __exit exit_cxlflash(void)
3369c21e0bbfSMatthew R. Ochs {
337065be2c79SMatthew R. Ochs 	cxlflash_term_global_luns();
337165be2c79SMatthew R. Ochs 	cxlflash_free_errpage();
337265be2c79SMatthew R. Ochs 
3373c21e0bbfSMatthew R. Ochs 	pci_unregister_driver(&cxlflash_driver);
3374c21e0bbfSMatthew R. Ochs }
3375c21e0bbfSMatthew R. Ochs 
3376c21e0bbfSMatthew R. Ochs module_init(init_cxlflash);
3377c21e0bbfSMatthew R. Ochs module_exit(exit_cxlflash);
3378