/* xref: /openbmc/linux/drivers/scsi/cxlflash/main.c (revision 66ea9bcc) */
/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/unaligned.h>

#include <misc/cxl.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "main.h"
#include "sislite.h"
#include "common.h"

MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");

/**
 * process_cmd_err() - command error handler
 * @cmd:	AFU command that experienced the error.
 * @scp:	SCSI command associated with the AFU command in error.
 *
 * Translates error bits from AFU command to SCSI command results.
 */
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
	struct afu *afu;
	struct cxlflash_cfg *cfg;
	struct device *dev;
	struct sisl_ioarcb *ioarcb;
	struct sisl_ioasa *ioasa;
	u32 resid;

	/* Validate the command before dereferencing it */
	if (unlikely(!cmd))
		return;

	afu = cmd->parent;
	cfg = afu->parent;
	dev = &cfg->dev->dev;
	ioarcb = &(cmd->rcb);
	ioasa = &(cmd->sa);

	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
		resid = ioasa->resid;
		scsi_set_resid(scp, resid);
		dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
			__func__, cmd, scp, resid);
	}

	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
		dev_dbg(dev, "%s: cmd overrun cmd = %p scp = %p\n",
			__func__, cmd, scp);
		scp->result = (DID_ERROR << 16);
	}

	dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x "
		"afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__,
		ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc,
		ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra);

	if (ioasa->rc.scsi_rc) {
		/* We have a SCSI status */
		if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
			memcpy(scp->sense_buffer, ioasa->sense_data,
			       SISL_SENSE_DATA_LEN);
			scp->result = ioasa->rc.scsi_rc;
		} else {
			scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
		}
	}

	/*
	 * We encountered an error. Set scp->result based on nature
	 * of error.
	 */
	if (ioasa->rc.fc_rc) {
		/* We have an FC status */
		switch (ioasa->rc.fc_rc) {
		case SISL_FC_RC_LINKDOWN:
			scp->result = (DID_REQUEUE << 16);
			break;
		case SISL_FC_RC_RESID:
			/* This indicates an FCP resid underrun */
			if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
				/*
				 * If the SISL_RC_FLAGS_OVERRUN flag was set,
				 * then we will handle this error elsewhere.
				 * If not then we must handle it here.
				 * This is probably an AFU bug.
				 */
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_FC_RC_RESIDERR:
			/* Resid mismatch between adapter and device */
		case SISL_FC_RC_TGTABORT:
		case SISL_FC_RC_ABORTOK:
		case SISL_FC_RC_ABORTFAIL:
		case SISL_FC_RC_NOLOGI:
		case SISL_FC_RC_ABORTPEND:
		case SISL_FC_RC_WRABORTPEND:
		case SISL_FC_RC_NOEXP:
		case SISL_FC_RC_INUSE:
			scp->result = (DID_ERROR << 16);
			break;
		}
	}

	if (ioasa->rc.afu_rc) {
		/* We have an AFU error */
		switch (ioasa->rc.afu_rc) {
		case SISL_AFU_RC_NO_CHANNELS:
			scp->result = (DID_NO_CONNECT << 16);
			break;
		case SISL_AFU_RC_DATA_DMA_ERR:
			switch (ioasa->afu_extra) {
			case SISL_AFU_DMA_ERR_PAGE_IN:
				/* Retry */
				scp->result = (DID_IMM_RETRY << 16);
				break;
			case SISL_AFU_DMA_ERR_INVALID_EA:
			default:
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_AFU_RC_OUT_OF_DATA_BUFS:
			/* Retry */
			scp->result = (DID_ALLOC_FAILURE << 16);
			break;
		default:
			scp->result = (DID_ERROR << 16);
		}
	}
}
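
/*
 * Illustrative sketch (editor's addition, not driver code): scp->result is
 * the standard SCSI midlayer status word, in which the host byte occupies
 * bits 16-23. That is why host codes such as DID_ERROR are shifted left by
 * 16 above, while a raw SCSI status byte (e.g. CHECK CONDITION) lives in
 * the low byte and the two can simply be OR'd together:
 *
 *	u32 result = scsi_status | (host_code << 16);
 *	u32 host   = (result >> 16) & 0xff;	// recover the DID_* code
 */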

/**
 * cmd_complete() - command completion handler
 * @cmd:	AFU command that has completed.
 *
 * Prepares and submits a command that has either completed or timed out
 * to the SCSI stack. Internal commands (those without a populated
 * cmd->scp) are instead signaled via the command's completion.
 */
static void cmd_complete(struct afu_cmd *cmd)
{
	struct scsi_cmnd *scp;
	ulong lock_flags;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	bool cmd_is_tmf;

	if (cmd->scp) {
		scp = cmd->scp;
		if (unlikely(cmd->sa.ioasc))
			process_cmd_err(cmd, scp);
		else
			scp->result = (DID_OK << 16);

		cmd_is_tmf = cmd->cmd_tmf;

		dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
				    __func__, scp, scp->result, cmd->sa.ioasc);

		scp->scsi_done(scp);

		if (cmd_is_tmf) {
			spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
			cfg->tmf_active = false;
			wake_up_all_locked(&cfg->tmf_waitq);
			spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		}
	} else {
		complete(&cmd->cevent);
	}
}

/**
 * context_reset() - reset command owner context via specified register
 * @cmd:	AFU command that timed out.
 * @reset_reg:	MMIO register to perform reset.
 */
static void context_reset(struct afu_cmd *cmd, __be64 __iomem *reset_reg)
{
	int nretry = 0;
	u64 rrin = 0x1;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: cmd=%p\n", __func__, cmd);

	writeq_be(rrin, reset_reg);
	do {
		rrin = readq_be(reset_reg);
		if (rrin != 0x1)
			break;
		/* Double delay each time */
		udelay(1 << nretry);
	} while (nretry++ < MC_ROOM_RETRY_CNT);

	dev_dbg(dev, "%s: returning rrin=%016llx nretry=%d\n",
		__func__, rrin, nretry);
}
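
/*
 * Illustrative sketch (editor's addition, not driver code): the polling loop
 * above backs off exponentially, busy-waiting 2^nretry microseconds per
 * attempt. The worst-case cumulative delay is the sum of a geometric series:
 *
 *	total_us = 2^0 + 2^1 + ... + 2^MC_ROOM_RETRY_CNT
 *	         = 2^(MC_ROOM_RETRY_CNT + 1) - 1
 *
 * so for a hypothetical retry count of 10, the reset is abandoned after
 * roughly 2 ms of accumulated delay.
 */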

/**
 * context_reset_ioarrin() - reset command owner context via IOARRIN register
 * @cmd:	AFU command that timed out.
 */
static void context_reset_ioarrin(struct afu_cmd *cmd)
{
	struct afu *afu = cmd->parent;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);

	context_reset(cmd, &hwq->host_map->ioarrin);
}

/**
 * context_reset_sq() - reset command owner context w/ SQ Context Reset register
 * @cmd:	AFU command that timed out.
 */
static void context_reset_sq(struct afu_cmd *cmd)
{
	struct afu *afu = cmd->parent;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);

	context_reset(cmd, &hwq->host_map->sq_ctx_reset);
}

/**
 * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
	int rc = 0;
	s64 room;
	ulong lock_flags;

	/*
	 * To avoid the performance penalty of MMIO, spread the update of
	 * 'room' over multiple commands.
	 */
	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	if (--hwq->room < 0) {
		room = readq_be(&hwq->host_map->cmd_room);
		if (room <= 0) {
			dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
					    "0x%02X, room=0x%016llX\n",
					    __func__, cmd->rcb.cdb[0], room);
			hwq->room = 0;
			rc = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}
		hwq->room = room - 1;
	}

	writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin);
out:
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
	dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n", __func__,
		cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
	return rc;
}
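
/*
 * Illustrative sketch (editor's addition, not driver code): the cached
 * 'room' counter above amortizes an expensive MMIO read over many commands.
 * Rather than reading cmd_room on every submission, the driver decrements a
 * local credit and only touches the register once the credits run out
 * (mmio_read/mmio_write below are hypothetical stand-ins):
 *
 *	if (--cached_credits < 0) {		// fast path exhausted
 *		cached_credits = mmio_read(cmd_room) - 1;
 *		if (cached_credits < 0)		// hardware is genuinely full
 *			return -EBUSY;
 *	}
 *	mmio_write(ioarrin, cmd);		// safe: a slot is reserved
 */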

/**
 * send_cmd_sq() - sends an AFU command via SQ ring
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
	int rc = 0;
	int newval;
	ulong lock_flags;

	newval = atomic_dec_if_positive(&hwq->hsq_credits);
	if (newval <= 0) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	cmd->rcb.ioasa = &cmd->sa;

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);

	*hwq->hsq_curr = cmd->rcb;
	if (hwq->hsq_curr < hwq->hsq_end)
		hwq->hsq_curr++;
	else
		hwq->hsq_curr = hwq->hsq_start;
	writeq_be((u64)hwq->hsq_curr, &hwq->host_map->sq_tail);

	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
out:
	dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
	       "head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
	       cmd->rcb.data_ea, cmd->rcb.ioasa, rc, hwq->hsq_curr,
	       readq_be(&hwq->host_map->sq_head),
	       readq_be(&hwq->host_map->sq_tail));
	return rc;
}
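
/*
 * Illustrative sketch (editor's addition, not driver code): the submission
 * queue above is a classic ring buffer. The current slot pointer advances
 * linearly and wraps from the end back to the start, while the hsq_credits
 * counter bounds the number of in-flight entries; note the busy check keeps
 * at least one slot unused, which distinguishes a full ring from an empty
 * one:
 *
 *	ring[tail] = entry;
 *	tail = (tail + 1 == ring_size) ? 0 : tail + 1;	// wrap around
 *	mmio_write(sq_tail_reg, tail);			// publish to hardware
 */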

/**
 * wait_resp() - waits for a response or timeout to a sent AFU command
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command that was sent.
 *
 * Return:
 *	0 on success, -1 on timeout/error
 */
static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);

	timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
	if (!timeout) {
		afu->context_reset(cmd);
		rc = -1;
	}

	if (unlikely(cmd->sa.ioasc != 0)) {
		dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
			__func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
		rc = -1;
	}

	return rc;
}

/**
 * cmd_to_target_hwq() - selects a target hardware queue for a SCSI command
 * @host:	SCSI host associated with device.
 * @scp:	SCSI command to send.
 * @afu:	AFU associated with the host.
 *
 * Hashes a command based upon the hardware queue mode.
 *
 * Return: Trusted index of target hardware queue
 */
static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp,
			     struct afu *afu)
{
	u32 tag;
	u32 hwq = 0;

	if (afu->num_hwqs == 1)
		return 0;

	switch (afu->hwq_mode) {
	case HWQ_MODE_RR:
		hwq = afu->hwq_rr_count++ % afu->num_hwqs;
		break;
	case HWQ_MODE_TAG:
		tag = blk_mq_unique_tag(scp->request);
		hwq = blk_mq_unique_tag_to_hwq(tag);
		break;
	case HWQ_MODE_CPU:
		hwq = smp_processor_id() % afu->num_hwqs;
		break;
	default:
		WARN_ON_ONCE(1);
	}

	return hwq;
}
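
/*
 * Design note (editor's addition, not driver code): the three selection
 * modes trade distribution for locality. Round-robin spreads load evenly
 * across queues, tag mode defers to the block layer's existing hardware
 * queue mapping, and CPU mode keeps submission and completion work on the
 * issuing processor for cache locality:
 *
 *	hwq = counter++ % nr_hwqs;		// HWQ_MODE_RR: even spread
 *	hwq = blk_mq_unique_tag_to_hwq(tag);	// HWQ_MODE_TAG: block layer
 *	hwq = smp_processor_id() % nr_hwqs;	// HWQ_MODE_CPU: locality
 */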

/**
 * send_tmf() - sends a Task Management Function (TMF)
 * @afu:	AFU to checkout from.
 * @scp:	SCSI command from stack.
 * @tmfcmd:	TMF command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
{
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct afu_cmd *cmd = sc_to_afucz(scp);
	struct device *dev = &cfg->dev->dev;
	int hwq_index = cmd_to_target_hwq(host, scp, afu);
	struct hwq *hwq = get_hwq(afu, hwq_index);
	ulong lock_flags;
	int rc = 0;
	ulong to;

	/* When a Task Management Function is active, do not send another */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	cfg->tmf_active = true;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	cmd->scp = scp;
	cmd->parent = afu;
	cmd->cmd_tmf = true;
	cmd->hwq_index = hwq_index;

	cmd->rcb.ctx_id = hwq->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN |
			      SISL_REQ_FLAGS_TMF_CMD);
	memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));

	rc = afu->send_cmd(afu, cmd);
	if (unlikely(rc)) {
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		goto out;
	}

	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	to = msecs_to_jiffies(5000);
	to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
						       !cfg->tmf_active,
						       cfg->tmf_slock,
						       to);
	if (!to) {
		cfg->tmf_active = false;
		dev_err(dev, "%s: TMF timed out\n", __func__);
		rc = -1;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
out:
	return rc;
}
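
/*
 * Illustrative sketch (editor's addition, not driver code): TMFs are
 * serialized through a single flag guarded by a spinlock. A sender that
 * finds a TMF already active sleeps on the waitqueue until the completion
 * path (cmd_complete) clears the flag, so at most one TMF is ever
 * outstanding:
 *
 *	lock();
 *	while (tmf_active)
 *		wait_on(tmf_waitq);	// released by cmd_complete()
 *	tmf_active = true;
 *	unlock();
 *	// ...send, then wait up to 5s for completion to clear tmf_active
 */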

/**
 * cxlflash_driver_info() - information handler for this host driver
 * @host:	SCSI host associated with device.
 *
 * Return: A string describing the device.
 */
static const char *cxlflash_driver_info(struct Scsi_Host *host)
{
	return CXLFLASH_ADAPTER_NAME;
}

/**
 * cxlflash_queuecommand() - sends a mid-layer request
 * @host:	SCSI host associated with device.
 * @scp:	SCSI command to send.
 *
 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd = sc_to_afucz(scp);
	struct scatterlist *sg = scsi_sglist(scp);
	int hwq_index = cmd_to_target_hwq(host, scp, afu);
	struct hwq *hwq = get_hwq(afu, hwq_index);
	u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
	ulong lock_flags;
	int rc = 0;

	dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
			    "cdb=(%08x-%08x-%08x-%08x)\n",
			    __func__, scp, host->host_no, scp->device->channel,
			    scp->device->id, scp->device->lun,
			    get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	/*
	 * If a Task Management Function is active, wait for it to complete
	 * before continuing with regular commands.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active) {
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	switch (cfg->state) {
	case STATE_PROBING:
	case STATE_PROBED:
	case STATE_RESET:
		dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	case STATE_FAILTERM:
		dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
		scp->result = (DID_NO_CONNECT << 16);
		scp->scsi_done(scp);
		rc = 0;
		goto out;
	default:
		break;
	}

	if (likely(sg)) {
		cmd->rcb.data_len = sg->length;
		cmd->rcb.data_ea = (uintptr_t)sg_virt(sg);
	}

	cmd->scp = scp;
	cmd->parent = afu;
	cmd->hwq_index = hwq_index;

	cmd->rcb.ctx_id = hwq->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		req_flags |= SISL_REQ_FLAGS_HOST_WRITE;

	cmd->rcb.req_flags = req_flags;
	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));

	rc = afu->send_cmd(afu, cmd);
out:
	return rc;
}

/**
 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;

	if (pci_channel_offline(pdev))
		wait_event_timeout(cfg->reset_waitq,
				   !pci_channel_offline(pdev),
				   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
}

/**
 * free_mem() - free memory associated with the AFU
 * @cfg:	Internal structure associated with the host.
 */
static void free_mem(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;

	if (cfg->afu) {
		free_pages((ulong)afu, get_order(sizeof(struct afu)));
		cfg->afu = NULL;
	}
}

/**
 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU in a partially allocated/initialized state.
 *
 * Cancels scheduled worker threads, waits for any active internal AFU
 * commands to time out, disables IRQ polling and then unmaps the MMIO space.
 */
static void stop_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct hwq *hwq;
	int i;

	cancel_work_sync(&cfg->work_q);

	if (likely(afu)) {
		while (atomic_read(&afu->cmds_active))
			ssleep(1);

		if (afu_is_irqpoll_enabled(afu)) {
			for (i = 0; i < afu->num_hwqs; i++) {
				hwq = get_hwq(afu, i);

				irq_poll_disable(&hwq->irqpoll);
			}
		}

		if (likely(afu->afu_map)) {
			cxl_psa_unmap((void __iomem *)afu->afu_map);
			afu->afu_map = NULL;
		}
	}
}

/**
 * term_intr() - disables all AFU interrupts
 * @cfg:	Internal structure associated with the host.
 * @level:	Depth of allocation, where to begin waterfall tear down.
 * @index:	Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
		      u32 index)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;

	if (!afu) {
		dev_err(dev, "%s: returning with NULL afu\n", __func__);
		return;
	}

	hwq = get_hwq(afu, index);

	if (!hwq->ctx) {
		dev_err(dev, "%s: returning with NULL MC\n", __func__);
		return;
	}

	switch (level) {
	case UNMAP_THREE:
		/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
		if (index == PRIMARY_HWQ)
			cxl_unmap_afu_irq(hwq->ctx, 3, hwq);
		/* fall through */
	case UNMAP_TWO:
		cxl_unmap_afu_irq(hwq->ctx, 2, hwq);
		/* fall through */
	case UNMAP_ONE:
		cxl_unmap_afu_irq(hwq->ctx, 1, hwq);
		/* fall through */
	case FREE_IRQ:
		cxl_free_afu_irqs(hwq->ctx);
		/* fall through */
	case UNDO_NOOP:
		/* No action required */
		break;
	}
}

/**
 * term_mc() - terminates the master context
 * @cfg:	Internal structure associated with the host.
 * @index:	Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_mc(struct cxlflash_cfg *cfg, u32 index)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;

	if (!afu) {
		dev_err(dev, "%s: returning with NULL afu\n", __func__);
		return;
	}

	hwq = get_hwq(afu, index);

	if (!hwq->ctx) {
		dev_err(dev, "%s: returning with NULL MC\n", __func__);
		return;
	}

	WARN_ON(cxl_stop_context(hwq->ctx));
	if (index != PRIMARY_HWQ)
		WARN_ON(cxl_release_context(hwq->ctx));
	hwq->ctx = NULL;
}

/**
 * term_afu() - terminates the AFU
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_afu(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int k;

	/*
	 * Tear down is carefully orchestrated to ensure
	 * no interrupts can come in when the problem state
	 * area is unmapped.
	 *
	 * 1) Disable all AFU interrupts for each master
	 * 2) Unmap the problem state area
	 * 3) Stop each master context
	 */
	for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
		term_intr(cfg, UNMAP_THREE, k);

	if (cfg->afu)
		stop_afu(cfg);

	for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
		term_mc(cfg, k);

	dev_dbg(dev, "%s: returning\n", __func__);
}

/**
 * notify_shutdown() - notifies device of pending shutdown
 * @cfg:	Internal structure associated with the host.
 * @wait:	Whether to wait for shutdown processing to complete.
 *
 * This function will notify the AFU that the adapter is being shut down
 * and will wait for shutdown processing to complete if wait is true.
 * This notification should flush pending I/Os to the device and halt
 * further I/Os until the next AFU reset is issued and device restarted.
 */
static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct dev_dependent_vals *ddv;
	__be64 __iomem *fc_port_regs;
	u64 reg, status;
	int i, retry_cnt = 0;

	ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
	if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
		return;

	if (!afu || !afu->afu_map) {
		dev_dbg(dev, "%s: Problem state area not mapped\n", __func__);
		return;
	}

	/* Notify AFU */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);

		reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
		reg |= SISL_FC_SHUTDOWN_NORMAL;
		writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
	}

	if (!wait)
		return;

	/* Wait up to 1.5 seconds for shutdown processing to complete */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);
		retry_cnt = 0;

		while (true) {
			status = readq_be(&fc_port_regs[FC_STATUS / 8]);
			if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
				break;
			if (++retry_cnt >= MC_RETRY_CNT) {
				dev_dbg(dev, "%s: port %d shutdown processing "
					"not yet completed\n", __func__, i);
				break;
			}
			msleep(100 * retry_cnt);
		}
	}
}
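
/*
 * Illustrative sketch (editor's addition, not driver code): the shutdown
 * poll above uses a linearly increasing backoff, sleeping 100 * retry_cnt
 * milliseconds between status reads. Sleeps occur for retry_cnt values
 * 1 through n - 1, so the cumulative wait per port is:
 *
 *	total_ms = 100 * n * (n - 1) / 2	for n = MC_RETRY_CNT
 *
 * which, for a hypothetical retry count of 5, gives 100 + 200 + 300 + 400 =
 * 1000 ms, in line with the "up to 1.5 seconds" noted in the comment above.
 */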

/**
 * cxlflash_remove() - PCI entry point to tear down host
 * @pdev:	PCI device associated with the host.
 *
 * Safe to use as a cleanup in partially allocated/initialized state. Note that
 * the reset_waitq is flushed as part of the stop/termination of user contexts.
 */
static void cxlflash_remove(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	ulong lock_flags;

	if (!pci_is_enabled(pdev)) {
		dev_dbg(dev, "%s: Device is disabled\n", __func__);
		return;
	}

	/*
	 * If a Task Management Function is active, wait for it to complete
	 * before continuing with remove.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	/* Notify AFU and wait for shutdown processing to complete */
	notify_shutdown(cfg, true);

	cfg->state = STATE_FAILTERM;
	cxlflash_stop_term_user_contexts(cfg);

	switch (cfg->init_state) {
	case INIT_STATE_SCSI:
		cxlflash_term_local_luns(cfg);
		scsi_remove_host(cfg->host);
		/* fall through */
	case INIT_STATE_AFU:
		term_afu(cfg);
		/* fall through */
	case INIT_STATE_PCI:
		pci_disable_device(pdev);
		/* fall through */
	case INIT_STATE_NONE:
		free_mem(cfg);
		scsi_host_put(cfg->host);
		break;
	}

	dev_dbg(dev, "%s: returning\n", __func__);
}

/**
 * alloc_mem() - allocates the AFU and its command pool
 * @cfg:	Internal structure associated with the host.
 *
 * A partially allocated state remains on failure.
 *
 * Return:
 *	0 on success
 *	-ENOMEM on failure to allocate memory
 */
static int alloc_mem(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	struct device *dev = &cfg->dev->dev;

	/* AFU is ~28k, i.e. only one 64k page or up to seven 4k pages */
	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					    get_order(sizeof(struct afu)));
	if (unlikely(!cfg->afu)) {
		dev_err(dev, "%s: cannot get %d free pages\n",
			__func__, get_order(sizeof(struct afu)));
		rc = -ENOMEM;
		goto out;
	}
	cfg->afu->parent = cfg;
	cfg->afu->desired_hwqs = CXLFLASH_DEF_HWQS;
	cfg->afu->afu_map = NULL;
out:
	return rc;
}

/**
 * init_pci() - initializes the host as a PCI device
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_pci(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = pci_enable_device(pdev);
	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			cxlflash_wait_for_pci_err_recovery(cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(dev, "%s: Cannot enable adapter\n", __func__);
			cxlflash_wait_for_pci_err_recovery(cfg);
			goto out;
		}
	}

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_scsi(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = scsi_add_host(cfg->host, &pdev->dev);
	if (rc) {
		dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc);
		goto out;
	}

	scsi_scan_host(cfg->host);

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * set_port_online() - transitions the specified host FC port to online state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. Online state means
 * that the FC link layer has synced, completed the handshaking process, and
 * is ready for login to start.
 */
static void set_port_online(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE);	/* clear OFF_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);	/* set ON_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * set_port_offline() - transitions the specified host FC port to offline state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call.
 */
static void set_port_offline(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);	/* clear ON_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);	/* set OFF_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * wait_port_online() - waits for the specified host FC port to come online
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call. This will timeout
 * when the cable is not plugged in.
 *
 * Return:
 *	TRUE (1) when the specified port is online
 *	FALSE (0) when the specified port fails to come online after timeout
 */
static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	WARN_ON(delay_us < 1000);

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
}
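
/*
 * Illustrative sketch (editor's addition, not driver code): a status read of
 * all ones (U64_MAX) typically means the MMIO target is gone (e.g. the card
 * has dropped off the bus), so the loop halves the remaining retries on each
 * such read rather than burning the full budget. Starting from n retries,
 * repeated all-ones reads give up after roughly log2(n) iterations:
 *
 *	n -> n/2 -> n/4 -> ... -> 1 -> 0	// about log2(n) + 1 reads
 */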

/**
 * wait_port_offline() - waits for the specified host FC port to go offline
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call.
 *
 * Return:
 *	TRUE (1) when the specified port is offline
 *	FALSE (0) when the specified port fails to go offline after timeout
 */
static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	WARN_ON(delay_us < 1000);

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
}

/**
 * afu_set_wwpn() - configures the WWPN for the specified host FC port
 * @afu:	AFU associated with the host that owns the specified FC port.
 * @port:	Port number being configured.
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @wwpn:	The world-wide-port-number previously discovered for port.
 *
 * The provided MMIO region must be mapped prior to call. As part of the
 * sequence to configure the WWPN, the port is toggled offline and then back
 * online. This toggling action can cause this routine to delay up to a few
 * seconds. When configured to use the internal LUN feature of the AFU, a
 * failure to come online is overridden.
 */
static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
			 u64 wwpn)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT)) {
		dev_dbg(dev, "%s: wait on port %d to go offline timed out\n",
			__func__, port);
	}

	writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT)) {
		dev_dbg(dev, "%s: wait on port %d to go online timed out\n",
			__func__, port);
	}
}
1056c21e0bbfSMatthew R. Ochs 
1057c21e0bbfSMatthew R. Ochs /**
1058c21e0bbfSMatthew R. Ochs  * afu_link_reset() - resets the specified host FC port
1059c21e0bbfSMatthew R. Ochs  * @afu:	AFU associated with the host that owns the specified FC port.
1060c21e0bbfSMatthew R. Ochs  * @port:	Port number being configured.
1061c21e0bbfSMatthew R. Ochs  * @fc_regs:	Top of MMIO region defined for specified port.
1062c21e0bbfSMatthew R. Ochs  *
1063c21e0bbfSMatthew R. Ochs  * The provided MMIO region must be mapped prior to call. The sequence to
1064c21e0bbfSMatthew R. Ochs  * reset the port involves toggling it offline and then back online. This
1065c21e0bbfSMatthew R. Ochs  * action can cause this routine to delay up to a few seconds. An effort
1066c21e0bbfSMatthew R. Ochs  * is made to maintain link with the device by switching the host to use
1067c21e0bbfSMatthew R. Ochs  * the alternate port exclusively while the reset takes place.
1069c21e0bbfSMatthew R. Ochs  */
10701786f4a0SMatthew R. Ochs static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
1071c21e0bbfSMatthew R. Ochs {
1072fb67d44dSMatthew R. Ochs 	struct cxlflash_cfg *cfg = afu->parent;
1073fb67d44dSMatthew R. Ochs 	struct device *dev = &cfg->dev->dev;
1074c21e0bbfSMatthew R. Ochs 	u64 port_sel;
1075c21e0bbfSMatthew R. Ochs 
1076c21e0bbfSMatthew R. Ochs 	/* first switch the AFU to the other links, if any */
1077c21e0bbfSMatthew R. Ochs 	port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
10784da74db0SDan Carpenter 	port_sel &= ~(1ULL << port);
1079c21e0bbfSMatthew R. Ochs 	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
1080c21e0bbfSMatthew R. Ochs 	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
1081c21e0bbfSMatthew R. Ochs 
1082c21e0bbfSMatthew R. Ochs 	set_port_offline(fc_regs);
1083c21e0bbfSMatthew R. Ochs 	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1084c21e0bbfSMatthew R. Ochs 			       FC_PORT_STATUS_RETRY_CNT))
1085fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: wait on port %d to go offline timed out\n",
1086c21e0bbfSMatthew R. Ochs 			__func__, port);
1087c21e0bbfSMatthew R. Ochs 
1088c21e0bbfSMatthew R. Ochs 	set_port_online(fc_regs);
1089c21e0bbfSMatthew R. Ochs 	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1090c21e0bbfSMatthew R. Ochs 			      FC_PORT_STATUS_RETRY_CNT))
1091fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: wait on port %d to go online timed out\n",
1092c21e0bbfSMatthew R. Ochs 			__func__, port);
1093c21e0bbfSMatthew R. Ochs 
1094c21e0bbfSMatthew R. Ochs 	/* switch back to include this port */
10954da74db0SDan Carpenter 	port_sel |= (1ULL << port);
1096c21e0bbfSMatthew R. Ochs 	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
1097c21e0bbfSMatthew R. Ochs 	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
1098c21e0bbfSMatthew R. Ochs 
1099fb67d44dSMatthew R. Ochs 	dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel);
1100c21e0bbfSMatthew R. Ochs }
1101c21e0bbfSMatthew R. Ochs 
1102c21e0bbfSMatthew R. Ochs /**
1103c21e0bbfSMatthew R. Ochs  * afu_err_intr_init() - clears and initializes the AFU for error interrupts
1104c21e0bbfSMatthew R. Ochs  * @afu:	AFU associated with the host.
1105c21e0bbfSMatthew R. Ochs  */
1106c21e0bbfSMatthew R. Ochs static void afu_err_intr_init(struct afu *afu)
1107c21e0bbfSMatthew R. Ochs {
110878ae028eSMatthew R. Ochs 	struct cxlflash_cfg *cfg = afu->parent;
11090aa14887SMatthew R. Ochs 	__be64 __iomem *fc_port_regs;
1110c21e0bbfSMatthew R. Ochs 	int i;
1111bfc0bab1SUma Krishnan 	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
1112c21e0bbfSMatthew R. Ochs 	u64 reg;
1113c21e0bbfSMatthew R. Ochs 
1114c21e0bbfSMatthew R. Ochs 	/* Global async interrupts: AFU clears afu_ctrl on context exit
1115c21e0bbfSMatthew R. Ochs 	 * if async interrupts were sent to that context. This prevents
1116c21e0bbfSMatthew R. Ochs 	 * the AFU from sending further async interrupts when there is
1117c21e0bbfSMatthew R. Ochs 	 * nobody to receive them.
1118c21e0bbfSMatthew R. Ochs 	 */
1120c21e0bbfSMatthew R. Ochs 
1121c21e0bbfSMatthew R. Ochs 	/* mask all */
1122c21e0bbfSMatthew R. Ochs 	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
1123bfc0bab1SUma Krishnan 	/* set LISN# to send and point to primary master context */
1124bfc0bab1SUma Krishnan 	reg = ((u64) (((hwq->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);
1125c21e0bbfSMatthew R. Ochs 
1126c21e0bbfSMatthew R. Ochs 	if (afu->internal_lun)
1127c21e0bbfSMatthew R. Ochs 		reg |= 1;	/* Bit 63 indicates local lun */
1128c21e0bbfSMatthew R. Ochs 	writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
1129c21e0bbfSMatthew R. Ochs 	/* clear all */
1130c21e0bbfSMatthew R. Ochs 	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
1131c21e0bbfSMatthew R. Ochs 	/* unmask bits that are of interest */
1132c21e0bbfSMatthew R. Ochs 	/* note: afu can send an interrupt after this step */
1133c21e0bbfSMatthew R. Ochs 	writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
1134c21e0bbfSMatthew R. Ochs 	/* clear again in case a bit came on after previous clear but before */
1135c21e0bbfSMatthew R. Ochs 	/* unmask */
1136c21e0bbfSMatthew R. Ochs 	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
1137c21e0bbfSMatthew R. Ochs 
1138c21e0bbfSMatthew R. Ochs 	/* Clear/Set internal lun bits */
11390aa14887SMatthew R. Ochs 	fc_port_regs = get_fc_port_regs(cfg, 0);
11400aa14887SMatthew R. Ochs 	reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
1141c21e0bbfSMatthew R. Ochs 	reg &= SISL_FC_INTERNAL_MASK;
1142c21e0bbfSMatthew R. Ochs 	if (afu->internal_lun)
1143c21e0bbfSMatthew R. Ochs 		reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
11440aa14887SMatthew R. Ochs 	writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
1145c21e0bbfSMatthew R. Ochs 
1146c21e0bbfSMatthew R. Ochs 	/* now clear FC errors */
114778ae028eSMatthew R. Ochs 	for (i = 0; i < cfg->num_fc_ports; i++) {
11480aa14887SMatthew R. Ochs 		fc_port_regs = get_fc_port_regs(cfg, i);
11490aa14887SMatthew R. Ochs 
11500aa14887SMatthew R. Ochs 		writeq_be(0xFFFFFFFFU, &fc_port_regs[FC_ERROR / 8]);
11510aa14887SMatthew R. Ochs 		writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
1152c21e0bbfSMatthew R. Ochs 	}
1153c21e0bbfSMatthew R. Ochs 
1154c21e0bbfSMatthew R. Ochs 	/* sync interrupts for master's IOARRIN write */
1155c21e0bbfSMatthew R. Ochs 	/* note that unlike asyncs, there can be no pending sync interrupts */
1156c21e0bbfSMatthew R. Ochs 	/* at this time (this is a fresh context and master has not written */
1157c21e0bbfSMatthew R. Ochs 	/* IOARRIN yet), so there is nothing to clear. */
1158c21e0bbfSMatthew R. Ochs 
1159c21e0bbfSMatthew R. Ochs 	/* set LISN#, it is always sent to the context that wrote IOARRIN */
11603065267aSMatthew R. Ochs 	for (i = 0; i < afu->num_hwqs; i++) {
1161bfc0bab1SUma Krishnan 		hwq = get_hwq(afu, i);
1162bfc0bab1SUma Krishnan 
1163bfc0bab1SUma Krishnan 		writeq_be(SISL_MSI_SYNC_ERROR, &hwq->host_map->ctx_ctrl);
1164bfc0bab1SUma Krishnan 		writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask);
1165bfc0bab1SUma Krishnan 	}
1166c21e0bbfSMatthew R. Ochs }
1167c21e0bbfSMatthew R. Ochs 
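A note on the LISN encoding above: ((hwq->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR) << 40 places the message number in bits 40..47 and the context handle in bits 48 and up, while reg |= 1 sets the least significant bit. The comment calling that "Bit 63" follows the IBM big-endian bit-numbering convention used by the SISL specification, in which bit 0 is the most significant bit and bit 63 the least significant.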
1168c21e0bbfSMatthew R. Ochs /**
1169c21e0bbfSMatthew R. Ochs  * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
1170c21e0bbfSMatthew R. Ochs  * @irq:	Interrupt number.
1171c21e0bbfSMatthew R. Ochs  * @data:	Private data provided at interrupt registration, the hardware queue.
1172c21e0bbfSMatthew R. Ochs  *
1173c21e0bbfSMatthew R. Ochs  * Return: Always return IRQ_HANDLED.
1174c21e0bbfSMatthew R. Ochs  */
1175c21e0bbfSMatthew R. Ochs static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
1176c21e0bbfSMatthew R. Ochs {
1177bfc0bab1SUma Krishnan 	struct hwq *hwq = (struct hwq *)data;
1178bfc0bab1SUma Krishnan 	struct cxlflash_cfg *cfg = hwq->afu->parent;
1179fb67d44dSMatthew R. Ochs 	struct device *dev = &cfg->dev->dev;
1180c21e0bbfSMatthew R. Ochs 	u64 reg;
1181c21e0bbfSMatthew R. Ochs 	u64 reg_unmasked;
1182c21e0bbfSMatthew R. Ochs 
1183bfc0bab1SUma Krishnan 	reg = readq_be(&hwq->host_map->intr_status);
1184c21e0bbfSMatthew R. Ochs 	reg_unmasked = (reg & SISL_ISTATUS_UNMASK);
1185c21e0bbfSMatthew R. Ochs 
1186c21e0bbfSMatthew R. Ochs 	if (reg_unmasked == 0UL) {
1187fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n",
1188fb67d44dSMatthew R. Ochs 			__func__, reg);
1189c21e0bbfSMatthew R. Ochs 		goto cxlflash_sync_err_irq_exit;
1190c21e0bbfSMatthew R. Ochs 	}
1191c21e0bbfSMatthew R. Ochs 
1192fb67d44dSMatthew R. Ochs 	dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n",
1193fb67d44dSMatthew R. Ochs 		__func__, reg);
1194c21e0bbfSMatthew R. Ochs 
1195bfc0bab1SUma Krishnan 	writeq_be(reg_unmasked, &hwq->host_map->intr_clear);
1196c21e0bbfSMatthew R. Ochs 
1197c21e0bbfSMatthew R. Ochs cxlflash_sync_err_irq_exit:
1198c21e0bbfSMatthew R. Ochs 	return IRQ_HANDLED;
1199c21e0bbfSMatthew R. Ochs }
1200c21e0bbfSMatthew R. Ochs 
1201c21e0bbfSMatthew R. Ochs /**
120276a6ebbeSMatthew R. Ochs  * process_hrrq() - process the read-response queue
120376a6ebbeSMatthew R. Ochs  * @hwq:	Hardware queue associated with the host.
1204f918b4a8SMatthew R. Ochs  * @doneq:	Queue of commands harvested from the RRQ.
1205cba06e6dSMatthew R. Ochs  * @budget:	Threshold of RRQ entries to process.
1206f918b4a8SMatthew R. Ochs  *
1207f918b4a8SMatthew R. Ochs  * This routine must be called with the RRQ spin lock held and interrupts disabled.
1208c21e0bbfSMatthew R. Ochs  *
120976a6ebbeSMatthew R. Ochs  * Return: The number of entries processed.
1210c21e0bbfSMatthew R. Ochs  */
1211bfc0bab1SUma Krishnan static int process_hrrq(struct hwq *hwq, struct list_head *doneq, int budget)
1212c21e0bbfSMatthew R. Ochs {
1213bfc0bab1SUma Krishnan 	struct afu *afu = hwq->afu;
1214c21e0bbfSMatthew R. Ochs 	struct afu_cmd *cmd;
1215696d0b0cSMatthew R. Ochs 	struct sisl_ioasa *ioasa;
1216696d0b0cSMatthew R. Ochs 	struct sisl_ioarcb *ioarcb;
1217bfc0bab1SUma Krishnan 	bool toggle = hwq->toggle;
121876a6ebbeSMatthew R. Ochs 	int num_hrrq = 0;
1219c21e0bbfSMatthew R. Ochs 	u64 entry,
1220bfc0bab1SUma Krishnan 	    *hrrq_start = hwq->hrrq_start,
1221bfc0bab1SUma Krishnan 	    *hrrq_end = hwq->hrrq_end,
1222bfc0bab1SUma Krishnan 	    *hrrq_curr = hwq->hrrq_curr;
1223c21e0bbfSMatthew R. Ochs 
1224cba06e6dSMatthew R. Ochs 	/* Process ready RRQ entries up to the specified budget (if any) */
1225c21e0bbfSMatthew R. Ochs 	while (true) {
1226c21e0bbfSMatthew R. Ochs 		entry = *hrrq_curr;
1227c21e0bbfSMatthew R. Ochs 
1228c21e0bbfSMatthew R. Ochs 		if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
1229c21e0bbfSMatthew R. Ochs 			break;
1230c21e0bbfSMatthew R. Ochs 
1231696d0b0cSMatthew R. Ochs 		entry &= ~SISL_RESP_HANDLE_T_BIT;
1232696d0b0cSMatthew R. Ochs 
1233696d0b0cSMatthew R. Ochs 		if (afu_is_sq_cmd_mode(afu)) {
1234696d0b0cSMatthew R. Ochs 			ioasa = (struct sisl_ioasa *)entry;
1235696d0b0cSMatthew R. Ochs 			cmd = container_of(ioasa, struct afu_cmd, sa);
1236696d0b0cSMatthew R. Ochs 		} else {
1237696d0b0cSMatthew R. Ochs 			ioarcb = (struct sisl_ioarcb *)entry;
1238696d0b0cSMatthew R. Ochs 			cmd = container_of(ioarcb, struct afu_cmd, rcb);
1239696d0b0cSMatthew R. Ochs 		}
1240696d0b0cSMatthew R. Ochs 
1241f918b4a8SMatthew R. Ochs 		list_add_tail(&cmd->queue, doneq);
1242c21e0bbfSMatthew R. Ochs 
1243c21e0bbfSMatthew R. Ochs 		/* Advance to next entry or wrap and flip the toggle bit */
1244c21e0bbfSMatthew R. Ochs 		if (hrrq_curr < hrrq_end)
1245c21e0bbfSMatthew R. Ochs 			hrrq_curr++;
1246c21e0bbfSMatthew R. Ochs 		else {
1247c21e0bbfSMatthew R. Ochs 			hrrq_curr = hrrq_start;
1248c21e0bbfSMatthew R. Ochs 			toggle ^= SISL_RESP_HANDLE_T_BIT;
1249c21e0bbfSMatthew R. Ochs 		}
1250696d0b0cSMatthew R. Ochs 
1251bfc0bab1SUma Krishnan 		atomic_inc(&hwq->hsq_credits);
125276a6ebbeSMatthew R. Ochs 		num_hrrq++;
1253cba06e6dSMatthew R. Ochs 
1254cba06e6dSMatthew R. Ochs 		if (budget > 0 && num_hrrq >= budget)
1255cba06e6dSMatthew R. Ochs 			break;
1256c21e0bbfSMatthew R. Ochs 	}
1257c21e0bbfSMatthew R. Ochs 
1258bfc0bab1SUma Krishnan 	hwq->hrrq_curr = hrrq_curr;
1259bfc0bab1SUma Krishnan 	hwq->toggle = toggle;
1260c21e0bbfSMatthew R. Ochs 
126176a6ebbeSMatthew R. Ochs 	return num_hrrq;
126276a6ebbeSMatthew R. Ochs }
126376a6ebbeSMatthew R. Ochs 
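The RRQ needs no shared head/tail pointers between the AFU (producer) and the host (consumer): each entry carries a toggle bit, an entry is fresh only while that bit matches the polarity the consumer expects, and the expected polarity flips on every wrap so entries left over from the previous lap never match. A minimal sketch of the protocol, detached from the SISL structures (struct ring and the handle() callback are illustrative, not driver types):

#include <linux/types.h>

#define RING_ENTRIES	64
#define TOGGLE_BIT	0x1ULL

/* Illustrative ring -- not the SISL layout */
struct ring {
	u64 entry[RING_ENTRIES];	/* written by the producer */
	u64 *curr;			/* next slot the consumer examines */
	u64 toggle;			/* polarity that marks fresh entries */
};

/* Consume every ready entry; returns the number handled */
static int ring_drain(struct ring *r, void (*handle)(u64 payload))
{
	u64 e;
	int n = 0;

	/* An entry is fresh only while its toggle bit matches ours */
	while (((e = *r->curr) & TOGGLE_BIT) == r->toggle) {
		handle(e & ~TOGGLE_BIT);

		/* Advance; on wrap, flip the polarity we expect */
		if (r->curr < &r->entry[RING_ENTRIES - 1])
			r->curr++;
		else {
			r->curr = &r->entry[0];
			r->toggle ^= TOGGLE_BIT;
		}
		n++;
	}
	return n;
}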
126476a6ebbeSMatthew R. Ochs /**
1265f918b4a8SMatthew R. Ochs  * process_cmd_doneq() - process a queue of harvested RRQ commands
1266f918b4a8SMatthew R. Ochs  * @doneq:	Queue of completed commands.
1267f918b4a8SMatthew R. Ochs  *
1268f918b4a8SMatthew R. Ochs  * Note that upon return the queue can no longer be trusted.
1269f918b4a8SMatthew R. Ochs  */
1270f918b4a8SMatthew R. Ochs static void process_cmd_doneq(struct list_head *doneq)
1271f918b4a8SMatthew R. Ochs {
1272f918b4a8SMatthew R. Ochs 	struct afu_cmd *cmd, *tmp;
1273f918b4a8SMatthew R. Ochs 
1274f918b4a8SMatthew R. Ochs 	WARN_ON(list_empty(doneq));
1275f918b4a8SMatthew R. Ochs 
1276f918b4a8SMatthew R. Ochs 	list_for_each_entry_safe(cmd, tmp, doneq, queue)
1277f918b4a8SMatthew R. Ochs 		cmd_complete(cmd);
1278f918b4a8SMatthew R. Ochs }
1279f918b4a8SMatthew R. Ochs 
1280f918b4a8SMatthew R. Ochs /**
1281cba06e6dSMatthew R. Ochs  * cxlflash_irqpoll() - poll handler that processes a batch of ready RRQ entries
1282cba06e6dSMatthew R. Ochs  * @irqpoll:	IRQ poll structure associated with queue to poll.
1283cba06e6dSMatthew R. Ochs  * @budget:	Threshold of RRQ entries to process per poll.
1284cba06e6dSMatthew R. Ochs  *
1285cba06e6dSMatthew R. Ochs  * Return: The number of entries processed.
1286cba06e6dSMatthew R. Ochs  */
1287cba06e6dSMatthew R. Ochs static int cxlflash_irqpoll(struct irq_poll *irqpoll, int budget)
1288cba06e6dSMatthew R. Ochs {
1289bfc0bab1SUma Krishnan 	struct hwq *hwq = container_of(irqpoll, struct hwq, irqpoll);
1290cba06e6dSMatthew R. Ochs 	unsigned long hrrq_flags;
1291cba06e6dSMatthew R. Ochs 	LIST_HEAD(doneq);
1292cba06e6dSMatthew R. Ochs 	int num_entries = 0;
1293cba06e6dSMatthew R. Ochs 
1294bfc0bab1SUma Krishnan 	spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);
1295cba06e6dSMatthew R. Ochs 
1296bfc0bab1SUma Krishnan 	num_entries = process_hrrq(hwq, &doneq, budget);
1297cba06e6dSMatthew R. Ochs 	if (num_entries < budget)
1298cba06e6dSMatthew R. Ochs 		irq_poll_complete(irqpoll);
1299cba06e6dSMatthew R. Ochs 
1300bfc0bab1SUma Krishnan 	spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
1301cba06e6dSMatthew R. Ochs 
1302cba06e6dSMatthew R. Ochs 	process_cmd_doneq(&doneq);
1303cba06e6dSMatthew R. Ochs 	return num_entries;
1304cba06e6dSMatthew R. Ochs }
1305cba06e6dSMatthew R. Ochs 
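cxlflash_irqpoll() follows the standard contract of the kernel's irq_poll facility: the hard-IRQ handler only schedules the poller, and the poll callback consumes at most @budget completions per invocation, calling irq_poll_complete() once it drains below budget so the hard interrupt path takes over again. A skeletal sketch of that contract for an imaginary device (struct mydev and its members are hypothetical):

#include <linux/interrupt.h>
#include <linux/irq_poll.h>
#include <linux/kernel.h>

/* Hypothetical device -- only the irq_poll member matters here */
struct mydev {
	struct irq_poll iop;
};

/* Poll side: consume at most @budget completions per invocation */
static int mydev_poll(struct irq_poll *iop, int budget)
{
	struct mydev *md = container_of(iop, struct mydev, iop);
	int done = 0;

	/* ... harvest up to @budget completions from md's queue ... */
	(void)md;

	if (done < budget)
		irq_poll_complete(iop);	/* drained: back to interrupts */
	return done;
}

/* Hard-IRQ side: defer all harvesting to the poller */
static irqreturn_t mydev_irq(int irq, void *data)
{
	struct mydev *md = data;

	irq_poll_sched(&md->iop);
	return IRQ_HANDLED;
}

Setup pairs irq_poll_init(&md->iop, weight, mydev_poll) with the usual IRQ registration, just as start_afu() does below with afu->irqpoll_weight.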
1306cba06e6dSMatthew R. Ochs /**
130776a6ebbeSMatthew R. Ochs  * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
130876a6ebbeSMatthew R. Ochs  * @irq:	Interrupt number.
130976a6ebbeSMatthew R. Ochs  * @data:	Private data provided at interrupt registration, the hardware queue.
131076a6ebbeSMatthew R. Ochs  *
1311f918b4a8SMatthew R. Ochs  * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found.
131276a6ebbeSMatthew R. Ochs  */
131376a6ebbeSMatthew R. Ochs static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
131476a6ebbeSMatthew R. Ochs {
1315bfc0bab1SUma Krishnan 	struct hwq *hwq = (struct hwq *)data;
1316bfc0bab1SUma Krishnan 	struct afu *afu = hwq->afu;
1317f918b4a8SMatthew R. Ochs 	unsigned long hrrq_flags;
1318f918b4a8SMatthew R. Ochs 	LIST_HEAD(doneq);
1319f918b4a8SMatthew R. Ochs 	int num_entries = 0;
132076a6ebbeSMatthew R. Ochs 
1321bfc0bab1SUma Krishnan 	spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);
1322cba06e6dSMatthew R. Ochs 
1323cba06e6dSMatthew R. Ochs 	if (afu_is_irqpoll_enabled(afu)) {
1324bfc0bab1SUma Krishnan 		irq_poll_sched(&hwq->irqpoll);
1325bfc0bab1SUma Krishnan 		spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
1326cba06e6dSMatthew R. Ochs 		return IRQ_HANDLED;
1327cba06e6dSMatthew R. Ochs 	}
1328cba06e6dSMatthew R. Ochs 
1329bfc0bab1SUma Krishnan 	num_entries = process_hrrq(hwq, &doneq, -1);
1330bfc0bab1SUma Krishnan 	spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
1331f918b4a8SMatthew R. Ochs 
1332f918b4a8SMatthew R. Ochs 	if (num_entries == 0)
1333f918b4a8SMatthew R. Ochs 		return IRQ_NONE;
1334f918b4a8SMatthew R. Ochs 
1335f918b4a8SMatthew R. Ochs 	process_cmd_doneq(&doneq);
1336c21e0bbfSMatthew R. Ochs 	return IRQ_HANDLED;
1337c21e0bbfSMatthew R. Ochs }
1338c21e0bbfSMatthew R. Ochs 
1339e2ef33faSMatthew R. Ochs /*
1340e2ef33faSMatthew R. Ochs  * Asynchronous interrupt information table
1341e2ef33faSMatthew R. Ochs  *
1342e2ef33faSMatthew R. Ochs  * NOTE:
1343e2ef33faSMatthew R. Ochs  *	- Order matters here as this array is indexed by bit position.
1344e2ef33faSMatthew R. Ochs  *
1345e2ef33faSMatthew R. Ochs  *	- The checkpatch script considers the BUILD_SISL_ASTATUS_FC_PORT macro
1346e2ef33faSMatthew R. Ochs  *	  as complex and complains due to a lack of parentheses/braces.
1347e2ef33faSMatthew R. Ochs  */
1348e2ef33faSMatthew R. Ochs #define ASTATUS_FC(_a, _b, _c, _d)					 \
1349e2ef33faSMatthew R. Ochs 	{ SISL_ASTATUS_FC##_a##_##_b, _c, _a, (_d) }
1350e2ef33faSMatthew R. Ochs 
1351e2ef33faSMatthew R. Ochs #define BUILD_SISL_ASTATUS_FC_PORT(_a)					 \
1352e2ef33faSMatthew R. Ochs 	ASTATUS_FC(_a, LINK_UP, "link up", 0),				 \
1353e2ef33faSMatthew R. Ochs 	ASTATUS_FC(_a, LINK_DN, "link down", 0),			 \
1354e2ef33faSMatthew R. Ochs 	ASTATUS_FC(_a, LOGI_S, "login succeeded", SCAN_HOST),		 \
1355e2ef33faSMatthew R. Ochs 	ASTATUS_FC(_a, LOGI_F, "login failed", CLR_FC_ERROR),		 \
1356e2ef33faSMatthew R. Ochs 	ASTATUS_FC(_a, LOGI_R, "login timed out, retrying", LINK_RESET), \
1357e2ef33faSMatthew R. Ochs 	ASTATUS_FC(_a, CRC_T, "CRC threshold exceeded", LINK_RESET),	 \
1358e2ef33faSMatthew R. Ochs 	ASTATUS_FC(_a, LOGO, "target initiated LOGO", 0),		 \
1359e2ef33faSMatthew R. Ochs 	ASTATUS_FC(_a, OTHER, "other error", CLR_FC_ERROR | LINK_RESET)
1360e2ef33faSMatthew R. Ochs 
1361e2ef33faSMatthew R. Ochs static const struct asyc_intr_info ainfo[] = {
1362e2ef33faSMatthew R. Ochs 	BUILD_SISL_ASTATUS_FC_PORT(1),
1363e2ef33faSMatthew R. Ochs 	BUILD_SISL_ASTATUS_FC_PORT(0),
1364e2ef33faSMatthew R. Ochs 	BUILD_SISL_ASTATUS_FC_PORT(3),
1365e2ef33faSMatthew R. Ochs 	BUILD_SISL_ASTATUS_FC_PORT(2)
1366e2ef33faSMatthew R. Ochs };
1367e2ef33faSMatthew R. Ochs 
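As a worked expansion of one table slot: ASTATUS_FC(1, LINK_UP, "link up", 0) pastes together to { SISL_ASTATUS_FC1_LINK_UP, "link up", 1, (0) }, i.e. the status bit, a description, the port number, and the action flags. The 1, 0, 3, 2 ordering of the BUILD_SISL_ASTATUS_FC_PORT() invocations is deliberate: since the array is indexed by bit position, it mirrors the per-port layout of the async status register.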
1368c21e0bbfSMatthew R. Ochs /**
1369c21e0bbfSMatthew R. Ochs  * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
1370c21e0bbfSMatthew R. Ochs  * @irq:	Interrupt number.
1371c21e0bbfSMatthew R. Ochs  * @data:	Private data provided at interrupt registration, the hardware queue.
1372c21e0bbfSMatthew R. Ochs  *
1373c21e0bbfSMatthew R. Ochs  * Return: Always return IRQ_HANDLED.
1374c21e0bbfSMatthew R. Ochs  */
1375c21e0bbfSMatthew R. Ochs static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
1376c21e0bbfSMatthew R. Ochs {
1377bfc0bab1SUma Krishnan 	struct hwq *hwq = (struct hwq *)data;
1378bfc0bab1SUma Krishnan 	struct afu *afu = hwq->afu;
13794392ba49SMatthew R. Ochs 	struct cxlflash_cfg *cfg = afu->parent;
13804392ba49SMatthew R. Ochs 	struct device *dev = &cfg->dev->dev;
1381c21e0bbfSMatthew R. Ochs 	const struct asyc_intr_info *info;
13821786f4a0SMatthew R. Ochs 	struct sisl_global_map __iomem *global = &afu->afu_map->global;
13830aa14887SMatthew R. Ochs 	__be64 __iomem *fc_port_regs;
1384e2ef33faSMatthew R. Ochs 	u64 reg_unmasked;
1385c21e0bbfSMatthew R. Ochs 	u64 reg;
1386e2ef33faSMatthew R. Ochs 	u64 bit;
1387c21e0bbfSMatthew R. Ochs 	u8 port;
1388c21e0bbfSMatthew R. Ochs 
1389c21e0bbfSMatthew R. Ochs 	reg = readq_be(&global->regs.aintr_status);
1390c21e0bbfSMatthew R. Ochs 	reg_unmasked = (reg & SISL_ASTATUS_UNMASK);
1391c21e0bbfSMatthew R. Ochs 
1392e2ef33faSMatthew R. Ochs 	if (unlikely(reg_unmasked == 0)) {
1393fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n",
1394c21e0bbfSMatthew R. Ochs 			__func__, reg);
1395c21e0bbfSMatthew R. Ochs 		goto out;
1396c21e0bbfSMatthew R. Ochs 	}
1397c21e0bbfSMatthew R. Ochs 
1398f15fbf8dSMatthew R. Ochs 	/* FYI, it is 'okay' to clear AFU status before FC_ERROR */
1399c21e0bbfSMatthew R. Ochs 	writeq_be(reg_unmasked, &global->regs.aintr_clear);
1400c21e0bbfSMatthew R. Ochs 
1401f15fbf8dSMatthew R. Ochs 	/* Check each bit that is on */
1402e2ef33faSMatthew R. Ochs 	for_each_set_bit(bit, (ulong *)&reg_unmasked, BITS_PER_LONG) {
1403e2ef33faSMatthew R. Ochs 		if (unlikely(bit >= ARRAY_SIZE(ainfo))) {
1404e2ef33faSMatthew R. Ochs 			WARN_ON_ONCE(1);
1405c21e0bbfSMatthew R. Ochs 			continue;
1406e2ef33faSMatthew R. Ochs 		}
1407e2ef33faSMatthew R. Ochs 
1408e2ef33faSMatthew R. Ochs 		info = &ainfo[bit];
1409e2ef33faSMatthew R. Ochs 		if (unlikely(info->status != 1ULL << bit)) {
1410e2ef33faSMatthew R. Ochs 			WARN_ON_ONCE(1);
1411e2ef33faSMatthew R. Ochs 			continue;
1412e2ef33faSMatthew R. Ochs 		}
1413c21e0bbfSMatthew R. Ochs 
1414c21e0bbfSMatthew R. Ochs 		port = info->port;
14150aa14887SMatthew R. Ochs 		fc_port_regs = get_fc_port_regs(cfg, port);
1416c21e0bbfSMatthew R. Ochs 
1417fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n",
1418c21e0bbfSMatthew R. Ochs 			__func__, port, info->desc,
14190aa14887SMatthew R. Ochs 		       readq_be(&fc_port_regs[FC_STATUS / 8]));
1420c21e0bbfSMatthew R. Ochs 
1421c21e0bbfSMatthew R. Ochs 		/*
1422f15fbf8dSMatthew R. Ochs 		 * Do link reset first, some OTHER errors will set FC_ERROR
1423c21e0bbfSMatthew R. Ochs 		 * again if cleared before or w/o a reset
1424c21e0bbfSMatthew R. Ochs 		 */
1425c21e0bbfSMatthew R. Ochs 		if (info->action & LINK_RESET) {
14264392ba49SMatthew R. Ochs 			dev_err(dev, "%s: FC Port %d: resetting link\n",
1427c21e0bbfSMatthew R. Ochs 				__func__, port);
1428c21e0bbfSMatthew R. Ochs 			cfg->lr_state = LINK_RESET_REQUIRED;
1429c21e0bbfSMatthew R. Ochs 			cfg->lr_port = port;
1430c21e0bbfSMatthew R. Ochs 			schedule_work(&cfg->work_q);
1431c21e0bbfSMatthew R. Ochs 		}
1432c21e0bbfSMatthew R. Ochs 
1433c21e0bbfSMatthew R. Ochs 		if (info->action & CLR_FC_ERROR) {
14340aa14887SMatthew R. Ochs 			reg = readq_be(&fc_port_regs[FC_ERROR / 8]);
1435c21e0bbfSMatthew R. Ochs 
1436c21e0bbfSMatthew R. Ochs 			/*
1437f15fbf8dSMatthew R. Ochs 			 * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
1438c21e0bbfSMatthew R. Ochs 			 * should be the same and tracing one is sufficient.
1439c21e0bbfSMatthew R. Ochs 			 */
1440c21e0bbfSMatthew R. Ochs 
1441fb67d44dSMatthew R. Ochs 			dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n",
1442c21e0bbfSMatthew R. Ochs 				__func__, port, reg);
1443c21e0bbfSMatthew R. Ochs 
14440aa14887SMatthew R. Ochs 			writeq_be(reg, &fc_port_regs[FC_ERROR / 8]);
14450aa14887SMatthew R. Ochs 			writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
1446c21e0bbfSMatthew R. Ochs 		}
1447ef51074aSMatthew R. Ochs 
1448ef51074aSMatthew R. Ochs 		if (info->action & SCAN_HOST) {
1449ef51074aSMatthew R. Ochs 			atomic_inc(&cfg->scan_host_needed);
1450ef51074aSMatthew R. Ochs 			schedule_work(&cfg->work_q);
1451ef51074aSMatthew R. Ochs 		}
1452c21e0bbfSMatthew R. Ochs 	}
1453c21e0bbfSMatthew R. Ochs 
1454c21e0bbfSMatthew R. Ochs out:
1455c21e0bbfSMatthew R. Ochs 	return IRQ_HANDLED;
1456c21e0bbfSMatthew R. Ochs }
1457c21e0bbfSMatthew R. Ochs 
1458c21e0bbfSMatthew R. Ochs /**
1459c21e0bbfSMatthew R. Ochs  * start_context() - starts the master context
14601284fb0cSMatthew R. Ochs  * @cfg:	Internal structure associated with the host.
1461bfc0bab1SUma Krishnan  * @index:	Index of the hardware queue.
1462c21e0bbfSMatthew R. Ochs  *
1463c21e0bbfSMatthew R. Ochs  * Return: A success or failure value from CXL services.
1464c21e0bbfSMatthew R. Ochs  */
1465bfc0bab1SUma Krishnan static int start_context(struct cxlflash_cfg *cfg, u32 index)
1466c21e0bbfSMatthew R. Ochs {
1467fb67d44dSMatthew R. Ochs 	struct device *dev = &cfg->dev->dev;
1468bfc0bab1SUma Krishnan 	struct hwq *hwq = get_hwq(cfg->afu, index);
1469c21e0bbfSMatthew R. Ochs 	int rc = 0;
1470c21e0bbfSMatthew R. Ochs 
1471bfc0bab1SUma Krishnan 	rc = cxl_start_context(hwq->ctx,
1472bfc0bab1SUma Krishnan 			       hwq->work.work_element_descriptor,
1473c21e0bbfSMatthew R. Ochs 			       NULL);
1474c21e0bbfSMatthew R. Ochs 
1475fb67d44dSMatthew R. Ochs 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1476c21e0bbfSMatthew R. Ochs 	return rc;
1477c21e0bbfSMatthew R. Ochs }
1478c21e0bbfSMatthew R. Ochs 
1479c21e0bbfSMatthew R. Ochs /**
1480c21e0bbfSMatthew R. Ochs  * read_vpd() - obtains the WWPNs from VPD
14811284fb0cSMatthew R. Ochs  * @cfg:	Internal structure associated with the host.
147878ae028eSMatthew R. Ochs  * @wwpn:	Array of size MAX_FC_PORTS to pass back WWPNs.
1483c21e0bbfSMatthew R. Ochs  *
14841284fb0cSMatthew R. Ochs  * Return: 0 on success, -errno on failure
1485c21e0bbfSMatthew R. Ochs  */
1486c21e0bbfSMatthew R. Ochs static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
1487c21e0bbfSMatthew R. Ochs {
1488fb67d44dSMatthew R. Ochs 	struct device *dev = &cfg->dev->dev;
1489fb67d44dSMatthew R. Ochs 	struct pci_dev *pdev = cfg->dev;
1490c21e0bbfSMatthew R. Ochs 	int rc = 0;
1491c21e0bbfSMatthew R. Ochs 	int ro_start, ro_size, i, j, k;
1492c21e0bbfSMatthew R. Ochs 	ssize_t vpd_size;
1493c21e0bbfSMatthew R. Ochs 	char vpd_data[CXLFLASH_VPD_LEN];
1494c21e0bbfSMatthew R. Ochs 	char tmp_buf[WWPN_BUF_LEN] = { 0 };
14951cd7fabcSMatthew R. Ochs 	char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };
1496c21e0bbfSMatthew R. Ochs 
1497c21e0bbfSMatthew R. Ochs 	/* Get the VPD data from the device */
1498fb67d44dSMatthew R. Ochs 	vpd_size = cxl_read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
1499c21e0bbfSMatthew R. Ochs 	if (unlikely(vpd_size <= 0)) {
1500fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
1501c21e0bbfSMatthew R. Ochs 			__func__, vpd_size);
1502c21e0bbfSMatthew R. Ochs 		rc = -ENODEV;
1503c21e0bbfSMatthew R. Ochs 		goto out;
1504c21e0bbfSMatthew R. Ochs 	}
1505c21e0bbfSMatthew R. Ochs 
1506c21e0bbfSMatthew R. Ochs 	/* Get the read only section offset */
1507c21e0bbfSMatthew R. Ochs 	ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
1508c21e0bbfSMatthew R. Ochs 				    PCI_VPD_LRDT_RO_DATA);
1509c21e0bbfSMatthew R. Ochs 	if (unlikely(ro_start < 0)) {
1510fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: VPD Read-only data not found\n", __func__);
1511c21e0bbfSMatthew R. Ochs 		rc = -ENODEV;
1512c21e0bbfSMatthew R. Ochs 		goto out;
1513c21e0bbfSMatthew R. Ochs 	}
1514c21e0bbfSMatthew R. Ochs 
1515c21e0bbfSMatthew R. Ochs 	/* Get the read only section size, cap when it extends beyond read VPD */
1516c21e0bbfSMatthew R. Ochs 	ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
1517c21e0bbfSMatthew R. Ochs 	j = ro_size;
1518c21e0bbfSMatthew R. Ochs 	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
1519c21e0bbfSMatthew R. Ochs 	if (unlikely((i + j) > vpd_size)) {
1520fb67d44dSMatthew R. Ochs 		dev_dbg(dev, "%s: Might need to read more VPD (%d > %ld)\n",
1521c21e0bbfSMatthew R. Ochs 			__func__, (i + j), vpd_size);
1522c21e0bbfSMatthew R. Ochs 		ro_size = vpd_size - i;
1523c21e0bbfSMatthew R. Ochs 	}
1524c21e0bbfSMatthew R. Ochs 
1525c21e0bbfSMatthew R. Ochs 	/*
1526c21e0bbfSMatthew R. Ochs 	 * Find the offset of the WWPN tag within the read only
1527c21e0bbfSMatthew R. Ochs 	 * VPD data and validate the found field (partials are
1528c21e0bbfSMatthew R. Ochs 	 * no good to us). Convert the ASCII data to an integer
1529c21e0bbfSMatthew R. Ochs 	 * value. Note that we must copy to a temporary buffer
1530c21e0bbfSMatthew R. Ochs 	 * because the conversion service requires that the ASCII
1531c21e0bbfSMatthew R. Ochs 	 * string be terminated.
1532c21e0bbfSMatthew R. Ochs 	 */
153378ae028eSMatthew R. Ochs 	for (k = 0; k < cfg->num_fc_ports; k++) {
1534c21e0bbfSMatthew R. Ochs 		j = ro_size;
1535c21e0bbfSMatthew R. Ochs 		i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
1536c21e0bbfSMatthew R. Ochs 
1537c21e0bbfSMatthew R. Ochs 		i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
1538c21e0bbfSMatthew R. Ochs 		if (unlikely(i < 0)) {
1539fb67d44dSMatthew R. Ochs 			dev_err(dev, "%s: Port %d WWPN not found in VPD\n",
1540fb67d44dSMatthew R. Ochs 				__func__, k);
1541c21e0bbfSMatthew R. Ochs 			rc = -ENODEV;
1542c21e0bbfSMatthew R. Ochs 			goto out;
1543c21e0bbfSMatthew R. Ochs 		}
1544c21e0bbfSMatthew R. Ochs 
1545c21e0bbfSMatthew R. Ochs 		j = pci_vpd_info_field_size(&vpd_data[i]);
1546c21e0bbfSMatthew R. Ochs 		i += PCI_VPD_INFO_FLD_HDR_SIZE;
1547c21e0bbfSMatthew R. Ochs 		if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
1548fb67d44dSMatthew R. Ochs 			dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n",
1549c21e0bbfSMatthew R. Ochs 				__func__, k);
1550c21e0bbfSMatthew R. Ochs 			rc = -ENODEV;
1551c21e0bbfSMatthew R. Ochs 			goto out;
1552c21e0bbfSMatthew R. Ochs 		}
1553c21e0bbfSMatthew R. Ochs 
1554c21e0bbfSMatthew R. Ochs 		memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
1555c21e0bbfSMatthew R. Ochs 		rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
1556c21e0bbfSMatthew R. Ochs 		if (unlikely(rc)) {
1557fb67d44dSMatthew R. Ochs 			dev_err(dev, "%s: WWPN conversion failed for port %d\n",
1558fb67d44dSMatthew R. Ochs 				__func__, k);
1559c21e0bbfSMatthew R. Ochs 			rc = -ENODEV;
1560c21e0bbfSMatthew R. Ochs 			goto out;
1561c21e0bbfSMatthew R. Ochs 		}
156278ae028eSMatthew R. Ochs 
156378ae028eSMatthew R. Ochs 		dev_dbg(dev, "%s: wwpn%d=%016llx\n", __func__, k, wwpn[k]);
1564c21e0bbfSMatthew R. Ochs 	}
1565c21e0bbfSMatthew R. Ochs 
1566c21e0bbfSMatthew R. Ochs out:
1567fb67d44dSMatthew R. Ochs 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1568c21e0bbfSMatthew R. Ochs 	return rc;
1569c21e0bbfSMatthew R. Ochs }
1570c21e0bbfSMatthew R. Ochs 
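One subtlety in the conversion loop above: the second argument to kstrtoul() is the numeric base, not a length, so passing WWPN_LEN works only because a WWPN is 16 ASCII characters and 16 also happens to be the hexadecimal base. An equivalent that reads more plainly (sketch only; wwpn_to_u64() is not a driver helper):

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

/* Convert a 16-character ASCII hex WWPN to a u64 (illustrative) */
static int wwpn_to_u64(const char *ascii, u64 *wwpn)
{
	char tmp[WWPN_BUF_LEN] = { 0 };	/* kstrtoul needs NUL termination */

	memcpy(tmp, ascii, WWPN_LEN);
	return kstrtoul(tmp, 16, (unsigned long *)wwpn); /* base 16 = hex */
}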
1571c21e0bbfSMatthew R. Ochs /**
1572c21e0bbfSMatthew R. Ochs  * init_pcr() - initialize the provisioning and control registers
15731284fb0cSMatthew R. Ochs  * @cfg:	Internal structure associated with the host.
1574c21e0bbfSMatthew R. Ochs  *
1575c21e0bbfSMatthew R. Ochs  * Also sets up fast access to the mapped registers and initializes AFU
1576c21e0bbfSMatthew R. Ochs  * command fields that never change.
1577c21e0bbfSMatthew R. Ochs  */
157815305514SMatthew R. Ochs static void init_pcr(struct cxlflash_cfg *cfg)
1579c21e0bbfSMatthew R. Ochs {
1580c21e0bbfSMatthew R. Ochs 	struct afu *afu = cfg->afu;
15811786f4a0SMatthew R. Ochs 	struct sisl_ctrl_map __iomem *ctrl_map;
1582bfc0bab1SUma Krishnan 	struct hwq *hwq;
1583c21e0bbfSMatthew R. Ochs 	int i;
1584c21e0bbfSMatthew R. Ochs 
1585c21e0bbfSMatthew R. Ochs 	for (i = 0; i < MAX_CONTEXT; i++) {
1586c21e0bbfSMatthew R. Ochs 		ctrl_map = &afu->afu_map->ctrls[i].ctrl;
1587f15fbf8dSMatthew R. Ochs 		/* Disrupt any clients that could be running */
1588c21e0bbfSMatthew R. Ochs 		/* e.g. clients that survived a master restart */
1589c21e0bbfSMatthew R. Ochs 		writeq_be(0, &ctrl_map->rht_start);
1590c21e0bbfSMatthew R. Ochs 		writeq_be(0, &ctrl_map->rht_cnt_id);
1591c21e0bbfSMatthew R. Ochs 		writeq_be(0, &ctrl_map->ctx_cap);
1592c21e0bbfSMatthew R. Ochs 	}
1593c21e0bbfSMatthew R. Ochs 
1594bfc0bab1SUma Krishnan 	/* Copy frequently used fields into hwq */
15953065267aSMatthew R. Ochs 	for (i = 0; i < afu->num_hwqs; i++) {
1596bfc0bab1SUma Krishnan 		hwq = get_hwq(afu, i);
1597bfc0bab1SUma Krishnan 
1598bfc0bab1SUma Krishnan 		hwq->ctx_hndl = (u16) cxl_process_element(hwq->ctx);
1599bfc0bab1SUma Krishnan 		hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host;
1600bfc0bab1SUma Krishnan 		hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl;
1601c21e0bbfSMatthew R. Ochs 
1602c21e0bbfSMatthew R. Ochs 		/* Program the Endian Control for the master context */
1603bfc0bab1SUma Krishnan 		writeq_be(SISL_ENDIAN_CTRL, &hwq->host_map->endian_ctrl);
1604bfc0bab1SUma Krishnan 	}
1605c21e0bbfSMatthew R. Ochs }
1606c21e0bbfSMatthew R. Ochs 
1607c21e0bbfSMatthew R. Ochs /**
1608c21e0bbfSMatthew R. Ochs  * init_global() - initialize AFU global registers
16091284fb0cSMatthew R. Ochs  * @cfg:	Internal structure associated with the host.
1610c21e0bbfSMatthew R. Ochs  */
161115305514SMatthew R. Ochs static int init_global(struct cxlflash_cfg *cfg)
1612c21e0bbfSMatthew R. Ochs {
1613c21e0bbfSMatthew R. Ochs 	struct afu *afu = cfg->afu;
16144392ba49SMatthew R. Ochs 	struct device *dev = &cfg->dev->dev;
1615bfc0bab1SUma Krishnan 	struct hwq *hwq;
1616bfc0bab1SUma Krishnan 	struct sisl_host_map __iomem *hmap;
16170aa14887SMatthew R. Ochs 	__be64 __iomem *fc_port_regs;
161878ae028eSMatthew R. Ochs 	u64 wwpn[MAX_FC_PORTS];	/* wwpn of AFU ports */
1619c21e0bbfSMatthew R. Ochs 	int i = 0, num_ports = 0;
1620c21e0bbfSMatthew R. Ochs 	int rc = 0;
1621c21e0bbfSMatthew R. Ochs 	u64 reg;
1622c21e0bbfSMatthew R. Ochs 
1623c21e0bbfSMatthew R. Ochs 	rc = read_vpd(cfg, &wwpn[0]);
1624c21e0bbfSMatthew R. Ochs 	if (rc) {
16254392ba49SMatthew R. Ochs 		dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
1626c21e0bbfSMatthew R. Ochs 		goto out;
1627c21e0bbfSMatthew R. Ochs 	}
1628c21e0bbfSMatthew R. Ochs 
1629bfc0bab1SUma Krishnan 	/* Set up RRQ and SQ in HWQ for master issued cmds */
16303065267aSMatthew R. Ochs 	for (i = 0; i < afu->num_hwqs; i++) {
1631bfc0bab1SUma Krishnan 		hwq = get_hwq(afu, i);
1632bfc0bab1SUma Krishnan 		hmap = hwq->host_map;
1633bfc0bab1SUma Krishnan 
1634bfc0bab1SUma Krishnan 		writeq_be((u64) hwq->hrrq_start, &hmap->rrq_start);
1635bfc0bab1SUma Krishnan 		writeq_be((u64) hwq->hrrq_end, &hmap->rrq_end);
1636c21e0bbfSMatthew R. Ochs 
1637696d0b0cSMatthew R. Ochs 		if (afu_is_sq_cmd_mode(afu)) {
1638bfc0bab1SUma Krishnan 			writeq_be((u64)hwq->hsq_start, &hmap->sq_start);
1639bfc0bab1SUma Krishnan 			writeq_be((u64)hwq->hsq_end, &hmap->sq_end);
1640bfc0bab1SUma Krishnan 		}
1641696d0b0cSMatthew R. Ochs 	}
1642696d0b0cSMatthew R. Ochs 
1643c21e0bbfSMatthew R. Ochs 	/* AFU configuration */
1644c21e0bbfSMatthew R. Ochs 	reg = readq_be(&afu->afu_map->global.regs.afu_config);
1645c21e0bbfSMatthew R. Ochs 	reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
1646c21e0bbfSMatthew R. Ochs 	/* enable all auto retry options and control endianness */
1647c21e0bbfSMatthew R. Ochs 	/* leave others at default: */
1648c21e0bbfSMatthew R. Ochs 	/* CTX_CAP write protected, mbox_r does not clear on read and */
1649c21e0bbfSMatthew R. Ochs 	/* checker on if dual afu */
1650c21e0bbfSMatthew R. Ochs 	writeq_be(reg, &afu->afu_map->global.regs.afu_config);
1651c21e0bbfSMatthew R. Ochs 
1652f15fbf8dSMatthew R. Ochs 	/* Global port select: internal LUN uses port 0, otherwise all ports */
1653c21e0bbfSMatthew R. Ochs 	if (afu->internal_lun) {
1654f15fbf8dSMatthew R. Ochs 		/* Only use port 0 */
1655c21e0bbfSMatthew R. Ochs 		writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
165678ae028eSMatthew R. Ochs 		num_ports = 0;
1657c21e0bbfSMatthew R. Ochs 	} else {
16588fa4f177SMatthew R. Ochs 		writeq_be(PORT_MASK(cfg->num_fc_ports),
16598fa4f177SMatthew R. Ochs 			  &afu->afu_map->global.regs.afu_port_sel);
166078ae028eSMatthew R. Ochs 		num_ports = cfg->num_fc_ports;
1661c21e0bbfSMatthew R. Ochs 	}
1662c21e0bbfSMatthew R. Ochs 
1663c21e0bbfSMatthew R. Ochs 	for (i = 0; i < num_ports; i++) {
16640aa14887SMatthew R. Ochs 		fc_port_regs = get_fc_port_regs(cfg, i);
16650aa14887SMatthew R. Ochs 
1666f15fbf8dSMatthew R. Ochs 		/* Unmask all errors (but they are still masked at AFU) */
16670aa14887SMatthew R. Ochs 		writeq_be(0, &fc_port_regs[FC_ERRMSK / 8]);
1668f15fbf8dSMatthew R. Ochs 		/* Clear CRC error cnt & set a threshold */
16690aa14887SMatthew R. Ochs 		(void)readq_be(&fc_port_regs[FC_CNT_CRCERR / 8]);
16700aa14887SMatthew R. Ochs 		writeq_be(MC_CRC_THRESH, &fc_port_regs[FC_CRC_THRESH / 8]);
1671c21e0bbfSMatthew R. Ochs 
1672f15fbf8dSMatthew R. Ochs 		/* Set WWPNs. If already programmed, wwpn[i] is 0 */
1673f8013261SMatthew R. Ochs 		if (wwpn[i] != 0)
16740aa14887SMatthew R. Ochs 			afu_set_wwpn(afu, i, &fc_port_regs[0], wwpn[i]);
1675c21e0bbfSMatthew R. Ochs 		/* Programming WWPN back to back causes additional
1676c21e0bbfSMatthew R. Ochs 		 * offline/online transitions and a PLOGI
1677c21e0bbfSMatthew R. Ochs 		 */
1678c21e0bbfSMatthew R. Ochs 		msleep(100);
1679c21e0bbfSMatthew R. Ochs 	}
1680c21e0bbfSMatthew R. Ochs 
1681f15fbf8dSMatthew R. Ochs 	/* Set up master's own CTX_CAP to allow real mode, host translation */
1682f15fbf8dSMatthew R. Ochs 	/* tables, afu cmds and read/write GSCSI cmds. */
1683c21e0bbfSMatthew R. Ochs 	/* First, unlock ctx_cap write by reading mbox */
16843065267aSMatthew R. Ochs 	for (i = 0; i < afu->num_hwqs; i++) {
1685bfc0bab1SUma Krishnan 		hwq = get_hwq(afu, i);
1686bfc0bab1SUma Krishnan 
1687bfc0bab1SUma Krishnan 		(void)readq_be(&hwq->ctrl_map->mbox_r);	/* unlock ctx_cap */
1688c21e0bbfSMatthew R. Ochs 		writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
1689c21e0bbfSMatthew R. Ochs 			SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
1690c21e0bbfSMatthew R. Ochs 			SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
1691bfc0bab1SUma Krishnan 			&hwq->ctrl_map->ctx_cap);
1692bfc0bab1SUma Krishnan 	}
1693f15fbf8dSMatthew R. Ochs 	/* Initialize heartbeat */
1694c21e0bbfSMatthew R. Ochs 	afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
1695c21e0bbfSMatthew R. Ochs out:
1696c21e0bbfSMatthew R. Ochs 	return rc;
1697c21e0bbfSMatthew R. Ochs }
1698c21e0bbfSMatthew R. Ochs 
1699c21e0bbfSMatthew R. Ochs /**
1700c21e0bbfSMatthew R. Ochs  * start_afu() - initializes and starts the AFU
17011284fb0cSMatthew R. Ochs  * @cfg:	Internal structure associated with the host.
1702c21e0bbfSMatthew R. Ochs  */
1703c21e0bbfSMatthew R. Ochs static int start_afu(struct cxlflash_cfg *cfg)
1704c21e0bbfSMatthew R. Ochs {
1705c21e0bbfSMatthew R. Ochs 	struct afu *afu = cfg->afu;
1706fb67d44dSMatthew R. Ochs 	struct device *dev = &cfg->dev->dev;
1707bfc0bab1SUma Krishnan 	struct hwq *hwq;
1708c21e0bbfSMatthew R. Ochs 	int rc = 0;
1709bfc0bab1SUma Krishnan 	int i;
1710c21e0bbfSMatthew R. Ochs 
1711c21e0bbfSMatthew R. Ochs 	init_pcr(cfg);
1712c21e0bbfSMatthew R. Ochs 
1713bfc0bab1SUma Krishnan 	/* Initialize each HWQ */
17143065267aSMatthew R. Ochs 	for (i = 0; i < afu->num_hwqs; i++) {
1715bfc0bab1SUma Krishnan 		hwq = get_hwq(afu, i);
1716bfc0bab1SUma Krishnan 
1717bfc0bab1SUma Krishnan 		/* After an AFU reset, RRQ entries are stale, clear them */
1718bfc0bab1SUma Krishnan 		memset(&hwq->rrq_entry, 0, sizeof(hwq->rrq_entry));
1719bfc0bab1SUma Krishnan 
1720bfc0bab1SUma Krishnan 		/* Initialize RRQ pointers */
1721bfc0bab1SUma Krishnan 		hwq->hrrq_start = &hwq->rrq_entry[0];
1722bfc0bab1SUma Krishnan 		hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1];
1723bfc0bab1SUma Krishnan 		hwq->hrrq_curr = hwq->hrrq_start;
1724bfc0bab1SUma Krishnan 		hwq->toggle = 1;
172566ea9bccSUma Krishnan 
172666ea9bccSUma Krishnan 		/* Initialize spin locks */
1727bfc0bab1SUma Krishnan 		spin_lock_init(&hwq->hrrq_slock);
172866ea9bccSUma Krishnan 		spin_lock_init(&hwq->hsq_slock);
1729c21e0bbfSMatthew R. Ochs 
1730696d0b0cSMatthew R. Ochs 		/* Initialize SQ */
1731696d0b0cSMatthew R. Ochs 		if (afu_is_sq_cmd_mode(afu)) {
1732bfc0bab1SUma Krishnan 			memset(&hwq->sq, 0, sizeof(hwq->sq));
1733bfc0bab1SUma Krishnan 			hwq->hsq_start = &hwq->sq[0];
1734bfc0bab1SUma Krishnan 			hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1];
1735bfc0bab1SUma Krishnan 			hwq->hsq_curr = hwq->hsq_start;
1736696d0b0cSMatthew R. Ochs 
1737bfc0bab1SUma Krishnan 			atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1);
1738696d0b0cSMatthew R. Ochs 		}
1739696d0b0cSMatthew R. Ochs 
1740cba06e6dSMatthew R. Ochs 		/* Initialize IRQ poll */
1741cba06e6dSMatthew R. Ochs 		if (afu_is_irqpoll_enabled(afu))
1742bfc0bab1SUma Krishnan 			irq_poll_init(&hwq->irqpoll, afu->irqpoll_weight,
1743cba06e6dSMatthew R. Ochs 				      cxlflash_irqpoll);
1744cba06e6dSMatthew R. Ochs 
1745bfc0bab1SUma Krishnan 	}
1746bfc0bab1SUma Krishnan 
1747c21e0bbfSMatthew R. Ochs 	rc = init_global(cfg);
1748c21e0bbfSMatthew R. Ochs 
1749fb67d44dSMatthew R. Ochs 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1750c21e0bbfSMatthew R. Ochs 	return rc;
1751c21e0bbfSMatthew R. Ochs }
1752c21e0bbfSMatthew R. Ochs 
1753c21e0bbfSMatthew R. Ochs /**
17549526f360SManoj N. Kumar  * init_intr() - setup interrupt handlers for the master context
17551284fb0cSMatthew R. Ochs  * @cfg:	Internal structure associated with the host.
1756bfc0bab1SUma Krishnan  * @hwq:	Hardware queue to initialize.
1757c21e0bbfSMatthew R. Ochs  *
17581284fb0cSMatthew R. Ochs  * Return: UNDO_NOOP when successful, the undo level to use otherwise
1759c21e0bbfSMatthew R. Ochs  */
17609526f360SManoj N. Kumar static enum undo_level init_intr(struct cxlflash_cfg *cfg,
1761bfc0bab1SUma Krishnan 				 struct hwq *hwq)
1762c21e0bbfSMatthew R. Ochs {
17639526f360SManoj N. Kumar 	struct device *dev = &cfg->dev->dev;
1764bfc0bab1SUma Krishnan 	struct cxl_context *ctx = hwq->ctx;
1765c21e0bbfSMatthew R. Ochs 	int rc = 0;
17669526f360SManoj N. Kumar 	enum undo_level level = UNDO_NOOP;
1767bfc0bab1SUma Krishnan 	bool is_primary_hwq = (hwq->index == PRIMARY_HWQ);
1768bfc0bab1SUma Krishnan 	int num_irqs = is_primary_hwq ? 3 : 2;
1769c21e0bbfSMatthew R. Ochs 
1770bfc0bab1SUma Krishnan 	rc = cxl_allocate_afu_irqs(ctx, num_irqs);
1771c21e0bbfSMatthew R. Ochs 	if (unlikely(rc)) {
1772fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
1773c21e0bbfSMatthew R. Ochs 			__func__, rc);
17749526f360SManoj N. Kumar 		level = UNDO_NOOP;
1775c21e0bbfSMatthew R. Ochs 		goto out;
1776c21e0bbfSMatthew R. Ochs 	}
1777c21e0bbfSMatthew R. Ochs 
1778bfc0bab1SUma Krishnan 	rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
1779c21e0bbfSMatthew R. Ochs 			     "SISL_MSI_SYNC_ERROR");
1780c21e0bbfSMatthew R. Ochs 	if (unlikely(rc <= 0)) {
1781fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
1782c21e0bbfSMatthew R. Ochs 		level = FREE_IRQ;
1783c21e0bbfSMatthew R. Ochs 		goto out;
1784c21e0bbfSMatthew R. Ochs 	}
1785c21e0bbfSMatthew R. Ochs 
1786bfc0bab1SUma Krishnan 	rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
1787c21e0bbfSMatthew R. Ochs 			     "SISL_MSI_RRQ_UPDATED");
1788c21e0bbfSMatthew R. Ochs 	if (unlikely(rc <= 0)) {
1789fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
1790c21e0bbfSMatthew R. Ochs 		level = UNMAP_ONE;
1791c21e0bbfSMatthew R. Ochs 		goto out;
1792c21e0bbfSMatthew R. Ochs 	}
1793c21e0bbfSMatthew R. Ochs 
1794bfc0bab1SUma Krishnan 	/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
1795bfc0bab1SUma Krishnan 	if (!is_primary_hwq)
1796bfc0bab1SUma Krishnan 		goto out;
1797bfc0bab1SUma Krishnan 
1798bfc0bab1SUma Krishnan 	rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
1799c21e0bbfSMatthew R. Ochs 			     "SISL_MSI_ASYNC_ERROR");
1800c21e0bbfSMatthew R. Ochs 	if (unlikely(rc <= 0)) {
1801fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
1802c21e0bbfSMatthew R. Ochs 		level = UNMAP_TWO;
1803c21e0bbfSMatthew R. Ochs 		goto out;
1804c21e0bbfSMatthew R. Ochs 	}
18059526f360SManoj N. Kumar out:
18069526f360SManoj N. Kumar 	return level;
18079526f360SManoj N. Kumar }
1808c21e0bbfSMatthew R. Ochs 
18099526f360SManoj N. Kumar /**
18109526f360SManoj N. Kumar  * init_mc() - create and register as the master context
18119526f360SManoj N. Kumar  * @cfg:	Internal structure associated with the host.
1812bfc0bab1SUma Krishnan  * @index:	HWQ Index of the master context.
18139526f360SManoj N. Kumar  *
18149526f360SManoj N. Kumar  * Return: 0 on success, -errno on failure
18159526f360SManoj N. Kumar  */
1816bfc0bab1SUma Krishnan static int init_mc(struct cxlflash_cfg *cfg, u32 index)
18179526f360SManoj N. Kumar {
18189526f360SManoj N. Kumar 	struct cxl_context *ctx;
18199526f360SManoj N. Kumar 	struct device *dev = &cfg->dev->dev;
1820bfc0bab1SUma Krishnan 	struct hwq *hwq = get_hwq(cfg->afu, index);
18219526f360SManoj N. Kumar 	int rc = 0;
18229526f360SManoj N. Kumar 	enum undo_level level;
18239526f360SManoj N. Kumar 
1824bfc0bab1SUma Krishnan 	hwq->afu = cfg->afu;
1825bfc0bab1SUma Krishnan 	hwq->index = index;
1826bfc0bab1SUma Krishnan 
1827bfc0bab1SUma Krishnan 	if (index == PRIMARY_HWQ)
18289526f360SManoj N. Kumar 		ctx = cxl_get_context(cfg->dev);
1829bfc0bab1SUma Krishnan 	else
1830bfc0bab1SUma Krishnan 		ctx = cxl_dev_context_init(cfg->dev);
18319526f360SManoj N. Kumar 	if (unlikely(!ctx)) {
18329526f360SManoj N. Kumar 		rc = -ENOMEM;
1833bfc0bab1SUma Krishnan 		goto err1;
18349526f360SManoj N. Kumar 	}
1835bfc0bab1SUma Krishnan 
1836bfc0bab1SUma Krishnan 	WARN_ON(hwq->ctx);
1837bfc0bab1SUma Krishnan 	hwq->ctx = ctx;
18389526f360SManoj N. Kumar 
18399526f360SManoj N. Kumar 	/* Set it up as a master with the CXL */
18409526f360SManoj N. Kumar 	cxl_set_master(ctx);
18419526f360SManoj N. Kumar 
1842bfc0bab1SUma Krishnan 	/* Reset AFU when initializing primary context */
1843bfc0bab1SUma Krishnan 	if (index == PRIMARY_HWQ) {
1844bfc0bab1SUma Krishnan 		rc = cxl_afu_reset(ctx);
18459526f360SManoj N. Kumar 		if (unlikely(rc)) {
1846bfc0bab1SUma Krishnan 			dev_err(dev, "%s: AFU reset failed rc=%d\n",
1847bfc0bab1SUma Krishnan 				      __func__, rc);
1848bfc0bab1SUma Krishnan 			goto err1;
1849bfc0bab1SUma Krishnan 		}
18509526f360SManoj N. Kumar 	}
18519526f360SManoj N. Kumar 
1852bfc0bab1SUma Krishnan 	level = init_intr(cfg, hwq);
18539526f360SManoj N. Kumar 	if (unlikely(level)) {
1854fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: interrupt init failed rc=%d\n", __func__, rc);
1855bfc0bab1SUma Krishnan 		goto err2;
18569526f360SManoj N. Kumar 	}
1857c21e0bbfSMatthew R. Ochs 
1858c21e0bbfSMatthew R. Ochs 	/* This performs the equivalent of the CXL_IOCTL_START_WORK.
1859c21e0bbfSMatthew R. Ochs 	 * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
1860c21e0bbfSMatthew R. Ochs 	 * element (pe) that is embedded in the context (ctx)
1861c21e0bbfSMatthew R. Ochs 	 */
1862bfc0bab1SUma Krishnan 	rc = start_context(cfg, index);
1863c21e0bbfSMatthew R. Ochs 	if (unlikely(rc)) {
1864c21e0bbfSMatthew R. Ochs 		dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
1865c21e0bbfSMatthew R. Ochs 		level = UNMAP_THREE;
1866bfc0bab1SUma Krishnan 		goto err2;
1867c21e0bbfSMatthew R. Ochs 	}
1868bfc0bab1SUma Krishnan 
1869bfc0bab1SUma Krishnan out:
1870fb67d44dSMatthew R. Ochs 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1871c21e0bbfSMatthew R. Ochs 	return rc;
1872bfc0bab1SUma Krishnan err2:
1873bfc0bab1SUma Krishnan 	term_intr(cfg, level, index);
1874bfc0bab1SUma Krishnan 	if (index != PRIMARY_HWQ)
1875bfc0bab1SUma Krishnan 		cxl_release_context(ctx);
1876bfc0bab1SUma Krishnan err1:
1877bfc0bab1SUma Krishnan 	hwq->ctx = NULL;
1878bfc0bab1SUma Krishnan 	goto out;
1879c21e0bbfSMatthew R. Ochs }
1880c21e0bbfSMatthew R. Ochs 
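Condensed, the master-context bring-up that init_mc(), init_intr() and start_context() perform uses the in-kernel CXL API in the following order. This sketch reuses only calls that appear in this file but elides the error unwinding and per-HWQ branching, so it is illustrative rather than a drop-in replacement:

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <misc/cxl.h>

/* Illustrative bring-up order only; real code unwinds on each failure */
static int master_ctx_bringup(struct pci_dev *pdev, u64 wed,
			      irq_handler_t handler, void *data)
{
	struct cxl_context *ctx = cxl_dev_context_init(pdev);

	if (!ctx)
		return -ENOMEM;
	cxl_set_master(ctx);			/* master, not user, context */
	cxl_afu_reset(ctx);			/* primary HWQ only */
	cxl_allocate_afu_irqs(ctx, 3);		/* sync err, RRQ, async err */
	cxl_map_afu_irq(ctx, 1, handler, data, "IRQ1");
	/* ... map IRQs 2 and 3 the same way ... */
	return cxl_start_context(ctx, wed, NULL);
}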
1881c21e0bbfSMatthew R. Ochs /**
188256518072SMatthew R. Ochs  * get_num_afu_ports() - determines and configures the number of AFU ports
188356518072SMatthew R. Ochs  * @cfg:	Internal structure associated with the host.
188456518072SMatthew R. Ochs  *
188556518072SMatthew R. Ochs  * This routine determines the number of AFU ports by converting the global
188656518072SMatthew R. Ochs  * port selection mask. The converted value is only valid following an AFU
188756518072SMatthew R. Ochs  * reset (explicit or power-on). This routine must be invoked shortly after
188856518072SMatthew R. Ochs  * mapping as other routines are dependent on the number of ports during the
188956518072SMatthew R. Ochs  * initialization sequence.
189056518072SMatthew R. Ochs  *
189156518072SMatthew R. Ochs  * To support legacy AFUs that might not have reflected an initial global
189256518072SMatthew R. Ochs  * port mask (value read is 0), default to the number of ports originally
189356518072SMatthew R. Ochs  * supported by the cxlflash driver (2) before hardware with other port
189456518072SMatthew R. Ochs  * offerings was introduced.
189556518072SMatthew R. Ochs  */
189656518072SMatthew R. Ochs static void get_num_afu_ports(struct cxlflash_cfg *cfg)
189756518072SMatthew R. Ochs {
189856518072SMatthew R. Ochs 	struct afu *afu = cfg->afu;
189956518072SMatthew R. Ochs 	struct device *dev = &cfg->dev->dev;
190056518072SMatthew R. Ochs 	u64 port_mask;
190156518072SMatthew R. Ochs 	int num_fc_ports = LEGACY_FC_PORTS;
190256518072SMatthew R. Ochs 
190356518072SMatthew R. Ochs 	port_mask = readq_be(&afu->afu_map->global.regs.afu_port_sel);
190456518072SMatthew R. Ochs 	if (port_mask != 0ULL)
190556518072SMatthew R. Ochs 		num_fc_ports = min(ilog2(port_mask) + 1, MAX_FC_PORTS);
190656518072SMatthew R. Ochs 
190756518072SMatthew R. Ochs 	dev_dbg(dev, "%s: port_mask=%016llx num_fc_ports=%d\n",
190856518072SMatthew R. Ochs 		__func__, port_mask, num_fc_ports);
190956518072SMatthew R. Ochs 
191056518072SMatthew R. Ochs 	cfg->num_fc_ports = num_fc_ports;
191156518072SMatthew R. Ochs 	cfg->host->max_channel = PORTNUM2CHAN(num_fc_ports);
191256518072SMatthew R. Ochs }
191356518072SMatthew R. Ochs 
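As a worked example: a freshly reset four-port AFU reads back port_mask = 0xF, so num_fc_ports = ilog2(0xF) + 1 = 3 + 1 = 4; a two-port AFU reads back 0x3, giving ilog2(0x3) + 1 = 2. A legacy AFU that reads back 0 skips the conversion and falls back to LEGACY_FC_PORTS.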
191456518072SMatthew R. Ochs /**
1915c21e0bbfSMatthew R. Ochs  * init_afu() - setup as master context and start AFU
19161284fb0cSMatthew R. Ochs  * @cfg:	Internal structure associated with the host.
1917c21e0bbfSMatthew R. Ochs  *
1918c21e0bbfSMatthew R. Ochs  * This routine is a higher level of control for configuring the
1919c21e0bbfSMatthew R. Ochs  * AFU on probe and reset paths.
1920c21e0bbfSMatthew R. Ochs  *
19211284fb0cSMatthew R. Ochs  * Return: 0 on success, -errno on failure
1922c21e0bbfSMatthew R. Ochs  */
1923c21e0bbfSMatthew R. Ochs static int init_afu(struct cxlflash_cfg *cfg)
1924c21e0bbfSMatthew R. Ochs {
1925c21e0bbfSMatthew R. Ochs 	u64 reg;
1926c21e0bbfSMatthew R. Ochs 	int rc = 0;
1927c21e0bbfSMatthew R. Ochs 	struct afu *afu = cfg->afu;
1928c21e0bbfSMatthew R. Ochs 	struct device *dev = &cfg->dev->dev;
1929bfc0bab1SUma Krishnan 	struct hwq *hwq;
1930bfc0bab1SUma Krishnan 	int i;
1931c21e0bbfSMatthew R. Ochs 
19325cdac81aSMatthew R. Ochs 	cxl_perst_reloads_same_image(cfg->cxl_afu, true);
19335cdac81aSMatthew R. Ochs 
19343065267aSMatthew R. Ochs 	afu->num_hwqs = afu->desired_hwqs;
19353065267aSMatthew R. Ochs 	for (i = 0; i < afu->num_hwqs; i++) {
1936bfc0bab1SUma Krishnan 		rc = init_mc(cfg, i);
1937c21e0bbfSMatthew R. Ochs 		if (rc) {
1938bfc0bab1SUma Krishnan 			dev_err(dev, "%s: init_mc failed rc=%d index=%d\n",
1939bfc0bab1SUma Krishnan 				__func__, rc, i);
1940bfc0bab1SUma Krishnan 			goto err1;
1941bfc0bab1SUma Krishnan 		}
1942c21e0bbfSMatthew R. Ochs 	}
1943c21e0bbfSMatthew R. Ochs 
1944bfc0bab1SUma Krishnan 	/* Map the entire MMIO space of the AFU using the first context */
1945bfc0bab1SUma Krishnan 	hwq = get_hwq(afu, PRIMARY_HWQ);
1946bfc0bab1SUma Krishnan 	afu->afu_map = cxl_psa_map(hwq->ctx);
1947c21e0bbfSMatthew R. Ochs 	if (!afu->afu_map) {
1948fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: cxl_psa_map failed\n", __func__);
1949ee3491baSMatthew R. Ochs 		rc = -ENOMEM;
1950c21e0bbfSMatthew R. Ochs 		goto err1;
1951c21e0bbfSMatthew R. Ochs 	}
1952c21e0bbfSMatthew R. Ochs 
1953e5ce067bSMatthew R. Ochs 	/* No byte reverse on reading afu_version or string will be backwards */
1954e5ce067bSMatthew R. Ochs 	reg = readq(&afu->afu_map->global.regs.afu_version);
1955e5ce067bSMatthew R. Ochs 	memcpy(afu->version, &reg, sizeof(reg));
1956c21e0bbfSMatthew R. Ochs 	afu->interface_version =
1957c21e0bbfSMatthew R. Ochs 	    readq_be(&afu->afu_map->global.regs.interface_version);
1958e5ce067bSMatthew R. Ochs 	if ((afu->interface_version + 1) == 0) {
1959fb67d44dSMatthew R. Ochs 		dev_err(dev, "Back level AFU, please upgrade. AFU version %s "
1960fb67d44dSMatthew R. Ochs 			"interface version %016llx\n", afu->version,
1961e5ce067bSMatthew R. Ochs 		       afu->interface_version);
1962e5ce067bSMatthew R. Ochs 		rc = -EINVAL;
19630df5bef7SUma Krishnan 		goto err1;
1964ee3491baSMatthew R. Ochs 	}
1965ee3491baSMatthew R. Ochs 
1966696d0b0cSMatthew R. Ochs 	if (afu_is_sq_cmd_mode(afu)) {
1967696d0b0cSMatthew R. Ochs 		afu->send_cmd = send_cmd_sq;
1968696d0b0cSMatthew R. Ochs 		afu->context_reset = context_reset_sq;
1969696d0b0cSMatthew R. Ochs 	} else {
197048b4be36SMatthew R. Ochs 		afu->send_cmd = send_cmd_ioarrin;
197148b4be36SMatthew R. Ochs 		afu->context_reset = context_reset_ioarrin;
1972696d0b0cSMatthew R. Ochs 	}
197348b4be36SMatthew R. Ochs 
1974fb67d44dSMatthew R. Ochs 	dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__,
1975ee3491baSMatthew R. Ochs 		afu->version, afu->interface_version);
1976c21e0bbfSMatthew R. Ochs 
197756518072SMatthew R. Ochs 	get_num_afu_ports(cfg);
197856518072SMatthew R. Ochs 
1979c21e0bbfSMatthew R. Ochs 	rc = start_afu(cfg);
1980c21e0bbfSMatthew R. Ochs 	if (rc) {
1981fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc);
19820df5bef7SUma Krishnan 		goto err1;
1983c21e0bbfSMatthew R. Ochs 	}
1984c21e0bbfSMatthew R. Ochs 
1985c21e0bbfSMatthew R. Ochs 	afu_err_intr_init(cfg->afu);
19863065267aSMatthew R. Ochs 	for (i = 0; i < afu->num_hwqs; i++) {
1987bfc0bab1SUma Krishnan 		hwq = get_hwq(afu, i);
1988bfc0bab1SUma Krishnan 
1989bfc0bab1SUma Krishnan 		hwq->room = readq_be(&hwq->host_map->cmd_room);
1990bfc0bab1SUma Krishnan 	}
1991c21e0bbfSMatthew R. Ochs 
19922cb79266SMatthew R. Ochs 	/* Restore the LUN mappings */
19932cb79266SMatthew R. Ochs 	cxlflash_restore_luntable(cfg);
1994ee3491baSMatthew R. Ochs out:
1995fb67d44dSMatthew R. Ochs 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1996c21e0bbfSMatthew R. Ochs 	return rc;
1997ee3491baSMatthew R. Ochs 
1998ee3491baSMatthew R. Ochs err1:
19993065267aSMatthew R. Ochs 	for (i = afu->num_hwqs - 1; i >= 0; i--) {
2000bfc0bab1SUma Krishnan 		term_intr(cfg, UNMAP_THREE, i);
2001bfc0bab1SUma Krishnan 		term_mc(cfg, i);
2002bfc0bab1SUma Krishnan 	}
2003ee3491baSMatthew R. Ochs 	goto out;
2004c21e0bbfSMatthew R. Ochs }
2005c21e0bbfSMatthew R. Ochs 
2006c21e0bbfSMatthew R. Ochs /**
2007c21e0bbfSMatthew R. Ochs  * cxlflash_afu_sync() - builds and sends an AFU sync command
2008c21e0bbfSMatthew R. Ochs  * @afu:	AFU associated with the host.
2009c21e0bbfSMatthew R. Ochs  * @ctx_hndl_u:	Identifies context requesting sync.
2010c21e0bbfSMatthew R. Ochs  * @res_hndl_u:	Identifies resource requesting sync.
2011c21e0bbfSMatthew R. Ochs  * @mode:	Type of sync to issue (lightweight, heavyweight, global).
2012c21e0bbfSMatthew R. Ochs  *
2013c21e0bbfSMatthew R. Ochs  * The AFU can only take 1 sync command at a time. This routine enforces this
2014f15fbf8dSMatthew R. Ochs  * limitation by using a mutex to provide exclusive access to the AFU during
2015c21e0bbfSMatthew R. Ochs  * the sync. This design point requires that calling threads not be in
2016c21e0bbfSMatthew R. Ochs  * interrupt context, as they may sleep when sync operations run concurrently.
2017c21e0bbfSMatthew R. Ochs  *
20185cdac81aSMatthew R. Ochs  * AFU sync operations are only necessary and allowed when the device is
20195cdac81aSMatthew R. Ochs  * operating normally. When not operating normally, sync requests can occur as
20205cdac81aSMatthew R. Ochs  * part of cleaning up resources associated with an adapter prior to removal.
20215cdac81aSMatthew R. Ochs  * In this scenario, these requests are simply ignored (safe due to the AFU
20225cdac81aSMatthew R. Ochs  * going away).
20235cdac81aSMatthew R. Ochs  *
2024c21e0bbfSMatthew R. Ochs  * Return:
2025c21e0bbfSMatthew R. Ochs  *	0 on success
2026c21e0bbfSMatthew R. Ochs  *	-1 on failure
2027c21e0bbfSMatthew R. Ochs  */
2028c21e0bbfSMatthew R. Ochs int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
2029c21e0bbfSMatthew R. Ochs 		      res_hndl_t res_hndl_u, u8 mode)
2030c21e0bbfSMatthew R. Ochs {
20315cdac81aSMatthew R. Ochs 	struct cxlflash_cfg *cfg = afu->parent;
20324392ba49SMatthew R. Ochs 	struct device *dev = &cfg->dev->dev;
2033c21e0bbfSMatthew R. Ochs 	struct afu_cmd *cmd = NULL;
2034bfc0bab1SUma Krishnan 	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
2035350bb478SMatthew R. Ochs 	char *buf = NULL;
2036c21e0bbfSMatthew R. Ochs 	int rc = 0;
2037c21e0bbfSMatthew R. Ochs 	static DEFINE_MUTEX(sync_active);
2038c21e0bbfSMatthew R. Ochs 
20395cdac81aSMatthew R. Ochs 	if (cfg->state != STATE_NORMAL) {
2040fb67d44dSMatthew R. Ochs 		dev_dbg(dev, "%s: Sync not required state=%u\n",
2041fb67d44dSMatthew R. Ochs 			__func__, cfg->state);
20425cdac81aSMatthew R. Ochs 		return 0;
20435cdac81aSMatthew R. Ochs 	}
20445cdac81aSMatthew R. Ochs 
2045c21e0bbfSMatthew R. Ochs 	mutex_lock(&sync_active);
2046de01283bSMatthew R. Ochs 	atomic_inc(&afu->cmds_active);
2047350bb478SMatthew R. Ochs 	buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
2048350bb478SMatthew R. Ochs 	if (unlikely(!buf)) {
2049350bb478SMatthew R. Ochs 		dev_err(dev, "%s: no memory for command\n", __func__);
2050c21e0bbfSMatthew R. Ochs 		rc = -1;
2051c21e0bbfSMatthew R. Ochs 		goto out;
2052c21e0bbfSMatthew R. Ochs 	}
2053c21e0bbfSMatthew R. Ochs 
2054350bb478SMatthew R. Ochs 	cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
2055350bb478SMatthew R. Ochs 	init_completion(&cmd->cevent);
2056350bb478SMatthew R. Ochs 	cmd->parent = afu;
2057bfc0bab1SUma Krishnan 	cmd->hwq_index = hwq->index;
2058350bb478SMatthew R. Ochs 
2059fb67d44dSMatthew R. Ochs 	dev_dbg(dev, "%s: afu=%p cmd=%p ctx=%d\n", __func__, afu, cmd, ctx_hndl_u);
2060c21e0bbfSMatthew R. Ochs 
2061c21e0bbfSMatthew R. Ochs 	cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
2062bfc0bab1SUma Krishnan 	cmd->rcb.ctx_id = hwq->ctx_hndl;
2063350bb478SMatthew R. Ochs 	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
2064c21e0bbfSMatthew R. Ochs 	cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT;
2065c21e0bbfSMatthew R. Ochs 
2066c21e0bbfSMatthew R. Ochs 	cmd->rcb.cdb[0] = 0xC0;	/* AFU Sync */
2067c21e0bbfSMatthew R. Ochs 	cmd->rcb.cdb[1] = mode;
2068c21e0bbfSMatthew R. Ochs 
2069c21e0bbfSMatthew R. Ochs 	/* The cdb is aligned, no unaligned accessors required */
20701786f4a0SMatthew R. Ochs 	*((__be16 *)&cmd->rcb.cdb[2]) = cpu_to_be16(ctx_hndl_u);
20711786f4a0SMatthew R. Ochs 	*((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u);
2072c21e0bbfSMatthew R. Ochs 
207348b4be36SMatthew R. Ochs 	rc = afu->send_cmd(afu, cmd);
2074c21e0bbfSMatthew R. Ochs 	if (unlikely(rc))
2075c21e0bbfSMatthew R. Ochs 		goto out;
2076c21e0bbfSMatthew R. Ochs 
20779ba848acSMatthew R. Ochs 	rc = wait_resp(afu, cmd);
20789ba848acSMatthew R. Ochs 	if (unlikely(rc))
2079c21e0bbfSMatthew R. Ochs 		rc = -1;
2080c21e0bbfSMatthew R. Ochs out:
2081de01283bSMatthew R. Ochs 	atomic_dec(&afu->cmds_active);
2082c21e0bbfSMatthew R. Ochs 	mutex_unlock(&sync_active);
2083350bb478SMatthew R. Ochs 	kfree(buf);
2084fb67d44dSMatthew R. Ochs 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2085c21e0bbfSMatthew R. Ochs 	return rc;
2086c21e0bbfSMatthew R. Ochs }
2087c21e0bbfSMatthew R. Ochs 
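/*
 * Example (editor's sketch, not part of the driver): a superpipe code path
 * holding context and resource handles would typically request a lightweight
 * sync as shown below, assuming the AFU_LW_SYNC mode constant from common.h:
 *
 *	if (cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC))
 *		dev_err(dev, "%s: AFU sync failed\n", __func__);
 */
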
2088c21e0bbfSMatthew R. Ochs /**
208915305514SMatthew R. Ochs  * afu_reset() - resets the AFU
209015305514SMatthew R. Ochs  * @cfg:	Internal structure associated with the host.
2091c21e0bbfSMatthew R. Ochs  *
20921284fb0cSMatthew R. Ochs  * Return: 0 on success, -errno on failure
2093c21e0bbfSMatthew R. Ochs  */
209415305514SMatthew R. Ochs static int afu_reset(struct cxlflash_cfg *cfg)
2095c21e0bbfSMatthew R. Ochs {
2096fb67d44dSMatthew R. Ochs 	struct device *dev = &cfg->dev->dev;
2097c21e0bbfSMatthew R. Ochs 	int rc = 0;
2098fb67d44dSMatthew R. Ochs 
2099c21e0bbfSMatthew R. Ochs 	/* Stop the context before the reset. Since the context is
2100c21e0bbfSMatthew R. Ochs 	 * no longer available, restart it after the reset completes.
2101c21e0bbfSMatthew R. Ochs 	 */
2102c21e0bbfSMatthew R. Ochs 	term_afu(cfg);
2103c21e0bbfSMatthew R. Ochs 
2104c21e0bbfSMatthew R. Ochs 	rc = init_afu(cfg);
2105c21e0bbfSMatthew R. Ochs 
2106fb67d44dSMatthew R. Ochs 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2107c21e0bbfSMatthew R. Ochs 	return rc;
2108c21e0bbfSMatthew R. Ochs }
2109c21e0bbfSMatthew R. Ochs 
2110c21e0bbfSMatthew R. Ochs /**
2111f411396dSManoj N. Kumar  * drain_ioctls() - wait until all currently executing ioctls have completed
2112f411396dSManoj N. Kumar  * @cfg:	Internal structure associated with the host.
2113f411396dSManoj N. Kumar  *
2114f411396dSManoj N. Kumar  * Obtain write access to read/write semaphore that wraps ioctl
2115f411396dSManoj N. Kumar  * handling to 'drain' ioctls currently executing.
2116f411396dSManoj N. Kumar  */
2117f411396dSManoj N. Kumar static void drain_ioctls(struct cxlflash_cfg *cfg)
2118f411396dSManoj N. Kumar {
2119f411396dSManoj N. Kumar 	down_write(&cfg->ioctl_rwsem);
2120f411396dSManoj N. Kumar 	up_write(&cfg->ioctl_rwsem);
2121f411396dSManoj N. Kumar }
2122f411396dSManoj N. Kumar 
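/*
 * Editor's note: this drain works because each ioctl handler is assumed to
 * bracket its work with the read side of the same semaphore (handle_ioctl()
 * is a hypothetical placeholder for that work):
 *
 *	down_read(&cfg->ioctl_rwsem);
 *	rc = handle_ioctl(cfg, cmd, arg);
 *	up_read(&cfg->ioctl_rwsem);
 *
 * down_write() therefore cannot be acquired until every in-flight ioctl has
 * exited, and new ioctls block until it is released.
 */
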
2123f411396dSManoj N. Kumar /**
212415305514SMatthew R. Ochs  * cxlflash_eh_device_reset_handler() - reset a single LUN
212515305514SMatthew R. Ochs  * @scp:	SCSI command from stack identifying the LUN to reset.
212615305514SMatthew R. Ochs  *
212715305514SMatthew R. Ochs  * Return:
212815305514SMatthew R. Ochs  *	SUCCESS as defined in scsi/scsi.h
212915305514SMatthew R. Ochs  *	FAILED as defined in scsi/scsi.h
213015305514SMatthew R. Ochs  */
213115305514SMatthew R. Ochs static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
213215305514SMatthew R. Ochs {
213315305514SMatthew R. Ochs 	int rc = SUCCESS;
213415305514SMatthew R. Ochs 	struct Scsi_Host *host = scp->device->host;
2135fb67d44dSMatthew R. Ochs 	struct cxlflash_cfg *cfg = shost_priv(host);
2136fb67d44dSMatthew R. Ochs 	struct device *dev = &cfg->dev->dev;
213715305514SMatthew R. Ochs 	struct afu *afu = cfg->afu;
213815305514SMatthew R. Ochs 	int rcr = 0;
213915305514SMatthew R. Ochs 
2140fb67d44dSMatthew R. Ochs 	dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
2141fb67d44dSMatthew R. Ochs 		"cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
2142fb67d44dSMatthew R. Ochs 		scp->device->channel, scp->device->id, scp->device->lun,
214315305514SMatthew R. Ochs 		get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
214415305514SMatthew R. Ochs 		get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
214515305514SMatthew R. Ochs 		get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
214615305514SMatthew R. Ochs 		get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
214715305514SMatthew R. Ochs 
2148ed486daaSMatthew R. Ochs retry:
214915305514SMatthew R. Ochs 	switch (cfg->state) {
215015305514SMatthew R. Ochs 	case STATE_NORMAL:
215115305514SMatthew R. Ochs 		rcr = send_tmf(afu, scp, TMF_LUN_RESET);
215215305514SMatthew R. Ochs 		if (unlikely(rcr))
215315305514SMatthew R. Ochs 			rc = FAILED;
215415305514SMatthew R. Ochs 		break;
215515305514SMatthew R. Ochs 	case STATE_RESET:
215615305514SMatthew R. Ochs 		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2157ed486daaSMatthew R. Ochs 		goto retry;
215815305514SMatthew R. Ochs 	default:
215915305514SMatthew R. Ochs 		rc = FAILED;
216015305514SMatthew R. Ochs 		break;
216115305514SMatthew R. Ochs 	}
216215305514SMatthew R. Ochs 
2163fb67d44dSMatthew R. Ochs 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
216415305514SMatthew R. Ochs 	return rc;
216515305514SMatthew R. Ochs }
216615305514SMatthew R. Ochs 
216715305514SMatthew R. Ochs /**
216815305514SMatthew R. Ochs  * cxlflash_eh_host_reset_handler() - reset the host adapter
216915305514SMatthew R. Ochs  * @scp:	SCSI command from stack identifying host.
217015305514SMatthew R. Ochs  *
21711d3324c3SMatthew R. Ochs  * Following a reset, the state is evaluated again in case an EEH occurred
21721d3324c3SMatthew R. Ochs  * during the reset. In such a scenario, the host reset will either yield
21731d3324c3SMatthew R. Ochs  * until the EEH recovery is complete or return success or failure based
21741d3324c3SMatthew R. Ochs  * upon the current device state.
21751d3324c3SMatthew R. Ochs  *
217615305514SMatthew R. Ochs  * Return:
217715305514SMatthew R. Ochs  *	SUCCESS as defined in scsi/scsi.h
217815305514SMatthew R. Ochs  *	FAILED as defined in scsi/scsi.h
217915305514SMatthew R. Ochs  */
218015305514SMatthew R. Ochs static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
218115305514SMatthew R. Ochs {
218215305514SMatthew R. Ochs 	int rc = SUCCESS;
218315305514SMatthew R. Ochs 	int rcr = 0;
218415305514SMatthew R. Ochs 	struct Scsi_Host *host = scp->device->host;
2185fb67d44dSMatthew R. Ochs 	struct cxlflash_cfg *cfg = shost_priv(host);
2186fb67d44dSMatthew R. Ochs 	struct device *dev = &cfg->dev->dev;
218715305514SMatthew R. Ochs 
2188fb67d44dSMatthew R. Ochs 	dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
2189fb67d44dSMatthew R. Ochs 		"cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
2190fb67d44dSMatthew R. Ochs 		scp->device->channel, scp->device->id, scp->device->lun,
219115305514SMatthew R. Ochs 		get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
219215305514SMatthew R. Ochs 		get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
219315305514SMatthew R. Ochs 		get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
219415305514SMatthew R. Ochs 		get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
219515305514SMatthew R. Ochs 
219615305514SMatthew R. Ochs 	switch (cfg->state) {
219715305514SMatthew R. Ochs 	case STATE_NORMAL:
219815305514SMatthew R. Ochs 		cfg->state = STATE_RESET;
2199f411396dSManoj N. Kumar 		drain_ioctls(cfg);
220015305514SMatthew R. Ochs 		cxlflash_mark_contexts_error(cfg);
220115305514SMatthew R. Ochs 		rcr = afu_reset(cfg);
220215305514SMatthew R. Ochs 		if (rcr) {
220315305514SMatthew R. Ochs 			rc = FAILED;
220415305514SMatthew R. Ochs 			cfg->state = STATE_FAILTERM;
220515305514SMatthew R. Ochs 		} else {
220615305514SMatthew R. Ochs 			cfg->state = STATE_NORMAL;
		}
220715305514SMatthew R. Ochs 		wake_up_all(&cfg->reset_waitq);
22081d3324c3SMatthew R. Ochs 		ssleep(1);
22091d3324c3SMatthew R. Ochs 		/* fall through */
221015305514SMatthew R. Ochs 	case STATE_RESET:
221115305514SMatthew R. Ochs 		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
221215305514SMatthew R. Ochs 		if (cfg->state == STATE_NORMAL)
221315305514SMatthew R. Ochs 			break;
221415305514SMatthew R. Ochs 		/* fall through */
221515305514SMatthew R. Ochs 	default:
221615305514SMatthew R. Ochs 		rc = FAILED;
221715305514SMatthew R. Ochs 		break;
221815305514SMatthew R. Ochs 	}
221915305514SMatthew R. Ochs 
2220fb67d44dSMatthew R. Ochs 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
222115305514SMatthew R. Ochs 	return rc;
222215305514SMatthew R. Ochs }
222315305514SMatthew R. Ochs 
222415305514SMatthew R. Ochs /**
222515305514SMatthew R. Ochs  * cxlflash_change_queue_depth() - change the queue depth for the device
222615305514SMatthew R. Ochs  * @sdev:	SCSI device destined for queue depth change.
222715305514SMatthew R. Ochs  * @qdepth:	Requested queue depth value to set.
222815305514SMatthew R. Ochs  *
222915305514SMatthew R. Ochs  * The requested queue depth is capped to the maximum supported value.
223015305514SMatthew R. Ochs  *
223115305514SMatthew R. Ochs  * Return: The actual queue depth set.
223215305514SMatthew R. Ochs  */
223315305514SMatthew R. Ochs static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
223415305514SMatthew R. Ochs {
223615305514SMatthew R. Ochs 	if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
223715305514SMatthew R. Ochs 		qdepth = CXLFLASH_MAX_CMDS_PER_LUN;
223815305514SMatthew R. Ochs 
223915305514SMatthew R. Ochs 	scsi_change_queue_depth(sdev, qdepth);
224015305514SMatthew R. Ochs 	return sdev->queue_depth;
224115305514SMatthew R. Ochs }
224215305514SMatthew R. Ochs 
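/*
 * Editor's note: an out-of-range request is clamped rather than rejected;
 * writing 512 to the sdev queue_depth sysfs attribute on a card whose
 * CXLFLASH_MAX_CMDS_PER_LUN is smaller simply sets, and reports back, the
 * maximum supported depth.
 */
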
224315305514SMatthew R. Ochs /**
224415305514SMatthew R. Ochs  * cxlflash_show_port_status() - queries and presents the current port status
2245e0f01a21SMatthew R. Ochs  * @port:	Desired port for status reporting.
22463b225cd3SMatthew R. Ochs  * @cfg:	Internal structure associated with the host.
224715305514SMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
224815305514SMatthew R. Ochs  *
224978ae028eSMatthew R. Ochs  * Return: The size of the ASCII string returned in @buf or -EINVAL.
225015305514SMatthew R. Ochs  */
22513b225cd3SMatthew R. Ochs static ssize_t cxlflash_show_port_status(u32 port,
22523b225cd3SMatthew R. Ochs 					 struct cxlflash_cfg *cfg,
22533b225cd3SMatthew R. Ochs 					 char *buf)
225415305514SMatthew R. Ochs {
225578ae028eSMatthew R. Ochs 	struct device *dev = &cfg->dev->dev;
225615305514SMatthew R. Ochs 	char *disp_status;
225715305514SMatthew R. Ochs 	u64 status;
22580aa14887SMatthew R. Ochs 	__be64 __iomem *fc_port_regs;
225915305514SMatthew R. Ochs 
226078ae028eSMatthew R. Ochs 	WARN_ON(port >= MAX_FC_PORTS);
226178ae028eSMatthew R. Ochs 
226278ae028eSMatthew R. Ochs 	if (port >= cfg->num_fc_ports) {
226378ae028eSMatthew R. Ochs 		dev_info(dev, "%s: Port %d not supported on this card.\n",
226478ae028eSMatthew R. Ochs 			__func__, port);
226578ae028eSMatthew R. Ochs 		return -EINVAL;
226678ae028eSMatthew R. Ochs 	}
226715305514SMatthew R. Ochs 
22680aa14887SMatthew R. Ochs 	fc_port_regs = get_fc_port_regs(cfg, port);
22690aa14887SMatthew R. Ochs 	status = readq_be(&fc_port_regs[FC_MTIP_STATUS / 8]);
2270e0f01a21SMatthew R. Ochs 	status &= FC_MTIP_STATUS_MASK;
227115305514SMatthew R. Ochs 
227215305514SMatthew R. Ochs 	if (status == FC_MTIP_STATUS_ONLINE)
227315305514SMatthew R. Ochs 		disp_status = "online";
227415305514SMatthew R. Ochs 	else if (status == FC_MTIP_STATUS_OFFLINE)
227515305514SMatthew R. Ochs 		disp_status = "offline";
227615305514SMatthew R. Ochs 	else
227715305514SMatthew R. Ochs 		disp_status = "unknown";
227815305514SMatthew R. Ochs 
2279e0f01a21SMatthew R. Ochs 	return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
228015305514SMatthew R. Ochs }
228115305514SMatthew R. Ochs 
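/*
 * Example (editor's addition): reading the derived sysfs attribute from
 * userspace, assuming SCSI host instance 0:
 *
 *	$ cat /sys/class/scsi_host/host0/port0
 *	online
 */
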
228215305514SMatthew R. Ochs /**
2283e0f01a21SMatthew R. Ochs  * port0_show() - queries and presents the current status of port 0
2284e0f01a21SMatthew R. Ochs  * @dev:	Generic device associated with the host owning the port.
2285e0f01a21SMatthew R. Ochs  * @attr:	Device attribute representing the port.
2286e0f01a21SMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2287e0f01a21SMatthew R. Ochs  *
2288e0f01a21SMatthew R. Ochs  * Return: The size of the ASCII string returned in @buf.
2289e0f01a21SMatthew R. Ochs  */
2290e0f01a21SMatthew R. Ochs static ssize_t port0_show(struct device *dev,
2291e0f01a21SMatthew R. Ochs 			  struct device_attribute *attr,
2292e0f01a21SMatthew R. Ochs 			  char *buf)
2293e0f01a21SMatthew R. Ochs {
2294fb67d44dSMatthew R. Ochs 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2295e0f01a21SMatthew R. Ochs 
22963b225cd3SMatthew R. Ochs 	return cxlflash_show_port_status(0, cfg, buf);
2297e0f01a21SMatthew R. Ochs }
2298e0f01a21SMatthew R. Ochs 
2299e0f01a21SMatthew R. Ochs /**
2300e0f01a21SMatthew R. Ochs  * port1_show() - queries and presents the current status of port 1
2301e0f01a21SMatthew R. Ochs  * @dev:	Generic device associated with the host owning the port.
2302e0f01a21SMatthew R. Ochs  * @attr:	Device attribute representing the port.
2303e0f01a21SMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2304e0f01a21SMatthew R. Ochs  *
2305e0f01a21SMatthew R. Ochs  * Return: The size of the ASCII string returned in @buf.
2306e0f01a21SMatthew R. Ochs  */
2307e0f01a21SMatthew R. Ochs static ssize_t port1_show(struct device *dev,
2308e0f01a21SMatthew R. Ochs 			  struct device_attribute *attr,
2309e0f01a21SMatthew R. Ochs 			  char *buf)
2310e0f01a21SMatthew R. Ochs {
2311fb67d44dSMatthew R. Ochs 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2312e0f01a21SMatthew R. Ochs 
23133b225cd3SMatthew R. Ochs 	return cxlflash_show_port_status(1, cfg, buf);
2314e0f01a21SMatthew R. Ochs }
2315e0f01a21SMatthew R. Ochs 
2316e0f01a21SMatthew R. Ochs /**
23171cd7fabcSMatthew R. Ochs  * port2_show() - queries and presents the current status of port 2
23181cd7fabcSMatthew R. Ochs  * @dev:	Generic device associated with the host owning the port.
23191cd7fabcSMatthew R. Ochs  * @attr:	Device attribute representing the port.
23201cd7fabcSMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
23211cd7fabcSMatthew R. Ochs  *
23221cd7fabcSMatthew R. Ochs  * Return: The size of the ASCII string returned in @buf.
23231cd7fabcSMatthew R. Ochs  */
23241cd7fabcSMatthew R. Ochs static ssize_t port2_show(struct device *dev,
23251cd7fabcSMatthew R. Ochs 			  struct device_attribute *attr,
23261cd7fabcSMatthew R. Ochs 			  char *buf)
23271cd7fabcSMatthew R. Ochs {
23281cd7fabcSMatthew R. Ochs 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
23291cd7fabcSMatthew R. Ochs 
23301cd7fabcSMatthew R. Ochs 	return cxlflash_show_port_status(2, cfg, buf);
23311cd7fabcSMatthew R. Ochs }
23321cd7fabcSMatthew R. Ochs 
23331cd7fabcSMatthew R. Ochs /**
23341cd7fabcSMatthew R. Ochs  * port3_show() - queries and presents the current status of port 3
23351cd7fabcSMatthew R. Ochs  * @dev:	Generic device associated with the host owning the port.
23361cd7fabcSMatthew R. Ochs  * @attr:	Device attribute representing the port.
23371cd7fabcSMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
23381cd7fabcSMatthew R. Ochs  *
23391cd7fabcSMatthew R. Ochs  * Return: The size of the ASCII string returned in @buf.
23401cd7fabcSMatthew R. Ochs  */
23411cd7fabcSMatthew R. Ochs static ssize_t port3_show(struct device *dev,
23421cd7fabcSMatthew R. Ochs 			  struct device_attribute *attr,
23431cd7fabcSMatthew R. Ochs 			  char *buf)
23441cd7fabcSMatthew R. Ochs {
23451cd7fabcSMatthew R. Ochs 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
23461cd7fabcSMatthew R. Ochs 
23471cd7fabcSMatthew R. Ochs 	return cxlflash_show_port_status(3, cfg, buf);
23481cd7fabcSMatthew R. Ochs }
23491cd7fabcSMatthew R. Ochs 
23501cd7fabcSMatthew R. Ochs /**
2351e0f01a21SMatthew R. Ochs  * lun_mode_show() - presents the current LUN mode of the host
235215305514SMatthew R. Ochs  * @dev:	Generic device associated with the host.
2353e0f01a21SMatthew R. Ochs  * @attr:	Device attribute representing the LUN mode.
235415305514SMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
235515305514SMatthew R. Ochs  *
235615305514SMatthew R. Ochs  * Return: The size of the ASCII string returned in @buf.
235715305514SMatthew R. Ochs  */
2358e0f01a21SMatthew R. Ochs static ssize_t lun_mode_show(struct device *dev,
235915305514SMatthew R. Ochs 			     struct device_attribute *attr, char *buf)
236015305514SMatthew R. Ochs {
2361fb67d44dSMatthew R. Ochs 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
236215305514SMatthew R. Ochs 	struct afu *afu = cfg->afu;
236315305514SMatthew R. Ochs 
2364e0f01a21SMatthew R. Ochs 	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
236515305514SMatthew R. Ochs }
236615305514SMatthew R. Ochs 
236715305514SMatthew R. Ochs /**
2368e0f01a21SMatthew R. Ochs  * lun_mode_store() - sets the LUN mode of the host
236915305514SMatthew R. Ochs  * @dev:	Generic device associated with the host.
2370e0f01a21SMatthew R. Ochs  * @attr:	Device attribute representing the LUN mode.
237115305514SMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
237215305514SMatthew R. Ochs  * @count:	Length of data residing in @buf.
237315305514SMatthew R. Ochs  *
237415305514SMatthew R. Ochs  * The CXL Flash AFU supports a dummy LUN mode where the external
237515305514SMatthew R. Ochs  * links and storage are not required. Space on the FPGA is used
237615305514SMatthew R. Ochs  * to create 1 or 2 small LUNs which are presented to the system
237715305514SMatthew R. Ochs  * as if they were a normal storage device. This feature is useful
237815305514SMatthew R. Ochs  * during development and also provides manufacturing with a way
237915305514SMatthew R. Ochs  * to test the AFU without an actual device.
238015305514SMatthew R. Ochs  *
238115305514SMatthew R. Ochs  * 0 = external LUN[s] (default)
238215305514SMatthew R. Ochs  * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
238315305514SMatthew R. Ochs  * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
238415305514SMatthew R. Ochs  * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
238515305514SMatthew R. Ochs  * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
238615305514SMatthew R. Ochs  *
238715305514SMatthew R. Ochs  * Return: The number of bytes consumed from @buf (i.e., @count).
238815305514SMatthew R. Ochs  */
2389e0f01a21SMatthew R. Ochs static ssize_t lun_mode_store(struct device *dev,
239015305514SMatthew R. Ochs 			      struct device_attribute *attr,
239115305514SMatthew R. Ochs 			      const char *buf, size_t count)
239215305514SMatthew R. Ochs {
239315305514SMatthew R. Ochs 	struct Scsi_Host *shost = class_to_shost(dev);
2394fb67d44dSMatthew R. Ochs 	struct cxlflash_cfg *cfg = shost_priv(shost);
239515305514SMatthew R. Ochs 	struct afu *afu = cfg->afu;
239615305514SMatthew R. Ochs 	int rc;
239715305514SMatthew R. Ochs 	u32 lun_mode;
239815305514SMatthew R. Ochs 
239915305514SMatthew R. Ochs 	rc = kstrtouint(buf, 10, &lun_mode);
240015305514SMatthew R. Ochs 	if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
240115305514SMatthew R. Ochs 		afu->internal_lun = lun_mode;
2402603ecce9SManoj N. Kumar 
2403603ecce9SManoj N. Kumar 		/*
2404603ecce9SManoj N. Kumar 		 * When configured for internal LUN, there is only one channel,
240578ae028eSMatthew R. Ochs 		 * channel number 0; otherwise, the highest channel number is
240678ae028eSMatthew R. Ochs 		 * one less than the number of FC ports on this card.
2407603ecce9SManoj N. Kumar 		 */
2408603ecce9SManoj N. Kumar 		if (afu->internal_lun)
2409603ecce9SManoj N. Kumar 			shost->max_channel = 0;
2410603ecce9SManoj N. Kumar 		else
24118fa4f177SMatthew R. Ochs 			shost->max_channel = PORTNUM2CHAN(cfg->num_fc_ports);
2412603ecce9SManoj N. Kumar 
241315305514SMatthew R. Ochs 		afu_reset(cfg);
241415305514SMatthew R. Ochs 		scsi_scan_host(cfg->host);
241515305514SMatthew R. Ochs 	}
241615305514SMatthew R. Ochs 
241715305514SMatthew R. Ochs 	return count;
241815305514SMatthew R. Ochs }
241915305514SMatthew R. Ochs 
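/*
 * Example (editor's addition): enabling a single internal 64K LUN with
 * 512B blocks from userspace, assuming SCSI host instance 0:
 *
 *	$ echo 1 > /sys/class/scsi_host/host0/lun_mode
 */
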
242015305514SMatthew R. Ochs /**
2421e0f01a21SMatthew R. Ochs  * ioctl_version_show() - presents the current ioctl version of the host
242215305514SMatthew R. Ochs  * @dev:	Generic device associated with the host.
242315305514SMatthew R. Ochs  * @attr:	Device attribute representing the ioctl version.
242415305514SMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE to report back the ioctl version.
242515305514SMatthew R. Ochs  *
242615305514SMatthew R. Ochs  * Return: The size of the ASCII string returned in @buf.
242715305514SMatthew R. Ochs  */
2428e0f01a21SMatthew R. Ochs static ssize_t ioctl_version_show(struct device *dev,
2429e0f01a21SMatthew R. Ochs 				  struct device_attribute *attr, char *buf)
243015305514SMatthew R. Ochs {
243115305514SMatthew R. Ochs 	return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0);
243215305514SMatthew R. Ochs }
243315305514SMatthew R. Ochs 
243415305514SMatthew R. Ochs /**
2435e0f01a21SMatthew R. Ochs  * cxlflash_show_port_lun_table() - queries and presents the port LUN table
2436e0f01a21SMatthew R. Ochs  * @port:	Desired port for status reporting.
24373b225cd3SMatthew R. Ochs  * @cfg:	Internal structure associated with the host.
2438e0f01a21SMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2439e0f01a21SMatthew R. Ochs  *
244078ae028eSMatthew R. Ochs  * Return: The size of the ASCII string returned in @buf or -EINVAL.
2441e0f01a21SMatthew R. Ochs  */
2442e0f01a21SMatthew R. Ochs static ssize_t cxlflash_show_port_lun_table(u32 port,
24433b225cd3SMatthew R. Ochs 					    struct cxlflash_cfg *cfg,
2444e0f01a21SMatthew R. Ochs 					    char *buf)
2445e0f01a21SMatthew R. Ochs {
244678ae028eSMatthew R. Ochs 	struct device *dev = &cfg->dev->dev;
24470aa14887SMatthew R. Ochs 	__be64 __iomem *fc_port_luns;
2448e0f01a21SMatthew R. Ochs 	int i;
2449e0f01a21SMatthew R. Ochs 	ssize_t bytes = 0;
2450e0f01a21SMatthew R. Ochs 
245178ae028eSMatthew R. Ochs 	WARN_ON(port >= MAX_FC_PORTS);
245278ae028eSMatthew R. Ochs 
245378ae028eSMatthew R. Ochs 	if (port >= cfg->num_fc_ports) {
245478ae028eSMatthew R. Ochs 		dev_info(dev, "%s: Port %d not supported on this card.\n",
245578ae028eSMatthew R. Ochs 			__func__, port);
245678ae028eSMatthew R. Ochs 		return -EINVAL;
245778ae028eSMatthew R. Ochs 	}
2458e0f01a21SMatthew R. Ochs 
24590aa14887SMatthew R. Ochs 	fc_port_luns = get_fc_port_luns(cfg, port);
2460e0f01a21SMatthew R. Ochs 
2461e0f01a21SMatthew R. Ochs 	for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
2462e0f01a21SMatthew R. Ochs 		bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
24630aa14887SMatthew R. Ochs 				   "%03d: %016llx\n",
24640aa14887SMatthew R. Ochs 				   i, readq_be(&fc_port_luns[i]));
2465e0f01a21SMatthew R. Ochs 	return bytes;
2466e0f01a21SMatthew R. Ochs }
2467e0f01a21SMatthew R. Ochs 
2468e0f01a21SMatthew R. Ochs /**
2469e0f01a21SMatthew R. Ochs  * port0_lun_table_show() - presents the current LUN table of port 0
2470e0f01a21SMatthew R. Ochs  * @dev:	Generic device associated with the host owning the port.
2471e0f01a21SMatthew R. Ochs  * @attr:	Device attribute representing the port.
2472e0f01a21SMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2473e0f01a21SMatthew R. Ochs  *
2474e0f01a21SMatthew R. Ochs  * Return: The size of the ASCII string returned in @buf.
2475e0f01a21SMatthew R. Ochs  */
2476e0f01a21SMatthew R. Ochs static ssize_t port0_lun_table_show(struct device *dev,
2477e0f01a21SMatthew R. Ochs 				    struct device_attribute *attr,
2478e0f01a21SMatthew R. Ochs 				    char *buf)
2479e0f01a21SMatthew R. Ochs {
2480fb67d44dSMatthew R. Ochs 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2481e0f01a21SMatthew R. Ochs 
24823b225cd3SMatthew R. Ochs 	return cxlflash_show_port_lun_table(0, cfg, buf);
2483e0f01a21SMatthew R. Ochs }
2484e0f01a21SMatthew R. Ochs 
2485e0f01a21SMatthew R. Ochs /**
2486e0f01a21SMatthew R. Ochs  * port1_lun_table_show() - presents the current LUN table of port 1
2487e0f01a21SMatthew R. Ochs  * @dev:	Generic device associated with the host owning the port.
2488e0f01a21SMatthew R. Ochs  * @attr:	Device attribute representing the port.
2489e0f01a21SMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2490e0f01a21SMatthew R. Ochs  *
2491e0f01a21SMatthew R. Ochs  * Return: The size of the ASCII string returned in @buf.
2492e0f01a21SMatthew R. Ochs  */
2493e0f01a21SMatthew R. Ochs static ssize_t port1_lun_table_show(struct device *dev,
2494e0f01a21SMatthew R. Ochs 				    struct device_attribute *attr,
2495e0f01a21SMatthew R. Ochs 				    char *buf)
2496e0f01a21SMatthew R. Ochs {
2497fb67d44dSMatthew R. Ochs 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2498e0f01a21SMatthew R. Ochs 
24993b225cd3SMatthew R. Ochs 	return cxlflash_show_port_lun_table(1, cfg, buf);
2500e0f01a21SMatthew R. Ochs }
2501e0f01a21SMatthew R. Ochs 
2502e0f01a21SMatthew R. Ochs /**
25031cd7fabcSMatthew R. Ochs  * port2_lun_table_show() - presents the current LUN table of port 2
25041cd7fabcSMatthew R. Ochs  * @dev:	Generic device associated with the host owning the port.
25051cd7fabcSMatthew R. Ochs  * @attr:	Device attribute representing the port.
25061cd7fabcSMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
25071cd7fabcSMatthew R. Ochs  *
25081cd7fabcSMatthew R. Ochs  * Return: The size of the ASCII string returned in @buf.
25091cd7fabcSMatthew R. Ochs  */
25101cd7fabcSMatthew R. Ochs static ssize_t port2_lun_table_show(struct device *dev,
25111cd7fabcSMatthew R. Ochs 				    struct device_attribute *attr,
25121cd7fabcSMatthew R. Ochs 				    char *buf)
25131cd7fabcSMatthew R. Ochs {
25141cd7fabcSMatthew R. Ochs 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
25151cd7fabcSMatthew R. Ochs 
25161cd7fabcSMatthew R. Ochs 	return cxlflash_show_port_lun_table(2, cfg, buf);
25171cd7fabcSMatthew R. Ochs }
25181cd7fabcSMatthew R. Ochs 
25191cd7fabcSMatthew R. Ochs /**
25201cd7fabcSMatthew R. Ochs  * port3_lun_table_show() - presents the current LUN table of port 3
25211cd7fabcSMatthew R. Ochs  * @dev:	Generic device associated with the host owning the port.
25221cd7fabcSMatthew R. Ochs  * @attr:	Device attribute representing the port.
25231cd7fabcSMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
25241cd7fabcSMatthew R. Ochs  *
25251cd7fabcSMatthew R. Ochs  * Return: The size of the ASCII string returned in @buf.
25261cd7fabcSMatthew R. Ochs  */
25271cd7fabcSMatthew R. Ochs static ssize_t port3_lun_table_show(struct device *dev,
25281cd7fabcSMatthew R. Ochs 				    struct device_attribute *attr,
25291cd7fabcSMatthew R. Ochs 				    char *buf)
25301cd7fabcSMatthew R. Ochs {
25311cd7fabcSMatthew R. Ochs 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
25321cd7fabcSMatthew R. Ochs 
25331cd7fabcSMatthew R. Ochs 	return cxlflash_show_port_lun_table(3, cfg, buf);
25341cd7fabcSMatthew R. Ochs }
25351cd7fabcSMatthew R. Ochs 
25361cd7fabcSMatthew R. Ochs /**
2537cba06e6dSMatthew R. Ochs  * irqpoll_weight_show() - presents the current IRQ poll weight for the host
2538cba06e6dSMatthew R. Ochs  * @dev:	Generic device associated with the host.
2539cba06e6dSMatthew R. Ochs  * @attr:	Device attribute representing the IRQ poll weight.
2540cba06e6dSMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE to report back the current IRQ poll
2541cba06e6dSMatthew R. Ochs  *		weight in ASCII.
2542cba06e6dSMatthew R. Ochs  *
2543cba06e6dSMatthew R. Ochs  * An IRQ poll weight of 0 indicates polling is disabled.
2544cba06e6dSMatthew R. Ochs  *
2545cba06e6dSMatthew R. Ochs  * Return: The size of the ASCII string returned in @buf.
2546cba06e6dSMatthew R. Ochs  */
2547cba06e6dSMatthew R. Ochs static ssize_t irqpoll_weight_show(struct device *dev,
2548cba06e6dSMatthew R. Ochs 				   struct device_attribute *attr, char *buf)
2549cba06e6dSMatthew R. Ochs {
2550cba06e6dSMatthew R. Ochs 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2551cba06e6dSMatthew R. Ochs 	struct afu *afu = cfg->afu;
2552cba06e6dSMatthew R. Ochs 
2553cba06e6dSMatthew R. Ochs 	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->irqpoll_weight);
2554cba06e6dSMatthew R. Ochs }
2555cba06e6dSMatthew R. Ochs 
2556cba06e6dSMatthew R. Ochs /**
2557cba06e6dSMatthew R. Ochs  * irqpoll_weight_store() - sets the current IRQ poll weight for the host
2558cba06e6dSMatthew R. Ochs  * @dev:	Generic device associated with the host.
2559cba06e6dSMatthew R. Ochs  * @attr:	Device attribute representing the IRQ poll weight.
2560cba06e6dSMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE containing the desired IRQ poll
2561cba06e6dSMatthew R. Ochs  *		weight in ASCII.
2562cba06e6dSMatthew R. Ochs  * @count:	Length of data residing in @buf.
2563cba06e6dSMatthew R. Ochs  *
2564cba06e6dSMatthew R. Ochs  * An IRQ poll weight of 0 indicates polling is disabled.
2565cba06e6dSMatthew R. Ochs  *
2566cba06e6dSMatthew R. Ochs  * Return: The number of bytes consumed from @buf (@count), or -EINVAL.
2567cba06e6dSMatthew R. Ochs  */
2568cba06e6dSMatthew R. Ochs static ssize_t irqpoll_weight_store(struct device *dev,
2569cba06e6dSMatthew R. Ochs 				    struct device_attribute *attr,
2570cba06e6dSMatthew R. Ochs 				    const char *buf, size_t count)
2571cba06e6dSMatthew R. Ochs {
2572cba06e6dSMatthew R. Ochs 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2573cba06e6dSMatthew R. Ochs 	struct device *cfgdev = &cfg->dev->dev;
2574cba06e6dSMatthew R. Ochs 	struct afu *afu = cfg->afu;
2575bfc0bab1SUma Krishnan 	struct hwq *hwq;
2576cba06e6dSMatthew R. Ochs 	u32 weight;
2577bfc0bab1SUma Krishnan 	int rc, i;
2578cba06e6dSMatthew R. Ochs 
2579cba06e6dSMatthew R. Ochs 	rc = kstrtouint(buf, 10, &weight);
2580cba06e6dSMatthew R. Ochs 	if (rc)
2581cba06e6dSMatthew R. Ochs 		return -EINVAL;
2582cba06e6dSMatthew R. Ochs 
2583cba06e6dSMatthew R. Ochs 	if (weight > 256) {
2584cba06e6dSMatthew R. Ochs 		dev_info(cfgdev,
2585cba06e6dSMatthew R. Ochs 			 "Invalid IRQ poll weight. It must be 256 or less.\n");
2586cba06e6dSMatthew R. Ochs 		return -EINVAL;
2587cba06e6dSMatthew R. Ochs 	}
2588cba06e6dSMatthew R. Ochs 
2589cba06e6dSMatthew R. Ochs 	if (weight == afu->irqpoll_weight) {
2590cba06e6dSMatthew R. Ochs 		dev_info(cfgdev,
2591cba06e6dSMatthew R. Ochs 			 "Requested IRQ poll weight matches the current weight.\n");
2592cba06e6dSMatthew R. Ochs 		return -EINVAL;
2593cba06e6dSMatthew R. Ochs 	}
2594cba06e6dSMatthew R. Ochs 
2595bfc0bab1SUma Krishnan 	if (afu_is_irqpoll_enabled(afu)) {
25963065267aSMatthew R. Ochs 		for (i = 0; i < afu->num_hwqs; i++) {
2597bfc0bab1SUma Krishnan 			hwq = get_hwq(afu, i);
2598bfc0bab1SUma Krishnan 
2599bfc0bab1SUma Krishnan 			irq_poll_disable(&hwq->irqpoll);
2600bfc0bab1SUma Krishnan 		}
2601bfc0bab1SUma Krishnan 	}
2602cba06e6dSMatthew R. Ochs 
2603cba06e6dSMatthew R. Ochs 	afu->irqpoll_weight = weight;
2604cba06e6dSMatthew R. Ochs 
2605bfc0bab1SUma Krishnan 	if (weight > 0) {
26063065267aSMatthew R. Ochs 		for (i = 0; i < afu->num_hwqs; i++) {
2607bfc0bab1SUma Krishnan 			hwq = get_hwq(afu, i);
2608bfc0bab1SUma Krishnan 
2609bfc0bab1SUma Krishnan 			irq_poll_init(&hwq->irqpoll, weight, cxlflash_irqpoll);
2610bfc0bab1SUma Krishnan 		}
2611bfc0bab1SUma Krishnan 	}
2612cba06e6dSMatthew R. Ochs 
2613cba06e6dSMatthew R. Ochs 	return count;
2614cba06e6dSMatthew R. Ochs }
2615cba06e6dSMatthew R. Ochs 
2616cba06e6dSMatthew R. Ochs /**
26173065267aSMatthew R. Ochs  * num_hwqs_show() - presents the number of hardware queues for the host
26183065267aSMatthew R. Ochs  * @dev:	Generic device associated with the host.
26193065267aSMatthew R. Ochs  * @attr:	Device attribute representing the number of hardware queues.
26203065267aSMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE to report back the number of hardware
26213065267aSMatthew R. Ochs  *		queues in ASCII.
26223065267aSMatthew R. Ochs  *
26233065267aSMatthew R. Ochs  * Return: The size of the ASCII string returned in @buf.
26243065267aSMatthew R. Ochs  */
26253065267aSMatthew R. Ochs static ssize_t num_hwqs_show(struct device *dev,
26263065267aSMatthew R. Ochs 			     struct device_attribute *attr, char *buf)
26273065267aSMatthew R. Ochs {
26283065267aSMatthew R. Ochs 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
26293065267aSMatthew R. Ochs 	struct afu *afu = cfg->afu;
26303065267aSMatthew R. Ochs 
26313065267aSMatthew R. Ochs 	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->num_hwqs);
26323065267aSMatthew R. Ochs }
26333065267aSMatthew R. Ochs 
26343065267aSMatthew R. Ochs /**
26353065267aSMatthew R. Ochs  * num_hwqs_store() - sets the number of hardware queues for the host
26363065267aSMatthew R. Ochs  * @dev:	Generic device associated with the host.
26373065267aSMatthew R. Ochs  * @attr:	Device attribute representing the number of hardware queues.
26383065267aSMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE containing the number of hardware
26393065267aSMatthew R. Ochs  *		queues in ASCII.
26403065267aSMatthew R. Ochs  * @count:	Length of data residing in @buf.
26413065267aSMatthew R. Ochs  *
26423065267aSMatthew R. Ochs  * n > 0: num_hwqs = n
26433065267aSMatthew R. Ochs  * n = 0: num_hwqs = num_online_cpus()
26443065267aSMatthew R. Ochs  * n < 0: num_hwqs = num_online_cpus() / abs(n)
26453065267aSMatthew R. Ochs  *
26463065267aSMatthew R. Ochs  * Return: The number of bytes consumed from @buf (@count), or -EINVAL.
26473065267aSMatthew R. Ochs  */
26483065267aSMatthew R. Ochs static ssize_t num_hwqs_store(struct device *dev,
26493065267aSMatthew R. Ochs 			      struct device_attribute *attr,
26503065267aSMatthew R. Ochs 			      const char *buf, size_t count)
26513065267aSMatthew R. Ochs {
26523065267aSMatthew R. Ochs 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
26533065267aSMatthew R. Ochs 	struct afu *afu = cfg->afu;
26543065267aSMatthew R. Ochs 	int rc;
26553065267aSMatthew R. Ochs 	int nhwqs, num_hwqs;
26563065267aSMatthew R. Ochs 
26573065267aSMatthew R. Ochs 	rc = kstrtoint(buf, 10, &nhwqs);
26583065267aSMatthew R. Ochs 	if (rc)
26593065267aSMatthew R. Ochs 		return -EINVAL;
26603065267aSMatthew R. Ochs 
26613065267aSMatthew R. Ochs 	if (nhwqs >= 1)
26623065267aSMatthew R. Ochs 		num_hwqs = nhwqs;
26633065267aSMatthew R. Ochs 	else if (nhwqs == 0)
26643065267aSMatthew R. Ochs 		num_hwqs = num_online_cpus();
26653065267aSMatthew R. Ochs 	else
26663065267aSMatthew R. Ochs 		num_hwqs = num_online_cpus() / abs(nhwqs);
26673065267aSMatthew R. Ochs 
26683065267aSMatthew R. Ochs 	afu->desired_hwqs = min(num_hwqs, CXLFLASH_MAX_HWQS);
26693065267aSMatthew R. Ochs 	WARN_ON_ONCE(afu->desired_hwqs == 0);
26703065267aSMatthew R. Ochs 
26713065267aSMatthew R. Ochs retry:
26723065267aSMatthew R. Ochs 	switch (cfg->state) {
26733065267aSMatthew R. Ochs 	case STATE_NORMAL:
26743065267aSMatthew R. Ochs 		cfg->state = STATE_RESET;
26753065267aSMatthew R. Ochs 		drain_ioctls(cfg);
26763065267aSMatthew R. Ochs 		cxlflash_mark_contexts_error(cfg);
26773065267aSMatthew R. Ochs 		rc = afu_reset(cfg);
26783065267aSMatthew R. Ochs 		if (rc)
26793065267aSMatthew R. Ochs 			cfg->state = STATE_FAILTERM;
26803065267aSMatthew R. Ochs 		else
26813065267aSMatthew R. Ochs 			cfg->state = STATE_NORMAL;
26823065267aSMatthew R. Ochs 		wake_up_all(&cfg->reset_waitq);
26833065267aSMatthew R. Ochs 		break;
26843065267aSMatthew R. Ochs 	case STATE_RESET:
26853065267aSMatthew R. Ochs 		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
26863065267aSMatthew R. Ochs 		if (cfg->state == STATE_NORMAL)
26873065267aSMatthew R. Ochs 			goto retry;
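		/* fall through */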
26883065267aSMatthew R. Ochs 	default:
26893065267aSMatthew R. Ochs 		/* Ideally should not happen */
26903065267aSMatthew R. Ochs 		dev_err(dev, "%s: Device is not ready, state=%d\n",
26913065267aSMatthew R. Ochs 			__func__, cfg->state);
26923065267aSMatthew R. Ochs 		break;
26933065267aSMatthew R. Ochs 	}
26943065267aSMatthew R. Ochs 
26953065267aSMatthew R. Ochs 	return count;
26963065267aSMatthew R. Ochs }
26973065267aSMatthew R. Ochs 
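/*
 * Example (editor's addition): on a host with 16 online CPUs, writing "4"
 * requests 4 hardware queues, "0" requests 16 (one per CPU), and "-4"
 * requests num_online_cpus() / 4 = 4; the result is always capped at
 * CXLFLASH_MAX_HWQS.
 */
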
26981dd0c0e4SMatthew R. Ochs static const char *hwq_mode_name[MAX_HWQ_MODE] = { "rr", "tag", "cpu" };
26991dd0c0e4SMatthew R. Ochs 
27001dd0c0e4SMatthew R. Ochs /**
27011dd0c0e4SMatthew R. Ochs  * hwq_mode_show() - presents the HWQ steering mode for the host
27021dd0c0e4SMatthew R. Ochs  * @dev:	Generic device associated with the host.
27031dd0c0e4SMatthew R. Ochs  * @attr:	Device attribute representing the HWQ steering mode.
27041dd0c0e4SMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE to report back the HWQ steering mode
27051dd0c0e4SMatthew R. Ochs  *		as a character string.
27061dd0c0e4SMatthew R. Ochs  *
27071dd0c0e4SMatthew R. Ochs  * Return: The size of the ASCII string returned in @buf.
27081dd0c0e4SMatthew R. Ochs  */
27091dd0c0e4SMatthew R. Ochs static ssize_t hwq_mode_show(struct device *dev,
27101dd0c0e4SMatthew R. Ochs 			     struct device_attribute *attr, char *buf)
27111dd0c0e4SMatthew R. Ochs {
27121dd0c0e4SMatthew R. Ochs 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
27131dd0c0e4SMatthew R. Ochs 	struct afu *afu = cfg->afu;
27141dd0c0e4SMatthew R. Ochs 
27151dd0c0e4SMatthew R. Ochs 	return scnprintf(buf, PAGE_SIZE, "%s\n", hwq_mode_name[afu->hwq_mode]);
27161dd0c0e4SMatthew R. Ochs }
27171dd0c0e4SMatthew R. Ochs 
27181dd0c0e4SMatthew R. Ochs /**
27191dd0c0e4SMatthew R. Ochs  * hwq_mode_store() - sets the HWQ steering mode for the host
27201dd0c0e4SMatthew R. Ochs  * @dev:	Generic device associated with the host.
27211dd0c0e4SMatthew R. Ochs  * @attr:	Device attribute representing the HWQ steering mode.
27221dd0c0e4SMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE containing the HWQ steering mode
27231dd0c0e4SMatthew R. Ochs  *		as a character string.
27241dd0c0e4SMatthew R. Ochs  * @count:	Length of data residing in @buf.
27251dd0c0e4SMatthew R. Ochs  *
27261dd0c0e4SMatthew R. Ochs  * rr = Round-Robin
27271dd0c0e4SMatthew R. Ochs  * tag = Block MQ Tagging
27281dd0c0e4SMatthew R. Ochs  * cpu = CPU Affinity
27291dd0c0e4SMatthew R. Ochs  *
27301dd0c0e4SMatthew R. Ochs  * Return: The number of bytes consumed from @buf (@count), or -EINVAL.
27311dd0c0e4SMatthew R. Ochs  */
27321dd0c0e4SMatthew R. Ochs static ssize_t hwq_mode_store(struct device *dev,
27331dd0c0e4SMatthew R. Ochs 			      struct device_attribute *attr,
27341dd0c0e4SMatthew R. Ochs 			      const char *buf, size_t count)
27351dd0c0e4SMatthew R. Ochs {
27361dd0c0e4SMatthew R. Ochs 	struct Scsi_Host *shost = class_to_shost(dev);
27371dd0c0e4SMatthew R. Ochs 	struct cxlflash_cfg *cfg = shost_priv(shost);
27381dd0c0e4SMatthew R. Ochs 	struct device *cfgdev = &cfg->dev->dev;
27391dd0c0e4SMatthew R. Ochs 	struct afu *afu = cfg->afu;
27401dd0c0e4SMatthew R. Ochs 	int i;
27411dd0c0e4SMatthew R. Ochs 	u32 mode = MAX_HWQ_MODE;
27421dd0c0e4SMatthew R. Ochs 
27431dd0c0e4SMatthew R. Ochs 	for (i = 0; i < MAX_HWQ_MODE; i++) {
27441dd0c0e4SMatthew R. Ochs 		if (!strncmp(hwq_mode_name[i], buf, strlen(hwq_mode_name[i]))) {
27451dd0c0e4SMatthew R. Ochs 			mode = i;
27461dd0c0e4SMatthew R. Ochs 			break;
27471dd0c0e4SMatthew R. Ochs 		}
27481dd0c0e4SMatthew R. Ochs 	}
27491dd0c0e4SMatthew R. Ochs 
27501dd0c0e4SMatthew R. Ochs 	if (mode >= MAX_HWQ_MODE) {
27511dd0c0e4SMatthew R. Ochs 		dev_info(cfgdev, "Invalid HWQ steering mode.\n");
27521dd0c0e4SMatthew R. Ochs 		return -EINVAL;
27531dd0c0e4SMatthew R. Ochs 	}
27541dd0c0e4SMatthew R. Ochs 
27551dd0c0e4SMatthew R. Ochs 	if ((mode == HWQ_MODE_TAG) && !shost_use_blk_mq(shost)) {
27561dd0c0e4SMatthew R. Ochs 		dev_info(cfgdev, "SCSI-MQ is not enabled, use a different "
27571dd0c0e4SMatthew R. Ochs 			 "HWQ steering mode.\n");
27581dd0c0e4SMatthew R. Ochs 		return -EINVAL;
27591dd0c0e4SMatthew R. Ochs 	}
27601dd0c0e4SMatthew R. Ochs 
27611dd0c0e4SMatthew R. Ochs 	afu->hwq_mode = mode;
27621dd0c0e4SMatthew R. Ochs 
27631dd0c0e4SMatthew R. Ochs 	return count;
27641dd0c0e4SMatthew R. Ochs }
27651dd0c0e4SMatthew R. Ochs 
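/*
 * Example (editor's addition): selecting CPU-affinity steering from
 * userspace, assuming SCSI host instance 0 ("tag" would additionally
 * require SCSI-MQ to be enabled):
 *
 *	$ echo cpu > /sys/class/scsi_host/host0/hwq_mode
 */
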
27663065267aSMatthew R. Ochs /**
2767e0f01a21SMatthew R. Ochs  * mode_show() - presents the current mode of the device
276815305514SMatthew R. Ochs  * @dev:	Generic device associated with the device.
276915305514SMatthew R. Ochs  * @attr:	Device attribute representing the device mode.
277015305514SMatthew R. Ochs  * @buf:	Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
277115305514SMatthew R. Ochs  *
277215305514SMatthew R. Ochs  * Return: The size of the ASCII string returned in @buf.
277315305514SMatthew R. Ochs  */
2774e0f01a21SMatthew R. Ochs static ssize_t mode_show(struct device *dev,
277515305514SMatthew R. Ochs 			 struct device_attribute *attr, char *buf)
277615305514SMatthew R. Ochs {
277715305514SMatthew R. Ochs 	struct scsi_device *sdev = to_scsi_device(dev);
277815305514SMatthew R. Ochs 
2779e0f01a21SMatthew R. Ochs 	return scnprintf(buf, PAGE_SIZE, "%s\n",
278015305514SMatthew R. Ochs 			 sdev->hostdata ? "superpipe" : "legacy");
278115305514SMatthew R. Ochs }
278215305514SMatthew R. Ochs 
278315305514SMatthew R. Ochs /*
278415305514SMatthew R. Ochs  * Host attributes
278515305514SMatthew R. Ochs  */
2786e0f01a21SMatthew R. Ochs static DEVICE_ATTR_RO(port0);
2787e0f01a21SMatthew R. Ochs static DEVICE_ATTR_RO(port1);
27881cd7fabcSMatthew R. Ochs static DEVICE_ATTR_RO(port2);
27891cd7fabcSMatthew R. Ochs static DEVICE_ATTR_RO(port3);
2790e0f01a21SMatthew R. Ochs static DEVICE_ATTR_RW(lun_mode);
2791e0f01a21SMatthew R. Ochs static DEVICE_ATTR_RO(ioctl_version);
2792e0f01a21SMatthew R. Ochs static DEVICE_ATTR_RO(port0_lun_table);
2793e0f01a21SMatthew R. Ochs static DEVICE_ATTR_RO(port1_lun_table);
27941cd7fabcSMatthew R. Ochs static DEVICE_ATTR_RO(port2_lun_table);
27951cd7fabcSMatthew R. Ochs static DEVICE_ATTR_RO(port3_lun_table);
2796cba06e6dSMatthew R. Ochs static DEVICE_ATTR_RW(irqpoll_weight);
27973065267aSMatthew R. Ochs static DEVICE_ATTR_RW(num_hwqs);
27981dd0c0e4SMatthew R. Ochs static DEVICE_ATTR_RW(hwq_mode);
279915305514SMatthew R. Ochs 
280015305514SMatthew R. Ochs static struct device_attribute *cxlflash_host_attrs[] = {
280115305514SMatthew R. Ochs 	&dev_attr_port0,
280215305514SMatthew R. Ochs 	&dev_attr_port1,
28031cd7fabcSMatthew R. Ochs 	&dev_attr_port2,
28041cd7fabcSMatthew R. Ochs 	&dev_attr_port3,
280515305514SMatthew R. Ochs 	&dev_attr_lun_mode,
280615305514SMatthew R. Ochs 	&dev_attr_ioctl_version,
2807e0f01a21SMatthew R. Ochs 	&dev_attr_port0_lun_table,
2808e0f01a21SMatthew R. Ochs 	&dev_attr_port1_lun_table,
28091cd7fabcSMatthew R. Ochs 	&dev_attr_port2_lun_table,
28101cd7fabcSMatthew R. Ochs 	&dev_attr_port3_lun_table,
2811cba06e6dSMatthew R. Ochs 	&dev_attr_irqpoll_weight,
28123065267aSMatthew R. Ochs 	&dev_attr_num_hwqs,
28131dd0c0e4SMatthew R. Ochs 	&dev_attr_hwq_mode,
281415305514SMatthew R. Ochs 	NULL
281515305514SMatthew R. Ochs };
281615305514SMatthew R. Ochs 
281715305514SMatthew R. Ochs /*
281815305514SMatthew R. Ochs  * Device attributes
281915305514SMatthew R. Ochs  */
2820e0f01a21SMatthew R. Ochs static DEVICE_ATTR_RO(mode);
282115305514SMatthew R. Ochs 
282215305514SMatthew R. Ochs static struct device_attribute *cxlflash_dev_attrs[] = {
282315305514SMatthew R. Ochs 	&dev_attr_mode,
282415305514SMatthew R. Ochs 	NULL
282515305514SMatthew R. Ochs };
282615305514SMatthew R. Ochs 
282715305514SMatthew R. Ochs /*
282815305514SMatthew R. Ochs  * Host template
282915305514SMatthew R. Ochs  */
283015305514SMatthew R. Ochs static struct scsi_host_template driver_template = {
283115305514SMatthew R. Ochs 	.module = THIS_MODULE,
283215305514SMatthew R. Ochs 	.name = CXLFLASH_ADAPTER_NAME,
283315305514SMatthew R. Ochs 	.info = cxlflash_driver_info,
283415305514SMatthew R. Ochs 	.ioctl = cxlflash_ioctl,
283515305514SMatthew R. Ochs 	.proc_name = CXLFLASH_NAME,
283615305514SMatthew R. Ochs 	.queuecommand = cxlflash_queuecommand,
283715305514SMatthew R. Ochs 	.eh_device_reset_handler = cxlflash_eh_device_reset_handler,
283815305514SMatthew R. Ochs 	.eh_host_reset_handler = cxlflash_eh_host_reset_handler,
283915305514SMatthew R. Ochs 	.change_queue_depth = cxlflash_change_queue_depth,
284083430833SManoj N. Kumar 	.cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
284115305514SMatthew R. Ochs 	.can_queue = CXLFLASH_MAX_CMDS,
28425fbb96c8SMatthew R. Ochs 	.cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
284315305514SMatthew R. Ochs 	.this_id = -1,
284468ab2d76SUma Krishnan 	.sg_tablesize = 1,	/* No scatter gather support */
284515305514SMatthew R. Ochs 	.max_sectors = CXLFLASH_MAX_SECTORS,
284615305514SMatthew R. Ochs 	.use_clustering = ENABLE_CLUSTERING,
284715305514SMatthew R. Ochs 	.shost_attrs = cxlflash_host_attrs,
284815305514SMatthew R. Ochs 	.sdev_attrs = cxlflash_dev_attrs,
284915305514SMatthew R. Ochs };
285015305514SMatthew R. Ochs 
285115305514SMatthew R. Ochs /*
285215305514SMatthew R. Ochs  * Device dependent values
285315305514SMatthew R. Ochs  */
285496e1b660SUma Krishnan static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
285596e1b660SUma Krishnan 					0ULL };
285696e1b660SUma Krishnan static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
2857704c4b0dSUma Krishnan 					CXLFLASH_NOTIFY_SHUTDOWN };
285894344520SMatthew R. Ochs static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
285994344520SMatthew R. Ochs 					CXLFLASH_NOTIFY_SHUTDOWN };
286015305514SMatthew R. Ochs 
286115305514SMatthew R. Ochs /*
286215305514SMatthew R. Ochs  * PCI device binding table
286315305514SMatthew R. Ochs  */
286415305514SMatthew R. Ochs static struct pci_device_id cxlflash_pci_table[] = {
286515305514SMatthew R. Ochs 	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
286615305514SMatthew R. Ochs 	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
2867a2746fb1SManoj Kumar 	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
2868a2746fb1SManoj Kumar 	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
286994344520SMatthew R. Ochs 	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD,
287094344520SMatthew R. Ochs 	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals},
287115305514SMatthew R. Ochs 	{}
287215305514SMatthew R. Ochs };
287315305514SMatthew R. Ochs 
287415305514SMatthew R. Ochs MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
287515305514SMatthew R. Ochs 
287615305514SMatthew R. Ochs /**
2877c21e0bbfSMatthew R. Ochs  * cxlflash_worker_thread() - work thread handler for the AFU
2878c21e0bbfSMatthew R. Ochs  * @work:	Work structure contained within cxlflash associated with host.
2879c21e0bbfSMatthew R. Ochs  *
2880c21e0bbfSMatthew R. Ochs  * Handles the following events:
2881c21e0bbfSMatthew R. Ochs  * - Link reset which cannot be performed on interrupt context due to
2882c21e0bbfSMatthew R. Ochs  * blocking up to a few seconds
2883ef51074aSMatthew R. Ochs  * - Rescan the host
2884c21e0bbfSMatthew R. Ochs  */
2885c21e0bbfSMatthew R. Ochs static void cxlflash_worker_thread(struct work_struct *work)
2886c21e0bbfSMatthew R. Ochs {
28875cdac81aSMatthew R. Ochs 	struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
28885cdac81aSMatthew R. Ochs 						work_q);
2889c21e0bbfSMatthew R. Ochs 	struct afu *afu = cfg->afu;
28904392ba49SMatthew R. Ochs 	struct device *dev = &cfg->dev->dev;
28910aa14887SMatthew R. Ochs 	__be64 __iomem *fc_port_regs;
2892c21e0bbfSMatthew R. Ochs 	int port;
2893c21e0bbfSMatthew R. Ochs 	ulong lock_flags;
2894c21e0bbfSMatthew R. Ochs 
28955cdac81aSMatthew R. Ochs 	/* Avoid MMIO if the device has failed */
28975cdac81aSMatthew R. Ochs 	if (cfg->state != STATE_NORMAL)
28985cdac81aSMatthew R. Ochs 		return;
28995cdac81aSMatthew R. Ochs 
2900c21e0bbfSMatthew R. Ochs 	spin_lock_irqsave(cfg->host->host_lock, lock_flags);
2901c21e0bbfSMatthew R. Ochs 
2902c21e0bbfSMatthew R. Ochs 	if (cfg->lr_state == LINK_RESET_REQUIRED) {
2903c21e0bbfSMatthew R. Ochs 		port = cfg->lr_port;
2904c21e0bbfSMatthew R. Ochs 		if (port < 0) {
29054392ba49SMatthew R. Ochs 			dev_err(dev, "%s: invalid port index %d\n",
29064392ba49SMatthew R. Ochs 				__func__, port);
2907c21e0bbfSMatthew R. Ochs 		} else {
2908c21e0bbfSMatthew R. Ochs 			spin_unlock_irqrestore(cfg->host->host_lock,
2909c21e0bbfSMatthew R. Ochs 					       lock_flags);
2910c21e0bbfSMatthew R. Ochs 
2911c21e0bbfSMatthew R. Ochs 			/* The reset can block... */
29120aa14887SMatthew R. Ochs 			fc_port_regs = get_fc_port_regs(cfg, port);
29130aa14887SMatthew R. Ochs 			afu_link_reset(afu, port, fc_port_regs);
2914c21e0bbfSMatthew R. Ochs 			spin_lock_irqsave(cfg->host->host_lock, lock_flags);
2915c21e0bbfSMatthew R. Ochs 		}
2916c21e0bbfSMatthew R. Ochs 
2917c21e0bbfSMatthew R. Ochs 		cfg->lr_state = LINK_RESET_COMPLETE;
2918c21e0bbfSMatthew R. Ochs 	}
2919c21e0bbfSMatthew R. Ochs 
2920c21e0bbfSMatthew R. Ochs 	spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
2921ef51074aSMatthew R. Ochs 
2922ef51074aSMatthew R. Ochs 	if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
2923ef51074aSMatthew R. Ochs 		scsi_scan_host(cfg->host);
2924c21e0bbfSMatthew R. Ochs }
2925c21e0bbfSMatthew R. Ochs 
2926c21e0bbfSMatthew R. Ochs /**
2927c21e0bbfSMatthew R. Ochs  * cxlflash_probe() - PCI entry point to add host
2928c21e0bbfSMatthew R. Ochs  * @pdev:	PCI device associated with the host.
2929c21e0bbfSMatthew R. Ochs  * @dev_id:	PCI device id associated with device.
2930c21e0bbfSMatthew R. Ochs  *
2931323e3342SMatthew R. Ochs  * The device will initially start out in a 'probing' state and
2932323e3342SMatthew R. Ochs  * transition to the 'normal' state at the end of a successful
2933323e3342SMatthew R. Ochs  * probe. Should an EEH event occur during probe, the notification
2934323e3342SMatthew R. Ochs  * thread (error_detected()) will wait until the probe handler
2935323e3342SMatthew R. Ochs  * is nearly complete. At that time, the device will be moved to
2936323e3342SMatthew R. Ochs  * a 'probed' state and the EEH thread woken up to drive the slot
2937323e3342SMatthew R. Ochs  * reset and recovery (device moves to 'normal' state). Meanwhile,
2938323e3342SMatthew R. Ochs  * the probe will be allowed to exit successfully.
2939323e3342SMatthew R. Ochs  *
29401284fb0cSMatthew R. Ochs  * Return: 0 on success, -errno on failure
2941c21e0bbfSMatthew R. Ochs  */
2942c21e0bbfSMatthew R. Ochs static int cxlflash_probe(struct pci_dev *pdev,
2943c21e0bbfSMatthew R. Ochs 			  const struct pci_device_id *dev_id)
2944c21e0bbfSMatthew R. Ochs {
2945c21e0bbfSMatthew R. Ochs 	struct Scsi_Host *host;
2946c21e0bbfSMatthew R. Ochs 	struct cxlflash_cfg *cfg = NULL;
2947fb67d44dSMatthew R. Ochs 	struct device *dev = &pdev->dev;
2948c21e0bbfSMatthew R. Ochs 	struct dev_dependent_vals *ddv;
2949c21e0bbfSMatthew R. Ochs 	int rc = 0;
295078ae028eSMatthew R. Ochs 	int k;
2951c21e0bbfSMatthew R. Ochs 
2952c21e0bbfSMatthew R. Ochs 	dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
2953c21e0bbfSMatthew R. Ochs 		__func__, pdev->irq);
2954c21e0bbfSMatthew R. Ochs 
2955c21e0bbfSMatthew R. Ochs 	ddv = (struct dev_dependent_vals *)dev_id->driver_data;
2956c21e0bbfSMatthew R. Ochs 	driver_template.max_sectors = ddv->max_sectors;
2957c21e0bbfSMatthew R. Ochs 
2958c21e0bbfSMatthew R. Ochs 	host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
2959c21e0bbfSMatthew R. Ochs 	if (!host) {
2960fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: scsi_host_alloc failed\n", __func__);
2961c21e0bbfSMatthew R. Ochs 		rc = -ENOMEM;
2962c21e0bbfSMatthew R. Ochs 		goto out;
2963c21e0bbfSMatthew R. Ochs 	}
2964c21e0bbfSMatthew R. Ochs 
2965c21e0bbfSMatthew R. Ochs 	host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
2966c21e0bbfSMatthew R. Ochs 	host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
2967c21e0bbfSMatthew R. Ochs 	host->unique_id = host->host_no;
2968c21e0bbfSMatthew R. Ochs 	host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
2969c21e0bbfSMatthew R. Ochs 
2970fb67d44dSMatthew R. Ochs 	cfg = shost_priv(host);
2971c21e0bbfSMatthew R. Ochs 	cfg->host = host;
2972c21e0bbfSMatthew R. Ochs 	rc = alloc_mem(cfg);
2973c21e0bbfSMatthew R. Ochs 	if (rc) {
2974fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: alloc_mem failed\n", __func__);
2975c21e0bbfSMatthew R. Ochs 		rc = -ENOMEM;
29768b5b1e87SMatthew R. Ochs 		scsi_host_put(cfg->host);
2977c21e0bbfSMatthew R. Ochs 		goto out;
2978c21e0bbfSMatthew R. Ochs 	}
2979c21e0bbfSMatthew R. Ochs 
2980c21e0bbfSMatthew R. Ochs 	cfg->init_state = INIT_STATE_NONE;
2981c21e0bbfSMatthew R. Ochs 	cfg->dev = pdev;
298217ead26fSMatthew R. Ochs 	cfg->cxl_fops = cxlflash_cxl_fops;
29832cb79266SMatthew R. Ochs 
29842cb79266SMatthew R. Ochs 	/*
298578ae028eSMatthew R. Ochs 	 * Promoted LUNs move to the top of the LUN table. The rest stay on
298678ae028eSMatthew R. Ochs 	 * the bottom half. The bottom half grows from the end (index = 255),
298778ae028eSMatthew R. Ochs 	 * whereas the top half grows from the beginning (index = 0).
298878ae028eSMatthew R. Ochs 	 *
298978ae028eSMatthew R. Ochs 	 * Initialize the last LUN index for all possible ports.
29902cb79266SMatthew R. Ochs 	 */
29912cb79266SMatthew R. Ochs 	cfg->promote_lun_index = 0;
299278ae028eSMatthew R. Ochs 
299378ae028eSMatthew R. Ochs 	for (k = 0; k < MAX_FC_PORTS; k++)
299478ae028eSMatthew R. Ochs 		cfg->last_lun_index[k] = CXLFLASH_NUM_VLUNS/2 - 1;
29952cb79266SMatthew R. Ochs 
2996c21e0bbfSMatthew R. Ochs 	cfg->dev_id = (struct pci_device_id *)dev_id;
2997c21e0bbfSMatthew R. Ochs 
2998c21e0bbfSMatthew R. Ochs 	init_waitqueue_head(&cfg->tmf_waitq);
2999439e85c1SMatthew R. Ochs 	init_waitqueue_head(&cfg->reset_waitq);
3000c21e0bbfSMatthew R. Ochs 
3001c21e0bbfSMatthew R. Ochs 	INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
3002c21e0bbfSMatthew R. Ochs 	cfg->lr_state = LINK_RESET_INVALID;
3003c21e0bbfSMatthew R. Ochs 	cfg->lr_port = -1;
30040d73122cSMatthew R. Ochs 	spin_lock_init(&cfg->tmf_slock);
300565be2c79SMatthew R. Ochs 	mutex_init(&cfg->ctx_tbl_list_mutex);
300665be2c79SMatthew R. Ochs 	mutex_init(&cfg->ctx_recovery_mutex);
30070a27ae51SMatthew R. Ochs 	init_rwsem(&cfg->ioctl_rwsem);
300865be2c79SMatthew R. Ochs 	INIT_LIST_HEAD(&cfg->ctx_err_recovery);
300965be2c79SMatthew R. Ochs 	INIT_LIST_HEAD(&cfg->lluns);
3010c21e0bbfSMatthew R. Ochs 
3011c21e0bbfSMatthew R. Ochs 	pci_set_drvdata(pdev, cfg);
3012c21e0bbfSMatthew R. Ochs 
3013c21e0bbfSMatthew R. Ochs 	cfg->cxl_afu = cxl_pci_to_afu(pdev);
3014c21e0bbfSMatthew R. Ochs 
3015c21e0bbfSMatthew R. Ochs 	rc = init_pci(cfg);
3016c21e0bbfSMatthew R. Ochs 	if (rc) {
3017fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc);
3018c21e0bbfSMatthew R. Ochs 		goto out_remove;
3019c21e0bbfSMatthew R. Ochs 	}
3020c21e0bbfSMatthew R. Ochs 	cfg->init_state = INIT_STATE_PCI;
3021c21e0bbfSMatthew R. Ochs 
3022c21e0bbfSMatthew R. Ochs 	rc = init_afu(cfg);
3023323e3342SMatthew R. Ochs 	if (rc && !wq_has_sleeper(&cfg->reset_waitq)) {
3024fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);
3025c21e0bbfSMatthew R. Ochs 		goto out_remove;
3026c21e0bbfSMatthew R. Ochs 	}
3027c21e0bbfSMatthew R. Ochs 	cfg->init_state = INIT_STATE_AFU;
3028c21e0bbfSMatthew R. Ochs 
3029c21e0bbfSMatthew R. Ochs 	rc = init_scsi(cfg);
3030c21e0bbfSMatthew R. Ochs 	if (rc) {
3031fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc);
3032c21e0bbfSMatthew R. Ochs 		goto out_remove;
3033c21e0bbfSMatthew R. Ochs 	}
3034c21e0bbfSMatthew R. Ochs 	cfg->init_state = INIT_STATE_SCSI;
3035c21e0bbfSMatthew R. Ochs 
3036323e3342SMatthew R. Ochs 	if (wq_has_sleeper(&cfg->reset_waitq)) {
3037323e3342SMatthew R. Ochs 		cfg->state = STATE_PROBED;
3038323e3342SMatthew R. Ochs 		wake_up_all(&cfg->reset_waitq);
3039323e3342SMatthew R. Ochs 	} else {
3040323e3342SMatthew R. Ochs 		cfg->state = STATE_NORMAL;
323e3342SMatthew R. Ochs 	}
3041c21e0bbfSMatthew R. Ochs out:
3042fb67d44dSMatthew R. Ochs 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3043c21e0bbfSMatthew R. Ochs 	return rc;
3044c21e0bbfSMatthew R. Ochs 
3045c21e0bbfSMatthew R. Ochs out_remove:
3046c21e0bbfSMatthew R. Ochs 	cxlflash_remove(pdev);
3047c21e0bbfSMatthew R. Ochs 	goto out;
3048c21e0bbfSMatthew R. Ochs }
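
/*
 * Editor's note: a minimal sketch (not this driver's exact cxlflash_remove())
 * of the pattern the init_state ladder in cxlflash_probe() enables. A single
 * teardown routine switches on the highest state reached and falls through,
 * undoing each stage in reverse order, which is why the probe error path can
 * simply jump to out_remove at any point. The undo_*() helper names are
 * illustrative placeholders, not symbols from this file.
 */
static void example_teardown(struct cxlflash_cfg *cfg)
{
	switch (cfg->init_state) {
	case INIT_STATE_SCSI:
		undo_scsi(cfg);		/* hypothetical helper */
		/* fall through */
	case INIT_STATE_AFU:
		undo_afu(cfg);		/* hypothetical helper */
		/* fall through */
	case INIT_STATE_PCI:
		undo_pci(cfg);		/* hypothetical helper */
		/* fall through */
	case INIT_STATE_NONE:
		undo_mem(cfg);		/* hypothetical helper */
		break;
	}
}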
3049c21e0bbfSMatthew R. Ochs 
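/*
 * Editor's note: a standalone sketch of the two-ended growth described by
 * the LUN table comment in cxlflash_probe() above. Assumptions: a 256-entry
 * per-port table (CXLFLASH_NUM_VLUNS/2) and an illustrative helper name;
 * only the growth directions mirror the driver's promote_lun_index and
 * last_lun_index bookkeeping. Promoted LUNs fill upward from index 0, the
 * rest fill downward from index 255, and the table is exhausted once the
 * two cursors cross.
 */
static int example_alloc_lun_slot(struct cxlflash_cfg *cfg, int port,
				  bool promote, u32 *index)
{
	if (cfg->promote_lun_index > cfg->last_lun_index[port])
		return -ENOSPC;	/* halves have met; no free slots */

	*index = promote ? cfg->promote_lun_index++
			 : cfg->last_lun_index[port]--;
	return 0;
}
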
30505cdac81aSMatthew R. Ochs /**
30515cdac81aSMatthew R. Ochs  * cxlflash_pci_error_detected() - called when a PCI error is detected
30525cdac81aSMatthew R. Ochs  * @pdev:	PCI device struct.
30535cdac81aSMatthew R. Ochs  * @state:	PCI channel state.
30545cdac81aSMatthew R. Ochs  *
30551d3324c3SMatthew R. Ochs  * When an EEH occurs during an active reset, wait until the reset is
30561d3324c3SMatthew R. Ochs  * complete and then take action based upon the device state.
30571d3324c3SMatthew R. Ochs  *
30585cdac81aSMatthew R. Ochs  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
30595cdac81aSMatthew R. Ochs  */
30605cdac81aSMatthew R. Ochs static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
30615cdac81aSMatthew R. Ochs 						    pci_channel_state_t state)
30625cdac81aSMatthew R. Ochs {
306365be2c79SMatthew R. Ochs 	int rc = 0;
30645cdac81aSMatthew R. Ochs 	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
30655cdac81aSMatthew R. Ochs 	struct device *dev = &cfg->dev->dev;
30665cdac81aSMatthew R. Ochs 
30675cdac81aSMatthew R. Ochs 	dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);
30685cdac81aSMatthew R. Ochs 
30695cdac81aSMatthew R. Ochs 	switch (state) {
30705cdac81aSMatthew R. Ochs 	case pci_channel_io_frozen:
3071323e3342SMatthew R. Ochs 		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
3072323e3342SMatthew R. Ochs 					     cfg->state != STATE_PROBING);
30731d3324c3SMatthew R. Ochs 		if (cfg->state == STATE_FAILTERM)
30741d3324c3SMatthew R. Ochs 			return PCI_ERS_RESULT_DISCONNECT;
30751d3324c3SMatthew R. Ochs 
3076439e85c1SMatthew R. Ochs 		cfg->state = STATE_RESET;
30775cdac81aSMatthew R. Ochs 		scsi_block_requests(cfg->host);
30780a27ae51SMatthew R. Ochs 		drain_ioctls(cfg);
307965be2c79SMatthew R. Ochs 		rc = cxlflash_mark_contexts_error(cfg);
308065be2c79SMatthew R. Ochs 		if (unlikely(rc))
3081fb67d44dSMatthew R. Ochs 			dev_err(dev, "%s: Failed to mark user contexts rc=%d\n",
308265be2c79SMatthew R. Ochs 				__func__, rc);
30839526f360SManoj N. Kumar 		term_afu(cfg);
30845cdac81aSMatthew R. Ochs 		return PCI_ERS_RESULT_NEED_RESET;
30855cdac81aSMatthew R. Ochs 	case pci_channel_io_perm_failure:
30865cdac81aSMatthew R. Ochs 		cfg->state = STATE_FAILTERM;
3087439e85c1SMatthew R. Ochs 		wake_up_all(&cfg->reset_waitq);
30885cdac81aSMatthew R. Ochs 		scsi_unblock_requests(cfg->host);
30895cdac81aSMatthew R. Ochs 		return PCI_ERS_RESULT_DISCONNECT;
30905cdac81aSMatthew R. Ochs 	default:
30915cdac81aSMatthew R. Ochs 		break;
30925cdac81aSMatthew R. Ochs 	}
30935cdac81aSMatthew R. Ochs 	return PCI_ERS_RESULT_NEED_RESET;
30945cdac81aSMatthew R. Ochs }
30955cdac81aSMatthew R. Ochs 
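/*
 * Editor's note: a sketch of the consumer side of reset_waitq (the driver's
 * actual helper for this lives elsewhere in the file, outside this excerpt).
 * Callers that find the device in STATE_RESET sleep until
 * cxlflash_pci_error_detected()/cxlflash_pci_resume() settles the state,
 * then either proceed or bail out on STATE_FAILTERM.
 */
static int example_check_state(struct cxlflash_cfg *cfg)
{
	int rc = 0;

retry:
	switch (cfg->state) {
	case STATE_RESET:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		goto retry;
	case STATE_FAILTERM:
		rc = -ENODEV;
		break;
	default:
		break;
	}

	return rc;
}
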
30965cdac81aSMatthew R. Ochs /**
30975cdac81aSMatthew R. Ochs  * cxlflash_pci_slot_reset() - called when PCI slot has been reset
30985cdac81aSMatthew R. Ochs  * @pdev:	PCI device struct.
30995cdac81aSMatthew R. Ochs  *
31005cdac81aSMatthew R. Ochs  * This routine is called by the PCI error recovery code after the PCI
31015cdac81aSMatthew R. Ochs  * slot has been reset, just before we should resume normal operations.
31025cdac81aSMatthew R. Ochs  *
31035cdac81aSMatthew R. Ochs  * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
31045cdac81aSMatthew R. Ochs  */
31055cdac81aSMatthew R. Ochs static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
31065cdac81aSMatthew R. Ochs {
31075cdac81aSMatthew R. Ochs 	int rc = 0;
31085cdac81aSMatthew R. Ochs 	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
31095cdac81aSMatthew R. Ochs 	struct device *dev = &cfg->dev->dev;
31105cdac81aSMatthew R. Ochs 
31115cdac81aSMatthew R. Ochs 	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
31125cdac81aSMatthew R. Ochs 
31135cdac81aSMatthew R. Ochs 	rc = init_afu(cfg);
31145cdac81aSMatthew R. Ochs 	if (unlikely(rc)) {
3115fb67d44dSMatthew R. Ochs 		dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc);
31165cdac81aSMatthew R. Ochs 		return PCI_ERS_RESULT_DISCONNECT;
31175cdac81aSMatthew R. Ochs 	}
31185cdac81aSMatthew R. Ochs 
31195cdac81aSMatthew R. Ochs 	return PCI_ERS_RESULT_RECOVERED;
31205cdac81aSMatthew R. Ochs }
31215cdac81aSMatthew R. Ochs 
31225cdac81aSMatthew R. Ochs /**
31235cdac81aSMatthew R. Ochs  * cxlflash_pci_resume() - called when normal operation can resume
31245cdac81aSMatthew R. Ochs  * @pdev:	PCI device struct
31255cdac81aSMatthew R. Ochs  */
31265cdac81aSMatthew R. Ochs static void cxlflash_pci_resume(struct pci_dev *pdev)
31275cdac81aSMatthew R. Ochs {
31285cdac81aSMatthew R. Ochs 	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
31295cdac81aSMatthew R. Ochs 	struct device *dev = &cfg->dev->dev;
31305cdac81aSMatthew R. Ochs 
31315cdac81aSMatthew R. Ochs 	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
31325cdac81aSMatthew R. Ochs 
31335cdac81aSMatthew R. Ochs 	cfg->state = STATE_NORMAL;
3134439e85c1SMatthew R. Ochs 	wake_up_all(&cfg->reset_waitq);
31355cdac81aSMatthew R. Ochs 	scsi_unblock_requests(cfg->host);
31365cdac81aSMatthew R. Ochs }
31375cdac81aSMatthew R. Ochs 
31385cdac81aSMatthew R. Ochs static const struct pci_error_handlers cxlflash_err_handler = {
31395cdac81aSMatthew R. Ochs 	.error_detected = cxlflash_pci_error_detected,
31405cdac81aSMatthew R. Ochs 	.slot_reset = cxlflash_pci_slot_reset,
31415cdac81aSMatthew R. Ochs 	.resume = cxlflash_pci_resume,
31425cdac81aSMatthew R. Ochs };
31435cdac81aSMatthew R. Ochs 
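/*
 * Editor's note: for reference, the order in which the EEH/AER core drives
 * the three callbacks above on a frozen channel. First, .error_detected()
 * quiesces the driver (STATE_RESET, block SCSI requests, drain ioctls,
 * terminate the AFU) and returns PCI_ERS_RESULT_NEED_RESET. After the slot
 * reset, .slot_reset() rebuilds the AFU via init_afu(). Finally, .resume()
 * restores STATE_NORMAL, wakes sleepers on reset_waitq, and unblocks SCSI
 * requests.
 */
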
3144c21e0bbfSMatthew R. Ochs /*
3145c21e0bbfSMatthew R. Ochs  * PCI device structure
3146c21e0bbfSMatthew R. Ochs  */
3147c21e0bbfSMatthew R. Ochs static struct pci_driver cxlflash_driver = {
3148c21e0bbfSMatthew R. Ochs 	.name = CXLFLASH_NAME,
3149c21e0bbfSMatthew R. Ochs 	.id_table = cxlflash_pci_table,
3150c21e0bbfSMatthew R. Ochs 	.probe = cxlflash_probe,
3151c21e0bbfSMatthew R. Ochs 	.remove = cxlflash_remove,
3152babf985dSUma Krishnan 	.shutdown = cxlflash_remove,
31535cdac81aSMatthew R. Ochs 	.err_handler = &cxlflash_err_handler,
3154c21e0bbfSMatthew R. Ochs };
3155c21e0bbfSMatthew R. Ochs 
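/*
 * Editor's note: the id_table referenced above (cxlflash_pci_table, defined
 * earlier in this file and not shown in this excerpt) is what binds .probe
 * to matching adapters. A generic sketch of such a table follows; the device
 * ID below is a placeholder, not necessarily one this driver claims.
 */
static const struct pci_device_id example_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x04F0) },	/* illustrative ID */
	{ }						/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_table);
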
3156c21e0bbfSMatthew R. Ochs /**
3157c21e0bbfSMatthew R. Ochs  * init_cxlflash() - module entry point
3158c21e0bbfSMatthew R. Ochs  *
31591284fb0cSMatthew R. Ochs  * Return: 0 on success, -errno on failure
3160c21e0bbfSMatthew R. Ochs  */
3161c21e0bbfSMatthew R. Ochs static int __init init_cxlflash(void)
3162c21e0bbfSMatthew R. Ochs {
3163cd41e18dSMatthew R. Ochs 	check_sizes();
316465be2c79SMatthew R. Ochs 	cxlflash_list_init();
316565be2c79SMatthew R. Ochs 
3166c21e0bbfSMatthew R. Ochs 	return pci_register_driver(&cxlflash_driver);
3167c21e0bbfSMatthew R. Ochs }
3168c21e0bbfSMatthew R. Ochs 
3169c21e0bbfSMatthew R. Ochs /**
3170c21e0bbfSMatthew R. Ochs  * exit_cxlflash() - module exit point
3171c21e0bbfSMatthew R. Ochs  */
3172c21e0bbfSMatthew R. Ochs static void __exit exit_cxlflash(void)
3173c21e0bbfSMatthew R. Ochs {
317465be2c79SMatthew R. Ochs 	cxlflash_term_global_luns();
317565be2c79SMatthew R. Ochs 	cxlflash_free_errpage();
317665be2c79SMatthew R. Ochs 
3177c21e0bbfSMatthew R. Ochs 	pci_unregister_driver(&cxlflash_driver);
3178c21e0bbfSMatthew R. Ochs }
3179c21e0bbfSMatthew R. Ochs 
3180c21e0bbfSMatthew R. Ochs module_init(init_cxlflash);
3181c21e0bbfSMatthew R. Ochs module_exit(exit_cxlflash);
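
/*
 * Editor's note: module_pci_driver(cxlflash_driver) would generate the two
 * entry points above automatically, but only when init/exit do nothing
 * beyond registering and unregistering the driver. Here they also run
 * check_sizes() and cxlflash_list_init() on load, and the global LUN and
 * error-page teardown on unload, so explicit routines are kept.
 */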
3182