/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/unaligned.h>

#include <misc/cxl.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "main.h"
#include "sislite.h"
#include "common.h"

MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");

/**
 * cmd_checkout() - checks out an AFU command
 * @afu:	AFU to checkout from.
 *
 * Commands are checked out in a round-robin fashion. Note that since
 * the command pool is larger than the hardware queue, the majority of
 * times we will only loop once or twice before getting a command. The
 * buffer and CDB within the command are initialized (zeroed) prior to
 * returning.
 *
 * Return: The checked out command or NULL when command pool is empty.
 */
static struct afu_cmd *cmd_checkout(struct afu *afu)
{
	int k, dec = CXLFLASH_NUM_CMDS;
	struct afu_cmd *cmd;

	while (dec--) {
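		/*
		 * CXLFLASH_NUM_CMDS is a power of two; masking the running
		 * checkout count yields a cheap round-robin index.
		 */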
		k = (afu->cmd_couts++ & (CXLFLASH_NUM_CMDS - 1));

		cmd = &afu->cmd[k];

		if (!atomic_dec_if_positive(&cmd->free)) {
			pr_devel("%s: returning found index=%d cmd=%p\n",
				 __func__, cmd->slot, cmd);
			memset(cmd->buf, 0, CMD_BUFSIZE);
			memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
			return cmd;
		}
	}

	return NULL;
}

/**
 * cmd_checkin() - checks in an AFU command
 * @cmd:	AFU command to checkin.
 *
 * Safe to pass commands that have already been checked in. Several
 * internal tracking fields are reset as part of the checkin. Note
 * that these are intentionally reset prior to toggling the free bit
 * to avoid clobbering values in the event that the command is checked
 * out right away.
 */
static void cmd_checkin(struct afu_cmd *cmd)
{
	cmd->rcb.scp = NULL;
	cmd->rcb.timeout = 0;
	cmd->sa.ioasc = 0;
	cmd->cmd_tmf = false;
	cmd->sa.host_use[0] = 0; /* clears both completion and retry bytes */

	if (unlikely(atomic_inc_return(&cmd->free) != 1)) {
		pr_err("%s: Freeing cmd (%d) that is not in use!\n",
		       __func__, cmd->slot);
		return;
	}

	pr_devel("%s: released cmd %p index=%d\n", __func__, cmd, cmd->slot);
}

/**
 * process_cmd_err() - command error handler
 * @cmd:	AFU command that experienced the error.
 * @scp:	SCSI command associated with the AFU command in error.
 *
 * Translates error bits from AFU command to SCSI command results.
 */
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
	struct sisl_ioarcb *ioarcb;
	struct sisl_ioasa *ioasa;
	u32 resid;

	if (unlikely(!cmd))
		return;

	ioarcb = &(cmd->rcb);
	ioasa = &(cmd->sa);

	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
		resid = ioasa->resid;
		scsi_set_resid(scp, resid);
		pr_debug("%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
			 __func__, cmd, scp, resid);
	}

	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
		pr_debug("%s: cmd overrun cmd = %p scp = %p\n",
			 __func__, cmd, scp);
		scp->result = (DID_ERROR << 16);
	}

	pr_debug("%s: cmd failed afu_rc=%d scsi_rc=%d fc_rc=%d "
		 "afu_extra=0x%X, scsi_extra=0x%X, fc_extra=0x%X\n",
		 __func__, ioasa->rc.afu_rc, ioasa->rc.scsi_rc,
		 ioasa->rc.fc_rc, ioasa->afu_extra, ioasa->scsi_extra,
		 ioasa->fc_extra);

	if (ioasa->rc.scsi_rc) {
		/* We have a SCSI status */
		if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
			memcpy(scp->sense_buffer, ioasa->sense_data,
			       SISL_SENSE_DATA_LEN);
			scp->result = ioasa->rc.scsi_rc;
		} else
			scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
	}

	/*
	 * We encountered an error. Set scp->result based on nature
	 * of error.
	 */
	if (ioasa->rc.fc_rc) {
		/* We have an FC status */
		switch (ioasa->rc.fc_rc) {
		case SISL_FC_RC_LINKDOWN:
			scp->result = (DID_REQUEUE << 16);
			break;
		case SISL_FC_RC_RESID:
			/* This indicates an FCP resid underrun */
			if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
				/*
				 * If the SISL_RC_FLAGS_OVERRUN flag was set,
				 * then this error is handled elsewhere. If
				 * not, it must be handled here and is most
				 * likely an AFU bug.
				 */
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_FC_RC_RESIDERR:
			/* Resid mismatch between adapter and device */
		case SISL_FC_RC_TGTABORT:
		case SISL_FC_RC_ABORTOK:
		case SISL_FC_RC_ABORTFAIL:
		case SISL_FC_RC_NOLOGI:
		case SISL_FC_RC_ABORTPEND:
		case SISL_FC_RC_WRABORTPEND:
		case SISL_FC_RC_NOEXP:
		case SISL_FC_RC_INUSE:
			scp->result = (DID_ERROR << 16);
			break;
		}
	}

	if (ioasa->rc.afu_rc) {
		/* We have an AFU error */
		switch (ioasa->rc.afu_rc) {
		case SISL_AFU_RC_NO_CHANNELS:
			scp->result = (DID_NO_CONNECT << 16);
			break;
		case SISL_AFU_RC_DATA_DMA_ERR:
			switch (ioasa->afu_extra) {
			case SISL_AFU_DMA_ERR_PAGE_IN:
				/* Retry */
				scp->result = (DID_IMM_RETRY << 16);
				break;
			case SISL_AFU_DMA_ERR_INVALID_EA:
			default:
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_AFU_RC_OUT_OF_DATA_BUFS:
			/* Retry */
			scp->result = (DID_ALLOC_FAILURE << 16);
			break;
		default:
			scp->result = (DID_ERROR << 16);
		}
	}
}

/**
 * cmd_complete() - command completion handler
 * @cmd:	AFU command that has completed.
 *
 * Prepares and submits command that has either completed or timed out to
 * the SCSI stack. Checks AFU command back into command pool for non-internal
 * (rcb.scp populated) commands.
 */
static void cmd_complete(struct afu_cmd *cmd)
{
	struct scsi_cmnd *scp;
	ulong lock_flags;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	bool cmd_is_tmf;

	spin_lock_irqsave(&cmd->slock, lock_flags);
	cmd->sa.host_use_b[0] |= B_DONE;
	spin_unlock_irqrestore(&cmd->slock, lock_flags);

	if (cmd->rcb.scp) {
		scp = cmd->rcb.scp;
		if (unlikely(cmd->sa.ioasc))
			process_cmd_err(cmd, scp);
		else
			scp->result = (DID_OK << 16);

		cmd_is_tmf = cmd->cmd_tmf;

		pr_debug_ratelimited("%s: calling scsi_done scp=%p result=%X "
				     "ioasc=%d\n", __func__, scp, scp->result,
				     cmd->sa.ioasc);

		cmd_checkin(cmd); /* Don't use cmd after here */

		scsi_dma_unmap(scp);
		scp->scsi_done(scp);

		if (cmd_is_tmf) {
			spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
			cfg->tmf_active = false;
			wake_up_all_locked(&cfg->tmf_waitq);
			spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		}
	} else
		complete(&cmd->cevent);
}

/**
 * context_reset() - timeout handler for AFU commands
 * @cmd:	AFU command that timed out.
 *
 * Sends a reset to the AFU.
 */
static void context_reset(struct afu_cmd *cmd)
{
	int nretry = 0;
	u64 rrin = 0x1;
	u64 room = 0;
	struct afu *afu = cmd->parent;
	ulong lock_flags;

	pr_debug("%s: cmd=%p\n", __func__, cmd);

	spin_lock_irqsave(&cmd->slock, lock_flags);

	/* Already completed? */
	if (cmd->sa.host_use_b[0] & B_DONE) {
		spin_unlock_irqrestore(&cmd->slock, lock_flags);
		return;
	}

	cmd->sa.host_use_b[0] |= (B_DONE | B_ERROR | B_TIMEOUT);
	spin_unlock_irqrestore(&cmd->slock, lock_flags);

	/*
	 * We really want to send this reset at all costs, so spread
	 * out wait time on successive retries for available room.
	 */
	do {
		room = readq_be(&afu->host_map->cmd_room);
		atomic64_set(&afu->room, room);
		if (room)
			goto write_rrin;
		udelay(nretry);
	} while (nretry++ < MC_ROOM_RETRY_CNT);

	pr_err("%s: no cmd_room to send reset\n", __func__);
	return;

write_rrin:
	nretry = 0;
	writeq_be(rrin, &afu->host_map->ioarrin);
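	/* The AFU acknowledges the context reset by clearing the RRIN value */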
	do {
		rrin = readq_be(&afu->host_map->ioarrin);
		if (rrin != 0x1)
			break;
		/* Double delay each time */
		udelay(2 << nretry);
	} while (nretry++ < MC_ROOM_RETRY_CNT);
}

/**
 * send_cmd() - sends an AFU command
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int nretry = 0;
	int rc = 0;
	u64 room;
	long newval;

	/*
	 * This routine is used by critical users such as AFU sync and to
	 * send a task management function (TMF). Thus we want to retry a
	 * bit before returning an error. To avoid the performance penalty
	 * of MMIO, we spread the update of 'room' over multiple commands.
	 */
retry:
	newval = atomic64_dec_if_positive(&afu->room);
	if (!newval) {
		do {
			room = readq_be(&afu->host_map->cmd_room);
			atomic64_set(&afu->room, room);
			if (room)
				goto write_ioarrin;
			udelay(nretry);
		} while (nretry++ < MC_ROOM_RETRY_CNT);

		dev_err(dev, "%s: no cmd_room to send 0x%X\n",
		       __func__, cmd->rcb.cdb[0]);

		goto no_room;
	} else if (unlikely(newval < 0)) {
		/*
		 * This should be rare, i.e. only when two threads race and
		 * decrement before the MMIO read is done. In this case just
		 * benefit from the other thread having updated afu->room.
		 */
		if (nretry++ < MC_ROOM_RETRY_CNT) {
			udelay(nretry);
			goto retry;
		}

		goto no_room;
	}

write_ioarrin:
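	/* Handing the RCB effective address to IOARRIN starts the command */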
	writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
out:
	pr_devel("%s: cmd=%p len=%d ea=%p rc=%d\n", __func__, cmd,
		 cmd->rcb.data_len, (void *)cmd->rcb.data_ea, rc);
	return rc;

no_room:
	afu->read_room = true;
	kref_get(&cfg->afu->mapcount);
	schedule_work(&cfg->work_q);
	rc = SCSI_MLQUEUE_HOST_BUSY;
	goto out;
}

/**
 * wait_resp() - polls for a response or timeout to a sent AFU command
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command that was sent.
 */
static void wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
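	/* Allow the command twice its stated timeout before forcing a reset */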
	ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);

	timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
	if (!timeout)
		context_reset(cmd);

	if (unlikely(cmd->sa.ioasc != 0))
		pr_err("%s: CMD 0x%X failed, IOASC: flags 0x%X, afu_rc 0x%X, "
		       "scsi_rc 0x%X, fc_rc 0x%X\n", __func__, cmd->rcb.cdb[0],
		       cmd->sa.rc.flags, cmd->sa.rc.afu_rc, cmd->sa.rc.scsi_rc,
		       cmd->sa.rc.fc_rc);
}

/**
 * send_tmf() - sends a Task Management Function (TMF)
 * @afu:	AFU to checkout from.
 * @scp:	SCSI command from stack.
 * @tmfcmd:	TMF command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
{
	struct afu_cmd *cmd;

	u32 port_sel = scp->device->channel + 1;
	short lflag = 0;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
	struct device *dev = &cfg->dev->dev;
	ulong lock_flags;
	int rc = 0;
	ulong to;

	cmd = cmd_checkout(afu);
	if (unlikely(!cmd)) {
		dev_err(dev, "%s: could not get a free command\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	/* When Task Management Function is active do not send another */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	cfg->tmf_active = true;
	cmd->cmd_tmf = true;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	cmd->rcb.ctx_id = afu->ctx_hndl;
	cmd->rcb.port_sel = port_sel;
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	lflag = SISL_REQ_FLAGS_TMF_CMD;

	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);

	/* Stash the scp in the reserved field, for reuse during interrupt */
	cmd->rcb.scp = scp;

	/* Copy the CDB from the cmd passed in */
	memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));

	/* Send the command */
	rc = send_cmd(afu, cmd);
	if (unlikely(rc)) {
		cmd_checkin(cmd);
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		goto out;
	}

	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	to = msecs_to_jiffies(5000);
	to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
						       !cfg->tmf_active,
						       cfg->tmf_slock,
						       to);
	if (!to) {
		cfg->tmf_active = false;
		dev_err(dev, "%s: TMF timed out!\n", __func__);
		rc = -1;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
out:
	return rc;
}

static void afu_unmap(struct kref *ref)
{
	struct afu *afu = container_of(ref, struct afu, mapcount);

	if (likely(afu->afu_map)) {
		cxl_psa_unmap((void __iomem *)afu->afu_map);
		afu->afu_map = NULL;
	}
}

/**
 * cxlflash_driver_info() - information handler for this host driver
 * @host:	SCSI host associated with device.
 *
 * Return: A string describing the device.
 */
static const char *cxlflash_driver_info(struct Scsi_Host *host)
{
	return CXLFLASH_ADAPTER_NAME;
}

/**
 * cxlflash_queuecommand() - sends a mid-layer request
 * @host:	SCSI host associated with device.
 * @scp:	SCSI command to send.
 *
 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd;
	u32 port_sel = scp->device->channel + 1;
	int nseg, i, ncount;
	struct scatterlist *sg;
	ulong lock_flags;
	short lflag = 0;
	int rc = 0;
	int kref_got = 0;

	dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
			    "cdb=(%08X-%08X-%08X-%08X)\n",
			    __func__, scp, host->host_no, scp->device->channel,
			    scp->device->id, scp->device->lun,
			    get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	/*
	 * If a Task Management Function is active, wait for it to complete
	 * before continuing with regular commands.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active) {
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	switch (cfg->state) {
	case STATE_RESET:
		dev_dbg_ratelimited(dev, "%s: device is in reset!\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	case STATE_FAILTERM:
		dev_dbg_ratelimited(dev, "%s: device has failed!\n", __func__);
		scp->result = (DID_NO_CONNECT << 16);
		scp->scsi_done(scp);
		rc = 0;
		goto out;
	default:
		break;
	}

	cmd = cmd_checkout(afu);
	if (unlikely(!cmd)) {
		dev_err(dev, "%s: could not get a free command\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	kref_get(&cfg->afu->mapcount);
	kref_got = 1;

	cmd->rcb.ctx_id = afu->ctx_hndl;
	cmd->rcb.port_sel = port_sel;
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		lflag = SISL_REQ_FLAGS_HOST_WRITE;
	else
		lflag = SISL_REQ_FLAGS_HOST_READ;

	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);

	/* Stash the scp in the reserved field, for reuse during interrupt */
	cmd->rcb.scp = scp;

	nseg = scsi_dma_map(scp);
	if (unlikely(nseg < 0)) {
		dev_err(dev, "%s: Fail DMA map! nseg=%d\n",
			__func__, nseg);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

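	/*
	 * The RCB holds a single buffer descriptor, so each pass overwrites
	 * the previous entry; in practice the list contains one element.
	 */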
	ncount = scsi_sg_count(scp);
	scsi_for_each_sg(scp, sg, ncount, i) {
		cmd->rcb.data_len = sg_dma_len(sg);
		cmd->rcb.data_ea = sg_dma_address(sg);
	}

	/* Copy the CDB from the scsi_cmnd passed in */
	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));

	/* Send the command */
	rc = send_cmd(afu, cmd);
	if (unlikely(rc)) {
		cmd_checkin(cmd);
		scsi_dma_unmap(scp);
	}

out:
	if (kref_got)
		kref_put(&afu->mapcount, afu_unmap);
	pr_devel("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;

	if (pci_channel_offline(pdev))
		wait_event_timeout(cfg->reset_waitq,
				   !pci_channel_offline(pdev),
				   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
}

/**
 * free_mem() - free memory associated with the AFU
 * @cfg:	Internal structure associated with the host.
 */
static void free_mem(struct cxlflash_cfg *cfg)
{
	int i;
	char *buf = NULL;
	struct afu *afu = cfg->afu;

	if (cfg->afu) {
		for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
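			/*
			 * Buffers are packed several to a page; only the
			 * page-aligned buffer at the head of each page owns
			 * the underlying allocation.
			 */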
			buf = afu->cmd[i].buf;
			if (!((u64)buf & (PAGE_SIZE - 1)))
				free_page((ulong)buf);
		}

		free_pages((ulong)afu, get_order(sizeof(struct afu)));
		cfg->afu = NULL;
	}
}

/**
 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU in a partially allocated/initialized state.
 *
 * Cleans up all state associated with the command queue, and unmaps
 * the MMIO space.
 *
 *  - complete() will take care of commands we initiated (they'll be checked
 *  in as part of the cleanup that occurs after the completion)
 *
 *  - cmd_checkin() will take care of entries that we did not initiate and that
 *  have not (and will not) complete because they are sitting on a [now stale]
 *  hardware queue
 */
static void stop_afu(struct cxlflash_cfg *cfg)
{
	int i;
	struct afu *afu = cfg->afu;
	struct afu_cmd *cmd;

	if (likely(afu)) {
		for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
			cmd = &afu->cmd[i];
			complete(&cmd->cevent);
			if (!atomic_read(&cmd->free))
				cmd_checkin(cmd);
		}

		if (likely(afu->afu_map)) {
			cxl_psa_unmap((void __iomem *)afu->afu_map);
			afu->afu_map = NULL;
		}
		kref_put(&afu->mapcount, afu_unmap);
	}
}

/**
 * term_mc() - terminates the master context
 * @cfg:	Internal structure associated with the host.
 * @level:	Depth of allocation, where to begin waterfall tear down.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level)
{
	int rc = 0;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;

	if (!afu || !cfg->mcctx) {
		dev_err(dev, "%s: returning from term_mc with NULL afu or MC\n",
		       __func__);
		return;
	}

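	/* Waterfall tear down: each case intentionally falls through */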
	switch (level) {
	case UNDO_START:
		rc = cxl_stop_context(cfg->mcctx);
		BUG_ON(rc);
	case UNMAP_THREE:
		cxl_unmap_afu_irq(cfg->mcctx, 3, afu);
	case UNMAP_TWO:
		cxl_unmap_afu_irq(cfg->mcctx, 2, afu);
	case UNMAP_ONE:
		cxl_unmap_afu_irq(cfg->mcctx, 1, afu);
	case FREE_IRQ:
		cxl_free_afu_irqs(cfg->mcctx);
	case RELEASE_CONTEXT:
		cfg->mcctx = NULL;
	}
}

/**
 * term_afu() - terminates the AFU
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_afu(struct cxlflash_cfg *cfg)
{
	if (cfg->afu)
		stop_afu(cfg);

	term_mc(cfg, UNDO_START);

	pr_debug("%s: returning\n", __func__);
}

/**
 * cxlflash_remove() - PCI entry point to tear down host
 * @pdev:	PCI device associated with the host.
 *
 * Safe to use as a cleanup in partially allocated/initialized state.
 */
static void cxlflash_remove(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	ulong lock_flags;

	/* If a Task Management Function is active, wait for it to complete
	 * before continuing with remove.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	cfg->state = STATE_FAILTERM;
	cxlflash_stop_term_user_contexts(cfg);

	switch (cfg->init_state) {
	case INIT_STATE_SCSI:
		cxlflash_term_local_luns(cfg);
		scsi_remove_host(cfg->host);
		/* fall through */
	case INIT_STATE_AFU:
		cancel_work_sync(&cfg->work_q);
		term_afu(cfg);
	case INIT_STATE_PCI:
		pci_disable_device(pdev);
	case INIT_STATE_NONE:
		free_mem(cfg);
		scsi_host_put(cfg->host);
		break;
	}

	pr_debug("%s: returning\n", __func__);
}

/**
 * alloc_mem() - allocates the AFU and its command pool
 * @cfg:	Internal structure associated with the host.
 *
 * A partially allocated state remains on failure.
 *
 * Return:
 *	0 on success
 *	-ENOMEM on failure to allocate memory
 */
static int alloc_mem(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	int i;
	char *buf = NULL;
	struct device *dev = &cfg->dev->dev;

	/* AFU is ~12k, i.e. only one 64k page or up to four 4k pages */
	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					    get_order(sizeof(struct afu)));
	if (unlikely(!cfg->afu)) {
		dev_err(dev, "%s: cannot get %d free pages\n",
			__func__, get_order(sizeof(struct afu)));
		rc = -ENOMEM;
		goto out;
	}
	cfg->afu->parent = cfg;
	cfg->afu->afu_map = NULL;

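	/*
	 * Carve CMD_BUFSIZE buffers out of whole pages, taking a fresh
	 * zeroed page whenever the cursor lands on a page boundary.
	 */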
	for (i = 0; i < CXLFLASH_NUM_CMDS; buf += CMD_BUFSIZE, i++) {
		if (!((u64)buf & (PAGE_SIZE - 1))) {
			buf = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
			if (unlikely(!buf)) {
				dev_err(dev,
					"%s: Allocate command buffers fail!\n",
				       __func__);
				rc = -ENOMEM;
				free_mem(cfg);
				goto out;
			}
		}

		cfg->afu->cmd[i].buf = buf;
		atomic_set(&cfg->afu->cmd[i].free, 1);
		cfg->afu->cmd[i].slot = i;
	}

out:
	return rc;
}

/**
 * init_pci() - initializes the host as a PCI device
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_pci(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	int rc = 0;

	rc = pci_enable_device(pdev);
	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			cxlflash_wait_for_pci_err_recovery(cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(&pdev->dev, "%s: Cannot enable adapter\n",
				__func__);
			cxlflash_wait_for_pci_err_recovery(cfg);
			goto out;
		}
	}

out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_scsi(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	int rc = 0;

	rc = scsi_add_host(cfg->host, &pdev->dev);
	if (rc) {
		dev_err(&pdev->dev, "%s: scsi_add_host failed (rc=%d)\n",
			__func__, rc);
		goto out;
	}

	scsi_scan_host(cfg->host);

out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * set_port_online() - transitions the specified host FC port to online state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. Online state means
 * that the FC link layer has synced, completed the handshaking process, and
 * is ready for login to start.
 */
static void set_port_online(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE);	/* clear OFF_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);	/* set ON_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * set_port_offline() - transitions the specified host FC port to offline state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call.
 */
static void set_port_offline(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);	/* clear ON_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);	/* set OFF_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * wait_port_online() - waits for the specified host FC port to come online
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call. This will timeout
 * when the cable is not plugged in.
 *
 * Return:
 *	TRUE (1) when the specified port is online
 *	FALSE (0) when the specified port fails to come online after timeout
 *	-EINVAL when @delay_us is less than 1000
 */
static int wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	if (delay_us < 1000) {
		pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
		return -EINVAL;
	}

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
}

/**
 * wait_port_offline() - waits for the specified host FC port to go offline
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call.
 *
 * Return:
 *	TRUE (1) when the specified port is offline
 *	FALSE (0) when the specified port fails to go offline after timeout
 *	-EINVAL when @delay_us is less than 1000
 */
static int wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	if (delay_us < 1000) {
		pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
		return -EINVAL;
	}

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
}

/**
 * afu_set_wwpn() - configures the WWPN for the specified host FC port
 * @afu:	AFU associated with the host that owns the specified FC port.
 * @port:	Port number being configured.
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @wwpn:	The world-wide-port-number previously discovered for port.
 *
 * The provided MMIO region must be mapped prior to call. As part of the
 * sequence to configure the WWPN, the port is toggled offline and then back
 * online. This toggling action can cause this routine to delay up to a few
 * seconds. When configured to use the internal LUN feature of the AFU, a
 * failure to come online is overridden.
 *
 * Return:
 *	0 when the WWPN is successfully written and the port comes back online
 *	-1 when the port fails to go offline or come back up online
 */
static int afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
			u64 wwpn)
{
	int rc = 0;

	set_port_offline(fc_regs);

	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT)) {
		pr_debug("%s: wait on port %d to go offline timed out\n",
			 __func__, port);
		rc = -1; /* but continue on to leave the port back online */
	}

	if (rc == 0)
		writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);

	/* Always return success after programming WWPN */
	rc = 0;

	set_port_online(fc_regs);

	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT)) {
		pr_err("%s: wait on port %d to go online timed out\n",
		       __func__, port);
	}

	pr_debug("%s: returning rc=%d\n", __func__, rc);

	return rc;
}

/**
 * afu_link_reset() - resets the specified host FC port
 * @afu:	AFU associated with the host that owns the specified FC port.
 * @port:	Port number being configured.
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. The sequence to
 * reset the port involves toggling it offline and then back online. This
 * action can cause this routine to delay up to a few seconds. An effort
 * is made to maintain link with the device by switching the host to use
 * the alternate port exclusively while the reset takes place.
 */
static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
{
	u64 port_sel;

	/* first switch the AFU to the other links, if any */
	port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
	port_sel &= ~(1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT))
		pr_err("%s: wait on port %d to go offline timed out\n",
		       __func__, port);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT))
		pr_err("%s: wait on port %d to go online timed out\n",
		       __func__, port);

	/* switch back to include this port */
	port_sel |= (1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	pr_debug("%s: returning port_sel=%lld\n", __func__, port_sel);
}

/*
 * Asynchronous interrupt information table
 */
static const struct asyc_intr_info ainfo[] = {
	{SISL_ASTATUS_FC0_OTHER, "other error", 0, CLR_FC_ERROR | LINK_RESET},
	{SISL_ASTATUS_FC0_LOGO, "target initiated LOGO", 0, 0},
	{SISL_ASTATUS_FC0_CRC_T, "CRC threshold exceeded", 0, LINK_RESET},
	{SISL_ASTATUS_FC0_LOGI_R, "login timed out, retrying", 0, LINK_RESET},
	{SISL_ASTATUS_FC0_LOGI_F, "login failed", 0, CLR_FC_ERROR},
	{SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, SCAN_HOST},
	{SISL_ASTATUS_FC0_LINK_DN, "link down", 0, 0},
	{SISL_ASTATUS_FC0_LINK_UP, "link up", 0, SCAN_HOST},
	{SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
	{SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
	{SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
	{SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, LINK_RESET},
	{SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
	{SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, SCAN_HOST},
	{SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
	{SISL_ASTATUS_FC1_LINK_UP, "link up", 1, SCAN_HOST},
	{0x0, "", 0, 0}		/* terminator */
};

/**
 * find_ainfo() - locates and returns asynchronous interrupt information
 * @status:	Status code set by AFU on error.
 *
 * Return: The located information or NULL when the status code is invalid.
 */
static const struct asyc_intr_info *find_ainfo(u64 status)
{
	const struct asyc_intr_info *info;

	for (info = &ainfo[0]; info->status; info++)
		if (info->status == status)
			return info;

	return NULL;
}

/**
 * afu_err_intr_init() - clears and initializes the AFU for error interrupts
 * @afu:	AFU associated with the host.
 */
static void afu_err_intr_init(struct afu *afu)
{
	int i;
	u64 reg;

	/*
	 * Global async interrupts: AFU clears afu_ctrl on context exit
	 * if async interrupts were sent to that context. This prevents
	 * the AFU from sending further async interrupts when there is
	 * nobody to receive them.
	 */

	/* mask all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
	/* set LISN# to send and point to master context */
	reg = ((u64) (((afu->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);

	if (afu->internal_lun)
		reg |= 1;	/* Bit 63 indicates local lun */
	writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
	/* clear all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
	/* unmask bits that are of interest */
	/* note: afu can send an interrupt after this step */
	writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
	/* clear again in case a bit came on after previous clear but before
	 * unmask
	 */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);

	/* Clear/Set internal lun bits */
	reg = readq_be(&afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);
	reg &= SISL_FC_INTERNAL_MASK;
	if (afu->internal_lun)
		reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
	writeq_be(reg, &afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);

	/* now clear FC errors */
	for (i = 0; i < NUM_FC_PORTS; i++) {
		writeq_be(0xFFFFFFFFU,
			  &afu->afu_map->global.fc_regs[i][FC_ERROR / 8]);
		writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRCAP / 8]);
	}

	/*
	 * Sync interrupts for master's IOARRIN write. Note that unlike
	 * asyncs, there can be no pending sync interrupts at this time
	 * (this is a fresh context and master has not written IOARRIN
	 * yet), so there is nothing to clear.
	 */

	/* set LISN#, it is always sent to the context that wrote IOARRIN */
	writeq_be(SISL_MSI_SYNC_ERROR, &afu->host_map->ctx_ctrl);
	writeq_be(SISL_ISTATUS_MASK, &afu->host_map->intr_mask);
}

/**
 * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	u64 reg;
	u64 reg_unmasked;

	reg = readq_be(&afu->host_map->intr_status);
	reg_unmasked = (reg & SISL_ISTATUS_UNMASK);

	if (reg_unmasked == 0UL) {
		pr_err("%s: %llX: spurious interrupt, intr_status %016llX\n",
		       __func__, (u64)afu, reg);
		goto cxlflash_sync_err_irq_exit;
	}

	pr_err("%s: %llX: unexpected interrupt, intr_status %016llX\n",
	       __func__, (u64)afu, reg);

	writeq_be(reg_unmasked, &afu->host_map->intr_clear);

cxlflash_sync_err_irq_exit:
	pr_debug("%s: returning rc=%d\n", __func__, IRQ_HANDLED);
	return IRQ_HANDLED;
}

/**
 * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	struct afu_cmd *cmd;
	bool toggle = afu->toggle;
	u64 entry,
	    *hrrq_start = afu->hrrq_start,
	    *hrrq_end = afu->hrrq_end,
	    *hrrq_curr = afu->hrrq_curr;

	/* Process however many RRQ entries that are ready */
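	/*
	 * The AFU flips the toggle bit on each pass through the queue; an
	 * entry is valid only while its toggle matches the host's expected
	 * value.
	 */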
	while (true) {
		entry = *hrrq_curr;

		if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
			break;

		cmd = (struct afu_cmd *)(entry & ~SISL_RESP_HANDLE_T_BIT);
		cmd_complete(cmd);

		/* Advance to next entry or wrap and flip the toggle bit */
		if (hrrq_curr < hrrq_end)
			hrrq_curr++;
		else {
			hrrq_curr = hrrq_start;
			toggle ^= SISL_RESP_HANDLE_T_BIT;
		}
	}

	afu->hrrq_curr = hrrq_curr;
	afu->toggle = toggle;

	return IRQ_HANDLED;
}

/**
 * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 reg_unmasked;
	const struct asyc_intr_info *info;
	struct sisl_global_map __iomem *global = &afu->afu_map->global;
	u64 reg;
	u8 port;
	int i;

	reg = readq_be(&global->regs.aintr_status);
	reg_unmasked = (reg & SISL_ASTATUS_UNMASK);

	if (reg_unmasked == 0) {
		dev_err(dev, "%s: spurious interrupt, aintr_status 0x%016llX\n",
			__func__, reg);
		goto out;
	}

	/* FYI, it is 'okay' to clear AFU status before FC_ERROR */
	writeq_be(reg_unmasked, &global->regs.aintr_clear);

	/* Check each bit that is on */
	for (i = 0; reg_unmasked; i++, reg_unmasked = (reg_unmasked >> 1)) {
		info = find_ainfo(1ULL << i);
		if (((reg_unmasked & 0x1) == 0) || !info)
			continue;

		port = info->port;

		dev_err(dev, "%s: FC Port %d -> %s, fc_status 0x%08llX\n",
			__func__, port, info->desc,
		       readq_be(&global->fc_regs[port][FC_STATUS / 8]));

		/*
		 * Do link reset first, some OTHER errors will set FC_ERROR
		 * again if cleared before or w/o a reset
		 */
		if (info->action & LINK_RESET) {
			dev_err(dev, "%s: FC Port %d: resetting link\n",
				__func__, port);
			cfg->lr_state = LINK_RESET_REQUIRED;
			cfg->lr_port = port;
			kref_get(&cfg->afu->mapcount);
			schedule_work(&cfg->work_q);
		}

		if (info->action & CLR_FC_ERROR) {
			reg = readq_be(&global->fc_regs[port][FC_ERROR / 8]);

			/*
			 * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
			 * should be the same and tracing one is sufficient.
			 */

			dev_err(dev, "%s: fc %d: clearing fc_error 0x%08llX\n",
				__func__, port, reg);

			writeq_be(reg, &global->fc_regs[port][FC_ERROR / 8]);
			writeq_be(0, &global->fc_regs[port][FC_ERRCAP / 8]);
		}

		if (info->action & SCAN_HOST) {
			atomic_inc(&cfg->scan_host_needed);
			kref_get(&cfg->afu->mapcount);
			schedule_work(&cfg->work_q);
		}
	}

out:
	dev_dbg(dev, "%s: returning IRQ_HANDLED, afu=%p\n", __func__, afu);
	return IRQ_HANDLED;
}

/**
 * start_context() - starts the master context
 * @cfg:	Internal structure associated with the host.
 *
 * Return: A success or failure value from CXL services.
 */
static int start_context(struct cxlflash_cfg *cfg)
{
	int rc = 0;

	rc = cxl_start_context(cfg->mcctx,
			       cfg->afu->work.work_element_descriptor,
			       NULL);

	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * read_vpd() - obtains the WWPNs from VPD
 * @cfg:	Internal structure associated with the host.
 * @wwpn:	Array of size NUM_FC_PORTS to pass back WWPNs
 *
 * Return: 0 on success, -errno on failure
 */
static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
{
	struct pci_dev *dev = cfg->dev;
	int rc = 0;
	int ro_start, ro_size, i, j, k;
	ssize_t vpd_size;
	char vpd_data[CXLFLASH_VPD_LEN];
	char tmp_buf[WWPN_BUF_LEN] = { 0 };
	char *wwpn_vpd_tags[NUM_FC_PORTS] = { "V5", "V6" };

	/* Get the VPD data from the device */
	vpd_size = cxl_read_adapter_vpd(dev, vpd_data, sizeof(vpd_data));
	if (unlikely(vpd_size <= 0)) {
		dev_err(&dev->dev, "%s: Unable to read VPD (size = %ld)\n",
		       __func__, vpd_size);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read only section offset */
	ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
				    PCI_VPD_LRDT_RO_DATA);
	if (unlikely(ro_start < 0)) {
		dev_err(&dev->dev, "%s: VPD Read-only data not found\n",
			__func__);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read only section size, cap it if it extends beyond read VPD */
	ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
	j = ro_size;
	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
	if (unlikely((i + j) > vpd_size)) {
		pr_debug("%s: Might need to read more VPD (%d > %ld)\n",
			 __func__, (i + j), vpd_size);
		ro_size = vpd_size - i;
	}

	/*
	 * Find the offset of the WWPN tag within the read only
	 * VPD data and validate the found field (partials are
	 * no good to us). Convert the ASCII data to an integer
	 * value. Note that we must copy to a temporary buffer
	 * because the conversion service requires that the ASCII
	 * string be terminated.
	 */
	for (k = 0; k < NUM_FC_PORTS; k++) {
		j = ro_size;
		i = ro_start + PCI_VPD_LRDT_TAG_SIZE;

		i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
		if (unlikely(i < 0)) {
			dev_err(&dev->dev, "%s: Port %d WWPN not found "
				"in VPD\n", __func__, k);
			rc = -ENODEV;
			goto out;
		}

		j = pci_vpd_info_field_size(&vpd_data[i]);
		i += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
			dev_err(&dev->dev, "%s: Port %d WWPN incomplete or "
				"VPD corrupt\n",
			       __func__, k);
			rc = -ENODEV;
			goto out;
		}

		memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
		rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
		if (unlikely(rc)) {
			dev_err(&dev->dev, "%s: Fail to convert port %d WWPN "
				"to integer\n", __func__, k);
			rc = -ENODEV;
			goto out;
		}
	}

out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * init_pcr() - initialize the provisioning and control registers
 * @cfg:	Internal structure associated with the host.
 *
 * Also sets up fast access to the mapped registers and initializes AFU
 * command fields that never change.
 */
static void init_pcr(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct sisl_ctrl_map __iomem *ctrl_map;
	int i;

	for (i = 0; i < MAX_CONTEXT; i++) {
		ctrl_map = &afu->afu_map->ctrls[i].ctrl;
		/* Disrupt any clients that could be running */
		/* e.g. clients that survived a master restart */
		writeq_be(0, &ctrl_map->rht_start);
		writeq_be(0, &ctrl_map->rht_cnt_id);
		writeq_be(0, &ctrl_map->ctx_cap);
	}

	/* Copy frequently used fields into afu */
	afu->ctx_hndl = (u16) cxl_process_element(cfg->mcctx);
	afu->host_map = &afu->afu_map->hosts[afu->ctx_hndl].host;
	afu->ctrl_map = &afu->afu_map->ctrls[afu->ctx_hndl].ctrl;

	/* Program the Endian Control for the master context */
	writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl);

	/* Initialize cmd fields that never change */
	for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
		afu->cmd[i].rcb.ctx_id = afu->ctx_hndl;
		afu->cmd[i].rcb.msi = SISL_MSI_RRQ_UPDATED;
		afu->cmd[i].rcb.rrq = 0x0;
	}
}

/**
 * init_global() - initialize AFU global registers
 * @cfg:	Internal structure associated with the host.
 */
static int init_global(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	u64 wwpn[NUM_FC_PORTS];	/* wwpn of AFU ports */
	int i = 0, num_ports = 0;
	int rc = 0;
	u64 reg;

	rc = read_vpd(cfg, &wwpn[0]);
	if (rc) {
		dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
		goto out;
	}

	pr_debug("%s: wwpn0=0x%llX wwpn1=0x%llX\n", __func__, wwpn[0], wwpn[1]);

	/* Set up RRQ in AFU for master issued cmds */
	writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start);
	writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end);

	/* AFU configuration */
	reg = readq_be(&afu->afu_map->global.regs.afu_config);
	reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
	/*
	 * Enable all auto retry options and control endianness. Leave
	 * others at default: CTX_CAP write protected, mbox_r does not
	 * clear on read and checker on if dual afu.
	 */
	writeq_be(reg, &afu->afu_map->global.regs.afu_config);

	/* Global port select: select either port */
	if (afu->internal_lun) {
		/* Only use port 0 */
		writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
		num_ports = NUM_FC_PORTS - 1;
	} else {
		writeq_be(BOTH_PORTS, &afu->afu_map->global.regs.afu_port_sel);
		num_ports = NUM_FC_PORTS;
	}

	for (i = 0; i < num_ports; i++) {
		/* Unmask all errors (but they are still masked at AFU) */
		writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRMSK / 8]);
		/* Clear CRC error cnt & set a threshold */
		(void)readq_be(&afu->afu_map->global.
			       fc_regs[i][FC_CNT_CRCERR / 8]);
		writeq_be(MC_CRC_THRESH, &afu->afu_map->global.fc_regs[i]
			  [FC_CRC_THRESH / 8]);

		/* Set WWPNs. If already programmed, wwpn[i] is 0 */
		if (wwpn[i] != 0 &&
		    afu_set_wwpn(afu, i,
				 &afu->afu_map->global.fc_regs[i][0],
				 wwpn[i])) {
			dev_err(dev, "%s: failed to set WWPN on port %d\n",
			       __func__, i);
			rc = -EIO;
			goto out;
		}
		/* Programming WWPN back to back causes additional
		 * offline/online transitions and a PLOGI
		 */
		msleep(100);
	}

	/*
	 * Set up master's own CTX_CAP to allow real mode, host translation
	 * tables, afu cmds and read/write GSCSI cmds. First, unlock ctx_cap
	 * write by reading mbox.
	 */
	(void)readq_be(&afu->ctrl_map->mbox_r);	/* unlock ctx_cap */
	writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
		   SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
		   SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
		  &afu->ctrl_map->ctx_cap);
	/* Initialize heartbeat */
	afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);

out:
	return rc;
}

/**
 * start_afu() - initializes and starts the AFU
 * @cfg:	Internal structure associated with the host.
 */
static int start_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct afu_cmd *cmd;

	int i = 0;
	int rc = 0;

	for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
		cmd = &afu->cmd[i];

		init_completion(&cmd->cevent);
		spin_lock_init(&cmd->slock);
		cmd->parent = afu;
	}

	init_pcr(cfg);

	/* After an AFU reset, RRQ entries are stale, clear them */
	memset(&afu->rrq_entry, 0, sizeof(afu->rrq_entry));

	/* Initialize RRQ pointers */
	afu->hrrq_start = &afu->rrq_entry[0];
	afu->hrrq_end = &afu->rrq_entry[NUM_RRQ_ENTRY - 1];
	afu->hrrq_curr = afu->hrrq_start;
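	/* The AFU posts its first pass of RRQ entries with the toggle bit set */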
	afu->toggle = 1;

	rc = init_global(cfg);

	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * init_mc() - create and register as the master context
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_mc(struct cxlflash_cfg *cfg)
{
	struct cxl_context *ctx;
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	int rc = 0;
	enum undo_level level;

	ctx = cxl_get_context(cfg->dev);
	if (unlikely(!ctx))
		return -ENOMEM;
	cfg->mcctx = ctx;

	/* Set it up as a master with the CXL */
	cxl_set_master(ctx);

	/* During initialization reset the AFU to start from a clean slate */
	rc = cxl_afu_reset(cfg->mcctx);
	if (unlikely(rc)) {
		dev_err(dev, "%s: initial AFU reset failed rc=%d\n",
			__func__, rc);
		level = RELEASE_CONTEXT;
		goto out;
	}

	rc = cxl_allocate_afu_irqs(ctx, 3);
	if (unlikely(rc)) {
		dev_err(dev, "%s: call to allocate_afu_irqs failed rc=%d!\n",
			__func__, rc);
		level = RELEASE_CONTEXT;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, afu,
			     "SISL_MSI_SYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: IRQ 1 (SISL_MSI_SYNC_ERROR) map failed!\n",
			__func__);
		level = FREE_IRQ;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, afu,
			     "SISL_MSI_RRQ_UPDATED");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: IRQ 2 (SISL_MSI_RRQ_UPDATED) map failed!\n",
			__func__);
		level = UNMAP_ONE;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, afu,
			     "SISL_MSI_ASYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: IRQ 3 (SISL_MSI_ASYNC_ERROR) map failed!\n",
			__func__);
		level = UNMAP_TWO;
		goto out;
	}

	rc = 0;

	/* This performs the equivalent of the CXL_IOCTL_START_WORK.
	 * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
	 * element (pe) that is embedded in the context (ctx)
	 */
	rc = start_context(cfg);
	if (unlikely(rc)) {
		dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
		level = UNMAP_THREE;
		goto out;
	}
ret:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
out:
	term_mc(cfg, level);
	goto ret;
}

/**
 * init_afu() - setup as master context and start AFU
 * @cfg:	Internal structure associated with the host.
 *
 * This routine is a higher level of control for configuring the
 * AFU on probe and reset paths.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_afu(struct cxlflash_cfg *cfg)
{
	u64 reg;
	int rc = 0;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;

	cxl_perst_reloads_same_image(cfg->cxl_afu, true);

	rc = init_mc(cfg);
	if (rc) {
		dev_err(dev, "%s: call to init_mc failed, rc=%d!\n",
			__func__, rc);
		goto out;
	}

	/* Map the entire MMIO space of the AFU */
	afu->afu_map = cxl_psa_map(cfg->mcctx);
	if (!afu->afu_map) {
		dev_err(dev, "%s: call to cxl_psa_map failed!\n", __func__);
		rc = -ENOMEM;
		goto err1;
	}
	kref_init(&afu->mapcount);

	/* No byte reverse on reading afu_version or string will be backwards */
	reg = readq(&afu->afu_map->global.regs.afu_version);
	memcpy(afu->version, &reg, sizeof(reg));
	afu->interface_version =
	    readq_be(&afu->afu_map->global.regs.interface_version);
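	/* An interface version of all 1's denotes a back level AFU */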
	if ((afu->interface_version + 1) == 0) {
		pr_err("Back level AFU, please upgrade. AFU version %s "
		       "interface version 0x%llx\n", afu->version,
		       afu->interface_version);
		rc = -EINVAL;
		goto err2;
	}

	pr_debug("%s: afu version %s, interface version 0x%llX\n", __func__,
		 afu->version, afu->interface_version);

	rc = start_afu(cfg);
	if (rc) {
		dev_err(dev, "%s: call to start_afu failed, rc=%d!\n",
			__func__, rc);
		goto err2;
	}

	afu_err_intr_init(cfg->afu);
	atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));

	/* Restore the LUN mappings */
	cxlflash_restore_luntable(cfg);
out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;

err2:
	kref_put(&afu->mapcount, afu_unmap);
err1:
	term_mc(cfg, UNDO_START);
	goto out;
}

/**
 * cxlflash_afu_sync() - builds and sends an AFU sync command
 * @afu:	AFU associated with the host.
 * @ctx_hndl_u:	Identifies context requesting sync.
 * @res_hndl_u:	Identifies resource requesting sync.
 * @mode:	Type of sync to issue (lightweight, heavyweight, global).
 *
 * The AFU can only take 1 sync command at a time. This routine enforces this
 * limitation by using a mutex to provide exclusive access to the AFU during
 * the sync. This design point requires calling threads to not be on interrupt
 * context due to the possibility of sleeping during concurrent sync operations.
 *
 * AFU sync operations are only necessary and allowed when the device is
 * operating normally. When not operating normally, sync requests can occur as
 * part of cleaning up resources associated with an adapter prior to removal.
 * In this scenario, these requests are simply ignored (safe due to the AFU
 * going away).
 *
1776  * Return:
1777  *	0 on success
1778  *	-1 on failure
1779  */
1780 int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
1781 		      res_hndl_t res_hndl_u, u8 mode)
1782 {
1783 	struct cxlflash_cfg *cfg = afu->parent;
1784 	struct device *dev = &cfg->dev->dev;
1785 	struct afu_cmd *cmd = NULL;
1786 	int rc = 0;
1787 	int retry_cnt = 0;
1788 	static DEFINE_MUTEX(sync_active);
1789 
1790 	if (cfg->state != STATE_NORMAL) {
1791 		pr_debug("%s: Sync not required! (%u)\n", __func__, cfg->state);
1792 		return 0;
1793 	}
1794 
1795 	mutex_lock(&sync_active);
1796 retry:
1797 	cmd = cmd_checkout(afu);
1798 	if (unlikely(!cmd)) {
1799 		retry_cnt++;
1800 		msleep(retry_cnt);	/* sleeping is safe, see kernel-doc above */
1801 		if (retry_cnt < MC_RETRY_CNT)
1802 			goto retry;
1803 		dev_err(dev, "%s: could not get a free command\n", __func__);
1804 		rc = -1;
1805 		goto out;
1806 	}
1807 
1808 	pr_debug("%s: afu=%p cmd=%p ctx=%d\n", __func__, afu, cmd, ctx_hndl_u);
1809 
1810 	memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
1811 
1812 	cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
1813 	cmd->rcb.port_sel = 0x0;	/* NA */
1814 	cmd->rcb.lun_id = 0x0;	/* NA */
1815 	cmd->rcb.data_len = 0x0;
1816 	cmd->rcb.data_ea = 0x0;
1817 	cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT;
1818 
1819 	cmd->rcb.cdb[0] = 0xC0;	/* AFU Sync */
1820 	cmd->rcb.cdb[1] = mode;
1821 
1822 	/* The cdb is aligned, no unaligned accessors required */
1823 	*((__be16 *)&cmd->rcb.cdb[2]) = cpu_to_be16(ctx_hndl_u);
1824 	*((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u);
1825 
1826 	rc = send_cmd(afu, cmd);
1827 	if (unlikely(rc))
1828 		goto out;
1829 
1830 	wait_resp(afu, cmd);
1831 
1832 	/* Set on timeout */
1833 	if (unlikely((cmd->sa.ioasc != 0) ||
1834 		     (cmd->sa.host_use_b[0] & B_ERROR)))
1835 		rc = -1;
1836 out:
1837 	mutex_unlock(&sync_active);
1838 	if (cmd)
1839 		cmd_checkin(cmd);
1840 	pr_debug("%s: returning rc=%d\n", __func__, rc);
1841 	return rc;
1842 }
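
/*
 * Illustrative sketch (not part of the driver): a hypothetical caller
 * issuing a lightweight sync after updating a translation entry. The
 * AFU_LW_SYNC mode value is assumed to be provided by sislite.h alongside
 * the heavyweight and global variants named in the kernel-doc above.
 *
 *	static int example_sync_after_update(struct afu *afu,
 *					     ctx_hndl_t ctxid,
 *					     res_hndl_t rhndl)
 *	{
 *		// ... update LUN translation for (ctxid, rhndl) here ...
 *
 *		if (cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC))
 *			return -EIO;	// sync failed or timed out
 *		return 0;
 *	}
 */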
1843 
1844 /**
1845  * afu_reset() - resets the AFU
1846  * @cfg:	Internal structure associated with the host.
1847  *
1848  * Return: 0 on success, -errno on failure
1849  */
1850 static int afu_reset(struct cxlflash_cfg *cfg)
1851 {
1852 	int rc = 0;
1853 	/*
1854 	 * Stop the context before the reset. Since the context is no longer
1855 	 * available, restart it after the reset completes.
1856 	 */
1857 	term_afu(cfg);
1858 
1859 	rc = init_afu(cfg);
1860 
1861 	pr_debug("%s: returning rc=%d\n", __func__, rc);
1862 	return rc;
1863 }
1864 
1865 /**
1866  * cxlflash_eh_device_reset_handler() - reset a single LUN
1867  * @scp:	SCSI command to send.
1868  *
1869  * Return:
1870  *	SUCCESS as defined in scsi/scsi.h
1871  *	FAILED as defined in scsi/scsi.h
1872  */
1873 static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
1874 {
1875 	int rc = SUCCESS;
1876 	struct Scsi_Host *host = scp->device->host;
1877 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
1878 	struct afu *afu = cfg->afu;
1879 	int rcr = 0;
1880 
1881 	pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
1882 		 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
1883 		 host->host_no, scp->device->channel,
1884 		 scp->device->id, scp->device->lun,
1885 		 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
1886 		 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
1887 		 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
1888 		 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
1889 
1890 retry:
1891 	switch (cfg->state) {
1892 	case STATE_NORMAL:
1893 		rcr = send_tmf(afu, scp, TMF_LUN_RESET);
1894 		if (unlikely(rcr))
1895 			rc = FAILED;
1896 		break;
1897 	case STATE_RESET:
1898 		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
1899 		goto retry;
1900 	default:
1901 		rc = FAILED;
1902 		break;
1903 	}
1904 
1905 	pr_debug("%s: returning rc=%d\n", __func__, rc);
1906 	return rc;
1907 }
1908 
1909 /**
1910  * cxlflash_eh_host_reset_handler() - reset the host adapter
1911  * @scp:	SCSI command from stack identifying host.
1912  *
1913  * Return:
1914  *	SUCCESS as defined in scsi/scsi.h
1915  *	FAILED as defined in scsi/scsi.h
1916  */
1917 static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
1918 {
1919 	int rc = SUCCESS;
1920 	int rcr = 0;
1921 	struct Scsi_Host *host = scp->device->host;
1922 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
1923 
1924 	pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
1925 		 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
1926 		 host->host_no, scp->device->channel,
1927 		 scp->device->id, scp->device->lun,
1928 		 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
1929 		 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
1930 		 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
1931 		 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
1932 
1933 	switch (cfg->state) {
1934 	case STATE_NORMAL:
1935 		cfg->state = STATE_RESET;
1936 		cxlflash_mark_contexts_error(cfg);
1937 		rcr = afu_reset(cfg);
1938 		if (rcr) {
1939 			rc = FAILED;
1940 			cfg->state = STATE_FAILTERM;
1941 		} else
1942 			cfg->state = STATE_NORMAL;
1943 		wake_up_all(&cfg->reset_waitq);
1944 		break;
1945 	case STATE_RESET:
1946 		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
1947 		if (cfg->state == STATE_NORMAL)
1948 			break;
1949 		/* fall through */
1950 	default:
1951 		rc = FAILED;
1952 		break;
1953 	}
1954 
1955 	pr_debug("%s: returning rc=%d\n", __func__, rc);
1956 	return rc;
1957 }
1958 
1959 /**
1960  * cxlflash_change_queue_depth() - change the queue depth for the device
1961  * @sdev:	SCSI device destined for queue depth change.
1962  * @qdepth:	Requested queue depth value to set.
1963  *
1964  * The requested queue depth is capped to the maximum supported value.
1965  *
1966  * Return: The actual queue depth set.
1967  */
1968 static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
1969 {
1970 
1972 		qdepth = CXLFLASH_MAX_CMDS_PER_LUN;
1973 
1974 	scsi_change_queue_depth(sdev, qdepth);
1975 	return sdev->queue_depth;
1976 }
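
/*
 * Example (illustrative, hypothetical H:C:T:L): the midlayer invokes this
 * callback when the sysfs queue_depth attribute of a device is written;
 * requests above CXLFLASH_MAX_CMDS_PER_LUN are silently capped.
 *
 *	# echo 64 > /sys/bus/scsi/devices/1:0:0:0/queue_depth
 */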
1977 
1978 /**
1979  * cxlflash_show_port_status() - queries and presents the current port status
1980  * @port:	Desired port for status reporting.
1981  * @afu:	AFU owning the specified port.
1982  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
1983  *
1984  * Return: The size of the ASCII string returned in @buf.
1985  */
1986 static ssize_t cxlflash_show_port_status(u32 port, struct afu *afu, char *buf)
1987 {
1988 	char *disp_status;
1989 	u64 status;
1990 	__be64 __iomem *fc_regs;
1991 
1992 	if (port >= NUM_FC_PORTS)
1993 		return 0;
1994 
1995 	fc_regs = &afu->afu_map->global.fc_regs[port][0];
1996 	status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
1997 	status &= FC_MTIP_STATUS_MASK;
1998 
1999 	if (status == FC_MTIP_STATUS_ONLINE)
2000 		disp_status = "online";
2001 	else if (status == FC_MTIP_STATUS_OFFLINE)
2002 		disp_status = "offline";
2003 	else
2004 		disp_status = "unknown";
2005 
2006 	return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
2007 }
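
/*
 * Example (illustrative, hypothetical host number): the port0/port1 host
 * attributes defined further below funnel through this helper.
 *
 *	# cat /sys/class/scsi_host/host1/port0
 *	online
 */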
2008 
2009 /**
2010  * port0_show() - queries and presents the current status of port 0
2011  * @dev:	Generic device associated with the host owning the port.
2012  * @attr:	Device attribute representing the port.
2013  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2014  *
2015  * Return: The size of the ASCII string returned in @buf.
2016  */
2017 static ssize_t port0_show(struct device *dev,
2018 			  struct device_attribute *attr,
2019 			  char *buf)
2020 {
2021 	struct Scsi_Host *shost = class_to_shost(dev);
2022 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
2023 	struct afu *afu = cfg->afu;
2024 
2025 	return cxlflash_show_port_status(0, afu, buf);
2026 }
2027 
2028 /**
2029  * port1_show() - queries and presents the current status of port 1
2030  * @dev:	Generic device associated with the host owning the port.
2031  * @attr:	Device attribute representing the port.
2032  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2033  *
2034  * Return: The size of the ASCII string returned in @buf.
2035  */
2036 static ssize_t port1_show(struct device *dev,
2037 			  struct device_attribute *attr,
2038 			  char *buf)
2039 {
2040 	struct Scsi_Host *shost = class_to_shost(dev);
2041 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
2042 	struct afu *afu = cfg->afu;
2043 
2044 	return cxlflash_show_port_status(1, afu, buf);
2045 }
2046 
2047 /**
2048  * lun_mode_show() - presents the current LUN mode of the host
2049  * @dev:	Generic device associated with the host.
2050  * @attr:	Device attribute representing the LUN mode.
2051  * @buf:	Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
2052  *
2053  * Return: The size of the ASCII string returned in @buf.
2054  */
2055 static ssize_t lun_mode_show(struct device *dev,
2056 			     struct device_attribute *attr, char *buf)
2057 {
2058 	struct Scsi_Host *shost = class_to_shost(dev);
2059 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
2060 	struct afu *afu = cfg->afu;
2061 
2062 	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
2063 }
2064 
2065 /**
2066  * lun_mode_store() - sets the LUN mode of the host
2067  * @dev:	Generic device associated with the host.
2068  * @attr:	Device attribute representing the LUN mode.
2069  * @buf:	Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
2070  * @count:	Length of data residing in @buf.
2071  *
2072  * The CXL Flash AFU supports a dummy LUN mode where the external
2073  * links and storage are not required. Space on the FPGA is used
2074  * to create 1 or 2 small LUNs, which are presented to the system
2075  * as if they were a normal storage device. This feature is useful
2076  * during development and also provides manufacturing with a way
2077  * to test the AFU without an actual device.
2078  *
2079  * 0 = external LUN[s] (default)
2080  * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
2081  * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
2082  * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
2083  * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
2084  *
2085  * Return: @count, as the entire buffer is always consumed.
2086  */
2087 static ssize_t lun_mode_store(struct device *dev,
2088 			      struct device_attribute *attr,
2089 			      const char *buf, size_t count)
2090 {
2091 	struct Scsi_Host *shost = class_to_shost(dev);
2092 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
2093 	struct afu *afu = cfg->afu;
2094 	int rc;
2095 	u32 lun_mode;
2096 
2097 	rc = kstrtouint(buf, 10, &lun_mode);
2098 	if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
2099 		afu->internal_lun = lun_mode;
2100 
2101 		/*
2102 		 * When configured for internal LUN, there is only one channel
2103 		 * (channel 0); otherwise there are two (the default).
2104 		 */
2105 		if (afu->internal_lun)
2106 			shost->max_channel = 0;
2107 		else
2108 			shost->max_channel = NUM_FC_PORTS - 1;
2109 
2110 		afu_reset(cfg);
2111 		scsi_scan_host(cfg->host);
2112 	}
2113 
2114 	return count;
2115 }
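
/*
 * Example (illustrative, hypothetical host number): selecting one internal
 * LUN and then reverting to external LUNs. Per the code above, each
 * accepted write resets the AFU and rescans the host.
 *
 *	# echo 1 > /sys/class/scsi_host/host1/lun_mode
 *	# echo 0 > /sys/class/scsi_host/host1/lun_mode
 */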
2116 
2117 /**
2118  * ioctl_version_show() - presents the current ioctl version of the host
2119  * @dev:	Generic device associated with the host.
2120  * @attr:	Device attribute representing the ioctl version.
2121  * @buf:	Buffer of length PAGE_SIZE to report back the ioctl version.
2122  *
2123  * Return: The size of the ASCII string returned in @buf.
2124  */
2125 static ssize_t ioctl_version_show(struct device *dev,
2126 				  struct device_attribute *attr, char *buf)
2127 {
2128 	return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0);
2129 }
2130 
2131 /**
2132  * cxlflash_show_port_lun_table() - queries and presents the port LUN table
2133  * @port:	Desired port for status reporting.
2134  * @afu:	AFU owning the specified port.
2135  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2136  *
2137  * Return: The size of the ASCII string returned in @buf.
2138  */
2139 static ssize_t cxlflash_show_port_lun_table(u32 port,
2140 					    struct afu *afu,
2141 					    char *buf)
2142 {
2143 	int i;
2144 	ssize_t bytes = 0;
2145 	__be64 __iomem *fc_port;
2146 
2147 	if (port >= NUM_FC_PORTS)
2148 		return 0;
2149 
2150 	fc_port = &afu->afu_map->global.fc_port[port][0];
2151 
2152 	for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
2153 		bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
2154 				   "%03d: %016llX\n", i, readq_be(&fc_port[i]));
2155 	return bytes;
2156 }
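
/*
 * Illustrative output (fabricated values): one line per table slot, the
 * %03d index followed by the 64-bit entry read via readq_be().
 *
 *	000: 4000400000000000
 *	001: 0000000000000000
 */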
2157 
2158 /**
2159  * port0_lun_table_show() - presents the current LUN table of port 0
2160  * @dev:	Generic device associated with the host owning the port.
2161  * @attr:	Device attribute representing the port.
2162  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2163  *
2164  * Return: The size of the ASCII string returned in @buf.
2165  */
2166 static ssize_t port0_lun_table_show(struct device *dev,
2167 				    struct device_attribute *attr,
2168 				    char *buf)
2169 {
2170 	struct Scsi_Host *shost = class_to_shost(dev);
2171 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
2172 	struct afu *afu = cfg->afu;
2173 
2174 	return cxlflash_show_port_lun_table(0, afu, buf);
2175 }
2176 
2177 /**
2178  * port1_lun_table_show() - presents the current LUN table of port 1
2179  * @dev:	Generic device associated with the host owning the port.
2180  * @attr:	Device attribute representing the port.
2181  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2182  *
2183  * Return: The size of the ASCII string returned in @buf.
2184  */
2185 static ssize_t port1_lun_table_show(struct device *dev,
2186 				    struct device_attribute *attr,
2187 				    char *buf)
2188 {
2189 	struct Scsi_Host *shost = class_to_shost(dev);
2190 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
2191 	struct afu *afu = cfg->afu;
2192 
2193 	return cxlflash_show_port_lun_table(1, afu, buf);
2194 }
2195 
2196 /**
2197  * mode_show() - presents the current mode of the device
2198  * @dev:	Generic device associated with the device.
2199  * @attr:	Device attribute representing the device mode.
2200  * @buf:	Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
2201  *
2202  * Return: The size of the ASCII string returned in @buf.
2203  */
2204 static ssize_t mode_show(struct device *dev,
2205 			 struct device_attribute *attr, char *buf)
2206 {
2207 	struct scsi_device *sdev = to_scsi_device(dev);
2208 
2209 	return scnprintf(buf, PAGE_SIZE, "%s\n",
2210 			 sdev->hostdata ? "superpipe" : "legacy");
2211 }
2212 
2213 /*
2214  * Host attributes
2215  */
2216 static DEVICE_ATTR_RO(port0);
2217 static DEVICE_ATTR_RO(port1);
2218 static DEVICE_ATTR_RW(lun_mode);
2219 static DEVICE_ATTR_RO(ioctl_version);
2220 static DEVICE_ATTR_RO(port0_lun_table);
2221 static DEVICE_ATTR_RO(port1_lun_table);
2222 
2223 static struct device_attribute *cxlflash_host_attrs[] = {
2224 	&dev_attr_port0,
2225 	&dev_attr_port1,
2226 	&dev_attr_lun_mode,
2227 	&dev_attr_ioctl_version,
2228 	&dev_attr_port0_lun_table,
2229 	&dev_attr_port1_lun_table,
2230 	NULL
2231 };
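
/*
 * Note: DEVICE_ATTR_RO(port0) creates dev_attr_port0 bound to port0_show();
 * DEVICE_ATTR_RW(lun_mode) binds both lun_mode_show() and lun_mode_store().
 * The NULL sentinel terminates the array for the midlayer.
 */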
2232 
2233 /*
2234  * Device attributes
2235  */
2236 static DEVICE_ATTR_RO(mode);
2237 
2238 static struct device_attribute *cxlflash_dev_attrs[] = {
2239 	&dev_attr_mode,
2240 	NULL
2241 };
2242 
2243 /*
2244  * Host template
2245  */
2246 static struct scsi_host_template driver_template = {
2247 	.module = THIS_MODULE,
2248 	.name = CXLFLASH_ADAPTER_NAME,
2249 	.info = cxlflash_driver_info,
2250 	.ioctl = cxlflash_ioctl,
2251 	.proc_name = CXLFLASH_NAME,
2252 	.queuecommand = cxlflash_queuecommand,
2253 	.eh_device_reset_handler = cxlflash_eh_device_reset_handler,
2254 	.eh_host_reset_handler = cxlflash_eh_host_reset_handler,
2255 	.change_queue_depth = cxlflash_change_queue_depth,
2256 	.cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
2257 	.can_queue = CXLFLASH_MAX_CMDS,
2258 	.this_id = -1,
2259 	.sg_tablesize = SG_NONE,	/* No scatter gather support */
2260 	.max_sectors = CXLFLASH_MAX_SECTORS,
2261 	.use_clustering = ENABLE_CLUSTERING,
2262 	.shost_attrs = cxlflash_host_attrs,
2263 	.sdev_attrs = cxlflash_dev_attrs,
2264 };
2265 
2266 /*
2267  * Device dependent values
2268  */
2269 static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS };
2270 static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS };
2271 
2272 /*
2273  * PCI device binding table
2274  */
2275 static struct pci_device_id cxlflash_pci_table[] = {
2276 	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
2277 	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
2278 	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
2279 	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
2280 	{}
2281 };
2282 
2283 MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
2284 
2285 /**
2286  * cxlflash_worker_thread() - work thread handler for the AFU
2287  * @work:	Work structure contained within cxlflash associated with host.
2288  *
2289  * Handles the following events:
2290  * - Link reset, which cannot run in interrupt context since it can
2291  *   block for up to a few seconds
2292  * - Read AFU command room
2293  * - Rescan the host
2294  */
2295 static void cxlflash_worker_thread(struct work_struct *work)
2296 {
2297 	struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
2298 						work_q);
2299 	struct afu *afu = cfg->afu;
2300 	struct device *dev = &cfg->dev->dev;
2301 	int port;
2302 	ulong lock_flags;
2303 
2304 	/* Avoid MMIO if the device has failed */
2306 	if (cfg->state != STATE_NORMAL)
2307 		return;
2308 
2309 	spin_lock_irqsave(cfg->host->host_lock, lock_flags);
2310 
2311 	if (cfg->lr_state == LINK_RESET_REQUIRED) {
2312 		port = cfg->lr_port;
2313 		if (port < 0) {
2314 			dev_err(dev, "%s: invalid port index %d\n",
2315 				__func__, port);
2316 		} else {
2317 			spin_unlock_irqrestore(cfg->host->host_lock,
2318 					       lock_flags);
2319 
2320 			/* The reset can block... */
2321 			afu_link_reset(afu, port,
2322 				       &afu->afu_map->global.fc_regs[port][0]);
2323 			spin_lock_irqsave(cfg->host->host_lock, lock_flags);
2324 		}
2325 
2326 		cfg->lr_state = LINK_RESET_COMPLETE;
2327 	}
2328 
2329 	if (afu->read_room) {
2330 		atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));
2331 		afu->read_room = false;
2332 	}
2333 
2334 	spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
2335 
2336 	if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
2337 		scsi_scan_host(cfg->host);
2338 	kref_put(&afu->mapcount, afu_unmap);
2339 }
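
/*
 * Sketch (illustrative): how an interrupt path elsewhere in the driver
 * would defer a blocking link reset to the worker above. The enclosing
 * handler is hypothetical; the fields and the mapcount reference mirror
 * what cxlflash_worker_thread() consumes and releases.
 *
 *	cfg->lr_state = LINK_RESET_REQUIRED;
 *	cfg->lr_port = port;		// 0 or 1
 *	kref_get(&afu->mapcount);	// dropped by the worker via afu_unmap
 *	schedule_work(&cfg->work_q);
 */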
2340 
2341 /**
2342  * cxlflash_probe() - PCI entry point to add host
2343  * @pdev:	PCI device associated with the host.
2344  * @dev_id:	PCI device id associated with device.
2345  *
2346  * Return: 0 on success, -errno on failure
2347  */
2348 static int cxlflash_probe(struct pci_dev *pdev,
2349 			  const struct pci_device_id *dev_id)
2350 {
2351 	struct Scsi_Host *host;
2352 	struct cxlflash_cfg *cfg = NULL;
2353 	struct dev_dependent_vals *ddv;
2354 	int rc = 0;
2355 
2356 	dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
2357 		__func__, pdev->irq);
2358 
2359 	ddv = (struct dev_dependent_vals *)dev_id->driver_data;
2360 	driver_template.max_sectors = ddv->max_sectors;
2361 
2362 	host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
2363 	if (!host) {
2364 		dev_err(&pdev->dev, "%s: call to scsi_host_alloc failed!\n",
2365 			__func__);
2366 		rc = -ENOMEM;
2367 		goto out;
2368 	}
2369 
2370 	host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
2371 	host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
2372 	host->max_channel = NUM_FC_PORTS - 1;
2373 	host->unique_id = host->host_no;
2374 	host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
2375 
2376 	cfg = (struct cxlflash_cfg *)host->hostdata;
2377 	cfg->host = host;
2378 	rc = alloc_mem(cfg);
2379 	if (rc) {
2380 		dev_err(&pdev->dev, "%s: call to alloc_mem failed!\n",
2381 			__func__);
2382 		rc = -ENOMEM;
2383 		scsi_host_put(cfg->host);
2384 		goto out;
2385 	}
2386 
2387 	cfg->init_state = INIT_STATE_NONE;
2388 	cfg->dev = pdev;
2389 	cfg->cxl_fops = cxlflash_cxl_fops;
2390 
2391 	/*
2392 	 * The promoted LUNs move to the top of the LUN table. The rest stay
2393 	 * on the bottom half. The bottom half grows from the end
2394 	 * (index = 255), whereas the top half grows from the beginning
2395 	 * (index = 0).
2396 	 */
2397 	cfg->promote_lun_index  = 0;
2398 	cfg->last_lun_index[0] = CXLFLASH_NUM_VLUNS/2 - 1;
2399 	cfg->last_lun_index[1] = CXLFLASH_NUM_VLUNS/2 - 1;
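	/*
	 * Example: promoting two LUNs would advance promote_lun_index to 2;
	 * appending one LUN to each port would step last_lun_index[0] and
	 * last_lun_index[1] down by one from the end of each table half.
	 */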
2400 
2401 	cfg->dev_id = (struct pci_device_id *)dev_id;
2402 
2403 	init_waitqueue_head(&cfg->tmf_waitq);
2404 	init_waitqueue_head(&cfg->reset_waitq);
2405 
2406 	INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
2407 	cfg->lr_state = LINK_RESET_INVALID;
2408 	cfg->lr_port = -1;
2409 	spin_lock_init(&cfg->tmf_slock);
2410 	mutex_init(&cfg->ctx_tbl_list_mutex);
2411 	mutex_init(&cfg->ctx_recovery_mutex);
2412 	init_rwsem(&cfg->ioctl_rwsem);
2413 	INIT_LIST_HEAD(&cfg->ctx_err_recovery);
2414 	INIT_LIST_HEAD(&cfg->lluns);
2415 
2416 	pci_set_drvdata(pdev, cfg);
2417 
2418 	cfg->cxl_afu = cxl_pci_to_afu(pdev);
2419 
2420 	rc = init_pci(cfg);
2421 	if (rc) {
2422 		dev_err(&pdev->dev, "%s: call to init_pci failed rc=%d!\n",
2423 			__func__, rc);
2424 		goto out_remove;
2425 	}
2426 	cfg->init_state = INIT_STATE_PCI;
2427 
2428 	rc = init_afu(cfg);
2429 	if (rc) {
2430 		dev_err(&pdev->dev, "%s: call to init_afu failed rc=%d!\n",
2431 			__func__, rc);
2432 		goto out_remove;
2433 	}
2434 	cfg->init_state = INIT_STATE_AFU;
2435 
2436 	rc = init_scsi(cfg);
2437 	if (rc) {
2438 		dev_err(&pdev->dev, "%s: call to init_scsi failed rc=%d!\n",
2439 			__func__, rc);
2440 		goto out_remove;
2441 	}
2442 	cfg->init_state = INIT_STATE_SCSI;
2443 
2444 out:
2445 	pr_debug("%s: returning rc=%d\n", __func__, rc);
2446 	return rc;
2447 
2448 out_remove:
2449 	cxlflash_remove(pdev);
2450 	goto out;
2451 }
2452 
2453 /**
2454  * drain_ioctls() - wait until all currently executing ioctls have completed
2455  * @cfg:	Internal structure associated with the host.
2456  *
2457  * Obtain write access to the read/write semaphore that wraps ioctl
2458  * handling in order to 'drain' any ioctls currently executing.
2459  */
2460 static void drain_ioctls(struct cxlflash_cfg *cfg)
2461 {
2462 	down_write(&cfg->ioctl_rwsem);
2463 	up_write(&cfg->ioctl_rwsem);
2464 }
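
/*
 * Sketch (illustrative): the ioctl entry points hold the read side for the
 * duration of each call, so the write acquisition above cannot complete
 * until every in-flight ioctl has returned:
 *
 *	down_read(&cfg->ioctl_rwsem);
 *	rc = handle_one_ioctl(...);	// hypothetical helper
 *	up_read(&cfg->ioctl_rwsem);
 */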
2465 
2466 /**
2467  * cxlflash_pci_error_detected() - called when a PCI error is detected
2468  * @pdev:	PCI device struct.
2469  * @state:	PCI channel state.
2470  *
2471  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
2472  */
2473 static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
2474 						    pci_channel_state_t state)
2475 {
2476 	int rc = 0;
2477 	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
2478 	struct device *dev = &cfg->dev->dev;
2479 
2480 	dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);
2481 
2482 	switch (state) {
2483 	case pci_channel_io_frozen:
2484 		cfg->state = STATE_RESET;
2485 		scsi_block_requests(cfg->host);
2486 		drain_ioctls(cfg);
2487 		rc = cxlflash_mark_contexts_error(cfg);
2488 		if (unlikely(rc))
2489 			dev_err(dev, "%s: Failed to mark user contexts!(%d)\n",
2490 				__func__, rc);
2491 		stop_afu(cfg);
2492 		term_mc(cfg, UNDO_START);
2493 		return PCI_ERS_RESULT_NEED_RESET;
2494 	case pci_channel_io_perm_failure:
2495 		cfg->state = STATE_FAILTERM;
2496 		wake_up_all(&cfg->reset_waitq);
2497 		scsi_unblock_requests(cfg->host);
2498 		return PCI_ERS_RESULT_DISCONNECT;
2499 	default:
2500 		break;
2501 	}
2502 	return PCI_ERS_RESULT_NEED_RESET;
2503 }
2504 
2505 /**
2506  * cxlflash_pci_slot_reset() - called when PCI slot has been reset
2507  * @pdev:	PCI device struct.
2508  *
2509  * This routine is called by the pci error recovery code after the PCI
2510  * slot has been reset, just before we should resume normal operations.
2511  *
2512  * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
2513  */
2514 static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
2515 {
2516 	int rc = 0;
2517 	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
2518 	struct device *dev = &cfg->dev->dev;
2519 
2520 	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
2521 
2522 	rc = init_afu(cfg);
2523 	if (unlikely(rc)) {
2524 		dev_err(dev, "%s: EEH recovery failed! (%d)\n", __func__, rc);
2525 		return PCI_ERS_RESULT_DISCONNECT;
2526 	}
2527 
2528 	return PCI_ERS_RESULT_RECOVERED;
2529 }
2530 
2531 /**
2532  * cxlflash_pci_resume() - called when normal operation can resume
2533  * @pdev:	PCI device struct
2534  */
2535 static void cxlflash_pci_resume(struct pci_dev *pdev)
2536 {
2537 	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
2538 	struct device *dev = &cfg->dev->dev;
2539 
2540 	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
2541 
2542 	cfg->state = STATE_NORMAL;
2543 	wake_up_all(&cfg->reset_waitq);
2544 	scsi_unblock_requests(cfg->host);
2545 }
2546 
2547 static const struct pci_error_handlers cxlflash_err_handler = {
2548 	.error_detected = cxlflash_pci_error_detected,
2549 	.slot_reset = cxlflash_pci_slot_reset,
2550 	.resume = cxlflash_pci_resume,
2551 };
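
/*
 * EEH recovery flow: .error_detected quiesces the AFU on a frozen channel
 * and asks for a reset; .slot_reset re-initializes the AFU once the slot
 * returns; .resume marks the device normal, wakes reset_waitq waiters and
 * unblocks SCSI requests.
 */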
2552 
2553 /*
2554  * PCI device structure
2555  */
2556 static struct pci_driver cxlflash_driver = {
2557 	.name = CXLFLASH_NAME,
2558 	.id_table = cxlflash_pci_table,
2559 	.probe = cxlflash_probe,
2560 	.remove = cxlflash_remove,
2561 	.err_handler = &cxlflash_err_handler,
2562 };
2563 
2564 /**
2565  * init_cxlflash() - module entry point
2566  *
2567  * Return: 0 on success, -errno on failure
2568  */
2569 static int __init init_cxlflash(void)
2570 {
2571 	pr_info("%s: %s\n", __func__, CXLFLASH_ADAPTER_NAME);
2572 
2573 	cxlflash_list_init();
2574 
2575 	return pci_register_driver(&cxlflash_driver);
2576 }
2577 
2578 /**
2579  * exit_cxlflash() - module exit point
2580  */
2581 static void __exit exit_cxlflash(void)
2582 {
2583 	cxlflash_term_global_luns();
2584 	cxlflash_free_errpage();
2585 
2586 	pci_unregister_driver(&cxlflash_driver);
2587 }
2588 
2589 module_init(init_cxlflash);
2590 module_exit(exit_cxlflash);
2591