xref: /openbmc/linux/drivers/scsi/cxlflash/main.c (revision 0317cd52)
1 /*
2  * CXL Flash Device Driver
3  *
4  * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
5  *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
6  *
7  * Copyright (C) 2015 IBM Corporation
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License
11  * as published by the Free Software Foundation; either version
12  * 2 of the License, or (at your option) any later version.
13  */
14 
15 #include <linux/delay.h>
16 #include <linux/list.h>
17 #include <linux/module.h>
18 #include <linux/pci.h>
19 
20 #include <asm/unaligned.h>
21 
22 #include <misc/cxl.h>
23 
24 #include <scsi/scsi_cmnd.h>
25 #include <scsi/scsi_host.h>
26 #include <uapi/scsi/cxlflash_ioctl.h>
27 
28 #include "main.h"
29 #include "sislite.h"
30 #include "common.h"
31 
32 MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
33 MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
34 MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
35 MODULE_LICENSE("GPL");
36 
37 /**
38  * cmd_checkout() - checks out an AFU command
39  * @afu:	AFU to checkout from.
40  *
41  * Commands are checked out in a round-robin fashion. Note that since
42  * the command pool is larger than the hardware queue, the majority of
43  * times we will only loop once or twice before getting a command. The
44  * buffer and CDB within the command are initialized (zeroed) prior to
45  * returning.
46  *
47  * Return: The checked out command or NULL when command pool is empty.
48  */
49 static struct afu_cmd *cmd_checkout(struct afu *afu)
50 {
51 	int k, dec = CXLFLASH_NUM_CMDS;
52 	struct afu_cmd *cmd;
53 
54 	while (dec--) {
55 		k = (afu->cmd_couts++ & (CXLFLASH_NUM_CMDS - 1));
56 
57 		cmd = &afu->cmd[k];
58 
59 		if (!atomic_dec_if_positive(&cmd->free)) {
60 			pr_devel("%s: returning found index=%d cmd=%p\n",
61 				 __func__, cmd->slot, cmd);
62 			memset(cmd->buf, 0, CMD_BUFSIZE);
63 			memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
64 			return cmd;
65 		}
66 	}
67 
68 	return NULL;
69 }
70 
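/*
 * Editorial aside (not driver code): the "& (CXLFLASH_NUM_CMDS - 1)"
 * in cmd_checkout() is a cheap modulo that only works because the pool
 * size is a power of two. A minimal sketch of the idiom, using the
 * hypothetical names POOL_SIZE and pool_next:
 */
#if 0
#define POOL_SIZE 16u			/* must be a power of two */

static unsigned int pool_next;		/* free-running checkout counter */

static unsigned int next_slot(void)
{
	/* For power-of-two N, (x & (N - 1)) == (x % N) */
	return pool_next++ & (POOL_SIZE - 1);
}
#endif
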
71 /**
72  * cmd_checkin() - checks in an AFU command
73  * @cmd:	AFU command to checkin.
74  *
75  * Safe to pass commands that have already been checked in. Several
76  * internal tracking fields are reset as part of the checkin. Note
77  * that these are intentionally reset prior to toggling the free bit
78  * to avoid clobbering values in the event that the command is checked
79  * out right away.
80  */
81 static void cmd_checkin(struct afu_cmd *cmd)
82 {
83 	cmd->rcb.scp = NULL;
84 	cmd->rcb.timeout = 0;
85 	cmd->sa.ioasc = 0;
86 	cmd->cmd_tmf = false;
87 	cmd->sa.host_use[0] = 0; /* clears both completion and retry bytes */
88 
89 	if (unlikely(atomic_inc_return(&cmd->free) != 1)) {
90 		pr_err("%s: Freeing cmd (%d) that is not in use!\n",
91 		       __func__, cmd->slot);
92 		return;
93 	}
94 
95 	pr_devel("%s: released cmd %p index=%d\n", __func__, cmd, cmd->slot);
96 }
97 
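/*
 * Editorial aside (not driver code): cmd->free behaves as a one-slot
 * semaphore. atomic_dec_if_positive() in cmd_checkout() claims the
 * slot and the atomic_inc_return() above releases it, which is why the
 * tracking fields must be reset before the increment. A hedged sketch
 * of the intended pairing at a call site:
 */
#if 0
	struct afu_cmd *cmd = cmd_checkout(afu);

	if (unlikely(!cmd))
		return SCSI_MLQUEUE_HOST_BUSY;	/* pool empty, try later */

	/* ... fill in cmd->rcb and issue the command ... */

	cmd_checkin(cmd);			/* release the slot */
#endif
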
98 /**
99  * process_cmd_err() - command error handler
100  * @cmd:	AFU command that experienced the error.
101  * @scp:	SCSI command associated with the AFU command in error.
102  *
103  * Translates error bits from AFU command to SCSI command results.
104  */
105 static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
106 {
107 	struct sisl_ioarcb *ioarcb;
108 	struct sisl_ioasa *ioasa;
109 	u32 resid;
110 
111 	if (unlikely(!cmd))
112 		return;
113 
114 	ioarcb = &(cmd->rcb);
115 	ioasa = &(cmd->sa);
116 
117 	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
118 		resid = ioasa->resid;
119 		scsi_set_resid(scp, resid);
120 		pr_debug("%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
121 			 __func__, cmd, scp, resid);
122 	}
123 
124 	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
125 		pr_debug("%s: cmd overrun cmd = %p scp = %p\n",
126 			 __func__, cmd, scp);
127 		scp->result = (DID_ERROR << 16);
128 	}
129 
130 	pr_debug("%s: cmd failed afu_rc=%d scsi_rc=%d fc_rc=%d "
131 		 "afu_extra=0x%X, scsi_extra=0x%X, fc_extra=0x%X\n",
132 		 __func__, ioasa->rc.afu_rc, ioasa->rc.scsi_rc,
133 		 ioasa->rc.fc_rc, ioasa->afu_extra, ioasa->scsi_extra,
134 		 ioasa->fc_extra);
135 
136 	if (ioasa->rc.scsi_rc) {
137 		/* We have a SCSI status */
138 		if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
139 			memcpy(scp->sense_buffer, ioasa->sense_data,
140 			       SISL_SENSE_DATA_LEN);
141 			scp->result = ioasa->rc.scsi_rc;
142 		} else
143 			scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
144 	}
145 
146 	/*
147 	 * We encountered an error. Set scp->result based on nature
148 	 * of error.
149 	 */
150 	if (ioasa->rc.fc_rc) {
151 		/* We have an FC status */
152 		switch (ioasa->rc.fc_rc) {
153 		case SISL_FC_RC_LINKDOWN:
154 			scp->result = (DID_REQUEUE << 16);
155 			break;
156 		case SISL_FC_RC_RESID:
157 			/* This indicates an FCP resid underrun */
158 			if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
159 				/* If the SISL_RC_FLAGS_OVERRUN flag was set,
160 				 * then this error will be handled elsewhere.
161 				 * If not, then we must handle it here.
162 				 * This is probably an AFU bug.
163 				 */
164 				scp->result = (DID_ERROR << 16);
165 			}
166 			break;
167 		case SISL_FC_RC_RESIDERR:
168 			/* Resid mismatch between adapter and device */
169 		case SISL_FC_RC_TGTABORT:
170 		case SISL_FC_RC_ABORTOK:
171 		case SISL_FC_RC_ABORTFAIL:
172 		case SISL_FC_RC_NOLOGI:
173 		case SISL_FC_RC_ABORTPEND:
174 		case SISL_FC_RC_WRABORTPEND:
175 		case SISL_FC_RC_NOEXP:
176 		case SISL_FC_RC_INUSE:
177 			scp->result = (DID_ERROR << 16);
178 			break;
179 		}
180 	}
181 
182 	if (ioasa->rc.afu_rc) {
183 		/* We have an AFU error */
184 		switch (ioasa->rc.afu_rc) {
185 		case SISL_AFU_RC_NO_CHANNELS:
186 			scp->result = (DID_NO_CONNECT << 16);
187 			break;
188 		case SISL_AFU_RC_DATA_DMA_ERR:
189 			switch (ioasa->afu_extra) {
190 			case SISL_AFU_DMA_ERR_PAGE_IN:
191 				/* Retry */
192 				scp->result = (DID_IMM_RETRY << 16);
193 				break;
194 			case SISL_AFU_DMA_ERR_INVALID_EA:
195 			default:
196 				scp->result = (DID_ERROR << 16);
197 			}
198 			break;
199 		case SISL_AFU_RC_OUT_OF_DATA_BUFS:
200 			/* Retry */
201 			scp->result = (DID_ALLOC_FAILURE << 16);
202 			break;
203 		default:
204 			scp->result = (DID_ERROR << 16);
205 		}
206 	}
207 }
208 
209 /**
210  * cmd_complete() - command completion handler
211  * @cmd:	AFU command that has completed.
212  *
213  * Prepares and submits a command that has either completed or timed out to
214  * the SCSI stack. Checks the AFU command back into the command pool for
215  * non-internal (rcb.scp populated) commands.
216  */
217 static void cmd_complete(struct afu_cmd *cmd)
218 {
219 	struct scsi_cmnd *scp;
220 	ulong lock_flags;
221 	struct afu *afu = cmd->parent;
222 	struct cxlflash_cfg *cfg = afu->parent;
223 	bool cmd_is_tmf;
224 
225 	spin_lock_irqsave(&cmd->slock, lock_flags);
226 	cmd->sa.host_use_b[0] |= B_DONE;
227 	spin_unlock_irqrestore(&cmd->slock, lock_flags);
228 
229 	if (cmd->rcb.scp) {
230 		scp = cmd->rcb.scp;
231 		if (unlikely(cmd->sa.ioasc))
232 			process_cmd_err(cmd, scp);
233 		else
234 			scp->result = (DID_OK << 16);
235 
236 		cmd_is_tmf = cmd->cmd_tmf;
237 		pr_debug_ratelimited("%s: calling scsi_done scp=%p result=%X "
238 				     "ioasc=%d\n", __func__, scp, scp->result,
239 				     cmd->sa.ioasc);
240 
241 		cmd_checkin(cmd); /* Don't use cmd after here */
242 
243 		scsi_dma_unmap(scp);
244 		scp->scsi_done(scp);
245 
246 		if (cmd_is_tmf) {
247 			spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
248 			cfg->tmf_active = false;
249 			wake_up_all_locked(&cfg->tmf_waitq);
250 			spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
251 		}
252 	} else
253 		complete(&cmd->cevent);
254 }
255 
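/*
 * Editorial note: the slock/B_DONE handshake above pairs with the one
 * in context_reset(). Whichever path takes cmd->slock first marks the
 * command done; the other path observes B_DONE and backs off, so a
 * late hardware completion and a timeout-driven reset cannot both
 * finish the same command.
 */
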
256 /**
257  * context_reset() - timeout handler for AFU commands
258  * @cmd:	AFU command that timed out.
259  *
260  * Sends a reset to the AFU.
261  */
262 static void context_reset(struct afu_cmd *cmd)
263 {
264 	int nretry = 0;
265 	u64 rrin = 0x1;
266 	u64 room = 0;
267 	struct afu *afu = cmd->parent;
268 	ulong lock_flags;
269 
270 	pr_debug("%s: cmd=%p\n", __func__, cmd);
271 
272 	spin_lock_irqsave(&cmd->slock, lock_flags);
273 
274 	/* Already completed? */
275 	if (cmd->sa.host_use_b[0] & B_DONE) {
276 		spin_unlock_irqrestore(&cmd->slock, lock_flags);
277 		return;
278 	}
279 
280 	cmd->sa.host_use_b[0] |= (B_DONE | B_ERROR | B_TIMEOUT);
281 	spin_unlock_irqrestore(&cmd->slock, lock_flags);
282 
283 	/*
284 	 * We really want to send this reset at all costs, so spread
285 	 * out wait time on successive retries for available room.
286 	 */
287 	do {
288 		room = readq_be(&afu->host_map->cmd_room);
289 		atomic64_set(&afu->room, room);
290 		if (room)
291 			goto write_rrin;
292 		udelay(1 << nretry);
293 	} while (nretry++ < MC_ROOM_RETRY_CNT);
294 
295 	pr_err("%s: no cmd_room to send reset\n", __func__);
296 	return;
297 
298 write_rrin:
299 	nretry = 0;
300 	writeq_be(rrin, &afu->host_map->ioarrin);
301 	do {
302 		rrin = readq_be(&afu->host_map->ioarrin);
303 		if (rrin != 0x1)
304 			break;
305 		/* Double delay each time */
306 		udelay(1 << nretry);
307 	} while (nretry++ < MC_ROOM_RETRY_CNT);
308 }
309 
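/*
 * Editorial note: with udelay(1 << nretry), each retry doubles the
 * delay, so the total busy-wait in either loop above is bounded by
 * sum(2^k) for k = 0..MC_ROOM_RETRY_CNT, i.e. just under
 * 2^(MC_ROOM_RETRY_CNT + 1) microseconds.
 */
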
310 /**
311  * send_cmd() - sends an AFU command
312  * @afu:	AFU associated with the host.
313  * @cmd:	AFU command to send.
314  *
315  * Return:
316  *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
317  */
318 static int send_cmd(struct afu *afu, struct afu_cmd *cmd)
319 {
320 	struct cxlflash_cfg *cfg = afu->parent;
321 	struct device *dev = &cfg->dev->dev;
322 	int nretry = 0;
323 	int rc = 0;
324 	u64 room;
325 	long newval;
326 
327 	/*
328 	 * This routine is used by critical users, such as AFU sync and
329 	 * sending a task management function (TMF). Thus we want to retry a
330 	 * bit before returning an error. To avoid the performance penalty
331 	 * of MMIO, we spread the update of 'room' over multiple commands.
332 	 */
333 retry:
334 	newval = atomic64_dec_if_positive(&afu->room);
335 	if (!newval) {
336 		do {
337 			room = readq_be(&afu->host_map->cmd_room);
338 			atomic64_set(&afu->room, room);
339 			if (room)
340 				goto write_ioarrin;
341 			udelay(1 << nretry);
342 		} while (nretry++ < MC_ROOM_RETRY_CNT);
343 
344 		dev_err(dev, "%s: no cmd_room to send 0x%X\n",
345 		       __func__, cmd->rcb.cdb[0]);
346 
347 		goto no_room;
348 	} else if (unlikely(newval < 0)) {
349 		/* This should be rare, i.e. only if two threads race and
350 		 * decrement before the MMIO read is done. In this case
351 		 * just benefit from the other thread having updated
352 		 * afu->room.
353 		 */
354 		if (nretry++ < MC_ROOM_RETRY_CNT) {
355 			udelay(1 << nretry);
356 			goto retry;
357 		}
358 
359 		goto no_room;
360 	}
361 
362 write_ioarrin:
363 	writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
364 out:
365 	pr_devel("%s: cmd=%p len=%d ea=%p rc=%d\n", __func__, cmd,
366 		 cmd->rcb.data_len, (void *)cmd->rcb.data_ea, rc);
367 	return rc;
368 
369 no_room:
370 	afu->read_room = true;
371 	kref_get(&cfg->afu->mapcount);
372 	schedule_work(&cfg->work_q);
373 	rc = SCSI_MLQUEUE_HOST_BUSY;
374 	goto out;
375 }
376 
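/*
 * Editorial aside (not driver code): the room handling in send_cmd()
 * is a credit cache. The MMIO read of cmd_room is costly, so its last
 * known value is kept in afu->room and decremented once per command;
 * the register is re-read only when the cached credits run out. A
 * hedged sketch of the idea:
 */
#if 0
	/* Claim a cached credit; non-positive means the cache ran dry */
	if (atomic64_dec_if_positive(&afu->room) > 0)
		return 0;		/* credit in hand, no MMIO needed */

	/* Refresh the cache from the hardware count and retry */
	atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));
#endif
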
377 /**
378  * wait_resp() - polls for a response or timeout to a sent AFU command
379  * @afu:	AFU associated with the host.
380  * @cmd:	AFU command that was sent.
381  */
382 static void wait_resp(struct afu *afu, struct afu_cmd *cmd)
383 {
384 	ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);
385 
386 	timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
387 	if (!timeout)
388 		context_reset(cmd);
389 
390 	if (unlikely(cmd->sa.ioasc != 0))
391 		pr_err("%s: CMD 0x%X failed, IOASC: flags 0x%X, afu_rc 0x%X, "
392 		       "scsi_rc 0x%X, fc_rc 0x%X\n", __func__, cmd->rcb.cdb[0],
393 		       cmd->sa.rc.flags, cmd->sa.rc.afu_rc, cmd->sa.rc.scsi_rc,
394 		       cmd->sa.rc.fc_rc);
395 }
396 
397 /**
398  * send_tmf() - sends a Task Management Function (TMF)
399  * @afu:	AFU to checkout from.
400  * @scp:	SCSI command from stack.
401  * @tmfcmd:	TMF command to send.
402  *
403  * Return:
404  *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
405  */
406 static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
407 {
408 	struct afu_cmd *cmd;
409 
410 	u32 port_sel = scp->device->channel + 1;
411 	short lflag = 0;
412 	struct Scsi_Host *host = scp->device->host;
413 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
414 	struct device *dev = &cfg->dev->dev;
415 	ulong lock_flags;
416 	int rc = 0;
417 	ulong to;
418 
419 	cmd = cmd_checkout(afu);
420 	if (unlikely(!cmd)) {
421 		dev_err(dev, "%s: could not get a free command\n", __func__);
422 		rc = SCSI_MLQUEUE_HOST_BUSY;
423 		goto out;
424 	}
425 
426 	/* When a Task Management Function is active, do not send another */
427 	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
428 	if (cfg->tmf_active)
429 		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
430 						  !cfg->tmf_active,
431 						  cfg->tmf_slock);
432 	cfg->tmf_active = true;
433 	cmd->cmd_tmf = true;
434 	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
435 
436 	cmd->rcb.ctx_id = afu->ctx_hndl;
437 	cmd->rcb.port_sel = port_sel;
438 	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
439 
440 	lflag = SISL_REQ_FLAGS_TMF_CMD;
441 
442 	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
443 			      SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);
444 
445 	/* Stash the scp in the reserved field, for reuse during interrupt */
446 	cmd->rcb.scp = scp;
447 
448 	/* Copy the CDB from the cmd passed in */
449 	memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));
450 
451 	/* Send the command */
452 	rc = send_cmd(afu, cmd);
453 	if (unlikely(rc)) {
454 		cmd_checkin(cmd);
455 		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
456 		cfg->tmf_active = false;
457 		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
458 		goto out;
459 	}
460 
461 	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
462 	to = msecs_to_jiffies(5000);
463 	to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
464 						       !cfg->tmf_active,
465 						       cfg->tmf_slock,
466 						       to);
467 	if (!to) {
468 		cfg->tmf_active = false;
469 		dev_err(dev, "%s: TMF timed out!\n", __func__);
470 		rc = -1;
471 	}
472 	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
473 out:
474 	return rc;
475 }
476 
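/*
 * Editorial note: TMF serialization is a three-way handshake. send_tmf()
 * claims tmf_active under tmf_slock, cxlflash_queuecommand() refuses
 * regular commands while it is set, and cmd_complete() clears it and
 * wakes tmf_waitq when the TMF finishes, releasing both the next TMF
 * sender and the 5 second timed wait above.
 */
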
477 static void afu_unmap(struct kref *ref)
478 {
479 	struct afu *afu = container_of(ref, struct afu, mapcount);
480 
481 	if (likely(afu->afu_map)) {
482 		cxl_psa_unmap((void __iomem *)afu->afu_map);
483 		afu->afu_map = NULL;
484 	}
485 }
486 
487 /**
488  * cxlflash_driver_info() - information handler for this host driver
489  * @host:	SCSI host associated with device.
490  *
491  * Return: A string describing the device.
492  */
493 static const char *cxlflash_driver_info(struct Scsi_Host *host)
494 {
495 	return CXLFLASH_ADAPTER_NAME;
496 }
497 
498 /**
499  * cxlflash_queuecommand() - sends a mid-layer request
500  * @host:	SCSI host associated with device.
501  * @scp:	SCSI command to send.
502  *
503  * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
504  */
505 static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
506 {
507 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
508 	struct afu *afu = cfg->afu;
509 	struct device *dev = &cfg->dev->dev;
510 	struct afu_cmd *cmd;
511 	u32 port_sel = scp->device->channel + 1;
512 	int nseg, i, ncount;
513 	struct scatterlist *sg;
514 	ulong lock_flags;
515 	short lflag = 0;
516 	int rc = 0;
517 	int kref_got = 0;
518 
519 	dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
520 			    "cdb=(%08X-%08X-%08X-%08X)\n",
521 			    __func__, scp, host->host_no, scp->device->channel,
522 			    scp->device->id, scp->device->lun,
523 			    get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
524 			    get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
525 			    get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
526 			    get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
527 
528 	/*
529 	 * If a Task Management Function is active, wait for it to complete
530 	 * before continuing with regular commands.
531 	 */
532 	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
533 	if (cfg->tmf_active) {
534 		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
535 		rc = SCSI_MLQUEUE_HOST_BUSY;
536 		goto out;
537 	}
538 	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
539 
540 	switch (cfg->state) {
541 	case STATE_RESET:
542 		dev_dbg_ratelimited(dev, "%s: device is in reset!\n", __func__);
543 		rc = SCSI_MLQUEUE_HOST_BUSY;
544 		goto out;
545 	case STATE_FAILTERM:
546 		dev_dbg_ratelimited(dev, "%s: device has failed!\n", __func__);
547 		scp->result = (DID_NO_CONNECT << 16);
548 		scp->scsi_done(scp);
549 		rc = 0;
550 		goto out;
551 	default:
552 		break;
553 	}
554 
555 	cmd = cmd_checkout(afu);
556 	if (unlikely(!cmd)) {
557 		dev_err(dev, "%s: could not get a free command\n", __func__);
558 		rc = SCSI_MLQUEUE_HOST_BUSY;
559 		goto out;
560 	}
561 
562 	kref_get(&cfg->afu->mapcount);
563 	kref_got = 1;
564 
565 	cmd->rcb.ctx_id = afu->ctx_hndl;
566 	cmd->rcb.port_sel = port_sel;
567 	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
568 
569 	if (scp->sc_data_direction == DMA_TO_DEVICE)
570 		lflag = SISL_REQ_FLAGS_HOST_WRITE;
571 	else
572 		lflag = SISL_REQ_FLAGS_HOST_READ;
573 
574 	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
575 			      SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);
576 
577 	/* Stash the scp in the reserved field, for reuse during interrupt */
578 	cmd->rcb.scp = scp;
579 
580 	nseg = scsi_dma_map(scp);
581 	if (unlikely(nseg < 0)) {
582 		dev_err(dev, "%s: Failed DMA map! nseg=%d\n",
583 			__func__, nseg);
584 		rc = SCSI_MLQUEUE_HOST_BUSY;
		cmd_checkin(cmd);	/* command was never sent; free the slot */
585 		goto out;
586 	}
587 
588 	ncount = scsi_sg_count(scp);
589 	scsi_for_each_sg(scp, sg, ncount, i) {
590 		cmd->rcb.data_len = sg_dma_len(sg);
591 		cmd->rcb.data_ea = sg_dma_address(sg);
592 	}
593 
594 	/* Copy the CDB from the scsi_cmnd passed in */
595 	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));
596 
597 	/* Send the command */
598 	rc = send_cmd(afu, cmd);
599 	if (unlikely(rc)) {
600 		cmd_checkin(cmd);
601 		scsi_dma_unmap(scp);
602 	}
603 
604 out:
605 	if (kref_got)
606 		kref_put(&afu->mapcount, afu_unmap);
607 	pr_devel("%s: returning rc=%d\n", __func__, rc);
608 	return rc;
609 }
610 
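/*
 * Editorial note on the scatter-gather loop above: each iteration
 * overwrites rcb.data_len and rcb.data_ea, so only the final element
 * survives. That is presumably safe because this driver's SCSI host
 * template limits commands to a single scatter-gather element; with a
 * larger sg_tablesize the loop as written would drop data segments.
 */
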
611 /**
612  * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
613  * @cfg:	Internal structure associated with the host.
614  */
615 static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
616 {
617 	struct pci_dev *pdev = cfg->dev;
618 
619 	if (pci_channel_offline(pdev))
620 		wait_event_timeout(cfg->reset_waitq,
621 				   !pci_channel_offline(pdev),
622 				   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
623 }
624 
625 /**
626  * free_mem() - free memory associated with the AFU
627  * @cfg:	Internal structure associated with the host.
628  */
629 static void free_mem(struct cxlflash_cfg *cfg)
630 {
631 	int i;
632 	char *buf = NULL;
633 	struct afu *afu = cfg->afu;
634 
635 	if (cfg->afu) {
636 		for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
637 			buf = afu->cmd[i].buf;
638 			if (!((u64)buf & (PAGE_SIZE - 1)))
639 				free_page((ulong)buf);
640 		}
641 
642 		free_pages((ulong)afu, get_order(sizeof(struct afu)));
643 		cfg->afu = NULL;
644 	}
645 }
646 
647 /**
648  * stop_afu() - stops the AFU command timers and unmaps the MMIO space
649  * @cfg:	Internal structure associated with the host.
650  *
651  * Safe to call with AFU in a partially allocated/initialized state.
652  *
653  * Cleans up all state associated with the command queue, and unmaps
654  * the MMIO space.
655  *
656  *  - complete() will take care of commands we initiated (they'll be checked
657  *  in as part of the cleanup that occurs after the completion)
658  *
659  *  - cmd_checkin() will take care of entries that we did not initiate and that
660  *  have not (and will not) complete because they are sitting on a [now stale]
661  *  hardware queue
662  */
663 static void stop_afu(struct cxlflash_cfg *cfg)
664 {
665 	int i;
666 	struct afu *afu = cfg->afu;
667 	struct afu_cmd *cmd;
668 
669 	if (likely(afu)) {
670 		for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
671 			cmd = &afu->cmd[i];
672 			complete(&cmd->cevent);
673 			if (!atomic_read(&cmd->free))
674 				cmd_checkin(cmd);
675 		}
676 
677 		if (likely(afu->afu_map)) {
678 			cxl_psa_unmap((void __iomem *)afu->afu_map);
679 			afu->afu_map = NULL;
680 		}
681 		kref_put(&afu->mapcount, afu_unmap);
682 	}
683 }
684 
685 /**
686  * term_intr() - disables all AFU interrupts
687  * @cfg:	Internal structure associated with the host.
688  * @level:	Depth of allocation, where to begin waterfall tear down.
689  *
690  * Safe to call with AFU/MC in partially allocated/initialized state.
691  */
692 static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level)
693 {
694 	struct afu *afu = cfg->afu;
695 	struct device *dev = &cfg->dev->dev;
696 
697 	if (!afu || !cfg->mcctx) {
698 		dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
699 		return;
700 	}
701 
702 	switch (level) {
703 	case UNMAP_THREE:
704 		cxl_unmap_afu_irq(cfg->mcctx, 3, afu); /* fall through */
705 	case UNMAP_TWO:
706 		cxl_unmap_afu_irq(cfg->mcctx, 2, afu); /* fall through */
707 	case UNMAP_ONE:
708 		cxl_unmap_afu_irq(cfg->mcctx, 1, afu); /* fall through */
709 	case FREE_IRQ:
710 		cxl_free_afu_irqs(cfg->mcctx);
711 		/* fall through */
712 	case UNDO_NOOP:
713 		/* No action required */
714 		break;
715 	}
716 }
717 
718 /**
719  * term_mc() - terminates the master context
720  * @cfg:	Internal structure associated with the host.
722  *
723  * Safe to call with AFU/MC in partially allocated/initialized state.
724  */
725 static void term_mc(struct cxlflash_cfg *cfg)
726 {
727 	int rc = 0;
728 	struct afu *afu = cfg->afu;
729 	struct device *dev = &cfg->dev->dev;
730 
731 	if (!afu || !cfg->mcctx) {
732 		dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
733 		return;
734 	}
735 
736 	rc = cxl_stop_context(cfg->mcctx);
737 	WARN_ON(rc);
738 	cfg->mcctx = NULL;
739 }
740 
741 /**
742  * term_afu() - terminates the AFU
743  * @cfg:	Internal structure associated with the host.
744  *
745  * Safe to call with AFU/MC in partially allocated/initialized state.
746  */
747 static void term_afu(struct cxlflash_cfg *cfg)
748 {
749 	/*
750 	 * Tear down is carefully orchestrated to ensure
751 	 * no interrupts can come in when the problem state
752 	 * area is unmapped.
753 	 *
754 	 * 1) Disable all AFU interrupts
755 	 * 2) Unmap the problem state area
756 	 * 3) Stop the master context
757 	 */
758 	term_intr(cfg, UNMAP_THREE);
759 	if (cfg->afu)
760 		stop_afu(cfg);
761 
762 	term_mc(cfg);
763 
764 	pr_debug("%s: returning\n", __func__);
765 }
766 
767 /**
768  * notify_shutdown() - notifies device of pending shutdown
769  * @cfg:	Internal structure associated with the host.
770  * @wait:	Whether to wait for shutdown processing to complete.
771  *
772  * This function will notify the AFU that the adapter is being shutdown
773  * and will wait for shutdown processing to complete if wait is true.
774  * This notification should flush pending I/Os to the device and halt
775  * further I/Os until the next AFU reset is issued and device restarted.
776  */
777 static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
778 {
779 	struct afu *afu = cfg->afu;
780 	struct device *dev = &cfg->dev->dev;
781 	struct sisl_global_map __iomem *global;
782 	struct dev_dependent_vals *ddv;
783 	u64 reg, status;
784 	int i, retry_cnt = 0;
785 
786 	ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
787 	if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
788 		return;
789 
790 	if (!afu || !afu->afu_map) {
791 		dev_dbg(dev, "%s: The problem state area is not mapped\n",
792 			__func__);
793 		return;
794 	}
795 
796 	global = &afu->afu_map->global;
797 
798 	/* Notify AFU */
799 	for (i = 0; i < NUM_FC_PORTS; i++) {
800 		reg = readq_be(&global->fc_regs[i][FC_CONFIG2 / 8]);
801 		reg |= SISL_FC_SHUTDOWN_NORMAL;
802 		writeq_be(reg, &global->fc_regs[i][FC_CONFIG2 / 8]);
803 	}
804 
805 	if (!wait)
806 		return;
807 
808 	/* Wait up to 1.5 seconds for shutdown processing to complete */
809 	for (i = 0; i < NUM_FC_PORTS; i++) {
810 		retry_cnt = 0;
811 		while (true) {
812 			status = readq_be(&global->fc_regs[i][FC_STATUS / 8]);
813 			if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
814 				break;
815 			if (++retry_cnt >= MC_RETRY_CNT) {
816 				dev_dbg(dev, "%s: port %d shutdown processing "
817 					"not yet completed\n", __func__, i);
818 				break;
819 			}
820 			msleep(100 * retry_cnt);
821 		}
822 	}
823 }
824 
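/*
 * Editorial note: the msleep(100 * retry_cnt) back-off above grows
 * linearly, so the per-port wait is bounded by roughly
 * 100 * MC_RETRY_CNT * (MC_RETRY_CNT - 1) / 2 milliseconds, which is
 * presumably where the "up to 1.5 seconds" figure comes from.
 */
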
825 /**
826  * cxlflash_shutdown() - shutdown handler
827  * @pdev:	PCI device associated with the host.
828  */
829 static void cxlflash_shutdown(struct pci_dev *pdev)
830 {
831 	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
832 
833 	notify_shutdown(cfg, false);
834 }
835 
836 /**
837  * cxlflash_remove() - PCI entry point to tear down host
838  * @pdev:	PCI device associated with the host.
839  *
840  * Safe to use as a cleanup in partially allocated/initialized state.
841  */
842 static void cxlflash_remove(struct pci_dev *pdev)
843 {
844 	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
845 	ulong lock_flags;
846 
847 	/* If a Task Management Function is active, wait for it to complete
848 	 * before continuing with remove.
849 	 */
850 	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
851 	if (cfg->tmf_active)
852 		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
853 						  !cfg->tmf_active,
854 						  cfg->tmf_slock);
855 	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
856 
857 	/* Notify AFU and wait for shutdown processing to complete */
858 	notify_shutdown(cfg, true);
859 
860 	cfg->state = STATE_FAILTERM;
861 	cxlflash_stop_term_user_contexts(cfg);
862 
863 	switch (cfg->init_state) {
864 	case INIT_STATE_SCSI:
865 		cxlflash_term_local_luns(cfg);
866 		scsi_remove_host(cfg->host);
867 		/* fall through */
868 	case INIT_STATE_AFU:
869 		cancel_work_sync(&cfg->work_q);
870 		term_afu(cfg);	/* fall through */
871 	case INIT_STATE_PCI:
872 		pci_disable_device(pdev);	/* fall through */
873 	case INIT_STATE_NONE:
874 		free_mem(cfg);
875 		scsi_host_put(cfg->host);
876 		break;
877 	}
878 
879 	pr_debug("%s: returning\n", __func__);
880 }
881 
882 /**
883  * alloc_mem() - allocates the AFU and its command pool
884  * @cfg:	Internal structure associated with the host.
885  *
886  * A partially allocated state remains on failure.
887  *
888  * Return:
889  *	0 on success
890  *	-ENOMEM on failure to allocate memory
891  */
892 static int alloc_mem(struct cxlflash_cfg *cfg)
893 {
894 	int rc = 0;
895 	int i;
896 	char *buf = NULL;
897 	struct device *dev = &cfg->dev->dev;
898 
899 	/* AFU is ~12k, i.e. only one 64k page or up to four 4k pages */
900 	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
901 					    get_order(sizeof(struct afu)));
902 	if (unlikely(!cfg->afu)) {
903 		dev_err(dev, "%s: cannot get %d free pages\n",
904 			__func__, get_order(sizeof(struct afu)));
905 		rc = -ENOMEM;
906 		goto out;
907 	}
908 	cfg->afu->parent = cfg;
909 	cfg->afu->afu_map = NULL;
910 
911 	for (i = 0; i < CXLFLASH_NUM_CMDS; buf += CMD_BUFSIZE, i++) {
912 		if (!((u64)buf & (PAGE_SIZE - 1))) {
913 			buf = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
914 			if (unlikely(!buf)) {
915 				dev_err(dev,
916 					"%s: Failed to allocate command buffers!\n",
917 					__func__);
918 				rc = -ENOMEM;
919 				free_mem(cfg);
920 				goto out;
921 			}
922 		}
923 
924 		cfg->afu->cmd[i].buf = buf;
925 		atomic_set(&cfg->afu->cmd[i].free, 1);
926 		cfg->afu->cmd[i].slot = i;
927 	}
928 
929 out:
930 	return rc;
931 }
932 
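/*
 * Editorial note: alloc_mem() carves command buffers out of whole
 * pages, CMD_BUFSIZE apart. A fresh page is allocated whenever buf
 * lands on a page boundary (including the first pass, since buf starts
 * out NULL), and free_mem() applies the same alignment test so each
 * page is freed exactly once.
 */
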
933 /**
934  * init_pci() - initializes the host as a PCI device
935  * @cfg:	Internal structure associated with the host.
936  *
937  * Return: 0 on success, -errno on failure
938  */
939 static int init_pci(struct cxlflash_cfg *cfg)
940 {
941 	struct pci_dev *pdev = cfg->dev;
942 	int rc = 0;
943 
944 	rc = pci_enable_device(pdev);
945 	if (rc || pci_channel_offline(pdev)) {
946 		if (pci_channel_offline(pdev)) {
947 			cxlflash_wait_for_pci_err_recovery(cfg);
948 			rc = pci_enable_device(pdev);
949 		}
950 
951 		if (rc) {
952 			dev_err(&pdev->dev, "%s: Cannot enable adapter\n",
953 				__func__);
954 			cxlflash_wait_for_pci_err_recovery(cfg);
955 			goto out;
956 		}
957 	}
958 
959 out:
960 	pr_debug("%s: returning rc=%d\n", __func__, rc);
961 	return rc;
962 }
963 
964 /**
965  * init_scsi() - adds the host to the SCSI stack and kicks off host scan
966  * @cfg:	Internal structure associated with the host.
967  *
968  * Return: 0 on success, -errno on failure
969  */
970 static int init_scsi(struct cxlflash_cfg *cfg)
971 {
972 	struct pci_dev *pdev = cfg->dev;
973 	int rc = 0;
974 
975 	rc = scsi_add_host(cfg->host, &pdev->dev);
976 	if (rc) {
977 		dev_err(&pdev->dev, "%s: scsi_add_host failed (rc=%d)\n",
978 			__func__, rc);
979 		goto out;
980 	}
981 
982 	scsi_scan_host(cfg->host);
983 
984 out:
985 	pr_debug("%s: returning rc=%d\n", __func__, rc);
986 	return rc;
987 }
988 
989 /**
990  * set_port_online() - transitions the specified host FC port to online state
991  * @fc_regs:	Top of MMIO region defined for specified port.
992  *
993  * The provided MMIO region must be mapped prior to call. Online state means
994  * that the FC link layer has synced, completed the handshaking process, and
995  * is ready for login to start.
996  */
997 static void set_port_online(__be64 __iomem *fc_regs)
998 {
999 	u64 cmdcfg;
1000 
1001 	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
1002 	cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE);	/* clear OFF_LINE */
1003 	cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);	/* set ON_LINE */
1004 	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
1005 }
1006 
1007 /**
1008  * set_port_offline() - transitions the specified host FC port to offline state
1009  * @fc_regs:	Top of MMIO region defined for specified port.
1010  *
1011  * The provided MMIO region must be mapped prior to call.
1012  */
1013 static void set_port_offline(__be64 __iomem *fc_regs)
1014 {
1015 	u64 cmdcfg;
1016 
1017 	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
1018 	cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);	/* clear ON_LINE */
1019 	cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);	/* set OFF_LINE */
1020 	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
1021 }
1022 
1023 /**
1024  * wait_port_online() - waits for the specified host FC port to come online
1025  * @fc_regs:	Top of MMIO region defined for specified port.
1026  * @delay_us:	Number of microseconds to delay between reading port status.
1027  * @nretry:	Number of cycles to retry reading port status.
1028  *
1029  * The provided MMIO region must be mapped prior to call. This will time out
1030  * when the cable is not plugged in.
1031  *
1032  * Return:
1033  *	TRUE (1) when the specified port is online
1034  *	FALSE (0) when the specified port fails to come online after timeout
1035  *	-EINVAL when @delay_us is less than 1000
1036  */
1037 static int wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
1038 {
1039 	u64 status;
1040 
1041 	if (delay_us < 1000) {
1042 		pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
1043 		return -EINVAL;
1044 	}
1045 
1046 	do {
1047 		msleep(delay_us / 1000);
1048 		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
1049 	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
1050 		 nretry--);
1051 
1052 	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
1053 }
1054 
1055 /**
1056  * wait_port_offline() - waits for the specified host FC port to go offline
1057  * @fc_regs:	Top of MMIO region defined for specified port.
1058  * @delay_us:	Number of microseconds to delay between reading port status.
1059  * @nretry:	Number of cycles to retry reading port status.
1060  *
1061  * The provided MMIO region must be mapped prior to call.
1062  *
1063  * Return:
1064  *	TRUE (1) when the specified port is offline
1065  *	FALSE (0) when the specified port fails to go offline after timeout
1066  *	-EINVAL when @delay_us is less than 1000
1067  */
1068 static int wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
1069 {
1070 	u64 status;
1071 
1072 	if (delay_us < 1000) {
1073 		pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
1074 		return -EINVAL;
1075 	}
1076 
1077 	do {
1078 		msleep(delay_us / 1000);
1079 		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
1080 	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
1081 		 nretry--);
1082 
1083 	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
1084 }
1085 
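/*
 * Editorial note for the two port-wait helpers above: each poll sleeps
 * delay_us microseconds, so the worst-case wait is roughly
 * (nretry + 1) * delay_us. With the FC_PORT_STATUS_RETRY_* values used
 * by callers in this file, that is what bounds each offline/online
 * transition to the "few seconds" mentioned in afu_set_wwpn().
 */
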
1086 /**
1087  * afu_set_wwpn() - configures the WWPN for the specified host FC port
1088  * @afu:	AFU associated with the host that owns the specified FC port.
1089  * @port:	Port number being configured.
1090  * @fc_regs:	Top of MMIO region defined for specified port.
1091  * @wwpn:	The world-wide-port-number previously discovered for port.
1092  *
1093  * The provided MMIO region must be mapped prior to call. As part of the
1094  * sequence to configure the WWPN, the port is toggled offline and then back
1095  * online. This toggling action can cause this routine to delay up to a few
1096  * seconds. When configured to use the internal LUN feature of the AFU, a
1097  * failure to come online is overridden.
1098  *
1099  * Return:
1100  *	0 always. Port toggle timeouts are logged but overridden; if the
1101  *	port fails to go offline, the WWPN is simply left unwritten.
1102  */
1103 static int afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
1104 			u64 wwpn)
1105 {
1106 	int rc = 0;
1107 
1108 	set_port_offline(fc_regs);
1109 
1110 	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1111 			       FC_PORT_STATUS_RETRY_CNT)) {
1112 		pr_debug("%s: wait on port %d to go offline timed out\n",
1113 			 __func__, port);
1114 		rc = -1; /* but continue on to leave the port back online */
1115 	}
1116 
1117 	if (rc == 0)
1118 		writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);
1119 
1120 	/* Always return success after programming WWPN */
1121 	rc = 0;
1122 
1123 	set_port_online(fc_regs);
1124 
1125 	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1126 			      FC_PORT_STATUS_RETRY_CNT)) {
1127 		pr_err("%s: wait on port %d to go online timed out\n",
1128 		       __func__, port);
1129 	}
1130 
1131 	pr_debug("%s: returning rc=%d\n", __func__, rc);
1132 
1133 	return rc;
1134 }
1135 
1136 /**
1137  * afu_link_reset() - resets the specified host FC port
1138  * @afu:	AFU associated with the host that owns the specified FC port.
1139  * @port:	Port number being configured.
1140  * @fc_regs:	Top of MMIO region defined for specified port.
1141  *
1142  * The provided MMIO region must be mapped prior to call. The sequence to
1143  * reset the port involves toggling it offline and then back online. This
1144  * action can cause this routine to delay up to a few seconds. An effort
1145  * is made to maintain the link with the device by switching the host to
1146  * use the alternate port exclusively while the reset takes place.
1148  */
1149 static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
1150 {
1151 	u64 port_sel;
1152 
1153 	/* first switch the AFU to the other links, if any */
1154 	port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
1155 	port_sel &= ~(1ULL << port);
1156 	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
1157 	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
1158 
1159 	set_port_offline(fc_regs);
1160 	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1161 			       FC_PORT_STATUS_RETRY_CNT))
1162 		pr_err("%s: wait on port %d to go offline timed out\n",
1163 		       __func__, port);
1164 
1165 	set_port_online(fc_regs);
1166 	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1167 			      FC_PORT_STATUS_RETRY_CNT))
1168 		pr_err("%s: wait on port %d to go online timed out\n",
1169 		       __func__, port);
1170 
1171 	/* switch back to include this port */
1172 	port_sel |= (1ULL << port);
1173 	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
1174 	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
1175 
1176 	pr_debug("%s: returning port_sel=%lld\n", __func__, port_sel);
1177 }
1178 
1179 /*
1180  * Asynchronous interrupt information table
1181  */
1182 static const struct asyc_intr_info ainfo[] = {
1183 	{SISL_ASTATUS_FC0_OTHER, "other error", 0, CLR_FC_ERROR | LINK_RESET},
1184 	{SISL_ASTATUS_FC0_LOGO, "target initiated LOGO", 0, 0},
1185 	{SISL_ASTATUS_FC0_CRC_T, "CRC threshold exceeded", 0, LINK_RESET},
1186 	{SISL_ASTATUS_FC0_LOGI_R, "login timed out, retrying", 0, LINK_RESET},
1187 	{SISL_ASTATUS_FC0_LOGI_F, "login failed", 0, CLR_FC_ERROR},
1188 	{SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, SCAN_HOST},
1189 	{SISL_ASTATUS_FC0_LINK_DN, "link down", 0, 0},
1190 	{SISL_ASTATUS_FC0_LINK_UP, "link up", 0, SCAN_HOST},
1191 	{SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
1192 	{SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
1193 	{SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
1194 	{SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, LINK_RESET},
1195 	{SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
1196 	{SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, SCAN_HOST},
1197 	{SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
1198 	{SISL_ASTATUS_FC1_LINK_UP, "link up", 1, SCAN_HOST},
1199 	{0x0, "", 0, 0}		/* terminator */
1200 };
1201 
1202 /**
1203  * find_ainfo() - locates and returns asynchronous interrupt information
1204  * @status:	Status code set by AFU on error.
1205  *
1206  * Return: The located information or NULL when the status code is invalid.
1207  */
1208 static const struct asyc_intr_info *find_ainfo(u64 status)
1209 {
1210 	const struct asyc_intr_info *info;
1211 
1212 	for (info = &ainfo[0]; info->status; info++)
1213 		if (info->status == status)
1214 			return info;
1215 
1216 	return NULL;
1217 }
1218 
1219 /**
1220  * afu_err_intr_init() - clears and initializes the AFU for error interrupts
1221  * @afu:	AFU associated with the host.
1222  */
1223 static void afu_err_intr_init(struct afu *afu)
1224 {
1225 	int i;
1226 	u64 reg;
1227 
1228 	/* Global async interrupts: AFU clears afu_ctrl on context exit
1229 	 * if async interrupts were sent to that context. This prevents
1230 	 * the AFU from sending further async interrupts when there is
1231 	 * nobody to receive them.
1232 	 */
1234 
1235 	/* mask all */
1236 	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
1237 	/* set LISN# to send and point to master context */
1238 	reg = ((u64) (((afu->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);
1239 
1240 	if (afu->internal_lun)
1241 		reg |= 1;	/* Bit 63 indicates local lun */
1242 	writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
1243 	/* clear all */
1244 	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
1245 	/* unmask bits that are of interest */
1246 	/* note: afu can send an interrupt after this step */
1247 	writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
1248 	/* clear again in case a bit came on after previous clear but before */
1249 	/* unmask */
1250 	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
1251 
1252 	/* Clear/Set internal lun bits */
1253 	reg = readq_be(&afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);
1254 	reg &= SISL_FC_INTERNAL_MASK;
1255 	if (afu->internal_lun)
1256 		reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
1257 	writeq_be(reg, &afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);
1258 
1259 	/* now clear FC errors */
1260 	for (i = 0; i < NUM_FC_PORTS; i++) {
1261 		writeq_be(0xFFFFFFFFU,
1262 			  &afu->afu_map->global.fc_regs[i][FC_ERROR / 8]);
1263 		writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRCAP / 8]);
1264 	}
1265 
1266 	/* sync interrupts for master's IOARRIN write */
1267 	/* note that unlike asyncs, there can be no pending sync interrupts */
1268 	/* at this time (this is a fresh context and master has not written */
1269 	/* IOARRIN yet), so there is nothing to clear. */
1270 
1271 	/* set LISN#, it is always sent to the context that wrote IOARRIN */
1272 	writeq_be(SISL_MSI_SYNC_ERROR, &afu->host_map->ctx_ctrl);
1273 	writeq_be(SISL_ISTATUS_MASK, &afu->host_map->intr_mask);
1274 }
1275 
1276 /**
1277  * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
1278  * @irq:	Interrupt number.
1279  * @data:	Private data provided at interrupt registration, the AFU.
1280  *
1281  * Return: Always return IRQ_HANDLED.
1282  */
1283 static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
1284 {
1285 	struct afu *afu = (struct afu *)data;
1286 	u64 reg;
1287 	u64 reg_unmasked;
1288 
1289 	reg = readq_be(&afu->host_map->intr_status);
1290 	reg_unmasked = (reg & SISL_ISTATUS_UNMASK);
1291 
1292 	if (reg_unmasked == 0UL) {
1293 		pr_err("%s: %llX: spurious interrupt, intr_status %016llX\n",
1294 		       __func__, (u64)afu, reg);
1295 		goto cxlflash_sync_err_irq_exit;
1296 	}
1297 
1298 	pr_err("%s: %llX: unexpected interrupt, intr_status %016llX\n",
1299 	       __func__, (u64)afu, reg);
1300 
1301 	writeq_be(reg_unmasked, &afu->host_map->intr_clear);
1302 
1303 cxlflash_sync_err_irq_exit:
1304 	pr_debug("%s: returning rc=%d\n", __func__, IRQ_HANDLED);
1305 	return IRQ_HANDLED;
1306 }
1307 
1308 /**
1309  * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
1310  * @irq:	Interrupt number.
1311  * @data:	Private data provided at interrupt registration, the AFU.
1312  *
1313  * Return: Always return IRQ_HANDLED.
1314  */
1315 static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
1316 {
1317 	struct afu *afu = (struct afu *)data;
1318 	struct afu_cmd *cmd;
1319 	bool toggle = afu->toggle;
1320 	u64 entry,
1321 	    *hrrq_start = afu->hrrq_start,
1322 	    *hrrq_end = afu->hrrq_end,
1323 	    *hrrq_curr = afu->hrrq_curr;
1324 
1325 	/* Process however many RRQ entries that are ready */
1326 	while (true) {
1327 		entry = *hrrq_curr;
1328 
1329 		if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
1330 			break;
1331 
1332 		cmd = (struct afu_cmd *)(entry & ~SISL_RESP_HANDLE_T_BIT);
1333 		cmd_complete(cmd);
1334 
1335 		/* Advance to next entry or wrap and flip the toggle bit */
1336 		if (hrrq_curr < hrrq_end)
1337 			hrrq_curr++;
1338 		else {
1339 			hrrq_curr = hrrq_start;
1340 			toggle ^= SISL_RESP_HANDLE_T_BIT;
1341 		}
1342 	}
1343 
1344 	afu->hrrq_curr = hrrq_curr;
1345 	afu->toggle = toggle;
1346 
1347 	return IRQ_HANDLED;
1348 }
1349 
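/*
 * Editorial note on the toggle bit above: consumed RRQ entries are
 * never cleared. Instead, the AFU flips the low bit of everything it
 * writes on each lap around the ring, and the handler tracks the
 * expected value in afu->toggle (seeded to 1 by start_afu()). An entry
 * whose toggle bit does not match is a leftover from the previous lap,
 * which is how the loop knows it has caught up with the producer.
 */
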
1350 /**
1351  * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
1352  * @irq:	Interrupt number.
1353  * @data:	Private data provided at interrupt registration, the AFU.
1354  *
1355  * Return: Always return IRQ_HANDLED.
1356  */
1357 static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
1358 {
1359 	struct afu *afu = (struct afu *)data;
1360 	struct cxlflash_cfg *cfg = afu->parent;
1361 	struct device *dev = &cfg->dev->dev;
1362 	u64 reg_unmasked;
1363 	const struct asyc_intr_info *info;
1364 	struct sisl_global_map __iomem *global = &afu->afu_map->global;
1365 	u64 reg;
1366 	u8 port;
1367 	int i;
1368 
1369 	reg = readq_be(&global->regs.aintr_status);
1370 	reg_unmasked = (reg & SISL_ASTATUS_UNMASK);
1371 
1372 	if (reg_unmasked == 0) {
1373 		dev_err(dev, "%s: spurious interrupt, aintr_status 0x%016llX\n",
1374 			__func__, reg);
1375 		goto out;
1376 	}
1377 
1378 	/* FYI, it is 'okay' to clear AFU status before FC_ERROR */
1379 	writeq_be(reg_unmasked, &global->regs.aintr_clear);
1380 
1381 	/* Check each bit that is on */
1382 	for (i = 0; reg_unmasked; i++, reg_unmasked = (reg_unmasked >> 1)) {
1383 		info = find_ainfo(1ULL << i);
1384 		if (((reg_unmasked & 0x1) == 0) || !info)
1385 			continue;
1386 
1387 		port = info->port;
1388 
1389 		dev_err(dev, "%s: FC Port %d -> %s, fc_status 0x%08llX\n",
1390 			__func__, port, info->desc,
1391 		       readq_be(&global->fc_regs[port][FC_STATUS / 8]));
1392 
1393 		/*
1394 		 * Do link reset first, some OTHER errors will set FC_ERROR
1395 		 * again if cleared before or w/o a reset
1396 		 */
1397 		if (info->action & LINK_RESET) {
1398 			dev_err(dev, "%s: FC Port %d: resetting link\n",
1399 				__func__, port);
1400 			cfg->lr_state = LINK_RESET_REQUIRED;
1401 			cfg->lr_port = port;
1402 			kref_get(&cfg->afu->mapcount);
1403 			schedule_work(&cfg->work_q);
1404 		}
1405 
1406 		if (info->action & CLR_FC_ERROR) {
1407 			reg = readq_be(&global->fc_regs[port][FC_ERROR / 8]);
1408 
1409 			/*
1410 			 * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
1411 			 * should be the same and tracing one is sufficient.
1412 			 */
1413 
1414 			dev_err(dev, "%s: fc %d: clearing fc_error 0x%08llX\n",
1415 				__func__, port, reg);
1416 
1417 			writeq_be(reg, &global->fc_regs[port][FC_ERROR / 8]);
1418 			writeq_be(0, &global->fc_regs[port][FC_ERRCAP / 8]);
1419 		}
1420 
1421 		if (info->action & SCAN_HOST) {
1422 			atomic_inc(&cfg->scan_host_needed);
1423 			kref_get(&cfg->afu->mapcount);
1424 			schedule_work(&cfg->work_q);
1425 		}
1426 	}
1427 
1428 out:
1429 	dev_dbg(dev, "%s: returning IRQ_HANDLED, afu=%p\n", __func__, afu);
1430 	return IRQ_HANDLED;
1431 }
1432 
1433 /**
1434  * start_context() - starts the master context
1435  * @cfg:	Internal structure associated with the host.
1436  *
1437  * Return: A success or failure value from CXL services.
1438  */
1439 static int start_context(struct cxlflash_cfg *cfg)
1440 {
1441 	int rc = 0;
1442 
1443 	rc = cxl_start_context(cfg->mcctx,
1444 			       cfg->afu->work.work_element_descriptor,
1445 			       NULL);
1446 
1447 	pr_debug("%s: returning rc=%d\n", __func__, rc);
1448 	return rc;
1449 }
1450 
1451 /**
1452  * read_vpd() - obtains the WWPNs from VPD
1453  * @cfg:	Internal structure associated with the host.
1454  * @wwpn:	Array of size NUM_FC_PORTS to pass back WWPNs
1455  *
1456  * Return: 0 on success, -errno on failure
1457  */
1458 static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
1459 {
1460 	struct pci_dev *dev = cfg->dev;
1461 	int rc = 0;
1462 	int ro_start, ro_size, i, j, k;
1463 	ssize_t vpd_size;
1464 	char vpd_data[CXLFLASH_VPD_LEN];
1465 	char tmp_buf[WWPN_BUF_LEN] = { 0 };
1466 	char *wwpn_vpd_tags[NUM_FC_PORTS] = { "V5", "V6" };
1467 
1468 	/* Get the VPD data from the device */
1469 	vpd_size = cxl_read_adapter_vpd(dev, vpd_data, sizeof(vpd_data));
1470 	if (unlikely(vpd_size <= 0)) {
1471 		dev_err(&dev->dev, "%s: Unable to read VPD (size = %ld)\n",
1472 		       __func__, vpd_size);
1473 		rc = -ENODEV;
1474 		goto out;
1475 	}
1476 
1477 	/* Get the read only section offset */
1478 	ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
1479 				    PCI_VPD_LRDT_RO_DATA);
1480 	if (unlikely(ro_start < 0)) {
1481 		dev_err(&dev->dev, "%s: VPD Read-only data not found\n",
1482 			__func__);
1483 		rc = -ENODEV;
1484 		goto out;
1485 	}
1486 
1487 	/* Get the read only section size, cap when extends beyond read VPD */
1488 	ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
1489 	j = ro_size;
1490 	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
1491 	if (unlikely((i + j) > vpd_size)) {
1492 		pr_debug("%s: Might need to read more VPD (%d > %ld)\n",
1493 			 __func__, (i + j), vpd_size);
1494 		ro_size = vpd_size - i;
1495 	}
1496 
1497 	/*
1498 	 * Find the offset of the WWPN tag within the read only
1499 	 * VPD data and validate the found field (partials are
1500 	 * no good to us). Convert the ASCII data to an integer
1501 	 * value. Note that we must copy to a temporary buffer
1502 	 * because the conversion service requires that the ASCII
1503 	 * string be terminated.
1504 	 */
1505 	for (k = 0; k < NUM_FC_PORTS; k++) {
1506 		j = ro_size;
1507 		i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
1508 
1509 		i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
1510 		if (unlikely(i < 0)) {
1511 			dev_err(&dev->dev, "%s: Port %d WWPN not found "
1512 				"in VPD\n", __func__, k);
1513 			rc = -ENODEV;
1514 			goto out;
1515 		}
1516 
1517 		j = pci_vpd_info_field_size(&vpd_data[i]);
1518 		i += PCI_VPD_INFO_FLD_HDR_SIZE;
1519 		if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
1520 			dev_err(&dev->dev, "%s: Port %d WWPN incomplete or "
1521 				"VPD corrupt\n",
1522 			       __func__, k);
1523 			rc = -ENODEV;
1524 			goto out;
1525 		}
1526 
1527 		memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
1528 		rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
1529 		if (unlikely(rc)) {
1530 			dev_err(&dev->dev, "%s: Fail to convert port %d WWPN "
1531 				"to integer\n", __func__, k);
1532 			rc = -ENODEV;
1533 			goto out;
1534 		}
1535 	}
1536 
1537 out:
1538 	pr_debug("%s: returning rc=%d\n", __func__, rc);
1539 	return rc;
1540 }
1541 
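/*
 * Editorial aside (not driver code): in the kstrtoul() call above,
 * WWPN_LEN (16) does double duty as both the ASCII field length and
 * the numeric base. The copy stays NUL-terminated only because tmp_buf
 * is zero-initialized and sized WWPN_BUF_LEN. A hedged, more explicit
 * equivalent of that conversion:
 */
#if 0
	char buf[WWPN_LEN + 1];

	memcpy(buf, &vpd_data[i], WWPN_LEN);
	buf[WWPN_LEN] = '\0';				/* explicit terminator */
	rc = kstrtoul(buf, 16, (ulong *)&wwpn[k]);	/* base 16 */
#endif
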
1542 /**
1543  * init_pcr() - initialize the provisioning and control registers
1544  * @cfg:	Internal structure associated with the host.
1545  *
1546  * Also sets up fast access to the mapped registers and initializes AFU
1547  * command fields that never change.
1548  */
1549 static void init_pcr(struct cxlflash_cfg *cfg)
1550 {
1551 	struct afu *afu = cfg->afu;
1552 	struct sisl_ctrl_map __iomem *ctrl_map;
1553 	int i;
1554 
1555 	for (i = 0; i < MAX_CONTEXT; i++) {
1556 		ctrl_map = &afu->afu_map->ctrls[i].ctrl;
1557 		/* Disrupt any clients that could be running */
1558 		/* e.g. clients that survived a master restart */
1559 		writeq_be(0, &ctrl_map->rht_start);
1560 		writeq_be(0, &ctrl_map->rht_cnt_id);
1561 		writeq_be(0, &ctrl_map->ctx_cap);
1562 	}
1563 
1564 	/* Copy frequently used fields into afu */
1565 	afu->ctx_hndl = (u16) cxl_process_element(cfg->mcctx);
1566 	afu->host_map = &afu->afu_map->hosts[afu->ctx_hndl].host;
1567 	afu->ctrl_map = &afu->afu_map->ctrls[afu->ctx_hndl].ctrl;
1568 
1569 	/* Program the Endian Control for the master context */
1570 	writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl);
1571 
1572 	/* Initialize cmd fields that never change */
1573 	for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
1574 		afu->cmd[i].rcb.ctx_id = afu->ctx_hndl;
1575 		afu->cmd[i].rcb.msi = SISL_MSI_RRQ_UPDATED;
1576 		afu->cmd[i].rcb.rrq = 0x0;
1577 	}
1578 }
1579 
1580 /**
1581  * init_global() - initialize AFU global registers
1582  * @cfg:	Internal structure associated with the host.
1583  *
 * Return: 0 on success, -errno on failure
 */
1584 static int init_global(struct cxlflash_cfg *cfg)
1585 {
1586 	struct afu *afu = cfg->afu;
1587 	struct device *dev = &cfg->dev->dev;
1588 	u64 wwpn[NUM_FC_PORTS];	/* wwpn of AFU ports */
1589 	int i = 0, num_ports = 0;
1590 	int rc = 0;
1591 	u64 reg;
1592 
1593 	rc = read_vpd(cfg, &wwpn[0]);
1594 	if (rc) {
1595 		dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
1596 		goto out;
1597 	}
1598 
1599 	pr_debug("%s: wwpn0=0x%llX wwpn1=0x%llX\n", __func__, wwpn[0], wwpn[1]);
1600 
1601 	/* Set up RRQ in AFU for master issued cmds */
1602 	writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start);
1603 	writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end);
1604 
1605 	/* AFU configuration */
1606 	reg = readq_be(&afu->afu_map->global.regs.afu_config);
1607 	reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
1608 	/* enable all auto retry options and control endianness */
1609 	/* leave others at default: */
1610 	/* CTX_CAP write protected, mbox_r does not clear on read and */
1611 	/* checker on if dual afu */
1612 	writeq_be(reg, &afu->afu_map->global.regs.afu_config);
1613 
1614 	/* Global port select: select either port */
1615 	if (afu->internal_lun) {
1616 		/* Only use port 0 */
1617 		writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
1618 		num_ports = NUM_FC_PORTS - 1;
1619 	} else {
1620 		writeq_be(BOTH_PORTS, &afu->afu_map->global.regs.afu_port_sel);
1621 		num_ports = NUM_FC_PORTS;
1622 	}
1623 
1624 	for (i = 0; i < num_ports; i++) {
1625 		/* Unmask all errors (but they are still masked at AFU) */
1626 		writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRMSK / 8]);
1627 		/* Clear CRC error cnt & set a threshold */
1628 		(void)readq_be(&afu->afu_map->global.
1629 			       fc_regs[i][FC_CNT_CRCERR / 8]);
1630 		writeq_be(MC_CRC_THRESH, &afu->afu_map->global.fc_regs[i]
1631 			  [FC_CRC_THRESH / 8]);
1632 
1633 		/* Set WWPNs. If already programmed, wwpn[i] is 0 */
1634 		if (wwpn[i] != 0 &&
1635 		    afu_set_wwpn(afu, i,
1636 				 &afu->afu_map->global.fc_regs[i][0],
1637 				 wwpn[i])) {
1638 			dev_err(dev, "%s: failed to set WWPN on port %d\n",
1639 			       __func__, i);
1640 			rc = -EIO;
1641 			goto out;
1642 		}
1643 		/* Programming WWPN back to back causes additional
1644 		 * offline/online transitions and a PLOGI
1645 		 */
1646 		msleep(100);
1647 	}
1648 
1649 	/* Set up master's own CTX_CAP to allow real mode, host translation */
1650 	/* tables, afu cmds and read/write GSCSI cmds. */
1651 	/* First, unlock ctx_cap write by reading mbox */
1652 	(void)readq_be(&afu->ctrl_map->mbox_r);	/* unlock ctx_cap */
1653 	writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
1654 		   SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
1655 		   SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
1656 		  &afu->ctrl_map->ctx_cap);
1657 	/* Initialize heartbeat */
1658 	afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
1659 
1660 out:
1661 	return rc;
1662 }
1663 
1664 /**
1665  * start_afu() - initializes and starts the AFU
1666  * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
1667  */
1668 static int start_afu(struct cxlflash_cfg *cfg)
1669 {
1670 	struct afu *afu = cfg->afu;
1671 	struct afu_cmd *cmd;
1672 
1673 	int i = 0;
1674 	int rc = 0;
1675 
1676 	for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
1677 		cmd = &afu->cmd[i];
1678 
1679 		init_completion(&cmd->cevent);
1680 		spin_lock_init(&cmd->slock);
1681 		cmd->parent = afu;
1682 	}
1683 
1684 	init_pcr(cfg);
1685 
1686 	/* After an AFU reset, RRQ entries are stale, clear them */
1687 	memset(&afu->rrq_entry, 0, sizeof(afu->rrq_entry));
1688 
1689 	/* Initialize RRQ pointers */
1690 	afu->hrrq_start = &afu->rrq_entry[0];
1691 	afu->hrrq_end = &afu->rrq_entry[NUM_RRQ_ENTRY - 1];
1692 	afu->hrrq_curr = afu->hrrq_start;
1693 	afu->toggle = 1;
1694 
1695 	rc = init_global(cfg);
1696 
1697 	pr_debug("%s: returning rc=%d\n", __func__, rc);
1698 	return rc;
1699 }
1700 
1701 /**
1702  * init_intr() - sets up interrupt handlers for the master context
1703  * @cfg:	Internal structure associated with the host.
 * @ctx:	Context on which the AFU interrupts are allocated and mapped.
1704  *
1705  * Return: The undo level to use for teardown in the event of failure
1706  */
1707 static enum undo_level init_intr(struct cxlflash_cfg *cfg,
1708 				 struct cxl_context *ctx)
1709 {
1710 	struct afu *afu = cfg->afu;
1711 	struct device *dev = &cfg->dev->dev;
1712 	int rc = 0;
1713 	enum undo_level level = UNDO_NOOP;
1714 
1715 	rc = cxl_allocate_afu_irqs(ctx, 3);
1716 	if (unlikely(rc)) {
1717 		dev_err(dev, "%s: call to allocate_afu_irqs failed rc=%d!\n",
1718 			__func__, rc);
1719 		level = UNDO_NOOP;
1720 		goto out;
1721 	}
1722 
1723 	rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, afu,
1724 			     "SISL_MSI_SYNC_ERROR");
1725 	if (unlikely(rc <= 0)) {
1726 		dev_err(dev, "%s: IRQ 1 (SISL_MSI_SYNC_ERROR) map failed!\n",
1727 			__func__);
1728 		level = FREE_IRQ;
1729 		goto out;
1730 	}
1731 
1732 	rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, afu,
1733 			     "SISL_MSI_RRQ_UPDATED");
1734 	if (unlikely(rc <= 0)) {
1735 		dev_err(dev, "%s: IRQ 2 (SISL_MSI_RRQ_UPDATED) map failed!\n",
1736 			__func__);
1737 		level = UNMAP_ONE;
1738 		goto out;
1739 	}
1740 
1741 	rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, afu,
1742 			     "SISL_MSI_ASYNC_ERROR");
1743 	if (unlikely(rc <= 0)) {
1744 		dev_err(dev, "%s: IRQ 3 (SISL_MSI_ASYNC_ERROR) map failed!\n",
1745 			__func__);
1746 		level = UNMAP_TWO;
1747 		goto out;
1748 	}
1749 out:
1750 	return level;
1751 }
1752 
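/*
 * Editorial note: init_intr() and term_intr() form a waterfall pair.
 * The undo_level returned on failure records how far setup progressed,
 * and term_intr()'s fall-through switch unwinds exactly that much,
 * from the deepest successful step back down to the IRQ allocation.
 * One subtlety: a failure of the initial cxl_allocate_afu_irqs() also
 * returns UNDO_NOOP, so callers cannot distinguish it from success by
 * the level alone and must rely on the logged error.
 */
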
1753 /**
1754  * init_mc() - create and register as the master context
1755  * @cfg:	Internal structure associated with the host.
1756  *
1757  * Return: 0 on success, -errno on failure
1758  */
1759 static int init_mc(struct cxlflash_cfg *cfg)
1760 {
1761 	struct cxl_context *ctx;
1762 	struct device *dev = &cfg->dev->dev;
1763 	int rc = 0;
1764 	enum undo_level level;
1765 
1766 	ctx = cxl_get_context(cfg->dev);
1767 	if (unlikely(!ctx)) {
1768 		rc = -ENOMEM;
1769 		goto ret;
1770 	}
1771 	cfg->mcctx = ctx;
1772 
1773 	/* Set it up as a master with the CXL */
1774 	cxl_set_master(ctx);
1775 
1776 	/* During initialization reset the AFU to start from a clean slate */
1777 	rc = cxl_afu_reset(cfg->mcctx);
1778 	if (unlikely(rc)) {
1779 		dev_err(dev, "%s: initial AFU reset failed rc=%d\n",
1780 			__func__, rc);
1781 		goto ret;
1782 	}
1783 
1784 	level = init_intr(cfg, ctx);
1785 	if (unlikely(level)) {
1786 		dev_err(dev, "%s: setting up interrupts failed level=%d\n",
1787 			__func__, level);
 		rc = -ENODEV;
1788 		goto out;
1789 	}
1790 
1791 	/*
	 * This performs the equivalent of the CXL_IOCTL_START_WORK.
1792 	 * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
1793 	 * element (pe) that is embedded in the context (ctx).
1794 	 */
1795 	rc = start_context(cfg);
1796 	if (unlikely(rc)) {
1797 		dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
1798 		level = UNMAP_THREE;
1799 		goto out;
1800 	}
1801 ret:
1802 	pr_debug("%s: returning rc=%d\n", __func__, rc);
1803 	return rc;
1804 out:
1805 	term_intr(cfg, level);
1806 	goto ret;
1807 }
1808 
1809 /**
1810  * init_afu() - setup as master context and start AFU
1811  * @cfg:	Internal structure associated with the host.
1812  *
1813  * This routine is a higher level of control for configuring the
1814  * AFU on probe and reset paths.
1815  *
1816  * Return: 0 on success, -errno on failure
1817  */
1818 static int init_afu(struct cxlflash_cfg *cfg)
1819 {
1820 	u64 reg;
1821 	int rc = 0;
1822 	struct afu *afu = cfg->afu;
1823 	struct device *dev = &cfg->dev->dev;
1824 
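	/*
	 * Inform the cxl layer that a PERST reloads the same AFU image so
	 * that recovery paths (e.g. EEH slot reset) can reset the card
	 * without requiring a different image to be downloaded.
	 */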
1825 	cxl_perst_reloads_same_image(cfg->cxl_afu, true);
1826 
1827 	rc = init_mc(cfg);
1828 	if (rc) {
1829 		dev_err(dev, "%s: call to init_mc failed, rc=%d!\n",
1830 			__func__, rc);
1831 		goto out;
1832 	}
1833 
1834 	/* Map the entire MMIO space of the AFU */
1835 	afu->afu_map = cxl_psa_map(cfg->mcctx);
1836 	if (!afu->afu_map) {
1837 		dev_err(dev, "%s: call to cxl_psa_map failed!\n", __func__);
1838 		rc = -ENOMEM;
1839 		goto err1;
1840 	}
1841 	kref_init(&afu->mapcount);
1842 
1843 	/* No byte reverse on reading afu_version or string will be backwards */
1844 	reg = readq(&afu->afu_map->global.regs.afu_version);
1845 	memcpy(afu->version, &reg, sizeof(reg));
1846 	afu->interface_version =
1847 	    readq_be(&afu->afu_map->global.regs.interface_version);
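	/* An interface_version of all 1s (reads as -1) denotes a back level AFU */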
1848 	if ((afu->interface_version + 1) == 0) {
1849 		pr_err("Back level AFU, please upgrade. AFU version %s "
1850 		       "interface version 0x%llx\n", afu->version,
1851 		       afu->interface_version);
1852 		rc = -EINVAL;
1853 		goto err2;
1854 	}
1855 
1856 	pr_debug("%s: afu version %s, interface version 0x%llX\n", __func__,
1857 		 afu->version, afu->interface_version);
1858 
1859 	rc = start_afu(cfg);
1860 	if (rc) {
1861 		dev_err(dev, "%s: call to start_afu failed, rc=%d!\n",
1862 			__func__, rc);
1863 		goto err2;
1864 	}
1865 
1866 	afu_err_intr_init(cfg->afu);
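	/*
	 * Prime the cached command room from the AFU. The send path
	 * decrements this cache to avoid an MMIO read per request and
	 * asks the worker thread to refresh it once exhausted.
	 */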
1867 	atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));
1868 
1869 	/* Restore the LUN mappings */
1870 	cxlflash_restore_luntable(cfg);
1871 out:
1872 	pr_debug("%s: returning rc=%d\n", __func__, rc);
1873 	return rc;
1874 
1875 err2:
1876 	kref_put(&afu->mapcount, afu_unmap);
1877 err1:
1878 	term_intr(cfg, UNMAP_THREE);
1879 	term_mc(cfg);
1880 	goto out;
1881 }
1882 
1883 /**
1884  * cxlflash_afu_sync() - builds and sends an AFU sync command
1885  * @afu:	AFU associated with the host.
1886  * @ctx_hndl_u:	Identifies context requesting sync.
1887  * @res_hndl_u:	Identifies resource requesting sync.
1888  * @mode:	Type of sync to issue (lightweight, heavyweight, global).
1889  *
1890  * The AFU can only take 1 sync command at a time. This routine enforces this
1891  * limitation by using a mutex to provide exclusive access to the AFU during
1892  * the sync. This design point requires calling threads to not be on interrupt
1893  * context due to the possibility of sleeping during concurrent sync operations.
1894  *
1895  * AFU sync operations are only necessary and allowed when the device is
1896  * operating normally. When not operating normally, sync requests can occur as
1897  * part of cleaning up resources associated with an adapter prior to removal.
1898  * In this scenario, these requests are simply ignored (safe due to the AFU
1899  * going away).
1900  *
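 *
 * Example (illustrative only, names hypothetical): a caller flushing a
 * LUN table update might issue a lightweight sync via
 * cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC).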
1901  * Return:
1902  *	0 on success
1903  *	-1 on failure
1904  */
1905 int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
1906 		      res_hndl_t res_hndl_u, u8 mode)
1907 {
1908 	struct cxlflash_cfg *cfg = afu->parent;
1909 	struct device *dev = &cfg->dev->dev;
1910 	struct afu_cmd *cmd = NULL;
1911 	int rc = 0;
1912 	int retry_cnt = 0;
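	/*
	 * Note: this mutex is function-static and therefore serializes AFU
	 * sync commands across all adapters in the system, not just the
	 * AFU passed in.
	 */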
1913 	static DEFINE_MUTEX(sync_active);
1914 
1915 	if (cfg->state != STATE_NORMAL) {
1916 		pr_debug("%s: Sync not required! (%u)\n", __func__, cfg->state);
1917 		return 0;
1918 	}
1919 
1920 	mutex_lock(&sync_active);
1921 retry:
1922 	cmd = cmd_checkout(afu);
1923 	if (unlikely(!cmd)) {
1924 		retry_cnt++;
1925 		udelay(1000 * retry_cnt);
1926 		if (retry_cnt < MC_RETRY_CNT)
1927 			goto retry;
1928 		dev_err(dev, "%s: could not get a free command\n", __func__);
1929 		rc = -1;
1930 		goto out;
1931 	}
1932 
1933 	pr_debug("%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);
1934 
1935 	memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
1936 
1937 	cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
1938 	cmd->rcb.port_sel = 0x0;	/* NA */
1939 	cmd->rcb.lun_id = 0x0;	/* NA */
1940 	cmd->rcb.data_len = 0x0;
1941 	cmd->rcb.data_ea = 0x0;
1942 	cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT;
1943 
1944 	cmd->rcb.cdb[0] = 0xC0;	/* AFU Sync */
1945 	cmd->rcb.cdb[1] = mode;
1946 
1947 	/* The cdb is aligned, no unaligned accessors required */
1948 	*((__be16 *)&cmd->rcb.cdb[2]) = cpu_to_be16(ctx_hndl_u);
1949 	*((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u);
1950 
1951 	rc = send_cmd(afu, cmd);
1952 	if (unlikely(rc))
1953 		goto out;
1954 
1955 	wait_resp(afu, cmd);
1956 
1957 	/* B_ERROR in host_use_b[0] is set on timeout */
1958 	if (unlikely((cmd->sa.ioasc != 0) ||
1959 		     (cmd->sa.host_use_b[0] & B_ERROR)))
1960 		rc = -1;
1961 out:
1962 	mutex_unlock(&sync_active);
1963 	if (cmd)
1964 		cmd_checkin(cmd);
1965 	pr_debug("%s: returning rc=%d\n", __func__, rc);
1966 	return rc;
1967 }
1968 
1969 /**
1970  * afu_reset() - resets the AFU
1971  * @cfg:	Internal structure associated with the host.
1972  *
1973  * Return: 0 on success, -errno on failure
1974  */
1975 static int afu_reset(struct cxlflash_cfg *cfg)
1976 {
1977 	int rc = 0;
1978 	/*
	 * Stop the context before the reset. Since the context is
1979 	 * no longer available, restart it after the reset completes.
1980 	 */
1981 
1982 	term_afu(cfg);
1983 
1984 	rc = init_afu(cfg);
1985 
1986 	pr_debug("%s: returning rc=%d\n", __func__, rc);
1987 	return rc;
1988 }
1989 
1990 /**
1991  * drain_ioctls() - wait until all currently executing ioctls have completed
1992  * @cfg:	Internal structure associated with the host.
1993  *
1994  * Obtain write access to the read/write semaphore that wraps ioctl
1995  * handling to 'drain' ioctls currently executing. Since readers (the
 * in-flight ioctls) must release the semaphore before write access is
 * granted, acquiring and immediately releasing it guarantees that all
 * previously started ioctls have completed.
1996  */
1997 static void drain_ioctls(struct cxlflash_cfg *cfg)
1998 {
1999 	down_write(&cfg->ioctl_rwsem);
2000 	up_write(&cfg->ioctl_rwsem);
2001 }
2002 
2003 /**
2004  * cxlflash_eh_device_reset_handler() - reset a single LUN
2005  * @scp:	SCSI command from stack identifying the LUN to reset.
2006  *
2007  * Return:
2008  *	SUCCESS as defined in scsi/scsi.h
2009  *	FAILED as defined in scsi/scsi.h
2010  */
2011 static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
2012 {
2013 	int rc = SUCCESS;
2014 	struct Scsi_Host *host = scp->device->host;
2015 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
2016 	struct afu *afu = cfg->afu;
2017 	int rcr = 0;
2018 
2019 	pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
2020 		 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
2021 		 host->host_no, scp->device->channel,
2022 		 scp->device->id, scp->device->lun,
2023 		 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
2024 		 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
2025 		 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
2026 		 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
2027 
2028 retry:
2029 	switch (cfg->state) {
2030 	case STATE_NORMAL:
2031 		rcr = send_tmf(afu, scp, TMF_LUN_RESET);
2032 		if (unlikely(rcr))
2033 			rc = FAILED;
2034 		break;
2035 	case STATE_RESET:
2036 		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2037 		goto retry;
2038 	default:
2039 		rc = FAILED;
2040 		break;
2041 	}
2042 
2043 	pr_debug("%s: returning rc=%d\n", __func__, rc);
2044 	return rc;
2045 }
2046 
2047 /**
2048  * cxlflash_eh_host_reset_handler() - reset the host adapter
2049  * @scp:	SCSI command from stack identifying host.
2050  *
2051  * Return:
2052  *	SUCCESS as defined in scsi/scsi.h
2053  *	FAILED as defined in scsi/scsi.h
2054  */
2055 static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
2056 {
2057 	int rc = SUCCESS;
2058 	int rcr = 0;
2059 	struct Scsi_Host *host = scp->device->host;
2060 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
2061 
2062 	pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
2063 		 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
2064 		 host->host_no, scp->device->channel,
2065 		 scp->device->id, scp->device->lun,
2066 		 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
2067 		 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
2068 		 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
2069 		 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
2070 
2071 	switch (cfg->state) {
2072 	case STATE_NORMAL:
2073 		cfg->state = STATE_RESET;
2074 		drain_ioctls(cfg);
2075 		cxlflash_mark_contexts_error(cfg);
2076 		rcr = afu_reset(cfg);
2077 		if (rcr) {
2078 			rc = FAILED;
2079 			cfg->state = STATE_FAILTERM;
2080 		} else {
2081 			cfg->state = STATE_NORMAL;
 		}
2082 		wake_up_all(&cfg->reset_waitq);
2083 		break;
2084 	case STATE_RESET:
2085 		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2086 		if (cfg->state == STATE_NORMAL)
2087 			break;
2088 		/* fall through */
2089 	default:
2090 		rc = FAILED;
2091 		break;
2092 	}
2093 
2094 	pr_debug("%s: returning rc=%d\n", __func__, rc);
2095 	return rc;
2096 }
2097 
2098 /**
2099  * cxlflash_change_queue_depth() - change the queue depth for the device
2100  * @sdev:	SCSI device destined for queue depth change.
2101  * @qdepth:	Requested queue depth value to set.
2102  *
2103  * The requested queue depth is capped to the maximum supported value.
2104  *
2105  * Return: The actual queue depth set.
2106  */
2107 static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
2108 {
2110 	if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
2111 		qdepth = CXLFLASH_MAX_CMDS_PER_LUN;
2112 
2113 	scsi_change_queue_depth(sdev, qdepth);
2114 	return sdev->queue_depth;
2115 }
2116 
2117 /**
2118  * cxlflash_show_port_status() - queries and presents the current port status
2119  * @port:	Desired port for status reporting.
2120  * @afu:	AFU owning the specified port.
2121  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2122  *
2123  * Return: The size of the ASCII string returned in @buf.
2124  */
2125 static ssize_t cxlflash_show_port_status(u32 port, struct afu *afu, char *buf)
2126 {
2127 	char *disp_status;
2128 	u64 status;
2129 	__be64 __iomem *fc_regs;
2130 
2131 	if (port >= NUM_FC_PORTS)
2132 		return 0;
2133 
2134 	fc_regs = &afu->afu_map->global.fc_regs[port][0];
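	/* fc_regs is an array of 64-bit words, hence the byte offset / 8 */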
2135 	status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
2136 	status &= FC_MTIP_STATUS_MASK;
2137 
2138 	if (status == FC_MTIP_STATUS_ONLINE)
2139 		disp_status = "online";
2140 	else if (status == FC_MTIP_STATUS_OFFLINE)
2141 		disp_status = "offline";
2142 	else
2143 		disp_status = "unknown";
2144 
2145 	return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
2146 }
2147 
2148 /**
2149  * port0_show() - queries and presents the current status of port 0
2150  * @dev:	Generic device associated with the host owning the port.
2151  * @attr:	Device attribute representing the port.
2152  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2153  *
2154  * Return: The size of the ASCII string returned in @buf.
2155  */
2156 static ssize_t port0_show(struct device *dev,
2157 			  struct device_attribute *attr,
2158 			  char *buf)
2159 {
2160 	struct Scsi_Host *shost = class_to_shost(dev);
2161 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
2162 	struct afu *afu = cfg->afu;
2163 
2164 	return cxlflash_show_port_status(0, afu, buf);
2165 }
2166 
2167 /**
2168  * port1_show() - queries and presents the current status of port 1
2169  * @dev:	Generic device associated with the host owning the port.
2170  * @attr:	Device attribute representing the port.
2171  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2172  *
2173  * Return: The size of the ASCII string returned in @buf.
2174  */
2175 static ssize_t port1_show(struct device *dev,
2176 			  struct device_attribute *attr,
2177 			  char *buf)
2178 {
2179 	struct Scsi_Host *shost = class_to_shost(dev);
2180 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
2181 	struct afu *afu = cfg->afu;
2182 
2183 	return cxlflash_show_port_status(1, afu, buf);
2184 }
2185 
2186 /**
2187  * lun_mode_show() - presents the current LUN mode of the host
2188  * @dev:	Generic device associated with the host.
2189  * @attr:	Device attribute representing the LUN mode.
2190  * @buf:	Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
2191  *
2192  * Return: The size of the ASCII string returned in @buf.
2193  */
2194 static ssize_t lun_mode_show(struct device *dev,
2195 			     struct device_attribute *attr, char *buf)
2196 {
2197 	struct Scsi_Host *shost = class_to_shost(dev);
2198 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
2199 	struct afu *afu = cfg->afu;
2200 
2201 	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
2202 }
2203 
2204 /**
2205  * lun_mode_store() - sets the LUN mode of the host
2206  * @dev:	Generic device associated with the host.
2207  * @attr:	Device attribute representing the LUN mode.
2208  * @buf:	Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
2209  * @count:	Length of data residing in @buf.
2210  *
2211  * The CXL Flash AFU supports a dummy LUN mode where the external
2212  * links and storage are not required. Space on the FPGA is used
2213  * to create 1 or 2 small LUNs which are presented to the system
2214  * as if they were a normal storage device. This feature is useful
2215  * during development and also provides manufacturing with a way
2216  * to test the AFU without an actual device.
2217  *
2218  * 0 = external LUN[s] (default)
2219  * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
2220  * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
2221  * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
2222  * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
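 *
 * For example, writing the host attribute (path illustrative) via
 * "echo 1 > /sys/class/scsi_host/host<n>/lun_mode" selects a single
 * internal LUN with 512B blocks.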
2223  *
2224  * Return: The number of bytes consumed from @buf (i.e. @count).
2225  */
2226 static ssize_t lun_mode_store(struct device *dev,
2227 			      struct device_attribute *attr,
2228 			      const char *buf, size_t count)
2229 {
2230 	struct Scsi_Host *shost = class_to_shost(dev);
2231 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
2232 	struct afu *afu = cfg->afu;
2233 	int rc;
2234 	u32 lun_mode;
2235 
2236 	rc = kstrtouint(buf, 10, &lun_mode);
2237 	if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
2238 		afu->internal_lun = lun_mode;
2239 
2240 		/*
2241 		 * When configured for internal LUN, there is only one channel,
2242 		 * channel number 0, else there will be 2 (default).
2243 		 */
2244 		if (afu->internal_lun)
2245 			shost->max_channel = 0;
2246 		else
2247 			shost->max_channel = NUM_FC_PORTS - 1;
2248 
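		/*
		 * The mode change only takes effect across an AFU reset;
		 * rescan afterwards so the newly presented LUNs are found.
		 */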
2249 		afu_reset(cfg);
2250 		scsi_scan_host(cfg->host);
2251 	}
2252 
2253 	return count;
2254 }
2255 
2256 /**
2257  * ioctl_version_show() - presents the current ioctl version of the host
2258  * @dev:	Generic device associated with the host.
2259  * @attr:	Device attribute representing the ioctl version.
2260  * @buf:	Buffer of length PAGE_SIZE to report back the ioctl version.
2261  *
2262  * Return: The size of the ASCII string returned in @buf.
2263  */
2264 static ssize_t ioctl_version_show(struct device *dev,
2265 				  struct device_attribute *attr, char *buf)
2266 {
2267 	return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0);
2268 }
2269 
2270 /**
2271  * cxlflash_show_port_lun_table() - queries and presents the port LUN table
2272  * @port:	Desired port for status reporting.
2273  * @afu:	AFU owning the specified port.
2274  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2275  *
2276  * Return: The size of the ASCII string returned in @buf.
2277  */
2278 static ssize_t cxlflash_show_port_lun_table(u32 port,
2279 					    struct afu *afu,
2280 					    char *buf)
2281 {
2282 	int i;
2283 	ssize_t bytes = 0;
2284 	__be64 __iomem *fc_port;
2285 
2286 	if (port >= NUM_FC_PORTS)
2287 		return 0;
2288 
2289 	fc_port = &afu->afu_map->global.fc_port[port][0];
2290 
2291 	for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
2292 		bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
2293 				   "%03d: %016llX\n", i, readq_be(&fc_port[i]));
2294 	return bytes;
2295 }
2296 
2297 /**
2298  * port0_lun_table_show() - presents the current LUN table of port 0
2299  * @dev:	Generic device associated with the host owning the port.
2300  * @attr:	Device attribute representing the port.
2301  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2302  *
2303  * Return: The size of the ASCII string returned in @buf.
2304  */
2305 static ssize_t port0_lun_table_show(struct device *dev,
2306 				    struct device_attribute *attr,
2307 				    char *buf)
2308 {
2309 	struct Scsi_Host *shost = class_to_shost(dev);
2310 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
2311 	struct afu *afu = cfg->afu;
2312 
2313 	return cxlflash_show_port_lun_table(0, afu, buf);
2314 }
2315 
2316 /**
2317  * port1_lun_table_show() - presents the current LUN table of port 1
2318  * @dev:	Generic device associated with the host owning the port.
2319  * @attr:	Device attribute representing the port.
2320  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2321  *
2322  * Return: The size of the ASCII string returned in @buf.
2323  */
2324 static ssize_t port1_lun_table_show(struct device *dev,
2325 				    struct device_attribute *attr,
2326 				    char *buf)
2327 {
2328 	struct Scsi_Host *shost = class_to_shost(dev);
2329 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
2330 	struct afu *afu = cfg->afu;
2331 
2332 	return cxlflash_show_port_lun_table(1, afu, buf);
2333 }
2334 
2335 /**
2336  * mode_show() - presents the current mode of the device
2337  * @dev:	Generic device associated with the device.
2338  * @attr:	Device attribute representing the device mode.
2339  * @buf:	Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
2340  *
2341  * Return: The size of the ASCII string returned in @buf.
2342  */
2343 static ssize_t mode_show(struct device *dev,
2344 			 struct device_attribute *attr, char *buf)
2345 {
2346 	struct scsi_device *sdev = to_scsi_device(dev);
2347 
2348 	return scnprintf(buf, PAGE_SIZE, "%s\n",
2349 			 sdev->hostdata ? "superpipe" : "legacy");
2350 }
2351 
2352 /*
2353  * Host attributes
2354  */
2355 static DEVICE_ATTR_RO(port0);
2356 static DEVICE_ATTR_RO(port1);
2357 static DEVICE_ATTR_RW(lun_mode);
2358 static DEVICE_ATTR_RO(ioctl_version);
2359 static DEVICE_ATTR_RO(port0_lun_table);
2360 static DEVICE_ATTR_RO(port1_lun_table);
2361 
2362 static struct device_attribute *cxlflash_host_attrs[] = {
2363 	&dev_attr_port0,
2364 	&dev_attr_port1,
2365 	&dev_attr_lun_mode,
2366 	&dev_attr_ioctl_version,
2367 	&dev_attr_port0_lun_table,
2368 	&dev_attr_port1_lun_table,
2369 	NULL
2370 };
2371 
2372 /*
2373  * Device attributes
2374  */
2375 static DEVICE_ATTR_RO(mode);
2376 
2377 static struct device_attribute *cxlflash_dev_attrs[] = {
2378 	&dev_attr_mode,
2379 	NULL
2380 };
2381 
2382 /*
2383  * Host template
2384  */
2385 static struct scsi_host_template driver_template = {
2386 	.module = THIS_MODULE,
2387 	.name = CXLFLASH_ADAPTER_NAME,
2388 	.info = cxlflash_driver_info,
2389 	.ioctl = cxlflash_ioctl,
2390 	.proc_name = CXLFLASH_NAME,
2391 	.queuecommand = cxlflash_queuecommand,
2392 	.eh_device_reset_handler = cxlflash_eh_device_reset_handler,
2393 	.eh_host_reset_handler = cxlflash_eh_host_reset_handler,
2394 	.change_queue_depth = cxlflash_change_queue_depth,
2395 	.cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
2396 	.can_queue = CXLFLASH_MAX_CMDS,
2397 	.this_id = -1,
2398 	.sg_tablesize = SG_NONE,	/* No scatter gather support */
2399 	.max_sectors = CXLFLASH_MAX_SECTORS,
2400 	.use_clustering = ENABLE_CLUSTERING,
2401 	.shost_attrs = cxlflash_host_attrs,
2402 	.sdev_attrs = cxlflash_dev_attrs,
2403 };
2404 
2405 /*
2406  * Device dependent values
2407  */
2408 static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
2409 					0ULL };
2410 static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
2411 					CXLFLASH_NOTIFY_SHUTDOWN };
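/*
 * CXLFLASH_NOTIFY_SHUTDOWN indicates that the AFU wants to be notified
 * prior to a shutdown or reset; the Corsa device has no such requirement.
 */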
2412 
2413 /*
2414  * PCI device binding table
2415  */
2416 static struct pci_device_id cxlflash_pci_table[] = {
2417 	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
2418 	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
2419 	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
2420 	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
2421 	{}
2422 };
2423 
2424 MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
2425 
2426 /**
2427  * cxlflash_worker_thread() - work thread handler for the AFU
2428  * @work:	Work structure contained within cxlflash associated with host.
2429  *
2430  * Handles the following events:
2431  * - Link reset, which cannot be performed in interrupt context as it
2432  * can block for up to a few seconds
2433  * - Read AFU command room
2434  * - Rescan the host
2435  */
2436 static void cxlflash_worker_thread(struct work_struct *work)
2437 {
2438 	struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
2439 						work_q);
2440 	struct afu *afu = cfg->afu;
2441 	struct device *dev = &cfg->dev->dev;
2442 	int port;
2443 	ulong lock_flags;
2444 
2445 	/* Avoid MMIO if the device has failed */
2447 	if (cfg->state != STATE_NORMAL)
2448 		return;
2449 
2450 	spin_lock_irqsave(cfg->host->host_lock, lock_flags);
2451 
2452 	if (cfg->lr_state == LINK_RESET_REQUIRED) {
2453 		port = cfg->lr_port;
2454 		if (port < 0) {
2455 			dev_err(dev, "%s: invalid port index %d\n",
2456 				__func__, port);
2457 		} else {
2458 			spin_unlock_irqrestore(cfg->host->host_lock,
2459 					       lock_flags);
2460 
2461 			/* The reset can block... */
2462 			afu_link_reset(afu, port,
2463 				       &afu->afu_map->global.fc_regs[port][0]);
2464 			spin_lock_irqsave(cfg->host->host_lock, lock_flags);
2465 		}
2466 
2467 		cfg->lr_state = LINK_RESET_COMPLETE;
2468 	}
2469 
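	/*
	 * read_room is raised by the send path when its cached command
	 * room count runs out; refresh the cached value from the AFU.
	 */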
2470 	if (afu->read_room) {
2471 		atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));
2472 		afu->read_room = false;
2473 	}
2474 
2475 	spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
2476 
2477 	if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
2478 		scsi_scan_host(cfg->host);
2479 	kref_put(&afu->mapcount, afu_unmap);
2480 }
2481 
2482 /**
2483  * cxlflash_probe() - PCI entry point to add host
2484  * @pdev:	PCI device associated with the host.
2485  * @dev_id:	PCI device id associated with device.
2486  *
2487  * Return: 0 on success, -errno on failure
2488  */
2489 static int cxlflash_probe(struct pci_dev *pdev,
2490 			  const struct pci_device_id *dev_id)
2491 {
2492 	struct Scsi_Host *host;
2493 	struct cxlflash_cfg *cfg = NULL;
2494 	struct dev_dependent_vals *ddv;
2495 	int rc = 0;
2496 
2497 	dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
2498 		__func__, pdev->irq);
2499 
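	/* Tailor the shared host template to this device type before alloc */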
2500 	ddv = (struct dev_dependent_vals *)dev_id->driver_data;
2501 	driver_template.max_sectors = ddv->max_sectors;
2502 
2503 	host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
2504 	if (!host) {
2505 		dev_err(&pdev->dev, "%s: call to scsi_host_alloc failed!\n",
2506 			__func__);
2507 		rc = -ENOMEM;
2508 		goto out;
2509 	}
2510 
2511 	host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
2512 	host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
2513 	host->max_channel = NUM_FC_PORTS - 1;
2514 	host->unique_id = host->host_no;
2515 	host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
2516 
2517 	cfg = (struct cxlflash_cfg *)host->hostdata;
2518 	cfg->host = host;
2519 	rc = alloc_mem(cfg);
2520 	if (rc) {
2521 		dev_err(&pdev->dev, "%s: call to alloc_mem failed!\n",
2522 			__func__);
2523 		rc = -ENOMEM;
2524 		scsi_host_put(cfg->host);
2525 		goto out;
2526 	}
2527 
2528 	cfg->init_state = INIT_STATE_NONE;
2529 	cfg->dev = pdev;
2530 	cfg->cxl_fops = cxlflash_cxl_fops;
2531 
2532 	/*
2533 	 * The promoted LUNs move to the top of the LUN table. The rest stay
2534 	 * on the bottom half. The bottom half grows from the end
2535 	 * (index = 255), whereas the top half grows from the beginning
2536 	 * (index = 0).
2537 	 */
2538 	cfg->promote_lun_index  = 0;
2539 	cfg->last_lun_index[0] = CXLFLASH_NUM_VLUNS/2 - 1;
2540 	cfg->last_lun_index[1] = CXLFLASH_NUM_VLUNS/2 - 1;
2541 
2542 	cfg->dev_id = (struct pci_device_id *)dev_id;
2543 
2544 	init_waitqueue_head(&cfg->tmf_waitq);
2545 	init_waitqueue_head(&cfg->reset_waitq);
2546 
2547 	INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
2548 	cfg->lr_state = LINK_RESET_INVALID;
2549 	cfg->lr_port = -1;
2550 	spin_lock_init(&cfg->tmf_slock);
2551 	mutex_init(&cfg->ctx_tbl_list_mutex);
2552 	mutex_init(&cfg->ctx_recovery_mutex);
2553 	init_rwsem(&cfg->ioctl_rwsem);
2554 	INIT_LIST_HEAD(&cfg->ctx_err_recovery);
2555 	INIT_LIST_HEAD(&cfg->lluns);
2556 
2557 	pci_set_drvdata(pdev, cfg);
2558 
2559 	cfg->cxl_afu = cxl_pci_to_afu(pdev);
2560 
2561 	rc = init_pci(cfg);
2562 	if (rc) {
2563 		dev_err(&pdev->dev, "%s: call to init_pci "
2564 			"failed rc=%d!\n", __func__, rc);
2565 		goto out_remove;
2566 	}
2567 	cfg->init_state = INIT_STATE_PCI;
2568 
2569 	rc = init_afu(cfg);
2570 	if (rc) {
2571 		dev_err(&pdev->dev, "%s: call to init_afu "
2572 			"failed rc=%d!\n", __func__, rc);
2573 		goto out_remove;
2574 	}
2575 	cfg->init_state = INIT_STATE_AFU;
2576 
2577 	rc = init_scsi(cfg);
2578 	if (rc) {
2579 		dev_err(&pdev->dev, "%s: call to init_scsi "
2580 			"failed rc=%d!\n", __func__, rc);
2581 		goto out_remove;
2582 	}
2583 	cfg->init_state = INIT_STATE_SCSI;
2584 
2585 out:
2586 	pr_debug("%s: returning rc=%d\n", __func__, rc);
2587 	return rc;
2588 
2589 out_remove:
2590 	cxlflash_remove(pdev);
2591 	goto out;
2592 }
2593 
2594 /**
2595  * cxlflash_pci_error_detected() - called when a PCI error is detected
2596  * @pdev:	PCI device struct.
2597  * @state:	PCI channel state.
2598  *
2599  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
2600  */
2601 static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
2602 						    pci_channel_state_t state)
2603 {
2604 	int rc = 0;
2605 	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
2606 	struct device *dev = &cfg->dev->dev;
2607 
2608 	dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);
2609 
2610 	switch (state) {
2611 	case pci_channel_io_frozen:
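		/*
		 * MMIO to the adapter is no longer possible; quiesce new
		 * requests, mark user contexts in error and tear down the
		 * AFU so that slot_reset can rebuild it from scratch.
		 */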
2612 		cfg->state = STATE_RESET;
2613 		scsi_block_requests(cfg->host);
2614 		drain_ioctls(cfg);
2615 		rc = cxlflash_mark_contexts_error(cfg);
2616 		if (unlikely(rc))
2617 			dev_err(dev, "%s: Failed to mark user contexts!(%d)\n",
2618 				__func__, rc);
2619 		term_afu(cfg);
2620 		return PCI_ERS_RESULT_NEED_RESET;
2621 	case pci_channel_io_perm_failure:
2622 		cfg->state = STATE_FAILTERM;
2623 		wake_up_all(&cfg->reset_waitq);
2624 		scsi_unblock_requests(cfg->host);
2625 		return PCI_ERS_RESULT_DISCONNECT;
2626 	default:
2627 		break;
2628 	}
2629 	return PCI_ERS_RESULT_NEED_RESET;
2630 }
2631 
2632 /**
2633  * cxlflash_pci_slot_reset() - called when PCI slot has been reset
2634  * @pdev:	PCI device struct.
2635  *
2636  * This routine is called by the pci error recovery code after the PCI
2637  * slot has been reset, just before we should resume normal operations.
2638  *
2639  * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
2640  */
2641 static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
2642 {
2643 	int rc = 0;
2644 	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
2645 	struct device *dev = &cfg->dev->dev;
2646 
2647 	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
2648 
2649 	rc = init_afu(cfg);
2650 	if (unlikely(rc)) {
2651 		dev_err(dev, "%s: EEH recovery failed! (%d)\n", __func__, rc);
2652 		return PCI_ERS_RESULT_DISCONNECT;
2653 	}
2654 
2655 	return PCI_ERS_RESULT_RECOVERED;
2656 }
2657 
2658 /**
2659  * cxlflash_pci_resume() - called when normal operation can resume
2660  * @pdev:	PCI device struct
2661  */
2662 static void cxlflash_pci_resume(struct pci_dev *pdev)
2663 {
2664 	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
2665 	struct device *dev = &cfg->dev->dev;
2666 
2667 	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
2668 
2669 	cfg->state = STATE_NORMAL;
2670 	wake_up_all(&cfg->reset_waitq);
2671 	scsi_unblock_requests(cfg->host);
2672 }
2673 
2674 static const struct pci_error_handlers cxlflash_err_handler = {
2675 	.error_detected = cxlflash_pci_error_detected,
2676 	.slot_reset = cxlflash_pci_slot_reset,
2677 	.resume = cxlflash_pci_resume,
2678 };
2679 
2680 /*
2681  * PCI device structure
2682  */
2683 static struct pci_driver cxlflash_driver = {
2684 	.name = CXLFLASH_NAME,
2685 	.id_table = cxlflash_pci_table,
2686 	.probe = cxlflash_probe,
2687 	.remove = cxlflash_remove,
2688 	.shutdown = cxlflash_shutdown,
2689 	.err_handler = &cxlflash_err_handler,
2690 };
2691 
2692 /**
2693  * init_cxlflash() - module entry point
2694  *
2695  * Return: 0 on success, -errno on failure
2696  */
2697 static int __init init_cxlflash(void)
2698 {
2699 	pr_info("%s: %s\n", __func__, CXLFLASH_ADAPTER_NAME);
2700 
2701 	cxlflash_list_init();
2702 
2703 	return pci_register_driver(&cxlflash_driver);
2704 }
2705 
2706 /**
2707  * exit_cxlflash() - module exit point
2708  */
2709 static void __exit exit_cxlflash(void)
2710 {
2711 	cxlflash_term_global_luns();
2712 	cxlflash_free_errpage();
2713 
2714 	pci_unregister_driver(&cxlflash_driver);
2715 }
2716 
2717 module_init(init_cxlflash);
2718 module_exit(exit_cxlflash);
2719