/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/unaligned.h>

#include <misc/cxl.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "main.h"
#include "sislite.h"
#include "common.h"

MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");

/**
 * cmd_checkout() - checks out an AFU command
 * @afu:	AFU to checkout from.
 *
 * Commands are checked out in a round-robin fashion. Note that since
 * the command pool is larger than the hardware queue, the majority of
 * times we will only loop once or twice before getting a command. The
 * buffer and CDB within the command are initialized (zeroed) prior to
 * returning.
 *
 * Return: The checked out command or NULL when command pool is empty.
 */
static struct afu_cmd *cmd_checkout(struct afu *afu)
{
	int k, dec = CXLFLASH_NUM_CMDS;
	struct afu_cmd *cmd;

	while (dec--) {
		k = (afu->cmd_couts++ & (CXLFLASH_NUM_CMDS - 1));

		cmd = &afu->cmd[k];

		if (!atomic_dec_if_positive(&cmd->free)) {
			pr_devel("%s: returning found index=%d cmd=%p\n",
				 __func__, cmd->slot, cmd);
			memset(cmd->buf, 0, CMD_BUFSIZE);
			memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
			return cmd;
		}
	}

	return NULL;
}

/**
 * cmd_checkin() - checks in an AFU command
 * @cmd:	AFU command to checkin.
 *
 * Safe to pass commands that have already been checked in. Several
 * internal tracking fields are reset as part of the checkin. Note
 * that these are intentionally reset prior to toggling the free bit
 * to avoid clobbering values in the event that the command is checked
 * out right away.
 */
static void cmd_checkin(struct afu_cmd *cmd)
{
	cmd->rcb.scp = NULL;
	cmd->rcb.timeout = 0;
	cmd->sa.ioasc = 0;
	cmd->cmd_tmf = false;
	cmd->sa.host_use_b[0] = 0; /* clears both completion and retry bytes */

	if (unlikely(atomic_inc_return(&cmd->free) != 1)) {
		pr_err("%s: Freeing cmd (%d) that is not in use!\n",
		       __func__, cmd->slot);
		return;
	}

	pr_devel("%s: released cmd %p index=%d\n", __func__, cmd, cmd->slot);
}
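/*
 * Illustrative sketch (not part of the driver): the intended life cycle
 * of a pooled command. Because CXLFLASH_NUM_CMDS is a power of two, the
 * mask in cmd_checkout() implements a cheap modulo for the round-robin
 * scan, and the atomic 'free' flag makes checkout safe without a lock.
 */
#if 0
static void example_cmd_pool_usage(struct afu *afu)
{
	struct afu_cmd *cmd;

	cmd = cmd_checkout(afu);	/* NULL when the pool is exhausted */
	if (!cmd)
		return;

	/* ... fill in cmd->rcb and issue the command here ... */

	cmd_checkin(cmd);		/* return the slot to the pool */
}
#endif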
/**
 * process_cmd_err() - command error handler
 * @cmd:	AFU command that experienced the error.
 * @scp:	SCSI command associated with the AFU command in error.
 *
 * Translates error bits from AFU command to SCSI command results.
 */
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
	struct sisl_ioarcb *ioarcb;
	struct sisl_ioasa *ioasa;
	u32 resid;

	if (unlikely(!cmd))
		return;

	ioarcb = &(cmd->rcb);
	ioasa = &(cmd->sa);

	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
		resid = ioasa->resid;
		scsi_set_resid(scp, resid);
		pr_debug("%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
			 __func__, cmd, scp, resid);
	}

	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
		pr_debug("%s: cmd overrun cmd = %p scp = %p\n",
			 __func__, cmd, scp);
		scp->result = (DID_ERROR << 16);
	}

	pr_debug("%s: cmd failed afu_rc=%d scsi_rc=%d fc_rc=%d "
		 "afu_extra=0x%X, scsi_extra=0x%X, fc_extra=0x%X\n",
		 __func__, ioasa->rc.afu_rc, ioasa->rc.scsi_rc,
		 ioasa->rc.fc_rc, ioasa->afu_extra, ioasa->scsi_extra,
		 ioasa->fc_extra);

	if (ioasa->rc.scsi_rc) {
		/* We have a SCSI status */
		if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
			memcpy(scp->sense_buffer, ioasa->sense_data,
			       SISL_SENSE_DATA_LEN);
			scp->result = ioasa->rc.scsi_rc;
		} else
			scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
	}

	/*
	 * We encountered an error. Set scp->result based on nature
	 * of error.
	 */
	if (ioasa->rc.fc_rc) {
		/* We have an FC status */
		switch (ioasa->rc.fc_rc) {
		case SISL_FC_RC_LINKDOWN:
			scp->result = (DID_REQUEUE << 16);
			break;
		case SISL_FC_RC_RESID:
			/* This indicates an FCP resid underrun */
			if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
				/* If the SISL_RC_FLAGS_OVERRUN flag was set,
				 * then we will handle this error elsewhere.
				 * If not then we must handle it here.
				 * This is probably an AFU bug.
				 */
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_FC_RC_RESIDERR:
			/* Resid mismatch between adapter and device */
		case SISL_FC_RC_TGTABORT:
		case SISL_FC_RC_ABORTOK:
		case SISL_FC_RC_ABORTFAIL:
		case SISL_FC_RC_NOLOGI:
		case SISL_FC_RC_ABORTPEND:
		case SISL_FC_RC_WRABORTPEND:
		case SISL_FC_RC_NOEXP:
		case SISL_FC_RC_INUSE:
			scp->result = (DID_ERROR << 16);
			break;
		}
	}

	if (ioasa->rc.afu_rc) {
		/* We have an AFU error */
		switch (ioasa->rc.afu_rc) {
		case SISL_AFU_RC_NO_CHANNELS:
			scp->result = (DID_NO_CONNECT << 16);
			break;
		case SISL_AFU_RC_DATA_DMA_ERR:
			switch (ioasa->afu_extra) {
			case SISL_AFU_DMA_ERR_PAGE_IN:
				/* Retry */
				scp->result = (DID_IMM_RETRY << 16);
				break;
			case SISL_AFU_DMA_ERR_INVALID_EA:
			default:
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_AFU_RC_OUT_OF_DATA_BUFS:
			/* Retry */
			scp->result = (DID_ALLOC_FAILURE << 16);
			break;
		default:
			scp->result = (DID_ERROR << 16);
		}
	}
}
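/*
 * Illustrative sketch (not part of the driver): how the result values
 * set above are packed. In the SCSI midlayer of this era, scp->result
 * carries the SCSI status byte in bits 0-7 and the host byte (DID_*)
 * in bits 16-23, which is why host codes are shifted left by 16 and
 * can be OR'd with a raw SCSI status.
 */
#if 0
static void example_result_packing(struct scsi_cmnd *scp, u8 scsi_status)
{
	scp->result = scsi_status | (DID_ERROR << 16);	/* both bytes */
	scp->result = (DID_OK << 16);			/* host byte only */
}
#endif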
/**
 * cmd_complete() - command completion handler
 * @cmd:	AFU command that has completed.
 *
 * Prepares and submits command that has either completed or timed out to
 * the SCSI stack. Checks AFU command back into command pool for non-internal
 * (rcb.scp populated) commands.
 */
static void cmd_complete(struct afu_cmd *cmd)
{
	struct scsi_cmnd *scp;
	ulong lock_flags;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	bool cmd_is_tmf;

	spin_lock_irqsave(&cmd->slock, lock_flags);
	cmd->sa.host_use_b[0] |= B_DONE;
	spin_unlock_irqrestore(&cmd->slock, lock_flags);

	if (cmd->rcb.scp) {
		scp = cmd->rcb.scp;
		if (unlikely(cmd->sa.ioasc))
			process_cmd_err(cmd, scp);
		else
			scp->result = (DID_OK << 16);

		cmd_is_tmf = cmd->cmd_tmf;
		cmd_checkin(cmd); /* Don't use cmd after here */

		pr_debug_ratelimited("%s: calling scsi_done scp=%p result=%X\n",
				     __func__, scp, scp->result);

		scsi_dma_unmap(scp);
		scp->scsi_done(scp);

		if (cmd_is_tmf) {
			spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
			cfg->tmf_active = false;
			wake_up_all_locked(&cfg->tmf_waitq);
			spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		}
	} else
		complete(&cmd->cevent);
}

/**
 * context_reset() - timeout handler for AFU commands
 * @cmd:	AFU command that timed out.
 *
 * Sends a reset to the AFU.
 */
static void context_reset(struct afu_cmd *cmd)
{
	int nretry = 0;
	u64 rrin = 0x1;
	u64 room = 0;
	struct afu *afu = cmd->parent;
	ulong lock_flags;

	pr_debug("%s: cmd=%p\n", __func__, cmd);

	spin_lock_irqsave(&cmd->slock, lock_flags);

	/* Already completed? */
	if (cmd->sa.host_use_b[0] & B_DONE) {
		spin_unlock_irqrestore(&cmd->slock, lock_flags);
		return;
	}

	cmd->sa.host_use_b[0] |= (B_DONE | B_ERROR | B_TIMEOUT);
	spin_unlock_irqrestore(&cmd->slock, lock_flags);

	/*
	 * We really want to send this reset at all costs, so spread
	 * out wait time on successive retries for available room.
	 */
	do {
		room = readq_be(&afu->host_map->cmd_room);
		atomic64_set(&afu->room, room);
		if (room)
			goto write_rrin;
		udelay(1 << nretry);
	} while (nretry++ < MC_ROOM_RETRY_CNT);

	pr_err("%s: no cmd_room to send reset\n", __func__);
	return;

write_rrin:
	nretry = 0;
	writeq_be(rrin, &afu->host_map->ioarrin);
	do {
		rrin = readq_be(&afu->host_map->ioarrin);
		if (rrin != 0x1)
			break;
		/* Double delay each time */
		udelay(1 << nretry);
	} while (nretry++ < MC_ROOM_RETRY_CNT);
}
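/*
 * Illustrative sketch (not part of the driver): the 'room' scheme used
 * above and in send_cmd(). The AFU exposes a cmd_room MMIO register that
 * reports how many commands it can accept. To avoid an MMIO read per
 * command, the value is cached in an atomic and decremented locally;
 * the register is only re-read once the cached credits run out.
 */
#if 0
static bool example_consume_room(struct afu *afu)
{
	/* atomic64_dec_if_positive() returns the decremented value and
	 * only stores it when the result is not negative
	 */
	if (atomic64_dec_if_positive(&afu->room) >= 0)
		return true;	/* cached credit consumed */

	/* cache exhausted: refresh from hardware */
	atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));
	return false;		/* caller should retry */
}
#endif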
/**
 * send_cmd() - sends an AFU command
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int nretry = 0;
	int rc = 0;
	u64 room;
	long newval;

	/*
	 * This routine is used by critical users such as AFU sync and to
	 * send a task management function (TMF). Thus we want to retry a
	 * bit before returning an error. To avoid the performance penalty
	 * of MMIO, we spread the update of 'room' over multiple commands.
	 */
retry:
	newval = atomic64_dec_if_positive(&afu->room);
	if (!newval) {
		do {
			room = readq_be(&afu->host_map->cmd_room);
			atomic64_set(&afu->room, room);
			if (room)
				goto write_ioarrin;
			udelay(1 << nretry);
		} while (nretry++ < MC_ROOM_RETRY_CNT);

		dev_err(dev, "%s: no cmd_room to send 0x%X\n",
			__func__, cmd->rcb.cdb[0]);

		goto no_room;
	} else if (unlikely(newval < 0)) {
		/* This should be rare. i.e. Only if two threads race and
		 * decrement before the MMIO read is done. In this case
		 * just benefit from the other thread having updated
		 * afu->room.
		 */
		if (nretry++ < MC_ROOM_RETRY_CNT) {
			udelay(1 << nretry);
			goto retry;
		}

		goto no_room;
	}

write_ioarrin:
	writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
out:
	pr_devel("%s: cmd=%p len=%d ea=%p rc=%d\n", __func__, cmd,
		 cmd->rcb.data_len, (void *)cmd->rcb.data_ea, rc);
	return rc;

no_room:
	afu->read_room = true;
	kref_get(&cfg->afu->mapcount);
	schedule_work(&cfg->work_q);
	rc = SCSI_MLQUEUE_HOST_BUSY;
	goto out;
}

/**
 * wait_resp() - polls for a response or timeout to a sent AFU command
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command that was sent.
 */
static void wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
	/* rcb.timeout is in seconds; allow twice that before giving up */
	ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);

	timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
	if (!timeout)
		context_reset(cmd);

	if (unlikely(cmd->sa.ioasc != 0))
		pr_err("%s: CMD 0x%X failed, IOASC: flags 0x%X, afu_rc 0x%X, "
		       "scsi_rc 0x%X, fc_rc 0x%X\n", __func__, cmd->rcb.cdb[0],
		       cmd->sa.rc.flags, cmd->sa.rc.afu_rc, cmd->sa.rc.scsi_rc,
		       cmd->sa.rc.fc_rc);
}
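/*
 * Illustrative sketch (not part of the driver): the synchronous command
 * pattern built from the helpers above. cxlflash_afu_sync() further down
 * follows this same checkout -> fill -> send -> wait -> checkin shape.
 */
#if 0
static int example_sync_command(struct afu *afu)
{
	struct afu_cmd *cmd;
	int rc;

	cmd = cmd_checkout(afu);
	if (!cmd)
		return -EBUSY;

	cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
	cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT;
	/* ... fill in the CDB ... */

	rc = send_cmd(afu, cmd);
	if (!rc)
		wait_resp(afu, cmd);	/* completes or resets on timeout */

	cmd_checkin(cmd);
	return rc;
}
#endif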
/**
 * send_tmf() - sends a Task Management Function (TMF)
 * @afu:	AFU to checkout from.
 * @scp:	SCSI command from stack.
 * @tmfcmd:	TMF command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
{
	struct afu_cmd *cmd;

	u32 port_sel = scp->device->channel + 1;
	short lflag = 0;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
	struct device *dev = &cfg->dev->dev;
	ulong lock_flags;
	int rc = 0;
	ulong to;

	cmd = cmd_checkout(afu);
	if (unlikely(!cmd)) {
		dev_err(dev, "%s: could not get a free command\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	/* When a Task Management Function is active, do not send another */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	cfg->tmf_active = true;
	cmd->cmd_tmf = true;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	cmd->rcb.ctx_id = afu->ctx_hndl;
	cmd->rcb.port_sel = port_sel;
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	lflag = SISL_REQ_FLAGS_TMF_CMD;

	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);

	/* Stash the scp in the reserved field, for reuse during interrupt */
	cmd->rcb.scp = scp;

	/* Copy the CDB from the cmd passed in */
	memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));

	/* Send the command */
	rc = send_cmd(afu, cmd);
	if (unlikely(rc)) {
		cmd_checkin(cmd);
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		goto out;
	}

	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	to = msecs_to_jiffies(5000);
	to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
						       !cfg->tmf_active,
						       cfg->tmf_slock,
						       to);
	if (!to) {
		cfg->tmf_active = false;
		dev_err(dev, "%s: TMF timed out!\n", __func__);
		rc = -1;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
out:
	return rc;
}

static void afu_unmap(struct kref *ref)
{
	struct afu *afu = container_of(ref, struct afu, mapcount);

	if (likely(afu->afu_map)) {
		cxl_psa_unmap((void __iomem *)afu->afu_map);
		afu->afu_map = NULL;
	}
}

/**
 * cxlflash_driver_info() - information handler for this host driver
 * @host:	SCSI host associated with device.
 *
 * Return: A string describing the device.
 */
static const char *cxlflash_driver_info(struct Scsi_Host *host)
{
	return CXLFLASH_ADAPTER_NAME;
}
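/*
 * Illustrative sketch (not part of the driver): the tmf_active handshake
 * used in send_tmf() and cmd_complete(). A single flag guarded by
 * tmf_slock serializes TMFs; waiters sleep on tmf_waitq with the lock
 * held via wait_event_interruptible_lock_irq(), and the completion path
 * clears the flag and wakes them.
 */
#if 0
static void example_tmf_gate(struct cxlflash_cfg *cfg)
{
	ulong flags;

	spin_lock_irqsave(&cfg->tmf_slock, flags);
	if (cfg->tmf_active)		/* someone else owns the gate */
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	cfg->tmf_active = true;		/* take ownership */
	spin_unlock_irqrestore(&cfg->tmf_slock, flags);
}
#endif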
/**
 * cxlflash_queuecommand() - sends a mid-layer request
 * @host:	SCSI host associated with device.
 * @scp:	SCSI command to send.
 *
 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd;
	u32 port_sel = scp->device->channel + 1;
	int nseg, i, ncount;
	struct scatterlist *sg;
	ulong lock_flags;
	short lflag = 0;
	int rc = 0;
	int kref_got = 0;

	dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
			    "cdb=(%08X-%08X-%08X-%08X)\n",
			    __func__, scp, host->host_no, scp->device->channel,
			    scp->device->id, scp->device->lun,
			    get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	/*
	 * If a Task Management Function is active, wait for it to complete
	 * before continuing with regular commands.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active) {
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	switch (cfg->state) {
	case STATE_RESET:
		dev_dbg_ratelimited(dev, "%s: device is in reset!\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	case STATE_FAILTERM:
		dev_dbg_ratelimited(dev, "%s: device has failed!\n", __func__);
		scp->result = (DID_NO_CONNECT << 16);
		scp->scsi_done(scp);
		rc = 0;
		goto out;
	default:
		break;
	}

	cmd = cmd_checkout(afu);
	if (unlikely(!cmd)) {
		dev_err(dev, "%s: could not get a free command\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	kref_get(&cfg->afu->mapcount);
	kref_got = 1;

	cmd->rcb.ctx_id = afu->ctx_hndl;
	cmd->rcb.port_sel = port_sel;
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		lflag = SISL_REQ_FLAGS_HOST_WRITE;
	else
		lflag = SISL_REQ_FLAGS_HOST_READ;

	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);

	/* Stash the scp in the reserved field, for reuse during interrupt */
	cmd->rcb.scp = scp;

	nseg = scsi_dma_map(scp);
	if (unlikely(nseg < 0)) {
		dev_err(dev, "%s: Fail DMA map! nseg=%d\n",
			__func__, nseg);
		cmd_checkin(cmd); /* return the unused command to the pool */
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	/*
	 * The RCB describes a single effective address/length pair, so
	 * record the (expected single) mapped segment.
	 */
	ncount = scsi_sg_count(scp);
	scsi_for_each_sg(scp, sg, ncount, i) {
		cmd->rcb.data_len = sg_dma_len(sg);
		cmd->rcb.data_ea = sg_dma_address(sg);
	}

	/* Copy the CDB from the scsi_cmnd passed in */
	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));

	/* Send the command */
	rc = send_cmd(afu, cmd);
	if (unlikely(rc)) {
		cmd_checkin(cmd);
		scsi_dma_unmap(scp);
	}

out:
	if (kref_got)
		kref_put(&afu->mapcount, afu_unmap);
	pr_devel("%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;

	if (pci_channel_offline(pdev))
		wait_event_timeout(cfg->reset_waitq,
				   !pci_channel_offline(pdev),
				   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
}

/**
 * free_mem() - free memory associated with the AFU
 * @cfg:	Internal structure associated with the host.
 */
static void free_mem(struct cxlflash_cfg *cfg)
{
	int i;
	char *buf = NULL;
	struct afu *afu = cfg->afu;

	if (cfg->afu) {
		for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
			buf = afu->cmd[i].buf;
			/* Only page-aligned buffers own a page allocation */
			if (!((u64)buf & (PAGE_SIZE - 1)))
				free_page((ulong)buf);
		}

		free_pages((ulong)afu, get_order(sizeof(struct afu)));
		cfg->afu = NULL;
	}
}

/**
 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU in a partially allocated/initialized state.
 *
 * Cleans up all state associated with the command queue, and unmaps
 * the MMIO space.
 *
 *  - complete() will take care of commands we initiated (they'll be checked
 *  in as part of the cleanup that occurs after the completion)
 *
 *  - cmd_checkin() will take care of entries that we did not initiate and that
 *  have not (and will not) complete because they are sitting on a [now stale]
 *  hardware queue
 */
static void stop_afu(struct cxlflash_cfg *cfg)
{
	int i;
	struct afu *afu = cfg->afu;
	struct afu_cmd *cmd;

	if (likely(afu)) {
		for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
			cmd = &afu->cmd[i];
			complete(&cmd->cevent);
			if (!atomic_read(&cmd->free))
				cmd_checkin(cmd);
		}

		if (likely(afu->afu_map)) {
			cxl_psa_unmap((void __iomem *)afu->afu_map);
			afu->afu_map = NULL;
		}
		kref_put(&afu->mapcount, afu_unmap);
	}
}

/**
 * term_intr() - disables all AFU interrupts
 * @cfg:	Internal structure associated with the host.
 * @level:	Depth of allocation, where to begin waterfall tear down.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;

	if (!afu || !cfg->mcctx) {
		dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
		return;
	}

	switch (level) {
	case UNMAP_THREE:
		cxl_unmap_afu_irq(cfg->mcctx, 3, afu);
	case UNMAP_TWO:
		cxl_unmap_afu_irq(cfg->mcctx, 2, afu);
	case UNMAP_ONE:
		cxl_unmap_afu_irq(cfg->mcctx, 1, afu);
	case FREE_IRQ:
		cxl_free_afu_irqs(cfg->mcctx);
		/* fall through */
	case UNDO_NOOP:
		/* No action required */
		break;
	}
}
/**
 * term_mc() - terminates the master context
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_mc(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;

	if (!afu || !cfg->mcctx) {
		dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
		return;
	}

	rc = cxl_stop_context(cfg->mcctx);
	WARN_ON(rc);
	cfg->mcctx = NULL;
}

/**
 * term_afu() - terminates the AFU
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_afu(struct cxlflash_cfg *cfg)
{
	/*
	 * Tear down is carefully orchestrated to ensure
	 * no interrupts can come in when the problem state
	 * area is unmapped.
	 *
	 * 1) Disable all AFU interrupts
	 * 2) Unmap the problem state area
	 * 3) Stop the master context
	 */
	term_intr(cfg, UNMAP_THREE);
	if (cfg->afu)
		stop_afu(cfg);

	term_mc(cfg);

	pr_debug("%s: returning\n", __func__);
}

/**
 * cxlflash_remove() - PCI entry point to tear down host
 * @pdev:	PCI device associated with the host.
 *
 * Safe to use as a cleanup in partially allocated/initialized state.
 */
static void cxlflash_remove(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	ulong lock_flags;

	/* If a Task Management Function is active, wait for it to complete
	 * before continuing with remove.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	cfg->state = STATE_FAILTERM;
	cxlflash_stop_term_user_contexts(cfg);

	switch (cfg->init_state) {
	case INIT_STATE_SCSI:
		cxlflash_term_local_luns(cfg);
		scsi_remove_host(cfg->host);
		/* fall through */
	case INIT_STATE_AFU:
		cancel_work_sync(&cfg->work_q);
		term_afu(cfg);
		/* fall through */
	case INIT_STATE_PCI:
		pci_disable_device(pdev);
		/* fall through */
	case INIT_STATE_NONE:
		free_mem(cfg);
		scsi_host_put(cfg->host);
		break;
	}

	pr_debug("%s: returning\n", __func__);
}
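/*
 * Illustrative sketch (not part of the driver): the "waterfall" teardown
 * idiom used by term_intr() and cxlflash_remove(). Initialization records
 * how far it got; teardown enters a switch at that depth and deliberately
 * falls through every shallower case, undoing the steps in reverse order.
 */
#if 0
enum example_level { GOT_C, GOT_B, GOT_A, GOT_NONE };

static void example_waterfall(enum example_level level)
{
	switch (level) {
	case GOT_C:
		/* undo step C */
	case GOT_B:
		/* undo step B */
	case GOT_A:
		/* undo step A */
		/* fall through */
	case GOT_NONE:
		break;
	}
}
#endif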
/**
 * alloc_mem() - allocates the AFU and its command pool
 * @cfg:	Internal structure associated with the host.
 *
 * A partially allocated state remains on failure.
 *
 * Return:
 *	0 on success
 *	-ENOMEM on failure to allocate memory
 */
static int alloc_mem(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	int i;
	char *buf = NULL;
	struct device *dev = &cfg->dev->dev;

	/* AFU is ~12k, i.e. only one 64k page or up to four 4k pages */
	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					    get_order(sizeof(struct afu)));
	if (unlikely(!cfg->afu)) {
		dev_err(dev, "%s: cannot get %d free pages\n",
			__func__, get_order(sizeof(struct afu)));
		rc = -ENOMEM;
		goto out;
	}
	cfg->afu->parent = cfg;
	cfg->afu->afu_map = NULL;

	for (i = 0; i < CXLFLASH_NUM_CMDS; buf += CMD_BUFSIZE, i++) {
		if (!((u64)buf & (PAGE_SIZE - 1))) {
			buf = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
			if (unlikely(!buf)) {
				dev_err(dev,
					"%s: Allocate command buffers fail!\n",
					__func__);
				rc = -ENOMEM;
				free_mem(cfg);
				goto out;
			}
		}

		cfg->afu->cmd[i].buf = buf;
		atomic_set(&cfg->afu->cmd[i].free, 1);
		cfg->afu->cmd[i].slot = i;
	}

out:
	return rc;
}
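/*
 * Illustrative sketch (not part of the driver): the buffer-packing trick
 * in alloc_mem()/free_mem() above. Command buffers of CMD_BUFSIZE are
 * carved sequentially out of pages; a new page is allocated only when
 * the running pointer is page aligned (i.e. the previous page is full),
 * and free_mem() likewise frees only the page-aligned buffers. This
 * relies on CMD_BUFSIZE dividing PAGE_SIZE evenly.
 */
#if 0
static void example_buffer_packing(void)
{
	char *buf = NULL;
	int i;

	for (i = 0; i < CXLFLASH_NUM_CMDS; buf += CMD_BUFSIZE, i++) {
		if (!((u64)buf & (PAGE_SIZE - 1)))	/* page boundary? */
			buf = (char *)__get_free_page(GFP_KERNEL);
		/* buf now points at slot i's CMD_BUFSIZE-sized buffer */
	}
}
#endif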
/**
 * init_pci() - initializes the host as a PCI device
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_pci(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	int rc = 0;

	rc = pci_enable_device(pdev);
	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			cxlflash_wait_for_pci_err_recovery(cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(&pdev->dev, "%s: Cannot enable adapter\n",
				__func__);
			cxlflash_wait_for_pci_err_recovery(cfg);
			goto out;
		}
	}

out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_scsi(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	int rc = 0;

	rc = scsi_add_host(cfg->host, &pdev->dev);
	if (rc) {
		dev_err(&pdev->dev, "%s: scsi_add_host failed (rc=%d)\n",
			__func__, rc);
		goto out;
	}

	scsi_scan_host(cfg->host);

out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * set_port_online() - transitions the specified host FC port to online state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. Online state means
 * that the FC link layer has synced, completed the handshaking process, and
 * is ready for login to start.
 */
static void set_port_online(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE);	/* clear OFF_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);	/* set ON_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * set_port_offline() - transitions the specified host FC port to offline state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call.
 */
static void set_port_offline(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);	/* clear ON_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);	/* set OFF_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * wait_port_online() - waits for the specified host FC port to come online
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call. This will timeout
 * when the cable is not plugged in.
 *
 * Return:
 *	TRUE (1) when the specified port is online
 *	FALSE (0) when the specified port fails to come online after timeout
 *	-EINVAL when @delay_us is less than 1000
 */
static int wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	if (delay_us < 1000) {
		pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
		return -EINVAL;
	}

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
}

/**
 * wait_port_offline() - waits for the specified host FC port to go offline
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call.
 *
 * Return:
 *	TRUE (1) when the specified port is offline
 *	FALSE (0) when the specified port fails to go offline after timeout
 *	-EINVAL when @delay_us is less than 1000
 */
static int wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	if (delay_us < 1000) {
		pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
		return -EINVAL;
	}

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
}
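/*
 * Illustrative sketch (not part of the driver): the indexing idiom used
 * with the FC register block above. The FC_* constants are byte offsets
 * into the port MMIO region, while fc_regs is an array of 64-bit big
 * endian registers, so a byte offset is converted to an array index by
 * dividing by 8 (sizeof(__be64)).
 */
#if 0
static u64 example_read_port_reg(__be64 __iomem *fc_regs, u32 byte_offset)
{
	return readq_be(&fc_regs[byte_offset / 8]);	/* e.g. FC_MTIP_STATUS */
}
#endif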
/**
 * afu_set_wwpn() - configures the WWPN for the specified host FC port
 * @afu:	AFU associated with the host that owns the specified FC port.
 * @port:	Port number being configured.
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @wwpn:	The world-wide-port-number previously discovered for port.
 *
 * The provided MMIO region must be mapped prior to call. As part of the
 * sequence to configure the WWPN, the port is toggled offline and then back
 * online. This toggling action can cause this routine to delay up to a few
 * seconds. When configured to use the internal LUN feature of the AFU, a
 * failure to come online is overridden.
 *
 * Return: 0 always; port toggle failures are logged but not propagated
 */
static int afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
			u64 wwpn)
{
	int rc = 0;

	set_port_offline(fc_regs);

	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT)) {
		pr_debug("%s: wait on port %d to go offline timed out\n",
			 __func__, port);
		rc = -1; /* but continue on to leave the port back online */
	}

	if (rc == 0)
		writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);

	/* Always return success after programming WWPN */
	rc = 0;

	set_port_online(fc_regs);

	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT)) {
		pr_err("%s: wait on port %d to go online timed out\n",
		       __func__, port);
	}

	pr_debug("%s: returning rc=%d\n", __func__, rc);

	return rc;
}

/**
 * afu_link_reset() - resets the specified host FC port
 * @afu:	AFU associated with the host that owns the specified FC port.
 * @port:	Port number being reset.
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. The sequence to
 * reset the port involves toggling it offline and then back online. This
 * action can cause this routine to delay up to a few seconds. An effort
 * is made to maintain link with the device by switching the host to use
 * the alternate port exclusively while the reset takes place.
 */
static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
{
	u64 port_sel;

	/* first switch the AFU to the other links, if any */
	port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
	port_sel &= ~(1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT))
		pr_err("%s: wait on port %d to go offline timed out\n",
		       __func__, port);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT))
		pr_err("%s: wait on port %d to go online timed out\n",
		       __func__, port);

	/* switch back to include this port */
	port_sel |= (1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	pr_debug("%s: returning port_sel=%lld\n", __func__, port_sel);
}
/*
 * Asynchronous interrupt information table
 */
static const struct asyc_intr_info ainfo[] = {
	{SISL_ASTATUS_FC0_OTHER, "other error", 0, CLR_FC_ERROR | LINK_RESET},
	{SISL_ASTATUS_FC0_LOGO, "target initiated LOGO", 0, 0},
	{SISL_ASTATUS_FC0_CRC_T, "CRC threshold exceeded", 0, LINK_RESET},
	{SISL_ASTATUS_FC0_LOGI_R, "login timed out, retrying", 0, LINK_RESET},
	{SISL_ASTATUS_FC0_LOGI_F, "login failed", 0, CLR_FC_ERROR},
	{SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, SCAN_HOST},
	{SISL_ASTATUS_FC0_LINK_DN, "link down", 0, 0},
	{SISL_ASTATUS_FC0_LINK_UP, "link up", 0, SCAN_HOST},
	{SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
	{SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
	{SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
	{SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, LINK_RESET},
	{SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
	{SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, SCAN_HOST},
	{SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
	{SISL_ASTATUS_FC1_LINK_UP, "link up", 1, SCAN_HOST},
	{0x0, "", 0, 0}		/* terminator */
};

/**
 * find_ainfo() - locates and returns asynchronous interrupt information
 * @status:	Status code set by AFU on error.
 *
 * Return: The located information or NULL when the status code is invalid.
 */
static const struct asyc_intr_info *find_ainfo(u64 status)
{
	const struct asyc_intr_info *info;

	for (info = &ainfo[0]; info->status; info++)
		if (info->status == status)
			return info;

	return NULL;
}

/**
 * afu_err_intr_init() - clears and initializes the AFU for error interrupts
 * @afu:	AFU associated with the host.
 */
static void afu_err_intr_init(struct afu *afu)
{
	int i;
	u64 reg;

	/* global async interrupts: AFU clears afu_ctrl on context exit
	 * if async interrupts were sent to that context. This prevents
	 * the AFU from sending further async interrupts when there is
	 * nobody to receive them.
	 */

	/* mask all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
	/* set LISN# to send and point to master context */
	reg = ((u64) (((afu->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);

	if (afu->internal_lun)
		reg |= 1;	/* Bit 63 indicates local lun */
	writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
	/* clear all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
	/* unmask bits that are of interest */
	/* note: afu can send an interrupt after this step */
	writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
	/* clear again in case a bit came on after previous clear but before */
	/* unmask */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);

	/* Clear/Set internal lun bits */
	reg = readq_be(&afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);
	reg &= SISL_FC_INTERNAL_MASK;
	if (afu->internal_lun)
		reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
	writeq_be(reg, &afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);

	/* now clear FC errors */
	for (i = 0; i < NUM_FC_PORTS; i++) {
		writeq_be(0xFFFFFFFFU,
			  &afu->afu_map->global.fc_regs[i][FC_ERROR / 8]);
		writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRCAP / 8]);
	}

	/* sync interrupts for master's IOARRIN write */
	/* note that unlike asyncs, there can be no pending sync interrupts */
	/* at this time (this is a fresh context and master has not written */
	/* IOARRIN yet), so there is nothing to clear. */

	/* set LISN#, it is always sent to the context that wrote IOARRIN */
	writeq_be(SISL_MSI_SYNC_ERROR, &afu->host_map->ctx_ctrl);
	writeq_be(SISL_ISTATUS_MASK, &afu->host_map->intr_mask);
}
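/*
 * Illustrative sketch (not part of the driver): how the ainfo[] table
 * drives asynchronous error handling. Each table entry maps a single
 * status bit to a description, an owning port and an action bitmask;
 * handling a new status bit is just a matter of adding a row.
 */
#if 0
static void example_handle_async_status(u64 status_bit)
{
	const struct asyc_intr_info *info = find_ainfo(status_bit);

	if (!info)
		return;			/* unknown status bit */

	pr_info("port%u: %s\n", info->port, info->desc);
	if (info->action & LINK_RESET) {
		/* schedule a link reset for info->port */
	}
}
#endif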
/**
 * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	u64 reg;
	u64 reg_unmasked;

	reg = readq_be(&afu->host_map->intr_status);
	reg_unmasked = (reg & SISL_ISTATUS_UNMASK);

	if (reg_unmasked == 0UL) {
		pr_err("%s: %llX: spurious interrupt, intr_status %016llX\n",
		       __func__, (u64)afu, reg);
		goto cxlflash_sync_err_irq_exit;
	}

	pr_err("%s: %llX: unexpected interrupt, intr_status %016llX\n",
	       __func__, (u64)afu, reg);

	writeq_be(reg_unmasked, &afu->host_map->intr_clear);

cxlflash_sync_err_irq_exit:
	pr_debug("%s: returning rc=%d\n", __func__, IRQ_HANDLED);
	return IRQ_HANDLED;
}

/**
 * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	struct afu_cmd *cmd;
	bool toggle = afu->toggle;
	u64 entry,
	    *hrrq_start = afu->hrrq_start,
	    *hrrq_end = afu->hrrq_end,
	    *hrrq_curr = afu->hrrq_curr;

	/* Process however many RRQ entries that are ready */
	while (true) {
		entry = *hrrq_curr;

		if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
			break;

		cmd = (struct afu_cmd *)(entry & ~SISL_RESP_HANDLE_T_BIT);
		cmd_complete(cmd);

		/* Advance to next entry or wrap and flip the toggle bit */
		if (hrrq_curr < hrrq_end)
			hrrq_curr++;
		else {
			hrrq_curr = hrrq_start;
			toggle ^= SISL_RESP_HANDLE_T_BIT;
		}
	}

	afu->hrrq_curr = hrrq_curr;
	afu->toggle = toggle;

	return IRQ_HANDLED;
}
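/*
 * Illustrative sketch (not part of the driver): the toggle-bit protocol
 * used by cxlflash_rrq_irq() above. The AFU writes completion entries
 * into a fixed ring; bit 0 of each entry carries a "generation" toggle.
 * The consumer owns an expected toggle value, treats an entry as valid
 * only while its toggle matches, and flips the expectation each time it
 * wraps. No head/tail pointers ever cross the MMIO boundary.
 */
#if 0
static void example_ring_consume(u64 *ring, int nentries, u64 *expected)
{
	int i = 0;

	for (;;) {
		u64 entry = ring[i];

		if ((entry & 0x1) != *expected)
			break;			/* producer not there yet */

		/* ... process entry & ~0x1ULL ... */

		if (++i == nentries) {		/* wrap: flip generation */
			i = 0;
			*expected ^= 0x1;
		}
	}
}
#endif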
/**
 * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 reg_unmasked;
	const struct asyc_intr_info *info;
	struct sisl_global_map __iomem *global = &afu->afu_map->global;
	u64 reg;
	u8 port;
	int i;

	reg = readq_be(&global->regs.aintr_status);
	reg_unmasked = (reg & SISL_ASTATUS_UNMASK);

	if (reg_unmasked == 0) {
		dev_err(dev, "%s: spurious interrupt, aintr_status 0x%016llX\n",
			__func__, reg);
		goto out;
	}

	/* FYI, it is 'okay' to clear AFU status before FC_ERROR */
	writeq_be(reg_unmasked, &global->regs.aintr_clear);

	/* Check each bit that is on */
	for (i = 0; reg_unmasked; i++, reg_unmasked = (reg_unmasked >> 1)) {
		info = find_ainfo(1ULL << i);
		if (((reg_unmasked & 0x1) == 0) || !info)
			continue;

		port = info->port;

		dev_err(dev, "%s: FC Port %d -> %s, fc_status 0x%08llX\n",
			__func__, port, info->desc,
			readq_be(&global->fc_regs[port][FC_STATUS / 8]));

		/*
		 * Do link reset first, some OTHER errors will set FC_ERROR
		 * again if cleared before or w/o a reset
		 */
		if (info->action & LINK_RESET) {
			dev_err(dev, "%s: FC Port %d: resetting link\n",
				__func__, port);
			cfg->lr_state = LINK_RESET_REQUIRED;
			cfg->lr_port = port;
			kref_get(&cfg->afu->mapcount);
			schedule_work(&cfg->work_q);
		}

		if (info->action & CLR_FC_ERROR) {
			reg = readq_be(&global->fc_regs[port][FC_ERROR / 8]);

			/*
			 * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
			 * should be the same and tracing one is sufficient.
			 */

			dev_err(dev, "%s: fc %d: clearing fc_error 0x%08llX\n",
				__func__, port, reg);

			writeq_be(reg, &global->fc_regs[port][FC_ERROR / 8]);
			writeq_be(0, &global->fc_regs[port][FC_ERRCAP / 8]);
		}

		if (info->action & SCAN_HOST) {
			atomic_inc(&cfg->scan_host_needed);
			kref_get(&cfg->afu->mapcount);
			schedule_work(&cfg->work_q);
		}
	}

out:
	dev_dbg(dev, "%s: returning IRQ_HANDLED, afu=%p\n", __func__, afu);
	return IRQ_HANDLED;
}

/**
 * start_context() - starts the master context
 * @cfg:	Internal structure associated with the host.
 *
 * Return: A success or failure value from CXL services.
 */
static int start_context(struct cxlflash_cfg *cfg)
{
	int rc = 0;

	rc = cxl_start_context(cfg->mcctx,
			       cfg->afu->work.work_element_descriptor,
			       NULL);

	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * read_vpd() - obtains the WWPNs from VPD
 * @cfg:	Internal structure associated with the host.
 * @wwpn:	Array of size NUM_FC_PORTS to pass back WWPNs.
 *
 * Return: 0 on success, -errno on failure
 */
static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
{
	struct pci_dev *dev = cfg->dev;
	int rc = 0;
	int ro_start, ro_size, i, j, k;
	ssize_t vpd_size;
	char vpd_data[CXLFLASH_VPD_LEN];
	char tmp_buf[WWPN_BUF_LEN] = { 0 };
	char *wwpn_vpd_tags[NUM_FC_PORTS] = { "V5", "V6" };

	/* Get the VPD data from the device */
	vpd_size = cxl_read_adapter_vpd(dev, vpd_data, sizeof(vpd_data));
	if (unlikely(vpd_size <= 0)) {
		dev_err(&dev->dev, "%s: Unable to read VPD (size = %ld)\n",
			__func__, vpd_size);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read only section offset */
	ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
				    PCI_VPD_LRDT_RO_DATA);
	if (unlikely(ro_start < 0)) {
		dev_err(&dev->dev, "%s: VPD Read-only data not found\n",
			__func__);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read only section size, cap when extends beyond read VPD */
	ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
	j = ro_size;
	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
	if (unlikely((i + j) > vpd_size)) {
		pr_debug("%s: Might need to read more VPD (%d > %ld)\n",
			 __func__, (i + j), vpd_size);
		ro_size = vpd_size - i;
	}

	/*
	 * Find the offset of the WWPN tag within the read only
	 * VPD data and validate the found field (partials are
	 * no good to us). Convert the ASCII data to an integer
	 * value. Note that we must copy to a temporary buffer
	 * because the conversion service requires that the ASCII
	 * string be terminated.
	 */
	for (k = 0; k < NUM_FC_PORTS; k++) {
		j = ro_size;
		i = ro_start + PCI_VPD_LRDT_TAG_SIZE;

		i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
		if (unlikely(i < 0)) {
			dev_err(&dev->dev, "%s: Port %d WWPN not found "
				"in VPD\n", __func__, k);
			rc = -ENODEV;
			goto out;
		}

		j = pci_vpd_info_field_size(&vpd_data[i]);
		i += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
			dev_err(&dev->dev, "%s: Port %d WWPN incomplete or "
				"VPD corrupt\n",
				__func__, k);
			rc = -ENODEV;
			goto out;
		}

		memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
		/* WWPN_LEN (16) doubles as the numeric base: hex */
		rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
		if (unlikely(rc)) {
			dev_err(&dev->dev, "%s: Fail to convert port %d WWPN "
				"to integer\n", __func__, k);
			rc = -ENODEV;
			goto out;
		}
	}

out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}
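/*
 * Illustrative sketch (not part of the driver): the shape of the VPD
 * walk in read_vpd(). PCI VPD is a sequence of tagged resources; the
 * read-only resource holds keyword fields ("V5"/"V6" here carry the
 * port WWPNs as ASCII hex), each prefixed by a small header giving its
 * length.
 */
#if 0
static int example_find_keyword(char *vpd, size_t len, const char *kw)
{
	int ro, off;

	ro = pci_vpd_find_tag(vpd, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (ro < 0)
		return ro;

	off = pci_vpd_find_info_keyword(vpd, ro + PCI_VPD_LRDT_TAG_SIZE,
					pci_vpd_lrdt_size(&vpd[ro]), kw);
	return off;	/* offset of the keyword header, or negative */
}
#endif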
/**
 * init_pcr() - initialize the provisioning and control registers
 * @cfg:	Internal structure associated with the host.
 *
 * Also sets up fast access to the mapped registers and initializes AFU
 * command fields that never change.
 */
static void init_pcr(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct sisl_ctrl_map __iomem *ctrl_map;
	int i;

	for (i = 0; i < MAX_CONTEXT; i++) {
		ctrl_map = &afu->afu_map->ctrls[i].ctrl;
		/* Disrupt any clients that could be running */
		/* e.g. clients that survived a master restart */
		writeq_be(0, &ctrl_map->rht_start);
		writeq_be(0, &ctrl_map->rht_cnt_id);
		writeq_be(0, &ctrl_map->ctx_cap);
	}

	/* Copy frequently used fields into afu */
	afu->ctx_hndl = (u16) cxl_process_element(cfg->mcctx);
	afu->host_map = &afu->afu_map->hosts[afu->ctx_hndl].host;
	afu->ctrl_map = &afu->afu_map->ctrls[afu->ctx_hndl].ctrl;

	/* Program the Endian Control for the master context */
	writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl);

	/* Initialize cmd fields that never change */
	for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
		afu->cmd[i].rcb.ctx_id = afu->ctx_hndl;
		afu->cmd[i].rcb.msi = SISL_MSI_RRQ_UPDATED;
		afu->cmd[i].rcb.rrq = 0x0;
	}
}
/**
 * init_global() - initialize AFU global registers
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_global(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	u64 wwpn[NUM_FC_PORTS];	/* wwpn of AFU ports */
	int i = 0, num_ports = 0;
	int rc = 0;
	u64 reg;

	rc = read_vpd(cfg, &wwpn[0]);
	if (rc) {
		dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
		goto out;
	}

	pr_debug("%s: wwpn0=0x%llX wwpn1=0x%llX\n", __func__, wwpn[0], wwpn[1]);

	/* Set up RRQ in AFU for master issued cmds */
	writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start);
	writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end);

	/* AFU configuration */
	reg = readq_be(&afu->afu_map->global.regs.afu_config);
	reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
	/* enable all auto retry options and control endianness */
	/* leave others at default: */
	/* CTX_CAP write protected, mbox_r does not clear on read and */
	/* checker on if dual afu */
	writeq_be(reg, &afu->afu_map->global.regs.afu_config);

	/* Global port select: select either port */
	if (afu->internal_lun) {
		/* Only use port 0 */
		writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
		num_ports = NUM_FC_PORTS - 1;
	} else {
		writeq_be(BOTH_PORTS, &afu->afu_map->global.regs.afu_port_sel);
		num_ports = NUM_FC_PORTS;
	}

	for (i = 0; i < num_ports; i++) {
		/* Unmask all errors (but they are still masked at AFU) */
		writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRMSK / 8]);
		/* Clear CRC error cnt & set a threshold */
		(void)readq_be(&afu->afu_map->global.
			       fc_regs[i][FC_CNT_CRCERR / 8]);
		writeq_be(MC_CRC_THRESH, &afu->afu_map->global.fc_regs[i]
			  [FC_CRC_THRESH / 8]);

		/* Set WWPNs. If already programmed, wwpn[i] is 0 */
		if (wwpn[i] != 0 &&
		    afu_set_wwpn(afu, i,
				 &afu->afu_map->global.fc_regs[i][0],
				 wwpn[i])) {
			dev_err(dev, "%s: failed to set WWPN on port %d\n",
				__func__, i);
			rc = -EIO;
			goto out;
		}
		/* Programming WWPN back to back causes additional
		 * offline/online transitions and a PLOGI
		 */
		msleep(100);
	}

	/* Set up master's own CTX_CAP to allow real mode, host translation */
	/* tables, afu cmds and read/write GSCSI cmds. */
	/* First, unlock ctx_cap write by reading mbox */
	(void)readq_be(&afu->ctrl_map->mbox_r);	/* unlock ctx_cap */
	writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
		   SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
		   SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
		  &afu->ctrl_map->ctx_cap);
	/* Initialize heartbeat */
	afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);

out:
	return rc;
}

/**
 * start_afu() - initializes and starts the AFU
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int start_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct afu_cmd *cmd;

	int i = 0;
	int rc = 0;

	for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
		cmd = &afu->cmd[i];

		init_completion(&cmd->cevent);
		spin_lock_init(&cmd->slock);
		cmd->parent = afu;
	}

	init_pcr(cfg);

	/* After an AFU reset, RRQ entries are stale, clear them */
	memset(&afu->rrq_entry, 0, sizeof(afu->rrq_entry));

	/* Initialize RRQ pointers */
	afu->hrrq_start = &afu->rrq_entry[0];
	afu->hrrq_end = &afu->rrq_entry[NUM_RRQ_ENTRY - 1];
	afu->hrrq_curr = afu->hrrq_start;
	afu->toggle = 1;

	rc = init_global(cfg);

	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * init_intr() - setup interrupt handlers for the master context
 * @cfg:	Internal structure associated with the host.
 * @ctx:	Previously obtained CXL context to initialize.
 *
 * Return: The undo level to pass to term_intr() when unwinding; UNDO_NOOP
 * indicates either success or that nothing was set up.
 */
static enum undo_level init_intr(struct cxlflash_cfg *cfg,
				 struct cxl_context *ctx)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	enum undo_level level = UNDO_NOOP;

	rc = cxl_allocate_afu_irqs(ctx, 3);
	if (unlikely(rc)) {
		dev_err(dev, "%s: call to allocate_afu_irqs failed rc=%d!\n",
			__func__, rc);
		level = UNDO_NOOP;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, afu,
			     "SISL_MSI_SYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: IRQ 1 (SISL_MSI_SYNC_ERROR) map failed!\n",
			__func__);
		level = FREE_IRQ;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, afu,
			     "SISL_MSI_RRQ_UPDATED");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: IRQ 2 (SISL_MSI_RRQ_UPDATED) map failed!\n",
			__func__);
		level = UNMAP_ONE;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, afu,
			     "SISL_MSI_ASYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: IRQ 3 (SISL_MSI_ASYNC_ERROR) map failed!\n",
			__func__);
		level = UNMAP_TWO;
		goto out;
	}
out:
	return level;
}
/**
 * init_mc() - create and register as the master context
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_mc(struct cxlflash_cfg *cfg)
{
	struct cxl_context *ctx;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	enum undo_level level;

	ctx = cxl_get_context(cfg->dev);
	if (unlikely(!ctx)) {
		rc = -ENOMEM;
		goto ret;
	}
	cfg->mcctx = ctx;

	/* Set it up as a master with the CXL */
	cxl_set_master(ctx);

	/* During initialization reset the AFU to start from a clean slate */
	rc = cxl_afu_reset(cfg->mcctx);
	if (unlikely(rc)) {
		dev_err(dev, "%s: initial AFU reset failed rc=%d\n",
			__func__, rc);
		goto ret;
	}

	level = init_intr(cfg, ctx);
	if (unlikely(level)) {
		dev_err(dev, "%s: setting up interrupts failed\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	/* This performs the equivalent of the CXL_IOCTL_START_WORK.
	 * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
	 * element (pe) that is embedded in the context (ctx)
	 */
	rc = start_context(cfg);
	if (unlikely(rc)) {
		dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
		level = UNMAP_THREE;
		goto out;
	}
ret:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
out:
	term_intr(cfg, level);
	goto ret;
}
/**
 * init_afu() - setup as master context and start AFU
 * @cfg:	Internal structure associated with the host.
 *
 * This routine is a higher level of control for configuring the
 * AFU on probe and reset paths.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_afu(struct cxlflash_cfg *cfg)
{
	u64 reg;
	int rc = 0;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;

	cxl_perst_reloads_same_image(cfg->cxl_afu, true);

	rc = init_mc(cfg);
	if (rc) {
		dev_err(dev, "%s: call to init_mc failed, rc=%d!\n",
			__func__, rc);
		goto out;
	}

	/* Map the entire MMIO space of the AFU */
	afu->afu_map = cxl_psa_map(cfg->mcctx);
	if (!afu->afu_map) {
		dev_err(dev, "%s: call to cxl_psa_map failed!\n", __func__);
		rc = -ENOMEM;
		goto err1;
	}
	kref_init(&afu->mapcount);

	/* No byte reverse on reading afu_version or string will be backwards */
	reg = readq(&afu->afu_map->global.regs.afu_version);
	memcpy(afu->version, &reg, sizeof(reg));
	afu->interface_version =
	    readq_be(&afu->afu_map->global.regs.interface_version);
	if ((afu->interface_version + 1) == 0) {	/* all 1s: back level */
		pr_err("Back level AFU, please upgrade. AFU version %s "
		       "interface version 0x%llx\n", afu->version,
		       afu->interface_version);
		rc = -EINVAL;
		goto err2;
	}

	pr_debug("%s: afu version %s, interface version 0x%llX\n", __func__,
		 afu->version, afu->interface_version);

	rc = start_afu(cfg);
	if (rc) {
		dev_err(dev, "%s: call to start_afu failed, rc=%d!\n",
			__func__, rc);
		goto err2;
	}

	afu_err_intr_init(cfg->afu);
	atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));

	/* Restore the LUN mappings */
	cxlflash_restore_luntable(cfg);
out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;

err2:
	kref_put(&afu->mapcount, afu_unmap);
err1:
	term_intr(cfg, UNMAP_THREE);
	term_mc(cfg);
	goto out;
}
(%u)\n", __func__, cfg->state); 1845 return 0; 1846 } 1847 1848 mutex_lock(&sync_active); 1849 retry: 1850 cmd = cmd_checkout(afu); 1851 if (unlikely(!cmd)) { 1852 retry_cnt++; 1853 udelay(1000 * retry_cnt); 1854 if (retry_cnt < MC_RETRY_CNT) 1855 goto retry; 1856 dev_err(dev, "%s: could not get a free command\n", __func__); 1857 rc = -1; 1858 goto out; 1859 } 1860 1861 pr_debug("%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u); 1862 1863 memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb)); 1864 1865 cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD; 1866 cmd->rcb.port_sel = 0x0; /* NA */ 1867 cmd->rcb.lun_id = 0x0; /* NA */ 1868 cmd->rcb.data_len = 0x0; 1869 cmd->rcb.data_ea = 0x0; 1870 cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT; 1871 1872 cmd->rcb.cdb[0] = 0xC0; /* AFU Sync */ 1873 cmd->rcb.cdb[1] = mode; 1874 1875 /* The cdb is aligned, no unaligned accessors required */ 1876 *((__be16 *)&cmd->rcb.cdb[2]) = cpu_to_be16(ctx_hndl_u); 1877 *((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u); 1878 1879 rc = send_cmd(afu, cmd); 1880 if (unlikely(rc)) 1881 goto out; 1882 1883 wait_resp(afu, cmd); 1884 1885 /* Set on timeout */ 1886 if (unlikely((cmd->sa.ioasc != 0) || 1887 (cmd->sa.host_use_b[0] & B_ERROR))) 1888 rc = -1; 1889 out: 1890 mutex_unlock(&sync_active); 1891 if (cmd) 1892 cmd_checkin(cmd); 1893 pr_debug("%s: returning rc=%d\n", __func__, rc); 1894 return rc; 1895 } 1896 1897 /** 1898 * afu_reset() - resets the AFU 1899 * @cfg: Internal structure associated with the host. 1900 * 1901 * Return: 0 on success, -errno on failure 1902 */ 1903 static int afu_reset(struct cxlflash_cfg *cfg) 1904 { 1905 int rc = 0; 1906 /* Stop the context before the reset. Since the context is 1907 * no longer available restart it after the reset is complete 1908 */ 1909 1910 term_afu(cfg); 1911 1912 rc = init_afu(cfg); 1913 1914 pr_debug("%s: returning rc=%d\n", __func__, rc); 1915 return rc; 1916 } 1917 1918 /** 1919 * cxlflash_eh_device_reset_handler() - reset a single LUN 1920 * @scp: SCSI command to send. 1921 * 1922 * Return: 1923 * SUCCESS as defined in scsi/scsi.h 1924 * FAILED as defined in scsi/scsi.h 1925 */ 1926 static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp) 1927 { 1928 int rc = SUCCESS; 1929 struct Scsi_Host *host = scp->device->host; 1930 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata; 1931 struct afu *afu = cfg->afu; 1932 int rcr = 0; 1933 1934 pr_debug("%s: (scp=%p) %d/%d/%d/%llu " 1935 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp, 1936 host->host_no, scp->device->channel, 1937 scp->device->id, scp->device->lun, 1938 get_unaligned_be32(&((u32 *)scp->cmnd)[0]), 1939 get_unaligned_be32(&((u32 *)scp->cmnd)[1]), 1940 get_unaligned_be32(&((u32 *)scp->cmnd)[2]), 1941 get_unaligned_be32(&((u32 *)scp->cmnd)[3])); 1942 1943 retry: 1944 switch (cfg->state) { 1945 case STATE_NORMAL: 1946 rcr = send_tmf(afu, scp, TMF_LUN_RESET); 1947 if (unlikely(rcr)) 1948 rc = FAILED; 1949 break; 1950 case STATE_RESET: 1951 wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); 1952 goto retry; 1953 default: 1954 rc = FAILED; 1955 break; 1956 } 1957 1958 pr_debug("%s: returning rc=%d\n", __func__, rc); 1959 return rc; 1960 } 1961 1962 /** 1963 * cxlflash_eh_host_reset_handler() - reset the host adapter 1964 * @scp: SCSI command from stack identifying host. 
/**
 * cxlflash_change_queue_depth() - change the queue depth for the device
 * @sdev: SCSI device destined for queue depth change.
 * @qdepth: Requested queue depth value to set.
 *
 * The requested queue depth is capped to the maximum supported value.
 *
 * Return: The actual queue depth set.
 */
static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
        if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
                qdepth = CXLFLASH_MAX_CMDS_PER_LUN;

        scsi_change_queue_depth(sdev, qdepth);
        return sdev->queue_depth;
}

/**
 * cxlflash_show_port_status() - queries and presents the current port status
 * @port: Desired port for status reporting.
 * @afu: AFU owning the specified port.
 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t cxlflash_show_port_status(u32 port, struct afu *afu, char *buf)
{
        char *disp_status;
        u64 status;
        __be64 __iomem *fc_regs;

        if (port >= NUM_FC_PORTS)
                return 0;

        fc_regs = &afu->afu_map->global.fc_regs[port][0];
        status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
        status &= FC_MTIP_STATUS_MASK;

        if (status == FC_MTIP_STATUS_ONLINE)
                disp_status = "online";
        else if (status == FC_MTIP_STATUS_OFFLINE)
                disp_status = "offline";
        else
                disp_status = "unknown";

        return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
}
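/*
 * Example (illustrative): the helper above backs the per-port sysfs
 * attributes defined below, so a user space read resolves to one of the
 * three status strings:
 *
 *      $ cat /sys/class/scsi_host/host0/port0
 *      online
 */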
/**
 * port0_show() - queries and presents the current status of port 0
 * @dev: Generic device associated with the host owning the port.
 * @attr: Device attribute representing the port.
 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port0_show(struct device *dev,
                          struct device_attribute *attr,
                          char *buf)
{
        struct Scsi_Host *shost = class_to_shost(dev);
        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
        struct afu *afu = cfg->afu;

        return cxlflash_show_port_status(0, afu, buf);
}

/**
 * port1_show() - queries and presents the current status of port 1
 * @dev: Generic device associated with the host owning the port.
 * @attr: Device attribute representing the port.
 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port1_show(struct device *dev,
                          struct device_attribute *attr,
                          char *buf)
{
        struct Scsi_Host *shost = class_to_shost(dev);
        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
        struct afu *afu = cfg->afu;

        return cxlflash_show_port_status(1, afu, buf);
}

/**
 * lun_mode_show() - presents the current LUN mode of the host
 * @dev: Generic device associated with the host.
 * @attr: Device attribute representing the LUN mode.
 * @buf: Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t lun_mode_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
{
        struct Scsi_Host *shost = class_to_shost(dev);
        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
        struct afu *afu = cfg->afu;

        return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
}

/**
 * lun_mode_store() - sets the LUN mode of the host
 * @dev: Generic device associated with the host.
 * @attr: Device attribute representing the LUN mode.
 * @buf: Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
 * @count: Length of data residing in @buf.
 *
 * The CXL Flash AFU supports a dummy LUN mode where the external
 * links and storage are not required. Space on the FPGA is used
 * to create 1 or 2 small LUNs which are presented to the system
 * as if they were a normal storage device. This feature is useful
 * during development and also provides manufacturing with a way
 * to test the AFU without an actual device.
 *
 * 0 = external LUN[s] (default)
 * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
 * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
 * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
 * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
 *
 * Return: The number of bytes consumed (@count) on success, -errno on failure.
 */
static ssize_t lun_mode_store(struct device *dev,
                              struct device_attribute *attr,
                              const char *buf, size_t count)
{
        struct Scsi_Host *shost = class_to_shost(dev);
        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
        struct afu *afu = cfg->afu;
        int rc;
        u32 lun_mode;

        rc = kstrtouint(buf, 10, &lun_mode);
        if (rc)
                return rc;

        if ((lun_mode < 5) && (lun_mode != afu->internal_lun)) {
                afu->internal_lun = lun_mode;

                /*
                 * When configured for internal LUN, there is only one
                 * channel, channel number 0; otherwise there are two
                 * channels (the default).
                 */
                if (afu->internal_lun)
                        shost->max_channel = 0;
                else
                        shost->max_channel = NUM_FC_PORTS - 1;

                afu_reset(cfg);
                scsi_scan_host(cfg->host);
        }

        return count;
}
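/*
 * Example (illustrative): switching host0 to a single internal LUN with
 * 4K blocks and later back to external LUNs. Each accepted write resets
 * the AFU and rescans the host:
 *
 *      $ echo 2 > /sys/class/scsi_host/host0/lun_mode
 *      $ echo 0 > /sys/class/scsi_host/host0/lun_mode
 */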
/**
 * ioctl_version_show() - presents the current ioctl version of the host
 * @dev: Generic device associated with the host.
 * @attr: Device attribute representing the ioctl version.
 * @buf: Buffer of length PAGE_SIZE to report back the ioctl version.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t ioctl_version_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0);
}

/**
 * cxlflash_show_port_lun_table() - queries and presents the port LUN table
 * @port: Desired port for status reporting.
 * @afu: AFU owning the specified port.
 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t cxlflash_show_port_lun_table(u32 port,
                                            struct afu *afu,
                                            char *buf)
{
        int i;
        ssize_t bytes = 0;
        __be64 __iomem *fc_port;

        if (port >= NUM_FC_PORTS)
                return 0;

        fc_port = &afu->afu_map->global.fc_port[port][0];

        for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
                bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
                                   "%03d: %016llX\n", i,
                                   readq_be(&fc_port[i]));
        return bytes;
}

/**
 * port0_lun_table_show() - presents the current LUN table of port 0
 * @dev: Generic device associated with the host owning the port.
 * @attr: Device attribute representing the port.
 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port0_lun_table_show(struct device *dev,
                                    struct device_attribute *attr,
                                    char *buf)
{
        struct Scsi_Host *shost = class_to_shost(dev);
        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
        struct afu *afu = cfg->afu;

        return cxlflash_show_port_lun_table(0, afu, buf);
}

/**
 * port1_lun_table_show() - presents the current LUN table of port 1
 * @dev: Generic device associated with the host owning the port.
 * @attr: Device attribute representing the port.
 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port1_lun_table_show(struct device *dev,
                                    struct device_attribute *attr,
                                    char *buf)
{
        struct Scsi_Host *shost = class_to_shost(dev);
        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
        struct afu *afu = cfg->afu;

        return cxlflash_show_port_lun_table(1, afu, buf);
}
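/*
 * Example output (illustrative; the entry values are fabricated) from the
 * port LUN table attributes defined above, one line per virtual LUN slot:
 *
 *      $ cat /sys/class/scsi_host/host0/port0_lun_table
 *      000: 8000000000000000
 *      001: 0000000000000000
 *      ...
 */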
/**
 * mode_show() - presents the current mode of the device
 * @dev: Generic device associated with the SCSI device.
 * @attr: Device attribute representing the device mode.
 * @buf: Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t mode_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        struct scsi_device *sdev = to_scsi_device(dev);

        return scnprintf(buf, PAGE_SIZE, "%s\n",
                         sdev->hostdata ? "superpipe" : "legacy");
}

/*
 * Host attributes
 */
static DEVICE_ATTR_RO(port0);
static DEVICE_ATTR_RO(port1);
static DEVICE_ATTR_RW(lun_mode);
static DEVICE_ATTR_RO(ioctl_version);
static DEVICE_ATTR_RO(port0_lun_table);
static DEVICE_ATTR_RO(port1_lun_table);

static struct device_attribute *cxlflash_host_attrs[] = {
        &dev_attr_port0,
        &dev_attr_port1,
        &dev_attr_lun_mode,
        &dev_attr_ioctl_version,
        &dev_attr_port0_lun_table,
        &dev_attr_port1_lun_table,
        NULL
};

/*
 * Device attributes
 */
static DEVICE_ATTR_RO(mode);

static struct device_attribute *cxlflash_dev_attrs[] = {
        &dev_attr_mode,
        NULL
};

/*
 * Host template
 */
static struct scsi_host_template driver_template = {
        .module = THIS_MODULE,
        .name = CXLFLASH_ADAPTER_NAME,
        .info = cxlflash_driver_info,
        .ioctl = cxlflash_ioctl,
        .proc_name = CXLFLASH_NAME,
        .queuecommand = cxlflash_queuecommand,
        .eh_device_reset_handler = cxlflash_eh_device_reset_handler,
        .eh_host_reset_handler = cxlflash_eh_host_reset_handler,
        .change_queue_depth = cxlflash_change_queue_depth,
        .cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
        .can_queue = CXLFLASH_MAX_CMDS,
        .this_id = -1,
        .sg_tablesize = SG_NONE,        /* No scatter gather support */
        .max_sectors = CXLFLASH_MAX_SECTORS,
        .use_clustering = ENABLE_CLUSTERING,
        .shost_attrs = cxlflash_host_attrs,
        .sdev_attrs = cxlflash_dev_attrs,
};

/*
 * Device dependent values
 */
static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS };
static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS };

/*
 * PCI device binding table
 */
static struct pci_device_id cxlflash_pci_table[] = {
        {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
         PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
        {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
         PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
        {}
};

MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
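/*
 * Editorial sketch: supporting an additional card means adding a
 * dev_dependent_vals instance and a matching entry in the binding table
 * above. The device ID below is hypothetical:
 *
 *      static struct dev_dependent_vals dev_newcard_vals =
 *                                              { CXLFLASH_MAX_SECTORS };
 *
 *      {PCI_VENDOR_ID_IBM, 0x0623, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
 *       (kernel_ulong_t)&dev_newcard_vals},
 */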
/**
 * cxlflash_worker_thread() - work thread handler for the AFU
 * @work: Work structure contained within cxlflash associated with host.
 *
 * Handles the following events:
 * - Link reset which cannot be performed in interrupt context due to
 *   blocking up to a few seconds
 * - Read AFU command room
 * - Rescan the host
 */
static void cxlflash_worker_thread(struct work_struct *work)
{
        struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
                                                work_q);
        struct afu *afu = cfg->afu;
        struct device *dev = &cfg->dev->dev;
        int port;
        ulong lock_flags;

        /* Avoid MMIO if the device has failed */
        if (cfg->state != STATE_NORMAL)
                return;

        spin_lock_irqsave(cfg->host->host_lock, lock_flags);

        if (cfg->lr_state == LINK_RESET_REQUIRED) {
                port = cfg->lr_port;
                if (port < 0) {
                        dev_err(dev, "%s: invalid port index %d\n",
                                __func__, port);
                } else {
                        spin_unlock_irqrestore(cfg->host->host_lock,
                                               lock_flags);

                        /* The reset can block... */
                        afu_link_reset(afu, port,
                                       &afu->afu_map->global.fc_regs[port][0]);
                        spin_lock_irqsave(cfg->host->host_lock, lock_flags);
                }

                cfg->lr_state = LINK_RESET_COMPLETE;
        }

        if (afu->read_room) {
                atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));
                afu->read_room = false;
        }

        spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);

        if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
                scsi_scan_host(cfg->host);
        kref_put(&afu->mapcount, afu_unmap);
}
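/*
 * Scheduling sketch (editorial illustration): the interrupt paths are
 * assumed to record the event and then defer to this worker, roughly:
 *
 *      cfg->lr_state = LINK_RESET_REQUIRED;
 *      cfg->lr_port = port;
 *      schedule_work(&cfg->work_q);
 *
 * which keeps the potentially multi-second link reset out of interrupt
 * context.
 */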
/**
 * cxlflash_probe() - PCI entry point to add host
 * @pdev: PCI device associated with the host.
 * @dev_id: PCI device id associated with device.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_probe(struct pci_dev *pdev,
                          const struct pci_device_id *dev_id)
{
        struct Scsi_Host *host;
        struct cxlflash_cfg *cfg = NULL;
        struct dev_dependent_vals *ddv;
        int rc = 0;

        dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
                __func__, pdev->irq);

        ddv = (struct dev_dependent_vals *)dev_id->driver_data;
        driver_template.max_sectors = ddv->max_sectors;

        host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
        if (!host) {
                dev_err(&pdev->dev, "%s: call to scsi_host_alloc failed!\n",
                        __func__);
                rc = -ENOMEM;
                goto out;
        }

        host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
        host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
        host->max_channel = NUM_FC_PORTS - 1;
        host->unique_id = host->host_no;
        host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;

        cfg = (struct cxlflash_cfg *)host->hostdata;
        cfg->host = host;
        rc = alloc_mem(cfg);
        if (rc) {
                dev_err(&pdev->dev, "%s: call to alloc_mem failed!\n",
                        __func__);
                rc = -ENOMEM;
                scsi_host_put(cfg->host);
                goto out;
        }

        cfg->init_state = INIT_STATE_NONE;
        cfg->dev = pdev;
        cfg->cxl_fops = cxlflash_cxl_fops;

        /*
         * The promoted LUNs move to the top of the LUN table. The rest stay
         * on the bottom half. The bottom half grows from the end
         * (index = 255), whereas the top half grows from the beginning
         * (index = 0).
         */
        cfg->promote_lun_index = 0;
        cfg->last_lun_index[0] = CXLFLASH_NUM_VLUNS/2 - 1;
        cfg->last_lun_index[1] = CXLFLASH_NUM_VLUNS/2 - 1;

        cfg->dev_id = (struct pci_device_id *)dev_id;

        init_waitqueue_head(&cfg->tmf_waitq);
        init_waitqueue_head(&cfg->reset_waitq);

        INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
        cfg->lr_state = LINK_RESET_INVALID;
        cfg->lr_port = -1;
        spin_lock_init(&cfg->tmf_slock);
        mutex_init(&cfg->ctx_tbl_list_mutex);
        mutex_init(&cfg->ctx_recovery_mutex);
        init_rwsem(&cfg->ioctl_rwsem);
        INIT_LIST_HEAD(&cfg->ctx_err_recovery);
        INIT_LIST_HEAD(&cfg->lluns);

        pci_set_drvdata(pdev, cfg);

        cfg->cxl_afu = cxl_pci_to_afu(pdev);

        rc = init_pci(cfg);
        if (rc) {
                dev_err(&pdev->dev, "%s: call to init_pci failed rc=%d!\n",
                        __func__, rc);
                goto out_remove;
        }
        cfg->init_state = INIT_STATE_PCI;

        rc = init_afu(cfg);
        if (rc) {
                dev_err(&pdev->dev, "%s: call to init_afu failed rc=%d!\n",
                        __func__, rc);
                goto out_remove;
        }
        cfg->init_state = INIT_STATE_AFU;

        rc = init_scsi(cfg);
        if (rc) {
                dev_err(&pdev->dev, "%s: call to init_scsi failed rc=%d!\n",
                        __func__, rc);
                goto out_remove;
        }
        cfg->init_state = INIT_STATE_SCSI;

out:
        pr_debug("%s: returning rc=%d\n", __func__, rc);
        return rc;

out_remove:
        cxlflash_remove(pdev);
        goto out;
}

/**
 * drain_ioctls() - wait until all currently executing ioctls have completed
 * @cfg: Internal structure associated with the host.
 *
 * Obtain write access to the read/write semaphore that wraps ioctl
 * handling in order to 'drain' ioctls currently executing.
 */
static void drain_ioctls(struct cxlflash_cfg *cfg)
{
        down_write(&cfg->ioctl_rwsem);
        up_write(&cfg->ioctl_rwsem);
}
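/*
 * Reader-side sketch (editorial illustration): drain_ioctls() works only
 * because each ioctl path is assumed to bracket its work with the shared
 * side of the same semaphore, roughly:
 *
 *      down_read(&cfg->ioctl_rwsem);
 *      rc = handle_ioctl(cfg, arg);    (hypothetical handler)
 *      up_read(&cfg->ioctl_rwsem);
 *
 * Once the writer has acquired and released the semaphore, no reader that
 * started before the acquisition can still be executing.
 */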
/**
 * cxlflash_pci_error_detected() - called when a PCI error is detected
 * @pdev: PCI device struct.
 * @state: PCI channel state.
 *
 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
                                                    pci_channel_state_t state)
{
        int rc = 0;
        struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
        struct device *dev = &cfg->dev->dev;

        dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);

        switch (state) {
        case pci_channel_io_frozen:
                cfg->state = STATE_RESET;
                scsi_block_requests(cfg->host);
                drain_ioctls(cfg);
                rc = cxlflash_mark_contexts_error(cfg);
                if (unlikely(rc))
                        dev_err(dev, "%s: Failed to mark user contexts!(%d)\n",
                                __func__, rc);
                term_afu(cfg);
                return PCI_ERS_RESULT_NEED_RESET;
        case pci_channel_io_perm_failure:
                cfg->state = STATE_FAILTERM;
                wake_up_all(&cfg->reset_waitq);
                scsi_unblock_requests(cfg->host);
                return PCI_ERS_RESULT_DISCONNECT;
        default:
                break;
        }
        return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * cxlflash_pci_slot_reset() - called when PCI slot has been reset
 * @pdev: PCI device struct.
 *
 * This routine is called by the pci error recovery code after the PCI
 * slot has been reset, just before we should resume normal operations.
 *
 * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
{
        int rc = 0;
        struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
        struct device *dev = &cfg->dev->dev;

        dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);

        rc = init_afu(cfg);
        if (unlikely(rc)) {
                dev_err(dev, "%s: EEH recovery failed! (%d)\n", __func__, rc);
                return PCI_ERS_RESULT_DISCONNECT;
        }

        return PCI_ERS_RESULT_RECOVERED;
}

/**
 * cxlflash_pci_resume() - called when normal operation can resume
 * @pdev: PCI device struct
 */
static void cxlflash_pci_resume(struct pci_dev *pdev)
{
        struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
        struct device *dev = &cfg->dev->dev;

        dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);

        cfg->state = STATE_NORMAL;
        wake_up_all(&cfg->reset_waitq);
        scsi_unblock_requests(cfg->host);
}

static const struct pci_error_handlers cxlflash_err_handler = {
        .error_detected = cxlflash_pci_error_detected,
        .slot_reset = cxlflash_pci_slot_reset,
        .resume = cxlflash_pci_resume,
};

/*
 * PCI device structure
 */
static struct pci_driver cxlflash_driver = {
        .name = CXLFLASH_NAME,
        .id_table = cxlflash_pci_table,
        .probe = cxlflash_probe,
        .remove = cxlflash_remove,
        .err_handler = &cxlflash_err_handler,
};

/**
 * init_cxlflash() - module entry point
 *
 * Return: 0 on success, -errno on failure
 */
static int __init init_cxlflash(void)
{
        pr_info("%s: %s\n", __func__, CXLFLASH_ADAPTER_NAME);

        cxlflash_list_init();

        return pci_register_driver(&cxlflash_driver);
}

/**
 * exit_cxlflash() - module exit point
 */
static void __exit exit_cxlflash(void)
{
        cxlflash_term_global_luns();
        cxlflash_free_errpage();

        pci_unregister_driver(&cxlflash_driver);
}

module_init(init_cxlflash);
module_exit(exit_cxlflash);
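/*
 * Usage note (illustrative): the driver is managed like any other PCI
 * driver module; probe runs for each device matched by the binding table:
 *
 *      # modprobe cxlflash
 *      # rmmod cxlflash
 *
 * On unload, pci_unregister_driver() invokes remove for each bound device
 * before exit_cxlflash() returns.
 */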