/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/unaligned.h>

#include <misc/cxl.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "main.h"
#include "sislite.h"
#include "common.h"

MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");

/**
 * cmd_checkout() - checks out an AFU command
 * @afu:	AFU to checkout from.
 *
 * Commands are checked out in a round-robin fashion. Note that since
 * the command pool is larger than the hardware queue, the majority of
 * times we will only loop once or twice before getting a command. The
 * buffer and CDB within the command are initialized (zeroed) prior to
 * returning.
 *
 * Return: The checked out command or NULL when command pool is empty.
 */
static struct afu_cmd *cmd_checkout(struct afu *afu)
{
	int k, dec = CXLFLASH_NUM_CMDS;
	struct afu_cmd *cmd;

	while (dec--) {
		k = (afu->cmd_couts++ & (CXLFLASH_NUM_CMDS - 1));

		cmd = &afu->cmd[k];

		if (!atomic_dec_if_positive(&cmd->free)) {
			pr_devel("%s: returning found index=%d cmd=%p\n",
				 __func__, cmd->slot, cmd);
			memset(cmd->buf, 0, CMD_BUFSIZE);
			memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
			return cmd;
		}
	}

	return NULL;
}

/**
 * cmd_checkin() - checks in an AFU command
 * @cmd:	AFU command to checkin.
 *
 * Safe to pass commands that have already been checked in. Several
 * internal tracking fields are reset as part of the checkin. Note
 * that these are intentionally reset prior to toggling the free bit
 * to avoid clobbering values in the event that the command is checked
 * out right away.
 */
static void cmd_checkin(struct afu_cmd *cmd)
{
	cmd->rcb.scp = NULL;
	cmd->rcb.timeout = 0;
	cmd->sa.ioasc = 0;
	cmd->cmd_tmf = false;
	cmd->sa.host_use[0] = 0; /* clears both completion and retry bytes */

	if (unlikely(atomic_inc_return(&cmd->free) != 1)) {
		pr_err("%s: Freeing cmd (%d) that is not in use!\n",
		       __func__, cmd->slot);
		return;
	}

	pr_devel("%s: released cmd %p index=%d\n", __func__, cmd, cmd->slot);
}
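/*
 * Illustrative note (not from the original source): the round-robin index
 * above relies on CXLFLASH_NUM_CMDS being a power of two, so that masking
 * with (CXLFLASH_NUM_CMDS - 1) is equivalent to a modulo. For example,
 * assuming a hypothetical pool of 16 commands:
 *
 *	cmd_couts = 17  ->  17 & 15 = 1   (slot 1)
 *	cmd_couts = 31  ->  31 & 15 = 15  (slot 15)
 *	cmd_couts = 32  ->  32 & 15 = 0   (wraps back to slot 0)
 *
 * atomic_dec_if_positive() then claims the slot only when 'free' was 1,
 * returning the post-decrement value (0) on success.
 */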
/**
 * process_cmd_err() - command error handler
 * @cmd:	AFU command that experienced the error.
 * @scp:	SCSI command associated with the AFU command in error.
 *
 * Translates error bits from AFU command to SCSI command results.
 */
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
	struct sisl_ioarcb *ioarcb;
	struct sisl_ioasa *ioasa;
	u32 resid;

	if (unlikely(!cmd))
		return;

	ioarcb = &(cmd->rcb);
	ioasa = &(cmd->sa);

	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
		resid = ioasa->resid;
		scsi_set_resid(scp, resid);
		pr_debug("%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
			 __func__, cmd, scp, resid);
	}

	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
		pr_debug("%s: cmd overrun cmd = %p scp = %p\n",
			 __func__, cmd, scp);
		scp->result = (DID_ERROR << 16);
	}

	pr_debug("%s: cmd failed afu_rc=%d scsi_rc=%d fc_rc=%d "
		 "afu_extra=0x%X, scsi_extra=0x%X, fc_extra=0x%X\n",
		 __func__, ioasa->rc.afu_rc, ioasa->rc.scsi_rc,
		 ioasa->rc.fc_rc, ioasa->afu_extra, ioasa->scsi_extra,
		 ioasa->fc_extra);

	if (ioasa->rc.scsi_rc) {
		/* We have a SCSI status */
		if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
			memcpy(scp->sense_buffer, ioasa->sense_data,
			       SISL_SENSE_DATA_LEN);
			scp->result = ioasa->rc.scsi_rc;
		} else
			scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
	}

	/*
	 * We encountered an error. Set scp->result based on nature
	 * of error.
	 */
	if (ioasa->rc.fc_rc) {
		/* We have an FC status */
		switch (ioasa->rc.fc_rc) {
		case SISL_FC_RC_LINKDOWN:
			scp->result = (DID_REQUEUE << 16);
			break;
		case SISL_FC_RC_RESID:
			/* This indicates an FCP resid underrun */
			if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
				/* If the SISL_RC_FLAGS_OVERRUN flag was set,
				 * then we will handle this error elsewhere.
				 * If not then we must handle it here.
				 * This is probably an AFU bug.
				 */
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_FC_RC_RESIDERR:
			/* Resid mismatch between adapter and device */
		case SISL_FC_RC_TGTABORT:
		case SISL_FC_RC_ABORTOK:
		case SISL_FC_RC_ABORTFAIL:
		case SISL_FC_RC_NOLOGI:
		case SISL_FC_RC_ABORTPEND:
		case SISL_FC_RC_WRABORTPEND:
		case SISL_FC_RC_NOEXP:
		case SISL_FC_RC_INUSE:
			scp->result = (DID_ERROR << 16);
			break;
		}
	}

	if (ioasa->rc.afu_rc) {
		/* We have an AFU error */
		switch (ioasa->rc.afu_rc) {
		case SISL_AFU_RC_NO_CHANNELS:
			scp->result = (DID_NO_CONNECT << 16);
			break;
		case SISL_AFU_RC_DATA_DMA_ERR:
			switch (ioasa->afu_extra) {
			case SISL_AFU_DMA_ERR_PAGE_IN:
				/* Retry */
				scp->result = (DID_IMM_RETRY << 16);
				break;
			case SISL_AFU_DMA_ERR_INVALID_EA:
			default:
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_AFU_RC_OUT_OF_DATA_BUFS:
			/* Retry */
			scp->result = (DID_ALLOC_FAILURE << 16);
			break;
		default:
			scp->result = (DID_ERROR << 16);
		}
	}
}
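/*
 * Illustrative note (not from the original source): scp->result is assigned
 * in order of increasing precedence above. A command that fails with both a
 * SCSI status and an AFU return code ends up reporting the AFU disposition,
 * since the afu_rc block executes last and overwrites scp->result. For
 * example, scsi_rc set with sense data plus afu_rc == SISL_AFU_RC_NO_CHANNELS
 * yields (DID_NO_CONNECT << 16), not the SCSI status.
 */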
/**
 * cmd_complete() - command completion handler
 * @cmd:	AFU command that has completed.
 *
 * Prepares and submits command that has either completed or timed out to
 * the SCSI stack. Checks AFU command back into command pool for non-internal
 * (rcb.scp populated) commands.
 */
static void cmd_complete(struct afu_cmd *cmd)
{
	struct scsi_cmnd *scp;
	ulong lock_flags;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	bool cmd_is_tmf;

	spin_lock_irqsave(&cmd->slock, lock_flags);
	cmd->sa.host_use_b[0] |= B_DONE;
	spin_unlock_irqrestore(&cmd->slock, lock_flags);

	if (cmd->rcb.scp) {
		scp = cmd->rcb.scp;
		if (unlikely(cmd->sa.ioasc))
			process_cmd_err(cmd, scp);
		else
			scp->result = (DID_OK << 16);

		cmd_is_tmf = cmd->cmd_tmf;

		/* Trace before the checkin; cmd_checkin() zeroes sa.ioasc */
		pr_debug_ratelimited("%s: calling scsi_done scp=%p result=%X "
				     "ioasc=%d\n", __func__, scp, scp->result,
				     cmd->sa.ioasc);

		cmd_checkin(cmd); /* Don't use cmd after here */

		scsi_dma_unmap(scp);
		scp->scsi_done(scp);

		if (cmd_is_tmf) {
			spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
			cfg->tmf_active = false;
			wake_up_all_locked(&cfg->tmf_waitq);
			spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		}
	} else
		complete(&cmd->cevent);
}

/**
 * context_reset() - timeout handler for AFU commands
 * @cmd:	AFU command that timed out.
 *
 * Sends a reset to the AFU.
 */
static void context_reset(struct afu_cmd *cmd)
{
	int nretry = 0;
	u64 rrin = 0x1;
	u64 room = 0;
	struct afu *afu = cmd->parent;
	ulong lock_flags;

	pr_debug("%s: cmd=%p\n", __func__, cmd);

	spin_lock_irqsave(&cmd->slock, lock_flags);

	/* Already completed? */
	if (cmd->sa.host_use_b[0] & B_DONE) {
		spin_unlock_irqrestore(&cmd->slock, lock_flags);
		return;
	}

	cmd->sa.host_use_b[0] |= (B_DONE | B_ERROR | B_TIMEOUT);
	spin_unlock_irqrestore(&cmd->slock, lock_flags);

	/*
	 * We really want to send this reset at all costs, so spread
	 * out wait time on successive retries for available room.
	 */
	do {
		room = readq_be(&afu->host_map->cmd_room);
		atomic64_set(&afu->room, room);
		if (room)
			goto write_rrin;
		udelay(1 << nretry);
	} while (nretry++ < MC_ROOM_RETRY_CNT);

	pr_err("%s: no cmd_room to send reset\n", __func__);
	return;

write_rrin:
	nretry = 0;
	writeq_be(rrin, &afu->host_map->ioarrin);
	do {
		rrin = readq_be(&afu->host_map->ioarrin);
		if (rrin != 0x1)
			break;
		/* Double delay each time */
		udelay(1 << nretry);
	} while (nretry++ < MC_ROOM_RETRY_CNT);
}
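/*
 * Illustrative note (not from the original source): udelay(1 << nretry)
 * implements an exponential backoff. Each pass doubles the delay, so the
 * total busy-wait across the loop is (2^(MC_ROOM_RETRY_CNT + 1) - 1)
 * microseconds. Assuming, hypothetically, MC_ROOM_RETRY_CNT == 10:
 *
 *	1 + 2 + 4 + ... + 1024 = 2047us (~2ms worst case)
 */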
/**
 * send_cmd() - sends an AFU command
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int nretry = 0;
	int rc = 0;
	u64 room;
	long newval;

	/*
	 * This routine is used by critical users such as AFU sync and to
	 * send a task management function (TMF). Thus we want to retry a
	 * bit before returning an error. To avoid the performance penalty
	 * of MMIO, we spread the update of 'room' over multiple commands.
	 */
retry:
	newval = atomic64_dec_if_positive(&afu->room);
	if (!newval) {
		do {
			room = readq_be(&afu->host_map->cmd_room);
			atomic64_set(&afu->room, room);
			if (room)
				goto write_ioarrin;
			udelay(1 << nretry);
		} while (nretry++ < MC_ROOM_RETRY_CNT);

		dev_err(dev, "%s: no cmd_room to send 0x%X\n",
			__func__, cmd->rcb.cdb[0]);

		goto no_room;
	} else if (unlikely(newval < 0)) {
		/* This should be rare. i.e. Only if two threads race and
		 * decrement before the MMIO read is done. In this case
		 * just benefit from the other thread having updated
		 * afu->room.
		 */
		if (nretry++ < MC_ROOM_RETRY_CNT) {
			udelay(1 << nretry);
			goto retry;
		}

		goto no_room;
	}

write_ioarrin:
	writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
out:
	pr_devel("%s: cmd=%p len=%d ea=%p rc=%d\n", __func__, cmd,
		 cmd->rcb.data_len, (void *)cmd->rcb.data_ea, rc);
	return rc;

no_room:
	afu->read_room = true;
	kref_get(&cfg->afu->mapcount);
	schedule_work(&cfg->work_q);
	rc = SCSI_MLQUEUE_HOST_BUSY;
	goto out;
}

/**
 * wait_resp() - polls for a response or timeout to a sent AFU command
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command that was sent.
 */
static void wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
	ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);

	timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
	if (!timeout)
		context_reset(cmd);

	if (unlikely(cmd->sa.ioasc != 0))
		pr_err("%s: CMD 0x%X failed, IOASC: flags 0x%X, afu_rc 0x%X, "
		       "scsi_rc 0x%X, fc_rc 0x%X\n", __func__, cmd->rcb.cdb[0],
		       cmd->sa.rc.flags, cmd->sa.rc.afu_rc, cmd->sa.rc.scsi_rc,
		       cmd->sa.rc.fc_rc);
}
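/*
 * Illustrative note (not from the original source): rcb.timeout is expressed
 * in seconds, so wait_resp() converts it to milliseconds (* 1000) and allows
 * twice the nominal budget (* 2) before escalating to context_reset(). A
 * command submitted with rcb.timeout = 5 therefore waits up to 10 seconds
 * for completion.
 */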
/**
 * send_tmf() - sends a Task Management Function (TMF)
 * @afu:	AFU to checkout from.
 * @scp:	SCSI command from stack.
 * @tmfcmd:	TMF command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
{
	struct afu_cmd *cmd;

	u32 port_sel = scp->device->channel + 1;
	short lflag = 0;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
	struct device *dev = &cfg->dev->dev;
	ulong lock_flags;
	int rc = 0;
	ulong to;

	cmd = cmd_checkout(afu);
	if (unlikely(!cmd)) {
		dev_err(dev, "%s: could not get a free command\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	/* When Task Management Function is active do not send another */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	cfg->tmf_active = true;
	cmd->cmd_tmf = true;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	cmd->rcb.ctx_id = afu->ctx_hndl;
	cmd->rcb.port_sel = port_sel;
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	lflag = SISL_REQ_FLAGS_TMF_CMD;

	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);

	/* Stash the scp in the reserved field, for reuse during interrupt */
	cmd->rcb.scp = scp;

	/* Copy the CDB from the cmd passed in */
	memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));

	/* Send the command */
	rc = send_cmd(afu, cmd);
	if (unlikely(rc)) {
		cmd_checkin(cmd);
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		goto out;
	}

	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	to = msecs_to_jiffies(5000);
	to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
						       !cfg->tmf_active,
						       cfg->tmf_slock,
						       to);
	if (!to) {
		cfg->tmf_active = false;
		dev_err(dev, "%s: TMF timed out!\n", __func__);
		rc = -1;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
out:
	return rc;
}
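/*
 * Illustrative note (not from the original source): tmf_active and tmf_slock
 * serialize TMFs against each other and against the fast path. A second TMF
 * issued while one is outstanding sleeps on tmf_waitq (with the lock dropped
 * by wait_event_interruptible_lock_irq()) until cmd_complete() clears
 * tmf_active and wakes the queue, while cxlflash_queuecommand() simply
 * bounces regular I/O with SCSI_MLQUEUE_HOST_BUSY for the duration.
 */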
static void afu_unmap(struct kref *ref)
{
	struct afu *afu = container_of(ref, struct afu, mapcount);

	if (likely(afu->afu_map)) {
		cxl_psa_unmap((void __iomem *)afu->afu_map);
		afu->afu_map = NULL;
	}
}

/**
 * cxlflash_driver_info() - information handler for this host driver
 * @host:	SCSI host associated with device.
 *
 * Return: A string describing the device.
 */
static const char *cxlflash_driver_info(struct Scsi_Host *host)
{
	return CXLFLASH_ADAPTER_NAME;
}

/**
 * cxlflash_queuecommand() - sends a mid-layer request
 * @host:	SCSI host associated with device.
 * @scp:	SCSI command to send.
 *
 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd;
	u32 port_sel = scp->device->channel + 1;
	int nseg, i, ncount;
	struct scatterlist *sg;
	ulong lock_flags;
	short lflag = 0;
	int rc = 0;
	int kref_got = 0;

	dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
			    "cdb=(%08X-%08X-%08X-%08X)\n",
			    __func__, scp, host->host_no, scp->device->channel,
			    scp->device->id, scp->device->lun,
			    get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	/*
	 * If a Task Management Function is active, wait for it to complete
	 * before continuing with regular commands.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active) {
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	switch (cfg->state) {
	case STATE_RESET:
		dev_dbg_ratelimited(dev, "%s: device is in reset!\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	case STATE_FAILTERM:
		dev_dbg_ratelimited(dev, "%s: device has failed!\n", __func__);
		scp->result = (DID_NO_CONNECT << 16);
		scp->scsi_done(scp);
		rc = 0;
		goto out;
	default:
		break;
	}

	cmd = cmd_checkout(afu);
	if (unlikely(!cmd)) {
		dev_err(dev, "%s: could not get a free command\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	kref_get(&cfg->afu->mapcount);
	kref_got = 1;

	cmd->rcb.ctx_id = afu->ctx_hndl;
	cmd->rcb.port_sel = port_sel;
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		lflag = SISL_REQ_FLAGS_HOST_WRITE;
	else
		lflag = SISL_REQ_FLAGS_HOST_READ;

	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);

	/* Stash the scp in the reserved field, for reuse during interrupt */
	cmd->rcb.scp = scp;

	nseg = scsi_dma_map(scp);
	if (unlikely(nseg < 0)) {
		dev_err(dev, "%s: Fail DMA map! nseg=%d\n",
			__func__, nseg);
		cmd_checkin(cmd); /* return the command to the pool */
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	ncount = scsi_sg_count(scp);
	scsi_for_each_sg(scp, sg, ncount, i) {
		cmd->rcb.data_len = sg_dma_len(sg);
		cmd->rcb.data_ea = sg_dma_address(sg);
	}

	/* Copy the CDB from the scsi_cmnd passed in */
	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));

	/* Send the command */
	rc = send_cmd(afu, cmd);
	if (unlikely(rc)) {
		cmd_checkin(cmd);
		scsi_dma_unmap(scp);
	}

out:
	if (kref_got)
		kref_put(&afu->mapcount, afu_unmap);
	pr_devel("%s: returning rc=%d\n", __func__, rc);
	return rc;
}
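/*
 * Illustrative note (not from the original source): the IOARCB carries a
 * single data_len/data_ea pair, so the scsi_for_each_sg() loop above is only
 * correct when the request maps to one DMA segment; with multiple segments,
 * each iteration would overwrite the previous one and only the last segment
 * would be described to the AFU. This presumes the host template advertises
 * no scatter-gather support (sg_tablesize = 1) so the mid-layer never hands
 * down more than one segment.
 */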
/**
 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;

	if (pci_channel_offline(pdev))
		wait_event_timeout(cfg->reset_waitq,
				   !pci_channel_offline(pdev),
				   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
}

/**
 * free_mem() - free memory associated with the AFU
 * @cfg:	Internal structure associated with the host.
 */
static void free_mem(struct cxlflash_cfg *cfg)
{
	int i;
	char *buf = NULL;
	struct afu *afu = cfg->afu;

	if (cfg->afu) {
		for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
			buf = afu->cmd[i].buf;
			if (!((u64)buf & (PAGE_SIZE - 1)))
				free_page((ulong)buf);
		}

		free_pages((ulong)afu, get_order(sizeof(struct afu)));
		cfg->afu = NULL;
	}
}

/**
 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU in a partially allocated/initialized state.
 *
 * Cleans up all state associated with the command queue, and unmaps
 * the MMIO space.
 *
 *  - complete() will take care of commands we initiated (they'll be checked
 *  in as part of the cleanup that occurs after the completion)
 *
 *  - cmd_checkin() will take care of entries that we did not initiate and that
 *  have not (and will not) complete because they are sitting on a [now stale]
 *  hardware queue
 */
static void stop_afu(struct cxlflash_cfg *cfg)
{
	int i;
	struct afu *afu = cfg->afu;
	struct afu_cmd *cmd;

	if (likely(afu)) {
		for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
			cmd = &afu->cmd[i];
			complete(&cmd->cevent);
			if (!atomic_read(&cmd->free))
				cmd_checkin(cmd);
		}

		if (likely(afu->afu_map)) {
			cxl_psa_unmap((void __iomem *)afu->afu_map);
			afu->afu_map = NULL;
		}
		kref_put(&afu->mapcount, afu_unmap);
	}
}

/**
 * term_intr() - disables all AFU interrupts
 * @cfg:	Internal structure associated with the host.
 * @level:	Depth of allocation, where to begin waterfall tear down.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;

	if (!afu || !cfg->mcctx) {
		dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
		return;
	}

	switch (level) {
	case UNMAP_THREE:
		cxl_unmap_afu_irq(cfg->mcctx, 3, afu);
	case UNMAP_TWO:
		cxl_unmap_afu_irq(cfg->mcctx, 2, afu);
	case UNMAP_ONE:
		cxl_unmap_afu_irq(cfg->mcctx, 1, afu);
	case FREE_IRQ:
		cxl_free_afu_irqs(cfg->mcctx);
		/* fall through */
	case UNDO_NOOP:
		/* No action required */
		break;
	}
}
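/*
 * Illustrative note (not from the original source): every case in term_intr()
 * deliberately falls through to the one below it, so the level names the
 * highest allocation that succeeded and everything beneath it is undone too.
 * For example, term_intr(cfg, UNMAP_TWO) unmaps IRQs 2 and 1 and then frees
 * the interrupt allocation, while UNDO_NOOP tears nothing down.
 */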
/**
 * term_mc() - terminates the master context
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_mc(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;

	if (!afu || !cfg->mcctx) {
		dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
		return;
	}

	rc = cxl_stop_context(cfg->mcctx);
	WARN_ON(rc);
	cfg->mcctx = NULL;
}

/**
 * term_afu() - terminates the AFU
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_afu(struct cxlflash_cfg *cfg)
{
	/*
	 * Tear down is carefully orchestrated to ensure
	 * no interrupts can come in when the problem state
	 * area is unmapped.
	 *
	 * 1) Disable all AFU interrupts
	 * 2) Unmap the problem state area
	 * 3) Stop the master context
	 */
	term_intr(cfg, UNMAP_THREE);
	if (cfg->afu)
		stop_afu(cfg);

	term_mc(cfg);

	pr_debug("%s: returning\n", __func__);
}

/**
 * notify_shutdown() - notifies device of pending shutdown
 * @cfg:	Internal structure associated with the host.
 * @wait:	Whether to wait for shutdown processing to complete.
 *
 * This function will notify the AFU that the adapter is being shutdown
 * and will wait for shutdown processing to complete if wait is true.
 * This notification should flush pending I/Os to the device and halt
 * further I/Os until the next AFU reset is issued and device restarted.
 */
static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct sisl_global_map __iomem *global = &afu->afu_map->global;
	struct dev_dependent_vals *ddv;
	u64 reg, status;
	int i, retry_cnt = 0;

	ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
	if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
		return;

	/* Notify AFU */
	for (i = 0; i < NUM_FC_PORTS; i++) {
		reg = readq_be(&global->fc_regs[i][FC_CONFIG2 / 8]);
		reg |= SISL_FC_SHUTDOWN_NORMAL;
		writeq_be(reg, &global->fc_regs[i][FC_CONFIG2 / 8]);
	}

	if (!wait)
		return;

	/* Wait up to 1.5 seconds for shutdown processing to complete */
	for (i = 0; i < NUM_FC_PORTS; i++) {
		retry_cnt = 0;
		while (true) {
			status = readq_be(&global->fc_regs[i][FC_STATUS / 8]);
			if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
				break;
			if (++retry_cnt >= MC_RETRY_CNT) {
				dev_dbg(dev, "%s: port %d shutdown processing "
					"not yet completed\n", __func__, i);
				break;
			}
			msleep(100 * retry_cnt);
		}
	}
}

/**
 * cxlflash_shutdown() - shutdown handler
 * @pdev:	PCI device associated with the host.
 */
static void cxlflash_shutdown(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);

	notify_shutdown(cfg, false);
}
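/*
 * Illustrative note (not from the original source): the shutdown poll uses a
 * linearly growing sleep, msleep(100 * retry_cnt), so each port waits
 * 100 + 200 + ... ms across its retries. Assuming, hypothetically,
 * MC_RETRY_CNT == 5, that is 100 + 200 + 300 + 400 = 1000ms per port, on
 * the order of the "up to 1.5 seconds" budget mentioned above.
 */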
/**
 * cxlflash_remove() - PCI entry point to tear down host
 * @pdev:	PCI device associated with the host.
 *
 * Safe to use as a cleanup in partially allocated/initialized state.
 */
static void cxlflash_remove(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	ulong lock_flags;

	/* If a Task Management Function is active, wait for it to complete
	 * before continuing with remove.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	/* Notify AFU and wait for shutdown processing to complete */
	notify_shutdown(cfg, true);

	cfg->state = STATE_FAILTERM;
	cxlflash_stop_term_user_contexts(cfg);

	switch (cfg->init_state) {
	case INIT_STATE_SCSI:
		cxlflash_term_local_luns(cfg);
		scsi_remove_host(cfg->host);
		/* fall through */
	case INIT_STATE_AFU:
		cancel_work_sync(&cfg->work_q);
		term_afu(cfg);
	case INIT_STATE_PCI:
		pci_disable_device(pdev);
	case INIT_STATE_NONE:
		free_mem(cfg);
		scsi_host_put(cfg->host);
		break;
	}

	pr_debug("%s: returning\n", __func__);
}

/**
 * alloc_mem() - allocates the AFU and its command pool
 * @cfg:	Internal structure associated with the host.
 *
 * A partially allocated state remains on failure.
 *
 * Return:
 *	0 on success
 *	-ENOMEM on failure to allocate memory
 */
static int alloc_mem(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	int i;
	char *buf = NULL;
	struct device *dev = &cfg->dev->dev;

	/* AFU is ~12k, i.e. only one 64k page or up to four 4k pages */
	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					    get_order(sizeof(struct afu)));
	if (unlikely(!cfg->afu)) {
		dev_err(dev, "%s: cannot get %d free pages\n",
			__func__, get_order(sizeof(struct afu)));
		rc = -ENOMEM;
		goto out;
	}
	cfg->afu->parent = cfg;
	cfg->afu->afu_map = NULL;

	for (i = 0; i < CXLFLASH_NUM_CMDS; buf += CMD_BUFSIZE, i++) {
		if (!((u64)buf & (PAGE_SIZE - 1))) {
			buf = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
			if (unlikely(!buf)) {
				dev_err(dev,
					"%s: Allocate command buffers fail!\n",
					__func__);
				rc = -ENOMEM;
				free_mem(cfg);
				goto out;
			}
		}

		cfg->afu->cmd[i].buf = buf;
		atomic_set(&cfg->afu->cmd[i].free, 1);
		cfg->afu->cmd[i].slot = i;
	}

out:
	return rc;
}
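/*
 * Illustrative note (not from the original source): alloc_mem() packs several
 * command buffers into each page. buf advances by CMD_BUFSIZE per command,
 * and a fresh page is allocated only when buf lands on a page boundary
 * ((u64)buf & (PAGE_SIZE - 1) == 0). With a hypothetical 4K CMD_BUFSIZE and
 * 64K pages, commands 0-15 share the first page and command 16 triggers the
 * next allocation. free_mem() uses the same boundary test to free each page
 * exactly once. This assumes CMD_BUFSIZE evenly divides PAGE_SIZE.
 */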
/**
 * init_pci() - initializes the host as a PCI device
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_pci(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	int rc = 0;

	rc = pci_enable_device(pdev);
	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			cxlflash_wait_for_pci_err_recovery(cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(&pdev->dev, "%s: Cannot enable adapter\n",
				__func__);
			cxlflash_wait_for_pci_err_recovery(cfg);
			goto out;
		}
	}

out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_scsi(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	int rc = 0;

	rc = scsi_add_host(cfg->host, &pdev->dev);
	if (rc) {
		dev_err(&pdev->dev, "%s: scsi_add_host failed (rc=%d)\n",
			__func__, rc);
		goto out;
	}

	scsi_scan_host(cfg->host);

out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * set_port_online() - transitions the specified host FC port to online state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. Online state means
 * that the FC link layer has synced, completed the handshaking process, and
 * is ready for login to start.
 */
static void set_port_online(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE);	/* clear OFF_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);	/* set ON_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * set_port_offline() - transitions the specified host FC port to offline state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call.
 */
static void set_port_offline(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);	/* clear ON_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);	/* set OFF_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * wait_port_online() - waits for the specified host FC port to come online
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call. This will timeout
 * when the cable is not plugged in.
 *
 * Return:
 *	TRUE (1) when the specified port is online
 *	FALSE (0) when the specified port fails to come online after timeout
 *	-EINVAL when @delay_us is less than 1000
 */
static int wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	if (delay_us < 1000) {
		pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
		return -EINVAL;
	}

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
}
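/*
 * Illustrative note (not from the original source): the polling loop sleeps
 * before every status read, so the worst-case wait is roughly
 * (delay_us / 1000) ms * (nretry + 1). With a hypothetical 100ms interval
 * and nretry == 500, a missing cable keeps the caller here for about 50
 * seconds before the FALSE return. The delay_us < 1000 guard exists because
 * the interval is converted to whole milliseconds for msleep().
 */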
/**
 * wait_port_offline() - waits for the specified host FC port to go offline
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call.
 *
 * Return:
 *	TRUE (1) when the specified port is offline
 *	FALSE (0) when the specified port fails to go offline after timeout
 *	-EINVAL when @delay_us is less than 1000
 */
static int wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	if (delay_us < 1000) {
		pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
		return -EINVAL;
	}

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
}

/**
 * afu_set_wwpn() - configures the WWPN for the specified host FC port
 * @afu:	AFU associated with the host that owns the specified FC port.
 * @port:	Port number being configured.
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @wwpn:	The world-wide-port-number previously discovered for port.
 *
 * The provided MMIO region must be mapped prior to call. As part of the
 * sequence to configure the WWPN, the port is toggled offline and then back
 * online. This toggling action can cause this routine to delay up to a few
 * seconds. When configured to use the internal LUN feature of the AFU, a
 * failure to come online is overridden.
 *
 * Return: 0 always; a failure of the port to toggle offline or to come
 * back online is logged but not treated as fatal.
 */
static int afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
			u64 wwpn)
{
	int rc = 0;

	set_port_offline(fc_regs);

	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT)) {
		pr_debug("%s: wait on port %d to go offline timed out\n",
			 __func__, port);
		rc = -1; /* but continue on to leave the port back online */
	}

	if (rc == 0)
		writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);

	/* Always return success after programming WWPN */
	rc = 0;

	set_port_online(fc_regs);

	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT)) {
		pr_err("%s: wait on port %d to go online timed out\n",
		       __func__, port);
	}

	pr_debug("%s: returning rc=%d\n", __func__, rc);

	return rc;
}
/**
 * afu_link_reset() - resets the specified host FC port
 * @afu:	AFU associated with the host that owns the specified FC port.
 * @port:	Port number being configured.
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. The sequence to
 * reset the port involves toggling it offline and then back online. This
 * action can cause this routine to delay up to a few seconds. An effort
 * is made to maintain link with the device by switching the host to use
 * the alternate port exclusively while the reset takes place.
 */
static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
{
	u64 port_sel;

	/* first switch the AFU to the other links, if any */
	port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
	port_sel &= ~(1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT))
		pr_err("%s: wait on port %d to go offline timed out\n",
		       __func__, port);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT))
		pr_err("%s: wait on port %d to go online timed out\n",
		       __func__, port);

	/* switch back to include this port */
	port_sel |= (1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	pr_debug("%s: returning port_sel=%lld\n", __func__, port_sel);
}

/*
 * Asynchronous interrupt information table
 */
static const struct asyc_intr_info ainfo[] = {
	{SISL_ASTATUS_FC0_OTHER, "other error", 0, CLR_FC_ERROR | LINK_RESET},
	{SISL_ASTATUS_FC0_LOGO, "target initiated LOGO", 0, 0},
	{SISL_ASTATUS_FC0_CRC_T, "CRC threshold exceeded", 0, LINK_RESET},
	{SISL_ASTATUS_FC0_LOGI_R, "login timed out, retrying", 0, LINK_RESET},
	{SISL_ASTATUS_FC0_LOGI_F, "login failed", 0, CLR_FC_ERROR},
	{SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, SCAN_HOST},
	{SISL_ASTATUS_FC0_LINK_DN, "link down", 0, 0},
	{SISL_ASTATUS_FC0_LINK_UP, "link up", 0, SCAN_HOST},
	{SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
	{SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
	{SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
	{SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, LINK_RESET},
	{SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
	{SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, SCAN_HOST},
	{SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
	{SISL_ASTATUS_FC1_LINK_UP, "link up", 1, SCAN_HOST},
	{0x0, "", 0, 0}		/* terminator */
};

/**
 * find_ainfo() - locates and returns asynchronous interrupt information
 * @status:	Status code set by AFU on error.
 *
 * Return: The located information or NULL when the status code is invalid.
 */
static const struct asyc_intr_info *find_ainfo(u64 status)
{
	const struct asyc_intr_info *info;

	for (info = &ainfo[0]; info->status; info++)
		if (info->status == status)
			return info;

	return NULL;
}
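/*
 * Illustrative note (not from the original source): each SISL_ASTATUS_* value
 * is a one-hot bit and find_ainfo() matches on equality, so it must be called
 * with exactly one bit set (as the async handler below does with 1ULL << i).
 * The zeroed sentinel entry terminates the scan, making a NULL return the
 * "unknown status bit" case.
 */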
/**
 * afu_err_intr_init() - clears and initializes the AFU for error interrupts
 * @afu:	AFU associated with the host.
 */
static void afu_err_intr_init(struct afu *afu)
{
	int i;
	u64 reg;

	/* global async interrupts: AFU clears afu_ctrl on context exit
	 * if async interrupts were sent to that context. This prevents
	 * the AFU from sending further async interrupts when there is
	 * nobody to receive them.
	 */

	/* mask all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
	/* set LISN# to send and point to master context */
	reg = ((u64) (((afu->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);

	if (afu->internal_lun)
		reg |= 1;	/* Bit 63 indicates local lun */
	writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
	/* clear all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
	/* unmask bits that are of interest */
	/* note: afu can send an interrupt after this step */
	writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
	/* clear again in case a bit came on after previous clear but before */
	/* unmask */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);

	/* Clear/Set internal lun bits */
	reg = readq_be(&afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);
	reg &= SISL_FC_INTERNAL_MASK;
	if (afu->internal_lun)
		reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
	writeq_be(reg, &afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);

	/* now clear FC errors */
	for (i = 0; i < NUM_FC_PORTS; i++) {
		writeq_be(0xFFFFFFFFU,
			  &afu->afu_map->global.fc_regs[i][FC_ERROR / 8]);
		writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRCAP / 8]);
	}

	/* sync interrupts for master's IOARRIN write */
	/* note that unlike asyncs, there can be no pending sync interrupts */
	/* at this time (this is a fresh context and master has not written */
	/* IOARRIN yet), so there is nothing to clear. */

	/* set LISN#, it is always sent to the context that wrote IOARRIN */
	writeq_be(SISL_MSI_SYNC_ERROR, &afu->host_map->ctx_ctrl);
	writeq_be(SISL_ISTATUS_MASK, &afu->host_map->intr_mask);
}

/**
 * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	u64 reg;
	u64 reg_unmasked;

	reg = readq_be(&afu->host_map->intr_status);
	reg_unmasked = (reg & SISL_ISTATUS_UNMASK);

	if (reg_unmasked == 0UL) {
		pr_err("%s: %llX: spurious interrupt, intr_status %016llX\n",
		       __func__, (u64)afu, reg);
		goto cxlflash_sync_err_irq_exit;
	}

	pr_err("%s: %llX: unexpected interrupt, intr_status %016llX\n",
	       __func__, (u64)afu, reg);

	writeq_be(reg_unmasked, &afu->host_map->intr_clear);

cxlflash_sync_err_irq_exit:
	pr_debug("%s: returning rc=%d\n", __func__, IRQ_HANDLED);
	return IRQ_HANDLED;
}
/**
 * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	struct afu_cmd *cmd;
	bool toggle = afu->toggle;
	u64 entry,
	    *hrrq_start = afu->hrrq_start,
	    *hrrq_end = afu->hrrq_end,
	    *hrrq_curr = afu->hrrq_curr;

	/* Process however many RRQ entries that are ready */
	while (true) {
		entry = *hrrq_curr;

		if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
			break;

		cmd = (struct afu_cmd *)(entry & ~SISL_RESP_HANDLE_T_BIT);
		cmd_complete(cmd);

		/* Advance to next entry or wrap and flip the toggle bit */
		if (hrrq_curr < hrrq_end)
			hrrq_curr++;
		else {
			hrrq_curr = hrrq_start;
			toggle ^= SISL_RESP_HANDLE_T_BIT;
		}
	}

	afu->hrrq_curr = hrrq_curr;
	afu->toggle = toggle;

	return IRQ_HANDLED;
}
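/*
 * Illustrative note (not from the original source): the toggle bit is how
 * producer (AFU) and consumer (host) agree on which RRQ entries are new
 * without an index register. On the first pass the AFU writes handles with
 * T = 1 and the host consumes while (entry & T) == toggle; once the queue
 * wraps, the AFU writes the next generation with T = 0 and the host flips
 * its expected value. A walk of a hypothetical 4-entry queue:
 *
 *	generation 1:  [h|1] [h|1] [h|1] [h|1]   host expects T=1
 *	wrap: toggle ^= 1
 *	generation 2:  [h|0] [h|0] ...           host expects T=0
 *
 * Stale entries from the prior generation fail the comparison and stop the
 * loop, so no count of valid entries is needed.
 */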
/**
 * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 reg_unmasked;
	const struct asyc_intr_info *info;
	struct sisl_global_map __iomem *global = &afu->afu_map->global;
	u64 reg;
	u8 port;
	int i;

	reg = readq_be(&global->regs.aintr_status);
	reg_unmasked = (reg & SISL_ASTATUS_UNMASK);

	if (reg_unmasked == 0) {
		dev_err(dev, "%s: spurious interrupt, aintr_status 0x%016llX\n",
			__func__, reg);
		goto out;
	}

	/* FYI, it is 'okay' to clear AFU status before FC_ERROR */
	writeq_be(reg_unmasked, &global->regs.aintr_clear);

	/* Check each bit that is on */
	for (i = 0; reg_unmasked; i++, reg_unmasked = (reg_unmasked >> 1)) {
		info = find_ainfo(1ULL << i);
		if (((reg_unmasked & 0x1) == 0) || !info)
			continue;

		port = info->port;

		dev_err(dev, "%s: FC Port %d -> %s, fc_status 0x%08llX\n",
			__func__, port, info->desc,
			readq_be(&global->fc_regs[port][FC_STATUS / 8]));

		/*
		 * Do link reset first, some OTHER errors will set FC_ERROR
		 * again if cleared before or w/o a reset
		 */
		if (info->action & LINK_RESET) {
			dev_err(dev, "%s: FC Port %d: resetting link\n",
				__func__, port);
			cfg->lr_state = LINK_RESET_REQUIRED;
			cfg->lr_port = port;
			kref_get(&cfg->afu->mapcount);
			schedule_work(&cfg->work_q);
		}

		if (info->action & CLR_FC_ERROR) {
			reg = readq_be(&global->fc_regs[port][FC_ERROR / 8]);

			/*
			 * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
			 * should be the same and tracing one is sufficient.
			 */

			dev_err(dev, "%s: fc %d: clearing fc_error 0x%08llX\n",
				__func__, port, reg);

			writeq_be(reg, &global->fc_regs[port][FC_ERROR / 8]);
			writeq_be(0, &global->fc_regs[port][FC_ERRCAP / 8]);
		}

		if (info->action & SCAN_HOST) {
			atomic_inc(&cfg->scan_host_needed);
			kref_get(&cfg->afu->mapcount);
			schedule_work(&cfg->work_q);
		}
	}

out:
	dev_dbg(dev, "%s: returning IRQ_HANDLED, afu=%p\n", __func__, afu);
	return IRQ_HANDLED;
}

/**
 * start_context() - starts the master context
 * @cfg:	Internal structure associated with the host.
 *
 * Return: A success or failure value from CXL services.
 */
static int start_context(struct cxlflash_cfg *cfg)
{
	int rc = 0;

	rc = cxl_start_context(cfg->mcctx,
			       cfg->afu->work.work_element_descriptor,
			       NULL);

	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * read_vpd() - obtains the WWPNs from VPD
 * @cfg:	Internal structure associated with the host.
 * @wwpn:	Array of size NUM_FC_PORTS to pass back WWPNs
 *
 * Return: 0 on success, -errno on failure
 */
static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
{
	struct pci_dev *dev = cfg->dev;
	int rc = 0;
	int ro_start, ro_size, i, j, k;
	ssize_t vpd_size;
	char vpd_data[CXLFLASH_VPD_LEN];
	char tmp_buf[WWPN_BUF_LEN] = { 0 };
	char *wwpn_vpd_tags[NUM_FC_PORTS] = { "V5", "V6" };

	/* Get the VPD data from the device */
	vpd_size = cxl_read_adapter_vpd(dev, vpd_data, sizeof(vpd_data));
	if (unlikely(vpd_size <= 0)) {
		dev_err(&dev->dev, "%s: Unable to read VPD (size = %ld)\n",
			__func__, vpd_size);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read only section offset */
	ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
				    PCI_VPD_LRDT_RO_DATA);
	if (unlikely(ro_start < 0)) {
		dev_err(&dev->dev, "%s: VPD Read-only data not found\n",
			__func__);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read only section size, cap when extends beyond read VPD */
	ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
	j = ro_size;
	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
	if (unlikely((i + j) > vpd_size)) {
		pr_debug("%s: Might need to read more VPD (%d > %ld)\n",
			 __func__, (i + j), vpd_size);
		ro_size = vpd_size - i;
	}

	/*
	 * Find the offset of the WWPN tag within the read only
	 * VPD data and validate the found field (partials are
	 * no good to us). Convert the ASCII data to an integer
	 * value. Note that we must copy to a temporary buffer
	 * because the conversion service requires that the ASCII
	 * string be terminated.
	 */
	for (k = 0; k < NUM_FC_PORTS; k++) {
		j = ro_size;
		i = ro_start + PCI_VPD_LRDT_TAG_SIZE;

		i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
		if (unlikely(i < 0)) {
			dev_err(&dev->dev, "%s: Port %d WWPN not found "
				"in VPD\n", __func__, k);
			rc = -ENODEV;
			goto out;
		}

		j = pci_vpd_info_field_size(&vpd_data[i]);
		i += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
			dev_err(&dev->dev, "%s: Port %d WWPN incomplete or "
				"VPD corrupt\n",
				__func__, k);
			rc = -ENODEV;
			goto out;
		}

		memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
		rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
		if (unlikely(rc)) {
			dev_err(&dev->dev, "%s: Fail to convert port %d WWPN "
				"to integer\n", __func__, k);
			rc = -ENODEV;
			goto out;
		}
	}

out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}
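/*
 * Illustrative note (not from the original source): the WWPN is stored in
 * VPD as 16 ASCII hex characters under the "V5"/"V6" keywords. Passing
 * WWPN_LEN as the base argument of kstrtoul() works because WWPN_LEN is 16,
 * which doubles as base-16 for the conversion; the copy into tmp_buf exists
 * solely to give kstrtoul() the NUL terminator that the raw VPD field lacks.
 */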
/**
 * init_pcr() - initialize the provisioning and control registers
 * @cfg:	Internal structure associated with the host.
 *
 * Also sets up fast access to the mapped registers and initializes AFU
 * command fields that never change.
 */
static void init_pcr(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct sisl_ctrl_map __iomem *ctrl_map;
	int i;

	for (i = 0; i < MAX_CONTEXT; i++) {
		ctrl_map = &afu->afu_map->ctrls[i].ctrl;
		/* Disrupt any clients that could be running */
		/* e.g. clients that survived a master restart */
		writeq_be(0, &ctrl_map->rht_start);
		writeq_be(0, &ctrl_map->rht_cnt_id);
		writeq_be(0, &ctrl_map->ctx_cap);
	}

	/* Copy frequently used fields into afu */
	afu->ctx_hndl = (u16) cxl_process_element(cfg->mcctx);
	afu->host_map = &afu->afu_map->hosts[afu->ctx_hndl].host;
	afu->ctrl_map = &afu->afu_map->ctrls[afu->ctx_hndl].ctrl;

	/* Program the Endian Control for the master context */
	writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl);

	/* Initialize cmd fields that never change */
	for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
		afu->cmd[i].rcb.ctx_id = afu->ctx_hndl;
		afu->cmd[i].rcb.msi = SISL_MSI_RRQ_UPDATED;
		afu->cmd[i].rcb.rrq = 0x0;
	}
}
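/*
 * Illustrative note (not from the original source): zeroing rht_start,
 * rht_cnt_id and ctx_cap for every possible context revokes the capabilities
 * of any client context that survived a master restart. A survivor that
 * subsequently attempts an I/O finds its ctx_cap cleared and fails cleanly,
 * rather than operating against a resource handle table the new master no
 * longer tracks.
 */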
/**
 * init_global() - initialize AFU global registers
 * @cfg:	Internal structure associated with the host.
 */
static int init_global(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	u64 wwpn[NUM_FC_PORTS];	/* wwpn of AFU ports */
	int i = 0, num_ports = 0;
	int rc = 0;
	u64 reg;

	rc = read_vpd(cfg, &wwpn[0]);
	if (rc) {
		dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
		goto out;
	}

	pr_debug("%s: wwpn0=0x%llX wwpn1=0x%llX\n", __func__, wwpn[0], wwpn[1]);

	/* Set up RRQ in AFU for master issued cmds */
	writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start);
	writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end);

	/* AFU configuration */
	reg = readq_be(&afu->afu_map->global.regs.afu_config);
	reg |= SISL_AFUCONF_AR_ALL | SISL_AFUCONF_ENDIAN;
	/* enable all auto retry options and control endianness */
	/* leave others at default: */
	/* CTX_CAP write protected, mbox_r does not clear on read and */
	/* checker on if dual afu */
	writeq_be(reg, &afu->afu_map->global.regs.afu_config);

	/* Global port select: select either port */
	if (afu->internal_lun) {
		/* Only use port 0 */
		writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
		num_ports = NUM_FC_PORTS - 1;
	} else {
		writeq_be(BOTH_PORTS, &afu->afu_map->global.regs.afu_port_sel);
		num_ports = NUM_FC_PORTS;
	}

	for (i = 0; i < num_ports; i++) {
		/* Unmask all errors (but they are still masked at AFU) */
		writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRMSK / 8]);
		/* Clear CRC error cnt & set a threshold */
		(void)readq_be(&afu->afu_map->global.
			       fc_regs[i][FC_CNT_CRCERR / 8]);
		writeq_be(MC_CRC_THRESH, &afu->afu_map->global.fc_regs[i]
			  [FC_CRC_THRESH / 8]);

		/* Set WWPNs. If already programmed, wwpn[i] is 0 */
		if (wwpn[i] != 0 &&
		    afu_set_wwpn(afu, i,
				 &afu->afu_map->global.fc_regs[i][0],
				 wwpn[i])) {
			dev_err(dev, "%s: failed to set WWPN on port %d\n",
				__func__, i);
			rc = -EIO;
			goto out;
		}
		/* Programming WWPN back to back causes additional
		 * offline/online transitions and a PLOGI
		 */
		msleep(100);
	}

	/* Set up master's own CTX_CAP to allow real mode, host translation */
	/* tables, afu cmds and read/write GSCSI cmds. */
	/* First, unlock ctx_cap write by reading mbox */
	(void)readq_be(&afu->ctrl_map->mbox_r);	/* unlock ctx_cap */
	writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
		   SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
		   SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
		  &afu->ctrl_map->ctx_cap);
	/* Initialize heartbeat */
	afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);

out:
	return rc;
}

/**
 * start_afu() - initializes and starts the AFU
 * @cfg:	Internal structure associated with the host.
 */
static int start_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct afu_cmd *cmd;

	int i = 0;
	int rc = 0;

	for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
		cmd = &afu->cmd[i];

		init_completion(&cmd->cevent);
		spin_lock_init(&cmd->slock);
		cmd->parent = afu;
	}

	init_pcr(cfg);

	/* After an AFU reset, RRQ entries are stale, clear them */
	memset(&afu->rrq_entry, 0, sizeof(afu->rrq_entry));

	/* Initialize RRQ pointers */
	afu->hrrq_start = &afu->rrq_entry[0];
	afu->hrrq_end = &afu->rrq_entry[NUM_RRQ_ENTRY - 1];
	afu->hrrq_curr = afu->hrrq_start;
	afu->toggle = 1;

	rc = init_global(cfg);

	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * init_intr() - setup interrupt handlers for the master context
 * @cfg:	Internal structure associated with the host.
 * @ctx:	Context to attach the interrupt handlers to.
 *
 * Return: UNDO_NOOP on success, otherwise the undo level describing how
 * far allocation progressed before the failure (for use with term_intr())
 */
static enum undo_level init_intr(struct cxlflash_cfg *cfg,
				 struct cxl_context *ctx)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	enum undo_level level = UNDO_NOOP;

	rc = cxl_allocate_afu_irqs(ctx, 3);
	if (unlikely(rc)) {
		dev_err(dev, "%s: call to allocate_afu_irqs failed rc=%d!\n",
			__func__, rc);
		level = UNDO_NOOP;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, afu,
			     "SISL_MSI_SYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: IRQ 1 (SISL_MSI_SYNC_ERROR) map failed!\n",
			__func__);
		level = FREE_IRQ;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, afu,
			     "SISL_MSI_RRQ_UPDATED");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: IRQ 2 (SISL_MSI_RRQ_UPDATED) map failed!\n",
			__func__);
		level = UNMAP_ONE;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, afu,
			     "SISL_MSI_ASYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: IRQ 3 (SISL_MSI_ASYNC_ERROR) map failed!\n",
			__func__);
		level = UNMAP_TWO;
		goto out;
	}
out:
	return level;
}
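/*
 * Illustrative note (not from the original source): the undo level returned
 * above encodes how far setup progressed, and pairs with the fall-through
 * cases in term_intr(). For example, a failure mapping IRQ 3 returns
 * UNMAP_TWO, so the caller unmaps IRQs 2 and 1 and frees the allocation,
 * exactly undoing the steps that had succeeded.
 */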
/**
 * init_mc() - create and register as the master context
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_mc(struct cxlflash_cfg *cfg)
{
	struct cxl_context *ctx;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	enum undo_level level;

	ctx = cxl_get_context(cfg->dev);
	if (unlikely(!ctx)) {
		rc = -ENOMEM;
		goto ret;
	}
	cfg->mcctx = ctx;

	/* Set it up as a master with the CXL */
	cxl_set_master(ctx);

	/* During initialization reset the AFU to start from a clean slate */
	rc = cxl_afu_reset(cfg->mcctx);
	if (unlikely(rc)) {
		dev_err(dev, "%s: initial AFU reset failed rc=%d\n",
			__func__, rc);
		goto ret;
	}

	level = init_intr(cfg, ctx);
	if (unlikely(level)) {
		dev_err(dev, "%s: setting up interrupts failed level=%d\n",
			__func__, level);
		rc = -EIO;
		goto out;
	}

	/* This performs the equivalent of the CXL_IOCTL_START_WORK.
	 * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
	 * element (pe) that is embedded in the context (ctx)
	 */
	rc = start_context(cfg);
	if (unlikely(rc)) {
		dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
		level = UNMAP_THREE;
		goto out;
	}
ret:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
out:
	term_intr(cfg, level);
	goto ret;
}
/**
 * init_afu() - setup as master context and start AFU
 * @cfg:	Internal structure associated with the host.
 *
 * This routine is a higher level of control for configuring the
 * AFU on probe and reset paths.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_afu(struct cxlflash_cfg *cfg)
{
	u64 reg;
	int rc = 0;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;

	cxl_perst_reloads_same_image(cfg->cxl_afu, true);

	rc = init_mc(cfg);
	if (rc) {
		dev_err(dev, "%s: call to init_mc failed, rc=%d!\n",
			__func__, rc);
		goto out;
	}

	/* Map the entire MMIO space of the AFU */
	afu->afu_map = cxl_psa_map(cfg->mcctx);
	if (!afu->afu_map) {
		dev_err(dev, "%s: call to cxl_psa_map failed!\n", __func__);
		rc = -ENOMEM;
		goto err1;
	}
	kref_init(&afu->mapcount);

	/* No byte reverse on reading afu_version or string will be backwards */
	reg = readq(&afu->afu_map->global.regs.afu_version);
	memcpy(afu->version, &reg, sizeof(reg));
	afu->interface_version =
	    readq_be(&afu->afu_map->global.regs.interface_version);
	if ((afu->interface_version + 1) == 0) {
		pr_err("Back level AFU, please upgrade. AFU version %s "
		       "interface version 0x%llx\n", afu->version,
		       afu->interface_version);
		rc = -EINVAL;
		goto err2;
	}

	pr_debug("%s: afu version %s, interface version 0x%llX\n", __func__,
		 afu->version, afu->interface_version);

	rc = start_afu(cfg);
	if (rc) {
		dev_err(dev, "%s: call to start_afu failed, rc=%d!\n",
			__func__, rc);
		goto err2;
	}

	afu_err_intr_init(cfg->afu);
	atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));

	/* Restore the LUN mappings */
	cxlflash_restore_luntable(cfg);
out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;

err2:
	kref_put(&afu->mapcount, afu_unmap);
err1:
	term_intr(cfg, UNMAP_THREE);
	term_mc(cfg);
	goto out;
}
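/*
 * Illustrative note (not from the original source): the back-level check
 * above relies on (x + 1) == 0 being true only for x == ~0ULL. An AFU that
 * does not implement interface_version returns all-Fs from that MMIO read,
 * so 0xFFFFFFFFFFFFFFFF + 1 wraps to 0 and the probe is rejected, while any
 * genuine version value passes.
 */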
/**
 * cxlflash_afu_sync() - builds and sends an AFU sync command
 * @afu: AFU associated with the host.
 * @ctx_hndl_u: Identifies context requesting sync.
 * @res_hndl_u: Identifies resource requesting sync.
 * @mode: Type of sync to issue (lightweight, heavyweight, global).
 *
 * The AFU can only take one sync command at a time. This routine enforces this
 * limitation by using a mutex to provide exclusive access to the AFU during
 * the sync. This design point requires calling threads to not be on interrupt
 * context due to the possibility of sleeping during concurrent sync operations.
 *
 * AFU sync operations are only necessary and allowed when the device is
 * operating normally. When not operating normally, sync requests can occur as
 * part of cleaning up resources associated with an adapter prior to removal.
 * In this scenario, these requests are simply ignored (safe due to the AFU
 * going away).
 *
 * Return:
 *	0 on success
 *	-1 on failure
 */
int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
                      res_hndl_t res_hndl_u, u8 mode)
{
        struct cxlflash_cfg *cfg = afu->parent;
        struct device *dev = &cfg->dev->dev;
        struct afu_cmd *cmd = NULL;
        int rc = 0;
        int retry_cnt = 0;
        static DEFINE_MUTEX(sync_active);

        if (cfg->state != STATE_NORMAL) {
                pr_debug("%s: Sync not required! (%u)\n", __func__,
                         cfg->state);
                return 0;
        }

        mutex_lock(&sync_active);
retry:
        cmd = cmd_checkout(afu);
        if (unlikely(!cmd)) {
                retry_cnt++;
                udelay(1000 * retry_cnt);
                if (retry_cnt < MC_RETRY_CNT)
                        goto retry;
                dev_err(dev, "%s: could not get a free command\n", __func__);
                rc = -1;
                goto out;
        }

        pr_debug("%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);

        memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));

        cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
        cmd->rcb.port_sel = 0x0;        /* NA */
        cmd->rcb.lun_id = 0x0;          /* NA */
        cmd->rcb.data_len = 0x0;
        cmd->rcb.data_ea = 0x0;
        cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT;

        cmd->rcb.cdb[0] = 0xC0;         /* AFU Sync */
        cmd->rcb.cdb[1] = mode;

        /* The cdb is aligned, no unaligned accessors required */
        *((__be16 *)&cmd->rcb.cdb[2]) = cpu_to_be16(ctx_hndl_u);
        *((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u);

        rc = send_cmd(afu, cmd);
        if (unlikely(rc))
                goto out;

        wait_resp(afu, cmd);

        /* B_ERROR is set on timeout */
        if (unlikely((cmd->sa.ioasc != 0) ||
                     (cmd->sa.host_use_b[0] & B_ERROR)))
                rc = -1;
out:
        mutex_unlock(&sync_active);
        if (cmd)
                cmd_checkin(cmd);
        pr_debug("%s: returning rc=%d\n", __func__, rc);
        return rc;
}
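/*
 * Illustrative usage of cxlflash_afu_sync() (a sketch, not driver code):
 * after invalidating an entry in a port LUN table, a caller would issue a
 * lightweight sync so the AFU discards any cached state for that resource.
 * AFU_LW_SYNC/AFU_HW_SYNC/AFU_GSYNC name the sync modes; error handling
 * is elided:
 *
 *	if (cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC))
 *		pr_err("AFU sync failed, cached state may be stale\n");
 */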
/**
 * afu_reset() - resets the AFU
 * @cfg: Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_reset(struct cxlflash_cfg *cfg)
{
        int rc = 0;

        /*
         * Stop the context before the reset. Since the context is no longer
         * available, restart it after the reset is complete.
         */
        term_afu(cfg);

        rc = init_afu(cfg);

        pr_debug("%s: returning rc=%d\n", __func__, rc);
        return rc;
}

/**
 * drain_ioctls() - wait until all currently executing ioctls have completed
 * @cfg: Internal structure associated with the host.
 *
 * Obtain write access to the read/write semaphore that wraps ioctl
 * handling to 'drain' ioctls currently executing.
 */
static void drain_ioctls(struct cxlflash_cfg *cfg)
{
        down_write(&cfg->ioctl_rwsem);
        up_write(&cfg->ioctl_rwsem);
}
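/*
 * Why the empty down_write()/up_write() pair above drains ioctls: each
 * ioctl path is assumed to hold cfg->ioctl_rwsem for reading while it
 * runs, roughly as sketched below. Writers exclude readers, so
 * down_write() cannot return until every in-flight ioctl has dropped its
 * read lock, and ioctls arriving afterwards observe the updated state.
 *
 *	down_read(&cfg->ioctl_rwsem);	// ioctl entry
 *	rc = do_ioctl_work(...);	// hypothetical handler
 *	up_read(&cfg->ioctl_rwsem);	// ioctl exit
 */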
/**
 * cxlflash_eh_device_reset_handler() - reset a single LUN
 * @scp: SCSI command to send.
 *
 * Return:
 *	SUCCESS as defined in scsi/scsi.h
 *	FAILED as defined in scsi/scsi.h
 */
static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
{
        int rc = SUCCESS;
        struct Scsi_Host *host = scp->device->host;
        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
        struct afu *afu = cfg->afu;
        int rcr = 0;

        pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
                 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
                 host->host_no, scp->device->channel,
                 scp->device->id, scp->device->lun,
                 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
                 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
                 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
                 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

retry:
        switch (cfg->state) {
        case STATE_NORMAL:
                rcr = send_tmf(afu, scp, TMF_LUN_RESET);
                if (unlikely(rcr))
                        rc = FAILED;
                break;
        case STATE_RESET:
                wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
                goto retry;
        default:
                rc = FAILED;
                break;
        }

        pr_debug("%s: returning rc=%d\n", __func__, rc);
        return rc;
}

/**
 * cxlflash_eh_host_reset_handler() - reset the host adapter
 * @scp: SCSI command from stack identifying host.
 *
 * Return:
 *	SUCCESS as defined in scsi/scsi.h
 *	FAILED as defined in scsi/scsi.h
 */
static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
{
        int rc = SUCCESS;
        int rcr = 0;
        struct Scsi_Host *host = scp->device->host;
        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;

        pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
                 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
                 host->host_no, scp->device->channel,
                 scp->device->id, scp->device->lun,
                 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
                 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
                 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
                 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

        switch (cfg->state) {
        case STATE_NORMAL:
                cfg->state = STATE_RESET;
                drain_ioctls(cfg);
                cxlflash_mark_contexts_error(cfg);
                rcr = afu_reset(cfg);
                if (rcr) {
                        rc = FAILED;
                        cfg->state = STATE_FAILTERM;
                } else
                        cfg->state = STATE_NORMAL;
                wake_up_all(&cfg->reset_waitq);
                break;
        case STATE_RESET:
                wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
                if (cfg->state == STATE_NORMAL)
                        break;
                /* fall through */
        default:
                rc = FAILED;
                break;
        }

        pr_debug("%s: returning rc=%d\n", __func__, rc);
        return rc;
}
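/*
 * Summary of the adapter state machine exercised by the handlers above
 * (descriptive only; the states are defined elsewhere in this driver):
 *
 *	STATE_NORMAL --[host reset]----------> STATE_RESET
 *	STATE_RESET  --[afu_reset() succeeds]-> STATE_NORMAL
 *	STATE_RESET  --[afu_reset() fails]----> STATE_FAILTERM
 *
 * Threads that observe STATE_RESET block on cfg->reset_waitq and
 * re-evaluate once the resetting thread calls wake_up_all().
 */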
/**
 * cxlflash_change_queue_depth() - change the queue depth for the device
 * @sdev: SCSI device destined for queue depth change.
 * @qdepth: Requested queue depth value to set.
 *
 * The requested queue depth is capped to the maximum supported value.
 *
 * Return: The actual queue depth set.
 */
static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
        if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
                qdepth = CXLFLASH_MAX_CMDS_PER_LUN;

        scsi_change_queue_depth(sdev, qdepth);
        return sdev->queue_depth;
}

/**
 * cxlflash_show_port_status() - queries and presents the current port status
 * @port: Desired port for status reporting.
 * @afu: AFU owning the specified port.
 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t cxlflash_show_port_status(u32 port, struct afu *afu, char *buf)
{
        char *disp_status;
        u64 status;
        __be64 __iomem *fc_regs;

        if (port >= NUM_FC_PORTS)
                return 0;

        fc_regs = &afu->afu_map->global.fc_regs[port][0];
        status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
        status &= FC_MTIP_STATUS_MASK;

        if (status == FC_MTIP_STATUS_ONLINE)
                disp_status = "online";
        else if (status == FC_MTIP_STATUS_OFFLINE)
                disp_status = "offline";
        else
                disp_status = "unknown";

        return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
}

/**
 * port0_show() - queries and presents the current status of port 0
 * @dev: Generic device associated with the host owning the port.
 * @attr: Device attribute representing the port.
 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port0_show(struct device *dev,
                          struct device_attribute *attr,
                          char *buf)
{
        struct Scsi_Host *shost = class_to_shost(dev);
        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
        struct afu *afu = cfg->afu;

        return cxlflash_show_port_status(0, afu, buf);
}

/**
 * port1_show() - queries and presents the current status of port 1
 * @dev: Generic device associated with the host owning the port.
 * @attr: Device attribute representing the port.
 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port1_show(struct device *dev,
                          struct device_attribute *attr,
                          char *buf)
{
        struct Scsi_Host *shost = class_to_shost(dev);
        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
        struct afu *afu = cfg->afu;

        return cxlflash_show_port_status(1, afu, buf);
}

/**
 * lun_mode_show() - presents the current LUN mode of the host
 * @dev: Generic device associated with the host.
 * @attr: Device attribute representing the LUN mode.
 * @buf: Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t lun_mode_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
{
        struct Scsi_Host *shost = class_to_shost(dev);
        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
        struct afu *afu = cfg->afu;

        return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
}

/**
 * lun_mode_store() - sets the LUN mode of the host
 * @dev: Generic device associated with the host.
 * @attr: Device attribute representing the LUN mode.
 * @buf: Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
 * @count: Length of data residing in @buf.
 *
 * The CXL Flash AFU supports a dummy LUN mode where the external
 * links and storage are not required. Space on the FPGA is used
 * to create 1 or 2 small LUNs which are presented to the system
 * as if they were a normal storage device. This feature is useful
 * during development and also provides manufacturing with a way
 * to test the AFU without an actual device.
 *
 * 0 = external LUN[s] (default)
 * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
 * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
 * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
 * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
 *
 * Return: The number of bytes consumed from @buf (always @count).
 */
static ssize_t lun_mode_store(struct device *dev,
                              struct device_attribute *attr,
                              const char *buf, size_t count)
{
        struct Scsi_Host *shost = class_to_shost(dev);
        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
        struct afu *afu = cfg->afu;
        int rc;
        u32 lun_mode;

        rc = kstrtouint(buf, 10, &lun_mode);
        if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
                afu->internal_lun = lun_mode;

                /*
                 * When configured for internal LUN, there is only one channel,
                 * channel number 0, else there will be two (default).
                 */
                if (afu->internal_lun)
                        shost->max_channel = 0;
                else
                        shost->max_channel = NUM_FC_PORTS - 1;

                afu_reset(cfg);
                scsi_scan_host(cfg->host);
        }

        return count;
}
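/*
 * Example interaction with the lun_mode attribute from user space (the
 * host number is illustrative; the attribute is registered through
 * cxlflash_host_attrs below, so it surfaces in the host's scsi_host
 * sysfs directory):
 *
 *	# cat /sys/class/scsi_host/host1/lun_mode
 *	0
 *	# echo 2 > /sys/class/scsi_host/host1/lun_mode
 *
 * The store triggers an AFU reset and a host rescan so the internal
 * LUN(s) appear as ordinary SCSI devices.
 */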
/**
 * ioctl_version_show() - presents the current ioctl version of the host
 * @dev: Generic device associated with the host.
 * @attr: Device attribute representing the ioctl version.
 * @buf: Buffer of length PAGE_SIZE to report back the ioctl version.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t ioctl_version_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0);
}

/**
 * cxlflash_show_port_lun_table() - queries and presents the port LUN table
 * @port: Desired port for status reporting.
 * @afu: AFU owning the specified port.
 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t cxlflash_show_port_lun_table(u32 port,
                                            struct afu *afu,
                                            char *buf)
{
        int i;
        ssize_t bytes = 0;
        __be64 __iomem *fc_port;

        if (port >= NUM_FC_PORTS)
                return 0;

        fc_port = &afu->afu_map->global.fc_port[port][0];

        for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
                bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
                                   "%03d: %016llX\n", i, readq_be(&fc_port[i]));
        return bytes;
}

/**
 * port0_lun_table_show() - presents the current LUN table of port 0
 * @dev: Generic device associated with the host owning the port.
 * @attr: Device attribute representing the port.
 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port0_lun_table_show(struct device *dev,
                                    struct device_attribute *attr,
                                    char *buf)
{
        struct Scsi_Host *shost = class_to_shost(dev);
        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
        struct afu *afu = cfg->afu;

        return cxlflash_show_port_lun_table(0, afu, buf);
}

/**
 * port1_lun_table_show() - presents the current LUN table of port 1
 * @dev: Generic device associated with the host owning the port.
 * @attr: Device attribute representing the port.
 * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port1_lun_table_show(struct device *dev,
                                    struct device_attribute *attr,
                                    char *buf)
{
        struct Scsi_Host *shost = class_to_shost(dev);
        struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
        struct afu *afu = cfg->afu;

        return cxlflash_show_port_lun_table(1, afu, buf);
}
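/*
 * Sample of the LUN table formatting produced by the helpers above
 * (the values are made up; each row is the index into the port LUN
 * table followed by the 64-bit LUN ID programmed at that index):
 *
 *	000: 4000400100000000
 *	001: 0000000000000000
 *	...
 */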
/**
 * mode_show() - presents the current mode of the device
 * @dev: Generic device associated with the device.
 * @attr: Device attribute representing the device mode.
 * @buf: Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t mode_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        struct scsi_device *sdev = to_scsi_device(dev);

        return scnprintf(buf, PAGE_SIZE, "%s\n",
                         sdev->hostdata ? "superpipe" : "legacy");
}

/*
 * Host attributes
 */
static DEVICE_ATTR_RO(port0);
static DEVICE_ATTR_RO(port1);
static DEVICE_ATTR_RW(lun_mode);
static DEVICE_ATTR_RO(ioctl_version);
static DEVICE_ATTR_RO(port0_lun_table);
static DEVICE_ATTR_RO(port1_lun_table);

static struct device_attribute *cxlflash_host_attrs[] = {
        &dev_attr_port0,
        &dev_attr_port1,
        &dev_attr_lun_mode,
        &dev_attr_ioctl_version,
        &dev_attr_port0_lun_table,
        &dev_attr_port1_lun_table,
        NULL
};

/*
 * Device attributes
 */
static DEVICE_ATTR_RO(mode);

static struct device_attribute *cxlflash_dev_attrs[] = {
        &dev_attr_mode,
        NULL
};
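/*
 * For reference (not driver code): attributes listed in
 * cxlflash_host_attrs are created by the SCSI midlayer under
 * /sys/class/scsi_host/host<N>/, while cxlflash_dev_attrs entries appear
 * in each scsi_device's sysfs directory, e.g.
 * /sys/class/scsi_device/<h:c:t:l>/device/mode.
 */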
/*
 * Host template
 */
static struct scsi_host_template driver_template = {
        .module = THIS_MODULE,
        .name = CXLFLASH_ADAPTER_NAME,
        .info = cxlflash_driver_info,
        .ioctl = cxlflash_ioctl,
        .proc_name = CXLFLASH_NAME,
        .queuecommand = cxlflash_queuecommand,
        .eh_device_reset_handler = cxlflash_eh_device_reset_handler,
        .eh_host_reset_handler = cxlflash_eh_host_reset_handler,
        .change_queue_depth = cxlflash_change_queue_depth,
        .cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
        .can_queue = CXLFLASH_MAX_CMDS,
        .this_id = -1,
        .sg_tablesize = SG_NONE,        /* No scatter gather support */
        .max_sectors = CXLFLASH_MAX_SECTORS,
        .use_clustering = ENABLE_CLUSTERING,
        .shost_attrs = cxlflash_host_attrs,
        .sdev_attrs = cxlflash_dev_attrs,
};

/*
 * Device dependent values
 */
static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
                                        0ULL };
static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
                                        CXLFLASH_NOTIFY_SHUTDOWN };

/*
 * PCI device binding table
 */
static struct pci_device_id cxlflash_pci_table[] = {
        {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
         PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
        {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
         PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
        {}
};

MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);

/**
 * cxlflash_worker_thread() - work thread handler for the AFU
 * @work: Work structure contained within cxlflash associated with host.
 *
 * Handles the following events:
 * - Link reset which cannot be performed in interrupt context due to
 *   blocking up to a few seconds
 * - Read AFU command room
 * - Rescan the host
 */
static void cxlflash_worker_thread(struct work_struct *work)
{
        struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
                                                work_q);
        struct afu *afu = cfg->afu;
        struct device *dev = &cfg->dev->dev;
        int port;
        ulong lock_flags;

        /* Avoid MMIO if the device has failed */
        if (cfg->state != STATE_NORMAL)
                return;

        spin_lock_irqsave(cfg->host->host_lock, lock_flags);

        if (cfg->lr_state == LINK_RESET_REQUIRED) {
                port = cfg->lr_port;
                if (port < 0)
                        dev_err(dev, "%s: invalid port index %d\n",
                                __func__, port);
                else {
                        spin_unlock_irqrestore(cfg->host->host_lock,
                                               lock_flags);

                        /* The reset can block... */
                        afu_link_reset(afu, port,
                                       &afu->afu_map->global.fc_regs[port][0]);
                        spin_lock_irqsave(cfg->host->host_lock, lock_flags);
                }

                cfg->lr_state = LINK_RESET_COMPLETE;
        }

        if (afu->read_room) {
                atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));
                afu->read_room = false;
        }

        spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);

        if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
                scsi_scan_host(cfg->host);
        kref_put(&afu->mapcount, afu_unmap);
}

/**
 * cxlflash_probe() - PCI entry point to add host
 * @pdev: PCI device associated with the host.
 * @dev_id: PCI device id associated with device.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_probe(struct pci_dev *pdev,
                          const struct pci_device_id *dev_id)
{
        struct Scsi_Host *host;
        struct cxlflash_cfg *cfg = NULL;
        struct dev_dependent_vals *ddv;
        int rc = 0;

        dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
                __func__, pdev->irq);

        ddv = (struct dev_dependent_vals *)dev_id->driver_data;
        driver_template.max_sectors = ddv->max_sectors;

        host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
        if (!host) {
                dev_err(&pdev->dev, "%s: call to scsi_host_alloc failed!\n",
                        __func__);
                rc = -ENOMEM;
                goto out;
        }

        host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
        host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
        host->max_channel = NUM_FC_PORTS - 1;
        host->unique_id = host->host_no;
        host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;

        cfg = (struct cxlflash_cfg *)host->hostdata;
        cfg->host = host;
        rc = alloc_mem(cfg);
        if (rc) {
                dev_err(&pdev->dev, "%s: call to alloc_mem failed!\n",
                        __func__);
                rc = -ENOMEM;
                scsi_host_put(cfg->host);
                goto out;
        }

        cfg->init_state = INIT_STATE_NONE;
        cfg->dev = pdev;
        cfg->cxl_fops = cxlflash_cxl_fops;

        /*
         * The promoted LUNs move to the top of the LUN table. The rest stay
         * on the bottom half. The bottom half grows from the end
         * (index = 255), whereas the top half grows from the beginning
         * (index = 0).
         */
        cfg->promote_lun_index = 0;
        cfg->last_lun_index[0] = CXLFLASH_NUM_VLUNS/2 - 1;
        cfg->last_lun_index[1] = CXLFLASH_NUM_VLUNS/2 - 1;

        cfg->dev_id = (struct pci_device_id *)dev_id;

        init_waitqueue_head(&cfg->tmf_waitq);
        init_waitqueue_head(&cfg->reset_waitq);

        INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
        cfg->lr_state = LINK_RESET_INVALID;
        cfg->lr_port = -1;
        spin_lock_init(&cfg->tmf_slock);
        mutex_init(&cfg->ctx_tbl_list_mutex);
        mutex_init(&cfg->ctx_recovery_mutex);
        init_rwsem(&cfg->ioctl_rwsem);
        INIT_LIST_HEAD(&cfg->ctx_err_recovery);
        INIT_LIST_HEAD(&cfg->lluns);

        pci_set_drvdata(pdev, cfg);

        cfg->cxl_afu = cxl_pci_to_afu(pdev);

        rc = init_pci(cfg);
        if (rc) {
                dev_err(&pdev->dev, "%s: call to init_pci "
                        "failed rc=%d!\n", __func__, rc);
                goto out_remove;
        }
        cfg->init_state = INIT_STATE_PCI;

        rc = init_afu(cfg);
        if (rc) {
                dev_err(&pdev->dev, "%s: call to init_afu "
                        "failed rc=%d!\n", __func__, rc);
                goto out_remove;
        }
        cfg->init_state = INIT_STATE_AFU;

        rc = init_scsi(cfg);
        if (rc) {
                dev_err(&pdev->dev, "%s: call to init_scsi "
                        "failed rc=%d!\n", __func__, rc);
                goto out_remove;
        }
        cfg->init_state = INIT_STATE_SCSI;

out:
        pr_debug("%s: returning rc=%d\n", __func__, rc);
        return rc;

out_remove:
        cxlflash_remove(pdev);
        goto out;
}

/**
 * cxlflash_pci_error_detected() - called when a PCI error is detected
 * @pdev: PCI device struct.
 * @state: PCI channel state.
 *
 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
                                                    pci_channel_state_t state)
{
        int rc = 0;
        struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
        struct device *dev = &cfg->dev->dev;

        dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);

        switch (state) {
        case pci_channel_io_frozen:
                cfg->state = STATE_RESET;
                scsi_block_requests(cfg->host);
                drain_ioctls(cfg);
                rc = cxlflash_mark_contexts_error(cfg);
                if (unlikely(rc))
                        dev_err(dev, "%s: Failed to mark user contexts!(%d)\n",
                                __func__, rc);
                term_afu(cfg);
                return PCI_ERS_RESULT_NEED_RESET;
        case pci_channel_io_perm_failure:
                cfg->state = STATE_FAILTERM;
                wake_up_all(&cfg->reset_waitq);
                scsi_unblock_requests(cfg->host);
                return PCI_ERS_RESULT_DISCONNECT;
        default:
                break;
        }
        return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * cxlflash_pci_slot_reset() - called when PCI slot has been reset
 * @pdev: PCI device struct.
 *
 * This routine is called by the pci error recovery code after the PCI
 * slot has been reset, just before we should resume normal operations.
 *
 * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
{
        int rc = 0;
        struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
        struct device *dev = &cfg->dev->dev;

        dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);

        rc = init_afu(cfg);
        if (unlikely(rc)) {
                dev_err(dev, "%s: EEH recovery failed! (%d)\n", __func__, rc);
                return PCI_ERS_RESULT_DISCONNECT;
        }

        return PCI_ERS_RESULT_RECOVERED;
}

/**
 * cxlflash_pci_resume() - called when normal operation can resume
 * @pdev: PCI device struct
 */
static void cxlflash_pci_resume(struct pci_dev *pdev)
{
        struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
        struct device *dev = &cfg->dev->dev;

        dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);

        cfg->state = STATE_NORMAL;
        wake_up_all(&cfg->reset_waitq);
        scsi_unblock_requests(cfg->host);
}

static const struct pci_error_handlers cxlflash_err_handler = {
        .error_detected = cxlflash_pci_error_detected,
        .slot_reset = cxlflash_pci_slot_reset,
        .resume = cxlflash_pci_resume,
};
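/*
 * EEH recovery flow for reference (driven by the PCI core, which invokes
 * the callbacks registered above in this order on a recoverable error):
 *
 *	cxlflash_pci_error_detected(pci_channel_io_frozen)
 *		-> block requests, drain ioctls, term_afu()
 *	cxlflash_pci_slot_reset()
 *		-> init_afu() rebuilds the master context
 *	cxlflash_pci_resume()
 *		-> state back to STATE_NORMAL, unblock requests
 */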
/*
 * PCI device structure
 */
static struct pci_driver cxlflash_driver = {
        .name = CXLFLASH_NAME,
        .id_table = cxlflash_pci_table,
        .probe = cxlflash_probe,
        .remove = cxlflash_remove,
        .shutdown = cxlflash_shutdown,
        .err_handler = &cxlflash_err_handler,
};

/**
 * init_cxlflash() - module entry point
 *
 * Return: 0 on success, -errno on failure
 */
static int __init init_cxlflash(void)
{
        pr_info("%s: %s\n", __func__, CXLFLASH_ADAPTER_NAME);

        cxlflash_list_init();

        return pci_register_driver(&cxlflash_driver);
}

/**
 * exit_cxlflash() - module exit point
 */
static void __exit exit_cxlflash(void)
{
        cxlflash_term_global_luns();
        cxlflash_free_errpage();

        pci_unregister_driver(&cxlflash_driver);
}

module_init(init_cxlflash);
module_exit(exit_cxlflash);