/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/unaligned.h>

#include <misc/cxl.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "main.h"
#include "sislite.h"
#include "common.h"

MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");

/**
 * cmd_checkout() - checks out an AFU command
 * @afu:	AFU to checkout from.
 *
 * Commands are checked out in a round-robin fashion. Note that since
 * the command pool is larger than the hardware queue, the majority of
 * times we will only loop once or twice before getting a command. The
 * buffer and CDB within the command are initialized (zeroed) prior to
 * returning.
 *
 * Return: The checked out command or NULL when command pool is empty.
 */
static struct afu_cmd *cmd_checkout(struct afu *afu)
{
	int k, dec = CXLFLASH_NUM_CMDS;
	struct afu_cmd *cmd;

	while (dec--) {
		k = (afu->cmd_couts++ & (CXLFLASH_NUM_CMDS - 1));

		cmd = &afu->cmd[k];

		if (!atomic_dec_if_positive(&cmd->free)) {
			pr_devel("%s: returning found index=%d cmd=%p\n",
				 __func__, cmd->slot, cmd);
			memset(cmd->buf, 0, CMD_BUFSIZE);
			memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
			return cmd;
		}
	}

	return NULL;
}

/**
 * cmd_checkin() - checks in an AFU command
 * @cmd:	AFU command to checkin.
 *
 * Safe to pass commands that have already been checked in. Several
 * internal tracking fields are reset as part of the checkin. Note
 * that these are intentionally reset prior to toggling the free bit
 * to avoid clobbering values in the event that the command is checked
 * out right away.
 */
static void cmd_checkin(struct afu_cmd *cmd)
{
	cmd->rcb.scp = NULL;
	cmd->rcb.timeout = 0;
	cmd->sa.ioasc = 0;
	cmd->cmd_tmf = false;
	cmd->sa.host_use[0] = 0; /* clears both completion and retry bytes */

	if (unlikely(atomic_inc_return(&cmd->free) != 1)) {
		pr_err("%s: Freeing cmd (%d) that is not in use!\n",
		       __func__, cmd->slot);
		return;
	}

	pr_devel("%s: released cmd %p index=%d\n", __func__, cmd, cmd->slot);
}
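/*
 * Illustrative sketch (informational, not driver code): the command
 * pool above hands out slots with a lock-free free-bit protocol,
 * where cmd->free acts as a one-permit counter per slot:
 *
 *	checkout:  atomic_dec_if_positive(&cmd->free) == 0  -> slot owned
 *	checkin:   atomic_inc_return(&cmd->free) == 1       -> slot released
 *
 * Assuming CXLFLASH_NUM_CMDS is a power of two such as 16, the
 * round-robin index in cmd_checkout() reduces to a cheap mask:
 *
 *	k = counter++ & (16 - 1);	/+ e.g. 21 -> slot 5, wraps free +/
 *
 * so a full pool scan costs at most CXLFLASH_NUM_CMDS probes.
 */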
/**
 * process_cmd_err() - command error handler
 * @cmd:	AFU command that experienced the error.
 * @scp:	SCSI command associated with the AFU command in error.
 *
 * Translates error bits from AFU command to SCSI command results.
 */
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
	struct sisl_ioarcb *ioarcb;
	struct sisl_ioasa *ioasa;
	u32 resid;

	if (unlikely(!cmd))
		return;

	ioarcb = &(cmd->rcb);
	ioasa = &(cmd->sa);

	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
		resid = ioasa->resid;
		scsi_set_resid(scp, resid);
		pr_debug("%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
			 __func__, cmd, scp, resid);
	}

	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
		pr_debug("%s: cmd overrun cmd = %p scp = %p\n",
			 __func__, cmd, scp);
		scp->result = (DID_ERROR << 16);
	}

	pr_debug("%s: cmd failed afu_rc=%d scsi_rc=%d fc_rc=%d "
		 "afu_extra=0x%X, scsi_extra=0x%X, fc_extra=0x%X\n",
		 __func__, ioasa->rc.afu_rc, ioasa->rc.scsi_rc,
		 ioasa->rc.fc_rc, ioasa->afu_extra, ioasa->scsi_extra,
		 ioasa->fc_extra);

	if (ioasa->rc.scsi_rc) {
		/* We have a SCSI status */
		if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
			memcpy(scp->sense_buffer, ioasa->sense_data,
			       SISL_SENSE_DATA_LEN);
			scp->result = ioasa->rc.scsi_rc;
		} else
			scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
	}

	/*
	 * We encountered an error. Set scp->result based on nature
	 * of error.
	 */
	if (ioasa->rc.fc_rc) {
		/* We have an FC status */
		switch (ioasa->rc.fc_rc) {
		case SISL_FC_RC_LINKDOWN:
			scp->result = (DID_REQUEUE << 16);
			break;
		case SISL_FC_RC_RESID:
			/* This indicates an FCP resid underrun */
			if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
				/* If the SISL_RC_FLAGS_OVERRUN flag was set,
				 * then we will handle this error elsewhere.
				 * If not then we must handle it here.
				 * This is probably an AFU bug.
				 */
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_FC_RC_RESIDERR:
			/* Resid mismatch between adapter and device */
		case SISL_FC_RC_TGTABORT:
		case SISL_FC_RC_ABORTOK:
		case SISL_FC_RC_ABORTFAIL:
		case SISL_FC_RC_NOLOGI:
		case SISL_FC_RC_ABORTPEND:
		case SISL_FC_RC_WRABORTPEND:
		case SISL_FC_RC_NOEXP:
		case SISL_FC_RC_INUSE:
			scp->result = (DID_ERROR << 16);
			break;
		}
	}

	if (ioasa->rc.afu_rc) {
		/* We have an AFU error */
		switch (ioasa->rc.afu_rc) {
		case SISL_AFU_RC_NO_CHANNELS:
			scp->result = (DID_NO_CONNECT << 16);
			break;
		case SISL_AFU_RC_DATA_DMA_ERR:
			switch (ioasa->afu_extra) {
			case SISL_AFU_DMA_ERR_PAGE_IN:
				/* Retry */
				scp->result = (DID_IMM_RETRY << 16);
				break;
			case SISL_AFU_DMA_ERR_INVALID_EA:
			default:
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_AFU_RC_OUT_OF_DATA_BUFS:
			/* Retry */
			scp->result = (DID_ALLOC_FAILURE << 16);
			break;
		default:
			scp->result = (DID_ERROR << 16);
		}
	}
}
/**
 * cmd_complete() - command completion handler
 * @cmd:	AFU command that has completed.
 *
 * Prepares and submits command that has either completed or timed out to
 * the SCSI stack. Checks AFU command back into command pool for non-internal
 * (rcb.scp populated) commands.
 */
static void cmd_complete(struct afu_cmd *cmd)
{
	struct scsi_cmnd *scp;
	ulong lock_flags;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	bool cmd_is_tmf;
	u32 ioasc;

	spin_lock_irqsave(&cmd->slock, lock_flags);
	cmd->sa.host_use_b[0] |= B_DONE;
	spin_unlock_irqrestore(&cmd->slock, lock_flags);

	if (cmd->rcb.scp) {
		scp = cmd->rcb.scp;
		ioasc = cmd->sa.ioasc;	/* capture before checkin zeroes it */
		if (unlikely(ioasc))
			process_cmd_err(cmd, scp);
		else
			scp->result = (DID_OK << 16);

		cmd_is_tmf = cmd->cmd_tmf;
		cmd_checkin(cmd); /* Don't use cmd after here */

		pr_debug_ratelimited("%s: calling scsi_done scp=%p result=%X "
				     "ioasc=%d\n", __func__, scp, scp->result,
				     ioasc);

		scsi_dma_unmap(scp);
		scp->scsi_done(scp);

		if (cmd_is_tmf) {
			spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
			cfg->tmf_active = false;
			wake_up_all_locked(&cfg->tmf_waitq);
			spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		}
	} else
		complete(&cmd->cevent);
}

/**
 * context_reset() - timeout handler for AFU commands
 * @cmd:	AFU command that timed out.
 *
 * Sends a reset to the AFU.
 */
static void context_reset(struct afu_cmd *cmd)
{
	int nretry = 0;
	u64 rrin = 0x1;
	u64 room = 0;
	struct afu *afu = cmd->parent;
	ulong lock_flags;

	pr_debug("%s: cmd=%p\n", __func__, cmd);

	spin_lock_irqsave(&cmd->slock, lock_flags);

	/* Already completed? */
	if (cmd->sa.host_use_b[0] & B_DONE) {
		spin_unlock_irqrestore(&cmd->slock, lock_flags);
		return;
	}

	cmd->sa.host_use_b[0] |= (B_DONE | B_ERROR | B_TIMEOUT);
	spin_unlock_irqrestore(&cmd->slock, lock_flags);

	/*
	 * We really want to send this reset at all costs, so spread
	 * out wait time on successive retries for available room.
	 */
	do {
		room = readq_be(&afu->host_map->cmd_room);
		atomic64_set(&afu->room, room);
		if (room)
			goto write_rrin;
		udelay(nretry);
	} while (nretry++ < MC_ROOM_RETRY_CNT);

	pr_err("%s: no cmd_room to send reset\n", __func__);
	return;

write_rrin:
	nretry = 0;
	writeq_be(rrin, &afu->host_map->ioarrin);
	do {
		rrin = readq_be(&afu->host_map->ioarrin);
		if (rrin != 0x1)
			break;
		/* Double delay each time */
		udelay(2 << nretry);
	} while (nretry++ < MC_ROOM_RETRY_CNT);
}
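/*
 * Note on command-room accounting (informational): the SISLite host
 * map exposes a cmd_room register counting IOARRIN slots the AFU can
 * currently accept. To avoid an MMIO read per command, the driver
 * caches the last value read in afu->room and spends cached credits
 * first, roughly:
 *
 *	if (atomic64_dec_if_positive(&afu->room) > 0)
 *		writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
 *	else
 *		re-read cmd_room over MMIO, refresh the cache, and
 *		retry with a growing udelay() backoff;
 *
 * context_reset() above and send_cmd() below both follow this pattern.
 */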
/**
 * send_cmd() - sends an AFU command
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int nretry = 0;
	int rc = 0;
	u64 room;
	long newval;

	/*
	 * This routine is used by critical users such as AFU sync and to
	 * send a task management function (TMF). Thus we want to retry a
	 * bit before returning an error. To avoid the performance penalty
	 * of MMIO, we spread the update of 'room' over multiple commands.
	 */
retry:
	newval = atomic64_dec_if_positive(&afu->room);
	if (!newval) {
		do {
			room = readq_be(&afu->host_map->cmd_room);
			atomic64_set(&afu->room, room);
			if (room)
				goto write_ioarrin;
			udelay(nretry);
		} while (nretry++ < MC_ROOM_RETRY_CNT);

		dev_err(dev, "%s: no cmd_room to send 0x%X\n",
			__func__, cmd->rcb.cdb[0]);

		goto no_room;
	} else if (unlikely(newval < 0)) {
		/* This should be rare. i.e. Only if two threads race and
		 * decrement before the MMIO read is done. In this case
		 * just benefit from the other thread having updated
		 * afu->room.
		 */
		if (nretry++ < MC_ROOM_RETRY_CNT) {
			udelay(nretry);
			goto retry;
		}

		goto no_room;
	}

write_ioarrin:
	writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
out:
	pr_devel("%s: cmd=%p len=%d ea=%p rc=%d\n", __func__, cmd,
		 cmd->rcb.data_len, (void *)cmd->rcb.data_ea, rc);
	return rc;

no_room:
	afu->read_room = true;
	kref_get(&cfg->afu->mapcount);
	schedule_work(&cfg->work_q);
	rc = SCSI_MLQUEUE_HOST_BUSY;
	goto out;
}

/**
 * wait_resp() - polls for a response or timeout to a sent AFU command
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command that was sent.
 */
static void wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
	ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);

	timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
	if (!timeout)
		context_reset(cmd);

	if (unlikely(cmd->sa.ioasc != 0))
		pr_err("%s: CMD 0x%X failed, IOASC: flags 0x%X, afu_rc 0x%X, "
		       "scsi_rc 0x%X, fc_rc 0x%X\n", __func__, cmd->rcb.cdb[0],
		       cmd->sa.rc.flags, cmd->sa.rc.afu_rc, cmd->sa.rc.scsi_rc,
		       cmd->sa.rc.fc_rc);
}
/**
 * send_tmf() - sends a Task Management Function (TMF)
 * @afu:	AFU to checkout from.
 * @scp:	SCSI command from stack.
 * @tmfcmd:	TMF command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
{
	struct afu_cmd *cmd;

	u32 port_sel = scp->device->channel + 1;
	short lflag = 0;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
	struct device *dev = &cfg->dev->dev;
	ulong lock_flags;
	int rc = 0;
	ulong to;

	cmd = cmd_checkout(afu);
	if (unlikely(!cmd)) {
		dev_err(dev, "%s: could not get a free command\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	/* When Task Management Function is active do not send another */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	cfg->tmf_active = true;
	cmd->cmd_tmf = true;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	cmd->rcb.ctx_id = afu->ctx_hndl;
	cmd->rcb.port_sel = port_sel;
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	lflag = SISL_REQ_FLAGS_TMF_CMD;

	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);

	/* Stash the scp in the reserved field, for reuse during interrupt */
	cmd->rcb.scp = scp;

	/* Copy the CDB from the cmd passed in */
	memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));

	/* Send the command */
	rc = send_cmd(afu, cmd);
	if (unlikely(rc)) {
		cmd_checkin(cmd);
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		goto out;
	}

	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	to = msecs_to_jiffies(5000);
	to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
						       !cfg->tmf_active,
						       cfg->tmf_slock,
						       to);
	if (!to) {
		cfg->tmf_active = false;
		dev_err(dev, "%s: TMF timed out!\n", __func__);
		rc = -1;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
out:
	return rc;
}
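/*
 * TMF serialization sketch (informational): tmf_active, tmf_slock and
 * tmf_waitq implement a single-outstanding-TMF rule shared by three
 * parties:
 *
 *	send_tmf()		sets tmf_active, sleeps until it clears
 *	cmd_complete()		clears tmf_active, wakes tmf_waitq
 *	cxlflash_queuecommand()	returns SCSI_MLQUEUE_HOST_BUSY while a
 *				TMF is in flight
 *
 * The 5000ms wait above bounds how long send_tmf() waits for the
 * completion path to clear the flag before declaring the TMF failed.
 */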
static void afu_unmap(struct kref *ref)
{
	struct afu *afu = container_of(ref, struct afu, mapcount);

	if (likely(afu->afu_map)) {
		cxl_psa_unmap((void __iomem *)afu->afu_map);
		afu->afu_map = NULL;
	}
}

/**
 * cxlflash_driver_info() - information handler for this host driver
 * @host:	SCSI host associated with device.
 *
 * Return: A string describing the device.
 */
static const char *cxlflash_driver_info(struct Scsi_Host *host)
{
	return CXLFLASH_ADAPTER_NAME;
}

/**
 * cxlflash_queuecommand() - sends a mid-layer request
 * @host:	SCSI host associated with device.
 * @scp:	SCSI command to send.
 *
 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd;
	u32 port_sel = scp->device->channel + 1;
	int nseg, i, ncount;
	struct scatterlist *sg;
	ulong lock_flags;
	short lflag = 0;
	int rc = 0;
	int kref_got = 0;

	dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
			    "cdb=(%08X-%08X-%08X-%08X)\n",
			    __func__, scp, host->host_no, scp->device->channel,
			    scp->device->id, scp->device->lun,
			    get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	/*
	 * If a Task Management Function is active, wait for it to complete
	 * before continuing with regular commands.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active) {
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	switch (cfg->state) {
	case STATE_RESET:
		dev_dbg_ratelimited(dev, "%s: device is in reset!\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	case STATE_FAILTERM:
		dev_dbg_ratelimited(dev, "%s: device has failed!\n", __func__);
		scp->result = (DID_NO_CONNECT << 16);
		scp->scsi_done(scp);
		rc = 0;
		goto out;
	default:
		break;
	}

	cmd = cmd_checkout(afu);
	if (unlikely(!cmd)) {
		dev_err(dev, "%s: could not get a free command\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	kref_get(&cfg->afu->mapcount);
	kref_got = 1;

	cmd->rcb.ctx_id = afu->ctx_hndl;
	cmd->rcb.port_sel = port_sel;
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		lflag = SISL_REQ_FLAGS_HOST_WRITE;
	else
		lflag = SISL_REQ_FLAGS_HOST_READ;

	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);

	/* Stash the scp in the reserved field, for reuse during interrupt */
	cmd->rcb.scp = scp;

	nseg = scsi_dma_map(scp);
	if (unlikely(nseg < 0)) {
		dev_err(dev, "%s: Fail DMA map! nseg=%d\n",
			__func__, nseg);
		cmd_checkin(cmd);	/* release the slot taken above */
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	ncount = scsi_sg_count(scp);
	/* The RCB carries a single EA/length; only one SG element expected */
	scsi_for_each_sg(scp, sg, ncount, i) {
		cmd->rcb.data_len = sg_dma_len(sg);
		cmd->rcb.data_ea = sg_dma_address(sg);
	}

	/* Copy the CDB from the scsi_cmnd passed in */
	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));

	/* Send the command */
	rc = send_cmd(afu, cmd);
	if (unlikely(rc)) {
		cmd_checkin(cmd);
		scsi_dma_unmap(scp);
	}

out:
	if (kref_got)
		kref_put(&afu->mapcount, afu_unmap);
	pr_devel("%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;

	if (pci_channel_offline(pdev))
		wait_event_timeout(cfg->reset_waitq,
				   !pci_channel_offline(pdev),
				   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
}

/**
 * free_mem() - free memory associated with the AFU
 * @cfg:	Internal structure associated with the host.
 */
static void free_mem(struct cxlflash_cfg *cfg)
{
	int i;
	char *buf = NULL;
	struct afu *afu = cfg->afu;

	if (cfg->afu) {
		for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
			buf = afu->cmd[i].buf;
			/* Only page-aligned buffers head a page; free those */
			if (!((u64)buf & (PAGE_SIZE - 1)))
				free_page((ulong)buf);
		}

		free_pages((ulong)afu, get_order(sizeof(struct afu)));
		cfg->afu = NULL;
	}
}

/**
 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU in a partially allocated/initialized state.
 *
 * Cleans up all state associated with the command queue, and unmaps
 * the MMIO space.
 *
 *  - complete() will take care of commands we initiated (they'll be checked
 *  in as part of the cleanup that occurs after the completion)
 *
 *  - cmd_checkin() will take care of entries that we did not initiate and that
 *  have not (and will not) complete because they are sitting on a [now stale]
 *  hardware queue
 */
static void stop_afu(struct cxlflash_cfg *cfg)
{
	int i;
	struct afu *afu = cfg->afu;
	struct afu_cmd *cmd;

	if (likely(afu)) {
		for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
			cmd = &afu->cmd[i];
			complete(&cmd->cevent);
			if (!atomic_read(&cmd->free))
				cmd_checkin(cmd);
		}

		if (likely(afu->afu_map)) {
			cxl_psa_unmap((void __iomem *)afu->afu_map);
			afu->afu_map = NULL;
		}
		kref_put(&afu->mapcount, afu_unmap);
	}
}

/**
 * term_mc() - terminates the master context
 * @cfg:	Internal structure associated with the host.
 * @level:	Depth of allocation, where to begin waterfall tear down.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level)
{
	int rc = 0;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;

	if (!afu || !cfg->mcctx) {
		dev_err(dev, "%s: returning from term_mc with NULL afu or MC\n",
			__func__);
		return;
	}

	/* Waterfall: each level intentionally falls through to the next */
	switch (level) {
	case UNDO_START:
		rc = cxl_stop_context(cfg->mcctx);
		BUG_ON(rc);
	case UNMAP_THREE:
		cxl_unmap_afu_irq(cfg->mcctx, 3, afu);
	case UNMAP_TWO:
		cxl_unmap_afu_irq(cfg->mcctx, 2, afu);
	case UNMAP_ONE:
		cxl_unmap_afu_irq(cfg->mcctx, 1, afu);
	case FREE_IRQ:
		cxl_free_afu_irqs(cfg->mcctx);
	case RELEASE_CONTEXT:
		cfg->mcctx = NULL;
	}
}

/**
 * term_afu() - terminates the AFU
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_afu(struct cxlflash_cfg *cfg)
{
	term_mc(cfg, UNDO_START);

	if (cfg->afu)
		stop_afu(cfg);

	pr_debug("%s: returning\n", __func__);
}
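/*
 * Teardown ordering sketch (informational): init and teardown mirror
 * each other as a waterfall keyed by enum undo_level, so a failure at
 * any init step can unwind exactly what was set up:
 *
 *	init_mc() step (below)		term_mc() undo level
 *	----------------------		--------------------
 *	cxl_start_context()		UNDO_START
 *	cxl_map_afu_irq(3)		UNMAP_THREE
 *	cxl_map_afu_irq(2)		UNMAP_TWO
 *	cxl_map_afu_irq(1)		UNMAP_ONE
 *	cxl_allocate_afu_irqs()		FREE_IRQ
 *	cxl_get_context()		RELEASE_CONTEXT
 */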
/**
 * cxlflash_remove() - PCI entry point to tear down host
 * @pdev:	PCI device associated with the host.
 *
 * Safe to use as a cleanup in partially allocated/initialized state.
 */
static void cxlflash_remove(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	ulong lock_flags;

	/* If a Task Management Function is active, wait for it to complete
	 * before continuing with remove.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	cfg->state = STATE_FAILTERM;
	cxlflash_stop_term_user_contexts(cfg);

	switch (cfg->init_state) {
	case INIT_STATE_SCSI:
		cxlflash_term_local_luns(cfg);
		scsi_remove_host(cfg->host);
		/* fall through */
	case INIT_STATE_AFU:
		cancel_work_sync(&cfg->work_q);
		term_afu(cfg);
		/* fall through */
	case INIT_STATE_PCI:
		pci_release_regions(cfg->dev);
		pci_disable_device(pdev);
		/* fall through */
	case INIT_STATE_NONE:
		free_mem(cfg);
		scsi_host_put(cfg->host);
		break;
	}

	pr_debug("%s: returning\n", __func__);
}

/**
 * alloc_mem() - allocates the AFU and its command pool
 * @cfg:	Internal structure associated with the host.
 *
 * A partially allocated state remains on failure.
 *
 * Return:
 *	0 on success
 *	-ENOMEM on failure to allocate memory
 */
static int alloc_mem(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	int i;
	char *buf = NULL;
	struct device *dev = &cfg->dev->dev;

	/* AFU is ~12k, i.e. only one 64k page or up to four 4k pages */
	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					    get_order(sizeof(struct afu)));
	if (unlikely(!cfg->afu)) {
		dev_err(dev, "%s: cannot get %d free pages\n",
			__func__, get_order(sizeof(struct afu)));
		rc = -ENOMEM;
		goto out;
	}
	cfg->afu->parent = cfg;
	cfg->afu->afu_map = NULL;

	for (i = 0; i < CXLFLASH_NUM_CMDS; buf += CMD_BUFSIZE, i++) {
		if (!((u64)buf & (PAGE_SIZE - 1))) {
			buf = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
			if (unlikely(!buf)) {
				dev_err(dev,
					"%s: Allocate command buffers fail!\n",
					__func__);
				rc = -ENOMEM;
				free_mem(cfg);
				goto out;
			}
		}

		cfg->afu->cmd[i].buf = buf;
		atomic_set(&cfg->afu->cmd[i].free, 1);
		cfg->afu->cmd[i].slot = i;
	}

out:
	return rc;
}
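/*
 * Command buffer packing sketch (informational, sizes assumed):
 * alloc_mem() carves CMD_BUFSIZE buffers out of whole pages, taking a
 * fresh page only when the running buf pointer is page aligned;
 * free_mem() relies on the same property to free each page exactly
 * once. For example, with a 4k page and an assumed CMD_BUFSIZE of 2k:
 *
 *	cmd[0].buf -> page A + 0	(aligned, allocates page A)
 *	cmd[1].buf -> page A + 2k
 *	cmd[2].buf -> page B + 0	(aligned, allocates page B)
 *	...
 */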
/**
 * init_pci() - initializes the host as a PCI device
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_pci(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	int rc = 0;

	cfg->cxlflash_regs_pci = pci_resource_start(pdev, 0);
	rc = pci_request_regions(pdev, CXLFLASH_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"%s: Couldn't register memory range of registers\n",
			__func__);
		goto out;
	}

	rc = pci_enable_device(pdev);
	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			cxlflash_wait_for_pci_err_recovery(cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(&pdev->dev, "%s: Cannot enable adapter\n",
				__func__);
			cxlflash_wait_for_pci_err_recovery(cfg);
			goto out_release_regions;
		}
	}

	/* Prefer a 64-bit DMA mask, fall back to 32-bit */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc < 0) {
		dev_dbg(&pdev->dev, "%s: Failed to set 64 bit PCI DMA mask\n",
			__func__);
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	}

	if (rc < 0) {
		dev_err(&pdev->dev, "%s: Failed to set PCI DMA mask\n",
			__func__);
		goto out_disable;
	}

	pci_set_master(pdev);

	if (pci_channel_offline(pdev)) {
		cxlflash_wait_for_pci_err_recovery(cfg);
		if (pci_channel_offline(pdev)) {
			rc = -EIO;
			goto out_msi_disable;
		}
	}

	rc = pci_save_state(pdev);
	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "%s: Failed to save PCI config space\n",
			__func__);
		rc = -EIO;
		goto cleanup_nolog;
	}

out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;

cleanup_nolog:
out_msi_disable:
	cxlflash_wait_for_pci_err_recovery(cfg);
out_disable:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
	goto out;
}

/**
 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_scsi(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	int rc = 0;

	rc = scsi_add_host(cfg->host, &pdev->dev);
	if (rc) {
		dev_err(&pdev->dev, "%s: scsi_add_host failed (rc=%d)\n",
			__func__, rc);
		goto out;
	}

	scsi_scan_host(cfg->host);

out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * set_port_online() - transitions the specified host FC port to online state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. Online state means
 * that the FC link layer has synced, completed the handshaking process, and
 * is ready for login to start.
 */
static void set_port_online(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE);	/* clear OFF_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);	/* set ON_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}
/**
 * set_port_offline() - transitions the specified host FC port to offline state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call.
 */
static void set_port_offline(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);	/* clear ON_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);	/* set OFF_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * wait_port_online() - waits for the specified host FC port to come online
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call. This will timeout
 * when the cable is not plugged in.
 *
 * Return:
 *	TRUE (1) when the specified port is online
 *	FALSE (0) when the specified port fails to come online after timeout
 *	-EINVAL when @delay_us is less than 1000
 */
static int wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	if (delay_us < 1000) {
		pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
		return -EINVAL;
	}

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
}

/**
 * wait_port_offline() - waits for the specified host FC port to go offline
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call.
 *
 * Return:
 *	TRUE (1) when the specified port is offline
 *	FALSE (0) when the specified port fails to go offline after timeout
 *	-EINVAL when @delay_us is less than 1000
 */
static int wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	if (delay_us < 1000) {
		pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
		return -EINVAL;
	}

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
}
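/*
 * Polling budget (informational, constants assumed): both wait
 * routines above sleep delay_us/1000 milliseconds per probe and give
 * up after nretry probes, so the worst-case wait is roughly
 * delay_us * nretry microseconds. With assumed values such as
 * delay_us = 100000 and nretry = 500, that is on the order of tens
 * of seconds, which is why afu_set_wwpn() and afu_link_reset() below
 * can stall noticeably when a cable is unplugged.
 */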
/**
 * afu_set_wwpn() - configures the WWPN for the specified host FC port
 * @afu:	AFU associated with the host that owns the specified FC port.
 * @port:	Port number being configured.
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @wwpn:	The world-wide-port-number previously discovered for port.
 *
 * The provided MMIO region must be mapped prior to call. As part of the
 * sequence to configure the WWPN, the port is toggled offline and then back
 * online. This toggling action can cause this routine to delay up to a few
 * seconds. When configured to use the internal LUN feature of the AFU, a
 * failure to come online is overridden.
 *
 * Return: 0 always; failures to transition the port are logged and the
 * port is left online regardless.
 */
static int afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
			u64 wwpn)
{
	int rc = 0;

	set_port_offline(fc_regs);

	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT)) {
		pr_debug("%s: wait on port %d to go offline timed out\n",
			 __func__, port);
		rc = -1; /* but continue on to leave the port back online */
	}

	if (rc == 0)
		writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);

	/* Always return success after programming WWPN */
	rc = 0;

	set_port_online(fc_regs);

	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT)) {
		pr_err("%s: wait on port %d to go online timed out\n",
		       __func__, port);
	}

	pr_debug("%s: returning rc=%d\n", __func__, rc);

	return rc;
}

/**
 * afu_link_reset() - resets the specified host FC port
 * @afu:	AFU associated with the host that owns the specified FC port.
 * @port:	Port number being reset.
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. The sequence to
 * reset the port involves toggling it offline and then back online. This
 * action can cause this routine to delay up to a few seconds. An effort
 * is made to maintain link with the device by switching the host to use
 * the alternate port exclusively while the reset takes place.
 */
static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
{
	u64 port_sel;

	/* first switch the AFU to the other links, if any */
	port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
	port_sel &= ~(1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT))
		pr_err("%s: wait on port %d to go offline timed out\n",
		       __func__, port);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT))
		pr_err("%s: wait on port %d to go online timed out\n",
		       __func__, port);

	/* switch back to include this port */
	port_sel |= (1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	pr_debug("%s: returning port_sel=%lld\n", __func__, port_sel);
}
/*
 * Asynchronous interrupt information table
 */
static const struct asyc_intr_info ainfo[] = {
	{SISL_ASTATUS_FC0_OTHER, "other error", 0, CLR_FC_ERROR | LINK_RESET},
	{SISL_ASTATUS_FC0_LOGO, "target initiated LOGO", 0, 0},
	{SISL_ASTATUS_FC0_CRC_T, "CRC threshold exceeded", 0, LINK_RESET},
	{SISL_ASTATUS_FC0_LOGI_R, "login timed out, retrying", 0, LINK_RESET},
	{SISL_ASTATUS_FC0_LOGI_F, "login failed", 0, CLR_FC_ERROR},
	{SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, SCAN_HOST},
	{SISL_ASTATUS_FC0_LINK_DN, "link down", 0, 0},
	{SISL_ASTATUS_FC0_LINK_UP, "link up", 0, SCAN_HOST},
	{SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
	{SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
	{SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
	{SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, LINK_RESET},
	{SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
	{SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, SCAN_HOST},
	{SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
	{SISL_ASTATUS_FC1_LINK_UP, "link up", 1, SCAN_HOST},
	{0x0, "", 0, 0}		/* terminator */
};

/**
 * find_ainfo() - locates and returns asynchronous interrupt information
 * @status:	Status code set by AFU on error.
 *
 * Return: The located information or NULL when the status code is invalid.
 */
static const struct asyc_intr_info *find_ainfo(u64 status)
{
	const struct asyc_intr_info *info;

	for (info = &ainfo[0]; info->status; info++)
		if (info->status == status)
			return info;

	return NULL;
}

/**
 * afu_err_intr_init() - clears and initializes the AFU for error interrupts
 * @afu:	AFU associated with the host.
 */
static void afu_err_intr_init(struct afu *afu)
{
	int i;
	u64 reg;

	/* global async interrupts: AFU clears afu_ctrl on context exit
	 * if async interrupts were sent to that context. This prevents
	 * the AFU from sending further async interrupts when there is
	 * nobody to receive them.
	 */

	/* mask all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
	/* set LISN# to send and point to master context */
	reg = ((u64) (((afu->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);

	if (afu->internal_lun)
		reg |= 1;	/* Bit 63 indicates local lun */
	writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
	/* clear all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
	/* unmask bits that are of interest */
	/* note: afu can send an interrupt after this step */
	writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
	/* clear again in case a bit came on after previous clear but before */
	/* unmask */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);

	/* Clear/Set internal lun bits */
	reg = readq_be(&afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);
	reg &= SISL_FC_INTERNAL_MASK;
	if (afu->internal_lun)
		reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
	writeq_be(reg, &afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);

	/* now clear FC errors */
	for (i = 0; i < NUM_FC_PORTS; i++) {
		writeq_be(0xFFFFFFFFU,
			  &afu->afu_map->global.fc_regs[i][FC_ERROR / 8]);
		writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRCAP / 8]);
	}

	/* sync interrupts for master's IOARRIN write */
	/* note that unlike asyncs, there can be no pending sync interrupts */
	/* at this time (this is a fresh context and master has not written */
	/* IOARRIN yet), so there is nothing to clear. */

	/* set LISN#, it is always sent to the context that wrote IOARRIN */
	writeq_be(SISL_MSI_SYNC_ERROR, &afu->host_map->ctx_ctrl);
	writeq_be(SISL_ISTATUS_MASK, &afu->host_map->intr_mask);
}
/**
 * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	u64 reg;
	u64 reg_unmasked;

	reg = readq_be(&afu->host_map->intr_status);
	reg_unmasked = (reg & SISL_ISTATUS_UNMASK);

	if (reg_unmasked == 0UL) {
		pr_err("%s: %llX: spurious interrupt, intr_status %016llX\n",
		       __func__, (u64)afu, reg);
		goto cxlflash_sync_err_irq_exit;
	}

	pr_err("%s: %llX: unexpected interrupt, intr_status %016llX\n",
	       __func__, (u64)afu, reg);

	writeq_be(reg_unmasked, &afu->host_map->intr_clear);

cxlflash_sync_err_irq_exit:
	pr_debug("%s: returning rc=%d\n", __func__, IRQ_HANDLED);
	return IRQ_HANDLED;
}

/**
 * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	struct afu_cmd *cmd;
	bool toggle = afu->toggle;
	u64 entry,
	    *hrrq_start = afu->hrrq_start,
	    *hrrq_end = afu->hrrq_end,
	    *hrrq_curr = afu->hrrq_curr;

	/* Process however many RRQ entries that are ready */
	while (true) {
		entry = *hrrq_curr;

		if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
			break;

		cmd = (struct afu_cmd *)(entry & ~SISL_RESP_HANDLE_T_BIT);
		cmd_complete(cmd);

		/* Advance to next entry or wrap and flip the toggle bit */
		if (hrrq_curr < hrrq_end)
			hrrq_curr++;
		else {
			hrrq_curr = hrrq_start;
			toggle ^= SISL_RESP_HANDLE_T_BIT;
		}
	}

	afu->hrrq_curr = hrrq_curr;
	afu->toggle = toggle;

	return IRQ_HANDLED;
}
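/*
 * RRQ toggle protocol sketch (informational): each RRQ entry is a
 * command pointer with a toggle bit (SISL_RESP_HANDLE_T_BIT) in the
 * low bit. The AFU flips the sense of the bit on every wrap of the
 * queue, so software can tell fresh entries from stale ones without
 * a producer index:
 *
 *	pass 1:	AFU writes entries with T = 1; host consumes while
 *		(entry & T_BIT) == 1
 *	wrap:	host flips its expected toggle to 0
 *	pass 2:	AFU writes entries with T = 0, and so on
 *
 * An entry whose toggle does not match is one the AFU has not yet
 * overwritten, which is what terminates the scan in cxlflash_rrq_irq()
 * above.
 */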
/**
 * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 reg_unmasked;
	const struct asyc_intr_info *info;
	struct sisl_global_map __iomem *global = &afu->afu_map->global;
	u64 reg;
	u8 port;
	int i;

	reg = readq_be(&global->regs.aintr_status);
	reg_unmasked = (reg & SISL_ASTATUS_UNMASK);

	if (reg_unmasked == 0) {
		dev_err(dev, "%s: spurious interrupt, aintr_status 0x%016llX\n",
			__func__, reg);
		goto out;
	}

	/* FYI, it is 'okay' to clear AFU status before FC_ERROR */
	writeq_be(reg_unmasked, &global->regs.aintr_clear);

	/* Check each bit that is on */
	for (i = 0; reg_unmasked; i++, reg_unmasked = (reg_unmasked >> 1)) {
		info = find_ainfo(1ULL << i);
		if (((reg_unmasked & 0x1) == 0) || !info)
			continue;

		port = info->port;

		dev_err(dev, "%s: FC Port %d -> %s, fc_status 0x%08llX\n",
			__func__, port, info->desc,
			readq_be(&global->fc_regs[port][FC_STATUS / 8]));

		/*
		 * Do link reset first, some OTHER errors will set FC_ERROR
		 * again if cleared before or w/o a reset
		 */
		if (info->action & LINK_RESET) {
			dev_err(dev, "%s: FC Port %d: resetting link\n",
				__func__, port);
			cfg->lr_state = LINK_RESET_REQUIRED;
			cfg->lr_port = port;
			kref_get(&cfg->afu->mapcount);
			schedule_work(&cfg->work_q);
		}

		if (info->action & CLR_FC_ERROR) {
			reg = readq_be(&global->fc_regs[port][FC_ERROR / 8]);

			/*
			 * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
			 * should be the same and tracing one is sufficient.
			 */

			dev_err(dev, "%s: fc %d: clearing fc_error 0x%08llX\n",
				__func__, port, reg);

			writeq_be(reg, &global->fc_regs[port][FC_ERROR / 8]);
			writeq_be(0, &global->fc_regs[port][FC_ERRCAP / 8]);
		}

		if (info->action & SCAN_HOST) {
			atomic_inc(&cfg->scan_host_needed);
			kref_get(&cfg->afu->mapcount);
			schedule_work(&cfg->work_q);
		}
	}

out:
	dev_dbg(dev, "%s: returning IRQ_HANDLED, afu=%p\n", __func__, afu);
	return IRQ_HANDLED;
}

/**
 * start_context() - starts the master context
 * @cfg:	Internal structure associated with the host.
 *
 * Return: A success or failure value from CXL services.
 */
static int start_context(struct cxlflash_cfg *cfg)
{
	int rc = 0;

	rc = cxl_start_context(cfg->mcctx,
			       cfg->afu->work.work_element_descriptor,
			       NULL);

	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * read_vpd() - obtains the WWPNs from VPD
 * @cfg:	Internal structure associated with the host.
 * @wwpn:	Array of size NUM_FC_PORTS to pass back WWPNs.
 *
 * Return: 0 on success, -errno on failure
 */
static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
{
	struct pci_dev *dev = cfg->parent_dev;
	int rc = 0;
	int ro_start, ro_size, i, j, k;
	ssize_t vpd_size;
	char vpd_data[CXLFLASH_VPD_LEN];
	char tmp_buf[WWPN_BUF_LEN] = { 0 };
	char *wwpn_vpd_tags[NUM_FC_PORTS] = { "V5", "V6" };

	/* Get the VPD data from the device */
	vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
	if (unlikely(vpd_size <= 0)) {
		dev_err(&dev->dev, "%s: Unable to read VPD (size = %ld)\n",
			__func__, vpd_size);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read only section offset */
	ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
				    PCI_VPD_LRDT_RO_DATA);
	if (unlikely(ro_start < 0)) {
		dev_err(&dev->dev, "%s: VPD Read-only data not found\n",
			__func__);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read only section size, cap when extends beyond read VPD */
	ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
	j = ro_size;
	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
	if (unlikely((i + j) > vpd_size)) {
		pr_debug("%s: Might need to read more VPD (%d > %ld)\n",
			 __func__, (i + j), vpd_size);
		ro_size = vpd_size - i;
	}

	/*
	 * Find the offset of the WWPN tag within the read only
	 * VPD data and validate the found field (partials are
	 * no good to us). Convert the ASCII data to an integer
	 * value. Note that we must copy to a temporary buffer
	 * because the conversion service requires that the ASCII
	 * string be terminated.
	 */
	for (k = 0; k < NUM_FC_PORTS; k++) {
		j = ro_size;
		i = ro_start + PCI_VPD_LRDT_TAG_SIZE;

		i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
		if (unlikely(i < 0)) {
			dev_err(&dev->dev, "%s: Port %d WWPN not found "
				"in VPD\n", __func__, k);
			rc = -ENODEV;
			goto out;
		}

		j = pci_vpd_info_field_size(&vpd_data[i]);
		i += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
			dev_err(&dev->dev, "%s: Port %d WWPN incomplete or "
				"VPD corrupt\n",
				__func__, k);
			rc = -ENODEV;
			goto out;
		}

		memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
		/* WWPN_LEN doubles as the numeric base (assumed to be 16) */
		rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
		if (unlikely(rc)) {
			dev_err(&dev->dev, "%s: Fail to convert port %d WWPN "
				"to integer\n", __func__, k);
			rc = -ENODEV;
			goto out;
		}
	}

out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * init_pcr() - initialize the provisioning and control registers
 * @cfg:	Internal structure associated with the host.
 *
 * Also sets up fast access to the mapped registers and initializes AFU
 * command fields that never change.
 */
static void init_pcr(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct sisl_ctrl_map __iomem *ctrl_map;
	int i;

	for (i = 0; i < MAX_CONTEXT; i++) {
		ctrl_map = &afu->afu_map->ctrls[i].ctrl;
		/* Disrupt any clients that could be running */
		/* e.g. clients that survived a master restart */
		writeq_be(0, &ctrl_map->rht_start);
		writeq_be(0, &ctrl_map->rht_cnt_id);
		writeq_be(0, &ctrl_map->ctx_cap);
	}

	/* Copy frequently used fields into afu */
	afu->ctx_hndl = (u16) cxl_process_element(cfg->mcctx);
	afu->host_map = &afu->afu_map->hosts[afu->ctx_hndl].host;
	afu->ctrl_map = &afu->afu_map->ctrls[afu->ctx_hndl].ctrl;

	/* Program the Endian Control for the master context */
	writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl);

	/* Initialize cmd fields that never change */
	for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
		afu->cmd[i].rcb.ctx_id = afu->ctx_hndl;
		afu->cmd[i].rcb.msi = SISL_MSI_RRQ_UPDATED;
		afu->cmd[i].rcb.rrq = 0x0;
	}
}
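/*
 * Endianness note (informational): the SISLite MMIO space is
 * big-endian, so register accesses in this driver go through
 * readq_be()/writeq_be(). The one deliberate exception is the
 * afu_version read in init_afu() below, which uses plain readq() so
 * that the eight ASCII bytes land in memory in string order.
 */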
/**
 * init_global() - initialize AFU global registers
 * @cfg:	Internal structure associated with the host.
 */
static int init_global(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	u64 wwpn[NUM_FC_PORTS];	/* wwpn of AFU ports */
	int i = 0, num_ports = 0;
	int rc = 0;
	u64 reg;

	rc = read_vpd(cfg, &wwpn[0]);
	if (rc) {
		dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
		goto out;
	}

	pr_debug("%s: wwpn0=0x%llX wwpn1=0x%llX\n", __func__, wwpn[0], wwpn[1]);

	/* Set up RRQ in AFU for master issued cmds */
	writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start);
	writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end);

	/* AFU configuration */
	reg = readq_be(&afu->afu_map->global.regs.afu_config);
	reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
	/* enable all auto retry options and control endianness */
	/* leave others at default: */
	/* CTX_CAP write protected, mbox_r does not clear on read and */
	/* checker on if dual afu */
	writeq_be(reg, &afu->afu_map->global.regs.afu_config);

	/* Global port select: select either port */
	if (afu->internal_lun) {
		/* Only use port 0 */
		writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
		num_ports = NUM_FC_PORTS - 1;
	} else {
		writeq_be(BOTH_PORTS, &afu->afu_map->global.regs.afu_port_sel);
		num_ports = NUM_FC_PORTS;
	}

	for (i = 0; i < num_ports; i++) {
		/* Unmask all errors (but they are still masked at AFU) */
		writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRMSK / 8]);
		/* Clear CRC error cnt & set a threshold */
		(void)readq_be(&afu->afu_map->global.
			       fc_regs[i][FC_CNT_CRCERR / 8]);
		writeq_be(MC_CRC_THRESH, &afu->afu_map->global.fc_regs[i]
			  [FC_CRC_THRESH / 8]);

		/* Set WWPNs. If already programmed, wwpn[i] is 0 */
		if (wwpn[i] != 0 &&
		    afu_set_wwpn(afu, i,
				 &afu->afu_map->global.fc_regs[i][0],
				 wwpn[i])) {
			dev_err(dev, "%s: failed to set WWPN on port %d\n",
				__func__, i);
			rc = -EIO;
			goto out;
		}
		/* Programming WWPN back to back causes additional
		 * offline/online transitions and a PLOGI
		 */
		msleep(100);
	}

	/* Set up master's own CTX_CAP to allow real mode, host translation */
	/* tables, afu cmds and read/write GSCSI cmds. */
	/* First, unlock ctx_cap write by reading mbox */
	(void)readq_be(&afu->ctrl_map->mbox_r);	/* unlock ctx_cap */
	writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
		   SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
		   SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
		  &afu->ctrl_map->ctx_cap);
	/* Initialize heartbeat */
	afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);

out:
	return rc;
}

/**
 * start_afu() - initializes and starts the AFU
 * @cfg:	Internal structure associated with the host.
 */
static int start_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct afu_cmd *cmd;

	int i = 0;
	int rc = 0;

	for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
		cmd = &afu->cmd[i];

		init_completion(&cmd->cevent);
		spin_lock_init(&cmd->slock);
		cmd->parent = afu;
	}

	init_pcr(cfg);

	/* After an AFU reset, RRQ entries are stale, clear them */
	memset(&afu->rrq_entry, 0, sizeof(afu->rrq_entry));

	/* Initialize RRQ pointers */
	afu->hrrq_start = &afu->rrq_entry[0];
	afu->hrrq_end = &afu->rrq_entry[NUM_RRQ_ENTRY - 1];
	afu->hrrq_curr = afu->hrrq_start;
	afu->toggle = 1;

	rc = init_global(cfg);

	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}
/**
 * init_mc() - create and register as the master context
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_mc(struct cxlflash_cfg *cfg)
{
	struct cxl_context *ctx;
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	int rc = 0;
	enum undo_level level;

	ctx = cxl_get_context(cfg->dev);
	if (unlikely(!ctx))
		return -ENOMEM;
	cfg->mcctx = ctx;

	/* Set it up as a master with the CXL */
	cxl_set_master(ctx);

	/* During initialization reset the AFU to start from a clean slate */
	rc = cxl_afu_reset(cfg->mcctx);
	if (unlikely(rc)) {
		dev_err(dev, "%s: initial AFU reset failed rc=%d\n",
			__func__, rc);
		level = RELEASE_CONTEXT;
		goto out;
	}

	rc = cxl_allocate_afu_irqs(ctx, 3);
	if (unlikely(rc)) {
		dev_err(dev, "%s: call to allocate_afu_irqs failed rc=%d!\n",
			__func__, rc);
		level = RELEASE_CONTEXT;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, afu,
			     "SISL_MSI_SYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: IRQ 1 (SISL_MSI_SYNC_ERROR) map failed!\n",
			__func__);
		level = FREE_IRQ;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, afu,
			     "SISL_MSI_RRQ_UPDATED");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: IRQ 2 (SISL_MSI_RRQ_UPDATED) map failed!\n",
			__func__);
		level = UNMAP_ONE;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, afu,
			     "SISL_MSI_ASYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: IRQ 3 (SISL_MSI_ASYNC_ERROR) map failed!\n",
			__func__);
		level = UNMAP_TWO;
		goto out;
	}

	rc = 0;

	/* This performs the equivalent of the CXL_IOCTL_START_WORK.
	 * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
	 * element (pe) that is embedded in the context (ctx)
	 */
	rc = start_context(cfg);
	if (unlikely(rc)) {
		dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
		level = UNMAP_THREE;
		goto out;
	}
ret:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
out:
	term_mc(cfg, level);
	goto ret;
}

/**
 * init_afu() - setup as master context and start AFU
 * @cfg:	Internal structure associated with the host.
 *
 * This routine is a higher level of control for configuring the
 * AFU on probe and reset paths.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_afu(struct cxlflash_cfg *cfg)
{
	u64 reg;
	int rc = 0;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;

	cxl_perst_reloads_same_image(cfg->cxl_afu, true);

	rc = init_mc(cfg);
	if (rc) {
		dev_err(dev, "%s: call to init_mc failed, rc=%d!\n",
			__func__, rc);
		goto out;
	}

	/* Map the entire MMIO space of the AFU */
	afu->afu_map = cxl_psa_map(cfg->mcctx);
	if (!afu->afu_map) {
		dev_err(dev, "%s: call to cxl_psa_map failed!\n", __func__);
		rc = -ENOMEM;
		goto err1;
	}
	kref_init(&afu->mapcount);

	/* No byte reverse on reading afu_version or string will be backwards */
	reg = readq(&afu->afu_map->global.regs.afu_version);
	memcpy(afu->version, &reg, sizeof(reg));
	afu->interface_version =
	    readq_be(&afu->afu_map->global.regs.interface_version);
	if ((afu->interface_version + 1) == 0) {	/* all FFs: back level */
		pr_err("Back level AFU, please upgrade. AFU version %s "
		       "interface version 0x%llx\n", afu->version,
		       afu->interface_version);
		rc = -EINVAL;
		goto err2;
	}

	pr_debug("%s: afu version %s, interface version 0x%llX\n", __func__,
		 afu->version, afu->interface_version);

	rc = start_afu(cfg);
	if (rc) {
		dev_err(dev, "%s: call to start_afu failed, rc=%d!\n",
			__func__, rc);
		goto err2;
	}

	afu_err_intr_init(cfg->afu);
	atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));

	/* Restore the LUN mappings */
	cxlflash_restore_luntable(cfg);
out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;

err2:
	kref_put(&afu->mapcount, afu_unmap);
err1:
	term_mc(cfg, UNDO_START);
	goto out;
}
/**
 * cxlflash_afu_sync() - builds and sends an AFU sync command
 * @afu:	AFU associated with the host.
 * @ctx_hndl_u:	Identifies context requesting sync.
 * @res_hndl_u:	Identifies resource requesting sync.
 * @mode:	Type of sync to issue (lightweight, heavyweight, global).
 *
 * The AFU can only take 1 sync command at a time. This routine enforces this
 * limitation by using a mutex to provide exclusive access to the AFU during
 * the sync. This design point requires calling threads to not be on interrupt
 * context due to the possibility of sleeping during concurrent sync operations.
 *
 * AFU sync operations are only necessary and allowed when the device is
 * operating normally. When not operating normally, sync requests can occur as
 * part of cleaning up resources associated with an adapter prior to removal.
 * In this scenario, these requests are simply ignored (safe due to the AFU
 * going away).
 *
 * Return:
 *	0 on success
 *	-1 on failure
 */
int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
		      res_hndl_t res_hndl_u, u8 mode)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd = NULL;
	int rc = 0;
	int retry_cnt = 0;
	static DEFINE_MUTEX(sync_active);

	if (cfg->state != STATE_NORMAL) {
		pr_debug("%s: Sync not required! (%u)\n", __func__, cfg->state);
		return 0;
	}

	mutex_lock(&sync_active);
retry:
	cmd = cmd_checkout(afu);
	if (unlikely(!cmd)) {
		retry_cnt++;
		udelay(1000 * retry_cnt);
		if (retry_cnt < MC_RETRY_CNT)
			goto retry;
		dev_err(dev, "%s: could not get a free command\n", __func__);
		rc = -1;
		goto out;
	}

	pr_debug("%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);

	memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));

	cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
	cmd->rcb.port_sel = 0x0;	/* NA */
	cmd->rcb.lun_id = 0x0;	/* NA */
	cmd->rcb.data_len = 0x0;
	cmd->rcb.data_ea = 0x0;
	cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT;

	cmd->rcb.cdb[0] = 0xC0;	/* AFU Sync */
	cmd->rcb.cdb[1] = mode;

	/* The cdb is aligned, no unaligned accessors required */
	*((__be16 *)&cmd->rcb.cdb[2]) = cpu_to_be16(ctx_hndl_u);
	*((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u);

	rc = send_cmd(afu, cmd);
	if (unlikely(rc))
		goto out;

	wait_resp(afu, cmd);

	/* Set on timeout */
	if (unlikely((cmd->sa.ioasc != 0) ||
		     (cmd->sa.host_use_b[0] & B_ERROR)))
		rc = -1;
out:
	mutex_unlock(&sync_active);
	if (cmd)
		cmd_checkin(cmd);
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}
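/*
 * AFU sync CDB layout sketch (informational), as built by
 * cxlflash_afu_sync() above:
 *
 *	byte 0		0xC0 (AFU Sync opcode)
 *	byte 1		mode (lightweight/heavyweight/global)
 *	bytes 2-3	context handle, big-endian
 *	bytes 4-7	resource handle, big-endian
 *	bytes 8-15	zeroed
 *
 * No data buffer accompanies the command (data_len/data_ea are 0);
 * completion is signalled through the command's struct completion
 * rather than a SCSI response.
 */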
/**
 * cxlflash_eh_device_reset_handler() - reset a single LUN
 * @scp:	SCSI command from stack identifying the LUN to reset.
 *
 * Return:
 *	SUCCESS as defined in scsi/scsi.h
 *	FAILED as defined in scsi/scsi.h
 */
static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
{
	int rc = SUCCESS;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
	struct afu *afu = cfg->afu;
	int rcr = 0;

	pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
		 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
		 host->host_no, scp->device->channel,
		 scp->device->id, scp->device->lun,
		 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

retry:
	switch (cfg->state) {
	case STATE_NORMAL:
		rcr = send_tmf(afu, scp, TMF_LUN_RESET);
		if (unlikely(rcr))
			rc = FAILED;
		break;
	case STATE_RESET:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		goto retry;
	default:
		rc = FAILED;
		break;
	}

	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * cxlflash_eh_host_reset_handler() - reset the host adapter
 * @scp:	SCSI command from stack identifying host.
 *
 * Return:
 *	SUCCESS as defined in scsi/scsi.h
 *	FAILED as defined in scsi/scsi.h
 */
static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
{
	int rc = SUCCESS;
	int rcr = 0;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;

	pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
		 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
		 host->host_no, scp->device->channel,
		 scp->device->id, scp->device->lun,
		 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	switch (cfg->state) {
	case STATE_NORMAL:
		cfg->state = STATE_RESET;
		cxlflash_mark_contexts_error(cfg);
		rcr = afu_reset(cfg);
		if (rcr) {
			rc = FAILED;
			cfg->state = STATE_FAILTERM;
		} else
			cfg->state = STATE_NORMAL;
		wake_up_all(&cfg->reset_waitq);
		break;
	case STATE_RESET:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		if (cfg->state == STATE_NORMAL)
			break;
		/* fall through */
	default:
		rc = FAILED;
		break;
	}

	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * cxlflash_change_queue_depth() - change the queue depth for the device
 * @sdev:	SCSI device destined for queue depth change.
 * @qdepth:	Requested queue depth value to set.
 *
 * The requested queue depth is capped to the maximum supported value.
 *
 * Return: The actual queue depth set.
 */
static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
		qdepth = CXLFLASH_MAX_CMDS_PER_LUN;

	scsi_change_queue_depth(sdev, qdepth);
	return sdev->queue_depth;
}
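/*
 * Editor's sketch -- a standalone userspace analogue (not driver code) of
 * the STATE_RESET arms above: sleep until the state leaves RESET, then
 * re-evaluate, just as wake_up_all(&cfg->reset_waitq) releases the
 * wait_event() callers in the handlers:
 *
 *	#include <pthread.h>
 *
 *	enum { NORMAL, RESET, FAILTERM };
 *
 *	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
 *	static pthread_cond_t  cv   = PTHREAD_COND_INITIALIZER;
 *	static int state = NORMAL;
 *
 *	static int wait_while_reset(void)
 *	{
 *		int s;
 *
 *		pthread_mutex_lock(&lock);
 *		while (state == RESET)		// analogue of wait_event()
 *			pthread_cond_wait(&cv, &lock);
 *		s = state;			// re-evaluate after wakeup
 *		pthread_mutex_unlock(&lock);
 *		return s;
 *	}
 */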
/**
 * cxlflash_show_port_status() - queries and presents the current port status
 * @port:	Desired port for status reporting.
 * @afu:	AFU owning the specified port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t cxlflash_show_port_status(u32 port, struct afu *afu, char *buf)
{
	char *disp_status;
	u64 status;
	__be64 __iomem *fc_regs;

	if (port >= NUM_FC_PORTS)
		return 0;

	fc_regs = &afu->afu_map->global.fc_regs[port][0];
	status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
	status &= FC_MTIP_STATUS_MASK;

	if (status == FC_MTIP_STATUS_ONLINE)
		disp_status = "online";
	else if (status == FC_MTIP_STATUS_OFFLINE)
		disp_status = "offline";
	else
		disp_status = "unknown";

	return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
}

/**
 * port0_show() - queries and presents the current status of port 0
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port0_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
	struct afu *afu = cfg->afu;

	return cxlflash_show_port_status(0, afu, buf);
}

/**
 * port1_show() - queries and presents the current status of port 1
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port1_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
	struct afu *afu = cfg->afu;

	return cxlflash_show_port_status(1, afu, buf);
}

/**
 * lun_mode_show() - presents the current LUN mode of the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the LUN mode.
 * @buf:	Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t lun_mode_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
	struct afu *afu = cfg->afu;

	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
}
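/*
 * Editor's sketch -- a minimal userspace reader (not driver code) showing
 * how the attributes above are consumed. The path assumes SCSI host 0;
 * substitute the actual host number on a real system:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char status[16] = "";
 *		FILE *f = fopen("/sys/class/scsi_host/host0/port0", "r");
 *
 *		if (!f)
 *			return 1;
 *		if (fgets(status, sizeof(status), f))
 *			printf("port0: %s", status);	// "online", "offline"...
 *		fclose(f);
 *		return 0;
 *	}
 */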
/**
 * lun_mode_store() - sets the LUN mode of the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the LUN mode.
 * @buf:	Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
 * @count:	Length of data residing in @buf.
 *
 * The CXL Flash AFU supports a dummy LUN mode where the external
 * links and storage are not required. Space on the FPGA is used
 * to create 1 or 2 small LUNs which are presented to the system
 * as if they were a normal storage device. This feature is useful
 * during development and also provides manufacturing with a way
 * to test the AFU without an actual device.
 *
 *	0 = external LUN[s] (default)
 *	1 = internal LUN (1 x 64K, 512B blocks, id 0)
 *	2 = internal LUN (1 x 64K, 4K blocks, id 0)
 *	3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
 *	4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
 *
 * Return: The number of bytes consumed from @buf (always @count).
 */
static ssize_t lun_mode_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
	struct afu *afu = cfg->afu;
	int rc;
	u32 lun_mode;

	rc = kstrtouint(buf, 10, &lun_mode);
	if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
		afu->internal_lun = lun_mode;
		afu_reset(cfg);
		scsi_scan_host(cfg->host);
	}

	return count;
}

/**
 * ioctl_version_show() - presents the current ioctl version of the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the ioctl version.
 * @buf:	Buffer of length PAGE_SIZE to report back the ioctl version.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t ioctl_version_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0);
}

/**
 * cxlflash_show_port_lun_table() - queries and presents the port LUN table
 * @port:	Desired port for status reporting.
 * @afu:	AFU owning the specified port.
 * @buf:	Buffer of length PAGE_SIZE to report back the LUN table in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t cxlflash_show_port_lun_table(u32 port,
					    struct afu *afu,
					    char *buf)
{
	int i;
	ssize_t bytes = 0;
	__be64 __iomem *fc_port;

	if (port >= NUM_FC_PORTS)
		return 0;

	fc_port = &afu->afu_map->global.fc_port[port][0];

	for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
		bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
				   "%03d: %016llX\n", i, readq_be(&fc_port[i]));
	return bytes;
}

/**
 * port0_lun_table_show() - presents the current LUN table of port 0
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back the LUN table in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port0_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
	struct afu *afu = cfg->afu;

	return cxlflash_show_port_lun_table(0, afu, buf);
}
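/*
 * Editor's note -- the accumulation loop above is the standard way to build
 * multi-line sysfs output without overrunning PAGE_SIZE: unlike snprintf(),
 * scnprintf() returns the number of bytes actually written, so `bytes` can
 * never advance past the end of the buffer. A condensed kernel-context
 * sketch of the same shape (hypothetical names, not driver code):
 *
 *	static ssize_t example_table_show(char *buf)
 *	{
 *		ssize_t bytes = 0;
 *		int i;
 *
 *		for (i = 0; i < 16; i++)
 *			bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
 *					   "%03d: %016llX\n", i, 0ULL);
 *		return bytes;	// total length, never exceeds PAGE_SIZE
 *	}
 */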
/**
 * port1_lun_table_show() - presents the current LUN table of port 1
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back the LUN table in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port1_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
	struct afu *afu = cfg->afu;

	return cxlflash_show_port_lun_table(1, afu, buf);
}

/**
 * mode_show() - presents the current mode of the device
 * @dev:	Generic device associated with the SCSI device.
 * @attr:	Device attribute representing the device mode.
 * @buf:	Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t mode_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n",
			 sdev->hostdata ? "superpipe" : "legacy");
}

/*
 * Host attributes
 */
static DEVICE_ATTR_RO(port0);
static DEVICE_ATTR_RO(port1);
static DEVICE_ATTR_RW(lun_mode);
static DEVICE_ATTR_RO(ioctl_version);
static DEVICE_ATTR_RO(port0_lun_table);
static DEVICE_ATTR_RO(port1_lun_table);

static struct device_attribute *cxlflash_host_attrs[] = {
	&dev_attr_port0,
	&dev_attr_port1,
	&dev_attr_lun_mode,
	&dev_attr_ioctl_version,
	&dev_attr_port0_lun_table,
	&dev_attr_port1_lun_table,
	NULL
};

/*
 * Device attributes
 */
static DEVICE_ATTR_RO(mode);

static struct device_attribute *cxlflash_dev_attrs[] = {
	&dev_attr_mode,
	NULL
};

/*
 * Host template
 */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = CXLFLASH_ADAPTER_NAME,
	.info = cxlflash_driver_info,
	.ioctl = cxlflash_ioctl,
	.proc_name = CXLFLASH_NAME,
	.queuecommand = cxlflash_queuecommand,
	.eh_device_reset_handler = cxlflash_eh_device_reset_handler,
	.eh_host_reset_handler = cxlflash_eh_host_reset_handler,
	.change_queue_depth = cxlflash_change_queue_depth,
	.cmd_per_lun = 16,
	.can_queue = CXLFLASH_MAX_CMDS,
	.this_id = -1,
	.sg_tablesize = SG_NONE,	/* No scatter gather support */
	.max_sectors = CXLFLASH_MAX_SECTORS,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = cxlflash_host_attrs,
	.sdev_attrs = cxlflash_dev_attrs,
};

/*
 * Device dependent values
 */
static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS };
static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS };

/*
 * PCI device binding table
 */
static struct pci_device_id cxlflash_pci_table[] = {
	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
	{}
};

MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
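/*
 * Editor's note -- the driver_data slot in each table entry above is how a
 * per-device struct dev_dependent_vals pointer reaches the probe routine,
 * which recovers it from the matched pci_device_id. A condensed sketch of
 * that lookup (hypothetical probe name, not driver code):
 *
 *	static int example_probe(struct pci_dev *pdev,
 *				 const struct pci_device_id *id)
 *	{
 *		struct dev_dependent_vals *ddv =
 *			(struct dev_dependent_vals *)id->driver_data;
 *
 *		// Tune per-device limits before allocating the SCSI host,
 *		// mirroring what cxlflash_probe() does below.
 *		return ddv->max_sectors ? 0 : -ENODEV;
 *	}
 */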
/**
 * cxlflash_worker_thread() - work thread handler for the AFU
 * @work:	Work structure contained within cxlflash associated with host.
 *
 * Handles the following events:
 * - Link reset, which cannot be performed in interrupt context because
 *   it can block for up to a few seconds
 * - Read AFU command room
 * - Rescan the host
 */
static void cxlflash_worker_thread(struct work_struct *work)
{
	struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
						work_q);
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	int port;
	ulong lock_flags;

	/* Avoid MMIO if the device has failed */
	if (cfg->state != STATE_NORMAL)
		return;

	spin_lock_irqsave(cfg->host->host_lock, lock_flags);

	if (cfg->lr_state == LINK_RESET_REQUIRED) {
		port = cfg->lr_port;
		if (port < 0)
			dev_err(dev, "%s: invalid port index %d\n",
				__func__, port);
		else {
			spin_unlock_irqrestore(cfg->host->host_lock,
					       lock_flags);

			/* The reset can block... */
			afu_link_reset(afu, port,
				       &afu->afu_map->global.fc_regs[port][0]);
			spin_lock_irqsave(cfg->host->host_lock, lock_flags);
		}

		cfg->lr_state = LINK_RESET_COMPLETE;
	}

	if (afu->read_room) {
		atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));
		afu->read_room = false;
	}

	spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);

	if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
		scsi_scan_host(cfg->host);
	kref_put(&afu->mapcount, afu_unmap);
}
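/*
 * Editor's note -- dropping host_lock around afu_link_reset() above is the
 * usual spinlock discipline for a blocking call: release, block, reacquire,
 * then re-validate any state the lock protected. A condensed sketch of the
 * shape (hypothetical names, not driver code):
 *
 *	static void example_do_blocking_work(spinlock_t *lock,
 *					     void (*blocking_op)(void))
 *	{
 *		ulong flags;
 *
 *		spin_lock_irqsave(lock, flags);
 *		// ...examine state under the lock...
 *		spin_unlock_irqrestore(lock, flags);	// can't sleep locked
 *
 *		blocking_op();				// may sleep for seconds
 *
 *		spin_lock_irqsave(lock, flags);
 *		// ...state may have changed while unlocked; re-check...
 *		spin_unlock_irqrestore(lock, flags);
 *	}
 */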
/**
 * cxlflash_probe() - PCI entry point to add host
 * @pdev:	PCI device associated with the host.
 * @dev_id:	PCI device id associated with device.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_probe(struct pci_dev *pdev,
			  const struct pci_device_id *dev_id)
{
	struct Scsi_Host *host;
	struct cxlflash_cfg *cfg = NULL;
	struct device *phys_dev;
	struct dev_dependent_vals *ddv;
	int rc = 0;

	dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
		__func__, pdev->irq);

	ddv = (struct dev_dependent_vals *)dev_id->driver_data;
	driver_template.max_sectors = ddv->max_sectors;

	host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
	if (!host) {
		dev_err(&pdev->dev, "%s: call to scsi_host_alloc failed!\n",
			__func__);
		rc = -ENOMEM;
		goto out;
	}

	host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
	host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
	host->max_channel = NUM_FC_PORTS - 1;
	host->unique_id = host->host_no;
	host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;

	cfg = (struct cxlflash_cfg *)host->hostdata;
	cfg->host = host;
	rc = alloc_mem(cfg);
	if (rc) {
		dev_err(&pdev->dev, "%s: call to alloc_mem failed!\n",
			__func__);
		rc = -ENOMEM;
		scsi_host_put(cfg->host);
		goto out;
	}

	cfg->init_state = INIT_STATE_NONE;
	cfg->dev = pdev;
	cfg->cxl_fops = cxlflash_cxl_fops;

	/*
	 * The promoted LUNs move to the top of the LUN table. The rest stay
	 * on the bottom half. The bottom half grows from the end
	 * (index = 255), whereas the top half grows from the beginning
	 * (index = 0).
	 */
	cfg->promote_lun_index = 0;
	cfg->last_lun_index[0] = CXLFLASH_NUM_VLUNS/2 - 1;
	cfg->last_lun_index[1] = CXLFLASH_NUM_VLUNS/2 - 1;

	cfg->dev_id = (struct pci_device_id *)dev_id;

	init_waitqueue_head(&cfg->tmf_waitq);
	init_waitqueue_head(&cfg->reset_waitq);

	INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
	cfg->lr_state = LINK_RESET_INVALID;
	cfg->lr_port = -1;
	spin_lock_init(&cfg->tmf_slock);
	mutex_init(&cfg->ctx_tbl_list_mutex);
	mutex_init(&cfg->ctx_recovery_mutex);
	init_rwsem(&cfg->ioctl_rwsem);
	INIT_LIST_HEAD(&cfg->ctx_err_recovery);
	INIT_LIST_HEAD(&cfg->lluns);

	pci_set_drvdata(pdev, cfg);

	/*
	 * Use the special service provided to look up the physical
	 * PCI device, since we are called on the probe of the virtual
	 * PCI host bus (vphb)
	 */
	phys_dev = cxl_get_phys_dev(pdev);
	if (!dev_is_pci(phys_dev)) {
		dev_err(&pdev->dev, "%s: not a pci dev\n", __func__);
		rc = -ENODEV;
		goto out_remove;
	}
	cfg->parent_dev = to_pci_dev(phys_dev);

	cfg->cxl_afu = cxl_pci_to_afu(pdev);

	rc = init_pci(cfg);
	if (rc) {
		dev_err(&pdev->dev, "%s: call to init_pci "
			"failed rc=%d!\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_PCI;

	rc = init_afu(cfg);
	if (rc) {
		dev_err(&pdev->dev, "%s: call to init_afu "
			"failed rc=%d!\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_AFU;

	rc = init_scsi(cfg);
	if (rc) {
		dev_err(&pdev->dev, "%s: call to init_scsi "
			"failed rc=%d!\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_SCSI;

out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;

out_remove:
	cxlflash_remove(pdev);
	goto out;
}

/**
 * drain_ioctls() - wait until all currently executing ioctls have completed
 * @cfg:	Internal structure associated with the host.
 *
 * Obtains write access to the read/write semaphore that wraps ioctl
 * handling in order to 'drain' any ioctls currently executing.
 */
static void drain_ioctls(struct cxlflash_cfg *cfg)
{
	down_write(&cfg->ioctl_rwsem);
	up_write(&cfg->ioctl_rwsem);
}
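/*
 * Editor's note -- the empty down_write()/up_write() pair above works
 * because a writer is excluded until every reader has finished: acquiring
 * and immediately releasing write access therefore acts as a barrier for
 * in-flight ioctls, each of which holds the semaphore for reading. A
 * hypothetical reader-side sketch for symmetry (not the driver's actual
 * ioctl entry point):
 *
 *	static int example_ioctl(struct cxlflash_cfg *cfg)
 *	{
 *		int rc = 0;
 *
 *		down_read(&cfg->ioctl_rwsem);	// blocks a concurrent drain
 *		// ...perform the ioctl work...
 *		up_read(&cfg->ioctl_rwsem);	// lets drain_ioctls() proceed
 *		return rc;
 *	}
 */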
/**
 * cxlflash_pci_error_detected() - called when a PCI error is detected
 * @pdev:	PCI device struct.
 * @state:	PCI channel state.
 *
 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
						    pci_channel_state_t state)
{
	int rc = 0;
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);

	switch (state) {
	case pci_channel_io_frozen:
		cfg->state = STATE_RESET;
		scsi_block_requests(cfg->host);
		drain_ioctls(cfg);
		rc = cxlflash_mark_contexts_error(cfg);
		if (unlikely(rc))
			dev_err(dev, "%s: Failed to mark user contexts!(%d)\n",
				__func__, rc);
		term_mc(cfg, UNDO_START);
		stop_afu(cfg);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		cfg->state = STATE_FAILTERM;
		wake_up_all(&cfg->reset_waitq);
		scsi_unblock_requests(cfg->host);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		break;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * cxlflash_pci_slot_reset() - called when PCI slot has been reset
 * @pdev:	PCI device struct.
 *
 * This routine is called by the pci error recovery code after the PCI
 * slot has been reset, just before we should resume normal operations.
 *
 * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
{
	int rc = 0;
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);

	rc = init_afu(cfg);
	if (unlikely(rc)) {
		dev_err(dev, "%s: EEH recovery failed! (%d)\n", __func__, rc);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * cxlflash_pci_resume() - called when normal operation can resume
 * @pdev:	PCI device struct
 */
static void cxlflash_pci_resume(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);

	cfg->state = STATE_NORMAL;
	wake_up_all(&cfg->reset_waitq);
	scsi_unblock_requests(cfg->host);
}

static const struct pci_error_handlers cxlflash_err_handler = {
	.error_detected = cxlflash_pci_error_detected,
	.slot_reset = cxlflash_pci_slot_reset,
	.resume = cxlflash_pci_resume,
};

/*
 * PCI device structure
 */
static struct pci_driver cxlflash_driver = {
	.name = CXLFLASH_NAME,
	.id_table = cxlflash_pci_table,
	.probe = cxlflash_probe,
	.remove = cxlflash_remove,
	.err_handler = &cxlflash_err_handler,
};

/**
 * init_cxlflash() - module entry point
 *
 * Return: 0 on success, -errno on failure
 */
static int __init init_cxlflash(void)
{
	pr_info("%s: %s\n", __func__, CXLFLASH_ADAPTER_NAME);

	cxlflash_list_init();

	return pci_register_driver(&cxlflash_driver);
}

/**
 * exit_cxlflash() - module exit point
 */
static void __exit exit_cxlflash(void)
{
	cxlflash_term_global_luns();
	cxlflash_free_errpage();

	pci_unregister_driver(&cxlflash_driver);
}

module_init(init_cxlflash);
module_exit(exit_cxlflash);