/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/syscalls.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "sislite.h"
#include "common.h"
#include "vlun.h"
#include "superpipe.h"

struct cxlflash_global global;

/**
 * marshal_rele_to_resize() - translate release to resize structure
 * @release:	Source structure from which to translate/copy.
 * @resize:	Destination structure for the translate/copy.
 */
static void marshal_rele_to_resize(struct dk_cxlflash_release *release,
				   struct dk_cxlflash_resize *resize)
{
	resize->hdr = release->hdr;
	resize->context_id = release->context_id;
	resize->rsrc_handle = release->rsrc_handle;
}

/**
 * marshal_det_to_rele() - translate detach to release structure
 * @detach:	Source structure from which to translate/copy.
 * @release:	Destination structure for the translate/copy.
 */
static void marshal_det_to_rele(struct dk_cxlflash_detach *detach,
				struct dk_cxlflash_release *release)
{
	release->hdr = detach->hdr;
	release->context_id = detach->context_id;
}

/**
 * marshal_udir_to_rele() - translate udirect to release structure
 * @udirect:	Source structure from which to translate/copy.
 * @release:	Destination structure for the translate/copy.
 */
static void marshal_udir_to_rele(struct dk_cxlflash_udirect *udirect,
				 struct dk_cxlflash_release *release)
{
	release->hdr = udirect->hdr;
	release->context_id = udirect->context_id;
	release->rsrc_handle = udirect->rsrc_handle;
}

/**
 * cxlflash_free_errpage() - frees resources associated with global error page
 */
void cxlflash_free_errpage(void)
{
	mutex_lock(&global.mutex);
	if (global.err_page) {
		__free_page(global.err_page);
		global.err_page = NULL;
	}
	mutex_unlock(&global.mutex);
}

/**
 * cxlflash_stop_term_user_contexts() - stops/terminates known user contexts
 * @cfg:	Internal structure associated with the host.
 *
 * When the host needs to go down, all users must be quiesced and their
 * memory freed. This is accomplished by putting the contexts in error
 * state, which will notify the users and let them 'drive' the tear down.
 * Meanwhile, this routine camps until all user contexts have been removed.
 *
 * Note that the main loop in this routine will always execute at least once
 * to flush the reset_waitq.
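 *
 * (Presumably invoked from the adapter teardown path; that call site is an
 * assumption of this note.) The routine first poisons all user mappings via
 * cxlflash_mark_contexts_error(), then polls once per second, waking the
 * reset_waitq, until both the context table and the error list are empty.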
 */
void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int i, found = true;

	cxlflash_mark_contexts_error(cfg);

	while (true) {
		for (i = 0; i < MAX_CONTEXT; i++)
			if (cfg->ctx_tbl[i]) {
				found = true;
				break;
			}

		if (!found && list_empty(&cfg->ctx_err_recovery))
			return;

		dev_dbg(dev, "%s: Wait for user contexts to quiesce...\n",
			__func__);
		wake_up_all(&cfg->reset_waitq);
		ssleep(1);
		found = false;
	}
}

/**
 * find_error_context() - locates a context by cookie on the error recovery list
 * @cfg:	Internal structure associated with the host.
 * @rctxid:	Desired context by id.
 * @file:	Desired context by file.
 *
 * Return: Found context on success, NULL on failure
 */
static struct ctx_info *find_error_context(struct cxlflash_cfg *cfg, u64 rctxid,
					   struct file *file)
{
	struct ctx_info *ctxi;

	list_for_each_entry(ctxi, &cfg->ctx_err_recovery, list)
		if ((ctxi->ctxid == rctxid) || (ctxi->file == file))
			return ctxi;

	return NULL;
}

/**
 * get_context() - obtains a validated and locked context reference
 * @cfg:	Internal structure associated with the host.
 * @rctxid:	Desired context (raw, un-decoded format).
 * @arg:	LUN information or file associated with request.
 * @ctx_ctrl:	Control information to 'steer' desired lookup.
 *
 * NOTE: Despite the name pid, in Linux, current->pid actually refers
 * to the lightweight process id (tid) and can change if the process is
 * multi-threaded. The tgid remains constant for the process and only
 * changes when the process forks. For all intents and purposes, think
 * of tgid as a pid in the traditional sense.
 *
 * Return: Validated context on success, NULL on failure
 */
struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid,
			     void *arg, enum ctx_ctrl ctx_ctrl)
{
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct lun_access *lun_access = NULL;
	struct file *file = NULL;
	struct llun_info *lli = arg;
	u64 ctxid = DECODE_CTXID(rctxid);
	int rc;
	pid_t pid = task_tgid_nr(current), ctxpid = 0;

	if (ctx_ctrl & CTX_CTRL_FILE) {
		lli = NULL;
		file = (struct file *)arg;
	}

	if (ctx_ctrl & CTX_CTRL_CLONE)
		pid = task_ppid_nr(current);

	if (likely(ctxid < MAX_CONTEXT)) {
		while (true) {
			mutex_lock(&cfg->ctx_tbl_list_mutex);
			ctxi = cfg->ctx_tbl[ctxid];
			if (ctxi)
				if ((file && (ctxi->file != file)) ||
				    (!file && (ctxi->ctxid != rctxid)))
					ctxi = NULL;

			if ((ctx_ctrl & CTX_CTRL_ERR) ||
			    (!ctxi && (ctx_ctrl & CTX_CTRL_ERR_FALLBACK)))
				ctxi = find_error_context(cfg, rctxid, file);
			if (!ctxi) {
				mutex_unlock(&cfg->ctx_tbl_list_mutex);
				goto out;
			}

			/*
			 * Need to acquire ownership of the context while still
			 * under the table/list lock to serialize with a remove
			 * thread. Use the 'try' to avoid stalling the
			 * table/list lock for a single context.
			 *
			 * Note that the lock order is:
			 *
			 *	cfg->ctx_tbl_list_mutex -> ctxi->mutex
			 *
			 * Therefore release ctx_tbl_list_mutex before retrying.
			 */
			rc = mutex_trylock(&ctxi->mutex);
			mutex_unlock(&cfg->ctx_tbl_list_mutex);
			if (rc)
				break; /* got the context's lock! */
		}

		if (ctxi->unavail)
			goto denied;

		ctxpid = ctxi->pid;
		if (likely(!(ctx_ctrl & CTX_CTRL_NOPID)))
			if (pid != ctxpid)
				goto denied;

		if (lli) {
			list_for_each_entry(lun_access, &ctxi->luns, list)
				if (lun_access->lli == lli)
					goto out;
			goto denied;
		}
	}

out:
	dev_dbg(dev, "%s: rctxid=%016llx ctxinfo=%p ctxpid=%u pid=%u "
		"ctx_ctrl=%u\n", __func__, rctxid, ctxi, ctxpid, pid,
		ctx_ctrl);

	return ctxi;

denied:
	mutex_unlock(&ctxi->mutex);
	ctxi = NULL;
	goto out;
}

/**
 * put_context() - releases a context that was retrieved from get_context()
 * @ctxi:	Context to release.
 *
 * For now, releasing the context equates to unlocking its mutex.
 */
void put_context(struct ctx_info *ctxi)
{
	mutex_unlock(&ctxi->mutex);
}

/**
 * afu_attach() - attach a context to the AFU
 * @cfg:	Internal structure associated with the host.
 * @ctxi:	Context to attach.
 *
 * Upon setting the context capabilities, they must be confirmed with
 * a read back operation as the context might have been closed since
 * the mailbox was unlocked. When this occurs, registration is failed.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
{
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct sisl_ctrl_map __iomem *ctrl_map = ctxi->ctrl_map;
	int rc = 0;
	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
	u64 val;
	int i;

	/* Unlock cap and restrict user to read/write cmds in translated mode */
	readq_be(&ctrl_map->mbox_r);
	val = (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD);
	writeq_be(val, &ctrl_map->ctx_cap);
	val = readq_be(&ctrl_map->ctx_cap);
	if (val != (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD)) {
		dev_err(dev, "%s: ctx may be closed val=%016llx\n",
			__func__, val);
		rc = -EAGAIN;
		goto out;
	}

	if (afu_is_ocxl_lisn(afu)) {
		/* Set up the LISN effective address for each interrupt */
		for (i = 0; i < ctxi->irqs; i++) {
			val = cfg->ops->get_irq_objhndl(ctxi->ctx, i);
			writeq_be(val, &ctrl_map->lisn_ea[i]);
		}

		/* Use primary HWQ PASID as identifier for all interrupts */
		val = hwq->ctx_hndl;
		writeq_be(SISL_LISN_PASID(val, val), &ctrl_map->lisn_pasid[0]);
		writeq_be(SISL_LISN_PASID(0UL, val), &ctrl_map->lisn_pasid[1]);
	}

	/* Set up MMIO registers pointing to the RHT */
	writeq_be((u64)ctxi->rht_start, &ctrl_map->rht_start);
	val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(hwq->ctx_hndl));
	writeq_be(val, &ctrl_map->rht_cnt_id);
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * read_cap16() - issues a SCSI READ_CAP16 command
 * @sdev:	SCSI device associated with LUN.
 * @lli:	LUN destined for capacity request.
 *
 * The READ_CAP16 can take quite a while to complete. Should an EEH occur while
 * in scsi_execute(), the EEH handler will attempt to recover. As part of the
 * recovery, the handler drains all currently running ioctls, waiting until they
 * have completed before proceeding with a reset. As this routine is used on the
 * ioctl path, this can create a condition where the EEH handler becomes stuck,
 * infinitely waiting for this ioctl thread.
 * To avoid this behavior, temporarily
 * unmark this thread as an ioctl thread by releasing the ioctl read semaphore.
 * This will allow the EEH handler to proceed with a recovery while this thread
 * is still running. Once the scsi_execute() returns, reacquire the ioctl read
 * semaphore and check the adapter state in case it changed while inside of
 * scsi_execute(). The state check will wait if the adapter is still being
 * recovered or return a failure if the recovery failed. In the event that the
 * adapter reset failed, simply return the failure as the ioctl would be unable
 * to continue.
 *
 * Note that the above puts a requirement on this routine to only be called on
 * an ioctl thread.
 *
 * Return: 0 on success, -errno on failure
 */
static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct glun_info *gli = lli->parent;
	struct scsi_sense_hdr sshdr;
	u8 *cmd_buf = NULL;
	u8 *scsi_cmd = NULL;
	u8 *sense_buf = NULL;
	int rc = 0;
	int result = 0;
	int retry_cnt = 0;
	u32 to = CMD_TIMEOUT * HZ;

retry:
	cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
	scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
	sense_buf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
	if (unlikely(!cmd_buf || !scsi_cmd || !sense_buf)) {
		rc = -ENOMEM;
		goto out;
	}

	scsi_cmd[0] = SERVICE_ACTION_IN_16;	/* read cap(16) */
	scsi_cmd[1] = SAI_READ_CAPACITY_16;	/* service action */
	put_unaligned_be32(CMD_BUFSIZE, &scsi_cmd[10]);

	dev_dbg(dev, "%s: %ssending cmd(%02x)\n", __func__,
		retry_cnt ? "re" : "", scsi_cmd[0]);

	/* Drop the ioctl read semaphore across lengthy call */
	up_read(&cfg->ioctl_rwsem);
	result = scsi_execute(sdev, scsi_cmd, DMA_FROM_DEVICE, cmd_buf,
			      CMD_BUFSIZE, sense_buf, &sshdr, to, CMD_RETRIES,
			      0, 0, NULL);
	down_read(&cfg->ioctl_rwsem);
	rc = check_state(cfg);
	if (rc) {
		dev_err(dev, "%s: Failed state result=%08x\n",
			__func__, result);
		rc = -ENODEV;
		goto out;
	}

	if (driver_byte(result) == DRIVER_SENSE) {
		result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
		if (result & SAM_STAT_CHECK_CONDITION) {
			switch (sshdr.sense_key) {
			case NO_SENSE:
			case RECOVERED_ERROR:
				/* fall through */
			case NOT_READY:
				result &= ~SAM_STAT_CHECK_CONDITION;
				break;
			case UNIT_ATTENTION:
				switch (sshdr.asc) {
				case 0x29: /* Power on Reset or Device Reset */
					/* fall through */
				case 0x2A: /* Device capacity changed */
				case 0x3F: /* Report LUNs changed */
					/* Retry the command once more */
					if (retry_cnt++ < 1) {
						kfree(cmd_buf);
						kfree(scsi_cmd);
						kfree(sense_buf);
						goto retry;
					}
				}
				break;
			default:
				break;
			}
		}
	}

	if (result) {
		dev_err(dev, "%s: command failed, result=%08x\n",
			__func__, result);
		rc = -EIO;
		goto out;
	}

	/*
	 * Read cap was successful, grab values from the buffer;
	 * note that we don't need to worry about unaligned access
	 * as the buffer is allocated on an aligned boundary.
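	 *
	 * Per SBC-3, the READ CAPACITY(16) parameter data carries the
	 * last logical block address in bytes 0-7 and the logical block
	 * length in bytes 8-11, both big-endian; the be64/be32
	 * conversions below decode exactly those fields.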
	 */
	mutex_lock(&gli->mutex);
	gli->max_lba = be64_to_cpu(*((__be64 *)&cmd_buf[0]));
	gli->blk_len = be32_to_cpu(*((__be32 *)&cmd_buf[8]));
	mutex_unlock(&gli->mutex);

out:
	kfree(cmd_buf);
	kfree(scsi_cmd);
	kfree(sense_buf);

	dev_dbg(dev, "%s: maxlba=%lld blklen=%d rc=%d\n",
		__func__, gli->max_lba, gli->blk_len, rc);
	return rc;
}

/**
 * get_rhte() - obtains validated resource handle table entry reference
 * @ctxi:	Context owning the resource handle.
 * @rhndl:	Resource handle associated with entry.
 * @lli:	LUN associated with request.
 *
 * Return: Validated RHTE on success, NULL on failure
 */
struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl,
				struct llun_info *lli)
{
	struct cxlflash_cfg *cfg = ctxi->cfg;
	struct device *dev = &cfg->dev->dev;
	struct sisl_rht_entry *rhte = NULL;

	if (unlikely(!ctxi->rht_start)) {
		dev_dbg(dev, "%s: Context does not have allocated RHT\n",
			__func__);
		goto out;
	}

	if (unlikely(rhndl >= MAX_RHT_PER_CONTEXT)) {
		dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
			__func__, rhndl);
		goto out;
	}

	if (unlikely(ctxi->rht_lun[rhndl] != lli)) {
		dev_dbg(dev, "%s: Bad resource handle LUN rhndl=%d\n",
			__func__, rhndl);
		goto out;
	}

	rhte = &ctxi->rht_start[rhndl];
	if (unlikely(rhte->nmask == 0)) {
		dev_dbg(dev, "%s: Unopened resource handle rhndl=%d\n",
			__func__, rhndl);
		rhte = NULL;
		goto out;
	}

out:
	return rhte;
}

/**
 * rhte_checkout() - obtains free/empty resource handle table entry
 * @ctxi:	Context owning the resource handle.
 * @lli:	LUN associated with request.
 *
 * Return: Free RHTE on success, NULL on failure
 */
struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi,
				     struct llun_info *lli)
{
	struct cxlflash_cfg *cfg = ctxi->cfg;
	struct device *dev = &cfg->dev->dev;
	struct sisl_rht_entry *rhte = NULL;
	int i;

	/* Find a free RHT entry */
	for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
		if (ctxi->rht_start[i].nmask == 0) {
			rhte = &ctxi->rht_start[i];
			ctxi->rht_out++;
			break;
		}

	if (likely(rhte))
		ctxi->rht_lun[i] = lli;

	dev_dbg(dev, "%s: returning rhte=%p index=%d\n", __func__, rhte, i);
	return rhte;
}

/**
 * rhte_checkin() - releases a resource handle table entry
 * @ctxi:	Context owning the resource handle.
 * @rhte:	RHTE to release.
 */
void rhte_checkin(struct ctx_info *ctxi,
		  struct sisl_rht_entry *rhte)
{
	u32 rsrc_handle = rhte - ctxi->rht_start;

	rhte->nmask = 0;
	rhte->fp = 0;
	ctxi->rht_out--;
	ctxi->rht_lun[rsrc_handle] = NULL;
	ctxi->rht_needs_ws[rsrc_handle] = false;
}

/**
 * rht_format1() - populates a RHTE for format 1
 * @rhte:	RHTE to populate.
 * @lun_id:	LUN ID of LUN associated with RHTE.
 * @perm:	Desired permissions for RHTE.
 * @port_sel:	Port selection mask.
 */
static void rht_format1(struct sisl_rht_entry *rhte, u64 lun_id, u32 perm,
			u32 port_sel)
{
	/*
	 * Populate the Format 1 RHT entry for direct access (physical
	 * LUN) using the synchronization sequence defined in the
	 * SISLite specification.
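	 *
	 * In short: publish the format bit, then the LUN id, then enable
	 * the entry by writing its second dword (valid bit set) in a
	 * single store, with a dma_wmb() ordering each step so the AFU
	 * never observes a partially built entry.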
	 */
	struct sisl_rht_entry_f1 dummy = { 0 };
	struct sisl_rht_entry_f1 *rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;

	memset(rhte_f1, 0, sizeof(*rhte_f1));
	rhte_f1->fp = SISL_RHT_FP(1U, 0);
	dma_wmb(); /* Make setting of format bit visible */

	rhte_f1->lun_id = lun_id;
	dma_wmb(); /* Make setting of LUN id visible */

	/*
	 * Use a dummy RHT Format 1 entry to build the second dword
	 * of the entry that must be populated in a single write when
	 * enabled (valid bit set to TRUE).
	 */
	dummy.valid = 0x80;
	dummy.fp = SISL_RHT_FP(1U, perm);
	dummy.port_sel = port_sel;
	rhte_f1->dw = dummy.dw;

	dma_wmb(); /* Make remaining RHT entry fields visible */
}

/**
 * cxlflash_lun_attach() - attaches a user to a LUN and manages the LUN's mode
 * @gli:	LUN to attach.
 * @mode:	Desired mode of the LUN.
 * @locked:	Mutex status on current thread.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked)
{
	int rc = 0;

	if (!locked)
		mutex_lock(&gli->mutex);

	if (gli->mode == MODE_NONE)
		gli->mode = mode;
	else if (gli->mode != mode) {
		pr_debug("%s: gli_mode=%d requested_mode=%d\n",
			 __func__, gli->mode, mode);
		rc = -EINVAL;
		goto out;
	}

	gli->users++;
	WARN_ON(gli->users <= 0);
out:
	pr_debug("%s: Returning rc=%d gli->mode=%u gli->users=%u\n",
		 __func__, rc, gli->mode, gli->users);
	if (!locked)
		mutex_unlock(&gli->mutex);
	return rc;
}

/**
 * cxlflash_lun_detach() - detaches a user from a LUN and resets the LUN's mode
 * @gli:	LUN to detach.
 *
 * When resetting the mode, terminate block allocation resources as they
 * are no longer required (service is safe to call even when block allocation
 * resources were not present - such as when transitioning from physical mode).
 * These resources will be reallocated when needed (subsequent transition to
 * virtual mode).
 */
void cxlflash_lun_detach(struct glun_info *gli)
{
	mutex_lock(&gli->mutex);
	WARN_ON(gli->mode == MODE_NONE);
	if (--gli->users == 0) {
		gli->mode = MODE_NONE;
		cxlflash_ba_terminate(&gli->blka.ba_lun);
	}
	pr_debug("%s: gli->users=%u\n", __func__, gli->users);
	WARN_ON(gli->users < 0);
	mutex_unlock(&gli->mutex);
}

/**
 * _cxlflash_disk_release() - releases the specified resource entry
 * @sdev:	SCSI device associated with LUN.
 * @ctxi:	Context owning resources.
 * @release:	Release ioctl data structure.
 *
 * For LUNs in virtual mode, the virtual LUN associated with the specified
 * resource handle is resized to 0 prior to releasing the RHTE. Note that the
 * AFU sync should _not_ be performed when the context is sitting on the error
 * recovery list. A context on the error recovery list is not known to the AFU
 * due to reset. When the context is recovered, it will be reattached and made
 * known again to the AFU.
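 *
 * For LUNs in physical mode, the Format 1 entry is torn down with the
 * mirror image of the rht_format1() setup sequence: invalidate first,
 * then clear the LUN id and the remaining fields, each step ordered by
 * a dma_wmb().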
 *
 * Return: 0 on success, -errno on failure
 */
int _cxlflash_disk_release(struct scsi_device *sdev,
			   struct ctx_info *ctxi,
			   struct dk_cxlflash_release *release)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct afu *afu = cfg->afu;
	bool put_ctx = false;

	struct dk_cxlflash_resize size;
	res_hndl_t rhndl = release->rsrc_handle;

	int rc = 0;
	int rcr = 0;
	u64 ctxid = DECODE_CTXID(release->context_id),
	    rctxid = release->context_id;

	struct sisl_rht_entry *rhte;
	struct sisl_rht_entry_f1 *rhte_f1;

	dev_dbg(dev, "%s: ctxid=%llu rhndl=%llu gli->mode=%u gli->users=%u\n",
		__func__, ctxid, release->rsrc_handle, gli->mode, gli->users);

	if (!ctxi) {
		ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
		if (unlikely(!ctxi)) {
			dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
				__func__, ctxid);
			rc = -EINVAL;
			goto out;
		}

		put_ctx = true;
	}

	rhte = get_rhte(ctxi, rhndl, lli);
	if (unlikely(!rhte)) {
		dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
			__func__, rhndl);
		rc = -EINVAL;
		goto out;
	}

	/*
	 * Resize to 0 for virtual LUNs by setting the size
	 * to 0. This will clear LXT_START and LXT_CNT fields
	 * in the RHT entry and properly sync with the AFU.
	 *
	 * Afterwards we clear the remaining fields.
	 */
	switch (gli->mode) {
	case MODE_VIRTUAL:
		marshal_rele_to_resize(release, &size);
		size.req_size = 0;
		rc = _cxlflash_vlun_resize(sdev, ctxi, &size);
		if (rc) {
			dev_dbg(dev, "%s: resize failed rc %d\n", __func__, rc);
			goto out;
		}

		break;
	case MODE_PHYSICAL:
		/*
		 * Clear the Format 1 RHT entry for direct access
		 * (physical LUN) using the synchronization sequence
		 * defined in the SISLite specification.
		 */
		rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;

		rhte_f1->valid = 0;
		dma_wmb(); /* Make revocation of RHT entry visible */

		rhte_f1->lun_id = 0;
		dma_wmb(); /* Make clearing of LUN id visible */

		rhte_f1->dw = 0;
		dma_wmb(); /* Make RHT entry bottom-half clearing visible */

		if (!ctxi->err_recovery_active) {
			rcr = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
			if (unlikely(rcr))
				dev_dbg(dev, "%s: AFU sync failed rc=%d\n",
					__func__, rcr);
		}
		break;
	default:
		WARN(1, "Unsupported LUN mode!");
		goto out;
	}

	rhte_checkin(ctxi, rhte);
	cxlflash_lun_detach(gli);

out:
	if (put_ctx)
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

int cxlflash_disk_release(struct scsi_device *sdev,
			  struct dk_cxlflash_release *release)
{
	return _cxlflash_disk_release(sdev, NULL, release);
}

/**
 * destroy_context() - releases a context
 * @cfg:	Internal structure associated with the host.
 * @ctxi:	Context to release.
 *
 * This routine is safe to be called with a non-initialized context.
 * Also note that the routine conditionally checks for the existence
 * of the context control map before clearing the RHT registers and
 * context capabilities because it is possible to destroy a context
 * while the context is in the error state (previous mapping was
 * removed [so there is no need to worry about clearing] and context
 * is waiting for a new mapping).
 */
static void destroy_context(struct cxlflash_cfg *cfg,
			    struct ctx_info *ctxi)
{
	struct afu *afu = cfg->afu;

	if (ctxi->initialized) {
		WARN_ON(!list_empty(&ctxi->luns));

		/* Clear RHT registers and drop all capabilities for context */
		if (afu->afu_map && ctxi->ctrl_map) {
			writeq_be(0, &ctxi->ctrl_map->rht_start);
			writeq_be(0, &ctxi->ctrl_map->rht_cnt_id);
			writeq_be(0, &ctxi->ctrl_map->ctx_cap);
		}
	}

	/* Free memory associated with context */
	free_page((ulong)ctxi->rht_start);
	kfree(ctxi->rht_needs_ws);
	kfree(ctxi->rht_lun);
	kfree(ctxi);
}

/**
 * create_context() - allocates and initializes a context
 * @cfg:	Internal structure associated with the host.
 *
 * Return: Allocated context on success, NULL on failure
 */
static struct ctx_info *create_context(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct llun_info **lli = NULL;
	u8 *ws = NULL;
	struct sisl_rht_entry *rhte;

	ctxi = kzalloc(sizeof(*ctxi), GFP_KERNEL);
	lli = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*lli)), GFP_KERNEL);
	ws = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*ws)), GFP_KERNEL);
	if (unlikely(!ctxi || !lli || !ws)) {
		dev_err(dev, "%s: Unable to allocate context\n", __func__);
		goto err;
	}

	rhte = (struct sisl_rht_entry *)get_zeroed_page(GFP_KERNEL);
	if (unlikely(!rhte)) {
		dev_err(dev, "%s: Unable to allocate RHT\n", __func__);
		goto err;
	}

	ctxi->rht_lun = lli;
	ctxi->rht_needs_ws = ws;
	ctxi->rht_start = rhte;
out:
	return ctxi;

err:
	kfree(ws);
	kfree(lli);
	kfree(ctxi);
	ctxi = NULL;
	goto out;
}

/**
 * init_context() - initializes a previously allocated context
 * @ctxi:	Previously allocated context
 * @cfg:	Internal structure associated with the host.
 * @ctx:	Previously obtained context cookie.
 * @ctxid:	Previously obtained process element associated with CXL context.
 * @file:	Previously obtained file associated with CXL context.
 * @perms:	User-specified permissions.
 * @irqs:	User-specified number of interrupts.
 */
static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg,
			 void *ctx, int ctxid, struct file *file, u32 perms,
			 u64 irqs)
{
	struct afu *afu = cfg->afu;

	ctxi->rht_perms = perms;
	ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
	ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
	ctxi->irqs = irqs;
	ctxi->pid = task_tgid_nr(current); /* tgid = pid */
	ctxi->ctx = ctx;
	ctxi->cfg = cfg;
	ctxi->file = file;
	ctxi->initialized = true;
	mutex_init(&ctxi->mutex);
	kref_init(&ctxi->kref);
	INIT_LIST_HEAD(&ctxi->luns);
	INIT_LIST_HEAD(&ctxi->list); /* initialize for list_empty() */
}

/**
 * remove_context() - context kref release handler
 * @kref:	Kernel reference associated with context to be removed.
 *
 * When a context no longer has any references it can safely be removed
 * from global access and destroyed.
Note that it is assumed the thread 861 * relinquishing access to the context holds its mutex. 862 */ 863 static void remove_context(struct kref *kref) 864 { 865 struct ctx_info *ctxi = container_of(kref, struct ctx_info, kref); 866 struct cxlflash_cfg *cfg = ctxi->cfg; 867 u64 ctxid = DECODE_CTXID(ctxi->ctxid); 868 869 /* Remove context from table/error list */ 870 WARN_ON(!mutex_is_locked(&ctxi->mutex)); 871 ctxi->unavail = true; 872 mutex_unlock(&ctxi->mutex); 873 mutex_lock(&cfg->ctx_tbl_list_mutex); 874 mutex_lock(&ctxi->mutex); 875 876 if (!list_empty(&ctxi->list)) 877 list_del(&ctxi->list); 878 cfg->ctx_tbl[ctxid] = NULL; 879 mutex_unlock(&cfg->ctx_tbl_list_mutex); 880 mutex_unlock(&ctxi->mutex); 881 882 /* Context now completely uncoupled/unreachable */ 883 destroy_context(cfg, ctxi); 884 } 885 886 /** 887 * _cxlflash_disk_detach() - detaches a LUN from a context 888 * @sdev: SCSI device associated with LUN. 889 * @ctxi: Context owning resources. 890 * @detach: Detach ioctl data structure. 891 * 892 * As part of the detach, all per-context resources associated with the LUN 893 * are cleaned up. When detaching the last LUN for a context, the context 894 * itself is cleaned up and released. 895 * 896 * Return: 0 on success, -errno on failure 897 */ 898 static int _cxlflash_disk_detach(struct scsi_device *sdev, 899 struct ctx_info *ctxi, 900 struct dk_cxlflash_detach *detach) 901 { 902 struct cxlflash_cfg *cfg = shost_priv(sdev->host); 903 struct device *dev = &cfg->dev->dev; 904 struct llun_info *lli = sdev->hostdata; 905 struct lun_access *lun_access, *t; 906 struct dk_cxlflash_release rel; 907 bool put_ctx = false; 908 909 int i; 910 int rc = 0; 911 u64 ctxid = DECODE_CTXID(detach->context_id), 912 rctxid = detach->context_id; 913 914 dev_dbg(dev, "%s: ctxid=%llu\n", __func__, ctxid); 915 916 if (!ctxi) { 917 ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK); 918 if (unlikely(!ctxi)) { 919 dev_dbg(dev, "%s: Bad context ctxid=%llu\n", 920 __func__, ctxid); 921 rc = -EINVAL; 922 goto out; 923 } 924 925 put_ctx = true; 926 } 927 928 /* Cleanup outstanding resources tied to this LUN */ 929 if (ctxi->rht_out) { 930 marshal_det_to_rele(detach, &rel); 931 for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) { 932 if (ctxi->rht_lun[i] == lli) { 933 rel.rsrc_handle = i; 934 _cxlflash_disk_release(sdev, ctxi, &rel); 935 } 936 937 /* No need to loop further if we're done */ 938 if (ctxi->rht_out == 0) 939 break; 940 } 941 } 942 943 /* Take our LUN out of context, free the node */ 944 list_for_each_entry_safe(lun_access, t, &ctxi->luns, list) 945 if (lun_access->lli == lli) { 946 list_del(&lun_access->list); 947 kfree(lun_access); 948 lun_access = NULL; 949 break; 950 } 951 952 /* 953 * Release the context reference and the sdev reference that 954 * bound this LUN to the context. 955 */ 956 if (kref_put(&ctxi->kref, remove_context)) 957 put_ctx = false; 958 scsi_device_put(sdev); 959 out: 960 if (put_ctx) 961 put_context(ctxi); 962 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 963 return rc; 964 } 965 966 static int cxlflash_disk_detach(struct scsi_device *sdev, 967 struct dk_cxlflash_detach *detach) 968 { 969 return _cxlflash_disk_detach(sdev, NULL, detach); 970 } 971 972 /** 973 * cxlflash_cxl_release() - release handler for adapter file descriptor 974 * @inode: File-system inode associated with fd. 975 * @file: File installed with adapter file descriptor. 
 *
 * This routine is the release handler for the fops registered with
 * the CXL services on an initial attach for a context. It is called
 * when a close (explicitly by the user or as part of a process tear
 * down) is performed on the adapter file descriptor returned to the
 * user. The user should be aware that an explicit close is considered
 * catastrophic and subsequent usage of the superpipe API with
 * previously saved off tokens will fail.
 *
 * This routine derives the context reference and calls detach for
 * each LUN associated with the context. The final detach operation
 * causes the context itself to be freed. Except for the case where the
 * CXL process element (context id) lookup fails (a case that should
 * theoretically never occur), every call into this routine results
 * in a complete freeing of a context.
 *
 * Detaching the LUN is typically an ioctl() operation and the underlying
 * code assumes that ioctl_rwsem has been acquired as a reader. To support
 * that design point, the semaphore is acquired and released around detach.
 *
 * Return: 0 on success
 */
static int cxlflash_cxl_release(struct inode *inode, struct file *file)
{
	struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
						cxl_fops);
	void *ctx = cfg->ops->fops_get_context(file);
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct dk_cxlflash_detach detach = { { 0 }, 0 };
	struct lun_access *lun_access, *t;
	enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
	int ctxid;

	ctxid = cfg->ops->process_element(ctx);
	if (unlikely(ctxid < 0)) {
		dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
			__func__, ctx, ctxid);
		goto out;
	}

	ctxi = get_context(cfg, ctxid, file, ctrl);
	if (unlikely(!ctxi)) {
		ctxi = get_context(cfg, ctxid, file, ctrl | CTX_CTRL_CLONE);
		if (!ctxi) {
			dev_dbg(dev, "%s: ctxid=%d already free\n",
				__func__, ctxid);
			goto out_release;
		}

		dev_dbg(dev, "%s: Another process owns ctxid=%d\n",
			__func__, ctxid);
		put_context(ctxi);
		goto out;
	}

	dev_dbg(dev, "%s: close for ctxid=%d\n", __func__, ctxid);

	down_read(&cfg->ioctl_rwsem);
	detach.context_id = ctxi->ctxid;
	list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
		_cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
	up_read(&cfg->ioctl_rwsem);
out_release:
	cfg->ops->fd_release(inode, file);
out:
	dev_dbg(dev, "%s: returning\n", __func__);
	return 0;
}

/**
 * unmap_context() - clears a previously established mapping
 * @ctxi:	Context owning the mapping.
 *
 * This routine is used to switch between the error notification page
 * (dummy page of all 1's) and the real mapping (established by the CXL
 * fault handler).
 */
static void unmap_context(struct ctx_info *ctxi)
{
	unmap_mapping_range(ctxi->file->f_mapping, 0, 0, 1);
}

/**
 * get_err_page() - obtains and allocates the error notification page
 * @cfg:	Internal structure associated with the host.
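 *
 * On first use, a page of all 1's is allocated and cached in the global
 * state so that subsequent faults can reuse it; see cxlflash_mmap_fault().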
 *
 * Return: error notification page on success, NULL on failure
 */
static struct page *get_err_page(struct cxlflash_cfg *cfg)
{
	struct page *err_page = global.err_page;
	struct device *dev = &cfg->dev->dev;

	if (unlikely(!err_page)) {
		err_page = alloc_page(GFP_KERNEL);
		if (unlikely(!err_page)) {
			dev_err(dev, "%s: Unable to allocate err_page\n",
				__func__);
			goto out;
		}

		memset(page_address(err_page), -1, PAGE_SIZE);

		/* Serialize update w/ other threads to avoid a leak */
		mutex_lock(&global.mutex);
		if (likely(!global.err_page))
			global.err_page = err_page;
		else {
			__free_page(err_page);
			err_page = global.err_page;
		}
		mutex_unlock(&global.mutex);
	}

out:
	dev_dbg(dev, "%s: returning err_page=%p\n", __func__, err_page);
	return err_page;
}

/**
 * cxlflash_mmap_fault() - mmap fault handler for adapter file descriptor
 * @vmf:	VM fault associated with current fault.
 *
 * To support error notification via MMIO, faults are 'caught' by this routine
 * that was inserted before passing back the adapter file descriptor on attach.
 * When a fault occurs, this routine evaluates if error recovery is active and
 * if so, installs the error page to 'notify' the user about the error state.
 * During normal operation, the fault is simply handled by the original fault
 * handler that was installed by CXL services as part of initializing the
 * adapter file descriptor. The VMA's page protection bits are toggled to
 * indicate cached/not-cached depending on the memory backing the fault.
 *
 * Return: 0 on success, VM_FAULT_SIGBUS on failure
 */
static int cxlflash_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct file *file = vma->vm_file;
	struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
						cxl_fops);
	void *ctx = cfg->ops->fops_get_context(file);
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct page *err_page = NULL;
	enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
	int rc = 0;
	int ctxid;

	ctxid = cfg->ops->process_element(ctx);
	if (unlikely(ctxid < 0)) {
		dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
			__func__, ctx, ctxid);
		goto err;
	}

	ctxi = get_context(cfg, ctxid, file, ctrl);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid);
		goto err;
	}

	dev_dbg(dev, "%s: fault for context %d\n", __func__, ctxid);

	if (likely(!ctxi->err_recovery_active)) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		rc = ctxi->cxl_mmap_vmops->fault(vmf);
	} else {
		dev_dbg(dev, "%s: err recovery active, use err_page\n",
			__func__);

		err_page = get_err_page(cfg);
		if (unlikely(!err_page)) {
			dev_err(dev, "%s: Could not get err_page\n", __func__);
			rc = VM_FAULT_RETRY;
			goto out;
		}

		get_page(err_page);
		vmf->page = err_page;
		vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
	}

out:
	if (likely(ctxi))
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;

err:
	rc = VM_FAULT_SIGBUS;
	goto out;
}

/*
 * Local MMAP vmops to 'catch' faults
 */
static const struct vm_operations_struct cxlflash_mmap_vmops = {
	.fault = cxlflash_mmap_fault,
};

/**
 * cxlflash_cxl_mmap() - mmap handler for adapter file descriptor
 * @file:	File installed with adapter file descriptor.
 * @vma:	VM area associated with mapping.
 *
 * Installs local mmap vmops to 'catch' faults for error notification support.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
						cxl_fops);
	void *ctx = cfg->ops->fops_get_context(file);
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
	int ctxid;
	int rc = 0;

	ctxid = cfg->ops->process_element(ctx);
	if (unlikely(ctxid < 0)) {
		dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
			__func__, ctx, ctxid);
		rc = -EIO;
		goto out;
	}

	ctxi = get_context(cfg, ctxid, file, ctrl);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid);
		rc = -EIO;
		goto out;
	}

	dev_dbg(dev, "%s: mmap for context %d\n", __func__, ctxid);

	rc = cfg->ops->fd_mmap(file, vma);
	if (likely(!rc)) {
		/* Insert ourself in the mmap fault handler path */
		ctxi->cxl_mmap_vmops = vma->vm_ops;
		vma->vm_ops = &cxlflash_mmap_vmops;
	}

out:
	if (likely(ctxi))
		put_context(ctxi);
	return rc;
}

const struct file_operations cxlflash_cxl_fops = {
	.owner = THIS_MODULE,
	.mmap = cxlflash_cxl_mmap,
	.release = cxlflash_cxl_release,
};

/**
 * cxlflash_mark_contexts_error() - move contexts to error state and list
 * @cfg:	Internal structure associated with the host.
 *
 * A context is only moved over to the error list when there are no outstanding
 * references to it. This ensures that a running operation has completed.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg)
{
	int i, rc = 0;
	struct ctx_info *ctxi = NULL;

	mutex_lock(&cfg->ctx_tbl_list_mutex);

	for (i = 0; i < MAX_CONTEXT; i++) {
		ctxi = cfg->ctx_tbl[i];
		if (ctxi) {
			mutex_lock(&ctxi->mutex);
			cfg->ctx_tbl[i] = NULL;
			list_add(&ctxi->list, &cfg->ctx_err_recovery);
			ctxi->err_recovery_active = true;
			ctxi->ctrl_map = NULL;
			unmap_context(ctxi);
			mutex_unlock(&ctxi->mutex);
		}
	}

	mutex_unlock(&cfg->ctx_tbl_list_mutex);
	return rc;
}

/*
 * Dummy NULL fops
 */
static const struct file_operations null_fops = {
	.owner = THIS_MODULE,
};

/**
 * check_state() - checks and responds to the current adapter state
 * @cfg:	Internal structure associated with the host.
 *
 * This routine can block and should only be used in process context.
 * It assumes that the caller is an ioctl thread holding the ioctl read
 * semaphore, which is temporarily released across the wait to allow
 * actively running ioctls to drain. Also note that when waking up from
 * waiting in reset, the state is unknown and must be checked again
 * before proceeding.
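 *
 * A minimal caller sketch (mirroring the ioctl paths in this file;
 * assumes the ioctl read semaphore is already held):
 *
 *	rc = check_state(cfg);
 *	if (rc)
 *		return rc;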
 *
 * Return: 0 on success, -errno on failure
 */
int check_state(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

retry:
	switch (cfg->state) {
	case STATE_RESET:
		dev_dbg(dev, "%s: Reset state, going to wait...\n", __func__);
		up_read(&cfg->ioctl_rwsem);
		rc = wait_event_interruptible(cfg->reset_waitq,
					      cfg->state != STATE_RESET);
		down_read(&cfg->ioctl_rwsem);
		if (unlikely(rc))
			break;
		goto retry;
	case STATE_FAILTERM:
		dev_dbg(dev, "%s: Failed/Terminating\n", __func__);
		rc = -ENODEV;
		break;
	default:
		break;
	}

	return rc;
}

/**
 * cxlflash_disk_attach() - attach a LUN to a context
 * @sdev:	SCSI device associated with LUN.
 * @attach:	Attach ioctl data structure.
 *
 * Creates a context and attaches LUN to it. A LUN can only be attached
 * one time to a context (subsequent attaches for the same context/LUN pair
 * are not supported). Additional LUNs can be attached to a context by
 * specifying the 'reuse' flag defined in the cxlflash_ioctl.h header.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_disk_attach(struct scsi_device *sdev,
				struct dk_cxlflash_attach *attach)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct ctx_info *ctxi = NULL;
	struct lun_access *lun_access = NULL;
	int rc = 0;
	u32 perms;
	int ctxid = -1;
	u64 irqs = attach->num_interrupts;
	u64 flags = 0UL;
	u64 rctxid = 0UL;
	struct file *file = NULL;

	void *ctx = NULL;

	int fd = -1;

	if (irqs > 4) {
		dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n",
			__func__, irqs);
		rc = -EINVAL;
		goto out;
	}

	if (gli->max_lba == 0) {
		dev_dbg(dev, "%s: No capacity info for LUN=%016llx\n",
			__func__, lli->lun_id[sdev->channel]);
		rc = read_cap16(sdev, lli);
		if (rc) {
			dev_err(dev, "%s: Invalid device rc=%d\n",
				__func__, rc);
			rc = -ENODEV;
			goto out;
		}
		dev_dbg(dev, "%s: LBA = %016llx\n", __func__, gli->max_lba);
		dev_dbg(dev, "%s: BLK_LEN = %08x\n", __func__, gli->blk_len);
	}

	if (attach->hdr.flags & DK_CXLFLASH_ATTACH_REUSE_CONTEXT) {
		rctxid = attach->context_id;
		ctxi = get_context(cfg, rctxid, NULL, 0);
		if (!ctxi) {
			dev_dbg(dev, "%s: Bad context rctxid=%016llx\n",
				__func__, rctxid);
			rc = -EINVAL;
			goto out;
		}

		list_for_each_entry(lun_access, &ctxi->luns, list)
			if (lun_access->lli == lli) {
				dev_dbg(dev, "%s: Already attached\n",
					__func__);
				rc = -EINVAL;
				goto out;
			}
	}

	rc = scsi_device_get(sdev);
	if (unlikely(rc)) {
		dev_err(dev, "%s: Unable to get sdev reference\n", __func__);
		goto out;
	}

	lun_access = kzalloc(sizeof(*lun_access), GFP_KERNEL);
	if (unlikely(!lun_access)) {
		dev_err(dev, "%s: Unable to allocate lun_access\n", __func__);
		rc = -ENOMEM;
		goto err;
	}

	lun_access->lli = lli;
	lun_access->sdev = sdev;

	/* Non-NULL context indicates reuse (another context reference) */
	if (ctxi) {
		dev_dbg(dev, "%s: Reusing context for LUN rctxid=%016llx\n",
			__func__, rctxid);
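		/* Each attached LUN holds its own reference on the context */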
		kref_get(&ctxi->kref);
		list_add(&lun_access->list, &ctxi->luns);
		goto out_attach;
	}

	ctxi = create_context(cfg);
	if (unlikely(!ctxi)) {
		dev_err(dev, "%s: Failed to create context ctxid=%d\n",
			__func__, ctxid);
		rc = -ENOMEM;
		goto err;
	}

	ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
	if (IS_ERR_OR_NULL(ctx)) {
		dev_err(dev, "%s: Could not initialize context %p\n",
			__func__, ctx);
		rc = -ENODEV;
		goto err;
	}

	rc = cfg->ops->start_work(ctx, irqs);
	if (unlikely(rc)) {
		dev_dbg(dev, "%s: Could not start context rc=%d\n",
			__func__, rc);
		goto err;
	}

	ctxid = cfg->ops->process_element(ctx);
	if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
		dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
		rc = -EPERM;
		goto err;
	}

	file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
	if (unlikely(fd < 0)) {
		rc = -ENODEV;
		dev_err(dev, "%s: Could not get file descriptor\n", __func__);
		goto err;
	}

	/* Translate read/write O_* flags from fcntl.h to AFU permission bits */
	perms = SISL_RHT_PERM(attach->hdr.flags + 1);

	/* Initialize the context; its mutex is locked below prior to insertion */
	init_context(ctxi, cfg, ctx, ctxid, file, perms, irqs);

	rc = afu_attach(cfg, ctxi);
	if (unlikely(rc)) {
		dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
		goto err;
	}

	/*
	 * No error paths after this point. Once the fd is installed it's
	 * visible to user space and can't be undone safely on this thread.
	 * There is no need to worry about a deadlock here because no one
	 * knows about us yet; we can be the only one holding our mutex.
	 */
	list_add(&lun_access->list, &ctxi->luns);
	mutex_lock(&cfg->ctx_tbl_list_mutex);
	mutex_lock(&ctxi->mutex);
	cfg->ctx_tbl[ctxid] = ctxi;
	mutex_unlock(&cfg->ctx_tbl_list_mutex);
	fd_install(fd, file);

out_attach:
	if (fd != -1)
		flags |= DK_CXLFLASH_APP_CLOSE_ADAP_FD;
	if (afu_is_sq_cmd_mode(afu))
		flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE;

	attach->hdr.return_flags = flags;
	attach->context_id = ctxi->ctxid;
	attach->block_size = gli->blk_len;
	attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
	attach->last_lba = gli->max_lba;
	attach->max_xfer = sdev->host->max_sectors * MAX_SECTOR_UNIT;
	attach->max_xfer /= gli->blk_len;

out:
	attach->adap_fd = fd;

	if (ctxi)
		put_context(ctxi);

	dev_dbg(dev, "%s: returning ctxid=%d fd=%d bs=%lld rc=%d llba=%lld\n",
		__func__, ctxid, fd, attach->block_size, rc, attach->last_lba);
	return rc;

err:
	/* Cleanup CXL context; okay to 'stop' even if it was not started */
	if (!IS_ERR_OR_NULL(ctx)) {
		cfg->ops->stop_context(ctx);
		cfg->ops->release_context(ctx);
		ctx = NULL;
	}

	/*
	 * Here, we're overriding the fops with a dummy all-NULL fops because
	 * fput() calls the release fop, which will cause us to mistakenly
	 * call into the CXL code. Rather than try to add yet more complexity
	 * to that routine (cxlflash_cxl_release) we should try to fix the
	 * issue here.
	 */
	if (fd > 0) {
		file->f_op = &null_fops;
		fput(file);
		put_unused_fd(fd);
		fd = -1;
		file = NULL;
	}

	/* Cleanup our context */
	if (ctxi) {
		destroy_context(cfg, ctxi);
		ctxi = NULL;
	}

	kfree(lun_access);
	scsi_device_put(sdev);
	goto out;
}

/**
 * recover_context() - recovers a context in error
 * @cfg:	Internal structure associated with the host.
 * @ctxi:	Context to release.
 * @adap_fd:	Adapter file descriptor associated with new/recovered context.
 *
 * Re-establishes the state for a context-in-error.
 *
 * Return: 0 on success, -errno on failure
 */
static int recover_context(struct cxlflash_cfg *cfg,
			   struct ctx_info *ctxi,
			   int *adap_fd)
{
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	int fd = -1;
	int ctxid = -1;
	struct file *file;
	void *ctx;
	struct afu *afu = cfg->afu;

	ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
	if (IS_ERR_OR_NULL(ctx)) {
		dev_err(dev, "%s: Could not initialize context %p\n",
			__func__, ctx);
		rc = -ENODEV;
		goto out;
	}

	rc = cfg->ops->start_work(ctx, ctxi->irqs);
	if (unlikely(rc)) {
		dev_dbg(dev, "%s: Could not start context rc=%d\n",
			__func__, rc);
		goto err1;
	}

	ctxid = cfg->ops->process_element(ctx);
	if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
		dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
		rc = -EPERM;
		goto err2;
	}

	file = cfg->ops->get_fd(ctx, &cfg->cxl_fops, &fd);
	if (unlikely(fd < 0)) {
		rc = -ENODEV;
		dev_err(dev, "%s: Could not get file descriptor\n", __func__);
		goto err2;
	}

	/* Update with new MMIO area based on updated context id */
	ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;

	rc = afu_attach(cfg, ctxi);
	if (rc) {
		dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
		goto err3;
	}

	/*
	 * No error paths after this point. Once the fd is installed it's
	 * visible to user space and can't be undone safely on this thread.
	 */
	ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
	ctxi->ctx = ctx;
	ctxi->file = file;

	/*
	 * Put context back in table (note the reinit of the context list);
	 * we must first drop the context's mutex and then acquire it in
	 * order with the table/list mutex to avoid a deadlock - safe to do
	 * here because no one can find us at this moment in time.
	 */
	mutex_unlock(&ctxi->mutex);
	mutex_lock(&cfg->ctx_tbl_list_mutex);
	mutex_lock(&ctxi->mutex);
	list_del_init(&ctxi->list);
	cfg->ctx_tbl[ctxid] = ctxi;
	mutex_unlock(&cfg->ctx_tbl_list_mutex);
	fd_install(fd, file);
	*adap_fd = fd;
out:
	dev_dbg(dev, "%s: returning ctxid=%d fd=%d rc=%d\n",
		__func__, ctxid, fd, rc);
	return rc;

err3:
	fput(file);
	put_unused_fd(fd);
err2:
	cfg->ops->stop_context(ctx);
err1:
	cfg->ops->release_context(ctx);
	goto out;
}

/**
 * cxlflash_afu_recover() - initiates AFU recovery
 * @sdev:	SCSI device associated with LUN.
 * @recover:	Recover ioctl data structure.
 *
 * Only a single recovery is allowed at a time to avoid exhausting CXL
 * resources (leading to recovery failure) in the event that we're up
 * against the maximum number of contexts limit.
 * For similar reasons,
 * a context recovery is retried if there are multiple recoveries taking
 * place at the same time and the failure was due to CXL services being
 * unable to keep up.
 *
 * As this routine is called on ioctl context, it holds the ioctl r/w
 * semaphore that is used to drain ioctls in recovery scenarios. The
 * implementation to achieve the pacing described above (a local mutex)
 * requires that the ioctl r/w semaphore be dropped and reacquired to
 * avoid a 3-way deadlock when multiple process recoveries operate in
 * parallel.
 *
 * Because a user can detect an error condition before the kernel, it is
 * quite possible for this routine to act as the kernel's EEH detection
 * source (MMIO read of mbox_r). Because of this, there is a window of
 * time where an EEH might have been detected but not yet 'serviced'
 * (callback invoked, causing the device to enter reset state). To avoid
 * looping in this routine during that window, a 1 second sleep is in place
 * between the time the MMIO failure is detected and the time a wait on the
 * reset wait queue is attempted via check_state().
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_afu_recover(struct scsi_device *sdev,
				struct dk_cxlflash_recover_afu *recover)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct afu *afu = cfg->afu;
	struct ctx_info *ctxi = NULL;
	struct mutex *mutex = &cfg->ctx_recovery_mutex;
	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
	u64 flags;
	u64 ctxid = DECODE_CTXID(recover->context_id),
	    rctxid = recover->context_id;
	long reg;
	bool locked = true;
	int lretry = 20; /* up to 2 seconds */
	int new_adap_fd = -1;
	int rc = 0;

	atomic_inc(&cfg->recovery_threads);
	up_read(&cfg->ioctl_rwsem);
	rc = mutex_lock_interruptible(mutex);
	down_read(&cfg->ioctl_rwsem);
	if (rc) {
		locked = false;
		goto out;
	}

	rc = check_state(cfg);
	if (rc) {
		dev_err(dev, "%s: Failed state rc=%d\n", __func__, rc);
		rc = -ENODEV;
		goto out;
	}

	dev_dbg(dev, "%s: reason=%016llx rctxid=%016llx\n",
		__func__, recover->reason, rctxid);

retry:
	/* Ensure that this process is attached to the context */
	ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
		rc = -EINVAL;
		goto out;
	}

	if (ctxi->err_recovery_active) {
retry_recover:
		rc = recover_context(cfg, ctxi, &new_adap_fd);
		if (unlikely(rc)) {
			dev_err(dev, "%s: Recovery failed ctxid=%llu rc=%d\n",
				__func__, ctxid, rc);
			if ((rc == -ENODEV) &&
			    ((atomic_read(&cfg->recovery_threads) > 1) ||
			     (lretry--))) {
				dev_dbg(dev, "%s: Going to try again\n",
					__func__);
				mutex_unlock(mutex);
				msleep(100);
				rc = mutex_lock_interruptible(mutex);
				if (rc) {
					locked = false;
					goto out;
				}
				goto retry_recover;
			}

			goto out;
		}

		ctxi->err_recovery_active = false;

		flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD |
			DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET;
		if (afu_is_sq_cmd_mode(afu))
			flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE;

		recover->hdr.return_flags = flags;
		recover->context_id = ctxi->ctxid;
		recover->adap_fd = new_adap_fd;
		recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
		goto out;
	}

	/* Test if in error state */
	reg = readq_be(&hwq->ctrl_map->mbox_r);
	if (reg == -1) {
		dev_dbg(dev, "%s: MMIO fail, wait for recovery.\n", __func__);

		/*
		 * Before checking the state, put back the context obtained with
		 * get_context() as it is no longer needed and sleep for a short
		 * period of time (see prolog notes).
		 */
		put_context(ctxi);
		ctxi = NULL;
		ssleep(1);
		rc = check_state(cfg);
		if (unlikely(rc))
			goto out;
		goto retry;
	}

	dev_dbg(dev, "%s: MMIO working, no recovery required\n", __func__);
out:
	if (likely(ctxi))
		put_context(ctxi);
	if (locked)
		mutex_unlock(mutex);
	atomic_dec_if_positive(&cfg->recovery_threads);
	return rc;
}

/**
 * process_sense() - evaluates and processes sense data
 * @sdev:	SCSI device associated with LUN.
 * @verify:	Verify ioctl data structure.
 *
 * Return: 0 on success, -errno on failure
 */
static int process_sense(struct scsi_device *sdev,
			 struct dk_cxlflash_verify *verify)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	u64 prev_lba = gli->max_lba;
	struct scsi_sense_hdr sshdr = { 0 };
	int rc = 0;

	rc = scsi_normalize_sense((const u8 *)&verify->sense_data,
				  DK_CXLFLASH_VERIFY_SENSE_LEN, &sshdr);
	if (!rc) {
		dev_err(dev, "%s: Failed to normalize sense data\n", __func__);
		rc = -EINVAL;
		goto out;
	}

	switch (sshdr.sense_key) {
	case NO_SENSE:
	case RECOVERED_ERROR:
		/* fall through */
	case NOT_READY:
		break;
	case UNIT_ATTENTION:
		switch (sshdr.asc) {
		case 0x29: /* Power on Reset or Device Reset */
			/* fall through */
		case 0x2A: /* Device settings/capacity changed */
			rc = read_cap16(sdev, lli);
			if (rc) {
				rc = -ENODEV;
				break;
			}
			if (prev_lba != gli->max_lba)
				dev_dbg(dev, "%s: Capacity changed old=%lld "
					"new=%lld\n", __func__, prev_lba,
					gli->max_lba);
			break;
		case 0x3F: /* Report LUNs changed, Rescan. */
			scsi_scan_host(cfg->host);
			break;
		default:
			rc = -EIO;
			break;
		}
		break;
	default:
		rc = -EIO;
		break;
	}
out:
	dev_dbg(dev, "%s: sense_key %x asc %x ascq %x rc %d\n", __func__,
		sshdr.sense_key, sshdr.asc, sshdr.ascq, rc);
	return rc;
}

/**
 * cxlflash_disk_verify() - verifies a LUN is the same and handles size changes
 * @sdev:	SCSI device associated with LUN.
 * @verify:	Verify ioctl data structure.
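 *
 * When the hint indicates sense data is available, the sense is processed
 * (potentially re-reading capacity) with the context temporarily marked
 * unavailable and its mutex dropped, so an intervening EEH cannot block
 * on this thread.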
/**
 * cxlflash_disk_verify() - verifies a LUN is the same and handles size changes
 * @sdev: SCSI device associated with LUN.
 * @verify: Verify ioctl data structure.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_disk_verify(struct scsi_device *sdev,
				struct dk_cxlflash_verify *verify)
{
	int rc = 0;
	struct ctx_info *ctxi = NULL;
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct sisl_rht_entry *rhte = NULL;
	res_hndl_t rhndl = verify->rsrc_handle;
	u64 ctxid = DECODE_CTXID(verify->context_id),
	    rctxid = verify->context_id;
	u64 last_lba = 0;

	dev_dbg(dev, "%s: ctxid=%llu rhndl=%016llx, hint=%016llx, "
		"flags=%016llx\n", __func__, ctxid, verify->rsrc_handle,
		verify->hint, verify->hdr.flags);

	ctxi = get_context(cfg, rctxid, lli, 0);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
		rc = -EINVAL;
		goto out;
	}

	rhte = get_rhte(ctxi, rhndl, lli);
	if (unlikely(!rhte)) {
		dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
			__func__, rhndl);
		rc = -EINVAL;
		goto out;
	}

	/*
	 * Look at the hint/sense to see if it requires us to redrive
	 * inquiry (i.e. the Unit attention is due to the WWN changing).
	 */
	if (verify->hint & DK_CXLFLASH_VERIFY_HINT_SENSE) {
		/*
		 * Can't hold mutex across process_sense/read_cap16,
		 * since we could have an intervening EEH event.
		 */
		ctxi->unavail = true;
		mutex_unlock(&ctxi->mutex);
		rc = process_sense(sdev, verify);
		if (unlikely(rc)) {
			dev_err(dev, "%s: Failed to validate sense data (%d)\n",
				__func__, rc);
			mutex_lock(&ctxi->mutex);
			ctxi->unavail = false;
			goto out;
		}
		mutex_lock(&ctxi->mutex);
		ctxi->unavail = false;
	}

	switch (gli->mode) {
	case MODE_PHYSICAL:
		last_lba = gli->max_lba;
		break;
	case MODE_VIRTUAL:
		/* Cast lxt_cnt to u64 for multiply to be treated as 64bit op */
		last_lba = ((u64)rhte->lxt_cnt * MC_CHUNK_SIZE * gli->blk_len);
		last_lba /= CXLFLASH_BLOCK_SIZE;
		last_lba--;
		break;
	default:
		WARN(1, "Unsupported LUN mode!");
	}

	verify->last_lba = last_lba;

out:
	if (likely(ctxi))
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%d llba=%llx\n",
		__func__, rc, verify->last_lba);
	return rc;
}
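
/*
 * Worked example for the MODE_VIRTUAL last LBA math above. The numbers
 * are illustrative only and assume a chunk size (MC_CHUNK_SIZE) of 256
 * blocks and a LUN block length equal to CXLFLASH_BLOCK_SIZE (4096):
 *
 *	lxt_cnt  = 8 chunks
 *	last_lba = ((u64)8 * 256 * 4096) / 4096 - 1 = 2047
 *
 * i.e. an 8-chunk virtual LUN spans LBAs 0 through 2047.
 */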
/**
 * decode_ioctl() - translates an encoded ioctl to an easily identifiable string
 * @cmd: The ioctl command to decode.
 *
 * Return: A string identifying the decoded ioctl.
 */
static char *decode_ioctl(int cmd)
{
	switch (cmd) {
	case DK_CXLFLASH_ATTACH:
		return __stringify_1(DK_CXLFLASH_ATTACH);
	case DK_CXLFLASH_USER_DIRECT:
		return __stringify_1(DK_CXLFLASH_USER_DIRECT);
	case DK_CXLFLASH_USER_VIRTUAL:
		return __stringify_1(DK_CXLFLASH_USER_VIRTUAL);
	case DK_CXLFLASH_VLUN_RESIZE:
		return __stringify_1(DK_CXLFLASH_VLUN_RESIZE);
	case DK_CXLFLASH_RELEASE:
		return __stringify_1(DK_CXLFLASH_RELEASE);
	case DK_CXLFLASH_DETACH:
		return __stringify_1(DK_CXLFLASH_DETACH);
	case DK_CXLFLASH_VERIFY:
		return __stringify_1(DK_CXLFLASH_VERIFY);
	case DK_CXLFLASH_VLUN_CLONE:
		return __stringify_1(DK_CXLFLASH_VLUN_CLONE);
	case DK_CXLFLASH_RECOVER_AFU:
		return __stringify_1(DK_CXLFLASH_RECOVER_AFU);
	case DK_CXLFLASH_MANAGE_LUN:
		return __stringify_1(DK_CXLFLASH_MANAGE_LUN);
	}

	return "UNKNOWN";
}

/**
 * cxlflash_disk_direct_open() - opens a direct (physical) disk
 * @sdev: SCSI device associated with LUN.
 * @arg: UDirect ioctl data structure.
 *
 * On successful return, the user is informed of the resource handle
 * to be used to identify the direct LUN and the size (in blocks) of
 * the direct LUN in last LBA format.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct dk_cxlflash_release rel = { { 0 }, 0 };

	struct dk_cxlflash_udirect *pphys = (struct dk_cxlflash_udirect *)arg;

	u64 ctxid = DECODE_CTXID(pphys->context_id),
	    rctxid = pphys->context_id;
	u64 lun_size = 0;
	u64 last_lba = 0;
	u64 rsrc_handle = -1;
	u32 port = CHAN2PORTMASK(sdev->channel);

	int rc = 0;

	struct ctx_info *ctxi = NULL;
	struct sisl_rht_entry *rhte = NULL;

	dev_dbg(dev, "%s: ctxid=%llu ls=%llu\n", __func__, ctxid, lun_size);

	rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false);
	if (unlikely(rc)) {
		dev_dbg(dev, "%s: Failed attach to LUN (PHYSICAL)\n", __func__);
		goto out;
	}

	ctxi = get_context(cfg, rctxid, lli, 0);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
		rc = -EINVAL;
		goto err1;
	}

	rhte = rhte_checkout(ctxi, lli);
	if (unlikely(!rhte)) {
		dev_dbg(dev, "%s: Too many opens ctxid=%lld\n",
			__func__, ctxid);
		rc = -EMFILE;	/* too many opens */
		goto err1;
	}

	rsrc_handle = (rhte - ctxi->rht_start);

	rht_format1(rhte, lli->lun_id[sdev->channel], ctxi->rht_perms, port);

	last_lba = gli->max_lba;
	pphys->hdr.return_flags = 0;
	pphys->last_lba = last_lba;
	pphys->rsrc_handle = rsrc_handle;

	rc = cxlflash_afu_sync(afu, ctxid, rsrc_handle, AFU_LW_SYNC);
	if (unlikely(rc)) {
		dev_dbg(dev, "%s: AFU sync failed rc=%d\n", __func__, rc);
		goto err2;
	}

out:
	if (likely(ctxi))
		put_context(ctxi);
	dev_dbg(dev, "%s: returning handle=%llu rc=%d llba=%llu\n",
		__func__, rsrc_handle, rc, last_lba);
	return rc;

err2:
	marshal_udir_to_rele(pphys, &rel);
	_cxlflash_disk_release(sdev, ctxi, &rel);
	goto out;
err1:
	cxlflash_lun_detach(gli);
	goto out;
}
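
/*
 * Illustrative sketch (an assumption, not part of the driver) of how an
 * attached userspace application might open a direct LUN and consume the
 * values returned above. 'disk_fd' and 'ctxid' are hypothetical names
 * supplied by the caller.
 *
 *	struct dk_cxlflash_udirect udirect;
 *
 *	memset(&udirect, 0, sizeof(udirect));
 *	udirect.hdr.version = DK_CXLFLASH_VERSION_0;
 *	udirect.context_id = ctxid;
 *
 *	if (ioctl(disk_fd, DK_CXLFLASH_USER_DIRECT, &udirect) == 0) {
 *		rsrc_handle = udirect.rsrc_handle;  (identifies the LUN)
 *		last_lba = udirect.last_lba;        (size in last LBA format)
 *	}
 */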
/**
 * ioctl_common() - common IOCTL handler for driver
 * @sdev: SCSI device associated with LUN.
 * @cmd: IOCTL command.
 *
 * Handles common fencing operations that are valid for multiple ioctls. Always
 * allow through ioctls that are cleanup oriented in nature, even when operating
 * in a failed/terminating state.
 *
 * Return: 0 on success, -errno on failure
 */
static int ioctl_common(struct scsi_device *sdev, int cmd)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	int rc = 0;

	if (unlikely(!lli)) {
		dev_dbg(dev, "%s: Unknown LUN\n", __func__);
		rc = -EINVAL;
		goto out;
	}

	rc = check_state(cfg);
	if (unlikely(rc) && (cfg->state == STATE_FAILTERM)) {
		switch (cmd) {
		case DK_CXLFLASH_VLUN_RESIZE:
		case DK_CXLFLASH_RELEASE:
		case DK_CXLFLASH_DETACH:
			dev_dbg(dev, "%s: Command override rc=%d\n",
				__func__, rc);
			rc = 0;
			break;
		}
	}
out:
	return rc;
}
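
/*
 * Illustrative sketch (an assumption, not part of the driver) of the
 * write side of the read/write semaphore scheme documented with
 * cxlflash_ioctl() below. A thread that needs to drain in-flight ioctls,
 * e.g. ahead of an adapter reset, only has to take the semaphore for
 * write; the acquisition is granted once every ioctl thread has released
 * its read hold:
 *
 *	down_write(&cfg->ioctl_rwsem);	(blocks until no ioctls running)
 *	... perform reset/teardown work with ioctls quiesced ...
 *	up_write(&cfg->ioctl_rwsem);
 */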
/**
 * cxlflash_ioctl() - IOCTL handler for driver
 * @sdev: SCSI device associated with LUN.
 * @cmd: IOCTL command.
 * @arg: Userspace ioctl data structure.
 *
 * A read/write semaphore is used to implement a 'drain' of currently
 * running ioctls. The read semaphore is taken at the beginning of each
 * ioctl thread and released upon concluding execution. Additionally, the
 * semaphore should be released and then reacquired in any ioctl execution
 * path which will wait for an event to occur that is outside the scope of
 * the ioctl (i.e. an adapter reset). To drain the ioctls currently running,
 * a thread simply needs to acquire the write semaphore.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
	typedef int (*sioctl) (struct scsi_device *, void *);

	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct dk_cxlflash_hdr *hdr;
	char buf[sizeof(union cxlflash_ioctls)];
	size_t size = 0;
	bool known_ioctl = false;
	int idx;
	int rc = 0;
	struct Scsi_Host *shost = sdev->host;
	sioctl do_ioctl = NULL;

	static const struct {
		size_t size;
		sioctl ioctl;
	} ioctl_tbl[] = {	/* NOTE: order matters here */
	{sizeof(struct dk_cxlflash_attach), (sioctl)cxlflash_disk_attach},
	{sizeof(struct dk_cxlflash_udirect), cxlflash_disk_direct_open},
	{sizeof(struct dk_cxlflash_release), (sioctl)cxlflash_disk_release},
	{sizeof(struct dk_cxlflash_detach), (sioctl)cxlflash_disk_detach},
	{sizeof(struct dk_cxlflash_verify), (sioctl)cxlflash_disk_verify},
	{sizeof(struct dk_cxlflash_recover_afu), (sioctl)cxlflash_afu_recover},
	{sizeof(struct dk_cxlflash_manage_lun), (sioctl)cxlflash_manage_lun},
	{sizeof(struct dk_cxlflash_uvirtual), cxlflash_disk_virtual_open},
	{sizeof(struct dk_cxlflash_resize), (sioctl)cxlflash_vlun_resize},
	{sizeof(struct dk_cxlflash_clone), (sioctl)cxlflash_disk_clone},
	};
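
	/*
	 * Worked example of the table lookup below (illustrative only).
	 * The lookup relies on the uapi command numbers being contiguous
	 * and assigned in the same order as ioctl_tbl[]. For
	 * DK_CXLFLASH_VLUN_RESIZE that gives:
	 *
	 *	idx = _IOC_NR(DK_CXLFLASH_VLUN_RESIZE) -
	 *	      _IOC_NR(DK_CXLFLASH_ATTACH)
	 *	    = 8
	 *
	 * which selects the ninth entry of the table,
	 * {sizeof(struct dk_cxlflash_resize), cxlflash_vlun_resize}.
	 */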
	/* Hold read semaphore so we can drain if needed */
	down_read(&cfg->ioctl_rwsem);

	/* Restrict command set to physical support only for internal LUN */
	if (afu->internal_lun)
		switch (cmd) {
		case DK_CXLFLASH_RELEASE:
		case DK_CXLFLASH_USER_VIRTUAL:
		case DK_CXLFLASH_VLUN_RESIZE:
		case DK_CXLFLASH_VLUN_CLONE:
			dev_dbg(dev, "%s: %s not supported for lun_mode=%d\n",
				__func__, decode_ioctl(cmd), afu->internal_lun);
			rc = -EINVAL;
			goto cxlflash_ioctl_exit;
		}

	switch (cmd) {
	case DK_CXLFLASH_ATTACH:
	case DK_CXLFLASH_USER_DIRECT:
	case DK_CXLFLASH_RELEASE:
	case DK_CXLFLASH_DETACH:
	case DK_CXLFLASH_VERIFY:
	case DK_CXLFLASH_RECOVER_AFU:
	case DK_CXLFLASH_USER_VIRTUAL:
	case DK_CXLFLASH_VLUN_RESIZE:
	case DK_CXLFLASH_VLUN_CLONE:
		dev_dbg(dev, "%s: %s (%08X) on dev(%d/%d/%d/%llu)\n",
			__func__, decode_ioctl(cmd), cmd, shost->host_no,
			sdev->channel, sdev->id, sdev->lun);
		rc = ioctl_common(sdev, cmd);
		if (unlikely(rc))
			goto cxlflash_ioctl_exit;

		/* fall through */

	case DK_CXLFLASH_MANAGE_LUN:
		known_ioctl = true;
		idx = _IOC_NR(cmd) - _IOC_NR(DK_CXLFLASH_ATTACH);
		size = ioctl_tbl[idx].size;
		do_ioctl = ioctl_tbl[idx].ioctl;

		if (likely(do_ioctl))
			break;

		/* fall through */
	default:
		rc = -EINVAL;
		goto cxlflash_ioctl_exit;
	}

	if (unlikely(copy_from_user(&buf, arg, size))) {
		dev_err(dev, "%s: copy_from_user() fail "
			"size=%lu cmd=%d (%s) arg=%p\n",
			__func__, size, cmd, decode_ioctl(cmd), arg);
		rc = -EFAULT;
		goto cxlflash_ioctl_exit;
	}

	hdr = (struct dk_cxlflash_hdr *)&buf;
	if (hdr->version != DK_CXLFLASH_VERSION_0) {
		dev_dbg(dev, "%s: Version %u not supported for %s\n",
			__func__, hdr->version, decode_ioctl(cmd));
		rc = -EINVAL;
		goto cxlflash_ioctl_exit;
	}

	if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->rsvd[2] || hdr->return_flags) {
		dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
		rc = -EINVAL;
		goto cxlflash_ioctl_exit;
	}

	rc = do_ioctl(sdev, (void *)&buf);
	if (likely(!rc))
		if (unlikely(copy_to_user(arg, &buf, size))) {
			dev_err(dev, "%s: copy_to_user() fail "
				"size=%lu cmd=%d (%s) arg=%p\n",
				__func__, size, cmd, decode_ioctl(cmd), arg);
			rc = -EFAULT;
		}

	/* fall through to exit */

cxlflash_ioctl_exit:
	up_read(&cfg->ioctl_rwsem);
	if (unlikely(rc && known_ioctl))
		dev_err(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
			"returned rc %d\n", __func__,
			decode_ioctl(cmd), cmd, shost->host_no,
			sdev->channel, sdev->id, sdev->lun, rc);
	else
		dev_dbg(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
			"returned rc %d\n", __func__, decode_ioctl(cmd),
			cmd, shost->host_no, sdev->channel, sdev->id,
			sdev->lun, rc);
	return rc;
}