/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/file.h>
#include <linux/syscalls.h>
#include <misc/cxl.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "sislite.h"
#include "common.h"
#include "vlun.h"
#include "superpipe.h"

struct cxlflash_global global;

/**
 * marshal_rele_to_resize() - translate release to resize structure
 * @release:	Source structure from which to translate/copy.
 * @resize:	Destination structure for the translate/copy.
 */
static void marshal_rele_to_resize(struct dk_cxlflash_release *release,
				   struct dk_cxlflash_resize *resize)
{
	resize->hdr = release->hdr;
	resize->context_id = release->context_id;
	resize->rsrc_handle = release->rsrc_handle;
}

/**
 * marshal_det_to_rele() - translate detach to release structure
 * @detach:	Source structure from which to translate/copy.
 * @release:	Destination structure for the translate/copy.
 */
static void marshal_det_to_rele(struct dk_cxlflash_detach *detach,
				struct dk_cxlflash_release *release)
{
	release->hdr = detach->hdr;
	release->context_id = detach->context_id;
}

/**
 * cxlflash_free_errpage() - frees resources associated with global error page
 */
void cxlflash_free_errpage(void)
{

	mutex_lock(&global.mutex);
	if (global.err_page) {
		__free_page(global.err_page);
		global.err_page = NULL;
	}
	mutex_unlock(&global.mutex);
}

/**
 * cxlflash_stop_term_user_contexts() - stops/terminates known user contexts
 * @cfg:	Internal structure associated with the host.
 *
 * When the host needs to go down, all users must be quiesced and their
 * memory freed. This is accomplished by putting the contexts in error
 * state which will notify the user and let them 'drive' the tear down.
 * Meanwhile, this routine camps until all user contexts have been removed.
 */
void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int i, found;

	cxlflash_mark_contexts_error(cfg);

	while (true) {
		found = false;

		for (i = 0; i < MAX_CONTEXT; i++)
			if (cfg->ctx_tbl[i]) {
				found = true;
				break;
			}

		if (!found && list_empty(&cfg->ctx_err_recovery))
			return;

		dev_dbg(dev, "%s: Wait for user contexts to quiesce...\n",
			__func__);
		wake_up_all(&cfg->reset_waitq);
		ssleep(1);
	}
}

/**
 * find_error_context() - locates a context by cookie on the error recovery list
 * @cfg:	Internal structure associated with the host.
 * @rctxid:	Desired context by id.
 * @file:	Desired context by file.
 *
 * Return: Found context on success, NULL on failure
 */
static struct ctx_info *find_error_context(struct cxlflash_cfg *cfg, u64 rctxid,
					   struct file *file)
{
	struct ctx_info *ctxi;

	list_for_each_entry(ctxi, &cfg->ctx_err_recovery, list)
		if ((ctxi->ctxid == rctxid) || (ctxi->file == file))
			return ctxi;

	return NULL;
}

/**
 * get_context() - obtains a validated and locked context reference
 * @cfg:	Internal structure associated with the host.
 * @rctxid:	Desired context (raw, un-decoded format).
 * @arg:	LUN information or file associated with request.
 * @ctx_ctrl:	Control information to 'steer' desired lookup.
 *
 * NOTE: despite the name pid, in Linux, current->pid actually refers
 * to the lightweight process id (tid) and can change if the process is
 * multi threaded. The tgid remains constant for the process and only changes
 * when the process forks. For all intents and purposes, think of tgid
 * as a pid in the traditional sense.
 *
 * Return: Validated context on success, NULL on failure
 */
struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid,
			     void *arg, enum ctx_ctrl ctx_ctrl)
{
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct lun_access *lun_access = NULL;
	struct file *file = NULL;
	struct llun_info *lli = arg;
	u64 ctxid = DECODE_CTXID(rctxid);
	int rc;
	pid_t pid = current->tgid, ctxpid = 0;

	if (ctx_ctrl & CTX_CTRL_FILE) {
		lli = NULL;
		file = (struct file *)arg;
	}

	if (ctx_ctrl & CTX_CTRL_CLONE)
		pid = current->parent->tgid;

	if (likely(ctxid < MAX_CONTEXT)) {
		while (true) {
			mutex_lock(&cfg->ctx_tbl_list_mutex);
			ctxi = cfg->ctx_tbl[ctxid];
			if (ctxi)
				if ((file && (ctxi->file != file)) ||
				    (!file && (ctxi->ctxid != rctxid)))
					ctxi = NULL;

			if ((ctx_ctrl & CTX_CTRL_ERR) ||
			    (!ctxi && (ctx_ctrl & CTX_CTRL_ERR_FALLBACK)))
				ctxi = find_error_context(cfg, rctxid, file);
			if (!ctxi) {
				mutex_unlock(&cfg->ctx_tbl_list_mutex);
				goto out;
			}

			/*
			 * Need to acquire ownership of the context while still
			 * under the table/list lock to serialize with a remove
			 * thread. Use the 'try' to avoid stalling the
			 * table/list lock for a single context.
			 *
			 * Note that the lock order is:
			 *
			 *	cfg->ctx_tbl_list_mutex -> ctxi->mutex
			 *
			 * Therefore release ctx_tbl_list_mutex before retrying.
			 */
			rc = mutex_trylock(&ctxi->mutex);
			mutex_unlock(&cfg->ctx_tbl_list_mutex);
			if (rc)
				break; /* got the context's lock! */
		}

		if (ctxi->unavail)
			goto denied;

		ctxpid = ctxi->pid;
		if (likely(!(ctx_ctrl & CTX_CTRL_NOPID)))
			if (pid != ctxpid)
				goto denied;

		if (lli) {
			list_for_each_entry(lun_access, &ctxi->luns, list)
				if (lun_access->lli == lli)
					goto out;
			goto denied;
		}
	}

out:
	dev_dbg(dev, "%s: rctxid=%016llx ctxinfo=%p ctxpid=%u pid=%u "
		"ctx_ctrl=%u\n", __func__, rctxid, ctxi, ctxpid, pid,
		ctx_ctrl);

	return ctxi;

denied:
	mutex_unlock(&ctxi->mutex);
	ctxi = NULL;
	goto out;
}

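/*
 * Illustrative usage sketch (not part of the driver build): a caller treats
 * the context returned by get_context() as validated and locked, and must
 * balance every successful lookup with put_context(). Error handling is
 * abbreviated and the surrounding ioctl plumbing is assumed.
 *
 *	struct ctx_info *ctxi;
 *
 *	ctxi = get_context(cfg, rctxid, lli, 0);
 *	if (unlikely(!ctxi))
 *		return -EINVAL;
 *
 *	... operate on the context while holding ctxi->mutex ...
 *
 *	put_context(ctxi);
 */
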
/**
 * put_context() - release a context that was retrieved from get_context()
 * @ctxi:	Context to release.
 *
 * For now, releasing the context equates to unlocking its mutex.
 */
void put_context(struct ctx_info *ctxi)
{
	mutex_unlock(&ctxi->mutex);
}

/**
 * afu_attach() - attach a context to the AFU
 * @cfg:	Internal structure associated with the host.
 * @ctxi:	Context to attach.
 *
 * Upon setting the context capabilities, they must be confirmed with
 * a read back operation as the context might have been closed since
 * the mailbox was unlocked. When this occurs, registration is failed.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
{
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct sisl_ctrl_map __iomem *ctrl_map = ctxi->ctrl_map;
	int rc = 0;
	u64 val;

	/* Unlock cap and restrict user to read/write cmds in translated mode */
	readq_be(&ctrl_map->mbox_r);
	val = (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD);
	writeq_be(val, &ctrl_map->ctx_cap);
	val = readq_be(&ctrl_map->ctx_cap);
	if (val != (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD)) {
		dev_err(dev, "%s: ctx may be closed val=%016llx\n",
			__func__, val);
		rc = -EAGAIN;
		goto out;
	}

	/* Set up MMIO registers pointing to the RHT */
	writeq_be((u64)ctxi->rht_start, &ctrl_map->rht_start);
	val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(afu->ctx_hndl));
	writeq_be(val, &ctrl_map->rht_cnt_id);
out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * read_cap16() - issues a SCSI READ_CAP16 command
 * @sdev:	SCSI device associated with LUN.
 * @lli:	LUN destined for capacity request.
 *
 * The READ_CAP16 can take quite a while to complete. Should an EEH occur while
 * in scsi_execute(), the EEH handler will attempt to recover. As part of the
 * recovery, the handler drains all currently running ioctls, waiting until they
 * have completed before proceeding with a reset. As this routine is used on the
 * ioctl path, this can create a condition where the EEH handler becomes stuck,
 * infinitely waiting for this ioctl thread. To avoid this behavior, temporarily
 * unmark this thread as an ioctl thread by releasing the ioctl read semaphore.
 * This will allow the EEH handler to proceed with a recovery while this thread
 * is still running. Once the scsi_execute() returns, reacquire the ioctl read
 * semaphore and check the adapter state in case it changed while inside of
 * scsi_execute(). The state check will wait if the adapter is still being
 * recovered or return a failure if the recovery failed. In the event that the
 * adapter reset failed, simply return the failure as the ioctl would be unable
 * to continue.
 *
 * Note that the above puts a requirement on this routine to only be called on
 * an ioctl thread.
 *
 * Return: 0 on success, -errno on failure
 */
static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct glun_info *gli = lli->parent;
	u8 *cmd_buf = NULL;
	u8 *scsi_cmd = NULL;
	u8 *sense_buf = NULL;
	int rc = 0;
	int result = 0;
	int retry_cnt = 0;
	u32 to = CMD_TIMEOUT * HZ;

retry:
	cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
	scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
	sense_buf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
	if (unlikely(!cmd_buf || !scsi_cmd || !sense_buf)) {
		rc = -ENOMEM;
		goto out;
	}

	scsi_cmd[0] = SERVICE_ACTION_IN_16;	/* read cap(16) */
	scsi_cmd[1] = SAI_READ_CAPACITY_16;	/* service action */
	put_unaligned_be32(CMD_BUFSIZE, &scsi_cmd[10]);

	dev_dbg(dev, "%s: %ssending cmd(%02x)\n", __func__,
		retry_cnt ? "re" : "", scsi_cmd[0]);

	/* Drop the ioctl read semaphore across lengthy call */
	up_read(&cfg->ioctl_rwsem);
	result = scsi_execute(sdev, scsi_cmd, DMA_FROM_DEVICE, cmd_buf,
			      CMD_BUFSIZE, sense_buf, to, CMD_RETRIES, 0, NULL);
	down_read(&cfg->ioctl_rwsem);
	rc = check_state(cfg);
	if (rc) {
		dev_err(dev, "%s: Failed state result=%08x\n",
			__func__, result);
		rc = -ENODEV;
		goto out;
	}

	if (driver_byte(result) == DRIVER_SENSE) {
		result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
		if (result & SAM_STAT_CHECK_CONDITION) {
			struct scsi_sense_hdr sshdr;

			scsi_normalize_sense(sense_buf, SCSI_SENSE_BUFFERSIZE,
					     &sshdr);
			switch (sshdr.sense_key) {
			case NO_SENSE:
			case RECOVERED_ERROR:
				/* fall through */
			case NOT_READY:
				result &= ~SAM_STAT_CHECK_CONDITION;
				break;
			case UNIT_ATTENTION:
				switch (sshdr.asc) {
				case 0x29: /* Power on Reset or Device Reset */
					/* fall through */
				case 0x2A: /* Device capacity changed */
				case 0x3F: /* Report LUNs changed */
					/* Retry the command once more */
					if (retry_cnt++ < 1) {
						kfree(cmd_buf);
						kfree(scsi_cmd);
						kfree(sense_buf);
						goto retry;
					}
				}
				break;
			default:
				break;
			}
		}
	}

	if (result) {
		dev_err(dev, "%s: command failed, result=%08x\n",
			__func__, result);
		rc = -EIO;
		goto out;
	}

	/*
	 * Read cap was successful, grab values from the buffer;
	 * note that we don't need to worry about unaligned access
	 * as the buffer is allocated on an aligned boundary.
	 */
	mutex_lock(&gli->mutex);
	gli->max_lba = be64_to_cpu(*((__be64 *)&cmd_buf[0]));
	gli->blk_len = be32_to_cpu(*((__be32 *)&cmd_buf[8]));
	mutex_unlock(&gli->mutex);

out:
	kfree(cmd_buf);
	kfree(scsi_cmd);
	kfree(sense_buf);

	dev_dbg(dev, "%s: maxlba=%lld blklen=%d rc=%d\n",
		__func__, gli->max_lba, gli->blk_len, rc);
	return rc;
}

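/*
 * For reference, the portion of the READ CAPACITY(16) parameter data
 * consumed above (a sketch of the SBC layout, not something defined by
 * this driver):
 *
 *	bytes 0-7	returned (last) logical block address, big-endian
 *	bytes 8-11	logical block length in bytes, big-endian
 *
 * hence the be64_to_cpu()/be32_to_cpu() conversions on cmd_buf in
 * read_cap16().
 */
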
/**
 * get_rhte() - obtains validated resource handle table entry reference
 * @ctxi:	Context owning the resource handle.
 * @rhndl:	Resource handle associated with entry.
 * @lli:	LUN associated with request.
 *
 * Return: Validated RHTE on success, NULL on failure
 */
struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl,
				struct llun_info *lli)
{
	struct cxlflash_cfg *cfg = ctxi->cfg;
	struct device *dev = &cfg->dev->dev;
	struct sisl_rht_entry *rhte = NULL;

	if (unlikely(!ctxi->rht_start)) {
		dev_dbg(dev, "%s: Context does not have allocated RHT\n",
			__func__);
		goto out;
	}

	if (unlikely(rhndl >= MAX_RHT_PER_CONTEXT)) {
		dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
			__func__, rhndl);
		goto out;
	}

	if (unlikely(ctxi->rht_lun[rhndl] != lli)) {
		dev_dbg(dev, "%s: Bad resource handle LUN rhndl=%d\n",
			__func__, rhndl);
		goto out;
	}

	rhte = &ctxi->rht_start[rhndl];
	if (unlikely(rhte->nmask == 0)) {
		dev_dbg(dev, "%s: Unopened resource handle rhndl=%d\n",
			__func__, rhndl);
		rhte = NULL;
		goto out;
	}

out:
	return rhte;
}

/**
 * rhte_checkout() - obtains free/empty resource handle table entry
 * @ctxi:	Context owning the resource handle.
 * @lli:	LUN associated with request.
 *
 * Return: Free RHTE on success, NULL on failure
 */
struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi,
				     struct llun_info *lli)
{
	struct cxlflash_cfg *cfg = ctxi->cfg;
	struct device *dev = &cfg->dev->dev;
	struct sisl_rht_entry *rhte = NULL;
	int i;

	/* Find a free RHT entry */
	for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
		if (ctxi->rht_start[i].nmask == 0) {
			rhte = &ctxi->rht_start[i];
			ctxi->rht_out++;
			break;
		}

	if (likely(rhte))
		ctxi->rht_lun[i] = lli;

	dev_dbg(dev, "%s: returning rhte=%p index=%d\n", __func__, rhte, i);
	return rhte;
}

/**
 * rhte_checkin() - releases a resource handle table entry
 * @ctxi:	Context owning the resource handle.
 * @rhte:	RHTE to release.
 */
void rhte_checkin(struct ctx_info *ctxi,
		  struct sisl_rht_entry *rhte)
{
	u32 rsrc_handle = rhte - ctxi->rht_start;

	rhte->nmask = 0;
	rhte->fp = 0;
	ctxi->rht_out--;
	ctxi->rht_lun[rsrc_handle] = NULL;
	ctxi->rht_needs_ws[rsrc_handle] = false;
}

/**
 * rht_format1() - populates a RHTE for format 1
 * @rhte:	RHTE to populate.
 * @lun_id:	LUN ID of LUN associated with RHTE.
 * @perm:	Desired permissions for RHTE.
 * @port_sel:	Port selection mask
 */
static void rht_format1(struct sisl_rht_entry *rhte, u64 lun_id, u32 perm,
			u32 port_sel)
{
	/*
	 * Populate the Format 1 RHT entry for direct access (physical
	 * LUN) using the synchronization sequence defined in the
	 * SISLite specification.
	 */
	struct sisl_rht_entry_f1 dummy = { 0 };
	struct sisl_rht_entry_f1 *rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;

	memset(rhte_f1, 0, sizeof(*rhte_f1));
	rhte_f1->fp = SISL_RHT_FP(1U, 0);
	dma_wmb(); /* Make setting of format bit visible */

	rhte_f1->lun_id = lun_id;
	dma_wmb(); /* Make setting of LUN id visible */

	/*
	 * Use a dummy RHT Format 1 entry to build the second dword
	 * of the entry that must be populated in a single write when
	 * enabled (valid bit set to TRUE).
	 */
	dummy.valid = 0x80;
	dummy.fp = SISL_RHT_FP(1U, perm);
	dummy.port_sel = port_sel;
	rhte_f1->dw = dummy.dw;

	dma_wmb(); /* Make remaining RHT entry fields visible */
}

/**
 * cxlflash_lun_attach() - attaches a user to a LUN and manages the LUN's mode
 * @gli:	LUN to attach.
 * @mode:	Desired mode of the LUN.
 * @locked:	Mutex status on current thread.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked)
{
	int rc = 0;

	if (!locked)
		mutex_lock(&gli->mutex);

	if (gli->mode == MODE_NONE)
		gli->mode = mode;
	else if (gli->mode != mode) {
		pr_debug("%s: gli_mode=%d requested_mode=%d\n",
			 __func__, gli->mode, mode);
		rc = -EINVAL;
		goto out;
	}

	gli->users++;
	WARN_ON(gli->users <= 0);
out:
	pr_debug("%s: Returning rc=%d gli->mode=%u gli->users=%u\n",
		 __func__, rc, gli->mode, gli->users);
	if (!locked)
		mutex_unlock(&gli->mutex);
	return rc;
}

/**
 * cxlflash_lun_detach() - detaches a user from a LUN and resets the LUN's mode
 * @gli:	LUN to detach.
 *
 * When resetting the mode, terminate block allocation resources as they
 * are no longer required (service is safe to call even when block allocation
 * resources were not present - such as when transitioning from physical mode).
 * These resources will be reallocated when needed (subsequent transition to
 * virtual mode).
 */
void cxlflash_lun_detach(struct glun_info *gli)
{
	mutex_lock(&gli->mutex);
	WARN_ON(gli->mode == MODE_NONE);
	if (--gli->users == 0) {
		gli->mode = MODE_NONE;
		cxlflash_ba_terminate(&gli->blka.ba_lun);
	}
	pr_debug("%s: gli->users=%u\n", __func__, gli->users);
	WARN_ON(gli->users < 0);
	mutex_unlock(&gli->mutex);
}

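/*
 * Illustrative pairing (not compiled): a LUN carries a single mode at a
 * time, so a caller requesting a conflicting mode is turned away until
 * every user of the current mode has detached. Error handling abbreviated.
 *
 *	rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false);
 *	if (rc)
 *		return rc;	(-EINVAL when the LUN is already virtual)
 *	...
 *	cxlflash_lun_detach(gli);	(mode reverts to MODE_NONE at 0 users)
 */
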
/**
 * _cxlflash_disk_release() - releases the specified resource entry
 * @sdev:	SCSI device associated with LUN.
 * @ctxi:	Context owning resources.
 * @release:	Release ioctl data structure.
 *
 * For LUNs in virtual mode, the virtual LUN associated with the specified
 * resource handle is resized to 0 prior to releasing the RHTE. Note that the
 * AFU sync should _not_ be performed when the context is sitting on the error
 * recovery list. A context on the error recovery list is not known to the AFU
 * due to reset. When the context is recovered, it will be reattached and made
 * known again to the AFU.
 *
 * Return: 0 on success, -errno on failure
 */
int _cxlflash_disk_release(struct scsi_device *sdev,
			   struct ctx_info *ctxi,
			   struct dk_cxlflash_release *release)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct afu *afu = cfg->afu;
	bool put_ctx = false;

	struct dk_cxlflash_resize size;
	res_hndl_t rhndl = release->rsrc_handle;

	int rc = 0;
	u64 ctxid = DECODE_CTXID(release->context_id),
	    rctxid = release->context_id;

	struct sisl_rht_entry *rhte;
	struct sisl_rht_entry_f1 *rhte_f1;

	dev_dbg(dev, "%s: ctxid=%llu rhndl=%llu gli->mode=%u gli->users=%u\n",
		__func__, ctxid, release->rsrc_handle, gli->mode, gli->users);

	if (!ctxi) {
		ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
		if (unlikely(!ctxi)) {
			dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
				__func__, ctxid);
			rc = -EINVAL;
			goto out;
		}

		put_ctx = true;
	}

	rhte = get_rhte(ctxi, rhndl, lli);
	if (unlikely(!rhte)) {
		dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
			__func__, rhndl);
		rc = -EINVAL;
		goto out;
	}

	/*
	 * Resize to 0 for virtual LUNs by setting the size
	 * to 0. This will clear LXT_START and LXT_CNT fields
	 * in the RHT entry and properly sync with the AFU.
	 *
	 * Afterwards we clear the remaining fields.
	 */
	switch (gli->mode) {
	case MODE_VIRTUAL:
		marshal_rele_to_resize(release, &size);
		size.req_size = 0;
		rc = _cxlflash_vlun_resize(sdev, ctxi, &size);
		if (rc) {
			dev_dbg(dev, "%s: resize failed rc %d\n", __func__, rc);
			goto out;
		}

		break;
	case MODE_PHYSICAL:
		/*
		 * Clear the Format 1 RHT entry for direct access
		 * (physical LUN) using the synchronization sequence
		 * defined in the SISLite specification.
		 */
		rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;

		rhte_f1->valid = 0;
		dma_wmb(); /* Make revocation of RHT entry visible */

		rhte_f1->lun_id = 0;
		dma_wmb(); /* Make clearing of LUN id visible */

		rhte_f1->dw = 0;
		dma_wmb(); /* Make RHT entry bottom-half clearing visible */

		if (!ctxi->err_recovery_active)
			cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
		break;
	default:
		WARN(1, "Unsupported LUN mode!");
		goto out;
	}

	rhte_checkin(ctxi, rhte);
	cxlflash_lun_detach(gli);

out:
	if (put_ctx)
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

int cxlflash_disk_release(struct scsi_device *sdev,
			  struct dk_cxlflash_release *release)
{
	return _cxlflash_disk_release(sdev, NULL, release);
}

/**
 * destroy_context() - releases a context
 * @cfg:	Internal structure associated with the host.
 * @ctxi:	Context to release.
 *
 * This routine is safe to be called with a non-initialized context.
 * Also note that the routine conditionally checks for the existence
 * of the context control map before clearing the RHT registers and
 * context capabilities because it is possible to destroy a context
 * while the context is in the error state (previous mapping was
 * removed [so there is no need to worry about clearing] and context
 * is waiting for a new mapping).
 */
static void destroy_context(struct cxlflash_cfg *cfg,
			    struct ctx_info *ctxi)
{
	struct afu *afu = cfg->afu;

	if (ctxi->initialized) {
		WARN_ON(!list_empty(&ctxi->luns));

		/* Clear RHT registers and drop all capabilities for context */
		if (afu->afu_map && ctxi->ctrl_map) {
			writeq_be(0, &ctxi->ctrl_map->rht_start);
			writeq_be(0, &ctxi->ctrl_map->rht_cnt_id);
			writeq_be(0, &ctxi->ctrl_map->ctx_cap);
		}
	}

	/* Free memory associated with context */
	free_page((ulong)ctxi->rht_start);
	kfree(ctxi->rht_needs_ws);
	kfree(ctxi->rht_lun);
	kfree(ctxi);
}

/**
 * create_context() - allocates and initializes a context
 * @cfg:	Internal structure associated with the host.
 *
 * Return: Allocated context on success, NULL on failure
 */
static struct ctx_info *create_context(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct llun_info **lli = NULL;
	u8 *ws = NULL;
	struct sisl_rht_entry *rhte;

	ctxi = kzalloc(sizeof(*ctxi), GFP_KERNEL);
	lli = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*lli)), GFP_KERNEL);
	ws = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*ws)), GFP_KERNEL);
	if (unlikely(!ctxi || !lli || !ws)) {
		dev_err(dev, "%s: Unable to allocate context\n", __func__);
		goto err;
	}

	rhte = (struct sisl_rht_entry *)get_zeroed_page(GFP_KERNEL);
	if (unlikely(!rhte)) {
		dev_err(dev, "%s: Unable to allocate RHT\n", __func__);
		goto err;
	}

	ctxi->rht_lun = lli;
	ctxi->rht_needs_ws = ws;
	ctxi->rht_start = rhte;
out:
	return ctxi;

err:
	kfree(ws);
	kfree(lli);
	kfree(ctxi);
	ctxi = NULL;
	goto out;
}

/**
 * init_context() - initializes a previously allocated context
 * @ctxi:	Previously allocated context
 * @cfg:	Internal structure associated with the host.
 * @ctx:	Previously obtained CXL context reference.
 * @ctxid:	Previously obtained process element associated with CXL context.
 * @file:	Previously obtained file associated with CXL context.
 * @perms:	User-specified permissions.
 */
static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg,
			 struct cxl_context *ctx, int ctxid, struct file *file,
			 u32 perms)
{
	struct afu *afu = cfg->afu;

	ctxi->rht_perms = perms;
	ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
	ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
	ctxi->pid = current->tgid; /* tgid = pid */
	ctxi->ctx = ctx;
	ctxi->cfg = cfg;
	ctxi->file = file;
	ctxi->initialized = true;
	mutex_init(&ctxi->mutex);
	kref_init(&ctxi->kref);
	INIT_LIST_HEAD(&ctxi->luns);
	INIT_LIST_HEAD(&ctxi->list); /* initialize for list_empty() */
}

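/*
 * Context reference counting at a glance (illustrative summary, not a new
 * interface): init_context() above seeds the counter with kref_init(); a
 * reused context takes an extra reference via kref_get() in
 * cxlflash_disk_attach(); and _cxlflash_disk_detach() drops one with
 * kref_put(&ctxi->kref, remove_context), so remove_context() below runs
 * only when the last LUN detaches.
 */
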
/**
 * remove_context() - context kref release handler
 * @kref:	Kernel reference associated with context to be removed.
 *
 * When a context no longer has any references it can safely be removed
 * from global access and destroyed. Note that it is assumed the thread
 * relinquishing access to the context holds its mutex.
 */
static void remove_context(struct kref *kref)
{
	struct ctx_info *ctxi = container_of(kref, struct ctx_info, kref);
	struct cxlflash_cfg *cfg = ctxi->cfg;
	u64 ctxid = DECODE_CTXID(ctxi->ctxid);

	/* Remove context from table/error list */
	WARN_ON(!mutex_is_locked(&ctxi->mutex));
	ctxi->unavail = true;
	mutex_unlock(&ctxi->mutex);
	mutex_lock(&cfg->ctx_tbl_list_mutex);
	mutex_lock(&ctxi->mutex);

	if (!list_empty(&ctxi->list))
		list_del(&ctxi->list);
	cfg->ctx_tbl[ctxid] = NULL;
	mutex_unlock(&cfg->ctx_tbl_list_mutex);
	mutex_unlock(&ctxi->mutex);

	/* Context now completely uncoupled/unreachable */
	destroy_context(cfg, ctxi);
}

/**
 * _cxlflash_disk_detach() - detaches a LUN from a context
 * @sdev:	SCSI device associated with LUN.
 * @ctxi:	Context owning resources.
 * @detach:	Detach ioctl data structure.
 *
 * As part of the detach, all per-context resources associated with the LUN
 * are cleaned up. When detaching the last LUN for a context, the context
 * itself is cleaned up and released.
 *
 * Return: 0 on success, -errno on failure
 */
static int _cxlflash_disk_detach(struct scsi_device *sdev,
				 struct ctx_info *ctxi,
				 struct dk_cxlflash_detach *detach)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct lun_access *lun_access, *t;
	struct dk_cxlflash_release rel;
	bool put_ctx = false;

	int i;
	int rc = 0;
	u64 ctxid = DECODE_CTXID(detach->context_id),
	    rctxid = detach->context_id;

	dev_dbg(dev, "%s: ctxid=%llu\n", __func__, ctxid);

	if (!ctxi) {
		ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
		if (unlikely(!ctxi)) {
			dev_dbg(dev, "%s: Bad context ctxid=%llu\n",
				__func__, ctxid);
			rc = -EINVAL;
			goto out;
		}

		put_ctx = true;
	}

	/* Cleanup outstanding resources tied to this LUN */
	if (ctxi->rht_out) {
		marshal_det_to_rele(detach, &rel);
		for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) {
			if (ctxi->rht_lun[i] == lli) {
				rel.rsrc_handle = i;
				_cxlflash_disk_release(sdev, ctxi, &rel);
			}

			/* No need to loop further if we're done */
			if (ctxi->rht_out == 0)
				break;
		}
	}

	/* Take our LUN out of context, free the node */
	list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
		if (lun_access->lli == lli) {
			list_del(&lun_access->list);
			kfree(lun_access);
			lun_access = NULL;
			break;
		}

	/*
	 * Release the context reference and the sdev reference that
	 * bound this LUN to the context.
	 */
	if (kref_put(&ctxi->kref, remove_context))
		put_ctx = false;
	scsi_device_put(sdev);
out:
	if (put_ctx)
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

static int cxlflash_disk_detach(struct scsi_device *sdev,
				struct dk_cxlflash_detach *detach)
{
	return _cxlflash_disk_detach(sdev, NULL, detach);
}

/**
 * cxlflash_cxl_release() - release handler for adapter file descriptor
 * @inode:	File-system inode associated with fd.
 * @file:	File installed with adapter file descriptor.
 *
 * This routine is the release handler for the fops registered with
 * the CXL services on an initial attach for a context. It is called
 * when a close (explicitly by the user or as part of a process tear
 * down) is performed on the adapter file descriptor returned to the
 * user. The user should be aware that explicitly performing a close
 * is considered catastrophic and subsequent usage of the superpipe API
 * with previously saved off tokens will fail.
 *
 * This routine derives the context reference and calls detach for
 * each LUN associated with the context. The final detach operation
 * causes the context itself to be freed. With exception to when the
 * CXL process element (context id) lookup fails (a case that should
 * theoretically never occur), every call into this routine results
 * in a complete freeing of a context.
 *
 * Return: 0 on success
 */
static int cxlflash_cxl_release(struct inode *inode, struct file *file)
{
	struct cxl_context *ctx = cxl_fops_get_context(file);
	struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
						cxl_fops);
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct dk_cxlflash_detach detach = { { 0 }, 0 };
	struct lun_access *lun_access, *t;
	enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
	int ctxid;

	ctxid = cxl_process_element(ctx);
	if (unlikely(ctxid < 0)) {
		dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
			__func__, ctx, ctxid);
		goto out;
	}

	ctxi = get_context(cfg, ctxid, file, ctrl);
	if (unlikely(!ctxi)) {
		ctxi = get_context(cfg, ctxid, file, ctrl | CTX_CTRL_CLONE);
		if (!ctxi) {
			dev_dbg(dev, "%s: ctxid=%d already free\n",
				__func__, ctxid);
			goto out_release;
		}

		dev_dbg(dev, "%s: Another process owns ctxid=%d\n",
			__func__, ctxid);
		put_context(ctxi);
		goto out;
	}

	dev_dbg(dev, "%s: close for ctxid=%d\n", __func__, ctxid);

	detach.context_id = ctxi->ctxid;
	list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
		_cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
out_release:
	cxl_fd_release(inode, file);
out:
	dev_dbg(dev, "%s: returning\n", __func__);
	return 0;
}

/**
 * unmap_context() - clears a previously established mapping
 * @ctxi:	Context owning the mapping.
 *
 * This routine is used to switch between the error notification page
 * (dummy page of all 1's) and the real mapping (established by the CXL
 * fault handler).
 */
static void unmap_context(struct ctx_info *ctxi)
{
	unmap_mapping_range(ctxi->file->f_mapping, 0, 0, 1);
}

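/*
 * Error-notification flow in brief (summary of the code that follows, not
 * a new mechanism): cxlflash_mark_contexts_error() calls unmap_context()
 * on every known context during reset; the user's next MMIO access then
 * faults into cxlflash_mmap_fault(), which sees err_recovery_active and
 * maps in the all-ones page from get_err_page() instead of the AFU MMIO
 * space, signalling the application to initiate recovery.
 */
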
/**
 * get_err_page() - obtains and allocates the error notification page
 * @cfg:	Internal structure associated with the host.
 *
 * Return: error notification page on success, NULL on failure
 */
static struct page *get_err_page(struct cxlflash_cfg *cfg)
{
	struct page *err_page = global.err_page;
	struct device *dev = &cfg->dev->dev;

	if (unlikely(!err_page)) {
		err_page = alloc_page(GFP_KERNEL);
		if (unlikely(!err_page)) {
			dev_err(dev, "%s: Unable to allocate err_page\n",
				__func__);
			goto out;
		}

		memset(page_address(err_page), -1, PAGE_SIZE);

		/* Serialize update w/ other threads to avoid a leak */
		mutex_lock(&global.mutex);
		if (likely(!global.err_page))
			global.err_page = err_page;
		else {
			__free_page(err_page);
			err_page = global.err_page;
		}
		mutex_unlock(&global.mutex);
	}

out:
	dev_dbg(dev, "%s: returning err_page=%p\n", __func__, err_page);
	return err_page;
}

/**
 * cxlflash_mmap_fault() - mmap fault handler for adapter file descriptor
 * @vmf:	VM fault associated with current fault.
 *
 * To support error notification via MMIO, faults are 'caught' by this routine
 * that was inserted before passing back the adapter file descriptor on attach.
 * When a fault occurs, this routine evaluates if error recovery is active and
 * if so, installs the error page to 'notify' the user about the error state.
 * During normal operation, the fault is simply handled by the original fault
 * handler that was installed by CXL services as part of initializing the
 * adapter file descriptor. The VMA's page protection bits are toggled to
 * indicate cached/not-cached depending on the memory backing the fault.
 *
 * Return: 0 on success, VM_FAULT_SIGBUS on failure
 */
static int cxlflash_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct file *file = vma->vm_file;
	struct cxl_context *ctx = cxl_fops_get_context(file);
	struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
						cxl_fops);
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	struct page *err_page = NULL;
	enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
	int rc = 0;
	int ctxid;

	ctxid = cxl_process_element(ctx);
	if (unlikely(ctxid < 0)) {
		dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
			__func__, ctx, ctxid);
		goto err;
	}

	ctxi = get_context(cfg, ctxid, file, ctrl);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid);
		goto err;
	}

	dev_dbg(dev, "%s: fault for context %d\n", __func__, ctxid);

	if (likely(!ctxi->err_recovery_active)) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		rc = ctxi->cxl_mmap_vmops->fault(vmf);
	} else {
		dev_dbg(dev, "%s: err recovery active, use err_page\n",
			__func__);

		err_page = get_err_page(cfg);
		if (unlikely(!err_page)) {
			dev_err(dev, "%s: Could not get err_page\n", __func__);
			rc = VM_FAULT_RETRY;
			goto out;
		}

		get_page(err_page);
		vmf->page = err_page;
		vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
	}

out:
	if (likely(ctxi))
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;

err:
	rc = VM_FAULT_SIGBUS;
	goto out;
}

/*
 * Local MMAP vmops to 'catch' faults
 */
static const struct vm_operations_struct cxlflash_mmap_vmops = {
	.fault = cxlflash_mmap_fault,
};

/**
 * cxlflash_cxl_mmap() - mmap handler for adapter file descriptor
 * @file:	File installed with adapter file descriptor.
 * @vma:	VM area associated with mapping.
 *
 * Installs local mmap vmops to 'catch' faults for error notification support.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct cxl_context *ctx = cxl_fops_get_context(file);
	struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
						cxl_fops);
	struct device *dev = &cfg->dev->dev;
	struct ctx_info *ctxi = NULL;
	enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
	int ctxid;
	int rc = 0;

	ctxid = cxl_process_element(ctx);
	if (unlikely(ctxid < 0)) {
		dev_err(dev, "%s: Context %p was closed ctxid=%d\n",
			__func__, ctx, ctxid);
		rc = -EIO;
		goto out;
	}

	ctxi = get_context(cfg, ctxid, file, ctrl);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context ctxid=%d\n", __func__, ctxid);
		rc = -EIO;
		goto out;
	}

	dev_dbg(dev, "%s: mmap for context %d\n", __func__, ctxid);

	rc = cxl_fd_mmap(file, vma);
	if (likely(!rc)) {
		/* Insert ourself in the mmap fault handler path */
		ctxi->cxl_mmap_vmops = vma->vm_ops;
		vma->vm_ops = &cxlflash_mmap_vmops;
	}

out:
	if (likely(ctxi))
		put_context(ctxi);
	return rc;
}

const struct file_operations cxlflash_cxl_fops = {
	.owner = THIS_MODULE,
	.mmap = cxlflash_cxl_mmap,
	.release = cxlflash_cxl_release,
};

/**
 * cxlflash_mark_contexts_error() - move contexts to error state and list
 * @cfg:	Internal structure associated with the host.
 *
 * A context is only moved over to the error list when there are no outstanding
 * references to it. This ensures that a running operation has completed.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg)
{
	int i, rc = 0;
	struct ctx_info *ctxi = NULL;

	mutex_lock(&cfg->ctx_tbl_list_mutex);

	for (i = 0; i < MAX_CONTEXT; i++) {
		ctxi = cfg->ctx_tbl[i];
		if (ctxi) {
			mutex_lock(&ctxi->mutex);
			cfg->ctx_tbl[i] = NULL;
			list_add(&ctxi->list, &cfg->ctx_err_recovery);
			ctxi->err_recovery_active = true;
			ctxi->ctrl_map = NULL;
			unmap_context(ctxi);
			mutex_unlock(&ctxi->mutex);
		}
	}

	mutex_unlock(&cfg->ctx_tbl_list_mutex);
	return rc;
}

/*
 * Dummy NULL fops
 */
static const struct file_operations null_fops = {
	.owner = THIS_MODULE,
};

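/*
 * The ioctl read/write semaphore convention used below, in sketch form
 * (a simplified rendering of what the cxlflash_ioctl() and check_state()
 * comments describe; error handling omitted):
 *
 *	ioctl thread (reader side)
 *		down_read(&cfg->ioctl_rwsem);
 *		...
 *		up_read(&cfg->ioctl_rwsem);	released across long waits,
 *		...				see check_state()/read_cap16()
 *		down_read(&cfg->ioctl_rwsem);
 *		...
 *		up_read(&cfg->ioctl_rwsem);
 *
 *	draining thread (writer side, e.g. reset/EEH handling)
 *		down_write(&cfg->ioctl_rwsem);
 *		...all in-flight ioctls have completed or let go...
 *		up_write(&cfg->ioctl_rwsem);
 */
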
/**
 * check_state() - checks and responds to the current adapter state
 * @cfg:	Internal structure associated with the host.
 *
 * This routine can block and should only be used in process context.
 * It assumes that the caller is an ioctl thread and holding the ioctl
 * read semaphore. This is temporarily let up across the wait to allow
 * for draining actively running ioctls. Also note that when waking up
 * from waiting in reset, the state is unknown and must be checked again
 * before proceeding.
 *
 * Return: 0 on success, -errno on failure
 */
int check_state(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

retry:
	switch (cfg->state) {
	case STATE_RESET:
		dev_dbg(dev, "%s: Reset state, going to wait...\n", __func__);
		up_read(&cfg->ioctl_rwsem);
		rc = wait_event_interruptible(cfg->reset_waitq,
					      cfg->state != STATE_RESET);
		down_read(&cfg->ioctl_rwsem);
		if (unlikely(rc))
			break;
		goto retry;
	case STATE_FAILTERM:
		dev_dbg(dev, "%s: Failed/Terminating\n", __func__);
		rc = -ENODEV;
		break;
	default:
		break;
	}

	return rc;
}

/**
 * cxlflash_disk_attach() - attach a LUN to a context
 * @sdev:	SCSI device associated with LUN.
 * @attach:	Attach ioctl data structure.
 *
 * Creates a context and attaches the LUN to it. A LUN can only be attached
 * one time to a context (subsequent attaches for the same context/LUN pair
 * are not supported). Additional LUNs can be attached to a context by
 * specifying the 'reuse' flag defined in the cxlflash_ioctl.h header.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_disk_attach(struct scsi_device *sdev,
				struct dk_cxlflash_attach *attach)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct cxl_ioctl_start_work *work;
	struct ctx_info *ctxi = NULL;
	struct lun_access *lun_access = NULL;
	int rc = 0;
	u32 perms;
	int ctxid = -1;
	u64 flags = 0UL;
	u64 rctxid = 0UL;
	struct file *file = NULL;

	struct cxl_context *ctx = NULL;

	int fd = -1;

	if (attach->num_interrupts > 4) {
		dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n",
			__func__, attach->num_interrupts);
		rc = -EINVAL;
		goto out;
	}

	if (gli->max_lba == 0) {
		dev_dbg(dev, "%s: No capacity info for LUN=%016llx\n",
			__func__, lli->lun_id[sdev->channel]);
		rc = read_cap16(sdev, lli);
		if (rc) {
			dev_err(dev, "%s: Invalid device rc=%d\n",
				__func__, rc);
			rc = -ENODEV;
			goto out;
		}
		dev_dbg(dev, "%s: LBA = %016llx\n", __func__, gli->max_lba);
		dev_dbg(dev, "%s: BLK_LEN = %08x\n", __func__, gli->blk_len);
	}

	if (attach->hdr.flags & DK_CXLFLASH_ATTACH_REUSE_CONTEXT) {
		rctxid = attach->context_id;
		ctxi = get_context(cfg, rctxid, NULL, 0);
		if (!ctxi) {
			dev_dbg(dev, "%s: Bad context rctxid=%016llx\n",
				__func__, rctxid);
			rc = -EINVAL;
			goto out;
		}

		list_for_each_entry(lun_access, &ctxi->luns, list)
			if (lun_access->lli == lli) {
				dev_dbg(dev, "%s: Already attached\n",
					__func__);
				rc = -EINVAL;
				goto out;
			}
	}

	rc = scsi_device_get(sdev);
	if (unlikely(rc)) {
		dev_err(dev, "%s: Unable to get sdev reference\n", __func__);
		goto out;
	}

	lun_access = kzalloc(sizeof(*lun_access), GFP_KERNEL);
	if (unlikely(!lun_access)) {
		dev_err(dev, "%s: Unable to allocate lun_access\n", __func__);
		rc = -ENOMEM;
		goto err;
	}

	lun_access->lli = lli;
	lun_access->sdev = sdev;

	/* Non-NULL context indicates reuse (another context reference) */
	if (ctxi) {
		dev_dbg(dev, "%s: Reusing context for LUN rctxid=%016llx\n",
			__func__, rctxid);
		kref_get(&ctxi->kref);
		list_add(&lun_access->list, &ctxi->luns);
		goto out_attach;
	}

	ctxi = create_context(cfg);
	if (unlikely(!ctxi)) {
		dev_err(dev, "%s: Failed to create context ctxid=%d\n",
			__func__, ctxid);
		goto err;
	}

	ctx = cxl_dev_context_init(cfg->dev);
	if (IS_ERR_OR_NULL(ctx)) {
		dev_err(dev, "%s: Could not initialize context %p\n",
			__func__, ctx);
		rc = -ENODEV;
		goto err;
	}

	work = &ctxi->work;
	work->num_interrupts = attach->num_interrupts;
	work->flags = CXL_START_WORK_NUM_IRQS;

	rc = cxl_start_work(ctx, work);
	if (unlikely(rc)) {
		dev_dbg(dev, "%s: Could not start context rc=%d\n",
			__func__, rc);
		goto err;
	}

	ctxid = cxl_process_element(ctx);
	if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
		dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
		rc = -EPERM;
		goto err;
	}

	file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
	if (unlikely(fd < 0)) {
		rc = -ENODEV;
		dev_err(dev, "%s: Could not get file descriptor\n", __func__);
		goto err;
	}

	/* Translate read/write O_* flags from fcntl.h to AFU permission bits */
	perms = SISL_RHT_PERM(attach->hdr.flags + 1);

	/* Context mutex is locked upon return */
	init_context(ctxi, cfg, ctx, ctxid, file, perms);

	rc = afu_attach(cfg, ctxi);
	if (unlikely(rc)) {
		dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
		goto err;
	}

	/*
	 * No error paths after this point. Once the fd is installed it's
	 * visible to user space and can't be undone safely on this thread.
	 * There is no need to worry about a deadlock here because no one
	 * knows about us yet; we can be the only one holding our mutex.
	 */
	list_add(&lun_access->list, &ctxi->luns);
	mutex_lock(&cfg->ctx_tbl_list_mutex);
	mutex_lock(&ctxi->mutex);
	cfg->ctx_tbl[ctxid] = ctxi;
	mutex_unlock(&cfg->ctx_tbl_list_mutex);
	fd_install(fd, file);

out_attach:
	if (fd != -1)
		flags |= DK_CXLFLASH_APP_CLOSE_ADAP_FD;
	if (afu_is_sq_cmd_mode(afu))
		flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE;

	attach->hdr.return_flags = flags;
	attach->context_id = ctxi->ctxid;
	attach->block_size = gli->blk_len;
	attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
	attach->last_lba = gli->max_lba;
	attach->max_xfer = sdev->host->max_sectors * MAX_SECTOR_UNIT;
	attach->max_xfer /= gli->blk_len;

out:
	attach->adap_fd = fd;

	if (ctxi)
		put_context(ctxi);

	dev_dbg(dev, "%s: returning ctxid=%d fd=%d bs=%lld rc=%d llba=%lld\n",
		__func__, ctxid, fd, attach->block_size, rc, attach->last_lba);
	return rc;

err:
	/* Cleanup CXL context; okay to 'stop' even if it was not started */
	if (!IS_ERR_OR_NULL(ctx)) {
		cxl_stop_context(ctx);
		cxl_release_context(ctx);
		ctx = NULL;
	}

	/*
	 * Here, we're overriding the fops with a dummy all-NULL fops because
	 * fput() calls the release fop, which will cause us to mistakenly
	 * call into the CXL code. Rather than try to add yet more complexity
	 * to that routine (cxlflash_cxl_release) we should try to fix the
	 * issue here.
	 */
	if (fd > 0) {
		file->f_op = &null_fops;
		fput(file);
		put_unused_fd(fd);
		fd = -1;
		file = NULL;
	}

	/* Cleanup our context */
	if (ctxi) {
		destroy_context(cfg, ctxi);
		ctxi = NULL;
	}

	kfree(lun_access);
	scsi_device_put(sdev);
	goto out;
}

/**
 * recover_context() - recovers a context in error
 * @cfg:	Internal structure associated with the host.
 * @ctxi:	Context to release.
 * @adap_fd:	Adapter file descriptor associated with new/recovered context.
 *
 * Re-establishes the state for a context-in-error.
 *
 * Return: 0 on success, -errno on failure
 */
static int recover_context(struct cxlflash_cfg *cfg,
			   struct ctx_info *ctxi,
			   int *adap_fd)
{
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	int fd = -1;
	int ctxid = -1;
	struct file *file;
	struct cxl_context *ctx;
	struct afu *afu = cfg->afu;

	ctx = cxl_dev_context_init(cfg->dev);
	if (IS_ERR_OR_NULL(ctx)) {
		dev_err(dev, "%s: Could not initialize context %p\n",
			__func__, ctx);
		rc = -ENODEV;
		goto out;
	}

	rc = cxl_start_work(ctx, &ctxi->work);
	if (unlikely(rc)) {
		dev_dbg(dev, "%s: Could not start context rc=%d\n",
			__func__, rc);
		goto err1;
	}

	ctxid = cxl_process_element(ctx);
	if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) {
		dev_err(dev, "%s: ctxid=%d invalid\n", __func__, ctxid);
		rc = -EPERM;
		goto err2;
	}

	file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
	if (unlikely(fd < 0)) {
		rc = -ENODEV;
		dev_err(dev, "%s: Could not get file descriptor\n", __func__);
		goto err2;
	}

	/* Update with new MMIO area based on updated context id */
	ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;

	rc = afu_attach(cfg, ctxi);
	if (rc) {
		dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
		goto err3;
	}

	/*
	 * No error paths after this point. Once the fd is installed it's
	 * visible to user space and can't be undone safely on this thread.
	 */
	ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
	ctxi->ctx = ctx;
	ctxi->file = file;

	/*
	 * Put context back in table (note the reinit of the context list);
	 * we must first drop the context's mutex and then acquire it in
	 * order with the table/list mutex to avoid a deadlock - safe to do
	 * here because no one can find us at this moment in time.
	 */
	mutex_unlock(&ctxi->mutex);
	mutex_lock(&cfg->ctx_tbl_list_mutex);
	mutex_lock(&ctxi->mutex);
	list_del_init(&ctxi->list);
	cfg->ctx_tbl[ctxid] = ctxi;
	mutex_unlock(&cfg->ctx_tbl_list_mutex);
	fd_install(fd, file);
	*adap_fd = fd;
out:
	dev_dbg(dev, "%s: returning ctxid=%d fd=%d rc=%d\n",
		__func__, ctxid, fd, rc);
	return rc;

err3:
	fput(file);
	put_unused_fd(fd);
err2:
	cxl_stop_context(ctx);
err1:
	cxl_release_context(ctx);
	goto out;
}

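/*
 * From the application's perspective the recovery path below plays out
 * roughly as follows (illustrative summary): an MMIO access faults in the
 * error page (see cxlflash_mmap_fault()), the application issues
 * DK_CXLFLASH_RECOVER_AFU, and on success receives a fresh adapter file
 * descriptor along with return flags indicating the context was reset; it
 * then re-establishes its MMIO mapping before resuming I/O.
 */
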
/**
 * cxlflash_afu_recover() - initiates AFU recovery
 * @sdev:	SCSI device associated with LUN.
 * @recover:	Recover ioctl data structure.
 *
 * Only a single recovery is allowed at a time to avoid exhausting CXL
 * resources (leading to recovery failure) in the event that we're up
 * against the maximum number of contexts limit. For similar reasons,
 * a context recovery is retried if there are multiple recoveries taking
 * place at the same time and the failure was due to CXL services being
 * unable to keep up.
 *
 * As this routine is called on ioctl context, it holds the ioctl r/w
 * semaphore that is used to drain ioctls in recovery scenarios. The
 * implementation to achieve the pacing described above (a local mutex)
 * requires that the ioctl r/w semaphore be dropped and reacquired to
 * avoid a 3-way deadlock when multiple process recoveries operate in
 * parallel.
 *
 * Because a user can detect an error condition before the kernel, it is
 * quite possible for this routine to act as the kernel's EEH detection
 * source (MMIO read of mbox_r). Because of this, there is a window of
 * time where an EEH might have been detected but not yet 'serviced'
 * (callback invoked, causing the device to enter reset state). To avoid
 * looping in this routine during that window, a 1 second sleep is in place
 * between the time the MMIO failure is detected and the time a wait on the
 * reset wait queue is attempted via check_state().
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_afu_recover(struct scsi_device *sdev,
				struct dk_cxlflash_recover_afu *recover)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct afu *afu = cfg->afu;
	struct ctx_info *ctxi = NULL;
	struct mutex *mutex = &cfg->ctx_recovery_mutex;
	u64 flags;
	u64 ctxid = DECODE_CTXID(recover->context_id),
	    rctxid = recover->context_id;
	long reg;
	int lretry = 20; /* up to 2 seconds */
	int new_adap_fd = -1;
	int rc = 0;

	atomic_inc(&cfg->recovery_threads);
	up_read(&cfg->ioctl_rwsem);
	rc = mutex_lock_interruptible(mutex);
	down_read(&cfg->ioctl_rwsem);
	if (rc)
		goto out;
	rc = check_state(cfg);
	if (rc) {
		dev_err(dev, "%s: Failed state rc=%d\n", __func__, rc);
		rc = -ENODEV;
		goto out;
	}

	dev_dbg(dev, "%s: reason=%016llx rctxid=%016llx\n",
		__func__, recover->reason, rctxid);

retry:
	/* Ensure that this process is attached to the context */
	ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
		rc = -EINVAL;
		goto out;
	}

	if (ctxi->err_recovery_active) {
retry_recover:
		rc = recover_context(cfg, ctxi, &new_adap_fd);
		if (unlikely(rc)) {
			dev_err(dev, "%s: Recovery failed ctxid=%llu rc=%d\n",
				__func__, ctxid, rc);
			if ((rc == -ENODEV) &&
			    ((atomic_read(&cfg->recovery_threads) > 1) ||
			     (lretry--))) {
				dev_dbg(dev, "%s: Going to try again\n",
					__func__);
				mutex_unlock(mutex);
				msleep(100);
				rc = mutex_lock_interruptible(mutex);
				if (rc)
					goto out;
				goto retry_recover;
			}

			goto out;
		}

		ctxi->err_recovery_active = false;

		flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD |
			DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET;
		if (afu_is_sq_cmd_mode(afu))
			flags |= DK_CXLFLASH_CONTEXT_SQ_CMD_MODE;

		recover->hdr.return_flags = flags;
		recover->context_id = ctxi->ctxid;
		recover->adap_fd = new_adap_fd;
		recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
		goto out;
	}

	/* Test if in error state */
	reg = readq_be(&afu->ctrl_map->mbox_r);
	if (reg == -1) {
		dev_dbg(dev, "%s: MMIO fail, wait for recovery.\n", __func__);

		/*
		 * Before checking the state, put back the context obtained with
		 * get_context() as it is no longer needed and sleep for a short
		 * period of time (see prolog notes).
		 */
		put_context(ctxi);
		ctxi = NULL;
		ssleep(1);
		rc = check_state(cfg);
		if (unlikely(rc))
			goto out;
		goto retry;
	}

	dev_dbg(dev, "%s: MMIO working, no recovery required\n", __func__);
out:
	if (likely(ctxi))
		put_context(ctxi);
	mutex_unlock(mutex);
	atomic_dec_if_positive(&cfg->recovery_threads);
	return rc;
}

/**
 * process_sense() - evaluates and processes sense data
 * @sdev:	SCSI device associated with LUN.
 * @verify:	Verify ioctl data structure.
 *
 * Return: 0 on success, -errno on failure
 */
static int process_sense(struct scsi_device *sdev,
			 struct dk_cxlflash_verify *verify)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	u64 prev_lba = gli->max_lba;
	struct scsi_sense_hdr sshdr = { 0 };
	int rc = 0;

	rc = scsi_normalize_sense((const u8 *)&verify->sense_data,
				  DK_CXLFLASH_VERIFY_SENSE_LEN, &sshdr);
	if (!rc) {
		dev_err(dev, "%s: Failed to normalize sense data\n", __func__);
		rc = -EINVAL;
		goto out;
	}

	switch (sshdr.sense_key) {
	case NO_SENSE:
	case RECOVERED_ERROR:
		/* fall through */
	case NOT_READY:
		break;
	case UNIT_ATTENTION:
		switch (sshdr.asc) {
		case 0x29: /* Power on Reset or Device Reset */
			/* fall through */
		case 0x2A: /* Device settings/capacity changed */
			rc = read_cap16(sdev, lli);
			if (rc) {
				rc = -ENODEV;
				break;
			}
			if (prev_lba != gli->max_lba)
				dev_dbg(dev, "%s: Capacity changed old=%lld "
					"new=%lld\n", __func__, prev_lba,
					gli->max_lba);
			break;
		case 0x3F: /* Report LUNs changed, Rescan. */
			scsi_scan_host(cfg->host);
			break;
		default:
			rc = -EIO;
			break;
		}
		break;
	default:
		rc = -EIO;
		break;
	}
out:
	dev_dbg(dev, "%s: sense_key %x asc %x ascq %x rc %d\n", __func__,
		sshdr.sense_key, sshdr.asc, sshdr.ascq, rc);
	return rc;
}

/**
 * cxlflash_disk_verify() - verifies a LUN is the same and handles size changes
 * @sdev:	SCSI device associated with LUN.
 * @verify:	Verify ioctl data structure.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_disk_verify(struct scsi_device *sdev,
				struct dk_cxlflash_verify *verify)
{
	int rc = 0;
	struct ctx_info *ctxi = NULL;
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;
	struct sisl_rht_entry *rhte = NULL;
	res_hndl_t rhndl = verify->rsrc_handle;
	u64 ctxid = DECODE_CTXID(verify->context_id),
	    rctxid = verify->context_id;
	u64 last_lba = 0;

	dev_dbg(dev, "%s: ctxid=%llu rhndl=%016llx, hint=%016llx, "
		"flags=%016llx\n", __func__, ctxid, verify->rsrc_handle,
		verify->hint, verify->hdr.flags);

	ctxi = get_context(cfg, rctxid, lli, 0);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
		rc = -EINVAL;
		goto out;
	}

	rhte = get_rhte(ctxi, rhndl, lli);
	if (unlikely(!rhte)) {
		dev_dbg(dev, "%s: Bad resource handle rhndl=%d\n",
			__func__, rhndl);
		rc = -EINVAL;
		goto out;
	}

	/*
	 * Look at the hint/sense to see if it requires us to redrive
	 * inquiry (i.e. the Unit attention is due to the WWN changing).
	 */
	if (verify->hint & DK_CXLFLASH_VERIFY_HINT_SENSE) {
		/* Can't hold mutex across process_sense/read_cap16,
		 * since we could have an intervening EEH event.
		 */
		ctxi->unavail = true;
		mutex_unlock(&ctxi->mutex);
		rc = process_sense(sdev, verify);
		if (unlikely(rc)) {
			dev_err(dev, "%s: Failed to validate sense data (%d)\n",
				__func__, rc);
			mutex_lock(&ctxi->mutex);
			ctxi->unavail = false;
			goto out;
		}
		mutex_lock(&ctxi->mutex);
		ctxi->unavail = false;
	}

	switch (gli->mode) {
	case MODE_PHYSICAL:
		last_lba = gli->max_lba;
		break;
	case MODE_VIRTUAL:
		/* Cast lxt_cnt to u64 for multiply to be treated as 64bit op */
		last_lba = ((u64)rhte->lxt_cnt * MC_CHUNK_SIZE * gli->blk_len);
		last_lba /= CXLFLASH_BLOCK_SIZE;
		last_lba--;
		break;
	default:
		WARN(1, "Unsupported LUN mode!");
	}

	verify->last_lba = last_lba;

out:
	if (likely(ctxi))
		put_context(ctxi);
	dev_dbg(dev, "%s: returning rc=%d llba=%llx\n",
		__func__, rc, verify->last_lba);
	return rc;
}

/**
 * decode_ioctl() - translates an encoded ioctl to an easily identifiable string
 * @cmd:	The ioctl command to decode.
 *
 * Return: A string identifying the decoded ioctl.
 */
static char *decode_ioctl(int cmd)
{
	switch (cmd) {
	case DK_CXLFLASH_ATTACH:
		return __stringify_1(DK_CXLFLASH_ATTACH);
	case DK_CXLFLASH_USER_DIRECT:
		return __stringify_1(DK_CXLFLASH_USER_DIRECT);
	case DK_CXLFLASH_USER_VIRTUAL:
		return __stringify_1(DK_CXLFLASH_USER_VIRTUAL);
	case DK_CXLFLASH_VLUN_RESIZE:
		return __stringify_1(DK_CXLFLASH_VLUN_RESIZE);
	case DK_CXLFLASH_RELEASE:
		return __stringify_1(DK_CXLFLASH_RELEASE);
	case DK_CXLFLASH_DETACH:
		return __stringify_1(DK_CXLFLASH_DETACH);
	case DK_CXLFLASH_VERIFY:
		return __stringify_1(DK_CXLFLASH_VERIFY);
	case DK_CXLFLASH_VLUN_CLONE:
		return __stringify_1(DK_CXLFLASH_VLUN_CLONE);
	case DK_CXLFLASH_RECOVER_AFU:
		return __stringify_1(DK_CXLFLASH_RECOVER_AFU);
	case DK_CXLFLASH_MANAGE_LUN:
		return __stringify_1(DK_CXLFLASH_MANAGE_LUN);
	}

	return "UNKNOWN";
}

/**
 * cxlflash_disk_direct_open() - opens a direct (physical) disk
 * @sdev: SCSI device associated with LUN.
 * @arg: UDirect ioctl data structure.
 *
 * On successful return, the user is informed of the resource handle
 * to be used to identify the direct lun and the size (in blocks) of
 * the direct lun in last LBA format.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct llun_info *lli = sdev->hostdata;
	struct glun_info *gli = lli->parent;

	struct dk_cxlflash_udirect *pphys = (struct dk_cxlflash_udirect *)arg;

	u64 ctxid = DECODE_CTXID(pphys->context_id),
	    rctxid = pphys->context_id;
	u64 lun_size = 0;
	u64 last_lba = 0;
	u64 rsrc_handle = -1;
	u32 port = CHAN2PORT(sdev->channel);

	int rc = 0;

	struct ctx_info *ctxi = NULL;
	struct sisl_rht_entry *rhte = NULL;

	dev_dbg(dev, "%s: ctxid=%llu ls=%llu\n", __func__, ctxid, lun_size);

	rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false);
	if (unlikely(rc)) {
		dev_dbg(dev, "%s: Failed attach to LUN (PHYSICAL)\n", __func__);
		goto out;
	}

	ctxi = get_context(cfg, rctxid, lli, 0);
	if (unlikely(!ctxi)) {
		dev_dbg(dev, "%s: Bad context ctxid=%llu\n", __func__, ctxid);
		rc = -EINVAL;
		goto err1;
	}

	rhte = rhte_checkout(ctxi, lli);
	if (unlikely(!rhte)) {
		dev_dbg(dev, "%s: Too many opens ctxid=%lld\n",
			__func__, ctxid);
		rc = -EMFILE;	/* too many opens */
		goto err1;
	}

	rsrc_handle = (rhte - ctxi->rht_start);

	rht_format1(rhte, lli->lun_id[sdev->channel], ctxi->rht_perms, port);
	cxlflash_afu_sync(afu, ctxid, rsrc_handle, AFU_LW_SYNC);

	last_lba = gli->max_lba;
	pphys->hdr.return_flags = 0;
	pphys->last_lba = last_lba;
	pphys->rsrc_handle = rsrc_handle;

out:
	if (likely(ctxi))
		put_context(ctxi);
	dev_dbg(dev, "%s: returning handle=%llu rc=%d llba=%llu\n",
		__func__, rsrc_handle, rc, last_lba);
	return rc;

err1:
	cxlflash_lun_detach(gli);
	goto out;
}

/**
 * ioctl_common() - common IOCTL handler for driver
 * @sdev: SCSI device associated with LUN.
 * @cmd: IOCTL command.
 *
 * Handles common fencing operations that are valid for multiple ioctls. Always
 * allow through ioctls that are cleanup oriented in nature, even when operating
 * in a failed/terminating state.
 *
 * Return: 0 on success, -errno on failure
 */
static int ioctl_common(struct scsi_device *sdev, int cmd)
{
	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct llun_info *lli = sdev->hostdata;
	int rc = 0;

	if (unlikely(!lli)) {
		dev_dbg(dev, "%s: Unknown LUN\n", __func__);
		rc = -EINVAL;
		goto out;
	}

	rc = check_state(cfg);
	if (unlikely(rc) && (cfg->state == STATE_FAILTERM)) {
		switch (cmd) {
		case DK_CXLFLASH_VLUN_RESIZE:
		case DK_CXLFLASH_RELEASE:
		case DK_CXLFLASH_DETACH:
			dev_dbg(dev, "%s: Command override rc=%d\n",
				__func__, rc);
			rc = 0;
			break;
		}
	}
out:
	return rc;
}

/**
 * cxlflash_ioctl() - IOCTL handler for driver
 * @sdev: SCSI device associated with LUN.
 * @cmd: IOCTL command.
 * @arg: Userspace ioctl data structure.
 *
 * A read/write semaphore is used to implement a 'drain' of currently
 * running ioctls. The read semaphore is taken at the beginning of each
 * ioctl thread and released upon concluding execution. Additionally the
 * semaphore should be released and then reacquired in any ioctl execution
 * path which will wait for an event to occur that is outside the scope of
 * the ioctl (i.e. an adapter reset). To drain the ioctls currently running,
 * a thread simply needs to acquire the write semaphore.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
	typedef int (*sioctl) (struct scsi_device *, void *);

	struct cxlflash_cfg *cfg = shost_priv(sdev->host);
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct dk_cxlflash_hdr *hdr;
	char buf[sizeof(union cxlflash_ioctls)];
	size_t size = 0;
	bool known_ioctl = false;
	int idx;
	int rc = 0;
	struct Scsi_Host *shost = sdev->host;
	sioctl do_ioctl = NULL;

	static const struct {
		size_t size;
		sioctl ioctl;
	} ioctl_tbl[] = {	/* NOTE: order matters here */
	{sizeof(struct dk_cxlflash_attach), (sioctl)cxlflash_disk_attach},
	{sizeof(struct dk_cxlflash_udirect), cxlflash_disk_direct_open},
	{sizeof(struct dk_cxlflash_release), (sioctl)cxlflash_disk_release},
	{sizeof(struct dk_cxlflash_detach), (sioctl)cxlflash_disk_detach},
	{sizeof(struct dk_cxlflash_verify), (sioctl)cxlflash_disk_verify},
	{sizeof(struct dk_cxlflash_recover_afu), (sioctl)cxlflash_afu_recover},
	{sizeof(struct dk_cxlflash_manage_lun), (sioctl)cxlflash_manage_lun},
	{sizeof(struct dk_cxlflash_uvirtual), cxlflash_disk_virtual_open},
	{sizeof(struct dk_cxlflash_resize), (sioctl)cxlflash_vlun_resize},
	{sizeof(struct dk_cxlflash_clone), (sioctl)cxlflash_disk_clone},
	};

	/* Hold read semaphore so we can drain if needed */
	down_read(&cfg->ioctl_rwsem);

	/* Restrict command set to physical support only for internal LUN */
	if (afu->internal_lun)
		switch (cmd) {
		case DK_CXLFLASH_RELEASE:
		case DK_CXLFLASH_USER_VIRTUAL:
		case DK_CXLFLASH_VLUN_RESIZE:
		case DK_CXLFLASH_VLUN_CLONE:
			dev_dbg(dev, "%s: %s not supported for lun_mode=%d\n",
				__func__, decode_ioctl(cmd), afu->internal_lun);
			rc = -EINVAL;
			goto cxlflash_ioctl_exit;
		}
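
	/*
	 * Decode the ioctl: LUN-based commands are fenced via ioctl_common()
	 * before dispatch, and the table above supplies the payload size and
	 * handler. The DK_CXLFLASH_* commands are numbered contiguously from
	 * DK_CXLFLASH_ATTACH, which is what the _IOC_NR() arithmetic below
	 * relies upon.
	 */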

	switch (cmd) {
	case DK_CXLFLASH_ATTACH:
	case DK_CXLFLASH_USER_DIRECT:
	case DK_CXLFLASH_RELEASE:
	case DK_CXLFLASH_DETACH:
	case DK_CXLFLASH_VERIFY:
	case DK_CXLFLASH_RECOVER_AFU:
	case DK_CXLFLASH_USER_VIRTUAL:
	case DK_CXLFLASH_VLUN_RESIZE:
	case DK_CXLFLASH_VLUN_CLONE:
		dev_dbg(dev, "%s: %s (%08X) on dev(%d/%d/%d/%llu)\n",
			__func__, decode_ioctl(cmd), cmd, shost->host_no,
			sdev->channel, sdev->id, sdev->lun);
		rc = ioctl_common(sdev, cmd);
		if (unlikely(rc))
			goto cxlflash_ioctl_exit;

		/* fall through */

	case DK_CXLFLASH_MANAGE_LUN:
		known_ioctl = true;
		idx = _IOC_NR(cmd) - _IOC_NR(DK_CXLFLASH_ATTACH);
		size = ioctl_tbl[idx].size;
		do_ioctl = ioctl_tbl[idx].ioctl;

		if (likely(do_ioctl))
			break;

		/* fall through */
	default:
		rc = -EINVAL;
		goto cxlflash_ioctl_exit;
	}

	if (unlikely(copy_from_user(&buf, arg, size))) {
		dev_err(dev, "%s: copy_from_user() fail "
			"size=%lu cmd=%d (%s) arg=%p\n",
			__func__, size, cmd, decode_ioctl(cmd), arg);
		rc = -EFAULT;
		goto cxlflash_ioctl_exit;
	}

	hdr = (struct dk_cxlflash_hdr *)&buf;
	if (hdr->version != DK_CXLFLASH_VERSION_0) {
		dev_dbg(dev, "%s: Version %u not supported for %s\n",
			__func__, hdr->version, decode_ioctl(cmd));
		rc = -EINVAL;
		goto cxlflash_ioctl_exit;
	}

	if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->rsvd[2] || hdr->return_flags) {
		dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
		rc = -EINVAL;
		goto cxlflash_ioctl_exit;
	}

	rc = do_ioctl(sdev, (void *)&buf);
	if (likely(!rc))
		if (unlikely(copy_to_user(arg, &buf, size))) {
			dev_err(dev, "%s: copy_to_user() fail "
				"size=%lu cmd=%d (%s) arg=%p\n",
				__func__, size, cmd, decode_ioctl(cmd), arg);
			rc = -EFAULT;
		}

	/* fall through to exit */

cxlflash_ioctl_exit:
	up_read(&cfg->ioctl_rwsem);
	if (unlikely(rc && known_ioctl))
		dev_err(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
			"returned rc %d\n", __func__,
			decode_ioctl(cmd), cmd, shost->host_no,
			sdev->channel, sdev->id, sdev->lun, rc);
	else
		dev_dbg(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
			"returned rc %d\n", __func__, decode_ioctl(cmd),
			cmd, shost->host_no, sdev->channel, sdev->id,
			sdev->lun, rc);
	return rc;
}
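
/*
 * Illustrative only (not part of the driver): a minimal sketch of how a
 * user-space caller might drive the verify ioctl handled above. It assumes
 * 'fd' is an open file descriptor for the cxlflash device special file and
 * 'sense_buf' holds sense data from a prior unit attention; the structure
 * and field names come from <uapi/scsi/cxlflash_ioctl.h> as used in this
 * file, and all error handling is omitted.
 *
 *	struct dk_cxlflash_verify verify;
 *
 *	memset(&verify, 0, sizeof(verify));	// rsvd/return_flags must be 0
 *	verify.hdr.version = DK_CXLFLASH_VERSION_0;
 *	verify.context_id = ctxid;	// from a prior DK_CXLFLASH_ATTACH
 *	verify.rsrc_handle = rhndl;	// from a prior DK_CXLFLASH_USER_DIRECT
 *	verify.hint = DK_CXLFLASH_VERIFY_HINT_SENSE;
 *	memcpy(&verify.sense_data, sense_buf, DK_CXLFLASH_VERIFY_SENSE_LEN);
 *
 *	if (ioctl(fd, DK_CXLFLASH_VERIFY, &verify) == 0)
 *		last_lba = verify.last_lba;	// updated size in last-LBA form
 */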