1 /* 2 * CXL Flash Device Driver 3 * 4 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation 5 * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation 6 * 7 * Copyright (C) 2015 IBM Corporation 8 * 9 * This program is free software; you can redistribute it and/or 10 * modify it under the terms of the GNU General Public License 11 * as published by the Free Software Foundation; either version 12 * 2 of the License, or (at your option) any later version. 13 */ 14 15 #include <linux/delay.h> 16 #include <linux/file.h> 17 #include <linux/syscalls.h> 18 #include <misc/cxl.h> 19 #include <asm/unaligned.h> 20 21 #include <scsi/scsi.h> 22 #include <scsi/scsi_host.h> 23 #include <scsi/scsi_cmnd.h> 24 #include <scsi/scsi_eh.h> 25 #include <uapi/scsi/cxlflash_ioctl.h> 26 27 #include "sislite.h" 28 #include "common.h" 29 #include "vlun.h" 30 #include "superpipe.h" 31 32 struct cxlflash_global global; 33 34 /** 35 * marshal_rele_to_resize() - translate release to resize structure 36 * @rele: Source structure from which to translate/copy. 37 * @resize: Destination structure for the translate/copy. 38 */ 39 static void marshal_rele_to_resize(struct dk_cxlflash_release *release, 40 struct dk_cxlflash_resize *resize) 41 { 42 resize->hdr = release->hdr; 43 resize->context_id = release->context_id; 44 resize->rsrc_handle = release->rsrc_handle; 45 } 46 47 /** 48 * marshal_det_to_rele() - translate detach to release structure 49 * @detach: Destination structure for the translate/copy. 50 * @rele: Source structure from which to translate/copy. 51 */ 52 static void marshal_det_to_rele(struct dk_cxlflash_detach *detach, 53 struct dk_cxlflash_release *release) 54 { 55 release->hdr = detach->hdr; 56 release->context_id = detach->context_id; 57 } 58 59 /** 60 * cxlflash_free_errpage() - frees resources associated with global error page 61 */ 62 void cxlflash_free_errpage(void) 63 { 64 65 mutex_lock(&global.mutex); 66 if (global.err_page) { 67 __free_page(global.err_page); 68 global.err_page = NULL; 69 } 70 mutex_unlock(&global.mutex); 71 } 72 73 /** 74 * cxlflash_stop_term_user_contexts() - stops/terminates known user contexts 75 * @cfg: Internal structure associated with the host. 76 * 77 * When the host needs to go down, all users must be quiesced and their 78 * memory freed. This is accomplished by putting the contexts in error 79 * state which will notify the user and let them 'drive' the tear down. 80 * Meanwhile, this routine camps until all user contexts have been removed. 81 */ 82 void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg) 83 { 84 struct device *dev = &cfg->dev->dev; 85 int i, found; 86 87 cxlflash_mark_contexts_error(cfg); 88 89 while (true) { 90 found = false; 91 92 for (i = 0; i < MAX_CONTEXT; i++) 93 if (cfg->ctx_tbl[i]) { 94 found = true; 95 break; 96 } 97 98 if (!found && list_empty(&cfg->ctx_err_recovery)) 99 return; 100 101 dev_dbg(dev, "%s: Wait for user contexts to quiesce...\n", 102 __func__); 103 wake_up_all(&cfg->reset_waitq); 104 ssleep(1); 105 } 106 } 107 108 /** 109 * find_error_context() - locates a context by cookie on the error recovery list 110 * @cfg: Internal structure associated with the host. 111 * @rctxid: Desired context by id. 112 * @file: Desired context by file. 113 * 114 * Return: Found context on success, NULL on failure 115 */ 116 static struct ctx_info *find_error_context(struct cxlflash_cfg *cfg, u64 rctxid, 117 struct file *file) 118 { 119 struct ctx_info *ctxi; 120 121 list_for_each_entry(ctxi, &cfg->ctx_err_recovery, list) 122 if ((ctxi->ctxid == rctxid) || (ctxi->file == file)) 123 return ctxi; 124 125 return NULL; 126 } 127 128 /** 129 * get_context() - obtains a validated and locked context reference 130 * @cfg: Internal structure associated with the host. 131 * @rctxid: Desired context (raw, un-decoded format). 132 * @arg: LUN information or file associated with request. 133 * @ctx_ctrl: Control information to 'steer' desired lookup. 134 * 135 * NOTE: despite the name pid, in linux, current->pid actually refers 136 * to the lightweight process id (tid) and can change if the process is 137 * multi threaded. The tgid remains constant for the process and only changes 138 * when the process of fork. For all intents and purposes, think of tgid 139 * as a pid in the traditional sense. 140 * 141 * Return: Validated context on success, NULL on failure 142 */ 143 struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid, 144 void *arg, enum ctx_ctrl ctx_ctrl) 145 { 146 struct device *dev = &cfg->dev->dev; 147 struct ctx_info *ctxi = NULL; 148 struct lun_access *lun_access = NULL; 149 struct file *file = NULL; 150 struct llun_info *lli = arg; 151 u64 ctxid = DECODE_CTXID(rctxid); 152 int rc; 153 pid_t pid = current->tgid, ctxpid = 0; 154 155 if (ctx_ctrl & CTX_CTRL_FILE) { 156 lli = NULL; 157 file = (struct file *)arg; 158 } 159 160 if (ctx_ctrl & CTX_CTRL_CLONE) 161 pid = current->parent->tgid; 162 163 if (likely(ctxid < MAX_CONTEXT)) { 164 while (true) { 165 mutex_lock(&cfg->ctx_tbl_list_mutex); 166 ctxi = cfg->ctx_tbl[ctxid]; 167 if (ctxi) 168 if ((file && (ctxi->file != file)) || 169 (!file && (ctxi->ctxid != rctxid))) 170 ctxi = NULL; 171 172 if ((ctx_ctrl & CTX_CTRL_ERR) || 173 (!ctxi && (ctx_ctrl & CTX_CTRL_ERR_FALLBACK))) 174 ctxi = find_error_context(cfg, rctxid, file); 175 if (!ctxi) { 176 mutex_unlock(&cfg->ctx_tbl_list_mutex); 177 goto out; 178 } 179 180 /* 181 * Need to acquire ownership of the context while still 182 * under the table/list lock to serialize with a remove 183 * thread. Use the 'try' to avoid stalling the 184 * table/list lock for a single context. 185 * 186 * Note that the lock order is: 187 * 188 * cfg->ctx_tbl_list_mutex -> ctxi->mutex 189 * 190 * Therefore release ctx_tbl_list_mutex before retrying. 191 */ 192 rc = mutex_trylock(&ctxi->mutex); 193 mutex_unlock(&cfg->ctx_tbl_list_mutex); 194 if (rc) 195 break; /* got the context's lock! */ 196 } 197 198 if (ctxi->unavail) 199 goto denied; 200 201 ctxpid = ctxi->pid; 202 if (likely(!(ctx_ctrl & CTX_CTRL_NOPID))) 203 if (pid != ctxpid) 204 goto denied; 205 206 if (lli) { 207 list_for_each_entry(lun_access, &ctxi->luns, list) 208 if (lun_access->lli == lli) 209 goto out; 210 goto denied; 211 } 212 } 213 214 out: 215 dev_dbg(dev, "%s: rctxid=%016llX ctxinfo=%p ctxpid=%u pid=%u " 216 "ctx_ctrl=%u\n", __func__, rctxid, ctxi, ctxpid, pid, 217 ctx_ctrl); 218 219 return ctxi; 220 221 denied: 222 mutex_unlock(&ctxi->mutex); 223 ctxi = NULL; 224 goto out; 225 } 226 227 /** 228 * put_context() - release a context that was retrieved from get_context() 229 * @ctxi: Context to release. 230 * 231 * For now, releasing the context equates to unlocking it's mutex. 232 */ 233 void put_context(struct ctx_info *ctxi) 234 { 235 mutex_unlock(&ctxi->mutex); 236 } 237 238 /** 239 * afu_attach() - attach a context to the AFU 240 * @cfg: Internal structure associated with the host. 241 * @ctxi: Context to attach. 242 * 243 * Upon setting the context capabilities, they must be confirmed with 244 * a read back operation as the context might have been closed since 245 * the mailbox was unlocked. When this occurs, registration is failed. 246 * 247 * Return: 0 on success, -errno on failure 248 */ 249 static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi) 250 { 251 struct device *dev = &cfg->dev->dev; 252 struct afu *afu = cfg->afu; 253 struct sisl_ctrl_map __iomem *ctrl_map = ctxi->ctrl_map; 254 int rc = 0; 255 u64 val; 256 257 /* Unlock cap and restrict user to read/write cmds in translated mode */ 258 readq_be(&ctrl_map->mbox_r); 259 val = (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD); 260 writeq_be(val, &ctrl_map->ctx_cap); 261 val = readq_be(&ctrl_map->ctx_cap); 262 if (val != (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD)) { 263 dev_err(dev, "%s: ctx may be closed val=%016llX\n", 264 __func__, val); 265 rc = -EAGAIN; 266 goto out; 267 } 268 269 /* Set up MMIO registers pointing to the RHT */ 270 writeq_be((u64)ctxi->rht_start, &ctrl_map->rht_start); 271 val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(afu->ctx_hndl)); 272 writeq_be(val, &ctrl_map->rht_cnt_id); 273 out: 274 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 275 return rc; 276 } 277 278 /** 279 * read_cap16() - issues a SCSI READ_CAP16 command 280 * @sdev: SCSI device associated with LUN. 281 * @lli: LUN destined for capacity request. 282 * 283 * The READ_CAP16 can take quite a while to complete. Should an EEH occur while 284 * in scsi_execute(), the EEH handler will attempt to recover. As part of the 285 * recovery, the handler drains all currently running ioctls, waiting until they 286 * have completed before proceeding with a reset. As this routine is used on the 287 * ioctl path, this can create a condition where the EEH handler becomes stuck, 288 * infinitely waiting for this ioctl thread. To avoid this behavior, temporarily 289 * unmark this thread as an ioctl thread by releasing the ioctl read semaphore. 290 * This will allow the EEH handler to proceed with a recovery while this thread 291 * is still running. Once the scsi_execute() returns, reacquire the ioctl read 292 * semaphore and check the adapter state in case it changed while inside of 293 * scsi_execute(). The state check will wait if the adapter is still being 294 * recovered or return a failure if the recovery failed. In the event that the 295 * adapter reset failed, simply return the failure as the ioctl would be unable 296 * to continue. 297 * 298 * Note that the above puts a requirement on this routine to only be called on 299 * an ioctl thread. 300 * 301 * Return: 0 on success, -errno on failure 302 */ 303 static int read_cap16(struct scsi_device *sdev, struct llun_info *lli) 304 { 305 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; 306 struct device *dev = &cfg->dev->dev; 307 struct glun_info *gli = lli->parent; 308 u8 *cmd_buf = NULL; 309 u8 *scsi_cmd = NULL; 310 u8 *sense_buf = NULL; 311 int rc = 0; 312 int result = 0; 313 int retry_cnt = 0; 314 u32 to = CMD_TIMEOUT * HZ; 315 316 retry: 317 cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL); 318 scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL); 319 sense_buf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL); 320 if (unlikely(!cmd_buf || !scsi_cmd || !sense_buf)) { 321 rc = -ENOMEM; 322 goto out; 323 } 324 325 scsi_cmd[0] = SERVICE_ACTION_IN_16; /* read cap(16) */ 326 scsi_cmd[1] = SAI_READ_CAPACITY_16; /* service action */ 327 put_unaligned_be32(CMD_BUFSIZE, &scsi_cmd[10]); 328 329 dev_dbg(dev, "%s: %ssending cmd(0x%x)\n", __func__, 330 retry_cnt ? "re" : "", scsi_cmd[0]); 331 332 /* Drop the ioctl read semahpore across lengthy call */ 333 up_read(&cfg->ioctl_rwsem); 334 result = scsi_execute(sdev, scsi_cmd, DMA_FROM_DEVICE, cmd_buf, 335 CMD_BUFSIZE, sense_buf, to, CMD_RETRIES, 0, NULL); 336 down_read(&cfg->ioctl_rwsem); 337 rc = check_state(cfg); 338 if (rc) { 339 dev_err(dev, "%s: Failed state! result=0x08%X\n", 340 __func__, result); 341 rc = -ENODEV; 342 goto out; 343 } 344 345 if (driver_byte(result) == DRIVER_SENSE) { 346 result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */ 347 if (result & SAM_STAT_CHECK_CONDITION) { 348 struct scsi_sense_hdr sshdr; 349 350 scsi_normalize_sense(sense_buf, SCSI_SENSE_BUFFERSIZE, 351 &sshdr); 352 switch (sshdr.sense_key) { 353 case NO_SENSE: 354 case RECOVERED_ERROR: 355 /* fall through */ 356 case NOT_READY: 357 result &= ~SAM_STAT_CHECK_CONDITION; 358 break; 359 case UNIT_ATTENTION: 360 switch (sshdr.asc) { 361 case 0x29: /* Power on Reset or Device Reset */ 362 /* fall through */ 363 case 0x2A: /* Device capacity changed */ 364 case 0x3F: /* Report LUNs changed */ 365 /* Retry the command once more */ 366 if (retry_cnt++ < 1) { 367 kfree(cmd_buf); 368 kfree(scsi_cmd); 369 kfree(sense_buf); 370 goto retry; 371 } 372 } 373 break; 374 default: 375 break; 376 } 377 } 378 } 379 380 if (result) { 381 dev_err(dev, "%s: command failed, result=0x%x\n", 382 __func__, result); 383 rc = -EIO; 384 goto out; 385 } 386 387 /* 388 * Read cap was successful, grab values from the buffer; 389 * note that we don't need to worry about unaligned access 390 * as the buffer is allocated on an aligned boundary. 391 */ 392 mutex_lock(&gli->mutex); 393 gli->max_lba = be64_to_cpu(*((__be64 *)&cmd_buf[0])); 394 gli->blk_len = be32_to_cpu(*((__be32 *)&cmd_buf[8])); 395 mutex_unlock(&gli->mutex); 396 397 out: 398 kfree(cmd_buf); 399 kfree(scsi_cmd); 400 kfree(sense_buf); 401 402 dev_dbg(dev, "%s: maxlba=%lld blklen=%d rc=%d\n", 403 __func__, gli->max_lba, gli->blk_len, rc); 404 return rc; 405 } 406 407 /** 408 * get_rhte() - obtains validated resource handle table entry reference 409 * @ctxi: Context owning the resource handle. 410 * @rhndl: Resource handle associated with entry. 411 * @lli: LUN associated with request. 412 * 413 * Return: Validated RHTE on success, NULL on failure 414 */ 415 struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl, 416 struct llun_info *lli) 417 { 418 struct sisl_rht_entry *rhte = NULL; 419 420 if (unlikely(!ctxi->rht_start)) { 421 pr_debug("%s: Context does not have allocated RHT!\n", 422 __func__); 423 goto out; 424 } 425 426 if (unlikely(rhndl >= MAX_RHT_PER_CONTEXT)) { 427 pr_debug("%s: Bad resource handle! (%d)\n", __func__, rhndl); 428 goto out; 429 } 430 431 if (unlikely(ctxi->rht_lun[rhndl] != lli)) { 432 pr_debug("%s: Bad resource handle LUN! (%d)\n", 433 __func__, rhndl); 434 goto out; 435 } 436 437 rhte = &ctxi->rht_start[rhndl]; 438 if (unlikely(rhte->nmask == 0)) { 439 pr_debug("%s: Unopened resource handle! (%d)\n", 440 __func__, rhndl); 441 rhte = NULL; 442 goto out; 443 } 444 445 out: 446 return rhte; 447 } 448 449 /** 450 * rhte_checkout() - obtains free/empty resource handle table entry 451 * @ctxi: Context owning the resource handle. 452 * @lli: LUN associated with request. 453 * 454 * Return: Free RHTE on success, NULL on failure 455 */ 456 struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi, 457 struct llun_info *lli) 458 { 459 struct sisl_rht_entry *rhte = NULL; 460 int i; 461 462 /* Find a free RHT entry */ 463 for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) 464 if (ctxi->rht_start[i].nmask == 0) { 465 rhte = &ctxi->rht_start[i]; 466 ctxi->rht_out++; 467 break; 468 } 469 470 if (likely(rhte)) 471 ctxi->rht_lun[i] = lli; 472 473 pr_debug("%s: returning rhte=%p (%d)\n", __func__, rhte, i); 474 return rhte; 475 } 476 477 /** 478 * rhte_checkin() - releases a resource handle table entry 479 * @ctxi: Context owning the resource handle. 480 * @rhte: RHTE to release. 481 */ 482 void rhte_checkin(struct ctx_info *ctxi, 483 struct sisl_rht_entry *rhte) 484 { 485 u32 rsrc_handle = rhte - ctxi->rht_start; 486 487 rhte->nmask = 0; 488 rhte->fp = 0; 489 ctxi->rht_out--; 490 ctxi->rht_lun[rsrc_handle] = NULL; 491 ctxi->rht_needs_ws[rsrc_handle] = false; 492 } 493 494 /** 495 * rhte_format1() - populates a RHTE for format 1 496 * @rhte: RHTE to populate. 497 * @lun_id: LUN ID of LUN associated with RHTE. 498 * @perm: Desired permissions for RHTE. 499 * @port_sel: Port selection mask 500 */ 501 static void rht_format1(struct sisl_rht_entry *rhte, u64 lun_id, u32 perm, 502 u32 port_sel) 503 { 504 /* 505 * Populate the Format 1 RHT entry for direct access (physical 506 * LUN) using the synchronization sequence defined in the 507 * SISLite specification. 508 */ 509 struct sisl_rht_entry_f1 dummy = { 0 }; 510 struct sisl_rht_entry_f1 *rhte_f1 = (struct sisl_rht_entry_f1 *)rhte; 511 512 memset(rhte_f1, 0, sizeof(*rhte_f1)); 513 rhte_f1->fp = SISL_RHT_FP(1U, 0); 514 dma_wmb(); /* Make setting of format bit visible */ 515 516 rhte_f1->lun_id = lun_id; 517 dma_wmb(); /* Make setting of LUN id visible */ 518 519 /* 520 * Use a dummy RHT Format 1 entry to build the second dword 521 * of the entry that must be populated in a single write when 522 * enabled (valid bit set to TRUE). 523 */ 524 dummy.valid = 0x80; 525 dummy.fp = SISL_RHT_FP(1U, perm); 526 dummy.port_sel = port_sel; 527 rhte_f1->dw = dummy.dw; 528 529 dma_wmb(); /* Make remaining RHT entry fields visible */ 530 } 531 532 /** 533 * cxlflash_lun_attach() - attaches a user to a LUN and manages the LUN's mode 534 * @gli: LUN to attach. 535 * @mode: Desired mode of the LUN. 536 * @locked: Mutex status on current thread. 537 * 538 * Return: 0 on success, -errno on failure 539 */ 540 int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked) 541 { 542 int rc = 0; 543 544 if (!locked) 545 mutex_lock(&gli->mutex); 546 547 if (gli->mode == MODE_NONE) 548 gli->mode = mode; 549 else if (gli->mode != mode) { 550 pr_debug("%s: LUN operating in mode %d, requested mode %d\n", 551 __func__, gli->mode, mode); 552 rc = -EINVAL; 553 goto out; 554 } 555 556 gli->users++; 557 WARN_ON(gli->users <= 0); 558 out: 559 pr_debug("%s: Returning rc=%d gli->mode=%u gli->users=%u\n", 560 __func__, rc, gli->mode, gli->users); 561 if (!locked) 562 mutex_unlock(&gli->mutex); 563 return rc; 564 } 565 566 /** 567 * cxlflash_lun_detach() - detaches a user from a LUN and resets the LUN's mode 568 * @gli: LUN to detach. 569 * 570 * When resetting the mode, terminate block allocation resources as they 571 * are no longer required (service is safe to call even when block allocation 572 * resources were not present - such as when transitioning from physical mode). 573 * These resources will be reallocated when needed (subsequent transition to 574 * virtual mode). 575 */ 576 void cxlflash_lun_detach(struct glun_info *gli) 577 { 578 mutex_lock(&gli->mutex); 579 WARN_ON(gli->mode == MODE_NONE); 580 if (--gli->users == 0) { 581 gli->mode = MODE_NONE; 582 cxlflash_ba_terminate(&gli->blka.ba_lun); 583 } 584 pr_debug("%s: gli->users=%u\n", __func__, gli->users); 585 WARN_ON(gli->users < 0); 586 mutex_unlock(&gli->mutex); 587 } 588 589 /** 590 * _cxlflash_disk_release() - releases the specified resource entry 591 * @sdev: SCSI device associated with LUN. 592 * @ctxi: Context owning resources. 593 * @release: Release ioctl data structure. 594 * 595 * For LUNs in virtual mode, the virtual LUN associated with the specified 596 * resource handle is resized to 0 prior to releasing the RHTE. Note that the 597 * AFU sync should _not_ be performed when the context is sitting on the error 598 * recovery list. A context on the error recovery list is not known to the AFU 599 * due to reset. When the context is recovered, it will be reattached and made 600 * known again to the AFU. 601 * 602 * Return: 0 on success, -errno on failure 603 */ 604 int _cxlflash_disk_release(struct scsi_device *sdev, 605 struct ctx_info *ctxi, 606 struct dk_cxlflash_release *release) 607 { 608 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; 609 struct device *dev = &cfg->dev->dev; 610 struct llun_info *lli = sdev->hostdata; 611 struct glun_info *gli = lli->parent; 612 struct afu *afu = cfg->afu; 613 bool put_ctx = false; 614 615 struct dk_cxlflash_resize size; 616 res_hndl_t rhndl = release->rsrc_handle; 617 618 int rc = 0; 619 u64 ctxid = DECODE_CTXID(release->context_id), 620 rctxid = release->context_id; 621 622 struct sisl_rht_entry *rhte; 623 struct sisl_rht_entry_f1 *rhte_f1; 624 625 dev_dbg(dev, "%s: ctxid=%llu rhndl=0x%llx gli->mode=%u gli->users=%u\n", 626 __func__, ctxid, release->rsrc_handle, gli->mode, gli->users); 627 628 if (!ctxi) { 629 ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK); 630 if (unlikely(!ctxi)) { 631 dev_dbg(dev, "%s: Bad context! (%llu)\n", 632 __func__, ctxid); 633 rc = -EINVAL; 634 goto out; 635 } 636 637 put_ctx = true; 638 } 639 640 rhte = get_rhte(ctxi, rhndl, lli); 641 if (unlikely(!rhte)) { 642 dev_dbg(dev, "%s: Bad resource handle! (%d)\n", 643 __func__, rhndl); 644 rc = -EINVAL; 645 goto out; 646 } 647 648 /* 649 * Resize to 0 for virtual LUNS by setting the size 650 * to 0. This will clear LXT_START and LXT_CNT fields 651 * in the RHT entry and properly sync with the AFU. 652 * 653 * Afterwards we clear the remaining fields. 654 */ 655 switch (gli->mode) { 656 case MODE_VIRTUAL: 657 marshal_rele_to_resize(release, &size); 658 size.req_size = 0; 659 rc = _cxlflash_vlun_resize(sdev, ctxi, &size); 660 if (rc) { 661 dev_dbg(dev, "%s: resize failed rc %d\n", __func__, rc); 662 goto out; 663 } 664 665 break; 666 case MODE_PHYSICAL: 667 /* 668 * Clear the Format 1 RHT entry for direct access 669 * (physical LUN) using the synchronization sequence 670 * defined in the SISLite specification. 671 */ 672 rhte_f1 = (struct sisl_rht_entry_f1 *)rhte; 673 674 rhte_f1->valid = 0; 675 dma_wmb(); /* Make revocation of RHT entry visible */ 676 677 rhte_f1->lun_id = 0; 678 dma_wmb(); /* Make clearing of LUN id visible */ 679 680 rhte_f1->dw = 0; 681 dma_wmb(); /* Make RHT entry bottom-half clearing visible */ 682 683 if (!ctxi->err_recovery_active) 684 cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC); 685 break; 686 default: 687 WARN(1, "Unsupported LUN mode!"); 688 goto out; 689 } 690 691 rhte_checkin(ctxi, rhte); 692 cxlflash_lun_detach(gli); 693 694 out: 695 if (put_ctx) 696 put_context(ctxi); 697 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 698 return rc; 699 } 700 701 int cxlflash_disk_release(struct scsi_device *sdev, 702 struct dk_cxlflash_release *release) 703 { 704 return _cxlflash_disk_release(sdev, NULL, release); 705 } 706 707 /** 708 * destroy_context() - releases a context 709 * @cfg: Internal structure associated with the host. 710 * @ctxi: Context to release. 711 * 712 * This routine is safe to be called with a a non-initialized context. 713 * Also note that the routine conditionally checks for the existence 714 * of the context control map before clearing the RHT registers and 715 * context capabilities because it is possible to destroy a context 716 * while the context is in the error state (previous mapping was 717 * removed [so there is no need to worry about clearing] and context 718 * is waiting for a new mapping). 719 */ 720 static void destroy_context(struct cxlflash_cfg *cfg, 721 struct ctx_info *ctxi) 722 { 723 struct afu *afu = cfg->afu; 724 725 if (ctxi->initialized) { 726 WARN_ON(!list_empty(&ctxi->luns)); 727 728 /* Clear RHT registers and drop all capabilities for context */ 729 if (afu->afu_map && ctxi->ctrl_map) { 730 writeq_be(0, &ctxi->ctrl_map->rht_start); 731 writeq_be(0, &ctxi->ctrl_map->rht_cnt_id); 732 writeq_be(0, &ctxi->ctrl_map->ctx_cap); 733 } 734 } 735 736 /* Free memory associated with context */ 737 free_page((ulong)ctxi->rht_start); 738 kfree(ctxi->rht_needs_ws); 739 kfree(ctxi->rht_lun); 740 kfree(ctxi); 741 } 742 743 /** 744 * create_context() - allocates and initializes a context 745 * @cfg: Internal structure associated with the host. 746 * 747 * Return: Allocated context on success, NULL on failure 748 */ 749 static struct ctx_info *create_context(struct cxlflash_cfg *cfg) 750 { 751 struct device *dev = &cfg->dev->dev; 752 struct ctx_info *ctxi = NULL; 753 struct llun_info **lli = NULL; 754 u8 *ws = NULL; 755 struct sisl_rht_entry *rhte; 756 757 ctxi = kzalloc(sizeof(*ctxi), GFP_KERNEL); 758 lli = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*lli)), GFP_KERNEL); 759 ws = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*ws)), GFP_KERNEL); 760 if (unlikely(!ctxi || !lli || !ws)) { 761 dev_err(dev, "%s: Unable to allocate context!\n", __func__); 762 goto err; 763 } 764 765 rhte = (struct sisl_rht_entry *)get_zeroed_page(GFP_KERNEL); 766 if (unlikely(!rhte)) { 767 dev_err(dev, "%s: Unable to allocate RHT!\n", __func__); 768 goto err; 769 } 770 771 ctxi->rht_lun = lli; 772 ctxi->rht_needs_ws = ws; 773 ctxi->rht_start = rhte; 774 out: 775 return ctxi; 776 777 err: 778 kfree(ws); 779 kfree(lli); 780 kfree(ctxi); 781 ctxi = NULL; 782 goto out; 783 } 784 785 /** 786 * init_context() - initializes a previously allocated context 787 * @ctxi: Previously allocated context 788 * @cfg: Internal structure associated with the host. 789 * @ctx: Previously obtained CXL context reference. 790 * @ctxid: Previously obtained process element associated with CXL context. 791 * @file: Previously obtained file associated with CXL context. 792 * @perms: User-specified permissions. 793 */ 794 static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg, 795 struct cxl_context *ctx, int ctxid, struct file *file, 796 u32 perms) 797 { 798 struct afu *afu = cfg->afu; 799 800 ctxi->rht_perms = perms; 801 ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl; 802 ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid); 803 ctxi->pid = current->tgid; /* tgid = pid */ 804 ctxi->ctx = ctx; 805 ctxi->cfg = cfg; 806 ctxi->file = file; 807 ctxi->initialized = true; 808 mutex_init(&ctxi->mutex); 809 kref_init(&ctxi->kref); 810 INIT_LIST_HEAD(&ctxi->luns); 811 INIT_LIST_HEAD(&ctxi->list); /* initialize for list_empty() */ 812 } 813 814 /** 815 * remove_context() - context kref release handler 816 * @kref: Kernel reference associated with context to be removed. 817 * 818 * When a context no longer has any references it can safely be removed 819 * from global access and destroyed. Note that it is assumed the thread 820 * relinquishing access to the context holds its mutex. 821 */ 822 static void remove_context(struct kref *kref) 823 { 824 struct ctx_info *ctxi = container_of(kref, struct ctx_info, kref); 825 struct cxlflash_cfg *cfg = ctxi->cfg; 826 u64 ctxid = DECODE_CTXID(ctxi->ctxid); 827 828 /* Remove context from table/error list */ 829 WARN_ON(!mutex_is_locked(&ctxi->mutex)); 830 ctxi->unavail = true; 831 mutex_unlock(&ctxi->mutex); 832 mutex_lock(&cfg->ctx_tbl_list_mutex); 833 mutex_lock(&ctxi->mutex); 834 835 if (!list_empty(&ctxi->list)) 836 list_del(&ctxi->list); 837 cfg->ctx_tbl[ctxid] = NULL; 838 mutex_unlock(&cfg->ctx_tbl_list_mutex); 839 mutex_unlock(&ctxi->mutex); 840 841 /* Context now completely uncoupled/unreachable */ 842 destroy_context(cfg, ctxi); 843 } 844 845 /** 846 * _cxlflash_disk_detach() - detaches a LUN from a context 847 * @sdev: SCSI device associated with LUN. 848 * @ctxi: Context owning resources. 849 * @detach: Detach ioctl data structure. 850 * 851 * As part of the detach, all per-context resources associated with the LUN 852 * are cleaned up. When detaching the last LUN for a context, the context 853 * itself is cleaned up and released. 854 * 855 * Return: 0 on success, -errno on failure 856 */ 857 static int _cxlflash_disk_detach(struct scsi_device *sdev, 858 struct ctx_info *ctxi, 859 struct dk_cxlflash_detach *detach) 860 { 861 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; 862 struct device *dev = &cfg->dev->dev; 863 struct llun_info *lli = sdev->hostdata; 864 struct lun_access *lun_access, *t; 865 struct dk_cxlflash_release rel; 866 bool put_ctx = false; 867 868 int i; 869 int rc = 0; 870 u64 ctxid = DECODE_CTXID(detach->context_id), 871 rctxid = detach->context_id; 872 873 dev_dbg(dev, "%s: ctxid=%llu\n", __func__, ctxid); 874 875 if (!ctxi) { 876 ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK); 877 if (unlikely(!ctxi)) { 878 dev_dbg(dev, "%s: Bad context! (%llu)\n", 879 __func__, ctxid); 880 rc = -EINVAL; 881 goto out; 882 } 883 884 put_ctx = true; 885 } 886 887 /* Cleanup outstanding resources tied to this LUN */ 888 if (ctxi->rht_out) { 889 marshal_det_to_rele(detach, &rel); 890 for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) { 891 if (ctxi->rht_lun[i] == lli) { 892 rel.rsrc_handle = i; 893 _cxlflash_disk_release(sdev, ctxi, &rel); 894 } 895 896 /* No need to loop further if we're done */ 897 if (ctxi->rht_out == 0) 898 break; 899 } 900 } 901 902 /* Take our LUN out of context, free the node */ 903 list_for_each_entry_safe(lun_access, t, &ctxi->luns, list) 904 if (lun_access->lli == lli) { 905 list_del(&lun_access->list); 906 kfree(lun_access); 907 lun_access = NULL; 908 break; 909 } 910 911 /* 912 * Release the context reference and the sdev reference that 913 * bound this LUN to the context. 914 */ 915 if (kref_put(&ctxi->kref, remove_context)) 916 put_ctx = false; 917 scsi_device_put(sdev); 918 out: 919 if (put_ctx) 920 put_context(ctxi); 921 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 922 return rc; 923 } 924 925 static int cxlflash_disk_detach(struct scsi_device *sdev, 926 struct dk_cxlflash_detach *detach) 927 { 928 return _cxlflash_disk_detach(sdev, NULL, detach); 929 } 930 931 /** 932 * cxlflash_cxl_release() - release handler for adapter file descriptor 933 * @inode: File-system inode associated with fd. 934 * @file: File installed with adapter file descriptor. 935 * 936 * This routine is the release handler for the fops registered with 937 * the CXL services on an initial attach for a context. It is called 938 * when a close (explicity by the user or as part of a process tear 939 * down) is performed on the adapter file descriptor returned to the 940 * user. The user should be aware that explicitly performing a close 941 * considered catastrophic and subsequent usage of the superpipe API 942 * with previously saved off tokens will fail. 943 * 944 * This routine derives the context reference and calls detach for 945 * each LUN associated with the context.The final detach operation 946 * causes the context itself to be freed. With exception to when the 947 * CXL process element (context id) lookup fails (a case that should 948 * theoretically never occur), every call into this routine results 949 * in a complete freeing of a context. 950 * 951 * Return: 0 on success 952 */ 953 static int cxlflash_cxl_release(struct inode *inode, struct file *file) 954 { 955 struct cxl_context *ctx = cxl_fops_get_context(file); 956 struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg, 957 cxl_fops); 958 struct device *dev = &cfg->dev->dev; 959 struct ctx_info *ctxi = NULL; 960 struct dk_cxlflash_detach detach = { { 0 }, 0 }; 961 struct lun_access *lun_access, *t; 962 enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE; 963 int ctxid; 964 965 ctxid = cxl_process_element(ctx); 966 if (unlikely(ctxid < 0)) { 967 dev_err(dev, "%s: Context %p was closed! (%d)\n", 968 __func__, ctx, ctxid); 969 goto out; 970 } 971 972 ctxi = get_context(cfg, ctxid, file, ctrl); 973 if (unlikely(!ctxi)) { 974 ctxi = get_context(cfg, ctxid, file, ctrl | CTX_CTRL_CLONE); 975 if (!ctxi) { 976 dev_dbg(dev, "%s: Context %d already free!\n", 977 __func__, ctxid); 978 goto out_release; 979 } 980 981 dev_dbg(dev, "%s: Another process owns context %d!\n", 982 __func__, ctxid); 983 put_context(ctxi); 984 goto out; 985 } 986 987 dev_dbg(dev, "%s: close for context %d\n", __func__, ctxid); 988 989 detach.context_id = ctxi->ctxid; 990 list_for_each_entry_safe(lun_access, t, &ctxi->luns, list) 991 _cxlflash_disk_detach(lun_access->sdev, ctxi, &detach); 992 out_release: 993 cxl_fd_release(inode, file); 994 out: 995 dev_dbg(dev, "%s: returning\n", __func__); 996 return 0; 997 } 998 999 /** 1000 * unmap_context() - clears a previously established mapping 1001 * @ctxi: Context owning the mapping. 1002 * 1003 * This routine is used to switch between the error notification page 1004 * (dummy page of all 1's) and the real mapping (established by the CXL 1005 * fault handler). 1006 */ 1007 static void unmap_context(struct ctx_info *ctxi) 1008 { 1009 unmap_mapping_range(ctxi->file->f_mapping, 0, 0, 1); 1010 } 1011 1012 /** 1013 * get_err_page() - obtains and allocates the error notification page 1014 * 1015 * Return: error notification page on success, NULL on failure 1016 */ 1017 static struct page *get_err_page(void) 1018 { 1019 struct page *err_page = global.err_page; 1020 1021 if (unlikely(!err_page)) { 1022 err_page = alloc_page(GFP_KERNEL); 1023 if (unlikely(!err_page)) { 1024 pr_err("%s: Unable to allocate err_page!\n", __func__); 1025 goto out; 1026 } 1027 1028 memset(page_address(err_page), -1, PAGE_SIZE); 1029 1030 /* Serialize update w/ other threads to avoid a leak */ 1031 mutex_lock(&global.mutex); 1032 if (likely(!global.err_page)) 1033 global.err_page = err_page; 1034 else { 1035 __free_page(err_page); 1036 err_page = global.err_page; 1037 } 1038 mutex_unlock(&global.mutex); 1039 } 1040 1041 out: 1042 pr_debug("%s: returning err_page=%p\n", __func__, err_page); 1043 return err_page; 1044 } 1045 1046 /** 1047 * cxlflash_mmap_fault() - mmap fault handler for adapter file descriptor 1048 * @vma: VM area associated with mapping. 1049 * @vmf: VM fault associated with current fault. 1050 * 1051 * To support error notification via MMIO, faults are 'caught' by this routine 1052 * that was inserted before passing back the adapter file descriptor on attach. 1053 * When a fault occurs, this routine evaluates if error recovery is active and 1054 * if so, installs the error page to 'notify' the user about the error state. 1055 * During normal operation, the fault is simply handled by the original fault 1056 * handler that was installed by CXL services as part of initializing the 1057 * adapter file descriptor. The VMA's page protection bits are toggled to 1058 * indicate cached/not-cached depending on the memory backing the fault. 1059 * 1060 * Return: 0 on success, VM_FAULT_SIGBUS on failure 1061 */ 1062 static int cxlflash_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 1063 { 1064 struct file *file = vma->vm_file; 1065 struct cxl_context *ctx = cxl_fops_get_context(file); 1066 struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg, 1067 cxl_fops); 1068 struct device *dev = &cfg->dev->dev; 1069 struct ctx_info *ctxi = NULL; 1070 struct page *err_page = NULL; 1071 enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE; 1072 int rc = 0; 1073 int ctxid; 1074 1075 ctxid = cxl_process_element(ctx); 1076 if (unlikely(ctxid < 0)) { 1077 dev_err(dev, "%s: Context %p was closed! (%d)\n", 1078 __func__, ctx, ctxid); 1079 goto err; 1080 } 1081 1082 ctxi = get_context(cfg, ctxid, file, ctrl); 1083 if (unlikely(!ctxi)) { 1084 dev_dbg(dev, "%s: Bad context! (%d)\n", __func__, ctxid); 1085 goto err; 1086 } 1087 1088 dev_dbg(dev, "%s: fault for context %d\n", __func__, ctxid); 1089 1090 if (likely(!ctxi->err_recovery_active)) { 1091 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 1092 rc = ctxi->cxl_mmap_vmops->fault(vma, vmf); 1093 } else { 1094 dev_dbg(dev, "%s: err recovery active, use err_page!\n", 1095 __func__); 1096 1097 err_page = get_err_page(); 1098 if (unlikely(!err_page)) { 1099 dev_err(dev, "%s: Could not obtain error page!\n", 1100 __func__); 1101 rc = VM_FAULT_RETRY; 1102 goto out; 1103 } 1104 1105 get_page(err_page); 1106 vmf->page = err_page; 1107 vma->vm_page_prot = pgprot_cached(vma->vm_page_prot); 1108 } 1109 1110 out: 1111 if (likely(ctxi)) 1112 put_context(ctxi); 1113 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 1114 return rc; 1115 1116 err: 1117 rc = VM_FAULT_SIGBUS; 1118 goto out; 1119 } 1120 1121 /* 1122 * Local MMAP vmops to 'catch' faults 1123 */ 1124 static const struct vm_operations_struct cxlflash_mmap_vmops = { 1125 .fault = cxlflash_mmap_fault, 1126 }; 1127 1128 /** 1129 * cxlflash_cxl_mmap() - mmap handler for adapter file descriptor 1130 * @file: File installed with adapter file descriptor. 1131 * @vma: VM area associated with mapping. 1132 * 1133 * Installs local mmap vmops to 'catch' faults for error notification support. 1134 * 1135 * Return: 0 on success, -errno on failure 1136 */ 1137 static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma) 1138 { 1139 struct cxl_context *ctx = cxl_fops_get_context(file); 1140 struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg, 1141 cxl_fops); 1142 struct device *dev = &cfg->dev->dev; 1143 struct ctx_info *ctxi = NULL; 1144 enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE; 1145 int ctxid; 1146 int rc = 0; 1147 1148 ctxid = cxl_process_element(ctx); 1149 if (unlikely(ctxid < 0)) { 1150 dev_err(dev, "%s: Context %p was closed! (%d)\n", 1151 __func__, ctx, ctxid); 1152 rc = -EIO; 1153 goto out; 1154 } 1155 1156 ctxi = get_context(cfg, ctxid, file, ctrl); 1157 if (unlikely(!ctxi)) { 1158 dev_dbg(dev, "%s: Bad context! (%d)\n", __func__, ctxid); 1159 rc = -EIO; 1160 goto out; 1161 } 1162 1163 dev_dbg(dev, "%s: mmap for context %d\n", __func__, ctxid); 1164 1165 rc = cxl_fd_mmap(file, vma); 1166 if (likely(!rc)) { 1167 /* Insert ourself in the mmap fault handler path */ 1168 ctxi->cxl_mmap_vmops = vma->vm_ops; 1169 vma->vm_ops = &cxlflash_mmap_vmops; 1170 } 1171 1172 out: 1173 if (likely(ctxi)) 1174 put_context(ctxi); 1175 return rc; 1176 } 1177 1178 const struct file_operations cxlflash_cxl_fops = { 1179 .owner = THIS_MODULE, 1180 .mmap = cxlflash_cxl_mmap, 1181 .release = cxlflash_cxl_release, 1182 }; 1183 1184 /** 1185 * cxlflash_mark_contexts_error() - move contexts to error state and list 1186 * @cfg: Internal structure associated with the host. 1187 * 1188 * A context is only moved over to the error list when there are no outstanding 1189 * references to it. This ensures that a running operation has completed. 1190 * 1191 * Return: 0 on success, -errno on failure 1192 */ 1193 int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg) 1194 { 1195 int i, rc = 0; 1196 struct ctx_info *ctxi = NULL; 1197 1198 mutex_lock(&cfg->ctx_tbl_list_mutex); 1199 1200 for (i = 0; i < MAX_CONTEXT; i++) { 1201 ctxi = cfg->ctx_tbl[i]; 1202 if (ctxi) { 1203 mutex_lock(&ctxi->mutex); 1204 cfg->ctx_tbl[i] = NULL; 1205 list_add(&ctxi->list, &cfg->ctx_err_recovery); 1206 ctxi->err_recovery_active = true; 1207 ctxi->ctrl_map = NULL; 1208 unmap_context(ctxi); 1209 mutex_unlock(&ctxi->mutex); 1210 } 1211 } 1212 1213 mutex_unlock(&cfg->ctx_tbl_list_mutex); 1214 return rc; 1215 } 1216 1217 /* 1218 * Dummy NULL fops 1219 */ 1220 static const struct file_operations null_fops = { 1221 .owner = THIS_MODULE, 1222 }; 1223 1224 /** 1225 * check_state() - checks and responds to the current adapter state 1226 * @cfg: Internal structure associated with the host. 1227 * 1228 * This routine can block and should only be used on process context. 1229 * It assumes that the caller is an ioctl thread and holding the ioctl 1230 * read semaphore. This is temporarily let up across the wait to allow 1231 * for draining actively running ioctls. Also note that when waking up 1232 * from waiting in reset, the state is unknown and must be checked again 1233 * before proceeding. 1234 * 1235 * Return: 0 on success, -errno on failure 1236 */ 1237 int check_state(struct cxlflash_cfg *cfg) 1238 { 1239 struct device *dev = &cfg->dev->dev; 1240 int rc = 0; 1241 1242 retry: 1243 switch (cfg->state) { 1244 case STATE_RESET: 1245 dev_dbg(dev, "%s: Reset state, going to wait...\n", __func__); 1246 up_read(&cfg->ioctl_rwsem); 1247 rc = wait_event_interruptible(cfg->reset_waitq, 1248 cfg->state != STATE_RESET); 1249 down_read(&cfg->ioctl_rwsem); 1250 if (unlikely(rc)) 1251 break; 1252 goto retry; 1253 case STATE_FAILTERM: 1254 dev_dbg(dev, "%s: Failed/Terminating!\n", __func__); 1255 rc = -ENODEV; 1256 break; 1257 default: 1258 break; 1259 } 1260 1261 return rc; 1262 } 1263 1264 /** 1265 * cxlflash_disk_attach() - attach a LUN to a context 1266 * @sdev: SCSI device associated with LUN. 1267 * @attach: Attach ioctl data structure. 1268 * 1269 * Creates a context and attaches LUN to it. A LUN can only be attached 1270 * one time to a context (subsequent attaches for the same context/LUN pair 1271 * are not supported). Additional LUNs can be attached to a context by 1272 * specifying the 'reuse' flag defined in the cxlflash_ioctl.h header. 1273 * 1274 * Return: 0 on success, -errno on failure 1275 */ 1276 static int cxlflash_disk_attach(struct scsi_device *sdev, 1277 struct dk_cxlflash_attach *attach) 1278 { 1279 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; 1280 struct device *dev = &cfg->dev->dev; 1281 struct afu *afu = cfg->afu; 1282 struct llun_info *lli = sdev->hostdata; 1283 struct glun_info *gli = lli->parent; 1284 struct cxl_ioctl_start_work *work; 1285 struct ctx_info *ctxi = NULL; 1286 struct lun_access *lun_access = NULL; 1287 int rc = 0; 1288 u32 perms; 1289 int ctxid = -1; 1290 u64 rctxid = 0UL; 1291 struct file *file = NULL; 1292 1293 struct cxl_context *ctx = NULL; 1294 1295 int fd = -1; 1296 1297 if (attach->num_interrupts > 4) { 1298 dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n", 1299 __func__, attach->num_interrupts); 1300 rc = -EINVAL; 1301 goto out; 1302 } 1303 1304 if (gli->max_lba == 0) { 1305 dev_dbg(dev, "%s: No capacity info for this LUN (%016llX)\n", 1306 __func__, lli->lun_id[sdev->channel]); 1307 rc = read_cap16(sdev, lli); 1308 if (rc) { 1309 dev_err(dev, "%s: Invalid device! (%d)\n", 1310 __func__, rc); 1311 rc = -ENODEV; 1312 goto out; 1313 } 1314 dev_dbg(dev, "%s: LBA = %016llX\n", __func__, gli->max_lba); 1315 dev_dbg(dev, "%s: BLK_LEN = %08X\n", __func__, gli->blk_len); 1316 } 1317 1318 if (attach->hdr.flags & DK_CXLFLASH_ATTACH_REUSE_CONTEXT) { 1319 rctxid = attach->context_id; 1320 ctxi = get_context(cfg, rctxid, NULL, 0); 1321 if (!ctxi) { 1322 dev_dbg(dev, "%s: Bad context! (%016llX)\n", 1323 __func__, rctxid); 1324 rc = -EINVAL; 1325 goto out; 1326 } 1327 1328 list_for_each_entry(lun_access, &ctxi->luns, list) 1329 if (lun_access->lli == lli) { 1330 dev_dbg(dev, "%s: Already attached!\n", 1331 __func__); 1332 rc = -EINVAL; 1333 goto out; 1334 } 1335 } 1336 1337 rc = scsi_device_get(sdev); 1338 if (unlikely(rc)) { 1339 dev_err(dev, "%s: Unable to get sdev reference!\n", __func__); 1340 goto out; 1341 } 1342 1343 lun_access = kzalloc(sizeof(*lun_access), GFP_KERNEL); 1344 if (unlikely(!lun_access)) { 1345 dev_err(dev, "%s: Unable to allocate lun_access!\n", __func__); 1346 rc = -ENOMEM; 1347 goto err; 1348 } 1349 1350 lun_access->lli = lli; 1351 lun_access->sdev = sdev; 1352 1353 /* Non-NULL context indicates reuse (another context reference) */ 1354 if (ctxi) { 1355 dev_dbg(dev, "%s: Reusing context for LUN! (%016llX)\n", 1356 __func__, rctxid); 1357 kref_get(&ctxi->kref); 1358 list_add(&lun_access->list, &ctxi->luns); 1359 goto out_attach; 1360 } 1361 1362 ctxi = create_context(cfg); 1363 if (unlikely(!ctxi)) { 1364 dev_err(dev, "%s: Failed to create context! (%d)\n", 1365 __func__, ctxid); 1366 goto err; 1367 } 1368 1369 ctx = cxl_dev_context_init(cfg->dev); 1370 if (IS_ERR_OR_NULL(ctx)) { 1371 dev_err(dev, "%s: Could not initialize context %p\n", 1372 __func__, ctx); 1373 rc = -ENODEV; 1374 goto err; 1375 } 1376 1377 work = &ctxi->work; 1378 work->num_interrupts = attach->num_interrupts; 1379 work->flags = CXL_START_WORK_NUM_IRQS; 1380 1381 rc = cxl_start_work(ctx, work); 1382 if (unlikely(rc)) { 1383 dev_dbg(dev, "%s: Could not start context rc=%d\n", 1384 __func__, rc); 1385 goto err; 1386 } 1387 1388 ctxid = cxl_process_element(ctx); 1389 if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) { 1390 dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid); 1391 rc = -EPERM; 1392 goto err; 1393 } 1394 1395 file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd); 1396 if (unlikely(fd < 0)) { 1397 rc = -ENODEV; 1398 dev_err(dev, "%s: Could not get file descriptor\n", __func__); 1399 goto err; 1400 } 1401 1402 /* Translate read/write O_* flags from fcntl.h to AFU permission bits */ 1403 perms = SISL_RHT_PERM(attach->hdr.flags + 1); 1404 1405 /* Context mutex is locked upon return */ 1406 init_context(ctxi, cfg, ctx, ctxid, file, perms); 1407 1408 rc = afu_attach(cfg, ctxi); 1409 if (unlikely(rc)) { 1410 dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc); 1411 goto err; 1412 } 1413 1414 /* 1415 * No error paths after this point. Once the fd is installed it's 1416 * visible to user space and can't be undone safely on this thread. 1417 * There is no need to worry about a deadlock here because no one 1418 * knows about us yet; we can be the only one holding our mutex. 1419 */ 1420 list_add(&lun_access->list, &ctxi->luns); 1421 mutex_lock(&cfg->ctx_tbl_list_mutex); 1422 mutex_lock(&ctxi->mutex); 1423 cfg->ctx_tbl[ctxid] = ctxi; 1424 mutex_unlock(&cfg->ctx_tbl_list_mutex); 1425 fd_install(fd, file); 1426 1427 out_attach: 1428 if (fd != -1) 1429 attach->hdr.return_flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD; 1430 else 1431 attach->hdr.return_flags = 0; 1432 1433 attach->context_id = ctxi->ctxid; 1434 attach->block_size = gli->blk_len; 1435 attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea); 1436 attach->last_lba = gli->max_lba; 1437 attach->max_xfer = sdev->host->max_sectors * MAX_SECTOR_UNIT; 1438 attach->max_xfer /= gli->blk_len; 1439 1440 out: 1441 attach->adap_fd = fd; 1442 1443 if (ctxi) 1444 put_context(ctxi); 1445 1446 dev_dbg(dev, "%s: returning ctxid=%d fd=%d bs=%lld rc=%d llba=%lld\n", 1447 __func__, ctxid, fd, attach->block_size, rc, attach->last_lba); 1448 return rc; 1449 1450 err: 1451 /* Cleanup CXL context; okay to 'stop' even if it was not started */ 1452 if (!IS_ERR_OR_NULL(ctx)) { 1453 cxl_stop_context(ctx); 1454 cxl_release_context(ctx); 1455 ctx = NULL; 1456 } 1457 1458 /* 1459 * Here, we're overriding the fops with a dummy all-NULL fops because 1460 * fput() calls the release fop, which will cause us to mistakenly 1461 * call into the CXL code. Rather than try to add yet more complexity 1462 * to that routine (cxlflash_cxl_release) we should try to fix the 1463 * issue here. 1464 */ 1465 if (fd > 0) { 1466 file->f_op = &null_fops; 1467 fput(file); 1468 put_unused_fd(fd); 1469 fd = -1; 1470 file = NULL; 1471 } 1472 1473 /* Cleanup our context */ 1474 if (ctxi) { 1475 destroy_context(cfg, ctxi); 1476 ctxi = NULL; 1477 } 1478 1479 kfree(lun_access); 1480 scsi_device_put(sdev); 1481 goto out; 1482 } 1483 1484 /** 1485 * recover_context() - recovers a context in error 1486 * @cfg: Internal structure associated with the host. 1487 * @ctxi: Context to release. 1488 * @adap_fd: Adapter file descriptor associated with new/recovered context. 1489 * 1490 * Restablishes the state for a context-in-error. 1491 * 1492 * Return: 0 on success, -errno on failure 1493 */ 1494 static int recover_context(struct cxlflash_cfg *cfg, 1495 struct ctx_info *ctxi, 1496 int *adap_fd) 1497 { 1498 struct device *dev = &cfg->dev->dev; 1499 int rc = 0; 1500 int fd = -1; 1501 int ctxid = -1; 1502 struct file *file; 1503 struct cxl_context *ctx; 1504 struct afu *afu = cfg->afu; 1505 1506 ctx = cxl_dev_context_init(cfg->dev); 1507 if (IS_ERR_OR_NULL(ctx)) { 1508 dev_err(dev, "%s: Could not initialize context %p\n", 1509 __func__, ctx); 1510 rc = -ENODEV; 1511 goto out; 1512 } 1513 1514 rc = cxl_start_work(ctx, &ctxi->work); 1515 if (unlikely(rc)) { 1516 dev_dbg(dev, "%s: Could not start context rc=%d\n", 1517 __func__, rc); 1518 goto err1; 1519 } 1520 1521 ctxid = cxl_process_element(ctx); 1522 if (unlikely((ctxid >= MAX_CONTEXT) || (ctxid < 0))) { 1523 dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid); 1524 rc = -EPERM; 1525 goto err2; 1526 } 1527 1528 file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd); 1529 if (unlikely(fd < 0)) { 1530 rc = -ENODEV; 1531 dev_err(dev, "%s: Could not get file descriptor\n", __func__); 1532 goto err2; 1533 } 1534 1535 /* Update with new MMIO area based on updated context id */ 1536 ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl; 1537 1538 rc = afu_attach(cfg, ctxi); 1539 if (rc) { 1540 dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc); 1541 goto err3; 1542 } 1543 1544 /* 1545 * No error paths after this point. Once the fd is installed it's 1546 * visible to user space and can't be undone safely on this thread. 1547 */ 1548 ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid); 1549 ctxi->ctx = ctx; 1550 ctxi->file = file; 1551 1552 /* 1553 * Put context back in table (note the reinit of the context list); 1554 * we must first drop the context's mutex and then acquire it in 1555 * order with the table/list mutex to avoid a deadlock - safe to do 1556 * here because no one can find us at this moment in time. 1557 */ 1558 mutex_unlock(&ctxi->mutex); 1559 mutex_lock(&cfg->ctx_tbl_list_mutex); 1560 mutex_lock(&ctxi->mutex); 1561 list_del_init(&ctxi->list); 1562 cfg->ctx_tbl[ctxid] = ctxi; 1563 mutex_unlock(&cfg->ctx_tbl_list_mutex); 1564 fd_install(fd, file); 1565 *adap_fd = fd; 1566 out: 1567 dev_dbg(dev, "%s: returning ctxid=%d fd=%d rc=%d\n", 1568 __func__, ctxid, fd, rc); 1569 return rc; 1570 1571 err3: 1572 fput(file); 1573 put_unused_fd(fd); 1574 err2: 1575 cxl_stop_context(ctx); 1576 err1: 1577 cxl_release_context(ctx); 1578 goto out; 1579 } 1580 1581 /** 1582 * cxlflash_afu_recover() - initiates AFU recovery 1583 * @sdev: SCSI device associated with LUN. 1584 * @recover: Recover ioctl data structure. 1585 * 1586 * Only a single recovery is allowed at a time to avoid exhausting CXL 1587 * resources (leading to recovery failure) in the event that we're up 1588 * against the maximum number of contexts limit. For similar reasons, 1589 * a context recovery is retried if there are multiple recoveries taking 1590 * place at the same time and the failure was due to CXL services being 1591 * unable to keep up. 1592 * 1593 * As this routine is called on ioctl context, it holds the ioctl r/w 1594 * semaphore that is used to drain ioctls in recovery scenarios. The 1595 * implementation to achieve the pacing described above (a local mutex) 1596 * requires that the ioctl r/w semaphore be dropped and reacquired to 1597 * avoid a 3-way deadlock when multiple process recoveries operate in 1598 * parallel. 1599 * 1600 * Because a user can detect an error condition before the kernel, it is 1601 * quite possible for this routine to act as the kernel's EEH detection 1602 * source (MMIO read of mbox_r). Because of this, there is a window of 1603 * time where an EEH might have been detected but not yet 'serviced' 1604 * (callback invoked, causing the device to enter reset state). To avoid 1605 * looping in this routine during that window, a 1 second sleep is in place 1606 * between the time the MMIO failure is detected and the time a wait on the 1607 * reset wait queue is attempted via check_state(). 1608 * 1609 * Return: 0 on success, -errno on failure 1610 */ 1611 static int cxlflash_afu_recover(struct scsi_device *sdev, 1612 struct dk_cxlflash_recover_afu *recover) 1613 { 1614 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; 1615 struct device *dev = &cfg->dev->dev; 1616 struct llun_info *lli = sdev->hostdata; 1617 struct afu *afu = cfg->afu; 1618 struct ctx_info *ctxi = NULL; 1619 struct mutex *mutex = &cfg->ctx_recovery_mutex; 1620 u64 ctxid = DECODE_CTXID(recover->context_id), 1621 rctxid = recover->context_id; 1622 long reg; 1623 int lretry = 20; /* up to 2 seconds */ 1624 int new_adap_fd = -1; 1625 int rc = 0; 1626 1627 atomic_inc(&cfg->recovery_threads); 1628 up_read(&cfg->ioctl_rwsem); 1629 rc = mutex_lock_interruptible(mutex); 1630 down_read(&cfg->ioctl_rwsem); 1631 if (rc) 1632 goto out; 1633 rc = check_state(cfg); 1634 if (rc) { 1635 dev_err(dev, "%s: Failed state! rc=%d\n", __func__, rc); 1636 rc = -ENODEV; 1637 goto out; 1638 } 1639 1640 dev_dbg(dev, "%s: reason 0x%016llX rctxid=%016llX\n", 1641 __func__, recover->reason, rctxid); 1642 1643 retry: 1644 /* Ensure that this process is attached to the context */ 1645 ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK); 1646 if (unlikely(!ctxi)) { 1647 dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid); 1648 rc = -EINVAL; 1649 goto out; 1650 } 1651 1652 if (ctxi->err_recovery_active) { 1653 retry_recover: 1654 rc = recover_context(cfg, ctxi, &new_adap_fd); 1655 if (unlikely(rc)) { 1656 dev_err(dev, "%s: Recovery failed for context %llu (rc=%d)\n", 1657 __func__, ctxid, rc); 1658 if ((rc == -ENODEV) && 1659 ((atomic_read(&cfg->recovery_threads) > 1) || 1660 (lretry--))) { 1661 dev_dbg(dev, "%s: Going to try again!\n", 1662 __func__); 1663 mutex_unlock(mutex); 1664 msleep(100); 1665 rc = mutex_lock_interruptible(mutex); 1666 if (rc) 1667 goto out; 1668 goto retry_recover; 1669 } 1670 1671 goto out; 1672 } 1673 1674 ctxi->err_recovery_active = false; 1675 recover->context_id = ctxi->ctxid; 1676 recover->adap_fd = new_adap_fd; 1677 recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea); 1678 recover->hdr.return_flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD | 1679 DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET; 1680 goto out; 1681 } 1682 1683 /* Test if in error state */ 1684 reg = readq_be(&afu->ctrl_map->mbox_r); 1685 if (reg == -1) { 1686 dev_dbg(dev, "%s: MMIO fail, wait for recovery.\n", __func__); 1687 1688 /* 1689 * Before checking the state, put back the context obtained with 1690 * get_context() as it is no longer needed and sleep for a short 1691 * period of time (see prolog notes). 1692 */ 1693 put_context(ctxi); 1694 ctxi = NULL; 1695 ssleep(1); 1696 rc = check_state(cfg); 1697 if (unlikely(rc)) 1698 goto out; 1699 goto retry; 1700 } 1701 1702 dev_dbg(dev, "%s: MMIO working, no recovery required!\n", __func__); 1703 out: 1704 if (likely(ctxi)) 1705 put_context(ctxi); 1706 mutex_unlock(mutex); 1707 atomic_dec_if_positive(&cfg->recovery_threads); 1708 return rc; 1709 } 1710 1711 /** 1712 * process_sense() - evaluates and processes sense data 1713 * @sdev: SCSI device associated with LUN. 1714 * @verify: Verify ioctl data structure. 1715 * 1716 * Return: 0 on success, -errno on failure 1717 */ 1718 static int process_sense(struct scsi_device *sdev, 1719 struct dk_cxlflash_verify *verify) 1720 { 1721 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; 1722 struct device *dev = &cfg->dev->dev; 1723 struct llun_info *lli = sdev->hostdata; 1724 struct glun_info *gli = lli->parent; 1725 u64 prev_lba = gli->max_lba; 1726 struct scsi_sense_hdr sshdr = { 0 }; 1727 int rc = 0; 1728 1729 rc = scsi_normalize_sense((const u8 *)&verify->sense_data, 1730 DK_CXLFLASH_VERIFY_SENSE_LEN, &sshdr); 1731 if (!rc) { 1732 dev_err(dev, "%s: Failed to normalize sense data!\n", __func__); 1733 rc = -EINVAL; 1734 goto out; 1735 } 1736 1737 switch (sshdr.sense_key) { 1738 case NO_SENSE: 1739 case RECOVERED_ERROR: 1740 /* fall through */ 1741 case NOT_READY: 1742 break; 1743 case UNIT_ATTENTION: 1744 switch (sshdr.asc) { 1745 case 0x29: /* Power on Reset or Device Reset */ 1746 /* fall through */ 1747 case 0x2A: /* Device settings/capacity changed */ 1748 rc = read_cap16(sdev, lli); 1749 if (rc) { 1750 rc = -ENODEV; 1751 break; 1752 } 1753 if (prev_lba != gli->max_lba) 1754 dev_dbg(dev, "%s: Capacity changed old=%lld " 1755 "new=%lld\n", __func__, prev_lba, 1756 gli->max_lba); 1757 break; 1758 case 0x3F: /* Report LUNs changed, Rescan. */ 1759 scsi_scan_host(cfg->host); 1760 break; 1761 default: 1762 rc = -EIO; 1763 break; 1764 } 1765 break; 1766 default: 1767 rc = -EIO; 1768 break; 1769 } 1770 out: 1771 dev_dbg(dev, "%s: sense_key %x asc %x ascq %x rc %d\n", __func__, 1772 sshdr.sense_key, sshdr.asc, sshdr.ascq, rc); 1773 return rc; 1774 } 1775 1776 /** 1777 * cxlflash_disk_verify() - verifies a LUN is the same and handle size changes 1778 * @sdev: SCSI device associated with LUN. 1779 * @verify: Verify ioctl data structure. 1780 * 1781 * Return: 0 on success, -errno on failure 1782 */ 1783 static int cxlflash_disk_verify(struct scsi_device *sdev, 1784 struct dk_cxlflash_verify *verify) 1785 { 1786 int rc = 0; 1787 struct ctx_info *ctxi = NULL; 1788 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; 1789 struct device *dev = &cfg->dev->dev; 1790 struct llun_info *lli = sdev->hostdata; 1791 struct glun_info *gli = lli->parent; 1792 struct sisl_rht_entry *rhte = NULL; 1793 res_hndl_t rhndl = verify->rsrc_handle; 1794 u64 ctxid = DECODE_CTXID(verify->context_id), 1795 rctxid = verify->context_id; 1796 u64 last_lba = 0; 1797 1798 dev_dbg(dev, "%s: ctxid=%llu rhndl=%016llX, hint=%016llX, " 1799 "flags=%016llX\n", __func__, ctxid, verify->rsrc_handle, 1800 verify->hint, verify->hdr.flags); 1801 1802 ctxi = get_context(cfg, rctxid, lli, 0); 1803 if (unlikely(!ctxi)) { 1804 dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid); 1805 rc = -EINVAL; 1806 goto out; 1807 } 1808 1809 rhte = get_rhte(ctxi, rhndl, lli); 1810 if (unlikely(!rhte)) { 1811 dev_dbg(dev, "%s: Bad resource handle! (%d)\n", 1812 __func__, rhndl); 1813 rc = -EINVAL; 1814 goto out; 1815 } 1816 1817 /* 1818 * Look at the hint/sense to see if it requires us to redrive 1819 * inquiry (i.e. the Unit attention is due to the WWN changing). 1820 */ 1821 if (verify->hint & DK_CXLFLASH_VERIFY_HINT_SENSE) { 1822 /* Can't hold mutex across process_sense/read_cap16, 1823 * since we could have an intervening EEH event. 1824 */ 1825 ctxi->unavail = true; 1826 mutex_unlock(&ctxi->mutex); 1827 rc = process_sense(sdev, verify); 1828 if (unlikely(rc)) { 1829 dev_err(dev, "%s: Failed to validate sense data (%d)\n", 1830 __func__, rc); 1831 mutex_lock(&ctxi->mutex); 1832 ctxi->unavail = false; 1833 goto out; 1834 } 1835 mutex_lock(&ctxi->mutex); 1836 ctxi->unavail = false; 1837 } 1838 1839 switch (gli->mode) { 1840 case MODE_PHYSICAL: 1841 last_lba = gli->max_lba; 1842 break; 1843 case MODE_VIRTUAL: 1844 /* Cast lxt_cnt to u64 for multiply to be treated as 64bit op */ 1845 last_lba = ((u64)rhte->lxt_cnt * MC_CHUNK_SIZE * gli->blk_len); 1846 last_lba /= CXLFLASH_BLOCK_SIZE; 1847 last_lba--; 1848 break; 1849 default: 1850 WARN(1, "Unsupported LUN mode!"); 1851 } 1852 1853 verify->last_lba = last_lba; 1854 1855 out: 1856 if (likely(ctxi)) 1857 put_context(ctxi); 1858 dev_dbg(dev, "%s: returning rc=%d llba=%llX\n", 1859 __func__, rc, verify->last_lba); 1860 return rc; 1861 } 1862 1863 /** 1864 * decode_ioctl() - translates an encoded ioctl to an easily identifiable string 1865 * @cmd: The ioctl command to decode. 1866 * 1867 * Return: A string identifying the decoded ioctl. 1868 */ 1869 static char *decode_ioctl(int cmd) 1870 { 1871 switch (cmd) { 1872 case DK_CXLFLASH_ATTACH: 1873 return __stringify_1(DK_CXLFLASH_ATTACH); 1874 case DK_CXLFLASH_USER_DIRECT: 1875 return __stringify_1(DK_CXLFLASH_USER_DIRECT); 1876 case DK_CXLFLASH_USER_VIRTUAL: 1877 return __stringify_1(DK_CXLFLASH_USER_VIRTUAL); 1878 case DK_CXLFLASH_VLUN_RESIZE: 1879 return __stringify_1(DK_CXLFLASH_VLUN_RESIZE); 1880 case DK_CXLFLASH_RELEASE: 1881 return __stringify_1(DK_CXLFLASH_RELEASE); 1882 case DK_CXLFLASH_DETACH: 1883 return __stringify_1(DK_CXLFLASH_DETACH); 1884 case DK_CXLFLASH_VERIFY: 1885 return __stringify_1(DK_CXLFLASH_VERIFY); 1886 case DK_CXLFLASH_VLUN_CLONE: 1887 return __stringify_1(DK_CXLFLASH_VLUN_CLONE); 1888 case DK_CXLFLASH_RECOVER_AFU: 1889 return __stringify_1(DK_CXLFLASH_RECOVER_AFU); 1890 case DK_CXLFLASH_MANAGE_LUN: 1891 return __stringify_1(DK_CXLFLASH_MANAGE_LUN); 1892 } 1893 1894 return "UNKNOWN"; 1895 } 1896 1897 /** 1898 * cxlflash_disk_direct_open() - opens a direct (physical) disk 1899 * @sdev: SCSI device associated with LUN. 1900 * @arg: UDirect ioctl data structure. 1901 * 1902 * On successful return, the user is informed of the resource handle 1903 * to be used to identify the direct lun and the size (in blocks) of 1904 * the direct lun in last LBA format. 1905 * 1906 * Return: 0 on success, -errno on failure 1907 */ 1908 static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg) 1909 { 1910 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; 1911 struct device *dev = &cfg->dev->dev; 1912 struct afu *afu = cfg->afu; 1913 struct llun_info *lli = sdev->hostdata; 1914 struct glun_info *gli = lli->parent; 1915 1916 struct dk_cxlflash_udirect *pphys = (struct dk_cxlflash_udirect *)arg; 1917 1918 u64 ctxid = DECODE_CTXID(pphys->context_id), 1919 rctxid = pphys->context_id; 1920 u64 lun_size = 0; 1921 u64 last_lba = 0; 1922 u64 rsrc_handle = -1; 1923 u32 port = CHAN2PORT(sdev->channel); 1924 1925 int rc = 0; 1926 1927 struct ctx_info *ctxi = NULL; 1928 struct sisl_rht_entry *rhte = NULL; 1929 1930 pr_debug("%s: ctxid=%llu ls=0x%llx\n", __func__, ctxid, lun_size); 1931 1932 rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false); 1933 if (unlikely(rc)) { 1934 dev_dbg(dev, "%s: Failed to attach to LUN! (PHYSICAL)\n", 1935 __func__); 1936 goto out; 1937 } 1938 1939 ctxi = get_context(cfg, rctxid, lli, 0); 1940 if (unlikely(!ctxi)) { 1941 dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid); 1942 rc = -EINVAL; 1943 goto err1; 1944 } 1945 1946 rhte = rhte_checkout(ctxi, lli); 1947 if (unlikely(!rhte)) { 1948 dev_dbg(dev, "%s: too many opens for this context\n", __func__); 1949 rc = -EMFILE; /* too many opens */ 1950 goto err1; 1951 } 1952 1953 rsrc_handle = (rhte - ctxi->rht_start); 1954 1955 rht_format1(rhte, lli->lun_id[sdev->channel], ctxi->rht_perms, port); 1956 cxlflash_afu_sync(afu, ctxid, rsrc_handle, AFU_LW_SYNC); 1957 1958 last_lba = gli->max_lba; 1959 pphys->hdr.return_flags = 0; 1960 pphys->last_lba = last_lba; 1961 pphys->rsrc_handle = rsrc_handle; 1962 1963 out: 1964 if (likely(ctxi)) 1965 put_context(ctxi); 1966 dev_dbg(dev, "%s: returning handle 0x%llx rc=%d llba %lld\n", 1967 __func__, rsrc_handle, rc, last_lba); 1968 return rc; 1969 1970 err1: 1971 cxlflash_lun_detach(gli); 1972 goto out; 1973 } 1974 1975 /** 1976 * ioctl_common() - common IOCTL handler for driver 1977 * @sdev: SCSI device associated with LUN. 1978 * @cmd: IOCTL command. 1979 * 1980 * Handles common fencing operations that are valid for multiple ioctls. Always 1981 * allow through ioctls that are cleanup oriented in nature, even when operating 1982 * in a failed/terminating state. 1983 * 1984 * Return: 0 on success, -errno on failure 1985 */ 1986 static int ioctl_common(struct scsi_device *sdev, int cmd) 1987 { 1988 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; 1989 struct device *dev = &cfg->dev->dev; 1990 struct llun_info *lli = sdev->hostdata; 1991 int rc = 0; 1992 1993 if (unlikely(!lli)) { 1994 dev_dbg(dev, "%s: Unknown LUN\n", __func__); 1995 rc = -EINVAL; 1996 goto out; 1997 } 1998 1999 rc = check_state(cfg); 2000 if (unlikely(rc) && (cfg->state == STATE_FAILTERM)) { 2001 switch (cmd) { 2002 case DK_CXLFLASH_VLUN_RESIZE: 2003 case DK_CXLFLASH_RELEASE: 2004 case DK_CXLFLASH_DETACH: 2005 dev_dbg(dev, "%s: Command override! (%d)\n", 2006 __func__, rc); 2007 rc = 0; 2008 break; 2009 } 2010 } 2011 out: 2012 return rc; 2013 } 2014 2015 /** 2016 * cxlflash_ioctl() - IOCTL handler for driver 2017 * @sdev: SCSI device associated with LUN. 2018 * @cmd: IOCTL command. 2019 * @arg: Userspace ioctl data structure. 2020 * 2021 * A read/write semaphore is used to implement a 'drain' of currently 2022 * running ioctls. The read semaphore is taken at the beginning of each 2023 * ioctl thread and released upon concluding execution. Additionally the 2024 * semaphore should be released and then reacquired in any ioctl execution 2025 * path which will wait for an event to occur that is outside the scope of 2026 * the ioctl (i.e. an adapter reset). To drain the ioctls currently running, 2027 * a thread simply needs to acquire the write semaphore. 2028 * 2029 * Return: 0 on success, -errno on failure 2030 */ 2031 int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) 2032 { 2033 typedef int (*sioctl) (struct scsi_device *, void *); 2034 2035 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; 2036 struct device *dev = &cfg->dev->dev; 2037 struct afu *afu = cfg->afu; 2038 struct dk_cxlflash_hdr *hdr; 2039 char buf[sizeof(union cxlflash_ioctls)]; 2040 size_t size = 0; 2041 bool known_ioctl = false; 2042 int idx; 2043 int rc = 0; 2044 struct Scsi_Host *shost = sdev->host; 2045 sioctl do_ioctl = NULL; 2046 2047 static const struct { 2048 size_t size; 2049 sioctl ioctl; 2050 } ioctl_tbl[] = { /* NOTE: order matters here */ 2051 {sizeof(struct dk_cxlflash_attach), (sioctl)cxlflash_disk_attach}, 2052 {sizeof(struct dk_cxlflash_udirect), cxlflash_disk_direct_open}, 2053 {sizeof(struct dk_cxlflash_release), (sioctl)cxlflash_disk_release}, 2054 {sizeof(struct dk_cxlflash_detach), (sioctl)cxlflash_disk_detach}, 2055 {sizeof(struct dk_cxlflash_verify), (sioctl)cxlflash_disk_verify}, 2056 {sizeof(struct dk_cxlflash_recover_afu), (sioctl)cxlflash_afu_recover}, 2057 {sizeof(struct dk_cxlflash_manage_lun), (sioctl)cxlflash_manage_lun}, 2058 {sizeof(struct dk_cxlflash_uvirtual), cxlflash_disk_virtual_open}, 2059 {sizeof(struct dk_cxlflash_resize), (sioctl)cxlflash_vlun_resize}, 2060 {sizeof(struct dk_cxlflash_clone), (sioctl)cxlflash_disk_clone}, 2061 }; 2062 2063 /* Hold read semaphore so we can drain if needed */ 2064 down_read(&cfg->ioctl_rwsem); 2065 2066 /* Restrict command set to physical support only for internal LUN */ 2067 if (afu->internal_lun) 2068 switch (cmd) { 2069 case DK_CXLFLASH_RELEASE: 2070 case DK_CXLFLASH_USER_VIRTUAL: 2071 case DK_CXLFLASH_VLUN_RESIZE: 2072 case DK_CXLFLASH_VLUN_CLONE: 2073 dev_dbg(dev, "%s: %s not supported for lun_mode=%d\n", 2074 __func__, decode_ioctl(cmd), afu->internal_lun); 2075 rc = -EINVAL; 2076 goto cxlflash_ioctl_exit; 2077 } 2078 2079 switch (cmd) { 2080 case DK_CXLFLASH_ATTACH: 2081 case DK_CXLFLASH_USER_DIRECT: 2082 case DK_CXLFLASH_RELEASE: 2083 case DK_CXLFLASH_DETACH: 2084 case DK_CXLFLASH_VERIFY: 2085 case DK_CXLFLASH_RECOVER_AFU: 2086 case DK_CXLFLASH_USER_VIRTUAL: 2087 case DK_CXLFLASH_VLUN_RESIZE: 2088 case DK_CXLFLASH_VLUN_CLONE: 2089 dev_dbg(dev, "%s: %s (%08X) on dev(%d/%d/%d/%llu)\n", 2090 __func__, decode_ioctl(cmd), cmd, shost->host_no, 2091 sdev->channel, sdev->id, sdev->lun); 2092 rc = ioctl_common(sdev, cmd); 2093 if (unlikely(rc)) 2094 goto cxlflash_ioctl_exit; 2095 2096 /* fall through */ 2097 2098 case DK_CXLFLASH_MANAGE_LUN: 2099 known_ioctl = true; 2100 idx = _IOC_NR(cmd) - _IOC_NR(DK_CXLFLASH_ATTACH); 2101 size = ioctl_tbl[idx].size; 2102 do_ioctl = ioctl_tbl[idx].ioctl; 2103 2104 if (likely(do_ioctl)) 2105 break; 2106 2107 /* fall through */ 2108 default: 2109 rc = -EINVAL; 2110 goto cxlflash_ioctl_exit; 2111 } 2112 2113 if (unlikely(copy_from_user(&buf, arg, size))) { 2114 dev_err(dev, "%s: copy_from_user() fail! " 2115 "size=%lu cmd=%d (%s) arg=%p\n", 2116 __func__, size, cmd, decode_ioctl(cmd), arg); 2117 rc = -EFAULT; 2118 goto cxlflash_ioctl_exit; 2119 } 2120 2121 hdr = (struct dk_cxlflash_hdr *)&buf; 2122 if (hdr->version != DK_CXLFLASH_VERSION_0) { 2123 dev_dbg(dev, "%s: Version %u not supported for %s\n", 2124 __func__, hdr->version, decode_ioctl(cmd)); 2125 rc = -EINVAL; 2126 goto cxlflash_ioctl_exit; 2127 } 2128 2129 if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->rsvd[2] || hdr->return_flags) { 2130 dev_dbg(dev, "%s: Reserved/rflags populated!\n", __func__); 2131 rc = -EINVAL; 2132 goto cxlflash_ioctl_exit; 2133 } 2134 2135 rc = do_ioctl(sdev, (void *)&buf); 2136 if (likely(!rc)) 2137 if (unlikely(copy_to_user(arg, &buf, size))) { 2138 dev_err(dev, "%s: copy_to_user() fail! " 2139 "size=%lu cmd=%d (%s) arg=%p\n", 2140 __func__, size, cmd, decode_ioctl(cmd), arg); 2141 rc = -EFAULT; 2142 } 2143 2144 /* fall through to exit */ 2145 2146 cxlflash_ioctl_exit: 2147 up_read(&cfg->ioctl_rwsem); 2148 if (unlikely(rc && known_ioctl)) 2149 dev_err(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) " 2150 "returned rc %d\n", __func__, 2151 decode_ioctl(cmd), cmd, shost->host_no, 2152 sdev->channel, sdev->id, sdev->lun, rc); 2153 else 2154 dev_dbg(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) " 2155 "returned rc %d\n", __func__, decode_ioctl(cmd), 2156 cmd, shost->host_no, sdev->channel, sdev->id, 2157 sdev->lun, rc); 2158 return rc; 2159 } 2160