// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for Broadcom MPI3 Storage Controllers
 *
 * Copyright (C) 2017-2021 Broadcom Inc.
 *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
 *
 */

#include "mpi3mr.h"

/* global driver scope variables */
/* List of all adapter instances; protected by mrioc_list_lock */
LIST_HEAD(mrioc_list);
DEFINE_SPINLOCK(mrioc_list_lock);
/* Monotonically increasing id assigned to each probed adapter */
static int mrioc_ids;
static int warn_non_secure_ctlr;

MODULE_AUTHOR(MPI3MR_DRIVER_AUTHOR);
MODULE_DESCRIPTION(MPI3MR_DRIVER_DESC);
MODULE_LICENSE(MPI3MR_DRIVER_LICENSE);
MODULE_VERSION(MPI3MR_DRIVER_VERSION);

/* Module parameters */
int prot_mask = -1;
module_param(prot_mask, int, 0);
MODULE_PARM_DESC(prot_mask, "Host protection capabilities mask, def=0x07");

static int prot_guard_mask = 3;
module_param(prot_guard_mask, int, 0);
MODULE_PARM_DESC(prot_guard_mask, " Host protection guard mask, def=3");
static int logging_level;
module_param(logging_level, int, 0);
MODULE_PARM_DESC(logging_level,
	" bits for enabling additional logging info (default=0)");

/* Forward declarations */
/**
 * mpi3mr_host_tag_for_scmd - Get host tag for a scmd
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 *
 * Calculate the host tag based on block tag for a given scmd.
 *
 * Return: Valid host tag or MPI3MR_HOSTTAG_INVALID.
 */
static u16 mpi3mr_host_tag_for_scmd(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd)
{
	struct scmd_priv *priv = NULL;
	u32 unique_tag;
	u16 host_tag, hw_queue;

	unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));

	/* unique_tag encodes both the hw queue index and the per-queue tag */
	hw_queue = blk_mq_unique_tag_to_hwq(unique_tag);
	if (hw_queue >= mrioc->num_op_reply_q)
		return MPI3MR_HOSTTAG_INVALID;
	host_tag = blk_mq_unique_tag_to_tag(unique_tag);

	if (WARN_ON(host_tag >= mrioc->max_host_ios))
		return MPI3MR_HOSTTAG_INVALID;

	priv = scsi_cmd_priv(scmd);
	/* host_tag 0 is invalid hence incrementing by 1 */
	priv->host_tag = host_tag + 1;
	priv->scmd = scmd;
	priv->in_lld_scope = 1;
	priv->req_q_idx = hw_queue;
	/* -1 marks "no chain frame reserved" for both chain slots */
	priv->meta_chain_idx = -1;
	priv->chain_idx = -1;
	priv->meta_sg_valid = 0;
	return priv->host_tag;
}

/**
 * mpi3mr_scmd_from_host_tag - Get SCSI command from host tag
 * @mrioc: Adapter instance reference
 * @host_tag: Host tag
 * @qidx: Operational queue index
 *
 * Identify the block tag from the host tag and queue index and
 * retrieve associated scsi command using scsi_host_find_tag().
 *
 * Return: SCSI command reference or NULL.
 */
static struct scsi_cmnd *mpi3mr_scmd_from_host_tag(
	struct mpi3mr_ioc *mrioc, u16 host_tag, u16 qidx)
{
	struct scsi_cmnd *scmd = NULL;
	struct scmd_priv *priv = NULL;
	/* undo the +1 applied in mpi3mr_host_tag_for_scmd() */
	u32 unique_tag = host_tag - 1;

	if (WARN_ON(host_tag > mrioc->max_host_ios))
		goto out;

	unique_tag |= (qidx << BLK_MQ_UNIQUE_TAG_BITS);

	scmd = scsi_host_find_tag(mrioc->shost, unique_tag);
	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		/* only hand back commands that are still owned by the LLD */
		if (!priv->in_lld_scope)
			scmd = NULL;
	}
out:
	return scmd;
}

/**
 * mpi3mr_clear_scmd_priv - Cleanup SCSI command private data
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 *
 * Invalidate the SCSI command private data to mark the command
 * is not in LLD scope anymore.
 *
 * Return: Nothing.
 */
static void mpi3mr_clear_scmd_priv(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd)
{
	struct scmd_priv *priv = NULL;

	priv = scsi_cmd_priv(scmd);

	if (WARN_ON(priv->in_lld_scope == 0))
		return;
	priv->host_tag = MPI3MR_HOSTTAG_INVALID;
	priv->req_q_idx = 0xFFFF;
	priv->scmd = NULL;
	priv->in_lld_scope = 0;
	priv->meta_sg_valid = 0;
	/* release any chain frames reserved for this command */
	if (priv->chain_idx >= 0) {
		clear_bit(priv->chain_idx, mrioc->chain_bitmap);
		priv->chain_idx = -1;
	}
	if (priv->meta_chain_idx >= 0) {
		clear_bit(priv->meta_chain_idx, mrioc->chain_bitmap);
		priv->meta_chain_idx = -1;
	}
}

static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
	struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc);
static void mpi3mr_fwevt_worker(struct work_struct *work);

/**
 * mpi3mr_fwevt_free - firmware event memory deallocator
 * @r: k reference pointer of the firmware event
 *
 * Free firmware event memory when no reference.
 */
static void mpi3mr_fwevt_free(struct kref *r)
{
	kfree(container_of(r, struct mpi3mr_fwevt, ref_count));
}

/**
 * mpi3mr_fwevt_get - k reference incrementor
 * @fwevt: Firmware event reference
 *
 * Increment firmware event reference count.
 */
static void mpi3mr_fwevt_get(struct mpi3mr_fwevt *fwevt)
{
	kref_get(&fwevt->ref_count);
}

/**
 * mpi3mr_fwevt_put - k reference decrementor
 * @fwevt: Firmware event reference
 *
 * Decrement firmware event reference count; frees the event via
 * mpi3mr_fwevt_free() when the count drops to zero.
 */
static void mpi3mr_fwevt_put(struct mpi3mr_fwevt *fwevt)
{
	kref_put(&fwevt->ref_count, mpi3mr_fwevt_free);
}

/**
 * mpi3mr_alloc_fwevt - Allocate firmware event
 * @len: length of firmware event data to allocate
 *
 * Allocate firmware event with required length and initialize
 * the reference counter.
 *
 * Return: firmware event reference.
187 */ 188 static struct mpi3mr_fwevt *mpi3mr_alloc_fwevt(int len) 189 { 190 struct mpi3mr_fwevt *fwevt; 191 192 fwevt = kzalloc(sizeof(*fwevt) + len, GFP_ATOMIC); 193 if (!fwevt) 194 return NULL; 195 196 kref_init(&fwevt->ref_count); 197 return fwevt; 198 } 199 200 /** 201 * mpi3mr_fwevt_add_to_list - Add firmware event to the list 202 * @mrioc: Adapter instance reference 203 * @fwevt: Firmware event reference 204 * 205 * Add the given firmware event to the firmware event list. 206 * 207 * Return: Nothing. 208 */ 209 static void mpi3mr_fwevt_add_to_list(struct mpi3mr_ioc *mrioc, 210 struct mpi3mr_fwevt *fwevt) 211 { 212 unsigned long flags; 213 214 if (!mrioc->fwevt_worker_thread) 215 return; 216 217 spin_lock_irqsave(&mrioc->fwevt_lock, flags); 218 /* get fwevt reference count while adding it to fwevt_list */ 219 mpi3mr_fwevt_get(fwevt); 220 INIT_LIST_HEAD(&fwevt->list); 221 list_add_tail(&fwevt->list, &mrioc->fwevt_list); 222 INIT_WORK(&fwevt->work, mpi3mr_fwevt_worker); 223 /* get fwevt reference count while enqueueing it to worker queue */ 224 mpi3mr_fwevt_get(fwevt); 225 queue_work(mrioc->fwevt_worker_thread, &fwevt->work); 226 spin_unlock_irqrestore(&mrioc->fwevt_lock, flags); 227 } 228 229 /** 230 * mpi3mr_fwevt_del_from_list - Delete firmware event from list 231 * @mrioc: Adapter instance reference 232 * @fwevt: Firmware event reference 233 * 234 * Delete the given firmware event from the firmware event list. 235 * 236 * Return: Nothing. 
 */
static void mpi3mr_fwevt_del_from_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	unsigned long flags;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	/* list_del_init() leaves the node self-linked, so a second
	 * delete attempt sees an empty node and is a no-op */
	if (!list_empty(&fwevt->list)) {
		list_del_init(&fwevt->list);
		/*
		 * Put fwevt reference count after
		 * removing it from fwevt_list
		 */
		mpi3mr_fwevt_put(fwevt);
	}
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
}

/**
 * mpi3mr_dequeue_fwevt - Dequeue firmware event from the list
 * @mrioc: Adapter instance reference
 *
 * Dequeue a firmware event from the firmware event list.
 *
 * Return: firmware event.
 */
static struct mpi3mr_fwevt *mpi3mr_dequeue_fwevt(
	struct mpi3mr_ioc *mrioc)
{
	unsigned long flags;
	struct mpi3mr_fwevt *fwevt = NULL;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	if (!list_empty(&mrioc->fwevt_list)) {
		fwevt = list_first_entry(&mrioc->fwevt_list,
		    struct mpi3mr_fwevt, list);
		list_del_init(&fwevt->list);
		/*
		 * Put fwevt reference count after
		 * removing it from fwevt_list
		 */
		mpi3mr_fwevt_put(fwevt);
	}
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);

	return fwevt;
}

/**
 * mpi3mr_cleanup_fwevt_list - Cleanup firmware event list
 * @mrioc: Adapter instance reference
 *
 * Flush all pending firmware events from the firmware event
 * list.
 *
 * Return: Nothing.
 */
void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_fwevt *fwevt = NULL;

	if ((list_empty(&mrioc->fwevt_list) && !mrioc->current_event) ||
	    !mrioc->fwevt_worker_thread)
		return;

	while ((fwevt = mpi3mr_dequeue_fwevt(mrioc)) ||
	    (fwevt = mrioc->current_event)) {
		/*
		 * Wait on the fwevt to complete. If this returns 1, then
		 * the event was never executed, and we need a put for the
		 * reference the work had on the fwevt.
		 *
		 * If it did execute, we wait for it to finish, and the put will
		 * happen from mpi3mr_process_fwevt()
		 */
		if (cancel_work_sync(&fwevt->work)) {
			/*
			 * Put fwevt reference count after
			 * dequeuing it from worker queue
			 */
			mpi3mr_fwevt_put(fwevt);
			/*
			 * Put fwevt reference count to neutralize
			 * kref_init increment
			 */
			mpi3mr_fwevt_put(fwevt);
		}
	}
}

/**
 * mpi3mr_invalidate_devhandles - Invalidate device handles
 * @mrioc: Adapter instance reference
 *
 * Invalidate the device handles in the target device structures.
 * Called post reset prior to reinitializing the controller.
 *
 * Return: Nothing.
 */
void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_tgt_dev *tgtdev;
	struct mpi3mr_stgt_priv_data *tgt_priv;

	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		tgtdev->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
		/* propagate the invalidation to the starget private data */
		if (tgtdev->starget && tgtdev->starget->hostdata) {
			tgt_priv = tgtdev->starget->hostdata;
			tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
		}
	}
}

/**
 * mpi3mr_print_scmd - print individual SCSI command
 * @rq: Block request
 * @data: Adapter instance reference
 * @reserved: N/A. Currently not used
 *
 * Print the SCSI command details if it is in LLD scope.
 *
 * Return: true always.
 */
static bool mpi3mr_print_scmd(struct request *rq,
	void *data, bool reserved)
{
	struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv = NULL;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		/* skip commands not currently owned by this driver */
		if (!priv->in_lld_scope)
			goto out;

		ioc_info(mrioc, "%s :Host Tag = %d, qid = %d\n",
		    __func__, priv->host_tag, priv->req_q_idx + 1);
		scsi_print_command(scmd);
	}

out:
	/* always continue the tagset iteration */
	return(true);
}

/**
 * mpi3mr_flush_scmd - Flush individual SCSI command
 * @rq: Block request
 * @data: Adapter instance reference
 * @reserved: N/A. Currently not used
 *
 * Return the SCSI command to the upper layers if it is in LLD
 * scope.
 *
 * Return: true always.
 */

static bool mpi3mr_flush_scmd(struct request *rq,
	void *data, bool reserved)
{
	struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv = NULL;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;

		/* unmap protection-information SG list before completing */
		if (priv->meta_sg_valid)
			dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
			    scsi_prot_sg_count(scmd), scmd->sc_data_direction);
		mpi3mr_clear_scmd_priv(mrioc, scmd);
		scsi_dma_unmap(scmd);
		/* complete with DID_RESET so the midlayer retries the I/O */
		scmd->result = DID_RESET << 16;
		scsi_print_command(scmd);
		scsi_done(scmd);
		mrioc->flush_io_count++;
	}

out:
	return(true);
}

/**
 * mpi3mr_flush_host_io - Flush host I/Os
 * @mrioc: Adapter instance reference
 *
 * Flush all of the pending I/Os by calling
 * blk_mq_tagset_busy_iter() for each possible tag. This is
 * executed post controller reset
 *
 * Return: Nothing.
429 */ 430 void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc) 431 { 432 struct Scsi_Host *shost = mrioc->shost; 433 434 mrioc->flush_io_count = 0; 435 ioc_info(mrioc, "%s :Flushing Host I/O cmds post reset\n", __func__); 436 blk_mq_tagset_busy_iter(&shost->tag_set, 437 mpi3mr_flush_scmd, (void *)mrioc); 438 ioc_info(mrioc, "%s :Flushed %d Host I/O cmds\n", __func__, 439 mrioc->flush_io_count); 440 } 441 442 /** 443 * mpi3mr_alloc_tgtdev - target device allocator 444 * 445 * Allocate target device instance and initialize the reference 446 * count 447 * 448 * Return: target device instance. 449 */ 450 static struct mpi3mr_tgt_dev *mpi3mr_alloc_tgtdev(void) 451 { 452 struct mpi3mr_tgt_dev *tgtdev; 453 454 tgtdev = kzalloc(sizeof(*tgtdev), GFP_ATOMIC); 455 if (!tgtdev) 456 return NULL; 457 kref_init(&tgtdev->ref_count); 458 return tgtdev; 459 } 460 461 /** 462 * mpi3mr_tgtdev_add_to_list -Add tgtdevice to the list 463 * @mrioc: Adapter instance reference 464 * @tgtdev: Target device 465 * 466 * Add the target device to the target device list 467 * 468 * Return: Nothing. 469 */ 470 static void mpi3mr_tgtdev_add_to_list(struct mpi3mr_ioc *mrioc, 471 struct mpi3mr_tgt_dev *tgtdev) 472 { 473 unsigned long flags; 474 475 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 476 mpi3mr_tgtdev_get(tgtdev); 477 INIT_LIST_HEAD(&tgtdev->list); 478 list_add_tail(&tgtdev->list, &mrioc->tgtdev_list); 479 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 480 } 481 482 /** 483 * mpi3mr_tgtdev_del_from_list -Delete tgtdevice from the list 484 * @mrioc: Adapter instance reference 485 * @tgtdev: Target device 486 * 487 * Remove the target device from the target device list 488 * 489 * Return: Nothing. 
490 */ 491 static void mpi3mr_tgtdev_del_from_list(struct mpi3mr_ioc *mrioc, 492 struct mpi3mr_tgt_dev *tgtdev) 493 { 494 unsigned long flags; 495 496 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 497 if (!list_empty(&tgtdev->list)) { 498 list_del_init(&tgtdev->list); 499 mpi3mr_tgtdev_put(tgtdev); 500 } 501 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 502 } 503 504 /** 505 * __mpi3mr_get_tgtdev_by_handle -Get tgtdev from device handle 506 * @mrioc: Adapter instance reference 507 * @handle: Device handle 508 * 509 * Accessor to retrieve target device from the device handle. 510 * Non Lock version 511 * 512 * Return: Target device reference. 513 */ 514 static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_handle( 515 struct mpi3mr_ioc *mrioc, u16 handle) 516 { 517 struct mpi3mr_tgt_dev *tgtdev; 518 519 assert_spin_locked(&mrioc->tgtdev_lock); 520 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) 521 if (tgtdev->dev_handle == handle) 522 goto found_tgtdev; 523 return NULL; 524 525 found_tgtdev: 526 mpi3mr_tgtdev_get(tgtdev); 527 return tgtdev; 528 } 529 530 /** 531 * mpi3mr_get_tgtdev_by_handle -Get tgtdev from device handle 532 * @mrioc: Adapter instance reference 533 * @handle: Device handle 534 * 535 * Accessor to retrieve target device from the device handle. 536 * Lock version 537 * 538 * Return: Target device reference. 539 */ 540 static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle( 541 struct mpi3mr_ioc *mrioc, u16 handle) 542 { 543 struct mpi3mr_tgt_dev *tgtdev; 544 unsigned long flags; 545 546 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 547 tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle); 548 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 549 return tgtdev; 550 } 551 552 /** 553 * __mpi3mr_get_tgtdev_by_perst_id -Get tgtdev from persist ID 554 * @mrioc: Adapter instance reference 555 * @persist_id: Persistent ID 556 * 557 * Accessor to retrieve target device from the Persistent ID. 
558 * Non Lock version 559 * 560 * Return: Target device reference. 561 */ 562 static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_perst_id( 563 struct mpi3mr_ioc *mrioc, u16 persist_id) 564 { 565 struct mpi3mr_tgt_dev *tgtdev; 566 567 assert_spin_locked(&mrioc->tgtdev_lock); 568 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) 569 if (tgtdev->perst_id == persist_id) 570 goto found_tgtdev; 571 return NULL; 572 573 found_tgtdev: 574 mpi3mr_tgtdev_get(tgtdev); 575 return tgtdev; 576 } 577 578 /** 579 * mpi3mr_get_tgtdev_by_perst_id -Get tgtdev from persistent ID 580 * @mrioc: Adapter instance reference 581 * @persist_id: Persistent ID 582 * 583 * Accessor to retrieve target device from the Persistent ID. 584 * Lock version 585 * 586 * Return: Target device reference. 587 */ 588 static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_perst_id( 589 struct mpi3mr_ioc *mrioc, u16 persist_id) 590 { 591 struct mpi3mr_tgt_dev *tgtdev; 592 unsigned long flags; 593 594 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 595 tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, persist_id); 596 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 597 return tgtdev; 598 } 599 600 /** 601 * __mpi3mr_get_tgtdev_from_tgtpriv -Get tgtdev from tgt private 602 * @mrioc: Adapter instance reference 603 * @tgt_priv: Target private data 604 * 605 * Accessor to return target device from the target private 606 * data. Non Lock version 607 * 608 * Return: Target device reference. 
 */
static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_from_tgtpriv(
	struct mpi3mr_ioc *mrioc, struct mpi3mr_stgt_priv_data *tgt_priv)
{
	struct mpi3mr_tgt_dev *tgtdev;

	assert_spin_locked(&mrioc->tgtdev_lock);
	tgtdev = tgt_priv->tgt_dev;
	/* take a reference on behalf of the caller */
	if (tgtdev)
		mpi3mr_tgtdev_get(tgtdev);
	return tgtdev;
}

/**
 * mpi3mr_remove_tgtdev_from_host - Remove dev from upper layers
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device structure
 *
 * Checks whether the device is exposed to upper layers and if it
 * is then remove the device from upper layers by calling
 * scsi_remove_target().
 *
 * Return: Nothing.
 */
static void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev)
{
	struct mpi3mr_stgt_priv_data *tgt_priv;

	ioc_info(mrioc, "%s :Removing handle(0x%04x), wwid(0x%016llx)\n",
	    __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
	/* invalidate the handle first so new I/O to this target fails fast */
	if (tgtdev->starget && tgtdev->starget->hostdata) {
		tgt_priv = tgtdev->starget->hostdata;
		tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	}

	if (tgtdev->starget) {
		scsi_remove_target(&tgtdev->starget->dev);
		tgtdev->host_exposed = 0;
	}
	ioc_info(mrioc, "%s :Removed handle(0x%04x), wwid(0x%016llx)\n",
	    __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
}

/**
 * mpi3mr_report_tgtdev_to_host - Expose device to upper layers
 * @mrioc: Adapter instance reference
 * @perst_id: Persistent ID of the device
 *
 * Checks whether the device can be exposed to upper layers and
 * if it is not then expose the device to upper layers by
 * calling scsi_scan_target().
 *
 * Return: 0 on success, non zero on failure.
 */
static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc,
	u16 perst_id)
{
	int retval = 0;
	struct mpi3mr_tgt_dev *tgtdev;

	tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
	if (!tgtdev) {
		retval = -1;
		goto out;
	}
	/* hidden devices are never exposed to the SCSI midlayer */
	if (tgtdev->is_hidden) {
		retval = -1;
		goto out;
	}
	if (!tgtdev->host_exposed && !mrioc->reset_in_progress) {
		tgtdev->host_exposed = 1;
		scsi_scan_target(&mrioc->shost->shost_gendev, 0,
		    tgtdev->perst_id,
		    SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
		/* scan did not attach a starget: roll back the exposure flag */
		if (!tgtdev->starget)
			tgtdev->host_exposed = 0;
	}
out:
	/* drop the lookup reference taken above */
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);

	return retval;
}

/**
 * mpi3mr_change_queue_depth - Change QD callback handler
 * @sdev: SCSI device reference
 * @q_depth: Queue depth
 *
 * Validate and limit QD and call scsi_change_queue_depth.
 *
 * Return: return value of scsi_change_queue_depth
 */
static int mpi3mr_change_queue_depth(struct scsi_device *sdev,
	int q_depth)
{
	struct scsi_target *starget = scsi_target(sdev);
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	int retval = 0;

	/* non-TCQ devices can only take one command at a time */
	if (!sdev->tagged_supported)
		q_depth = 1;
	if (q_depth > shost->can_queue)
		q_depth = shost->can_queue;
	else if (!q_depth)
		q_depth = MPI3MR_DEFAULT_SDEV_QD;
	retval = scsi_change_queue_depth(sdev, q_depth);

	return retval;
}

/**
 * mpi3mr_update_sdev - Update SCSI device information
 * @sdev: SCSI device reference
 * @data: target device reference
 *
 * This is an iterator function called for each SCSI device in a
 * target to update the target specific information into each
 * SCSI device.
 *
 * Return: Nothing.
 */
static void
mpi3mr_update_sdev(struct scsi_device *sdev, void *data)
{
	struct mpi3mr_tgt_dev *tgtdev;

	tgtdev = (struct mpi3mr_tgt_dev *)data;
	if (!tgtdev)
		return;

	mpi3mr_change_queue_depth(sdev, tgtdev->q_depth);
	switch (tgtdev->dev_type) {
	case MPI3_DEVICE_DEVFORM_PCIE:
		/*The block layer hw sector size = 512*/
		if ((tgtdev->dev_spec.pcie_inf.dev_info &
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) {
			/* cap transfer size to the device-reported MDTS */
			blk_queue_max_hw_sectors(sdev->request_queue,
			    tgtdev->dev_spec.pcie_inf.mdts / 512);
			/* NVMe PRP alignment: pgsz 0 means use the default
			 * page size exponent */
			if (tgtdev->dev_spec.pcie_inf.pgsz == 0)
				blk_queue_virt_boundary(sdev->request_queue,
				    ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1));
			else
				blk_queue_virt_boundary(sdev->request_queue,
				    ((1 << tgtdev->dev_spec.pcie_inf.pgsz) - 1));
		}
		break;
	default:
		break;
	}
}

/**
 * mpi3mr_rfresh_tgtdevs - Refresh target device exposure
 * @mrioc: Adapter instance reference
 *
 * This is executed post controller reset to identify any
 * missing devices during reset and remove from the upper layers
 * or expose any newly detected device to the upper layers.
 *
 * Return: Nothing.
 */

void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;

	/* devices whose handles stayed invalid did not come back after
	 * the reset: remove them from the midlayer and the driver list */
	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
	    list) {
		if ((tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) &&
		    tgtdev->host_exposed) {
			mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
			mpi3mr_tgtdev_put(tgtdev);
		}
	}

	/* expose any devices that are present but not yet reported */
	tgtdev = NULL;
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		if ((tgtdev->dev_handle != MPI3MR_INVALID_DEV_HANDLE) &&
		    !tgtdev->is_hidden && !tgtdev->host_exposed)
			mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
	}
}

/**
 * mpi3mr_update_tgtdev - Update cached target device information
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device internal structure
 * @dev_pg0: New device page0
 *
 * Update the information from the device page0 into the driver
 * cached target device structure.
 *
 * Return: Nothing.
806 */ 807 static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc, 808 struct mpi3mr_tgt_dev *tgtdev, struct mpi3_device_page0 *dev_pg0) 809 { 810 u16 flags = 0; 811 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 812 u8 prot_mask = 0; 813 814 tgtdev->perst_id = le16_to_cpu(dev_pg0->persistent_id); 815 tgtdev->dev_handle = le16_to_cpu(dev_pg0->dev_handle); 816 tgtdev->dev_type = dev_pg0->device_form; 817 tgtdev->encl_handle = le16_to_cpu(dev_pg0->enclosure_handle); 818 tgtdev->parent_handle = le16_to_cpu(dev_pg0->parent_dev_handle); 819 tgtdev->slot = le16_to_cpu(dev_pg0->slot); 820 tgtdev->q_depth = le16_to_cpu(dev_pg0->queue_depth); 821 tgtdev->wwid = le64_to_cpu(dev_pg0->wwid); 822 823 flags = le16_to_cpu(dev_pg0->flags); 824 tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN); 825 826 if (tgtdev->starget && tgtdev->starget->hostdata) { 827 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 828 tgtdev->starget->hostdata; 829 scsi_tgt_priv_data->perst_id = tgtdev->perst_id; 830 scsi_tgt_priv_data->dev_handle = tgtdev->dev_handle; 831 scsi_tgt_priv_data->dev_type = tgtdev->dev_type; 832 } 833 834 switch (dev_pg0->access_status) { 835 case MPI3_DEVICE0_ASTATUS_NO_ERRORS: 836 case MPI3_DEVICE0_ASTATUS_PREPARE: 837 case MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION: 838 case MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY: 839 break; 840 default: 841 tgtdev->is_hidden = 1; 842 break; 843 } 844 845 switch (tgtdev->dev_type) { 846 case MPI3_DEVICE_DEVFORM_SAS_SATA: 847 { 848 struct mpi3_device0_sas_sata_format *sasinf = 849 &dev_pg0->device_specific.sas_sata_format; 850 u16 dev_info = le16_to_cpu(sasinf->device_info); 851 852 tgtdev->dev_spec.sas_sata_inf.dev_info = dev_info; 853 tgtdev->dev_spec.sas_sata_inf.sas_address = 854 le64_to_cpu(sasinf->sas_address); 855 if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) != 856 MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE) 857 tgtdev->is_hidden = 1; 858 else if (!(dev_info & (MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET | 859 
MPI3_SAS_DEVICE_INFO_SSP_TARGET))) 860 tgtdev->is_hidden = 1; 861 break; 862 } 863 case MPI3_DEVICE_DEVFORM_PCIE: 864 { 865 struct mpi3_device0_pcie_format *pcieinf = 866 &dev_pg0->device_specific.pcie_format; 867 u16 dev_info = le16_to_cpu(pcieinf->device_info); 868 869 tgtdev->dev_spec.pcie_inf.dev_info = dev_info; 870 tgtdev->dev_spec.pcie_inf.capb = 871 le32_to_cpu(pcieinf->capabilities); 872 tgtdev->dev_spec.pcie_inf.mdts = MPI3MR_DEFAULT_MDTS; 873 /* 2^12 = 4096 */ 874 tgtdev->dev_spec.pcie_inf.pgsz = 12; 875 if (dev_pg0->access_status == MPI3_DEVICE0_ASTATUS_NO_ERRORS) { 876 tgtdev->dev_spec.pcie_inf.mdts = 877 le32_to_cpu(pcieinf->maximum_data_transfer_size); 878 tgtdev->dev_spec.pcie_inf.pgsz = pcieinf->page_size; 879 tgtdev->dev_spec.pcie_inf.reset_to = 880 pcieinf->controller_reset_to; 881 tgtdev->dev_spec.pcie_inf.abort_to = 882 pcieinf->nvme_abort_to; 883 } 884 if (tgtdev->dev_spec.pcie_inf.mdts > (1024 * 1024)) 885 tgtdev->dev_spec.pcie_inf.mdts = (1024 * 1024); 886 if (((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) != 887 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) && 888 ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) != 889 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE)) 890 tgtdev->is_hidden = 1; 891 if (!mrioc->shost) 892 break; 893 prot_mask = scsi_host_get_prot(mrioc->shost); 894 if (prot_mask & SHOST_DIX_TYPE0_PROTECTION) { 895 scsi_host_set_prot(mrioc->shost, prot_mask & 0x77); 896 ioc_info(mrioc, 897 "%s : Disabling DIX0 prot capability\n", __func__); 898 ioc_info(mrioc, 899 "because HBA does not support DIX0 operation on NVME drives\n"); 900 } 901 break; 902 } 903 case MPI3_DEVICE_DEVFORM_VD: 904 { 905 struct mpi3_device0_vd_format *vdinf = 906 &dev_pg0->device_specific.vd_format; 907 908 tgtdev->dev_spec.vol_inf.state = vdinf->vd_state; 909 if (vdinf->vd_state == MPI3_DEVICE0_VD_STATE_OFFLINE) 910 tgtdev->is_hidden = 1; 911 break; 912 } 913 default: 914 break; 915 } 916 } 917 918 /** 919 * mpi3mr_devstatuschg_evt_bh - 
 * DevStatusChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event information.
 *
 * Process Device status Change event and based on device's new
 * information, either expose the device to the upper layers, or
 * remove the device from upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	u16 dev_handle = 0;
	u8 uhide = 0, delete = 0, cleanup = 0;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3_event_data_device_status_change *evtdata =
	    (struct mpi3_event_data_device_status_change *)fwevt->event_data;

	dev_handle = le16_to_cpu(evtdata->dev_handle);
	ioc_info(mrioc,
	    "%s :device status change: handle(0x%04x): reason code(0x%x)\n",
	    __func__, dev_handle, evtdata->reason_code);
	/* translate the firmware reason code into the actions to take */
	switch (evtdata->reason_code) {
	case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
		delete = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN:
		uhide = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
		delete = 1;
		cleanup = 1;
		break;
	default:
		ioc_info(mrioc, "%s :Unhandled reason code(0x%x)\n", __func__,
		    evtdata->reason_code);
		break;
	}

	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
	if (!tgtdev)
		goto out;
	if (uhide) {
		tgtdev->is_hidden = 0;
		if (!tgtdev->host_exposed)
			mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
	}
	if (tgtdev->starget && tgtdev->starget->hostdata) {
		if (delete)
			mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
	}
	if (cleanup) {
		mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
		/* extra put balances the allocation-time kref_init */
		mpi3mr_tgtdev_put(tgtdev);
	}

out:
	/* drop the lookup reference */
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);
}

/**
 * mpi3mr_devinfochg_evt_bh - DeviceInfoChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @dev_pg0: New device page0
 *
 * Process Device Info Change event and
 * based on device's new
 * information, either expose the device to the upper layers, or
 * remove the device from upper layers or update the details of
 * the device.
 *
 * Return: Nothing.
 */
static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3_device_page0 *dev_pg0)
{
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	u16 dev_handle = 0, perst_id = 0;

	perst_id = le16_to_cpu(dev_pg0->persistent_id);
	dev_handle = le16_to_cpu(dev_pg0->dev_handle);
	ioc_info(mrioc,
	    "%s :Device info change: handle(0x%04x): persist_id(0x%x)\n",
	    __func__, dev_handle, perst_id);
	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
	if (!tgtdev)
		goto out;
	mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0);
	/* became visible: expose it to the SCSI midlayer */
	if (!tgtdev->is_hidden && !tgtdev->host_exposed)
		mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
	/* became hidden: remove it from the SCSI midlayer */
	if (tgtdev->is_hidden && tgtdev->host_exposed)
		mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
	/* still exposed: refresh per-sdev settings (queue depth, limits) */
	if (!tgtdev->is_hidden && tgtdev->host_exposed && tgtdev->starget)
		starget_for_each_device(tgtdev->starget, (void *)tgtdev,
		    mpi3mr_update_sdev);
out:
	/* drop the lookup reference */
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);
}

/**
 * mpi3mr_sastopochg_evt_debug - SASTopoChange details
 * @mrioc: Adapter instance reference
 * @event_data: SAS topology change list event data
 *
 * Prints information about the SAS topology change event.
 *
 * Return: Nothing.
 */
static void
mpi3mr_sastopochg_evt_debug(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_data_sas_topology_change_list *event_data)
{
	int i;
	u16 handle;
	u8 reason_code, phy_number;
	char *status_str = NULL;
	u8 link_rate, prev_link_rate;

	switch (event_data->exp_status) {
	case MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
		status_str = "remove";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_RESPONDING:
		status_str = "responding";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
		status_str = "remove delay";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER:
		status_str = "direct attached";
		break;
	default:
		status_str = "unknown status";
		break;
	}
	ioc_info(mrioc, "%s :sas topology change: (%s)\n",
	    __func__, status_str);
	ioc_info(mrioc,
	    "%s :\texpander_handle(0x%04x), enclosure_handle(0x%04x) start_phy(%02d), num_entries(%d)\n",
	    __func__, le16_to_cpu(event_data->expander_dev_handle),
	    le16_to_cpu(event_data->enclosure_handle),
	    event_data->start_phy_num, event_data->num_entries);
	for (i = 0; i < event_data->num_entries; i++) {
		handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
		/* handle 0 means no device attached to this phy entry */
		if (!handle)
			continue;
		phy_number = event_data->start_phy_num + i;
		reason_code = event_data->phy_entry[i].status &
		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
		switch (reason_code) {
		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
			status_str = "target remove";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
			status_str = "delay target remove";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
			status_str = "link status change";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
			status_str = "link status no change";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
			status_str = "target responding";
			break;
		default:
			status_str = "unknown";
			break;
		}
		/* link_rate byte packs new rate (high nibble) and old (low) */
		link_rate = event_data->phy_entry[i].link_rate >> 4;
		prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
		ioc_info(mrioc,
		    "%s :\tphy(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
		    __func__, phy_number, handle, status_str, link_rate,
		    prev_link_rate);
	}
}

/**
 * mpi3mr_sastopochg_evt_bh - SASTopologyChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Prints information about the SAS topology change event and
 * for "not responding" event code, removes the device from the
 * upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_sastopochg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	struct mpi3_event_data_sas_topology_change_list *event_data =
	    (struct mpi3_event_data_sas_topology_change_list *)fwevt->event_data;
	int i;
	u16 handle;
	u8 reason_code;
	struct mpi3mr_tgt_dev *tgtdev = NULL;

	mpi3mr_sastopochg_evt_debug(mrioc, event_data);

	for (i = 0; i < event_data->num_entries; i++) {
		handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
		if (!tgtdev)
			continue;

		reason_code = event_data->phy_entry[i].status &
		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;

		switch (reason_code) {
		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
			if (tgtdev->host_exposed)
				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
			/* extra put balances the allocation-time kref_init */
			mpi3mr_tgtdev_put(tgtdev);
			break;
		default:
			break;
		}
		/* drop the per-iteration lookup reference */
		if (tgtdev)
			mpi3mr_tgtdev_put(tgtdev);
	}
}

/**
 * mpi3mr_pcietopochg_evt_debug - PCIeTopoChange details
 * @mrioc: Adapter instance reference
 *
@event_data: PCIe topology change list event data 1152 * 1153 * Prints information about the PCIe topology change event. 1154 * 1155 * Return: Nothing. 1156 */ 1157 static void 1158 mpi3mr_pcietopochg_evt_debug(struct mpi3mr_ioc *mrioc, 1159 struct mpi3_event_data_pcie_topology_change_list *event_data) 1160 { 1161 int i; 1162 u16 handle; 1163 u16 reason_code; 1164 u8 port_number; 1165 char *status_str = NULL; 1166 u8 link_rate, prev_link_rate; 1167 1168 switch (event_data->switch_status) { 1169 case MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING: 1170 status_str = "remove"; 1171 break; 1172 case MPI3_EVENT_PCIE_TOPO_SS_RESPONDING: 1173 status_str = "responding"; 1174 break; 1175 case MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING: 1176 status_str = "remove delay"; 1177 break; 1178 case MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH: 1179 status_str = "direct attached"; 1180 break; 1181 default: 1182 status_str = "unknown status"; 1183 break; 1184 } 1185 ioc_info(mrioc, "%s :pcie topology change: (%s)\n", 1186 __func__, status_str); 1187 ioc_info(mrioc, 1188 "%s :\tswitch_handle(0x%04x), enclosure_handle(0x%04x) start_port(%02d), num_entries(%d)\n", 1189 __func__, le16_to_cpu(event_data->switch_dev_handle), 1190 le16_to_cpu(event_data->enclosure_handle), 1191 event_data->start_port_num, event_data->num_entries); 1192 for (i = 0; i < event_data->num_entries; i++) { 1193 handle = 1194 le16_to_cpu(event_data->port_entry[i].attached_dev_handle); 1195 if (!handle) 1196 continue; 1197 port_number = event_data->start_port_num + i; 1198 reason_code = event_data->port_entry[i].port_status; 1199 switch (reason_code) { 1200 case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING: 1201 status_str = "target remove"; 1202 break; 1203 case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING: 1204 status_str = "delay target remove"; 1205 break; 1206 case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED: 1207 status_str = "link status change"; 1208 break; 1209 case MPI3_EVENT_PCIE_TOPO_PS_NO_CHANGE: 1210 status_str = "link status 
no change"; 1211 break; 1212 case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING: 1213 status_str = "target responding"; 1214 break; 1215 default: 1216 status_str = "unknown"; 1217 break; 1218 } 1219 link_rate = event_data->port_entry[i].current_port_info & 1220 MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK; 1221 prev_link_rate = event_data->port_entry[i].previous_port_info & 1222 MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK; 1223 ioc_info(mrioc, 1224 "%s :\tport(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n", 1225 __func__, port_number, handle, status_str, link_rate, 1226 prev_link_rate); 1227 } 1228 } 1229 1230 /** 1231 * mpi3mr_pcietopochg_evt_bh - PCIeTopologyChange evt bottomhalf 1232 * @mrioc: Adapter instance reference 1233 * @fwevt: Firmware event reference 1234 * 1235 * Prints information about the PCIe topology change event and 1236 * for "not responding" event code, removes the device from the 1237 * upper layers. 1238 * 1239 * Return: Nothing. 1240 */ 1241 static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc, 1242 struct mpi3mr_fwevt *fwevt) 1243 { 1244 struct mpi3_event_data_pcie_topology_change_list *event_data = 1245 (struct mpi3_event_data_pcie_topology_change_list *)fwevt->event_data; 1246 int i; 1247 u16 handle; 1248 u8 reason_code; 1249 struct mpi3mr_tgt_dev *tgtdev = NULL; 1250 1251 mpi3mr_pcietopochg_evt_debug(mrioc, event_data); 1252 1253 for (i = 0; i < event_data->num_entries; i++) { 1254 handle = 1255 le16_to_cpu(event_data->port_entry[i].attached_dev_handle); 1256 if (!handle) 1257 continue; 1258 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 1259 if (!tgtdev) 1260 continue; 1261 1262 reason_code = event_data->port_entry[i].port_status; 1263 1264 switch (reason_code) { 1265 case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING: 1266 if (tgtdev->host_exposed) 1267 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); 1268 mpi3mr_tgtdev_del_from_list(mrioc, tgtdev); 1269 mpi3mr_tgtdev_put(tgtdev); 1270 break; 1271 default: 1272 break; 1273 } 1274 if 
(tgtdev) 1275 mpi3mr_tgtdev_put(tgtdev); 1276 } 1277 } 1278 1279 /** 1280 * mpi3mr_fwevt_bh - Firmware event bottomhalf handler 1281 * @mrioc: Adapter instance reference 1282 * @fwevt: Firmware event reference 1283 * 1284 * Identifies the firmware event and calls corresponding bottomg 1285 * half handler and sends event acknowledgment if required. 1286 * 1287 * Return: Nothing. 1288 */ 1289 static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc, 1290 struct mpi3mr_fwevt *fwevt) 1291 { 1292 mrioc->current_event = fwevt; 1293 mpi3mr_fwevt_del_from_list(mrioc, fwevt); 1294 1295 if (mrioc->stop_drv_processing) 1296 goto out; 1297 1298 if (!fwevt->process_evt) 1299 goto evt_ack; 1300 1301 switch (fwevt->event_id) { 1302 case MPI3_EVENT_DEVICE_ADDED: 1303 { 1304 struct mpi3_device_page0 *dev_pg0 = 1305 (struct mpi3_device_page0 *)fwevt->event_data; 1306 mpi3mr_report_tgtdev_to_host(mrioc, 1307 le16_to_cpu(dev_pg0->persistent_id)); 1308 break; 1309 } 1310 case MPI3_EVENT_DEVICE_INFO_CHANGED: 1311 { 1312 mpi3mr_devinfochg_evt_bh(mrioc, 1313 (struct mpi3_device_page0 *)fwevt->event_data); 1314 break; 1315 } 1316 case MPI3_EVENT_DEVICE_STATUS_CHANGE: 1317 { 1318 mpi3mr_devstatuschg_evt_bh(mrioc, fwevt); 1319 break; 1320 } 1321 case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST: 1322 { 1323 mpi3mr_sastopochg_evt_bh(mrioc, fwevt); 1324 break; 1325 } 1326 case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: 1327 { 1328 mpi3mr_pcietopochg_evt_bh(mrioc, fwevt); 1329 break; 1330 } 1331 default: 1332 break; 1333 } 1334 1335 evt_ack: 1336 if (fwevt->send_ack) 1337 mpi3mr_send_event_ack(mrioc, fwevt->event_id, 1338 fwevt->evt_ctx); 1339 out: 1340 /* Put fwevt reference count to neutralize kref_init increment */ 1341 mpi3mr_fwevt_put(fwevt); 1342 mrioc->current_event = NULL; 1343 } 1344 1345 /** 1346 * mpi3mr_fwevt_worker - Firmware event worker 1347 * @work: Work struct containing firmware event 1348 * 1349 * Extracts the firmware event and calls mpi3mr_fwevt_bh. 1350 * 1351 * Return: Nothing. 
1352 */ 1353 static void mpi3mr_fwevt_worker(struct work_struct *work) 1354 { 1355 struct mpi3mr_fwevt *fwevt = container_of(work, struct mpi3mr_fwevt, 1356 work); 1357 mpi3mr_fwevt_bh(fwevt->mrioc, fwevt); 1358 /* 1359 * Put fwevt reference count after 1360 * dequeuing it from worker queue 1361 */ 1362 mpi3mr_fwevt_put(fwevt); 1363 } 1364 1365 /** 1366 * mpi3mr_create_tgtdev - Create and add a target device 1367 * @mrioc: Adapter instance reference 1368 * @dev_pg0: Device Page 0 data 1369 * 1370 * If the device specified by the device page 0 data is not 1371 * present in the driver's internal list, allocate the memory 1372 * for the device, populate the data and add to the list, else 1373 * update the device data. The key is persistent ID. 1374 * 1375 * Return: 0 on success, -ENOMEM on memory allocation failure 1376 */ 1377 static int mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc, 1378 struct mpi3_device_page0 *dev_pg0) 1379 { 1380 int retval = 0; 1381 struct mpi3mr_tgt_dev *tgtdev = NULL; 1382 u16 perst_id = 0; 1383 1384 perst_id = le16_to_cpu(dev_pg0->persistent_id); 1385 tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id); 1386 if (tgtdev) { 1387 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0); 1388 mpi3mr_tgtdev_put(tgtdev); 1389 } else { 1390 tgtdev = mpi3mr_alloc_tgtdev(); 1391 if (!tgtdev) 1392 return -ENOMEM; 1393 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0); 1394 mpi3mr_tgtdev_add_to_list(mrioc, tgtdev); 1395 } 1396 1397 return retval; 1398 } 1399 1400 /** 1401 * mpi3mr_flush_delayed_rmhs_list - Flush pending commands 1402 * @mrioc: Adapter instance reference 1403 * 1404 * Flush pending commands in the delayed removal handshake list 1405 * due to a controller reset or driver removal as a cleanup. 
1406 * 1407 * Return: Nothing 1408 */ 1409 void mpi3mr_flush_delayed_rmhs_list(struct mpi3mr_ioc *mrioc) 1410 { 1411 struct delayed_dev_rmhs_node *_rmhs_node; 1412 1413 while (!list_empty(&mrioc->delayed_rmhs_list)) { 1414 _rmhs_node = list_entry(mrioc->delayed_rmhs_list.next, 1415 struct delayed_dev_rmhs_node, list); 1416 list_del(&_rmhs_node->list); 1417 kfree(_rmhs_node); 1418 } 1419 } 1420 1421 /** 1422 * mpi3mr_dev_rmhs_complete_iou - Device removal IOUC completion 1423 * @mrioc: Adapter instance reference 1424 * @drv_cmd: Internal command tracker 1425 * 1426 * Issues a target reset TM to the firmware from the device 1427 * removal TM pend list or retry the removal handshake sequence 1428 * based on the IOU control request IOC status. 1429 * 1430 * Return: Nothing 1431 */ 1432 static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_ioc *mrioc, 1433 struct mpi3mr_drv_cmd *drv_cmd) 1434 { 1435 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN; 1436 struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL; 1437 1438 ioc_info(mrioc, 1439 "%s :dev_rmhs_iouctrl_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x)\n", 1440 __func__, drv_cmd->dev_handle, drv_cmd->ioc_status, 1441 drv_cmd->ioc_loginfo); 1442 if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) { 1443 if (drv_cmd->retry_count < MPI3MR_DEV_RMHS_RETRY_COUNT) { 1444 drv_cmd->retry_count++; 1445 ioc_info(mrioc, 1446 "%s :dev_rmhs_iouctrl_complete: handle(0x%04x)retrying handshake retry=%d\n", 1447 __func__, drv_cmd->dev_handle, 1448 drv_cmd->retry_count); 1449 mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle, 1450 drv_cmd, drv_cmd->iou_rc); 1451 return; 1452 } 1453 ioc_err(mrioc, 1454 "%s :dev removal handshake failed after all retries: handle(0x%04x)\n", 1455 __func__, drv_cmd->dev_handle); 1456 } else { 1457 ioc_info(mrioc, 1458 "%s :dev removal handshake completed successfully: handle(0x%04x)\n", 1459 __func__, drv_cmd->dev_handle); 1460 clear_bit(drv_cmd->dev_handle, 
mrioc->removepend_bitmap); 1461 } 1462 1463 if (!list_empty(&mrioc->delayed_rmhs_list)) { 1464 delayed_dev_rmhs = list_entry(mrioc->delayed_rmhs_list.next, 1465 struct delayed_dev_rmhs_node, list); 1466 drv_cmd->dev_handle = delayed_dev_rmhs->handle; 1467 drv_cmd->retry_count = 0; 1468 drv_cmd->iou_rc = delayed_dev_rmhs->iou_rc; 1469 ioc_info(mrioc, 1470 "%s :dev_rmhs_iouctrl_complete: processing delayed TM: handle(0x%04x)\n", 1471 __func__, drv_cmd->dev_handle); 1472 mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle, drv_cmd, 1473 drv_cmd->iou_rc); 1474 list_del(&delayed_dev_rmhs->list); 1475 kfree(delayed_dev_rmhs); 1476 return; 1477 } 1478 drv_cmd->state = MPI3MR_CMD_NOTUSED; 1479 drv_cmd->callback = NULL; 1480 drv_cmd->retry_count = 0; 1481 drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 1482 clear_bit(cmd_idx, mrioc->devrem_bitmap); 1483 } 1484 1485 /** 1486 * mpi3mr_dev_rmhs_complete_tm - Device removal TM completion 1487 * @mrioc: Adapter instance reference 1488 * @drv_cmd: Internal command tracker 1489 * 1490 * Issues a target reset TM to the firmware from the device 1491 * removal TM pend list or issue IO unit control request as 1492 * part of device removal or hidden acknowledgment handshake. 
 *
 * Return: Nothing
 */
static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	struct mpi3_iounit_control_request iou_ctrl;
	u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
	struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
	int retval;

	/* The TM reply payload is only valid when the reply flag is set */
	if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
		tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;

	if (tm_reply)
		pr_info(IOCNAME
		    "dev_rmhs_tr_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x), term_count(%d)\n",
		    mrioc->name, drv_cmd->dev_handle, drv_cmd->ioc_status,
		    drv_cmd->ioc_loginfo,
		    le32_to_cpu(tm_reply->termination_count));

	pr_info(IOCNAME "Issuing IOU CTL: handle(0x%04x) dev_rmhs idx(%d)\n",
	    mrioc->name, drv_cmd->dev_handle, cmd_idx);

	memset(&iou_ctrl, 0, sizeof(iou_ctrl));

	/* Chain to the IO unit control step of the removal handshake */
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 0;
	drv_cmd->callback = mpi3mr_dev_rmhs_complete_iou;
	iou_ctrl.operation = drv_cmd->iou_rc;
	iou_ctrl.param16[0] = cpu_to_le16(drv_cmd->dev_handle);
	iou_ctrl.host_tag = cpu_to_le16(drv_cmd->host_tag);
	iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;

	retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl, sizeof(iou_ctrl),
	    1);
	if (retval) {
		pr_err(IOCNAME "Issue DevRmHsTMIOUCTL: Admin post failed\n",
		    mrioc->name);
		goto out_failed;
	}

	return;
out_failed:
	/* Posting failed: release the tracker slot so it can be reused */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	drv_cmd->retry_count = 0;
	clear_bit(cmd_idx, mrioc->devrem_bitmap);
}

/**
 * mpi3mr_dev_rmhs_send_tm - Issue TM for device removal
 * @mrioc: Adapter instance reference
 * @handle: Device handle
 * @cmdparam: Internal command tracker
 * @iou_rc: IO unit reason code
 *
 * Issues a target reset TM to the firmware or add it to a pend
 * list as part of device removal or hidden acknowledgment
 * handshake.
 *
 * Return: Nothing
 */
static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
	struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc)
{
	struct mpi3_scsi_task_mgmt_request tm_req;
	int retval = 0;
	u16 cmd_idx = MPI3MR_NUM_DEVRMCMD;
	u8 retrycount = 5;
	struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
	struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;

	/* A caller-supplied tracker means this is a retry/continuation */
	if (drv_cmd)
		goto issue_cmd;
	/*
	 * Grab a free dev-removal tracker slot; test_and_set_bit guards
	 * against a concurrent claimer racing with find_first_zero_bit.
	 */
	do {
		cmd_idx = find_first_zero_bit(mrioc->devrem_bitmap,
		    MPI3MR_NUM_DEVRMCMD);
		if (cmd_idx < MPI3MR_NUM_DEVRMCMD) {
			if (!test_and_set_bit(cmd_idx, mrioc->devrem_bitmap))
				break;
			cmd_idx = MPI3MR_NUM_DEVRMCMD;
		}
	} while (retrycount--);

	/* No slot free: postpone this removal on the delayed list */
	if (cmd_idx >= MPI3MR_NUM_DEVRMCMD) {
		delayed_dev_rmhs = kzalloc(sizeof(*delayed_dev_rmhs),
		    GFP_ATOMIC);
		if (!delayed_dev_rmhs)
			return;
		INIT_LIST_HEAD(&delayed_dev_rmhs->list);
		delayed_dev_rmhs->handle = handle;
		delayed_dev_rmhs->iou_rc = iou_rc;
		list_add_tail(&delayed_dev_rmhs->list,
		    &mrioc->delayed_rmhs_list);
		ioc_info(mrioc, "%s :DevRmHs: tr:handle(0x%04x) is postponed\n",
		    __func__, handle);
		return;
	}
	drv_cmd = &mrioc->dev_rmhs_cmds[cmd_idx];

issue_cmd:
	cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
	ioc_info(mrioc,
	    "%s :Issuing TR TM: for devhandle 0x%04x with dev_rmhs %d\n",
	    __func__, handle, cmd_idx);

	memset(&tm_req, 0, sizeof(tm_req));
	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
		ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__);
		goto out;
	}
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 0;
	drv_cmd->callback = mpi3mr_dev_rmhs_complete_tm;
	drv_cmd->dev_handle = handle;
	drv_cmd->iou_rc = iou_rc;
	tm_req.dev_handle = cpu_to_le16(handle);
	tm_req.task_type = MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
	tm_req.host_tag = cpu_to_le16(drv_cmd->host_tag);
	tm_req.task_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INVALID);
	tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT;

	/* Mark the handle removal-pending before posting the TM */
	set_bit(handle, mrioc->removepend_bitmap);
	retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1);
	if (retval) {
		ioc_err(mrioc, "%s :Issue DevRmHsTM: Admin Post failed\n",
		    __func__);
		goto out_failed;
	}
out:
	return;
out_failed:
	/* Posting failed: release the tracker slot so it can be reused */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	drv_cmd->retry_count = 0;
	clear_bit(cmd_idx, mrioc->devrem_bitmap);
}

/**
 * mpi3mr_pcietopochg_evt_th - PCIETopologyChange evt tophalf
 * @mrioc: Adapter instance reference
 * @event_reply: event data
 *
 * Checks for the reason code and based on that either block I/O
 * to device, or unblock I/O to the device, or start the device
 * removal handshake with reason as remove with the firmware for
 * PCIe devices.
1643 * 1644 * Return: Nothing 1645 */ 1646 static void mpi3mr_pcietopochg_evt_th(struct mpi3mr_ioc *mrioc, 1647 struct mpi3_event_notification_reply *event_reply) 1648 { 1649 struct mpi3_event_data_pcie_topology_change_list *topo_evt = 1650 (struct mpi3_event_data_pcie_topology_change_list *)event_reply->event_data; 1651 int i; 1652 u16 handle; 1653 u8 reason_code; 1654 struct mpi3mr_tgt_dev *tgtdev = NULL; 1655 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 1656 1657 for (i = 0; i < topo_evt->num_entries; i++) { 1658 handle = le16_to_cpu(topo_evt->port_entry[i].attached_dev_handle); 1659 if (!handle) 1660 continue; 1661 reason_code = topo_evt->port_entry[i].port_status; 1662 scsi_tgt_priv_data = NULL; 1663 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 1664 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) 1665 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 1666 tgtdev->starget->hostdata; 1667 switch (reason_code) { 1668 case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING: 1669 if (scsi_tgt_priv_data) { 1670 scsi_tgt_priv_data->dev_removed = 1; 1671 scsi_tgt_priv_data->dev_removedelay = 0; 1672 atomic_set(&scsi_tgt_priv_data->block_io, 0); 1673 } 1674 mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL, 1675 MPI3_CTRL_OP_REMOVE_DEVICE); 1676 break; 1677 case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING: 1678 if (scsi_tgt_priv_data) { 1679 scsi_tgt_priv_data->dev_removedelay = 1; 1680 atomic_inc(&scsi_tgt_priv_data->block_io); 1681 } 1682 break; 1683 case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING: 1684 if (scsi_tgt_priv_data && 1685 scsi_tgt_priv_data->dev_removedelay) { 1686 scsi_tgt_priv_data->dev_removedelay = 0; 1687 atomic_dec_if_positive 1688 (&scsi_tgt_priv_data->block_io); 1689 } 1690 break; 1691 case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED: 1692 default: 1693 break; 1694 } 1695 if (tgtdev) 1696 mpi3mr_tgtdev_put(tgtdev); 1697 } 1698 } 1699 1700 /** 1701 * mpi3mr_sastopochg_evt_th - SASTopologyChange evt tophalf 1702 * @mrioc: Adapter instance 
reference 1703 * @event_reply: event data 1704 * 1705 * Checks for the reason code and based on that either block I/O 1706 * to device, or unblock I/O to the device, or start the device 1707 * removal handshake with reason as remove with the firmware for 1708 * SAS/SATA devices. 1709 * 1710 * Return: Nothing 1711 */ 1712 static void mpi3mr_sastopochg_evt_th(struct mpi3mr_ioc *mrioc, 1713 struct mpi3_event_notification_reply *event_reply) 1714 { 1715 struct mpi3_event_data_sas_topology_change_list *topo_evt = 1716 (struct mpi3_event_data_sas_topology_change_list *)event_reply->event_data; 1717 int i; 1718 u16 handle; 1719 u8 reason_code; 1720 struct mpi3mr_tgt_dev *tgtdev = NULL; 1721 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 1722 1723 for (i = 0; i < topo_evt->num_entries; i++) { 1724 handle = le16_to_cpu(topo_evt->phy_entry[i].attached_dev_handle); 1725 if (!handle) 1726 continue; 1727 reason_code = topo_evt->phy_entry[i].status & 1728 MPI3_EVENT_SAS_TOPO_PHY_RC_MASK; 1729 scsi_tgt_priv_data = NULL; 1730 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 1731 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) 1732 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 1733 tgtdev->starget->hostdata; 1734 switch (reason_code) { 1735 case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING: 1736 if (scsi_tgt_priv_data) { 1737 scsi_tgt_priv_data->dev_removed = 1; 1738 scsi_tgt_priv_data->dev_removedelay = 0; 1739 atomic_set(&scsi_tgt_priv_data->block_io, 0); 1740 } 1741 mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL, 1742 MPI3_CTRL_OP_REMOVE_DEVICE); 1743 break; 1744 case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING: 1745 if (scsi_tgt_priv_data) { 1746 scsi_tgt_priv_data->dev_removedelay = 1; 1747 atomic_inc(&scsi_tgt_priv_data->block_io); 1748 } 1749 break; 1750 case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING: 1751 if (scsi_tgt_priv_data && 1752 scsi_tgt_priv_data->dev_removedelay) { 1753 scsi_tgt_priv_data->dev_removedelay = 0; 1754 
atomic_dec_if_positive 1755 (&scsi_tgt_priv_data->block_io); 1756 } 1757 break; 1758 case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED: 1759 default: 1760 break; 1761 } 1762 if (tgtdev) 1763 mpi3mr_tgtdev_put(tgtdev); 1764 } 1765 } 1766 1767 /** 1768 * mpi3mr_devstatuschg_evt_th - DeviceStatusChange evt tophalf 1769 * @mrioc: Adapter instance reference 1770 * @event_reply: event data 1771 * 1772 * Checks for the reason code and based on that either block I/O 1773 * to device, or unblock I/O to the device, or start the device 1774 * removal handshake with reason as remove/hide acknowledgment 1775 * with the firmware. 1776 * 1777 * Return: Nothing 1778 */ 1779 static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_ioc *mrioc, 1780 struct mpi3_event_notification_reply *event_reply) 1781 { 1782 u16 dev_handle = 0; 1783 u8 ublock = 0, block = 0, hide = 0, delete = 0, remove = 0; 1784 struct mpi3mr_tgt_dev *tgtdev = NULL; 1785 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 1786 struct mpi3_event_data_device_status_change *evtdata = 1787 (struct mpi3_event_data_device_status_change *)event_reply->event_data; 1788 1789 if (mrioc->stop_drv_processing) 1790 goto out; 1791 1792 dev_handle = le16_to_cpu(evtdata->dev_handle); 1793 1794 switch (evtdata->reason_code) { 1795 case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT: 1796 case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT: 1797 block = 1; 1798 break; 1799 case MPI3_EVENT_DEV_STAT_RC_HIDDEN: 1800 delete = 1; 1801 hide = 1; 1802 break; 1803 case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING: 1804 delete = 1; 1805 remove = 1; 1806 break; 1807 case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP: 1808 case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP: 1809 ublock = 1; 1810 break; 1811 default: 1812 break; 1813 } 1814 1815 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle); 1816 if (!tgtdev) 1817 goto out; 1818 if (hide) 1819 tgtdev->is_hidden = hide; 1820 if (tgtdev->starget && tgtdev->starget->hostdata) { 1821 
scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 1822 tgtdev->starget->hostdata; 1823 if (block) 1824 atomic_inc(&scsi_tgt_priv_data->block_io); 1825 if (delete) 1826 scsi_tgt_priv_data->dev_removed = 1; 1827 if (ublock) 1828 atomic_dec_if_positive(&scsi_tgt_priv_data->block_io); 1829 } 1830 if (remove) 1831 mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL, 1832 MPI3_CTRL_OP_REMOVE_DEVICE); 1833 if (hide) 1834 mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL, 1835 MPI3_CTRL_OP_HIDDEN_ACK); 1836 1837 out: 1838 if (tgtdev) 1839 mpi3mr_tgtdev_put(tgtdev); 1840 } 1841 1842 /** 1843 * mpi3mr_energypackchg_evt_th - Energy pack change evt tophalf 1844 * @mrioc: Adapter instance reference 1845 * @event_reply: event data 1846 * 1847 * Identifies the new shutdown timeout value and update. 1848 * 1849 * Return: Nothing 1850 */ 1851 static void mpi3mr_energypackchg_evt_th(struct mpi3mr_ioc *mrioc, 1852 struct mpi3_event_notification_reply *event_reply) 1853 { 1854 struct mpi3_event_data_energy_pack_change *evtdata = 1855 (struct mpi3_event_data_energy_pack_change *)event_reply->event_data; 1856 u16 shutdown_timeout = le16_to_cpu(evtdata->shutdown_timeout); 1857 1858 if (shutdown_timeout <= 0) { 1859 ioc_warn(mrioc, 1860 "%s :Invalid Shutdown Timeout received = %d\n", 1861 __func__, shutdown_timeout); 1862 return; 1863 } 1864 1865 ioc_info(mrioc, 1866 "%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n", 1867 __func__, mrioc->facts.shutdown_timeout, shutdown_timeout); 1868 mrioc->facts.shutdown_timeout = shutdown_timeout; 1869 } 1870 1871 /** 1872 * mpi3mr_os_handle_events - Firmware event handler 1873 * @mrioc: Adapter instance reference 1874 * @event_reply: event data 1875 * 1876 * Identify whteher the event has to handled and acknowledged 1877 * and either process the event in the tophalf and/or schedule a 1878 * bottom half through mpi3mr_fwevt_worker. 
1879 * 1880 * Return: Nothing 1881 */ 1882 void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc, 1883 struct mpi3_event_notification_reply *event_reply) 1884 { 1885 u16 evt_type, sz; 1886 struct mpi3mr_fwevt *fwevt = NULL; 1887 bool ack_req = 0, process_evt_bh = 0; 1888 1889 if (mrioc->stop_drv_processing) 1890 return; 1891 1892 if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK) 1893 == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED) 1894 ack_req = 1; 1895 1896 evt_type = event_reply->event; 1897 1898 switch (evt_type) { 1899 case MPI3_EVENT_DEVICE_ADDED: 1900 { 1901 struct mpi3_device_page0 *dev_pg0 = 1902 (struct mpi3_device_page0 *)event_reply->event_data; 1903 if (mpi3mr_create_tgtdev(mrioc, dev_pg0)) 1904 ioc_err(mrioc, 1905 "%s :Failed to add device in the device add event\n", 1906 __func__); 1907 else 1908 process_evt_bh = 1; 1909 break; 1910 } 1911 case MPI3_EVENT_DEVICE_STATUS_CHANGE: 1912 { 1913 process_evt_bh = 1; 1914 mpi3mr_devstatuschg_evt_th(mrioc, event_reply); 1915 break; 1916 } 1917 case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST: 1918 { 1919 process_evt_bh = 1; 1920 mpi3mr_sastopochg_evt_th(mrioc, event_reply); 1921 break; 1922 } 1923 case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: 1924 { 1925 process_evt_bh = 1; 1926 mpi3mr_pcietopochg_evt_th(mrioc, event_reply); 1927 break; 1928 } 1929 case MPI3_EVENT_DEVICE_INFO_CHANGED: 1930 { 1931 process_evt_bh = 1; 1932 break; 1933 } 1934 case MPI3_EVENT_ENERGY_PACK_CHANGE: 1935 { 1936 mpi3mr_energypackchg_evt_th(mrioc, event_reply); 1937 break; 1938 } 1939 case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE: 1940 case MPI3_EVENT_SAS_DISCOVERY: 1941 case MPI3_EVENT_CABLE_MGMT: 1942 case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR: 1943 case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE: 1944 case MPI3_EVENT_PCIE_ENUMERATION: 1945 break; 1946 default: 1947 ioc_info(mrioc, "%s :event 0x%02x is not handled\n", 1948 __func__, evt_type); 1949 break; 1950 } 1951 if (process_evt_bh || ack_req) { 1952 sz = event_reply->event_data_length * 4; 
1953 fwevt = mpi3mr_alloc_fwevt(sz); 1954 if (!fwevt) { 1955 ioc_info(mrioc, "%s :failure at %s:%d/%s()!\n", 1956 __func__, __FILE__, __LINE__, __func__); 1957 return; 1958 } 1959 1960 memcpy(fwevt->event_data, event_reply->event_data, sz); 1961 fwevt->mrioc = mrioc; 1962 fwevt->event_id = evt_type; 1963 fwevt->send_ack = ack_req; 1964 fwevt->process_evt = process_evt_bh; 1965 fwevt->evt_ctx = le32_to_cpu(event_reply->event_context); 1966 mpi3mr_fwevt_add_to_list(mrioc, fwevt); 1967 } 1968 } 1969 1970 /** 1971 * mpi3mr_setup_eedp - Setup EEDP information in MPI3 SCSI IO 1972 * @mrioc: Adapter instance reference 1973 * @scmd: SCSI command reference 1974 * @scsiio_req: MPI3 SCSI IO request 1975 * 1976 * Identifies the protection information flags from the SCSI 1977 * command and set appropriate flags in the MPI3 SCSI IO 1978 * request. 1979 * 1980 * Return: Nothing 1981 */ 1982 static void mpi3mr_setup_eedp(struct mpi3mr_ioc *mrioc, 1983 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req) 1984 { 1985 u16 eedp_flags = 0; 1986 unsigned char prot_op = scsi_get_prot_op(scmd); 1987 1988 switch (prot_op) { 1989 case SCSI_PROT_NORMAL: 1990 return; 1991 case SCSI_PROT_READ_STRIP: 1992 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE; 1993 break; 1994 case SCSI_PROT_WRITE_INSERT: 1995 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT; 1996 break; 1997 case SCSI_PROT_READ_INSERT: 1998 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT; 1999 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 2000 break; 2001 case SCSI_PROT_WRITE_STRIP: 2002 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE; 2003 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 2004 break; 2005 case SCSI_PROT_READ_PASS: 2006 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK; 2007 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 2008 break; 2009 case SCSI_PROT_WRITE_PASS: 2010 if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) { 2011 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN; 2012 
scsiio_req->sgl[0].eedp.application_tag_translation_mask = 2013 0xffff; 2014 } else 2015 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK; 2016 2017 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 2018 break; 2019 default: 2020 return; 2021 } 2022 2023 if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK) 2024 eedp_flags |= MPI3_EEDPFLAGS_CHK_GUARD; 2025 2026 if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) 2027 eedp_flags |= MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM; 2028 2029 if (scmd->prot_flags & SCSI_PROT_REF_CHECK) { 2030 eedp_flags |= MPI3_EEDPFLAGS_CHK_REF_TAG | 2031 MPI3_EEDPFLAGS_INCR_PRI_REF_TAG; 2032 scsiio_req->cdb.eedp32.primary_reference_tag = 2033 cpu_to_be32(scsi_prot_ref_tag(scmd)); 2034 } 2035 2036 if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT) 2037 eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG; 2038 2039 eedp_flags |= MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE; 2040 2041 switch (scsi_prot_interval(scmd)) { 2042 case 512: 2043 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_512; 2044 break; 2045 case 520: 2046 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_520; 2047 break; 2048 case 4080: 2049 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4080; 2050 break; 2051 case 4088: 2052 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4088; 2053 break; 2054 case 4096: 2055 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4096; 2056 break; 2057 case 4104: 2058 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4104; 2059 break; 2060 case 4160: 2061 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4160; 2062 break; 2063 default: 2064 break; 2065 } 2066 2067 scsiio_req->sgl[0].eedp.eedp_flags = cpu_to_le16(eedp_flags); 2068 scsiio_req->sgl[0].eedp.flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED; 2069 } 2070 2071 /** 2072 * mpi3mr_build_sense_buffer - Map sense information 2073 * @desc: Sense type 2074 * @buf: Sense buffer to populate 2075 * @key: Sense key 2076 * @asc: Additional sense code 2077 * @ascq: Additional sense code 
qualifier 2078 * 2079 * Maps the given sense information into either descriptor or 2080 * fixed format sense data. 2081 * 2082 * Return: Nothing 2083 */ 2084 static inline void mpi3mr_build_sense_buffer(int desc, u8 *buf, u8 key, 2085 u8 asc, u8 ascq) 2086 { 2087 if (desc) { 2088 buf[0] = 0x72; /* descriptor, current */ 2089 buf[1] = key; 2090 buf[2] = asc; 2091 buf[3] = ascq; 2092 buf[7] = 0; 2093 } else { 2094 buf[0] = 0x70; /* fixed, current */ 2095 buf[2] = key; 2096 buf[7] = 0xa; 2097 buf[12] = asc; 2098 buf[13] = ascq; 2099 } 2100 } 2101 2102 /** 2103 * mpi3mr_map_eedp_error - Map EEDP errors from IOC status 2104 * @scmd: SCSI command reference 2105 * @ioc_status: status of MPI3 request 2106 * 2107 * Maps the EEDP error status of the SCSI IO request to sense 2108 * data. 2109 * 2110 * Return: Nothing 2111 */ 2112 static void mpi3mr_map_eedp_error(struct scsi_cmnd *scmd, 2113 u16 ioc_status) 2114 { 2115 u8 ascq = 0; 2116 2117 switch (ioc_status) { 2118 case MPI3_IOCSTATUS_EEDP_GUARD_ERROR: 2119 ascq = 0x01; 2120 break; 2121 case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR: 2122 ascq = 0x02; 2123 break; 2124 case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR: 2125 ascq = 0x03; 2126 break; 2127 default: 2128 ascq = 0x00; 2129 break; 2130 } 2131 2132 mpi3mr_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 2133 0x10, ascq); 2134 scmd->result = (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION; 2135 } 2136 2137 /** 2138 * mpi3mr_process_op_reply_desc - reply descriptor handler 2139 * @mrioc: Adapter instance reference 2140 * @reply_desc: Operational reply descriptor 2141 * @reply_dma: place holder for reply DMA address 2142 * @qidx: Operational queue index 2143 * 2144 * Process the operational reply descriptor and identifies the 2145 * descriptor type. Based on the descriptor map the MPI3 request 2146 * status to a SCSI command status and calls scsi_done call 2147 * back. 
 *
 * Return: Nothing
 */
void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
	struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma, u16 qidx)
{
	u16 reply_desc_type, host_tag = 0;
	u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
	u32 ioc_loginfo = 0;
	struct mpi3_status_reply_descriptor *status_desc = NULL;
	struct mpi3_address_reply_descriptor *addr_desc = NULL;
	struct mpi3_success_reply_descriptor *success_desc = NULL;
	struct mpi3_scsi_io_reply *scsi_reply = NULL;
	struct scsi_cmnd *scmd = NULL;
	struct scmd_priv *priv = NULL;
	u8 *sense_buf = NULL;
	u8 scsi_state = 0, scsi_status = 0, sense_state = 0;
	u32 xfer_count = 0, sense_count = 0, resp_data = 0;
	u16 dev_handle = 0xFFFF;
	struct scsi_sense_hdr sshdr;

	*reply_dma = 0;
	reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
	switch (reply_desc_type) {
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
		/* Status descriptor: completion info carried inline */
		status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(status_desc->host_tag);
		ioc_status = le16_to_cpu(status_desc->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
		/*
		 * Address descriptor: points to a full SCSI IO reply frame
		 * in host memory; pull status/state/sense info from it.
		 */
		addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
		*reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
		scsi_reply = mpi3mr_get_reply_virt_addr(mrioc,
		    *reply_dma);
		if (!scsi_reply) {
			/* Firmware/driver inconsistency; deliberately fatal */
			panic("%s: scsi_reply is NULL, this shouldn't happen\n",
			    mrioc->name);
			goto out;
		}
		host_tag = le16_to_cpu(scsi_reply->host_tag);
		ioc_status = le16_to_cpu(scsi_reply->ioc_status);
		scsi_status = scsi_reply->scsi_status;
		scsi_state = scsi_reply->scsi_state;
		dev_handle = le16_to_cpu(scsi_reply->dev_handle);
		sense_state = (scsi_state & MPI3_SCSI_STATE_SENSE_MASK);
		xfer_count = le32_to_cpu(scsi_reply->transfer_count);
		sense_count = le32_to_cpu(scsi_reply->sense_count);
		resp_data = le32_to_cpu(scsi_reply->response_data);
		sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(scsi_reply->ioc_log_info);
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY)
			panic("%s: Ran out of sense buffers\n", mrioc->name);
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
		/* Success descriptor: command completed cleanly, tag only */
		success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(success_desc->host_tag);
		break;
	default:
		break;
	}
	scmd = mpi3mr_scmd_from_host_tag(mrioc, host_tag, qidx);
	if (!scmd) {
		/* Tag not tracked by the LLD; deliberately fatal */
		panic("%s: Cannot Identify scmd for host_tag 0x%x\n",
		    mrioc->name, host_tag);
		goto out;
	}
	priv = scsi_cmd_priv(scmd);
	if (success_desc) {
		scmd->result = DID_OK << 16;
		goto out_success;
	}
	/*
	 * Zero-byte underrun with BUSY/RESERVATION CONFLICT/TASK SET FULL
	 * is treated as a plain SCSI-status completion, not an error.
	 */
	if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN &&
	    xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY ||
	    scsi_status == MPI3_SCSI_STATUS_RESERVATION_CONFLICT ||
	    scsi_status == MPI3_SCSI_STATUS_TASK_SET_FULL))
		ioc_status = MPI3_IOCSTATUS_SUCCESS;

	if ((sense_state == MPI3_SCSI_STATE_SENSE_VALID) && sense_count &&
	    sense_buf) {
		u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, sense_count);

		memcpy(scmd->sense_buffer, sense_buf, sz);
	}

	/* Translate MPI3 IOC status into a SCSI mid-layer result */
	switch (ioc_status) {
	case MPI3_IOCSTATUS_BUSY:
	case MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES:
		scmd->result = SAM_STAT_BUSY;
		break;
	case MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		scmd->result = DID_NO_CONNECT << 16;
		break;
	case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
		scmd->result = DID_SOFT_ERROR << 16;
		break;
	case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED:
		scmd->result = DID_RESET << 16;
		break;
	case MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		/* Retry via soft error unless enough data was transferred */
		if ((xfer_count == 0) || (scmd->underflow > xfer_count))
			scmd->result = DID_SOFT_ERROR << 16;
		else
			scmd->result = (DID_OK << 16) | scsi_status;
		break;
	case MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN:
		scmd->result = (DID_OK << 16) | scsi_status;
		if (sense_state == MPI3_SCSI_STATE_SENSE_VALID)
			break;
		if (xfer_count < scmd->underflow) {
			if (scsi_status == SAM_STAT_BUSY)
				scmd->result = SAM_STAT_BUSY;
			else
				scmd->result = DID_SOFT_ERROR << 16;
		} else if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) ||
		    (sense_state != MPI3_SCSI_STATE_SENSE_NOT_AVAILABLE))
			scmd->result = DID_SOFT_ERROR << 16;
		else if (scsi_state & MPI3_SCSI_STATE_TERMINATED)
			scmd->result = DID_RESET << 16;
		break;
	case MPI3_IOCSTATUS_SCSI_DATA_OVERRUN:
		scsi_set_resid(scmd, 0);
		fallthrough;
	case MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR:
	case MPI3_IOCSTATUS_SUCCESS:
		scmd->result = (DID_OK << 16) | scsi_status;
		if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) ||
		    (sense_state == MPI3_SCSI_STATE_SENSE_FAILED) ||
		    (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY))
			scmd->result = DID_SOFT_ERROR << 16;
		else if (scsi_state & MPI3_SCSI_STATE_TERMINATED)
			scmd->result = DID_RESET << 16;
		break;
	case MPI3_IOCSTATUS_EEDP_GUARD_ERROR:
	case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR:
	case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR:
		/* Protection info errors become CHECK CONDITION sense data */
		mpi3mr_map_eedp_error(scmd, ioc_status);
		break;
	case MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI3_IOCSTATUS_INVALID_FUNCTION:
	case MPI3_IOCSTATUS_INVALID_SGL:
	case MPI3_IOCSTATUS_INTERNAL_ERROR:
	case MPI3_IOCSTATUS_INVALID_FIELD:
	case MPI3_IOCSTATUS_INVALID_STATE:
	case MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	case MPI3_IOCSTATUS_INSUFFICIENT_POWER:
	default:
		scmd->result = DID_SOFT_ERROR << 16;
		break;
	}

	/* Verbose diagnostics for failed non-ATA-passthrough commands */
	if (scmd->result != (DID_OK << 16) && (scmd->cmnd[0] != ATA_12) &&
	    (scmd->cmnd[0] != ATA_16)) {
		ioc_info(mrioc, "%s :scmd->result 0x%x\n", __func__,
		    scmd->result);
		scsi_print_command(scmd);
		ioc_info(mrioc,
		    "%s :Command issued to handle 0x%02x returned with error 0x%04x loginfo 0x%08x, qid %d\n",
		    __func__, dev_handle, ioc_status, ioc_loginfo,
		    priv->req_q_idx + 1);
		ioc_info(mrioc,
		    " host_tag %d scsi_state 0x%02x scsi_status 0x%02x, xfer_cnt %d resp_data 0x%x\n",
		    host_tag, scsi_state, scsi_status, xfer_count, resp_data);
		if (sense_buf) {
			scsi_normalize_sense(sense_buf, sense_count, &sshdr);
			ioc_info(mrioc,
			    "%s :sense_count 0x%x, sense_key 0x%x ASC 0x%x, ASCQ 0x%x\n",
			    __func__, sense_count, sshdr.sense_key,
			    sshdr.asc, sshdr.ascq);
		}
	}
out_success:
	if (priv->meta_sg_valid) {
		dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
		    scsi_prot_sg_count(scmd), scmd->sc_data_direction);
	}
	mpi3mr_clear_scmd_priv(mrioc, scmd);
	scsi_dma_unmap(scmd);
	scsi_done(scmd);
out:
	/* Return the firmware-owned sense buffer to the pool */
	if (sense_buf)
		mpi3mr_repost_sense_buf(mrioc,
		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
}

/**
 * mpi3mr_get_chain_idx - get free chain buffer index
 * @mrioc: Adapter instance reference
 *
 * Try to get a free chain buffer index from the free pool.
 *
 * Return: -1 on failure or the free chain buffer index
 */
static int mpi3mr_get_chain_idx(struct mpi3mr_ioc *mrioc)
{
	u8 retry_count = 5;
	int cmd_idx = -1;

	do {
		spin_lock(&mrioc->chain_buf_lock);
		cmd_idx = find_first_zero_bit(mrioc->chain_bitmap,
		    mrioc->chain_buf_count);
		if (cmd_idx < mrioc->chain_buf_count) {
			/* Claim the slot while still holding the lock */
			set_bit(cmd_idx, mrioc->chain_bitmap);
			spin_unlock(&mrioc->chain_buf_lock);
			break;
		}
		spin_unlock(&mrioc->chain_buf_lock);
		cmd_idx = -1;
	} while (retry_count--);
	return cmd_idx;
}

/**
 * mpi3mr_prepare_sg_scmd - build scatter gather list
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 * @scsiio_req: MPI3 SCSI IO request
 *
 * This function maps SCSI command's data and protection SGEs to
 * MPI request SGEs. If required additional 4K chain buffer is
 * used to send the SGEs.
 *
 * Return: 0 on success, -ENOMEM on dma_map_sg failure
 */
static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
{
	dma_addr_t chain_dma;
	struct scatterlist *sg_scmd;
	void *sg_local, *chain;
	u32 chain_length;
	int sges_left, chain_idx;
	u32 sges_in_segment;
	u8 simple_sgl_flags;
	u8 simple_sgl_flags_last;
	u8 last_chain_sgl_flags;
	struct chain_element *chain_req;
	struct scmd_priv *priv = NULL;
	/* Non-zero when this call builds the protection-info (meta) SGL */
	u32 meta_sg = le32_to_cpu(scsiio_req->flags) &
	    MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI;

	priv = scsi_cmd_priv(scmd);

	simple_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
	    MPI3_SGE_FLAGS_DLAS_SYSTEM;
	simple_sgl_flags_last = simple_sgl_flags |
	    MPI3_SGE_FLAGS_END_OF_LIST;
	last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN |
	    MPI3_SGE_FLAGS_DLAS_SYSTEM;

	/* Meta SGL lives at a fixed index in the request's SGL array */
	if (meta_sg)
		sg_local = &scsiio_req->sgl[MPI3_SCSIIO_METASGL_INDEX];
	else
		sg_local = &scsiio_req->sgl;

	if (!scsiio_req->data_length && !meta_sg) {
		mpi3mr_build_zero_len_sge(sg_local);
		return 0;
	}

	if (meta_sg) {
		sg_scmd = scsi_prot_sglist(scmd);
		sges_left = dma_map_sg(&mrioc->pdev->dev,
		    scsi_prot_sglist(scmd),
		    scsi_prot_sg_count(scmd),
		    scmd->sc_data_direction);
		priv->meta_sg_valid = 1; /* To unmap meta sg DMA */
	} else {
		sg_scmd = scsi_sglist(scmd);
		sges_left = scsi_dma_map(scmd);
	}

	if (sges_left < 0) {
		sdev_printk(KERN_ERR, scmd->device,
		    "scsi_dma_map failed: request for %d bytes!\n",
		    scsi_bufflen(scmd));
		return -ENOMEM;
	}
	if (sges_left > MPI3MR_SG_DEPTH) {
		sdev_printk(KERN_ERR, scmd->device,
		    "scsi_dma_map returned unsupported sge count %d!\n",
		    sges_left);
		return -ENOMEM;
	}

	/* How many simple SGEs fit inline in the request frame */
	sges_in_segment = (mrioc->facts.op_req_sz -
	    offsetof(struct mpi3_scsi_io_request, sgl)) / sizeof(struct mpi3_sge_common);

	if (scsiio_req->sgl[0].eedp.flags ==
	    MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED && !meta_sg) {
		sg_local += sizeof(struct mpi3_sge_common);
		sges_in_segment--;
		/* Reserve 1st segment (scsiio_req->sgl[0]) for eedp */
	}

	if (scsiio_req->msg_flags ==
	    MPI3_SCSIIO_MSGFLAGS_METASGL_VALID && !meta_sg) {
		sges_in_segment--;
		/* Reserve last segment (scsiio_req->sgl[3]) for meta sg */
	}

	if (meta_sg)
		sges_in_segment = 1;

	if (sges_left <= sges_in_segment)
		goto fill_in_last_segment;

	/* fill in main message segment when there is a chain following */
	while (sges_in_segment > 1) {
		mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
		    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		sg_scmd = sg_next(sg_scmd);
		sg_local += sizeof(struct mpi3_sge_common);
		sges_left--;
		sges_in_segment--;
	}

	chain_idx = mpi3mr_get_chain_idx(mrioc);
	/*
	 * NOTE(review): returns -1 here while the kdoc documents -ENOMEM;
	 * callers appear to only test for non-zero — confirm before changing.
	 */
	if (chain_idx < 0)
		return -1;
	chain_req = &mrioc->chain_sgl_list[chain_idx];
	/* Remember the chain slot so completion can release it */
	if (meta_sg)
		priv->meta_chain_idx = chain_idx;
	else
		priv->chain_idx = chain_idx;

	chain = chain_req->addr;
	chain_dma = chain_req->dma_addr;
	sges_in_segment = sges_left;
	chain_length = sges_in_segment * sizeof(struct mpi3_sge_common);

	/* Terminate the inline segment with a chain SGE to the buffer */
	mpi3mr_add_sg_single(sg_local, last_chain_sgl_flags,
	    chain_length, chain_dma);

	sg_local = chain;

fill_in_last_segment:
	while (sges_left > 0) {
		if (sges_left == 1)
			mpi3mr_add_sg_single(sg_local,
			    simple_sgl_flags_last, sg_dma_len(sg_scmd),
			    sg_dma_address(sg_scmd));
		else
			mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		sg_scmd = sg_next(sg_scmd);
		sg_local += sizeof(struct mpi3_sge_common);
		sges_left--;
	}

	return 0;
}

/**
 * mpi3mr_build_sg_scmd - build scatter gather list for SCSI IO
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 * @scsiio_req: MPI3 SCSI IO request
 *
 * This function calls mpi3mr_prepare_sg_scmd for constructing
 * both data SGEs and protection information SGEs in the MPI
 * format from the SCSI Command as appropriate.
 *
 * Return: return value of mpi3mr_prepare_sg_scmd.
2523 */ 2524 static int mpi3mr_build_sg_scmd(struct mpi3mr_ioc *mrioc, 2525 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req) 2526 { 2527 int ret; 2528 2529 ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req); 2530 if (ret) 2531 return ret; 2532 2533 if (scsiio_req->msg_flags == MPI3_SCSIIO_MSGFLAGS_METASGL_VALID) { 2534 /* There is a valid meta sg */ 2535 scsiio_req->flags |= 2536 cpu_to_le32(MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI); 2537 ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req); 2538 } 2539 2540 return ret; 2541 } 2542 2543 /** 2544 * mpi3mr_print_response_code - print TM response as a string 2545 * @mrioc: Adapter instance reference 2546 * @resp_code: TM response code 2547 * 2548 * Print TM response code as a readable string. 2549 * 2550 * Return: Nothing. 2551 */ 2552 static void mpi3mr_print_response_code(struct mpi3mr_ioc *mrioc, u8 resp_code) 2553 { 2554 char *desc; 2555 2556 switch (resp_code) { 2557 case MPI3MR_RSP_TM_COMPLETE: 2558 desc = "task management request completed"; 2559 break; 2560 case MPI3MR_RSP_INVALID_FRAME: 2561 desc = "invalid frame"; 2562 break; 2563 case MPI3MR_RSP_TM_NOT_SUPPORTED: 2564 desc = "task management request not supported"; 2565 break; 2566 case MPI3MR_RSP_TM_FAILED: 2567 desc = "task management request failed"; 2568 break; 2569 case MPI3MR_RSP_TM_SUCCEEDED: 2570 desc = "task management request succeeded"; 2571 break; 2572 case MPI3MR_RSP_TM_INVALID_LUN: 2573 desc = "invalid lun"; 2574 break; 2575 case MPI3MR_RSP_TM_OVERLAPPED_TAG: 2576 desc = "overlapped tag attempted"; 2577 break; 2578 case MPI3MR_RSP_IO_QUEUED_ON_IOC: 2579 desc = "task queued, however not sent to target"; 2580 break; 2581 default: 2582 desc = "unknown"; 2583 break; 2584 } 2585 ioc_info(mrioc, "%s :response_code(0x%01x): %s\n", __func__, 2586 resp_code, desc); 2587 } 2588 2589 /** 2590 * mpi3mr_issue_tm - Issue Task Management request 2591 * @mrioc: Adapter instance reference 2592 * @tm_type: Task Management type 2593 * @handle: 
Device handle
 * @lun: lun ID
 * @htag: Host tag of the TM request
 * @timeout: Timeout in seconds to wait for the TM completion
 * @drv_cmd: Internal command tracker
 * @resp_code: Response code place holder
 * @cmd_priv: SCSI command private data
 *
 * Issues a Task Management Request to the controller for a
 * specified target, lun and command and wait for its completion
 * and check TM response. Recover the TM if it timed out by
 * issuing controller reset.
 *
 * Return: 0 on success, non-zero on errors
 */
static int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
	u16 handle, uint lun, u16 htag, ulong timeout,
	struct mpi3mr_drv_cmd *drv_cmd,
	u8 *resp_code, struct scmd_priv *cmd_priv)
{
	struct mpi3_scsi_task_mgmt_request tm_req;
	struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
	int retval = 0;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
	struct op_req_qinfo *op_req_q = NULL;

	ioc_info(mrioc, "%s :Issue TM: TM type (0x%x) for devhandle 0x%04x\n",
	    __func__, tm_type, handle);
	if (mrioc->unrecoverable) {
		retval = -1;
		ioc_err(mrioc, "%s :Issue TM: Unrecoverable controller\n",
		    __func__);
		goto out;
	}

	memset(&tm_req, 0, sizeof(tm_req));
	/* The drv_cmd tracker is exclusive; serialize via its mutex */
	mutex_lock(&drv_cmd->mutex);
	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__);
		mutex_unlock(&drv_cmd->mutex);
		goto out;
	}
	if (mrioc->reset_in_progress) {
		retval = -1;
		ioc_err(mrioc, "%s :Issue TM: Reset in progress\n", __func__);
		mutex_unlock(&drv_cmd->mutex);
		goto out;
	}

	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 1;
	drv_cmd->callback = NULL;
	tm_req.dev_handle = cpu_to_le16(handle);
	tm_req.task_type = tm_type;
	tm_req.host_tag = cpu_to_le16(htag);

	int_to_scsilun(lun, (struct scsi_lun *)tm_req.lun);
	tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT;

	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
	if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) {
		/* Block further I/O to the target while the TM is in flight */
		scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
		    tgtdev->starget->hostdata;
		atomic_inc(&scsi_tgt_priv_data->block_io);
	}
	if (cmd_priv) {
		/* Task-level TM: identify the exact I/O being targeted */
		op_req_q = &mrioc->req_qinfo[cmd_priv->req_q_idx];
		tm_req.task_host_tag = cpu_to_le16(cmd_priv->host_tag);
		tm_req.task_request_queue_id = cpu_to_le16(op_req_q->qid);
	}
	if (tgtdev && (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)) {
		/* PCIe devices may advertise their own abort/reset timeouts */
		if (cmd_priv && tgtdev->dev_spec.pcie_inf.abort_to)
			timeout = tgtdev->dev_spec.pcie_inf.abort_to;
		else if (!cmd_priv && tgtdev->dev_spec.pcie_inf.reset_to)
			timeout = tgtdev->dev_spec.pcie_inf.reset_to;
	}

	init_completion(&drv_cmd->done);
	retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1);
	if (retval) {
		ioc_err(mrioc, "%s :Issue TM: Admin Post failed\n", __func__);
		goto out_unlock;
	}
	wait_for_completion_timeout(&drv_cmd->done, (timeout * HZ));

	if (!(drv_cmd->state & MPI3MR_CMD_COMPLETE)) {
		/* TM timed out: recover the controller via soft reset */
		ioc_err(mrioc, "%s :Issue TM: command timed out\n", __func__);
		drv_cmd->is_waiting = 0;
		retval = -1;
		if (!(drv_cmd->state & MPI3MR_CMD_RESET))
			mpi3mr_soft_reset_handler(mrioc,
			    MPI3MR_RESET_FROM_TM_TIMEOUT, 1);
		goto out_unlock;
	}

	if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
		tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;

	if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		ioc_err(mrioc,
		    "%s :Issue TM: handle(0x%04x) Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
		    __func__, handle, drv_cmd->ioc_status,
		    drv_cmd->ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

	if (!tm_reply) {
		ioc_err(mrioc, "%s :Issue TM: No TM Reply message\n", __func__);
		retval = -1;
		goto out_unlock;
	}

	*resp_code = le32_to_cpu(tm_reply->response_data) &
	    MPI3MR_RI_MASK_RESPCODE;
	switch (*resp_code) {
	case MPI3MR_RSP_TM_SUCCEEDED:
	case MPI3MR_RSP_TM_COMPLETE:
		break;
	case MPI3MR_RSP_IO_QUEUED_ON_IOC:
		/* Acceptable only for QUERY TASK; failure otherwise */
		if (tm_type != MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
			retval = -1;
		break;
	default:
		retval = -1;
		break;
	}

	ioc_info(mrioc,
	    "%s :Issue TM: Completed TM type (0x%x) handle(0x%04x) ",
	    __func__, tm_type, handle);
	ioc_info(mrioc,
	    "with ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
	    drv_cmd->ioc_status, drv_cmd->ioc_loginfo,
	    le32_to_cpu(tm_reply->termination_count));
	mpi3mr_print_response_code(mrioc, *resp_code);

out_unlock:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&drv_cmd->mutex);
	if (scsi_tgt_priv_data)
		atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);
	if (!retval) {
		/*
		 * Flush all IRQ handlers by calling synchronize_irq().
		 * mpi3mr_ioc_disable_intr() takes care of it.
		 */
		mpi3mr_ioc_disable_intr(mrioc);
		mpi3mr_ioc_enable_intr(mrioc);
	}
out:
	return retval;
}

/**
 * mpi3mr_bios_param - BIOS param callback
 * @sdev: SCSI device reference
 * @bdev: Block device reference
 * @capacity: Capacity in logical sectors
 * @params: Parameter array
 *
 * Just the parameters with heads/sectors/cylinders.
2758 * 2759 * Return: 0 always 2760 */ 2761 static int mpi3mr_bios_param(struct scsi_device *sdev, 2762 struct block_device *bdev, sector_t capacity, int params[]) 2763 { 2764 int heads; 2765 int sectors; 2766 sector_t cylinders; 2767 ulong dummy; 2768 2769 heads = 64; 2770 sectors = 32; 2771 2772 dummy = heads * sectors; 2773 cylinders = capacity; 2774 sector_div(cylinders, dummy); 2775 2776 if ((ulong)capacity >= 0x200000) { 2777 heads = 255; 2778 sectors = 63; 2779 dummy = heads * sectors; 2780 cylinders = capacity; 2781 sector_div(cylinders, dummy); 2782 } 2783 2784 params[0] = heads; 2785 params[1] = sectors; 2786 params[2] = cylinders; 2787 return 0; 2788 } 2789 2790 /** 2791 * mpi3mr_map_queues - Map queues callback handler 2792 * @shost: SCSI host reference 2793 * 2794 * Call the blk_mq_pci_map_queues with from which operational 2795 * queue the mapping has to be done 2796 * 2797 * Return: return of blk_mq_pci_map_queues 2798 */ 2799 static int mpi3mr_map_queues(struct Scsi_Host *shost) 2800 { 2801 struct mpi3mr_ioc *mrioc = shost_priv(shost); 2802 2803 return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT], 2804 mrioc->pdev, mrioc->op_reply_q_offset); 2805 } 2806 2807 /** 2808 * mpi3mr_get_fw_pending_ios - Calculate pending I/O count 2809 * @mrioc: Adapter instance reference 2810 * 2811 * Calculate the pending I/Os for the controller and return. 2812 * 2813 * Return: Number of pending I/Os 2814 */ 2815 static inline int mpi3mr_get_fw_pending_ios(struct mpi3mr_ioc *mrioc) 2816 { 2817 u16 i; 2818 uint pend_ios = 0; 2819 2820 for (i = 0; i < mrioc->num_op_reply_q; i++) 2821 pend_ios += atomic_read(&mrioc->op_reply_qinfo[i].pend_ios); 2822 return pend_ios; 2823 } 2824 2825 /** 2826 * mpi3mr_print_pending_host_io - print pending I/Os 2827 * @mrioc: Adapter instance reference 2828 * 2829 * Print number of pending I/Os and each I/O details prior to 2830 * reset for debug purpose. 
2831 * 2832 * Return: Nothing 2833 */ 2834 static void mpi3mr_print_pending_host_io(struct mpi3mr_ioc *mrioc) 2835 { 2836 struct Scsi_Host *shost = mrioc->shost; 2837 2838 ioc_info(mrioc, "%s :Pending commands prior to reset: %d\n", 2839 __func__, mpi3mr_get_fw_pending_ios(mrioc)); 2840 blk_mq_tagset_busy_iter(&shost->tag_set, 2841 mpi3mr_print_scmd, (void *)mrioc); 2842 } 2843 2844 /** 2845 * mpi3mr_wait_for_host_io - block for I/Os to complete 2846 * @mrioc: Adapter instance reference 2847 * @timeout: time out in seconds 2848 * Waits for pending I/Os for the given adapter to complete or 2849 * to hit the timeout. 2850 * 2851 * Return: Nothing 2852 */ 2853 void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout) 2854 { 2855 enum mpi3mr_iocstate iocstate; 2856 int i = 0; 2857 2858 iocstate = mpi3mr_get_iocstate(mrioc); 2859 if (iocstate != MRIOC_STATE_READY) 2860 return; 2861 2862 if (!mpi3mr_get_fw_pending_ios(mrioc)) 2863 return; 2864 ioc_info(mrioc, 2865 "%s :Waiting for %d seconds prior to reset for %d I/O\n", 2866 __func__, timeout, mpi3mr_get_fw_pending_ios(mrioc)); 2867 2868 for (i = 0; i < timeout; i++) { 2869 if (!mpi3mr_get_fw_pending_ios(mrioc)) 2870 break; 2871 iocstate = mpi3mr_get_iocstate(mrioc); 2872 if (iocstate != MRIOC_STATE_READY) 2873 break; 2874 msleep(1000); 2875 } 2876 2877 ioc_info(mrioc, "%s :Pending I/Os after wait is: %d\n", __func__, 2878 mpi3mr_get_fw_pending_ios(mrioc)); 2879 } 2880 2881 /** 2882 * mpi3mr_eh_host_reset - Host reset error handling callback 2883 * @scmd: SCSI command reference 2884 * 2885 * Issue controller reset if the scmd is for a Physical Device, 2886 * if the scmd is for RAID volume, then wait for 2887 * MPI3MR_RAID_ERRREC_RESET_TIMEOUT and checke whether any 2888 * pending I/Os prior to issuing reset to the controller. 
2889 * 2890 * Return: SUCCESS of successful reset else FAILED 2891 */ 2892 static int mpi3mr_eh_host_reset(struct scsi_cmnd *scmd) 2893 { 2894 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host); 2895 struct mpi3mr_stgt_priv_data *stgt_priv_data; 2896 struct mpi3mr_sdev_priv_data *sdev_priv_data; 2897 u8 dev_type = MPI3_DEVICE_DEVFORM_VD; 2898 int retval = FAILED, ret; 2899 2900 sdev_priv_data = scmd->device->hostdata; 2901 if (sdev_priv_data && sdev_priv_data->tgt_priv_data) { 2902 stgt_priv_data = sdev_priv_data->tgt_priv_data; 2903 dev_type = stgt_priv_data->dev_type; 2904 } 2905 2906 if (dev_type == MPI3_DEVICE_DEVFORM_VD) { 2907 mpi3mr_wait_for_host_io(mrioc, 2908 MPI3MR_RAID_ERRREC_RESET_TIMEOUT); 2909 if (!mpi3mr_get_fw_pending_ios(mrioc)) { 2910 retval = SUCCESS; 2911 goto out; 2912 } 2913 } 2914 2915 mpi3mr_print_pending_host_io(mrioc); 2916 ret = mpi3mr_soft_reset_handler(mrioc, 2917 MPI3MR_RESET_FROM_EH_HOS, 1); 2918 if (ret) 2919 goto out; 2920 2921 retval = SUCCESS; 2922 out: 2923 sdev_printk(KERN_INFO, scmd->device, 2924 "Host reset is %s for scmd(%p)\n", 2925 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); 2926 2927 return retval; 2928 } 2929 2930 /** 2931 * mpi3mr_eh_target_reset - Target reset error handling callback 2932 * @scmd: SCSI command reference 2933 * 2934 * Issue Target reset Task Management and verify the scmd is 2935 * terminated successfully and return status accordingly. 2936 * 2937 * Return: SUCCESS of successful termination of the scmd else 2938 * FAILED 2939 */ 2940 static int mpi3mr_eh_target_reset(struct scsi_cmnd *scmd) 2941 { 2942 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host); 2943 struct mpi3mr_stgt_priv_data *stgt_priv_data; 2944 struct mpi3mr_sdev_priv_data *sdev_priv_data; 2945 u16 dev_handle; 2946 u8 resp_code = 0; 2947 int retval = FAILED, ret = 0; 2948 2949 sdev_printk(KERN_INFO, scmd->device, 2950 "Attempting Target Reset! 
scmd(%p)\n", scmd); 2951 scsi_print_command(scmd); 2952 2953 sdev_priv_data = scmd->device->hostdata; 2954 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) { 2955 sdev_printk(KERN_INFO, scmd->device, 2956 "SCSI device is not available\n"); 2957 retval = SUCCESS; 2958 goto out; 2959 } 2960 2961 stgt_priv_data = sdev_priv_data->tgt_priv_data; 2962 dev_handle = stgt_priv_data->dev_handle; 2963 sdev_printk(KERN_INFO, scmd->device, 2964 "Target Reset is issued to handle(0x%04x)\n", 2965 dev_handle); 2966 2967 ret = mpi3mr_issue_tm(mrioc, 2968 MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, dev_handle, 2969 sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS, 2970 MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, NULL); 2971 2972 if (ret) 2973 goto out; 2974 2975 retval = SUCCESS; 2976 out: 2977 sdev_printk(KERN_INFO, scmd->device, 2978 "Target reset is %s for scmd(%p)\n", 2979 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); 2980 2981 return retval; 2982 } 2983 2984 /** 2985 * mpi3mr_eh_dev_reset- Device reset error handling callback 2986 * @scmd: SCSI command reference 2987 * 2988 * Issue lun reset Task Management and verify the scmd is 2989 * terminated successfully and return status accordingly. 2990 * 2991 * Return: SUCCESS of successful termination of the scmd else 2992 * FAILED 2993 */ 2994 static int mpi3mr_eh_dev_reset(struct scsi_cmnd *scmd) 2995 { 2996 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host); 2997 struct mpi3mr_stgt_priv_data *stgt_priv_data; 2998 struct mpi3mr_sdev_priv_data *sdev_priv_data; 2999 u16 dev_handle; 3000 u8 resp_code = 0; 3001 int retval = FAILED, ret = 0; 3002 3003 sdev_printk(KERN_INFO, scmd->device, 3004 "Attempting Device(lun) Reset! 
scmd(%p)\n", scmd); 3005 scsi_print_command(scmd); 3006 3007 sdev_priv_data = scmd->device->hostdata; 3008 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) { 3009 sdev_printk(KERN_INFO, scmd->device, 3010 "SCSI device is not available\n"); 3011 retval = SUCCESS; 3012 goto out; 3013 } 3014 3015 stgt_priv_data = sdev_priv_data->tgt_priv_data; 3016 dev_handle = stgt_priv_data->dev_handle; 3017 sdev_printk(KERN_INFO, scmd->device, 3018 "Device(lun) Reset is issued to handle(0x%04x)\n", dev_handle); 3019 3020 ret = mpi3mr_issue_tm(mrioc, 3021 MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, dev_handle, 3022 sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS, 3023 MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, NULL); 3024 3025 if (ret) 3026 goto out; 3027 3028 retval = SUCCESS; 3029 out: 3030 sdev_printk(KERN_INFO, scmd->device, 3031 "Device(lun) reset is %s for scmd(%p)\n", 3032 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); 3033 3034 return retval; 3035 } 3036 3037 /** 3038 * mpi3mr_scan_start - Scan start callback handler 3039 * @shost: SCSI host reference 3040 * 3041 * Issue port enable request asynchronously. 3042 * 3043 * Return: Nothing 3044 */ 3045 static void mpi3mr_scan_start(struct Scsi_Host *shost) 3046 { 3047 struct mpi3mr_ioc *mrioc = shost_priv(shost); 3048 3049 mrioc->scan_started = 1; 3050 ioc_info(mrioc, "%s :Issuing Port Enable\n", __func__); 3051 if (mpi3mr_issue_port_enable(mrioc, 1)) { 3052 ioc_err(mrioc, "%s :Issuing port enable failed\n", __func__); 3053 mrioc->scan_started = 0; 3054 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR; 3055 } 3056 } 3057 3058 /** 3059 * mpi3mr_scan_finished - Scan finished callback handler 3060 * @shost: SCSI host reference 3061 * @time: Jiffies from the scan start 3062 * 3063 * Checks whether the port enable is completed or timedout or 3064 * failed and set the scan status accordingly after taking any 3065 * recovery if required. 
3066 * 3067 * Return: 1 on scan finished or timed out, 0 for in progress 3068 */ 3069 static int mpi3mr_scan_finished(struct Scsi_Host *shost, 3070 unsigned long time) 3071 { 3072 struct mpi3mr_ioc *mrioc = shost_priv(shost); 3073 u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT; 3074 3075 if (time >= (pe_timeout * HZ)) { 3076 mrioc->init_cmds.is_waiting = 0; 3077 mrioc->init_cmds.callback = NULL; 3078 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 3079 ioc_err(mrioc, "%s :port enable request timed out\n", __func__); 3080 mrioc->is_driver_loading = 0; 3081 mpi3mr_soft_reset_handler(mrioc, 3082 MPI3MR_RESET_FROM_PE_TIMEOUT, 1); 3083 } 3084 3085 if (mrioc->scan_failed) { 3086 ioc_err(mrioc, 3087 "%s :port enable failed with (ioc_status=0x%08x)\n", 3088 __func__, mrioc->scan_failed); 3089 mrioc->is_driver_loading = 0; 3090 mrioc->stop_drv_processing = 1; 3091 return 1; 3092 } 3093 3094 if (mrioc->scan_started) 3095 return 0; 3096 ioc_info(mrioc, "%s :port enable: SUCCESS\n", __func__); 3097 mpi3mr_start_watchdog(mrioc); 3098 mrioc->is_driver_loading = 0; 3099 3100 return 1; 3101 } 3102 3103 /** 3104 * mpi3mr_slave_destroy - Slave destroy callback handler 3105 * @sdev: SCSI device reference 3106 * 3107 * Cleanup and free per device(lun) private data. 3108 * 3109 * Return: Nothing. 
3110 */ 3111 static void mpi3mr_slave_destroy(struct scsi_device *sdev) 3112 { 3113 struct Scsi_Host *shost; 3114 struct mpi3mr_ioc *mrioc; 3115 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 3116 struct mpi3mr_tgt_dev *tgt_dev; 3117 unsigned long flags; 3118 struct scsi_target *starget; 3119 3120 if (!sdev->hostdata) 3121 return; 3122 3123 starget = scsi_target(sdev); 3124 shost = dev_to_shost(&starget->dev); 3125 mrioc = shost_priv(shost); 3126 scsi_tgt_priv_data = starget->hostdata; 3127 3128 scsi_tgt_priv_data->num_luns--; 3129 3130 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 3131 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 3132 if (tgt_dev && (!scsi_tgt_priv_data->num_luns)) 3133 tgt_dev->starget = NULL; 3134 if (tgt_dev) 3135 mpi3mr_tgtdev_put(tgt_dev); 3136 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 3137 3138 kfree(sdev->hostdata); 3139 sdev->hostdata = NULL; 3140 } 3141 3142 /** 3143 * mpi3mr_target_destroy - Target destroy callback handler 3144 * @starget: SCSI target reference 3145 * 3146 * Cleanup and free per target private data. 3147 * 3148 * Return: Nothing. 
3149 */ 3150 static void mpi3mr_target_destroy(struct scsi_target *starget) 3151 { 3152 struct Scsi_Host *shost; 3153 struct mpi3mr_ioc *mrioc; 3154 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 3155 struct mpi3mr_tgt_dev *tgt_dev; 3156 unsigned long flags; 3157 3158 if (!starget->hostdata) 3159 return; 3160 3161 shost = dev_to_shost(&starget->dev); 3162 mrioc = shost_priv(shost); 3163 scsi_tgt_priv_data = starget->hostdata; 3164 3165 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 3166 tgt_dev = __mpi3mr_get_tgtdev_from_tgtpriv(mrioc, scsi_tgt_priv_data); 3167 if (tgt_dev && (tgt_dev->starget == starget) && 3168 (tgt_dev->perst_id == starget->id)) 3169 tgt_dev->starget = NULL; 3170 if (tgt_dev) { 3171 scsi_tgt_priv_data->tgt_dev = NULL; 3172 scsi_tgt_priv_data->perst_id = 0; 3173 mpi3mr_tgtdev_put(tgt_dev); 3174 mpi3mr_tgtdev_put(tgt_dev); 3175 } 3176 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 3177 3178 kfree(starget->hostdata); 3179 starget->hostdata = NULL; 3180 } 3181 3182 /** 3183 * mpi3mr_slave_configure - Slave configure callback handler 3184 * @sdev: SCSI device reference 3185 * 3186 * Configure queue depth, max hardware sectors and virt boundary 3187 * as required 3188 * 3189 * Return: 0 always. 
3190 */ 3191 static int mpi3mr_slave_configure(struct scsi_device *sdev) 3192 { 3193 struct scsi_target *starget; 3194 struct Scsi_Host *shost; 3195 struct mpi3mr_ioc *mrioc; 3196 struct mpi3mr_tgt_dev *tgt_dev; 3197 unsigned long flags; 3198 int retval = 0; 3199 3200 starget = scsi_target(sdev); 3201 shost = dev_to_shost(&starget->dev); 3202 mrioc = shost_priv(shost); 3203 3204 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 3205 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 3206 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 3207 if (!tgt_dev) 3208 return -ENXIO; 3209 3210 mpi3mr_change_queue_depth(sdev, tgt_dev->q_depth); 3211 switch (tgt_dev->dev_type) { 3212 case MPI3_DEVICE_DEVFORM_PCIE: 3213 /*The block layer hw sector size = 512*/ 3214 if ((tgt_dev->dev_spec.pcie_inf.dev_info & 3215 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) == 3216 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) { 3217 blk_queue_max_hw_sectors(sdev->request_queue, 3218 tgt_dev->dev_spec.pcie_inf.mdts / 512); 3219 if (tgt_dev->dev_spec.pcie_inf.pgsz == 0) 3220 blk_queue_virt_boundary(sdev->request_queue, 3221 ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1)); 3222 else 3223 blk_queue_virt_boundary(sdev->request_queue, 3224 ((1 << tgt_dev->dev_spec.pcie_inf.pgsz) - 1)); 3225 } 3226 break; 3227 default: 3228 break; 3229 } 3230 3231 mpi3mr_tgtdev_put(tgt_dev); 3232 3233 return retval; 3234 } 3235 3236 /** 3237 * mpi3mr_slave_alloc -Slave alloc callback handler 3238 * @sdev: SCSI device reference 3239 * 3240 * Allocate per device(lun) private data and initialize it. 3241 * 3242 * Return: 0 on success -ENOMEM on memory allocation failure. 
3243 */ 3244 static int mpi3mr_slave_alloc(struct scsi_device *sdev) 3245 { 3246 struct Scsi_Host *shost; 3247 struct mpi3mr_ioc *mrioc; 3248 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 3249 struct mpi3mr_tgt_dev *tgt_dev; 3250 struct mpi3mr_sdev_priv_data *scsi_dev_priv_data; 3251 unsigned long flags; 3252 struct scsi_target *starget; 3253 int retval = 0; 3254 3255 starget = scsi_target(sdev); 3256 shost = dev_to_shost(&starget->dev); 3257 mrioc = shost_priv(shost); 3258 scsi_tgt_priv_data = starget->hostdata; 3259 3260 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 3261 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 3262 3263 if (tgt_dev) { 3264 if (tgt_dev->starget == NULL) 3265 tgt_dev->starget = starget; 3266 mpi3mr_tgtdev_put(tgt_dev); 3267 retval = 0; 3268 } else { 3269 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 3270 return -ENXIO; 3271 } 3272 3273 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 3274 3275 scsi_dev_priv_data = kzalloc(sizeof(*scsi_dev_priv_data), GFP_KERNEL); 3276 if (!scsi_dev_priv_data) 3277 return -ENOMEM; 3278 3279 scsi_dev_priv_data->lun_id = sdev->lun; 3280 scsi_dev_priv_data->tgt_priv_data = scsi_tgt_priv_data; 3281 sdev->hostdata = scsi_dev_priv_data; 3282 3283 scsi_tgt_priv_data->num_luns++; 3284 3285 return retval; 3286 } 3287 3288 /** 3289 * mpi3mr_target_alloc - Target alloc callback handler 3290 * @starget: SCSI target reference 3291 * 3292 * Allocate per target private data and initialize it. 3293 * 3294 * Return: 0 on success -ENOMEM on memory allocation failure. 
3295 */ 3296 static int mpi3mr_target_alloc(struct scsi_target *starget) 3297 { 3298 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 3299 struct mpi3mr_ioc *mrioc = shost_priv(shost); 3300 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 3301 struct mpi3mr_tgt_dev *tgt_dev; 3302 unsigned long flags; 3303 int retval = 0; 3304 3305 scsi_tgt_priv_data = kzalloc(sizeof(*scsi_tgt_priv_data), GFP_KERNEL); 3306 if (!scsi_tgt_priv_data) 3307 return -ENOMEM; 3308 3309 starget->hostdata = scsi_tgt_priv_data; 3310 3311 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 3312 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 3313 if (tgt_dev && !tgt_dev->is_hidden) { 3314 scsi_tgt_priv_data->starget = starget; 3315 scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle; 3316 scsi_tgt_priv_data->perst_id = tgt_dev->perst_id; 3317 scsi_tgt_priv_data->dev_type = tgt_dev->dev_type; 3318 scsi_tgt_priv_data->tgt_dev = tgt_dev; 3319 tgt_dev->starget = starget; 3320 atomic_set(&scsi_tgt_priv_data->block_io, 0); 3321 retval = 0; 3322 } else 3323 retval = -ENXIO; 3324 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 3325 3326 return retval; 3327 } 3328 3329 /** 3330 * mpi3mr_check_return_unmap - Whether an unmap is allowed 3331 * @mrioc: Adapter instance reference 3332 * @scmd: SCSI Command reference 3333 * 3334 * The controller hardware cannot handle certain unmap commands 3335 * for NVMe drives, this routine checks those and return true 3336 * and completes the SCSI command with proper status and sense 3337 * data. 3338 * 3339 * Return: TRUE for not allowed unmap, FALSE otherwise. 
 */
static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd)
{
	unsigned char *buf;
	u16 param_len, desc_len;

	/* UNMAP CDB bytes 7-8 carry the parameter list length (big endian) */
	param_len = get_unaligned_be16(scmd->cmnd + 7);

	/* Zero-length parameter list: nothing to unmap, complete with DID_OK */
	if (!param_len) {
		ioc_warn(mrioc,
		    "%s: cdb received with zero parameter length\n",
		    __func__);
		scsi_print_command(scmd);
		scmd->result = DID_OK << 16;
		scsi_done(scmd);
		return true;
	}

	/*
	 * Minimum is 8-byte parameter list header plus one 16-byte block
	 * descriptor. ASC 0x1A: PARAMETER LIST LENGTH ERROR (SPC).
	 */
	if (param_len < 24) {
		ioc_warn(mrioc,
		    "%s: cdb received with invalid param_len: %d\n",
		    __func__, param_len);
		scsi_print_command(scmd);
		scmd->result = SAM_STAT_CHECK_CONDITION;
		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
		    0x1A, 0);
		scsi_done(scmd);
		return true;
	}
	/* CDB-declared length must match the actual data-out buffer size */
	if (param_len != scsi_bufflen(scmd)) {
		ioc_warn(mrioc,
		    "%s: cdb received with param_len: %d bufflen: %d\n",
		    __func__, param_len, scsi_bufflen(scmd));
		scsi_print_command(scmd);
		scmd->result = SAM_STAT_CHECK_CONDITION;
		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
		    0x1A, 0);
		scsi_done(scmd);
		return true;
	}
	/* Copy the parameter list out of the SG list for inspection.
	 * ASC/ASCQ 0x55/0x03: INSUFFICIENT RESOURCES (SPC). */
	buf = kzalloc(scsi_bufflen(scmd), GFP_ATOMIC);
	if (!buf) {
		scsi_print_command(scmd);
		scmd->result = SAM_STAT_CHECK_CONDITION;
		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
		    0x55, 0x03);
		scsi_done(scmd);
		return true;
	}
	scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
	/* Bytes 2-3 of the header: block descriptor data length */
	desc_len = get_unaligned_be16(&buf[2]);

	/* ASC 0x26: INVALID FIELD IN PARAMETER LIST (SPC) */
	if (desc_len < 16) {
		ioc_warn(mrioc,
		    "%s: Invalid descriptor length in param list: %d\n",
		    __func__, desc_len);
		scsi_print_command(scmd);
		scmd->result = SAM_STAT_CHECK_CONDITION;
		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
		    0x26, 0);
		scsi_done(scmd);
		kfree(buf);
		return true;
	}

	/*
	 * Firmware cannot tolerate trailing slack beyond the descriptors;
	 * shrink the CDB's parameter list length in place and let the
	 * command through.
	 */
	if (param_len > (desc_len + 8)) {
		scsi_print_command(scmd);
		ioc_warn(mrioc,
		    "%s: Truncating param_len(%d) to desc_len+8(%d)\n",
		    __func__, param_len, (desc_len + 8));
		param_len = desc_len + 8;
		put_unaligned_be16(param_len, scmd->cmnd + 7);
		scsi_print_command(scmd);
	}

	kfree(buf);
	return false;
}

/**
 * mpi3mr_allow_scmd_to_fw - Command is allowed during shutdown
 * @scmd: SCSI Command reference
 *
 * Checks whether a cdb is allowed during shutdown or not.
 * Only cache-flush and start/stop commands may reach the
 * firmware once driver processing has been stopped.
 *
 * Return: TRUE for allowed commands, FALSE otherwise.
 */

inline bool mpi3mr_allow_scmd_to_fw(struct scsi_cmnd *scmd)
{
	switch (scmd->cmnd[0]) {
	case SYNCHRONIZE_CACHE:
	case START_STOP:
		return true;
	default:
		return false;
	}
}

/**
 * mpi3mr_qcmd - I/O request despatcher
 * @shost: SCSI Host reference
 * @scmd: SCSI Command reference
 *
 * Issues the SCSI Command as an MPI3 request.
 *
 * Return: 0 on successful queueing of the request or if the
 *         request is completed with failure.
 *         SCSI_MLQUEUE_DEVICE_BUSY when the device is busy.
 *         SCSI_MLQUEUE_HOST_BUSY when the host queue is full.
 */
static int mpi3mr_qcmd(struct Scsi_Host *shost,
	struct scsi_cmnd *scmd)
{
	struct mpi3mr_ioc *mrioc = shost_priv(shost);
	struct mpi3mr_stgt_priv_data *stgt_priv_data;
	struct mpi3mr_sdev_priv_data *sdev_priv_data;
	struct scmd_priv *scmd_priv_data = NULL;
	struct mpi3_scsi_io_request *scsiio_req = NULL;
	struct op_req_qinfo *op_req_q = NULL;
	int retval = 0;
	u16 dev_handle;
	u16 host_tag;
	u32 scsiio_flags = 0;
	struct request *rq = scsi_cmd_to_rq(scmd);
	int iprio_class;

	/* No per-LUN/target private data means the device is gone */
	sdev_priv_data = scmd->device->hostdata;
	if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		goto out;
	}

	/* During shutdown only whitelisted CDBs may reach the firmware */
	if (mrioc->stop_drv_processing &&
	    !(mpi3mr_allow_scmd_to_fw(scmd))) {
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		goto out;
	}

	/* Controller reset underway: ask the midlayer to retry later */
	if (mrioc->reset_in_progress) {
		retval = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	stgt_priv_data = sdev_priv_data->tgt_priv_data;

	dev_handle = stgt_priv_data->dev_handle;
	if (dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		goto out;
	}
	if (stgt_priv_data->dev_removed) {
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		goto out;
	}

	/* I/O blocked on this target: fail fast if shutting down, else retry */
	if (atomic_read(&stgt_priv_data->block_io)) {
		if (mrioc->stop_drv_processing) {
			scmd->result = DID_NO_CONNECT << 16;
			scsi_done(scmd);
			goto out;
		}
		retval = SCSI_MLQUEUE_DEVICE_BUSY;
		goto out;
	}

	/*
	 * UNMAP to NVMe devices needs validation/fixup; if the helper
	 * completed the command itself it returns true and we are done.
	 */
	if ((scmd->cmnd[0] == UNMAP) &&
	    (stgt_priv_data->dev_type == MPI3_DEVICE_DEVFORM_PCIE) &&
	    mpi3mr_check_return_unmap(mrioc, scmd))
		goto out;

	host_tag = mpi3mr_host_tag_for_scmd(mrioc, scmd);
	if (host_tag == MPI3MR_HOSTTAG_INVALID) {
		scmd->result = DID_ERROR << 16;
		scsi_done(scmd);
		goto out;
	}

	if (scmd->sc_data_direction == DMA_FROM_DEVICE)
		scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_READ;
	else if (scmd->sc_data_direction == DMA_TO_DEVICE)
		scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE;
	else
		scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER;

	scsiio_flags |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ;

	/* Map block-layer RT I/O priority onto the MPI3 command priority */
	if (sdev_priv_data->ncq_prio_enable) {
		iprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
		if (iprio_class == IOPRIO_CLASS_RT)
			scsiio_flags |= 1 << MPI3_SCSIIO_FLAGS_CMDPRI_SHIFT;
	}

	if (scmd->cmd_len > 16)
		scsiio_flags |= MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16;

	/* Build the MPI3 SCSI IO request in the preallocated per-cmd frame */
	scmd_priv_data = scsi_cmd_priv(scmd);
	memset(scmd_priv_data->mpi3mr_scsiio_req, 0, MPI3MR_ADMIN_REQ_FRAME_SZ);
	scsiio_req = (struct mpi3_scsi_io_request *)scmd_priv_data->mpi3mr_scsiio_req;
	scsiio_req->function = MPI3_FUNCTION_SCSI_IO;
	scsiio_req->host_tag = cpu_to_le16(host_tag);

	mpi3mr_setup_eedp(mrioc, scmd, scsiio_req);

	memcpy(scsiio_req->cdb.cdb32, scmd->cmnd, scmd->cmd_len);
	scsiio_req->data_length = cpu_to_le32(scsi_bufflen(scmd));
	scsiio_req->dev_handle = cpu_to_le16(dev_handle);
	scsiio_req->flags = cpu_to_le32(scsiio_flags);
	int_to_scsilun(sdev_priv_data->lun_id,
	    (struct scsi_lun *)scsiio_req->lun);

	/* SG build failure: unwind the tag bookkeeping and ask for a retry */
	if (mpi3mr_build_sg_scmd(mrioc, scmd, scsiio_req)) {
		mpi3mr_clear_scmd_priv(mrioc, scmd);
		retval = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	op_req_q = &mrioc->req_qinfo[scmd_priv_data->req_q_idx];

	if (mpi3mr_op_request_post(mrioc, op_req_q,
	    scmd_priv_data->mpi3mr_scsiio_req)) {
		mpi3mr_clear_scmd_priv(mrioc, scmd);
		retval = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

out:
	return retval;
}

static struct scsi_host_template mpi3mr_driver_template = {
	.module				= THIS_MODULE,
	.name				= "MPI3 Storage Controller",
	.proc_name			=
MPI3MR_DRIVER_NAME,
	.queuecommand			= mpi3mr_qcmd,
	.target_alloc			= mpi3mr_target_alloc,
	.slave_alloc			= mpi3mr_slave_alloc,
	.slave_configure		= mpi3mr_slave_configure,
	.target_destroy			= mpi3mr_target_destroy,
	.slave_destroy			= mpi3mr_slave_destroy,
	.scan_finished			= mpi3mr_scan_finished,
	.scan_start			= mpi3mr_scan_start,
	.change_queue_depth		= mpi3mr_change_queue_depth,
	.eh_device_reset_handler	= mpi3mr_eh_dev_reset,
	.eh_target_reset_handler	= mpi3mr_eh_target_reset,
	.eh_host_reset_handler		= mpi3mr_eh_host_reset,
	.bios_param			= mpi3mr_bios_param,
	.map_queues			= mpi3mr_map_queues,
	.no_write_same			= 1,
	/* can_queue is a placeholder; probe raises it to max_host_ios
	 * after the controller reports its capabilities */
	.can_queue			= 1,
	.this_id			= -1,
	.sg_tablesize			= MPI3MR_SG_DEPTH,
	/* max xfer supported is 1M (2K in 512 byte sized sectors)
	 */
	.max_sectors			= 2048,
	.cmd_per_lun			= MPI3MR_MAX_CMDS_LUN,
	.track_queue_depth		= 1,
	.cmd_size			= sizeof(struct scmd_priv),
};

/**
 * mpi3mr_init_drv_cmd - Initialize internal command tracker
 * @cmdptr: Internal command tracker
 * @host_tag: Host tag used for the specific command
 *
 * Initialize the internal command tracker structure with
 * specified host tag.
 *
 * Return: Nothing.
 */
static inline void mpi3mr_init_drv_cmd(struct mpi3mr_drv_cmd *cmdptr,
	u16 host_tag)
{
	mutex_init(&cmdptr->mutex);
	cmdptr->reply = NULL;
	cmdptr->state = MPI3MR_CMD_NOTUSED;
	cmdptr->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	cmdptr->host_tag = host_tag;
}

/**
 * osintfc_mrioc_security_status -Check controller secure status
 * @pdev: PCI device instance
 *
 * Read the Device Serial Number capability from PCI config
 * space and decide whether the controller is secure or not.
 *
 * Return: 0 on success, non-zero on failure.
3632 */ 3633 static int 3634 osintfc_mrioc_security_status(struct pci_dev *pdev) 3635 { 3636 u32 cap_data; 3637 int base; 3638 u32 ctlr_status; 3639 u32 debug_status; 3640 int retval = 0; 3641 3642 base = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN); 3643 if (!base) { 3644 dev_err(&pdev->dev, 3645 "%s: PCI_EXT_CAP_ID_DSN is not supported\n", __func__); 3646 return -1; 3647 } 3648 3649 pci_read_config_dword(pdev, base + 4, &cap_data); 3650 3651 debug_status = cap_data & MPI3MR_CTLR_SECURE_DBG_STATUS_MASK; 3652 ctlr_status = cap_data & MPI3MR_CTLR_SECURITY_STATUS_MASK; 3653 3654 switch (ctlr_status) { 3655 case MPI3MR_INVALID_DEVICE: 3656 dev_err(&pdev->dev, 3657 "%s: Non secure ctlr (Invalid) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n", 3658 __func__, pdev->device, pdev->subsystem_vendor, 3659 pdev->subsystem_device); 3660 retval = -1; 3661 break; 3662 case MPI3MR_CONFIG_SECURE_DEVICE: 3663 if (!debug_status) 3664 dev_info(&pdev->dev, 3665 "%s: Config secure ctlr is detected\n", 3666 __func__); 3667 break; 3668 case MPI3MR_HARD_SECURE_DEVICE: 3669 break; 3670 case MPI3MR_TAMPERED_DEVICE: 3671 dev_err(&pdev->dev, 3672 "%s: Non secure ctlr (Tampered) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n", 3673 __func__, pdev->device, pdev->subsystem_vendor, 3674 pdev->subsystem_device); 3675 retval = -1; 3676 break; 3677 default: 3678 retval = -1; 3679 break; 3680 } 3681 3682 if (!retval && debug_status) { 3683 dev_err(&pdev->dev, 3684 "%s: Non secure ctlr (Secure Dbg) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n", 3685 __func__, pdev->device, pdev->subsystem_vendor, 3686 pdev->subsystem_device); 3687 retval = -1; 3688 } 3689 3690 return retval; 3691 } 3692 3693 /** 3694 * mpi3mr_probe - PCI probe callback 3695 * @pdev: PCI device instance 3696 * @id: PCI device ID details 3697 * 3698 * controller initialization routine. 
Checks the security status
 * of the controller and if it is invalid or tampered return the
 * probe without initializing the controller. Otherwise,
 * allocate per adapter instance through shost_priv and
 * initialize controller specific data structures, initialize
 * the controller hardware, add shost to the SCSI subsystem.
 *
 * Return: 0 on success, non-zero on failure.
 */

static int
mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mpi3mr_ioc *mrioc = NULL;
	struct Scsi_Host *shost = NULL;
	int retval = 0, i;

	/* Refuse to manage invalid/tampered controllers */
	if (osintfc_mrioc_security_status(pdev)) {
		warn_non_secure_ctlr = 1;
		return 1; /* For Invalid and Tampered device */
	}

	shost = scsi_host_alloc(&mpi3mr_driver_template,
	    sizeof(struct mpi3mr_ioc));
	if (!shost) {
		retval = -ENODEV;
		goto shost_failed;
	}

	mrioc = shost_priv(shost);
	mrioc->id = mrioc_ids++;
	/* snprintf instead of sprintf: never overrun the fixed name buffers */
	snprintf(mrioc->driver_name, sizeof(mrioc->driver_name), "%s",
	    MPI3MR_DRIVER_NAME);
	snprintf(mrioc->name, sizeof(mrioc->name), "%s%d", mrioc->driver_name,
	    mrioc->id);
	INIT_LIST_HEAD(&mrioc->list);
	spin_lock(&mrioc_list_lock);
	list_add_tail(&mrioc->list, &mrioc_list);
	spin_unlock(&mrioc_list_lock);

	spin_lock_init(&mrioc->admin_req_lock);
	spin_lock_init(&mrioc->reply_free_queue_lock);
	spin_lock_init(&mrioc->sbq_lock);
	spin_lock_init(&mrioc->fwevt_lock);
	spin_lock_init(&mrioc->tgtdev_lock);
	spin_lock_init(&mrioc->watchdog_lock);
	spin_lock_init(&mrioc->chain_buf_lock);

	INIT_LIST_HEAD(&mrioc->fwevt_list);
	INIT_LIST_HEAD(&mrioc->tgtdev_list);
	INIT_LIST_HEAD(&mrioc->delayed_rmhs_list);

	mutex_init(&mrioc->reset_mutex);
	/* Reserve fixed host tags for driver-internal commands */
	mpi3mr_init_drv_cmd(&mrioc->init_cmds, MPI3MR_HOSTTAG_INITCMDS);
	mpi3mr_init_drv_cmd(&mrioc->host_tm_cmds, MPI3MR_HOSTTAG_BLK_TMS);

	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
		mpi3mr_init_drv_cmd(&mrioc->dev_rmhs_cmds[i],
		    MPI3MR_HOSTTAG_DEVRMCMD_MIN + i);

	/* Segmented queues supported on all but revision-0 silicon */
	if (pdev->revision)
		mrioc->enable_segqueue = true;

	init_waitqueue_head(&mrioc->reset_waitq);
	mrioc->logging_level = logging_level;
	mrioc->shost = shost;
	mrioc->pdev = pdev;

	/* init shost parameters */
	shost->max_cmd_len = MPI3MR_MAX_CDB_LENGTH;
	shost->max_lun = -1;
	shost->unique_id = mrioc->id;

	shost->max_channel = 0;
	/* max_id is refined from facts.max_perids after IOC init below */
	shost->max_id = 0xFFFFFFFF;

	if (prot_mask >= 0)
		scsi_host_set_prot(shost, prot_mask);
	else {
		prot_mask = SHOST_DIF_TYPE1_PROTECTION
		    | SHOST_DIF_TYPE2_PROTECTION
		    | SHOST_DIF_TYPE3_PROTECTION;
		scsi_host_set_prot(shost, prot_mask);
	}

	ioc_info(mrioc,
	    "%s :host protection capabilities enabled %s%s%s%s%s%s%s\n",
	    __func__,
	    (prot_mask & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
	    (prot_mask & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
	    (prot_mask & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
	    (prot_mask & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
	    (prot_mask & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
	    (prot_mask & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
	    (prot_mask & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (prot_guard_mask)
		scsi_host_set_guard(shost, (prot_guard_mask & 3));
	else
		scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);

	/* Ordered workqueue: firmware events must be processed serially */
	snprintf(mrioc->fwevt_worker_name, sizeof(mrioc->fwevt_worker_name),
	    "%s%d_fwevt_wrkr", mrioc->driver_name, mrioc->id);
	mrioc->fwevt_worker_thread = alloc_ordered_workqueue(
	    mrioc->fwevt_worker_name, WQ_MEM_RECLAIM);
	if (!mrioc->fwevt_worker_thread) {
		ioc_err(mrioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		retval = -ENODEV;
		goto out_fwevtthread_failed;
	}

	mrioc->is_driver_loading = 1;
	if (mpi3mr_init_ioc(mrioc, MPI3MR_IT_INIT)) {
		ioc_err(mrioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		retval = -ENODEV;
		goto out_iocinit_failed;
	}

	/* Now that the IOC reported its limits, publish the real values */
	shost->nr_hw_queues = mrioc->num_op_reply_q;
	shost->can_queue = mrioc->max_host_ios;
	shost->sg_tablesize = MPI3MR_SG_DEPTH;
	shost->max_id = mrioc->facts.max_perids;

	retval = scsi_add_host(shost, &pdev->dev);
	if (retval) {
		ioc_err(mrioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto addhost_failed;
	}

	scsi_scan_host(shost);
	return retval;

addhost_failed:
	mpi3mr_cleanup_ioc(mrioc, MPI3MR_COMPLETE_CLEANUP);
out_iocinit_failed:
	destroy_workqueue(mrioc->fwevt_worker_thread);
out_fwevtthread_failed:
	spin_lock(&mrioc_list_lock);
	list_del(&mrioc->list);
	spin_unlock(&mrioc_list_lock);
	scsi_host_put(shost);
shost_failed:
	return retval;
}

/**
 * mpi3mr_remove - PCI remove callback
 * @pdev: PCI device instance
 *
 * Free up all memory and resources associated with the
 * controller and target devices, unregister the shost.
 *
 * Return: Nothing.
3852 */ 3853 static void mpi3mr_remove(struct pci_dev *pdev) 3854 { 3855 struct Scsi_Host *shost = pci_get_drvdata(pdev); 3856 struct mpi3mr_ioc *mrioc; 3857 struct workqueue_struct *wq; 3858 unsigned long flags; 3859 struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next; 3860 3861 if (!shost) 3862 return; 3863 3864 mrioc = shost_priv(shost); 3865 while (mrioc->reset_in_progress || mrioc->is_driver_loading) 3866 ssleep(1); 3867 3868 mrioc->stop_drv_processing = 1; 3869 mpi3mr_cleanup_fwevt_list(mrioc); 3870 spin_lock_irqsave(&mrioc->fwevt_lock, flags); 3871 wq = mrioc->fwevt_worker_thread; 3872 mrioc->fwevt_worker_thread = NULL; 3873 spin_unlock_irqrestore(&mrioc->fwevt_lock, flags); 3874 if (wq) 3875 destroy_workqueue(wq); 3876 scsi_remove_host(shost); 3877 3878 list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list, 3879 list) { 3880 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); 3881 mpi3mr_tgtdev_del_from_list(mrioc, tgtdev); 3882 mpi3mr_tgtdev_put(tgtdev); 3883 } 3884 mpi3mr_cleanup_ioc(mrioc, MPI3MR_COMPLETE_CLEANUP); 3885 3886 spin_lock(&mrioc_list_lock); 3887 list_del(&mrioc->list); 3888 spin_unlock(&mrioc_list_lock); 3889 3890 scsi_host_put(shost); 3891 } 3892 3893 /** 3894 * mpi3mr_shutdown - PCI shutdown callback 3895 * @pdev: PCI device instance 3896 * 3897 * Free up all memory and resources associated with the 3898 * controller 3899 * 3900 * Return: Nothing. 
3901 */ 3902 static void mpi3mr_shutdown(struct pci_dev *pdev) 3903 { 3904 struct Scsi_Host *shost = pci_get_drvdata(pdev); 3905 struct mpi3mr_ioc *mrioc; 3906 struct workqueue_struct *wq; 3907 unsigned long flags; 3908 3909 if (!shost) 3910 return; 3911 3912 mrioc = shost_priv(shost); 3913 while (mrioc->reset_in_progress || mrioc->is_driver_loading) 3914 ssleep(1); 3915 3916 mrioc->stop_drv_processing = 1; 3917 mpi3mr_cleanup_fwevt_list(mrioc); 3918 spin_lock_irqsave(&mrioc->fwevt_lock, flags); 3919 wq = mrioc->fwevt_worker_thread; 3920 mrioc->fwevt_worker_thread = NULL; 3921 spin_unlock_irqrestore(&mrioc->fwevt_lock, flags); 3922 if (wq) 3923 destroy_workqueue(wq); 3924 mpi3mr_cleanup_ioc(mrioc, MPI3MR_COMPLETE_CLEANUP); 3925 } 3926 3927 #ifdef CONFIG_PM 3928 /** 3929 * mpi3mr_suspend - PCI power management suspend callback 3930 * @pdev: PCI device instance 3931 * @state: New power state 3932 * 3933 * Change the power state to the given value and cleanup the IOC 3934 * by issuing MUR and shutdown notification 3935 * 3936 * Return: 0 always. 
3937 */ 3938 static int mpi3mr_suspend(struct pci_dev *pdev, pm_message_t state) 3939 { 3940 struct Scsi_Host *shost = pci_get_drvdata(pdev); 3941 struct mpi3mr_ioc *mrioc; 3942 pci_power_t device_state; 3943 3944 if (!shost) 3945 return 0; 3946 3947 mrioc = shost_priv(shost); 3948 while (mrioc->reset_in_progress || mrioc->is_driver_loading) 3949 ssleep(1); 3950 mrioc->stop_drv_processing = 1; 3951 mpi3mr_cleanup_fwevt_list(mrioc); 3952 scsi_block_requests(shost); 3953 mpi3mr_stop_watchdog(mrioc); 3954 mpi3mr_cleanup_ioc(mrioc, MPI3MR_SUSPEND); 3955 3956 device_state = pci_choose_state(pdev, state); 3957 ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n", 3958 pdev, pci_name(pdev), device_state); 3959 pci_save_state(pdev); 3960 pci_set_power_state(pdev, device_state); 3961 mpi3mr_cleanup_resources(mrioc); 3962 3963 return 0; 3964 } 3965 3966 /** 3967 * mpi3mr_resume - PCI power management resume callback 3968 * @pdev: PCI device instance 3969 * 3970 * Restore the power state to D0 and reinitialize the controller 3971 * and resume I/O operations to the target devices 3972 * 3973 * Return: 0 on success, non-zero on failure 3974 */ 3975 static int mpi3mr_resume(struct pci_dev *pdev) 3976 { 3977 struct Scsi_Host *shost = pci_get_drvdata(pdev); 3978 struct mpi3mr_ioc *mrioc; 3979 pci_power_t device_state = pdev->current_state; 3980 int r; 3981 3982 if (!shost) 3983 return 0; 3984 3985 mrioc = shost_priv(shost); 3986 3987 ioc_info(mrioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n", 3988 pdev, pci_name(pdev), device_state); 3989 pci_set_power_state(pdev, PCI_D0); 3990 pci_enable_wake(pdev, PCI_D0, 0); 3991 pci_restore_state(pdev); 3992 mrioc->pdev = pdev; 3993 mrioc->cpu_count = num_online_cpus(); 3994 r = mpi3mr_setup_resources(mrioc); 3995 if (r) { 3996 ioc_info(mrioc, "%s: Setup resources failed[%d]\n", 3997 __func__, r); 3998 return r; 3999 } 4000 4001 mrioc->stop_drv_processing = 0; 4002 mpi3mr_memset_buffers(mrioc); 4003 
mpi3mr_init_ioc(mrioc, MPI3MR_IT_RESUME); 4004 scsi_unblock_requests(shost); 4005 mpi3mr_start_watchdog(mrioc); 4006 4007 return 0; 4008 } 4009 #endif 4010 4011 static const struct pci_device_id mpi3mr_pci_id_table[] = { 4012 { 4013 PCI_DEVICE_SUB(PCI_VENDOR_ID_LSI_LOGIC, 0x00A5, 4014 PCI_ANY_ID, PCI_ANY_ID) 4015 }, 4016 { 0 } 4017 }; 4018 MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table); 4019 4020 static struct pci_driver mpi3mr_pci_driver = { 4021 .name = MPI3MR_DRIVER_NAME, 4022 .id_table = mpi3mr_pci_id_table, 4023 .probe = mpi3mr_probe, 4024 .remove = mpi3mr_remove, 4025 .shutdown = mpi3mr_shutdown, 4026 #ifdef CONFIG_PM 4027 .suspend = mpi3mr_suspend, 4028 .resume = mpi3mr_resume, 4029 #endif 4030 }; 4031 4032 static int __init mpi3mr_init(void) 4033 { 4034 int ret_val; 4035 4036 pr_info("Loading %s version %s\n", MPI3MR_DRIVER_NAME, 4037 MPI3MR_DRIVER_VERSION); 4038 4039 ret_val = pci_register_driver(&mpi3mr_pci_driver); 4040 4041 return ret_val; 4042 } 4043 4044 static void __exit mpi3mr_exit(void) 4045 { 4046 if (warn_non_secure_ctlr) 4047 pr_warn( 4048 "Unloading %s version %s while managing a non secure controller\n", 4049 MPI3MR_DRIVER_NAME, MPI3MR_DRIVER_VERSION); 4050 else 4051 pr_info("Unloading %s version %s\n", MPI3MR_DRIVER_NAME, 4052 MPI3MR_DRIVER_VERSION); 4053 4054 pci_unregister_driver(&mpi3mr_pci_driver); 4055 } 4056 4057 module_init(mpi3mr_init); 4058 module_exit(mpi3mr_exit); 4059