// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for Broadcom MPI3 Storage Controllers
 *
 * Copyright (C) 2017-2022 Broadcom Inc.
 *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
 *
 */

#include "mpi3mr.h"

/* global driver scope variables */
LIST_HEAD(mrioc_list);
DEFINE_SPINLOCK(mrioc_list_lock);
static int mrioc_ids;			/* incrementing id assigned to each probed controller */
static int warn_non_secure_ctlr;	/* warn-once latch for non-secure controllers */
atomic64_t event_counter;

MODULE_AUTHOR(MPI3MR_DRIVER_AUTHOR);
MODULE_DESCRIPTION(MPI3MR_DRIVER_DESC);
MODULE_LICENSE(MPI3MR_DRIVER_LICENSE);
MODULE_VERSION(MPI3MR_DRIVER_VERSION);

/* Module parameters */
int prot_mask = -1;
module_param(prot_mask, int, 0);
MODULE_PARM_DESC(prot_mask, "Host protection capabilities mask, def=0x07");

static int prot_guard_mask = 3;
module_param(prot_guard_mask, int, 0);
MODULE_PARM_DESC(prot_guard_mask, " Host protection guard mask, def=3");
static int logging_level;
module_param(logging_level, int, 0);
MODULE_PARM_DESC(logging_level,
	" bits for enabling additional logging info (default=0)");

/* Forward declarations */
static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
	struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx);

/**
 * mpi3mr_host_tag_for_scmd - Get host tag for a scmd
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 *
 * Calculate the host tag based on block tag for a given scmd.
 *
 * Return: Valid host tag or MPI3MR_HOSTTAG_INVALID.
 */
static u16 mpi3mr_host_tag_for_scmd(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd)
{
	struct scmd_priv *priv = NULL;
	u32 unique_tag;
	u16 host_tag, hw_queue;

	unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));

	hw_queue = blk_mq_unique_tag_to_hwq(unique_tag);
	if (hw_queue >= mrioc->num_op_reply_q)
		return MPI3MR_HOSTTAG_INVALID;
	host_tag = blk_mq_unique_tag_to_tag(unique_tag);

	if (WARN_ON(host_tag >= mrioc->max_host_ios))
		return MPI3MR_HOSTTAG_INVALID;

	priv = scsi_cmd_priv(scmd);
	/* host_tag 0 is invalid hence incrementing by 1 */
	priv->host_tag = host_tag + 1;
	priv->scmd = scmd;
	priv->in_lld_scope = 1;
	priv->req_q_idx = hw_queue;
	priv->meta_chain_idx = -1;
	priv->chain_idx = -1;
	priv->meta_sg_valid = 0;
	return priv->host_tag;
}

/**
 * mpi3mr_scmd_from_host_tag - Get SCSI command from host tag
 * @mrioc: Adapter instance reference
 * @host_tag: Host tag
 * @qidx: Operational queue index
 *
 * Identify the block tag from the host tag and queue index and
 * retrieve associated scsi command using scsi_host_find_tag().
 *
 * Return: SCSI command reference or NULL.
 */
static struct scsi_cmnd *mpi3mr_scmd_from_host_tag(
	struct mpi3mr_ioc *mrioc, u16 host_tag, u16 qidx)
{
	struct scsi_cmnd *scmd = NULL;
	struct scmd_priv *priv = NULL;
	/* driver host tags are 1-based (see mpi3mr_host_tag_for_scmd) */
	u32 unique_tag = host_tag - 1;

	if (WARN_ON(host_tag > mrioc->max_host_ios))
		goto out;

	unique_tag |= (qidx << BLK_MQ_UNIQUE_TAG_BITS);

	scmd = scsi_host_find_tag(mrioc->shost, unique_tag);
	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		/* commands already given back are not in LLD scope */
		if (!priv->in_lld_scope)
			scmd = NULL;
	}
out:
	return scmd;
}

/**
 * mpi3mr_clear_scmd_priv - Cleanup SCSI command private data
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 *
 * Invalidate the SCSI command private data to mark the command
 * is not in LLD scope anymore.
 *
 * Return: Nothing.
 */
static void mpi3mr_clear_scmd_priv(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd)
{
	struct scmd_priv *priv = NULL;

	priv = scsi_cmd_priv(scmd);

	if (WARN_ON(priv->in_lld_scope == 0))
		return;
	priv->host_tag = MPI3MR_HOSTTAG_INVALID;
	priv->req_q_idx = 0xFFFF;
	priv->scmd = NULL;
	priv->in_lld_scope = 0;
	priv->meta_sg_valid = 0;
	/* release any chain frame slots reserved for this command */
	if (priv->chain_idx >= 0) {
		clear_bit(priv->chain_idx, mrioc->chain_bitmap);
		priv->chain_idx = -1;
	}
	if (priv->meta_chain_idx >= 0) {
		clear_bit(priv->meta_chain_idx, mrioc->chain_bitmap);
		priv->meta_chain_idx = -1;
	}
}

static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
	struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc);
static void mpi3mr_fwevt_worker(struct work_struct *work);

/**
 * mpi3mr_fwevt_free - firmware event memory deallocator
 * @r: kref pointer of the firmware event
 *
 * Free firmware event memory when no reference remains.
 */
static void mpi3mr_fwevt_free(struct kref *r)
{
	kfree(container_of(r, struct mpi3mr_fwevt, ref_count));
}

/**
 * mpi3mr_fwevt_get - kref incrementor
 * @fwevt: Firmware event reference
 *
 * Increment firmware event reference count.
 */
static void mpi3mr_fwevt_get(struct mpi3mr_fwevt *fwevt)
{
	kref_get(&fwevt->ref_count);
}

/**
 * mpi3mr_fwevt_put - kref decrementor
 * @fwevt: Firmware event reference
 *
 * Decrement firmware event reference count; the event is freed
 * via mpi3mr_fwevt_free() on the final put.
 */
static void mpi3mr_fwevt_put(struct mpi3mr_fwevt *fwevt)
{
	kref_put(&fwevt->ref_count, mpi3mr_fwevt_free);
}

/**
 * mpi3mr_alloc_fwevt - Allocate firmware event
 * @len: length of firmware event data to allocate
 *
 * Allocate firmware event with required length and initialize
 * the reference counter.
 *
 * Return: firmware event reference.
 */
static struct mpi3mr_fwevt *mpi3mr_alloc_fwevt(int len)
{
	struct mpi3mr_fwevt *fwevt;

	/* GFP_ATOMIC: may be invoked from non-sleepable event context */
	fwevt = kzalloc(sizeof(*fwevt) + len, GFP_ATOMIC);
	if (!fwevt)
		return NULL;

	kref_init(&fwevt->ref_count);
	return fwevt;
}

/**
 * mpi3mr_fwevt_add_to_list - Add firmware event to the list
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Add the given firmware event to the firmware event list and
 * queue its work item on the firmware event worker thread.
 *
 * Return: Nothing.
 */
static void mpi3mr_fwevt_add_to_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	unsigned long flags;

	if (!mrioc->fwevt_worker_thread)
		return;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	/* get fwevt reference count while adding it to fwevt_list */
	mpi3mr_fwevt_get(fwevt);
	INIT_LIST_HEAD(&fwevt->list);
	list_add_tail(&fwevt->list, &mrioc->fwevt_list);
	INIT_WORK(&fwevt->work, mpi3mr_fwevt_worker);
	/* get fwevt reference count while enqueueing it to worker queue */
	mpi3mr_fwevt_get(fwevt);
	queue_work(mrioc->fwevt_worker_thread, &fwevt->work);
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
}

/**
 * mpi3mr_fwevt_del_from_list - Delete firmware event from list
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Delete the given firmware event from the firmware event list
 * and drop the list's reference on it.
 *
 * Return: Nothing.
 */
static void mpi3mr_fwevt_del_from_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	unsigned long flags;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	if (!list_empty(&fwevt->list)) {
		list_del_init(&fwevt->list);
		/*
		 * Put fwevt reference count after
		 * removing it from fwevt_list
		 */
		mpi3mr_fwevt_put(fwevt);
	}
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
}

/**
 * mpi3mr_dequeue_fwevt - Dequeue firmware event from the list
 * @mrioc: Adapter instance reference
 *
 * Dequeue a firmware event from the head of the firmware event
 * list, dropping the list's reference on it.
 *
 * Return: firmware event or NULL if the list is empty.
 */
static struct mpi3mr_fwevt *mpi3mr_dequeue_fwevt(
	struct mpi3mr_ioc *mrioc)
{
	unsigned long flags;
	struct mpi3mr_fwevt *fwevt = NULL;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	if (!list_empty(&mrioc->fwevt_list)) {
		fwevt = list_first_entry(&mrioc->fwevt_list,
		    struct mpi3mr_fwevt, list);
		list_del_init(&fwevt->list);
		/*
		 * Put fwevt reference count after
		 * removing it from fwevt_list
		 */
		mpi3mr_fwevt_put(fwevt);
	}
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);

	return fwevt;
}

/**
 * mpi3mr_cancel_work - cancel firmware event
 * @fwevt: fwevt object which needs to be canceled
 *
 * Return: Nothing.
 */
static void mpi3mr_cancel_work(struct mpi3mr_fwevt *fwevt)
{
	/*
	 * Wait on the fwevt to complete. If this returns 1, then
	 * the event was never executed.
	 *
	 * If it did execute, we wait for it to finish, and the put will
	 * happen from mpi3mr_process_fwevt()
	 */
	if (cancel_work_sync(&fwevt->work)) {
		/*
		 * Put fwevt reference count after
		 * dequeuing it from worker queue
		 */
		mpi3mr_fwevt_put(fwevt);
		/*
		 * Put fwevt reference count to neutralize
		 * kref_init increment
		 */
		mpi3mr_fwevt_put(fwevt);
	}
}

/**
 * mpi3mr_cleanup_fwevt_list - Cleanup firmware event list
 * @mrioc: Adapter instance reference
 *
 * Flush all pending firmware events from the firmware event
 * list.
 *
 * Return: Nothing.
 */
void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_fwevt *fwevt = NULL;

	if ((list_empty(&mrioc->fwevt_list) && !mrioc->current_event) ||
	    !mrioc->fwevt_worker_thread)
		return;

	while ((fwevt = mpi3mr_dequeue_fwevt(mrioc)))
		mpi3mr_cancel_work(fwevt);

	if (mrioc->current_event) {
		fwevt = mrioc->current_event;
		/*
		 * Don't call cancel_work_sync() API for the
		 * fwevt work if the controller reset is
		 * get called as part of processing the
		 * same fwevt work (or) when worker thread is
		 * waiting for device add/remove APIs to complete.
		 * Otherwise we will see deadlock.
		 */
		if (current_work() == &fwevt->work || fwevt->pending_at_sml) {
			/* mark the event so its handler bails out */
			fwevt->discard = 1;
			return;
		}

		mpi3mr_cancel_work(fwevt);
	}
}

/**
 * mpi3mr_invalidate_devhandles - Invalidate device handles
 * @mrioc: Adapter instance reference
 *
 * Invalidate the device handles in the target device structures.
 * Called post reset prior to reinitializing the controller.
 *
 * Return: Nothing.
 */
void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_tgt_dev *tgtdev;
	struct mpi3mr_stgt_priv_data *tgt_priv;

	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		tgtdev->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
		/* also invalidate the handle cached in starget private data */
		if (tgtdev->starget && tgtdev->starget->hostdata) {
			tgt_priv = tgtdev->starget->hostdata;
			tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
		}
	}
}

/**
 * mpi3mr_print_scmd - print individual SCSI command
 * @rq: Block request
 * @data: Adapter instance reference
 *
 * Print the SCSI command details if it is in LLD scope.
 *
 * Return: true always.
 */
static bool mpi3mr_print_scmd(struct request *rq, void *data)
{
	struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv = NULL;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;

		ioc_info(mrioc, "%s :Host Tag = %d, qid = %d\n",
		    __func__, priv->host_tag, priv->req_q_idx + 1);
		scsi_print_command(scmd);
	}

out:
	return(true);
}

/**
 * mpi3mr_flush_scmd - Flush individual SCSI command
 * @rq: Block request
 * @data: Adapter instance reference
 *
 * Return the SCSI command to the upper layers if it is in LLD
 * scope.
 *
 * Return: true always.
 */

static bool mpi3mr_flush_scmd(struct request *rq, void *data)
{
	struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv = NULL;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;

		/* unmap the protection (DIX) SG list if one was mapped */
		if (priv->meta_sg_valid)
			dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
			    scsi_prot_sg_count(scmd), scmd->sc_data_direction);
		mpi3mr_clear_scmd_priv(mrioc, scmd);
		scsi_dma_unmap(scmd);
		/* return the command to the midlayer with DID_RESET */
		scmd->result = DID_RESET << 16;
		scsi_print_command(scmd);
		scsi_done(scmd);
		mrioc->flush_io_count++;
	}

out:
	return(true);
}

/**
 * mpi3mr_count_dev_pending - Count commands pending for a lun
 * @rq: Block request
 * @data: SCSI device reference
 *
 * This is an iterator function called for each SCSI command in
 * a host and if the command is pending in the LLD for the
 * specific device(lun) then device specific pending I/O counter
 * is updated in the device structure.
 *
 * Return: true always.
 */

static bool mpi3mr_count_dev_pending(struct request *rq, void *data)
{
	struct scsi_device *sdev = (struct scsi_device *)data;
	struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;
		if (scmd->device == sdev)
			sdev_priv_data->pend_count++;
	}

out:
	return true;
}

/**
 * mpi3mr_count_tgt_pending - Count commands pending for target
 * @rq: Block request
 * @data: SCSI target reference
 *
 * This is an iterator function called for each SCSI command in
 * a host and if the command is pending in the LLD for the
 * specific target then target specific pending I/O counter is
 * updated in the target structure.
 *
 * Return: true always.
 */

static bool mpi3mr_count_tgt_pending(struct request *rq, void *data)
{
	struct scsi_target *starget = (struct scsi_target *)data;
	struct mpi3mr_stgt_priv_data *stgt_priv_data = starget->hostdata;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;
		if (scmd->device && (scsi_target(scmd->device) == starget))
			stgt_priv_data->pend_count++;
	}

out:
	return true;
}

/**
 * mpi3mr_flush_host_io - Flush host I/Os
 * @mrioc: Adapter instance reference
 *
 * Flush all of the pending I/Os by calling
 * blk_mq_tagset_busy_iter() for each possible tag. This is
 * executed post controller reset.
 *
 * Return: Nothing.
 */
void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc)
{
	struct Scsi_Host *shost = mrioc->shost;

	mrioc->flush_io_count = 0;
	ioc_info(mrioc, "%s :Flushing Host I/O cmds post reset\n", __func__);
	blk_mq_tagset_busy_iter(&shost->tag_set,
	    mpi3mr_flush_scmd, (void *)mrioc);
	ioc_info(mrioc, "%s :Flushed %d Host I/O cmds\n", __func__,
	    mrioc->flush_io_count);
}

/**
 * mpi3mr_alloc_tgtdev - target device allocator
 *
 * Allocate target device instance and initialize the reference
 * count.
 *
 * Return: target device instance or NULL on allocation failure.
 */
static struct mpi3mr_tgt_dev *mpi3mr_alloc_tgtdev(void)
{
	struct mpi3mr_tgt_dev *tgtdev;

	tgtdev = kzalloc(sizeof(*tgtdev), GFP_ATOMIC);
	if (!tgtdev)
		return NULL;
	kref_init(&tgtdev->ref_count);
	return tgtdev;
}

/**
 * mpi3mr_tgtdev_add_to_list - Add tgtdevice to the list
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device
 *
 * Add the target device to the target device list.
 *
 * Return: Nothing.
 */
static void mpi3mr_tgtdev_add_to_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev)
{
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	/* hold a reference for the device's membership on tgtdev_list */
	mpi3mr_tgtdev_get(tgtdev);
	INIT_LIST_HEAD(&tgtdev->list);
	list_add_tail(&tgtdev->list, &mrioc->tgtdev_list);
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}

/**
 * mpi3mr_tgtdev_del_from_list - Delete tgtdevice from the list
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device
 *
 * Remove the target device from the target device list.
 *
 * Return: Nothing.
 */
static void mpi3mr_tgtdev_del_from_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev)
{
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	if (!list_empty(&tgtdev->list)) {
		list_del_init(&tgtdev->list);
		/* drop the reference taken when the device was listed */
		mpi3mr_tgtdev_put(tgtdev);
	}
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}

/**
 * __mpi3mr_get_tgtdev_by_handle - Get tgtdev from device handle
 * @mrioc: Adapter instance reference
 * @handle: Device handle
 *
 * Accessor to retrieve target device from the device handle.
 * Non Lock version; the caller must hold tgtdev_lock and must
 * drop the reference taken here on the returned device.
 *
 * Return: Target device reference or NULL if not found.
 */
static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_handle(
	struct mpi3mr_ioc *mrioc, u16 handle)
{
	struct mpi3mr_tgt_dev *tgtdev;

	assert_spin_locked(&mrioc->tgtdev_lock);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
		if (tgtdev->dev_handle == handle)
			goto found_tgtdev;
	return NULL;

found_tgtdev:
	mpi3mr_tgtdev_get(tgtdev);
	return tgtdev;
}

/**
 * mpi3mr_get_tgtdev_by_handle - Get tgtdev from device handle
 * @mrioc: Adapter instance reference
 * @handle: Device handle
 *
 * Accessor to retrieve target device from the device handle.
 * Lock version
 *
 * Return: Target device reference.
 */
struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle(
	struct mpi3mr_ioc *mrioc, u16 handle)
{
	struct mpi3mr_tgt_dev *tgtdev;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle);
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
	return tgtdev;
}

/**
 * __mpi3mr_get_tgtdev_by_perst_id - Get tgtdev from persist ID
 * @mrioc: Adapter instance reference
 * @persist_id: Persistent ID
 *
 * Accessor to retrieve target device from the Persistent ID.
 * Non Lock version; the caller must hold tgtdev_lock and must
 * drop the reference taken here on the returned device.
 *
 * Return: Target device reference or NULL if not found.
 */
static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_perst_id(
	struct mpi3mr_ioc *mrioc, u16 persist_id)
{
	struct mpi3mr_tgt_dev *tgtdev;

	assert_spin_locked(&mrioc->tgtdev_lock);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
		if (tgtdev->perst_id == persist_id)
			goto found_tgtdev;
	return NULL;

found_tgtdev:
	mpi3mr_tgtdev_get(tgtdev);
	return tgtdev;
}

/**
 * mpi3mr_get_tgtdev_by_perst_id - Get tgtdev from persistent ID
 * @mrioc: Adapter instance reference
 * @persist_id: Persistent ID
 *
 * Accessor to retrieve target device from the Persistent ID.
 * Lock version
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_perst_id(
	struct mpi3mr_ioc *mrioc, u16 persist_id)
{
	struct mpi3mr_tgt_dev *tgtdev;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, persist_id);
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
	return tgtdev;
}

/**
 * __mpi3mr_get_tgtdev_from_tgtpriv - Get tgtdev from tgt private
 * @mrioc: Adapter instance reference
 * @tgt_priv: Target private data
 *
 * Accessor to return target device from the target private
 * data.
Non Lock version 697 * 698 * Return: Target device reference. 699 */ 700 static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_from_tgtpriv( 701 struct mpi3mr_ioc *mrioc, struct mpi3mr_stgt_priv_data *tgt_priv) 702 { 703 struct mpi3mr_tgt_dev *tgtdev; 704 705 assert_spin_locked(&mrioc->tgtdev_lock); 706 tgtdev = tgt_priv->tgt_dev; 707 if (tgtdev) 708 mpi3mr_tgtdev_get(tgtdev); 709 return tgtdev; 710 } 711 712 /** 713 * mpi3mr_print_device_event_notice - print notice related to post processing of 714 * device event after controller reset. 715 * 716 * @mrioc: Adapter instance reference 717 * @device_add: true for device add event and false for device removal event 718 * 719 * Return: None. 720 */ 721 static void mpi3mr_print_device_event_notice(struct mpi3mr_ioc *mrioc, 722 bool device_add) 723 { 724 ioc_notice(mrioc, "Device %s was in progress before the reset and\n", 725 (device_add ? "addition" : "removal")); 726 ioc_notice(mrioc, "completed after reset, verify whether the exposed devices\n"); 727 ioc_notice(mrioc, "are matched with attached devices for correctness\n"); 728 } 729 730 /** 731 * mpi3mr_remove_tgtdev_from_host - Remove dev from upper layers 732 * @mrioc: Adapter instance reference 733 * @tgtdev: Target device structure 734 * 735 * Checks whether the device is exposed to upper layers and if it 736 * is then remove the device from upper layers by calling 737 * scsi_remove_target(). 738 * 739 * Return: 0 on success, non zero on failure. 
 */
static void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev)
{
	struct mpi3mr_stgt_priv_data *tgt_priv;

	ioc_info(mrioc, "%s :Removing handle(0x%04x), wwid(0x%016llx)\n",
	    __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
	if (tgtdev->starget && tgtdev->starget->hostdata) {
		tgt_priv = tgtdev->starget->hostdata;
		tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	}

	if (tgtdev->starget) {
		/*
		 * Flag the removal as pending at the SCSI midlayer so a
		 * concurrent reset cleanup (mpi3mr_cleanup_fwevt_list) does
		 * not cancel this event's work while scsi_remove_target()
		 * is in progress.
		 */
		if (mrioc->current_event)
			mrioc->current_event->pending_at_sml = 1;
		scsi_remove_target(&tgtdev->starget->dev);
		tgtdev->host_exposed = 0;
		if (mrioc->current_event) {
			mrioc->current_event->pending_at_sml = 0;
			if (mrioc->current_event->discard) {
				/* a reset discarded this event mid-removal */
				mpi3mr_print_device_event_notice(mrioc, false);
				return;
			}
		}
	}
	ioc_info(mrioc, "%s :Removed handle(0x%04x), wwid(0x%016llx)\n",
	    __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
}

/**
 * mpi3mr_report_tgtdev_to_host - Expose device to upper layers
 * @mrioc: Adapter instance reference
 * @perst_id: Persistent ID of the device
 *
 * Checks whether the device can be exposed to upper layers and
 * if it is not exposed yet, then expose the device to the upper
 * layers by calling scsi_scan_target().
 *
 * Return: 0 on success, non zero on failure.
 */
static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc,
	u16 perst_id)
{
	int retval = 0;
	struct mpi3mr_tgt_dev *tgtdev;

	tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
	if (!tgtdev) {
		retval = -1;
		goto out;
	}
	/* hidden devices are never exposed to the midlayer */
	if (tgtdev->is_hidden) {
		retval = -1;
		goto out;
	}
	if (!tgtdev->host_exposed && !mrioc->reset_in_progress) {
		tgtdev->host_exposed = 1;
		/* flag scan as pending at SCSI midlayer (see remove path) */
		if (mrioc->current_event)
			mrioc->current_event->pending_at_sml = 1;
		scsi_scan_target(&mrioc->shost->shost_gendev, 0,
		    tgtdev->perst_id,
		    SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
		/* scan failed to attach a starget: revert the exposed flag */
		if (!tgtdev->starget)
			tgtdev->host_exposed = 0;
		if (mrioc->current_event) {
			mrioc->current_event->pending_at_sml = 0;
			if (mrioc->current_event->discard) {
				mpi3mr_print_device_event_notice(mrioc, true);
				goto out;
			}
		}
	}
out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);

	return retval;
}

/**
 * mpi3mr_change_queue_depth - Change QD callback handler
 * @sdev: SCSI device reference
 * @q_depth: Queue depth
 *
 * Validate and limit QD and call scsi_change_queue_depth.
 *
 * Return: return value of scsi_change_queue_depth
 */
static int mpi3mr_change_queue_depth(struct scsi_device *sdev,
	int q_depth)
{
	struct scsi_target *starget = scsi_target(sdev);
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	int retval = 0;

	/* untagged devices are limited to a single outstanding command */
	if (!sdev->tagged_supported)
		q_depth = 1;
	if (q_depth > shost->can_queue)
		q_depth = shost->can_queue;
	else if (!q_depth)
		q_depth = MPI3MR_DEFAULT_SDEV_QD;
	retval = scsi_change_queue_depth(sdev, q_depth);

	return retval;
}

/**
 * mpi3mr_update_sdev - Update SCSI device information
 * @sdev: SCSI device reference
 * @data: target device reference
 *
 * This is an iterator function called for each SCSI device in a
 * target to update the target specific information into each
 * SCSI device.
 *
 * Return: Nothing.
 */
static void
mpi3mr_update_sdev(struct scsi_device *sdev, void *data)
{
	struct mpi3mr_tgt_dev *tgtdev;

	tgtdev = (struct mpi3mr_tgt_dev *)data;
	if (!tgtdev)
		return;

	mpi3mr_change_queue_depth(sdev, tgtdev->q_depth);
	switch (tgtdev->dev_type) {
	case MPI3_DEVICE_DEVFORM_PCIE:
		/* The block layer hw sector size = 512 */
		if ((tgtdev->dev_spec.pcie_inf.dev_info &
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) {
			blk_queue_max_hw_sectors(sdev->request_queue,
			    tgtdev->dev_spec.pcie_inf.mdts / 512);
			/* pgsz 0: fall back to the default page size exponent */
			if (tgtdev->dev_spec.pcie_inf.pgsz == 0)
				blk_queue_virt_boundary(sdev->request_queue,
				    ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1));
			else
				blk_queue_virt_boundary(sdev->request_queue,
				    ((1 << tgtdev->dev_spec.pcie_inf.pgsz) - 1));
		}
		break;
	default:
		break;
	}
}

/**
 * mpi3mr_rfresh_tgtdevs - Refresh target device exposure
 * @mrioc: Adapter instance reference
 *
 * This is executed post controller reset to identify any
 * missing devices
 * during reset and remove from the upper layers
 * or expose any newly detected device to the upper layers.
 *
 * Return: Nothing.
 */

void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;

	/* devices whose handle was not refreshed after reset have vanished */
	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
	    list) {
		if (tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
			dprint_reset(mrioc, "removing target device with perst_id(%d)\n",
			    tgtdev->perst_id);
			if (tgtdev->host_exposed)
				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
			mpi3mr_tgtdev_put(tgtdev);
		}
	}

	tgtdev = NULL;
	/* expose valid, non-hidden devices not yet known to the midlayer */
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		if ((tgtdev->dev_handle != MPI3MR_INVALID_DEV_HANDLE) &&
		    !tgtdev->is_hidden && !tgtdev->host_exposed)
			mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
	}
}

/**
 * mpi3mr_update_tgtdev - DevStatusChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device internal structure
 * @dev_pg0: New device page0
 *
 * Update the information from the device page0 into the driver
 * cached target device structure.
 *
 * Return: Nothing.
 */
static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev, struct mpi3_device_page0 *dev_pg0)
{
	u16 flags = 0;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
	u8 prot_mask = 0;

	/* cache the generic device page0 fields (little-endian on the wire) */
	tgtdev->perst_id = le16_to_cpu(dev_pg0->persistent_id);
	tgtdev->dev_handle = le16_to_cpu(dev_pg0->dev_handle);
	tgtdev->dev_type = dev_pg0->device_form;
	tgtdev->encl_handle = le16_to_cpu(dev_pg0->enclosure_handle);
	tgtdev->parent_handle = le16_to_cpu(dev_pg0->parent_dev_handle);
	tgtdev->slot = le16_to_cpu(dev_pg0->slot);
	tgtdev->q_depth = le16_to_cpu(dev_pg0->queue_depth);
	tgtdev->wwid = le64_to_cpu(dev_pg0->wwid);

	flags = le16_to_cpu(dev_pg0->flags);
	tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN);

	/* keep the SCSI target private data in sync with page0 */
	if (tgtdev->starget && tgtdev->starget->hostdata) {
		scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
		    tgtdev->starget->hostdata;
		scsi_tgt_priv_data->perst_id = tgtdev->perst_id;
		scsi_tgt_priv_data->dev_handle = tgtdev->dev_handle;
		scsi_tgt_priv_data->dev_type = tgtdev->dev_type;
	}

	switch (dev_pg0->access_status) {
	case MPI3_DEVICE0_ASTATUS_NO_ERRORS:
	case MPI3_DEVICE0_ASTATUS_PREPARE:
	case MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION:
	case MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY:
		break;
	default:
		/* any other access status keeps the device hidden */
		tgtdev->is_hidden = 1;
		break;
	}

	switch (tgtdev->dev_type) {
	case MPI3_DEVICE_DEVFORM_SAS_SATA:
	{
		struct mpi3_device0_sas_sata_format *sasinf =
		    &dev_pg0->device_specific.sas_sata_format;
		u16 dev_info = le16_to_cpu(sasinf->device_info);

		tgtdev->dev_spec.sas_sata_inf.dev_info = dev_info;
		tgtdev->dev_spec.sas_sata_inf.sas_address =
		    le64_to_cpu(sasinf->sas_address);
		/* hide anything that is not an SSP/STP-SATA end device */
		if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) !=
		    MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE)
			tgtdev->is_hidden = 1;
		else if (!(dev_info & (MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET |
		    MPI3_SAS_DEVICE_INFO_SSP_TARGET)))
			tgtdev->is_hidden = 1;
		break;
	}
	case MPI3_DEVICE_DEVFORM_PCIE:
	{
		struct mpi3_device0_pcie_format *pcieinf =
		    &dev_pg0->device_specific.pcie_format;
		u16 dev_info = le16_to_cpu(pcieinf->device_info);

		tgtdev->dev_spec.pcie_inf.dev_info = dev_info;
		tgtdev->dev_spec.pcie_inf.capb =
		    le32_to_cpu(pcieinf->capabilities);
		/* start from defaults; page0 overrides them when error free */
		tgtdev->dev_spec.pcie_inf.mdts = MPI3MR_DEFAULT_MDTS;
		/* 2^12 = 4096 */
		tgtdev->dev_spec.pcie_inf.pgsz = 12;
		if (dev_pg0->access_status == MPI3_DEVICE0_ASTATUS_NO_ERRORS) {
			tgtdev->dev_spec.pcie_inf.mdts =
			    le32_to_cpu(pcieinf->maximum_data_transfer_size);
			tgtdev->dev_spec.pcie_inf.pgsz = pcieinf->page_size;
			/* enforce a floor of MPI3MR_INTADMCMD_TIMEOUT on both */
			tgtdev->dev_spec.pcie_inf.reset_to =
			    max_t(u8, pcieinf->controller_reset_to,
			    MPI3MR_INTADMCMD_TIMEOUT);
			tgtdev->dev_spec.pcie_inf.abort_to =
			    max_t(u8, pcieinf->nvme_abort_to,
			    MPI3MR_INTADMCMD_TIMEOUT);
		}
		/* cap MDTS at 1MB */
		if (tgtdev->dev_spec.pcie_inf.mdts > (1024 * 1024))
			tgtdev->dev_spec.pcie_inf.mdts = (1024 * 1024);
		/* only NVMe and PCIe-SCSI devices are exposed */
		if (((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
		    ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE))
			tgtdev->is_hidden = 1;
		if (!mrioc->shost)
			break;
		prot_mask = scsi_host_get_prot(mrioc->shost);
		if (prot_mask & SHOST_DIX_TYPE0_PROTECTION) {
			scsi_host_set_prot(mrioc->shost, prot_mask & 0x77);
			ioc_info(mrioc,
			    "%s : Disabling DIX0 prot capability\n", __func__);
			ioc_info(mrioc,
			    "because HBA does not support DIX0 operation on NVME drives\n");
		}
		break;
	}
	case MPI3_DEVICE_DEVFORM_VD:
	{
		struct mpi3_device0_vd_format *vdinf =
		    &dev_pg0->device_specific.vd_format;

		tgtdev->dev_spec.vol_inf.state = vdinf->vd_state;
		if (vdinf->vd_state == MPI3_DEVICE0_VD_STATE_OFFLINE)
			tgtdev->is_hidden = 1;
		break;
	}
	default:
		break;
	}
}

/**
 * mpi3mr_devstatuschg_evt_bh - DevStatusChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event information.
 *
 * Process Device status Change event and based on device's new
 * information, either expose the device to the upper layers, or
 * remove the device from upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	u16 dev_handle = 0;
	u8 uhide = 0, delete = 0, cleanup = 0;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3_event_data_device_status_change *evtdata =
	    (struct mpi3_event_data_device_status_change *)fwevt->event_data;

	dev_handle = le16_to_cpu(evtdata->dev_handle);
	ioc_info(mrioc,
	    "%s :device status change: handle(0x%04x): reason code(0x%x)\n",
	    __func__, dev_handle, evtdata->reason_code);
	/* translate the reason code into the required actions */
	switch (evtdata->reason_code) {
	case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
		delete = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN:
		uhide = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
		delete = 1;
		cleanup = 1;
		break;
	default:
		ioc_info(mrioc, "%s :Unhandled reason code(0x%x)\n", __func__,
		    evtdata->reason_code);
		break;
	}

	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
	if (!tgtdev)
		goto out;
	if (uhide) {
		tgtdev->is_hidden = 0;
		if (!tgtdev->host_exposed)
			mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
	}
	if (tgtdev->starget && tgtdev->starget->hostdata) {
		if (delete)
			mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
	}
	if (cleanup) {
		/*
		 * Device is being deleted: drop one extra reference here in
		 * addition to the lookup reference dropped at 'out' below.
		 */
		mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
		mpi3mr_tgtdev_put(tgtdev);
	}

out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);
}

1111 /** 1112 * mpi3mr_devinfochg_evt_bh - DeviceInfoChange evt bottomhalf 1113 * @mrioc: Adapter instance reference 1114 * @dev_pg0: New device page0 1115 * 1116 * Process Device Info Change event and based on device's new 1117 * information, either expose the device to the upper layers, or 1118 * remove the device from upper layers or update the details of 1119 * the device. 1120 * 1121 * Return: Nothing. 1122 */ 1123 static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_ioc *mrioc, 1124 struct mpi3_device_page0 *dev_pg0) 1125 { 1126 struct mpi3mr_tgt_dev *tgtdev = NULL; 1127 u16 dev_handle = 0, perst_id = 0; 1128 1129 perst_id = le16_to_cpu(dev_pg0->persistent_id); 1130 dev_handle = le16_to_cpu(dev_pg0->dev_handle); 1131 ioc_info(mrioc, 1132 "%s :Device info change: handle(0x%04x): persist_id(0x%x)\n", 1133 __func__, dev_handle, perst_id); 1134 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle); 1135 if (!tgtdev) 1136 goto out; 1137 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0); 1138 if (!tgtdev->is_hidden && !tgtdev->host_exposed) 1139 mpi3mr_report_tgtdev_to_host(mrioc, perst_id); 1140 if (tgtdev->is_hidden && tgtdev->host_exposed) 1141 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); 1142 if (!tgtdev->is_hidden && tgtdev->host_exposed && tgtdev->starget) 1143 starget_for_each_device(tgtdev->starget, (void *)tgtdev, 1144 mpi3mr_update_sdev); 1145 out: 1146 if (tgtdev) 1147 mpi3mr_tgtdev_put(tgtdev); 1148 } 1149 1150 /** 1151 * mpi3mr_sastopochg_evt_debug - SASTopoChange details 1152 * @mrioc: Adapter instance reference 1153 * @event_data: SAS topology change list event data 1154 * 1155 * Prints information about the SAS topology change event. 1156 * 1157 * Return: Nothing. 
 */
static void
mpi3mr_sastopochg_evt_debug(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_data_sas_topology_change_list *event_data)
{
	int i;
	u16 handle;
	u8 reason_code, phy_number;
	char *status_str = NULL;
	u8 link_rate, prev_link_rate;

	/* Decode the expander-level status into a printable string */
	switch (event_data->exp_status) {
	case MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
		status_str = "remove";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_RESPONDING:
		status_str = "responding";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
		status_str = "remove delay";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER:
		status_str = "direct attached";
		break;
	default:
		status_str = "unknown status";
		break;
	}
	ioc_info(mrioc, "%s :sas topology change: (%s)\n",
	    __func__, status_str);
	ioc_info(mrioc,
	    "%s :\texpander_handle(0x%04x), enclosure_handle(0x%04x) start_phy(%02d), num_entries(%d)\n",
	    __func__, le16_to_cpu(event_data->expander_dev_handle),
	    le16_to_cpu(event_data->enclosure_handle),
	    event_data->start_phy_num, event_data->num_entries);
	/* One log line per phy entry that has an attached device */
	for (i = 0; i < event_data->num_entries; i++) {
		handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		phy_number = event_data->start_phy_num + i;
		reason_code = event_data->phy_entry[i].status &
		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
		switch (reason_code) {
		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
			status_str = "target remove";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
			status_str = "delay target remove";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
			status_str = "link status change";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
			status_str = "link status no change";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
			status_str = "target responding";
			break;
		default:
			status_str = "unknown";
			break;
		}
		/* link_rate field packs new rate (high nibble), old (low) */
		link_rate = event_data->phy_entry[i].link_rate >> 4;
		prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
		ioc_info(mrioc,
		    "%s :\tphy(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
		    __func__, phy_number, handle, status_str, link_rate,
		    prev_link_rate);
	}
}

/**
 * mpi3mr_sastopochg_evt_bh - SASTopologyChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Prints information about the SAS topology change event and
 * for "not responding" event code, removes the device from the
 * upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_sastopochg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	struct mpi3_event_data_sas_topology_change_list *event_data =
	    (struct mpi3_event_data_sas_topology_change_list *)fwevt->event_data;
	int i;
	u16 handle;
	u8 reason_code;
	struct mpi3mr_tgt_dev *tgtdev = NULL;

	mpi3mr_sastopochg_evt_debug(mrioc, event_data);

	for (i = 0; i < event_data->num_entries; i++) {
		/* Abort processing if this event was discarded (e.g. reset) */
		if (fwevt->discard)
			return;
		handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		/* Takes a reference on success; released at loop bottom */
		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
		if (!tgtdev)
			continue;

		reason_code = event_data->phy_entry[i].status &
		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;

		switch (reason_code) {
		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
			if (tgtdev->host_exposed)
				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
			/*
			 * NOTE(review): this put appears to drop the
			 * reference held via the driver's tgtdev list;
			 * confirm against mpi3mr_tgtdev_del_from_list().
			 */
			mpi3mr_tgtdev_put(tgtdev);
			break;
		default:
			break;
		}
		/* Balance the mpi3mr_get_tgtdev_by_handle() reference */
		if (tgtdev)
			mpi3mr_tgtdev_put(tgtdev);
	}
}

/**
 * mpi3mr_pcietopochg_evt_debug - PCIeTopoChange details
 * @mrioc:
Adapter instance reference 1283 * @event_data: PCIe topology change list event data 1284 * 1285 * Prints information about the PCIe topology change event. 1286 * 1287 * Return: Nothing. 1288 */ 1289 static void 1290 mpi3mr_pcietopochg_evt_debug(struct mpi3mr_ioc *mrioc, 1291 struct mpi3_event_data_pcie_topology_change_list *event_data) 1292 { 1293 int i; 1294 u16 handle; 1295 u16 reason_code; 1296 u8 port_number; 1297 char *status_str = NULL; 1298 u8 link_rate, prev_link_rate; 1299 1300 switch (event_data->switch_status) { 1301 case MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING: 1302 status_str = "remove"; 1303 break; 1304 case MPI3_EVENT_PCIE_TOPO_SS_RESPONDING: 1305 status_str = "responding"; 1306 break; 1307 case MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING: 1308 status_str = "remove delay"; 1309 break; 1310 case MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH: 1311 status_str = "direct attached"; 1312 break; 1313 default: 1314 status_str = "unknown status"; 1315 break; 1316 } 1317 ioc_info(mrioc, "%s :pcie topology change: (%s)\n", 1318 __func__, status_str); 1319 ioc_info(mrioc, 1320 "%s :\tswitch_handle(0x%04x), enclosure_handle(0x%04x) start_port(%02d), num_entries(%d)\n", 1321 __func__, le16_to_cpu(event_data->switch_dev_handle), 1322 le16_to_cpu(event_data->enclosure_handle), 1323 event_data->start_port_num, event_data->num_entries); 1324 for (i = 0; i < event_data->num_entries; i++) { 1325 handle = 1326 le16_to_cpu(event_data->port_entry[i].attached_dev_handle); 1327 if (!handle) 1328 continue; 1329 port_number = event_data->start_port_num + i; 1330 reason_code = event_data->port_entry[i].port_status; 1331 switch (reason_code) { 1332 case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING: 1333 status_str = "target remove"; 1334 break; 1335 case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING: 1336 status_str = "delay target remove"; 1337 break; 1338 case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED: 1339 status_str = "link status change"; 1340 break; 1341 case 
MPI3_EVENT_PCIE_TOPO_PS_NO_CHANGE: 1342 status_str = "link status no change"; 1343 break; 1344 case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING: 1345 status_str = "target responding"; 1346 break; 1347 default: 1348 status_str = "unknown"; 1349 break; 1350 } 1351 link_rate = event_data->port_entry[i].current_port_info & 1352 MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK; 1353 prev_link_rate = event_data->port_entry[i].previous_port_info & 1354 MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK; 1355 ioc_info(mrioc, 1356 "%s :\tport(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n", 1357 __func__, port_number, handle, status_str, link_rate, 1358 prev_link_rate); 1359 } 1360 } 1361 1362 /** 1363 * mpi3mr_pcietopochg_evt_bh - PCIeTopologyChange evt bottomhalf 1364 * @mrioc: Adapter instance reference 1365 * @fwevt: Firmware event reference 1366 * 1367 * Prints information about the PCIe topology change event and 1368 * for "not responding" event code, removes the device from the 1369 * upper layers. 1370 * 1371 * Return: Nothing. 
 */
static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	struct mpi3_event_data_pcie_topology_change_list *event_data =
	    (struct mpi3_event_data_pcie_topology_change_list *)fwevt->event_data;
	int i;
	u16 handle;
	u8 reason_code;
	struct mpi3mr_tgt_dev *tgtdev = NULL;

	mpi3mr_pcietopochg_evt_debug(mrioc, event_data);

	for (i = 0; i < event_data->num_entries; i++) {
		/* Abort processing if this event was discarded (e.g. reset) */
		if (fwevt->discard)
			return;
		handle =
		    le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		/* Takes a reference on success; released at loop bottom */
		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
		if (!tgtdev)
			continue;

		reason_code = event_data->port_entry[i].port_status;

		switch (reason_code) {
		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			if (tgtdev->host_exposed)
				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
			/*
			 * NOTE(review): this put appears to drop the
			 * reference held via the driver's tgtdev list;
			 * confirm against mpi3mr_tgtdev_del_from_list().
			 */
			mpi3mr_tgtdev_put(tgtdev);
			break;
		default:
			break;
		}
		/* Balance the mpi3mr_get_tgtdev_by_handle() reference */
		if (tgtdev)
			mpi3mr_tgtdev_put(tgtdev);
	}
}

/**
 * mpi3mr_logdata_evt_bh - Log data event bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Extracts the event data and calls application interfacing
 * function to process the event further.
 *
 * Return: Nothing.
 */
static void mpi3mr_logdata_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	mpi3mr_app_save_logdata(mrioc, fwevt->event_data,
	    fwevt->event_data_size);
}

/**
 * mpi3mr_fwevt_bh - Firmware event bottomhalf handler
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Identifies the firmware event and calls corresponding bottom
 * half handler and sends event acknowledgment if required.
 *
 * Return: Nothing
1439 */ 1440 static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc, 1441 struct mpi3mr_fwevt *fwevt) 1442 { 1443 mpi3mr_fwevt_del_from_list(mrioc, fwevt); 1444 mrioc->current_event = fwevt; 1445 1446 if (mrioc->stop_drv_processing) 1447 goto out; 1448 1449 if (!fwevt->process_evt) 1450 goto evt_ack; 1451 1452 switch (fwevt->event_id) { 1453 case MPI3_EVENT_DEVICE_ADDED: 1454 { 1455 struct mpi3_device_page0 *dev_pg0 = 1456 (struct mpi3_device_page0 *)fwevt->event_data; 1457 mpi3mr_report_tgtdev_to_host(mrioc, 1458 le16_to_cpu(dev_pg0->persistent_id)); 1459 break; 1460 } 1461 case MPI3_EVENT_DEVICE_INFO_CHANGED: 1462 { 1463 mpi3mr_devinfochg_evt_bh(mrioc, 1464 (struct mpi3_device_page0 *)fwevt->event_data); 1465 break; 1466 } 1467 case MPI3_EVENT_DEVICE_STATUS_CHANGE: 1468 { 1469 mpi3mr_devstatuschg_evt_bh(mrioc, fwevt); 1470 break; 1471 } 1472 case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST: 1473 { 1474 mpi3mr_sastopochg_evt_bh(mrioc, fwevt); 1475 break; 1476 } 1477 case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: 1478 { 1479 mpi3mr_pcietopochg_evt_bh(mrioc, fwevt); 1480 break; 1481 } 1482 case MPI3_EVENT_LOG_DATA: 1483 { 1484 mpi3mr_logdata_evt_bh(mrioc, fwevt); 1485 break; 1486 } 1487 default: 1488 break; 1489 } 1490 1491 evt_ack: 1492 if (fwevt->send_ack) 1493 mpi3mr_process_event_ack(mrioc, fwevt->event_id, 1494 fwevt->evt_ctx); 1495 out: 1496 /* Put fwevt reference count to neutralize kref_init increment */ 1497 mpi3mr_fwevt_put(fwevt); 1498 mrioc->current_event = NULL; 1499 } 1500 1501 /** 1502 * mpi3mr_fwevt_worker - Firmware event worker 1503 * @work: Work struct containing firmware event 1504 * 1505 * Extracts the firmware event and calls mpi3mr_fwevt_bh. 1506 * 1507 * Return: Nothing. 
1508 */ 1509 static void mpi3mr_fwevt_worker(struct work_struct *work) 1510 { 1511 struct mpi3mr_fwevt *fwevt = container_of(work, struct mpi3mr_fwevt, 1512 work); 1513 mpi3mr_fwevt_bh(fwevt->mrioc, fwevt); 1514 /* 1515 * Put fwevt reference count after 1516 * dequeuing it from worker queue 1517 */ 1518 mpi3mr_fwevt_put(fwevt); 1519 } 1520 1521 /** 1522 * mpi3mr_create_tgtdev - Create and add a target device 1523 * @mrioc: Adapter instance reference 1524 * @dev_pg0: Device Page 0 data 1525 * 1526 * If the device specified by the device page 0 data is not 1527 * present in the driver's internal list, allocate the memory 1528 * for the device, populate the data and add to the list, else 1529 * update the device data. The key is persistent ID. 1530 * 1531 * Return: 0 on success, -ENOMEM on memory allocation failure 1532 */ 1533 static int mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc, 1534 struct mpi3_device_page0 *dev_pg0) 1535 { 1536 int retval = 0; 1537 struct mpi3mr_tgt_dev *tgtdev = NULL; 1538 u16 perst_id = 0; 1539 1540 perst_id = le16_to_cpu(dev_pg0->persistent_id); 1541 tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id); 1542 if (tgtdev) { 1543 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0); 1544 mpi3mr_tgtdev_put(tgtdev); 1545 } else { 1546 tgtdev = mpi3mr_alloc_tgtdev(); 1547 if (!tgtdev) 1548 return -ENOMEM; 1549 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0); 1550 mpi3mr_tgtdev_add_to_list(mrioc, tgtdev); 1551 } 1552 1553 return retval; 1554 } 1555 1556 /** 1557 * mpi3mr_flush_delayed_cmd_lists - Flush pending commands 1558 * @mrioc: Adapter instance reference 1559 * 1560 * Flush pending commands in the delayed lists due to a 1561 * controller reset or driver removal as a cleanup. 
1562 * 1563 * Return: Nothing 1564 */ 1565 void mpi3mr_flush_delayed_cmd_lists(struct mpi3mr_ioc *mrioc) 1566 { 1567 struct delayed_dev_rmhs_node *_rmhs_node; 1568 struct delayed_evt_ack_node *_evtack_node; 1569 1570 dprint_reset(mrioc, "flushing delayed dev_remove_hs commands\n"); 1571 while (!list_empty(&mrioc->delayed_rmhs_list)) { 1572 _rmhs_node = list_entry(mrioc->delayed_rmhs_list.next, 1573 struct delayed_dev_rmhs_node, list); 1574 list_del(&_rmhs_node->list); 1575 kfree(_rmhs_node); 1576 } 1577 dprint_reset(mrioc, "flushing delayed event ack commands\n"); 1578 while (!list_empty(&mrioc->delayed_evtack_cmds_list)) { 1579 _evtack_node = list_entry(mrioc->delayed_evtack_cmds_list.next, 1580 struct delayed_evt_ack_node, list); 1581 list_del(&_evtack_node->list); 1582 kfree(_evtack_node); 1583 } 1584 } 1585 1586 /** 1587 * mpi3mr_dev_rmhs_complete_iou - Device removal IOUC completion 1588 * @mrioc: Adapter instance reference 1589 * @drv_cmd: Internal command tracker 1590 * 1591 * Issues a target reset TM to the firmware from the device 1592 * removal TM pend list or retry the removal handshake sequence 1593 * based on the IOU control request IOC status. 
 *
 * Return: Nothing
 */
static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	/* Index of this tracker within the dev_rmhs command pool */
	u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
	struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;

	/* Controller reset already cleaned up; just release the tracker */
	if (drv_cmd->state & MPI3MR_CMD_RESET)
		goto clear_drv_cmd;

	ioc_info(mrioc,
	    "%s :dev_rmhs_iouctrl_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x)\n",
	    __func__, drv_cmd->dev_handle, drv_cmd->ioc_status,
	    drv_cmd->ioc_loginfo);
	if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		/* Retry the whole TM+IOUC handshake a bounded number of times */
		if (drv_cmd->retry_count < MPI3MR_DEV_RMHS_RETRY_COUNT) {
			drv_cmd->retry_count++;
			ioc_info(mrioc,
			    "%s :dev_rmhs_iouctrl_complete: handle(0x%04x)retrying handshake retry=%d\n",
			    __func__, drv_cmd->dev_handle,
			    drv_cmd->retry_count);
			mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle,
			    drv_cmd, drv_cmd->iou_rc);
			return;
		}
		ioc_err(mrioc,
		    "%s :dev removal handshake failed after all retries: handle(0x%04x)\n",
		    __func__, drv_cmd->dev_handle);
	} else {
		ioc_info(mrioc,
		    "%s :dev removal handshake completed successfully: handle(0x%04x)\n",
		    __func__, drv_cmd->dev_handle);
		/* Handshake done: the handle is no longer removal-pending */
		clear_bit(drv_cmd->dev_handle, mrioc->removepend_bitmap);
	}

	/* Reuse this tracker for the oldest postponed removal, if any */
	if (!list_empty(&mrioc->delayed_rmhs_list)) {
		delayed_dev_rmhs = list_entry(mrioc->delayed_rmhs_list.next,
		    struct delayed_dev_rmhs_node, list);
		drv_cmd->dev_handle = delayed_dev_rmhs->handle;
		drv_cmd->retry_count = 0;
		drv_cmd->iou_rc = delayed_dev_rmhs->iou_rc;
		ioc_info(mrioc,
		    "%s :dev_rmhs_iouctrl_complete: processing delayed TM: handle(0x%04x)\n",
		    __func__, drv_cmd->dev_handle);
		mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle, drv_cmd,
		    drv_cmd->iou_rc);
		list_del(&delayed_dev_rmhs->list);
		kfree(delayed_dev_rmhs);
		return;
	}

clear_drv_cmd:
	/* Return the tracker to the free pool */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->retry_count = 0;
	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	clear_bit(cmd_idx, mrioc->devrem_bitmap);
}

/**
 * mpi3mr_dev_rmhs_complete_tm - Device removal TM completion
 * @mrioc: Adapter instance reference
 * @drv_cmd: Internal command tracker
 *
 * Issues a target reset TM to the firmware from the device
 * removal TM pend list or issue IO unit control request as
 * part of device removal or hidden acknowledgment handshake.
 *
 * Return: Nothing
 */
static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	struct mpi3_iounit_control_request iou_ctrl;
	u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
	struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
	int retval;

	/* Controller reset already cleaned up; just release the tracker */
	if (drv_cmd->state & MPI3MR_CMD_RESET)
		goto clear_drv_cmd;

	if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
		tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;

	if (tm_reply)
		pr_info(IOCNAME
		    "dev_rmhs_tr_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x), term_count(%d)\n",
		    mrioc->name, drv_cmd->dev_handle, drv_cmd->ioc_status,
		    drv_cmd->ioc_loginfo,
		    le32_to_cpu(tm_reply->termination_count));

	pr_info(IOCNAME "Issuing IOU CTL: handle(0x%04x) dev_rmhs idx(%d)\n",
	    mrioc->name, drv_cmd->dev_handle, cmd_idx);

	memset(&iou_ctrl, 0, sizeof(iou_ctrl));

	/* Second handshake step: IO unit control with the stored reason code */
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 0;
	drv_cmd->callback = mpi3mr_dev_rmhs_complete_iou;
	iou_ctrl.operation = drv_cmd->iou_rc;
	iou_ctrl.param16[0] = cpu_to_le16(drv_cmd->dev_handle);
	iou_ctrl.host_tag = cpu_to_le16(drv_cmd->host_tag);
	iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;

	retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl, sizeof(iou_ctrl),
	    1);
	if (retval) {
		pr_err(IOCNAME "Issue DevRmHsTMIOUCTL: Admin post failed\n",
		    mrioc->name);
		goto clear_drv_cmd;
	}

	return;
clear_drv_cmd:
	/* Return the tracker to the free pool */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	drv_cmd->retry_count = 0;
	clear_bit(cmd_idx, mrioc->devrem_bitmap);
}

/**
 * mpi3mr_dev_rmhs_send_tm - Issue TM for device removal
 * @mrioc: Adapter instance reference
 * @handle: Device handle
 * @cmdparam: Internal command tracker
 * @iou_rc: IO unit reason code
 *
 * Issues a target reset TM to the firmware or add it to a pend
 * list as part of device removal or hidden acknowledgment
 * handshake.
 *
 * Return: Nothing
 */
static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
	struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc)
{
	struct mpi3_scsi_task_mgmt_request tm_req;
	int retval = 0;
	u16 cmd_idx = MPI3MR_NUM_DEVRMCMD;
	u8 retrycount = 5;
	struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
	struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;

	/* Caller already owns a tracker (retry/delayed path) */
	if (drv_cmd)
		goto issue_cmd;
	/* Atomically reserve a free tracker slot in the devrem bitmap */
	do {
		cmd_idx = find_first_zero_bit(mrioc->devrem_bitmap,
		    MPI3MR_NUM_DEVRMCMD);
		if (cmd_idx < MPI3MR_NUM_DEVRMCMD) {
			if (!test_and_set_bit(cmd_idx, mrioc->devrem_bitmap))
				break;
			/* Lost the race for this slot; search again */
			cmd_idx = MPI3MR_NUM_DEVRMCMD;
		}
	} while (retrycount--);

	if (cmd_idx >= MPI3MR_NUM_DEVRMCMD) {
		/* No tracker available: postpone the handshake */
		delayed_dev_rmhs = kzalloc(sizeof(*delayed_dev_rmhs),
		    GFP_ATOMIC);
		if (!delayed_dev_rmhs)
			return;
		INIT_LIST_HEAD(&delayed_dev_rmhs->list);
		delayed_dev_rmhs->handle = handle;
		delayed_dev_rmhs->iou_rc = iou_rc;
		list_add_tail(&delayed_dev_rmhs->list,
		    &mrioc->delayed_rmhs_list);
		ioc_info(mrioc, "%s :DevRmHs: tr:handle(0x%04x) is postponed\n",
		    __func__, handle);
		return;
	}
	drv_cmd = &mrioc->dev_rmhs_cmds[cmd_idx];

issue_cmd:
	cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
	ioc_info(mrioc,
	    "%s :Issuing TR TM: for devhandle 0x%04x with dev_rmhs %d\n",
	    __func__, handle, cmd_idx);

	memset(&tm_req, 0, sizeof(tm_req));
	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
		ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__);
		goto out;
	}
	/* First handshake step: target reset TM for the device handle */
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 0;
	drv_cmd->callback = mpi3mr_dev_rmhs_complete_tm;
	drv_cmd->dev_handle = handle;
	drv_cmd->iou_rc = iou_rc;
	tm_req.dev_handle = cpu_to_le16(handle);
	tm_req.task_type = MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
	tm_req.host_tag = cpu_to_le16(drv_cmd->host_tag);
	tm_req.task_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INVALID);
	tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT;

	/* Mark the handle removal-pending before posting the request */
	set_bit(handle, mrioc->removepend_bitmap);
	retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1);
	if (retval) {
		ioc_err(mrioc, "%s :Issue DevRmHsTM: Admin Post failed\n",
		    __func__);
		goto out_failed;
	}
out:
	return;
out_failed:
	/* Return the tracker to the free pool */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	drv_cmd->retry_count = 0;
	clear_bit(cmd_idx, mrioc->devrem_bitmap);
}

/**
 * mpi3mr_complete_evt_ack - event ack request completion
 * @mrioc: Adapter instance reference
 * @drv_cmd: Internal command tracker
 *
 * This is the completion handler for non blocking event
 * acknowledgment sent to the firmware and this will issue any
 * pending event acknowledgment request.
1815 * 1816 * Return: Nothing 1817 */ 1818 static void mpi3mr_complete_evt_ack(struct mpi3mr_ioc *mrioc, 1819 struct mpi3mr_drv_cmd *drv_cmd) 1820 { 1821 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN; 1822 struct delayed_evt_ack_node *delayed_evtack = NULL; 1823 1824 if (drv_cmd->state & MPI3MR_CMD_RESET) 1825 goto clear_drv_cmd; 1826 1827 if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) { 1828 dprint_event_th(mrioc, 1829 "immediate event ack failed with ioc_status(0x%04x) log_info(0x%08x)\n", 1830 (drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 1831 drv_cmd->ioc_loginfo); 1832 } 1833 1834 if (!list_empty(&mrioc->delayed_evtack_cmds_list)) { 1835 delayed_evtack = 1836 list_entry(mrioc->delayed_evtack_cmds_list.next, 1837 struct delayed_evt_ack_node, list); 1838 mpi3mr_send_event_ack(mrioc, delayed_evtack->event, drv_cmd, 1839 delayed_evtack->event_ctx); 1840 list_del(&delayed_evtack->list); 1841 kfree(delayed_evtack); 1842 return; 1843 } 1844 clear_drv_cmd: 1845 drv_cmd->state = MPI3MR_CMD_NOTUSED; 1846 drv_cmd->callback = NULL; 1847 clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap); 1848 } 1849 1850 /** 1851 * mpi3mr_send_event_ack - Issue event acknwoledgment request 1852 * @mrioc: Adapter instance reference 1853 * @event: MPI3 event id 1854 * @cmdparam: Internal command tracker 1855 * @event_ctx: event context 1856 * 1857 * Issues event acknowledgment request to the firmware if there 1858 * is a free command to send the event ack else it to a pend 1859 * list so that it will be processed on a completion of a prior 1860 * event acknowledgment . 
 *
 * Return: Nothing
 */
static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
	struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx)
{
	struct mpi3_event_ack_request evtack_req;
	int retval = 0;
	u8 retrycount = 5;
	u16 cmd_idx = MPI3MR_NUM_EVTACKCMD;
	struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
	struct delayed_evt_ack_node *delayed_evtack = NULL;

	/* Caller already owns a tracker (delayed-ack completion path) */
	if (drv_cmd) {
		dprint_event_th(mrioc,
		    "sending delayed event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
		    event, event_ctx);
		goto issue_cmd;
	}
	dprint_event_th(mrioc,
	    "sending event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
	    event, event_ctx);
	/* Atomically reserve a free tracker slot in the evtack bitmap */
	do {
		cmd_idx = find_first_zero_bit(mrioc->evtack_cmds_bitmap,
		    MPI3MR_NUM_EVTACKCMD);
		if (cmd_idx < MPI3MR_NUM_EVTACKCMD) {
			if (!test_and_set_bit(cmd_idx,
			    mrioc->evtack_cmds_bitmap))
				break;
			/* Lost the race for this slot; search again */
			cmd_idx = MPI3MR_NUM_EVTACKCMD;
		}
	} while (retrycount--);

	if (cmd_idx >= MPI3MR_NUM_EVTACKCMD) {
		/* No tracker available: postpone the event ack */
		delayed_evtack = kzalloc(sizeof(*delayed_evtack),
		    GFP_ATOMIC);
		if (!delayed_evtack)
			return;
		INIT_LIST_HEAD(&delayed_evtack->list);
		delayed_evtack->event = event;
		delayed_evtack->event_ctx = event_ctx;
		list_add_tail(&delayed_evtack->list,
		    &mrioc->delayed_evtack_cmds_list);
		dprint_event_th(mrioc,
		    "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is postponed\n",
		    event, event_ctx);
		return;
	}
	drv_cmd = &mrioc->evtack_cmds[cmd_idx];

issue_cmd:
	cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;

	memset(&evtack_req, 0, sizeof(evtack_req));
	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
		dprint_event_th(mrioc,
		    "sending event ack failed due to command in use\n");
		goto out;
	}
	/* Fire-and-forget: completion continues in mpi3mr_complete_evt_ack */
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 0;
	drv_cmd->callback = mpi3mr_complete_evt_ack;
	evtack_req.host_tag = cpu_to_le16(drv_cmd->host_tag);
	evtack_req.function = MPI3_FUNCTION_EVENT_ACK;
	evtack_req.event = event;
	evtack_req.event_context = cpu_to_le32(event_ctx);
	retval = mpi3mr_admin_request_post(mrioc, &evtack_req,
	    sizeof(evtack_req), 1);
	if (retval) {
		dprint_event_th(mrioc,
		    "posting event ack request is failed\n");
		goto out_failed;
	}

	dprint_event_th(mrioc,
	    "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is posted\n",
	    event, event_ctx);
out:
	return;
out_failed:
	/* Return the tracker to the free pool */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap);
}

/**
 * mpi3mr_pcietopochg_evt_th - PCIETopologyChange evt tophalf
 * @mrioc: Adapter instance reference
 * @event_reply: event data
 *
 * Checks for the reason code and based on that either block I/O
 * to device, or unblock I/O to the device, or start the device
 * removal handshake with reason as remove with the firmware for
 * PCIe devices.
 *
 * Return: Nothing
 */
static void mpi3mr_pcietopochg_evt_th(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	struct mpi3_event_data_pcie_topology_change_list *topo_evt =
	    (struct mpi3_event_data_pcie_topology_change_list *)event_reply->event_data;
	int i;
	u16 handle;
	u8 reason_code;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;

	for (i = 0; i < topo_evt->num_entries; i++) {
		handle = le16_to_cpu(topo_evt->port_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		reason_code = topo_evt->port_entry[i].port_status;
		scsi_tgt_priv_data = NULL;
		/* Takes a reference on success; released at loop bottom */
		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
		if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
			scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
			    tgtdev->starget->hostdata;
		switch (reason_code) {
		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			/* Device is gone: unblock I/O and start removal */
			if (scsi_tgt_priv_data) {
				scsi_tgt_priv_data->dev_removed = 1;
				scsi_tgt_priv_data->dev_removedelay = 0;
				atomic_set(&scsi_tgt_priv_data->block_io, 0);
			}
			mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL,
			    MPI3_CTRL_OP_REMOVE_DEVICE);
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
			/* Device may come back: block I/O while waiting */
			if (scsi_tgt_priv_data) {
				scsi_tgt_priv_data->dev_removedelay = 1;
				atomic_inc(&scsi_tgt_priv_data->block_io);
			}
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
			/* Device came back: undo the delayed-removal block */
			if (scsi_tgt_priv_data &&
			    scsi_tgt_priv_data->dev_removedelay) {
				scsi_tgt_priv_data->dev_removedelay = 0;
				atomic_dec_if_positive
				    (&scsi_tgt_priv_data->block_io);
			}
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
		default:
			break;
		}
		if (tgtdev)
			mpi3mr_tgtdev_put(tgtdev);
	}
}

/**
 * mpi3mr_sastopochg_evt_th - SASTopologyChange evt tophalf
 * @mrioc: Adapter instance reference
 * @event_reply: event data
 *
 * Checks for the reason code and based on that either block I/O
 * to device, or unblock I/O to the device, or start the device
 * removal handshake with reason as remove with the firmware for
 * SAS/SATA devices.
 *
 * Return: Nothing
 */
static void mpi3mr_sastopochg_evt_th(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	struct mpi3_event_data_sas_topology_change_list *topo_evt =
	    (struct mpi3_event_data_sas_topology_change_list *)event_reply->event_data;
	int i;
	u16 handle;
	u8 reason_code;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;

	for (i = 0; i < topo_evt->num_entries; i++) {
		handle = le16_to_cpu(topo_evt->phy_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		reason_code = topo_evt->phy_entry[i].status &
		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
		scsi_tgt_priv_data = NULL;
		/* Takes a reference on success; released at loop bottom */
		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
		if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
			scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
			    tgtdev->starget->hostdata;
		switch (reason_code) {
		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
			/* Device is gone: unblock I/O and start removal */
			if (scsi_tgt_priv_data) {
				scsi_tgt_priv_data->dev_removed = 1;
				scsi_tgt_priv_data->dev_removedelay = 0;
				atomic_set(&scsi_tgt_priv_data->block_io, 0);
			}
			mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL,
			    MPI3_CTRL_OP_REMOVE_DEVICE);
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
			/* Device may come back: block I/O while waiting */
			if (scsi_tgt_priv_data) {
				scsi_tgt_priv_data->dev_removedelay = 1;
				atomic_inc(&scsi_tgt_priv_data->block_io);
			}
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
			/* Device came back: undo the delayed-removal block */
			if (scsi_tgt_priv_data &&
			    scsi_tgt_priv_data->dev_removedelay) {
				scsi_tgt_priv_data->dev_removedelay = 0;
				atomic_dec_if_positive
				    (&scsi_tgt_priv_data->block_io);
			}
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
		default:
			break;
		}
		if (tgtdev)
			mpi3mr_tgtdev_put(tgtdev);
	}
}

/**
 * mpi3mr_devstatuschg_evt_th - DeviceStatusChange evt tophalf
 * @mrioc: Adapter instance reference
 * @event_reply: event data
 *
 * Checks for the reason code and based on that either block I/O
 * to device, or unblock I/O to the device, or start the device
 * removal handshake with reason as remove/hide acknowledgment
 * with the firmware.
 *
 * Return: Nothing
 */
static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	u16 dev_handle = 0;
	u8 ublock = 0, block = 0, hide = 0, delete = 0, remove = 0;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
	struct mpi3_event_data_device_status_change *evtdata =
	    (struct mpi3_event_data_device_status_change *)event_reply->event_data;

	if (mrioc->stop_drv_processing)
		goto out;

	dev_handle = le16_to_cpu(evtdata->dev_handle);

	/* Translate the firmware reason code into action flags */
	switch (evtdata->reason_code) {
	case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT:
	case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT:
		block = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
		delete = 1;
		hide = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
		delete = 1;
		remove = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP:
	case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP:
		ublock = 1;
		break;
	default:
		break;
	}

	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
	if (!tgtdev)
		goto out;
	if (hide)
		tgtdev->is_hidden = hide;
	if (tgtdev->starget && tgtdev->starget->hostdata) {
		scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
		    tgtdev->starget->hostdata;
		if (block)
			atomic_inc(&scsi_tgt_priv_data->block_io);
		if (delete)
			scsi_tgt_priv_data->dev_removed = 1;
		if (ublock)
			atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
	}
	/* Kick off the removal / hidden-ack handshake with the firmware */
	if (remove)
		mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL,
		    MPI3_CTRL_OP_REMOVE_DEVICE);
	if (hide)
		mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL,
		    MPI3_CTRL_OP_HIDDEN_ACK);

out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);
}

/**
 * mpi3mr_preparereset_evt_th - Prepare for reset event tophalf
 * @mrioc: Adapter instance reference
 * @event_reply: event data
 *
 * Blocks and unblocks host level I/O based on the reason code
 *
 * Return: Nothing
 */
static void mpi3mr_preparereset_evt_th(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	struct mpi3_event_data_prepare_for_reset *evtdata =
	    (struct mpi3_event_data_prepare_for_reset *)event_reply->event_data;

	if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_START) {
		dprint_event_th(mrioc,
		    "prepare for reset event top half with rc=start\n");
		/* Already preparing: nothing more to do (and no ack below) */
		if (mrioc->prepare_for_reset)
			return;
		mrioc->prepare_for_reset = 1;
		mrioc->prepare_for_reset_timeout_counter = 0;
	} else if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_ABORT) {
		dprint_event_th(mrioc,
		    "prepare for reset top half with rc=abort\n");
		mrioc->prepare_for_reset = 0;
		mrioc->prepare_for_reset_timeout_counter = 0;
	}
	/* Ack immediately from the top half if the firmware requires it */
	if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
	    == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
		mpi3mr_send_event_ack(mrioc, event_reply->event, NULL,
		    le32_to_cpu(event_reply->event_context));
}

/**
 * mpi3mr_energypackchg_evt_th - Energy pack change evt tophalf
 * @mrioc: Adapter instance
reference 2191 * @event_reply: event data 2192 * 2193 * Identifies the new shutdown timeout value and update. 2194 * 2195 * Return: Nothing 2196 */ 2197 static void mpi3mr_energypackchg_evt_th(struct mpi3mr_ioc *mrioc, 2198 struct mpi3_event_notification_reply *event_reply) 2199 { 2200 struct mpi3_event_data_energy_pack_change *evtdata = 2201 (struct mpi3_event_data_energy_pack_change *)event_reply->event_data; 2202 u16 shutdown_timeout = le16_to_cpu(evtdata->shutdown_timeout); 2203 2204 if (shutdown_timeout <= 0) { 2205 ioc_warn(mrioc, 2206 "%s :Invalid Shutdown Timeout received = %d\n", 2207 __func__, shutdown_timeout); 2208 return; 2209 } 2210 2211 ioc_info(mrioc, 2212 "%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n", 2213 __func__, mrioc->facts.shutdown_timeout, shutdown_timeout); 2214 mrioc->facts.shutdown_timeout = shutdown_timeout; 2215 } 2216 2217 /** 2218 * mpi3mr_cablemgmt_evt_th - Cable management event tophalf 2219 * @mrioc: Adapter instance reference 2220 * @event_reply: event data 2221 * 2222 * Displays Cable manegemt event details. 
2223 * 2224 * Return: Nothing 2225 */ 2226 static void mpi3mr_cablemgmt_evt_th(struct mpi3mr_ioc *mrioc, 2227 struct mpi3_event_notification_reply *event_reply) 2228 { 2229 struct mpi3_event_data_cable_management *evtdata = 2230 (struct mpi3_event_data_cable_management *)event_reply->event_data; 2231 2232 switch (evtdata->status) { 2233 case MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER: 2234 { 2235 ioc_info(mrioc, "An active cable with receptacle_id %d cannot be powered.\n" 2236 "Devices connected to this cable are not detected.\n" 2237 "This cable requires %d mW of power.\n", 2238 evtdata->receptacle_id, 2239 le32_to_cpu(evtdata->active_cable_power_requirement)); 2240 break; 2241 } 2242 case MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED: 2243 { 2244 ioc_info(mrioc, "A cable with receptacle_id %d is not running at optimal speed\n", 2245 evtdata->receptacle_id); 2246 break; 2247 } 2248 default: 2249 break; 2250 } 2251 } 2252 2253 /** 2254 * mpi3mr_os_handle_events - Firmware event handler 2255 * @mrioc: Adapter instance reference 2256 * @event_reply: event data 2257 * 2258 * Identify whteher the event has to handled and acknowledged 2259 * and either process the event in the tophalf and/or schedule a 2260 * bottom half through mpi3mr_fwevt_worker. 
 *
 * Return: Nothing
 */
void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	u16 evt_type, sz;
	struct mpi3mr_fwevt *fwevt = NULL;
	bool ack_req = 0, process_evt_bh = 0;

	if (mrioc->stop_drv_processing)
		return;

	/* Does the firmware require an explicit acknowledgment? */
	if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
	    == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
		ack_req = 1;

	evt_type = event_reply->event;

	switch (evt_type) {
	case MPI3_EVENT_DEVICE_ADDED:
	{
		struct mpi3_device_page0 *dev_pg0 =
		    (struct mpi3_device_page0 *)event_reply->event_data;
		if (mpi3mr_create_tgtdev(mrioc, dev_pg0))
			ioc_err(mrioc,
			    "%s :Failed to add device in the device add event\n",
			    __func__);
		else
			process_evt_bh = 1;
		break;
	}
	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
	{
		process_evt_bh = 1;
		mpi3mr_devstatuschg_evt_th(mrioc, event_reply);
		break;
	}
	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
	{
		process_evt_bh = 1;
		mpi3mr_sastopochg_evt_th(mrioc, event_reply);
		break;
	}
	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
	{
		process_evt_bh = 1;
		mpi3mr_pcietopochg_evt_th(mrioc, event_reply);
		break;
	}
	case MPI3_EVENT_PREPARE_FOR_RESET:
	{
		/* The top half handler sends the ack itself */
		mpi3mr_preparereset_evt_th(mrioc, event_reply);
		ack_req = 0;
		break;
	}
	case MPI3_EVENT_DEVICE_INFO_CHANGED:
	case MPI3_EVENT_LOG_DATA:
	{
		process_evt_bh = 1;
		break;
	}
	case MPI3_EVENT_ENERGY_PACK_CHANGE:
	{
		mpi3mr_energypackchg_evt_th(mrioc, event_reply);
		break;
	}
	case MPI3_EVENT_CABLE_MGMT:
	{
		mpi3mr_cablemgmt_evt_th(mrioc, event_reply);
		break;
	}
	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
	case MPI3_EVENT_SAS_DISCOVERY:
	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
	case MPI3_EVENT_PCIE_ENUMERATION:
		break;
	default:
		ioc_info(mrioc, "%s :event 0x%02x is not handled\n",
		    __func__, evt_type);
		break;
	}
	/* Queue a bottom-half work item for deferred processing and/or ack */
	if (process_evt_bh || ack_req) {
		/* event_data_length is in 4-byte (dword) units */
		sz = event_reply->event_data_length * 4;
		fwevt = mpi3mr_alloc_fwevt(sz);
		if (!fwevt) {
			ioc_info(mrioc, "%s :failure at %s:%d/%s()!\n",
			    __func__, __FILE__, __LINE__, __func__);
			return;
		}

		memcpy(fwevt->event_data, event_reply->event_data, sz);
		fwevt->mrioc = mrioc;
		fwevt->event_id = evt_type;
		fwevt->send_ack = ack_req;
		fwevt->process_evt = process_evt_bh;
		fwevt->evt_ctx = le32_to_cpu(event_reply->event_context);
		mpi3mr_fwevt_add_to_list(mrioc, fwevt);
	}
}

/**
 * mpi3mr_setup_eedp - Setup EEDP information in MPI3 SCSI IO
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 * @scsiio_req: MPI3 SCSI IO request
 *
 * Identifies the protection information flags from the SCSI
 * command and set appropriate flags in the MPI3 SCSI IO
 * request.
 *
 * Return: Nothing
 */
static void mpi3mr_setup_eedp(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
{
	u16 eedp_flags = 0;
	unsigned char prot_op = scsi_get_prot_op(scmd);

	/* Map the SCSI protection operation to MPI3 EEDP operation flags */
	switch (prot_op) {
	case SCSI_PROT_NORMAL:
		return;
	case SCSI_PROT_READ_STRIP:
		eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT;
		scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
		break;
	case SCSI_PROT_WRITE_STRIP:
		eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE;
		scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
		break;
	case SCSI_PROT_READ_PASS:
		eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK;
		scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
		break;
	case SCSI_PROT_WRITE_PASS:
		if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) {
			/* IP checksum guard: controller regenerates the CRC */
			eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN;
			scsiio_req->sgl[0].eedp.application_tag_translation_mask =
			    0xffff;
		} else
			eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK;

		scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
		break;
	default:
		return;
	}

	if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK)
		eedp_flags |= MPI3_EEDPFLAGS_CHK_GUARD;

	if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM)
		eedp_flags |= MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM;

	if (scmd->prot_flags & SCSI_PROT_REF_CHECK) {
		eedp_flags |= MPI3_EEDPFLAGS_CHK_REF_TAG |
			MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;
		scsiio_req->cdb.eedp32.primary_reference_tag =
			cpu_to_be32(scsi_prot_ref_tag(scmd));
	}

	if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT)
		eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;

	eedp_flags |= MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE;

	/* Map the protection interval (sector size) to the MPI3 encoding */
	switch (scsi_prot_interval(scmd)) {
	case 512:
		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_512;
		break;
	case 520:
		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_520;
		break;
	case 4080:
		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4080;
		break;
	case 4088:
		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4088;
		break;
	case 4096:
		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4096;
		break;
	case 4104:
		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4104;
		break;
	case 4160:
		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4160;
		break;
	default:
		break;
	}

	/* sgl[0] carries the EEDP extended SGE for this I/O */
	scsiio_req->sgl[0].eedp.eedp_flags = cpu_to_le16(eedp_flags);
	scsiio_req->sgl[0].eedp.flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED;
}

/**
 * mpi3mr_build_sense_buffer - Map sense information
 * @desc: Sense type
 * @buf: Sense buffer to populate
 * @key: Sense key
 * @asc: Additional sense code
 * @ascq: Additional sense code qualifier
 *
 * Maps the given sense information into either descriptor or
 * fixed format sense data.
 *
 * Return: Nothing
 */
static inline void mpi3mr_build_sense_buffer(int desc, u8 *buf, u8 key,
	u8 asc, u8 ascq)
{
	if (desc) {
		buf[0] = 0x72;	/* descriptor, current */
		buf[1] = key;
		buf[2] = asc;
		buf[3] = ascq;
		buf[7] = 0;
	} else {
		buf[0] = 0x70;	/* fixed, current */
		buf[2] = key;
		buf[7] = 0xa;	/* additional sense length */
		buf[12] = asc;
		buf[13] = ascq;
	}
}

/**
 * mpi3mr_map_eedp_error - Map EEDP errors from IOC status
 * @scmd: SCSI command reference
 * @ioc_status: status of MPI3 request
 *
 * Maps the EEDP error status of the SCSI IO request to sense
 * data.
2502 * 2503 * Return: Nothing 2504 */ 2505 static void mpi3mr_map_eedp_error(struct scsi_cmnd *scmd, 2506 u16 ioc_status) 2507 { 2508 u8 ascq = 0; 2509 2510 switch (ioc_status) { 2511 case MPI3_IOCSTATUS_EEDP_GUARD_ERROR: 2512 ascq = 0x01; 2513 break; 2514 case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR: 2515 ascq = 0x02; 2516 break; 2517 case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR: 2518 ascq = 0x03; 2519 break; 2520 default: 2521 ascq = 0x00; 2522 break; 2523 } 2524 2525 mpi3mr_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 2526 0x10, ascq); 2527 scmd->result = (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION; 2528 } 2529 2530 /** 2531 * mpi3mr_process_op_reply_desc - reply descriptor handler 2532 * @mrioc: Adapter instance reference 2533 * @reply_desc: Operational reply descriptor 2534 * @reply_dma: place holder for reply DMA address 2535 * @qidx: Operational queue index 2536 * 2537 * Process the operational reply descriptor and identifies the 2538 * descriptor type. Based on the descriptor map the MPI3 request 2539 * status to a SCSI command status and calls scsi_done call 2540 * back. 
 *
 * Return: Nothing
 */
void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
	struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma, u16 qidx)
{
	u16 reply_desc_type, host_tag = 0;
	u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
	u32 ioc_loginfo = 0;
	struct mpi3_status_reply_descriptor *status_desc = NULL;
	struct mpi3_address_reply_descriptor *addr_desc = NULL;
	struct mpi3_success_reply_descriptor *success_desc = NULL;
	struct mpi3_scsi_io_reply *scsi_reply = NULL;
	struct scsi_cmnd *scmd = NULL;
	struct scmd_priv *priv = NULL;
	u8 *sense_buf = NULL;
	u8 scsi_state = 0, scsi_status = 0, sense_state = 0;
	u32 xfer_count = 0, sense_count = 0, resp_data = 0;
	u16 dev_handle = 0xFFFF;
	struct scsi_sense_hdr sshdr;

	*reply_dma = 0;
	reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
	/* Extract completion status fields based on the descriptor type */
	switch (reply_desc_type) {
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
		status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(status_desc->host_tag);
		ioc_status = le16_to_cpu(status_desc->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
		/* Full reply frame lives in host memory at the given address */
		addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
		*reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
		scsi_reply = mpi3mr_get_reply_virt_addr(mrioc,
		    *reply_dma);
		if (!scsi_reply) {
			panic("%s: scsi_reply is NULL, this shouldn't happen\n",
			    mrioc->name);
			goto out;
		}
		host_tag = le16_to_cpu(scsi_reply->host_tag);
		ioc_status = le16_to_cpu(scsi_reply->ioc_status);
		scsi_status = scsi_reply->scsi_status;
		scsi_state = scsi_reply->scsi_state;
		dev_handle = le16_to_cpu(scsi_reply->dev_handle);
		sense_state = (scsi_state & MPI3_SCSI_STATE_SENSE_MASK);
		xfer_count = le32_to_cpu(scsi_reply->transfer_count);
		sense_count = le32_to_cpu(scsi_reply->sense_count);
		resp_data = le32_to_cpu(scsi_reply->response_data);
		sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(scsi_reply->ioc_log_info);
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY)
			panic("%s: Ran out of sense buffers\n", mrioc->name);
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
		success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(success_desc->host_tag);
		break;
	default:
		break;
	}
	scmd = mpi3mr_scmd_from_host_tag(mrioc, host_tag, qidx);
	if (!scmd) {
		panic("%s: Cannot Identify scmd for host_tag 0x%x\n",
		    mrioc->name, host_tag);
		goto out;
	}
	priv = scsi_cmd_priv(scmd);
	if (success_desc) {
		/* Success descriptor: no error decode needed */
		scmd->result = DID_OK << 16;
		goto out_success;
	}

	scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_count);
	/*
	 * Treat zero-transfer underrun with busy/reservation/task-set-full
	 * SCSI status as success so the SCSI status alone drives retry.
	 */
	if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN &&
	    xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY ||
	    scsi_status == MPI3_SCSI_STATUS_RESERVATION_CONFLICT ||
	    scsi_status == MPI3_SCSI_STATUS_TASK_SET_FULL))
		ioc_status = MPI3_IOCSTATUS_SUCCESS;

	if ((sense_state == MPI3_SCSI_STATE_SENSE_VALID) && sense_count &&
	    sense_buf) {
		u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, sense_count);

		memcpy(scmd->sense_buffer, sense_buf, sz);
	}

	/* Map the MPI3 IOC status to the mid-layer result code */
	switch (ioc_status) {
	case MPI3_IOCSTATUS_BUSY:
	case MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES:
		scmd->result = SAM_STAT_BUSY;
		break;
	case MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		scmd->result = DID_NO_CONNECT << 16;
		break;
	case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
		scmd->result = DID_SOFT_ERROR << 16;
		break;
	case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED:
		scmd->result = DID_RESET << 16;
		break;
	case MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		if ((xfer_count == 0) || (scmd->underflow > xfer_count))
			scmd->result = DID_SOFT_ERROR << 16;
		else
			scmd->result = (DID_OK << 16) | scsi_status;
		break;
	case MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN:
		scmd->result = (DID_OK << 16) | scsi_status;
		if (sense_state == MPI3_SCSI_STATE_SENSE_VALID)
			break;
		if (xfer_count < scmd->underflow) {
			if (scsi_status == SAM_STAT_BUSY)
				scmd->result = SAM_STAT_BUSY;
			else
				scmd->result = DID_SOFT_ERROR << 16;
		} else if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) ||
		    (sense_state != MPI3_SCSI_STATE_SENSE_NOT_AVAILABLE))
			scmd->result = DID_SOFT_ERROR << 16;
		else if (scsi_state & MPI3_SCSI_STATE_TERMINATED)
			scmd->result = DID_RESET << 16;
		break;
	case MPI3_IOCSTATUS_SCSI_DATA_OVERRUN:
		scsi_set_resid(scmd, 0);
		fallthrough;
	case MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR:
	case MPI3_IOCSTATUS_SUCCESS:
		scmd->result = (DID_OK << 16) | scsi_status;
		if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) ||
		    (sense_state == MPI3_SCSI_STATE_SENSE_FAILED) ||
		    (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY))
			scmd->result = DID_SOFT_ERROR << 16;
		else if (scsi_state & MPI3_SCSI_STATE_TERMINATED)
			scmd->result = DID_RESET << 16;
		break;
	case MPI3_IOCSTATUS_EEDP_GUARD_ERROR:
	case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR:
	case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR:
		mpi3mr_map_eedp_error(scmd, ioc_status);
		break;
	case MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI3_IOCSTATUS_INVALID_FUNCTION:
	case MPI3_IOCSTATUS_INVALID_SGL:
	case MPI3_IOCSTATUS_INTERNAL_ERROR:
	case MPI3_IOCSTATUS_INVALID_FIELD:
	case MPI3_IOCSTATUS_INVALID_STATE:
	case MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	case MPI3_IOCSTATUS_INSUFFICIENT_POWER:
	default:
		scmd->result = DID_SOFT_ERROR << 16;
		break;
	}

	/* Log error details; skip ATA passthrough which fails routinely */
	if (scmd->result != (DID_OK << 16) && (scmd->cmnd[0] != ATA_12) &&
	    (scmd->cmnd[0] != ATA_16)) {
		ioc_info(mrioc, "%s :scmd->result 0x%x\n", __func__,
		    scmd->result);
		scsi_print_command(scmd);
		ioc_info(mrioc,
		    "%s :Command issued to handle 0x%02x returned with error 0x%04x loginfo 0x%08x, qid %d\n",
		    __func__, dev_handle, ioc_status, ioc_loginfo,
		    priv->req_q_idx + 1);
		ioc_info(mrioc,
		    " host_tag %d scsi_state 0x%02x scsi_status 0x%02x, xfer_cnt %d resp_data 0x%x\n",
		    host_tag, scsi_state, scsi_status, xfer_count, resp_data);
		if (sense_buf) {
			scsi_normalize_sense(sense_buf, sense_count, &sshdr);
			ioc_info(mrioc,
			    "%s :sense_count 0x%x, sense_key 0x%x ASC 0x%x, ASCQ 0x%x\n",
			    __func__, sense_count, sshdr.sense_key,
			    sshdr.asc, sshdr.ascq);
		}
	}
out_success:
	if (priv->meta_sg_valid) {
		dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
		    scsi_prot_sg_count(scmd), scmd->sc_data_direction);
	}
	mpi3mr_clear_scmd_priv(mrioc, scmd);
	scsi_dma_unmap(scmd);
	scsi_done(scmd);
out:
	/* Return the sense buffer to the firmware's free queue */
	if (sense_buf)
		mpi3mr_repost_sense_buf(mrioc,
		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
}

/**
 * mpi3mr_get_chain_idx - get free chain buffer index
 * @mrioc: Adapter instance reference
 *
 * Try to get a free chain buffer index from the free pool.
 *
 * Return: -1 on failure or the free chain buffer index
 */
static int mpi3mr_get_chain_idx(struct mpi3mr_ioc *mrioc)
{
	u8 retry_count = 5;
	int cmd_idx = -1;

	do {
		spin_lock(&mrioc->chain_buf_lock);
		cmd_idx = find_first_zero_bit(mrioc->chain_bitmap,
		    mrioc->chain_buf_count);
		if (cmd_idx < mrioc->chain_buf_count) {
			set_bit(cmd_idx, mrioc->chain_bitmap);
			spin_unlock(&mrioc->chain_buf_lock);
			break;
		}
		spin_unlock(&mrioc->chain_buf_lock);
		cmd_idx = -1;
	} while (retry_count--);
	return cmd_idx;
}

/**
 * mpi3mr_prepare_sg_scmd - build scatter gather list
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 * @scsiio_req: MPI3 SCSI IO request
 *
 * This function maps SCSI command's data and protection SGEs to
 * MPI request SGEs. If required additional 4K chain buffer is
 * used to send the SGEs.
 *
 * Return: 0 on success, -ENOMEM on dma_map_sg failure
 */
static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
{
	dma_addr_t chain_dma;
	struct scatterlist *sg_scmd;
	void *sg_local, *chain;
	u32 chain_length;
	int sges_left, chain_idx;
	u32 sges_in_segment;
	u8 simple_sgl_flags;
	u8 simple_sgl_flags_last;
	u8 last_chain_sgl_flags;
	struct chain_element *chain_req;
	struct scmd_priv *priv = NULL;
	/* Non-zero when building the protection-information (meta) SGL */
	u32 meta_sg = le32_to_cpu(scsiio_req->flags) &
	    MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI;

	priv = scsi_cmd_priv(scmd);

	simple_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
	    MPI3_SGE_FLAGS_DLAS_SYSTEM;
	simple_sgl_flags_last = simple_sgl_flags |
	    MPI3_SGE_FLAGS_END_OF_LIST;
	last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN |
	    MPI3_SGE_FLAGS_DLAS_SYSTEM;

	if (meta_sg)
		sg_local = &scsiio_req->sgl[MPI3_SCSIIO_METASGL_INDEX];
	else
		sg_local = &scsiio_req->sgl;

	if (!scsiio_req->data_length && !meta_sg) {
		mpi3mr_build_zero_len_sge(sg_local);
		return 0;
	}

	if (meta_sg) {
		sg_scmd = scsi_prot_sglist(scmd);
		sges_left = dma_map_sg(&mrioc->pdev->dev,
		    scsi_prot_sglist(scmd),
		    scsi_prot_sg_count(scmd),
		    scmd->sc_data_direction);
		priv->meta_sg_valid = 1; /* To unmap meta sg DMA */
	} else {
		sg_scmd = scsi_sglist(scmd);
		sges_left = scsi_dma_map(scmd);
	}

	if (sges_left < 0) {
		sdev_printk(KERN_ERR, scmd->device,
		    "scsi_dma_map failed: request for %d bytes!\n",
		    scsi_bufflen(scmd));
		return -ENOMEM;
	}
	if (sges_left > MPI3MR_SG_DEPTH) {
		sdev_printk(KERN_ERR, scmd->device,
		    "scsi_dma_map returned unsupported sge count %d!\n",
		    sges_left);
		return -ENOMEM;
	}

	/* SGEs that fit in the request frame after the fixed header */
	sges_in_segment = (mrioc->facts.op_req_sz -
	    offsetof(struct mpi3_scsi_io_request, sgl)) / sizeof(struct mpi3_sge_common);

	if (scsiio_req->sgl[0].eedp.flags ==
	    MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED && !meta_sg) {
		sg_local += sizeof(struct mpi3_sge_common);
		sges_in_segment--;
		/* Reserve 1st segment (scsiio_req->sgl[0]) for eedp */
	}

	if (scsiio_req->msg_flags ==
	    MPI3_SCSIIO_MSGFLAGS_METASGL_VALID && !meta_sg) {
		sges_in_segment--;
		/* Reserve last segment (scsiio_req->sgl[3]) for meta sg */
	}

	if (meta_sg)
		sges_in_segment = 1;

	if (sges_left <= sges_in_segment)
		goto fill_in_last_segment;

	/* fill in main message segment when there is a chain following */
	while (sges_in_segment > 1) {
		mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
		    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		sg_scmd = sg_next(sg_scmd);
		sg_local += sizeof(struct mpi3_sge_common);
		sges_left--;
		sges_in_segment--;
	}

	chain_idx = mpi3mr_get_chain_idx(mrioc);
	/* NOTE(review): returns -1 here, not -ENOMEM as the kernel-doc
	 * says; callers appear to only test for non-zero — confirm. */
	if (chain_idx < 0)
		return -1;
	chain_req = &mrioc->chain_sgl_list[chain_idx];
	/* Remember the chain index so it can be freed at completion */
	if (meta_sg)
		priv->meta_chain_idx = chain_idx;
	else
		priv->chain_idx = chain_idx;

	chain = chain_req->addr;
	chain_dma = chain_req->dma_addr;
	sges_in_segment = sges_left;
	chain_length = sges_in_segment * sizeof(struct mpi3_sge_common);

	/* Terminate the request-frame segment with a chain SGE */
	mpi3mr_add_sg_single(sg_local, last_chain_sgl_flags,
	    chain_length, chain_dma);

	sg_local = chain;

fill_in_last_segment:
	while (sges_left > 0) {
		if (sges_left == 1)
			mpi3mr_add_sg_single(sg_local,
			    simple_sgl_flags_last, sg_dma_len(sg_scmd),
			    sg_dma_address(sg_scmd));
		else
			mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		sg_scmd = sg_next(sg_scmd);
		sg_local += sizeof(struct mpi3_sge_common);
		sges_left--;
	}

	return 0;
}

/**
 * mpi3mr_build_sg_scmd - build scatter gather list for SCSI IO
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 * @scsiio_req: MPI3 SCSI IO request
 *
 * This function calls mpi3mr_prepare_sg_scmd for constructing
 * both data SGEs and protection information SGEs in the MPI
 * format from the SCSI Command as appropriate.
2918 */ 2919 static int mpi3mr_build_sg_scmd(struct mpi3mr_ioc *mrioc, 2920 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req) 2921 { 2922 int ret; 2923 2924 ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req); 2925 if (ret) 2926 return ret; 2927 2928 if (scsiio_req->msg_flags == MPI3_SCSIIO_MSGFLAGS_METASGL_VALID) { 2929 /* There is a valid meta sg */ 2930 scsiio_req->flags |= 2931 cpu_to_le32(MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI); 2932 ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req); 2933 } 2934 2935 return ret; 2936 } 2937 2938 /** 2939 * mpi3mr_tm_response_name - get TM response as a string 2940 * @resp_code: TM response code 2941 * 2942 * Convert known task management response code as a readable 2943 * string. 2944 * 2945 * Return: response code string. 2946 */ 2947 static const char *mpi3mr_tm_response_name(u8 resp_code) 2948 { 2949 char *desc; 2950 2951 switch (resp_code) { 2952 case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE: 2953 desc = "task management request completed"; 2954 break; 2955 case MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME: 2956 desc = "invalid frame"; 2957 break; 2958 case MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED: 2959 desc = "task management request not supported"; 2960 break; 2961 case MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED: 2962 desc = "task management request failed"; 2963 break; 2964 case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED: 2965 desc = "task management request succeeded"; 2966 break; 2967 case MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN: 2968 desc = "invalid LUN"; 2969 break; 2970 case MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG: 2971 desc = "overlapped tag attempted"; 2972 break; 2973 case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC: 2974 desc = "task queued, however not sent to target"; 2975 break; 2976 case MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED: 2977 desc = "task management request denied by NVMe device"; 2978 break; 2979 default: 2980 desc = "unknown"; 2981 break; 2982 } 2983 2984 return desc; 2985 } 2986 
2987 inline void mpi3mr_poll_pend_io_completions(struct mpi3mr_ioc *mrioc) 2988 { 2989 int i; 2990 int num_of_reply_queues = 2991 mrioc->num_op_reply_q + mrioc->op_reply_q_offset; 2992 2993 for (i = mrioc->op_reply_q_offset; i < num_of_reply_queues; i++) 2994 mpi3mr_process_op_reply_q(mrioc, 2995 mrioc->intr_info[i].op_reply_q); 2996 } 2997 2998 /** 2999 * mpi3mr_issue_tm - Issue Task Management request 3000 * @mrioc: Adapter instance reference 3001 * @tm_type: Task Management type 3002 * @handle: Device handle 3003 * @lun: lun ID 3004 * @htag: Host tag of the TM request 3005 * @timeout: TM timeout value 3006 * @drv_cmd: Internal command tracker 3007 * @resp_code: Response code place holder 3008 * @scmd: SCSI command 3009 * 3010 * Issues a Task Management Request to the controller for a 3011 * specified target, lun and command and wait for its completion 3012 * and check TM response. Recover the TM if it timed out by 3013 * issuing controller reset. 3014 * 3015 * Return: 0 on success, non-zero on errors 3016 */ 3017 int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type, 3018 u16 handle, uint lun, u16 htag, ulong timeout, 3019 struct mpi3mr_drv_cmd *drv_cmd, 3020 u8 *resp_code, struct scsi_cmnd *scmd) 3021 { 3022 struct mpi3_scsi_task_mgmt_request tm_req; 3023 struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL; 3024 int retval = 0; 3025 struct mpi3mr_tgt_dev *tgtdev = NULL; 3026 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 3027 struct scmd_priv *cmd_priv = NULL; 3028 struct scsi_device *sdev = NULL; 3029 struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL; 3030 3031 ioc_info(mrioc, "%s :Issue TM: TM type (0x%x) for devhandle 0x%04x\n", 3032 __func__, tm_type, handle); 3033 if (mrioc->unrecoverable) { 3034 retval = -1; 3035 ioc_err(mrioc, "%s :Issue TM: Unrecoverable controller\n", 3036 __func__); 3037 goto out; 3038 } 3039 3040 memset(&tm_req, 0, sizeof(tm_req)); 3041 mutex_lock(&drv_cmd->mutex); 3042 if (drv_cmd->state & MPI3MR_CMD_PENDING) { 3043 
retval = -1; 3044 ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__); 3045 mutex_unlock(&drv_cmd->mutex); 3046 goto out; 3047 } 3048 if (mrioc->reset_in_progress) { 3049 retval = -1; 3050 ioc_err(mrioc, "%s :Issue TM: Reset in progress\n", __func__); 3051 mutex_unlock(&drv_cmd->mutex); 3052 goto out; 3053 } 3054 3055 drv_cmd->state = MPI3MR_CMD_PENDING; 3056 drv_cmd->is_waiting = 1; 3057 drv_cmd->callback = NULL; 3058 tm_req.dev_handle = cpu_to_le16(handle); 3059 tm_req.task_type = tm_type; 3060 tm_req.host_tag = cpu_to_le16(htag); 3061 3062 int_to_scsilun(lun, (struct scsi_lun *)tm_req.lun); 3063 tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT; 3064 3065 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 3066 3067 if (scmd) { 3068 sdev = scmd->device; 3069 sdev_priv_data = sdev->hostdata; 3070 scsi_tgt_priv_data = ((sdev_priv_data) ? 3071 sdev_priv_data->tgt_priv_data : NULL); 3072 } else { 3073 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) 3074 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 3075 tgtdev->starget->hostdata; 3076 } 3077 3078 if (scsi_tgt_priv_data) 3079 atomic_inc(&scsi_tgt_priv_data->block_io); 3080 3081 if (tgtdev && (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)) { 3082 if (cmd_priv && tgtdev->dev_spec.pcie_inf.abort_to) 3083 timeout = tgtdev->dev_spec.pcie_inf.abort_to; 3084 else if (!cmd_priv && tgtdev->dev_spec.pcie_inf.reset_to) 3085 timeout = tgtdev->dev_spec.pcie_inf.reset_to; 3086 } 3087 3088 init_completion(&drv_cmd->done); 3089 retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1); 3090 if (retval) { 3091 ioc_err(mrioc, "%s :Issue TM: Admin Post failed\n", __func__); 3092 goto out_unlock; 3093 } 3094 wait_for_completion_timeout(&drv_cmd->done, (timeout * HZ)); 3095 3096 if (!(drv_cmd->state & MPI3MR_CMD_COMPLETE)) { 3097 drv_cmd->is_waiting = 0; 3098 retval = -1; 3099 if (!(drv_cmd->state & MPI3MR_CMD_RESET)) { 3100 dprint_tm(mrioc, 3101 "task management request timed out after %ld 
seconds\n", 3102 timeout); 3103 if (mrioc->logging_level & MPI3_DEBUG_TM) 3104 dprint_dump_req(&tm_req, sizeof(tm_req)/4); 3105 mpi3mr_soft_reset_handler(mrioc, 3106 MPI3MR_RESET_FROM_TM_TIMEOUT, 1); 3107 } 3108 goto out_unlock; 3109 } 3110 3111 if (!(drv_cmd->state & MPI3MR_CMD_REPLY_VALID)) { 3112 dprint_tm(mrioc, "invalid task management reply message\n"); 3113 retval = -1; 3114 goto out_unlock; 3115 } 3116 3117 tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply; 3118 3119 switch (drv_cmd->ioc_status) { 3120 case MPI3_IOCSTATUS_SUCCESS: 3121 *resp_code = le32_to_cpu(tm_reply->response_data) & 3122 MPI3MR_RI_MASK_RESPCODE; 3123 break; 3124 case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED: 3125 *resp_code = MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE; 3126 break; 3127 default: 3128 dprint_tm(mrioc, 3129 "task management request to handle(0x%04x) is failed with ioc_status(0x%04x) log_info(0x%08x)\n", 3130 handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo); 3131 retval = -1; 3132 goto out_unlock; 3133 } 3134 3135 switch (*resp_code) { 3136 case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED: 3137 case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE: 3138 break; 3139 case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC: 3140 if (tm_type != MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK) 3141 retval = -1; 3142 break; 3143 default: 3144 retval = -1; 3145 break; 3146 } 3147 3148 dprint_tm(mrioc, 3149 "task management request type(%d) completed for handle(0x%04x) with ioc_status(0x%04x), log_info(0x%08x), termination_count(%d), response:%s(0x%x)\n", 3150 tm_type, handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo, 3151 le32_to_cpu(tm_reply->termination_count), 3152 mpi3mr_tm_response_name(*resp_code), *resp_code); 3153 3154 if (!retval) { 3155 mpi3mr_ioc_disable_intr(mrioc); 3156 mpi3mr_poll_pend_io_completions(mrioc); 3157 mpi3mr_ioc_enable_intr(mrioc); 3158 mpi3mr_poll_pend_io_completions(mrioc); 3159 } 3160 switch (tm_type) { 3161 case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET: 3162 if 
(!scsi_tgt_priv_data) 3163 break; 3164 scsi_tgt_priv_data->pend_count = 0; 3165 blk_mq_tagset_busy_iter(&mrioc->shost->tag_set, 3166 mpi3mr_count_tgt_pending, 3167 (void *)scsi_tgt_priv_data->starget); 3168 break; 3169 case MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: 3170 if (!sdev_priv_data) 3171 break; 3172 sdev_priv_data->pend_count = 0; 3173 blk_mq_tagset_busy_iter(&mrioc->shost->tag_set, 3174 mpi3mr_count_dev_pending, (void *)sdev); 3175 break; 3176 default: 3177 break; 3178 } 3179 3180 out_unlock: 3181 drv_cmd->state = MPI3MR_CMD_NOTUSED; 3182 mutex_unlock(&drv_cmd->mutex); 3183 if (scsi_tgt_priv_data) 3184 atomic_dec_if_positive(&scsi_tgt_priv_data->block_io); 3185 if (tgtdev) 3186 mpi3mr_tgtdev_put(tgtdev); 3187 out: 3188 return retval; 3189 } 3190 3191 /** 3192 * mpi3mr_bios_param - BIOS param callback 3193 * @sdev: SCSI device reference 3194 * @bdev: Block device reference 3195 * @capacity: Capacity in logical sectors 3196 * @params: Parameter array 3197 * 3198 * Just the parameters with heads/secots/cylinders. 3199 * 3200 * Return: 0 always 3201 */ 3202 static int mpi3mr_bios_param(struct scsi_device *sdev, 3203 struct block_device *bdev, sector_t capacity, int params[]) 3204 { 3205 int heads; 3206 int sectors; 3207 sector_t cylinders; 3208 ulong dummy; 3209 3210 heads = 64; 3211 sectors = 32; 3212 3213 dummy = heads * sectors; 3214 cylinders = capacity; 3215 sector_div(cylinders, dummy); 3216 3217 if ((ulong)capacity >= 0x200000) { 3218 heads = 255; 3219 sectors = 63; 3220 dummy = heads * sectors; 3221 cylinders = capacity; 3222 sector_div(cylinders, dummy); 3223 } 3224 3225 params[0] = heads; 3226 params[1] = sectors; 3227 params[2] = cylinders; 3228 return 0; 3229 } 3230 3231 /** 3232 * mpi3mr_map_queues - Map queues callback handler 3233 * @shost: SCSI host reference 3234 * 3235 * Maps default and poll queues. 3236 * 3237 * Return: return zero. 
 */
static int mpi3mr_map_queues(struct Scsi_Host *shost)
{
	struct mpi3mr_ioc *mrioc = shost_priv(shost);
	int i, qoff, offset;
	struct blk_mq_queue_map *map = NULL;

	/* First operational (IRQ-driven) queue index for PCI IRQ affinity */
	offset = mrioc->op_reply_q_offset;

	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
		map = &shost->tag_set.map[i];

		map->nr_queues = 0;

		if (i == HCTX_TYPE_DEFAULT)
			map->nr_queues = mrioc->default_qcount;
		else if (i == HCTX_TYPE_POLL)
			map->nr_queues = mrioc->active_poll_qcount;

		if (!map->nr_queues) {
			/* The driver cannot operate without default queues */
			BUG_ON(i == HCTX_TYPE_DEFAULT);
			continue;
		}

		/*
		 * The poll queue(s) doesn't have an IRQ (and hence IRQ
		 * affinity), so use the regular blk-mq cpu mapping
		 */
		map->queue_offset = qoff;
		if (i != HCTX_TYPE_POLL)
			blk_mq_pci_map_queues(map, mrioc->pdev, offset);
		else
			blk_mq_map_queues(map);

		/* advance both the hctx offset and the IRQ vector offset */
		qoff += map->nr_queues;
		offset += map->nr_queues;
	}

	return 0;

}

/**
 * mpi3mr_get_fw_pending_ios - Calculate pending I/O count
 * @mrioc: Adapter instance reference
 *
 * Calculate the pending I/Os for the controller and return.
 *
 * Return: Number of pending I/Os
 */
static inline int mpi3mr_get_fw_pending_ios(struct mpi3mr_ioc *mrioc)
{
	u16 i;
	uint pend_ios = 0;

	/* Sum outstanding I/O counters across all operational reply queues */
	for (i = 0; i < mrioc->num_op_reply_q; i++)
		pend_ios += atomic_read(&mrioc->op_reply_qinfo[i].pend_ios);
	return pend_ios;
}

/**
 * mpi3mr_print_pending_host_io - print pending I/Os
 * @mrioc: Adapter instance reference
 *
 * Print number of pending I/Os and each I/O details prior to
 * reset for debug purpose.
 *
 * Return: Nothing
 */
static void mpi3mr_print_pending_host_io(struct mpi3mr_ioc *mrioc)
{
	struct Scsi_Host *shost = mrioc->shost;

	ioc_info(mrioc, "%s :Pending commands prior to reset: %d\n",
	    __func__, mpi3mr_get_fw_pending_ios(mrioc));
	/* Dump every in-flight scmd via the block layer busy-tag iterator */
	blk_mq_tagset_busy_iter(&shost->tag_set,
	    mpi3mr_print_scmd, (void *)mrioc);
}

/**
 * mpi3mr_wait_for_host_io - block for I/Os to complete
 * @mrioc: Adapter instance reference
 * @timeout: time out in seconds
 *
 * Waits for pending I/Os for the given adapter to complete or
 * to hit the timeout.
 *
 * Return: Nothing
 */
void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout)
{
	enum mpi3mr_iocstate iocstate;
	int i = 0;

	/* Only wait while the controller is operational */
	iocstate = mpi3mr_get_iocstate(mrioc);
	if (iocstate != MRIOC_STATE_READY)
		return;

	if (!mpi3mr_get_fw_pending_ios(mrioc))
		return;
	ioc_info(mrioc,
	    "%s :Waiting for %d seconds prior to reset for %d I/O\n",
	    __func__, timeout, mpi3mr_get_fw_pending_ios(mrioc));

	/* Poll once per second; stop early if I/Os drain or IOC leaves READY */
	for (i = 0; i < timeout; i++) {
		if (!mpi3mr_get_fw_pending_ios(mrioc))
			break;
		iocstate = mpi3mr_get_iocstate(mrioc);
		if (iocstate != MRIOC_STATE_READY)
			break;
		msleep(1000);
	}

	ioc_info(mrioc, "%s :Pending I/Os after wait is: %d\n", __func__,
	    mpi3mr_get_fw_pending_ios(mrioc));
}

/**
 * mpi3mr_eh_host_reset - Host reset error handling callback
 * @scmd: SCSI command reference
 *
 * Issue controller reset if the scmd is for a Physical Device,
 * if the scmd is for RAID volume, then wait for
 * MPI3MR_RAID_ERRREC_RESET_TIMEOUT and check whether any
 * pending I/Os prior to issuing reset to the controller.
3362 * 3363 * Return: SUCCESS of successful reset else FAILED 3364 */ 3365 static int mpi3mr_eh_host_reset(struct scsi_cmnd *scmd) 3366 { 3367 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host); 3368 struct mpi3mr_stgt_priv_data *stgt_priv_data; 3369 struct mpi3mr_sdev_priv_data *sdev_priv_data; 3370 u8 dev_type = MPI3_DEVICE_DEVFORM_VD; 3371 int retval = FAILED, ret; 3372 3373 sdev_priv_data = scmd->device->hostdata; 3374 if (sdev_priv_data && sdev_priv_data->tgt_priv_data) { 3375 stgt_priv_data = sdev_priv_data->tgt_priv_data; 3376 dev_type = stgt_priv_data->dev_type; 3377 } 3378 3379 if (dev_type == MPI3_DEVICE_DEVFORM_VD) { 3380 mpi3mr_wait_for_host_io(mrioc, 3381 MPI3MR_RAID_ERRREC_RESET_TIMEOUT); 3382 if (!mpi3mr_get_fw_pending_ios(mrioc)) { 3383 retval = SUCCESS; 3384 goto out; 3385 } 3386 } 3387 3388 mpi3mr_print_pending_host_io(mrioc); 3389 ret = mpi3mr_soft_reset_handler(mrioc, 3390 MPI3MR_RESET_FROM_EH_HOS, 1); 3391 if (ret) 3392 goto out; 3393 3394 retval = SUCCESS; 3395 out: 3396 sdev_printk(KERN_INFO, scmd->device, 3397 "Host reset is %s for scmd(%p)\n", 3398 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); 3399 3400 return retval; 3401 } 3402 3403 /** 3404 * mpi3mr_eh_target_reset - Target reset error handling callback 3405 * @scmd: SCSI command reference 3406 * 3407 * Issue Target reset Task Management and verify the scmd is 3408 * terminated successfully and return status accordingly. 3409 * 3410 * Return: SUCCESS of successful termination of the scmd else 3411 * FAILED 3412 */ 3413 static int mpi3mr_eh_target_reset(struct scsi_cmnd *scmd) 3414 { 3415 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host); 3416 struct mpi3mr_stgt_priv_data *stgt_priv_data; 3417 struct mpi3mr_sdev_priv_data *sdev_priv_data; 3418 u16 dev_handle; 3419 u8 resp_code = 0; 3420 int retval = FAILED, ret = 0; 3421 3422 sdev_printk(KERN_INFO, scmd->device, 3423 "Attempting Target Reset! 
scmd(%p)\n", scmd); 3424 scsi_print_command(scmd); 3425 3426 sdev_priv_data = scmd->device->hostdata; 3427 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) { 3428 sdev_printk(KERN_INFO, scmd->device, 3429 "SCSI device is not available\n"); 3430 retval = SUCCESS; 3431 goto out; 3432 } 3433 3434 stgt_priv_data = sdev_priv_data->tgt_priv_data; 3435 dev_handle = stgt_priv_data->dev_handle; 3436 if (stgt_priv_data->dev_removed) { 3437 sdev_printk(KERN_INFO, scmd->device, 3438 "%s:target(handle = 0x%04x) is removed, target reset is not issued\n", 3439 mrioc->name, dev_handle); 3440 retval = FAILED; 3441 goto out; 3442 } 3443 sdev_printk(KERN_INFO, scmd->device, 3444 "Target Reset is issued to handle(0x%04x)\n", 3445 dev_handle); 3446 3447 ret = mpi3mr_issue_tm(mrioc, 3448 MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, dev_handle, 3449 sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS, 3450 MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd); 3451 3452 if (ret) 3453 goto out; 3454 3455 if (stgt_priv_data->pend_count) { 3456 sdev_printk(KERN_INFO, scmd->device, 3457 "%s: target has %d pending commands, target reset is failed\n", 3458 mrioc->name, stgt_priv_data->pend_count); 3459 goto out; 3460 } 3461 3462 retval = SUCCESS; 3463 out: 3464 sdev_printk(KERN_INFO, scmd->device, 3465 "%s: target reset is %s for scmd(%p)\n", mrioc->name, 3466 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); 3467 3468 return retval; 3469 } 3470 3471 /** 3472 * mpi3mr_eh_dev_reset- Device reset error handling callback 3473 * @scmd: SCSI command reference 3474 * 3475 * Issue lun reset Task Management and verify the scmd is 3476 * terminated successfully and return status accordingly. 
3477 * 3478 * Return: SUCCESS of successful termination of the scmd else 3479 * FAILED 3480 */ 3481 static int mpi3mr_eh_dev_reset(struct scsi_cmnd *scmd) 3482 { 3483 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host); 3484 struct mpi3mr_stgt_priv_data *stgt_priv_data; 3485 struct mpi3mr_sdev_priv_data *sdev_priv_data; 3486 u16 dev_handle; 3487 u8 resp_code = 0; 3488 int retval = FAILED, ret = 0; 3489 3490 sdev_printk(KERN_INFO, scmd->device, 3491 "Attempting Device(lun) Reset! scmd(%p)\n", scmd); 3492 scsi_print_command(scmd); 3493 3494 sdev_priv_data = scmd->device->hostdata; 3495 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) { 3496 sdev_printk(KERN_INFO, scmd->device, 3497 "SCSI device is not available\n"); 3498 retval = SUCCESS; 3499 goto out; 3500 } 3501 3502 stgt_priv_data = sdev_priv_data->tgt_priv_data; 3503 dev_handle = stgt_priv_data->dev_handle; 3504 if (stgt_priv_data->dev_removed) { 3505 sdev_printk(KERN_INFO, scmd->device, 3506 "%s: device(handle = 0x%04x) is removed, device(LUN) reset is not issued\n", 3507 mrioc->name, dev_handle); 3508 retval = FAILED; 3509 goto out; 3510 } 3511 sdev_printk(KERN_INFO, scmd->device, 3512 "Device(lun) Reset is issued to handle(0x%04x)\n", dev_handle); 3513 3514 ret = mpi3mr_issue_tm(mrioc, 3515 MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, dev_handle, 3516 sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS, 3517 MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd); 3518 3519 if (ret) 3520 goto out; 3521 3522 if (sdev_priv_data->pend_count) { 3523 sdev_printk(KERN_INFO, scmd->device, 3524 "%s: device has %d pending commands, device(LUN) reset is failed\n", 3525 mrioc->name, sdev_priv_data->pend_count); 3526 goto out; 3527 } 3528 retval = SUCCESS; 3529 out: 3530 sdev_printk(KERN_INFO, scmd->device, 3531 "%s: device(LUN) reset is %s for scmd(%p)\n", mrioc->name, 3532 ((retval == SUCCESS) ? 
"SUCCESS" : "FAILED"), scmd); 3533 3534 return retval; 3535 } 3536 3537 /** 3538 * mpi3mr_scan_start - Scan start callback handler 3539 * @shost: SCSI host reference 3540 * 3541 * Issue port enable request asynchronously. 3542 * 3543 * Return: Nothing 3544 */ 3545 static void mpi3mr_scan_start(struct Scsi_Host *shost) 3546 { 3547 struct mpi3mr_ioc *mrioc = shost_priv(shost); 3548 3549 mrioc->scan_started = 1; 3550 ioc_info(mrioc, "%s :Issuing Port Enable\n", __func__); 3551 if (mpi3mr_issue_port_enable(mrioc, 1)) { 3552 ioc_err(mrioc, "%s :Issuing port enable failed\n", __func__); 3553 mrioc->scan_started = 0; 3554 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR; 3555 } 3556 } 3557 3558 /** 3559 * mpi3mr_scan_finished - Scan finished callback handler 3560 * @shost: SCSI host reference 3561 * @time: Jiffies from the scan start 3562 * 3563 * Checks whether the port enable is completed or timedout or 3564 * failed and set the scan status accordingly after taking any 3565 * recovery if required. 
3566 * 3567 * Return: 1 on scan finished or timed out, 0 for in progress 3568 */ 3569 static int mpi3mr_scan_finished(struct Scsi_Host *shost, 3570 unsigned long time) 3571 { 3572 struct mpi3mr_ioc *mrioc = shost_priv(shost); 3573 u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT; 3574 u32 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 3575 3576 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) || 3577 (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) { 3578 ioc_err(mrioc, "port enable failed due to fault or reset\n"); 3579 mpi3mr_print_fault_info(mrioc); 3580 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR; 3581 mrioc->scan_started = 0; 3582 mrioc->init_cmds.is_waiting = 0; 3583 mrioc->init_cmds.callback = NULL; 3584 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 3585 } 3586 3587 if (time >= (pe_timeout * HZ)) { 3588 ioc_err(mrioc, "port enable failed due to time out\n"); 3589 mpi3mr_check_rh_fault_ioc(mrioc, 3590 MPI3MR_RESET_FROM_PE_TIMEOUT); 3591 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR; 3592 mrioc->scan_started = 0; 3593 mrioc->init_cmds.is_waiting = 0; 3594 mrioc->init_cmds.callback = NULL; 3595 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 3596 } 3597 3598 if (mrioc->scan_started) 3599 return 0; 3600 3601 if (mrioc->scan_failed) { 3602 ioc_err(mrioc, 3603 "port enable failed with status=0x%04x\n", 3604 mrioc->scan_failed); 3605 } else 3606 ioc_info(mrioc, "port enable is successfully completed\n"); 3607 3608 mpi3mr_start_watchdog(mrioc); 3609 mrioc->is_driver_loading = 0; 3610 mrioc->stop_bsgs = 0; 3611 return 1; 3612 } 3613 3614 /** 3615 * mpi3mr_slave_destroy - Slave destroy callback handler 3616 * @sdev: SCSI device reference 3617 * 3618 * Cleanup and free per device(lun) private data. 3619 * 3620 * Return: Nothing. 
3621 */ 3622 static void mpi3mr_slave_destroy(struct scsi_device *sdev) 3623 { 3624 struct Scsi_Host *shost; 3625 struct mpi3mr_ioc *mrioc; 3626 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 3627 struct mpi3mr_tgt_dev *tgt_dev; 3628 unsigned long flags; 3629 struct scsi_target *starget; 3630 3631 if (!sdev->hostdata) 3632 return; 3633 3634 starget = scsi_target(sdev); 3635 shost = dev_to_shost(&starget->dev); 3636 mrioc = shost_priv(shost); 3637 scsi_tgt_priv_data = starget->hostdata; 3638 3639 scsi_tgt_priv_data->num_luns--; 3640 3641 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 3642 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 3643 if (tgt_dev && (!scsi_tgt_priv_data->num_luns)) 3644 tgt_dev->starget = NULL; 3645 if (tgt_dev) 3646 mpi3mr_tgtdev_put(tgt_dev); 3647 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 3648 3649 kfree(sdev->hostdata); 3650 sdev->hostdata = NULL; 3651 } 3652 3653 /** 3654 * mpi3mr_target_destroy - Target destroy callback handler 3655 * @starget: SCSI target reference 3656 * 3657 * Cleanup and free per target private data. 3658 * 3659 * Return: Nothing. 
 */
static void mpi3mr_target_destroy(struct scsi_target *starget)
{
	struct Scsi_Host *shost;
	struct mpi3mr_ioc *mrioc;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
	struct mpi3mr_tgt_dev *tgt_dev;
	unsigned long flags;

	if (!starget->hostdata)
		return;

	shost = dev_to_shost(&starget->dev);
	mrioc = shost_priv(shost);
	scsi_tgt_priv_data = starget->hostdata;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgt_dev = __mpi3mr_get_tgtdev_from_tgtpriv(mrioc, scsi_tgt_priv_data);
	/* Drop the back-pointer only if this starget still owns the tgt_dev */
	if (tgt_dev && (tgt_dev->starget == starget) &&
	    (tgt_dev->perst_id == starget->id))
		tgt_dev->starget = NULL;
	if (tgt_dev) {
		scsi_tgt_priv_data->tgt_dev = NULL;
		scsi_tgt_priv_data->perst_id = 0;
		/*
		 * Two puts: one drops the reference taken by the lookup
		 * above, the second presumably drops the long-lived
		 * reference that was held via scsi_tgt_priv_data->tgt_dev.
		 * NOTE(review): confirm pairing against the reference
		 * taken in mpi3mr_target_alloc().
		 */
		mpi3mr_tgtdev_put(tgt_dev);
		mpi3mr_tgtdev_put(tgt_dev);
	}
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);

	kfree(starget->hostdata);
	starget->hostdata = NULL;
}

/**
 * mpi3mr_slave_configure - Slave configure callback handler
 * @sdev: SCSI device reference
 *
 * Configure queue depth, max hardware sectors and virt boundary
 * as required
 *
 * Return: 0 always.
3701 */ 3702 static int mpi3mr_slave_configure(struct scsi_device *sdev) 3703 { 3704 struct scsi_target *starget; 3705 struct Scsi_Host *shost; 3706 struct mpi3mr_ioc *mrioc; 3707 struct mpi3mr_tgt_dev *tgt_dev; 3708 unsigned long flags; 3709 int retval = 0; 3710 3711 starget = scsi_target(sdev); 3712 shost = dev_to_shost(&starget->dev); 3713 mrioc = shost_priv(shost); 3714 3715 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 3716 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 3717 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 3718 if (!tgt_dev) 3719 return -ENXIO; 3720 3721 mpi3mr_change_queue_depth(sdev, tgt_dev->q_depth); 3722 3723 sdev->eh_timeout = MPI3MR_EH_SCMD_TIMEOUT; 3724 blk_queue_rq_timeout(sdev->request_queue, MPI3MR_SCMD_TIMEOUT); 3725 3726 switch (tgt_dev->dev_type) { 3727 case MPI3_DEVICE_DEVFORM_PCIE: 3728 /*The block layer hw sector size = 512*/ 3729 if ((tgt_dev->dev_spec.pcie_inf.dev_info & 3730 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) == 3731 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) { 3732 blk_queue_max_hw_sectors(sdev->request_queue, 3733 tgt_dev->dev_spec.pcie_inf.mdts / 512); 3734 if (tgt_dev->dev_spec.pcie_inf.pgsz == 0) 3735 blk_queue_virt_boundary(sdev->request_queue, 3736 ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1)); 3737 else 3738 blk_queue_virt_boundary(sdev->request_queue, 3739 ((1 << tgt_dev->dev_spec.pcie_inf.pgsz) - 1)); 3740 } 3741 break; 3742 default: 3743 break; 3744 } 3745 3746 mpi3mr_tgtdev_put(tgt_dev); 3747 3748 return retval; 3749 } 3750 3751 /** 3752 * mpi3mr_slave_alloc -Slave alloc callback handler 3753 * @sdev: SCSI device reference 3754 * 3755 * Allocate per device(lun) private data and initialize it. 3756 * 3757 * Return: 0 on success -ENOMEM on memory allocation failure. 
3758 */ 3759 static int mpi3mr_slave_alloc(struct scsi_device *sdev) 3760 { 3761 struct Scsi_Host *shost; 3762 struct mpi3mr_ioc *mrioc; 3763 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 3764 struct mpi3mr_tgt_dev *tgt_dev; 3765 struct mpi3mr_sdev_priv_data *scsi_dev_priv_data; 3766 unsigned long flags; 3767 struct scsi_target *starget; 3768 int retval = 0; 3769 3770 starget = scsi_target(sdev); 3771 shost = dev_to_shost(&starget->dev); 3772 mrioc = shost_priv(shost); 3773 scsi_tgt_priv_data = starget->hostdata; 3774 3775 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 3776 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 3777 3778 if (tgt_dev) { 3779 if (tgt_dev->starget == NULL) 3780 tgt_dev->starget = starget; 3781 mpi3mr_tgtdev_put(tgt_dev); 3782 retval = 0; 3783 } else { 3784 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 3785 return -ENXIO; 3786 } 3787 3788 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 3789 3790 scsi_dev_priv_data = kzalloc(sizeof(*scsi_dev_priv_data), GFP_KERNEL); 3791 if (!scsi_dev_priv_data) 3792 return -ENOMEM; 3793 3794 scsi_dev_priv_data->lun_id = sdev->lun; 3795 scsi_dev_priv_data->tgt_priv_data = scsi_tgt_priv_data; 3796 sdev->hostdata = scsi_dev_priv_data; 3797 3798 scsi_tgt_priv_data->num_luns++; 3799 3800 return retval; 3801 } 3802 3803 /** 3804 * mpi3mr_target_alloc - Target alloc callback handler 3805 * @starget: SCSI target reference 3806 * 3807 * Allocate per target private data and initialize it. 3808 * 3809 * Return: 0 on success -ENOMEM on memory allocation failure. 
3810 */ 3811 static int mpi3mr_target_alloc(struct scsi_target *starget) 3812 { 3813 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 3814 struct mpi3mr_ioc *mrioc = shost_priv(shost); 3815 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 3816 struct mpi3mr_tgt_dev *tgt_dev; 3817 unsigned long flags; 3818 int retval = 0; 3819 3820 scsi_tgt_priv_data = kzalloc(sizeof(*scsi_tgt_priv_data), GFP_KERNEL); 3821 if (!scsi_tgt_priv_data) 3822 return -ENOMEM; 3823 3824 starget->hostdata = scsi_tgt_priv_data; 3825 3826 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 3827 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 3828 if (tgt_dev && !tgt_dev->is_hidden) { 3829 scsi_tgt_priv_data->starget = starget; 3830 scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle; 3831 scsi_tgt_priv_data->perst_id = tgt_dev->perst_id; 3832 scsi_tgt_priv_data->dev_type = tgt_dev->dev_type; 3833 scsi_tgt_priv_data->tgt_dev = tgt_dev; 3834 tgt_dev->starget = starget; 3835 atomic_set(&scsi_tgt_priv_data->block_io, 0); 3836 retval = 0; 3837 } else 3838 retval = -ENXIO; 3839 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 3840 3841 return retval; 3842 } 3843 3844 /** 3845 * mpi3mr_check_return_unmap - Whether an unmap is allowed 3846 * @mrioc: Adapter instance reference 3847 * @scmd: SCSI Command reference 3848 * 3849 * The controller hardware cannot handle certain unmap commands 3850 * for NVMe drives, this routine checks those and return true 3851 * and completes the SCSI command with proper status and sense 3852 * data. 3853 * 3854 * Return: TRUE for not allowed unmap, FALSE otherwise. 
 */
static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd)
{
	unsigned char *buf;
	u16 param_len, desc_len, trunc_param_len;

	/* PARAMETER LIST LENGTH from the UNMAP CDB (bytes 7-8) */
	trunc_param_len = param_len = get_unaligned_be16(scmd->cmnd + 7);

	/*
	 * Controllers with a non-zero PCI revision only need the parameter
	 * list trimmed to a 16-byte multiple past the 8-byte header;
	 * the command itself is always let through (return false).
	 */
	if (mrioc->pdev->revision) {
		if ((param_len > 24) && ((param_len - 8) & 0xF)) {
			trunc_param_len -= (param_len - 8) & 0xF;
			dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR);
			dprint_scsi_err(mrioc,
			    "truncating param_len from (%d) to (%d)\n",
			    param_len, trunc_param_len);
			put_unaligned_be16(trunc_param_len, scmd->cmnd + 7);
			dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR);
		}
		return false;
	}

	/* Zero-length UNMAP is a no-op: complete it successfully here */
	if (!param_len) {
		ioc_warn(mrioc,
		    "%s: cdb received with zero parameter length\n",
		    __func__);
		scsi_print_command(scmd);
		scmd->result = DID_OK << 16;
		scsi_done(scmd);
		return true;
	}

	/* Shorter than header + one block descriptor: ILLEGAL REQUEST */
	if (param_len < 24) {
		ioc_warn(mrioc,
		    "%s: cdb received with invalid param_len: %d\n",
		    __func__, param_len);
		scsi_print_command(scmd);
		scmd->result = SAM_STAT_CHECK_CONDITION;
		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
		    0x1A, 0);
		scsi_done(scmd);
		return true;
	}
	/* CDB length must match the actual data-out buffer length */
	if (param_len != scsi_bufflen(scmd)) {
		ioc_warn(mrioc,
		    "%s: cdb received with param_len: %d bufflen: %d\n",
		    __func__, param_len, scsi_bufflen(scmd));
		scsi_print_command(scmd);
		scmd->result = SAM_STAT_CHECK_CONDITION;
		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
		    0x1A, 0);
		scsi_done(scmd);
		return true;
	}
	/* GFP_ATOMIC: queuecommand may run in non-sleepable context */
	buf = kzalloc(scsi_bufflen(scmd), GFP_ATOMIC);
	if (!buf) {
		scsi_print_command(scmd);
		scmd->result = SAM_STAT_CHECK_CONDITION;
		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
		    0x55, 0x03);
		scsi_done(scmd);
		return true;
	}
	scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
	/* BLOCK DESCRIPTOR DATA LENGTH from the parameter list header */
	desc_len = get_unaligned_be16(&buf[2]);

	if (desc_len < 16) {
		ioc_warn(mrioc,
		    "%s: Invalid descriptor length in param list: %d\n",
		    __func__, desc_len);
		scsi_print_command(scmd);
		scmd->result = SAM_STAT_CHECK_CONDITION;
		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
		    0x26, 0);
		scsi_done(scmd);
		kfree(buf);
		return true;
	}

	/* Trim trailing bytes beyond the descriptors the list declares */
	if (param_len > (desc_len + 8)) {
		trunc_param_len = desc_len + 8;
		scsi_print_command(scmd);
		dprint_scsi_err(mrioc,
		    "truncating param_len(%d) to desc_len+8(%d)\n",
		    param_len, trunc_param_len);
		put_unaligned_be16(trunc_param_len, scmd->cmnd + 7);
		scsi_print_command(scmd);
	}

	kfree(buf);
	return false;
}

/**
 * mpi3mr_allow_scmd_to_fw - Command is allowed during shutdown
 * @scmd: SCSI Command reference
 *
 * Checks whether a cdb is allowed during shutdown or not.
 *
 * Return: TRUE for allowed commands, FALSE otherwise.
 */

inline bool mpi3mr_allow_scmd_to_fw(struct scsi_cmnd *scmd)
{
	/* Only cache sync and start/stop are forwarded once shutdown began */
	switch (scmd->cmnd[0]) {
	case SYNCHRONIZE_CACHE:
	case START_STOP:
		return true;
	default:
		return false;
	}
}

/**
 * mpi3mr_qcmd - I/O request dispatcher
 * @shost: SCSI Host reference
 * @scmd: SCSI Command reference
 *
 * Issues the SCSI Command as an MPI3 request.
 *
 * Return: 0 on successful queueing of the request or if the
 *         request is completed with failure.
 *         SCSI_MLQUEUE_DEVICE_BUSY when the device is busy.
 *         SCSI_MLQUEUE_HOST_BUSY when the host queue is full.
 */
static int mpi3mr_qcmd(struct Scsi_Host *shost,
	struct scsi_cmnd *scmd)
{
	struct mpi3mr_ioc *mrioc = shost_priv(shost);
	struct mpi3mr_stgt_priv_data *stgt_priv_data;
	struct mpi3mr_sdev_priv_data *sdev_priv_data;
	struct scmd_priv *scmd_priv_data = NULL;
	struct mpi3_scsi_io_request *scsiio_req = NULL;
	struct op_req_qinfo *op_req_q = NULL;
	int retval = 0;
	u16 dev_handle;
	u16 host_tag;
	u32 scsiio_flags = 0;
	struct request *rq = scsi_cmd_to_rq(scmd);
	int iprio_class;
	u8 is_pcie_dev = 0;

	/* Dead controller: fail the command immediately */
	if (mrioc->unrecoverable) {
		scmd->result = DID_ERROR << 16;
		scsi_done(scmd);
		goto out;
	}

	sdev_priv_data = scmd->device->hostdata;
	if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		goto out;
	}

	/* During shutdown only whitelisted cdbs go to the firmware */
	if (mrioc->stop_drv_processing &&
	    !(mpi3mr_allow_scmd_to_fw(scmd))) {
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		goto out;
	}

	/* Reset underway: ask the midlayer to retry later */
	if (mrioc->reset_in_progress) {
		retval = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	stgt_priv_data = sdev_priv_data->tgt_priv_data;

	dev_handle = stgt_priv_data->dev_handle;
	if (dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		goto out;
	}
	if (stgt_priv_data->dev_removed) {
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		goto out;
	}

	/* I/O to this target is temporarily blocked (e.g. TM in flight) */
	if (atomic_read(&stgt_priv_data->block_io)) {
		if (mrioc->stop_drv_processing) {
			scmd->result = DID_NO_CONNECT << 16;
			scsi_done(scmd);
			goto out;
		}
		retval = SCSI_MLQUEUE_DEVICE_BUSY;
		goto out;
	}

	if (stgt_priv_data->dev_type == MPI3_DEVICE_DEVFORM_PCIE)
		is_pcie_dev = 1;
	/*
	 * UNMAP to NVMe devices behind SAS4116 needs validation; the check
	 * completes the scmd itself and returns true when it is disallowed.
	 */
	if ((scmd->cmnd[0] == UNMAP) && is_pcie_dev &&
	    (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
	    mpi3mr_check_return_unmap(mrioc, scmd))
		goto out;

	host_tag = mpi3mr_host_tag_for_scmd(mrioc, scmd);
	if (host_tag == MPI3MR_HOSTTAG_INVALID) {
		scmd->result = DID_ERROR << 16;
		scsi_done(scmd);
		goto out;
	}

	if (scmd->sc_data_direction == DMA_FROM_DEVICE)
		scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_READ;
	else if (scmd->sc_data_direction == DMA_TO_DEVICE)
		scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE;
	else
		scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER;

	scsiio_flags |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ;

	/* Elevate command priority for RT ioprio when NCQ prio is enabled */
	if (sdev_priv_data->ncq_prio_enable) {
		iprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
		if (iprio_class == IOPRIO_CLASS_RT)
			scsiio_flags |= 1 << MPI3_SCSIIO_FLAGS_CMDPRI_SHIFT;
	}

	if (scmd->cmd_len > 16)
		scsiio_flags |= MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16;

	/* Build the MPI3 SCSI IO request in the per-command frame */
	scmd_priv_data = scsi_cmd_priv(scmd);
	memset(scmd_priv_data->mpi3mr_scsiio_req, 0, MPI3MR_ADMIN_REQ_FRAME_SZ);
	scsiio_req = (struct mpi3_scsi_io_request *)scmd_priv_data->mpi3mr_scsiio_req;
	scsiio_req->function = MPI3_FUNCTION_SCSI_IO;
	scsiio_req->host_tag = cpu_to_le16(host_tag);

	mpi3mr_setup_eedp(mrioc, scmd, scsiio_req);

	memcpy(scsiio_req->cdb.cdb32, scmd->cmnd, scmd->cmd_len);
	scsiio_req->data_length = cpu_to_le32(scsi_bufflen(scmd));
	scsiio_req->dev_handle = cpu_to_le16(dev_handle);
	scsiio_req->flags = cpu_to_le32(scsiio_flags);
	int_to_scsilun(sdev_priv_data->lun_id,
	    (struct scsi_lun *)scsiio_req->lun);

	/* SGL build failure: release the tag and retry from the midlayer */
	if (mpi3mr_build_sg_scmd(mrioc, scmd, scsiio_req)) {
		mpi3mr_clear_scmd_priv(mrioc, scmd);
		retval = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	op_req_q = &mrioc->req_qinfo[scmd_priv_data->req_q_idx];

	if (mpi3mr_op_request_post(mrioc, op_req_q,
	    scmd_priv_data->mpi3mr_scsiio_req)) {
		mpi3mr_clear_scmd_priv(mrioc, scmd);
		retval = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

out:
	return retval;
}

/* SCSI midlayer host template; several limits (e.g. can_queue) are
 * placeholders here — presumably refined after IOC init reports actual
 * capabilities. NOTE(review): confirm where can_queue is updated.
 */
static struct scsi_host_template mpi3mr_driver_template = {
	.module = THIS_MODULE,
	.name = "MPI3 Storage Controller",
	.proc_name = MPI3MR_DRIVER_NAME,
	.queuecommand = mpi3mr_qcmd,
	.target_alloc = mpi3mr_target_alloc,
	.slave_alloc = mpi3mr_slave_alloc,
	.slave_configure = mpi3mr_slave_configure,
	.target_destroy = mpi3mr_target_destroy,
	.slave_destroy = mpi3mr_slave_destroy,
	.scan_finished = mpi3mr_scan_finished,
	.scan_start = mpi3mr_scan_start,
	.change_queue_depth = mpi3mr_change_queue_depth,
	.eh_device_reset_handler = mpi3mr_eh_dev_reset,
	.eh_target_reset_handler = mpi3mr_eh_target_reset,
	.eh_host_reset_handler = mpi3mr_eh_host_reset,
	.bios_param = mpi3mr_bios_param,
	.map_queues = mpi3mr_map_queues,
	.mq_poll = mpi3mr_blk_mq_poll,
	.no_write_same = 1,
	.can_queue = 1,
	.this_id = -1,
	.sg_tablesize = MPI3MR_SG_DEPTH,
	/* max xfer supported is 1M (2K in 512 byte sized sectors)
	 */
	.max_sectors = 2048,
	.cmd_per_lun = MPI3MR_MAX_CMDS_LUN,
	.max_segment_size = 0xffffffff,
	.track_queue_depth = 1,
	.cmd_size = sizeof(struct scmd_priv),
	.shost_groups = mpi3mr_host_groups,
	.sdev_groups = mpi3mr_dev_groups,
};

/**
 * mpi3mr_init_drv_cmd - Initialize internal command tracker
 * @cmdptr: Internal command tracker
 * @host_tag: Host tag used for the specific command
 *
 * Initialize the internal command tracker structure with
 * specified host tag.
 *
 * Return: Nothing.
4154 */ 4155 static inline void mpi3mr_init_drv_cmd(struct mpi3mr_drv_cmd *cmdptr, 4156 u16 host_tag) 4157 { 4158 mutex_init(&cmdptr->mutex); 4159 cmdptr->reply = NULL; 4160 cmdptr->state = MPI3MR_CMD_NOTUSED; 4161 cmdptr->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 4162 cmdptr->host_tag = host_tag; 4163 } 4164 4165 /** 4166 * osintfc_mrioc_security_status -Check controller secure status 4167 * @pdev: PCI device instance 4168 * 4169 * Read the Device Serial Number capability from PCI config 4170 * space and decide whether the controller is secure or not. 4171 * 4172 * Return: 0 on success, non-zero on failure. 4173 */ 4174 static int 4175 osintfc_mrioc_security_status(struct pci_dev *pdev) 4176 { 4177 u32 cap_data; 4178 int base; 4179 u32 ctlr_status; 4180 u32 debug_status; 4181 int retval = 0; 4182 4183 base = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN); 4184 if (!base) { 4185 dev_err(&pdev->dev, 4186 "%s: PCI_EXT_CAP_ID_DSN is not supported\n", __func__); 4187 return -1; 4188 } 4189 4190 pci_read_config_dword(pdev, base + 4, &cap_data); 4191 4192 debug_status = cap_data & MPI3MR_CTLR_SECURE_DBG_STATUS_MASK; 4193 ctlr_status = cap_data & MPI3MR_CTLR_SECURITY_STATUS_MASK; 4194 4195 switch (ctlr_status) { 4196 case MPI3MR_INVALID_DEVICE: 4197 dev_err(&pdev->dev, 4198 "%s: Non secure ctlr (Invalid) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n", 4199 __func__, pdev->device, pdev->subsystem_vendor, 4200 pdev->subsystem_device); 4201 retval = -1; 4202 break; 4203 case MPI3MR_CONFIG_SECURE_DEVICE: 4204 if (!debug_status) 4205 dev_info(&pdev->dev, 4206 "%s: Config secure ctlr is detected\n", 4207 __func__); 4208 break; 4209 case MPI3MR_HARD_SECURE_DEVICE: 4210 break; 4211 case MPI3MR_TAMPERED_DEVICE: 4212 dev_err(&pdev->dev, 4213 "%s: Non secure ctlr (Tampered) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n", 4214 __func__, pdev->device, pdev->subsystem_vendor, 4215 pdev->subsystem_device); 4216 retval = -1; 4217 break; 4218 default: 4219 retval = -1; 4220 break; 
4221 } 4222 4223 if (!retval && debug_status) { 4224 dev_err(&pdev->dev, 4225 "%s: Non secure ctlr (Secure Dbg) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n", 4226 __func__, pdev->device, pdev->subsystem_vendor, 4227 pdev->subsystem_device); 4228 retval = -1; 4229 } 4230 4231 return retval; 4232 } 4233 4234 /** 4235 * mpi3mr_probe - PCI probe callback 4236 * @pdev: PCI device instance 4237 * @id: PCI device ID details 4238 * 4239 * controller initialization routine. Checks the security status 4240 * of the controller and if it is invalid or tampered return the 4241 * probe without initializing the controller. Otherwise, 4242 * allocate per adapter instance through shost_priv and 4243 * initialize controller specific data structures, initializae 4244 * the controller hardware, add shost to the SCSI subsystem. 4245 * 4246 * Return: 0 on success, non-zero on failure. 4247 */ 4248 4249 static int 4250 mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id) 4251 { 4252 struct mpi3mr_ioc *mrioc = NULL; 4253 struct Scsi_Host *shost = NULL; 4254 int retval = 0, i; 4255 4256 if (osintfc_mrioc_security_status(pdev)) { 4257 warn_non_secure_ctlr = 1; 4258 return 1; /* For Invalid and Tampered device */ 4259 } 4260 4261 shost = scsi_host_alloc(&mpi3mr_driver_template, 4262 sizeof(struct mpi3mr_ioc)); 4263 if (!shost) { 4264 retval = -ENODEV; 4265 goto shost_failed; 4266 } 4267 4268 mrioc = shost_priv(shost); 4269 mrioc->id = mrioc_ids++; 4270 sprintf(mrioc->driver_name, "%s", MPI3MR_DRIVER_NAME); 4271 sprintf(mrioc->name, "%s%d", mrioc->driver_name, mrioc->id); 4272 INIT_LIST_HEAD(&mrioc->list); 4273 spin_lock(&mrioc_list_lock); 4274 list_add_tail(&mrioc->list, &mrioc_list); 4275 spin_unlock(&mrioc_list_lock); 4276 4277 spin_lock_init(&mrioc->admin_req_lock); 4278 spin_lock_init(&mrioc->reply_free_queue_lock); 4279 spin_lock_init(&mrioc->sbq_lock); 4280 spin_lock_init(&mrioc->fwevt_lock); 4281 spin_lock_init(&mrioc->tgtdev_lock); 4282 
spin_lock_init(&mrioc->watchdog_lock); 4283 spin_lock_init(&mrioc->chain_buf_lock); 4284 4285 INIT_LIST_HEAD(&mrioc->fwevt_list); 4286 INIT_LIST_HEAD(&mrioc->tgtdev_list); 4287 INIT_LIST_HEAD(&mrioc->delayed_rmhs_list); 4288 INIT_LIST_HEAD(&mrioc->delayed_evtack_cmds_list); 4289 4290 mutex_init(&mrioc->reset_mutex); 4291 mpi3mr_init_drv_cmd(&mrioc->init_cmds, MPI3MR_HOSTTAG_INITCMDS); 4292 mpi3mr_init_drv_cmd(&mrioc->host_tm_cmds, MPI3MR_HOSTTAG_BLK_TMS); 4293 mpi3mr_init_drv_cmd(&mrioc->bsg_cmds, MPI3MR_HOSTTAG_BSG_CMDS); 4294 4295 for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) 4296 mpi3mr_init_drv_cmd(&mrioc->dev_rmhs_cmds[i], 4297 MPI3MR_HOSTTAG_DEVRMCMD_MIN + i); 4298 4299 if (pdev->revision) 4300 mrioc->enable_segqueue = true; 4301 4302 init_waitqueue_head(&mrioc->reset_waitq); 4303 mrioc->logging_level = logging_level; 4304 mrioc->shost = shost; 4305 mrioc->pdev = pdev; 4306 mrioc->stop_bsgs = 1; 4307 4308 /* init shost parameters */ 4309 shost->max_cmd_len = MPI3MR_MAX_CDB_LENGTH; 4310 shost->max_lun = -1; 4311 shost->unique_id = mrioc->id; 4312 4313 shost->max_channel = 0; 4314 shost->max_id = 0xFFFFFFFF; 4315 4316 if (prot_mask >= 0) 4317 scsi_host_set_prot(shost, prot_mask); 4318 else { 4319 prot_mask = SHOST_DIF_TYPE1_PROTECTION 4320 | SHOST_DIF_TYPE2_PROTECTION 4321 | SHOST_DIF_TYPE3_PROTECTION; 4322 scsi_host_set_prot(shost, prot_mask); 4323 } 4324 4325 ioc_info(mrioc, 4326 "%s :host protection capabilities enabled %s%s%s%s%s%s%s\n", 4327 __func__, 4328 (prot_mask & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "", 4329 (prot_mask & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "", 4330 (prot_mask & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "", 4331 (prot_mask & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "", 4332 (prot_mask & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "", 4333 (prot_mask & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "", 4334 (prot_mask & SHOST_DIX_TYPE3_PROTECTION) ? 
" DIX3" : ""); 4335 4336 if (prot_guard_mask) 4337 scsi_host_set_guard(shost, (prot_guard_mask & 3)); 4338 else 4339 scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); 4340 4341 snprintf(mrioc->fwevt_worker_name, sizeof(mrioc->fwevt_worker_name), 4342 "%s%d_fwevt_wrkr", mrioc->driver_name, mrioc->id); 4343 mrioc->fwevt_worker_thread = alloc_ordered_workqueue( 4344 mrioc->fwevt_worker_name, 0); 4345 if (!mrioc->fwevt_worker_thread) { 4346 ioc_err(mrioc, "failure at %s:%d/%s()!\n", 4347 __FILE__, __LINE__, __func__); 4348 retval = -ENODEV; 4349 goto fwevtthread_failed; 4350 } 4351 4352 mrioc->is_driver_loading = 1; 4353 mrioc->cpu_count = num_online_cpus(); 4354 if (mpi3mr_setup_resources(mrioc)) { 4355 ioc_err(mrioc, "setup resources failed\n"); 4356 retval = -ENODEV; 4357 goto resource_alloc_failed; 4358 } 4359 if (mpi3mr_init_ioc(mrioc)) { 4360 ioc_err(mrioc, "initializing IOC failed\n"); 4361 retval = -ENODEV; 4362 goto init_ioc_failed; 4363 } 4364 4365 shost->nr_hw_queues = mrioc->num_op_reply_q; 4366 if (mrioc->active_poll_qcount) 4367 shost->nr_maps = 3; 4368 4369 shost->can_queue = mrioc->max_host_ios; 4370 shost->sg_tablesize = MPI3MR_SG_DEPTH; 4371 shost->max_id = mrioc->facts.max_perids + 1; 4372 4373 retval = scsi_add_host(shost, &pdev->dev); 4374 if (retval) { 4375 ioc_err(mrioc, "failure at %s:%d/%s()!\n", 4376 __FILE__, __LINE__, __func__); 4377 goto addhost_failed; 4378 } 4379 4380 scsi_scan_host(shost); 4381 mpi3mr_bsg_init(mrioc); 4382 return retval; 4383 4384 addhost_failed: 4385 mpi3mr_stop_watchdog(mrioc); 4386 mpi3mr_cleanup_ioc(mrioc); 4387 init_ioc_failed: 4388 mpi3mr_free_mem(mrioc); 4389 mpi3mr_cleanup_resources(mrioc); 4390 resource_alloc_failed: 4391 destroy_workqueue(mrioc->fwevt_worker_thread); 4392 fwevtthread_failed: 4393 spin_lock(&mrioc_list_lock); 4394 list_del(&mrioc->list); 4395 spin_unlock(&mrioc_list_lock); 4396 scsi_host_put(shost); 4397 shost_failed: 4398 return retval; 4399 } 4400 4401 /** 4402 * mpi3mr_remove - PCI remove 
 callback
 * @pdev: PCI device instance
 *
 * Cleanup the IOC by issuing MUR and shutdown notification.
 * Free up all memory and resources associated with the
 * controller and target devices, unregister the shost.
 *
 * Return: Nothing.
 */
static void mpi3mr_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct mpi3mr_ioc *mrioc;
	struct workqueue_struct *wq;
	unsigned long flags;
	struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;

	if (!shost)
		return;

	mrioc = shost_priv(shost);
	/* Wait for any in-flight reset or initial load to finish before
	 * tearing down.
	 * NOTE(review): this wait is unbounded - if a reset never clears
	 * the flag, remove blocks forever; consider a timeout. Verify.
	 */
	while (mrioc->reset_in_progress || mrioc->is_driver_loading)
		ssleep(1);

	mpi3mr_bsg_exit(mrioc);
	mrioc->stop_drv_processing = 1;
	mpi3mr_cleanup_fwevt_list(mrioc);
	/* Detach the event worker under the lock so no new work can be
	 * queued, then destroy it outside the lock.
	 */
	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	wq = mrioc->fwevt_worker_thread;
	mrioc->fwevt_worker_thread = NULL;
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
	if (wq)
		destroy_workqueue(wq);
	scsi_remove_host(shost);

	/* Drop every target device: remove from SML, unlink, release ref. */
	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
	    list) {
		mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
		mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
		mpi3mr_tgtdev_put(tgtdev);
	}
	mpi3mr_stop_watchdog(mrioc);
	mpi3mr_cleanup_ioc(mrioc);
	mpi3mr_free_mem(mrioc);
	mpi3mr_cleanup_resources(mrioc);

	/* Unlink this adapter from the global driver list. */
	spin_lock(&mrioc_list_lock);
	list_del(&mrioc->list);
	spin_unlock(&mrioc_list_lock);

	scsi_host_put(shost);
}

/**
 * mpi3mr_shutdown - PCI shutdown callback
 * @pdev: PCI device instance
 *
 * Free up all memory and resources associated with the
 * controller
 *
 * Return: Nothing.
 */
static void mpi3mr_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct mpi3mr_ioc *mrioc;
	struct workqueue_struct *wq;
	unsigned long flags;

	if (!shost)
		return;

	mrioc = shost_priv(shost);
	/* Wait for any in-flight reset or initial load before shutdown
	 * (unbounded wait, same pattern as remove).
	 */
	while (mrioc->reset_in_progress || mrioc->is_driver_loading)
		ssleep(1);

	mrioc->stop_drv_processing = 1;
	mpi3mr_cleanup_fwevt_list(mrioc);
	/* Detach the event worker under the lock, destroy it outside. */
	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	wq = mrioc->fwevt_worker_thread;
	mrioc->fwevt_worker_thread = NULL;
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
	if (wq)
		destroy_workqueue(wq);

	/* Unlike remove, the shost stays registered; only quiesce the IOC
	 * and release hardware resources.
	 */
	mpi3mr_stop_watchdog(mrioc);
	mpi3mr_cleanup_ioc(mrioc);
	mpi3mr_cleanup_resources(mrioc);
}

#ifdef CONFIG_PM
/**
 * mpi3mr_suspend - PCI power management suspend callback
 * @pdev: PCI device instance
 * @state: New power state
 *
 * Change the power state to the given value and cleanup the IOC
 * by issuing MUR and shutdown notification
 *
 * Return: 0 always.
 */
static int mpi3mr_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct mpi3mr_ioc *mrioc;
	pci_power_t device_state;

	if (!shost)
		return 0;

	mrioc = shost_priv(shost);
	/* Wait out any in-flight reset or initial load (unbounded wait). */
	while (mrioc->reset_in_progress || mrioc->is_driver_loading)
		ssleep(1);
	mrioc->stop_drv_processing = 1;
	mpi3mr_cleanup_fwevt_list(mrioc);
	/* Block new SCSI requests before quiescing the IOC. */
	scsi_block_requests(shost);
	mpi3mr_stop_watchdog(mrioc);
	mpi3mr_cleanup_ioc(mrioc);

	device_state = pci_choose_state(pdev, state);
	ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
	    pdev, pci_name(pdev), device_state);
	pci_save_state(pdev);
	mpi3mr_cleanup_resources(mrioc);
	pci_set_power_state(pdev, device_state);

	return 0;
}

/**
 * mpi3mr_resume - PCI power management resume callback
 * @pdev: PCI device instance
 *
 * Restore the power state to D0 and reinitialize the controller
 * and resume I/O operations to the target devices
 *
 * Return: 0 on success, non-zero on failure
 */
static int mpi3mr_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct mpi3mr_ioc *mrioc;
	pci_power_t device_state = pdev->current_state;
	int r;

	if (!shost)
		return 0;

	mrioc = shost_priv(shost);

	ioc_info(mrioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
	    pdev, pci_name(pdev), device_state);
	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);
	mrioc->pdev = pdev;
	mrioc->cpu_count = num_online_cpus();
	r = mpi3mr_setup_resources(mrioc);
	if (r) {
		ioc_info(mrioc, "%s: Setup resources failed[%d]\n",
		    __func__, r);
		return r;
	}

	mrioc->stop_drv_processing = 0;
	mpi3mr_memset_buffers(mrioc);
	/* Second argument selects the resume (re-init) path. */
	r = mpi3mr_reinit_ioc(mrioc, 1);
	if (r) {
		ioc_err(mrioc, "resuming controller failed[%d]\n", r);
		return r;
	}
	scsi_unblock_requests(shost);
	mpi3mr_start_watchdog(mrioc);

	return 0;
}
#endif
/* NOTE(review): these are the legacy pci_driver suspend/resume hooks;
 * newer kernels prefer a struct dev_pm_ops (driver.pm) - confirm target
 * kernel before converting.
 */

static const struct pci_device_id mpi3mr_pci_id_table[] = {
	{
		PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM,
		    MPI3_MFGPAGE_DEVID_SAS4116, PCI_ANY_ID, PCI_ANY_ID)
	},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table);

static struct pci_driver mpi3mr_pci_driver = {
	.name = MPI3MR_DRIVER_NAME,
	.id_table = mpi3mr_pci_id_table,
	.probe = mpi3mr_probe,
	.remove = mpi3mr_remove,
	.shutdown = mpi3mr_shutdown,
#ifdef CONFIG_PM
	.suspend = mpi3mr_suspend,
	.resume = mpi3mr_resume,
#endif
};

/**
 * event_counter_show - sysfs show callback for the driver-level
 * event_counter attribute
 * @dd: Driver instance
 * @buf: PAGE_SIZE sysfs output buffer
 *
 * Return: Number of bytes written to @buf.
 *
 * NOTE(review): sysfs_emit() is preferred over sprintf() in show
 * callbacks on newer kernels - confirm minimum kernel version.
 */
static ssize_t event_counter_show(struct device_driver *dd, char *buf)
{
	return sprintf(buf, "%llu\n", atomic64_read(&event_counter));
}
static DRIVER_ATTR_RO(event_counter);

/**
 * mpi3mr_init - Module load entry point
 *
 * Register the PCI driver and create the driver-level
 * event_counter sysfs attribute; unregister the driver if the
 * attribute creation fails.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int __init mpi3mr_init(void)
{
	int ret_val;

	pr_info("Loading %s version %s\n", MPI3MR_DRIVER_NAME,
	    MPI3MR_DRIVER_VERSION);

	ret_val = pci_register_driver(&mpi3mr_pci_driver);
	if (ret_val) {
		pr_err("%s failed to load due to pci register driver failure\n",
		    MPI3MR_DRIVER_NAME);
		return ret_val;
	}

	ret_val = driver_create_file(&mpi3mr_pci_driver.driver,
	    &driver_attr_event_counter);
	if (ret_val)
		pci_unregister_driver(&mpi3mr_pci_driver);

	return ret_val;
}

/**
 * mpi3mr_exit - Module unload entry point
 *
 * Warn if a non-secure controller was being managed, remove the
 * event_counter sysfs attribute and unregister the PCI driver.
 *
 * Return: Nothing.
 */
static void __exit mpi3mr_exit(void)
{
	if (warn_non_secure_ctlr)
		pr_warn(
		    "Unloading %s version %s while managing a non secure controller\n",
		    MPI3MR_DRIVER_NAME, MPI3MR_DRIVER_VERSION);
	else
		pr_info("Unloading %s version %s\n", MPI3MR_DRIVER_NAME,
		    MPI3MR_DRIVER_VERSION);

	driver_remove_file(&mpi3mr_pci_driver.driver,
	    &driver_attr_event_counter);
	pci_unregister_driver(&mpi3mr_pci_driver);
}

module_init(mpi3mr_init);
module_exit(mpi3mr_exit);