1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Driver for Broadcom MPI3 Storage Controllers 4 * 5 * Copyright (C) 2017-2022 Broadcom Inc. 6 * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com) 7 * 8 */ 9 10 #include "mpi3mr.h" 11 12 /* global driver scop variables */ 13 LIST_HEAD(mrioc_list); 14 DEFINE_SPINLOCK(mrioc_list_lock); 15 static int mrioc_ids; 16 static int warn_non_secure_ctlr; 17 atomic64_t event_counter; 18 19 MODULE_AUTHOR(MPI3MR_DRIVER_AUTHOR); 20 MODULE_DESCRIPTION(MPI3MR_DRIVER_DESC); 21 MODULE_LICENSE(MPI3MR_DRIVER_LICENSE); 22 MODULE_VERSION(MPI3MR_DRIVER_VERSION); 23 24 /* Module parameters*/ 25 int prot_mask = -1; 26 module_param(prot_mask, int, 0); 27 MODULE_PARM_DESC(prot_mask, "Host protection capabilities mask, def=0x07"); 28 29 static int prot_guard_mask = 3; 30 module_param(prot_guard_mask, int, 0); 31 MODULE_PARM_DESC(prot_guard_mask, " Host protection guard mask, def=3"); 32 static int logging_level; 33 module_param(logging_level, int, 0); 34 MODULE_PARM_DESC(logging_level, 35 " bits for enabling additional logging info (default=0)"); 36 37 /* Forward declarations*/ 38 static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event, 39 struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx); 40 41 #define MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION (0xFFFF) 42 43 /** 44 * mpi3mr_host_tag_for_scmd - Get host tag for a scmd 45 * @mrioc: Adapter instance reference 46 * @scmd: SCSI command reference 47 * 48 * Calculate the host tag based on block tag for a given scmd. 49 * 50 * Return: Valid host tag or MPI3MR_HOSTTAG_INVALID. 51 */ 52 static u16 mpi3mr_host_tag_for_scmd(struct mpi3mr_ioc *mrioc, 53 struct scsi_cmnd *scmd) 54 { 55 struct scmd_priv *priv = NULL; 56 u32 unique_tag; 57 u16 host_tag, hw_queue; 58 59 unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); 60 61 hw_queue = blk_mq_unique_tag_to_hwq(unique_tag); 62 if (hw_queue >= mrioc->num_op_reply_q) 63 return MPI3MR_HOSTTAG_INVALID; 64 host_tag = blk_mq_unique_tag_to_tag(unique_tag); 65 66 if (WARN_ON(host_tag >= mrioc->max_host_ios)) 67 return MPI3MR_HOSTTAG_INVALID; 68 69 priv = scsi_cmd_priv(scmd); 70 /*host_tag 0 is invalid hence incrementing by 1*/ 71 priv->host_tag = host_tag + 1; 72 priv->scmd = scmd; 73 priv->in_lld_scope = 1; 74 priv->req_q_idx = hw_queue; 75 priv->meta_chain_idx = -1; 76 priv->chain_idx = -1; 77 priv->meta_sg_valid = 0; 78 return priv->host_tag; 79 } 80 81 /** 82 * mpi3mr_scmd_from_host_tag - Get SCSI command from host tag 83 * @mrioc: Adapter instance reference 84 * @host_tag: Host tag 85 * @qidx: Operational queue index 86 * 87 * Identify the block tag from the host tag and queue index and 88 * retrieve associated scsi command using scsi_host_find_tag(). 89 * 90 * Return: SCSI command reference or NULL. 91 */ 92 static struct scsi_cmnd *mpi3mr_scmd_from_host_tag( 93 struct mpi3mr_ioc *mrioc, u16 host_tag, u16 qidx) 94 { 95 struct scsi_cmnd *scmd = NULL; 96 struct scmd_priv *priv = NULL; 97 u32 unique_tag = host_tag - 1; 98 99 if (WARN_ON(host_tag > mrioc->max_host_ios)) 100 goto out; 101 102 unique_tag |= (qidx << BLK_MQ_UNIQUE_TAG_BITS); 103 104 scmd = scsi_host_find_tag(mrioc->shost, unique_tag); 105 if (scmd) { 106 priv = scsi_cmd_priv(scmd); 107 if (!priv->in_lld_scope) 108 scmd = NULL; 109 } 110 out: 111 return scmd; 112 } 113 114 /** 115 * mpi3mr_clear_scmd_priv - Cleanup SCSI command private date 116 * @mrioc: Adapter instance reference 117 * @scmd: SCSI command reference 118 * 119 * Invalidate the SCSI command private data to mark the command 120 * is not in LLD scope anymore. 121 * 122 * Return: Nothing. 123 */ 124 static void mpi3mr_clear_scmd_priv(struct mpi3mr_ioc *mrioc, 125 struct scsi_cmnd *scmd) 126 { 127 struct scmd_priv *priv = NULL; 128 129 priv = scsi_cmd_priv(scmd); 130 131 if (WARN_ON(priv->in_lld_scope == 0)) 132 return; 133 priv->host_tag = MPI3MR_HOSTTAG_INVALID; 134 priv->req_q_idx = 0xFFFF; 135 priv->scmd = NULL; 136 priv->in_lld_scope = 0; 137 priv->meta_sg_valid = 0; 138 if (priv->chain_idx >= 0) { 139 clear_bit(priv->chain_idx, mrioc->chain_bitmap); 140 priv->chain_idx = -1; 141 } 142 if (priv->meta_chain_idx >= 0) { 143 clear_bit(priv->meta_chain_idx, mrioc->chain_bitmap); 144 priv->meta_chain_idx = -1; 145 } 146 } 147 148 static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle, 149 struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc); 150 static void mpi3mr_fwevt_worker(struct work_struct *work); 151 152 /** 153 * mpi3mr_fwevt_free - firmware event memory dealloctor 154 * @r: k reference pointer of the firmware event 155 * 156 * Free firmware event memory when no reference. 157 */ 158 static void mpi3mr_fwevt_free(struct kref *r) 159 { 160 kfree(container_of(r, struct mpi3mr_fwevt, ref_count)); 161 } 162 163 /** 164 * mpi3mr_fwevt_get - k reference incrementor 165 * @fwevt: Firmware event reference 166 * 167 * Increment firmware event reference count. 168 */ 169 static void mpi3mr_fwevt_get(struct mpi3mr_fwevt *fwevt) 170 { 171 kref_get(&fwevt->ref_count); 172 } 173 174 /** 175 * mpi3mr_fwevt_put - k reference decrementor 176 * @fwevt: Firmware event reference 177 * 178 * decrement firmware event reference count. 179 */ 180 static void mpi3mr_fwevt_put(struct mpi3mr_fwevt *fwevt) 181 { 182 kref_put(&fwevt->ref_count, mpi3mr_fwevt_free); 183 } 184 185 /** 186 * mpi3mr_alloc_fwevt - Allocate firmware event 187 * @len: length of firmware event data to allocate 188 * 189 * Allocate firmware event with required length and initialize 190 * the reference counter. 191 * 192 * Return: firmware event reference. 193 */ 194 static struct mpi3mr_fwevt *mpi3mr_alloc_fwevt(int len) 195 { 196 struct mpi3mr_fwevt *fwevt; 197 198 fwevt = kzalloc(sizeof(*fwevt) + len, GFP_ATOMIC); 199 if (!fwevt) 200 return NULL; 201 202 kref_init(&fwevt->ref_count); 203 return fwevt; 204 } 205 206 /** 207 * mpi3mr_fwevt_add_to_list - Add firmware event to the list 208 * @mrioc: Adapter instance reference 209 * @fwevt: Firmware event reference 210 * 211 * Add the given firmware event to the firmware event list. 212 * 213 * Return: Nothing. 214 */ 215 static void mpi3mr_fwevt_add_to_list(struct mpi3mr_ioc *mrioc, 216 struct mpi3mr_fwevt *fwevt) 217 { 218 unsigned long flags; 219 220 if (!mrioc->fwevt_worker_thread) 221 return; 222 223 spin_lock_irqsave(&mrioc->fwevt_lock, flags); 224 /* get fwevt reference count while adding it to fwevt_list */ 225 mpi3mr_fwevt_get(fwevt); 226 INIT_LIST_HEAD(&fwevt->list); 227 list_add_tail(&fwevt->list, &mrioc->fwevt_list); 228 INIT_WORK(&fwevt->work, mpi3mr_fwevt_worker); 229 /* get fwevt reference count while enqueueing it to worker queue */ 230 mpi3mr_fwevt_get(fwevt); 231 queue_work(mrioc->fwevt_worker_thread, &fwevt->work); 232 spin_unlock_irqrestore(&mrioc->fwevt_lock, flags); 233 } 234 235 /** 236 * mpi3mr_fwevt_del_from_list - Delete firmware event from list 237 * @mrioc: Adapter instance reference 238 * @fwevt: Firmware event reference 239 * 240 * Delete the given firmware event from the firmware event list. 241 * 242 * Return: Nothing. 243 */ 244 static void mpi3mr_fwevt_del_from_list(struct mpi3mr_ioc *mrioc, 245 struct mpi3mr_fwevt *fwevt) 246 { 247 unsigned long flags; 248 249 spin_lock_irqsave(&mrioc->fwevt_lock, flags); 250 if (!list_empty(&fwevt->list)) { 251 list_del_init(&fwevt->list); 252 /* 253 * Put fwevt reference count after 254 * removing it from fwevt_list 255 */ 256 mpi3mr_fwevt_put(fwevt); 257 } 258 spin_unlock_irqrestore(&mrioc->fwevt_lock, flags); 259 } 260 261 /** 262 * mpi3mr_dequeue_fwevt - Dequeue firmware event from the list 263 * @mrioc: Adapter instance reference 264 * 265 * Dequeue a firmware event from the firmware event list. 266 * 267 * Return: firmware event. 268 */ 269 static struct mpi3mr_fwevt *mpi3mr_dequeue_fwevt( 270 struct mpi3mr_ioc *mrioc) 271 { 272 unsigned long flags; 273 struct mpi3mr_fwevt *fwevt = NULL; 274 275 spin_lock_irqsave(&mrioc->fwevt_lock, flags); 276 if (!list_empty(&mrioc->fwevt_list)) { 277 fwevt = list_first_entry(&mrioc->fwevt_list, 278 struct mpi3mr_fwevt, list); 279 list_del_init(&fwevt->list); 280 /* 281 * Put fwevt reference count after 282 * removing it from fwevt_list 283 */ 284 mpi3mr_fwevt_put(fwevt); 285 } 286 spin_unlock_irqrestore(&mrioc->fwevt_lock, flags); 287 288 return fwevt; 289 } 290 291 /** 292 * mpi3mr_cancel_work - cancel firmware event 293 * @fwevt: fwevt object which needs to be canceled 294 * 295 * Return: Nothing. 296 */ 297 static void mpi3mr_cancel_work(struct mpi3mr_fwevt *fwevt) 298 { 299 /* 300 * Wait on the fwevt to complete. If this returns 1, then 301 * the event was never executed. 302 * 303 * If it did execute, we wait for it to finish, and the put will 304 * happen from mpi3mr_process_fwevt() 305 */ 306 if (cancel_work_sync(&fwevt->work)) { 307 /* 308 * Put fwevt reference count after 309 * dequeuing it from worker queue 310 */ 311 mpi3mr_fwevt_put(fwevt); 312 /* 313 * Put fwevt reference count to neutralize 314 * kref_init increment 315 */ 316 mpi3mr_fwevt_put(fwevt); 317 } 318 } 319 320 /** 321 * mpi3mr_cleanup_fwevt_list - Cleanup firmware event list 322 * @mrioc: Adapter instance reference 323 * 324 * Flush all pending firmware events from the firmware event 325 * list. 326 * 327 * Return: Nothing. 328 */ 329 void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc) 330 { 331 struct mpi3mr_fwevt *fwevt = NULL; 332 333 if ((list_empty(&mrioc->fwevt_list) && !mrioc->current_event) || 334 !mrioc->fwevt_worker_thread) 335 return; 336 337 while ((fwevt = mpi3mr_dequeue_fwevt(mrioc))) 338 mpi3mr_cancel_work(fwevt); 339 340 if (mrioc->current_event) { 341 fwevt = mrioc->current_event; 342 /* 343 * Don't call cancel_work_sync() API for the 344 * fwevt work if the controller reset is 345 * get called as part of processing the 346 * same fwevt work (or) when worker thread is 347 * waiting for device add/remove APIs to complete. 348 * Otherwise we will see deadlock. 349 */ 350 if (current_work() == &fwevt->work || fwevt->pending_at_sml) { 351 fwevt->discard = 1; 352 return; 353 } 354 355 mpi3mr_cancel_work(fwevt); 356 } 357 } 358 359 /** 360 * mpi3mr_queue_qd_reduction_event - Queue TG QD reduction event 361 * @mrioc: Adapter instance reference 362 * @tg: Throttle group information pointer 363 * 364 * Accessor to queue on synthetically generated driver event to 365 * the event worker thread, the driver event will be used to 366 * reduce the QD of all VDs in the TG from the worker thread. 367 * 368 * Return: None. 369 */ 370 static void mpi3mr_queue_qd_reduction_event(struct mpi3mr_ioc *mrioc, 371 struct mpi3mr_throttle_group_info *tg) 372 { 373 struct mpi3mr_fwevt *fwevt; 374 u16 sz = sizeof(struct mpi3mr_throttle_group_info *); 375 376 /* 377 * If the QD reduction event is already queued due to throttle and if 378 * the QD is not restored through device info change event 379 * then dont queue further reduction events 380 */ 381 if (tg->fw_qd != tg->modified_qd) 382 return; 383 384 fwevt = mpi3mr_alloc_fwevt(sz); 385 if (!fwevt) { 386 ioc_warn(mrioc, "failed to queue TG QD reduction event\n"); 387 return; 388 } 389 *(struct mpi3mr_throttle_group_info **)fwevt->event_data = tg; 390 fwevt->mrioc = mrioc; 391 fwevt->event_id = MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION; 392 fwevt->send_ack = 0; 393 fwevt->process_evt = 1; 394 fwevt->evt_ctx = 0; 395 fwevt->event_data_size = sz; 396 tg->modified_qd = max_t(u16, (tg->fw_qd * tg->qd_reduction) / 10, 8); 397 398 dprint_event_bh(mrioc, "qd reduction event queued for tg_id(%d)\n", 399 tg->id); 400 mpi3mr_fwevt_add_to_list(mrioc, fwevt); 401 } 402 403 /** 404 * mpi3mr_invalidate_devhandles -Invalidate device handles 405 * @mrioc: Adapter instance reference 406 * 407 * Invalidate the device handles in the target device structures 408 * . Called post reset prior to reinitializing the controller. 409 * 410 * Return: Nothing. 411 */ 412 void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc) 413 { 414 struct mpi3mr_tgt_dev *tgtdev; 415 struct mpi3mr_stgt_priv_data *tgt_priv; 416 417 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) { 418 tgtdev->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 419 if (tgtdev->starget && tgtdev->starget->hostdata) { 420 tgt_priv = tgtdev->starget->hostdata; 421 tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 422 tgt_priv->io_throttle_enabled = 0; 423 tgt_priv->io_divert = 0; 424 tgt_priv->throttle_group = NULL; 425 } 426 } 427 } 428 429 /** 430 * mpi3mr_print_scmd - print individual SCSI command 431 * @rq: Block request 432 * @data: Adapter instance reference 433 * @reserved: N/A. Currently not used 434 * 435 * Print the SCSI command details if it is in LLD scope. 436 * 437 * Return: true always. 438 */ 439 static bool mpi3mr_print_scmd(struct request *rq, 440 void *data, bool reserved) 441 { 442 struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data; 443 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq); 444 struct scmd_priv *priv = NULL; 445 446 if (scmd) { 447 priv = scsi_cmd_priv(scmd); 448 if (!priv->in_lld_scope) 449 goto out; 450 451 ioc_info(mrioc, "%s :Host Tag = %d, qid = %d\n", 452 __func__, priv->host_tag, priv->req_q_idx + 1); 453 scsi_print_command(scmd); 454 } 455 456 out: 457 return(true); 458 } 459 460 /** 461 * mpi3mr_flush_scmd - Flush individual SCSI command 462 * @rq: Block request 463 * @data: Adapter instance reference 464 * @reserved: N/A. Currently not used 465 * 466 * Return the SCSI command to the upper layers if it is in LLD 467 * scope. 468 * 469 * Return: true always. 470 */ 471 472 static bool mpi3mr_flush_scmd(struct request *rq, 473 void *data, bool reserved) 474 { 475 struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data; 476 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq); 477 struct scmd_priv *priv = NULL; 478 479 if (scmd) { 480 priv = scsi_cmd_priv(scmd); 481 if (!priv->in_lld_scope) 482 goto out; 483 484 if (priv->meta_sg_valid) 485 dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd), 486 scsi_prot_sg_count(scmd), scmd->sc_data_direction); 487 mpi3mr_clear_scmd_priv(mrioc, scmd); 488 scsi_dma_unmap(scmd); 489 scmd->result = DID_RESET << 16; 490 scsi_print_command(scmd); 491 scsi_done(scmd); 492 mrioc->flush_io_count++; 493 } 494 495 out: 496 return(true); 497 } 498 499 /** 500 * mpi3mr_count_dev_pending - Count commands pending for a lun 501 * @rq: Block request 502 * @data: SCSI device reference 503 * @reserved: Unused 504 * 505 * This is an iterator function called for each SCSI command in 506 * a host and if the command is pending in the LLD for the 507 * specific device(lun) then device specific pending I/O counter 508 * is updated in the device structure. 509 * 510 * Return: true always. 511 */ 512 513 static bool mpi3mr_count_dev_pending(struct request *rq, 514 void *data, bool reserved) 515 { 516 struct scsi_device *sdev = (struct scsi_device *)data; 517 struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata; 518 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq); 519 struct scmd_priv *priv; 520 521 if (scmd) { 522 priv = scsi_cmd_priv(scmd); 523 if (!priv->in_lld_scope) 524 goto out; 525 if (scmd->device == sdev) 526 sdev_priv_data->pend_count++; 527 } 528 529 out: 530 return true; 531 } 532 533 /** 534 * mpi3mr_count_tgt_pending - Count commands pending for target 535 * @rq: Block request 536 * @data: SCSI target reference 537 * @reserved: Unused 538 * 539 * This is an iterator function called for each SCSI command in 540 * a host and if the command is pending in the LLD for the 541 * specific target then target specific pending I/O counter is 542 * updated in the target structure. 543 * 544 * Return: true always. 545 */ 546 547 static bool mpi3mr_count_tgt_pending(struct request *rq, 548 void *data, bool reserved) 549 { 550 struct scsi_target *starget = (struct scsi_target *)data; 551 struct mpi3mr_stgt_priv_data *stgt_priv_data = starget->hostdata; 552 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq); 553 struct scmd_priv *priv; 554 555 if (scmd) { 556 priv = scsi_cmd_priv(scmd); 557 if (!priv->in_lld_scope) 558 goto out; 559 if (scmd->device && (scsi_target(scmd->device) == starget)) 560 stgt_priv_data->pend_count++; 561 } 562 563 out: 564 return true; 565 } 566 567 /** 568 * mpi3mr_flush_host_io - Flush host I/Os 569 * @mrioc: Adapter instance reference 570 * 571 * Flush all of the pending I/Os by calling 572 * blk_mq_tagset_busy_iter() for each possible tag. This is 573 * executed post controller reset 574 * 575 * Return: Nothing. 576 */ 577 void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc) 578 { 579 struct Scsi_Host *shost = mrioc->shost; 580 581 mrioc->flush_io_count = 0; 582 ioc_info(mrioc, "%s :Flushing Host I/O cmds post reset\n", __func__); 583 blk_mq_tagset_busy_iter(&shost->tag_set, 584 mpi3mr_flush_scmd, (void *)mrioc); 585 ioc_info(mrioc, "%s :Flushed %d Host I/O cmds\n", __func__, 586 mrioc->flush_io_count); 587 } 588 589 /** 590 * mpi3mr_alloc_tgtdev - target device allocator 591 * 592 * Allocate target device instance and initialize the reference 593 * count 594 * 595 * Return: target device instance. 596 */ 597 static struct mpi3mr_tgt_dev *mpi3mr_alloc_tgtdev(void) 598 { 599 struct mpi3mr_tgt_dev *tgtdev; 600 601 tgtdev = kzalloc(sizeof(*tgtdev), GFP_ATOMIC); 602 if (!tgtdev) 603 return NULL; 604 kref_init(&tgtdev->ref_count); 605 return tgtdev; 606 } 607 608 /** 609 * mpi3mr_tgtdev_add_to_list -Add tgtdevice to the list 610 * @mrioc: Adapter instance reference 611 * @tgtdev: Target device 612 * 613 * Add the target device to the target device list 614 * 615 * Return: Nothing. 616 */ 617 static void mpi3mr_tgtdev_add_to_list(struct mpi3mr_ioc *mrioc, 618 struct mpi3mr_tgt_dev *tgtdev) 619 { 620 unsigned long flags; 621 622 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 623 mpi3mr_tgtdev_get(tgtdev); 624 INIT_LIST_HEAD(&tgtdev->list); 625 list_add_tail(&tgtdev->list, &mrioc->tgtdev_list); 626 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 627 } 628 629 /** 630 * mpi3mr_tgtdev_del_from_list -Delete tgtdevice from the list 631 * @mrioc: Adapter instance reference 632 * @tgtdev: Target device 633 * 634 * Remove the target device from the target device list 635 * 636 * Return: Nothing. 637 */ 638 static void mpi3mr_tgtdev_del_from_list(struct mpi3mr_ioc *mrioc, 639 struct mpi3mr_tgt_dev *tgtdev) 640 { 641 unsigned long flags; 642 643 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 644 if (!list_empty(&tgtdev->list)) { 645 list_del_init(&tgtdev->list); 646 mpi3mr_tgtdev_put(tgtdev); 647 } 648 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 649 } 650 651 /** 652 * __mpi3mr_get_tgtdev_by_handle -Get tgtdev from device handle 653 * @mrioc: Adapter instance reference 654 * @handle: Device handle 655 * 656 * Accessor to retrieve target device from the device handle. 657 * Non Lock version 658 * 659 * Return: Target device reference. 660 */ 661 static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_handle( 662 struct mpi3mr_ioc *mrioc, u16 handle) 663 { 664 struct mpi3mr_tgt_dev *tgtdev; 665 666 assert_spin_locked(&mrioc->tgtdev_lock); 667 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) 668 if (tgtdev->dev_handle == handle) 669 goto found_tgtdev; 670 return NULL; 671 672 found_tgtdev: 673 mpi3mr_tgtdev_get(tgtdev); 674 return tgtdev; 675 } 676 677 /** 678 * mpi3mr_get_tgtdev_by_handle -Get tgtdev from device handle 679 * @mrioc: Adapter instance reference 680 * @handle: Device handle 681 * 682 * Accessor to retrieve target device from the device handle. 683 * Lock version 684 * 685 * Return: Target device reference. 686 */ 687 struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle( 688 struct mpi3mr_ioc *mrioc, u16 handle) 689 { 690 struct mpi3mr_tgt_dev *tgtdev; 691 unsigned long flags; 692 693 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 694 tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle); 695 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 696 return tgtdev; 697 } 698 699 /** 700 * __mpi3mr_get_tgtdev_by_perst_id -Get tgtdev from persist ID 701 * @mrioc: Adapter instance reference 702 * @persist_id: Persistent ID 703 * 704 * Accessor to retrieve target device from the Persistent ID. 705 * Non Lock version 706 * 707 * Return: Target device reference. 708 */ 709 static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_perst_id( 710 struct mpi3mr_ioc *mrioc, u16 persist_id) 711 { 712 struct mpi3mr_tgt_dev *tgtdev; 713 714 assert_spin_locked(&mrioc->tgtdev_lock); 715 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) 716 if (tgtdev->perst_id == persist_id) 717 goto found_tgtdev; 718 return NULL; 719 720 found_tgtdev: 721 mpi3mr_tgtdev_get(tgtdev); 722 return tgtdev; 723 } 724 725 /** 726 * mpi3mr_get_tgtdev_by_perst_id -Get tgtdev from persistent ID 727 * @mrioc: Adapter instance reference 728 * @persist_id: Persistent ID 729 * 730 * Accessor to retrieve target device from the Persistent ID. 731 * Lock version 732 * 733 * Return: Target device reference. 734 */ 735 static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_perst_id( 736 struct mpi3mr_ioc *mrioc, u16 persist_id) 737 { 738 struct mpi3mr_tgt_dev *tgtdev; 739 unsigned long flags; 740 741 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 742 tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, persist_id); 743 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 744 return tgtdev; 745 } 746 747 /** 748 * __mpi3mr_get_tgtdev_from_tgtpriv -Get tgtdev from tgt private 749 * @mrioc: Adapter instance reference 750 * @tgt_priv: Target private data 751 * 752 * Accessor to return target device from the target private 753 * data. Non Lock version 754 * 755 * Return: Target device reference. 756 */ 757 static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_from_tgtpriv( 758 struct mpi3mr_ioc *mrioc, struct mpi3mr_stgt_priv_data *tgt_priv) 759 { 760 struct mpi3mr_tgt_dev *tgtdev; 761 762 assert_spin_locked(&mrioc->tgtdev_lock); 763 tgtdev = tgt_priv->tgt_dev; 764 if (tgtdev) 765 mpi3mr_tgtdev_get(tgtdev); 766 return tgtdev; 767 } 768 769 /** 770 * mpi3mr_set_io_divert_for_all_vd_in_tg -set divert for TG VDs 771 * @mrioc: Adapter instance reference 772 * @tg: Throttle group information pointer 773 * @divert_value: 1 or 0 774 * 775 * Accessor to set io_divert flag for each device associated 776 * with the given throttle group with the given value. 777 * 778 * Return: None. 779 */ 780 static void mpi3mr_set_io_divert_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc, 781 struct mpi3mr_throttle_group_info *tg, u8 divert_value) 782 { 783 unsigned long flags; 784 struct mpi3mr_tgt_dev *tgtdev; 785 struct mpi3mr_stgt_priv_data *tgt_priv; 786 787 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 788 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) { 789 if (tgtdev->starget && tgtdev->starget->hostdata) { 790 tgt_priv = tgtdev->starget->hostdata; 791 if (tgt_priv->throttle_group == tg) 792 tgt_priv->io_divert = divert_value; 793 } 794 } 795 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 796 } 797 798 /** 799 * mpi3mr_print_device_event_notice - print notice related to post processing of 800 * device event after controller reset. 801 * 802 * @mrioc: Adapter instance reference 803 * @device_add: true for device add event and false for device removal event 804 * 805 * Return: None. 806 */ 807 static void mpi3mr_print_device_event_notice(struct mpi3mr_ioc *mrioc, 808 bool device_add) 809 { 810 ioc_notice(mrioc, "Device %s was in progress before the reset and\n", 811 (device_add ? "addition" : "removal")); 812 ioc_notice(mrioc, "completed after reset, verify whether the exposed devices\n"); 813 ioc_notice(mrioc, "are matched with attached devices for correctness\n"); 814 } 815 816 /** 817 * mpi3mr_remove_tgtdev_from_host - Remove dev from upper layers 818 * @mrioc: Adapter instance reference 819 * @tgtdev: Target device structure 820 * 821 * Checks whether the device is exposed to upper layers and if it 822 * is then remove the device from upper layers by calling 823 * scsi_remove_target(). 824 * 825 * Return: 0 on success, non zero on failure. 826 */ 827 static void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc, 828 struct mpi3mr_tgt_dev *tgtdev) 829 { 830 struct mpi3mr_stgt_priv_data *tgt_priv; 831 832 ioc_info(mrioc, "%s :Removing handle(0x%04x), wwid(0x%016llx)\n", 833 __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid); 834 if (tgtdev->starget && tgtdev->starget->hostdata) { 835 tgt_priv = tgtdev->starget->hostdata; 836 tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 837 } 838 839 if (tgtdev->starget) { 840 if (mrioc->current_event) 841 mrioc->current_event->pending_at_sml = 1; 842 scsi_remove_target(&tgtdev->starget->dev); 843 tgtdev->host_exposed = 0; 844 if (mrioc->current_event) { 845 mrioc->current_event->pending_at_sml = 0; 846 if (mrioc->current_event->discard) { 847 mpi3mr_print_device_event_notice(mrioc, false); 848 return; 849 } 850 } 851 } 852 ioc_info(mrioc, "%s :Removed handle(0x%04x), wwid(0x%016llx)\n", 853 __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid); 854 } 855 856 /** 857 * mpi3mr_report_tgtdev_to_host - Expose device to upper layers 858 * @mrioc: Adapter instance reference 859 * @perst_id: Persistent ID of the device 860 * 861 * Checks whether the device can be exposed to upper layers and 862 * if it is not then expose the device to upper layers by 863 * calling scsi_scan_target(). 864 * 865 * Return: 0 on success, non zero on failure. 866 */ 867 static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc, 868 u16 perst_id) 869 { 870 int retval = 0; 871 struct mpi3mr_tgt_dev *tgtdev; 872 873 tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id); 874 if (!tgtdev) { 875 retval = -1; 876 goto out; 877 } 878 if (tgtdev->is_hidden) { 879 retval = -1; 880 goto out; 881 } 882 if (!tgtdev->host_exposed && !mrioc->reset_in_progress) { 883 tgtdev->host_exposed = 1; 884 if (mrioc->current_event) 885 mrioc->current_event->pending_at_sml = 1; 886 scsi_scan_target(&mrioc->shost->shost_gendev, 0, 887 tgtdev->perst_id, 888 SCAN_WILD_CARD, SCSI_SCAN_INITIAL); 889 if (!tgtdev->starget) 890 tgtdev->host_exposed = 0; 891 if (mrioc->current_event) { 892 mrioc->current_event->pending_at_sml = 0; 893 if (mrioc->current_event->discard) { 894 mpi3mr_print_device_event_notice(mrioc, true); 895 goto out; 896 } 897 } 898 } 899 out: 900 if (tgtdev) 901 mpi3mr_tgtdev_put(tgtdev); 902 903 return retval; 904 } 905 906 /** 907 * mpi3mr_change_queue_depth- Change QD callback handler 908 * @sdev: SCSI device reference 909 * @q_depth: Queue depth 910 * 911 * Validate and limit QD and call scsi_change_queue_depth. 912 * 913 * Return: return value of scsi_change_queue_depth 914 */ 915 static int mpi3mr_change_queue_depth(struct scsi_device *sdev, 916 int q_depth) 917 { 918 struct scsi_target *starget = scsi_target(sdev); 919 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 920 int retval = 0; 921 922 if (!sdev->tagged_supported) 923 q_depth = 1; 924 if (q_depth > shost->can_queue) 925 q_depth = shost->can_queue; 926 else if (!q_depth) 927 q_depth = MPI3MR_DEFAULT_SDEV_QD; 928 retval = scsi_change_queue_depth(sdev, q_depth); 929 sdev->max_queue_depth = sdev->queue_depth; 930 931 return retval; 932 } 933 934 /** 935 * mpi3mr_update_sdev - Update SCSI device information 936 * @sdev: SCSI device reference 937 * @data: target device reference 938 * 939 * This is an iterator function called for each SCSI device in a 940 * target to update the target specific information into each 941 * SCSI device. 942 * 943 * Return: Nothing. 944 */ 945 static void 946 mpi3mr_update_sdev(struct scsi_device *sdev, void *data) 947 { 948 struct mpi3mr_tgt_dev *tgtdev; 949 950 tgtdev = (struct mpi3mr_tgt_dev *)data; 951 if (!tgtdev) 952 return; 953 954 mpi3mr_change_queue_depth(sdev, tgtdev->q_depth); 955 switch (tgtdev->dev_type) { 956 case MPI3_DEVICE_DEVFORM_PCIE: 957 /*The block layer hw sector size = 512*/ 958 if ((tgtdev->dev_spec.pcie_inf.dev_info & 959 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) == 960 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) { 961 blk_queue_max_hw_sectors(sdev->request_queue, 962 tgtdev->dev_spec.pcie_inf.mdts / 512); 963 if (tgtdev->dev_spec.pcie_inf.pgsz == 0) 964 blk_queue_virt_boundary(sdev->request_queue, 965 ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1)); 966 else 967 blk_queue_virt_boundary(sdev->request_queue, 968 ((1 << tgtdev->dev_spec.pcie_inf.pgsz) - 1)); 969 } 970 break; 971 default: 972 break; 973 } 974 } 975 976 /** 977 * mpi3mr_rfresh_tgtdevs - Refresh target device exposure 978 * @mrioc: Adapter instance reference 979 * 980 * This is executed post controller reset to identify any 981 * missing devices during reset and remove from the upper layers 982 * or expose any newly detected device to the upper layers. 983 * 984 * Return: Nothing. 985 */ 986 987 void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc) 988 { 989 struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next; 990 991 list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list, 992 list) { 993 if (tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) { 994 dprint_reset(mrioc, "removing target device with perst_id(%d)\n", 995 tgtdev->perst_id); 996 if (tgtdev->host_exposed) 997 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); 998 mpi3mr_tgtdev_del_from_list(mrioc, tgtdev); 999 mpi3mr_tgtdev_put(tgtdev); 1000 } 1001 } 1002 1003 tgtdev = NULL; 1004 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) { 1005 if ((tgtdev->dev_handle != MPI3MR_INVALID_DEV_HANDLE) && 1006 !tgtdev->is_hidden && !tgtdev->host_exposed) 1007 mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id); 1008 } 1009 } 1010 1011 /** 1012 * mpi3mr_update_tgtdev - DevStatusChange evt bottomhalf 1013 * @mrioc: Adapter instance reference 1014 * @tgtdev: Target device internal structure 1015 * @dev_pg0: New device page0 1016 * @is_added: Flag to indicate the device is just added 1017 * 1018 * Update the information from the device page0 into the driver 1019 * cached target device structure. 1020 * 1021 * Return: Nothing. 1022 */ 1023 static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc, 1024 struct mpi3mr_tgt_dev *tgtdev, struct mpi3_device_page0 *dev_pg0, 1025 bool is_added) 1026 { 1027 u16 flags = 0; 1028 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 1029 u8 prot_mask = 0; 1030 1031 tgtdev->perst_id = le16_to_cpu(dev_pg0->persistent_id); 1032 tgtdev->dev_handle = le16_to_cpu(dev_pg0->dev_handle); 1033 tgtdev->dev_type = dev_pg0->device_form; 1034 tgtdev->encl_handle = le16_to_cpu(dev_pg0->enclosure_handle); 1035 tgtdev->parent_handle = le16_to_cpu(dev_pg0->parent_dev_handle); 1036 tgtdev->slot = le16_to_cpu(dev_pg0->slot); 1037 tgtdev->q_depth = le16_to_cpu(dev_pg0->queue_depth); 1038 tgtdev->wwid = le64_to_cpu(dev_pg0->wwid); 1039 1040 flags = le16_to_cpu(dev_pg0->flags); 1041 tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN); 1042 1043 if (is_added == true) 1044 tgtdev->io_throttle_enabled = 1045 (flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 1 : 0; 1046 1047 1048 if (tgtdev->starget && tgtdev->starget->hostdata) { 1049 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 1050 tgtdev->starget->hostdata; 1051 scsi_tgt_priv_data->perst_id = tgtdev->perst_id; 1052 scsi_tgt_priv_data->dev_handle = tgtdev->dev_handle; 1053 scsi_tgt_priv_data->dev_type = tgtdev->dev_type; 1054 scsi_tgt_priv_data->io_throttle_enabled = 1055 tgtdev->io_throttle_enabled; 1056 } 1057 1058 switch (dev_pg0->access_status) { 1059 case MPI3_DEVICE0_ASTATUS_NO_ERRORS: 1060 case MPI3_DEVICE0_ASTATUS_PREPARE: 1061 case MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION: 1062 case MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY: 1063 break; 1064 default: 1065 tgtdev->is_hidden = 1; 1066 break; 1067 } 1068 1069 switch (tgtdev->dev_type) { 1070 case MPI3_DEVICE_DEVFORM_SAS_SATA: 1071 { 1072 struct mpi3_device0_sas_sata_format *sasinf = 1073 &dev_pg0->device_specific.sas_sata_format; 1074 u16 dev_info = le16_to_cpu(sasinf->device_info); 1075 1076 tgtdev->dev_spec.sas_sata_inf.dev_info = dev_info; 1077 tgtdev->dev_spec.sas_sata_inf.sas_address = 1078 le64_to_cpu(sasinf->sas_address); 1079 if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) != 1080 MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE) 1081 tgtdev->is_hidden = 1; 1082 else if (!(dev_info & (MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET | 1083 MPI3_SAS_DEVICE_INFO_SSP_TARGET))) 1084 tgtdev->is_hidden = 1; 1085 break; 1086 } 1087 case MPI3_DEVICE_DEVFORM_PCIE: 1088 { 1089 struct mpi3_device0_pcie_format *pcieinf = 1090 &dev_pg0->device_specific.pcie_format; 1091 u16 dev_info = le16_to_cpu(pcieinf->device_info); 1092 1093 tgtdev->dev_spec.pcie_inf.dev_info = dev_info; 1094 tgtdev->dev_spec.pcie_inf.capb = 1095 le32_to_cpu(pcieinf->capabilities); 1096 tgtdev->dev_spec.pcie_inf.mdts = MPI3MR_DEFAULT_MDTS; 1097 /* 2^12 = 4096 */ 1098 tgtdev->dev_spec.pcie_inf.pgsz = 12; 1099 if (dev_pg0->access_status == MPI3_DEVICE0_ASTATUS_NO_ERRORS) { 1100 tgtdev->dev_spec.pcie_inf.mdts = 1101 le32_to_cpu(pcieinf->maximum_data_transfer_size); 1102 tgtdev->dev_spec.pcie_inf.pgsz = pcieinf->page_size; 1103 tgtdev->dev_spec.pcie_inf.reset_to = 1104 max_t(u8, pcieinf->controller_reset_to, 1105 MPI3MR_INTADMCMD_TIMEOUT); 1106 tgtdev->dev_spec.pcie_inf.abort_to = 1107 max_t(u8, pcieinf->nvme_abort_to, 1108 MPI3MR_INTADMCMD_TIMEOUT); 1109 } 1110 if (tgtdev->dev_spec.pcie_inf.mdts > (1024 * 1024)) 1111 tgtdev->dev_spec.pcie_inf.mdts = (1024 * 1024); 1112 if (((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) != 1113 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) && 1114 ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) != 1115 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE)) 1116 tgtdev->is_hidden = 1; 1117 if (!mrioc->shost) 1118 break; 1119 prot_mask = scsi_host_get_prot(mrioc->shost); 1120 if (prot_mask & SHOST_DIX_TYPE0_PROTECTION) { 1121 scsi_host_set_prot(mrioc->shost, prot_mask & 0x77); 1122 ioc_info(mrioc, 1123 "%s : Disabling DIX0 prot capability\n", __func__); 1124 ioc_info(mrioc, 1125 "because HBA does not support DIX0 operation on NVME drives\n"); 1126 } 1127 break; 1128 } 1129 case MPI3_DEVICE_DEVFORM_VD: 1130 { 1131 struct mpi3_device0_vd_format *vdinf = 1132 &dev_pg0->device_specific.vd_format; 1133 struct mpi3mr_throttle_group_info *tg = NULL; 1134 u16 vdinf_io_throttle_group = 1135 le16_to_cpu(vdinf->io_throttle_group); 1136 1137 tgtdev->dev_spec.vd_inf.state = vdinf->vd_state; 1138 if (vdinf->vd_state == MPI3_DEVICE0_VD_STATE_OFFLINE) 1139 tgtdev->is_hidden = 1; 1140 tgtdev->dev_spec.vd_inf.tg_id = vdinf_io_throttle_group; 1141 tgtdev->dev_spec.vd_inf.tg_high = 1142 le16_to_cpu(vdinf->io_throttle_group_high) * 2048; 1143 tgtdev->dev_spec.vd_inf.tg_low = 1144 le16_to_cpu(vdinf->io_throttle_group_low) * 2048; 1145 if (vdinf_io_throttle_group < mrioc->num_io_throttle_group) { 1146 tg = mrioc->throttle_groups + vdinf_io_throttle_group; 1147 tg->id = vdinf_io_throttle_group; 1148 tg->high = tgtdev->dev_spec.vd_inf.tg_high; 1149 tg->low = tgtdev->dev_spec.vd_inf.tg_low; 1150 tg->qd_reduction = 1151 tgtdev->dev_spec.vd_inf.tg_qd_reduction; 1152 if (is_added == true) 1153 tg->fw_qd = tgtdev->q_depth; 1154 tg->modified_qd = tgtdev->q_depth; 1155 } 1156 tgtdev->dev_spec.vd_inf.tg = tg; 1157 if (scsi_tgt_priv_data) 1158 scsi_tgt_priv_data->throttle_group = tg; 1159 break; 1160 } 1161 default: 1162 break; 1163 } 1164 } 1165 1166 /** 1167 * mpi3mr_devstatuschg_evt_bh - DevStatusChange evt bottomhalf 1168 * @mrioc: Adapter instance reference 1169 * @fwevt: Firmware event information. 1170 * 1171 * Process Device status Change event and based on device's new 1172 * information, either expose the device to the upper layers, or 1173 * remove the device from upper layers. 1174 * 1175 * Return: Nothing. 1176 */ 1177 static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_ioc *mrioc, 1178 struct mpi3mr_fwevt *fwevt) 1179 { 1180 u16 dev_handle = 0; 1181 u8 uhide = 0, delete = 0, cleanup = 0; 1182 struct mpi3mr_tgt_dev *tgtdev = NULL; 1183 struct mpi3_event_data_device_status_change *evtdata = 1184 (struct mpi3_event_data_device_status_change *)fwevt->event_data; 1185 1186 dev_handle = le16_to_cpu(evtdata->dev_handle); 1187 ioc_info(mrioc, 1188 "%s :device status change: handle(0x%04x): reason code(0x%x)\n", 1189 __func__, dev_handle, evtdata->reason_code); 1190 switch (evtdata->reason_code) { 1191 case MPI3_EVENT_DEV_STAT_RC_HIDDEN: 1192 delete = 1; 1193 break; 1194 case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN: 1195 uhide = 1; 1196 break; 1197 case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING: 1198 delete = 1; 1199 cleanup = 1; 1200 break; 1201 default: 1202 ioc_info(mrioc, "%s :Unhandled reason code(0x%x)\n", __func__, 1203 evtdata->reason_code); 1204 break; 1205 } 1206 1207 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle); 1208 if (!tgtdev) 1209 goto out; 1210 if (uhide) { 1211 tgtdev->is_hidden = 0; 1212 if (!tgtdev->host_exposed) 1213 mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id); 1214 } 1215 if (tgtdev->starget && tgtdev->starget->hostdata) { 1216 if (delete) 1217 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); 1218 } 1219 if (cleanup) { 1220 mpi3mr_tgtdev_del_from_list(mrioc, tgtdev); 1221 mpi3mr_tgtdev_put(tgtdev); 1222 } 1223 1224 out: 1225 if (tgtdev) 1226 mpi3mr_tgtdev_put(tgtdev); 1227 } 1228 1229 /** 1230 * mpi3mr_devinfochg_evt_bh - DeviceInfoChange evt bottomhalf 1231 * @mrioc: Adapter instance reference 1232 * @dev_pg0: New device page0 1233 * 1234 * Process Device Info Change event and based on device's new 1235 * information, either expose the device to the upper layers, or 1236 * remove the device from upper layers or update the details of 1237 * the device. 1238 * 1239 * Return: Nothing. 1240 */ 1241 static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_ioc *mrioc, 1242 struct mpi3_device_page0 *dev_pg0) 1243 { 1244 struct mpi3mr_tgt_dev *tgtdev = NULL; 1245 u16 dev_handle = 0, perst_id = 0; 1246 1247 perst_id = le16_to_cpu(dev_pg0->persistent_id); 1248 dev_handle = le16_to_cpu(dev_pg0->dev_handle); 1249 ioc_info(mrioc, 1250 "%s :Device info change: handle(0x%04x): persist_id(0x%x)\n", 1251 __func__, dev_handle, perst_id); 1252 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle); 1253 if (!tgtdev) 1254 goto out; 1255 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, false); 1256 if (!tgtdev->is_hidden && !tgtdev->host_exposed) 1257 mpi3mr_report_tgtdev_to_host(mrioc, perst_id); 1258 if (tgtdev->is_hidden && tgtdev->host_exposed) 1259 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); 1260 if (!tgtdev->is_hidden && tgtdev->host_exposed && tgtdev->starget) 1261 starget_for_each_device(tgtdev->starget, (void *)tgtdev, 1262 mpi3mr_update_sdev); 1263 out: 1264 if (tgtdev) 1265 mpi3mr_tgtdev_put(tgtdev); 1266 } 1267 1268 /** 1269 * mpi3mr_sastopochg_evt_debug - SASTopoChange details 1270 * @mrioc: Adapter instance reference 1271 * @event_data: SAS topology change list event data 1272 * 1273 * Prints information about the SAS topology change event. 1274 * 1275 * Return: Nothing. 1276 */ 1277 static void 1278 mpi3mr_sastopochg_evt_debug(struct mpi3mr_ioc *mrioc, 1279 struct mpi3_event_data_sas_topology_change_list *event_data) 1280 { 1281 int i; 1282 u16 handle; 1283 u8 reason_code, phy_number; 1284 char *status_str = NULL; 1285 u8 link_rate, prev_link_rate; 1286 1287 switch (event_data->exp_status) { 1288 case MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING: 1289 status_str = "remove"; 1290 break; 1291 case MPI3_EVENT_SAS_TOPO_ES_RESPONDING: 1292 status_str = "responding"; 1293 break; 1294 case MPI3_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING: 1295 status_str = "remove delay"; 1296 break; 1297 case MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER: 1298 status_str = "direct attached"; 1299 break; 1300 default: 1301 status_str = "unknown status"; 1302 break; 1303 } 1304 ioc_info(mrioc, "%s :sas topology change: (%s)\n", 1305 __func__, status_str); 1306 ioc_info(mrioc, 1307 "%s :\texpander_handle(0x%04x), enclosure_handle(0x%04x) start_phy(%02d), num_entries(%d)\n", 1308 __func__, le16_to_cpu(event_data->expander_dev_handle), 1309 le16_to_cpu(event_data->enclosure_handle), 1310 event_data->start_phy_num, event_data->num_entries); 1311 for (i = 0; i < event_data->num_entries; i++) { 1312 handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle); 1313 if (!handle) 1314 continue; 1315 phy_number = event_data->start_phy_num + i; 1316 reason_code = event_data->phy_entry[i].status & 1317 MPI3_EVENT_SAS_TOPO_PHY_RC_MASK; 1318 switch (reason_code) { 1319 case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING: 1320 status_str = "target remove"; 1321 break; 1322 case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING: 1323 status_str = "delay target remove"; 1324 break; 1325 case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED: 1326 status_str = "link status change"; 1327 break; 1328 case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE: 1329 status_str = "link status no change"; 1330 break; 1331 case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING: 1332 status_str = "target responding"; 1333 break; 1334 default: 1335 status_str = "unknown"; 1336 break; 1337 } 1338 link_rate = event_data->phy_entry[i].link_rate >> 4; 1339 prev_link_rate = event_data->phy_entry[i].link_rate & 0xF; 1340 ioc_info(mrioc, 1341 "%s :\tphy(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n", 1342 __func__, phy_number, handle, status_str, link_rate, 1343 prev_link_rate); 1344 } 1345 } 1346 1347 /** 1348 * mpi3mr_sastopochg_evt_bh - SASTopologyChange evt bottomhalf 1349 * @mrioc: Adapter instance reference 1350 * @fwevt: Firmware event reference 1351 * 1352 * Prints information about the SAS topology change event and 1353 * for "not responding" event code, removes the device from the 1354 * upper layers. 1355 * 1356 * Return: Nothing. 1357 */ 1358 static void mpi3mr_sastopochg_evt_bh(struct mpi3mr_ioc *mrioc, 1359 struct mpi3mr_fwevt *fwevt) 1360 { 1361 struct mpi3_event_data_sas_topology_change_list *event_data = 1362 (struct mpi3_event_data_sas_topology_change_list *)fwevt->event_data; 1363 int i; 1364 u16 handle; 1365 u8 reason_code; 1366 struct mpi3mr_tgt_dev *tgtdev = NULL; 1367 1368 mpi3mr_sastopochg_evt_debug(mrioc, event_data); 1369 1370 for (i = 0; i < event_data->num_entries; i++) { 1371 if (fwevt->discard) 1372 return; 1373 handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle); 1374 if (!handle) 1375 continue; 1376 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 1377 if (!tgtdev) 1378 continue; 1379 1380 reason_code = event_data->phy_entry[i].status & 1381 MPI3_EVENT_SAS_TOPO_PHY_RC_MASK; 1382 1383 switch (reason_code) { 1384 case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING: 1385 if (tgtdev->host_exposed) 1386 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); 1387 mpi3mr_tgtdev_del_from_list(mrioc, tgtdev); 1388 mpi3mr_tgtdev_put(tgtdev); 1389 break; 1390 default: 1391 break; 1392 } 1393 if (tgtdev) 1394 mpi3mr_tgtdev_put(tgtdev); 1395 } 1396 } 1397 1398 /** 1399 * mpi3mr_pcietopochg_evt_debug - PCIeTopoChange details 1400 * @mrioc: Adapter instance reference 1401 * @event_data: PCIe topology change list event data 1402 * 1403 * Prints information about the PCIe topology change event. 1404 * 1405 * Return: Nothing. 1406 */ 1407 static void 1408 mpi3mr_pcietopochg_evt_debug(struct mpi3mr_ioc *mrioc, 1409 struct mpi3_event_data_pcie_topology_change_list *event_data) 1410 { 1411 int i; 1412 u16 handle; 1413 u16 reason_code; 1414 u8 port_number; 1415 char *status_str = NULL; 1416 u8 link_rate, prev_link_rate; 1417 1418 switch (event_data->switch_status) { 1419 case MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING: 1420 status_str = "remove"; 1421 break; 1422 case MPI3_EVENT_PCIE_TOPO_SS_RESPONDING: 1423 status_str = "responding"; 1424 break; 1425 case MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING: 1426 status_str = "remove delay"; 1427 break; 1428 case MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH: 1429 status_str = "direct attached"; 1430 break; 1431 default: 1432 status_str = "unknown status"; 1433 break; 1434 } 1435 ioc_info(mrioc, "%s :pcie topology change: (%s)\n", 1436 __func__, status_str); 1437 ioc_info(mrioc, 1438 "%s :\tswitch_handle(0x%04x), enclosure_handle(0x%04x) start_port(%02d), num_entries(%d)\n", 1439 __func__, le16_to_cpu(event_data->switch_dev_handle), 1440 le16_to_cpu(event_data->enclosure_handle), 1441 event_data->start_port_num, event_data->num_entries); 1442 for (i = 0; i < event_data->num_entries; i++) { 1443 handle = 1444 le16_to_cpu(event_data->port_entry[i].attached_dev_handle); 1445 if (!handle) 1446 continue; 1447 port_number = event_data->start_port_num + i; 1448 reason_code = event_data->port_entry[i].port_status; 1449 switch (reason_code) { 1450 case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING: 1451 status_str = "target remove"; 1452 break; 1453 case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING: 1454 status_str = "delay target remove"; 1455 break; 1456 case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED: 1457 status_str = "link status change"; 1458 break; 1459 case MPI3_EVENT_PCIE_TOPO_PS_NO_CHANGE: 1460 status_str = "link status no change"; 1461 break; 1462 case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING: 1463 status_str = "target responding"; 1464 break; 1465 default: 1466 status_str = "unknown"; 1467 break; 1468 } 1469 link_rate = event_data->port_entry[i].current_port_info & 1470 MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK; 1471 prev_link_rate = event_data->port_entry[i].previous_port_info & 1472 MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK; 1473 ioc_info(mrioc, 1474 "%s :\tport(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n", 1475 __func__, port_number, handle, status_str, link_rate, 1476 prev_link_rate); 1477 } 1478 } 1479 1480 /** 1481 * mpi3mr_pcietopochg_evt_bh - PCIeTopologyChange evt bottomhalf 1482 * @mrioc: Adapter instance reference 1483 * @fwevt: Firmware event reference 1484 * 1485 * Prints information about the PCIe topology change event and 1486 * for "not responding" event code, removes the device from the 1487 * upper layers. 1488 * 1489 * Return: Nothing. 1490 */ 1491 static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc, 1492 struct mpi3mr_fwevt *fwevt) 1493 { 1494 struct mpi3_event_data_pcie_topology_change_list *event_data = 1495 (struct mpi3_event_data_pcie_topology_change_list *)fwevt->event_data; 1496 int i; 1497 u16 handle; 1498 u8 reason_code; 1499 struct mpi3mr_tgt_dev *tgtdev = NULL; 1500 1501 mpi3mr_pcietopochg_evt_debug(mrioc, event_data); 1502 1503 for (i = 0; i < event_data->num_entries; i++) { 1504 if (fwevt->discard) 1505 return; 1506 handle = 1507 le16_to_cpu(event_data->port_entry[i].attached_dev_handle); 1508 if (!handle) 1509 continue; 1510 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 1511 if (!tgtdev) 1512 continue; 1513 1514 reason_code = event_data->port_entry[i].port_status; 1515 1516 switch (reason_code) { 1517 case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING: 1518 if (tgtdev->host_exposed) 1519 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); 1520 mpi3mr_tgtdev_del_from_list(mrioc, tgtdev); 1521 mpi3mr_tgtdev_put(tgtdev); 1522 break; 1523 default: 1524 break; 1525 } 1526 if (tgtdev) 1527 mpi3mr_tgtdev_put(tgtdev); 1528 } 1529 } 1530 1531 /** 1532 * mpi3mr_logdata_evt_bh - Log data event bottomhalf 1533 * @mrioc: Adapter instance reference 1534 * @fwevt: Firmware event reference 1535 * 1536 * Extracts the event data and calls application interfacing 1537 * function to process the event further. 1538 * 1539 * Return: Nothing. 1540 */ 1541 static void mpi3mr_logdata_evt_bh(struct mpi3mr_ioc *mrioc, 1542 struct mpi3mr_fwevt *fwevt) 1543 { 1544 mpi3mr_app_save_logdata(mrioc, fwevt->event_data, 1545 fwevt->event_data_size); 1546 } 1547 1548 /** 1549 * mpi3mr_update_sdev_qd - Update SCSI device queue depath 1550 * @sdev: SCSI device reference 1551 * @data: Queue depth reference 1552 * 1553 * This is an iterator function called for each SCSI device in a 1554 * target to update the QD of each SCSI device. 1555 * 1556 * Return: Nothing. 1557 */ 1558 static void mpi3mr_update_sdev_qd(struct scsi_device *sdev, void *data) 1559 { 1560 u16 *q_depth = (u16 *)data; 1561 1562 scsi_change_queue_depth(sdev, (int)*q_depth); 1563 sdev->max_queue_depth = sdev->queue_depth; 1564 } 1565 1566 /** 1567 * mpi3mr_set_qd_for_all_vd_in_tg -set QD for TG VDs 1568 * @mrioc: Adapter instance reference 1569 * @tg: Throttle group information pointer 1570 * 1571 * Accessor to reduce QD for each device associated with the 1572 * given throttle group. 1573 * 1574 * Return: None. 1575 */ 1576 static void mpi3mr_set_qd_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc, 1577 struct mpi3mr_throttle_group_info *tg) 1578 { 1579 unsigned long flags; 1580 struct mpi3mr_tgt_dev *tgtdev; 1581 struct mpi3mr_stgt_priv_data *tgt_priv; 1582 1583 1584 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 1585 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) { 1586 if (tgtdev->starget && tgtdev->starget->hostdata) { 1587 tgt_priv = tgtdev->starget->hostdata; 1588 if (tgt_priv->throttle_group == tg) { 1589 dprint_event_bh(mrioc, 1590 "updating qd due to throttling for persist_id(%d) original_qd(%d), reduced_qd (%d)\n", 1591 tgt_priv->perst_id, tgtdev->q_depth, 1592 tg->modified_qd); 1593 starget_for_each_device(tgtdev->starget, 1594 (void *)&tg->modified_qd, 1595 mpi3mr_update_sdev_qd); 1596 } 1597 } 1598 } 1599 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 1600 } 1601 1602 /** 1603 * mpi3mr_fwevt_bh - Firmware event bottomhalf handler 1604 * @mrioc: Adapter instance reference 1605 * @fwevt: Firmware event reference 1606 * 1607 * Identifies the firmware event and calls corresponding bottomg 1608 * half handler and sends event acknowledgment if required. 1609 * 1610 * Return: Nothing. 1611 */ 1612 static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc, 1613 struct mpi3mr_fwevt *fwevt) 1614 { 1615 mpi3mr_fwevt_del_from_list(mrioc, fwevt); 1616 mrioc->current_event = fwevt; 1617 1618 if (mrioc->stop_drv_processing) 1619 goto out; 1620 1621 if (!fwevt->process_evt) 1622 goto evt_ack; 1623 1624 switch (fwevt->event_id) { 1625 case MPI3_EVENT_DEVICE_ADDED: 1626 { 1627 struct mpi3_device_page0 *dev_pg0 = 1628 (struct mpi3_device_page0 *)fwevt->event_data; 1629 mpi3mr_report_tgtdev_to_host(mrioc, 1630 le16_to_cpu(dev_pg0->persistent_id)); 1631 break; 1632 } 1633 case MPI3_EVENT_DEVICE_INFO_CHANGED: 1634 { 1635 mpi3mr_devinfochg_evt_bh(mrioc, 1636 (struct mpi3_device_page0 *)fwevt->event_data); 1637 break; 1638 } 1639 case MPI3_EVENT_DEVICE_STATUS_CHANGE: 1640 { 1641 mpi3mr_devstatuschg_evt_bh(mrioc, fwevt); 1642 break; 1643 } 1644 case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST: 1645 { 1646 mpi3mr_sastopochg_evt_bh(mrioc, fwevt); 1647 break; 1648 } 1649 case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: 1650 { 1651 mpi3mr_pcietopochg_evt_bh(mrioc, fwevt); 1652 break; 1653 } 1654 case MPI3_EVENT_LOG_DATA: 1655 { 1656 mpi3mr_logdata_evt_bh(mrioc, fwevt); 1657 break; 1658 } 1659 case MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION: 1660 { 1661 struct mpi3mr_throttle_group_info *tg; 1662 1663 tg = *(struct mpi3mr_throttle_group_info **)fwevt->event_data; 1664 dprint_event_bh(mrioc, 1665 "qd reduction event processed for tg_id(%d) reduction_needed(%d)\n", 1666 tg->id, tg->need_qd_reduction); 1667 if (tg->need_qd_reduction) { 1668 mpi3mr_set_qd_for_all_vd_in_tg(mrioc, tg); 1669 tg->need_qd_reduction = 0; 1670 } 1671 break; 1672 } 1673 default: 1674 break; 1675 } 1676 1677 evt_ack: 1678 if (fwevt->send_ack) 1679 mpi3mr_process_event_ack(mrioc, fwevt->event_id, 1680 fwevt->evt_ctx); 1681 out: 1682 /* Put fwevt reference count to neutralize kref_init increment */ 1683 mpi3mr_fwevt_put(fwevt); 1684 mrioc->current_event = NULL; 1685 } 1686 1687 /** 1688 * mpi3mr_fwevt_worker - Firmware event worker 1689 * @work: Work struct containing firmware event 1690 * 1691 * Extracts the firmware event and calls mpi3mr_fwevt_bh. 1692 * 1693 * Return: Nothing. 1694 */ 1695 static void mpi3mr_fwevt_worker(struct work_struct *work) 1696 { 1697 struct mpi3mr_fwevt *fwevt = container_of(work, struct mpi3mr_fwevt, 1698 work); 1699 mpi3mr_fwevt_bh(fwevt->mrioc, fwevt); 1700 /* 1701 * Put fwevt reference count after 1702 * dequeuing it from worker queue 1703 */ 1704 mpi3mr_fwevt_put(fwevt); 1705 } 1706 1707 /** 1708 * mpi3mr_create_tgtdev - Create and add a target device 1709 * @mrioc: Adapter instance reference 1710 * @dev_pg0: Device Page 0 data 1711 * 1712 * If the device specified by the device page 0 data is not 1713 * present in the driver's internal list, allocate the memory 1714 * for the device, populate the data and add to the list, else 1715 * update the device data. The key is persistent ID. 1716 * 1717 * Return: 0 on success, -ENOMEM on memory allocation failure 1718 */ 1719 static int mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc, 1720 struct mpi3_device_page0 *dev_pg0) 1721 { 1722 int retval = 0; 1723 struct mpi3mr_tgt_dev *tgtdev = NULL; 1724 u16 perst_id = 0; 1725 1726 perst_id = le16_to_cpu(dev_pg0->persistent_id); 1727 tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id); 1728 if (tgtdev) { 1729 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true); 1730 mpi3mr_tgtdev_put(tgtdev); 1731 } else { 1732 tgtdev = mpi3mr_alloc_tgtdev(); 1733 if (!tgtdev) 1734 return -ENOMEM; 1735 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true); 1736 mpi3mr_tgtdev_add_to_list(mrioc, tgtdev); 1737 } 1738 1739 return retval; 1740 } 1741 1742 /** 1743 * mpi3mr_flush_delayed_cmd_lists - Flush pending commands 1744 * @mrioc: Adapter instance reference 1745 * 1746 * Flush pending commands in the delayed lists due to a 1747 * controller reset or driver removal as a cleanup. 1748 * 1749 * Return: Nothing 1750 */ 1751 void mpi3mr_flush_delayed_cmd_lists(struct mpi3mr_ioc *mrioc) 1752 { 1753 struct delayed_dev_rmhs_node *_rmhs_node; 1754 struct delayed_evt_ack_node *_evtack_node; 1755 1756 dprint_reset(mrioc, "flushing delayed dev_remove_hs commands\n"); 1757 while (!list_empty(&mrioc->delayed_rmhs_list)) { 1758 _rmhs_node = list_entry(mrioc->delayed_rmhs_list.next, 1759 struct delayed_dev_rmhs_node, list); 1760 list_del(&_rmhs_node->list); 1761 kfree(_rmhs_node); 1762 } 1763 dprint_reset(mrioc, "flushing delayed event ack commands\n"); 1764 while (!list_empty(&mrioc->delayed_evtack_cmds_list)) { 1765 _evtack_node = list_entry(mrioc->delayed_evtack_cmds_list.next, 1766 struct delayed_evt_ack_node, list); 1767 list_del(&_evtack_node->list); 1768 kfree(_evtack_node); 1769 } 1770 } 1771 1772 /** 1773 * mpi3mr_dev_rmhs_complete_iou - Device removal IOUC completion 1774 * @mrioc: Adapter instance reference 1775 * @drv_cmd: Internal command tracker 1776 * 1777 * Issues a target reset TM to the firmware from the device 1778 * removal TM pend list or retry the removal handshake sequence 1779 * based on the IOU control request IOC status. 1780 * 1781 * Return: Nothing 1782 */ 1783 static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_ioc *mrioc, 1784 struct mpi3mr_drv_cmd *drv_cmd) 1785 { 1786 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN; 1787 struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL; 1788 1789 if (drv_cmd->state & MPI3MR_CMD_RESET) 1790 goto clear_drv_cmd; 1791 1792 ioc_info(mrioc, 1793 "%s :dev_rmhs_iouctrl_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x)\n", 1794 __func__, drv_cmd->dev_handle, drv_cmd->ioc_status, 1795 drv_cmd->ioc_loginfo); 1796 if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) { 1797 if (drv_cmd->retry_count < MPI3MR_DEV_RMHS_RETRY_COUNT) { 1798 drv_cmd->retry_count++; 1799 ioc_info(mrioc, 1800 "%s :dev_rmhs_iouctrl_complete: handle(0x%04x)retrying handshake retry=%d\n", 1801 __func__, drv_cmd->dev_handle, 1802 drv_cmd->retry_count); 1803 mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle, 1804 drv_cmd, drv_cmd->iou_rc); 1805 return; 1806 } 1807 ioc_err(mrioc, 1808 "%s :dev removal handshake failed after all retries: handle(0x%04x)\n", 1809 __func__, drv_cmd->dev_handle); 1810 } else { 1811 ioc_info(mrioc, 1812 "%s :dev removal handshake completed successfully: handle(0x%04x)\n", 1813 __func__, drv_cmd->dev_handle); 1814 clear_bit(drv_cmd->dev_handle, mrioc->removepend_bitmap); 1815 } 1816 1817 if (!list_empty(&mrioc->delayed_rmhs_list)) { 1818 delayed_dev_rmhs = list_entry(mrioc->delayed_rmhs_list.next, 1819 struct delayed_dev_rmhs_node, list); 1820 drv_cmd->dev_handle = delayed_dev_rmhs->handle; 1821 drv_cmd->retry_count = 0; 1822 drv_cmd->iou_rc = delayed_dev_rmhs->iou_rc; 1823 ioc_info(mrioc, 1824 "%s :dev_rmhs_iouctrl_complete: processing delayed TM: handle(0x%04x)\n", 1825 __func__, drv_cmd->dev_handle); 1826 mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle, drv_cmd, 1827 drv_cmd->iou_rc); 1828 list_del(&delayed_dev_rmhs->list); 1829 kfree(delayed_dev_rmhs); 1830 return; 1831 } 1832 1833 clear_drv_cmd: 1834 drv_cmd->state = MPI3MR_CMD_NOTUSED; 1835 drv_cmd->callback = NULL; 1836 drv_cmd->retry_count = 0; 1837 drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 1838 clear_bit(cmd_idx, mrioc->devrem_bitmap); 1839 } 1840 1841 /** 1842 * mpi3mr_dev_rmhs_complete_tm - Device removal TM completion 1843 * @mrioc: Adapter instance reference 1844 * @drv_cmd: Internal command tracker 1845 * 1846 * Issues a target reset TM to the firmware from the device 1847 * removal TM pend list or issue IO unit control request as 1848 * part of device removal or hidden acknowledgment handshake. 1849 * 1850 * Return: Nothing 1851 */ 1852 static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_ioc *mrioc, 1853 struct mpi3mr_drv_cmd *drv_cmd) 1854 { 1855 struct mpi3_iounit_control_request iou_ctrl; 1856 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN; 1857 struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL; 1858 int retval; 1859 1860 if (drv_cmd->state & MPI3MR_CMD_RESET) 1861 goto clear_drv_cmd; 1862 1863 if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID) 1864 tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply; 1865 1866 if (tm_reply) 1867 pr_info(IOCNAME 1868 "dev_rmhs_tr_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x), term_count(%d)\n", 1869 mrioc->name, drv_cmd->dev_handle, drv_cmd->ioc_status, 1870 drv_cmd->ioc_loginfo, 1871 le32_to_cpu(tm_reply->termination_count)); 1872 1873 pr_info(IOCNAME "Issuing IOU CTL: handle(0x%04x) dev_rmhs idx(%d)\n", 1874 mrioc->name, drv_cmd->dev_handle, cmd_idx); 1875 1876 memset(&iou_ctrl, 0, sizeof(iou_ctrl)); 1877 1878 drv_cmd->state = MPI3MR_CMD_PENDING; 1879 drv_cmd->is_waiting = 0; 1880 drv_cmd->callback = mpi3mr_dev_rmhs_complete_iou; 1881 iou_ctrl.operation = drv_cmd->iou_rc; 1882 iou_ctrl.param16[0] = cpu_to_le16(drv_cmd->dev_handle); 1883 iou_ctrl.host_tag = cpu_to_le16(drv_cmd->host_tag); 1884 iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL; 1885 1886 retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl, sizeof(iou_ctrl), 1887 1); 1888 if (retval) { 1889 pr_err(IOCNAME "Issue DevRmHsTMIOUCTL: Admin post failed\n", 1890 mrioc->name); 1891 goto clear_drv_cmd; 1892 } 1893 1894 return; 1895 clear_drv_cmd: 1896 drv_cmd->state = MPI3MR_CMD_NOTUSED; 1897 drv_cmd->callback = NULL; 1898 drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 1899 drv_cmd->retry_count = 0; 1900 clear_bit(cmd_idx, mrioc->devrem_bitmap); 1901 } 1902 1903 /** 1904 * mpi3mr_dev_rmhs_send_tm - Issue TM for device removal 1905 * @mrioc: Adapter instance reference 1906 * @handle: Device handle 1907 * @cmdparam: Internal command tracker 1908 * @iou_rc: IO unit reason code 1909 * 1910 * Issues a target reset TM to the firmware or add it to a pend 1911 * list as part of device removal or hidden acknowledgment 1912 * handshake. 1913 * 1914 * Return: Nothing 1915 */ 1916 static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle, 1917 struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc) 1918 { 1919 struct mpi3_scsi_task_mgmt_request tm_req; 1920 int retval = 0; 1921 u16 cmd_idx = MPI3MR_NUM_DEVRMCMD; 1922 u8 retrycount = 5; 1923 struct mpi3mr_drv_cmd *drv_cmd = cmdparam; 1924 struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL; 1925 1926 if (drv_cmd) 1927 goto issue_cmd; 1928 do { 1929 cmd_idx = find_first_zero_bit(mrioc->devrem_bitmap, 1930 MPI3MR_NUM_DEVRMCMD); 1931 if (cmd_idx < MPI3MR_NUM_DEVRMCMD) { 1932 if (!test_and_set_bit(cmd_idx, mrioc->devrem_bitmap)) 1933 break; 1934 cmd_idx = MPI3MR_NUM_DEVRMCMD; 1935 } 1936 } while (retrycount--); 1937 1938 if (cmd_idx >= MPI3MR_NUM_DEVRMCMD) { 1939 delayed_dev_rmhs = kzalloc(sizeof(*delayed_dev_rmhs), 1940 GFP_ATOMIC); 1941 if (!delayed_dev_rmhs) 1942 return; 1943 INIT_LIST_HEAD(&delayed_dev_rmhs->list); 1944 delayed_dev_rmhs->handle = handle; 1945 delayed_dev_rmhs->iou_rc = iou_rc; 1946 list_add_tail(&delayed_dev_rmhs->list, 1947 &mrioc->delayed_rmhs_list); 1948 ioc_info(mrioc, "%s :DevRmHs: tr:handle(0x%04x) is postponed\n", 1949 __func__, handle); 1950 return; 1951 } 1952 drv_cmd = &mrioc->dev_rmhs_cmds[cmd_idx]; 1953 1954 issue_cmd: 1955 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN; 1956 ioc_info(mrioc, 1957 "%s :Issuing TR TM: for devhandle 0x%04x with dev_rmhs %d\n", 1958 __func__, handle, cmd_idx); 1959 1960 memset(&tm_req, 0, sizeof(tm_req)); 1961 if (drv_cmd->state & MPI3MR_CMD_PENDING) { 1962 ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__); 1963 goto out; 1964 } 1965 drv_cmd->state = MPI3MR_CMD_PENDING; 1966 drv_cmd->is_waiting = 0; 1967 drv_cmd->callback = mpi3mr_dev_rmhs_complete_tm; 1968 drv_cmd->dev_handle = handle; 1969 drv_cmd->iou_rc = iou_rc; 1970 tm_req.dev_handle = cpu_to_le16(handle); 1971 tm_req.task_type = MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET; 1972 tm_req.host_tag = cpu_to_le16(drv_cmd->host_tag); 1973 tm_req.task_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INVALID); 1974 tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT; 1975 1976 set_bit(handle, mrioc->removepend_bitmap); 1977 retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1); 1978 if (retval) { 1979 ioc_err(mrioc, "%s :Issue DevRmHsTM: Admin Post failed\n", 1980 __func__); 1981 goto out_failed; 1982 } 1983 out: 1984 return; 1985 out_failed: 1986 drv_cmd->state = MPI3MR_CMD_NOTUSED; 1987 drv_cmd->callback = NULL; 1988 drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 1989 drv_cmd->retry_count = 0; 1990 clear_bit(cmd_idx, mrioc->devrem_bitmap); 1991 } 1992 1993 /** 1994 * mpi3mr_complete_evt_ack - event ack request completion 1995 * @mrioc: Adapter instance reference 1996 * @drv_cmd: Internal command tracker 1997 * 1998 * This is the completion handler for non blocking event 1999 * acknowledgment sent to the firmware and this will issue any 2000 * pending event acknowledgment request. 2001 * 2002 * Return: Nothing 2003 */ 2004 static void mpi3mr_complete_evt_ack(struct mpi3mr_ioc *mrioc, 2005 struct mpi3mr_drv_cmd *drv_cmd) 2006 { 2007 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN; 2008 struct delayed_evt_ack_node *delayed_evtack = NULL; 2009 2010 if (drv_cmd->state & MPI3MR_CMD_RESET) 2011 goto clear_drv_cmd; 2012 2013 if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) { 2014 dprint_event_th(mrioc, 2015 "immediate event ack failed with ioc_status(0x%04x) log_info(0x%08x)\n", 2016 (drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 2017 drv_cmd->ioc_loginfo); 2018 } 2019 2020 if (!list_empty(&mrioc->delayed_evtack_cmds_list)) { 2021 delayed_evtack = 2022 list_entry(mrioc->delayed_evtack_cmds_list.next, 2023 struct delayed_evt_ack_node, list); 2024 mpi3mr_send_event_ack(mrioc, delayed_evtack->event, drv_cmd, 2025 delayed_evtack->event_ctx); 2026 list_del(&delayed_evtack->list); 2027 kfree(delayed_evtack); 2028 return; 2029 } 2030 clear_drv_cmd: 2031 drv_cmd->state = MPI3MR_CMD_NOTUSED; 2032 drv_cmd->callback = NULL; 2033 clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap); 2034 } 2035 2036 /** 2037 * mpi3mr_send_event_ack - Issue event acknwoledgment request 2038 * @mrioc: Adapter instance reference 2039 * @event: MPI3 event id 2040 * @cmdparam: Internal command tracker 2041 * @event_ctx: event context 2042 * 2043 * Issues event acknowledgment request to the firmware if there 2044 * is a free command to send the event ack else it to a pend 2045 * list so that it will be processed on a completion of a prior 2046 * event acknowledgment . 2047 * 2048 * Return: Nothing 2049 */ 2050 static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event, 2051 struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx) 2052 { 2053 struct mpi3_event_ack_request evtack_req; 2054 int retval = 0; 2055 u8 retrycount = 5; 2056 u16 cmd_idx = MPI3MR_NUM_EVTACKCMD; 2057 struct mpi3mr_drv_cmd *drv_cmd = cmdparam; 2058 struct delayed_evt_ack_node *delayed_evtack = NULL; 2059 2060 if (drv_cmd) { 2061 dprint_event_th(mrioc, 2062 "sending delayed event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n", 2063 event, event_ctx); 2064 goto issue_cmd; 2065 } 2066 dprint_event_th(mrioc, 2067 "sending event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n", 2068 event, event_ctx); 2069 do { 2070 cmd_idx = find_first_zero_bit(mrioc->evtack_cmds_bitmap, 2071 MPI3MR_NUM_EVTACKCMD); 2072 if (cmd_idx < MPI3MR_NUM_EVTACKCMD) { 2073 if (!test_and_set_bit(cmd_idx, 2074 mrioc->evtack_cmds_bitmap)) 2075 break; 2076 cmd_idx = MPI3MR_NUM_EVTACKCMD; 2077 } 2078 } while (retrycount--); 2079 2080 if (cmd_idx >= MPI3MR_NUM_EVTACKCMD) { 2081 delayed_evtack = kzalloc(sizeof(*delayed_evtack), 2082 GFP_ATOMIC); 2083 if (!delayed_evtack) 2084 return; 2085 INIT_LIST_HEAD(&delayed_evtack->list); 2086 delayed_evtack->event = event; 2087 delayed_evtack->event_ctx = event_ctx; 2088 list_add_tail(&delayed_evtack->list, 2089 &mrioc->delayed_evtack_cmds_list); 2090 dprint_event_th(mrioc, 2091 "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is postponed\n", 2092 event, event_ctx); 2093 return; 2094 } 2095 drv_cmd = &mrioc->evtack_cmds[cmd_idx]; 2096 2097 issue_cmd: 2098 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN; 2099 2100 memset(&evtack_req, 0, sizeof(evtack_req)); 2101 if (drv_cmd->state & MPI3MR_CMD_PENDING) { 2102 dprint_event_th(mrioc, 2103 "sending event ack failed due to command in use\n"); 2104 goto out; 2105 } 2106 drv_cmd->state = MPI3MR_CMD_PENDING; 2107 drv_cmd->is_waiting = 0; 2108 drv_cmd->callback = mpi3mr_complete_evt_ack; 2109 evtack_req.host_tag = cpu_to_le16(drv_cmd->host_tag); 2110 evtack_req.function = MPI3_FUNCTION_EVENT_ACK; 2111 evtack_req.event = event; 2112 evtack_req.event_context = cpu_to_le32(event_ctx); 2113 retval = mpi3mr_admin_request_post(mrioc, &evtack_req, 2114 sizeof(evtack_req), 1); 2115 if (retval) { 2116 dprint_event_th(mrioc, 2117 "posting event ack request is failed\n"); 2118 goto out_failed; 2119 } 2120 2121 dprint_event_th(mrioc, 2122 "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is posted\n", 2123 event, event_ctx); 2124 out: 2125 return; 2126 out_failed: 2127 drv_cmd->state = MPI3MR_CMD_NOTUSED; 2128 drv_cmd->callback = NULL; 2129 clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap); 2130 } 2131 2132 /** 2133 * mpi3mr_pcietopochg_evt_th - PCIETopologyChange evt tophalf 2134 * @mrioc: Adapter instance reference 2135 * @event_reply: event data 2136 * 2137 * Checks for the reason code and based on that either block I/O 2138 * to device, or unblock I/O to the device, or start the device 2139 * removal handshake with reason as remove with the firmware for 2140 * PCIe devices. 2141 * 2142 * Return: Nothing 2143 */ 2144 static void mpi3mr_pcietopochg_evt_th(struct mpi3mr_ioc *mrioc, 2145 struct mpi3_event_notification_reply *event_reply) 2146 { 2147 struct mpi3_event_data_pcie_topology_change_list *topo_evt = 2148 (struct mpi3_event_data_pcie_topology_change_list *)event_reply->event_data; 2149 int i; 2150 u16 handle; 2151 u8 reason_code; 2152 struct mpi3mr_tgt_dev *tgtdev = NULL; 2153 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 2154 2155 for (i = 0; i < topo_evt->num_entries; i++) { 2156 handle = le16_to_cpu(topo_evt->port_entry[i].attached_dev_handle); 2157 if (!handle) 2158 continue; 2159 reason_code = topo_evt->port_entry[i].port_status; 2160 scsi_tgt_priv_data = NULL; 2161 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 2162 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) 2163 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 2164 tgtdev->starget->hostdata; 2165 switch (reason_code) { 2166 case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING: 2167 if (scsi_tgt_priv_data) { 2168 scsi_tgt_priv_data->dev_removed = 1; 2169 scsi_tgt_priv_data->dev_removedelay = 0; 2170 atomic_set(&scsi_tgt_priv_data->block_io, 0); 2171 } 2172 mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL, 2173 MPI3_CTRL_OP_REMOVE_DEVICE); 2174 break; 2175 case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING: 2176 if (scsi_tgt_priv_data) { 2177 scsi_tgt_priv_data->dev_removedelay = 1; 2178 atomic_inc(&scsi_tgt_priv_data->block_io); 2179 } 2180 break; 2181 case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING: 2182 if (scsi_tgt_priv_data && 2183 scsi_tgt_priv_data->dev_removedelay) { 2184 scsi_tgt_priv_data->dev_removedelay = 0; 2185 atomic_dec_if_positive 2186 (&scsi_tgt_priv_data->block_io); 2187 } 2188 break; 2189 case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED: 2190 default: 2191 break; 2192 } 2193 if (tgtdev) 2194 mpi3mr_tgtdev_put(tgtdev); 2195 } 2196 } 2197 2198 /** 2199 * mpi3mr_sastopochg_evt_th - SASTopologyChange evt tophalf 2200 * @mrioc: Adapter instance reference 2201 * @event_reply: event data 2202 * 2203 * Checks for the reason code and based on that either block I/O 2204 * to device, or unblock I/O to the device, or start the device 2205 * removal handshake with reason as remove with the firmware for 2206 * SAS/SATA devices. 2207 * 2208 * Return: Nothing 2209 */ 2210 static void mpi3mr_sastopochg_evt_th(struct mpi3mr_ioc *mrioc, 2211 struct mpi3_event_notification_reply *event_reply) 2212 { 2213 struct mpi3_event_data_sas_topology_change_list *topo_evt = 2214 (struct mpi3_event_data_sas_topology_change_list *)event_reply->event_data; 2215 int i; 2216 u16 handle; 2217 u8 reason_code; 2218 struct mpi3mr_tgt_dev *tgtdev = NULL; 2219 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 2220 2221 for (i = 0; i < topo_evt->num_entries; i++) { 2222 handle = le16_to_cpu(topo_evt->phy_entry[i].attached_dev_handle); 2223 if (!handle) 2224 continue; 2225 reason_code = topo_evt->phy_entry[i].status & 2226 MPI3_EVENT_SAS_TOPO_PHY_RC_MASK; 2227 scsi_tgt_priv_data = NULL; 2228 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 2229 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) 2230 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 2231 tgtdev->starget->hostdata; 2232 switch (reason_code) { 2233 case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING: 2234 if (scsi_tgt_priv_data) { 2235 scsi_tgt_priv_data->dev_removed = 1; 2236 scsi_tgt_priv_data->dev_removedelay = 0; 2237 atomic_set(&scsi_tgt_priv_data->block_io, 0); 2238 } 2239 mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL, 2240 MPI3_CTRL_OP_REMOVE_DEVICE); 2241 break; 2242 case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING: 2243 if (scsi_tgt_priv_data) { 2244 scsi_tgt_priv_data->dev_removedelay = 1; 2245 atomic_inc(&scsi_tgt_priv_data->block_io); 2246 } 2247 break; 2248 case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING: 2249 if (scsi_tgt_priv_data && 2250 scsi_tgt_priv_data->dev_removedelay) { 2251 scsi_tgt_priv_data->dev_removedelay = 0; 2252 atomic_dec_if_positive 2253 (&scsi_tgt_priv_data->block_io); 2254 } 2255 break; 2256 case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED: 2257 default: 2258 break; 2259 } 2260 if (tgtdev) 2261 mpi3mr_tgtdev_put(tgtdev); 2262 } 2263 } 2264 2265 /** 2266 * mpi3mr_devstatuschg_evt_th - DeviceStatusChange evt tophalf 2267 * @mrioc: Adapter instance reference 2268 * @event_reply: event data 2269 * 2270 * Checks for the reason code and based on that either block I/O 2271 * to device, or unblock I/O to the device, or start the device 2272 * removal handshake with reason as remove/hide acknowledgment 2273 * with the firmware. 2274 * 2275 * Return: Nothing 2276 */ 2277 static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_ioc *mrioc, 2278 struct mpi3_event_notification_reply *event_reply) 2279 { 2280 u16 dev_handle = 0; 2281 u8 ublock = 0, block = 0, hide = 0, delete = 0, remove = 0; 2282 struct mpi3mr_tgt_dev *tgtdev = NULL; 2283 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 2284 struct mpi3_event_data_device_status_change *evtdata = 2285 (struct mpi3_event_data_device_status_change *)event_reply->event_data; 2286 2287 if (mrioc->stop_drv_processing) 2288 goto out; 2289 2290 dev_handle = le16_to_cpu(evtdata->dev_handle); 2291 2292 switch (evtdata->reason_code) { 2293 case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT: 2294 case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT: 2295 block = 1; 2296 break; 2297 case MPI3_EVENT_DEV_STAT_RC_HIDDEN: 2298 delete = 1; 2299 hide = 1; 2300 break; 2301 case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING: 2302 delete = 1; 2303 remove = 1; 2304 break; 2305 case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP: 2306 case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP: 2307 ublock = 1; 2308 break; 2309 default: 2310 break; 2311 } 2312 2313 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle); 2314 if (!tgtdev) 2315 goto out; 2316 if (hide) 2317 tgtdev->is_hidden = hide; 2318 if (tgtdev->starget && tgtdev->starget->hostdata) { 2319 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 2320 tgtdev->starget->hostdata; 2321 if (block) 2322 atomic_inc(&scsi_tgt_priv_data->block_io); 2323 if (delete) 2324 scsi_tgt_priv_data->dev_removed = 1; 2325 if (ublock) 2326 atomic_dec_if_positive(&scsi_tgt_priv_data->block_io); 2327 } 2328 if (remove) 2329 mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL, 2330 MPI3_CTRL_OP_REMOVE_DEVICE); 2331 if (hide) 2332 mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL, 2333 MPI3_CTRL_OP_HIDDEN_ACK); 2334 2335 out: 2336 if (tgtdev) 2337 mpi3mr_tgtdev_put(tgtdev); 2338 } 2339 2340 /** 2341 * mpi3mr_preparereset_evt_th - Prepare for reset event tophalf 2342 * @mrioc: Adapter instance reference 2343 * @event_reply: event data 2344 * 2345 * Blocks and unblocks host level I/O based on the reason code 2346 * 2347 * Return: Nothing 2348 */ 2349 static void mpi3mr_preparereset_evt_th(struct mpi3mr_ioc *mrioc, 2350 struct mpi3_event_notification_reply *event_reply) 2351 { 2352 struct mpi3_event_data_prepare_for_reset *evtdata = 2353 (struct mpi3_event_data_prepare_for_reset *)event_reply->event_data; 2354 2355 if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_START) { 2356 dprint_event_th(mrioc, 2357 "prepare for reset event top half with rc=start\n"); 2358 if (mrioc->prepare_for_reset) 2359 return; 2360 mrioc->prepare_for_reset = 1; 2361 mrioc->prepare_for_reset_timeout_counter = 0; 2362 } else if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_ABORT) { 2363 dprint_event_th(mrioc, 2364 "prepare for reset top half with rc=abort\n"); 2365 mrioc->prepare_for_reset = 0; 2366 mrioc->prepare_for_reset_timeout_counter = 0; 2367 } 2368 if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK) 2369 == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED) 2370 mpi3mr_send_event_ack(mrioc, event_reply->event, NULL, 2371 le32_to_cpu(event_reply->event_context)); 2372 } 2373 2374 /** 2375 * mpi3mr_energypackchg_evt_th - Energy pack change evt tophalf 2376 * @mrioc: Adapter instance reference 2377 * @event_reply: event data 2378 * 2379 * Identifies the new shutdown timeout value and update. 2380 * 2381 * Return: Nothing 2382 */ 2383 static void mpi3mr_energypackchg_evt_th(struct mpi3mr_ioc *mrioc, 2384 struct mpi3_event_notification_reply *event_reply) 2385 { 2386 struct mpi3_event_data_energy_pack_change *evtdata = 2387 (struct mpi3_event_data_energy_pack_change *)event_reply->event_data; 2388 u16 shutdown_timeout = le16_to_cpu(evtdata->shutdown_timeout); 2389 2390 if (shutdown_timeout <= 0) { 2391 ioc_warn(mrioc, 2392 "%s :Invalid Shutdown Timeout received = %d\n", 2393 __func__, shutdown_timeout); 2394 return; 2395 } 2396 2397 ioc_info(mrioc, 2398 "%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n", 2399 __func__, mrioc->facts.shutdown_timeout, shutdown_timeout); 2400 mrioc->facts.shutdown_timeout = shutdown_timeout; 2401 } 2402 2403 /** 2404 * mpi3mr_cablemgmt_evt_th - Cable management event tophalf 2405 * @mrioc: Adapter instance reference 2406 * @event_reply: event data 2407 * 2408 * Displays Cable manegemt event details. 2409 * 2410 * Return: Nothing 2411 */ 2412 static void mpi3mr_cablemgmt_evt_th(struct mpi3mr_ioc *mrioc, 2413 struct mpi3_event_notification_reply *event_reply) 2414 { 2415 struct mpi3_event_data_cable_management *evtdata = 2416 (struct mpi3_event_data_cable_management *)event_reply->event_data; 2417 2418 switch (evtdata->status) { 2419 case MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER: 2420 { 2421 ioc_info(mrioc, "An active cable with receptacle_id %d cannot be powered.\n" 2422 "Devices connected to this cable are not detected.\n" 2423 "This cable requires %d mW of power.\n", 2424 evtdata->receptacle_id, 2425 le32_to_cpu(evtdata->active_cable_power_requirement)); 2426 break; 2427 } 2428 case MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED: 2429 { 2430 ioc_info(mrioc, "A cable with receptacle_id %d is not running at optimal speed\n", 2431 evtdata->receptacle_id); 2432 break; 2433 } 2434 default: 2435 break; 2436 } 2437 } 2438 2439 /** 2440 * mpi3mr_os_handle_events - Firmware event handler 2441 * @mrioc: Adapter instance reference 2442 * @event_reply: event data 2443 * 2444 * Identify whteher the event has to handled and acknowledged 2445 * and either process the event in the tophalf and/or schedule a 2446 * bottom half through mpi3mr_fwevt_worker. 2447 * 2448 * Return: Nothing 2449 */ 2450 void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc, 2451 struct mpi3_event_notification_reply *event_reply) 2452 { 2453 u16 evt_type, sz; 2454 struct mpi3mr_fwevt *fwevt = NULL; 2455 bool ack_req = 0, process_evt_bh = 0; 2456 2457 if (mrioc->stop_drv_processing) 2458 return; 2459 2460 if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK) 2461 == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED) 2462 ack_req = 1; 2463 2464 evt_type = event_reply->event; 2465 2466 switch (evt_type) { 2467 case MPI3_EVENT_DEVICE_ADDED: 2468 { 2469 struct mpi3_device_page0 *dev_pg0 = 2470 (struct mpi3_device_page0 *)event_reply->event_data; 2471 if (mpi3mr_create_tgtdev(mrioc, dev_pg0)) 2472 ioc_err(mrioc, 2473 "%s :Failed to add device in the device add event\n", 2474 __func__); 2475 else 2476 process_evt_bh = 1; 2477 break; 2478 } 2479 case MPI3_EVENT_DEVICE_STATUS_CHANGE: 2480 { 2481 process_evt_bh = 1; 2482 mpi3mr_devstatuschg_evt_th(mrioc, event_reply); 2483 break; 2484 } 2485 case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST: 2486 { 2487 process_evt_bh = 1; 2488 mpi3mr_sastopochg_evt_th(mrioc, event_reply); 2489 break; 2490 } 2491 case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: 2492 { 2493 process_evt_bh = 1; 2494 mpi3mr_pcietopochg_evt_th(mrioc, event_reply); 2495 break; 2496 } 2497 case MPI3_EVENT_PREPARE_FOR_RESET: 2498 { 2499 mpi3mr_preparereset_evt_th(mrioc, event_reply); 2500 ack_req = 0; 2501 break; 2502 } 2503 case MPI3_EVENT_DEVICE_INFO_CHANGED: 2504 case MPI3_EVENT_LOG_DATA: 2505 { 2506 process_evt_bh = 1; 2507 break; 2508 } 2509 case MPI3_EVENT_ENERGY_PACK_CHANGE: 2510 { 2511 mpi3mr_energypackchg_evt_th(mrioc, event_reply); 2512 break; 2513 } 2514 case MPI3_EVENT_CABLE_MGMT: 2515 { 2516 mpi3mr_cablemgmt_evt_th(mrioc, event_reply); 2517 break; 2518 } 2519 case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE: 2520 case MPI3_EVENT_SAS_DISCOVERY: 2521 case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR: 2522 case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE: 2523 case MPI3_EVENT_PCIE_ENUMERATION: 2524 break; 2525 default: 2526 ioc_info(mrioc, "%s :event 0x%02x is not handled\n", 2527 __func__, evt_type); 2528 break; 2529 } 2530 if (process_evt_bh || ack_req) { 2531 sz = event_reply->event_data_length * 4; 2532 fwevt = mpi3mr_alloc_fwevt(sz); 2533 if (!fwevt) { 2534 ioc_info(mrioc, "%s :failure at %s:%d/%s()!\n", 2535 __func__, __FILE__, __LINE__, __func__); 2536 return; 2537 } 2538 2539 memcpy(fwevt->event_data, event_reply->event_data, sz); 2540 fwevt->mrioc = mrioc; 2541 fwevt->event_id = evt_type; 2542 fwevt->send_ack = ack_req; 2543 fwevt->process_evt = process_evt_bh; 2544 fwevt->evt_ctx = le32_to_cpu(event_reply->event_context); 2545 mpi3mr_fwevt_add_to_list(mrioc, fwevt); 2546 } 2547 } 2548 2549 /** 2550 * mpi3mr_setup_eedp - Setup EEDP information in MPI3 SCSI IO 2551 * @mrioc: Adapter instance reference 2552 * @scmd: SCSI command reference 2553 * @scsiio_req: MPI3 SCSI IO request 2554 * 2555 * Identifies the protection information flags from the SCSI 2556 * command and set appropriate flags in the MPI3 SCSI IO 2557 * request. 2558 * 2559 * Return: Nothing 2560 */ 2561 static void mpi3mr_setup_eedp(struct mpi3mr_ioc *mrioc, 2562 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req) 2563 { 2564 u16 eedp_flags = 0; 2565 unsigned char prot_op = scsi_get_prot_op(scmd); 2566 2567 switch (prot_op) { 2568 case SCSI_PROT_NORMAL: 2569 return; 2570 case SCSI_PROT_READ_STRIP: 2571 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE; 2572 break; 2573 case SCSI_PROT_WRITE_INSERT: 2574 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT; 2575 break; 2576 case SCSI_PROT_READ_INSERT: 2577 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT; 2578 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 2579 break; 2580 case SCSI_PROT_WRITE_STRIP: 2581 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE; 2582 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 2583 break; 2584 case SCSI_PROT_READ_PASS: 2585 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK; 2586 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 2587 break; 2588 case SCSI_PROT_WRITE_PASS: 2589 if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) { 2590 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN; 2591 scsiio_req->sgl[0].eedp.application_tag_translation_mask = 2592 0xffff; 2593 } else 2594 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK; 2595 2596 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 2597 break; 2598 default: 2599 return; 2600 } 2601 2602 if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK) 2603 eedp_flags |= MPI3_EEDPFLAGS_CHK_GUARD; 2604 2605 if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) 2606 eedp_flags |= MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM; 2607 2608 if (scmd->prot_flags & SCSI_PROT_REF_CHECK) { 2609 eedp_flags |= MPI3_EEDPFLAGS_CHK_REF_TAG | 2610 MPI3_EEDPFLAGS_INCR_PRI_REF_TAG; 2611 scsiio_req->cdb.eedp32.primary_reference_tag = 2612 cpu_to_be32(scsi_prot_ref_tag(scmd)); 2613 } 2614 2615 if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT) 2616 eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG; 2617 2618 eedp_flags |= MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE; 2619 2620 switch (scsi_prot_interval(scmd)) { 2621 case 512: 2622 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_512; 2623 break; 2624 case 520: 2625 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_520; 2626 break; 2627 case 4080: 2628 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4080; 2629 break; 2630 case 4088: 2631 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4088; 2632 break; 2633 case 4096: 2634 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4096; 2635 break; 2636 case 4104: 2637 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4104; 2638 break; 2639 case 4160: 2640 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4160; 2641 break; 2642 default: 2643 break; 2644 } 2645 2646 scsiio_req->sgl[0].eedp.eedp_flags = cpu_to_le16(eedp_flags); 2647 scsiio_req->sgl[0].eedp.flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED; 2648 } 2649 2650 /** 2651 * mpi3mr_build_sense_buffer - Map sense information 2652 * @desc: Sense type 2653 * @buf: Sense buffer to populate 2654 * @key: Sense key 2655 * @asc: Additional sense code 2656 * @ascq: Additional sense code qualifier 2657 * 2658 * Maps the given sense information into either descriptor or 2659 * fixed format sense data. 2660 * 2661 * Return: Nothing 2662 */ 2663 static inline void mpi3mr_build_sense_buffer(int desc, u8 *buf, u8 key, 2664 u8 asc, u8 ascq) 2665 { 2666 if (desc) { 2667 buf[0] = 0x72; /* descriptor, current */ 2668 buf[1] = key; 2669 buf[2] = asc; 2670 buf[3] = ascq; 2671 buf[7] = 0; 2672 } else { 2673 buf[0] = 0x70; /* fixed, current */ 2674 buf[2] = key; 2675 buf[7] = 0xa; 2676 buf[12] = asc; 2677 buf[13] = ascq; 2678 } 2679 } 2680 2681 /** 2682 * mpi3mr_map_eedp_error - Map EEDP errors from IOC status 2683 * @scmd: SCSI command reference 2684 * @ioc_status: status of MPI3 request 2685 * 2686 * Maps the EEDP error status of the SCSI IO request to sense 2687 * data. 2688 * 2689 * Return: Nothing 2690 */ 2691 static void mpi3mr_map_eedp_error(struct scsi_cmnd *scmd, 2692 u16 ioc_status) 2693 { 2694 u8 ascq = 0; 2695 2696 switch (ioc_status) { 2697 case MPI3_IOCSTATUS_EEDP_GUARD_ERROR: 2698 ascq = 0x01; 2699 break; 2700 case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR: 2701 ascq = 0x02; 2702 break; 2703 case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR: 2704 ascq = 0x03; 2705 break; 2706 default: 2707 ascq = 0x00; 2708 break; 2709 } 2710 2711 mpi3mr_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 2712 0x10, ascq); 2713 scmd->result = (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION; 2714 } 2715 2716 /** 2717 * mpi3mr_process_op_reply_desc - reply descriptor handler 2718 * @mrioc: Adapter instance reference 2719 * @reply_desc: Operational reply descriptor 2720 * @reply_dma: place holder for reply DMA address 2721 * @qidx: Operational queue index 2722 * 2723 * Process the operational reply descriptor and identifies the 2724 * descriptor type. Based on the descriptor map the MPI3 request 2725 * status to a SCSI command status and calls scsi_done call 2726 * back. 2727 * 2728 * Return: Nothing 2729 */ 2730 void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc, 2731 struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma, u16 qidx) 2732 { 2733 u16 reply_desc_type, host_tag = 0; 2734 u16 ioc_status = MPI3_IOCSTATUS_SUCCESS; 2735 u32 ioc_loginfo = 0; 2736 struct mpi3_status_reply_descriptor *status_desc = NULL; 2737 struct mpi3_address_reply_descriptor *addr_desc = NULL; 2738 struct mpi3_success_reply_descriptor *success_desc = NULL; 2739 struct mpi3_scsi_io_reply *scsi_reply = NULL; 2740 struct scsi_cmnd *scmd = NULL; 2741 struct scmd_priv *priv = NULL; 2742 u8 *sense_buf = NULL; 2743 u8 scsi_state = 0, scsi_status = 0, sense_state = 0; 2744 u32 xfer_count = 0, sense_count = 0, resp_data = 0; 2745 u16 dev_handle = 0xFFFF; 2746 struct scsi_sense_hdr sshdr; 2747 struct mpi3mr_stgt_priv_data *stgt_priv_data = NULL; 2748 struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL; 2749 u32 ioc_pend_data_len = 0, tg_pend_data_len = 0, data_len_blks = 0; 2750 struct mpi3mr_throttle_group_info *tg = NULL; 2751 u8 throttle_enabled_dev = 0; 2752 2753 *reply_dma = 0; 2754 reply_desc_type = le16_to_cpu(reply_desc->reply_flags) & 2755 MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK; 2756 switch (reply_desc_type) { 2757 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS: 2758 status_desc = (struct mpi3_status_reply_descriptor *)reply_desc; 2759 host_tag = le16_to_cpu(status_desc->host_tag); 2760 ioc_status = le16_to_cpu(status_desc->ioc_status); 2761 if (ioc_status & 2762 MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL) 2763 ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info); 2764 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK; 2765 break; 2766 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY: 2767 addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc; 2768 *reply_dma = le64_to_cpu(addr_desc->reply_frame_address); 2769 scsi_reply = mpi3mr_get_reply_virt_addr(mrioc, 2770 *reply_dma); 2771 if (!scsi_reply) { 2772 panic("%s: scsi_reply is NULL, this shouldn't happen\n", 2773 mrioc->name); 2774 goto out; 2775 } 2776 host_tag = le16_to_cpu(scsi_reply->host_tag); 2777 ioc_status = le16_to_cpu(scsi_reply->ioc_status); 2778 scsi_status = scsi_reply->scsi_status; 2779 scsi_state = scsi_reply->scsi_state; 2780 dev_handle = le16_to_cpu(scsi_reply->dev_handle); 2781 sense_state = (scsi_state & MPI3_SCSI_STATE_SENSE_MASK); 2782 xfer_count = le32_to_cpu(scsi_reply->transfer_count); 2783 sense_count = le32_to_cpu(scsi_reply->sense_count); 2784 resp_data = le32_to_cpu(scsi_reply->response_data); 2785 sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc, 2786 le64_to_cpu(scsi_reply->sense_data_buffer_address)); 2787 if (ioc_status & 2788 MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL) 2789 ioc_loginfo = le32_to_cpu(scsi_reply->ioc_log_info); 2790 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK; 2791 if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY) 2792 panic("%s: Ran out of sense buffers\n", mrioc->name); 2793 break; 2794 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS: 2795 success_desc = (struct mpi3_success_reply_descriptor *)reply_desc; 2796 host_tag = le16_to_cpu(success_desc->host_tag); 2797 break; 2798 default: 2799 break; 2800 } 2801 scmd = mpi3mr_scmd_from_host_tag(mrioc, host_tag, qidx); 2802 if (!scmd) { 2803 panic("%s: Cannot Identify scmd for host_tag 0x%x\n", 2804 mrioc->name, host_tag); 2805 goto out; 2806 } 2807 priv = scsi_cmd_priv(scmd); 2808 2809 data_len_blks = scsi_bufflen(scmd) >> 9; 2810 sdev_priv_data = scmd->device->hostdata; 2811 if (sdev_priv_data) { 2812 stgt_priv_data = sdev_priv_data->tgt_priv_data; 2813 if (stgt_priv_data) { 2814 tg = stgt_priv_data->throttle_group; 2815 throttle_enabled_dev = 2816 stgt_priv_data->io_throttle_enabled; 2817 } 2818 } 2819 if (unlikely((data_len_blks >= mrioc->io_throttle_data_length) && 2820 throttle_enabled_dev)) { 2821 ioc_pend_data_len = atomic_sub_return(data_len_blks, 2822 &mrioc->pend_large_data_sz); 2823 if (tg) { 2824 tg_pend_data_len = atomic_sub_return(data_len_blks, 2825 &tg->pend_large_data_sz); 2826 if (tg->io_divert && ((ioc_pend_data_len <= 2827 mrioc->io_throttle_low) && 2828 (tg_pend_data_len <= tg->low))) { 2829 tg->io_divert = 0; 2830 mpi3mr_set_io_divert_for_all_vd_in_tg( 2831 mrioc, tg, 0); 2832 } 2833 } else { 2834 if (ioc_pend_data_len <= mrioc->io_throttle_low) 2835 stgt_priv_data->io_divert = 0; 2836 } 2837 } else if (unlikely((stgt_priv_data && stgt_priv_data->io_divert))) { 2838 ioc_pend_data_len = atomic_read(&mrioc->pend_large_data_sz); 2839 if (!tg) { 2840 if (ioc_pend_data_len <= mrioc->io_throttle_low) 2841 stgt_priv_data->io_divert = 0; 2842 2843 } else if (ioc_pend_data_len <= mrioc->io_throttle_low) { 2844 tg_pend_data_len = atomic_read(&tg->pend_large_data_sz); 2845 if (tg->io_divert && (tg_pend_data_len <= tg->low)) { 2846 tg->io_divert = 0; 2847 mpi3mr_set_io_divert_for_all_vd_in_tg( 2848 mrioc, tg, 0); 2849 } 2850 } 2851 } 2852 2853 if (success_desc) { 2854 scmd->result = DID_OK << 16; 2855 goto out_success; 2856 } 2857 2858 scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_count); 2859 if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN && 2860 xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY || 2861 scsi_status == MPI3_SCSI_STATUS_RESERVATION_CONFLICT || 2862 scsi_status == MPI3_SCSI_STATUS_TASK_SET_FULL)) 2863 ioc_status = MPI3_IOCSTATUS_SUCCESS; 2864 2865 if ((sense_state == MPI3_SCSI_STATE_SENSE_VALID) && sense_count && 2866 sense_buf) { 2867 u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, sense_count); 2868 2869 memcpy(scmd->sense_buffer, sense_buf, sz); 2870 } 2871 2872 switch (ioc_status) { 2873 case MPI3_IOCSTATUS_BUSY: 2874 case MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES: 2875 scmd->result = SAM_STAT_BUSY; 2876 break; 2877 case MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE: 2878 scmd->result = DID_NO_CONNECT << 16; 2879 break; 2880 case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED: 2881 scmd->result = DID_SOFT_ERROR << 16; 2882 break; 2883 case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED: 2884 case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED: 2885 scmd->result = DID_RESET << 16; 2886 break; 2887 case MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: 2888 if ((xfer_count == 0) || (scmd->underflow > xfer_count)) 2889 scmd->result = DID_SOFT_ERROR << 16; 2890 else 2891 scmd->result = (DID_OK << 16) | scsi_status; 2892 break; 2893 case MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN: 2894 scmd->result = (DID_OK << 16) | scsi_status; 2895 if (sense_state == MPI3_SCSI_STATE_SENSE_VALID) 2896 break; 2897 if (xfer_count < scmd->underflow) { 2898 if (scsi_status == SAM_STAT_BUSY) 2899 scmd->result = SAM_STAT_BUSY; 2900 else 2901 scmd->result = DID_SOFT_ERROR << 16; 2902 } else if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) || 2903 (sense_state != MPI3_SCSI_STATE_SENSE_NOT_AVAILABLE)) 2904 scmd->result = DID_SOFT_ERROR << 16; 2905 else if (scsi_state & MPI3_SCSI_STATE_TERMINATED) 2906 scmd->result = DID_RESET << 16; 2907 break; 2908 case MPI3_IOCSTATUS_SCSI_DATA_OVERRUN: 2909 scsi_set_resid(scmd, 0); 2910 fallthrough; 2911 case MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR: 2912 case MPI3_IOCSTATUS_SUCCESS: 2913 scmd->result = (DID_OK << 16) | scsi_status; 2914 if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) || 2915 (sense_state == MPI3_SCSI_STATE_SENSE_FAILED) || 2916 (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY)) 2917 scmd->result = DID_SOFT_ERROR << 16; 2918 else if (scsi_state & MPI3_SCSI_STATE_TERMINATED) 2919 scmd->result = DID_RESET << 16; 2920 break; 2921 case MPI3_IOCSTATUS_EEDP_GUARD_ERROR: 2922 case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR: 2923 case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR: 2924 mpi3mr_map_eedp_error(scmd, ioc_status); 2925 break; 2926 case MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR: 2927 case MPI3_IOCSTATUS_INVALID_FUNCTION: 2928 case MPI3_IOCSTATUS_INVALID_SGL: 2929 case MPI3_IOCSTATUS_INTERNAL_ERROR: 2930 case MPI3_IOCSTATUS_INVALID_FIELD: 2931 case MPI3_IOCSTATUS_INVALID_STATE: 2932 case MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR: 2933 case MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED: 2934 case MPI3_IOCSTATUS_INSUFFICIENT_POWER: 2935 default: 2936 scmd->result = DID_SOFT_ERROR << 16; 2937 break; 2938 } 2939 2940 if (scmd->result != (DID_OK << 16) && (scmd->cmnd[0] != ATA_12) && 2941 (scmd->cmnd[0] != ATA_16)) { 2942 ioc_info(mrioc, "%s :scmd->result 0x%x\n", __func__, 2943 scmd->result); 2944 scsi_print_command(scmd); 2945 ioc_info(mrioc, 2946 "%s :Command issued to handle 0x%02x returned with error 0x%04x loginfo 0x%08x, qid %d\n", 2947 __func__, dev_handle, ioc_status, ioc_loginfo, 2948 priv->req_q_idx + 1); 2949 ioc_info(mrioc, 2950 " host_tag %d scsi_state 0x%02x scsi_status 0x%02x, xfer_cnt %d resp_data 0x%x\n", 2951 host_tag, scsi_state, scsi_status, xfer_count, resp_data); 2952 if (sense_buf) { 2953 scsi_normalize_sense(sense_buf, sense_count, &sshdr); 2954 ioc_info(mrioc, 2955 "%s :sense_count 0x%x, sense_key 0x%x ASC 0x%x, ASCQ 0x%x\n", 2956 __func__, sense_count, sshdr.sense_key, 2957 sshdr.asc, sshdr.ascq); 2958 } 2959 } 2960 out_success: 2961 if (priv->meta_sg_valid) { 2962 dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd), 2963 scsi_prot_sg_count(scmd), scmd->sc_data_direction); 2964 } 2965 mpi3mr_clear_scmd_priv(mrioc, scmd); 2966 scsi_dma_unmap(scmd); 2967 scsi_done(scmd); 2968 out: 2969 if (sense_buf) 2970 mpi3mr_repost_sense_buf(mrioc, 2971 le64_to_cpu(scsi_reply->sense_data_buffer_address)); 2972 } 2973 2974 /** 2975 * mpi3mr_get_chain_idx - get free chain buffer index 2976 * @mrioc: Adapter instance reference 2977 * 2978 * Try to get a free chain buffer index from the free pool. 2979 * 2980 * Return: -1 on failure or the free chain buffer index 2981 */ 2982 static int mpi3mr_get_chain_idx(struct mpi3mr_ioc *mrioc) 2983 { 2984 u8 retry_count = 5; 2985 int cmd_idx = -1; 2986 2987 do { 2988 spin_lock(&mrioc->chain_buf_lock); 2989 cmd_idx = find_first_zero_bit(mrioc->chain_bitmap, 2990 mrioc->chain_buf_count); 2991 if (cmd_idx < mrioc->chain_buf_count) { 2992 set_bit(cmd_idx, mrioc->chain_bitmap); 2993 spin_unlock(&mrioc->chain_buf_lock); 2994 break; 2995 } 2996 spin_unlock(&mrioc->chain_buf_lock); 2997 cmd_idx = -1; 2998 } while (retry_count--); 2999 return cmd_idx; 3000 } 3001 3002 /** 3003 * mpi3mr_prepare_sg_scmd - build scatter gather list 3004 * @mrioc: Adapter instance reference 3005 * @scmd: SCSI command reference 3006 * @scsiio_req: MPI3 SCSI IO request 3007 * 3008 * This function maps SCSI command's data and protection SGEs to 3009 * MPI request SGEs. If required additional 4K chain buffer is 3010 * used to send the SGEs. 3011 * 3012 * Return: 0 on success, -ENOMEM on dma_map_sg failure 3013 */ 3014 static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc, 3015 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req) 3016 { 3017 dma_addr_t chain_dma; 3018 struct scatterlist *sg_scmd; 3019 void *sg_local, *chain; 3020 u32 chain_length; 3021 int sges_left, chain_idx; 3022 u32 sges_in_segment; 3023 u8 simple_sgl_flags; 3024 u8 simple_sgl_flags_last; 3025 u8 last_chain_sgl_flags; 3026 struct chain_element *chain_req; 3027 struct scmd_priv *priv = NULL; 3028 u32 meta_sg = le32_to_cpu(scsiio_req->flags) & 3029 MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI; 3030 3031 priv = scsi_cmd_priv(scmd); 3032 3033 simple_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE | 3034 MPI3_SGE_FLAGS_DLAS_SYSTEM; 3035 simple_sgl_flags_last = simple_sgl_flags | 3036 MPI3_SGE_FLAGS_END_OF_LIST; 3037 last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN | 3038 MPI3_SGE_FLAGS_DLAS_SYSTEM; 3039 3040 if (meta_sg) 3041 sg_local = &scsiio_req->sgl[MPI3_SCSIIO_METASGL_INDEX]; 3042 else 3043 sg_local = &scsiio_req->sgl; 3044 3045 if (!scsiio_req->data_length && !meta_sg) { 3046 mpi3mr_build_zero_len_sge(sg_local); 3047 return 0; 3048 } 3049 3050 if (meta_sg) { 3051 sg_scmd = scsi_prot_sglist(scmd); 3052 sges_left = dma_map_sg(&mrioc->pdev->dev, 3053 scsi_prot_sglist(scmd), 3054 scsi_prot_sg_count(scmd), 3055 scmd->sc_data_direction); 3056 priv->meta_sg_valid = 1; /* To unmap meta sg DMA */ 3057 } else { 3058 sg_scmd = scsi_sglist(scmd); 3059 sges_left = scsi_dma_map(scmd); 3060 } 3061 3062 if (sges_left < 0) { 3063 sdev_printk(KERN_ERR, scmd->device, 3064 "scsi_dma_map failed: request for %d bytes!\n", 3065 scsi_bufflen(scmd)); 3066 return -ENOMEM; 3067 } 3068 if (sges_left > MPI3MR_SG_DEPTH) { 3069 sdev_printk(KERN_ERR, scmd->device, 3070 "scsi_dma_map returned unsupported sge count %d!\n", 3071 sges_left); 3072 return -ENOMEM; 3073 } 3074 3075 sges_in_segment = (mrioc->facts.op_req_sz - 3076 offsetof(struct mpi3_scsi_io_request, sgl)) / sizeof(struct mpi3_sge_common); 3077 3078 if (scsiio_req->sgl[0].eedp.flags == 3079 MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED && !meta_sg) { 3080 sg_local += sizeof(struct mpi3_sge_common); 3081 sges_in_segment--; 3082 /* Reserve 1st segment (scsiio_req->sgl[0]) for eedp */ 3083 } 3084 3085 if (scsiio_req->msg_flags == 3086 MPI3_SCSIIO_MSGFLAGS_METASGL_VALID && !meta_sg) { 3087 sges_in_segment--; 3088 /* Reserve last segment (scsiio_req->sgl[3]) for meta sg */ 3089 } 3090 3091 if (meta_sg) 3092 sges_in_segment = 1; 3093 3094 if (sges_left <= sges_in_segment) 3095 goto fill_in_last_segment; 3096 3097 /* fill in main message segment when there is a chain following */ 3098 while (sges_in_segment > 1) { 3099 mpi3mr_add_sg_single(sg_local, simple_sgl_flags, 3100 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 3101 sg_scmd = sg_next(sg_scmd); 3102 sg_local += sizeof(struct mpi3_sge_common); 3103 sges_left--; 3104 sges_in_segment--; 3105 } 3106 3107 chain_idx = mpi3mr_get_chain_idx(mrioc); 3108 if (chain_idx < 0) 3109 return -1; 3110 chain_req = &mrioc->chain_sgl_list[chain_idx]; 3111 if (meta_sg) 3112 priv->meta_chain_idx = chain_idx; 3113 else 3114 priv->chain_idx = chain_idx; 3115 3116 chain = chain_req->addr; 3117 chain_dma = chain_req->dma_addr; 3118 sges_in_segment = sges_left; 3119 chain_length = sges_in_segment * sizeof(struct mpi3_sge_common); 3120 3121 mpi3mr_add_sg_single(sg_local, last_chain_sgl_flags, 3122 chain_length, chain_dma); 3123 3124 sg_local = chain; 3125 3126 fill_in_last_segment: 3127 while (sges_left > 0) { 3128 if (sges_left == 1) 3129 mpi3mr_add_sg_single(sg_local, 3130 simple_sgl_flags_last, sg_dma_len(sg_scmd), 3131 sg_dma_address(sg_scmd)); 3132 else 3133 mpi3mr_add_sg_single(sg_local, simple_sgl_flags, 3134 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 3135 sg_scmd = sg_next(sg_scmd); 3136 sg_local += sizeof(struct mpi3_sge_common); 3137 sges_left--; 3138 } 3139 3140 return 0; 3141 } 3142 3143 /** 3144 * mpi3mr_build_sg_scmd - build scatter gather list for SCSI IO 3145 * @mrioc: Adapter instance reference 3146 * @scmd: SCSI command reference 3147 * @scsiio_req: MPI3 SCSI IO request 3148 * 3149 * This function calls mpi3mr_prepare_sg_scmd for constructing 3150 * both data SGEs and protection information SGEs in the MPI 3151 * format from the SCSI Command as appropriate . 3152 * 3153 * Return: return value of mpi3mr_prepare_sg_scmd. 3154 */ 3155 static int mpi3mr_build_sg_scmd(struct mpi3mr_ioc *mrioc, 3156 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req) 3157 { 3158 int ret; 3159 3160 ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req); 3161 if (ret) 3162 return ret; 3163 3164 if (scsiio_req->msg_flags == MPI3_SCSIIO_MSGFLAGS_METASGL_VALID) { 3165 /* There is a valid meta sg */ 3166 scsiio_req->flags |= 3167 cpu_to_le32(MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI); 3168 ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req); 3169 } 3170 3171 return ret; 3172 } 3173 3174 /** 3175 * mpi3mr_tm_response_name - get TM response as a string 3176 * @resp_code: TM response code 3177 * 3178 * Convert known task management response code as a readable 3179 * string. 3180 * 3181 * Return: response code string. 3182 */ 3183 static const char *mpi3mr_tm_response_name(u8 resp_code) 3184 { 3185 char *desc; 3186 3187 switch (resp_code) { 3188 case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE: 3189 desc = "task management request completed"; 3190 break; 3191 case MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME: 3192 desc = "invalid frame"; 3193 break; 3194 case MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED: 3195 desc = "task management request not supported"; 3196 break; 3197 case MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED: 3198 desc = "task management request failed"; 3199 break; 3200 case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED: 3201 desc = "task management request succeeded"; 3202 break; 3203 case MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN: 3204 desc = "invalid LUN"; 3205 break; 3206 case MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG: 3207 desc = "overlapped tag attempted"; 3208 break; 3209 case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC: 3210 desc = "task queued, however not sent to target"; 3211 break; 3212 case MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED: 3213 desc = "task management request denied by NVMe device"; 3214 break; 3215 default: 3216 desc = "unknown"; 3217 break; 3218 } 3219 3220 return desc; 3221 } 3222 3223 inline void mpi3mr_poll_pend_io_completions(struct mpi3mr_ioc *mrioc) 3224 { 3225 int i; 3226 int num_of_reply_queues = 3227 mrioc->num_op_reply_q + mrioc->op_reply_q_offset; 3228 3229 for (i = mrioc->op_reply_q_offset; i < num_of_reply_queues; i++) 3230 mpi3mr_process_op_reply_q(mrioc, 3231 mrioc->intr_info[i].op_reply_q); 3232 } 3233 3234 /** 3235 * mpi3mr_issue_tm - Issue Task Management request 3236 * @mrioc: Adapter instance reference 3237 * @tm_type: Task Management type 3238 * @handle: Device handle 3239 * @lun: lun ID 3240 * @htag: Host tag of the TM request 3241 * @timeout: TM timeout value 3242 * @drv_cmd: Internal command tracker 3243 * @resp_code: Response code place holder 3244 * @scmd: SCSI command 3245 * 3246 * Issues a Task Management Request to the controller for a 3247 * specified target, lun and command and wait for its completion 3248 * and check TM response. Recover the TM if it timed out by 3249 * issuing controller reset. 3250 * 3251 * Return: 0 on success, non-zero on errors 3252 */ 3253 int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type, 3254 u16 handle, uint lun, u16 htag, ulong timeout, 3255 struct mpi3mr_drv_cmd *drv_cmd, 3256 u8 *resp_code, struct scsi_cmnd *scmd) 3257 { 3258 struct mpi3_scsi_task_mgmt_request tm_req; 3259 struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL; 3260 int retval = 0; 3261 struct mpi3mr_tgt_dev *tgtdev = NULL; 3262 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 3263 struct scmd_priv *cmd_priv = NULL; 3264 struct scsi_device *sdev = NULL; 3265 struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL; 3266 3267 ioc_info(mrioc, "%s :Issue TM: TM type (0x%x) for devhandle 0x%04x\n", 3268 __func__, tm_type, handle); 3269 if (mrioc->unrecoverable) { 3270 retval = -1; 3271 ioc_err(mrioc, "%s :Issue TM: Unrecoverable controller\n", 3272 __func__); 3273 goto out; 3274 } 3275 3276 memset(&tm_req, 0, sizeof(tm_req)); 3277 mutex_lock(&drv_cmd->mutex); 3278 if (drv_cmd->state & MPI3MR_CMD_PENDING) { 3279 retval = -1; 3280 ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__); 3281 mutex_unlock(&drv_cmd->mutex); 3282 goto out; 3283 } 3284 if (mrioc->reset_in_progress) { 3285 retval = -1; 3286 ioc_err(mrioc, "%s :Issue TM: Reset in progress\n", __func__); 3287 mutex_unlock(&drv_cmd->mutex); 3288 goto out; 3289 } 3290 3291 drv_cmd->state = MPI3MR_CMD_PENDING; 3292 drv_cmd->is_waiting = 1; 3293 drv_cmd->callback = NULL; 3294 tm_req.dev_handle = cpu_to_le16(handle); 3295 tm_req.task_type = tm_type; 3296 tm_req.host_tag = cpu_to_le16(htag); 3297 3298 int_to_scsilun(lun, (struct scsi_lun *)tm_req.lun); 3299 tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT; 3300 3301 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 3302 3303 if (scmd) { 3304 sdev = scmd->device; 3305 sdev_priv_data = sdev->hostdata; 3306 scsi_tgt_priv_data = ((sdev_priv_data) ? 3307 sdev_priv_data->tgt_priv_data : NULL); 3308 } else { 3309 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) 3310 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 3311 tgtdev->starget->hostdata; 3312 } 3313 3314 if (scsi_tgt_priv_data) 3315 atomic_inc(&scsi_tgt_priv_data->block_io); 3316 3317 if (tgtdev && (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)) { 3318 if (cmd_priv && tgtdev->dev_spec.pcie_inf.abort_to) 3319 timeout = tgtdev->dev_spec.pcie_inf.abort_to; 3320 else if (!cmd_priv && tgtdev->dev_spec.pcie_inf.reset_to) 3321 timeout = tgtdev->dev_spec.pcie_inf.reset_to; 3322 } 3323 3324 init_completion(&drv_cmd->done); 3325 retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1); 3326 if (retval) { 3327 ioc_err(mrioc, "%s :Issue TM: Admin Post failed\n", __func__); 3328 goto out_unlock; 3329 } 3330 wait_for_completion_timeout(&drv_cmd->done, (timeout * HZ)); 3331 3332 if (!(drv_cmd->state & MPI3MR_CMD_COMPLETE)) { 3333 drv_cmd->is_waiting = 0; 3334 retval = -1; 3335 if (!(drv_cmd->state & MPI3MR_CMD_RESET)) { 3336 dprint_tm(mrioc, 3337 "task management request timed out after %ld seconds\n", 3338 timeout); 3339 if (mrioc->logging_level & MPI3_DEBUG_TM) 3340 dprint_dump_req(&tm_req, sizeof(tm_req)/4); 3341 mpi3mr_soft_reset_handler(mrioc, 3342 MPI3MR_RESET_FROM_TM_TIMEOUT, 1); 3343 } 3344 goto out_unlock; 3345 } 3346 3347 if (!(drv_cmd->state & MPI3MR_CMD_REPLY_VALID)) { 3348 dprint_tm(mrioc, "invalid task management reply message\n"); 3349 retval = -1; 3350 goto out_unlock; 3351 } 3352 3353 tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply; 3354 3355 switch (drv_cmd->ioc_status) { 3356 case MPI3_IOCSTATUS_SUCCESS: 3357 *resp_code = le32_to_cpu(tm_reply->response_data) & 3358 MPI3MR_RI_MASK_RESPCODE; 3359 break; 3360 case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED: 3361 *resp_code = MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE; 3362 break; 3363 default: 3364 dprint_tm(mrioc, 3365 "task management request to handle(0x%04x) is failed with ioc_status(0x%04x) log_info(0x%08x)\n", 3366 handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo); 3367 retval = -1; 3368 goto out_unlock; 3369 } 3370 3371 switch (*resp_code) { 3372 case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED: 3373 case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE: 3374 break; 3375 case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC: 3376 if (tm_type != MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK) 3377 retval = -1; 3378 break; 3379 default: 3380 retval = -1; 3381 break; 3382 } 3383 3384 dprint_tm(mrioc, 3385 "task management request type(%d) completed for handle(0x%04x) with ioc_status(0x%04x), log_info(0x%08x), termination_count(%d), response:%s(0x%x)\n", 3386 tm_type, handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo, 3387 le32_to_cpu(tm_reply->termination_count), 3388 mpi3mr_tm_response_name(*resp_code), *resp_code); 3389 3390 if (!retval) { 3391 mpi3mr_ioc_disable_intr(mrioc); 3392 mpi3mr_poll_pend_io_completions(mrioc); 3393 mpi3mr_ioc_enable_intr(mrioc); 3394 mpi3mr_poll_pend_io_completions(mrioc); 3395 } 3396 switch (tm_type) { 3397 case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET: 3398 if (!scsi_tgt_priv_data) 3399 break; 3400 scsi_tgt_priv_data->pend_count = 0; 3401 blk_mq_tagset_busy_iter(&mrioc->shost->tag_set, 3402 mpi3mr_count_tgt_pending, 3403 (void *)scsi_tgt_priv_data->starget); 3404 break; 3405 case MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: 3406 if (!sdev_priv_data) 3407 break; 3408 sdev_priv_data->pend_count = 0; 3409 blk_mq_tagset_busy_iter(&mrioc->shost->tag_set, 3410 mpi3mr_count_dev_pending, (void *)sdev); 3411 break; 3412 default: 3413 break; 3414 } 3415 3416 out_unlock: 3417 drv_cmd->state = MPI3MR_CMD_NOTUSED; 3418 mutex_unlock(&drv_cmd->mutex); 3419 if (scsi_tgt_priv_data) 3420 atomic_dec_if_positive(&scsi_tgt_priv_data->block_io); 3421 if (tgtdev) 3422 mpi3mr_tgtdev_put(tgtdev); 3423 out: 3424 return retval; 3425 } 3426 3427 /** 3428 * mpi3mr_bios_param - BIOS param callback 3429 * @sdev: SCSI device reference 3430 * @bdev: Block device reference 3431 * @capacity: Capacity in logical sectors 3432 * @params: Parameter array 3433 * 3434 * Just the parameters with heads/secots/cylinders. 3435 * 3436 * Return: 0 always 3437 */ 3438 static int mpi3mr_bios_param(struct scsi_device *sdev, 3439 struct block_device *bdev, sector_t capacity, int params[]) 3440 { 3441 int heads; 3442 int sectors; 3443 sector_t cylinders; 3444 ulong dummy; 3445 3446 heads = 64; 3447 sectors = 32; 3448 3449 dummy = heads * sectors; 3450 cylinders = capacity; 3451 sector_div(cylinders, dummy); 3452 3453 if ((ulong)capacity >= 0x200000) { 3454 heads = 255; 3455 sectors = 63; 3456 dummy = heads * sectors; 3457 cylinders = capacity; 3458 sector_div(cylinders, dummy); 3459 } 3460 3461 params[0] = heads; 3462 params[1] = sectors; 3463 params[2] = cylinders; 3464 return 0; 3465 } 3466 3467 /** 3468 * mpi3mr_map_queues - Map queues callback handler 3469 * @shost: SCSI host reference 3470 * 3471 * Maps default and poll queues. 3472 * 3473 * Return: return zero. 3474 */ 3475 static int mpi3mr_map_queues(struct Scsi_Host *shost) 3476 { 3477 struct mpi3mr_ioc *mrioc = shost_priv(shost); 3478 int i, qoff, offset; 3479 struct blk_mq_queue_map *map = NULL; 3480 3481 offset = mrioc->op_reply_q_offset; 3482 3483 for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) { 3484 map = &shost->tag_set.map[i]; 3485 3486 map->nr_queues = 0; 3487 3488 if (i == HCTX_TYPE_DEFAULT) 3489 map->nr_queues = mrioc->default_qcount; 3490 else if (i == HCTX_TYPE_POLL) 3491 map->nr_queues = mrioc->active_poll_qcount; 3492 3493 if (!map->nr_queues) { 3494 BUG_ON(i == HCTX_TYPE_DEFAULT); 3495 continue; 3496 } 3497 3498 /* 3499 * The poll queue(s) doesn't have an IRQ (and hence IRQ 3500 * affinity), so use the regular blk-mq cpu mapping 3501 */ 3502 map->queue_offset = qoff; 3503 if (i != HCTX_TYPE_POLL) 3504 blk_mq_pci_map_queues(map, mrioc->pdev, offset); 3505 else 3506 blk_mq_map_queues(map); 3507 3508 qoff += map->nr_queues; 3509 offset += map->nr_queues; 3510 } 3511 3512 return 0; 3513 3514 } 3515 3516 /** 3517 * mpi3mr_get_fw_pending_ios - Calculate pending I/O count 3518 * @mrioc: Adapter instance reference 3519 * 3520 * Calculate the pending I/Os for the controller and return. 3521 * 3522 * Return: Number of pending I/Os 3523 */ 3524 static inline int mpi3mr_get_fw_pending_ios(struct mpi3mr_ioc *mrioc) 3525 { 3526 u16 i; 3527 uint pend_ios = 0; 3528 3529 for (i = 0; i < mrioc->num_op_reply_q; i++) 3530 pend_ios += atomic_read(&mrioc->op_reply_qinfo[i].pend_ios); 3531 return pend_ios; 3532 } 3533 3534 /** 3535 * mpi3mr_print_pending_host_io - print pending I/Os 3536 * @mrioc: Adapter instance reference 3537 * 3538 * Print number of pending I/Os and each I/O details prior to 3539 * reset for debug purpose. 3540 * 3541 * Return: Nothing 3542 */ 3543 static void mpi3mr_print_pending_host_io(struct mpi3mr_ioc *mrioc) 3544 { 3545 struct Scsi_Host *shost = mrioc->shost; 3546 3547 ioc_info(mrioc, "%s :Pending commands prior to reset: %d\n", 3548 __func__, mpi3mr_get_fw_pending_ios(mrioc)); 3549 blk_mq_tagset_busy_iter(&shost->tag_set, 3550 mpi3mr_print_scmd, (void *)mrioc); 3551 } 3552 3553 /** 3554 * mpi3mr_wait_for_host_io - block for I/Os to complete 3555 * @mrioc: Adapter instance reference 3556 * @timeout: time out in seconds 3557 * Waits for pending I/Os for the given adapter to complete or 3558 * to hit the timeout. 3559 * 3560 * Return: Nothing 3561 */ 3562 void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout) 3563 { 3564 enum mpi3mr_iocstate iocstate; 3565 int i = 0; 3566 3567 iocstate = mpi3mr_get_iocstate(mrioc); 3568 if (iocstate != MRIOC_STATE_READY) 3569 return; 3570 3571 if (!mpi3mr_get_fw_pending_ios(mrioc)) 3572 return; 3573 ioc_info(mrioc, 3574 "%s :Waiting for %d seconds prior to reset for %d I/O\n", 3575 __func__, timeout, mpi3mr_get_fw_pending_ios(mrioc)); 3576 3577 for (i = 0; i < timeout; i++) { 3578 if (!mpi3mr_get_fw_pending_ios(mrioc)) 3579 break; 3580 iocstate = mpi3mr_get_iocstate(mrioc); 3581 if (iocstate != MRIOC_STATE_READY) 3582 break; 3583 msleep(1000); 3584 } 3585 3586 ioc_info(mrioc, "%s :Pending I/Os after wait is: %d\n", __func__, 3587 mpi3mr_get_fw_pending_ios(mrioc)); 3588 } 3589 3590 /** 3591 * mpi3mr_eh_host_reset - Host reset error handling callback 3592 * @scmd: SCSI command reference 3593 * 3594 * Issue controller reset if the scmd is for a Physical Device, 3595 * if the scmd is for RAID volume, then wait for 3596 * MPI3MR_RAID_ERRREC_RESET_TIMEOUT and checke whether any 3597 * pending I/Os prior to issuing reset to the controller. 3598 * 3599 * Return: SUCCESS of successful reset else FAILED 3600 */ 3601 static int mpi3mr_eh_host_reset(struct scsi_cmnd *scmd) 3602 { 3603 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host); 3604 struct mpi3mr_stgt_priv_data *stgt_priv_data; 3605 struct mpi3mr_sdev_priv_data *sdev_priv_data; 3606 u8 dev_type = MPI3_DEVICE_DEVFORM_VD; 3607 int retval = FAILED, ret; 3608 3609 sdev_priv_data = scmd->device->hostdata; 3610 if (sdev_priv_data && sdev_priv_data->tgt_priv_data) { 3611 stgt_priv_data = sdev_priv_data->tgt_priv_data; 3612 dev_type = stgt_priv_data->dev_type; 3613 } 3614 3615 if (dev_type == MPI3_DEVICE_DEVFORM_VD) { 3616 mpi3mr_wait_for_host_io(mrioc, 3617 MPI3MR_RAID_ERRREC_RESET_TIMEOUT); 3618 if (!mpi3mr_get_fw_pending_ios(mrioc)) { 3619 retval = SUCCESS; 3620 goto out; 3621 } 3622 } 3623 3624 mpi3mr_print_pending_host_io(mrioc); 3625 ret = mpi3mr_soft_reset_handler(mrioc, 3626 MPI3MR_RESET_FROM_EH_HOS, 1); 3627 if (ret) 3628 goto out; 3629 3630 retval = SUCCESS; 3631 out: 3632 sdev_printk(KERN_INFO, scmd->device, 3633 "Host reset is %s for scmd(%p)\n", 3634 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); 3635 3636 return retval; 3637 } 3638 3639 /** 3640 * mpi3mr_eh_target_reset - Target reset error handling callback 3641 * @scmd: SCSI command reference 3642 * 3643 * Issue Target reset Task Management and verify the scmd is 3644 * terminated successfully and return status accordingly. 3645 * 3646 * Return: SUCCESS of successful termination of the scmd else 3647 * FAILED 3648 */ 3649 static int mpi3mr_eh_target_reset(struct scsi_cmnd *scmd) 3650 { 3651 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host); 3652 struct mpi3mr_stgt_priv_data *stgt_priv_data; 3653 struct mpi3mr_sdev_priv_data *sdev_priv_data; 3654 u16 dev_handle; 3655 u8 resp_code = 0; 3656 int retval = FAILED, ret = 0; 3657 3658 sdev_printk(KERN_INFO, scmd->device, 3659 "Attempting Target Reset! scmd(%p)\n", scmd); 3660 scsi_print_command(scmd); 3661 3662 sdev_priv_data = scmd->device->hostdata; 3663 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) { 3664 sdev_printk(KERN_INFO, scmd->device, 3665 "SCSI device is not available\n"); 3666 retval = SUCCESS; 3667 goto out; 3668 } 3669 3670 stgt_priv_data = sdev_priv_data->tgt_priv_data; 3671 dev_handle = stgt_priv_data->dev_handle; 3672 if (stgt_priv_data->dev_removed) { 3673 sdev_printk(KERN_INFO, scmd->device, 3674 "%s:target(handle = 0x%04x) is removed, target reset is not issued\n", 3675 mrioc->name, dev_handle); 3676 retval = FAILED; 3677 goto out; 3678 } 3679 sdev_printk(KERN_INFO, scmd->device, 3680 "Target Reset is issued to handle(0x%04x)\n", 3681 dev_handle); 3682 3683 ret = mpi3mr_issue_tm(mrioc, 3684 MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, dev_handle, 3685 sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS, 3686 MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd); 3687 3688 if (ret) 3689 goto out; 3690 3691 if (stgt_priv_data->pend_count) { 3692 sdev_printk(KERN_INFO, scmd->device, 3693 "%s: target has %d pending commands, target reset is failed\n", 3694 mrioc->name, stgt_priv_data->pend_count); 3695 goto out; 3696 } 3697 3698 retval = SUCCESS; 3699 out: 3700 sdev_printk(KERN_INFO, scmd->device, 3701 "%s: target reset is %s for scmd(%p)\n", mrioc->name, 3702 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); 3703 3704 return retval; 3705 } 3706 3707 /** 3708 * mpi3mr_eh_dev_reset- Device reset error handling callback 3709 * @scmd: SCSI command reference 3710 * 3711 * Issue lun reset Task Management and verify the scmd is 3712 * terminated successfully and return status accordingly. 3713 * 3714 * Return: SUCCESS of successful termination of the scmd else 3715 * FAILED 3716 */ 3717 static int mpi3mr_eh_dev_reset(struct scsi_cmnd *scmd) 3718 { 3719 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host); 3720 struct mpi3mr_stgt_priv_data *stgt_priv_data; 3721 struct mpi3mr_sdev_priv_data *sdev_priv_data; 3722 u16 dev_handle; 3723 u8 resp_code = 0; 3724 int retval = FAILED, ret = 0; 3725 3726 sdev_printk(KERN_INFO, scmd->device, 3727 "Attempting Device(lun) Reset! scmd(%p)\n", scmd); 3728 scsi_print_command(scmd); 3729 3730 sdev_priv_data = scmd->device->hostdata; 3731 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) { 3732 sdev_printk(KERN_INFO, scmd->device, 3733 "SCSI device is not available\n"); 3734 retval = SUCCESS; 3735 goto out; 3736 } 3737 3738 stgt_priv_data = sdev_priv_data->tgt_priv_data; 3739 dev_handle = stgt_priv_data->dev_handle; 3740 if (stgt_priv_data->dev_removed) { 3741 sdev_printk(KERN_INFO, scmd->device, 3742 "%s: device(handle = 0x%04x) is removed, device(LUN) reset is not issued\n", 3743 mrioc->name, dev_handle); 3744 retval = FAILED; 3745 goto out; 3746 } 3747 sdev_printk(KERN_INFO, scmd->device, 3748 "Device(lun) Reset is issued to handle(0x%04x)\n", dev_handle); 3749 3750 ret = mpi3mr_issue_tm(mrioc, 3751 MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, dev_handle, 3752 sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS, 3753 MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd); 3754 3755 if (ret) 3756 goto out; 3757 3758 if (sdev_priv_data->pend_count) { 3759 sdev_printk(KERN_INFO, scmd->device, 3760 "%s: device has %d pending commands, device(LUN) reset is failed\n", 3761 mrioc->name, sdev_priv_data->pend_count); 3762 goto out; 3763 } 3764 retval = SUCCESS; 3765 out: 3766 sdev_printk(KERN_INFO, scmd->device, 3767 "%s: device(LUN) reset is %s for scmd(%p)\n", mrioc->name, 3768 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); 3769 3770 return retval; 3771 } 3772 3773 /** 3774 * mpi3mr_scan_start - Scan start callback handler 3775 * @shost: SCSI host reference 3776 * 3777 * Issue port enable request asynchronously. 3778 * 3779 * Return: Nothing 3780 */ 3781 static void mpi3mr_scan_start(struct Scsi_Host *shost) 3782 { 3783 struct mpi3mr_ioc *mrioc = shost_priv(shost); 3784 3785 mrioc->scan_started = 1; 3786 ioc_info(mrioc, "%s :Issuing Port Enable\n", __func__); 3787 if (mpi3mr_issue_port_enable(mrioc, 1)) { 3788 ioc_err(mrioc, "%s :Issuing port enable failed\n", __func__); 3789 mrioc->scan_started = 0; 3790 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR; 3791 } 3792 } 3793 3794 /** 3795 * mpi3mr_scan_finished - Scan finished callback handler 3796 * @shost: SCSI host reference 3797 * @time: Jiffies from the scan start 3798 * 3799 * Checks whether the port enable is completed or timedout or 3800 * failed and set the scan status accordingly after taking any 3801 * recovery if required. 3802 * 3803 * Return: 1 on scan finished or timed out, 0 for in progress 3804 */ 3805 static int mpi3mr_scan_finished(struct Scsi_Host *shost, 3806 unsigned long time) 3807 { 3808 struct mpi3mr_ioc *mrioc = shost_priv(shost); 3809 u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT; 3810 u32 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 3811 3812 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) || 3813 (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) { 3814 ioc_err(mrioc, "port enable failed due to fault or reset\n"); 3815 mpi3mr_print_fault_info(mrioc); 3816 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR; 3817 mrioc->scan_started = 0; 3818 mrioc->init_cmds.is_waiting = 0; 3819 mrioc->init_cmds.callback = NULL; 3820 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 3821 } 3822 3823 if (time >= (pe_timeout * HZ)) { 3824 ioc_err(mrioc, "port enable failed due to time out\n"); 3825 mpi3mr_check_rh_fault_ioc(mrioc, 3826 MPI3MR_RESET_FROM_PE_TIMEOUT); 3827 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR; 3828 mrioc->scan_started = 0; 3829 mrioc->init_cmds.is_waiting = 0; 3830 mrioc->init_cmds.callback = NULL; 3831 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 3832 } 3833 3834 if (mrioc->scan_started) 3835 return 0; 3836 3837 if (mrioc->scan_failed) { 3838 ioc_err(mrioc, 3839 "port enable failed with status=0x%04x\n", 3840 mrioc->scan_failed); 3841 } else 3842 ioc_info(mrioc, "port enable is successfully completed\n"); 3843 3844 mpi3mr_start_watchdog(mrioc); 3845 mrioc->is_driver_loading = 0; 3846 mrioc->stop_bsgs = 0; 3847 return 1; 3848 } 3849 3850 /** 3851 * mpi3mr_slave_destroy - Slave destroy callback handler 3852 * @sdev: SCSI device reference 3853 * 3854 * Cleanup and free per device(lun) private data. 3855 * 3856 * Return: Nothing. 3857 */ 3858 static void mpi3mr_slave_destroy(struct scsi_device *sdev) 3859 { 3860 struct Scsi_Host *shost; 3861 struct mpi3mr_ioc *mrioc; 3862 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 3863 struct mpi3mr_tgt_dev *tgt_dev; 3864 unsigned long flags; 3865 struct scsi_target *starget; 3866 3867 if (!sdev->hostdata) 3868 return; 3869 3870 starget = scsi_target(sdev); 3871 shost = dev_to_shost(&starget->dev); 3872 mrioc = shost_priv(shost); 3873 scsi_tgt_priv_data = starget->hostdata; 3874 3875 scsi_tgt_priv_data->num_luns--; 3876 3877 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 3878 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 3879 if (tgt_dev && (!scsi_tgt_priv_data->num_luns)) 3880 tgt_dev->starget = NULL; 3881 if (tgt_dev) 3882 mpi3mr_tgtdev_put(tgt_dev); 3883 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 3884 3885 kfree(sdev->hostdata); 3886 sdev->hostdata = NULL; 3887 } 3888 3889 /** 3890 * mpi3mr_target_destroy - Target destroy callback handler 3891 * @starget: SCSI target reference 3892 * 3893 * Cleanup and free per target private data. 3894 * 3895 * Return: Nothing. 3896 */ 3897 static void mpi3mr_target_destroy(struct scsi_target *starget) 3898 { 3899 struct Scsi_Host *shost; 3900 struct mpi3mr_ioc *mrioc; 3901 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 3902 struct mpi3mr_tgt_dev *tgt_dev; 3903 unsigned long flags; 3904 3905 if (!starget->hostdata) 3906 return; 3907 3908 shost = dev_to_shost(&starget->dev); 3909 mrioc = shost_priv(shost); 3910 scsi_tgt_priv_data = starget->hostdata; 3911 3912 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 3913 tgt_dev = __mpi3mr_get_tgtdev_from_tgtpriv(mrioc, scsi_tgt_priv_data); 3914 if (tgt_dev && (tgt_dev->starget == starget) && 3915 (tgt_dev->perst_id == starget->id)) 3916 tgt_dev->starget = NULL; 3917 if (tgt_dev) { 3918 scsi_tgt_priv_data->tgt_dev = NULL; 3919 scsi_tgt_priv_data->perst_id = 0; 3920 mpi3mr_tgtdev_put(tgt_dev); 3921 mpi3mr_tgtdev_put(tgt_dev); 3922 } 3923 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 3924 3925 kfree(starget->hostdata); 3926 starget->hostdata = NULL; 3927 } 3928 3929 /** 3930 * mpi3mr_slave_configure - Slave configure callback handler 3931 * @sdev: SCSI device reference 3932 * 3933 * Configure queue depth, max hardware sectors and virt boundary 3934 * as required 3935 * 3936 * Return: 0 always. 3937 */ 3938 static int mpi3mr_slave_configure(struct scsi_device *sdev) 3939 { 3940 struct scsi_target *starget; 3941 struct Scsi_Host *shost; 3942 struct mpi3mr_ioc *mrioc; 3943 struct mpi3mr_tgt_dev *tgt_dev; 3944 unsigned long flags; 3945 int retval = 0; 3946 3947 starget = scsi_target(sdev); 3948 shost = dev_to_shost(&starget->dev); 3949 mrioc = shost_priv(shost); 3950 3951 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 3952 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 3953 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 3954 if (!tgt_dev) 3955 return -ENXIO; 3956 3957 mpi3mr_change_queue_depth(sdev, tgt_dev->q_depth); 3958 3959 sdev->eh_timeout = MPI3MR_EH_SCMD_TIMEOUT; 3960 blk_queue_rq_timeout(sdev->request_queue, MPI3MR_SCMD_TIMEOUT); 3961 3962 switch (tgt_dev->dev_type) { 3963 case MPI3_DEVICE_DEVFORM_PCIE: 3964 /*The block layer hw sector size = 512*/ 3965 if ((tgt_dev->dev_spec.pcie_inf.dev_info & 3966 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) == 3967 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) { 3968 blk_queue_max_hw_sectors(sdev->request_queue, 3969 tgt_dev->dev_spec.pcie_inf.mdts / 512); 3970 if (tgt_dev->dev_spec.pcie_inf.pgsz == 0) 3971 blk_queue_virt_boundary(sdev->request_queue, 3972 ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1)); 3973 else 3974 blk_queue_virt_boundary(sdev->request_queue, 3975 ((1 << tgt_dev->dev_spec.pcie_inf.pgsz) - 1)); 3976 } 3977 break; 3978 default: 3979 break; 3980 } 3981 3982 mpi3mr_tgtdev_put(tgt_dev); 3983 3984 return retval; 3985 } 3986 3987 /** 3988 * mpi3mr_slave_alloc -Slave alloc callback handler 3989 * @sdev: SCSI device reference 3990 * 3991 * Allocate per device(lun) private data and initialize it. 3992 * 3993 * Return: 0 on success -ENOMEM on memory allocation failure. 3994 */ 3995 static int mpi3mr_slave_alloc(struct scsi_device *sdev) 3996 { 3997 struct Scsi_Host *shost; 3998 struct mpi3mr_ioc *mrioc; 3999 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 4000 struct mpi3mr_tgt_dev *tgt_dev; 4001 struct mpi3mr_sdev_priv_data *scsi_dev_priv_data; 4002 unsigned long flags; 4003 struct scsi_target *starget; 4004 int retval = 0; 4005 4006 starget = scsi_target(sdev); 4007 shost = dev_to_shost(&starget->dev); 4008 mrioc = shost_priv(shost); 4009 scsi_tgt_priv_data = starget->hostdata; 4010 4011 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 4012 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 4013 4014 if (tgt_dev) { 4015 if (tgt_dev->starget == NULL) 4016 tgt_dev->starget = starget; 4017 mpi3mr_tgtdev_put(tgt_dev); 4018 retval = 0; 4019 } else { 4020 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4021 return -ENXIO; 4022 } 4023 4024 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4025 4026 scsi_dev_priv_data = kzalloc(sizeof(*scsi_dev_priv_data), GFP_KERNEL); 4027 if (!scsi_dev_priv_data) 4028 return -ENOMEM; 4029 4030 scsi_dev_priv_data->lun_id = sdev->lun; 4031 scsi_dev_priv_data->tgt_priv_data = scsi_tgt_priv_data; 4032 sdev->hostdata = scsi_dev_priv_data; 4033 4034 scsi_tgt_priv_data->num_luns++; 4035 4036 return retval; 4037 } 4038 4039 /** 4040 * mpi3mr_target_alloc - Target alloc callback handler 4041 * @starget: SCSI target reference 4042 * 4043 * Allocate per target private data and initialize it. 4044 * 4045 * Return: 0 on success -ENOMEM on memory allocation failure. 4046 */ 4047 static int mpi3mr_target_alloc(struct scsi_target *starget) 4048 { 4049 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 4050 struct mpi3mr_ioc *mrioc = shost_priv(shost); 4051 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 4052 struct mpi3mr_tgt_dev *tgt_dev; 4053 unsigned long flags; 4054 int retval = 0; 4055 4056 scsi_tgt_priv_data = kzalloc(sizeof(*scsi_tgt_priv_data), GFP_KERNEL); 4057 if (!scsi_tgt_priv_data) 4058 return -ENOMEM; 4059 4060 starget->hostdata = scsi_tgt_priv_data; 4061 4062 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 4063 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 4064 if (tgt_dev && !tgt_dev->is_hidden) { 4065 scsi_tgt_priv_data->starget = starget; 4066 scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle; 4067 scsi_tgt_priv_data->perst_id = tgt_dev->perst_id; 4068 scsi_tgt_priv_data->dev_type = tgt_dev->dev_type; 4069 scsi_tgt_priv_data->tgt_dev = tgt_dev; 4070 tgt_dev->starget = starget; 4071 atomic_set(&scsi_tgt_priv_data->block_io, 0); 4072 retval = 0; 4073 scsi_tgt_priv_data->io_throttle_enabled = 4074 tgt_dev->io_throttle_enabled; 4075 if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_VD) 4076 scsi_tgt_priv_data->throttle_group = 4077 tgt_dev->dev_spec.vd_inf.tg; 4078 } else 4079 retval = -ENXIO; 4080 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4081 4082 return retval; 4083 } 4084 4085 /** 4086 * mpi3mr_check_return_unmap - Whether an unmap is allowed 4087 * @mrioc: Adapter instance reference 4088 * @scmd: SCSI Command reference 4089 * 4090 * The controller hardware cannot handle certain unmap commands 4091 * for NVMe drives, this routine checks those and return true 4092 * and completes the SCSI command with proper status and sense 4093 * data. 4094 * 4095 * Return: TRUE for not allowed unmap, FALSE otherwise. 4096 */ 4097 static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc, 4098 struct scsi_cmnd *scmd) 4099 { 4100 unsigned char *buf; 4101 u16 param_len, desc_len, trunc_param_len; 4102 4103 trunc_param_len = param_len = get_unaligned_be16(scmd->cmnd + 7); 4104 4105 if (mrioc->pdev->revision) { 4106 if ((param_len > 24) && ((param_len - 8) & 0xF)) { 4107 trunc_param_len -= (param_len - 8) & 0xF; 4108 dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR); 4109 dprint_scsi_err(mrioc, 4110 "truncating param_len from (%d) to (%d)\n", 4111 param_len, trunc_param_len); 4112 put_unaligned_be16(trunc_param_len, scmd->cmnd + 7); 4113 dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR); 4114 } 4115 return false; 4116 } 4117 4118 if (!param_len) { 4119 ioc_warn(mrioc, 4120 "%s: cdb received with zero parameter length\n", 4121 __func__); 4122 scsi_print_command(scmd); 4123 scmd->result = DID_OK << 16; 4124 scsi_done(scmd); 4125 return true; 4126 } 4127 4128 if (param_len < 24) { 4129 ioc_warn(mrioc, 4130 "%s: cdb received with invalid param_len: %d\n", 4131 __func__, param_len); 4132 scsi_print_command(scmd); 4133 scmd->result = SAM_STAT_CHECK_CONDITION; 4134 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 4135 0x1A, 0); 4136 scsi_done(scmd); 4137 return true; 4138 } 4139 if (param_len != scsi_bufflen(scmd)) { 4140 ioc_warn(mrioc, 4141 "%s: cdb received with param_len: %d bufflen: %d\n", 4142 __func__, param_len, scsi_bufflen(scmd)); 4143 scsi_print_command(scmd); 4144 scmd->result = SAM_STAT_CHECK_CONDITION; 4145 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 4146 0x1A, 0); 4147 scsi_done(scmd); 4148 return true; 4149 } 4150 buf = kzalloc(scsi_bufflen(scmd), GFP_ATOMIC); 4151 if (!buf) { 4152 scsi_print_command(scmd); 4153 scmd->result = SAM_STAT_CHECK_CONDITION; 4154 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 4155 0x55, 0x03); 4156 scsi_done(scmd); 4157 return true; 4158 } 4159 scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd)); 4160 desc_len = get_unaligned_be16(&buf[2]); 4161 4162 if (desc_len < 16) { 4163 ioc_warn(mrioc, 4164 "%s: Invalid descriptor length in param list: %d\n", 4165 __func__, desc_len); 4166 scsi_print_command(scmd); 4167 scmd->result = SAM_STAT_CHECK_CONDITION; 4168 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 4169 0x26, 0); 4170 scsi_done(scmd); 4171 kfree(buf); 4172 return true; 4173 } 4174 4175 if (param_len > (desc_len + 8)) { 4176 trunc_param_len = desc_len + 8; 4177 scsi_print_command(scmd); 4178 dprint_scsi_err(mrioc, 4179 "truncating param_len(%d) to desc_len+8(%d)\n", 4180 param_len, trunc_param_len); 4181 put_unaligned_be16(trunc_param_len, scmd->cmnd + 7); 4182 scsi_print_command(scmd); 4183 } 4184 4185 kfree(buf); 4186 return false; 4187 } 4188 4189 /** 4190 * mpi3mr_allow_scmd_to_fw - Command is allowed during shutdown 4191 * @scmd: SCSI Command reference 4192 * 4193 * Checks whether a cdb is allowed during shutdown or not. 4194 * 4195 * Return: TRUE for allowed commands, FALSE otherwise. 4196 */ 4197 4198 inline bool mpi3mr_allow_scmd_to_fw(struct scsi_cmnd *scmd) 4199 { 4200 switch (scmd->cmnd[0]) { 4201 case SYNCHRONIZE_CACHE: 4202 case START_STOP: 4203 return true; 4204 default: 4205 return false; 4206 } 4207 } 4208 4209 /** 4210 * mpi3mr_qcmd - I/O request despatcher 4211 * @shost: SCSI Host reference 4212 * @scmd: SCSI Command reference 4213 * 4214 * Issues the SCSI Command as an MPI3 request. 4215 * 4216 * Return: 0 on successful queueing of the request or if the 4217 * request is completed with failure. 4218 * SCSI_MLQUEUE_DEVICE_BUSY when the device is busy. 4219 * SCSI_MLQUEUE_HOST_BUSY when the host queue is full. 4220 */ 4221 static int mpi3mr_qcmd(struct Scsi_Host *shost, 4222 struct scsi_cmnd *scmd) 4223 { 4224 struct mpi3mr_ioc *mrioc = shost_priv(shost); 4225 struct mpi3mr_stgt_priv_data *stgt_priv_data; 4226 struct mpi3mr_sdev_priv_data *sdev_priv_data; 4227 struct scmd_priv *scmd_priv_data = NULL; 4228 struct mpi3_scsi_io_request *scsiio_req = NULL; 4229 struct op_req_qinfo *op_req_q = NULL; 4230 int retval = 0; 4231 u16 dev_handle; 4232 u16 host_tag; 4233 u32 scsiio_flags = 0, data_len_blks = 0; 4234 struct request *rq = scsi_cmd_to_rq(scmd); 4235 int iprio_class; 4236 u8 is_pcie_dev = 0; 4237 u32 tracked_io_sz = 0; 4238 u32 ioc_pend_data_len = 0, tg_pend_data_len = 0; 4239 struct mpi3mr_throttle_group_info *tg = NULL; 4240 4241 if (mrioc->unrecoverable) { 4242 scmd->result = DID_ERROR << 16; 4243 scsi_done(scmd); 4244 goto out; 4245 } 4246 4247 sdev_priv_data = scmd->device->hostdata; 4248 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) { 4249 scmd->result = DID_NO_CONNECT << 16; 4250 scsi_done(scmd); 4251 goto out; 4252 } 4253 4254 if (mrioc->stop_drv_processing && 4255 !(mpi3mr_allow_scmd_to_fw(scmd))) { 4256 scmd->result = DID_NO_CONNECT << 16; 4257 scsi_done(scmd); 4258 goto out; 4259 } 4260 4261 if (mrioc->reset_in_progress) { 4262 retval = SCSI_MLQUEUE_HOST_BUSY; 4263 goto out; 4264 } 4265 4266 stgt_priv_data = sdev_priv_data->tgt_priv_data; 4267 4268 dev_handle = stgt_priv_data->dev_handle; 4269 if (dev_handle == MPI3MR_INVALID_DEV_HANDLE) { 4270 scmd->result = DID_NO_CONNECT << 16; 4271 scsi_done(scmd); 4272 goto out; 4273 } 4274 if (stgt_priv_data->dev_removed) { 4275 scmd->result = DID_NO_CONNECT << 16; 4276 scsi_done(scmd); 4277 goto out; 4278 } 4279 4280 if (atomic_read(&stgt_priv_data->block_io)) { 4281 if (mrioc->stop_drv_processing) { 4282 scmd->result = DID_NO_CONNECT << 16; 4283 scsi_done(scmd); 4284 goto out; 4285 } 4286 retval = SCSI_MLQUEUE_DEVICE_BUSY; 4287 goto out; 4288 } 4289 4290 if (stgt_priv_data->dev_type == MPI3_DEVICE_DEVFORM_PCIE) 4291 is_pcie_dev = 1; 4292 if ((scmd->cmnd[0] == UNMAP) && is_pcie_dev && 4293 (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) && 4294 mpi3mr_check_return_unmap(mrioc, scmd)) 4295 goto out; 4296 4297 host_tag = mpi3mr_host_tag_for_scmd(mrioc, scmd); 4298 if (host_tag == MPI3MR_HOSTTAG_INVALID) { 4299 scmd->result = DID_ERROR << 16; 4300 scsi_done(scmd); 4301 goto out; 4302 } 4303 4304 if (scmd->sc_data_direction == DMA_FROM_DEVICE) 4305 scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_READ; 4306 else if (scmd->sc_data_direction == DMA_TO_DEVICE) 4307 scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE; 4308 else 4309 scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER; 4310 4311 scsiio_flags |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ; 4312 4313 if (sdev_priv_data->ncq_prio_enable) { 4314 iprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq)); 4315 if (iprio_class == IOPRIO_CLASS_RT) 4316 scsiio_flags |= 1 << MPI3_SCSIIO_FLAGS_CMDPRI_SHIFT; 4317 } 4318 4319 if (scmd->cmd_len > 16) 4320 scsiio_flags |= MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16; 4321 4322 scmd_priv_data = scsi_cmd_priv(scmd); 4323 memset(scmd_priv_data->mpi3mr_scsiio_req, 0, MPI3MR_ADMIN_REQ_FRAME_SZ); 4324 scsiio_req = (struct mpi3_scsi_io_request *)scmd_priv_data->mpi3mr_scsiio_req; 4325 scsiio_req->function = MPI3_FUNCTION_SCSI_IO; 4326 scsiio_req->host_tag = cpu_to_le16(host_tag); 4327 4328 mpi3mr_setup_eedp(mrioc, scmd, scsiio_req); 4329 4330 memcpy(scsiio_req->cdb.cdb32, scmd->cmnd, scmd->cmd_len); 4331 scsiio_req->data_length = cpu_to_le32(scsi_bufflen(scmd)); 4332 scsiio_req->dev_handle = cpu_to_le16(dev_handle); 4333 scsiio_req->flags = cpu_to_le32(scsiio_flags); 4334 int_to_scsilun(sdev_priv_data->lun_id, 4335 (struct scsi_lun *)scsiio_req->lun); 4336 4337 if (mpi3mr_build_sg_scmd(mrioc, scmd, scsiio_req)) { 4338 mpi3mr_clear_scmd_priv(mrioc, scmd); 4339 retval = SCSI_MLQUEUE_HOST_BUSY; 4340 goto out; 4341 } 4342 op_req_q = &mrioc->req_qinfo[scmd_priv_data->req_q_idx]; 4343 data_len_blks = scsi_bufflen(scmd) >> 9; 4344 if ((data_len_blks >= mrioc->io_throttle_data_length) && 4345 stgt_priv_data->io_throttle_enabled) { 4346 tracked_io_sz = data_len_blks; 4347 tg = stgt_priv_data->throttle_group; 4348 if (tg) { 4349 ioc_pend_data_len = atomic_add_return(data_len_blks, 4350 &mrioc->pend_large_data_sz); 4351 tg_pend_data_len = atomic_add_return(data_len_blks, 4352 &tg->pend_large_data_sz); 4353 if (!tg->io_divert && ((ioc_pend_data_len >= 4354 mrioc->io_throttle_high) || 4355 (tg_pend_data_len >= tg->high))) { 4356 tg->io_divert = 1; 4357 tg->need_qd_reduction = 1; 4358 mpi3mr_set_io_divert_for_all_vd_in_tg(mrioc, 4359 tg, 1); 4360 mpi3mr_queue_qd_reduction_event(mrioc, tg); 4361 } 4362 } else { 4363 ioc_pend_data_len = atomic_add_return(data_len_blks, 4364 &mrioc->pend_large_data_sz); 4365 if (ioc_pend_data_len >= mrioc->io_throttle_high) 4366 stgt_priv_data->io_divert = 1; 4367 } 4368 } 4369 4370 if (stgt_priv_data->io_divert) { 4371 scsiio_req->msg_flags |= 4372 MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE; 4373 scsiio_flags |= MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING; 4374 } 4375 scsiio_req->flags = cpu_to_le32(scsiio_flags); 4376 4377 if (mpi3mr_op_request_post(mrioc, op_req_q, 4378 scmd_priv_data->mpi3mr_scsiio_req)) { 4379 mpi3mr_clear_scmd_priv(mrioc, scmd); 4380 retval = SCSI_MLQUEUE_HOST_BUSY; 4381 if (tracked_io_sz) { 4382 atomic_sub(tracked_io_sz, &mrioc->pend_large_data_sz); 4383 if (tg) 4384 atomic_sub(tracked_io_sz, 4385 &tg->pend_large_data_sz); 4386 } 4387 goto out; 4388 } 4389 4390 out: 4391 return retval; 4392 } 4393 4394 static struct scsi_host_template mpi3mr_driver_template = { 4395 .module = THIS_MODULE, 4396 .name = "MPI3 Storage Controller", 4397 .proc_name = MPI3MR_DRIVER_NAME, 4398 .queuecommand = mpi3mr_qcmd, 4399 .target_alloc = mpi3mr_target_alloc, 4400 .slave_alloc = mpi3mr_slave_alloc, 4401 .slave_configure = mpi3mr_slave_configure, 4402 .target_destroy = mpi3mr_target_destroy, 4403 .slave_destroy = mpi3mr_slave_destroy, 4404 .scan_finished = mpi3mr_scan_finished, 4405 .scan_start = mpi3mr_scan_start, 4406 .change_queue_depth = mpi3mr_change_queue_depth, 4407 .eh_device_reset_handler = mpi3mr_eh_dev_reset, 4408 .eh_target_reset_handler = mpi3mr_eh_target_reset, 4409 .eh_host_reset_handler = mpi3mr_eh_host_reset, 4410 .bios_param = mpi3mr_bios_param, 4411 .map_queues = mpi3mr_map_queues, 4412 .mq_poll = mpi3mr_blk_mq_poll, 4413 .no_write_same = 1, 4414 .can_queue = 1, 4415 .this_id = -1, 4416 .sg_tablesize = MPI3MR_SG_DEPTH, 4417 /* max xfer supported is 1M (2K in 512 byte sized sectors) 4418 */ 4419 .max_sectors = 2048, 4420 .cmd_per_lun = MPI3MR_MAX_CMDS_LUN, 4421 .max_segment_size = 0xffffffff, 4422 .track_queue_depth = 1, 4423 .cmd_size = sizeof(struct scmd_priv), 4424 .shost_groups = mpi3mr_host_groups, 4425 .sdev_groups = mpi3mr_dev_groups, 4426 }; 4427 4428 /** 4429 * mpi3mr_init_drv_cmd - Initialize internal command tracker 4430 * @cmdptr: Internal command tracker 4431 * @host_tag: Host tag used for the specific command 4432 * 4433 * Initialize the internal command tracker structure with 4434 * specified host tag. 4435 * 4436 * Return: Nothing. 4437 */ 4438 static inline void mpi3mr_init_drv_cmd(struct mpi3mr_drv_cmd *cmdptr, 4439 u16 host_tag) 4440 { 4441 mutex_init(&cmdptr->mutex); 4442 cmdptr->reply = NULL; 4443 cmdptr->state = MPI3MR_CMD_NOTUSED; 4444 cmdptr->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 4445 cmdptr->host_tag = host_tag; 4446 } 4447 4448 /** 4449 * osintfc_mrioc_security_status -Check controller secure status 4450 * @pdev: PCI device instance 4451 * 4452 * Read the Device Serial Number capability from PCI config 4453 * space and decide whether the controller is secure or not. 4454 * 4455 * Return: 0 on success, non-zero on failure. 4456 */ 4457 static int 4458 osintfc_mrioc_security_status(struct pci_dev *pdev) 4459 { 4460 u32 cap_data; 4461 int base; 4462 u32 ctlr_status; 4463 u32 debug_status; 4464 int retval = 0; 4465 4466 base = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN); 4467 if (!base) { 4468 dev_err(&pdev->dev, 4469 "%s: PCI_EXT_CAP_ID_DSN is not supported\n", __func__); 4470 return -1; 4471 } 4472 4473 pci_read_config_dword(pdev, base + 4, &cap_data); 4474 4475 debug_status = cap_data & MPI3MR_CTLR_SECURE_DBG_STATUS_MASK; 4476 ctlr_status = cap_data & MPI3MR_CTLR_SECURITY_STATUS_MASK; 4477 4478 switch (ctlr_status) { 4479 case MPI3MR_INVALID_DEVICE: 4480 dev_err(&pdev->dev, 4481 "%s: Non secure ctlr (Invalid) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n", 4482 __func__, pdev->device, pdev->subsystem_vendor, 4483 pdev->subsystem_device); 4484 retval = -1; 4485 break; 4486 case MPI3MR_CONFIG_SECURE_DEVICE: 4487 if (!debug_status) 4488 dev_info(&pdev->dev, 4489 "%s: Config secure ctlr is detected\n", 4490 __func__); 4491 break; 4492 case MPI3MR_HARD_SECURE_DEVICE: 4493 break; 4494 case MPI3MR_TAMPERED_DEVICE: 4495 dev_err(&pdev->dev, 4496 "%s: Non secure ctlr (Tampered) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n", 4497 __func__, pdev->device, pdev->subsystem_vendor, 4498 pdev->subsystem_device); 4499 retval = -1; 4500 break; 4501 default: 4502 retval = -1; 4503 break; 4504 } 4505 4506 if (!retval && debug_status) { 4507 dev_err(&pdev->dev, 4508 "%s: Non secure ctlr (Secure Dbg) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n", 4509 __func__, pdev->device, pdev->subsystem_vendor, 4510 pdev->subsystem_device); 4511 retval = -1; 4512 } 4513 4514 return retval; 4515 } 4516 4517 /** 4518 * mpi3mr_probe - PCI probe callback 4519 * @pdev: PCI device instance 4520 * @id: PCI device ID details 4521 * 4522 * controller initialization routine. Checks the security status 4523 * of the controller and if it is invalid or tampered return the 4524 * probe without initializing the controller. Otherwise, 4525 * allocate per adapter instance through shost_priv and 4526 * initialize controller specific data structures, initializae 4527 * the controller hardware, add shost to the SCSI subsystem. 4528 * 4529 * Return: 0 on success, non-zero on failure. 4530 */ 4531 4532 static int 4533 mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id) 4534 { 4535 struct mpi3mr_ioc *mrioc = NULL; 4536 struct Scsi_Host *shost = NULL; 4537 int retval = 0, i; 4538 4539 if (osintfc_mrioc_security_status(pdev)) { 4540 warn_non_secure_ctlr = 1; 4541 return 1; /* For Invalid and Tampered device */ 4542 } 4543 4544 shost = scsi_host_alloc(&mpi3mr_driver_template, 4545 sizeof(struct mpi3mr_ioc)); 4546 if (!shost) { 4547 retval = -ENODEV; 4548 goto shost_failed; 4549 } 4550 4551 mrioc = shost_priv(shost); 4552 mrioc->id = mrioc_ids++; 4553 sprintf(mrioc->driver_name, "%s", MPI3MR_DRIVER_NAME); 4554 sprintf(mrioc->name, "%s%d", mrioc->driver_name, mrioc->id); 4555 INIT_LIST_HEAD(&mrioc->list); 4556 spin_lock(&mrioc_list_lock); 4557 list_add_tail(&mrioc->list, &mrioc_list); 4558 spin_unlock(&mrioc_list_lock); 4559 4560 spin_lock_init(&mrioc->admin_req_lock); 4561 spin_lock_init(&mrioc->reply_free_queue_lock); 4562 spin_lock_init(&mrioc->sbq_lock); 4563 spin_lock_init(&mrioc->fwevt_lock); 4564 spin_lock_init(&mrioc->tgtdev_lock); 4565 spin_lock_init(&mrioc->watchdog_lock); 4566 spin_lock_init(&mrioc->chain_buf_lock); 4567 4568 INIT_LIST_HEAD(&mrioc->fwevt_list); 4569 INIT_LIST_HEAD(&mrioc->tgtdev_list); 4570 INIT_LIST_HEAD(&mrioc->delayed_rmhs_list); 4571 INIT_LIST_HEAD(&mrioc->delayed_evtack_cmds_list); 4572 4573 mutex_init(&mrioc->reset_mutex); 4574 mpi3mr_init_drv_cmd(&mrioc->init_cmds, MPI3MR_HOSTTAG_INITCMDS); 4575 mpi3mr_init_drv_cmd(&mrioc->host_tm_cmds, MPI3MR_HOSTTAG_BLK_TMS); 4576 mpi3mr_init_drv_cmd(&mrioc->bsg_cmds, MPI3MR_HOSTTAG_BSG_CMDS); 4577 4578 for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) 4579 mpi3mr_init_drv_cmd(&mrioc->dev_rmhs_cmds[i], 4580 MPI3MR_HOSTTAG_DEVRMCMD_MIN + i); 4581 4582 if (pdev->revision) 4583 mrioc->enable_segqueue = true; 4584 4585 init_waitqueue_head(&mrioc->reset_waitq); 4586 mrioc->logging_level = logging_level; 4587 mrioc->shost = shost; 4588 mrioc->pdev = pdev; 4589 mrioc->stop_bsgs = 1; 4590 4591 /* init shost parameters */ 4592 shost->max_cmd_len = MPI3MR_MAX_CDB_LENGTH; 4593 shost->max_lun = -1; 4594 shost->unique_id = mrioc->id; 4595 4596 shost->max_channel = 0; 4597 shost->max_id = 0xFFFFFFFF; 4598 4599 shost->host_tagset = 1; 4600 4601 if (prot_mask >= 0) 4602 scsi_host_set_prot(shost, prot_mask); 4603 else { 4604 prot_mask = SHOST_DIF_TYPE1_PROTECTION 4605 | SHOST_DIF_TYPE2_PROTECTION 4606 | SHOST_DIF_TYPE3_PROTECTION; 4607 scsi_host_set_prot(shost, prot_mask); 4608 } 4609 4610 ioc_info(mrioc, 4611 "%s :host protection capabilities enabled %s%s%s%s%s%s%s\n", 4612 __func__, 4613 (prot_mask & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "", 4614 (prot_mask & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "", 4615 (prot_mask & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "", 4616 (prot_mask & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "", 4617 (prot_mask & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "", 4618 (prot_mask & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "", 4619 (prot_mask & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : ""); 4620 4621 if (prot_guard_mask) 4622 scsi_host_set_guard(shost, (prot_guard_mask & 3)); 4623 else 4624 scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); 4625 4626 snprintf(mrioc->fwevt_worker_name, sizeof(mrioc->fwevt_worker_name), 4627 "%s%d_fwevt_wrkr", mrioc->driver_name, mrioc->id); 4628 mrioc->fwevt_worker_thread = alloc_ordered_workqueue( 4629 mrioc->fwevt_worker_name, 0); 4630 if (!mrioc->fwevt_worker_thread) { 4631 ioc_err(mrioc, "failure at %s:%d/%s()!\n", 4632 __FILE__, __LINE__, __func__); 4633 retval = -ENODEV; 4634 goto fwevtthread_failed; 4635 } 4636 4637 mrioc->is_driver_loading = 1; 4638 mrioc->cpu_count = num_online_cpus(); 4639 if (mpi3mr_setup_resources(mrioc)) { 4640 ioc_err(mrioc, "setup resources failed\n"); 4641 retval = -ENODEV; 4642 goto resource_alloc_failed; 4643 } 4644 if (mpi3mr_init_ioc(mrioc)) { 4645 ioc_err(mrioc, "initializing IOC failed\n"); 4646 retval = -ENODEV; 4647 goto init_ioc_failed; 4648 } 4649 4650 shost->nr_hw_queues = mrioc->num_op_reply_q; 4651 if (mrioc->active_poll_qcount) 4652 shost->nr_maps = 3; 4653 4654 shost->can_queue = mrioc->max_host_ios; 4655 shost->sg_tablesize = MPI3MR_SG_DEPTH; 4656 shost->max_id = mrioc->facts.max_perids + 1; 4657 4658 retval = scsi_add_host(shost, &pdev->dev); 4659 if (retval) { 4660 ioc_err(mrioc, "failure at %s:%d/%s()!\n", 4661 __FILE__, __LINE__, __func__); 4662 goto addhost_failed; 4663 } 4664 4665 scsi_scan_host(shost); 4666 mpi3mr_bsg_init(mrioc); 4667 return retval; 4668 4669 addhost_failed: 4670 mpi3mr_stop_watchdog(mrioc); 4671 mpi3mr_cleanup_ioc(mrioc); 4672 init_ioc_failed: 4673 mpi3mr_free_mem(mrioc); 4674 mpi3mr_cleanup_resources(mrioc); 4675 resource_alloc_failed: 4676 destroy_workqueue(mrioc->fwevt_worker_thread); 4677 fwevtthread_failed: 4678 spin_lock(&mrioc_list_lock); 4679 list_del(&mrioc->list); 4680 spin_unlock(&mrioc_list_lock); 4681 scsi_host_put(shost); 4682 shost_failed: 4683 return retval; 4684 } 4685 4686 /** 4687 * mpi3mr_remove - PCI remove callback 4688 * @pdev: PCI device instance 4689 * 4690 * Cleanup the IOC by issuing MUR and shutdown notification. 4691 * Free up all memory and resources associated with the 4692 * controllerand target devices, unregister the shost. 4693 * 4694 * Return: Nothing. 4695 */ 4696 static void mpi3mr_remove(struct pci_dev *pdev) 4697 { 4698 struct Scsi_Host *shost = pci_get_drvdata(pdev); 4699 struct mpi3mr_ioc *mrioc; 4700 struct workqueue_struct *wq; 4701 unsigned long flags; 4702 struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next; 4703 4704 if (!shost) 4705 return; 4706 4707 mrioc = shost_priv(shost); 4708 while (mrioc->reset_in_progress || mrioc->is_driver_loading) 4709 ssleep(1); 4710 4711 mpi3mr_bsg_exit(mrioc); 4712 mrioc->stop_drv_processing = 1; 4713 mpi3mr_cleanup_fwevt_list(mrioc); 4714 spin_lock_irqsave(&mrioc->fwevt_lock, flags); 4715 wq = mrioc->fwevt_worker_thread; 4716 mrioc->fwevt_worker_thread = NULL; 4717 spin_unlock_irqrestore(&mrioc->fwevt_lock, flags); 4718 if (wq) 4719 destroy_workqueue(wq); 4720 scsi_remove_host(shost); 4721 4722 list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list, 4723 list) { 4724 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); 4725 mpi3mr_tgtdev_del_from_list(mrioc, tgtdev); 4726 mpi3mr_tgtdev_put(tgtdev); 4727 } 4728 mpi3mr_stop_watchdog(mrioc); 4729 mpi3mr_cleanup_ioc(mrioc); 4730 mpi3mr_free_mem(mrioc); 4731 mpi3mr_cleanup_resources(mrioc); 4732 4733 spin_lock(&mrioc_list_lock); 4734 list_del(&mrioc->list); 4735 spin_unlock(&mrioc_list_lock); 4736 4737 scsi_host_put(shost); 4738 } 4739 4740 /** 4741 * mpi3mr_shutdown - PCI shutdown callback 4742 * @pdev: PCI device instance 4743 * 4744 * Free up all memory and resources associated with the 4745 * controller 4746 * 4747 * Return: Nothing. 4748 */ 4749 static void mpi3mr_shutdown(struct pci_dev *pdev) 4750 { 4751 struct Scsi_Host *shost = pci_get_drvdata(pdev); 4752 struct mpi3mr_ioc *mrioc; 4753 struct workqueue_struct *wq; 4754 unsigned long flags; 4755 4756 if (!shost) 4757 return; 4758 4759 mrioc = shost_priv(shost); 4760 while (mrioc->reset_in_progress || mrioc->is_driver_loading) 4761 ssleep(1); 4762 4763 mrioc->stop_drv_processing = 1; 4764 mpi3mr_cleanup_fwevt_list(mrioc); 4765 spin_lock_irqsave(&mrioc->fwevt_lock, flags); 4766 wq = mrioc->fwevt_worker_thread; 4767 mrioc->fwevt_worker_thread = NULL; 4768 spin_unlock_irqrestore(&mrioc->fwevt_lock, flags); 4769 if (wq) 4770 destroy_workqueue(wq); 4771 4772 mpi3mr_stop_watchdog(mrioc); 4773 mpi3mr_cleanup_ioc(mrioc); 4774 mpi3mr_cleanup_resources(mrioc); 4775 } 4776 4777 #ifdef CONFIG_PM 4778 /** 4779 * mpi3mr_suspend - PCI power management suspend callback 4780 * @pdev: PCI device instance 4781 * @state: New power state 4782 * 4783 * Change the power state to the given value and cleanup the IOC 4784 * by issuing MUR and shutdown notification 4785 * 4786 * Return: 0 always. 4787 */ 4788 static int mpi3mr_suspend(struct pci_dev *pdev, pm_message_t state) 4789 { 4790 struct Scsi_Host *shost = pci_get_drvdata(pdev); 4791 struct mpi3mr_ioc *mrioc; 4792 pci_power_t device_state; 4793 4794 if (!shost) 4795 return 0; 4796 4797 mrioc = shost_priv(shost); 4798 while (mrioc->reset_in_progress || mrioc->is_driver_loading) 4799 ssleep(1); 4800 mrioc->stop_drv_processing = 1; 4801 mpi3mr_cleanup_fwevt_list(mrioc); 4802 scsi_block_requests(shost); 4803 mpi3mr_stop_watchdog(mrioc); 4804 mpi3mr_cleanup_ioc(mrioc); 4805 4806 device_state = pci_choose_state(pdev, state); 4807 ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n", 4808 pdev, pci_name(pdev), device_state); 4809 pci_save_state(pdev); 4810 mpi3mr_cleanup_resources(mrioc); 4811 pci_set_power_state(pdev, device_state); 4812 4813 return 0; 4814 } 4815 4816 /** 4817 * mpi3mr_resume - PCI power management resume callback 4818 * @pdev: PCI device instance 4819 * 4820 * Restore the power state to D0 and reinitialize the controller 4821 * and resume I/O operations to the target devices 4822 * 4823 * Return: 0 on success, non-zero on failure 4824 */ 4825 static int mpi3mr_resume(struct pci_dev *pdev) 4826 { 4827 struct Scsi_Host *shost = pci_get_drvdata(pdev); 4828 struct mpi3mr_ioc *mrioc; 4829 pci_power_t device_state = pdev->current_state; 4830 int r; 4831 4832 if (!shost) 4833 return 0; 4834 4835 mrioc = shost_priv(shost); 4836 4837 ioc_info(mrioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n", 4838 pdev, pci_name(pdev), device_state); 4839 pci_set_power_state(pdev, PCI_D0); 4840 pci_enable_wake(pdev, PCI_D0, 0); 4841 pci_restore_state(pdev); 4842 mrioc->pdev = pdev; 4843 mrioc->cpu_count = num_online_cpus(); 4844 r = mpi3mr_setup_resources(mrioc); 4845 if (r) { 4846 ioc_info(mrioc, "%s: Setup resources failed[%d]\n", 4847 __func__, r); 4848 return r; 4849 } 4850 4851 mrioc->stop_drv_processing = 0; 4852 mpi3mr_memset_buffers(mrioc); 4853 r = mpi3mr_reinit_ioc(mrioc, 1); 4854 if (r) { 4855 ioc_err(mrioc, "resuming controller failed[%d]\n", r); 4856 return r; 4857 } 4858 scsi_unblock_requests(shost); 4859 mpi3mr_start_watchdog(mrioc); 4860 4861 return 0; 4862 } 4863 #endif 4864 4865 static const struct pci_device_id mpi3mr_pci_id_table[] = { 4866 { 4867 PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM, 4868 MPI3_MFGPAGE_DEVID_SAS4116, PCI_ANY_ID, PCI_ANY_ID) 4869 }, 4870 { 0 } 4871 }; 4872 MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table); 4873 4874 static struct pci_driver mpi3mr_pci_driver = { 4875 .name = MPI3MR_DRIVER_NAME, 4876 .id_table = mpi3mr_pci_id_table, 4877 .probe = mpi3mr_probe, 4878 .remove = mpi3mr_remove, 4879 .shutdown = mpi3mr_shutdown, 4880 #ifdef CONFIG_PM 4881 .suspend = mpi3mr_suspend, 4882 .resume = mpi3mr_resume, 4883 #endif 4884 }; 4885 4886 static ssize_t event_counter_show(struct device_driver *dd, char *buf) 4887 { 4888 return sprintf(buf, "%llu\n", atomic64_read(&event_counter)); 4889 } 4890 static DRIVER_ATTR_RO(event_counter); 4891 4892 static int __init mpi3mr_init(void) 4893 { 4894 int ret_val; 4895 4896 pr_info("Loading %s version %s\n", MPI3MR_DRIVER_NAME, 4897 MPI3MR_DRIVER_VERSION); 4898 4899 ret_val = pci_register_driver(&mpi3mr_pci_driver); 4900 if (ret_val) { 4901 pr_err("%s failed to load due to pci register driver failure\n", 4902 MPI3MR_DRIVER_NAME); 4903 return ret_val; 4904 } 4905 4906 ret_val = driver_create_file(&mpi3mr_pci_driver.driver, 4907 &driver_attr_event_counter); 4908 if (ret_val) 4909 pci_unregister_driver(&mpi3mr_pci_driver); 4910 4911 return ret_val; 4912 } 4913 4914 static void __exit mpi3mr_exit(void) 4915 { 4916 if (warn_non_secure_ctlr) 4917 pr_warn( 4918 "Unloading %s version %s while managing a non secure controller\n", 4919 MPI3MR_DRIVER_NAME, MPI3MR_DRIVER_VERSION); 4920 else 4921 pr_info("Unloading %s version %s\n", MPI3MR_DRIVER_NAME, 4922 MPI3MR_DRIVER_VERSION); 4923 4924 driver_remove_file(&mpi3mr_pci_driver.driver, 4925 &driver_attr_event_counter); 4926 pci_unregister_driver(&mpi3mr_pci_driver); 4927 } 4928 4929 module_init(mpi3mr_init); 4930 module_exit(mpi3mr_exit); 4931