// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for Broadcom MPI3 Storage Controllers
 *
 * Copyright (C) 2017-2023 Broadcom Inc.
 *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
 *
 */

#include "mpi3mr.h"

/* global driver scope variables */
LIST_HEAD(mrioc_list);
DEFINE_SPINLOCK(mrioc_list_lock);
static int mrioc_ids;
static int warn_non_secure_ctlr;
atomic64_t event_counter;

MODULE_AUTHOR(MPI3MR_DRIVER_AUTHOR);
MODULE_DESCRIPTION(MPI3MR_DRIVER_DESC);
MODULE_LICENSE(MPI3MR_DRIVER_LICENSE);
MODULE_VERSION(MPI3MR_DRIVER_VERSION);

/* Module parameters */
int prot_mask = -1;
module_param(prot_mask, int, 0);
MODULE_PARM_DESC(prot_mask, "Host protection capabilities mask, def=0x07");

static int prot_guard_mask = 3;
module_param(prot_guard_mask, int, 0);
MODULE_PARM_DESC(prot_guard_mask, " Host protection guard mask, def=3");
static int logging_level;
module_param(logging_level, int, 0);
MODULE_PARM_DESC(logging_level,
	" bits for enabling additional logging info (default=0)");

/* Forward declarations */
static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
	struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx);

#define MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION	(0xFFFF)

#define MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH	(0xFFFE)

/**
 * mpi3mr_host_tag_for_scmd - Get host tag for a scmd
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 *
 * Calculate the host tag based on block tag for a given scmd.
 *
 * Return: Valid host tag or MPI3MR_HOSTTAG_INVALID.
 */
static u16 mpi3mr_host_tag_for_scmd(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd)
{
	struct scmd_priv *priv = NULL;
	u32 unique_tag;
	u16 host_tag, hw_queue;

	unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));

	hw_queue = blk_mq_unique_tag_to_hwq(unique_tag);
	if (hw_queue >= mrioc->num_op_reply_q)
		return MPI3MR_HOSTTAG_INVALID;
	host_tag = blk_mq_unique_tag_to_tag(unique_tag);

	if (WARN_ON(host_tag >= mrioc->max_host_ios))
		return MPI3MR_HOSTTAG_INVALID;

	priv = scsi_cmd_priv(scmd);
	/* host_tag 0 is invalid, hence incrementing by 1 */
	priv->host_tag = host_tag + 1;
	priv->scmd = scmd;
	priv->in_lld_scope = 1;
	priv->req_q_idx = hw_queue;
	priv->meta_chain_idx = -1;
	priv->chain_idx = -1;
	priv->meta_sg_valid = 0;
	return priv->host_tag;
}

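/*
 * Note on the tag encoding used above (illustrative, based on the block
 * layer helpers): blk_mq_unique_tag() packs the hardware queue index in
 * the upper 16 bits and the per-queue tag in the lower 16 bits. For
 * example, tag 5 on hardware queue 2 yields a unique tag of
 * (2 << BLK_MQ_UNIQUE_TAG_BITS) | 5, from which the driver derives
 * host_tag 6 (tag + 1, since host tag 0 is reserved as invalid).
 * mpi3mr_scmd_from_host_tag() below performs the inverse mapping.
 */
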
/**
 * mpi3mr_scmd_from_host_tag - Get SCSI command from host tag
 * @mrioc: Adapter instance reference
 * @host_tag: Host tag
 * @qidx: Operational queue index
 *
 * Identify the block tag from the host tag and queue index and
 * retrieve associated scsi command using scsi_host_find_tag().
 *
 * Return: SCSI command reference or NULL.
 */
static struct scsi_cmnd *mpi3mr_scmd_from_host_tag(
	struct mpi3mr_ioc *mrioc, u16 host_tag, u16 qidx)
{
	struct scsi_cmnd *scmd = NULL;
	struct scmd_priv *priv = NULL;
	u32 unique_tag = host_tag - 1;

	if (WARN_ON(host_tag > mrioc->max_host_ios))
		goto out;

	unique_tag |= (qidx << BLK_MQ_UNIQUE_TAG_BITS);

	scmd = scsi_host_find_tag(mrioc->shost, unique_tag);
	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			scmd = NULL;
	}
out:
	return scmd;
}

/**
 * mpi3mr_clear_scmd_priv - Cleanup SCSI command private data
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 *
 * Invalidate the SCSI command private data to mark that the
 * command is no longer in LLD scope.
 *
 * Return: Nothing.
 */
static void mpi3mr_clear_scmd_priv(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd)
{
	struct scmd_priv *priv = NULL;

	priv = scsi_cmd_priv(scmd);

	if (WARN_ON(priv->in_lld_scope == 0))
		return;
	priv->host_tag = MPI3MR_HOSTTAG_INVALID;
	priv->req_q_idx = 0xFFFF;
	priv->scmd = NULL;
	priv->in_lld_scope = 0;
	priv->meta_sg_valid = 0;
	if (priv->chain_idx >= 0) {
		clear_bit(priv->chain_idx, mrioc->chain_bitmap);
		priv->chain_idx = -1;
	}
	if (priv->meta_chain_idx >= 0) {
		clear_bit(priv->meta_chain_idx, mrioc->chain_bitmap);
		priv->meta_chain_idx = -1;
	}
}

static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
	struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc);
static void mpi3mr_fwevt_worker(struct work_struct *work);

/**
 * mpi3mr_fwevt_free - firmware event memory deallocator
 * @r: kref pointer of the firmware event
 *
 * Free firmware event memory when no reference.
 */
static void mpi3mr_fwevt_free(struct kref *r)
{
	kfree(container_of(r, struct mpi3mr_fwevt, ref_count));
}

/**
 * mpi3mr_fwevt_get - kref increment
 * @fwevt: Firmware event reference
 *
 * Increment firmware event reference count.
 */
static void mpi3mr_fwevt_get(struct mpi3mr_fwevt *fwevt)
{
	kref_get(&fwevt->ref_count);
}

/**
 * mpi3mr_fwevt_put - kref decrement
 * @fwevt: Firmware event reference
 *
 * Decrement firmware event reference count.
 */
static void mpi3mr_fwevt_put(struct mpi3mr_fwevt *fwevt)
{
	kref_put(&fwevt->ref_count, mpi3mr_fwevt_free);
}

/**
 * mpi3mr_alloc_fwevt - Allocate firmware event
 * @len: length of firmware event data to allocate
 *
 * Allocate firmware event with required length and initialize
 * the reference counter.
 *
 * Return: firmware event reference.
 */
static struct mpi3mr_fwevt *mpi3mr_alloc_fwevt(int len)
{
	struct mpi3mr_fwevt *fwevt;

	fwevt = kzalloc(sizeof(*fwevt) + len, GFP_ATOMIC);
	if (!fwevt)
		return NULL;

	kref_init(&fwevt->ref_count);
	return fwevt;
}

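/*
 * Firmware event reference counting (summary, inferred from the helpers
 * above and their callers): kref_init() starts the count at 1;
 * mpi3mr_fwevt_add_to_list() takes one extra reference for the list and
 * one for the queued work item. The bottom half drops the list reference
 * via mpi3mr_fwevt_del_from_list(), the worker drops the work-item
 * reference after the bottom half returns, and the final put in
 * mpi3mr_fwevt_bh() (or in mpi3mr_cancel_work() on the cancellation
 * path) releases the kref_init reference and frees the event.
 */
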
/**
 * mpi3mr_fwevt_add_to_list - Add firmware event to the list
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Add the given firmware event to the firmware event list.
 *
 * Return: Nothing.
 */
static void mpi3mr_fwevt_add_to_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	unsigned long flags;

	if (!mrioc->fwevt_worker_thread)
		return;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	/* get fwevt reference count while adding it to fwevt_list */
	mpi3mr_fwevt_get(fwevt);
	INIT_LIST_HEAD(&fwevt->list);
	list_add_tail(&fwevt->list, &mrioc->fwevt_list);
	INIT_WORK(&fwevt->work, mpi3mr_fwevt_worker);
	/* get fwevt reference count while enqueueing it to worker queue */
	mpi3mr_fwevt_get(fwevt);
	queue_work(mrioc->fwevt_worker_thread, &fwevt->work);
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
}

/**
 * mpi3mr_fwevt_del_from_list - Delete firmware event from list
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Delete the given firmware event from the firmware event list.
 *
 * Return: Nothing.
 */
static void mpi3mr_fwevt_del_from_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	unsigned long flags;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	if (!list_empty(&fwevt->list)) {
		list_del_init(&fwevt->list);
		/*
		 * Put fwevt reference count after
		 * removing it from fwevt_list
		 */
		mpi3mr_fwevt_put(fwevt);
	}
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
}

/**
 * mpi3mr_dequeue_fwevt - Dequeue firmware event from the list
 * @mrioc: Adapter instance reference
 *
 * Dequeue a firmware event from the firmware event list.
 *
 * Return: firmware event.
 */
static struct mpi3mr_fwevt *mpi3mr_dequeue_fwevt(
	struct mpi3mr_ioc *mrioc)
{
	unsigned long flags;
	struct mpi3mr_fwevt *fwevt = NULL;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	if (!list_empty(&mrioc->fwevt_list)) {
		fwevt = list_first_entry(&mrioc->fwevt_list,
		    struct mpi3mr_fwevt, list);
		list_del_init(&fwevt->list);
		/*
		 * Put fwevt reference count after
		 * removing it from fwevt_list
		 */
		mpi3mr_fwevt_put(fwevt);
	}
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);

	return fwevt;
}

/**
 * mpi3mr_cancel_work - cancel firmware event
 * @fwevt: fwevt object which needs to be canceled
 *
 * Return: Nothing.
 */
static void mpi3mr_cancel_work(struct mpi3mr_fwevt *fwevt)
{
	/*
	 * Wait on the fwevt to complete. If this returns 1, then
	 * the event was never executed.
	 *
	 * If it did execute, we wait for it to finish, and the put will
	 * happen from mpi3mr_process_fwevt()
	 */
	if (cancel_work_sync(&fwevt->work)) {
		/*
		 * Put fwevt reference count after
		 * dequeuing it from worker queue
		 */
		mpi3mr_fwevt_put(fwevt);
		/*
		 * Put fwevt reference count to neutralize
		 * kref_init increment
		 */
		mpi3mr_fwevt_put(fwevt);
	}
}

/**
 * mpi3mr_cleanup_fwevt_list - Cleanup firmware event list
 * @mrioc: Adapter instance reference
 *
 * Flush all pending firmware events from the firmware event
 * list.
 *
 * Return: Nothing.
 */
void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_fwevt *fwevt = NULL;

	if ((list_empty(&mrioc->fwevt_list) && !mrioc->current_event) ||
	    !mrioc->fwevt_worker_thread)
		return;

	while ((fwevt = mpi3mr_dequeue_fwevt(mrioc)))
		mpi3mr_cancel_work(fwevt);

	if (mrioc->current_event) {
		fwevt = mrioc->current_event;
		/*
		 * Don't call cancel_work_sync() for the fwevt work if
		 * the controller reset was triggered while processing
		 * that same fwevt work, or when the worker thread is
		 * waiting for device add/remove APIs to complete.
		 * Otherwise we will deadlock.
		 */
		if (current_work() == &fwevt->work || fwevt->pending_at_sml) {
			fwevt->discard = 1;
			return;
		}

		mpi3mr_cancel_work(fwevt);
	}
}

/**
 * mpi3mr_queue_qd_reduction_event - Queue TG QD reduction event
 * @mrioc: Adapter instance reference
 * @tg: Throttle group information pointer
 *
 * Accessor to queue a synthetically generated driver event to
 * the event worker thread; the driver event will be used to
 * reduce the QD of all VDs in the TG from the worker thread.
 *
 * Return: None.
 */
static void mpi3mr_queue_qd_reduction_event(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_throttle_group_info *tg)
{
	struct mpi3mr_fwevt *fwevt;
	u16 sz = sizeof(struct mpi3mr_throttle_group_info *);

	/*
	 * If the QD reduction event is already queued due to throttle and if
	 * the QD is not restored through the device info change event,
	 * then don't queue further reduction events
	 */
	if (tg->fw_qd != tg->modified_qd)
		return;

	fwevt = mpi3mr_alloc_fwevt(sz);
	if (!fwevt) {
		ioc_warn(mrioc, "failed to queue TG QD reduction event\n");
		return;
	}
	*(struct mpi3mr_throttle_group_info **)fwevt->event_data = tg;
	fwevt->mrioc = mrioc;
	fwevt->event_id = MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION;
	fwevt->send_ack = 0;
	fwevt->process_evt = 1;
	fwevt->evt_ctx = 0;
	fwevt->event_data_size = sz;
	tg->modified_qd = max_t(u16, (tg->fw_qd * tg->qd_reduction) / 10, 8);

	dprint_event_bh(mrioc, "qd reduction event queued for tg_id(%d)\n",
	    tg->id);
	mpi3mr_fwevt_add_to_list(mrioc, fwevt);
}

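/*
 * QD reduction arithmetic (illustrative): tg->qd_reduction is applied as
 * a multiplier in tenths, so the new depth is (fw_qd * qd_reduction) / 10
 * with a floor of 8. For example, fw_qd = 128 with qd_reduction = 3
 * yields a modified_qd of 38, i.e. roughly 30% of the original depth.
 */
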
/**
 * mpi3mr_invalidate_devhandles - Invalidate device handles
 * @mrioc: Adapter instance reference
 *
 * Invalidate the device handles in the target device
 * structures. Called post reset prior to reinitializing the
 * controller.
 *
 * Return: Nothing.
 */
void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_tgt_dev *tgtdev;
	struct mpi3mr_stgt_priv_data *tgt_priv;

	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		tgtdev->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
		if (tgtdev->starget && tgtdev->starget->hostdata) {
			tgt_priv = tgtdev->starget->hostdata;
			tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
			tgt_priv->io_throttle_enabled = 0;
			tgt_priv->io_divert = 0;
			tgt_priv->throttle_group = NULL;
			if (tgtdev->host_exposed)
				atomic_set(&tgt_priv->block_io, 1);
		}
	}
}

/**
 * mpi3mr_print_scmd - print individual SCSI command
 * @rq: Block request
 * @data: Adapter instance reference
 *
 * Print the SCSI command details if it is in LLD scope.
 *
 * Return: true always.
 */
static bool mpi3mr_print_scmd(struct request *rq, void *data)
{
	struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv = NULL;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;

		ioc_info(mrioc, "%s :Host Tag = %d, qid = %d\n",
		    __func__, priv->host_tag, priv->req_q_idx + 1);
		scsi_print_command(scmd);
	}

out:
	return true;
}

/**
 * mpi3mr_flush_scmd - Flush individual SCSI command
 * @rq: Block request
 * @data: Adapter instance reference
 *
 * Return the SCSI command to the upper layers if it is in LLD
 * scope.
 *
 * Return: true always.
 */
static bool mpi3mr_flush_scmd(struct request *rq, void *data)
{
	struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv = NULL;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;

		if (priv->meta_sg_valid)
			dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
			    scsi_prot_sg_count(scmd), scmd->sc_data_direction);
		mpi3mr_clear_scmd_priv(mrioc, scmd);
		scsi_dma_unmap(scmd);
		scmd->result = DID_RESET << 16;
		scsi_print_command(scmd);
		scsi_done(scmd);
		mrioc->flush_io_count++;
	}

out:
	return true;
}

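/*
 * Note: commands flushed above complete back to the SCSI midlayer with
 * DID_RESET as the host byte, which typically makes the midlayer requeue
 * and retry them once the controller is operational again.
 */
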
/**
 * mpi3mr_count_dev_pending - Count commands pending for a lun
 * @rq: Block request
 * @data: SCSI device reference
 *
 * This is an iterator function called for each SCSI command in
 * a host and if the command is pending in the LLD for the
 * specific device (LUN) then the device specific pending I/O
 * counter is updated in the device structure.
 *
 * Return: true always.
 */
static bool mpi3mr_count_dev_pending(struct request *rq, void *data)
{
	struct scsi_device *sdev = (struct scsi_device *)data;
	struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;
		if (scmd->device == sdev)
			sdev_priv_data->pend_count++;
	}

out:
	return true;
}

/**
 * mpi3mr_count_tgt_pending - Count commands pending for target
 * @rq: Block request
 * @data: SCSI target reference
 *
 * This is an iterator function called for each SCSI command in
 * a host and if the command is pending in the LLD for the
 * specific target then the target specific pending I/O counter
 * is updated in the target structure.
 *
 * Return: true always.
 */
static bool mpi3mr_count_tgt_pending(struct request *rq, void *data)
{
	struct scsi_target *starget = (struct scsi_target *)data;
	struct mpi3mr_stgt_priv_data *stgt_priv_data = starget->hostdata;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;
		if (scmd->device && (scsi_target(scmd->device) == starget))
			stgt_priv_data->pend_count++;
	}

out:
	return true;
}

/**
 * mpi3mr_flush_host_io - Flush host I/Os
 * @mrioc: Adapter instance reference
 *
 * Flush all of the pending I/Os by calling
 * blk_mq_tagset_busy_iter() for each possible tag. This is
 * executed post controller reset.
 *
 * Return: Nothing.
 */
void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc)
{
	struct Scsi_Host *shost = mrioc->shost;

	mrioc->flush_io_count = 0;
	ioc_info(mrioc, "%s :Flushing Host I/O cmds post reset\n", __func__);
	blk_mq_tagset_busy_iter(&shost->tag_set,
	    mpi3mr_flush_scmd, (void *)mrioc);
	ioc_info(mrioc, "%s :Flushed %d Host I/O cmds\n", __func__,
	    mrioc->flush_io_count);
}

/**
 * mpi3mr_flush_cmds_for_unrecovered_controller - Flush all pending cmds
 * @mrioc: Adapter instance reference
 *
 * This function waits for currently running IO poll threads to
 * exit and then flushes all host I/Os and any internal pending
 * cmds. This is executed after the controller is marked as
 * unrecoverable.
 *
 * Return: Nothing.
 */
void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc)
{
	struct Scsi_Host *shost = mrioc->shost;
	int i;

	if (!mrioc->unrecoverable)
		return;

	if (mrioc->op_reply_qinfo) {
		for (i = 0; i < mrioc->num_queues; i++) {
			while (atomic_read(&mrioc->op_reply_qinfo[i].in_use))
				udelay(500);
			atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
		}
	}
	mrioc->flush_io_count = 0;
	blk_mq_tagset_busy_iter(&shost->tag_set,
	    mpi3mr_flush_scmd, (void *)mrioc);
	mpi3mr_flush_delayed_cmd_lists(mrioc);
	mpi3mr_flush_drv_cmds(mrioc);
}

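/*
 * Both flush paths above rely on blk_mq_tagset_busy_iter(), which (per
 * the block layer) visits only requests that have been started, so only
 * commands actually dispatched to the driver are iterated; the
 * in_lld_scope check in mpi3mr_flush_scmd() then filters out anything
 * the driver has already completed.
 */
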
/**
 * mpi3mr_alloc_tgtdev - target device allocator
 *
 * Allocate target device instance and initialize the reference
 * count.
 *
 * Return: target device instance.
 */
static struct mpi3mr_tgt_dev *mpi3mr_alloc_tgtdev(void)
{
	struct mpi3mr_tgt_dev *tgtdev;

	tgtdev = kzalloc(sizeof(*tgtdev), GFP_ATOMIC);
	if (!tgtdev)
		return NULL;
	kref_init(&tgtdev->ref_count);
	return tgtdev;
}

/**
 * mpi3mr_tgtdev_add_to_list - Add target device to the list
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device
 *
 * Add the target device to the target device list.
 *
 * Return: Nothing.
 */
static void mpi3mr_tgtdev_add_to_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev)
{
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	mpi3mr_tgtdev_get(tgtdev);
	INIT_LIST_HEAD(&tgtdev->list);
	list_add_tail(&tgtdev->list, &mrioc->tgtdev_list);
	tgtdev->state = MPI3MR_DEV_CREATED;
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}

/**
 * mpi3mr_tgtdev_del_from_list - Delete target device from the list
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device
 * @must_delete: Must delete the target device from the list irrespective
 * of the device state.
 *
 * Remove the target device from the target device list.
 *
 * Return: Nothing.
 */
static void mpi3mr_tgtdev_del_from_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev, bool must_delete)
{
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	if ((tgtdev->state == MPI3MR_DEV_REMOVE_HS_STARTED) || (must_delete == true)) {
		if (!list_empty(&tgtdev->list)) {
			list_del_init(&tgtdev->list);
			tgtdev->state = MPI3MR_DEV_DELETED;
			mpi3mr_tgtdev_put(tgtdev);
		}
	}
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}

/**
 * __mpi3mr_get_tgtdev_by_handle - Get tgtdev from device handle
 * @mrioc: Adapter instance reference
 * @handle: Device handle
 *
 * Accessor to retrieve target device from the device handle.
 * Non-lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_handle(
	struct mpi3mr_ioc *mrioc, u16 handle)
{
	struct mpi3mr_tgt_dev *tgtdev;

	assert_spin_locked(&mrioc->tgtdev_lock);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
		if (tgtdev->dev_handle == handle)
			goto found_tgtdev;
	return NULL;

found_tgtdev:
	mpi3mr_tgtdev_get(tgtdev);
	return tgtdev;
}

/**
 * mpi3mr_get_tgtdev_by_handle - Get tgtdev from device handle
 * @mrioc: Adapter instance reference
 * @handle: Device handle
 *
 * Accessor to retrieve target device from the device handle.
 * Lock version.
 *
 * Return: Target device reference.
 */
struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle(
	struct mpi3mr_ioc *mrioc, u16 handle)
{
	struct mpi3mr_tgt_dev *tgtdev;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle);
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
	return tgtdev;
}

/**
 * __mpi3mr_get_tgtdev_by_perst_id - Get tgtdev from persistent ID
 * @mrioc: Adapter instance reference
 * @persist_id: Persistent ID
 *
 * Accessor to retrieve target device from the Persistent ID.
 * Non-lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_perst_id(
	struct mpi3mr_ioc *mrioc, u16 persist_id)
{
	struct mpi3mr_tgt_dev *tgtdev;

	assert_spin_locked(&mrioc->tgtdev_lock);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
		if (tgtdev->perst_id == persist_id)
			goto found_tgtdev;
	return NULL;

found_tgtdev:
	mpi3mr_tgtdev_get(tgtdev);
	return tgtdev;
}

/**
 * mpi3mr_get_tgtdev_by_perst_id - Get tgtdev from persistent ID
 * @mrioc: Adapter instance reference
 * @persist_id: Persistent ID
 *
 * Accessor to retrieve target device from the Persistent ID.
 * Lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_perst_id(
	struct mpi3mr_ioc *mrioc, u16 persist_id)
{
	struct mpi3mr_tgt_dev *tgtdev;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, persist_id);
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
	return tgtdev;
}

/**
 * __mpi3mr_get_tgtdev_from_tgtpriv - Get tgtdev from tgt private
 * @mrioc: Adapter instance reference
 * @tgt_priv: Target private data
 *
 * Accessor to return target device from the target private
 * data. Non-lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_from_tgtpriv(
	struct mpi3mr_ioc *mrioc, struct mpi3mr_stgt_priv_data *tgt_priv)
{
	struct mpi3mr_tgt_dev *tgtdev;

	assert_spin_locked(&mrioc->tgtdev_lock);
	tgtdev = tgt_priv->tgt_dev;
	if (tgtdev)
		mpi3mr_tgtdev_get(tgtdev);
	return tgtdev;
}

/**
 * mpi3mr_set_io_divert_for_all_vd_in_tg - Set divert for TG VDs
 * @mrioc: Adapter instance reference
 * @tg: Throttle group information pointer
 * @divert_value: 1 or 0
 *
 * Accessor to set io_divert flag for each device associated
 * with the given throttle group with the given value.
 *
 * Return: None.
 */
static void mpi3mr_set_io_divert_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_throttle_group_info *tg, u8 divert_value)
{
	unsigned long flags;
	struct mpi3mr_tgt_dev *tgtdev;
	struct mpi3mr_stgt_priv_data *tgt_priv;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		if (tgtdev->starget && tgtdev->starget->hostdata) {
			tgt_priv = tgtdev->starget->hostdata;
			if (tgt_priv->throttle_group == tg)
				tgt_priv->io_divert = divert_value;
		}
	}
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}

/**
 * mpi3mr_print_device_event_notice - print notice related to post processing of
 * device event after controller reset.
 *
 * @mrioc: Adapter instance reference
 * @device_add: true for device add event and false for device removal event
 *
 * Return: None.
 */
void mpi3mr_print_device_event_notice(struct mpi3mr_ioc *mrioc,
	bool device_add)
{
	ioc_notice(mrioc, "Device %s was in progress before the reset and\n",
	    (device_add ? "addition" : "removal"));
	ioc_notice(mrioc, "completed after reset, verify whether the exposed devices\n");
	ioc_notice(mrioc, "are matched with attached devices for correctness\n");
}

/**
 * mpi3mr_remove_tgtdev_from_host - Remove dev from upper layers
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device structure
 *
 * Checks whether the device is exposed to the upper layers and,
 * if it is, removes the device from the upper layers by calling
 * scsi_remove_target().
 *
 * Return: Nothing.
 */
void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev)
{
	struct mpi3mr_stgt_priv_data *tgt_priv;

	ioc_info(mrioc, "%s :Removing handle(0x%04x), wwid(0x%016llx)\n",
	    __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
	if (tgtdev->starget && tgtdev->starget->hostdata) {
		tgt_priv = tgtdev->starget->hostdata;
		atomic_set(&tgt_priv->block_io, 0);
		tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	}

	if (!mrioc->sas_transport_enabled || (tgtdev->dev_type !=
	    MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) {
		if (tgtdev->starget) {
			if (mrioc->current_event)
				mrioc->current_event->pending_at_sml = 1;
			scsi_remove_target(&tgtdev->starget->dev);
			tgtdev->host_exposed = 0;
			if (mrioc->current_event) {
				mrioc->current_event->pending_at_sml = 0;
				if (mrioc->current_event->discard) {
					mpi3mr_print_device_event_notice(mrioc,
					    false);
					return;
				}
			}
		}
	} else
		mpi3mr_remove_tgtdev_from_sas_transport(mrioc, tgtdev);

	ioc_info(mrioc, "%s :Removed handle(0x%04x), wwid(0x%016llx)\n",
	    __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
}

/**
 * mpi3mr_report_tgtdev_to_host - Expose device to upper layers
 * @mrioc: Adapter instance reference
 * @perst_id: Persistent ID of the device
 *
 * Checks whether the device can be exposed to the upper layers
 * and, if it is not already exposed, exposes it by calling
 * scsi_scan_target().
 *
 * Return: 0 on success, non zero on failure.
 */
static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc,
	u16 perst_id)
{
	int retval = 0;
	struct mpi3mr_tgt_dev *tgtdev;

	if (mrioc->reset_in_progress)
		return -1;

	tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
	if (!tgtdev) {
		retval = -1;
		goto out;
	}
	if (tgtdev->is_hidden || tgtdev->host_exposed) {
		retval = -1;
		goto out;
	}
	if (!mrioc->sas_transport_enabled || (tgtdev->dev_type !=
	    MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) {
		tgtdev->host_exposed = 1;
		if (mrioc->current_event)
			mrioc->current_event->pending_at_sml = 1;
		scsi_scan_target(&mrioc->shost->shost_gendev,
		    mrioc->scsi_device_channel, tgtdev->perst_id,
		    SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
		if (!tgtdev->starget)
			tgtdev->host_exposed = 0;
		if (mrioc->current_event) {
			mrioc->current_event->pending_at_sml = 0;
			if (mrioc->current_event->discard) {
				mpi3mr_print_device_event_notice(mrioc, true);
				goto out;
			}
		}
	} else
		mpi3mr_report_tgtdev_to_sas_transport(mrioc, tgtdev);
out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);

	return retval;
}

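/*
 * The pending_at_sml/discard handshake used by the two functions above
 * (and honored by mpi3mr_cleanup_fwevt_list()) exists to avoid a
 * deadlock: while the event worker is blocked inside scsi_remove_target()
 * or scsi_scan_target(), a concurrent controller reset must not
 * cancel_work_sync() that same work item. Instead, the reset path sets
 * fwevt->discard and the worker prints a notice once the SML call
 * returns.
 */
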
/**
 * mpi3mr_change_queue_depth - Change QD callback handler
 * @sdev: SCSI device reference
 * @q_depth: Queue depth
 *
 * Validate and limit QD and call scsi_change_queue_depth.
 *
 * Return: return value of scsi_change_queue_depth
 */
static int mpi3mr_change_queue_depth(struct scsi_device *sdev,
	int q_depth)
{
	struct scsi_target *starget = scsi_target(sdev);
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	int retval = 0;

	if (!sdev->tagged_supported)
		q_depth = 1;
	if (q_depth > shost->can_queue)
		q_depth = shost->can_queue;
	else if (!q_depth)
		q_depth = MPI3MR_DEFAULT_SDEV_QD;
	retval = scsi_change_queue_depth(sdev, q_depth);
	sdev->max_queue_depth = sdev->queue_depth;

	return retval;
}

/**
 * mpi3mr_update_sdev - Update SCSI device information
 * @sdev: SCSI device reference
 * @data: target device reference
 *
 * This is an iterator function called for each SCSI device in a
 * target to update the target specific information into each
 * SCSI device.
 *
 * Return: Nothing.
 */
static void
mpi3mr_update_sdev(struct scsi_device *sdev, void *data)
{
	struct mpi3mr_tgt_dev *tgtdev;

	tgtdev = (struct mpi3mr_tgt_dev *)data;
	if (!tgtdev)
		return;

	mpi3mr_change_queue_depth(sdev, tgtdev->q_depth);
	switch (tgtdev->dev_type) {
	case MPI3_DEVICE_DEVFORM_PCIE:
		/* The block layer hw sector size = 512 */
		if ((tgtdev->dev_spec.pcie_inf.dev_info &
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) {
			blk_queue_max_hw_sectors(sdev->request_queue,
			    tgtdev->dev_spec.pcie_inf.mdts / 512);
			if (tgtdev->dev_spec.pcie_inf.pgsz == 0)
				blk_queue_virt_boundary(sdev->request_queue,
				    ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1));
			else
				blk_queue_virt_boundary(sdev->request_queue,
				    ((1 << tgtdev->dev_spec.pcie_inf.pgsz) - 1));
		}
		break;
	default:
		break;
	}
}

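/*
 * Sizing note for the PCIe/NVMe branch above (illustrative): mdts is in
 * bytes, so dividing by the 512-byte block layer sector size converts it
 * to a max_hw_sectors value (e.g. a 1 MB mdts yields 2048 sectors), and
 * a page size exponent of 12 produces a 4095-byte virt boundary mask,
 * i.e. 4 KB alignment for scatter/gather elements.
 */
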
/**
 * mpi3mr_rfresh_tgtdevs - Refresh target device exposure
 * @mrioc: Adapter instance reference
 *
 * This is executed post controller reset to identify any
 * devices that went missing during the reset and remove them
 * from the upper layers, or to expose any newly detected
 * devices to the upper layers.
 *
 * Return: Nothing.
 */
void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;

	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
	    list) {
		if (tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
			dprint_reset(mrioc, "removing target device with perst_id(%d)\n",
			    tgtdev->perst_id);
			if (tgtdev->host_exposed)
				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true);
			mpi3mr_tgtdev_put(tgtdev);
		}
	}

	tgtdev = NULL;
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		if ((tgtdev->dev_handle != MPI3MR_INVALID_DEV_HANDLE) &&
		    !tgtdev->is_hidden && !tgtdev->host_exposed)
			mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
	}
}

/**
 * mpi3mr_update_tgtdev - Update target device information
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device internal structure
 * @dev_pg0: New device page0
 * @is_added: Flag to indicate the device is just added
 *
 * Update the information from the device page0 into the driver
 * cached target device structure.
 *
 * Return: Nothing.
 */
static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev, struct mpi3_device_page0 *dev_pg0,
	bool is_added)
{
	u16 flags = 0;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
	struct mpi3mr_enclosure_node *enclosure_dev = NULL;
	u8 prot_mask = 0;

	tgtdev->perst_id = le16_to_cpu(dev_pg0->persistent_id);
	tgtdev->dev_handle = le16_to_cpu(dev_pg0->dev_handle);
	tgtdev->dev_type = dev_pg0->device_form;
	tgtdev->io_unit_port = dev_pg0->io_unit_port;
	tgtdev->encl_handle = le16_to_cpu(dev_pg0->enclosure_handle);
	tgtdev->parent_handle = le16_to_cpu(dev_pg0->parent_dev_handle);
	tgtdev->slot = le16_to_cpu(dev_pg0->slot);
	tgtdev->q_depth = le16_to_cpu(dev_pg0->queue_depth);
	tgtdev->wwid = le64_to_cpu(dev_pg0->wwid);
	tgtdev->devpg0_flag = le16_to_cpu(dev_pg0->flags);

	if (tgtdev->encl_handle)
		enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc,
		    tgtdev->encl_handle);
	if (enclosure_dev)
		tgtdev->enclosure_logical_id = le64_to_cpu(
		    enclosure_dev->pg0.enclosure_logical_id);

	flags = tgtdev->devpg0_flag;

	tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN);

	if (is_added == true)
		tgtdev->io_throttle_enabled =
		    (flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 1 : 0;

	if (tgtdev->starget && tgtdev->starget->hostdata) {
		scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
		    tgtdev->starget->hostdata;
		scsi_tgt_priv_data->perst_id = tgtdev->perst_id;
		scsi_tgt_priv_data->dev_handle = tgtdev->dev_handle;
		scsi_tgt_priv_data->dev_type = tgtdev->dev_type;
		scsi_tgt_priv_data->io_throttle_enabled =
		    tgtdev->io_throttle_enabled;
		if (is_added == true)
			atomic_set(&scsi_tgt_priv_data->block_io, 0);
	}

	switch (dev_pg0->access_status) {
	case MPI3_DEVICE0_ASTATUS_NO_ERRORS:
	case MPI3_DEVICE0_ASTATUS_PREPARE:
	case MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION:
	case MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY:
		break;
	default:
		tgtdev->is_hidden = 1;
		break;
	}

	switch (tgtdev->dev_type) {
	case MPI3_DEVICE_DEVFORM_SAS_SATA:
	{
		struct mpi3_device0_sas_sata_format *sasinf =
		    &dev_pg0->device_specific.sas_sata_format;
		u16 dev_info = le16_to_cpu(sasinf->device_info);

		tgtdev->dev_spec.sas_sata_inf.dev_info = dev_info;
		tgtdev->dev_spec.sas_sata_inf.sas_address =
		    le64_to_cpu(sasinf->sas_address);
		tgtdev->dev_spec.sas_sata_inf.phy_id = sasinf->phy_num;
		tgtdev->dev_spec.sas_sata_inf.attached_phy_id =
		    sasinf->attached_phy_identifier;
		if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) !=
		    MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE)
			tgtdev->is_hidden = 1;
		else if (!(dev_info & (MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET |
		    MPI3_SAS_DEVICE_INFO_SSP_TARGET)))
			tgtdev->is_hidden = 1;

		if (((tgtdev->devpg0_flag &
		    MPI3_DEVICE0_FLAGS_ATT_METHOD_DIR_ATTACHED)
		    && (tgtdev->devpg0_flag &
		    MPI3_DEVICE0_FLAGS_ATT_METHOD_VIRTUAL)) ||
		    (tgtdev->parent_handle == 0xFFFF))
			tgtdev->non_stl = 1;
		if (tgtdev->dev_spec.sas_sata_inf.hba_port)
			tgtdev->dev_spec.sas_sata_inf.hba_port->port_id =
			    dev_pg0->io_unit_port;
		break;
	}
	case MPI3_DEVICE_DEVFORM_PCIE:
	{
		struct mpi3_device0_pcie_format *pcieinf =
		    &dev_pg0->device_specific.pcie_format;
		u16 dev_info = le16_to_cpu(pcieinf->device_info);

		tgtdev->dev_spec.pcie_inf.dev_info = dev_info;
		tgtdev->dev_spec.pcie_inf.capb =
		    le32_to_cpu(pcieinf->capabilities);
		tgtdev->dev_spec.pcie_inf.mdts = MPI3MR_DEFAULT_MDTS;
		/* 2^12 = 4096 */
		tgtdev->dev_spec.pcie_inf.pgsz = 12;
		if (dev_pg0->access_status == MPI3_DEVICE0_ASTATUS_NO_ERRORS) {
			tgtdev->dev_spec.pcie_inf.mdts =
			    le32_to_cpu(pcieinf->maximum_data_transfer_size);
			tgtdev->dev_spec.pcie_inf.pgsz = pcieinf->page_size;
			tgtdev->dev_spec.pcie_inf.reset_to =
			    max_t(u8, pcieinf->controller_reset_to,
			    MPI3MR_INTADMCMD_TIMEOUT);
			tgtdev->dev_spec.pcie_inf.abort_to =
			    max_t(u8, pcieinf->nvme_abort_to,
			    MPI3MR_INTADMCMD_TIMEOUT);
		}
		if (tgtdev->dev_spec.pcie_inf.mdts > (1024 * 1024))
			tgtdev->dev_spec.pcie_inf.mdts = (1024 * 1024);
		if (((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
		    ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE))
			tgtdev->is_hidden = 1;
		tgtdev->non_stl = 1;
		if (!mrioc->shost)
			break;
		prot_mask = scsi_host_get_prot(mrioc->shost);
		if (prot_mask & SHOST_DIX_TYPE0_PROTECTION) {
			scsi_host_set_prot(mrioc->shost, prot_mask & 0x77);
			ioc_info(mrioc,
			    "%s : Disabling DIX0 prot capability\n", __func__);
			ioc_info(mrioc,
			    "because HBA does not support DIX0 operation on NVME drives\n");
		}
		break;
	}
	case MPI3_DEVICE_DEVFORM_VD:
	{
		struct mpi3_device0_vd_format *vdinf =
		    &dev_pg0->device_specific.vd_format;
		struct mpi3mr_throttle_group_info *tg = NULL;
		u16 vdinf_io_throttle_group =
		    le16_to_cpu(vdinf->io_throttle_group);

		tgtdev->dev_spec.vd_inf.state = vdinf->vd_state;
		if (vdinf->vd_state == MPI3_DEVICE0_VD_STATE_OFFLINE)
			tgtdev->is_hidden = 1;
		tgtdev->non_stl = 1;
		tgtdev->dev_spec.vd_inf.tg_id = vdinf_io_throttle_group;
		tgtdev->dev_spec.vd_inf.tg_high =
		    le16_to_cpu(vdinf->io_throttle_group_high) * 2048;
		tgtdev->dev_spec.vd_inf.tg_low =
		    le16_to_cpu(vdinf->io_throttle_group_low) * 2048;
		if (vdinf_io_throttle_group < mrioc->num_io_throttle_group) {
			tg = mrioc->throttle_groups + vdinf_io_throttle_group;
			tg->id = vdinf_io_throttle_group;
			tg->high = tgtdev->dev_spec.vd_inf.tg_high;
			tg->low = tgtdev->dev_spec.vd_inf.tg_low;
			tg->qd_reduction =
			    tgtdev->dev_spec.vd_inf.tg_qd_reduction;
			if (is_added == true)
				tg->fw_qd = tgtdev->q_depth;
			tg->modified_qd = tgtdev->q_depth;
		}
		tgtdev->dev_spec.vd_inf.tg = tg;
		if (scsi_tgt_priv_data)
			scsi_tgt_priv_data->throttle_group = tg;
		break;
	}
	default:
		break;
	}
}

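/*
 * Note on the VD throttle-group bookkeeping above: tg->fw_qd records the
 * firmware-reported queue depth only when the device is first added,
 * while tg->modified_qd tracks the depth currently applied by the
 * driver. mpi3mr_queue_qd_reduction_event() compares the two to decide
 * whether a QD reduction is already outstanding for the group.
 */
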
/**
 * mpi3mr_devstatuschg_evt_bh - DevStatusChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event information.
 *
 * Process Device status Change event and based on device's new
 * information, either expose the device to the upper layers, or
 * remove the device from upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	u16 dev_handle = 0;
	u8 uhide = 0, delete = 0, cleanup = 0;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3_event_data_device_status_change *evtdata =
	    (struct mpi3_event_data_device_status_change *)fwevt->event_data;

	dev_handle = le16_to_cpu(evtdata->dev_handle);
	ioc_info(mrioc,
	    "%s :device status change: handle(0x%04x): reason code(0x%x)\n",
	    __func__, dev_handle, evtdata->reason_code);
	switch (evtdata->reason_code) {
	case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
		delete = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN:
		uhide = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
		delete = 1;
		cleanup = 1;
		break;
	default:
		ioc_info(mrioc, "%s :Unhandled reason code(0x%x)\n", __func__,
		    evtdata->reason_code);
		break;
	}

	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
	if (!tgtdev)
		goto out;
	if (uhide) {
		tgtdev->is_hidden = 0;
		if (!tgtdev->host_exposed)
			mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
	}

	if (delete)
		mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);

	if (cleanup) {
		mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
		mpi3mr_tgtdev_put(tgtdev);
	}

out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);
}

/**
 * mpi3mr_devinfochg_evt_bh - DeviceInfoChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @dev_pg0: New device page0
 *
 * Process Device Info Change event and based on device's new
 * information, either expose the device to the upper layers, or
 * remove the device from upper layers or update the details of
 * the device.
 *
 * Return: Nothing.
 */
static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3_device_page0 *dev_pg0)
{
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	u16 dev_handle = 0, perst_id = 0;

	perst_id = le16_to_cpu(dev_pg0->persistent_id);
	dev_handle = le16_to_cpu(dev_pg0->dev_handle);
	ioc_info(mrioc,
	    "%s :Device info change: handle(0x%04x): persist_id(0x%x)\n",
	    __func__, dev_handle, perst_id);
	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
	if (!tgtdev)
		goto out;
	mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, false);
	if (!tgtdev->is_hidden && !tgtdev->host_exposed)
		mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
	if (tgtdev->is_hidden && tgtdev->host_exposed)
		mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
	if (!tgtdev->is_hidden && tgtdev->host_exposed && tgtdev->starget)
		starget_for_each_device(tgtdev->starget, (void *)tgtdev,
		    mpi3mr_update_sdev);
out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);
}

/**
 * mpi3mr_free_enclosure_list - release enclosures
 * @mrioc: Adapter instance reference
 *
 * Free memory allocated during enclosure add.
 *
 * Return: Nothing.
 */
void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_enclosure_node *enclosure_dev, *enclosure_dev_next;

	list_for_each_entry_safe(enclosure_dev,
	    enclosure_dev_next, &mrioc->enclosure_list, list) {
		list_del(&enclosure_dev->list);
		kfree(enclosure_dev);
	}
}

/**
 * mpi3mr_enclosure_find_by_handle - enclosure search by handle
 * @mrioc: Adapter instance reference
 * @handle: Firmware device handle of the enclosure
 *
 * This searches for enclosure device based on handle, then
 * returns the enclosure object.
 *
 * Return: Enclosure object reference or NULL
 */
struct mpi3mr_enclosure_node *mpi3mr_enclosure_find_by_handle(
	struct mpi3mr_ioc *mrioc, u16 handle)
{
	struct mpi3mr_enclosure_node *enclosure_dev, *r = NULL;

	list_for_each_entry(enclosure_dev, &mrioc->enclosure_list, list) {
		if (le16_to_cpu(enclosure_dev->pg0.enclosure_handle) != handle)
			continue;
		r = enclosure_dev;
		goto out;
	}
out:
	return r;
}

/**
 * mpi3mr_encldev_add_chg_evt_debug - debug for enclosure event
 * @mrioc: Adapter instance reference
 * @encl_pg0: Enclosure page 0.
 * @is_added: Added event or not
 *
 * Return: Nothing.
 */
static void mpi3mr_encldev_add_chg_evt_debug(struct mpi3mr_ioc *mrioc,
	struct mpi3_enclosure_page0 *encl_pg0, u8 is_added)
{
	char *reason_str = NULL;

	if (!(mrioc->logging_level & MPI3_DEBUG_EVENT_WORK_TASK))
		return;

	if (is_added)
		reason_str = "enclosure added";
	else
		reason_str = "enclosure dev status changed";

	ioc_info(mrioc,
	    "%s: handle(0x%04x), enclosure logical id(0x%016llx)\n",
	    reason_str, le16_to_cpu(encl_pg0->enclosure_handle),
	    (unsigned long long)le64_to_cpu(encl_pg0->enclosure_logical_id));
	ioc_info(mrioc,
	    "number of slots(%d), port(%d), flags(0x%04x), present(%d)\n",
	    le16_to_cpu(encl_pg0->num_slots), encl_pg0->io_unit_port,
	    le16_to_cpu(encl_pg0->flags),
	    ((le16_to_cpu(encl_pg0->flags) &
	    MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4));
}

/**
 * mpi3mr_encldev_add_chg_evt_bh - Enclosure evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Prints information about the Enclosure device status or
 * Enclosure add events if logging is enabled and add or remove
 * the enclosure from the controller's internal list of
 * enclosures.
 *
 * Return: Nothing.
 */
static void mpi3mr_encldev_add_chg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	struct mpi3mr_enclosure_node *enclosure_dev = NULL;
	struct mpi3_enclosure_page0 *encl_pg0;
	u16 encl_handle;
	u8 added, present;

	encl_pg0 = (struct mpi3_enclosure_page0 *) fwevt->event_data;
	added = (fwevt->event_id == MPI3_EVENT_ENCL_DEVICE_ADDED) ? 1 : 0;
	mpi3mr_encldev_add_chg_evt_debug(mrioc, encl_pg0, added);

	encl_handle = le16_to_cpu(encl_pg0->enclosure_handle);
	present = ((le16_to_cpu(encl_pg0->flags) &
	    MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4);

	if (encl_handle)
		enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc,
		    encl_handle);
	if (!enclosure_dev && present) {
		enclosure_dev =
		    kzalloc(sizeof(struct mpi3mr_enclosure_node),
		    GFP_KERNEL);
		if (!enclosure_dev)
			return;
		list_add_tail(&enclosure_dev->list,
		    &mrioc->enclosure_list);
	}
	if (enclosure_dev) {
		if (!present) {
			list_del(&enclosure_dev->list);
			kfree(enclosure_dev);
		} else
			memcpy(&enclosure_dev->pg0, encl_pg0,
			    sizeof(enclosure_dev->pg0));
	}
}

/**
 * mpi3mr_sastopochg_evt_debug - SASTopoChange details
 * @mrioc: Adapter instance reference
 * @event_data: SAS topology change list event data
 *
 * Prints information about the SAS topology change event.
 *
 * Return: Nothing.
 */
static void
mpi3mr_sastopochg_evt_debug(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_data_sas_topology_change_list *event_data)
{
	int i;
	u16 handle;
	u8 reason_code, phy_number;
	char *status_str = NULL;
	u8 link_rate, prev_link_rate;

	switch (event_data->exp_status) {
	case MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
		status_str = "remove";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_RESPONDING:
		status_str = "responding";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
		status_str = "remove delay";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER:
		status_str = "direct attached";
		break;
	default:
		status_str = "unknown status";
		break;
	}
	ioc_info(mrioc, "%s :sas topology change: (%s)\n",
	    __func__, status_str);
	ioc_info(mrioc,
	    "%s :\texpander_handle(0x%04x), port(%d), enclosure_handle(0x%04x) start_phy(%02d), num_entries(%d)\n",
	    __func__, le16_to_cpu(event_data->expander_dev_handle),
	    event_data->io_unit_port,
	    le16_to_cpu(event_data->enclosure_handle),
	    event_data->start_phy_num, event_data->num_entries);
	for (i = 0; i < event_data->num_entries; i++) {
		handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		phy_number = event_data->start_phy_num + i;
		reason_code = event_data->phy_entry[i].status &
		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
		switch (reason_code) {
		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
			status_str = "target remove";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
			status_str = "delay target remove";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
			status_str = "link status change";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
			status_str = "link status no change";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
			status_str = "target responding";
			break;
		default:
			status_str = "unknown";
			break;
		}
		link_rate = event_data->phy_entry[i].link_rate >> 4;
		prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
		ioc_info(mrioc,
		    "%s :\tphy(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
		    __func__, phy_number, handle, status_str, link_rate,
		    prev_link_rate);
	}
}

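/*
 * As decoded above, each phy_entry's link_rate field packs two values:
 * the current negotiated rate in the upper nibble and the previous rate
 * in the lower nibble. mpi3mr_sastopochg_evt_bh() below uses the same
 * decoding to skip link updates when the rate has not changed.
 */
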
/**
 * mpi3mr_sastopochg_evt_bh - SASTopologyChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Prints information about the SAS topology change event and
 * for "not responding" event code, removes the device from the
 * upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_sastopochg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	struct mpi3_event_data_sas_topology_change_list *event_data =
	    (struct mpi3_event_data_sas_topology_change_list *)fwevt->event_data;
	int i;
	u16 handle;
	u8 reason_code;
	u64 exp_sas_address = 0, parent_sas_address = 0;
	struct mpi3mr_hba_port *hba_port = NULL;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3mr_sas_node *sas_expander = NULL;
	unsigned long flags;
	u8 link_rate, prev_link_rate, parent_phy_number;

	mpi3mr_sastopochg_evt_debug(mrioc, event_data);
	if (mrioc->sas_transport_enabled) {
		hba_port = mpi3mr_get_hba_port_by_id(mrioc,
		    event_data->io_unit_port);
		if (le16_to_cpu(event_data->expander_dev_handle)) {
			spin_lock_irqsave(&mrioc->sas_node_lock, flags);
			sas_expander = __mpi3mr_expander_find_by_handle(mrioc,
			    le16_to_cpu(event_data->expander_dev_handle));
			if (sas_expander) {
				exp_sas_address = sas_expander->sas_address;
				hba_port = sas_expander->hba_port;
			}
			spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
			parent_sas_address = exp_sas_address;
		} else
			parent_sas_address = mrioc->sas_hba.sas_address;
	}

	for (i = 0; i < event_data->num_entries; i++) {
		if (fwevt->discard)
			return;
		handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
		if (!tgtdev)
			continue;

		reason_code = event_data->phy_entry[i].status &
		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;

		switch (reason_code) {
		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
			if (tgtdev->host_exposed)
				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
			mpi3mr_tgtdev_put(tgtdev);
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
		case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
		{
			if (!mrioc->sas_transport_enabled || tgtdev->non_stl
			    || tgtdev->is_hidden)
				break;
			link_rate = event_data->phy_entry[i].link_rate >> 4;
			prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
			if (link_rate == prev_link_rate)
				break;
			if (!parent_sas_address)
				break;
			parent_phy_number = event_data->start_phy_num + i;
			mpi3mr_update_links(mrioc, parent_sas_address, handle,
			    parent_phy_number, link_rate, hba_port);
			break;
		}
		default:
			break;
		}
		if (tgtdev)
			mpi3mr_tgtdev_put(tgtdev);
	}

	if (mrioc->sas_transport_enabled && (event_data->exp_status ==
	    MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING)) {
		if (sas_expander)
			mpi3mr_expander_remove(mrioc, exp_sas_address,
			    hba_port);
	}
}

/**
 * mpi3mr_pcietopochg_evt_debug - PCIeTopoChange details
 * @mrioc: Adapter instance reference
 * @event_data: PCIe topology change list event data
 *
 * Prints information about the PCIe topology change event.
 *
 * Return: Nothing.
 */
static void
mpi3mr_pcietopochg_evt_debug(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_data_pcie_topology_change_list *event_data)
{
	int i;
	u16 handle;
	u16 reason_code;
	u8 port_number;
	char *status_str = NULL;
	u8 link_rate, prev_link_rate;

	switch (event_data->switch_status) {
	case MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
		status_str = "remove";
		break;
	case MPI3_EVENT_PCIE_TOPO_SS_RESPONDING:
		status_str = "responding";
		break;
	case MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
		status_str = "remove delay";
		break;
	case MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH:
		status_str = "direct attached";
		break;
	default:
		status_str = "unknown status";
		break;
	}
	ioc_info(mrioc, "%s :pcie topology change: (%s)\n",
	    __func__, status_str);
	ioc_info(mrioc,
	    "%s :\tswitch_handle(0x%04x), enclosure_handle(0x%04x) start_port(%02d), num_entries(%d)\n",
	    __func__, le16_to_cpu(event_data->switch_dev_handle),
	    le16_to_cpu(event_data->enclosure_handle),
	    event_data->start_port_num, event_data->num_entries);
	for (i = 0; i < event_data->num_entries; i++) {
		handle =
		    le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		port_number = event_data->start_port_num + i;
		reason_code = event_data->port_entry[i].port_status;
		switch (reason_code) {
		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			status_str = "target remove";
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
			status_str = "delay target remove";
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
			status_str = "link status change";
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_NO_CHANGE:
			status_str = "link status no change";
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
			status_str = "target responding";
			break;
		default:
			status_str = "unknown";
			break;
		}
		link_rate = event_data->port_entry[i].current_port_info &
		    MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
		prev_link_rate = event_data->port_entry[i].previous_port_info &
		    MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
		ioc_info(mrioc,
		    "%s :\tport(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
		    __func__, port_number, handle, status_str, link_rate,
		    prev_link_rate);
	}
}

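/*
 * Unlike the SAS event, the PCIe port entries report rates in two
 * separate fields: the low bits of current_port_info and
 * previous_port_info (masked with MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK)
 * carry the new and old link rates respectively.
 */
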
/**
 * mpi3mr_pcietopochg_evt_bh - PCIeTopologyChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Prints information about the PCIe topology change event and
 * for "not responding" event code, removes the device from the
 * upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	struct mpi3_event_data_pcie_topology_change_list *event_data =
	    (struct mpi3_event_data_pcie_topology_change_list *)fwevt->event_data;
	int i;
	u16 handle;
	u8 reason_code;
	struct mpi3mr_tgt_dev *tgtdev = NULL;

	mpi3mr_pcietopochg_evt_debug(mrioc, event_data);

	for (i = 0; i < event_data->num_entries; i++) {
		if (fwevt->discard)
			return;
		handle =
		    le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
		if (!tgtdev)
			continue;

		reason_code = event_data->port_entry[i].port_status;

		switch (reason_code) {
		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			if (tgtdev->host_exposed)
				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
			mpi3mr_tgtdev_put(tgtdev);
			break;
		default:
			break;
		}
		if (tgtdev)
			mpi3mr_tgtdev_put(tgtdev);
	}
}

/**
 * mpi3mr_logdata_evt_bh - Log data event bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Extracts the event data and calls application interfacing
 * function to process the event further.
 *
 * Return: Nothing.
 */
static void mpi3mr_logdata_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	mpi3mr_app_save_logdata(mrioc, fwevt->event_data,
	    fwevt->event_data_size);
}

/**
 * mpi3mr_update_sdev_qd - Update SCSI device queue depth
 * @sdev: SCSI device reference
 * @data: Queue depth reference
 *
 * This is an iterator function called for each SCSI device in a
 * target to update the QD of each SCSI device.
 *
 * Return: Nothing.
 */
static void mpi3mr_update_sdev_qd(struct scsi_device *sdev, void *data)
{
	u16 *q_depth = (u16 *)data;

	scsi_change_queue_depth(sdev, (int)*q_depth);
	sdev->max_queue_depth = sdev->queue_depth;
}

/**
 * mpi3mr_set_qd_for_all_vd_in_tg - Set QD for TG VDs
 * @mrioc: Adapter instance reference
 * @tg: Throttle group information pointer
 *
 * Accessor to reduce QD for each device associated with the
 * given throttle group.
 *
 * Return: None.
 */
static void mpi3mr_set_qd_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_throttle_group_info *tg)
{
	unsigned long flags;
	struct mpi3mr_tgt_dev *tgtdev;
	struct mpi3mr_stgt_priv_data *tgt_priv;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		if (tgtdev->starget && tgtdev->starget->hostdata) {
			tgt_priv = tgtdev->starget->hostdata;
			if (tgt_priv->throttle_group == tg) {
				dprint_event_bh(mrioc,
				    "updating qd due to throttling for persist_id(%d) original_qd(%d), reduced_qd (%d)\n",
				    tgt_priv->perst_id, tgtdev->q_depth,
				    tg->modified_qd);
				starget_for_each_device(tgtdev->starget,
				    (void *)&tg->modified_qd,
				    mpi3mr_update_sdev_qd);
			}
		}
	}
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}

/**
 * mpi3mr_fwevt_bh - Firmware event bottomhalf handler
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Identifies the firmware event and calls the corresponding
 * bottom half handler and sends event acknowledgment if
 * required.
 *
 * Return: Nothing.
 */
static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	struct mpi3_device_page0 *dev_pg0 = NULL;
	u16 perst_id, handle, dev_info;
	struct mpi3_device0_sas_sata_format *sasinf = NULL;

	mpi3mr_fwevt_del_from_list(mrioc, fwevt);
	mrioc->current_event = fwevt;

	if (mrioc->stop_drv_processing)
		goto out;

	if (mrioc->unrecoverable) {
		dprint_event_bh(mrioc,
		    "ignoring event(0x%02x) in bottom half handler due to unrecoverable controller\n",
		    fwevt->event_id);
		goto out;
	}

	if (!fwevt->process_evt)
		goto evt_ack;

	switch (fwevt->event_id) {
	case MPI3_EVENT_DEVICE_ADDED:
	{
		dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data;
		perst_id = le16_to_cpu(dev_pg0->persistent_id);
		handle = le16_to_cpu(dev_pg0->dev_handle);
		if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID)
			mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
		else if (mrioc->sas_transport_enabled &&
		    (dev_pg0->device_form == MPI3_DEVICE_DEVFORM_SAS_SATA)) {
			sasinf = &dev_pg0->device_specific.sas_sata_format;
			dev_info = le16_to_cpu(sasinf->device_info);
			if (!mrioc->sas_hba.num_phys)
				mpi3mr_sas_host_add(mrioc);
			else
				mpi3mr_sas_host_refresh(mrioc);

			if (mpi3mr_is_expander_device(dev_info))
				mpi3mr_expander_add(mrioc, handle);
		}
		break;
	}
	case MPI3_EVENT_DEVICE_INFO_CHANGED:
	{
		dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data;
		perst_id = le16_to_cpu(dev_pg0->persistent_id);
		if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID)
			mpi3mr_devinfochg_evt_bh(mrioc, dev_pg0);
		break;
	}
	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
	{
		mpi3mr_devstatuschg_evt_bh(mrioc, fwevt);
		break;
	}
	case MPI3_EVENT_ENCL_DEVICE_ADDED:
	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
	{
		mpi3mr_encldev_add_chg_evt_bh(mrioc, fwevt);
		break;
	}
	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
	{
		mpi3mr_sastopochg_evt_bh(mrioc, fwevt);
		break;
	}
	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
	{
		mpi3mr_pcietopochg_evt_bh(mrioc, fwevt);
		break;
	}
	case MPI3_EVENT_LOG_DATA:
	{
		mpi3mr_logdata_evt_bh(mrioc, fwevt);
		break;
	}
	case MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION:
	{
		struct mpi3mr_throttle_group_info *tg;

		tg = *(struct mpi3mr_throttle_group_info **)fwevt->event_data;
		dprint_event_bh(mrioc,
		    "qd reduction event processed for tg_id(%d) reduction_needed(%d)\n",
		    tg->id, tg->need_qd_reduction);
		if (tg->need_qd_reduction) {
			mpi3mr_set_qd_for_all_vd_in_tg(mrioc, tg);
			tg->need_qd_reduction = 0;
		}
		break;
	}
	case MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH:
	{
		while (mrioc->device_refresh_on)
			msleep(500);

		dprint_event_bh(mrioc,
		    "scan for non responding and newly added devices after soft reset started\n");
		if (mrioc->sas_transport_enabled) {
			mpi3mr_refresh_sas_ports(mrioc);
			mpi3mr_refresh_expanders(mrioc);
		}
		mpi3mr_rfresh_tgtdevs(mrioc);
		ioc_info(mrioc,
		    "scan for non responding and newly added devices after soft reset completed\n");
		break;
	}
	default:
		break;
	}

evt_ack:
	if (fwevt->send_ack)
		mpi3mr_process_event_ack(mrioc, fwevt->event_id,
		    fwevt->evt_ctx);
out:
	/* Put fwevt reference count to neutralize kref_init increment */
	mpi3mr_fwevt_put(fwevt);
	mrioc->current_event = NULL;
}

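/*
 * Lifecycle note for the bottom half above: mrioc->current_event is set
 * for the duration of the handler so that a concurrent controller reset
 * (see mpi3mr_cleanup_fwevt_list()) can flag the in-flight event for
 * discard instead of cancelling its work item, and it is cleared again
 * after the final reference is dropped.
 */
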
1942 } 1943 case MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION: 1944 { 1945 struct mpi3mr_throttle_group_info *tg; 1946 1947 tg = *(struct mpi3mr_throttle_group_info **)fwevt->event_data; 1948 dprint_event_bh(mrioc, 1949 "qd reduction event processed for tg_id(%d) reduction_needed(%d)\n", 1950 tg->id, tg->need_qd_reduction); 1951 if (tg->need_qd_reduction) { 1952 mpi3mr_set_qd_for_all_vd_in_tg(mrioc, tg); 1953 tg->need_qd_reduction = 0; 1954 } 1955 break; 1956 } 1957 case MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH: 1958 { 1959 while (mrioc->device_refresh_on) 1960 msleep(500); 1961 1962 dprint_event_bh(mrioc, 1963 "scan for non responding and newly added devices after soft reset started\n"); 1964 if (mrioc->sas_transport_enabled) { 1965 mpi3mr_refresh_sas_ports(mrioc); 1966 mpi3mr_refresh_expanders(mrioc); 1967 } 1968 mpi3mr_rfresh_tgtdevs(mrioc); 1969 ioc_info(mrioc, 1970 "scan for non responding and newly added devices after soft reset completed\n"); 1971 break; 1972 } 1973 default: 1974 break; 1975 } 1976 1977 evt_ack: 1978 if (fwevt->send_ack) 1979 mpi3mr_process_event_ack(mrioc, fwevt->event_id, 1980 fwevt->evt_ctx); 1981 out: 1982 /* Put fwevt reference count to neutralize kref_init increment */ 1983 mpi3mr_fwevt_put(fwevt); 1984 mrioc->current_event = NULL; 1985 } 1986 1987 /** 1988 * mpi3mr_fwevt_worker - Firmware event worker 1989 * @work: Work struct containing firmware event 1990 * 1991 * Extracts the firmware event and calls mpi3mr_fwevt_bh. 1992 * 1993 * Return: Nothing. 1994 */ 1995 static void mpi3mr_fwevt_worker(struct work_struct *work) 1996 { 1997 struct mpi3mr_fwevt *fwevt = container_of(work, struct mpi3mr_fwevt, 1998 work); 1999 mpi3mr_fwevt_bh(fwevt->mrioc, fwevt); 2000 /* 2001 * Put fwevt reference count after 2002 * dequeuing it from worker queue 2003 */ 2004 mpi3mr_fwevt_put(fwevt); 2005 } 2006 2007 /** 2008 * mpi3mr_create_tgtdev - Create and add a target device 2009 * @mrioc: Adapter instance reference 2010 * @dev_pg0: Device Page 0 data 2011 * 2012 * If the device specified by the device page 0 data is not 2013 * present in the driver's internal list, allocate the memory 2014 * for the device, populate the data and add to the list, else 2015 * update the device data. The key is persistent ID. 2016 * 2017 * Return: 0 on success, -ENOMEM on memory allocation failure 2018 */ 2019 static int mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc, 2020 struct mpi3_device_page0 *dev_pg0) 2021 { 2022 int retval = 0; 2023 struct mpi3mr_tgt_dev *tgtdev = NULL; 2024 u16 perst_id = 0; 2025 unsigned long flags; 2026 2027 perst_id = le16_to_cpu(dev_pg0->persistent_id); 2028 if (perst_id == MPI3_DEVICE0_PERSISTENTID_INVALID) 2029 return retval; 2030 2031 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 2032 tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id); 2033 if (tgtdev) 2034 tgtdev->state = MPI3MR_DEV_CREATED; 2035 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 2036 2037 if (tgtdev) { 2038 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true); 2039 mpi3mr_tgtdev_put(tgtdev); 2040 } else { 2041 tgtdev = mpi3mr_alloc_tgtdev(); 2042 if (!tgtdev) 2043 return -ENOMEM; 2044 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true); 2045 mpi3mr_tgtdev_add_to_list(mrioc, tgtdev); 2046 } 2047 2048 return retval; 2049 } 2050 2051 /** 2052 * mpi3mr_flush_delayed_cmd_lists - Flush pending commands 2053 * @mrioc: Adapter instance reference 2054 * 2055 * Flush pending commands in the delayed lists due to a 2056 * controller reset or driver removal as a cleanup. 
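 *
 * Illustrative summary of what is flushed here (both lists are
 * populated elsewhere in this file):
 *   delayed_rmhs_list        - device removal handshakes queued when all
 *                              MPI3MR_NUM_DEVRMCMD trackers were in use
 *   delayed_evtack_cmds_list - event acknowledgments queued when all
 *                              MPI3MR_NUM_EVTACKCMD trackers were in use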
2057 * 2058 * Return: Nothing 2059 */ 2060 void mpi3mr_flush_delayed_cmd_lists(struct mpi3mr_ioc *mrioc) 2061 { 2062 struct delayed_dev_rmhs_node *_rmhs_node; 2063 struct delayed_evt_ack_node *_evtack_node; 2064 2065 dprint_reset(mrioc, "flushing delayed dev_remove_hs commands\n"); 2066 while (!list_empty(&mrioc->delayed_rmhs_list)) { 2067 _rmhs_node = list_entry(mrioc->delayed_rmhs_list.next, 2068 struct delayed_dev_rmhs_node, list); 2069 list_del(&_rmhs_node->list); 2070 kfree(_rmhs_node); 2071 } 2072 dprint_reset(mrioc, "flushing delayed event ack commands\n"); 2073 while (!list_empty(&mrioc->delayed_evtack_cmds_list)) { 2074 _evtack_node = list_entry(mrioc->delayed_evtack_cmds_list.next, 2075 struct delayed_evt_ack_node, list); 2076 list_del(&_evtack_node->list); 2077 kfree(_evtack_node); 2078 } 2079 } 2080 2081 /** 2082 * mpi3mr_dev_rmhs_complete_iou - Device removal IOUC completion 2083 * @mrioc: Adapter instance reference 2084 * @drv_cmd: Internal command tracker 2085 * 2086 * Issues a target reset TM to the firmware from the device 2087 * removal TM pend list or retries the removal handshake sequence 2088 * based on the IOU control request IOC status. 2089 * 2090 * Return: Nothing 2091 */ 2092 static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_ioc *mrioc, 2093 struct mpi3mr_drv_cmd *drv_cmd) 2094 { 2095 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN; 2096 struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL; 2097 2098 if (drv_cmd->state & MPI3MR_CMD_RESET) 2099 goto clear_drv_cmd; 2100 2101 ioc_info(mrioc, 2102 "%s :dev_rmhs_iouctrl_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x)\n", 2103 __func__, drv_cmd->dev_handle, drv_cmd->ioc_status, 2104 drv_cmd->ioc_loginfo); 2105 if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) { 2106 if (drv_cmd->retry_count < MPI3MR_DEV_RMHS_RETRY_COUNT) { 2107 drv_cmd->retry_count++; 2108 ioc_info(mrioc, 2109 "%s :dev_rmhs_iouctrl_complete: handle(0x%04x)retrying handshake retry=%d\n", 2110 __func__, drv_cmd->dev_handle, 2111 drv_cmd->retry_count); 2112 mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle, 2113 drv_cmd, drv_cmd->iou_rc); 2114 return; 2115 } 2116 ioc_err(mrioc, 2117 "%s :dev removal handshake failed after all retries: handle(0x%04x)\n", 2118 __func__, drv_cmd->dev_handle); 2119 } else { 2120 ioc_info(mrioc, 2121 "%s :dev removal handshake completed successfully: handle(0x%04x)\n", 2122 __func__, drv_cmd->dev_handle); 2123 clear_bit(drv_cmd->dev_handle, mrioc->removepend_bitmap); 2124 } 2125 2126 if (!list_empty(&mrioc->delayed_rmhs_list)) { 2127 delayed_dev_rmhs = list_entry(mrioc->delayed_rmhs_list.next, 2128 struct delayed_dev_rmhs_node, list); 2129 drv_cmd->dev_handle = delayed_dev_rmhs->handle; 2130 drv_cmd->retry_count = 0; 2131 drv_cmd->iou_rc = delayed_dev_rmhs->iou_rc; 2132 ioc_info(mrioc, 2133 "%s :dev_rmhs_iouctrl_complete: processing delayed TM: handle(0x%04x)\n", 2134 __func__, drv_cmd->dev_handle); 2135 mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle, drv_cmd, 2136 drv_cmd->iou_rc); 2137 list_del(&delayed_dev_rmhs->list); 2138 kfree(delayed_dev_rmhs); 2139 return; 2140 } 2141 2142 clear_drv_cmd: 2143 drv_cmd->state = MPI3MR_CMD_NOTUSED; 2144 drv_cmd->callback = NULL; 2145 drv_cmd->retry_count = 0; 2146 drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 2147 clear_bit(cmd_idx, mrioc->devrem_bitmap); 2148 } 2149 2150 /** 2151 * mpi3mr_dev_rmhs_complete_tm - Device removal TM completion 2152 * @mrioc: Adapter instance reference 2153 * @drv_cmd: Internal command tracker 2154 * 2155 * Issues a
target reset TM to the firmware from the device 2156 * removal TM pend list or issues an IO unit control request as 2157 * part of device removal or hidden acknowledgment handshake. 2158 * 2159 * Return: Nothing 2160 */ 2161 static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_ioc *mrioc, 2162 struct mpi3mr_drv_cmd *drv_cmd) 2163 { 2164 struct mpi3_iounit_control_request iou_ctrl; 2165 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN; 2166 struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL; 2167 int retval; 2168 2169 if (drv_cmd->state & MPI3MR_CMD_RESET) 2170 goto clear_drv_cmd; 2171 2172 if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID) 2173 tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply; 2174 2175 if (tm_reply) 2176 pr_info(IOCNAME 2177 "dev_rmhs_tr_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x), term_count(%d)\n", 2178 mrioc->name, drv_cmd->dev_handle, drv_cmd->ioc_status, 2179 drv_cmd->ioc_loginfo, 2180 le32_to_cpu(tm_reply->termination_count)); 2181 2182 pr_info(IOCNAME "Issuing IOU CTL: handle(0x%04x) dev_rmhs idx(%d)\n", 2183 mrioc->name, drv_cmd->dev_handle, cmd_idx); 2184 2185 memset(&iou_ctrl, 0, sizeof(iou_ctrl)); 2186 2187 drv_cmd->state = MPI3MR_CMD_PENDING; 2188 drv_cmd->is_waiting = 0; 2189 drv_cmd->callback = mpi3mr_dev_rmhs_complete_iou; 2190 iou_ctrl.operation = drv_cmd->iou_rc; 2191 iou_ctrl.param16[0] = cpu_to_le16(drv_cmd->dev_handle); 2192 iou_ctrl.host_tag = cpu_to_le16(drv_cmd->host_tag); 2193 iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL; 2194 2195 retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl, sizeof(iou_ctrl), 2196 1); 2197 if (retval) { 2198 pr_err(IOCNAME "Issue DevRmHsTMIOUCTL: Admin post failed\n", 2199 mrioc->name); 2200 goto clear_drv_cmd; 2201 } 2202 2203 return; 2204 clear_drv_cmd: 2205 drv_cmd->state = MPI3MR_CMD_NOTUSED; 2206 drv_cmd->callback = NULL; 2207 drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 2208 drv_cmd->retry_count = 0; 2209 clear_bit(cmd_idx, mrioc->devrem_bitmap); 2210 } 2211 2212 /** 2213 * mpi3mr_dev_rmhs_send_tm - Issue TM for device removal 2214 * @mrioc: Adapter instance reference 2215 * @handle: Device handle 2216 * @cmdparam: Internal command tracker 2217 * @iou_rc: IO unit reason code 2218 * 2219 * Issues a target reset TM to the firmware or adds it to a pend 2220 * list as part of device removal or hidden acknowledgment 2221 * handshake.
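 *
 * A rough sketch of the handshake this starts, for illustration only:
 *
 *   mpi3mr_dev_rmhs_send_tm()           - issues the target reset TM
 *    -> mpi3mr_dev_rmhs_complete_tm()   - issues the IO unit control request
 *     -> mpi3mr_dev_rmhs_complete_iou() - retries the handshake on failure or
 *                                         picks the next node from
 *                                         delayed_rmhs_list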
2222 * 2223 * Return: Nothing 2224 */ 2225 static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle, 2226 struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc) 2227 { 2228 struct mpi3_scsi_task_mgmt_request tm_req; 2229 int retval = 0; 2230 u16 cmd_idx = MPI3MR_NUM_DEVRMCMD; 2231 u8 retrycount = 5; 2232 struct mpi3mr_drv_cmd *drv_cmd = cmdparam; 2233 struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL; 2234 struct mpi3mr_tgt_dev *tgtdev = NULL; 2235 unsigned long flags; 2236 2237 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 2238 tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle); 2239 if (tgtdev && (iou_rc == MPI3_CTRL_OP_REMOVE_DEVICE)) 2240 tgtdev->state = MPI3MR_DEV_REMOVE_HS_STARTED; 2241 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 2242 2243 if (drv_cmd) 2244 goto issue_cmd; 2245 do { 2246 cmd_idx = find_first_zero_bit(mrioc->devrem_bitmap, 2247 MPI3MR_NUM_DEVRMCMD); 2248 if (cmd_idx < MPI3MR_NUM_DEVRMCMD) { 2249 if (!test_and_set_bit(cmd_idx, mrioc->devrem_bitmap)) 2250 break; 2251 cmd_idx = MPI3MR_NUM_DEVRMCMD; 2252 } 2253 } while (retrycount--); 2254 2255 if (cmd_idx >= MPI3MR_NUM_DEVRMCMD) { 2256 delayed_dev_rmhs = kzalloc(sizeof(*delayed_dev_rmhs), 2257 GFP_ATOMIC); 2258 if (!delayed_dev_rmhs) 2259 return; 2260 INIT_LIST_HEAD(&delayed_dev_rmhs->list); 2261 delayed_dev_rmhs->handle = handle; 2262 delayed_dev_rmhs->iou_rc = iou_rc; 2263 list_add_tail(&delayed_dev_rmhs->list, 2264 &mrioc->delayed_rmhs_list); 2265 ioc_info(mrioc, "%s :DevRmHs: tr:handle(0x%04x) is postponed\n", 2266 __func__, handle); 2267 return; 2268 } 2269 drv_cmd = &mrioc->dev_rmhs_cmds[cmd_idx]; 2270 2271 issue_cmd: 2272 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN; 2273 ioc_info(mrioc, 2274 "%s :Issuing TR TM: for devhandle 0x%04x with dev_rmhs %d\n", 2275 __func__, handle, cmd_idx); 2276 2277 memset(&tm_req, 0, sizeof(tm_req)); 2278 if (drv_cmd->state & MPI3MR_CMD_PENDING) { 2279 ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__); 2280 goto out; 2281 } 2282 drv_cmd->state = MPI3MR_CMD_PENDING; 2283 drv_cmd->is_waiting = 0; 2284 drv_cmd->callback = mpi3mr_dev_rmhs_complete_tm; 2285 drv_cmd->dev_handle = handle; 2286 drv_cmd->iou_rc = iou_rc; 2287 tm_req.dev_handle = cpu_to_le16(handle); 2288 tm_req.task_type = MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET; 2289 tm_req.host_tag = cpu_to_le16(drv_cmd->host_tag); 2290 tm_req.task_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INVALID); 2291 tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT; 2292 2293 set_bit(handle, mrioc->removepend_bitmap); 2294 retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1); 2295 if (retval) { 2296 ioc_err(mrioc, "%s :Issue DevRmHsTM: Admin Post failed\n", 2297 __func__); 2298 goto out_failed; 2299 } 2300 out: 2301 return; 2302 out_failed: 2303 drv_cmd->state = MPI3MR_CMD_NOTUSED; 2304 drv_cmd->callback = NULL; 2305 drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 2306 drv_cmd->retry_count = 0; 2307 clear_bit(cmd_idx, mrioc->devrem_bitmap); 2308 } 2309 2310 /** 2311 * mpi3mr_complete_evt_ack - event ack request completion 2312 * @mrioc: Adapter instance reference 2313 * @drv_cmd: Internal command tracker 2314 * 2315 * This is the completion handler for the non-blocking event 2316 * acknowledgment sent to the firmware and it issues any 2317 * pending event acknowledgment request.
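 *
 * Sketch of the delayed ack chaining, for illustration:
 *
 *   mpi3mr_send_event_ack()   - no free tracker, so the ack is queued on
 *                               delayed_evtack_cmds_list
 *   mpi3mr_complete_evt_ack() - a prior ack completes and the queued ack is
 *                               re-issued with the just-freed tracker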
2318 * 2319 * Return: Nothing 2320 */ 2321 static void mpi3mr_complete_evt_ack(struct mpi3mr_ioc *mrioc, 2322 struct mpi3mr_drv_cmd *drv_cmd) 2323 { 2324 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN; 2325 struct delayed_evt_ack_node *delayed_evtack = NULL; 2326 2327 if (drv_cmd->state & MPI3MR_CMD_RESET) 2328 goto clear_drv_cmd; 2329 2330 if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) { 2331 dprint_event_th(mrioc, 2332 "immediate event ack failed with ioc_status(0x%04x) log_info(0x%08x)\n", 2333 (drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 2334 drv_cmd->ioc_loginfo); 2335 } 2336 2337 if (!list_empty(&mrioc->delayed_evtack_cmds_list)) { 2338 delayed_evtack = 2339 list_entry(mrioc->delayed_evtack_cmds_list.next, 2340 struct delayed_evt_ack_node, list); 2341 mpi3mr_send_event_ack(mrioc, delayed_evtack->event, drv_cmd, 2342 delayed_evtack->event_ctx); 2343 list_del(&delayed_evtack->list); 2344 kfree(delayed_evtack); 2345 return; 2346 } 2347 clear_drv_cmd: 2348 drv_cmd->state = MPI3MR_CMD_NOTUSED; 2349 drv_cmd->callback = NULL; 2350 clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap); 2351 } 2352 2353 /** 2354 * mpi3mr_send_event_ack - Issue event acknowledgment request 2355 * @mrioc: Adapter instance reference 2356 * @event: MPI3 event id 2357 * @cmdparam: Internal command tracker 2358 * @event_ctx: event context 2359 * 2360 * Issues an event acknowledgment request to the firmware if there 2361 * is a free command to send the event ack, else adds it to a pend 2362 * list so that it will be processed on completion of a prior 2363 * event acknowledgment. 2364 * 2365 * Return: Nothing 2366 */ 2367 static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event, 2368 struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx) 2369 { 2370 struct mpi3_event_ack_request evtack_req; 2371 int retval = 0; 2372 u8 retrycount = 5; 2373 u16 cmd_idx = MPI3MR_NUM_EVTACKCMD; 2374 struct mpi3mr_drv_cmd *drv_cmd = cmdparam; 2375 struct delayed_evt_ack_node *delayed_evtack = NULL; 2376 2377 if (drv_cmd) { 2378 dprint_event_th(mrioc, 2379 "sending delayed event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n", 2380 event, event_ctx); 2381 goto issue_cmd; 2382 } 2383 dprint_event_th(mrioc, 2384 "sending event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n", 2385 event, event_ctx); 2386 do { 2387 cmd_idx = find_first_zero_bit(mrioc->evtack_cmds_bitmap, 2388 MPI3MR_NUM_EVTACKCMD); 2389 if (cmd_idx < MPI3MR_NUM_EVTACKCMD) { 2390 if (!test_and_set_bit(cmd_idx, 2391 mrioc->evtack_cmds_bitmap)) 2392 break; 2393 cmd_idx = MPI3MR_NUM_EVTACKCMD; 2394 } 2395 } while (retrycount--); 2396 2397 if (cmd_idx >= MPI3MR_NUM_EVTACKCMD) { 2398 delayed_evtack = kzalloc(sizeof(*delayed_evtack), 2399 GFP_ATOMIC); 2400 if (!delayed_evtack) 2401 return; 2402 INIT_LIST_HEAD(&delayed_evtack->list); 2403 delayed_evtack->event = event; 2404 delayed_evtack->event_ctx = event_ctx; 2405 list_add_tail(&delayed_evtack->list, 2406 &mrioc->delayed_evtack_cmds_list); 2407 dprint_event_th(mrioc, 2408 "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is postponed\n", 2409 event, event_ctx); 2410 return; 2411 } 2412 drv_cmd = &mrioc->evtack_cmds[cmd_idx]; 2413 2414 issue_cmd: 2415 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN; 2416 2417 memset(&evtack_req, 0, sizeof(evtack_req)); 2418 if (drv_cmd->state & MPI3MR_CMD_PENDING) { 2419 dprint_event_th(mrioc, 2420 "sending event ack failed due to command in use\n"); 2421 goto out; 2422 } 2423 drv_cmd->state = MPI3MR_CMD_PENDING; 2424
drv_cmd->is_waiting = 0; 2425 drv_cmd->callback = mpi3mr_complete_evt_ack; 2426 evtack_req.host_tag = cpu_to_le16(drv_cmd->host_tag); 2427 evtack_req.function = MPI3_FUNCTION_EVENT_ACK; 2428 evtack_req.event = event; 2429 evtack_req.event_context = cpu_to_le32(event_ctx); 2430 retval = mpi3mr_admin_request_post(mrioc, &evtack_req, 2431 sizeof(evtack_req), 1); 2432 if (retval) { 2433 dprint_event_th(mrioc, 2434 "posting event ack request is failed\n"); 2435 goto out_failed; 2436 } 2437 2438 dprint_event_th(mrioc, 2439 "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is posted\n", 2440 event, event_ctx); 2441 out: 2442 return; 2443 out_failed: 2444 drv_cmd->state = MPI3MR_CMD_NOTUSED; 2445 drv_cmd->callback = NULL; 2446 clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap); 2447 } 2448 2449 /** 2450 * mpi3mr_pcietopochg_evt_th - PCIETopologyChange evt tophalf 2451 * @mrioc: Adapter instance reference 2452 * @event_reply: event data 2453 * 2454 * Checks the reason code and, based on that, either blocks I/O 2455 * to the device, or unblocks I/O to the device, or starts the device 2456 * removal handshake with the firmware with the reason as remove for 2457 * PCIe devices. 2458 * 2459 * Return: Nothing 2460 */ 2461 static void mpi3mr_pcietopochg_evt_th(struct mpi3mr_ioc *mrioc, 2462 struct mpi3_event_notification_reply *event_reply) 2463 { 2464 struct mpi3_event_data_pcie_topology_change_list *topo_evt = 2465 (struct mpi3_event_data_pcie_topology_change_list *)event_reply->event_data; 2466 int i; 2467 u16 handle; 2468 u8 reason_code; 2469 struct mpi3mr_tgt_dev *tgtdev = NULL; 2470 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 2471 2472 for (i = 0; i < topo_evt->num_entries; i++) { 2473 handle = le16_to_cpu(topo_evt->port_entry[i].attached_dev_handle); 2474 if (!handle) 2475 continue; 2476 reason_code = topo_evt->port_entry[i].port_status; 2477 scsi_tgt_priv_data = NULL; 2478 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 2479 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) 2480 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 2481 tgtdev->starget->hostdata; 2482 switch (reason_code) { 2483 case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING: 2484 if (scsi_tgt_priv_data) { 2485 scsi_tgt_priv_data->dev_removed = 1; 2486 scsi_tgt_priv_data->dev_removedelay = 0; 2487 atomic_set(&scsi_tgt_priv_data->block_io, 0); 2488 } 2489 mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL, 2490 MPI3_CTRL_OP_REMOVE_DEVICE); 2491 break; 2492 case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING: 2493 if (scsi_tgt_priv_data) { 2494 scsi_tgt_priv_data->dev_removedelay = 1; 2495 atomic_inc(&scsi_tgt_priv_data->block_io); 2496 } 2497 break; 2498 case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING: 2499 if (scsi_tgt_priv_data && 2500 scsi_tgt_priv_data->dev_removedelay) { 2501 scsi_tgt_priv_data->dev_removedelay = 0; 2502 atomic_dec_if_positive 2503 (&scsi_tgt_priv_data->block_io); 2504 } 2505 break; 2506 case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED: 2507 default: 2508 break; 2509 } 2510 if (tgtdev) 2511 mpi3mr_tgtdev_put(tgtdev); 2512 } 2513 } 2514 2515 /** 2516 * mpi3mr_sastopochg_evt_th - SASTopologyChange evt tophalf 2517 * @mrioc: Adapter instance reference 2518 * @event_reply: event data 2519 * 2520 * Checks the reason code and, based on that, either blocks I/O 2521 * to the device, or unblocks I/O to the device, or starts the device 2522 * removal handshake with the firmware with the reason as remove for 2523 * SAS/SATA devices.
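 *
 * Condensed view of the handled reason codes, for illustration:
 *
 *   MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING  - mark the device
 *       removed and start the removal handshake
 *   MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING - block I/O to the device
 *   MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING           - unblock I/O if the
 *       device was delay-blocked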
2524 * 2525 * Return: Nothing 2526 */ 2527 static void mpi3mr_sastopochg_evt_th(struct mpi3mr_ioc *mrioc, 2528 struct mpi3_event_notification_reply *event_reply) 2529 { 2530 struct mpi3_event_data_sas_topology_change_list *topo_evt = 2531 (struct mpi3_event_data_sas_topology_change_list *)event_reply->event_data; 2532 int i; 2533 u16 handle; 2534 u8 reason_code; 2535 struct mpi3mr_tgt_dev *tgtdev = NULL; 2536 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 2537 2538 for (i = 0; i < topo_evt->num_entries; i++) { 2539 handle = le16_to_cpu(topo_evt->phy_entry[i].attached_dev_handle); 2540 if (!handle) 2541 continue; 2542 reason_code = topo_evt->phy_entry[i].status & 2543 MPI3_EVENT_SAS_TOPO_PHY_RC_MASK; 2544 scsi_tgt_priv_data = NULL; 2545 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 2546 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) 2547 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 2548 tgtdev->starget->hostdata; 2549 switch (reason_code) { 2550 case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING: 2551 if (scsi_tgt_priv_data) { 2552 scsi_tgt_priv_data->dev_removed = 1; 2553 scsi_tgt_priv_data->dev_removedelay = 0; 2554 atomic_set(&scsi_tgt_priv_data->block_io, 0); 2555 } 2556 mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL, 2557 MPI3_CTRL_OP_REMOVE_DEVICE); 2558 break; 2559 case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING: 2560 if (scsi_tgt_priv_data) { 2561 scsi_tgt_priv_data->dev_removedelay = 1; 2562 atomic_inc(&scsi_tgt_priv_data->block_io); 2563 } 2564 break; 2565 case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING: 2566 if (scsi_tgt_priv_data && 2567 scsi_tgt_priv_data->dev_removedelay) { 2568 scsi_tgt_priv_data->dev_removedelay = 0; 2569 atomic_dec_if_positive 2570 (&scsi_tgt_priv_data->block_io); 2571 } 2572 break; 2573 case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED: 2574 default: 2575 break; 2576 } 2577 if (tgtdev) 2578 mpi3mr_tgtdev_put(tgtdev); 2579 } 2580 } 2581 2582 /** 2583 * mpi3mr_devstatuschg_evt_th - DeviceStatusChange evt tophalf 2584 * @mrioc: Adapter instance reference 2585 * @event_reply: event data 2586 * 2587 * Checks the reason code and, based on that, either blocks I/O 2588 * to the device, or unblocks I/O to the device, or starts the device 2589 * removal handshake with the firmware with the reason as 2590 * remove/hide acknowledgment.
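 *
 * Condensed view of the MPI3_EVENT_DEV_STAT_RC_* handling, for
 * illustration:
 *
 *   INT_DEVICE_RESET_STRT / INT_IT_NEXUS_RESET_STRT - block I/O
 *   INT_DEVICE_RESET_CMP / INT_IT_NEXUS_RESET_CMP   - unblock I/O
 *   HIDDEN            - mark removed, start the hidden ack handshake
 *   VD_NOT_RESPONDING - mark removed, start the device removal handshake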
2591 * 2592 * Return: Nothing 2593 */ 2594 static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_ioc *mrioc, 2595 struct mpi3_event_notification_reply *event_reply) 2596 { 2597 u16 dev_handle = 0; 2598 u8 ublock = 0, block = 0, hide = 0, delete = 0, remove = 0; 2599 struct mpi3mr_tgt_dev *tgtdev = NULL; 2600 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 2601 struct mpi3_event_data_device_status_change *evtdata = 2602 (struct mpi3_event_data_device_status_change *)event_reply->event_data; 2603 2604 if (mrioc->stop_drv_processing) 2605 goto out; 2606 2607 dev_handle = le16_to_cpu(evtdata->dev_handle); 2608 2609 switch (evtdata->reason_code) { 2610 case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT: 2611 case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT: 2612 block = 1; 2613 break; 2614 case MPI3_EVENT_DEV_STAT_RC_HIDDEN: 2615 delete = 1; 2616 hide = 1; 2617 break; 2618 case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING: 2619 delete = 1; 2620 remove = 1; 2621 break; 2622 case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP: 2623 case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP: 2624 ublock = 1; 2625 break; 2626 default: 2627 break; 2628 } 2629 2630 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle); 2631 if (!tgtdev) 2632 goto out; 2633 if (hide) 2634 tgtdev->is_hidden = hide; 2635 if (tgtdev->starget && tgtdev->starget->hostdata) { 2636 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 2637 tgtdev->starget->hostdata; 2638 if (block) 2639 atomic_inc(&scsi_tgt_priv_data->block_io); 2640 if (delete) 2641 scsi_tgt_priv_data->dev_removed = 1; 2642 if (ublock) 2643 atomic_dec_if_positive(&scsi_tgt_priv_data->block_io); 2644 } 2645 if (remove) 2646 mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL, 2647 MPI3_CTRL_OP_REMOVE_DEVICE); 2648 if (hide) 2649 mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL, 2650 MPI3_CTRL_OP_HIDDEN_ACK); 2651 2652 out: 2653 if (tgtdev) 2654 mpi3mr_tgtdev_put(tgtdev); 2655 } 2656 2657 /** 2658 * mpi3mr_preparereset_evt_th - Prepare for reset event tophalf 2659 * @mrioc: Adapter instance reference 2660 * @event_reply: event data 2661 * 2662 * Blocks and unblocks host level I/O based on the reason code. 2663 * 2664 * Return: Nothing 2665 */ 2666 static void mpi3mr_preparereset_evt_th(struct mpi3mr_ioc *mrioc, 2667 struct mpi3_event_notification_reply *event_reply) 2668 { 2669 struct mpi3_event_data_prepare_for_reset *evtdata = 2670 (struct mpi3_event_data_prepare_for_reset *)event_reply->event_data; 2671 2672 if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_START) { 2673 dprint_event_th(mrioc, 2674 "prepare for reset event top half with rc=start\n"); 2675 if (mrioc->prepare_for_reset) 2676 return; 2677 mrioc->prepare_for_reset = 1; 2678 mrioc->prepare_for_reset_timeout_counter = 0; 2679 } else if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_ABORT) { 2680 dprint_event_th(mrioc, 2681 "prepare for reset top half with rc=abort\n"); 2682 mrioc->prepare_for_reset = 0; 2683 mrioc->prepare_for_reset_timeout_counter = 0; 2684 } 2685 if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK) 2686 == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED) 2687 mpi3mr_send_event_ack(mrioc, event_reply->event, NULL, 2688 le32_to_cpu(event_reply->event_context)); 2689 } 2690 2691 /** 2692 * mpi3mr_energypackchg_evt_th - Energy pack change evt tophalf 2693 * @mrioc: Adapter instance reference 2694 * @event_reply: event data 2695 * 2696 * Identifies the new shutdown timeout value and updates it.
2697 * 2698 * Return: Nothing 2699 */ 2700 static void mpi3mr_energypackchg_evt_th(struct mpi3mr_ioc *mrioc, 2701 struct mpi3_event_notification_reply *event_reply) 2702 { 2703 struct mpi3_event_data_energy_pack_change *evtdata = 2704 (struct mpi3_event_data_energy_pack_change *)event_reply->event_data; 2705 u16 shutdown_timeout = le16_to_cpu(evtdata->shutdown_timeout); 2706 2707 if (shutdown_timeout <= 0) { 2708 ioc_warn(mrioc, 2709 "%s :Invalid Shutdown Timeout received = %d\n", 2710 __func__, shutdown_timeout); 2711 return; 2712 } 2713 2714 ioc_info(mrioc, 2715 "%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n", 2716 __func__, mrioc->facts.shutdown_timeout, shutdown_timeout); 2717 mrioc->facts.shutdown_timeout = shutdown_timeout; 2718 } 2719 2720 /** 2721 * mpi3mr_cablemgmt_evt_th - Cable management event tophalf 2722 * @mrioc: Adapter instance reference 2723 * @event_reply: event data 2724 * 2725 * Displays cable management event details. 2726 * 2727 * Return: Nothing 2728 */ 2729 static void mpi3mr_cablemgmt_evt_th(struct mpi3mr_ioc *mrioc, 2730 struct mpi3_event_notification_reply *event_reply) 2731 { 2732 struct mpi3_event_data_cable_management *evtdata = 2733 (struct mpi3_event_data_cable_management *)event_reply->event_data; 2734 2735 switch (evtdata->status) { 2736 case MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER: 2737 { 2738 ioc_info(mrioc, "An active cable with receptacle_id %d cannot be powered.\n" 2739 "Devices connected to this cable are not detected.\n" 2740 "This cable requires %d mW of power.\n", 2741 evtdata->receptacle_id, 2742 le32_to_cpu(evtdata->active_cable_power_requirement)); 2743 break; 2744 } 2745 case MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED: 2746 { 2747 ioc_info(mrioc, "A cable with receptacle_id %d is not running at optimal speed\n", 2748 evtdata->receptacle_id); 2749 break; 2750 } 2751 default: 2752 break; 2753 } 2754 } 2755 2756 /** 2757 * mpi3mr_add_event_wait_for_device_refresh - Add Wait for Device Refresh Event 2758 * @mrioc: Adapter instance reference 2759 * 2760 * Add a driver-specific event to make sure that the driver won't process 2761 * further events until all the devices are refreshed during soft reset. 2762 * 2763 * Return: Nothing 2764 */ 2765 void mpi3mr_add_event_wait_for_device_refresh(struct mpi3mr_ioc *mrioc) 2766 { 2767 struct mpi3mr_fwevt *fwevt = NULL; 2768 2769 fwevt = mpi3mr_alloc_fwevt(0); 2770 if (!fwevt) { 2771 dprint_event_th(mrioc, 2772 "failed to schedule bottom half handler for event(0x%02x)\n", 2773 MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH); 2774 return; 2775 } 2776 fwevt->mrioc = mrioc; 2777 fwevt->event_id = MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH; 2778 fwevt->send_ack = 0; 2779 fwevt->process_evt = 1; 2780 fwevt->evt_ctx = 0; 2781 fwevt->event_data_size = 0; 2782 mpi3mr_fwevt_add_to_list(mrioc, fwevt); 2783 } 2784 2785 /** 2786 * mpi3mr_os_handle_events - Firmware event handler 2787 * @mrioc: Adapter instance reference 2788 * @event_reply: event data 2789 * 2790 * Identifies whether the event has to be handled and acknowledged, 2791 * and either processes the event in the top half and/or schedules a 2792 * bottom half through mpi3mr_fwevt_worker.
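 *
 * Sketch of the dispatch, for illustration:
 *
 *   top half (this function) - optional event specific *_evt_th() handling
 *                              in interrupt context
 *   bottom half              - mpi3mr_fwevt_worker() -> mpi3mr_fwevt_bh(),
 *                              scheduled only when process_evt_bh or ack_req
 *                              is set, using a firmware event allocated with
 *                              mpi3mr_alloc_fwevt()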
2793 * 2794 * Return: Nothing 2795 */ 2796 void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc, 2797 struct mpi3_event_notification_reply *event_reply) 2798 { 2799 u16 evt_type, sz; 2800 struct mpi3mr_fwevt *fwevt = NULL; 2801 bool ack_req = 0, process_evt_bh = 0; 2802 2803 if (mrioc->stop_drv_processing) 2804 return; 2805 2806 if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK) 2807 == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED) 2808 ack_req = 1; 2809 2810 evt_type = event_reply->event; 2811 2812 switch (evt_type) { 2813 case MPI3_EVENT_DEVICE_ADDED: 2814 { 2815 struct mpi3_device_page0 *dev_pg0 = 2816 (struct mpi3_device_page0 *)event_reply->event_data; 2817 if (mpi3mr_create_tgtdev(mrioc, dev_pg0)) 2818 ioc_err(mrioc, 2819 "%s :Failed to add device in the device add event\n", 2820 __func__); 2821 else 2822 process_evt_bh = 1; 2823 break; 2824 } 2825 case MPI3_EVENT_DEVICE_STATUS_CHANGE: 2826 { 2827 process_evt_bh = 1; 2828 mpi3mr_devstatuschg_evt_th(mrioc, event_reply); 2829 break; 2830 } 2831 case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST: 2832 { 2833 process_evt_bh = 1; 2834 mpi3mr_sastopochg_evt_th(mrioc, event_reply); 2835 break; 2836 } 2837 case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: 2838 { 2839 process_evt_bh = 1; 2840 mpi3mr_pcietopochg_evt_th(mrioc, event_reply); 2841 break; 2842 } 2843 case MPI3_EVENT_PREPARE_FOR_RESET: 2844 { 2845 mpi3mr_preparereset_evt_th(mrioc, event_reply); 2846 ack_req = 0; 2847 break; 2848 } 2849 case MPI3_EVENT_DEVICE_INFO_CHANGED: 2850 case MPI3_EVENT_LOG_DATA: 2851 case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE: 2852 case MPI3_EVENT_ENCL_DEVICE_ADDED: 2853 { 2854 process_evt_bh = 1; 2855 break; 2856 } 2857 case MPI3_EVENT_ENERGY_PACK_CHANGE: 2858 { 2859 mpi3mr_energypackchg_evt_th(mrioc, event_reply); 2860 break; 2861 } 2862 case MPI3_EVENT_CABLE_MGMT: 2863 { 2864 mpi3mr_cablemgmt_evt_th(mrioc, event_reply); 2865 break; 2866 } 2867 case MPI3_EVENT_SAS_DISCOVERY: 2868 case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR: 2869 case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE: 2870 case MPI3_EVENT_PCIE_ENUMERATION: 2871 break; 2872 default: 2873 ioc_info(mrioc, "%s :event 0x%02x is not handled\n", 2874 __func__, evt_type); 2875 break; 2876 } 2877 if (process_evt_bh || ack_req) { 2878 sz = event_reply->event_data_length * 4; 2879 fwevt = mpi3mr_alloc_fwevt(sz); 2880 if (!fwevt) { 2881 ioc_info(mrioc, "%s :failure at %s:%d/%s()!\n", 2882 __func__, __FILE__, __LINE__, __func__); 2883 return; 2884 } 2885 2886 memcpy(fwevt->event_data, event_reply->event_data, sz); 2887 fwevt->mrioc = mrioc; 2888 fwevt->event_id = evt_type; 2889 fwevt->send_ack = ack_req; 2890 fwevt->process_evt = process_evt_bh; 2891 fwevt->evt_ctx = le32_to_cpu(event_reply->event_context); 2892 mpi3mr_fwevt_add_to_list(mrioc, fwevt); 2893 } 2894 } 2895 2896 /** 2897 * mpi3mr_setup_eedp - Setup EEDP information in MPI3 SCSI IO 2898 * @mrioc: Adapter instance reference 2899 * @scmd: SCSI command reference 2900 * @scsiio_req: MPI3 SCSI IO request 2901 * 2902 * Identifies the protection information flags from the SCSI 2903 * command and sets appropriate flags in the MPI3 SCSI IO 2904 * request.
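 *
 * For example (illustrative; assumes guard and reference tag checking are
 * enabled for the command), SCSI_PROT_WRITE_STRIP resolves to:
 *
 *   eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE |
 *                MPI3_EEDPFLAGS_CHK_GUARD |
 *                MPI3_EEDPFLAGS_CHK_REF_TAG |
 *                MPI3_EEDPFLAGS_INCR_PRI_REF_TAG |
 *                MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE;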
2905 * 2906 * Return: Nothing 2907 */ 2908 static void mpi3mr_setup_eedp(struct mpi3mr_ioc *mrioc, 2909 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req) 2910 { 2911 u16 eedp_flags = 0; 2912 unsigned char prot_op = scsi_get_prot_op(scmd); 2913 2914 switch (prot_op) { 2915 case SCSI_PROT_NORMAL: 2916 return; 2917 case SCSI_PROT_READ_STRIP: 2918 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE; 2919 break; 2920 case SCSI_PROT_WRITE_INSERT: 2921 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT; 2922 break; 2923 case SCSI_PROT_READ_INSERT: 2924 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT; 2925 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 2926 break; 2927 case SCSI_PROT_WRITE_STRIP: 2928 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE; 2929 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 2930 break; 2931 case SCSI_PROT_READ_PASS: 2932 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK; 2933 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 2934 break; 2935 case SCSI_PROT_WRITE_PASS: 2936 if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) { 2937 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN; 2938 scsiio_req->sgl[0].eedp.application_tag_translation_mask = 2939 0xffff; 2940 } else 2941 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK; 2942 2943 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 2944 break; 2945 default: 2946 return; 2947 } 2948 2949 if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK) 2950 eedp_flags |= MPI3_EEDPFLAGS_CHK_GUARD; 2951 2952 if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) 2953 eedp_flags |= MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM; 2954 2955 if (scmd->prot_flags & SCSI_PROT_REF_CHECK) { 2956 eedp_flags |= MPI3_EEDPFLAGS_CHK_REF_TAG | 2957 MPI3_EEDPFLAGS_INCR_PRI_REF_TAG; 2958 scsiio_req->cdb.eedp32.primary_reference_tag = 2959 cpu_to_be32(scsi_prot_ref_tag(scmd)); 2960 } 2961 2962 if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT) 2963 eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG; 2964 2965 eedp_flags |= MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE; 2966 2967 switch (scsi_prot_interval(scmd)) { 2968 case 512: 2969 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_512; 2970 break; 2971 case 520: 2972 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_520; 2973 break; 2974 case 4080: 2975 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4080; 2976 break; 2977 case 4088: 2978 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4088; 2979 break; 2980 case 4096: 2981 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4096; 2982 break; 2983 case 4104: 2984 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4104; 2985 break; 2986 case 4160: 2987 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4160; 2988 break; 2989 default: 2990 break; 2991 } 2992 2993 scsiio_req->sgl[0].eedp.eedp_flags = cpu_to_le16(eedp_flags); 2994 scsiio_req->sgl[0].eedp.flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED; 2995 } 2996 2997 /** 2998 * mpi3mr_build_sense_buffer - Map sense information 2999 * @desc: Sense type 3000 * @buf: Sense buffer to populate 3001 * @key: Sense key 3002 * @asc: Additional sense code 3003 * @ascq: Additional sense code qualifier 3004 * 3005 * Maps the given sense information into either descriptor or 3006 * fixed format sense data. 
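 *
 * For example, desc = 0, key = ILLEGAL_REQUEST (0x05), asc = 0x10 and
 * ascq = 0x01 produce a fixed format buffer that starts as:
 *
 *   buf[0] = 0x70, buf[2] = 0x05, buf[7] = 0x0a,
 *   buf[12] = 0x10, buf[13] = 0x01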
3007 * 3008 * Return: Nothing 3009 */ 3010 static inline void mpi3mr_build_sense_buffer(int desc, u8 *buf, u8 key, 3011 u8 asc, u8 ascq) 3012 { 3013 if (desc) { 3014 buf[0] = 0x72; /* descriptor, current */ 3015 buf[1] = key; 3016 buf[2] = asc; 3017 buf[3] = ascq; 3018 buf[7] = 0; 3019 } else { 3020 buf[0] = 0x70; /* fixed, current */ 3021 buf[2] = key; 3022 buf[7] = 0xa; 3023 buf[12] = asc; 3024 buf[13] = ascq; 3025 } 3026 } 3027 3028 /** 3029 * mpi3mr_map_eedp_error - Map EEDP errors from IOC status 3030 * @scmd: SCSI command reference 3031 * @ioc_status: status of MPI3 request 3032 * 3033 * Maps the EEDP error status of the SCSI IO request to sense 3034 * data. 3035 * 3036 * Return: Nothing 3037 */ 3038 static void mpi3mr_map_eedp_error(struct scsi_cmnd *scmd, 3039 u16 ioc_status) 3040 { 3041 u8 ascq = 0; 3042 3043 switch (ioc_status) { 3044 case MPI3_IOCSTATUS_EEDP_GUARD_ERROR: 3045 ascq = 0x01; 3046 break; 3047 case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR: 3048 ascq = 0x02; 3049 break; 3050 case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR: 3051 ascq = 0x03; 3052 break; 3053 default: 3054 ascq = 0x00; 3055 break; 3056 } 3057 3058 mpi3mr_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 3059 0x10, ascq); 3060 scmd->result = (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION; 3061 } 3062 3063 /** 3064 * mpi3mr_process_op_reply_desc - reply descriptor handler 3065 * @mrioc: Adapter instance reference 3066 * @reply_desc: Operational reply descriptor 3067 * @reply_dma: placeholder for reply DMA address 3068 * @qidx: Operational queue index 3069 * 3070 * Processes the operational reply descriptor and identifies the 3071 * descriptor type. Based on the descriptor, maps the MPI3 request 3072 * status to a SCSI command status and calls the scsi_done 3073 * callback. 3074 * 3075 * Return: Nothing 3076 */ 3077 void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc, 3078 struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma, u16 qidx) 3079 { 3080 u16 reply_desc_type, host_tag = 0; 3081 u16 ioc_status = MPI3_IOCSTATUS_SUCCESS; 3082 u32 ioc_loginfo = 0; 3083 struct mpi3_status_reply_descriptor *status_desc = NULL; 3084 struct mpi3_address_reply_descriptor *addr_desc = NULL; 3085 struct mpi3_success_reply_descriptor *success_desc = NULL; 3086 struct mpi3_scsi_io_reply *scsi_reply = NULL; 3087 struct scsi_cmnd *scmd = NULL; 3088 struct scmd_priv *priv = NULL; 3089 u8 *sense_buf = NULL; 3090 u8 scsi_state = 0, scsi_status = 0, sense_state = 0; 3091 u32 xfer_count = 0, sense_count = 0, resp_data = 0; 3092 u16 dev_handle = 0xFFFF; 3093 struct scsi_sense_hdr sshdr; 3094 struct mpi3mr_stgt_priv_data *stgt_priv_data = NULL; 3095 struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL; 3096 u32 ioc_pend_data_len = 0, tg_pend_data_len = 0, data_len_blks = 0; 3097 struct mpi3mr_throttle_group_info *tg = NULL; 3098 u8 throttle_enabled_dev = 0; 3099 3100 *reply_dma = 0; 3101 reply_desc_type = le16_to_cpu(reply_desc->reply_flags) & 3102 MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK; 3103 switch (reply_desc_type) { 3104 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS: 3105 status_desc = (struct mpi3_status_reply_descriptor *)reply_desc; 3106 host_tag = le16_to_cpu(status_desc->host_tag); 3107 ioc_status = le16_to_cpu(status_desc->ioc_status); 3108 if (ioc_status & 3109 MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL) 3110 ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info); 3111 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK; 3112 break; 3113 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY: 3114 addr_desc = (struct
mpi3_address_reply_descriptor *)reply_desc; 3115 *reply_dma = le64_to_cpu(addr_desc->reply_frame_address); 3116 scsi_reply = mpi3mr_get_reply_virt_addr(mrioc, 3117 *reply_dma); 3118 if (!scsi_reply) { 3119 panic("%s: scsi_reply is NULL, this shouldn't happen\n", 3120 mrioc->name); 3121 goto out; 3122 } 3123 host_tag = le16_to_cpu(scsi_reply->host_tag); 3124 ioc_status = le16_to_cpu(scsi_reply->ioc_status); 3125 scsi_status = scsi_reply->scsi_status; 3126 scsi_state = scsi_reply->scsi_state; 3127 dev_handle = le16_to_cpu(scsi_reply->dev_handle); 3128 sense_state = (scsi_state & MPI3_SCSI_STATE_SENSE_MASK); 3129 xfer_count = le32_to_cpu(scsi_reply->transfer_count); 3130 sense_count = le32_to_cpu(scsi_reply->sense_count); 3131 resp_data = le32_to_cpu(scsi_reply->response_data); 3132 sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc, 3133 le64_to_cpu(scsi_reply->sense_data_buffer_address)); 3134 if (ioc_status & 3135 MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL) 3136 ioc_loginfo = le32_to_cpu(scsi_reply->ioc_log_info); 3137 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK; 3138 if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY) 3139 panic("%s: Ran out of sense buffers\n", mrioc->name); 3140 break; 3141 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS: 3142 success_desc = (struct mpi3_success_reply_descriptor *)reply_desc; 3143 host_tag = le16_to_cpu(success_desc->host_tag); 3144 break; 3145 default: 3146 break; 3147 } 3148 scmd = mpi3mr_scmd_from_host_tag(mrioc, host_tag, qidx); 3149 if (!scmd) { 3150 panic("%s: Cannot Identify scmd for host_tag 0x%x\n", 3151 mrioc->name, host_tag); 3152 goto out; 3153 } 3154 priv = scsi_cmd_priv(scmd); 3155 3156 data_len_blks = scsi_bufflen(scmd) >> 9; 3157 sdev_priv_data = scmd->device->hostdata; 3158 if (sdev_priv_data) { 3159 stgt_priv_data = sdev_priv_data->tgt_priv_data; 3160 if (stgt_priv_data) { 3161 tg = stgt_priv_data->throttle_group; 3162 throttle_enabled_dev = 3163 stgt_priv_data->io_throttle_enabled; 3164 } 3165 } 3166 if (unlikely((data_len_blks >= mrioc->io_throttle_data_length) && 3167 throttle_enabled_dev)) { 3168 ioc_pend_data_len = atomic_sub_return(data_len_blks, 3169 &mrioc->pend_large_data_sz); 3170 if (tg) { 3171 tg_pend_data_len = atomic_sub_return(data_len_blks, 3172 &tg->pend_large_data_sz); 3173 if (tg->io_divert && ((ioc_pend_data_len <= 3174 mrioc->io_throttle_low) && 3175 (tg_pend_data_len <= tg->low))) { 3176 tg->io_divert = 0; 3177 mpi3mr_set_io_divert_for_all_vd_in_tg( 3178 mrioc, tg, 0); 3179 } 3180 } else { 3181 if (ioc_pend_data_len <= mrioc->io_throttle_low) 3182 stgt_priv_data->io_divert = 0; 3183 } 3184 } else if (unlikely((stgt_priv_data && stgt_priv_data->io_divert))) { 3185 ioc_pend_data_len = atomic_read(&mrioc->pend_large_data_sz); 3186 if (!tg) { 3187 if (ioc_pend_data_len <= mrioc->io_throttle_low) 3188 stgt_priv_data->io_divert = 0; 3189 3190 } else if (ioc_pend_data_len <= mrioc->io_throttle_low) { 3191 tg_pend_data_len = atomic_read(&tg->pend_large_data_sz); 3192 if (tg->io_divert && (tg_pend_data_len <= tg->low)) { 3193 tg->io_divert = 0; 3194 mpi3mr_set_io_divert_for_all_vd_in_tg( 3195 mrioc, tg, 0); 3196 } 3197 } 3198 } 3199 3200 if (success_desc) { 3201 scmd->result = DID_OK << 16; 3202 goto out_success; 3203 } 3204 3205 scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_count); 3206 if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN && 3207 xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY || 3208 scsi_status == MPI3_SCSI_STATUS_RESERVATION_CONFLICT || 3209 scsi_status == 
MPI3_SCSI_STATUS_TASK_SET_FULL)) 3210 ioc_status = MPI3_IOCSTATUS_SUCCESS; 3211 3212 if ((sense_state == MPI3_SCSI_STATE_SENSE_VALID) && sense_count && 3213 sense_buf) { 3214 u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, sense_count); 3215 3216 memcpy(scmd->sense_buffer, sense_buf, sz); 3217 } 3218 3219 switch (ioc_status) { 3220 case MPI3_IOCSTATUS_BUSY: 3221 case MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES: 3222 scmd->result = SAM_STAT_BUSY; 3223 break; 3224 case MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE: 3225 scmd->result = DID_NO_CONNECT << 16; 3226 break; 3227 case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED: 3228 scmd->result = DID_SOFT_ERROR << 16; 3229 break; 3230 case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED: 3231 case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED: 3232 scmd->result = DID_RESET << 16; 3233 break; 3234 case MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: 3235 if ((xfer_count == 0) || (scmd->underflow > xfer_count)) 3236 scmd->result = DID_SOFT_ERROR << 16; 3237 else 3238 scmd->result = (DID_OK << 16) | scsi_status; 3239 break; 3240 case MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN: 3241 scmd->result = (DID_OK << 16) | scsi_status; 3242 if (sense_state == MPI3_SCSI_STATE_SENSE_VALID) 3243 break; 3244 if (xfer_count < scmd->underflow) { 3245 if (scsi_status == SAM_STAT_BUSY) 3246 scmd->result = SAM_STAT_BUSY; 3247 else 3248 scmd->result = DID_SOFT_ERROR << 16; 3249 } else if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) || 3250 (sense_state != MPI3_SCSI_STATE_SENSE_NOT_AVAILABLE)) 3251 scmd->result = DID_SOFT_ERROR << 16; 3252 else if (scsi_state & MPI3_SCSI_STATE_TERMINATED) 3253 scmd->result = DID_RESET << 16; 3254 break; 3255 case MPI3_IOCSTATUS_SCSI_DATA_OVERRUN: 3256 scsi_set_resid(scmd, 0); 3257 fallthrough; 3258 case MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR: 3259 case MPI3_IOCSTATUS_SUCCESS: 3260 scmd->result = (DID_OK << 16) | scsi_status; 3261 if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) || 3262 (sense_state == MPI3_SCSI_STATE_SENSE_FAILED) || 3263 (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY)) 3264 scmd->result = DID_SOFT_ERROR << 16; 3265 else if (scsi_state & MPI3_SCSI_STATE_TERMINATED) 3266 scmd->result = DID_RESET << 16; 3267 break; 3268 case MPI3_IOCSTATUS_EEDP_GUARD_ERROR: 3269 case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR: 3270 case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR: 3271 mpi3mr_map_eedp_error(scmd, ioc_status); 3272 break; 3273 case MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR: 3274 case MPI3_IOCSTATUS_INVALID_FUNCTION: 3275 case MPI3_IOCSTATUS_INVALID_SGL: 3276 case MPI3_IOCSTATUS_INTERNAL_ERROR: 3277 case MPI3_IOCSTATUS_INVALID_FIELD: 3278 case MPI3_IOCSTATUS_INVALID_STATE: 3279 case MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR: 3280 case MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED: 3281 case MPI3_IOCSTATUS_INSUFFICIENT_POWER: 3282 default: 3283 scmd->result = DID_SOFT_ERROR << 16; 3284 break; 3285 } 3286 3287 if (scmd->result != (DID_OK << 16) && (scmd->cmnd[0] != ATA_12) && 3288 (scmd->cmnd[0] != ATA_16) && 3289 mrioc->logging_level & MPI3_DEBUG_SCSI_ERROR) { 3290 ioc_info(mrioc, "%s :scmd->result 0x%x\n", __func__, 3291 scmd->result); 3292 scsi_print_command(scmd); 3293 ioc_info(mrioc, 3294 "%s :Command issued to handle 0x%02x returned with error 0x%04x loginfo 0x%08x, qid %d\n", 3295 __func__, dev_handle, ioc_status, ioc_loginfo, 3296 priv->req_q_idx + 1); 3297 ioc_info(mrioc, 3298 " host_tag %d scsi_state 0x%02x scsi_status 0x%02x, xfer_cnt %d resp_data 0x%x\n", 3299 host_tag, scsi_state, scsi_status, xfer_count, resp_data); 3300 if (sense_buf) { 3301 scsi_normalize_sense(sense_buf, sense_count, &sshdr); 3302 
ioc_info(mrioc, 3303 "%s :sense_count 0x%x, sense_key 0x%x ASC 0x%x, ASCQ 0x%x\n", 3304 __func__, sense_count, sshdr.sense_key, 3305 sshdr.asc, sshdr.ascq); 3306 } 3307 } 3308 out_success: 3309 if (priv->meta_sg_valid) { 3310 dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd), 3311 scsi_prot_sg_count(scmd), scmd->sc_data_direction); 3312 } 3313 mpi3mr_clear_scmd_priv(mrioc, scmd); 3314 scsi_dma_unmap(scmd); 3315 scsi_done(scmd); 3316 out: 3317 if (sense_buf) 3318 mpi3mr_repost_sense_buf(mrioc, 3319 le64_to_cpu(scsi_reply->sense_data_buffer_address)); 3320 } 3321 3322 /** 3323 * mpi3mr_get_chain_idx - get free chain buffer index 3324 * @mrioc: Adapter instance reference 3325 * 3326 * Try to get a free chain buffer index from the free pool. 3327 * 3328 * Return: -1 on failure or the free chain buffer index 3329 */ 3330 static int mpi3mr_get_chain_idx(struct mpi3mr_ioc *mrioc) 3331 { 3332 u8 retry_count = 5; 3333 int cmd_idx = -1; 3334 unsigned long flags; 3335 3336 spin_lock_irqsave(&mrioc->chain_buf_lock, flags); 3337 do { 3338 cmd_idx = find_first_zero_bit(mrioc->chain_bitmap, 3339 mrioc->chain_buf_count); 3340 if (cmd_idx < mrioc->chain_buf_count) { 3341 set_bit(cmd_idx, mrioc->chain_bitmap); 3342 break; 3343 } 3344 cmd_idx = -1; 3345 } while (retry_count--); 3346 spin_unlock_irqrestore(&mrioc->chain_buf_lock, flags); 3347 return cmd_idx; 3348 } 3349 3350 /** 3351 * mpi3mr_prepare_sg_scmd - build scatter gather list 3352 * @mrioc: Adapter instance reference 3353 * @scmd: SCSI command reference 3354 * @scsiio_req: MPI3 SCSI IO request 3355 * 3356 * This function maps the SCSI command's data and protection SGEs to 3357 * MPI request SGEs. If required, an additional 4K chain buffer is 3358 * used to send the SGEs. 3359 * 3360 * Return: 0 on success, -ENOMEM on dma_map_sg failure 3361 */ 3362 static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc, 3363 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req) 3364 { 3365 dma_addr_t chain_dma; 3366 struct scatterlist *sg_scmd; 3367 void *sg_local, *chain; 3368 u32 chain_length; 3369 int sges_left, chain_idx; 3370 u32 sges_in_segment; 3371 u8 simple_sgl_flags; 3372 u8 simple_sgl_flags_last; 3373 u8 last_chain_sgl_flags; 3374 struct chain_element *chain_req; 3375 struct scmd_priv *priv = NULL; 3376 u32 meta_sg = le32_to_cpu(scsiio_req->flags) & 3377 MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI; 3378 3379 priv = scsi_cmd_priv(scmd); 3380 3381 simple_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE | 3382 MPI3_SGE_FLAGS_DLAS_SYSTEM; 3383 simple_sgl_flags_last = simple_sgl_flags | 3384 MPI3_SGE_FLAGS_END_OF_LIST; 3385 last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN | 3386 MPI3_SGE_FLAGS_DLAS_SYSTEM; 3387 3388 if (meta_sg) 3389 sg_local = &scsiio_req->sgl[MPI3_SCSIIO_METASGL_INDEX]; 3390 else 3391 sg_local = &scsiio_req->sgl; 3392 3393 if (!scsiio_req->data_length && !meta_sg) { 3394 mpi3mr_build_zero_len_sge(sg_local); 3395 return 0; 3396 } 3397 3398 if (meta_sg) { 3399 sg_scmd = scsi_prot_sglist(scmd); 3400 sges_left = dma_map_sg(&mrioc->pdev->dev, 3401 scsi_prot_sglist(scmd), 3402 scsi_prot_sg_count(scmd), 3403 scmd->sc_data_direction); 3404 priv->meta_sg_valid = 1; /* To unmap meta sg DMA */ 3405 } else { 3406 sg_scmd = scsi_sglist(scmd); 3407 sges_left = scsi_dma_map(scmd); 3408 } 3409 3410 if (sges_left < 0) { 3411 sdev_printk(KERN_ERR, scmd->device, 3412 "scsi_dma_map failed: request for %d bytes!\n", 3413 scsi_bufflen(scmd)); 3414 return -ENOMEM; 3415 } 3416 if (sges_left > MPI3MR_SG_DEPTH) { 3417 sdev_printk(KERN_ERR,
scmd->device, 3418 "scsi_dma_map returned unsupported sge count %d!\n", 3419 sges_left); 3420 return -ENOMEM; 3421 } 3422 3423 sges_in_segment = (mrioc->facts.op_req_sz - 3424 offsetof(struct mpi3_scsi_io_request, sgl)) / sizeof(struct mpi3_sge_common); 3425 3426 if (scsiio_req->sgl[0].eedp.flags == 3427 MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED && !meta_sg) { 3428 sg_local += sizeof(struct mpi3_sge_common); 3429 sges_in_segment--; 3430 /* Reserve 1st segment (scsiio_req->sgl[0]) for eedp */ 3431 } 3432 3433 if (scsiio_req->msg_flags == 3434 MPI3_SCSIIO_MSGFLAGS_METASGL_VALID && !meta_sg) { 3435 sges_in_segment--; 3436 /* Reserve last segment (scsiio_req->sgl[3]) for meta sg */ 3437 } 3438 3439 if (meta_sg) 3440 sges_in_segment = 1; 3441 3442 if (sges_left <= sges_in_segment) 3443 goto fill_in_last_segment; 3444 3445 /* fill in main message segment when there is a chain following */ 3446 while (sges_in_segment > 1) { 3447 mpi3mr_add_sg_single(sg_local, simple_sgl_flags, 3448 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 3449 sg_scmd = sg_next(sg_scmd); 3450 sg_local += sizeof(struct mpi3_sge_common); 3451 sges_left--; 3452 sges_in_segment--; 3453 } 3454 3455 chain_idx = mpi3mr_get_chain_idx(mrioc); 3456 if (chain_idx < 0) 3457 return -1; 3458 chain_req = &mrioc->chain_sgl_list[chain_idx]; 3459 if (meta_sg) 3460 priv->meta_chain_idx = chain_idx; 3461 else 3462 priv->chain_idx = chain_idx; 3463 3464 chain = chain_req->addr; 3465 chain_dma = chain_req->dma_addr; 3466 sges_in_segment = sges_left; 3467 chain_length = sges_in_segment * sizeof(struct mpi3_sge_common); 3468 3469 mpi3mr_add_sg_single(sg_local, last_chain_sgl_flags, 3470 chain_length, chain_dma); 3471 3472 sg_local = chain; 3473 3474 fill_in_last_segment: 3475 while (sges_left > 0) { 3476 if (sges_left == 1) 3477 mpi3mr_add_sg_single(sg_local, 3478 simple_sgl_flags_last, sg_dma_len(sg_scmd), 3479 sg_dma_address(sg_scmd)); 3480 else 3481 mpi3mr_add_sg_single(sg_local, simple_sgl_flags, 3482 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 3483 sg_scmd = sg_next(sg_scmd); 3484 sg_local += sizeof(struct mpi3_sge_common); 3485 sges_left--; 3486 } 3487 3488 return 0; 3489 } 3490 3491 /** 3492 * mpi3mr_build_sg_scmd - build scatter gather list for SCSI IO 3493 * @mrioc: Adapter instance reference 3494 * @scmd: SCSI command reference 3495 * @scsiio_req: MPI3 SCSI IO request 3496 * 3497 * This function calls mpi3mr_prepare_sg_scmd for constructing 3498 * both data SGEs and protection information SGEs in the MPI 3499 * format from the SCSI command as appropriate. 3500 * 3501 * Return: return value of mpi3mr_prepare_sg_scmd. 3502 */ 3503 static int mpi3mr_build_sg_scmd(struct mpi3mr_ioc *mrioc, 3504 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req) 3505 { 3506 int ret; 3507 3508 ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req); 3509 if (ret) 3510 return ret; 3511 3512 if (scsiio_req->msg_flags == MPI3_SCSIIO_MSGFLAGS_METASGL_VALID) { 3513 /* There is a valid meta sg */ 3514 scsiio_req->flags |= 3515 cpu_to_le32(MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI); 3516 ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req); 3517 } 3518 3519 return ret; 3520 } 3521 3522 /** 3523 * mpi3mr_tm_response_name - get TM response as a string 3524 * @resp_code: TM response code 3525 * 3526 * Converts a known task management response code to a readable 3527 * string. 3528 * 3529 * Return: response code string.
3530 */ 3531 static const char *mpi3mr_tm_response_name(u8 resp_code) 3532 { 3533 char *desc; 3534 3535 switch (resp_code) { 3536 case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE: 3537 desc = "task management request completed"; 3538 break; 3539 case MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME: 3540 desc = "invalid frame"; 3541 break; 3542 case MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED: 3543 desc = "task management request not supported"; 3544 break; 3545 case MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED: 3546 desc = "task management request failed"; 3547 break; 3548 case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED: 3549 desc = "task management request succeeded"; 3550 break; 3551 case MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN: 3552 desc = "invalid LUN"; 3553 break; 3554 case MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG: 3555 desc = "overlapped tag attempted"; 3556 break; 3557 case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC: 3558 desc = "task queued, however not sent to target"; 3559 break; 3560 case MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED: 3561 desc = "task management request denied by NVMe device"; 3562 break; 3563 default: 3564 desc = "unknown"; 3565 break; 3566 } 3567 3568 return desc; 3569 } 3570 3571 inline void mpi3mr_poll_pend_io_completions(struct mpi3mr_ioc *mrioc) 3572 { 3573 int i; 3574 int num_of_reply_queues = 3575 mrioc->num_op_reply_q + mrioc->op_reply_q_offset; 3576 3577 for (i = mrioc->op_reply_q_offset; i < num_of_reply_queues; i++) 3578 mpi3mr_process_op_reply_q(mrioc, 3579 mrioc->intr_info[i].op_reply_q); 3580 } 3581 3582 /** 3583 * mpi3mr_issue_tm - Issue Task Management request 3584 * @mrioc: Adapter instance reference 3585 * @tm_type: Task Management type 3586 * @handle: Device handle 3587 * @lun: lun ID 3588 * @htag: Host tag of the TM request 3589 * @timeout: TM timeout value 3590 * @drv_cmd: Internal command tracker 3591 * @resp_code: Response code place holder 3592 * @scmd: SCSI command 3593 * 3594 * Issues a Task Management Request to the controller for a 3595 * specified target, lun and command, waits for its completion, 3596 * and checks the TM response. Recovers from a TM timeout by 3597 * issuing a controller reset.
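 *
 * Minimal usage sketch (hypothetical caller; the host tag, timeout and
 * tracker shown here are illustrative assumptions, error handling elided):
 *
 *   u8 resp_code = 0;
 *
 *   retval = mpi3mr_issue_tm(mrioc,
 *       MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, dev_handle,
 *       sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
 *       MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);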
3598 * 3599 * Return: 0 on success, non-zero on errors 3600 */ 3601 int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type, 3602 u16 handle, uint lun, u16 htag, ulong timeout, 3603 struct mpi3mr_drv_cmd *drv_cmd, 3604 u8 *resp_code, struct scsi_cmnd *scmd) 3605 { 3606 struct mpi3_scsi_task_mgmt_request tm_req; 3607 struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL; 3608 int retval = 0; 3609 struct mpi3mr_tgt_dev *tgtdev = NULL; 3610 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 3611 struct scmd_priv *cmd_priv = NULL; 3612 struct scsi_device *sdev = NULL; 3613 struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL; 3614 3615 ioc_info(mrioc, "%s :Issue TM: TM type (0x%x) for devhandle 0x%04x\n", 3616 __func__, tm_type, handle); 3617 if (mrioc->unrecoverable) { 3618 retval = -1; 3619 ioc_err(mrioc, "%s :Issue TM: Unrecoverable controller\n", 3620 __func__); 3621 goto out; 3622 } 3623 3624 memset(&tm_req, 0, sizeof(tm_req)); 3625 mutex_lock(&drv_cmd->mutex); 3626 if (drv_cmd->state & MPI3MR_CMD_PENDING) { 3627 retval = -1; 3628 ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__); 3629 mutex_unlock(&drv_cmd->mutex); 3630 goto out; 3631 } 3632 if (mrioc->reset_in_progress) { 3633 retval = -1; 3634 ioc_err(mrioc, "%s :Issue TM: Reset in progress\n", __func__); 3635 mutex_unlock(&drv_cmd->mutex); 3636 goto out; 3637 } 3638 3639 drv_cmd->state = MPI3MR_CMD_PENDING; 3640 drv_cmd->is_waiting = 1; 3641 drv_cmd->callback = NULL; 3642 tm_req.dev_handle = cpu_to_le16(handle); 3643 tm_req.task_type = tm_type; 3644 tm_req.host_tag = cpu_to_le16(htag); 3645 3646 int_to_scsilun(lun, (struct scsi_lun *)tm_req.lun); 3647 tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT; 3648 3649 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 3650 3651 if (scmd) { 3652 sdev = scmd->device; 3653 sdev_priv_data = sdev->hostdata; 3654 scsi_tgt_priv_data = ((sdev_priv_data) ? 
3655 sdev_priv_data->tgt_priv_data : NULL); 3656 } else { 3657 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) 3658 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 3659 tgtdev->starget->hostdata; 3660 } 3661 3662 if (scsi_tgt_priv_data) 3663 atomic_inc(&scsi_tgt_priv_data->block_io); 3664 3665 if (tgtdev && (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)) { 3666 if (cmd_priv && tgtdev->dev_spec.pcie_inf.abort_to) 3667 timeout = tgtdev->dev_spec.pcie_inf.abort_to; 3668 else if (!cmd_priv && tgtdev->dev_spec.pcie_inf.reset_to) 3669 timeout = tgtdev->dev_spec.pcie_inf.reset_to; 3670 } 3671 3672 init_completion(&drv_cmd->done); 3673 retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1); 3674 if (retval) { 3675 ioc_err(mrioc, "%s :Issue TM: Admin Post failed\n", __func__); 3676 goto out_unlock; 3677 } 3678 wait_for_completion_timeout(&drv_cmd->done, (timeout * HZ)); 3679 3680 if (!(drv_cmd->state & MPI3MR_CMD_COMPLETE)) { 3681 drv_cmd->is_waiting = 0; 3682 retval = -1; 3683 if (!(drv_cmd->state & MPI3MR_CMD_RESET)) { 3684 dprint_tm(mrioc, 3685 "task management request timed out after %ld seconds\n", 3686 timeout); 3687 if (mrioc->logging_level & MPI3_DEBUG_TM) 3688 dprint_dump_req(&tm_req, sizeof(tm_req)/4); 3689 mpi3mr_soft_reset_handler(mrioc, 3690 MPI3MR_RESET_FROM_TM_TIMEOUT, 1); 3691 } 3692 goto out_unlock; 3693 } 3694 3695 if (!(drv_cmd->state & MPI3MR_CMD_REPLY_VALID)) { 3696 dprint_tm(mrioc, "invalid task management reply message\n"); 3697 retval = -1; 3698 goto out_unlock; 3699 } 3700 3701 tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply; 3702 3703 switch (drv_cmd->ioc_status) { 3704 case MPI3_IOCSTATUS_SUCCESS: 3705 *resp_code = le32_to_cpu(tm_reply->response_data) & 3706 MPI3MR_RI_MASK_RESPCODE; 3707 break; 3708 case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED: 3709 *resp_code = MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE; 3710 break; 3711 default: 3712 dprint_tm(mrioc, 3713 "task management request to handle(0x%04x) is failed with ioc_status(0x%04x) log_info(0x%08x)\n", 3714 handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo); 3715 retval = -1; 3716 goto out_unlock; 3717 } 3718 3719 switch (*resp_code) { 3720 case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED: 3721 case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE: 3722 break; 3723 case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC: 3724 if (tm_type != MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK) 3725 retval = -1; 3726 break; 3727 default: 3728 retval = -1; 3729 break; 3730 } 3731 3732 dprint_tm(mrioc, 3733 "task management request type(%d) completed for handle(0x%04x) with ioc_status(0x%04x), log_info(0x%08x), termination_count(%d), response:%s(0x%x)\n", 3734 tm_type, handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo, 3735 le32_to_cpu(tm_reply->termination_count), 3736 mpi3mr_tm_response_name(*resp_code), *resp_code); 3737 3738 if (!retval) { 3739 mpi3mr_ioc_disable_intr(mrioc); 3740 mpi3mr_poll_pend_io_completions(mrioc); 3741 mpi3mr_ioc_enable_intr(mrioc); 3742 mpi3mr_poll_pend_io_completions(mrioc); 3743 mpi3mr_process_admin_reply_q(mrioc); 3744 } 3745 switch (tm_type) { 3746 case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET: 3747 if (!scsi_tgt_priv_data) 3748 break; 3749 scsi_tgt_priv_data->pend_count = 0; 3750 blk_mq_tagset_busy_iter(&mrioc->shost->tag_set, 3751 mpi3mr_count_tgt_pending, 3752 (void *)scsi_tgt_priv_data->starget); 3753 break; 3754 case MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: 3755 if (!sdev_priv_data) 3756 break; 3757 sdev_priv_data->pend_count = 0; 3758 blk_mq_tagset_busy_iter(&mrioc->shost->tag_set, 
3759 mpi3mr_count_dev_pending, (void *)sdev);
3760 break;
3761 default:
3762 break;
3763 }
3764
3765 out_unlock:
3766 drv_cmd->state = MPI3MR_CMD_NOTUSED;
3767 mutex_unlock(&drv_cmd->mutex);
3768 if (scsi_tgt_priv_data)
3769 atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
3770 if (tgtdev)
3771 mpi3mr_tgtdev_put(tgtdev);
3772 out:
3773 return retval;
3774 }
3775
3776 /**
3777 * mpi3mr_bios_param - BIOS param callback
3778 * @sdev: SCSI device reference
3779 * @bdev: Block device reference
3780 * @capacity: Capacity in logical sectors
3781 * @params: Parameter array
3782 *
3783 * Set the BIOS geometry parameters: heads, sectors and cylinders.
3784 *
3785 * Return: 0 always
3786 */
3787 static int mpi3mr_bios_param(struct scsi_device *sdev,
3788 struct block_device *bdev, sector_t capacity, int params[])
3789 {
3790 int heads;
3791 int sectors;
3792 sector_t cylinders;
3793 ulong dummy;
3794
3795 heads = 64;
3796 sectors = 32;
3797
3798 dummy = heads * sectors;
3799 cylinders = capacity;
3800 sector_div(cylinders, dummy);
3801
3802 if ((ulong)capacity >= 0x200000) {
3803 heads = 255;
3804 sectors = 63;
3805 dummy = heads * sectors;
3806 cylinders = capacity;
3807 sector_div(cylinders, dummy);
3808 }
3809
3810 params[0] = heads;
3811 params[1] = sectors;
3812 params[2] = cylinders;
3813 return 0;
3814 }
3815
3816 /**
3817 * mpi3mr_map_queues - Map queues callback handler
3818 * @shost: SCSI host reference
3819 *
3820 * Maps default and poll queues.
3821 *
3822 * Return: Nothing.
3823 */
3824 static void mpi3mr_map_queues(struct Scsi_Host *shost)
3825 {
3826 struct mpi3mr_ioc *mrioc = shost_priv(shost);
3827 int i, qoff, offset;
3828 struct blk_mq_queue_map *map = NULL;
3829
3830 offset = mrioc->op_reply_q_offset;
3831
3832 for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
3833 map = &shost->tag_set.map[i];
3834
3835 map->nr_queues = 0;
3836
3837 if (i == HCTX_TYPE_DEFAULT)
3838 map->nr_queues = mrioc->default_qcount;
3839 else if (i == HCTX_TYPE_POLL)
3840 map->nr_queues = mrioc->active_poll_qcount;
3841
3842 if (!map->nr_queues) {
3843 BUG_ON(i == HCTX_TYPE_DEFAULT);
3844 continue;
3845 }
3846
3847 /*
3848 * The poll queue(s) don't have an IRQ (and hence IRQ
3849 * affinity), so use the regular blk-mq cpu mapping
3850 */
3851 map->queue_offset = qoff;
3852 if (i != HCTX_TYPE_POLL)
3853 blk_mq_pci_map_queues(map, mrioc->pdev, offset);
3854 else
3855 blk_mq_map_queues(map);
3856
3857 qoff += map->nr_queues;
3858 offset += map->nr_queues;
3859 }
3860 }
3861
3862 /**
3863 * mpi3mr_get_fw_pending_ios - Calculate pending I/O count
3864 * @mrioc: Adapter instance reference
3865 *
3866 * Calculate the pending I/Os for the controller and return.
3867 *
3868 * Return: Number of pending I/Os
3869 */
3870 static inline int mpi3mr_get_fw_pending_ios(struct mpi3mr_ioc *mrioc)
3871 {
3872 u16 i;
3873 uint pend_ios = 0;
3874
3875 for (i = 0; i < mrioc->num_op_reply_q; i++)
3876 pend_ios += atomic_read(&mrioc->op_reply_qinfo[i].pend_ios);
3877 return pend_ios;
3878 }
3879
3880 /**
3881 * mpi3mr_print_pending_host_io - print pending I/Os
3882 * @mrioc: Adapter instance reference
3883 *
3884 * Print the number of pending I/Os and the details of each I/O
3885 * prior to reset, for debug purposes.
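 *
 * The total is taken from mpi3mr_get_fw_pending_ios() and each
 * outstanding command is then printed by walking the tag set, which
 * boils down to:
 *
 *   blk_mq_tagset_busy_iter(&shost->tag_set, mpi3mr_print_scmd, (void *)mrioc);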
3886 *
3887 * Return: Nothing
3888 */
3889 static void mpi3mr_print_pending_host_io(struct mpi3mr_ioc *mrioc)
3890 {
3891 struct Scsi_Host *shost = mrioc->shost;
3892
3893 ioc_info(mrioc, "%s :Pending commands prior to reset: %d\n",
3894 __func__, mpi3mr_get_fw_pending_ios(mrioc));
3895 blk_mq_tagset_busy_iter(&shost->tag_set,
3896 mpi3mr_print_scmd, (void *)mrioc);
3897 }
3898
3899 /**
3900 * mpi3mr_wait_for_host_io - block for I/Os to complete
3901 * @mrioc: Adapter instance reference
3902 * @timeout: timeout in seconds
3903 * Waits for pending I/Os for the given adapter to complete or
3904 * to hit the timeout.
3905 *
3906 * Return: Nothing
3907 */
3908 void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout)
3909 {
3910 enum mpi3mr_iocstate iocstate;
3911 int i = 0;
3912
3913 iocstate = mpi3mr_get_iocstate(mrioc);
3914 if (iocstate != MRIOC_STATE_READY)
3915 return;
3916
3917 if (!mpi3mr_get_fw_pending_ios(mrioc))
3918 return;
3919 ioc_info(mrioc,
3920 "%s :Waiting for %d seconds prior to reset for %d I/O\n",
3921 __func__, timeout, mpi3mr_get_fw_pending_ios(mrioc));
3922
3923 for (i = 0; i < timeout; i++) {
3924 if (!mpi3mr_get_fw_pending_ios(mrioc))
3925 break;
3926 iocstate = mpi3mr_get_iocstate(mrioc);
3927 if (iocstate != MRIOC_STATE_READY)
3928 break;
3929 msleep(1000);
3930 }
3931
3932 ioc_info(mrioc, "%s :Pending I/Os after wait is: %d\n", __func__,
3933 mpi3mr_get_fw_pending_ios(mrioc));
3934 }
3935
3936 /**
3937 * mpi3mr_eh_host_reset - Host reset error handling callback
3938 * @scmd: SCSI command reference
3939 *
3940 * Issue controller reset if the scmd is for a physical device;
3941 * if the scmd is for a RAID volume, then wait for
3942 * MPI3MR_RAID_ERRREC_RESET_TIMEOUT and check whether any I/Os
3943 * are pending prior to issuing reset to the controller.
3944 *
3945 * Return: SUCCESS on successful reset, else FAILED
3946 */
3947 static int mpi3mr_eh_host_reset(struct scsi_cmnd *scmd)
3948 {
3949 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
3950 struct mpi3mr_stgt_priv_data *stgt_priv_data;
3951 struct mpi3mr_sdev_priv_data *sdev_priv_data;
3952 u8 dev_type = MPI3_DEVICE_DEVFORM_VD;
3953 int retval = FAILED, ret;
3954
3955 sdev_priv_data = scmd->device->hostdata;
3956 if (sdev_priv_data && sdev_priv_data->tgt_priv_data) {
3957 stgt_priv_data = sdev_priv_data->tgt_priv_data;
3958 dev_type = stgt_priv_data->dev_type;
3959 }
3960
3961 if (dev_type == MPI3_DEVICE_DEVFORM_VD) {
3962 mpi3mr_wait_for_host_io(mrioc,
3963 MPI3MR_RAID_ERRREC_RESET_TIMEOUT);
3964 if (!mpi3mr_get_fw_pending_ios(mrioc)) {
3965 retval = SUCCESS;
3966 goto out;
3967 }
3968 }
3969
3970 mpi3mr_print_pending_host_io(mrioc);
3971 ret = mpi3mr_soft_reset_handler(mrioc,
3972 MPI3MR_RESET_FROM_EH_HOS, 1);
3973 if (ret)
3974 goto out;
3975
3976 retval = SUCCESS;
3977 out:
3978 sdev_printk(KERN_INFO, scmd->device,
3979 "Host reset is %s for scmd(%p)\n",
3980 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3981
3982 return retval;
3983 }
3984
3985 /**
3986 * mpi3mr_eh_target_reset - Target reset error handling callback
3987 * @scmd: SCSI command reference
3988 *
3989 * Issue Target reset Task Management and verify the scmd is
3990 * terminated successfully and return status accordingly.
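 *
 * The reset itself is sent with the dedicated block layer TM host tag,
 * mirroring the call in the body:
 *
 *   mpi3mr_issue_tm(mrioc, MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
 *       dev_handle, sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
 *       MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);
 *
 * Even a successful TM reports SUCCESS only if the target's pend_count
 * has dropped to zero afterwards.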
3991 *
3992 * Return: SUCCESS on successful termination of the scmd, else
3993 * FAILED
3994 */
3995 static int mpi3mr_eh_target_reset(struct scsi_cmnd *scmd)
3996 {
3997 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
3998 struct mpi3mr_stgt_priv_data *stgt_priv_data;
3999 struct mpi3mr_sdev_priv_data *sdev_priv_data;
4000 u16 dev_handle;
4001 u8 resp_code = 0;
4002 int retval = FAILED, ret = 0;
4003
4004 sdev_printk(KERN_INFO, scmd->device,
4005 "Attempting Target Reset! scmd(%p)\n", scmd);
4006 scsi_print_command(scmd);
4007
4008 sdev_priv_data = scmd->device->hostdata;
4009 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
4010 sdev_printk(KERN_INFO, scmd->device,
4011 "SCSI device is not available\n");
4012 retval = SUCCESS;
4013 goto out;
4014 }
4015
4016 stgt_priv_data = sdev_priv_data->tgt_priv_data;
4017 dev_handle = stgt_priv_data->dev_handle;
4018 if (stgt_priv_data->dev_removed) {
4019 struct scmd_priv *cmd_priv = scsi_cmd_priv(scmd);
4020 sdev_printk(KERN_INFO, scmd->device,
4021 "%s:target(handle = 0x%04x) is removed, target reset is not issued\n",
4022 mrioc->name, dev_handle);
4023 if (!cmd_priv->in_lld_scope || cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID)
4024 retval = SUCCESS;
4025 else
4026 retval = FAILED;
4027 goto out;
4028 }
4029 sdev_printk(KERN_INFO, scmd->device,
4030 "Target Reset is issued to handle(0x%04x)\n",
4031 dev_handle);
4032
4033 ret = mpi3mr_issue_tm(mrioc,
4034 MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, dev_handle,
4035 sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
4036 MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);
4037
4038 if (ret)
4039 goto out;
4040
4041 if (stgt_priv_data->pend_count) {
4042 sdev_printk(KERN_INFO, scmd->device,
4043 "%s: target has %d pending commands, target reset is failed\n",
4044 mrioc->name, stgt_priv_data->pend_count);
4045 goto out;
4046 }
4047
4048 retval = SUCCESS;
4049 out:
4050 sdev_printk(KERN_INFO, scmd->device,
4051 "%s: target reset is %s for scmd(%p)\n", mrioc->name,
4052 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
4053
4054 return retval;
4055 }
4056
4057 /**
4058 * mpi3mr_eh_dev_reset - Device reset error handling callback
4059 * @scmd: SCSI command reference
4060 *
4061 * Issue lun reset Task Management and verify the scmd is
4062 * terminated successfully and return status accordingly.
4063 *
4064 * Return: SUCCESS on successful termination of the scmd, else
4065 * FAILED
4066 */
4067 static int mpi3mr_eh_dev_reset(struct scsi_cmnd *scmd)
4068 {
4069 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
4070 struct mpi3mr_stgt_priv_data *stgt_priv_data;
4071 struct mpi3mr_sdev_priv_data *sdev_priv_data;
4072 u16 dev_handle;
4073 u8 resp_code = 0;
4074 int retval = FAILED, ret = 0;
4075
4076 sdev_printk(KERN_INFO, scmd->device,
4077 "Attempting Device(lun) Reset! scmd(%p)\n", scmd);
4078 scsi_print_command(scmd);
4079
4080 sdev_priv_data = scmd->device->hostdata;
4081 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
4082 sdev_printk(KERN_INFO, scmd->device,
4083 "SCSI device is not available\n");
4084 retval = SUCCESS;
4085 goto out;
4086 }
4087
4088 stgt_priv_data = sdev_priv_data->tgt_priv_data;
4089 dev_handle = stgt_priv_data->dev_handle;
4090 if (stgt_priv_data->dev_removed) {
4091 struct scmd_priv *cmd_priv = scsi_cmd_priv(scmd);
4092 sdev_printk(KERN_INFO, scmd->device,
4093 "%s: device(handle = 0x%04x) is removed, device(LUN) reset is not issued\n",
4094 mrioc->name, dev_handle);
4095 if (!cmd_priv->in_lld_scope || cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID)
4096 retval = SUCCESS;
4097 else
4098 retval = FAILED;
4099 goto out;
4100 }
4101 sdev_printk(KERN_INFO, scmd->device,
4102 "Device(lun) Reset is issued to handle(0x%04x)\n", dev_handle);
4103
4104 ret = mpi3mr_issue_tm(mrioc,
4105 MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, dev_handle,
4106 sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
4107 MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);
4108
4109 if (ret)
4110 goto out;
4111
4112 if (sdev_priv_data->pend_count) {
4113 sdev_printk(KERN_INFO, scmd->device,
4114 "%s: device has %d pending commands, device(LUN) reset is failed\n",
4115 mrioc->name, sdev_priv_data->pend_count);
4116 goto out;
4117 }
4118 retval = SUCCESS;
4119 out:
4120 sdev_printk(KERN_INFO, scmd->device,
4121 "%s: device(LUN) reset is %s for scmd(%p)\n", mrioc->name,
4122 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
4123
4124 return retval;
4125 }
4126
4127 /**
4128 * mpi3mr_scan_start - Scan start callback handler
4129 * @shost: SCSI host reference
4130 *
4131 * Issue port enable request asynchronously.
4132 *
4133 * Return: Nothing
4134 */
4135 static void mpi3mr_scan_start(struct Scsi_Host *shost)
4136 {
4137 struct mpi3mr_ioc *mrioc = shost_priv(shost);
4138
4139 mrioc->scan_started = 1;
4140 ioc_info(mrioc, "%s :Issuing Port Enable\n", __func__);
4141 if (mpi3mr_issue_port_enable(mrioc, 1)) {
4142 ioc_err(mrioc, "%s :Issuing port enable failed\n", __func__);
4143 mrioc->scan_started = 0;
4144 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
4145 }
4146 }
4147
4148 /**
4149 * mpi3mr_scan_finished - Scan finished callback handler
4150 * @shost: SCSI host reference
4151 * @time: Jiffies from the scan start
4152 *
4153 * Checks whether the port enable is completed, timed out or
4154 * failed and sets the scan status accordingly after taking any
4155 * required recovery action.
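 *
 * @time is measured in jiffies, so the port enable is treated as timed
 * out once time >= MPI3MR_PORTENABLE_TIMEOUT * HZ (with HZ=250, for
 * instance, a 300 second limit corresponds to 75000 jiffies).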
4156 * 4157 * Return: 1 on scan finished or timed out, 0 for in progress 4158 */ 4159 static int mpi3mr_scan_finished(struct Scsi_Host *shost, 4160 unsigned long time) 4161 { 4162 struct mpi3mr_ioc *mrioc = shost_priv(shost); 4163 u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT; 4164 u32 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 4165 4166 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) || 4167 (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) { 4168 ioc_err(mrioc, "port enable failed due to fault or reset\n"); 4169 mpi3mr_print_fault_info(mrioc); 4170 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR; 4171 mrioc->scan_started = 0; 4172 mrioc->init_cmds.is_waiting = 0; 4173 mrioc->init_cmds.callback = NULL; 4174 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 4175 } 4176 4177 if (time >= (pe_timeout * HZ)) { 4178 ioc_err(mrioc, "port enable failed due to time out\n"); 4179 mpi3mr_check_rh_fault_ioc(mrioc, 4180 MPI3MR_RESET_FROM_PE_TIMEOUT); 4181 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR; 4182 mrioc->scan_started = 0; 4183 mrioc->init_cmds.is_waiting = 0; 4184 mrioc->init_cmds.callback = NULL; 4185 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 4186 } 4187 4188 if (mrioc->scan_started) 4189 return 0; 4190 4191 if (mrioc->scan_failed) { 4192 ioc_err(mrioc, 4193 "port enable failed with status=0x%04x\n", 4194 mrioc->scan_failed); 4195 } else 4196 ioc_info(mrioc, "port enable is successfully completed\n"); 4197 4198 mpi3mr_start_watchdog(mrioc); 4199 mrioc->is_driver_loading = 0; 4200 mrioc->stop_bsgs = 0; 4201 return 1; 4202 } 4203 4204 /** 4205 * mpi3mr_slave_destroy - Slave destroy callback handler 4206 * @sdev: SCSI device reference 4207 * 4208 * Cleanup and free per device(lun) private data. 4209 * 4210 * Return: Nothing. 4211 */ 4212 static void mpi3mr_slave_destroy(struct scsi_device *sdev) 4213 { 4214 struct Scsi_Host *shost; 4215 struct mpi3mr_ioc *mrioc; 4216 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 4217 struct mpi3mr_tgt_dev *tgt_dev = NULL; 4218 unsigned long flags; 4219 struct scsi_target *starget; 4220 struct sas_rphy *rphy = NULL; 4221 4222 if (!sdev->hostdata) 4223 return; 4224 4225 starget = scsi_target(sdev); 4226 shost = dev_to_shost(&starget->dev); 4227 mrioc = shost_priv(shost); 4228 scsi_tgt_priv_data = starget->hostdata; 4229 4230 scsi_tgt_priv_data->num_luns--; 4231 4232 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 4233 if (starget->channel == mrioc->scsi_device_channel) 4234 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 4235 else if (mrioc->sas_transport_enabled && !starget->channel) { 4236 rphy = dev_to_rphy(starget->dev.parent); 4237 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc, 4238 rphy->identify.sas_address, rphy); 4239 } 4240 4241 if (tgt_dev && (!scsi_tgt_priv_data->num_luns)) 4242 tgt_dev->starget = NULL; 4243 if (tgt_dev) 4244 mpi3mr_tgtdev_put(tgt_dev); 4245 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4246 4247 kfree(sdev->hostdata); 4248 sdev->hostdata = NULL; 4249 } 4250 4251 /** 4252 * mpi3mr_target_destroy - Target destroy callback handler 4253 * @starget: SCSI target reference 4254 * 4255 * Cleanup and free per target private data. 4256 * 4257 * Return: Nothing. 
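 *
 * Note: mpi3mr_tgtdev_put() is intentionally called twice in the body;
 * one put balances the __mpi3mr_get_tgtdev_from_tgtpriv() lookup and
 * the other drops the reference that scsi_tgt_priv_data->tgt_dev has
 * held since mpi3mr_target_alloc().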
4258 */
4259 static void mpi3mr_target_destroy(struct scsi_target *starget)
4260 {
4261 struct Scsi_Host *shost;
4262 struct mpi3mr_ioc *mrioc;
4263 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
4264 struct mpi3mr_tgt_dev *tgt_dev;
4265 unsigned long flags;
4266
4267 if (!starget->hostdata)
4268 return;
4269
4270 shost = dev_to_shost(&starget->dev);
4271 mrioc = shost_priv(shost);
4272 scsi_tgt_priv_data = starget->hostdata;
4273
4274 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
4275 tgt_dev = __mpi3mr_get_tgtdev_from_tgtpriv(mrioc, scsi_tgt_priv_data);
4276 if (tgt_dev && (tgt_dev->starget == starget) &&
4277 (tgt_dev->perst_id == starget->id))
4278 tgt_dev->starget = NULL;
4279 if (tgt_dev) {
4280 scsi_tgt_priv_data->tgt_dev = NULL;
4281 scsi_tgt_priv_data->perst_id = 0;
4282 mpi3mr_tgtdev_put(tgt_dev);
4283 mpi3mr_tgtdev_put(tgt_dev);
4284 }
4285 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
4286
4287 kfree(starget->hostdata);
4288 starget->hostdata = NULL;
4289 }
4290
4291 /**
4292 * mpi3mr_slave_configure - Slave configure callback handler
4293 * @sdev: SCSI device reference
4294 *
4295 * Configure queue depth, max hardware sectors and virt boundary
4296 * as required.
4297 *
4298 * Return: 0 on success, -ENXIO if the target device is not found.
4299 */
4300 static int mpi3mr_slave_configure(struct scsi_device *sdev)
4301 {
4302 struct scsi_target *starget;
4303 struct Scsi_Host *shost;
4304 struct mpi3mr_ioc *mrioc;
4305 struct mpi3mr_tgt_dev *tgt_dev = NULL;
4306 unsigned long flags;
4307 int retval = 0;
4308 struct sas_rphy *rphy = NULL;
4309
4310 starget = scsi_target(sdev);
4311 shost = dev_to_shost(&starget->dev);
4312 mrioc = shost_priv(shost);
4313
4314 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
4315 if (starget->channel == mrioc->scsi_device_channel)
4316 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
4317 else if (mrioc->sas_transport_enabled && !starget->channel) {
4318 rphy = dev_to_rphy(starget->dev.parent);
4319 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
4320 rphy->identify.sas_address, rphy);
4321 }
4322 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
4323 if (!tgt_dev)
4324 return -ENXIO;
4325
4326 mpi3mr_change_queue_depth(sdev, tgt_dev->q_depth);
4327
4328 sdev->eh_timeout = MPI3MR_EH_SCMD_TIMEOUT;
4329 blk_queue_rq_timeout(sdev->request_queue, MPI3MR_SCMD_TIMEOUT);
4330
4331 switch (tgt_dev->dev_type) {
4332 case MPI3_DEVICE_DEVFORM_PCIE:
4333 /* The block layer hw sector size = 512 */
4334 if ((tgt_dev->dev_spec.pcie_inf.dev_info &
4335 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
4336 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) {
4337 blk_queue_max_hw_sectors(sdev->request_queue,
4338 tgt_dev->dev_spec.pcie_inf.mdts / 512);
4339 if (tgt_dev->dev_spec.pcie_inf.pgsz == 0)
4340 blk_queue_virt_boundary(sdev->request_queue,
4341 ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1));
4342 else
4343 blk_queue_virt_boundary(sdev->request_queue,
4344 ((1 << tgt_dev->dev_spec.pcie_inf.pgsz) - 1));
4345 }
4346 break;
4347 default:
4348 break;
4349 }
4350
4351 mpi3mr_tgtdev_put(tgt_dev);
4352
4353 return retval;
4354 }
4355
4356 /**
4357 * mpi3mr_slave_alloc - Slave alloc callback handler
4358 * @sdev: SCSI device reference
4359 *
4360 * Allocate per device(lun) private data and initialize it.
4361 *
4362 * Return: 0 on success, -ENOMEM on memory allocation failure, -ENXIO if the target device is not found.
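 *
 * Note: the target device lookup below runs under tgtdev_lock, while
 * the per-LUN private data is allocated with GFP_KERNEL only after the
 * lock is dropped.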
4363 */
4364 static int mpi3mr_slave_alloc(struct scsi_device *sdev)
4365 {
4366 struct Scsi_Host *shost;
4367 struct mpi3mr_ioc *mrioc;
4368 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
4369 struct mpi3mr_tgt_dev *tgt_dev = NULL;
4370 struct mpi3mr_sdev_priv_data *scsi_dev_priv_data;
4371 unsigned long flags;
4372 struct scsi_target *starget;
4373 int retval = 0;
4374 struct sas_rphy *rphy = NULL;
4375
4376 starget = scsi_target(sdev);
4377 shost = dev_to_shost(&starget->dev);
4378 mrioc = shost_priv(shost);
4379 scsi_tgt_priv_data = starget->hostdata;
4380
4381 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
4382
4383 if (starget->channel == mrioc->scsi_device_channel)
4384 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
4385 else if (mrioc->sas_transport_enabled && !starget->channel) {
4386 rphy = dev_to_rphy(starget->dev.parent);
4387 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
4388 rphy->identify.sas_address, rphy);
4389 }
4390
4391 if (tgt_dev) {
4392 if (tgt_dev->starget == NULL)
4393 tgt_dev->starget = starget;
4394 mpi3mr_tgtdev_put(tgt_dev);
4395 retval = 0;
4396 } else {
4397 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
4398 return -ENXIO;
4399 }
4400
4401 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
4402
4403 scsi_dev_priv_data = kzalloc(sizeof(*scsi_dev_priv_data), GFP_KERNEL);
4404 if (!scsi_dev_priv_data)
4405 return -ENOMEM;
4406
4407 scsi_dev_priv_data->lun_id = sdev->lun;
4408 scsi_dev_priv_data->tgt_priv_data = scsi_tgt_priv_data;
4409 sdev->hostdata = scsi_dev_priv_data;
4410
4411 scsi_tgt_priv_data->num_luns++;
4412
4413 return retval;
4414 }
4415
4416 /**
4417 * mpi3mr_target_alloc - Target alloc callback handler
4418 * @starget: SCSI target reference
4419 *
4420 * Allocate per target private data and initialize it.
4421 *
4422 * Return: 0 on success, -ENOMEM on memory allocation failure, -ENXIO if the target device is not found.
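 *
 * Note: on success, the reference taken on the target device by the
 * lookup is retained in scsi_tgt_priv_data->tgt_dev; it is dropped
 * later by mpi3mr_target_destroy().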
4423 */
4424 static int mpi3mr_target_alloc(struct scsi_target *starget)
4425 {
4426 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4427 struct mpi3mr_ioc *mrioc = shost_priv(shost);
4428 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
4429 struct mpi3mr_tgt_dev *tgt_dev;
4430 unsigned long flags;
4431 int retval = 0;
4432 struct sas_rphy *rphy = NULL;
4433 bool update_stgt_priv_data = false;
4434
4435 scsi_tgt_priv_data = kzalloc(sizeof(*scsi_tgt_priv_data), GFP_KERNEL);
4436 if (!scsi_tgt_priv_data)
4437 return -ENOMEM;
4438
4439 starget->hostdata = scsi_tgt_priv_data;
4440
4441 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
4442
4443 if (starget->channel == mrioc->scsi_device_channel) {
4444 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
4445 if (tgt_dev && !tgt_dev->is_hidden)
4446 update_stgt_priv_data = true;
4447 else
4448 retval = -ENXIO;
4449 } else if (mrioc->sas_transport_enabled && !starget->channel) {
4450 rphy = dev_to_rphy(starget->dev.parent);
4451 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
4452 rphy->identify.sas_address, rphy);
4453 if (tgt_dev && !tgt_dev->is_hidden && !tgt_dev->non_stl &&
4454 (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA))
4455 update_stgt_priv_data = true;
4456 else
4457 retval = -ENXIO;
4458 }
4459
4460 if (update_stgt_priv_data) {
4461 scsi_tgt_priv_data->starget = starget;
4462 scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
4463 scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
4464 scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
4465 scsi_tgt_priv_data->tgt_dev = tgt_dev;
4466 tgt_dev->starget = starget;
4467 atomic_set(&scsi_tgt_priv_data->block_io, 0);
4468 retval = 0;
4469 scsi_tgt_priv_data->io_throttle_enabled =
4470 tgt_dev->io_throttle_enabled;
4471 if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_VD)
4472 scsi_tgt_priv_data->throttle_group =
4473 tgt_dev->dev_spec.vd_inf.tg;
4474 }
4475 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
4476
4477 return retval;
4478 }
4479
4480 /**
4481 * mpi3mr_check_return_unmap - Whether an unmap is allowed
4482 * @mrioc: Adapter instance reference
4483 * @scmd: SCSI Command reference
4484 *
4485 * The controller hardware cannot handle certain unmap commands
4486 * for NVMe drives; this routine checks for those and, when not
4487 * allowed, completes the SCSI command with proper status and
4488 * sense data and returns true.
4489 *
4490 * Return: TRUE if the unmap is not allowed, FALSE otherwise.
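 *
 * On controllers with a non-zero PCI revision only alignment is fixed
 * up: a parameter list that is not a whole number of 16-byte block
 * descriptors past the 8-byte header is truncated, e.g. param_len = 30
 * gives (30 - 8) & 0xF = 6, so the length is trimmed to 24.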
4491 */ 4492 static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc, 4493 struct scsi_cmnd *scmd) 4494 { 4495 unsigned char *buf; 4496 u16 param_len, desc_len, trunc_param_len; 4497 4498 trunc_param_len = param_len = get_unaligned_be16(scmd->cmnd + 7); 4499 4500 if (mrioc->pdev->revision) { 4501 if ((param_len > 24) && ((param_len - 8) & 0xF)) { 4502 trunc_param_len -= (param_len - 8) & 0xF; 4503 dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR); 4504 dprint_scsi_err(mrioc, 4505 "truncating param_len from (%d) to (%d)\n", 4506 param_len, trunc_param_len); 4507 put_unaligned_be16(trunc_param_len, scmd->cmnd + 7); 4508 dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR); 4509 } 4510 return false; 4511 } 4512 4513 if (!param_len) { 4514 ioc_warn(mrioc, 4515 "%s: cdb received with zero parameter length\n", 4516 __func__); 4517 scsi_print_command(scmd); 4518 scmd->result = DID_OK << 16; 4519 scsi_done(scmd); 4520 return true; 4521 } 4522 4523 if (param_len < 24) { 4524 ioc_warn(mrioc, 4525 "%s: cdb received with invalid param_len: %d\n", 4526 __func__, param_len); 4527 scsi_print_command(scmd); 4528 scmd->result = SAM_STAT_CHECK_CONDITION; 4529 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 4530 0x1A, 0); 4531 scsi_done(scmd); 4532 return true; 4533 } 4534 if (param_len != scsi_bufflen(scmd)) { 4535 ioc_warn(mrioc, 4536 "%s: cdb received with param_len: %d bufflen: %d\n", 4537 __func__, param_len, scsi_bufflen(scmd)); 4538 scsi_print_command(scmd); 4539 scmd->result = SAM_STAT_CHECK_CONDITION; 4540 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 4541 0x1A, 0); 4542 scsi_done(scmd); 4543 return true; 4544 } 4545 buf = kzalloc(scsi_bufflen(scmd), GFP_ATOMIC); 4546 if (!buf) { 4547 scsi_print_command(scmd); 4548 scmd->result = SAM_STAT_CHECK_CONDITION; 4549 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 4550 0x55, 0x03); 4551 scsi_done(scmd); 4552 return true; 4553 } 4554 scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd)); 4555 desc_len = get_unaligned_be16(&buf[2]); 4556 4557 if (desc_len < 16) { 4558 ioc_warn(mrioc, 4559 "%s: Invalid descriptor length in param list: %d\n", 4560 __func__, desc_len); 4561 scsi_print_command(scmd); 4562 scmd->result = SAM_STAT_CHECK_CONDITION; 4563 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 4564 0x26, 0); 4565 scsi_done(scmd); 4566 kfree(buf); 4567 return true; 4568 } 4569 4570 if (param_len > (desc_len + 8)) { 4571 trunc_param_len = desc_len + 8; 4572 scsi_print_command(scmd); 4573 dprint_scsi_err(mrioc, 4574 "truncating param_len(%d) to desc_len+8(%d)\n", 4575 param_len, trunc_param_len); 4576 put_unaligned_be16(trunc_param_len, scmd->cmnd + 7); 4577 scsi_print_command(scmd); 4578 } 4579 4580 kfree(buf); 4581 return false; 4582 } 4583 4584 /** 4585 * mpi3mr_allow_scmd_to_fw - Command is allowed during shutdown 4586 * @scmd: SCSI Command reference 4587 * 4588 * Checks whether a cdb is allowed during shutdown or not. 4589 * 4590 * Return: TRUE for allowed commands, FALSE otherwise. 4591 */ 4592 4593 inline bool mpi3mr_allow_scmd_to_fw(struct scsi_cmnd *scmd) 4594 { 4595 switch (scmd->cmnd[0]) { 4596 case SYNCHRONIZE_CACHE: 4597 case START_STOP: 4598 return true; 4599 default: 4600 return false; 4601 } 4602 } 4603 4604 /** 4605 * mpi3mr_qcmd - I/O request despatcher 4606 * @shost: SCSI Host reference 4607 * @scmd: SCSI Command reference 4608 * 4609 * Issues the SCSI Command as an MPI3 request. 
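 *
 * Large transfers are tracked for throttling in 512-byte block units
 * (data_len_blks = scsi_bufflen(scmd) >> 9); a 1 MiB request, for
 * example, counts as 2048 blocks when compared against
 * io_throttle_data_length.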
4610 * 4611 * Return: 0 on successful queueing of the request or if the 4612 * request is completed with failure. 4613 * SCSI_MLQUEUE_DEVICE_BUSY when the device is busy. 4614 * SCSI_MLQUEUE_HOST_BUSY when the host queue is full. 4615 */ 4616 static int mpi3mr_qcmd(struct Scsi_Host *shost, 4617 struct scsi_cmnd *scmd) 4618 { 4619 struct mpi3mr_ioc *mrioc = shost_priv(shost); 4620 struct mpi3mr_stgt_priv_data *stgt_priv_data; 4621 struct mpi3mr_sdev_priv_data *sdev_priv_data; 4622 struct scmd_priv *scmd_priv_data = NULL; 4623 struct mpi3_scsi_io_request *scsiio_req = NULL; 4624 struct op_req_qinfo *op_req_q = NULL; 4625 int retval = 0; 4626 u16 dev_handle; 4627 u16 host_tag; 4628 u32 scsiio_flags = 0, data_len_blks = 0; 4629 struct request *rq = scsi_cmd_to_rq(scmd); 4630 int iprio_class; 4631 u8 is_pcie_dev = 0; 4632 u32 tracked_io_sz = 0; 4633 u32 ioc_pend_data_len = 0, tg_pend_data_len = 0; 4634 struct mpi3mr_throttle_group_info *tg = NULL; 4635 4636 if (mrioc->unrecoverable) { 4637 scmd->result = DID_ERROR << 16; 4638 scsi_done(scmd); 4639 goto out; 4640 } 4641 4642 sdev_priv_data = scmd->device->hostdata; 4643 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) { 4644 scmd->result = DID_NO_CONNECT << 16; 4645 scsi_done(scmd); 4646 goto out; 4647 } 4648 4649 if (mrioc->stop_drv_processing && 4650 !(mpi3mr_allow_scmd_to_fw(scmd))) { 4651 scmd->result = DID_NO_CONNECT << 16; 4652 scsi_done(scmd); 4653 goto out; 4654 } 4655 4656 stgt_priv_data = sdev_priv_data->tgt_priv_data; 4657 dev_handle = stgt_priv_data->dev_handle; 4658 4659 /* Avoid error handling escalation when device is removed or blocked */ 4660 4661 if (scmd->device->host->shost_state == SHOST_RECOVERY && 4662 scmd->cmnd[0] == TEST_UNIT_READY && 4663 (stgt_priv_data->dev_removed || (dev_handle == MPI3MR_INVALID_DEV_HANDLE))) { 4664 scsi_build_sense(scmd, 0, UNIT_ATTENTION, 0x29, 0x07); 4665 scsi_done(scmd); 4666 goto out; 4667 } 4668 4669 if (mrioc->reset_in_progress) { 4670 retval = SCSI_MLQUEUE_HOST_BUSY; 4671 goto out; 4672 } 4673 4674 if (atomic_read(&stgt_priv_data->block_io)) { 4675 if (mrioc->stop_drv_processing) { 4676 scmd->result = DID_NO_CONNECT << 16; 4677 scsi_done(scmd); 4678 goto out; 4679 } 4680 retval = SCSI_MLQUEUE_DEVICE_BUSY; 4681 goto out; 4682 } 4683 4684 if (dev_handle == MPI3MR_INVALID_DEV_HANDLE) { 4685 scmd->result = DID_NO_CONNECT << 16; 4686 scsi_done(scmd); 4687 goto out; 4688 } 4689 if (stgt_priv_data->dev_removed) { 4690 scmd->result = DID_NO_CONNECT << 16; 4691 scsi_done(scmd); 4692 goto out; 4693 } 4694 4695 if (stgt_priv_data->dev_type == MPI3_DEVICE_DEVFORM_PCIE) 4696 is_pcie_dev = 1; 4697 if ((scmd->cmnd[0] == UNMAP) && is_pcie_dev && 4698 (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) && 4699 mpi3mr_check_return_unmap(mrioc, scmd)) 4700 goto out; 4701 4702 host_tag = mpi3mr_host_tag_for_scmd(mrioc, scmd); 4703 if (host_tag == MPI3MR_HOSTTAG_INVALID) { 4704 scmd->result = DID_ERROR << 16; 4705 scsi_done(scmd); 4706 goto out; 4707 } 4708 4709 if (scmd->sc_data_direction == DMA_FROM_DEVICE) 4710 scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_READ; 4711 else if (scmd->sc_data_direction == DMA_TO_DEVICE) 4712 scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE; 4713 else 4714 scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER; 4715 4716 scsiio_flags |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ; 4717 4718 if (sdev_priv_data->ncq_prio_enable) { 4719 iprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq)); 4720 if (iprio_class == IOPRIO_CLASS_RT) 4721 scsiio_flags |= 1 << 
MPI3_SCSIIO_FLAGS_CMDPRI_SHIFT; 4722 } 4723 4724 if (scmd->cmd_len > 16) 4725 scsiio_flags |= MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16; 4726 4727 scmd_priv_data = scsi_cmd_priv(scmd); 4728 memset(scmd_priv_data->mpi3mr_scsiio_req, 0, MPI3MR_ADMIN_REQ_FRAME_SZ); 4729 scsiio_req = (struct mpi3_scsi_io_request *)scmd_priv_data->mpi3mr_scsiio_req; 4730 scsiio_req->function = MPI3_FUNCTION_SCSI_IO; 4731 scsiio_req->host_tag = cpu_to_le16(host_tag); 4732 4733 mpi3mr_setup_eedp(mrioc, scmd, scsiio_req); 4734 4735 memcpy(scsiio_req->cdb.cdb32, scmd->cmnd, scmd->cmd_len); 4736 scsiio_req->data_length = cpu_to_le32(scsi_bufflen(scmd)); 4737 scsiio_req->dev_handle = cpu_to_le16(dev_handle); 4738 scsiio_req->flags = cpu_to_le32(scsiio_flags); 4739 int_to_scsilun(sdev_priv_data->lun_id, 4740 (struct scsi_lun *)scsiio_req->lun); 4741 4742 if (mpi3mr_build_sg_scmd(mrioc, scmd, scsiio_req)) { 4743 mpi3mr_clear_scmd_priv(mrioc, scmd); 4744 retval = SCSI_MLQUEUE_HOST_BUSY; 4745 goto out; 4746 } 4747 op_req_q = &mrioc->req_qinfo[scmd_priv_data->req_q_idx]; 4748 data_len_blks = scsi_bufflen(scmd) >> 9; 4749 if ((data_len_blks >= mrioc->io_throttle_data_length) && 4750 stgt_priv_data->io_throttle_enabled) { 4751 tracked_io_sz = data_len_blks; 4752 tg = stgt_priv_data->throttle_group; 4753 if (tg) { 4754 ioc_pend_data_len = atomic_add_return(data_len_blks, 4755 &mrioc->pend_large_data_sz); 4756 tg_pend_data_len = atomic_add_return(data_len_blks, 4757 &tg->pend_large_data_sz); 4758 if (!tg->io_divert && ((ioc_pend_data_len >= 4759 mrioc->io_throttle_high) || 4760 (tg_pend_data_len >= tg->high))) { 4761 tg->io_divert = 1; 4762 tg->need_qd_reduction = 1; 4763 mpi3mr_set_io_divert_for_all_vd_in_tg(mrioc, 4764 tg, 1); 4765 mpi3mr_queue_qd_reduction_event(mrioc, tg); 4766 } 4767 } else { 4768 ioc_pend_data_len = atomic_add_return(data_len_blks, 4769 &mrioc->pend_large_data_sz); 4770 if (ioc_pend_data_len >= mrioc->io_throttle_high) 4771 stgt_priv_data->io_divert = 1; 4772 } 4773 } 4774 4775 if (stgt_priv_data->io_divert) { 4776 scsiio_req->msg_flags |= 4777 MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE; 4778 scsiio_flags |= MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING; 4779 } 4780 scsiio_req->flags = cpu_to_le32(scsiio_flags); 4781 4782 if (mpi3mr_op_request_post(mrioc, op_req_q, 4783 scmd_priv_data->mpi3mr_scsiio_req)) { 4784 mpi3mr_clear_scmd_priv(mrioc, scmd); 4785 retval = SCSI_MLQUEUE_HOST_BUSY; 4786 if (tracked_io_sz) { 4787 atomic_sub(tracked_io_sz, &mrioc->pend_large_data_sz); 4788 if (tg) 4789 atomic_sub(tracked_io_sz, 4790 &tg->pend_large_data_sz); 4791 } 4792 goto out; 4793 } 4794 4795 out: 4796 return retval; 4797 } 4798 4799 static const struct scsi_host_template mpi3mr_driver_template = { 4800 .module = THIS_MODULE, 4801 .name = "MPI3 Storage Controller", 4802 .proc_name = MPI3MR_DRIVER_NAME, 4803 .queuecommand = mpi3mr_qcmd, 4804 .target_alloc = mpi3mr_target_alloc, 4805 .slave_alloc = mpi3mr_slave_alloc, 4806 .slave_configure = mpi3mr_slave_configure, 4807 .target_destroy = mpi3mr_target_destroy, 4808 .slave_destroy = mpi3mr_slave_destroy, 4809 .scan_finished = mpi3mr_scan_finished, 4810 .scan_start = mpi3mr_scan_start, 4811 .change_queue_depth = mpi3mr_change_queue_depth, 4812 .eh_device_reset_handler = mpi3mr_eh_dev_reset, 4813 .eh_target_reset_handler = mpi3mr_eh_target_reset, 4814 .eh_host_reset_handler = mpi3mr_eh_host_reset, 4815 .bios_param = mpi3mr_bios_param, 4816 .map_queues = mpi3mr_map_queues, 4817 .mq_poll = mpi3mr_blk_mq_poll, 4818 .no_write_same = 1, 4819 .can_queue = 1, 4820 .this_id = -1, 
4821 .sg_tablesize = MPI3MR_SG_DEPTH,
4822 /* max xfer supported is 1M (2K in 512-byte sectors)
4823 */
4824 .max_sectors = 2048,
4825 .cmd_per_lun = MPI3MR_MAX_CMDS_LUN,
4826 .max_segment_size = 0xffffffff,
4827 .track_queue_depth = 1,
4828 .cmd_size = sizeof(struct scmd_priv),
4829 .shost_groups = mpi3mr_host_groups,
4830 .sdev_groups = mpi3mr_dev_groups,
4831 };
4832
4833 /**
4834 * mpi3mr_init_drv_cmd - Initialize internal command tracker
4835 * @cmdptr: Internal command tracker
4836 * @host_tag: Host tag used for the specific command
4837 *
4838 * Initialize the internal command tracker structure with
4839 * specified host tag.
4840 *
4841 * Return: Nothing.
4842 */
4843 static inline void mpi3mr_init_drv_cmd(struct mpi3mr_drv_cmd *cmdptr,
4844 u16 host_tag)
4845 {
4846 mutex_init(&cmdptr->mutex);
4847 cmdptr->reply = NULL;
4848 cmdptr->state = MPI3MR_CMD_NOTUSED;
4849 cmdptr->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
4850 cmdptr->host_tag = host_tag;
4851 }
4852
4853 /**
4854 * osintfc_mrioc_security_status - Check controller secure status
4855 * @pdev: PCI device instance
4856 *
4857 * Read the Device Serial Number capability from PCI config
4858 * space and decide whether the controller is secure or not.
4859 *
4860 * Return: 0 on success, non-zero on failure.
4861 */
4862 static int
4863 osintfc_mrioc_security_status(struct pci_dev *pdev)
4864 {
4865 u32 cap_data;
4866 int base;
4867 u32 ctlr_status;
4868 u32 debug_status;
4869 int retval = 0;
4870
4871 base = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
4872 if (!base) {
4873 dev_err(&pdev->dev,
4874 "%s: PCI_EXT_CAP_ID_DSN is not supported\n", __func__);
4875 return -1;
4876 }
4877
4878 pci_read_config_dword(pdev, base + 4, &cap_data);
4879
4880 debug_status = cap_data & MPI3MR_CTLR_SECURE_DBG_STATUS_MASK;
4881 ctlr_status = cap_data & MPI3MR_CTLR_SECURITY_STATUS_MASK;
4882
4883 switch (ctlr_status) {
4884 case MPI3MR_INVALID_DEVICE:
4885 dev_err(&pdev->dev,
4886 "%s: Non secure ctlr (Invalid) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
4887 __func__, pdev->device, pdev->subsystem_vendor,
4888 pdev->subsystem_device);
4889 retval = -1;
4890 break;
4891 case MPI3MR_CONFIG_SECURE_DEVICE:
4892 if (!debug_status)
4893 dev_info(&pdev->dev,
4894 "%s: Config secure ctlr is detected\n",
4895 __func__);
4896 break;
4897 case MPI3MR_HARD_SECURE_DEVICE:
4898 break;
4899 case MPI3MR_TAMPERED_DEVICE:
4900 dev_err(&pdev->dev,
4901 "%s: Non secure ctlr (Tampered) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
4902 __func__, pdev->device, pdev->subsystem_vendor,
4903 pdev->subsystem_device);
4904 retval = -1;
4905 break;
4906 default:
4907 retval = -1;
4908 break;
4909 }
4910
4911 if (!retval && debug_status) {
4912 dev_err(&pdev->dev,
4913 "%s: Non secure ctlr (Secure Dbg) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
4914 __func__, pdev->device, pdev->subsystem_vendor,
4915 pdev->subsystem_device);
4916 retval = -1;
4917 }
4918
4919 return retval;
4920 }
4921
4922 /**
4923 * mpi3mr_probe - PCI probe callback
4924 * @pdev: PCI device instance
4925 * @id: PCI device ID details
4926 *
4927 * Controller initialization routine. Checks the security status
4928 * of the controller; if it is invalid or tampered, returns from
4929 * the probe without initializing the controller. Otherwise,
4930 * allocates the per adapter instance through shost_priv,
4931 * initializes controller specific data structures, initializes
4932 * the controller hardware and adds the shost to the SCSI subsystem.
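 *
 * The happy path is scsi_host_alloc() -> lock/list/command tracker
 * setup -> mpi3mr_setup_resources() -> mpi3mr_init_ioc() ->
 * scsi_add_host() -> scsi_scan_host(); each failure label unwinds, in
 * reverse order, only the steps completed before it.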
4933 * 4934 * Return: 0 on success, non-zero on failure. 4935 */ 4936 4937 static int 4938 mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id) 4939 { 4940 struct mpi3mr_ioc *mrioc = NULL; 4941 struct Scsi_Host *shost = NULL; 4942 int retval = 0, i; 4943 4944 if (osintfc_mrioc_security_status(pdev)) { 4945 warn_non_secure_ctlr = 1; 4946 return 1; /* For Invalid and Tampered device */ 4947 } 4948 4949 shost = scsi_host_alloc(&mpi3mr_driver_template, 4950 sizeof(struct mpi3mr_ioc)); 4951 if (!shost) { 4952 retval = -ENODEV; 4953 goto shost_failed; 4954 } 4955 4956 mrioc = shost_priv(shost); 4957 mrioc->id = mrioc_ids++; 4958 sprintf(mrioc->driver_name, "%s", MPI3MR_DRIVER_NAME); 4959 sprintf(mrioc->name, "%s%d", mrioc->driver_name, mrioc->id); 4960 INIT_LIST_HEAD(&mrioc->list); 4961 spin_lock(&mrioc_list_lock); 4962 list_add_tail(&mrioc->list, &mrioc_list); 4963 spin_unlock(&mrioc_list_lock); 4964 4965 spin_lock_init(&mrioc->admin_req_lock); 4966 spin_lock_init(&mrioc->reply_free_queue_lock); 4967 spin_lock_init(&mrioc->sbq_lock); 4968 spin_lock_init(&mrioc->fwevt_lock); 4969 spin_lock_init(&mrioc->tgtdev_lock); 4970 spin_lock_init(&mrioc->watchdog_lock); 4971 spin_lock_init(&mrioc->chain_buf_lock); 4972 spin_lock_init(&mrioc->sas_node_lock); 4973 4974 INIT_LIST_HEAD(&mrioc->fwevt_list); 4975 INIT_LIST_HEAD(&mrioc->tgtdev_list); 4976 INIT_LIST_HEAD(&mrioc->delayed_rmhs_list); 4977 INIT_LIST_HEAD(&mrioc->delayed_evtack_cmds_list); 4978 INIT_LIST_HEAD(&mrioc->sas_expander_list); 4979 INIT_LIST_HEAD(&mrioc->hba_port_table_list); 4980 INIT_LIST_HEAD(&mrioc->enclosure_list); 4981 4982 mutex_init(&mrioc->reset_mutex); 4983 mpi3mr_init_drv_cmd(&mrioc->init_cmds, MPI3MR_HOSTTAG_INITCMDS); 4984 mpi3mr_init_drv_cmd(&mrioc->host_tm_cmds, MPI3MR_HOSTTAG_BLK_TMS); 4985 mpi3mr_init_drv_cmd(&mrioc->bsg_cmds, MPI3MR_HOSTTAG_BSG_CMDS); 4986 mpi3mr_init_drv_cmd(&mrioc->cfg_cmds, MPI3MR_HOSTTAG_CFG_CMDS); 4987 mpi3mr_init_drv_cmd(&mrioc->transport_cmds, 4988 MPI3MR_HOSTTAG_TRANSPORT_CMDS); 4989 4990 for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) 4991 mpi3mr_init_drv_cmd(&mrioc->dev_rmhs_cmds[i], 4992 MPI3MR_HOSTTAG_DEVRMCMD_MIN + i); 4993 4994 for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) 4995 mpi3mr_init_drv_cmd(&mrioc->evtack_cmds[i], 4996 MPI3MR_HOSTTAG_EVTACKCMD_MIN + i); 4997 4998 if (pdev->revision) 4999 mrioc->enable_segqueue = true; 5000 5001 init_waitqueue_head(&mrioc->reset_waitq); 5002 mrioc->logging_level = logging_level; 5003 mrioc->shost = shost; 5004 mrioc->pdev = pdev; 5005 mrioc->stop_bsgs = 1; 5006 5007 /* init shost parameters */ 5008 shost->max_cmd_len = MPI3MR_MAX_CDB_LENGTH; 5009 shost->max_lun = -1; 5010 shost->unique_id = mrioc->id; 5011 5012 shost->max_channel = 0; 5013 shost->max_id = 0xFFFFFFFF; 5014 5015 shost->host_tagset = 1; 5016 5017 if (prot_mask >= 0) 5018 scsi_host_set_prot(shost, prot_mask); 5019 else { 5020 prot_mask = SHOST_DIF_TYPE1_PROTECTION 5021 | SHOST_DIF_TYPE2_PROTECTION 5022 | SHOST_DIF_TYPE3_PROTECTION; 5023 scsi_host_set_prot(shost, prot_mask); 5024 } 5025 5026 ioc_info(mrioc, 5027 "%s :host protection capabilities enabled %s%s%s%s%s%s%s\n", 5028 __func__, 5029 (prot_mask & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "", 5030 (prot_mask & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "", 5031 (prot_mask & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "", 5032 (prot_mask & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "", 5033 (prot_mask & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "", 5034 (prot_mask & SHOST_DIX_TYPE2_PROTECTION) ? 
" DIX2" : "", 5035 (prot_mask & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : ""); 5036 5037 if (prot_guard_mask) 5038 scsi_host_set_guard(shost, (prot_guard_mask & 3)); 5039 else 5040 scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); 5041 5042 snprintf(mrioc->fwevt_worker_name, sizeof(mrioc->fwevt_worker_name), 5043 "%s%d_fwevt_wrkr", mrioc->driver_name, mrioc->id); 5044 mrioc->fwevt_worker_thread = alloc_ordered_workqueue( 5045 mrioc->fwevt_worker_name, 0); 5046 if (!mrioc->fwevt_worker_thread) { 5047 ioc_err(mrioc, "failure at %s:%d/%s()!\n", 5048 __FILE__, __LINE__, __func__); 5049 retval = -ENODEV; 5050 goto fwevtthread_failed; 5051 } 5052 5053 mrioc->is_driver_loading = 1; 5054 mrioc->cpu_count = num_online_cpus(); 5055 if (mpi3mr_setup_resources(mrioc)) { 5056 ioc_err(mrioc, "setup resources failed\n"); 5057 retval = -ENODEV; 5058 goto resource_alloc_failed; 5059 } 5060 if (mpi3mr_init_ioc(mrioc)) { 5061 ioc_err(mrioc, "initializing IOC failed\n"); 5062 retval = -ENODEV; 5063 goto init_ioc_failed; 5064 } 5065 5066 shost->nr_hw_queues = mrioc->num_op_reply_q; 5067 if (mrioc->active_poll_qcount) 5068 shost->nr_maps = 3; 5069 5070 shost->can_queue = mrioc->max_host_ios; 5071 shost->sg_tablesize = MPI3MR_SG_DEPTH; 5072 shost->max_id = mrioc->facts.max_perids + 1; 5073 5074 retval = scsi_add_host(shost, &pdev->dev); 5075 if (retval) { 5076 ioc_err(mrioc, "failure at %s:%d/%s()!\n", 5077 __FILE__, __LINE__, __func__); 5078 goto addhost_failed; 5079 } 5080 5081 scsi_scan_host(shost); 5082 mpi3mr_bsg_init(mrioc); 5083 return retval; 5084 5085 addhost_failed: 5086 mpi3mr_stop_watchdog(mrioc); 5087 mpi3mr_cleanup_ioc(mrioc); 5088 init_ioc_failed: 5089 mpi3mr_free_mem(mrioc); 5090 mpi3mr_cleanup_resources(mrioc); 5091 resource_alloc_failed: 5092 destroy_workqueue(mrioc->fwevt_worker_thread); 5093 fwevtthread_failed: 5094 spin_lock(&mrioc_list_lock); 5095 list_del(&mrioc->list); 5096 spin_unlock(&mrioc_list_lock); 5097 scsi_host_put(shost); 5098 shost_failed: 5099 return retval; 5100 } 5101 5102 /** 5103 * mpi3mr_remove - PCI remove callback 5104 * @pdev: PCI device instance 5105 * 5106 * Cleanup the IOC by issuing MUR and shutdown notification. 5107 * Free up all memory and resources associated with the 5108 * controllerand target devices, unregister the shost. 5109 * 5110 * Return: Nothing. 
5111 */ 5112 static void mpi3mr_remove(struct pci_dev *pdev) 5113 { 5114 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5115 struct mpi3mr_ioc *mrioc; 5116 struct workqueue_struct *wq; 5117 unsigned long flags; 5118 struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next; 5119 struct mpi3mr_hba_port *port, *hba_port_next; 5120 struct mpi3mr_sas_node *sas_expander, *sas_expander_next; 5121 5122 if (!shost) 5123 return; 5124 5125 mrioc = shost_priv(shost); 5126 while (mrioc->reset_in_progress || mrioc->is_driver_loading) 5127 ssleep(1); 5128 5129 if (!pci_device_is_present(mrioc->pdev)) { 5130 mrioc->unrecoverable = 1; 5131 mpi3mr_flush_cmds_for_unrecovered_controller(mrioc); 5132 } 5133 5134 mpi3mr_bsg_exit(mrioc); 5135 mrioc->stop_drv_processing = 1; 5136 mpi3mr_cleanup_fwevt_list(mrioc); 5137 spin_lock_irqsave(&mrioc->fwevt_lock, flags); 5138 wq = mrioc->fwevt_worker_thread; 5139 mrioc->fwevt_worker_thread = NULL; 5140 spin_unlock_irqrestore(&mrioc->fwevt_lock, flags); 5141 if (wq) 5142 destroy_workqueue(wq); 5143 5144 if (mrioc->sas_transport_enabled) 5145 sas_remove_host(shost); 5146 else 5147 scsi_remove_host(shost); 5148 5149 list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list, 5150 list) { 5151 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); 5152 mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true); 5153 mpi3mr_tgtdev_put(tgtdev); 5154 } 5155 mpi3mr_stop_watchdog(mrioc); 5156 mpi3mr_cleanup_ioc(mrioc); 5157 mpi3mr_free_mem(mrioc); 5158 mpi3mr_cleanup_resources(mrioc); 5159 5160 spin_lock_irqsave(&mrioc->sas_node_lock, flags); 5161 list_for_each_entry_safe_reverse(sas_expander, sas_expander_next, 5162 &mrioc->sas_expander_list, list) { 5163 spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); 5164 mpi3mr_expander_node_remove(mrioc, sas_expander); 5165 spin_lock_irqsave(&mrioc->sas_node_lock, flags); 5166 } 5167 list_for_each_entry_safe(port, hba_port_next, &mrioc->hba_port_table_list, list) { 5168 ioc_info(mrioc, 5169 "removing hba_port entry: %p port: %d from hba_port list\n", 5170 port, port->port_id); 5171 list_del(&port->list); 5172 kfree(port); 5173 } 5174 spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); 5175 5176 if (mrioc->sas_hba.num_phys) { 5177 kfree(mrioc->sas_hba.phy); 5178 mrioc->sas_hba.phy = NULL; 5179 mrioc->sas_hba.num_phys = 0; 5180 } 5181 5182 spin_lock(&mrioc_list_lock); 5183 list_del(&mrioc->list); 5184 spin_unlock(&mrioc_list_lock); 5185 5186 scsi_host_put(shost); 5187 } 5188 5189 /** 5190 * mpi3mr_shutdown - PCI shutdown callback 5191 * @pdev: PCI device instance 5192 * 5193 * Free up all memory and resources associated with the 5194 * controller 5195 * 5196 * Return: Nothing. 
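 *
 * Note: this is a trimmed-down mpi3mr_remove(); it quiesces firmware
 * event handling and cleans up the IOC and resources but does not
 * unregister the shost or remove the target devices.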
5197 */ 5198 static void mpi3mr_shutdown(struct pci_dev *pdev) 5199 { 5200 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5201 struct mpi3mr_ioc *mrioc; 5202 struct workqueue_struct *wq; 5203 unsigned long flags; 5204 5205 if (!shost) 5206 return; 5207 5208 mrioc = shost_priv(shost); 5209 while (mrioc->reset_in_progress || mrioc->is_driver_loading) 5210 ssleep(1); 5211 5212 mrioc->stop_drv_processing = 1; 5213 mpi3mr_cleanup_fwevt_list(mrioc); 5214 spin_lock_irqsave(&mrioc->fwevt_lock, flags); 5215 wq = mrioc->fwevt_worker_thread; 5216 mrioc->fwevt_worker_thread = NULL; 5217 spin_unlock_irqrestore(&mrioc->fwevt_lock, flags); 5218 if (wq) 5219 destroy_workqueue(wq); 5220 5221 mpi3mr_stop_watchdog(mrioc); 5222 mpi3mr_cleanup_ioc(mrioc); 5223 mpi3mr_cleanup_resources(mrioc); 5224 } 5225 5226 /** 5227 * mpi3mr_suspend - PCI power management suspend callback 5228 * @dev: Device struct 5229 * 5230 * Change the power state to the given value and cleanup the IOC 5231 * by issuing MUR and shutdown notification 5232 * 5233 * Return: 0 always. 5234 */ 5235 static int __maybe_unused 5236 mpi3mr_suspend(struct device *dev) 5237 { 5238 struct pci_dev *pdev = to_pci_dev(dev); 5239 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5240 struct mpi3mr_ioc *mrioc; 5241 5242 if (!shost) 5243 return 0; 5244 5245 mrioc = shost_priv(shost); 5246 while (mrioc->reset_in_progress || mrioc->is_driver_loading) 5247 ssleep(1); 5248 mrioc->stop_drv_processing = 1; 5249 mpi3mr_cleanup_fwevt_list(mrioc); 5250 scsi_block_requests(shost); 5251 mpi3mr_stop_watchdog(mrioc); 5252 mpi3mr_cleanup_ioc(mrioc); 5253 5254 ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state\n", 5255 pdev, pci_name(pdev)); 5256 mpi3mr_cleanup_resources(mrioc); 5257 5258 return 0; 5259 } 5260 5261 /** 5262 * mpi3mr_resume - PCI power management resume callback 5263 * @dev: Device struct 5264 * 5265 * Restore the power state to D0 and reinitialize the controller 5266 * and resume I/O operations to the target devices 5267 * 5268 * Return: 0 on success, non-zero on failure 5269 */ 5270 static int __maybe_unused 5271 mpi3mr_resume(struct device *dev) 5272 { 5273 struct pci_dev *pdev = to_pci_dev(dev); 5274 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5275 struct mpi3mr_ioc *mrioc; 5276 pci_power_t device_state = pdev->current_state; 5277 int r; 5278 5279 if (!shost) 5280 return 0; 5281 5282 mrioc = shost_priv(shost); 5283 5284 ioc_info(mrioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n", 5285 pdev, pci_name(pdev), device_state); 5286 mrioc->pdev = pdev; 5287 mrioc->cpu_count = num_online_cpus(); 5288 r = mpi3mr_setup_resources(mrioc); 5289 if (r) { 5290 ioc_info(mrioc, "%s: Setup resources failed[%d]\n", 5291 __func__, r); 5292 return r; 5293 } 5294 5295 mrioc->stop_drv_processing = 0; 5296 mpi3mr_invalidate_devhandles(mrioc); 5297 mpi3mr_free_enclosure_list(mrioc); 5298 mpi3mr_memset_buffers(mrioc); 5299 r = mpi3mr_reinit_ioc(mrioc, 1); 5300 if (r) { 5301 ioc_err(mrioc, "resuming controller failed[%d]\n", r); 5302 return r; 5303 } 5304 ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME); 5305 scsi_unblock_requests(shost); 5306 mrioc->device_refresh_on = 0; 5307 mpi3mr_start_watchdog(mrioc); 5308 5309 return 0; 5310 } 5311 5312 static const struct pci_device_id mpi3mr_pci_id_table[] = { 5313 { 5314 PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM, 5315 MPI3_MFGPAGE_DEVID_SAS4116, PCI_ANY_ID, PCI_ANY_ID) 5316 }, 5317 { 0 } 5318 }; 5319 MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table); 5320 5321 static SIMPLE_DEV_PM_OPS(mpi3mr_pm_ops, 
mpi3mr_suspend, mpi3mr_resume); 5322 5323 static struct pci_driver mpi3mr_pci_driver = { 5324 .name = MPI3MR_DRIVER_NAME, 5325 .id_table = mpi3mr_pci_id_table, 5326 .probe = mpi3mr_probe, 5327 .remove = mpi3mr_remove, 5328 .shutdown = mpi3mr_shutdown, 5329 .driver.pm = &mpi3mr_pm_ops, 5330 }; 5331 5332 static ssize_t event_counter_show(struct device_driver *dd, char *buf) 5333 { 5334 return sprintf(buf, "%llu\n", atomic64_read(&event_counter)); 5335 } 5336 static DRIVER_ATTR_RO(event_counter); 5337 5338 static int __init mpi3mr_init(void) 5339 { 5340 int ret_val; 5341 5342 pr_info("Loading %s version %s\n", MPI3MR_DRIVER_NAME, 5343 MPI3MR_DRIVER_VERSION); 5344 5345 mpi3mr_transport_template = 5346 sas_attach_transport(&mpi3mr_transport_functions); 5347 if (!mpi3mr_transport_template) { 5348 pr_err("%s failed to load due to sas transport attach failure\n", 5349 MPI3MR_DRIVER_NAME); 5350 return -ENODEV; 5351 } 5352 5353 ret_val = pci_register_driver(&mpi3mr_pci_driver); 5354 if (ret_val) { 5355 pr_err("%s failed to load due to pci register driver failure\n", 5356 MPI3MR_DRIVER_NAME); 5357 goto err_pci_reg_fail; 5358 } 5359 5360 ret_val = driver_create_file(&mpi3mr_pci_driver.driver, 5361 &driver_attr_event_counter); 5362 if (ret_val) 5363 goto err_event_counter; 5364 5365 return ret_val; 5366 5367 err_event_counter: 5368 pci_unregister_driver(&mpi3mr_pci_driver); 5369 5370 err_pci_reg_fail: 5371 sas_release_transport(mpi3mr_transport_template); 5372 return ret_val; 5373 } 5374 5375 static void __exit mpi3mr_exit(void) 5376 { 5377 if (warn_non_secure_ctlr) 5378 pr_warn( 5379 "Unloading %s version %s while managing a non secure controller\n", 5380 MPI3MR_DRIVER_NAME, MPI3MR_DRIVER_VERSION); 5381 else 5382 pr_info("Unloading %s version %s\n", MPI3MR_DRIVER_NAME, 5383 MPI3MR_DRIVER_VERSION); 5384 5385 driver_remove_file(&mpi3mr_pci_driver.driver, 5386 &driver_attr_event_counter); 5387 pci_unregister_driver(&mpi3mr_pci_driver); 5388 sas_release_transport(mpi3mr_transport_template); 5389 } 5390 5391 module_init(mpi3mr_init); 5392 module_exit(mpi3mr_exit); 5393