// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for Broadcom MPI3 Storage Controllers
 *
 * Copyright (C) 2017-2023 Broadcom Inc.
 *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
 *
 */

#include "mpi3mr.h"

/* global driver scope variables */
LIST_HEAD(mrioc_list);
DEFINE_SPINLOCK(mrioc_list_lock);
static int mrioc_ids;
static int warn_non_secure_ctlr;
atomic64_t event_counter;

MODULE_AUTHOR(MPI3MR_DRIVER_AUTHOR);
MODULE_DESCRIPTION(MPI3MR_DRIVER_DESC);
MODULE_LICENSE(MPI3MR_DRIVER_LICENSE);
MODULE_VERSION(MPI3MR_DRIVER_VERSION);

/* Module parameters */
int prot_mask = -1;
module_param(prot_mask, int, 0);
MODULE_PARM_DESC(prot_mask, "Host protection capabilities mask, def=0x07");

static int prot_guard_mask = 3;
module_param(prot_guard_mask, int, 0);
MODULE_PARM_DESC(prot_guard_mask, "Host protection guard mask, def=3");
static int logging_level;
module_param(logging_level, int, 0);
MODULE_PARM_DESC(logging_level,
	"bits for enabling additional logging info (default=0)");
static int max_sgl_entries = MPI3MR_DEFAULT_SGL_ENTRIES;
module_param(max_sgl_entries, int, 0444);
MODULE_PARM_DESC(max_sgl_entries,
	"Preferred max number of SG entries to be used for a single I/O\n"
	"The actual value will be determined by the driver\n"
	"(Minimum=256, Maximum=2048, default=256)");

/* Forward declarations */
static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
	struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx);

#define MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION	(0xFFFF)

#define MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH	(0xFFFE)

/**
 * mpi3mr_host_tag_for_scmd - Get host tag for a scmd
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 *
 * Calculate the host tag based on block tag for a given scmd.
 *
 * Return: Valid host tag or MPI3MR_HOSTTAG_INVALID.
 */
static u16 mpi3mr_host_tag_for_scmd(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd)
{
	struct scmd_priv *priv = NULL;
	u32 unique_tag;
	u16 host_tag, hw_queue;

	unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));

	hw_queue = blk_mq_unique_tag_to_hwq(unique_tag);
	if (hw_queue >= mrioc->num_op_reply_q)
		return MPI3MR_HOSTTAG_INVALID;
	host_tag = blk_mq_unique_tag_to_tag(unique_tag);

	if (WARN_ON(host_tag >= mrioc->max_host_ios))
		return MPI3MR_HOSTTAG_INVALID;

	priv = scsi_cmd_priv(scmd);
	/* host_tag 0 is invalid hence incrementing by 1 */
	priv->host_tag = host_tag + 1;
	priv->scmd = scmd;
	priv->in_lld_scope = 1;
	priv->req_q_idx = hw_queue;
	priv->meta_chain_idx = -1;
	priv->chain_idx = -1;
	priv->meta_sg_valid = 0;
	return priv->host_tag;
}
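/*
 * Note on the tag encoding used above, assuming the standard blk-mq
 * encoding: blk_mq_unique_tag() packs the hardware queue index into the
 * upper bits and the per-queue tag into the lower BLK_MQ_UNIQUE_TAG_BITS
 * (16) bits, so e.g. hw queue 2 with tag 5 yields unique_tag 0x20005.
 * The driver reserves host tag 0 as invalid, hence the "+ 1" when the
 * tag is stored in scmd_priv and the "- 1" when it is decoded below.
 */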
/**
 * mpi3mr_scmd_from_host_tag - Get SCSI command from host tag
 * @mrioc: Adapter instance reference
 * @host_tag: Host tag
 * @qidx: Operational queue index
 *
 * Identify the block tag from the host tag and queue index and
 * retrieve associated scsi command using scsi_host_find_tag().
 *
 * Return: SCSI command reference or NULL.
 */
static struct scsi_cmnd *mpi3mr_scmd_from_host_tag(
	struct mpi3mr_ioc *mrioc, u16 host_tag, u16 qidx)
{
	struct scsi_cmnd *scmd = NULL;
	struct scmd_priv *priv = NULL;
	u32 unique_tag = host_tag - 1;

	if (WARN_ON(host_tag > mrioc->max_host_ios))
		goto out;

	unique_tag |= (qidx << BLK_MQ_UNIQUE_TAG_BITS);

	scmd = scsi_host_find_tag(mrioc->shost, unique_tag);
	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			scmd = NULL;
	}
out:
	return scmd;
}

/**
 * mpi3mr_clear_scmd_priv - Cleanup SCSI command private data
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 *
 * Invalidate the SCSI command private data to mark the command
 * as no longer being in LLD scope.
 *
 * Return: Nothing.
 */
static void mpi3mr_clear_scmd_priv(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd)
{
	struct scmd_priv *priv = NULL;

	priv = scsi_cmd_priv(scmd);

	if (WARN_ON(priv->in_lld_scope == 0))
		return;
	priv->host_tag = MPI3MR_HOSTTAG_INVALID;
	priv->req_q_idx = 0xFFFF;
	priv->scmd = NULL;
	priv->in_lld_scope = 0;
	priv->meta_sg_valid = 0;
	if (priv->chain_idx >= 0) {
		clear_bit(priv->chain_idx, mrioc->chain_bitmap);
		priv->chain_idx = -1;
	}
	if (priv->meta_chain_idx >= 0) {
		clear_bit(priv->meta_chain_idx, mrioc->chain_bitmap);
		priv->meta_chain_idx = -1;
	}
}

static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
	struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc);
static void mpi3mr_fwevt_worker(struct work_struct *work);

/**
 * mpi3mr_fwevt_free - firmware event memory deallocator
 * @r: k reference pointer of the firmware event
 *
 * Free firmware event memory when no reference.
 */
static void mpi3mr_fwevt_free(struct kref *r)
{
	kfree(container_of(r, struct mpi3mr_fwevt, ref_count));
}

/**
 * mpi3mr_fwevt_get - k reference incrementor
 * @fwevt: Firmware event reference
 *
 * Increment firmware event reference count.
 */
static void mpi3mr_fwevt_get(struct mpi3mr_fwevt *fwevt)
{
	kref_get(&fwevt->ref_count);
}

/**
 * mpi3mr_fwevt_put - k reference decrementor
 * @fwevt: Firmware event reference
 *
 * Decrement firmware event reference count.
 */
static void mpi3mr_fwevt_put(struct mpi3mr_fwevt *fwevt)
{
	kref_put(&fwevt->ref_count, mpi3mr_fwevt_free);
}

/**
 * mpi3mr_alloc_fwevt - Allocate firmware event
 * @len: length of firmware event data to allocate
 *
 * Allocate firmware event with required length and initialize
 * the reference counter.
 *
 * Return: firmware event reference.
 */
static struct mpi3mr_fwevt *mpi3mr_alloc_fwevt(int len)
{
	struct mpi3mr_fwevt *fwevt;

	fwevt = kzalloc(sizeof(*fwevt) + len, GFP_ATOMIC);
	if (!fwevt)
		return NULL;

	kref_init(&fwevt->ref_count);
	return fwevt;
}
/**
 * mpi3mr_fwevt_add_to_list - Add firmware event to the list
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Add the given firmware event to the firmware event list.
 *
 * Return: Nothing.
 */
static void mpi3mr_fwevt_add_to_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	unsigned long flags;

	if (!mrioc->fwevt_worker_thread)
		return;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	/* get fwevt reference count while adding it to fwevt_list */
	mpi3mr_fwevt_get(fwevt);
	INIT_LIST_HEAD(&fwevt->list);
	list_add_tail(&fwevt->list, &mrioc->fwevt_list);
	INIT_WORK(&fwevt->work, mpi3mr_fwevt_worker);
	/* get fwevt reference count while enqueueing it to worker queue */
	mpi3mr_fwevt_get(fwevt);
	queue_work(mrioc->fwevt_worker_thread, &fwevt->work);
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
}

/**
 * mpi3mr_fwevt_del_from_list - Delete firmware event from list
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Delete the given firmware event from the firmware event list.
 *
 * Return: Nothing.
 */
static void mpi3mr_fwevt_del_from_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	unsigned long flags;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	if (!list_empty(&fwevt->list)) {
		list_del_init(&fwevt->list);
		/*
		 * Put fwevt reference count after
		 * removing it from fwevt_list
		 */
		mpi3mr_fwevt_put(fwevt);
	}
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
}

/**
 * mpi3mr_dequeue_fwevt - Dequeue firmware event from the list
 * @mrioc: Adapter instance reference
 *
 * Dequeue a firmware event from the firmware event list.
 *
 * Return: firmware event.
 */
static struct mpi3mr_fwevt *mpi3mr_dequeue_fwevt(
	struct mpi3mr_ioc *mrioc)
{
	unsigned long flags;
	struct mpi3mr_fwevt *fwevt = NULL;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	if (!list_empty(&mrioc->fwevt_list)) {
		fwevt = list_first_entry(&mrioc->fwevt_list,
		    struct mpi3mr_fwevt, list);
		list_del_init(&fwevt->list);
		/*
		 * Put fwevt reference count after
		 * removing it from fwevt_list
		 */
		mpi3mr_fwevt_put(fwevt);
	}
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);

	return fwevt;
}

/**
 * mpi3mr_cancel_work - cancel firmware event
 * @fwevt: fwevt object which needs to be canceled
 *
 * Return: Nothing.
 */
static void mpi3mr_cancel_work(struct mpi3mr_fwevt *fwevt)
{
	/*
	 * Wait on the fwevt to complete. If this returns 1, then
	 * the event was never executed.
	 *
	 * If it did execute, we wait for it to finish, and the put will
	 * happen from mpi3mr_process_fwevt()
	 */
	if (cancel_work_sync(&fwevt->work)) {
		/*
		 * Put fwevt reference count after
		 * dequeuing it from worker queue
		 */
		mpi3mr_fwevt_put(fwevt);
		/*
		 * Put fwevt reference count to neutralize
		 * kref_init increment
		 */
		mpi3mr_fwevt_put(fwevt);
	}
}
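/*
 * Reference-count lifecycle of a firmware event (a summary of the helpers
 * above): mpi3mr_alloc_fwevt() starts the kref at 1; adding the event to
 * fwevt_list and queueing its work each take one more reference. The list
 * reference is dropped in mpi3mr_fwevt_del_from_list() or
 * mpi3mr_dequeue_fwevt(); the work reference is dropped either after the
 * worker has processed the event or by mpi3mr_cancel_work(), which also
 * drops the initial reference when it successfully cancels the work.
 */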
void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_fwevt *fwevt = NULL;

	if ((list_empty(&mrioc->fwevt_list) && !mrioc->current_event) ||
	    !mrioc->fwevt_worker_thread)
		return;

	while ((fwevt = mpi3mr_dequeue_fwevt(mrioc)))
		mpi3mr_cancel_work(fwevt);

	if (mrioc->current_event) {
		fwevt = mrioc->current_event;
		/*
		 * Don't call cancel_work_sync() for the fwevt work if the
		 * controller reset got triggered while processing that
		 * same fwevt work, or when the worker thread is waiting
		 * for device add/remove APIs to complete; otherwise we
		 * will deadlock.
		 */
		if (current_work() == &fwevt->work || fwevt->pending_at_sml) {
			fwevt->discard = 1;
			return;
		}

		mpi3mr_cancel_work(fwevt);
	}
}

/**
 * mpi3mr_queue_qd_reduction_event - Queue TG QD reduction event
 * @mrioc: Adapter instance reference
 * @tg: Throttle group information pointer
 *
 * Accessor to queue a synthetically generated driver event to
 * the event worker thread; the driver event will be used to
 * reduce the QD of all VDs in the TG from the worker thread.
 *
 * Return: None.
 */
static void mpi3mr_queue_qd_reduction_event(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_throttle_group_info *tg)
{
	struct mpi3mr_fwevt *fwevt;
	u16 sz = sizeof(struct mpi3mr_throttle_group_info *);

	/*
	 * If the QD reduction event is already queued due to throttling
	 * and the QD is not restored through a device info change event,
	 * then don't queue further reduction events.
	 */
	if (tg->fw_qd != tg->modified_qd)
		return;

	fwevt = mpi3mr_alloc_fwevt(sz);
	if (!fwevt) {
		ioc_warn(mrioc, "failed to queue TG QD reduction event\n");
		return;
	}
	*(struct mpi3mr_throttle_group_info **)fwevt->event_data = tg;
	fwevt->mrioc = mrioc;
	fwevt->event_id = MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION;
	fwevt->send_ack = 0;
	fwevt->process_evt = 1;
	fwevt->evt_ctx = 0;
	fwevt->event_data_size = sz;
	tg->modified_qd = max_t(u16, (tg->fw_qd * tg->qd_reduction) / 10, 8);

	dprint_event_bh(mrioc, "qd reduction event queued for tg_id(%d)\n",
	    tg->id);
	mpi3mr_fwevt_add_to_list(mrioc, fwevt);
}

/**
 * mpi3mr_invalidate_devhandles - Invalidate device handles
 * @mrioc: Adapter instance reference
 *
 * Invalidate the device handles in the target device structures.
 * Called post reset, prior to reinitializing the controller.
 *
 * Return: Nothing.
 */
void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_tgt_dev *tgtdev;
	struct mpi3mr_stgt_priv_data *tgt_priv;

	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		tgtdev->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
		if (tgtdev->starget && tgtdev->starget->hostdata) {
			tgt_priv = tgtdev->starget->hostdata;
			tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
			tgt_priv->io_throttle_enabled = 0;
			tgt_priv->io_divert = 0;
			tgt_priv->throttle_group = NULL;
			tgt_priv->wslen = 0;
			if (tgtdev->host_exposed)
				atomic_set(&tgt_priv->block_io, 1);
		}
	}
}
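/*
 * Worked example for the QD reduction arithmetic in
 * mpi3mr_queue_qd_reduction_event() above (illustrative numbers only):
 * with a firmware queue depth tg->fw_qd of 128 and tg->qd_reduction of 3,
 * the modified depth is max((128 * 3) / 10, 8) = 38; the max_t() floor
 * keeps the depth from ever dropping below 8.
 */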
/**
 * mpi3mr_print_scmd - print individual SCSI command
 * @rq: Block request
 * @data: Adapter instance reference
 *
 * Print the SCSI command details if it is in LLD scope.
 *
 * Return: true always.
 */
static bool mpi3mr_print_scmd(struct request *rq, void *data)
{
	struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv = NULL;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;

		ioc_info(mrioc, "%s :Host Tag = %d, qid = %d\n",
		    __func__, priv->host_tag, priv->req_q_idx + 1);
		scsi_print_command(scmd);
	}

out:
	return true;
}

/**
 * mpi3mr_flush_scmd - Flush individual SCSI command
 * @rq: Block request
 * @data: Adapter instance reference
 *
 * Return the SCSI command to the upper layers if it is in LLD
 * scope.
 *
 * Return: true always.
 */

static bool mpi3mr_flush_scmd(struct request *rq, void *data)
{
	struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv = NULL;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;

		if (priv->meta_sg_valid)
			dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
			    scsi_prot_sg_count(scmd), scmd->sc_data_direction);
		mpi3mr_clear_scmd_priv(mrioc, scmd);
		scsi_dma_unmap(scmd);
		scmd->result = DID_RESET << 16;
		scsi_print_command(scmd);
		scsi_done(scmd);
		mrioc->flush_io_count++;
	}

out:
	return true;
}

/**
 * mpi3mr_count_dev_pending - Count commands pending for a lun
 * @rq: Block request
 * @data: SCSI device reference
 *
 * This is an iterator function called for each SCSI command in
 * a host; if the command is pending in the LLD for the specific
 * device (LUN), the device-specific pending I/O counter in the
 * device structure is incremented.
 *
 * Return: true always.
 */

static bool mpi3mr_count_dev_pending(struct request *rq, void *data)
{
	struct scsi_device *sdev = (struct scsi_device *)data;
	struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;
		if (scmd->device == sdev)
			sdev_priv_data->pend_count++;
	}

out:
	return true;
}

/**
 * mpi3mr_count_tgt_pending - Count commands pending for target
 * @rq: Block request
 * @data: SCSI target reference
 *
 * This is an iterator function called for each SCSI command in
 * a host; if the command is pending in the LLD for the specific
 * target, the target-specific pending I/O counter in the target
 * structure is incremented.
 *
 * Return: true always.
 */

static bool mpi3mr_count_tgt_pending(struct request *rq, void *data)
{
	struct scsi_target *starget = (struct scsi_target *)data;
	struct mpi3mr_stgt_priv_data *stgt_priv_data = starget->hostdata;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;
		if (scmd->device && (scsi_target(scmd->device) == starget))
			stgt_priv_data->pend_count++;
	}

out:
	return true;
}
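/*
 * The iterator callbacks above (mpi3mr_print_scmd, mpi3mr_flush_scmd and
 * the two pending-count helpers) are written for
 * blk_mq_tagset_busy_iter(): the block layer invokes the callback once for
 * every started request in the tag set, passing the opaque data pointer
 * through, and keeps iterating for as long as the callback returns true.
 * See mpi3mr_flush_host_io() below for a caller.
 */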
/**
 * mpi3mr_flush_host_io - Flush host I/Os
 * @mrioc: Adapter instance reference
 *
 * Flush all of the pending I/Os by calling
 * blk_mq_tagset_busy_iter() for each possible tag. This is
 * executed post controller reset.
 *
 * Return: Nothing.
 */
void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc)
{
	struct Scsi_Host *shost = mrioc->shost;

	mrioc->flush_io_count = 0;
	ioc_info(mrioc, "%s :Flushing Host I/O cmds post reset\n", __func__);
	blk_mq_tagset_busy_iter(&shost->tag_set,
	    mpi3mr_flush_scmd, (void *)mrioc);
	ioc_info(mrioc, "%s :Flushed %d Host I/O cmds\n", __func__,
	    mrioc->flush_io_count);
}

/**
 * mpi3mr_flush_cmds_for_unrecovered_controller - Flush all pending cmds
 * @mrioc: Adapter instance reference
 *
 * This function waits for currently running IO poll threads to
 * exit and then flushes all host I/Os and any internal pending
 * cmds. This is executed after the controller is marked as
 * unrecoverable.
 *
 * Return: Nothing.
 */
void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc)
{
	struct Scsi_Host *shost = mrioc->shost;
	int i;

	if (!mrioc->unrecoverable)
		return;

	if (mrioc->op_reply_qinfo) {
		for (i = 0; i < mrioc->num_queues; i++) {
			while (atomic_read(&mrioc->op_reply_qinfo[i].in_use))
				udelay(500);
			atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
		}
	}
	mrioc->flush_io_count = 0;
	blk_mq_tagset_busy_iter(&shost->tag_set,
	    mpi3mr_flush_scmd, (void *)mrioc);
	mpi3mr_flush_delayed_cmd_lists(mrioc);
	mpi3mr_flush_drv_cmds(mrioc);
}

/**
 * mpi3mr_alloc_tgtdev - target device allocator
 *
 * Allocate a target device instance and initialize its reference
 * count.
 *
 * Return: target device instance.
 */
static struct mpi3mr_tgt_dev *mpi3mr_alloc_tgtdev(void)
{
	struct mpi3mr_tgt_dev *tgtdev;

	tgtdev = kzalloc(sizeof(*tgtdev), GFP_ATOMIC);
	if (!tgtdev)
		return NULL;
	kref_init(&tgtdev->ref_count);
	return tgtdev;
}

/**
 * mpi3mr_tgtdev_add_to_list - Add tgtdevice to the list
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device
 *
 * Add the target device to the target device list.
 *
 * Return: Nothing.
 */
static void mpi3mr_tgtdev_add_to_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev)
{
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	mpi3mr_tgtdev_get(tgtdev);
	INIT_LIST_HEAD(&tgtdev->list);
	list_add_tail(&tgtdev->list, &mrioc->tgtdev_list);
	tgtdev->state = MPI3MR_DEV_CREATED;
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}
/**
 * mpi3mr_tgtdev_del_from_list - Delete tgtdevice from the list
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device
 * @must_delete: Must delete the target device from the list irrespective
 * of the device state.
 *
 * Remove the target device from the target device list.
 *
 * Return: Nothing.
 */
static void mpi3mr_tgtdev_del_from_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev, bool must_delete)
{
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	if ((tgtdev->state == MPI3MR_DEV_REMOVE_HS_STARTED) || (must_delete == true)) {
		if (!list_empty(&tgtdev->list)) {
			list_del_init(&tgtdev->list);
			tgtdev->state = MPI3MR_DEV_DELETED;
			mpi3mr_tgtdev_put(tgtdev);
		}
	}
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}

/**
 * __mpi3mr_get_tgtdev_by_handle - Get tgtdev from device handle
 * @mrioc: Adapter instance reference
 * @handle: Device handle
 *
 * Accessor to retrieve target device from the device handle.
 * Non-lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_handle(
	struct mpi3mr_ioc *mrioc, u16 handle)
{
	struct mpi3mr_tgt_dev *tgtdev;

	assert_spin_locked(&mrioc->tgtdev_lock);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
		if (tgtdev->dev_handle == handle)
			goto found_tgtdev;
	return NULL;

found_tgtdev:
	mpi3mr_tgtdev_get(tgtdev);
	return tgtdev;
}

/**
 * mpi3mr_get_tgtdev_by_handle - Get tgtdev from device handle
 * @mrioc: Adapter instance reference
 * @handle: Device handle
 *
 * Accessor to retrieve target device from the device handle.
 * Lock version.
 *
 * Return: Target device reference.
 */
struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle(
	struct mpi3mr_ioc *mrioc, u16 handle)
{
	struct mpi3mr_tgt_dev *tgtdev;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle);
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
	return tgtdev;
}

/**
 * __mpi3mr_get_tgtdev_by_perst_id - Get tgtdev from persist ID
 * @mrioc: Adapter instance reference
 * @persist_id: Persistent ID
 *
 * Accessor to retrieve target device from the Persistent ID.
 * Non-lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_perst_id(
	struct mpi3mr_ioc *mrioc, u16 persist_id)
{
	struct mpi3mr_tgt_dev *tgtdev;

	assert_spin_locked(&mrioc->tgtdev_lock);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
		if (tgtdev->perst_id == persist_id)
			goto found_tgtdev;
	return NULL;

found_tgtdev:
	mpi3mr_tgtdev_get(tgtdev);
	return tgtdev;
}

/**
 * mpi3mr_get_tgtdev_by_perst_id - Get tgtdev from persistent ID
 * @mrioc: Adapter instance reference
 * @persist_id: Persistent ID
 *
 * Accessor to retrieve target device from the Persistent ID.
 * Lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_perst_id(
	struct mpi3mr_ioc *mrioc, u16 persist_id)
{
	struct mpi3mr_tgt_dev *tgtdev;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, persist_id);
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
	return tgtdev;
}
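/*
 * Naming convention used by the accessors above: the double-underscore
 * variants require the caller to already hold tgtdev_lock (enforced with
 * assert_spin_locked()), while the plain variants take and release the
 * lock themselves. Both return the device with an elevated reference
 * count, which the caller must drop with mpi3mr_tgtdev_put().
 */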
/**
 * __mpi3mr_get_tgtdev_from_tgtpriv - Get tgtdev from tgt private
 * @mrioc: Adapter instance reference
 * @tgt_priv: Target private data
 *
 * Accessor to return target device from the target private
 * data. Non-lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_from_tgtpriv(
	struct mpi3mr_ioc *mrioc, struct mpi3mr_stgt_priv_data *tgt_priv)
{
	struct mpi3mr_tgt_dev *tgtdev;

	assert_spin_locked(&mrioc->tgtdev_lock);
	tgtdev = tgt_priv->tgt_dev;
	if (tgtdev)
		mpi3mr_tgtdev_get(tgtdev);
	return tgtdev;
}

/**
 * mpi3mr_set_io_divert_for_all_vd_in_tg - set divert for TG VDs
 * @mrioc: Adapter instance reference
 * @tg: Throttle group information pointer
 * @divert_value: 1 or 0
 *
 * Accessor to set io_divert flag for each device associated
 * with the given throttle group with the given value.
 *
 * Return: None.
 */
static void mpi3mr_set_io_divert_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_throttle_group_info *tg, u8 divert_value)
{
	unsigned long flags;
	struct mpi3mr_tgt_dev *tgtdev;
	struct mpi3mr_stgt_priv_data *tgt_priv;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		if (tgtdev->starget && tgtdev->starget->hostdata) {
			tgt_priv = tgtdev->starget->hostdata;
			if (tgt_priv->throttle_group == tg)
				tgt_priv->io_divert = divert_value;
		}
	}
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}

/**
 * mpi3mr_print_device_event_notice - print notice related to post processing of
 * device event after controller reset.
 *
 * @mrioc: Adapter instance reference
 * @device_add: true for device add event and false for device removal event
 *
 * Return: None.
 */
void mpi3mr_print_device_event_notice(struct mpi3mr_ioc *mrioc,
	bool device_add)
{
	ioc_notice(mrioc, "Device %s was in progress before the reset and\n",
	    (device_add ? "addition" : "removal"));
	ioc_notice(mrioc, "completed after reset, verify whether the exposed devices\n");
	ioc_notice(mrioc, "are matched with attached devices for correctness\n");
}
/**
 * mpi3mr_remove_tgtdev_from_host - Remove dev from upper layers
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device structure
 *
 * Checks whether the device is exposed to upper layers and if it
 * is, removes the device from upper layers by calling
 * scsi_remove_target().
 *
 * Return: Nothing.
 */
void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev)
{
	struct mpi3mr_stgt_priv_data *tgt_priv;

	ioc_info(mrioc, "%s :Removing handle(0x%04x), wwid(0x%016llx)\n",
	    __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
	if (tgtdev->starget && tgtdev->starget->hostdata) {
		tgt_priv = tgtdev->starget->hostdata;
		atomic_set(&tgt_priv->block_io, 0);
		tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	}

	if (!mrioc->sas_transport_enabled || (tgtdev->dev_type !=
	    MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) {
		if (tgtdev->starget) {
			if (mrioc->current_event)
				mrioc->current_event->pending_at_sml = 1;
			scsi_remove_target(&tgtdev->starget->dev);
			tgtdev->host_exposed = 0;
			if (mrioc->current_event) {
				mrioc->current_event->pending_at_sml = 0;
				if (mrioc->current_event->discard) {
					mpi3mr_print_device_event_notice(mrioc,
					    false);
					return;
				}
			}
		}
	} else
		mpi3mr_remove_tgtdev_from_sas_transport(mrioc, tgtdev);

	ioc_info(mrioc, "%s :Removed handle(0x%04x), wwid(0x%016llx)\n",
	    __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
}

/**
 * mpi3mr_report_tgtdev_to_host - Expose device to upper layers
 * @mrioc: Adapter instance reference
 * @perst_id: Persistent ID of the device
 *
 * Checks whether the device can be exposed to the upper layers
 * and, if it is not already exposed, exposes it by calling
 * scsi_scan_target().
 *
 * Return: 0 on success, non zero on failure.
 */
static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc,
	u16 perst_id)
{
	int retval = 0;
	struct mpi3mr_tgt_dev *tgtdev;

	if (mrioc->reset_in_progress)
		return -1;

	tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
	if (!tgtdev) {
		retval = -1;
		goto out;
	}
	if (tgtdev->is_hidden || tgtdev->host_exposed) {
		retval = -1;
		goto out;
	}
	if (!mrioc->sas_transport_enabled || (tgtdev->dev_type !=
	    MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) {
		tgtdev->host_exposed = 1;
		if (mrioc->current_event)
			mrioc->current_event->pending_at_sml = 1;
		scsi_scan_target(&mrioc->shost->shost_gendev,
		    mrioc->scsi_device_channel, tgtdev->perst_id,
		    SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
		if (!tgtdev->starget)
			tgtdev->host_exposed = 0;
		if (mrioc->current_event) {
			mrioc->current_event->pending_at_sml = 0;
			if (mrioc->current_event->discard) {
				mpi3mr_print_device_event_notice(mrioc, true);
				goto out;
			}
		}
	} else
		mpi3mr_report_tgtdev_to_sas_transport(mrioc, tgtdev);
out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);

	return retval;
}
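/*
 * The pending_at_sml/discard handshake seen in the two functions above
 * pairs with mpi3mr_cleanup_fwevt_list(): while a device add or removal
 * is blocked inside the SCSI midlayer, a concurrent controller reset
 * cannot cancel the event work, so it only marks the event as discarded;
 * once the midlayer call returns, the worker notices the discard flag and
 * prints the verification notice instead of continuing.
 */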
/**
 * mpi3mr_change_queue_depth - Change QD callback handler
 * @sdev: SCSI device reference
 * @q_depth: Queue depth
 *
 * Validate and limit QD and call scsi_change_queue_depth.
 *
 * Return: return value of scsi_change_queue_depth
 */
static int mpi3mr_change_queue_depth(struct scsi_device *sdev,
	int q_depth)
{
	struct scsi_target *starget = scsi_target(sdev);
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	int retval = 0;

	if (!sdev->tagged_supported)
		q_depth = 1;
	if (q_depth > shost->can_queue)
		q_depth = shost->can_queue;
	else if (!q_depth)
		q_depth = MPI3MR_DEFAULT_SDEV_QD;
	retval = scsi_change_queue_depth(sdev, q_depth);
	sdev->max_queue_depth = sdev->queue_depth;

	return retval;
}

/**
 * mpi3mr_update_sdev - Update SCSI device information
 * @sdev: SCSI device reference
 * @data: target device reference
 *
 * This is an iterator function called for each SCSI device in a
 * target to update the target specific information into each
 * SCSI device.
 *
 * Return: Nothing.
 */
static void
mpi3mr_update_sdev(struct scsi_device *sdev, void *data)
{
	struct mpi3mr_tgt_dev *tgtdev;

	tgtdev = (struct mpi3mr_tgt_dev *)data;
	if (!tgtdev)
		return;

	mpi3mr_change_queue_depth(sdev, tgtdev->q_depth);
	switch (tgtdev->dev_type) {
	case MPI3_DEVICE_DEVFORM_PCIE:
		/* The block layer hw sector size = 512 */
		if ((tgtdev->dev_spec.pcie_inf.dev_info &
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) {
			blk_queue_max_hw_sectors(sdev->request_queue,
			    tgtdev->dev_spec.pcie_inf.mdts / 512);
			if (tgtdev->dev_spec.pcie_inf.pgsz == 0)
				blk_queue_virt_boundary(sdev->request_queue,
				    ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1));
			else
				blk_queue_virt_boundary(sdev->request_queue,
				    ((1 << tgtdev->dev_spec.pcie_inf.pgsz) - 1));
		}
		break;
	default:
		break;
	}
}
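/*
 * Illustrative arithmetic for mpi3mr_update_sdev() above: max_hw_sectors
 * is expressed in 512-byte units, so an NVMe device reporting an mdts of
 * 1 MiB yields 1048576 / 512 = 2048 sectors. The virt_boundary mask is
 * (1 << pgsz) - 1; with the default page size exponent of 12 that is
 * 0xFFF, i.e. scatter/gather elements must not straddle a 4 KiB boundary.
 */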
/**
 * mpi3mr_rfresh_tgtdevs - Refresh target device exposure
 * @mrioc: Adapter instance reference
 *
 * This is executed post controller reset to identify any devices
 * that went missing during the reset and remove them from the
 * upper layers, or to expose any newly detected device to the
 * upper layers.
 *
 * Return: Nothing.
 */

void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
	struct mpi3mr_stgt_priv_data *tgt_priv;

	dprint_reset(mrioc, "refresh target devices: check for removals\n");
	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
	    list) {
		if ((tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) &&
		    tgtdev->host_exposed && tgtdev->starget &&
		    tgtdev->starget->hostdata) {
			tgt_priv = tgtdev->starget->hostdata;
			tgt_priv->dev_removed = 1;
			atomic_set(&tgt_priv->block_io, 0);
		}
	}

	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
	    list) {
		if (tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
			dprint_reset(mrioc, "removing target device with perst_id(%d)\n",
			    tgtdev->perst_id);
			if (tgtdev->host_exposed)
				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true);
			mpi3mr_tgtdev_put(tgtdev);
		}
	}

	tgtdev = NULL;
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		if ((tgtdev->dev_handle != MPI3MR_INVALID_DEV_HANDLE) &&
		    !tgtdev->is_hidden) {
			if (!tgtdev->host_exposed)
				mpi3mr_report_tgtdev_to_host(mrioc,
				    tgtdev->perst_id);
			else if (tgtdev->starget)
				starget_for_each_device(tgtdev->starget,
				    (void *)tgtdev, mpi3mr_update_sdev);
		}
	}
}

/**
 * mpi3mr_update_tgtdev - Update cached target device information
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device internal structure
 * @dev_pg0: New device page0
 * @is_added: Flag to indicate the device is just added
 *
 * Update the information from the device page0 into the driver
 * cached target device structure.
 *
 * Return: Nothing.
 */
static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev, struct mpi3_device_page0 *dev_pg0,
	bool is_added)
{
	u16 flags = 0;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
	struct mpi3mr_enclosure_node *enclosure_dev = NULL;
	u8 prot_mask = 0;

	tgtdev->perst_id = le16_to_cpu(dev_pg0->persistent_id);
	tgtdev->dev_handle = le16_to_cpu(dev_pg0->dev_handle);
	tgtdev->dev_type = dev_pg0->device_form;
	tgtdev->io_unit_port = dev_pg0->io_unit_port;
	tgtdev->encl_handle = le16_to_cpu(dev_pg0->enclosure_handle);
	tgtdev->parent_handle = le16_to_cpu(dev_pg0->parent_dev_handle);
	tgtdev->slot = le16_to_cpu(dev_pg0->slot);
	tgtdev->q_depth = le16_to_cpu(dev_pg0->queue_depth);
	tgtdev->wwid = le64_to_cpu(dev_pg0->wwid);
	tgtdev->devpg0_flag = le16_to_cpu(dev_pg0->flags);

	if (tgtdev->encl_handle)
		enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc,
		    tgtdev->encl_handle);
	if (enclosure_dev)
		tgtdev->enclosure_logical_id = le64_to_cpu(
		    enclosure_dev->pg0.enclosure_logical_id);

	flags = tgtdev->devpg0_flag;

	tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN);

	if (is_added == true)
		tgtdev->io_throttle_enabled =
		    (flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 1 : 0;
	switch (flags & MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_MASK) {
	case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_256_LB:
		tgtdev->wslen = MPI3MR_WRITE_SAME_MAX_LEN_256_BLKS;
		break;
	case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_2048_LB:
		tgtdev->wslen = MPI3MR_WRITE_SAME_MAX_LEN_2048_BLKS;
		break;
	case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_NO_LIMIT:
	default:
		tgtdev->wslen = 0;
		break;
	}

	if (tgtdev->starget && tgtdev->starget->hostdata) {
		scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
		    tgtdev->starget->hostdata;
		scsi_tgt_priv_data->perst_id = tgtdev->perst_id;
		scsi_tgt_priv_data->dev_handle = tgtdev->dev_handle;
		scsi_tgt_priv_data->dev_type = tgtdev->dev_type;
		scsi_tgt_priv_data->io_throttle_enabled =
		    tgtdev->io_throttle_enabled;
		if (is_added == true)
			atomic_set(&scsi_tgt_priv_data->block_io, 0);
		scsi_tgt_priv_data->wslen = tgtdev->wslen;
	}

	switch (dev_pg0->access_status) {
	case MPI3_DEVICE0_ASTATUS_NO_ERRORS:
	case MPI3_DEVICE0_ASTATUS_PREPARE:
	case MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION:
	case MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY:
		break;
	default:
		tgtdev->is_hidden = 1;
		break;
	}

	switch (tgtdev->dev_type) {
	case MPI3_DEVICE_DEVFORM_SAS_SATA:
	{
		struct mpi3_device0_sas_sata_format *sasinf =
		    &dev_pg0->device_specific.sas_sata_format;
		u16 dev_info = le16_to_cpu(sasinf->device_info);

		tgtdev->dev_spec.sas_sata_inf.dev_info = dev_info;
		tgtdev->dev_spec.sas_sata_inf.sas_address =
		    le64_to_cpu(sasinf->sas_address);
		tgtdev->dev_spec.sas_sata_inf.phy_id = sasinf->phy_num;
		tgtdev->dev_spec.sas_sata_inf.attached_phy_id =
		    sasinf->attached_phy_identifier;
		if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) !=
		    MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE)
			tgtdev->is_hidden = 1;
		else if (!(dev_info & (MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET |
		    MPI3_SAS_DEVICE_INFO_SSP_TARGET)))
			tgtdev->is_hidden = 1;

		if (((tgtdev->devpg0_flag &
		    MPI3_DEVICE0_FLAGS_ATT_METHOD_DIR_ATTACHED)
		    && (tgtdev->devpg0_flag &
		    MPI3_DEVICE0_FLAGS_ATT_METHOD_VIRTUAL)) ||
		    (tgtdev->parent_handle == 0xFFFF))
			tgtdev->non_stl = 1;
		if (tgtdev->dev_spec.sas_sata_inf.hba_port)
			tgtdev->dev_spec.sas_sata_inf.hba_port->port_id =
			    dev_pg0->io_unit_port;
		break;
	}
	case MPI3_DEVICE_DEVFORM_PCIE:
	{
		struct mpi3_device0_pcie_format *pcieinf =
		    &dev_pg0->device_specific.pcie_format;
		u16 dev_info = le16_to_cpu(pcieinf->device_info);

		tgtdev->dev_spec.pcie_inf.dev_info = dev_info;
		tgtdev->dev_spec.pcie_inf.capb =
		    le32_to_cpu(pcieinf->capabilities);
		tgtdev->dev_spec.pcie_inf.mdts = MPI3MR_DEFAULT_MDTS;
		/* 2^12 = 4096 */
		tgtdev->dev_spec.pcie_inf.pgsz = 12;
		if (dev_pg0->access_status == MPI3_DEVICE0_ASTATUS_NO_ERRORS) {
			tgtdev->dev_spec.pcie_inf.mdts =
			    le32_to_cpu(pcieinf->maximum_data_transfer_size);
			tgtdev->dev_spec.pcie_inf.pgsz = pcieinf->page_size;
			tgtdev->dev_spec.pcie_inf.reset_to =
			    max_t(u8, pcieinf->controller_reset_to,
			    MPI3MR_INTADMCMD_TIMEOUT);
			tgtdev->dev_spec.pcie_inf.abort_to =
			    max_t(u8, pcieinf->nvme_abort_to,
			    MPI3MR_INTADMCMD_TIMEOUT);
		}
		if (tgtdev->dev_spec.pcie_inf.mdts > (1024 * 1024))
			tgtdev->dev_spec.pcie_inf.mdts = (1024 * 1024);
		if (((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
		    ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE))
			tgtdev->is_hidden = 1;
		tgtdev->non_stl = 1;
		if (!mrioc->shost)
			break;
		prot_mask = scsi_host_get_prot(mrioc->shost);
		if (prot_mask & SHOST_DIX_TYPE0_PROTECTION) {
			scsi_host_set_prot(mrioc->shost, prot_mask & 0x77);
			ioc_info(mrioc,
			    "%s : Disabling DIX0 prot capability\n", __func__);
			ioc_info(mrioc,
			    "because HBA does not support DIX0 operation on NVME drives\n");
		}
		break;
	}
	case MPI3_DEVICE_DEVFORM_VD:
	{
		struct mpi3_device0_vd_format *vdinf =
		    &dev_pg0->device_specific.vd_format;
		struct mpi3mr_throttle_group_info *tg = NULL;
		u16 vdinf_io_throttle_group =
		    le16_to_cpu(vdinf->io_throttle_group);

		tgtdev->dev_spec.vd_inf.state = vdinf->vd_state;
		if (vdinf->vd_state == MPI3_DEVICE0_VD_STATE_OFFLINE)
			tgtdev->is_hidden = 1;
		tgtdev->non_stl = 1;
		tgtdev->dev_spec.vd_inf.tg_id = vdinf_io_throttle_group;
		tgtdev->dev_spec.vd_inf.tg_high =
		    le16_to_cpu(vdinf->io_throttle_group_high) * 2048;
		tgtdev->dev_spec.vd_inf.tg_low =
		    le16_to_cpu(vdinf->io_throttle_group_low) * 2048;
		if (vdinf_io_throttle_group < mrioc->num_io_throttle_group) {
			tg = mrioc->throttle_groups + vdinf_io_throttle_group;
			tg->id = vdinf_io_throttle_group;
			tg->high = tgtdev->dev_spec.vd_inf.tg_high;
			tg->low = tgtdev->dev_spec.vd_inf.tg_low;
			tg->qd_reduction =
			    tgtdev->dev_spec.vd_inf.tg_qd_reduction;
			if (is_added == true)
				tg->fw_qd = tgtdev->q_depth;
			tg->modified_qd = tgtdev->q_depth;
		}
		tgtdev->dev_spec.vd_inf.tg = tg;
		if (scsi_tgt_priv_data)
			scsi_tgt_priv_data->throttle_group = tg;
		break;
	}
	default:
		break;
	}
}
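/*
 * Unit note for the VD throttle-group fields above (an assumption based
 * on the driver's 512-byte block accounting elsewhere, not stated in this
 * file): the firmware appears to report the high/low watermarks in MiB,
 * and multiplying by 2048 converts them to 512-byte blocks, since
 * 1 MiB = 2048 * 512 bytes.
 */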
/**
 * mpi3mr_devstatuschg_evt_bh - DevStatusChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event information.
 *
 * Process Device Status Change event and, based on the device's
 * new information, either expose the device to the upper layers
 * or remove the device from the upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	u16 dev_handle = 0;
	u8 uhide = 0, delete = 0, cleanup = 0;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3_event_data_device_status_change *evtdata =
	    (struct mpi3_event_data_device_status_change *)fwevt->event_data;

	dev_handle = le16_to_cpu(evtdata->dev_handle);
	ioc_info(mrioc,
	    "%s :device status change: handle(0x%04x): reason code(0x%x)\n",
	    __func__, dev_handle, evtdata->reason_code);
	switch (evtdata->reason_code) {
	case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
		delete = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN:
		uhide = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
		delete = 1;
		cleanup = 1;
		break;
	default:
		ioc_info(mrioc, "%s :Unhandled reason code(0x%x)\n", __func__,
		    evtdata->reason_code);
		break;
	}

	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
	if (!tgtdev)
		goto out;
	if (uhide) {
		tgtdev->is_hidden = 0;
		if (!tgtdev->host_exposed)
			mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
	}

	if (delete)
		mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);

	if (cleanup) {
		mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
		mpi3mr_tgtdev_put(tgtdev);
	}

out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);
}

/**
 * mpi3mr_devinfochg_evt_bh - DeviceInfoChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @dev_pg0: New device page0
 *
 * Process Device Info Change event and, based on the device's
 * new information, either expose the device to the upper layers,
 * remove the device from the upper layers, or update the details
 * of the device.
 *
 * Return: Nothing.
 */
static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3_device_page0 *dev_pg0)
{
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	u16 dev_handle = 0, perst_id = 0;

	perst_id = le16_to_cpu(dev_pg0->persistent_id);
	dev_handle = le16_to_cpu(dev_pg0->dev_handle);
	ioc_info(mrioc,
	    "%s :Device info change: handle(0x%04x): persist_id(0x%x)\n",
	    __func__, dev_handle, perst_id);
	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
	if (!tgtdev)
		goto out;
	mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, false);
	if (!tgtdev->is_hidden && !tgtdev->host_exposed)
		mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
	if (tgtdev->is_hidden && tgtdev->host_exposed)
		mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
	if (!tgtdev->is_hidden && tgtdev->host_exposed && tgtdev->starget)
		starget_for_each_device(tgtdev->starget, (void *)tgtdev,
		    mpi3mr_update_sdev);
out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);
}
/**
 * mpi3mr_free_enclosure_list - release enclosures
 * @mrioc: Adapter instance reference
 *
 * Free memory allocated during enclosure add.
 *
 * Return: Nothing.
 */
void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_enclosure_node *enclosure_dev, *enclosure_dev_next;

	list_for_each_entry_safe(enclosure_dev,
	    enclosure_dev_next, &mrioc->enclosure_list, list) {
		list_del(&enclosure_dev->list);
		kfree(enclosure_dev);
	}
}

/**
 * mpi3mr_enclosure_find_by_handle - enclosure search by handle
 * @mrioc: Adapter instance reference
 * @handle: Firmware device handle of the enclosure
 *
 * This searches for enclosure device based on handle, then returns the
 * enclosure object.
 *
 * Return: Enclosure object reference or NULL
 */
struct mpi3mr_enclosure_node *mpi3mr_enclosure_find_by_handle(
	struct mpi3mr_ioc *mrioc, u16 handle)
{
	struct mpi3mr_enclosure_node *enclosure_dev, *r = NULL;

	list_for_each_entry(enclosure_dev, &mrioc->enclosure_list, list) {
		if (le16_to_cpu(enclosure_dev->pg0.enclosure_handle) != handle)
			continue;
		r = enclosure_dev;
		goto out;
	}
out:
	return r;
}

/**
 * mpi3mr_encldev_add_chg_evt_debug - debug for enclosure event
 * @mrioc: Adapter instance reference
 * @encl_pg0: Enclosure page 0.
 * @is_added: Added event or not
 *
 * Return: Nothing.
 */
static void mpi3mr_encldev_add_chg_evt_debug(struct mpi3mr_ioc *mrioc,
	struct mpi3_enclosure_page0 *encl_pg0, u8 is_added)
{
	char *reason_str = NULL;

	if (!(mrioc->logging_level & MPI3_DEBUG_EVENT_WORK_TASK))
		return;

	if (is_added)
		reason_str = "enclosure added";
	else
		reason_str = "enclosure dev status changed";

	ioc_info(mrioc,
	    "%s: handle(0x%04x), enclosure logical id(0x%016llx)\n",
	    reason_str, le16_to_cpu(encl_pg0->enclosure_handle),
	    (unsigned long long)le64_to_cpu(encl_pg0->enclosure_logical_id));
	ioc_info(mrioc,
	    "number of slots(%d), port(%d), flags(0x%04x), present(%d)\n",
	    le16_to_cpu(encl_pg0->num_slots), encl_pg0->io_unit_port,
	    le16_to_cpu(encl_pg0->flags),
	    ((le16_to_cpu(encl_pg0->flags) &
	    MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4));
}
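/*
 * Flag decoding used in the enclosure handlers (per the shifts in the
 * code above and below): the ENCL_DEV_PRESENT field of the enclosure
 * page 0 flags is extracted by masking with
 * MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK and shifting right by 4,
 * yielding a non-zero value when the enclosure device is present.
 */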
/**
 * mpi3mr_encldev_add_chg_evt_bh - Enclosure evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Prints information about the Enclosure device status or
 * Enclosure add events if logging is enabled, and adds or removes
 * the enclosure from the controller's internal list of
 * enclosures.
 *
 * Return: Nothing.
 */
static void mpi3mr_encldev_add_chg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	struct mpi3mr_enclosure_node *enclosure_dev = NULL;
	struct mpi3_enclosure_page0 *encl_pg0;
	u16 encl_handle;
	u8 added, present;

	encl_pg0 = (struct mpi3_enclosure_page0 *) fwevt->event_data;
	added = (fwevt->event_id == MPI3_EVENT_ENCL_DEVICE_ADDED) ? 1 : 0;
	mpi3mr_encldev_add_chg_evt_debug(mrioc, encl_pg0, added);

	encl_handle = le16_to_cpu(encl_pg0->enclosure_handle);
	present = ((le16_to_cpu(encl_pg0->flags) &
	    MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4);

	if (encl_handle)
		enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc,
		    encl_handle);
	if (!enclosure_dev && present) {
		enclosure_dev =
		    kzalloc(sizeof(struct mpi3mr_enclosure_node),
		    GFP_KERNEL);
		if (!enclosure_dev)
			return;
		list_add_tail(&enclosure_dev->list,
		    &mrioc->enclosure_list);
	}
	if (enclosure_dev) {
		if (!present) {
			list_del(&enclosure_dev->list);
			kfree(enclosure_dev);
		} else
			memcpy(&enclosure_dev->pg0, encl_pg0,
			    sizeof(enclosure_dev->pg0));
	}
}

/**
 * mpi3mr_sastopochg_evt_debug - SASTopoChange details
 * @mrioc: Adapter instance reference
 * @event_data: SAS topology change list event data
 *
 * Prints information about the SAS topology change event.
 *
 * Return: Nothing.
 */
static void
mpi3mr_sastopochg_evt_debug(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_data_sas_topology_change_list *event_data)
{
	int i;
	u16 handle;
	u8 reason_code, phy_number;
	char *status_str = NULL;
	u8 link_rate, prev_link_rate;

	switch (event_data->exp_status) {
	case MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
		status_str = "remove";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_RESPONDING:
		status_str = "responding";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
		status_str = "remove delay";
		break;
	case MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER:
		status_str = "direct attached";
		break;
	default:
		status_str = "unknown status";
		break;
	}
	ioc_info(mrioc, "%s :sas topology change: (%s)\n",
	    __func__, status_str);
	ioc_info(mrioc,
	    "%s :\texpander_handle(0x%04x), port(%d), enclosure_handle(0x%04x) start_phy(%02d), num_entries(%d)\n",
	    __func__, le16_to_cpu(event_data->expander_dev_handle),
	    event_data->io_unit_port,
	    le16_to_cpu(event_data->enclosure_handle),
	    event_data->start_phy_num, event_data->num_entries);
	for (i = 0; i < event_data->num_entries; i++) {
		handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		phy_number = event_data->start_phy_num + i;
		reason_code = event_data->phy_entry[i].status &
		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
		switch (reason_code) {
		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
			status_str = "target remove";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
			status_str = "delay target remove";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
			status_str = "link status change";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
			status_str = "link status no change";
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
			status_str = "target responding";
			break;
		default:
			status_str = "unknown";
			break;
		}
		link_rate = event_data->phy_entry[i].link_rate >> 4;
		prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
		ioc_info(mrioc,
		    "%s :\tphy(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
		    __func__, phy_number, handle, status_str, link_rate,
		    prev_link_rate);
	}
}
/**
 * mpi3mr_sastopochg_evt_bh - SASTopologyChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Prints information about the SAS topology change event and
 * for "not responding" event code, removes the device from the
 * upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_sastopochg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	struct mpi3_event_data_sas_topology_change_list *event_data =
	    (struct mpi3_event_data_sas_topology_change_list *)fwevt->event_data;
	int i;
	u16 handle;
	u8 reason_code;
	u64 exp_sas_address = 0, parent_sas_address = 0;
	struct mpi3mr_hba_port *hba_port = NULL;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3mr_sas_node *sas_expander = NULL;
	unsigned long flags;
	u8 link_rate, prev_link_rate, parent_phy_number;

	mpi3mr_sastopochg_evt_debug(mrioc, event_data);
	if (mrioc->sas_transport_enabled) {
		hba_port = mpi3mr_get_hba_port_by_id(mrioc,
		    event_data->io_unit_port);
		if (le16_to_cpu(event_data->expander_dev_handle)) {
			spin_lock_irqsave(&mrioc->sas_node_lock, flags);
			sas_expander = __mpi3mr_expander_find_by_handle(mrioc,
			    le16_to_cpu(event_data->expander_dev_handle));
			if (sas_expander) {
				exp_sas_address = sas_expander->sas_address;
				hba_port = sas_expander->hba_port;
			}
			spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
			parent_sas_address = exp_sas_address;
		} else
			parent_sas_address = mrioc->sas_hba.sas_address;
	}

	for (i = 0; i < event_data->num_entries; i++) {
		if (fwevt->discard)
			return;
		handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
		if (!tgtdev)
			continue;

		reason_code = event_data->phy_entry[i].status &
		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;

		switch (reason_code) {
		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
			if (tgtdev->host_exposed)
				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
			mpi3mr_tgtdev_put(tgtdev);
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
		case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
		{
			if (!mrioc->sas_transport_enabled || tgtdev->non_stl
			    || tgtdev->is_hidden)
				break;
			link_rate = event_data->phy_entry[i].link_rate >> 4;
			prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
			if (link_rate == prev_link_rate)
				break;
			if (!parent_sas_address)
				break;
			parent_phy_number = event_data->start_phy_num + i;
			mpi3mr_update_links(mrioc, parent_sas_address, handle,
			    parent_phy_number, link_rate, hba_port);
			break;
		}
		default:
			break;
		}
		if (tgtdev)
			mpi3mr_tgtdev_put(tgtdev);
	}

	if (mrioc->sas_transport_enabled && (event_data->exp_status ==
	    MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING)) {
		if (sas_expander)
			mpi3mr_expander_remove(mrioc, exp_sas_address,
			    hba_port);
	}
}
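/*
 * Encoding note for the SAS phy entries handled above (as used by both
 * the debug and bottom-half handlers): each phy_entry's link_rate byte
 * packs the current link rate into the upper nibble and the previous
 * link rate into the lower nibble, hence the ">> 4" and "& 0xF"
 * extractions before the two rates are compared.
 */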
/**
 * mpi3mr_pcietopochg_evt_debug - PCIeTopoChange details
 * @mrioc: Adapter instance reference
 * @event_data: PCIe topology change list event data
 *
 * Prints information about the PCIe topology change event.
 *
 * Return: Nothing.
 */
static void
mpi3mr_pcietopochg_evt_debug(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_data_pcie_topology_change_list *event_data)
{
	int i;
	u16 handle;
	u16 reason_code;
	u8 port_number;
	char *status_str = NULL;
	u8 link_rate, prev_link_rate;

	switch (event_data->switch_status) {
	case MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
		status_str = "remove";
		break;
	case MPI3_EVENT_PCIE_TOPO_SS_RESPONDING:
		status_str = "responding";
		break;
	case MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
		status_str = "remove delay";
		break;
	case MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH:
		status_str = "direct attached";
		break;
	default:
		status_str = "unknown status";
		break;
	}
	ioc_info(mrioc, "%s :pcie topology change: (%s)\n",
	    __func__, status_str);
	ioc_info(mrioc,
	    "%s :\tswitch_handle(0x%04x), enclosure_handle(0x%04x) start_port(%02d), num_entries(%d)\n",
	    __func__, le16_to_cpu(event_data->switch_dev_handle),
	    le16_to_cpu(event_data->enclosure_handle),
	    event_data->start_port_num, event_data->num_entries);
	for (i = 0; i < event_data->num_entries; i++) {
		handle =
		    le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		port_number = event_data->start_port_num + i;
		reason_code = event_data->port_entry[i].port_status;
		switch (reason_code) {
		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			status_str = "target remove";
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
			status_str = "delay target remove";
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
			status_str = "link status change";
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_NO_CHANGE:
			status_str = "link status no change";
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
			status_str = "target responding";
			break;
		default:
			status_str = "unknown";
			break;
		}
		link_rate = event_data->port_entry[i].current_port_info &
		    MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
		prev_link_rate = event_data->port_entry[i].previous_port_info &
		    MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
		ioc_info(mrioc,
		    "%s :\tport(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
		    __func__, port_number, handle, status_str, link_rate,
		    prev_link_rate);
	}
}
/**
 * mpi3mr_pcietopochg_evt_bh - PCIeTopologyChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Prints information about the PCIe topology change event and
 * for "not responding" event code, removes the device from the
 * upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	struct mpi3_event_data_pcie_topology_change_list *event_data =
	    (struct mpi3_event_data_pcie_topology_change_list *)fwevt->event_data;
	int i;
	u16 handle;
	u8 reason_code;
	struct mpi3mr_tgt_dev *tgtdev = NULL;

	mpi3mr_pcietopochg_evt_debug(mrioc, event_data);

	for (i = 0; i < event_data->num_entries; i++) {
		if (fwevt->discard)
			return;
		handle =
		    le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
		if (!tgtdev)
			continue;

		reason_code = event_data->port_entry[i].port_status;

		switch (reason_code) {
		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			if (tgtdev->host_exposed)
				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
			mpi3mr_tgtdev_put(tgtdev);
			break;
		default:
			break;
		}
		if (tgtdev)
			mpi3mr_tgtdev_put(tgtdev);
	}
}

/**
 * mpi3mr_logdata_evt_bh - Log data event bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Extracts the event data and calls the application interfacing
 * function to process the event further.
 *
 * Return: Nothing.
 */
static void mpi3mr_logdata_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	mpi3mr_app_save_logdata(mrioc, fwevt->event_data,
	    fwevt->event_data_size);
}

/**
 * mpi3mr_update_sdev_qd - Update SCSI device queue depth
 * @sdev: SCSI device reference
 * @data: Queue depth reference
 *
 * This is an iterator function called for each SCSI device in a
 * target to update the QD of each SCSI device.
 *
 * Return: Nothing.
 */
static void mpi3mr_update_sdev_qd(struct scsi_device *sdev, void *data)
{
	u16 *q_depth = (u16 *)data;

	scsi_change_queue_depth(sdev, (int)*q_depth);
	sdev->max_queue_depth = sdev->queue_depth;
}
1865 */ 1866 static void mpi3mr_set_qd_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc, 1867 struct mpi3mr_throttle_group_info *tg) 1868 { 1869 unsigned long flags; 1870 struct mpi3mr_tgt_dev *tgtdev; 1871 struct mpi3mr_stgt_priv_data *tgt_priv; 1872 1873 1874 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 1875 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) { 1876 if (tgtdev->starget && tgtdev->starget->hostdata) { 1877 tgt_priv = tgtdev->starget->hostdata; 1878 if (tgt_priv->throttle_group == tg) { 1879 dprint_event_bh(mrioc, 1880 "updating qd due to throttling for persist_id(%d) original_qd(%d), reduced_qd (%d)\n", 1881 tgt_priv->perst_id, tgtdev->q_depth, 1882 tg->modified_qd); 1883 starget_for_each_device(tgtdev->starget, 1884 (void *)&tg->modified_qd, 1885 mpi3mr_update_sdev_qd); 1886 } 1887 } 1888 } 1889 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 1890 } 1891 1892 /** 1893 * mpi3mr_fwevt_bh - Firmware event bottomhalf handler 1894 * @mrioc: Adapter instance reference 1895 * @fwevt: Firmware event reference 1896 * 1897 * Identifies the firmware event and calls corresponding bottomg 1898 * half handler and sends event acknowledgment if required. 1899 * 1900 * Return: Nothing. 1901 */ 1902 static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc, 1903 struct mpi3mr_fwevt *fwevt) 1904 { 1905 struct mpi3_device_page0 *dev_pg0 = NULL; 1906 u16 perst_id, handle, dev_info; 1907 struct mpi3_device0_sas_sata_format *sasinf = NULL; 1908 1909 mpi3mr_fwevt_del_from_list(mrioc, fwevt); 1910 mrioc->current_event = fwevt; 1911 1912 if (mrioc->stop_drv_processing) 1913 goto out; 1914 1915 if (mrioc->unrecoverable) { 1916 dprint_event_bh(mrioc, 1917 "ignoring event(0x%02x) in bottom half handler due to unrecoverable controller\n", 1918 fwevt->event_id); 1919 goto out; 1920 } 1921 1922 if (!fwevt->process_evt) 1923 goto evt_ack; 1924 1925 switch (fwevt->event_id) { 1926 case MPI3_EVENT_DEVICE_ADDED: 1927 { 1928 dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data; 1929 perst_id = le16_to_cpu(dev_pg0->persistent_id); 1930 handle = le16_to_cpu(dev_pg0->dev_handle); 1931 if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID) 1932 mpi3mr_report_tgtdev_to_host(mrioc, perst_id); 1933 else if (mrioc->sas_transport_enabled && 1934 (dev_pg0->device_form == MPI3_DEVICE_DEVFORM_SAS_SATA)) { 1935 sasinf = &dev_pg0->device_specific.sas_sata_format; 1936 dev_info = le16_to_cpu(sasinf->device_info); 1937 if (!mrioc->sas_hba.num_phys) 1938 mpi3mr_sas_host_add(mrioc); 1939 else 1940 mpi3mr_sas_host_refresh(mrioc); 1941 1942 if (mpi3mr_is_expander_device(dev_info)) 1943 mpi3mr_expander_add(mrioc, handle); 1944 } 1945 break; 1946 } 1947 case MPI3_EVENT_DEVICE_INFO_CHANGED: 1948 { 1949 dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data; 1950 perst_id = le16_to_cpu(dev_pg0->persistent_id); 1951 if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID) 1952 mpi3mr_devinfochg_evt_bh(mrioc, dev_pg0); 1953 break; 1954 } 1955 case MPI3_EVENT_DEVICE_STATUS_CHANGE: 1956 { 1957 mpi3mr_devstatuschg_evt_bh(mrioc, fwevt); 1958 break; 1959 } 1960 case MPI3_EVENT_ENCL_DEVICE_ADDED: 1961 case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE: 1962 { 1963 mpi3mr_encldev_add_chg_evt_bh(mrioc, fwevt); 1964 break; 1965 } 1966 1967 case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST: 1968 { 1969 mpi3mr_sastopochg_evt_bh(mrioc, fwevt); 1970 break; 1971 } 1972 case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: 1973 { 1974 mpi3mr_pcietopochg_evt_bh(mrioc, fwevt); 1975 break; 1976 } 1977 case MPI3_EVENT_LOG_DATA: 1978 { 1979 mpi3mr_logdata_evt_bh(mrioc, fwevt); 1980 break; 
1981 } 1982 case MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION: 1983 { 1984 struct mpi3mr_throttle_group_info *tg; 1985 1986 tg = *(struct mpi3mr_throttle_group_info **)fwevt->event_data; 1987 dprint_event_bh(mrioc, 1988 "qd reduction event processed for tg_id(%d) reduction_needed(%d)\n", 1989 tg->id, tg->need_qd_reduction); 1990 if (tg->need_qd_reduction) { 1991 mpi3mr_set_qd_for_all_vd_in_tg(mrioc, tg); 1992 tg->need_qd_reduction = 0; 1993 } 1994 break; 1995 } 1996 case MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH: 1997 { 1998 while (mrioc->device_refresh_on) 1999 msleep(500); 2000 2001 dprint_event_bh(mrioc, 2002 "scan for non responding and newly added devices after soft reset started\n"); 2003 if (mrioc->sas_transport_enabled) { 2004 mpi3mr_refresh_sas_ports(mrioc); 2005 mpi3mr_refresh_expanders(mrioc); 2006 } 2007 mpi3mr_rfresh_tgtdevs(mrioc); 2008 ioc_info(mrioc, 2009 "scan for non responding and newly added devices after soft reset completed\n"); 2010 break; 2011 } 2012 default: 2013 break; 2014 } 2015 2016 evt_ack: 2017 if (fwevt->send_ack) 2018 mpi3mr_process_event_ack(mrioc, fwevt->event_id, 2019 fwevt->evt_ctx); 2020 out: 2021 /* Put fwevt reference count to neutralize kref_init increment */ 2022 mpi3mr_fwevt_put(fwevt); 2023 mrioc->current_event = NULL; 2024 } 2025 2026 /** 2027 * mpi3mr_fwevt_worker - Firmware event worker 2028 * @work: Work struct containing firmware event 2029 * 2030 * Extracts the firmware event and calls mpi3mr_fwevt_bh. 2031 * 2032 * Return: Nothing. 2033 */ 2034 static void mpi3mr_fwevt_worker(struct work_struct *work) 2035 { 2036 struct mpi3mr_fwevt *fwevt = container_of(work, struct mpi3mr_fwevt, 2037 work); 2038 mpi3mr_fwevt_bh(fwevt->mrioc, fwevt); 2039 /* 2040 * Put fwevt reference count after 2041 * dequeuing it from worker queue 2042 */ 2043 mpi3mr_fwevt_put(fwevt); 2044 } 2045 2046 /** 2047 * mpi3mr_create_tgtdev - Create and add a target device 2048 * @mrioc: Adapter instance reference 2049 * @dev_pg0: Device Page 0 data 2050 * 2051 * If the device specified by the device page 0 data is not 2052 * present in the driver's internal list, allocate the memory 2053 * for the device, populate the data and add to the list, else 2054 * update the device data. The key is persistent ID. 2055 * 2056 * Return: 0 on success, -ENOMEM on memory allocation failure 2057 */ 2058 static int mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc, 2059 struct mpi3_device_page0 *dev_pg0) 2060 { 2061 int retval = 0; 2062 struct mpi3mr_tgt_dev *tgtdev = NULL; 2063 u16 perst_id = 0; 2064 unsigned long flags; 2065 2066 perst_id = le16_to_cpu(dev_pg0->persistent_id); 2067 if (perst_id == MPI3_DEVICE0_PERSISTENTID_INVALID) 2068 return retval; 2069 2070 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 2071 tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id); 2072 if (tgtdev) 2073 tgtdev->state = MPI3MR_DEV_CREATED; 2074 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 2075 2076 if (tgtdev) { 2077 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true); 2078 mpi3mr_tgtdev_put(tgtdev); 2079 } else { 2080 tgtdev = mpi3mr_alloc_tgtdev(); 2081 if (!tgtdev) 2082 return -ENOMEM; 2083 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true); 2084 mpi3mr_tgtdev_add_to_list(mrioc, tgtdev); 2085 } 2086 2087 return retval; 2088 } 2089 2090 /** 2091 * mpi3mr_flush_delayed_cmd_lists - Flush pending commands 2092 * @mrioc: Adapter instance reference 2093 * 2094 * Flush pending commands in the delayed lists due to a 2095 * controller reset or driver removal as a cleanup. 
 *
 * Return: Nothing
 */
void mpi3mr_flush_delayed_cmd_lists(struct mpi3mr_ioc *mrioc)
{
	struct delayed_dev_rmhs_node *_rmhs_node;
	struct delayed_evt_ack_node *_evtack_node;

	dprint_reset(mrioc, "flushing delayed dev_remove_hs commands\n");
	while (!list_empty(&mrioc->delayed_rmhs_list)) {
		_rmhs_node = list_entry(mrioc->delayed_rmhs_list.next,
		    struct delayed_dev_rmhs_node, list);
		list_del(&_rmhs_node->list);
		kfree(_rmhs_node);
	}
	dprint_reset(mrioc, "flushing delayed event ack commands\n");
	while (!list_empty(&mrioc->delayed_evtack_cmds_list)) {
		_evtack_node = list_entry(mrioc->delayed_evtack_cmds_list.next,
		    struct delayed_evt_ack_node, list);
		list_del(&_evtack_node->list);
		kfree(_evtack_node);
	}
}

/**
 * mpi3mr_dev_rmhs_complete_iou - Device removal IOUC completion
 * @mrioc: Adapter instance reference
 * @drv_cmd: Internal command tracker
 *
 * Issues a target reset TM to the firmware from the device
 * removal TM pend list or retries the removal handshake sequence
 * based on the IOU control request IOC status.
 *
 * Return: Nothing
 */
static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
	struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;

	if (drv_cmd->state & MPI3MR_CMD_RESET)
		goto clear_drv_cmd;

	ioc_info(mrioc,
	    "%s :dev_rmhs_iouctrl_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x)\n",
	    __func__, drv_cmd->dev_handle, drv_cmd->ioc_status,
	    drv_cmd->ioc_loginfo);
	if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		if (drv_cmd->retry_count < MPI3MR_DEV_RMHS_RETRY_COUNT) {
			drv_cmd->retry_count++;
			ioc_info(mrioc,
			    "%s :dev_rmhs_iouctrl_complete: handle(0x%04x)retrying handshake retry=%d\n",
			    __func__, drv_cmd->dev_handle,
			    drv_cmd->retry_count);
			mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle,
			    drv_cmd, drv_cmd->iou_rc);
			return;
		}
		ioc_err(mrioc,
		    "%s :dev removal handshake failed after all retries: handle(0x%04x)\n",
		    __func__, drv_cmd->dev_handle);
	} else {
		ioc_info(mrioc,
		    "%s :dev removal handshake completed successfully: handle(0x%04x)\n",
		    __func__, drv_cmd->dev_handle);
		clear_bit(drv_cmd->dev_handle, mrioc->removepend_bitmap);
	}

	if (!list_empty(&mrioc->delayed_rmhs_list)) {
		delayed_dev_rmhs = list_entry(mrioc->delayed_rmhs_list.next,
		    struct delayed_dev_rmhs_node, list);
		drv_cmd->dev_handle = delayed_dev_rmhs->handle;
		drv_cmd->retry_count = 0;
		drv_cmd->iou_rc = delayed_dev_rmhs->iou_rc;
		ioc_info(mrioc,
		    "%s :dev_rmhs_iouctrl_complete: processing delayed TM: handle(0x%04x)\n",
		    __func__, drv_cmd->dev_handle);
		mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle, drv_cmd,
		    drv_cmd->iou_rc);
		list_del(&delayed_dev_rmhs->list);
		kfree(delayed_dev_rmhs);
		return;
	}

clear_drv_cmd:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->retry_count = 0;
	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	clear_bit(cmd_idx, mrioc->devrem_bitmap);
}

/**
 * mpi3mr_dev_rmhs_complete_tm - Device removal TM completion
 * @mrioc: Adapter instance reference
 * @drv_cmd: Internal command tracker
 *
 * Issues an IO unit control request as part of the device
 * removal or hidden acknowledgment handshake once the target
 * reset TM completes.
 *
 * Return: Nothing
 */
static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	struct mpi3_iounit_control_request iou_ctrl;
	u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
	struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
	int retval;

	if (drv_cmd->state & MPI3MR_CMD_RESET)
		goto clear_drv_cmd;

	if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
		tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;

	if (tm_reply)
		pr_info(IOCNAME
		    "dev_rmhs_tr_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x), term_count(%d)\n",
		    mrioc->name, drv_cmd->dev_handle, drv_cmd->ioc_status,
		    drv_cmd->ioc_loginfo,
		    le32_to_cpu(tm_reply->termination_count));

	pr_info(IOCNAME "Issuing IOU CTL: handle(0x%04x) dev_rmhs idx(%d)\n",
	    mrioc->name, drv_cmd->dev_handle, cmd_idx);

	memset(&iou_ctrl, 0, sizeof(iou_ctrl));

	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 0;
	drv_cmd->callback = mpi3mr_dev_rmhs_complete_iou;
	iou_ctrl.operation = drv_cmd->iou_rc;
	iou_ctrl.param16[0] = cpu_to_le16(drv_cmd->dev_handle);
	iou_ctrl.host_tag = cpu_to_le16(drv_cmd->host_tag);
	iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;

	retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl, sizeof(iou_ctrl),
	    1);
	if (retval) {
		pr_err(IOCNAME "Issue DevRmHsTMIOUCTL: Admin post failed\n",
		    mrioc->name);
		goto clear_drv_cmd;
	}

	return;
clear_drv_cmd:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	drv_cmd->retry_count = 0;
	clear_bit(cmd_idx, mrioc->devrem_bitmap);
}

/**
 * mpi3mr_dev_rmhs_send_tm - Issue TM for device removal
 * @mrioc: Adapter instance reference
 * @handle: Device handle
 * @cmdparam: Internal command tracker
 * @iou_rc: IO unit reason code
 *
 * Issues a target reset TM to the firmware or adds it to a pend
 * list as part of the device removal or hidden acknowledgment
 * handshake.
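 *
 * A rough sketch of the handshake this kicks off (derived from the
 * completion handlers above; MPI3_CTRL_OP_REMOVE_DEVICE case):
 *
 *   mpi3mr_dev_rmhs_send_tm()         - target reset TM, or park the
 *                                       request on delayed_rmhs_list
 *   -> mpi3mr_dev_rmhs_complete_tm()  - IO unit control (remove/hide ack)
 *   -> mpi3mr_dev_rmhs_complete_iou() - retry on failure, then pick up
 *                                       the next delayed handshake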
 *
 * Return: Nothing
 */
static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
	struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc)
{
	struct mpi3_scsi_task_mgmt_request tm_req;
	int retval = 0;
	u16 cmd_idx = MPI3MR_NUM_DEVRMCMD;
	u8 retrycount = 5;
	struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
	struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle);
	if (tgtdev && (iou_rc == MPI3_CTRL_OP_REMOVE_DEVICE))
		tgtdev->state = MPI3MR_DEV_REMOVE_HS_STARTED;
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);

	if (drv_cmd)
		goto issue_cmd;
	do {
		cmd_idx = find_first_zero_bit(mrioc->devrem_bitmap,
		    MPI3MR_NUM_DEVRMCMD);
		if (cmd_idx < MPI3MR_NUM_DEVRMCMD) {
			if (!test_and_set_bit(cmd_idx, mrioc->devrem_bitmap))
				break;
			cmd_idx = MPI3MR_NUM_DEVRMCMD;
		}
	} while (retrycount--);

	if (cmd_idx >= MPI3MR_NUM_DEVRMCMD) {
		delayed_dev_rmhs = kzalloc(sizeof(*delayed_dev_rmhs),
		    GFP_ATOMIC);
		if (!delayed_dev_rmhs)
			return;
		INIT_LIST_HEAD(&delayed_dev_rmhs->list);
		delayed_dev_rmhs->handle = handle;
		delayed_dev_rmhs->iou_rc = iou_rc;
		list_add_tail(&delayed_dev_rmhs->list,
		    &mrioc->delayed_rmhs_list);
		ioc_info(mrioc, "%s :DevRmHs: tr:handle(0x%04x) is postponed\n",
		    __func__, handle);
		return;
	}
	drv_cmd = &mrioc->dev_rmhs_cmds[cmd_idx];

issue_cmd:
	cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
	ioc_info(mrioc,
	    "%s :Issuing TR TM: for devhandle 0x%04x with dev_rmhs %d\n",
	    __func__, handle, cmd_idx);

	memset(&tm_req, 0, sizeof(tm_req));
	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
		ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__);
		goto out;
	}
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 0;
	drv_cmd->callback = mpi3mr_dev_rmhs_complete_tm;
	drv_cmd->dev_handle = handle;
	drv_cmd->iou_rc = iou_rc;
	tm_req.dev_handle = cpu_to_le16(handle);
	tm_req.task_type = MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
	tm_req.host_tag = cpu_to_le16(drv_cmd->host_tag);
	tm_req.task_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INVALID);
	tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT;

	set_bit(handle, mrioc->removepend_bitmap);
	retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1);
	if (retval) {
		ioc_err(mrioc, "%s :Issue DevRmHsTM: Admin Post failed\n",
		    __func__);
		goto out_failed;
	}
out:
	return;
out_failed:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	drv_cmd->retry_count = 0;
	clear_bit(cmd_idx, mrioc->devrem_bitmap);
}

/**
 * mpi3mr_complete_evt_ack - event ack request completion
 * @mrioc: Adapter instance reference
 * @drv_cmd: Internal command tracker
 *
 * This is the completion handler for the non-blocking event
 * acknowledgment sent to the firmware; it issues any pending
 * event acknowledgment request.
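 *
 * Flow sketch (from the code below): if an earlier
 * mpi3mr_send_event_ack() found no free slot in evtack_cmds_bitmap,
 * the ack was parked on delayed_evtack_cmds_list; this completion
 * reuses the just-completed tracker to send the parked ack before
 * the slot is released.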
 *
 * Return: Nothing
 */
static void mpi3mr_complete_evt_ack(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
	struct delayed_evt_ack_node *delayed_evtack = NULL;

	if (drv_cmd->state & MPI3MR_CMD_RESET)
		goto clear_drv_cmd;

	if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		dprint_event_th(mrioc,
		    "immediate event ack failed with ioc_status(0x%04x) log_info(0x%08x)\n",
		    (drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
		    drv_cmd->ioc_loginfo);
	}

	if (!list_empty(&mrioc->delayed_evtack_cmds_list)) {
		delayed_evtack =
		    list_entry(mrioc->delayed_evtack_cmds_list.next,
		    struct delayed_evt_ack_node, list);
		mpi3mr_send_event_ack(mrioc, delayed_evtack->event, drv_cmd,
		    delayed_evtack->event_ctx);
		list_del(&delayed_evtack->list);
		kfree(delayed_evtack);
		return;
	}
clear_drv_cmd:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap);
}

/**
 * mpi3mr_send_event_ack - Issue event acknowledgment request
 * @mrioc: Adapter instance reference
 * @event: MPI3 event id
 * @cmdparam: Internal command tracker
 * @event_ctx: event context
 *
 * Issues an event acknowledgment request to the firmware if
 * there is a free command to send the event ack, else adds it to
 * a pend list so that it is processed on completion of a prior
 * event acknowledgment.
 *
 * Return: Nothing
 */
static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
	struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx)
{
	struct mpi3_event_ack_request evtack_req;
	int retval = 0;
	u8 retrycount = 5;
	u16 cmd_idx = MPI3MR_NUM_EVTACKCMD;
	struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
	struct delayed_evt_ack_node *delayed_evtack = NULL;

	if (drv_cmd) {
		dprint_event_th(mrioc,
		    "sending delayed event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
		    event, event_ctx);
		goto issue_cmd;
	}
	dprint_event_th(mrioc,
	    "sending event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
	    event, event_ctx);
	do {
		cmd_idx = find_first_zero_bit(mrioc->evtack_cmds_bitmap,
		    MPI3MR_NUM_EVTACKCMD);
		if (cmd_idx < MPI3MR_NUM_EVTACKCMD) {
			if (!test_and_set_bit(cmd_idx,
			    mrioc->evtack_cmds_bitmap))
				break;
			cmd_idx = MPI3MR_NUM_EVTACKCMD;
		}
	} while (retrycount--);

	if (cmd_idx >= MPI3MR_NUM_EVTACKCMD) {
		delayed_evtack = kzalloc(sizeof(*delayed_evtack),
		    GFP_ATOMIC);
		if (!delayed_evtack)
			return;
		INIT_LIST_HEAD(&delayed_evtack->list);
		delayed_evtack->event = event;
		delayed_evtack->event_ctx = event_ctx;
		list_add_tail(&delayed_evtack->list,
		    &mrioc->delayed_evtack_cmds_list);
		dprint_event_th(mrioc,
		    "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is postponed\n",
		    event, event_ctx);
		return;
	}
	drv_cmd = &mrioc->evtack_cmds[cmd_idx];

issue_cmd:
	cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;

	memset(&evtack_req, 0, sizeof(evtack_req));
	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
		dprint_event_th(mrioc,
		    "sending event ack failed due to command in use\n");
		goto out;
	}
	drv_cmd->state = MPI3MR_CMD_PENDING;
drv_cmd->is_waiting = 0; 2464 drv_cmd->callback = mpi3mr_complete_evt_ack; 2465 evtack_req.host_tag = cpu_to_le16(drv_cmd->host_tag); 2466 evtack_req.function = MPI3_FUNCTION_EVENT_ACK; 2467 evtack_req.event = event; 2468 evtack_req.event_context = cpu_to_le32(event_ctx); 2469 retval = mpi3mr_admin_request_post(mrioc, &evtack_req, 2470 sizeof(evtack_req), 1); 2471 if (retval) { 2472 dprint_event_th(mrioc, 2473 "posting event ack request is failed\n"); 2474 goto out_failed; 2475 } 2476 2477 dprint_event_th(mrioc, 2478 "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is posted\n", 2479 event, event_ctx); 2480 out: 2481 return; 2482 out_failed: 2483 drv_cmd->state = MPI3MR_CMD_NOTUSED; 2484 drv_cmd->callback = NULL; 2485 clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap); 2486 } 2487 2488 /** 2489 * mpi3mr_pcietopochg_evt_th - PCIETopologyChange evt tophalf 2490 * @mrioc: Adapter instance reference 2491 * @event_reply: event data 2492 * 2493 * Checks for the reason code and based on that either block I/O 2494 * to device, or unblock I/O to the device, or start the device 2495 * removal handshake with reason as remove with the firmware for 2496 * PCIe devices. 2497 * 2498 * Return: Nothing 2499 */ 2500 static void mpi3mr_pcietopochg_evt_th(struct mpi3mr_ioc *mrioc, 2501 struct mpi3_event_notification_reply *event_reply) 2502 { 2503 struct mpi3_event_data_pcie_topology_change_list *topo_evt = 2504 (struct mpi3_event_data_pcie_topology_change_list *)event_reply->event_data; 2505 int i; 2506 u16 handle; 2507 u8 reason_code; 2508 struct mpi3mr_tgt_dev *tgtdev = NULL; 2509 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 2510 2511 for (i = 0; i < topo_evt->num_entries; i++) { 2512 handle = le16_to_cpu(topo_evt->port_entry[i].attached_dev_handle); 2513 if (!handle) 2514 continue; 2515 reason_code = topo_evt->port_entry[i].port_status; 2516 scsi_tgt_priv_data = NULL; 2517 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 2518 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) 2519 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 2520 tgtdev->starget->hostdata; 2521 switch (reason_code) { 2522 case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING: 2523 if (scsi_tgt_priv_data) { 2524 scsi_tgt_priv_data->dev_removed = 1; 2525 scsi_tgt_priv_data->dev_removedelay = 0; 2526 atomic_set(&scsi_tgt_priv_data->block_io, 0); 2527 } 2528 mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL, 2529 MPI3_CTRL_OP_REMOVE_DEVICE); 2530 break; 2531 case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING: 2532 if (scsi_tgt_priv_data) { 2533 scsi_tgt_priv_data->dev_removedelay = 1; 2534 atomic_inc(&scsi_tgt_priv_data->block_io); 2535 } 2536 break; 2537 case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING: 2538 if (scsi_tgt_priv_data && 2539 scsi_tgt_priv_data->dev_removedelay) { 2540 scsi_tgt_priv_data->dev_removedelay = 0; 2541 atomic_dec_if_positive 2542 (&scsi_tgt_priv_data->block_io); 2543 } 2544 break; 2545 case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED: 2546 default: 2547 break; 2548 } 2549 if (tgtdev) 2550 mpi3mr_tgtdev_put(tgtdev); 2551 } 2552 } 2553 2554 /** 2555 * mpi3mr_sastopochg_evt_th - SASTopologyChange evt tophalf 2556 * @mrioc: Adapter instance reference 2557 * @event_reply: event data 2558 * 2559 * Checks for the reason code and based on that either block I/O 2560 * to device, or unblock I/O to the device, or start the device 2561 * removal handshake with reason as remove with the firmware for 2562 * SAS/SATA devices. 
2563 * 2564 * Return: Nothing 2565 */ 2566 static void mpi3mr_sastopochg_evt_th(struct mpi3mr_ioc *mrioc, 2567 struct mpi3_event_notification_reply *event_reply) 2568 { 2569 struct mpi3_event_data_sas_topology_change_list *topo_evt = 2570 (struct mpi3_event_data_sas_topology_change_list *)event_reply->event_data; 2571 int i; 2572 u16 handle; 2573 u8 reason_code; 2574 struct mpi3mr_tgt_dev *tgtdev = NULL; 2575 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 2576 2577 for (i = 0; i < topo_evt->num_entries; i++) { 2578 handle = le16_to_cpu(topo_evt->phy_entry[i].attached_dev_handle); 2579 if (!handle) 2580 continue; 2581 reason_code = topo_evt->phy_entry[i].status & 2582 MPI3_EVENT_SAS_TOPO_PHY_RC_MASK; 2583 scsi_tgt_priv_data = NULL; 2584 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 2585 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) 2586 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 2587 tgtdev->starget->hostdata; 2588 switch (reason_code) { 2589 case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING: 2590 if (scsi_tgt_priv_data) { 2591 scsi_tgt_priv_data->dev_removed = 1; 2592 scsi_tgt_priv_data->dev_removedelay = 0; 2593 atomic_set(&scsi_tgt_priv_data->block_io, 0); 2594 } 2595 mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL, 2596 MPI3_CTRL_OP_REMOVE_DEVICE); 2597 break; 2598 case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING: 2599 if (scsi_tgt_priv_data) { 2600 scsi_tgt_priv_data->dev_removedelay = 1; 2601 atomic_inc(&scsi_tgt_priv_data->block_io); 2602 } 2603 break; 2604 case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING: 2605 if (scsi_tgt_priv_data && 2606 scsi_tgt_priv_data->dev_removedelay) { 2607 scsi_tgt_priv_data->dev_removedelay = 0; 2608 atomic_dec_if_positive 2609 (&scsi_tgt_priv_data->block_io); 2610 } 2611 break; 2612 case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED: 2613 default: 2614 break; 2615 } 2616 if (tgtdev) 2617 mpi3mr_tgtdev_put(tgtdev); 2618 } 2619 } 2620 2621 /** 2622 * mpi3mr_devstatuschg_evt_th - DeviceStatusChange evt tophalf 2623 * @mrioc: Adapter instance reference 2624 * @event_reply: event data 2625 * 2626 * Checks for the reason code and based on that either block I/O 2627 * to device, or unblock I/O to the device, or start the device 2628 * removal handshake with reason as remove/hide acknowledgment 2629 * with the firmware. 
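 *
 * Summary of the reason-code handling implemented below:
 *   INT_DEVICE_RESET_STRT / INT_IT_NEXUS_RESET_STRT -> block I/O
 *   INT_DEVICE_RESET_CMP / INT_IT_NEXUS_RESET_CMP -> unblock I/O
 *   HIDDEN -> mark removed, hide and start the hidden-ack handshake
 *   VD_NOT_RESPONDING -> mark removed and start the removal handshake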
 *
 * Return: Nothing
 */
static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	u16 dev_handle = 0;
	u8 ublock = 0, block = 0, hide = 0, delete = 0, remove = 0;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
	struct mpi3_event_data_device_status_change *evtdata =
	    (struct mpi3_event_data_device_status_change *)event_reply->event_data;

	if (mrioc->stop_drv_processing)
		goto out;

	dev_handle = le16_to_cpu(evtdata->dev_handle);

	switch (evtdata->reason_code) {
	case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT:
	case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT:
		block = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
		delete = 1;
		hide = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
		delete = 1;
		remove = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP:
	case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP:
		ublock = 1;
		break;
	default:
		break;
	}

	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
	if (!tgtdev)
		goto out;
	if (hide)
		tgtdev->is_hidden = hide;
	if (tgtdev->starget && tgtdev->starget->hostdata) {
		scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
		    tgtdev->starget->hostdata;
		if (block)
			atomic_inc(&scsi_tgt_priv_data->block_io);
		if (delete)
			scsi_tgt_priv_data->dev_removed = 1;
		if (ublock)
			atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
	}
	if (remove)
		mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL,
		    MPI3_CTRL_OP_REMOVE_DEVICE);
	if (hide)
		mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL,
		    MPI3_CTRL_OP_HIDDEN_ACK);

out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);
}

/**
 * mpi3mr_preparereset_evt_th - Prepare for reset event tophalf
 * @mrioc: Adapter instance reference
 * @event_reply: event data
 *
 * Blocks and unblocks host level I/O based on the reason code
 *
 * Return: Nothing
 */
static void mpi3mr_preparereset_evt_th(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	struct mpi3_event_data_prepare_for_reset *evtdata =
	    (struct mpi3_event_data_prepare_for_reset *)event_reply->event_data;

	if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_START) {
		dprint_event_th(mrioc,
		    "prepare for reset event top half with rc=start\n");
		if (mrioc->prepare_for_reset)
			return;
		mrioc->prepare_for_reset = 1;
		mrioc->prepare_for_reset_timeout_counter = 0;
	} else if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_ABORT) {
		dprint_event_th(mrioc,
		    "prepare for reset top half with rc=abort\n");
		mrioc->prepare_for_reset = 0;
		mrioc->prepare_for_reset_timeout_counter = 0;
	}
	if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
	    == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
		mpi3mr_send_event_ack(mrioc, event_reply->event, NULL,
		    le32_to_cpu(event_reply->event_context));
}

/**
 * mpi3mr_energypackchg_evt_th - Energy pack change evt tophalf
 * @mrioc: Adapter instance reference
 * @event_reply: event data
 *
 * Identifies the new shutdown timeout value and updates it.
 *
 * Return: Nothing
 */
static void mpi3mr_energypackchg_evt_th(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	struct mpi3_event_data_energy_pack_change *evtdata =
	    (struct mpi3_event_data_energy_pack_change *)event_reply->event_data;
	u16 shutdown_timeout = le16_to_cpu(evtdata->shutdown_timeout);

	if (shutdown_timeout <= 0) {
		ioc_warn(mrioc,
		    "%s :Invalid Shutdown Timeout received = %d\n",
		    __func__, shutdown_timeout);
		return;
	}

	ioc_info(mrioc,
	    "%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n",
	    __func__, mrioc->facts.shutdown_timeout, shutdown_timeout);
	mrioc->facts.shutdown_timeout = shutdown_timeout;
}

/**
 * mpi3mr_cablemgmt_evt_th - Cable management event tophalf
 * @mrioc: Adapter instance reference
 * @event_reply: event data
 *
 * Displays cable management event details.
 *
 * Return: Nothing
 */
static void mpi3mr_cablemgmt_evt_th(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	struct mpi3_event_data_cable_management *evtdata =
	    (struct mpi3_event_data_cable_management *)event_reply->event_data;

	switch (evtdata->status) {
	case MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER:
	{
		ioc_info(mrioc, "An active cable with receptacle_id %d cannot be powered.\n"
		    "Devices connected to this cable are not detected.\n"
		    "This cable requires %d mW of power.\n",
		    evtdata->receptacle_id,
		    le32_to_cpu(evtdata->active_cable_power_requirement));
		break;
	}
	case MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED:
	{
		ioc_info(mrioc, "A cable with receptacle_id %d is not running at optimal speed\n",
		    evtdata->receptacle_id);
		break;
	}
	default:
		break;
	}
}

/**
 * mpi3mr_add_event_wait_for_device_refresh - Add Wait for Device Refresh Event
 * @mrioc: Adapter instance reference
 *
 * Add driver specific event to make sure that the driver won't process the
 * events until all the devices are refreshed during soft reset.
 *
 * Return: Nothing
 */
void mpi3mr_add_event_wait_for_device_refresh(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_fwevt *fwevt = NULL;

	fwevt = mpi3mr_alloc_fwevt(0);
	if (!fwevt) {
		dprint_event_th(mrioc,
		    "failed to schedule bottom half handler for event(0x%02x)\n",
		    MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH);
		return;
	}
	fwevt->mrioc = mrioc;
	fwevt->event_id = MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH;
	fwevt->send_ack = 0;
	fwevt->process_evt = 1;
	fwevt->evt_ctx = 0;
	fwevt->event_data_size = 0;
	mpi3mr_fwevt_add_to_list(mrioc, fwevt);
}

/**
 * mpi3mr_os_handle_events - Firmware event handler
 * @mrioc: Adapter instance reference
 * @event_reply: event data
 *
 * Identifies whether the event has to be handled and acknowledged,
 * and either processes the event in the tophalf and/or schedules a
 * bottom half through mpi3mr_fwevt_worker.
2832 * 2833 * Return: Nothing 2834 */ 2835 void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc, 2836 struct mpi3_event_notification_reply *event_reply) 2837 { 2838 u16 evt_type, sz; 2839 struct mpi3mr_fwevt *fwevt = NULL; 2840 bool ack_req = 0, process_evt_bh = 0; 2841 2842 if (mrioc->stop_drv_processing) 2843 return; 2844 2845 if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK) 2846 == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED) 2847 ack_req = 1; 2848 2849 evt_type = event_reply->event; 2850 2851 switch (evt_type) { 2852 case MPI3_EVENT_DEVICE_ADDED: 2853 { 2854 struct mpi3_device_page0 *dev_pg0 = 2855 (struct mpi3_device_page0 *)event_reply->event_data; 2856 if (mpi3mr_create_tgtdev(mrioc, dev_pg0)) 2857 ioc_err(mrioc, 2858 "%s :Failed to add device in the device add event\n", 2859 __func__); 2860 else 2861 process_evt_bh = 1; 2862 break; 2863 } 2864 case MPI3_EVENT_DEVICE_STATUS_CHANGE: 2865 { 2866 process_evt_bh = 1; 2867 mpi3mr_devstatuschg_evt_th(mrioc, event_reply); 2868 break; 2869 } 2870 case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST: 2871 { 2872 process_evt_bh = 1; 2873 mpi3mr_sastopochg_evt_th(mrioc, event_reply); 2874 break; 2875 } 2876 case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: 2877 { 2878 process_evt_bh = 1; 2879 mpi3mr_pcietopochg_evt_th(mrioc, event_reply); 2880 break; 2881 } 2882 case MPI3_EVENT_PREPARE_FOR_RESET: 2883 { 2884 mpi3mr_preparereset_evt_th(mrioc, event_reply); 2885 ack_req = 0; 2886 break; 2887 } 2888 case MPI3_EVENT_DEVICE_INFO_CHANGED: 2889 case MPI3_EVENT_LOG_DATA: 2890 case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE: 2891 case MPI3_EVENT_ENCL_DEVICE_ADDED: 2892 { 2893 process_evt_bh = 1; 2894 break; 2895 } 2896 case MPI3_EVENT_ENERGY_PACK_CHANGE: 2897 { 2898 mpi3mr_energypackchg_evt_th(mrioc, event_reply); 2899 break; 2900 } 2901 case MPI3_EVENT_CABLE_MGMT: 2902 { 2903 mpi3mr_cablemgmt_evt_th(mrioc, event_reply); 2904 break; 2905 } 2906 case MPI3_EVENT_SAS_DISCOVERY: 2907 case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR: 2908 case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE: 2909 case MPI3_EVENT_PCIE_ENUMERATION: 2910 break; 2911 default: 2912 ioc_info(mrioc, "%s :event 0x%02x is not handled\n", 2913 __func__, evt_type); 2914 break; 2915 } 2916 if (process_evt_bh || ack_req) { 2917 sz = event_reply->event_data_length * 4; 2918 fwevt = mpi3mr_alloc_fwevt(sz); 2919 if (!fwevt) { 2920 ioc_info(mrioc, "%s :failure at %s:%d/%s()!\n", 2921 __func__, __FILE__, __LINE__, __func__); 2922 return; 2923 } 2924 2925 memcpy(fwevt->event_data, event_reply->event_data, sz); 2926 fwevt->mrioc = mrioc; 2927 fwevt->event_id = evt_type; 2928 fwevt->send_ack = ack_req; 2929 fwevt->process_evt = process_evt_bh; 2930 fwevt->evt_ctx = le32_to_cpu(event_reply->event_context); 2931 mpi3mr_fwevt_add_to_list(mrioc, fwevt); 2932 } 2933 } 2934 2935 /** 2936 * mpi3mr_setup_eedp - Setup EEDP information in MPI3 SCSI IO 2937 * @mrioc: Adapter instance reference 2938 * @scmd: SCSI command reference 2939 * @scsiio_req: MPI3 SCSI IO request 2940 * 2941 * Identifies the protection information flags from the SCSI 2942 * command and set appropriate flags in the MPI3 SCSI IO 2943 * request. 
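 *
 * For example, the switch below applies mappings such as:
 *   SCSI_PROT_WRITE_INSERT -> MPI3_EEDPFLAGS_EEDP_OP_INSERT
 *   SCSI_PROT_READ_STRIP -> MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE
 *   SCSI_PROT_READ_PASS / SCSI_PROT_WRITE_PASS ->
 *       MPI3_EEDPFLAGS_EEDP_OP_CHECK, plus METASGL_VALID so a
 *       separate protection information SGL is built for the I/O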
2944 * 2945 * Return: Nothing 2946 */ 2947 static void mpi3mr_setup_eedp(struct mpi3mr_ioc *mrioc, 2948 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req) 2949 { 2950 u16 eedp_flags = 0; 2951 unsigned char prot_op = scsi_get_prot_op(scmd); 2952 2953 switch (prot_op) { 2954 case SCSI_PROT_NORMAL: 2955 return; 2956 case SCSI_PROT_READ_STRIP: 2957 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE; 2958 break; 2959 case SCSI_PROT_WRITE_INSERT: 2960 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT; 2961 break; 2962 case SCSI_PROT_READ_INSERT: 2963 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT; 2964 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 2965 break; 2966 case SCSI_PROT_WRITE_STRIP: 2967 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE; 2968 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 2969 break; 2970 case SCSI_PROT_READ_PASS: 2971 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK; 2972 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 2973 break; 2974 case SCSI_PROT_WRITE_PASS: 2975 if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) { 2976 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN; 2977 scsiio_req->sgl[0].eedp.application_tag_translation_mask = 2978 0xffff; 2979 } else 2980 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK; 2981 2982 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 2983 break; 2984 default: 2985 return; 2986 } 2987 2988 if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK) 2989 eedp_flags |= MPI3_EEDPFLAGS_CHK_GUARD; 2990 2991 if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) 2992 eedp_flags |= MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM; 2993 2994 if (scmd->prot_flags & SCSI_PROT_REF_CHECK) { 2995 eedp_flags |= MPI3_EEDPFLAGS_CHK_REF_TAG | 2996 MPI3_EEDPFLAGS_INCR_PRI_REF_TAG; 2997 scsiio_req->cdb.eedp32.primary_reference_tag = 2998 cpu_to_be32(scsi_prot_ref_tag(scmd)); 2999 } 3000 3001 if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT) 3002 eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG; 3003 3004 eedp_flags |= MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE; 3005 3006 switch (scsi_prot_interval(scmd)) { 3007 case 512: 3008 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_512; 3009 break; 3010 case 520: 3011 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_520; 3012 break; 3013 case 4080: 3014 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4080; 3015 break; 3016 case 4088: 3017 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4088; 3018 break; 3019 case 4096: 3020 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4096; 3021 break; 3022 case 4104: 3023 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4104; 3024 break; 3025 case 4160: 3026 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4160; 3027 break; 3028 default: 3029 break; 3030 } 3031 3032 scsiio_req->sgl[0].eedp.eedp_flags = cpu_to_le16(eedp_flags); 3033 scsiio_req->sgl[0].eedp.flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED; 3034 } 3035 3036 /** 3037 * mpi3mr_build_sense_buffer - Map sense information 3038 * @desc: Sense type 3039 * @buf: Sense buffer to populate 3040 * @key: Sense key 3041 * @asc: Additional sense code 3042 * @ascq: Additional sense code qualifier 3043 * 3044 * Maps the given sense information into either descriptor or 3045 * fixed format sense data. 
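 *
 * Resulting layouts (per the SPC sense data formats):
 *   descriptor (buf[0] = 0x72): key in buf[1], asc in buf[2],
 *       ascq in buf[3]
 *   fixed (buf[0] = 0x70): key in buf[2], additional length 0xa in
 *       buf[7], asc in buf[12], ascq in buf[13]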
 *
 * Return: Nothing
 */
static inline void mpi3mr_build_sense_buffer(int desc, u8 *buf, u8 key,
	u8 asc, u8 ascq)
{
	if (desc) {
		buf[0] = 0x72;	/* descriptor, current */
		buf[1] = key;
		buf[2] = asc;
		buf[3] = ascq;
		buf[7] = 0;
	} else {
		buf[0] = 0x70;	/* fixed, current */
		buf[2] = key;
		buf[7] = 0xa;
		buf[12] = asc;
		buf[13] = ascq;
	}
}

/**
 * mpi3mr_map_eedp_error - Map EEDP errors from IOC status
 * @scmd: SCSI command reference
 * @ioc_status: status of MPI3 request
 *
 * Maps the EEDP error status of the SCSI IO request to sense
 * data.
 *
 * Return: Nothing
 */
static void mpi3mr_map_eedp_error(struct scsi_cmnd *scmd,
	u16 ioc_status)
{
	u8 ascq = 0;

	switch (ioc_status) {
	case MPI3_IOCSTATUS_EEDP_GUARD_ERROR:
		ascq = 0x01;
		break;
	case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR:
		ascq = 0x02;
		break;
	case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR:
		ascq = 0x03;
		break;
	default:
		ascq = 0x00;
		break;
	}

	mpi3mr_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
	    0x10, ascq);
	scmd->result = (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
}

/**
 * mpi3mr_process_op_reply_desc - reply descriptor handler
 * @mrioc: Adapter instance reference
 * @reply_desc: Operational reply descriptor
 * @reply_dma: place holder for reply DMA address
 * @qidx: Operational queue index
 *
 * Processes the operational reply descriptor and identifies the
 * descriptor type. Based on the descriptor, maps the MPI3
 * request status to a SCSI command status and calls the
 * scsi_done callback.
 *
 * Return: Nothing
 */
void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
	struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma, u16 qidx)
{
	u16 reply_desc_type, host_tag = 0;
	u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
	u32 ioc_loginfo = 0;
	struct mpi3_status_reply_descriptor *status_desc = NULL;
	struct mpi3_address_reply_descriptor *addr_desc = NULL;
	struct mpi3_success_reply_descriptor *success_desc = NULL;
	struct mpi3_scsi_io_reply *scsi_reply = NULL;
	struct scsi_cmnd *scmd = NULL;
	struct scmd_priv *priv = NULL;
	u8 *sense_buf = NULL;
	u8 scsi_state = 0, scsi_status = 0, sense_state = 0;
	u32 xfer_count = 0, sense_count = 0, resp_data = 0;
	u16 dev_handle = 0xFFFF;
	struct scsi_sense_hdr sshdr;
	struct mpi3mr_stgt_priv_data *stgt_priv_data = NULL;
	struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL;
	u32 ioc_pend_data_len = 0, tg_pend_data_len = 0, data_len_blks = 0;
	struct mpi3mr_throttle_group_info *tg = NULL;
	u8 throttle_enabled_dev = 0;

	*reply_dma = 0;
	reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
	switch (reply_desc_type) {
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
		status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(status_desc->host_tag);
		ioc_status = le16_to_cpu(status_desc->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
		addr_desc = (struct
mpi3_address_reply_descriptor *)reply_desc; 3154 *reply_dma = le64_to_cpu(addr_desc->reply_frame_address); 3155 scsi_reply = mpi3mr_get_reply_virt_addr(mrioc, 3156 *reply_dma); 3157 if (!scsi_reply) { 3158 panic("%s: scsi_reply is NULL, this shouldn't happen\n", 3159 mrioc->name); 3160 goto out; 3161 } 3162 host_tag = le16_to_cpu(scsi_reply->host_tag); 3163 ioc_status = le16_to_cpu(scsi_reply->ioc_status); 3164 scsi_status = scsi_reply->scsi_status; 3165 scsi_state = scsi_reply->scsi_state; 3166 dev_handle = le16_to_cpu(scsi_reply->dev_handle); 3167 sense_state = (scsi_state & MPI3_SCSI_STATE_SENSE_MASK); 3168 xfer_count = le32_to_cpu(scsi_reply->transfer_count); 3169 sense_count = le32_to_cpu(scsi_reply->sense_count); 3170 resp_data = le32_to_cpu(scsi_reply->response_data); 3171 sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc, 3172 le64_to_cpu(scsi_reply->sense_data_buffer_address)); 3173 if (ioc_status & 3174 MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL) 3175 ioc_loginfo = le32_to_cpu(scsi_reply->ioc_log_info); 3176 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK; 3177 if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY) 3178 panic("%s: Ran out of sense buffers\n", mrioc->name); 3179 break; 3180 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS: 3181 success_desc = (struct mpi3_success_reply_descriptor *)reply_desc; 3182 host_tag = le16_to_cpu(success_desc->host_tag); 3183 break; 3184 default: 3185 break; 3186 } 3187 scmd = mpi3mr_scmd_from_host_tag(mrioc, host_tag, qidx); 3188 if (!scmd) { 3189 panic("%s: Cannot Identify scmd for host_tag 0x%x\n", 3190 mrioc->name, host_tag); 3191 goto out; 3192 } 3193 priv = scsi_cmd_priv(scmd); 3194 3195 data_len_blks = scsi_bufflen(scmd) >> 9; 3196 sdev_priv_data = scmd->device->hostdata; 3197 if (sdev_priv_data) { 3198 stgt_priv_data = sdev_priv_data->tgt_priv_data; 3199 if (stgt_priv_data) { 3200 tg = stgt_priv_data->throttle_group; 3201 throttle_enabled_dev = 3202 stgt_priv_data->io_throttle_enabled; 3203 } 3204 } 3205 if (unlikely((data_len_blks >= mrioc->io_throttle_data_length) && 3206 throttle_enabled_dev)) { 3207 ioc_pend_data_len = atomic_sub_return(data_len_blks, 3208 &mrioc->pend_large_data_sz); 3209 if (tg) { 3210 tg_pend_data_len = atomic_sub_return(data_len_blks, 3211 &tg->pend_large_data_sz); 3212 if (tg->io_divert && ((ioc_pend_data_len <= 3213 mrioc->io_throttle_low) && 3214 (tg_pend_data_len <= tg->low))) { 3215 tg->io_divert = 0; 3216 mpi3mr_set_io_divert_for_all_vd_in_tg( 3217 mrioc, tg, 0); 3218 } 3219 } else { 3220 if (ioc_pend_data_len <= mrioc->io_throttle_low) 3221 stgt_priv_data->io_divert = 0; 3222 } 3223 } else if (unlikely((stgt_priv_data && stgt_priv_data->io_divert))) { 3224 ioc_pend_data_len = atomic_read(&mrioc->pend_large_data_sz); 3225 if (!tg) { 3226 if (ioc_pend_data_len <= mrioc->io_throttle_low) 3227 stgt_priv_data->io_divert = 0; 3228 3229 } else if (ioc_pend_data_len <= mrioc->io_throttle_low) { 3230 tg_pend_data_len = atomic_read(&tg->pend_large_data_sz); 3231 if (tg->io_divert && (tg_pend_data_len <= tg->low)) { 3232 tg->io_divert = 0; 3233 mpi3mr_set_io_divert_for_all_vd_in_tg( 3234 mrioc, tg, 0); 3235 } 3236 } 3237 } 3238 3239 if (success_desc) { 3240 scmd->result = DID_OK << 16; 3241 goto out_success; 3242 } 3243 3244 scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_count); 3245 if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN && 3246 xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY || 3247 scsi_status == MPI3_SCSI_STATUS_RESERVATION_CONFLICT || 3248 scsi_status == 
MPI3_SCSI_STATUS_TASK_SET_FULL)) 3249 ioc_status = MPI3_IOCSTATUS_SUCCESS; 3250 3251 if ((sense_state == MPI3_SCSI_STATE_SENSE_VALID) && sense_count && 3252 sense_buf) { 3253 u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, sense_count); 3254 3255 memcpy(scmd->sense_buffer, sense_buf, sz); 3256 } 3257 3258 switch (ioc_status) { 3259 case MPI3_IOCSTATUS_BUSY: 3260 case MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES: 3261 scmd->result = SAM_STAT_BUSY; 3262 break; 3263 case MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE: 3264 scmd->result = DID_NO_CONNECT << 16; 3265 break; 3266 case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED: 3267 scmd->result = DID_SOFT_ERROR << 16; 3268 break; 3269 case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED: 3270 case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED: 3271 scmd->result = DID_RESET << 16; 3272 break; 3273 case MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: 3274 if ((xfer_count == 0) || (scmd->underflow > xfer_count)) 3275 scmd->result = DID_SOFT_ERROR << 16; 3276 else 3277 scmd->result = (DID_OK << 16) | scsi_status; 3278 break; 3279 case MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN: 3280 scmd->result = (DID_OK << 16) | scsi_status; 3281 if (sense_state == MPI3_SCSI_STATE_SENSE_VALID) 3282 break; 3283 if (xfer_count < scmd->underflow) { 3284 if (scsi_status == SAM_STAT_BUSY) 3285 scmd->result = SAM_STAT_BUSY; 3286 else 3287 scmd->result = DID_SOFT_ERROR << 16; 3288 } else if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) || 3289 (sense_state != MPI3_SCSI_STATE_SENSE_NOT_AVAILABLE)) 3290 scmd->result = DID_SOFT_ERROR << 16; 3291 else if (scsi_state & MPI3_SCSI_STATE_TERMINATED) 3292 scmd->result = DID_RESET << 16; 3293 break; 3294 case MPI3_IOCSTATUS_SCSI_DATA_OVERRUN: 3295 scsi_set_resid(scmd, 0); 3296 fallthrough; 3297 case MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR: 3298 case MPI3_IOCSTATUS_SUCCESS: 3299 scmd->result = (DID_OK << 16) | scsi_status; 3300 if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) || 3301 (sense_state == MPI3_SCSI_STATE_SENSE_FAILED) || 3302 (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY)) 3303 scmd->result = DID_SOFT_ERROR << 16; 3304 else if (scsi_state & MPI3_SCSI_STATE_TERMINATED) 3305 scmd->result = DID_RESET << 16; 3306 break; 3307 case MPI3_IOCSTATUS_EEDP_GUARD_ERROR: 3308 case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR: 3309 case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR: 3310 mpi3mr_map_eedp_error(scmd, ioc_status); 3311 break; 3312 case MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR: 3313 case MPI3_IOCSTATUS_INVALID_FUNCTION: 3314 case MPI3_IOCSTATUS_INVALID_SGL: 3315 case MPI3_IOCSTATUS_INTERNAL_ERROR: 3316 case MPI3_IOCSTATUS_INVALID_FIELD: 3317 case MPI3_IOCSTATUS_INVALID_STATE: 3318 case MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR: 3319 case MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED: 3320 case MPI3_IOCSTATUS_INSUFFICIENT_POWER: 3321 default: 3322 scmd->result = DID_SOFT_ERROR << 16; 3323 break; 3324 } 3325 3326 if (scmd->result != (DID_OK << 16) && (scmd->cmnd[0] != ATA_12) && 3327 (scmd->cmnd[0] != ATA_16) && 3328 mrioc->logging_level & MPI3_DEBUG_SCSI_ERROR) { 3329 ioc_info(mrioc, "%s :scmd->result 0x%x\n", __func__, 3330 scmd->result); 3331 scsi_print_command(scmd); 3332 ioc_info(mrioc, 3333 "%s :Command issued to handle 0x%02x returned with error 0x%04x loginfo 0x%08x, qid %d\n", 3334 __func__, dev_handle, ioc_status, ioc_loginfo, 3335 priv->req_q_idx + 1); 3336 ioc_info(mrioc, 3337 " host_tag %d scsi_state 0x%02x scsi_status 0x%02x, xfer_cnt %d resp_data 0x%x\n", 3338 host_tag, scsi_state, scsi_status, xfer_count, resp_data); 3339 if (sense_buf) { 3340 scsi_normalize_sense(sense_buf, sense_count, &sshdr); 3341 
			ioc_info(mrioc,
			    "%s :sense_count 0x%x, sense_key 0x%x ASC 0x%x, ASCQ 0x%x\n",
			    __func__, sense_count, sshdr.sense_key,
			    sshdr.asc, sshdr.ascq);
		}
	}
out_success:
	if (priv->meta_sg_valid) {
		dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
		    scsi_prot_sg_count(scmd), scmd->sc_data_direction);
	}
	mpi3mr_clear_scmd_priv(mrioc, scmd);
	scsi_dma_unmap(scmd);
	scsi_done(scmd);
out:
	if (sense_buf)
		mpi3mr_repost_sense_buf(mrioc,
		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
}

/**
 * mpi3mr_get_chain_idx - get free chain buffer index
 * @mrioc: Adapter instance reference
 *
 * Try to get a free chain buffer index from the free pool.
 *
 * Return: -1 on failure or the free chain buffer index
 */
static int mpi3mr_get_chain_idx(struct mpi3mr_ioc *mrioc)
{
	u8 retry_count = 5;
	int cmd_idx = -1;
	unsigned long flags;

	spin_lock_irqsave(&mrioc->chain_buf_lock, flags);
	do {
		cmd_idx = find_first_zero_bit(mrioc->chain_bitmap,
		    mrioc->chain_buf_count);
		if (cmd_idx < mrioc->chain_buf_count) {
			set_bit(cmd_idx, mrioc->chain_bitmap);
			break;
		}
		cmd_idx = -1;
	} while (retry_count--);
	spin_unlock_irqrestore(&mrioc->chain_buf_lock, flags);
	return cmd_idx;
}

/**
 * mpi3mr_prepare_sg_scmd - build scatter gather list
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 * @scsiio_req: MPI3 SCSI IO request
 *
 * This function maps the SCSI command's data and protection SGEs
 * to MPI request SGEs. If required, an additional 4K chain
 * buffer is used to send the SGEs.
 *
 * Return: 0 on success, -ENOMEM on dma_map_sg failure
 */
static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
{
	dma_addr_t chain_dma;
	struct scatterlist *sg_scmd;
	void *sg_local, *chain;
	u32 chain_length;
	int sges_left, chain_idx;
	u32 sges_in_segment;
	u8 simple_sgl_flags;
	u8 simple_sgl_flags_last;
	u8 last_chain_sgl_flags;
	struct chain_element *chain_req;
	struct scmd_priv *priv = NULL;
	u32 meta_sg = le32_to_cpu(scsiio_req->flags) &
	    MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI;

	priv = scsi_cmd_priv(scmd);

	simple_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
	    MPI3_SGE_FLAGS_DLAS_SYSTEM;
	simple_sgl_flags_last = simple_sgl_flags |
	    MPI3_SGE_FLAGS_END_OF_LIST;
	last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN |
	    MPI3_SGE_FLAGS_DLAS_SYSTEM;

	if (meta_sg)
		sg_local = &scsiio_req->sgl[MPI3_SCSIIO_METASGL_INDEX];
	else
		sg_local = &scsiio_req->sgl;

	if (!scsiio_req->data_length && !meta_sg) {
		mpi3mr_build_zero_len_sge(sg_local);
		return 0;
	}

	if (meta_sg) {
		sg_scmd = scsi_prot_sglist(scmd);
		sges_left = dma_map_sg(&mrioc->pdev->dev,
		    scsi_prot_sglist(scmd),
		    scsi_prot_sg_count(scmd),
		    scmd->sc_data_direction);
		priv->meta_sg_valid = 1; /* To unmap meta sg DMA */
	} else {
		sg_scmd = scsi_sglist(scmd);
		sges_left = scsi_dma_map(scmd);
	}

	if (sges_left < 0) {
		sdev_printk(KERN_ERR, scmd->device,
		    "scsi_dma_map failed: request for %d bytes!\n",
		    scsi_bufflen(scmd));
		return -ENOMEM;
	}
	if (sges_left > mrioc->max_sgl_entries) {
		sdev_printk(KERN_ERR,
scmd->device, 3457 "scsi_dma_map returned unsupported sge count %d!\n", 3458 sges_left); 3459 return -ENOMEM; 3460 } 3461 3462 sges_in_segment = (mrioc->facts.op_req_sz - 3463 offsetof(struct mpi3_scsi_io_request, sgl)) / sizeof(struct mpi3_sge_common); 3464 3465 if (scsiio_req->sgl[0].eedp.flags == 3466 MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED && !meta_sg) { 3467 sg_local += sizeof(struct mpi3_sge_common); 3468 sges_in_segment--; 3469 /* Reserve 1st segment (scsiio_req->sgl[0]) for eedp */ 3470 } 3471 3472 if (scsiio_req->msg_flags == 3473 MPI3_SCSIIO_MSGFLAGS_METASGL_VALID && !meta_sg) { 3474 sges_in_segment--; 3475 /* Reserve last segment (scsiio_req->sgl[3]) for meta sg */ 3476 } 3477 3478 if (meta_sg) 3479 sges_in_segment = 1; 3480 3481 if (sges_left <= sges_in_segment) 3482 goto fill_in_last_segment; 3483 3484 /* fill in main message segment when there is a chain following */ 3485 while (sges_in_segment > 1) { 3486 mpi3mr_add_sg_single(sg_local, simple_sgl_flags, 3487 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 3488 sg_scmd = sg_next(sg_scmd); 3489 sg_local += sizeof(struct mpi3_sge_common); 3490 sges_left--; 3491 sges_in_segment--; 3492 } 3493 3494 chain_idx = mpi3mr_get_chain_idx(mrioc); 3495 if (chain_idx < 0) 3496 return -1; 3497 chain_req = &mrioc->chain_sgl_list[chain_idx]; 3498 if (meta_sg) 3499 priv->meta_chain_idx = chain_idx; 3500 else 3501 priv->chain_idx = chain_idx; 3502 3503 chain = chain_req->addr; 3504 chain_dma = chain_req->dma_addr; 3505 sges_in_segment = sges_left; 3506 chain_length = sges_in_segment * sizeof(struct mpi3_sge_common); 3507 3508 mpi3mr_add_sg_single(sg_local, last_chain_sgl_flags, 3509 chain_length, chain_dma); 3510 3511 sg_local = chain; 3512 3513 fill_in_last_segment: 3514 while (sges_left > 0) { 3515 if (sges_left == 1) 3516 mpi3mr_add_sg_single(sg_local, 3517 simple_sgl_flags_last, sg_dma_len(sg_scmd), 3518 sg_dma_address(sg_scmd)); 3519 else 3520 mpi3mr_add_sg_single(sg_local, simple_sgl_flags, 3521 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 3522 sg_scmd = sg_next(sg_scmd); 3523 sg_local += sizeof(struct mpi3_sge_common); 3524 sges_left--; 3525 } 3526 3527 return 0; 3528 } 3529 3530 /** 3531 * mpi3mr_build_sg_scmd - build scatter gather list for SCSI IO 3532 * @mrioc: Adapter instance reference 3533 * @scmd: SCSI command reference 3534 * @scsiio_req: MPI3 SCSI IO request 3535 * 3536 * This function calls mpi3mr_prepare_sg_scmd for constructing 3537 * both data SGEs and protection information SGEs in the MPI 3538 * format from the SCSI Command as appropriate . 3539 * 3540 * Return: return value of mpi3mr_prepare_sg_scmd. 3541 */ 3542 static int mpi3mr_build_sg_scmd(struct mpi3mr_ioc *mrioc, 3543 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req) 3544 { 3545 int ret; 3546 3547 ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req); 3548 if (ret) 3549 return ret; 3550 3551 if (scsiio_req->msg_flags == MPI3_SCSIIO_MSGFLAGS_METASGL_VALID) { 3552 /* There is a valid meta sg */ 3553 scsiio_req->flags |= 3554 cpu_to_le32(MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI); 3555 ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req); 3556 } 3557 3558 return ret; 3559 } 3560 3561 /** 3562 * mpi3mr_tm_response_name - get TM response as a string 3563 * @resp_code: TM response code 3564 * 3565 * Convert known task management response code as a readable 3566 * string. 3567 * 3568 * Return: response code string. 
 */
static const char *mpi3mr_tm_response_name(u8 resp_code)
{
	char *desc;

	switch (resp_code) {
	case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
		desc = "task management request completed";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME:
		desc = "invalid frame";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED:
		desc = "task management request not supported";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED:
		desc = "task management request failed";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
		desc = "task management request succeeded";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN:
		desc = "invalid LUN";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG:
		desc = "overlapped tag attempted";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
		desc = "task queued, however not sent to target";
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED:
		desc = "task management request denied by NVMe device";
		break;
	default:
		desc = "unknown";
		break;
	}

	return desc;
}

inline void mpi3mr_poll_pend_io_completions(struct mpi3mr_ioc *mrioc)
{
	int i;
	int num_of_reply_queues =
	    mrioc->num_op_reply_q + mrioc->op_reply_q_offset;

	for (i = mrioc->op_reply_q_offset; i < num_of_reply_queues; i++)
		mpi3mr_process_op_reply_q(mrioc,
		    mrioc->intr_info[i].op_reply_q);
}

/**
 * mpi3mr_issue_tm - Issue Task Management request
 * @mrioc: Adapter instance reference
 * @tm_type: Task Management type
 * @handle: Device handle
 * @lun: lun ID
 * @htag: Host tag of the TM request
 * @timeout: TM timeout value
 * @drv_cmd: Internal command tracker
 * @resp_code: Response code place holder
 * @scmd: SCSI command
 *
 * Issues a Task Management Request to the controller for the
 * specified target, LUN and command, waits for its completion and
 * checks the TM response. Recovers from a TM timeout by issuing a
 * controller reset.
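 *
 * A minimal usage sketch (hypothetical caller; real callers pass their
 * own tracker, host tag and timeout, e.g. the MPI3MR_HOSTTAG_BLK_TMS
 * tag and MPI3MR_RESETTM_TIMEOUT definitions from mpi3mr.h):
 *
 *   u8 resp_code = 0;
 *   int ret = mpi3mr_issue_tm(mrioc,
 *       MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, dev_handle,
 *       sdev->lun, MPI3MR_HOSTTAG_BLK_TMS, MPI3MR_RESETTM_TIMEOUT,
 *       &mrioc->host_tm_cmds, &resp_code, scmd);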
3637 * 3638 * Return: 0 on success, non-zero on errors 3639 */ 3640 int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type, 3641 u16 handle, uint lun, u16 htag, ulong timeout, 3642 struct mpi3mr_drv_cmd *drv_cmd, 3643 u8 *resp_code, struct scsi_cmnd *scmd) 3644 { 3645 struct mpi3_scsi_task_mgmt_request tm_req; 3646 struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL; 3647 int retval = 0; 3648 struct mpi3mr_tgt_dev *tgtdev = NULL; 3649 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 3650 struct scmd_priv *cmd_priv = NULL; 3651 struct scsi_device *sdev = NULL; 3652 struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL; 3653 3654 ioc_info(mrioc, "%s :Issue TM: TM type (0x%x) for devhandle 0x%04x\n", 3655 __func__, tm_type, handle); 3656 if (mrioc->unrecoverable) { 3657 retval = -1; 3658 ioc_err(mrioc, "%s :Issue TM: Unrecoverable controller\n", 3659 __func__); 3660 goto out; 3661 } 3662 3663 memset(&tm_req, 0, sizeof(tm_req)); 3664 mutex_lock(&drv_cmd->mutex); 3665 if (drv_cmd->state & MPI3MR_CMD_PENDING) { 3666 retval = -1; 3667 ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__); 3668 mutex_unlock(&drv_cmd->mutex); 3669 goto out; 3670 } 3671 if (mrioc->reset_in_progress) { 3672 retval = -1; 3673 ioc_err(mrioc, "%s :Issue TM: Reset in progress\n", __func__); 3674 mutex_unlock(&drv_cmd->mutex); 3675 goto out; 3676 } 3677 3678 drv_cmd->state = MPI3MR_CMD_PENDING; 3679 drv_cmd->is_waiting = 1; 3680 drv_cmd->callback = NULL; 3681 tm_req.dev_handle = cpu_to_le16(handle); 3682 tm_req.task_type = tm_type; 3683 tm_req.host_tag = cpu_to_le16(htag); 3684 3685 int_to_scsilun(lun, (struct scsi_lun *)tm_req.lun); 3686 tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT; 3687 3688 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 3689 3690 if (scmd) { 3691 sdev = scmd->device; 3692 sdev_priv_data = sdev->hostdata; 3693 scsi_tgt_priv_data = ((sdev_priv_data) ? 
3694 sdev_priv_data->tgt_priv_data : NULL); 3695 } else { 3696 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) 3697 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 3698 tgtdev->starget->hostdata; 3699 } 3700 3701 if (scsi_tgt_priv_data) 3702 atomic_inc(&scsi_tgt_priv_data->block_io); 3703 3704 if (tgtdev && (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)) { 3705 if (cmd_priv && tgtdev->dev_spec.pcie_inf.abort_to) 3706 timeout = tgtdev->dev_spec.pcie_inf.abort_to; 3707 else if (!cmd_priv && tgtdev->dev_spec.pcie_inf.reset_to) 3708 timeout = tgtdev->dev_spec.pcie_inf.reset_to; 3709 } 3710 3711 init_completion(&drv_cmd->done); 3712 retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1); 3713 if (retval) { 3714 ioc_err(mrioc, "%s :Issue TM: Admin Post failed\n", __func__); 3715 goto out_unlock; 3716 } 3717 wait_for_completion_timeout(&drv_cmd->done, (timeout * HZ)); 3718 3719 if (!(drv_cmd->state & MPI3MR_CMD_COMPLETE)) { 3720 drv_cmd->is_waiting = 0; 3721 retval = -1; 3722 if (!(drv_cmd->state & MPI3MR_CMD_RESET)) { 3723 dprint_tm(mrioc, 3724 "task management request timed out after %ld seconds\n", 3725 timeout); 3726 if (mrioc->logging_level & MPI3_DEBUG_TM) 3727 dprint_dump_req(&tm_req, sizeof(tm_req)/4); 3728 mpi3mr_soft_reset_handler(mrioc, 3729 MPI3MR_RESET_FROM_TM_TIMEOUT, 1); 3730 } 3731 goto out_unlock; 3732 } 3733 3734 if (!(drv_cmd->state & MPI3MR_CMD_REPLY_VALID)) { 3735 dprint_tm(mrioc, "invalid task management reply message\n"); 3736 retval = -1; 3737 goto out_unlock; 3738 } 3739 3740 tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply; 3741 3742 switch (drv_cmd->ioc_status) { 3743 case MPI3_IOCSTATUS_SUCCESS: 3744 *resp_code = le32_to_cpu(tm_reply->response_data) & 3745 MPI3MR_RI_MASK_RESPCODE; 3746 break; 3747 case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED: 3748 *resp_code = MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE; 3749 break; 3750 default: 3751 dprint_tm(mrioc, 3752 "task management request to handle(0x%04x) is failed with ioc_status(0x%04x) log_info(0x%08x)\n", 3753 handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo); 3754 retval = -1; 3755 goto out_unlock; 3756 } 3757 3758 switch (*resp_code) { 3759 case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED: 3760 case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE: 3761 break; 3762 case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC: 3763 if (tm_type != MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK) 3764 retval = -1; 3765 break; 3766 default: 3767 retval = -1; 3768 break; 3769 } 3770 3771 dprint_tm(mrioc, 3772 "task management request type(%d) completed for handle(0x%04x) with ioc_status(0x%04x), log_info(0x%08x), termination_count(%d), response:%s(0x%x)\n", 3773 tm_type, handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo, 3774 le32_to_cpu(tm_reply->termination_count), 3775 mpi3mr_tm_response_name(*resp_code), *resp_code); 3776 3777 if (!retval) { 3778 mpi3mr_ioc_disable_intr(mrioc); 3779 mpi3mr_poll_pend_io_completions(mrioc); 3780 mpi3mr_ioc_enable_intr(mrioc); 3781 mpi3mr_poll_pend_io_completions(mrioc); 3782 mpi3mr_process_admin_reply_q(mrioc); 3783 } 3784 switch (tm_type) { 3785 case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET: 3786 if (!scsi_tgt_priv_data) 3787 break; 3788 scsi_tgt_priv_data->pend_count = 0; 3789 blk_mq_tagset_busy_iter(&mrioc->shost->tag_set, 3790 mpi3mr_count_tgt_pending, 3791 (void *)scsi_tgt_priv_data->starget); 3792 break; 3793 case MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: 3794 if (!sdev_priv_data) 3795 break; 3796 sdev_priv_data->pend_count = 0; 3797 blk_mq_tagset_busy_iter(&mrioc->shost->tag_set, 
3798 		    mpi3mr_count_dev_pending, (void *)sdev);
3799 		break;
3800 	default:
3801 		break;
3802 	}
3803 
3804 out_unlock:
3805 	drv_cmd->state = MPI3MR_CMD_NOTUSED;
3806 	mutex_unlock(&drv_cmd->mutex);
3807 	if (scsi_tgt_priv_data)
3808 		atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
3809 	if (tgtdev)
3810 		mpi3mr_tgtdev_put(tgtdev);
3811 out:
3812 	return retval;
3813 }
3814 
3815 /**
3816  * mpi3mr_bios_param - BIOS param callback
3817  * @sdev: SCSI device reference
3818  * @bdev: Block device reference
3819  * @capacity: Capacity in logical sectors
3820  * @params: Parameter array
3821  *
3822  * Just sets the parameters with heads/sectors/cylinders.
 * Disks of 1 GiB (0x200000 sectors) and larger are described
 * with 255 heads and 63 sectors per track; smaller disks with
 * 64 heads and 32 sectors per track.
3823  *
3824  * Return: 0 always
3825  */
3826 static int mpi3mr_bios_param(struct scsi_device *sdev,
3827 	struct block_device *bdev, sector_t capacity, int params[])
3828 {
3829 	int heads;
3830 	int sectors;
3831 	sector_t cylinders;
3832 	ulong dummy;
3833 
3834 	heads = 64;
3835 	sectors = 32;
3836 
3837 	dummy = heads * sectors;
3838 	cylinders = capacity;
3839 	sector_div(cylinders, dummy);
3840 
3841 	if ((ulong)capacity >= 0x200000) {
3842 		heads = 255;
3843 		sectors = 63;
3844 		dummy = heads * sectors;
3845 		cylinders = capacity;
3846 		sector_div(cylinders, dummy);
3847 	}
3848 
3849 	params[0] = heads;
3850 	params[1] = sectors;
3851 	params[2] = cylinders;
3852 	return 0;
3853 }
3854 
3855 /**
3856  * mpi3mr_map_queues - Map queues callback handler
3857  * @shost: SCSI host reference
3858  *
3859  * Maps default and poll queues.
3860  *
3861  * Return: Nothing.
3862  */
3863 static void mpi3mr_map_queues(struct Scsi_Host *shost)
3864 {
3865 	struct mpi3mr_ioc *mrioc = shost_priv(shost);
3866 	int i, qoff, offset;
3867 	struct blk_mq_queue_map *map = NULL;
3868 
3869 	offset = mrioc->op_reply_q_offset;
3870 
3871 	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
3872 		map = &shost->tag_set.map[i];
3873 
3874 		map->nr_queues = 0;
3875 
3876 		if (i == HCTX_TYPE_DEFAULT)
3877 			map->nr_queues = mrioc->default_qcount;
3878 		else if (i == HCTX_TYPE_POLL)
3879 			map->nr_queues = mrioc->active_poll_qcount;
3880 
3881 		if (!map->nr_queues) {
3882 			BUG_ON(i == HCTX_TYPE_DEFAULT);
3883 			continue;
3884 		}
3885 
3886 		/*
3887 		 * The poll queue(s) don't have an IRQ (and hence IRQ
3888 		 * affinity), so use the regular blk-mq cpu mapping
3889 		 */
3890 		map->queue_offset = qoff;
3891 		if (i != HCTX_TYPE_POLL)
3892 			blk_mq_pci_map_queues(map, mrioc->pdev, offset);
3893 		else
3894 			blk_mq_map_queues(map);
3895 
3896 		qoff += map->nr_queues;
3897 		offset += map->nr_queues;
3898 	}
3899 }
3900 
3901 /**
3902  * mpi3mr_get_fw_pending_ios - Calculate pending I/O count
3903  * @mrioc: Adapter instance reference
3904  *
3905  * Calculate the pending I/Os for the controller and return.
3906  *
3907  * Return: Number of pending I/Os
3908  */
3909 static inline int mpi3mr_get_fw_pending_ios(struct mpi3mr_ioc *mrioc)
3910 {
3911 	u16 i;
3912 	uint pend_ios = 0;
3913 
3914 	for (i = 0; i < mrioc->num_op_reply_q; i++)
3915 		pend_ios += atomic_read(&mrioc->op_reply_qinfo[i].pend_ios);
3916 	return pend_ios;
3917 }
3918 
3919 /**
3920  * mpi3mr_print_pending_host_io - print pending I/Os
3921  * @mrioc: Adapter instance reference
3922  *
3923  * Print the number of pending I/Os and each I/O's details prior
3924  * to reset for debugging purposes.
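 *
 * The per-command details are emitted by the mpi3mr_print_scmd()
 * callback handed to blk_mq_tagset_busy_iter() below, so only
 * requests the block layer still considers busy are printed.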
3925  *
3926  * Return: Nothing
3927  */
3928 static void mpi3mr_print_pending_host_io(struct mpi3mr_ioc *mrioc)
3929 {
3930 	struct Scsi_Host *shost = mrioc->shost;
3931 
3932 	ioc_info(mrioc, "%s :Pending commands prior to reset: %d\n",
3933 	    __func__, mpi3mr_get_fw_pending_ios(mrioc));
3934 	blk_mq_tagset_busy_iter(&shost->tag_set,
3935 	    mpi3mr_print_scmd, (void *)mrioc);
3936 }
3937 
3938 /**
3939  * mpi3mr_wait_for_host_io - block for I/Os to complete
3940  * @mrioc: Adapter instance reference
3941  * @timeout: timeout in seconds
 *
3942  * Waits for pending I/Os for the given adapter to complete or
3943  * to hit the timeout.
3944  *
3945  * Return: Nothing
3946  */
3947 void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout)
3948 {
3949 	enum mpi3mr_iocstate iocstate;
3950 	int i = 0;
3951 
3952 	iocstate = mpi3mr_get_iocstate(mrioc);
3953 	if (iocstate != MRIOC_STATE_READY)
3954 		return;
3955 
3956 	if (!mpi3mr_get_fw_pending_ios(mrioc))
3957 		return;
3958 	ioc_info(mrioc,
3959 	    "%s :Waiting for %d seconds prior to reset for %d I/O\n",
3960 	    __func__, timeout, mpi3mr_get_fw_pending_ios(mrioc));
3961 
3962 	for (i = 0; i < timeout; i++) {
3963 		if (!mpi3mr_get_fw_pending_ios(mrioc))
3964 			break;
3965 		iocstate = mpi3mr_get_iocstate(mrioc);
3966 		if (iocstate != MRIOC_STATE_READY)
3967 			break;
3968 		msleep(1000);
3969 	}
3970 
3971 	ioc_info(mrioc, "%s :Pending I/Os after wait is: %d\n", __func__,
3972 	    mpi3mr_get_fw_pending_ios(mrioc));
3973 }
3974 
3975 /**
3976  * mpi3mr_setup_divert_ws - Setup Divert IO flag for write same
3977  * @mrioc: Adapter instance reference
3978  * @scmd: SCSI command reference
3979  * @scsiio_req: MPI3 SCSI IO request
3980  * @scsiio_flags: Pointer to MPI3 SCSI IO Flags
3981  * @wslen: write same max length
3982  *
3983  * Gets the values of the unmap and ndob bits and the number of
3984  * blocks from a WRITE SAME SCSI I/O and, based on these values,
3985  * sets the divert I/O flag and the reason for diverting the I/O
 * to the firmware. For example, a WRITE SAME (16) with both the
 * UNMAP and NDOB bits set and a NUMBER OF BLOCKS larger than
 * @wslen is diverted.
3986  *
3987  * Return: Nothing
3988  */
3989 static inline void mpi3mr_setup_divert_ws(struct mpi3mr_ioc *mrioc,
3990 	struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req,
3991 	u32 *scsiio_flags, u16 wslen)
3992 {
3993 	u8 unmap = 0, ndob = 0;
3994 	u8 opcode = scmd->cmnd[0];
3995 	u32 num_blocks = 0;
3996 	u16 sa = (scmd->cmnd[8] << 8) | (scmd->cmnd[9]);
3997 
3998 	if (opcode == WRITE_SAME_16) {
3999 		unmap = scmd->cmnd[1] & 0x08;
4000 		ndob = scmd->cmnd[1] & 0x01;
4001 		num_blocks = get_unaligned_be32(scmd->cmnd + 10);
4002 	} else if ((opcode == VARIABLE_LENGTH_CMD) && (sa == WRITE_SAME_32)) {
4003 		unmap = scmd->cmnd[10] & 0x08;
4004 		ndob = scmd->cmnd[10] & 0x01;
4005 		num_blocks = get_unaligned_be32(scmd->cmnd + 28);
4006 	} else
4007 		return;
4008 
4009 	if ((unmap) && (ndob) && (num_blocks > wslen)) {
4010 		scsiio_req->msg_flags |=
4011 		    MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE;
4012 		*scsiio_flags |=
4013 		    MPI3_SCSIIO_FLAGS_DIVERT_REASON_WRITE_SAME_TOO_LARGE;
4014 	}
4015 }
4016 
4017 /**
4018  * mpi3mr_eh_host_reset - Host reset error handling callback
4019  * @scmd: SCSI command reference
4020  *
4021  * Issue a controller reset if the scmd is for a physical
4022  * device. If the scmd is for a RAID volume, then wait for
4023  * MPI3MR_RAID_ERRREC_RESET_TIMEOUT and check whether any I/Os
4024  * are pending prior to issuing the reset to the controller.
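 *
 * This handler sits at the top of the SCSI error-handling
 * escalation ladder: the midlayer normally invokes it only after
 * the device(LUN) and target reset handlers defined later in
 * this file have failed to recover the command.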
4025  *
4026  * Return: SUCCESS on successful reset, else FAILED
4027  */
4028 static int mpi3mr_eh_host_reset(struct scsi_cmnd *scmd)
4029 {
4030 	struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
4031 	struct mpi3mr_stgt_priv_data *stgt_priv_data;
4032 	struct mpi3mr_sdev_priv_data *sdev_priv_data;
4033 	u8 dev_type = MPI3_DEVICE_DEVFORM_VD;
4034 	int retval = FAILED, ret;
4035 
4036 	sdev_priv_data = scmd->device->hostdata;
4037 	if (sdev_priv_data && sdev_priv_data->tgt_priv_data) {
4038 		stgt_priv_data = sdev_priv_data->tgt_priv_data;
4039 		dev_type = stgt_priv_data->dev_type;
4040 	}
4041 
4042 	if (dev_type == MPI3_DEVICE_DEVFORM_VD) {
4043 		mpi3mr_wait_for_host_io(mrioc,
4044 		    MPI3MR_RAID_ERRREC_RESET_TIMEOUT);
4045 		if (!mpi3mr_get_fw_pending_ios(mrioc)) {
4046 			retval = SUCCESS;
4047 			goto out;
4048 		}
4049 	}
4050 
4051 	mpi3mr_print_pending_host_io(mrioc);
4052 	ret = mpi3mr_soft_reset_handler(mrioc,
4053 	    MPI3MR_RESET_FROM_EH_HOS, 1);
4054 	if (ret)
4055 		goto out;
4056 
4057 	retval = SUCCESS;
4058 out:
4059 	sdev_printk(KERN_INFO, scmd->device,
4060 	    "Host reset is %s for scmd(%p)\n",
4061 	    ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
4062 
4063 	return retval;
4064 }
4065 
4066 /**
4067  * mpi3mr_eh_target_reset - Target reset error handling callback
4068  * @scmd: SCSI command reference
4069  *
4070  * Issue a Target Reset Task Management request and verify that
4071  * the scmd is terminated successfully, returning status
 * accordingly.
4072  *
4073  * Return: SUCCESS on successful termination of the scmd, else
4074  * FAILED
4075  */
4076 static int mpi3mr_eh_target_reset(struct scsi_cmnd *scmd)
4077 {
4078 	struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
4079 	struct mpi3mr_stgt_priv_data *stgt_priv_data;
4080 	struct mpi3mr_sdev_priv_data *sdev_priv_data;
4081 	u16 dev_handle;
4082 	u8 resp_code = 0;
4083 	int retval = FAILED, ret = 0;
4084 
4085 	sdev_printk(KERN_INFO, scmd->device,
4086 	    "Attempting Target Reset! scmd(%p)\n", scmd);
4087 	scsi_print_command(scmd);
4088 
4089 	sdev_priv_data = scmd->device->hostdata;
4090 	if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
4091 		sdev_printk(KERN_INFO, scmd->device,
4092 		    "SCSI device is not available\n");
4093 		retval = SUCCESS;
4094 		goto out;
4095 	}
4096 
4097 	stgt_priv_data = sdev_priv_data->tgt_priv_data;
4098 	dev_handle = stgt_priv_data->dev_handle;
4099 	if (stgt_priv_data->dev_removed) {
4100 		struct scmd_priv *cmd_priv = scsi_cmd_priv(scmd);
4101 		sdev_printk(KERN_INFO, scmd->device,
4102 		    "%s:target(handle = 0x%04x) is removed, target reset is not issued\n",
4103 		    mrioc->name, dev_handle);
4104 		if (!cmd_priv->in_lld_scope || cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID)
4105 			retval = SUCCESS;
4106 		else
4107 			retval = FAILED;
4108 		goto out;
4109 	}
4110 	sdev_printk(KERN_INFO, scmd->device,
4111 	    "Target Reset is issued to handle(0x%04x)\n",
4112 	    dev_handle);
4113 
4114 	ret = mpi3mr_issue_tm(mrioc,
4115 	    MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, dev_handle,
4116 	    sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
4117 	    MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);
4118 
4119 	if (ret)
4120 		goto out;
4121 
4122 	if (stgt_priv_data->pend_count) {
4123 		sdev_printk(KERN_INFO, scmd->device,
4124 		    "%s: target has %d pending commands, target reset failed\n",
4125 		    mrioc->name, stgt_priv_data->pend_count);
4126 		goto out;
4127 	}
4128 
4129 	retval = SUCCESS;
4130 out:
4131 	sdev_printk(KERN_INFO, scmd->device,
4132 	    "%s: target reset is %s for scmd(%p)\n", mrioc->name,
4133 	    ((retval == SUCCESS) ?
"SUCCESS" : "FAILED"), scmd); 4134 4135 return retval; 4136 } 4137 4138 /** 4139 * mpi3mr_eh_dev_reset- Device reset error handling callback 4140 * @scmd: SCSI command reference 4141 * 4142 * Issue lun reset Task Management and verify the scmd is 4143 * terminated successfully and return status accordingly. 4144 * 4145 * Return: SUCCESS of successful termination of the scmd else 4146 * FAILED 4147 */ 4148 static int mpi3mr_eh_dev_reset(struct scsi_cmnd *scmd) 4149 { 4150 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host); 4151 struct mpi3mr_stgt_priv_data *stgt_priv_data; 4152 struct mpi3mr_sdev_priv_data *sdev_priv_data; 4153 u16 dev_handle; 4154 u8 resp_code = 0; 4155 int retval = FAILED, ret = 0; 4156 4157 sdev_printk(KERN_INFO, scmd->device, 4158 "Attempting Device(lun) Reset! scmd(%p)\n", scmd); 4159 scsi_print_command(scmd); 4160 4161 sdev_priv_data = scmd->device->hostdata; 4162 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) { 4163 sdev_printk(KERN_INFO, scmd->device, 4164 "SCSI device is not available\n"); 4165 retval = SUCCESS; 4166 goto out; 4167 } 4168 4169 stgt_priv_data = sdev_priv_data->tgt_priv_data; 4170 dev_handle = stgt_priv_data->dev_handle; 4171 if (stgt_priv_data->dev_removed) { 4172 struct scmd_priv *cmd_priv = scsi_cmd_priv(scmd); 4173 sdev_printk(KERN_INFO, scmd->device, 4174 "%s: device(handle = 0x%04x) is removed, device(LUN) reset is not issued\n", 4175 mrioc->name, dev_handle); 4176 if (!cmd_priv->in_lld_scope || cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID) 4177 retval = SUCCESS; 4178 else 4179 retval = FAILED; 4180 goto out; 4181 } 4182 sdev_printk(KERN_INFO, scmd->device, 4183 "Device(lun) Reset is issued to handle(0x%04x)\n", dev_handle); 4184 4185 ret = mpi3mr_issue_tm(mrioc, 4186 MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, dev_handle, 4187 sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS, 4188 MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd); 4189 4190 if (ret) 4191 goto out; 4192 4193 if (sdev_priv_data->pend_count) { 4194 sdev_printk(KERN_INFO, scmd->device, 4195 "%s: device has %d pending commands, device(LUN) reset is failed\n", 4196 mrioc->name, sdev_priv_data->pend_count); 4197 goto out; 4198 } 4199 retval = SUCCESS; 4200 out: 4201 sdev_printk(KERN_INFO, scmd->device, 4202 "%s: device(LUN) reset is %s for scmd(%p)\n", mrioc->name, 4203 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); 4204 4205 return retval; 4206 } 4207 4208 /** 4209 * mpi3mr_scan_start - Scan start callback handler 4210 * @shost: SCSI host reference 4211 * 4212 * Issue port enable request asynchronously. 4213 * 4214 * Return: Nothing 4215 */ 4216 static void mpi3mr_scan_start(struct Scsi_Host *shost) 4217 { 4218 struct mpi3mr_ioc *mrioc = shost_priv(shost); 4219 4220 mrioc->scan_started = 1; 4221 ioc_info(mrioc, "%s :Issuing Port Enable\n", __func__); 4222 if (mpi3mr_issue_port_enable(mrioc, 1)) { 4223 ioc_err(mrioc, "%s :Issuing port enable failed\n", __func__); 4224 mrioc->scan_started = 0; 4225 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR; 4226 } 4227 } 4228 4229 /** 4230 * mpi3mr_scan_finished - Scan finished callback handler 4231 * @shost: SCSI host reference 4232 * @time: Jiffies from the scan start 4233 * 4234 * Checks whether the port enable is completed or timedout or 4235 * failed and set the scan status accordingly after taking any 4236 * recovery if required. 
4237 * 4238 * Return: 1 on scan finished or timed out, 0 for in progress 4239 */ 4240 static int mpi3mr_scan_finished(struct Scsi_Host *shost, 4241 unsigned long time) 4242 { 4243 struct mpi3mr_ioc *mrioc = shost_priv(shost); 4244 u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT; 4245 u32 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 4246 4247 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) || 4248 (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) { 4249 ioc_err(mrioc, "port enable failed due to fault or reset\n"); 4250 mpi3mr_print_fault_info(mrioc); 4251 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR; 4252 mrioc->scan_started = 0; 4253 mrioc->init_cmds.is_waiting = 0; 4254 mrioc->init_cmds.callback = NULL; 4255 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 4256 } 4257 4258 if (time >= (pe_timeout * HZ)) { 4259 ioc_err(mrioc, "port enable failed due to time out\n"); 4260 mpi3mr_check_rh_fault_ioc(mrioc, 4261 MPI3MR_RESET_FROM_PE_TIMEOUT); 4262 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR; 4263 mrioc->scan_started = 0; 4264 mrioc->init_cmds.is_waiting = 0; 4265 mrioc->init_cmds.callback = NULL; 4266 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 4267 } 4268 4269 if (mrioc->scan_started) 4270 return 0; 4271 4272 if (mrioc->scan_failed) { 4273 ioc_err(mrioc, 4274 "port enable failed with status=0x%04x\n", 4275 mrioc->scan_failed); 4276 } else 4277 ioc_info(mrioc, "port enable is successfully completed\n"); 4278 4279 mpi3mr_start_watchdog(mrioc); 4280 mrioc->is_driver_loading = 0; 4281 mrioc->stop_bsgs = 0; 4282 return 1; 4283 } 4284 4285 /** 4286 * mpi3mr_slave_destroy - Slave destroy callback handler 4287 * @sdev: SCSI device reference 4288 * 4289 * Cleanup and free per device(lun) private data. 4290 * 4291 * Return: Nothing. 4292 */ 4293 static void mpi3mr_slave_destroy(struct scsi_device *sdev) 4294 { 4295 struct Scsi_Host *shost; 4296 struct mpi3mr_ioc *mrioc; 4297 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 4298 struct mpi3mr_tgt_dev *tgt_dev = NULL; 4299 unsigned long flags; 4300 struct scsi_target *starget; 4301 struct sas_rphy *rphy = NULL; 4302 4303 if (!sdev->hostdata) 4304 return; 4305 4306 starget = scsi_target(sdev); 4307 shost = dev_to_shost(&starget->dev); 4308 mrioc = shost_priv(shost); 4309 scsi_tgt_priv_data = starget->hostdata; 4310 4311 scsi_tgt_priv_data->num_luns--; 4312 4313 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 4314 if (starget->channel == mrioc->scsi_device_channel) 4315 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 4316 else if (mrioc->sas_transport_enabled && !starget->channel) { 4317 rphy = dev_to_rphy(starget->dev.parent); 4318 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc, 4319 rphy->identify.sas_address, rphy); 4320 } 4321 4322 if (tgt_dev && (!scsi_tgt_priv_data->num_luns)) 4323 tgt_dev->starget = NULL; 4324 if (tgt_dev) 4325 mpi3mr_tgtdev_put(tgt_dev); 4326 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4327 4328 kfree(sdev->hostdata); 4329 sdev->hostdata = NULL; 4330 } 4331 4332 /** 4333 * mpi3mr_target_destroy - Target destroy callback handler 4334 * @starget: SCSI target reference 4335 * 4336 * Cleanup and free per target private data. 4337 * 4338 * Return: Nothing. 
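 *
 * Counterpart of mpi3mr_slave_alloc() below: drops the LUN count
 * on the target private data and frees sdev->hostdata.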
4339 */ 4340 static void mpi3mr_target_destroy(struct scsi_target *starget) 4341 { 4342 struct Scsi_Host *shost; 4343 struct mpi3mr_ioc *mrioc; 4344 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 4345 struct mpi3mr_tgt_dev *tgt_dev; 4346 unsigned long flags; 4347 4348 if (!starget->hostdata) 4349 return; 4350 4351 shost = dev_to_shost(&starget->dev); 4352 mrioc = shost_priv(shost); 4353 scsi_tgt_priv_data = starget->hostdata; 4354 4355 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 4356 tgt_dev = __mpi3mr_get_tgtdev_from_tgtpriv(mrioc, scsi_tgt_priv_data); 4357 if (tgt_dev && (tgt_dev->starget == starget) && 4358 (tgt_dev->perst_id == starget->id)) 4359 tgt_dev->starget = NULL; 4360 if (tgt_dev) { 4361 scsi_tgt_priv_data->tgt_dev = NULL; 4362 scsi_tgt_priv_data->perst_id = 0; 4363 mpi3mr_tgtdev_put(tgt_dev); 4364 mpi3mr_tgtdev_put(tgt_dev); 4365 } 4366 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4367 4368 kfree(starget->hostdata); 4369 starget->hostdata = NULL; 4370 } 4371 4372 /** 4373 * mpi3mr_slave_configure - Slave configure callback handler 4374 * @sdev: SCSI device reference 4375 * 4376 * Configure queue depth, max hardware sectors and virt boundary 4377 * as required 4378 * 4379 * Return: 0 always. 4380 */ 4381 static int mpi3mr_slave_configure(struct scsi_device *sdev) 4382 { 4383 struct scsi_target *starget; 4384 struct Scsi_Host *shost; 4385 struct mpi3mr_ioc *mrioc; 4386 struct mpi3mr_tgt_dev *tgt_dev = NULL; 4387 unsigned long flags; 4388 int retval = 0; 4389 struct sas_rphy *rphy = NULL; 4390 4391 starget = scsi_target(sdev); 4392 shost = dev_to_shost(&starget->dev); 4393 mrioc = shost_priv(shost); 4394 4395 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 4396 if (starget->channel == mrioc->scsi_device_channel) 4397 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 4398 else if (mrioc->sas_transport_enabled && !starget->channel) { 4399 rphy = dev_to_rphy(starget->dev.parent); 4400 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc, 4401 rphy->identify.sas_address, rphy); 4402 } 4403 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4404 if (!tgt_dev) 4405 return -ENXIO; 4406 4407 mpi3mr_change_queue_depth(sdev, tgt_dev->q_depth); 4408 4409 sdev->eh_timeout = MPI3MR_EH_SCMD_TIMEOUT; 4410 blk_queue_rq_timeout(sdev->request_queue, MPI3MR_SCMD_TIMEOUT); 4411 4412 switch (tgt_dev->dev_type) { 4413 case MPI3_DEVICE_DEVFORM_PCIE: 4414 /*The block layer hw sector size = 512*/ 4415 if ((tgt_dev->dev_spec.pcie_inf.dev_info & 4416 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) == 4417 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) { 4418 blk_queue_max_hw_sectors(sdev->request_queue, 4419 tgt_dev->dev_spec.pcie_inf.mdts / 512); 4420 if (tgt_dev->dev_spec.pcie_inf.pgsz == 0) 4421 blk_queue_virt_boundary(sdev->request_queue, 4422 ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1)); 4423 else 4424 blk_queue_virt_boundary(sdev->request_queue, 4425 ((1 << tgt_dev->dev_spec.pcie_inf.pgsz) - 1)); 4426 } 4427 break; 4428 default: 4429 break; 4430 } 4431 4432 mpi3mr_tgtdev_put(tgt_dev); 4433 4434 return retval; 4435 } 4436 4437 /** 4438 * mpi3mr_slave_alloc -Slave alloc callback handler 4439 * @sdev: SCSI device reference 4440 * 4441 * Allocate per device(lun) private data and initialize it. 4442 * 4443 * Return: 0 on success -ENOMEM on memory allocation failure. 
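 *
 * The private data allocated here is freed in
 * mpi3mr_slave_destroy() above.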
4444 */ 4445 static int mpi3mr_slave_alloc(struct scsi_device *sdev) 4446 { 4447 struct Scsi_Host *shost; 4448 struct mpi3mr_ioc *mrioc; 4449 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 4450 struct mpi3mr_tgt_dev *tgt_dev = NULL; 4451 struct mpi3mr_sdev_priv_data *scsi_dev_priv_data; 4452 unsigned long flags; 4453 struct scsi_target *starget; 4454 int retval = 0; 4455 struct sas_rphy *rphy = NULL; 4456 4457 starget = scsi_target(sdev); 4458 shost = dev_to_shost(&starget->dev); 4459 mrioc = shost_priv(shost); 4460 scsi_tgt_priv_data = starget->hostdata; 4461 4462 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 4463 4464 if (starget->channel == mrioc->scsi_device_channel) 4465 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 4466 else if (mrioc->sas_transport_enabled && !starget->channel) { 4467 rphy = dev_to_rphy(starget->dev.parent); 4468 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc, 4469 rphy->identify.sas_address, rphy); 4470 } 4471 4472 if (tgt_dev) { 4473 if (tgt_dev->starget == NULL) 4474 tgt_dev->starget = starget; 4475 mpi3mr_tgtdev_put(tgt_dev); 4476 retval = 0; 4477 } else { 4478 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4479 return -ENXIO; 4480 } 4481 4482 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4483 4484 scsi_dev_priv_data = kzalloc(sizeof(*scsi_dev_priv_data), GFP_KERNEL); 4485 if (!scsi_dev_priv_data) 4486 return -ENOMEM; 4487 4488 scsi_dev_priv_data->lun_id = sdev->lun; 4489 scsi_dev_priv_data->tgt_priv_data = scsi_tgt_priv_data; 4490 sdev->hostdata = scsi_dev_priv_data; 4491 4492 scsi_tgt_priv_data->num_luns++; 4493 4494 return retval; 4495 } 4496 4497 /** 4498 * mpi3mr_target_alloc - Target alloc callback handler 4499 * @starget: SCSI target reference 4500 * 4501 * Allocate per target private data and initialize it. 4502 * 4503 * Return: 0 on success -ENOMEM on memory allocation failure. 
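 *
 * The private data allocated here is freed in
 * mpi3mr_target_destroy() above; that handler also drops the
 * tgt_dev reference cached below in scsi_tgt_priv_data->tgt_dev,
 * which is why it calls mpi3mr_tgtdev_put() twice.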
4504  */
4505 static int mpi3mr_target_alloc(struct scsi_target *starget)
4506 {
4507 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4508 	struct mpi3mr_ioc *mrioc = shost_priv(shost);
4509 	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
4510 	struct mpi3mr_tgt_dev *tgt_dev;
4511 	unsigned long flags;
4512 	int retval = 0;
4513 	struct sas_rphy *rphy = NULL;
4514 
4515 	scsi_tgt_priv_data = kzalloc(sizeof(*scsi_tgt_priv_data), GFP_KERNEL);
4516 	if (!scsi_tgt_priv_data)
4517 		return -ENOMEM;
4518 
4519 	starget->hostdata = scsi_tgt_priv_data;
4520 
4521 	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
4522 	if (starget->channel == mrioc->scsi_device_channel) {
4523 		tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
4524 		if (tgt_dev && !tgt_dev->is_hidden) {
4525 			scsi_tgt_priv_data->starget = starget;
4526 			scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
4527 			scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
4528 			scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
4529 			scsi_tgt_priv_data->tgt_dev = tgt_dev;
4530 			tgt_dev->starget = starget;
4531 			atomic_set(&scsi_tgt_priv_data->block_io, 0);
4532 			retval = 0;
4533 			if ((tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_PCIE) &&
4534 			    ((tgt_dev->dev_spec.pcie_inf.dev_info &
4535 			    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
4536 			    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
4537 			    ((tgt_dev->dev_spec.pcie_inf.dev_info &
4538 			    MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_MASK) !=
4539 			    MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_0))
4540 				scsi_tgt_priv_data->dev_nvme_dif = 1;
4541 			scsi_tgt_priv_data->io_throttle_enabled = tgt_dev->io_throttle_enabled;
4542 			scsi_tgt_priv_data->wslen = tgt_dev->wslen;
4543 			if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_VD)
4544 				scsi_tgt_priv_data->throttle_group = tgt_dev->dev_spec.vd_inf.tg;
4545 		} else
4546 			retval = -ENXIO;
4547 	} else if (mrioc->sas_transport_enabled && !starget->channel) {
4548 		rphy = dev_to_rphy(starget->dev.parent);
4549 		tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
4550 		    rphy->identify.sas_address, rphy);
4551 		if (tgt_dev && !tgt_dev->is_hidden && !tgt_dev->non_stl &&
4552 		    (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA)) {
4553 			scsi_tgt_priv_data->starget = starget;
4554 			scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
4555 			scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
4556 			scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
4557 			scsi_tgt_priv_data->tgt_dev = tgt_dev;
4558 			scsi_tgt_priv_data->io_throttle_enabled = tgt_dev->io_throttle_enabled;
4559 			scsi_tgt_priv_data->wslen = tgt_dev->wslen;
4560 			tgt_dev->starget = starget;
4561 			atomic_set(&scsi_tgt_priv_data->block_io, 0);
4562 			retval = 0;
4563 		} else
4564 			retval = -ENXIO;
4565 	}
4566 	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
4567 
4568 	return retval;
4569 }
4570 
4571 /**
4572  * mpi3mr_check_return_unmap - Whether an unmap is allowed
4573  * @mrioc: Adapter instance reference
4574  * @scmd: SCSI Command reference
4575  *
4576  * The controller hardware cannot handle certain unmap commands
4577  * for NVMe drives; this routine checks for those, returns true
4578  * and completes the SCSI command with proper status and sense
4579  * data.
4580  *
4581  * Return: TRUE for not allowed unmap, FALSE otherwise.
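 *
 * For reference (per SBC, values illustrative): the PARAMETER
 * LIST LENGTH lives in CDB bytes 7-8 and the BLOCK DESCRIPTOR
 * DATA LENGTH in bytes 2-3 of the parameter list, with 16-byte
 * descriptors following an 8-byte header; an UNMAP carrying
 * three descriptors would therefore have
 * param_len = 8 + 3 * 16 = 56.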
4582  */
4583 static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc,
4584 	struct scsi_cmnd *scmd)
4585 {
4586 	unsigned char *buf;
4587 	u16 param_len, desc_len, trunc_param_len;
4588 
4589 	trunc_param_len = param_len = get_unaligned_be16(scmd->cmnd + 7);
4590 
4591 	if (mrioc->pdev->revision) {
4592 		if ((param_len > 24) && ((param_len - 8) & 0xF)) {
4593 			trunc_param_len -= (param_len - 8) & 0xF;
4594 			dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR);
4595 			dprint_scsi_err(mrioc,
4596 			    "truncating param_len from (%d) to (%d)\n",
4597 			    param_len, trunc_param_len);
4598 			put_unaligned_be16(trunc_param_len, scmd->cmnd + 7);
4599 			dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR);
4600 		}
4601 		return false;
4602 	}
4603 
4604 	if (!param_len) {
4605 		ioc_warn(mrioc,
4606 		    "%s: cdb received with zero parameter length\n",
4607 		    __func__);
4608 		scsi_print_command(scmd);
4609 		scmd->result = DID_OK << 16;
4610 		scsi_done(scmd);
4611 		return true;
4612 	}
4613 
4614 	if (param_len < 24) {
4615 		ioc_warn(mrioc,
4616 		    "%s: cdb received with invalid param_len: %d\n",
4617 		    __func__, param_len);
4618 		scsi_print_command(scmd);
4619 		scmd->result = SAM_STAT_CHECK_CONDITION;
4620 		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
4621 		    0x1A, 0);
4622 		scsi_done(scmd);
4623 		return true;
4624 	}
4625 	if (param_len != scsi_bufflen(scmd)) {
4626 		ioc_warn(mrioc,
4627 		    "%s: cdb received with param_len: %d bufflen: %d\n",
4628 		    __func__, param_len, scsi_bufflen(scmd));
4629 		scsi_print_command(scmd);
4630 		scmd->result = SAM_STAT_CHECK_CONDITION;
4631 		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
4632 		    0x1A, 0);
4633 		scsi_done(scmd);
4634 		return true;
4635 	}
4636 	buf = kzalloc(scsi_bufflen(scmd), GFP_ATOMIC);
4637 	if (!buf) {
4638 		scsi_print_command(scmd);
4639 		scmd->result = SAM_STAT_CHECK_CONDITION;
4640 		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
4641 		    0x55, 0x03);
4642 		scsi_done(scmd);
4643 		return true;
4644 	}
4645 	scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
4646 	desc_len = get_unaligned_be16(&buf[2]);
4647 
4648 	if (desc_len < 16) {
4649 		ioc_warn(mrioc,
4650 		    "%s: Invalid descriptor length in param list: %d\n",
4651 		    __func__, desc_len);
4652 		scsi_print_command(scmd);
4653 		scmd->result = SAM_STAT_CHECK_CONDITION;
4654 		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
4655 		    0x26, 0);
4656 		scsi_done(scmd);
4657 		kfree(buf);
4658 		return true;
4659 	}
4660 
4661 	if (param_len > (desc_len + 8)) {
4662 		trunc_param_len = desc_len + 8;
4663 		scsi_print_command(scmd);
4664 		dprint_scsi_err(mrioc,
4665 		    "truncating param_len(%d) to desc_len+8(%d)\n",
4666 		    param_len, trunc_param_len);
4667 		put_unaligned_be16(trunc_param_len, scmd->cmnd + 7);
4668 		scsi_print_command(scmd);
4669 	}
4670 
4671 	kfree(buf);
4672 	return false;
4673 }
4674 
4675 /**
4676  * mpi3mr_allow_scmd_to_fw - Command is allowed during shutdown
4677  * @scmd: SCSI Command reference
4678  *
4679  * Checks whether a CDB is allowed during shutdown or not.
4680  *
4681  * Return: TRUE for allowed commands, FALSE otherwise.
4682  */
4683 
4684 inline bool mpi3mr_allow_scmd_to_fw(struct scsi_cmnd *scmd)
4685 {
4686 	switch (scmd->cmnd[0]) {
4687 	case SYNCHRONIZE_CACHE:
4688 	case START_STOP:
4689 		return true;
4690 	default:
4691 		return false;
4692 	}
4693 }
4694 
4695 /**
4696  * mpi3mr_qcmd - I/O request dispatcher
4697  * @shost: SCSI Host reference
4698  * @scmd: SCSI Command reference
4699  *
4700  * Issues the SCSI Command as an MPI3 request.
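 *
 * In outline: validates the device state, derives a host tag
 * from the block layer tag, sets up EEDP and data-direction
 * flags, builds the SGL, applies the large-I/O throttling
 * accounting and finally posts the request to the operational
 * request queue via mpi3mr_op_request_post().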
4701 * 4702 * Return: 0 on successful queueing of the request or if the 4703 * request is completed with failure. 4704 * SCSI_MLQUEUE_DEVICE_BUSY when the device is busy. 4705 * SCSI_MLQUEUE_HOST_BUSY when the host queue is full. 4706 */ 4707 static int mpi3mr_qcmd(struct Scsi_Host *shost, 4708 struct scsi_cmnd *scmd) 4709 { 4710 struct mpi3mr_ioc *mrioc = shost_priv(shost); 4711 struct mpi3mr_stgt_priv_data *stgt_priv_data; 4712 struct mpi3mr_sdev_priv_data *sdev_priv_data; 4713 struct scmd_priv *scmd_priv_data = NULL; 4714 struct mpi3_scsi_io_request *scsiio_req = NULL; 4715 struct op_req_qinfo *op_req_q = NULL; 4716 int retval = 0; 4717 u16 dev_handle; 4718 u16 host_tag; 4719 u32 scsiio_flags = 0, data_len_blks = 0; 4720 struct request *rq = scsi_cmd_to_rq(scmd); 4721 int iprio_class; 4722 u8 is_pcie_dev = 0; 4723 u32 tracked_io_sz = 0; 4724 u32 ioc_pend_data_len = 0, tg_pend_data_len = 0; 4725 struct mpi3mr_throttle_group_info *tg = NULL; 4726 4727 if (mrioc->unrecoverable) { 4728 scmd->result = DID_ERROR << 16; 4729 scsi_done(scmd); 4730 goto out; 4731 } 4732 4733 sdev_priv_data = scmd->device->hostdata; 4734 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) { 4735 scmd->result = DID_NO_CONNECT << 16; 4736 scsi_done(scmd); 4737 goto out; 4738 } 4739 4740 if (mrioc->stop_drv_processing && 4741 !(mpi3mr_allow_scmd_to_fw(scmd))) { 4742 scmd->result = DID_NO_CONNECT << 16; 4743 scsi_done(scmd); 4744 goto out; 4745 } 4746 4747 stgt_priv_data = sdev_priv_data->tgt_priv_data; 4748 dev_handle = stgt_priv_data->dev_handle; 4749 4750 /* Avoid error handling escalation when device is removed or blocked */ 4751 4752 if (scmd->device->host->shost_state == SHOST_RECOVERY && 4753 scmd->cmnd[0] == TEST_UNIT_READY && 4754 (stgt_priv_data->dev_removed || (dev_handle == MPI3MR_INVALID_DEV_HANDLE))) { 4755 scsi_build_sense(scmd, 0, UNIT_ATTENTION, 0x29, 0x07); 4756 scsi_done(scmd); 4757 goto out; 4758 } 4759 4760 if (mrioc->reset_in_progress) { 4761 retval = SCSI_MLQUEUE_HOST_BUSY; 4762 goto out; 4763 } 4764 4765 if (atomic_read(&stgt_priv_data->block_io)) { 4766 if (mrioc->stop_drv_processing) { 4767 scmd->result = DID_NO_CONNECT << 16; 4768 scsi_done(scmd); 4769 goto out; 4770 } 4771 retval = SCSI_MLQUEUE_DEVICE_BUSY; 4772 goto out; 4773 } 4774 4775 if (dev_handle == MPI3MR_INVALID_DEV_HANDLE) { 4776 scmd->result = DID_NO_CONNECT << 16; 4777 scsi_done(scmd); 4778 goto out; 4779 } 4780 if (stgt_priv_data->dev_removed) { 4781 scmd->result = DID_NO_CONNECT << 16; 4782 scsi_done(scmd); 4783 goto out; 4784 } 4785 4786 if (stgt_priv_data->dev_type == MPI3_DEVICE_DEVFORM_PCIE) 4787 is_pcie_dev = 1; 4788 if ((scmd->cmnd[0] == UNMAP) && is_pcie_dev && 4789 (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) && 4790 mpi3mr_check_return_unmap(mrioc, scmd)) 4791 goto out; 4792 4793 host_tag = mpi3mr_host_tag_for_scmd(mrioc, scmd); 4794 if (host_tag == MPI3MR_HOSTTAG_INVALID) { 4795 scmd->result = DID_ERROR << 16; 4796 scsi_done(scmd); 4797 goto out; 4798 } 4799 4800 if (scmd->sc_data_direction == DMA_FROM_DEVICE) 4801 scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_READ; 4802 else if (scmd->sc_data_direction == DMA_TO_DEVICE) 4803 scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE; 4804 else 4805 scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER; 4806 4807 scsiio_flags |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ; 4808 4809 if (sdev_priv_data->ncq_prio_enable) { 4810 iprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq)); 4811 if (iprio_class == IOPRIO_CLASS_RT) 4812 scsiio_flags |= 1 << 
MPI3_SCSIIO_FLAGS_CMDPRI_SHIFT; 4813 } 4814 4815 if (scmd->cmd_len > 16) 4816 scsiio_flags |= MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16; 4817 4818 scmd_priv_data = scsi_cmd_priv(scmd); 4819 memset(scmd_priv_data->mpi3mr_scsiio_req, 0, MPI3MR_ADMIN_REQ_FRAME_SZ); 4820 scsiio_req = (struct mpi3_scsi_io_request *)scmd_priv_data->mpi3mr_scsiio_req; 4821 scsiio_req->function = MPI3_FUNCTION_SCSI_IO; 4822 scsiio_req->host_tag = cpu_to_le16(host_tag); 4823 4824 mpi3mr_setup_eedp(mrioc, scmd, scsiio_req); 4825 4826 if (stgt_priv_data->wslen) 4827 mpi3mr_setup_divert_ws(mrioc, scmd, scsiio_req, &scsiio_flags, 4828 stgt_priv_data->wslen); 4829 4830 memcpy(scsiio_req->cdb.cdb32, scmd->cmnd, scmd->cmd_len); 4831 scsiio_req->data_length = cpu_to_le32(scsi_bufflen(scmd)); 4832 scsiio_req->dev_handle = cpu_to_le16(dev_handle); 4833 scsiio_req->flags = cpu_to_le32(scsiio_flags); 4834 int_to_scsilun(sdev_priv_data->lun_id, 4835 (struct scsi_lun *)scsiio_req->lun); 4836 4837 if (mpi3mr_build_sg_scmd(mrioc, scmd, scsiio_req)) { 4838 mpi3mr_clear_scmd_priv(mrioc, scmd); 4839 retval = SCSI_MLQUEUE_HOST_BUSY; 4840 goto out; 4841 } 4842 op_req_q = &mrioc->req_qinfo[scmd_priv_data->req_q_idx]; 4843 data_len_blks = scsi_bufflen(scmd) >> 9; 4844 if ((data_len_blks >= mrioc->io_throttle_data_length) && 4845 stgt_priv_data->io_throttle_enabled) { 4846 tracked_io_sz = data_len_blks; 4847 tg = stgt_priv_data->throttle_group; 4848 if (tg) { 4849 ioc_pend_data_len = atomic_add_return(data_len_blks, 4850 &mrioc->pend_large_data_sz); 4851 tg_pend_data_len = atomic_add_return(data_len_blks, 4852 &tg->pend_large_data_sz); 4853 if (!tg->io_divert && ((ioc_pend_data_len >= 4854 mrioc->io_throttle_high) || 4855 (tg_pend_data_len >= tg->high))) { 4856 tg->io_divert = 1; 4857 tg->need_qd_reduction = 1; 4858 mpi3mr_set_io_divert_for_all_vd_in_tg(mrioc, 4859 tg, 1); 4860 mpi3mr_queue_qd_reduction_event(mrioc, tg); 4861 } 4862 } else { 4863 ioc_pend_data_len = atomic_add_return(data_len_blks, 4864 &mrioc->pend_large_data_sz); 4865 if (ioc_pend_data_len >= mrioc->io_throttle_high) 4866 stgt_priv_data->io_divert = 1; 4867 } 4868 } 4869 4870 if (stgt_priv_data->io_divert) { 4871 scsiio_req->msg_flags |= 4872 MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE; 4873 scsiio_flags |= MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING; 4874 } 4875 scsiio_req->flags = cpu_to_le32(scsiio_flags); 4876 4877 if (mpi3mr_op_request_post(mrioc, op_req_q, 4878 scmd_priv_data->mpi3mr_scsiio_req)) { 4879 mpi3mr_clear_scmd_priv(mrioc, scmd); 4880 retval = SCSI_MLQUEUE_HOST_BUSY; 4881 if (tracked_io_sz) { 4882 atomic_sub(tracked_io_sz, &mrioc->pend_large_data_sz); 4883 if (tg) 4884 atomic_sub(tracked_io_sz, 4885 &tg->pend_large_data_sz); 4886 } 4887 goto out; 4888 } 4889 4890 out: 4891 return retval; 4892 } 4893 4894 static const struct scsi_host_template mpi3mr_driver_template = { 4895 .module = THIS_MODULE, 4896 .name = "MPI3 Storage Controller", 4897 .proc_name = MPI3MR_DRIVER_NAME, 4898 .queuecommand = mpi3mr_qcmd, 4899 .target_alloc = mpi3mr_target_alloc, 4900 .slave_alloc = mpi3mr_slave_alloc, 4901 .slave_configure = mpi3mr_slave_configure, 4902 .target_destroy = mpi3mr_target_destroy, 4903 .slave_destroy = mpi3mr_slave_destroy, 4904 .scan_finished = mpi3mr_scan_finished, 4905 .scan_start = mpi3mr_scan_start, 4906 .change_queue_depth = mpi3mr_change_queue_depth, 4907 .eh_device_reset_handler = mpi3mr_eh_dev_reset, 4908 .eh_target_reset_handler = mpi3mr_eh_target_reset, 4909 .eh_host_reset_handler = mpi3mr_eh_host_reset, 4910 .bios_param = mpi3mr_bios_param, 4911 
.map_queues = mpi3mr_map_queues,
4912 	.mq_poll = mpi3mr_blk_mq_poll,
4913 	.no_write_same = 1,
4914 	.can_queue = 1,
4915 	.this_id = -1,
4916 	.sg_tablesize = MPI3MR_DEFAULT_SGL_ENTRIES,
4917 	/* max xfer supported is 1M (2K in 512 byte sized sectors)
4918 	 */
4919 	.max_sectors = (MPI3MR_DEFAULT_MAX_IO_SIZE / 512),
4920 	.cmd_per_lun = MPI3MR_MAX_CMDS_LUN,
4921 	.max_segment_size = 0xffffffff,
4922 	.track_queue_depth = 1,
4923 	.cmd_size = sizeof(struct scmd_priv),
4924 	.shost_groups = mpi3mr_host_groups,
4925 	.sdev_groups = mpi3mr_dev_groups,
4926 };
4927 
4928 /**
4929  * mpi3mr_init_drv_cmd - Initialize internal command tracker
4930  * @cmdptr: Internal command tracker
4931  * @host_tag: Host tag used for the specific command
4932  *
4933  * Initialize the internal command tracker structure with the
4934  * specified host tag.
4935  *
4936  * Return: Nothing.
4937  */
4938 static inline void mpi3mr_init_drv_cmd(struct mpi3mr_drv_cmd *cmdptr,
4939 	u16 host_tag)
4940 {
4941 	mutex_init(&cmdptr->mutex);
4942 	cmdptr->reply = NULL;
4943 	cmdptr->state = MPI3MR_CMD_NOTUSED;
4944 	cmdptr->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
4945 	cmdptr->host_tag = host_tag;
4946 }
4947 
4948 /**
4949  * osintfc_mrioc_security_status - Check controller secure status
4950  * @pdev: PCI device instance
4951  *
4952  * Read the Device Serial Number capability from PCI config
4953  * space and decide whether the controller is secure or not.
4954  *
4955  * Return: 0 on success, non-zero on failure.
4956  */
4957 static int
4958 osintfc_mrioc_security_status(struct pci_dev *pdev)
4959 {
4960 	u32 cap_data;
4961 	int base;
4962 	u32 ctlr_status;
4963 	u32 debug_status;
4964 	int retval = 0;
4965 
4966 	base = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
4967 	if (!base) {
4968 		dev_err(&pdev->dev,
4969 		    "%s: PCI_EXT_CAP_ID_DSN is not supported\n", __func__);
4970 		return -1;
4971 	}
4972 
4973 	pci_read_config_dword(pdev, base + 4, &cap_data);
4974 
4975 	debug_status = cap_data & MPI3MR_CTLR_SECURE_DBG_STATUS_MASK;
4976 	ctlr_status = cap_data & MPI3MR_CTLR_SECURITY_STATUS_MASK;
4977 
4978 	switch (ctlr_status) {
4979 	case MPI3MR_INVALID_DEVICE:
4980 		dev_err(&pdev->dev,
4981 		    "%s: Non secure ctlr (Invalid) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
4982 		    __func__, pdev->device, pdev->subsystem_vendor,
4983 		    pdev->subsystem_device);
4984 		retval = -1;
4985 		break;
4986 	case MPI3MR_CONFIG_SECURE_DEVICE:
4987 		if (!debug_status)
4988 			dev_info(&pdev->dev,
4989 			    "%s: Config secure ctlr is detected\n",
4990 			    __func__);
4991 		break;
4992 	case MPI3MR_HARD_SECURE_DEVICE:
4993 		break;
4994 	case MPI3MR_TAMPERED_DEVICE:
4995 		dev_err(&pdev->dev,
4996 		    "%s: Non secure ctlr (Tampered) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
4997 		    __func__, pdev->device, pdev->subsystem_vendor,
4998 		    pdev->subsystem_device);
4999 		retval = -1;
5000 		break;
5001 	default:
5002 		retval = -1;
5003 		break;
5004 	}
5005 
5006 	if (!retval && debug_status) {
5007 		dev_err(&pdev->dev,
5008 		    "%s: Non secure ctlr (Secure Dbg) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
5009 		    __func__, pdev->device, pdev->subsystem_vendor,
5010 		    pdev->subsystem_device);
5011 		retval = -1;
5012 	}
5013 
5014 	return retval;
5015 }
5016 
5017 /**
5018  * mpi3mr_probe - PCI probe callback
5019  * @pdev: PCI device instance
5020  * @id: PCI device ID details
5021  *
5022  * Controller initialization routine. Checks the security status
5023  * of the controller and, if it is invalid or tampered, returns
5024  * from the probe without initializing the controller. Otherwise,
5025  * allocates the per-adapter instance through shost_priv and
5026  * initializes controller-specific data structures, initializes
5027  * the controller hardware and adds the shost to the SCSI subsystem.
5028  *
5029  * Return: 0 on success, non-zero on failure.
5030  */
5031 
5032 static int
5033 mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5034 {
5035 	struct mpi3mr_ioc *mrioc = NULL;
5036 	struct Scsi_Host *shost = NULL;
5037 	int retval = 0, i;
5038 
5039 	if (osintfc_mrioc_security_status(pdev)) {
5040 		warn_non_secure_ctlr = 1;
5041 		return 1; /* For Invalid and Tampered device */
5042 	}
5043 
5044 	shost = scsi_host_alloc(&mpi3mr_driver_template,
5045 	    sizeof(struct mpi3mr_ioc));
5046 	if (!shost) {
5047 		retval = -ENODEV;
5048 		goto shost_failed;
5049 	}
5050 
5051 	mrioc = shost_priv(shost);
5052 	mrioc->id = mrioc_ids++;
5053 	sprintf(mrioc->driver_name, "%s", MPI3MR_DRIVER_NAME);
5054 	sprintf(mrioc->name, "%s%d", mrioc->driver_name, mrioc->id);
5055 	INIT_LIST_HEAD(&mrioc->list);
5056 	spin_lock(&mrioc_list_lock);
5057 	list_add_tail(&mrioc->list, &mrioc_list);
5058 	spin_unlock(&mrioc_list_lock);
5059 
5060 	spin_lock_init(&mrioc->admin_req_lock);
5061 	spin_lock_init(&mrioc->reply_free_queue_lock);
5062 	spin_lock_init(&mrioc->sbq_lock);
5063 	spin_lock_init(&mrioc->fwevt_lock);
5064 	spin_lock_init(&mrioc->tgtdev_lock);
5065 	spin_lock_init(&mrioc->watchdog_lock);
5066 	spin_lock_init(&mrioc->chain_buf_lock);
5067 	spin_lock_init(&mrioc->sas_node_lock);
5068 
5069 	INIT_LIST_HEAD(&mrioc->fwevt_list);
5070 	INIT_LIST_HEAD(&mrioc->tgtdev_list);
5071 	INIT_LIST_HEAD(&mrioc->delayed_rmhs_list);
5072 	INIT_LIST_HEAD(&mrioc->delayed_evtack_cmds_list);
5073 	INIT_LIST_HEAD(&mrioc->sas_expander_list);
5074 	INIT_LIST_HEAD(&mrioc->hba_port_table_list);
5075 	INIT_LIST_HEAD(&mrioc->enclosure_list);
5076 
5077 	mutex_init(&mrioc->reset_mutex);
5078 	mpi3mr_init_drv_cmd(&mrioc->init_cmds, MPI3MR_HOSTTAG_INITCMDS);
5079 	mpi3mr_init_drv_cmd(&mrioc->host_tm_cmds, MPI3MR_HOSTTAG_BLK_TMS);
5080 	mpi3mr_init_drv_cmd(&mrioc->bsg_cmds, MPI3MR_HOSTTAG_BSG_CMDS);
5081 	mpi3mr_init_drv_cmd(&mrioc->cfg_cmds, MPI3MR_HOSTTAG_CFG_CMDS);
5082 	mpi3mr_init_drv_cmd(&mrioc->transport_cmds,
5083 	    MPI3MR_HOSTTAG_TRANSPORT_CMDS);
5084 
5085 	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
5086 		mpi3mr_init_drv_cmd(&mrioc->dev_rmhs_cmds[i],
5087 		    MPI3MR_HOSTTAG_DEVRMCMD_MIN + i);
5088 
5089 	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
5090 		mpi3mr_init_drv_cmd(&mrioc->evtack_cmds[i],
5091 		    MPI3MR_HOSTTAG_EVTACKCMD_MIN + i);
5092 
5093 	if (pdev->revision)
5094 		mrioc->enable_segqueue = true;
5095 
5096 	init_waitqueue_head(&mrioc->reset_waitq);
5097 	mrioc->logging_level = logging_level;
5098 	mrioc->shost = shost;
5099 	mrioc->pdev = pdev;
5100 	mrioc->stop_bsgs = 1;
5101 
5102 	mrioc->max_sgl_entries = max_sgl_entries;
5103 	if (max_sgl_entries > MPI3MR_MAX_SGL_ENTRIES)
5104 		mrioc->max_sgl_entries = MPI3MR_MAX_SGL_ENTRIES;
5105 	else if (max_sgl_entries < MPI3MR_DEFAULT_SGL_ENTRIES)
5106 		mrioc->max_sgl_entries = MPI3MR_DEFAULT_SGL_ENTRIES;
5107 	else {
5108 		mrioc->max_sgl_entries /= MPI3MR_DEFAULT_SGL_ENTRIES;
5109 		mrioc->max_sgl_entries *= MPI3MR_DEFAULT_SGL_ENTRIES;
5110 	}
5111 
5112 	/* init shost parameters */
5113 	shost->max_cmd_len = MPI3MR_MAX_CDB_LENGTH;
5114 	shost->max_lun = -1;
5115 	shost->unique_id = mrioc->id;
5116 
5117 	shost->max_channel = 0;
5118 	shost->max_id = 0xFFFFFFFF;
5119 
5120 	shost->host_tagset = 1;
5121 
5122 	if (prot_mask >= 0)
5123 		scsi_host_set_prot(shost, prot_mask);
5124 	else {
5125 		prot_mask = SHOST_DIF_TYPE1_PROTECTION
5126 		    | SHOST_DIF_TYPE2_PROTECTION
5127 		    | SHOST_DIF_TYPE3_PROTECTION;
5128 		scsi_host_set_prot(shost, prot_mask);
5129 	}
5130 
5131 	ioc_info(mrioc,
5132 	    "%s :host protection capabilities enabled %s%s%s%s%s%s%s\n",
5133 	    __func__,
5134 	    (prot_mask & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
5135 	    (prot_mask & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
5136 	    (prot_mask & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
5137 	    (prot_mask & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
5138 	    (prot_mask & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
5139 	    (prot_mask & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
5140 	    (prot_mask & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
5141 
5142 	if (prot_guard_mask)
5143 		scsi_host_set_guard(shost, (prot_guard_mask & 3));
5144 	else
5145 		scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
5146 
5147 	snprintf(mrioc->fwevt_worker_name, sizeof(mrioc->fwevt_worker_name),
5148 	    "%s%d_fwevt_wrkr", mrioc->driver_name, mrioc->id);
5149 	mrioc->fwevt_worker_thread = alloc_ordered_workqueue(
5150 	    mrioc->fwevt_worker_name, 0);
5151 	if (!mrioc->fwevt_worker_thread) {
5152 		ioc_err(mrioc, "failure at %s:%d/%s()!\n",
5153 		    __FILE__, __LINE__, __func__);
5154 		retval = -ENODEV;
5155 		goto fwevtthread_failed;
5156 	}
5157 
5158 	mrioc->is_driver_loading = 1;
5159 	mrioc->cpu_count = num_online_cpus();
5160 	if (mpi3mr_setup_resources(mrioc)) {
5161 		ioc_err(mrioc, "setup resources failed\n");
5162 		retval = -ENODEV;
5163 		goto resource_alloc_failed;
5164 	}
5165 	if (mpi3mr_init_ioc(mrioc)) {
5166 		ioc_err(mrioc, "initializing IOC failed\n");
5167 		retval = -ENODEV;
5168 		goto init_ioc_failed;
5169 	}
5170 
5171 	shost->nr_hw_queues = mrioc->num_op_reply_q;
5172 	if (mrioc->active_poll_qcount)
5173 		shost->nr_maps = 3;
5174 
5175 	shost->can_queue = mrioc->max_host_ios;
5176 	shost->sg_tablesize = mrioc->max_sgl_entries;
5177 	shost->max_id = mrioc->facts.max_perids + 1;
5178 
5179 	retval = scsi_add_host(shost, &pdev->dev);
5180 	if (retval) {
5181 		ioc_err(mrioc, "failure at %s:%d/%s()!\n",
5182 		    __FILE__, __LINE__, __func__);
5183 		goto addhost_failed;
5184 	}
5185 
5186 	scsi_scan_host(shost);
5187 	mpi3mr_bsg_init(mrioc);
5188 	return retval;
5189 
5190 addhost_failed:
5191 	mpi3mr_stop_watchdog(mrioc);
5192 	mpi3mr_cleanup_ioc(mrioc);
5193 init_ioc_failed:
5194 	mpi3mr_free_mem(mrioc);
5195 	mpi3mr_cleanup_resources(mrioc);
5196 resource_alloc_failed:
5197 	destroy_workqueue(mrioc->fwevt_worker_thread);
5198 fwevtthread_failed:
5199 	spin_lock(&mrioc_list_lock);
5200 	list_del(&mrioc->list);
5201 	spin_unlock(&mrioc_list_lock);
5202 	scsi_host_put(shost);
5203 shost_failed:
5204 	return retval;
5205 }
5206 
5207 /**
5208  * mpi3mr_remove - PCI remove callback
5209  * @pdev: PCI device instance
5210  *
5211  * Cleanup the IOC by issuing MUR and shutdown notification.
5212  * Free up all memory and resources associated with the
5213  * controller and target devices, unregister the shost.
5214  *
5215  * Return: Nothing.
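 *
 * Note the teardown ordering: driver processing is stopped and
 * the firmware-event workqueue is drained before the shost is
 * removed, so no new device add/remove events race with the
 * target list cleanup below.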
5216 */ 5217 static void mpi3mr_remove(struct pci_dev *pdev) 5218 { 5219 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5220 struct mpi3mr_ioc *mrioc; 5221 struct workqueue_struct *wq; 5222 unsigned long flags; 5223 struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next; 5224 struct mpi3mr_hba_port *port, *hba_port_next; 5225 struct mpi3mr_sas_node *sas_expander, *sas_expander_next; 5226 5227 if (!shost) 5228 return; 5229 5230 mrioc = shost_priv(shost); 5231 while (mrioc->reset_in_progress || mrioc->is_driver_loading) 5232 ssleep(1); 5233 5234 if (!pci_device_is_present(mrioc->pdev)) { 5235 mrioc->unrecoverable = 1; 5236 mpi3mr_flush_cmds_for_unrecovered_controller(mrioc); 5237 } 5238 5239 mpi3mr_bsg_exit(mrioc); 5240 mrioc->stop_drv_processing = 1; 5241 mpi3mr_cleanup_fwevt_list(mrioc); 5242 spin_lock_irqsave(&mrioc->fwevt_lock, flags); 5243 wq = mrioc->fwevt_worker_thread; 5244 mrioc->fwevt_worker_thread = NULL; 5245 spin_unlock_irqrestore(&mrioc->fwevt_lock, flags); 5246 if (wq) 5247 destroy_workqueue(wq); 5248 5249 if (mrioc->sas_transport_enabled) 5250 sas_remove_host(shost); 5251 else 5252 scsi_remove_host(shost); 5253 5254 list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list, 5255 list) { 5256 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); 5257 mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true); 5258 mpi3mr_tgtdev_put(tgtdev); 5259 } 5260 mpi3mr_stop_watchdog(mrioc); 5261 mpi3mr_cleanup_ioc(mrioc); 5262 mpi3mr_free_mem(mrioc); 5263 mpi3mr_cleanup_resources(mrioc); 5264 5265 spin_lock_irqsave(&mrioc->sas_node_lock, flags); 5266 list_for_each_entry_safe_reverse(sas_expander, sas_expander_next, 5267 &mrioc->sas_expander_list, list) { 5268 spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); 5269 mpi3mr_expander_node_remove(mrioc, sas_expander); 5270 spin_lock_irqsave(&mrioc->sas_node_lock, flags); 5271 } 5272 list_for_each_entry_safe(port, hba_port_next, &mrioc->hba_port_table_list, list) { 5273 ioc_info(mrioc, 5274 "removing hba_port entry: %p port: %d from hba_port list\n", 5275 port, port->port_id); 5276 list_del(&port->list); 5277 kfree(port); 5278 } 5279 spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); 5280 5281 if (mrioc->sas_hba.num_phys) { 5282 kfree(mrioc->sas_hba.phy); 5283 mrioc->sas_hba.phy = NULL; 5284 mrioc->sas_hba.num_phys = 0; 5285 } 5286 5287 spin_lock(&mrioc_list_lock); 5288 list_del(&mrioc->list); 5289 spin_unlock(&mrioc_list_lock); 5290 5291 scsi_host_put(shost); 5292 } 5293 5294 /** 5295 * mpi3mr_shutdown - PCI shutdown callback 5296 * @pdev: PCI device instance 5297 * 5298 * Free up all memory and resources associated with the 5299 * controller 5300 * 5301 * Return: Nothing. 
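 *
 * A trimmed-down variant of mpi3mr_remove(): the IOC is quiesced
 * and resources are released, but the shost itself is not
 * unregistered since the system is going down anyway.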
5302 */ 5303 static void mpi3mr_shutdown(struct pci_dev *pdev) 5304 { 5305 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5306 struct mpi3mr_ioc *mrioc; 5307 struct workqueue_struct *wq; 5308 unsigned long flags; 5309 5310 if (!shost) 5311 return; 5312 5313 mrioc = shost_priv(shost); 5314 while (mrioc->reset_in_progress || mrioc->is_driver_loading) 5315 ssleep(1); 5316 5317 mrioc->stop_drv_processing = 1; 5318 mpi3mr_cleanup_fwevt_list(mrioc); 5319 spin_lock_irqsave(&mrioc->fwevt_lock, flags); 5320 wq = mrioc->fwevt_worker_thread; 5321 mrioc->fwevt_worker_thread = NULL; 5322 spin_unlock_irqrestore(&mrioc->fwevt_lock, flags); 5323 if (wq) 5324 destroy_workqueue(wq); 5325 5326 mpi3mr_stop_watchdog(mrioc); 5327 mpi3mr_cleanup_ioc(mrioc); 5328 mpi3mr_cleanup_resources(mrioc); 5329 } 5330 5331 /** 5332 * mpi3mr_suspend - PCI power management suspend callback 5333 * @dev: Device struct 5334 * 5335 * Change the power state to the given value and cleanup the IOC 5336 * by issuing MUR and shutdown notification 5337 * 5338 * Return: 0 always. 5339 */ 5340 static int __maybe_unused 5341 mpi3mr_suspend(struct device *dev) 5342 { 5343 struct pci_dev *pdev = to_pci_dev(dev); 5344 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5345 struct mpi3mr_ioc *mrioc; 5346 5347 if (!shost) 5348 return 0; 5349 5350 mrioc = shost_priv(shost); 5351 while (mrioc->reset_in_progress || mrioc->is_driver_loading) 5352 ssleep(1); 5353 mrioc->stop_drv_processing = 1; 5354 mpi3mr_cleanup_fwevt_list(mrioc); 5355 scsi_block_requests(shost); 5356 mpi3mr_stop_watchdog(mrioc); 5357 mpi3mr_cleanup_ioc(mrioc); 5358 5359 ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state\n", 5360 pdev, pci_name(pdev)); 5361 mpi3mr_cleanup_resources(mrioc); 5362 5363 return 0; 5364 } 5365 5366 /** 5367 * mpi3mr_resume - PCI power management resume callback 5368 * @dev: Device struct 5369 * 5370 * Restore the power state to D0 and reinitialize the controller 5371 * and resume I/O operations to the target devices 5372 * 5373 * Return: 0 on success, non-zero on failure 5374 */ 5375 static int __maybe_unused 5376 mpi3mr_resume(struct device *dev) 5377 { 5378 struct pci_dev *pdev = to_pci_dev(dev); 5379 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5380 struct mpi3mr_ioc *mrioc; 5381 pci_power_t device_state = pdev->current_state; 5382 int r; 5383 5384 if (!shost) 5385 return 0; 5386 5387 mrioc = shost_priv(shost); 5388 5389 ioc_info(mrioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n", 5390 pdev, pci_name(pdev), device_state); 5391 mrioc->pdev = pdev; 5392 mrioc->cpu_count = num_online_cpus(); 5393 r = mpi3mr_setup_resources(mrioc); 5394 if (r) { 5395 ioc_info(mrioc, "%s: Setup resources failed[%d]\n", 5396 __func__, r); 5397 return r; 5398 } 5399 5400 mrioc->stop_drv_processing = 0; 5401 mpi3mr_invalidate_devhandles(mrioc); 5402 mpi3mr_free_enclosure_list(mrioc); 5403 mpi3mr_memset_buffers(mrioc); 5404 r = mpi3mr_reinit_ioc(mrioc, 1); 5405 if (r) { 5406 ioc_err(mrioc, "resuming controller failed[%d]\n", r); 5407 return r; 5408 } 5409 ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME); 5410 scsi_unblock_requests(shost); 5411 mrioc->device_refresh_on = 0; 5412 mpi3mr_start_watchdog(mrioc); 5413 5414 return 0; 5415 } 5416 5417 static const struct pci_device_id mpi3mr_pci_id_table[] = { 5418 { 5419 PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM, 5420 MPI3_MFGPAGE_DEVID_SAS4116, PCI_ANY_ID, PCI_ANY_ID) 5421 }, 5422 { 0 } 5423 }; 5424 MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table); 5425 5426 static SIMPLE_DEV_PM_OPS(mpi3mr_pm_ops, 
mpi3mr_suspend, mpi3mr_resume); 5427 5428 static struct pci_driver mpi3mr_pci_driver = { 5429 .name = MPI3MR_DRIVER_NAME, 5430 .id_table = mpi3mr_pci_id_table, 5431 .probe = mpi3mr_probe, 5432 .remove = mpi3mr_remove, 5433 .shutdown = mpi3mr_shutdown, 5434 .driver.pm = &mpi3mr_pm_ops, 5435 }; 5436 5437 static ssize_t event_counter_show(struct device_driver *dd, char *buf) 5438 { 5439 return sprintf(buf, "%llu\n", atomic64_read(&event_counter)); 5440 } 5441 static DRIVER_ATTR_RO(event_counter); 5442 5443 static int __init mpi3mr_init(void) 5444 { 5445 int ret_val; 5446 5447 pr_info("Loading %s version %s\n", MPI3MR_DRIVER_NAME, 5448 MPI3MR_DRIVER_VERSION); 5449 5450 mpi3mr_transport_template = 5451 sas_attach_transport(&mpi3mr_transport_functions); 5452 if (!mpi3mr_transport_template) { 5453 pr_err("%s failed to load due to sas transport attach failure\n", 5454 MPI3MR_DRIVER_NAME); 5455 return -ENODEV; 5456 } 5457 5458 ret_val = pci_register_driver(&mpi3mr_pci_driver); 5459 if (ret_val) { 5460 pr_err("%s failed to load due to pci register driver failure\n", 5461 MPI3MR_DRIVER_NAME); 5462 goto err_pci_reg_fail; 5463 } 5464 5465 ret_val = driver_create_file(&mpi3mr_pci_driver.driver, 5466 &driver_attr_event_counter); 5467 if (ret_val) 5468 goto err_event_counter; 5469 5470 return ret_val; 5471 5472 err_event_counter: 5473 pci_unregister_driver(&mpi3mr_pci_driver); 5474 5475 err_pci_reg_fail: 5476 sas_release_transport(mpi3mr_transport_template); 5477 return ret_val; 5478 } 5479 5480 static void __exit mpi3mr_exit(void) 5481 { 5482 if (warn_non_secure_ctlr) 5483 pr_warn( 5484 "Unloading %s version %s while managing a non secure controller\n", 5485 MPI3MR_DRIVER_NAME, MPI3MR_DRIVER_VERSION); 5486 else 5487 pr_info("Unloading %s version %s\n", MPI3MR_DRIVER_NAME, 5488 MPI3MR_DRIVER_VERSION); 5489 5490 driver_remove_file(&mpi3mr_pci_driver.driver, 5491 &driver_attr_event_counter); 5492 pci_unregister_driver(&mpi3mr_pci_driver); 5493 sas_release_transport(mpi3mr_transport_template); 5494 } 5495 5496 module_init(mpi3mr_init); 5497 module_exit(mpi3mr_exit); 5498