// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for Broadcom MPI3 Storage Controllers
 *
 * Copyright (C) 2017-2023 Broadcom Inc.
 *  (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
 *
 */

#include "mpi3mr.h"

/* global driver scope variables */
LIST_HEAD(mrioc_list);
DEFINE_SPINLOCK(mrioc_list_lock);
static int mrioc_ids;
static int warn_non_secure_ctlr;
atomic64_t event_counter;

MODULE_AUTHOR(MPI3MR_DRIVER_AUTHOR);
MODULE_DESCRIPTION(MPI3MR_DRIVER_DESC);
MODULE_LICENSE(MPI3MR_DRIVER_LICENSE);
MODULE_VERSION(MPI3MR_DRIVER_VERSION);

/* Module parameters */
int prot_mask = -1;
module_param(prot_mask, int, 0);
MODULE_PARM_DESC(prot_mask, "Host protection capabilities mask, def=0x07");

static int prot_guard_mask = 3;
module_param(prot_guard_mask, int, 0);
MODULE_PARM_DESC(prot_guard_mask, "Host protection guard mask, def=3");
static int logging_level;
module_param(logging_level, int, 0);
MODULE_PARM_DESC(logging_level,
        "bits for enabling additional logging info (default=0)");
static int max_sgl_entries = MPI3MR_DEFAULT_SGL_ENTRIES;
module_param(max_sgl_entries, int, 0444);
MODULE_PARM_DESC(max_sgl_entries,
        "Preferred max number of SG entries to be used for a single I/O\n"
        "The actual value will be determined by the driver\n"
        "(Minimum=256, Maximum=2048, default=256)");

/* Forward declarations */
static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
        struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx);

#define MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION     (0xFFFF)

#define MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH  (0xFFFE)

/**
 * mpi3mr_host_tag_for_scmd - Get host tag for a scmd
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 *
 * Calculate the host tag based on block tag for a given scmd.
 *
 * Return: Valid host tag or MPI3MR_HOSTTAG_INVALID.
 */
static u16 mpi3mr_host_tag_for_scmd(struct mpi3mr_ioc *mrioc,
        struct scsi_cmnd *scmd)
{
        struct scmd_priv *priv = NULL;
        u32 unique_tag;
        u16 host_tag, hw_queue;

        unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));

        hw_queue = blk_mq_unique_tag_to_hwq(unique_tag);
        if (hw_queue >= mrioc->num_op_reply_q)
                return MPI3MR_HOSTTAG_INVALID;
        host_tag = blk_mq_unique_tag_to_tag(unique_tag);

        if (WARN_ON(host_tag >= mrioc->max_host_ios))
                return MPI3MR_HOSTTAG_INVALID;

        priv = scsi_cmd_priv(scmd);
        /* host_tag 0 is invalid, hence incrementing by 1 */
        priv->host_tag = host_tag + 1;
        priv->scmd = scmd;
        priv->in_lld_scope = 1;
        priv->req_q_idx = hw_queue;
        priv->meta_chain_idx = -1;
        priv->chain_idx = -1;
        priv->meta_sg_valid = 0;
        return priv->host_tag;
}
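/*
 * Editor's note (added commentary, not part of the original driver): the
 * host tag scheme above mirrors the blk_mq_unique_tag() layout, which
 * packs the hardware queue index into the upper 16 bits and the
 * per-queue tag into the lower 16 bits (BLK_MQ_UNIQUE_TAG_BITS == 16).
 * For example, a request on hw queue 2 with block tag 5 yields
 * unique_tag == (2 << 16) | 5; the driver then reports
 * host_tag == 5 + 1 to the firmware, since host tag 0 is reserved as
 * invalid.
 */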
/**
 * mpi3mr_scmd_from_host_tag - Get SCSI command from host tag
 * @mrioc: Adapter instance reference
 * @host_tag: Host tag
 * @qidx: Operational queue index
 *
 * Identify the block tag from the host tag and queue index and
 * retrieve associated scsi command using scsi_host_find_tag().
 *
 * Return: SCSI command reference or NULL.
 */
static struct scsi_cmnd *mpi3mr_scmd_from_host_tag(
        struct mpi3mr_ioc *mrioc, u16 host_tag, u16 qidx)
{
        struct scsi_cmnd *scmd = NULL;
        struct scmd_priv *priv = NULL;
        u32 unique_tag = host_tag - 1;

        if (WARN_ON(host_tag > mrioc->max_host_ios))
                goto out;

        unique_tag |= (qidx << BLK_MQ_UNIQUE_TAG_BITS);

        scmd = scsi_host_find_tag(mrioc->shost, unique_tag);
        if (scmd) {
                priv = scsi_cmd_priv(scmd);
                if (!priv->in_lld_scope)
                        scmd = NULL;
        }
out:
        return scmd;
}

/**
 * mpi3mr_clear_scmd_priv - Cleanup SCSI command private data
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 *
 * Invalidate the SCSI command private data to mark the command
 * is not in LLD scope anymore.
 *
 * Return: Nothing.
 */
static void mpi3mr_clear_scmd_priv(struct mpi3mr_ioc *mrioc,
        struct scsi_cmnd *scmd)
{
        struct scmd_priv *priv = NULL;

        priv = scsi_cmd_priv(scmd);

        if (WARN_ON(priv->in_lld_scope == 0))
                return;
        priv->host_tag = MPI3MR_HOSTTAG_INVALID;
        priv->req_q_idx = 0xFFFF;
        priv->scmd = NULL;
        priv->in_lld_scope = 0;
        priv->meta_sg_valid = 0;
        if (priv->chain_idx >= 0) {
                clear_bit(priv->chain_idx, mrioc->chain_bitmap);
                priv->chain_idx = -1;
        }
        if (priv->meta_chain_idx >= 0) {
                clear_bit(priv->meta_chain_idx, mrioc->chain_bitmap);
                priv->meta_chain_idx = -1;
        }
}

static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
        struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc);
static void mpi3mr_fwevt_worker(struct work_struct *work);

/**
 * mpi3mr_fwevt_free - firmware event memory deallocator
 * @r: kref pointer of the firmware event
 *
 * Free firmware event memory when no reference.
 */
static void mpi3mr_fwevt_free(struct kref *r)
{
        kfree(container_of(r, struct mpi3mr_fwevt, ref_count));
}

/**
 * mpi3mr_fwevt_get - kref incrementer
 * @fwevt: Firmware event reference
 *
 * Increment firmware event reference count.
 */
static void mpi3mr_fwevt_get(struct mpi3mr_fwevt *fwevt)
{
        kref_get(&fwevt->ref_count);
}

/**
 * mpi3mr_fwevt_put - kref decrementer
 * @fwevt: Firmware event reference
 *
 * Decrement firmware event reference count.
 */
static void mpi3mr_fwevt_put(struct mpi3mr_fwevt *fwevt)
{
        kref_put(&fwevt->ref_count, mpi3mr_fwevt_free);
}

/**
 * mpi3mr_alloc_fwevt - Allocate firmware event
 * @len: length of firmware event data to allocate
 *
 * Allocate firmware event with required length and initialize
 * the reference counter.
 *
 * Return: firmware event reference.
 */
static struct mpi3mr_fwevt *mpi3mr_alloc_fwevt(int len)
{
        struct mpi3mr_fwevt *fwevt;

        fwevt = kzalloc(sizeof(*fwevt) + len, GFP_ATOMIC);
        if (!fwevt)
                return NULL;

        kref_init(&fwevt->ref_count);
        return fwevt;
}
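/*
 * Editor's note (added commentary): mpi3mr_alloc_fwevt() over-allocates
 * by @len bytes so the firmware event payload can be copied inline into
 * fwevt->event_data, the trailing buffer of struct mpi3mr_fwevt.
 * GFP_ATOMIC is used because events may be queued from atomic
 * (reply-processing) context where sleeping allocations are not allowed.
 */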
/**
 * mpi3mr_fwevt_add_to_list - Add firmware event to the list
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Add the given firmware event to the firmware event list.
 *
 * Return: Nothing.
 */
static void mpi3mr_fwevt_add_to_list(struct mpi3mr_ioc *mrioc,
        struct mpi3mr_fwevt *fwevt)
{
        unsigned long flags;

        if (!mrioc->fwevt_worker_thread)
                return;

        spin_lock_irqsave(&mrioc->fwevt_lock, flags);
        /* get fwevt reference count while adding it to fwevt_list */
        mpi3mr_fwevt_get(fwevt);
        INIT_LIST_HEAD(&fwevt->list);
        list_add_tail(&fwevt->list, &mrioc->fwevt_list);
        INIT_WORK(&fwevt->work, mpi3mr_fwevt_worker);
        /* get fwevt reference count while enqueueing it to worker queue */
        mpi3mr_fwevt_get(fwevt);
        queue_work(mrioc->fwevt_worker_thread, &fwevt->work);
        spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
}

/**
 * mpi3mr_fwevt_del_from_list - Delete firmware event from list
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Delete the given firmware event from the firmware event list.
 *
 * Return: Nothing.
 */
static void mpi3mr_fwevt_del_from_list(struct mpi3mr_ioc *mrioc,
        struct mpi3mr_fwevt *fwevt)
{
        unsigned long flags;

        spin_lock_irqsave(&mrioc->fwevt_lock, flags);
        if (!list_empty(&fwevt->list)) {
                list_del_init(&fwevt->list);
                /*
                 * Put fwevt reference count after
                 * removing it from fwevt_list
                 */
                mpi3mr_fwevt_put(fwevt);
        }
        spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
}

/**
 * mpi3mr_dequeue_fwevt - Dequeue firmware event from the list
 * @mrioc: Adapter instance reference
 *
 * Dequeue a firmware event from the firmware event list.
 *
 * Return: firmware event.
 */
static struct mpi3mr_fwevt *mpi3mr_dequeue_fwevt(
        struct mpi3mr_ioc *mrioc)
{
        unsigned long flags;
        struct mpi3mr_fwevt *fwevt = NULL;

        spin_lock_irqsave(&mrioc->fwevt_lock, flags);
        if (!list_empty(&mrioc->fwevt_list)) {
                fwevt = list_first_entry(&mrioc->fwevt_list,
                    struct mpi3mr_fwevt, list);
                list_del_init(&fwevt->list);
                /*
                 * Put fwevt reference count after
                 * removing it from fwevt_list
                 */
                mpi3mr_fwevt_put(fwevt);
        }
        spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);

        return fwevt;
}

/**
 * mpi3mr_cancel_work - cancel firmware event
 * @fwevt: fwevt object which needs to be canceled
 *
 * Return: Nothing.
 */
static void mpi3mr_cancel_work(struct mpi3mr_fwevt *fwevt)
{
        /*
         * Wait on the fwevt to complete. If this returns 1, then
         * the event was never executed.
         *
         * If it did execute, we wait for it to finish, and the put will
         * happen from mpi3mr_process_fwevt()
         */
        if (cancel_work_sync(&fwevt->work)) {
                /*
                 * Put fwevt reference count after
                 * dequeuing it from worker queue
                 */
                mpi3mr_fwevt_put(fwevt);
                /*
                 * Put fwevt reference count to neutralize
                 * kref_init increment
                 */
                mpi3mr_fwevt_put(fwevt);
        }
}
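/*
 * Editor's summary of the fwevt reference counting (added for clarity):
 *   - kref_init() in mpi3mr_alloc_fwevt() starts the count at 1
 *   - +1 when the event is linked on mrioc->fwevt_list
 *   - +1 when the event's work is queued on the worker thread
 *   - -1 in mpi3mr_fwevt_del_from_list()/mpi3mr_dequeue_fwevt() when
 *     the event is unlinked
 *   - -1 when the queued work completes, or -2 via mpi3mr_cancel_work()
 *     when it is cancelled before running (queue ref plus init ref)
 */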
/**
 * mpi3mr_cleanup_fwevt_list - Cleanup firmware event list
 * @mrioc: Adapter instance reference
 *
 * Flush all pending firmware events from the firmware event
 * list.
 *
 * Return: Nothing.
 */
void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc)
{
        struct mpi3mr_fwevt *fwevt = NULL;

        if ((list_empty(&mrioc->fwevt_list) && !mrioc->current_event) ||
            !mrioc->fwevt_worker_thread)
                return;

        while ((fwevt = mpi3mr_dequeue_fwevt(mrioc)))
                mpi3mr_cancel_work(fwevt);

        if (mrioc->current_event) {
                fwevt = mrioc->current_event;
                /*
                 * Don't call cancel_work_sync() API for the
                 * fwevt work if the controller reset is
                 * triggered as part of processing the
                 * same fwevt work (or) when the worker thread is
                 * waiting for device add/remove APIs to complete.
                 * Otherwise we will see a deadlock.
                 */
                if (current_work() == &fwevt->work || fwevt->pending_at_sml) {
                        fwevt->discard = 1;
                        return;
                }

                mpi3mr_cancel_work(fwevt);
        }
}

/**
 * mpi3mr_queue_qd_reduction_event - Queue TG QD reduction event
 * @mrioc: Adapter instance reference
 * @tg: Throttle group information pointer
 *
 * Accessor to queue a synthetically generated driver event to
 * the event worker thread; the driver event will be used to
 * reduce the QD of all VDs in the TG from the worker thread.
 *
 * Return: None.
 */
static void mpi3mr_queue_qd_reduction_event(struct mpi3mr_ioc *mrioc,
        struct mpi3mr_throttle_group_info *tg)
{
        struct mpi3mr_fwevt *fwevt;
        u16 sz = sizeof(struct mpi3mr_throttle_group_info *);

        /*
         * If the QD reduction event is already queued due to throttling and
         * the QD is not restored through the device info change event,
         * then don't queue further reduction events.
         */
        if (tg->fw_qd != tg->modified_qd)
                return;

        fwevt = mpi3mr_alloc_fwevt(sz);
        if (!fwevt) {
                ioc_warn(mrioc, "failed to queue TG QD reduction event\n");
                return;
        }
        *(struct mpi3mr_throttle_group_info **)fwevt->event_data = tg;
        fwevt->mrioc = mrioc;
        fwevt->event_id = MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION;
        fwevt->send_ack = 0;
        fwevt->process_evt = 1;
        fwevt->evt_ctx = 0;
        fwevt->event_data_size = sz;
        tg->modified_qd = max_t(u16, (tg->fw_qd * tg->qd_reduction) / 10, 8);

        dprint_event_bh(mrioc, "qd reduction event queued for tg_id(%d)\n",
            tg->id);
        mpi3mr_fwevt_add_to_list(mrioc, fwevt);
}
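/*
 * Editor's worked example (added; assumes tg->qd_reduction expresses the
 * reduction in tenths, as the division by 10 above suggests): with
 * tg->fw_qd == 128 and tg->qd_reduction == 3, the new depth is
 * max_t(u16, (128 * 3) / 10, 8) == 38, i.e. roughly 30% of the firmware
 * queue depth, with 8 as the floor so a group is never throttled to
 * zero.
 */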
/**
 * mpi3mr_invalidate_devhandles - Invalidate device handles
 * @mrioc: Adapter instance reference
 *
 * Invalidate the device handles in the target device structures.
 * Called post reset prior to reinitializing the controller.
 *
 * Return: Nothing.
 */
void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc)
{
        struct mpi3mr_tgt_dev *tgtdev;
        struct mpi3mr_stgt_priv_data *tgt_priv;

        list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
                tgtdev->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
                if (tgtdev->starget && tgtdev->starget->hostdata) {
                        tgt_priv = tgtdev->starget->hostdata;
                        tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
                        tgt_priv->io_throttle_enabled = 0;
                        tgt_priv->io_divert = 0;
                        tgt_priv->throttle_group = NULL;
                        tgt_priv->wslen = 0;
                        if (tgtdev->host_exposed)
                                atomic_set(&tgt_priv->block_io, 1);
                }
        }
}

/**
 * mpi3mr_print_scmd - print individual SCSI command
 * @rq: Block request
 * @data: Adapter instance reference
 *
 * Print the SCSI command details if it is in LLD scope.
 *
 * Return: true always.
 */
static bool mpi3mr_print_scmd(struct request *rq, void *data)
{
        struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
        struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
        struct scmd_priv *priv = NULL;

        if (scmd) {
                priv = scsi_cmd_priv(scmd);
                if (!priv->in_lld_scope)
                        goto out;

                ioc_info(mrioc, "%s :Host Tag = %d, qid = %d\n",
                    __func__, priv->host_tag, priv->req_q_idx + 1);
                scsi_print_command(scmd);
        }

out:
        return true;
}

/**
 * mpi3mr_flush_scmd - Flush individual SCSI command
 * @rq: Block request
 * @data: Adapter instance reference
 *
 * Return the SCSI command to the upper layers if it is in LLD
 * scope.
 *
 * Return: true always.
 */
static bool mpi3mr_flush_scmd(struct request *rq, void *data)
{
        struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
        struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
        struct scmd_priv *priv = NULL;

        if (scmd) {
                priv = scsi_cmd_priv(scmd);
                if (!priv->in_lld_scope)
                        goto out;

                if (priv->meta_sg_valid)
                        dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
                            scsi_prot_sg_count(scmd), scmd->sc_data_direction);
                mpi3mr_clear_scmd_priv(mrioc, scmd);
                scsi_dma_unmap(scmd);
                scmd->result = DID_RESET << 16;
                scsi_print_command(scmd);
                scsi_done(scmd);
                mrioc->flush_io_count++;
        }

out:
        return true;
}

/**
 * mpi3mr_count_dev_pending - Count commands pending for a lun
 * @rq: Block request
 * @data: SCSI device reference
 *
 * This is an iterator function called for each SCSI command in
 * a host and if the command is pending in the LLD for the
 * specific device(lun) then device specific pending I/O counter
 * is updated in the device structure.
 *
 * Return: true always.
 */
static bool mpi3mr_count_dev_pending(struct request *rq, void *data)
{
        struct scsi_device *sdev = (struct scsi_device *)data;
        struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
        struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
        struct scmd_priv *priv;

        if (scmd) {
                priv = scsi_cmd_priv(scmd);
                if (!priv->in_lld_scope)
                        goto out;
                if (scmd->device == sdev)
                        sdev_priv_data->pend_count++;
        }

out:
        return true;
}

/**
 * mpi3mr_count_tgt_pending - Count commands pending for target
 * @rq: Block request
 * @data: SCSI target reference
 *
 * This is an iterator function called for each SCSI command in
 * a host and if the command is pending in the LLD for the
 * specific target then target specific pending I/O counter is
 * updated in the target structure.
 *
 * Return: true always.
 */
static bool mpi3mr_count_tgt_pending(struct request *rq, void *data)
{
        struct scsi_target *starget = (struct scsi_target *)data;
        struct mpi3mr_stgt_priv_data *stgt_priv_data = starget->hostdata;
        struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
        struct scmd_priv *priv;

        if (scmd) {
                priv = scsi_cmd_priv(scmd);
                if (!priv->in_lld_scope)
                        goto out;
                if (scmd->device && (scsi_target(scmd->device) == starget))
                        stgt_priv_data->pend_count++;
        }

out:
        return true;
}
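/*
 * Editor's note (added usage sketch): the iterator callbacks above are
 * written for blk_mq_tagset_busy_iter(), which invokes them once for
 * each in-flight request of the host. Counting the commands pending on
 * one LUN, for example, could look like:
 *
 *      sdev_priv_data->pend_count = 0;
 *      blk_mq_tagset_busy_iter(&mrioc->shost->tag_set,
 *          mpi3mr_count_dev_pending, (void *)sdev);
 */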
/**
 * mpi3mr_flush_host_io - Flush host I/Os
 * @mrioc: Adapter instance reference
 *
 * Flush all of the pending I/Os by calling
 * blk_mq_tagset_busy_iter() for each possible tag. This is
 * executed post controller reset.
 *
 * Return: Nothing.
 */
void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc)
{
        struct Scsi_Host *shost = mrioc->shost;

        mrioc->flush_io_count = 0;
        ioc_info(mrioc, "%s :Flushing Host I/O cmds post reset\n", __func__);
        blk_mq_tagset_busy_iter(&shost->tag_set,
            mpi3mr_flush_scmd, (void *)mrioc);
        ioc_info(mrioc, "%s :Flushed %d Host I/O cmds\n", __func__,
            mrioc->flush_io_count);
}

/**
 * mpi3mr_flush_cmds_for_unrecovered_controller - Flush all pending cmds
 * @mrioc: Adapter instance reference
 *
 * This function waits for currently running IO poll threads to
 * exit and then flushes all host I/Os and any internal pending
 * cmds. This is executed after the controller is marked as
 * unrecoverable.
 *
 * Return: Nothing.
 */
void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc)
{
        struct Scsi_Host *shost = mrioc->shost;
        int i;

        if (!mrioc->unrecoverable)
                return;

        if (mrioc->op_reply_qinfo) {
                for (i = 0; i < mrioc->num_queues; i++) {
                        while (atomic_read(&mrioc->op_reply_qinfo[i].in_use))
                                udelay(500);
                        atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0);
                }
        }
        mrioc->flush_io_count = 0;
        blk_mq_tagset_busy_iter(&shost->tag_set,
            mpi3mr_flush_scmd, (void *)mrioc);
        mpi3mr_flush_delayed_cmd_lists(mrioc);
        mpi3mr_flush_drv_cmds(mrioc);
}

/**
 * mpi3mr_alloc_tgtdev - target device allocator
 *
 * Allocate target device instance and initialize the reference
 * count.
 *
 * Return: target device instance.
 */
static struct mpi3mr_tgt_dev *mpi3mr_alloc_tgtdev(void)
{
        struct mpi3mr_tgt_dev *tgtdev;

        tgtdev = kzalloc(sizeof(*tgtdev), GFP_ATOMIC);
        if (!tgtdev)
                return NULL;
        kref_init(&tgtdev->ref_count);
        return tgtdev;
}

/**
 * mpi3mr_tgtdev_add_to_list - Add tgtdevice to the list
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device
 *
 * Add the target device to the target device list.
 *
 * Return: Nothing.
 */
static void mpi3mr_tgtdev_add_to_list(struct mpi3mr_ioc *mrioc,
        struct mpi3mr_tgt_dev *tgtdev)
{
        unsigned long flags;

        spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
        mpi3mr_tgtdev_get(tgtdev);
        INIT_LIST_HEAD(&tgtdev->list);
        list_add_tail(&tgtdev->list, &mrioc->tgtdev_list);
        tgtdev->state = MPI3MR_DEV_CREATED;
        spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}
/**
 * mpi3mr_tgtdev_del_from_list - Delete tgtdevice from the list
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device
 * @must_delete: Must delete the target device from the list irrespective
 * of the device state.
 *
 * Remove the target device from the target device list.
 *
 * Return: Nothing.
 */
static void mpi3mr_tgtdev_del_from_list(struct mpi3mr_ioc *mrioc,
        struct mpi3mr_tgt_dev *tgtdev, bool must_delete)
{
        unsigned long flags;

        spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
        if ((tgtdev->state == MPI3MR_DEV_REMOVE_HS_STARTED) || (must_delete == true)) {
                if (!list_empty(&tgtdev->list)) {
                        list_del_init(&tgtdev->list);
                        tgtdev->state = MPI3MR_DEV_DELETED;
                        mpi3mr_tgtdev_put(tgtdev);
                }
        }
        spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}

/**
 * __mpi3mr_get_tgtdev_by_handle - Get tgtdev from device handle
 * @mrioc: Adapter instance reference
 * @handle: Device handle
 *
 * Accessor to retrieve target device from the device handle.
 * Non-lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_handle(
        struct mpi3mr_ioc *mrioc, u16 handle)
{
        struct mpi3mr_tgt_dev *tgtdev;

        assert_spin_locked(&mrioc->tgtdev_lock);
        list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
                if (tgtdev->dev_handle == handle)
                        goto found_tgtdev;
        return NULL;

found_tgtdev:
        mpi3mr_tgtdev_get(tgtdev);
        return tgtdev;
}

/**
 * mpi3mr_get_tgtdev_by_handle - Get tgtdev from device handle
 * @mrioc: Adapter instance reference
 * @handle: Device handle
 *
 * Accessor to retrieve target device from the device handle.
 * Lock version.
 *
 * Return: Target device reference.
 */
struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle(
        struct mpi3mr_ioc *mrioc, u16 handle)
{
        struct mpi3mr_tgt_dev *tgtdev;
        unsigned long flags;

        spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
        tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle);
        spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
        return tgtdev;
}

/**
 * __mpi3mr_get_tgtdev_by_perst_id - Get tgtdev from persist ID
 * @mrioc: Adapter instance reference
 * @persist_id: Persistent ID
 *
 * Accessor to retrieve target device from the Persistent ID.
 * Non-lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_perst_id(
        struct mpi3mr_ioc *mrioc, u16 persist_id)
{
        struct mpi3mr_tgt_dev *tgtdev;

        assert_spin_locked(&mrioc->tgtdev_lock);
        list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
                if (tgtdev->perst_id == persist_id)
                        goto found_tgtdev;
        return NULL;

found_tgtdev:
        mpi3mr_tgtdev_get(tgtdev);
        return tgtdev;
}

/**
 * mpi3mr_get_tgtdev_by_perst_id - Get tgtdev from persistent ID
 * @mrioc: Adapter instance reference
 * @persist_id: Persistent ID
 *
 * Accessor to retrieve target device from the Persistent ID.
 * Lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_perst_id(
        struct mpi3mr_ioc *mrioc, u16 persist_id)
{
        struct mpi3mr_tgt_dev *tgtdev;
        unsigned long flags;

        spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
        tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, persist_id);
        spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
        return tgtdev;
}
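/*
 * Editor's note (added commentary): the double-underscore lookup helpers
 * above require mrioc->tgtdev_lock to be held (checked with
 * assert_spin_locked()) and return the device with an elevated reference
 * count; every successful lookup must be balanced with
 * mpi3mr_tgtdev_put(). The wrappers without the prefix are the
 * self-locking variants.
 */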
/**
 * __mpi3mr_get_tgtdev_from_tgtpriv - Get tgtdev from tgt private
 * @mrioc: Adapter instance reference
 * @tgt_priv: Target private data
 *
 * Accessor to return target device from the target private
 * data. Non-lock version.
 *
 * Return: Target device reference.
 */
static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_from_tgtpriv(
        struct mpi3mr_ioc *mrioc, struct mpi3mr_stgt_priv_data *tgt_priv)
{
        struct mpi3mr_tgt_dev *tgtdev;

        assert_spin_locked(&mrioc->tgtdev_lock);
        tgtdev = tgt_priv->tgt_dev;
        if (tgtdev)
                mpi3mr_tgtdev_get(tgtdev);
        return tgtdev;
}

/**
 * mpi3mr_set_io_divert_for_all_vd_in_tg - set divert for TG VDs
 * @mrioc: Adapter instance reference
 * @tg: Throttle group information pointer
 * @divert_value: 1 or 0
 *
 * Accessor to set io_divert flag for each device associated
 * with the given throttle group with the given value.
 *
 * Return: None.
 */
static void mpi3mr_set_io_divert_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc,
        struct mpi3mr_throttle_group_info *tg, u8 divert_value)
{
        unsigned long flags;
        struct mpi3mr_tgt_dev *tgtdev;
        struct mpi3mr_stgt_priv_data *tgt_priv;

        spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
        list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
                if (tgtdev->starget && tgtdev->starget->hostdata) {
                        tgt_priv = tgtdev->starget->hostdata;
                        if (tgt_priv->throttle_group == tg)
                                tgt_priv->io_divert = divert_value;
                }
        }
        spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
}

/**
 * mpi3mr_print_device_event_notice - print notice related to post processing of
 * device event after controller reset.
 *
 * @mrioc: Adapter instance reference
 * @device_add: true for device add event and false for device removal event
 *
 * Return: None.
 */
void mpi3mr_print_device_event_notice(struct mpi3mr_ioc *mrioc,
        bool device_add)
{
        ioc_notice(mrioc, "Device %s was in progress before the reset and\n",
            (device_add ? "addition" : "removal"));
        ioc_notice(mrioc, "completed after reset, verify whether the exposed devices\n");
        ioc_notice(mrioc, "are matched with attached devices for correctness\n");
}
/**
 * mpi3mr_remove_tgtdev_from_host - Remove dev from upper layers
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device structure
 *
 * Checks whether the device is exposed to upper layers and if it
 * is then remove the device from upper layers by calling
 * scsi_remove_target().
 *
 * Return: Nothing.
 */
void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc,
        struct mpi3mr_tgt_dev *tgtdev)
{
        struct mpi3mr_stgt_priv_data *tgt_priv;

        ioc_info(mrioc, "%s :Removing handle(0x%04x), wwid(0x%016llx)\n",
            __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
        if (tgtdev->starget && tgtdev->starget->hostdata) {
                tgt_priv = tgtdev->starget->hostdata;
                atomic_set(&tgt_priv->block_io, 0);
                tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
        }

        if (!mrioc->sas_transport_enabled || (tgtdev->dev_type !=
            MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) {
                if (tgtdev->starget) {
                        if (mrioc->current_event)
                                mrioc->current_event->pending_at_sml = 1;
                        scsi_remove_target(&tgtdev->starget->dev);
                        tgtdev->host_exposed = 0;
                        if (mrioc->current_event) {
                                mrioc->current_event->pending_at_sml = 0;
                                if (mrioc->current_event->discard) {
                                        mpi3mr_print_device_event_notice(mrioc,
                                            false);
                                        return;
                                }
                        }
                }
        } else
                mpi3mr_remove_tgtdev_from_sas_transport(mrioc, tgtdev);

        ioc_info(mrioc, "%s :Removed handle(0x%04x), wwid(0x%016llx)\n",
            __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
}

/**
 * mpi3mr_report_tgtdev_to_host - Expose device to upper layers
 * @mrioc: Adapter instance reference
 * @perst_id: Persistent ID of the device
 *
 * Checks whether the device can be exposed to upper layers and
 * if it is not exposed yet, then expose the device to upper
 * layers by calling scsi_scan_target().
 *
 * Return: 0 on success, non zero on failure.
 */
static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc,
        u16 perst_id)
{
        int retval = 0;
        struct mpi3mr_tgt_dev *tgtdev;

        if (mrioc->reset_in_progress)
                return -1;

        tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
        if (!tgtdev) {
                retval = -1;
                goto out;
        }
        if (tgtdev->is_hidden || tgtdev->host_exposed) {
                retval = -1;
                goto out;
        }
        if (!mrioc->sas_transport_enabled || (tgtdev->dev_type !=
            MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) {
                tgtdev->host_exposed = 1;
                if (mrioc->current_event)
                        mrioc->current_event->pending_at_sml = 1;
                scsi_scan_target(&mrioc->shost->shost_gendev,
                    mrioc->scsi_device_channel, tgtdev->perst_id,
                    SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
                if (!tgtdev->starget)
                        tgtdev->host_exposed = 0;
                if (mrioc->current_event) {
                        mrioc->current_event->pending_at_sml = 0;
                        if (mrioc->current_event->discard) {
                                mpi3mr_print_device_event_notice(mrioc, true);
                                goto out;
                        }
                }
        } else
                mpi3mr_report_tgtdev_to_sas_transport(mrioc, tgtdev);
out:
        if (tgtdev)
                mpi3mr_tgtdev_put(tgtdev);

        return retval;
}
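/*
 * Editor's note (added commentary): pending_at_sml and discard form a
 * handshake with mpi3mr_cleanup_fwevt_list(). While scsi_remove_target()
 * or scsi_scan_target() is in flight, the event work cannot be cancelled
 * synchronously without risking a deadlock, so a concurrent controller
 * reset only sets fwevt->discard; the worker then prints the device
 * event notice and bails out once the midlayer call returns.
 */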
/**
 * mpi3mr_change_queue_depth - Change QD callback handler
 * @sdev: SCSI device reference
 * @q_depth: Queue depth
 *
 * Validate and limit QD and call scsi_change_queue_depth.
 *
 * Return: return value of scsi_change_queue_depth
 */
static int mpi3mr_change_queue_depth(struct scsi_device *sdev,
        int q_depth)
{
        struct scsi_target *starget = scsi_target(sdev);
        struct Scsi_Host *shost = dev_to_shost(&starget->dev);
        int retval = 0;

        if (!sdev->tagged_supported)
                q_depth = 1;
        if (q_depth > shost->can_queue)
                q_depth = shost->can_queue;
        else if (!q_depth)
                q_depth = MPI3MR_DEFAULT_SDEV_QD;
        retval = scsi_change_queue_depth(sdev, q_depth);
        sdev->max_queue_depth = sdev->queue_depth;

        return retval;
}

/**
 * mpi3mr_update_sdev - Update SCSI device information
 * @sdev: SCSI device reference
 * @data: target device reference
 *
 * This is an iterator function called for each SCSI device in a
 * target to update the target specific information into each
 * SCSI device.
 *
 * Return: Nothing.
 */
static void
mpi3mr_update_sdev(struct scsi_device *sdev, void *data)
{
        struct mpi3mr_tgt_dev *tgtdev;

        tgtdev = (struct mpi3mr_tgt_dev *)data;
        if (!tgtdev)
                return;

        mpi3mr_change_queue_depth(sdev, tgtdev->q_depth);
        switch (tgtdev->dev_type) {
        case MPI3_DEVICE_DEVFORM_PCIE:
                /* The block layer hw sector size = 512 */
                if ((tgtdev->dev_spec.pcie_inf.dev_info &
                    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
                    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) {
                        blk_queue_max_hw_sectors(sdev->request_queue,
                            tgtdev->dev_spec.pcie_inf.mdts / 512);
                        if (tgtdev->dev_spec.pcie_inf.pgsz == 0)
                                blk_queue_virt_boundary(sdev->request_queue,
                                    ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1));
                        else
                                blk_queue_virt_boundary(sdev->request_queue,
                                    ((1 << tgtdev->dev_spec.pcie_inf.pgsz) - 1));
                }
                break;
        default:
                break;
        }
}
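/*
 * Editor's worked example (added): for an NVMe device reporting
 * mdts == 1 MiB, mpi3mr_update_sdev() programs a transfer limit of
 * 1048576 / 512 == 2048 sectors. The virt boundary mask is
 * (1 << pgsz) - 1; pgsz == 12 yields 4095, enforcing 4 KiB page
 * alignment, and pgsz == 0 falls back to MPI3MR_DEFAULT_PGSZEXP.
 */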
/**
 * mpi3mr_rfresh_tgtdevs - Refresh target device exposure
 * @mrioc: Adapter instance reference
 *
 * This is executed post controller reset to identify any
 * missing devices during reset and remove from the upper layers
 * or expose any newly detected device to the upper layers.
 *
 * Return: Nothing.
 */
void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
{
        struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;

        list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
            list) {
                if (tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
                        dprint_reset(mrioc, "removing target device with perst_id(%d)\n",
                            tgtdev->perst_id);
                        if (tgtdev->host_exposed)
                                mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
                        mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true);
                        mpi3mr_tgtdev_put(tgtdev);
                }
        }

        tgtdev = NULL;
        list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
                if ((tgtdev->dev_handle != MPI3MR_INVALID_DEV_HANDLE) &&
                    !tgtdev->is_hidden && !tgtdev->host_exposed)
                        mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
        }
}

/**
 * mpi3mr_update_tgtdev - Update cached target device information
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device internal structure
 * @dev_pg0: New device page0
 * @is_added: Flag to indicate the device is just added
 *
 * Update the information from the device page0 into the driver
 * cached target device structure.
 *
 * Return: Nothing.
 */
static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
        struct mpi3mr_tgt_dev *tgtdev, struct mpi3_device_page0 *dev_pg0,
        bool is_added)
{
        u16 flags = 0;
        struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
        struct mpi3mr_enclosure_node *enclosure_dev = NULL;
        u8 prot_mask = 0;

        tgtdev->perst_id = le16_to_cpu(dev_pg0->persistent_id);
        tgtdev->dev_handle = le16_to_cpu(dev_pg0->dev_handle);
        tgtdev->dev_type = dev_pg0->device_form;
        tgtdev->io_unit_port = dev_pg0->io_unit_port;
        tgtdev->encl_handle = le16_to_cpu(dev_pg0->enclosure_handle);
        tgtdev->parent_handle = le16_to_cpu(dev_pg0->parent_dev_handle);
        tgtdev->slot = le16_to_cpu(dev_pg0->slot);
        tgtdev->q_depth = le16_to_cpu(dev_pg0->queue_depth);
        tgtdev->wwid = le64_to_cpu(dev_pg0->wwid);
        tgtdev->devpg0_flag = le16_to_cpu(dev_pg0->flags);

        if (tgtdev->encl_handle)
                enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc,
                    tgtdev->encl_handle);
        if (enclosure_dev)
                tgtdev->enclosure_logical_id = le64_to_cpu(
                    enclosure_dev->pg0.enclosure_logical_id);

        flags = tgtdev->devpg0_flag;

        tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN);

        if (is_added == true)
                tgtdev->io_throttle_enabled =
                    (flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 1 : 0;

        switch (flags & MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_MASK) {
        case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_256_LB:
                tgtdev->wslen = MPI3MR_WRITE_SAME_MAX_LEN_256_BLKS;
                break;
        case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_2048_LB:
                tgtdev->wslen = MPI3MR_WRITE_SAME_MAX_LEN_2048_BLKS;
                break;
        case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_NO_LIMIT:
        default:
                tgtdev->wslen = 0;
                break;
        }

        if (tgtdev->starget && tgtdev->starget->hostdata) {
                scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
                    tgtdev->starget->hostdata;
                scsi_tgt_priv_data->perst_id = tgtdev->perst_id;
                scsi_tgt_priv_data->dev_handle = tgtdev->dev_handle;
                scsi_tgt_priv_data->dev_type = tgtdev->dev_type;
                scsi_tgt_priv_data->io_throttle_enabled =
                    tgtdev->io_throttle_enabled;
                if (is_added == true)
                        atomic_set(&scsi_tgt_priv_data->block_io, 0);
                scsi_tgt_priv_data->wslen = tgtdev->wslen;
        }

        switch (dev_pg0->access_status) {
        case MPI3_DEVICE0_ASTATUS_NO_ERRORS:
        case MPI3_DEVICE0_ASTATUS_PREPARE:
        case MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION:
        case MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY:
                break;
        default:
                tgtdev->is_hidden = 1;
                break;
        }

        switch (tgtdev->dev_type) {
        case MPI3_DEVICE_DEVFORM_SAS_SATA:
        {
                struct mpi3_device0_sas_sata_format *sasinf =
                    &dev_pg0->device_specific.sas_sata_format;
                u16 dev_info = le16_to_cpu(sasinf->device_info);

                tgtdev->dev_spec.sas_sata_inf.dev_info = dev_info;
                tgtdev->dev_spec.sas_sata_inf.sas_address =
                    le64_to_cpu(sasinf->sas_address);
                tgtdev->dev_spec.sas_sata_inf.phy_id = sasinf->phy_num;
                tgtdev->dev_spec.sas_sata_inf.attached_phy_id =
                    sasinf->attached_phy_identifier;
                if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) !=
                    MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE)
                        tgtdev->is_hidden = 1;
                else if (!(dev_info & (MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET |
                    MPI3_SAS_DEVICE_INFO_SSP_TARGET)))
                        tgtdev->is_hidden = 1;

                if (((tgtdev->devpg0_flag &
                    MPI3_DEVICE0_FLAGS_ATT_METHOD_DIR_ATTACHED) &&
                    (tgtdev->devpg0_flag &
                    MPI3_DEVICE0_FLAGS_ATT_METHOD_VIRTUAL)) ||
                    (tgtdev->parent_handle == 0xFFFF))
                        tgtdev->non_stl = 1;
                if (tgtdev->dev_spec.sas_sata_inf.hba_port)
                        tgtdev->dev_spec.sas_sata_inf.hba_port->port_id =
                            dev_pg0->io_unit_port;
                break;
        }
        case MPI3_DEVICE_DEVFORM_PCIE:
        {
                struct mpi3_device0_pcie_format *pcieinf =
                    &dev_pg0->device_specific.pcie_format;
                u16 dev_info = le16_to_cpu(pcieinf->device_info);

                tgtdev->dev_spec.pcie_inf.dev_info = dev_info;
                tgtdev->dev_spec.pcie_inf.capb =
                    le32_to_cpu(pcieinf->capabilities);
                tgtdev->dev_spec.pcie_inf.mdts = MPI3MR_DEFAULT_MDTS;
                /* 2^12 = 4096 */
                tgtdev->dev_spec.pcie_inf.pgsz = 12;
                if (dev_pg0->access_status == MPI3_DEVICE0_ASTATUS_NO_ERRORS) {
                        tgtdev->dev_spec.pcie_inf.mdts =
                            le32_to_cpu(pcieinf->maximum_data_transfer_size);
                        tgtdev->dev_spec.pcie_inf.pgsz = pcieinf->page_size;
                        tgtdev->dev_spec.pcie_inf.reset_to =
                            max_t(u8, pcieinf->controller_reset_to,
                            MPI3MR_INTADMCMD_TIMEOUT);
                        tgtdev->dev_spec.pcie_inf.abort_to =
                            max_t(u8, pcieinf->nvme_abort_to,
                            MPI3MR_INTADMCMD_TIMEOUT);
                }
                if (tgtdev->dev_spec.pcie_inf.mdts > (1024 * 1024))
                        tgtdev->dev_spec.pcie_inf.mdts = (1024 * 1024);
                if (((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
                    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
                    ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
                    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE))
                        tgtdev->is_hidden = 1;
                tgtdev->non_stl = 1;
                if (!mrioc->shost)
                        break;
                prot_mask = scsi_host_get_prot(mrioc->shost);
                if (prot_mask & SHOST_DIX_TYPE0_PROTECTION) {
                        scsi_host_set_prot(mrioc->shost, prot_mask & 0x77);
                        ioc_info(mrioc,
                            "%s : Disabling DIX0 prot capability\n", __func__);
                        ioc_info(mrioc,
                            "because HBA does not support DIX0 operation on NVME drives\n");
                }
                break;
        }
        case MPI3_DEVICE_DEVFORM_VD:
        {
                struct mpi3_device0_vd_format *vdinf =
                    &dev_pg0->device_specific.vd_format;
                struct mpi3mr_throttle_group_info *tg = NULL;
                u16 vdinf_io_throttle_group =
                    le16_to_cpu(vdinf->io_throttle_group);

                tgtdev->dev_spec.vd_inf.state = vdinf->vd_state;
                if (vdinf->vd_state == MPI3_DEVICE0_VD_STATE_OFFLINE)
                        tgtdev->is_hidden = 1;
                tgtdev->non_stl = 1;
                tgtdev->dev_spec.vd_inf.tg_id = vdinf_io_throttle_group;
                tgtdev->dev_spec.vd_inf.tg_high =
                    le16_to_cpu(vdinf->io_throttle_group_high) * 2048;
                tgtdev->dev_spec.vd_inf.tg_low =
                    le16_to_cpu(vdinf->io_throttle_group_low) * 2048;
                if (vdinf_io_throttle_group < mrioc->num_io_throttle_group) {
                        tg = mrioc->throttle_groups + vdinf_io_throttle_group;
                        tg->id = vdinf_io_throttle_group;
                        tg->high = tgtdev->dev_spec.vd_inf.tg_high;
                        tg->low = tgtdev->dev_spec.vd_inf.tg_low;
                        tg->qd_reduction =
                            tgtdev->dev_spec.vd_inf.tg_qd_reduction;
                        if (is_added == true)
                                tg->fw_qd = tgtdev->q_depth;
                        tg->modified_qd = tgtdev->q_depth;
                }
                tgtdev->dev_spec.vd_inf.tg = tg;
                if (scsi_tgt_priv_data)
                        scsi_tgt_priv_data->throttle_group = tg;
                break;
        }
        default:
                break;
        }
}
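/*
 * Editor's note (added; the unit reading is an assumption): the * 2048
 * scaling of io_throttle_group_high/low converts a firmware value that
 * appears to be MiB-granular into 512-byte sectors (2048 sectors ==
 * 1 MiB); e.g. a reported value of 8 becomes 16384 sectors for the
 * throttle-high watermark.
 */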
/**
 * mpi3mr_devstatuschg_evt_bh - DevStatusChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event information.
 *
 * Process Device status Change event and based on device's new
 * information, either expose the device to the upper layers, or
 * remove the device from upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_ioc *mrioc,
        struct mpi3mr_fwevt *fwevt)
{
        u16 dev_handle = 0;
        u8 uhide = 0, delete = 0, cleanup = 0;
        struct mpi3mr_tgt_dev *tgtdev = NULL;
        struct mpi3_event_data_device_status_change *evtdata =
            (struct mpi3_event_data_device_status_change *)fwevt->event_data;

        dev_handle = le16_to_cpu(evtdata->dev_handle);
        ioc_info(mrioc,
            "%s :device status change: handle(0x%04x): reason code(0x%x)\n",
            __func__, dev_handle, evtdata->reason_code);
        switch (evtdata->reason_code) {
        case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
                delete = 1;
                break;
        case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN:
                uhide = 1;
                break;
        case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
                delete = 1;
                cleanup = 1;
                break;
        default:
                ioc_info(mrioc, "%s :Unhandled reason code(0x%x)\n", __func__,
                    evtdata->reason_code);
                break;
        }

        tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
        if (!tgtdev)
                goto out;
        if (uhide) {
                tgtdev->is_hidden = 0;
                if (!tgtdev->host_exposed)
                        mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
        }

        if (delete)
                mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);

        if (cleanup) {
                mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
                mpi3mr_tgtdev_put(tgtdev);
        }

out:
        if (tgtdev)
                mpi3mr_tgtdev_put(tgtdev);
}

/**
 * mpi3mr_devinfochg_evt_bh - DeviceInfoChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @dev_pg0: New device page0
 *
 * Process Device Info Change event and based on device's new
 * information, either expose the device to the upper layers, or
 * remove the device from upper layers or update the details of
 * the device.
 *
 * Return: Nothing.
 */
static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_ioc *mrioc,
        struct mpi3_device_page0 *dev_pg0)
{
        struct mpi3mr_tgt_dev *tgtdev = NULL;
        u16 dev_handle = 0, perst_id = 0;

        perst_id = le16_to_cpu(dev_pg0->persistent_id);
        dev_handle = le16_to_cpu(dev_pg0->dev_handle);
        ioc_info(mrioc,
            "%s :Device info change: handle(0x%04x): persist_id(0x%x)\n",
            __func__, dev_handle, perst_id);
        tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
        if (!tgtdev)
                goto out;
        mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, false);
        if (!tgtdev->is_hidden && !tgtdev->host_exposed)
                mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
        if (tgtdev->is_hidden && tgtdev->host_exposed)
                mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
        if (!tgtdev->is_hidden && tgtdev->host_exposed && tgtdev->starget)
                starget_for_each_device(tgtdev->starget, (void *)tgtdev,
                    mpi3mr_update_sdev);
out:
        if (tgtdev)
                mpi3mr_tgtdev_put(tgtdev);
}
/**
 * mpi3mr_free_enclosure_list - release enclosures
 * @mrioc: Adapter instance reference
 *
 * Free memory allocated during enclosure add.
 *
 * Return: Nothing.
 */
void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc)
{
        struct mpi3mr_enclosure_node *enclosure_dev, *enclosure_dev_next;

        list_for_each_entry_safe(enclosure_dev,
            enclosure_dev_next, &mrioc->enclosure_list, list) {
                list_del(&enclosure_dev->list);
                kfree(enclosure_dev);
        }
}

/**
 * mpi3mr_enclosure_find_by_handle - enclosure search by handle
 * @mrioc: Adapter instance reference
 * @handle: Firmware device handle of the enclosure
 *
 * This searches for enclosure device based on handle, then returns the
 * enclosure object.
 *
 * Return: Enclosure object reference or NULL
 */
struct mpi3mr_enclosure_node *mpi3mr_enclosure_find_by_handle(
        struct mpi3mr_ioc *mrioc, u16 handle)
{
        struct mpi3mr_enclosure_node *enclosure_dev, *r = NULL;

        list_for_each_entry(enclosure_dev, &mrioc->enclosure_list, list) {
                if (le16_to_cpu(enclosure_dev->pg0.enclosure_handle) != handle)
                        continue;
                r = enclosure_dev;
                goto out;
        }
out:
        return r;
}

/**
 * mpi3mr_encldev_add_chg_evt_debug - debug for enclosure event
 * @mrioc: Adapter instance reference
 * @encl_pg0: Enclosure page 0.
 * @is_added: Added event or not
 *
 * Return: Nothing.
 */
static void mpi3mr_encldev_add_chg_evt_debug(struct mpi3mr_ioc *mrioc,
        struct mpi3_enclosure_page0 *encl_pg0, u8 is_added)
{
        char *reason_str = NULL;

        if (!(mrioc->logging_level & MPI3_DEBUG_EVENT_WORK_TASK))
                return;

        if (is_added)
                reason_str = "enclosure added";
        else
                reason_str = "enclosure dev status changed";

        ioc_info(mrioc,
            "%s: handle(0x%04x), enclosure logical id(0x%016llx)\n",
            reason_str, le16_to_cpu(encl_pg0->enclosure_handle),
            (unsigned long long)le64_to_cpu(encl_pg0->enclosure_logical_id));
        ioc_info(mrioc,
            "number of slots(%d), port(%d), flags(0x%04x), present(%d)\n",
            le16_to_cpu(encl_pg0->num_slots), encl_pg0->io_unit_port,
            le16_to_cpu(encl_pg0->flags),
            ((le16_to_cpu(encl_pg0->flags) &
            MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4));
}
/**
 * mpi3mr_encldev_add_chg_evt_bh - Enclosure evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Prints information about the Enclosure device status or
 * Enclosure add events if logging is enabled and add or remove
 * the enclosure from the controller's internal list of
 * enclosures.
 *
 * Return: Nothing.
 */
static void mpi3mr_encldev_add_chg_evt_bh(struct mpi3mr_ioc *mrioc,
        struct mpi3mr_fwevt *fwevt)
{
        struct mpi3mr_enclosure_node *enclosure_dev = NULL;
        struct mpi3_enclosure_page0 *encl_pg0;
        u16 encl_handle;
        u8 added, present;

        encl_pg0 = (struct mpi3_enclosure_page0 *) fwevt->event_data;
        added = (fwevt->event_id == MPI3_EVENT_ENCL_DEVICE_ADDED) ? 1 : 0;
        mpi3mr_encldev_add_chg_evt_debug(mrioc, encl_pg0, added);

        encl_handle = le16_to_cpu(encl_pg0->enclosure_handle);
        present = ((le16_to_cpu(encl_pg0->flags) &
            MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4);

        if (encl_handle)
                enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc,
                    encl_handle);
        if (!enclosure_dev && present) {
                enclosure_dev =
                    kzalloc(sizeof(struct mpi3mr_enclosure_node),
                    GFP_KERNEL);
                if (!enclosure_dev)
                        return;
                list_add_tail(&enclosure_dev->list,
                    &mrioc->enclosure_list);
        }
        if (enclosure_dev) {
                if (!present) {
                        list_del(&enclosure_dev->list);
                        kfree(enclosure_dev);
                } else
                        memcpy(&enclosure_dev->pg0, encl_pg0,
                            sizeof(enclosure_dev->pg0));
        }
}

/**
 * mpi3mr_sastopochg_evt_debug - SASTopoChange details
 * @mrioc: Adapter instance reference
 * @event_data: SAS topology change list event data
 *
 * Prints information about the SAS topology change event.
 *
 * Return: Nothing.
 */
static void
mpi3mr_sastopochg_evt_debug(struct mpi3mr_ioc *mrioc,
        struct mpi3_event_data_sas_topology_change_list *event_data)
{
        int i;
        u16 handle;
        u8 reason_code, phy_number;
        char *status_str = NULL;
        u8 link_rate, prev_link_rate;

        switch (event_data->exp_status) {
        case MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
                status_str = "remove";
                break;
        case MPI3_EVENT_SAS_TOPO_ES_RESPONDING:
                status_str = "responding";
                break;
        case MPI3_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
                status_str = "remove delay";
                break;
        case MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER:
                status_str = "direct attached";
                break;
        default:
                status_str = "unknown status";
                break;
        }
        ioc_info(mrioc, "%s :sas topology change: (%s)\n",
            __func__, status_str);
        ioc_info(mrioc,
            "%s :\texpander_handle(0x%04x), port(%d), enclosure_handle(0x%04x) start_phy(%02d), num_entries(%d)\n",
            __func__, le16_to_cpu(event_data->expander_dev_handle),
            event_data->io_unit_port,
            le16_to_cpu(event_data->enclosure_handle),
            event_data->start_phy_num, event_data->num_entries);
        for (i = 0; i < event_data->num_entries; i++) {
                handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
                if (!handle)
                        continue;
                phy_number = event_data->start_phy_num + i;
                reason_code = event_data->phy_entry[i].status &
                    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
                switch (reason_code) {
                case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
                        status_str = "target remove";
                        break;
                case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
                        status_str = "delay target remove";
                        break;
                case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
                        status_str = "link status change";
                        break;
                case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
                        status_str = "link status no change";
                        break;
                case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
                        status_str = "target responding";
                        break;
                default:
                        status_str = "unknown";
                        break;
                }
                link_rate = event_data->phy_entry[i].link_rate >> 4;
                prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
                ioc_info(mrioc,
                    "%s :\tphy(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
                    __func__, phy_number, handle, status_str, link_rate,
                    prev_link_rate);
        }
}
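/*
 * Editor's note (added commentary): phy_entry[].link_rate carries two
 * values in one byte; the upper nibble is the current negotiated rate
 * and the lower nibble the previous one. For instance, link_rate == 0xB9
 * decodes to new == 0xB and old == 0x9; the exact code-to-speed mapping
 * follows the MPI3 specification's negotiated link rate definitions.
 */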
/**
 * mpi3mr_sastopochg_evt_bh - SASTopologyChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Prints information about the SAS topology change event and
 * for "not responding" event code, removes the device from the
 * upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_sastopochg_evt_bh(struct mpi3mr_ioc *mrioc,
        struct mpi3mr_fwevt *fwevt)
{
        struct mpi3_event_data_sas_topology_change_list *event_data =
            (struct mpi3_event_data_sas_topology_change_list *)fwevt->event_data;
        int i;
        u16 handle;
        u8 reason_code;
        u64 exp_sas_address = 0, parent_sas_address = 0;
        struct mpi3mr_hba_port *hba_port = NULL;
        struct mpi3mr_tgt_dev *tgtdev = NULL;
        struct mpi3mr_sas_node *sas_expander = NULL;
        unsigned long flags;
        u8 link_rate, prev_link_rate, parent_phy_number;

        mpi3mr_sastopochg_evt_debug(mrioc, event_data);
        if (mrioc->sas_transport_enabled) {
                hba_port = mpi3mr_get_hba_port_by_id(mrioc,
                    event_data->io_unit_port);
                if (le16_to_cpu(event_data->expander_dev_handle)) {
                        spin_lock_irqsave(&mrioc->sas_node_lock, flags);
                        sas_expander = __mpi3mr_expander_find_by_handle(mrioc,
                            le16_to_cpu(event_data->expander_dev_handle));
                        if (sas_expander) {
                                exp_sas_address = sas_expander->sas_address;
                                hba_port = sas_expander->hba_port;
                        }
                        spin_unlock_irqrestore(&mrioc->sas_node_lock, flags);
                        parent_sas_address = exp_sas_address;
                } else
                        parent_sas_address = mrioc->sas_hba.sas_address;
        }

        for (i = 0; i < event_data->num_entries; i++) {
                if (fwevt->discard)
                        return;
                handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
                if (!handle)
                        continue;
                tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
                if (!tgtdev)
                        continue;

                reason_code = event_data->phy_entry[i].status &
                    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;

                switch (reason_code) {
                case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
                        if (tgtdev->host_exposed)
                                mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
                        mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
                        mpi3mr_tgtdev_put(tgtdev);
                        break;
                case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
                case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
                case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
                {
                        if (!mrioc->sas_transport_enabled || tgtdev->non_stl
                            || tgtdev->is_hidden)
                                break;
                        link_rate = event_data->phy_entry[i].link_rate >> 4;
                        prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
                        if (link_rate == prev_link_rate)
                                break;
                        if (!parent_sas_address)
                                break;
                        parent_phy_number = event_data->start_phy_num + i;
                        mpi3mr_update_links(mrioc, parent_sas_address, handle,
                            parent_phy_number, link_rate, hba_port);
                        break;
                }
                default:
                        break;
                }
                if (tgtdev)
                        mpi3mr_tgtdev_put(tgtdev);
        }

        if (mrioc->sas_transport_enabled && (event_data->exp_status ==
            MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING)) {
                if (sas_expander)
                        mpi3mr_expander_remove(mrioc, exp_sas_address,
                            hba_port);
        }
}
/**
 * mpi3mr_pcietopochg_evt_debug - PCIeTopoChange details
 * @mrioc: Adapter instance reference
 * @event_data: PCIe topology change list event data
 *
 * Prints information about the PCIe topology change event.
 *
 * Return: Nothing.
 */
static void
mpi3mr_pcietopochg_evt_debug(struct mpi3mr_ioc *mrioc,
        struct mpi3_event_data_pcie_topology_change_list *event_data)
{
        int i;
        u16 handle;
        u16 reason_code;
        u8 port_number;
        char *status_str = NULL;
        u8 link_rate, prev_link_rate;

        switch (event_data->switch_status) {
        case MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
                status_str = "remove";
                break;
        case MPI3_EVENT_PCIE_TOPO_SS_RESPONDING:
                status_str = "responding";
                break;
        case MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
                status_str = "remove delay";
                break;
        case MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH:
                status_str = "direct attached";
                break;
        default:
                status_str = "unknown status";
                break;
        }
        ioc_info(mrioc, "%s :pcie topology change: (%s)\n",
            __func__, status_str);
        ioc_info(mrioc,
            "%s :\tswitch_handle(0x%04x), enclosure_handle(0x%04x) start_port(%02d), num_entries(%d)\n",
            __func__, le16_to_cpu(event_data->switch_dev_handle),
            le16_to_cpu(event_data->enclosure_handle),
            event_data->start_port_num, event_data->num_entries);
        for (i = 0; i < event_data->num_entries; i++) {
                handle =
                    le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
                if (!handle)
                        continue;
                port_number = event_data->start_port_num + i;
                reason_code = event_data->port_entry[i].port_status;
                switch (reason_code) {
                case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
                        status_str = "target remove";
                        break;
                case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
                        status_str = "delay target remove";
                        break;
                case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
                        status_str = "link status change";
                        break;
                case MPI3_EVENT_PCIE_TOPO_PS_NO_CHANGE:
                        status_str = "link status no change";
                        break;
                case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
                        status_str = "target responding";
                        break;
                default:
                        status_str = "unknown";
                        break;
                }
                link_rate = event_data->port_entry[i].current_port_info &
                    MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
                prev_link_rate = event_data->port_entry[i].previous_port_info &
                    MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
                ioc_info(mrioc,
                    "%s :\tport(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
                    __func__, port_number, handle, status_str, link_rate,
                    prev_link_rate);
        }
}
/**
 * mpi3mr_pcietopochg_evt_bh - PCIeTopologyChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Prints information about the PCIe topology change event and
 * for "not responding" event code, removes the device from the
 * upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc,
        struct mpi3mr_fwevt *fwevt)
{
        struct mpi3_event_data_pcie_topology_change_list *event_data =
            (struct mpi3_event_data_pcie_topology_change_list *)fwevt->event_data;
        int i;
        u16 handle;
        u8 reason_code;
        struct mpi3mr_tgt_dev *tgtdev = NULL;

        mpi3mr_pcietopochg_evt_debug(mrioc, event_data);

        for (i = 0; i < event_data->num_entries; i++) {
                if (fwevt->discard)
                        return;
                handle =
                    le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
                if (!handle)
                        continue;
                tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
                if (!tgtdev)
                        continue;

                reason_code = event_data->port_entry[i].port_status;

                switch (reason_code) {
                case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
                        if (tgtdev->host_exposed)
                                mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
                        mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false);
                        mpi3mr_tgtdev_put(tgtdev);
                        break;
                default:
                        break;
                }
                if (tgtdev)
                        mpi3mr_tgtdev_put(tgtdev);
        }
}

/**
 * mpi3mr_logdata_evt_bh - Log data event bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Extracts the event data and calls application interfacing
 * function to process the event further.
 *
 * Return: Nothing.
 */
static void mpi3mr_logdata_evt_bh(struct mpi3mr_ioc *mrioc,
        struct mpi3mr_fwevt *fwevt)
{
        mpi3mr_app_save_logdata(mrioc, fwevt->event_data,
            fwevt->event_data_size);
}

/**
 * mpi3mr_update_sdev_qd - Update SCSI device queue depth
 * @sdev: SCSI device reference
 * @data: Queue depth reference
 *
 * This is an iterator function called for each SCSI device in a
 * target to update the QD of each SCSI device.
 *
 * Return: Nothing.
 */
static void mpi3mr_update_sdev_qd(struct scsi_device *sdev, void *data)
{
        u16 *q_depth = (u16 *)data;

        scsi_change_queue_depth(sdev, (int)*q_depth);
        sdev->max_queue_depth = sdev->queue_depth;
}
1846 */ 1847 static void mpi3mr_set_qd_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc, 1848 struct mpi3mr_throttle_group_info *tg) 1849 { 1850 unsigned long flags; 1851 struct mpi3mr_tgt_dev *tgtdev; 1852 struct mpi3mr_stgt_priv_data *tgt_priv; 1853 1854 1855 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 1856 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) { 1857 if (tgtdev->starget && tgtdev->starget->hostdata) { 1858 tgt_priv = tgtdev->starget->hostdata; 1859 if (tgt_priv->throttle_group == tg) { 1860 dprint_event_bh(mrioc, 1861 "updating qd due to throttling for persist_id(%d) original_qd(%d), reduced_qd (%d)\n", 1862 tgt_priv->perst_id, tgtdev->q_depth, 1863 tg->modified_qd); 1864 starget_for_each_device(tgtdev->starget, 1865 (void *)&tg->modified_qd, 1866 mpi3mr_update_sdev_qd); 1867 } 1868 } 1869 } 1870 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 1871 } 1872 1873 /** 1874 * mpi3mr_fwevt_bh - Firmware event bottomhalf handler 1875 * @mrioc: Adapter instance reference 1876 * @fwevt: Firmware event reference 1877 * 1878 * Identifies the firmware event and calls corresponding bottom 1879 * half handler and sends event acknowledgment if required. 1880 * 1881 * Return: Nothing. 1882 */ 1883 static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc, 1884 struct mpi3mr_fwevt *fwevt) 1885 { 1886 struct mpi3_device_page0 *dev_pg0 = NULL; 1887 u16 perst_id, handle, dev_info; 1888 struct mpi3_device0_sas_sata_format *sasinf = NULL; 1889 1890 mpi3mr_fwevt_del_from_list(mrioc, fwevt); 1891 mrioc->current_event = fwevt; 1892 1893 if (mrioc->stop_drv_processing) 1894 goto out; 1895 1896 if (mrioc->unrecoverable) { 1897 dprint_event_bh(mrioc, 1898 "ignoring event(0x%02x) in bottom half handler due to unrecoverable controller\n", 1899 fwevt->event_id); 1900 goto out; 1901 } 1902 1903 if (!fwevt->process_evt) 1904 goto evt_ack; 1905 1906 switch (fwevt->event_id) { 1907 case MPI3_EVENT_DEVICE_ADDED: 1908 { 1909 dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data; 1910 perst_id = le16_to_cpu(dev_pg0->persistent_id); 1911 handle = le16_to_cpu(dev_pg0->dev_handle); 1912 if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID) 1913 mpi3mr_report_tgtdev_to_host(mrioc, perst_id); 1914 else if (mrioc->sas_transport_enabled && 1915 (dev_pg0->device_form == MPI3_DEVICE_DEVFORM_SAS_SATA)) { 1916 sasinf = &dev_pg0->device_specific.sas_sata_format; 1917 dev_info = le16_to_cpu(sasinf->device_info); 1918 if (!mrioc->sas_hba.num_phys) 1919 mpi3mr_sas_host_add(mrioc); 1920 else 1921 mpi3mr_sas_host_refresh(mrioc); 1922 1923 if (mpi3mr_is_expander_device(dev_info)) 1924 mpi3mr_expander_add(mrioc, handle); 1925 } 1926 break; 1927 } 1928 case MPI3_EVENT_DEVICE_INFO_CHANGED: 1929 { 1930 dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data; 1931 perst_id = le16_to_cpu(dev_pg0->persistent_id); 1932 if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID) 1933 mpi3mr_devinfochg_evt_bh(mrioc, dev_pg0); 1934 break; 1935 } 1936 case MPI3_EVENT_DEVICE_STATUS_CHANGE: 1937 { 1938 mpi3mr_devstatuschg_evt_bh(mrioc, fwevt); 1939 break; 1940 } 1941 case MPI3_EVENT_ENCL_DEVICE_ADDED: 1942 case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE: 1943 { 1944 mpi3mr_encldev_add_chg_evt_bh(mrioc, fwevt); 1945 break; 1946 } 1947 1948 case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST: 1949 { 1950 mpi3mr_sastopochg_evt_bh(mrioc, fwevt); 1951 break; 1952 } 1953 case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: 1954 { 1955 mpi3mr_pcietopochg_evt_bh(mrioc, fwevt); 1956 break; 1957 } 1958 case MPI3_EVENT_LOG_DATA: 1959 { 1960 mpi3mr_logdata_evt_bh(mrioc, fwevt); 1961 break;
1962 } 1963 case MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION: 1964 { 1965 struct mpi3mr_throttle_group_info *tg; 1966 1967 tg = *(struct mpi3mr_throttle_group_info **)fwevt->event_data; 1968 dprint_event_bh(mrioc, 1969 "qd reduction event processed for tg_id(%d) reduction_needed(%d)\n", 1970 tg->id, tg->need_qd_reduction); 1971 if (tg->need_qd_reduction) { 1972 mpi3mr_set_qd_for_all_vd_in_tg(mrioc, tg); 1973 tg->need_qd_reduction = 0; 1974 } 1975 break; 1976 } 1977 case MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH: 1978 { 1979 while (mrioc->device_refresh_on) 1980 msleep(500); 1981 1982 dprint_event_bh(mrioc, 1983 "scan for non responding and newly added devices after soft reset started\n"); 1984 if (mrioc->sas_transport_enabled) { 1985 mpi3mr_refresh_sas_ports(mrioc); 1986 mpi3mr_refresh_expanders(mrioc); 1987 } 1988 mpi3mr_rfresh_tgtdevs(mrioc); 1989 ioc_info(mrioc, 1990 "scan for non responding and newly added devices after soft reset completed\n"); 1991 break; 1992 } 1993 default: 1994 break; 1995 } 1996 1997 evt_ack: 1998 if (fwevt->send_ack) 1999 mpi3mr_process_event_ack(mrioc, fwevt->event_id, 2000 fwevt->evt_ctx); 2001 out: 2002 /* Put fwevt reference count to neutralize kref_init increment */ 2003 mpi3mr_fwevt_put(fwevt); 2004 mrioc->current_event = NULL; 2005 } 2006 2007 /** 2008 * mpi3mr_fwevt_worker - Firmware event worker 2009 * @work: Work struct containing firmware event 2010 * 2011 * Extracts the firmware event and calls mpi3mr_fwevt_bh. 2012 * 2013 * Return: Nothing. 2014 */ 2015 static void mpi3mr_fwevt_worker(struct work_struct *work) 2016 { 2017 struct mpi3mr_fwevt *fwevt = container_of(work, struct mpi3mr_fwevt, 2018 work); 2019 mpi3mr_fwevt_bh(fwevt->mrioc, fwevt); 2020 /* 2021 * Put fwevt reference count after 2022 * dequeuing it from worker queue 2023 */ 2024 mpi3mr_fwevt_put(fwevt); 2025 } 2026 2027 /** 2028 * mpi3mr_create_tgtdev - Create and add a target device 2029 * @mrioc: Adapter instance reference 2030 * @dev_pg0: Device Page 0 data 2031 * 2032 * If the device specified by the device page 0 data is not 2033 * present in the driver's internal list, allocate the memory 2034 * for the device, populate the data and add to the list, else 2035 * update the device data. The key is persistent ID. 2036 * 2037 * Return: 0 on success, -ENOMEM on memory allocation failure 2038 */ 2039 static int mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc, 2040 struct mpi3_device_page0 *dev_pg0) 2041 { 2042 int retval = 0; 2043 struct mpi3mr_tgt_dev *tgtdev = NULL; 2044 u16 perst_id = 0; 2045 unsigned long flags; 2046 2047 perst_id = le16_to_cpu(dev_pg0->persistent_id); 2048 if (perst_id == MPI3_DEVICE0_PERSISTENTID_INVALID) 2049 return retval; 2050 2051 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 2052 tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id); 2053 if (tgtdev) 2054 tgtdev->state = MPI3MR_DEV_CREATED; 2055 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 2056 2057 if (tgtdev) { 2058 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true); 2059 mpi3mr_tgtdev_put(tgtdev); 2060 } else { 2061 tgtdev = mpi3mr_alloc_tgtdev(); 2062 if (!tgtdev) 2063 return -ENOMEM; 2064 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true); 2065 mpi3mr_tgtdev_add_to_list(mrioc, tgtdev); 2066 } 2067 2068 return retval; 2069 } 2070 2071 /** 2072 * mpi3mr_flush_delayed_cmd_lists - Flush pending commands 2073 * @mrioc: Adapter instance reference 2074 * 2075 * Flush pending commands in the delayed lists due to a 2076 * controller reset or driver removal as a cleanup. 
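* Nodes on both the delayed device-removal handshake list and the delayed event-ack list are simply unlinked and freed; no requests are issued to the firmware for them.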
2077 * 2078 * Return: Nothing 2079 */ 2080 void mpi3mr_flush_delayed_cmd_lists(struct mpi3mr_ioc *mrioc) 2081 { 2082 struct delayed_dev_rmhs_node *_rmhs_node; 2083 struct delayed_evt_ack_node *_evtack_node; 2084 2085 dprint_reset(mrioc, "flushing delayed dev_remove_hs commands\n"); 2086 while (!list_empty(&mrioc->delayed_rmhs_list)) { 2087 _rmhs_node = list_entry(mrioc->delayed_rmhs_list.next, 2088 struct delayed_dev_rmhs_node, list); 2089 list_del(&_rmhs_node->list); 2090 kfree(_rmhs_node); 2091 } 2092 dprint_reset(mrioc, "flushing delayed event ack commands\n"); 2093 while (!list_empty(&mrioc->delayed_evtack_cmds_list)) { 2094 _evtack_node = list_entry(mrioc->delayed_evtack_cmds_list.next, 2095 struct delayed_evt_ack_node, list); 2096 list_del(&_evtack_node->list); 2097 kfree(_evtack_node); 2098 } 2099 } 2100 2101 /** 2102 * mpi3mr_dev_rmhs_complete_iou - Device removal IOUC completion 2103 * @mrioc: Adapter instance reference 2104 * @drv_cmd: Internal command tracker 2105 * 2106 * Issues a target reset TM to the firmware from the device 2107 * removal TM pend list or retries the removal handshake sequence 2108 * based on the IOU control request IOC status. 2109 * 2110 * Return: Nothing 2111 */ 2112 static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_ioc *mrioc, 2113 struct mpi3mr_drv_cmd *drv_cmd) 2114 { 2115 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN; 2116 struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL; 2117 2118 if (drv_cmd->state & MPI3MR_CMD_RESET) 2119 goto clear_drv_cmd; 2120 2121 ioc_info(mrioc, 2122 "%s :dev_rmhs_iouctrl_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x)\n", 2123 __func__, drv_cmd->dev_handle, drv_cmd->ioc_status, 2124 drv_cmd->ioc_loginfo); 2125 if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) { 2126 if (drv_cmd->retry_count < MPI3MR_DEV_RMHS_RETRY_COUNT) { 2127 drv_cmd->retry_count++; 2128 ioc_info(mrioc, 2129 "%s :dev_rmhs_iouctrl_complete: handle(0x%04x)retrying handshake retry=%d\n", 2130 __func__, drv_cmd->dev_handle, 2131 drv_cmd->retry_count); 2132 mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle, 2133 drv_cmd, drv_cmd->iou_rc); 2134 return; 2135 } 2136 ioc_err(mrioc, 2137 "%s :dev removal handshake failed after all retries: handle(0x%04x)\n", 2138 __func__, drv_cmd->dev_handle); 2139 } else { 2140 ioc_info(mrioc, 2141 "%s :dev removal handshake completed successfully: handle(0x%04x)\n", 2142 __func__, drv_cmd->dev_handle); 2143 clear_bit(drv_cmd->dev_handle, mrioc->removepend_bitmap); 2144 } 2145 2146 if (!list_empty(&mrioc->delayed_rmhs_list)) { 2147 delayed_dev_rmhs = list_entry(mrioc->delayed_rmhs_list.next, 2148 struct delayed_dev_rmhs_node, list); 2149 drv_cmd->dev_handle = delayed_dev_rmhs->handle; 2150 drv_cmd->retry_count = 0; 2151 drv_cmd->iou_rc = delayed_dev_rmhs->iou_rc; 2152 ioc_info(mrioc, 2153 "%s :dev_rmhs_iouctrl_complete: processing delayed TM: handle(0x%04x)\n", 2154 __func__, drv_cmd->dev_handle); 2155 mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle, drv_cmd, 2156 drv_cmd->iou_rc); 2157 list_del(&delayed_dev_rmhs->list); 2158 kfree(delayed_dev_rmhs); 2159 return; 2160 } 2161 2162 clear_drv_cmd: 2163 drv_cmd->state = MPI3MR_CMD_NOTUSED; 2164 drv_cmd->callback = NULL; 2165 drv_cmd->retry_count = 0; 2166 drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 2167 clear_bit(cmd_idx, mrioc->devrem_bitmap); 2168 } 2169 2170 /** 2171 * mpi3mr_dev_rmhs_complete_tm - Device removal TM completion 2172 * @mrioc: Adapter instance reference 2173 * @drv_cmd: Internal command tracker 2174 * 2175 * Issues a
target reset TM to the firmware from the device 2176 * removal TM pend list or issues an IO unit control request as 2177 * part of device removal or hidden acknowledgment handshake. 2178 * 2179 * Return: Nothing 2180 */ 2181 static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_ioc *mrioc, 2182 struct mpi3mr_drv_cmd *drv_cmd) 2183 { 2184 struct mpi3_iounit_control_request iou_ctrl; 2185 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN; 2186 struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL; 2187 int retval; 2188 2189 if (drv_cmd->state & MPI3MR_CMD_RESET) 2190 goto clear_drv_cmd; 2191 2192 if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID) 2193 tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply; 2194 2195 if (tm_reply) 2196 pr_info(IOCNAME 2197 "dev_rmhs_tr_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x), term_count(%d)\n", 2198 mrioc->name, drv_cmd->dev_handle, drv_cmd->ioc_status, 2199 drv_cmd->ioc_loginfo, 2200 le32_to_cpu(tm_reply->termination_count)); 2201 2202 pr_info(IOCNAME "Issuing IOU CTL: handle(0x%04x) dev_rmhs idx(%d)\n", 2203 mrioc->name, drv_cmd->dev_handle, cmd_idx); 2204 2205 memset(&iou_ctrl, 0, sizeof(iou_ctrl)); 2206 2207 drv_cmd->state = MPI3MR_CMD_PENDING; 2208 drv_cmd->is_waiting = 0; 2209 drv_cmd->callback = mpi3mr_dev_rmhs_complete_iou; 2210 iou_ctrl.operation = drv_cmd->iou_rc; 2211 iou_ctrl.param16[0] = cpu_to_le16(drv_cmd->dev_handle); 2212 iou_ctrl.host_tag = cpu_to_le16(drv_cmd->host_tag); 2213 iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL; 2214 2215 retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl, sizeof(iou_ctrl), 2216 1); 2217 if (retval) { 2218 pr_err(IOCNAME "Issue DevRmHsTMIOUCTL: Admin post failed\n", 2219 mrioc->name); 2220 goto clear_drv_cmd; 2221 } 2222 2223 return; 2224 clear_drv_cmd: 2225 drv_cmd->state = MPI3MR_CMD_NOTUSED; 2226 drv_cmd->callback = NULL; 2227 drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 2228 drv_cmd->retry_count = 0; 2229 clear_bit(cmd_idx, mrioc->devrem_bitmap); 2230 } 2231 2232 /** 2233 * mpi3mr_dev_rmhs_send_tm - Issue TM for device removal 2234 * @mrioc: Adapter instance reference 2235 * @handle: Device handle 2236 * @cmdparam: Internal command tracker 2237 * @iou_rc: IO unit reason code 2238 * 2239 * Issues a target reset TM to the firmware or adds it to a pend 2240 * list as part of device removal or hidden acknowledgment 2241 * handshake.
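* For example, when all MPI3MR_NUM_DEVRMCMD command trackers are in use, the handle and reason code are queued on delayed_rmhs_list and the TM is issued later from mpi3mr_dev_rmhs_complete_iou().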
2242 * 2243 * Return: Nothing 2244 */ 2245 static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle, 2246 struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc) 2247 { 2248 struct mpi3_scsi_task_mgmt_request tm_req; 2249 int retval = 0; 2250 u16 cmd_idx = MPI3MR_NUM_DEVRMCMD; 2251 u8 retrycount = 5; 2252 struct mpi3mr_drv_cmd *drv_cmd = cmdparam; 2253 struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL; 2254 struct mpi3mr_tgt_dev *tgtdev = NULL; 2255 unsigned long flags; 2256 2257 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 2258 tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle); 2259 if (tgtdev && (iou_rc == MPI3_CTRL_OP_REMOVE_DEVICE)) 2260 tgtdev->state = MPI3MR_DEV_REMOVE_HS_STARTED; 2261 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 2262 2263 if (drv_cmd) 2264 goto issue_cmd; 2265 do { 2266 cmd_idx = find_first_zero_bit(mrioc->devrem_bitmap, 2267 MPI3MR_NUM_DEVRMCMD); 2268 if (cmd_idx < MPI3MR_NUM_DEVRMCMD) { 2269 if (!test_and_set_bit(cmd_idx, mrioc->devrem_bitmap)) 2270 break; 2271 cmd_idx = MPI3MR_NUM_DEVRMCMD; 2272 } 2273 } while (retrycount--); 2274 2275 if (cmd_idx >= MPI3MR_NUM_DEVRMCMD) { 2276 delayed_dev_rmhs = kzalloc(sizeof(*delayed_dev_rmhs), 2277 GFP_ATOMIC); 2278 if (!delayed_dev_rmhs) 2279 return; 2280 INIT_LIST_HEAD(&delayed_dev_rmhs->list); 2281 delayed_dev_rmhs->handle = handle; 2282 delayed_dev_rmhs->iou_rc = iou_rc; 2283 list_add_tail(&delayed_dev_rmhs->list, 2284 &mrioc->delayed_rmhs_list); 2285 ioc_info(mrioc, "%s :DevRmHs: tr:handle(0x%04x) is postponed\n", 2286 __func__, handle); 2287 return; 2288 } 2289 drv_cmd = &mrioc->dev_rmhs_cmds[cmd_idx]; 2290 2291 issue_cmd: 2292 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN; 2293 ioc_info(mrioc, 2294 "%s :Issuing TR TM: for devhandle 0x%04x with dev_rmhs %d\n", 2295 __func__, handle, cmd_idx); 2296 2297 memset(&tm_req, 0, sizeof(tm_req)); 2298 if (drv_cmd->state & MPI3MR_CMD_PENDING) { 2299 ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__); 2300 goto out; 2301 } 2302 drv_cmd->state = MPI3MR_CMD_PENDING; 2303 drv_cmd->is_waiting = 0; 2304 drv_cmd->callback = mpi3mr_dev_rmhs_complete_tm; 2305 drv_cmd->dev_handle = handle; 2306 drv_cmd->iou_rc = iou_rc; 2307 tm_req.dev_handle = cpu_to_le16(handle); 2308 tm_req.task_type = MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET; 2309 tm_req.host_tag = cpu_to_le16(drv_cmd->host_tag); 2310 tm_req.task_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INVALID); 2311 tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT; 2312 2313 set_bit(handle, mrioc->removepend_bitmap); 2314 retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1); 2315 if (retval) { 2316 ioc_err(mrioc, "%s :Issue DevRmHsTM: Admin Post failed\n", 2317 __func__); 2318 goto out_failed; 2319 } 2320 out: 2321 return; 2322 out_failed: 2323 drv_cmd->state = MPI3MR_CMD_NOTUSED; 2324 drv_cmd->callback = NULL; 2325 drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 2326 drv_cmd->retry_count = 0; 2327 clear_bit(cmd_idx, mrioc->devrem_bitmap); 2328 } 2329 2330 /** 2331 * mpi3mr_complete_evt_ack - event ack request completion 2332 * @mrioc: Adapter instance reference 2333 * @drv_cmd: Internal command tracker 2334 * 2335 * This is the completion handler for non blocking event 2336 * acknowledgment sent to the firmware and this will issue any 2337 * pending event acknowledgment request. 
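* If a delayed acknowledgment is queued on delayed_evtack_cmds_list, it is reissued from here reusing the just-completed command tracker.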
2338 * 2339 * Return: Nothing 2340 */ 2341 static void mpi3mr_complete_evt_ack(struct mpi3mr_ioc *mrioc, 2342 struct mpi3mr_drv_cmd *drv_cmd) 2343 { 2344 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN; 2345 struct delayed_evt_ack_node *delayed_evtack = NULL; 2346 2347 if (drv_cmd->state & MPI3MR_CMD_RESET) 2348 goto clear_drv_cmd; 2349 2350 if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) { 2351 dprint_event_th(mrioc, 2352 "immediate event ack failed with ioc_status(0x%04x) log_info(0x%08x)\n", 2353 (drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 2354 drv_cmd->ioc_loginfo); 2355 } 2356 2357 if (!list_empty(&mrioc->delayed_evtack_cmds_list)) { 2358 delayed_evtack = 2359 list_entry(mrioc->delayed_evtack_cmds_list.next, 2360 struct delayed_evt_ack_node, list); 2361 mpi3mr_send_event_ack(mrioc, delayed_evtack->event, drv_cmd, 2362 delayed_evtack->event_ctx); 2363 list_del(&delayed_evtack->list); 2364 kfree(delayed_evtack); 2365 return; 2366 } 2367 clear_drv_cmd: 2368 drv_cmd->state = MPI3MR_CMD_NOTUSED; 2369 drv_cmd->callback = NULL; 2370 clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap); 2371 } 2372 2373 /** 2374 * mpi3mr_send_event_ack - Issue event acknowledgment request 2375 * @mrioc: Adapter instance reference 2376 * @event: MPI3 event id 2377 * @cmdparam: Internal command tracker 2378 * @event_ctx: event context 2379 * 2380 * Issues event acknowledgment request to the firmware if there 2381 * is a free command to send the event ack, else adds it to a pend 2382 * list so that it will be processed on completion of a prior 2383 * event acknowledgment. 2384 * 2385 * Return: Nothing 2386 */ 2387 static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event, 2388 struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx) 2389 { 2390 struct mpi3_event_ack_request evtack_req; 2391 int retval = 0; 2392 u8 retrycount = 5; 2393 u16 cmd_idx = MPI3MR_NUM_EVTACKCMD; 2394 struct mpi3mr_drv_cmd *drv_cmd = cmdparam; 2395 struct delayed_evt_ack_node *delayed_evtack = NULL; 2396 2397 if (drv_cmd) { 2398 dprint_event_th(mrioc, 2399 "sending delayed event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n", 2400 event, event_ctx); 2401 goto issue_cmd; 2402 } 2403 dprint_event_th(mrioc, 2404 "sending event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n", 2405 event, event_ctx); 2406 do { 2407 cmd_idx = find_first_zero_bit(mrioc->evtack_cmds_bitmap, 2408 MPI3MR_NUM_EVTACKCMD); 2409 if (cmd_idx < MPI3MR_NUM_EVTACKCMD) { 2410 if (!test_and_set_bit(cmd_idx, 2411 mrioc->evtack_cmds_bitmap)) 2412 break; 2413 cmd_idx = MPI3MR_NUM_EVTACKCMD; 2414 } 2415 } while (retrycount--); 2416 2417 if (cmd_idx >= MPI3MR_NUM_EVTACKCMD) { 2418 delayed_evtack = kzalloc(sizeof(*delayed_evtack), 2419 GFP_ATOMIC); 2420 if (!delayed_evtack) 2421 return; 2422 INIT_LIST_HEAD(&delayed_evtack->list); 2423 delayed_evtack->event = event; 2424 delayed_evtack->event_ctx = event_ctx; 2425 list_add_tail(&delayed_evtack->list, 2426 &mrioc->delayed_evtack_cmds_list); 2427 dprint_event_th(mrioc, 2428 "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is postponed\n", 2429 event, event_ctx); 2430 return; 2431 } 2432 drv_cmd = &mrioc->evtack_cmds[cmd_idx]; 2433 2434 issue_cmd: 2435 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN; 2436 2437 memset(&evtack_req, 0, sizeof(evtack_req)); 2438 if (drv_cmd->state & MPI3MR_CMD_PENDING) { 2439 dprint_event_th(mrioc, 2440 "sending event ack failed due to command in use\n"); 2441 goto out; 2442 } 2443 drv_cmd->state = MPI3MR_CMD_PENDING; 2444
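/* non-blocking request: completion is driven by the mpi3mr_complete_evt_ack() callback set below */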
drv_cmd->is_waiting = 0; 2445 drv_cmd->callback = mpi3mr_complete_evt_ack; 2446 evtack_req.host_tag = cpu_to_le16(drv_cmd->host_tag); 2447 evtack_req.function = MPI3_FUNCTION_EVENT_ACK; 2448 evtack_req.event = event; 2449 evtack_req.event_context = cpu_to_le32(event_ctx); 2450 retval = mpi3mr_admin_request_post(mrioc, &evtack_req, 2451 sizeof(evtack_req), 1); 2452 if (retval) { 2453 dprint_event_th(mrioc, 2454 "posting event ack request failed\n"); 2455 goto out_failed; 2456 } 2457 2458 dprint_event_th(mrioc, 2459 "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is posted\n", 2460 event, event_ctx); 2461 out: 2462 return; 2463 out_failed: 2464 drv_cmd->state = MPI3MR_CMD_NOTUSED; 2465 drv_cmd->callback = NULL; 2466 clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap); 2467 } 2468 2469 /** 2470 * mpi3mr_pcietopochg_evt_th - PCIETopologyChange evt tophalf 2471 * @mrioc: Adapter instance reference 2472 * @event_reply: event data 2473 * 2474 * Checks for the reason code and based on that either block I/O 2475 * to device, or unblock I/O to the device, or start the device 2476 * removal handshake with reason as remove with the firmware for 2477 * PCIe devices. 2478 * 2479 * Return: Nothing 2480 */ 2481 static void mpi3mr_pcietopochg_evt_th(struct mpi3mr_ioc *mrioc, 2482 struct mpi3_event_notification_reply *event_reply) 2483 { 2484 struct mpi3_event_data_pcie_topology_change_list *topo_evt = 2485 (struct mpi3_event_data_pcie_topology_change_list *)event_reply->event_data; 2486 int i; 2487 u16 handle; 2488 u8 reason_code; 2489 struct mpi3mr_tgt_dev *tgtdev = NULL; 2490 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 2491 2492 for (i = 0; i < topo_evt->num_entries; i++) { 2493 handle = le16_to_cpu(topo_evt->port_entry[i].attached_dev_handle); 2494 if (!handle) 2495 continue; 2496 reason_code = topo_evt->port_entry[i].port_status; 2497 scsi_tgt_priv_data = NULL; 2498 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 2499 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) 2500 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 2501 tgtdev->starget->hostdata; 2502 switch (reason_code) { 2503 case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING: 2504 if (scsi_tgt_priv_data) { 2505 scsi_tgt_priv_data->dev_removed = 1; 2506 scsi_tgt_priv_data->dev_removedelay = 0; 2507 atomic_set(&scsi_tgt_priv_data->block_io, 0); 2508 } 2509 mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL, 2510 MPI3_CTRL_OP_REMOVE_DEVICE); 2511 break; 2512 case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING: 2513 if (scsi_tgt_priv_data) { 2514 scsi_tgt_priv_data->dev_removedelay = 1; 2515 atomic_inc(&scsi_tgt_priv_data->block_io); 2516 } 2517 break; 2518 case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING: 2519 if (scsi_tgt_priv_data && 2520 scsi_tgt_priv_data->dev_removedelay) { 2521 scsi_tgt_priv_data->dev_removedelay = 0; 2522 atomic_dec_if_positive 2523 (&scsi_tgt_priv_data->block_io); 2524 } 2525 break; 2526 case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED: 2527 default: 2528 break; 2529 } 2530 if (tgtdev) 2531 mpi3mr_tgtdev_put(tgtdev); 2532 } 2533 } 2534 2535 /** 2536 * mpi3mr_sastopochg_evt_th - SASTopologyChange evt tophalf 2537 * @mrioc: Adapter instance reference 2538 * @event_reply: event data 2539 * 2540 * Checks for the reason code and based on that either block I/O 2541 * to device, or unblock I/O to the device, or start the device 2542 * removal handshake with reason as remove with the firmware for 2543 * SAS/SATA devices.
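* For example, a phy status of RC_TARG_NOT_RESPONDING starts the remove-device handshake through mpi3mr_dev_rmhs_send_tm(), while RC_DELAY_NOT_RESPONDING only blocks I/O until the device responds again.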
2544 * 2545 * Return: Nothing 2546 */ 2547 static void mpi3mr_sastopochg_evt_th(struct mpi3mr_ioc *mrioc, 2548 struct mpi3_event_notification_reply *event_reply) 2549 { 2550 struct mpi3_event_data_sas_topology_change_list *topo_evt = 2551 (struct mpi3_event_data_sas_topology_change_list *)event_reply->event_data; 2552 int i; 2553 u16 handle; 2554 u8 reason_code; 2555 struct mpi3mr_tgt_dev *tgtdev = NULL; 2556 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 2557 2558 for (i = 0; i < topo_evt->num_entries; i++) { 2559 handle = le16_to_cpu(topo_evt->phy_entry[i].attached_dev_handle); 2560 if (!handle) 2561 continue; 2562 reason_code = topo_evt->phy_entry[i].status & 2563 MPI3_EVENT_SAS_TOPO_PHY_RC_MASK; 2564 scsi_tgt_priv_data = NULL; 2565 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 2566 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) 2567 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 2568 tgtdev->starget->hostdata; 2569 switch (reason_code) { 2570 case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING: 2571 if (scsi_tgt_priv_data) { 2572 scsi_tgt_priv_data->dev_removed = 1; 2573 scsi_tgt_priv_data->dev_removedelay = 0; 2574 atomic_set(&scsi_tgt_priv_data->block_io, 0); 2575 } 2576 mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL, 2577 MPI3_CTRL_OP_REMOVE_DEVICE); 2578 break; 2579 case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING: 2580 if (scsi_tgt_priv_data) { 2581 scsi_tgt_priv_data->dev_removedelay = 1; 2582 atomic_inc(&scsi_tgt_priv_data->block_io); 2583 } 2584 break; 2585 case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING: 2586 if (scsi_tgt_priv_data && 2587 scsi_tgt_priv_data->dev_removedelay) { 2588 scsi_tgt_priv_data->dev_removedelay = 0; 2589 atomic_dec_if_positive 2590 (&scsi_tgt_priv_data->block_io); 2591 } 2592 break; 2593 case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED: 2594 default: 2595 break; 2596 } 2597 if (tgtdev) 2598 mpi3mr_tgtdev_put(tgtdev); 2599 } 2600 } 2601 2602 /** 2603 * mpi3mr_devstatuschg_evt_th - DeviceStatusChange evt tophalf 2604 * @mrioc: Adapter instance reference 2605 * @event_reply: event data 2606 * 2607 * Checks for the reason code and based on that either block I/O 2608 * to device, or unblock I/O to the device, or start the device 2609 * removal handshake with reason as remove/hide acknowledgment 2610 * with the firmware. 
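* Reason code to action mapping used below: reset started blocks I/O, reset completed unblocks it, RC_HIDDEN marks the device removed and sends a hidden acknowledgment, and RC_VD_NOT_RESPONDING marks it removed and sends a remove-device handshake.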
2611 * 2612 * Return: Nothing 2613 */ 2614 static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_ioc *mrioc, 2615 struct mpi3_event_notification_reply *event_reply) 2616 { 2617 u16 dev_handle = 0; 2618 u8 ublock = 0, block = 0, hide = 0, delete = 0, remove = 0; 2619 struct mpi3mr_tgt_dev *tgtdev = NULL; 2620 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 2621 struct mpi3_event_data_device_status_change *evtdata = 2622 (struct mpi3_event_data_device_status_change *)event_reply->event_data; 2623 2624 if (mrioc->stop_drv_processing) 2625 goto out; 2626 2627 dev_handle = le16_to_cpu(evtdata->dev_handle); 2628 2629 switch (evtdata->reason_code) { 2630 case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT: 2631 case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT: 2632 block = 1; 2633 break; 2634 case MPI3_EVENT_DEV_STAT_RC_HIDDEN: 2635 delete = 1; 2636 hide = 1; 2637 break; 2638 case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING: 2639 delete = 1; 2640 remove = 1; 2641 break; 2642 case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP: 2643 case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP: 2644 ublock = 1; 2645 break; 2646 default: 2647 break; 2648 } 2649 2650 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle); 2651 if (!tgtdev) 2652 goto out; 2653 if (hide) 2654 tgtdev->is_hidden = hide; 2655 if (tgtdev->starget && tgtdev->starget->hostdata) { 2656 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 2657 tgtdev->starget->hostdata; 2658 if (block) 2659 atomic_inc(&scsi_tgt_priv_data->block_io); 2660 if (delete) 2661 scsi_tgt_priv_data->dev_removed = 1; 2662 if (ublock) 2663 atomic_dec_if_positive(&scsi_tgt_priv_data->block_io); 2664 } 2665 if (remove) 2666 mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL, 2667 MPI3_CTRL_OP_REMOVE_DEVICE); 2668 if (hide) 2669 mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL, 2670 MPI3_CTRL_OP_HIDDEN_ACK); 2671 2672 out: 2673 if (tgtdev) 2674 mpi3mr_tgtdev_put(tgtdev); 2675 } 2676 2677 /** 2678 * mpi3mr_preparereset_evt_th - Prepare for reset event tophalf 2679 * @mrioc: Adapter instance reference 2680 * @event_reply: event data 2681 * 2682 * Blocks and unblocks host level I/O based on the reason code. 2683 * 2684 * Return: Nothing 2685 */ 2686 static void mpi3mr_preparereset_evt_th(struct mpi3mr_ioc *mrioc, 2687 struct mpi3_event_notification_reply *event_reply) 2688 { 2689 struct mpi3_event_data_prepare_for_reset *evtdata = 2690 (struct mpi3_event_data_prepare_for_reset *)event_reply->event_data; 2691 2692 if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_START) { 2693 dprint_event_th(mrioc, 2694 "prepare for reset event top half with rc=start\n"); 2695 if (mrioc->prepare_for_reset) 2696 return; 2697 mrioc->prepare_for_reset = 1; 2698 mrioc->prepare_for_reset_timeout_counter = 0; 2699 } else if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_ABORT) { 2700 dprint_event_th(mrioc, 2701 "prepare for reset top half with rc=abort\n"); 2702 mrioc->prepare_for_reset = 0; 2703 mrioc->prepare_for_reset_timeout_counter = 0; 2704 } 2705 if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK) 2706 == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED) 2707 mpi3mr_send_event_ack(mrioc, event_reply->event, NULL, 2708 le32_to_cpu(event_reply->event_context)); 2709 } 2710 2711 /** 2712 * mpi3mr_energypackchg_evt_th - Energy pack change evt tophalf 2713 * @mrioc: Adapter instance reference 2714 * @event_reply: event data 2715 * 2716 * Identifies the new shutdown timeout value and updates it.
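* A reported timeout of zero is treated as invalid and the cached facts.shutdown_timeout is left unchanged.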
2717 * 2718 * Return: Nothing 2719 */ 2720 static void mpi3mr_energypackchg_evt_th(struct mpi3mr_ioc *mrioc, 2721 struct mpi3_event_notification_reply *event_reply) 2722 { 2723 struct mpi3_event_data_energy_pack_change *evtdata = 2724 (struct mpi3_event_data_energy_pack_change *)event_reply->event_data; 2725 u16 shutdown_timeout = le16_to_cpu(evtdata->shutdown_timeout); 2726 2727 if (shutdown_timeout <= 0) { 2728 ioc_warn(mrioc, 2729 "%s :Invalid Shutdown Timeout received = %d\n", 2730 __func__, shutdown_timeout); 2731 return; 2732 } 2733 2734 ioc_info(mrioc, 2735 "%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n", 2736 __func__, mrioc->facts.shutdown_timeout, shutdown_timeout); 2737 mrioc->facts.shutdown_timeout = shutdown_timeout; 2738 } 2739 2740 /** 2741 * mpi3mr_cablemgmt_evt_th - Cable management event tophalf 2742 * @mrioc: Adapter instance reference 2743 * @event_reply: event data 2744 * 2745 * Displays cable management event details. 2746 * 2747 * Return: Nothing 2748 */ 2749 static void mpi3mr_cablemgmt_evt_th(struct mpi3mr_ioc *mrioc, 2750 struct mpi3_event_notification_reply *event_reply) 2751 { 2752 struct mpi3_event_data_cable_management *evtdata = 2753 (struct mpi3_event_data_cable_management *)event_reply->event_data; 2754 2755 switch (evtdata->status) { 2756 case MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER: 2757 { 2758 ioc_info(mrioc, "An active cable with receptacle_id %d cannot be powered.\n" 2759 "Devices connected to this cable are not detected.\n" 2760 "This cable requires %d mW of power.\n", 2761 evtdata->receptacle_id, 2762 le32_to_cpu(evtdata->active_cable_power_requirement)); 2763 break; 2764 } 2765 case MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED: 2766 { 2767 ioc_info(mrioc, "A cable with receptacle_id %d is not running at optimal speed\n", 2768 evtdata->receptacle_id); 2769 break; 2770 } 2771 default: 2772 break; 2773 } 2774 } 2775 2776 /** 2777 * mpi3mr_add_event_wait_for_device_refresh - Add Wait for Device Refresh Event 2778 * @mrioc: Adapter instance reference 2779 * 2780 * Add driver specific event to make sure that the driver won't process the 2781 * events until all the devices are refreshed during soft reset. 2782 * 2783 * Return: Nothing 2784 */ 2785 void mpi3mr_add_event_wait_for_device_refresh(struct mpi3mr_ioc *mrioc) 2786 { 2787 struct mpi3mr_fwevt *fwevt = NULL; 2788 2789 fwevt = mpi3mr_alloc_fwevt(0); 2790 if (!fwevt) { 2791 dprint_event_th(mrioc, 2792 "failed to schedule bottom half handler for event(0x%02x)\n", 2793 MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH); 2794 return; 2795 } 2796 fwevt->mrioc = mrioc; 2797 fwevt->event_id = MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH; 2798 fwevt->send_ack = 0; 2799 fwevt->process_evt = 1; 2800 fwevt->evt_ctx = 0; 2801 fwevt->event_data_size = 0; 2802 mpi3mr_fwevt_add_to_list(mrioc, fwevt); 2803 } 2804 2805 /** 2806 * mpi3mr_os_handle_events - Firmware event handler 2807 * @mrioc: Adapter instance reference 2808 * @event_reply: event data 2809 * 2810 * Identifies whether the event has to be handled and acknowledged, 2811 * and either processes the event in the tophalf and/or schedules a 2812 * bottom half through mpi3mr_fwevt_worker.
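* Events that require only an acknowledgment and no bottom half processing are still queued so that the ack is sent from the firmware event worker context.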
2813 * 2814 * Return: Nothing 2815 */ 2816 void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc, 2817 struct mpi3_event_notification_reply *event_reply) 2818 { 2819 u16 evt_type, sz; 2820 struct mpi3mr_fwevt *fwevt = NULL; 2821 bool ack_req = 0, process_evt_bh = 0; 2822 2823 if (mrioc->stop_drv_processing) 2824 return; 2825 2826 if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK) 2827 == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED) 2828 ack_req = 1; 2829 2830 evt_type = event_reply->event; 2831 2832 switch (evt_type) { 2833 case MPI3_EVENT_DEVICE_ADDED: 2834 { 2835 struct mpi3_device_page0 *dev_pg0 = 2836 (struct mpi3_device_page0 *)event_reply->event_data; 2837 if (mpi3mr_create_tgtdev(mrioc, dev_pg0)) 2838 ioc_err(mrioc, 2839 "%s :Failed to add device in the device add event\n", 2840 __func__); 2841 else 2842 process_evt_bh = 1; 2843 break; 2844 } 2845 case MPI3_EVENT_DEVICE_STATUS_CHANGE: 2846 { 2847 process_evt_bh = 1; 2848 mpi3mr_devstatuschg_evt_th(mrioc, event_reply); 2849 break; 2850 } 2851 case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST: 2852 { 2853 process_evt_bh = 1; 2854 mpi3mr_sastopochg_evt_th(mrioc, event_reply); 2855 break; 2856 } 2857 case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: 2858 { 2859 process_evt_bh = 1; 2860 mpi3mr_pcietopochg_evt_th(mrioc, event_reply); 2861 break; 2862 } 2863 case MPI3_EVENT_PREPARE_FOR_RESET: 2864 { 2865 mpi3mr_preparereset_evt_th(mrioc, event_reply); 2866 ack_req = 0; 2867 break; 2868 } 2869 case MPI3_EVENT_DEVICE_INFO_CHANGED: 2870 case MPI3_EVENT_LOG_DATA: 2871 case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE: 2872 case MPI3_EVENT_ENCL_DEVICE_ADDED: 2873 { 2874 process_evt_bh = 1; 2875 break; 2876 } 2877 case MPI3_EVENT_ENERGY_PACK_CHANGE: 2878 { 2879 mpi3mr_energypackchg_evt_th(mrioc, event_reply); 2880 break; 2881 } 2882 case MPI3_EVENT_CABLE_MGMT: 2883 { 2884 mpi3mr_cablemgmt_evt_th(mrioc, event_reply); 2885 break; 2886 } 2887 case MPI3_EVENT_SAS_DISCOVERY: 2888 case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR: 2889 case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE: 2890 case MPI3_EVENT_PCIE_ENUMERATION: 2891 break; 2892 default: 2893 ioc_info(mrioc, "%s :event 0x%02x is not handled\n", 2894 __func__, evt_type); 2895 break; 2896 } 2897 if (process_evt_bh || ack_req) { 2898 sz = event_reply->event_data_length * 4; 2899 fwevt = mpi3mr_alloc_fwevt(sz); 2900 if (!fwevt) { 2901 ioc_info(mrioc, "%s :failure at %s:%d/%s()!\n", 2902 __func__, __FILE__, __LINE__, __func__); 2903 return; 2904 } 2905 2906 memcpy(fwevt->event_data, event_reply->event_data, sz); 2907 fwevt->mrioc = mrioc; 2908 fwevt->event_id = evt_type; 2909 fwevt->send_ack = ack_req; 2910 fwevt->process_evt = process_evt_bh; 2911 fwevt->evt_ctx = le32_to_cpu(event_reply->event_context); 2912 mpi3mr_fwevt_add_to_list(mrioc, fwevt); 2913 } 2914 } 2915 2916 /** 2917 * mpi3mr_setup_eedp - Setup EEDP information in MPI3 SCSI IO 2918 * @mrioc: Adapter instance reference 2919 * @scmd: SCSI command reference 2920 * @scsiio_req: MPI3 SCSI IO request 2921 * 2922 * Identifies the protection information flags from the SCSI 2923 * command and sets appropriate flags in the MPI3 SCSI IO 2924 * request.
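* For example, SCSI_PROT_WRITE_PASS with an IP checksum guard maps to MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN with the application tag translation mask set to 0xffff, and a 4096 byte protection interval selects MPI3_EEDP_UDS_4096.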
2925 * 2926 * Return: Nothing 2927 */ 2928 static void mpi3mr_setup_eedp(struct mpi3mr_ioc *mrioc, 2929 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req) 2930 { 2931 u16 eedp_flags = 0; 2932 unsigned char prot_op = scsi_get_prot_op(scmd); 2933 2934 switch (prot_op) { 2935 case SCSI_PROT_NORMAL: 2936 return; 2937 case SCSI_PROT_READ_STRIP: 2938 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE; 2939 break; 2940 case SCSI_PROT_WRITE_INSERT: 2941 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT; 2942 break; 2943 case SCSI_PROT_READ_INSERT: 2944 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT; 2945 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 2946 break; 2947 case SCSI_PROT_WRITE_STRIP: 2948 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE; 2949 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 2950 break; 2951 case SCSI_PROT_READ_PASS: 2952 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK; 2953 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 2954 break; 2955 case SCSI_PROT_WRITE_PASS: 2956 if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) { 2957 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN; 2958 scsiio_req->sgl[0].eedp.application_tag_translation_mask = 2959 0xffff; 2960 } else 2961 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK; 2962 2963 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 2964 break; 2965 default: 2966 return; 2967 } 2968 2969 if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK) 2970 eedp_flags |= MPI3_EEDPFLAGS_CHK_GUARD; 2971 2972 if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) 2973 eedp_flags |= MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM; 2974 2975 if (scmd->prot_flags & SCSI_PROT_REF_CHECK) { 2976 eedp_flags |= MPI3_EEDPFLAGS_CHK_REF_TAG | 2977 MPI3_EEDPFLAGS_INCR_PRI_REF_TAG; 2978 scsiio_req->cdb.eedp32.primary_reference_tag = 2979 cpu_to_be32(scsi_prot_ref_tag(scmd)); 2980 } 2981 2982 if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT) 2983 eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG; 2984 2985 eedp_flags |= MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE; 2986 2987 switch (scsi_prot_interval(scmd)) { 2988 case 512: 2989 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_512; 2990 break; 2991 case 520: 2992 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_520; 2993 break; 2994 case 4080: 2995 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4080; 2996 break; 2997 case 4088: 2998 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4088; 2999 break; 3000 case 4096: 3001 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4096; 3002 break; 3003 case 4104: 3004 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4104; 3005 break; 3006 case 4160: 3007 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4160; 3008 break; 3009 default: 3010 break; 3011 } 3012 3013 scsiio_req->sgl[0].eedp.eedp_flags = cpu_to_le16(eedp_flags); 3014 scsiio_req->sgl[0].eedp.flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED; 3015 } 3016 3017 /** 3018 * mpi3mr_build_sense_buffer - Map sense information 3019 * @desc: Sense type 3020 * @buf: Sense buffer to populate 3021 * @key: Sense key 3022 * @asc: Additional sense code 3023 * @ascq: Additional sense code qualifier 3024 * 3025 * Maps the given sense information into either descriptor or 3026 * fixed format sense data. 
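* For example, key/asc/ascq land in bytes 1 to 3 of a descriptor format buffer (0x72) and in bytes 2, 12 and 13 of a fixed format buffer (0x70).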
3027 * 3028 * Return: Nothing 3029 */ 3030 static inline void mpi3mr_build_sense_buffer(int desc, u8 *buf, u8 key, 3031 u8 asc, u8 ascq) 3032 { 3033 if (desc) { 3034 buf[0] = 0x72; /* descriptor, current */ 3035 buf[1] = key; 3036 buf[2] = asc; 3037 buf[3] = ascq; 3038 buf[7] = 0; 3039 } else { 3040 buf[0] = 0x70; /* fixed, current */ 3041 buf[2] = key; 3042 buf[7] = 0xa; 3043 buf[12] = asc; 3044 buf[13] = ascq; 3045 } 3046 } 3047 3048 /** 3049 * mpi3mr_map_eedp_error - Map EEDP errors from IOC status 3050 * @scmd: SCSI command reference 3051 * @ioc_status: status of MPI3 request 3052 * 3053 * Maps the EEDP error status of the SCSI IO request to sense 3054 * data. 3055 * 3056 * Return: Nothing 3057 */ 3058 static void mpi3mr_map_eedp_error(struct scsi_cmnd *scmd, 3059 u16 ioc_status) 3060 { 3061 u8 ascq = 0; 3062 3063 switch (ioc_status) { 3064 case MPI3_IOCSTATUS_EEDP_GUARD_ERROR: 3065 ascq = 0x01; 3066 break; 3067 case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR: 3068 ascq = 0x02; 3069 break; 3070 case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR: 3071 ascq = 0x03; 3072 break; 3073 default: 3074 ascq = 0x00; 3075 break; 3076 } 3077 3078 mpi3mr_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 3079 0x10, ascq); 3080 scmd->result = (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION; 3081 } 3082 3083 /** 3084 * mpi3mr_process_op_reply_desc - reply descriptor handler 3085 * @mrioc: Adapter instance reference 3086 * @reply_desc: Operational reply descriptor 3087 * @reply_dma: place holder for reply DMA address 3088 * @qidx: Operational queue index 3089 * 3090 * Processes the operational reply descriptor and identifies the 3091 * descriptor type. Based on the descriptor, maps the MPI3 request 3092 * status to a SCSI command status and calls the scsi_done 3093 * callback. 3094 * 3095 * Return: Nothing 3096 */ 3097 void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc, 3098 struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma, u16 qidx) 3099 { 3100 u16 reply_desc_type, host_tag = 0; 3101 u16 ioc_status = MPI3_IOCSTATUS_SUCCESS; 3102 u32 ioc_loginfo = 0; 3103 struct mpi3_status_reply_descriptor *status_desc = NULL; 3104 struct mpi3_address_reply_descriptor *addr_desc = NULL; 3105 struct mpi3_success_reply_descriptor *success_desc = NULL; 3106 struct mpi3_scsi_io_reply *scsi_reply = NULL; 3107 struct scsi_cmnd *scmd = NULL; 3108 struct scmd_priv *priv = NULL; 3109 u8 *sense_buf = NULL; 3110 u8 scsi_state = 0, scsi_status = 0, sense_state = 0; 3111 u32 xfer_count = 0, sense_count = 0, resp_data = 0; 3112 u16 dev_handle = 0xFFFF; 3113 struct scsi_sense_hdr sshdr; 3114 struct mpi3mr_stgt_priv_data *stgt_priv_data = NULL; 3115 struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL; 3116 u32 ioc_pend_data_len = 0, tg_pend_data_len = 0, data_len_blks = 0; 3117 struct mpi3mr_throttle_group_info *tg = NULL; 3118 u8 throttle_enabled_dev = 0; 3119 3120 *reply_dma = 0; 3121 reply_desc_type = le16_to_cpu(reply_desc->reply_flags) & 3122 MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK; 3123 switch (reply_desc_type) { 3124 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS: 3125 status_desc = (struct mpi3_status_reply_descriptor *)reply_desc; 3126 host_tag = le16_to_cpu(status_desc->host_tag); 3127 ioc_status = le16_to_cpu(status_desc->ioc_status); 3128 if (ioc_status & 3129 MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL) 3130 ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info); 3131 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK; 3132 break; 3133 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY: 3134 addr_desc = (struct
mpi3_address_reply_descriptor *)reply_desc; 3135 *reply_dma = le64_to_cpu(addr_desc->reply_frame_address); 3136 scsi_reply = mpi3mr_get_reply_virt_addr(mrioc, 3137 *reply_dma); 3138 if (!scsi_reply) { 3139 panic("%s: scsi_reply is NULL, this shouldn't happen\n", 3140 mrioc->name); 3141 goto out; 3142 } 3143 host_tag = le16_to_cpu(scsi_reply->host_tag); 3144 ioc_status = le16_to_cpu(scsi_reply->ioc_status); 3145 scsi_status = scsi_reply->scsi_status; 3146 scsi_state = scsi_reply->scsi_state; 3147 dev_handle = le16_to_cpu(scsi_reply->dev_handle); 3148 sense_state = (scsi_state & MPI3_SCSI_STATE_SENSE_MASK); 3149 xfer_count = le32_to_cpu(scsi_reply->transfer_count); 3150 sense_count = le32_to_cpu(scsi_reply->sense_count); 3151 resp_data = le32_to_cpu(scsi_reply->response_data); 3152 sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc, 3153 le64_to_cpu(scsi_reply->sense_data_buffer_address)); 3154 if (ioc_status & 3155 MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL) 3156 ioc_loginfo = le32_to_cpu(scsi_reply->ioc_log_info); 3157 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK; 3158 if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY) 3159 panic("%s: Ran out of sense buffers\n", mrioc->name); 3160 break; 3161 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS: 3162 success_desc = (struct mpi3_success_reply_descriptor *)reply_desc; 3163 host_tag = le16_to_cpu(success_desc->host_tag); 3164 break; 3165 default: 3166 break; 3167 } 3168 scmd = mpi3mr_scmd_from_host_tag(mrioc, host_tag, qidx); 3169 if (!scmd) { 3170 panic("%s: Cannot Identify scmd for host_tag 0x%x\n", 3171 mrioc->name, host_tag); 3172 goto out; 3173 } 3174 priv = scsi_cmd_priv(scmd); 3175 3176 data_len_blks = scsi_bufflen(scmd) >> 9; 3177 sdev_priv_data = scmd->device->hostdata; 3178 if (sdev_priv_data) { 3179 stgt_priv_data = sdev_priv_data->tgt_priv_data; 3180 if (stgt_priv_data) { 3181 tg = stgt_priv_data->throttle_group; 3182 throttle_enabled_dev = 3183 stgt_priv_data->io_throttle_enabled; 3184 } 3185 } 3186 if (unlikely((data_len_blks >= mrioc->io_throttle_data_length) && 3187 throttle_enabled_dev)) { 3188 ioc_pend_data_len = atomic_sub_return(data_len_blks, 3189 &mrioc->pend_large_data_sz); 3190 if (tg) { 3191 tg_pend_data_len = atomic_sub_return(data_len_blks, 3192 &tg->pend_large_data_sz); 3193 if (tg->io_divert && ((ioc_pend_data_len <= 3194 mrioc->io_throttle_low) && 3195 (tg_pend_data_len <= tg->low))) { 3196 tg->io_divert = 0; 3197 mpi3mr_set_io_divert_for_all_vd_in_tg( 3198 mrioc, tg, 0); 3199 } 3200 } else { 3201 if (ioc_pend_data_len <= mrioc->io_throttle_low) 3202 stgt_priv_data->io_divert = 0; 3203 } 3204 } else if (unlikely((stgt_priv_data && stgt_priv_data->io_divert))) { 3205 ioc_pend_data_len = atomic_read(&mrioc->pend_large_data_sz); 3206 if (!tg) { 3207 if (ioc_pend_data_len <= mrioc->io_throttle_low) 3208 stgt_priv_data->io_divert = 0; 3209 3210 } else if (ioc_pend_data_len <= mrioc->io_throttle_low) { 3211 tg_pend_data_len = atomic_read(&tg->pend_large_data_sz); 3212 if (tg->io_divert && (tg_pend_data_len <= tg->low)) { 3213 tg->io_divert = 0; 3214 mpi3mr_set_io_divert_for_all_vd_in_tg( 3215 mrioc, tg, 0); 3216 } 3217 } 3218 } 3219 3220 if (success_desc) { 3221 scmd->result = DID_OK << 16; 3222 goto out_success; 3223 } 3224 3225 scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_count); 3226 if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN && 3227 xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY || 3228 scsi_status == MPI3_SCSI_STATUS_RESERVATION_CONFLICT || 3229 scsi_status == 
MPI3_SCSI_STATUS_TASK_SET_FULL)) 3230 ioc_status = MPI3_IOCSTATUS_SUCCESS; 3231 3232 if ((sense_state == MPI3_SCSI_STATE_SENSE_VALID) && sense_count && 3233 sense_buf) { 3234 u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, sense_count); 3235 3236 memcpy(scmd->sense_buffer, sense_buf, sz); 3237 } 3238 3239 switch (ioc_status) { 3240 case MPI3_IOCSTATUS_BUSY: 3241 case MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES: 3242 scmd->result = SAM_STAT_BUSY; 3243 break; 3244 case MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE: 3245 scmd->result = DID_NO_CONNECT << 16; 3246 break; 3247 case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED: 3248 scmd->result = DID_SOFT_ERROR << 16; 3249 break; 3250 case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED: 3251 case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED: 3252 scmd->result = DID_RESET << 16; 3253 break; 3254 case MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: 3255 if ((xfer_count == 0) || (scmd->underflow > xfer_count)) 3256 scmd->result = DID_SOFT_ERROR << 16; 3257 else 3258 scmd->result = (DID_OK << 16) | scsi_status; 3259 break; 3260 case MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN: 3261 scmd->result = (DID_OK << 16) | scsi_status; 3262 if (sense_state == MPI3_SCSI_STATE_SENSE_VALID) 3263 break; 3264 if (xfer_count < scmd->underflow) { 3265 if (scsi_status == SAM_STAT_BUSY) 3266 scmd->result = SAM_STAT_BUSY; 3267 else 3268 scmd->result = DID_SOFT_ERROR << 16; 3269 } else if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) || 3270 (sense_state != MPI3_SCSI_STATE_SENSE_NOT_AVAILABLE)) 3271 scmd->result = DID_SOFT_ERROR << 16; 3272 else if (scsi_state & MPI3_SCSI_STATE_TERMINATED) 3273 scmd->result = DID_RESET << 16; 3274 break; 3275 case MPI3_IOCSTATUS_SCSI_DATA_OVERRUN: 3276 scsi_set_resid(scmd, 0); 3277 fallthrough; 3278 case MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR: 3279 case MPI3_IOCSTATUS_SUCCESS: 3280 scmd->result = (DID_OK << 16) | scsi_status; 3281 if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) || 3282 (sense_state == MPI3_SCSI_STATE_SENSE_FAILED) || 3283 (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY)) 3284 scmd->result = DID_SOFT_ERROR << 16; 3285 else if (scsi_state & MPI3_SCSI_STATE_TERMINATED) 3286 scmd->result = DID_RESET << 16; 3287 break; 3288 case MPI3_IOCSTATUS_EEDP_GUARD_ERROR: 3289 case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR: 3290 case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR: 3291 mpi3mr_map_eedp_error(scmd, ioc_status); 3292 break; 3293 case MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR: 3294 case MPI3_IOCSTATUS_INVALID_FUNCTION: 3295 case MPI3_IOCSTATUS_INVALID_SGL: 3296 case MPI3_IOCSTATUS_INTERNAL_ERROR: 3297 case MPI3_IOCSTATUS_INVALID_FIELD: 3298 case MPI3_IOCSTATUS_INVALID_STATE: 3299 case MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR: 3300 case MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED: 3301 case MPI3_IOCSTATUS_INSUFFICIENT_POWER: 3302 default: 3303 scmd->result = DID_SOFT_ERROR << 16; 3304 break; 3305 } 3306 3307 if (scmd->result != (DID_OK << 16) && (scmd->cmnd[0] != ATA_12) && 3308 (scmd->cmnd[0] != ATA_16) && 3309 mrioc->logging_level & MPI3_DEBUG_SCSI_ERROR) { 3310 ioc_info(mrioc, "%s :scmd->result 0x%x\n", __func__, 3311 scmd->result); 3312 scsi_print_command(scmd); 3313 ioc_info(mrioc, 3314 "%s :Command issued to handle 0x%02x returned with error 0x%04x loginfo 0x%08x, qid %d\n", 3315 __func__, dev_handle, ioc_status, ioc_loginfo, 3316 priv->req_q_idx + 1); 3317 ioc_info(mrioc, 3318 " host_tag %d scsi_state 0x%02x scsi_status 0x%02x, xfer_cnt %d resp_data 0x%x\n", 3319 host_tag, scsi_state, scsi_status, xfer_count, resp_data); 3320 if (sense_buf) { 3321 scsi_normalize_sense(sense_buf, sense_count, &sshdr); 3322 
ioc_info(mrioc, 3323 "%s :sense_count 0x%x, sense_key 0x%x ASC 0x%x, ASCQ 0x%x\n", 3324 __func__, sense_count, sshdr.sense_key, 3325 sshdr.asc, sshdr.ascq); 3326 } 3327 } 3328 out_success: 3329 if (priv->meta_sg_valid) { 3330 dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd), 3331 scsi_prot_sg_count(scmd), scmd->sc_data_direction); 3332 } 3333 mpi3mr_clear_scmd_priv(mrioc, scmd); 3334 scsi_dma_unmap(scmd); 3335 scsi_done(scmd); 3336 out: 3337 if (sense_buf) 3338 mpi3mr_repost_sense_buf(mrioc, 3339 le64_to_cpu(scsi_reply->sense_data_buffer_address)); 3340 } 3341 3342 /** 3343 * mpi3mr_get_chain_idx - get free chain buffer index 3344 * @mrioc: Adapter instance reference 3345 * 3346 * Try to get a free chain buffer index from the free pool. 3347 * 3348 * Return: -1 on failure or the free chain buffer index 3349 */ 3350 static int mpi3mr_get_chain_idx(struct mpi3mr_ioc *mrioc) 3351 { 3352 u8 retry_count = 5; 3353 int cmd_idx = -1; 3354 unsigned long flags; 3355 3356 spin_lock_irqsave(&mrioc->chain_buf_lock, flags); 3357 do { 3358 cmd_idx = find_first_zero_bit(mrioc->chain_bitmap, 3359 mrioc->chain_buf_count); 3360 if (cmd_idx < mrioc->chain_buf_count) { 3361 set_bit(cmd_idx, mrioc->chain_bitmap); 3362 break; 3363 } 3364 cmd_idx = -1; 3365 } while (retry_count--); 3366 spin_unlock_irqrestore(&mrioc->chain_buf_lock, flags); 3367 return cmd_idx; 3368 } 3369 3370 /** 3371 * mpi3mr_prepare_sg_scmd - build scatter gather list 3372 * @mrioc: Adapter instance reference 3373 * @scmd: SCSI command reference 3374 * @scsiio_req: MPI3 SCSI IO request 3375 * 3376 * This function maps SCSI command's data and protection SGEs to 3377 * MPI request SGEs. If required additional 4K chain buffer is 3378 * used to send the SGEs. 3379 * 3380 * Return: 0 on success, -ENOMEM on dma_map_sg failure 3381 */ 3382 static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc, 3383 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req) 3384 { 3385 dma_addr_t chain_dma; 3386 struct scatterlist *sg_scmd; 3387 void *sg_local, *chain; 3388 u32 chain_length; 3389 int sges_left, chain_idx; 3390 u32 sges_in_segment; 3391 u8 simple_sgl_flags; 3392 u8 simple_sgl_flags_last; 3393 u8 last_chain_sgl_flags; 3394 struct chain_element *chain_req; 3395 struct scmd_priv *priv = NULL; 3396 u32 meta_sg = le32_to_cpu(scsiio_req->flags) & 3397 MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI; 3398 3399 priv = scsi_cmd_priv(scmd); 3400 3401 simple_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE | 3402 MPI3_SGE_FLAGS_DLAS_SYSTEM; 3403 simple_sgl_flags_last = simple_sgl_flags | 3404 MPI3_SGE_FLAGS_END_OF_LIST; 3405 last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN | 3406 MPI3_SGE_FLAGS_DLAS_SYSTEM; 3407 3408 if (meta_sg) 3409 sg_local = &scsiio_req->sgl[MPI3_SCSIIO_METASGL_INDEX]; 3410 else 3411 sg_local = &scsiio_req->sgl; 3412 3413 if (!scsiio_req->data_length && !meta_sg) { 3414 mpi3mr_build_zero_len_sge(sg_local); 3415 return 0; 3416 } 3417 3418 if (meta_sg) { 3419 sg_scmd = scsi_prot_sglist(scmd); 3420 sges_left = dma_map_sg(&mrioc->pdev->dev, 3421 scsi_prot_sglist(scmd), 3422 scsi_prot_sg_count(scmd), 3423 scmd->sc_data_direction); 3424 priv->meta_sg_valid = 1; /* To unmap meta sg DMA */ 3425 } else { 3426 sg_scmd = scsi_sglist(scmd); 3427 sges_left = scsi_dma_map(scmd); 3428 } 3429 3430 if (sges_left < 0) { 3431 sdev_printk(KERN_ERR, scmd->device, 3432 "scsi_dma_map failed: request for %d bytes!\n", 3433 scsi_bufflen(scmd)); 3434 return -ENOMEM; 3435 } 3436 if (sges_left > mrioc->max_sgl_entries) { 3437 sdev_printk(KERN_ERR, 
scmd->device, 3438 "scsi_dma_map returned unsupported sge count %d!\n", 3439 sges_left); 3440 return -ENOMEM; 3441 } 3442 3443 sges_in_segment = (mrioc->facts.op_req_sz - 3444 offsetof(struct mpi3_scsi_io_request, sgl)) / sizeof(struct mpi3_sge_common); 3445 3446 if (scsiio_req->sgl[0].eedp.flags == 3447 MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED && !meta_sg) { 3448 sg_local += sizeof(struct mpi3_sge_common); 3449 sges_in_segment--; 3450 /* Reserve 1st segment (scsiio_req->sgl[0]) for eedp */ 3451 } 3452 3453 if (scsiio_req->msg_flags == 3454 MPI3_SCSIIO_MSGFLAGS_METASGL_VALID && !meta_sg) { 3455 sges_in_segment--; 3456 /* Reserve last segment (scsiio_req->sgl[3]) for meta sg */ 3457 } 3458 3459 if (meta_sg) 3460 sges_in_segment = 1; 3461 3462 if (sges_left <= sges_in_segment) 3463 goto fill_in_last_segment; 3464 3465 /* fill in main message segment when there is a chain following */ 3466 while (sges_in_segment > 1) { 3467 mpi3mr_add_sg_single(sg_local, simple_sgl_flags, 3468 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 3469 sg_scmd = sg_next(sg_scmd); 3470 sg_local += sizeof(struct mpi3_sge_common); 3471 sges_left--; 3472 sges_in_segment--; 3473 } 3474 3475 chain_idx = mpi3mr_get_chain_idx(mrioc); 3476 if (chain_idx < 0) 3477 return -1; 3478 chain_req = &mrioc->chain_sgl_list[chain_idx]; 3479 if (meta_sg) 3480 priv->meta_chain_idx = chain_idx; 3481 else 3482 priv->chain_idx = chain_idx; 3483 3484 chain = chain_req->addr; 3485 chain_dma = chain_req->dma_addr; 3486 sges_in_segment = sges_left; 3487 chain_length = sges_in_segment * sizeof(struct mpi3_sge_common); 3488 3489 mpi3mr_add_sg_single(sg_local, last_chain_sgl_flags, 3490 chain_length, chain_dma); 3491 3492 sg_local = chain; 3493 3494 fill_in_last_segment: 3495 while (sges_left > 0) { 3496 if (sges_left == 1) 3497 mpi3mr_add_sg_single(sg_local, 3498 simple_sgl_flags_last, sg_dma_len(sg_scmd), 3499 sg_dma_address(sg_scmd)); 3500 else 3501 mpi3mr_add_sg_single(sg_local, simple_sgl_flags, 3502 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 3503 sg_scmd = sg_next(sg_scmd); 3504 sg_local += sizeof(struct mpi3_sge_common); 3505 sges_left--; 3506 } 3507 3508 return 0; 3509 } 3510 3511 /** 3512 * mpi3mr_build_sg_scmd - build scatter gather list for SCSI IO 3513 * @mrioc: Adapter instance reference 3514 * @scmd: SCSI command reference 3515 * @scsiio_req: MPI3 SCSI IO request 3516 * 3517 * This function calls mpi3mr_prepare_sg_scmd for constructing 3518 * both data SGEs and protection information SGEs in the MPI 3519 * format from the SCSI command as appropriate. 3520 * 3521 * Return: return value of mpi3mr_prepare_sg_scmd. 3522 */ 3523 static int mpi3mr_build_sg_scmd(struct mpi3mr_ioc *mrioc, 3524 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req) 3525 { 3526 int ret; 3527 3528 ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req); 3529 if (ret) 3530 return ret; 3531 3532 if (scsiio_req->msg_flags == MPI3_SCSIIO_MSGFLAGS_METASGL_VALID) { 3533 /* There is a valid meta sg */ 3534 scsiio_req->flags |= 3535 cpu_to_le32(MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI); 3536 ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req); 3537 } 3538 3539 return ret; 3540 } 3541 3542 /** 3543 * mpi3mr_tm_response_name - get TM response as a string 3544 * @resp_code: TM response code 3545 * 3546 * Convert a known task management response code to a readable 3547 * string. 3548 * 3549 * Return: response code string.
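* Never returns NULL; unrecognized codes map to the string "unknown".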
3550 */ 3551 static const char *mpi3mr_tm_response_name(u8 resp_code) 3552 { 3553 char *desc; 3554 3555 switch (resp_code) { 3556 case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE: 3557 desc = "task management request completed"; 3558 break; 3559 case MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME: 3560 desc = "invalid frame"; 3561 break; 3562 case MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED: 3563 desc = "task management request not supported"; 3564 break; 3565 case MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED: 3566 desc = "task management request failed"; 3567 break; 3568 case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED: 3569 desc = "task management request succeeded"; 3570 break; 3571 case MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN: 3572 desc = "invalid LUN"; 3573 break; 3574 case MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG: 3575 desc = "overlapped tag attempted"; 3576 break; 3577 case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC: 3578 desc = "task queued, however not sent to target"; 3579 break; 3580 case MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED: 3581 desc = "task management request denied by NVMe device"; 3582 break; 3583 default: 3584 desc = "unknown"; 3585 break; 3586 } 3587 3588 return desc; 3589 } 3590 3591 inline void mpi3mr_poll_pend_io_completions(struct mpi3mr_ioc *mrioc) 3592 { 3593 int i; 3594 int num_of_reply_queues = 3595 mrioc->num_op_reply_q + mrioc->op_reply_q_offset; 3596 3597 for (i = mrioc->op_reply_q_offset; i < num_of_reply_queues; i++) 3598 mpi3mr_process_op_reply_q(mrioc, 3599 mrioc->intr_info[i].op_reply_q); 3600 } 3601 3602 /** 3603 * mpi3mr_issue_tm - Issue Task Management request 3604 * @mrioc: Adapter instance reference 3605 * @tm_type: Task Management type 3606 * @handle: Device handle 3607 * @lun: lun ID 3608 * @htag: Host tag of the TM request 3609 * @timeout: TM timeout value 3610 * @drv_cmd: Internal command tracker 3611 * @resp_code: Response code place holder 3612 * @scmd: SCSI command 3613 * 3614 * Issues a Task Management Request to the controller for a 3615 * specified target, lun and command, waits for its completion 3616 * and checks the TM response. Recovers from a timed-out TM by 3617 * issuing a controller reset.
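* For PCIe attached devices the supplied timeout may be overridden by the device's abort_to or reset_to limits from device page 0.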
3618 * 3619 * Return: 0 on success, non-zero on errors 3620 */ 3621 int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type, 3622 u16 handle, uint lun, u16 htag, ulong timeout, 3623 struct mpi3mr_drv_cmd *drv_cmd, 3624 u8 *resp_code, struct scsi_cmnd *scmd) 3625 { 3626 struct mpi3_scsi_task_mgmt_request tm_req; 3627 struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL; 3628 int retval = 0; 3629 struct mpi3mr_tgt_dev *tgtdev = NULL; 3630 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 3631 struct scmd_priv *cmd_priv = NULL; 3632 struct scsi_device *sdev = NULL; 3633 struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL; 3634 3635 ioc_info(mrioc, "%s :Issue TM: TM type (0x%x) for devhandle 0x%04x\n", 3636 __func__, tm_type, handle); 3637 if (mrioc->unrecoverable) { 3638 retval = -1; 3639 ioc_err(mrioc, "%s :Issue TM: Unrecoverable controller\n", 3640 __func__); 3641 goto out; 3642 } 3643 3644 memset(&tm_req, 0, sizeof(tm_req)); 3645 mutex_lock(&drv_cmd->mutex); 3646 if (drv_cmd->state & MPI3MR_CMD_PENDING) { 3647 retval = -1; 3648 ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__); 3649 mutex_unlock(&drv_cmd->mutex); 3650 goto out; 3651 } 3652 if (mrioc->reset_in_progress) { 3653 retval = -1; 3654 ioc_err(mrioc, "%s :Issue TM: Reset in progress\n", __func__); 3655 mutex_unlock(&drv_cmd->mutex); 3656 goto out; 3657 } 3658 3659 drv_cmd->state = MPI3MR_CMD_PENDING; 3660 drv_cmd->is_waiting = 1; 3661 drv_cmd->callback = NULL; 3662 tm_req.dev_handle = cpu_to_le16(handle); 3663 tm_req.task_type = tm_type; 3664 tm_req.host_tag = cpu_to_le16(htag); 3665 3666 int_to_scsilun(lun, (struct scsi_lun *)tm_req.lun); 3667 tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT; 3668 3669 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 3670 3671 if (scmd) { 3672 sdev = scmd->device; 3673 sdev_priv_data = sdev->hostdata; 3674 scsi_tgt_priv_data = ((sdev_priv_data) ? 
3675 sdev_priv_data->tgt_priv_data : NULL); 3676 } else { 3677 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) 3678 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 3679 tgtdev->starget->hostdata; 3680 } 3681 3682 if (scsi_tgt_priv_data) 3683 atomic_inc(&scsi_tgt_priv_data->block_io); 3684 3685 if (tgtdev && (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)) { 3686 if (cmd_priv && tgtdev->dev_spec.pcie_inf.abort_to) 3687 timeout = tgtdev->dev_spec.pcie_inf.abort_to; 3688 else if (!cmd_priv && tgtdev->dev_spec.pcie_inf.reset_to) 3689 timeout = tgtdev->dev_spec.pcie_inf.reset_to; 3690 } 3691 3692 init_completion(&drv_cmd->done); 3693 retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1); 3694 if (retval) { 3695 ioc_err(mrioc, "%s :Issue TM: Admin Post failed\n", __func__); 3696 goto out_unlock; 3697 } 3698 wait_for_completion_timeout(&drv_cmd->done, (timeout * HZ)); 3699 3700 if (!(drv_cmd->state & MPI3MR_CMD_COMPLETE)) { 3701 drv_cmd->is_waiting = 0; 3702 retval = -1; 3703 if (!(drv_cmd->state & MPI3MR_CMD_RESET)) { 3704 dprint_tm(mrioc, 3705 "task management request timed out after %ld seconds\n", 3706 timeout); 3707 if (mrioc->logging_level & MPI3_DEBUG_TM) 3708 dprint_dump_req(&tm_req, sizeof(tm_req)/4); 3709 mpi3mr_soft_reset_handler(mrioc, 3710 MPI3MR_RESET_FROM_TM_TIMEOUT, 1); 3711 } 3712 goto out_unlock; 3713 } 3714 3715 if (!(drv_cmd->state & MPI3MR_CMD_REPLY_VALID)) { 3716 dprint_tm(mrioc, "invalid task management reply message\n"); 3717 retval = -1; 3718 goto out_unlock; 3719 } 3720 3721 tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply; 3722 3723 switch (drv_cmd->ioc_status) { 3724 case MPI3_IOCSTATUS_SUCCESS: 3725 *resp_code = le32_to_cpu(tm_reply->response_data) & 3726 MPI3MR_RI_MASK_RESPCODE; 3727 break; 3728 case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED: 3729 *resp_code = MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE; 3730 break; 3731 default: 3732 dprint_tm(mrioc, 3733 "task management request to handle(0x%04x) is failed with ioc_status(0x%04x) log_info(0x%08x)\n", 3734 handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo); 3735 retval = -1; 3736 goto out_unlock; 3737 } 3738 3739 switch (*resp_code) { 3740 case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED: 3741 case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE: 3742 break; 3743 case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC: 3744 if (tm_type != MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK) 3745 retval = -1; 3746 break; 3747 default: 3748 retval = -1; 3749 break; 3750 } 3751 3752 dprint_tm(mrioc, 3753 "task management request type(%d) completed for handle(0x%04x) with ioc_status(0x%04x), log_info(0x%08x), termination_count(%d), response:%s(0x%x)\n", 3754 tm_type, handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo, 3755 le32_to_cpu(tm_reply->termination_count), 3756 mpi3mr_tm_response_name(*resp_code), *resp_code); 3757 3758 if (!retval) { 3759 mpi3mr_ioc_disable_intr(mrioc); 3760 mpi3mr_poll_pend_io_completions(mrioc); 3761 mpi3mr_ioc_enable_intr(mrioc); 3762 mpi3mr_poll_pend_io_completions(mrioc); 3763 mpi3mr_process_admin_reply_q(mrioc); 3764 } 3765 switch (tm_type) { 3766 case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET: 3767 if (!scsi_tgt_priv_data) 3768 break; 3769 scsi_tgt_priv_data->pend_count = 0; 3770 blk_mq_tagset_busy_iter(&mrioc->shost->tag_set, 3771 mpi3mr_count_tgt_pending, 3772 (void *)scsi_tgt_priv_data->starget); 3773 break; 3774 case MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: 3775 if (!sdev_priv_data) 3776 break; 3777 sdev_priv_data->pend_count = 0; 3778 blk_mq_tagset_busy_iter(&mrioc->shost->tag_set, 
		    mpi3mr_count_dev_pending, (void *)sdev);
		break;
	default:
		break;
	}

out_unlock:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&drv_cmd->mutex);
	if (scsi_tgt_priv_data)
		atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);
out:
	return retval;
}

/**
 * mpi3mr_bios_param - BIOS param callback
 * @sdev: SCSI device reference
 * @bdev: Block device reference
 * @capacity: Capacity in logical sectors
 * @params: Parameter array
 *
 * Set the BIOS parameters: heads, sectors and cylinders.
 *
 * Return: 0 always
 */
static int mpi3mr_bios_param(struct scsi_device *sdev,
	struct block_device *bdev, sector_t capacity, int params[])
{
	int heads;
	int sectors;
	sector_t cylinders;
	ulong dummy;

	heads = 64;
	sectors = 32;

	dummy = heads * sectors;
	cylinders = capacity;
	sector_div(cylinders, dummy);

	if ((ulong)capacity >= 0x200000) {
		heads = 255;
		sectors = 63;
		dummy = heads * sectors;
		cylinders = capacity;
		sector_div(cylinders, dummy);
	}

	params[0] = heads;
	params[1] = sectors;
	params[2] = cylinders;
	return 0;
}

/**
 * mpi3mr_map_queues - Map queues callback handler
 * @shost: SCSI host reference
 *
 * Maps default and poll queues.
 *
 * Return: Nothing.
 */
static void mpi3mr_map_queues(struct Scsi_Host *shost)
{
	struct mpi3mr_ioc *mrioc = shost_priv(shost);
	int i, qoff, offset;
	struct blk_mq_queue_map *map = NULL;

	offset = mrioc->op_reply_q_offset;

	for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
		map = &shost->tag_set.map[i];

		map->nr_queues = 0;

		if (i == HCTX_TYPE_DEFAULT)
			map->nr_queues = mrioc->default_qcount;
		else if (i == HCTX_TYPE_POLL)
			map->nr_queues = mrioc->active_poll_qcount;

		if (!map->nr_queues) {
			BUG_ON(i == HCTX_TYPE_DEFAULT);
			continue;
		}

		/*
		 * The poll queues don't have an IRQ (and hence IRQ
		 * affinity), so use the regular blk-mq cpu mapping
		 */
		map->queue_offset = qoff;
		if (i != HCTX_TYPE_POLL)
			blk_mq_pci_map_queues(map, mrioc->pdev, offset);
		else
			blk_mq_map_queues(map);

		qoff += map->nr_queues;
		offset += map->nr_queues;
	}
}

/**
 * mpi3mr_get_fw_pending_ios - Calculate pending I/O count
 * @mrioc: Adapter instance reference
 *
 * Calculate the pending I/Os for the controller and return.
 *
 * Return: Number of pending I/Os
 */
static inline int mpi3mr_get_fw_pending_ios(struct mpi3mr_ioc *mrioc)
{
	u16 i;
	uint pend_ios = 0;

	for (i = 0; i < mrioc->num_op_reply_q; i++)
		pend_ios += atomic_read(&mrioc->op_reply_qinfo[i].pend_ios);
	return pend_ios;
}

/**
 * mpi3mr_print_pending_host_io - print pending I/Os
 * @mrioc: Adapter instance reference
 *
 * Print the number of pending I/Os and each I/O's details prior
 * to reset for debug purposes.
 *
 * Return: Nothing
 */
static void mpi3mr_print_pending_host_io(struct mpi3mr_ioc *mrioc)
{
	struct Scsi_Host *shost = mrioc->shost;

	ioc_info(mrioc, "%s :Pending commands prior to reset: %d\n",
	    __func__, mpi3mr_get_fw_pending_ios(mrioc));
	blk_mq_tagset_busy_iter(&shost->tag_set,
	    mpi3mr_print_scmd, (void *)mrioc);
}

/**
 * mpi3mr_wait_for_host_io - block for I/Os to complete
 * @mrioc: Adapter instance reference
 * @timeout: time out in seconds
 *
 * Waits for pending I/Os for the given adapter to complete or
 * to hit the timeout.
 *
 * Return: Nothing
 */
void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout)
{
	enum mpi3mr_iocstate iocstate;
	int i = 0;

	iocstate = mpi3mr_get_iocstate(mrioc);
	if (iocstate != MRIOC_STATE_READY)
		return;

	if (!mpi3mr_get_fw_pending_ios(mrioc))
		return;
	ioc_info(mrioc,
	    "%s :Waiting for %d seconds prior to reset for %d I/O\n",
	    __func__, timeout, mpi3mr_get_fw_pending_ios(mrioc));

	for (i = 0; i < timeout; i++) {
		if (!mpi3mr_get_fw_pending_ios(mrioc))
			break;
		iocstate = mpi3mr_get_iocstate(mrioc);
		if (iocstate != MRIOC_STATE_READY)
			break;
		msleep(1000);
	}

	ioc_info(mrioc, "%s :Pending I/Os after wait is: %d\n", __func__,
	    mpi3mr_get_fw_pending_ios(mrioc));
}

/**
 * mpi3mr_setup_divert_ws - Setup Divert IO flag for write same
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 * @scsiio_req: MPI3 SCSI IO request
 * @scsiio_flags: Pointer to MPI3 SCSI IO Flags
 * @wslen: write same max length
 *
 * Gets the values of unmap, ndob and number of blocks from a
 * write same scsi io and based on these values sets the divert
 * IO flag and the reason for diverting the IO to the firmware.
 *
 * Return: Nothing
 */
static inline void mpi3mr_setup_divert_ws(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req,
	u32 *scsiio_flags, u16 wslen)
{
	u8 unmap = 0, ndob = 0;
	u8 opcode = scmd->cmnd[0];
	u32 num_blocks = 0;
	u16 sa = (scmd->cmnd[8] << 8) | (scmd->cmnd[9]);

	if (opcode == WRITE_SAME_16) {
		unmap = scmd->cmnd[1] & 0x08;
		ndob = scmd->cmnd[1] & 0x01;
		num_blocks = get_unaligned_be32(scmd->cmnd + 10);
	} else if ((opcode == VARIABLE_LENGTH_CMD) && (sa == WRITE_SAME_32)) {
		unmap = scmd->cmnd[10] & 0x08;
		ndob = scmd->cmnd[10] & 0x01;
		num_blocks = get_unaligned_be32(scmd->cmnd + 28);
	} else
		return;

	if ((unmap) && (ndob) && (num_blocks > wslen)) {
		scsiio_req->msg_flags |=
		    MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE;
		*scsiio_flags |=
		    MPI3_SCSIIO_FLAGS_DIVERT_REASON_WRITE_SAME_TOO_LARGE;
	}
}

/**
 * mpi3mr_eh_host_reset - Host reset error handling callback
 * @scmd: SCSI command reference
 *
 * Issue a controller reset if the scmd is for a physical device;
 * if the scmd is for a RAID volume, then wait for
 * MPI3MR_RAID_ERRREC_RESET_TIMEOUT and check whether there are
 * any pending I/Os prior to issuing a reset to the controller.
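 * If no I/Os remain pending after the wait, the controller reset
 * is skipped and SUCCESS is returned.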
 *
 * Return: SUCCESS on successful reset else FAILED
 */
static int mpi3mr_eh_host_reset(struct scsi_cmnd *scmd)
{
	struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
	struct mpi3mr_stgt_priv_data *stgt_priv_data;
	struct mpi3mr_sdev_priv_data *sdev_priv_data;
	u8 dev_type = MPI3_DEVICE_DEVFORM_VD;
	int retval = FAILED, ret;

	sdev_priv_data = scmd->device->hostdata;
	if (sdev_priv_data && sdev_priv_data->tgt_priv_data) {
		stgt_priv_data = sdev_priv_data->tgt_priv_data;
		dev_type = stgt_priv_data->dev_type;
	}

	if (dev_type == MPI3_DEVICE_DEVFORM_VD) {
		mpi3mr_wait_for_host_io(mrioc,
		    MPI3MR_RAID_ERRREC_RESET_TIMEOUT);
		if (!mpi3mr_get_fw_pending_ios(mrioc)) {
			retval = SUCCESS;
			goto out;
		}
	}

	mpi3mr_print_pending_host_io(mrioc);
	ret = mpi3mr_soft_reset_handler(mrioc,
	    MPI3MR_RESET_FROM_EH_HOS, 1);
	if (ret)
		goto out;

	retval = SUCCESS;
out:
	sdev_printk(KERN_INFO, scmd->device,
	    "Host reset is %s for scmd(%p)\n",
	    ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	return retval;
}

/**
 * mpi3mr_eh_target_reset - Target reset error handling callback
 * @scmd: SCSI command reference
 *
 * Issue a target reset Task Management request and verify that
 * the scmd is terminated successfully and return the status
 * accordingly.
 *
 * Return: SUCCESS on successful termination of the scmd else
 * FAILED
 */
static int mpi3mr_eh_target_reset(struct scsi_cmnd *scmd)
{
	struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
	struct mpi3mr_stgt_priv_data *stgt_priv_data;
	struct mpi3mr_sdev_priv_data *sdev_priv_data;
	u16 dev_handle;
	u8 resp_code = 0;
	int retval = FAILED, ret = 0;

	sdev_printk(KERN_INFO, scmd->device,
	    "Attempting Target Reset! scmd(%p)\n", scmd);
	scsi_print_command(scmd);

	sdev_priv_data = scmd->device->hostdata;
	if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
		sdev_printk(KERN_INFO, scmd->device,
		    "SCSI device is not available\n");
		retval = SUCCESS;
		goto out;
	}

	stgt_priv_data = sdev_priv_data->tgt_priv_data;
	dev_handle = stgt_priv_data->dev_handle;
	if (stgt_priv_data->dev_removed) {
		struct scmd_priv *cmd_priv = scsi_cmd_priv(scmd);
		sdev_printk(KERN_INFO, scmd->device,
		    "%s:target(handle = 0x%04x) is removed, target reset is not issued\n",
		    mrioc->name, dev_handle);
		if (!cmd_priv->in_lld_scope || cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID)
			retval = SUCCESS;
		else
			retval = FAILED;
		goto out;
	}
	sdev_printk(KERN_INFO, scmd->device,
	    "Target Reset is issued to handle(0x%04x)\n",
	    dev_handle);

	ret = mpi3mr_issue_tm(mrioc,
	    MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, dev_handle,
	    sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
	    MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);

	if (ret)
		goto out;

	if (stgt_priv_data->pend_count) {
		sdev_printk(KERN_INFO, scmd->device,
		    "%s: target has %d pending commands, target reset is failed\n",
		    mrioc->name, stgt_priv_data->pend_count);
		goto out;
	}

	retval = SUCCESS;
out:
	sdev_printk(KERN_INFO, scmd->device,
	    "%s: target reset is %s for scmd(%p)\n", mrioc->name,
	    ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
"SUCCESS" : "FAILED"), scmd); 4115 4116 return retval; 4117 } 4118 4119 /** 4120 * mpi3mr_eh_dev_reset- Device reset error handling callback 4121 * @scmd: SCSI command reference 4122 * 4123 * Issue lun reset Task Management and verify the scmd is 4124 * terminated successfully and return status accordingly. 4125 * 4126 * Return: SUCCESS of successful termination of the scmd else 4127 * FAILED 4128 */ 4129 static int mpi3mr_eh_dev_reset(struct scsi_cmnd *scmd) 4130 { 4131 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host); 4132 struct mpi3mr_stgt_priv_data *stgt_priv_data; 4133 struct mpi3mr_sdev_priv_data *sdev_priv_data; 4134 u16 dev_handle; 4135 u8 resp_code = 0; 4136 int retval = FAILED, ret = 0; 4137 4138 sdev_printk(KERN_INFO, scmd->device, 4139 "Attempting Device(lun) Reset! scmd(%p)\n", scmd); 4140 scsi_print_command(scmd); 4141 4142 sdev_priv_data = scmd->device->hostdata; 4143 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) { 4144 sdev_printk(KERN_INFO, scmd->device, 4145 "SCSI device is not available\n"); 4146 retval = SUCCESS; 4147 goto out; 4148 } 4149 4150 stgt_priv_data = sdev_priv_data->tgt_priv_data; 4151 dev_handle = stgt_priv_data->dev_handle; 4152 if (stgt_priv_data->dev_removed) { 4153 struct scmd_priv *cmd_priv = scsi_cmd_priv(scmd); 4154 sdev_printk(KERN_INFO, scmd->device, 4155 "%s: device(handle = 0x%04x) is removed, device(LUN) reset is not issued\n", 4156 mrioc->name, dev_handle); 4157 if (!cmd_priv->in_lld_scope || cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID) 4158 retval = SUCCESS; 4159 else 4160 retval = FAILED; 4161 goto out; 4162 } 4163 sdev_printk(KERN_INFO, scmd->device, 4164 "Device(lun) Reset is issued to handle(0x%04x)\n", dev_handle); 4165 4166 ret = mpi3mr_issue_tm(mrioc, 4167 MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, dev_handle, 4168 sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS, 4169 MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd); 4170 4171 if (ret) 4172 goto out; 4173 4174 if (sdev_priv_data->pend_count) { 4175 sdev_printk(KERN_INFO, scmd->device, 4176 "%s: device has %d pending commands, device(LUN) reset is failed\n", 4177 mrioc->name, sdev_priv_data->pend_count); 4178 goto out; 4179 } 4180 retval = SUCCESS; 4181 out: 4182 sdev_printk(KERN_INFO, scmd->device, 4183 "%s: device(LUN) reset is %s for scmd(%p)\n", mrioc->name, 4184 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); 4185 4186 return retval; 4187 } 4188 4189 /** 4190 * mpi3mr_scan_start - Scan start callback handler 4191 * @shost: SCSI host reference 4192 * 4193 * Issue port enable request asynchronously. 4194 * 4195 * Return: Nothing 4196 */ 4197 static void mpi3mr_scan_start(struct Scsi_Host *shost) 4198 { 4199 struct mpi3mr_ioc *mrioc = shost_priv(shost); 4200 4201 mrioc->scan_started = 1; 4202 ioc_info(mrioc, "%s :Issuing Port Enable\n", __func__); 4203 if (mpi3mr_issue_port_enable(mrioc, 1)) { 4204 ioc_err(mrioc, "%s :Issuing port enable failed\n", __func__); 4205 mrioc->scan_started = 0; 4206 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR; 4207 } 4208 } 4209 4210 /** 4211 * mpi3mr_scan_finished - Scan finished callback handler 4212 * @shost: SCSI host reference 4213 * @time: Jiffies from the scan start 4214 * 4215 * Checks whether the port enable is completed or timedout or 4216 * failed and set the scan status accordingly after taking any 4217 * recovery if required. 
4218 * 4219 * Return: 1 on scan finished or timed out, 0 for in progress 4220 */ 4221 static int mpi3mr_scan_finished(struct Scsi_Host *shost, 4222 unsigned long time) 4223 { 4224 struct mpi3mr_ioc *mrioc = shost_priv(shost); 4225 u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT; 4226 u32 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 4227 4228 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) || 4229 (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) { 4230 ioc_err(mrioc, "port enable failed due to fault or reset\n"); 4231 mpi3mr_print_fault_info(mrioc); 4232 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR; 4233 mrioc->scan_started = 0; 4234 mrioc->init_cmds.is_waiting = 0; 4235 mrioc->init_cmds.callback = NULL; 4236 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 4237 } 4238 4239 if (time >= (pe_timeout * HZ)) { 4240 ioc_err(mrioc, "port enable failed due to time out\n"); 4241 mpi3mr_check_rh_fault_ioc(mrioc, 4242 MPI3MR_RESET_FROM_PE_TIMEOUT); 4243 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR; 4244 mrioc->scan_started = 0; 4245 mrioc->init_cmds.is_waiting = 0; 4246 mrioc->init_cmds.callback = NULL; 4247 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 4248 } 4249 4250 if (mrioc->scan_started) 4251 return 0; 4252 4253 if (mrioc->scan_failed) { 4254 ioc_err(mrioc, 4255 "port enable failed with status=0x%04x\n", 4256 mrioc->scan_failed); 4257 } else 4258 ioc_info(mrioc, "port enable is successfully completed\n"); 4259 4260 mpi3mr_start_watchdog(mrioc); 4261 mrioc->is_driver_loading = 0; 4262 mrioc->stop_bsgs = 0; 4263 return 1; 4264 } 4265 4266 /** 4267 * mpi3mr_slave_destroy - Slave destroy callback handler 4268 * @sdev: SCSI device reference 4269 * 4270 * Cleanup and free per device(lun) private data. 4271 * 4272 * Return: Nothing. 4273 */ 4274 static void mpi3mr_slave_destroy(struct scsi_device *sdev) 4275 { 4276 struct Scsi_Host *shost; 4277 struct mpi3mr_ioc *mrioc; 4278 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 4279 struct mpi3mr_tgt_dev *tgt_dev = NULL; 4280 unsigned long flags; 4281 struct scsi_target *starget; 4282 struct sas_rphy *rphy = NULL; 4283 4284 if (!sdev->hostdata) 4285 return; 4286 4287 starget = scsi_target(sdev); 4288 shost = dev_to_shost(&starget->dev); 4289 mrioc = shost_priv(shost); 4290 scsi_tgt_priv_data = starget->hostdata; 4291 4292 scsi_tgt_priv_data->num_luns--; 4293 4294 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 4295 if (starget->channel == mrioc->scsi_device_channel) 4296 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 4297 else if (mrioc->sas_transport_enabled && !starget->channel) { 4298 rphy = dev_to_rphy(starget->dev.parent); 4299 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc, 4300 rphy->identify.sas_address, rphy); 4301 } 4302 4303 if (tgt_dev && (!scsi_tgt_priv_data->num_luns)) 4304 tgt_dev->starget = NULL; 4305 if (tgt_dev) 4306 mpi3mr_tgtdev_put(tgt_dev); 4307 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4308 4309 kfree(sdev->hostdata); 4310 sdev->hostdata = NULL; 4311 } 4312 4313 /** 4314 * mpi3mr_target_destroy - Target destroy callback handler 4315 * @starget: SCSI target reference 4316 * 4317 * Cleanup and free per target private data. 4318 * 4319 * Return: Nothing. 
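 * Note: this drops both the lookup reference and the reference
 * held on the tgtdev by the target private data.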
4320 */ 4321 static void mpi3mr_target_destroy(struct scsi_target *starget) 4322 { 4323 struct Scsi_Host *shost; 4324 struct mpi3mr_ioc *mrioc; 4325 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 4326 struct mpi3mr_tgt_dev *tgt_dev; 4327 unsigned long flags; 4328 4329 if (!starget->hostdata) 4330 return; 4331 4332 shost = dev_to_shost(&starget->dev); 4333 mrioc = shost_priv(shost); 4334 scsi_tgt_priv_data = starget->hostdata; 4335 4336 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 4337 tgt_dev = __mpi3mr_get_tgtdev_from_tgtpriv(mrioc, scsi_tgt_priv_data); 4338 if (tgt_dev && (tgt_dev->starget == starget) && 4339 (tgt_dev->perst_id == starget->id)) 4340 tgt_dev->starget = NULL; 4341 if (tgt_dev) { 4342 scsi_tgt_priv_data->tgt_dev = NULL; 4343 scsi_tgt_priv_data->perst_id = 0; 4344 mpi3mr_tgtdev_put(tgt_dev); 4345 mpi3mr_tgtdev_put(tgt_dev); 4346 } 4347 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4348 4349 kfree(starget->hostdata); 4350 starget->hostdata = NULL; 4351 } 4352 4353 /** 4354 * mpi3mr_slave_configure - Slave configure callback handler 4355 * @sdev: SCSI device reference 4356 * 4357 * Configure queue depth, max hardware sectors and virt boundary 4358 * as required 4359 * 4360 * Return: 0 always. 4361 */ 4362 static int mpi3mr_slave_configure(struct scsi_device *sdev) 4363 { 4364 struct scsi_target *starget; 4365 struct Scsi_Host *shost; 4366 struct mpi3mr_ioc *mrioc; 4367 struct mpi3mr_tgt_dev *tgt_dev = NULL; 4368 unsigned long flags; 4369 int retval = 0; 4370 struct sas_rphy *rphy = NULL; 4371 4372 starget = scsi_target(sdev); 4373 shost = dev_to_shost(&starget->dev); 4374 mrioc = shost_priv(shost); 4375 4376 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 4377 if (starget->channel == mrioc->scsi_device_channel) 4378 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 4379 else if (mrioc->sas_transport_enabled && !starget->channel) { 4380 rphy = dev_to_rphy(starget->dev.parent); 4381 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc, 4382 rphy->identify.sas_address, rphy); 4383 } 4384 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4385 if (!tgt_dev) 4386 return -ENXIO; 4387 4388 mpi3mr_change_queue_depth(sdev, tgt_dev->q_depth); 4389 4390 sdev->eh_timeout = MPI3MR_EH_SCMD_TIMEOUT; 4391 blk_queue_rq_timeout(sdev->request_queue, MPI3MR_SCMD_TIMEOUT); 4392 4393 switch (tgt_dev->dev_type) { 4394 case MPI3_DEVICE_DEVFORM_PCIE: 4395 /*The block layer hw sector size = 512*/ 4396 if ((tgt_dev->dev_spec.pcie_inf.dev_info & 4397 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) == 4398 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) { 4399 blk_queue_max_hw_sectors(sdev->request_queue, 4400 tgt_dev->dev_spec.pcie_inf.mdts / 512); 4401 if (tgt_dev->dev_spec.pcie_inf.pgsz == 0) 4402 blk_queue_virt_boundary(sdev->request_queue, 4403 ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1)); 4404 else 4405 blk_queue_virt_boundary(sdev->request_queue, 4406 ((1 << tgt_dev->dev_spec.pcie_inf.pgsz) - 1)); 4407 } 4408 break; 4409 default: 4410 break; 4411 } 4412 4413 mpi3mr_tgtdev_put(tgt_dev); 4414 4415 return retval; 4416 } 4417 4418 /** 4419 * mpi3mr_slave_alloc -Slave alloc callback handler 4420 * @sdev: SCSI device reference 4421 * 4422 * Allocate per device(lun) private data and initialize it. 4423 * 4424 * Return: 0 on success -ENOMEM on memory allocation failure. 
4425 */ 4426 static int mpi3mr_slave_alloc(struct scsi_device *sdev) 4427 { 4428 struct Scsi_Host *shost; 4429 struct mpi3mr_ioc *mrioc; 4430 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 4431 struct mpi3mr_tgt_dev *tgt_dev = NULL; 4432 struct mpi3mr_sdev_priv_data *scsi_dev_priv_data; 4433 unsigned long flags; 4434 struct scsi_target *starget; 4435 int retval = 0; 4436 struct sas_rphy *rphy = NULL; 4437 4438 starget = scsi_target(sdev); 4439 shost = dev_to_shost(&starget->dev); 4440 mrioc = shost_priv(shost); 4441 scsi_tgt_priv_data = starget->hostdata; 4442 4443 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 4444 4445 if (starget->channel == mrioc->scsi_device_channel) 4446 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 4447 else if (mrioc->sas_transport_enabled && !starget->channel) { 4448 rphy = dev_to_rphy(starget->dev.parent); 4449 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc, 4450 rphy->identify.sas_address, rphy); 4451 } 4452 4453 if (tgt_dev) { 4454 if (tgt_dev->starget == NULL) 4455 tgt_dev->starget = starget; 4456 mpi3mr_tgtdev_put(tgt_dev); 4457 retval = 0; 4458 } else { 4459 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4460 return -ENXIO; 4461 } 4462 4463 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4464 4465 scsi_dev_priv_data = kzalloc(sizeof(*scsi_dev_priv_data), GFP_KERNEL); 4466 if (!scsi_dev_priv_data) 4467 return -ENOMEM; 4468 4469 scsi_dev_priv_data->lun_id = sdev->lun; 4470 scsi_dev_priv_data->tgt_priv_data = scsi_tgt_priv_data; 4471 sdev->hostdata = scsi_dev_priv_data; 4472 4473 scsi_tgt_priv_data->num_luns++; 4474 4475 return retval; 4476 } 4477 4478 /** 4479 * mpi3mr_target_alloc - Target alloc callback handler 4480 * @starget: SCSI target reference 4481 * 4482 * Allocate per target private data and initialize it. 4483 * 4484 * Return: 0 on success -ENOMEM on memory allocation failure. 
 */
static int mpi3mr_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct mpi3mr_ioc *mrioc = shost_priv(shost);
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
	struct mpi3mr_tgt_dev *tgt_dev;
	unsigned long flags;
	int retval = 0;
	struct sas_rphy *rphy = NULL;

	scsi_tgt_priv_data = kzalloc(sizeof(*scsi_tgt_priv_data), GFP_KERNEL);
	if (!scsi_tgt_priv_data)
		return -ENOMEM;

	starget->hostdata = scsi_tgt_priv_data;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	if (starget->channel == mrioc->scsi_device_channel) {
		tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
		if (tgt_dev && !tgt_dev->is_hidden) {
			scsi_tgt_priv_data->starget = starget;
			scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
			scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
			scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
			scsi_tgt_priv_data->tgt_dev = tgt_dev;
			tgt_dev->starget = starget;
			atomic_set(&scsi_tgt_priv_data->block_io, 0);
			retval = 0;
			if ((tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_PCIE) &&
			    ((tgt_dev->dev_spec.pcie_inf.dev_info &
			    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
			    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
			    ((tgt_dev->dev_spec.pcie_inf.dev_info &
			    MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_MASK) !=
			    MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_0))
				scsi_tgt_priv_data->dev_nvme_dif = 1;
			scsi_tgt_priv_data->io_throttle_enabled = tgt_dev->io_throttle_enabled;
			scsi_tgt_priv_data->wslen = tgt_dev->wslen;
			if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_VD)
				scsi_tgt_priv_data->throttle_group = tgt_dev->dev_spec.vd_inf.tg;
		} else
			retval = -ENXIO;
	} else if (mrioc->sas_transport_enabled && !starget->channel) {
		rphy = dev_to_rphy(starget->dev.parent);
		tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
		    rphy->identify.sas_address, rphy);
		if (tgt_dev && !tgt_dev->is_hidden && !tgt_dev->non_stl &&
		    (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA)) {
			scsi_tgt_priv_data->starget = starget;
			scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
			scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
			scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
			scsi_tgt_priv_data->tgt_dev = tgt_dev;
			scsi_tgt_priv_data->io_throttle_enabled = tgt_dev->io_throttle_enabled;
			scsi_tgt_priv_data->wslen = tgt_dev->wslen;
			tgt_dev->starget = starget;
			atomic_set(&scsi_tgt_priv_data->block_io, 0);
			retval = 0;
		} else
			retval = -ENXIO;
	}
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);

	return retval;
}

/**
 * mpi3mr_check_return_unmap - Whether an unmap is allowed
 * @mrioc: Adapter instance reference
 * @scmd: SCSI Command reference
 *
 * The controller hardware cannot handle certain unmap commands
 * for NVMe drives; this routine checks for those, returns true
 * and completes the SCSI command with proper status and sense
 * data.
 *
 * Return: TRUE for not allowed unmap, FALSE otherwise.
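 * Note: on controllers with a non-zero PCI revision the command
 * is always allowed through; at most an unaligned parameter list
 * length is truncated so that the descriptor portion becomes a
 * multiple of 16 bytes.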
 */
static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd)
{
	unsigned char *buf;
	u16 param_len, desc_len, trunc_param_len;

	trunc_param_len = param_len = get_unaligned_be16(scmd->cmnd + 7);

	if (mrioc->pdev->revision) {
		if ((param_len > 24) && ((param_len - 8) & 0xF)) {
			trunc_param_len -= (param_len - 8) & 0xF;
			dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR);
			dprint_scsi_err(mrioc,
			    "truncating param_len from (%d) to (%d)\n",
			    param_len, trunc_param_len);
			put_unaligned_be16(trunc_param_len, scmd->cmnd + 7);
			dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR);
		}
		return false;
	}

	if (!param_len) {
		ioc_warn(mrioc,
		    "%s: cdb received with zero parameter length\n",
		    __func__);
		scsi_print_command(scmd);
		scmd->result = DID_OK << 16;
		scsi_done(scmd);
		return true;
	}

	if (param_len < 24) {
		ioc_warn(mrioc,
		    "%s: cdb received with invalid param_len: %d\n",
		    __func__, param_len);
		scsi_print_command(scmd);
		scmd->result = SAM_STAT_CHECK_CONDITION;
		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
		    0x1A, 0);
		scsi_done(scmd);
		return true;
	}
	if (param_len != scsi_bufflen(scmd)) {
		ioc_warn(mrioc,
		    "%s: cdb received with param_len: %d bufflen: %d\n",
		    __func__, param_len, scsi_bufflen(scmd));
		scsi_print_command(scmd);
		scmd->result = SAM_STAT_CHECK_CONDITION;
		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
		    0x1A, 0);
		scsi_done(scmd);
		return true;
	}
	buf = kzalloc(scsi_bufflen(scmd), GFP_ATOMIC);
	if (!buf) {
		scsi_print_command(scmd);
		scmd->result = SAM_STAT_CHECK_CONDITION;
		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
		    0x55, 0x03);
		scsi_done(scmd);
		return true;
	}
	scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
	desc_len = get_unaligned_be16(&buf[2]);

	if (desc_len < 16) {
		ioc_warn(mrioc,
		    "%s: Invalid descriptor length in param list: %d\n",
		    __func__, desc_len);
		scsi_print_command(scmd);
		scmd->result = SAM_STAT_CHECK_CONDITION;
		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
		    0x26, 0);
		scsi_done(scmd);
		kfree(buf);
		return true;
	}

	if (param_len > (desc_len + 8)) {
		trunc_param_len = desc_len + 8;
		scsi_print_command(scmd);
		dprint_scsi_err(mrioc,
		    "truncating param_len(%d) to desc_len+8(%d)\n",
		    param_len, trunc_param_len);
		put_unaligned_be16(trunc_param_len, scmd->cmnd + 7);
		scsi_print_command(scmd);
	}

	kfree(buf);
	return false;
}

/**
 * mpi3mr_allow_scmd_to_fw - Whether a command is allowed during shutdown
 * @scmd: SCSI Command reference
 *
 * Checks whether a cdb is allowed during shutdown or not.
 *
 * Return: TRUE for allowed commands, FALSE otherwise.
 */
inline bool mpi3mr_allow_scmd_to_fw(struct scsi_cmnd *scmd)
{
	switch (scmd->cmnd[0]) {
	case SYNCHRONIZE_CACHE:
	case START_STOP:
		return true;
	default:
		return false;
	}
}

/**
 * mpi3mr_qcmd - I/O request dispatcher
 * @shost: SCSI Host reference
 * @scmd: SCSI Command reference
 *
 * Issues the SCSI Command as an MPI3 request.
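 * Builds the MPI3 SCSI IO request (EEDP settings, CDB, SGLs and
 * flags), accounts large I/Os against the IOC and throttle group
 * limits, and posts the request to the selected operational
 * request queue.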
4682 * 4683 * Return: 0 on successful queueing of the request or if the 4684 * request is completed with failure. 4685 * SCSI_MLQUEUE_DEVICE_BUSY when the device is busy. 4686 * SCSI_MLQUEUE_HOST_BUSY when the host queue is full. 4687 */ 4688 static int mpi3mr_qcmd(struct Scsi_Host *shost, 4689 struct scsi_cmnd *scmd) 4690 { 4691 struct mpi3mr_ioc *mrioc = shost_priv(shost); 4692 struct mpi3mr_stgt_priv_data *stgt_priv_data; 4693 struct mpi3mr_sdev_priv_data *sdev_priv_data; 4694 struct scmd_priv *scmd_priv_data = NULL; 4695 struct mpi3_scsi_io_request *scsiio_req = NULL; 4696 struct op_req_qinfo *op_req_q = NULL; 4697 int retval = 0; 4698 u16 dev_handle; 4699 u16 host_tag; 4700 u32 scsiio_flags = 0, data_len_blks = 0; 4701 struct request *rq = scsi_cmd_to_rq(scmd); 4702 int iprio_class; 4703 u8 is_pcie_dev = 0; 4704 u32 tracked_io_sz = 0; 4705 u32 ioc_pend_data_len = 0, tg_pend_data_len = 0; 4706 struct mpi3mr_throttle_group_info *tg = NULL; 4707 4708 if (mrioc->unrecoverable) { 4709 scmd->result = DID_ERROR << 16; 4710 scsi_done(scmd); 4711 goto out; 4712 } 4713 4714 sdev_priv_data = scmd->device->hostdata; 4715 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) { 4716 scmd->result = DID_NO_CONNECT << 16; 4717 scsi_done(scmd); 4718 goto out; 4719 } 4720 4721 if (mrioc->stop_drv_processing && 4722 !(mpi3mr_allow_scmd_to_fw(scmd))) { 4723 scmd->result = DID_NO_CONNECT << 16; 4724 scsi_done(scmd); 4725 goto out; 4726 } 4727 4728 stgt_priv_data = sdev_priv_data->tgt_priv_data; 4729 dev_handle = stgt_priv_data->dev_handle; 4730 4731 /* Avoid error handling escalation when device is removed or blocked */ 4732 4733 if (scmd->device->host->shost_state == SHOST_RECOVERY && 4734 scmd->cmnd[0] == TEST_UNIT_READY && 4735 (stgt_priv_data->dev_removed || (dev_handle == MPI3MR_INVALID_DEV_HANDLE))) { 4736 scsi_build_sense(scmd, 0, UNIT_ATTENTION, 0x29, 0x07); 4737 scsi_done(scmd); 4738 goto out; 4739 } 4740 4741 if (mrioc->reset_in_progress) { 4742 retval = SCSI_MLQUEUE_HOST_BUSY; 4743 goto out; 4744 } 4745 4746 if (atomic_read(&stgt_priv_data->block_io)) { 4747 if (mrioc->stop_drv_processing) { 4748 scmd->result = DID_NO_CONNECT << 16; 4749 scsi_done(scmd); 4750 goto out; 4751 } 4752 retval = SCSI_MLQUEUE_DEVICE_BUSY; 4753 goto out; 4754 } 4755 4756 if (dev_handle == MPI3MR_INVALID_DEV_HANDLE) { 4757 scmd->result = DID_NO_CONNECT << 16; 4758 scsi_done(scmd); 4759 goto out; 4760 } 4761 if (stgt_priv_data->dev_removed) { 4762 scmd->result = DID_NO_CONNECT << 16; 4763 scsi_done(scmd); 4764 goto out; 4765 } 4766 4767 if (stgt_priv_data->dev_type == MPI3_DEVICE_DEVFORM_PCIE) 4768 is_pcie_dev = 1; 4769 if ((scmd->cmnd[0] == UNMAP) && is_pcie_dev && 4770 (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) && 4771 mpi3mr_check_return_unmap(mrioc, scmd)) 4772 goto out; 4773 4774 host_tag = mpi3mr_host_tag_for_scmd(mrioc, scmd); 4775 if (host_tag == MPI3MR_HOSTTAG_INVALID) { 4776 scmd->result = DID_ERROR << 16; 4777 scsi_done(scmd); 4778 goto out; 4779 } 4780 4781 if (scmd->sc_data_direction == DMA_FROM_DEVICE) 4782 scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_READ; 4783 else if (scmd->sc_data_direction == DMA_TO_DEVICE) 4784 scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE; 4785 else 4786 scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER; 4787 4788 scsiio_flags |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ; 4789 4790 if (sdev_priv_data->ncq_prio_enable) { 4791 iprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq)); 4792 if (iprio_class == IOPRIO_CLASS_RT) 4793 scsiio_flags |= 1 << 
MPI3_SCSIIO_FLAGS_CMDPRI_SHIFT; 4794 } 4795 4796 if (scmd->cmd_len > 16) 4797 scsiio_flags |= MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16; 4798 4799 scmd_priv_data = scsi_cmd_priv(scmd); 4800 memset(scmd_priv_data->mpi3mr_scsiio_req, 0, MPI3MR_ADMIN_REQ_FRAME_SZ); 4801 scsiio_req = (struct mpi3_scsi_io_request *)scmd_priv_data->mpi3mr_scsiio_req; 4802 scsiio_req->function = MPI3_FUNCTION_SCSI_IO; 4803 scsiio_req->host_tag = cpu_to_le16(host_tag); 4804 4805 mpi3mr_setup_eedp(mrioc, scmd, scsiio_req); 4806 4807 if (stgt_priv_data->wslen) 4808 mpi3mr_setup_divert_ws(mrioc, scmd, scsiio_req, &scsiio_flags, 4809 stgt_priv_data->wslen); 4810 4811 memcpy(scsiio_req->cdb.cdb32, scmd->cmnd, scmd->cmd_len); 4812 scsiio_req->data_length = cpu_to_le32(scsi_bufflen(scmd)); 4813 scsiio_req->dev_handle = cpu_to_le16(dev_handle); 4814 scsiio_req->flags = cpu_to_le32(scsiio_flags); 4815 int_to_scsilun(sdev_priv_data->lun_id, 4816 (struct scsi_lun *)scsiio_req->lun); 4817 4818 if (mpi3mr_build_sg_scmd(mrioc, scmd, scsiio_req)) { 4819 mpi3mr_clear_scmd_priv(mrioc, scmd); 4820 retval = SCSI_MLQUEUE_HOST_BUSY; 4821 goto out; 4822 } 4823 op_req_q = &mrioc->req_qinfo[scmd_priv_data->req_q_idx]; 4824 data_len_blks = scsi_bufflen(scmd) >> 9; 4825 if ((data_len_blks >= mrioc->io_throttle_data_length) && 4826 stgt_priv_data->io_throttle_enabled) { 4827 tracked_io_sz = data_len_blks; 4828 tg = stgt_priv_data->throttle_group; 4829 if (tg) { 4830 ioc_pend_data_len = atomic_add_return(data_len_blks, 4831 &mrioc->pend_large_data_sz); 4832 tg_pend_data_len = atomic_add_return(data_len_blks, 4833 &tg->pend_large_data_sz); 4834 if (!tg->io_divert && ((ioc_pend_data_len >= 4835 mrioc->io_throttle_high) || 4836 (tg_pend_data_len >= tg->high))) { 4837 tg->io_divert = 1; 4838 tg->need_qd_reduction = 1; 4839 mpi3mr_set_io_divert_for_all_vd_in_tg(mrioc, 4840 tg, 1); 4841 mpi3mr_queue_qd_reduction_event(mrioc, tg); 4842 } 4843 } else { 4844 ioc_pend_data_len = atomic_add_return(data_len_blks, 4845 &mrioc->pend_large_data_sz); 4846 if (ioc_pend_data_len >= mrioc->io_throttle_high) 4847 stgt_priv_data->io_divert = 1; 4848 } 4849 } 4850 4851 if (stgt_priv_data->io_divert) { 4852 scsiio_req->msg_flags |= 4853 MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE; 4854 scsiio_flags |= MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING; 4855 } 4856 scsiio_req->flags = cpu_to_le32(scsiio_flags); 4857 4858 if (mpi3mr_op_request_post(mrioc, op_req_q, 4859 scmd_priv_data->mpi3mr_scsiio_req)) { 4860 mpi3mr_clear_scmd_priv(mrioc, scmd); 4861 retval = SCSI_MLQUEUE_HOST_BUSY; 4862 if (tracked_io_sz) { 4863 atomic_sub(tracked_io_sz, &mrioc->pend_large_data_sz); 4864 if (tg) 4865 atomic_sub(tracked_io_sz, 4866 &tg->pend_large_data_sz); 4867 } 4868 goto out; 4869 } 4870 4871 out: 4872 return retval; 4873 } 4874 4875 static const struct scsi_host_template mpi3mr_driver_template = { 4876 .module = THIS_MODULE, 4877 .name = "MPI3 Storage Controller", 4878 .proc_name = MPI3MR_DRIVER_NAME, 4879 .queuecommand = mpi3mr_qcmd, 4880 .target_alloc = mpi3mr_target_alloc, 4881 .slave_alloc = mpi3mr_slave_alloc, 4882 .slave_configure = mpi3mr_slave_configure, 4883 .target_destroy = mpi3mr_target_destroy, 4884 .slave_destroy = mpi3mr_slave_destroy, 4885 .scan_finished = mpi3mr_scan_finished, 4886 .scan_start = mpi3mr_scan_start, 4887 .change_queue_depth = mpi3mr_change_queue_depth, 4888 .eh_device_reset_handler = mpi3mr_eh_dev_reset, 4889 .eh_target_reset_handler = mpi3mr_eh_target_reset, 4890 .eh_host_reset_handler = mpi3mr_eh_host_reset, 4891 .bios_param = mpi3mr_bios_param, 4892 
	.map_queues = mpi3mr_map_queues,
	.mq_poll = mpi3mr_blk_mq_poll,
	.no_write_same = 1,
	.can_queue = 1,
	.this_id = -1,
	.sg_tablesize = MPI3MR_DEFAULT_SGL_ENTRIES,
	/* max xfer supported is 1M (2K in 512 byte sized sectors) */
	.max_sectors = (MPI3MR_DEFAULT_MAX_IO_SIZE / 512),
	.cmd_per_lun = MPI3MR_MAX_CMDS_LUN,
	.max_segment_size = 0xffffffff,
	.track_queue_depth = 1,
	.cmd_size = sizeof(struct scmd_priv),
	.shost_groups = mpi3mr_host_groups,
	.sdev_groups = mpi3mr_dev_groups,
};

/**
 * mpi3mr_init_drv_cmd - Initialize internal command tracker
 * @cmdptr: Internal command tracker
 * @host_tag: Host tag used for the specific command
 *
 * Initialize the internal command tracker structure with
 * specified host tag.
 *
 * Return: Nothing.
 */
static inline void mpi3mr_init_drv_cmd(struct mpi3mr_drv_cmd *cmdptr,
	u16 host_tag)
{
	mutex_init(&cmdptr->mutex);
	cmdptr->reply = NULL;
	cmdptr->state = MPI3MR_CMD_NOTUSED;
	cmdptr->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	cmdptr->host_tag = host_tag;
}

/**
 * osintfc_mrioc_security_status - Check controller secure status
 * @pdev: PCI device instance
 *
 * Read the Device Serial Number capability from PCI config
 * space and decide whether the controller is secure or not.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int
osintfc_mrioc_security_status(struct pci_dev *pdev)
{
	u32 cap_data;
	int base;
	u32 ctlr_status;
	u32 debug_status;
	int retval = 0;

	base = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
	if (!base) {
		dev_err(&pdev->dev,
		    "%s: PCI_EXT_CAP_ID_DSN is not supported\n", __func__);
		return -1;
	}

	pci_read_config_dword(pdev, base + 4, &cap_data);

	debug_status = cap_data & MPI3MR_CTLR_SECURE_DBG_STATUS_MASK;
	ctlr_status = cap_data & MPI3MR_CTLR_SECURITY_STATUS_MASK;

	switch (ctlr_status) {
	case MPI3MR_INVALID_DEVICE:
		dev_err(&pdev->dev,
		    "%s: Non secure ctlr (Invalid) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
		    __func__, pdev->device, pdev->subsystem_vendor,
		    pdev->subsystem_device);
		retval = -1;
		break;
	case MPI3MR_CONFIG_SECURE_DEVICE:
		if (!debug_status)
			dev_info(&pdev->dev,
			    "%s: Config secure ctlr is detected\n",
			    __func__);
		break;
	case MPI3MR_HARD_SECURE_DEVICE:
		break;
	case MPI3MR_TAMPERED_DEVICE:
		dev_err(&pdev->dev,
		    "%s: Non secure ctlr (Tampered) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
		    __func__, pdev->device, pdev->subsystem_vendor,
		    pdev->subsystem_device);
		retval = -1;
		break;
	default:
		retval = -1;
		break;
	}

	if (!retval && debug_status) {
		dev_err(&pdev->dev,
		    "%s: Non secure ctlr (Secure Dbg) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
		    __func__, pdev->device, pdev->subsystem_vendor,
		    pdev->subsystem_device);
		retval = -1;
	}

	return retval;
}

/**
 * mpi3mr_probe - PCI probe callback
 * @pdev: PCI device instance
 * @id: PCI device ID details
 *
 * Controller initialization routine. Checks the security status
 * of the controller and, if it is invalid or tampered, returns
 * from the probe without initializing the controller.
 * Otherwise, allocates the per adapter instance through
 * shost_priv, initializes controller specific data structures,
 * initializes the controller hardware and adds the shost to the
 * SCSI subsystem.
 *
 * Return: 0 on success, non-zero on failure.
 */
static int
mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mpi3mr_ioc *mrioc = NULL;
	struct Scsi_Host *shost = NULL;
	int retval = 0, i;

	if (osintfc_mrioc_security_status(pdev)) {
		warn_non_secure_ctlr = 1;
		return 1; /* For Invalid and Tampered device */
	}

	shost = scsi_host_alloc(&mpi3mr_driver_template,
	    sizeof(struct mpi3mr_ioc));
	if (!shost) {
		retval = -ENODEV;
		goto shost_failed;
	}

	mrioc = shost_priv(shost);
	mrioc->id = mrioc_ids++;
	sprintf(mrioc->driver_name, "%s", MPI3MR_DRIVER_NAME);
	sprintf(mrioc->name, "%s%d", mrioc->driver_name, mrioc->id);
	INIT_LIST_HEAD(&mrioc->list);
	spin_lock(&mrioc_list_lock);
	list_add_tail(&mrioc->list, &mrioc_list);
	spin_unlock(&mrioc_list_lock);

	spin_lock_init(&mrioc->admin_req_lock);
	spin_lock_init(&mrioc->reply_free_queue_lock);
	spin_lock_init(&mrioc->sbq_lock);
	spin_lock_init(&mrioc->fwevt_lock);
	spin_lock_init(&mrioc->tgtdev_lock);
	spin_lock_init(&mrioc->watchdog_lock);
	spin_lock_init(&mrioc->chain_buf_lock);
	spin_lock_init(&mrioc->sas_node_lock);

	INIT_LIST_HEAD(&mrioc->fwevt_list);
	INIT_LIST_HEAD(&mrioc->tgtdev_list);
	INIT_LIST_HEAD(&mrioc->delayed_rmhs_list);
	INIT_LIST_HEAD(&mrioc->delayed_evtack_cmds_list);
	INIT_LIST_HEAD(&mrioc->sas_expander_list);
	INIT_LIST_HEAD(&mrioc->hba_port_table_list);
	INIT_LIST_HEAD(&mrioc->enclosure_list);

	mutex_init(&mrioc->reset_mutex);
	mpi3mr_init_drv_cmd(&mrioc->init_cmds, MPI3MR_HOSTTAG_INITCMDS);
	mpi3mr_init_drv_cmd(&mrioc->host_tm_cmds, MPI3MR_HOSTTAG_BLK_TMS);
	mpi3mr_init_drv_cmd(&mrioc->bsg_cmds, MPI3MR_HOSTTAG_BSG_CMDS);
	mpi3mr_init_drv_cmd(&mrioc->cfg_cmds, MPI3MR_HOSTTAG_CFG_CMDS);
	mpi3mr_init_drv_cmd(&mrioc->transport_cmds,
	    MPI3MR_HOSTTAG_TRANSPORT_CMDS);

	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
		mpi3mr_init_drv_cmd(&mrioc->dev_rmhs_cmds[i],
		    MPI3MR_HOSTTAG_DEVRMCMD_MIN + i);

	for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
		mpi3mr_init_drv_cmd(&mrioc->evtack_cmds[i],
		    MPI3MR_HOSTTAG_EVTACKCMD_MIN + i);

	if (pdev->revision)
		mrioc->enable_segqueue = true;

	init_waitqueue_head(&mrioc->reset_waitq);
	mrioc->logging_level = logging_level;
	mrioc->shost = shost;
	mrioc->pdev = pdev;
	mrioc->stop_bsgs = 1;

	mrioc->max_sgl_entries = max_sgl_entries;
	if (max_sgl_entries > MPI3MR_MAX_SGL_ENTRIES)
		mrioc->max_sgl_entries = MPI3MR_MAX_SGL_ENTRIES;
	else if (max_sgl_entries < MPI3MR_DEFAULT_SGL_ENTRIES)
		mrioc->max_sgl_entries = MPI3MR_DEFAULT_SGL_ENTRIES;
	else {
		mrioc->max_sgl_entries /= MPI3MR_DEFAULT_SGL_ENTRIES;
		mrioc->max_sgl_entries *= MPI3MR_DEFAULT_SGL_ENTRIES;
	}

	/* init shost parameters */
	shost->max_cmd_len = MPI3MR_MAX_CDB_LENGTH;
	shost->max_lun = -1;
	shost->unique_id = mrioc->id;

	shost->max_channel = 0;
	shost->max_id = 0xFFFFFFFF;

	shost->host_tagset = 1;

	if (prot_mask >= 0)
		scsi_host_set_prot(shost, prot_mask);
	else {
		prot_mask = SHOST_DIF_TYPE1_PROTECTION
		    | SHOST_DIF_TYPE2_PROTECTION
		    | SHOST_DIF_TYPE3_PROTECTION;
		scsi_host_set_prot(shost, prot_mask);
	}

	ioc_info(mrioc,
	    "%s :host protection capabilities enabled %s%s%s%s%s%s%s\n",
	    __func__,
	    (prot_mask & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
	    (prot_mask & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
	    (prot_mask & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
	    (prot_mask & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
	    (prot_mask & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
	    (prot_mask & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
	    (prot_mask & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (prot_guard_mask)
		scsi_host_set_guard(shost, (prot_guard_mask & 3));
	else
		scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);

	snprintf(mrioc->fwevt_worker_name, sizeof(mrioc->fwevt_worker_name),
	    "%s%d_fwevt_wrkr", mrioc->driver_name, mrioc->id);
	mrioc->fwevt_worker_thread = alloc_ordered_workqueue(
	    mrioc->fwevt_worker_name, 0);
	if (!mrioc->fwevt_worker_thread) {
		ioc_err(mrioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		retval = -ENODEV;
		goto fwevtthread_failed;
	}

	mrioc->is_driver_loading = 1;
	mrioc->cpu_count = num_online_cpus();
	if (mpi3mr_setup_resources(mrioc)) {
		ioc_err(mrioc, "setup resources failed\n");
		retval = -ENODEV;
		goto resource_alloc_failed;
	}
	if (mpi3mr_init_ioc(mrioc)) {
		ioc_err(mrioc, "initializing IOC failed\n");
		retval = -ENODEV;
		goto init_ioc_failed;
	}

	shost->nr_hw_queues = mrioc->num_op_reply_q;
	if (mrioc->active_poll_qcount)
		shost->nr_maps = 3;

	shost->can_queue = mrioc->max_host_ios;
	shost->sg_tablesize = mrioc->max_sgl_entries;
	shost->max_id = mrioc->facts.max_perids + 1;

	retval = scsi_add_host(shost, &pdev->dev);
	if (retval) {
		ioc_err(mrioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto addhost_failed;
	}

	scsi_scan_host(shost);
	mpi3mr_bsg_init(mrioc);
	return retval;

addhost_failed:
	mpi3mr_stop_watchdog(mrioc);
	mpi3mr_cleanup_ioc(mrioc);
init_ioc_failed:
	mpi3mr_free_mem(mrioc);
	mpi3mr_cleanup_resources(mrioc);
resource_alloc_failed:
	destroy_workqueue(mrioc->fwevt_worker_thread);
fwevtthread_failed:
	spin_lock(&mrioc_list_lock);
	list_del(&mrioc->list);
	spin_unlock(&mrioc_list_lock);
	scsi_host_put(shost);
shost_failed:
	return retval;
}

/**
 * mpi3mr_remove - PCI remove callback
 * @pdev: PCI device instance
 *
 * Cleanup the IOC by issuing MUR and shutdown notification.
 * Free up all memory and resources associated with the
 * controller and target devices, and unregister the shost.
 *
 * Return: Nothing.
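 * Blocks until any in-progress controller reset or initial
 * driver load completes before starting the teardown.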
5197 */ 5198 static void mpi3mr_remove(struct pci_dev *pdev) 5199 { 5200 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5201 struct mpi3mr_ioc *mrioc; 5202 struct workqueue_struct *wq; 5203 unsigned long flags; 5204 struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next; 5205 struct mpi3mr_hba_port *port, *hba_port_next; 5206 struct mpi3mr_sas_node *sas_expander, *sas_expander_next; 5207 5208 if (!shost) 5209 return; 5210 5211 mrioc = shost_priv(shost); 5212 while (mrioc->reset_in_progress || mrioc->is_driver_loading) 5213 ssleep(1); 5214 5215 if (!pci_device_is_present(mrioc->pdev)) { 5216 mrioc->unrecoverable = 1; 5217 mpi3mr_flush_cmds_for_unrecovered_controller(mrioc); 5218 } 5219 5220 mpi3mr_bsg_exit(mrioc); 5221 mrioc->stop_drv_processing = 1; 5222 mpi3mr_cleanup_fwevt_list(mrioc); 5223 spin_lock_irqsave(&mrioc->fwevt_lock, flags); 5224 wq = mrioc->fwevt_worker_thread; 5225 mrioc->fwevt_worker_thread = NULL; 5226 spin_unlock_irqrestore(&mrioc->fwevt_lock, flags); 5227 if (wq) 5228 destroy_workqueue(wq); 5229 5230 if (mrioc->sas_transport_enabled) 5231 sas_remove_host(shost); 5232 else 5233 scsi_remove_host(shost); 5234 5235 list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list, 5236 list) { 5237 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); 5238 mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true); 5239 mpi3mr_tgtdev_put(tgtdev); 5240 } 5241 mpi3mr_stop_watchdog(mrioc); 5242 mpi3mr_cleanup_ioc(mrioc); 5243 mpi3mr_free_mem(mrioc); 5244 mpi3mr_cleanup_resources(mrioc); 5245 5246 spin_lock_irqsave(&mrioc->sas_node_lock, flags); 5247 list_for_each_entry_safe_reverse(sas_expander, sas_expander_next, 5248 &mrioc->sas_expander_list, list) { 5249 spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); 5250 mpi3mr_expander_node_remove(mrioc, sas_expander); 5251 spin_lock_irqsave(&mrioc->sas_node_lock, flags); 5252 } 5253 list_for_each_entry_safe(port, hba_port_next, &mrioc->hba_port_table_list, list) { 5254 ioc_info(mrioc, 5255 "removing hba_port entry: %p port: %d from hba_port list\n", 5256 port, port->port_id); 5257 list_del(&port->list); 5258 kfree(port); 5259 } 5260 spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); 5261 5262 if (mrioc->sas_hba.num_phys) { 5263 kfree(mrioc->sas_hba.phy); 5264 mrioc->sas_hba.phy = NULL; 5265 mrioc->sas_hba.num_phys = 0; 5266 } 5267 5268 spin_lock(&mrioc_list_lock); 5269 list_del(&mrioc->list); 5270 spin_unlock(&mrioc_list_lock); 5271 5272 scsi_host_put(shost); 5273 } 5274 5275 /** 5276 * mpi3mr_shutdown - PCI shutdown callback 5277 * @pdev: PCI device instance 5278 * 5279 * Free up all memory and resources associated with the 5280 * controller 5281 * 5282 * Return: Nothing. 
5283 */ 5284 static void mpi3mr_shutdown(struct pci_dev *pdev) 5285 { 5286 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5287 struct mpi3mr_ioc *mrioc; 5288 struct workqueue_struct *wq; 5289 unsigned long flags; 5290 5291 if (!shost) 5292 return; 5293 5294 mrioc = shost_priv(shost); 5295 while (mrioc->reset_in_progress || mrioc->is_driver_loading) 5296 ssleep(1); 5297 5298 mrioc->stop_drv_processing = 1; 5299 mpi3mr_cleanup_fwevt_list(mrioc); 5300 spin_lock_irqsave(&mrioc->fwevt_lock, flags); 5301 wq = mrioc->fwevt_worker_thread; 5302 mrioc->fwevt_worker_thread = NULL; 5303 spin_unlock_irqrestore(&mrioc->fwevt_lock, flags); 5304 if (wq) 5305 destroy_workqueue(wq); 5306 5307 mpi3mr_stop_watchdog(mrioc); 5308 mpi3mr_cleanup_ioc(mrioc); 5309 mpi3mr_cleanup_resources(mrioc); 5310 } 5311 5312 /** 5313 * mpi3mr_suspend - PCI power management suspend callback 5314 * @dev: Device struct 5315 * 5316 * Change the power state to the given value and cleanup the IOC 5317 * by issuing MUR and shutdown notification 5318 * 5319 * Return: 0 always. 5320 */ 5321 static int __maybe_unused 5322 mpi3mr_suspend(struct device *dev) 5323 { 5324 struct pci_dev *pdev = to_pci_dev(dev); 5325 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5326 struct mpi3mr_ioc *mrioc; 5327 5328 if (!shost) 5329 return 0; 5330 5331 mrioc = shost_priv(shost); 5332 while (mrioc->reset_in_progress || mrioc->is_driver_loading) 5333 ssleep(1); 5334 mrioc->stop_drv_processing = 1; 5335 mpi3mr_cleanup_fwevt_list(mrioc); 5336 scsi_block_requests(shost); 5337 mpi3mr_stop_watchdog(mrioc); 5338 mpi3mr_cleanup_ioc(mrioc); 5339 5340 ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state\n", 5341 pdev, pci_name(pdev)); 5342 mpi3mr_cleanup_resources(mrioc); 5343 5344 return 0; 5345 } 5346 5347 /** 5348 * mpi3mr_resume - PCI power management resume callback 5349 * @dev: Device struct 5350 * 5351 * Restore the power state to D0 and reinitialize the controller 5352 * and resume I/O operations to the target devices 5353 * 5354 * Return: 0 on success, non-zero on failure 5355 */ 5356 static int __maybe_unused 5357 mpi3mr_resume(struct device *dev) 5358 { 5359 struct pci_dev *pdev = to_pci_dev(dev); 5360 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5361 struct mpi3mr_ioc *mrioc; 5362 pci_power_t device_state = pdev->current_state; 5363 int r; 5364 5365 if (!shost) 5366 return 0; 5367 5368 mrioc = shost_priv(shost); 5369 5370 ioc_info(mrioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n", 5371 pdev, pci_name(pdev), device_state); 5372 mrioc->pdev = pdev; 5373 mrioc->cpu_count = num_online_cpus(); 5374 r = mpi3mr_setup_resources(mrioc); 5375 if (r) { 5376 ioc_info(mrioc, "%s: Setup resources failed[%d]\n", 5377 __func__, r); 5378 return r; 5379 } 5380 5381 mrioc->stop_drv_processing = 0; 5382 mpi3mr_invalidate_devhandles(mrioc); 5383 mpi3mr_free_enclosure_list(mrioc); 5384 mpi3mr_memset_buffers(mrioc); 5385 r = mpi3mr_reinit_ioc(mrioc, 1); 5386 if (r) { 5387 ioc_err(mrioc, "resuming controller failed[%d]\n", r); 5388 return r; 5389 } 5390 ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME); 5391 scsi_unblock_requests(shost); 5392 mrioc->device_refresh_on = 0; 5393 mpi3mr_start_watchdog(mrioc); 5394 5395 return 0; 5396 } 5397 5398 static const struct pci_device_id mpi3mr_pci_id_table[] = { 5399 { 5400 PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM, 5401 MPI3_MFGPAGE_DEVID_SAS4116, PCI_ANY_ID, PCI_ANY_ID) 5402 }, 5403 { 0 } 5404 }; 5405 MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table); 5406 5407 static SIMPLE_DEV_PM_OPS(mpi3mr_pm_ops, 
mpi3mr_suspend, mpi3mr_resume); 5408 5409 static struct pci_driver mpi3mr_pci_driver = { 5410 .name = MPI3MR_DRIVER_NAME, 5411 .id_table = mpi3mr_pci_id_table, 5412 .probe = mpi3mr_probe, 5413 .remove = mpi3mr_remove, 5414 .shutdown = mpi3mr_shutdown, 5415 .driver.pm = &mpi3mr_pm_ops, 5416 }; 5417 5418 static ssize_t event_counter_show(struct device_driver *dd, char *buf) 5419 { 5420 return sprintf(buf, "%llu\n", atomic64_read(&event_counter)); 5421 } 5422 static DRIVER_ATTR_RO(event_counter); 5423 5424 static int __init mpi3mr_init(void) 5425 { 5426 int ret_val; 5427 5428 pr_info("Loading %s version %s\n", MPI3MR_DRIVER_NAME, 5429 MPI3MR_DRIVER_VERSION); 5430 5431 mpi3mr_transport_template = 5432 sas_attach_transport(&mpi3mr_transport_functions); 5433 if (!mpi3mr_transport_template) { 5434 pr_err("%s failed to load due to sas transport attach failure\n", 5435 MPI3MR_DRIVER_NAME); 5436 return -ENODEV; 5437 } 5438 5439 ret_val = pci_register_driver(&mpi3mr_pci_driver); 5440 if (ret_val) { 5441 pr_err("%s failed to load due to pci register driver failure\n", 5442 MPI3MR_DRIVER_NAME); 5443 goto err_pci_reg_fail; 5444 } 5445 5446 ret_val = driver_create_file(&mpi3mr_pci_driver.driver, 5447 &driver_attr_event_counter); 5448 if (ret_val) 5449 goto err_event_counter; 5450 5451 return ret_val; 5452 5453 err_event_counter: 5454 pci_unregister_driver(&mpi3mr_pci_driver); 5455 5456 err_pci_reg_fail: 5457 sas_release_transport(mpi3mr_transport_template); 5458 return ret_val; 5459 } 5460 5461 static void __exit mpi3mr_exit(void) 5462 { 5463 if (warn_non_secure_ctlr) 5464 pr_warn( 5465 "Unloading %s version %s while managing a non secure controller\n", 5466 MPI3MR_DRIVER_NAME, MPI3MR_DRIVER_VERSION); 5467 else 5468 pr_info("Unloading %s version %s\n", MPI3MR_DRIVER_NAME, 5469 MPI3MR_DRIVER_VERSION); 5470 5471 driver_remove_file(&mpi3mr_pci_driver.driver, 5472 &driver_attr_event_counter); 5473 pci_unregister_driver(&mpi3mr_pci_driver); 5474 sas_release_transport(mpi3mr_transport_template); 5475 } 5476 5477 module_init(mpi3mr_init); 5478 module_exit(mpi3mr_exit); 5479