1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Driver for Broadcom MPI3 Storage Controllers 4 * 5 * Copyright (C) 2017-2023 Broadcom Inc. 6 * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com) 7 * 8 */ 9 10 #include "mpi3mr.h" 11 #include <linux/idr.h> 12 13 /* global driver scop variables */ 14 LIST_HEAD(mrioc_list); 15 DEFINE_SPINLOCK(mrioc_list_lock); 16 static DEFINE_IDA(mrioc_ida); 17 static int warn_non_secure_ctlr; 18 atomic64_t event_counter; 19 20 MODULE_AUTHOR(MPI3MR_DRIVER_AUTHOR); 21 MODULE_DESCRIPTION(MPI3MR_DRIVER_DESC); 22 MODULE_LICENSE(MPI3MR_DRIVER_LICENSE); 23 MODULE_VERSION(MPI3MR_DRIVER_VERSION); 24 25 /* Module parameters*/ 26 int prot_mask = -1; 27 module_param(prot_mask, int, 0); 28 MODULE_PARM_DESC(prot_mask, "Host protection capabilities mask, def=0x07"); 29 30 static int prot_guard_mask = 3; 31 module_param(prot_guard_mask, int, 0); 32 MODULE_PARM_DESC(prot_guard_mask, " Host protection guard mask, def=3"); 33 static int logging_level; 34 module_param(logging_level, int, 0); 35 MODULE_PARM_DESC(logging_level, 36 " bits for enabling additional logging info (default=0)"); 37 static int max_sgl_entries = MPI3MR_DEFAULT_SGL_ENTRIES; 38 module_param(max_sgl_entries, int, 0444); 39 MODULE_PARM_DESC(max_sgl_entries, 40 "Preferred max number of SG entries to be used for a single I/O\n" 41 "The actual value will be determined by the driver\n" 42 "(Minimum=256, Maximum=2048, default=256)"); 43 44 /* Forward declarations*/ 45 static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event, 46 struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx); 47 48 #define MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION (0xFFFF) 49 50 #define MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH (0xFFFE) 51 52 /** 53 * mpi3mr_host_tag_for_scmd - Get host tag for a scmd 54 * @mrioc: Adapter instance reference 55 * @scmd: SCSI command reference 56 * 57 * Calculate the host tag based on block tag for a given scmd. 58 * 59 * Return: Valid host tag or MPI3MR_HOSTTAG_INVALID. 60 */ 61 static u16 mpi3mr_host_tag_for_scmd(struct mpi3mr_ioc *mrioc, 62 struct scsi_cmnd *scmd) 63 { 64 struct scmd_priv *priv = NULL; 65 u32 unique_tag; 66 u16 host_tag, hw_queue; 67 68 unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); 69 70 hw_queue = blk_mq_unique_tag_to_hwq(unique_tag); 71 if (hw_queue >= mrioc->num_op_reply_q) 72 return MPI3MR_HOSTTAG_INVALID; 73 host_tag = blk_mq_unique_tag_to_tag(unique_tag); 74 75 if (WARN_ON(host_tag >= mrioc->max_host_ios)) 76 return MPI3MR_HOSTTAG_INVALID; 77 78 priv = scsi_cmd_priv(scmd); 79 /*host_tag 0 is invalid hence incrementing by 1*/ 80 priv->host_tag = host_tag + 1; 81 priv->scmd = scmd; 82 priv->in_lld_scope = 1; 83 priv->req_q_idx = hw_queue; 84 priv->meta_chain_idx = -1; 85 priv->chain_idx = -1; 86 priv->meta_sg_valid = 0; 87 return priv->host_tag; 88 } 89 90 /** 91 * mpi3mr_scmd_from_host_tag - Get SCSI command from host tag 92 * @mrioc: Adapter instance reference 93 * @host_tag: Host tag 94 * @qidx: Operational queue index 95 * 96 * Identify the block tag from the host tag and queue index and 97 * retrieve associated scsi command using scsi_host_find_tag(). 98 * 99 * Return: SCSI command reference or NULL. 100 */ 101 static struct scsi_cmnd *mpi3mr_scmd_from_host_tag( 102 struct mpi3mr_ioc *mrioc, u16 host_tag, u16 qidx) 103 { 104 struct scsi_cmnd *scmd = NULL; 105 struct scmd_priv *priv = NULL; 106 u32 unique_tag = host_tag - 1; 107 108 if (WARN_ON(host_tag > mrioc->max_host_ios)) 109 goto out; 110 111 unique_tag |= (qidx << BLK_MQ_UNIQUE_TAG_BITS); 112 113 scmd = scsi_host_find_tag(mrioc->shost, unique_tag); 114 if (scmd) { 115 priv = scsi_cmd_priv(scmd); 116 if (!priv->in_lld_scope) 117 scmd = NULL; 118 } 119 out: 120 return scmd; 121 } 122 123 /** 124 * mpi3mr_clear_scmd_priv - Cleanup SCSI command private date 125 * @mrioc: Adapter instance reference 126 * @scmd: SCSI command reference 127 * 128 * Invalidate the SCSI command private data to mark the command 129 * is not in LLD scope anymore. 130 * 131 * Return: Nothing. 132 */ 133 static void mpi3mr_clear_scmd_priv(struct mpi3mr_ioc *mrioc, 134 struct scsi_cmnd *scmd) 135 { 136 struct scmd_priv *priv = NULL; 137 138 priv = scsi_cmd_priv(scmd); 139 140 if (WARN_ON(priv->in_lld_scope == 0)) 141 return; 142 priv->host_tag = MPI3MR_HOSTTAG_INVALID; 143 priv->req_q_idx = 0xFFFF; 144 priv->scmd = NULL; 145 priv->in_lld_scope = 0; 146 priv->meta_sg_valid = 0; 147 if (priv->chain_idx >= 0) { 148 clear_bit(priv->chain_idx, mrioc->chain_bitmap); 149 priv->chain_idx = -1; 150 } 151 if (priv->meta_chain_idx >= 0) { 152 clear_bit(priv->meta_chain_idx, mrioc->chain_bitmap); 153 priv->meta_chain_idx = -1; 154 } 155 } 156 157 static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle, 158 struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc); 159 static void mpi3mr_fwevt_worker(struct work_struct *work); 160 161 /** 162 * mpi3mr_fwevt_free - firmware event memory dealloctor 163 * @r: k reference pointer of the firmware event 164 * 165 * Free firmware event memory when no reference. 166 */ 167 static void mpi3mr_fwevt_free(struct kref *r) 168 { 169 kfree(container_of(r, struct mpi3mr_fwevt, ref_count)); 170 } 171 172 /** 173 * mpi3mr_fwevt_get - k reference incrementor 174 * @fwevt: Firmware event reference 175 * 176 * Increment firmware event reference count. 177 */ 178 static void mpi3mr_fwevt_get(struct mpi3mr_fwevt *fwevt) 179 { 180 kref_get(&fwevt->ref_count); 181 } 182 183 /** 184 * mpi3mr_fwevt_put - k reference decrementor 185 * @fwevt: Firmware event reference 186 * 187 * decrement firmware event reference count. 188 */ 189 static void mpi3mr_fwevt_put(struct mpi3mr_fwevt *fwevt) 190 { 191 kref_put(&fwevt->ref_count, mpi3mr_fwevt_free); 192 } 193 194 /** 195 * mpi3mr_alloc_fwevt - Allocate firmware event 196 * @len: length of firmware event data to allocate 197 * 198 * Allocate firmware event with required length and initialize 199 * the reference counter. 200 * 201 * Return: firmware event reference. 202 */ 203 static struct mpi3mr_fwevt *mpi3mr_alloc_fwevt(int len) 204 { 205 struct mpi3mr_fwevt *fwevt; 206 207 fwevt = kzalloc(sizeof(*fwevt) + len, GFP_ATOMIC); 208 if (!fwevt) 209 return NULL; 210 211 kref_init(&fwevt->ref_count); 212 return fwevt; 213 } 214 215 /** 216 * mpi3mr_fwevt_add_to_list - Add firmware event to the list 217 * @mrioc: Adapter instance reference 218 * @fwevt: Firmware event reference 219 * 220 * Add the given firmware event to the firmware event list. 221 * 222 * Return: Nothing. 223 */ 224 static void mpi3mr_fwevt_add_to_list(struct mpi3mr_ioc *mrioc, 225 struct mpi3mr_fwevt *fwevt) 226 { 227 unsigned long flags; 228 229 if (!mrioc->fwevt_worker_thread) 230 return; 231 232 spin_lock_irqsave(&mrioc->fwevt_lock, flags); 233 /* get fwevt reference count while adding it to fwevt_list */ 234 mpi3mr_fwevt_get(fwevt); 235 INIT_LIST_HEAD(&fwevt->list); 236 list_add_tail(&fwevt->list, &mrioc->fwevt_list); 237 INIT_WORK(&fwevt->work, mpi3mr_fwevt_worker); 238 /* get fwevt reference count while enqueueing it to worker queue */ 239 mpi3mr_fwevt_get(fwevt); 240 queue_work(mrioc->fwevt_worker_thread, &fwevt->work); 241 spin_unlock_irqrestore(&mrioc->fwevt_lock, flags); 242 } 243 244 /** 245 * mpi3mr_fwevt_del_from_list - Delete firmware event from list 246 * @mrioc: Adapter instance reference 247 * @fwevt: Firmware event reference 248 * 249 * Delete the given firmware event from the firmware event list. 250 * 251 * Return: Nothing. 252 */ 253 static void mpi3mr_fwevt_del_from_list(struct mpi3mr_ioc *mrioc, 254 struct mpi3mr_fwevt *fwevt) 255 { 256 unsigned long flags; 257 258 spin_lock_irqsave(&mrioc->fwevt_lock, flags); 259 if (!list_empty(&fwevt->list)) { 260 list_del_init(&fwevt->list); 261 /* 262 * Put fwevt reference count after 263 * removing it from fwevt_list 264 */ 265 mpi3mr_fwevt_put(fwevt); 266 } 267 spin_unlock_irqrestore(&mrioc->fwevt_lock, flags); 268 } 269 270 /** 271 * mpi3mr_dequeue_fwevt - Dequeue firmware event from the list 272 * @mrioc: Adapter instance reference 273 * 274 * Dequeue a firmware event from the firmware event list. 275 * 276 * Return: firmware event. 277 */ 278 static struct mpi3mr_fwevt *mpi3mr_dequeue_fwevt( 279 struct mpi3mr_ioc *mrioc) 280 { 281 unsigned long flags; 282 struct mpi3mr_fwevt *fwevt = NULL; 283 284 spin_lock_irqsave(&mrioc->fwevt_lock, flags); 285 if (!list_empty(&mrioc->fwevt_list)) { 286 fwevt = list_first_entry(&mrioc->fwevt_list, 287 struct mpi3mr_fwevt, list); 288 list_del_init(&fwevt->list); 289 /* 290 * Put fwevt reference count after 291 * removing it from fwevt_list 292 */ 293 mpi3mr_fwevt_put(fwevt); 294 } 295 spin_unlock_irqrestore(&mrioc->fwevt_lock, flags); 296 297 return fwevt; 298 } 299 300 /** 301 * mpi3mr_cancel_work - cancel firmware event 302 * @fwevt: fwevt object which needs to be canceled 303 * 304 * Return: Nothing. 305 */ 306 static void mpi3mr_cancel_work(struct mpi3mr_fwevt *fwevt) 307 { 308 /* 309 * Wait on the fwevt to complete. If this returns 1, then 310 * the event was never executed. 311 * 312 * If it did execute, we wait for it to finish, and the put will 313 * happen from mpi3mr_process_fwevt() 314 */ 315 if (cancel_work_sync(&fwevt->work)) { 316 /* 317 * Put fwevt reference count after 318 * dequeuing it from worker queue 319 */ 320 mpi3mr_fwevt_put(fwevt); 321 /* 322 * Put fwevt reference count to neutralize 323 * kref_init increment 324 */ 325 mpi3mr_fwevt_put(fwevt); 326 } 327 } 328 329 /** 330 * mpi3mr_cleanup_fwevt_list - Cleanup firmware event list 331 * @mrioc: Adapter instance reference 332 * 333 * Flush all pending firmware events from the firmware event 334 * list. 335 * 336 * Return: Nothing. 337 */ 338 void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc) 339 { 340 struct mpi3mr_fwevt *fwevt = NULL; 341 342 if ((list_empty(&mrioc->fwevt_list) && !mrioc->current_event) || 343 !mrioc->fwevt_worker_thread) 344 return; 345 346 while ((fwevt = mpi3mr_dequeue_fwevt(mrioc))) 347 mpi3mr_cancel_work(fwevt); 348 349 if (mrioc->current_event) { 350 fwevt = mrioc->current_event; 351 /* 352 * Don't call cancel_work_sync() API for the 353 * fwevt work if the controller reset is 354 * get called as part of processing the 355 * same fwevt work (or) when worker thread is 356 * waiting for device add/remove APIs to complete. 357 * Otherwise we will see deadlock. 358 */ 359 if (current_work() == &fwevt->work || fwevt->pending_at_sml) { 360 fwevt->discard = 1; 361 return; 362 } 363 364 mpi3mr_cancel_work(fwevt); 365 } 366 } 367 368 /** 369 * mpi3mr_queue_qd_reduction_event - Queue TG QD reduction event 370 * @mrioc: Adapter instance reference 371 * @tg: Throttle group information pointer 372 * 373 * Accessor to queue on synthetically generated driver event to 374 * the event worker thread, the driver event will be used to 375 * reduce the QD of all VDs in the TG from the worker thread. 376 * 377 * Return: None. 378 */ 379 static void mpi3mr_queue_qd_reduction_event(struct mpi3mr_ioc *mrioc, 380 struct mpi3mr_throttle_group_info *tg) 381 { 382 struct mpi3mr_fwevt *fwevt; 383 u16 sz = sizeof(struct mpi3mr_throttle_group_info *); 384 385 /* 386 * If the QD reduction event is already queued due to throttle and if 387 * the QD is not restored through device info change event 388 * then dont queue further reduction events 389 */ 390 if (tg->fw_qd != tg->modified_qd) 391 return; 392 393 fwevt = mpi3mr_alloc_fwevt(sz); 394 if (!fwevt) { 395 ioc_warn(mrioc, "failed to queue TG QD reduction event\n"); 396 return; 397 } 398 *(struct mpi3mr_throttle_group_info **)fwevt->event_data = tg; 399 fwevt->mrioc = mrioc; 400 fwevt->event_id = MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION; 401 fwevt->send_ack = 0; 402 fwevt->process_evt = 1; 403 fwevt->evt_ctx = 0; 404 fwevt->event_data_size = sz; 405 tg->modified_qd = max_t(u16, (tg->fw_qd * tg->qd_reduction) / 10, 8); 406 407 dprint_event_bh(mrioc, "qd reduction event queued for tg_id(%d)\n", 408 tg->id); 409 mpi3mr_fwevt_add_to_list(mrioc, fwevt); 410 } 411 412 /** 413 * mpi3mr_invalidate_devhandles -Invalidate device handles 414 * @mrioc: Adapter instance reference 415 * 416 * Invalidate the device handles in the target device structures 417 * . Called post reset prior to reinitializing the controller. 418 * 419 * Return: Nothing. 420 */ 421 void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc) 422 { 423 struct mpi3mr_tgt_dev *tgtdev; 424 struct mpi3mr_stgt_priv_data *tgt_priv; 425 426 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) { 427 tgtdev->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 428 if (tgtdev->starget && tgtdev->starget->hostdata) { 429 tgt_priv = tgtdev->starget->hostdata; 430 tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 431 tgt_priv->io_throttle_enabled = 0; 432 tgt_priv->io_divert = 0; 433 tgt_priv->throttle_group = NULL; 434 tgt_priv->wslen = 0; 435 if (tgtdev->host_exposed) 436 atomic_set(&tgt_priv->block_io, 1); 437 } 438 } 439 } 440 441 /** 442 * mpi3mr_print_scmd - print individual SCSI command 443 * @rq: Block request 444 * @data: Adapter instance reference 445 * 446 * Print the SCSI command details if it is in LLD scope. 447 * 448 * Return: true always. 449 */ 450 static bool mpi3mr_print_scmd(struct request *rq, void *data) 451 { 452 struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data; 453 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq); 454 struct scmd_priv *priv = NULL; 455 456 if (scmd) { 457 priv = scsi_cmd_priv(scmd); 458 if (!priv->in_lld_scope) 459 goto out; 460 461 ioc_info(mrioc, "%s :Host Tag = %d, qid = %d\n", 462 __func__, priv->host_tag, priv->req_q_idx + 1); 463 scsi_print_command(scmd); 464 } 465 466 out: 467 return(true); 468 } 469 470 /** 471 * mpi3mr_flush_scmd - Flush individual SCSI command 472 * @rq: Block request 473 * @data: Adapter instance reference 474 * 475 * Return the SCSI command to the upper layers if it is in LLD 476 * scope. 477 * 478 * Return: true always. 479 */ 480 481 static bool mpi3mr_flush_scmd(struct request *rq, void *data) 482 { 483 struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data; 484 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq); 485 struct scmd_priv *priv = NULL; 486 487 if (scmd) { 488 priv = scsi_cmd_priv(scmd); 489 if (!priv->in_lld_scope) 490 goto out; 491 492 if (priv->meta_sg_valid) 493 dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd), 494 scsi_prot_sg_count(scmd), scmd->sc_data_direction); 495 mpi3mr_clear_scmd_priv(mrioc, scmd); 496 scsi_dma_unmap(scmd); 497 scmd->result = DID_RESET << 16; 498 scsi_print_command(scmd); 499 scsi_done(scmd); 500 mrioc->flush_io_count++; 501 } 502 503 out: 504 return(true); 505 } 506 507 /** 508 * mpi3mr_count_dev_pending - Count commands pending for a lun 509 * @rq: Block request 510 * @data: SCSI device reference 511 * 512 * This is an iterator function called for each SCSI command in 513 * a host and if the command is pending in the LLD for the 514 * specific device(lun) then device specific pending I/O counter 515 * is updated in the device structure. 516 * 517 * Return: true always. 518 */ 519 520 static bool mpi3mr_count_dev_pending(struct request *rq, void *data) 521 { 522 struct scsi_device *sdev = (struct scsi_device *)data; 523 struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata; 524 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq); 525 struct scmd_priv *priv; 526 527 if (scmd) { 528 priv = scsi_cmd_priv(scmd); 529 if (!priv->in_lld_scope) 530 goto out; 531 if (scmd->device == sdev) 532 sdev_priv_data->pend_count++; 533 } 534 535 out: 536 return true; 537 } 538 539 /** 540 * mpi3mr_count_tgt_pending - Count commands pending for target 541 * @rq: Block request 542 * @data: SCSI target reference 543 * 544 * This is an iterator function called for each SCSI command in 545 * a host and if the command is pending in the LLD for the 546 * specific target then target specific pending I/O counter is 547 * updated in the target structure. 548 * 549 * Return: true always. 550 */ 551 552 static bool mpi3mr_count_tgt_pending(struct request *rq, void *data) 553 { 554 struct scsi_target *starget = (struct scsi_target *)data; 555 struct mpi3mr_stgt_priv_data *stgt_priv_data = starget->hostdata; 556 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq); 557 struct scmd_priv *priv; 558 559 if (scmd) { 560 priv = scsi_cmd_priv(scmd); 561 if (!priv->in_lld_scope) 562 goto out; 563 if (scmd->device && (scsi_target(scmd->device) == starget)) 564 stgt_priv_data->pend_count++; 565 } 566 567 out: 568 return true; 569 } 570 571 /** 572 * mpi3mr_flush_host_io - Flush host I/Os 573 * @mrioc: Adapter instance reference 574 * 575 * Flush all of the pending I/Os by calling 576 * blk_mq_tagset_busy_iter() for each possible tag. This is 577 * executed post controller reset 578 * 579 * Return: Nothing. 580 */ 581 void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc) 582 { 583 struct Scsi_Host *shost = mrioc->shost; 584 585 mrioc->flush_io_count = 0; 586 ioc_info(mrioc, "%s :Flushing Host I/O cmds post reset\n", __func__); 587 blk_mq_tagset_busy_iter(&shost->tag_set, 588 mpi3mr_flush_scmd, (void *)mrioc); 589 ioc_info(mrioc, "%s :Flushed %d Host I/O cmds\n", __func__, 590 mrioc->flush_io_count); 591 } 592 593 /** 594 * mpi3mr_flush_cmds_for_unrecovered_controller - Flush all pending cmds 595 * @mrioc: Adapter instance reference 596 * 597 * This function waits for currently running IO poll threads to 598 * exit and then flushes all host I/Os and any internal pending 599 * cmds. This is executed after controller is marked as 600 * unrecoverable. 601 * 602 * Return: Nothing. 603 */ 604 void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc) 605 { 606 struct Scsi_Host *shost = mrioc->shost; 607 int i; 608 609 if (!mrioc->unrecoverable) 610 return; 611 612 if (mrioc->op_reply_qinfo) { 613 for (i = 0; i < mrioc->num_queues; i++) { 614 while (atomic_read(&mrioc->op_reply_qinfo[i].in_use)) 615 udelay(500); 616 atomic_set(&mrioc->op_reply_qinfo[i].pend_ios, 0); 617 } 618 } 619 mrioc->flush_io_count = 0; 620 blk_mq_tagset_busy_iter(&shost->tag_set, 621 mpi3mr_flush_scmd, (void *)mrioc); 622 mpi3mr_flush_delayed_cmd_lists(mrioc); 623 mpi3mr_flush_drv_cmds(mrioc); 624 } 625 626 /** 627 * mpi3mr_alloc_tgtdev - target device allocator 628 * 629 * Allocate target device instance and initialize the reference 630 * count 631 * 632 * Return: target device instance. 633 */ 634 static struct mpi3mr_tgt_dev *mpi3mr_alloc_tgtdev(void) 635 { 636 struct mpi3mr_tgt_dev *tgtdev; 637 638 tgtdev = kzalloc(sizeof(*tgtdev), GFP_ATOMIC); 639 if (!tgtdev) 640 return NULL; 641 kref_init(&tgtdev->ref_count); 642 return tgtdev; 643 } 644 645 /** 646 * mpi3mr_tgtdev_add_to_list -Add tgtdevice to the list 647 * @mrioc: Adapter instance reference 648 * @tgtdev: Target device 649 * 650 * Add the target device to the target device list 651 * 652 * Return: Nothing. 653 */ 654 static void mpi3mr_tgtdev_add_to_list(struct mpi3mr_ioc *mrioc, 655 struct mpi3mr_tgt_dev *tgtdev) 656 { 657 unsigned long flags; 658 659 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 660 mpi3mr_tgtdev_get(tgtdev); 661 INIT_LIST_HEAD(&tgtdev->list); 662 list_add_tail(&tgtdev->list, &mrioc->tgtdev_list); 663 tgtdev->state = MPI3MR_DEV_CREATED; 664 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 665 } 666 667 /** 668 * mpi3mr_tgtdev_del_from_list -Delete tgtdevice from the list 669 * @mrioc: Adapter instance reference 670 * @tgtdev: Target device 671 * @must_delete: Must delete the target device from the list irrespective 672 * of the device state. 673 * 674 * Remove the target device from the target device list 675 * 676 * Return: Nothing. 677 */ 678 static void mpi3mr_tgtdev_del_from_list(struct mpi3mr_ioc *mrioc, 679 struct mpi3mr_tgt_dev *tgtdev, bool must_delete) 680 { 681 unsigned long flags; 682 683 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 684 if ((tgtdev->state == MPI3MR_DEV_REMOVE_HS_STARTED) || (must_delete == true)) { 685 if (!list_empty(&tgtdev->list)) { 686 list_del_init(&tgtdev->list); 687 tgtdev->state = MPI3MR_DEV_DELETED; 688 mpi3mr_tgtdev_put(tgtdev); 689 } 690 } 691 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 692 } 693 694 /** 695 * __mpi3mr_get_tgtdev_by_handle -Get tgtdev from device handle 696 * @mrioc: Adapter instance reference 697 * @handle: Device handle 698 * 699 * Accessor to retrieve target device from the device handle. 700 * Non Lock version 701 * 702 * Return: Target device reference. 703 */ 704 static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_handle( 705 struct mpi3mr_ioc *mrioc, u16 handle) 706 { 707 struct mpi3mr_tgt_dev *tgtdev; 708 709 assert_spin_locked(&mrioc->tgtdev_lock); 710 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) 711 if (tgtdev->dev_handle == handle) 712 goto found_tgtdev; 713 return NULL; 714 715 found_tgtdev: 716 mpi3mr_tgtdev_get(tgtdev); 717 return tgtdev; 718 } 719 720 /** 721 * mpi3mr_get_tgtdev_by_handle -Get tgtdev from device handle 722 * @mrioc: Adapter instance reference 723 * @handle: Device handle 724 * 725 * Accessor to retrieve target device from the device handle. 726 * Lock version 727 * 728 * Return: Target device reference. 729 */ 730 struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle( 731 struct mpi3mr_ioc *mrioc, u16 handle) 732 { 733 struct mpi3mr_tgt_dev *tgtdev; 734 unsigned long flags; 735 736 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 737 tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle); 738 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 739 return tgtdev; 740 } 741 742 /** 743 * __mpi3mr_get_tgtdev_by_perst_id -Get tgtdev from persist ID 744 * @mrioc: Adapter instance reference 745 * @persist_id: Persistent ID 746 * 747 * Accessor to retrieve target device from the Persistent ID. 748 * Non Lock version 749 * 750 * Return: Target device reference. 751 */ 752 static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_perst_id( 753 struct mpi3mr_ioc *mrioc, u16 persist_id) 754 { 755 struct mpi3mr_tgt_dev *tgtdev; 756 757 assert_spin_locked(&mrioc->tgtdev_lock); 758 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) 759 if (tgtdev->perst_id == persist_id) 760 goto found_tgtdev; 761 return NULL; 762 763 found_tgtdev: 764 mpi3mr_tgtdev_get(tgtdev); 765 return tgtdev; 766 } 767 768 /** 769 * mpi3mr_get_tgtdev_by_perst_id -Get tgtdev from persistent ID 770 * @mrioc: Adapter instance reference 771 * @persist_id: Persistent ID 772 * 773 * Accessor to retrieve target device from the Persistent ID. 774 * Lock version 775 * 776 * Return: Target device reference. 777 */ 778 static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_perst_id( 779 struct mpi3mr_ioc *mrioc, u16 persist_id) 780 { 781 struct mpi3mr_tgt_dev *tgtdev; 782 unsigned long flags; 783 784 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 785 tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, persist_id); 786 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 787 return tgtdev; 788 } 789 790 /** 791 * __mpi3mr_get_tgtdev_from_tgtpriv -Get tgtdev from tgt private 792 * @mrioc: Adapter instance reference 793 * @tgt_priv: Target private data 794 * 795 * Accessor to return target device from the target private 796 * data. Non Lock version 797 * 798 * Return: Target device reference. 799 */ 800 static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_from_tgtpriv( 801 struct mpi3mr_ioc *mrioc, struct mpi3mr_stgt_priv_data *tgt_priv) 802 { 803 struct mpi3mr_tgt_dev *tgtdev; 804 805 assert_spin_locked(&mrioc->tgtdev_lock); 806 tgtdev = tgt_priv->tgt_dev; 807 if (tgtdev) 808 mpi3mr_tgtdev_get(tgtdev); 809 return tgtdev; 810 } 811 812 /** 813 * mpi3mr_set_io_divert_for_all_vd_in_tg -set divert for TG VDs 814 * @mrioc: Adapter instance reference 815 * @tg: Throttle group information pointer 816 * @divert_value: 1 or 0 817 * 818 * Accessor to set io_divert flag for each device associated 819 * with the given throttle group with the given value. 820 * 821 * Return: None. 822 */ 823 static void mpi3mr_set_io_divert_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc, 824 struct mpi3mr_throttle_group_info *tg, u8 divert_value) 825 { 826 unsigned long flags; 827 struct mpi3mr_tgt_dev *tgtdev; 828 struct mpi3mr_stgt_priv_data *tgt_priv; 829 830 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 831 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) { 832 if (tgtdev->starget && tgtdev->starget->hostdata) { 833 tgt_priv = tgtdev->starget->hostdata; 834 if (tgt_priv->throttle_group == tg) 835 tgt_priv->io_divert = divert_value; 836 } 837 } 838 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 839 } 840 841 /** 842 * mpi3mr_print_device_event_notice - print notice related to post processing of 843 * device event after controller reset. 844 * 845 * @mrioc: Adapter instance reference 846 * @device_add: true for device add event and false for device removal event 847 * 848 * Return: None. 849 */ 850 void mpi3mr_print_device_event_notice(struct mpi3mr_ioc *mrioc, 851 bool device_add) 852 { 853 ioc_notice(mrioc, "Device %s was in progress before the reset and\n", 854 (device_add ? "addition" : "removal")); 855 ioc_notice(mrioc, "completed after reset, verify whether the exposed devices\n"); 856 ioc_notice(mrioc, "are matched with attached devices for correctness\n"); 857 } 858 859 /** 860 * mpi3mr_remove_tgtdev_from_host - Remove dev from upper layers 861 * @mrioc: Adapter instance reference 862 * @tgtdev: Target device structure 863 * 864 * Checks whether the device is exposed to upper layers and if it 865 * is then remove the device from upper layers by calling 866 * scsi_remove_target(). 867 * 868 * Return: 0 on success, non zero on failure. 869 */ 870 void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc, 871 struct mpi3mr_tgt_dev *tgtdev) 872 { 873 struct mpi3mr_stgt_priv_data *tgt_priv; 874 875 ioc_info(mrioc, "%s :Removing handle(0x%04x), wwid(0x%016llx)\n", 876 __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid); 877 if (tgtdev->starget && tgtdev->starget->hostdata) { 878 tgt_priv = tgtdev->starget->hostdata; 879 atomic_set(&tgt_priv->block_io, 0); 880 tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 881 } 882 883 if (!mrioc->sas_transport_enabled || (tgtdev->dev_type != 884 MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl) { 885 if (tgtdev->starget) { 886 if (mrioc->current_event) 887 mrioc->current_event->pending_at_sml = 1; 888 scsi_remove_target(&tgtdev->starget->dev); 889 tgtdev->host_exposed = 0; 890 if (mrioc->current_event) { 891 mrioc->current_event->pending_at_sml = 0; 892 if (mrioc->current_event->discard) { 893 mpi3mr_print_device_event_notice(mrioc, 894 false); 895 return; 896 } 897 } 898 } 899 } else 900 mpi3mr_remove_tgtdev_from_sas_transport(mrioc, tgtdev); 901 902 ioc_info(mrioc, "%s :Removed handle(0x%04x), wwid(0x%016llx)\n", 903 __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid); 904 } 905 906 /** 907 * mpi3mr_report_tgtdev_to_host - Expose device to upper layers 908 * @mrioc: Adapter instance reference 909 * @perst_id: Persistent ID of the device 910 * 911 * Checks whether the device can be exposed to upper layers and 912 * if it is not then expose the device to upper layers by 913 * calling scsi_scan_target(). 914 * 915 * Return: 0 on success, non zero on failure. 916 */ 917 static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc, 918 u16 perst_id) 919 { 920 int retval = 0; 921 struct mpi3mr_tgt_dev *tgtdev; 922 923 if (mrioc->reset_in_progress) 924 return -1; 925 926 tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id); 927 if (!tgtdev) { 928 retval = -1; 929 goto out; 930 } 931 if (tgtdev->is_hidden || tgtdev->host_exposed) { 932 retval = -1; 933 goto out; 934 } 935 if (!mrioc->sas_transport_enabled || (tgtdev->dev_type != 936 MPI3_DEVICE_DEVFORM_SAS_SATA) || tgtdev->non_stl){ 937 tgtdev->host_exposed = 1; 938 if (mrioc->current_event) 939 mrioc->current_event->pending_at_sml = 1; 940 scsi_scan_target(&mrioc->shost->shost_gendev, 941 mrioc->scsi_device_channel, tgtdev->perst_id, 942 SCAN_WILD_CARD, SCSI_SCAN_INITIAL); 943 if (!tgtdev->starget) 944 tgtdev->host_exposed = 0; 945 if (mrioc->current_event) { 946 mrioc->current_event->pending_at_sml = 0; 947 if (mrioc->current_event->discard) { 948 mpi3mr_print_device_event_notice(mrioc, true); 949 goto out; 950 } 951 } 952 } else 953 mpi3mr_report_tgtdev_to_sas_transport(mrioc, tgtdev); 954 out: 955 if (tgtdev) 956 mpi3mr_tgtdev_put(tgtdev); 957 958 return retval; 959 } 960 961 /** 962 * mpi3mr_change_queue_depth- Change QD callback handler 963 * @sdev: SCSI device reference 964 * @q_depth: Queue depth 965 * 966 * Validate and limit QD and call scsi_change_queue_depth. 967 * 968 * Return: return value of scsi_change_queue_depth 969 */ 970 static int mpi3mr_change_queue_depth(struct scsi_device *sdev, 971 int q_depth) 972 { 973 struct scsi_target *starget = scsi_target(sdev); 974 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 975 int retval = 0; 976 977 if (!sdev->tagged_supported) 978 q_depth = 1; 979 if (q_depth > shost->can_queue) 980 q_depth = shost->can_queue; 981 else if (!q_depth) 982 q_depth = MPI3MR_DEFAULT_SDEV_QD; 983 retval = scsi_change_queue_depth(sdev, q_depth); 984 sdev->max_queue_depth = sdev->queue_depth; 985 986 return retval; 987 } 988 989 /** 990 * mpi3mr_update_sdev - Update SCSI device information 991 * @sdev: SCSI device reference 992 * @data: target device reference 993 * 994 * This is an iterator function called for each SCSI device in a 995 * target to update the target specific information into each 996 * SCSI device. 997 * 998 * Return: Nothing. 999 */ 1000 static void 1001 mpi3mr_update_sdev(struct scsi_device *sdev, void *data) 1002 { 1003 struct mpi3mr_tgt_dev *tgtdev; 1004 1005 tgtdev = (struct mpi3mr_tgt_dev *)data; 1006 if (!tgtdev) 1007 return; 1008 1009 mpi3mr_change_queue_depth(sdev, tgtdev->q_depth); 1010 switch (tgtdev->dev_type) { 1011 case MPI3_DEVICE_DEVFORM_PCIE: 1012 /*The block layer hw sector size = 512*/ 1013 if ((tgtdev->dev_spec.pcie_inf.dev_info & 1014 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) == 1015 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) { 1016 blk_queue_max_hw_sectors(sdev->request_queue, 1017 tgtdev->dev_spec.pcie_inf.mdts / 512); 1018 if (tgtdev->dev_spec.pcie_inf.pgsz == 0) 1019 blk_queue_virt_boundary(sdev->request_queue, 1020 ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1)); 1021 else 1022 blk_queue_virt_boundary(sdev->request_queue, 1023 ((1 << tgtdev->dev_spec.pcie_inf.pgsz) - 1)); 1024 } 1025 break; 1026 default: 1027 break; 1028 } 1029 } 1030 1031 /** 1032 * mpi3mr_rfresh_tgtdevs - Refresh target device exposure 1033 * @mrioc: Adapter instance reference 1034 * 1035 * This is executed post controller reset to identify any 1036 * missing devices during reset and remove from the upper layers 1037 * or expose any newly detected device to the upper layers. 1038 * 1039 * Return: Nothing. 1040 */ 1041 1042 void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc) 1043 { 1044 struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next; 1045 struct mpi3mr_stgt_priv_data *tgt_priv; 1046 1047 dprint_reset(mrioc, "refresh target devices: check for removals\n"); 1048 list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list, 1049 list) { 1050 if ((tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) && 1051 tgtdev->is_hidden && 1052 tgtdev->host_exposed && tgtdev->starget && 1053 tgtdev->starget->hostdata) { 1054 tgt_priv = tgtdev->starget->hostdata; 1055 tgt_priv->dev_removed = 1; 1056 atomic_set(&tgt_priv->block_io, 0); 1057 } 1058 } 1059 1060 list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list, 1061 list) { 1062 if (tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) { 1063 dprint_reset(mrioc, "removing target device with perst_id(%d)\n", 1064 tgtdev->perst_id); 1065 if (tgtdev->host_exposed) 1066 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); 1067 mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true); 1068 mpi3mr_tgtdev_put(tgtdev); 1069 } else if (tgtdev->is_hidden & tgtdev->host_exposed) { 1070 dprint_reset(mrioc, "hiding target device with perst_id(%d)\n", 1071 tgtdev->perst_id); 1072 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); 1073 } 1074 } 1075 1076 tgtdev = NULL; 1077 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) { 1078 if ((tgtdev->dev_handle != MPI3MR_INVALID_DEV_HANDLE) && 1079 !tgtdev->is_hidden) { 1080 if (!tgtdev->host_exposed) 1081 mpi3mr_report_tgtdev_to_host(mrioc, 1082 tgtdev->perst_id); 1083 else if (tgtdev->starget) 1084 starget_for_each_device(tgtdev->starget, 1085 (void *)tgtdev, mpi3mr_update_sdev); 1086 } 1087 } 1088 } 1089 1090 /** 1091 * mpi3mr_update_tgtdev - DevStatusChange evt bottomhalf 1092 * @mrioc: Adapter instance reference 1093 * @tgtdev: Target device internal structure 1094 * @dev_pg0: New device page0 1095 * @is_added: Flag to indicate the device is just added 1096 * 1097 * Update the information from the device page0 into the driver 1098 * cached target device structure. 1099 * 1100 * Return: Nothing. 1101 */ 1102 static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc, 1103 struct mpi3mr_tgt_dev *tgtdev, struct mpi3_device_page0 *dev_pg0, 1104 bool is_added) 1105 { 1106 u16 flags = 0; 1107 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 1108 struct mpi3mr_enclosure_node *enclosure_dev = NULL; 1109 u8 prot_mask = 0; 1110 1111 tgtdev->perst_id = le16_to_cpu(dev_pg0->persistent_id); 1112 tgtdev->dev_handle = le16_to_cpu(dev_pg0->dev_handle); 1113 tgtdev->dev_type = dev_pg0->device_form; 1114 tgtdev->io_unit_port = dev_pg0->io_unit_port; 1115 tgtdev->encl_handle = le16_to_cpu(dev_pg0->enclosure_handle); 1116 tgtdev->parent_handle = le16_to_cpu(dev_pg0->parent_dev_handle); 1117 tgtdev->slot = le16_to_cpu(dev_pg0->slot); 1118 tgtdev->q_depth = le16_to_cpu(dev_pg0->queue_depth); 1119 tgtdev->wwid = le64_to_cpu(dev_pg0->wwid); 1120 tgtdev->devpg0_flag = le16_to_cpu(dev_pg0->flags); 1121 1122 if (tgtdev->encl_handle) 1123 enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc, 1124 tgtdev->encl_handle); 1125 if (enclosure_dev) 1126 tgtdev->enclosure_logical_id = le64_to_cpu( 1127 enclosure_dev->pg0.enclosure_logical_id); 1128 1129 flags = tgtdev->devpg0_flag; 1130 1131 tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN); 1132 1133 if (is_added == true) 1134 tgtdev->io_throttle_enabled = 1135 (flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 1 : 0; 1136 1137 switch (flags & MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_MASK) { 1138 case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_256_LB: 1139 tgtdev->wslen = MPI3MR_WRITE_SAME_MAX_LEN_256_BLKS; 1140 break; 1141 case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_2048_LB: 1142 tgtdev->wslen = MPI3MR_WRITE_SAME_MAX_LEN_2048_BLKS; 1143 break; 1144 case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_NO_LIMIT: 1145 default: 1146 tgtdev->wslen = 0; 1147 break; 1148 } 1149 1150 if (tgtdev->starget && tgtdev->starget->hostdata) { 1151 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 1152 tgtdev->starget->hostdata; 1153 scsi_tgt_priv_data->perst_id = tgtdev->perst_id; 1154 scsi_tgt_priv_data->dev_handle = tgtdev->dev_handle; 1155 scsi_tgt_priv_data->dev_type = tgtdev->dev_type; 1156 scsi_tgt_priv_data->io_throttle_enabled = 1157 tgtdev->io_throttle_enabled; 1158 if (is_added == true) 1159 atomic_set(&scsi_tgt_priv_data->block_io, 0); 1160 scsi_tgt_priv_data->wslen = tgtdev->wslen; 1161 } 1162 1163 switch (dev_pg0->access_status) { 1164 case MPI3_DEVICE0_ASTATUS_NO_ERRORS: 1165 case MPI3_DEVICE0_ASTATUS_PREPARE: 1166 case MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION: 1167 case MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY: 1168 break; 1169 default: 1170 tgtdev->is_hidden = 1; 1171 break; 1172 } 1173 1174 switch (tgtdev->dev_type) { 1175 case MPI3_DEVICE_DEVFORM_SAS_SATA: 1176 { 1177 struct mpi3_device0_sas_sata_format *sasinf = 1178 &dev_pg0->device_specific.sas_sata_format; 1179 u16 dev_info = le16_to_cpu(sasinf->device_info); 1180 1181 tgtdev->dev_spec.sas_sata_inf.dev_info = dev_info; 1182 tgtdev->dev_spec.sas_sata_inf.sas_address = 1183 le64_to_cpu(sasinf->sas_address); 1184 tgtdev->dev_spec.sas_sata_inf.phy_id = sasinf->phy_num; 1185 tgtdev->dev_spec.sas_sata_inf.attached_phy_id = 1186 sasinf->attached_phy_identifier; 1187 if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) != 1188 MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE) 1189 tgtdev->is_hidden = 1; 1190 else if (!(dev_info & (MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET | 1191 MPI3_SAS_DEVICE_INFO_SSP_TARGET))) 1192 tgtdev->is_hidden = 1; 1193 1194 if (((tgtdev->devpg0_flag & 1195 MPI3_DEVICE0_FLAGS_ATT_METHOD_DIR_ATTACHED) 1196 && (tgtdev->devpg0_flag & 1197 MPI3_DEVICE0_FLAGS_ATT_METHOD_VIRTUAL)) || 1198 (tgtdev->parent_handle == 0xFFFF)) 1199 tgtdev->non_stl = 1; 1200 if (tgtdev->dev_spec.sas_sata_inf.hba_port) 1201 tgtdev->dev_spec.sas_sata_inf.hba_port->port_id = 1202 dev_pg0->io_unit_port; 1203 break; 1204 } 1205 case MPI3_DEVICE_DEVFORM_PCIE: 1206 { 1207 struct mpi3_device0_pcie_format *pcieinf = 1208 &dev_pg0->device_specific.pcie_format; 1209 u16 dev_info = le16_to_cpu(pcieinf->device_info); 1210 1211 tgtdev->dev_spec.pcie_inf.dev_info = dev_info; 1212 tgtdev->dev_spec.pcie_inf.capb = 1213 le32_to_cpu(pcieinf->capabilities); 1214 tgtdev->dev_spec.pcie_inf.mdts = MPI3MR_DEFAULT_MDTS; 1215 /* 2^12 = 4096 */ 1216 tgtdev->dev_spec.pcie_inf.pgsz = 12; 1217 if (dev_pg0->access_status == MPI3_DEVICE0_ASTATUS_NO_ERRORS) { 1218 tgtdev->dev_spec.pcie_inf.mdts = 1219 le32_to_cpu(pcieinf->maximum_data_transfer_size); 1220 tgtdev->dev_spec.pcie_inf.pgsz = pcieinf->page_size; 1221 tgtdev->dev_spec.pcie_inf.reset_to = 1222 max_t(u8, pcieinf->controller_reset_to, 1223 MPI3MR_INTADMCMD_TIMEOUT); 1224 tgtdev->dev_spec.pcie_inf.abort_to = 1225 max_t(u8, pcieinf->nvme_abort_to, 1226 MPI3MR_INTADMCMD_TIMEOUT); 1227 } 1228 if (tgtdev->dev_spec.pcie_inf.mdts > (1024 * 1024)) 1229 tgtdev->dev_spec.pcie_inf.mdts = (1024 * 1024); 1230 if (((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) != 1231 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) && 1232 ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) != 1233 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE)) 1234 tgtdev->is_hidden = 1; 1235 tgtdev->non_stl = 1; 1236 if (!mrioc->shost) 1237 break; 1238 prot_mask = scsi_host_get_prot(mrioc->shost); 1239 if (prot_mask & SHOST_DIX_TYPE0_PROTECTION) { 1240 scsi_host_set_prot(mrioc->shost, prot_mask & 0x77); 1241 ioc_info(mrioc, 1242 "%s : Disabling DIX0 prot capability\n", __func__); 1243 ioc_info(mrioc, 1244 "because HBA does not support DIX0 operation on NVME drives\n"); 1245 } 1246 break; 1247 } 1248 case MPI3_DEVICE_DEVFORM_VD: 1249 { 1250 struct mpi3_device0_vd_format *vdinf = 1251 &dev_pg0->device_specific.vd_format; 1252 struct mpi3mr_throttle_group_info *tg = NULL; 1253 u16 vdinf_io_throttle_group = 1254 le16_to_cpu(vdinf->io_throttle_group); 1255 1256 tgtdev->dev_spec.vd_inf.state = vdinf->vd_state; 1257 if (vdinf->vd_state == MPI3_DEVICE0_VD_STATE_OFFLINE) 1258 tgtdev->is_hidden = 1; 1259 tgtdev->non_stl = 1; 1260 tgtdev->dev_spec.vd_inf.tg_id = vdinf_io_throttle_group; 1261 tgtdev->dev_spec.vd_inf.tg_high = 1262 le16_to_cpu(vdinf->io_throttle_group_high) * 2048; 1263 tgtdev->dev_spec.vd_inf.tg_low = 1264 le16_to_cpu(vdinf->io_throttle_group_low) * 2048; 1265 if (vdinf_io_throttle_group < mrioc->num_io_throttle_group) { 1266 tg = mrioc->throttle_groups + vdinf_io_throttle_group; 1267 tg->id = vdinf_io_throttle_group; 1268 tg->high = tgtdev->dev_spec.vd_inf.tg_high; 1269 tg->low = tgtdev->dev_spec.vd_inf.tg_low; 1270 tg->qd_reduction = 1271 tgtdev->dev_spec.vd_inf.tg_qd_reduction; 1272 if (is_added == true) 1273 tg->fw_qd = tgtdev->q_depth; 1274 tg->modified_qd = tgtdev->q_depth; 1275 } 1276 tgtdev->dev_spec.vd_inf.tg = tg; 1277 if (scsi_tgt_priv_data) 1278 scsi_tgt_priv_data->throttle_group = tg; 1279 break; 1280 } 1281 default: 1282 break; 1283 } 1284 } 1285 1286 /** 1287 * mpi3mr_devstatuschg_evt_bh - DevStatusChange evt bottomhalf 1288 * @mrioc: Adapter instance reference 1289 * @fwevt: Firmware event information. 1290 * 1291 * Process Device status Change event and based on device's new 1292 * information, either expose the device to the upper layers, or 1293 * remove the device from upper layers. 1294 * 1295 * Return: Nothing. 1296 */ 1297 static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_ioc *mrioc, 1298 struct mpi3mr_fwevt *fwevt) 1299 { 1300 u16 dev_handle = 0; 1301 u8 uhide = 0, delete = 0, cleanup = 0; 1302 struct mpi3mr_tgt_dev *tgtdev = NULL; 1303 struct mpi3_event_data_device_status_change *evtdata = 1304 (struct mpi3_event_data_device_status_change *)fwevt->event_data; 1305 1306 dev_handle = le16_to_cpu(evtdata->dev_handle); 1307 ioc_info(mrioc, 1308 "%s :device status change: handle(0x%04x): reason code(0x%x)\n", 1309 __func__, dev_handle, evtdata->reason_code); 1310 switch (evtdata->reason_code) { 1311 case MPI3_EVENT_DEV_STAT_RC_HIDDEN: 1312 delete = 1; 1313 break; 1314 case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN: 1315 uhide = 1; 1316 break; 1317 case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING: 1318 delete = 1; 1319 cleanup = 1; 1320 break; 1321 default: 1322 ioc_info(mrioc, "%s :Unhandled reason code(0x%x)\n", __func__, 1323 evtdata->reason_code); 1324 break; 1325 } 1326 1327 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle); 1328 if (!tgtdev) 1329 goto out; 1330 if (uhide) { 1331 tgtdev->is_hidden = 0; 1332 if (!tgtdev->host_exposed) 1333 mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id); 1334 } 1335 1336 if (delete) 1337 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); 1338 1339 if (cleanup) { 1340 mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false); 1341 mpi3mr_tgtdev_put(tgtdev); 1342 } 1343 1344 out: 1345 if (tgtdev) 1346 mpi3mr_tgtdev_put(tgtdev); 1347 } 1348 1349 /** 1350 * mpi3mr_devinfochg_evt_bh - DeviceInfoChange evt bottomhalf 1351 * @mrioc: Adapter instance reference 1352 * @dev_pg0: New device page0 1353 * 1354 * Process Device Info Change event and based on device's new 1355 * information, either expose the device to the upper layers, or 1356 * remove the device from upper layers or update the details of 1357 * the device. 1358 * 1359 * Return: Nothing. 1360 */ 1361 static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_ioc *mrioc, 1362 struct mpi3_device_page0 *dev_pg0) 1363 { 1364 struct mpi3mr_tgt_dev *tgtdev = NULL; 1365 u16 dev_handle = 0, perst_id = 0; 1366 1367 perst_id = le16_to_cpu(dev_pg0->persistent_id); 1368 dev_handle = le16_to_cpu(dev_pg0->dev_handle); 1369 ioc_info(mrioc, 1370 "%s :Device info change: handle(0x%04x): persist_id(0x%x)\n", 1371 __func__, dev_handle, perst_id); 1372 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle); 1373 if (!tgtdev) 1374 goto out; 1375 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, false); 1376 if (!tgtdev->is_hidden && !tgtdev->host_exposed) 1377 mpi3mr_report_tgtdev_to_host(mrioc, perst_id); 1378 if (tgtdev->is_hidden && tgtdev->host_exposed) 1379 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); 1380 if (!tgtdev->is_hidden && tgtdev->host_exposed && tgtdev->starget) 1381 starget_for_each_device(tgtdev->starget, (void *)tgtdev, 1382 mpi3mr_update_sdev); 1383 out: 1384 if (tgtdev) 1385 mpi3mr_tgtdev_put(tgtdev); 1386 } 1387 1388 /** 1389 * mpi3mr_free_enclosure_list - release enclosures 1390 * @mrioc: Adapter instance reference 1391 * 1392 * Free memory allocated during encloure add. 1393 * 1394 * Return nothing. 1395 */ 1396 void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc) 1397 { 1398 struct mpi3mr_enclosure_node *enclosure_dev, *enclosure_dev_next; 1399 1400 list_for_each_entry_safe(enclosure_dev, 1401 enclosure_dev_next, &mrioc->enclosure_list, list) { 1402 list_del(&enclosure_dev->list); 1403 kfree(enclosure_dev); 1404 } 1405 } 1406 1407 /** 1408 * mpi3mr_enclosure_find_by_handle - enclosure search by handle 1409 * @mrioc: Adapter instance reference 1410 * @handle: Firmware device handle of the enclosure 1411 * 1412 * This searches for enclosure device based on handle, then returns the 1413 * enclosure object. 1414 * 1415 * Return: Enclosure object reference or NULL 1416 */ 1417 struct mpi3mr_enclosure_node *mpi3mr_enclosure_find_by_handle( 1418 struct mpi3mr_ioc *mrioc, u16 handle) 1419 { 1420 struct mpi3mr_enclosure_node *enclosure_dev, *r = NULL; 1421 1422 list_for_each_entry(enclosure_dev, &mrioc->enclosure_list, list) { 1423 if (le16_to_cpu(enclosure_dev->pg0.enclosure_handle) != handle) 1424 continue; 1425 r = enclosure_dev; 1426 goto out; 1427 } 1428 out: 1429 return r; 1430 } 1431 1432 /** 1433 * mpi3mr_encldev_add_chg_evt_debug - debug for enclosure event 1434 * @mrioc: Adapter instance reference 1435 * @encl_pg0: Enclosure page 0. 1436 * @is_added: Added event or not 1437 * 1438 * Return nothing. 1439 */ 1440 static void mpi3mr_encldev_add_chg_evt_debug(struct mpi3mr_ioc *mrioc, 1441 struct mpi3_enclosure_page0 *encl_pg0, u8 is_added) 1442 { 1443 char *reason_str = NULL; 1444 1445 if (!(mrioc->logging_level & MPI3_DEBUG_EVENT_WORK_TASK)) 1446 return; 1447 1448 if (is_added) 1449 reason_str = "enclosure added"; 1450 else 1451 reason_str = "enclosure dev status changed"; 1452 1453 ioc_info(mrioc, 1454 "%s: handle(0x%04x), enclosure logical id(0x%016llx)\n", 1455 reason_str, le16_to_cpu(encl_pg0->enclosure_handle), 1456 (unsigned long long)le64_to_cpu(encl_pg0->enclosure_logical_id)); 1457 ioc_info(mrioc, 1458 "number of slots(%d), port(%d), flags(0x%04x), present(%d)\n", 1459 le16_to_cpu(encl_pg0->num_slots), encl_pg0->io_unit_port, 1460 le16_to_cpu(encl_pg0->flags), 1461 ((le16_to_cpu(encl_pg0->flags) & 1462 MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4)); 1463 } 1464 1465 /** 1466 * mpi3mr_encldev_add_chg_evt_bh - Enclosure evt bottomhalf 1467 * @mrioc: Adapter instance reference 1468 * @fwevt: Firmware event reference 1469 * 1470 * Prints information about the Enclosure device status or 1471 * Enclosure add events if logging is enabled and add or remove 1472 * the enclosure from the controller's internal list of 1473 * enclosures. 1474 * 1475 * Return: Nothing. 1476 */ 1477 static void mpi3mr_encldev_add_chg_evt_bh(struct mpi3mr_ioc *mrioc, 1478 struct mpi3mr_fwevt *fwevt) 1479 { 1480 struct mpi3mr_enclosure_node *enclosure_dev = NULL; 1481 struct mpi3_enclosure_page0 *encl_pg0; 1482 u16 encl_handle; 1483 u8 added, present; 1484 1485 encl_pg0 = (struct mpi3_enclosure_page0 *) fwevt->event_data; 1486 added = (fwevt->event_id == MPI3_EVENT_ENCL_DEVICE_ADDED) ? 1 : 0; 1487 mpi3mr_encldev_add_chg_evt_debug(mrioc, encl_pg0, added); 1488 1489 1490 encl_handle = le16_to_cpu(encl_pg0->enclosure_handle); 1491 present = ((le16_to_cpu(encl_pg0->flags) & 1492 MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK) >> 4); 1493 1494 if (encl_handle) 1495 enclosure_dev = mpi3mr_enclosure_find_by_handle(mrioc, 1496 encl_handle); 1497 if (!enclosure_dev && present) { 1498 enclosure_dev = 1499 kzalloc(sizeof(struct mpi3mr_enclosure_node), 1500 GFP_KERNEL); 1501 if (!enclosure_dev) 1502 return; 1503 list_add_tail(&enclosure_dev->list, 1504 &mrioc->enclosure_list); 1505 } 1506 if (enclosure_dev) { 1507 if (!present) { 1508 list_del(&enclosure_dev->list); 1509 kfree(enclosure_dev); 1510 } else 1511 memcpy(&enclosure_dev->pg0, encl_pg0, 1512 sizeof(enclosure_dev->pg0)); 1513 1514 } 1515 } 1516 1517 /** 1518 * mpi3mr_sastopochg_evt_debug - SASTopoChange details 1519 * @mrioc: Adapter instance reference 1520 * @event_data: SAS topology change list event data 1521 * 1522 * Prints information about the SAS topology change event. 1523 * 1524 * Return: Nothing. 1525 */ 1526 static void 1527 mpi3mr_sastopochg_evt_debug(struct mpi3mr_ioc *mrioc, 1528 struct mpi3_event_data_sas_topology_change_list *event_data) 1529 { 1530 int i; 1531 u16 handle; 1532 u8 reason_code, phy_number; 1533 char *status_str = NULL; 1534 u8 link_rate, prev_link_rate; 1535 1536 switch (event_data->exp_status) { 1537 case MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING: 1538 status_str = "remove"; 1539 break; 1540 case MPI3_EVENT_SAS_TOPO_ES_RESPONDING: 1541 status_str = "responding"; 1542 break; 1543 case MPI3_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING: 1544 status_str = "remove delay"; 1545 break; 1546 case MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER: 1547 status_str = "direct attached"; 1548 break; 1549 default: 1550 status_str = "unknown status"; 1551 break; 1552 } 1553 ioc_info(mrioc, "%s :sas topology change: (%s)\n", 1554 __func__, status_str); 1555 ioc_info(mrioc, 1556 "%s :\texpander_handle(0x%04x), port(%d), enclosure_handle(0x%04x) start_phy(%02d), num_entries(%d)\n", 1557 __func__, le16_to_cpu(event_data->expander_dev_handle), 1558 event_data->io_unit_port, 1559 le16_to_cpu(event_data->enclosure_handle), 1560 event_data->start_phy_num, event_data->num_entries); 1561 for (i = 0; i < event_data->num_entries; i++) { 1562 handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle); 1563 if (!handle) 1564 continue; 1565 phy_number = event_data->start_phy_num + i; 1566 reason_code = event_data->phy_entry[i].status & 1567 MPI3_EVENT_SAS_TOPO_PHY_RC_MASK; 1568 switch (reason_code) { 1569 case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING: 1570 status_str = "target remove"; 1571 break; 1572 case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING: 1573 status_str = "delay target remove"; 1574 break; 1575 case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED: 1576 status_str = "link status change"; 1577 break; 1578 case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE: 1579 status_str = "link status no change"; 1580 break; 1581 case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING: 1582 status_str = "target responding"; 1583 break; 1584 default: 1585 status_str = "unknown"; 1586 break; 1587 } 1588 link_rate = event_data->phy_entry[i].link_rate >> 4; 1589 prev_link_rate = event_data->phy_entry[i].link_rate & 0xF; 1590 ioc_info(mrioc, 1591 "%s :\tphy(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n", 1592 __func__, phy_number, handle, status_str, link_rate, 1593 prev_link_rate); 1594 } 1595 } 1596 1597 /** 1598 * mpi3mr_sastopochg_evt_bh - SASTopologyChange evt bottomhalf 1599 * @mrioc: Adapter instance reference 1600 * @fwevt: Firmware event reference 1601 * 1602 * Prints information about the SAS topology change event and 1603 * for "not responding" event code, removes the device from the 1604 * upper layers. 1605 * 1606 * Return: Nothing. 1607 */ 1608 static void mpi3mr_sastopochg_evt_bh(struct mpi3mr_ioc *mrioc, 1609 struct mpi3mr_fwevt *fwevt) 1610 { 1611 struct mpi3_event_data_sas_topology_change_list *event_data = 1612 (struct mpi3_event_data_sas_topology_change_list *)fwevt->event_data; 1613 int i; 1614 u16 handle; 1615 u8 reason_code; 1616 u64 exp_sas_address = 0, parent_sas_address = 0; 1617 struct mpi3mr_hba_port *hba_port = NULL; 1618 struct mpi3mr_tgt_dev *tgtdev = NULL; 1619 struct mpi3mr_sas_node *sas_expander = NULL; 1620 unsigned long flags; 1621 u8 link_rate, prev_link_rate, parent_phy_number; 1622 1623 mpi3mr_sastopochg_evt_debug(mrioc, event_data); 1624 if (mrioc->sas_transport_enabled) { 1625 hba_port = mpi3mr_get_hba_port_by_id(mrioc, 1626 event_data->io_unit_port); 1627 if (le16_to_cpu(event_data->expander_dev_handle)) { 1628 spin_lock_irqsave(&mrioc->sas_node_lock, flags); 1629 sas_expander = __mpi3mr_expander_find_by_handle(mrioc, 1630 le16_to_cpu(event_data->expander_dev_handle)); 1631 if (sas_expander) { 1632 exp_sas_address = sas_expander->sas_address; 1633 hba_port = sas_expander->hba_port; 1634 } 1635 spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); 1636 parent_sas_address = exp_sas_address; 1637 } else 1638 parent_sas_address = mrioc->sas_hba.sas_address; 1639 } 1640 1641 for (i = 0; i < event_data->num_entries; i++) { 1642 if (fwevt->discard) 1643 return; 1644 handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle); 1645 if (!handle) 1646 continue; 1647 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 1648 if (!tgtdev) 1649 continue; 1650 1651 reason_code = event_data->phy_entry[i].status & 1652 MPI3_EVENT_SAS_TOPO_PHY_RC_MASK; 1653 1654 switch (reason_code) { 1655 case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING: 1656 if (tgtdev->host_exposed) 1657 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); 1658 mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false); 1659 mpi3mr_tgtdev_put(tgtdev); 1660 break; 1661 case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING: 1662 case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED: 1663 case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE: 1664 { 1665 if (!mrioc->sas_transport_enabled || tgtdev->non_stl 1666 || tgtdev->is_hidden) 1667 break; 1668 link_rate = event_data->phy_entry[i].link_rate >> 4; 1669 prev_link_rate = event_data->phy_entry[i].link_rate & 0xF; 1670 if (link_rate == prev_link_rate) 1671 break; 1672 if (!parent_sas_address) 1673 break; 1674 parent_phy_number = event_data->start_phy_num + i; 1675 mpi3mr_update_links(mrioc, parent_sas_address, handle, 1676 parent_phy_number, link_rate, hba_port); 1677 break; 1678 } 1679 default: 1680 break; 1681 } 1682 if (tgtdev) 1683 mpi3mr_tgtdev_put(tgtdev); 1684 } 1685 1686 if (mrioc->sas_transport_enabled && (event_data->exp_status == 1687 MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING)) { 1688 if (sas_expander) 1689 mpi3mr_expander_remove(mrioc, exp_sas_address, 1690 hba_port); 1691 } 1692 } 1693 1694 /** 1695 * mpi3mr_pcietopochg_evt_debug - PCIeTopoChange details 1696 * @mrioc: Adapter instance reference 1697 * @event_data: PCIe topology change list event data 1698 * 1699 * Prints information about the PCIe topology change event. 1700 * 1701 * Return: Nothing. 1702 */ 1703 static void 1704 mpi3mr_pcietopochg_evt_debug(struct mpi3mr_ioc *mrioc, 1705 struct mpi3_event_data_pcie_topology_change_list *event_data) 1706 { 1707 int i; 1708 u16 handle; 1709 u16 reason_code; 1710 u8 port_number; 1711 char *status_str = NULL; 1712 u8 link_rate, prev_link_rate; 1713 1714 switch (event_data->switch_status) { 1715 case MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING: 1716 status_str = "remove"; 1717 break; 1718 case MPI3_EVENT_PCIE_TOPO_SS_RESPONDING: 1719 status_str = "responding"; 1720 break; 1721 case MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING: 1722 status_str = "remove delay"; 1723 break; 1724 case MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH: 1725 status_str = "direct attached"; 1726 break; 1727 default: 1728 status_str = "unknown status"; 1729 break; 1730 } 1731 ioc_info(mrioc, "%s :pcie topology change: (%s)\n", 1732 __func__, status_str); 1733 ioc_info(mrioc, 1734 "%s :\tswitch_handle(0x%04x), enclosure_handle(0x%04x) start_port(%02d), num_entries(%d)\n", 1735 __func__, le16_to_cpu(event_data->switch_dev_handle), 1736 le16_to_cpu(event_data->enclosure_handle), 1737 event_data->start_port_num, event_data->num_entries); 1738 for (i = 0; i < event_data->num_entries; i++) { 1739 handle = 1740 le16_to_cpu(event_data->port_entry[i].attached_dev_handle); 1741 if (!handle) 1742 continue; 1743 port_number = event_data->start_port_num + i; 1744 reason_code = event_data->port_entry[i].port_status; 1745 switch (reason_code) { 1746 case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING: 1747 status_str = "target remove"; 1748 break; 1749 case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING: 1750 status_str = "delay target remove"; 1751 break; 1752 case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED: 1753 status_str = "link status change"; 1754 break; 1755 case MPI3_EVENT_PCIE_TOPO_PS_NO_CHANGE: 1756 status_str = "link status no change"; 1757 break; 1758 case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING: 1759 status_str = "target responding"; 1760 break; 1761 default: 1762 status_str = "unknown"; 1763 break; 1764 } 1765 link_rate = event_data->port_entry[i].current_port_info & 1766 MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK; 1767 prev_link_rate = event_data->port_entry[i].previous_port_info & 1768 MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK; 1769 ioc_info(mrioc, 1770 "%s :\tport(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n", 1771 __func__, port_number, handle, status_str, link_rate, 1772 prev_link_rate); 1773 } 1774 } 1775 1776 /** 1777 * mpi3mr_pcietopochg_evt_bh - PCIeTopologyChange evt bottomhalf 1778 * @mrioc: Adapter instance reference 1779 * @fwevt: Firmware event reference 1780 * 1781 * Prints information about the PCIe topology change event and 1782 * for "not responding" event code, removes the device from the 1783 * upper layers. 1784 * 1785 * Return: Nothing. 1786 */ 1787 static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc, 1788 struct mpi3mr_fwevt *fwevt) 1789 { 1790 struct mpi3_event_data_pcie_topology_change_list *event_data = 1791 (struct mpi3_event_data_pcie_topology_change_list *)fwevt->event_data; 1792 int i; 1793 u16 handle; 1794 u8 reason_code; 1795 struct mpi3mr_tgt_dev *tgtdev = NULL; 1796 1797 mpi3mr_pcietopochg_evt_debug(mrioc, event_data); 1798 1799 for (i = 0; i < event_data->num_entries; i++) { 1800 if (fwevt->discard) 1801 return; 1802 handle = 1803 le16_to_cpu(event_data->port_entry[i].attached_dev_handle); 1804 if (!handle) 1805 continue; 1806 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 1807 if (!tgtdev) 1808 continue; 1809 1810 reason_code = event_data->port_entry[i].port_status; 1811 1812 switch (reason_code) { 1813 case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING: 1814 if (tgtdev->host_exposed) 1815 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); 1816 mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, false); 1817 mpi3mr_tgtdev_put(tgtdev); 1818 break; 1819 default: 1820 break; 1821 } 1822 if (tgtdev) 1823 mpi3mr_tgtdev_put(tgtdev); 1824 } 1825 } 1826 1827 /** 1828 * mpi3mr_logdata_evt_bh - Log data event bottomhalf 1829 * @mrioc: Adapter instance reference 1830 * @fwevt: Firmware event reference 1831 * 1832 * Extracts the event data and calls application interfacing 1833 * function to process the event further. 1834 * 1835 * Return: Nothing. 1836 */ 1837 static void mpi3mr_logdata_evt_bh(struct mpi3mr_ioc *mrioc, 1838 struct mpi3mr_fwevt *fwevt) 1839 { 1840 mpi3mr_app_save_logdata(mrioc, fwevt->event_data, 1841 fwevt->event_data_size); 1842 } 1843 1844 /** 1845 * mpi3mr_update_sdev_qd - Update SCSI device queue depath 1846 * @sdev: SCSI device reference 1847 * @data: Queue depth reference 1848 * 1849 * This is an iterator function called for each SCSI device in a 1850 * target to update the QD of each SCSI device. 1851 * 1852 * Return: Nothing. 1853 */ 1854 static void mpi3mr_update_sdev_qd(struct scsi_device *sdev, void *data) 1855 { 1856 u16 *q_depth = (u16 *)data; 1857 1858 scsi_change_queue_depth(sdev, (int)*q_depth); 1859 sdev->max_queue_depth = sdev->queue_depth; 1860 } 1861 1862 /** 1863 * mpi3mr_set_qd_for_all_vd_in_tg -set QD for TG VDs 1864 * @mrioc: Adapter instance reference 1865 * @tg: Throttle group information pointer 1866 * 1867 * Accessor to reduce QD for each device associated with the 1868 * given throttle group. 1869 * 1870 * Return: None. 1871 */ 1872 static void mpi3mr_set_qd_for_all_vd_in_tg(struct mpi3mr_ioc *mrioc, 1873 struct mpi3mr_throttle_group_info *tg) 1874 { 1875 unsigned long flags; 1876 struct mpi3mr_tgt_dev *tgtdev; 1877 struct mpi3mr_stgt_priv_data *tgt_priv; 1878 1879 1880 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 1881 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) { 1882 if (tgtdev->starget && tgtdev->starget->hostdata) { 1883 tgt_priv = tgtdev->starget->hostdata; 1884 if (tgt_priv->throttle_group == tg) { 1885 dprint_event_bh(mrioc, 1886 "updating qd due to throttling for persist_id(%d) original_qd(%d), reduced_qd (%d)\n", 1887 tgt_priv->perst_id, tgtdev->q_depth, 1888 tg->modified_qd); 1889 starget_for_each_device(tgtdev->starget, 1890 (void *)&tg->modified_qd, 1891 mpi3mr_update_sdev_qd); 1892 } 1893 } 1894 } 1895 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 1896 } 1897 1898 /** 1899 * mpi3mr_fwevt_bh - Firmware event bottomhalf handler 1900 * @mrioc: Adapter instance reference 1901 * @fwevt: Firmware event reference 1902 * 1903 * Identifies the firmware event and calls corresponding bottomg 1904 * half handler and sends event acknowledgment if required. 1905 * 1906 * Return: Nothing. 1907 */ 1908 static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc, 1909 struct mpi3mr_fwevt *fwevt) 1910 { 1911 struct mpi3_device_page0 *dev_pg0 = NULL; 1912 u16 perst_id, handle, dev_info; 1913 struct mpi3_device0_sas_sata_format *sasinf = NULL; 1914 1915 mpi3mr_fwevt_del_from_list(mrioc, fwevt); 1916 mrioc->current_event = fwevt; 1917 1918 if (mrioc->stop_drv_processing) 1919 goto out; 1920 1921 if (mrioc->unrecoverable) { 1922 dprint_event_bh(mrioc, 1923 "ignoring event(0x%02x) in bottom half handler due to unrecoverable controller\n", 1924 fwevt->event_id); 1925 goto out; 1926 } 1927 1928 if (!fwevt->process_evt) 1929 goto evt_ack; 1930 1931 switch (fwevt->event_id) { 1932 case MPI3_EVENT_DEVICE_ADDED: 1933 { 1934 dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data; 1935 perst_id = le16_to_cpu(dev_pg0->persistent_id); 1936 handle = le16_to_cpu(dev_pg0->dev_handle); 1937 if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID) 1938 mpi3mr_report_tgtdev_to_host(mrioc, perst_id); 1939 else if (mrioc->sas_transport_enabled && 1940 (dev_pg0->device_form == MPI3_DEVICE_DEVFORM_SAS_SATA)) { 1941 sasinf = &dev_pg0->device_specific.sas_sata_format; 1942 dev_info = le16_to_cpu(sasinf->device_info); 1943 if (!mrioc->sas_hba.num_phys) 1944 mpi3mr_sas_host_add(mrioc); 1945 else 1946 mpi3mr_sas_host_refresh(mrioc); 1947 1948 if (mpi3mr_is_expander_device(dev_info)) 1949 mpi3mr_expander_add(mrioc, handle); 1950 } 1951 break; 1952 } 1953 case MPI3_EVENT_DEVICE_INFO_CHANGED: 1954 { 1955 dev_pg0 = (struct mpi3_device_page0 *)fwevt->event_data; 1956 perst_id = le16_to_cpu(dev_pg0->persistent_id); 1957 if (perst_id != MPI3_DEVICE0_PERSISTENTID_INVALID) 1958 mpi3mr_devinfochg_evt_bh(mrioc, dev_pg0); 1959 break; 1960 } 1961 case MPI3_EVENT_DEVICE_STATUS_CHANGE: 1962 { 1963 mpi3mr_devstatuschg_evt_bh(mrioc, fwevt); 1964 break; 1965 } 1966 case MPI3_EVENT_ENCL_DEVICE_ADDED: 1967 case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE: 1968 { 1969 mpi3mr_encldev_add_chg_evt_bh(mrioc, fwevt); 1970 break; 1971 } 1972 1973 case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST: 1974 { 1975 mpi3mr_sastopochg_evt_bh(mrioc, fwevt); 1976 break; 1977 } 1978 case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: 1979 { 1980 mpi3mr_pcietopochg_evt_bh(mrioc, fwevt); 1981 break; 1982 } 1983 case MPI3_EVENT_LOG_DATA: 1984 { 1985 mpi3mr_logdata_evt_bh(mrioc, fwevt); 1986 break; 1987 } 1988 case MPI3MR_DRIVER_EVENT_TG_QD_REDUCTION: 1989 { 1990 struct mpi3mr_throttle_group_info *tg; 1991 1992 tg = *(struct mpi3mr_throttle_group_info **)fwevt->event_data; 1993 dprint_event_bh(mrioc, 1994 "qd reduction event processed for tg_id(%d) reduction_needed(%d)\n", 1995 tg->id, tg->need_qd_reduction); 1996 if (tg->need_qd_reduction) { 1997 mpi3mr_set_qd_for_all_vd_in_tg(mrioc, tg); 1998 tg->need_qd_reduction = 0; 1999 } 2000 break; 2001 } 2002 case MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH: 2003 { 2004 while (mrioc->device_refresh_on) 2005 msleep(500); 2006 2007 dprint_event_bh(mrioc, 2008 "scan for non responding and newly added devices after soft reset started\n"); 2009 if (mrioc->sas_transport_enabled) { 2010 mpi3mr_refresh_sas_ports(mrioc); 2011 mpi3mr_refresh_expanders(mrioc); 2012 } 2013 mpi3mr_rfresh_tgtdevs(mrioc); 2014 ioc_info(mrioc, 2015 "scan for non responding and newly added devices after soft reset completed\n"); 2016 break; 2017 } 2018 default: 2019 break; 2020 } 2021 2022 evt_ack: 2023 if (fwevt->send_ack) 2024 mpi3mr_process_event_ack(mrioc, fwevt->event_id, 2025 fwevt->evt_ctx); 2026 out: 2027 /* Put fwevt reference count to neutralize kref_init increment */ 2028 mpi3mr_fwevt_put(fwevt); 2029 mrioc->current_event = NULL; 2030 } 2031 2032 /** 2033 * mpi3mr_fwevt_worker - Firmware event worker 2034 * @work: Work struct containing firmware event 2035 * 2036 * Extracts the firmware event and calls mpi3mr_fwevt_bh. 2037 * 2038 * Return: Nothing. 2039 */ 2040 static void mpi3mr_fwevt_worker(struct work_struct *work) 2041 { 2042 struct mpi3mr_fwevt *fwevt = container_of(work, struct mpi3mr_fwevt, 2043 work); 2044 mpi3mr_fwevt_bh(fwevt->mrioc, fwevt); 2045 /* 2046 * Put fwevt reference count after 2047 * dequeuing it from worker queue 2048 */ 2049 mpi3mr_fwevt_put(fwevt); 2050 } 2051 2052 /** 2053 * mpi3mr_create_tgtdev - Create and add a target device 2054 * @mrioc: Adapter instance reference 2055 * @dev_pg0: Device Page 0 data 2056 * 2057 * If the device specified by the device page 0 data is not 2058 * present in the driver's internal list, allocate the memory 2059 * for the device, populate the data and add to the list, else 2060 * update the device data. The key is persistent ID. 2061 * 2062 * Return: 0 on success, -ENOMEM on memory allocation failure 2063 */ 2064 static int mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc, 2065 struct mpi3_device_page0 *dev_pg0) 2066 { 2067 int retval = 0; 2068 struct mpi3mr_tgt_dev *tgtdev = NULL; 2069 u16 perst_id = 0; 2070 unsigned long flags; 2071 2072 perst_id = le16_to_cpu(dev_pg0->persistent_id); 2073 if (perst_id == MPI3_DEVICE0_PERSISTENTID_INVALID) 2074 return retval; 2075 2076 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 2077 tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id); 2078 if (tgtdev) 2079 tgtdev->state = MPI3MR_DEV_CREATED; 2080 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 2081 2082 if (tgtdev) { 2083 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true); 2084 mpi3mr_tgtdev_put(tgtdev); 2085 } else { 2086 tgtdev = mpi3mr_alloc_tgtdev(); 2087 if (!tgtdev) 2088 return -ENOMEM; 2089 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0, true); 2090 mpi3mr_tgtdev_add_to_list(mrioc, tgtdev); 2091 } 2092 2093 return retval; 2094 } 2095 2096 /** 2097 * mpi3mr_flush_delayed_cmd_lists - Flush pending commands 2098 * @mrioc: Adapter instance reference 2099 * 2100 * Flush pending commands in the delayed lists due to a 2101 * controller reset or driver removal as a cleanup. 2102 * 2103 * Return: Nothing 2104 */ 2105 void mpi3mr_flush_delayed_cmd_lists(struct mpi3mr_ioc *mrioc) 2106 { 2107 struct delayed_dev_rmhs_node *_rmhs_node; 2108 struct delayed_evt_ack_node *_evtack_node; 2109 2110 dprint_reset(mrioc, "flushing delayed dev_remove_hs commands\n"); 2111 while (!list_empty(&mrioc->delayed_rmhs_list)) { 2112 _rmhs_node = list_entry(mrioc->delayed_rmhs_list.next, 2113 struct delayed_dev_rmhs_node, list); 2114 list_del(&_rmhs_node->list); 2115 kfree(_rmhs_node); 2116 } 2117 dprint_reset(mrioc, "flushing delayed event ack commands\n"); 2118 while (!list_empty(&mrioc->delayed_evtack_cmds_list)) { 2119 _evtack_node = list_entry(mrioc->delayed_evtack_cmds_list.next, 2120 struct delayed_evt_ack_node, list); 2121 list_del(&_evtack_node->list); 2122 kfree(_evtack_node); 2123 } 2124 } 2125 2126 /** 2127 * mpi3mr_dev_rmhs_complete_iou - Device removal IOUC completion 2128 * @mrioc: Adapter instance reference 2129 * @drv_cmd: Internal command tracker 2130 * 2131 * Issues a target reset TM to the firmware from the device 2132 * removal TM pend list or retry the removal handshake sequence 2133 * based on the IOU control request IOC status. 2134 * 2135 * Return: Nothing 2136 */ 2137 static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_ioc *mrioc, 2138 struct mpi3mr_drv_cmd *drv_cmd) 2139 { 2140 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN; 2141 struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL; 2142 2143 if (drv_cmd->state & MPI3MR_CMD_RESET) 2144 goto clear_drv_cmd; 2145 2146 ioc_info(mrioc, 2147 "%s :dev_rmhs_iouctrl_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x)\n", 2148 __func__, drv_cmd->dev_handle, drv_cmd->ioc_status, 2149 drv_cmd->ioc_loginfo); 2150 if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) { 2151 if (drv_cmd->retry_count < MPI3MR_DEV_RMHS_RETRY_COUNT) { 2152 drv_cmd->retry_count++; 2153 ioc_info(mrioc, 2154 "%s :dev_rmhs_iouctrl_complete: handle(0x%04x)retrying handshake retry=%d\n", 2155 __func__, drv_cmd->dev_handle, 2156 drv_cmd->retry_count); 2157 mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle, 2158 drv_cmd, drv_cmd->iou_rc); 2159 return; 2160 } 2161 ioc_err(mrioc, 2162 "%s :dev removal handshake failed after all retries: handle(0x%04x)\n", 2163 __func__, drv_cmd->dev_handle); 2164 } else { 2165 ioc_info(mrioc, 2166 "%s :dev removal handshake completed successfully: handle(0x%04x)\n", 2167 __func__, drv_cmd->dev_handle); 2168 clear_bit(drv_cmd->dev_handle, mrioc->removepend_bitmap); 2169 } 2170 2171 if (!list_empty(&mrioc->delayed_rmhs_list)) { 2172 delayed_dev_rmhs = list_entry(mrioc->delayed_rmhs_list.next, 2173 struct delayed_dev_rmhs_node, list); 2174 drv_cmd->dev_handle = delayed_dev_rmhs->handle; 2175 drv_cmd->retry_count = 0; 2176 drv_cmd->iou_rc = delayed_dev_rmhs->iou_rc; 2177 ioc_info(mrioc, 2178 "%s :dev_rmhs_iouctrl_complete: processing delayed TM: handle(0x%04x)\n", 2179 __func__, drv_cmd->dev_handle); 2180 mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle, drv_cmd, 2181 drv_cmd->iou_rc); 2182 list_del(&delayed_dev_rmhs->list); 2183 kfree(delayed_dev_rmhs); 2184 return; 2185 } 2186 2187 clear_drv_cmd: 2188 drv_cmd->state = MPI3MR_CMD_NOTUSED; 2189 drv_cmd->callback = NULL; 2190 drv_cmd->retry_count = 0; 2191 drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 2192 clear_bit(cmd_idx, mrioc->devrem_bitmap); 2193 } 2194 2195 /** 2196 * mpi3mr_dev_rmhs_complete_tm - Device removal TM completion 2197 * @mrioc: Adapter instance reference 2198 * @drv_cmd: Internal command tracker 2199 * 2200 * Issues a target reset TM to the firmware from the device 2201 * removal TM pend list or issue IO unit control request as 2202 * part of device removal or hidden acknowledgment handshake. 2203 * 2204 * Return: Nothing 2205 */ 2206 static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_ioc *mrioc, 2207 struct mpi3mr_drv_cmd *drv_cmd) 2208 { 2209 struct mpi3_iounit_control_request iou_ctrl; 2210 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN; 2211 struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL; 2212 int retval; 2213 2214 if (drv_cmd->state & MPI3MR_CMD_RESET) 2215 goto clear_drv_cmd; 2216 2217 if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID) 2218 tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply; 2219 2220 if (tm_reply) 2221 pr_info(IOCNAME 2222 "dev_rmhs_tr_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x), term_count(%d)\n", 2223 mrioc->name, drv_cmd->dev_handle, drv_cmd->ioc_status, 2224 drv_cmd->ioc_loginfo, 2225 le32_to_cpu(tm_reply->termination_count)); 2226 2227 pr_info(IOCNAME "Issuing IOU CTL: handle(0x%04x) dev_rmhs idx(%d)\n", 2228 mrioc->name, drv_cmd->dev_handle, cmd_idx); 2229 2230 memset(&iou_ctrl, 0, sizeof(iou_ctrl)); 2231 2232 drv_cmd->state = MPI3MR_CMD_PENDING; 2233 drv_cmd->is_waiting = 0; 2234 drv_cmd->callback = mpi3mr_dev_rmhs_complete_iou; 2235 iou_ctrl.operation = drv_cmd->iou_rc; 2236 iou_ctrl.param16[0] = cpu_to_le16(drv_cmd->dev_handle); 2237 iou_ctrl.host_tag = cpu_to_le16(drv_cmd->host_tag); 2238 iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL; 2239 2240 retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl, sizeof(iou_ctrl), 2241 1); 2242 if (retval) { 2243 pr_err(IOCNAME "Issue DevRmHsTMIOUCTL: Admin post failed\n", 2244 mrioc->name); 2245 goto clear_drv_cmd; 2246 } 2247 2248 return; 2249 clear_drv_cmd: 2250 drv_cmd->state = MPI3MR_CMD_NOTUSED; 2251 drv_cmd->callback = NULL; 2252 drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 2253 drv_cmd->retry_count = 0; 2254 clear_bit(cmd_idx, mrioc->devrem_bitmap); 2255 } 2256 2257 /** 2258 * mpi3mr_dev_rmhs_send_tm - Issue TM for device removal 2259 * @mrioc: Adapter instance reference 2260 * @handle: Device handle 2261 * @cmdparam: Internal command tracker 2262 * @iou_rc: IO unit reason code 2263 * 2264 * Issues a target reset TM to the firmware or add it to a pend 2265 * list as part of device removal or hidden acknowledgment 2266 * handshake. 2267 * 2268 * Return: Nothing 2269 */ 2270 static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle, 2271 struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc) 2272 { 2273 struct mpi3_scsi_task_mgmt_request tm_req; 2274 int retval = 0; 2275 u16 cmd_idx = MPI3MR_NUM_DEVRMCMD; 2276 u8 retrycount = 5; 2277 struct mpi3mr_drv_cmd *drv_cmd = cmdparam; 2278 struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL; 2279 struct mpi3mr_tgt_dev *tgtdev = NULL; 2280 unsigned long flags; 2281 2282 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 2283 tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle); 2284 if (tgtdev && (iou_rc == MPI3_CTRL_OP_REMOVE_DEVICE)) 2285 tgtdev->state = MPI3MR_DEV_REMOVE_HS_STARTED; 2286 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 2287 2288 if (drv_cmd) 2289 goto issue_cmd; 2290 do { 2291 cmd_idx = find_first_zero_bit(mrioc->devrem_bitmap, 2292 MPI3MR_NUM_DEVRMCMD); 2293 if (cmd_idx < MPI3MR_NUM_DEVRMCMD) { 2294 if (!test_and_set_bit(cmd_idx, mrioc->devrem_bitmap)) 2295 break; 2296 cmd_idx = MPI3MR_NUM_DEVRMCMD; 2297 } 2298 } while (retrycount--); 2299 2300 if (cmd_idx >= MPI3MR_NUM_DEVRMCMD) { 2301 delayed_dev_rmhs = kzalloc(sizeof(*delayed_dev_rmhs), 2302 GFP_ATOMIC); 2303 if (!delayed_dev_rmhs) 2304 return; 2305 INIT_LIST_HEAD(&delayed_dev_rmhs->list); 2306 delayed_dev_rmhs->handle = handle; 2307 delayed_dev_rmhs->iou_rc = iou_rc; 2308 list_add_tail(&delayed_dev_rmhs->list, 2309 &mrioc->delayed_rmhs_list); 2310 ioc_info(mrioc, "%s :DevRmHs: tr:handle(0x%04x) is postponed\n", 2311 __func__, handle); 2312 return; 2313 } 2314 drv_cmd = &mrioc->dev_rmhs_cmds[cmd_idx]; 2315 2316 issue_cmd: 2317 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN; 2318 ioc_info(mrioc, 2319 "%s :Issuing TR TM: for devhandle 0x%04x with dev_rmhs %d\n", 2320 __func__, handle, cmd_idx); 2321 2322 memset(&tm_req, 0, sizeof(tm_req)); 2323 if (drv_cmd->state & MPI3MR_CMD_PENDING) { 2324 ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__); 2325 goto out; 2326 } 2327 drv_cmd->state = MPI3MR_CMD_PENDING; 2328 drv_cmd->is_waiting = 0; 2329 drv_cmd->callback = mpi3mr_dev_rmhs_complete_tm; 2330 drv_cmd->dev_handle = handle; 2331 drv_cmd->iou_rc = iou_rc; 2332 tm_req.dev_handle = cpu_to_le16(handle); 2333 tm_req.task_type = MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET; 2334 tm_req.host_tag = cpu_to_le16(drv_cmd->host_tag); 2335 tm_req.task_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INVALID); 2336 tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT; 2337 2338 set_bit(handle, mrioc->removepend_bitmap); 2339 retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1); 2340 if (retval) { 2341 ioc_err(mrioc, "%s :Issue DevRmHsTM: Admin Post failed\n", 2342 __func__); 2343 goto out_failed; 2344 } 2345 out: 2346 return; 2347 out_failed: 2348 drv_cmd->state = MPI3MR_CMD_NOTUSED; 2349 drv_cmd->callback = NULL; 2350 drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 2351 drv_cmd->retry_count = 0; 2352 clear_bit(cmd_idx, mrioc->devrem_bitmap); 2353 } 2354 2355 /** 2356 * mpi3mr_complete_evt_ack - event ack request completion 2357 * @mrioc: Adapter instance reference 2358 * @drv_cmd: Internal command tracker 2359 * 2360 * This is the completion handler for non blocking event 2361 * acknowledgment sent to the firmware and this will issue any 2362 * pending event acknowledgment request. 2363 * 2364 * Return: Nothing 2365 */ 2366 static void mpi3mr_complete_evt_ack(struct mpi3mr_ioc *mrioc, 2367 struct mpi3mr_drv_cmd *drv_cmd) 2368 { 2369 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN; 2370 struct delayed_evt_ack_node *delayed_evtack = NULL; 2371 2372 if (drv_cmd->state & MPI3MR_CMD_RESET) 2373 goto clear_drv_cmd; 2374 2375 if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) { 2376 dprint_event_th(mrioc, 2377 "immediate event ack failed with ioc_status(0x%04x) log_info(0x%08x)\n", 2378 (drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 2379 drv_cmd->ioc_loginfo); 2380 } 2381 2382 if (!list_empty(&mrioc->delayed_evtack_cmds_list)) { 2383 delayed_evtack = 2384 list_entry(mrioc->delayed_evtack_cmds_list.next, 2385 struct delayed_evt_ack_node, list); 2386 mpi3mr_send_event_ack(mrioc, delayed_evtack->event, drv_cmd, 2387 delayed_evtack->event_ctx); 2388 list_del(&delayed_evtack->list); 2389 kfree(delayed_evtack); 2390 return; 2391 } 2392 clear_drv_cmd: 2393 drv_cmd->state = MPI3MR_CMD_NOTUSED; 2394 drv_cmd->callback = NULL; 2395 clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap); 2396 } 2397 2398 /** 2399 * mpi3mr_send_event_ack - Issue event acknwoledgment request 2400 * @mrioc: Adapter instance reference 2401 * @event: MPI3 event id 2402 * @cmdparam: Internal command tracker 2403 * @event_ctx: event context 2404 * 2405 * Issues event acknowledgment request to the firmware if there 2406 * is a free command to send the event ack else it to a pend 2407 * list so that it will be processed on a completion of a prior 2408 * event acknowledgment . 2409 * 2410 * Return: Nothing 2411 */ 2412 static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event, 2413 struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx) 2414 { 2415 struct mpi3_event_ack_request evtack_req; 2416 int retval = 0; 2417 u8 retrycount = 5; 2418 u16 cmd_idx = MPI3MR_NUM_EVTACKCMD; 2419 struct mpi3mr_drv_cmd *drv_cmd = cmdparam; 2420 struct delayed_evt_ack_node *delayed_evtack = NULL; 2421 2422 if (drv_cmd) { 2423 dprint_event_th(mrioc, 2424 "sending delayed event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n", 2425 event, event_ctx); 2426 goto issue_cmd; 2427 } 2428 dprint_event_th(mrioc, 2429 "sending event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n", 2430 event, event_ctx); 2431 do { 2432 cmd_idx = find_first_zero_bit(mrioc->evtack_cmds_bitmap, 2433 MPI3MR_NUM_EVTACKCMD); 2434 if (cmd_idx < MPI3MR_NUM_EVTACKCMD) { 2435 if (!test_and_set_bit(cmd_idx, 2436 mrioc->evtack_cmds_bitmap)) 2437 break; 2438 cmd_idx = MPI3MR_NUM_EVTACKCMD; 2439 } 2440 } while (retrycount--); 2441 2442 if (cmd_idx >= MPI3MR_NUM_EVTACKCMD) { 2443 delayed_evtack = kzalloc(sizeof(*delayed_evtack), 2444 GFP_ATOMIC); 2445 if (!delayed_evtack) 2446 return; 2447 INIT_LIST_HEAD(&delayed_evtack->list); 2448 delayed_evtack->event = event; 2449 delayed_evtack->event_ctx = event_ctx; 2450 list_add_tail(&delayed_evtack->list, 2451 &mrioc->delayed_evtack_cmds_list); 2452 dprint_event_th(mrioc, 2453 "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is postponed\n", 2454 event, event_ctx); 2455 return; 2456 } 2457 drv_cmd = &mrioc->evtack_cmds[cmd_idx]; 2458 2459 issue_cmd: 2460 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN; 2461 2462 memset(&evtack_req, 0, sizeof(evtack_req)); 2463 if (drv_cmd->state & MPI3MR_CMD_PENDING) { 2464 dprint_event_th(mrioc, 2465 "sending event ack failed due to command in use\n"); 2466 goto out; 2467 } 2468 drv_cmd->state = MPI3MR_CMD_PENDING; 2469 drv_cmd->is_waiting = 0; 2470 drv_cmd->callback = mpi3mr_complete_evt_ack; 2471 evtack_req.host_tag = cpu_to_le16(drv_cmd->host_tag); 2472 evtack_req.function = MPI3_FUNCTION_EVENT_ACK; 2473 evtack_req.event = event; 2474 evtack_req.event_context = cpu_to_le32(event_ctx); 2475 retval = mpi3mr_admin_request_post(mrioc, &evtack_req, 2476 sizeof(evtack_req), 1); 2477 if (retval) { 2478 dprint_event_th(mrioc, 2479 "posting event ack request is failed\n"); 2480 goto out_failed; 2481 } 2482 2483 dprint_event_th(mrioc, 2484 "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is posted\n", 2485 event, event_ctx); 2486 out: 2487 return; 2488 out_failed: 2489 drv_cmd->state = MPI3MR_CMD_NOTUSED; 2490 drv_cmd->callback = NULL; 2491 clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap); 2492 } 2493 2494 /** 2495 * mpi3mr_pcietopochg_evt_th - PCIETopologyChange evt tophalf 2496 * @mrioc: Adapter instance reference 2497 * @event_reply: event data 2498 * 2499 * Checks for the reason code and based on that either block I/O 2500 * to device, or unblock I/O to the device, or start the device 2501 * removal handshake with reason as remove with the firmware for 2502 * PCIe devices. 2503 * 2504 * Return: Nothing 2505 */ 2506 static void mpi3mr_pcietopochg_evt_th(struct mpi3mr_ioc *mrioc, 2507 struct mpi3_event_notification_reply *event_reply) 2508 { 2509 struct mpi3_event_data_pcie_topology_change_list *topo_evt = 2510 (struct mpi3_event_data_pcie_topology_change_list *)event_reply->event_data; 2511 int i; 2512 u16 handle; 2513 u8 reason_code; 2514 struct mpi3mr_tgt_dev *tgtdev = NULL; 2515 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 2516 2517 for (i = 0; i < topo_evt->num_entries; i++) { 2518 handle = le16_to_cpu(topo_evt->port_entry[i].attached_dev_handle); 2519 if (!handle) 2520 continue; 2521 reason_code = topo_evt->port_entry[i].port_status; 2522 scsi_tgt_priv_data = NULL; 2523 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 2524 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) 2525 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 2526 tgtdev->starget->hostdata; 2527 switch (reason_code) { 2528 case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING: 2529 if (scsi_tgt_priv_data) { 2530 scsi_tgt_priv_data->dev_removed = 1; 2531 scsi_tgt_priv_data->dev_removedelay = 0; 2532 atomic_set(&scsi_tgt_priv_data->block_io, 0); 2533 } 2534 mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL, 2535 MPI3_CTRL_OP_REMOVE_DEVICE); 2536 break; 2537 case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING: 2538 if (scsi_tgt_priv_data) { 2539 scsi_tgt_priv_data->dev_removedelay = 1; 2540 atomic_inc(&scsi_tgt_priv_data->block_io); 2541 } 2542 break; 2543 case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING: 2544 if (scsi_tgt_priv_data && 2545 scsi_tgt_priv_data->dev_removedelay) { 2546 scsi_tgt_priv_data->dev_removedelay = 0; 2547 atomic_dec_if_positive 2548 (&scsi_tgt_priv_data->block_io); 2549 } 2550 break; 2551 case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED: 2552 default: 2553 break; 2554 } 2555 if (tgtdev) 2556 mpi3mr_tgtdev_put(tgtdev); 2557 } 2558 } 2559 2560 /** 2561 * mpi3mr_sastopochg_evt_th - SASTopologyChange evt tophalf 2562 * @mrioc: Adapter instance reference 2563 * @event_reply: event data 2564 * 2565 * Checks for the reason code and based on that either block I/O 2566 * to device, or unblock I/O to the device, or start the device 2567 * removal handshake with reason as remove with the firmware for 2568 * SAS/SATA devices. 2569 * 2570 * Return: Nothing 2571 */ 2572 static void mpi3mr_sastopochg_evt_th(struct mpi3mr_ioc *mrioc, 2573 struct mpi3_event_notification_reply *event_reply) 2574 { 2575 struct mpi3_event_data_sas_topology_change_list *topo_evt = 2576 (struct mpi3_event_data_sas_topology_change_list *)event_reply->event_data; 2577 int i; 2578 u16 handle; 2579 u8 reason_code; 2580 struct mpi3mr_tgt_dev *tgtdev = NULL; 2581 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 2582 2583 for (i = 0; i < topo_evt->num_entries; i++) { 2584 handle = le16_to_cpu(topo_evt->phy_entry[i].attached_dev_handle); 2585 if (!handle) 2586 continue; 2587 reason_code = topo_evt->phy_entry[i].status & 2588 MPI3_EVENT_SAS_TOPO_PHY_RC_MASK; 2589 scsi_tgt_priv_data = NULL; 2590 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 2591 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) 2592 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 2593 tgtdev->starget->hostdata; 2594 switch (reason_code) { 2595 case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING: 2596 if (scsi_tgt_priv_data) { 2597 scsi_tgt_priv_data->dev_removed = 1; 2598 scsi_tgt_priv_data->dev_removedelay = 0; 2599 atomic_set(&scsi_tgt_priv_data->block_io, 0); 2600 } 2601 mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL, 2602 MPI3_CTRL_OP_REMOVE_DEVICE); 2603 break; 2604 case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING: 2605 if (scsi_tgt_priv_data) { 2606 scsi_tgt_priv_data->dev_removedelay = 1; 2607 atomic_inc(&scsi_tgt_priv_data->block_io); 2608 } 2609 break; 2610 case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING: 2611 if (scsi_tgt_priv_data && 2612 scsi_tgt_priv_data->dev_removedelay) { 2613 scsi_tgt_priv_data->dev_removedelay = 0; 2614 atomic_dec_if_positive 2615 (&scsi_tgt_priv_data->block_io); 2616 } 2617 break; 2618 case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED: 2619 default: 2620 break; 2621 } 2622 if (tgtdev) 2623 mpi3mr_tgtdev_put(tgtdev); 2624 } 2625 } 2626 2627 /** 2628 * mpi3mr_devstatuschg_evt_th - DeviceStatusChange evt tophalf 2629 * @mrioc: Adapter instance reference 2630 * @event_reply: event data 2631 * 2632 * Checks for the reason code and based on that either block I/O 2633 * to device, or unblock I/O to the device, or start the device 2634 * removal handshake with reason as remove/hide acknowledgment 2635 * with the firmware. 2636 * 2637 * Return: Nothing 2638 */ 2639 static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_ioc *mrioc, 2640 struct mpi3_event_notification_reply *event_reply) 2641 { 2642 u16 dev_handle = 0; 2643 u8 ublock = 0, block = 0, hide = 0, delete = 0, remove = 0; 2644 struct mpi3mr_tgt_dev *tgtdev = NULL; 2645 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 2646 struct mpi3_event_data_device_status_change *evtdata = 2647 (struct mpi3_event_data_device_status_change *)event_reply->event_data; 2648 2649 if (mrioc->stop_drv_processing) 2650 goto out; 2651 2652 dev_handle = le16_to_cpu(evtdata->dev_handle); 2653 2654 switch (evtdata->reason_code) { 2655 case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT: 2656 case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT: 2657 block = 1; 2658 break; 2659 case MPI3_EVENT_DEV_STAT_RC_HIDDEN: 2660 delete = 1; 2661 hide = 1; 2662 break; 2663 case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING: 2664 delete = 1; 2665 remove = 1; 2666 break; 2667 case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP: 2668 case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP: 2669 ublock = 1; 2670 break; 2671 default: 2672 break; 2673 } 2674 2675 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle); 2676 if (!tgtdev) 2677 goto out; 2678 if (hide) 2679 tgtdev->is_hidden = hide; 2680 if (tgtdev->starget && tgtdev->starget->hostdata) { 2681 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 2682 tgtdev->starget->hostdata; 2683 if (block) 2684 atomic_inc(&scsi_tgt_priv_data->block_io); 2685 if (delete) 2686 scsi_tgt_priv_data->dev_removed = 1; 2687 if (ublock) 2688 atomic_dec_if_positive(&scsi_tgt_priv_data->block_io); 2689 } 2690 if (remove) 2691 mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL, 2692 MPI3_CTRL_OP_REMOVE_DEVICE); 2693 if (hide) 2694 mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL, 2695 MPI3_CTRL_OP_HIDDEN_ACK); 2696 2697 out: 2698 if (tgtdev) 2699 mpi3mr_tgtdev_put(tgtdev); 2700 } 2701 2702 /** 2703 * mpi3mr_preparereset_evt_th - Prepare for reset event tophalf 2704 * @mrioc: Adapter instance reference 2705 * @event_reply: event data 2706 * 2707 * Blocks and unblocks host level I/O based on the reason code 2708 * 2709 * Return: Nothing 2710 */ 2711 static void mpi3mr_preparereset_evt_th(struct mpi3mr_ioc *mrioc, 2712 struct mpi3_event_notification_reply *event_reply) 2713 { 2714 struct mpi3_event_data_prepare_for_reset *evtdata = 2715 (struct mpi3_event_data_prepare_for_reset *)event_reply->event_data; 2716 2717 if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_START) { 2718 dprint_event_th(mrioc, 2719 "prepare for reset event top half with rc=start\n"); 2720 if (mrioc->prepare_for_reset) 2721 return; 2722 mrioc->prepare_for_reset = 1; 2723 mrioc->prepare_for_reset_timeout_counter = 0; 2724 } else if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_ABORT) { 2725 dprint_event_th(mrioc, 2726 "prepare for reset top half with rc=abort\n"); 2727 mrioc->prepare_for_reset = 0; 2728 mrioc->prepare_for_reset_timeout_counter = 0; 2729 } 2730 if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK) 2731 == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED) 2732 mpi3mr_send_event_ack(mrioc, event_reply->event, NULL, 2733 le32_to_cpu(event_reply->event_context)); 2734 } 2735 2736 /** 2737 * mpi3mr_energypackchg_evt_th - Energy pack change evt tophalf 2738 * @mrioc: Adapter instance reference 2739 * @event_reply: event data 2740 * 2741 * Identifies the new shutdown timeout value and update. 2742 * 2743 * Return: Nothing 2744 */ 2745 static void mpi3mr_energypackchg_evt_th(struct mpi3mr_ioc *mrioc, 2746 struct mpi3_event_notification_reply *event_reply) 2747 { 2748 struct mpi3_event_data_energy_pack_change *evtdata = 2749 (struct mpi3_event_data_energy_pack_change *)event_reply->event_data; 2750 u16 shutdown_timeout = le16_to_cpu(evtdata->shutdown_timeout); 2751 2752 if (shutdown_timeout <= 0) { 2753 ioc_warn(mrioc, 2754 "%s :Invalid Shutdown Timeout received = %d\n", 2755 __func__, shutdown_timeout); 2756 return; 2757 } 2758 2759 ioc_info(mrioc, 2760 "%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n", 2761 __func__, mrioc->facts.shutdown_timeout, shutdown_timeout); 2762 mrioc->facts.shutdown_timeout = shutdown_timeout; 2763 } 2764 2765 /** 2766 * mpi3mr_cablemgmt_evt_th - Cable management event tophalf 2767 * @mrioc: Adapter instance reference 2768 * @event_reply: event data 2769 * 2770 * Displays Cable manegemt event details. 2771 * 2772 * Return: Nothing 2773 */ 2774 static void mpi3mr_cablemgmt_evt_th(struct mpi3mr_ioc *mrioc, 2775 struct mpi3_event_notification_reply *event_reply) 2776 { 2777 struct mpi3_event_data_cable_management *evtdata = 2778 (struct mpi3_event_data_cable_management *)event_reply->event_data; 2779 2780 switch (evtdata->status) { 2781 case MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER: 2782 { 2783 ioc_info(mrioc, "An active cable with receptacle_id %d cannot be powered.\n" 2784 "Devices connected to this cable are not detected.\n" 2785 "This cable requires %d mW of power.\n", 2786 evtdata->receptacle_id, 2787 le32_to_cpu(evtdata->active_cable_power_requirement)); 2788 break; 2789 } 2790 case MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED: 2791 { 2792 ioc_info(mrioc, "A cable with receptacle_id %d is not running at optimal speed\n", 2793 evtdata->receptacle_id); 2794 break; 2795 } 2796 default: 2797 break; 2798 } 2799 } 2800 2801 /** 2802 * mpi3mr_add_event_wait_for_device_refresh - Add Wait for Device Refresh Event 2803 * @mrioc: Adapter instance reference 2804 * 2805 * Add driver specific event to make sure that the driver won't process the 2806 * events until all the devices are refreshed during soft reset. 2807 * 2808 * Return: Nothing 2809 */ 2810 void mpi3mr_add_event_wait_for_device_refresh(struct mpi3mr_ioc *mrioc) 2811 { 2812 struct mpi3mr_fwevt *fwevt = NULL; 2813 2814 fwevt = mpi3mr_alloc_fwevt(0); 2815 if (!fwevt) { 2816 dprint_event_th(mrioc, 2817 "failed to schedule bottom half handler for event(0x%02x)\n", 2818 MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH); 2819 return; 2820 } 2821 fwevt->mrioc = mrioc; 2822 fwevt->event_id = MPI3_EVENT_WAIT_FOR_DEVICES_TO_REFRESH; 2823 fwevt->send_ack = 0; 2824 fwevt->process_evt = 1; 2825 fwevt->evt_ctx = 0; 2826 fwevt->event_data_size = 0; 2827 mpi3mr_fwevt_add_to_list(mrioc, fwevt); 2828 } 2829 2830 /** 2831 * mpi3mr_os_handle_events - Firmware event handler 2832 * @mrioc: Adapter instance reference 2833 * @event_reply: event data 2834 * 2835 * Identify whteher the event has to handled and acknowledged 2836 * and either process the event in the tophalf and/or schedule a 2837 * bottom half through mpi3mr_fwevt_worker. 2838 * 2839 * Return: Nothing 2840 */ 2841 void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc, 2842 struct mpi3_event_notification_reply *event_reply) 2843 { 2844 u16 evt_type, sz; 2845 struct mpi3mr_fwevt *fwevt = NULL; 2846 bool ack_req = 0, process_evt_bh = 0; 2847 2848 if (mrioc->stop_drv_processing) 2849 return; 2850 2851 if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK) 2852 == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED) 2853 ack_req = 1; 2854 2855 evt_type = event_reply->event; 2856 2857 switch (evt_type) { 2858 case MPI3_EVENT_DEVICE_ADDED: 2859 { 2860 struct mpi3_device_page0 *dev_pg0 = 2861 (struct mpi3_device_page0 *)event_reply->event_data; 2862 if (mpi3mr_create_tgtdev(mrioc, dev_pg0)) 2863 ioc_err(mrioc, 2864 "%s :Failed to add device in the device add event\n", 2865 __func__); 2866 else 2867 process_evt_bh = 1; 2868 break; 2869 } 2870 case MPI3_EVENT_DEVICE_STATUS_CHANGE: 2871 { 2872 process_evt_bh = 1; 2873 mpi3mr_devstatuschg_evt_th(mrioc, event_reply); 2874 break; 2875 } 2876 case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST: 2877 { 2878 process_evt_bh = 1; 2879 mpi3mr_sastopochg_evt_th(mrioc, event_reply); 2880 break; 2881 } 2882 case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST: 2883 { 2884 process_evt_bh = 1; 2885 mpi3mr_pcietopochg_evt_th(mrioc, event_reply); 2886 break; 2887 } 2888 case MPI3_EVENT_PREPARE_FOR_RESET: 2889 { 2890 mpi3mr_preparereset_evt_th(mrioc, event_reply); 2891 ack_req = 0; 2892 break; 2893 } 2894 case MPI3_EVENT_DEVICE_INFO_CHANGED: 2895 case MPI3_EVENT_LOG_DATA: 2896 case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE: 2897 case MPI3_EVENT_ENCL_DEVICE_ADDED: 2898 { 2899 process_evt_bh = 1; 2900 break; 2901 } 2902 case MPI3_EVENT_ENERGY_PACK_CHANGE: 2903 { 2904 mpi3mr_energypackchg_evt_th(mrioc, event_reply); 2905 break; 2906 } 2907 case MPI3_EVENT_CABLE_MGMT: 2908 { 2909 mpi3mr_cablemgmt_evt_th(mrioc, event_reply); 2910 break; 2911 } 2912 case MPI3_EVENT_SAS_DISCOVERY: 2913 case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR: 2914 case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE: 2915 case MPI3_EVENT_PCIE_ENUMERATION: 2916 break; 2917 default: 2918 ioc_info(mrioc, "%s :event 0x%02x is not handled\n", 2919 __func__, evt_type); 2920 break; 2921 } 2922 if (process_evt_bh || ack_req) { 2923 sz = event_reply->event_data_length * 4; 2924 fwevt = mpi3mr_alloc_fwevt(sz); 2925 if (!fwevt) { 2926 ioc_info(mrioc, "%s :failure at %s:%d/%s()!\n", 2927 __func__, __FILE__, __LINE__, __func__); 2928 return; 2929 } 2930 2931 memcpy(fwevt->event_data, event_reply->event_data, sz); 2932 fwevt->mrioc = mrioc; 2933 fwevt->event_id = evt_type; 2934 fwevt->send_ack = ack_req; 2935 fwevt->process_evt = process_evt_bh; 2936 fwevt->evt_ctx = le32_to_cpu(event_reply->event_context); 2937 mpi3mr_fwevt_add_to_list(mrioc, fwevt); 2938 } 2939 } 2940 2941 /** 2942 * mpi3mr_setup_eedp - Setup EEDP information in MPI3 SCSI IO 2943 * @mrioc: Adapter instance reference 2944 * @scmd: SCSI command reference 2945 * @scsiio_req: MPI3 SCSI IO request 2946 * 2947 * Identifies the protection information flags from the SCSI 2948 * command and set appropriate flags in the MPI3 SCSI IO 2949 * request. 2950 * 2951 * Return: Nothing 2952 */ 2953 static void mpi3mr_setup_eedp(struct mpi3mr_ioc *mrioc, 2954 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req) 2955 { 2956 u16 eedp_flags = 0; 2957 unsigned char prot_op = scsi_get_prot_op(scmd); 2958 2959 switch (prot_op) { 2960 case SCSI_PROT_NORMAL: 2961 return; 2962 case SCSI_PROT_READ_STRIP: 2963 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE; 2964 break; 2965 case SCSI_PROT_WRITE_INSERT: 2966 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT; 2967 break; 2968 case SCSI_PROT_READ_INSERT: 2969 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT; 2970 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 2971 break; 2972 case SCSI_PROT_WRITE_STRIP: 2973 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE; 2974 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 2975 break; 2976 case SCSI_PROT_READ_PASS: 2977 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK; 2978 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 2979 break; 2980 case SCSI_PROT_WRITE_PASS: 2981 if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) { 2982 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN; 2983 scsiio_req->sgl[0].eedp.application_tag_translation_mask = 2984 0xffff; 2985 } else 2986 eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK; 2987 2988 scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID; 2989 break; 2990 default: 2991 return; 2992 } 2993 2994 if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK) 2995 eedp_flags |= MPI3_EEDPFLAGS_CHK_GUARD; 2996 2997 if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) 2998 eedp_flags |= MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM; 2999 3000 if (scmd->prot_flags & SCSI_PROT_REF_CHECK) { 3001 eedp_flags |= MPI3_EEDPFLAGS_CHK_REF_TAG | 3002 MPI3_EEDPFLAGS_INCR_PRI_REF_TAG; 3003 scsiio_req->cdb.eedp32.primary_reference_tag = 3004 cpu_to_be32(scsi_prot_ref_tag(scmd)); 3005 } 3006 3007 if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT) 3008 eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG; 3009 3010 eedp_flags |= MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE; 3011 3012 switch (scsi_prot_interval(scmd)) { 3013 case 512: 3014 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_512; 3015 break; 3016 case 520: 3017 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_520; 3018 break; 3019 case 4080: 3020 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4080; 3021 break; 3022 case 4088: 3023 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4088; 3024 break; 3025 case 4096: 3026 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4096; 3027 break; 3028 case 4104: 3029 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4104; 3030 break; 3031 case 4160: 3032 scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4160; 3033 break; 3034 default: 3035 break; 3036 } 3037 3038 scsiio_req->sgl[0].eedp.eedp_flags = cpu_to_le16(eedp_flags); 3039 scsiio_req->sgl[0].eedp.flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED; 3040 } 3041 3042 /** 3043 * mpi3mr_build_sense_buffer - Map sense information 3044 * @desc: Sense type 3045 * @buf: Sense buffer to populate 3046 * @key: Sense key 3047 * @asc: Additional sense code 3048 * @ascq: Additional sense code qualifier 3049 * 3050 * Maps the given sense information into either descriptor or 3051 * fixed format sense data. 3052 * 3053 * Return: Nothing 3054 */ 3055 static inline void mpi3mr_build_sense_buffer(int desc, u8 *buf, u8 key, 3056 u8 asc, u8 ascq) 3057 { 3058 if (desc) { 3059 buf[0] = 0x72; /* descriptor, current */ 3060 buf[1] = key; 3061 buf[2] = asc; 3062 buf[3] = ascq; 3063 buf[7] = 0; 3064 } else { 3065 buf[0] = 0x70; /* fixed, current */ 3066 buf[2] = key; 3067 buf[7] = 0xa; 3068 buf[12] = asc; 3069 buf[13] = ascq; 3070 } 3071 } 3072 3073 /** 3074 * mpi3mr_map_eedp_error - Map EEDP errors from IOC status 3075 * @scmd: SCSI command reference 3076 * @ioc_status: status of MPI3 request 3077 * 3078 * Maps the EEDP error status of the SCSI IO request to sense 3079 * data. 3080 * 3081 * Return: Nothing 3082 */ 3083 static void mpi3mr_map_eedp_error(struct scsi_cmnd *scmd, 3084 u16 ioc_status) 3085 { 3086 u8 ascq = 0; 3087 3088 switch (ioc_status) { 3089 case MPI3_IOCSTATUS_EEDP_GUARD_ERROR: 3090 ascq = 0x01; 3091 break; 3092 case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR: 3093 ascq = 0x02; 3094 break; 3095 case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR: 3096 ascq = 0x03; 3097 break; 3098 default: 3099 ascq = 0x00; 3100 break; 3101 } 3102 3103 mpi3mr_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 3104 0x10, ascq); 3105 scmd->result = (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION; 3106 } 3107 3108 /** 3109 * mpi3mr_process_op_reply_desc - reply descriptor handler 3110 * @mrioc: Adapter instance reference 3111 * @reply_desc: Operational reply descriptor 3112 * @reply_dma: place holder for reply DMA address 3113 * @qidx: Operational queue index 3114 * 3115 * Process the operational reply descriptor and identifies the 3116 * descriptor type. Based on the descriptor map the MPI3 request 3117 * status to a SCSI command status and calls scsi_done call 3118 * back. 3119 * 3120 * Return: Nothing 3121 */ 3122 void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc, 3123 struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma, u16 qidx) 3124 { 3125 u16 reply_desc_type, host_tag = 0; 3126 u16 ioc_status = MPI3_IOCSTATUS_SUCCESS; 3127 u32 ioc_loginfo = 0; 3128 struct mpi3_status_reply_descriptor *status_desc = NULL; 3129 struct mpi3_address_reply_descriptor *addr_desc = NULL; 3130 struct mpi3_success_reply_descriptor *success_desc = NULL; 3131 struct mpi3_scsi_io_reply *scsi_reply = NULL; 3132 struct scsi_cmnd *scmd = NULL; 3133 struct scmd_priv *priv = NULL; 3134 u8 *sense_buf = NULL; 3135 u8 scsi_state = 0, scsi_status = 0, sense_state = 0; 3136 u32 xfer_count = 0, sense_count = 0, resp_data = 0; 3137 u16 dev_handle = 0xFFFF; 3138 struct scsi_sense_hdr sshdr; 3139 struct mpi3mr_stgt_priv_data *stgt_priv_data = NULL; 3140 struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL; 3141 u32 ioc_pend_data_len = 0, tg_pend_data_len = 0, data_len_blks = 0; 3142 struct mpi3mr_throttle_group_info *tg = NULL; 3143 u8 throttle_enabled_dev = 0; 3144 3145 *reply_dma = 0; 3146 reply_desc_type = le16_to_cpu(reply_desc->reply_flags) & 3147 MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK; 3148 switch (reply_desc_type) { 3149 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS: 3150 status_desc = (struct mpi3_status_reply_descriptor *)reply_desc; 3151 host_tag = le16_to_cpu(status_desc->host_tag); 3152 ioc_status = le16_to_cpu(status_desc->ioc_status); 3153 if (ioc_status & 3154 MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL) 3155 ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info); 3156 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK; 3157 break; 3158 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY: 3159 addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc; 3160 *reply_dma = le64_to_cpu(addr_desc->reply_frame_address); 3161 scsi_reply = mpi3mr_get_reply_virt_addr(mrioc, 3162 *reply_dma); 3163 if (!scsi_reply) { 3164 panic("%s: scsi_reply is NULL, this shouldn't happen\n", 3165 mrioc->name); 3166 goto out; 3167 } 3168 host_tag = le16_to_cpu(scsi_reply->host_tag); 3169 ioc_status = le16_to_cpu(scsi_reply->ioc_status); 3170 scsi_status = scsi_reply->scsi_status; 3171 scsi_state = scsi_reply->scsi_state; 3172 dev_handle = le16_to_cpu(scsi_reply->dev_handle); 3173 sense_state = (scsi_state & MPI3_SCSI_STATE_SENSE_MASK); 3174 xfer_count = le32_to_cpu(scsi_reply->transfer_count); 3175 sense_count = le32_to_cpu(scsi_reply->sense_count); 3176 resp_data = le32_to_cpu(scsi_reply->response_data); 3177 sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc, 3178 le64_to_cpu(scsi_reply->sense_data_buffer_address)); 3179 if (ioc_status & 3180 MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL) 3181 ioc_loginfo = le32_to_cpu(scsi_reply->ioc_log_info); 3182 ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK; 3183 if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY) 3184 panic("%s: Ran out of sense buffers\n", mrioc->name); 3185 break; 3186 case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS: 3187 success_desc = (struct mpi3_success_reply_descriptor *)reply_desc; 3188 host_tag = le16_to_cpu(success_desc->host_tag); 3189 break; 3190 default: 3191 break; 3192 } 3193 scmd = mpi3mr_scmd_from_host_tag(mrioc, host_tag, qidx); 3194 if (!scmd) { 3195 panic("%s: Cannot Identify scmd for host_tag 0x%x\n", 3196 mrioc->name, host_tag); 3197 goto out; 3198 } 3199 priv = scsi_cmd_priv(scmd); 3200 3201 data_len_blks = scsi_bufflen(scmd) >> 9; 3202 sdev_priv_data = scmd->device->hostdata; 3203 if (sdev_priv_data) { 3204 stgt_priv_data = sdev_priv_data->tgt_priv_data; 3205 if (stgt_priv_data) { 3206 tg = stgt_priv_data->throttle_group; 3207 throttle_enabled_dev = 3208 stgt_priv_data->io_throttle_enabled; 3209 } 3210 } 3211 if (unlikely((data_len_blks >= mrioc->io_throttle_data_length) && 3212 throttle_enabled_dev)) { 3213 ioc_pend_data_len = atomic_sub_return(data_len_blks, 3214 &mrioc->pend_large_data_sz); 3215 if (tg) { 3216 tg_pend_data_len = atomic_sub_return(data_len_blks, 3217 &tg->pend_large_data_sz); 3218 if (tg->io_divert && ((ioc_pend_data_len <= 3219 mrioc->io_throttle_low) && 3220 (tg_pend_data_len <= tg->low))) { 3221 tg->io_divert = 0; 3222 mpi3mr_set_io_divert_for_all_vd_in_tg( 3223 mrioc, tg, 0); 3224 } 3225 } else { 3226 if (ioc_pend_data_len <= mrioc->io_throttle_low) 3227 stgt_priv_data->io_divert = 0; 3228 } 3229 } else if (unlikely((stgt_priv_data && stgt_priv_data->io_divert))) { 3230 ioc_pend_data_len = atomic_read(&mrioc->pend_large_data_sz); 3231 if (!tg) { 3232 if (ioc_pend_data_len <= mrioc->io_throttle_low) 3233 stgt_priv_data->io_divert = 0; 3234 3235 } else if (ioc_pend_data_len <= mrioc->io_throttle_low) { 3236 tg_pend_data_len = atomic_read(&tg->pend_large_data_sz); 3237 if (tg->io_divert && (tg_pend_data_len <= tg->low)) { 3238 tg->io_divert = 0; 3239 mpi3mr_set_io_divert_for_all_vd_in_tg( 3240 mrioc, tg, 0); 3241 } 3242 } 3243 } 3244 3245 if (success_desc) { 3246 scmd->result = DID_OK << 16; 3247 goto out_success; 3248 } 3249 3250 scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_count); 3251 if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN && 3252 xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY || 3253 scsi_status == MPI3_SCSI_STATUS_RESERVATION_CONFLICT || 3254 scsi_status == MPI3_SCSI_STATUS_TASK_SET_FULL)) 3255 ioc_status = MPI3_IOCSTATUS_SUCCESS; 3256 3257 if ((sense_state == MPI3_SCSI_STATE_SENSE_VALID) && sense_count && 3258 sense_buf) { 3259 u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, sense_count); 3260 3261 memcpy(scmd->sense_buffer, sense_buf, sz); 3262 } 3263 3264 switch (ioc_status) { 3265 case MPI3_IOCSTATUS_BUSY: 3266 case MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES: 3267 scmd->result = SAM_STAT_BUSY; 3268 break; 3269 case MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE: 3270 scmd->result = DID_NO_CONNECT << 16; 3271 break; 3272 case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED: 3273 scmd->result = DID_SOFT_ERROR << 16; 3274 break; 3275 case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED: 3276 case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED: 3277 scmd->result = DID_RESET << 16; 3278 break; 3279 case MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: 3280 if ((xfer_count == 0) || (scmd->underflow > xfer_count)) 3281 scmd->result = DID_SOFT_ERROR << 16; 3282 else 3283 scmd->result = (DID_OK << 16) | scsi_status; 3284 break; 3285 case MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN: 3286 scmd->result = (DID_OK << 16) | scsi_status; 3287 if (sense_state == MPI3_SCSI_STATE_SENSE_VALID) 3288 break; 3289 if (xfer_count < scmd->underflow) { 3290 if (scsi_status == SAM_STAT_BUSY) 3291 scmd->result = SAM_STAT_BUSY; 3292 else 3293 scmd->result = DID_SOFT_ERROR << 16; 3294 } else if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) || 3295 (sense_state != MPI3_SCSI_STATE_SENSE_NOT_AVAILABLE)) 3296 scmd->result = DID_SOFT_ERROR << 16; 3297 else if (scsi_state & MPI3_SCSI_STATE_TERMINATED) 3298 scmd->result = DID_RESET << 16; 3299 break; 3300 case MPI3_IOCSTATUS_SCSI_DATA_OVERRUN: 3301 scsi_set_resid(scmd, 0); 3302 fallthrough; 3303 case MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR: 3304 case MPI3_IOCSTATUS_SUCCESS: 3305 scmd->result = (DID_OK << 16) | scsi_status; 3306 if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) || 3307 (sense_state == MPI3_SCSI_STATE_SENSE_FAILED) || 3308 (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY)) 3309 scmd->result = DID_SOFT_ERROR << 16; 3310 else if (scsi_state & MPI3_SCSI_STATE_TERMINATED) 3311 scmd->result = DID_RESET << 16; 3312 break; 3313 case MPI3_IOCSTATUS_EEDP_GUARD_ERROR: 3314 case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR: 3315 case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR: 3316 mpi3mr_map_eedp_error(scmd, ioc_status); 3317 break; 3318 case MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR: 3319 case MPI3_IOCSTATUS_INVALID_FUNCTION: 3320 case MPI3_IOCSTATUS_INVALID_SGL: 3321 case MPI3_IOCSTATUS_INTERNAL_ERROR: 3322 case MPI3_IOCSTATUS_INVALID_FIELD: 3323 case MPI3_IOCSTATUS_INVALID_STATE: 3324 case MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR: 3325 case MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED: 3326 case MPI3_IOCSTATUS_INSUFFICIENT_POWER: 3327 default: 3328 scmd->result = DID_SOFT_ERROR << 16; 3329 break; 3330 } 3331 3332 if (scmd->result != (DID_OK << 16) && (scmd->cmnd[0] != ATA_12) && 3333 (scmd->cmnd[0] != ATA_16) && 3334 mrioc->logging_level & MPI3_DEBUG_SCSI_ERROR) { 3335 ioc_info(mrioc, "%s :scmd->result 0x%x\n", __func__, 3336 scmd->result); 3337 scsi_print_command(scmd); 3338 ioc_info(mrioc, 3339 "%s :Command issued to handle 0x%02x returned with error 0x%04x loginfo 0x%08x, qid %d\n", 3340 __func__, dev_handle, ioc_status, ioc_loginfo, 3341 priv->req_q_idx + 1); 3342 ioc_info(mrioc, 3343 " host_tag %d scsi_state 0x%02x scsi_status 0x%02x, xfer_cnt %d resp_data 0x%x\n", 3344 host_tag, scsi_state, scsi_status, xfer_count, resp_data); 3345 if (sense_buf) { 3346 scsi_normalize_sense(sense_buf, sense_count, &sshdr); 3347 ioc_info(mrioc, 3348 "%s :sense_count 0x%x, sense_key 0x%x ASC 0x%x, ASCQ 0x%x\n", 3349 __func__, sense_count, sshdr.sense_key, 3350 sshdr.asc, sshdr.ascq); 3351 } 3352 } 3353 out_success: 3354 if (priv->meta_sg_valid) { 3355 dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd), 3356 scsi_prot_sg_count(scmd), scmd->sc_data_direction); 3357 } 3358 mpi3mr_clear_scmd_priv(mrioc, scmd); 3359 scsi_dma_unmap(scmd); 3360 scsi_done(scmd); 3361 out: 3362 if (sense_buf) 3363 mpi3mr_repost_sense_buf(mrioc, 3364 le64_to_cpu(scsi_reply->sense_data_buffer_address)); 3365 } 3366 3367 /** 3368 * mpi3mr_get_chain_idx - get free chain buffer index 3369 * @mrioc: Adapter instance reference 3370 * 3371 * Try to get a free chain buffer index from the free pool. 3372 * 3373 * Return: -1 on failure or the free chain buffer index 3374 */ 3375 static int mpi3mr_get_chain_idx(struct mpi3mr_ioc *mrioc) 3376 { 3377 u8 retry_count = 5; 3378 int cmd_idx = -1; 3379 unsigned long flags; 3380 3381 spin_lock_irqsave(&mrioc->chain_buf_lock, flags); 3382 do { 3383 cmd_idx = find_first_zero_bit(mrioc->chain_bitmap, 3384 mrioc->chain_buf_count); 3385 if (cmd_idx < mrioc->chain_buf_count) { 3386 set_bit(cmd_idx, mrioc->chain_bitmap); 3387 break; 3388 } 3389 cmd_idx = -1; 3390 } while (retry_count--); 3391 spin_unlock_irqrestore(&mrioc->chain_buf_lock, flags); 3392 return cmd_idx; 3393 } 3394 3395 /** 3396 * mpi3mr_prepare_sg_scmd - build scatter gather list 3397 * @mrioc: Adapter instance reference 3398 * @scmd: SCSI command reference 3399 * @scsiio_req: MPI3 SCSI IO request 3400 * 3401 * This function maps SCSI command's data and protection SGEs to 3402 * MPI request SGEs. If required additional 4K chain buffer is 3403 * used to send the SGEs. 3404 * 3405 * Return: 0 on success, -ENOMEM on dma_map_sg failure 3406 */ 3407 static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc, 3408 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req) 3409 { 3410 dma_addr_t chain_dma; 3411 struct scatterlist *sg_scmd; 3412 void *sg_local, *chain; 3413 u32 chain_length; 3414 int sges_left, chain_idx; 3415 u32 sges_in_segment; 3416 u8 simple_sgl_flags; 3417 u8 simple_sgl_flags_last; 3418 u8 last_chain_sgl_flags; 3419 struct chain_element *chain_req; 3420 struct scmd_priv *priv = NULL; 3421 u32 meta_sg = le32_to_cpu(scsiio_req->flags) & 3422 MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI; 3423 3424 priv = scsi_cmd_priv(scmd); 3425 3426 simple_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE | 3427 MPI3_SGE_FLAGS_DLAS_SYSTEM; 3428 simple_sgl_flags_last = simple_sgl_flags | 3429 MPI3_SGE_FLAGS_END_OF_LIST; 3430 last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN | 3431 MPI3_SGE_FLAGS_DLAS_SYSTEM; 3432 3433 if (meta_sg) 3434 sg_local = &scsiio_req->sgl[MPI3_SCSIIO_METASGL_INDEX]; 3435 else 3436 sg_local = &scsiio_req->sgl; 3437 3438 if (!scsiio_req->data_length && !meta_sg) { 3439 mpi3mr_build_zero_len_sge(sg_local); 3440 return 0; 3441 } 3442 3443 if (meta_sg) { 3444 sg_scmd = scsi_prot_sglist(scmd); 3445 sges_left = dma_map_sg(&mrioc->pdev->dev, 3446 scsi_prot_sglist(scmd), 3447 scsi_prot_sg_count(scmd), 3448 scmd->sc_data_direction); 3449 priv->meta_sg_valid = 1; /* To unmap meta sg DMA */ 3450 } else { 3451 /* 3452 * Some firmware versions byte-swap the REPORT ZONES command 3453 * reply from ATA-ZAC devices by directly accessing in the host 3454 * buffer. This does not respect the default command DMA 3455 * direction and causes IOMMU page faults on some architectures 3456 * with an IOMMU enforcing write mappings (e.g. AMD hosts). 3457 * Avoid such issue by making the REPORT ZONES buffer mapping 3458 * bi-directional. 3459 */ 3460 if (scmd->cmnd[0] == ZBC_IN && scmd->cmnd[1] == ZI_REPORT_ZONES) 3461 scmd->sc_data_direction = DMA_BIDIRECTIONAL; 3462 sg_scmd = scsi_sglist(scmd); 3463 sges_left = scsi_dma_map(scmd); 3464 } 3465 3466 if (sges_left < 0) { 3467 sdev_printk(KERN_ERR, scmd->device, 3468 "scsi_dma_map failed: request for %d bytes!\n", 3469 scsi_bufflen(scmd)); 3470 return -ENOMEM; 3471 } 3472 if (sges_left > mrioc->max_sgl_entries) { 3473 sdev_printk(KERN_ERR, scmd->device, 3474 "scsi_dma_map returned unsupported sge count %d!\n", 3475 sges_left); 3476 return -ENOMEM; 3477 } 3478 3479 sges_in_segment = (mrioc->facts.op_req_sz - 3480 offsetof(struct mpi3_scsi_io_request, sgl)) / sizeof(struct mpi3_sge_common); 3481 3482 if (scsiio_req->sgl[0].eedp.flags == 3483 MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED && !meta_sg) { 3484 sg_local += sizeof(struct mpi3_sge_common); 3485 sges_in_segment--; 3486 /* Reserve 1st segment (scsiio_req->sgl[0]) for eedp */ 3487 } 3488 3489 if (scsiio_req->msg_flags == 3490 MPI3_SCSIIO_MSGFLAGS_METASGL_VALID && !meta_sg) { 3491 sges_in_segment--; 3492 /* Reserve last segment (scsiio_req->sgl[3]) for meta sg */ 3493 } 3494 3495 if (meta_sg) 3496 sges_in_segment = 1; 3497 3498 if (sges_left <= sges_in_segment) 3499 goto fill_in_last_segment; 3500 3501 /* fill in main message segment when there is a chain following */ 3502 while (sges_in_segment > 1) { 3503 mpi3mr_add_sg_single(sg_local, simple_sgl_flags, 3504 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 3505 sg_scmd = sg_next(sg_scmd); 3506 sg_local += sizeof(struct mpi3_sge_common); 3507 sges_left--; 3508 sges_in_segment--; 3509 } 3510 3511 chain_idx = mpi3mr_get_chain_idx(mrioc); 3512 if (chain_idx < 0) 3513 return -1; 3514 chain_req = &mrioc->chain_sgl_list[chain_idx]; 3515 if (meta_sg) 3516 priv->meta_chain_idx = chain_idx; 3517 else 3518 priv->chain_idx = chain_idx; 3519 3520 chain = chain_req->addr; 3521 chain_dma = chain_req->dma_addr; 3522 sges_in_segment = sges_left; 3523 chain_length = sges_in_segment * sizeof(struct mpi3_sge_common); 3524 3525 mpi3mr_add_sg_single(sg_local, last_chain_sgl_flags, 3526 chain_length, chain_dma); 3527 3528 sg_local = chain; 3529 3530 fill_in_last_segment: 3531 while (sges_left > 0) { 3532 if (sges_left == 1) 3533 mpi3mr_add_sg_single(sg_local, 3534 simple_sgl_flags_last, sg_dma_len(sg_scmd), 3535 sg_dma_address(sg_scmd)); 3536 else 3537 mpi3mr_add_sg_single(sg_local, simple_sgl_flags, 3538 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); 3539 sg_scmd = sg_next(sg_scmd); 3540 sg_local += sizeof(struct mpi3_sge_common); 3541 sges_left--; 3542 } 3543 3544 return 0; 3545 } 3546 3547 /** 3548 * mpi3mr_build_sg_scmd - build scatter gather list for SCSI IO 3549 * @mrioc: Adapter instance reference 3550 * @scmd: SCSI command reference 3551 * @scsiio_req: MPI3 SCSI IO request 3552 * 3553 * This function calls mpi3mr_prepare_sg_scmd for constructing 3554 * both data SGEs and protection information SGEs in the MPI 3555 * format from the SCSI Command as appropriate . 3556 * 3557 * Return: return value of mpi3mr_prepare_sg_scmd. 3558 */ 3559 static int mpi3mr_build_sg_scmd(struct mpi3mr_ioc *mrioc, 3560 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req) 3561 { 3562 int ret; 3563 3564 ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req); 3565 if (ret) 3566 return ret; 3567 3568 if (scsiio_req->msg_flags == MPI3_SCSIIO_MSGFLAGS_METASGL_VALID) { 3569 /* There is a valid meta sg */ 3570 scsiio_req->flags |= 3571 cpu_to_le32(MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI); 3572 ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req); 3573 } 3574 3575 return ret; 3576 } 3577 3578 /** 3579 * mpi3mr_tm_response_name - get TM response as a string 3580 * @resp_code: TM response code 3581 * 3582 * Convert known task management response code as a readable 3583 * string. 3584 * 3585 * Return: response code string. 3586 */ 3587 static const char *mpi3mr_tm_response_name(u8 resp_code) 3588 { 3589 char *desc; 3590 3591 switch (resp_code) { 3592 case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE: 3593 desc = "task management request completed"; 3594 break; 3595 case MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME: 3596 desc = "invalid frame"; 3597 break; 3598 case MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED: 3599 desc = "task management request not supported"; 3600 break; 3601 case MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED: 3602 desc = "task management request failed"; 3603 break; 3604 case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED: 3605 desc = "task management request succeeded"; 3606 break; 3607 case MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN: 3608 desc = "invalid LUN"; 3609 break; 3610 case MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG: 3611 desc = "overlapped tag attempted"; 3612 break; 3613 case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC: 3614 desc = "task queued, however not sent to target"; 3615 break; 3616 case MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED: 3617 desc = "task management request denied by NVMe device"; 3618 break; 3619 default: 3620 desc = "unknown"; 3621 break; 3622 } 3623 3624 return desc; 3625 } 3626 3627 inline void mpi3mr_poll_pend_io_completions(struct mpi3mr_ioc *mrioc) 3628 { 3629 int i; 3630 int num_of_reply_queues = 3631 mrioc->num_op_reply_q + mrioc->op_reply_q_offset; 3632 3633 for (i = mrioc->op_reply_q_offset; i < num_of_reply_queues; i++) 3634 mpi3mr_process_op_reply_q(mrioc, 3635 mrioc->intr_info[i].op_reply_q); 3636 } 3637 3638 /** 3639 * mpi3mr_issue_tm - Issue Task Management request 3640 * @mrioc: Adapter instance reference 3641 * @tm_type: Task Management type 3642 * @handle: Device handle 3643 * @lun: lun ID 3644 * @htag: Host tag of the TM request 3645 * @timeout: TM timeout value 3646 * @drv_cmd: Internal command tracker 3647 * @resp_code: Response code place holder 3648 * @scmd: SCSI command 3649 * 3650 * Issues a Task Management Request to the controller for a 3651 * specified target, lun and command and wait for its completion 3652 * and check TM response. Recover the TM if it timed out by 3653 * issuing controller reset. 3654 * 3655 * Return: 0 on success, non-zero on errors 3656 */ 3657 int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type, 3658 u16 handle, uint lun, u16 htag, ulong timeout, 3659 struct mpi3mr_drv_cmd *drv_cmd, 3660 u8 *resp_code, struct scsi_cmnd *scmd) 3661 { 3662 struct mpi3_scsi_task_mgmt_request tm_req; 3663 struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL; 3664 int retval = 0; 3665 struct mpi3mr_tgt_dev *tgtdev = NULL; 3666 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL; 3667 struct scmd_priv *cmd_priv = NULL; 3668 struct scsi_device *sdev = NULL; 3669 struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL; 3670 3671 ioc_info(mrioc, "%s :Issue TM: TM type (0x%x) for devhandle 0x%04x\n", 3672 __func__, tm_type, handle); 3673 if (mrioc->unrecoverable) { 3674 retval = -1; 3675 ioc_err(mrioc, "%s :Issue TM: Unrecoverable controller\n", 3676 __func__); 3677 goto out; 3678 } 3679 3680 memset(&tm_req, 0, sizeof(tm_req)); 3681 mutex_lock(&drv_cmd->mutex); 3682 if (drv_cmd->state & MPI3MR_CMD_PENDING) { 3683 retval = -1; 3684 ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__); 3685 mutex_unlock(&drv_cmd->mutex); 3686 goto out; 3687 } 3688 if (mrioc->reset_in_progress) { 3689 retval = -1; 3690 ioc_err(mrioc, "%s :Issue TM: Reset in progress\n", __func__); 3691 mutex_unlock(&drv_cmd->mutex); 3692 goto out; 3693 } 3694 3695 drv_cmd->state = MPI3MR_CMD_PENDING; 3696 drv_cmd->is_waiting = 1; 3697 drv_cmd->callback = NULL; 3698 tm_req.dev_handle = cpu_to_le16(handle); 3699 tm_req.task_type = tm_type; 3700 tm_req.host_tag = cpu_to_le16(htag); 3701 3702 int_to_scsilun(lun, (struct scsi_lun *)tm_req.lun); 3703 tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT; 3704 3705 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle); 3706 3707 if (scmd) { 3708 sdev = scmd->device; 3709 sdev_priv_data = sdev->hostdata; 3710 scsi_tgt_priv_data = ((sdev_priv_data) ? 3711 sdev_priv_data->tgt_priv_data : NULL); 3712 } else { 3713 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) 3714 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *) 3715 tgtdev->starget->hostdata; 3716 } 3717 3718 if (scsi_tgt_priv_data) 3719 atomic_inc(&scsi_tgt_priv_data->block_io); 3720 3721 if (tgtdev && (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)) { 3722 if (cmd_priv && tgtdev->dev_spec.pcie_inf.abort_to) 3723 timeout = tgtdev->dev_spec.pcie_inf.abort_to; 3724 else if (!cmd_priv && tgtdev->dev_spec.pcie_inf.reset_to) 3725 timeout = tgtdev->dev_spec.pcie_inf.reset_to; 3726 } 3727 3728 init_completion(&drv_cmd->done); 3729 retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1); 3730 if (retval) { 3731 ioc_err(mrioc, "%s :Issue TM: Admin Post failed\n", __func__); 3732 goto out_unlock; 3733 } 3734 wait_for_completion_timeout(&drv_cmd->done, (timeout * HZ)); 3735 3736 if (!(drv_cmd->state & MPI3MR_CMD_COMPLETE)) { 3737 drv_cmd->is_waiting = 0; 3738 retval = -1; 3739 if (!(drv_cmd->state & MPI3MR_CMD_RESET)) { 3740 dprint_tm(mrioc, 3741 "task management request timed out after %ld seconds\n", 3742 timeout); 3743 if (mrioc->logging_level & MPI3_DEBUG_TM) 3744 dprint_dump_req(&tm_req, sizeof(tm_req)/4); 3745 mpi3mr_soft_reset_handler(mrioc, 3746 MPI3MR_RESET_FROM_TM_TIMEOUT, 1); 3747 } 3748 goto out_unlock; 3749 } 3750 3751 if (!(drv_cmd->state & MPI3MR_CMD_REPLY_VALID)) { 3752 dprint_tm(mrioc, "invalid task management reply message\n"); 3753 retval = -1; 3754 goto out_unlock; 3755 } 3756 3757 tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply; 3758 3759 switch (drv_cmd->ioc_status) { 3760 case MPI3_IOCSTATUS_SUCCESS: 3761 *resp_code = le32_to_cpu(tm_reply->response_data) & 3762 MPI3MR_RI_MASK_RESPCODE; 3763 break; 3764 case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED: 3765 *resp_code = MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE; 3766 break; 3767 default: 3768 dprint_tm(mrioc, 3769 "task management request to handle(0x%04x) is failed with ioc_status(0x%04x) log_info(0x%08x)\n", 3770 handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo); 3771 retval = -1; 3772 goto out_unlock; 3773 } 3774 3775 switch (*resp_code) { 3776 case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED: 3777 case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE: 3778 break; 3779 case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC: 3780 if (tm_type != MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK) 3781 retval = -1; 3782 break; 3783 default: 3784 retval = -1; 3785 break; 3786 } 3787 3788 dprint_tm(mrioc, 3789 "task management request type(%d) completed for handle(0x%04x) with ioc_status(0x%04x), log_info(0x%08x), termination_count(%d), response:%s(0x%x)\n", 3790 tm_type, handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo, 3791 le32_to_cpu(tm_reply->termination_count), 3792 mpi3mr_tm_response_name(*resp_code), *resp_code); 3793 3794 if (!retval) { 3795 mpi3mr_ioc_disable_intr(mrioc); 3796 mpi3mr_poll_pend_io_completions(mrioc); 3797 mpi3mr_ioc_enable_intr(mrioc); 3798 mpi3mr_poll_pend_io_completions(mrioc); 3799 mpi3mr_process_admin_reply_q(mrioc); 3800 } 3801 switch (tm_type) { 3802 case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET: 3803 if (!scsi_tgt_priv_data) 3804 break; 3805 scsi_tgt_priv_data->pend_count = 0; 3806 blk_mq_tagset_busy_iter(&mrioc->shost->tag_set, 3807 mpi3mr_count_tgt_pending, 3808 (void *)scsi_tgt_priv_data->starget); 3809 break; 3810 case MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: 3811 if (!sdev_priv_data) 3812 break; 3813 sdev_priv_data->pend_count = 0; 3814 blk_mq_tagset_busy_iter(&mrioc->shost->tag_set, 3815 mpi3mr_count_dev_pending, (void *)sdev); 3816 break; 3817 default: 3818 break; 3819 } 3820 3821 out_unlock: 3822 drv_cmd->state = MPI3MR_CMD_NOTUSED; 3823 mutex_unlock(&drv_cmd->mutex); 3824 if (scsi_tgt_priv_data) 3825 atomic_dec_if_positive(&scsi_tgt_priv_data->block_io); 3826 if (tgtdev) 3827 mpi3mr_tgtdev_put(tgtdev); 3828 out: 3829 return retval; 3830 } 3831 3832 /** 3833 * mpi3mr_bios_param - BIOS param callback 3834 * @sdev: SCSI device reference 3835 * @bdev: Block device reference 3836 * @capacity: Capacity in logical sectors 3837 * @params: Parameter array 3838 * 3839 * Just the parameters with heads/secots/cylinders. 3840 * 3841 * Return: 0 always 3842 */ 3843 static int mpi3mr_bios_param(struct scsi_device *sdev, 3844 struct block_device *bdev, sector_t capacity, int params[]) 3845 { 3846 int heads; 3847 int sectors; 3848 sector_t cylinders; 3849 ulong dummy; 3850 3851 heads = 64; 3852 sectors = 32; 3853 3854 dummy = heads * sectors; 3855 cylinders = capacity; 3856 sector_div(cylinders, dummy); 3857 3858 if ((ulong)capacity >= 0x200000) { 3859 heads = 255; 3860 sectors = 63; 3861 dummy = heads * sectors; 3862 cylinders = capacity; 3863 sector_div(cylinders, dummy); 3864 } 3865 3866 params[0] = heads; 3867 params[1] = sectors; 3868 params[2] = cylinders; 3869 return 0; 3870 } 3871 3872 /** 3873 * mpi3mr_map_queues - Map queues callback handler 3874 * @shost: SCSI host reference 3875 * 3876 * Maps default and poll queues. 3877 * 3878 * Return: return zero. 3879 */ 3880 static void mpi3mr_map_queues(struct Scsi_Host *shost) 3881 { 3882 struct mpi3mr_ioc *mrioc = shost_priv(shost); 3883 int i, qoff, offset; 3884 struct blk_mq_queue_map *map = NULL; 3885 3886 offset = mrioc->op_reply_q_offset; 3887 3888 for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) { 3889 map = &shost->tag_set.map[i]; 3890 3891 map->nr_queues = 0; 3892 3893 if (i == HCTX_TYPE_DEFAULT) 3894 map->nr_queues = mrioc->default_qcount; 3895 else if (i == HCTX_TYPE_POLL) 3896 map->nr_queues = mrioc->active_poll_qcount; 3897 3898 if (!map->nr_queues) { 3899 BUG_ON(i == HCTX_TYPE_DEFAULT); 3900 continue; 3901 } 3902 3903 /* 3904 * The poll queue(s) doesn't have an IRQ (and hence IRQ 3905 * affinity), so use the regular blk-mq cpu mapping 3906 */ 3907 map->queue_offset = qoff; 3908 if (i != HCTX_TYPE_POLL) 3909 blk_mq_pci_map_queues(map, mrioc->pdev, offset); 3910 else 3911 blk_mq_map_queues(map); 3912 3913 qoff += map->nr_queues; 3914 offset += map->nr_queues; 3915 } 3916 } 3917 3918 /** 3919 * mpi3mr_get_fw_pending_ios - Calculate pending I/O count 3920 * @mrioc: Adapter instance reference 3921 * 3922 * Calculate the pending I/Os for the controller and return. 3923 * 3924 * Return: Number of pending I/Os 3925 */ 3926 static inline int mpi3mr_get_fw_pending_ios(struct mpi3mr_ioc *mrioc) 3927 { 3928 u16 i; 3929 uint pend_ios = 0; 3930 3931 for (i = 0; i < mrioc->num_op_reply_q; i++) 3932 pend_ios += atomic_read(&mrioc->op_reply_qinfo[i].pend_ios); 3933 return pend_ios; 3934 } 3935 3936 /** 3937 * mpi3mr_print_pending_host_io - print pending I/Os 3938 * @mrioc: Adapter instance reference 3939 * 3940 * Print number of pending I/Os and each I/O details prior to 3941 * reset for debug purpose. 3942 * 3943 * Return: Nothing 3944 */ 3945 static void mpi3mr_print_pending_host_io(struct mpi3mr_ioc *mrioc) 3946 { 3947 struct Scsi_Host *shost = mrioc->shost; 3948 3949 ioc_info(mrioc, "%s :Pending commands prior to reset: %d\n", 3950 __func__, mpi3mr_get_fw_pending_ios(mrioc)); 3951 blk_mq_tagset_busy_iter(&shost->tag_set, 3952 mpi3mr_print_scmd, (void *)mrioc); 3953 } 3954 3955 /** 3956 * mpi3mr_wait_for_host_io - block for I/Os to complete 3957 * @mrioc: Adapter instance reference 3958 * @timeout: time out in seconds 3959 * Waits for pending I/Os for the given adapter to complete or 3960 * to hit the timeout. 3961 * 3962 * Return: Nothing 3963 */ 3964 void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout) 3965 { 3966 enum mpi3mr_iocstate iocstate; 3967 int i = 0; 3968 3969 iocstate = mpi3mr_get_iocstate(mrioc); 3970 if (iocstate != MRIOC_STATE_READY) 3971 return; 3972 3973 if (!mpi3mr_get_fw_pending_ios(mrioc)) 3974 return; 3975 ioc_info(mrioc, 3976 "%s :Waiting for %d seconds prior to reset for %d I/O\n", 3977 __func__, timeout, mpi3mr_get_fw_pending_ios(mrioc)); 3978 3979 for (i = 0; i < timeout; i++) { 3980 if (!mpi3mr_get_fw_pending_ios(mrioc)) 3981 break; 3982 iocstate = mpi3mr_get_iocstate(mrioc); 3983 if (iocstate != MRIOC_STATE_READY) 3984 break; 3985 msleep(1000); 3986 } 3987 3988 ioc_info(mrioc, "%s :Pending I/Os after wait is: %d\n", __func__, 3989 mpi3mr_get_fw_pending_ios(mrioc)); 3990 } 3991 3992 /** 3993 * mpi3mr_setup_divert_ws - Setup Divert IO flag for write same 3994 * @mrioc: Adapter instance reference 3995 * @scmd: SCSI command reference 3996 * @scsiio_req: MPI3 SCSI IO request 3997 * @scsiio_flags: Pointer to MPI3 SCSI IO Flags 3998 * @wslen: write same max length 3999 * 4000 * Gets values of unmap, ndob and number of blocks from write 4001 * same scsi io and based on these values it sets divert IO flag 4002 * and reason for diverting IO to firmware. 4003 * 4004 * Return: Nothing 4005 */ 4006 static inline void mpi3mr_setup_divert_ws(struct mpi3mr_ioc *mrioc, 4007 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req, 4008 u32 *scsiio_flags, u16 wslen) 4009 { 4010 u8 unmap = 0, ndob = 0; 4011 u8 opcode = scmd->cmnd[0]; 4012 u32 num_blocks = 0; 4013 u16 sa = (scmd->cmnd[8] << 8) | (scmd->cmnd[9]); 4014 4015 if (opcode == WRITE_SAME_16) { 4016 unmap = scmd->cmnd[1] & 0x08; 4017 ndob = scmd->cmnd[1] & 0x01; 4018 num_blocks = get_unaligned_be32(scmd->cmnd + 10); 4019 } else if ((opcode == VARIABLE_LENGTH_CMD) && (sa == WRITE_SAME_32)) { 4020 unmap = scmd->cmnd[10] & 0x08; 4021 ndob = scmd->cmnd[10] & 0x01; 4022 num_blocks = get_unaligned_be32(scmd->cmnd + 28); 4023 } else 4024 return; 4025 4026 if ((unmap) && (ndob) && (num_blocks > wslen)) { 4027 scsiio_req->msg_flags |= 4028 MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE; 4029 *scsiio_flags |= 4030 MPI3_SCSIIO_FLAGS_DIVERT_REASON_WRITE_SAME_TOO_LARGE; 4031 } 4032 } 4033 4034 /** 4035 * mpi3mr_eh_host_reset - Host reset error handling callback 4036 * @scmd: SCSI command reference 4037 * 4038 * Issue controller reset if the scmd is for a Physical Device, 4039 * if the scmd is for RAID volume, then wait for 4040 * MPI3MR_RAID_ERRREC_RESET_TIMEOUT and checke whether any 4041 * pending I/Os prior to issuing reset to the controller. 4042 * 4043 * Return: SUCCESS of successful reset else FAILED 4044 */ 4045 static int mpi3mr_eh_host_reset(struct scsi_cmnd *scmd) 4046 { 4047 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host); 4048 struct mpi3mr_stgt_priv_data *stgt_priv_data; 4049 struct mpi3mr_sdev_priv_data *sdev_priv_data; 4050 u8 dev_type = MPI3_DEVICE_DEVFORM_VD; 4051 int retval = FAILED, ret; 4052 4053 sdev_priv_data = scmd->device->hostdata; 4054 if (sdev_priv_data && sdev_priv_data->tgt_priv_data) { 4055 stgt_priv_data = sdev_priv_data->tgt_priv_data; 4056 dev_type = stgt_priv_data->dev_type; 4057 } 4058 4059 if (dev_type == MPI3_DEVICE_DEVFORM_VD) { 4060 mpi3mr_wait_for_host_io(mrioc, 4061 MPI3MR_RAID_ERRREC_RESET_TIMEOUT); 4062 if (!mpi3mr_get_fw_pending_ios(mrioc)) { 4063 retval = SUCCESS; 4064 goto out; 4065 } 4066 } 4067 4068 mpi3mr_print_pending_host_io(mrioc); 4069 ret = mpi3mr_soft_reset_handler(mrioc, 4070 MPI3MR_RESET_FROM_EH_HOS, 1); 4071 if (ret) 4072 goto out; 4073 4074 retval = SUCCESS; 4075 out: 4076 sdev_printk(KERN_INFO, scmd->device, 4077 "Host reset is %s for scmd(%p)\n", 4078 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); 4079 4080 return retval; 4081 } 4082 4083 /** 4084 * mpi3mr_eh_target_reset - Target reset error handling callback 4085 * @scmd: SCSI command reference 4086 * 4087 * Issue Target reset Task Management and verify the scmd is 4088 * terminated successfully and return status accordingly. 4089 * 4090 * Return: SUCCESS of successful termination of the scmd else 4091 * FAILED 4092 */ 4093 static int mpi3mr_eh_target_reset(struct scsi_cmnd *scmd) 4094 { 4095 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host); 4096 struct mpi3mr_stgt_priv_data *stgt_priv_data; 4097 struct mpi3mr_sdev_priv_data *sdev_priv_data; 4098 u16 dev_handle; 4099 u8 resp_code = 0; 4100 int retval = FAILED, ret = 0; 4101 4102 sdev_printk(KERN_INFO, scmd->device, 4103 "Attempting Target Reset! scmd(%p)\n", scmd); 4104 scsi_print_command(scmd); 4105 4106 sdev_priv_data = scmd->device->hostdata; 4107 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) { 4108 sdev_printk(KERN_INFO, scmd->device, 4109 "SCSI device is not available\n"); 4110 retval = SUCCESS; 4111 goto out; 4112 } 4113 4114 stgt_priv_data = sdev_priv_data->tgt_priv_data; 4115 dev_handle = stgt_priv_data->dev_handle; 4116 if (stgt_priv_data->dev_removed) { 4117 struct scmd_priv *cmd_priv = scsi_cmd_priv(scmd); 4118 sdev_printk(KERN_INFO, scmd->device, 4119 "%s:target(handle = 0x%04x) is removed, target reset is not issued\n", 4120 mrioc->name, dev_handle); 4121 if (!cmd_priv->in_lld_scope || cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID) 4122 retval = SUCCESS; 4123 else 4124 retval = FAILED; 4125 goto out; 4126 } 4127 sdev_printk(KERN_INFO, scmd->device, 4128 "Target Reset is issued to handle(0x%04x)\n", 4129 dev_handle); 4130 4131 ret = mpi3mr_issue_tm(mrioc, 4132 MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, dev_handle, 4133 sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS, 4134 MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd); 4135 4136 if (ret) 4137 goto out; 4138 4139 if (stgt_priv_data->pend_count) { 4140 sdev_printk(KERN_INFO, scmd->device, 4141 "%s: target has %d pending commands, target reset is failed\n", 4142 mrioc->name, stgt_priv_data->pend_count); 4143 goto out; 4144 } 4145 4146 retval = SUCCESS; 4147 out: 4148 sdev_printk(KERN_INFO, scmd->device, 4149 "%s: target reset is %s for scmd(%p)\n", mrioc->name, 4150 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); 4151 4152 return retval; 4153 } 4154 4155 /** 4156 * mpi3mr_eh_dev_reset- Device reset error handling callback 4157 * @scmd: SCSI command reference 4158 * 4159 * Issue lun reset Task Management and verify the scmd is 4160 * terminated successfully and return status accordingly. 4161 * 4162 * Return: SUCCESS of successful termination of the scmd else 4163 * FAILED 4164 */ 4165 static int mpi3mr_eh_dev_reset(struct scsi_cmnd *scmd) 4166 { 4167 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host); 4168 struct mpi3mr_stgt_priv_data *stgt_priv_data; 4169 struct mpi3mr_sdev_priv_data *sdev_priv_data; 4170 u16 dev_handle; 4171 u8 resp_code = 0; 4172 int retval = FAILED, ret = 0; 4173 4174 sdev_printk(KERN_INFO, scmd->device, 4175 "Attempting Device(lun) Reset! scmd(%p)\n", scmd); 4176 scsi_print_command(scmd); 4177 4178 sdev_priv_data = scmd->device->hostdata; 4179 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) { 4180 sdev_printk(KERN_INFO, scmd->device, 4181 "SCSI device is not available\n"); 4182 retval = SUCCESS; 4183 goto out; 4184 } 4185 4186 stgt_priv_data = sdev_priv_data->tgt_priv_data; 4187 dev_handle = stgt_priv_data->dev_handle; 4188 if (stgt_priv_data->dev_removed) { 4189 struct scmd_priv *cmd_priv = scsi_cmd_priv(scmd); 4190 sdev_printk(KERN_INFO, scmd->device, 4191 "%s: device(handle = 0x%04x) is removed, device(LUN) reset is not issued\n", 4192 mrioc->name, dev_handle); 4193 if (!cmd_priv->in_lld_scope || cmd_priv->host_tag == MPI3MR_HOSTTAG_INVALID) 4194 retval = SUCCESS; 4195 else 4196 retval = FAILED; 4197 goto out; 4198 } 4199 sdev_printk(KERN_INFO, scmd->device, 4200 "Device(lun) Reset is issued to handle(0x%04x)\n", dev_handle); 4201 4202 ret = mpi3mr_issue_tm(mrioc, 4203 MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, dev_handle, 4204 sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS, 4205 MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd); 4206 4207 if (ret) 4208 goto out; 4209 4210 if (sdev_priv_data->pend_count) { 4211 sdev_printk(KERN_INFO, scmd->device, 4212 "%s: device has %d pending commands, device(LUN) reset is failed\n", 4213 mrioc->name, sdev_priv_data->pend_count); 4214 goto out; 4215 } 4216 retval = SUCCESS; 4217 out: 4218 sdev_printk(KERN_INFO, scmd->device, 4219 "%s: device(LUN) reset is %s for scmd(%p)\n", mrioc->name, 4220 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); 4221 4222 return retval; 4223 } 4224 4225 /** 4226 * mpi3mr_scan_start - Scan start callback handler 4227 * @shost: SCSI host reference 4228 * 4229 * Issue port enable request asynchronously. 4230 * 4231 * Return: Nothing 4232 */ 4233 static void mpi3mr_scan_start(struct Scsi_Host *shost) 4234 { 4235 struct mpi3mr_ioc *mrioc = shost_priv(shost); 4236 4237 mrioc->scan_started = 1; 4238 ioc_info(mrioc, "%s :Issuing Port Enable\n", __func__); 4239 if (mpi3mr_issue_port_enable(mrioc, 1)) { 4240 ioc_err(mrioc, "%s :Issuing port enable failed\n", __func__); 4241 mrioc->scan_started = 0; 4242 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR; 4243 } 4244 } 4245 4246 /** 4247 * mpi3mr_scan_finished - Scan finished callback handler 4248 * @shost: SCSI host reference 4249 * @time: Jiffies from the scan start 4250 * 4251 * Checks whether the port enable is completed or timedout or 4252 * failed and set the scan status accordingly after taking any 4253 * recovery if required. 4254 * 4255 * Return: 1 on scan finished or timed out, 0 for in progress 4256 */ 4257 static int mpi3mr_scan_finished(struct Scsi_Host *shost, 4258 unsigned long time) 4259 { 4260 struct mpi3mr_ioc *mrioc = shost_priv(shost); 4261 u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT; 4262 u32 ioc_status = readl(&mrioc->sysif_regs->ioc_status); 4263 4264 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) || 4265 (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) { 4266 ioc_err(mrioc, "port enable failed due to fault or reset\n"); 4267 mpi3mr_print_fault_info(mrioc); 4268 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR; 4269 mrioc->scan_started = 0; 4270 mrioc->init_cmds.is_waiting = 0; 4271 mrioc->init_cmds.callback = NULL; 4272 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 4273 } 4274 4275 if (time >= (pe_timeout * HZ)) { 4276 ioc_err(mrioc, "port enable failed due to time out\n"); 4277 mpi3mr_check_rh_fault_ioc(mrioc, 4278 MPI3MR_RESET_FROM_PE_TIMEOUT); 4279 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR; 4280 mrioc->scan_started = 0; 4281 mrioc->init_cmds.is_waiting = 0; 4282 mrioc->init_cmds.callback = NULL; 4283 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED; 4284 } 4285 4286 if (mrioc->scan_started) 4287 return 0; 4288 4289 if (mrioc->scan_failed) { 4290 ioc_err(mrioc, 4291 "port enable failed with status=0x%04x\n", 4292 mrioc->scan_failed); 4293 } else 4294 ioc_info(mrioc, "port enable is successfully completed\n"); 4295 4296 mpi3mr_start_watchdog(mrioc); 4297 mrioc->is_driver_loading = 0; 4298 mrioc->stop_bsgs = 0; 4299 return 1; 4300 } 4301 4302 /** 4303 * mpi3mr_slave_destroy - Slave destroy callback handler 4304 * @sdev: SCSI device reference 4305 * 4306 * Cleanup and free per device(lun) private data. 4307 * 4308 * Return: Nothing. 4309 */ 4310 static void mpi3mr_slave_destroy(struct scsi_device *sdev) 4311 { 4312 struct Scsi_Host *shost; 4313 struct mpi3mr_ioc *mrioc; 4314 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 4315 struct mpi3mr_tgt_dev *tgt_dev = NULL; 4316 unsigned long flags; 4317 struct scsi_target *starget; 4318 struct sas_rphy *rphy = NULL; 4319 4320 if (!sdev->hostdata) 4321 return; 4322 4323 starget = scsi_target(sdev); 4324 shost = dev_to_shost(&starget->dev); 4325 mrioc = shost_priv(shost); 4326 scsi_tgt_priv_data = starget->hostdata; 4327 4328 scsi_tgt_priv_data->num_luns--; 4329 4330 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 4331 if (starget->channel == mrioc->scsi_device_channel) 4332 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 4333 else if (mrioc->sas_transport_enabled && !starget->channel) { 4334 rphy = dev_to_rphy(starget->dev.parent); 4335 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc, 4336 rphy->identify.sas_address, rphy); 4337 } 4338 4339 if (tgt_dev && (!scsi_tgt_priv_data->num_luns)) 4340 tgt_dev->starget = NULL; 4341 if (tgt_dev) 4342 mpi3mr_tgtdev_put(tgt_dev); 4343 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4344 4345 kfree(sdev->hostdata); 4346 sdev->hostdata = NULL; 4347 } 4348 4349 /** 4350 * mpi3mr_target_destroy - Target destroy callback handler 4351 * @starget: SCSI target reference 4352 * 4353 * Cleanup and free per target private data. 4354 * 4355 * Return: Nothing. 4356 */ 4357 static void mpi3mr_target_destroy(struct scsi_target *starget) 4358 { 4359 struct Scsi_Host *shost; 4360 struct mpi3mr_ioc *mrioc; 4361 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 4362 struct mpi3mr_tgt_dev *tgt_dev; 4363 unsigned long flags; 4364 4365 if (!starget->hostdata) 4366 return; 4367 4368 shost = dev_to_shost(&starget->dev); 4369 mrioc = shost_priv(shost); 4370 scsi_tgt_priv_data = starget->hostdata; 4371 4372 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 4373 tgt_dev = __mpi3mr_get_tgtdev_from_tgtpriv(mrioc, scsi_tgt_priv_data); 4374 if (tgt_dev && (tgt_dev->starget == starget) && 4375 (tgt_dev->perst_id == starget->id)) 4376 tgt_dev->starget = NULL; 4377 if (tgt_dev) { 4378 scsi_tgt_priv_data->tgt_dev = NULL; 4379 scsi_tgt_priv_data->perst_id = 0; 4380 mpi3mr_tgtdev_put(tgt_dev); 4381 mpi3mr_tgtdev_put(tgt_dev); 4382 } 4383 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4384 4385 kfree(starget->hostdata); 4386 starget->hostdata = NULL; 4387 } 4388 4389 /** 4390 * mpi3mr_slave_configure - Slave configure callback handler 4391 * @sdev: SCSI device reference 4392 * 4393 * Configure queue depth, max hardware sectors and virt boundary 4394 * as required 4395 * 4396 * Return: 0 always. 4397 */ 4398 static int mpi3mr_slave_configure(struct scsi_device *sdev) 4399 { 4400 struct scsi_target *starget; 4401 struct Scsi_Host *shost; 4402 struct mpi3mr_ioc *mrioc; 4403 struct mpi3mr_tgt_dev *tgt_dev = NULL; 4404 unsigned long flags; 4405 int retval = 0; 4406 struct sas_rphy *rphy = NULL; 4407 4408 starget = scsi_target(sdev); 4409 shost = dev_to_shost(&starget->dev); 4410 mrioc = shost_priv(shost); 4411 4412 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 4413 if (starget->channel == mrioc->scsi_device_channel) 4414 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 4415 else if (mrioc->sas_transport_enabled && !starget->channel) { 4416 rphy = dev_to_rphy(starget->dev.parent); 4417 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc, 4418 rphy->identify.sas_address, rphy); 4419 } 4420 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4421 if (!tgt_dev) 4422 return -ENXIO; 4423 4424 mpi3mr_change_queue_depth(sdev, tgt_dev->q_depth); 4425 4426 sdev->eh_timeout = MPI3MR_EH_SCMD_TIMEOUT; 4427 blk_queue_rq_timeout(sdev->request_queue, MPI3MR_SCMD_TIMEOUT); 4428 4429 switch (tgt_dev->dev_type) { 4430 case MPI3_DEVICE_DEVFORM_PCIE: 4431 /*The block layer hw sector size = 512*/ 4432 if ((tgt_dev->dev_spec.pcie_inf.dev_info & 4433 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) == 4434 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) { 4435 blk_queue_max_hw_sectors(sdev->request_queue, 4436 tgt_dev->dev_spec.pcie_inf.mdts / 512); 4437 if (tgt_dev->dev_spec.pcie_inf.pgsz == 0) 4438 blk_queue_virt_boundary(sdev->request_queue, 4439 ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1)); 4440 else 4441 blk_queue_virt_boundary(sdev->request_queue, 4442 ((1 << tgt_dev->dev_spec.pcie_inf.pgsz) - 1)); 4443 } 4444 break; 4445 default: 4446 break; 4447 } 4448 4449 mpi3mr_tgtdev_put(tgt_dev); 4450 4451 return retval; 4452 } 4453 4454 /** 4455 * mpi3mr_slave_alloc -Slave alloc callback handler 4456 * @sdev: SCSI device reference 4457 * 4458 * Allocate per device(lun) private data and initialize it. 4459 * 4460 * Return: 0 on success -ENOMEM on memory allocation failure. 4461 */ 4462 static int mpi3mr_slave_alloc(struct scsi_device *sdev) 4463 { 4464 struct Scsi_Host *shost; 4465 struct mpi3mr_ioc *mrioc; 4466 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 4467 struct mpi3mr_tgt_dev *tgt_dev = NULL; 4468 struct mpi3mr_sdev_priv_data *scsi_dev_priv_data; 4469 unsigned long flags; 4470 struct scsi_target *starget; 4471 int retval = 0; 4472 struct sas_rphy *rphy = NULL; 4473 4474 starget = scsi_target(sdev); 4475 shost = dev_to_shost(&starget->dev); 4476 mrioc = shost_priv(shost); 4477 scsi_tgt_priv_data = starget->hostdata; 4478 4479 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 4480 4481 if (starget->channel == mrioc->scsi_device_channel) 4482 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 4483 else if (mrioc->sas_transport_enabled && !starget->channel) { 4484 rphy = dev_to_rphy(starget->dev.parent); 4485 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc, 4486 rphy->identify.sas_address, rphy); 4487 } 4488 4489 if (tgt_dev) { 4490 if (tgt_dev->starget == NULL) 4491 tgt_dev->starget = starget; 4492 mpi3mr_tgtdev_put(tgt_dev); 4493 retval = 0; 4494 } else { 4495 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4496 return -ENXIO; 4497 } 4498 4499 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4500 4501 scsi_dev_priv_data = kzalloc(sizeof(*scsi_dev_priv_data), GFP_KERNEL); 4502 if (!scsi_dev_priv_data) 4503 return -ENOMEM; 4504 4505 scsi_dev_priv_data->lun_id = sdev->lun; 4506 scsi_dev_priv_data->tgt_priv_data = scsi_tgt_priv_data; 4507 sdev->hostdata = scsi_dev_priv_data; 4508 4509 scsi_tgt_priv_data->num_luns++; 4510 4511 return retval; 4512 } 4513 4514 /** 4515 * mpi3mr_target_alloc - Target alloc callback handler 4516 * @starget: SCSI target reference 4517 * 4518 * Allocate per target private data and initialize it. 4519 * 4520 * Return: 0 on success -ENOMEM on memory allocation failure. 4521 */ 4522 static int mpi3mr_target_alloc(struct scsi_target *starget) 4523 { 4524 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 4525 struct mpi3mr_ioc *mrioc = shost_priv(shost); 4526 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data; 4527 struct mpi3mr_tgt_dev *tgt_dev; 4528 unsigned long flags; 4529 int retval = 0; 4530 struct sas_rphy *rphy = NULL; 4531 4532 scsi_tgt_priv_data = kzalloc(sizeof(*scsi_tgt_priv_data), GFP_KERNEL); 4533 if (!scsi_tgt_priv_data) 4534 return -ENOMEM; 4535 4536 starget->hostdata = scsi_tgt_priv_data; 4537 4538 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 4539 if (starget->channel == mrioc->scsi_device_channel) { 4540 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); 4541 if (tgt_dev && !tgt_dev->is_hidden) { 4542 scsi_tgt_priv_data->starget = starget; 4543 scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle; 4544 scsi_tgt_priv_data->perst_id = tgt_dev->perst_id; 4545 scsi_tgt_priv_data->dev_type = tgt_dev->dev_type; 4546 scsi_tgt_priv_data->tgt_dev = tgt_dev; 4547 tgt_dev->starget = starget; 4548 atomic_set(&scsi_tgt_priv_data->block_io, 0); 4549 retval = 0; 4550 if ((tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_PCIE) && 4551 ((tgt_dev->dev_spec.pcie_inf.dev_info & 4552 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) == 4553 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) && 4554 ((tgt_dev->dev_spec.pcie_inf.dev_info & 4555 MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_MASK) != 4556 MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_0)) 4557 scsi_tgt_priv_data->dev_nvme_dif = 1; 4558 scsi_tgt_priv_data->io_throttle_enabled = tgt_dev->io_throttle_enabled; 4559 scsi_tgt_priv_data->wslen = tgt_dev->wslen; 4560 if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_VD) 4561 scsi_tgt_priv_data->throttle_group = tgt_dev->dev_spec.vd_inf.tg; 4562 } else 4563 retval = -ENXIO; 4564 } else if (mrioc->sas_transport_enabled && !starget->channel) { 4565 rphy = dev_to_rphy(starget->dev.parent); 4566 tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc, 4567 rphy->identify.sas_address, rphy); 4568 if (tgt_dev && !tgt_dev->is_hidden && !tgt_dev->non_stl && 4569 (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA)) { 4570 scsi_tgt_priv_data->starget = starget; 4571 scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle; 4572 scsi_tgt_priv_data->perst_id = tgt_dev->perst_id; 4573 scsi_tgt_priv_data->dev_type = tgt_dev->dev_type; 4574 scsi_tgt_priv_data->tgt_dev = tgt_dev; 4575 scsi_tgt_priv_data->io_throttle_enabled = tgt_dev->io_throttle_enabled; 4576 scsi_tgt_priv_data->wslen = tgt_dev->wslen; 4577 tgt_dev->starget = starget; 4578 atomic_set(&scsi_tgt_priv_data->block_io, 0); 4579 retval = 0; 4580 } else 4581 retval = -ENXIO; 4582 } 4583 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 4584 4585 return retval; 4586 } 4587 4588 /** 4589 * mpi3mr_check_return_unmap - Whether an unmap is allowed 4590 * @mrioc: Adapter instance reference 4591 * @scmd: SCSI Command reference 4592 * 4593 * The controller hardware cannot handle certain unmap commands 4594 * for NVMe drives, this routine checks those and return true 4595 * and completes the SCSI command with proper status and sense 4596 * data. 4597 * 4598 * Return: TRUE for not allowed unmap, FALSE otherwise. 4599 */ 4600 static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc, 4601 struct scsi_cmnd *scmd) 4602 { 4603 unsigned char *buf; 4604 u16 param_len, desc_len, trunc_param_len; 4605 4606 trunc_param_len = param_len = get_unaligned_be16(scmd->cmnd + 7); 4607 4608 if (mrioc->pdev->revision) { 4609 if ((param_len > 24) && ((param_len - 8) & 0xF)) { 4610 trunc_param_len -= (param_len - 8) & 0xF; 4611 dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR); 4612 dprint_scsi_err(mrioc, 4613 "truncating param_len from (%d) to (%d)\n", 4614 param_len, trunc_param_len); 4615 put_unaligned_be16(trunc_param_len, scmd->cmnd + 7); 4616 dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR); 4617 } 4618 return false; 4619 } 4620 4621 if (!param_len) { 4622 ioc_warn(mrioc, 4623 "%s: cdb received with zero parameter length\n", 4624 __func__); 4625 scsi_print_command(scmd); 4626 scmd->result = DID_OK << 16; 4627 scsi_done(scmd); 4628 return true; 4629 } 4630 4631 if (param_len < 24) { 4632 ioc_warn(mrioc, 4633 "%s: cdb received with invalid param_len: %d\n", 4634 __func__, param_len); 4635 scsi_print_command(scmd); 4636 scmd->result = SAM_STAT_CHECK_CONDITION; 4637 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 4638 0x1A, 0); 4639 scsi_done(scmd); 4640 return true; 4641 } 4642 if (param_len != scsi_bufflen(scmd)) { 4643 ioc_warn(mrioc, 4644 "%s: cdb received with param_len: %d bufflen: %d\n", 4645 __func__, param_len, scsi_bufflen(scmd)); 4646 scsi_print_command(scmd); 4647 scmd->result = SAM_STAT_CHECK_CONDITION; 4648 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 4649 0x1A, 0); 4650 scsi_done(scmd); 4651 return true; 4652 } 4653 buf = kzalloc(scsi_bufflen(scmd), GFP_ATOMIC); 4654 if (!buf) { 4655 scsi_print_command(scmd); 4656 scmd->result = SAM_STAT_CHECK_CONDITION; 4657 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 4658 0x55, 0x03); 4659 scsi_done(scmd); 4660 return true; 4661 } 4662 scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd)); 4663 desc_len = get_unaligned_be16(&buf[2]); 4664 4665 if (desc_len < 16) { 4666 ioc_warn(mrioc, 4667 "%s: Invalid descriptor length in param list: %d\n", 4668 __func__, desc_len); 4669 scsi_print_command(scmd); 4670 scmd->result = SAM_STAT_CHECK_CONDITION; 4671 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 4672 0x26, 0); 4673 scsi_done(scmd); 4674 kfree(buf); 4675 return true; 4676 } 4677 4678 if (param_len > (desc_len + 8)) { 4679 trunc_param_len = desc_len + 8; 4680 scsi_print_command(scmd); 4681 dprint_scsi_err(mrioc, 4682 "truncating param_len(%d) to desc_len+8(%d)\n", 4683 param_len, trunc_param_len); 4684 put_unaligned_be16(trunc_param_len, scmd->cmnd + 7); 4685 scsi_print_command(scmd); 4686 } 4687 4688 kfree(buf); 4689 return false; 4690 } 4691 4692 /** 4693 * mpi3mr_allow_scmd_to_fw - Command is allowed during shutdown 4694 * @scmd: SCSI Command reference 4695 * 4696 * Checks whether a cdb is allowed during shutdown or not. 4697 * 4698 * Return: TRUE for allowed commands, FALSE otherwise. 4699 */ 4700 4701 inline bool mpi3mr_allow_scmd_to_fw(struct scsi_cmnd *scmd) 4702 { 4703 switch (scmd->cmnd[0]) { 4704 case SYNCHRONIZE_CACHE: 4705 case START_STOP: 4706 return true; 4707 default: 4708 return false; 4709 } 4710 } 4711 4712 /** 4713 * mpi3mr_qcmd - I/O request despatcher 4714 * @shost: SCSI Host reference 4715 * @scmd: SCSI Command reference 4716 * 4717 * Issues the SCSI Command as an MPI3 request. 4718 * 4719 * Return: 0 on successful queueing of the request or if the 4720 * request is completed with failure. 4721 * SCSI_MLQUEUE_DEVICE_BUSY when the device is busy. 4722 * SCSI_MLQUEUE_HOST_BUSY when the host queue is full. 4723 */ 4724 static int mpi3mr_qcmd(struct Scsi_Host *shost, 4725 struct scsi_cmnd *scmd) 4726 { 4727 struct mpi3mr_ioc *mrioc = shost_priv(shost); 4728 struct mpi3mr_stgt_priv_data *stgt_priv_data; 4729 struct mpi3mr_sdev_priv_data *sdev_priv_data; 4730 struct scmd_priv *scmd_priv_data = NULL; 4731 struct mpi3_scsi_io_request *scsiio_req = NULL; 4732 struct op_req_qinfo *op_req_q = NULL; 4733 int retval = 0; 4734 u16 dev_handle; 4735 u16 host_tag; 4736 u32 scsiio_flags = 0, data_len_blks = 0; 4737 struct request *rq = scsi_cmd_to_rq(scmd); 4738 int iprio_class; 4739 u8 is_pcie_dev = 0; 4740 u32 tracked_io_sz = 0; 4741 u32 ioc_pend_data_len = 0, tg_pend_data_len = 0; 4742 struct mpi3mr_throttle_group_info *tg = NULL; 4743 4744 if (mrioc->unrecoverable) { 4745 scmd->result = DID_ERROR << 16; 4746 scsi_done(scmd); 4747 goto out; 4748 } 4749 4750 sdev_priv_data = scmd->device->hostdata; 4751 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) { 4752 scmd->result = DID_NO_CONNECT << 16; 4753 scsi_done(scmd); 4754 goto out; 4755 } 4756 4757 if (mrioc->stop_drv_processing && 4758 !(mpi3mr_allow_scmd_to_fw(scmd))) { 4759 scmd->result = DID_NO_CONNECT << 16; 4760 scsi_done(scmd); 4761 goto out; 4762 } 4763 4764 stgt_priv_data = sdev_priv_data->tgt_priv_data; 4765 dev_handle = stgt_priv_data->dev_handle; 4766 4767 /* Avoid error handling escalation when device is removed or blocked */ 4768 4769 if (scmd->device->host->shost_state == SHOST_RECOVERY && 4770 scmd->cmnd[0] == TEST_UNIT_READY && 4771 (stgt_priv_data->dev_removed || (dev_handle == MPI3MR_INVALID_DEV_HANDLE))) { 4772 scsi_build_sense(scmd, 0, UNIT_ATTENTION, 0x29, 0x07); 4773 scsi_done(scmd); 4774 goto out; 4775 } 4776 4777 if (mrioc->reset_in_progress) { 4778 retval = SCSI_MLQUEUE_HOST_BUSY; 4779 goto out; 4780 } 4781 4782 if (atomic_read(&stgt_priv_data->block_io)) { 4783 if (mrioc->stop_drv_processing) { 4784 scmd->result = DID_NO_CONNECT << 16; 4785 scsi_done(scmd); 4786 goto out; 4787 } 4788 retval = SCSI_MLQUEUE_DEVICE_BUSY; 4789 goto out; 4790 } 4791 4792 if (dev_handle == MPI3MR_INVALID_DEV_HANDLE) { 4793 scmd->result = DID_NO_CONNECT << 16; 4794 scsi_done(scmd); 4795 goto out; 4796 } 4797 if (stgt_priv_data->dev_removed) { 4798 scmd->result = DID_NO_CONNECT << 16; 4799 scsi_done(scmd); 4800 goto out; 4801 } 4802 4803 if (stgt_priv_data->dev_type == MPI3_DEVICE_DEVFORM_PCIE) 4804 is_pcie_dev = 1; 4805 if ((scmd->cmnd[0] == UNMAP) && is_pcie_dev && 4806 (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) && 4807 mpi3mr_check_return_unmap(mrioc, scmd)) 4808 goto out; 4809 4810 host_tag = mpi3mr_host_tag_for_scmd(mrioc, scmd); 4811 if (host_tag == MPI3MR_HOSTTAG_INVALID) { 4812 scmd->result = DID_ERROR << 16; 4813 scsi_done(scmd); 4814 goto out; 4815 } 4816 4817 if (scmd->sc_data_direction == DMA_FROM_DEVICE) 4818 scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_READ; 4819 else if (scmd->sc_data_direction == DMA_TO_DEVICE) 4820 scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE; 4821 else 4822 scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER; 4823 4824 scsiio_flags |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ; 4825 4826 if (sdev_priv_data->ncq_prio_enable) { 4827 iprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq)); 4828 if (iprio_class == IOPRIO_CLASS_RT) 4829 scsiio_flags |= 1 << MPI3_SCSIIO_FLAGS_CMDPRI_SHIFT; 4830 } 4831 4832 if (scmd->cmd_len > 16) 4833 scsiio_flags |= MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16; 4834 4835 scmd_priv_data = scsi_cmd_priv(scmd); 4836 memset(scmd_priv_data->mpi3mr_scsiio_req, 0, MPI3MR_ADMIN_REQ_FRAME_SZ); 4837 scsiio_req = (struct mpi3_scsi_io_request *)scmd_priv_data->mpi3mr_scsiio_req; 4838 scsiio_req->function = MPI3_FUNCTION_SCSI_IO; 4839 scsiio_req->host_tag = cpu_to_le16(host_tag); 4840 4841 mpi3mr_setup_eedp(mrioc, scmd, scsiio_req); 4842 4843 if (stgt_priv_data->wslen) 4844 mpi3mr_setup_divert_ws(mrioc, scmd, scsiio_req, &scsiio_flags, 4845 stgt_priv_data->wslen); 4846 4847 memcpy(scsiio_req->cdb.cdb32, scmd->cmnd, scmd->cmd_len); 4848 scsiio_req->data_length = cpu_to_le32(scsi_bufflen(scmd)); 4849 scsiio_req->dev_handle = cpu_to_le16(dev_handle); 4850 scsiio_req->flags = cpu_to_le32(scsiio_flags); 4851 int_to_scsilun(sdev_priv_data->lun_id, 4852 (struct scsi_lun *)scsiio_req->lun); 4853 4854 if (mpi3mr_build_sg_scmd(mrioc, scmd, scsiio_req)) { 4855 mpi3mr_clear_scmd_priv(mrioc, scmd); 4856 retval = SCSI_MLQUEUE_HOST_BUSY; 4857 goto out; 4858 } 4859 op_req_q = &mrioc->req_qinfo[scmd_priv_data->req_q_idx]; 4860 data_len_blks = scsi_bufflen(scmd) >> 9; 4861 if ((data_len_blks >= mrioc->io_throttle_data_length) && 4862 stgt_priv_data->io_throttle_enabled) { 4863 tracked_io_sz = data_len_blks; 4864 tg = stgt_priv_data->throttle_group; 4865 if (tg) { 4866 ioc_pend_data_len = atomic_add_return(data_len_blks, 4867 &mrioc->pend_large_data_sz); 4868 tg_pend_data_len = atomic_add_return(data_len_blks, 4869 &tg->pend_large_data_sz); 4870 if (!tg->io_divert && ((ioc_pend_data_len >= 4871 mrioc->io_throttle_high) || 4872 (tg_pend_data_len >= tg->high))) { 4873 tg->io_divert = 1; 4874 tg->need_qd_reduction = 1; 4875 mpi3mr_set_io_divert_for_all_vd_in_tg(mrioc, 4876 tg, 1); 4877 mpi3mr_queue_qd_reduction_event(mrioc, tg); 4878 } 4879 } else { 4880 ioc_pend_data_len = atomic_add_return(data_len_blks, 4881 &mrioc->pend_large_data_sz); 4882 if (ioc_pend_data_len >= mrioc->io_throttle_high) 4883 stgt_priv_data->io_divert = 1; 4884 } 4885 } 4886 4887 if (stgt_priv_data->io_divert) { 4888 scsiio_req->msg_flags |= 4889 MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE; 4890 scsiio_flags |= MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING; 4891 } 4892 scsiio_req->flags = cpu_to_le32(scsiio_flags); 4893 4894 if (mpi3mr_op_request_post(mrioc, op_req_q, 4895 scmd_priv_data->mpi3mr_scsiio_req)) { 4896 mpi3mr_clear_scmd_priv(mrioc, scmd); 4897 retval = SCSI_MLQUEUE_HOST_BUSY; 4898 if (tracked_io_sz) { 4899 atomic_sub(tracked_io_sz, &mrioc->pend_large_data_sz); 4900 if (tg) 4901 atomic_sub(tracked_io_sz, 4902 &tg->pend_large_data_sz); 4903 } 4904 goto out; 4905 } 4906 4907 out: 4908 return retval; 4909 } 4910 4911 static const struct scsi_host_template mpi3mr_driver_template = { 4912 .module = THIS_MODULE, 4913 .name = "MPI3 Storage Controller", 4914 .proc_name = MPI3MR_DRIVER_NAME, 4915 .queuecommand = mpi3mr_qcmd, 4916 .target_alloc = mpi3mr_target_alloc, 4917 .slave_alloc = mpi3mr_slave_alloc, 4918 .slave_configure = mpi3mr_slave_configure, 4919 .target_destroy = mpi3mr_target_destroy, 4920 .slave_destroy = mpi3mr_slave_destroy, 4921 .scan_finished = mpi3mr_scan_finished, 4922 .scan_start = mpi3mr_scan_start, 4923 .change_queue_depth = mpi3mr_change_queue_depth, 4924 .eh_device_reset_handler = mpi3mr_eh_dev_reset, 4925 .eh_target_reset_handler = mpi3mr_eh_target_reset, 4926 .eh_host_reset_handler = mpi3mr_eh_host_reset, 4927 .bios_param = mpi3mr_bios_param, 4928 .map_queues = mpi3mr_map_queues, 4929 .mq_poll = mpi3mr_blk_mq_poll, 4930 .no_write_same = 1, 4931 .can_queue = 1, 4932 .this_id = -1, 4933 .sg_tablesize = MPI3MR_DEFAULT_SGL_ENTRIES, 4934 /* max xfer supported is 1M (2K in 512 byte sized sectors) 4935 */ 4936 .max_sectors = (MPI3MR_DEFAULT_MAX_IO_SIZE / 512), 4937 .cmd_per_lun = MPI3MR_MAX_CMDS_LUN, 4938 .max_segment_size = 0xffffffff, 4939 .track_queue_depth = 1, 4940 .cmd_size = sizeof(struct scmd_priv), 4941 .shost_groups = mpi3mr_host_groups, 4942 .sdev_groups = mpi3mr_dev_groups, 4943 }; 4944 4945 /** 4946 * mpi3mr_init_drv_cmd - Initialize internal command tracker 4947 * @cmdptr: Internal command tracker 4948 * @host_tag: Host tag used for the specific command 4949 * 4950 * Initialize the internal command tracker structure with 4951 * specified host tag. 4952 * 4953 * Return: Nothing. 4954 */ 4955 static inline void mpi3mr_init_drv_cmd(struct mpi3mr_drv_cmd *cmdptr, 4956 u16 host_tag) 4957 { 4958 mutex_init(&cmdptr->mutex); 4959 cmdptr->reply = NULL; 4960 cmdptr->state = MPI3MR_CMD_NOTUSED; 4961 cmdptr->dev_handle = MPI3MR_INVALID_DEV_HANDLE; 4962 cmdptr->host_tag = host_tag; 4963 } 4964 4965 /** 4966 * osintfc_mrioc_security_status -Check controller secure status 4967 * @pdev: PCI device instance 4968 * 4969 * Read the Device Serial Number capability from PCI config 4970 * space and decide whether the controller is secure or not. 4971 * 4972 * Return: 0 on success, non-zero on failure. 4973 */ 4974 static int 4975 osintfc_mrioc_security_status(struct pci_dev *pdev) 4976 { 4977 u32 cap_data; 4978 int base; 4979 u32 ctlr_status; 4980 u32 debug_status; 4981 int retval = 0; 4982 4983 base = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN); 4984 if (!base) { 4985 dev_err(&pdev->dev, 4986 "%s: PCI_EXT_CAP_ID_DSN is not supported\n", __func__); 4987 return -1; 4988 } 4989 4990 pci_read_config_dword(pdev, base + 4, &cap_data); 4991 4992 debug_status = cap_data & MPI3MR_CTLR_SECURE_DBG_STATUS_MASK; 4993 ctlr_status = cap_data & MPI3MR_CTLR_SECURITY_STATUS_MASK; 4994 4995 switch (ctlr_status) { 4996 case MPI3MR_INVALID_DEVICE: 4997 dev_err(&pdev->dev, 4998 "%s: Non secure ctlr (Invalid) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n", 4999 __func__, pdev->device, pdev->subsystem_vendor, 5000 pdev->subsystem_device); 5001 retval = -1; 5002 break; 5003 case MPI3MR_CONFIG_SECURE_DEVICE: 5004 if (!debug_status) 5005 dev_info(&pdev->dev, 5006 "%s: Config secure ctlr is detected\n", 5007 __func__); 5008 break; 5009 case MPI3MR_HARD_SECURE_DEVICE: 5010 break; 5011 case MPI3MR_TAMPERED_DEVICE: 5012 dev_err(&pdev->dev, 5013 "%s: Non secure ctlr (Tampered) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n", 5014 __func__, pdev->device, pdev->subsystem_vendor, 5015 pdev->subsystem_device); 5016 retval = -1; 5017 break; 5018 default: 5019 retval = -1; 5020 break; 5021 } 5022 5023 if (!retval && debug_status) { 5024 dev_err(&pdev->dev, 5025 "%s: Non secure ctlr (Secure Dbg) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n", 5026 __func__, pdev->device, pdev->subsystem_vendor, 5027 pdev->subsystem_device); 5028 retval = -1; 5029 } 5030 5031 return retval; 5032 } 5033 5034 /** 5035 * mpi3mr_probe - PCI probe callback 5036 * @pdev: PCI device instance 5037 * @id: PCI device ID details 5038 * 5039 * controller initialization routine. Checks the security status 5040 * of the controller and if it is invalid or tampered return the 5041 * probe without initializing the controller. Otherwise, 5042 * allocate per adapter instance through shost_priv and 5043 * initialize controller specific data structures, initializae 5044 * the controller hardware, add shost to the SCSI subsystem. 5045 * 5046 * Return: 0 on success, non-zero on failure. 5047 */ 5048 5049 static int 5050 mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id) 5051 { 5052 struct mpi3mr_ioc *mrioc = NULL; 5053 struct Scsi_Host *shost = NULL; 5054 int retval = 0, i; 5055 5056 if (osintfc_mrioc_security_status(pdev)) { 5057 warn_non_secure_ctlr = 1; 5058 return 1; /* For Invalid and Tampered device */ 5059 } 5060 5061 shost = scsi_host_alloc(&mpi3mr_driver_template, 5062 sizeof(struct mpi3mr_ioc)); 5063 if (!shost) { 5064 retval = -ENODEV; 5065 goto shost_failed; 5066 } 5067 5068 mrioc = shost_priv(shost); 5069 retval = ida_alloc_range(&mrioc_ida, 0, U8_MAX, GFP_KERNEL); 5070 if (retval < 0) 5071 goto id_alloc_failed; 5072 mrioc->id = (u8)retval; 5073 sprintf(mrioc->driver_name, "%s", MPI3MR_DRIVER_NAME); 5074 sprintf(mrioc->name, "%s%d", mrioc->driver_name, mrioc->id); 5075 INIT_LIST_HEAD(&mrioc->list); 5076 spin_lock(&mrioc_list_lock); 5077 list_add_tail(&mrioc->list, &mrioc_list); 5078 spin_unlock(&mrioc_list_lock); 5079 5080 spin_lock_init(&mrioc->admin_req_lock); 5081 spin_lock_init(&mrioc->reply_free_queue_lock); 5082 spin_lock_init(&mrioc->sbq_lock); 5083 spin_lock_init(&mrioc->fwevt_lock); 5084 spin_lock_init(&mrioc->tgtdev_lock); 5085 spin_lock_init(&mrioc->watchdog_lock); 5086 spin_lock_init(&mrioc->chain_buf_lock); 5087 spin_lock_init(&mrioc->sas_node_lock); 5088 5089 INIT_LIST_HEAD(&mrioc->fwevt_list); 5090 INIT_LIST_HEAD(&mrioc->tgtdev_list); 5091 INIT_LIST_HEAD(&mrioc->delayed_rmhs_list); 5092 INIT_LIST_HEAD(&mrioc->delayed_evtack_cmds_list); 5093 INIT_LIST_HEAD(&mrioc->sas_expander_list); 5094 INIT_LIST_HEAD(&mrioc->hba_port_table_list); 5095 INIT_LIST_HEAD(&mrioc->enclosure_list); 5096 5097 mutex_init(&mrioc->reset_mutex); 5098 mpi3mr_init_drv_cmd(&mrioc->init_cmds, MPI3MR_HOSTTAG_INITCMDS); 5099 mpi3mr_init_drv_cmd(&mrioc->host_tm_cmds, MPI3MR_HOSTTAG_BLK_TMS); 5100 mpi3mr_init_drv_cmd(&mrioc->bsg_cmds, MPI3MR_HOSTTAG_BSG_CMDS); 5101 mpi3mr_init_drv_cmd(&mrioc->cfg_cmds, MPI3MR_HOSTTAG_CFG_CMDS); 5102 mpi3mr_init_drv_cmd(&mrioc->transport_cmds, 5103 MPI3MR_HOSTTAG_TRANSPORT_CMDS); 5104 5105 for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) 5106 mpi3mr_init_drv_cmd(&mrioc->dev_rmhs_cmds[i], 5107 MPI3MR_HOSTTAG_DEVRMCMD_MIN + i); 5108 5109 for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) 5110 mpi3mr_init_drv_cmd(&mrioc->evtack_cmds[i], 5111 MPI3MR_HOSTTAG_EVTACKCMD_MIN + i); 5112 5113 if ((pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) && 5114 !pdev->revision) 5115 mrioc->enable_segqueue = false; 5116 else 5117 mrioc->enable_segqueue = true; 5118 5119 init_waitqueue_head(&mrioc->reset_waitq); 5120 mrioc->logging_level = logging_level; 5121 mrioc->shost = shost; 5122 mrioc->pdev = pdev; 5123 mrioc->stop_bsgs = 1; 5124 5125 mrioc->max_sgl_entries = max_sgl_entries; 5126 if (max_sgl_entries > MPI3MR_MAX_SGL_ENTRIES) 5127 mrioc->max_sgl_entries = MPI3MR_MAX_SGL_ENTRIES; 5128 else if (max_sgl_entries < MPI3MR_DEFAULT_SGL_ENTRIES) 5129 mrioc->max_sgl_entries = MPI3MR_DEFAULT_SGL_ENTRIES; 5130 else { 5131 mrioc->max_sgl_entries /= MPI3MR_DEFAULT_SGL_ENTRIES; 5132 mrioc->max_sgl_entries *= MPI3MR_DEFAULT_SGL_ENTRIES; 5133 } 5134 5135 /* init shost parameters */ 5136 shost->max_cmd_len = MPI3MR_MAX_CDB_LENGTH; 5137 shost->max_lun = -1; 5138 shost->unique_id = mrioc->id; 5139 5140 shost->max_channel = 0; 5141 shost->max_id = 0xFFFFFFFF; 5142 5143 shost->host_tagset = 1; 5144 5145 if (prot_mask >= 0) 5146 scsi_host_set_prot(shost, prot_mask); 5147 else { 5148 prot_mask = SHOST_DIF_TYPE1_PROTECTION 5149 | SHOST_DIF_TYPE2_PROTECTION 5150 | SHOST_DIF_TYPE3_PROTECTION; 5151 scsi_host_set_prot(shost, prot_mask); 5152 } 5153 5154 ioc_info(mrioc, 5155 "%s :host protection capabilities enabled %s%s%s%s%s%s%s\n", 5156 __func__, 5157 (prot_mask & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "", 5158 (prot_mask & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "", 5159 (prot_mask & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "", 5160 (prot_mask & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "", 5161 (prot_mask & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "", 5162 (prot_mask & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "", 5163 (prot_mask & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : ""); 5164 5165 if (prot_guard_mask) 5166 scsi_host_set_guard(shost, (prot_guard_mask & 3)); 5167 else 5168 scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); 5169 5170 snprintf(mrioc->fwevt_worker_name, sizeof(mrioc->fwevt_worker_name), 5171 "%s%d_fwevt_wrkr", mrioc->driver_name, mrioc->id); 5172 mrioc->fwevt_worker_thread = alloc_ordered_workqueue( 5173 mrioc->fwevt_worker_name, 0); 5174 if (!mrioc->fwevt_worker_thread) { 5175 ioc_err(mrioc, "failure at %s:%d/%s()!\n", 5176 __FILE__, __LINE__, __func__); 5177 retval = -ENODEV; 5178 goto fwevtthread_failed; 5179 } 5180 5181 mrioc->is_driver_loading = 1; 5182 mrioc->cpu_count = num_online_cpus(); 5183 if (mpi3mr_setup_resources(mrioc)) { 5184 ioc_err(mrioc, "setup resources failed\n"); 5185 retval = -ENODEV; 5186 goto resource_alloc_failed; 5187 } 5188 if (mpi3mr_init_ioc(mrioc)) { 5189 ioc_err(mrioc, "initializing IOC failed\n"); 5190 retval = -ENODEV; 5191 goto init_ioc_failed; 5192 } 5193 5194 shost->nr_hw_queues = mrioc->num_op_reply_q; 5195 if (mrioc->active_poll_qcount) 5196 shost->nr_maps = 3; 5197 5198 shost->can_queue = mrioc->max_host_ios; 5199 shost->sg_tablesize = mrioc->max_sgl_entries; 5200 shost->max_id = mrioc->facts.max_perids + 1; 5201 5202 retval = scsi_add_host(shost, &pdev->dev); 5203 if (retval) { 5204 ioc_err(mrioc, "failure at %s:%d/%s()!\n", 5205 __FILE__, __LINE__, __func__); 5206 goto addhost_failed; 5207 } 5208 5209 scsi_scan_host(shost); 5210 mpi3mr_bsg_init(mrioc); 5211 return retval; 5212 5213 addhost_failed: 5214 mpi3mr_stop_watchdog(mrioc); 5215 mpi3mr_cleanup_ioc(mrioc); 5216 init_ioc_failed: 5217 mpi3mr_free_mem(mrioc); 5218 mpi3mr_cleanup_resources(mrioc); 5219 resource_alloc_failed: 5220 destroy_workqueue(mrioc->fwevt_worker_thread); 5221 fwevtthread_failed: 5222 ida_free(&mrioc_ida, mrioc->id); 5223 spin_lock(&mrioc_list_lock); 5224 list_del(&mrioc->list); 5225 spin_unlock(&mrioc_list_lock); 5226 id_alloc_failed: 5227 scsi_host_put(shost); 5228 shost_failed: 5229 return retval; 5230 } 5231 5232 /** 5233 * mpi3mr_remove - PCI remove callback 5234 * @pdev: PCI device instance 5235 * 5236 * Cleanup the IOC by issuing MUR and shutdown notification. 5237 * Free up all memory and resources associated with the 5238 * controllerand target devices, unregister the shost. 5239 * 5240 * Return: Nothing. 5241 */ 5242 static void mpi3mr_remove(struct pci_dev *pdev) 5243 { 5244 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5245 struct mpi3mr_ioc *mrioc; 5246 struct workqueue_struct *wq; 5247 unsigned long flags; 5248 struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next; 5249 struct mpi3mr_hba_port *port, *hba_port_next; 5250 struct mpi3mr_sas_node *sas_expander, *sas_expander_next; 5251 5252 if (!shost) 5253 return; 5254 5255 mrioc = shost_priv(shost); 5256 while (mrioc->reset_in_progress || mrioc->is_driver_loading) 5257 ssleep(1); 5258 5259 if (!pci_device_is_present(mrioc->pdev)) { 5260 mrioc->unrecoverable = 1; 5261 mpi3mr_flush_cmds_for_unrecovered_controller(mrioc); 5262 } 5263 5264 mpi3mr_bsg_exit(mrioc); 5265 mrioc->stop_drv_processing = 1; 5266 mpi3mr_cleanup_fwevt_list(mrioc); 5267 spin_lock_irqsave(&mrioc->fwevt_lock, flags); 5268 wq = mrioc->fwevt_worker_thread; 5269 mrioc->fwevt_worker_thread = NULL; 5270 spin_unlock_irqrestore(&mrioc->fwevt_lock, flags); 5271 if (wq) 5272 destroy_workqueue(wq); 5273 5274 if (mrioc->sas_transport_enabled) 5275 sas_remove_host(shost); 5276 else 5277 scsi_remove_host(shost); 5278 5279 list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list, 5280 list) { 5281 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev); 5282 mpi3mr_tgtdev_del_from_list(mrioc, tgtdev, true); 5283 mpi3mr_tgtdev_put(tgtdev); 5284 } 5285 mpi3mr_stop_watchdog(mrioc); 5286 mpi3mr_cleanup_ioc(mrioc); 5287 mpi3mr_free_mem(mrioc); 5288 mpi3mr_cleanup_resources(mrioc); 5289 5290 spin_lock_irqsave(&mrioc->sas_node_lock, flags); 5291 list_for_each_entry_safe_reverse(sas_expander, sas_expander_next, 5292 &mrioc->sas_expander_list, list) { 5293 spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); 5294 mpi3mr_expander_node_remove(mrioc, sas_expander); 5295 spin_lock_irqsave(&mrioc->sas_node_lock, flags); 5296 } 5297 list_for_each_entry_safe(port, hba_port_next, &mrioc->hba_port_table_list, list) { 5298 ioc_info(mrioc, 5299 "removing hba_port entry: %p port: %d from hba_port list\n", 5300 port, port->port_id); 5301 list_del(&port->list); 5302 kfree(port); 5303 } 5304 spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); 5305 5306 if (mrioc->sas_hba.num_phys) { 5307 kfree(mrioc->sas_hba.phy); 5308 mrioc->sas_hba.phy = NULL; 5309 mrioc->sas_hba.num_phys = 0; 5310 } 5311 5312 ida_free(&mrioc_ida, mrioc->id); 5313 spin_lock(&mrioc_list_lock); 5314 list_del(&mrioc->list); 5315 spin_unlock(&mrioc_list_lock); 5316 5317 scsi_host_put(shost); 5318 } 5319 5320 /** 5321 * mpi3mr_shutdown - PCI shutdown callback 5322 * @pdev: PCI device instance 5323 * 5324 * Free up all memory and resources associated with the 5325 * controller 5326 * 5327 * Return: Nothing. 5328 */ 5329 static void mpi3mr_shutdown(struct pci_dev *pdev) 5330 { 5331 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5332 struct mpi3mr_ioc *mrioc; 5333 struct workqueue_struct *wq; 5334 unsigned long flags; 5335 5336 if (!shost) 5337 return; 5338 5339 mrioc = shost_priv(shost); 5340 while (mrioc->reset_in_progress || mrioc->is_driver_loading) 5341 ssleep(1); 5342 5343 mrioc->stop_drv_processing = 1; 5344 mpi3mr_cleanup_fwevt_list(mrioc); 5345 spin_lock_irqsave(&mrioc->fwevt_lock, flags); 5346 wq = mrioc->fwevt_worker_thread; 5347 mrioc->fwevt_worker_thread = NULL; 5348 spin_unlock_irqrestore(&mrioc->fwevt_lock, flags); 5349 if (wq) 5350 destroy_workqueue(wq); 5351 5352 mpi3mr_stop_watchdog(mrioc); 5353 mpi3mr_cleanup_ioc(mrioc); 5354 mpi3mr_cleanup_resources(mrioc); 5355 } 5356 5357 /** 5358 * mpi3mr_suspend - PCI power management suspend callback 5359 * @dev: Device struct 5360 * 5361 * Change the power state to the given value and cleanup the IOC 5362 * by issuing MUR and shutdown notification 5363 * 5364 * Return: 0 always. 5365 */ 5366 static int __maybe_unused 5367 mpi3mr_suspend(struct device *dev) 5368 { 5369 struct pci_dev *pdev = to_pci_dev(dev); 5370 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5371 struct mpi3mr_ioc *mrioc; 5372 5373 if (!shost) 5374 return 0; 5375 5376 mrioc = shost_priv(shost); 5377 while (mrioc->reset_in_progress || mrioc->is_driver_loading) 5378 ssleep(1); 5379 mrioc->stop_drv_processing = 1; 5380 mpi3mr_cleanup_fwevt_list(mrioc); 5381 scsi_block_requests(shost); 5382 mpi3mr_stop_watchdog(mrioc); 5383 mpi3mr_cleanup_ioc(mrioc); 5384 5385 ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state\n", 5386 pdev, pci_name(pdev)); 5387 mpi3mr_cleanup_resources(mrioc); 5388 5389 return 0; 5390 } 5391 5392 /** 5393 * mpi3mr_resume - PCI power management resume callback 5394 * @dev: Device struct 5395 * 5396 * Restore the power state to D0 and reinitialize the controller 5397 * and resume I/O operations to the target devices 5398 * 5399 * Return: 0 on success, non-zero on failure 5400 */ 5401 static int __maybe_unused 5402 mpi3mr_resume(struct device *dev) 5403 { 5404 struct pci_dev *pdev = to_pci_dev(dev); 5405 struct Scsi_Host *shost = pci_get_drvdata(pdev); 5406 struct mpi3mr_ioc *mrioc; 5407 pci_power_t device_state = pdev->current_state; 5408 int r; 5409 5410 if (!shost) 5411 return 0; 5412 5413 mrioc = shost_priv(shost); 5414 5415 ioc_info(mrioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n", 5416 pdev, pci_name(pdev), device_state); 5417 mrioc->pdev = pdev; 5418 mrioc->cpu_count = num_online_cpus(); 5419 r = mpi3mr_setup_resources(mrioc); 5420 if (r) { 5421 ioc_info(mrioc, "%s: Setup resources failed[%d]\n", 5422 __func__, r); 5423 return r; 5424 } 5425 5426 mrioc->stop_drv_processing = 0; 5427 mpi3mr_invalidate_devhandles(mrioc); 5428 mpi3mr_free_enclosure_list(mrioc); 5429 mpi3mr_memset_buffers(mrioc); 5430 r = mpi3mr_reinit_ioc(mrioc, 1); 5431 if (r) { 5432 ioc_err(mrioc, "resuming controller failed[%d]\n", r); 5433 return r; 5434 } 5435 ssleep(MPI3MR_RESET_TOPOLOGY_SETTLE_TIME); 5436 scsi_unblock_requests(shost); 5437 mrioc->device_refresh_on = 0; 5438 mpi3mr_start_watchdog(mrioc); 5439 5440 return 0; 5441 } 5442 5443 static const struct pci_device_id mpi3mr_pci_id_table[] = { 5444 { 5445 PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM, 5446 MPI3_MFGPAGE_DEVID_SAS4116, PCI_ANY_ID, PCI_ANY_ID) 5447 }, 5448 { 5449 PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM, 5450 MPI3_MFGPAGE_DEVID_SAS5116_MPI, PCI_ANY_ID, PCI_ANY_ID) 5451 }, 5452 { 5453 PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM, 5454 MPI3_MFGPAGE_DEVID_SAS5116_MPI_MGMT, PCI_ANY_ID, PCI_ANY_ID) 5455 }, 5456 { 0 } 5457 }; 5458 MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table); 5459 5460 static SIMPLE_DEV_PM_OPS(mpi3mr_pm_ops, mpi3mr_suspend, mpi3mr_resume); 5461 5462 static struct pci_driver mpi3mr_pci_driver = { 5463 .name = MPI3MR_DRIVER_NAME, 5464 .id_table = mpi3mr_pci_id_table, 5465 .probe = mpi3mr_probe, 5466 .remove = mpi3mr_remove, 5467 .shutdown = mpi3mr_shutdown, 5468 .driver.pm = &mpi3mr_pm_ops, 5469 }; 5470 5471 static ssize_t event_counter_show(struct device_driver *dd, char *buf) 5472 { 5473 return sprintf(buf, "%llu\n", atomic64_read(&event_counter)); 5474 } 5475 static DRIVER_ATTR_RO(event_counter); 5476 5477 static int __init mpi3mr_init(void) 5478 { 5479 int ret_val; 5480 5481 pr_info("Loading %s version %s\n", MPI3MR_DRIVER_NAME, 5482 MPI3MR_DRIVER_VERSION); 5483 5484 mpi3mr_transport_template = 5485 sas_attach_transport(&mpi3mr_transport_functions); 5486 if (!mpi3mr_transport_template) { 5487 pr_err("%s failed to load due to sas transport attach failure\n", 5488 MPI3MR_DRIVER_NAME); 5489 return -ENODEV; 5490 } 5491 5492 ret_val = pci_register_driver(&mpi3mr_pci_driver); 5493 if (ret_val) { 5494 pr_err("%s failed to load due to pci register driver failure\n", 5495 MPI3MR_DRIVER_NAME); 5496 goto err_pci_reg_fail; 5497 } 5498 5499 ret_val = driver_create_file(&mpi3mr_pci_driver.driver, 5500 &driver_attr_event_counter); 5501 if (ret_val) 5502 goto err_event_counter; 5503 5504 return ret_val; 5505 5506 err_event_counter: 5507 pci_unregister_driver(&mpi3mr_pci_driver); 5508 5509 err_pci_reg_fail: 5510 sas_release_transport(mpi3mr_transport_template); 5511 return ret_val; 5512 } 5513 5514 static void __exit mpi3mr_exit(void) 5515 { 5516 if (warn_non_secure_ctlr) 5517 pr_warn( 5518 "Unloading %s version %s while managing a non secure controller\n", 5519 MPI3MR_DRIVER_NAME, MPI3MR_DRIVER_VERSION); 5520 else 5521 pr_info("Unloading %s version %s\n", MPI3MR_DRIVER_NAME, 5522 MPI3MR_DRIVER_VERSION); 5523 5524 driver_remove_file(&mpi3mr_pci_driver.driver, 5525 &driver_attr_event_counter); 5526 pci_unregister_driver(&mpi3mr_pci_driver); 5527 sas_release_transport(mpi3mr_transport_template); 5528 ida_destroy(&mrioc_ida); 5529 } 5530 5531 module_init(mpi3mr_init); 5532 module_exit(mpi3mr_exit); 5533