1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Driver for Broadcom MPI3 Storage Controllers 4 * 5 * Copyright (C) 2017-2022 Broadcom Inc. 6 * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com) 7 * 8 */ 9 10 #include "mpi3mr.h" 11 #include <linux/bsg-lib.h> 12 #include <uapi/scsi/scsi_bsg_mpi3mr.h> 13 14 /** 15 * mpi3mr_bsg_pel_abort - sends PEL abort request 16 * @mrioc: Adapter instance reference 17 * 18 * This function sends PEL abort request to the firmware through 19 * admin request queue. 20 * 21 * Return: 0 on success, -1 on failure 22 */ 23 static int mpi3mr_bsg_pel_abort(struct mpi3mr_ioc *mrioc) 24 { 25 struct mpi3_pel_req_action_abort pel_abort_req; 26 struct mpi3_pel_reply *pel_reply; 27 int retval = 0; 28 u16 pe_log_status; 29 30 if (mrioc->reset_in_progress) { 31 dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__); 32 return -1; 33 } 34 if (mrioc->stop_bsgs) { 35 dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__); 36 return -1; 37 } 38 39 memset(&pel_abort_req, 0, sizeof(pel_abort_req)); 40 mutex_lock(&mrioc->pel_abort_cmd.mutex); 41 if (mrioc->pel_abort_cmd.state & MPI3MR_CMD_PENDING) { 42 dprint_bsg_err(mrioc, "%s: command is in use\n", __func__); 43 mutex_unlock(&mrioc->pel_abort_cmd.mutex); 44 return -1; 45 } 46 mrioc->pel_abort_cmd.state = MPI3MR_CMD_PENDING; 47 mrioc->pel_abort_cmd.is_waiting = 1; 48 mrioc->pel_abort_cmd.callback = NULL; 49 pel_abort_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_ABORT); 50 pel_abort_req.function = MPI3_FUNCTION_PERSISTENT_EVENT_LOG; 51 pel_abort_req.action = MPI3_PEL_ACTION_ABORT; 52 pel_abort_req.abort_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_PEL_WAIT); 53 54 mrioc->pel_abort_requested = 1; 55 init_completion(&mrioc->pel_abort_cmd.done); 56 retval = mpi3mr_admin_request_post(mrioc, &pel_abort_req, 57 sizeof(pel_abort_req), 0); 58 if (retval) { 59 retval = -1; 60 dprint_bsg_err(mrioc, "%s: admin request post failed\n", 61 __func__); 62 mrioc->pel_abort_requested = 0; 63 goto out_unlock; 64 } 65 66 wait_for_completion_timeout(&mrioc->pel_abort_cmd.done, 67 (MPI3MR_INTADMCMD_TIMEOUT * HZ)); 68 if (!(mrioc->pel_abort_cmd.state & MPI3MR_CMD_COMPLETE)) { 69 mrioc->pel_abort_cmd.is_waiting = 0; 70 dprint_bsg_err(mrioc, "%s: command timedout\n", __func__); 71 if (!(mrioc->pel_abort_cmd.state & MPI3MR_CMD_RESET)) 72 mpi3mr_soft_reset_handler(mrioc, 73 MPI3MR_RESET_FROM_PELABORT_TIMEOUT, 1); 74 retval = -1; 75 goto out_unlock; 76 } 77 if ((mrioc->pel_abort_cmd.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 78 != MPI3_IOCSTATUS_SUCCESS) { 79 dprint_bsg_err(mrioc, 80 "%s: command failed, ioc_status(0x%04x) log_info(0x%08x)\n", 81 __func__, (mrioc->pel_abort_cmd.ioc_status & 82 MPI3_IOCSTATUS_STATUS_MASK), 83 mrioc->pel_abort_cmd.ioc_loginfo); 84 retval = -1; 85 goto out_unlock; 86 } 87 if (mrioc->pel_abort_cmd.state & MPI3MR_CMD_REPLY_VALID) { 88 pel_reply = (struct mpi3_pel_reply *)mrioc->pel_abort_cmd.reply; 89 pe_log_status = le16_to_cpu(pel_reply->pe_log_status); 90 if (pe_log_status != MPI3_PEL_STATUS_SUCCESS) { 91 dprint_bsg_err(mrioc, 92 "%s: command failed, pel_status(0x%04x)\n", 93 __func__, pe_log_status); 94 retval = -1; 95 } 96 } 97 98 out_unlock: 99 mrioc->pel_abort_cmd.state = MPI3MR_CMD_NOTUSED; 100 mutex_unlock(&mrioc->pel_abort_cmd.mutex); 101 return retval; 102 } 103 /** 104 * mpi3mr_bsg_verify_adapter - verify adapter number is valid 105 * @ioc_number: Adapter number 106 * 107 * This function returns the adapter instance pointer of given 108 * adapter number. If adapter number does not match with the 109 * driver's adapter list, driver returns NULL. 110 * 111 * Return: adapter instance reference 112 */ 113 static struct mpi3mr_ioc *mpi3mr_bsg_verify_adapter(int ioc_number) 114 { 115 struct mpi3mr_ioc *mrioc = NULL; 116 117 spin_lock(&mrioc_list_lock); 118 list_for_each_entry(mrioc, &mrioc_list, list) { 119 if (mrioc->id == ioc_number) { 120 spin_unlock(&mrioc_list_lock); 121 return mrioc; 122 } 123 } 124 spin_unlock(&mrioc_list_lock); 125 return NULL; 126 } 127 128 /** 129 * mpi3mr_enable_logdata - Handler for log data enable 130 * @mrioc: Adapter instance reference 131 * @job: BSG job reference 132 * 133 * This function enables log data caching in the driver if not 134 * already enabled and return the maximum number of log data 135 * entries that can be cached in the driver. 136 * 137 * Return: 0 on success and proper error codes on failure 138 */ 139 static long mpi3mr_enable_logdata(struct mpi3mr_ioc *mrioc, 140 struct bsg_job *job) 141 { 142 struct mpi3mr_logdata_enable logdata_enable; 143 144 if (!mrioc->logdata_buf) { 145 mrioc->logdata_entry_sz = 146 (mrioc->reply_sz - (sizeof(struct mpi3_event_notification_reply) - 4)) 147 + MPI3MR_BSG_LOGDATA_ENTRY_HEADER_SZ; 148 mrioc->logdata_buf_idx = 0; 149 mrioc->logdata_buf = kcalloc(MPI3MR_BSG_LOGDATA_MAX_ENTRIES, 150 mrioc->logdata_entry_sz, GFP_KERNEL); 151 152 if (!mrioc->logdata_buf) 153 return -ENOMEM; 154 } 155 156 memset(&logdata_enable, 0, sizeof(logdata_enable)); 157 logdata_enable.max_entries = 158 MPI3MR_BSG_LOGDATA_MAX_ENTRIES; 159 if (job->request_payload.payload_len >= sizeof(logdata_enable)) { 160 sg_copy_from_buffer(job->request_payload.sg_list, 161 job->request_payload.sg_cnt, 162 &logdata_enable, sizeof(logdata_enable)); 163 return 0; 164 } 165 166 return -EINVAL; 167 } 168 /** 169 * mpi3mr_get_logdata - Handler for get log data 170 * @mrioc: Adapter instance reference 171 * @job: BSG job pointer 172 * This function copies the log data entries to the user buffer 173 * when log caching is enabled in the driver. 174 * 175 * Return: 0 on success and proper error codes on failure 176 */ 177 static long mpi3mr_get_logdata(struct mpi3mr_ioc *mrioc, 178 struct bsg_job *job) 179 { 180 u16 num_entries, sz, entry_sz = mrioc->logdata_entry_sz; 181 182 if ((!mrioc->logdata_buf) || (job->request_payload.payload_len < entry_sz)) 183 return -EINVAL; 184 185 num_entries = job->request_payload.payload_len / entry_sz; 186 if (num_entries > MPI3MR_BSG_LOGDATA_MAX_ENTRIES) 187 num_entries = MPI3MR_BSG_LOGDATA_MAX_ENTRIES; 188 sz = num_entries * entry_sz; 189 190 if (job->request_payload.payload_len >= sz) { 191 sg_copy_from_buffer(job->request_payload.sg_list, 192 job->request_payload.sg_cnt, 193 mrioc->logdata_buf, sz); 194 return 0; 195 } 196 return -EINVAL; 197 } 198 199 /** 200 * mpi3mr_bsg_pel_enable - Handler for PEL enable driver 201 * @mrioc: Adapter instance reference 202 * @job: BSG job pointer 203 * 204 * This function is the handler for PEL enable driver. 205 * Validates the application given class and locale and if 206 * requires aborts the existing PEL wait request and/or issues 207 * new PEL wait request to the firmware and returns. 208 * 209 * Return: 0 on success and proper error codes on failure. 210 */ 211 static long mpi3mr_bsg_pel_enable(struct mpi3mr_ioc *mrioc, 212 struct bsg_job *job) 213 { 214 long rval = -EINVAL; 215 struct mpi3mr_bsg_out_pel_enable pel_enable; 216 u8 issue_pel_wait; 217 u8 tmp_class; 218 u16 tmp_locale; 219 220 if (job->request_payload.payload_len != sizeof(pel_enable)) { 221 dprint_bsg_err(mrioc, "%s: invalid size argument\n", 222 __func__); 223 return rval; 224 } 225 226 sg_copy_to_buffer(job->request_payload.sg_list, 227 job->request_payload.sg_cnt, 228 &pel_enable, sizeof(pel_enable)); 229 230 if (pel_enable.pel_class > MPI3_PEL_CLASS_FAULT) { 231 dprint_bsg_err(mrioc, "%s: out of range class %d sent\n", 232 __func__, pel_enable.pel_class); 233 rval = 0; 234 goto out; 235 } 236 if (!mrioc->pel_enabled) 237 issue_pel_wait = 1; 238 else { 239 if ((mrioc->pel_class <= pel_enable.pel_class) && 240 !((mrioc->pel_locale & pel_enable.pel_locale) ^ 241 pel_enable.pel_locale)) { 242 issue_pel_wait = 0; 243 rval = 0; 244 } else { 245 pel_enable.pel_locale |= mrioc->pel_locale; 246 247 if (mrioc->pel_class < pel_enable.pel_class) 248 pel_enable.pel_class = mrioc->pel_class; 249 250 rval = mpi3mr_bsg_pel_abort(mrioc); 251 if (rval) { 252 dprint_bsg_err(mrioc, 253 "%s: pel_abort failed, status(%ld)\n", 254 __func__, rval); 255 goto out; 256 } 257 issue_pel_wait = 1; 258 } 259 } 260 if (issue_pel_wait) { 261 tmp_class = mrioc->pel_class; 262 tmp_locale = mrioc->pel_locale; 263 mrioc->pel_class = pel_enable.pel_class; 264 mrioc->pel_locale = pel_enable.pel_locale; 265 mrioc->pel_enabled = 1; 266 rval = mpi3mr_pel_get_seqnum_post(mrioc, NULL); 267 if (rval) { 268 mrioc->pel_class = tmp_class; 269 mrioc->pel_locale = tmp_locale; 270 mrioc->pel_enabled = 0; 271 dprint_bsg_err(mrioc, 272 "%s: pel get sequence number failed, status(%ld)\n", 273 __func__, rval); 274 } 275 } 276 277 out: 278 return rval; 279 } 280 /** 281 * mpi3mr_get_all_tgt_info - Get all target information 282 * @mrioc: Adapter instance reference 283 * @job: BSG job reference 284 * 285 * This function copies the driver managed target devices device 286 * handle, persistent ID, bus ID and taret ID to the user 287 * provided buffer for the specific controller. This function 288 * also provides the number of devices managed by the driver for 289 * the specific controller. 290 * 291 * Return: 0 on success and proper error codes on failure 292 */ 293 static long mpi3mr_get_all_tgt_info(struct mpi3mr_ioc *mrioc, 294 struct bsg_job *job) 295 { 296 long rval = -EINVAL; 297 u16 num_devices = 0, i = 0, size; 298 unsigned long flags; 299 struct mpi3mr_tgt_dev *tgtdev; 300 struct mpi3mr_device_map_info *devmap_info = NULL; 301 struct mpi3mr_all_tgt_info *alltgt_info = NULL; 302 uint32_t min_entrylen = 0, kern_entrylen = 0, usr_entrylen = 0; 303 304 if (job->request_payload.payload_len < sizeof(u32)) { 305 dprint_bsg_err(mrioc, "%s: invalid size argument\n", 306 __func__); 307 return rval; 308 } 309 310 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 311 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) 312 num_devices++; 313 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 314 315 if ((job->request_payload.payload_len == sizeof(u32)) || 316 list_empty(&mrioc->tgtdev_list)) { 317 sg_copy_from_buffer(job->request_payload.sg_list, 318 job->request_payload.sg_cnt, 319 &num_devices, sizeof(num_devices)); 320 return 0; 321 } 322 323 kern_entrylen = (num_devices - 1) * sizeof(*devmap_info); 324 size = sizeof(*alltgt_info) + kern_entrylen; 325 alltgt_info = kzalloc(size, GFP_KERNEL); 326 if (!alltgt_info) 327 return -ENOMEM; 328 329 devmap_info = alltgt_info->dmi; 330 memset((u8 *)devmap_info, 0xFF, (kern_entrylen + sizeof(*devmap_info))); 331 spin_lock_irqsave(&mrioc->tgtdev_lock, flags); 332 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) { 333 if (i < num_devices) { 334 devmap_info[i].handle = tgtdev->dev_handle; 335 devmap_info[i].perst_id = tgtdev->perst_id; 336 if (tgtdev->host_exposed && tgtdev->starget) { 337 devmap_info[i].target_id = tgtdev->starget->id; 338 devmap_info[i].bus_id = 339 tgtdev->starget->channel; 340 } 341 i++; 342 } 343 } 344 num_devices = i; 345 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags); 346 347 memcpy(&alltgt_info->num_devices, &num_devices, sizeof(num_devices)); 348 349 usr_entrylen = (job->request_payload.payload_len - sizeof(u32)) / sizeof(*devmap_info); 350 usr_entrylen *= sizeof(*devmap_info); 351 min_entrylen = min(usr_entrylen, kern_entrylen); 352 if (min_entrylen && (!memcpy(&alltgt_info->dmi, devmap_info, min_entrylen))) { 353 dprint_bsg_err(mrioc, "%s:%d: device map info copy failed\n", 354 __func__, __LINE__); 355 rval = -EFAULT; 356 goto out; 357 } 358 359 sg_copy_from_buffer(job->request_payload.sg_list, 360 job->request_payload.sg_cnt, 361 alltgt_info, job->request_payload.payload_len); 362 rval = 0; 363 out: 364 kfree(alltgt_info); 365 return rval; 366 } 367 /** 368 * mpi3mr_get_change_count - Get topology change count 369 * @mrioc: Adapter instance reference 370 * @job: BSG job reference 371 * 372 * This function copies the toplogy change count provided by the 373 * driver in events and cached in the driver to the user 374 * provided buffer for the specific controller. 375 * 376 * Return: 0 on success and proper error codes on failure 377 */ 378 static long mpi3mr_get_change_count(struct mpi3mr_ioc *mrioc, 379 struct bsg_job *job) 380 { 381 struct mpi3mr_change_count chgcnt; 382 383 memset(&chgcnt, 0, sizeof(chgcnt)); 384 chgcnt.change_count = mrioc->change_count; 385 if (job->request_payload.payload_len >= sizeof(chgcnt)) { 386 sg_copy_from_buffer(job->request_payload.sg_list, 387 job->request_payload.sg_cnt, 388 &chgcnt, sizeof(chgcnt)); 389 return 0; 390 } 391 return -EINVAL; 392 } 393 394 /** 395 * mpi3mr_bsg_adp_reset - Issue controller reset 396 * @mrioc: Adapter instance reference 397 * @job: BSG job reference 398 * 399 * This function identifies the user provided reset type and 400 * issues approporiate reset to the controller and wait for that 401 * to complete and reinitialize the controller and then returns 402 * 403 * Return: 0 on success and proper error codes on failure 404 */ 405 static long mpi3mr_bsg_adp_reset(struct mpi3mr_ioc *mrioc, 406 struct bsg_job *job) 407 { 408 long rval = -EINVAL; 409 u8 save_snapdump; 410 struct mpi3mr_bsg_adp_reset adpreset; 411 412 if (job->request_payload.payload_len != 413 sizeof(adpreset)) { 414 dprint_bsg_err(mrioc, "%s: invalid size argument\n", 415 __func__); 416 goto out; 417 } 418 419 sg_copy_to_buffer(job->request_payload.sg_list, 420 job->request_payload.sg_cnt, 421 &adpreset, sizeof(adpreset)); 422 423 switch (adpreset.reset_type) { 424 case MPI3MR_BSG_ADPRESET_SOFT: 425 save_snapdump = 0; 426 break; 427 case MPI3MR_BSG_ADPRESET_DIAG_FAULT: 428 save_snapdump = 1; 429 break; 430 default: 431 dprint_bsg_err(mrioc, "%s: unknown reset_type(%d)\n", 432 __func__, adpreset.reset_type); 433 goto out; 434 } 435 436 rval = mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_APP, 437 save_snapdump); 438 439 if (rval) 440 dprint_bsg_err(mrioc, 441 "%s: reset handler returned error(%ld) for reset type %d\n", 442 __func__, rval, adpreset.reset_type); 443 out: 444 return rval; 445 } 446 447 /** 448 * mpi3mr_bsg_populate_adpinfo - Get adapter info command handler 449 * @mrioc: Adapter instance reference 450 * @job: BSG job reference 451 * 452 * This function provides adapter information for the given 453 * controller 454 * 455 * Return: 0 on success and proper error codes on failure 456 */ 457 static long mpi3mr_bsg_populate_adpinfo(struct mpi3mr_ioc *mrioc, 458 struct bsg_job *job) 459 { 460 enum mpi3mr_iocstate ioc_state; 461 struct mpi3mr_bsg_in_adpinfo adpinfo; 462 463 memset(&adpinfo, 0, sizeof(adpinfo)); 464 adpinfo.adp_type = MPI3MR_BSG_ADPTYPE_AVGFAMILY; 465 adpinfo.pci_dev_id = mrioc->pdev->device; 466 adpinfo.pci_dev_hw_rev = mrioc->pdev->revision; 467 adpinfo.pci_subsys_dev_id = mrioc->pdev->subsystem_device; 468 adpinfo.pci_subsys_ven_id = mrioc->pdev->subsystem_vendor; 469 adpinfo.pci_bus = mrioc->pdev->bus->number; 470 adpinfo.pci_dev = PCI_SLOT(mrioc->pdev->devfn); 471 adpinfo.pci_func = PCI_FUNC(mrioc->pdev->devfn); 472 adpinfo.pci_seg_id = pci_domain_nr(mrioc->pdev->bus); 473 adpinfo.app_intfc_ver = MPI3MR_IOCTL_VERSION; 474 475 ioc_state = mpi3mr_get_iocstate(mrioc); 476 if (ioc_state == MRIOC_STATE_UNRECOVERABLE) 477 adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_UNRECOVERABLE; 478 else if ((mrioc->reset_in_progress) || (mrioc->stop_bsgs)) 479 adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_IN_RESET; 480 else if (ioc_state == MRIOC_STATE_FAULT) 481 adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_FAULT; 482 else 483 adpinfo.adp_state = MPI3MR_BSG_ADPSTATE_OPERATIONAL; 484 485 memcpy((u8 *)&adpinfo.driver_info, (u8 *)&mrioc->driver_info, 486 sizeof(adpinfo.driver_info)); 487 488 if (job->request_payload.payload_len >= sizeof(adpinfo)) { 489 sg_copy_from_buffer(job->request_payload.sg_list, 490 job->request_payload.sg_cnt, 491 &adpinfo, sizeof(adpinfo)); 492 return 0; 493 } 494 return -EINVAL; 495 } 496 497 /** 498 * mpi3mr_bsg_process_drv_cmds - Driver Command handler 499 * @job: BSG job reference 500 * 501 * This function is the top level handler for driver commands, 502 * this does basic validation of the buffer and identifies the 503 * opcode and switches to correct sub handler. 504 * 505 * Return: 0 on success and proper error codes on failure 506 */ 507 static long mpi3mr_bsg_process_drv_cmds(struct bsg_job *job) 508 { 509 long rval = -EINVAL; 510 struct mpi3mr_ioc *mrioc = NULL; 511 struct mpi3mr_bsg_packet *bsg_req = NULL; 512 struct mpi3mr_bsg_drv_cmd *drvrcmd = NULL; 513 514 bsg_req = job->request; 515 drvrcmd = &bsg_req->cmd.drvrcmd; 516 517 mrioc = mpi3mr_bsg_verify_adapter(drvrcmd->mrioc_id); 518 if (!mrioc) 519 return -ENODEV; 520 521 if (drvrcmd->opcode == MPI3MR_DRVBSG_OPCODE_ADPINFO) { 522 rval = mpi3mr_bsg_populate_adpinfo(mrioc, job); 523 return rval; 524 } 525 526 if (mutex_lock_interruptible(&mrioc->bsg_cmds.mutex)) 527 return -ERESTARTSYS; 528 529 switch (drvrcmd->opcode) { 530 case MPI3MR_DRVBSG_OPCODE_ADPRESET: 531 rval = mpi3mr_bsg_adp_reset(mrioc, job); 532 break; 533 case MPI3MR_DRVBSG_OPCODE_ALLTGTDEVINFO: 534 rval = mpi3mr_get_all_tgt_info(mrioc, job); 535 break; 536 case MPI3MR_DRVBSG_OPCODE_GETCHGCNT: 537 rval = mpi3mr_get_change_count(mrioc, job); 538 break; 539 case MPI3MR_DRVBSG_OPCODE_LOGDATAENABLE: 540 rval = mpi3mr_enable_logdata(mrioc, job); 541 break; 542 case MPI3MR_DRVBSG_OPCODE_GETLOGDATA: 543 rval = mpi3mr_get_logdata(mrioc, job); 544 break; 545 case MPI3MR_DRVBSG_OPCODE_PELENABLE: 546 rval = mpi3mr_bsg_pel_enable(mrioc, job); 547 break; 548 case MPI3MR_DRVBSG_OPCODE_UNKNOWN: 549 default: 550 pr_err("%s: unsupported driver command opcode %d\n", 551 MPI3MR_DRIVER_NAME, drvrcmd->opcode); 552 break; 553 } 554 mutex_unlock(&mrioc->bsg_cmds.mutex); 555 return rval; 556 } 557 558 /** 559 * mpi3mr_bsg_build_sgl - SGL construction for MPI commands 560 * @mpi_req: MPI request 561 * @sgl_offset: offset to start sgl in the MPI request 562 * @drv_bufs: DMA address of the buffers to be placed in sgl 563 * @bufcnt: Number of DMA buffers 564 * @is_rmc: Does the buffer list has management command buffer 565 * @is_rmr: Does the buffer list has management response buffer 566 * @num_datasges: Number of data buffers in the list 567 * 568 * This function places the DMA address of the given buffers in 569 * proper format as SGEs in the given MPI request. 570 * 571 * Return: Nothing 572 */ 573 static void mpi3mr_bsg_build_sgl(u8 *mpi_req, uint32_t sgl_offset, 574 struct mpi3mr_buf_map *drv_bufs, u8 bufcnt, u8 is_rmc, 575 u8 is_rmr, u8 num_datasges) 576 { 577 u8 *sgl = (mpi_req + sgl_offset), count = 0; 578 struct mpi3_mgmt_passthrough_request *rmgmt_req = 579 (struct mpi3_mgmt_passthrough_request *)mpi_req; 580 struct mpi3mr_buf_map *drv_buf_iter = drv_bufs; 581 u8 sgl_flags, sgl_flags_last; 582 583 sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE | 584 MPI3_SGE_FLAGS_DLAS_SYSTEM | MPI3_SGE_FLAGS_END_OF_BUFFER; 585 sgl_flags_last = sgl_flags | MPI3_SGE_FLAGS_END_OF_LIST; 586 587 if (is_rmc) { 588 mpi3mr_add_sg_single(&rmgmt_req->command_sgl, 589 sgl_flags_last, drv_buf_iter->kern_buf_len, 590 drv_buf_iter->kern_buf_dma); 591 sgl = (u8 *)drv_buf_iter->kern_buf + drv_buf_iter->bsg_buf_len; 592 drv_buf_iter++; 593 count++; 594 if (is_rmr) { 595 mpi3mr_add_sg_single(&rmgmt_req->response_sgl, 596 sgl_flags_last, drv_buf_iter->kern_buf_len, 597 drv_buf_iter->kern_buf_dma); 598 drv_buf_iter++; 599 count++; 600 } else 601 mpi3mr_build_zero_len_sge( 602 &rmgmt_req->response_sgl); 603 } 604 if (!num_datasges) { 605 mpi3mr_build_zero_len_sge(sgl); 606 return; 607 } 608 for (; count < bufcnt; count++, drv_buf_iter++) { 609 if (drv_buf_iter->data_dir == DMA_NONE) 610 continue; 611 if (num_datasges == 1 || !is_rmc) 612 mpi3mr_add_sg_single(sgl, sgl_flags_last, 613 drv_buf_iter->kern_buf_len, drv_buf_iter->kern_buf_dma); 614 else 615 mpi3mr_add_sg_single(sgl, sgl_flags, 616 drv_buf_iter->kern_buf_len, drv_buf_iter->kern_buf_dma); 617 sgl += sizeof(struct mpi3_sge_common); 618 num_datasges--; 619 } 620 } 621 622 /** 623 * mpi3mr_get_nvme_data_fmt - returns the NVMe data format 624 * @nvme_encap_request: NVMe encapsulated MPI request 625 * 626 * This function returns the type of the data format specified 627 * in user provided NVMe command in NVMe encapsulated request. 628 * 629 * Return: Data format of the NVMe command (PRP/SGL etc) 630 */ 631 static unsigned int mpi3mr_get_nvme_data_fmt( 632 struct mpi3_nvme_encapsulated_request *nvme_encap_request) 633 { 634 u8 format = 0; 635 636 format = ((nvme_encap_request->command[0] & 0xc000) >> 14); 637 return format; 638 639 } 640 641 /** 642 * mpi3mr_build_nvme_sgl - SGL constructor for NVME 643 * encapsulated request 644 * @mrioc: Adapter instance reference 645 * @nvme_encap_request: NVMe encapsulated MPI request 646 * @drv_bufs: DMA address of the buffers to be placed in sgl 647 * @bufcnt: Number of DMA buffers 648 * 649 * This function places the DMA address of the given buffers in 650 * proper format as SGEs in the given NVMe encapsulated request. 651 * 652 * Return: 0 on success, -1 on failure 653 */ 654 static int mpi3mr_build_nvme_sgl(struct mpi3mr_ioc *mrioc, 655 struct mpi3_nvme_encapsulated_request *nvme_encap_request, 656 struct mpi3mr_buf_map *drv_bufs, u8 bufcnt) 657 { 658 struct mpi3mr_nvme_pt_sge *nvme_sgl; 659 u64 sgl_ptr; 660 u8 count; 661 size_t length = 0; 662 struct mpi3mr_buf_map *drv_buf_iter = drv_bufs; 663 u64 sgemod_mask = ((u64)((mrioc->facts.sge_mod_mask) << 664 mrioc->facts.sge_mod_shift) << 32); 665 u64 sgemod_val = ((u64)(mrioc->facts.sge_mod_value) << 666 mrioc->facts.sge_mod_shift) << 32; 667 668 /* 669 * Not all commands require a data transfer. If no data, just return 670 * without constructing any sgl. 671 */ 672 for (count = 0; count < bufcnt; count++, drv_buf_iter++) { 673 if (drv_buf_iter->data_dir == DMA_NONE) 674 continue; 675 sgl_ptr = (u64)drv_buf_iter->kern_buf_dma; 676 length = drv_buf_iter->kern_buf_len; 677 break; 678 } 679 if (!length) 680 return 0; 681 682 if (sgl_ptr & sgemod_mask) { 683 dprint_bsg_err(mrioc, 684 "%s: SGL address collides with SGE modifier\n", 685 __func__); 686 return -1; 687 } 688 689 sgl_ptr &= ~sgemod_mask; 690 sgl_ptr |= sgemod_val; 691 nvme_sgl = (struct mpi3mr_nvme_pt_sge *) 692 ((u8 *)(nvme_encap_request->command) + MPI3MR_NVME_CMD_SGL_OFFSET); 693 memset(nvme_sgl, 0, sizeof(struct mpi3mr_nvme_pt_sge)); 694 nvme_sgl->base_addr = sgl_ptr; 695 nvme_sgl->length = length; 696 return 0; 697 } 698 699 /** 700 * mpi3mr_build_nvme_prp - PRP constructor for NVME 701 * encapsulated request 702 * @mrioc: Adapter instance reference 703 * @nvme_encap_request: NVMe encapsulated MPI request 704 * @drv_bufs: DMA address of the buffers to be placed in SGL 705 * @bufcnt: Number of DMA buffers 706 * 707 * This function places the DMA address of the given buffers in 708 * proper format as PRP entries in the given NVMe encapsulated 709 * request. 710 * 711 * Return: 0 on success, -1 on failure 712 */ 713 static int mpi3mr_build_nvme_prp(struct mpi3mr_ioc *mrioc, 714 struct mpi3_nvme_encapsulated_request *nvme_encap_request, 715 struct mpi3mr_buf_map *drv_bufs, u8 bufcnt) 716 { 717 int prp_size = MPI3MR_NVME_PRP_SIZE; 718 __le64 *prp_entry, *prp1_entry, *prp2_entry; 719 __le64 *prp_page; 720 dma_addr_t prp_entry_dma, prp_page_dma, dma_addr; 721 u32 offset, entry_len, dev_pgsz; 722 u32 page_mask_result, page_mask; 723 size_t length = 0; 724 u8 count; 725 struct mpi3mr_buf_map *drv_buf_iter = drv_bufs; 726 u64 sgemod_mask = ((u64)((mrioc->facts.sge_mod_mask) << 727 mrioc->facts.sge_mod_shift) << 32); 728 u64 sgemod_val = ((u64)(mrioc->facts.sge_mod_value) << 729 mrioc->facts.sge_mod_shift) << 32; 730 u16 dev_handle = nvme_encap_request->dev_handle; 731 struct mpi3mr_tgt_dev *tgtdev; 732 733 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle); 734 if (!tgtdev) { 735 dprint_bsg_err(mrioc, "%s: invalid device handle 0x%04x\n", 736 __func__, dev_handle); 737 return -1; 738 } 739 740 if (tgtdev->dev_spec.pcie_inf.pgsz == 0) { 741 dprint_bsg_err(mrioc, 742 "%s: NVMe device page size is zero for handle 0x%04x\n", 743 __func__, dev_handle); 744 mpi3mr_tgtdev_put(tgtdev); 745 return -1; 746 } 747 748 dev_pgsz = 1 << (tgtdev->dev_spec.pcie_inf.pgsz); 749 mpi3mr_tgtdev_put(tgtdev); 750 751 /* 752 * Not all commands require a data transfer. If no data, just return 753 * without constructing any PRP. 754 */ 755 for (count = 0; count < bufcnt; count++, drv_buf_iter++) { 756 if (drv_buf_iter->data_dir == DMA_NONE) 757 continue; 758 dma_addr = drv_buf_iter->kern_buf_dma; 759 length = drv_buf_iter->kern_buf_len; 760 break; 761 } 762 763 if (!length) 764 return 0; 765 766 mrioc->prp_sz = 0; 767 mrioc->prp_list_virt = dma_alloc_coherent(&mrioc->pdev->dev, 768 dev_pgsz, &mrioc->prp_list_dma, GFP_KERNEL); 769 770 if (!mrioc->prp_list_virt) 771 return -1; 772 mrioc->prp_sz = dev_pgsz; 773 774 /* 775 * Set pointers to PRP1 and PRP2, which are in the NVMe command. 776 * PRP1 is located at a 24 byte offset from the start of the NVMe 777 * command. Then set the current PRP entry pointer to PRP1. 778 */ 779 prp1_entry = (__le64 *)((u8 *)(nvme_encap_request->command) + 780 MPI3MR_NVME_CMD_PRP1_OFFSET); 781 prp2_entry = (__le64 *)((u8 *)(nvme_encap_request->command) + 782 MPI3MR_NVME_CMD_PRP2_OFFSET); 783 prp_entry = prp1_entry; 784 /* 785 * For the PRP entries, use the specially allocated buffer of 786 * contiguous memory. 787 */ 788 prp_page = (__le64 *)mrioc->prp_list_virt; 789 prp_page_dma = mrioc->prp_list_dma; 790 791 /* 792 * Check if we are within 1 entry of a page boundary we don't 793 * want our first entry to be a PRP List entry. 794 */ 795 page_mask = dev_pgsz - 1; 796 page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask; 797 if (!page_mask_result) { 798 dprint_bsg_err(mrioc, "%s: PRP page is not page aligned\n", 799 __func__); 800 goto err_out; 801 } 802 803 /* 804 * Set PRP physical pointer, which initially points to the current PRP 805 * DMA memory page. 806 */ 807 prp_entry_dma = prp_page_dma; 808 809 810 /* Loop while the length is not zero. */ 811 while (length) { 812 page_mask_result = (prp_entry_dma + prp_size) & page_mask; 813 if (!page_mask_result && (length > dev_pgsz)) { 814 dprint_bsg_err(mrioc, 815 "%s: single PRP page is not sufficient\n", 816 __func__); 817 goto err_out; 818 } 819 820 /* Need to handle if entry will be part of a page. */ 821 offset = dma_addr & page_mask; 822 entry_len = dev_pgsz - offset; 823 824 if (prp_entry == prp1_entry) { 825 /* 826 * Must fill in the first PRP pointer (PRP1) before 827 * moving on. 828 */ 829 *prp1_entry = cpu_to_le64(dma_addr); 830 if (*prp1_entry & sgemod_mask) { 831 dprint_bsg_err(mrioc, 832 "%s: PRP1 address collides with SGE modifier\n", 833 __func__); 834 goto err_out; 835 } 836 *prp1_entry &= ~sgemod_mask; 837 *prp1_entry |= sgemod_val; 838 839 /* 840 * Now point to the second PRP entry within the 841 * command (PRP2). 842 */ 843 prp_entry = prp2_entry; 844 } else if (prp_entry == prp2_entry) { 845 /* 846 * Should the PRP2 entry be a PRP List pointer or just 847 * a regular PRP pointer? If there is more than one 848 * more page of data, must use a PRP List pointer. 849 */ 850 if (length > dev_pgsz) { 851 /* 852 * PRP2 will contain a PRP List pointer because 853 * more PRP's are needed with this command. The 854 * list will start at the beginning of the 855 * contiguous buffer. 856 */ 857 *prp2_entry = cpu_to_le64(prp_entry_dma); 858 if (*prp2_entry & sgemod_mask) { 859 dprint_bsg_err(mrioc, 860 "%s: PRP list address collides with SGE modifier\n", 861 __func__); 862 goto err_out; 863 } 864 *prp2_entry &= ~sgemod_mask; 865 *prp2_entry |= sgemod_val; 866 867 /* 868 * The next PRP Entry will be the start of the 869 * first PRP List. 870 */ 871 prp_entry = prp_page; 872 continue; 873 } else { 874 /* 875 * After this, the PRP Entries are complete. 876 * This command uses 2 PRP's and no PRP list. 877 */ 878 *prp2_entry = cpu_to_le64(dma_addr); 879 if (*prp2_entry & sgemod_mask) { 880 dprint_bsg_err(mrioc, 881 "%s: PRP2 collides with SGE modifier\n", 882 __func__); 883 goto err_out; 884 } 885 *prp2_entry &= ~sgemod_mask; 886 *prp2_entry |= sgemod_val; 887 } 888 } else { 889 /* 890 * Put entry in list and bump the addresses. 891 * 892 * After PRP1 and PRP2 are filled in, this will fill in 893 * all remaining PRP entries in a PRP List, one per 894 * each time through the loop. 895 */ 896 *prp_entry = cpu_to_le64(dma_addr); 897 if (*prp1_entry & sgemod_mask) { 898 dprint_bsg_err(mrioc, 899 "%s: PRP address collides with SGE modifier\n", 900 __func__); 901 goto err_out; 902 } 903 *prp_entry &= ~sgemod_mask; 904 *prp_entry |= sgemod_val; 905 prp_entry++; 906 prp_entry_dma++; 907 } 908 909 /* 910 * Bump the phys address of the command's data buffer by the 911 * entry_len. 912 */ 913 dma_addr += entry_len; 914 915 /* decrement length accounting for last partial page. */ 916 if (entry_len > length) 917 length = 0; 918 else 919 length -= entry_len; 920 } 921 return 0; 922 err_out: 923 if (mrioc->prp_list_virt) { 924 dma_free_coherent(&mrioc->pdev->dev, mrioc->prp_sz, 925 mrioc->prp_list_virt, mrioc->prp_list_dma); 926 mrioc->prp_list_virt = NULL; 927 } 928 return -1; 929 } 930 /** 931 * mpi3mr_bsg_process_mpt_cmds - MPI Pass through BSG handler 932 * @job: BSG job reference 933 * 934 * This function is the top level handler for MPI Pass through 935 * command, this does basic validation of the input data buffers, 936 * identifies the given buffer types and MPI command, allocates 937 * DMAable memory for user given buffers, construstcs SGL 938 * properly and passes the command to the firmware. 939 * 940 * Once the MPI command is completed the driver copies the data 941 * if any and reply, sense information to user provided buffers. 942 * If the command is timed out then issues controller reset 943 * prior to returning. 944 * 945 * Return: 0 on success and proper error codes on failure 946 */ 947 948 static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job, unsigned int *reply_payload_rcv_len) 949 { 950 long rval = -EINVAL; 951 952 struct mpi3mr_ioc *mrioc = NULL; 953 u8 *mpi_req = NULL, *sense_buff_k = NULL; 954 u8 mpi_msg_size = 0; 955 struct mpi3mr_bsg_packet *bsg_req = NULL; 956 struct mpi3mr_bsg_mptcmd *karg; 957 struct mpi3mr_buf_entry *buf_entries = NULL; 958 struct mpi3mr_buf_map *drv_bufs = NULL, *drv_buf_iter = NULL; 959 u8 count, bufcnt = 0, is_rmcb = 0, is_rmrb = 0, din_cnt = 0, dout_cnt = 0; 960 u8 invalid_be = 0, erb_offset = 0xFF, mpirep_offset = 0xFF, sg_entries = 0; 961 u8 block_io = 0, resp_code = 0, nvme_fmt = 0; 962 struct mpi3_request_header *mpi_header = NULL; 963 struct mpi3_status_reply_descriptor *status_desc; 964 struct mpi3_scsi_task_mgmt_request *tm_req; 965 u32 erbsz = MPI3MR_SENSE_BUF_SZ, tmplen; 966 u16 dev_handle; 967 struct mpi3mr_tgt_dev *tgtdev; 968 struct mpi3mr_stgt_priv_data *stgt_priv = NULL; 969 struct mpi3mr_bsg_in_reply_buf *bsg_reply_buf = NULL; 970 u32 din_size = 0, dout_size = 0; 971 u8 *din_buf = NULL, *dout_buf = NULL; 972 u8 *sgl_iter = NULL, *sgl_din_iter = NULL, *sgl_dout_iter = NULL; 973 974 bsg_req = job->request; 975 karg = (struct mpi3mr_bsg_mptcmd *)&bsg_req->cmd.mptcmd; 976 977 mrioc = mpi3mr_bsg_verify_adapter(karg->mrioc_id); 978 if (!mrioc) 979 return -ENODEV; 980 981 if (karg->timeout < MPI3MR_APP_DEFAULT_TIMEOUT) 982 karg->timeout = MPI3MR_APP_DEFAULT_TIMEOUT; 983 984 mpi_req = kzalloc(MPI3MR_ADMIN_REQ_FRAME_SZ, GFP_KERNEL); 985 if (!mpi_req) 986 return -ENOMEM; 987 mpi_header = (struct mpi3_request_header *)mpi_req; 988 989 bufcnt = karg->buf_entry_list.num_of_entries; 990 drv_bufs = kzalloc((sizeof(*drv_bufs) * bufcnt), GFP_KERNEL); 991 if (!drv_bufs) { 992 rval = -ENOMEM; 993 goto out; 994 } 995 996 dout_buf = kzalloc(job->request_payload.payload_len, 997 GFP_KERNEL); 998 if (!dout_buf) { 999 rval = -ENOMEM; 1000 goto out; 1001 } 1002 1003 din_buf = kzalloc(job->reply_payload.payload_len, 1004 GFP_KERNEL); 1005 if (!din_buf) { 1006 rval = -ENOMEM; 1007 goto out; 1008 } 1009 1010 sg_copy_to_buffer(job->request_payload.sg_list, 1011 job->request_payload.sg_cnt, 1012 dout_buf, job->request_payload.payload_len); 1013 1014 buf_entries = karg->buf_entry_list.buf_entry; 1015 sgl_din_iter = din_buf; 1016 sgl_dout_iter = dout_buf; 1017 drv_buf_iter = drv_bufs; 1018 1019 for (count = 0; count < bufcnt; count++, buf_entries++, drv_buf_iter++) { 1020 1021 if (sgl_dout_iter > (dout_buf + job->request_payload.payload_len)) { 1022 dprint_bsg_err(mrioc, "%s: data_out buffer length mismatch\n", 1023 __func__); 1024 rval = -EINVAL; 1025 goto out; 1026 } 1027 if (sgl_din_iter > (din_buf + job->reply_payload.payload_len)) { 1028 dprint_bsg_err(mrioc, "%s: data_in buffer length mismatch\n", 1029 __func__); 1030 rval = -EINVAL; 1031 goto out; 1032 } 1033 1034 switch (buf_entries->buf_type) { 1035 case MPI3MR_BSG_BUFTYPE_RAIDMGMT_CMD: 1036 sgl_iter = sgl_dout_iter; 1037 sgl_dout_iter += buf_entries->buf_len; 1038 drv_buf_iter->data_dir = DMA_TO_DEVICE; 1039 is_rmcb = 1; 1040 if (count != 0) 1041 invalid_be = 1; 1042 break; 1043 case MPI3MR_BSG_BUFTYPE_RAIDMGMT_RESP: 1044 sgl_iter = sgl_din_iter; 1045 sgl_din_iter += buf_entries->buf_len; 1046 drv_buf_iter->data_dir = DMA_FROM_DEVICE; 1047 is_rmrb = 1; 1048 if (count != 1 || !is_rmcb) 1049 invalid_be = 1; 1050 break; 1051 case MPI3MR_BSG_BUFTYPE_DATA_IN: 1052 sgl_iter = sgl_din_iter; 1053 sgl_din_iter += buf_entries->buf_len; 1054 drv_buf_iter->data_dir = DMA_FROM_DEVICE; 1055 din_cnt++; 1056 din_size += drv_buf_iter->bsg_buf_len; 1057 if ((din_cnt > 1) && !is_rmcb) 1058 invalid_be = 1; 1059 break; 1060 case MPI3MR_BSG_BUFTYPE_DATA_OUT: 1061 sgl_iter = sgl_dout_iter; 1062 sgl_dout_iter += buf_entries->buf_len; 1063 drv_buf_iter->data_dir = DMA_TO_DEVICE; 1064 dout_cnt++; 1065 dout_size += drv_buf_iter->bsg_buf_len; 1066 if ((dout_cnt > 1) && !is_rmcb) 1067 invalid_be = 1; 1068 break; 1069 case MPI3MR_BSG_BUFTYPE_MPI_REPLY: 1070 sgl_iter = sgl_din_iter; 1071 sgl_din_iter += buf_entries->buf_len; 1072 drv_buf_iter->data_dir = DMA_NONE; 1073 mpirep_offset = count; 1074 break; 1075 case MPI3MR_BSG_BUFTYPE_ERR_RESPONSE: 1076 sgl_iter = sgl_din_iter; 1077 sgl_din_iter += buf_entries->buf_len; 1078 drv_buf_iter->data_dir = DMA_NONE; 1079 erb_offset = count; 1080 break; 1081 case MPI3MR_BSG_BUFTYPE_MPI_REQUEST: 1082 sgl_iter = sgl_dout_iter; 1083 sgl_dout_iter += buf_entries->buf_len; 1084 drv_buf_iter->data_dir = DMA_NONE; 1085 mpi_msg_size = buf_entries->buf_len; 1086 if ((!mpi_msg_size || (mpi_msg_size % 4)) || 1087 (mpi_msg_size > MPI3MR_ADMIN_REQ_FRAME_SZ)) { 1088 dprint_bsg_err(mrioc, "%s: invalid MPI message size\n", 1089 __func__); 1090 rval = -EINVAL; 1091 goto out; 1092 } 1093 memcpy(mpi_req, sgl_iter, buf_entries->buf_len); 1094 break; 1095 default: 1096 invalid_be = 1; 1097 break; 1098 } 1099 if (invalid_be) { 1100 dprint_bsg_err(mrioc, "%s: invalid buffer entries passed\n", 1101 __func__); 1102 rval = -EINVAL; 1103 goto out; 1104 } 1105 1106 drv_buf_iter->bsg_buf = sgl_iter; 1107 drv_buf_iter->bsg_buf_len = buf_entries->buf_len; 1108 1109 } 1110 if (!is_rmcb && (dout_cnt || din_cnt)) { 1111 sg_entries = dout_cnt + din_cnt; 1112 if (((mpi_msg_size) + (sg_entries * 1113 sizeof(struct mpi3_sge_common))) > MPI3MR_ADMIN_REQ_FRAME_SZ) { 1114 dprint_bsg_err(mrioc, 1115 "%s:%d: invalid message size passed\n", 1116 __func__, __LINE__); 1117 rval = -EINVAL; 1118 goto out; 1119 } 1120 } 1121 if (din_size > MPI3MR_MAX_APP_XFER_SIZE) { 1122 dprint_bsg_err(mrioc, 1123 "%s:%d: invalid data transfer size passed for function 0x%x din_size=%d\n", 1124 __func__, __LINE__, mpi_header->function, din_size); 1125 rval = -EINVAL; 1126 goto out; 1127 } 1128 if (dout_size > MPI3MR_MAX_APP_XFER_SIZE) { 1129 dprint_bsg_err(mrioc, 1130 "%s:%d: invalid data transfer size passed for function 0x%x dout_size = %d\n", 1131 __func__, __LINE__, mpi_header->function, dout_size); 1132 rval = -EINVAL; 1133 goto out; 1134 } 1135 1136 drv_buf_iter = drv_bufs; 1137 for (count = 0; count < bufcnt; count++, drv_buf_iter++) { 1138 if (drv_buf_iter->data_dir == DMA_NONE) 1139 continue; 1140 1141 drv_buf_iter->kern_buf_len = drv_buf_iter->bsg_buf_len; 1142 if (is_rmcb && !count) 1143 drv_buf_iter->kern_buf_len += ((dout_cnt + din_cnt) * 1144 sizeof(struct mpi3_sge_common)); 1145 1146 if (!drv_buf_iter->kern_buf_len) 1147 continue; 1148 1149 drv_buf_iter->kern_buf = dma_alloc_coherent(&mrioc->pdev->dev, 1150 drv_buf_iter->kern_buf_len, &drv_buf_iter->kern_buf_dma, 1151 GFP_KERNEL); 1152 if (!drv_buf_iter->kern_buf) { 1153 rval = -ENOMEM; 1154 goto out; 1155 } 1156 if (drv_buf_iter->data_dir == DMA_TO_DEVICE) { 1157 tmplen = min(drv_buf_iter->kern_buf_len, 1158 drv_buf_iter->bsg_buf_len); 1159 memcpy(drv_buf_iter->kern_buf, drv_buf_iter->bsg_buf, tmplen); 1160 } 1161 } 1162 1163 if (erb_offset != 0xFF) { 1164 sense_buff_k = kzalloc(erbsz, GFP_KERNEL); 1165 if (!sense_buff_k) { 1166 rval = -ENOMEM; 1167 goto out; 1168 } 1169 } 1170 1171 if (mutex_lock_interruptible(&mrioc->bsg_cmds.mutex)) { 1172 rval = -ERESTARTSYS; 1173 goto out; 1174 } 1175 if (mrioc->bsg_cmds.state & MPI3MR_CMD_PENDING) { 1176 rval = -EAGAIN; 1177 dprint_bsg_err(mrioc, "%s: command is in use\n", __func__); 1178 mutex_unlock(&mrioc->bsg_cmds.mutex); 1179 goto out; 1180 } 1181 if (mrioc->unrecoverable) { 1182 dprint_bsg_err(mrioc, "%s: unrecoverable controller\n", 1183 __func__); 1184 rval = -EFAULT; 1185 mutex_unlock(&mrioc->bsg_cmds.mutex); 1186 goto out; 1187 } 1188 if (mrioc->reset_in_progress) { 1189 dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__); 1190 rval = -EAGAIN; 1191 mutex_unlock(&mrioc->bsg_cmds.mutex); 1192 goto out; 1193 } 1194 if (mrioc->stop_bsgs) { 1195 dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__); 1196 rval = -EAGAIN; 1197 mutex_unlock(&mrioc->bsg_cmds.mutex); 1198 goto out; 1199 } 1200 1201 if (mpi_header->function == MPI3_BSG_FUNCTION_NVME_ENCAPSULATED) { 1202 nvme_fmt = mpi3mr_get_nvme_data_fmt( 1203 (struct mpi3_nvme_encapsulated_request *)mpi_req); 1204 if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_PRP) { 1205 if (mpi3mr_build_nvme_prp(mrioc, 1206 (struct mpi3_nvme_encapsulated_request *)mpi_req, 1207 drv_bufs, bufcnt)) { 1208 rval = -ENOMEM; 1209 mutex_unlock(&mrioc->bsg_cmds.mutex); 1210 goto out; 1211 } 1212 } else if (nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL1 || 1213 nvme_fmt == MPI3MR_NVME_DATA_FORMAT_SGL2) { 1214 if (mpi3mr_build_nvme_sgl(mrioc, 1215 (struct mpi3_nvme_encapsulated_request *)mpi_req, 1216 drv_bufs, bufcnt)) { 1217 rval = -EINVAL; 1218 mutex_unlock(&mrioc->bsg_cmds.mutex); 1219 goto out; 1220 } 1221 } else { 1222 dprint_bsg_err(mrioc, 1223 "%s:invalid NVMe command format\n", __func__); 1224 rval = -EINVAL; 1225 mutex_unlock(&mrioc->bsg_cmds.mutex); 1226 goto out; 1227 } 1228 } else { 1229 mpi3mr_bsg_build_sgl(mpi_req, (mpi_msg_size), 1230 drv_bufs, bufcnt, is_rmcb, is_rmrb, 1231 (dout_cnt + din_cnt)); 1232 } 1233 1234 if (mpi_header->function == MPI3_BSG_FUNCTION_SCSI_TASK_MGMT) { 1235 tm_req = (struct mpi3_scsi_task_mgmt_request *)mpi_req; 1236 if (tm_req->task_type != 1237 MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK) { 1238 dev_handle = tm_req->dev_handle; 1239 block_io = 1; 1240 } 1241 } 1242 if (block_io) { 1243 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle); 1244 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) { 1245 stgt_priv = (struct mpi3mr_stgt_priv_data *) 1246 tgtdev->starget->hostdata; 1247 atomic_inc(&stgt_priv->block_io); 1248 mpi3mr_tgtdev_put(tgtdev); 1249 } 1250 } 1251 1252 mrioc->bsg_cmds.state = MPI3MR_CMD_PENDING; 1253 mrioc->bsg_cmds.is_waiting = 1; 1254 mrioc->bsg_cmds.callback = NULL; 1255 mrioc->bsg_cmds.is_sense = 0; 1256 mrioc->bsg_cmds.sensebuf = sense_buff_k; 1257 memset(mrioc->bsg_cmds.reply, 0, mrioc->reply_sz); 1258 mpi_header->host_tag = cpu_to_le16(MPI3MR_HOSTTAG_BSG_CMDS); 1259 if (mrioc->logging_level & MPI3_DEBUG_BSG_INFO) { 1260 dprint_bsg_info(mrioc, 1261 "%s: posting bsg request to the controller\n", __func__); 1262 dprint_dump(mpi_req, MPI3MR_ADMIN_REQ_FRAME_SZ, 1263 "bsg_mpi3_req"); 1264 if (mpi_header->function == MPI3_BSG_FUNCTION_MGMT_PASSTHROUGH) { 1265 drv_buf_iter = &drv_bufs[0]; 1266 dprint_dump(drv_buf_iter->kern_buf, 1267 drv_buf_iter->kern_buf_len, "mpi3_mgmt_req"); 1268 } 1269 } 1270 1271 init_completion(&mrioc->bsg_cmds.done); 1272 rval = mpi3mr_admin_request_post(mrioc, mpi_req, 1273 MPI3MR_ADMIN_REQ_FRAME_SZ, 0); 1274 1275 1276 if (rval) { 1277 mrioc->bsg_cmds.is_waiting = 0; 1278 dprint_bsg_err(mrioc, 1279 "%s: posting bsg request is failed\n", __func__); 1280 rval = -EAGAIN; 1281 goto out_unlock; 1282 } 1283 wait_for_completion_timeout(&mrioc->bsg_cmds.done, 1284 (karg->timeout * HZ)); 1285 if (block_io && stgt_priv) 1286 atomic_dec(&stgt_priv->block_io); 1287 if (!(mrioc->bsg_cmds.state & MPI3MR_CMD_COMPLETE)) { 1288 mrioc->bsg_cmds.is_waiting = 0; 1289 rval = -EAGAIN; 1290 if (mrioc->bsg_cmds.state & MPI3MR_CMD_RESET) 1291 goto out_unlock; 1292 dprint_bsg_err(mrioc, 1293 "%s: bsg request timedout after %d seconds\n", __func__, 1294 karg->timeout); 1295 if (mrioc->logging_level & MPI3_DEBUG_BSG_ERROR) { 1296 dprint_dump(mpi_req, MPI3MR_ADMIN_REQ_FRAME_SZ, 1297 "bsg_mpi3_req"); 1298 if (mpi_header->function == 1299 MPI3_BSG_FUNCTION_MGMT_PASSTHROUGH) { 1300 drv_buf_iter = &drv_bufs[0]; 1301 dprint_dump(drv_buf_iter->kern_buf, 1302 drv_buf_iter->kern_buf_len, "mpi3_mgmt_req"); 1303 } 1304 } 1305 1306 if ((mpi_header->function == MPI3_BSG_FUNCTION_NVME_ENCAPSULATED) || 1307 (mpi_header->function == MPI3_BSG_FUNCTION_SCSI_IO)) 1308 mpi3mr_issue_tm(mrioc, 1309 MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 1310 mpi_header->function_dependent, 0, 1311 MPI3MR_HOSTTAG_BLK_TMS, MPI3MR_RESETTM_TIMEOUT, 1312 &mrioc->host_tm_cmds, &resp_code, NULL); 1313 if (!(mrioc->bsg_cmds.state & MPI3MR_CMD_COMPLETE) && 1314 !(mrioc->bsg_cmds.state & MPI3MR_CMD_RESET)) 1315 mpi3mr_soft_reset_handler(mrioc, 1316 MPI3MR_RESET_FROM_APP_TIMEOUT, 1); 1317 goto out_unlock; 1318 } 1319 dprint_bsg_info(mrioc, "%s: bsg request is completed\n", __func__); 1320 1321 if (mrioc->prp_list_virt) { 1322 dma_free_coherent(&mrioc->pdev->dev, mrioc->prp_sz, 1323 mrioc->prp_list_virt, mrioc->prp_list_dma); 1324 mrioc->prp_list_virt = NULL; 1325 } 1326 1327 if ((mrioc->bsg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) 1328 != MPI3_IOCSTATUS_SUCCESS) { 1329 dprint_bsg_info(mrioc, 1330 "%s: command failed, ioc_status(0x%04x) log_info(0x%08x)\n", 1331 __func__, 1332 (mrioc->bsg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), 1333 mrioc->bsg_cmds.ioc_loginfo); 1334 } 1335 1336 if ((mpirep_offset != 0xFF) && 1337 drv_bufs[mpirep_offset].bsg_buf_len) { 1338 drv_buf_iter = &drv_bufs[mpirep_offset]; 1339 drv_buf_iter->kern_buf_len = (sizeof(*bsg_reply_buf) - 1 + 1340 mrioc->reply_sz); 1341 bsg_reply_buf = kzalloc(drv_buf_iter->kern_buf_len, GFP_KERNEL); 1342 1343 if (!bsg_reply_buf) { 1344 rval = -ENOMEM; 1345 goto out_unlock; 1346 } 1347 if (mrioc->bsg_cmds.state & MPI3MR_CMD_REPLY_VALID) { 1348 bsg_reply_buf->mpi_reply_type = 1349 MPI3MR_BSG_MPI_REPLY_BUFTYPE_ADDRESS; 1350 memcpy(bsg_reply_buf->reply_buf, 1351 mrioc->bsg_cmds.reply, mrioc->reply_sz); 1352 } else { 1353 bsg_reply_buf->mpi_reply_type = 1354 MPI3MR_BSG_MPI_REPLY_BUFTYPE_STATUS; 1355 status_desc = (struct mpi3_status_reply_descriptor *) 1356 bsg_reply_buf->reply_buf; 1357 status_desc->ioc_status = mrioc->bsg_cmds.ioc_status; 1358 status_desc->ioc_log_info = mrioc->bsg_cmds.ioc_loginfo; 1359 } 1360 tmplen = min(drv_buf_iter->kern_buf_len, 1361 drv_buf_iter->bsg_buf_len); 1362 memcpy(drv_buf_iter->bsg_buf, bsg_reply_buf, tmplen); 1363 } 1364 1365 if (erb_offset != 0xFF && mrioc->bsg_cmds.sensebuf && 1366 mrioc->bsg_cmds.is_sense) { 1367 drv_buf_iter = &drv_bufs[erb_offset]; 1368 tmplen = min(erbsz, drv_buf_iter->bsg_buf_len); 1369 memcpy(drv_buf_iter->bsg_buf, sense_buff_k, tmplen); 1370 } 1371 1372 drv_buf_iter = drv_bufs; 1373 for (count = 0; count < bufcnt; count++, drv_buf_iter++) { 1374 if (drv_buf_iter->data_dir == DMA_NONE) 1375 continue; 1376 if (drv_buf_iter->data_dir == DMA_FROM_DEVICE) { 1377 tmplen = min(drv_buf_iter->kern_buf_len, 1378 drv_buf_iter->bsg_buf_len); 1379 memcpy(drv_buf_iter->bsg_buf, 1380 drv_buf_iter->kern_buf, tmplen); 1381 } 1382 } 1383 1384 out_unlock: 1385 if (din_buf) { 1386 *reply_payload_rcv_len = 1387 sg_copy_from_buffer(job->reply_payload.sg_list, 1388 job->reply_payload.sg_cnt, 1389 din_buf, job->reply_payload.payload_len); 1390 } 1391 mrioc->bsg_cmds.is_sense = 0; 1392 mrioc->bsg_cmds.sensebuf = NULL; 1393 mrioc->bsg_cmds.state = MPI3MR_CMD_NOTUSED; 1394 mutex_unlock(&mrioc->bsg_cmds.mutex); 1395 out: 1396 kfree(sense_buff_k); 1397 kfree(dout_buf); 1398 kfree(din_buf); 1399 kfree(mpi_req); 1400 if (drv_bufs) { 1401 drv_buf_iter = drv_bufs; 1402 for (count = 0; count < bufcnt; count++, drv_buf_iter++) { 1403 if (drv_buf_iter->kern_buf && drv_buf_iter->kern_buf_dma) 1404 dma_free_coherent(&mrioc->pdev->dev, 1405 drv_buf_iter->kern_buf_len, 1406 drv_buf_iter->kern_buf, 1407 drv_buf_iter->kern_buf_dma); 1408 } 1409 kfree(drv_bufs); 1410 } 1411 kfree(bsg_reply_buf); 1412 return rval; 1413 } 1414 1415 /** 1416 * mpi3mr_app_save_logdata - Save Log Data events 1417 * @mrioc: Adapter instance reference 1418 * @event_data: event data associated with log data event 1419 * @event_data_size: event data size to copy 1420 * 1421 * If log data event caching is enabled by the applicatiobns, 1422 * then this function saves the log data in the circular queue 1423 * and Sends async signal SIGIO to indicate there is an async 1424 * event from the firmware to the event monitoring applications. 1425 * 1426 * Return:Nothing 1427 */ 1428 void mpi3mr_app_save_logdata(struct mpi3mr_ioc *mrioc, char *event_data, 1429 u16 event_data_size) 1430 { 1431 u32 index = mrioc->logdata_buf_idx, sz; 1432 struct mpi3mr_logdata_entry *entry; 1433 1434 if (!(mrioc->logdata_buf)) 1435 return; 1436 1437 entry = (struct mpi3mr_logdata_entry *) 1438 (mrioc->logdata_buf + (index * mrioc->logdata_entry_sz)); 1439 entry->valid_entry = 1; 1440 sz = min(mrioc->logdata_entry_sz, event_data_size); 1441 memcpy(entry->data, event_data, sz); 1442 mrioc->logdata_buf_idx = 1443 ((++index) % MPI3MR_BSG_LOGDATA_MAX_ENTRIES); 1444 atomic64_inc(&event_counter); 1445 } 1446 1447 /** 1448 * mpi3mr_bsg_request - bsg request entry point 1449 * @job: BSG job reference 1450 * 1451 * This is driver's entry point for bsg requests 1452 * 1453 * Return: 0 on success and proper error codes on failure 1454 */ 1455 static int mpi3mr_bsg_request(struct bsg_job *job) 1456 { 1457 long rval = -EINVAL; 1458 unsigned int reply_payload_rcv_len = 0; 1459 1460 struct mpi3mr_bsg_packet *bsg_req = job->request; 1461 1462 switch (bsg_req->cmd_type) { 1463 case MPI3MR_DRV_CMD: 1464 rval = mpi3mr_bsg_process_drv_cmds(job); 1465 break; 1466 case MPI3MR_MPT_CMD: 1467 rval = mpi3mr_bsg_process_mpt_cmds(job, &reply_payload_rcv_len); 1468 break; 1469 default: 1470 pr_err("%s: unsupported BSG command(0x%08x)\n", 1471 MPI3MR_DRIVER_NAME, bsg_req->cmd_type); 1472 break; 1473 } 1474 1475 bsg_job_done(job, rval, reply_payload_rcv_len); 1476 1477 return 0; 1478 } 1479 1480 /** 1481 * mpi3mr_bsg_exit - de-registration from bsg layer 1482 * 1483 * This will be called during driver unload and all 1484 * bsg resources allocated during load will be freed. 1485 * 1486 * Return:Nothing 1487 */ 1488 void mpi3mr_bsg_exit(struct mpi3mr_ioc *mrioc) 1489 { 1490 struct device *bsg_dev = &mrioc->bsg_dev; 1491 if (!mrioc->bsg_queue) 1492 return; 1493 1494 bsg_remove_queue(mrioc->bsg_queue); 1495 mrioc->bsg_queue = NULL; 1496 1497 device_del(bsg_dev); 1498 put_device(bsg_dev); 1499 } 1500 1501 /** 1502 * mpi3mr_bsg_node_release -release bsg device node 1503 * @dev: bsg device node 1504 * 1505 * decrements bsg dev parent reference count 1506 * 1507 * Return:Nothing 1508 */ 1509 static void mpi3mr_bsg_node_release(struct device *dev) 1510 { 1511 put_device(dev->parent); 1512 } 1513 1514 /** 1515 * mpi3mr_bsg_init - registration with bsg layer 1516 * 1517 * This will be called during driver load and it will 1518 * register driver with bsg layer 1519 * 1520 * Return:Nothing 1521 */ 1522 void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc) 1523 { 1524 struct device *bsg_dev = &mrioc->bsg_dev; 1525 struct device *parent = &mrioc->shost->shost_gendev; 1526 1527 device_initialize(bsg_dev); 1528 1529 bsg_dev->parent = get_device(parent); 1530 bsg_dev->release = mpi3mr_bsg_node_release; 1531 1532 dev_set_name(bsg_dev, "mpi3mrctl%u", mrioc->id); 1533 1534 if (device_add(bsg_dev)) { 1535 ioc_err(mrioc, "%s: bsg device add failed\n", 1536 dev_name(bsg_dev)); 1537 put_device(bsg_dev); 1538 return; 1539 } 1540 1541 mrioc->bsg_queue = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), 1542 mpi3mr_bsg_request, NULL, 0); 1543 if (IS_ERR(mrioc->bsg_queue)) { 1544 ioc_err(mrioc, "%s: bsg registration failed\n", 1545 dev_name(bsg_dev)); 1546 device_del(bsg_dev); 1547 put_device(bsg_dev); 1548 return; 1549 } 1550 1551 blk_queue_max_segments(mrioc->bsg_queue, MPI3MR_MAX_APP_XFER_SEGMENTS); 1552 blk_queue_max_hw_sectors(mrioc->bsg_queue, MPI3MR_MAX_APP_XFER_SECTORS); 1553 1554 return; 1555 } 1556 1557 /** 1558 * version_fw_show - SysFS callback for firmware version read 1559 * @dev: class device 1560 * @attr: Device attributes 1561 * @buf: Buffer to copy 1562 * 1563 * Return: sysfs_emit() return after copying firmware version 1564 */ 1565 static ssize_t 1566 version_fw_show(struct device *dev, struct device_attribute *attr, 1567 char *buf) 1568 { 1569 struct Scsi_Host *shost = class_to_shost(dev); 1570 struct mpi3mr_ioc *mrioc = shost_priv(shost); 1571 struct mpi3mr_compimg_ver *fwver = &mrioc->facts.fw_ver; 1572 1573 return sysfs_emit(buf, "%d.%d.%d.%d.%05d-%05d\n", 1574 fwver->gen_major, fwver->gen_minor, fwver->ph_major, 1575 fwver->ph_minor, fwver->cust_id, fwver->build_num); 1576 } 1577 static DEVICE_ATTR_RO(version_fw); 1578 1579 /** 1580 * fw_queue_depth_show - SysFS callback for firmware max cmds 1581 * @dev: class device 1582 * @attr: Device attributes 1583 * @buf: Buffer to copy 1584 * 1585 * Return: sysfs_emit() return after copying firmware max commands 1586 */ 1587 static ssize_t 1588 fw_queue_depth_show(struct device *dev, struct device_attribute *attr, 1589 char *buf) 1590 { 1591 struct Scsi_Host *shost = class_to_shost(dev); 1592 struct mpi3mr_ioc *mrioc = shost_priv(shost); 1593 1594 return sysfs_emit(buf, "%d\n", mrioc->facts.max_reqs); 1595 } 1596 static DEVICE_ATTR_RO(fw_queue_depth); 1597 1598 /** 1599 * op_req_q_count_show - SysFS callback for request queue count 1600 * @dev: class device 1601 * @attr: Device attributes 1602 * @buf: Buffer to copy 1603 * 1604 * Return: sysfs_emit() return after copying request queue count 1605 */ 1606 static ssize_t 1607 op_req_q_count_show(struct device *dev, struct device_attribute *attr, 1608 char *buf) 1609 { 1610 struct Scsi_Host *shost = class_to_shost(dev); 1611 struct mpi3mr_ioc *mrioc = shost_priv(shost); 1612 1613 return sysfs_emit(buf, "%d\n", mrioc->num_op_req_q); 1614 } 1615 static DEVICE_ATTR_RO(op_req_q_count); 1616 1617 /** 1618 * reply_queue_count_show - SysFS callback for reply queue count 1619 * @dev: class device 1620 * @attr: Device attributes 1621 * @buf: Buffer to copy 1622 * 1623 * Return: sysfs_emit() return after copying reply queue count 1624 */ 1625 static ssize_t 1626 reply_queue_count_show(struct device *dev, struct device_attribute *attr, 1627 char *buf) 1628 { 1629 struct Scsi_Host *shost = class_to_shost(dev); 1630 struct mpi3mr_ioc *mrioc = shost_priv(shost); 1631 1632 return sysfs_emit(buf, "%d\n", mrioc->num_op_reply_q); 1633 } 1634 1635 static DEVICE_ATTR_RO(reply_queue_count); 1636 1637 /** 1638 * logging_level_show - Show controller debug level 1639 * @dev: class device 1640 * @attr: Device attributes 1641 * @buf: Buffer to copy 1642 * 1643 * A sysfs 'read/write' shost attribute, to show the current 1644 * debug log level used by the driver for the specific 1645 * controller. 1646 * 1647 * Return: sysfs_emit() return 1648 */ 1649 static ssize_t 1650 logging_level_show(struct device *dev, 1651 struct device_attribute *attr, char *buf) 1652 1653 { 1654 struct Scsi_Host *shost = class_to_shost(dev); 1655 struct mpi3mr_ioc *mrioc = shost_priv(shost); 1656 1657 return sysfs_emit(buf, "%08xh\n", mrioc->logging_level); 1658 } 1659 1660 /** 1661 * logging_level_store- Change controller debug level 1662 * @dev: class device 1663 * @attr: Device attributes 1664 * @buf: Buffer to copy 1665 * @count: size of the buffer 1666 * 1667 * A sysfs 'read/write' shost attribute, to change the current 1668 * debug log level used by the driver for the specific 1669 * controller. 1670 * 1671 * Return: strlen() return 1672 */ 1673 static ssize_t 1674 logging_level_store(struct device *dev, 1675 struct device_attribute *attr, 1676 const char *buf, size_t count) 1677 { 1678 struct Scsi_Host *shost = class_to_shost(dev); 1679 struct mpi3mr_ioc *mrioc = shost_priv(shost); 1680 int val = 0; 1681 1682 if (kstrtoint(buf, 0, &val) != 0) 1683 return -EINVAL; 1684 1685 mrioc->logging_level = val; 1686 ioc_info(mrioc, "logging_level=%08xh\n", mrioc->logging_level); 1687 return strlen(buf); 1688 } 1689 static DEVICE_ATTR_RW(logging_level); 1690 1691 /** 1692 * adapter_state_show - SysFS callback for adapter state show 1693 * @dev: class device 1694 * @attr: Device attributes 1695 * @buf: Buffer to copy 1696 * 1697 * Return: sysfs_emit() return after copying adapter state 1698 */ 1699 static ssize_t 1700 adp_state_show(struct device *dev, struct device_attribute *attr, 1701 char *buf) 1702 { 1703 struct Scsi_Host *shost = class_to_shost(dev); 1704 struct mpi3mr_ioc *mrioc = shost_priv(shost); 1705 enum mpi3mr_iocstate ioc_state; 1706 uint8_t adp_state; 1707 1708 ioc_state = mpi3mr_get_iocstate(mrioc); 1709 if (ioc_state == MRIOC_STATE_UNRECOVERABLE) 1710 adp_state = MPI3MR_BSG_ADPSTATE_UNRECOVERABLE; 1711 else if ((mrioc->reset_in_progress) || (mrioc->stop_bsgs)) 1712 adp_state = MPI3MR_BSG_ADPSTATE_IN_RESET; 1713 else if (ioc_state == MRIOC_STATE_FAULT) 1714 adp_state = MPI3MR_BSG_ADPSTATE_FAULT; 1715 else 1716 adp_state = MPI3MR_BSG_ADPSTATE_OPERATIONAL; 1717 1718 return sysfs_emit(buf, "%u\n", adp_state); 1719 } 1720 1721 static DEVICE_ATTR_RO(adp_state); 1722 1723 static struct attribute *mpi3mr_host_attrs[] = { 1724 &dev_attr_version_fw.attr, 1725 &dev_attr_fw_queue_depth.attr, 1726 &dev_attr_op_req_q_count.attr, 1727 &dev_attr_reply_queue_count.attr, 1728 &dev_attr_logging_level.attr, 1729 &dev_attr_adp_state.attr, 1730 NULL, 1731 }; 1732 1733 static const struct attribute_group mpi3mr_host_attr_group = { 1734 .attrs = mpi3mr_host_attrs 1735 }; 1736 1737 const struct attribute_group *mpi3mr_host_groups[] = { 1738 &mpi3mr_host_attr_group, 1739 NULL, 1740 }; 1741 1742 1743 /* 1744 * SCSI Device attributes under sysfs 1745 */ 1746 1747 /** 1748 * sas_address_show - SysFS callback for dev SASaddress display 1749 * @dev: class device 1750 * @attr: Device attributes 1751 * @buf: Buffer to copy 1752 * 1753 * Return: sysfs_emit() return after copying SAS address of the 1754 * specific SAS/SATA end device. 1755 */ 1756 static ssize_t 1757 sas_address_show(struct device *dev, struct device_attribute *attr, 1758 char *buf) 1759 { 1760 struct scsi_device *sdev = to_scsi_device(dev); 1761 struct mpi3mr_sdev_priv_data *sdev_priv_data; 1762 struct mpi3mr_stgt_priv_data *tgt_priv_data; 1763 struct mpi3mr_tgt_dev *tgtdev; 1764 1765 sdev_priv_data = sdev->hostdata; 1766 if (!sdev_priv_data) 1767 return 0; 1768 1769 tgt_priv_data = sdev_priv_data->tgt_priv_data; 1770 if (!tgt_priv_data) 1771 return 0; 1772 tgtdev = tgt_priv_data->tgt_dev; 1773 if (!tgtdev || tgtdev->dev_type != MPI3_DEVICE_DEVFORM_SAS_SATA) 1774 return 0; 1775 return sysfs_emit(buf, "0x%016llx\n", 1776 (unsigned long long)tgtdev->dev_spec.sas_sata_inf.sas_address); 1777 } 1778 1779 static DEVICE_ATTR_RO(sas_address); 1780 1781 /** 1782 * device_handle_show - SysFS callback for device handle display 1783 * @dev: class device 1784 * @attr: Device attributes 1785 * @buf: Buffer to copy 1786 * 1787 * Return: sysfs_emit() return after copying firmware internal 1788 * device handle of the specific device. 1789 */ 1790 static ssize_t 1791 device_handle_show(struct device *dev, struct device_attribute *attr, 1792 char *buf) 1793 { 1794 struct scsi_device *sdev = to_scsi_device(dev); 1795 struct mpi3mr_sdev_priv_data *sdev_priv_data; 1796 struct mpi3mr_stgt_priv_data *tgt_priv_data; 1797 struct mpi3mr_tgt_dev *tgtdev; 1798 1799 sdev_priv_data = sdev->hostdata; 1800 if (!sdev_priv_data) 1801 return 0; 1802 1803 tgt_priv_data = sdev_priv_data->tgt_priv_data; 1804 if (!tgt_priv_data) 1805 return 0; 1806 tgtdev = tgt_priv_data->tgt_dev; 1807 if (!tgtdev) 1808 return 0; 1809 return sysfs_emit(buf, "0x%04x\n", tgtdev->dev_handle); 1810 } 1811 1812 static DEVICE_ATTR_RO(device_handle); 1813 1814 /** 1815 * persistent_id_show - SysFS callback for persisten ID display 1816 * @dev: class device 1817 * @attr: Device attributes 1818 * @buf: Buffer to copy 1819 * 1820 * Return: sysfs_emit() return after copying persistent ID of the 1821 * of the specific device. 1822 */ 1823 static ssize_t 1824 persistent_id_show(struct device *dev, struct device_attribute *attr, 1825 char *buf) 1826 { 1827 struct scsi_device *sdev = to_scsi_device(dev); 1828 struct mpi3mr_sdev_priv_data *sdev_priv_data; 1829 struct mpi3mr_stgt_priv_data *tgt_priv_data; 1830 struct mpi3mr_tgt_dev *tgtdev; 1831 1832 sdev_priv_data = sdev->hostdata; 1833 if (!sdev_priv_data) 1834 return 0; 1835 1836 tgt_priv_data = sdev_priv_data->tgt_priv_data; 1837 if (!tgt_priv_data) 1838 return 0; 1839 tgtdev = tgt_priv_data->tgt_dev; 1840 if (!tgtdev) 1841 return 0; 1842 return sysfs_emit(buf, "%d\n", tgtdev->perst_id); 1843 } 1844 static DEVICE_ATTR_RO(persistent_id); 1845 1846 static struct attribute *mpi3mr_dev_attrs[] = { 1847 &dev_attr_sas_address.attr, 1848 &dev_attr_device_handle.attr, 1849 &dev_attr_persistent_id.attr, 1850 NULL, 1851 }; 1852 1853 static const struct attribute_group mpi3mr_dev_attr_group = { 1854 .attrs = mpi3mr_dev_attrs 1855 }; 1856 1857 const struct attribute_group *mpi3mr_dev_groups[] = { 1858 &mpi3mr_dev_attr_group, 1859 NULL, 1860 }; 1861