1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Copyright (c) 2015 Linaro Ltd. 4 * Copyright (c) 2015 Hisilicon Limited. 5 */ 6 7 #include "hisi_sas.h" 8 #define DRV_NAME "hisi_sas" 9 10 #define DEV_IS_GONE(dev) \ 11 ((!dev) || (dev->dev_type == SAS_PHY_UNUSED)) 12 13 static int hisi_sas_softreset_ata_disk(struct domain_device *device); 14 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, 15 void *funcdata); 16 static void hisi_sas_release_task(struct hisi_hba *hisi_hba, 17 struct domain_device *device); 18 static void hisi_sas_dev_gone(struct domain_device *device); 19 20 struct hisi_sas_internal_abort_data { 21 bool rst_ha_timeout; /* reset the HA for timeout */ 22 }; 23 24 u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction) 25 { 26 switch (fis->command) { 27 case ATA_CMD_FPDMA_WRITE: 28 case ATA_CMD_FPDMA_READ: 29 case ATA_CMD_FPDMA_RECV: 30 case ATA_CMD_FPDMA_SEND: 31 case ATA_CMD_NCQ_NON_DATA: 32 return HISI_SAS_SATA_PROTOCOL_FPDMA; 33 34 case ATA_CMD_DOWNLOAD_MICRO: 35 case ATA_CMD_ID_ATA: 36 case ATA_CMD_PMP_READ: 37 case ATA_CMD_READ_LOG_EXT: 38 case ATA_CMD_PIO_READ: 39 case ATA_CMD_PIO_READ_EXT: 40 case ATA_CMD_PMP_WRITE: 41 case ATA_CMD_WRITE_LOG_EXT: 42 case ATA_CMD_PIO_WRITE: 43 case ATA_CMD_PIO_WRITE_EXT: 44 return HISI_SAS_SATA_PROTOCOL_PIO; 45 46 case ATA_CMD_DSM: 47 case ATA_CMD_DOWNLOAD_MICRO_DMA: 48 case ATA_CMD_PMP_READ_DMA: 49 case ATA_CMD_PMP_WRITE_DMA: 50 case ATA_CMD_READ: 51 case ATA_CMD_READ_EXT: 52 case ATA_CMD_READ_LOG_DMA_EXT: 53 case ATA_CMD_READ_STREAM_DMA_EXT: 54 case ATA_CMD_TRUSTED_RCV_DMA: 55 case ATA_CMD_TRUSTED_SND_DMA: 56 case ATA_CMD_WRITE: 57 case ATA_CMD_WRITE_EXT: 58 case ATA_CMD_WRITE_FUA_EXT: 59 case ATA_CMD_WRITE_QUEUED: 60 case ATA_CMD_WRITE_LOG_DMA_EXT: 61 case ATA_CMD_WRITE_STREAM_DMA_EXT: 62 case ATA_CMD_ZAC_MGMT_IN: 63 return HISI_SAS_SATA_PROTOCOL_DMA; 64 65 case ATA_CMD_CHK_POWER: 66 case ATA_CMD_DEV_RESET: 67 case ATA_CMD_EDD: 68 case ATA_CMD_FLUSH: 69 case ATA_CMD_FLUSH_EXT: 70 case ATA_CMD_VERIFY: 71 case ATA_CMD_VERIFY_EXT: 72 case ATA_CMD_SET_FEATURES: 73 case ATA_CMD_STANDBY: 74 case ATA_CMD_STANDBYNOW1: 75 case ATA_CMD_ZAC_MGMT_OUT: 76 return HISI_SAS_SATA_PROTOCOL_NONDATA; 77 78 case ATA_CMD_SET_MAX: 79 switch (fis->features) { 80 case ATA_SET_MAX_PASSWD: 81 case ATA_SET_MAX_LOCK: 82 return HISI_SAS_SATA_PROTOCOL_PIO; 83 84 case ATA_SET_MAX_PASSWD_DMA: 85 case ATA_SET_MAX_UNLOCK_DMA: 86 return HISI_SAS_SATA_PROTOCOL_DMA; 87 88 default: 89 return HISI_SAS_SATA_PROTOCOL_NONDATA; 90 } 91 92 default: 93 { 94 if (direction == DMA_NONE) 95 return HISI_SAS_SATA_PROTOCOL_NONDATA; 96 return HISI_SAS_SATA_PROTOCOL_PIO; 97 } 98 } 99 } 100 EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol); 101 102 void hisi_sas_sata_done(struct sas_task *task, 103 struct hisi_sas_slot *slot) 104 { 105 struct task_status_struct *ts = &task->task_status; 106 struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf; 107 struct hisi_sas_status_buffer *status_buf = 108 hisi_sas_status_buf_addr_mem(slot); 109 u8 *iu = &status_buf->iu[0]; 110 struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu; 111 112 resp->frame_len = sizeof(struct dev_to_host_fis); 113 memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis)); 114 115 ts->buf_valid_size = sizeof(*resp); 116 } 117 EXPORT_SYMBOL_GPL(hisi_sas_sata_done); 118 119 /* 120 * This function assumes linkrate mask fits in 8 bits, which it 121 * does for all HW versions supported. 122 */ 123 u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max) 124 { 125 u8 rate = 0; 126 int i; 127 128 max -= SAS_LINK_RATE_1_5_GBPS; 129 for (i = 0; i <= max; i++) 130 rate |= 1 << (i * 2); 131 return rate; 132 } 133 EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask); 134 135 static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device) 136 { 137 return device->port->ha->lldd_ha; 138 } 139 140 struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port) 141 { 142 return container_of(sas_port, struct hisi_sas_port, sas_port); 143 } 144 EXPORT_SYMBOL_GPL(to_hisi_sas_port); 145 146 void hisi_sas_stop_phys(struct hisi_hba *hisi_hba) 147 { 148 int phy_no; 149 150 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) 151 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 152 } 153 EXPORT_SYMBOL_GPL(hisi_sas_stop_phys); 154 155 static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx) 156 { 157 void *bitmap = hisi_hba->slot_index_tags; 158 159 __clear_bit(slot_idx, bitmap); 160 } 161 162 static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx) 163 { 164 if (hisi_hba->hw->slot_index_alloc || 165 slot_idx >= HISI_SAS_UNRESERVED_IPTT) { 166 spin_lock(&hisi_hba->lock); 167 hisi_sas_slot_index_clear(hisi_hba, slot_idx); 168 spin_unlock(&hisi_hba->lock); 169 } 170 } 171 172 static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx) 173 { 174 void *bitmap = hisi_hba->slot_index_tags; 175 176 __set_bit(slot_idx, bitmap); 177 } 178 179 static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, 180 struct scsi_cmnd *scsi_cmnd) 181 { 182 int index; 183 void *bitmap = hisi_hba->slot_index_tags; 184 185 if (scsi_cmnd) 186 return scsi_cmd_to_rq(scsi_cmnd)->tag; 187 188 spin_lock(&hisi_hba->lock); 189 index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count, 190 hisi_hba->last_slot_index + 1); 191 if (index >= hisi_hba->slot_index_count) { 192 index = find_next_zero_bit(bitmap, 193 hisi_hba->slot_index_count, 194 HISI_SAS_UNRESERVED_IPTT); 195 if (index >= hisi_hba->slot_index_count) { 196 spin_unlock(&hisi_hba->lock); 197 return -SAS_QUEUE_FULL; 198 } 199 } 200 hisi_sas_slot_index_set(hisi_hba, index); 201 hisi_hba->last_slot_index = index; 202 spin_unlock(&hisi_hba->lock); 203 204 return index; 205 } 206 207 void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, 208 struct hisi_sas_slot *slot) 209 { 210 int device_id = slot->device_id; 211 struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id]; 212 213 if (task) { 214 struct device *dev = hisi_hba->dev; 215 216 if (!task->lldd_task) 217 return; 218 219 task->lldd_task = NULL; 220 221 if (!sas_protocol_ata(task->task_proto)) { 222 if (slot->n_elem) 223 dma_unmap_sg(dev, task->scatter, 224 task->num_scatter, 225 task->data_dir); 226 if (slot->n_elem_dif) { 227 struct sas_ssp_task *ssp_task = &task->ssp_task; 228 struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; 229 230 dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), 231 scsi_prot_sg_count(scsi_cmnd), 232 task->data_dir); 233 } 234 } 235 } 236 237 spin_lock(&sas_dev->lock); 238 list_del_init(&slot->entry); 239 spin_unlock(&sas_dev->lock); 240 241 memset(slot, 0, offsetof(struct hisi_sas_slot, buf)); 242 243 hisi_sas_slot_index_free(hisi_hba, slot->idx); 244 } 245 EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free); 246 247 static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba, 248 struct hisi_sas_slot *slot) 249 { 250 hisi_hba->hw->prep_smp(hisi_hba, slot); 251 } 252 253 static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba, 254 struct hisi_sas_slot *slot) 255 { 256 hisi_hba->hw->prep_ssp(hisi_hba, slot); 257 } 258 259 static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba, 260 struct hisi_sas_slot *slot) 261 { 262 hisi_hba->hw->prep_stp(hisi_hba, slot); 263 } 264 265 static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba, 266 struct hisi_sas_slot *slot) 267 { 268 hisi_hba->hw->prep_abort(hisi_hba, slot); 269 } 270 271 static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba, 272 struct sas_task *task, int n_elem, 273 int n_elem_req) 274 { 275 struct device *dev = hisi_hba->dev; 276 277 if (!sas_protocol_ata(task->task_proto)) { 278 if (task->num_scatter) { 279 if (n_elem) 280 dma_unmap_sg(dev, task->scatter, 281 task->num_scatter, 282 task->data_dir); 283 } else if (task->task_proto & SAS_PROTOCOL_SMP) { 284 if (n_elem_req) 285 dma_unmap_sg(dev, &task->smp_task.smp_req, 286 1, DMA_TO_DEVICE); 287 } 288 } 289 } 290 291 static int hisi_sas_dma_map(struct hisi_hba *hisi_hba, 292 struct sas_task *task, int *n_elem, 293 int *n_elem_req) 294 { 295 struct device *dev = hisi_hba->dev; 296 int rc; 297 298 if (sas_protocol_ata(task->task_proto)) { 299 *n_elem = task->num_scatter; 300 } else { 301 unsigned int req_len; 302 303 if (task->num_scatter) { 304 *n_elem = dma_map_sg(dev, task->scatter, 305 task->num_scatter, task->data_dir); 306 if (!*n_elem) { 307 rc = -ENOMEM; 308 goto prep_out; 309 } 310 } else if (task->task_proto & SAS_PROTOCOL_SMP) { 311 *n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req, 312 1, DMA_TO_DEVICE); 313 if (!*n_elem_req) { 314 rc = -ENOMEM; 315 goto prep_out; 316 } 317 req_len = sg_dma_len(&task->smp_task.smp_req); 318 if (req_len & 0x3) { 319 rc = -EINVAL; 320 goto err_out_dma_unmap; 321 } 322 } 323 } 324 325 if (*n_elem > HISI_SAS_SGE_PAGE_CNT) { 326 dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT\n", 327 *n_elem); 328 rc = -EINVAL; 329 goto err_out_dma_unmap; 330 } 331 return 0; 332 333 err_out_dma_unmap: 334 /* It would be better to call dma_unmap_sg() here, but it's messy */ 335 hisi_sas_dma_unmap(hisi_hba, task, *n_elem, 336 *n_elem_req); 337 prep_out: 338 return rc; 339 } 340 341 static void hisi_sas_dif_dma_unmap(struct hisi_hba *hisi_hba, 342 struct sas_task *task, int n_elem_dif) 343 { 344 struct device *dev = hisi_hba->dev; 345 346 if (n_elem_dif) { 347 struct sas_ssp_task *ssp_task = &task->ssp_task; 348 struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; 349 350 dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), 351 scsi_prot_sg_count(scsi_cmnd), 352 task->data_dir); 353 } 354 } 355 356 static int hisi_sas_dif_dma_map(struct hisi_hba *hisi_hba, 357 int *n_elem_dif, struct sas_task *task) 358 { 359 struct device *dev = hisi_hba->dev; 360 struct sas_ssp_task *ssp_task; 361 struct scsi_cmnd *scsi_cmnd; 362 int rc; 363 364 if (task->num_scatter) { 365 ssp_task = &task->ssp_task; 366 scsi_cmnd = ssp_task->cmd; 367 368 if (scsi_prot_sg_count(scsi_cmnd)) { 369 *n_elem_dif = dma_map_sg(dev, 370 scsi_prot_sglist(scsi_cmnd), 371 scsi_prot_sg_count(scsi_cmnd), 372 task->data_dir); 373 374 if (!*n_elem_dif) 375 return -ENOMEM; 376 377 if (*n_elem_dif > HISI_SAS_SGE_DIF_PAGE_CNT) { 378 dev_err(dev, "task prep: n_elem_dif(%d) too large\n", 379 *n_elem_dif); 380 rc = -EINVAL; 381 goto err_out_dif_dma_unmap; 382 } 383 } 384 } 385 386 return 0; 387 388 err_out_dif_dma_unmap: 389 dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), 390 scsi_prot_sg_count(scsi_cmnd), task->data_dir); 391 return rc; 392 } 393 394 static 395 void hisi_sas_task_deliver(struct hisi_hba *hisi_hba, 396 struct hisi_sas_slot *slot, 397 struct hisi_sas_dq *dq, 398 struct hisi_sas_device *sas_dev) 399 { 400 struct hisi_sas_cmd_hdr *cmd_hdr_base; 401 int dlvry_queue_slot, dlvry_queue; 402 struct sas_task *task = slot->task; 403 int wr_q_index; 404 405 spin_lock(&dq->lock); 406 wr_q_index = dq->wr_point; 407 dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS; 408 list_add_tail(&slot->delivery, &dq->list); 409 spin_unlock(&dq->lock); 410 spin_lock(&sas_dev->lock); 411 list_add_tail(&slot->entry, &sas_dev->list); 412 spin_unlock(&sas_dev->lock); 413 414 dlvry_queue = dq->id; 415 dlvry_queue_slot = wr_q_index; 416 417 slot->device_id = sas_dev->device_id; 418 slot->dlvry_queue = dlvry_queue; 419 slot->dlvry_queue_slot = dlvry_queue_slot; 420 cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue]; 421 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot]; 422 423 task->lldd_task = slot; 424 425 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr)); 426 memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ); 427 memset(hisi_sas_status_buf_addr_mem(slot), 0, 428 sizeof(struct hisi_sas_err_record)); 429 430 switch (task->task_proto) { 431 case SAS_PROTOCOL_SMP: 432 hisi_sas_task_prep_smp(hisi_hba, slot); 433 break; 434 case SAS_PROTOCOL_SSP: 435 hisi_sas_task_prep_ssp(hisi_hba, slot); 436 break; 437 case SAS_PROTOCOL_SATA: 438 case SAS_PROTOCOL_STP: 439 case SAS_PROTOCOL_STP_ALL: 440 hisi_sas_task_prep_ata(hisi_hba, slot); 441 break; 442 case SAS_PROTOCOL_INTERNAL_ABORT: 443 hisi_sas_task_prep_abort(hisi_hba, slot); 444 break; 445 fallthrough; 446 default: 447 return; 448 } 449 450 WRITE_ONCE(slot->ready, 1); 451 452 spin_lock(&dq->lock); 453 hisi_hba->hw->start_delivery(dq); 454 spin_unlock(&dq->lock); 455 } 456 457 static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags) 458 { 459 int n_elem = 0, n_elem_dif = 0, n_elem_req = 0; 460 struct domain_device *device = task->dev; 461 struct asd_sas_port *sas_port = device->port; 462 struct hisi_sas_device *sas_dev = device->lldd_dev; 463 bool internal_abort = sas_is_internal_abort(task); 464 struct scsi_cmnd *scmd = NULL; 465 struct hisi_sas_dq *dq = NULL; 466 struct hisi_sas_port *port; 467 struct hisi_hba *hisi_hba; 468 struct hisi_sas_slot *slot; 469 struct device *dev; 470 int rc; 471 472 if (!sas_port) { 473 struct task_status_struct *ts = &task->task_status; 474 475 ts->resp = SAS_TASK_UNDELIVERED; 476 ts->stat = SAS_PHY_DOWN; 477 /* 478 * libsas will use dev->port, should 479 * not call task_done for sata 480 */ 481 if (device->dev_type != SAS_SATA_DEV && !internal_abort) 482 task->task_done(task); 483 return -ECOMM; 484 } 485 486 hisi_hba = dev_to_hisi_hba(device); 487 dev = hisi_hba->dev; 488 489 switch (task->task_proto) { 490 case SAS_PROTOCOL_SSP: 491 case SAS_PROTOCOL_SMP: 492 case SAS_PROTOCOL_SATA: 493 case SAS_PROTOCOL_STP: 494 case SAS_PROTOCOL_STP_ALL: 495 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) { 496 if (!gfpflags_allow_blocking(gfp_flags)) 497 return -EINVAL; 498 499 down(&hisi_hba->sem); 500 up(&hisi_hba->sem); 501 } 502 503 if (DEV_IS_GONE(sas_dev)) { 504 if (sas_dev) 505 dev_info(dev, "task prep: device %d not ready\n", 506 sas_dev->device_id); 507 else 508 dev_info(dev, "task prep: device %016llx not ready\n", 509 SAS_ADDR(device->sas_addr)); 510 511 return -ECOMM; 512 } 513 514 port = to_hisi_sas_port(sas_port); 515 if (!port->port_attached) { 516 dev_info(dev, "task prep: %s port%d not attach device\n", 517 dev_is_sata(device) ? "SATA/STP" : "SAS", 518 device->port->id); 519 520 return -ECOMM; 521 } 522 523 if (task->uldd_task) { 524 struct ata_queued_cmd *qc; 525 526 if (dev_is_sata(device)) { 527 qc = task->uldd_task; 528 scmd = qc->scsicmd; 529 } else { 530 scmd = task->uldd_task; 531 } 532 } 533 534 if (scmd) { 535 unsigned int dq_index; 536 u32 blk_tag; 537 538 blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); 539 dq_index = blk_mq_unique_tag_to_hwq(blk_tag); 540 dq = &hisi_hba->dq[dq_index]; 541 } else { 542 struct Scsi_Host *shost = hisi_hba->shost; 543 struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; 544 int queue = qmap->mq_map[raw_smp_processor_id()]; 545 546 dq = &hisi_hba->dq[queue]; 547 } 548 break; 549 case SAS_PROTOCOL_INTERNAL_ABORT: 550 if (!hisi_hba->hw->prep_abort) 551 return TMF_RESP_FUNC_FAILED; 552 553 if (test_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags)) 554 return -EIO; 555 556 hisi_hba = dev_to_hisi_hba(device); 557 558 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) 559 return -EINVAL; 560 561 port = to_hisi_sas_port(sas_port); 562 dq = &hisi_hba->dq[task->abort_task.qid]; 563 break; 564 default: 565 dev_err(hisi_hba->dev, "task prep: unknown/unsupported proto (0x%x)\n", 566 task->task_proto); 567 return -EINVAL; 568 } 569 570 rc = hisi_sas_dma_map(hisi_hba, task, &n_elem, 571 &n_elem_req); 572 if (rc < 0) 573 goto prep_out; 574 575 if (!sas_protocol_ata(task->task_proto)) { 576 rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task); 577 if (rc < 0) 578 goto err_out_dma_unmap; 579 } 580 581 if (!internal_abort && hisi_hba->hw->slot_index_alloc) 582 rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device); 583 else 584 rc = hisi_sas_slot_index_alloc(hisi_hba, scmd); 585 586 if (rc < 0) 587 goto err_out_dif_dma_unmap; 588 589 slot = &hisi_hba->slot_info[rc]; 590 slot->n_elem = n_elem; 591 slot->n_elem_dif = n_elem_dif; 592 slot->task = task; 593 slot->port = port; 594 595 slot->tmf = task->tmf; 596 slot->is_internal = !!task->tmf || internal_abort; 597 598 /* protect task_prep and start_delivery sequence */ 599 hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev); 600 601 return 0; 602 603 err_out_dif_dma_unmap: 604 if (!sas_protocol_ata(task->task_proto)) 605 hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif); 606 err_out_dma_unmap: 607 hisi_sas_dma_unmap(hisi_hba, task, n_elem, 608 n_elem_req); 609 prep_out: 610 dev_err(dev, "task exec: failed[%d]!\n", rc); 611 return rc; 612 } 613 614 static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no, 615 gfp_t gfp_flags) 616 { 617 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 618 struct asd_sas_phy *sas_phy = &phy->sas_phy; 619 620 if (!phy->phy_attached) 621 return; 622 623 sas_notify_phy_event(sas_phy, PHYE_OOB_DONE, gfp_flags); 624 625 if (sas_phy->phy) { 626 struct sas_phy *sphy = sas_phy->phy; 627 628 sphy->negotiated_linkrate = sas_phy->linkrate; 629 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; 630 sphy->maximum_linkrate_hw = 631 hisi_hba->hw->phy_get_max_linkrate(); 632 if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) 633 sphy->minimum_linkrate = phy->minimum_linkrate; 634 635 if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) 636 sphy->maximum_linkrate = phy->maximum_linkrate; 637 } 638 639 if (phy->phy_type & PORT_TYPE_SAS) { 640 struct sas_identify_frame *id; 641 642 id = (struct sas_identify_frame *)phy->frame_rcvd; 643 id->dev_type = phy->identify.device_type; 644 id->initiator_bits = SAS_PROTOCOL_ALL; 645 id->target_bits = phy->identify.target_port_protocols; 646 } else if (phy->phy_type & PORT_TYPE_SATA) { 647 /* Nothing */ 648 } 649 650 sas_phy->frame_rcvd_size = phy->frame_rcvd_size; 651 sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, gfp_flags); 652 } 653 654 static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device) 655 { 656 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 657 struct hisi_sas_device *sas_dev = NULL; 658 int last = hisi_hba->last_dev_id; 659 int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES; 660 int i; 661 662 spin_lock(&hisi_hba->lock); 663 for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) { 664 if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) { 665 int queue = i % hisi_hba->queue_count; 666 struct hisi_sas_dq *dq = &hisi_hba->dq[queue]; 667 668 hisi_hba->devices[i].device_id = i; 669 sas_dev = &hisi_hba->devices[i]; 670 sas_dev->dev_status = HISI_SAS_DEV_INIT; 671 sas_dev->dev_type = device->dev_type; 672 sas_dev->hisi_hba = hisi_hba; 673 sas_dev->sas_device = device; 674 sas_dev->dq = dq; 675 spin_lock_init(&sas_dev->lock); 676 INIT_LIST_HEAD(&hisi_hba->devices[i].list); 677 break; 678 } 679 i++; 680 } 681 hisi_hba->last_dev_id = i; 682 spin_unlock(&hisi_hba->lock); 683 684 return sas_dev; 685 } 686 687 static void hisi_sas_tmf_aborted(struct sas_task *task) 688 { 689 struct hisi_sas_slot *slot = task->lldd_task; 690 struct domain_device *device = task->dev; 691 struct hisi_sas_device *sas_dev = device->lldd_dev; 692 struct hisi_hba *hisi_hba = sas_dev->hisi_hba; 693 694 if (slot) { 695 struct hisi_sas_cq *cq = 696 &hisi_hba->cq[slot->dlvry_queue]; 697 /* 698 * sync irq to avoid free'ing task 699 * before using task in IO completion 700 */ 701 synchronize_irq(cq->irq_no); 702 slot->task = NULL; 703 } 704 } 705 706 #define HISI_SAS_DISK_RECOVER_CNT 3 707 static int hisi_sas_init_device(struct domain_device *device) 708 { 709 int rc = TMF_RESP_FUNC_COMPLETE; 710 struct scsi_lun lun; 711 int retry = HISI_SAS_DISK_RECOVER_CNT; 712 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 713 struct device *dev = hisi_hba->dev; 714 struct sas_phy *local_phy; 715 716 switch (device->dev_type) { 717 case SAS_END_DEVICE: 718 int_to_scsilun(0, &lun); 719 720 while (retry-- > 0) { 721 rc = sas_clear_task_set(device, lun.scsi_lun); 722 if (rc == TMF_RESP_FUNC_COMPLETE) { 723 hisi_sas_release_task(hisi_hba, device); 724 break; 725 } 726 } 727 break; 728 case SAS_SATA_DEV: 729 case SAS_SATA_PM: 730 case SAS_SATA_PM_PORT: 731 case SAS_SATA_PENDING: 732 /* 733 * send HARD RESET to clear previous affiliation of 734 * STP target port 735 */ 736 local_phy = sas_get_local_phy(device); 737 if (!scsi_is_sas_phy_local(local_phy) && 738 !test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) { 739 unsigned long deadline = ata_deadline(jiffies, 20000); 740 struct sata_device *sata_dev = &device->sata_dev; 741 struct ata_host *ata_host = sata_dev->ata_host; 742 struct ata_port_operations *ops = ata_host->ops; 743 struct ata_port *ap = sata_dev->ap; 744 struct ata_link *link; 745 unsigned int classes; 746 747 ata_for_each_link(link, ap, EDGE) 748 rc = ops->hardreset(link, &classes, 749 deadline); 750 } 751 sas_put_local_phy(local_phy); 752 if (rc) { 753 dev_warn(dev, "SATA disk hardreset fail: %d\n", rc); 754 return rc; 755 } 756 757 while (retry-- > 0) { 758 rc = hisi_sas_softreset_ata_disk(device); 759 if (!rc) 760 break; 761 } 762 break; 763 default: 764 break; 765 } 766 767 return rc; 768 } 769 770 int hisi_sas_slave_alloc(struct scsi_device *sdev) 771 { 772 struct domain_device *ddev; 773 int rc; 774 775 rc = sas_slave_alloc(sdev); 776 if (rc) 777 return rc; 778 ddev = sdev_to_domain_dev(sdev); 779 780 return hisi_sas_init_device(ddev); 781 } 782 EXPORT_SYMBOL_GPL(hisi_sas_slave_alloc); 783 784 static int hisi_sas_dev_found(struct domain_device *device) 785 { 786 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 787 struct domain_device *parent_dev = device->parent; 788 struct hisi_sas_device *sas_dev; 789 struct device *dev = hisi_hba->dev; 790 int rc; 791 792 if (hisi_hba->hw->alloc_dev) 793 sas_dev = hisi_hba->hw->alloc_dev(device); 794 else 795 sas_dev = hisi_sas_alloc_dev(device); 796 if (!sas_dev) { 797 dev_err(dev, "fail alloc dev: max support %d devices\n", 798 HISI_SAS_MAX_DEVICES); 799 return -EINVAL; 800 } 801 802 device->lldd_dev = sas_dev; 803 hisi_hba->hw->setup_itct(hisi_hba, sas_dev); 804 805 if (parent_dev && dev_is_expander(parent_dev->dev_type)) { 806 int phy_no; 807 u8 phy_num = parent_dev->ex_dev.num_phys; 808 struct ex_phy *phy; 809 810 for (phy_no = 0; phy_no < phy_num; phy_no++) { 811 phy = &parent_dev->ex_dev.ex_phy[phy_no]; 812 if (SAS_ADDR(phy->attached_sas_addr) == 813 SAS_ADDR(device->sas_addr)) 814 break; 815 } 816 817 if (phy_no == phy_num) { 818 dev_info(dev, "dev found: no attached " 819 "dev:%016llx at ex:%016llx\n", 820 SAS_ADDR(device->sas_addr), 821 SAS_ADDR(parent_dev->sas_addr)); 822 rc = -EINVAL; 823 goto err_out; 824 } 825 } 826 827 dev_info(dev, "dev[%d:%x] found\n", 828 sas_dev->device_id, sas_dev->dev_type); 829 830 sas_dev->dev_status = HISI_SAS_DEV_NORMAL; 831 return 0; 832 833 err_out: 834 hisi_sas_dev_gone(device); 835 return rc; 836 } 837 838 int hisi_sas_slave_configure(struct scsi_device *sdev) 839 { 840 struct domain_device *dev = sdev_to_domain_dev(sdev); 841 int ret = sas_slave_configure(sdev); 842 843 if (ret) 844 return ret; 845 if (!dev_is_sata(dev)) 846 sas_change_queue_depth(sdev, 64); 847 848 return 0; 849 } 850 EXPORT_SYMBOL_GPL(hisi_sas_slave_configure); 851 852 void hisi_sas_scan_start(struct Scsi_Host *shost) 853 { 854 struct hisi_hba *hisi_hba = shost_priv(shost); 855 856 hisi_hba->hw->phys_init(hisi_hba); 857 } 858 EXPORT_SYMBOL_GPL(hisi_sas_scan_start); 859 860 int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time) 861 { 862 struct hisi_hba *hisi_hba = shost_priv(shost); 863 struct sas_ha_struct *sha = &hisi_hba->sha; 864 865 /* Wait for PHY up interrupt to occur */ 866 if (time < HZ) 867 return 0; 868 869 sas_drain_work(sha); 870 return 1; 871 } 872 EXPORT_SYMBOL_GPL(hisi_sas_scan_finished); 873 874 static void hisi_sas_phyup_work_common(struct work_struct *work, 875 enum hisi_sas_phy_event event) 876 { 877 struct hisi_sas_phy *phy = 878 container_of(work, typeof(*phy), works[event]); 879 struct hisi_hba *hisi_hba = phy->hisi_hba; 880 struct asd_sas_phy *sas_phy = &phy->sas_phy; 881 int phy_no = sas_phy->id; 882 883 phy->wait_phyup_cnt = 0; 884 if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP) 885 hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no); 886 hisi_sas_bytes_dmaed(hisi_hba, phy_no, GFP_KERNEL); 887 } 888 889 static void hisi_sas_phyup_work(struct work_struct *work) 890 { 891 hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP); 892 } 893 894 static void hisi_sas_linkreset_work(struct work_struct *work) 895 { 896 struct hisi_sas_phy *phy = 897 container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]); 898 struct asd_sas_phy *sas_phy = &phy->sas_phy; 899 900 hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL); 901 } 902 903 static void hisi_sas_phyup_pm_work(struct work_struct *work) 904 { 905 struct hisi_sas_phy *phy = 906 container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP_PM]); 907 struct hisi_hba *hisi_hba = phy->hisi_hba; 908 struct device *dev = hisi_hba->dev; 909 910 hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP_PM); 911 pm_runtime_put_sync(dev); 912 } 913 914 static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = { 915 [HISI_PHYE_PHY_UP] = hisi_sas_phyup_work, 916 [HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work, 917 [HISI_PHYE_PHY_UP_PM] = hisi_sas_phyup_pm_work, 918 }; 919 920 bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy, 921 enum hisi_sas_phy_event event) 922 { 923 struct hisi_hba *hisi_hba = phy->hisi_hba; 924 925 if (WARN_ON(event >= HISI_PHYES_NUM)) 926 return false; 927 928 return queue_work(hisi_hba->wq, &phy->works[event]); 929 } 930 EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event); 931 932 static void hisi_sas_wait_phyup_timedout(struct timer_list *t) 933 { 934 struct hisi_sas_phy *phy = from_timer(phy, t, timer); 935 struct hisi_hba *hisi_hba = phy->hisi_hba; 936 struct device *dev = hisi_hba->dev; 937 int phy_no = phy->sas_phy.id; 938 939 dev_warn(dev, "phy%d wait phyup timeout, issuing link reset\n", phy_no); 940 hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); 941 } 942 943 #define HISI_SAS_WAIT_PHYUP_RETRIES 10 944 945 void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no) 946 { 947 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 948 struct device *dev = hisi_hba->dev; 949 unsigned long flags; 950 951 dev_dbg(dev, "phy%d OOB ready\n", phy_no); 952 spin_lock_irqsave(&phy->lock, flags); 953 if (phy->phy_attached) { 954 spin_unlock_irqrestore(&phy->lock, flags); 955 return; 956 } 957 958 if (!timer_pending(&phy->timer)) { 959 if (phy->wait_phyup_cnt < HISI_SAS_WAIT_PHYUP_RETRIES) { 960 phy->wait_phyup_cnt++; 961 phy->timer.expires = jiffies + 962 HISI_SAS_WAIT_PHYUP_TIMEOUT; 963 add_timer(&phy->timer); 964 spin_unlock_irqrestore(&phy->lock, flags); 965 return; 966 } 967 968 dev_warn(dev, "phy%d failed to come up %d times, giving up\n", 969 phy_no, phy->wait_phyup_cnt); 970 phy->wait_phyup_cnt = 0; 971 } 972 spin_unlock_irqrestore(&phy->lock, flags); 973 } 974 975 EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready); 976 977 static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no) 978 { 979 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 980 struct asd_sas_phy *sas_phy = &phy->sas_phy; 981 int i; 982 983 phy->hisi_hba = hisi_hba; 984 phy->port = NULL; 985 phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS; 986 phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate(); 987 sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0; 988 sas_phy->class = SAS; 989 sas_phy->iproto = SAS_PROTOCOL_ALL; 990 sas_phy->tproto = 0; 991 sas_phy->type = PHY_TYPE_PHYSICAL; 992 sas_phy->role = PHY_ROLE_INITIATOR; 993 sas_phy->oob_mode = OOB_NOT_CONNECTED; 994 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; 995 sas_phy->id = phy_no; 996 sas_phy->sas_addr = &hisi_hba->sas_addr[0]; 997 sas_phy->frame_rcvd = &phy->frame_rcvd[0]; 998 sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata; 999 sas_phy->lldd_phy = phy; 1000 1001 for (i = 0; i < HISI_PHYES_NUM; i++) 1002 INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]); 1003 1004 spin_lock_init(&phy->lock); 1005 1006 timer_setup(&phy->timer, hisi_sas_wait_phyup_timedout, 0); 1007 } 1008 1009 /* Wrapper to ensure we track hisi_sas_phy.enable properly */ 1010 void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no, int enable) 1011 { 1012 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1013 struct asd_sas_phy *aphy = &phy->sas_phy; 1014 struct sas_phy *sphy = aphy->phy; 1015 unsigned long flags; 1016 1017 spin_lock_irqsave(&phy->lock, flags); 1018 1019 if (enable) { 1020 /* We may have been enabled already; if so, don't touch */ 1021 if (!phy->enable) 1022 sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; 1023 hisi_hba->hw->phy_start(hisi_hba, phy_no); 1024 } else { 1025 sphy->negotiated_linkrate = SAS_PHY_DISABLED; 1026 hisi_hba->hw->phy_disable(hisi_hba, phy_no); 1027 } 1028 phy->enable = enable; 1029 spin_unlock_irqrestore(&phy->lock, flags); 1030 } 1031 EXPORT_SYMBOL_GPL(hisi_sas_phy_enable); 1032 1033 static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy) 1034 { 1035 struct sas_ha_struct *sas_ha = sas_phy->ha; 1036 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; 1037 struct hisi_sas_phy *phy = sas_phy->lldd_phy; 1038 struct asd_sas_port *sas_port = sas_phy->port; 1039 struct hisi_sas_port *port; 1040 unsigned long flags; 1041 1042 if (!sas_port) 1043 return; 1044 1045 port = to_hisi_sas_port(sas_port); 1046 spin_lock_irqsave(&hisi_hba->lock, flags); 1047 port->port_attached = 1; 1048 port->id = phy->port_id; 1049 phy->port = port; 1050 sas_port->lldd_port = port; 1051 spin_unlock_irqrestore(&hisi_hba->lock, flags); 1052 } 1053 1054 static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task, 1055 struct hisi_sas_slot *slot) 1056 { 1057 if (task) { 1058 unsigned long flags; 1059 struct task_status_struct *ts; 1060 1061 ts = &task->task_status; 1062 1063 ts->resp = SAS_TASK_COMPLETE; 1064 ts->stat = SAS_ABORTED_TASK; 1065 spin_lock_irqsave(&task->task_state_lock, flags); 1066 task->task_state_flags &= ~SAS_TASK_STATE_PENDING; 1067 if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP) 1068 task->task_state_flags |= SAS_TASK_STATE_DONE; 1069 spin_unlock_irqrestore(&task->task_state_lock, flags); 1070 } 1071 1072 hisi_sas_slot_task_free(hisi_hba, task, slot); 1073 } 1074 1075 static void hisi_sas_release_task(struct hisi_hba *hisi_hba, 1076 struct domain_device *device) 1077 { 1078 struct hisi_sas_slot *slot, *slot2; 1079 struct hisi_sas_device *sas_dev = device->lldd_dev; 1080 1081 list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry) 1082 hisi_sas_do_release_task(hisi_hba, slot->task, slot); 1083 } 1084 1085 void hisi_sas_release_tasks(struct hisi_hba *hisi_hba) 1086 { 1087 struct hisi_sas_device *sas_dev; 1088 struct domain_device *device; 1089 int i; 1090 1091 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1092 sas_dev = &hisi_hba->devices[i]; 1093 device = sas_dev->sas_device; 1094 1095 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || 1096 !device) 1097 continue; 1098 1099 hisi_sas_release_task(hisi_hba, device); 1100 } 1101 } 1102 EXPORT_SYMBOL_GPL(hisi_sas_release_tasks); 1103 1104 static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba, 1105 struct domain_device *device) 1106 { 1107 if (hisi_hba->hw->dereg_device) 1108 hisi_hba->hw->dereg_device(hisi_hba, device); 1109 } 1110 1111 static int 1112 hisi_sas_internal_task_abort_dev(struct hisi_sas_device *sas_dev, 1113 bool rst_ha_timeout) 1114 { 1115 struct hisi_sas_internal_abort_data data = { rst_ha_timeout }; 1116 struct domain_device *device = sas_dev->sas_device; 1117 struct hisi_hba *hisi_hba = sas_dev->hisi_hba; 1118 int i, rc; 1119 1120 for (i = 0; i < hisi_hba->cq_nvecs; i++) { 1121 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 1122 const struct cpumask *mask = cq->irq_mask; 1123 1124 if (mask && !cpumask_intersects(cpu_online_mask, mask)) 1125 continue; 1126 rc = sas_execute_internal_abort_dev(device, i, &data); 1127 if (rc) 1128 return rc; 1129 } 1130 1131 return 0; 1132 } 1133 1134 static void hisi_sas_dev_gone(struct domain_device *device) 1135 { 1136 struct hisi_sas_device *sas_dev = device->lldd_dev; 1137 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1138 struct device *dev = hisi_hba->dev; 1139 int ret = 0; 1140 1141 dev_info(dev, "dev[%d:%x] is gone\n", 1142 sas_dev->device_id, sas_dev->dev_type); 1143 1144 down(&hisi_hba->sem); 1145 if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) { 1146 hisi_sas_internal_task_abort_dev(sas_dev, true); 1147 1148 hisi_sas_dereg_device(hisi_hba, device); 1149 1150 ret = hisi_hba->hw->clear_itct(hisi_hba, sas_dev); 1151 device->lldd_dev = NULL; 1152 } 1153 1154 if (hisi_hba->hw->free_device) 1155 hisi_hba->hw->free_device(sas_dev); 1156 1157 /* Don't mark it as SAS_PHY_UNUSED if failed to clear ITCT */ 1158 if (!ret) 1159 sas_dev->dev_type = SAS_PHY_UNUSED; 1160 sas_dev->sas_device = NULL; 1161 up(&hisi_hba->sem); 1162 } 1163 1164 static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no, 1165 struct sas_phy_linkrates *r) 1166 { 1167 struct sas_phy_linkrates _r; 1168 1169 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1170 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1171 enum sas_linkrate min, max; 1172 1173 if (r->minimum_linkrate > SAS_LINK_RATE_1_5_GBPS) 1174 return -EINVAL; 1175 1176 if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) { 1177 max = sas_phy->phy->maximum_linkrate; 1178 min = r->minimum_linkrate; 1179 } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) { 1180 max = r->maximum_linkrate; 1181 min = sas_phy->phy->minimum_linkrate; 1182 } else 1183 return -EINVAL; 1184 1185 _r.maximum_linkrate = max; 1186 _r.minimum_linkrate = min; 1187 1188 sas_phy->phy->maximum_linkrate = max; 1189 sas_phy->phy->minimum_linkrate = min; 1190 1191 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 1192 msleep(100); 1193 hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r); 1194 hisi_sas_phy_enable(hisi_hba, phy_no, 1); 1195 1196 return 0; 1197 } 1198 1199 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, 1200 void *funcdata) 1201 { 1202 struct hisi_sas_phy *phy = container_of(sas_phy, 1203 struct hisi_sas_phy, sas_phy); 1204 struct sas_ha_struct *sas_ha = sas_phy->ha; 1205 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; 1206 struct device *dev = hisi_hba->dev; 1207 DECLARE_COMPLETION_ONSTACK(completion); 1208 int phy_no = sas_phy->id; 1209 u8 sts = phy->phy_attached; 1210 int ret = 0; 1211 1212 down(&hisi_hba->sem); 1213 phy->reset_completion = &completion; 1214 1215 switch (func) { 1216 case PHY_FUNC_HARD_RESET: 1217 hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no); 1218 break; 1219 1220 case PHY_FUNC_LINK_RESET: 1221 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 1222 msleep(100); 1223 hisi_sas_phy_enable(hisi_hba, phy_no, 1); 1224 break; 1225 1226 case PHY_FUNC_DISABLE: 1227 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 1228 goto out; 1229 1230 case PHY_FUNC_SET_LINK_RATE: 1231 ret = hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata); 1232 break; 1233 1234 case PHY_FUNC_GET_EVENTS: 1235 if (hisi_hba->hw->get_events) { 1236 hisi_hba->hw->get_events(hisi_hba, phy_no); 1237 goto out; 1238 } 1239 fallthrough; 1240 case PHY_FUNC_RELEASE_SPINUP_HOLD: 1241 default: 1242 ret = -EOPNOTSUPP; 1243 goto out; 1244 } 1245 1246 if (sts && !wait_for_completion_timeout(&completion, 1247 HISI_SAS_WAIT_PHYUP_TIMEOUT)) { 1248 dev_warn(dev, "phy%d wait phyup timed out for func %d\n", 1249 phy_no, func); 1250 if (phy->in_reset) 1251 ret = -ETIMEDOUT; 1252 } 1253 1254 out: 1255 phy->reset_completion = NULL; 1256 1257 up(&hisi_hba->sem); 1258 return ret; 1259 } 1260 1261 static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev, 1262 bool reset, int pmp, u8 *fis) 1263 { 1264 struct ata_taskfile tf; 1265 1266 ata_tf_init(dev, &tf); 1267 if (reset) 1268 tf.ctl |= ATA_SRST; 1269 else 1270 tf.ctl &= ~ATA_SRST; 1271 tf.command = ATA_CMD_DEV_RESET; 1272 ata_tf_to_fis(&tf, pmp, 0, fis); 1273 } 1274 1275 static int hisi_sas_softreset_ata_disk(struct domain_device *device) 1276 { 1277 u8 fis[20] = {0}; 1278 struct ata_port *ap = device->sata_dev.ap; 1279 struct ata_link *link; 1280 int rc = TMF_RESP_FUNC_FAILED; 1281 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1282 struct device *dev = hisi_hba->dev; 1283 1284 ata_for_each_link(link, ap, EDGE) { 1285 int pmp = sata_srst_pmp(link); 1286 1287 hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis); 1288 rc = sas_execute_ata_cmd(device, fis, -1); 1289 if (rc != TMF_RESP_FUNC_COMPLETE) 1290 break; 1291 } 1292 1293 if (rc == TMF_RESP_FUNC_COMPLETE) { 1294 ata_for_each_link(link, ap, EDGE) { 1295 int pmp = sata_srst_pmp(link); 1296 1297 hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis); 1298 rc = sas_execute_ata_cmd(device, fis, -1); 1299 if (rc != TMF_RESP_FUNC_COMPLETE) 1300 dev_err(dev, "ata disk %016llx de-reset failed\n", 1301 SAS_ADDR(device->sas_addr)); 1302 } 1303 } else { 1304 dev_err(dev, "ata disk %016llx reset failed\n", 1305 SAS_ADDR(device->sas_addr)); 1306 } 1307 1308 if (rc == TMF_RESP_FUNC_COMPLETE) 1309 hisi_sas_release_task(hisi_hba, device); 1310 1311 return rc; 1312 } 1313 1314 static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba) 1315 { 1316 u32 state = hisi_hba->hw->get_phys_state(hisi_hba); 1317 int i; 1318 1319 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1320 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; 1321 struct domain_device *device = sas_dev->sas_device; 1322 struct asd_sas_port *sas_port; 1323 struct hisi_sas_port *port; 1324 struct hisi_sas_phy *phy = NULL; 1325 struct asd_sas_phy *sas_phy; 1326 1327 if ((sas_dev->dev_type == SAS_PHY_UNUSED) 1328 || !device || !device->port) 1329 continue; 1330 1331 sas_port = device->port; 1332 port = to_hisi_sas_port(sas_port); 1333 1334 spin_lock(&sas_port->phy_list_lock); 1335 list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) 1336 if (state & BIT(sas_phy->id)) { 1337 phy = sas_phy->lldd_phy; 1338 break; 1339 } 1340 spin_unlock(&sas_port->phy_list_lock); 1341 1342 if (phy) { 1343 port->id = phy->port_id; 1344 1345 /* Update linkrate of directly attached device. */ 1346 if (!device->parent) 1347 device->linkrate = phy->sas_phy.linkrate; 1348 1349 hisi_hba->hw->setup_itct(hisi_hba, sas_dev); 1350 } else 1351 port->id = 0xff; 1352 } 1353 } 1354 1355 static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state) 1356 { 1357 struct asd_sas_port *_sas_port = NULL; 1358 int phy_no; 1359 1360 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { 1361 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1362 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1363 struct asd_sas_port *sas_port = sas_phy->port; 1364 bool do_port_check = _sas_port != sas_port; 1365 1366 if (!sas_phy->phy->enabled) 1367 continue; 1368 1369 /* Report PHY state change to libsas */ 1370 if (state & BIT(phy_no)) { 1371 if (do_port_check && sas_port && sas_port->port_dev) { 1372 struct domain_device *dev = sas_port->port_dev; 1373 1374 _sas_port = sas_port; 1375 1376 if (dev_is_expander(dev->dev_type)) 1377 sas_notify_port_event(sas_phy, 1378 PORTE_BROADCAST_RCVD, 1379 GFP_KERNEL); 1380 } 1381 } else { 1382 hisi_sas_phy_down(hisi_hba, phy_no, 0, GFP_KERNEL); 1383 } 1384 } 1385 } 1386 1387 static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba) 1388 { 1389 struct hisi_sas_device *sas_dev; 1390 struct domain_device *device; 1391 int i; 1392 1393 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1394 sas_dev = &hisi_hba->devices[i]; 1395 device = sas_dev->sas_device; 1396 1397 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device) 1398 continue; 1399 1400 hisi_sas_init_device(device); 1401 } 1402 } 1403 1404 static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba, 1405 struct asd_sas_port *sas_port, 1406 struct domain_device *device) 1407 { 1408 struct ata_port *ap = device->sata_dev.ap; 1409 struct device *dev = hisi_hba->dev; 1410 int rc = TMF_RESP_FUNC_FAILED; 1411 struct ata_link *link; 1412 u8 fis[20] = {0}; 1413 int i; 1414 1415 for (i = 0; i < hisi_hba->n_phy; i++) { 1416 if (!(sas_port->phy_mask & BIT(i))) 1417 continue; 1418 1419 ata_for_each_link(link, ap, EDGE) { 1420 int pmp = sata_srst_pmp(link); 1421 1422 hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis); 1423 rc = sas_execute_ata_cmd(device, fis, i); 1424 if (rc != TMF_RESP_FUNC_COMPLETE) { 1425 dev_err(dev, "phy%d ata reset failed rc=%d\n", 1426 i, rc); 1427 break; 1428 } 1429 } 1430 } 1431 } 1432 1433 static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba) 1434 { 1435 struct device *dev = hisi_hba->dev; 1436 int port_no, rc, i; 1437 1438 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1439 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; 1440 struct domain_device *device = sas_dev->sas_device; 1441 1442 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device) 1443 continue; 1444 1445 rc = hisi_sas_internal_task_abort_dev(sas_dev, false); 1446 if (rc < 0) 1447 dev_err(dev, "STP reject: abort dev failed %d\n", rc); 1448 } 1449 1450 for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) { 1451 struct hisi_sas_port *port = &hisi_hba->port[port_no]; 1452 struct asd_sas_port *sas_port = &port->sas_port; 1453 struct domain_device *port_dev = sas_port->port_dev; 1454 struct domain_device *device; 1455 1456 if (!port_dev || !dev_is_expander(port_dev->dev_type)) 1457 continue; 1458 1459 /* Try to find a SATA device */ 1460 list_for_each_entry(device, &sas_port->dev_list, 1461 dev_list_node) { 1462 if (dev_is_sata(device)) { 1463 hisi_sas_send_ata_reset_each_phy(hisi_hba, 1464 sas_port, 1465 device); 1466 break; 1467 } 1468 } 1469 } 1470 } 1471 1472 void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba) 1473 { 1474 struct Scsi_Host *shost = hisi_hba->shost; 1475 1476 hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba); 1477 1478 scsi_block_requests(shost); 1479 hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000); 1480 1481 del_timer_sync(&hisi_hba->timer); 1482 1483 set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1484 } 1485 EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare); 1486 1487 void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba) 1488 { 1489 struct Scsi_Host *shost = hisi_hba->shost; 1490 1491 /* Init and wait for PHYs to come up and all libsas event finished. */ 1492 hisi_hba->hw->phys_init(hisi_hba); 1493 msleep(1000); 1494 hisi_sas_refresh_port_id(hisi_hba); 1495 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1496 1497 if (hisi_hba->reject_stp_links_msk) 1498 hisi_sas_terminate_stp_reject(hisi_hba); 1499 hisi_sas_reset_init_all_devices(hisi_hba); 1500 scsi_unblock_requests(shost); 1501 clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); 1502 up(&hisi_hba->sem); 1503 1504 hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state); 1505 } 1506 EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done); 1507 1508 static int hisi_sas_controller_prereset(struct hisi_hba *hisi_hba) 1509 { 1510 if (!hisi_hba->hw->soft_reset) 1511 return -1; 1512 1513 down(&hisi_hba->sem); 1514 if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) { 1515 up(&hisi_hba->sem); 1516 return -1; 1517 } 1518 1519 if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct) 1520 hisi_hba->hw->debugfs_snapshot_regs(hisi_hba); 1521 1522 return 0; 1523 } 1524 1525 static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba) 1526 { 1527 struct device *dev = hisi_hba->dev; 1528 struct Scsi_Host *shost = hisi_hba->shost; 1529 int rc; 1530 1531 dev_info(dev, "controller resetting...\n"); 1532 hisi_sas_controller_reset_prepare(hisi_hba); 1533 1534 rc = hisi_hba->hw->soft_reset(hisi_hba); 1535 if (rc) { 1536 dev_warn(dev, "controller reset failed (%d)\n", rc); 1537 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1538 up(&hisi_hba->sem); 1539 scsi_unblock_requests(shost); 1540 clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); 1541 return rc; 1542 } 1543 1544 hisi_sas_controller_reset_done(hisi_hba); 1545 clear_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags); 1546 dev_info(dev, "controller reset complete\n"); 1547 1548 return 0; 1549 } 1550 1551 static int hisi_sas_abort_task(struct sas_task *task) 1552 { 1553 struct hisi_sas_internal_abort_data internal_abort_data = { false }; 1554 struct domain_device *device = task->dev; 1555 struct hisi_sas_device *sas_dev = device->lldd_dev; 1556 struct hisi_hba *hisi_hba; 1557 struct device *dev; 1558 int rc = TMF_RESP_FUNC_FAILED; 1559 unsigned long flags; 1560 1561 if (!sas_dev) 1562 return TMF_RESP_FUNC_FAILED; 1563 1564 hisi_hba = dev_to_hisi_hba(task->dev); 1565 dev = hisi_hba->dev; 1566 1567 spin_lock_irqsave(&task->task_state_lock, flags); 1568 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 1569 struct hisi_sas_slot *slot = task->lldd_task; 1570 struct hisi_sas_cq *cq; 1571 1572 if (slot) { 1573 /* 1574 * sync irq to avoid free'ing task 1575 * before using task in IO completion 1576 */ 1577 cq = &hisi_hba->cq[slot->dlvry_queue]; 1578 synchronize_irq(cq->irq_no); 1579 } 1580 spin_unlock_irqrestore(&task->task_state_lock, flags); 1581 rc = TMF_RESP_FUNC_COMPLETE; 1582 goto out; 1583 } 1584 task->task_state_flags |= SAS_TASK_STATE_ABORTED; 1585 spin_unlock_irqrestore(&task->task_state_lock, flags); 1586 1587 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { 1588 struct hisi_sas_slot *slot = task->lldd_task; 1589 u16 tag = slot->idx; 1590 int rc2; 1591 1592 rc = sas_abort_task(task, tag); 1593 rc2 = sas_execute_internal_abort_single(device, tag, 1594 slot->dlvry_queue, &internal_abort_data); 1595 if (rc2 < 0) { 1596 dev_err(dev, "abort task: internal abort (%d)\n", rc2); 1597 return TMF_RESP_FUNC_FAILED; 1598 } 1599 1600 /* 1601 * If the TMF finds that the IO is not in the device and also 1602 * the internal abort does not succeed, then it is safe to 1603 * free the slot. 1604 * Note: if the internal abort succeeds then the slot 1605 * will have already been completed 1606 */ 1607 if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) { 1608 if (task->lldd_task) 1609 hisi_sas_do_release_task(hisi_hba, task, slot); 1610 } 1611 } else if (task->task_proto & SAS_PROTOCOL_SATA || 1612 task->task_proto & SAS_PROTOCOL_STP) { 1613 if (task->dev->dev_type == SAS_SATA_DEV) { 1614 rc = hisi_sas_internal_task_abort_dev(sas_dev, false); 1615 if (rc < 0) { 1616 dev_err(dev, "abort task: internal abort failed\n"); 1617 goto out; 1618 } 1619 hisi_sas_dereg_device(hisi_hba, device); 1620 rc = hisi_sas_softreset_ata_disk(device); 1621 } 1622 } else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) { 1623 /* SMP */ 1624 struct hisi_sas_slot *slot = task->lldd_task; 1625 u32 tag = slot->idx; 1626 struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue]; 1627 1628 rc = sas_execute_internal_abort_single(device, 1629 tag, slot->dlvry_queue, 1630 &internal_abort_data); 1631 if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) && 1632 task->lldd_task) { 1633 /* 1634 * sync irq to avoid free'ing task 1635 * before using task in IO completion 1636 */ 1637 synchronize_irq(cq->irq_no); 1638 slot->task = NULL; 1639 } 1640 } 1641 1642 out: 1643 if (rc != TMF_RESP_FUNC_COMPLETE) 1644 dev_notice(dev, "abort task: rc=%d\n", rc); 1645 return rc; 1646 } 1647 1648 static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun) 1649 { 1650 struct hisi_sas_device *sas_dev = device->lldd_dev; 1651 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1652 struct device *dev = hisi_hba->dev; 1653 int rc; 1654 1655 rc = hisi_sas_internal_task_abort_dev(sas_dev, false); 1656 if (rc < 0) { 1657 dev_err(dev, "abort task set: internal abort rc=%d\n", rc); 1658 return TMF_RESP_FUNC_FAILED; 1659 } 1660 hisi_sas_dereg_device(hisi_hba, device); 1661 1662 rc = sas_abort_task_set(device, lun); 1663 if (rc == TMF_RESP_FUNC_COMPLETE) 1664 hisi_sas_release_task(hisi_hba, device); 1665 1666 return rc; 1667 } 1668 1669 static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device) 1670 { 1671 struct sas_phy *local_phy = sas_get_local_phy(device); 1672 struct hisi_sas_device *sas_dev = device->lldd_dev; 1673 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1674 struct sas_ha_struct *sas_ha = &hisi_hba->sha; 1675 int rc, reset_type; 1676 1677 if (!local_phy->enabled) { 1678 sas_put_local_phy(local_phy); 1679 return -ENODEV; 1680 } 1681 1682 if (scsi_is_sas_phy_local(local_phy)) { 1683 struct asd_sas_phy *sas_phy = 1684 sas_ha->sas_phy[local_phy->number]; 1685 struct hisi_sas_phy *phy = 1686 container_of(sas_phy, struct hisi_sas_phy, sas_phy); 1687 unsigned long flags; 1688 1689 spin_lock_irqsave(&phy->lock, flags); 1690 phy->in_reset = 1; 1691 spin_unlock_irqrestore(&phy->lock, flags); 1692 } 1693 1694 reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT || 1695 !dev_is_sata(device)) ? true : false; 1696 1697 rc = sas_phy_reset(local_phy, reset_type); 1698 sas_put_local_phy(local_phy); 1699 1700 if (scsi_is_sas_phy_local(local_phy)) { 1701 struct asd_sas_phy *sas_phy = 1702 sas_ha->sas_phy[local_phy->number]; 1703 struct hisi_sas_phy *phy = 1704 container_of(sas_phy, struct hisi_sas_phy, sas_phy); 1705 unsigned long flags; 1706 1707 spin_lock_irqsave(&phy->lock, flags); 1708 phy->in_reset = 0; 1709 spin_unlock_irqrestore(&phy->lock, flags); 1710 1711 /* report PHY down if timed out */ 1712 if (rc == -ETIMEDOUT) 1713 hisi_sas_phy_down(hisi_hba, sas_phy->id, 0, GFP_KERNEL); 1714 } else if (sas_dev->dev_status != HISI_SAS_DEV_INIT) { 1715 /* 1716 * If in init state, we rely on caller to wait for link to be 1717 * ready; otherwise, except phy reset is fail, delay. 1718 */ 1719 if (!rc) 1720 msleep(2000); 1721 } 1722 1723 return rc; 1724 } 1725 1726 static int hisi_sas_I_T_nexus_reset(struct domain_device *device) 1727 { 1728 struct hisi_sas_device *sas_dev = device->lldd_dev; 1729 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1730 struct device *dev = hisi_hba->dev; 1731 int rc; 1732 1733 rc = hisi_sas_internal_task_abort_dev(sas_dev, false); 1734 if (rc < 0) { 1735 dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc); 1736 return TMF_RESP_FUNC_FAILED; 1737 } 1738 hisi_sas_dereg_device(hisi_hba, device); 1739 1740 rc = hisi_sas_debug_I_T_nexus_reset(device); 1741 if (rc == TMF_RESP_FUNC_COMPLETE && dev_is_sata(device)) { 1742 struct sas_phy *local_phy; 1743 1744 rc = hisi_sas_softreset_ata_disk(device); 1745 switch (rc) { 1746 case -ECOMM: 1747 rc = -ENODEV; 1748 break; 1749 case TMF_RESP_FUNC_FAILED: 1750 case -EMSGSIZE: 1751 case -EIO: 1752 local_phy = sas_get_local_phy(device); 1753 rc = sas_phy_enable(local_phy, 0); 1754 if (!rc) { 1755 local_phy->enabled = 0; 1756 dev_err(dev, "Disabled local phy of ATA disk %016llx due to softreset fail (%d)\n", 1757 SAS_ADDR(device->sas_addr), rc); 1758 rc = -ENODEV; 1759 } 1760 sas_put_local_phy(local_phy); 1761 break; 1762 default: 1763 break; 1764 } 1765 } 1766 1767 if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) 1768 hisi_sas_release_task(hisi_hba, device); 1769 1770 return rc; 1771 } 1772 1773 static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun) 1774 { 1775 struct hisi_sas_device *sas_dev = device->lldd_dev; 1776 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1777 struct device *dev = hisi_hba->dev; 1778 int rc = TMF_RESP_FUNC_FAILED; 1779 1780 /* Clear internal IO and then lu reset */ 1781 rc = hisi_sas_internal_task_abort_dev(sas_dev, false); 1782 if (rc < 0) { 1783 dev_err(dev, "lu_reset: internal abort failed\n"); 1784 goto out; 1785 } 1786 hisi_sas_dereg_device(hisi_hba, device); 1787 1788 if (dev_is_sata(device)) { 1789 struct sas_phy *phy; 1790 1791 phy = sas_get_local_phy(device); 1792 1793 rc = sas_phy_reset(phy, true); 1794 1795 if (rc == 0) 1796 hisi_sas_release_task(hisi_hba, device); 1797 sas_put_local_phy(phy); 1798 } else { 1799 rc = sas_lu_reset(device, lun); 1800 if (rc == TMF_RESP_FUNC_COMPLETE) 1801 hisi_sas_release_task(hisi_hba, device); 1802 } 1803 out: 1804 if (rc != TMF_RESP_FUNC_COMPLETE) 1805 dev_err(dev, "lu_reset: for device[%d]:rc= %d\n", 1806 sas_dev->device_id, rc); 1807 return rc; 1808 } 1809 1810 static void hisi_sas_async_I_T_nexus_reset(void *data, async_cookie_t cookie) 1811 { 1812 struct domain_device *device = data; 1813 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1814 int rc; 1815 1816 rc = hisi_sas_debug_I_T_nexus_reset(device); 1817 if (rc != TMF_RESP_FUNC_COMPLETE) 1818 dev_info(hisi_hba->dev, "I_T_nexus reset fail for dev:%016llx rc=%d\n", 1819 SAS_ADDR(device->sas_addr), rc); 1820 } 1821 1822 static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha) 1823 { 1824 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; 1825 HISI_SAS_DECLARE_RST_WORK_ON_STACK(r); 1826 ASYNC_DOMAIN_EXCLUSIVE(async); 1827 int i; 1828 1829 queue_work(hisi_hba->wq, &r.work); 1830 wait_for_completion(r.completion); 1831 if (!r.done) 1832 return TMF_RESP_FUNC_FAILED; 1833 1834 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1835 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; 1836 struct domain_device *device = sas_dev->sas_device; 1837 1838 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device || 1839 dev_is_expander(device->dev_type)) 1840 continue; 1841 1842 async_schedule_domain(hisi_sas_async_I_T_nexus_reset, 1843 device, &async); 1844 } 1845 1846 async_synchronize_full_domain(&async); 1847 hisi_sas_release_tasks(hisi_hba); 1848 1849 return TMF_RESP_FUNC_COMPLETE; 1850 } 1851 1852 static int hisi_sas_query_task(struct sas_task *task) 1853 { 1854 int rc = TMF_RESP_FUNC_FAILED; 1855 1856 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { 1857 struct hisi_sas_slot *slot = task->lldd_task; 1858 u32 tag = slot->idx; 1859 1860 rc = sas_query_task(task, tag); 1861 switch (rc) { 1862 /* The task is still in Lun, release it then */ 1863 case TMF_RESP_FUNC_SUCC: 1864 /* The task is not in Lun or failed, reset the phy */ 1865 case TMF_RESP_FUNC_FAILED: 1866 case TMF_RESP_FUNC_COMPLETE: 1867 break; 1868 default: 1869 rc = TMF_RESP_FUNC_FAILED; 1870 break; 1871 } 1872 } 1873 return rc; 1874 } 1875 1876 static bool hisi_sas_internal_abort_timeout(struct sas_task *task, 1877 void *data) 1878 { 1879 struct domain_device *device = task->dev; 1880 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1881 struct hisi_sas_internal_abort_data *timeout = data; 1882 1883 if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct) 1884 queue_work(hisi_hba->wq, &hisi_hba->debugfs_work); 1885 1886 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 1887 pr_err("Internal abort: timeout %016llx\n", 1888 SAS_ADDR(device->sas_addr)); 1889 } else { 1890 struct hisi_sas_slot *slot = task->lldd_task; 1891 1892 set_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags); 1893 1894 if (slot) { 1895 struct hisi_sas_cq *cq = 1896 &hisi_hba->cq[slot->dlvry_queue]; 1897 /* 1898 * sync irq to avoid free'ing task 1899 * before using task in IO completion 1900 */ 1901 synchronize_irq(cq->irq_no); 1902 slot->task = NULL; 1903 } 1904 1905 if (timeout->rst_ha_timeout) { 1906 pr_err("Internal abort: timeout and not done %016llx. Queuing reset.\n", 1907 SAS_ADDR(device->sas_addr)); 1908 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 1909 } else { 1910 pr_err("Internal abort: timeout and not done %016llx.\n", 1911 SAS_ADDR(device->sas_addr)); 1912 } 1913 1914 return true; 1915 } 1916 1917 return false; 1918 } 1919 1920 static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy) 1921 { 1922 hisi_sas_port_notify_formed(sas_phy); 1923 } 1924 1925 static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type, 1926 u8 reg_index, u8 reg_count, u8 *write_data) 1927 { 1928 struct hisi_hba *hisi_hba = sha->lldd_ha; 1929 1930 if (!hisi_hba->hw->write_gpio) 1931 return -EOPNOTSUPP; 1932 1933 return hisi_hba->hw->write_gpio(hisi_hba, reg_type, 1934 reg_index, reg_count, write_data); 1935 } 1936 1937 static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy) 1938 { 1939 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1940 struct sas_phy *sphy = sas_phy->phy; 1941 unsigned long flags; 1942 1943 phy->phy_attached = 0; 1944 phy->phy_type = 0; 1945 phy->port = NULL; 1946 1947 spin_lock_irqsave(&phy->lock, flags); 1948 if (phy->enable) 1949 sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; 1950 else 1951 sphy->negotiated_linkrate = SAS_PHY_DISABLED; 1952 spin_unlock_irqrestore(&phy->lock, flags); 1953 } 1954 1955 void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy, 1956 gfp_t gfp_flags) 1957 { 1958 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1959 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1960 struct device *dev = hisi_hba->dev; 1961 1962 if (rdy) { 1963 /* Phy down but ready */ 1964 hisi_sas_bytes_dmaed(hisi_hba, phy_no, gfp_flags); 1965 hisi_sas_port_notify_formed(sas_phy); 1966 } else { 1967 struct hisi_sas_port *port = phy->port; 1968 1969 if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) || 1970 phy->in_reset) { 1971 dev_info(dev, "ignore flutter phy%d down\n", phy_no); 1972 return; 1973 } 1974 /* Phy down and not ready */ 1975 sas_notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL, gfp_flags); 1976 sas_phy_disconnected(sas_phy); 1977 1978 if (port) { 1979 if (phy->phy_type & PORT_TYPE_SAS) { 1980 int port_id = port->id; 1981 1982 if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba, 1983 port_id)) 1984 port->port_attached = 0; 1985 } else if (phy->phy_type & PORT_TYPE_SATA) 1986 port->port_attached = 0; 1987 } 1988 hisi_sas_phy_disconnected(phy); 1989 } 1990 } 1991 EXPORT_SYMBOL_GPL(hisi_sas_phy_down); 1992 1993 void hisi_sas_sync_irqs(struct hisi_hba *hisi_hba) 1994 { 1995 int i; 1996 1997 for (i = 0; i < hisi_hba->cq_nvecs; i++) { 1998 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 1999 2000 synchronize_irq(cq->irq_no); 2001 } 2002 } 2003 EXPORT_SYMBOL_GPL(hisi_sas_sync_irqs); 2004 2005 int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type) 2006 { 2007 struct hisi_hba *hisi_hba = shost_priv(shost); 2008 2009 if (reset_type != SCSI_ADAPTER_RESET) 2010 return -EOPNOTSUPP; 2011 2012 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 2013 2014 return 0; 2015 } 2016 EXPORT_SYMBOL_GPL(hisi_sas_host_reset); 2017 2018 struct scsi_transport_template *hisi_sas_stt; 2019 EXPORT_SYMBOL_GPL(hisi_sas_stt); 2020 2021 static struct sas_domain_function_template hisi_sas_transport_ops = { 2022 .lldd_dev_found = hisi_sas_dev_found, 2023 .lldd_dev_gone = hisi_sas_dev_gone, 2024 .lldd_execute_task = hisi_sas_queue_command, 2025 .lldd_control_phy = hisi_sas_control_phy, 2026 .lldd_abort_task = hisi_sas_abort_task, 2027 .lldd_abort_task_set = hisi_sas_abort_task_set, 2028 .lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset, 2029 .lldd_lu_reset = hisi_sas_lu_reset, 2030 .lldd_query_task = hisi_sas_query_task, 2031 .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha, 2032 .lldd_port_formed = hisi_sas_port_formed, 2033 .lldd_write_gpio = hisi_sas_write_gpio, 2034 .lldd_tmf_aborted = hisi_sas_tmf_aborted, 2035 .lldd_abort_timeout = hisi_sas_internal_abort_timeout, 2036 }; 2037 2038 void hisi_sas_init_mem(struct hisi_hba *hisi_hba) 2039 { 2040 int i, s, j, max_command_entries = HISI_SAS_MAX_COMMANDS; 2041 struct hisi_sas_breakpoint *sata_breakpoint = hisi_hba->sata_breakpoint; 2042 2043 for (i = 0; i < hisi_hba->queue_count; i++) { 2044 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 2045 struct hisi_sas_dq *dq = &hisi_hba->dq[i]; 2046 struct hisi_sas_cmd_hdr *cmd_hdr = hisi_hba->cmd_hdr[i]; 2047 2048 s = sizeof(struct hisi_sas_cmd_hdr); 2049 for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++) 2050 memset(&cmd_hdr[j], 0, s); 2051 2052 dq->wr_point = 0; 2053 2054 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; 2055 memset(hisi_hba->complete_hdr[i], 0, s); 2056 cq->rd_point = 0; 2057 } 2058 2059 s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy; 2060 memset(hisi_hba->initial_fis, 0, s); 2061 2062 s = max_command_entries * sizeof(struct hisi_sas_iost); 2063 memset(hisi_hba->iost, 0, s); 2064 2065 s = max_command_entries * sizeof(struct hisi_sas_breakpoint); 2066 memset(hisi_hba->breakpoint, 0, s); 2067 2068 s = sizeof(struct hisi_sas_sata_breakpoint); 2069 for (j = 0; j < HISI_SAS_MAX_ITCT_ENTRIES; j++) 2070 memset(&sata_breakpoint[j], 0, s); 2071 } 2072 EXPORT_SYMBOL_GPL(hisi_sas_init_mem); 2073 2074 int hisi_sas_alloc(struct hisi_hba *hisi_hba) 2075 { 2076 struct device *dev = hisi_hba->dev; 2077 int i, j, s, max_command_entries = HISI_SAS_MAX_COMMANDS; 2078 int max_command_entries_ru, sz_slot_buf_ru; 2079 int blk_cnt, slots_per_blk; 2080 2081 sema_init(&hisi_hba->sem, 1); 2082 spin_lock_init(&hisi_hba->lock); 2083 for (i = 0; i < hisi_hba->n_phy; i++) { 2084 hisi_sas_phy_init(hisi_hba, i); 2085 hisi_hba->port[i].port_attached = 0; 2086 hisi_hba->port[i].id = -1; 2087 } 2088 2089 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 2090 hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED; 2091 hisi_hba->devices[i].device_id = i; 2092 hisi_hba->devices[i].dev_status = HISI_SAS_DEV_INIT; 2093 } 2094 2095 for (i = 0; i < hisi_hba->queue_count; i++) { 2096 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 2097 struct hisi_sas_dq *dq = &hisi_hba->dq[i]; 2098 2099 /* Completion queue structure */ 2100 cq->id = i; 2101 cq->hisi_hba = hisi_hba; 2102 2103 /* Delivery queue structure */ 2104 spin_lock_init(&dq->lock); 2105 INIT_LIST_HEAD(&dq->list); 2106 dq->id = i; 2107 dq->hisi_hba = hisi_hba; 2108 2109 /* Delivery queue */ 2110 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS; 2111 hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s, 2112 &hisi_hba->cmd_hdr_dma[i], 2113 GFP_KERNEL); 2114 if (!hisi_hba->cmd_hdr[i]) 2115 goto err_out; 2116 2117 /* Completion queue */ 2118 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; 2119 hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s, 2120 &hisi_hba->complete_hdr_dma[i], 2121 GFP_KERNEL); 2122 if (!hisi_hba->complete_hdr[i]) 2123 goto err_out; 2124 } 2125 2126 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct); 2127 hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma, 2128 GFP_KERNEL); 2129 if (!hisi_hba->itct) 2130 goto err_out; 2131 2132 hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries, 2133 sizeof(struct hisi_sas_slot), 2134 GFP_KERNEL); 2135 if (!hisi_hba->slot_info) 2136 goto err_out; 2137 2138 /* roundup to avoid overly large block size */ 2139 max_command_entries_ru = roundup(max_command_entries, 64); 2140 if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK) 2141 sz_slot_buf_ru = sizeof(struct hisi_sas_slot_dif_buf_table); 2142 else 2143 sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table); 2144 sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64); 2145 s = max(lcm(max_command_entries_ru, sz_slot_buf_ru), PAGE_SIZE); 2146 blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s; 2147 slots_per_blk = s / sz_slot_buf_ru; 2148 2149 for (i = 0; i < blk_cnt; i++) { 2150 int slot_index = i * slots_per_blk; 2151 dma_addr_t buf_dma; 2152 void *buf; 2153 2154 buf = dmam_alloc_coherent(dev, s, &buf_dma, 2155 GFP_KERNEL); 2156 if (!buf) 2157 goto err_out; 2158 2159 for (j = 0; j < slots_per_blk; j++, slot_index++) { 2160 struct hisi_sas_slot *slot; 2161 2162 slot = &hisi_hba->slot_info[slot_index]; 2163 slot->buf = buf; 2164 slot->buf_dma = buf_dma; 2165 slot->idx = slot_index; 2166 2167 buf += sz_slot_buf_ru; 2168 buf_dma += sz_slot_buf_ru; 2169 } 2170 } 2171 2172 s = max_command_entries * sizeof(struct hisi_sas_iost); 2173 hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma, 2174 GFP_KERNEL); 2175 if (!hisi_hba->iost) 2176 goto err_out; 2177 2178 s = max_command_entries * sizeof(struct hisi_sas_breakpoint); 2179 hisi_hba->breakpoint = dmam_alloc_coherent(dev, s, 2180 &hisi_hba->breakpoint_dma, 2181 GFP_KERNEL); 2182 if (!hisi_hba->breakpoint) 2183 goto err_out; 2184 2185 s = hisi_hba->slot_index_count = max_command_entries; 2186 hisi_hba->slot_index_tags = devm_bitmap_zalloc(dev, s, GFP_KERNEL); 2187 if (!hisi_hba->slot_index_tags) 2188 goto err_out; 2189 2190 s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS; 2191 hisi_hba->initial_fis = dmam_alloc_coherent(dev, s, 2192 &hisi_hba->initial_fis_dma, 2193 GFP_KERNEL); 2194 if (!hisi_hba->initial_fis) 2195 goto err_out; 2196 2197 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint); 2198 hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s, 2199 &hisi_hba->sata_breakpoint_dma, 2200 GFP_KERNEL); 2201 if (!hisi_hba->sata_breakpoint) 2202 goto err_out; 2203 2204 hisi_hba->last_slot_index = HISI_SAS_UNRESERVED_IPTT; 2205 2206 hisi_hba->wq = create_singlethread_workqueue(dev_name(dev)); 2207 if (!hisi_hba->wq) { 2208 dev_err(dev, "sas_alloc: failed to create workqueue\n"); 2209 goto err_out; 2210 } 2211 2212 return 0; 2213 err_out: 2214 return -ENOMEM; 2215 } 2216 EXPORT_SYMBOL_GPL(hisi_sas_alloc); 2217 2218 void hisi_sas_free(struct hisi_hba *hisi_hba) 2219 { 2220 int i; 2221 2222 for (i = 0; i < hisi_hba->n_phy; i++) { 2223 struct hisi_sas_phy *phy = &hisi_hba->phy[i]; 2224 2225 del_timer_sync(&phy->timer); 2226 } 2227 2228 if (hisi_hba->wq) 2229 destroy_workqueue(hisi_hba->wq); 2230 } 2231 EXPORT_SYMBOL_GPL(hisi_sas_free); 2232 2233 void hisi_sas_rst_work_handler(struct work_struct *work) 2234 { 2235 struct hisi_hba *hisi_hba = 2236 container_of(work, struct hisi_hba, rst_work); 2237 2238 if (hisi_sas_controller_prereset(hisi_hba)) 2239 return; 2240 2241 hisi_sas_controller_reset(hisi_hba); 2242 } 2243 EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler); 2244 2245 void hisi_sas_sync_rst_work_handler(struct work_struct *work) 2246 { 2247 struct hisi_sas_rst *rst = 2248 container_of(work, struct hisi_sas_rst, work); 2249 2250 if (hisi_sas_controller_prereset(rst->hisi_hba)) 2251 goto rst_complete; 2252 2253 if (!hisi_sas_controller_reset(rst->hisi_hba)) 2254 rst->done = true; 2255 rst_complete: 2256 complete(rst->completion); 2257 } 2258 EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler); 2259 2260 int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba) 2261 { 2262 struct device *dev = hisi_hba->dev; 2263 struct platform_device *pdev = hisi_hba->platform_dev; 2264 struct device_node *np = pdev ? pdev->dev.of_node : NULL; 2265 struct clk *refclk; 2266 2267 if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr, 2268 SAS_ADDR_SIZE)) { 2269 dev_err(dev, "could not get property sas-addr\n"); 2270 return -ENOENT; 2271 } 2272 2273 if (np) { 2274 /* 2275 * These properties are only required for platform device-based 2276 * controller with DT firmware. 2277 */ 2278 hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np, 2279 "hisilicon,sas-syscon"); 2280 if (IS_ERR(hisi_hba->ctrl)) { 2281 dev_err(dev, "could not get syscon\n"); 2282 return -ENOENT; 2283 } 2284 2285 if (device_property_read_u32(dev, "ctrl-reset-reg", 2286 &hisi_hba->ctrl_reset_reg)) { 2287 dev_err(dev, "could not get property ctrl-reset-reg\n"); 2288 return -ENOENT; 2289 } 2290 2291 if (device_property_read_u32(dev, "ctrl-reset-sts-reg", 2292 &hisi_hba->ctrl_reset_sts_reg)) { 2293 dev_err(dev, "could not get property ctrl-reset-sts-reg\n"); 2294 return -ENOENT; 2295 } 2296 2297 if (device_property_read_u32(dev, "ctrl-clock-ena-reg", 2298 &hisi_hba->ctrl_clock_ena_reg)) { 2299 dev_err(dev, "could not get property ctrl-clock-ena-reg\n"); 2300 return -ENOENT; 2301 } 2302 } 2303 2304 refclk = devm_clk_get(dev, NULL); 2305 if (IS_ERR(refclk)) 2306 dev_dbg(dev, "no ref clk property\n"); 2307 else 2308 hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000; 2309 2310 if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) { 2311 dev_err(dev, "could not get property phy-count\n"); 2312 return -ENOENT; 2313 } 2314 2315 if (device_property_read_u32(dev, "queue-count", 2316 &hisi_hba->queue_count)) { 2317 dev_err(dev, "could not get property queue-count\n"); 2318 return -ENOENT; 2319 } 2320 2321 return 0; 2322 } 2323 EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info); 2324 2325 static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, 2326 const struct hisi_sas_hw *hw) 2327 { 2328 struct resource *res; 2329 struct Scsi_Host *shost; 2330 struct hisi_hba *hisi_hba; 2331 struct device *dev = &pdev->dev; 2332 int error; 2333 2334 shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba)); 2335 if (!shost) { 2336 dev_err(dev, "scsi host alloc failed\n"); 2337 return NULL; 2338 } 2339 hisi_hba = shost_priv(shost); 2340 2341 INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler); 2342 hisi_hba->hw = hw; 2343 hisi_hba->dev = dev; 2344 hisi_hba->platform_dev = pdev; 2345 hisi_hba->shost = shost; 2346 SHOST_TO_SAS_HA(shost) = &hisi_hba->sha; 2347 2348 timer_setup(&hisi_hba->timer, NULL, 0); 2349 2350 if (hisi_sas_get_fw_info(hisi_hba) < 0) 2351 goto err_out; 2352 2353 error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 2354 if (error) { 2355 dev_err(dev, "No usable DMA addressing method\n"); 2356 goto err_out; 2357 } 2358 2359 hisi_hba->regs = devm_platform_ioremap_resource(pdev, 0); 2360 if (IS_ERR(hisi_hba->regs)) 2361 goto err_out; 2362 2363 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 2364 if (res) { 2365 hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res); 2366 if (IS_ERR(hisi_hba->sgpio_regs)) 2367 goto err_out; 2368 } 2369 2370 if (hisi_sas_alloc(hisi_hba)) { 2371 hisi_sas_free(hisi_hba); 2372 goto err_out; 2373 } 2374 2375 return shost; 2376 err_out: 2377 scsi_host_put(shost); 2378 dev_err(dev, "shost alloc failed\n"); 2379 return NULL; 2380 } 2381 2382 static int hisi_sas_interrupt_preinit(struct hisi_hba *hisi_hba) 2383 { 2384 if (hisi_hba->hw->interrupt_preinit) 2385 return hisi_hba->hw->interrupt_preinit(hisi_hba); 2386 return 0; 2387 } 2388 2389 int hisi_sas_probe(struct platform_device *pdev, 2390 const struct hisi_sas_hw *hw) 2391 { 2392 struct Scsi_Host *shost; 2393 struct hisi_hba *hisi_hba; 2394 struct device *dev = &pdev->dev; 2395 struct asd_sas_phy **arr_phy; 2396 struct asd_sas_port **arr_port; 2397 struct sas_ha_struct *sha; 2398 int rc, phy_nr, port_nr, i; 2399 2400 shost = hisi_sas_shost_alloc(pdev, hw); 2401 if (!shost) 2402 return -ENOMEM; 2403 2404 sha = SHOST_TO_SAS_HA(shost); 2405 hisi_hba = shost_priv(shost); 2406 platform_set_drvdata(pdev, sha); 2407 2408 phy_nr = port_nr = hisi_hba->n_phy; 2409 2410 arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL); 2411 arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL); 2412 if (!arr_phy || !arr_port) { 2413 rc = -ENOMEM; 2414 goto err_out_ha; 2415 } 2416 2417 sha->sas_phy = arr_phy; 2418 sha->sas_port = arr_port; 2419 sha->lldd_ha = hisi_hba; 2420 2421 shost->transportt = hisi_sas_stt; 2422 shost->max_id = HISI_SAS_MAX_DEVICES; 2423 shost->max_lun = ~0; 2424 shost->max_channel = 1; 2425 shost->max_cmd_len = 16; 2426 if (hisi_hba->hw->slot_index_alloc) { 2427 shost->can_queue = HISI_SAS_MAX_COMMANDS; 2428 shost->cmd_per_lun = HISI_SAS_MAX_COMMANDS; 2429 } else { 2430 shost->can_queue = HISI_SAS_UNRESERVED_IPTT; 2431 shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT; 2432 } 2433 2434 sha->sas_ha_name = DRV_NAME; 2435 sha->dev = hisi_hba->dev; 2436 sha->lldd_module = THIS_MODULE; 2437 sha->sas_addr = &hisi_hba->sas_addr[0]; 2438 sha->num_phys = hisi_hba->n_phy; 2439 sha->core.shost = hisi_hba->shost; 2440 2441 for (i = 0; i < hisi_hba->n_phy; i++) { 2442 sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy; 2443 sha->sas_port[i] = &hisi_hba->port[i].sas_port; 2444 } 2445 2446 rc = hisi_sas_interrupt_preinit(hisi_hba); 2447 if (rc) 2448 goto err_out_ha; 2449 2450 rc = scsi_add_host(shost, &pdev->dev); 2451 if (rc) 2452 goto err_out_ha; 2453 2454 rc = sas_register_ha(sha); 2455 if (rc) 2456 goto err_out_register_ha; 2457 2458 rc = hisi_hba->hw->hw_init(hisi_hba); 2459 if (rc) 2460 goto err_out_hw_init; 2461 2462 scsi_scan_host(shost); 2463 2464 return 0; 2465 2466 err_out_hw_init: 2467 sas_unregister_ha(sha); 2468 err_out_register_ha: 2469 scsi_remove_host(shost); 2470 err_out_ha: 2471 hisi_sas_free(hisi_hba); 2472 scsi_host_put(shost); 2473 return rc; 2474 } 2475 EXPORT_SYMBOL_GPL(hisi_sas_probe); 2476 2477 int hisi_sas_remove(struct platform_device *pdev) 2478 { 2479 struct sas_ha_struct *sha = platform_get_drvdata(pdev); 2480 struct hisi_hba *hisi_hba = sha->lldd_ha; 2481 struct Scsi_Host *shost = sha->core.shost; 2482 2483 del_timer_sync(&hisi_hba->timer); 2484 2485 sas_unregister_ha(sha); 2486 sas_remove_host(sha->core.shost); 2487 2488 hisi_sas_free(hisi_hba); 2489 scsi_host_put(shost); 2490 return 0; 2491 } 2492 EXPORT_SYMBOL_GPL(hisi_sas_remove); 2493 2494 #if IS_ENABLED(CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE) 2495 #define DEBUGFS_ENABLE_DEFAULT "enabled" 2496 bool hisi_sas_debugfs_enable = true; 2497 u32 hisi_sas_debugfs_dump_count = 50; 2498 #else 2499 #define DEBUGFS_ENABLE_DEFAULT "disabled" 2500 bool hisi_sas_debugfs_enable; 2501 u32 hisi_sas_debugfs_dump_count = 1; 2502 #endif 2503 2504 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable); 2505 module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444); 2506 MODULE_PARM_DESC(hisi_sas_debugfs_enable, 2507 "Enable driver debugfs (default "DEBUGFS_ENABLE_DEFAULT")"); 2508 2509 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dump_count); 2510 module_param_named(debugfs_dump_count, hisi_sas_debugfs_dump_count, uint, 0444); 2511 MODULE_PARM_DESC(hisi_sas_debugfs_dump_count, "Number of debugfs dumps to allow"); 2512 2513 struct dentry *hisi_sas_debugfs_dir; 2514 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dir); 2515 2516 static __init int hisi_sas_init(void) 2517 { 2518 hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops); 2519 if (!hisi_sas_stt) 2520 return -ENOMEM; 2521 2522 if (hisi_sas_debugfs_enable) { 2523 hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL); 2524 if (hisi_sas_debugfs_dump_count > HISI_SAS_MAX_DEBUGFS_DUMP) { 2525 pr_info("hisi_sas: Limiting debugfs dump count\n"); 2526 hisi_sas_debugfs_dump_count = HISI_SAS_MAX_DEBUGFS_DUMP; 2527 } 2528 } 2529 2530 return 0; 2531 } 2532 2533 static __exit void hisi_sas_exit(void) 2534 { 2535 sas_release_transport(hisi_sas_stt); 2536 2537 debugfs_remove(hisi_sas_debugfs_dir); 2538 } 2539 2540 module_init(hisi_sas_init); 2541 module_exit(hisi_sas_exit); 2542 2543 MODULE_LICENSE("GPL"); 2544 MODULE_AUTHOR("John Garry <john.garry@huawei.com>"); 2545 MODULE_DESCRIPTION("HISILICON SAS controller driver"); 2546 MODULE_ALIAS("platform:" DRV_NAME); 2547