1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Copyright (c) 2015 Linaro Ltd. 4 * Copyright (c) 2015 Hisilicon Limited. 5 */ 6 7 #include "hisi_sas.h" 8 #define DRV_NAME "hisi_sas" 9 10 #define DEV_IS_GONE(dev) \ 11 ((!dev) || (dev->dev_type == SAS_PHY_UNUSED)) 12 13 static int hisi_sas_softreset_ata_disk(struct domain_device *device); 14 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, 15 void *funcdata); 16 static void hisi_sas_release_task(struct hisi_hba *hisi_hba, 17 struct domain_device *device); 18 static void hisi_sas_dev_gone(struct domain_device *device); 19 20 struct hisi_sas_internal_abort_data { 21 bool rst_ha_timeout; /* reset the HA for timeout */ 22 }; 23 24 u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction) 25 { 26 switch (fis->command) { 27 case ATA_CMD_FPDMA_WRITE: 28 case ATA_CMD_FPDMA_READ: 29 case ATA_CMD_FPDMA_RECV: 30 case ATA_CMD_FPDMA_SEND: 31 case ATA_CMD_NCQ_NON_DATA: 32 return HISI_SAS_SATA_PROTOCOL_FPDMA; 33 34 case ATA_CMD_DOWNLOAD_MICRO: 35 case ATA_CMD_ID_ATA: 36 case ATA_CMD_PMP_READ: 37 case ATA_CMD_READ_LOG_EXT: 38 case ATA_CMD_PIO_READ: 39 case ATA_CMD_PIO_READ_EXT: 40 case ATA_CMD_PMP_WRITE: 41 case ATA_CMD_WRITE_LOG_EXT: 42 case ATA_CMD_PIO_WRITE: 43 case ATA_CMD_PIO_WRITE_EXT: 44 return HISI_SAS_SATA_PROTOCOL_PIO; 45 46 case ATA_CMD_DSM: 47 case ATA_CMD_DOWNLOAD_MICRO_DMA: 48 case ATA_CMD_PMP_READ_DMA: 49 case ATA_CMD_PMP_WRITE_DMA: 50 case ATA_CMD_READ: 51 case ATA_CMD_READ_EXT: 52 case ATA_CMD_READ_LOG_DMA_EXT: 53 case ATA_CMD_READ_STREAM_DMA_EXT: 54 case ATA_CMD_TRUSTED_RCV_DMA: 55 case ATA_CMD_TRUSTED_SND_DMA: 56 case ATA_CMD_WRITE: 57 case ATA_CMD_WRITE_EXT: 58 case ATA_CMD_WRITE_FUA_EXT: 59 case ATA_CMD_WRITE_QUEUED: 60 case ATA_CMD_WRITE_LOG_DMA_EXT: 61 case ATA_CMD_WRITE_STREAM_DMA_EXT: 62 case ATA_CMD_ZAC_MGMT_IN: 63 return HISI_SAS_SATA_PROTOCOL_DMA; 64 65 case ATA_CMD_CHK_POWER: 66 case ATA_CMD_DEV_RESET: 67 case ATA_CMD_EDD: 68 case ATA_CMD_FLUSH: 69 case ATA_CMD_FLUSH_EXT: 70 case ATA_CMD_VERIFY: 71 case ATA_CMD_VERIFY_EXT: 72 case ATA_CMD_SET_FEATURES: 73 case ATA_CMD_STANDBY: 74 case ATA_CMD_STANDBYNOW1: 75 case ATA_CMD_ZAC_MGMT_OUT: 76 return HISI_SAS_SATA_PROTOCOL_NONDATA; 77 78 case ATA_CMD_SET_MAX: 79 switch (fis->features) { 80 case ATA_SET_MAX_PASSWD: 81 case ATA_SET_MAX_LOCK: 82 return HISI_SAS_SATA_PROTOCOL_PIO; 83 84 case ATA_SET_MAX_PASSWD_DMA: 85 case ATA_SET_MAX_UNLOCK_DMA: 86 return HISI_SAS_SATA_PROTOCOL_DMA; 87 88 default: 89 return HISI_SAS_SATA_PROTOCOL_NONDATA; 90 } 91 92 default: 93 { 94 if (direction == DMA_NONE) 95 return HISI_SAS_SATA_PROTOCOL_NONDATA; 96 return HISI_SAS_SATA_PROTOCOL_PIO; 97 } 98 } 99 } 100 EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol); 101 102 void hisi_sas_sata_done(struct sas_task *task, 103 struct hisi_sas_slot *slot) 104 { 105 struct task_status_struct *ts = &task->task_status; 106 struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf; 107 struct hisi_sas_status_buffer *status_buf = 108 hisi_sas_status_buf_addr_mem(slot); 109 u8 *iu = &status_buf->iu[0]; 110 struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu; 111 112 resp->frame_len = sizeof(struct dev_to_host_fis); 113 memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis)); 114 115 ts->buf_valid_size = sizeof(*resp); 116 } 117 EXPORT_SYMBOL_GPL(hisi_sas_sata_done); 118 119 /* 120 * This function assumes linkrate mask fits in 8 bits, which it 121 * does for all HW versions supported. 122 */ 123 u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max) 124 { 125 u8 rate = 0; 126 int i; 127 128 max -= SAS_LINK_RATE_1_5_GBPS; 129 for (i = 0; i <= max; i++) 130 rate |= 1 << (i * 2); 131 return rate; 132 } 133 EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask); 134 135 static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device) 136 { 137 return device->port->ha->lldd_ha; 138 } 139 140 struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port) 141 { 142 return container_of(sas_port, struct hisi_sas_port, sas_port); 143 } 144 EXPORT_SYMBOL_GPL(to_hisi_sas_port); 145 146 void hisi_sas_stop_phys(struct hisi_hba *hisi_hba) 147 { 148 int phy_no; 149 150 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) 151 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 152 } 153 EXPORT_SYMBOL_GPL(hisi_sas_stop_phys); 154 155 static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx) 156 { 157 void *bitmap = hisi_hba->slot_index_tags; 158 159 __clear_bit(slot_idx, bitmap); 160 } 161 162 static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx) 163 { 164 if (hisi_hba->hw->slot_index_alloc || 165 slot_idx < HISI_SAS_RESERVED_IPTT) { 166 spin_lock(&hisi_hba->lock); 167 hisi_sas_slot_index_clear(hisi_hba, slot_idx); 168 spin_unlock(&hisi_hba->lock); 169 } 170 } 171 172 static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx) 173 { 174 void *bitmap = hisi_hba->slot_index_tags; 175 176 __set_bit(slot_idx, bitmap); 177 } 178 179 static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, 180 struct request *rq) 181 { 182 int index; 183 void *bitmap = hisi_hba->slot_index_tags; 184 185 if (rq) 186 return rq->tag + HISI_SAS_RESERVED_IPTT; 187 188 spin_lock(&hisi_hba->lock); 189 index = find_next_zero_bit(bitmap, HISI_SAS_RESERVED_IPTT, 190 hisi_hba->last_slot_index + 1); 191 if (index >= HISI_SAS_RESERVED_IPTT) { 192 index = find_next_zero_bit(bitmap, 193 HISI_SAS_RESERVED_IPTT, 194 0); 195 if (index >= HISI_SAS_RESERVED_IPTT) { 196 spin_unlock(&hisi_hba->lock); 197 return -SAS_QUEUE_FULL; 198 } 199 } 200 hisi_sas_slot_index_set(hisi_hba, index); 201 hisi_hba->last_slot_index = index; 202 spin_unlock(&hisi_hba->lock); 203 204 return index; 205 } 206 207 void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, 208 struct hisi_sas_slot *slot) 209 { 210 int device_id = slot->device_id; 211 struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id]; 212 213 if (task) { 214 struct device *dev = hisi_hba->dev; 215 216 if (!task->lldd_task) 217 return; 218 219 task->lldd_task = NULL; 220 221 if (!sas_protocol_ata(task->task_proto)) { 222 if (slot->n_elem) { 223 if (task->task_proto & SAS_PROTOCOL_SSP) 224 dma_unmap_sg(dev, task->scatter, 225 task->num_scatter, 226 task->data_dir); 227 else 228 dma_unmap_sg(dev, &task->smp_task.smp_req, 229 1, DMA_TO_DEVICE); 230 } 231 if (slot->n_elem_dif) { 232 struct sas_ssp_task *ssp_task = &task->ssp_task; 233 struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; 234 235 dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), 236 scsi_prot_sg_count(scsi_cmnd), 237 task->data_dir); 238 } 239 } 240 } 241 242 spin_lock(&sas_dev->lock); 243 list_del_init(&slot->entry); 244 spin_unlock(&sas_dev->lock); 245 246 memset(slot, 0, offsetof(struct hisi_sas_slot, buf)); 247 248 hisi_sas_slot_index_free(hisi_hba, slot->idx); 249 } 250 EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free); 251 252 static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba, 253 struct hisi_sas_slot *slot) 254 { 255 hisi_hba->hw->prep_smp(hisi_hba, slot); 256 } 257 258 static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba, 259 struct hisi_sas_slot *slot) 260 { 261 hisi_hba->hw->prep_ssp(hisi_hba, slot); 262 } 263 264 static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba, 265 struct hisi_sas_slot *slot) 266 { 267 hisi_hba->hw->prep_stp(hisi_hba, slot); 268 } 269 270 static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba, 271 struct hisi_sas_slot *slot) 272 { 273 hisi_hba->hw->prep_abort(hisi_hba, slot); 274 } 275 276 static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba, 277 struct sas_task *task, int n_elem) 278 { 279 struct device *dev = hisi_hba->dev; 280 281 if (!sas_protocol_ata(task->task_proto) && n_elem) { 282 if (task->num_scatter) { 283 dma_unmap_sg(dev, task->scatter, task->num_scatter, 284 task->data_dir); 285 } else if (task->task_proto & SAS_PROTOCOL_SMP) { 286 dma_unmap_sg(dev, &task->smp_task.smp_req, 287 1, DMA_TO_DEVICE); 288 } 289 } 290 } 291 292 static int hisi_sas_dma_map(struct hisi_hba *hisi_hba, 293 struct sas_task *task, int *n_elem) 294 { 295 struct device *dev = hisi_hba->dev; 296 int rc; 297 298 if (sas_protocol_ata(task->task_proto)) { 299 *n_elem = task->num_scatter; 300 } else { 301 unsigned int req_len; 302 303 if (task->num_scatter) { 304 *n_elem = dma_map_sg(dev, task->scatter, 305 task->num_scatter, task->data_dir); 306 if (!*n_elem) { 307 rc = -ENOMEM; 308 goto prep_out; 309 } 310 } else if (task->task_proto & SAS_PROTOCOL_SMP) { 311 *n_elem = dma_map_sg(dev, &task->smp_task.smp_req, 312 1, DMA_TO_DEVICE); 313 if (!*n_elem) { 314 rc = -ENOMEM; 315 goto prep_out; 316 } 317 req_len = sg_dma_len(&task->smp_task.smp_req); 318 if (req_len & 0x3) { 319 rc = -EINVAL; 320 goto err_out_dma_unmap; 321 } 322 } 323 } 324 325 if (*n_elem > HISI_SAS_SGE_PAGE_CNT) { 326 dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT\n", 327 *n_elem); 328 rc = -EINVAL; 329 goto err_out_dma_unmap; 330 } 331 return 0; 332 333 err_out_dma_unmap: 334 /* It would be better to call dma_unmap_sg() here, but it's messy */ 335 hisi_sas_dma_unmap(hisi_hba, task, *n_elem); 336 prep_out: 337 return rc; 338 } 339 340 static void hisi_sas_dif_dma_unmap(struct hisi_hba *hisi_hba, 341 struct sas_task *task, int n_elem_dif) 342 { 343 struct device *dev = hisi_hba->dev; 344 345 if (n_elem_dif) { 346 struct sas_ssp_task *ssp_task = &task->ssp_task; 347 struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; 348 349 dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), 350 scsi_prot_sg_count(scsi_cmnd), 351 task->data_dir); 352 } 353 } 354 355 static int hisi_sas_dif_dma_map(struct hisi_hba *hisi_hba, 356 int *n_elem_dif, struct sas_task *task) 357 { 358 struct device *dev = hisi_hba->dev; 359 struct sas_ssp_task *ssp_task; 360 struct scsi_cmnd *scsi_cmnd; 361 int rc; 362 363 if (task->num_scatter) { 364 ssp_task = &task->ssp_task; 365 scsi_cmnd = ssp_task->cmd; 366 367 if (scsi_prot_sg_count(scsi_cmnd)) { 368 *n_elem_dif = dma_map_sg(dev, 369 scsi_prot_sglist(scsi_cmnd), 370 scsi_prot_sg_count(scsi_cmnd), 371 task->data_dir); 372 373 if (!*n_elem_dif) 374 return -ENOMEM; 375 376 if (*n_elem_dif > HISI_SAS_SGE_DIF_PAGE_CNT) { 377 dev_err(dev, "task prep: n_elem_dif(%d) too large\n", 378 *n_elem_dif); 379 rc = -EINVAL; 380 goto err_out_dif_dma_unmap; 381 } 382 } 383 } 384 385 return 0; 386 387 err_out_dif_dma_unmap: 388 dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), 389 scsi_prot_sg_count(scsi_cmnd), task->data_dir); 390 return rc; 391 } 392 393 static 394 void hisi_sas_task_deliver(struct hisi_hba *hisi_hba, 395 struct hisi_sas_slot *slot, 396 struct hisi_sas_dq *dq, 397 struct hisi_sas_device *sas_dev) 398 { 399 struct hisi_sas_cmd_hdr *cmd_hdr_base; 400 int dlvry_queue_slot, dlvry_queue; 401 struct sas_task *task = slot->task; 402 int wr_q_index; 403 404 spin_lock(&dq->lock); 405 wr_q_index = dq->wr_point; 406 dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS; 407 list_add_tail(&slot->delivery, &dq->list); 408 spin_unlock(&dq->lock); 409 spin_lock(&sas_dev->lock); 410 list_add_tail(&slot->entry, &sas_dev->list); 411 spin_unlock(&sas_dev->lock); 412 413 dlvry_queue = dq->id; 414 dlvry_queue_slot = wr_q_index; 415 416 slot->device_id = sas_dev->device_id; 417 slot->dlvry_queue = dlvry_queue; 418 slot->dlvry_queue_slot = dlvry_queue_slot; 419 cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue]; 420 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot]; 421 422 task->lldd_task = slot; 423 424 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr)); 425 memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ); 426 memset(hisi_sas_status_buf_addr_mem(slot), 0, 427 sizeof(struct hisi_sas_err_record)); 428 429 switch (task->task_proto) { 430 case SAS_PROTOCOL_SMP: 431 hisi_sas_task_prep_smp(hisi_hba, slot); 432 break; 433 case SAS_PROTOCOL_SSP: 434 hisi_sas_task_prep_ssp(hisi_hba, slot); 435 break; 436 case SAS_PROTOCOL_SATA: 437 case SAS_PROTOCOL_STP: 438 case SAS_PROTOCOL_STP_ALL: 439 hisi_sas_task_prep_ata(hisi_hba, slot); 440 break; 441 case SAS_PROTOCOL_INTERNAL_ABORT: 442 hisi_sas_task_prep_abort(hisi_hba, slot); 443 break; 444 default: 445 return; 446 } 447 448 /* Make slot memories observable before marking as ready */ 449 smp_wmb(); 450 WRITE_ONCE(slot->ready, 1); 451 452 spin_lock(&dq->lock); 453 hisi_hba->hw->start_delivery(dq); 454 spin_unlock(&dq->lock); 455 } 456 457 static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags) 458 { 459 int n_elem = 0, n_elem_dif = 0; 460 struct domain_device *device = task->dev; 461 struct asd_sas_port *sas_port = device->port; 462 struct hisi_sas_device *sas_dev = device->lldd_dev; 463 bool internal_abort = sas_is_internal_abort(task); 464 struct hisi_sas_dq *dq = NULL; 465 struct hisi_sas_port *port; 466 struct hisi_hba *hisi_hba; 467 struct hisi_sas_slot *slot; 468 struct request *rq = NULL; 469 struct device *dev; 470 int rc; 471 472 if (!sas_port) { 473 struct task_status_struct *ts = &task->task_status; 474 475 ts->resp = SAS_TASK_UNDELIVERED; 476 ts->stat = SAS_PHY_DOWN; 477 /* 478 * libsas will use dev->port, should 479 * not call task_done for sata 480 */ 481 if (device->dev_type != SAS_SATA_DEV && !internal_abort) 482 task->task_done(task); 483 return -ECOMM; 484 } 485 486 hisi_hba = dev_to_hisi_hba(device); 487 dev = hisi_hba->dev; 488 489 switch (task->task_proto) { 490 case SAS_PROTOCOL_SSP: 491 case SAS_PROTOCOL_SMP: 492 case SAS_PROTOCOL_SATA: 493 case SAS_PROTOCOL_STP: 494 case SAS_PROTOCOL_STP_ALL: 495 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) { 496 if (!gfpflags_allow_blocking(gfp_flags)) 497 return -EINVAL; 498 499 down(&hisi_hba->sem); 500 up(&hisi_hba->sem); 501 } 502 503 if (DEV_IS_GONE(sas_dev)) { 504 if (sas_dev) 505 dev_info(dev, "task prep: device %d not ready\n", 506 sas_dev->device_id); 507 else 508 dev_info(dev, "task prep: device %016llx not ready\n", 509 SAS_ADDR(device->sas_addr)); 510 511 return -ECOMM; 512 } 513 514 port = to_hisi_sas_port(sas_port); 515 if (!port->port_attached) { 516 dev_info(dev, "task prep: %s port%d not attach device\n", 517 dev_is_sata(device) ? "SATA/STP" : "SAS", 518 device->port->id); 519 520 return -ECOMM; 521 } 522 523 rq = sas_task_find_rq(task); 524 if (rq) { 525 unsigned int dq_index; 526 u32 blk_tag; 527 528 blk_tag = blk_mq_unique_tag(rq); 529 dq_index = blk_mq_unique_tag_to_hwq(blk_tag); 530 dq = &hisi_hba->dq[dq_index]; 531 } else { 532 struct Scsi_Host *shost = hisi_hba->shost; 533 struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; 534 int queue = qmap->mq_map[raw_smp_processor_id()]; 535 536 dq = &hisi_hba->dq[queue]; 537 } 538 break; 539 case SAS_PROTOCOL_INTERNAL_ABORT: 540 if (!hisi_hba->hw->prep_abort) 541 return TMF_RESP_FUNC_FAILED; 542 543 if (test_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags)) 544 return -EIO; 545 546 hisi_hba = dev_to_hisi_hba(device); 547 548 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) 549 return -EINVAL; 550 551 port = to_hisi_sas_port(sas_port); 552 dq = &hisi_hba->dq[task->abort_task.qid]; 553 break; 554 default: 555 dev_err(hisi_hba->dev, "task prep: unknown/unsupported proto (0x%x)\n", 556 task->task_proto); 557 return -EINVAL; 558 } 559 560 rc = hisi_sas_dma_map(hisi_hba, task, &n_elem); 561 if (rc < 0) 562 goto prep_out; 563 564 if (!sas_protocol_ata(task->task_proto)) { 565 rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task); 566 if (rc < 0) 567 goto err_out_dma_unmap; 568 } 569 570 if (!internal_abort && hisi_hba->hw->slot_index_alloc) 571 rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device); 572 else 573 rc = hisi_sas_slot_index_alloc(hisi_hba, rq); 574 575 if (rc < 0) 576 goto err_out_dif_dma_unmap; 577 578 slot = &hisi_hba->slot_info[rc]; 579 slot->n_elem = n_elem; 580 slot->n_elem_dif = n_elem_dif; 581 slot->task = task; 582 slot->port = port; 583 584 slot->tmf = task->tmf; 585 slot->is_internal = !!task->tmf || internal_abort; 586 587 /* protect task_prep and start_delivery sequence */ 588 hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev); 589 590 return 0; 591 592 err_out_dif_dma_unmap: 593 if (!sas_protocol_ata(task->task_proto)) 594 hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif); 595 err_out_dma_unmap: 596 hisi_sas_dma_unmap(hisi_hba, task, n_elem); 597 prep_out: 598 dev_err(dev, "task exec: failed[%d]!\n", rc); 599 return rc; 600 } 601 602 static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no, 603 gfp_t gfp_flags) 604 { 605 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 606 struct asd_sas_phy *sas_phy = &phy->sas_phy; 607 608 if (!phy->phy_attached) 609 return; 610 611 sas_notify_phy_event(sas_phy, PHYE_OOB_DONE, gfp_flags); 612 613 if (sas_phy->phy) { 614 struct sas_phy *sphy = sas_phy->phy; 615 616 sphy->negotiated_linkrate = sas_phy->linkrate; 617 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; 618 sphy->maximum_linkrate_hw = 619 hisi_hba->hw->phy_get_max_linkrate(); 620 if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) 621 sphy->minimum_linkrate = phy->minimum_linkrate; 622 623 if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) 624 sphy->maximum_linkrate = phy->maximum_linkrate; 625 } 626 627 if (phy->phy_type & PORT_TYPE_SAS) { 628 struct sas_identify_frame *id; 629 630 id = (struct sas_identify_frame *)phy->frame_rcvd; 631 id->dev_type = phy->identify.device_type; 632 id->initiator_bits = SAS_PROTOCOL_ALL; 633 id->target_bits = phy->identify.target_port_protocols; 634 } else if (phy->phy_type & PORT_TYPE_SATA) { 635 /* Nothing */ 636 } 637 638 sas_phy->frame_rcvd_size = phy->frame_rcvd_size; 639 sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, gfp_flags); 640 } 641 642 static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device) 643 { 644 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 645 struct hisi_sas_device *sas_dev = NULL; 646 int last = hisi_hba->last_dev_id; 647 int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES; 648 int i; 649 650 spin_lock(&hisi_hba->lock); 651 for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) { 652 if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) { 653 int queue = i % hisi_hba->queue_count; 654 struct hisi_sas_dq *dq = &hisi_hba->dq[queue]; 655 656 hisi_hba->devices[i].device_id = i; 657 sas_dev = &hisi_hba->devices[i]; 658 sas_dev->dev_status = HISI_SAS_DEV_INIT; 659 sas_dev->dev_type = device->dev_type; 660 sas_dev->hisi_hba = hisi_hba; 661 sas_dev->sas_device = device; 662 sas_dev->dq = dq; 663 spin_lock_init(&sas_dev->lock); 664 INIT_LIST_HEAD(&hisi_hba->devices[i].list); 665 break; 666 } 667 i++; 668 } 669 hisi_hba->last_dev_id = i; 670 spin_unlock(&hisi_hba->lock); 671 672 return sas_dev; 673 } 674 675 static void hisi_sas_tmf_aborted(struct sas_task *task) 676 { 677 struct hisi_sas_slot *slot = task->lldd_task; 678 struct domain_device *device = task->dev; 679 struct hisi_sas_device *sas_dev = device->lldd_dev; 680 struct hisi_hba *hisi_hba = sas_dev->hisi_hba; 681 682 if (slot) { 683 struct hisi_sas_cq *cq = 684 &hisi_hba->cq[slot->dlvry_queue]; 685 /* 686 * sync irq to avoid free'ing task 687 * before using task in IO completion 688 */ 689 synchronize_irq(cq->irq_no); 690 slot->task = NULL; 691 } 692 } 693 694 #define HISI_SAS_DISK_RECOVER_CNT 3 695 static int hisi_sas_init_device(struct domain_device *device) 696 { 697 int rc = TMF_RESP_FUNC_COMPLETE; 698 struct scsi_lun lun; 699 int retry = HISI_SAS_DISK_RECOVER_CNT; 700 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 701 702 switch (device->dev_type) { 703 case SAS_END_DEVICE: 704 int_to_scsilun(0, &lun); 705 706 while (retry-- > 0) { 707 rc = sas_abort_task_set(device, lun.scsi_lun); 708 if (rc == TMF_RESP_FUNC_COMPLETE) { 709 hisi_sas_release_task(hisi_hba, device); 710 break; 711 } 712 } 713 break; 714 case SAS_SATA_DEV: 715 case SAS_SATA_PM: 716 case SAS_SATA_PM_PORT: 717 case SAS_SATA_PENDING: 718 /* 719 * If an expander is swapped when a SATA disk is attached then 720 * we should issue a hard reset to clear previous affiliation 721 * of STP target port, see SPL (chapter 6.19.4). 722 * 723 * However we don't need to issue a hard reset here for these 724 * reasons: 725 * a. When probing the device, libsas/libata already issues a 726 * hard reset in sas_probe_sata() -> ata_sas_async_probe(). 727 * Note that in hisi_sas_debug_I_T_nexus_reset() we take care 728 * to issue a hard reset by checking the dev status (== INIT). 729 * b. When resetting the controller, this is simply unnecessary. 730 */ 731 while (retry-- > 0) { 732 rc = hisi_sas_softreset_ata_disk(device); 733 if (!rc) 734 break; 735 } 736 break; 737 default: 738 break; 739 } 740 741 return rc; 742 } 743 744 int hisi_sas_slave_alloc(struct scsi_device *sdev) 745 { 746 struct domain_device *ddev = sdev_to_domain_dev(sdev); 747 struct hisi_sas_device *sas_dev = ddev->lldd_dev; 748 int rc; 749 750 rc = sas_slave_alloc(sdev); 751 if (rc) 752 return rc; 753 754 rc = hisi_sas_init_device(ddev); 755 if (rc) 756 return rc; 757 sas_dev->dev_status = HISI_SAS_DEV_NORMAL; 758 return 0; 759 } 760 EXPORT_SYMBOL_GPL(hisi_sas_slave_alloc); 761 762 static int hisi_sas_dev_found(struct domain_device *device) 763 { 764 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 765 struct domain_device *parent_dev = device->parent; 766 struct hisi_sas_device *sas_dev; 767 struct device *dev = hisi_hba->dev; 768 int rc; 769 770 if (hisi_hba->hw->alloc_dev) 771 sas_dev = hisi_hba->hw->alloc_dev(device); 772 else 773 sas_dev = hisi_sas_alloc_dev(device); 774 if (!sas_dev) { 775 dev_err(dev, "fail alloc dev: max support %d devices\n", 776 HISI_SAS_MAX_DEVICES); 777 return -EINVAL; 778 } 779 780 device->lldd_dev = sas_dev; 781 hisi_hba->hw->setup_itct(hisi_hba, sas_dev); 782 783 if (parent_dev && dev_is_expander(parent_dev->dev_type)) { 784 int phy_no; 785 786 phy_no = sas_find_attached_phy_id(&parent_dev->ex_dev, device); 787 if (phy_no < 0) { 788 dev_info(dev, "dev found: no attached " 789 "dev:%016llx at ex:%016llx\n", 790 SAS_ADDR(device->sas_addr), 791 SAS_ADDR(parent_dev->sas_addr)); 792 rc = phy_no; 793 goto err_out; 794 } 795 } 796 797 dev_info(dev, "dev[%d:%x] found\n", 798 sas_dev->device_id, sas_dev->dev_type); 799 800 return 0; 801 802 err_out: 803 hisi_sas_dev_gone(device); 804 return rc; 805 } 806 807 int hisi_sas_slave_configure(struct scsi_device *sdev) 808 { 809 struct domain_device *dev = sdev_to_domain_dev(sdev); 810 int ret = sas_slave_configure(sdev); 811 812 if (ret) 813 return ret; 814 if (!dev_is_sata(dev)) 815 sas_change_queue_depth(sdev, 64); 816 817 return 0; 818 } 819 EXPORT_SYMBOL_GPL(hisi_sas_slave_configure); 820 821 void hisi_sas_scan_start(struct Scsi_Host *shost) 822 { 823 struct hisi_hba *hisi_hba = shost_priv(shost); 824 825 hisi_hba->hw->phys_init(hisi_hba); 826 } 827 EXPORT_SYMBOL_GPL(hisi_sas_scan_start); 828 829 int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time) 830 { 831 struct hisi_hba *hisi_hba = shost_priv(shost); 832 struct sas_ha_struct *sha = &hisi_hba->sha; 833 834 /* Wait for PHY up interrupt to occur */ 835 if (time < HZ) 836 return 0; 837 838 sas_drain_work(sha); 839 return 1; 840 } 841 EXPORT_SYMBOL_GPL(hisi_sas_scan_finished); 842 843 static void hisi_sas_phyup_work_common(struct work_struct *work, 844 enum hisi_sas_phy_event event) 845 { 846 struct hisi_sas_phy *phy = 847 container_of(work, typeof(*phy), works[event]); 848 struct hisi_hba *hisi_hba = phy->hisi_hba; 849 struct asd_sas_phy *sas_phy = &phy->sas_phy; 850 int phy_no = sas_phy->id; 851 852 phy->wait_phyup_cnt = 0; 853 if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP) 854 hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no); 855 hisi_sas_bytes_dmaed(hisi_hba, phy_no, GFP_KERNEL); 856 } 857 858 static void hisi_sas_phyup_work(struct work_struct *work) 859 { 860 hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP); 861 } 862 863 static void hisi_sas_linkreset_work(struct work_struct *work) 864 { 865 struct hisi_sas_phy *phy = 866 container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]); 867 struct asd_sas_phy *sas_phy = &phy->sas_phy; 868 869 hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL); 870 } 871 872 static void hisi_sas_phyup_pm_work(struct work_struct *work) 873 { 874 struct hisi_sas_phy *phy = 875 container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP_PM]); 876 struct hisi_hba *hisi_hba = phy->hisi_hba; 877 struct device *dev = hisi_hba->dev; 878 879 hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP_PM); 880 pm_runtime_put_sync(dev); 881 } 882 883 static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = { 884 [HISI_PHYE_PHY_UP] = hisi_sas_phyup_work, 885 [HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work, 886 [HISI_PHYE_PHY_UP_PM] = hisi_sas_phyup_pm_work, 887 }; 888 889 bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy, 890 enum hisi_sas_phy_event event) 891 { 892 struct hisi_hba *hisi_hba = phy->hisi_hba; 893 894 if (WARN_ON(event >= HISI_PHYES_NUM)) 895 return false; 896 897 return queue_work(hisi_hba->wq, &phy->works[event]); 898 } 899 EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event); 900 901 static void hisi_sas_wait_phyup_timedout(struct timer_list *t) 902 { 903 struct hisi_sas_phy *phy = from_timer(phy, t, timer); 904 struct hisi_hba *hisi_hba = phy->hisi_hba; 905 struct device *dev = hisi_hba->dev; 906 int phy_no = phy->sas_phy.id; 907 908 dev_warn(dev, "phy%d wait phyup timeout, issuing link reset\n", phy_no); 909 hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); 910 } 911 912 #define HISI_SAS_WAIT_PHYUP_RETRIES 10 913 914 void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no) 915 { 916 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 917 struct device *dev = hisi_hba->dev; 918 unsigned long flags; 919 920 dev_dbg(dev, "phy%d OOB ready\n", phy_no); 921 spin_lock_irqsave(&phy->lock, flags); 922 if (phy->phy_attached) { 923 spin_unlock_irqrestore(&phy->lock, flags); 924 return; 925 } 926 927 if (!timer_pending(&phy->timer)) { 928 if (phy->wait_phyup_cnt < HISI_SAS_WAIT_PHYUP_RETRIES) { 929 phy->wait_phyup_cnt++; 930 phy->timer.expires = jiffies + 931 HISI_SAS_WAIT_PHYUP_TIMEOUT; 932 add_timer(&phy->timer); 933 spin_unlock_irqrestore(&phy->lock, flags); 934 return; 935 } 936 937 dev_warn(dev, "phy%d failed to come up %d times, giving up\n", 938 phy_no, phy->wait_phyup_cnt); 939 phy->wait_phyup_cnt = 0; 940 } 941 spin_unlock_irqrestore(&phy->lock, flags); 942 } 943 944 EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready); 945 946 static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no) 947 { 948 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 949 struct asd_sas_phy *sas_phy = &phy->sas_phy; 950 int i; 951 952 phy->hisi_hba = hisi_hba; 953 phy->port = NULL; 954 phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS; 955 phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate(); 956 sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0; 957 sas_phy->class = SAS; 958 sas_phy->iproto = SAS_PROTOCOL_ALL; 959 sas_phy->tproto = 0; 960 sas_phy->type = PHY_TYPE_PHYSICAL; 961 sas_phy->role = PHY_ROLE_INITIATOR; 962 sas_phy->oob_mode = OOB_NOT_CONNECTED; 963 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; 964 sas_phy->id = phy_no; 965 sas_phy->sas_addr = &hisi_hba->sas_addr[0]; 966 sas_phy->frame_rcvd = &phy->frame_rcvd[0]; 967 sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata; 968 sas_phy->lldd_phy = phy; 969 970 for (i = 0; i < HISI_PHYES_NUM; i++) 971 INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]); 972 973 spin_lock_init(&phy->lock); 974 975 timer_setup(&phy->timer, hisi_sas_wait_phyup_timedout, 0); 976 } 977 978 /* Wrapper to ensure we track hisi_sas_phy.enable properly */ 979 void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no, int enable) 980 { 981 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 982 struct asd_sas_phy *aphy = &phy->sas_phy; 983 struct sas_phy *sphy = aphy->phy; 984 unsigned long flags; 985 986 spin_lock_irqsave(&phy->lock, flags); 987 988 if (enable) { 989 /* We may have been enabled already; if so, don't touch */ 990 if (!phy->enable) 991 sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; 992 hisi_hba->hw->phy_start(hisi_hba, phy_no); 993 } else { 994 sphy->negotiated_linkrate = SAS_PHY_DISABLED; 995 hisi_hba->hw->phy_disable(hisi_hba, phy_no); 996 } 997 phy->enable = enable; 998 spin_unlock_irqrestore(&phy->lock, flags); 999 } 1000 EXPORT_SYMBOL_GPL(hisi_sas_phy_enable); 1001 1002 static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy) 1003 { 1004 struct sas_ha_struct *sas_ha = sas_phy->ha; 1005 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; 1006 struct hisi_sas_phy *phy = sas_phy->lldd_phy; 1007 struct asd_sas_port *sas_port = sas_phy->port; 1008 struct hisi_sas_port *port; 1009 unsigned long flags; 1010 1011 if (!sas_port) 1012 return; 1013 1014 port = to_hisi_sas_port(sas_port); 1015 spin_lock_irqsave(&hisi_hba->lock, flags); 1016 port->port_attached = 1; 1017 port->id = phy->port_id; 1018 phy->port = port; 1019 sas_port->lldd_port = port; 1020 spin_unlock_irqrestore(&hisi_hba->lock, flags); 1021 } 1022 1023 static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task, 1024 struct hisi_sas_slot *slot) 1025 { 1026 if (task) { 1027 unsigned long flags; 1028 struct task_status_struct *ts; 1029 1030 ts = &task->task_status; 1031 1032 ts->resp = SAS_TASK_COMPLETE; 1033 ts->stat = SAS_ABORTED_TASK; 1034 spin_lock_irqsave(&task->task_state_lock, flags); 1035 task->task_state_flags &= ~SAS_TASK_STATE_PENDING; 1036 if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP) 1037 task->task_state_flags |= SAS_TASK_STATE_DONE; 1038 spin_unlock_irqrestore(&task->task_state_lock, flags); 1039 } 1040 1041 hisi_sas_slot_task_free(hisi_hba, task, slot); 1042 } 1043 1044 static void hisi_sas_release_task(struct hisi_hba *hisi_hba, 1045 struct domain_device *device) 1046 { 1047 struct hisi_sas_slot *slot, *slot2; 1048 struct hisi_sas_device *sas_dev = device->lldd_dev; 1049 1050 list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry) 1051 hisi_sas_do_release_task(hisi_hba, slot->task, slot); 1052 } 1053 1054 void hisi_sas_release_tasks(struct hisi_hba *hisi_hba) 1055 { 1056 struct hisi_sas_device *sas_dev; 1057 struct domain_device *device; 1058 int i; 1059 1060 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1061 sas_dev = &hisi_hba->devices[i]; 1062 device = sas_dev->sas_device; 1063 1064 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || 1065 !device) 1066 continue; 1067 1068 hisi_sas_release_task(hisi_hba, device); 1069 } 1070 } 1071 EXPORT_SYMBOL_GPL(hisi_sas_release_tasks); 1072 1073 static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba, 1074 struct domain_device *device) 1075 { 1076 if (hisi_hba->hw->dereg_device) 1077 hisi_hba->hw->dereg_device(hisi_hba, device); 1078 } 1079 1080 static int 1081 hisi_sas_internal_task_abort_dev(struct hisi_sas_device *sas_dev, 1082 bool rst_ha_timeout) 1083 { 1084 struct hisi_sas_internal_abort_data data = { rst_ha_timeout }; 1085 struct domain_device *device = sas_dev->sas_device; 1086 struct hisi_hba *hisi_hba = sas_dev->hisi_hba; 1087 int i, rc; 1088 1089 for (i = 0; i < hisi_hba->cq_nvecs; i++) { 1090 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 1091 const struct cpumask *mask = cq->irq_mask; 1092 1093 if (mask && !cpumask_intersects(cpu_online_mask, mask)) 1094 continue; 1095 rc = sas_execute_internal_abort_dev(device, i, &data); 1096 if (rc) 1097 return rc; 1098 } 1099 1100 return 0; 1101 } 1102 1103 static void hisi_sas_dev_gone(struct domain_device *device) 1104 { 1105 struct hisi_sas_device *sas_dev = device->lldd_dev; 1106 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1107 struct device *dev = hisi_hba->dev; 1108 int ret = 0; 1109 1110 dev_info(dev, "dev[%d:%x] is gone\n", 1111 sas_dev->device_id, sas_dev->dev_type); 1112 1113 down(&hisi_hba->sem); 1114 if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) { 1115 hisi_sas_internal_task_abort_dev(sas_dev, true); 1116 1117 hisi_sas_dereg_device(hisi_hba, device); 1118 1119 ret = hisi_hba->hw->clear_itct(hisi_hba, sas_dev); 1120 device->lldd_dev = NULL; 1121 } 1122 1123 if (hisi_hba->hw->free_device) 1124 hisi_hba->hw->free_device(sas_dev); 1125 1126 /* Don't mark it as SAS_PHY_UNUSED if failed to clear ITCT */ 1127 if (!ret) 1128 sas_dev->dev_type = SAS_PHY_UNUSED; 1129 sas_dev->sas_device = NULL; 1130 up(&hisi_hba->sem); 1131 } 1132 1133 static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no, 1134 struct sas_phy_linkrates *r) 1135 { 1136 struct sas_phy_linkrates _r; 1137 1138 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1139 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1140 enum sas_linkrate min, max; 1141 1142 if (r->minimum_linkrate > SAS_LINK_RATE_1_5_GBPS) 1143 return -EINVAL; 1144 1145 if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) { 1146 max = sas_phy->phy->maximum_linkrate; 1147 min = r->minimum_linkrate; 1148 } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) { 1149 max = r->maximum_linkrate; 1150 min = sas_phy->phy->minimum_linkrate; 1151 } else 1152 return -EINVAL; 1153 1154 _r.maximum_linkrate = max; 1155 _r.minimum_linkrate = min; 1156 1157 sas_phy->phy->maximum_linkrate = max; 1158 sas_phy->phy->minimum_linkrate = min; 1159 1160 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 1161 msleep(100); 1162 hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r); 1163 hisi_sas_phy_enable(hisi_hba, phy_no, 1); 1164 1165 return 0; 1166 } 1167 1168 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, 1169 void *funcdata) 1170 { 1171 struct hisi_sas_phy *phy = container_of(sas_phy, 1172 struct hisi_sas_phy, sas_phy); 1173 struct sas_ha_struct *sas_ha = sas_phy->ha; 1174 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; 1175 struct device *dev = hisi_hba->dev; 1176 DECLARE_COMPLETION_ONSTACK(completion); 1177 int phy_no = sas_phy->id; 1178 u8 sts = phy->phy_attached; 1179 int ret = 0; 1180 1181 down(&hisi_hba->sem); 1182 phy->reset_completion = &completion; 1183 1184 switch (func) { 1185 case PHY_FUNC_HARD_RESET: 1186 hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no); 1187 break; 1188 1189 case PHY_FUNC_LINK_RESET: 1190 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 1191 msleep(100); 1192 hisi_sas_phy_enable(hisi_hba, phy_no, 1); 1193 break; 1194 1195 case PHY_FUNC_DISABLE: 1196 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 1197 goto out; 1198 1199 case PHY_FUNC_SET_LINK_RATE: 1200 ret = hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata); 1201 break; 1202 1203 case PHY_FUNC_GET_EVENTS: 1204 if (hisi_hba->hw->get_events) { 1205 hisi_hba->hw->get_events(hisi_hba, phy_no); 1206 goto out; 1207 } 1208 fallthrough; 1209 case PHY_FUNC_RELEASE_SPINUP_HOLD: 1210 default: 1211 ret = -EOPNOTSUPP; 1212 goto out; 1213 } 1214 1215 if (sts && !wait_for_completion_timeout(&completion, 1216 HISI_SAS_WAIT_PHYUP_TIMEOUT)) { 1217 dev_warn(dev, "phy%d wait phyup timed out for func %d\n", 1218 phy_no, func); 1219 if (phy->in_reset) 1220 ret = -ETIMEDOUT; 1221 } 1222 1223 out: 1224 phy->reset_completion = NULL; 1225 1226 up(&hisi_hba->sem); 1227 return ret; 1228 } 1229 1230 static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev, 1231 bool reset, int pmp, u8 *fis) 1232 { 1233 struct ata_taskfile tf; 1234 1235 ata_tf_init(dev, &tf); 1236 if (reset) 1237 tf.ctl |= ATA_SRST; 1238 else 1239 tf.ctl &= ~ATA_SRST; 1240 tf.command = ATA_CMD_DEV_RESET; 1241 ata_tf_to_fis(&tf, pmp, 0, fis); 1242 } 1243 1244 static int hisi_sas_softreset_ata_disk(struct domain_device *device) 1245 { 1246 u8 fis[20] = {0}; 1247 struct ata_port *ap = device->sata_dev.ap; 1248 struct ata_link *link; 1249 int rc = TMF_RESP_FUNC_FAILED; 1250 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1251 struct device *dev = hisi_hba->dev; 1252 1253 ata_for_each_link(link, ap, EDGE) { 1254 int pmp = sata_srst_pmp(link); 1255 1256 hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis); 1257 rc = sas_execute_ata_cmd(device, fis, -1); 1258 if (rc != TMF_RESP_FUNC_COMPLETE) 1259 break; 1260 } 1261 1262 if (rc == TMF_RESP_FUNC_COMPLETE) { 1263 ata_for_each_link(link, ap, EDGE) { 1264 int pmp = sata_srst_pmp(link); 1265 1266 hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis); 1267 rc = sas_execute_ata_cmd(device, fis, -1); 1268 if (rc != TMF_RESP_FUNC_COMPLETE) 1269 dev_err(dev, "ata disk %016llx de-reset failed\n", 1270 SAS_ADDR(device->sas_addr)); 1271 } 1272 } else { 1273 dev_err(dev, "ata disk %016llx reset failed\n", 1274 SAS_ADDR(device->sas_addr)); 1275 } 1276 1277 if (rc == TMF_RESP_FUNC_COMPLETE) 1278 hisi_sas_release_task(hisi_hba, device); 1279 1280 return rc; 1281 } 1282 1283 static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba) 1284 { 1285 u32 state = hisi_hba->hw->get_phys_state(hisi_hba); 1286 int i; 1287 1288 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1289 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; 1290 struct domain_device *device = sas_dev->sas_device; 1291 struct asd_sas_port *sas_port; 1292 struct hisi_sas_port *port; 1293 struct hisi_sas_phy *phy = NULL; 1294 struct asd_sas_phy *sas_phy; 1295 1296 if ((sas_dev->dev_type == SAS_PHY_UNUSED) 1297 || !device || !device->port) 1298 continue; 1299 1300 sas_port = device->port; 1301 port = to_hisi_sas_port(sas_port); 1302 1303 spin_lock(&sas_port->phy_list_lock); 1304 list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) 1305 if (state & BIT(sas_phy->id)) { 1306 phy = sas_phy->lldd_phy; 1307 break; 1308 } 1309 spin_unlock(&sas_port->phy_list_lock); 1310 1311 if (phy) { 1312 port->id = phy->port_id; 1313 1314 /* Update linkrate of directly attached device. */ 1315 if (!device->parent) 1316 device->linkrate = phy->sas_phy.linkrate; 1317 1318 hisi_hba->hw->setup_itct(hisi_hba, sas_dev); 1319 } else if (!port->port_attached) 1320 port->id = 0xff; 1321 } 1322 } 1323 1324 static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state) 1325 { 1326 struct asd_sas_port *_sas_port = NULL; 1327 int phy_no; 1328 1329 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { 1330 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1331 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1332 struct asd_sas_port *sas_port = sas_phy->port; 1333 bool do_port_check = _sas_port != sas_port; 1334 1335 if (!sas_phy->phy->enabled) 1336 continue; 1337 1338 /* Report PHY state change to libsas */ 1339 if (state & BIT(phy_no)) { 1340 if (do_port_check && sas_port && sas_port->port_dev) { 1341 struct domain_device *dev = sas_port->port_dev; 1342 1343 _sas_port = sas_port; 1344 1345 if (dev_is_expander(dev->dev_type)) 1346 sas_notify_port_event(sas_phy, 1347 PORTE_BROADCAST_RCVD, 1348 GFP_KERNEL); 1349 } 1350 } else { 1351 hisi_sas_phy_down(hisi_hba, phy_no, 0, GFP_KERNEL); 1352 } 1353 } 1354 } 1355 1356 static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba) 1357 { 1358 struct hisi_sas_device *sas_dev; 1359 struct domain_device *device; 1360 int i; 1361 1362 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1363 sas_dev = &hisi_hba->devices[i]; 1364 device = sas_dev->sas_device; 1365 1366 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device) 1367 continue; 1368 1369 hisi_sas_init_device(device); 1370 } 1371 } 1372 1373 static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba, 1374 struct asd_sas_port *sas_port, 1375 struct domain_device *device) 1376 { 1377 struct ata_port *ap = device->sata_dev.ap; 1378 struct device *dev = hisi_hba->dev; 1379 int rc = TMF_RESP_FUNC_FAILED; 1380 struct ata_link *link; 1381 u8 fis[20] = {0}; 1382 int i; 1383 1384 for (i = 0; i < hisi_hba->n_phy; i++) { 1385 if (!(sas_port->phy_mask & BIT(i))) 1386 continue; 1387 1388 ata_for_each_link(link, ap, EDGE) { 1389 int pmp = sata_srst_pmp(link); 1390 1391 hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis); 1392 rc = sas_execute_ata_cmd(device, fis, i); 1393 if (rc != TMF_RESP_FUNC_COMPLETE) { 1394 dev_err(dev, "phy%d ata reset failed rc=%d\n", 1395 i, rc); 1396 break; 1397 } 1398 } 1399 } 1400 } 1401 1402 static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba) 1403 { 1404 struct device *dev = hisi_hba->dev; 1405 int port_no, rc, i; 1406 1407 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1408 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; 1409 struct domain_device *device = sas_dev->sas_device; 1410 1411 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device) 1412 continue; 1413 1414 rc = hisi_sas_internal_task_abort_dev(sas_dev, false); 1415 if (rc < 0) 1416 dev_err(dev, "STP reject: abort dev failed %d\n", rc); 1417 } 1418 1419 for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) { 1420 struct hisi_sas_port *port = &hisi_hba->port[port_no]; 1421 struct asd_sas_port *sas_port = &port->sas_port; 1422 struct domain_device *port_dev = sas_port->port_dev; 1423 struct domain_device *device; 1424 1425 if (!port_dev || !dev_is_expander(port_dev->dev_type)) 1426 continue; 1427 1428 /* Try to find a SATA device */ 1429 list_for_each_entry(device, &sas_port->dev_list, 1430 dev_list_node) { 1431 if (dev_is_sata(device)) { 1432 hisi_sas_send_ata_reset_each_phy(hisi_hba, 1433 sas_port, 1434 device); 1435 break; 1436 } 1437 } 1438 } 1439 } 1440 1441 void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba) 1442 { 1443 struct Scsi_Host *shost = hisi_hba->shost; 1444 1445 hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba); 1446 1447 scsi_block_requests(shost); 1448 hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000); 1449 1450 del_timer_sync(&hisi_hba->timer); 1451 1452 set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1453 } 1454 EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare); 1455 1456 void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba) 1457 { 1458 struct Scsi_Host *shost = hisi_hba->shost; 1459 1460 /* Init and wait for PHYs to come up and all libsas event finished. */ 1461 hisi_hba->hw->phys_init(hisi_hba); 1462 msleep(1000); 1463 hisi_sas_refresh_port_id(hisi_hba); 1464 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1465 1466 if (hisi_hba->reject_stp_links_msk) 1467 hisi_sas_terminate_stp_reject(hisi_hba); 1468 hisi_sas_reset_init_all_devices(hisi_hba); 1469 scsi_unblock_requests(shost); 1470 clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); 1471 up(&hisi_hba->sem); 1472 1473 hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state); 1474 } 1475 EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done); 1476 1477 static int hisi_sas_controller_prereset(struct hisi_hba *hisi_hba) 1478 { 1479 if (!hisi_hba->hw->soft_reset) 1480 return -1; 1481 1482 down(&hisi_hba->sem); 1483 if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) { 1484 up(&hisi_hba->sem); 1485 return -1; 1486 } 1487 1488 if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct) 1489 hisi_hba->hw->debugfs_snapshot_regs(hisi_hba); 1490 1491 return 0; 1492 } 1493 1494 static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba) 1495 { 1496 struct device *dev = hisi_hba->dev; 1497 struct Scsi_Host *shost = hisi_hba->shost; 1498 int rc; 1499 1500 dev_info(dev, "controller resetting...\n"); 1501 hisi_sas_controller_reset_prepare(hisi_hba); 1502 1503 rc = hisi_hba->hw->soft_reset(hisi_hba); 1504 if (rc) { 1505 dev_warn(dev, "controller reset failed (%d)\n", rc); 1506 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1507 up(&hisi_hba->sem); 1508 scsi_unblock_requests(shost); 1509 clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); 1510 return rc; 1511 } 1512 clear_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags); 1513 1514 hisi_sas_controller_reset_done(hisi_hba); 1515 dev_info(dev, "controller reset complete\n"); 1516 1517 return 0; 1518 } 1519 1520 static int hisi_sas_abort_task(struct sas_task *task) 1521 { 1522 struct hisi_sas_internal_abort_data internal_abort_data = { false }; 1523 struct domain_device *device = task->dev; 1524 struct hisi_sas_device *sas_dev = device->lldd_dev; 1525 struct hisi_sas_slot *slot = task->lldd_task; 1526 struct hisi_hba *hisi_hba; 1527 struct device *dev; 1528 int rc = TMF_RESP_FUNC_FAILED; 1529 unsigned long flags; 1530 1531 if (!sas_dev) 1532 return TMF_RESP_FUNC_FAILED; 1533 1534 hisi_hba = dev_to_hisi_hba(task->dev); 1535 dev = hisi_hba->dev; 1536 1537 spin_lock_irqsave(&task->task_state_lock, flags); 1538 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 1539 struct hisi_sas_cq *cq; 1540 1541 if (slot) { 1542 /* 1543 * sync irq to avoid free'ing task 1544 * before using task in IO completion 1545 */ 1546 cq = &hisi_hba->cq[slot->dlvry_queue]; 1547 synchronize_irq(cq->irq_no); 1548 } 1549 spin_unlock_irqrestore(&task->task_state_lock, flags); 1550 rc = TMF_RESP_FUNC_COMPLETE; 1551 goto out; 1552 } 1553 task->task_state_flags |= SAS_TASK_STATE_ABORTED; 1554 spin_unlock_irqrestore(&task->task_state_lock, flags); 1555 1556 if (slot && task->task_proto & SAS_PROTOCOL_SSP) { 1557 u16 tag = slot->idx; 1558 int rc2; 1559 1560 rc = sas_abort_task(task, tag); 1561 rc2 = sas_execute_internal_abort_single(device, tag, 1562 slot->dlvry_queue, &internal_abort_data); 1563 if (rc2 < 0) { 1564 dev_err(dev, "abort task: internal abort (%d)\n", rc2); 1565 return TMF_RESP_FUNC_FAILED; 1566 } 1567 1568 /* 1569 * If the TMF finds that the IO is not in the device and also 1570 * the internal abort does not succeed, then it is safe to 1571 * free the slot. 1572 * Note: if the internal abort succeeds then the slot 1573 * will have already been completed 1574 */ 1575 if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) { 1576 if (task->lldd_task) 1577 hisi_sas_do_release_task(hisi_hba, task, slot); 1578 } 1579 } else if (task->task_proto & SAS_PROTOCOL_SATA || 1580 task->task_proto & SAS_PROTOCOL_STP) { 1581 if (task->dev->dev_type == SAS_SATA_DEV) { 1582 struct ata_queued_cmd *qc = task->uldd_task; 1583 1584 rc = hisi_sas_internal_task_abort_dev(sas_dev, false); 1585 if (rc < 0) { 1586 dev_err(dev, "abort task: internal abort failed\n"); 1587 goto out; 1588 } 1589 hisi_sas_dereg_device(hisi_hba, device); 1590 1591 /* 1592 * If an ATA internal command times out in ATA EH, it 1593 * need to execute soft reset, so check the scsicmd 1594 */ 1595 if ((sas_dev->dev_status == HISI_SAS_DEV_NCQ_ERR) && 1596 qc && qc->scsicmd) { 1597 hisi_sas_do_release_task(hisi_hba, task, slot); 1598 rc = TMF_RESP_FUNC_COMPLETE; 1599 } else { 1600 rc = hisi_sas_softreset_ata_disk(device); 1601 } 1602 } 1603 } else if (slot && task->task_proto & SAS_PROTOCOL_SMP) { 1604 /* SMP */ 1605 u32 tag = slot->idx; 1606 struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue]; 1607 1608 rc = sas_execute_internal_abort_single(device, 1609 tag, slot->dlvry_queue, 1610 &internal_abort_data); 1611 if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) && 1612 task->lldd_task) { 1613 /* 1614 * sync irq to avoid free'ing task 1615 * before using task in IO completion 1616 */ 1617 synchronize_irq(cq->irq_no); 1618 slot->task = NULL; 1619 } 1620 } 1621 1622 out: 1623 if (rc != TMF_RESP_FUNC_COMPLETE) 1624 dev_notice(dev, "abort task: rc=%d\n", rc); 1625 return rc; 1626 } 1627 1628 static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun) 1629 { 1630 struct hisi_sas_device *sas_dev = device->lldd_dev; 1631 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1632 struct device *dev = hisi_hba->dev; 1633 int rc; 1634 1635 rc = hisi_sas_internal_task_abort_dev(sas_dev, false); 1636 if (rc < 0) { 1637 dev_err(dev, "abort task set: internal abort rc=%d\n", rc); 1638 return TMF_RESP_FUNC_FAILED; 1639 } 1640 hisi_sas_dereg_device(hisi_hba, device); 1641 1642 rc = sas_abort_task_set(device, lun); 1643 if (rc == TMF_RESP_FUNC_COMPLETE) 1644 hisi_sas_release_task(hisi_hba, device); 1645 1646 return rc; 1647 } 1648 1649 static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device) 1650 { 1651 struct sas_phy *local_phy = sas_get_local_phy(device); 1652 struct hisi_sas_device *sas_dev = device->lldd_dev; 1653 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1654 struct sas_ha_struct *sas_ha = &hisi_hba->sha; 1655 int rc, reset_type; 1656 1657 if (!local_phy->enabled) { 1658 sas_put_local_phy(local_phy); 1659 return -ENODEV; 1660 } 1661 1662 if (scsi_is_sas_phy_local(local_phy)) { 1663 struct asd_sas_phy *sas_phy = 1664 sas_ha->sas_phy[local_phy->number]; 1665 struct hisi_sas_phy *phy = 1666 container_of(sas_phy, struct hisi_sas_phy, sas_phy); 1667 unsigned long flags; 1668 1669 spin_lock_irqsave(&phy->lock, flags); 1670 phy->in_reset = 1; 1671 spin_unlock_irqrestore(&phy->lock, flags); 1672 } 1673 1674 reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT || 1675 !dev_is_sata(device)) ? true : false; 1676 1677 rc = sas_phy_reset(local_phy, reset_type); 1678 sas_put_local_phy(local_phy); 1679 1680 if (scsi_is_sas_phy_local(local_phy)) { 1681 struct asd_sas_phy *sas_phy = 1682 sas_ha->sas_phy[local_phy->number]; 1683 struct hisi_sas_phy *phy = 1684 container_of(sas_phy, struct hisi_sas_phy, sas_phy); 1685 unsigned long flags; 1686 1687 spin_lock_irqsave(&phy->lock, flags); 1688 phy->in_reset = 0; 1689 spin_unlock_irqrestore(&phy->lock, flags); 1690 1691 /* report PHY down if timed out */ 1692 if (rc == -ETIMEDOUT) 1693 hisi_sas_phy_down(hisi_hba, sas_phy->id, 0, GFP_KERNEL); 1694 return rc; 1695 } 1696 1697 /* Remote phy */ 1698 if (rc) 1699 return rc; 1700 1701 if (dev_is_sata(device)) { 1702 struct ata_link *link = &device->sata_dev.ap->link; 1703 1704 rc = ata_wait_after_reset(link, HISI_SAS_WAIT_PHYUP_TIMEOUT, 1705 smp_ata_check_ready_type); 1706 } else { 1707 msleep(2000); 1708 } 1709 1710 return rc; 1711 } 1712 1713 static int hisi_sas_I_T_nexus_reset(struct domain_device *device) 1714 { 1715 struct hisi_sas_device *sas_dev = device->lldd_dev; 1716 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1717 struct device *dev = hisi_hba->dev; 1718 int rc; 1719 1720 if (sas_dev->dev_status == HISI_SAS_DEV_NCQ_ERR) 1721 sas_dev->dev_status = HISI_SAS_DEV_NORMAL; 1722 1723 rc = hisi_sas_internal_task_abort_dev(sas_dev, false); 1724 if (rc < 0) { 1725 dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc); 1726 return TMF_RESP_FUNC_FAILED; 1727 } 1728 hisi_sas_dereg_device(hisi_hba, device); 1729 1730 rc = hisi_sas_debug_I_T_nexus_reset(device); 1731 if (rc == TMF_RESP_FUNC_COMPLETE && dev_is_sata(device)) { 1732 struct sas_phy *local_phy; 1733 1734 rc = hisi_sas_softreset_ata_disk(device); 1735 switch (rc) { 1736 case -ECOMM: 1737 rc = -ENODEV; 1738 break; 1739 case TMF_RESP_FUNC_FAILED: 1740 case -EMSGSIZE: 1741 case -EIO: 1742 local_phy = sas_get_local_phy(device); 1743 rc = sas_phy_enable(local_phy, 0); 1744 if (!rc) { 1745 local_phy->enabled = 0; 1746 dev_err(dev, "Disabled local phy of ATA disk %016llx due to softreset fail (%d)\n", 1747 SAS_ADDR(device->sas_addr), rc); 1748 rc = -ENODEV; 1749 } 1750 sas_put_local_phy(local_phy); 1751 break; 1752 default: 1753 break; 1754 } 1755 } 1756 1757 if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) 1758 hisi_sas_release_task(hisi_hba, device); 1759 1760 return rc; 1761 } 1762 1763 static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun) 1764 { 1765 struct hisi_sas_device *sas_dev = device->lldd_dev; 1766 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1767 struct device *dev = hisi_hba->dev; 1768 int rc = TMF_RESP_FUNC_FAILED; 1769 1770 /* Clear internal IO and then lu reset */ 1771 rc = hisi_sas_internal_task_abort_dev(sas_dev, false); 1772 if (rc < 0) { 1773 dev_err(dev, "lu_reset: internal abort failed\n"); 1774 goto out; 1775 } 1776 hisi_sas_dereg_device(hisi_hba, device); 1777 1778 if (dev_is_sata(device)) { 1779 struct sas_phy *phy; 1780 1781 phy = sas_get_local_phy(device); 1782 1783 rc = sas_phy_reset(phy, true); 1784 1785 if (rc == 0) 1786 hisi_sas_release_task(hisi_hba, device); 1787 sas_put_local_phy(phy); 1788 } else { 1789 rc = sas_lu_reset(device, lun); 1790 if (rc == TMF_RESP_FUNC_COMPLETE) 1791 hisi_sas_release_task(hisi_hba, device); 1792 } 1793 out: 1794 if (rc != TMF_RESP_FUNC_COMPLETE) 1795 dev_err(dev, "lu_reset: for device[%d]:rc= %d\n", 1796 sas_dev->device_id, rc); 1797 return rc; 1798 } 1799 1800 static void hisi_sas_async_I_T_nexus_reset(void *data, async_cookie_t cookie) 1801 { 1802 struct domain_device *device = data; 1803 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1804 int rc; 1805 1806 rc = hisi_sas_debug_I_T_nexus_reset(device); 1807 if (rc != TMF_RESP_FUNC_COMPLETE) 1808 dev_info(hisi_hba->dev, "I_T_nexus reset fail for dev:%016llx rc=%d\n", 1809 SAS_ADDR(device->sas_addr), rc); 1810 } 1811 1812 static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha) 1813 { 1814 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; 1815 HISI_SAS_DECLARE_RST_WORK_ON_STACK(r); 1816 ASYNC_DOMAIN_EXCLUSIVE(async); 1817 int i; 1818 1819 queue_work(hisi_hba->wq, &r.work); 1820 wait_for_completion(r.completion); 1821 if (!r.done) 1822 return TMF_RESP_FUNC_FAILED; 1823 1824 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1825 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; 1826 struct domain_device *device = sas_dev->sas_device; 1827 1828 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device || 1829 dev_is_expander(device->dev_type)) 1830 continue; 1831 1832 async_schedule_domain(hisi_sas_async_I_T_nexus_reset, 1833 device, &async); 1834 } 1835 1836 async_synchronize_full_domain(&async); 1837 hisi_sas_release_tasks(hisi_hba); 1838 1839 return TMF_RESP_FUNC_COMPLETE; 1840 } 1841 1842 static int hisi_sas_query_task(struct sas_task *task) 1843 { 1844 int rc = TMF_RESP_FUNC_FAILED; 1845 1846 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { 1847 struct hisi_sas_slot *slot = task->lldd_task; 1848 u32 tag = slot->idx; 1849 1850 rc = sas_query_task(task, tag); 1851 switch (rc) { 1852 /* The task is still in Lun, release it then */ 1853 case TMF_RESP_FUNC_SUCC: 1854 /* The task is not in Lun or failed, reset the phy */ 1855 case TMF_RESP_FUNC_FAILED: 1856 case TMF_RESP_FUNC_COMPLETE: 1857 break; 1858 default: 1859 rc = TMF_RESP_FUNC_FAILED; 1860 break; 1861 } 1862 } 1863 return rc; 1864 } 1865 1866 static bool hisi_sas_internal_abort_timeout(struct sas_task *task, 1867 void *data) 1868 { 1869 struct domain_device *device = task->dev; 1870 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1871 struct hisi_sas_internal_abort_data *timeout = data; 1872 1873 if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct) 1874 queue_work(hisi_hba->wq, &hisi_hba->debugfs_work); 1875 1876 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 1877 pr_err("Internal abort: timeout %016llx\n", 1878 SAS_ADDR(device->sas_addr)); 1879 } else { 1880 struct hisi_sas_slot *slot = task->lldd_task; 1881 1882 set_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags); 1883 1884 if (slot) { 1885 struct hisi_sas_cq *cq = 1886 &hisi_hba->cq[slot->dlvry_queue]; 1887 /* 1888 * sync irq to avoid free'ing task 1889 * before using task in IO completion 1890 */ 1891 synchronize_irq(cq->irq_no); 1892 slot->task = NULL; 1893 } 1894 1895 if (timeout->rst_ha_timeout) { 1896 pr_err("Internal abort: timeout and not done %016llx. Queuing reset.\n", 1897 SAS_ADDR(device->sas_addr)); 1898 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 1899 } else { 1900 pr_err("Internal abort: timeout and not done %016llx.\n", 1901 SAS_ADDR(device->sas_addr)); 1902 } 1903 1904 return true; 1905 } 1906 1907 return false; 1908 } 1909 1910 static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy) 1911 { 1912 hisi_sas_port_notify_formed(sas_phy); 1913 } 1914 1915 static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type, 1916 u8 reg_index, u8 reg_count, u8 *write_data) 1917 { 1918 struct hisi_hba *hisi_hba = sha->lldd_ha; 1919 1920 if (!hisi_hba->hw->write_gpio) 1921 return -EOPNOTSUPP; 1922 1923 return hisi_hba->hw->write_gpio(hisi_hba, reg_type, 1924 reg_index, reg_count, write_data); 1925 } 1926 1927 static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy) 1928 { 1929 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1930 struct sas_phy *sphy = sas_phy->phy; 1931 unsigned long flags; 1932 1933 phy->phy_attached = 0; 1934 phy->phy_type = 0; 1935 phy->port = NULL; 1936 1937 spin_lock_irqsave(&phy->lock, flags); 1938 if (phy->enable) 1939 sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; 1940 else 1941 sphy->negotiated_linkrate = SAS_PHY_DISABLED; 1942 spin_unlock_irqrestore(&phy->lock, flags); 1943 } 1944 1945 void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy, 1946 gfp_t gfp_flags) 1947 { 1948 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1949 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1950 struct device *dev = hisi_hba->dev; 1951 1952 if (rdy) { 1953 /* Phy down but ready */ 1954 hisi_sas_bytes_dmaed(hisi_hba, phy_no, gfp_flags); 1955 hisi_sas_port_notify_formed(sas_phy); 1956 } else { 1957 struct hisi_sas_port *port = phy->port; 1958 1959 if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) || 1960 phy->in_reset) { 1961 dev_info(dev, "ignore flutter phy%d down\n", phy_no); 1962 return; 1963 } 1964 /* Phy down and not ready */ 1965 sas_notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL, gfp_flags); 1966 sas_phy_disconnected(sas_phy); 1967 1968 if (port) { 1969 if (phy->phy_type & PORT_TYPE_SAS) { 1970 int port_id = port->id; 1971 1972 if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba, 1973 port_id)) 1974 port->port_attached = 0; 1975 } else if (phy->phy_type & PORT_TYPE_SATA) 1976 port->port_attached = 0; 1977 } 1978 hisi_sas_phy_disconnected(phy); 1979 } 1980 } 1981 EXPORT_SYMBOL_GPL(hisi_sas_phy_down); 1982 1983 void hisi_sas_phy_bcast(struct hisi_sas_phy *phy) 1984 { 1985 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1986 struct hisi_hba *hisi_hba = phy->hisi_hba; 1987 1988 if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) 1989 return; 1990 1991 sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, GFP_ATOMIC); 1992 } 1993 EXPORT_SYMBOL_GPL(hisi_sas_phy_bcast); 1994 1995 void hisi_sas_sync_irqs(struct hisi_hba *hisi_hba) 1996 { 1997 int i; 1998 1999 for (i = 0; i < hisi_hba->cq_nvecs; i++) { 2000 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 2001 2002 synchronize_irq(cq->irq_no); 2003 } 2004 } 2005 EXPORT_SYMBOL_GPL(hisi_sas_sync_irqs); 2006 2007 int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type) 2008 { 2009 struct hisi_hba *hisi_hba = shost_priv(shost); 2010 2011 if (reset_type != SCSI_ADAPTER_RESET) 2012 return -EOPNOTSUPP; 2013 2014 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 2015 2016 return 0; 2017 } 2018 EXPORT_SYMBOL_GPL(hisi_sas_host_reset); 2019 2020 struct scsi_transport_template *hisi_sas_stt; 2021 EXPORT_SYMBOL_GPL(hisi_sas_stt); 2022 2023 static struct sas_domain_function_template hisi_sas_transport_ops = { 2024 .lldd_dev_found = hisi_sas_dev_found, 2025 .lldd_dev_gone = hisi_sas_dev_gone, 2026 .lldd_execute_task = hisi_sas_queue_command, 2027 .lldd_control_phy = hisi_sas_control_phy, 2028 .lldd_abort_task = hisi_sas_abort_task, 2029 .lldd_abort_task_set = hisi_sas_abort_task_set, 2030 .lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset, 2031 .lldd_lu_reset = hisi_sas_lu_reset, 2032 .lldd_query_task = hisi_sas_query_task, 2033 .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha, 2034 .lldd_port_formed = hisi_sas_port_formed, 2035 .lldd_write_gpio = hisi_sas_write_gpio, 2036 .lldd_tmf_aborted = hisi_sas_tmf_aborted, 2037 .lldd_abort_timeout = hisi_sas_internal_abort_timeout, 2038 }; 2039 2040 void hisi_sas_init_mem(struct hisi_hba *hisi_hba) 2041 { 2042 int i, s, j, max_command_entries = HISI_SAS_MAX_COMMANDS; 2043 struct hisi_sas_breakpoint *sata_breakpoint = hisi_hba->sata_breakpoint; 2044 2045 for (i = 0; i < hisi_hba->queue_count; i++) { 2046 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 2047 struct hisi_sas_dq *dq = &hisi_hba->dq[i]; 2048 struct hisi_sas_cmd_hdr *cmd_hdr = hisi_hba->cmd_hdr[i]; 2049 2050 s = sizeof(struct hisi_sas_cmd_hdr); 2051 for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++) 2052 memset(&cmd_hdr[j], 0, s); 2053 2054 dq->wr_point = 0; 2055 2056 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; 2057 memset(hisi_hba->complete_hdr[i], 0, s); 2058 cq->rd_point = 0; 2059 } 2060 2061 s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy; 2062 memset(hisi_hba->initial_fis, 0, s); 2063 2064 s = max_command_entries * sizeof(struct hisi_sas_iost); 2065 memset(hisi_hba->iost, 0, s); 2066 2067 s = max_command_entries * sizeof(struct hisi_sas_breakpoint); 2068 memset(hisi_hba->breakpoint, 0, s); 2069 2070 s = sizeof(struct hisi_sas_sata_breakpoint); 2071 for (j = 0; j < HISI_SAS_MAX_ITCT_ENTRIES; j++) 2072 memset(&sata_breakpoint[j], 0, s); 2073 } 2074 EXPORT_SYMBOL_GPL(hisi_sas_init_mem); 2075 2076 int hisi_sas_alloc(struct hisi_hba *hisi_hba) 2077 { 2078 struct device *dev = hisi_hba->dev; 2079 int i, j, s, max_command_entries = HISI_SAS_MAX_COMMANDS; 2080 int max_command_entries_ru, sz_slot_buf_ru; 2081 int blk_cnt, slots_per_blk; 2082 2083 sema_init(&hisi_hba->sem, 1); 2084 spin_lock_init(&hisi_hba->lock); 2085 for (i = 0; i < hisi_hba->n_phy; i++) { 2086 hisi_sas_phy_init(hisi_hba, i); 2087 hisi_hba->port[i].port_attached = 0; 2088 hisi_hba->port[i].id = -1; 2089 } 2090 2091 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 2092 hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED; 2093 hisi_hba->devices[i].device_id = i; 2094 hisi_hba->devices[i].dev_status = HISI_SAS_DEV_INIT; 2095 } 2096 2097 for (i = 0; i < hisi_hba->queue_count; i++) { 2098 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 2099 struct hisi_sas_dq *dq = &hisi_hba->dq[i]; 2100 2101 /* Completion queue structure */ 2102 cq->id = i; 2103 cq->hisi_hba = hisi_hba; 2104 2105 /* Delivery queue structure */ 2106 spin_lock_init(&dq->lock); 2107 INIT_LIST_HEAD(&dq->list); 2108 dq->id = i; 2109 dq->hisi_hba = hisi_hba; 2110 2111 /* Delivery queue */ 2112 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS; 2113 hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s, 2114 &hisi_hba->cmd_hdr_dma[i], 2115 GFP_KERNEL); 2116 if (!hisi_hba->cmd_hdr[i]) 2117 goto err_out; 2118 2119 /* Completion queue */ 2120 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; 2121 hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s, 2122 &hisi_hba->complete_hdr_dma[i], 2123 GFP_KERNEL); 2124 if (!hisi_hba->complete_hdr[i]) 2125 goto err_out; 2126 } 2127 2128 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct); 2129 hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma, 2130 GFP_KERNEL); 2131 if (!hisi_hba->itct) 2132 goto err_out; 2133 2134 hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries, 2135 sizeof(struct hisi_sas_slot), 2136 GFP_KERNEL); 2137 if (!hisi_hba->slot_info) 2138 goto err_out; 2139 2140 /* roundup to avoid overly large block size */ 2141 max_command_entries_ru = roundup(max_command_entries, 64); 2142 if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK) 2143 sz_slot_buf_ru = sizeof(struct hisi_sas_slot_dif_buf_table); 2144 else 2145 sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table); 2146 sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64); 2147 s = max(lcm(max_command_entries_ru, sz_slot_buf_ru), PAGE_SIZE); 2148 blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s; 2149 slots_per_blk = s / sz_slot_buf_ru; 2150 2151 for (i = 0; i < blk_cnt; i++) { 2152 int slot_index = i * slots_per_blk; 2153 dma_addr_t buf_dma; 2154 void *buf; 2155 2156 buf = dmam_alloc_coherent(dev, s, &buf_dma, 2157 GFP_KERNEL); 2158 if (!buf) 2159 goto err_out; 2160 2161 for (j = 0; j < slots_per_blk; j++, slot_index++) { 2162 struct hisi_sas_slot *slot; 2163 2164 slot = &hisi_hba->slot_info[slot_index]; 2165 slot->buf = buf; 2166 slot->buf_dma = buf_dma; 2167 slot->idx = slot_index; 2168 2169 buf += sz_slot_buf_ru; 2170 buf_dma += sz_slot_buf_ru; 2171 } 2172 } 2173 2174 s = max_command_entries * sizeof(struct hisi_sas_iost); 2175 hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma, 2176 GFP_KERNEL); 2177 if (!hisi_hba->iost) 2178 goto err_out; 2179 2180 s = max_command_entries * sizeof(struct hisi_sas_breakpoint); 2181 hisi_hba->breakpoint = dmam_alloc_coherent(dev, s, 2182 &hisi_hba->breakpoint_dma, 2183 GFP_KERNEL); 2184 if (!hisi_hba->breakpoint) 2185 goto err_out; 2186 2187 s = hisi_hba->slot_index_count = max_command_entries; 2188 hisi_hba->slot_index_tags = devm_bitmap_zalloc(dev, s, GFP_KERNEL); 2189 if (!hisi_hba->slot_index_tags) 2190 goto err_out; 2191 2192 s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS; 2193 hisi_hba->initial_fis = dmam_alloc_coherent(dev, s, 2194 &hisi_hba->initial_fis_dma, 2195 GFP_KERNEL); 2196 if (!hisi_hba->initial_fis) 2197 goto err_out; 2198 2199 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint); 2200 hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s, 2201 &hisi_hba->sata_breakpoint_dma, 2202 GFP_KERNEL); 2203 if (!hisi_hba->sata_breakpoint) 2204 goto err_out; 2205 2206 hisi_hba->last_slot_index = 0; 2207 2208 hisi_hba->wq = create_singlethread_workqueue(dev_name(dev)); 2209 if (!hisi_hba->wq) { 2210 dev_err(dev, "sas_alloc: failed to create workqueue\n"); 2211 goto err_out; 2212 } 2213 2214 return 0; 2215 err_out: 2216 return -ENOMEM; 2217 } 2218 EXPORT_SYMBOL_GPL(hisi_sas_alloc); 2219 2220 void hisi_sas_free(struct hisi_hba *hisi_hba) 2221 { 2222 int i; 2223 2224 for (i = 0; i < hisi_hba->n_phy; i++) { 2225 struct hisi_sas_phy *phy = &hisi_hba->phy[i]; 2226 2227 del_timer_sync(&phy->timer); 2228 } 2229 2230 if (hisi_hba->wq) 2231 destroy_workqueue(hisi_hba->wq); 2232 } 2233 EXPORT_SYMBOL_GPL(hisi_sas_free); 2234 2235 void hisi_sas_rst_work_handler(struct work_struct *work) 2236 { 2237 struct hisi_hba *hisi_hba = 2238 container_of(work, struct hisi_hba, rst_work); 2239 2240 if (hisi_sas_controller_prereset(hisi_hba)) 2241 return; 2242 2243 hisi_sas_controller_reset(hisi_hba); 2244 } 2245 EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler); 2246 2247 void hisi_sas_sync_rst_work_handler(struct work_struct *work) 2248 { 2249 struct hisi_sas_rst *rst = 2250 container_of(work, struct hisi_sas_rst, work); 2251 2252 if (hisi_sas_controller_prereset(rst->hisi_hba)) 2253 goto rst_complete; 2254 2255 if (!hisi_sas_controller_reset(rst->hisi_hba)) 2256 rst->done = true; 2257 rst_complete: 2258 complete(rst->completion); 2259 } 2260 EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler); 2261 2262 int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba) 2263 { 2264 struct device *dev = hisi_hba->dev; 2265 struct platform_device *pdev = hisi_hba->platform_dev; 2266 struct device_node *np = pdev ? pdev->dev.of_node : NULL; 2267 struct clk *refclk; 2268 2269 if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr, 2270 SAS_ADDR_SIZE)) { 2271 dev_err(dev, "could not get property sas-addr\n"); 2272 return -ENOENT; 2273 } 2274 2275 if (np) { 2276 /* 2277 * These properties are only required for platform device-based 2278 * controller with DT firmware. 2279 */ 2280 hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np, 2281 "hisilicon,sas-syscon"); 2282 if (IS_ERR(hisi_hba->ctrl)) { 2283 dev_err(dev, "could not get syscon\n"); 2284 return -ENOENT; 2285 } 2286 2287 if (device_property_read_u32(dev, "ctrl-reset-reg", 2288 &hisi_hba->ctrl_reset_reg)) { 2289 dev_err(dev, "could not get property ctrl-reset-reg\n"); 2290 return -ENOENT; 2291 } 2292 2293 if (device_property_read_u32(dev, "ctrl-reset-sts-reg", 2294 &hisi_hba->ctrl_reset_sts_reg)) { 2295 dev_err(dev, "could not get property ctrl-reset-sts-reg\n"); 2296 return -ENOENT; 2297 } 2298 2299 if (device_property_read_u32(dev, "ctrl-clock-ena-reg", 2300 &hisi_hba->ctrl_clock_ena_reg)) { 2301 dev_err(dev, "could not get property ctrl-clock-ena-reg\n"); 2302 return -ENOENT; 2303 } 2304 } 2305 2306 refclk = devm_clk_get(dev, NULL); 2307 if (IS_ERR(refclk)) 2308 dev_dbg(dev, "no ref clk property\n"); 2309 else 2310 hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000; 2311 2312 if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) { 2313 dev_err(dev, "could not get property phy-count\n"); 2314 return -ENOENT; 2315 } 2316 2317 if (device_property_read_u32(dev, "queue-count", 2318 &hisi_hba->queue_count)) { 2319 dev_err(dev, "could not get property queue-count\n"); 2320 return -ENOENT; 2321 } 2322 2323 return 0; 2324 } 2325 EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info); 2326 2327 static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, 2328 const struct hisi_sas_hw *hw) 2329 { 2330 struct resource *res; 2331 struct Scsi_Host *shost; 2332 struct hisi_hba *hisi_hba; 2333 struct device *dev = &pdev->dev; 2334 int error; 2335 2336 shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba)); 2337 if (!shost) { 2338 dev_err(dev, "scsi host alloc failed\n"); 2339 return NULL; 2340 } 2341 hisi_hba = shost_priv(shost); 2342 2343 INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler); 2344 hisi_hba->hw = hw; 2345 hisi_hba->dev = dev; 2346 hisi_hba->platform_dev = pdev; 2347 hisi_hba->shost = shost; 2348 SHOST_TO_SAS_HA(shost) = &hisi_hba->sha; 2349 2350 timer_setup(&hisi_hba->timer, NULL, 0); 2351 2352 if (hisi_sas_get_fw_info(hisi_hba) < 0) 2353 goto err_out; 2354 2355 error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 2356 if (error) { 2357 dev_err(dev, "No usable DMA addressing method\n"); 2358 goto err_out; 2359 } 2360 2361 hisi_hba->regs = devm_platform_ioremap_resource(pdev, 0); 2362 if (IS_ERR(hisi_hba->regs)) 2363 goto err_out; 2364 2365 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 2366 if (res) { 2367 hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res); 2368 if (IS_ERR(hisi_hba->sgpio_regs)) 2369 goto err_out; 2370 } 2371 2372 if (hisi_sas_alloc(hisi_hba)) { 2373 hisi_sas_free(hisi_hba); 2374 goto err_out; 2375 } 2376 2377 return shost; 2378 err_out: 2379 scsi_host_put(shost); 2380 dev_err(dev, "shost alloc failed\n"); 2381 return NULL; 2382 } 2383 2384 static int hisi_sas_interrupt_preinit(struct hisi_hba *hisi_hba) 2385 { 2386 if (hisi_hba->hw->interrupt_preinit) 2387 return hisi_hba->hw->interrupt_preinit(hisi_hba); 2388 return 0; 2389 } 2390 2391 int hisi_sas_probe(struct platform_device *pdev, 2392 const struct hisi_sas_hw *hw) 2393 { 2394 struct Scsi_Host *shost; 2395 struct hisi_hba *hisi_hba; 2396 struct device *dev = &pdev->dev; 2397 struct asd_sas_phy **arr_phy; 2398 struct asd_sas_port **arr_port; 2399 struct sas_ha_struct *sha; 2400 int rc, phy_nr, port_nr, i; 2401 2402 shost = hisi_sas_shost_alloc(pdev, hw); 2403 if (!shost) 2404 return -ENOMEM; 2405 2406 sha = SHOST_TO_SAS_HA(shost); 2407 hisi_hba = shost_priv(shost); 2408 platform_set_drvdata(pdev, sha); 2409 2410 phy_nr = port_nr = hisi_hba->n_phy; 2411 2412 arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL); 2413 arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL); 2414 if (!arr_phy || !arr_port) { 2415 rc = -ENOMEM; 2416 goto err_out_ha; 2417 } 2418 2419 sha->sas_phy = arr_phy; 2420 sha->sas_port = arr_port; 2421 sha->lldd_ha = hisi_hba; 2422 2423 shost->transportt = hisi_sas_stt; 2424 shost->max_id = HISI_SAS_MAX_DEVICES; 2425 shost->max_lun = ~0; 2426 shost->max_channel = 1; 2427 shost->max_cmd_len = 16; 2428 if (hisi_hba->hw->slot_index_alloc) { 2429 shost->can_queue = HISI_SAS_MAX_COMMANDS; 2430 shost->cmd_per_lun = HISI_SAS_MAX_COMMANDS; 2431 } else { 2432 shost->can_queue = HISI_SAS_UNRESERVED_IPTT; 2433 shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT; 2434 } 2435 2436 sha->sas_ha_name = DRV_NAME; 2437 sha->dev = hisi_hba->dev; 2438 sha->lldd_module = THIS_MODULE; 2439 sha->sas_addr = &hisi_hba->sas_addr[0]; 2440 sha->num_phys = hisi_hba->n_phy; 2441 sha->core.shost = hisi_hba->shost; 2442 2443 for (i = 0; i < hisi_hba->n_phy; i++) { 2444 sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy; 2445 sha->sas_port[i] = &hisi_hba->port[i].sas_port; 2446 } 2447 2448 rc = hisi_sas_interrupt_preinit(hisi_hba); 2449 if (rc) 2450 goto err_out_ha; 2451 2452 rc = scsi_add_host(shost, &pdev->dev); 2453 if (rc) 2454 goto err_out_ha; 2455 2456 rc = sas_register_ha(sha); 2457 if (rc) 2458 goto err_out_register_ha; 2459 2460 rc = hisi_hba->hw->hw_init(hisi_hba); 2461 if (rc) 2462 goto err_out_hw_init; 2463 2464 scsi_scan_host(shost); 2465 2466 return 0; 2467 2468 err_out_hw_init: 2469 sas_unregister_ha(sha); 2470 err_out_register_ha: 2471 scsi_remove_host(shost); 2472 err_out_ha: 2473 hisi_sas_free(hisi_hba); 2474 scsi_host_put(shost); 2475 return rc; 2476 } 2477 EXPORT_SYMBOL_GPL(hisi_sas_probe); 2478 2479 int hisi_sas_remove(struct platform_device *pdev) 2480 { 2481 struct sas_ha_struct *sha = platform_get_drvdata(pdev); 2482 struct hisi_hba *hisi_hba = sha->lldd_ha; 2483 struct Scsi_Host *shost = sha->core.shost; 2484 2485 del_timer_sync(&hisi_hba->timer); 2486 2487 sas_unregister_ha(sha); 2488 sas_remove_host(sha->core.shost); 2489 2490 hisi_sas_free(hisi_hba); 2491 scsi_host_put(shost); 2492 return 0; 2493 } 2494 EXPORT_SYMBOL_GPL(hisi_sas_remove); 2495 2496 #if IS_ENABLED(CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE) 2497 #define DEBUGFS_ENABLE_DEFAULT "enabled" 2498 bool hisi_sas_debugfs_enable = true; 2499 u32 hisi_sas_debugfs_dump_count = 50; 2500 #else 2501 #define DEBUGFS_ENABLE_DEFAULT "disabled" 2502 bool hisi_sas_debugfs_enable; 2503 u32 hisi_sas_debugfs_dump_count = 1; 2504 #endif 2505 2506 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable); 2507 module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444); 2508 MODULE_PARM_DESC(hisi_sas_debugfs_enable, 2509 "Enable driver debugfs (default "DEBUGFS_ENABLE_DEFAULT")"); 2510 2511 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dump_count); 2512 module_param_named(debugfs_dump_count, hisi_sas_debugfs_dump_count, uint, 0444); 2513 MODULE_PARM_DESC(hisi_sas_debugfs_dump_count, "Number of debugfs dumps to allow"); 2514 2515 struct dentry *hisi_sas_debugfs_dir; 2516 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dir); 2517 2518 static __init int hisi_sas_init(void) 2519 { 2520 hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops); 2521 if (!hisi_sas_stt) 2522 return -ENOMEM; 2523 2524 if (hisi_sas_debugfs_enable) { 2525 hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL); 2526 if (hisi_sas_debugfs_dump_count > HISI_SAS_MAX_DEBUGFS_DUMP) { 2527 pr_info("hisi_sas: Limiting debugfs dump count\n"); 2528 hisi_sas_debugfs_dump_count = HISI_SAS_MAX_DEBUGFS_DUMP; 2529 } 2530 } 2531 2532 return 0; 2533 } 2534 2535 static __exit void hisi_sas_exit(void) 2536 { 2537 sas_release_transport(hisi_sas_stt); 2538 2539 debugfs_remove(hisi_sas_debugfs_dir); 2540 } 2541 2542 module_init(hisi_sas_init); 2543 module_exit(hisi_sas_exit); 2544 2545 MODULE_LICENSE("GPL"); 2546 MODULE_AUTHOR("John Garry <john.garry@huawei.com>"); 2547 MODULE_DESCRIPTION("HISILICON SAS controller driver"); 2548 MODULE_ALIAS("platform:" DRV_NAME); 2549