1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Copyright (c) 2015 Linaro Ltd. 4 * Copyright (c) 2015 Hisilicon Limited. 5 */ 6 7 #include "hisi_sas.h" 8 #define DRV_NAME "hisi_sas" 9 10 #define DEV_IS_GONE(dev) \ 11 ((!dev) || (dev->dev_type == SAS_PHY_UNUSED)) 12 13 static int hisi_sas_softreset_ata_disk(struct domain_device *device); 14 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, 15 void *funcdata); 16 static void hisi_sas_release_task(struct hisi_hba *hisi_hba, 17 struct domain_device *device); 18 static void hisi_sas_dev_gone(struct domain_device *device); 19 20 struct hisi_sas_internal_abort_data { 21 bool rst_ha_timeout; /* reset the HA for timeout */ 22 }; 23 24 u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction) 25 { 26 switch (fis->command) { 27 case ATA_CMD_FPDMA_WRITE: 28 case ATA_CMD_FPDMA_READ: 29 case ATA_CMD_FPDMA_RECV: 30 case ATA_CMD_FPDMA_SEND: 31 case ATA_CMD_NCQ_NON_DATA: 32 return HISI_SAS_SATA_PROTOCOL_FPDMA; 33 34 case ATA_CMD_DOWNLOAD_MICRO: 35 case ATA_CMD_ID_ATA: 36 case ATA_CMD_PMP_READ: 37 case ATA_CMD_READ_LOG_EXT: 38 case ATA_CMD_PIO_READ: 39 case ATA_CMD_PIO_READ_EXT: 40 case ATA_CMD_PMP_WRITE: 41 case ATA_CMD_WRITE_LOG_EXT: 42 case ATA_CMD_PIO_WRITE: 43 case ATA_CMD_PIO_WRITE_EXT: 44 return HISI_SAS_SATA_PROTOCOL_PIO; 45 46 case ATA_CMD_DSM: 47 case ATA_CMD_DOWNLOAD_MICRO_DMA: 48 case ATA_CMD_PMP_READ_DMA: 49 case ATA_CMD_PMP_WRITE_DMA: 50 case ATA_CMD_READ: 51 case ATA_CMD_READ_EXT: 52 case ATA_CMD_READ_LOG_DMA_EXT: 53 case ATA_CMD_READ_STREAM_DMA_EXT: 54 case ATA_CMD_TRUSTED_RCV_DMA: 55 case ATA_CMD_TRUSTED_SND_DMA: 56 case ATA_CMD_WRITE: 57 case ATA_CMD_WRITE_EXT: 58 case ATA_CMD_WRITE_FUA_EXT: 59 case ATA_CMD_WRITE_QUEUED: 60 case ATA_CMD_WRITE_LOG_DMA_EXT: 61 case ATA_CMD_WRITE_STREAM_DMA_EXT: 62 case ATA_CMD_ZAC_MGMT_IN: 63 return HISI_SAS_SATA_PROTOCOL_DMA; 64 65 case ATA_CMD_CHK_POWER: 66 case ATA_CMD_DEV_RESET: 67 case ATA_CMD_EDD: 68 case ATA_CMD_FLUSH: 69 case ATA_CMD_FLUSH_EXT: 70 case ATA_CMD_VERIFY: 71 case ATA_CMD_VERIFY_EXT: 72 case ATA_CMD_SET_FEATURES: 73 case ATA_CMD_STANDBY: 74 case ATA_CMD_STANDBYNOW1: 75 case ATA_CMD_ZAC_MGMT_OUT: 76 return HISI_SAS_SATA_PROTOCOL_NONDATA; 77 78 case ATA_CMD_SET_MAX: 79 switch (fis->features) { 80 case ATA_SET_MAX_PASSWD: 81 case ATA_SET_MAX_LOCK: 82 return HISI_SAS_SATA_PROTOCOL_PIO; 83 84 case ATA_SET_MAX_PASSWD_DMA: 85 case ATA_SET_MAX_UNLOCK_DMA: 86 return HISI_SAS_SATA_PROTOCOL_DMA; 87 88 default: 89 return HISI_SAS_SATA_PROTOCOL_NONDATA; 90 } 91 92 default: 93 { 94 if (direction == DMA_NONE) 95 return HISI_SAS_SATA_PROTOCOL_NONDATA; 96 return HISI_SAS_SATA_PROTOCOL_PIO; 97 } 98 } 99 } 100 EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol); 101 102 void hisi_sas_sata_done(struct sas_task *task, 103 struct hisi_sas_slot *slot) 104 { 105 struct task_status_struct *ts = &task->task_status; 106 struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf; 107 struct hisi_sas_status_buffer *status_buf = 108 hisi_sas_status_buf_addr_mem(slot); 109 u8 *iu = &status_buf->iu[0]; 110 struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu; 111 112 resp->frame_len = sizeof(struct dev_to_host_fis); 113 memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis)); 114 115 ts->buf_valid_size = sizeof(*resp); 116 } 117 EXPORT_SYMBOL_GPL(hisi_sas_sata_done); 118 119 /* 120 * This function assumes linkrate mask fits in 8 bits, which it 121 * does for all HW versions supported. 122 */ 123 u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max) 124 { 125 u8 rate = 0; 126 int i; 127 128 max -= SAS_LINK_RATE_1_5_GBPS; 129 for (i = 0; i <= max; i++) 130 rate |= 1 << (i * 2); 131 return rate; 132 } 133 EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask); 134 135 static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device) 136 { 137 return device->port->ha->lldd_ha; 138 } 139 140 struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port) 141 { 142 return container_of(sas_port, struct hisi_sas_port, sas_port); 143 } 144 EXPORT_SYMBOL_GPL(to_hisi_sas_port); 145 146 void hisi_sas_stop_phys(struct hisi_hba *hisi_hba) 147 { 148 int phy_no; 149 150 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) 151 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 152 } 153 EXPORT_SYMBOL_GPL(hisi_sas_stop_phys); 154 155 static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx) 156 { 157 void *bitmap = hisi_hba->slot_index_tags; 158 159 __clear_bit(slot_idx, bitmap); 160 } 161 162 static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx) 163 { 164 if (hisi_hba->hw->slot_index_alloc || 165 slot_idx >= HISI_SAS_UNRESERVED_IPTT) { 166 spin_lock(&hisi_hba->lock); 167 hisi_sas_slot_index_clear(hisi_hba, slot_idx); 168 spin_unlock(&hisi_hba->lock); 169 } 170 } 171 172 static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx) 173 { 174 void *bitmap = hisi_hba->slot_index_tags; 175 176 __set_bit(slot_idx, bitmap); 177 } 178 179 static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, 180 struct scsi_cmnd *scsi_cmnd) 181 { 182 int index; 183 void *bitmap = hisi_hba->slot_index_tags; 184 185 if (scsi_cmnd) 186 return scsi_cmd_to_rq(scsi_cmnd)->tag; 187 188 spin_lock(&hisi_hba->lock); 189 index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count, 190 hisi_hba->last_slot_index + 1); 191 if (index >= hisi_hba->slot_index_count) { 192 index = find_next_zero_bit(bitmap, 193 hisi_hba->slot_index_count, 194 HISI_SAS_UNRESERVED_IPTT); 195 if (index >= hisi_hba->slot_index_count) { 196 spin_unlock(&hisi_hba->lock); 197 return -SAS_QUEUE_FULL; 198 } 199 } 200 hisi_sas_slot_index_set(hisi_hba, index); 201 hisi_hba->last_slot_index = index; 202 spin_unlock(&hisi_hba->lock); 203 204 return index; 205 } 206 207 void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, 208 struct hisi_sas_slot *slot) 209 { 210 int device_id = slot->device_id; 211 struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id]; 212 213 if (task) { 214 struct device *dev = hisi_hba->dev; 215 216 if (!task->lldd_task) 217 return; 218 219 task->lldd_task = NULL; 220 221 if (!sas_protocol_ata(task->task_proto)) { 222 if (slot->n_elem) 223 dma_unmap_sg(dev, task->scatter, 224 task->num_scatter, 225 task->data_dir); 226 if (slot->n_elem_dif) { 227 struct sas_ssp_task *ssp_task = &task->ssp_task; 228 struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; 229 230 dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), 231 scsi_prot_sg_count(scsi_cmnd), 232 task->data_dir); 233 } 234 } 235 } 236 237 spin_lock(&sas_dev->lock); 238 list_del_init(&slot->entry); 239 spin_unlock(&sas_dev->lock); 240 241 memset(slot, 0, offsetof(struct hisi_sas_slot, buf)); 242 243 hisi_sas_slot_index_free(hisi_hba, slot->idx); 244 } 245 EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free); 246 247 static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba, 248 struct hisi_sas_slot *slot) 249 { 250 hisi_hba->hw->prep_smp(hisi_hba, slot); 251 } 252 253 static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba, 254 struct hisi_sas_slot *slot) 255 { 256 hisi_hba->hw->prep_ssp(hisi_hba, slot); 257 } 258 259 static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba, 260 struct hisi_sas_slot *slot) 261 { 262 hisi_hba->hw->prep_stp(hisi_hba, slot); 263 } 264 265 static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba, 266 struct hisi_sas_slot *slot) 267 { 268 hisi_hba->hw->prep_abort(hisi_hba, slot); 269 } 270 271 static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba, 272 struct sas_task *task, int n_elem, 273 int n_elem_req) 274 { 275 struct device *dev = hisi_hba->dev; 276 277 if (!sas_protocol_ata(task->task_proto)) { 278 if (task->num_scatter) { 279 if (n_elem) 280 dma_unmap_sg(dev, task->scatter, 281 task->num_scatter, 282 task->data_dir); 283 } else if (task->task_proto & SAS_PROTOCOL_SMP) { 284 if (n_elem_req) 285 dma_unmap_sg(dev, &task->smp_task.smp_req, 286 1, DMA_TO_DEVICE); 287 } 288 } 289 } 290 291 static int hisi_sas_dma_map(struct hisi_hba *hisi_hba, 292 struct sas_task *task, int *n_elem, 293 int *n_elem_req) 294 { 295 struct device *dev = hisi_hba->dev; 296 int rc; 297 298 if (sas_protocol_ata(task->task_proto)) { 299 *n_elem = task->num_scatter; 300 } else { 301 unsigned int req_len; 302 303 if (task->num_scatter) { 304 *n_elem = dma_map_sg(dev, task->scatter, 305 task->num_scatter, task->data_dir); 306 if (!*n_elem) { 307 rc = -ENOMEM; 308 goto prep_out; 309 } 310 } else if (task->task_proto & SAS_PROTOCOL_SMP) { 311 *n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req, 312 1, DMA_TO_DEVICE); 313 if (!*n_elem_req) { 314 rc = -ENOMEM; 315 goto prep_out; 316 } 317 req_len = sg_dma_len(&task->smp_task.smp_req); 318 if (req_len & 0x3) { 319 rc = -EINVAL; 320 goto err_out_dma_unmap; 321 } 322 } 323 } 324 325 if (*n_elem > HISI_SAS_SGE_PAGE_CNT) { 326 dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT\n", 327 *n_elem); 328 rc = -EINVAL; 329 goto err_out_dma_unmap; 330 } 331 return 0; 332 333 err_out_dma_unmap: 334 /* It would be better to call dma_unmap_sg() here, but it's messy */ 335 hisi_sas_dma_unmap(hisi_hba, task, *n_elem, 336 *n_elem_req); 337 prep_out: 338 return rc; 339 } 340 341 static void hisi_sas_dif_dma_unmap(struct hisi_hba *hisi_hba, 342 struct sas_task *task, int n_elem_dif) 343 { 344 struct device *dev = hisi_hba->dev; 345 346 if (n_elem_dif) { 347 struct sas_ssp_task *ssp_task = &task->ssp_task; 348 struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; 349 350 dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), 351 scsi_prot_sg_count(scsi_cmnd), 352 task->data_dir); 353 } 354 } 355 356 static int hisi_sas_dif_dma_map(struct hisi_hba *hisi_hba, 357 int *n_elem_dif, struct sas_task *task) 358 { 359 struct device *dev = hisi_hba->dev; 360 struct sas_ssp_task *ssp_task; 361 struct scsi_cmnd *scsi_cmnd; 362 int rc; 363 364 if (task->num_scatter) { 365 ssp_task = &task->ssp_task; 366 scsi_cmnd = ssp_task->cmd; 367 368 if (scsi_prot_sg_count(scsi_cmnd)) { 369 *n_elem_dif = dma_map_sg(dev, 370 scsi_prot_sglist(scsi_cmnd), 371 scsi_prot_sg_count(scsi_cmnd), 372 task->data_dir); 373 374 if (!*n_elem_dif) 375 return -ENOMEM; 376 377 if (*n_elem_dif > HISI_SAS_SGE_DIF_PAGE_CNT) { 378 dev_err(dev, "task prep: n_elem_dif(%d) too large\n", 379 *n_elem_dif); 380 rc = -EINVAL; 381 goto err_out_dif_dma_unmap; 382 } 383 } 384 } 385 386 return 0; 387 388 err_out_dif_dma_unmap: 389 dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), 390 scsi_prot_sg_count(scsi_cmnd), task->data_dir); 391 return rc; 392 } 393 394 static 395 void hisi_sas_task_deliver(struct hisi_hba *hisi_hba, 396 struct hisi_sas_slot *slot, 397 struct hisi_sas_dq *dq, 398 struct hisi_sas_device *sas_dev) 399 { 400 struct hisi_sas_cmd_hdr *cmd_hdr_base; 401 int dlvry_queue_slot, dlvry_queue; 402 struct sas_task *task = slot->task; 403 int wr_q_index; 404 405 spin_lock(&dq->lock); 406 wr_q_index = dq->wr_point; 407 dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS; 408 list_add_tail(&slot->delivery, &dq->list); 409 spin_unlock(&dq->lock); 410 spin_lock(&sas_dev->lock); 411 list_add_tail(&slot->entry, &sas_dev->list); 412 spin_unlock(&sas_dev->lock); 413 414 dlvry_queue = dq->id; 415 dlvry_queue_slot = wr_q_index; 416 417 slot->device_id = sas_dev->device_id; 418 slot->dlvry_queue = dlvry_queue; 419 slot->dlvry_queue_slot = dlvry_queue_slot; 420 cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue]; 421 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot]; 422 423 task->lldd_task = slot; 424 425 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr)); 426 memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ); 427 memset(hisi_sas_status_buf_addr_mem(slot), 0, 428 sizeof(struct hisi_sas_err_record)); 429 430 switch (task->task_proto) { 431 case SAS_PROTOCOL_SMP: 432 hisi_sas_task_prep_smp(hisi_hba, slot); 433 break; 434 case SAS_PROTOCOL_SSP: 435 hisi_sas_task_prep_ssp(hisi_hba, slot); 436 break; 437 case SAS_PROTOCOL_SATA: 438 case SAS_PROTOCOL_STP: 439 case SAS_PROTOCOL_STP_ALL: 440 hisi_sas_task_prep_ata(hisi_hba, slot); 441 break; 442 case SAS_PROTOCOL_INTERNAL_ABORT: 443 hisi_sas_task_prep_abort(hisi_hba, slot); 444 break; 445 default: 446 return; 447 } 448 449 WRITE_ONCE(slot->ready, 1); 450 451 spin_lock(&dq->lock); 452 hisi_hba->hw->start_delivery(dq); 453 spin_unlock(&dq->lock); 454 } 455 456 static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags) 457 { 458 int n_elem = 0, n_elem_dif = 0, n_elem_req = 0; 459 struct domain_device *device = task->dev; 460 struct asd_sas_port *sas_port = device->port; 461 struct hisi_sas_device *sas_dev = device->lldd_dev; 462 bool internal_abort = sas_is_internal_abort(task); 463 struct scsi_cmnd *scmd = NULL; 464 struct hisi_sas_dq *dq = NULL; 465 struct hisi_sas_port *port; 466 struct hisi_hba *hisi_hba; 467 struct hisi_sas_slot *slot; 468 struct device *dev; 469 int rc; 470 471 if (!sas_port) { 472 struct task_status_struct *ts = &task->task_status; 473 474 ts->resp = SAS_TASK_UNDELIVERED; 475 ts->stat = SAS_PHY_DOWN; 476 /* 477 * libsas will use dev->port, should 478 * not call task_done for sata 479 */ 480 if (device->dev_type != SAS_SATA_DEV && !internal_abort) 481 task->task_done(task); 482 return -ECOMM; 483 } 484 485 hisi_hba = dev_to_hisi_hba(device); 486 dev = hisi_hba->dev; 487 488 switch (task->task_proto) { 489 case SAS_PROTOCOL_SSP: 490 case SAS_PROTOCOL_SMP: 491 case SAS_PROTOCOL_SATA: 492 case SAS_PROTOCOL_STP: 493 case SAS_PROTOCOL_STP_ALL: 494 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) { 495 if (!gfpflags_allow_blocking(gfp_flags)) 496 return -EINVAL; 497 498 down(&hisi_hba->sem); 499 up(&hisi_hba->sem); 500 } 501 502 if (DEV_IS_GONE(sas_dev)) { 503 if (sas_dev) 504 dev_info(dev, "task prep: device %d not ready\n", 505 sas_dev->device_id); 506 else 507 dev_info(dev, "task prep: device %016llx not ready\n", 508 SAS_ADDR(device->sas_addr)); 509 510 return -ECOMM; 511 } 512 513 port = to_hisi_sas_port(sas_port); 514 if (!port->port_attached) { 515 dev_info(dev, "task prep: %s port%d not attach device\n", 516 dev_is_sata(device) ? "SATA/STP" : "SAS", 517 device->port->id); 518 519 return -ECOMM; 520 } 521 522 if (task->uldd_task) { 523 struct ata_queued_cmd *qc; 524 525 if (dev_is_sata(device)) { 526 qc = task->uldd_task; 527 scmd = qc->scsicmd; 528 } else { 529 scmd = task->uldd_task; 530 } 531 } 532 533 if (scmd) { 534 unsigned int dq_index; 535 u32 blk_tag; 536 537 blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); 538 dq_index = blk_mq_unique_tag_to_hwq(blk_tag); 539 dq = &hisi_hba->dq[dq_index]; 540 } else { 541 struct Scsi_Host *shost = hisi_hba->shost; 542 struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; 543 int queue = qmap->mq_map[raw_smp_processor_id()]; 544 545 dq = &hisi_hba->dq[queue]; 546 } 547 break; 548 case SAS_PROTOCOL_INTERNAL_ABORT: 549 if (!hisi_hba->hw->prep_abort) 550 return TMF_RESP_FUNC_FAILED; 551 552 if (test_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags)) 553 return -EIO; 554 555 hisi_hba = dev_to_hisi_hba(device); 556 557 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) 558 return -EINVAL; 559 560 port = to_hisi_sas_port(sas_port); 561 dq = &hisi_hba->dq[task->abort_task.qid]; 562 break; 563 default: 564 dev_err(hisi_hba->dev, "task prep: unknown/unsupported proto (0x%x)\n", 565 task->task_proto); 566 return -EINVAL; 567 } 568 569 rc = hisi_sas_dma_map(hisi_hba, task, &n_elem, 570 &n_elem_req); 571 if (rc < 0) 572 goto prep_out; 573 574 if (!sas_protocol_ata(task->task_proto)) { 575 rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task); 576 if (rc < 0) 577 goto err_out_dma_unmap; 578 } 579 580 if (!internal_abort && hisi_hba->hw->slot_index_alloc) 581 rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device); 582 else 583 rc = hisi_sas_slot_index_alloc(hisi_hba, scmd); 584 585 if (rc < 0) 586 goto err_out_dif_dma_unmap; 587 588 slot = &hisi_hba->slot_info[rc]; 589 slot->n_elem = n_elem; 590 slot->n_elem_dif = n_elem_dif; 591 slot->task = task; 592 slot->port = port; 593 594 slot->tmf = task->tmf; 595 slot->is_internal = !!task->tmf || internal_abort; 596 597 /* protect task_prep and start_delivery sequence */ 598 hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev); 599 600 return 0; 601 602 err_out_dif_dma_unmap: 603 if (!sas_protocol_ata(task->task_proto)) 604 hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif); 605 err_out_dma_unmap: 606 hisi_sas_dma_unmap(hisi_hba, task, n_elem, 607 n_elem_req); 608 prep_out: 609 dev_err(dev, "task exec: failed[%d]!\n", rc); 610 return rc; 611 } 612 613 static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no, 614 gfp_t gfp_flags) 615 { 616 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 617 struct asd_sas_phy *sas_phy = &phy->sas_phy; 618 619 if (!phy->phy_attached) 620 return; 621 622 sas_notify_phy_event(sas_phy, PHYE_OOB_DONE, gfp_flags); 623 624 if (sas_phy->phy) { 625 struct sas_phy *sphy = sas_phy->phy; 626 627 sphy->negotiated_linkrate = sas_phy->linkrate; 628 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; 629 sphy->maximum_linkrate_hw = 630 hisi_hba->hw->phy_get_max_linkrate(); 631 if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) 632 sphy->minimum_linkrate = phy->minimum_linkrate; 633 634 if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) 635 sphy->maximum_linkrate = phy->maximum_linkrate; 636 } 637 638 if (phy->phy_type & PORT_TYPE_SAS) { 639 struct sas_identify_frame *id; 640 641 id = (struct sas_identify_frame *)phy->frame_rcvd; 642 id->dev_type = phy->identify.device_type; 643 id->initiator_bits = SAS_PROTOCOL_ALL; 644 id->target_bits = phy->identify.target_port_protocols; 645 } else if (phy->phy_type & PORT_TYPE_SATA) { 646 /* Nothing */ 647 } 648 649 sas_phy->frame_rcvd_size = phy->frame_rcvd_size; 650 sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, gfp_flags); 651 } 652 653 static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device) 654 { 655 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 656 struct hisi_sas_device *sas_dev = NULL; 657 int last = hisi_hba->last_dev_id; 658 int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES; 659 int i; 660 661 spin_lock(&hisi_hba->lock); 662 for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) { 663 if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) { 664 int queue = i % hisi_hba->queue_count; 665 struct hisi_sas_dq *dq = &hisi_hba->dq[queue]; 666 667 hisi_hba->devices[i].device_id = i; 668 sas_dev = &hisi_hba->devices[i]; 669 sas_dev->dev_status = HISI_SAS_DEV_INIT; 670 sas_dev->dev_type = device->dev_type; 671 sas_dev->hisi_hba = hisi_hba; 672 sas_dev->sas_device = device; 673 sas_dev->dq = dq; 674 spin_lock_init(&sas_dev->lock); 675 INIT_LIST_HEAD(&hisi_hba->devices[i].list); 676 break; 677 } 678 i++; 679 } 680 hisi_hba->last_dev_id = i; 681 spin_unlock(&hisi_hba->lock); 682 683 return sas_dev; 684 } 685 686 static void hisi_sas_tmf_aborted(struct sas_task *task) 687 { 688 struct hisi_sas_slot *slot = task->lldd_task; 689 struct domain_device *device = task->dev; 690 struct hisi_sas_device *sas_dev = device->lldd_dev; 691 struct hisi_hba *hisi_hba = sas_dev->hisi_hba; 692 693 if (slot) { 694 struct hisi_sas_cq *cq = 695 &hisi_hba->cq[slot->dlvry_queue]; 696 /* 697 * sync irq to avoid free'ing task 698 * before using task in IO completion 699 */ 700 synchronize_irq(cq->irq_no); 701 slot->task = NULL; 702 } 703 } 704 705 #define HISI_SAS_DISK_RECOVER_CNT 3 706 static int hisi_sas_init_device(struct domain_device *device) 707 { 708 int rc = TMF_RESP_FUNC_COMPLETE; 709 struct scsi_lun lun; 710 int retry = HISI_SAS_DISK_RECOVER_CNT; 711 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 712 struct device *dev = hisi_hba->dev; 713 struct sas_phy *local_phy; 714 715 switch (device->dev_type) { 716 case SAS_END_DEVICE: 717 int_to_scsilun(0, &lun); 718 719 while (retry-- > 0) { 720 rc = sas_clear_task_set(device, lun.scsi_lun); 721 if (rc == TMF_RESP_FUNC_COMPLETE) { 722 hisi_sas_release_task(hisi_hba, device); 723 break; 724 } 725 } 726 break; 727 case SAS_SATA_DEV: 728 case SAS_SATA_PM: 729 case SAS_SATA_PM_PORT: 730 case SAS_SATA_PENDING: 731 /* 732 * send HARD RESET to clear previous affiliation of 733 * STP target port 734 */ 735 local_phy = sas_get_local_phy(device); 736 if (!scsi_is_sas_phy_local(local_phy) && 737 !test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) { 738 unsigned long deadline = ata_deadline(jiffies, 20000); 739 struct sata_device *sata_dev = &device->sata_dev; 740 struct ata_host *ata_host = sata_dev->ata_host; 741 struct ata_port_operations *ops = ata_host->ops; 742 struct ata_port *ap = sata_dev->ap; 743 struct ata_link *link; 744 unsigned int classes; 745 746 ata_for_each_link(link, ap, EDGE) 747 rc = ops->hardreset(link, &classes, 748 deadline); 749 } 750 sas_put_local_phy(local_phy); 751 if (rc) { 752 dev_warn(dev, "SATA disk hardreset fail: %d\n", rc); 753 return rc; 754 } 755 756 while (retry-- > 0) { 757 rc = hisi_sas_softreset_ata_disk(device); 758 if (!rc) 759 break; 760 } 761 break; 762 default: 763 break; 764 } 765 766 return rc; 767 } 768 769 int hisi_sas_slave_alloc(struct scsi_device *sdev) 770 { 771 struct domain_device *ddev; 772 int rc; 773 774 rc = sas_slave_alloc(sdev); 775 if (rc) 776 return rc; 777 ddev = sdev_to_domain_dev(sdev); 778 779 return hisi_sas_init_device(ddev); 780 } 781 EXPORT_SYMBOL_GPL(hisi_sas_slave_alloc); 782 783 static int hisi_sas_dev_found(struct domain_device *device) 784 { 785 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 786 struct domain_device *parent_dev = device->parent; 787 struct hisi_sas_device *sas_dev; 788 struct device *dev = hisi_hba->dev; 789 int rc; 790 791 if (hisi_hba->hw->alloc_dev) 792 sas_dev = hisi_hba->hw->alloc_dev(device); 793 else 794 sas_dev = hisi_sas_alloc_dev(device); 795 if (!sas_dev) { 796 dev_err(dev, "fail alloc dev: max support %d devices\n", 797 HISI_SAS_MAX_DEVICES); 798 return -EINVAL; 799 } 800 801 device->lldd_dev = sas_dev; 802 hisi_hba->hw->setup_itct(hisi_hba, sas_dev); 803 804 if (parent_dev && dev_is_expander(parent_dev->dev_type)) { 805 int phy_no; 806 u8 phy_num = parent_dev->ex_dev.num_phys; 807 struct ex_phy *phy; 808 809 for (phy_no = 0; phy_no < phy_num; phy_no++) { 810 phy = &parent_dev->ex_dev.ex_phy[phy_no]; 811 if (SAS_ADDR(phy->attached_sas_addr) == 812 SAS_ADDR(device->sas_addr)) 813 break; 814 } 815 816 if (phy_no == phy_num) { 817 dev_info(dev, "dev found: no attached " 818 "dev:%016llx at ex:%016llx\n", 819 SAS_ADDR(device->sas_addr), 820 SAS_ADDR(parent_dev->sas_addr)); 821 rc = -EINVAL; 822 goto err_out; 823 } 824 } 825 826 dev_info(dev, "dev[%d:%x] found\n", 827 sas_dev->device_id, sas_dev->dev_type); 828 829 sas_dev->dev_status = HISI_SAS_DEV_NORMAL; 830 return 0; 831 832 err_out: 833 hisi_sas_dev_gone(device); 834 return rc; 835 } 836 837 int hisi_sas_slave_configure(struct scsi_device *sdev) 838 { 839 struct domain_device *dev = sdev_to_domain_dev(sdev); 840 int ret = sas_slave_configure(sdev); 841 842 if (ret) 843 return ret; 844 if (!dev_is_sata(dev)) 845 sas_change_queue_depth(sdev, 64); 846 847 return 0; 848 } 849 EXPORT_SYMBOL_GPL(hisi_sas_slave_configure); 850 851 void hisi_sas_scan_start(struct Scsi_Host *shost) 852 { 853 struct hisi_hba *hisi_hba = shost_priv(shost); 854 855 hisi_hba->hw->phys_init(hisi_hba); 856 } 857 EXPORT_SYMBOL_GPL(hisi_sas_scan_start); 858 859 int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time) 860 { 861 struct hisi_hba *hisi_hba = shost_priv(shost); 862 struct sas_ha_struct *sha = &hisi_hba->sha; 863 864 /* Wait for PHY up interrupt to occur */ 865 if (time < HZ) 866 return 0; 867 868 sas_drain_work(sha); 869 return 1; 870 } 871 EXPORT_SYMBOL_GPL(hisi_sas_scan_finished); 872 873 static void hisi_sas_phyup_work_common(struct work_struct *work, 874 enum hisi_sas_phy_event event) 875 { 876 struct hisi_sas_phy *phy = 877 container_of(work, typeof(*phy), works[event]); 878 struct hisi_hba *hisi_hba = phy->hisi_hba; 879 struct asd_sas_phy *sas_phy = &phy->sas_phy; 880 int phy_no = sas_phy->id; 881 882 phy->wait_phyup_cnt = 0; 883 if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP) 884 hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no); 885 hisi_sas_bytes_dmaed(hisi_hba, phy_no, GFP_KERNEL); 886 } 887 888 static void hisi_sas_phyup_work(struct work_struct *work) 889 { 890 hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP); 891 } 892 893 static void hisi_sas_linkreset_work(struct work_struct *work) 894 { 895 struct hisi_sas_phy *phy = 896 container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]); 897 struct asd_sas_phy *sas_phy = &phy->sas_phy; 898 899 hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL); 900 } 901 902 static void hisi_sas_phyup_pm_work(struct work_struct *work) 903 { 904 struct hisi_sas_phy *phy = 905 container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP_PM]); 906 struct hisi_hba *hisi_hba = phy->hisi_hba; 907 struct device *dev = hisi_hba->dev; 908 909 hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP_PM); 910 pm_runtime_put_sync(dev); 911 } 912 913 static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = { 914 [HISI_PHYE_PHY_UP] = hisi_sas_phyup_work, 915 [HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work, 916 [HISI_PHYE_PHY_UP_PM] = hisi_sas_phyup_pm_work, 917 }; 918 919 bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy, 920 enum hisi_sas_phy_event event) 921 { 922 struct hisi_hba *hisi_hba = phy->hisi_hba; 923 924 if (WARN_ON(event >= HISI_PHYES_NUM)) 925 return false; 926 927 return queue_work(hisi_hba->wq, &phy->works[event]); 928 } 929 EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event); 930 931 static void hisi_sas_wait_phyup_timedout(struct timer_list *t) 932 { 933 struct hisi_sas_phy *phy = from_timer(phy, t, timer); 934 struct hisi_hba *hisi_hba = phy->hisi_hba; 935 struct device *dev = hisi_hba->dev; 936 int phy_no = phy->sas_phy.id; 937 938 dev_warn(dev, "phy%d wait phyup timeout, issuing link reset\n", phy_no); 939 hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); 940 } 941 942 #define HISI_SAS_WAIT_PHYUP_RETRIES 10 943 944 void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no) 945 { 946 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 947 struct device *dev = hisi_hba->dev; 948 unsigned long flags; 949 950 dev_dbg(dev, "phy%d OOB ready\n", phy_no); 951 spin_lock_irqsave(&phy->lock, flags); 952 if (phy->phy_attached) { 953 spin_unlock_irqrestore(&phy->lock, flags); 954 return; 955 } 956 957 if (!timer_pending(&phy->timer)) { 958 if (phy->wait_phyup_cnt < HISI_SAS_WAIT_PHYUP_RETRIES) { 959 phy->wait_phyup_cnt++; 960 phy->timer.expires = jiffies + 961 HISI_SAS_WAIT_PHYUP_TIMEOUT; 962 add_timer(&phy->timer); 963 spin_unlock_irqrestore(&phy->lock, flags); 964 return; 965 } 966 967 dev_warn(dev, "phy%d failed to come up %d times, giving up\n", 968 phy_no, phy->wait_phyup_cnt); 969 phy->wait_phyup_cnt = 0; 970 } 971 spin_unlock_irqrestore(&phy->lock, flags); 972 } 973 974 EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready); 975 976 static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no) 977 { 978 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 979 struct asd_sas_phy *sas_phy = &phy->sas_phy; 980 int i; 981 982 phy->hisi_hba = hisi_hba; 983 phy->port = NULL; 984 phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS; 985 phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate(); 986 sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0; 987 sas_phy->class = SAS; 988 sas_phy->iproto = SAS_PROTOCOL_ALL; 989 sas_phy->tproto = 0; 990 sas_phy->type = PHY_TYPE_PHYSICAL; 991 sas_phy->role = PHY_ROLE_INITIATOR; 992 sas_phy->oob_mode = OOB_NOT_CONNECTED; 993 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; 994 sas_phy->id = phy_no; 995 sas_phy->sas_addr = &hisi_hba->sas_addr[0]; 996 sas_phy->frame_rcvd = &phy->frame_rcvd[0]; 997 sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata; 998 sas_phy->lldd_phy = phy; 999 1000 for (i = 0; i < HISI_PHYES_NUM; i++) 1001 INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]); 1002 1003 spin_lock_init(&phy->lock); 1004 1005 timer_setup(&phy->timer, hisi_sas_wait_phyup_timedout, 0); 1006 } 1007 1008 /* Wrapper to ensure we track hisi_sas_phy.enable properly */ 1009 void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no, int enable) 1010 { 1011 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1012 struct asd_sas_phy *aphy = &phy->sas_phy; 1013 struct sas_phy *sphy = aphy->phy; 1014 unsigned long flags; 1015 1016 spin_lock_irqsave(&phy->lock, flags); 1017 1018 if (enable) { 1019 /* We may have been enabled already; if so, don't touch */ 1020 if (!phy->enable) 1021 sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; 1022 hisi_hba->hw->phy_start(hisi_hba, phy_no); 1023 } else { 1024 sphy->negotiated_linkrate = SAS_PHY_DISABLED; 1025 hisi_hba->hw->phy_disable(hisi_hba, phy_no); 1026 } 1027 phy->enable = enable; 1028 spin_unlock_irqrestore(&phy->lock, flags); 1029 } 1030 EXPORT_SYMBOL_GPL(hisi_sas_phy_enable); 1031 1032 static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy) 1033 { 1034 struct sas_ha_struct *sas_ha = sas_phy->ha; 1035 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; 1036 struct hisi_sas_phy *phy = sas_phy->lldd_phy; 1037 struct asd_sas_port *sas_port = sas_phy->port; 1038 struct hisi_sas_port *port; 1039 unsigned long flags; 1040 1041 if (!sas_port) 1042 return; 1043 1044 port = to_hisi_sas_port(sas_port); 1045 spin_lock_irqsave(&hisi_hba->lock, flags); 1046 port->port_attached = 1; 1047 port->id = phy->port_id; 1048 phy->port = port; 1049 sas_port->lldd_port = port; 1050 spin_unlock_irqrestore(&hisi_hba->lock, flags); 1051 } 1052 1053 static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task, 1054 struct hisi_sas_slot *slot) 1055 { 1056 if (task) { 1057 unsigned long flags; 1058 struct task_status_struct *ts; 1059 1060 ts = &task->task_status; 1061 1062 ts->resp = SAS_TASK_COMPLETE; 1063 ts->stat = SAS_ABORTED_TASK; 1064 spin_lock_irqsave(&task->task_state_lock, flags); 1065 task->task_state_flags &= ~SAS_TASK_STATE_PENDING; 1066 if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP) 1067 task->task_state_flags |= SAS_TASK_STATE_DONE; 1068 spin_unlock_irqrestore(&task->task_state_lock, flags); 1069 } 1070 1071 hisi_sas_slot_task_free(hisi_hba, task, slot); 1072 } 1073 1074 static void hisi_sas_release_task(struct hisi_hba *hisi_hba, 1075 struct domain_device *device) 1076 { 1077 struct hisi_sas_slot *slot, *slot2; 1078 struct hisi_sas_device *sas_dev = device->lldd_dev; 1079 1080 list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry) 1081 hisi_sas_do_release_task(hisi_hba, slot->task, slot); 1082 } 1083 1084 void hisi_sas_release_tasks(struct hisi_hba *hisi_hba) 1085 { 1086 struct hisi_sas_device *sas_dev; 1087 struct domain_device *device; 1088 int i; 1089 1090 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1091 sas_dev = &hisi_hba->devices[i]; 1092 device = sas_dev->sas_device; 1093 1094 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || 1095 !device) 1096 continue; 1097 1098 hisi_sas_release_task(hisi_hba, device); 1099 } 1100 } 1101 EXPORT_SYMBOL_GPL(hisi_sas_release_tasks); 1102 1103 static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba, 1104 struct domain_device *device) 1105 { 1106 if (hisi_hba->hw->dereg_device) 1107 hisi_hba->hw->dereg_device(hisi_hba, device); 1108 } 1109 1110 static int 1111 hisi_sas_internal_task_abort_dev(struct hisi_sas_device *sas_dev, 1112 bool rst_ha_timeout) 1113 { 1114 struct hisi_sas_internal_abort_data data = { rst_ha_timeout }; 1115 struct domain_device *device = sas_dev->sas_device; 1116 struct hisi_hba *hisi_hba = sas_dev->hisi_hba; 1117 int i, rc; 1118 1119 for (i = 0; i < hisi_hba->cq_nvecs; i++) { 1120 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 1121 const struct cpumask *mask = cq->irq_mask; 1122 1123 if (mask && !cpumask_intersects(cpu_online_mask, mask)) 1124 continue; 1125 rc = sas_execute_internal_abort_dev(device, i, &data); 1126 if (rc) 1127 return rc; 1128 } 1129 1130 return 0; 1131 } 1132 1133 static void hisi_sas_dev_gone(struct domain_device *device) 1134 { 1135 struct hisi_sas_device *sas_dev = device->lldd_dev; 1136 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1137 struct device *dev = hisi_hba->dev; 1138 int ret = 0; 1139 1140 dev_info(dev, "dev[%d:%x] is gone\n", 1141 sas_dev->device_id, sas_dev->dev_type); 1142 1143 down(&hisi_hba->sem); 1144 if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) { 1145 hisi_sas_internal_task_abort_dev(sas_dev, true); 1146 1147 hisi_sas_dereg_device(hisi_hba, device); 1148 1149 ret = hisi_hba->hw->clear_itct(hisi_hba, sas_dev); 1150 device->lldd_dev = NULL; 1151 } 1152 1153 if (hisi_hba->hw->free_device) 1154 hisi_hba->hw->free_device(sas_dev); 1155 1156 /* Don't mark it as SAS_PHY_UNUSED if failed to clear ITCT */ 1157 if (!ret) 1158 sas_dev->dev_type = SAS_PHY_UNUSED; 1159 sas_dev->sas_device = NULL; 1160 up(&hisi_hba->sem); 1161 } 1162 1163 static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no, 1164 struct sas_phy_linkrates *r) 1165 { 1166 struct sas_phy_linkrates _r; 1167 1168 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1169 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1170 enum sas_linkrate min, max; 1171 1172 if (r->minimum_linkrate > SAS_LINK_RATE_1_5_GBPS) 1173 return -EINVAL; 1174 1175 if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) { 1176 max = sas_phy->phy->maximum_linkrate; 1177 min = r->minimum_linkrate; 1178 } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) { 1179 max = r->maximum_linkrate; 1180 min = sas_phy->phy->minimum_linkrate; 1181 } else 1182 return -EINVAL; 1183 1184 _r.maximum_linkrate = max; 1185 _r.minimum_linkrate = min; 1186 1187 sas_phy->phy->maximum_linkrate = max; 1188 sas_phy->phy->minimum_linkrate = min; 1189 1190 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 1191 msleep(100); 1192 hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r); 1193 hisi_sas_phy_enable(hisi_hba, phy_no, 1); 1194 1195 return 0; 1196 } 1197 1198 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, 1199 void *funcdata) 1200 { 1201 struct hisi_sas_phy *phy = container_of(sas_phy, 1202 struct hisi_sas_phy, sas_phy); 1203 struct sas_ha_struct *sas_ha = sas_phy->ha; 1204 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; 1205 struct device *dev = hisi_hba->dev; 1206 DECLARE_COMPLETION_ONSTACK(completion); 1207 int phy_no = sas_phy->id; 1208 u8 sts = phy->phy_attached; 1209 int ret = 0; 1210 1211 down(&hisi_hba->sem); 1212 phy->reset_completion = &completion; 1213 1214 switch (func) { 1215 case PHY_FUNC_HARD_RESET: 1216 hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no); 1217 break; 1218 1219 case PHY_FUNC_LINK_RESET: 1220 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 1221 msleep(100); 1222 hisi_sas_phy_enable(hisi_hba, phy_no, 1); 1223 break; 1224 1225 case PHY_FUNC_DISABLE: 1226 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 1227 goto out; 1228 1229 case PHY_FUNC_SET_LINK_RATE: 1230 ret = hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata); 1231 break; 1232 1233 case PHY_FUNC_GET_EVENTS: 1234 if (hisi_hba->hw->get_events) { 1235 hisi_hba->hw->get_events(hisi_hba, phy_no); 1236 goto out; 1237 } 1238 fallthrough; 1239 case PHY_FUNC_RELEASE_SPINUP_HOLD: 1240 default: 1241 ret = -EOPNOTSUPP; 1242 goto out; 1243 } 1244 1245 if (sts && !wait_for_completion_timeout(&completion, 1246 HISI_SAS_WAIT_PHYUP_TIMEOUT)) { 1247 dev_warn(dev, "phy%d wait phyup timed out for func %d\n", 1248 phy_no, func); 1249 if (phy->in_reset) 1250 ret = -ETIMEDOUT; 1251 } 1252 1253 out: 1254 phy->reset_completion = NULL; 1255 1256 up(&hisi_hba->sem); 1257 return ret; 1258 } 1259 1260 static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev, 1261 bool reset, int pmp, u8 *fis) 1262 { 1263 struct ata_taskfile tf; 1264 1265 ata_tf_init(dev, &tf); 1266 if (reset) 1267 tf.ctl |= ATA_SRST; 1268 else 1269 tf.ctl &= ~ATA_SRST; 1270 tf.command = ATA_CMD_DEV_RESET; 1271 ata_tf_to_fis(&tf, pmp, 0, fis); 1272 } 1273 1274 static int hisi_sas_softreset_ata_disk(struct domain_device *device) 1275 { 1276 u8 fis[20] = {0}; 1277 struct ata_port *ap = device->sata_dev.ap; 1278 struct ata_link *link; 1279 int rc = TMF_RESP_FUNC_FAILED; 1280 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1281 struct device *dev = hisi_hba->dev; 1282 1283 ata_for_each_link(link, ap, EDGE) { 1284 int pmp = sata_srst_pmp(link); 1285 1286 hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis); 1287 rc = sas_execute_ata_cmd(device, fis, -1); 1288 if (rc != TMF_RESP_FUNC_COMPLETE) 1289 break; 1290 } 1291 1292 if (rc == TMF_RESP_FUNC_COMPLETE) { 1293 ata_for_each_link(link, ap, EDGE) { 1294 int pmp = sata_srst_pmp(link); 1295 1296 hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis); 1297 rc = sas_execute_ata_cmd(device, fis, -1); 1298 if (rc != TMF_RESP_FUNC_COMPLETE) 1299 dev_err(dev, "ata disk %016llx de-reset failed\n", 1300 SAS_ADDR(device->sas_addr)); 1301 } 1302 } else { 1303 dev_err(dev, "ata disk %016llx reset failed\n", 1304 SAS_ADDR(device->sas_addr)); 1305 } 1306 1307 if (rc == TMF_RESP_FUNC_COMPLETE) 1308 hisi_sas_release_task(hisi_hba, device); 1309 1310 return rc; 1311 } 1312 1313 static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba) 1314 { 1315 u32 state = hisi_hba->hw->get_phys_state(hisi_hba); 1316 int i; 1317 1318 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1319 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; 1320 struct domain_device *device = sas_dev->sas_device; 1321 struct asd_sas_port *sas_port; 1322 struct hisi_sas_port *port; 1323 struct hisi_sas_phy *phy = NULL; 1324 struct asd_sas_phy *sas_phy; 1325 1326 if ((sas_dev->dev_type == SAS_PHY_UNUSED) 1327 || !device || !device->port) 1328 continue; 1329 1330 sas_port = device->port; 1331 port = to_hisi_sas_port(sas_port); 1332 1333 spin_lock(&sas_port->phy_list_lock); 1334 list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) 1335 if (state & BIT(sas_phy->id)) { 1336 phy = sas_phy->lldd_phy; 1337 break; 1338 } 1339 spin_unlock(&sas_port->phy_list_lock); 1340 1341 if (phy) { 1342 port->id = phy->port_id; 1343 1344 /* Update linkrate of directly attached device. */ 1345 if (!device->parent) 1346 device->linkrate = phy->sas_phy.linkrate; 1347 1348 hisi_hba->hw->setup_itct(hisi_hba, sas_dev); 1349 } else 1350 port->id = 0xff; 1351 } 1352 } 1353 1354 static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state) 1355 { 1356 struct asd_sas_port *_sas_port = NULL; 1357 int phy_no; 1358 1359 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { 1360 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1361 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1362 struct asd_sas_port *sas_port = sas_phy->port; 1363 bool do_port_check = _sas_port != sas_port; 1364 1365 if (!sas_phy->phy->enabled) 1366 continue; 1367 1368 /* Report PHY state change to libsas */ 1369 if (state & BIT(phy_no)) { 1370 if (do_port_check && sas_port && sas_port->port_dev) { 1371 struct domain_device *dev = sas_port->port_dev; 1372 1373 _sas_port = sas_port; 1374 1375 if (dev_is_expander(dev->dev_type)) 1376 sas_notify_port_event(sas_phy, 1377 PORTE_BROADCAST_RCVD, 1378 GFP_KERNEL); 1379 } 1380 } else { 1381 hisi_sas_phy_down(hisi_hba, phy_no, 0, GFP_KERNEL); 1382 } 1383 } 1384 } 1385 1386 static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba) 1387 { 1388 struct hisi_sas_device *sas_dev; 1389 struct domain_device *device; 1390 int i; 1391 1392 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1393 sas_dev = &hisi_hba->devices[i]; 1394 device = sas_dev->sas_device; 1395 1396 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device) 1397 continue; 1398 1399 hisi_sas_init_device(device); 1400 } 1401 } 1402 1403 static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba, 1404 struct asd_sas_port *sas_port, 1405 struct domain_device *device) 1406 { 1407 struct ata_port *ap = device->sata_dev.ap; 1408 struct device *dev = hisi_hba->dev; 1409 int rc = TMF_RESP_FUNC_FAILED; 1410 struct ata_link *link; 1411 u8 fis[20] = {0}; 1412 int i; 1413 1414 for (i = 0; i < hisi_hba->n_phy; i++) { 1415 if (!(sas_port->phy_mask & BIT(i))) 1416 continue; 1417 1418 ata_for_each_link(link, ap, EDGE) { 1419 int pmp = sata_srst_pmp(link); 1420 1421 hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis); 1422 rc = sas_execute_ata_cmd(device, fis, i); 1423 if (rc != TMF_RESP_FUNC_COMPLETE) { 1424 dev_err(dev, "phy%d ata reset failed rc=%d\n", 1425 i, rc); 1426 break; 1427 } 1428 } 1429 } 1430 } 1431 1432 static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba) 1433 { 1434 struct device *dev = hisi_hba->dev; 1435 int port_no, rc, i; 1436 1437 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1438 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; 1439 struct domain_device *device = sas_dev->sas_device; 1440 1441 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device) 1442 continue; 1443 1444 rc = hisi_sas_internal_task_abort_dev(sas_dev, false); 1445 if (rc < 0) 1446 dev_err(dev, "STP reject: abort dev failed %d\n", rc); 1447 } 1448 1449 for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) { 1450 struct hisi_sas_port *port = &hisi_hba->port[port_no]; 1451 struct asd_sas_port *sas_port = &port->sas_port; 1452 struct domain_device *port_dev = sas_port->port_dev; 1453 struct domain_device *device; 1454 1455 if (!port_dev || !dev_is_expander(port_dev->dev_type)) 1456 continue; 1457 1458 /* Try to find a SATA device */ 1459 list_for_each_entry(device, &sas_port->dev_list, 1460 dev_list_node) { 1461 if (dev_is_sata(device)) { 1462 hisi_sas_send_ata_reset_each_phy(hisi_hba, 1463 sas_port, 1464 device); 1465 break; 1466 } 1467 } 1468 } 1469 } 1470 1471 void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba) 1472 { 1473 struct Scsi_Host *shost = hisi_hba->shost; 1474 1475 hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba); 1476 1477 scsi_block_requests(shost); 1478 hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000); 1479 1480 del_timer_sync(&hisi_hba->timer); 1481 1482 set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1483 } 1484 EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare); 1485 1486 void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba) 1487 { 1488 struct Scsi_Host *shost = hisi_hba->shost; 1489 1490 /* Init and wait for PHYs to come up and all libsas event finished. */ 1491 hisi_hba->hw->phys_init(hisi_hba); 1492 msleep(1000); 1493 hisi_sas_refresh_port_id(hisi_hba); 1494 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1495 1496 if (hisi_hba->reject_stp_links_msk) 1497 hisi_sas_terminate_stp_reject(hisi_hba); 1498 hisi_sas_reset_init_all_devices(hisi_hba); 1499 scsi_unblock_requests(shost); 1500 clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); 1501 up(&hisi_hba->sem); 1502 1503 hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state); 1504 } 1505 EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done); 1506 1507 static int hisi_sas_controller_prereset(struct hisi_hba *hisi_hba) 1508 { 1509 if (!hisi_hba->hw->soft_reset) 1510 return -1; 1511 1512 down(&hisi_hba->sem); 1513 if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) { 1514 up(&hisi_hba->sem); 1515 return -1; 1516 } 1517 1518 if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct) 1519 hisi_hba->hw->debugfs_snapshot_regs(hisi_hba); 1520 1521 return 0; 1522 } 1523 1524 static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba) 1525 { 1526 struct device *dev = hisi_hba->dev; 1527 struct Scsi_Host *shost = hisi_hba->shost; 1528 int rc; 1529 1530 dev_info(dev, "controller resetting...\n"); 1531 hisi_sas_controller_reset_prepare(hisi_hba); 1532 1533 rc = hisi_hba->hw->soft_reset(hisi_hba); 1534 if (rc) { 1535 dev_warn(dev, "controller reset failed (%d)\n", rc); 1536 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1537 up(&hisi_hba->sem); 1538 scsi_unblock_requests(shost); 1539 clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); 1540 return rc; 1541 } 1542 1543 hisi_sas_controller_reset_done(hisi_hba); 1544 clear_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags); 1545 dev_info(dev, "controller reset complete\n"); 1546 1547 return 0; 1548 } 1549 1550 static int hisi_sas_abort_task(struct sas_task *task) 1551 { 1552 struct hisi_sas_internal_abort_data internal_abort_data = { false }; 1553 struct domain_device *device = task->dev; 1554 struct hisi_sas_device *sas_dev = device->lldd_dev; 1555 struct hisi_hba *hisi_hba; 1556 struct device *dev; 1557 int rc = TMF_RESP_FUNC_FAILED; 1558 unsigned long flags; 1559 1560 if (!sas_dev) 1561 return TMF_RESP_FUNC_FAILED; 1562 1563 hisi_hba = dev_to_hisi_hba(task->dev); 1564 dev = hisi_hba->dev; 1565 1566 spin_lock_irqsave(&task->task_state_lock, flags); 1567 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 1568 struct hisi_sas_slot *slot = task->lldd_task; 1569 struct hisi_sas_cq *cq; 1570 1571 if (slot) { 1572 /* 1573 * sync irq to avoid free'ing task 1574 * before using task in IO completion 1575 */ 1576 cq = &hisi_hba->cq[slot->dlvry_queue]; 1577 synchronize_irq(cq->irq_no); 1578 } 1579 spin_unlock_irqrestore(&task->task_state_lock, flags); 1580 rc = TMF_RESP_FUNC_COMPLETE; 1581 goto out; 1582 } 1583 task->task_state_flags |= SAS_TASK_STATE_ABORTED; 1584 spin_unlock_irqrestore(&task->task_state_lock, flags); 1585 1586 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { 1587 struct hisi_sas_slot *slot = task->lldd_task; 1588 u16 tag = slot->idx; 1589 int rc2; 1590 1591 rc = sas_abort_task(task, tag); 1592 rc2 = sas_execute_internal_abort_single(device, tag, 1593 slot->dlvry_queue, &internal_abort_data); 1594 if (rc2 < 0) { 1595 dev_err(dev, "abort task: internal abort (%d)\n", rc2); 1596 return TMF_RESP_FUNC_FAILED; 1597 } 1598 1599 /* 1600 * If the TMF finds that the IO is not in the device and also 1601 * the internal abort does not succeed, then it is safe to 1602 * free the slot. 1603 * Note: if the internal abort succeeds then the slot 1604 * will have already been completed 1605 */ 1606 if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) { 1607 if (task->lldd_task) 1608 hisi_sas_do_release_task(hisi_hba, task, slot); 1609 } 1610 } else if (task->task_proto & SAS_PROTOCOL_SATA || 1611 task->task_proto & SAS_PROTOCOL_STP) { 1612 if (task->dev->dev_type == SAS_SATA_DEV) { 1613 rc = hisi_sas_internal_task_abort_dev(sas_dev, false); 1614 if (rc < 0) { 1615 dev_err(dev, "abort task: internal abort failed\n"); 1616 goto out; 1617 } 1618 hisi_sas_dereg_device(hisi_hba, device); 1619 rc = hisi_sas_softreset_ata_disk(device); 1620 } 1621 } else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) { 1622 /* SMP */ 1623 struct hisi_sas_slot *slot = task->lldd_task; 1624 u32 tag = slot->idx; 1625 struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue]; 1626 1627 rc = sas_execute_internal_abort_single(device, 1628 tag, slot->dlvry_queue, 1629 &internal_abort_data); 1630 if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) && 1631 task->lldd_task) { 1632 /* 1633 * sync irq to avoid free'ing task 1634 * before using task in IO completion 1635 */ 1636 synchronize_irq(cq->irq_no); 1637 slot->task = NULL; 1638 } 1639 } 1640 1641 out: 1642 if (rc != TMF_RESP_FUNC_COMPLETE) 1643 dev_notice(dev, "abort task: rc=%d\n", rc); 1644 return rc; 1645 } 1646 1647 static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun) 1648 { 1649 struct hisi_sas_device *sas_dev = device->lldd_dev; 1650 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1651 struct device *dev = hisi_hba->dev; 1652 int rc; 1653 1654 rc = hisi_sas_internal_task_abort_dev(sas_dev, false); 1655 if (rc < 0) { 1656 dev_err(dev, "abort task set: internal abort rc=%d\n", rc); 1657 return TMF_RESP_FUNC_FAILED; 1658 } 1659 hisi_sas_dereg_device(hisi_hba, device); 1660 1661 rc = sas_abort_task_set(device, lun); 1662 if (rc == TMF_RESP_FUNC_COMPLETE) 1663 hisi_sas_release_task(hisi_hba, device); 1664 1665 return rc; 1666 } 1667 1668 static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device) 1669 { 1670 struct sas_phy *local_phy = sas_get_local_phy(device); 1671 struct hisi_sas_device *sas_dev = device->lldd_dev; 1672 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1673 struct sas_ha_struct *sas_ha = &hisi_hba->sha; 1674 int rc, reset_type; 1675 1676 if (!local_phy->enabled) { 1677 sas_put_local_phy(local_phy); 1678 return -ENODEV; 1679 } 1680 1681 if (scsi_is_sas_phy_local(local_phy)) { 1682 struct asd_sas_phy *sas_phy = 1683 sas_ha->sas_phy[local_phy->number]; 1684 struct hisi_sas_phy *phy = 1685 container_of(sas_phy, struct hisi_sas_phy, sas_phy); 1686 unsigned long flags; 1687 1688 spin_lock_irqsave(&phy->lock, flags); 1689 phy->in_reset = 1; 1690 spin_unlock_irqrestore(&phy->lock, flags); 1691 } 1692 1693 reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT || 1694 !dev_is_sata(device)) ? true : false; 1695 1696 rc = sas_phy_reset(local_phy, reset_type); 1697 sas_put_local_phy(local_phy); 1698 1699 if (scsi_is_sas_phy_local(local_phy)) { 1700 struct asd_sas_phy *sas_phy = 1701 sas_ha->sas_phy[local_phy->number]; 1702 struct hisi_sas_phy *phy = 1703 container_of(sas_phy, struct hisi_sas_phy, sas_phy); 1704 unsigned long flags; 1705 1706 spin_lock_irqsave(&phy->lock, flags); 1707 phy->in_reset = 0; 1708 spin_unlock_irqrestore(&phy->lock, flags); 1709 1710 /* report PHY down if timed out */ 1711 if (rc == -ETIMEDOUT) 1712 hisi_sas_phy_down(hisi_hba, sas_phy->id, 0, GFP_KERNEL); 1713 } else if (sas_dev->dev_status != HISI_SAS_DEV_INIT) { 1714 /* 1715 * If in init state, we rely on caller to wait for link to be 1716 * ready; otherwise, except phy reset is fail, delay. 1717 */ 1718 if (!rc) 1719 msleep(2000); 1720 } 1721 1722 return rc; 1723 } 1724 1725 static int hisi_sas_I_T_nexus_reset(struct domain_device *device) 1726 { 1727 struct hisi_sas_device *sas_dev = device->lldd_dev; 1728 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1729 struct device *dev = hisi_hba->dev; 1730 int rc; 1731 1732 rc = hisi_sas_internal_task_abort_dev(sas_dev, false); 1733 if (rc < 0) { 1734 dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc); 1735 return TMF_RESP_FUNC_FAILED; 1736 } 1737 hisi_sas_dereg_device(hisi_hba, device); 1738 1739 rc = hisi_sas_debug_I_T_nexus_reset(device); 1740 if (rc == TMF_RESP_FUNC_COMPLETE && dev_is_sata(device)) { 1741 struct sas_phy *local_phy; 1742 1743 rc = hisi_sas_softreset_ata_disk(device); 1744 switch (rc) { 1745 case -ECOMM: 1746 rc = -ENODEV; 1747 break; 1748 case TMF_RESP_FUNC_FAILED: 1749 case -EMSGSIZE: 1750 case -EIO: 1751 local_phy = sas_get_local_phy(device); 1752 rc = sas_phy_enable(local_phy, 0); 1753 if (!rc) { 1754 local_phy->enabled = 0; 1755 dev_err(dev, "Disabled local phy of ATA disk %016llx due to softreset fail (%d)\n", 1756 SAS_ADDR(device->sas_addr), rc); 1757 rc = -ENODEV; 1758 } 1759 sas_put_local_phy(local_phy); 1760 break; 1761 default: 1762 break; 1763 } 1764 } 1765 1766 if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) 1767 hisi_sas_release_task(hisi_hba, device); 1768 1769 return rc; 1770 } 1771 1772 static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun) 1773 { 1774 struct hisi_sas_device *sas_dev = device->lldd_dev; 1775 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1776 struct device *dev = hisi_hba->dev; 1777 int rc = TMF_RESP_FUNC_FAILED; 1778 1779 /* Clear internal IO and then lu reset */ 1780 rc = hisi_sas_internal_task_abort_dev(sas_dev, false); 1781 if (rc < 0) { 1782 dev_err(dev, "lu_reset: internal abort failed\n"); 1783 goto out; 1784 } 1785 hisi_sas_dereg_device(hisi_hba, device); 1786 1787 if (dev_is_sata(device)) { 1788 struct sas_phy *phy; 1789 1790 phy = sas_get_local_phy(device); 1791 1792 rc = sas_phy_reset(phy, true); 1793 1794 if (rc == 0) 1795 hisi_sas_release_task(hisi_hba, device); 1796 sas_put_local_phy(phy); 1797 } else { 1798 rc = sas_lu_reset(device, lun); 1799 if (rc == TMF_RESP_FUNC_COMPLETE) 1800 hisi_sas_release_task(hisi_hba, device); 1801 } 1802 out: 1803 if (rc != TMF_RESP_FUNC_COMPLETE) 1804 dev_err(dev, "lu_reset: for device[%d]:rc= %d\n", 1805 sas_dev->device_id, rc); 1806 return rc; 1807 } 1808 1809 static void hisi_sas_async_I_T_nexus_reset(void *data, async_cookie_t cookie) 1810 { 1811 struct domain_device *device = data; 1812 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1813 int rc; 1814 1815 rc = hisi_sas_debug_I_T_nexus_reset(device); 1816 if (rc != TMF_RESP_FUNC_COMPLETE) 1817 dev_info(hisi_hba->dev, "I_T_nexus reset fail for dev:%016llx rc=%d\n", 1818 SAS_ADDR(device->sas_addr), rc); 1819 } 1820 1821 static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha) 1822 { 1823 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; 1824 HISI_SAS_DECLARE_RST_WORK_ON_STACK(r); 1825 ASYNC_DOMAIN_EXCLUSIVE(async); 1826 int i; 1827 1828 queue_work(hisi_hba->wq, &r.work); 1829 wait_for_completion(r.completion); 1830 if (!r.done) 1831 return TMF_RESP_FUNC_FAILED; 1832 1833 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1834 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; 1835 struct domain_device *device = sas_dev->sas_device; 1836 1837 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device || 1838 dev_is_expander(device->dev_type)) 1839 continue; 1840 1841 async_schedule_domain(hisi_sas_async_I_T_nexus_reset, 1842 device, &async); 1843 } 1844 1845 async_synchronize_full_domain(&async); 1846 hisi_sas_release_tasks(hisi_hba); 1847 1848 return TMF_RESP_FUNC_COMPLETE; 1849 } 1850 1851 static int hisi_sas_query_task(struct sas_task *task) 1852 { 1853 int rc = TMF_RESP_FUNC_FAILED; 1854 1855 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { 1856 struct hisi_sas_slot *slot = task->lldd_task; 1857 u32 tag = slot->idx; 1858 1859 rc = sas_query_task(task, tag); 1860 switch (rc) { 1861 /* The task is still in Lun, release it then */ 1862 case TMF_RESP_FUNC_SUCC: 1863 /* The task is not in Lun or failed, reset the phy */ 1864 case TMF_RESP_FUNC_FAILED: 1865 case TMF_RESP_FUNC_COMPLETE: 1866 break; 1867 default: 1868 rc = TMF_RESP_FUNC_FAILED; 1869 break; 1870 } 1871 } 1872 return rc; 1873 } 1874 1875 static bool hisi_sas_internal_abort_timeout(struct sas_task *task, 1876 void *data) 1877 { 1878 struct domain_device *device = task->dev; 1879 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1880 struct hisi_sas_internal_abort_data *timeout = data; 1881 1882 if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct) 1883 queue_work(hisi_hba->wq, &hisi_hba->debugfs_work); 1884 1885 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 1886 pr_err("Internal abort: timeout %016llx\n", 1887 SAS_ADDR(device->sas_addr)); 1888 } else { 1889 struct hisi_sas_slot *slot = task->lldd_task; 1890 1891 set_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags); 1892 1893 if (slot) { 1894 struct hisi_sas_cq *cq = 1895 &hisi_hba->cq[slot->dlvry_queue]; 1896 /* 1897 * sync irq to avoid free'ing task 1898 * before using task in IO completion 1899 */ 1900 synchronize_irq(cq->irq_no); 1901 slot->task = NULL; 1902 } 1903 1904 if (timeout->rst_ha_timeout) { 1905 pr_err("Internal abort: timeout and not done %016llx. Queuing reset.\n", 1906 SAS_ADDR(device->sas_addr)); 1907 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 1908 } else { 1909 pr_err("Internal abort: timeout and not done %016llx.\n", 1910 SAS_ADDR(device->sas_addr)); 1911 } 1912 1913 return true; 1914 } 1915 1916 return false; 1917 } 1918 1919 static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy) 1920 { 1921 hisi_sas_port_notify_formed(sas_phy); 1922 } 1923 1924 static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type, 1925 u8 reg_index, u8 reg_count, u8 *write_data) 1926 { 1927 struct hisi_hba *hisi_hba = sha->lldd_ha; 1928 1929 if (!hisi_hba->hw->write_gpio) 1930 return -EOPNOTSUPP; 1931 1932 return hisi_hba->hw->write_gpio(hisi_hba, reg_type, 1933 reg_index, reg_count, write_data); 1934 } 1935 1936 static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy) 1937 { 1938 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1939 struct sas_phy *sphy = sas_phy->phy; 1940 unsigned long flags; 1941 1942 phy->phy_attached = 0; 1943 phy->phy_type = 0; 1944 phy->port = NULL; 1945 1946 spin_lock_irqsave(&phy->lock, flags); 1947 if (phy->enable) 1948 sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; 1949 else 1950 sphy->negotiated_linkrate = SAS_PHY_DISABLED; 1951 spin_unlock_irqrestore(&phy->lock, flags); 1952 } 1953 1954 void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy, 1955 gfp_t gfp_flags) 1956 { 1957 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1958 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1959 struct device *dev = hisi_hba->dev; 1960 1961 if (rdy) { 1962 /* Phy down but ready */ 1963 hisi_sas_bytes_dmaed(hisi_hba, phy_no, gfp_flags); 1964 hisi_sas_port_notify_formed(sas_phy); 1965 } else { 1966 struct hisi_sas_port *port = phy->port; 1967 1968 if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) || 1969 phy->in_reset) { 1970 dev_info(dev, "ignore flutter phy%d down\n", phy_no); 1971 return; 1972 } 1973 /* Phy down and not ready */ 1974 sas_notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL, gfp_flags); 1975 sas_phy_disconnected(sas_phy); 1976 1977 if (port) { 1978 if (phy->phy_type & PORT_TYPE_SAS) { 1979 int port_id = port->id; 1980 1981 if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba, 1982 port_id)) 1983 port->port_attached = 0; 1984 } else if (phy->phy_type & PORT_TYPE_SATA) 1985 port->port_attached = 0; 1986 } 1987 hisi_sas_phy_disconnected(phy); 1988 } 1989 } 1990 EXPORT_SYMBOL_GPL(hisi_sas_phy_down); 1991 1992 void hisi_sas_sync_irqs(struct hisi_hba *hisi_hba) 1993 { 1994 int i; 1995 1996 for (i = 0; i < hisi_hba->cq_nvecs; i++) { 1997 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 1998 1999 synchronize_irq(cq->irq_no); 2000 } 2001 } 2002 EXPORT_SYMBOL_GPL(hisi_sas_sync_irqs); 2003 2004 int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type) 2005 { 2006 struct hisi_hba *hisi_hba = shost_priv(shost); 2007 2008 if (reset_type != SCSI_ADAPTER_RESET) 2009 return -EOPNOTSUPP; 2010 2011 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 2012 2013 return 0; 2014 } 2015 EXPORT_SYMBOL_GPL(hisi_sas_host_reset); 2016 2017 struct scsi_transport_template *hisi_sas_stt; 2018 EXPORT_SYMBOL_GPL(hisi_sas_stt); 2019 2020 static struct sas_domain_function_template hisi_sas_transport_ops = { 2021 .lldd_dev_found = hisi_sas_dev_found, 2022 .lldd_dev_gone = hisi_sas_dev_gone, 2023 .lldd_execute_task = hisi_sas_queue_command, 2024 .lldd_control_phy = hisi_sas_control_phy, 2025 .lldd_abort_task = hisi_sas_abort_task, 2026 .lldd_abort_task_set = hisi_sas_abort_task_set, 2027 .lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset, 2028 .lldd_lu_reset = hisi_sas_lu_reset, 2029 .lldd_query_task = hisi_sas_query_task, 2030 .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha, 2031 .lldd_port_formed = hisi_sas_port_formed, 2032 .lldd_write_gpio = hisi_sas_write_gpio, 2033 .lldd_tmf_aborted = hisi_sas_tmf_aborted, 2034 .lldd_abort_timeout = hisi_sas_internal_abort_timeout, 2035 }; 2036 2037 void hisi_sas_init_mem(struct hisi_hba *hisi_hba) 2038 { 2039 int i, s, j, max_command_entries = HISI_SAS_MAX_COMMANDS; 2040 struct hisi_sas_breakpoint *sata_breakpoint = hisi_hba->sata_breakpoint; 2041 2042 for (i = 0; i < hisi_hba->queue_count; i++) { 2043 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 2044 struct hisi_sas_dq *dq = &hisi_hba->dq[i]; 2045 struct hisi_sas_cmd_hdr *cmd_hdr = hisi_hba->cmd_hdr[i]; 2046 2047 s = sizeof(struct hisi_sas_cmd_hdr); 2048 for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++) 2049 memset(&cmd_hdr[j], 0, s); 2050 2051 dq->wr_point = 0; 2052 2053 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; 2054 memset(hisi_hba->complete_hdr[i], 0, s); 2055 cq->rd_point = 0; 2056 } 2057 2058 s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy; 2059 memset(hisi_hba->initial_fis, 0, s); 2060 2061 s = max_command_entries * sizeof(struct hisi_sas_iost); 2062 memset(hisi_hba->iost, 0, s); 2063 2064 s = max_command_entries * sizeof(struct hisi_sas_breakpoint); 2065 memset(hisi_hba->breakpoint, 0, s); 2066 2067 s = sizeof(struct hisi_sas_sata_breakpoint); 2068 for (j = 0; j < HISI_SAS_MAX_ITCT_ENTRIES; j++) 2069 memset(&sata_breakpoint[j], 0, s); 2070 } 2071 EXPORT_SYMBOL_GPL(hisi_sas_init_mem); 2072 2073 int hisi_sas_alloc(struct hisi_hba *hisi_hba) 2074 { 2075 struct device *dev = hisi_hba->dev; 2076 int i, j, s, max_command_entries = HISI_SAS_MAX_COMMANDS; 2077 int max_command_entries_ru, sz_slot_buf_ru; 2078 int blk_cnt, slots_per_blk; 2079 2080 sema_init(&hisi_hba->sem, 1); 2081 spin_lock_init(&hisi_hba->lock); 2082 for (i = 0; i < hisi_hba->n_phy; i++) { 2083 hisi_sas_phy_init(hisi_hba, i); 2084 hisi_hba->port[i].port_attached = 0; 2085 hisi_hba->port[i].id = -1; 2086 } 2087 2088 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 2089 hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED; 2090 hisi_hba->devices[i].device_id = i; 2091 hisi_hba->devices[i].dev_status = HISI_SAS_DEV_INIT; 2092 } 2093 2094 for (i = 0; i < hisi_hba->queue_count; i++) { 2095 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 2096 struct hisi_sas_dq *dq = &hisi_hba->dq[i]; 2097 2098 /* Completion queue structure */ 2099 cq->id = i; 2100 cq->hisi_hba = hisi_hba; 2101 2102 /* Delivery queue structure */ 2103 spin_lock_init(&dq->lock); 2104 INIT_LIST_HEAD(&dq->list); 2105 dq->id = i; 2106 dq->hisi_hba = hisi_hba; 2107 2108 /* Delivery queue */ 2109 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS; 2110 hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s, 2111 &hisi_hba->cmd_hdr_dma[i], 2112 GFP_KERNEL); 2113 if (!hisi_hba->cmd_hdr[i]) 2114 goto err_out; 2115 2116 /* Completion queue */ 2117 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; 2118 hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s, 2119 &hisi_hba->complete_hdr_dma[i], 2120 GFP_KERNEL); 2121 if (!hisi_hba->complete_hdr[i]) 2122 goto err_out; 2123 } 2124 2125 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct); 2126 hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma, 2127 GFP_KERNEL); 2128 if (!hisi_hba->itct) 2129 goto err_out; 2130 2131 hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries, 2132 sizeof(struct hisi_sas_slot), 2133 GFP_KERNEL); 2134 if (!hisi_hba->slot_info) 2135 goto err_out; 2136 2137 /* roundup to avoid overly large block size */ 2138 max_command_entries_ru = roundup(max_command_entries, 64); 2139 if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK) 2140 sz_slot_buf_ru = sizeof(struct hisi_sas_slot_dif_buf_table); 2141 else 2142 sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table); 2143 sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64); 2144 s = max(lcm(max_command_entries_ru, sz_slot_buf_ru), PAGE_SIZE); 2145 blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s; 2146 slots_per_blk = s / sz_slot_buf_ru; 2147 2148 for (i = 0; i < blk_cnt; i++) { 2149 int slot_index = i * slots_per_blk; 2150 dma_addr_t buf_dma; 2151 void *buf; 2152 2153 buf = dmam_alloc_coherent(dev, s, &buf_dma, 2154 GFP_KERNEL); 2155 if (!buf) 2156 goto err_out; 2157 2158 for (j = 0; j < slots_per_blk; j++, slot_index++) { 2159 struct hisi_sas_slot *slot; 2160 2161 slot = &hisi_hba->slot_info[slot_index]; 2162 slot->buf = buf; 2163 slot->buf_dma = buf_dma; 2164 slot->idx = slot_index; 2165 2166 buf += sz_slot_buf_ru; 2167 buf_dma += sz_slot_buf_ru; 2168 } 2169 } 2170 2171 s = max_command_entries * sizeof(struct hisi_sas_iost); 2172 hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma, 2173 GFP_KERNEL); 2174 if (!hisi_hba->iost) 2175 goto err_out; 2176 2177 s = max_command_entries * sizeof(struct hisi_sas_breakpoint); 2178 hisi_hba->breakpoint = dmam_alloc_coherent(dev, s, 2179 &hisi_hba->breakpoint_dma, 2180 GFP_KERNEL); 2181 if (!hisi_hba->breakpoint) 2182 goto err_out; 2183 2184 s = hisi_hba->slot_index_count = max_command_entries; 2185 hisi_hba->slot_index_tags = devm_bitmap_zalloc(dev, s, GFP_KERNEL); 2186 if (!hisi_hba->slot_index_tags) 2187 goto err_out; 2188 2189 s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS; 2190 hisi_hba->initial_fis = dmam_alloc_coherent(dev, s, 2191 &hisi_hba->initial_fis_dma, 2192 GFP_KERNEL); 2193 if (!hisi_hba->initial_fis) 2194 goto err_out; 2195 2196 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint); 2197 hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s, 2198 &hisi_hba->sata_breakpoint_dma, 2199 GFP_KERNEL); 2200 if (!hisi_hba->sata_breakpoint) 2201 goto err_out; 2202 2203 hisi_hba->last_slot_index = HISI_SAS_UNRESERVED_IPTT; 2204 2205 hisi_hba->wq = create_singlethread_workqueue(dev_name(dev)); 2206 if (!hisi_hba->wq) { 2207 dev_err(dev, "sas_alloc: failed to create workqueue\n"); 2208 goto err_out; 2209 } 2210 2211 return 0; 2212 err_out: 2213 return -ENOMEM; 2214 } 2215 EXPORT_SYMBOL_GPL(hisi_sas_alloc); 2216 2217 void hisi_sas_free(struct hisi_hba *hisi_hba) 2218 { 2219 int i; 2220 2221 for (i = 0; i < hisi_hba->n_phy; i++) { 2222 struct hisi_sas_phy *phy = &hisi_hba->phy[i]; 2223 2224 del_timer_sync(&phy->timer); 2225 } 2226 2227 if (hisi_hba->wq) 2228 destroy_workqueue(hisi_hba->wq); 2229 } 2230 EXPORT_SYMBOL_GPL(hisi_sas_free); 2231 2232 void hisi_sas_rst_work_handler(struct work_struct *work) 2233 { 2234 struct hisi_hba *hisi_hba = 2235 container_of(work, struct hisi_hba, rst_work); 2236 2237 if (hisi_sas_controller_prereset(hisi_hba)) 2238 return; 2239 2240 hisi_sas_controller_reset(hisi_hba); 2241 } 2242 EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler); 2243 2244 void hisi_sas_sync_rst_work_handler(struct work_struct *work) 2245 { 2246 struct hisi_sas_rst *rst = 2247 container_of(work, struct hisi_sas_rst, work); 2248 2249 if (hisi_sas_controller_prereset(rst->hisi_hba)) 2250 goto rst_complete; 2251 2252 if (!hisi_sas_controller_reset(rst->hisi_hba)) 2253 rst->done = true; 2254 rst_complete: 2255 complete(rst->completion); 2256 } 2257 EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler); 2258 2259 int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba) 2260 { 2261 struct device *dev = hisi_hba->dev; 2262 struct platform_device *pdev = hisi_hba->platform_dev; 2263 struct device_node *np = pdev ? pdev->dev.of_node : NULL; 2264 struct clk *refclk; 2265 2266 if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr, 2267 SAS_ADDR_SIZE)) { 2268 dev_err(dev, "could not get property sas-addr\n"); 2269 return -ENOENT; 2270 } 2271 2272 if (np) { 2273 /* 2274 * These properties are only required for platform device-based 2275 * controller with DT firmware. 2276 */ 2277 hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np, 2278 "hisilicon,sas-syscon"); 2279 if (IS_ERR(hisi_hba->ctrl)) { 2280 dev_err(dev, "could not get syscon\n"); 2281 return -ENOENT; 2282 } 2283 2284 if (device_property_read_u32(dev, "ctrl-reset-reg", 2285 &hisi_hba->ctrl_reset_reg)) { 2286 dev_err(dev, "could not get property ctrl-reset-reg\n"); 2287 return -ENOENT; 2288 } 2289 2290 if (device_property_read_u32(dev, "ctrl-reset-sts-reg", 2291 &hisi_hba->ctrl_reset_sts_reg)) { 2292 dev_err(dev, "could not get property ctrl-reset-sts-reg\n"); 2293 return -ENOENT; 2294 } 2295 2296 if (device_property_read_u32(dev, "ctrl-clock-ena-reg", 2297 &hisi_hba->ctrl_clock_ena_reg)) { 2298 dev_err(dev, "could not get property ctrl-clock-ena-reg\n"); 2299 return -ENOENT; 2300 } 2301 } 2302 2303 refclk = devm_clk_get(dev, NULL); 2304 if (IS_ERR(refclk)) 2305 dev_dbg(dev, "no ref clk property\n"); 2306 else 2307 hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000; 2308 2309 if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) { 2310 dev_err(dev, "could not get property phy-count\n"); 2311 return -ENOENT; 2312 } 2313 2314 if (device_property_read_u32(dev, "queue-count", 2315 &hisi_hba->queue_count)) { 2316 dev_err(dev, "could not get property queue-count\n"); 2317 return -ENOENT; 2318 } 2319 2320 return 0; 2321 } 2322 EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info); 2323 2324 static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, 2325 const struct hisi_sas_hw *hw) 2326 { 2327 struct resource *res; 2328 struct Scsi_Host *shost; 2329 struct hisi_hba *hisi_hba; 2330 struct device *dev = &pdev->dev; 2331 int error; 2332 2333 shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba)); 2334 if (!shost) { 2335 dev_err(dev, "scsi host alloc failed\n"); 2336 return NULL; 2337 } 2338 hisi_hba = shost_priv(shost); 2339 2340 INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler); 2341 hisi_hba->hw = hw; 2342 hisi_hba->dev = dev; 2343 hisi_hba->platform_dev = pdev; 2344 hisi_hba->shost = shost; 2345 SHOST_TO_SAS_HA(shost) = &hisi_hba->sha; 2346 2347 timer_setup(&hisi_hba->timer, NULL, 0); 2348 2349 if (hisi_sas_get_fw_info(hisi_hba) < 0) 2350 goto err_out; 2351 2352 error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 2353 if (error) { 2354 dev_err(dev, "No usable DMA addressing method\n"); 2355 goto err_out; 2356 } 2357 2358 hisi_hba->regs = devm_platform_ioremap_resource(pdev, 0); 2359 if (IS_ERR(hisi_hba->regs)) 2360 goto err_out; 2361 2362 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 2363 if (res) { 2364 hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res); 2365 if (IS_ERR(hisi_hba->sgpio_regs)) 2366 goto err_out; 2367 } 2368 2369 if (hisi_sas_alloc(hisi_hba)) { 2370 hisi_sas_free(hisi_hba); 2371 goto err_out; 2372 } 2373 2374 return shost; 2375 err_out: 2376 scsi_host_put(shost); 2377 dev_err(dev, "shost alloc failed\n"); 2378 return NULL; 2379 } 2380 2381 static int hisi_sas_interrupt_preinit(struct hisi_hba *hisi_hba) 2382 { 2383 if (hisi_hba->hw->interrupt_preinit) 2384 return hisi_hba->hw->interrupt_preinit(hisi_hba); 2385 return 0; 2386 } 2387 2388 int hisi_sas_probe(struct platform_device *pdev, 2389 const struct hisi_sas_hw *hw) 2390 { 2391 struct Scsi_Host *shost; 2392 struct hisi_hba *hisi_hba; 2393 struct device *dev = &pdev->dev; 2394 struct asd_sas_phy **arr_phy; 2395 struct asd_sas_port **arr_port; 2396 struct sas_ha_struct *sha; 2397 int rc, phy_nr, port_nr, i; 2398 2399 shost = hisi_sas_shost_alloc(pdev, hw); 2400 if (!shost) 2401 return -ENOMEM; 2402 2403 sha = SHOST_TO_SAS_HA(shost); 2404 hisi_hba = shost_priv(shost); 2405 platform_set_drvdata(pdev, sha); 2406 2407 phy_nr = port_nr = hisi_hba->n_phy; 2408 2409 arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL); 2410 arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL); 2411 if (!arr_phy || !arr_port) { 2412 rc = -ENOMEM; 2413 goto err_out_ha; 2414 } 2415 2416 sha->sas_phy = arr_phy; 2417 sha->sas_port = arr_port; 2418 sha->lldd_ha = hisi_hba; 2419 2420 shost->transportt = hisi_sas_stt; 2421 shost->max_id = HISI_SAS_MAX_DEVICES; 2422 shost->max_lun = ~0; 2423 shost->max_channel = 1; 2424 shost->max_cmd_len = 16; 2425 if (hisi_hba->hw->slot_index_alloc) { 2426 shost->can_queue = HISI_SAS_MAX_COMMANDS; 2427 shost->cmd_per_lun = HISI_SAS_MAX_COMMANDS; 2428 } else { 2429 shost->can_queue = HISI_SAS_UNRESERVED_IPTT; 2430 shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT; 2431 } 2432 2433 sha->sas_ha_name = DRV_NAME; 2434 sha->dev = hisi_hba->dev; 2435 sha->lldd_module = THIS_MODULE; 2436 sha->sas_addr = &hisi_hba->sas_addr[0]; 2437 sha->num_phys = hisi_hba->n_phy; 2438 sha->core.shost = hisi_hba->shost; 2439 2440 for (i = 0; i < hisi_hba->n_phy; i++) { 2441 sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy; 2442 sha->sas_port[i] = &hisi_hba->port[i].sas_port; 2443 } 2444 2445 rc = hisi_sas_interrupt_preinit(hisi_hba); 2446 if (rc) 2447 goto err_out_ha; 2448 2449 rc = scsi_add_host(shost, &pdev->dev); 2450 if (rc) 2451 goto err_out_ha; 2452 2453 rc = sas_register_ha(sha); 2454 if (rc) 2455 goto err_out_register_ha; 2456 2457 rc = hisi_hba->hw->hw_init(hisi_hba); 2458 if (rc) 2459 goto err_out_hw_init; 2460 2461 scsi_scan_host(shost); 2462 2463 return 0; 2464 2465 err_out_hw_init: 2466 sas_unregister_ha(sha); 2467 err_out_register_ha: 2468 scsi_remove_host(shost); 2469 err_out_ha: 2470 hisi_sas_free(hisi_hba); 2471 scsi_host_put(shost); 2472 return rc; 2473 } 2474 EXPORT_SYMBOL_GPL(hisi_sas_probe); 2475 2476 int hisi_sas_remove(struct platform_device *pdev) 2477 { 2478 struct sas_ha_struct *sha = platform_get_drvdata(pdev); 2479 struct hisi_hba *hisi_hba = sha->lldd_ha; 2480 struct Scsi_Host *shost = sha->core.shost; 2481 2482 del_timer_sync(&hisi_hba->timer); 2483 2484 sas_unregister_ha(sha); 2485 sas_remove_host(sha->core.shost); 2486 2487 hisi_sas_free(hisi_hba); 2488 scsi_host_put(shost); 2489 return 0; 2490 } 2491 EXPORT_SYMBOL_GPL(hisi_sas_remove); 2492 2493 #if IS_ENABLED(CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE) 2494 #define DEBUGFS_ENABLE_DEFAULT "enabled" 2495 bool hisi_sas_debugfs_enable = true; 2496 u32 hisi_sas_debugfs_dump_count = 50; 2497 #else 2498 #define DEBUGFS_ENABLE_DEFAULT "disabled" 2499 bool hisi_sas_debugfs_enable; 2500 u32 hisi_sas_debugfs_dump_count = 1; 2501 #endif 2502 2503 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable); 2504 module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444); 2505 MODULE_PARM_DESC(hisi_sas_debugfs_enable, 2506 "Enable driver debugfs (default "DEBUGFS_ENABLE_DEFAULT")"); 2507 2508 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dump_count); 2509 module_param_named(debugfs_dump_count, hisi_sas_debugfs_dump_count, uint, 0444); 2510 MODULE_PARM_DESC(hisi_sas_debugfs_dump_count, "Number of debugfs dumps to allow"); 2511 2512 struct dentry *hisi_sas_debugfs_dir; 2513 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dir); 2514 2515 static __init int hisi_sas_init(void) 2516 { 2517 hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops); 2518 if (!hisi_sas_stt) 2519 return -ENOMEM; 2520 2521 if (hisi_sas_debugfs_enable) { 2522 hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL); 2523 if (hisi_sas_debugfs_dump_count > HISI_SAS_MAX_DEBUGFS_DUMP) { 2524 pr_info("hisi_sas: Limiting debugfs dump count\n"); 2525 hisi_sas_debugfs_dump_count = HISI_SAS_MAX_DEBUGFS_DUMP; 2526 } 2527 } 2528 2529 return 0; 2530 } 2531 2532 static __exit void hisi_sas_exit(void) 2533 { 2534 sas_release_transport(hisi_sas_stt); 2535 2536 debugfs_remove(hisi_sas_debugfs_dir); 2537 } 2538 2539 module_init(hisi_sas_init); 2540 module_exit(hisi_sas_exit); 2541 2542 MODULE_LICENSE("GPL"); 2543 MODULE_AUTHOR("John Garry <john.garry@huawei.com>"); 2544 MODULE_DESCRIPTION("HISILICON SAS controller driver"); 2545 MODULE_ALIAS("platform:" DRV_NAME); 2546