1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Copyright (c) 2015 Linaro Ltd. 4 * Copyright (c) 2015 Hisilicon Limited. 5 */ 6 7 #include "hisi_sas.h" 8 #define DRV_NAME "hisi_sas" 9 10 #define DEV_IS_GONE(dev) \ 11 ((!dev) || (dev->dev_type == SAS_PHY_UNUSED)) 12 13 static int hisi_sas_softreset_ata_disk(struct domain_device *device); 14 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, 15 void *funcdata); 16 static void hisi_sas_release_task(struct hisi_hba *hisi_hba, 17 struct domain_device *device); 18 static void hisi_sas_dev_gone(struct domain_device *device); 19 20 struct hisi_sas_internal_abort_data { 21 bool rst_ha_timeout; /* reset the HA for timeout */ 22 }; 23 24 u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction) 25 { 26 switch (fis->command) { 27 case ATA_CMD_FPDMA_WRITE: 28 case ATA_CMD_FPDMA_READ: 29 case ATA_CMD_FPDMA_RECV: 30 case ATA_CMD_FPDMA_SEND: 31 case ATA_CMD_NCQ_NON_DATA: 32 return HISI_SAS_SATA_PROTOCOL_FPDMA; 33 34 case ATA_CMD_DOWNLOAD_MICRO: 35 case ATA_CMD_ID_ATA: 36 case ATA_CMD_PMP_READ: 37 case ATA_CMD_READ_LOG_EXT: 38 case ATA_CMD_PIO_READ: 39 case ATA_CMD_PIO_READ_EXT: 40 case ATA_CMD_PMP_WRITE: 41 case ATA_CMD_WRITE_LOG_EXT: 42 case ATA_CMD_PIO_WRITE: 43 case ATA_CMD_PIO_WRITE_EXT: 44 return HISI_SAS_SATA_PROTOCOL_PIO; 45 46 case ATA_CMD_DSM: 47 case ATA_CMD_DOWNLOAD_MICRO_DMA: 48 case ATA_CMD_PMP_READ_DMA: 49 case ATA_CMD_PMP_WRITE_DMA: 50 case ATA_CMD_READ: 51 case ATA_CMD_READ_EXT: 52 case ATA_CMD_READ_LOG_DMA_EXT: 53 case ATA_CMD_READ_STREAM_DMA_EXT: 54 case ATA_CMD_TRUSTED_RCV_DMA: 55 case ATA_CMD_TRUSTED_SND_DMA: 56 case ATA_CMD_WRITE: 57 case ATA_CMD_WRITE_EXT: 58 case ATA_CMD_WRITE_FUA_EXT: 59 case ATA_CMD_WRITE_QUEUED: 60 case ATA_CMD_WRITE_LOG_DMA_EXT: 61 case ATA_CMD_WRITE_STREAM_DMA_EXT: 62 case ATA_CMD_ZAC_MGMT_IN: 63 return HISI_SAS_SATA_PROTOCOL_DMA; 64 65 case ATA_CMD_CHK_POWER: 66 case ATA_CMD_DEV_RESET: 67 case ATA_CMD_EDD: 68 case ATA_CMD_FLUSH: 69 case ATA_CMD_FLUSH_EXT: 70 case ATA_CMD_VERIFY: 71 case ATA_CMD_VERIFY_EXT: 72 case ATA_CMD_SET_FEATURES: 73 case ATA_CMD_STANDBY: 74 case ATA_CMD_STANDBYNOW1: 75 case ATA_CMD_ZAC_MGMT_OUT: 76 return HISI_SAS_SATA_PROTOCOL_NONDATA; 77 78 case ATA_CMD_SET_MAX: 79 switch (fis->features) { 80 case ATA_SET_MAX_PASSWD: 81 case ATA_SET_MAX_LOCK: 82 return HISI_SAS_SATA_PROTOCOL_PIO; 83 84 case ATA_SET_MAX_PASSWD_DMA: 85 case ATA_SET_MAX_UNLOCK_DMA: 86 return HISI_SAS_SATA_PROTOCOL_DMA; 87 88 default: 89 return HISI_SAS_SATA_PROTOCOL_NONDATA; 90 } 91 92 default: 93 { 94 if (direction == DMA_NONE) 95 return HISI_SAS_SATA_PROTOCOL_NONDATA; 96 return HISI_SAS_SATA_PROTOCOL_PIO; 97 } 98 } 99 } 100 EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol); 101 102 void hisi_sas_sata_done(struct sas_task *task, 103 struct hisi_sas_slot *slot) 104 { 105 struct task_status_struct *ts = &task->task_status; 106 struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf; 107 struct hisi_sas_status_buffer *status_buf = 108 hisi_sas_status_buf_addr_mem(slot); 109 u8 *iu = &status_buf->iu[0]; 110 struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu; 111 112 resp->frame_len = sizeof(struct dev_to_host_fis); 113 memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis)); 114 115 ts->buf_valid_size = sizeof(*resp); 116 } 117 EXPORT_SYMBOL_GPL(hisi_sas_sata_done); 118 119 /* 120 * This function assumes linkrate mask fits in 8 bits, which it 121 * does for all HW versions supported. 122 */ 123 u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max) 124 { 125 u8 rate = 0; 126 int i; 127 128 max -= SAS_LINK_RATE_1_5_GBPS; 129 for (i = 0; i <= max; i++) 130 rate |= 1 << (i * 2); 131 return rate; 132 } 133 EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask); 134 135 static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device) 136 { 137 return device->port->ha->lldd_ha; 138 } 139 140 struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port) 141 { 142 return container_of(sas_port, struct hisi_sas_port, sas_port); 143 } 144 EXPORT_SYMBOL_GPL(to_hisi_sas_port); 145 146 void hisi_sas_stop_phys(struct hisi_hba *hisi_hba) 147 { 148 int phy_no; 149 150 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) 151 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 152 } 153 EXPORT_SYMBOL_GPL(hisi_sas_stop_phys); 154 155 static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx) 156 { 157 void *bitmap = hisi_hba->slot_index_tags; 158 159 __clear_bit(slot_idx, bitmap); 160 } 161 162 static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx) 163 { 164 if (hisi_hba->hw->slot_index_alloc || 165 slot_idx >= HISI_SAS_UNRESERVED_IPTT) { 166 spin_lock(&hisi_hba->lock); 167 hisi_sas_slot_index_clear(hisi_hba, slot_idx); 168 spin_unlock(&hisi_hba->lock); 169 } 170 } 171 172 static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx) 173 { 174 void *bitmap = hisi_hba->slot_index_tags; 175 176 __set_bit(slot_idx, bitmap); 177 } 178 179 static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, 180 struct scsi_cmnd *scsi_cmnd) 181 { 182 int index; 183 void *bitmap = hisi_hba->slot_index_tags; 184 185 if (scsi_cmnd) 186 return scsi_cmd_to_rq(scsi_cmnd)->tag; 187 188 spin_lock(&hisi_hba->lock); 189 index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count, 190 hisi_hba->last_slot_index + 1); 191 if (index >= hisi_hba->slot_index_count) { 192 index = find_next_zero_bit(bitmap, 193 hisi_hba->slot_index_count, 194 HISI_SAS_UNRESERVED_IPTT); 195 if (index >= hisi_hba->slot_index_count) { 196 spin_unlock(&hisi_hba->lock); 197 return -SAS_QUEUE_FULL; 198 } 199 } 200 hisi_sas_slot_index_set(hisi_hba, index); 201 hisi_hba->last_slot_index = index; 202 spin_unlock(&hisi_hba->lock); 203 204 return index; 205 } 206 207 void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, 208 struct hisi_sas_slot *slot) 209 { 210 int device_id = slot->device_id; 211 struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id]; 212 213 if (task) { 214 struct device *dev = hisi_hba->dev; 215 216 if (!task->lldd_task) 217 return; 218 219 task->lldd_task = NULL; 220 221 if (!sas_protocol_ata(task->task_proto)) { 222 if (slot->n_elem) { 223 if (task->task_proto & SAS_PROTOCOL_SSP) 224 dma_unmap_sg(dev, task->scatter, 225 task->num_scatter, 226 task->data_dir); 227 else 228 dma_unmap_sg(dev, &task->smp_task.smp_req, 229 1, DMA_TO_DEVICE); 230 } 231 if (slot->n_elem_dif) { 232 struct sas_ssp_task *ssp_task = &task->ssp_task; 233 struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; 234 235 dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), 236 scsi_prot_sg_count(scsi_cmnd), 237 task->data_dir); 238 } 239 } 240 } 241 242 spin_lock(&sas_dev->lock); 243 list_del_init(&slot->entry); 244 spin_unlock(&sas_dev->lock); 245 246 memset(slot, 0, offsetof(struct hisi_sas_slot, buf)); 247 248 hisi_sas_slot_index_free(hisi_hba, slot->idx); 249 } 250 EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free); 251 252 static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba, 253 struct hisi_sas_slot *slot) 254 { 255 hisi_hba->hw->prep_smp(hisi_hba, slot); 256 } 257 258 static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba, 259 struct hisi_sas_slot *slot) 260 { 261 hisi_hba->hw->prep_ssp(hisi_hba, slot); 262 } 263 264 static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba, 265 struct hisi_sas_slot *slot) 266 { 267 hisi_hba->hw->prep_stp(hisi_hba, slot); 268 } 269 270 static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba, 271 struct hisi_sas_slot *slot) 272 { 273 hisi_hba->hw->prep_abort(hisi_hba, slot); 274 } 275 276 static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba, 277 struct sas_task *task, int n_elem) 278 { 279 struct device *dev = hisi_hba->dev; 280 281 if (!sas_protocol_ata(task->task_proto) && n_elem) { 282 if (task->num_scatter) { 283 dma_unmap_sg(dev, task->scatter, task->num_scatter, 284 task->data_dir); 285 } else if (task->task_proto & SAS_PROTOCOL_SMP) { 286 dma_unmap_sg(dev, &task->smp_task.smp_req, 287 1, DMA_TO_DEVICE); 288 } 289 } 290 } 291 292 static int hisi_sas_dma_map(struct hisi_hba *hisi_hba, 293 struct sas_task *task, int *n_elem) 294 { 295 struct device *dev = hisi_hba->dev; 296 int rc; 297 298 if (sas_protocol_ata(task->task_proto)) { 299 *n_elem = task->num_scatter; 300 } else { 301 unsigned int req_len; 302 303 if (task->num_scatter) { 304 *n_elem = dma_map_sg(dev, task->scatter, 305 task->num_scatter, task->data_dir); 306 if (!*n_elem) { 307 rc = -ENOMEM; 308 goto prep_out; 309 } 310 } else if (task->task_proto & SAS_PROTOCOL_SMP) { 311 *n_elem = dma_map_sg(dev, &task->smp_task.smp_req, 312 1, DMA_TO_DEVICE); 313 if (!*n_elem) { 314 rc = -ENOMEM; 315 goto prep_out; 316 } 317 req_len = sg_dma_len(&task->smp_task.smp_req); 318 if (req_len & 0x3) { 319 rc = -EINVAL; 320 goto err_out_dma_unmap; 321 } 322 } 323 } 324 325 if (*n_elem > HISI_SAS_SGE_PAGE_CNT) { 326 dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT\n", 327 *n_elem); 328 rc = -EINVAL; 329 goto err_out_dma_unmap; 330 } 331 return 0; 332 333 err_out_dma_unmap: 334 /* It would be better to call dma_unmap_sg() here, but it's messy */ 335 hisi_sas_dma_unmap(hisi_hba, task, *n_elem); 336 prep_out: 337 return rc; 338 } 339 340 static void hisi_sas_dif_dma_unmap(struct hisi_hba *hisi_hba, 341 struct sas_task *task, int n_elem_dif) 342 { 343 struct device *dev = hisi_hba->dev; 344 345 if (n_elem_dif) { 346 struct sas_ssp_task *ssp_task = &task->ssp_task; 347 struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; 348 349 dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), 350 scsi_prot_sg_count(scsi_cmnd), 351 task->data_dir); 352 } 353 } 354 355 static int hisi_sas_dif_dma_map(struct hisi_hba *hisi_hba, 356 int *n_elem_dif, struct sas_task *task) 357 { 358 struct device *dev = hisi_hba->dev; 359 struct sas_ssp_task *ssp_task; 360 struct scsi_cmnd *scsi_cmnd; 361 int rc; 362 363 if (task->num_scatter) { 364 ssp_task = &task->ssp_task; 365 scsi_cmnd = ssp_task->cmd; 366 367 if (scsi_prot_sg_count(scsi_cmnd)) { 368 *n_elem_dif = dma_map_sg(dev, 369 scsi_prot_sglist(scsi_cmnd), 370 scsi_prot_sg_count(scsi_cmnd), 371 task->data_dir); 372 373 if (!*n_elem_dif) 374 return -ENOMEM; 375 376 if (*n_elem_dif > HISI_SAS_SGE_DIF_PAGE_CNT) { 377 dev_err(dev, "task prep: n_elem_dif(%d) too large\n", 378 *n_elem_dif); 379 rc = -EINVAL; 380 goto err_out_dif_dma_unmap; 381 } 382 } 383 } 384 385 return 0; 386 387 err_out_dif_dma_unmap: 388 dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), 389 scsi_prot_sg_count(scsi_cmnd), task->data_dir); 390 return rc; 391 } 392 393 static 394 void hisi_sas_task_deliver(struct hisi_hba *hisi_hba, 395 struct hisi_sas_slot *slot, 396 struct hisi_sas_dq *dq, 397 struct hisi_sas_device *sas_dev) 398 { 399 struct hisi_sas_cmd_hdr *cmd_hdr_base; 400 int dlvry_queue_slot, dlvry_queue; 401 struct sas_task *task = slot->task; 402 int wr_q_index; 403 404 spin_lock(&dq->lock); 405 wr_q_index = dq->wr_point; 406 dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS; 407 list_add_tail(&slot->delivery, &dq->list); 408 spin_unlock(&dq->lock); 409 spin_lock(&sas_dev->lock); 410 list_add_tail(&slot->entry, &sas_dev->list); 411 spin_unlock(&sas_dev->lock); 412 413 dlvry_queue = dq->id; 414 dlvry_queue_slot = wr_q_index; 415 416 slot->device_id = sas_dev->device_id; 417 slot->dlvry_queue = dlvry_queue; 418 slot->dlvry_queue_slot = dlvry_queue_slot; 419 cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue]; 420 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot]; 421 422 task->lldd_task = slot; 423 424 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr)); 425 memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ); 426 memset(hisi_sas_status_buf_addr_mem(slot), 0, 427 sizeof(struct hisi_sas_err_record)); 428 429 switch (task->task_proto) { 430 case SAS_PROTOCOL_SMP: 431 hisi_sas_task_prep_smp(hisi_hba, slot); 432 break; 433 case SAS_PROTOCOL_SSP: 434 hisi_sas_task_prep_ssp(hisi_hba, slot); 435 break; 436 case SAS_PROTOCOL_SATA: 437 case SAS_PROTOCOL_STP: 438 case SAS_PROTOCOL_STP_ALL: 439 hisi_sas_task_prep_ata(hisi_hba, slot); 440 break; 441 case SAS_PROTOCOL_INTERNAL_ABORT: 442 hisi_sas_task_prep_abort(hisi_hba, slot); 443 break; 444 default: 445 return; 446 } 447 448 /* Make slot memories observable before marking as ready */ 449 smp_wmb(); 450 WRITE_ONCE(slot->ready, 1); 451 452 spin_lock(&dq->lock); 453 hisi_hba->hw->start_delivery(dq); 454 spin_unlock(&dq->lock); 455 } 456 457 static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags) 458 { 459 int n_elem = 0, n_elem_dif = 0; 460 struct domain_device *device = task->dev; 461 struct asd_sas_port *sas_port = device->port; 462 struct hisi_sas_device *sas_dev = device->lldd_dev; 463 bool internal_abort = sas_is_internal_abort(task); 464 struct scsi_cmnd *scmd = NULL; 465 struct hisi_sas_dq *dq = NULL; 466 struct hisi_sas_port *port; 467 struct hisi_hba *hisi_hba; 468 struct hisi_sas_slot *slot; 469 struct device *dev; 470 int rc; 471 472 if (!sas_port) { 473 struct task_status_struct *ts = &task->task_status; 474 475 ts->resp = SAS_TASK_UNDELIVERED; 476 ts->stat = SAS_PHY_DOWN; 477 /* 478 * libsas will use dev->port, should 479 * not call task_done for sata 480 */ 481 if (device->dev_type != SAS_SATA_DEV && !internal_abort) 482 task->task_done(task); 483 return -ECOMM; 484 } 485 486 hisi_hba = dev_to_hisi_hba(device); 487 dev = hisi_hba->dev; 488 489 switch (task->task_proto) { 490 case SAS_PROTOCOL_SSP: 491 case SAS_PROTOCOL_SMP: 492 case SAS_PROTOCOL_SATA: 493 case SAS_PROTOCOL_STP: 494 case SAS_PROTOCOL_STP_ALL: 495 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) { 496 if (!gfpflags_allow_blocking(gfp_flags)) 497 return -EINVAL; 498 499 down(&hisi_hba->sem); 500 up(&hisi_hba->sem); 501 } 502 503 if (DEV_IS_GONE(sas_dev)) { 504 if (sas_dev) 505 dev_info(dev, "task prep: device %d not ready\n", 506 sas_dev->device_id); 507 else 508 dev_info(dev, "task prep: device %016llx not ready\n", 509 SAS_ADDR(device->sas_addr)); 510 511 return -ECOMM; 512 } 513 514 port = to_hisi_sas_port(sas_port); 515 if (!port->port_attached) { 516 dev_info(dev, "task prep: %s port%d not attach device\n", 517 dev_is_sata(device) ? "SATA/STP" : "SAS", 518 device->port->id); 519 520 return -ECOMM; 521 } 522 523 if (task->uldd_task) { 524 struct ata_queued_cmd *qc; 525 526 if (dev_is_sata(device)) { 527 qc = task->uldd_task; 528 scmd = qc->scsicmd; 529 } else { 530 scmd = task->uldd_task; 531 } 532 } 533 534 if (scmd) { 535 unsigned int dq_index; 536 u32 blk_tag; 537 538 blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); 539 dq_index = blk_mq_unique_tag_to_hwq(blk_tag); 540 dq = &hisi_hba->dq[dq_index]; 541 } else { 542 struct Scsi_Host *shost = hisi_hba->shost; 543 struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; 544 int queue = qmap->mq_map[raw_smp_processor_id()]; 545 546 dq = &hisi_hba->dq[queue]; 547 } 548 break; 549 case SAS_PROTOCOL_INTERNAL_ABORT: 550 if (!hisi_hba->hw->prep_abort) 551 return TMF_RESP_FUNC_FAILED; 552 553 if (test_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags)) 554 return -EIO; 555 556 hisi_hba = dev_to_hisi_hba(device); 557 558 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) 559 return -EINVAL; 560 561 port = to_hisi_sas_port(sas_port); 562 dq = &hisi_hba->dq[task->abort_task.qid]; 563 break; 564 default: 565 dev_err(hisi_hba->dev, "task prep: unknown/unsupported proto (0x%x)\n", 566 task->task_proto); 567 return -EINVAL; 568 } 569 570 rc = hisi_sas_dma_map(hisi_hba, task, &n_elem); 571 if (rc < 0) 572 goto prep_out; 573 574 if (!sas_protocol_ata(task->task_proto)) { 575 rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task); 576 if (rc < 0) 577 goto err_out_dma_unmap; 578 } 579 580 if (!internal_abort && hisi_hba->hw->slot_index_alloc) 581 rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device); 582 else 583 rc = hisi_sas_slot_index_alloc(hisi_hba, scmd); 584 585 if (rc < 0) 586 goto err_out_dif_dma_unmap; 587 588 slot = &hisi_hba->slot_info[rc]; 589 slot->n_elem = n_elem; 590 slot->n_elem_dif = n_elem_dif; 591 slot->task = task; 592 slot->port = port; 593 594 slot->tmf = task->tmf; 595 slot->is_internal = !!task->tmf || internal_abort; 596 597 /* protect task_prep and start_delivery sequence */ 598 hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev); 599 600 return 0; 601 602 err_out_dif_dma_unmap: 603 if (!sas_protocol_ata(task->task_proto)) 604 hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif); 605 err_out_dma_unmap: 606 hisi_sas_dma_unmap(hisi_hba, task, n_elem); 607 prep_out: 608 dev_err(dev, "task exec: failed[%d]!\n", rc); 609 return rc; 610 } 611 612 static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no, 613 gfp_t gfp_flags) 614 { 615 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 616 struct asd_sas_phy *sas_phy = &phy->sas_phy; 617 618 if (!phy->phy_attached) 619 return; 620 621 sas_notify_phy_event(sas_phy, PHYE_OOB_DONE, gfp_flags); 622 623 if (sas_phy->phy) { 624 struct sas_phy *sphy = sas_phy->phy; 625 626 sphy->negotiated_linkrate = sas_phy->linkrate; 627 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; 628 sphy->maximum_linkrate_hw = 629 hisi_hba->hw->phy_get_max_linkrate(); 630 if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) 631 sphy->minimum_linkrate = phy->minimum_linkrate; 632 633 if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) 634 sphy->maximum_linkrate = phy->maximum_linkrate; 635 } 636 637 if (phy->phy_type & PORT_TYPE_SAS) { 638 struct sas_identify_frame *id; 639 640 id = (struct sas_identify_frame *)phy->frame_rcvd; 641 id->dev_type = phy->identify.device_type; 642 id->initiator_bits = SAS_PROTOCOL_ALL; 643 id->target_bits = phy->identify.target_port_protocols; 644 } else if (phy->phy_type & PORT_TYPE_SATA) { 645 /* Nothing */ 646 } 647 648 sas_phy->frame_rcvd_size = phy->frame_rcvd_size; 649 sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, gfp_flags); 650 } 651 652 static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device) 653 { 654 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 655 struct hisi_sas_device *sas_dev = NULL; 656 int last = hisi_hba->last_dev_id; 657 int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES; 658 int i; 659 660 spin_lock(&hisi_hba->lock); 661 for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) { 662 if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) { 663 int queue = i % hisi_hba->queue_count; 664 struct hisi_sas_dq *dq = &hisi_hba->dq[queue]; 665 666 hisi_hba->devices[i].device_id = i; 667 sas_dev = &hisi_hba->devices[i]; 668 sas_dev->dev_status = HISI_SAS_DEV_INIT; 669 sas_dev->dev_type = device->dev_type; 670 sas_dev->hisi_hba = hisi_hba; 671 sas_dev->sas_device = device; 672 sas_dev->dq = dq; 673 spin_lock_init(&sas_dev->lock); 674 INIT_LIST_HEAD(&hisi_hba->devices[i].list); 675 break; 676 } 677 i++; 678 } 679 hisi_hba->last_dev_id = i; 680 spin_unlock(&hisi_hba->lock); 681 682 return sas_dev; 683 } 684 685 static void hisi_sas_tmf_aborted(struct sas_task *task) 686 { 687 struct hisi_sas_slot *slot = task->lldd_task; 688 struct domain_device *device = task->dev; 689 struct hisi_sas_device *sas_dev = device->lldd_dev; 690 struct hisi_hba *hisi_hba = sas_dev->hisi_hba; 691 692 if (slot) { 693 struct hisi_sas_cq *cq = 694 &hisi_hba->cq[slot->dlvry_queue]; 695 /* 696 * sync irq to avoid free'ing task 697 * before using task in IO completion 698 */ 699 synchronize_irq(cq->irq_no); 700 slot->task = NULL; 701 } 702 } 703 704 #define HISI_SAS_DISK_RECOVER_CNT 3 705 static int hisi_sas_init_device(struct domain_device *device) 706 { 707 int rc = TMF_RESP_FUNC_COMPLETE; 708 struct scsi_lun lun; 709 int retry = HISI_SAS_DISK_RECOVER_CNT; 710 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 711 712 switch (device->dev_type) { 713 case SAS_END_DEVICE: 714 int_to_scsilun(0, &lun); 715 716 while (retry-- > 0) { 717 rc = sas_clear_task_set(device, lun.scsi_lun); 718 if (rc == TMF_RESP_FUNC_COMPLETE) { 719 hisi_sas_release_task(hisi_hba, device); 720 break; 721 } 722 } 723 break; 724 case SAS_SATA_DEV: 725 case SAS_SATA_PM: 726 case SAS_SATA_PM_PORT: 727 case SAS_SATA_PENDING: 728 /* 729 * If an expander is swapped when a SATA disk is attached then 730 * we should issue a hard reset to clear previous affiliation 731 * of STP target port, see SPL (chapter 6.19.4). 732 * 733 * However we don't need to issue a hard reset here for these 734 * reasons: 735 * a. When probing the device, libsas/libata already issues a 736 * hard reset in sas_probe_sata() -> ata_sas_async_probe(). 737 * Note that in hisi_sas_debug_I_T_nexus_reset() we take care 738 * to issue a hard reset by checking the dev status (== INIT). 739 * b. When resetting the controller, this is simply unnecessary. 740 */ 741 while (retry-- > 0) { 742 rc = hisi_sas_softreset_ata_disk(device); 743 if (!rc) 744 break; 745 } 746 break; 747 default: 748 break; 749 } 750 751 return rc; 752 } 753 754 int hisi_sas_slave_alloc(struct scsi_device *sdev) 755 { 756 struct domain_device *ddev = sdev_to_domain_dev(sdev); 757 struct hisi_sas_device *sas_dev = ddev->lldd_dev; 758 int rc; 759 760 rc = sas_slave_alloc(sdev); 761 if (rc) 762 return rc; 763 764 rc = hisi_sas_init_device(ddev); 765 if (rc) 766 return rc; 767 sas_dev->dev_status = HISI_SAS_DEV_NORMAL; 768 return 0; 769 } 770 EXPORT_SYMBOL_GPL(hisi_sas_slave_alloc); 771 772 static int hisi_sas_dev_found(struct domain_device *device) 773 { 774 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 775 struct domain_device *parent_dev = device->parent; 776 struct hisi_sas_device *sas_dev; 777 struct device *dev = hisi_hba->dev; 778 int rc; 779 780 if (hisi_hba->hw->alloc_dev) 781 sas_dev = hisi_hba->hw->alloc_dev(device); 782 else 783 sas_dev = hisi_sas_alloc_dev(device); 784 if (!sas_dev) { 785 dev_err(dev, "fail alloc dev: max support %d devices\n", 786 HISI_SAS_MAX_DEVICES); 787 return -EINVAL; 788 } 789 790 device->lldd_dev = sas_dev; 791 hisi_hba->hw->setup_itct(hisi_hba, sas_dev); 792 793 if (parent_dev && dev_is_expander(parent_dev->dev_type)) { 794 int phy_no; 795 u8 phy_num = parent_dev->ex_dev.num_phys; 796 struct ex_phy *phy; 797 798 for (phy_no = 0; phy_no < phy_num; phy_no++) { 799 phy = &parent_dev->ex_dev.ex_phy[phy_no]; 800 if (SAS_ADDR(phy->attached_sas_addr) == 801 SAS_ADDR(device->sas_addr)) 802 break; 803 } 804 805 if (phy_no == phy_num) { 806 dev_info(dev, "dev found: no attached " 807 "dev:%016llx at ex:%016llx\n", 808 SAS_ADDR(device->sas_addr), 809 SAS_ADDR(parent_dev->sas_addr)); 810 rc = -EINVAL; 811 goto err_out; 812 } 813 } 814 815 dev_info(dev, "dev[%d:%x] found\n", 816 sas_dev->device_id, sas_dev->dev_type); 817 818 return 0; 819 820 err_out: 821 hisi_sas_dev_gone(device); 822 return rc; 823 } 824 825 int hisi_sas_slave_configure(struct scsi_device *sdev) 826 { 827 struct domain_device *dev = sdev_to_domain_dev(sdev); 828 int ret = sas_slave_configure(sdev); 829 830 if (ret) 831 return ret; 832 if (!dev_is_sata(dev)) 833 sas_change_queue_depth(sdev, 64); 834 835 return 0; 836 } 837 EXPORT_SYMBOL_GPL(hisi_sas_slave_configure); 838 839 void hisi_sas_scan_start(struct Scsi_Host *shost) 840 { 841 struct hisi_hba *hisi_hba = shost_priv(shost); 842 843 hisi_hba->hw->phys_init(hisi_hba); 844 } 845 EXPORT_SYMBOL_GPL(hisi_sas_scan_start); 846 847 int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time) 848 { 849 struct hisi_hba *hisi_hba = shost_priv(shost); 850 struct sas_ha_struct *sha = &hisi_hba->sha; 851 852 /* Wait for PHY up interrupt to occur */ 853 if (time < HZ) 854 return 0; 855 856 sas_drain_work(sha); 857 return 1; 858 } 859 EXPORT_SYMBOL_GPL(hisi_sas_scan_finished); 860 861 static void hisi_sas_phyup_work_common(struct work_struct *work, 862 enum hisi_sas_phy_event event) 863 { 864 struct hisi_sas_phy *phy = 865 container_of(work, typeof(*phy), works[event]); 866 struct hisi_hba *hisi_hba = phy->hisi_hba; 867 struct asd_sas_phy *sas_phy = &phy->sas_phy; 868 int phy_no = sas_phy->id; 869 870 phy->wait_phyup_cnt = 0; 871 if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP) 872 hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no); 873 hisi_sas_bytes_dmaed(hisi_hba, phy_no, GFP_KERNEL); 874 } 875 876 static void hisi_sas_phyup_work(struct work_struct *work) 877 { 878 hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP); 879 } 880 881 static void hisi_sas_linkreset_work(struct work_struct *work) 882 { 883 struct hisi_sas_phy *phy = 884 container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]); 885 struct asd_sas_phy *sas_phy = &phy->sas_phy; 886 887 hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL); 888 } 889 890 static void hisi_sas_phyup_pm_work(struct work_struct *work) 891 { 892 struct hisi_sas_phy *phy = 893 container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP_PM]); 894 struct hisi_hba *hisi_hba = phy->hisi_hba; 895 struct device *dev = hisi_hba->dev; 896 897 hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP_PM); 898 pm_runtime_put_sync(dev); 899 } 900 901 static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = { 902 [HISI_PHYE_PHY_UP] = hisi_sas_phyup_work, 903 [HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work, 904 [HISI_PHYE_PHY_UP_PM] = hisi_sas_phyup_pm_work, 905 }; 906 907 bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy, 908 enum hisi_sas_phy_event event) 909 { 910 struct hisi_hba *hisi_hba = phy->hisi_hba; 911 912 if (WARN_ON(event >= HISI_PHYES_NUM)) 913 return false; 914 915 return queue_work(hisi_hba->wq, &phy->works[event]); 916 } 917 EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event); 918 919 static void hisi_sas_wait_phyup_timedout(struct timer_list *t) 920 { 921 struct hisi_sas_phy *phy = from_timer(phy, t, timer); 922 struct hisi_hba *hisi_hba = phy->hisi_hba; 923 struct device *dev = hisi_hba->dev; 924 int phy_no = phy->sas_phy.id; 925 926 dev_warn(dev, "phy%d wait phyup timeout, issuing link reset\n", phy_no); 927 hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); 928 } 929 930 #define HISI_SAS_WAIT_PHYUP_RETRIES 10 931 932 void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no) 933 { 934 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 935 struct device *dev = hisi_hba->dev; 936 unsigned long flags; 937 938 dev_dbg(dev, "phy%d OOB ready\n", phy_no); 939 spin_lock_irqsave(&phy->lock, flags); 940 if (phy->phy_attached) { 941 spin_unlock_irqrestore(&phy->lock, flags); 942 return; 943 } 944 945 if (!timer_pending(&phy->timer)) { 946 if (phy->wait_phyup_cnt < HISI_SAS_WAIT_PHYUP_RETRIES) { 947 phy->wait_phyup_cnt++; 948 phy->timer.expires = jiffies + 949 HISI_SAS_WAIT_PHYUP_TIMEOUT; 950 add_timer(&phy->timer); 951 spin_unlock_irqrestore(&phy->lock, flags); 952 return; 953 } 954 955 dev_warn(dev, "phy%d failed to come up %d times, giving up\n", 956 phy_no, phy->wait_phyup_cnt); 957 phy->wait_phyup_cnt = 0; 958 } 959 spin_unlock_irqrestore(&phy->lock, flags); 960 } 961 962 EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready); 963 964 static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no) 965 { 966 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 967 struct asd_sas_phy *sas_phy = &phy->sas_phy; 968 int i; 969 970 phy->hisi_hba = hisi_hba; 971 phy->port = NULL; 972 phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS; 973 phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate(); 974 sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0; 975 sas_phy->class = SAS; 976 sas_phy->iproto = SAS_PROTOCOL_ALL; 977 sas_phy->tproto = 0; 978 sas_phy->type = PHY_TYPE_PHYSICAL; 979 sas_phy->role = PHY_ROLE_INITIATOR; 980 sas_phy->oob_mode = OOB_NOT_CONNECTED; 981 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; 982 sas_phy->id = phy_no; 983 sas_phy->sas_addr = &hisi_hba->sas_addr[0]; 984 sas_phy->frame_rcvd = &phy->frame_rcvd[0]; 985 sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata; 986 sas_phy->lldd_phy = phy; 987 988 for (i = 0; i < HISI_PHYES_NUM; i++) 989 INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]); 990 991 spin_lock_init(&phy->lock); 992 993 timer_setup(&phy->timer, hisi_sas_wait_phyup_timedout, 0); 994 } 995 996 /* Wrapper to ensure we track hisi_sas_phy.enable properly */ 997 void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no, int enable) 998 { 999 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1000 struct asd_sas_phy *aphy = &phy->sas_phy; 1001 struct sas_phy *sphy = aphy->phy; 1002 unsigned long flags; 1003 1004 spin_lock_irqsave(&phy->lock, flags); 1005 1006 if (enable) { 1007 /* We may have been enabled already; if so, don't touch */ 1008 if (!phy->enable) 1009 sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; 1010 hisi_hba->hw->phy_start(hisi_hba, phy_no); 1011 } else { 1012 sphy->negotiated_linkrate = SAS_PHY_DISABLED; 1013 hisi_hba->hw->phy_disable(hisi_hba, phy_no); 1014 } 1015 phy->enable = enable; 1016 spin_unlock_irqrestore(&phy->lock, flags); 1017 } 1018 EXPORT_SYMBOL_GPL(hisi_sas_phy_enable); 1019 1020 static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy) 1021 { 1022 struct sas_ha_struct *sas_ha = sas_phy->ha; 1023 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; 1024 struct hisi_sas_phy *phy = sas_phy->lldd_phy; 1025 struct asd_sas_port *sas_port = sas_phy->port; 1026 struct hisi_sas_port *port; 1027 unsigned long flags; 1028 1029 if (!sas_port) 1030 return; 1031 1032 port = to_hisi_sas_port(sas_port); 1033 spin_lock_irqsave(&hisi_hba->lock, flags); 1034 port->port_attached = 1; 1035 port->id = phy->port_id; 1036 phy->port = port; 1037 sas_port->lldd_port = port; 1038 spin_unlock_irqrestore(&hisi_hba->lock, flags); 1039 } 1040 1041 static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task, 1042 struct hisi_sas_slot *slot) 1043 { 1044 if (task) { 1045 unsigned long flags; 1046 struct task_status_struct *ts; 1047 1048 ts = &task->task_status; 1049 1050 ts->resp = SAS_TASK_COMPLETE; 1051 ts->stat = SAS_ABORTED_TASK; 1052 spin_lock_irqsave(&task->task_state_lock, flags); 1053 task->task_state_flags &= ~SAS_TASK_STATE_PENDING; 1054 if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP) 1055 task->task_state_flags |= SAS_TASK_STATE_DONE; 1056 spin_unlock_irqrestore(&task->task_state_lock, flags); 1057 } 1058 1059 hisi_sas_slot_task_free(hisi_hba, task, slot); 1060 } 1061 1062 static void hisi_sas_release_task(struct hisi_hba *hisi_hba, 1063 struct domain_device *device) 1064 { 1065 struct hisi_sas_slot *slot, *slot2; 1066 struct hisi_sas_device *sas_dev = device->lldd_dev; 1067 1068 list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry) 1069 hisi_sas_do_release_task(hisi_hba, slot->task, slot); 1070 } 1071 1072 void hisi_sas_release_tasks(struct hisi_hba *hisi_hba) 1073 { 1074 struct hisi_sas_device *sas_dev; 1075 struct domain_device *device; 1076 int i; 1077 1078 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1079 sas_dev = &hisi_hba->devices[i]; 1080 device = sas_dev->sas_device; 1081 1082 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || 1083 !device) 1084 continue; 1085 1086 hisi_sas_release_task(hisi_hba, device); 1087 } 1088 } 1089 EXPORT_SYMBOL_GPL(hisi_sas_release_tasks); 1090 1091 static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba, 1092 struct domain_device *device) 1093 { 1094 if (hisi_hba->hw->dereg_device) 1095 hisi_hba->hw->dereg_device(hisi_hba, device); 1096 } 1097 1098 static int 1099 hisi_sas_internal_task_abort_dev(struct hisi_sas_device *sas_dev, 1100 bool rst_ha_timeout) 1101 { 1102 struct hisi_sas_internal_abort_data data = { rst_ha_timeout }; 1103 struct domain_device *device = sas_dev->sas_device; 1104 struct hisi_hba *hisi_hba = sas_dev->hisi_hba; 1105 int i, rc; 1106 1107 for (i = 0; i < hisi_hba->cq_nvecs; i++) { 1108 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 1109 const struct cpumask *mask = cq->irq_mask; 1110 1111 if (mask && !cpumask_intersects(cpu_online_mask, mask)) 1112 continue; 1113 rc = sas_execute_internal_abort_dev(device, i, &data); 1114 if (rc) 1115 return rc; 1116 } 1117 1118 return 0; 1119 } 1120 1121 static void hisi_sas_dev_gone(struct domain_device *device) 1122 { 1123 struct hisi_sas_device *sas_dev = device->lldd_dev; 1124 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1125 struct device *dev = hisi_hba->dev; 1126 int ret = 0; 1127 1128 dev_info(dev, "dev[%d:%x] is gone\n", 1129 sas_dev->device_id, sas_dev->dev_type); 1130 1131 down(&hisi_hba->sem); 1132 if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) { 1133 hisi_sas_internal_task_abort_dev(sas_dev, true); 1134 1135 hisi_sas_dereg_device(hisi_hba, device); 1136 1137 ret = hisi_hba->hw->clear_itct(hisi_hba, sas_dev); 1138 device->lldd_dev = NULL; 1139 } 1140 1141 if (hisi_hba->hw->free_device) 1142 hisi_hba->hw->free_device(sas_dev); 1143 1144 /* Don't mark it as SAS_PHY_UNUSED if failed to clear ITCT */ 1145 if (!ret) 1146 sas_dev->dev_type = SAS_PHY_UNUSED; 1147 sas_dev->sas_device = NULL; 1148 up(&hisi_hba->sem); 1149 } 1150 1151 static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no, 1152 struct sas_phy_linkrates *r) 1153 { 1154 struct sas_phy_linkrates _r; 1155 1156 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1157 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1158 enum sas_linkrate min, max; 1159 1160 if (r->minimum_linkrate > SAS_LINK_RATE_1_5_GBPS) 1161 return -EINVAL; 1162 1163 if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) { 1164 max = sas_phy->phy->maximum_linkrate; 1165 min = r->minimum_linkrate; 1166 } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) { 1167 max = r->maximum_linkrate; 1168 min = sas_phy->phy->minimum_linkrate; 1169 } else 1170 return -EINVAL; 1171 1172 _r.maximum_linkrate = max; 1173 _r.minimum_linkrate = min; 1174 1175 sas_phy->phy->maximum_linkrate = max; 1176 sas_phy->phy->minimum_linkrate = min; 1177 1178 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 1179 msleep(100); 1180 hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r); 1181 hisi_sas_phy_enable(hisi_hba, phy_no, 1); 1182 1183 return 0; 1184 } 1185 1186 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, 1187 void *funcdata) 1188 { 1189 struct hisi_sas_phy *phy = container_of(sas_phy, 1190 struct hisi_sas_phy, sas_phy); 1191 struct sas_ha_struct *sas_ha = sas_phy->ha; 1192 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; 1193 struct device *dev = hisi_hba->dev; 1194 DECLARE_COMPLETION_ONSTACK(completion); 1195 int phy_no = sas_phy->id; 1196 u8 sts = phy->phy_attached; 1197 int ret = 0; 1198 1199 down(&hisi_hba->sem); 1200 phy->reset_completion = &completion; 1201 1202 switch (func) { 1203 case PHY_FUNC_HARD_RESET: 1204 hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no); 1205 break; 1206 1207 case PHY_FUNC_LINK_RESET: 1208 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 1209 msleep(100); 1210 hisi_sas_phy_enable(hisi_hba, phy_no, 1); 1211 break; 1212 1213 case PHY_FUNC_DISABLE: 1214 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 1215 goto out; 1216 1217 case PHY_FUNC_SET_LINK_RATE: 1218 ret = hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata); 1219 break; 1220 1221 case PHY_FUNC_GET_EVENTS: 1222 if (hisi_hba->hw->get_events) { 1223 hisi_hba->hw->get_events(hisi_hba, phy_no); 1224 goto out; 1225 } 1226 fallthrough; 1227 case PHY_FUNC_RELEASE_SPINUP_HOLD: 1228 default: 1229 ret = -EOPNOTSUPP; 1230 goto out; 1231 } 1232 1233 if (sts && !wait_for_completion_timeout(&completion, 1234 HISI_SAS_WAIT_PHYUP_TIMEOUT)) { 1235 dev_warn(dev, "phy%d wait phyup timed out for func %d\n", 1236 phy_no, func); 1237 if (phy->in_reset) 1238 ret = -ETIMEDOUT; 1239 } 1240 1241 out: 1242 phy->reset_completion = NULL; 1243 1244 up(&hisi_hba->sem); 1245 return ret; 1246 } 1247 1248 static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev, 1249 bool reset, int pmp, u8 *fis) 1250 { 1251 struct ata_taskfile tf; 1252 1253 ata_tf_init(dev, &tf); 1254 if (reset) 1255 tf.ctl |= ATA_SRST; 1256 else 1257 tf.ctl &= ~ATA_SRST; 1258 tf.command = ATA_CMD_DEV_RESET; 1259 ata_tf_to_fis(&tf, pmp, 0, fis); 1260 } 1261 1262 static int hisi_sas_softreset_ata_disk(struct domain_device *device) 1263 { 1264 u8 fis[20] = {0}; 1265 struct ata_port *ap = device->sata_dev.ap; 1266 struct ata_link *link; 1267 int rc = TMF_RESP_FUNC_FAILED; 1268 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1269 struct device *dev = hisi_hba->dev; 1270 1271 ata_for_each_link(link, ap, EDGE) { 1272 int pmp = sata_srst_pmp(link); 1273 1274 hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis); 1275 rc = sas_execute_ata_cmd(device, fis, -1); 1276 if (rc != TMF_RESP_FUNC_COMPLETE) 1277 break; 1278 } 1279 1280 if (rc == TMF_RESP_FUNC_COMPLETE) { 1281 ata_for_each_link(link, ap, EDGE) { 1282 int pmp = sata_srst_pmp(link); 1283 1284 hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis); 1285 rc = sas_execute_ata_cmd(device, fis, -1); 1286 if (rc != TMF_RESP_FUNC_COMPLETE) 1287 dev_err(dev, "ata disk %016llx de-reset failed\n", 1288 SAS_ADDR(device->sas_addr)); 1289 } 1290 } else { 1291 dev_err(dev, "ata disk %016llx reset failed\n", 1292 SAS_ADDR(device->sas_addr)); 1293 } 1294 1295 if (rc == TMF_RESP_FUNC_COMPLETE) 1296 hisi_sas_release_task(hisi_hba, device); 1297 1298 return rc; 1299 } 1300 1301 static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba) 1302 { 1303 u32 state = hisi_hba->hw->get_phys_state(hisi_hba); 1304 int i; 1305 1306 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1307 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; 1308 struct domain_device *device = sas_dev->sas_device; 1309 struct asd_sas_port *sas_port; 1310 struct hisi_sas_port *port; 1311 struct hisi_sas_phy *phy = NULL; 1312 struct asd_sas_phy *sas_phy; 1313 1314 if ((sas_dev->dev_type == SAS_PHY_UNUSED) 1315 || !device || !device->port) 1316 continue; 1317 1318 sas_port = device->port; 1319 port = to_hisi_sas_port(sas_port); 1320 1321 spin_lock(&sas_port->phy_list_lock); 1322 list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) 1323 if (state & BIT(sas_phy->id)) { 1324 phy = sas_phy->lldd_phy; 1325 break; 1326 } 1327 spin_unlock(&sas_port->phy_list_lock); 1328 1329 if (phy) { 1330 port->id = phy->port_id; 1331 1332 /* Update linkrate of directly attached device. */ 1333 if (!device->parent) 1334 device->linkrate = phy->sas_phy.linkrate; 1335 1336 hisi_hba->hw->setup_itct(hisi_hba, sas_dev); 1337 } else 1338 port->id = 0xff; 1339 } 1340 } 1341 1342 static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state) 1343 { 1344 struct sas_ha_struct *sas_ha = &hisi_hba->sha; 1345 struct asd_sas_port *_sas_port = NULL; 1346 int phy_no; 1347 1348 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { 1349 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1350 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1351 struct asd_sas_port *sas_port = sas_phy->port; 1352 bool do_port_check = _sas_port != sas_port; 1353 1354 if (!sas_phy->phy->enabled) 1355 continue; 1356 1357 /* Report PHY state change to libsas */ 1358 if (state & BIT(phy_no)) { 1359 if (do_port_check && sas_port && sas_port->port_dev) { 1360 struct domain_device *dev = sas_port->port_dev; 1361 1362 _sas_port = sas_port; 1363 1364 if (dev_is_expander(dev->dev_type)) 1365 sas_notify_port_event(sas_phy, 1366 PORTE_BROADCAST_RCVD, 1367 GFP_KERNEL); 1368 } 1369 } else { 1370 hisi_sas_phy_down(hisi_hba, phy_no, 0, GFP_KERNEL); 1371 } 1372 } 1373 /* 1374 * Ensure any bcast events are processed prior to calling async nexus 1375 * reset calls from hisi_sas_clear_nexus_ha() -> 1376 * hisi_sas_async_I_T_nexus_reset() 1377 */ 1378 sas_drain_work(sas_ha); 1379 } 1380 1381 static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba) 1382 { 1383 struct hisi_sas_device *sas_dev; 1384 struct domain_device *device; 1385 int i; 1386 1387 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1388 sas_dev = &hisi_hba->devices[i]; 1389 device = sas_dev->sas_device; 1390 1391 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device) 1392 continue; 1393 1394 hisi_sas_init_device(device); 1395 } 1396 } 1397 1398 static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba, 1399 struct asd_sas_port *sas_port, 1400 struct domain_device *device) 1401 { 1402 struct ata_port *ap = device->sata_dev.ap; 1403 struct device *dev = hisi_hba->dev; 1404 int rc = TMF_RESP_FUNC_FAILED; 1405 struct ata_link *link; 1406 u8 fis[20] = {0}; 1407 int i; 1408 1409 for (i = 0; i < hisi_hba->n_phy; i++) { 1410 if (!(sas_port->phy_mask & BIT(i))) 1411 continue; 1412 1413 ata_for_each_link(link, ap, EDGE) { 1414 int pmp = sata_srst_pmp(link); 1415 1416 hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis); 1417 rc = sas_execute_ata_cmd(device, fis, i); 1418 if (rc != TMF_RESP_FUNC_COMPLETE) { 1419 dev_err(dev, "phy%d ata reset failed rc=%d\n", 1420 i, rc); 1421 break; 1422 } 1423 } 1424 } 1425 } 1426 1427 static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba) 1428 { 1429 struct device *dev = hisi_hba->dev; 1430 int port_no, rc, i; 1431 1432 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1433 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; 1434 struct domain_device *device = sas_dev->sas_device; 1435 1436 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device) 1437 continue; 1438 1439 rc = hisi_sas_internal_task_abort_dev(sas_dev, false); 1440 if (rc < 0) 1441 dev_err(dev, "STP reject: abort dev failed %d\n", rc); 1442 } 1443 1444 for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) { 1445 struct hisi_sas_port *port = &hisi_hba->port[port_no]; 1446 struct asd_sas_port *sas_port = &port->sas_port; 1447 struct domain_device *port_dev = sas_port->port_dev; 1448 struct domain_device *device; 1449 1450 if (!port_dev || !dev_is_expander(port_dev->dev_type)) 1451 continue; 1452 1453 /* Try to find a SATA device */ 1454 list_for_each_entry(device, &sas_port->dev_list, 1455 dev_list_node) { 1456 if (dev_is_sata(device)) { 1457 hisi_sas_send_ata_reset_each_phy(hisi_hba, 1458 sas_port, 1459 device); 1460 break; 1461 } 1462 } 1463 } 1464 } 1465 1466 void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba) 1467 { 1468 struct Scsi_Host *shost = hisi_hba->shost; 1469 1470 hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba); 1471 1472 scsi_block_requests(shost); 1473 hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000); 1474 1475 del_timer_sync(&hisi_hba->timer); 1476 1477 set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1478 } 1479 EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare); 1480 1481 void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba) 1482 { 1483 struct Scsi_Host *shost = hisi_hba->shost; 1484 1485 /* Init and wait for PHYs to come up and all libsas event finished. */ 1486 hisi_hba->hw->phys_init(hisi_hba); 1487 msleep(1000); 1488 hisi_sas_refresh_port_id(hisi_hba); 1489 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1490 1491 if (hisi_hba->reject_stp_links_msk) 1492 hisi_sas_terminate_stp_reject(hisi_hba); 1493 hisi_sas_reset_init_all_devices(hisi_hba); 1494 scsi_unblock_requests(shost); 1495 clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); 1496 up(&hisi_hba->sem); 1497 1498 hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state); 1499 } 1500 EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done); 1501 1502 static int hisi_sas_controller_prereset(struct hisi_hba *hisi_hba) 1503 { 1504 if (!hisi_hba->hw->soft_reset) 1505 return -1; 1506 1507 down(&hisi_hba->sem); 1508 if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) { 1509 up(&hisi_hba->sem); 1510 return -1; 1511 } 1512 1513 if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct) 1514 hisi_hba->hw->debugfs_snapshot_regs(hisi_hba); 1515 1516 return 0; 1517 } 1518 1519 static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba) 1520 { 1521 struct device *dev = hisi_hba->dev; 1522 struct Scsi_Host *shost = hisi_hba->shost; 1523 int rc; 1524 1525 dev_info(dev, "controller resetting...\n"); 1526 hisi_sas_controller_reset_prepare(hisi_hba); 1527 1528 rc = hisi_hba->hw->soft_reset(hisi_hba); 1529 if (rc) { 1530 dev_warn(dev, "controller reset failed (%d)\n", rc); 1531 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1532 up(&hisi_hba->sem); 1533 scsi_unblock_requests(shost); 1534 clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); 1535 return rc; 1536 } 1537 clear_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags); 1538 1539 hisi_sas_controller_reset_done(hisi_hba); 1540 dev_info(dev, "controller reset complete\n"); 1541 1542 return 0; 1543 } 1544 1545 static int hisi_sas_abort_task(struct sas_task *task) 1546 { 1547 struct hisi_sas_internal_abort_data internal_abort_data = { false }; 1548 struct domain_device *device = task->dev; 1549 struct hisi_sas_device *sas_dev = device->lldd_dev; 1550 struct hisi_hba *hisi_hba; 1551 struct device *dev; 1552 int rc = TMF_RESP_FUNC_FAILED; 1553 unsigned long flags; 1554 1555 if (!sas_dev) 1556 return TMF_RESP_FUNC_FAILED; 1557 1558 hisi_hba = dev_to_hisi_hba(task->dev); 1559 dev = hisi_hba->dev; 1560 1561 spin_lock_irqsave(&task->task_state_lock, flags); 1562 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 1563 struct hisi_sas_slot *slot = task->lldd_task; 1564 struct hisi_sas_cq *cq; 1565 1566 if (slot) { 1567 /* 1568 * sync irq to avoid free'ing task 1569 * before using task in IO completion 1570 */ 1571 cq = &hisi_hba->cq[slot->dlvry_queue]; 1572 synchronize_irq(cq->irq_no); 1573 } 1574 spin_unlock_irqrestore(&task->task_state_lock, flags); 1575 rc = TMF_RESP_FUNC_COMPLETE; 1576 goto out; 1577 } 1578 task->task_state_flags |= SAS_TASK_STATE_ABORTED; 1579 spin_unlock_irqrestore(&task->task_state_lock, flags); 1580 1581 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { 1582 struct hisi_sas_slot *slot = task->lldd_task; 1583 u16 tag = slot->idx; 1584 int rc2; 1585 1586 rc = sas_abort_task(task, tag); 1587 rc2 = sas_execute_internal_abort_single(device, tag, 1588 slot->dlvry_queue, &internal_abort_data); 1589 if (rc2 < 0) { 1590 dev_err(dev, "abort task: internal abort (%d)\n", rc2); 1591 return TMF_RESP_FUNC_FAILED; 1592 } 1593 1594 /* 1595 * If the TMF finds that the IO is not in the device and also 1596 * the internal abort does not succeed, then it is safe to 1597 * free the slot. 1598 * Note: if the internal abort succeeds then the slot 1599 * will have already been completed 1600 */ 1601 if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) { 1602 if (task->lldd_task) 1603 hisi_sas_do_release_task(hisi_hba, task, slot); 1604 } 1605 } else if (task->task_proto & SAS_PROTOCOL_SATA || 1606 task->task_proto & SAS_PROTOCOL_STP) { 1607 if (task->dev->dev_type == SAS_SATA_DEV) { 1608 rc = hisi_sas_internal_task_abort_dev(sas_dev, false); 1609 if (rc < 0) { 1610 dev_err(dev, "abort task: internal abort failed\n"); 1611 goto out; 1612 } 1613 hisi_sas_dereg_device(hisi_hba, device); 1614 rc = hisi_sas_softreset_ata_disk(device); 1615 } 1616 } else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) { 1617 /* SMP */ 1618 struct hisi_sas_slot *slot = task->lldd_task; 1619 u32 tag = slot->idx; 1620 struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue]; 1621 1622 rc = sas_execute_internal_abort_single(device, 1623 tag, slot->dlvry_queue, 1624 &internal_abort_data); 1625 if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) && 1626 task->lldd_task) { 1627 /* 1628 * sync irq to avoid free'ing task 1629 * before using task in IO completion 1630 */ 1631 synchronize_irq(cq->irq_no); 1632 slot->task = NULL; 1633 } 1634 } 1635 1636 out: 1637 if (rc != TMF_RESP_FUNC_COMPLETE) 1638 dev_notice(dev, "abort task: rc=%d\n", rc); 1639 return rc; 1640 } 1641 1642 static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun) 1643 { 1644 struct hisi_sas_device *sas_dev = device->lldd_dev; 1645 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1646 struct device *dev = hisi_hba->dev; 1647 int rc; 1648 1649 rc = hisi_sas_internal_task_abort_dev(sas_dev, false); 1650 if (rc < 0) { 1651 dev_err(dev, "abort task set: internal abort rc=%d\n", rc); 1652 return TMF_RESP_FUNC_FAILED; 1653 } 1654 hisi_sas_dereg_device(hisi_hba, device); 1655 1656 rc = sas_abort_task_set(device, lun); 1657 if (rc == TMF_RESP_FUNC_COMPLETE) 1658 hisi_sas_release_task(hisi_hba, device); 1659 1660 return rc; 1661 } 1662 1663 static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device) 1664 { 1665 struct sas_phy *local_phy = sas_get_local_phy(device); 1666 struct hisi_sas_device *sas_dev = device->lldd_dev; 1667 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1668 struct sas_ha_struct *sas_ha = &hisi_hba->sha; 1669 int rc, reset_type; 1670 1671 if (!local_phy->enabled) { 1672 sas_put_local_phy(local_phy); 1673 return -ENODEV; 1674 } 1675 1676 if (scsi_is_sas_phy_local(local_phy)) { 1677 struct asd_sas_phy *sas_phy = 1678 sas_ha->sas_phy[local_phy->number]; 1679 struct hisi_sas_phy *phy = 1680 container_of(sas_phy, struct hisi_sas_phy, sas_phy); 1681 unsigned long flags; 1682 1683 spin_lock_irqsave(&phy->lock, flags); 1684 phy->in_reset = 1; 1685 spin_unlock_irqrestore(&phy->lock, flags); 1686 } 1687 1688 reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT || 1689 !dev_is_sata(device)) ? true : false; 1690 1691 rc = sas_phy_reset(local_phy, reset_type); 1692 sas_put_local_phy(local_phy); 1693 1694 if (scsi_is_sas_phy_local(local_phy)) { 1695 struct asd_sas_phy *sas_phy = 1696 sas_ha->sas_phy[local_phy->number]; 1697 struct hisi_sas_phy *phy = 1698 container_of(sas_phy, struct hisi_sas_phy, sas_phy); 1699 unsigned long flags; 1700 1701 spin_lock_irqsave(&phy->lock, flags); 1702 phy->in_reset = 0; 1703 spin_unlock_irqrestore(&phy->lock, flags); 1704 1705 /* report PHY down if timed out */ 1706 if (rc == -ETIMEDOUT) 1707 hisi_sas_phy_down(hisi_hba, sas_phy->id, 0, GFP_KERNEL); 1708 return rc; 1709 } 1710 1711 if (rc) 1712 return rc; 1713 1714 /* Remote phy */ 1715 if (dev_is_sata(device)) { 1716 rc = sas_ata_wait_after_reset(device, 1717 HISI_SAS_WAIT_PHYUP_TIMEOUT); 1718 } else { 1719 msleep(2000); 1720 } 1721 1722 return rc; 1723 } 1724 1725 static int hisi_sas_I_T_nexus_reset(struct domain_device *device) 1726 { 1727 struct hisi_sas_device *sas_dev = device->lldd_dev; 1728 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1729 struct device *dev = hisi_hba->dev; 1730 int rc; 1731 1732 rc = hisi_sas_internal_task_abort_dev(sas_dev, false); 1733 if (rc < 0) { 1734 dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc); 1735 return TMF_RESP_FUNC_FAILED; 1736 } 1737 hisi_sas_dereg_device(hisi_hba, device); 1738 1739 rc = hisi_sas_debug_I_T_nexus_reset(device); 1740 if (rc == TMF_RESP_FUNC_COMPLETE && dev_is_sata(device)) { 1741 struct sas_phy *local_phy; 1742 1743 rc = hisi_sas_softreset_ata_disk(device); 1744 switch (rc) { 1745 case -ECOMM: 1746 rc = -ENODEV; 1747 break; 1748 case TMF_RESP_FUNC_FAILED: 1749 case -EMSGSIZE: 1750 case -EIO: 1751 local_phy = sas_get_local_phy(device); 1752 rc = sas_phy_enable(local_phy, 0); 1753 if (!rc) { 1754 local_phy->enabled = 0; 1755 dev_err(dev, "Disabled local phy of ATA disk %016llx due to softreset fail (%d)\n", 1756 SAS_ADDR(device->sas_addr), rc); 1757 rc = -ENODEV; 1758 } 1759 sas_put_local_phy(local_phy); 1760 break; 1761 default: 1762 break; 1763 } 1764 } 1765 1766 if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) 1767 hisi_sas_release_task(hisi_hba, device); 1768 1769 return rc; 1770 } 1771 1772 static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun) 1773 { 1774 struct hisi_sas_device *sas_dev = device->lldd_dev; 1775 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1776 struct device *dev = hisi_hba->dev; 1777 int rc = TMF_RESP_FUNC_FAILED; 1778 1779 /* Clear internal IO and then lu reset */ 1780 rc = hisi_sas_internal_task_abort_dev(sas_dev, false); 1781 if (rc < 0) { 1782 dev_err(dev, "lu_reset: internal abort failed\n"); 1783 goto out; 1784 } 1785 hisi_sas_dereg_device(hisi_hba, device); 1786 1787 if (dev_is_sata(device)) { 1788 struct sas_phy *phy; 1789 1790 phy = sas_get_local_phy(device); 1791 1792 rc = sas_phy_reset(phy, true); 1793 1794 if (rc == 0) 1795 hisi_sas_release_task(hisi_hba, device); 1796 sas_put_local_phy(phy); 1797 } else { 1798 rc = sas_lu_reset(device, lun); 1799 if (rc == TMF_RESP_FUNC_COMPLETE) 1800 hisi_sas_release_task(hisi_hba, device); 1801 } 1802 out: 1803 if (rc != TMF_RESP_FUNC_COMPLETE) 1804 dev_err(dev, "lu_reset: for device[%d]:rc= %d\n", 1805 sas_dev->device_id, rc); 1806 return rc; 1807 } 1808 1809 static void hisi_sas_async_I_T_nexus_reset(void *data, async_cookie_t cookie) 1810 { 1811 struct domain_device *device = data; 1812 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1813 int rc; 1814 1815 rc = hisi_sas_debug_I_T_nexus_reset(device); 1816 if (rc != TMF_RESP_FUNC_COMPLETE) 1817 dev_info(hisi_hba->dev, "I_T_nexus reset fail for dev:%016llx rc=%d\n", 1818 SAS_ADDR(device->sas_addr), rc); 1819 } 1820 1821 static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha) 1822 { 1823 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; 1824 HISI_SAS_DECLARE_RST_WORK_ON_STACK(r); 1825 ASYNC_DOMAIN_EXCLUSIVE(async); 1826 int i, ret; 1827 1828 queue_work(hisi_hba->wq, &r.work); 1829 wait_for_completion(r.completion); 1830 if (!r.done) { 1831 ret = TMF_RESP_FUNC_FAILED; 1832 goto out; 1833 } 1834 1835 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1836 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; 1837 struct domain_device *device = sas_dev->sas_device; 1838 1839 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device || 1840 dev_is_expander(device->dev_type)) 1841 continue; 1842 1843 async_schedule_domain(hisi_sas_async_I_T_nexus_reset, 1844 device, &async); 1845 } 1846 1847 async_synchronize_full_domain(&async); 1848 hisi_sas_release_tasks(hisi_hba); 1849 1850 ret = TMF_RESP_FUNC_COMPLETE; 1851 out: 1852 return ret; 1853 } 1854 1855 static int hisi_sas_query_task(struct sas_task *task) 1856 { 1857 int rc = TMF_RESP_FUNC_FAILED; 1858 1859 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { 1860 struct hisi_sas_slot *slot = task->lldd_task; 1861 u32 tag = slot->idx; 1862 1863 rc = sas_query_task(task, tag); 1864 switch (rc) { 1865 /* The task is still in Lun, release it then */ 1866 case TMF_RESP_FUNC_SUCC: 1867 /* The task is not in Lun or failed, reset the phy */ 1868 case TMF_RESP_FUNC_FAILED: 1869 case TMF_RESP_FUNC_COMPLETE: 1870 break; 1871 default: 1872 rc = TMF_RESP_FUNC_FAILED; 1873 break; 1874 } 1875 } 1876 return rc; 1877 } 1878 1879 static bool hisi_sas_internal_abort_timeout(struct sas_task *task, 1880 void *data) 1881 { 1882 struct domain_device *device = task->dev; 1883 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1884 struct hisi_sas_internal_abort_data *timeout = data; 1885 1886 if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct) 1887 queue_work(hisi_hba->wq, &hisi_hba->debugfs_work); 1888 1889 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 1890 pr_err("Internal abort: timeout %016llx\n", 1891 SAS_ADDR(device->sas_addr)); 1892 } else { 1893 struct hisi_sas_slot *slot = task->lldd_task; 1894 1895 set_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags); 1896 1897 if (slot) { 1898 struct hisi_sas_cq *cq = 1899 &hisi_hba->cq[slot->dlvry_queue]; 1900 /* 1901 * sync irq to avoid free'ing task 1902 * before using task in IO completion 1903 */ 1904 synchronize_irq(cq->irq_no); 1905 slot->task = NULL; 1906 } 1907 1908 if (timeout->rst_ha_timeout) { 1909 pr_err("Internal abort: timeout and not done %016llx. Queuing reset.\n", 1910 SAS_ADDR(device->sas_addr)); 1911 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 1912 } else { 1913 pr_err("Internal abort: timeout and not done %016llx.\n", 1914 SAS_ADDR(device->sas_addr)); 1915 } 1916 1917 return true; 1918 } 1919 1920 return false; 1921 } 1922 1923 static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy) 1924 { 1925 hisi_sas_port_notify_formed(sas_phy); 1926 } 1927 1928 static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type, 1929 u8 reg_index, u8 reg_count, u8 *write_data) 1930 { 1931 struct hisi_hba *hisi_hba = sha->lldd_ha; 1932 1933 if (!hisi_hba->hw->write_gpio) 1934 return -EOPNOTSUPP; 1935 1936 return hisi_hba->hw->write_gpio(hisi_hba, reg_type, 1937 reg_index, reg_count, write_data); 1938 } 1939 1940 static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy) 1941 { 1942 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1943 struct sas_phy *sphy = sas_phy->phy; 1944 unsigned long flags; 1945 1946 phy->phy_attached = 0; 1947 phy->phy_type = 0; 1948 phy->port = NULL; 1949 1950 spin_lock_irqsave(&phy->lock, flags); 1951 if (phy->enable) 1952 sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; 1953 else 1954 sphy->negotiated_linkrate = SAS_PHY_DISABLED; 1955 spin_unlock_irqrestore(&phy->lock, flags); 1956 } 1957 1958 void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy, 1959 gfp_t gfp_flags) 1960 { 1961 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1962 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1963 struct device *dev = hisi_hba->dev; 1964 1965 if (rdy) { 1966 /* Phy down but ready */ 1967 hisi_sas_bytes_dmaed(hisi_hba, phy_no, gfp_flags); 1968 hisi_sas_port_notify_formed(sas_phy); 1969 } else { 1970 struct hisi_sas_port *port = phy->port; 1971 1972 if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) || 1973 phy->in_reset) { 1974 dev_info(dev, "ignore flutter phy%d down\n", phy_no); 1975 return; 1976 } 1977 /* Phy down and not ready */ 1978 sas_notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL, gfp_flags); 1979 sas_phy_disconnected(sas_phy); 1980 1981 if (port) { 1982 if (phy->phy_type & PORT_TYPE_SAS) { 1983 int port_id = port->id; 1984 1985 if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba, 1986 port_id)) 1987 port->port_attached = 0; 1988 } else if (phy->phy_type & PORT_TYPE_SATA) 1989 port->port_attached = 0; 1990 } 1991 hisi_sas_phy_disconnected(phy); 1992 } 1993 } 1994 EXPORT_SYMBOL_GPL(hisi_sas_phy_down); 1995 1996 void hisi_sas_phy_bcast(struct hisi_sas_phy *phy) 1997 { 1998 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1999 struct hisi_hba *hisi_hba = phy->hisi_hba; 2000 struct sas_ha_struct *sha = &hisi_hba->sha; 2001 2002 if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) 2003 return; 2004 2005 if (test_bit(SAS_HA_FROZEN, &sha->state)) 2006 return; 2007 2008 sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, GFP_ATOMIC); 2009 } 2010 EXPORT_SYMBOL_GPL(hisi_sas_phy_bcast); 2011 2012 void hisi_sas_sync_irqs(struct hisi_hba *hisi_hba) 2013 { 2014 int i; 2015 2016 for (i = 0; i < hisi_hba->cq_nvecs; i++) { 2017 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 2018 2019 synchronize_irq(cq->irq_no); 2020 } 2021 } 2022 EXPORT_SYMBOL_GPL(hisi_sas_sync_irqs); 2023 2024 int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type) 2025 { 2026 struct hisi_hba *hisi_hba = shost_priv(shost); 2027 2028 if (reset_type != SCSI_ADAPTER_RESET) 2029 return -EOPNOTSUPP; 2030 2031 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 2032 2033 return 0; 2034 } 2035 EXPORT_SYMBOL_GPL(hisi_sas_host_reset); 2036 2037 struct scsi_transport_template *hisi_sas_stt; 2038 EXPORT_SYMBOL_GPL(hisi_sas_stt); 2039 2040 static struct sas_domain_function_template hisi_sas_transport_ops = { 2041 .lldd_dev_found = hisi_sas_dev_found, 2042 .lldd_dev_gone = hisi_sas_dev_gone, 2043 .lldd_execute_task = hisi_sas_queue_command, 2044 .lldd_control_phy = hisi_sas_control_phy, 2045 .lldd_abort_task = hisi_sas_abort_task, 2046 .lldd_abort_task_set = hisi_sas_abort_task_set, 2047 .lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset, 2048 .lldd_lu_reset = hisi_sas_lu_reset, 2049 .lldd_query_task = hisi_sas_query_task, 2050 .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha, 2051 .lldd_port_formed = hisi_sas_port_formed, 2052 .lldd_write_gpio = hisi_sas_write_gpio, 2053 .lldd_tmf_aborted = hisi_sas_tmf_aborted, 2054 .lldd_abort_timeout = hisi_sas_internal_abort_timeout, 2055 }; 2056 2057 void hisi_sas_init_mem(struct hisi_hba *hisi_hba) 2058 { 2059 int i, s, j, max_command_entries = HISI_SAS_MAX_COMMANDS; 2060 struct hisi_sas_breakpoint *sata_breakpoint = hisi_hba->sata_breakpoint; 2061 2062 for (i = 0; i < hisi_hba->queue_count; i++) { 2063 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 2064 struct hisi_sas_dq *dq = &hisi_hba->dq[i]; 2065 struct hisi_sas_cmd_hdr *cmd_hdr = hisi_hba->cmd_hdr[i]; 2066 2067 s = sizeof(struct hisi_sas_cmd_hdr); 2068 for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++) 2069 memset(&cmd_hdr[j], 0, s); 2070 2071 dq->wr_point = 0; 2072 2073 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; 2074 memset(hisi_hba->complete_hdr[i], 0, s); 2075 cq->rd_point = 0; 2076 } 2077 2078 s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy; 2079 memset(hisi_hba->initial_fis, 0, s); 2080 2081 s = max_command_entries * sizeof(struct hisi_sas_iost); 2082 memset(hisi_hba->iost, 0, s); 2083 2084 s = max_command_entries * sizeof(struct hisi_sas_breakpoint); 2085 memset(hisi_hba->breakpoint, 0, s); 2086 2087 s = sizeof(struct hisi_sas_sata_breakpoint); 2088 for (j = 0; j < HISI_SAS_MAX_ITCT_ENTRIES; j++) 2089 memset(&sata_breakpoint[j], 0, s); 2090 } 2091 EXPORT_SYMBOL_GPL(hisi_sas_init_mem); 2092 2093 int hisi_sas_alloc(struct hisi_hba *hisi_hba) 2094 { 2095 struct device *dev = hisi_hba->dev; 2096 int i, j, s, max_command_entries = HISI_SAS_MAX_COMMANDS; 2097 int max_command_entries_ru, sz_slot_buf_ru; 2098 int blk_cnt, slots_per_blk; 2099 2100 sema_init(&hisi_hba->sem, 1); 2101 spin_lock_init(&hisi_hba->lock); 2102 for (i = 0; i < hisi_hba->n_phy; i++) { 2103 hisi_sas_phy_init(hisi_hba, i); 2104 hisi_hba->port[i].port_attached = 0; 2105 hisi_hba->port[i].id = -1; 2106 } 2107 2108 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 2109 hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED; 2110 hisi_hba->devices[i].device_id = i; 2111 hisi_hba->devices[i].dev_status = HISI_SAS_DEV_INIT; 2112 } 2113 2114 for (i = 0; i < hisi_hba->queue_count; i++) { 2115 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 2116 struct hisi_sas_dq *dq = &hisi_hba->dq[i]; 2117 2118 /* Completion queue structure */ 2119 cq->id = i; 2120 cq->hisi_hba = hisi_hba; 2121 2122 /* Delivery queue structure */ 2123 spin_lock_init(&dq->lock); 2124 INIT_LIST_HEAD(&dq->list); 2125 dq->id = i; 2126 dq->hisi_hba = hisi_hba; 2127 2128 /* Delivery queue */ 2129 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS; 2130 hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s, 2131 &hisi_hba->cmd_hdr_dma[i], 2132 GFP_KERNEL); 2133 if (!hisi_hba->cmd_hdr[i]) 2134 goto err_out; 2135 2136 /* Completion queue */ 2137 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; 2138 hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s, 2139 &hisi_hba->complete_hdr_dma[i], 2140 GFP_KERNEL); 2141 if (!hisi_hba->complete_hdr[i]) 2142 goto err_out; 2143 } 2144 2145 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct); 2146 hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma, 2147 GFP_KERNEL); 2148 if (!hisi_hba->itct) 2149 goto err_out; 2150 2151 hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries, 2152 sizeof(struct hisi_sas_slot), 2153 GFP_KERNEL); 2154 if (!hisi_hba->slot_info) 2155 goto err_out; 2156 2157 /* roundup to avoid overly large block size */ 2158 max_command_entries_ru = roundup(max_command_entries, 64); 2159 if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK) 2160 sz_slot_buf_ru = sizeof(struct hisi_sas_slot_dif_buf_table); 2161 else 2162 sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table); 2163 sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64); 2164 s = max(lcm(max_command_entries_ru, sz_slot_buf_ru), PAGE_SIZE); 2165 blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s; 2166 slots_per_blk = s / sz_slot_buf_ru; 2167 2168 for (i = 0; i < blk_cnt; i++) { 2169 int slot_index = i * slots_per_blk; 2170 dma_addr_t buf_dma; 2171 void *buf; 2172 2173 buf = dmam_alloc_coherent(dev, s, &buf_dma, 2174 GFP_KERNEL); 2175 if (!buf) 2176 goto err_out; 2177 2178 for (j = 0; j < slots_per_blk; j++, slot_index++) { 2179 struct hisi_sas_slot *slot; 2180 2181 slot = &hisi_hba->slot_info[slot_index]; 2182 slot->buf = buf; 2183 slot->buf_dma = buf_dma; 2184 slot->idx = slot_index; 2185 2186 buf += sz_slot_buf_ru; 2187 buf_dma += sz_slot_buf_ru; 2188 } 2189 } 2190 2191 s = max_command_entries * sizeof(struct hisi_sas_iost); 2192 hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma, 2193 GFP_KERNEL); 2194 if (!hisi_hba->iost) 2195 goto err_out; 2196 2197 s = max_command_entries * sizeof(struct hisi_sas_breakpoint); 2198 hisi_hba->breakpoint = dmam_alloc_coherent(dev, s, 2199 &hisi_hba->breakpoint_dma, 2200 GFP_KERNEL); 2201 if (!hisi_hba->breakpoint) 2202 goto err_out; 2203 2204 s = hisi_hba->slot_index_count = max_command_entries; 2205 hisi_hba->slot_index_tags = devm_bitmap_zalloc(dev, s, GFP_KERNEL); 2206 if (!hisi_hba->slot_index_tags) 2207 goto err_out; 2208 2209 s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS; 2210 hisi_hba->initial_fis = dmam_alloc_coherent(dev, s, 2211 &hisi_hba->initial_fis_dma, 2212 GFP_KERNEL); 2213 if (!hisi_hba->initial_fis) 2214 goto err_out; 2215 2216 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint); 2217 hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s, 2218 &hisi_hba->sata_breakpoint_dma, 2219 GFP_KERNEL); 2220 if (!hisi_hba->sata_breakpoint) 2221 goto err_out; 2222 2223 hisi_hba->last_slot_index = HISI_SAS_UNRESERVED_IPTT; 2224 2225 hisi_hba->wq = create_singlethread_workqueue(dev_name(dev)); 2226 if (!hisi_hba->wq) { 2227 dev_err(dev, "sas_alloc: failed to create workqueue\n"); 2228 goto err_out; 2229 } 2230 2231 return 0; 2232 err_out: 2233 return -ENOMEM; 2234 } 2235 EXPORT_SYMBOL_GPL(hisi_sas_alloc); 2236 2237 void hisi_sas_free(struct hisi_hba *hisi_hba) 2238 { 2239 int i; 2240 2241 for (i = 0; i < hisi_hba->n_phy; i++) { 2242 struct hisi_sas_phy *phy = &hisi_hba->phy[i]; 2243 2244 del_timer_sync(&phy->timer); 2245 } 2246 2247 if (hisi_hba->wq) 2248 destroy_workqueue(hisi_hba->wq); 2249 } 2250 EXPORT_SYMBOL_GPL(hisi_sas_free); 2251 2252 void hisi_sas_rst_work_handler(struct work_struct *work) 2253 { 2254 struct hisi_hba *hisi_hba = 2255 container_of(work, struct hisi_hba, rst_work); 2256 2257 if (hisi_sas_controller_prereset(hisi_hba)) 2258 return; 2259 2260 hisi_sas_controller_reset(hisi_hba); 2261 } 2262 EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler); 2263 2264 void hisi_sas_sync_rst_work_handler(struct work_struct *work) 2265 { 2266 struct hisi_sas_rst *rst = 2267 container_of(work, struct hisi_sas_rst, work); 2268 2269 if (hisi_sas_controller_prereset(rst->hisi_hba)) 2270 goto rst_complete; 2271 2272 if (!hisi_sas_controller_reset(rst->hisi_hba)) 2273 rst->done = true; 2274 rst_complete: 2275 complete(rst->completion); 2276 } 2277 EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler); 2278 2279 int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba) 2280 { 2281 struct device *dev = hisi_hba->dev; 2282 struct platform_device *pdev = hisi_hba->platform_dev; 2283 struct device_node *np = pdev ? pdev->dev.of_node : NULL; 2284 struct clk *refclk; 2285 2286 if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr, 2287 SAS_ADDR_SIZE)) { 2288 dev_err(dev, "could not get property sas-addr\n"); 2289 return -ENOENT; 2290 } 2291 2292 if (np) { 2293 /* 2294 * These properties are only required for platform device-based 2295 * controller with DT firmware. 2296 */ 2297 hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np, 2298 "hisilicon,sas-syscon"); 2299 if (IS_ERR(hisi_hba->ctrl)) { 2300 dev_err(dev, "could not get syscon\n"); 2301 return -ENOENT; 2302 } 2303 2304 if (device_property_read_u32(dev, "ctrl-reset-reg", 2305 &hisi_hba->ctrl_reset_reg)) { 2306 dev_err(dev, "could not get property ctrl-reset-reg\n"); 2307 return -ENOENT; 2308 } 2309 2310 if (device_property_read_u32(dev, "ctrl-reset-sts-reg", 2311 &hisi_hba->ctrl_reset_sts_reg)) { 2312 dev_err(dev, "could not get property ctrl-reset-sts-reg\n"); 2313 return -ENOENT; 2314 } 2315 2316 if (device_property_read_u32(dev, "ctrl-clock-ena-reg", 2317 &hisi_hba->ctrl_clock_ena_reg)) { 2318 dev_err(dev, "could not get property ctrl-clock-ena-reg\n"); 2319 return -ENOENT; 2320 } 2321 } 2322 2323 refclk = devm_clk_get(dev, NULL); 2324 if (IS_ERR(refclk)) 2325 dev_dbg(dev, "no ref clk property\n"); 2326 else 2327 hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000; 2328 2329 if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) { 2330 dev_err(dev, "could not get property phy-count\n"); 2331 return -ENOENT; 2332 } 2333 2334 if (device_property_read_u32(dev, "queue-count", 2335 &hisi_hba->queue_count)) { 2336 dev_err(dev, "could not get property queue-count\n"); 2337 return -ENOENT; 2338 } 2339 2340 return 0; 2341 } 2342 EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info); 2343 2344 static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, 2345 const struct hisi_sas_hw *hw) 2346 { 2347 struct resource *res; 2348 struct Scsi_Host *shost; 2349 struct hisi_hba *hisi_hba; 2350 struct device *dev = &pdev->dev; 2351 int error; 2352 2353 shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba)); 2354 if (!shost) { 2355 dev_err(dev, "scsi host alloc failed\n"); 2356 return NULL; 2357 } 2358 hisi_hba = shost_priv(shost); 2359 2360 INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler); 2361 hisi_hba->hw = hw; 2362 hisi_hba->dev = dev; 2363 hisi_hba->platform_dev = pdev; 2364 hisi_hba->shost = shost; 2365 SHOST_TO_SAS_HA(shost) = &hisi_hba->sha; 2366 2367 timer_setup(&hisi_hba->timer, NULL, 0); 2368 2369 if (hisi_sas_get_fw_info(hisi_hba) < 0) 2370 goto err_out; 2371 2372 error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 2373 if (error) { 2374 dev_err(dev, "No usable DMA addressing method\n"); 2375 goto err_out; 2376 } 2377 2378 hisi_hba->regs = devm_platform_ioremap_resource(pdev, 0); 2379 if (IS_ERR(hisi_hba->regs)) 2380 goto err_out; 2381 2382 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 2383 if (res) { 2384 hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res); 2385 if (IS_ERR(hisi_hba->sgpio_regs)) 2386 goto err_out; 2387 } 2388 2389 if (hisi_sas_alloc(hisi_hba)) { 2390 hisi_sas_free(hisi_hba); 2391 goto err_out; 2392 } 2393 2394 return shost; 2395 err_out: 2396 scsi_host_put(shost); 2397 dev_err(dev, "shost alloc failed\n"); 2398 return NULL; 2399 } 2400 2401 static int hisi_sas_interrupt_preinit(struct hisi_hba *hisi_hba) 2402 { 2403 if (hisi_hba->hw->interrupt_preinit) 2404 return hisi_hba->hw->interrupt_preinit(hisi_hba); 2405 return 0; 2406 } 2407 2408 int hisi_sas_probe(struct platform_device *pdev, 2409 const struct hisi_sas_hw *hw) 2410 { 2411 struct Scsi_Host *shost; 2412 struct hisi_hba *hisi_hba; 2413 struct device *dev = &pdev->dev; 2414 struct asd_sas_phy **arr_phy; 2415 struct asd_sas_port **arr_port; 2416 struct sas_ha_struct *sha; 2417 int rc, phy_nr, port_nr, i; 2418 2419 shost = hisi_sas_shost_alloc(pdev, hw); 2420 if (!shost) 2421 return -ENOMEM; 2422 2423 sha = SHOST_TO_SAS_HA(shost); 2424 hisi_hba = shost_priv(shost); 2425 platform_set_drvdata(pdev, sha); 2426 2427 phy_nr = port_nr = hisi_hba->n_phy; 2428 2429 arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL); 2430 arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL); 2431 if (!arr_phy || !arr_port) { 2432 rc = -ENOMEM; 2433 goto err_out_ha; 2434 } 2435 2436 sha->sas_phy = arr_phy; 2437 sha->sas_port = arr_port; 2438 sha->lldd_ha = hisi_hba; 2439 2440 shost->transportt = hisi_sas_stt; 2441 shost->max_id = HISI_SAS_MAX_DEVICES; 2442 shost->max_lun = ~0; 2443 shost->max_channel = 1; 2444 shost->max_cmd_len = 16; 2445 if (hisi_hba->hw->slot_index_alloc) { 2446 shost->can_queue = HISI_SAS_MAX_COMMANDS; 2447 shost->cmd_per_lun = HISI_SAS_MAX_COMMANDS; 2448 } else { 2449 shost->can_queue = HISI_SAS_UNRESERVED_IPTT; 2450 shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT; 2451 } 2452 2453 sha->sas_ha_name = DRV_NAME; 2454 sha->dev = hisi_hba->dev; 2455 sha->lldd_module = THIS_MODULE; 2456 sha->sas_addr = &hisi_hba->sas_addr[0]; 2457 sha->num_phys = hisi_hba->n_phy; 2458 sha->core.shost = hisi_hba->shost; 2459 2460 for (i = 0; i < hisi_hba->n_phy; i++) { 2461 sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy; 2462 sha->sas_port[i] = &hisi_hba->port[i].sas_port; 2463 } 2464 2465 rc = hisi_sas_interrupt_preinit(hisi_hba); 2466 if (rc) 2467 goto err_out_ha; 2468 2469 rc = scsi_add_host(shost, &pdev->dev); 2470 if (rc) 2471 goto err_out_ha; 2472 2473 rc = sas_register_ha(sha); 2474 if (rc) 2475 goto err_out_register_ha; 2476 2477 rc = hisi_hba->hw->hw_init(hisi_hba); 2478 if (rc) 2479 goto err_out_hw_init; 2480 2481 scsi_scan_host(shost); 2482 2483 return 0; 2484 2485 err_out_hw_init: 2486 sas_unregister_ha(sha); 2487 err_out_register_ha: 2488 scsi_remove_host(shost); 2489 err_out_ha: 2490 hisi_sas_free(hisi_hba); 2491 scsi_host_put(shost); 2492 return rc; 2493 } 2494 EXPORT_SYMBOL_GPL(hisi_sas_probe); 2495 2496 int hisi_sas_remove(struct platform_device *pdev) 2497 { 2498 struct sas_ha_struct *sha = platform_get_drvdata(pdev); 2499 struct hisi_hba *hisi_hba = sha->lldd_ha; 2500 struct Scsi_Host *shost = sha->core.shost; 2501 2502 del_timer_sync(&hisi_hba->timer); 2503 2504 sas_unregister_ha(sha); 2505 sas_remove_host(sha->core.shost); 2506 2507 hisi_sas_free(hisi_hba); 2508 scsi_host_put(shost); 2509 return 0; 2510 } 2511 EXPORT_SYMBOL_GPL(hisi_sas_remove); 2512 2513 #if IS_ENABLED(CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE) 2514 #define DEBUGFS_ENABLE_DEFAULT "enabled" 2515 bool hisi_sas_debugfs_enable = true; 2516 u32 hisi_sas_debugfs_dump_count = 50; 2517 #else 2518 #define DEBUGFS_ENABLE_DEFAULT "disabled" 2519 bool hisi_sas_debugfs_enable; 2520 u32 hisi_sas_debugfs_dump_count = 1; 2521 #endif 2522 2523 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable); 2524 module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444); 2525 MODULE_PARM_DESC(hisi_sas_debugfs_enable, 2526 "Enable driver debugfs (default "DEBUGFS_ENABLE_DEFAULT")"); 2527 2528 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dump_count); 2529 module_param_named(debugfs_dump_count, hisi_sas_debugfs_dump_count, uint, 0444); 2530 MODULE_PARM_DESC(hisi_sas_debugfs_dump_count, "Number of debugfs dumps to allow"); 2531 2532 struct dentry *hisi_sas_debugfs_dir; 2533 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dir); 2534 2535 static __init int hisi_sas_init(void) 2536 { 2537 hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops); 2538 if (!hisi_sas_stt) 2539 return -ENOMEM; 2540 2541 if (hisi_sas_debugfs_enable) { 2542 hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL); 2543 if (hisi_sas_debugfs_dump_count > HISI_SAS_MAX_DEBUGFS_DUMP) { 2544 pr_info("hisi_sas: Limiting debugfs dump count\n"); 2545 hisi_sas_debugfs_dump_count = HISI_SAS_MAX_DEBUGFS_DUMP; 2546 } 2547 } 2548 2549 return 0; 2550 } 2551 2552 static __exit void hisi_sas_exit(void) 2553 { 2554 sas_release_transport(hisi_sas_stt); 2555 2556 debugfs_remove(hisi_sas_debugfs_dir); 2557 } 2558 2559 module_init(hisi_sas_init); 2560 module_exit(hisi_sas_exit); 2561 2562 MODULE_LICENSE("GPL"); 2563 MODULE_AUTHOR("John Garry <john.garry@huawei.com>"); 2564 MODULE_DESCRIPTION("HISILICON SAS controller driver"); 2565 MODULE_ALIAS("platform:" DRV_NAME); 2566