// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2015 Linaro Ltd.
 * Copyright (c) 2015 Hisilicon Limited.
 */

#include "hisi_sas.h"
#define DRV_NAME "hisi_sas"

#define DEV_IS_GONE(dev) \
	((!dev) || (dev->dev_type == SAS_PHY_UNUSED))

static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
					u8 *lun, struct hisi_sas_tmf_task *tmf);
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag, bool rst_to_recover);
static int hisi_sas_softreset_ata_disk(struct domain_device *device);
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata);
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
				  struct domain_device *device);
static void hisi_sas_dev_gone(struct domain_device *device);

u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
	switch (fis->command) {
	case ATA_CMD_FPDMA_WRITE:
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_RECV:
	case ATA_CMD_FPDMA_SEND:
	case ATA_CMD_NCQ_NON_DATA:
		return HISI_SAS_SATA_PROTOCOL_FPDMA;

	case ATA_CMD_DOWNLOAD_MICRO:
	case ATA_CMD_ID_ATA:
	case ATA_CMD_PMP_READ:
	case ATA_CMD_READ_LOG_EXT:
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
	case ATA_CMD_PMP_WRITE:
	case ATA_CMD_WRITE_LOG_EXT:
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		return HISI_SAS_SATA_PROTOCOL_PIO;

	case ATA_CMD_DSM:
	case ATA_CMD_DOWNLOAD_MICRO_DMA:
	case ATA_CMD_PMP_READ_DMA:
	case ATA_CMD_PMP_WRITE_DMA:
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_READ_LOG_DMA_EXT:
	case ATA_CMD_READ_STREAM_DMA_EXT:
	case ATA_CMD_TRUSTED_RCV_DMA:
	case ATA_CMD_TRUSTED_SND_DMA:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
	case ATA_CMD_WRITE_QUEUED:
	case ATA_CMD_WRITE_LOG_DMA_EXT:
	case ATA_CMD_WRITE_STREAM_DMA_EXT:
	case ATA_CMD_ZAC_MGMT_IN:
		return HISI_SAS_SATA_PROTOCOL_DMA;

	case ATA_CMD_CHK_POWER:
	case ATA_CMD_DEV_RESET:
	case ATA_CMD_EDD:
	case ATA_CMD_FLUSH:
	case ATA_CMD_FLUSH_EXT:
	case ATA_CMD_VERIFY:
	case ATA_CMD_VERIFY_EXT:
	case ATA_CMD_SET_FEATURES:
	case ATA_CMD_STANDBY:
	case ATA_CMD_STANDBYNOW1:
	case ATA_CMD_ZAC_MGMT_OUT:
		return HISI_SAS_SATA_PROTOCOL_NONDATA;

	case ATA_CMD_SET_MAX:
		switch (fis->features) {
		case ATA_SET_MAX_PASSWD:
		case ATA_SET_MAX_LOCK:
			return HISI_SAS_SATA_PROTOCOL_PIO;

		case ATA_SET_MAX_PASSWD_DMA:
		case ATA_SET_MAX_UNLOCK_DMA:
			return HISI_SAS_SATA_PROTOCOL_DMA;

		default:
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		}

	default:
	{
		if (direction == DMA_NONE)
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		return HISI_SAS_SATA_PROTOCOL_PIO;
	}
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);

void hisi_sas_sata_done(struct sas_task *task,
			struct hisi_sas_slot *slot)
{
	struct task_status_struct *ts = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
	struct hisi_sas_status_buffer *status_buf =
			hisi_sas_status_buf_addr_mem(slot);
	u8 *iu = &status_buf->iu[0];
	struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;

	resp->frame_len = sizeof(struct dev_to_host_fis);
	memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));

	ts->buf_valid_size = sizeof(*resp);
}
EXPORT_SYMBOL_GPL(hisi_sas_sata_done);

/*
 * This function assumes linkrate mask fits in 8 bits, which it
 * does for all HW versions supported.
 */
u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)
{
	u8 rate = 0;
	int i;

	max -= SAS_LINK_RATE_1_5_GBPS;
	for (i = 0; i <= max; i++)
		rate |= 1 << (i * 2);
	return rate;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask);
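/*
 * Illustrative note (not from the HW spec): the mask above packs one
 * enable bit per rate at even bit positions. For example, with
 * max == SAS_LINK_RATE_6_0_GBPS the loop runs for i = 0..2 and returns
 * 0x15, i.e. bits 0/2/4 set for 1.5/3.0/6.0 Gbit/s.
 */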
static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
{
	return device->port->ha->lldd_ha;
}

struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
{
	return container_of(sas_port, struct hisi_sas_port, sas_port);
}
EXPORT_SYMBOL_GPL(to_hisi_sas_port);

void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
{
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
}
EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);

static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	__clear_bit(slot_idx, bitmap);
}

static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
	if (hisi_hba->hw->slot_index_alloc ||
	    slot_idx >= HISI_SAS_UNRESERVED_IPTT) {
		spin_lock(&hisi_hba->lock);
		hisi_sas_slot_index_clear(hisi_hba, slot_idx);
		spin_unlock(&hisi_hba->lock);
	}
}

static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	__set_bit(slot_idx, bitmap);
}
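/*
 * Tag (IPTT) allocation, as implemented below: a scsi_cmnd simply reuses
 * its blk-mq request tag, which by construction is unique and below
 * HISI_SAS_UNRESERVED_IPTT; tagless (internal) commands are allocated
 * round-robin from the driver-private bitmap at or above
 * HISI_SAS_UNRESERVED_IPTT, so the two ranges never collide.
 */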
static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
				     struct scsi_cmnd *scsi_cmnd)
{
	int index;
	void *bitmap = hisi_hba->slot_index_tags;

	if (scsi_cmnd)
		return scsi_cmd_to_rq(scsi_cmnd)->tag;

	spin_lock(&hisi_hba->lock);
	index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
				   hisi_hba->last_slot_index + 1);
	if (index >= hisi_hba->slot_index_count) {
		index = find_next_zero_bit(bitmap,
				hisi_hba->slot_index_count,
				HISI_SAS_UNRESERVED_IPTT);
		if (index >= hisi_hba->slot_index_count) {
			spin_unlock(&hisi_hba->lock);
			return -SAS_QUEUE_FULL;
		}
	}
	hisi_sas_slot_index_set(hisi_hba, index);
	hisi_hba->last_slot_index = index;
	spin_unlock(&hisi_hba->lock);

	return index;
}

void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
			     struct hisi_sas_slot *slot)
{
	int device_id = slot->device_id;
	struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id];

	if (task) {
		struct device *dev = hisi_hba->dev;

		if (!task->lldd_task)
			return;

		task->lldd_task = NULL;

		if (!sas_protocol_ata(task->task_proto)) {
			if (slot->n_elem)
				dma_unmap_sg(dev, task->scatter,
					     task->num_scatter,
					     task->data_dir);
			if (slot->n_elem_dif) {
				struct sas_ssp_task *ssp_task = &task->ssp_task;
				struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;

				dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
					     scsi_prot_sg_count(scsi_cmnd),
					     task->data_dir);
			}
		}
	}

	spin_lock(&sas_dev->lock);
	list_del_init(&slot->entry);
	spin_unlock(&sas_dev->lock);

	memset(slot, 0, offsetof(struct hisi_sas_slot, buf));

	hisi_sas_slot_index_free(hisi_hba, slot->idx);
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);

static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_smp(hisi_hba, slot);
}

static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_ssp(hisi_hba, slot);
}

static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_stp(hisi_hba, slot);
}

static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
				     struct hisi_sas_internal_abort *abort,
				     struct hisi_sas_slot *slot, int device_id)
{
	hisi_hba->hw->prep_abort(hisi_hba, slot,
				 device_id, abort->flag, abort->tag);
}

static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba,
			       struct sas_task *task, int n_elem,
			       int n_elem_req)
{
	struct device *dev = hisi_hba->dev;

	if (!sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter) {
			if (n_elem)
				dma_unmap_sg(dev, task->scatter,
					     task->num_scatter,
					     task->data_dir);
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			if (n_elem_req)
				dma_unmap_sg(dev, &task->smp_task.smp_req,
					     1, DMA_TO_DEVICE);
		}
	}
}
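/*
 * Mapping rules below: ATA tasks are not mapped here (only the element
 * count is recorded, since libata/libsas has already handled them), SSP
 * tasks map the data scatterlist, and SMP tasks map the single-entry
 * request frame. The request length is presumably required to be
 * dword-aligned by the HW, hence the (req_len & 0x3) check.
 */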
static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
			    struct sas_task *task, int *n_elem,
			    int *n_elem_req)
{
	struct device *dev = hisi_hba->dev;
	int rc;

	if (sas_protocol_ata(task->task_proto)) {
		*n_elem = task->num_scatter;
	} else {
		unsigned int req_len;

		if (task->num_scatter) {
			*n_elem = dma_map_sg(dev, task->scatter,
					     task->num_scatter, task->data_dir);
			if (!*n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			*n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
						 1, DMA_TO_DEVICE);
			if (!*n_elem_req) {
				rc = -ENOMEM;
				goto prep_out;
			}
			req_len = sg_dma_len(&task->smp_task.smp_req);
			if (req_len & 0x3) {
				rc = -EINVAL;
				goto err_out_dma_unmap;
			}
		}
	}

	if (*n_elem > HISI_SAS_SGE_PAGE_CNT) {
		dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT\n",
			*n_elem);
		rc = -EINVAL;
		goto err_out_dma_unmap;
	}
	return 0;

err_out_dma_unmap:
	/* It would be better to call dma_unmap_sg() here, but it's messy */
	hisi_sas_dma_unmap(hisi_hba, task, *n_elem,
			   *n_elem_req);
prep_out:
	return rc;
}

static void hisi_sas_dif_dma_unmap(struct hisi_hba *hisi_hba,
				   struct sas_task *task, int n_elem_dif)
{
	struct device *dev = hisi_hba->dev;

	if (n_elem_dif) {
		struct sas_ssp_task *ssp_task = &task->ssp_task;
		struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;

		dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
			     scsi_prot_sg_count(scsi_cmnd),
			     task->data_dir);
	}
}

static int hisi_sas_dif_dma_map(struct hisi_hba *hisi_hba,
				int *n_elem_dif, struct sas_task *task)
{
	struct device *dev = hisi_hba->dev;
	struct sas_ssp_task *ssp_task;
	struct scsi_cmnd *scsi_cmnd;
	int rc;

	if (task->num_scatter) {
		ssp_task = &task->ssp_task;
		scsi_cmnd = ssp_task->cmd;

		if (scsi_prot_sg_count(scsi_cmnd)) {
			*n_elem_dif = dma_map_sg(dev,
						 scsi_prot_sglist(scsi_cmnd),
						 scsi_prot_sg_count(scsi_cmnd),
						 task->data_dir);

			if (!*n_elem_dif)
				return -ENOMEM;

			if (*n_elem_dif > HISI_SAS_SGE_DIF_PAGE_CNT) {
				dev_err(dev, "task prep: n_elem_dif(%d) too large\n",
					*n_elem_dif);
				rc = -EINVAL;
				goto err_out_dif_dma_unmap;
			}
		}
	}

	return 0;

err_out_dif_dma_unmap:
	dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
		     scsi_prot_sg_count(scsi_cmnd), task->data_dir);
	return rc;
}
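/*
 * Delivery, in outline: take a free command-header slot on the chosen
 * delivery queue under dq->lock, fill the slot and its command tables,
 * publish it with WRITE_ONCE(slot->ready, 1), then ring the doorbell
 * via hw->start_delivery() under dq->lock again. The ready flag lets
 * the completion path tell a fully-built slot from one still being
 * prepared.
 */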
static
void hisi_sas_task_deliver(struct hisi_hba *hisi_hba,
			   struct hisi_sas_slot *slot,
			   struct hisi_sas_dq *dq,
			   struct hisi_sas_device *sas_dev,
			   struct hisi_sas_internal_abort *abort)
{
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	int dlvry_queue_slot, dlvry_queue;
	struct sas_task *task = slot->task;
	unsigned long flags;
	int wr_q_index;

	spin_lock(&dq->lock);
	wr_q_index = dq->wr_point;
	dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
	list_add_tail(&slot->delivery, &dq->list);
	spin_unlock(&dq->lock);
	spin_lock(&sas_dev->lock);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock(&sas_dev->lock);

	dlvry_queue = dq->id;
	dlvry_queue_slot = wr_q_index;

	slot->device_id = sas_dev->device_id;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];

	task->lldd_task = slot;

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0,
	       sizeof(struct hisi_sas_err_record));

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		hisi_sas_task_prep_smp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SSP:
		hisi_sas_task_prep_ssp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		hisi_sas_task_prep_ata(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_NONE:
		if (abort) {
			hisi_sas_task_prep_abort(hisi_hba, abort, slot, sas_dev->device_id);
			break;
		}
		fallthrough;
	default:
		dev_err(hisi_hba->dev, "task prep: unknown/unsupported proto (0x%x)\n",
			task->task_proto);
		break;
	}

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	WRITE_ONCE(slot->ready, 1);

	spin_lock(&dq->lock);
	hisi_hba->hw->start_delivery(dq);
	spin_unlock(&dq->lock);
}
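/*
 * Queue selection in hisi_sas_task_exec() below: commands that carry a
 * scsi_cmnd are steered to the delivery queue matching their blk-mq hw
 * queue (blk_mq_unique_tag_to_hwq()), so completions stay on that
 * queue's interrupt; everything else falls back to the default map
 * entry for the current CPU.
 */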
static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
			      struct hisi_sas_tmf_task *tmf)
{
	int n_elem = 0, n_elem_dif = 0, n_elem_req = 0;
	struct domain_device *device = task->dev;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct scsi_cmnd *scmd = NULL;
	struct hisi_sas_dq *dq = NULL;
	struct hisi_sas_port *port;
	struct hisi_hba *hisi_hba;
	struct hisi_sas_slot *slot;
	struct device *dev;
	int rc;

	if (!sas_port) {
		struct task_status_struct *ts = &task->task_status;

		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port; do not call
		 * task_done() for SATA
		 */
		if (device->dev_type != SAS_SATA_DEV)
			task->task_done(task);
		return -ECOMM;
	}

	hisi_hba = dev_to_hisi_hba(device);
	dev = hisi_hba->dev;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
		if (!gfpflags_allow_blocking(gfp_flags))
			return -EINVAL;

		down(&hisi_hba->sem);
		up(&hisi_hba->sem);
	}

	if (DEV_IS_GONE(sas_dev)) {
		if (sas_dev)
			dev_info(dev, "task prep: device %d not ready\n",
				 sas_dev->device_id);
		else
			dev_info(dev, "task prep: device %016llx not ready\n",
				 SAS_ADDR(device->sas_addr));

		return -ECOMM;
	}

	if (task->uldd_task) {
		struct ata_queued_cmd *qc;

		if (dev_is_sata(device)) {
			qc = task->uldd_task;
			scmd = qc->scsicmd;
		} else {
			scmd = task->uldd_task;
		}
	}

	if (scmd) {
		unsigned int dq_index;
		u32 blk_tag;

		blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
		dq_index = blk_mq_unique_tag_to_hwq(blk_tag);
		dq = &hisi_hba->dq[dq_index];
	} else {
		struct Scsi_Host *shost = hisi_hba->shost;
		struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
		int queue = qmap->mq_map[raw_smp_processor_id()];

		dq = &hisi_hba->dq[queue];
	}

	port = to_hisi_sas_port(sas_port);
	if (port && !port->port_attached) {
		dev_info(dev, "task prep: %s port%d not attach device\n",
			 (dev_is_sata(device)) ?
			 "SATA/STP" : "SAS",
			 device->port->id);

		return -ECOMM;
	}

	rc = hisi_sas_dma_map(hisi_hba, task, &n_elem,
			      &n_elem_req);
	if (rc < 0)
		goto prep_out;

	if (!sas_protocol_ata(task->task_proto)) {
		rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task);
		if (rc < 0)
			goto err_out_dma_unmap;
	}

	if (hisi_hba->hw->slot_index_alloc)
		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
	else
		rc = hisi_sas_slot_index_alloc(hisi_hba, scmd);

	if (rc < 0)
		goto err_out_dif_dma_unmap;

	slot = &hisi_hba->slot_info[rc];
	slot->n_elem = n_elem;
	slot->n_elem_dif = n_elem_dif;
	slot->task = task;
	slot->port = port;

	slot->tmf = tmf;
	slot->is_internal = tmf;

	/* protect task_prep and start_delivery sequence */
	hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev, NULL);

	return 0;

err_out_dif_dma_unmap:
	if (!sas_protocol_ata(task->task_proto))
		hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif);
err_out_dma_unmap:
	hisi_sas_dma_unmap(hisi_hba, task, n_elem,
			   n_elem_req);
prep_out:
	dev_err(dev, "task exec: failed[%d]!\n", rc);
	return rc;
}

static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no,
				 gfp_t gfp_flags)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	if (!phy->phy_attached)
		return;

	sas_notify_phy_event(sas_phy, PHYE_OOB_DONE, gfp_flags);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate_hw =
			hisi_hba->hw->phy_get_max_linkrate();
		if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->minimum_linkrate = phy->minimum_linkrate;

		if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->maximum_linkrate = phy->maximum_linkrate;
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* Nothing */
	}

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
	sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, gfp_flags);
}
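/*
 * Device-slot allocation below is a simple round-robin scan: the search
 * starts one past the last allocated index and wraps modulo
 * HISI_SAS_MAX_DEVICES, so consecutive devices also spread across
 * delivery queues (queue = device_id % queue_count).
 */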
static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = NULL;
	int last = hisi_hba->last_dev_id;
	int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES;
	int i;

	spin_lock(&hisi_hba->lock);
	for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) {
		if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
			int queue = i % hisi_hba->queue_count;
			struct hisi_sas_dq *dq = &hisi_hba->dq[queue];

			hisi_hba->devices[i].device_id = i;
			sas_dev = &hisi_hba->devices[i];
			sas_dev->dev_status = HISI_SAS_DEV_INIT;
			sas_dev->dev_type = device->dev_type;
			sas_dev->hisi_hba = hisi_hba;
			sas_dev->sas_device = device;
			sas_dev->dq = dq;
			spin_lock_init(&sas_dev->lock);
			INIT_LIST_HEAD(&hisi_hba->devices[i].list);
			break;
		}
		i++;
	}
	hisi_hba->last_dev_id = i;
	spin_unlock(&hisi_hba->lock);

	return sas_dev;
}

#define HISI_SAS_DISK_RECOVER_CNT 3
static int hisi_sas_init_device(struct domain_device *device)
{
	int rc = TMF_RESP_FUNC_COMPLETE;
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int retry = HISI_SAS_DISK_RECOVER_CNT;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	struct sas_phy *local_phy;

	switch (device->dev_type) {
	case SAS_END_DEVICE:
		int_to_scsilun(0, &lun);

		tmf_task.tmf = TMF_CLEAR_TASK_SET;
		while (retry-- > 0) {
			rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun,
							  &tmf_task);
			if (rc == TMF_RESP_FUNC_COMPLETE) {
				hisi_sas_release_task(hisi_hba, device);
				break;
			}
		}
		break;
	case SAS_SATA_DEV:
	case SAS_SATA_PM:
	case SAS_SATA_PM_PORT:
	case SAS_SATA_PENDING:
		/*
		 * send HARD RESET to clear previous affiliation of
		 * STP target port
		 */
		local_phy = sas_get_local_phy(device);
		if (!scsi_is_sas_phy_local(local_phy) &&
		    !test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
			unsigned long deadline = ata_deadline(jiffies, 20000);
			struct sata_device *sata_dev = &device->sata_dev;
			struct ata_host *ata_host = sata_dev->ata_host;
			struct ata_port_operations *ops = ata_host->ops;
			struct ata_port *ap = sata_dev->ap;
			struct ata_link *link;
			unsigned int classes;

			ata_for_each_link(link, ap, EDGE)
				rc = ops->hardreset(link, &classes,
						    deadline);
		}
		sas_put_local_phy(local_phy);
		if (rc) {
			dev_warn(dev, "SATA disk hardreset fail: %d\n", rc);
			return rc;
		}

		while (retry-- > 0) {
			rc = hisi_sas_softreset_ata_disk(device);
			if (!rc)
				break;
		}
		break;
	default:
		break;
	}

	return rc;
}

int hisi_sas_slave_alloc(struct scsi_device *sdev)
{
	struct domain_device *ddev;
	int rc;

	rc = sas_slave_alloc(sdev);
	if (rc)
		return rc;
	ddev = sdev_to_domain_dev(sdev);

	return hisi_sas_init_device(ddev);
}
EXPORT_SYMBOL_GPL(hisi_sas_slave_alloc);

static int hisi_sas_dev_found(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev;
	struct device *dev = hisi_hba->dev;
	int rc;

	if (hisi_hba->hw->alloc_dev)
		sas_dev = hisi_hba->hw->alloc_dev(device);
	else
		sas_dev = hisi_sas_alloc_dev(device);
	if (!sas_dev) {
		dev_err(dev, "fail alloc dev: max support %d devices\n",
			HISI_SAS_MAX_DEVICES);
		return -EINVAL;
	}

	device->lldd_dev = sas_dev;
	hisi_hba->hw->setup_itct(hisi_hba, sas_dev);

	if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
		int phy_no;
		u8 phy_num = parent_dev->ex_dev.num_phys;
		struct ex_phy *phy;

		for (phy_no = 0; phy_no < phy_num; phy_no++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_no];
			if (SAS_ADDR(phy->attached_sas_addr) ==
			    SAS_ADDR(device->sas_addr))
				break;
		}

		if (phy_no == phy_num) {
			dev_info(dev, "dev found: no attached dev:%016llx at ex:%016llx\n",
				 SAS_ADDR(device->sas_addr),
				 SAS_ADDR(parent_dev->sas_addr));
			rc = -EINVAL;
			goto err_out;
		}
	}

	dev_info(dev, "dev[%d:%x] found\n",
		 sas_dev->device_id, sas_dev->dev_type);

	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
	return 0;

err_out:
	hisi_sas_dev_gone(device);
	return rc;
}

int hisi_sas_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int ret = sas_slave_configure(sdev);

	if (ret)
		return ret;
	if (!dev_is_sata(dev))
		sas_change_queue_depth(sdev, 64);

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_slave_configure);

void hisi_sas_scan_start(struct Scsi_Host *shost)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	hisi_hba->hw->phys_init(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_start);

int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);
	struct sas_ha_struct *sha = &hisi_hba->sha;

	/* Wait for PHY up interrupt to occur */
	if (time < HZ)
		return 0;

	sas_drain_work(sha);
	return 1;
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_finished);

static void hisi_sas_phyup_work_common(struct work_struct *work,
				       enum hisi_sas_phy_event event)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[event]);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int phy_no = sas_phy->id;

	phy->wait_phyup_cnt = 0;
	if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP)
		hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no);
	hisi_sas_bytes_dmaed(hisi_hba, phy_no, GFP_KERNEL);
}

static void hisi_sas_phyup_work(struct work_struct *work)
{
	hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP);
}

static void hisi_sas_linkreset_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
}

static void hisi_sas_phyup_pm_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP_PM]);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct device *dev = hisi_hba->dev;

	hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP_PM);
	pm_runtime_put_sync(dev);
}

static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
	[HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
	[HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
	[HISI_PHYE_PHY_UP_PM] = hisi_sas_phyup_pm_work,
};
bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
			       enum hisi_sas_phy_event event)
{
	struct hisi_hba *hisi_hba = phy->hisi_hba;

	if (WARN_ON(event >= HISI_PHYES_NUM))
		return false;

	return queue_work(hisi_hba->wq, &phy->works[event]);
}
EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);
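/*
 * OOB-ready handling below: each time OOB completes without the PHY
 * attaching, a timer is armed; if it fires,
 * hisi_sas_wait_phyup_timedout() queues a link reset. After
 * HISI_SAS_WAIT_PHYUP_RETRIES such attempts the driver gives up until
 * the next OOB event.
 */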
static void hisi_sas_wait_phyup_timedout(struct timer_list *t)
{
	struct hisi_sas_phy *phy = from_timer(phy, t, timer);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct device *dev = hisi_hba->dev;
	int phy_no = phy->sas_phy.id;

	dev_warn(dev, "phy%d wait phyup timeout, issuing link reset\n", phy_no);
	hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
}

#define HISI_SAS_WAIT_PHYUP_RETRIES	10

void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct device *dev = hisi_hba->dev;
	unsigned long flags;

	dev_dbg(dev, "phy%d OOB ready\n", phy_no);
	spin_lock_irqsave(&phy->lock, flags);
	if (phy->phy_attached) {
		spin_unlock_irqrestore(&phy->lock, flags);
		return;
	}

	if (!timer_pending(&phy->timer)) {
		if (phy->wait_phyup_cnt < HISI_SAS_WAIT_PHYUP_RETRIES) {
			phy->wait_phyup_cnt++;
			phy->timer.expires = jiffies +
					     HISI_SAS_WAIT_PHYUP_TIMEOUT;
			add_timer(&phy->timer);
			spin_unlock_irqrestore(&phy->lock, flags);
			return;
		}

		dev_warn(dev, "phy%d failed to come up %d times, giving up\n",
			 phy_no, phy->wait_phyup_cnt);
		phy->wait_phyup_cnt = 0;
	}
	spin_unlock_irqrestore(&phy->lock, flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready);

static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int i;

	phy->hisi_hba = hisi_hba;
	phy->port = NULL;
	phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
	phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
	sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
	sas_phy->id = phy_no;
	sas_phy->sas_addr = &hisi_hba->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
	sas_phy->lldd_phy = phy;

	for (i = 0; i < HISI_PHYES_NUM; i++)
		INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);

	spin_lock_init(&phy->lock);

	timer_setup(&phy->timer, hisi_sas_wait_phyup_timedout, 0);
}

/* Wrapper to ensure we track hisi_sas_phy.enable properly */
void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no, int enable)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *aphy = &phy->sas_phy;
	struct sas_phy *sphy = aphy->phy;
	unsigned long flags;

	spin_lock_irqsave(&phy->lock, flags);

	if (enable) {
		/* We may have been enabled already; if so, don't touch */
		if (!phy->enable)
			sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
		hisi_hba->hw->phy_start(hisi_hba, phy_no);
	} else {
		sphy->negotiated_linkrate = SAS_PHY_DISABLED;
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
	}
	phy->enable = enable;
	spin_unlock_irqrestore(&phy->lock, flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_enable);
static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct hisi_sas_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct hisi_sas_port *port;
	unsigned long flags;

	if (!sas_port)
		return;

	port = to_hisi_sas_port(sas_port);
	spin_lock_irqsave(&hisi_hba->lock, flags);
	port->port_attached = 1;
	port->id = phy->port_id;
	phy->port = port;
	sas_port->lldd_port = port;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
}

static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
				     struct hisi_sas_slot *slot)
{
	if (task) {
		unsigned long flags;
		struct task_status_struct *ts;

		ts = &task->task_status;

		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_ABORTED_TASK;
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags &=
			~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
		if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP)
			task->task_state_flags |= SAS_TASK_STATE_DONE;
		spin_unlock_irqrestore(&task->task_state_lock, flags);
	}

	hisi_sas_slot_task_free(hisi_hba, task, slot);
}

static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
				  struct domain_device *device)
{
	struct hisi_sas_slot *slot, *slot2;
	struct hisi_sas_device *sas_dev = device->lldd_dev;

	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
		hisi_sas_do_release_task(hisi_hba, slot->task, slot);
}

void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
		    !device)
			continue;

		hisi_sas_release_task(hisi_hba, device);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);

static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
				  struct domain_device *device)
{
	if (hisi_hba->hw->dereg_device)
		hisi_hba->hw->dereg_device(hisi_hba, device);
}

static void hisi_sas_dev_gone(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int ret = 0;

	dev_info(dev, "dev[%d:%x] is gone\n",
		 sas_dev->device_id, sas_dev->dev_type);

	down(&hisi_hba->sem);
	if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
		hisi_sas_internal_task_abort(hisi_hba, device,
					     HISI_SAS_INT_ABT_DEV, 0, true);

		hisi_sas_dereg_device(hisi_hba, device);

		ret = hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
		device->lldd_dev = NULL;
	}

	if (hisi_hba->hw->free_device)
		hisi_hba->hw->free_device(sas_dev);

	/* Don't mark it as SAS_PHY_UNUSED if failed to clear ITCT */
	if (!ret)
		sas_dev->dev_type = SAS_PHY_UNUSED;
	sas_dev->sas_device = NULL;
	up(&hisi_hba->sem);
}

static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	return hisi_sas_task_exec(task, gfp_flags, NULL);
}

static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
				     struct sas_phy_linkrates *r)
{
	struct sas_phy_linkrates _r;

	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	enum sas_linkrate min, max;

	if (r->minimum_linkrate > SAS_LINK_RATE_1_5_GBPS)
		return -EINVAL;

	if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = sas_phy->phy->maximum_linkrate;
		min = r->minimum_linkrate;
	} else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = r->maximum_linkrate;
		min = sas_phy->phy->minimum_linkrate;
	} else
		return -EINVAL;

	_r.maximum_linkrate = max;
	_r.minimum_linkrate = min;

	sas_phy->phy->maximum_linkrate = max;
	sas_phy->phy->minimum_linkrate = min;

	hisi_sas_phy_enable(hisi_hba, phy_no, 0);
	msleep(100);
	hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
	hisi_sas_phy_enable(hisi_hba, phy_no, 1);

	return 0;
}
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata)
{
	struct hisi_sas_phy *phy = container_of(sas_phy,
			struct hisi_sas_phy, sas_phy);
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct device *dev = hisi_hba->dev;
	DECLARE_COMPLETION_ONSTACK(completion);
	int phy_no = sas_phy->id;
	u8 sts = phy->phy_attached;
	int ret = 0;

	down(&hisi_hba->sem);
	phy->reset_completion = &completion;

	switch (func) {
	case PHY_FUNC_HARD_RESET:
		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
		break;

	case PHY_FUNC_LINK_RESET:
		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
		msleep(100);
		hisi_sas_phy_enable(hisi_hba, phy_no, 1);
		break;

	case PHY_FUNC_DISABLE:
		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
		goto out;

	case PHY_FUNC_SET_LINK_RATE:
		ret = hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
		break;

	case PHY_FUNC_GET_EVENTS:
		if (hisi_hba->hw->get_events) {
			hisi_hba->hw->get_events(hisi_hba, phy_no);
			goto out;
		}
		fallthrough;
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (sts && !wait_for_completion_timeout(&completion, 2 * HZ)) {
		dev_warn(dev, "phy%d wait phyup timed out for func %d\n",
			 phy_no, func);
		if (phy->in_reset)
			ret = -ETIMEDOUT;
	}

out:
	phy->reset_completion = NULL;

	up(&hisi_hba->sem);
	return ret;
}

static void hisi_sas_task_done(struct sas_task *task)
{
	del_timer_sync(&task->slow_task->timer);
	complete(&task->slow_task->completion);
}

static void hisi_sas_tmf_timedout(struct timer_list *t)
{
	struct sas_task_slow *slow = from_timer(slow, t, timer);
	struct sas_task *task = slow->task;
	unsigned long flags;
	bool is_completed = true;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
		is_completed = false;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	if (!is_completed)
		complete(&task->slow_task->completion);
}
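/*
 * TMF execution below retries up to TASK_RETRY times with a
 * TASK_TIMEOUT deadline per attempt, and maps the outcome onto the
 * usual libsas conventions: TMF_RESP_FUNC_COMPLETE/SUCC on success, a
 * negative errno or TMF_RESP_FUNC_FAILED otherwise.
 */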
#define TASK_TIMEOUT			(20 * HZ)
#define TASK_RETRY			3
#define INTERNAL_ABORT_TIMEOUT		(6 * HZ)
static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
					   void *parameter, u32 para_len,
					   struct hisi_sas_tmf_task *tmf)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
	struct device *dev = hisi_hba->dev;
	struct sas_task *task;
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = device->tproto;

		if (dev_is_sata(device)) {
			task->ata_task.device_control_reg_update = 1;
			memcpy(&task->ata_task.fis, parameter, para_len);
		} else {
			memcpy(&task->ssp_task, parameter, para_len);
		}
		task->task_done = hisi_sas_task_done;

		task->slow_task->timer.function = hisi_sas_tmf_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT;
		add_timer(&task->slow_task->timer);

		res = hisi_sas_task_exec(task, GFP_KERNEL, tmf);
		if (res) {
			del_timer_sync(&task->slow_task->timer);
			dev_err(dev, "abort tmf: executing internal task failed: %d\n",
				res);
			goto ex_err;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even if the TMF timed out, return directly. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				struct hisi_sas_slot *slot = task->lldd_task;

				dev_err(dev, "abort tmf: TMF task timeout and not done\n");
				if (slot) {
					struct hisi_sas_cq *cq =
					       &hisi_hba->cq[slot->dlvry_queue];
					/*
					 * sync irq to avoid free'ing task
					 * before using task in IO completion
					 */
					synchronize_irq(cq->irq_no);
					slot->task = NULL;
				}

				goto ex_err;
			} else
				dev_err(dev, "abort tmf: TMF task timeout\n");
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun
			 */
			dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x sts 0x%x underrun\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_OVERRUN) {
			dev_warn(dev, "abort tmf: blocked task error\n");
			res = -EMSGSIZE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_OPEN_REJECT) {
			dev_warn(dev, "abort tmf: open reject failed\n");
			res = -EIO;
		} else {
			dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x status 0x%x\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
		}
		sas_free_task(task);
		task = NULL;
	}
ex_err:
	if (retry == TASK_RETRY)
		dev_warn(dev, "abort tmf: executing internal task failed!\n");
	sas_free_task(task);
	return res;
}

static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
					bool reset, int pmp, u8 *fis)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	if (reset)
		tf.ctl |= ATA_SRST;
	else
		tf.ctl &= ~ATA_SRST;
	tf.command = ATA_CMD_DEV_RESET;
	ata_tf_to_fis(&tf, pmp, 0, fis);
}
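/*
 * The ATA software reset below follows the standard SRST handshake:
 * send a device-control FIS with ATA_SRST set on every link, then a
 * second FIS with ATA_SRST cleared to de-assert the reset; only if both
 * rounds complete is the disk considered reset.
 */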
static int hisi_sas_softreset_ata_disk(struct domain_device *device)
{
	u8 fis[20] = {0};
	struct ata_port *ap = device->sata_dev.ap;
	struct ata_link *link;
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);
	struct hisi_sas_tmf_task tmf = {};

	ata_for_each_link(link, ap, EDGE) {
		int pmp = sata_srst_pmp(link);

		hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
		rc = hisi_sas_exec_internal_tmf_task(device, fis, s, &tmf);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			break;
	}

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis,
							     s, &tmf);
			if (rc != TMF_RESP_FUNC_COMPLETE)
				dev_err(dev, "ata disk %016llx de-reset failed\n",
					SAS_ADDR(device->sas_addr));
		}
	} else {
		dev_err(dev, "ata disk %016llx reset failed\n",
			SAS_ADDR(device->sas_addr));
	}

	if (rc == TMF_RESP_FUNC_COMPLETE)
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
					u8 *lun, struct hisi_sas_tmf_task *tmf)
{
	struct sas_ssp_task ssp_task;

	if (!(device->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	memcpy(ssp_task.LUN, lun, 8);

	return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
					       sizeof(ssp_task), tmf);
}

static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
{
	u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;
		struct asd_sas_port *sas_port;
		struct hisi_sas_port *port;
		struct hisi_sas_phy *phy = NULL;
		struct asd_sas_phy *sas_phy;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED)
				|| !device || !device->port)
			continue;

		sas_port = device->port;
		port = to_hisi_sas_port(sas_port);

		spin_lock(&sas_port->phy_list_lock);
		list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
			if (state & BIT(sas_phy->id)) {
				phy = sas_phy->lldd_phy;
				break;
			}
		spin_unlock(&sas_port->phy_list_lock);

		if (phy) {
			port->id = phy->port_id;

			/* Update linkrate of directly attached device. */
			if (!device->parent)
				device->linkrate = phy->sas_phy.linkrate;

			hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
		} else
			port->id = 0xff;
	}
}

static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state)
{
	struct asd_sas_port *_sas_port = NULL;
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		struct asd_sas_port *sas_port = sas_phy->port;
		bool do_port_check = _sas_port != sas_port;

		if (!sas_phy->phy->enabled)
			continue;

		/* Report PHY state change to libsas */
		if (state & BIT(phy_no)) {
			if (do_port_check && sas_port && sas_port->port_dev) {
				struct domain_device *dev = sas_port->port_dev;

				_sas_port = sas_port;

				if (dev_is_expander(dev->dev_type))
					sas_notify_port_event(sas_phy,
							PORTE_BROADCAST_RCVD,
							GFP_KERNEL);
			}
		} else {
			hisi_sas_phy_down(hisi_hba, phy_no, 0, GFP_KERNEL);
		}
	}
}

static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
			continue;

		hisi_sas_init_device(device);
	}
}
static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba,
					     struct asd_sas_port *sas_port,
					     struct domain_device *device)
{
	struct hisi_sas_tmf_task tmf_task = { .force_phy = 1 };
	struct ata_port *ap = device->sata_dev.ap;
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);
	int rc = TMF_RESP_FUNC_FAILED;
	struct ata_link *link;
	u8 fis[20] = {0};
	int i;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		if (!(sas_port->phy_mask & BIT(i)))
			continue;

		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			tmf_task.phy_id = i;
			hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis, s,
							     &tmf_task);
			if (rc != TMF_RESP_FUNC_COMPLETE) {
				dev_err(dev, "phy%d ata reset failed rc=%d\n",
					i, rc);
				break;
			}
		}
	}
}

static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int port_no, rc, i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
			continue;

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0,
						  false);
		if (rc < 0)
			dev_err(dev, "STP reject: abort dev failed %d\n", rc);
	}

	for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) {
		struct hisi_sas_port *port = &hisi_hba->port[port_no];
		struct asd_sas_port *sas_port = &port->sas_port;
		struct domain_device *port_dev = sas_port->port_dev;
		struct domain_device *device;

		if (!port_dev || !dev_is_expander(port_dev->dev_type))
			continue;

		/* Try to find a SATA device */
		list_for_each_entry(device, &sas_port->dev_list,
				    dev_list_node) {
			if (dev_is_sata(device)) {
				hisi_sas_send_ata_reset_each_phy(hisi_hba,
								 sas_port,
								 device);
				break;
			}
		}
	}
}
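/*
 * Controller reset is bracketed by the prepare/done pair below:
 * prepare() blocks the scsi host, waits for in-flight commands and sets
 * HISI_SAS_REJECT_CMD_BIT so hisi_sas_task_exec() turns new commands
 * away; done() re-inits the PHYs, refreshes port IDs, clears the flag
 * and rescans the topology against the saved phy state.
 */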
void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
{
	struct Scsi_Host *shost = hisi_hba->shost;

	hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba);

	scsi_block_requests(shost);
	hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);

	del_timer_sync(&hisi_hba->timer);

	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare);

void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
{
	struct Scsi_Host *shost = hisi_hba->shost;

	/* Init and wait for PHYs to come up and all libsas events to finish. */
	hisi_hba->hw->phys_init(hisi_hba);
	msleep(1000);
	hisi_sas_refresh_port_id(hisi_hba);
	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);

	if (hisi_hba->reject_stp_links_msk)
		hisi_sas_terminate_stp_reject(hisi_hba);
	hisi_sas_reset_init_all_devices(hisi_hba);
	scsi_unblock_requests(shost);
	clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
	up(&hisi_hba->sem);

	hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);

static int hisi_sas_controller_prereset(struct hisi_hba *hisi_hba)
{
	if (!hisi_hba->hw->soft_reset)
		return -1;

	down(&hisi_hba->sem);
	if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
		up(&hisi_hba->sem);
		return -1;
	}

	if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
		hisi_hba->hw->debugfs_snapshot_regs(hisi_hba);

	return 0;
}
static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct Scsi_Host *shost = hisi_hba->shost;
	int rc;

	dev_info(dev, "controller resetting...\n");
	hisi_sas_controller_reset_prepare(hisi_hba);

	rc = hisi_hba->hw->soft_reset(hisi_hba);
	if (rc) {
		dev_warn(dev, "controller reset failed (%d)\n", rc);
		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
		up(&hisi_hba->sem);
		scsi_unblock_requests(shost);
		clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
		return rc;
	}

	hisi_sas_controller_reset_done(hisi_hba);
	clear_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags);
	dev_info(dev, "controller reset complete\n");

	return 0;
}

static int hisi_sas_abort_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba;
	struct device *dev;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	if (!sas_dev)
		return TMF_RESP_FUNC_FAILED;

	hisi_hba = dev_to_hisi_hba(task->dev);
	dev = hisi_hba->dev;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		struct hisi_sas_slot *slot = task->lldd_task;
		struct hisi_sas_cq *cq;

		if (slot) {
			/*
			 * sync irq to avoid free'ing task
			 * before using task in IO completion
			 */
			cq = &hisi_hba->cq[slot->dlvry_queue];
			synchronize_irq(cq->irq_no);
		}
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct hisi_sas_slot *slot = task->lldd_task;
		u16 tag = slot->idx;
		int rc2;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = tag;

		rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
						  &tmf_task);

		rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
						   HISI_SAS_INT_ABT_CMD, tag,
						   false);
		if (rc2 < 0) {
			dev_err(dev, "abort task: internal abort (%d)\n", rc2);
			return TMF_RESP_FUNC_FAILED;
		}

		/*
		 * If the TMF finds that the IO is not in the device and also
		 * the internal abort does not succeed, then it is safe to
		 * free the slot.
		 * Note: if the internal abort succeeds then the slot
		 * will have already been completed
		 */
		if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
			if (task->lldd_task)
				hisi_sas_do_release_task(hisi_hba, task, slot);
		}
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		   task->task_proto & SAS_PROTOCOL_STP) {
		if (task->dev->dev_type == SAS_SATA_DEV) {
			rc = hisi_sas_internal_task_abort(hisi_hba, device,
							  HISI_SAS_INT_ABT_DEV,
							  0, false);
			if (rc < 0) {
				dev_err(dev, "abort task: internal abort failed\n");
				goto out;
			}
			hisi_sas_dereg_device(hisi_hba, device);
			rc = hisi_sas_softreset_ata_disk(device);
		}
	} else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;
		struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_CMD, tag,
						  false);
		if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
					task->lldd_task) {
			/*
			 * sync irq to avoid free'ing task
			 * before using task in IO completion
			 */
			synchronize_irq(cq->irq_no);
			slot->task = NULL;
		}
	}

out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_notice(dev, "abort task: rc=%d\n", rc);
	return rc;
}

static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_tmf_task tmf_task;
	int rc;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0, false);
	if (rc < 0) {
		dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	tmf_task.tmf = TMF_ABORT_TASK_SET;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	if (rc == TMF_RESP_FUNC_COMPLETE)
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_tmf_task tmf_task;
	int rc;

	tmf_task.tmf = TMF_CLEAR_ACA;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	return rc;
}

#define I_T_NEXUS_RESET_PHYUP_TIMEOUT  (2 * HZ)

static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
{
	struct sas_phy *local_phy = sas_get_local_phy(device);
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	int rc, reset_type;

	if (!local_phy->enabled) {
		sas_put_local_phy(local_phy);
		return -ENODEV;
	}

	if (scsi_is_sas_phy_local(local_phy)) {
		struct asd_sas_phy *sas_phy =
			sas_ha->sas_phy[local_phy->number];
		struct hisi_sas_phy *phy =
			container_of(sas_phy, struct hisi_sas_phy, sas_phy);
		unsigned long flags;

		spin_lock_irqsave(&phy->lock, flags);
		phy->in_reset = 1;
		spin_unlock_irqrestore(&phy->lock, flags);
	}

	reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT ||
		      !dev_is_sata(device)) ? true : false;

	rc = sas_phy_reset(local_phy, reset_type);
	sas_put_local_phy(local_phy);

	if (scsi_is_sas_phy_local(local_phy)) {
		struct asd_sas_phy *sas_phy =
			sas_ha->sas_phy[local_phy->number];
		struct hisi_sas_phy *phy =
			container_of(sas_phy, struct hisi_sas_phy, sas_phy);
		unsigned long flags;

		spin_lock_irqsave(&phy->lock, flags);
		phy->in_reset = 0;
		spin_unlock_irqrestore(&phy->lock, flags);

		/* report PHY down if timed out */
		if (rc == -ETIMEDOUT)
			hisi_sas_phy_down(hisi_hba, sas_phy->id, 0, GFP_KERNEL);
	} else if (sas_dev->dev_status != HISI_SAS_DEV_INIT) {
		/*
		 * If the device is in the init state, rely on the caller to
		 * wait for the link to become ready; otherwise, unless the
		 * phy reset failed, delay.
		 */
		if (!rc)
			msleep(2000);
	}

	return rc;
}
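/*
 * Note on hisi_sas_I_T_nexus_reset() below: after the internal abort
 * and phy reset, a SATA disk additionally gets a software reset; if
 * that fails with a comms-type error the local phy is disabled and
 * -ENODEV returned, so the disk is treated as gone and its tasks are
 * released.
 */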
static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0, false);
	if (rc < 0) {
		dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	rc = hisi_sas_debug_I_T_nexus_reset(device);
	if (rc == TMF_RESP_FUNC_COMPLETE && dev_is_sata(device)) {
		struct sas_phy *local_phy;

		rc = hisi_sas_softreset_ata_disk(device);
		switch (rc) {
		case -ECOMM:
			rc = -ENODEV;
			break;
		case TMF_RESP_FUNC_FAILED:
		case -EMSGSIZE:
		case -EIO:
			local_phy = sas_get_local_phy(device);
			rc = sas_phy_enable(local_phy, 0);
			if (!rc) {
				local_phy->enabled = 0;
				dev_err(dev, "Disabled local phy of ATA disk %016llx due to softreset fail (%d)\n",
					SAS_ADDR(device->sas_addr), rc);
				rc = -ENODEV;
			}
			sas_put_local_phy(local_phy);
			break;
		default:
			break;
		}
	}

	if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;

	/* Clear internal IO and then lu reset */
	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0, false);
	if (rc < 0) {
		dev_err(dev, "lu_reset: internal abort failed\n");
		goto out;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	if (dev_is_sata(device)) {
		struct sas_phy *phy;

		phy = sas_get_local_phy(device);

		rc = sas_phy_reset(phy, true);

		if (rc == 0)
			hisi_sas_release_task(hisi_hba, device);
		sas_put_local_phy(phy);
	} else {
		struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };

		rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
		if (rc == TMF_RESP_FUNC_COMPLETE)
			hisi_sas_release_task(hisi_hba, device);
	}
out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
			sas_dev->device_id, rc);
	return rc;
}
static void hisi_sas_async_I_T_nexus_reset(void *data, async_cookie_t cookie)
{
	struct domain_device *device = data;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	int rc;

	rc = hisi_sas_debug_I_T_nexus_reset(device);
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_info(hisi_hba->dev, "I_T_nexus reset fail for dev:%016llx rc=%d\n",
			 SAS_ADDR(device->sas_addr), rc);
}

static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
	ASYNC_DOMAIN_EXCLUSIVE(async);
	int i;

	queue_work(hisi_hba->wq, &r.work);
	wait_for_completion(r.completion);
	if (!r.done)
		return TMF_RESP_FUNC_FAILED;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device ||
		    dev_is_expander(device->dev_type))
			continue;

		async_schedule_domain(hisi_sas_async_I_T_nexus_reset,
				      device, &async);
	}

	async_synchronize_full_domain(&async);
	hisi_sas_release_tasks(hisi_hba);

	return TMF_RESP_FUNC_COMPLETE;
}

static int hisi_sas_query_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct domain_device *device = task->dev;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = tag;

		rc = hisi_sas_debug_issue_ssp_tmf(device,
						  lun.scsi_lun,
						  &tmf_task);
		switch (rc) {
		/* The task is still in the LU; release it then */
		case TMF_RESP_FUNC_SUCC:
		/* The task is not in the LU or the TMF failed; reset the phy then */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			break;
		default:
			rc = TMF_RESP_FUNC_FAILED;
			break;
		}
	}
	return rc;
}

static int
hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
				  struct hisi_sas_internal_abort *abort,
				  struct sas_task *task,
				  struct hisi_sas_dq *dq)
{
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_port *port;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_slot *slot;
	int slot_idx;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
		return -EINVAL;

	if (!device->port)
		return -1;

	port = to_hisi_sas_port(sas_port);

	/* simply get a slot and send abort command */
	slot_idx = hisi_sas_slot_index_alloc(hisi_hba, NULL);
	if (slot_idx < 0)
		goto err_out;

	slot = &hisi_hba->slot_info[slot_idx];
	slot->n_elem = 0;
	slot->task = task;
	slot->port = port;
	slot->is_internal = true;

	hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev, abort);

	return 0;

err_out:
	dev_err(dev, "internal abort task prep: failed[%d]!\n", slot_idx);

	return slot_idx;
}
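/*
 * Typical usage, as in the error handlers above: quiesce all IO for a
 * device with an internal abort before deregistering or resetting it:
 *
 *	rc = hisi_sas_internal_task_abort(hisi_hba, device,
 *					  HISI_SAS_INT_ABT_DEV, 0, false);
 *	if (rc < 0)
 *		return TMF_RESP_FUNC_FAILED;
 *	hisi_sas_dereg_device(hisi_hba, device);
 */
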
/**
 * _hisi_sas_internal_task_abort() - execute an internal abort for a
 * single IO command or for a device
 * @hisi_hba: host controller struct
 * @device: domain device
 * @abort_flag: mode of operation, device or single IO
 * @tag: tag of IO to be aborted (only relevant to single IO mode)
 * @dq: delivery queue for this internal abort command
 * @rst_to_recover: if set, queue a controller reset if the internal
 * abort times out
 */
static int
_hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			      struct domain_device *device, int abort_flag,
			      int tag, struct hisi_sas_dq *dq, bool rst_to_recover)
{
	struct sas_task *task;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_internal_abort abort = {
		.flag = abort_flag,
		.tag = tag,
	};
	struct device *dev = hisi_hba->dev;
	int res;
	/*
	 * If the prep_abort() hook is not implemented, this hardware either
	 * does not support internal abort or does not need one. Return
	 * TMF_RESP_FUNC_FAILED and let the remaining steps proceed as if
	 * the internal abort had been executed and completed on the CQ.
	 */
	if (!hisi_hba->hw->prep_abort)
		return TMF_RESP_FUNC_FAILED;

	if (test_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags))
		return -EIO;

	task = sas_alloc_slow_task(GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	task->dev = device;
	task->task_proto = SAS_PROTOCOL_NONE;
	task->task_done = hisi_sas_task_done;
	task->slow_task->timer.function = hisi_sas_tmf_timedout;
	task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT;
	add_timer(&task->slow_task->timer);

	res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
						&abort, task, dq);
	if (res) {
		del_timer_sync(&task->slow_task->timer);
		dev_err(dev, "internal task abort: executing internal task failed: %d\n",
			res);
		goto exit;
	}
	wait_for_completion(&task->slow_task->completion);
	res = TMF_RESP_FUNC_FAILED;

	/* Internal abort timed out */
	if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
		if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
			queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);

		if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
			struct hisi_sas_slot *slot = task->lldd_task;

			set_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags);

			if (slot) {
				struct hisi_sas_cq *cq =
					&hisi_hba->cq[slot->dlvry_queue];
				/*
				 * Sync the irq to avoid freeing the task
				 * before the IO completion path has
				 * finished using it.
				 */
				synchronize_irq(cq->irq_no);
				slot->task = NULL;
			}
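
			/*
			 * rst_to_recover escalates a timed-out internal
			 * abort to a full controller reset via rst_work;
			 * otherwise the failure is only logged and -EIO
			 * returned.
			 */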
			if (rst_to_recover) {
				dev_err(dev, "internal task abort: timeout and not done. Queuing reset.\n");
				queue_work(hisi_hba->wq, &hisi_hba->rst_work);
			} else {
				dev_err(dev, "internal task abort: timeout and not done.\n");
			}

			res = -EIO;
			goto exit;
		} else
			dev_err(dev, "internal task abort: timeout.\n");
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
	    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
		res = TMF_RESP_FUNC_COMPLETE;
		goto exit;
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
	    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
		res = TMF_RESP_FUNC_SUCC;
		goto exit;
	}

exit:
	dev_dbg(dev, "internal task abort: task to dev %016llx task=%pK resp: 0x%x sts 0x%x\n",
		SAS_ADDR(device->sas_addr), task,
		task->task_status.resp, /* 0 is complete, -1 is undelivered */
		task->task_status.stat);
	sas_free_task(task);

	return res;
}
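
/*
 * For HISI_SAS_INT_ABT_CMD the abort is issued on the delivery queue that
 * delivered the command being aborted; for HISI_SAS_INT_ABT_DEV it is
 * replicated on every delivery queue whose completion IRQ can still be
 * serviced by an online CPU.
 */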
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag, bool rst_to_recover)
{
	struct hisi_sas_slot *slot;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_dq *dq;
	int i, rc;

	switch (abort_flag) {
	case HISI_SAS_INT_ABT_CMD:
		slot = &hisi_hba->slot_info[tag];
		dq = &hisi_hba->dq[slot->dlvry_queue];
		return _hisi_sas_internal_task_abort(hisi_hba, device,
						     abort_flag, tag, dq,
						     rst_to_recover);
	case HISI_SAS_INT_ABT_DEV:
		for (i = 0; i < hisi_hba->cq_nvecs; i++) {
			struct hisi_sas_cq *cq = &hisi_hba->cq[i];
			const struct cpumask *mask = cq->irq_mask;

			if (mask && !cpumask_intersects(cpu_online_mask, mask))
				continue;
			dq = &hisi_hba->dq[i];
			rc = _hisi_sas_internal_task_abort(hisi_hba, device,
							   abort_flag, tag,
							   dq, rst_to_recover);
			if (rc)
				return rc;
		}
		break;
	default:
		dev_err(dev, "Unrecognised internal abort flag (%d)\n",
			abort_flag);
		return -EINVAL;
	}

	return 0;
}

static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
{
	hisi_sas_port_notify_formed(sas_phy);
}

static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
			       u8 reg_index, u8 reg_count, u8 *write_data)
{
	struct hisi_hba *hisi_hba = sha->lldd_ha;

	if (!hisi_hba->hw->write_gpio)
		return -EOPNOTSUPP;

	return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
					reg_index, reg_count, write_data);
}

static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
{
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_phy *sphy = sas_phy->phy;
	unsigned long flags;

	phy->phy_attached = 0;
	phy->phy_type = 0;
	phy->port = NULL;

	spin_lock_irqsave(&phy->lock, flags);
	if (phy->enable)
		sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
	else
		sphy->negotiated_linkrate = SAS_PHY_DISABLED;
	spin_unlock_irqrestore(&phy->lock, flags);
}

void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy,
		       gfp_t gfp_flags)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct device *dev = hisi_hba->dev;

	if (rdy) {
		/* Phy down but ready */
		hisi_sas_bytes_dmaed(hisi_hba, phy_no, gfp_flags);
		hisi_sas_port_notify_formed(sas_phy);
	} else {
		struct hisi_sas_port *port = phy->port;

		if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) ||
		    phy->in_reset) {
			dev_info(dev, "ignore flutter phy%d down\n", phy_no);
			return;
		}
		/* Phy down and not ready */
		sas_notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL, gfp_flags);
		sas_phy_disconnected(sas_phy);

		if (port) {
			if (phy->phy_type & PORT_TYPE_SAS) {
				int port_id = port->id;

				if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
								       port_id))
					port->port_attached = 0;
			} else if (phy->phy_type & PORT_TYPE_SATA)
				port->port_attached = 0;
		}
		hisi_sas_phy_disconnected(phy);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);

void hisi_sas_sync_irqs(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->cq_nvecs; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];

		synchronize_irq(cq->irq_no);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_sync_irqs);

int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	if (reset_type != SCSI_ADAPTER_RESET)
		return -EOPNOTSUPP;

	queue_work(hisi_hba->wq, &hisi_hba->rst_work);

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_host_reset);

struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);

static struct sas_domain_function_template hisi_sas_transport_ops = {
	.lldd_dev_found = hisi_sas_dev_found,
	.lldd_dev_gone = hisi_sas_dev_gone,
	.lldd_execute_task = hisi_sas_queue_command,
	.lldd_control_phy = hisi_sas_control_phy,
	.lldd_abort_task = hisi_sas_abort_task,
	.lldd_abort_task_set = hisi_sas_abort_task_set,
	.lldd_clear_aca = hisi_sas_clear_aca,
	.lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset,
	.lldd_lu_reset = hisi_sas_lu_reset,
	.lldd_query_task = hisi_sas_query_task,
	.lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
	.lldd_port_formed = hisi_sas_port_formed,
	.lldd_write_gpio = hisi_sas_write_gpio,
};

void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
{
	int i, s, j, max_command_entries = HISI_SAS_MAX_COMMANDS;
	struct hisi_sas_breakpoint *sata_breakpoint = hisi_hba->sata_breakpoint;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];
		struct hisi_sas_cmd_hdr *cmd_hdr = hisi_hba->cmd_hdr[i];

		s = sizeof(struct hisi_sas_cmd_hdr);
		for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++)
			memset(&cmd_hdr[j], 0, s);

		dq->wr_point = 0;

		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		memset(hisi_hba->complete_hdr[i], 0, s);
		cq->rd_point = 0;
	}

	s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
	memset(hisi_hba->initial_fis, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	memset(hisi_hba->iost, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	memset(hisi_hba->breakpoint, 0, s);

	s = sizeof(struct hisi_sas_sata_breakpoint);
	for (j = 0; j < HISI_SAS_MAX_ITCT_ENTRIES; j++)
		memset(&sata_breakpoint[j], 0, s);
}
EXPORT_SYMBOL_GPL(hisi_sas_init_mem);

int hisi_sas_alloc(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int i, j, s, max_command_entries = HISI_SAS_MAX_COMMANDS;
	int max_command_entries_ru, sz_slot_buf_ru;
	int blk_cnt, slots_per_blk;

	sema_init(&hisi_hba->sem, 1);
	spin_lock_init(&hisi_hba->lock);
	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_init(hisi_hba, i);
		hisi_hba->port[i].port_attached = 0;
		hisi_hba->port[i].id = -1;
	}

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
		hisi_hba->devices[i].device_id = i;
		hisi_hba->devices[i].dev_status = HISI_SAS_DEV_INIT;
	}

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];

		/* Completion queue structure */
		cq->id = i;
		cq->hisi_hba = hisi_hba;

		/* Delivery queue structure */
		spin_lock_init(&dq->lock);
		INIT_LIST_HEAD(&dq->list);
		dq->id = i;
		dq->hisi_hba = hisi_hba;

		/* Delivery queue */
		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s,
						&hisi_hba->cmd_hdr_dma[i],
						GFP_KERNEL);
		if (!hisi_hba->cmd_hdr[i])
			goto err_out;

		/* Completion queue */
		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s,
						&hisi_hba->complete_hdr_dma[i],
						GFP_KERNEL);
		if (!hisi_hba->complete_hdr[i])
			goto err_out;
	}

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
	hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma,
					     GFP_KERNEL);
	if (!hisi_hba->itct)
		goto err_out;

	hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
					   sizeof(struct hisi_sas_slot),
					   GFP_KERNEL);
	if (!hisi_hba->slot_info)
		goto err_out;

	/* roundup to avoid overly large block size */
	max_command_entries_ru = roundup(max_command_entries, 64);
	if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK)
		sz_slot_buf_ru = sizeof(struct hisi_sas_slot_dif_buf_table);
	else
		sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table);
	sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64);
	s = max(lcm(max_command_entries_ru, sz_slot_buf_ru), PAGE_SIZE);
	blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s;
	slots_per_blk = s / sz_slot_buf_ru;
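
	/*
	 * Worked example with illustrative figures: for
	 * max_command_entries_ru = 4096 and sz_slot_buf_ru = 576, we get
	 * lcm(4096, 576) = 36864, so (with 4 KiB pages) s = 36864,
	 * blk_cnt = 64 and slots_per_blk = 64 - i.e. 64 DMA blocks, each
	 * holding exactly 64 slot buffers with no padding wasted.
	 */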
	for (i = 0; i < blk_cnt; i++) {
		int slot_index = i * slots_per_blk;
		dma_addr_t buf_dma;
		void *buf;

		buf = dmam_alloc_coherent(dev, s, &buf_dma,
					  GFP_KERNEL);
		if (!buf)
			goto err_out;

		for (j = 0; j < slots_per_blk; j++, slot_index++) {
			struct hisi_sas_slot *slot;

			slot = &hisi_hba->slot_info[slot_index];
			slot->buf = buf;
			slot->buf_dma = buf_dma;
			slot->idx = slot_index;

			buf += sz_slot_buf_ru;
			buf_dma += sz_slot_buf_ru;
		}
	}

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma,
					     GFP_KERNEL);
	if (!hisi_hba->iost)
		goto err_out;

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	hisi_hba->breakpoint = dmam_alloc_coherent(dev, s,
						   &hisi_hba->breakpoint_dma,
						   GFP_KERNEL);
	if (!hisi_hba->breakpoint)
		goto err_out;

	s = hisi_hba->slot_index_count = max_command_entries;
	hisi_hba->slot_index_tags = devm_bitmap_zalloc(dev, s, GFP_KERNEL);
	if (!hisi_hba->slot_index_tags)
		goto err_out;

	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
	hisi_hba->initial_fis = dmam_alloc_coherent(dev, s,
						    &hisi_hba->initial_fis_dma,
						    GFP_KERNEL);
	if (!hisi_hba->initial_fis)
		goto err_out;

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
	hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s,
						&hisi_hba->sata_breakpoint_dma,
						GFP_KERNEL);
	if (!hisi_hba->sata_breakpoint)
		goto err_out;

	hisi_hba->last_slot_index = HISI_SAS_UNRESERVED_IPTT;

	hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
	if (!hisi_hba->wq) {
		dev_err(dev, "sas_alloc: failed to create workqueue\n");
		goto err_out;
	}

	return 0;
err_out:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(hisi_sas_alloc);

void hisi_sas_free(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[i];

		del_timer_sync(&phy->timer);
	}

	if (hisi_hba->wq)
		destroy_workqueue(hisi_hba->wq);
}
EXPORT_SYMBOL_GPL(hisi_sas_free);
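
/*
 * Two flavours of controller reset work: hisi_sas_rst_work_handler() is
 * fire-and-forget (queued e.g. from hisi_sas_host_reset()), while
 * hisi_sas_sync_rst_work_handler() records success in rst->done and
 * signals rst->completion so that a caller such as
 * hisi_sas_clear_nexus_ha() can wait for the result.
 */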
void hisi_sas_rst_work_handler(struct work_struct *work)
{
	struct hisi_hba *hisi_hba =
		container_of(work, struct hisi_hba, rst_work);

	if (hisi_sas_controller_prereset(hisi_hba))
		return;

	hisi_sas_controller_reset(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);

void hisi_sas_sync_rst_work_handler(struct work_struct *work)
{
	struct hisi_sas_rst *rst =
		container_of(work, struct hisi_sas_rst, work);

	if (hisi_sas_controller_prereset(rst->hisi_hba))
		goto rst_complete;

	if (!hisi_sas_controller_reset(rst->hisi_hba))
		rst->done = true;
rst_complete:
	complete(rst->completion);
}
EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);

int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct platform_device *pdev = hisi_hba->platform_dev;
	struct device_node *np = pdev ? pdev->dev.of_node : NULL;
	struct clk *refclk;

	if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
					  SAS_ADDR_SIZE)) {
		dev_err(dev, "could not get property sas-addr\n");
		return -ENOENT;
	}

	if (np) {
		/*
		 * These properties are only required for platform
		 * device-based controllers with DT firmware.
		 */
		hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
					"hisilicon,sas-syscon");
		if (IS_ERR(hisi_hba->ctrl)) {
			dev_err(dev, "could not get syscon\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-reg",
					     &hisi_hba->ctrl_reset_reg)) {
			dev_err(dev, "could not get property ctrl-reset-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
					     &hisi_hba->ctrl_reset_sts_reg)) {
			dev_err(dev, "could not get property ctrl-reset-sts-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
					     &hisi_hba->ctrl_clock_ena_reg)) {
			dev_err(dev, "could not get property ctrl-clock-ena-reg\n");
			return -ENOENT;
		}
	}

	refclk = devm_clk_get(dev, NULL);
	if (IS_ERR(refclk))
		dev_dbg(dev, "no ref clk property\n");
	else
		hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;

	if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
		dev_err(dev, "could not get property phy-count\n");
		return -ENOENT;
	}

	if (device_property_read_u32(dev, "queue-count",
				     &hisi_hba->queue_count)) {
		dev_err(dev, "could not get property queue-count\n");
		return -ENOENT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);
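
/*
 * Illustrative devicetree fragment (invented values) supplying the
 * properties parsed by hisi_sas_get_fw_info() above:
 *
 *	sas@c1000000 {
 *		sas-addr = [50 01 88 20 16 00 00 00];
 *		hisilicon,sas-syscon = <&pcie_subctl>;
 *		ctrl-reset-reg = <0xa60>;
 *		ctrl-reset-sts-reg = <0x5a30>;
 *		ctrl-clock-ena-reg = <0x338>;
 *		phy-count = <8>;
 *		queue-count = <32>;
 *	};
 */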
static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
					      const struct hisi_sas_hw *hw)
{
	struct resource *res;
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	int error;

	shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba));
	if (!shost) {
		dev_err(dev, "scsi host alloc failed\n");
		return NULL;
	}
	hisi_hba = shost_priv(shost);

	INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
	hisi_hba->hw = hw;
	hisi_hba->dev = dev;
	hisi_hba->platform_dev = pdev;
	hisi_hba->shost = shost;
	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

	timer_setup(&hisi_hba->timer, NULL, 0);

	if (hisi_sas_get_fw_info(hisi_hba) < 0)
		goto err_out;

	error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (error) {
		dev_err(dev, "No usable DMA addressing method\n");
		goto err_out;
	}

	hisi_hba->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(hisi_hba->regs))
		goto err_out;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {
		hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
		if (IS_ERR(hisi_hba->sgpio_regs))
			goto err_out;
	}

	if (hisi_sas_alloc(hisi_hba)) {
		hisi_sas_free(hisi_hba);
		goto err_out;
	}

	return shost;
err_out:
	scsi_host_put(shost);
	dev_err(dev, "shost alloc failed\n");
	return NULL;
}

static int hisi_sas_interrupt_preinit(struct hisi_hba *hisi_hba)
{
	if (hisi_hba->hw->interrupt_preinit)
		return hisi_hba->hw->interrupt_preinit(hisi_hba);
	return 0;
}
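
/*
 * Probe order below: allocate the Scsi_Host and driver memory, wire up the
 * libsas phy/port arrays, pre-init interrupts, register the SCSI host and
 * then the SAS HA, run the hardware init, and finally scan; the error
 * labels unwind in reverse order.
 */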
int hisi_sas_probe(struct platform_device *pdev,
		   const struct hisi_sas_hw *hw)
{
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	struct asd_sas_phy **arr_phy;
	struct asd_sas_port **arr_port;
	struct sas_ha_struct *sha;
	int rc, phy_nr, port_nr, i;

	shost = hisi_sas_shost_alloc(pdev, hw);
	if (!shost)
		return -ENOMEM;

	sha = SHOST_TO_SAS_HA(shost);
	hisi_hba = shost_priv(shost);
	platform_set_drvdata(pdev, sha);

	phy_nr = port_nr = hisi_hba->n_phy;

	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
	arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
	if (!arr_phy || !arr_port) {
		rc = -ENOMEM;
		goto err_out_ha;
	}

	sha->sas_phy = arr_phy;
	sha->sas_port = arr_port;
	sha->lldd_ha = hisi_hba;

	shost->transportt = hisi_sas_stt;
	shost->max_id = HISI_SAS_MAX_DEVICES;
	shost->max_lun = ~0;
	shost->max_channel = 1;
	shost->max_cmd_len = 16;
	if (hisi_hba->hw->slot_index_alloc) {
		shost->can_queue = HISI_SAS_MAX_COMMANDS;
		shost->cmd_per_lun = HISI_SAS_MAX_COMMANDS;
	} else {
		shost->can_queue = HISI_SAS_UNRESERVED_IPTT;
		shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT;
	}

	sha->sas_ha_name = DRV_NAME;
	sha->dev = hisi_hba->dev;
	sha->lldd_module = THIS_MODULE;
	sha->sas_addr = &hisi_hba->sas_addr[0];
	sha->num_phys = hisi_hba->n_phy;
	sha->core.shost = hisi_hba->shost;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
	}

	rc = hisi_sas_interrupt_preinit(hisi_hba);
	if (rc)
		goto err_out_ha;

	rc = scsi_add_host(shost, &pdev->dev);
	if (rc)
		goto err_out_ha;

	rc = sas_register_ha(sha);
	if (rc)
		goto err_out_register_ha;

	rc = hisi_hba->hw->hw_init(hisi_hba);
	if (rc)
		goto err_out_hw_init;

	scsi_scan_host(shost);

	return 0;

err_out_hw_init:
	sas_unregister_ha(sha);
err_out_register_ha:
	scsi_remove_host(shost);
err_out_ha:
	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
	return rc;
}
EXPORT_SYMBOL_GPL(hisi_sas_probe);

int hisi_sas_remove(struct platform_device *pdev)
{
	struct sas_ha_struct *sha = platform_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct Scsi_Host *shost = sha->core.shost;

	del_timer_sync(&hisi_hba->timer);

	sas_unregister_ha(sha);
	sas_remove_host(sha->core.shost);

	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_remove);
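
/*
 * debugfs defaults and module parameters. Illustrative load-time usage
 * (module name depends on how the driver is built):
 *
 *	modprobe hisi_sas_main debugfs_enable=1 debugfs_dump_count=10
 */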
#if IS_ENABLED(CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE)
#define DEBUGFS_ENABLE_DEFAULT "enabled"
bool hisi_sas_debugfs_enable = true;
u32 hisi_sas_debugfs_dump_count = 50;
#else
#define DEBUGFS_ENABLE_DEFAULT "disabled"
bool hisi_sas_debugfs_enable;
u32 hisi_sas_debugfs_dump_count = 1;
#endif

EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable);
module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444);
MODULE_PARM_DESC(debugfs_enable,
		 "Enable driver debugfs (default "DEBUGFS_ENABLE_DEFAULT")");

EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dump_count);
module_param_named(debugfs_dump_count, hisi_sas_debugfs_dump_count, uint, 0444);
MODULE_PARM_DESC(debugfs_dump_count, "Number of debugfs dumps to allow");

struct dentry *hisi_sas_debugfs_dir;
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dir);

static __init int hisi_sas_init(void)
{
	hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
	if (!hisi_sas_stt)
		return -ENOMEM;

	if (hisi_sas_debugfs_enable) {
		hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL);
		if (hisi_sas_debugfs_dump_count > HISI_SAS_MAX_DEBUGFS_DUMP) {
			pr_info("hisi_sas: Limiting debugfs dump count\n");
			hisi_sas_debugfs_dump_count = HISI_SAS_MAX_DEBUGFS_DUMP;
		}
	}

	return 0;
}

static __exit void hisi_sas_exit(void)
{
	sas_release_transport(hisi_sas_stt);

	debugfs_remove(hisi_sas_debugfs_dir);
}

module_init(hisi_sas_init);
module_exit(hisi_sas_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller driver");
MODULE_ALIAS("platform:" DRV_NAME);