// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2015 Linaro Ltd.
 * Copyright (c) 2015 Hisilicon Limited.
 */

#include "hisi_sas.h"
#define DRV_NAME "hisi_sas"

#define DEV_IS_GONE(dev) \
	((!dev) || (dev->dev_type == SAS_PHY_UNUSED))

static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
					u8 *lun, struct hisi_sas_tmf_task *tmf);
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag, bool rst_to_recover);
static int hisi_sas_softreset_ata_disk(struct domain_device *device);
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata);
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
				  struct domain_device *device);
static void hisi_sas_dev_gone(struct domain_device *device);

u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
	switch (fis->command) {
	case ATA_CMD_FPDMA_WRITE:
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_RECV:
	case ATA_CMD_FPDMA_SEND:
	case ATA_CMD_NCQ_NON_DATA:
		return HISI_SAS_SATA_PROTOCOL_FPDMA;

	case ATA_CMD_DOWNLOAD_MICRO:
	case ATA_CMD_ID_ATA:
	case ATA_CMD_PMP_READ:
	case ATA_CMD_READ_LOG_EXT:
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
	case ATA_CMD_PMP_WRITE:
	case ATA_CMD_WRITE_LOG_EXT:
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		return HISI_SAS_SATA_PROTOCOL_PIO;

	case ATA_CMD_DSM:
	case ATA_CMD_DOWNLOAD_MICRO_DMA:
	case ATA_CMD_PMP_READ_DMA:
	case ATA_CMD_PMP_WRITE_DMA:
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_READ_LOG_DMA_EXT:
	case ATA_CMD_READ_STREAM_DMA_EXT:
	case ATA_CMD_TRUSTED_RCV_DMA:
	case ATA_CMD_TRUSTED_SND_DMA:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
	case ATA_CMD_WRITE_QUEUED:
	case ATA_CMD_WRITE_LOG_DMA_EXT:
	case ATA_CMD_WRITE_STREAM_DMA_EXT:
	case ATA_CMD_ZAC_MGMT_IN:
		return HISI_SAS_SATA_PROTOCOL_DMA;

	case ATA_CMD_CHK_POWER:
	case ATA_CMD_DEV_RESET:
	case ATA_CMD_EDD:
	case ATA_CMD_FLUSH:
	case ATA_CMD_FLUSH_EXT:
	case ATA_CMD_VERIFY:
	case ATA_CMD_VERIFY_EXT:
	case ATA_CMD_SET_FEATURES:
	case ATA_CMD_STANDBY:
	case ATA_CMD_STANDBYNOW1:
	case ATA_CMD_ZAC_MGMT_OUT:
		return HISI_SAS_SATA_PROTOCOL_NONDATA;

	case ATA_CMD_SET_MAX:
		switch (fis->features) {
		case ATA_SET_MAX_PASSWD:
		case ATA_SET_MAX_LOCK:
			return HISI_SAS_SATA_PROTOCOL_PIO;

		case ATA_SET_MAX_PASSWD_DMA:
		case ATA_SET_MAX_UNLOCK_DMA:
			return HISI_SAS_SATA_PROTOCOL_DMA;

		default:
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		}

	default:
	{
		if (direction == DMA_NONE)
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		return HISI_SAS_SATA_PROTOCOL_PIO;
	}
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);

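/*
 * Copy the D2H FIS from the slot's status buffer into the libsas
 * ata_task_resp, so the SATA completion can be decoded by libata.
 */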
void hisi_sas_sata_done(struct sas_task *task,
			struct hisi_sas_slot *slot)
{
	struct task_status_struct *ts = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
	struct hisi_sas_status_buffer *status_buf =
		hisi_sas_status_buf_addr_mem(slot);
	u8 *iu = &status_buf->iu[0];
	struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;

	resp->frame_len = sizeof(struct dev_to_host_fis);
	memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));

	ts->buf_valid_size = sizeof(*resp);
}
EXPORT_SYMBOL_GPL(hisi_sas_sata_done);

/*
 * This function assumes linkrate mask fits in 8 bits, which it
 * does for all HW versions supported.
 */
u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)
{
	u8 rate = 0;
	int i;

	max -= SAS_LINK_RATE_1_5_GBPS;
	for (i = 0; i <= max; i++)
		rate |= 1 << (i * 2);
	return rate;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask);

static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
{
	return device->port->ha->lldd_ha;
}

struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
{
	return container_of(sas_port, struct hisi_sas_port, sas_port);
}
EXPORT_SYMBOL_GPL(to_hisi_sas_port);

void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
{
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
}
EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);

static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	__clear_bit(slot_idx, bitmap);
}

static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
	if (hisi_hba->hw->slot_index_alloc ||
	    slot_idx >= HISI_SAS_UNRESERVED_IPTT) {
		spin_lock(&hisi_hba->lock);
		hisi_sas_slot_index_clear(hisi_hba, slot_idx);
		spin_unlock(&hisi_hba->lock);
	}
}

static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	__set_bit(slot_idx, bitmap);
}

static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
				     struct scsi_cmnd *scsi_cmnd)
{
	int index;
	void *bitmap = hisi_hba->slot_index_tags;

	if (scsi_cmnd)
		return scsi_cmd_to_rq(scsi_cmnd)->tag;

	spin_lock(&hisi_hba->lock);
	index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
				   hisi_hba->last_slot_index + 1);
	if (index >= hisi_hba->slot_index_count) {
		index = find_next_zero_bit(bitmap,
					   hisi_hba->slot_index_count,
					   HISI_SAS_UNRESERVED_IPTT);
		if (index >= hisi_hba->slot_index_count) {
			spin_unlock(&hisi_hba->lock);
			return -SAS_QUEUE_FULL;
		}
	}
	hisi_sas_slot_index_set(hisi_hba, index);
	hisi_hba->last_slot_index = index;
	spin_unlock(&hisi_hba->lock);

	return index;
}

void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
			     struct hisi_sas_slot *slot)
{
	int device_id = slot->device_id;
	struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id];

	if (task) {
		struct device *dev = hisi_hba->dev;

		if (!task->lldd_task)
			return;

		task->lldd_task = NULL;

		if (!sas_protocol_ata(task->task_proto)) {
			if (slot->n_elem)
				dma_unmap_sg(dev, task->scatter,
					     task->num_scatter,
					     task->data_dir);
			if (slot->n_elem_dif) {
				struct sas_ssp_task *ssp_task = &task->ssp_task;
				struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;

				dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
					     scsi_prot_sg_count(scsi_cmnd),
					     task->data_dir);
			}
		}
	}

	spin_lock(&sas_dev->lock);
	list_del_init(&slot->entry);
	spin_unlock(&sas_dev->lock);

	memset(slot, 0, offsetof(struct hisi_sas_slot, buf));

	hisi_sas_slot_index_free(hisi_hba, slot->idx);
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);

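/*
 * Per-protocol command-table preparation: each wrapper simply delegates
 * to the HW-generation-specific callback in hisi_hba->hw.
 */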
static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_smp(hisi_hba, slot);
}

static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_ssp(hisi_hba, slot);
}

static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_stp(hisi_hba, slot);
}

static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
				     struct hisi_sas_internal_abort *abort,
				     struct hisi_sas_slot *slot, int device_id)
{
	hisi_hba->hw->prep_abort(hisi_hba, slot,
				 device_id, abort->flag, abort->tag);
}

static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba,
			       struct sas_task *task, int n_elem,
			       int n_elem_req)
{
	struct device *dev = hisi_hba->dev;

	if (!sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter) {
			if (n_elem)
				dma_unmap_sg(dev, task->scatter,
					     task->num_scatter,
					     task->data_dir);
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			if (n_elem_req)
				dma_unmap_sg(dev, &task->smp_task.smp_req,
					     1, DMA_TO_DEVICE);
		}
	}
}

static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
			    struct sas_task *task, int *n_elem,
			    int *n_elem_req)
{
	struct device *dev = hisi_hba->dev;
	int rc;

	if (sas_protocol_ata(task->task_proto)) {
		*n_elem = task->num_scatter;
	} else {
		unsigned int req_len;

		if (task->num_scatter) {
			*n_elem = dma_map_sg(dev, task->scatter,
					     task->num_scatter, task->data_dir);
			if (!*n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			*n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
						 1, DMA_TO_DEVICE);
			if (!*n_elem_req) {
				rc = -ENOMEM;
				goto prep_out;
			}
			req_len = sg_dma_len(&task->smp_task.smp_req);
			if (req_len & 0x3) {
				rc = -EINVAL;
				goto err_out_dma_unmap;
			}
		}
	}

	if (*n_elem > HISI_SAS_SGE_PAGE_CNT) {
		dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT\n",
			*n_elem);
		rc = -EINVAL;
		goto err_out_dma_unmap;
	}
	return 0;

err_out_dma_unmap:
	/* It would be better to call dma_unmap_sg() here, but it's messy */
	hisi_sas_dma_unmap(hisi_hba, task, *n_elem,
			   *n_elem_req);
prep_out:
	return rc;
}

static void hisi_sas_dif_dma_unmap(struct hisi_hba *hisi_hba,
				   struct sas_task *task, int n_elem_dif)
{
	struct device *dev = hisi_hba->dev;

	if (n_elem_dif) {
		struct sas_ssp_task *ssp_task = &task->ssp_task;
		struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;

		dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
			     scsi_prot_sg_count(scsi_cmnd),
			     task->data_dir);
	}
}

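/*
 * Map the scatterlist carrying protection information (DIF), if the
 * command has one; the count is bounded by HISI_SAS_SGE_DIF_PAGE_CNT.
 */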
static int hisi_sas_dif_dma_map(struct hisi_hba *hisi_hba,
				int *n_elem_dif, struct sas_task *task)
{
	struct device *dev = hisi_hba->dev;
	struct sas_ssp_task *ssp_task;
	struct scsi_cmnd *scsi_cmnd;
	int rc;

	if (task->num_scatter) {
		ssp_task = &task->ssp_task;
		scsi_cmnd = ssp_task->cmd;

		if (scsi_prot_sg_count(scsi_cmnd)) {
			*n_elem_dif = dma_map_sg(dev,
						 scsi_prot_sglist(scsi_cmnd),
						 scsi_prot_sg_count(scsi_cmnd),
						 task->data_dir);

			if (!*n_elem_dif)
				return -ENOMEM;

			if (*n_elem_dif > HISI_SAS_SGE_DIF_PAGE_CNT) {
				dev_err(dev, "task prep: n_elem_dif(%d) too large\n",
					*n_elem_dif);
				rc = -EINVAL;
				goto err_out_dif_dma_unmap;
			}
		}
	}

	return 0;

err_out_dif_dma_unmap:
	dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
		     scsi_prot_sg_count(scsi_cmnd), task->data_dir);
	return rc;
}

static
void hisi_sas_task_deliver(struct hisi_hba *hisi_hba,
			   struct hisi_sas_slot *slot,
			   struct hisi_sas_dq *dq,
			   struct hisi_sas_device *sas_dev,
			   struct hisi_sas_internal_abort *abort,
			   struct hisi_sas_tmf_task *tmf)
{
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	int dlvry_queue_slot, dlvry_queue;
	struct sas_task *task = slot->task;
	unsigned long flags;
	int wr_q_index;

	spin_lock(&dq->lock);
	wr_q_index = dq->wr_point;
	dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
	list_add_tail(&slot->delivery, &dq->list);
	spin_unlock(&dq->lock);
	spin_lock(&sas_dev->lock);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock(&sas_dev->lock);

	dlvry_queue = dq->id;
	dlvry_queue_slot = wr_q_index;

	slot->device_id = sas_dev->device_id;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];

	slot->tmf = tmf;
	slot->is_internal = tmf;
	task->lldd_task = slot;

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0,
	       sizeof(struct hisi_sas_err_record));

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		hisi_sas_task_prep_smp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SSP:
		hisi_sas_task_prep_ssp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		hisi_sas_task_prep_ata(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_NONE:
		if (abort) {
			hisi_sas_task_prep_abort(hisi_hba, abort, slot, sas_dev->device_id);
			break;
		}
		fallthrough;
	default:
		dev_err(hisi_hba->dev, "task prep: unknown/unsupported proto (0x%x)\n",
			task->task_proto);
		break;
	}

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	WRITE_ONCE(slot->ready, 1);

	spin_lock(&dq->lock);
	hisi_hba->hw->start_delivery(dq);
	spin_unlock(&dq->lock);
}

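/*
 * Common I/O submission path: map DMA, allocate a slot (IPTT), pick a
 * delivery queue and hand the prepared slot to hisi_sas_task_deliver().
 */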
static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
			      struct hisi_sas_tmf_task *tmf)
{
	int n_elem = 0, n_elem_dif = 0, n_elem_req = 0;
	struct domain_device *device = task->dev;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct scsi_cmnd *scmd = NULL;
	struct hisi_sas_dq *dq = NULL;
	struct hisi_sas_port *port;
	struct hisi_hba *hisi_hba;
	struct hisi_sas_slot *slot;
	struct device *dev;
	int rc;

	if (!sas_port) {
		struct task_status_struct *ts = &task->task_status;

		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port; should not call
		 * task_done() for SATA
		 */
		if (device->dev_type != SAS_SATA_DEV)
			task->task_done(task);
		return -ECOMM;
	}

	hisi_hba = dev_to_hisi_hba(device);
	dev = hisi_hba->dev;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
		if (!gfpflags_allow_blocking(gfp_flags))
			return -EINVAL;

		down(&hisi_hba->sem);
		up(&hisi_hba->sem);
	}

	if (DEV_IS_GONE(sas_dev)) {
		if (sas_dev)
			dev_info(dev, "task prep: device %d not ready\n",
				 sas_dev->device_id);
		else
			dev_info(dev, "task prep: device %016llx not ready\n",
				 SAS_ADDR(device->sas_addr));

		return -ECOMM;
	}

	if (task->uldd_task) {
		struct ata_queued_cmd *qc;

		if (dev_is_sata(device)) {
			qc = task->uldd_task;
			scmd = qc->scsicmd;
		} else {
			scmd = task->uldd_task;
		}
	}

	if (scmd) {
		unsigned int dq_index;
		u32 blk_tag;

		blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
		dq_index = blk_mq_unique_tag_to_hwq(blk_tag);
		dq = &hisi_hba->dq[dq_index];
	} else {
		struct Scsi_Host *shost = hisi_hba->shost;
		struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
		int queue = qmap->mq_map[raw_smp_processor_id()];

		dq = &hisi_hba->dq[queue];
	}

	port = to_hisi_sas_port(sas_port);
	if (port && !port->port_attached) {
		dev_info(dev, "task prep: %s port%d not attach device\n",
			 (dev_is_sata(device)) ?
			 "SATA/STP" : "SAS",
			 device->port->id);

		return -ECOMM;
	}

	rc = hisi_sas_dma_map(hisi_hba, task, &n_elem,
			      &n_elem_req);
	if (rc < 0)
		goto prep_out;

	if (!sas_protocol_ata(task->task_proto)) {
		rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task);
		if (rc < 0)
			goto err_out_dma_unmap;
	}

	if (hisi_hba->hw->slot_index_alloc)
		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
	else
		rc = hisi_sas_slot_index_alloc(hisi_hba, scmd);

	if (rc < 0)
		goto err_out_dif_dma_unmap;

	slot = &hisi_hba->slot_info[rc];
	slot->n_elem = n_elem;
	slot->n_elem_dif = n_elem_dif;
	slot->task = task;
	slot->port = port;

	slot->tmf = tmf;
	slot->is_internal = tmf;

	/* protect task_prep and start_delivery sequence */
	hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev, NULL, tmf);

	return 0;

err_out_dif_dma_unmap:
	if (!sas_protocol_ata(task->task_proto))
		hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif);
err_out_dma_unmap:
	hisi_sas_dma_unmap(hisi_hba, task, n_elem,
			   n_elem_req);
prep_out:
	dev_err(dev, "task exec: failed[%d]!\n", rc);
	return rc;
}

static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no,
				 gfp_t gfp_flags)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	if (!phy->phy_attached)
		return;

	sas_notify_phy_event(sas_phy, PHYE_OOB_DONE, gfp_flags);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate_hw =
			hisi_hba->hw->phy_get_max_linkrate();
		if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->minimum_linkrate = phy->minimum_linkrate;

		if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->maximum_linkrate = phy->maximum_linkrate;
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* Nothing */
	}

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
	sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, gfp_flags);
}

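/*
 * Allocate a free device slot in hisi_hba->devices[], scanning from just
 * past the last allocated index so device IDs are handed out round-robin.
 */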
static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = NULL;
	int last = hisi_hba->last_dev_id;
	int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES;
	int i;

	spin_lock(&hisi_hba->lock);
	for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) {
		if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
			int queue = i % hisi_hba->queue_count;
			struct hisi_sas_dq *dq = &hisi_hba->dq[queue];

			hisi_hba->devices[i].device_id = i;
			sas_dev = &hisi_hba->devices[i];
			sas_dev->dev_status = HISI_SAS_DEV_INIT;
			sas_dev->dev_type = device->dev_type;
			sas_dev->hisi_hba = hisi_hba;
			sas_dev->sas_device = device;
			sas_dev->dq = dq;
			spin_lock_init(&sas_dev->lock);
			INIT_LIST_HEAD(&hisi_hba->devices[i].list);
			break;
		}
		i++;
	}
	hisi_hba->last_dev_id = i;
	spin_unlock(&hisi_hba->lock);

	return sas_dev;
}

#define HISI_SAS_DISK_RECOVER_CNT 3
static int hisi_sas_init_device(struct domain_device *device)
{
	int rc = TMF_RESP_FUNC_COMPLETE;
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int retry = HISI_SAS_DISK_RECOVER_CNT;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	struct sas_phy *local_phy;

	switch (device->dev_type) {
	case SAS_END_DEVICE:
		int_to_scsilun(0, &lun);

		tmf_task.tmf = TMF_CLEAR_TASK_SET;
		while (retry-- > 0) {
			rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun,
							  &tmf_task);
			if (rc == TMF_RESP_FUNC_COMPLETE) {
				hisi_sas_release_task(hisi_hba, device);
				break;
			}
		}
		break;
	case SAS_SATA_DEV:
	case SAS_SATA_PM:
	case SAS_SATA_PM_PORT:
	case SAS_SATA_PENDING:
		/*
		 * send HARD RESET to clear previous affiliation of
		 * STP target port
		 */
		local_phy = sas_get_local_phy(device);
		if (!scsi_is_sas_phy_local(local_phy) &&
		    !test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
			unsigned long deadline = ata_deadline(jiffies, 20000);
			struct sata_device *sata_dev = &device->sata_dev;
			struct ata_host *ata_host = sata_dev->ata_host;
			struct ata_port_operations *ops = ata_host->ops;
			struct ata_port *ap = sata_dev->ap;
			struct ata_link *link;
			unsigned int classes;

			ata_for_each_link(link, ap, EDGE)
				rc = ops->hardreset(link, &classes,
						    deadline);
		}
		sas_put_local_phy(local_phy);
		if (rc) {
			dev_warn(dev, "SATA disk hardreset fail: %d\n", rc);
			return rc;
		}

		while (retry-- > 0) {
			rc = hisi_sas_softreset_ata_disk(device);
			if (!rc)
				break;
		}
		break;
	default:
		break;
	}

	return rc;
}

int hisi_sas_slave_alloc(struct scsi_device *sdev)
{
	struct domain_device *ddev;
	int rc;

	rc = sas_slave_alloc(sdev);
	if (rc)
		return rc;
	ddev = sdev_to_domain_dev(sdev);

	return hisi_sas_init_device(ddev);
}
EXPORT_SYMBOL_GPL(hisi_sas_slave_alloc);

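/*
 * libsas dev_found callback: allocate an LLDD device, program its ITCT,
 * and for expander-attached devices verify that a parent phy really leads
 * to this SAS address.
 */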
static int hisi_sas_dev_found(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev;
	struct device *dev = hisi_hba->dev;
	int rc;

	if (hisi_hba->hw->alloc_dev)
		sas_dev = hisi_hba->hw->alloc_dev(device);
	else
		sas_dev = hisi_sas_alloc_dev(device);
	if (!sas_dev) {
		dev_err(dev, "fail alloc dev: max support %d devices\n",
			HISI_SAS_MAX_DEVICES);
		return -EINVAL;
	}

	device->lldd_dev = sas_dev;
	hisi_hba->hw->setup_itct(hisi_hba, sas_dev);

	if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
		int phy_no;
		u8 phy_num = parent_dev->ex_dev.num_phys;
		struct ex_phy *phy;

		for (phy_no = 0; phy_no < phy_num; phy_no++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_no];
			if (SAS_ADDR(phy->attached_sas_addr) ==
			    SAS_ADDR(device->sas_addr))
				break;
		}

		if (phy_no == phy_num) {
			dev_info(dev, "dev found: no attached dev:%016llx at ex:%016llx\n",
				 SAS_ADDR(device->sas_addr),
				 SAS_ADDR(parent_dev->sas_addr));
			rc = -EINVAL;
			goto err_out;
		}
	}

	dev_info(dev, "dev[%d:%x] found\n",
		 sas_dev->device_id, sas_dev->dev_type);

	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
	return 0;

err_out:
	hisi_sas_dev_gone(device);
	return rc;
}

int hisi_sas_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int ret = sas_slave_configure(sdev);

	if (ret)
		return ret;
	if (!dev_is_sata(dev))
		sas_change_queue_depth(sdev, 64);

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_slave_configure);

void hisi_sas_scan_start(struct Scsi_Host *shost)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	hisi_hba->hw->phys_init(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_start);

int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);
	struct sas_ha_struct *sha = &hisi_hba->sha;

	/* Wait for PHY up interrupt to occur */
	if (time < HZ)
		return 0;

	sas_drain_work(sha);
	return 1;
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_finished);

static void hisi_sas_phyup_work_common(struct work_struct *work,
				       enum hisi_sas_phy_event event)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[event]);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int phy_no = sas_phy->id;

	phy->wait_phyup_cnt = 0;
	if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP)
		hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no);
	hisi_sas_bytes_dmaed(hisi_hba, phy_no, GFP_KERNEL);
}

static void hisi_sas_phyup_work(struct work_struct *work)
{
	hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP);
}

static void hisi_sas_linkreset_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
}

static void hisi_sas_phyup_pm_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP_PM]);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct device *dev = hisi_hba->dev;

	hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP_PM);
	pm_runtime_put_sync(dev);
}

static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
	[HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
	[HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
	[HISI_PHYE_PHY_UP_PM] = hisi_sas_phyup_pm_work,
};

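/*
 * Queue one of the per-phy event works (phy-up, link reset, phy-up with
 * runtime PM); returns false if the work was already queued.
 */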
bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
			       enum hisi_sas_phy_event event)
{
	struct hisi_hba *hisi_hba = phy->hisi_hba;

	if (WARN_ON(event >= HISI_PHYES_NUM))
		return false;

	return queue_work(hisi_hba->wq, &phy->works[event]);
}
EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);

static void hisi_sas_wait_phyup_timedout(struct timer_list *t)
{
	struct hisi_sas_phy *phy = from_timer(phy, t, timer);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct device *dev = hisi_hba->dev;
	int phy_no = phy->sas_phy.id;

	dev_warn(dev, "phy%d wait phyup timeout, issuing link reset\n", phy_no);
	hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
}

#define HISI_SAS_WAIT_PHYUP_RETRIES 10

void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct device *dev = hisi_hba->dev;
	unsigned long flags;

	dev_dbg(dev, "phy%d OOB ready\n", phy_no);
	spin_lock_irqsave(&phy->lock, flags);
	if (phy->phy_attached) {
		spin_unlock_irqrestore(&phy->lock, flags);
		return;
	}

	if (!timer_pending(&phy->timer)) {
		if (phy->wait_phyup_cnt < HISI_SAS_WAIT_PHYUP_RETRIES) {
			phy->wait_phyup_cnt++;
			phy->timer.expires = jiffies +
					     HISI_SAS_WAIT_PHYUP_TIMEOUT;
			add_timer(&phy->timer);
			spin_unlock_irqrestore(&phy->lock, flags);
			return;
		}

		dev_warn(dev, "phy%d failed to come up %d times, giving up\n",
			 phy_no, phy->wait_phyup_cnt);
		phy->wait_phyup_cnt = 0;
	}
	spin_unlock_irqrestore(&phy->lock, flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready);

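/*
 * One-time initialisation of a phy: default link rates, libsas phy
 * fields, per-event work items and the wait-phyup timer.
 */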
static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int i;

	phy->hisi_hba = hisi_hba;
	phy->port = NULL;
	phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
	phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
	sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
	sas_phy->id = phy_no;
	sas_phy->sas_addr = &hisi_hba->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
	sas_phy->lldd_phy = phy;

	for (i = 0; i < HISI_PHYES_NUM; i++)
		INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);

	spin_lock_init(&phy->lock);

	timer_setup(&phy->timer, hisi_sas_wait_phyup_timedout, 0);
}

/* Wrapper to ensure we track hisi_sas_phy.enable properly */
void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no, int enable)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *aphy = &phy->sas_phy;
	struct sas_phy *sphy = aphy->phy;
	unsigned long flags;

	spin_lock_irqsave(&phy->lock, flags);

	if (enable) {
		/* We may have been enabled already; if so, don't touch */
		if (!phy->enable)
			sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
		hisi_hba->hw->phy_start(hisi_hba, phy_no);
	} else {
		sphy->negotiated_linkrate = SAS_PHY_DISABLED;
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
	}
	phy->enable = enable;
	spin_unlock_irqrestore(&phy->lock, flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_enable);

static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct hisi_sas_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct hisi_sas_port *port;
	unsigned long flags;

	if (!sas_port)
		return;

	port = to_hisi_sas_port(sas_port);
	spin_lock_irqsave(&hisi_hba->lock, flags);
	port->port_attached = 1;
	port->id = phy->port_id;
	phy->port = port;
	sas_port->lldd_port = port;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
}

static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
				     struct hisi_sas_slot *slot)
{
	if (task) {
		unsigned long flags;
		struct task_status_struct *ts;

		ts = &task->task_status;

		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_ABORTED_TASK;
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags &=
			~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
		if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP)
			task->task_state_flags |= SAS_TASK_STATE_DONE;
		spin_unlock_irqrestore(&task->task_state_lock, flags);
	}

	hisi_sas_slot_task_free(hisi_hba, task, slot);
}

static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
				  struct domain_device *device)
{
	struct hisi_sas_slot *slot, *slot2;
	struct hisi_sas_device *sas_dev = device->lldd_dev;

	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
		hisi_sas_do_release_task(hisi_hba, slot->task, slot);
}

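/*
 * Complete every outstanding slot of every registered device as
 * SAS_ABORTED_TASK; used when the whole controller is being recovered.
 */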
void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
		    !device)
			continue;

		hisi_sas_release_task(hisi_hba, device);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);

static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
				  struct domain_device *device)
{
	if (hisi_hba->hw->dereg_device)
		hisi_hba->hw->dereg_device(hisi_hba, device);
}

static void hisi_sas_dev_gone(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int ret = 0;

	dev_info(dev, "dev[%d:%x] is gone\n",
		 sas_dev->device_id, sas_dev->dev_type);

	down(&hisi_hba->sem);
	if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
		hisi_sas_internal_task_abort(hisi_hba, device,
					     HISI_SAS_INT_ABT_DEV, 0, true);

		hisi_sas_dereg_device(hisi_hba, device);

		ret = hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
		device->lldd_dev = NULL;
	}

	if (hisi_hba->hw->free_device)
		hisi_hba->hw->free_device(sas_dev);

	/* Don't mark it as SAS_PHY_UNUSED if clearing the ITCT failed */
	if (!ret)
		sas_dev->dev_type = SAS_PHY_UNUSED;
	sas_dev->sas_device = NULL;
	up(&hisi_hba->sem);
}

static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	return hisi_sas_task_exec(task, gfp_flags, NULL);
}

static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
				     struct sas_phy_linkrates *r)
{
	struct sas_phy_linkrates _r;

	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	enum sas_linkrate min, max;

	if (r->minimum_linkrate > SAS_LINK_RATE_1_5_GBPS)
		return -EINVAL;

	if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = sas_phy->phy->maximum_linkrate;
		min = r->minimum_linkrate;
	} else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = r->maximum_linkrate;
		min = sas_phy->phy->minimum_linkrate;
	} else
		return -EINVAL;

	_r.maximum_linkrate = max;
	_r.minimum_linkrate = min;

	sas_phy->phy->maximum_linkrate = max;
	sas_phy->phy->minimum_linkrate = min;

	hisi_sas_phy_enable(hisi_hba, phy_no, 0);
	msleep(100);
	hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
	hisi_sas_phy_enable(hisi_hba, phy_no, 1);

	return 0;
}

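/*
 * libsas phy control entry point: serialised against controller reset
 * via hisi_hba->sem, and (for resets) waits up to 2s for the phy to
 * come back up before returning.
 */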
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata)
{
	struct hisi_sas_phy *phy = container_of(sas_phy,
			struct hisi_sas_phy, sas_phy);
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct device *dev = hisi_hba->dev;
	DECLARE_COMPLETION_ONSTACK(completion);
	int phy_no = sas_phy->id;
	u8 sts = phy->phy_attached;
	int ret = 0;

	down(&hisi_hba->sem);
	phy->reset_completion = &completion;

	switch (func) {
	case PHY_FUNC_HARD_RESET:
		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
		break;

	case PHY_FUNC_LINK_RESET:
		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
		msleep(100);
		hisi_sas_phy_enable(hisi_hba, phy_no, 1);
		break;

	case PHY_FUNC_DISABLE:
		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
		goto out;

	case PHY_FUNC_SET_LINK_RATE:
		ret = hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
		break;

	case PHY_FUNC_GET_EVENTS:
		if (hisi_hba->hw->get_events) {
			hisi_hba->hw->get_events(hisi_hba, phy_no);
			goto out;
		}
		fallthrough;
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (sts && !wait_for_completion_timeout(&completion, 2 * HZ)) {
		dev_warn(dev, "phy%d wait phyup timed out for func %d\n",
			 phy_no, func);
		if (phy->in_reset)
			ret = -ETIMEDOUT;
	}

out:
	phy->reset_completion = NULL;

	up(&hisi_hba->sem);
	return ret;
}

static void hisi_sas_task_done(struct sas_task *task)
{
	del_timer_sync(&task->slow_task->timer);
	complete(&task->slow_task->completion);
}

static void hisi_sas_tmf_timedout(struct timer_list *t)
{
	struct sas_task_slow *slow = from_timer(slow, t, timer);
	struct sas_task *task = slow->task;
	unsigned long flags;
	bool is_completed = true;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
		is_completed = false;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	if (!is_completed)
		complete(&task->slow_task->completion);
}

#define TASK_TIMEOUT (20 * HZ)
#define TASK_RETRY 3
#define INTERNAL_ABORT_TIMEOUT (6 * HZ)

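/*
 * Build a slow task for a TMF (SSP task management function or SATA
 * soft-reset FIS), execute it up to TASK_RETRY times, and translate the
 * completion status into a TMF_RESP_* or negative errno result.
 */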
static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
					   void *parameter, u32 para_len,
					   struct hisi_sas_tmf_task *tmf)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
	struct device *dev = hisi_hba->dev;
	struct sas_task *task;
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = device->tproto;

		if (dev_is_sata(device)) {
			task->ata_task.device_control_reg_update = 1;
			memcpy(&task->ata_task.fis, parameter, para_len);
		} else {
			memcpy(&task->ssp_task, parameter, para_len);
		}
		task->task_done = hisi_sas_task_done;

		task->slow_task->timer.function = hisi_sas_tmf_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT;
		add_timer(&task->slow_task->timer);

		res = hisi_sas_task_exec(task, GFP_KERNEL, tmf);
		if (res) {
			del_timer_sync(&task->slow_task->timer);
			dev_err(dev, "abort tmf: executing internal task failed: %d\n",
				res);
			goto ex_err;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even if the TMF timed out, return directly. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				struct hisi_sas_slot *slot = task->lldd_task;

				dev_err(dev, "abort tmf: TMF task timeout and not done\n");
				if (slot) {
					struct hisi_sas_cq *cq =
						&hisi_hba->cq[slot->dlvry_queue];
					/*
					 * sync irq to avoid freeing the task
					 * before it is used in I/O completion
					 */
					synchronize_irq(cq->irq_no);
					slot->task = NULL;
				}

				goto ex_err;
			} else
				dev_err(dev, "abort tmf: TMF task timeout\n");
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun
			 */
			dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x sts 0x%x underrun\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_OVERRUN) {
			dev_warn(dev, "abort tmf: blocked task error\n");
			res = -EMSGSIZE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_OPEN_REJECT) {
			dev_warn(dev, "abort tmf: open reject failed\n");
			res = -EIO;
		} else {
			dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x status 0x%x\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
		}
		sas_free_task(task);
		task = NULL;
	}
ex_err:
	if (retry == TASK_RETRY)
		dev_warn(dev, "abort tmf: executing internal task failed!\n");
	sas_free_task(task);
	return res;
}

static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
					bool reset, int pmp, u8 *fis)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	if (reset)
		tf.ctl |= ATA_SRST;
	else
		tf.ctl &= ~ATA_SRST;
	tf.command = ATA_CMD_DEV_RESET;
	ata_tf_to_fis(&tf, pmp, 0, fis);
}

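/*
 * SATA software reset: send an SRST-set then SRST-clear device reset FIS
 * to every link of the ATA port, then release any tasks still held for
 * the device on success.
 */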
static int hisi_sas_softreset_ata_disk(struct domain_device *device)
{
	u8 fis[20] = {0};
	struct ata_port *ap = device->sata_dev.ap;
	struct ata_link *link;
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);

	ata_for_each_link(link, ap, EDGE) {
		int pmp = sata_srst_pmp(link);

		hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
		rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			break;
	}

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis,
							     s, NULL);
			if (rc != TMF_RESP_FUNC_COMPLETE)
				dev_err(dev, "ata disk %016llx de-reset failed\n",
					SAS_ADDR(device->sas_addr));
		}
	} else {
		dev_err(dev, "ata disk %016llx reset failed\n",
			SAS_ADDR(device->sas_addr));
	}

	if (rc == TMF_RESP_FUNC_COMPLETE)
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
					u8 *lun, struct hisi_sas_tmf_task *tmf)
{
	struct sas_ssp_task ssp_task;

	if (!(device->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	memcpy(ssp_task.LUN, lun, 8);

	return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
					       sizeof(ssp_task), tmf);
}

static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
{
	u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;
		struct asd_sas_port *sas_port;
		struct hisi_sas_port *port;
		struct hisi_sas_phy *phy = NULL;
		struct asd_sas_phy *sas_phy;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED)
				|| !device || !device->port)
			continue;

		sas_port = device->port;
		port = to_hisi_sas_port(sas_port);

		spin_lock(&sas_port->phy_list_lock);
		list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
			if (state & BIT(sas_phy->id)) {
				phy = sas_phy->lldd_phy;
				break;
			}
		spin_unlock(&sas_port->phy_list_lock);

		if (phy) {
			port->id = phy->port_id;

			/* Update linkrate of directly attached device. */
			if (!device->parent)
				device->linkrate = phy->sas_phy.linkrate;

			hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
		} else
			port->id = 0xff;
	}
}

static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state)
{
	struct asd_sas_port *_sas_port = NULL;
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		struct asd_sas_port *sas_port = sas_phy->port;
		bool do_port_check = _sas_port != sas_port;

		if (!sas_phy->phy->enabled)
			continue;

		/* Report PHY state change to libsas */
		if (state & BIT(phy_no)) {
			if (do_port_check && sas_port && sas_port->port_dev) {
				struct domain_device *dev = sas_port->port_dev;

				_sas_port = sas_port;

				if (dev_is_expander(dev->dev_type))
					sas_notify_port_event(sas_phy,
							PORTE_BROADCAST_RCVD,
							GFP_KERNEL);
			}
		} else {
			hisi_sas_phy_down(hisi_hba, phy_no, 0, GFP_KERNEL);
		}
	}
}

static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
			continue;

		hisi_sas_init_device(device);
	}
}

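/*
 * After a controller reset, send a software reset FIS on each phy of the
 * port that is both up and part of the port's phy mask, so expander-
 * attached SATA disks leave the STP-reject state.
 */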
static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba,
					     struct asd_sas_port *sas_port,
					     struct domain_device *device)
{
	struct hisi_sas_tmf_task tmf_task = { .force_phy = 1 };
	struct ata_port *ap = device->sata_dev.ap;
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);
	int rc = TMF_RESP_FUNC_FAILED;
	struct ata_link *link;
	u8 fis[20] = {0};
	u32 state;
	int i;

	state = hisi_hba->hw->get_phys_state(hisi_hba);
	for (i = 0; i < hisi_hba->n_phy; i++) {
		if (!(state & BIT(i)))
			continue;
		if (!(sas_port->phy_mask & BIT(i)))
			continue;

		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			tmf_task.phy_id = i;
			hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis, s,
							     &tmf_task);
			if (rc != TMF_RESP_FUNC_COMPLETE) {
				dev_err(dev, "phy%d ata reset failed rc=%d\n",
					i, rc);
				break;
			}
		}
	}
}

static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int port_no, rc, i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
			continue;

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0,
						  false);
		if (rc < 0)
			dev_err(dev, "STP reject: abort dev failed %d\n", rc);
	}

	for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) {
		struct hisi_sas_port *port = &hisi_hba->port[port_no];
		struct asd_sas_port *sas_port = &port->sas_port;
		struct domain_device *port_dev = sas_port->port_dev;
		struct domain_device *device;

		if (!port_dev || !dev_is_expander(port_dev->dev_type))
			continue;

		/* Try to find a SATA device */
		list_for_each_entry(device, &sas_port->dev_list,
				    dev_list_node) {
			if (dev_is_sata(device)) {
				hisi_sas_send_ata_reset_each_phy(hisi_hba,
								 sas_port,
								 device);
				break;
			}
		}
	}
}

void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
{
	struct Scsi_Host *shost = hisi_hba->shost;

	hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba);

	scsi_block_requests(shost);
	hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);

	del_timer_sync(&hisi_hba->timer);

	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare);

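/*
 * Counterpart of hisi_sas_controller_reset_prepare(): bring the phys
 * back up, restore port IDs, re-init devices and re-enable the host.
 */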
void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
{
	struct Scsi_Host *shost = hisi_hba->shost;

	/* Init and wait for PHYs to come up and all libsas events to finish. */
	hisi_hba->hw->phys_init(hisi_hba);
	msleep(1000);
	hisi_sas_refresh_port_id(hisi_hba);
	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);

	if (hisi_hba->reject_stp_links_msk)
		hisi_sas_terminate_stp_reject(hisi_hba);
	hisi_sas_reset_init_all_devices(hisi_hba);
	scsi_unblock_requests(shost);
	clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
	up(&hisi_hba->sem);

	hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);

static int hisi_sas_controller_prereset(struct hisi_hba *hisi_hba)
{
	if (!hisi_hba->hw->soft_reset)
		return -1;

	down(&hisi_hba->sem);
	if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
		up(&hisi_hba->sem);
		return -1;
	}

	if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
		hisi_hba->hw->debugfs_snapshot_regs(hisi_hba);

	return 0;
}

static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct Scsi_Host *shost = hisi_hba->shost;
	int rc;

	dev_info(dev, "controller resetting...\n");
	hisi_sas_controller_reset_prepare(hisi_hba);

	rc = hisi_hba->hw->soft_reset(hisi_hba);
	if (rc) {
		dev_warn(dev, "controller reset failed (%d)\n", rc);
		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
		up(&hisi_hba->sem);
		scsi_unblock_requests(shost);
		clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
		return rc;
	}

	hisi_sas_controller_reset_done(hisi_hba);
	clear_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags);
	dev_info(dev, "controller reset complete\n");

	return 0;
}

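/*
 * SCSI EH abort handler: issue an ABORT TASK TMF plus an internal abort
 * for SSP, a device abort plus software reset for SATA/STP, and an
 * internal abort only for SMP.
 */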
static int hisi_sas_abort_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba;
	struct device *dev;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	if (!sas_dev)
		return TMF_RESP_FUNC_FAILED;

	hisi_hba = dev_to_hisi_hba(task->dev);
	dev = hisi_hba->dev;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		struct hisi_sas_slot *slot = task->lldd_task;
		struct hisi_sas_cq *cq;

		if (slot) {
			/*
			 * sync irq to avoid freeing the task
			 * before it is used in I/O completion
			 */
			cq = &hisi_hba->cq[slot->dlvry_queue];
			synchronize_irq(cq->irq_no);
		}
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct hisi_sas_slot *slot = task->lldd_task;
		u16 tag = slot->idx;
		int rc2;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = tag;

		rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
						  &tmf_task);

		rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
						   HISI_SAS_INT_ABT_CMD, tag,
						   false);
		if (rc2 < 0) {
			dev_err(dev, "abort task: internal abort (%d)\n", rc2);
			return TMF_RESP_FUNC_FAILED;
		}

		/*
		 * If the TMF finds that the IO is not in the device and also
		 * the internal abort does not succeed, then it is safe to
		 * free the slot.
		 * Note: if the internal abort succeeds then the slot
		 * will have already been completed
		 */
		if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
			if (task->lldd_task)
				hisi_sas_do_release_task(hisi_hba, task, slot);
		}
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		   task->task_proto & SAS_PROTOCOL_STP) {
		if (task->dev->dev_type == SAS_SATA_DEV) {
			rc = hisi_sas_internal_task_abort(hisi_hba, device,
							  HISI_SAS_INT_ABT_DEV,
							  0, false);
			if (rc < 0) {
				dev_err(dev, "abort task: internal abort failed\n");
				goto out;
			}
			hisi_sas_dereg_device(hisi_hba, device);
			rc = hisi_sas_softreset_ata_disk(device);
		}
	} else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;
		struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_CMD, tag,
						  false);
		if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
		    task->lldd_task) {
			/*
			 * sync irq to avoid freeing the task
			 * before it is used in I/O completion
			 */
			synchronize_irq(cq->irq_no);
			slot->task = NULL;
		}
	}

out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_notice(dev, "abort task: rc=%d\n", rc);
	return rc;
}

static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_tmf_task tmf_task;
	int rc;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0, false);
	if (rc < 0) {
		dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	tmf_task.tmf = TMF_ABORT_TASK_SET;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	if (rc == TMF_RESP_FUNC_COMPLETE)
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_tmf_task tmf_task;
	int rc;

	tmf_task.tmf = TMF_CLEAR_ACA;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	return rc;
}

#define I_T_NEXUS_RESET_PHYUP_TIMEOUT (2 * HZ)

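/*
 * Reset the I_T nexus via the local phy: a hard reset for SATA devices
 * not in init state, a link reset otherwise, with in_reset tracking so
 * a timed-out local reset is reported as phy-down.
 */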
static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
{
	struct sas_phy *local_phy = sas_get_local_phy(device);
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	int rc, reset_type;

	if (!local_phy->enabled) {
		sas_put_local_phy(local_phy);
		return -ENODEV;
	}

	if (scsi_is_sas_phy_local(local_phy)) {
		struct asd_sas_phy *sas_phy =
			sas_ha->sas_phy[local_phy->number];
		struct hisi_sas_phy *phy =
			container_of(sas_phy, struct hisi_sas_phy, sas_phy);
		unsigned long flags;

		spin_lock_irqsave(&phy->lock, flags);
		phy->in_reset = 1;
		spin_unlock_irqrestore(&phy->lock, flags);
	}

	reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT ||
		      !dev_is_sata(device)) ? true : false;

	rc = sas_phy_reset(local_phy, reset_type);
	sas_put_local_phy(local_phy);

	if (scsi_is_sas_phy_local(local_phy)) {
		struct asd_sas_phy *sas_phy =
			sas_ha->sas_phy[local_phy->number];
		struct hisi_sas_phy *phy =
			container_of(sas_phy, struct hisi_sas_phy, sas_phy);
		unsigned long flags;

		spin_lock_irqsave(&phy->lock, flags);
		phy->in_reset = 0;
		spin_unlock_irqrestore(&phy->lock, flags);

		/* report PHY down if timed out */
		if (rc == -ETIMEDOUT)
			hisi_sas_phy_down(hisi_hba, sas_phy->id, 0, GFP_KERNEL);
	} else if (sas_dev->dev_status != HISI_SAS_DEV_INIT) {
		/*
		 * If in init state, rely on the caller to wait for the link
		 * to be ready; otherwise, delay unless the PHY reset failed.
		 */
		if (!rc)
			msleep(2000);
	}

	return rc;
}

static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0, false);
	if (rc < 0) {
		dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	rc = hisi_sas_debug_I_T_nexus_reset(device);
	if (rc == TMF_RESP_FUNC_COMPLETE && dev_is_sata(device)) {
		struct sas_phy *local_phy;

		rc = hisi_sas_softreset_ata_disk(device);
		switch (rc) {
		case -ECOMM:
			rc = -ENODEV;
			break;
		case TMF_RESP_FUNC_FAILED:
		case -EMSGSIZE:
		case -EIO:
			local_phy = sas_get_local_phy(device);
			rc = sas_phy_enable(local_phy, 0);
			if (!rc) {
				local_phy->enabled = 0;
				dev_err(dev, "Disabled local phy of ATA disk %016llx due to softreset fail (%d)\n",
					SAS_ADDR(device->sas_addr), rc);
				rc = -ENODEV;
			}
			sas_put_local_phy(local_phy);
			break;
		default:
			break;
		}
	}

	if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;

	/* Clear internal IO and then lu reset */
	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0, false);
	if (rc < 0) {
		dev_err(dev, "lu_reset: internal abort failed\n");
		goto out;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	if (dev_is_sata(device)) {
		struct sas_phy *phy;

		phy = sas_get_local_phy(device);

		rc = sas_phy_reset(phy, true);

		if (rc == 0)
			hisi_sas_release_task(hisi_hba, device);
		sas_put_local_phy(phy);
	} else {
		struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };

		rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
		if (rc == TMF_RESP_FUNC_COMPLETE)
			hisi_sas_release_task(hisi_hba, device);
	}
out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
			sas_dev->device_id, rc);
	return rc;
}

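/*
 * Async wrapper so clear_nexus_ha can reset the I_T nexus of every
 * directly attached device in parallel.
 */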
static void hisi_sas_async_I_T_nexus_reset(void *data, async_cookie_t cookie)
{
	struct domain_device *device = data;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	int rc;

	rc = hisi_sas_debug_I_T_nexus_reset(device);
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_info(hisi_hba->dev, "I_T_nexus reset fail for dev:%016llx rc=%d\n",
			 SAS_ADDR(device->sas_addr), rc);
}

static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
	ASYNC_DOMAIN_EXCLUSIVE(async);
	int i;

	queue_work(hisi_hba->wq, &r.work);
	wait_for_completion(r.completion);
	if (!r.done)
		return TMF_RESP_FUNC_FAILED;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device ||
		    dev_is_expander(device->dev_type))
			continue;

		async_schedule_domain(hisi_sas_async_I_T_nexus_reset,
				      device, &async);
	}

	async_synchronize_full_domain(&async);
	hisi_sas_release_tasks(hisi_hba);

	return TMF_RESP_FUNC_COMPLETE;
}
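
/*
 * Query the state of a single outstanding SSP command with TMF_QUERY_TASK:
 * TMF_RESP_FUNC_SUCC means the task is still present in the LU, while
 * TMF_RESP_FUNC_COMPLETE means it is no longer there (see the per-case
 * comments below).
 */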
static int hisi_sas_query_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct domain_device *device = task->dev;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = tag;

		rc = hisi_sas_debug_issue_ssp_tmf(device,
						  lun.scsi_lun,
						  &tmf_task);
		switch (rc) {
		/* The task is still in Lun, release it then */
		case TMF_RESP_FUNC_SUCC:
		/* The task is not in Lun or failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			break;
		default:
			rc = TMF_RESP_FUNC_FAILED;
			break;
		}
	}
	return rc;
}
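
/*
 * Prepare and deliver an internal abort command: allocate a free IPTT slot
 * and queue the abort on the given delivery queue; the abort then completes
 * through the normal completion-queue path like any other command.
 */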
static int
hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
				  struct hisi_sas_internal_abort *abort,
				  struct sas_task *task,
				  struct hisi_sas_dq *dq)
{
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_port *port;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_slot *slot;
	int slot_idx;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
		return -EINVAL;

	if (!device->port)
		return -1;

	port = to_hisi_sas_port(sas_port);

	/* simply get a slot and send abort command */
	slot_idx = hisi_sas_slot_index_alloc(hisi_hba, NULL);
	if (slot_idx < 0)
		goto err_out;

	slot = &hisi_hba->slot_info[slot_idx];
	slot->n_elem = 0;
	slot->task = task;
	slot->port = port;
	slot->is_internal = true;

	hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev, abort, NULL);

	return 0;

err_out:
	dev_err(dev, "internal abort task prep: failed[%d]!\n", slot_idx);

	return slot_idx;
}

/**
 * _hisi_sas_internal_task_abort -- execute an internal
 * abort command for single IO command or a device
 * @hisi_hba: host controller struct
 * @device: domain device
 * @abort_flag: mode of operation, device or single IO
 * @tag: tag of IO to be aborted (only relevant to single
 *       IO mode)
 * @dq: delivery queue for this internal abort command
 * @rst_to_recover: If rst_to_recover set, queue a controller
 *		    reset if an internal abort times out.
 */
static int
_hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			      struct domain_device *device, int abort_flag,
			      int tag, struct hisi_sas_dq *dq, bool rst_to_recover)
{
	struct sas_task *task;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_internal_abort abort = {
		.flag = abort_flag,
		.tag = tag,
	};
	struct device *dev = hisi_hba->dev;
	int res;
	/*
	 * If this hw does not implement prep_abort(), it either does not
	 * support internal abort or does not need one. Return
	 * TMF_RESP_FUNC_FAILED and let the remaining recovery steps proceed
	 * as if the internal abort had been executed and completed on the CQ.
	 */
	if (!hisi_hba->hw->prep_abort)
		return TMF_RESP_FUNC_FAILED;

	if (test_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags))
		return -EIO;

	task = sas_alloc_slow_task(GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	task->dev = device;
	task->task_proto = SAS_PROTOCOL_NONE;
	task->task_done = hisi_sas_task_done;
	task->slow_task->timer.function = hisi_sas_tmf_timedout;
	task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT;
	add_timer(&task->slow_task->timer);

	res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
						&abort, task, dq);
	if (res) {
		del_timer_sync(&task->slow_task->timer);
		dev_err(dev, "internal task abort: executing internal task failed: %d\n",
			res);
		goto exit;
	}
	wait_for_completion(&task->slow_task->completion);
	res = TMF_RESP_FUNC_FAILED;

	/* Internal abort timed out */
	if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
		if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
			queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);

		if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
			struct hisi_sas_slot *slot = task->lldd_task;

			set_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags);

			if (slot) {
				struct hisi_sas_cq *cq =
					&hisi_hba->cq[slot->dlvry_queue];
				/*
				 * sync irq to avoid freeing task
				 * before using task in IO completion
				 */
				synchronize_irq(cq->irq_no);
				slot->task = NULL;
			}

			if (rst_to_recover) {
				dev_err(dev, "internal task abort: timeout and not done. Queuing reset.\n");
				queue_work(hisi_hba->wq, &hisi_hba->rst_work);
			} else {
				dev_err(dev, "internal task abort: timeout and not done.\n");
			}

			res = -EIO;
			goto exit;
		} else
			dev_err(dev, "internal task abort: timeout.\n");
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
	    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
		res = TMF_RESP_FUNC_COMPLETE;
		goto exit;
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
	    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
		res = TMF_RESP_FUNC_SUCC;
		goto exit;
	}

exit:
	dev_dbg(dev, "internal task abort: task to dev %016llx task=%pK resp: 0x%x sts 0x%x\n",
		SAS_ADDR(device->sas_addr), task,
		task->task_status.resp, /* 0 is complete, -1 is undelivered */
		task->task_status.stat);
	sas_free_task(task);

	return res;
}
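
/*
 * Dispatch an internal abort: a single-command abort goes to the delivery
 * queue that the command was originally delivered on, while a device-wide
 * abort is issued on every delivery queue whose completion interrupt can
 * still be serviced by an online CPU. Typical use (a sketch, mirroring the
 * error-handling paths above):
 *
 *	rc = hisi_sas_internal_task_abort(hisi_hba, device,
 *					  HISI_SAS_INT_ABT_DEV, 0, false);
 */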
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag, bool rst_to_recover)
{
	struct hisi_sas_slot *slot;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_dq *dq;
	int i, rc;

	switch (abort_flag) {
	case HISI_SAS_INT_ABT_CMD:
		slot = &hisi_hba->slot_info[tag];
		dq = &hisi_hba->dq[slot->dlvry_queue];
		return _hisi_sas_internal_task_abort(hisi_hba, device,
						     abort_flag, tag, dq,
						     rst_to_recover);
	case HISI_SAS_INT_ABT_DEV:
		for (i = 0; i < hisi_hba->cq_nvecs; i++) {
			struct hisi_sas_cq *cq = &hisi_hba->cq[i];
			const struct cpumask *mask = cq->irq_mask;

			if (mask && !cpumask_intersects(cpu_online_mask, mask))
				continue;
			dq = &hisi_hba->dq[i];
			rc = _hisi_sas_internal_task_abort(hisi_hba, device,
							   abort_flag, tag,
							   dq, rst_to_recover);
			if (rc)
				return rc;
		}
		break;
	default:
		dev_err(dev, "Unrecognised internal abort flag (%d)\n",
			abort_flag);
		return -EINVAL;
	}

	return 0;
}

static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
{
	hisi_sas_port_notify_formed(sas_phy);
}

static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
			       u8 reg_index, u8 reg_count, u8 *write_data)
{
	struct hisi_hba *hisi_hba = sha->lldd_ha;

	if (!hisi_hba->hw->write_gpio)
		return -EOPNOTSUPP;

	return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
					reg_index, reg_count, write_data);
}

static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
{
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_phy *sphy = sas_phy->phy;
	unsigned long flags;

	phy->phy_attached = 0;
	phy->phy_type = 0;
	phy->port = NULL;

	spin_lock_irqsave(&phy->lock, flags);
	if (phy->enable)
		sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
	else
		sphy->negotiated_linkrate = SAS_PHY_DISABLED;
	spin_unlock_irqrestore(&phy->lock, flags);
}
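
/*
 * Handle a phy down event: if @rdy, the phy is down but ready, so re-report
 * the attached device and re-form the port; otherwise notify libsas of loss
 * of signal and tear down the phy/port state (unless a reset is in progress,
 * in which case the flutter is ignored).
 */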
void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy,
		       gfp_t gfp_flags)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct device *dev = hisi_hba->dev;

	if (rdy) {
		/* Phy down but ready */
		hisi_sas_bytes_dmaed(hisi_hba, phy_no, gfp_flags);
		hisi_sas_port_notify_formed(sas_phy);
	} else {
		struct hisi_sas_port *port = phy->port;

		if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) ||
		    phy->in_reset) {
			dev_info(dev, "ignore flutter phy%d down\n", phy_no);
			return;
		}
		/* Phy down and not ready */
		sas_notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL, gfp_flags);
		sas_phy_disconnected(sas_phy);

		if (port) {
			if (phy->phy_type & PORT_TYPE_SAS) {
				int port_id = port->id;

				if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
								       port_id))
					port->port_attached = 0;
			} else if (phy->phy_type & PORT_TYPE_SATA)
				port->port_attached = 0;
		}
		hisi_sas_phy_disconnected(phy);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);

void hisi_sas_sync_irqs(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->cq_nvecs; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];

		synchronize_irq(cq->irq_no);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_sync_irqs);

int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	if (reset_type != SCSI_ADAPTER_RESET)
		return -EOPNOTSUPP;

	queue_work(hisi_hba->wq, &hisi_hba->rst_work);

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_host_reset);

struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);

static struct sas_domain_function_template hisi_sas_transport_ops = {
	.lldd_dev_found		= hisi_sas_dev_found,
	.lldd_dev_gone		= hisi_sas_dev_gone,
	.lldd_execute_task	= hisi_sas_queue_command,
	.lldd_control_phy	= hisi_sas_control_phy,
	.lldd_abort_task	= hisi_sas_abort_task,
	.lldd_abort_task_set	= hisi_sas_abort_task_set,
	.lldd_clear_aca		= hisi_sas_clear_aca,
	.lldd_I_T_nexus_reset	= hisi_sas_I_T_nexus_reset,
	.lldd_lu_reset		= hisi_sas_lu_reset,
	.lldd_query_task	= hisi_sas_query_task,
	.lldd_clear_nexus_ha	= hisi_sas_clear_nexus_ha,
	.lldd_port_formed	= hisi_sas_port_formed,
	.lldd_write_gpio	= hisi_sas_write_gpio,
};
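
/*
 * Zero the delivery/completion queue memories and reset the queue read and
 * write pointers, so that hw and driver restart from a consistent state.
 */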
void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
{
	int i, s, j, max_command_entries = HISI_SAS_MAX_COMMANDS;
	struct hisi_sas_breakpoint *sata_breakpoint = hisi_hba->sata_breakpoint;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];
		struct hisi_sas_cmd_hdr *cmd_hdr = hisi_hba->cmd_hdr[i];

		s = sizeof(struct hisi_sas_cmd_hdr);
		for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++)
			memset(&cmd_hdr[j], 0, s);

		dq->wr_point = 0;

		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		memset(hisi_hba->complete_hdr[i], 0, s);
		cq->rd_point = 0;
	}

	s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
	memset(hisi_hba->initial_fis, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	memset(hisi_hba->iost, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	memset(hisi_hba->breakpoint, 0, s);

	s = sizeof(struct hisi_sas_sata_breakpoint);
	for (j = 0; j < HISI_SAS_MAX_ITCT_ENTRIES; j++)
		memset(&sata_breakpoint[j], 0, s);
}
EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
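
/*
 * Allocate all per-HBA driver and DMA memories. Note that slot buffers are
 * carved out of larger coherent blocks (sized with lcm() below) rather than
 * being allocated individually per slot.
 */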
int hisi_sas_alloc(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int i, j, s, max_command_entries = HISI_SAS_MAX_COMMANDS;
	int max_command_entries_ru, sz_slot_buf_ru;
	int blk_cnt, slots_per_blk;

	sema_init(&hisi_hba->sem, 1);
	spin_lock_init(&hisi_hba->lock);
	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_init(hisi_hba, i);
		hisi_hba->port[i].port_attached = 0;
		hisi_hba->port[i].id = -1;
	}

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
		hisi_hba->devices[i].device_id = i;
		hisi_hba->devices[i].dev_status = HISI_SAS_DEV_INIT;
	}

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];

		/* Completion queue structure */
		cq->id = i;
		cq->hisi_hba = hisi_hba;

		/* Delivery queue structure */
		spin_lock_init(&dq->lock);
		INIT_LIST_HEAD(&dq->list);
		dq->id = i;
		dq->hisi_hba = hisi_hba;

		/* Delivery queue */
		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s,
						&hisi_hba->cmd_hdr_dma[i],
						GFP_KERNEL);
		if (!hisi_hba->cmd_hdr[i])
			goto err_out;

		/* Completion queue */
		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s,
						&hisi_hba->complete_hdr_dma[i],
						GFP_KERNEL);
		if (!hisi_hba->complete_hdr[i])
			goto err_out;
	}

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
	hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma,
					     GFP_KERNEL);
	if (!hisi_hba->itct)
		goto err_out;

	hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
					   sizeof(struct hisi_sas_slot),
					   GFP_KERNEL);
	if (!hisi_hba->slot_info)
		goto err_out;

	/* roundup to avoid overly large block size */
	max_command_entries_ru = roundup(max_command_entries, 64);
	if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK)
		sz_slot_buf_ru = sizeof(struct hisi_sas_slot_dif_buf_table);
	else
		sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table);
	sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64);
	s = max(lcm(max_command_entries_ru, sz_slot_buf_ru), PAGE_SIZE);
	blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s;
	slots_per_blk = s / sz_slot_buf_ru;

	for (i = 0; i < blk_cnt; i++) {
		int slot_index = i * slots_per_blk;
		dma_addr_t buf_dma;
		void *buf;

		buf = dmam_alloc_coherent(dev, s, &buf_dma,
					  GFP_KERNEL);
		if (!buf)
			goto err_out;

		for (j = 0; j < slots_per_blk; j++, slot_index++) {
			struct hisi_sas_slot *slot;

			slot = &hisi_hba->slot_info[slot_index];
			slot->buf = buf;
			slot->buf_dma = buf_dma;
			slot->idx = slot_index;

			buf += sz_slot_buf_ru;
			buf_dma += sz_slot_buf_ru;
		}
	}

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma,
					     GFP_KERNEL);
	if (!hisi_hba->iost)
		goto err_out;

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	hisi_hba->breakpoint = dmam_alloc_coherent(dev, s,
						   &hisi_hba->breakpoint_dma,
						   GFP_KERNEL);
	if (!hisi_hba->breakpoint)
		goto err_out;

	s = hisi_hba->slot_index_count = max_command_entries;
	hisi_hba->slot_index_tags = devm_bitmap_zalloc(dev, s, GFP_KERNEL);
	if (!hisi_hba->slot_index_tags)
		goto err_out;

	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
	hisi_hba->initial_fis = dmam_alloc_coherent(dev, s,
						    &hisi_hba->initial_fis_dma,
						    GFP_KERNEL);
	if (!hisi_hba->initial_fis)
		goto err_out;

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
	hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s,
						&hisi_hba->sata_breakpoint_dma,
						GFP_KERNEL);
	if (!hisi_hba->sata_breakpoint)
		goto err_out;

	hisi_hba->last_slot_index = HISI_SAS_UNRESERVED_IPTT;

	hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
	if (!hisi_hba->wq) {
		dev_err(dev, "sas_alloc: failed to create workqueue\n");
		goto err_out;
	}

	return 0;
err_out:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(hisi_sas_alloc);

void hisi_sas_free(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[i];

		del_timer_sync(&phy->timer);
	}

	if (hisi_hba->wq)
		destroy_workqueue(hisi_hba->wq);
}
EXPORT_SYMBOL_GPL(hisi_sas_free);
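
/*
 * Controller reset work handlers: the plain handler runs a reset
 * asynchronously; the sync variant additionally records success and signals
 * a completion so that callers can wait for the reset to finish.
 */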
void hisi_sas_rst_work_handler(struct work_struct *work)
{
	struct hisi_hba *hisi_hba =
		container_of(work, struct hisi_hba, rst_work);

	if (hisi_sas_controller_prereset(hisi_hba))
		return;

	hisi_sas_controller_reset(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);

void hisi_sas_sync_rst_work_handler(struct work_struct *work)
{
	struct hisi_sas_rst *rst =
		container_of(work, struct hisi_sas_rst, work);

	if (hisi_sas_controller_prereset(rst->hisi_hba))
		goto rst_complete;

	if (!hisi_sas_controller_reset(rst->hisi_hba))
		rst->done = true;
rst_complete:
	complete(rst->completion);
}
EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);

int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct platform_device *pdev = hisi_hba->platform_dev;
	struct device_node *np = pdev ? pdev->dev.of_node : NULL;
	struct clk *refclk;

	if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
					  SAS_ADDR_SIZE)) {
		dev_err(dev, "could not get property sas-addr\n");
		return -ENOENT;
	}

	if (np) {
		/*
		 * These properties are only required for platform
		 * device-based controllers with DT firmware.
		 */
		hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
					"hisilicon,sas-syscon");
		if (IS_ERR(hisi_hba->ctrl)) {
			dev_err(dev, "could not get syscon\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-reg",
					     &hisi_hba->ctrl_reset_reg)) {
			dev_err(dev, "could not get property ctrl-reset-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
					     &hisi_hba->ctrl_reset_sts_reg)) {
			dev_err(dev, "could not get property ctrl-reset-sts-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
					     &hisi_hba->ctrl_clock_ena_reg)) {
			dev_err(dev, "could not get property ctrl-clock-ena-reg\n");
			return -ENOENT;
		}
	}

	refclk = devm_clk_get(dev, NULL);
	if (IS_ERR(refclk))
		dev_dbg(dev, "no ref clk property\n");
	else
		hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;

	if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
		dev_err(dev, "could not get property phy-count\n");
		return -ENOENT;
	}

	if (device_property_read_u32(dev, "queue-count",
				     &hisi_hba->queue_count)) {
		dev_err(dev, "could not get property queue-count\n");
		return -ENOENT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);
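
/*
 * Allocate and initialise the SCSI host for a platform device-based
 * controller and map its register resources; the SGPIO register region
 * (memory resource 1) is optional.
 */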
static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
					      const struct hisi_sas_hw *hw)
{
	struct resource *res;
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	int error;

	shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba));
	if (!shost) {
		dev_err(dev, "scsi host alloc failed\n");
		return NULL;
	}
	hisi_hba = shost_priv(shost);

	INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
	hisi_hba->hw = hw;
	hisi_hba->dev = dev;
	hisi_hba->platform_dev = pdev;
	hisi_hba->shost = shost;
	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

	timer_setup(&hisi_hba->timer, NULL, 0);

	if (hisi_sas_get_fw_info(hisi_hba) < 0)
		goto err_out;

	error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (error)
		error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	if (error) {
		dev_err(dev, "No usable DMA addressing method\n");
		goto err_out;
	}

	hisi_hba->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(hisi_hba->regs))
		goto err_out;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {
		hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
		if (IS_ERR(hisi_hba->sgpio_regs))
			goto err_out;
	}

	if (hisi_sas_alloc(hisi_hba)) {
		hisi_sas_free(hisi_hba);
		goto err_out;
	}

	return shost;
err_out:
	scsi_host_put(shost);
	dev_err(dev, "shost alloc failed\n");
	return NULL;
}

static int hisi_sas_interrupt_preinit(struct hisi_hba *hisi_hba)
{
	if (hisi_hba->hw->interrupt_preinit)
		return hisi_hba->hw->interrupt_preinit(hisi_hba);
	return 0;
}

int hisi_sas_probe(struct platform_device *pdev,
		   const struct hisi_sas_hw *hw)
{
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	struct asd_sas_phy **arr_phy;
	struct asd_sas_port **arr_port;
	struct sas_ha_struct *sha;
	int rc, phy_nr, port_nr, i;

	shost = hisi_sas_shost_alloc(pdev, hw);
	if (!shost)
		return -ENOMEM;

	sha = SHOST_TO_SAS_HA(shost);
	hisi_hba = shost_priv(shost);
	platform_set_drvdata(pdev, sha);

	phy_nr = port_nr = hisi_hba->n_phy;

	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
	arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
	if (!arr_phy || !arr_port) {
		rc = -ENOMEM;
		goto err_out_ha;
	}

	sha->sas_phy = arr_phy;
	sha->sas_port = arr_port;
	sha->lldd_ha = hisi_hba;

	shost->transportt = hisi_sas_stt;
	shost->max_id = HISI_SAS_MAX_DEVICES;
	shost->max_lun = ~0;
	shost->max_channel = 1;
	shost->max_cmd_len = 16;
	if (hisi_hba->hw->slot_index_alloc) {
		shost->can_queue = HISI_SAS_MAX_COMMANDS;
		shost->cmd_per_lun = HISI_SAS_MAX_COMMANDS;
	} else {
		shost->can_queue = HISI_SAS_UNRESERVED_IPTT;
		shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT;
	}

	sha->sas_ha_name = DRV_NAME;
	sha->dev = hisi_hba->dev;
	sha->lldd_module = THIS_MODULE;
	sha->sas_addr = &hisi_hba->sas_addr[0];
	sha->num_phys = hisi_hba->n_phy;
	sha->core.shost = hisi_hba->shost;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
	}

	rc = hisi_sas_interrupt_preinit(hisi_hba);
	if (rc)
		goto err_out_ha;

	rc = scsi_add_host(shost, &pdev->dev);
	if (rc)
		goto err_out_ha;

	rc = sas_register_ha(sha);
	if (rc)
		goto err_out_register_ha;

	rc = hisi_hba->hw->hw_init(hisi_hba);
	if (rc)
		goto err_out_hw_init;

	scsi_scan_host(shost);

	return 0;

err_out_hw_init:
	sas_unregister_ha(sha);
err_out_register_ha:
	scsi_remove_host(shost);
err_out_ha:
	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
	return rc;
}
EXPORT_SYMBOL_GPL(hisi_sas_probe);

int hisi_sas_remove(struct platform_device *pdev)
{
	struct sas_ha_struct *sha = platform_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct Scsi_Host *shost = sha->core.shost;

	del_timer_sync(&hisi_hba->timer);

	sas_unregister_ha(sha);
	sas_remove_host(sha->core.shost);

	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_remove);

#if IS_ENABLED(CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE)
#define DEBUGFS_ENABLE_DEFAULT "enabled"
bool hisi_sas_debugfs_enable = true;
u32 hisi_sas_debugfs_dump_count = 50;
#else
#define DEBUGFS_ENABLE_DEFAULT "disabled"
bool hisi_sas_debugfs_enable;
u32 hisi_sas_debugfs_dump_count = 1;
#endif

EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable);
module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444);
MODULE_PARM_DESC(hisi_sas_debugfs_enable,
		 "Enable driver debugfs (default "DEBUGFS_ENABLE_DEFAULT")");

EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dump_count);
module_param_named(debugfs_dump_count, hisi_sas_debugfs_dump_count, uint, 0444);
MODULE_PARM_DESC(hisi_sas_debugfs_dump_count, "Number of debugfs dumps to allow");

struct dentry *hisi_sas_debugfs_dir;
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dir);

static __init int hisi_sas_init(void)
{
	hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
	if (!hisi_sas_stt)
		return -ENOMEM;

	if (hisi_sas_debugfs_enable) {
		hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL);
		if (hisi_sas_debugfs_dump_count > HISI_SAS_MAX_DEBUGFS_DUMP) {
			pr_info("hisi_sas: Limiting debugfs dump count\n");
			hisi_sas_debugfs_dump_count = HISI_SAS_MAX_DEBUGFS_DUMP;
		}
	}

	return 0;
}

static __exit void hisi_sas_exit(void)
{
	sas_release_transport(hisi_sas_stt);

	debugfs_remove(hisi_sas_debugfs_dir);
}

module_init(hisi_sas_init);
module_exit(hisi_sas_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller driver");
MODULE_ALIAS("platform:" DRV_NAME);