1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Copyright (c) 2015 Linaro Ltd. 4 * Copyright (c) 2015 Hisilicon Limited. 5 */ 6 7 #include "hisi_sas.h" 8 #define DRV_NAME "hisi_sas" 9 10 #define DEV_IS_GONE(dev) \ 11 ((!dev) || (dev->dev_type == SAS_PHY_UNUSED)) 12 13 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device, 14 u8 *lun, struct hisi_sas_tmf_task *tmf); 15 static int 16 hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, 17 struct domain_device *device, 18 int abort_flag, int tag, bool rst_to_recover); 19 static int hisi_sas_softreset_ata_disk(struct domain_device *device); 20 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, 21 void *funcdata); 22 static void hisi_sas_release_task(struct hisi_hba *hisi_hba, 23 struct domain_device *device); 24 static void hisi_sas_dev_gone(struct domain_device *device); 25 26 u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction) 27 { 28 switch (fis->command) { 29 case ATA_CMD_FPDMA_WRITE: 30 case ATA_CMD_FPDMA_READ: 31 case ATA_CMD_FPDMA_RECV: 32 case ATA_CMD_FPDMA_SEND: 33 case ATA_CMD_NCQ_NON_DATA: 34 return HISI_SAS_SATA_PROTOCOL_FPDMA; 35 36 case ATA_CMD_DOWNLOAD_MICRO: 37 case ATA_CMD_ID_ATA: 38 case ATA_CMD_PMP_READ: 39 case ATA_CMD_READ_LOG_EXT: 40 case ATA_CMD_PIO_READ: 41 case ATA_CMD_PIO_READ_EXT: 42 case ATA_CMD_PMP_WRITE: 43 case ATA_CMD_WRITE_LOG_EXT: 44 case ATA_CMD_PIO_WRITE: 45 case ATA_CMD_PIO_WRITE_EXT: 46 return HISI_SAS_SATA_PROTOCOL_PIO; 47 48 case ATA_CMD_DSM: 49 case ATA_CMD_DOWNLOAD_MICRO_DMA: 50 case ATA_CMD_PMP_READ_DMA: 51 case ATA_CMD_PMP_WRITE_DMA: 52 case ATA_CMD_READ: 53 case ATA_CMD_READ_EXT: 54 case ATA_CMD_READ_LOG_DMA_EXT: 55 case ATA_CMD_READ_STREAM_DMA_EXT: 56 case ATA_CMD_TRUSTED_RCV_DMA: 57 case ATA_CMD_TRUSTED_SND_DMA: 58 case ATA_CMD_WRITE: 59 case ATA_CMD_WRITE_EXT: 60 case ATA_CMD_WRITE_FUA_EXT: 61 case ATA_CMD_WRITE_QUEUED: 62 case ATA_CMD_WRITE_LOG_DMA_EXT: 63 case ATA_CMD_WRITE_STREAM_DMA_EXT: 64 case ATA_CMD_ZAC_MGMT_IN: 65 return HISI_SAS_SATA_PROTOCOL_DMA; 66 67 case ATA_CMD_CHK_POWER: 68 case ATA_CMD_DEV_RESET: 69 case ATA_CMD_EDD: 70 case ATA_CMD_FLUSH: 71 case ATA_CMD_FLUSH_EXT: 72 case ATA_CMD_VERIFY: 73 case ATA_CMD_VERIFY_EXT: 74 case ATA_CMD_SET_FEATURES: 75 case ATA_CMD_STANDBY: 76 case ATA_CMD_STANDBYNOW1: 77 case ATA_CMD_ZAC_MGMT_OUT: 78 return HISI_SAS_SATA_PROTOCOL_NONDATA; 79 80 case ATA_CMD_SET_MAX: 81 switch (fis->features) { 82 case ATA_SET_MAX_PASSWD: 83 case ATA_SET_MAX_LOCK: 84 return HISI_SAS_SATA_PROTOCOL_PIO; 85 86 case ATA_SET_MAX_PASSWD_DMA: 87 case ATA_SET_MAX_UNLOCK_DMA: 88 return HISI_SAS_SATA_PROTOCOL_DMA; 89 90 default: 91 return HISI_SAS_SATA_PROTOCOL_NONDATA; 92 } 93 94 default: 95 { 96 if (direction == DMA_NONE) 97 return HISI_SAS_SATA_PROTOCOL_NONDATA; 98 return HISI_SAS_SATA_PROTOCOL_PIO; 99 } 100 } 101 } 102 EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol); 103 104 void hisi_sas_sata_done(struct sas_task *task, 105 struct hisi_sas_slot *slot) 106 { 107 struct task_status_struct *ts = &task->task_status; 108 struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf; 109 struct hisi_sas_status_buffer *status_buf = 110 hisi_sas_status_buf_addr_mem(slot); 111 u8 *iu = &status_buf->iu[0]; 112 struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu; 113 114 resp->frame_len = sizeof(struct dev_to_host_fis); 115 memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis)); 116 117 ts->buf_valid_size = sizeof(*resp); 118 } 119 EXPORT_SYMBOL_GPL(hisi_sas_sata_done); 120 121 /* 122 * This function assumes linkrate mask fits in 8 bits, which it 123 * does for all HW versions supported. 124 */ 125 u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max) 126 { 127 u8 rate = 0; 128 int i; 129 130 max -= SAS_LINK_RATE_1_5_GBPS; 131 for (i = 0; i <= max; i++) 132 rate |= 1 << (i * 2); 133 return rate; 134 } 135 EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask); 136 137 static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device) 138 { 139 return device->port->ha->lldd_ha; 140 } 141 142 struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port) 143 { 144 return container_of(sas_port, struct hisi_sas_port, sas_port); 145 } 146 EXPORT_SYMBOL_GPL(to_hisi_sas_port); 147 148 void hisi_sas_stop_phys(struct hisi_hba *hisi_hba) 149 { 150 int phy_no; 151 152 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) 153 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 154 } 155 EXPORT_SYMBOL_GPL(hisi_sas_stop_phys); 156 157 static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx) 158 { 159 void *bitmap = hisi_hba->slot_index_tags; 160 161 clear_bit(slot_idx, bitmap); 162 } 163 164 static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx) 165 { 166 if (hisi_hba->hw->slot_index_alloc || 167 slot_idx >= HISI_SAS_UNRESERVED_IPTT) { 168 spin_lock(&hisi_hba->lock); 169 hisi_sas_slot_index_clear(hisi_hba, slot_idx); 170 spin_unlock(&hisi_hba->lock); 171 } 172 } 173 174 static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx) 175 { 176 void *bitmap = hisi_hba->slot_index_tags; 177 178 set_bit(slot_idx, bitmap); 179 } 180 181 static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, 182 struct scsi_cmnd *scsi_cmnd) 183 { 184 int index; 185 void *bitmap = hisi_hba->slot_index_tags; 186 187 if (scsi_cmnd) 188 return scsi_cmd_to_rq(scsi_cmnd)->tag; 189 190 spin_lock(&hisi_hba->lock); 191 index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count, 192 hisi_hba->last_slot_index + 1); 193 if (index >= hisi_hba->slot_index_count) { 194 index = find_next_zero_bit(bitmap, 195 hisi_hba->slot_index_count, 196 HISI_SAS_UNRESERVED_IPTT); 197 if (index >= hisi_hba->slot_index_count) { 198 spin_unlock(&hisi_hba->lock); 199 return -SAS_QUEUE_FULL; 200 } 201 } 202 hisi_sas_slot_index_set(hisi_hba, index); 203 hisi_hba->last_slot_index = index; 204 spin_unlock(&hisi_hba->lock); 205 206 return index; 207 } 208 209 static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba) 210 { 211 int i; 212 213 for (i = 0; i < hisi_hba->slot_index_count; ++i) 214 hisi_sas_slot_index_clear(hisi_hba, i); 215 } 216 217 void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, 218 struct hisi_sas_slot *slot) 219 { 220 int device_id = slot->device_id; 221 struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id]; 222 223 if (task) { 224 struct device *dev = hisi_hba->dev; 225 226 if (!task->lldd_task) 227 return; 228 229 task->lldd_task = NULL; 230 231 if (!sas_protocol_ata(task->task_proto)) { 232 if (slot->n_elem) 233 dma_unmap_sg(dev, task->scatter, 234 task->num_scatter, 235 task->data_dir); 236 if (slot->n_elem_dif) { 237 struct sas_ssp_task *ssp_task = &task->ssp_task; 238 struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; 239 240 dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), 241 scsi_prot_sg_count(scsi_cmnd), 242 task->data_dir); 243 } 244 } 245 } 246 247 spin_lock(&sas_dev->lock); 248 list_del_init(&slot->entry); 249 spin_unlock(&sas_dev->lock); 250 251 memset(slot, 0, offsetof(struct hisi_sas_slot, buf)); 252 253 hisi_sas_slot_index_free(hisi_hba, slot->idx); 254 } 255 EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free); 256 257 static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba, 258 struct hisi_sas_slot *slot) 259 { 260 hisi_hba->hw->prep_smp(hisi_hba, slot); 261 } 262 263 static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba, 264 struct hisi_sas_slot *slot) 265 { 266 hisi_hba->hw->prep_ssp(hisi_hba, slot); 267 } 268 269 static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba, 270 struct hisi_sas_slot *slot) 271 { 272 hisi_hba->hw->prep_stp(hisi_hba, slot); 273 } 274 275 static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba, 276 struct hisi_sas_slot *slot, 277 int device_id, int abort_flag, int tag_to_abort) 278 { 279 hisi_hba->hw->prep_abort(hisi_hba, slot, 280 device_id, abort_flag, tag_to_abort); 281 } 282 283 static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba, 284 struct sas_task *task, int n_elem, 285 int n_elem_req) 286 { 287 struct device *dev = hisi_hba->dev; 288 289 if (!sas_protocol_ata(task->task_proto)) { 290 if (task->num_scatter) { 291 if (n_elem) 292 dma_unmap_sg(dev, task->scatter, 293 task->num_scatter, 294 task->data_dir); 295 } else if (task->task_proto & SAS_PROTOCOL_SMP) { 296 if (n_elem_req) 297 dma_unmap_sg(dev, &task->smp_task.smp_req, 298 1, DMA_TO_DEVICE); 299 } 300 } 301 } 302 303 static int hisi_sas_dma_map(struct hisi_hba *hisi_hba, 304 struct sas_task *task, int *n_elem, 305 int *n_elem_req) 306 { 307 struct device *dev = hisi_hba->dev; 308 int rc; 309 310 if (sas_protocol_ata(task->task_proto)) { 311 *n_elem = task->num_scatter; 312 } else { 313 unsigned int req_len; 314 315 if (task->num_scatter) { 316 *n_elem = dma_map_sg(dev, task->scatter, 317 task->num_scatter, task->data_dir); 318 if (!*n_elem) { 319 rc = -ENOMEM; 320 goto prep_out; 321 } 322 } else if (task->task_proto & SAS_PROTOCOL_SMP) { 323 *n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req, 324 1, DMA_TO_DEVICE); 325 if (!*n_elem_req) { 326 rc = -ENOMEM; 327 goto prep_out; 328 } 329 req_len = sg_dma_len(&task->smp_task.smp_req); 330 if (req_len & 0x3) { 331 rc = -EINVAL; 332 goto err_out_dma_unmap; 333 } 334 } 335 } 336 337 if (*n_elem > HISI_SAS_SGE_PAGE_CNT) { 338 dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT\n", 339 *n_elem); 340 rc = -EINVAL; 341 goto err_out_dma_unmap; 342 } 343 return 0; 344 345 err_out_dma_unmap: 346 /* It would be better to call dma_unmap_sg() here, but it's messy */ 347 hisi_sas_dma_unmap(hisi_hba, task, *n_elem, 348 *n_elem_req); 349 prep_out: 350 return rc; 351 } 352 353 static void hisi_sas_dif_dma_unmap(struct hisi_hba *hisi_hba, 354 struct sas_task *task, int n_elem_dif) 355 { 356 struct device *dev = hisi_hba->dev; 357 358 if (n_elem_dif) { 359 struct sas_ssp_task *ssp_task = &task->ssp_task; 360 struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; 361 362 dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), 363 scsi_prot_sg_count(scsi_cmnd), 364 task->data_dir); 365 } 366 } 367 368 static int hisi_sas_dif_dma_map(struct hisi_hba *hisi_hba, 369 int *n_elem_dif, struct sas_task *task) 370 { 371 struct device *dev = hisi_hba->dev; 372 struct sas_ssp_task *ssp_task; 373 struct scsi_cmnd *scsi_cmnd; 374 int rc; 375 376 if (task->num_scatter) { 377 ssp_task = &task->ssp_task; 378 scsi_cmnd = ssp_task->cmd; 379 380 if (scsi_prot_sg_count(scsi_cmnd)) { 381 *n_elem_dif = dma_map_sg(dev, 382 scsi_prot_sglist(scsi_cmnd), 383 scsi_prot_sg_count(scsi_cmnd), 384 task->data_dir); 385 386 if (!*n_elem_dif) 387 return -ENOMEM; 388 389 if (*n_elem_dif > HISI_SAS_SGE_DIF_PAGE_CNT) { 390 dev_err(dev, "task prep: n_elem_dif(%d) too large\n", 391 *n_elem_dif); 392 rc = -EINVAL; 393 goto err_out_dif_dma_unmap; 394 } 395 } 396 } 397 398 return 0; 399 400 err_out_dif_dma_unmap: 401 dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), 402 scsi_prot_sg_count(scsi_cmnd), task->data_dir); 403 return rc; 404 } 405 406 static int hisi_sas_task_prep(struct sas_task *task, 407 struct hisi_sas_dq **dq_pointer, 408 bool is_tmf, struct hisi_sas_tmf_task *tmf, 409 int *pass) 410 { 411 struct domain_device *device = task->dev; 412 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 413 struct hisi_sas_device *sas_dev = device->lldd_dev; 414 struct hisi_sas_port *port; 415 struct hisi_sas_slot *slot; 416 struct hisi_sas_cmd_hdr *cmd_hdr_base; 417 struct asd_sas_port *sas_port = device->port; 418 struct device *dev = hisi_hba->dev; 419 int dlvry_queue_slot, dlvry_queue, rc, slot_idx; 420 int n_elem = 0, n_elem_dif = 0, n_elem_req = 0; 421 struct scsi_cmnd *scmd = NULL; 422 struct hisi_sas_dq *dq; 423 unsigned long flags; 424 int wr_q_index; 425 426 if (DEV_IS_GONE(sas_dev)) { 427 if (sas_dev) 428 dev_info(dev, "task prep: device %d not ready\n", 429 sas_dev->device_id); 430 else 431 dev_info(dev, "task prep: device %016llx not ready\n", 432 SAS_ADDR(device->sas_addr)); 433 434 return -ECOMM; 435 } 436 437 if (task->uldd_task) { 438 struct ata_queued_cmd *qc; 439 440 if (dev_is_sata(device)) { 441 qc = task->uldd_task; 442 scmd = qc->scsicmd; 443 } else { 444 scmd = task->uldd_task; 445 } 446 } 447 448 if (scmd) { 449 unsigned int dq_index; 450 u32 blk_tag; 451 452 blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); 453 dq_index = blk_mq_unique_tag_to_hwq(blk_tag); 454 *dq_pointer = dq = &hisi_hba->dq[dq_index]; 455 } else { 456 struct Scsi_Host *shost = hisi_hba->shost; 457 struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; 458 int queue = qmap->mq_map[raw_smp_processor_id()]; 459 460 *dq_pointer = dq = &hisi_hba->dq[queue]; 461 } 462 463 port = to_hisi_sas_port(sas_port); 464 if (port && !port->port_attached) { 465 dev_info(dev, "task prep: %s port%d not attach device\n", 466 (dev_is_sata(device)) ? 467 "SATA/STP" : "SAS", 468 device->port->id); 469 470 return -ECOMM; 471 } 472 473 rc = hisi_sas_dma_map(hisi_hba, task, &n_elem, 474 &n_elem_req); 475 if (rc < 0) 476 goto prep_out; 477 478 if (!sas_protocol_ata(task->task_proto)) { 479 rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task); 480 if (rc < 0) 481 goto err_out_dma_unmap; 482 } 483 484 if (hisi_hba->hw->slot_index_alloc) 485 rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device); 486 else 487 rc = hisi_sas_slot_index_alloc(hisi_hba, scmd); 488 489 if (rc < 0) 490 goto err_out_dif_dma_unmap; 491 492 slot_idx = rc; 493 slot = &hisi_hba->slot_info[slot_idx]; 494 495 spin_lock(&dq->lock); 496 wr_q_index = dq->wr_point; 497 dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS; 498 list_add_tail(&slot->delivery, &dq->list); 499 spin_unlock(&dq->lock); 500 spin_lock(&sas_dev->lock); 501 list_add_tail(&slot->entry, &sas_dev->list); 502 spin_unlock(&sas_dev->lock); 503 504 dlvry_queue = dq->id; 505 dlvry_queue_slot = wr_q_index; 506 507 slot->device_id = sas_dev->device_id; 508 slot->n_elem = n_elem; 509 slot->n_elem_dif = n_elem_dif; 510 slot->dlvry_queue = dlvry_queue; 511 slot->dlvry_queue_slot = dlvry_queue_slot; 512 cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue]; 513 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot]; 514 slot->task = task; 515 slot->port = port; 516 slot->tmf = tmf; 517 slot->is_internal = is_tmf; 518 task->lldd_task = slot; 519 520 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr)); 521 memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ); 522 memset(hisi_sas_status_buf_addr_mem(slot), 0, 523 sizeof(struct hisi_sas_err_record)); 524 525 switch (task->task_proto) { 526 case SAS_PROTOCOL_SMP: 527 hisi_sas_task_prep_smp(hisi_hba, slot); 528 break; 529 case SAS_PROTOCOL_SSP: 530 hisi_sas_task_prep_ssp(hisi_hba, slot); 531 break; 532 case SAS_PROTOCOL_SATA: 533 case SAS_PROTOCOL_STP: 534 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: 535 hisi_sas_task_prep_ata(hisi_hba, slot); 536 break; 537 default: 538 dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n", 539 task->task_proto); 540 break; 541 } 542 543 spin_lock_irqsave(&task->task_state_lock, flags); 544 task->task_state_flags |= SAS_TASK_AT_INITIATOR; 545 spin_unlock_irqrestore(&task->task_state_lock, flags); 546 547 ++(*pass); 548 WRITE_ONCE(slot->ready, 1); 549 550 return 0; 551 552 err_out_dif_dma_unmap: 553 if (!sas_protocol_ata(task->task_proto)) 554 hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif); 555 err_out_dma_unmap: 556 hisi_sas_dma_unmap(hisi_hba, task, n_elem, 557 n_elem_req); 558 prep_out: 559 dev_err(dev, "task prep: failed[%d]!\n", rc); 560 return rc; 561 } 562 563 static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags, 564 bool is_tmf, struct hisi_sas_tmf_task *tmf) 565 { 566 u32 rc; 567 u32 pass = 0; 568 struct hisi_hba *hisi_hba; 569 struct device *dev; 570 struct domain_device *device = task->dev; 571 struct asd_sas_port *sas_port = device->port; 572 struct hisi_sas_dq *dq = NULL; 573 574 if (!sas_port) { 575 struct task_status_struct *ts = &task->task_status; 576 577 ts->resp = SAS_TASK_UNDELIVERED; 578 ts->stat = SAS_PHY_DOWN; 579 /* 580 * libsas will use dev->port, should 581 * not call task_done for sata 582 */ 583 if (device->dev_type != SAS_SATA_DEV) 584 task->task_done(task); 585 return -ECOMM; 586 } 587 588 hisi_hba = dev_to_hisi_hba(device); 589 dev = hisi_hba->dev; 590 591 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) { 592 if (!gfpflags_allow_blocking(gfp_flags)) 593 return -EINVAL; 594 595 down(&hisi_hba->sem); 596 up(&hisi_hba->sem); 597 } 598 599 /* protect task_prep and start_delivery sequence */ 600 rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass); 601 if (rc) 602 dev_err(dev, "task exec: failed[%d]!\n", rc); 603 604 if (likely(pass)) { 605 spin_lock(&dq->lock); 606 hisi_hba->hw->start_delivery(dq); 607 spin_unlock(&dq->lock); 608 } 609 610 return rc; 611 } 612 613 static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no, 614 gfp_t gfp_flags) 615 { 616 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 617 struct asd_sas_phy *sas_phy = &phy->sas_phy; 618 619 if (!phy->phy_attached) 620 return; 621 622 if (test_bit(HISI_SAS_PM_BIT, &hisi_hba->flags) && 623 !sas_phy->suspended) { 624 dev_warn(hisi_hba->dev, "phy%d during suspend filtered out\n", phy_no); 625 return; 626 } 627 628 sas_notify_phy_event(sas_phy, PHYE_OOB_DONE, gfp_flags); 629 630 if (sas_phy->phy) { 631 struct sas_phy *sphy = sas_phy->phy; 632 633 sphy->negotiated_linkrate = sas_phy->linkrate; 634 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; 635 sphy->maximum_linkrate_hw = 636 hisi_hba->hw->phy_get_max_linkrate(); 637 if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) 638 sphy->minimum_linkrate = phy->minimum_linkrate; 639 640 if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) 641 sphy->maximum_linkrate = phy->maximum_linkrate; 642 } 643 644 if (phy->phy_type & PORT_TYPE_SAS) { 645 struct sas_identify_frame *id; 646 647 id = (struct sas_identify_frame *)phy->frame_rcvd; 648 id->dev_type = phy->identify.device_type; 649 id->initiator_bits = SAS_PROTOCOL_ALL; 650 id->target_bits = phy->identify.target_port_protocols; 651 } else if (phy->phy_type & PORT_TYPE_SATA) { 652 /* Nothing */ 653 } 654 655 sas_phy->frame_rcvd_size = phy->frame_rcvd_size; 656 sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, gfp_flags); 657 } 658 659 static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device) 660 { 661 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 662 struct hisi_sas_device *sas_dev = NULL; 663 int last = hisi_hba->last_dev_id; 664 int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES; 665 int i; 666 667 spin_lock(&hisi_hba->lock); 668 for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) { 669 if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) { 670 int queue = i % hisi_hba->queue_count; 671 struct hisi_sas_dq *dq = &hisi_hba->dq[queue]; 672 673 hisi_hba->devices[i].device_id = i; 674 sas_dev = &hisi_hba->devices[i]; 675 sas_dev->dev_status = HISI_SAS_DEV_INIT; 676 sas_dev->dev_type = device->dev_type; 677 sas_dev->hisi_hba = hisi_hba; 678 sas_dev->sas_device = device; 679 sas_dev->dq = dq; 680 spin_lock_init(&sas_dev->lock); 681 INIT_LIST_HEAD(&hisi_hba->devices[i].list); 682 break; 683 } 684 i++; 685 } 686 hisi_hba->last_dev_id = i; 687 spin_unlock(&hisi_hba->lock); 688 689 return sas_dev; 690 } 691 692 #define HISI_SAS_DISK_RECOVER_CNT 3 693 static int hisi_sas_init_device(struct domain_device *device) 694 { 695 int rc = TMF_RESP_FUNC_COMPLETE; 696 struct scsi_lun lun; 697 struct hisi_sas_tmf_task tmf_task; 698 int retry = HISI_SAS_DISK_RECOVER_CNT; 699 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 700 struct device *dev = hisi_hba->dev; 701 struct sas_phy *local_phy; 702 703 switch (device->dev_type) { 704 case SAS_END_DEVICE: 705 int_to_scsilun(0, &lun); 706 707 tmf_task.tmf = TMF_CLEAR_TASK_SET; 708 while (retry-- > 0) { 709 rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun, 710 &tmf_task); 711 if (rc == TMF_RESP_FUNC_COMPLETE) { 712 hisi_sas_release_task(hisi_hba, device); 713 break; 714 } 715 } 716 break; 717 case SAS_SATA_DEV: 718 case SAS_SATA_PM: 719 case SAS_SATA_PM_PORT: 720 case SAS_SATA_PENDING: 721 /* 722 * send HARD RESET to clear previous affiliation of 723 * STP target port 724 */ 725 local_phy = sas_get_local_phy(device); 726 if (!scsi_is_sas_phy_local(local_phy) && 727 !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) { 728 unsigned long deadline = ata_deadline(jiffies, 20000); 729 struct sata_device *sata_dev = &device->sata_dev; 730 struct ata_host *ata_host = sata_dev->ata_host; 731 struct ata_port_operations *ops = ata_host->ops; 732 struct ata_port *ap = sata_dev->ap; 733 struct ata_link *link; 734 unsigned int classes; 735 736 ata_for_each_link(link, ap, EDGE) 737 rc = ops->hardreset(link, &classes, 738 deadline); 739 } 740 sas_put_local_phy(local_phy); 741 if (rc) { 742 dev_warn(dev, "SATA disk hardreset fail: %d\n", rc); 743 return rc; 744 } 745 746 while (retry-- > 0) { 747 rc = hisi_sas_softreset_ata_disk(device); 748 if (!rc) 749 break; 750 } 751 break; 752 default: 753 break; 754 } 755 756 return rc; 757 } 758 759 static int hisi_sas_dev_found(struct domain_device *device) 760 { 761 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 762 struct domain_device *parent_dev = device->parent; 763 struct hisi_sas_device *sas_dev; 764 struct device *dev = hisi_hba->dev; 765 int rc; 766 767 if (hisi_hba->hw->alloc_dev) 768 sas_dev = hisi_hba->hw->alloc_dev(device); 769 else 770 sas_dev = hisi_sas_alloc_dev(device); 771 if (!sas_dev) { 772 dev_err(dev, "fail alloc dev: max support %d devices\n", 773 HISI_SAS_MAX_DEVICES); 774 return -EINVAL; 775 } 776 777 device->lldd_dev = sas_dev; 778 hisi_hba->hw->setup_itct(hisi_hba, sas_dev); 779 780 if (parent_dev && dev_is_expander(parent_dev->dev_type)) { 781 int phy_no; 782 u8 phy_num = parent_dev->ex_dev.num_phys; 783 struct ex_phy *phy; 784 785 for (phy_no = 0; phy_no < phy_num; phy_no++) { 786 phy = &parent_dev->ex_dev.ex_phy[phy_no]; 787 if (SAS_ADDR(phy->attached_sas_addr) == 788 SAS_ADDR(device->sas_addr)) 789 break; 790 } 791 792 if (phy_no == phy_num) { 793 dev_info(dev, "dev found: no attached " 794 "dev:%016llx at ex:%016llx\n", 795 SAS_ADDR(device->sas_addr), 796 SAS_ADDR(parent_dev->sas_addr)); 797 rc = -EINVAL; 798 goto err_out; 799 } 800 } 801 802 dev_info(dev, "dev[%d:%x] found\n", 803 sas_dev->device_id, sas_dev->dev_type); 804 805 rc = hisi_sas_init_device(device); 806 if (rc) 807 goto err_out; 808 sas_dev->dev_status = HISI_SAS_DEV_NORMAL; 809 return 0; 810 811 err_out: 812 hisi_sas_dev_gone(device); 813 return rc; 814 } 815 816 int hisi_sas_slave_configure(struct scsi_device *sdev) 817 { 818 struct domain_device *dev = sdev_to_domain_dev(sdev); 819 int ret = sas_slave_configure(sdev); 820 821 if (ret) 822 return ret; 823 if (!dev_is_sata(dev)) 824 sas_change_queue_depth(sdev, 64); 825 826 return 0; 827 } 828 EXPORT_SYMBOL_GPL(hisi_sas_slave_configure); 829 830 void hisi_sas_scan_start(struct Scsi_Host *shost) 831 { 832 struct hisi_hba *hisi_hba = shost_priv(shost); 833 834 hisi_hba->hw->phys_init(hisi_hba); 835 } 836 EXPORT_SYMBOL_GPL(hisi_sas_scan_start); 837 838 int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time) 839 { 840 struct hisi_hba *hisi_hba = shost_priv(shost); 841 struct sas_ha_struct *sha = &hisi_hba->sha; 842 843 /* Wait for PHY up interrupt to occur */ 844 if (time < HZ) 845 return 0; 846 847 sas_drain_work(sha); 848 return 1; 849 } 850 EXPORT_SYMBOL_GPL(hisi_sas_scan_finished); 851 852 static void hisi_sas_phyup_work(struct work_struct *work) 853 { 854 struct hisi_sas_phy *phy = 855 container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]); 856 struct hisi_hba *hisi_hba = phy->hisi_hba; 857 struct asd_sas_phy *sas_phy = &phy->sas_phy; 858 int phy_no = sas_phy->id; 859 860 phy->wait_phyup_cnt = 0; 861 if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP) 862 hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no); 863 hisi_sas_bytes_dmaed(hisi_hba, phy_no, GFP_KERNEL); 864 } 865 866 static void hisi_sas_linkreset_work(struct work_struct *work) 867 { 868 struct hisi_sas_phy *phy = 869 container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]); 870 struct asd_sas_phy *sas_phy = &phy->sas_phy; 871 872 hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL); 873 } 874 875 static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = { 876 [HISI_PHYE_PHY_UP] = hisi_sas_phyup_work, 877 [HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work, 878 }; 879 880 bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy, 881 enum hisi_sas_phy_event event) 882 { 883 struct hisi_hba *hisi_hba = phy->hisi_hba; 884 885 if (WARN_ON(event >= HISI_PHYES_NUM)) 886 return false; 887 888 return queue_work(hisi_hba->wq, &phy->works[event]); 889 } 890 EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event); 891 892 static void hisi_sas_wait_phyup_timedout(struct timer_list *t) 893 { 894 struct hisi_sas_phy *phy = from_timer(phy, t, timer); 895 struct hisi_hba *hisi_hba = phy->hisi_hba; 896 struct device *dev = hisi_hba->dev; 897 int phy_no = phy->sas_phy.id; 898 899 dev_warn(dev, "phy%d wait phyup timeout, issuing link reset\n", phy_no); 900 hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); 901 } 902 903 #define HISI_SAS_WAIT_PHYUP_RETRIES 10 904 905 void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no) 906 { 907 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 908 struct device *dev = hisi_hba->dev; 909 910 dev_dbg(dev, "phy%d OOB ready\n", phy_no); 911 if (phy->phy_attached) 912 return; 913 914 if (!timer_pending(&phy->timer)) { 915 if (phy->wait_phyup_cnt < HISI_SAS_WAIT_PHYUP_RETRIES) { 916 phy->wait_phyup_cnt++; 917 phy->timer.expires = jiffies + 918 HISI_SAS_WAIT_PHYUP_TIMEOUT; 919 add_timer(&phy->timer); 920 } else { 921 dev_warn(dev, "phy%d failed to come up %d times, giving up\n", 922 phy_no, phy->wait_phyup_cnt); 923 phy->wait_phyup_cnt = 0; 924 } 925 } 926 } 927 EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready); 928 929 static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no) 930 { 931 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 932 struct asd_sas_phy *sas_phy = &phy->sas_phy; 933 int i; 934 935 phy->hisi_hba = hisi_hba; 936 phy->port = NULL; 937 phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS; 938 phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate(); 939 sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0; 940 sas_phy->class = SAS; 941 sas_phy->iproto = SAS_PROTOCOL_ALL; 942 sas_phy->tproto = 0; 943 sas_phy->type = PHY_TYPE_PHYSICAL; 944 sas_phy->role = PHY_ROLE_INITIATOR; 945 sas_phy->oob_mode = OOB_NOT_CONNECTED; 946 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; 947 sas_phy->id = phy_no; 948 sas_phy->sas_addr = &hisi_hba->sas_addr[0]; 949 sas_phy->frame_rcvd = &phy->frame_rcvd[0]; 950 sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata; 951 sas_phy->lldd_phy = phy; 952 953 for (i = 0; i < HISI_PHYES_NUM; i++) 954 INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]); 955 956 spin_lock_init(&phy->lock); 957 958 timer_setup(&phy->timer, hisi_sas_wait_phyup_timedout, 0); 959 } 960 961 /* Wrapper to ensure we track hisi_sas_phy.enable properly */ 962 void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no, int enable) 963 { 964 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 965 struct asd_sas_phy *aphy = &phy->sas_phy; 966 struct sas_phy *sphy = aphy->phy; 967 unsigned long flags; 968 969 spin_lock_irqsave(&phy->lock, flags); 970 971 if (enable) { 972 /* We may have been enabled already; if so, don't touch */ 973 if (!phy->enable) 974 sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; 975 hisi_hba->hw->phy_start(hisi_hba, phy_no); 976 } else { 977 sphy->negotiated_linkrate = SAS_PHY_DISABLED; 978 hisi_hba->hw->phy_disable(hisi_hba, phy_no); 979 } 980 phy->enable = enable; 981 spin_unlock_irqrestore(&phy->lock, flags); 982 } 983 EXPORT_SYMBOL_GPL(hisi_sas_phy_enable); 984 985 static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy) 986 { 987 struct sas_ha_struct *sas_ha = sas_phy->ha; 988 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; 989 struct hisi_sas_phy *phy = sas_phy->lldd_phy; 990 struct asd_sas_port *sas_port = sas_phy->port; 991 struct hisi_sas_port *port; 992 unsigned long flags; 993 994 if (!sas_port) 995 return; 996 997 port = to_hisi_sas_port(sas_port); 998 spin_lock_irqsave(&hisi_hba->lock, flags); 999 port->port_attached = 1; 1000 port->id = phy->port_id; 1001 phy->port = port; 1002 sas_port->lldd_port = port; 1003 spin_unlock_irqrestore(&hisi_hba->lock, flags); 1004 } 1005 1006 static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task, 1007 struct hisi_sas_slot *slot) 1008 { 1009 if (task) { 1010 unsigned long flags; 1011 struct task_status_struct *ts; 1012 1013 ts = &task->task_status; 1014 1015 ts->resp = SAS_TASK_COMPLETE; 1016 ts->stat = SAS_ABORTED_TASK; 1017 spin_lock_irqsave(&task->task_state_lock, flags); 1018 task->task_state_flags &= 1019 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); 1020 if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP) 1021 task->task_state_flags |= SAS_TASK_STATE_DONE; 1022 spin_unlock_irqrestore(&task->task_state_lock, flags); 1023 } 1024 1025 hisi_sas_slot_task_free(hisi_hba, task, slot); 1026 } 1027 1028 static void hisi_sas_release_task(struct hisi_hba *hisi_hba, 1029 struct domain_device *device) 1030 { 1031 struct hisi_sas_slot *slot, *slot2; 1032 struct hisi_sas_device *sas_dev = device->lldd_dev; 1033 1034 list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry) 1035 hisi_sas_do_release_task(hisi_hba, slot->task, slot); 1036 } 1037 1038 void hisi_sas_release_tasks(struct hisi_hba *hisi_hba) 1039 { 1040 struct hisi_sas_device *sas_dev; 1041 struct domain_device *device; 1042 int i; 1043 1044 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1045 sas_dev = &hisi_hba->devices[i]; 1046 device = sas_dev->sas_device; 1047 1048 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || 1049 !device) 1050 continue; 1051 1052 hisi_sas_release_task(hisi_hba, device); 1053 } 1054 } 1055 EXPORT_SYMBOL_GPL(hisi_sas_release_tasks); 1056 1057 static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba, 1058 struct domain_device *device) 1059 { 1060 if (hisi_hba->hw->dereg_device) 1061 hisi_hba->hw->dereg_device(hisi_hba, device); 1062 } 1063 1064 static void hisi_sas_dev_gone(struct domain_device *device) 1065 { 1066 struct hisi_sas_device *sas_dev = device->lldd_dev; 1067 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1068 struct device *dev = hisi_hba->dev; 1069 int ret = 0; 1070 1071 dev_info(dev, "dev[%d:%x] is gone\n", 1072 sas_dev->device_id, sas_dev->dev_type); 1073 1074 down(&hisi_hba->sem); 1075 if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) { 1076 hisi_sas_internal_task_abort(hisi_hba, device, 1077 HISI_SAS_INT_ABT_DEV, 0, true); 1078 1079 hisi_sas_dereg_device(hisi_hba, device); 1080 1081 ret = hisi_hba->hw->clear_itct(hisi_hba, sas_dev); 1082 device->lldd_dev = NULL; 1083 } 1084 1085 if (hisi_hba->hw->free_device) 1086 hisi_hba->hw->free_device(sas_dev); 1087 1088 /* Don't mark it as SAS_PHY_UNUSED if failed to clear ITCT */ 1089 if (!ret) 1090 sas_dev->dev_type = SAS_PHY_UNUSED; 1091 sas_dev->sas_device = NULL; 1092 up(&hisi_hba->sem); 1093 } 1094 1095 static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags) 1096 { 1097 return hisi_sas_task_exec(task, gfp_flags, 0, NULL); 1098 } 1099 1100 static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no, 1101 struct sas_phy_linkrates *r) 1102 { 1103 struct sas_phy_linkrates _r; 1104 1105 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1106 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1107 enum sas_linkrate min, max; 1108 1109 if (r->minimum_linkrate > SAS_LINK_RATE_1_5_GBPS) 1110 return -EINVAL; 1111 1112 if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) { 1113 max = sas_phy->phy->maximum_linkrate; 1114 min = r->minimum_linkrate; 1115 } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) { 1116 max = r->maximum_linkrate; 1117 min = sas_phy->phy->minimum_linkrate; 1118 } else 1119 return -EINVAL; 1120 1121 _r.maximum_linkrate = max; 1122 _r.minimum_linkrate = min; 1123 1124 sas_phy->phy->maximum_linkrate = max; 1125 sas_phy->phy->minimum_linkrate = min; 1126 1127 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 1128 msleep(100); 1129 hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r); 1130 hisi_sas_phy_enable(hisi_hba, phy_no, 1); 1131 1132 return 0; 1133 } 1134 1135 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, 1136 void *funcdata) 1137 { 1138 struct sas_ha_struct *sas_ha = sas_phy->ha; 1139 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; 1140 int phy_no = sas_phy->id; 1141 1142 switch (func) { 1143 case PHY_FUNC_HARD_RESET: 1144 hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no); 1145 break; 1146 1147 case PHY_FUNC_LINK_RESET: 1148 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 1149 msleep(100); 1150 hisi_sas_phy_enable(hisi_hba, phy_no, 1); 1151 break; 1152 1153 case PHY_FUNC_DISABLE: 1154 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 1155 break; 1156 1157 case PHY_FUNC_SET_LINK_RATE: 1158 return hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata); 1159 case PHY_FUNC_GET_EVENTS: 1160 if (hisi_hba->hw->get_events) { 1161 hisi_hba->hw->get_events(hisi_hba, phy_no); 1162 break; 1163 } 1164 fallthrough; 1165 case PHY_FUNC_RELEASE_SPINUP_HOLD: 1166 default: 1167 return -EOPNOTSUPP; 1168 } 1169 return 0; 1170 } 1171 1172 static void hisi_sas_task_done(struct sas_task *task) 1173 { 1174 del_timer(&task->slow_task->timer); 1175 complete(&task->slow_task->completion); 1176 } 1177 1178 static void hisi_sas_tmf_timedout(struct timer_list *t) 1179 { 1180 struct sas_task_slow *slow = from_timer(slow, t, timer); 1181 struct sas_task *task = slow->task; 1182 unsigned long flags; 1183 bool is_completed = true; 1184 1185 spin_lock_irqsave(&task->task_state_lock, flags); 1186 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { 1187 task->task_state_flags |= SAS_TASK_STATE_ABORTED; 1188 is_completed = false; 1189 } 1190 spin_unlock_irqrestore(&task->task_state_lock, flags); 1191 1192 if (!is_completed) 1193 complete(&task->slow_task->completion); 1194 } 1195 1196 #define TASK_TIMEOUT (20 * HZ) 1197 #define TASK_RETRY 3 1198 #define INTERNAL_ABORT_TIMEOUT (6 * HZ) 1199 static int hisi_sas_exec_internal_tmf_task(struct domain_device *device, 1200 void *parameter, u32 para_len, 1201 struct hisi_sas_tmf_task *tmf) 1202 { 1203 struct hisi_sas_device *sas_dev = device->lldd_dev; 1204 struct hisi_hba *hisi_hba = sas_dev->hisi_hba; 1205 struct device *dev = hisi_hba->dev; 1206 struct sas_task *task; 1207 int res, retry; 1208 1209 for (retry = 0; retry < TASK_RETRY; retry++) { 1210 task = sas_alloc_slow_task(GFP_KERNEL); 1211 if (!task) 1212 return -ENOMEM; 1213 1214 task->dev = device; 1215 task->task_proto = device->tproto; 1216 1217 if (dev_is_sata(device)) { 1218 task->ata_task.device_control_reg_update = 1; 1219 memcpy(&task->ata_task.fis, parameter, para_len); 1220 } else { 1221 memcpy(&task->ssp_task, parameter, para_len); 1222 } 1223 task->task_done = hisi_sas_task_done; 1224 1225 task->slow_task->timer.function = hisi_sas_tmf_timedout; 1226 task->slow_task->timer.expires = jiffies + TASK_TIMEOUT; 1227 add_timer(&task->slow_task->timer); 1228 1229 res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf); 1230 1231 if (res) { 1232 del_timer(&task->slow_task->timer); 1233 dev_err(dev, "abort tmf: executing internal task failed: %d\n", 1234 res); 1235 goto ex_err; 1236 } 1237 1238 wait_for_completion(&task->slow_task->completion); 1239 res = TMF_RESP_FUNC_FAILED; 1240 /* Even TMF timed out, return direct. */ 1241 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { 1242 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { 1243 struct hisi_sas_slot *slot = task->lldd_task; 1244 1245 dev_err(dev, "abort tmf: TMF task timeout and not done\n"); 1246 if (slot) { 1247 struct hisi_sas_cq *cq = 1248 &hisi_hba->cq[slot->dlvry_queue]; 1249 /* 1250 * sync irq to avoid free'ing task 1251 * before using task in IO completion 1252 */ 1253 synchronize_irq(cq->irq_no); 1254 slot->task = NULL; 1255 } 1256 1257 goto ex_err; 1258 } else 1259 dev_err(dev, "abort tmf: TMF task timeout\n"); 1260 } 1261 1262 if (task->task_status.resp == SAS_TASK_COMPLETE && 1263 task->task_status.stat == TMF_RESP_FUNC_COMPLETE) { 1264 res = TMF_RESP_FUNC_COMPLETE; 1265 break; 1266 } 1267 1268 if (task->task_status.resp == SAS_TASK_COMPLETE && 1269 task->task_status.stat == TMF_RESP_FUNC_SUCC) { 1270 res = TMF_RESP_FUNC_SUCC; 1271 break; 1272 } 1273 1274 if (task->task_status.resp == SAS_TASK_COMPLETE && 1275 task->task_status.stat == SAS_DATA_UNDERRUN) { 1276 /* no error, but return the number of bytes of 1277 * underrun 1278 */ 1279 dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x sts 0x%x underrun\n", 1280 SAS_ADDR(device->sas_addr), 1281 task->task_status.resp, 1282 task->task_status.stat); 1283 res = task->task_status.residual; 1284 break; 1285 } 1286 1287 if (task->task_status.resp == SAS_TASK_COMPLETE && 1288 task->task_status.stat == SAS_DATA_OVERRUN) { 1289 dev_warn(dev, "abort tmf: blocked task error\n"); 1290 res = -EMSGSIZE; 1291 break; 1292 } 1293 1294 if (task->task_status.resp == SAS_TASK_COMPLETE && 1295 task->task_status.stat == SAS_OPEN_REJECT) { 1296 dev_warn(dev, "abort tmf: open reject failed\n"); 1297 res = -EIO; 1298 } else { 1299 dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x status 0x%x\n", 1300 SAS_ADDR(device->sas_addr), 1301 task->task_status.resp, 1302 task->task_status.stat); 1303 } 1304 sas_free_task(task); 1305 task = NULL; 1306 } 1307 ex_err: 1308 if (retry == TASK_RETRY) 1309 dev_warn(dev, "abort tmf: executing internal task failed!\n"); 1310 sas_free_task(task); 1311 return res; 1312 } 1313 1314 static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev, 1315 bool reset, int pmp, u8 *fis) 1316 { 1317 struct ata_taskfile tf; 1318 1319 ata_tf_init(dev, &tf); 1320 if (reset) 1321 tf.ctl |= ATA_SRST; 1322 else 1323 tf.ctl &= ~ATA_SRST; 1324 tf.command = ATA_CMD_DEV_RESET; 1325 ata_tf_to_fis(&tf, pmp, 0, fis); 1326 } 1327 1328 static int hisi_sas_softreset_ata_disk(struct domain_device *device) 1329 { 1330 u8 fis[20] = {0}; 1331 struct ata_port *ap = device->sata_dev.ap; 1332 struct ata_link *link; 1333 int rc = TMF_RESP_FUNC_FAILED; 1334 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1335 struct device *dev = hisi_hba->dev; 1336 int s = sizeof(struct host_to_dev_fis); 1337 1338 ata_for_each_link(link, ap, EDGE) { 1339 int pmp = sata_srst_pmp(link); 1340 1341 hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis); 1342 rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL); 1343 if (rc != TMF_RESP_FUNC_COMPLETE) 1344 break; 1345 } 1346 1347 if (rc == TMF_RESP_FUNC_COMPLETE) { 1348 ata_for_each_link(link, ap, EDGE) { 1349 int pmp = sata_srst_pmp(link); 1350 1351 hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis); 1352 rc = hisi_sas_exec_internal_tmf_task(device, fis, 1353 s, NULL); 1354 if (rc != TMF_RESP_FUNC_COMPLETE) 1355 dev_err(dev, "ata disk %016llx de-reset failed\n", 1356 SAS_ADDR(device->sas_addr)); 1357 } 1358 } else { 1359 dev_err(dev, "ata disk %016llx reset failed\n", 1360 SAS_ADDR(device->sas_addr)); 1361 } 1362 1363 if (rc == TMF_RESP_FUNC_COMPLETE) 1364 hisi_sas_release_task(hisi_hba, device); 1365 1366 return rc; 1367 } 1368 1369 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device, 1370 u8 *lun, struct hisi_sas_tmf_task *tmf) 1371 { 1372 struct sas_ssp_task ssp_task; 1373 1374 if (!(device->tproto & SAS_PROTOCOL_SSP)) 1375 return TMF_RESP_FUNC_ESUPP; 1376 1377 memcpy(ssp_task.LUN, lun, 8); 1378 1379 return hisi_sas_exec_internal_tmf_task(device, &ssp_task, 1380 sizeof(ssp_task), tmf); 1381 } 1382 1383 static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba) 1384 { 1385 u32 state = hisi_hba->hw->get_phys_state(hisi_hba); 1386 int i; 1387 1388 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1389 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; 1390 struct domain_device *device = sas_dev->sas_device; 1391 struct asd_sas_port *sas_port; 1392 struct hisi_sas_port *port; 1393 struct hisi_sas_phy *phy = NULL; 1394 struct asd_sas_phy *sas_phy; 1395 1396 if ((sas_dev->dev_type == SAS_PHY_UNUSED) 1397 || !device || !device->port) 1398 continue; 1399 1400 sas_port = device->port; 1401 port = to_hisi_sas_port(sas_port); 1402 1403 list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) 1404 if (state & BIT(sas_phy->id)) { 1405 phy = sas_phy->lldd_phy; 1406 break; 1407 } 1408 1409 if (phy) { 1410 port->id = phy->port_id; 1411 1412 /* Update linkrate of directly attached device. */ 1413 if (!device->parent) 1414 device->linkrate = phy->sas_phy.linkrate; 1415 1416 hisi_hba->hw->setup_itct(hisi_hba, sas_dev); 1417 } else 1418 port->id = 0xff; 1419 } 1420 } 1421 1422 static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state) 1423 { 1424 struct asd_sas_port *_sas_port = NULL; 1425 int phy_no; 1426 1427 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { 1428 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1429 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1430 struct asd_sas_port *sas_port = sas_phy->port; 1431 bool do_port_check = _sas_port != sas_port; 1432 1433 if (!sas_phy->phy->enabled) 1434 continue; 1435 1436 /* Report PHY state change to libsas */ 1437 if (state & BIT(phy_no)) { 1438 if (do_port_check && sas_port && sas_port->port_dev) { 1439 struct domain_device *dev = sas_port->port_dev; 1440 1441 _sas_port = sas_port; 1442 1443 if (dev_is_expander(dev->dev_type)) 1444 sas_notify_port_event(sas_phy, 1445 PORTE_BROADCAST_RCVD, 1446 GFP_KERNEL); 1447 } 1448 } else { 1449 hisi_sas_phy_down(hisi_hba, phy_no, 0, GFP_KERNEL); 1450 } 1451 } 1452 } 1453 1454 static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba) 1455 { 1456 struct hisi_sas_device *sas_dev; 1457 struct domain_device *device; 1458 int i; 1459 1460 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1461 sas_dev = &hisi_hba->devices[i]; 1462 device = sas_dev->sas_device; 1463 1464 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device) 1465 continue; 1466 1467 hisi_sas_init_device(device); 1468 } 1469 } 1470 1471 static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba, 1472 struct asd_sas_port *sas_port, 1473 struct domain_device *device) 1474 { 1475 struct hisi_sas_tmf_task tmf_task = { .force_phy = 1 }; 1476 struct ata_port *ap = device->sata_dev.ap; 1477 struct device *dev = hisi_hba->dev; 1478 int s = sizeof(struct host_to_dev_fis); 1479 int rc = TMF_RESP_FUNC_FAILED; 1480 struct asd_sas_phy *sas_phy; 1481 struct ata_link *link; 1482 u8 fis[20] = {0}; 1483 u32 state; 1484 1485 state = hisi_hba->hw->get_phys_state(hisi_hba); 1486 list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) { 1487 if (!(state & BIT(sas_phy->id))) 1488 continue; 1489 1490 ata_for_each_link(link, ap, EDGE) { 1491 int pmp = sata_srst_pmp(link); 1492 1493 tmf_task.phy_id = sas_phy->id; 1494 hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis); 1495 rc = hisi_sas_exec_internal_tmf_task(device, fis, s, 1496 &tmf_task); 1497 if (rc != TMF_RESP_FUNC_COMPLETE) { 1498 dev_err(dev, "phy%d ata reset failed rc=%d\n", 1499 sas_phy->id, rc); 1500 break; 1501 } 1502 } 1503 } 1504 } 1505 1506 static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba) 1507 { 1508 struct device *dev = hisi_hba->dev; 1509 int port_no, rc, i; 1510 1511 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1512 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; 1513 struct domain_device *device = sas_dev->sas_device; 1514 1515 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device) 1516 continue; 1517 1518 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1519 HISI_SAS_INT_ABT_DEV, 0, 1520 false); 1521 if (rc < 0) 1522 dev_err(dev, "STP reject: abort dev failed %d\n", rc); 1523 } 1524 1525 for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) { 1526 struct hisi_sas_port *port = &hisi_hba->port[port_no]; 1527 struct asd_sas_port *sas_port = &port->sas_port; 1528 struct domain_device *port_dev = sas_port->port_dev; 1529 struct domain_device *device; 1530 1531 if (!port_dev || !dev_is_expander(port_dev->dev_type)) 1532 continue; 1533 1534 /* Try to find a SATA device */ 1535 list_for_each_entry(device, &sas_port->dev_list, 1536 dev_list_node) { 1537 if (dev_is_sata(device)) { 1538 hisi_sas_send_ata_reset_each_phy(hisi_hba, 1539 sas_port, 1540 device); 1541 break; 1542 } 1543 } 1544 } 1545 } 1546 1547 void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba) 1548 { 1549 struct Scsi_Host *shost = hisi_hba->shost; 1550 1551 down(&hisi_hba->sem); 1552 hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba); 1553 1554 scsi_block_requests(shost); 1555 hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000); 1556 1557 if (timer_pending(&hisi_hba->timer)) 1558 del_timer_sync(&hisi_hba->timer); 1559 1560 set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1561 } 1562 EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare); 1563 1564 void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba) 1565 { 1566 struct Scsi_Host *shost = hisi_hba->shost; 1567 1568 /* Init and wait for PHYs to come up and all libsas event finished. */ 1569 hisi_hba->hw->phys_init(hisi_hba); 1570 msleep(1000); 1571 hisi_sas_refresh_port_id(hisi_hba); 1572 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1573 1574 if (hisi_hba->reject_stp_links_msk) 1575 hisi_sas_terminate_stp_reject(hisi_hba); 1576 hisi_sas_reset_init_all_devices(hisi_hba); 1577 up(&hisi_hba->sem); 1578 scsi_unblock_requests(shost); 1579 clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags); 1580 1581 hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state); 1582 } 1583 EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done); 1584 1585 static int hisi_sas_controller_prereset(struct hisi_hba *hisi_hba) 1586 { 1587 if (!hisi_hba->hw->soft_reset) 1588 return -1; 1589 1590 if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) 1591 return -1; 1592 1593 if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct) 1594 hisi_hba->hw->debugfs_snapshot_regs(hisi_hba); 1595 1596 return 0; 1597 } 1598 1599 static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba) 1600 { 1601 struct device *dev = hisi_hba->dev; 1602 struct Scsi_Host *shost = hisi_hba->shost; 1603 int rc; 1604 1605 dev_info(dev, "controller resetting...\n"); 1606 hisi_sas_controller_reset_prepare(hisi_hba); 1607 1608 rc = hisi_hba->hw->soft_reset(hisi_hba); 1609 if (rc) { 1610 dev_warn(dev, "controller reset failed (%d)\n", rc); 1611 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1612 up(&hisi_hba->sem); 1613 scsi_unblock_requests(shost); 1614 clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags); 1615 return rc; 1616 } 1617 1618 hisi_sas_controller_reset_done(hisi_hba); 1619 clear_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags); 1620 dev_info(dev, "controller reset complete\n"); 1621 1622 return 0; 1623 } 1624 1625 static int hisi_sas_abort_task(struct sas_task *task) 1626 { 1627 struct scsi_lun lun; 1628 struct hisi_sas_tmf_task tmf_task; 1629 struct domain_device *device = task->dev; 1630 struct hisi_sas_device *sas_dev = device->lldd_dev; 1631 struct hisi_hba *hisi_hba; 1632 struct device *dev; 1633 int rc = TMF_RESP_FUNC_FAILED; 1634 unsigned long flags; 1635 1636 if (!sas_dev) 1637 return TMF_RESP_FUNC_FAILED; 1638 1639 hisi_hba = dev_to_hisi_hba(task->dev); 1640 dev = hisi_hba->dev; 1641 1642 spin_lock_irqsave(&task->task_state_lock, flags); 1643 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 1644 struct hisi_sas_slot *slot = task->lldd_task; 1645 struct hisi_sas_cq *cq; 1646 1647 if (slot) { 1648 /* 1649 * sync irq to avoid free'ing task 1650 * before using task in IO completion 1651 */ 1652 cq = &hisi_hba->cq[slot->dlvry_queue]; 1653 synchronize_irq(cq->irq_no); 1654 } 1655 spin_unlock_irqrestore(&task->task_state_lock, flags); 1656 rc = TMF_RESP_FUNC_COMPLETE; 1657 goto out; 1658 } 1659 task->task_state_flags |= SAS_TASK_STATE_ABORTED; 1660 spin_unlock_irqrestore(&task->task_state_lock, flags); 1661 1662 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { 1663 struct scsi_cmnd *cmnd = task->uldd_task; 1664 struct hisi_sas_slot *slot = task->lldd_task; 1665 u16 tag = slot->idx; 1666 int rc2; 1667 1668 int_to_scsilun(cmnd->device->lun, &lun); 1669 tmf_task.tmf = TMF_ABORT_TASK; 1670 tmf_task.tag_of_task_to_be_managed = tag; 1671 1672 rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, 1673 &tmf_task); 1674 1675 rc2 = hisi_sas_internal_task_abort(hisi_hba, device, 1676 HISI_SAS_INT_ABT_CMD, tag, 1677 false); 1678 if (rc2 < 0) { 1679 dev_err(dev, "abort task: internal abort (%d)\n", rc2); 1680 return TMF_RESP_FUNC_FAILED; 1681 } 1682 1683 /* 1684 * If the TMF finds that the IO is not in the device and also 1685 * the internal abort does not succeed, then it is safe to 1686 * free the slot. 1687 * Note: if the internal abort succeeds then the slot 1688 * will have already been completed 1689 */ 1690 if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) { 1691 if (task->lldd_task) 1692 hisi_sas_do_release_task(hisi_hba, task, slot); 1693 } 1694 } else if (task->task_proto & SAS_PROTOCOL_SATA || 1695 task->task_proto & SAS_PROTOCOL_STP) { 1696 if (task->dev->dev_type == SAS_SATA_DEV) { 1697 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1698 HISI_SAS_INT_ABT_DEV, 1699 0, false); 1700 if (rc < 0) { 1701 dev_err(dev, "abort task: internal abort failed\n"); 1702 goto out; 1703 } 1704 hisi_sas_dereg_device(hisi_hba, device); 1705 rc = hisi_sas_softreset_ata_disk(device); 1706 } 1707 } else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) { 1708 /* SMP */ 1709 struct hisi_sas_slot *slot = task->lldd_task; 1710 u32 tag = slot->idx; 1711 struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue]; 1712 1713 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1714 HISI_SAS_INT_ABT_CMD, tag, 1715 false); 1716 if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) && 1717 task->lldd_task) { 1718 /* 1719 * sync irq to avoid free'ing task 1720 * before using task in IO completion 1721 */ 1722 synchronize_irq(cq->irq_no); 1723 slot->task = NULL; 1724 } 1725 } 1726 1727 out: 1728 if (rc != TMF_RESP_FUNC_COMPLETE) 1729 dev_notice(dev, "abort task: rc=%d\n", rc); 1730 return rc; 1731 } 1732 1733 static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun) 1734 { 1735 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1736 struct device *dev = hisi_hba->dev; 1737 struct hisi_sas_tmf_task tmf_task; 1738 int rc; 1739 1740 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1741 HISI_SAS_INT_ABT_DEV, 0, false); 1742 if (rc < 0) { 1743 dev_err(dev, "abort task set: internal abort rc=%d\n", rc); 1744 return TMF_RESP_FUNC_FAILED; 1745 } 1746 hisi_sas_dereg_device(hisi_hba, device); 1747 1748 tmf_task.tmf = TMF_ABORT_TASK_SET; 1749 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task); 1750 1751 if (rc == TMF_RESP_FUNC_COMPLETE) 1752 hisi_sas_release_task(hisi_hba, device); 1753 1754 return rc; 1755 } 1756 1757 static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun) 1758 { 1759 struct hisi_sas_tmf_task tmf_task; 1760 int rc; 1761 1762 tmf_task.tmf = TMF_CLEAR_ACA; 1763 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task); 1764 1765 return rc; 1766 } 1767 1768 #define I_T_NEXUS_RESET_PHYUP_TIMEOUT (2 * HZ) 1769 1770 static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device) 1771 { 1772 struct sas_phy *local_phy = sas_get_local_phy(device); 1773 struct hisi_sas_device *sas_dev = device->lldd_dev; 1774 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1775 struct sas_ha_struct *sas_ha = &hisi_hba->sha; 1776 DECLARE_COMPLETION_ONSTACK(phyreset); 1777 int rc, reset_type; 1778 1779 if (!local_phy->enabled) { 1780 sas_put_local_phy(local_phy); 1781 return -ENODEV; 1782 } 1783 1784 if (scsi_is_sas_phy_local(local_phy)) { 1785 struct asd_sas_phy *sas_phy = 1786 sas_ha->sas_phy[local_phy->number]; 1787 struct hisi_sas_phy *phy = 1788 container_of(sas_phy, struct hisi_sas_phy, sas_phy); 1789 phy->in_reset = 1; 1790 phy->reset_completion = &phyreset; 1791 } 1792 1793 reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT || 1794 !dev_is_sata(device)) ? true : false; 1795 1796 rc = sas_phy_reset(local_phy, reset_type); 1797 sas_put_local_phy(local_phy); 1798 1799 if (scsi_is_sas_phy_local(local_phy)) { 1800 struct asd_sas_phy *sas_phy = 1801 sas_ha->sas_phy[local_phy->number]; 1802 struct hisi_sas_phy *phy = 1803 container_of(sas_phy, struct hisi_sas_phy, sas_phy); 1804 int ret = wait_for_completion_timeout(&phyreset, 1805 I_T_NEXUS_RESET_PHYUP_TIMEOUT); 1806 unsigned long flags; 1807 1808 spin_lock_irqsave(&phy->lock, flags); 1809 phy->reset_completion = NULL; 1810 phy->in_reset = 0; 1811 spin_unlock_irqrestore(&phy->lock, flags); 1812 1813 /* report PHY down if timed out */ 1814 if (!ret) 1815 hisi_sas_phy_down(hisi_hba, sas_phy->id, 0, GFP_KERNEL); 1816 } else if (sas_dev->dev_status != HISI_SAS_DEV_INIT) { 1817 /* 1818 * If in init state, we rely on caller to wait for link to be 1819 * ready; otherwise, except phy reset is fail, delay. 1820 */ 1821 if (!rc) 1822 msleep(2000); 1823 } 1824 1825 return rc; 1826 } 1827 1828 static int hisi_sas_I_T_nexus_reset(struct domain_device *device) 1829 { 1830 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1831 struct device *dev = hisi_hba->dev; 1832 int rc; 1833 1834 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1835 HISI_SAS_INT_ABT_DEV, 0, false); 1836 if (rc < 0) { 1837 dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc); 1838 return TMF_RESP_FUNC_FAILED; 1839 } 1840 hisi_sas_dereg_device(hisi_hba, device); 1841 1842 if (dev_is_sata(device)) { 1843 rc = hisi_sas_softreset_ata_disk(device); 1844 if (rc == TMF_RESP_FUNC_FAILED) 1845 return TMF_RESP_FUNC_FAILED; 1846 } 1847 1848 rc = hisi_sas_debug_I_T_nexus_reset(device); 1849 1850 if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) 1851 hisi_sas_release_task(hisi_hba, device); 1852 1853 return rc; 1854 } 1855 1856 static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun) 1857 { 1858 struct hisi_sas_device *sas_dev = device->lldd_dev; 1859 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1860 struct device *dev = hisi_hba->dev; 1861 int rc = TMF_RESP_FUNC_FAILED; 1862 1863 /* Clear internal IO and then lu reset */ 1864 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1865 HISI_SAS_INT_ABT_DEV, 0, false); 1866 if (rc < 0) { 1867 dev_err(dev, "lu_reset: internal abort failed\n"); 1868 goto out; 1869 } 1870 hisi_sas_dereg_device(hisi_hba, device); 1871 1872 if (dev_is_sata(device)) { 1873 struct sas_phy *phy; 1874 1875 phy = sas_get_local_phy(device); 1876 1877 rc = sas_phy_reset(phy, true); 1878 1879 if (rc == 0) 1880 hisi_sas_release_task(hisi_hba, device); 1881 sas_put_local_phy(phy); 1882 } else { 1883 struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET }; 1884 1885 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task); 1886 if (rc == TMF_RESP_FUNC_COMPLETE) 1887 hisi_sas_release_task(hisi_hba, device); 1888 } 1889 out: 1890 if (rc != TMF_RESP_FUNC_COMPLETE) 1891 dev_err(dev, "lu_reset: for device[%d]:rc= %d\n", 1892 sas_dev->device_id, rc); 1893 return rc; 1894 } 1895 1896 static void hisi_sas_async_I_T_nexus_reset(void *data, async_cookie_t cookie) 1897 { 1898 struct domain_device *device = data; 1899 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1900 int rc; 1901 1902 rc = hisi_sas_debug_I_T_nexus_reset(device); 1903 if (rc != TMF_RESP_FUNC_COMPLETE) 1904 dev_info(hisi_hba->dev, "I_T_nexus reset fail for dev:%016llx rc=%d\n", 1905 SAS_ADDR(device->sas_addr), rc); 1906 } 1907 1908 static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha) 1909 { 1910 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; 1911 HISI_SAS_DECLARE_RST_WORK_ON_STACK(r); 1912 ASYNC_DOMAIN_EXCLUSIVE(async); 1913 int i; 1914 1915 queue_work(hisi_hba->wq, &r.work); 1916 wait_for_completion(r.completion); 1917 if (!r.done) 1918 return TMF_RESP_FUNC_FAILED; 1919 1920 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1921 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; 1922 struct domain_device *device = sas_dev->sas_device; 1923 1924 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device || 1925 dev_is_expander(device->dev_type)) 1926 continue; 1927 1928 async_schedule_domain(hisi_sas_async_I_T_nexus_reset, 1929 device, &async); 1930 } 1931 1932 async_synchronize_full_domain(&async); 1933 hisi_sas_release_tasks(hisi_hba); 1934 1935 return TMF_RESP_FUNC_COMPLETE; 1936 } 1937 1938 static int hisi_sas_query_task(struct sas_task *task) 1939 { 1940 struct scsi_lun lun; 1941 struct hisi_sas_tmf_task tmf_task; 1942 int rc = TMF_RESP_FUNC_FAILED; 1943 1944 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { 1945 struct scsi_cmnd *cmnd = task->uldd_task; 1946 struct domain_device *device = task->dev; 1947 struct hisi_sas_slot *slot = task->lldd_task; 1948 u32 tag = slot->idx; 1949 1950 int_to_scsilun(cmnd->device->lun, &lun); 1951 tmf_task.tmf = TMF_QUERY_TASK; 1952 tmf_task.tag_of_task_to_be_managed = tag; 1953 1954 rc = hisi_sas_debug_issue_ssp_tmf(device, 1955 lun.scsi_lun, 1956 &tmf_task); 1957 switch (rc) { 1958 /* The task is still in Lun, release it then */ 1959 case TMF_RESP_FUNC_SUCC: 1960 /* The task is not in Lun or failed, reset the phy */ 1961 case TMF_RESP_FUNC_FAILED: 1962 case TMF_RESP_FUNC_COMPLETE: 1963 break; 1964 default: 1965 rc = TMF_RESP_FUNC_FAILED; 1966 break; 1967 } 1968 } 1969 return rc; 1970 } 1971 1972 static int 1973 hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, 1974 struct sas_task *task, int abort_flag, 1975 int task_tag, struct hisi_sas_dq *dq) 1976 { 1977 struct domain_device *device = task->dev; 1978 struct hisi_sas_device *sas_dev = device->lldd_dev; 1979 struct device *dev = hisi_hba->dev; 1980 struct hisi_sas_port *port; 1981 struct hisi_sas_slot *slot; 1982 struct asd_sas_port *sas_port = device->port; 1983 struct hisi_sas_cmd_hdr *cmd_hdr_base; 1984 int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx; 1985 unsigned long flags; 1986 int wr_q_index; 1987 1988 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) 1989 return -EINVAL; 1990 1991 if (!device->port) 1992 return -1; 1993 1994 port = to_hisi_sas_port(sas_port); 1995 1996 /* simply get a slot and send abort command */ 1997 rc = hisi_sas_slot_index_alloc(hisi_hba, NULL); 1998 if (rc < 0) 1999 goto err_out; 2000 2001 slot_idx = rc; 2002 slot = &hisi_hba->slot_info[slot_idx]; 2003 2004 spin_lock(&dq->lock); 2005 wr_q_index = dq->wr_point; 2006 dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS; 2007 list_add_tail(&slot->delivery, &dq->list); 2008 spin_unlock(&dq->lock); 2009 spin_lock(&sas_dev->lock); 2010 list_add_tail(&slot->entry, &sas_dev->list); 2011 spin_unlock(&sas_dev->lock); 2012 2013 dlvry_queue = dq->id; 2014 dlvry_queue_slot = wr_q_index; 2015 2016 slot->device_id = sas_dev->device_id; 2017 slot->n_elem = n_elem; 2018 slot->dlvry_queue = dlvry_queue; 2019 slot->dlvry_queue_slot = dlvry_queue_slot; 2020 cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue]; 2021 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot]; 2022 slot->task = task; 2023 slot->port = port; 2024 slot->is_internal = true; 2025 task->lldd_task = slot; 2026 2027 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr)); 2028 memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ); 2029 memset(hisi_sas_status_buf_addr_mem(slot), 0, 2030 sizeof(struct hisi_sas_err_record)); 2031 2032 hisi_sas_task_prep_abort(hisi_hba, slot, device_id, 2033 abort_flag, task_tag); 2034 2035 spin_lock_irqsave(&task->task_state_lock, flags); 2036 task->task_state_flags |= SAS_TASK_AT_INITIATOR; 2037 spin_unlock_irqrestore(&task->task_state_lock, flags); 2038 WRITE_ONCE(slot->ready, 1); 2039 /* send abort command to the chip */ 2040 spin_lock(&dq->lock); 2041 hisi_hba->hw->start_delivery(dq); 2042 spin_unlock(&dq->lock); 2043 2044 return 0; 2045 2046 err_out: 2047 dev_err(dev, "internal abort task prep: failed[%d]!\n", rc); 2048 2049 return rc; 2050 } 2051 2052 /** 2053 * _hisi_sas_internal_task_abort -- execute an internal 2054 * abort command for single IO command or a device 2055 * @hisi_hba: host controller struct 2056 * @device: domain device 2057 * @abort_flag: mode of operation, device or single IO 2058 * @tag: tag of IO to be aborted (only relevant to single 2059 * IO mode) 2060 * @dq: delivery queue for this internal abort command 2061 * @rst_to_recover: If rst_to_recover set, queue a controller 2062 * reset if an internal abort times out. 2063 */ 2064 static int 2065 _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, 2066 struct domain_device *device, int abort_flag, 2067 int tag, struct hisi_sas_dq *dq, bool rst_to_recover) 2068 { 2069 struct sas_task *task; 2070 struct hisi_sas_device *sas_dev = device->lldd_dev; 2071 struct device *dev = hisi_hba->dev; 2072 int res; 2073 2074 /* 2075 * The interface is not realized means this HW don't support internal 2076 * abort, or don't need to do internal abort. Then here, we return 2077 * TMF_RESP_FUNC_FAILED and let other steps go on, which depends that 2078 * the internal abort has been executed and returned CQ. 2079 */ 2080 if (!hisi_hba->hw->prep_abort) 2081 return TMF_RESP_FUNC_FAILED; 2082 2083 if (test_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags)) 2084 return -EIO; 2085 2086 task = sas_alloc_slow_task(GFP_KERNEL); 2087 if (!task) 2088 return -ENOMEM; 2089 2090 task->dev = device; 2091 task->task_proto = device->tproto; 2092 task->task_done = hisi_sas_task_done; 2093 task->slow_task->timer.function = hisi_sas_tmf_timedout; 2094 task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT; 2095 add_timer(&task->slow_task->timer); 2096 2097 res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id, 2098 task, abort_flag, tag, dq); 2099 if (res) { 2100 del_timer(&task->slow_task->timer); 2101 dev_err(dev, "internal task abort: executing internal task failed: %d\n", 2102 res); 2103 goto exit; 2104 } 2105 wait_for_completion(&task->slow_task->completion); 2106 res = TMF_RESP_FUNC_FAILED; 2107 2108 /* Internal abort timed out */ 2109 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { 2110 if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct) 2111 queue_work(hisi_hba->wq, &hisi_hba->debugfs_work); 2112 2113 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { 2114 struct hisi_sas_slot *slot = task->lldd_task; 2115 2116 set_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags); 2117 2118 if (slot) { 2119 struct hisi_sas_cq *cq = 2120 &hisi_hba->cq[slot->dlvry_queue]; 2121 /* 2122 * sync irq to avoid free'ing task 2123 * before using task in IO completion 2124 */ 2125 synchronize_irq(cq->irq_no); 2126 slot->task = NULL; 2127 } 2128 2129 if (rst_to_recover) { 2130 dev_err(dev, "internal task abort: timeout and not done. Queuing reset.\n"); 2131 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 2132 } else { 2133 dev_err(dev, "internal task abort: timeout and not done.\n"); 2134 } 2135 2136 res = -EIO; 2137 goto exit; 2138 } else 2139 dev_err(dev, "internal task abort: timeout.\n"); 2140 } 2141 2142 if (task->task_status.resp == SAS_TASK_COMPLETE && 2143 task->task_status.stat == TMF_RESP_FUNC_COMPLETE) { 2144 res = TMF_RESP_FUNC_COMPLETE; 2145 goto exit; 2146 } 2147 2148 if (task->task_status.resp == SAS_TASK_COMPLETE && 2149 task->task_status.stat == TMF_RESP_FUNC_SUCC) { 2150 res = TMF_RESP_FUNC_SUCC; 2151 goto exit; 2152 } 2153 2154 exit: 2155 dev_dbg(dev, "internal task abort: task to dev %016llx task=%pK resp: 0x%x sts 0x%x\n", 2156 SAS_ADDR(device->sas_addr), task, 2157 task->task_status.resp, /* 0 is complete, -1 is undelivered */ 2158 task->task_status.stat); 2159 sas_free_task(task); 2160 2161 return res; 2162 } 2163 2164 static int 2165 hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, 2166 struct domain_device *device, 2167 int abort_flag, int tag, bool rst_to_recover) 2168 { 2169 struct hisi_sas_slot *slot; 2170 struct device *dev = hisi_hba->dev; 2171 struct hisi_sas_dq *dq; 2172 int i, rc; 2173 2174 switch (abort_flag) { 2175 case HISI_SAS_INT_ABT_CMD: 2176 slot = &hisi_hba->slot_info[tag]; 2177 dq = &hisi_hba->dq[slot->dlvry_queue]; 2178 return _hisi_sas_internal_task_abort(hisi_hba, device, 2179 abort_flag, tag, dq, 2180 rst_to_recover); 2181 case HISI_SAS_INT_ABT_DEV: 2182 for (i = 0; i < hisi_hba->cq_nvecs; i++) { 2183 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 2184 const struct cpumask *mask = cq->irq_mask; 2185 2186 if (mask && !cpumask_intersects(cpu_online_mask, mask)) 2187 continue; 2188 dq = &hisi_hba->dq[i]; 2189 rc = _hisi_sas_internal_task_abort(hisi_hba, device, 2190 abort_flag, tag, 2191 dq, rst_to_recover); 2192 if (rc) 2193 return rc; 2194 } 2195 break; 2196 default: 2197 dev_err(dev, "Unrecognised internal abort flag (%d)\n", 2198 abort_flag); 2199 return -EINVAL; 2200 } 2201 2202 return 0; 2203 } 2204 2205 static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy) 2206 { 2207 hisi_sas_port_notify_formed(sas_phy); 2208 } 2209 2210 static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type, 2211 u8 reg_index, u8 reg_count, u8 *write_data) 2212 { 2213 struct hisi_hba *hisi_hba = sha->lldd_ha; 2214 2215 if (!hisi_hba->hw->write_gpio) 2216 return -EOPNOTSUPP; 2217 2218 return hisi_hba->hw->write_gpio(hisi_hba, reg_type, 2219 reg_index, reg_count, write_data); 2220 } 2221 2222 static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy) 2223 { 2224 struct asd_sas_phy *sas_phy = &phy->sas_phy; 2225 struct sas_phy *sphy = sas_phy->phy; 2226 unsigned long flags; 2227 2228 phy->phy_attached = 0; 2229 phy->phy_type = 0; 2230 phy->port = NULL; 2231 2232 spin_lock_irqsave(&phy->lock, flags); 2233 if (phy->enable) 2234 sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; 2235 else 2236 sphy->negotiated_linkrate = SAS_PHY_DISABLED; 2237 spin_unlock_irqrestore(&phy->lock, flags); 2238 } 2239 2240 void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy, 2241 gfp_t gfp_flags) 2242 { 2243 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 2244 struct asd_sas_phy *sas_phy = &phy->sas_phy; 2245 struct device *dev = hisi_hba->dev; 2246 2247 if (rdy) { 2248 /* Phy down but ready */ 2249 hisi_sas_bytes_dmaed(hisi_hba, phy_no, gfp_flags); 2250 hisi_sas_port_notify_formed(sas_phy); 2251 } else { 2252 struct hisi_sas_port *port = phy->port; 2253 2254 if (test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags) || 2255 phy->in_reset) { 2256 dev_info(dev, "ignore flutter phy%d down\n", phy_no); 2257 return; 2258 } 2259 /* Phy down and not ready */ 2260 sas_notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL, gfp_flags); 2261 sas_phy_disconnected(sas_phy); 2262 2263 if (port) { 2264 if (phy->phy_type & PORT_TYPE_SAS) { 2265 int port_id = port->id; 2266 2267 if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba, 2268 port_id)) 2269 port->port_attached = 0; 2270 } else if (phy->phy_type & PORT_TYPE_SATA) 2271 port->port_attached = 0; 2272 } 2273 hisi_sas_phy_disconnected(phy); 2274 } 2275 } 2276 EXPORT_SYMBOL_GPL(hisi_sas_phy_down); 2277 2278 void hisi_sas_sync_irqs(struct hisi_hba *hisi_hba) 2279 { 2280 int i; 2281 2282 for (i = 0; i < hisi_hba->cq_nvecs; i++) { 2283 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 2284 2285 synchronize_irq(cq->irq_no); 2286 } 2287 } 2288 EXPORT_SYMBOL_GPL(hisi_sas_sync_irqs); 2289 2290 int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type) 2291 { 2292 struct hisi_hba *hisi_hba = shost_priv(shost); 2293 2294 if (reset_type != SCSI_ADAPTER_RESET) 2295 return -EOPNOTSUPP; 2296 2297 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 2298 2299 return 0; 2300 } 2301 EXPORT_SYMBOL_GPL(hisi_sas_host_reset); 2302 2303 struct scsi_transport_template *hisi_sas_stt; 2304 EXPORT_SYMBOL_GPL(hisi_sas_stt); 2305 2306 static struct sas_domain_function_template hisi_sas_transport_ops = { 2307 .lldd_dev_found = hisi_sas_dev_found, 2308 .lldd_dev_gone = hisi_sas_dev_gone, 2309 .lldd_execute_task = hisi_sas_queue_command, 2310 .lldd_control_phy = hisi_sas_control_phy, 2311 .lldd_abort_task = hisi_sas_abort_task, 2312 .lldd_abort_task_set = hisi_sas_abort_task_set, 2313 .lldd_clear_aca = hisi_sas_clear_aca, 2314 .lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset, 2315 .lldd_lu_reset = hisi_sas_lu_reset, 2316 .lldd_query_task = hisi_sas_query_task, 2317 .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha, 2318 .lldd_port_formed = hisi_sas_port_formed, 2319 .lldd_write_gpio = hisi_sas_write_gpio, 2320 }; 2321 2322 void hisi_sas_init_mem(struct hisi_hba *hisi_hba) 2323 { 2324 int i, s, j, max_command_entries = HISI_SAS_MAX_COMMANDS; 2325 struct hisi_sas_breakpoint *sata_breakpoint = hisi_hba->sata_breakpoint; 2326 2327 for (i = 0; i < hisi_hba->queue_count; i++) { 2328 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 2329 struct hisi_sas_dq *dq = &hisi_hba->dq[i]; 2330 struct hisi_sas_cmd_hdr *cmd_hdr = hisi_hba->cmd_hdr[i]; 2331 2332 s = sizeof(struct hisi_sas_cmd_hdr); 2333 for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++) 2334 memset(&cmd_hdr[j], 0, s); 2335 2336 dq->wr_point = 0; 2337 2338 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; 2339 memset(hisi_hba->complete_hdr[i], 0, s); 2340 cq->rd_point = 0; 2341 } 2342 2343 s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy; 2344 memset(hisi_hba->initial_fis, 0, s); 2345 2346 s = max_command_entries * sizeof(struct hisi_sas_iost); 2347 memset(hisi_hba->iost, 0, s); 2348 2349 s = max_command_entries * sizeof(struct hisi_sas_breakpoint); 2350 memset(hisi_hba->breakpoint, 0, s); 2351 2352 s = sizeof(struct hisi_sas_sata_breakpoint); 2353 for (j = 0; j < HISI_SAS_MAX_ITCT_ENTRIES; j++) 2354 memset(&sata_breakpoint[j], 0, s); 2355 } 2356 EXPORT_SYMBOL_GPL(hisi_sas_init_mem); 2357 2358 int hisi_sas_alloc(struct hisi_hba *hisi_hba) 2359 { 2360 struct device *dev = hisi_hba->dev; 2361 int i, j, s, max_command_entries = HISI_SAS_MAX_COMMANDS; 2362 int max_command_entries_ru, sz_slot_buf_ru; 2363 int blk_cnt, slots_per_blk; 2364 2365 sema_init(&hisi_hba->sem, 1); 2366 spin_lock_init(&hisi_hba->lock); 2367 for (i = 0; i < hisi_hba->n_phy; i++) { 2368 hisi_sas_phy_init(hisi_hba, i); 2369 hisi_hba->port[i].port_attached = 0; 2370 hisi_hba->port[i].id = -1; 2371 } 2372 2373 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 2374 hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED; 2375 hisi_hba->devices[i].device_id = i; 2376 hisi_hba->devices[i].dev_status = HISI_SAS_DEV_INIT; 2377 } 2378 2379 for (i = 0; i < hisi_hba->queue_count; i++) { 2380 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 2381 struct hisi_sas_dq *dq = &hisi_hba->dq[i]; 2382 2383 /* Completion queue structure */ 2384 cq->id = i; 2385 cq->hisi_hba = hisi_hba; 2386 2387 /* Delivery queue structure */ 2388 spin_lock_init(&dq->lock); 2389 INIT_LIST_HEAD(&dq->list); 2390 dq->id = i; 2391 dq->hisi_hba = hisi_hba; 2392 2393 /* Delivery queue */ 2394 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS; 2395 hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s, 2396 &hisi_hba->cmd_hdr_dma[i], 2397 GFP_KERNEL); 2398 if (!hisi_hba->cmd_hdr[i]) 2399 goto err_out; 2400 2401 /* Completion queue */ 2402 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; 2403 hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s, 2404 &hisi_hba->complete_hdr_dma[i], 2405 GFP_KERNEL); 2406 if (!hisi_hba->complete_hdr[i]) 2407 goto err_out; 2408 } 2409 2410 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct); 2411 hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma, 2412 GFP_KERNEL); 2413 if (!hisi_hba->itct) 2414 goto err_out; 2415 2416 hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries, 2417 sizeof(struct hisi_sas_slot), 2418 GFP_KERNEL); 2419 if (!hisi_hba->slot_info) 2420 goto err_out; 2421 2422 /* roundup to avoid overly large block size */ 2423 max_command_entries_ru = roundup(max_command_entries, 64); 2424 if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK) 2425 sz_slot_buf_ru = sizeof(struct hisi_sas_slot_dif_buf_table); 2426 else 2427 sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table); 2428 sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64); 2429 s = max(lcm(max_command_entries_ru, sz_slot_buf_ru), PAGE_SIZE); 2430 blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s; 2431 slots_per_blk = s / sz_slot_buf_ru; 2432 2433 for (i = 0; i < blk_cnt; i++) { 2434 int slot_index = i * slots_per_blk; 2435 dma_addr_t buf_dma; 2436 void *buf; 2437 2438 buf = dmam_alloc_coherent(dev, s, &buf_dma, 2439 GFP_KERNEL); 2440 if (!buf) 2441 goto err_out; 2442 2443 for (j = 0; j < slots_per_blk; j++, slot_index++) { 2444 struct hisi_sas_slot *slot; 2445 2446 slot = &hisi_hba->slot_info[slot_index]; 2447 slot->buf = buf; 2448 slot->buf_dma = buf_dma; 2449 slot->idx = slot_index; 2450 2451 buf += sz_slot_buf_ru; 2452 buf_dma += sz_slot_buf_ru; 2453 } 2454 } 2455 2456 s = max_command_entries * sizeof(struct hisi_sas_iost); 2457 hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma, 2458 GFP_KERNEL); 2459 if (!hisi_hba->iost) 2460 goto err_out; 2461 2462 s = max_command_entries * sizeof(struct hisi_sas_breakpoint); 2463 hisi_hba->breakpoint = dmam_alloc_coherent(dev, s, 2464 &hisi_hba->breakpoint_dma, 2465 GFP_KERNEL); 2466 if (!hisi_hba->breakpoint) 2467 goto err_out; 2468 2469 hisi_hba->slot_index_count = max_command_entries; 2470 s = hisi_hba->slot_index_count / BITS_PER_BYTE; 2471 hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL); 2472 if (!hisi_hba->slot_index_tags) 2473 goto err_out; 2474 2475 s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS; 2476 hisi_hba->initial_fis = dmam_alloc_coherent(dev, s, 2477 &hisi_hba->initial_fis_dma, 2478 GFP_KERNEL); 2479 if (!hisi_hba->initial_fis) 2480 goto err_out; 2481 2482 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint); 2483 hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s, 2484 &hisi_hba->sata_breakpoint_dma, 2485 GFP_KERNEL); 2486 if (!hisi_hba->sata_breakpoint) 2487 goto err_out; 2488 2489 hisi_sas_slot_index_init(hisi_hba); 2490 hisi_hba->last_slot_index = HISI_SAS_UNRESERVED_IPTT; 2491 2492 hisi_hba->wq = create_singlethread_workqueue(dev_name(dev)); 2493 if (!hisi_hba->wq) { 2494 dev_err(dev, "sas_alloc: failed to create workqueue\n"); 2495 goto err_out; 2496 } 2497 2498 return 0; 2499 err_out: 2500 return -ENOMEM; 2501 } 2502 EXPORT_SYMBOL_GPL(hisi_sas_alloc); 2503 2504 void hisi_sas_free(struct hisi_hba *hisi_hba) 2505 { 2506 int i; 2507 2508 for (i = 0; i < hisi_hba->n_phy; i++) { 2509 struct hisi_sas_phy *phy = &hisi_hba->phy[i]; 2510 2511 del_timer_sync(&phy->timer); 2512 } 2513 2514 if (hisi_hba->wq) 2515 destroy_workqueue(hisi_hba->wq); 2516 } 2517 EXPORT_SYMBOL_GPL(hisi_sas_free); 2518 2519 void hisi_sas_rst_work_handler(struct work_struct *work) 2520 { 2521 struct hisi_hba *hisi_hba = 2522 container_of(work, struct hisi_hba, rst_work); 2523 2524 if (hisi_sas_controller_prereset(hisi_hba)) 2525 return; 2526 2527 hisi_sas_controller_reset(hisi_hba); 2528 } 2529 EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler); 2530 2531 void hisi_sas_sync_rst_work_handler(struct work_struct *work) 2532 { 2533 struct hisi_sas_rst *rst = 2534 container_of(work, struct hisi_sas_rst, work); 2535 2536 if (hisi_sas_controller_prereset(rst->hisi_hba)) 2537 goto rst_complete; 2538 2539 if (!hisi_sas_controller_reset(rst->hisi_hba)) 2540 rst->done = true; 2541 rst_complete: 2542 complete(rst->completion); 2543 } 2544 EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler); 2545 2546 int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba) 2547 { 2548 struct device *dev = hisi_hba->dev; 2549 struct platform_device *pdev = hisi_hba->platform_dev; 2550 struct device_node *np = pdev ? pdev->dev.of_node : NULL; 2551 struct clk *refclk; 2552 2553 if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr, 2554 SAS_ADDR_SIZE)) { 2555 dev_err(dev, "could not get property sas-addr\n"); 2556 return -ENOENT; 2557 } 2558 2559 if (np) { 2560 /* 2561 * These properties are only required for platform device-based 2562 * controller with DT firmware. 2563 */ 2564 hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np, 2565 "hisilicon,sas-syscon"); 2566 if (IS_ERR(hisi_hba->ctrl)) { 2567 dev_err(dev, "could not get syscon\n"); 2568 return -ENOENT; 2569 } 2570 2571 if (device_property_read_u32(dev, "ctrl-reset-reg", 2572 &hisi_hba->ctrl_reset_reg)) { 2573 dev_err(dev, "could not get property ctrl-reset-reg\n"); 2574 return -ENOENT; 2575 } 2576 2577 if (device_property_read_u32(dev, "ctrl-reset-sts-reg", 2578 &hisi_hba->ctrl_reset_sts_reg)) { 2579 dev_err(dev, "could not get property ctrl-reset-sts-reg\n"); 2580 return -ENOENT; 2581 } 2582 2583 if (device_property_read_u32(dev, "ctrl-clock-ena-reg", 2584 &hisi_hba->ctrl_clock_ena_reg)) { 2585 dev_err(dev, "could not get property ctrl-clock-ena-reg\n"); 2586 return -ENOENT; 2587 } 2588 } 2589 2590 refclk = devm_clk_get(dev, NULL); 2591 if (IS_ERR(refclk)) 2592 dev_dbg(dev, "no ref clk property\n"); 2593 else 2594 hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000; 2595 2596 if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) { 2597 dev_err(dev, "could not get property phy-count\n"); 2598 return -ENOENT; 2599 } 2600 2601 if (device_property_read_u32(dev, "queue-count", 2602 &hisi_hba->queue_count)) { 2603 dev_err(dev, "could not get property queue-count\n"); 2604 return -ENOENT; 2605 } 2606 2607 return 0; 2608 } 2609 EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info); 2610 2611 static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, 2612 const struct hisi_sas_hw *hw) 2613 { 2614 struct resource *res; 2615 struct Scsi_Host *shost; 2616 struct hisi_hba *hisi_hba; 2617 struct device *dev = &pdev->dev; 2618 int error; 2619 2620 shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba)); 2621 if (!shost) { 2622 dev_err(dev, "scsi host alloc failed\n"); 2623 return NULL; 2624 } 2625 hisi_hba = shost_priv(shost); 2626 2627 INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler); 2628 hisi_hba->hw = hw; 2629 hisi_hba->dev = dev; 2630 hisi_hba->platform_dev = pdev; 2631 hisi_hba->shost = shost; 2632 SHOST_TO_SAS_HA(shost) = &hisi_hba->sha; 2633 2634 timer_setup(&hisi_hba->timer, NULL, 0); 2635 2636 if (hisi_sas_get_fw_info(hisi_hba) < 0) 2637 goto err_out; 2638 2639 error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 2640 if (error) 2641 error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); 2642 2643 if (error) { 2644 dev_err(dev, "No usable DMA addressing method\n"); 2645 goto err_out; 2646 } 2647 2648 hisi_hba->regs = devm_platform_ioremap_resource(pdev, 0); 2649 if (IS_ERR(hisi_hba->regs)) 2650 goto err_out; 2651 2652 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 2653 if (res) { 2654 hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res); 2655 if (IS_ERR(hisi_hba->sgpio_regs)) 2656 goto err_out; 2657 } 2658 2659 if (hisi_sas_alloc(hisi_hba)) { 2660 hisi_sas_free(hisi_hba); 2661 goto err_out; 2662 } 2663 2664 return shost; 2665 err_out: 2666 scsi_host_put(shost); 2667 dev_err(dev, "shost alloc failed\n"); 2668 return NULL; 2669 } 2670 2671 static int hisi_sas_interrupt_preinit(struct hisi_hba *hisi_hba) 2672 { 2673 if (hisi_hba->hw->interrupt_preinit) 2674 return hisi_hba->hw->interrupt_preinit(hisi_hba); 2675 return 0; 2676 } 2677 2678 int hisi_sas_probe(struct platform_device *pdev, 2679 const struct hisi_sas_hw *hw) 2680 { 2681 struct Scsi_Host *shost; 2682 struct hisi_hba *hisi_hba; 2683 struct device *dev = &pdev->dev; 2684 struct asd_sas_phy **arr_phy; 2685 struct asd_sas_port **arr_port; 2686 struct sas_ha_struct *sha; 2687 int rc, phy_nr, port_nr, i; 2688 2689 shost = hisi_sas_shost_alloc(pdev, hw); 2690 if (!shost) 2691 return -ENOMEM; 2692 2693 sha = SHOST_TO_SAS_HA(shost); 2694 hisi_hba = shost_priv(shost); 2695 platform_set_drvdata(pdev, sha); 2696 2697 phy_nr = port_nr = hisi_hba->n_phy; 2698 2699 arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL); 2700 arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL); 2701 if (!arr_phy || !arr_port) { 2702 rc = -ENOMEM; 2703 goto err_out_ha; 2704 } 2705 2706 sha->sas_phy = arr_phy; 2707 sha->sas_port = arr_port; 2708 sha->lldd_ha = hisi_hba; 2709 2710 shost->transportt = hisi_sas_stt; 2711 shost->max_id = HISI_SAS_MAX_DEVICES; 2712 shost->max_lun = ~0; 2713 shost->max_channel = 1; 2714 shost->max_cmd_len = 16; 2715 if (hisi_hba->hw->slot_index_alloc) { 2716 shost->can_queue = HISI_SAS_MAX_COMMANDS; 2717 shost->cmd_per_lun = HISI_SAS_MAX_COMMANDS; 2718 } else { 2719 shost->can_queue = HISI_SAS_UNRESERVED_IPTT; 2720 shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT; 2721 } 2722 2723 sha->sas_ha_name = DRV_NAME; 2724 sha->dev = hisi_hba->dev; 2725 sha->lldd_module = THIS_MODULE; 2726 sha->sas_addr = &hisi_hba->sas_addr[0]; 2727 sha->num_phys = hisi_hba->n_phy; 2728 sha->core.shost = hisi_hba->shost; 2729 2730 for (i = 0; i < hisi_hba->n_phy; i++) { 2731 sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy; 2732 sha->sas_port[i] = &hisi_hba->port[i].sas_port; 2733 } 2734 2735 rc = hisi_sas_interrupt_preinit(hisi_hba); 2736 if (rc) 2737 goto err_out_ha; 2738 2739 rc = scsi_add_host(shost, &pdev->dev); 2740 if (rc) 2741 goto err_out_ha; 2742 2743 rc = sas_register_ha(sha); 2744 if (rc) 2745 goto err_out_register_ha; 2746 2747 rc = hisi_hba->hw->hw_init(hisi_hba); 2748 if (rc) 2749 goto err_out_hw_init; 2750 2751 scsi_scan_host(shost); 2752 2753 return 0; 2754 2755 err_out_hw_init: 2756 sas_unregister_ha(sha); 2757 err_out_register_ha: 2758 scsi_remove_host(shost); 2759 err_out_ha: 2760 hisi_sas_free(hisi_hba); 2761 scsi_host_put(shost); 2762 return rc; 2763 } 2764 EXPORT_SYMBOL_GPL(hisi_sas_probe); 2765 2766 int hisi_sas_remove(struct platform_device *pdev) 2767 { 2768 struct sas_ha_struct *sha = platform_get_drvdata(pdev); 2769 struct hisi_hba *hisi_hba = sha->lldd_ha; 2770 struct Scsi_Host *shost = sha->core.shost; 2771 2772 if (timer_pending(&hisi_hba->timer)) 2773 del_timer(&hisi_hba->timer); 2774 2775 sas_unregister_ha(sha); 2776 sas_remove_host(sha->core.shost); 2777 2778 hisi_sas_free(hisi_hba); 2779 scsi_host_put(shost); 2780 return 0; 2781 } 2782 EXPORT_SYMBOL_GPL(hisi_sas_remove); 2783 2784 #if IS_ENABLED(CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE) 2785 #define DEBUGFS_ENABLE_DEFAULT "enabled" 2786 bool hisi_sas_debugfs_enable = true; 2787 u32 hisi_sas_debugfs_dump_count = 50; 2788 #else 2789 #define DEBUGFS_ENABLE_DEFAULT "disabled" 2790 bool hisi_sas_debugfs_enable; 2791 u32 hisi_sas_debugfs_dump_count = 1; 2792 #endif 2793 2794 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable); 2795 module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444); 2796 MODULE_PARM_DESC(hisi_sas_debugfs_enable, 2797 "Enable driver debugfs (default "DEBUGFS_ENABLE_DEFAULT")"); 2798 2799 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dump_count); 2800 module_param_named(debugfs_dump_count, hisi_sas_debugfs_dump_count, uint, 0444); 2801 MODULE_PARM_DESC(hisi_sas_debugfs_dump_count, "Number of debugfs dumps to allow"); 2802 2803 struct dentry *hisi_sas_debugfs_dir; 2804 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dir); 2805 2806 static __init int hisi_sas_init(void) 2807 { 2808 hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops); 2809 if (!hisi_sas_stt) 2810 return -ENOMEM; 2811 2812 if (hisi_sas_debugfs_enable) { 2813 hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL); 2814 if (hisi_sas_debugfs_dump_count > HISI_SAS_MAX_DEBUGFS_DUMP) { 2815 pr_info("hisi_sas: Limiting debugfs dump count\n"); 2816 hisi_sas_debugfs_dump_count = HISI_SAS_MAX_DEBUGFS_DUMP; 2817 } 2818 } 2819 2820 return 0; 2821 } 2822 2823 static __exit void hisi_sas_exit(void) 2824 { 2825 sas_release_transport(hisi_sas_stt); 2826 2827 debugfs_remove(hisi_sas_debugfs_dir); 2828 } 2829 2830 module_init(hisi_sas_init); 2831 module_exit(hisi_sas_exit); 2832 2833 MODULE_LICENSE("GPL"); 2834 MODULE_AUTHOR("John Garry <john.garry@huawei.com>"); 2835 MODULE_DESCRIPTION("HISILICON SAS controller driver"); 2836 MODULE_ALIAS("platform:" DRV_NAME); 2837