1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Copyright (c) 2015 Linaro Ltd. 4 * Copyright (c) 2015 Hisilicon Limited. 5 */ 6 7 #include "hisi_sas.h" 8 #define DRV_NAME "hisi_sas" 9 10 #define DEV_IS_GONE(dev) \ 11 ((!dev) || (dev->dev_type == SAS_PHY_UNUSED)) 12 13 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device, 14 u8 *lun, struct hisi_sas_tmf_task *tmf); 15 static int 16 hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, 17 struct domain_device *device, 18 int abort_flag, int tag, bool rst_to_recover); 19 static int hisi_sas_softreset_ata_disk(struct domain_device *device); 20 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, 21 void *funcdata); 22 static void hisi_sas_release_task(struct hisi_hba *hisi_hba, 23 struct domain_device *device); 24 static void hisi_sas_dev_gone(struct domain_device *device); 25 26 u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction) 27 { 28 switch (fis->command) { 29 case ATA_CMD_FPDMA_WRITE: 30 case ATA_CMD_FPDMA_READ: 31 case ATA_CMD_FPDMA_RECV: 32 case ATA_CMD_FPDMA_SEND: 33 case ATA_CMD_NCQ_NON_DATA: 34 return HISI_SAS_SATA_PROTOCOL_FPDMA; 35 36 case ATA_CMD_DOWNLOAD_MICRO: 37 case ATA_CMD_ID_ATA: 38 case ATA_CMD_PMP_READ: 39 case ATA_CMD_READ_LOG_EXT: 40 case ATA_CMD_PIO_READ: 41 case ATA_CMD_PIO_READ_EXT: 42 case ATA_CMD_PMP_WRITE: 43 case ATA_CMD_WRITE_LOG_EXT: 44 case ATA_CMD_PIO_WRITE: 45 case ATA_CMD_PIO_WRITE_EXT: 46 return HISI_SAS_SATA_PROTOCOL_PIO; 47 48 case ATA_CMD_DSM: 49 case ATA_CMD_DOWNLOAD_MICRO_DMA: 50 case ATA_CMD_PMP_READ_DMA: 51 case ATA_CMD_PMP_WRITE_DMA: 52 case ATA_CMD_READ: 53 case ATA_CMD_READ_EXT: 54 case ATA_CMD_READ_LOG_DMA_EXT: 55 case ATA_CMD_READ_STREAM_DMA_EXT: 56 case ATA_CMD_TRUSTED_RCV_DMA: 57 case ATA_CMD_TRUSTED_SND_DMA: 58 case ATA_CMD_WRITE: 59 case ATA_CMD_WRITE_EXT: 60 case ATA_CMD_WRITE_FUA_EXT: 61 case ATA_CMD_WRITE_QUEUED: 62 case ATA_CMD_WRITE_LOG_DMA_EXT: 63 case ATA_CMD_WRITE_STREAM_DMA_EXT: 64 case ATA_CMD_ZAC_MGMT_IN: 65 return HISI_SAS_SATA_PROTOCOL_DMA; 66 67 case ATA_CMD_CHK_POWER: 68 case ATA_CMD_DEV_RESET: 69 case ATA_CMD_EDD: 70 case ATA_CMD_FLUSH: 71 case ATA_CMD_FLUSH_EXT: 72 case ATA_CMD_VERIFY: 73 case ATA_CMD_VERIFY_EXT: 74 case ATA_CMD_SET_FEATURES: 75 case ATA_CMD_STANDBY: 76 case ATA_CMD_STANDBYNOW1: 77 case ATA_CMD_ZAC_MGMT_OUT: 78 return HISI_SAS_SATA_PROTOCOL_NONDATA; 79 80 case ATA_CMD_SET_MAX: 81 switch (fis->features) { 82 case ATA_SET_MAX_PASSWD: 83 case ATA_SET_MAX_LOCK: 84 return HISI_SAS_SATA_PROTOCOL_PIO; 85 86 case ATA_SET_MAX_PASSWD_DMA: 87 case ATA_SET_MAX_UNLOCK_DMA: 88 return HISI_SAS_SATA_PROTOCOL_DMA; 89 90 default: 91 return HISI_SAS_SATA_PROTOCOL_NONDATA; 92 } 93 94 default: 95 { 96 if (direction == DMA_NONE) 97 return HISI_SAS_SATA_PROTOCOL_NONDATA; 98 return HISI_SAS_SATA_PROTOCOL_PIO; 99 } 100 } 101 } 102 EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol); 103 104 void hisi_sas_sata_done(struct sas_task *task, 105 struct hisi_sas_slot *slot) 106 { 107 struct task_status_struct *ts = &task->task_status; 108 struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf; 109 struct hisi_sas_status_buffer *status_buf = 110 hisi_sas_status_buf_addr_mem(slot); 111 u8 *iu = &status_buf->iu[0]; 112 struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu; 113 114 resp->frame_len = sizeof(struct dev_to_host_fis); 115 memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis)); 116 117 ts->buf_valid_size = sizeof(*resp); 118 } 119 EXPORT_SYMBOL_GPL(hisi_sas_sata_done); 120 121 /* 122 * This function assumes linkrate mask fits in 8 bits, which it 123 * does for all HW versions supported. 124 */ 125 u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max) 126 { 127 u8 rate = 0; 128 int i; 129 130 max -= SAS_LINK_RATE_1_5_GBPS; 131 for (i = 0; i <= max; i++) 132 rate |= 1 << (i * 2); 133 return rate; 134 } 135 EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask); 136 137 static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device) 138 { 139 return device->port->ha->lldd_ha; 140 } 141 142 struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port) 143 { 144 return container_of(sas_port, struct hisi_sas_port, sas_port); 145 } 146 EXPORT_SYMBOL_GPL(to_hisi_sas_port); 147 148 void hisi_sas_stop_phys(struct hisi_hba *hisi_hba) 149 { 150 int phy_no; 151 152 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) 153 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 154 } 155 EXPORT_SYMBOL_GPL(hisi_sas_stop_phys); 156 157 static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx) 158 { 159 void *bitmap = hisi_hba->slot_index_tags; 160 161 clear_bit(slot_idx, bitmap); 162 } 163 164 static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx) 165 { 166 if (hisi_hba->hw->slot_index_alloc || 167 slot_idx >= HISI_SAS_UNRESERVED_IPTT) { 168 spin_lock(&hisi_hba->lock); 169 hisi_sas_slot_index_clear(hisi_hba, slot_idx); 170 spin_unlock(&hisi_hba->lock); 171 } 172 } 173 174 static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx) 175 { 176 void *bitmap = hisi_hba->slot_index_tags; 177 178 set_bit(slot_idx, bitmap); 179 } 180 181 static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, 182 struct scsi_cmnd *scsi_cmnd) 183 { 184 int index; 185 void *bitmap = hisi_hba->slot_index_tags; 186 187 if (scsi_cmnd) 188 return scsi_cmd_to_rq(scsi_cmnd)->tag; 189 190 spin_lock(&hisi_hba->lock); 191 index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count, 192 hisi_hba->last_slot_index + 1); 193 if (index >= hisi_hba->slot_index_count) { 194 index = find_next_zero_bit(bitmap, 195 hisi_hba->slot_index_count, 196 HISI_SAS_UNRESERVED_IPTT); 197 if (index >= hisi_hba->slot_index_count) { 198 spin_unlock(&hisi_hba->lock); 199 return -SAS_QUEUE_FULL; 200 } 201 } 202 hisi_sas_slot_index_set(hisi_hba, index); 203 hisi_hba->last_slot_index = index; 204 spin_unlock(&hisi_hba->lock); 205 206 return index; 207 } 208 209 static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba) 210 { 211 int i; 212 213 for (i = 0; i < hisi_hba->slot_index_count; ++i) 214 hisi_sas_slot_index_clear(hisi_hba, i); 215 } 216 217 void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, 218 struct hisi_sas_slot *slot) 219 { 220 int device_id = slot->device_id; 221 struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id]; 222 223 if (task) { 224 struct device *dev = hisi_hba->dev; 225 226 if (!task->lldd_task) 227 return; 228 229 task->lldd_task = NULL; 230 231 if (!sas_protocol_ata(task->task_proto)) { 232 if (slot->n_elem) 233 dma_unmap_sg(dev, task->scatter, 234 task->num_scatter, 235 task->data_dir); 236 if (slot->n_elem_dif) { 237 struct sas_ssp_task *ssp_task = &task->ssp_task; 238 struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; 239 240 dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), 241 scsi_prot_sg_count(scsi_cmnd), 242 task->data_dir); 243 } 244 } 245 } 246 247 spin_lock(&sas_dev->lock); 248 list_del_init(&slot->entry); 249 spin_unlock(&sas_dev->lock); 250 251 memset(slot, 0, offsetof(struct hisi_sas_slot, buf)); 252 253 hisi_sas_slot_index_free(hisi_hba, slot->idx); 254 } 255 EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free); 256 257 static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba, 258 struct hisi_sas_slot *slot) 259 { 260 hisi_hba->hw->prep_smp(hisi_hba, slot); 261 } 262 263 static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba, 264 struct hisi_sas_slot *slot) 265 { 266 hisi_hba->hw->prep_ssp(hisi_hba, slot); 267 } 268 269 static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba, 270 struct hisi_sas_slot *slot) 271 { 272 hisi_hba->hw->prep_stp(hisi_hba, slot); 273 } 274 275 static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba, 276 struct hisi_sas_slot *slot, 277 int device_id, int abort_flag, int tag_to_abort) 278 { 279 hisi_hba->hw->prep_abort(hisi_hba, slot, 280 device_id, abort_flag, tag_to_abort); 281 } 282 283 static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba, 284 struct sas_task *task, int n_elem, 285 int n_elem_req) 286 { 287 struct device *dev = hisi_hba->dev; 288 289 if (!sas_protocol_ata(task->task_proto)) { 290 if (task->num_scatter) { 291 if (n_elem) 292 dma_unmap_sg(dev, task->scatter, 293 task->num_scatter, 294 task->data_dir); 295 } else if (task->task_proto & SAS_PROTOCOL_SMP) { 296 if (n_elem_req) 297 dma_unmap_sg(dev, &task->smp_task.smp_req, 298 1, DMA_TO_DEVICE); 299 } 300 } 301 } 302 303 static int hisi_sas_dma_map(struct hisi_hba *hisi_hba, 304 struct sas_task *task, int *n_elem, 305 int *n_elem_req) 306 { 307 struct device *dev = hisi_hba->dev; 308 int rc; 309 310 if (sas_protocol_ata(task->task_proto)) { 311 *n_elem = task->num_scatter; 312 } else { 313 unsigned int req_len; 314 315 if (task->num_scatter) { 316 *n_elem = dma_map_sg(dev, task->scatter, 317 task->num_scatter, task->data_dir); 318 if (!*n_elem) { 319 rc = -ENOMEM; 320 goto prep_out; 321 } 322 } else if (task->task_proto & SAS_PROTOCOL_SMP) { 323 *n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req, 324 1, DMA_TO_DEVICE); 325 if (!*n_elem_req) { 326 rc = -ENOMEM; 327 goto prep_out; 328 } 329 req_len = sg_dma_len(&task->smp_task.smp_req); 330 if (req_len & 0x3) { 331 rc = -EINVAL; 332 goto err_out_dma_unmap; 333 } 334 } 335 } 336 337 if (*n_elem > HISI_SAS_SGE_PAGE_CNT) { 338 dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT\n", 339 *n_elem); 340 rc = -EINVAL; 341 goto err_out_dma_unmap; 342 } 343 return 0; 344 345 err_out_dma_unmap: 346 /* It would be better to call dma_unmap_sg() here, but it's messy */ 347 hisi_sas_dma_unmap(hisi_hba, task, *n_elem, 348 *n_elem_req); 349 prep_out: 350 return rc; 351 } 352 353 static void hisi_sas_dif_dma_unmap(struct hisi_hba *hisi_hba, 354 struct sas_task *task, int n_elem_dif) 355 { 356 struct device *dev = hisi_hba->dev; 357 358 if (n_elem_dif) { 359 struct sas_ssp_task *ssp_task = &task->ssp_task; 360 struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; 361 362 dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), 363 scsi_prot_sg_count(scsi_cmnd), 364 task->data_dir); 365 } 366 } 367 368 static int hisi_sas_dif_dma_map(struct hisi_hba *hisi_hba, 369 int *n_elem_dif, struct sas_task *task) 370 { 371 struct device *dev = hisi_hba->dev; 372 struct sas_ssp_task *ssp_task; 373 struct scsi_cmnd *scsi_cmnd; 374 int rc; 375 376 if (task->num_scatter) { 377 ssp_task = &task->ssp_task; 378 scsi_cmnd = ssp_task->cmd; 379 380 if (scsi_prot_sg_count(scsi_cmnd)) { 381 *n_elem_dif = dma_map_sg(dev, 382 scsi_prot_sglist(scsi_cmnd), 383 scsi_prot_sg_count(scsi_cmnd), 384 task->data_dir); 385 386 if (!*n_elem_dif) 387 return -ENOMEM; 388 389 if (*n_elem_dif > HISI_SAS_SGE_DIF_PAGE_CNT) { 390 dev_err(dev, "task prep: n_elem_dif(%d) too large\n", 391 *n_elem_dif); 392 rc = -EINVAL; 393 goto err_out_dif_dma_unmap; 394 } 395 } 396 } 397 398 return 0; 399 400 err_out_dif_dma_unmap: 401 dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), 402 scsi_prot_sg_count(scsi_cmnd), task->data_dir); 403 return rc; 404 } 405 406 static int hisi_sas_task_prep(struct sas_task *task, 407 struct hisi_sas_dq **dq_pointer, 408 bool is_tmf, struct hisi_sas_tmf_task *tmf, 409 int *pass) 410 { 411 struct domain_device *device = task->dev; 412 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 413 struct hisi_sas_device *sas_dev = device->lldd_dev; 414 struct hisi_sas_port *port; 415 struct hisi_sas_slot *slot; 416 struct hisi_sas_cmd_hdr *cmd_hdr_base; 417 struct asd_sas_port *sas_port = device->port; 418 struct device *dev = hisi_hba->dev; 419 int dlvry_queue_slot, dlvry_queue, rc, slot_idx; 420 int n_elem = 0, n_elem_dif = 0, n_elem_req = 0; 421 struct scsi_cmnd *scmd = NULL; 422 struct hisi_sas_dq *dq; 423 unsigned long flags; 424 int wr_q_index; 425 426 if (DEV_IS_GONE(sas_dev)) { 427 if (sas_dev) 428 dev_info(dev, "task prep: device %d not ready\n", 429 sas_dev->device_id); 430 else 431 dev_info(dev, "task prep: device %016llx not ready\n", 432 SAS_ADDR(device->sas_addr)); 433 434 return -ECOMM; 435 } 436 437 if (task->uldd_task) { 438 struct ata_queued_cmd *qc; 439 440 if (dev_is_sata(device)) { 441 qc = task->uldd_task; 442 scmd = qc->scsicmd; 443 } else { 444 scmd = task->uldd_task; 445 } 446 } 447 448 if (scmd) { 449 unsigned int dq_index; 450 u32 blk_tag; 451 452 blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); 453 dq_index = blk_mq_unique_tag_to_hwq(blk_tag); 454 *dq_pointer = dq = &hisi_hba->dq[dq_index]; 455 } else { 456 struct Scsi_Host *shost = hisi_hba->shost; 457 struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; 458 int queue = qmap->mq_map[raw_smp_processor_id()]; 459 460 *dq_pointer = dq = &hisi_hba->dq[queue]; 461 } 462 463 port = to_hisi_sas_port(sas_port); 464 if (port && !port->port_attached) { 465 dev_info(dev, "task prep: %s port%d not attach device\n", 466 (dev_is_sata(device)) ? 467 "SATA/STP" : "SAS", 468 device->port->id); 469 470 return -ECOMM; 471 } 472 473 rc = hisi_sas_dma_map(hisi_hba, task, &n_elem, 474 &n_elem_req); 475 if (rc < 0) 476 goto prep_out; 477 478 if (!sas_protocol_ata(task->task_proto)) { 479 rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task); 480 if (rc < 0) 481 goto err_out_dma_unmap; 482 } 483 484 if (hisi_hba->hw->slot_index_alloc) 485 rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device); 486 else 487 rc = hisi_sas_slot_index_alloc(hisi_hba, scmd); 488 489 if (rc < 0) 490 goto err_out_dif_dma_unmap; 491 492 slot_idx = rc; 493 slot = &hisi_hba->slot_info[slot_idx]; 494 495 spin_lock(&dq->lock); 496 wr_q_index = dq->wr_point; 497 dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS; 498 list_add_tail(&slot->delivery, &dq->list); 499 spin_unlock(&dq->lock); 500 spin_lock(&sas_dev->lock); 501 list_add_tail(&slot->entry, &sas_dev->list); 502 spin_unlock(&sas_dev->lock); 503 504 dlvry_queue = dq->id; 505 dlvry_queue_slot = wr_q_index; 506 507 slot->device_id = sas_dev->device_id; 508 slot->n_elem = n_elem; 509 slot->n_elem_dif = n_elem_dif; 510 slot->dlvry_queue = dlvry_queue; 511 slot->dlvry_queue_slot = dlvry_queue_slot; 512 cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue]; 513 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot]; 514 slot->task = task; 515 slot->port = port; 516 slot->tmf = tmf; 517 slot->is_internal = is_tmf; 518 task->lldd_task = slot; 519 520 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr)); 521 memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ); 522 memset(hisi_sas_status_buf_addr_mem(slot), 0, 523 sizeof(struct hisi_sas_err_record)); 524 525 switch (task->task_proto) { 526 case SAS_PROTOCOL_SMP: 527 hisi_sas_task_prep_smp(hisi_hba, slot); 528 break; 529 case SAS_PROTOCOL_SSP: 530 hisi_sas_task_prep_ssp(hisi_hba, slot); 531 break; 532 case SAS_PROTOCOL_SATA: 533 case SAS_PROTOCOL_STP: 534 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: 535 hisi_sas_task_prep_ata(hisi_hba, slot); 536 break; 537 default: 538 dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n", 539 task->task_proto); 540 break; 541 } 542 543 spin_lock_irqsave(&task->task_state_lock, flags); 544 task->task_state_flags |= SAS_TASK_AT_INITIATOR; 545 spin_unlock_irqrestore(&task->task_state_lock, flags); 546 547 ++(*pass); 548 WRITE_ONCE(slot->ready, 1); 549 550 return 0; 551 552 err_out_dif_dma_unmap: 553 if (!sas_protocol_ata(task->task_proto)) 554 hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif); 555 err_out_dma_unmap: 556 hisi_sas_dma_unmap(hisi_hba, task, n_elem, 557 n_elem_req); 558 prep_out: 559 dev_err(dev, "task prep: failed[%d]!\n", rc); 560 return rc; 561 } 562 563 static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags, 564 bool is_tmf, struct hisi_sas_tmf_task *tmf) 565 { 566 u32 rc; 567 u32 pass = 0; 568 struct hisi_hba *hisi_hba; 569 struct device *dev; 570 struct domain_device *device = task->dev; 571 struct asd_sas_port *sas_port = device->port; 572 struct hisi_sas_dq *dq = NULL; 573 574 if (!sas_port) { 575 struct task_status_struct *ts = &task->task_status; 576 577 ts->resp = SAS_TASK_UNDELIVERED; 578 ts->stat = SAS_PHY_DOWN; 579 /* 580 * libsas will use dev->port, should 581 * not call task_done for sata 582 */ 583 if (device->dev_type != SAS_SATA_DEV) 584 task->task_done(task); 585 return -ECOMM; 586 } 587 588 hisi_hba = dev_to_hisi_hba(device); 589 dev = hisi_hba->dev; 590 591 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) { 592 if (!gfpflags_allow_blocking(gfp_flags)) 593 return -EINVAL; 594 595 down(&hisi_hba->sem); 596 up(&hisi_hba->sem); 597 } 598 599 /* protect task_prep and start_delivery sequence */ 600 rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass); 601 if (rc) 602 dev_err(dev, "task exec: failed[%d]!\n", rc); 603 604 if (likely(pass)) { 605 spin_lock(&dq->lock); 606 hisi_hba->hw->start_delivery(dq); 607 spin_unlock(&dq->lock); 608 } 609 610 return rc; 611 } 612 613 static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no, 614 gfp_t gfp_flags) 615 { 616 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 617 struct asd_sas_phy *sas_phy = &phy->sas_phy; 618 619 if (!phy->phy_attached) 620 return; 621 622 if (test_bit(HISI_SAS_PM_BIT, &hisi_hba->flags) && 623 !sas_phy->suspended) { 624 dev_warn(hisi_hba->dev, "phy%d during suspend filtered out\n", phy_no); 625 return; 626 } 627 628 sas_notify_phy_event(sas_phy, PHYE_OOB_DONE, gfp_flags); 629 630 if (sas_phy->phy) { 631 struct sas_phy *sphy = sas_phy->phy; 632 633 sphy->negotiated_linkrate = sas_phy->linkrate; 634 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; 635 sphy->maximum_linkrate_hw = 636 hisi_hba->hw->phy_get_max_linkrate(); 637 if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) 638 sphy->minimum_linkrate = phy->minimum_linkrate; 639 640 if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) 641 sphy->maximum_linkrate = phy->maximum_linkrate; 642 } 643 644 if (phy->phy_type & PORT_TYPE_SAS) { 645 struct sas_identify_frame *id; 646 647 id = (struct sas_identify_frame *)phy->frame_rcvd; 648 id->dev_type = phy->identify.device_type; 649 id->initiator_bits = SAS_PROTOCOL_ALL; 650 id->target_bits = phy->identify.target_port_protocols; 651 } else if (phy->phy_type & PORT_TYPE_SATA) { 652 /* Nothing */ 653 } 654 655 sas_phy->frame_rcvd_size = phy->frame_rcvd_size; 656 sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, gfp_flags); 657 } 658 659 static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device) 660 { 661 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 662 struct hisi_sas_device *sas_dev = NULL; 663 int last = hisi_hba->last_dev_id; 664 int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES; 665 int i; 666 667 spin_lock(&hisi_hba->lock); 668 for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) { 669 if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) { 670 int queue = i % hisi_hba->queue_count; 671 struct hisi_sas_dq *dq = &hisi_hba->dq[queue]; 672 673 hisi_hba->devices[i].device_id = i; 674 sas_dev = &hisi_hba->devices[i]; 675 sas_dev->dev_status = HISI_SAS_DEV_INIT; 676 sas_dev->dev_type = device->dev_type; 677 sas_dev->hisi_hba = hisi_hba; 678 sas_dev->sas_device = device; 679 sas_dev->dq = dq; 680 spin_lock_init(&sas_dev->lock); 681 INIT_LIST_HEAD(&hisi_hba->devices[i].list); 682 break; 683 } 684 i++; 685 } 686 hisi_hba->last_dev_id = i; 687 spin_unlock(&hisi_hba->lock); 688 689 return sas_dev; 690 } 691 692 #define HISI_SAS_DISK_RECOVER_CNT 3 693 static int hisi_sas_init_device(struct domain_device *device) 694 { 695 int rc = TMF_RESP_FUNC_COMPLETE; 696 struct scsi_lun lun; 697 struct hisi_sas_tmf_task tmf_task; 698 int retry = HISI_SAS_DISK_RECOVER_CNT; 699 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 700 struct device *dev = hisi_hba->dev; 701 struct sas_phy *local_phy; 702 703 switch (device->dev_type) { 704 case SAS_END_DEVICE: 705 int_to_scsilun(0, &lun); 706 707 tmf_task.tmf = TMF_CLEAR_TASK_SET; 708 while (retry-- > 0) { 709 rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun, 710 &tmf_task); 711 if (rc == TMF_RESP_FUNC_COMPLETE) { 712 hisi_sas_release_task(hisi_hba, device); 713 break; 714 } 715 } 716 break; 717 case SAS_SATA_DEV: 718 case SAS_SATA_PM: 719 case SAS_SATA_PM_PORT: 720 case SAS_SATA_PENDING: 721 /* 722 * send HARD RESET to clear previous affiliation of 723 * STP target port 724 */ 725 local_phy = sas_get_local_phy(device); 726 if (!scsi_is_sas_phy_local(local_phy) && 727 !test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) { 728 unsigned long deadline = ata_deadline(jiffies, 20000); 729 struct sata_device *sata_dev = &device->sata_dev; 730 struct ata_host *ata_host = sata_dev->ata_host; 731 struct ata_port_operations *ops = ata_host->ops; 732 struct ata_port *ap = sata_dev->ap; 733 struct ata_link *link; 734 unsigned int classes; 735 736 ata_for_each_link(link, ap, EDGE) 737 rc = ops->hardreset(link, &classes, 738 deadline); 739 } 740 sas_put_local_phy(local_phy); 741 if (rc) { 742 dev_warn(dev, "SATA disk hardreset fail: %d\n", rc); 743 return rc; 744 } 745 746 while (retry-- > 0) { 747 rc = hisi_sas_softreset_ata_disk(device); 748 if (!rc) 749 break; 750 } 751 break; 752 default: 753 break; 754 } 755 756 return rc; 757 } 758 759 int hisi_sas_slave_alloc(struct scsi_device *sdev) 760 { 761 struct domain_device *ddev; 762 int rc; 763 764 rc = sas_slave_alloc(sdev); 765 if (rc) 766 return rc; 767 ddev = sdev_to_domain_dev(sdev); 768 769 return hisi_sas_init_device(ddev); 770 } 771 EXPORT_SYMBOL_GPL(hisi_sas_slave_alloc); 772 773 static int hisi_sas_dev_found(struct domain_device *device) 774 { 775 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 776 struct domain_device *parent_dev = device->parent; 777 struct hisi_sas_device *sas_dev; 778 struct device *dev = hisi_hba->dev; 779 int rc; 780 781 if (hisi_hba->hw->alloc_dev) 782 sas_dev = hisi_hba->hw->alloc_dev(device); 783 else 784 sas_dev = hisi_sas_alloc_dev(device); 785 if (!sas_dev) { 786 dev_err(dev, "fail alloc dev: max support %d devices\n", 787 HISI_SAS_MAX_DEVICES); 788 return -EINVAL; 789 } 790 791 device->lldd_dev = sas_dev; 792 hisi_hba->hw->setup_itct(hisi_hba, sas_dev); 793 794 if (parent_dev && dev_is_expander(parent_dev->dev_type)) { 795 int phy_no; 796 u8 phy_num = parent_dev->ex_dev.num_phys; 797 struct ex_phy *phy; 798 799 for (phy_no = 0; phy_no < phy_num; phy_no++) { 800 phy = &parent_dev->ex_dev.ex_phy[phy_no]; 801 if (SAS_ADDR(phy->attached_sas_addr) == 802 SAS_ADDR(device->sas_addr)) 803 break; 804 } 805 806 if (phy_no == phy_num) { 807 dev_info(dev, "dev found: no attached " 808 "dev:%016llx at ex:%016llx\n", 809 SAS_ADDR(device->sas_addr), 810 SAS_ADDR(parent_dev->sas_addr)); 811 rc = -EINVAL; 812 goto err_out; 813 } 814 } 815 816 dev_info(dev, "dev[%d:%x] found\n", 817 sas_dev->device_id, sas_dev->dev_type); 818 819 sas_dev->dev_status = HISI_SAS_DEV_NORMAL; 820 return 0; 821 822 err_out: 823 hisi_sas_dev_gone(device); 824 return rc; 825 } 826 827 int hisi_sas_slave_configure(struct scsi_device *sdev) 828 { 829 struct domain_device *dev = sdev_to_domain_dev(sdev); 830 int ret = sas_slave_configure(sdev); 831 832 if (ret) 833 return ret; 834 if (!dev_is_sata(dev)) 835 sas_change_queue_depth(sdev, 64); 836 837 return 0; 838 } 839 EXPORT_SYMBOL_GPL(hisi_sas_slave_configure); 840 841 void hisi_sas_scan_start(struct Scsi_Host *shost) 842 { 843 struct hisi_hba *hisi_hba = shost_priv(shost); 844 845 hisi_hba->hw->phys_init(hisi_hba); 846 } 847 EXPORT_SYMBOL_GPL(hisi_sas_scan_start); 848 849 int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time) 850 { 851 struct hisi_hba *hisi_hba = shost_priv(shost); 852 struct sas_ha_struct *sha = &hisi_hba->sha; 853 854 /* Wait for PHY up interrupt to occur */ 855 if (time < HZ) 856 return 0; 857 858 sas_drain_work(sha); 859 return 1; 860 } 861 EXPORT_SYMBOL_GPL(hisi_sas_scan_finished); 862 863 static void hisi_sas_phyup_work(struct work_struct *work) 864 { 865 struct hisi_sas_phy *phy = 866 container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]); 867 struct hisi_hba *hisi_hba = phy->hisi_hba; 868 struct asd_sas_phy *sas_phy = &phy->sas_phy; 869 int phy_no = sas_phy->id; 870 871 phy->wait_phyup_cnt = 0; 872 if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP) 873 hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no); 874 hisi_sas_bytes_dmaed(hisi_hba, phy_no, GFP_KERNEL); 875 } 876 877 static void hisi_sas_linkreset_work(struct work_struct *work) 878 { 879 struct hisi_sas_phy *phy = 880 container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]); 881 struct asd_sas_phy *sas_phy = &phy->sas_phy; 882 883 hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL); 884 } 885 886 static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = { 887 [HISI_PHYE_PHY_UP] = hisi_sas_phyup_work, 888 [HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work, 889 }; 890 891 bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy, 892 enum hisi_sas_phy_event event) 893 { 894 struct hisi_hba *hisi_hba = phy->hisi_hba; 895 896 if (WARN_ON(event >= HISI_PHYES_NUM)) 897 return false; 898 899 return queue_work(hisi_hba->wq, &phy->works[event]); 900 } 901 EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event); 902 903 static void hisi_sas_wait_phyup_timedout(struct timer_list *t) 904 { 905 struct hisi_sas_phy *phy = from_timer(phy, t, timer); 906 struct hisi_hba *hisi_hba = phy->hisi_hba; 907 struct device *dev = hisi_hba->dev; 908 int phy_no = phy->sas_phy.id; 909 910 dev_warn(dev, "phy%d wait phyup timeout, issuing link reset\n", phy_no); 911 hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); 912 } 913 914 #define HISI_SAS_WAIT_PHYUP_RETRIES 10 915 916 void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no) 917 { 918 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 919 struct device *dev = hisi_hba->dev; 920 921 dev_dbg(dev, "phy%d OOB ready\n", phy_no); 922 if (phy->phy_attached) 923 return; 924 925 if (!timer_pending(&phy->timer)) { 926 if (phy->wait_phyup_cnt < HISI_SAS_WAIT_PHYUP_RETRIES) { 927 phy->wait_phyup_cnt++; 928 phy->timer.expires = jiffies + 929 HISI_SAS_WAIT_PHYUP_TIMEOUT; 930 add_timer(&phy->timer); 931 } else { 932 dev_warn(dev, "phy%d failed to come up %d times, giving up\n", 933 phy_no, phy->wait_phyup_cnt); 934 phy->wait_phyup_cnt = 0; 935 } 936 } 937 } 938 EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready); 939 940 static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no) 941 { 942 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 943 struct asd_sas_phy *sas_phy = &phy->sas_phy; 944 int i; 945 946 phy->hisi_hba = hisi_hba; 947 phy->port = NULL; 948 phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS; 949 phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate(); 950 sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0; 951 sas_phy->class = SAS; 952 sas_phy->iproto = SAS_PROTOCOL_ALL; 953 sas_phy->tproto = 0; 954 sas_phy->type = PHY_TYPE_PHYSICAL; 955 sas_phy->role = PHY_ROLE_INITIATOR; 956 sas_phy->oob_mode = OOB_NOT_CONNECTED; 957 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; 958 sas_phy->id = phy_no; 959 sas_phy->sas_addr = &hisi_hba->sas_addr[0]; 960 sas_phy->frame_rcvd = &phy->frame_rcvd[0]; 961 sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata; 962 sas_phy->lldd_phy = phy; 963 964 for (i = 0; i < HISI_PHYES_NUM; i++) 965 INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]); 966 967 spin_lock_init(&phy->lock); 968 969 timer_setup(&phy->timer, hisi_sas_wait_phyup_timedout, 0); 970 } 971 972 /* Wrapper to ensure we track hisi_sas_phy.enable properly */ 973 void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no, int enable) 974 { 975 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 976 struct asd_sas_phy *aphy = &phy->sas_phy; 977 struct sas_phy *sphy = aphy->phy; 978 unsigned long flags; 979 980 spin_lock_irqsave(&phy->lock, flags); 981 982 if (enable) { 983 /* We may have been enabled already; if so, don't touch */ 984 if (!phy->enable) 985 sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; 986 hisi_hba->hw->phy_start(hisi_hba, phy_no); 987 } else { 988 sphy->negotiated_linkrate = SAS_PHY_DISABLED; 989 hisi_hba->hw->phy_disable(hisi_hba, phy_no); 990 } 991 phy->enable = enable; 992 spin_unlock_irqrestore(&phy->lock, flags); 993 } 994 EXPORT_SYMBOL_GPL(hisi_sas_phy_enable); 995 996 static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy) 997 { 998 struct sas_ha_struct *sas_ha = sas_phy->ha; 999 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; 1000 struct hisi_sas_phy *phy = sas_phy->lldd_phy; 1001 struct asd_sas_port *sas_port = sas_phy->port; 1002 struct hisi_sas_port *port; 1003 unsigned long flags; 1004 1005 if (!sas_port) 1006 return; 1007 1008 port = to_hisi_sas_port(sas_port); 1009 spin_lock_irqsave(&hisi_hba->lock, flags); 1010 port->port_attached = 1; 1011 port->id = phy->port_id; 1012 phy->port = port; 1013 sas_port->lldd_port = port; 1014 spin_unlock_irqrestore(&hisi_hba->lock, flags); 1015 } 1016 1017 static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task, 1018 struct hisi_sas_slot *slot) 1019 { 1020 if (task) { 1021 unsigned long flags; 1022 struct task_status_struct *ts; 1023 1024 ts = &task->task_status; 1025 1026 ts->resp = SAS_TASK_COMPLETE; 1027 ts->stat = SAS_ABORTED_TASK; 1028 spin_lock_irqsave(&task->task_state_lock, flags); 1029 task->task_state_flags &= 1030 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); 1031 if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP) 1032 task->task_state_flags |= SAS_TASK_STATE_DONE; 1033 spin_unlock_irqrestore(&task->task_state_lock, flags); 1034 } 1035 1036 hisi_sas_slot_task_free(hisi_hba, task, slot); 1037 } 1038 1039 static void hisi_sas_release_task(struct hisi_hba *hisi_hba, 1040 struct domain_device *device) 1041 { 1042 struct hisi_sas_slot *slot, *slot2; 1043 struct hisi_sas_device *sas_dev = device->lldd_dev; 1044 1045 list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry) 1046 hisi_sas_do_release_task(hisi_hba, slot->task, slot); 1047 } 1048 1049 void hisi_sas_release_tasks(struct hisi_hba *hisi_hba) 1050 { 1051 struct hisi_sas_device *sas_dev; 1052 struct domain_device *device; 1053 int i; 1054 1055 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1056 sas_dev = &hisi_hba->devices[i]; 1057 device = sas_dev->sas_device; 1058 1059 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || 1060 !device) 1061 continue; 1062 1063 hisi_sas_release_task(hisi_hba, device); 1064 } 1065 } 1066 EXPORT_SYMBOL_GPL(hisi_sas_release_tasks); 1067 1068 static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba, 1069 struct domain_device *device) 1070 { 1071 if (hisi_hba->hw->dereg_device) 1072 hisi_hba->hw->dereg_device(hisi_hba, device); 1073 } 1074 1075 static void hisi_sas_dev_gone(struct domain_device *device) 1076 { 1077 struct hisi_sas_device *sas_dev = device->lldd_dev; 1078 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1079 struct device *dev = hisi_hba->dev; 1080 int ret = 0; 1081 1082 dev_info(dev, "dev[%d:%x] is gone\n", 1083 sas_dev->device_id, sas_dev->dev_type); 1084 1085 down(&hisi_hba->sem); 1086 if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) { 1087 hisi_sas_internal_task_abort(hisi_hba, device, 1088 HISI_SAS_INT_ABT_DEV, 0, true); 1089 1090 hisi_sas_dereg_device(hisi_hba, device); 1091 1092 ret = hisi_hba->hw->clear_itct(hisi_hba, sas_dev); 1093 device->lldd_dev = NULL; 1094 } 1095 1096 if (hisi_hba->hw->free_device) 1097 hisi_hba->hw->free_device(sas_dev); 1098 1099 /* Don't mark it as SAS_PHY_UNUSED if failed to clear ITCT */ 1100 if (!ret) 1101 sas_dev->dev_type = SAS_PHY_UNUSED; 1102 sas_dev->sas_device = NULL; 1103 up(&hisi_hba->sem); 1104 } 1105 1106 static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags) 1107 { 1108 return hisi_sas_task_exec(task, gfp_flags, 0, NULL); 1109 } 1110 1111 static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no, 1112 struct sas_phy_linkrates *r) 1113 { 1114 struct sas_phy_linkrates _r; 1115 1116 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1117 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1118 enum sas_linkrate min, max; 1119 1120 if (r->minimum_linkrate > SAS_LINK_RATE_1_5_GBPS) 1121 return -EINVAL; 1122 1123 if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) { 1124 max = sas_phy->phy->maximum_linkrate; 1125 min = r->minimum_linkrate; 1126 } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) { 1127 max = r->maximum_linkrate; 1128 min = sas_phy->phy->minimum_linkrate; 1129 } else 1130 return -EINVAL; 1131 1132 _r.maximum_linkrate = max; 1133 _r.minimum_linkrate = min; 1134 1135 sas_phy->phy->maximum_linkrate = max; 1136 sas_phy->phy->minimum_linkrate = min; 1137 1138 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 1139 msleep(100); 1140 hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r); 1141 hisi_sas_phy_enable(hisi_hba, phy_no, 1); 1142 1143 return 0; 1144 } 1145 1146 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, 1147 void *funcdata) 1148 { 1149 struct hisi_sas_phy *phy = container_of(sas_phy, 1150 struct hisi_sas_phy, sas_phy); 1151 struct sas_ha_struct *sas_ha = sas_phy->ha; 1152 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; 1153 struct device *dev = hisi_hba->dev; 1154 DECLARE_COMPLETION_ONSTACK(completion); 1155 int phy_no = sas_phy->id; 1156 u8 sts = phy->phy_attached; 1157 int ret = 0; 1158 1159 phy->reset_completion = &completion; 1160 1161 switch (func) { 1162 case PHY_FUNC_HARD_RESET: 1163 hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no); 1164 break; 1165 1166 case PHY_FUNC_LINK_RESET: 1167 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 1168 msleep(100); 1169 hisi_sas_phy_enable(hisi_hba, phy_no, 1); 1170 break; 1171 1172 case PHY_FUNC_DISABLE: 1173 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 1174 goto out; 1175 1176 case PHY_FUNC_SET_LINK_RATE: 1177 ret = hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata); 1178 break; 1179 1180 case PHY_FUNC_GET_EVENTS: 1181 if (hisi_hba->hw->get_events) { 1182 hisi_hba->hw->get_events(hisi_hba, phy_no); 1183 goto out; 1184 } 1185 fallthrough; 1186 case PHY_FUNC_RELEASE_SPINUP_HOLD: 1187 default: 1188 ret = -EOPNOTSUPP; 1189 goto out; 1190 } 1191 1192 if (sts && !wait_for_completion_timeout(&completion, 2 * HZ)) { 1193 dev_warn(dev, "phy%d wait phyup timed out for func %d\n", 1194 phy_no, func); 1195 if (phy->in_reset) 1196 ret = -ETIMEDOUT; 1197 } 1198 1199 out: 1200 phy->reset_completion = NULL; 1201 1202 return ret; 1203 } 1204 1205 static void hisi_sas_task_done(struct sas_task *task) 1206 { 1207 del_timer_sync(&task->slow_task->timer); 1208 complete(&task->slow_task->completion); 1209 } 1210 1211 static void hisi_sas_tmf_timedout(struct timer_list *t) 1212 { 1213 struct sas_task_slow *slow = from_timer(slow, t, timer); 1214 struct sas_task *task = slow->task; 1215 unsigned long flags; 1216 bool is_completed = true; 1217 1218 spin_lock_irqsave(&task->task_state_lock, flags); 1219 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { 1220 task->task_state_flags |= SAS_TASK_STATE_ABORTED; 1221 is_completed = false; 1222 } 1223 spin_unlock_irqrestore(&task->task_state_lock, flags); 1224 1225 if (!is_completed) 1226 complete(&task->slow_task->completion); 1227 } 1228 1229 #define TASK_TIMEOUT (20 * HZ) 1230 #define TASK_RETRY 3 1231 #define INTERNAL_ABORT_TIMEOUT (6 * HZ) 1232 static int hisi_sas_exec_internal_tmf_task(struct domain_device *device, 1233 void *parameter, u32 para_len, 1234 struct hisi_sas_tmf_task *tmf) 1235 { 1236 struct hisi_sas_device *sas_dev = device->lldd_dev; 1237 struct hisi_hba *hisi_hba = sas_dev->hisi_hba; 1238 struct device *dev = hisi_hba->dev; 1239 struct sas_task *task; 1240 int res, retry; 1241 1242 for (retry = 0; retry < TASK_RETRY; retry++) { 1243 task = sas_alloc_slow_task(GFP_KERNEL); 1244 if (!task) 1245 return -ENOMEM; 1246 1247 task->dev = device; 1248 task->task_proto = device->tproto; 1249 1250 if (dev_is_sata(device)) { 1251 task->ata_task.device_control_reg_update = 1; 1252 memcpy(&task->ata_task.fis, parameter, para_len); 1253 } else { 1254 memcpy(&task->ssp_task, parameter, para_len); 1255 } 1256 task->task_done = hisi_sas_task_done; 1257 1258 task->slow_task->timer.function = hisi_sas_tmf_timedout; 1259 task->slow_task->timer.expires = jiffies + TASK_TIMEOUT; 1260 add_timer(&task->slow_task->timer); 1261 1262 res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf); 1263 1264 if (res) { 1265 del_timer_sync(&task->slow_task->timer); 1266 dev_err(dev, "abort tmf: executing internal task failed: %d\n", 1267 res); 1268 goto ex_err; 1269 } 1270 1271 wait_for_completion(&task->slow_task->completion); 1272 res = TMF_RESP_FUNC_FAILED; 1273 /* Even TMF timed out, return direct. */ 1274 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { 1275 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { 1276 struct hisi_sas_slot *slot = task->lldd_task; 1277 1278 dev_err(dev, "abort tmf: TMF task timeout and not done\n"); 1279 if (slot) { 1280 struct hisi_sas_cq *cq = 1281 &hisi_hba->cq[slot->dlvry_queue]; 1282 /* 1283 * sync irq to avoid free'ing task 1284 * before using task in IO completion 1285 */ 1286 synchronize_irq(cq->irq_no); 1287 slot->task = NULL; 1288 } 1289 1290 goto ex_err; 1291 } else 1292 dev_err(dev, "abort tmf: TMF task timeout\n"); 1293 } 1294 1295 if (task->task_status.resp == SAS_TASK_COMPLETE && 1296 task->task_status.stat == TMF_RESP_FUNC_COMPLETE) { 1297 res = TMF_RESP_FUNC_COMPLETE; 1298 break; 1299 } 1300 1301 if (task->task_status.resp == SAS_TASK_COMPLETE && 1302 task->task_status.stat == TMF_RESP_FUNC_SUCC) { 1303 res = TMF_RESP_FUNC_SUCC; 1304 break; 1305 } 1306 1307 if (task->task_status.resp == SAS_TASK_COMPLETE && 1308 task->task_status.stat == SAS_DATA_UNDERRUN) { 1309 /* no error, but return the number of bytes of 1310 * underrun 1311 */ 1312 dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x sts 0x%x underrun\n", 1313 SAS_ADDR(device->sas_addr), 1314 task->task_status.resp, 1315 task->task_status.stat); 1316 res = task->task_status.residual; 1317 break; 1318 } 1319 1320 if (task->task_status.resp == SAS_TASK_COMPLETE && 1321 task->task_status.stat == SAS_DATA_OVERRUN) { 1322 dev_warn(dev, "abort tmf: blocked task error\n"); 1323 res = -EMSGSIZE; 1324 break; 1325 } 1326 1327 if (task->task_status.resp == SAS_TASK_COMPLETE && 1328 task->task_status.stat == SAS_OPEN_REJECT) { 1329 dev_warn(dev, "abort tmf: open reject failed\n"); 1330 res = -EIO; 1331 } else { 1332 dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x status 0x%x\n", 1333 SAS_ADDR(device->sas_addr), 1334 task->task_status.resp, 1335 task->task_status.stat); 1336 } 1337 sas_free_task(task); 1338 task = NULL; 1339 } 1340 ex_err: 1341 if (retry == TASK_RETRY) 1342 dev_warn(dev, "abort tmf: executing internal task failed!\n"); 1343 sas_free_task(task); 1344 return res; 1345 } 1346 1347 static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev, 1348 bool reset, int pmp, u8 *fis) 1349 { 1350 struct ata_taskfile tf; 1351 1352 ata_tf_init(dev, &tf); 1353 if (reset) 1354 tf.ctl |= ATA_SRST; 1355 else 1356 tf.ctl &= ~ATA_SRST; 1357 tf.command = ATA_CMD_DEV_RESET; 1358 ata_tf_to_fis(&tf, pmp, 0, fis); 1359 } 1360 1361 static int hisi_sas_softreset_ata_disk(struct domain_device *device) 1362 { 1363 u8 fis[20] = {0}; 1364 struct ata_port *ap = device->sata_dev.ap; 1365 struct ata_link *link; 1366 int rc = TMF_RESP_FUNC_FAILED; 1367 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1368 struct device *dev = hisi_hba->dev; 1369 int s = sizeof(struct host_to_dev_fis); 1370 1371 ata_for_each_link(link, ap, EDGE) { 1372 int pmp = sata_srst_pmp(link); 1373 1374 hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis); 1375 rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL); 1376 if (rc != TMF_RESP_FUNC_COMPLETE) 1377 break; 1378 } 1379 1380 if (rc == TMF_RESP_FUNC_COMPLETE) { 1381 ata_for_each_link(link, ap, EDGE) { 1382 int pmp = sata_srst_pmp(link); 1383 1384 hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis); 1385 rc = hisi_sas_exec_internal_tmf_task(device, fis, 1386 s, NULL); 1387 if (rc != TMF_RESP_FUNC_COMPLETE) 1388 dev_err(dev, "ata disk %016llx de-reset failed\n", 1389 SAS_ADDR(device->sas_addr)); 1390 } 1391 } else { 1392 dev_err(dev, "ata disk %016llx reset failed\n", 1393 SAS_ADDR(device->sas_addr)); 1394 } 1395 1396 if (rc == TMF_RESP_FUNC_COMPLETE) 1397 hisi_sas_release_task(hisi_hba, device); 1398 1399 return rc; 1400 } 1401 1402 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device, 1403 u8 *lun, struct hisi_sas_tmf_task *tmf) 1404 { 1405 struct sas_ssp_task ssp_task; 1406 1407 if (!(device->tproto & SAS_PROTOCOL_SSP)) 1408 return TMF_RESP_FUNC_ESUPP; 1409 1410 memcpy(ssp_task.LUN, lun, 8); 1411 1412 return hisi_sas_exec_internal_tmf_task(device, &ssp_task, 1413 sizeof(ssp_task), tmf); 1414 } 1415 1416 static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba) 1417 { 1418 u32 state = hisi_hba->hw->get_phys_state(hisi_hba); 1419 int i; 1420 1421 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1422 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; 1423 struct domain_device *device = sas_dev->sas_device; 1424 struct asd_sas_port *sas_port; 1425 struct hisi_sas_port *port; 1426 struct hisi_sas_phy *phy = NULL; 1427 struct asd_sas_phy *sas_phy; 1428 1429 if ((sas_dev->dev_type == SAS_PHY_UNUSED) 1430 || !device || !device->port) 1431 continue; 1432 1433 sas_port = device->port; 1434 port = to_hisi_sas_port(sas_port); 1435 1436 list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) 1437 if (state & BIT(sas_phy->id)) { 1438 phy = sas_phy->lldd_phy; 1439 break; 1440 } 1441 1442 if (phy) { 1443 port->id = phy->port_id; 1444 1445 /* Update linkrate of directly attached device. */ 1446 if (!device->parent) 1447 device->linkrate = phy->sas_phy.linkrate; 1448 1449 hisi_hba->hw->setup_itct(hisi_hba, sas_dev); 1450 } else 1451 port->id = 0xff; 1452 } 1453 } 1454 1455 static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state) 1456 { 1457 struct asd_sas_port *_sas_port = NULL; 1458 int phy_no; 1459 1460 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { 1461 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1462 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1463 struct asd_sas_port *sas_port = sas_phy->port; 1464 bool do_port_check = _sas_port != sas_port; 1465 1466 if (!sas_phy->phy->enabled) 1467 continue; 1468 1469 /* Report PHY state change to libsas */ 1470 if (state & BIT(phy_no)) { 1471 if (do_port_check && sas_port && sas_port->port_dev) { 1472 struct domain_device *dev = sas_port->port_dev; 1473 1474 _sas_port = sas_port; 1475 1476 if (dev_is_expander(dev->dev_type)) 1477 sas_notify_port_event(sas_phy, 1478 PORTE_BROADCAST_RCVD, 1479 GFP_KERNEL); 1480 } 1481 } else { 1482 hisi_sas_phy_down(hisi_hba, phy_no, 0, GFP_KERNEL); 1483 } 1484 } 1485 } 1486 1487 static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba) 1488 { 1489 struct hisi_sas_device *sas_dev; 1490 struct domain_device *device; 1491 int i; 1492 1493 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1494 sas_dev = &hisi_hba->devices[i]; 1495 device = sas_dev->sas_device; 1496 1497 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device) 1498 continue; 1499 1500 hisi_sas_init_device(device); 1501 } 1502 } 1503 1504 static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba, 1505 struct asd_sas_port *sas_port, 1506 struct domain_device *device) 1507 { 1508 struct hisi_sas_tmf_task tmf_task = { .force_phy = 1 }; 1509 struct ata_port *ap = device->sata_dev.ap; 1510 struct device *dev = hisi_hba->dev; 1511 int s = sizeof(struct host_to_dev_fis); 1512 int rc = TMF_RESP_FUNC_FAILED; 1513 struct asd_sas_phy *sas_phy; 1514 struct ata_link *link; 1515 u8 fis[20] = {0}; 1516 u32 state; 1517 1518 state = hisi_hba->hw->get_phys_state(hisi_hba); 1519 list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) { 1520 if (!(state & BIT(sas_phy->id))) 1521 continue; 1522 1523 ata_for_each_link(link, ap, EDGE) { 1524 int pmp = sata_srst_pmp(link); 1525 1526 tmf_task.phy_id = sas_phy->id; 1527 hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis); 1528 rc = hisi_sas_exec_internal_tmf_task(device, fis, s, 1529 &tmf_task); 1530 if (rc != TMF_RESP_FUNC_COMPLETE) { 1531 dev_err(dev, "phy%d ata reset failed rc=%d\n", 1532 sas_phy->id, rc); 1533 break; 1534 } 1535 } 1536 } 1537 } 1538 1539 static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba) 1540 { 1541 struct device *dev = hisi_hba->dev; 1542 int port_no, rc, i; 1543 1544 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1545 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; 1546 struct domain_device *device = sas_dev->sas_device; 1547 1548 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device) 1549 continue; 1550 1551 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1552 HISI_SAS_INT_ABT_DEV, 0, 1553 false); 1554 if (rc < 0) 1555 dev_err(dev, "STP reject: abort dev failed %d\n", rc); 1556 } 1557 1558 for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) { 1559 struct hisi_sas_port *port = &hisi_hba->port[port_no]; 1560 struct asd_sas_port *sas_port = &port->sas_port; 1561 struct domain_device *port_dev = sas_port->port_dev; 1562 struct domain_device *device; 1563 1564 if (!port_dev || !dev_is_expander(port_dev->dev_type)) 1565 continue; 1566 1567 /* Try to find a SATA device */ 1568 list_for_each_entry(device, &sas_port->dev_list, 1569 dev_list_node) { 1570 if (dev_is_sata(device)) { 1571 hisi_sas_send_ata_reset_each_phy(hisi_hba, 1572 sas_port, 1573 device); 1574 break; 1575 } 1576 } 1577 } 1578 } 1579 1580 void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba) 1581 { 1582 struct Scsi_Host *shost = hisi_hba->shost; 1583 1584 down(&hisi_hba->sem); 1585 hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba); 1586 1587 scsi_block_requests(shost); 1588 hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000); 1589 1590 del_timer_sync(&hisi_hba->timer); 1591 1592 set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1593 } 1594 EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare); 1595 1596 void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba) 1597 { 1598 struct Scsi_Host *shost = hisi_hba->shost; 1599 1600 /* Init and wait for PHYs to come up and all libsas event finished. */ 1601 hisi_hba->hw->phys_init(hisi_hba); 1602 msleep(1000); 1603 hisi_sas_refresh_port_id(hisi_hba); 1604 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1605 1606 if (hisi_hba->reject_stp_links_msk) 1607 hisi_sas_terminate_stp_reject(hisi_hba); 1608 hisi_sas_reset_init_all_devices(hisi_hba); 1609 up(&hisi_hba->sem); 1610 scsi_unblock_requests(shost); 1611 clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); 1612 1613 hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state); 1614 } 1615 EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done); 1616 1617 static int hisi_sas_controller_prereset(struct hisi_hba *hisi_hba) 1618 { 1619 if (!hisi_hba->hw->soft_reset) 1620 return -1; 1621 1622 if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) 1623 return -1; 1624 1625 if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct) 1626 hisi_hba->hw->debugfs_snapshot_regs(hisi_hba); 1627 1628 return 0; 1629 } 1630 1631 static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba) 1632 { 1633 struct device *dev = hisi_hba->dev; 1634 struct Scsi_Host *shost = hisi_hba->shost; 1635 int rc; 1636 1637 dev_info(dev, "controller resetting...\n"); 1638 hisi_sas_controller_reset_prepare(hisi_hba); 1639 1640 rc = hisi_hba->hw->soft_reset(hisi_hba); 1641 if (rc) { 1642 dev_warn(dev, "controller reset failed (%d)\n", rc); 1643 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1644 up(&hisi_hba->sem); 1645 scsi_unblock_requests(shost); 1646 clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); 1647 return rc; 1648 } 1649 1650 hisi_sas_controller_reset_done(hisi_hba); 1651 clear_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags); 1652 dev_info(dev, "controller reset complete\n"); 1653 1654 return 0; 1655 } 1656 1657 static int hisi_sas_abort_task(struct sas_task *task) 1658 { 1659 struct scsi_lun lun; 1660 struct hisi_sas_tmf_task tmf_task; 1661 struct domain_device *device = task->dev; 1662 struct hisi_sas_device *sas_dev = device->lldd_dev; 1663 struct hisi_hba *hisi_hba; 1664 struct device *dev; 1665 int rc = TMF_RESP_FUNC_FAILED; 1666 unsigned long flags; 1667 1668 if (!sas_dev) 1669 return TMF_RESP_FUNC_FAILED; 1670 1671 hisi_hba = dev_to_hisi_hba(task->dev); 1672 dev = hisi_hba->dev; 1673 1674 spin_lock_irqsave(&task->task_state_lock, flags); 1675 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 1676 struct hisi_sas_slot *slot = task->lldd_task; 1677 struct hisi_sas_cq *cq; 1678 1679 if (slot) { 1680 /* 1681 * sync irq to avoid free'ing task 1682 * before using task in IO completion 1683 */ 1684 cq = &hisi_hba->cq[slot->dlvry_queue]; 1685 synchronize_irq(cq->irq_no); 1686 } 1687 spin_unlock_irqrestore(&task->task_state_lock, flags); 1688 rc = TMF_RESP_FUNC_COMPLETE; 1689 goto out; 1690 } 1691 task->task_state_flags |= SAS_TASK_STATE_ABORTED; 1692 spin_unlock_irqrestore(&task->task_state_lock, flags); 1693 1694 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { 1695 struct scsi_cmnd *cmnd = task->uldd_task; 1696 struct hisi_sas_slot *slot = task->lldd_task; 1697 u16 tag = slot->idx; 1698 int rc2; 1699 1700 int_to_scsilun(cmnd->device->lun, &lun); 1701 tmf_task.tmf = TMF_ABORT_TASK; 1702 tmf_task.tag_of_task_to_be_managed = tag; 1703 1704 rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, 1705 &tmf_task); 1706 1707 rc2 = hisi_sas_internal_task_abort(hisi_hba, device, 1708 HISI_SAS_INT_ABT_CMD, tag, 1709 false); 1710 if (rc2 < 0) { 1711 dev_err(dev, "abort task: internal abort (%d)\n", rc2); 1712 return TMF_RESP_FUNC_FAILED; 1713 } 1714 1715 /* 1716 * If the TMF finds that the IO is not in the device and also 1717 * the internal abort does not succeed, then it is safe to 1718 * free the slot. 1719 * Note: if the internal abort succeeds then the slot 1720 * will have already been completed 1721 */ 1722 if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) { 1723 if (task->lldd_task) 1724 hisi_sas_do_release_task(hisi_hba, task, slot); 1725 } 1726 } else if (task->task_proto & SAS_PROTOCOL_SATA || 1727 task->task_proto & SAS_PROTOCOL_STP) { 1728 if (task->dev->dev_type == SAS_SATA_DEV) { 1729 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1730 HISI_SAS_INT_ABT_DEV, 1731 0, false); 1732 if (rc < 0) { 1733 dev_err(dev, "abort task: internal abort failed\n"); 1734 goto out; 1735 } 1736 hisi_sas_dereg_device(hisi_hba, device); 1737 rc = hisi_sas_softreset_ata_disk(device); 1738 } 1739 } else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) { 1740 /* SMP */ 1741 struct hisi_sas_slot *slot = task->lldd_task; 1742 u32 tag = slot->idx; 1743 struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue]; 1744 1745 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1746 HISI_SAS_INT_ABT_CMD, tag, 1747 false); 1748 if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) && 1749 task->lldd_task) { 1750 /* 1751 * sync irq to avoid free'ing task 1752 * before using task in IO completion 1753 */ 1754 synchronize_irq(cq->irq_no); 1755 slot->task = NULL; 1756 } 1757 } 1758 1759 out: 1760 if (rc != TMF_RESP_FUNC_COMPLETE) 1761 dev_notice(dev, "abort task: rc=%d\n", rc); 1762 return rc; 1763 } 1764 1765 static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun) 1766 { 1767 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1768 struct device *dev = hisi_hba->dev; 1769 struct hisi_sas_tmf_task tmf_task; 1770 int rc; 1771 1772 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1773 HISI_SAS_INT_ABT_DEV, 0, false); 1774 if (rc < 0) { 1775 dev_err(dev, "abort task set: internal abort rc=%d\n", rc); 1776 return TMF_RESP_FUNC_FAILED; 1777 } 1778 hisi_sas_dereg_device(hisi_hba, device); 1779 1780 tmf_task.tmf = TMF_ABORT_TASK_SET; 1781 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task); 1782 1783 if (rc == TMF_RESP_FUNC_COMPLETE) 1784 hisi_sas_release_task(hisi_hba, device); 1785 1786 return rc; 1787 } 1788 1789 static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun) 1790 { 1791 struct hisi_sas_tmf_task tmf_task; 1792 int rc; 1793 1794 tmf_task.tmf = TMF_CLEAR_ACA; 1795 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task); 1796 1797 return rc; 1798 } 1799 1800 #define I_T_NEXUS_RESET_PHYUP_TIMEOUT (2 * HZ) 1801 1802 static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device) 1803 { 1804 struct sas_phy *local_phy = sas_get_local_phy(device); 1805 struct hisi_sas_device *sas_dev = device->lldd_dev; 1806 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1807 struct sas_ha_struct *sas_ha = &hisi_hba->sha; 1808 int rc, reset_type; 1809 1810 if (!local_phy->enabled) { 1811 sas_put_local_phy(local_phy); 1812 return -ENODEV; 1813 } 1814 1815 if (scsi_is_sas_phy_local(local_phy)) { 1816 struct asd_sas_phy *sas_phy = 1817 sas_ha->sas_phy[local_phy->number]; 1818 struct hisi_sas_phy *phy = 1819 container_of(sas_phy, struct hisi_sas_phy, sas_phy); 1820 unsigned long flags; 1821 1822 spin_lock_irqsave(&phy->lock, flags); 1823 phy->in_reset = 1; 1824 spin_unlock_irqrestore(&phy->lock, flags); 1825 } 1826 1827 reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT || 1828 !dev_is_sata(device)) ? true : false; 1829 1830 rc = sas_phy_reset(local_phy, reset_type); 1831 sas_put_local_phy(local_phy); 1832 1833 if (scsi_is_sas_phy_local(local_phy)) { 1834 struct asd_sas_phy *sas_phy = 1835 sas_ha->sas_phy[local_phy->number]; 1836 struct hisi_sas_phy *phy = 1837 container_of(sas_phy, struct hisi_sas_phy, sas_phy); 1838 unsigned long flags; 1839 1840 spin_lock_irqsave(&phy->lock, flags); 1841 phy->in_reset = 0; 1842 spin_unlock_irqrestore(&phy->lock, flags); 1843 1844 /* report PHY down if timed out */ 1845 if (rc == -ETIMEDOUT) 1846 hisi_sas_phy_down(hisi_hba, sas_phy->id, 0, GFP_KERNEL); 1847 } else if (sas_dev->dev_status != HISI_SAS_DEV_INIT) { 1848 /* 1849 * If in init state, we rely on caller to wait for link to be 1850 * ready; otherwise, except phy reset is fail, delay. 1851 */ 1852 if (!rc) 1853 msleep(2000); 1854 } 1855 1856 return rc; 1857 } 1858 1859 static int hisi_sas_I_T_nexus_reset(struct domain_device *device) 1860 { 1861 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1862 struct device *dev = hisi_hba->dev; 1863 int rc; 1864 1865 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1866 HISI_SAS_INT_ABT_DEV, 0, false); 1867 if (rc < 0) { 1868 dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc); 1869 return TMF_RESP_FUNC_FAILED; 1870 } 1871 hisi_sas_dereg_device(hisi_hba, device); 1872 1873 rc = hisi_sas_debug_I_T_nexus_reset(device); 1874 if (rc == TMF_RESP_FUNC_COMPLETE && dev_is_sata(device)) { 1875 struct sas_phy *local_phy; 1876 1877 rc = hisi_sas_softreset_ata_disk(device); 1878 switch (rc) { 1879 case -ECOMM: 1880 rc = -ENODEV; 1881 break; 1882 case TMF_RESP_FUNC_FAILED: 1883 case -EMSGSIZE: 1884 case -EIO: 1885 local_phy = sas_get_local_phy(device); 1886 rc = sas_phy_enable(local_phy, 0); 1887 if (!rc) { 1888 local_phy->enabled = 0; 1889 dev_err(dev, "Disabled local phy of ATA disk %016llx due to softreset fail (%d)\n", 1890 SAS_ADDR(device->sas_addr), rc); 1891 rc = -ENODEV; 1892 } 1893 sas_put_local_phy(local_phy); 1894 break; 1895 default: 1896 break; 1897 } 1898 } 1899 1900 if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) 1901 hisi_sas_release_task(hisi_hba, device); 1902 1903 return rc; 1904 } 1905 1906 static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun) 1907 { 1908 struct hisi_sas_device *sas_dev = device->lldd_dev; 1909 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1910 struct device *dev = hisi_hba->dev; 1911 int rc = TMF_RESP_FUNC_FAILED; 1912 1913 /* Clear internal IO and then lu reset */ 1914 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1915 HISI_SAS_INT_ABT_DEV, 0, false); 1916 if (rc < 0) { 1917 dev_err(dev, "lu_reset: internal abort failed\n"); 1918 goto out; 1919 } 1920 hisi_sas_dereg_device(hisi_hba, device); 1921 1922 if (dev_is_sata(device)) { 1923 struct sas_phy *phy; 1924 1925 phy = sas_get_local_phy(device); 1926 1927 rc = sas_phy_reset(phy, true); 1928 1929 if (rc == 0) 1930 hisi_sas_release_task(hisi_hba, device); 1931 sas_put_local_phy(phy); 1932 } else { 1933 struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET }; 1934 1935 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task); 1936 if (rc == TMF_RESP_FUNC_COMPLETE) 1937 hisi_sas_release_task(hisi_hba, device); 1938 } 1939 out: 1940 if (rc != TMF_RESP_FUNC_COMPLETE) 1941 dev_err(dev, "lu_reset: for device[%d]:rc= %d\n", 1942 sas_dev->device_id, rc); 1943 return rc; 1944 } 1945 1946 static void hisi_sas_async_I_T_nexus_reset(void *data, async_cookie_t cookie) 1947 { 1948 struct domain_device *device = data; 1949 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1950 int rc; 1951 1952 rc = hisi_sas_debug_I_T_nexus_reset(device); 1953 if (rc != TMF_RESP_FUNC_COMPLETE) 1954 dev_info(hisi_hba->dev, "I_T_nexus reset fail for dev:%016llx rc=%d\n", 1955 SAS_ADDR(device->sas_addr), rc); 1956 } 1957 1958 static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha) 1959 { 1960 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; 1961 HISI_SAS_DECLARE_RST_WORK_ON_STACK(r); 1962 ASYNC_DOMAIN_EXCLUSIVE(async); 1963 int i; 1964 1965 queue_work(hisi_hba->wq, &r.work); 1966 wait_for_completion(r.completion); 1967 if (!r.done) 1968 return TMF_RESP_FUNC_FAILED; 1969 1970 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1971 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; 1972 struct domain_device *device = sas_dev->sas_device; 1973 1974 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device || 1975 dev_is_expander(device->dev_type)) 1976 continue; 1977 1978 async_schedule_domain(hisi_sas_async_I_T_nexus_reset, 1979 device, &async); 1980 } 1981 1982 async_synchronize_full_domain(&async); 1983 hisi_sas_release_tasks(hisi_hba); 1984 1985 return TMF_RESP_FUNC_COMPLETE; 1986 } 1987 1988 static int hisi_sas_query_task(struct sas_task *task) 1989 { 1990 struct scsi_lun lun; 1991 struct hisi_sas_tmf_task tmf_task; 1992 int rc = TMF_RESP_FUNC_FAILED; 1993 1994 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { 1995 struct scsi_cmnd *cmnd = task->uldd_task; 1996 struct domain_device *device = task->dev; 1997 struct hisi_sas_slot *slot = task->lldd_task; 1998 u32 tag = slot->idx; 1999 2000 int_to_scsilun(cmnd->device->lun, &lun); 2001 tmf_task.tmf = TMF_QUERY_TASK; 2002 tmf_task.tag_of_task_to_be_managed = tag; 2003 2004 rc = hisi_sas_debug_issue_ssp_tmf(device, 2005 lun.scsi_lun, 2006 &tmf_task); 2007 switch (rc) { 2008 /* The task is still in Lun, release it then */ 2009 case TMF_RESP_FUNC_SUCC: 2010 /* The task is not in Lun or failed, reset the phy */ 2011 case TMF_RESP_FUNC_FAILED: 2012 case TMF_RESP_FUNC_COMPLETE: 2013 break; 2014 default: 2015 rc = TMF_RESP_FUNC_FAILED; 2016 break; 2017 } 2018 } 2019 return rc; 2020 } 2021 2022 static int 2023 hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, 2024 struct sas_task *task, int abort_flag, 2025 int task_tag, struct hisi_sas_dq *dq) 2026 { 2027 struct domain_device *device = task->dev; 2028 struct hisi_sas_device *sas_dev = device->lldd_dev; 2029 struct device *dev = hisi_hba->dev; 2030 struct hisi_sas_port *port; 2031 struct hisi_sas_slot *slot; 2032 struct asd_sas_port *sas_port = device->port; 2033 struct hisi_sas_cmd_hdr *cmd_hdr_base; 2034 int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx; 2035 unsigned long flags; 2036 int wr_q_index; 2037 2038 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) 2039 return -EINVAL; 2040 2041 if (!device->port) 2042 return -1; 2043 2044 port = to_hisi_sas_port(sas_port); 2045 2046 /* simply get a slot and send abort command */ 2047 rc = hisi_sas_slot_index_alloc(hisi_hba, NULL); 2048 if (rc < 0) 2049 goto err_out; 2050 2051 slot_idx = rc; 2052 slot = &hisi_hba->slot_info[slot_idx]; 2053 2054 spin_lock(&dq->lock); 2055 wr_q_index = dq->wr_point; 2056 dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS; 2057 list_add_tail(&slot->delivery, &dq->list); 2058 spin_unlock(&dq->lock); 2059 spin_lock(&sas_dev->lock); 2060 list_add_tail(&slot->entry, &sas_dev->list); 2061 spin_unlock(&sas_dev->lock); 2062 2063 dlvry_queue = dq->id; 2064 dlvry_queue_slot = wr_q_index; 2065 2066 slot->device_id = sas_dev->device_id; 2067 slot->n_elem = n_elem; 2068 slot->dlvry_queue = dlvry_queue; 2069 slot->dlvry_queue_slot = dlvry_queue_slot; 2070 cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue]; 2071 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot]; 2072 slot->task = task; 2073 slot->port = port; 2074 slot->is_internal = true; 2075 task->lldd_task = slot; 2076 2077 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr)); 2078 memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ); 2079 memset(hisi_sas_status_buf_addr_mem(slot), 0, 2080 sizeof(struct hisi_sas_err_record)); 2081 2082 hisi_sas_task_prep_abort(hisi_hba, slot, device_id, 2083 abort_flag, task_tag); 2084 2085 spin_lock_irqsave(&task->task_state_lock, flags); 2086 task->task_state_flags |= SAS_TASK_AT_INITIATOR; 2087 spin_unlock_irqrestore(&task->task_state_lock, flags); 2088 WRITE_ONCE(slot->ready, 1); 2089 /* send abort command to the chip */ 2090 spin_lock(&dq->lock); 2091 hisi_hba->hw->start_delivery(dq); 2092 spin_unlock(&dq->lock); 2093 2094 return 0; 2095 2096 err_out: 2097 dev_err(dev, "internal abort task prep: failed[%d]!\n", rc); 2098 2099 return rc; 2100 } 2101 2102 /** 2103 * _hisi_sas_internal_task_abort -- execute an internal 2104 * abort command for single IO command or a device 2105 * @hisi_hba: host controller struct 2106 * @device: domain device 2107 * @abort_flag: mode of operation, device or single IO 2108 * @tag: tag of IO to be aborted (only relevant to single 2109 * IO mode) 2110 * @dq: delivery queue for this internal abort command 2111 * @rst_to_recover: If rst_to_recover set, queue a controller 2112 * reset if an internal abort times out. 2113 */ 2114 static int 2115 _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, 2116 struct domain_device *device, int abort_flag, 2117 int tag, struct hisi_sas_dq *dq, bool rst_to_recover) 2118 { 2119 struct sas_task *task; 2120 struct hisi_sas_device *sas_dev = device->lldd_dev; 2121 struct device *dev = hisi_hba->dev; 2122 int res; 2123 2124 /* 2125 * The interface is not realized means this HW don't support internal 2126 * abort, or don't need to do internal abort. Then here, we return 2127 * TMF_RESP_FUNC_FAILED and let other steps go on, which depends that 2128 * the internal abort has been executed and returned CQ. 2129 */ 2130 if (!hisi_hba->hw->prep_abort) 2131 return TMF_RESP_FUNC_FAILED; 2132 2133 if (test_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags)) 2134 return -EIO; 2135 2136 task = sas_alloc_slow_task(GFP_KERNEL); 2137 if (!task) 2138 return -ENOMEM; 2139 2140 task->dev = device; 2141 task->task_proto = device->tproto; 2142 task->task_done = hisi_sas_task_done; 2143 task->slow_task->timer.function = hisi_sas_tmf_timedout; 2144 task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT; 2145 add_timer(&task->slow_task->timer); 2146 2147 res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id, 2148 task, abort_flag, tag, dq); 2149 if (res) { 2150 del_timer_sync(&task->slow_task->timer); 2151 dev_err(dev, "internal task abort: executing internal task failed: %d\n", 2152 res); 2153 goto exit; 2154 } 2155 wait_for_completion(&task->slow_task->completion); 2156 res = TMF_RESP_FUNC_FAILED; 2157 2158 /* Internal abort timed out */ 2159 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { 2160 if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct) 2161 queue_work(hisi_hba->wq, &hisi_hba->debugfs_work); 2162 2163 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { 2164 struct hisi_sas_slot *slot = task->lldd_task; 2165 2166 set_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags); 2167 2168 if (slot) { 2169 struct hisi_sas_cq *cq = 2170 &hisi_hba->cq[slot->dlvry_queue]; 2171 /* 2172 * sync irq to avoid free'ing task 2173 * before using task in IO completion 2174 */ 2175 synchronize_irq(cq->irq_no); 2176 slot->task = NULL; 2177 } 2178 2179 if (rst_to_recover) { 2180 dev_err(dev, "internal task abort: timeout and not done. Queuing reset.\n"); 2181 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 2182 } else { 2183 dev_err(dev, "internal task abort: timeout and not done.\n"); 2184 } 2185 2186 res = -EIO; 2187 goto exit; 2188 } else 2189 dev_err(dev, "internal task abort: timeout.\n"); 2190 } 2191 2192 if (task->task_status.resp == SAS_TASK_COMPLETE && 2193 task->task_status.stat == TMF_RESP_FUNC_COMPLETE) { 2194 res = TMF_RESP_FUNC_COMPLETE; 2195 goto exit; 2196 } 2197 2198 if (task->task_status.resp == SAS_TASK_COMPLETE && 2199 task->task_status.stat == TMF_RESP_FUNC_SUCC) { 2200 res = TMF_RESP_FUNC_SUCC; 2201 goto exit; 2202 } 2203 2204 exit: 2205 dev_dbg(dev, "internal task abort: task to dev %016llx task=%pK resp: 0x%x sts 0x%x\n", 2206 SAS_ADDR(device->sas_addr), task, 2207 task->task_status.resp, /* 0 is complete, -1 is undelivered */ 2208 task->task_status.stat); 2209 sas_free_task(task); 2210 2211 return res; 2212 } 2213 2214 static int 2215 hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, 2216 struct domain_device *device, 2217 int abort_flag, int tag, bool rst_to_recover) 2218 { 2219 struct hisi_sas_slot *slot; 2220 struct device *dev = hisi_hba->dev; 2221 struct hisi_sas_dq *dq; 2222 int i, rc; 2223 2224 switch (abort_flag) { 2225 case HISI_SAS_INT_ABT_CMD: 2226 slot = &hisi_hba->slot_info[tag]; 2227 dq = &hisi_hba->dq[slot->dlvry_queue]; 2228 return _hisi_sas_internal_task_abort(hisi_hba, device, 2229 abort_flag, tag, dq, 2230 rst_to_recover); 2231 case HISI_SAS_INT_ABT_DEV: 2232 for (i = 0; i < hisi_hba->cq_nvecs; i++) { 2233 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 2234 const struct cpumask *mask = cq->irq_mask; 2235 2236 if (mask && !cpumask_intersects(cpu_online_mask, mask)) 2237 continue; 2238 dq = &hisi_hba->dq[i]; 2239 rc = _hisi_sas_internal_task_abort(hisi_hba, device, 2240 abort_flag, tag, 2241 dq, rst_to_recover); 2242 if (rc) 2243 return rc; 2244 } 2245 break; 2246 default: 2247 dev_err(dev, "Unrecognised internal abort flag (%d)\n", 2248 abort_flag); 2249 return -EINVAL; 2250 } 2251 2252 return 0; 2253 } 2254 2255 static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy) 2256 { 2257 hisi_sas_port_notify_formed(sas_phy); 2258 } 2259 2260 static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type, 2261 u8 reg_index, u8 reg_count, u8 *write_data) 2262 { 2263 struct hisi_hba *hisi_hba = sha->lldd_ha; 2264 2265 if (!hisi_hba->hw->write_gpio) 2266 return -EOPNOTSUPP; 2267 2268 return hisi_hba->hw->write_gpio(hisi_hba, reg_type, 2269 reg_index, reg_count, write_data); 2270 } 2271 2272 static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy) 2273 { 2274 struct asd_sas_phy *sas_phy = &phy->sas_phy; 2275 struct sas_phy *sphy = sas_phy->phy; 2276 unsigned long flags; 2277 2278 phy->phy_attached = 0; 2279 phy->phy_type = 0; 2280 phy->port = NULL; 2281 2282 spin_lock_irqsave(&phy->lock, flags); 2283 if (phy->enable) 2284 sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; 2285 else 2286 sphy->negotiated_linkrate = SAS_PHY_DISABLED; 2287 spin_unlock_irqrestore(&phy->lock, flags); 2288 } 2289 2290 void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy, 2291 gfp_t gfp_flags) 2292 { 2293 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 2294 struct asd_sas_phy *sas_phy = &phy->sas_phy; 2295 struct device *dev = hisi_hba->dev; 2296 2297 if (rdy) { 2298 /* Phy down but ready */ 2299 hisi_sas_bytes_dmaed(hisi_hba, phy_no, gfp_flags); 2300 hisi_sas_port_notify_formed(sas_phy); 2301 } else { 2302 struct hisi_sas_port *port = phy->port; 2303 2304 if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) || 2305 phy->in_reset) { 2306 dev_info(dev, "ignore flutter phy%d down\n", phy_no); 2307 return; 2308 } 2309 /* Phy down and not ready */ 2310 sas_notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL, gfp_flags); 2311 sas_phy_disconnected(sas_phy); 2312 2313 if (port) { 2314 if (phy->phy_type & PORT_TYPE_SAS) { 2315 int port_id = port->id; 2316 2317 if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba, 2318 port_id)) 2319 port->port_attached = 0; 2320 } else if (phy->phy_type & PORT_TYPE_SATA) 2321 port->port_attached = 0; 2322 } 2323 hisi_sas_phy_disconnected(phy); 2324 } 2325 } 2326 EXPORT_SYMBOL_GPL(hisi_sas_phy_down); 2327 2328 void hisi_sas_sync_irqs(struct hisi_hba *hisi_hba) 2329 { 2330 int i; 2331 2332 for (i = 0; i < hisi_hba->cq_nvecs; i++) { 2333 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 2334 2335 synchronize_irq(cq->irq_no); 2336 } 2337 } 2338 EXPORT_SYMBOL_GPL(hisi_sas_sync_irqs); 2339 2340 int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type) 2341 { 2342 struct hisi_hba *hisi_hba = shost_priv(shost); 2343 2344 if (reset_type != SCSI_ADAPTER_RESET) 2345 return -EOPNOTSUPP; 2346 2347 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 2348 2349 return 0; 2350 } 2351 EXPORT_SYMBOL_GPL(hisi_sas_host_reset); 2352 2353 struct scsi_transport_template *hisi_sas_stt; 2354 EXPORT_SYMBOL_GPL(hisi_sas_stt); 2355 2356 static struct sas_domain_function_template hisi_sas_transport_ops = { 2357 .lldd_dev_found = hisi_sas_dev_found, 2358 .lldd_dev_gone = hisi_sas_dev_gone, 2359 .lldd_execute_task = hisi_sas_queue_command, 2360 .lldd_control_phy = hisi_sas_control_phy, 2361 .lldd_abort_task = hisi_sas_abort_task, 2362 .lldd_abort_task_set = hisi_sas_abort_task_set, 2363 .lldd_clear_aca = hisi_sas_clear_aca, 2364 .lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset, 2365 .lldd_lu_reset = hisi_sas_lu_reset, 2366 .lldd_query_task = hisi_sas_query_task, 2367 .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha, 2368 .lldd_port_formed = hisi_sas_port_formed, 2369 .lldd_write_gpio = hisi_sas_write_gpio, 2370 }; 2371 2372 void hisi_sas_init_mem(struct hisi_hba *hisi_hba) 2373 { 2374 int i, s, j, max_command_entries = HISI_SAS_MAX_COMMANDS; 2375 struct hisi_sas_breakpoint *sata_breakpoint = hisi_hba->sata_breakpoint; 2376 2377 for (i = 0; i < hisi_hba->queue_count; i++) { 2378 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 2379 struct hisi_sas_dq *dq = &hisi_hba->dq[i]; 2380 struct hisi_sas_cmd_hdr *cmd_hdr = hisi_hba->cmd_hdr[i]; 2381 2382 s = sizeof(struct hisi_sas_cmd_hdr); 2383 for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++) 2384 memset(&cmd_hdr[j], 0, s); 2385 2386 dq->wr_point = 0; 2387 2388 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; 2389 memset(hisi_hba->complete_hdr[i], 0, s); 2390 cq->rd_point = 0; 2391 } 2392 2393 s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy; 2394 memset(hisi_hba->initial_fis, 0, s); 2395 2396 s = max_command_entries * sizeof(struct hisi_sas_iost); 2397 memset(hisi_hba->iost, 0, s); 2398 2399 s = max_command_entries * sizeof(struct hisi_sas_breakpoint); 2400 memset(hisi_hba->breakpoint, 0, s); 2401 2402 s = sizeof(struct hisi_sas_sata_breakpoint); 2403 for (j = 0; j < HISI_SAS_MAX_ITCT_ENTRIES; j++) 2404 memset(&sata_breakpoint[j], 0, s); 2405 } 2406 EXPORT_SYMBOL_GPL(hisi_sas_init_mem); 2407 2408 int hisi_sas_alloc(struct hisi_hba *hisi_hba) 2409 { 2410 struct device *dev = hisi_hba->dev; 2411 int i, j, s, max_command_entries = HISI_SAS_MAX_COMMANDS; 2412 int max_command_entries_ru, sz_slot_buf_ru; 2413 int blk_cnt, slots_per_blk; 2414 2415 sema_init(&hisi_hba->sem, 1); 2416 spin_lock_init(&hisi_hba->lock); 2417 for (i = 0; i < hisi_hba->n_phy; i++) { 2418 hisi_sas_phy_init(hisi_hba, i); 2419 hisi_hba->port[i].port_attached = 0; 2420 hisi_hba->port[i].id = -1; 2421 } 2422 2423 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 2424 hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED; 2425 hisi_hba->devices[i].device_id = i; 2426 hisi_hba->devices[i].dev_status = HISI_SAS_DEV_INIT; 2427 } 2428 2429 for (i = 0; i < hisi_hba->queue_count; i++) { 2430 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 2431 struct hisi_sas_dq *dq = &hisi_hba->dq[i]; 2432 2433 /* Completion queue structure */ 2434 cq->id = i; 2435 cq->hisi_hba = hisi_hba; 2436 2437 /* Delivery queue structure */ 2438 spin_lock_init(&dq->lock); 2439 INIT_LIST_HEAD(&dq->list); 2440 dq->id = i; 2441 dq->hisi_hba = hisi_hba; 2442 2443 /* Delivery queue */ 2444 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS; 2445 hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s, 2446 &hisi_hba->cmd_hdr_dma[i], 2447 GFP_KERNEL); 2448 if (!hisi_hba->cmd_hdr[i]) 2449 goto err_out; 2450 2451 /* Completion queue */ 2452 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; 2453 hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s, 2454 &hisi_hba->complete_hdr_dma[i], 2455 GFP_KERNEL); 2456 if (!hisi_hba->complete_hdr[i]) 2457 goto err_out; 2458 } 2459 2460 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct); 2461 hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma, 2462 GFP_KERNEL); 2463 if (!hisi_hba->itct) 2464 goto err_out; 2465 2466 hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries, 2467 sizeof(struct hisi_sas_slot), 2468 GFP_KERNEL); 2469 if (!hisi_hba->slot_info) 2470 goto err_out; 2471 2472 /* roundup to avoid overly large block size */ 2473 max_command_entries_ru = roundup(max_command_entries, 64); 2474 if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK) 2475 sz_slot_buf_ru = sizeof(struct hisi_sas_slot_dif_buf_table); 2476 else 2477 sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table); 2478 sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64); 2479 s = max(lcm(max_command_entries_ru, sz_slot_buf_ru), PAGE_SIZE); 2480 blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s; 2481 slots_per_blk = s / sz_slot_buf_ru; 2482 2483 for (i = 0; i < blk_cnt; i++) { 2484 int slot_index = i * slots_per_blk; 2485 dma_addr_t buf_dma; 2486 void *buf; 2487 2488 buf = dmam_alloc_coherent(dev, s, &buf_dma, 2489 GFP_KERNEL); 2490 if (!buf) 2491 goto err_out; 2492 2493 for (j = 0; j < slots_per_blk; j++, slot_index++) { 2494 struct hisi_sas_slot *slot; 2495 2496 slot = &hisi_hba->slot_info[slot_index]; 2497 slot->buf = buf; 2498 slot->buf_dma = buf_dma; 2499 slot->idx = slot_index; 2500 2501 buf += sz_slot_buf_ru; 2502 buf_dma += sz_slot_buf_ru; 2503 } 2504 } 2505 2506 s = max_command_entries * sizeof(struct hisi_sas_iost); 2507 hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma, 2508 GFP_KERNEL); 2509 if (!hisi_hba->iost) 2510 goto err_out; 2511 2512 s = max_command_entries * sizeof(struct hisi_sas_breakpoint); 2513 hisi_hba->breakpoint = dmam_alloc_coherent(dev, s, 2514 &hisi_hba->breakpoint_dma, 2515 GFP_KERNEL); 2516 if (!hisi_hba->breakpoint) 2517 goto err_out; 2518 2519 hisi_hba->slot_index_count = max_command_entries; 2520 s = hisi_hba->slot_index_count / BITS_PER_BYTE; 2521 hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL); 2522 if (!hisi_hba->slot_index_tags) 2523 goto err_out; 2524 2525 s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS; 2526 hisi_hba->initial_fis = dmam_alloc_coherent(dev, s, 2527 &hisi_hba->initial_fis_dma, 2528 GFP_KERNEL); 2529 if (!hisi_hba->initial_fis) 2530 goto err_out; 2531 2532 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint); 2533 hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s, 2534 &hisi_hba->sata_breakpoint_dma, 2535 GFP_KERNEL); 2536 if (!hisi_hba->sata_breakpoint) 2537 goto err_out; 2538 2539 hisi_sas_slot_index_init(hisi_hba); 2540 hisi_hba->last_slot_index = HISI_SAS_UNRESERVED_IPTT; 2541 2542 hisi_hba->wq = create_singlethread_workqueue(dev_name(dev)); 2543 if (!hisi_hba->wq) { 2544 dev_err(dev, "sas_alloc: failed to create workqueue\n"); 2545 goto err_out; 2546 } 2547 2548 return 0; 2549 err_out: 2550 return -ENOMEM; 2551 } 2552 EXPORT_SYMBOL_GPL(hisi_sas_alloc); 2553 2554 void hisi_sas_free(struct hisi_hba *hisi_hba) 2555 { 2556 int i; 2557 2558 for (i = 0; i < hisi_hba->n_phy; i++) { 2559 struct hisi_sas_phy *phy = &hisi_hba->phy[i]; 2560 2561 del_timer_sync(&phy->timer); 2562 } 2563 2564 if (hisi_hba->wq) 2565 destroy_workqueue(hisi_hba->wq); 2566 } 2567 EXPORT_SYMBOL_GPL(hisi_sas_free); 2568 2569 void hisi_sas_rst_work_handler(struct work_struct *work) 2570 { 2571 struct hisi_hba *hisi_hba = 2572 container_of(work, struct hisi_hba, rst_work); 2573 2574 if (hisi_sas_controller_prereset(hisi_hba)) 2575 return; 2576 2577 hisi_sas_controller_reset(hisi_hba); 2578 } 2579 EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler); 2580 2581 void hisi_sas_sync_rst_work_handler(struct work_struct *work) 2582 { 2583 struct hisi_sas_rst *rst = 2584 container_of(work, struct hisi_sas_rst, work); 2585 2586 if (hisi_sas_controller_prereset(rst->hisi_hba)) 2587 goto rst_complete; 2588 2589 if (!hisi_sas_controller_reset(rst->hisi_hba)) 2590 rst->done = true; 2591 rst_complete: 2592 complete(rst->completion); 2593 } 2594 EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler); 2595 2596 int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba) 2597 { 2598 struct device *dev = hisi_hba->dev; 2599 struct platform_device *pdev = hisi_hba->platform_dev; 2600 struct device_node *np = pdev ? pdev->dev.of_node : NULL; 2601 struct clk *refclk; 2602 2603 if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr, 2604 SAS_ADDR_SIZE)) { 2605 dev_err(dev, "could not get property sas-addr\n"); 2606 return -ENOENT; 2607 } 2608 2609 if (np) { 2610 /* 2611 * These properties are only required for platform device-based 2612 * controller with DT firmware. 2613 */ 2614 hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np, 2615 "hisilicon,sas-syscon"); 2616 if (IS_ERR(hisi_hba->ctrl)) { 2617 dev_err(dev, "could not get syscon\n"); 2618 return -ENOENT; 2619 } 2620 2621 if (device_property_read_u32(dev, "ctrl-reset-reg", 2622 &hisi_hba->ctrl_reset_reg)) { 2623 dev_err(dev, "could not get property ctrl-reset-reg\n"); 2624 return -ENOENT; 2625 } 2626 2627 if (device_property_read_u32(dev, "ctrl-reset-sts-reg", 2628 &hisi_hba->ctrl_reset_sts_reg)) { 2629 dev_err(dev, "could not get property ctrl-reset-sts-reg\n"); 2630 return -ENOENT; 2631 } 2632 2633 if (device_property_read_u32(dev, "ctrl-clock-ena-reg", 2634 &hisi_hba->ctrl_clock_ena_reg)) { 2635 dev_err(dev, "could not get property ctrl-clock-ena-reg\n"); 2636 return -ENOENT; 2637 } 2638 } 2639 2640 refclk = devm_clk_get(dev, NULL); 2641 if (IS_ERR(refclk)) 2642 dev_dbg(dev, "no ref clk property\n"); 2643 else 2644 hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000; 2645 2646 if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) { 2647 dev_err(dev, "could not get property phy-count\n"); 2648 return -ENOENT; 2649 } 2650 2651 if (device_property_read_u32(dev, "queue-count", 2652 &hisi_hba->queue_count)) { 2653 dev_err(dev, "could not get property queue-count\n"); 2654 return -ENOENT; 2655 } 2656 2657 return 0; 2658 } 2659 EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info); 2660 2661 static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, 2662 const struct hisi_sas_hw *hw) 2663 { 2664 struct resource *res; 2665 struct Scsi_Host *shost; 2666 struct hisi_hba *hisi_hba; 2667 struct device *dev = &pdev->dev; 2668 int error; 2669 2670 shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba)); 2671 if (!shost) { 2672 dev_err(dev, "scsi host alloc failed\n"); 2673 return NULL; 2674 } 2675 hisi_hba = shost_priv(shost); 2676 2677 INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler); 2678 hisi_hba->hw = hw; 2679 hisi_hba->dev = dev; 2680 hisi_hba->platform_dev = pdev; 2681 hisi_hba->shost = shost; 2682 SHOST_TO_SAS_HA(shost) = &hisi_hba->sha; 2683 2684 timer_setup(&hisi_hba->timer, NULL, 0); 2685 2686 if (hisi_sas_get_fw_info(hisi_hba) < 0) 2687 goto err_out; 2688 2689 error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 2690 if (error) 2691 error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); 2692 2693 if (error) { 2694 dev_err(dev, "No usable DMA addressing method\n"); 2695 goto err_out; 2696 } 2697 2698 hisi_hba->regs = devm_platform_ioremap_resource(pdev, 0); 2699 if (IS_ERR(hisi_hba->regs)) 2700 goto err_out; 2701 2702 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 2703 if (res) { 2704 hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res); 2705 if (IS_ERR(hisi_hba->sgpio_regs)) 2706 goto err_out; 2707 } 2708 2709 if (hisi_sas_alloc(hisi_hba)) { 2710 hisi_sas_free(hisi_hba); 2711 goto err_out; 2712 } 2713 2714 return shost; 2715 err_out: 2716 scsi_host_put(shost); 2717 dev_err(dev, "shost alloc failed\n"); 2718 return NULL; 2719 } 2720 2721 static int hisi_sas_interrupt_preinit(struct hisi_hba *hisi_hba) 2722 { 2723 if (hisi_hba->hw->interrupt_preinit) 2724 return hisi_hba->hw->interrupt_preinit(hisi_hba); 2725 return 0; 2726 } 2727 2728 int hisi_sas_probe(struct platform_device *pdev, 2729 const struct hisi_sas_hw *hw) 2730 { 2731 struct Scsi_Host *shost; 2732 struct hisi_hba *hisi_hba; 2733 struct device *dev = &pdev->dev; 2734 struct asd_sas_phy **arr_phy; 2735 struct asd_sas_port **arr_port; 2736 struct sas_ha_struct *sha; 2737 int rc, phy_nr, port_nr, i; 2738 2739 shost = hisi_sas_shost_alloc(pdev, hw); 2740 if (!shost) 2741 return -ENOMEM; 2742 2743 sha = SHOST_TO_SAS_HA(shost); 2744 hisi_hba = shost_priv(shost); 2745 platform_set_drvdata(pdev, sha); 2746 2747 phy_nr = port_nr = hisi_hba->n_phy; 2748 2749 arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL); 2750 arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL); 2751 if (!arr_phy || !arr_port) { 2752 rc = -ENOMEM; 2753 goto err_out_ha; 2754 } 2755 2756 sha->sas_phy = arr_phy; 2757 sha->sas_port = arr_port; 2758 sha->lldd_ha = hisi_hba; 2759 2760 shost->transportt = hisi_sas_stt; 2761 shost->max_id = HISI_SAS_MAX_DEVICES; 2762 shost->max_lun = ~0; 2763 shost->max_channel = 1; 2764 shost->max_cmd_len = 16; 2765 if (hisi_hba->hw->slot_index_alloc) { 2766 shost->can_queue = HISI_SAS_MAX_COMMANDS; 2767 shost->cmd_per_lun = HISI_SAS_MAX_COMMANDS; 2768 } else { 2769 shost->can_queue = HISI_SAS_UNRESERVED_IPTT; 2770 shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT; 2771 } 2772 2773 sha->sas_ha_name = DRV_NAME; 2774 sha->dev = hisi_hba->dev; 2775 sha->lldd_module = THIS_MODULE; 2776 sha->sas_addr = &hisi_hba->sas_addr[0]; 2777 sha->num_phys = hisi_hba->n_phy; 2778 sha->core.shost = hisi_hba->shost; 2779 2780 for (i = 0; i < hisi_hba->n_phy; i++) { 2781 sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy; 2782 sha->sas_port[i] = &hisi_hba->port[i].sas_port; 2783 } 2784 2785 rc = hisi_sas_interrupt_preinit(hisi_hba); 2786 if (rc) 2787 goto err_out_ha; 2788 2789 rc = scsi_add_host(shost, &pdev->dev); 2790 if (rc) 2791 goto err_out_ha; 2792 2793 rc = sas_register_ha(sha); 2794 if (rc) 2795 goto err_out_register_ha; 2796 2797 rc = hisi_hba->hw->hw_init(hisi_hba); 2798 if (rc) 2799 goto err_out_hw_init; 2800 2801 scsi_scan_host(shost); 2802 2803 return 0; 2804 2805 err_out_hw_init: 2806 sas_unregister_ha(sha); 2807 err_out_register_ha: 2808 scsi_remove_host(shost); 2809 err_out_ha: 2810 hisi_sas_free(hisi_hba); 2811 scsi_host_put(shost); 2812 return rc; 2813 } 2814 EXPORT_SYMBOL_GPL(hisi_sas_probe); 2815 2816 int hisi_sas_remove(struct platform_device *pdev) 2817 { 2818 struct sas_ha_struct *sha = platform_get_drvdata(pdev); 2819 struct hisi_hba *hisi_hba = sha->lldd_ha; 2820 struct Scsi_Host *shost = sha->core.shost; 2821 2822 del_timer_sync(&hisi_hba->timer); 2823 2824 sas_unregister_ha(sha); 2825 sas_remove_host(sha->core.shost); 2826 2827 hisi_sas_free(hisi_hba); 2828 scsi_host_put(shost); 2829 return 0; 2830 } 2831 EXPORT_SYMBOL_GPL(hisi_sas_remove); 2832 2833 #if IS_ENABLED(CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE) 2834 #define DEBUGFS_ENABLE_DEFAULT "enabled" 2835 bool hisi_sas_debugfs_enable = true; 2836 u32 hisi_sas_debugfs_dump_count = 50; 2837 #else 2838 #define DEBUGFS_ENABLE_DEFAULT "disabled" 2839 bool hisi_sas_debugfs_enable; 2840 u32 hisi_sas_debugfs_dump_count = 1; 2841 #endif 2842 2843 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable); 2844 module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444); 2845 MODULE_PARM_DESC(hisi_sas_debugfs_enable, 2846 "Enable driver debugfs (default "DEBUGFS_ENABLE_DEFAULT")"); 2847 2848 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dump_count); 2849 module_param_named(debugfs_dump_count, hisi_sas_debugfs_dump_count, uint, 0444); 2850 MODULE_PARM_DESC(hisi_sas_debugfs_dump_count, "Number of debugfs dumps to allow"); 2851 2852 struct dentry *hisi_sas_debugfs_dir; 2853 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dir); 2854 2855 static __init int hisi_sas_init(void) 2856 { 2857 hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops); 2858 if (!hisi_sas_stt) 2859 return -ENOMEM; 2860 2861 if (hisi_sas_debugfs_enable) { 2862 hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL); 2863 if (hisi_sas_debugfs_dump_count > HISI_SAS_MAX_DEBUGFS_DUMP) { 2864 pr_info("hisi_sas: Limiting debugfs dump count\n"); 2865 hisi_sas_debugfs_dump_count = HISI_SAS_MAX_DEBUGFS_DUMP; 2866 } 2867 } 2868 2869 return 0; 2870 } 2871 2872 static __exit void hisi_sas_exit(void) 2873 { 2874 sas_release_transport(hisi_sas_stt); 2875 2876 debugfs_remove(hisi_sas_debugfs_dir); 2877 } 2878 2879 module_init(hisi_sas_init); 2880 module_exit(hisi_sas_exit); 2881 2882 MODULE_LICENSE("GPL"); 2883 MODULE_AUTHOR("John Garry <john.garry@huawei.com>"); 2884 MODULE_DESCRIPTION("HISILICON SAS controller driver"); 2885 MODULE_ALIAS("platform:" DRV_NAME); 2886