// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2015 Linaro Ltd.
 * Copyright (c) 2015 Hisilicon Limited.
 */

#include "hisi_sas.h"
#define DRV_NAME "hisi_sas"

/* A device is unusable when absent or its slot is marked SAS_PHY_UNUSED. */
#define DEV_IS_GONE(dev) \
	((!dev) || (dev->dev_type == SAS_PHY_UNUSED))

/* Forward declarations for routines referenced before their definitions. */
static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
				u8 *lun, struct hisi_sas_tmf_task *tmf);
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag);
static int hisi_sas_softreset_ata_disk(struct domain_device *device);
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata);
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
				  struct domain_device *device);
static void hisi_sas_dev_gone(struct domain_device *device);

/*
 * Classify an ATA command (from the H2D FIS) into the SATA protocol
 * class (FPDMA/PIO/DMA/NONDATA) used when building the command header.
 * Commands not explicitly listed fall back on @direction: DMA_NONE ->
 * NONDATA, anything else -> PIO.
 */
u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
	switch (fis->command) {
	case ATA_CMD_FPDMA_WRITE:
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_RECV:
	case ATA_CMD_FPDMA_SEND:
	case ATA_CMD_NCQ_NON_DATA:
		return HISI_SAS_SATA_PROTOCOL_FPDMA;

	case ATA_CMD_DOWNLOAD_MICRO:
	case ATA_CMD_ID_ATA:
	case ATA_CMD_PMP_READ:
	case ATA_CMD_READ_LOG_EXT:
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
	case ATA_CMD_PMP_WRITE:
	case ATA_CMD_WRITE_LOG_EXT:
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		return HISI_SAS_SATA_PROTOCOL_PIO;

	case ATA_CMD_DSM:
	case ATA_CMD_DOWNLOAD_MICRO_DMA:
	case ATA_CMD_PMP_READ_DMA:
	case ATA_CMD_PMP_WRITE_DMA:
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_READ_LOG_DMA_EXT:
	case ATA_CMD_READ_STREAM_DMA_EXT:
	case ATA_CMD_TRUSTED_RCV_DMA:
	case ATA_CMD_TRUSTED_SND_DMA:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
	case ATA_CMD_WRITE_QUEUED:
	case ATA_CMD_WRITE_LOG_DMA_EXT:
	case ATA_CMD_WRITE_STREAM_DMA_EXT:
	case ATA_CMD_ZAC_MGMT_IN:
		return HISI_SAS_SATA_PROTOCOL_DMA;

	case ATA_CMD_CHK_POWER:
	case ATA_CMD_DEV_RESET:
	case ATA_CMD_EDD:
	case ATA_CMD_FLUSH:
	case ATA_CMD_FLUSH_EXT:
	case ATA_CMD_VERIFY:
	case ATA_CMD_VERIFY_EXT:
	case ATA_CMD_SET_FEATURES:
	case ATA_CMD_STANDBY:
	case ATA_CMD_STANDBYNOW1:
	case ATA_CMD_ZAC_MGMT_OUT:
		return HISI_SAS_SATA_PROTOCOL_NONDATA;

	case ATA_CMD_SET_MAX:
		/* SET MAX sub-commands differ by the FIS features field */
		switch (fis->features) {
		case ATA_SET_MAX_PASSWD:
		case ATA_SET_MAX_LOCK:
			return HISI_SAS_SATA_PROTOCOL_PIO;

		case ATA_SET_MAX_PASSWD_DMA:
		case ATA_SET_MAX_UNLOCK_DMA:
			return HISI_SAS_SATA_PROTOCOL_DMA;

		default:
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		}

	default:
	{
		if (direction == DMA_NONE)
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		return HISI_SAS_SATA_PROTOCOL_PIO;
	}
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);

/*
 * Copy the D2H FIS from the slot's status buffer into the libsas
 * ata_task_resp so completion status reaches libata.
 */
void hisi_sas_sata_done(struct sas_task *task,
			struct hisi_sas_slot *slot)
{
	struct task_status_struct *ts = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
	struct hisi_sas_status_buffer *status_buf =
			hisi_sas_status_buf_addr_mem(slot);
	u8 *iu = &status_buf->iu[0];
	struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;

	resp->frame_len = sizeof(struct dev_to_host_fis);
	memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));

	ts->buf_valid_size = sizeof(*resp);
}
EXPORT_SYMBOL_GPL(hisi_sas_sata_done);

/*
 * This function assumes linkrate mask fits in 8 bits, which it
 * does for all HW versions supported.
 */
u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)
{
	u8 rate = 0;
	int i;

	/* Set one bit per supported rate, stepping up from 1.5 Gbps. */
	max -= SAS_LINK_RATE_1_5_GBPS;
	for (i = 0; i <= max; i++)
		rate |= 1 << (i * 2);
	return rate;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask);

/* Resolve the HBA private data from a libsas domain device. */
static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
{
	return device->port->ha->lldd_ha;
}

struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
{
	return container_of(sas_port, struct hisi_sas_port, sas_port);
}
EXPORT_SYMBOL_GPL(to_hisi_sas_port);

/* Disable every phy on the controller. */
void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
{
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
}
EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);

static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	clear_bit(slot_idx, bitmap);
}

/*
 * Return an IPTT to the pool.  Tags below HISI_SAS_UNRESERVED_IPTT come
 * from the block layer (scsi_cmnd request tag) unless the HW has its own
 * allocator, so the bitmap is only cleared for the other cases.
 */
static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
	if (hisi_hba->hw->slot_index_alloc ||
	    slot_idx >= HISI_SAS_UNRESERVED_IPTT) {
		spin_lock(&hisi_hba->lock);
		hisi_sas_slot_index_clear(hisi_hba, slot_idx);
		spin_unlock(&hisi_hba->lock);
	}
}

static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	set_bit(slot_idx, bitmap);
}

/*
 * Allocate an IPTT.  SCSI commands reuse the block-layer tag directly;
 * internal commands take a reserved tag from the bitmap, scanning
 * round-robin from the last allocated index.
 */
static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
				     struct scsi_cmnd *scsi_cmnd)
{
	int index;
	void *bitmap = hisi_hba->slot_index_tags;

	if (scsi_cmnd)
		return scsi_cmnd->request->tag;

	spin_lock(&hisi_hba->lock);
	index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
				   hisi_hba->last_slot_index + 1);
	if (index >= hisi_hba->slot_index_count) {
		/* Wrap around to the start of the reserved tag region. */
		index = find_next_zero_bit(bitmap,
				hisi_hba->slot_index_count,
				HISI_SAS_UNRESERVED_IPTT);
		if (index >= hisi_hba->slot_index_count) {
			spin_unlock(&hisi_hba->lock);
			return -SAS_QUEUE_FULL;
		}
	}
	hisi_sas_slot_index_set(hisi_hba, index);
	hisi_hba->last_slot_index = index;
	spin_unlock(&hisi_hba->lock);

	return index;
}

static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->slot_index_count; ++i)
		hisi_sas_slot_index_clear(hisi_hba, i);
}

/*
 * Tear down a completed/aborted slot: unmap data and DIF DMA for
 * non-ATA tasks, unlink the slot from its device list, zero the slot
 * metadata (fields before 'buf') and free its IPTT.
 */
void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
			     struct hisi_sas_slot *slot)
{
	int device_id = slot->device_id;
	struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id];

	if (task) {
		struct device *dev = hisi_hba->dev;

		if (!task->lldd_task)
			return;

		task->lldd_task = NULL;

		if (!sas_protocol_ata(task->task_proto)) {
			struct sas_ssp_task *ssp_task = &task->ssp_task;
			struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;

			if (slot->n_elem)
				dma_unmap_sg(dev, task->scatter,
					     task->num_scatter,
					     task->data_dir);
			if (slot->n_elem_dif)
				dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
					     scsi_prot_sg_count(scsi_cmnd),
					     task->data_dir);
		}
	}

	spin_lock(&sas_dev->lock);
	list_del_init(&slot->entry);
	spin_unlock(&sas_dev->lock);

	memset(slot, 0, offsetof(struct hisi_sas_slot, buf));

	hisi_sas_slot_index_free(hisi_hba, slot->idx);
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);

/* Thin wrappers dispatching slot preparation to the HW-version hooks. */
static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_smp(hisi_hba, slot);
}

static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_ssp(hisi_hba, slot);
}

static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_stp(hisi_hba, slot);
}

static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
				     struct hisi_sas_slot *slot,
				     int device_id, int abort_flag,
				     int tag_to_abort)
{
	hisi_hba->hw->prep_abort(hisi_hba, slot,
				 device_id, abort_flag, tag_to_abort);
}

/*
 * Undo hisi_sas_dma_map().  ATA tasks are not unmapped here; their
 * scatterlists are not mapped by this driver (see hisi_sas_dma_map()).
 */
static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba,
			       struct sas_task *task, int n_elem,
			       int n_elem_req)
{
	struct device *dev = hisi_hba->dev;

	if (!sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter) {
			if (n_elem)
				dma_unmap_sg(dev, task->scatter,
					     task->num_scatter,
					     task->data_dir);
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			if (n_elem_req)
				dma_unmap_sg(dev, &task->smp_task.smp_req,
					     1, DMA_TO_DEVICE);
		}
	}
}

/*
 * Map the task's data for DMA.  For ATA, only the element count is
 * taken (the scatterlist is not mapped here).  SMP request length must
 * be a multiple of 4 bytes.  Returns 0 or a negative errno.
 */
static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
			    struct sas_task *task, int *n_elem,
			    int *n_elem_req)
{
	struct device *dev = hisi_hba->dev;
	int rc;

	if (sas_protocol_ata(task->task_proto)) {
		*n_elem = task->num_scatter;
	} else {
		unsigned int req_len;

		if (task->num_scatter) {
			*n_elem = dma_map_sg(dev, task->scatter,
					     task->num_scatter, task->data_dir);
			if (!*n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			*n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
						 1, DMA_TO_DEVICE);
			if (!*n_elem_req) {
				rc = -ENOMEM;
				goto prep_out;
			}
			req_len = sg_dma_len(&task->smp_task.smp_req);
			if (req_len & 0x3) {
				rc = -EINVAL;
				goto err_out_dma_unmap;
			}
		}
	}

	if (*n_elem > HISI_SAS_SGE_PAGE_CNT) {
		dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
			*n_elem);
		rc = -EINVAL;
		goto err_out_dma_unmap;
	}
	return 0;

err_out_dma_unmap:
	/* It would be better to call dma_unmap_sg() here, but it's messy */
	hisi_sas_dma_unmap(hisi_hba, task, *n_elem,
			   *n_elem_req);
prep_out:
	return rc;
}

/* Undo hisi_sas_dif_dma_map() for the protection-information sglist. */
static void hisi_sas_dif_dma_unmap(struct hisi_hba *hisi_hba,
				   struct sas_task *task, int n_elem_dif)
{
	struct device *dev = hisi_hba->dev;

	if (n_elem_dif) {
		struct sas_ssp_task *ssp_task = &task->ssp_task;
		struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;

		dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
			     scsi_prot_sg_count(scsi_cmnd),
			     task->data_dir);
	}
}

/*
 * Map the protection-information (DIF) scatterlist of an SSP command,
 * if the command carries one.  Returns 0 or a negative errno.
 */
static int hisi_sas_dif_dma_map(struct hisi_hba *hisi_hba,
				int *n_elem_dif, struct sas_task *task)
{
	struct device *dev = hisi_hba->dev;
	struct sas_ssp_task *ssp_task;
	struct scsi_cmnd *scsi_cmnd;
	int rc;

	if (task->num_scatter) {
		ssp_task = &task->ssp_task;
		scsi_cmnd = ssp_task->cmd;

		if (scsi_prot_sg_count(scsi_cmnd)) {
			*n_elem_dif = dma_map_sg(dev,
						 scsi_prot_sglist(scsi_cmnd),
						 scsi_prot_sg_count(scsi_cmnd),
						 task->data_dir);

			if (!*n_elem_dif)
				return -ENOMEM;

			if (*n_elem_dif > HISI_SAS_SGE_DIF_PAGE_CNT) {
				dev_err(dev, "task prep: n_elem_dif(%d) too large\n",
					*n_elem_dif);
				rc = -EINVAL;
				goto err_out_dif_dma_unmap;
			}
		}
	}

	return 0;

err_out_dif_dma_unmap:
	dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
		     scsi_prot_sg_count(scsi_cmnd), task->data_dir);
	return rc;
}

/*
 * Build a delivery-queue slot for @task: pick a delivery queue, map
 * DMA, allocate an IPTT, fill the command header and hand the slot to
 * the HW-specific prep routine.  On success *pass is incremented and
 * the slot marked ready; actual delivery is started by the caller.
 */
static int hisi_sas_task_prep(struct sas_task *task,
			      struct hisi_sas_dq **dq_pointer,
			      bool is_tmf, struct hisi_sas_tmf_task *tmf,
			      int *pass)
{
	struct domain_device *device = task->dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct asd_sas_port *sas_port = device->port;
	struct device *dev = hisi_hba->dev;
	int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
	int n_elem = 0, n_elem_dif = 0, n_elem_req = 0;
	struct hisi_sas_dq *dq;
	unsigned long flags;
	int wr_q_index;

	if (DEV_IS_GONE(sas_dev)) {
		if (sas_dev)
			dev_info(dev, "task prep: device %d not ready\n",
				 sas_dev->device_id);
		else
			dev_info(dev, "task prep: device %016llx not ready\n",
				 SAS_ADDR(device->sas_addr));

		return -ECOMM;
	}

	/* Pick the delivery queue: per-CPU map when present, else the
	 * device's default queue.
	 */
	if (hisi_hba->reply_map) {
		int cpu = raw_smp_processor_id();
		unsigned int dq_index = hisi_hba->reply_map[cpu];

		*dq_pointer = dq = &hisi_hba->dq[dq_index];
	} else {
		*dq_pointer = dq = sas_dev->dq;
	}

	port = to_hisi_sas_port(sas_port);
	if (port && !port->port_attached) {
		dev_info(dev, "task prep: %s port%d not attach device\n",
			 (dev_is_sata(device)) ?
			 "SATA/STP" : "SAS",
			 device->port->id);

		return -ECOMM;
	}

	rc = hisi_sas_dma_map(hisi_hba, task, &n_elem,
			      &n_elem_req);
	if (rc < 0)
		goto prep_out;

	if (!sas_protocol_ata(task->task_proto)) {
		rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task);
		if (rc < 0)
			goto err_out_dma_unmap;
	}

	/* Allocate the IPTT: HW allocator if present, otherwise derive
	 * the scsi_cmnd (via libata for SATA) so the block-layer tag is
	 * reused.
	 */
	if (hisi_hba->hw->slot_index_alloc)
		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
	else {
		struct scsi_cmnd *scsi_cmnd = NULL;

		if (task->uldd_task) {
			struct ata_queued_cmd *qc;

			if (dev_is_sata(device)) {
				qc = task->uldd_task;
				scsi_cmnd = qc->scsicmd;
			} else {
				scsi_cmnd = task->uldd_task;
			}
		}
		rc = hisi_sas_slot_index_alloc(hisi_hba, scsi_cmnd);
	}
	if (rc < 0)
		goto err_out_dif_dma_unmap;

	slot_idx = rc;
	slot = &hisi_hba->slot_info[slot_idx];

	spin_lock(&dq->lock);
	wr_q_index = dq->wr_point;
	dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
	list_add_tail(&slot->delivery, &dq->list);
	spin_unlock(&dq->lock);
	spin_lock(&sas_dev->lock);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock(&sas_dev->lock);

	dlvry_queue = dq->id;
	dlvry_queue_slot = wr_q_index;

	slot->device_id = sas_dev->device_id;
	slot->n_elem = n_elem;
	slot->n_elem_dif = n_elem_dif;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	slot->tmf = tmf;
	slot->is_internal = is_tmf;
	task->lldd_task = slot;

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0,
	       sizeof(struct hisi_sas_err_record));

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		hisi_sas_task_prep_smp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SSP:
		hisi_sas_task_prep_ssp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		hisi_sas_task_prep_ata(hisi_hba, slot);
		break;
	default:
		dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
			task->task_proto);
		break;
	}

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	++(*pass);
	WRITE_ONCE(slot->ready, 1);

	return 0;

err_out_dif_dma_unmap:
	if (!sas_protocol_ata(task->task_proto))
		hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif);
err_out_dma_unmap:
	hisi_sas_dma_unmap(hisi_hba, task, n_elem,
			   n_elem_req);
prep_out:
	dev_err(dev, "task prep: failed[%d]!\n", rc);
	return rc;
}

/*
 * Queue a task for execution: prepare a slot and, if preparation
 * succeeded, ring the delivery-queue doorbell.
 */
static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
			      bool is_tmf, struct hisi_sas_tmf_task *tmf)
{
	u32 rc;
	u32 pass = 0;
	struct hisi_hba *hisi_hba;
	struct device *dev;
	struct domain_device *device = task->dev;
	struct asd_sas_port *sas_port = device->port;
	struct
	       hisi_sas_dq *dq = NULL;

	if (!sas_port) {
		struct task_status_struct *ts = &task->task_status;

		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port, should
		 * not call task_done for sata
		 */
		if (device->dev_type != SAS_SATA_DEV)
			task->task_done(task);
		return -ECOMM;
	}

	hisi_hba = dev_to_hisi_hba(device);
	dev = hisi_hba->dev;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
		/*
		 * For IOs from upper layer, it may already disable preempt
		 * in the IO path, if disable preempt again in down(),
		 * function schedule() will report schedule_bug(), so check
		 * preemptible() before goto down().
		 */
		if (!preemptible())
			return -EINVAL;

		/* Block until the in-progress reset releases the semaphore. */
		down(&hisi_hba->sem);
		up(&hisi_hba->sem);
	}

	/* protect task_prep and start_delivery sequence */
	rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass);
	if (rc)
		dev_err(dev, "task exec: failed[%d]!\n", rc);

	if (likely(pass)) {
		spin_lock(&dq->lock);
		hisi_hba->hw->start_delivery(dq);
		spin_unlock(&dq->lock);
	}

	return rc;
}

/*
 * Report a phy-up to libsas: publish negotiated/hw link rates and the
 * received identify frame (SAS) or FIS (SATA), then notify
 * PORTE_BYTES_DMAED.
 */
static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha;

	if (!phy->phy_attached)
		return;

	sas_ha = &hisi_hba->sha;
	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate_hw =
			hisi_hba->hw->phy_get_max_linkrate();
		if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->minimum_linkrate = phy->minimum_linkrate;

		if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->maximum_linkrate = phy->maximum_linkrate;
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* Nothing */
	}

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
	sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
}

/*
 * Find a free entry in hisi_hba->devices, scanning round-robin after
 * the last allocated id, and initialise it for @device.  Returns NULL
 * when all HISI_SAS_MAX_DEVICES entries are in use.
 */
static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = NULL;
	int last = hisi_hba->last_dev_id;
	int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES;
	int i;

	spin_lock(&hisi_hba->lock);
	for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) {
		if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
			int queue = i % hisi_hba->queue_count;
			struct hisi_sas_dq *dq = &hisi_hba->dq[queue];

			hisi_hba->devices[i].device_id = i;
			sas_dev = &hisi_hba->devices[i];
			sas_dev->dev_status = HISI_SAS_DEV_INIT;
			sas_dev->dev_type = device->dev_type;
			sas_dev->hisi_hba = hisi_hba;
			sas_dev->sas_device = device;
			sas_dev->dq = dq;
			spin_lock_init(&sas_dev->lock);
			INIT_LIST_HEAD(&hisi_hba->devices[i].list);
			break;
		}
		i++;
	}
	hisi_hba->last_dev_id = i;
	spin_unlock(&hisi_hba->lock);

	return sas_dev;
}

#define HISI_SAS_DISK_RECOVER_CNT 3
/*
 * Bring a newly found device to a known state: clear the task set for
 * SSP end devices; for remote SATA devices, hard-reset the link to
 * clear any previous STP affiliation, then soft-reset the disk.
 */
static int hisi_sas_init_device(struct domain_device *device)
{
	int rc = TMF_RESP_FUNC_COMPLETE;
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int retry = HISI_SAS_DISK_RECOVER_CNT;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	struct sas_phy *local_phy;

	switch (device->dev_type) {
	case SAS_END_DEVICE:
		int_to_scsilun(0, &lun);

		tmf_task.tmf = TMF_CLEAR_TASK_SET;
		while (retry-- > 0) {
			rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun,
							  &tmf_task);
			if (rc == TMF_RESP_FUNC_COMPLETE) {
				hisi_sas_release_task(hisi_hba, device);
				break;
			}
		}
		break;
	case SAS_SATA_DEV:
	case SAS_SATA_PM:
	case SAS_SATA_PM_PORT:
	case SAS_SATA_PENDING:
		/*
		 * send HARD RESET to clear previous affiliation of
		 * STP target port
		 */
		local_phy = sas_get_local_phy(device);
		if (!scsi_is_sas_phy_local(local_phy) &&
		    !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
			unsigned long deadline = ata_deadline(jiffies, 20000);
			struct sata_device *sata_dev = &device->sata_dev;
			struct ata_host *ata_host = sata_dev->ata_host;
			struct ata_port_operations *ops = ata_host->ops;
			struct ata_port *ap = sata_dev->ap;
			struct ata_link *link;
			unsigned int classes;

			ata_for_each_link(link, ap, EDGE)
				rc = ops->hardreset(link, &classes,
						    deadline);
		}
		sas_put_local_phy(local_phy);
		if (rc) {
			dev_warn(dev, "SATA disk hardreset fail: %d\n", rc);
			return rc;
		}

		while (retry-- > 0) {
			rc = hisi_sas_softreset_ata_disk(device);
			if (!rc)
				break;
		}
		break;
	default:
		break;
	}

	return rc;
}

/*
 * libsas dev_found hook: allocate a driver device entry, program the
 * ITCT, and for expander-attached devices verify the parent expander
 * really has a phy attached to this SAS address.  On failure the
 * device is torn down via hisi_sas_dev_gone().
 */
static int hisi_sas_dev_found(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev;
	struct device *dev = hisi_hba->dev;
	int rc;

	if (hisi_hba->hw->alloc_dev)
		sas_dev = hisi_hba->hw->alloc_dev(device);
	else
		sas_dev = hisi_sas_alloc_dev(device);
	if (!sas_dev) {
		dev_err(dev, "fail alloc dev: max support %d devices\n",
			HISI_SAS_MAX_DEVICES);
		return -EINVAL;
	}

	device->lldd_dev = sas_dev;
	hisi_hba->hw->setup_itct(hisi_hba, sas_dev);

	if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
		int phy_no;
		u8 phy_num = parent_dev->ex_dev.num_phys;
		struct ex_phy *phy;

		for (phy_no = 0; phy_no < phy_num; phy_no++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_no];
			if (SAS_ADDR(phy->attached_sas_addr) ==
			    SAS_ADDR(device->sas_addr))
				break;
		}

		if (phy_no == phy_num) {
			dev_info(dev, "dev found: no attached "
				 "dev:%016llx at ex:%016llx\n",
				 SAS_ADDR(device->sas_addr),
				 SAS_ADDR(parent_dev->sas_addr));
			rc = -EINVAL;
			goto err_out;
		}
	}

	dev_info(dev, "dev[%d:%x] found\n",
		 sas_dev->device_id, sas_dev->dev_type);

	rc = hisi_sas_init_device(device);
	if (rc)
		goto err_out;
	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
	return 0;

err_out:
	hisi_sas_dev_gone(device);
	return rc;
}

/* slave_configure hook: cap queue depth at 64 for non-SATA devices. */
int hisi_sas_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int ret = sas_slave_configure(sdev);

	if (ret)
		return ret;
	if (!dev_is_sata(dev))
		sas_change_queue_depth(sdev, 64);

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_slave_configure);

void hisi_sas_scan_start(struct Scsi_Host *shost)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	hisi_hba->hw->phys_init(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_start);

int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);
	struct sas_ha_struct *sha = &hisi_hba->sha;

	/* Wait for PHY up interrupt to occur */
	if (time < HZ)
		return 0;

	sas_drain_work(sha);
	return 1;
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_finished);

/* Deferred phy-up handling: notify SL (SSP only) and report to libsas. */
static void hisi_sas_phyup_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct asd_sas_phy *sas_phy =
		&phy->sas_phy;
	int phy_no = sas_phy->id;

	if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP)
		hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no);
	hisi_sas_bytes_dmaed(hisi_hba, phy_no);
}

/* Deferred link reset, queued via HISI_PHYE_LINK_RESET. */
static void hisi_sas_linkreset_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
}

/* Per-event work handlers, indexed by enum hisi_sas_phy_event. */
static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
	[HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
	[HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
};

/* Queue a per-phy event handler on the driver workqueue. */
bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
			       enum hisi_sas_phy_event event)
{
	struct hisi_hba *hisi_hba = phy->hisi_hba;

	if (WARN_ON(event >= HISI_PHYES_NUM))
		return false;

	return queue_work(hisi_hba->wq, &phy->works[event]);
}
EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);

/* Watchdog: phy never came up after OOB completed; issue a link reset. */
static void hisi_sas_wait_phyup_timedout(struct timer_list *t)
{
	struct hisi_sas_phy *phy = from_timer(phy, t, timer);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct device *dev = hisi_hba->dev;
	int phy_no = phy->sas_phy.id;

	dev_warn(dev, "phy%d wait phyup timeout, issuing link reset\n", phy_no);
	hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
}

/* Arm the phy-up watchdog once the OOB handshake completes. */
void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct device *dev = hisi_hba->dev;

	if (!timer_pending(&phy->timer)) {
		dev_dbg(dev, "phy%d OOB ready\n", phy_no);
		phy->timer.expires = jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT * HZ;
		add_timer(&phy->timer);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready);

/* One-time initialisation of a phy's libsas and driver state. */
static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int i;

	phy->hisi_hba = hisi_hba;
	phy->port = NULL;
	phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
	phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
	sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
	sas_phy->id = phy_no;
	sas_phy->sas_addr = &hisi_hba->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
	sas_phy->lldd_phy = phy;

	for (i = 0; i < HISI_PHYES_NUM; i++)
		INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);

	spin_lock_init(&phy->lock);

	timer_setup(&phy->timer, hisi_sas_wait_phyup_timedout, 0);
}

/* Wrapper to ensure we track hisi_sas_phy.enable properly */
void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no, int enable)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *aphy = &phy->sas_phy;
	struct sas_phy *sphy = aphy->phy;
	unsigned long flags;

	spin_lock_irqsave(&phy->lock, flags);

	if (enable) {
		/* We may have been enabled already; if so, don't touch */
		if (!phy->enable)
			sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
		hisi_hba->hw->phy_start(hisi_hba, phy_no);
	} else {
		sphy->negotiated_linkrate = SAS_PHY_DISABLED;
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
	}
	phy->enable = enable;
	spin_unlock_irqrestore(&phy->lock, flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_enable);

/* libsas port-formed callback: bind the driver port to the phy. */
static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct hisi_sas_phy *phy =
		sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct hisi_sas_port *port;
	unsigned long flags;

	if (!sas_port)
		return;

	port = to_hisi_sas_port(sas_port);
	spin_lock_irqsave(&hisi_hba->lock, flags);
	port->port_attached = 1;
	port->id = phy->port_id;
	phy->port = port;
	sas_port->lldd_port = port;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
}

/* Complete @task as aborted and free its slot. */
static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
				     struct hisi_sas_slot *slot)
{
	if (task) {
		unsigned long flags;
		struct task_status_struct *ts;

		ts = &task->task_status;

		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_ABORTED_TASK;
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags &=
			~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
		if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP)
			task->task_state_flags |= SAS_TASK_STATE_DONE;
		spin_unlock_irqrestore(&task->task_state_lock, flags);
	}

	hisi_sas_slot_task_free(hisi_hba, task, slot);
}

/* Abort-complete every outstanding slot of @device. */
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
				  struct domain_device *device)
{
	struct hisi_sas_slot *slot, *slot2;
	struct hisi_sas_device *sas_dev = device->lldd_dev;

	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
		hisi_sas_do_release_task(hisi_hba, slot->task, slot);
}

/* Abort-complete all outstanding slots of every registered device. */
void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
		    !device)
			continue;

		hisi_sas_release_task(hisi_hba, device);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);

static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
				  struct domain_device *device)
{
	if (hisi_hba->hw->dereg_device)
		hisi_hba->hw->dereg_device(hisi_hba, device);
}

/*
 * libsas dev_gone hook: abort outstanding I/O, deregister the device
 * and clear its ITCT.  The entry is only recycled (marked
 * SAS_PHY_UNUSED) when the ITCT was cleared successfully.
 */
static void hisi_sas_dev_gone(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int ret = 0;

	dev_info(dev, "dev[%d:%x] is gone\n",
		 sas_dev->device_id, sas_dev->dev_type);

	down(&hisi_hba->sem);
	if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
		hisi_sas_internal_task_abort(hisi_hba, device,
					     HISI_SAS_INT_ABT_DEV, 0);

		hisi_sas_dereg_device(hisi_hba, device);

		ret = hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
		device->lldd_dev = NULL;
	}

	if (hisi_hba->hw->free_device)
		hisi_hba->hw->free_device(sas_dev);

	/* Don't mark it as SAS_PHY_UNUSED if failed to clear ITCT */
	if (!ret)
		sas_dev->dev_type = SAS_PHY_UNUSED;
	sas_dev->sas_device = NULL;
	up(&hisi_hba->sem);
}

static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
}

/*
 * Apply new min/max link rates to a phy.  Exactly one of the two rates
 * must be supplied (the other SAS_LINK_RATE_UNKNOWN); the missing bound
 * is taken from the phy's current setting.
 */
static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
				     struct sas_phy_linkrates *r)
{
	struct sas_phy_linkrates _r;

	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	enum sas_linkrate min, max;

	if (r->minimum_linkrate > SAS_LINK_RATE_1_5_GBPS)
		return -EINVAL;

	if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = sas_phy->phy->maximum_linkrate;
		min = r->minimum_linkrate;
	} else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = r->maximum_linkrate;
		min = sas_phy->phy->minimum_linkrate;
	} else
		return -EINVAL;
_r.maximum_linkrate = max; 1102 _r.minimum_linkrate = min; 1103 1104 sas_phy->phy->maximum_linkrate = max; 1105 sas_phy->phy->minimum_linkrate = min; 1106 1107 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 1108 msleep(100); 1109 hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r); 1110 hisi_sas_phy_enable(hisi_hba, phy_no, 1); 1111 1112 return 0; 1113 } 1114 1115 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, 1116 void *funcdata) 1117 { 1118 struct sas_ha_struct *sas_ha = sas_phy->ha; 1119 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; 1120 int phy_no = sas_phy->id; 1121 1122 switch (func) { 1123 case PHY_FUNC_HARD_RESET: 1124 hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no); 1125 break; 1126 1127 case PHY_FUNC_LINK_RESET: 1128 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 1129 msleep(100); 1130 hisi_sas_phy_enable(hisi_hba, phy_no, 1); 1131 break; 1132 1133 case PHY_FUNC_DISABLE: 1134 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 1135 break; 1136 1137 case PHY_FUNC_SET_LINK_RATE: 1138 return hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata); 1139 case PHY_FUNC_GET_EVENTS: 1140 if (hisi_hba->hw->get_events) { 1141 hisi_hba->hw->get_events(hisi_hba, phy_no); 1142 break; 1143 } 1144 /* fallthru */ 1145 case PHY_FUNC_RELEASE_SPINUP_HOLD: 1146 default: 1147 return -EOPNOTSUPP; 1148 } 1149 return 0; 1150 } 1151 1152 static void hisi_sas_task_done(struct sas_task *task) 1153 { 1154 del_timer(&task->slow_task->timer); 1155 complete(&task->slow_task->completion); 1156 } 1157 1158 static void hisi_sas_tmf_timedout(struct timer_list *t) 1159 { 1160 struct sas_task_slow *slow = from_timer(slow, t, timer); 1161 struct sas_task *task = slow->task; 1162 unsigned long flags; 1163 bool is_completed = true; 1164 1165 spin_lock_irqsave(&task->task_state_lock, flags); 1166 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { 1167 task->task_state_flags |= SAS_TASK_STATE_ABORTED; 1168 is_completed = false; 1169 } 1170 
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	/* Only complete here if the normal completion path did not run. */
	if (!is_completed)
		complete(&task->slow_task->completion);
}

#define TASK_TIMEOUT 20
#define TASK_RETRY 3
#define INTERNAL_ABORT_TIMEOUT 6
/*
 * Build and synchronously execute an internal TMF task (SSP TMF frame or
 * SATA reset FIS), retrying up to TASK_RETRY times.  Returns a TMF_RESP_*
 * code, a negative errno, or (for SAS_DATA_UNDERRUN) the residual count.
 */
static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
					   void *parameter, u32 para_len,
					   struct hisi_sas_tmf_task *tmf)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
	struct device *dev = hisi_hba->dev;
	struct sas_task *task;
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = device->tproto;

		/* parameter is a FIS for SATA devices, an SSP task otherwise */
		if (dev_is_sata(device)) {
			task->ata_task.device_control_reg_update = 1;
			memcpy(&task->ata_task.fis, parameter, para_len);
		} else {
			memcpy(&task->ssp_task, parameter, para_len);
		}
		task->task_done = hisi_sas_task_done;

		task->slow_task->timer.function = hisi_sas_tmf_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT * HZ;
		add_timer(&task->slow_task->timer);

		res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);

		if (res) {
			del_timer(&task->slow_task->timer);
			dev_err(dev, "abort tmf: executing internal task failed: %d\n",
				res);
			goto ex_err;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even TMF timed out, return direct. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				struct hisi_sas_slot *slot = task->lldd_task;

				dev_err(dev, "abort tmf: TMF task timeout and not done\n");
				if (slot) {
					struct hisi_sas_cq *cq =
					       &hisi_hba->cq[slot->dlvry_queue];
					/*
					 * sync irq to avoid free'ing task
					 * before using task in IO completion
					 */
					synchronize_irq(cq->irq_no);
					slot->task = NULL;
				}

				goto ex_err;
			} else
				dev_err(dev, "abort tmf: TMF task timeout\n");
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun
			 */
			dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x sts 0x%x underrun\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_OVERRUN) {
			dev_warn(dev, "abort tmf: blocked task error\n");
			res = -EMSGSIZE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_OPEN_REJECT) {
			dev_warn(dev, "abort tmf: open reject failed\n");
			res = -EIO;
		} else {
			dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x status 0x%x\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
		}
		/* Unhandled status: free this attempt's task and retry. */
		sas_free_task(task);
		task = NULL;
	}
ex_err:
	if (retry == TASK_RETRY)
		dev_warn(dev, "abort tmf: executing internal task failed!\n");
	sas_free_task(task);
	return res;
}

/* Build a device-reset FIS, asserting or de-asserting SRST per @reset. */
static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
		bool reset, int pmp, u8 *fis)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	if (reset)
		tf.ctl |= ATA_SRST;
	else
		tf.ctl &= ~ATA_SRST;
	tf.command = ATA_CMD_DEV_RESET;
	ata_tf_to_fis(&tf, pmp, 0, fis);
}

/*
 * Software-reset an ATA disk: assert SRST on every edge link, then
 * de-assert it, and release the device's outstanding tasks on success.
 */
static int hisi_sas_softreset_ata_disk(struct domain_device *device)
{
	u8 fis[20] = {0};
	struct ata_port *ap = device->sata_dev.ap;
	struct ata_link *link;
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);

	ata_for_each_link(link, ap, EDGE) {
		int pmp = sata_srst_pmp(link);

		hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
		rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			break;
	}

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis,
							     s, NULL);
			if (rc != TMF_RESP_FUNC_COMPLETE)
				dev_err(dev, "ata disk de-reset failed\n");
		}
	} else {
		dev_err(dev, "ata disk reset failed\n");
	}

	if (rc == TMF_RESP_FUNC_COMPLETE)
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

/*
 * Issue an SSP TMF to @lun on @device; rejects non-SSP devices with
 * TMF_RESP_FUNC_ESUPP.
 */
static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
				u8 *lun, struct hisi_sas_tmf_task *tmf)
{
	struct sas_ssp_task ssp_task;

	if (!(device->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	memcpy(ssp_task.LUN, lun, 8);

	return
hisi_sas_exec_internal_tmf_task(device, &ssp_task,
				       sizeof(ssp_task), tmf);
}

/*
 * After a controller reset, re-associate each registered device's port with
 * the PHY that is currently up (per the HW phys-state bitmap) and rewrite
 * its ITCT entry; ports with no live PHY get the invalid id 0xff.
 */
static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
{
	u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;
		struct asd_sas_port *sas_port;
		struct hisi_sas_port *port;
		struct hisi_sas_phy *phy = NULL;
		struct asd_sas_phy *sas_phy;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED)
				|| !device || !device->port)
			continue;

		sas_port = device->port;
		port = to_hisi_sas_port(sas_port);

		/* Pick the first PHY of the port that is currently up. */
		list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
			if (state & BIT(sas_phy->id)) {
				phy = sas_phy->lldd_phy;
				break;
			}

		if (phy) {
			port->id = phy->port_id;

			/* Update linkrate of directly attached device. */
			if (!device->parent)
				device->linkrate = phy->sas_phy.linkrate;

			hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
		} else
			port->id = 0xff;
	}
}

/*
 * After reset, walk every enabled PHY: PHYs that stayed up trigger a
 * broadcast event on their (expander) port so libsas revalidates the
 * domain; PHYs that went down are reported via hisi_sas_phy_down().
 */
static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state)
{
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct asd_sas_port *_sas_port = NULL;
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		struct asd_sas_port *sas_port = sas_phy->port;
		/* Only notify once per port (wide ports share sas_port). */
		bool do_port_check = _sas_port != sas_port;

		if (!sas_phy->phy->enabled)
			continue;

		/* Report PHY state change to libsas */
		if (state & BIT(phy_no)) {
			if (do_port_check && sas_port && sas_port->port_dev) {
				struct domain_device *dev = sas_port->port_dev;

				_sas_port = sas_port;

				if (dev_is_expander(dev->dev_type))
					sas_ha->notify_port_event(sas_phy,
							PORTE_BROADCAST_RCVD);
			}
		} else {
			hisi_sas_phy_down(hisi_hba, phy_no, 0);
		}

	}
}

/* Re-run LLDD device init for every registered device (post-reset). */
static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
			continue;

		hisi_sas_init_device(device);
	}
}

/*
 * Send a per-PHY forced ATA SRST to @device on every PHY of @sas_port that
 * is currently up (force_phy pins the FIS delivery to one PHY).
 */
static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba,
					     struct asd_sas_port *sas_port,
					     struct domain_device *device)
{
	struct hisi_sas_tmf_task tmf_task = { .force_phy = 1 };
	struct ata_port *ap = device->sata_dev.ap;
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);
	int rc = TMF_RESP_FUNC_FAILED;
	struct asd_sas_phy *sas_phy;
	struct ata_link *link;
	u8 fis[20] = {0};
	u32 state;

	state = hisi_hba->hw->get_phys_state(hisi_hba);
	list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) {
		if (!(state & BIT(sas_phy->id)))
			continue;

		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			tmf_task.phy_id = sas_phy->id;
			hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis, s,
							     &tmf_task);
			if (rc != TMF_RESP_FUNC_COMPLETE) {
				dev_err(dev, "phy%d ata reset failed rc=%d\n",
					sas_phy->id, rc);
				break;
			}
		}
	}
}

/*
 * For controllers that reject STP links (reject_stp_links_msk): abort all
 * outstanding per-device commands, then reset one SATA device behind each
 * expander port so pending STP connections are terminated.
 */
static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int port_no, rc, i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device =
sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
			continue;

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0)
			dev_err(dev, "STP reject: abort dev failed %d\n", rc);
	}

	for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) {
		struct hisi_sas_port *port = &hisi_hba->port[port_no];
		struct asd_sas_port *sas_port = &port->sas_port;
		struct domain_device *port_dev = sas_port->port_dev;
		struct domain_device *device;

		if (!port_dev || !dev_is_expander(port_dev->dev_type))
			continue;

		/* Try to find a SATA device */
		list_for_each_entry(device, &sas_port->dev_list,
				    dev_list_node) {
			if (dev_is_sata(device)) {
				hisi_sas_send_ata_reset_each_phy(hisi_hba,
								 sas_port,
								 device);
				break;
			}
		}
	}
}

/*
 * Quiesce the host before a controller soft reset: take the reset
 * semaphore (released in the *_done/error paths), snapshot the PHY state,
 * block new SCSI requests, drain in-flight commands, and set REJECT_CMD so
 * nothing new reaches the hardware.
 */
void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
{
	struct Scsi_Host *shost = hisi_hba->shost;

	down(&hisi_hba->sem);
	hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba);

	scsi_block_requests(shost);
	hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);

	if (timer_pending(&hisi_hba->timer))
		del_timer_sync(&hisi_hba->timer);

	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare);

/*
 * Bring the host back after a successful soft reset: restart the PHYs,
 * refresh port/ITCT state, re-init devices, then unblock the host and
 * rescan the topology for link changes that happened during reset.
 */
void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
{
	struct Scsi_Host *shost = hisi_hba->shost;
	u32 state;

	/* Init and wait for PHYs to come up and all libsas event finished. */
	hisi_hba->hw->phys_init(hisi_hba);
	msleep(1000);
	hisi_sas_refresh_port_id(hisi_hba);
	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);

	if (hisi_hba->reject_stp_links_msk)
		hisi_sas_terminate_stp_reject(hisi_hba);
	hisi_sas_reset_init_all_devices(hisi_hba);
	up(&hisi_hba->sem);
	scsi_unblock_requests(shost);
	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);

	state = hisi_hba->hw->get_phys_state(hisi_hba);
	hisi_sas_rescan_topology(hisi_hba, state);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);

/*
 * Full controller reset: prepare, HW soft reset, done.  RESET_BIT acts as
 * the single-owner latch; returns -1 if reset is unsupported or already in
 * progress, otherwise the soft_reset result.
 */
static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct Scsi_Host *shost = hisi_hba->shost;
	int rc;

	/* Snapshot debugfs state before the reset clobbers it. */
	if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
		queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);

	if (!hisi_hba->hw->soft_reset)
		return -1;

	if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
		return -1;

	dev_info(dev, "controller resetting...\n");
	hisi_sas_controller_reset_prepare(hisi_hba);

	rc = hisi_hba->hw->soft_reset(hisi_hba);
	if (rc) {
		/* Undo everything reset_prepare() set up. */
		dev_warn(dev, "controller reset failed (%d)\n", rc);
		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
		up(&hisi_hba->sem);
		scsi_unblock_requests(shost);
		clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
		return rc;
	}

	hisi_sas_controller_reset_done(hisi_hba);
	dev_info(dev, "controller reset complete\n");

	return 0;
}

/*
 * libsas ->lldd_abort_task: abort one task.  Strategy depends on protocol:
 * SSP uses a TMF plus internal abort, SATA/STP resets the whole device,
 * SMP uses an internal abort of the single command.
 */
static int hisi_sas_abort_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba;
	struct device *dev;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	if (!sas_dev)
		return
TMF_RESP_FUNC_FAILED;

	hisi_hba = dev_to_hisi_hba(task->dev);
	dev = hisi_hba->dev;

	spin_lock_irqsave(&task->task_state_lock, flags);
	/* Task already completed: nothing to abort, just sync the IRQ. */
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		struct hisi_sas_slot *slot = task->lldd_task;
		struct hisi_sas_cq *cq;

		if (slot) {
			/*
			 * sync irq to avoid free'ing task
			 * before using task in IO completion
			 */
			cq = &hisi_hba->cq[slot->dlvry_queue];
			synchronize_irq(cq->irq_no);
		}
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct hisi_sas_slot *slot = task->lldd_task;
		u16 tag = slot->idx;
		int rc2;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = tag;

		rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
						  &tmf_task);

		rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
						   HISI_SAS_INT_ABT_CMD, tag);
		if (rc2 < 0) {
			dev_err(dev, "abort task: internal abort (%d)\n", rc2);
			return TMF_RESP_FUNC_FAILED;
		}

		/*
		 * If the TMF finds that the IO is not in the device and also
		 * the internal abort does not succeed, then it is safe to
		 * free the slot.
		 * Note: if the internal abort succeeds then the slot
		 * will have already been completed
		 */
		if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
			if (task->lldd_task)
				hisi_sas_do_release_task(hisi_hba, task, slot);
		}
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		   task->task_proto & SAS_PROTOCOL_STP) {
		if (task->dev->dev_type == SAS_SATA_DEV) {
			/* No per-command abort for SATA: reset the device. */
			rc = hisi_sas_internal_task_abort(hisi_hba, device,
							  HISI_SAS_INT_ABT_DEV,
							  0);
			if (rc < 0) {
				dev_err(dev, "abort task: internal abort failed\n");
				goto out;
			}
			hisi_sas_dereg_device(hisi_hba, device);
			rc = hisi_sas_softreset_ata_disk(device);
		}
	} else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;
		struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_CMD, tag);
		if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
					task->lldd_task) {
			/*
			 * sync irq to avoid free'ing task
			 * before using task in IO completion
			 */
			synchronize_irq(cq->irq_no);
			slot->task = NULL;
		}
	}

out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_notice(dev, "abort task: rc=%d\n", rc);
	return rc;
}

/*
 * libsas ->lldd_abort_task_set: abort everything queued to the device,
 * then issue an ABORT TASK SET TMF; release remaining tasks on success.
 */
static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_tmf_task tmf_task;
	int rc;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	tmf_task.tmf = TMF_ABORT_TASK_SET;
	rc = hisi_sas_debug_issue_ssp_tmf(device,
lun, &tmf_task); 1722 1723 if (rc == TMF_RESP_FUNC_COMPLETE) 1724 hisi_sas_release_task(hisi_hba, device); 1725 1726 return rc; 1727 } 1728 1729 static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun) 1730 { 1731 struct hisi_sas_tmf_task tmf_task; 1732 int rc; 1733 1734 tmf_task.tmf = TMF_CLEAR_ACA; 1735 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task); 1736 1737 return rc; 1738 } 1739 1740 static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device) 1741 { 1742 struct sas_phy *local_phy = sas_get_local_phy(device); 1743 struct hisi_sas_device *sas_dev = device->lldd_dev; 1744 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1745 struct sas_ha_struct *sas_ha = &hisi_hba->sha; 1746 DECLARE_COMPLETION_ONSTACK(phyreset); 1747 int rc, reset_type; 1748 1749 if (!local_phy->enabled) { 1750 sas_put_local_phy(local_phy); 1751 return -ENODEV; 1752 } 1753 1754 if (scsi_is_sas_phy_local(local_phy)) { 1755 struct asd_sas_phy *sas_phy = 1756 sas_ha->sas_phy[local_phy->number]; 1757 struct hisi_sas_phy *phy = 1758 container_of(sas_phy, struct hisi_sas_phy, sas_phy); 1759 phy->in_reset = 1; 1760 phy->reset_completion = &phyreset; 1761 } 1762 1763 reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT || 1764 !dev_is_sata(device)) ? 
true : false; 1765 1766 rc = sas_phy_reset(local_phy, reset_type); 1767 sas_put_local_phy(local_phy); 1768 1769 if (scsi_is_sas_phy_local(local_phy)) { 1770 struct asd_sas_phy *sas_phy = 1771 sas_ha->sas_phy[local_phy->number]; 1772 struct hisi_sas_phy *phy = 1773 container_of(sas_phy, struct hisi_sas_phy, sas_phy); 1774 int ret = wait_for_completion_timeout(&phyreset, 2 * HZ); 1775 unsigned long flags; 1776 1777 spin_lock_irqsave(&phy->lock, flags); 1778 phy->reset_completion = NULL; 1779 phy->in_reset = 0; 1780 spin_unlock_irqrestore(&phy->lock, flags); 1781 1782 /* report PHY down if timed out */ 1783 if (!ret) 1784 hisi_sas_phy_down(hisi_hba, sas_phy->id, 0); 1785 } else if (sas_dev->dev_status != HISI_SAS_DEV_INIT) { 1786 /* 1787 * If in init state, we rely on caller to wait for link to be 1788 * ready; otherwise, except phy reset is fail, delay. 1789 */ 1790 if (!rc) 1791 msleep(2000); 1792 } 1793 1794 return rc; 1795 } 1796 1797 static int hisi_sas_I_T_nexus_reset(struct domain_device *device) 1798 { 1799 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1800 struct device *dev = hisi_hba->dev; 1801 int rc; 1802 1803 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1804 HISI_SAS_INT_ABT_DEV, 0); 1805 if (rc < 0) { 1806 dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc); 1807 return TMF_RESP_FUNC_FAILED; 1808 } 1809 hisi_sas_dereg_device(hisi_hba, device); 1810 1811 if (dev_is_sata(device)) { 1812 rc = hisi_sas_softreset_ata_disk(device); 1813 if (rc == TMF_RESP_FUNC_FAILED) 1814 return TMF_RESP_FUNC_FAILED; 1815 } 1816 1817 rc = hisi_sas_debug_I_T_nexus_reset(device); 1818 1819 if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) 1820 hisi_sas_release_task(hisi_hba, device); 1821 1822 return rc; 1823 } 1824 1825 static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun) 1826 { 1827 struct hisi_sas_device *sas_dev = device->lldd_dev; 1828 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1829 struct device *dev = hisi_hba->dev; 
	int rc = TMF_RESP_FUNC_FAILED;

	/* Clear internal IO and then lu reset */
	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "lu_reset: internal abort failed\n");
		goto out;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	if (dev_is_sata(device)) {
		struct sas_phy *phy;

		phy = sas_get_local_phy(device);

		/* SATA has no LU RESET TMF: hard-reset the PHY instead. */
		rc = sas_phy_reset(phy, true);

		if (rc == 0)
			hisi_sas_release_task(hisi_hba, device);
		sas_put_local_phy(phy);
	} else {
		struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };

		rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
		if (rc == TMF_RESP_FUNC_COMPLETE)
			hisi_sas_release_task(hisi_hba, device);
	}
out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
			sas_dev->device_id, rc);
	return rc;
}

/*
 * libsas ->lldd_clear_nexus_ha: run the controller reset work, then reset
 * every directly registered end device and release all tasks.
 */
static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct device *dev = hisi_hba->dev;
	HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
	int rc, i;

	queue_work(hisi_hba->wq, &r.work);
	wait_for_completion(r.completion);
	if (!r.done)
		return TMF_RESP_FUNC_FAILED;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device ||
		    dev_is_expander(device->dev_type))
			continue;

		rc = hisi_sas_debug_I_T_nexus_reset(device);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			dev_info(dev, "clear nexus ha: for device[%d] rc=%d\n",
				 sas_dev->device_id, rc);
	}

	hisi_sas_release_tasks(hisi_hba);

	return TMF_RESP_FUNC_COMPLETE;
}

/*
 * libsas ->lldd_query_task: QUERY TASK TMF for an SSP command; any result
 * other than SUCC/FAILED/COMPLETE is normalised to FAILED.
 */
static int hisi_sas_query_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct domain_device *device = task->dev;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = tag;

		rc = hisi_sas_debug_issue_ssp_tmf(device,
						  lun.scsi_lun,
						  &tmf_task);
		switch (rc) {
		/* The task is still in Lun, release it then */
		case TMF_RESP_FUNC_SUCC:
		/* The task is not in Lun or failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			break;
		default:
			rc = TMF_RESP_FUNC_FAILED;
			break;
		}
	}
	return rc;
}

/*
 * Allocate a delivery-queue slot and send an internal abort command (for a
 * single tag or a whole device) to the chip via @dq.  Fails with -EINVAL
 * while the controller is rejecting commands.
 */
static int
hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
				  struct sas_task *task, int abort_flag,
				  int task_tag, struct hisi_sas_dq *dq)
{
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
	unsigned long flags;
	int wr_q_index;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
		return -EINVAL;

	if (!device->port)
		return -1;

	port = to_hisi_sas_port(sas_port);

	/* simply get a slot and send abort command */
	rc = hisi_sas_slot_index_alloc(hisi_hba, NULL);
	if (rc < 0)
		goto err_out;

	slot_idx = rc;
	slot = &hisi_hba->slot_info[slot_idx];

	spin_lock(&dq->lock);
	wr_q_index = dq->wr_point;
	dq->wr_point =
(dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
	list_add_tail(&slot->delivery, &dq->list);
	spin_unlock(&dq->lock);
	spin_lock(&sas_dev->lock);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock(&sas_dev->lock);

	dlvry_queue = dq->id;
	dlvry_queue_slot = wr_q_index;

	slot->device_id = sas_dev->device_id;
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	slot->is_internal = true;
	task->lldd_task = slot;

	/* Zero the command header/table and status buffer before prep. */
	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0,
	       sizeof(struct hisi_sas_err_record));

	hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
				 abort_flag, task_tag);

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	WRITE_ONCE(slot->ready, 1);
	/* send abort command to the chip */
	spin_lock(&dq->lock);
	hisi_hba->hw->start_delivery(dq);
	spin_unlock(&dq->lock);

	return 0;

err_out:
	dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);

	return rc;
}

/**
 * _hisi_sas_internal_task_abort -- execute an internal
 * abort command for single IO command or a device
 * @hisi_hba: host controller struct
 * @device: domain device
 * @abort_flag: mode of operation, device or single IO
 * @tag: tag of IO to be aborted (only relevant to single
 *       IO mode)
 * @dq: delivery queue for this internal abort command
 */
static int
_hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			      struct domain_device *device, int abort_flag,
			      int tag, struct hisi_sas_dq *dq)
{
	struct sas_task *task;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	int res;

	/*
	 * The interface is not realized means this HW don't support internal
	 * abort, or don't need to do internal abort. Then here, we return
	 * TMF_RESP_FUNC_FAILED and let other steps go on, which depends that
	 * the internal abort has been executed and returned CQ.
	 */
	if (!hisi_hba->hw->prep_abort)
		return TMF_RESP_FUNC_FAILED;

	task = sas_alloc_slow_task(GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	task->dev = device;
	task->task_proto = device->tproto;
	task->task_done = hisi_sas_task_done;
	task->slow_task->timer.function = hisi_sas_tmf_timedout;
	task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT * HZ;
	add_timer(&task->slow_task->timer);

	res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
						task, abort_flag, tag, dq);
	if (res) {
		del_timer(&task->slow_task->timer);
		dev_err(dev, "internal task abort: executing internal task failed: %d\n",
			res);
		goto exit;
	}
	wait_for_completion(&task->slow_task->completion);
	res = TMF_RESP_FUNC_FAILED;

	/* Internal abort timed out */
	if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
		/* Capture debugfs state for post-mortem on timeout. */
		if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
			queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);

		if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
			struct hisi_sas_slot *slot = task->lldd_task;

			if (slot) {
				struct hisi_sas_cq *cq =
					&hisi_hba->cq[slot->dlvry_queue];
				/*
				 * sync irq to avoid free'ing task
				 * before using task in IO completion
				 */
				synchronize_irq(cq->irq_no);
				slot->task = NULL;
			}
			dev_err(dev, "internal task abort: timeout and not done.\n");

			res = -EIO;
			goto exit;
		} else
			dev_err(dev, "internal task abort: timeout.\n");
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
	    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
		res = TMF_RESP_FUNC_COMPLETE;
		goto exit;
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
	    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
		res = TMF_RESP_FUNC_SUCC;
		goto exit;
	}

exit:
	dev_dbg(dev, "internal task abort: task to dev %016llx task=%pK resp: 0x%x sts 0x%x\n",
		SAS_ADDR(device->sas_addr), task,
		task->task_status.resp, /* 0 is complete, -1 is undelivered */
		task->task_status.stat);
	sas_free_task(task);

	return res;
}

/*
 * Front end for internal aborts: a single-command abort goes to the queue
 * the command was delivered on; a device abort is fanned out to every
 * delivery queue whose CQ IRQ can fire on an online CPU.
 */
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag)
{
	struct hisi_sas_slot *slot;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_dq *dq;
	int i, rc;

	switch (abort_flag) {
	case HISI_SAS_INT_ABT_CMD:
		slot = &hisi_hba->slot_info[tag];
		dq = &hisi_hba->dq[slot->dlvry_queue];
		return _hisi_sas_internal_task_abort(hisi_hba, device,
						     abort_flag, tag, dq);
	case HISI_SAS_INT_ABT_DEV:
		for (i = 0; i < hisi_hba->cq_nvecs; i++) {
			struct hisi_sas_cq *cq = &hisi_hba->cq[i];
			const struct cpumask *mask = cq->irq_mask;

			if (mask && !cpumask_intersects(cpu_online_mask, mask))
				continue;
			dq = &hisi_hba->dq[i];
			rc = _hisi_sas_internal_task_abort(hisi_hba, device,
							   abort_flag, tag,
							   dq);
			if (rc)
				return rc;
		}
		break;
	default:
		dev_err(dev, "Unrecognised internal abort flag (%d)\n",
			abort_flag);
		return -EINVAL;
	}

	return 0;
}

/* libsas ->lldd_port_formed: forward port formation to the LLDD. */
static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
{
	hisi_sas_port_notify_formed(sas_phy);
}

static int
hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
			u8 reg_index, u8 reg_count, u8 *write_data)
{
	struct hisi_hba *hisi_hba = sha->lldd_ha;

	/* Optional HW hook: SGPIO/GPIO register write. */
	if (!hisi_hba->hw->write_gpio)
		return -EOPNOTSUPP;

	return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
					reg_index, reg_count, write_data);
}

/*
 * Reset per-PHY attachment state and publish the negotiated linkrate as
 * UNKNOWN (PHY still enabled) or DISABLED.
 */
static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
{
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_phy *sphy = sas_phy->phy;
	unsigned long flags;

	phy->phy_attached = 0;
	phy->phy_type = 0;
	phy->port = NULL;

	spin_lock_irqsave(&phy->lock, flags);
	if (phy->enable)
		sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
	else
		sphy->negotiated_linkrate = SAS_PHY_DISABLED;
	spin_unlock_irqrestore(&phy->lock, flags);
}

/*
 * Handle a PHY-down event.  @rdy != 0 means the PHY is coming back (report
 * attachment instead); otherwise notify libsas of loss of signal and tear
 * down port/PHY state — unless a reset is in progress, in which case the
 * transient ("flutter") down event is ignored.
 */
void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct device *dev = hisi_hba->dev;

	if (rdy) {
		/* Phy down but ready */
		hisi_sas_bytes_dmaed(hisi_hba, phy_no);
		hisi_sas_port_notify_formed(sas_phy);
	} else {
		struct hisi_sas_port *port = phy->port;

		if (test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags) ||
		    phy->in_reset) {
			dev_info(dev, "ignore flutter phy%d down\n", phy_no);
			return;
		}
		/* Phy down and not ready */
		sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
		sas_phy_disconnected(sas_phy);

		if (port) {
			/* A SAS wide port stays attached while any PHY is up. */
			if (phy->phy_type & PORT_TYPE_SAS) {
				int port_id = port->id;

				if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
								       port_id))
					port->port_attached = 0;
			} else if (phy->phy_type & PORT_TYPE_SATA)
				port->port_attached = 0;
		}
		hisi_sas_phy_disconnected(phy);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);

/*
 * Wait for all in-flight completion-queue interrupt handlers to finish.
 * Used by reset paths to quiesce CQ processing.
 */
void hisi_sas_sync_irqs(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->cq_nvecs; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];

		synchronize_irq(cq->irq_no);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_sync_irqs);

/*
 * SCSI midlayer host reset hook: schedule an asynchronous controller
 * reset.  Only SCSI_ADAPTER_RESET is supported.
 */
int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	if (reset_type != SCSI_ADAPTER_RESET)
		return -EOPNOTSUPP;

	/* reset runs from hisi_sas_rst_work_handler() on hisi_hba->wq */
	queue_work(hisi_hba->wq, &hisi_hba->rst_work);

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_host_reset);

/* SAS transport template shared by all HW-version front-end drivers */
struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);

/* libsas LLDD entry points common to all HW versions */
static struct sas_domain_function_template hisi_sas_transport_ops = {
	.lldd_dev_found		= hisi_sas_dev_found,
	.lldd_dev_gone		= hisi_sas_dev_gone,
	.lldd_execute_task	= hisi_sas_queue_command,
	.lldd_control_phy	= hisi_sas_control_phy,
	.lldd_abort_task	= hisi_sas_abort_task,
	.lldd_abort_task_set	= hisi_sas_abort_task_set,
	.lldd_clear_aca		= hisi_sas_clear_aca,
	.lldd_I_T_nexus_reset	= hisi_sas_I_T_nexus_reset,
	.lldd_lu_reset		= hisi_sas_lu_reset,
	.lldd_query_task	= hisi_sas_query_task,
	.lldd_clear_nexus_ha	= hisi_sas_clear_nexus_ha,
	.lldd_port_formed	= hisi_sas_port_formed,
	.lldd_write_gpio	= hisi_sas_write_gpio,
};

/**
 * hisi_sas_init_mem - (re)initialise the DMA-visible controller structures
 * @hisi_hba: host controller instance
 *
 * Zeroes command/completion queues, initial FIS area, IOST, breakpoint and
 * SATA breakpoint tables and resets the SW queue read/write pointers.
 * Called at init and again after controller reset; all buffers must have
 * been allocated by hisi_sas_alloc() beforehand.
 */
void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
{
	int i, s, j, max_command_entries = HISI_SAS_MAX_COMMANDS;
	struct hisi_sas_breakpoint *sata_breakpoint = hisi_hba->sata_breakpoint;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];
		struct hisi_sas_cmd_hdr *cmd_hdr = hisi_hba->cmd_hdr[i];

		/* clear delivery queue entries one header at a time */
		s = sizeof(struct hisi_sas_cmd_hdr);
		for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++)
			memset(&cmd_hdr[j], 0, s);

		dq->wr_point = 0;

		/* completion header size differs per HW version */
		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		memset(hisi_hba->complete_hdr[i], 0, s);
		cq->rd_point = 0;
	}

	s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
	memset(hisi_hba->initial_fis, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	memset(hisi_hba->iost, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	memset(hisi_hba->breakpoint, 0, s);

	s = sizeof(struct hisi_sas_sata_breakpoint);
	for (j = 0; j < HISI_SAS_MAX_ITCT_ENTRIES; j++)
		memset(&sata_breakpoint[j], 0, s);
}
EXPORT_SYMBOL_GPL(hisi_sas_init_mem);

/**
 * hisi_sas_alloc - allocate all per-host software and DMA resources
 * @hisi_hba: host controller instance
 *
 * Initialises phys/ports/devices bookkeeping, then allocates the delivery
 * and completion queues, ITCT, slot info, slot buffers, IOST, breakpoint
 * tables, tag bitmap, initial FIS area and the per-host workqueue.
 *
 * All memory is device-managed (dmam_/devm_), so the error path needs no
 * explicit unwind and simply returns -ENOMEM; only the workqueue (freed in
 * hisi_sas_free()) is not devm-managed.
 */
int hisi_sas_alloc(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int i, j, s, max_command_entries = HISI_SAS_MAX_COMMANDS;
	int max_command_entries_ru, sz_slot_buf_ru;
	int blk_cnt, slots_per_blk;

	sema_init(&hisi_hba->sem, 1);
	spin_lock_init(&hisi_hba->lock);
	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_init(hisi_hba, i);
		hisi_hba->port[i].port_attached = 0;
		hisi_hba->port[i].id = -1;
	}

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
		hisi_hba->devices[i].device_id = i;
		hisi_hba->devices[i].dev_status = HISI_SAS_DEV_INIT;
	}

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];

		/* Completion queue structure */
		cq->id = i;
		cq->hisi_hba = hisi_hba;

		/* Delivery queue structure */
		spin_lock_init(&dq->lock);
		INIT_LIST_HEAD(&dq->list);
		dq->id = i;
		dq->hisi_hba = hisi_hba;

		/* Delivery queue */
		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s,
						&hisi_hba->cmd_hdr_dma[i],
						GFP_KERNEL);
		if (!hisi_hba->cmd_hdr[i])
			goto err_out;

		/* Completion queue */
		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s,
						&hisi_hba->complete_hdr_dma[i],
						GFP_KERNEL);
		if (!hisi_hba->complete_hdr[i])
			goto err_out;
	}

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
	hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma,
					     GFP_KERNEL);
	if (!hisi_hba->itct)
		goto err_out;

	hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
					   sizeof(struct hisi_sas_slot),
					   GFP_KERNEL);
	if (!hisi_hba->slot_info)
		goto err_out;

	/* roundup to avoid overly large block size */
	max_command_entries_ru = roundup(max_command_entries, 64);
	if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK)
		sz_slot_buf_ru = sizeof(struct hisi_sas_slot_dif_buf_table);
	else
		sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table);
	sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64);
	/* block size: smallest page-aligned common multiple of both sizes */
	s = max(lcm(max_command_entries_ru, sz_slot_buf_ru), PAGE_SIZE);
	blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s;
	slots_per_blk = s / sz_slot_buf_ru;

	/* carve each coherent block into per-slot buffers */
	for (i = 0; i < blk_cnt; i++) {
		int slot_index = i * slots_per_blk;
		dma_addr_t buf_dma;
		void *buf;

		buf = dmam_alloc_coherent(dev, s, &buf_dma,
					  GFP_KERNEL);
		if (!buf)
			goto err_out;

		for (j = 0; j < slots_per_blk; j++, slot_index++) {
			struct hisi_sas_slot *slot;

			slot = &hisi_hba->slot_info[slot_index];
			slot->buf = buf;
			slot->buf_dma = buf_dma;
			slot->idx = slot_index;

			buf += sz_slot_buf_ru;
			buf_dma += sz_slot_buf_ru;
		}
	}

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma,
					     GFP_KERNEL);
	if (!hisi_hba->iost)
		goto err_out;

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	hisi_hba->breakpoint = dmam_alloc_coherent(dev, s,
						   &hisi_hba->breakpoint_dma,
						   GFP_KERNEL);
	if (!hisi_hba->breakpoint)
		goto err_out;

	/* IPTT tag bitmap, one bit per possible command */
	hisi_hba->slot_index_count = max_command_entries;
	s = hisi_hba->slot_index_count / BITS_PER_BYTE;
	hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
	if (!hisi_hba->slot_index_tags)
		goto err_out;

	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
	hisi_hba->initial_fis = dmam_alloc_coherent(dev, s,
						    &hisi_hba->initial_fis_dma,
						    GFP_KERNEL);
	if (!hisi_hba->initial_fis)
		goto err_out;

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
	hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s,
					&hisi_hba->sata_breakpoint_dma,
					GFP_KERNEL);
	if (!hisi_hba->sata_breakpoint)
		goto err_out;

	hisi_sas_slot_index_init(hisi_hba);
	hisi_hba->last_slot_index = HISI_SAS_UNRESERVED_IPTT;

	hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
	if (!hisi_hba->wq) {
		dev_err(dev, "sas_alloc: failed to create workqueue\n");
		goto err_out;
	}

	return 0;
err_out:
	/* all allocations above are managed; nothing to unwind here */
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(hisi_sas_alloc);

/*
 * Release the non-devm resources taken in hisi_sas_alloc(): stop the
 * per-phy timers and destroy the per-host workqueue.
 */
void hisi_sas_free(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[i];

		del_timer_sync(&phy->timer);
	}

	if (hisi_hba->wq)
		destroy_workqueue(hisi_hba->wq);
}
EXPORT_SYMBOL_GPL(hisi_sas_free);

/* Fire-and-forget controller reset, queued from hisi_sas_host_reset() */
void hisi_sas_rst_work_handler(struct work_struct *work)
{
	struct hisi_hba *hisi_hba =
		container_of(work, struct hisi_hba, rst_work);

	hisi_sas_controller_reset(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);

void hisi_sas_sync_rst_work_handler(struct work_struct *work)
{
	struct hisi_sas_rst *rst =
		container_of(work, struct hisi_sas_rst, work);

	/* record success for the waiter, then wake it unconditionally */
	if (!hisi_sas_controller_reset(rst->hisi_hba))
		rst->done = true;
	complete(rst->completion);
}
EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);

/**
 * hisi_sas_get_fw_info - read controller configuration from firmware
 * @hisi_hba: host controller instance
 *
 * Reads the SAS address, phy count and queue count (and, for DT-based
 * platform devices, the syscon handle and reset/clock register offsets)
 * via the unified device-property API, so it works for both DT and ACPI.
 * The reference clock is optional; its rate, when present, is cached in
 * MHz.  Returns 0 on success or -ENOENT if a mandatory property is absent.
 */
int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct platform_device *pdev = hisi_hba->platform_dev;
	struct device_node *np = pdev ? pdev->dev.of_node : NULL;
	struct clk *refclk;

	if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
					  SAS_ADDR_SIZE)) {
		dev_err(dev, "could not get property sas-addr\n");
		return -ENOENT;
	}

	if (np) {
		/*
		 * These properties are only required for platform device-based
		 * controller with DT firmware.
		 */
		hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
					"hisilicon,sas-syscon");
		if (IS_ERR(hisi_hba->ctrl)) {
			dev_err(dev, "could not get syscon\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-reg",
					     &hisi_hba->ctrl_reset_reg)) {
			dev_err(dev, "could not get property ctrl-reset-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
					     &hisi_hba->ctrl_reset_sts_reg)) {
			dev_err(dev, "could not get property ctrl-reset-sts-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
					     &hisi_hba->ctrl_clock_ena_reg)) {
			dev_err(dev, "could not get property ctrl-clock-ena-reg\n");
			return -ENOENT;
		}
	}

	/* ref clock is optional */
	refclk = devm_clk_get(dev, NULL);
	if (IS_ERR(refclk))
		dev_dbg(dev, "no ref clk property\n");
	else
		hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;

	if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
		dev_err(dev, "could not get property phy-count\n");
		return -ENOENT;
	}

	if (device_property_read_u32(dev, "queue-count",
				     &hisi_hba->queue_count)) {
		dev_err(dev, "could not get property queue-count\n");
		return -ENOENT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);

/*
 * Allocate the Scsi_Host for a platform device based controller and bring
 * up everything that does not need the HW layer yet: firmware info, DMA
 * mask, register mappings (BAR 1 / SGPIO optional) and the common SW/DMA
 * resources via hisi_sas_alloc().  On any failure the host is put and
 * NULL is returned; scsi_host_put() releases the devm/dmam allocations
 * tied to it.
 */
static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
					      const struct hisi_sas_hw *hw)
{
	struct resource *res;
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	int error;

	shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba));
	if (!shost) {
		dev_err(dev, "scsi host alloc failed\n");
		return NULL;
	}
	hisi_hba = shost_priv(shost);

	INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
	hisi_hba->hw = hw;
	hisi_hba->dev = dev;
	hisi_hba->platform_dev = pdev;
	hisi_hba->shost = shost;
	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

	timer_setup(&hisi_hba->timer, NULL, 0);

	if (hisi_sas_get_fw_info(hisi_hba) < 0)
		goto err_out;

	/* prefer 64-bit DMA, fall back to 32-bit */
	error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (error)
		error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	if (error) {
		dev_err(dev, "No usable DMA addressing method\n");
		goto err_out;
	}

	hisi_hba->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(hisi_hba->regs))
		goto err_out;

	/* second MEM resource (SGPIO registers) is optional */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {
		hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
		if (IS_ERR(hisi_hba->sgpio_regs))
			goto err_out;
	}

	if (hisi_sas_alloc(hisi_hba)) {
		hisi_sas_free(hisi_hba);
		goto err_out;
	}

	return shost;
err_out:
	scsi_host_put(shost);
	dev_err(dev, "shost alloc failed\n");
	return NULL;
}

int hisi_sas_probe(struct platform_device *pdev,
		   const struct hisi_sas_hw *hw)
{
	struct Scsi_Host
 *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	struct asd_sas_phy **arr_phy;
	struct asd_sas_port **arr_port;
	struct sas_ha_struct *sha;
	int rc, phy_nr, port_nr, i;

	shost = hisi_sas_shost_alloc(pdev, hw);
	if (!shost)
		return -ENOMEM;

	sha = SHOST_TO_SAS_HA(shost);
	hisi_hba = shost_priv(shost);
	platform_set_drvdata(pdev, sha);

	/* one libsas port per phy */
	phy_nr = port_nr = hisi_hba->n_phy;

	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
	arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
	if (!arr_phy || !arr_port) {
		rc = -ENOMEM;
		goto err_out_ha;
	}

	sha->sas_phy = arr_phy;
	sha->sas_port = arr_port;
	sha->lldd_ha = hisi_hba;

	shost->transportt = hisi_sas_stt;
	shost->max_id = HISI_SAS_MAX_DEVICES;
	shost->max_lun = ~0;
	shost->max_channel = 1;
	shost->max_cmd_len = 16;
	/*
	 * With a HW-private tag allocator the whole IPTT space is usable;
	 * otherwise the top of the range is reserved for internal commands.
	 */
	if (hisi_hba->hw->slot_index_alloc) {
		shost->can_queue = HISI_SAS_MAX_COMMANDS;
		shost->cmd_per_lun = HISI_SAS_MAX_COMMANDS;
	} else {
		shost->can_queue = HISI_SAS_UNRESERVED_IPTT;
		shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT;
	}

	sha->sas_ha_name = DRV_NAME;
	sha->dev = hisi_hba->dev;
	sha->lldd_module = THIS_MODULE;
	sha->sas_addr = &hisi_hba->sas_addr[0];
	sha->num_phys = hisi_hba->n_phy;
	sha->core.shost = hisi_hba->shost;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
	}

	rc = scsi_add_host(shost, &pdev->dev);
	if (rc)
		goto err_out_ha;

	rc = sas_register_ha(sha);
	if (rc)
		goto err_out_register_ha;

	rc = hisi_hba->hw->hw_init(hisi_hba);
	if (rc)
		goto err_out_register_ha;

	scsi_scan_host(shost);

	return 0;

err_out_register_ha:
	scsi_remove_host(shost);
err_out_ha:
	hisi_sas_debugfs_exit(hisi_hba);
	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
	return rc;
}
EXPORT_SYMBOL_GPL(hisi_sas_probe);

/* root debugfs directory shared by all hisi_sas hosts */
struct dentry *hisi_sas_debugfs_dir;

/* Copy all completion queues into the current dump's snapshot buffers. */
static void hisi_sas_debugfs_snapshot_cq_reg(struct hisi_hba *hisi_hba)
{
	int queue_entry_size = hisi_hba->hw->complete_hdr_size;
	int dump_index = hisi_hba->debugfs_dump_index;
	int i;

	for (i = 0; i < hisi_hba->queue_count; i++)
		memcpy(hisi_hba->debugfs_cq[dump_index][i].complete_hdr,
		       hisi_hba->complete_hdr[i],
		       HISI_SAS_QUEUE_SLOTS * queue_entry_size);
}

/* Copy all delivery queues, one command header at a time. */
static void hisi_sas_debugfs_snapshot_dq_reg(struct hisi_hba *hisi_hba)
{
	int queue_entry_size = sizeof(struct hisi_sas_cmd_hdr);
	int dump_index = hisi_hba->debugfs_dump_index;
	int i;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cmd_hdr *debugfs_cmd_hdr, *cmd_hdr;
		int j;

		debugfs_cmd_hdr = hisi_hba->debugfs_dq[dump_index][i].hdr;
		cmd_hdr = hisi_hba->cmd_hdr[i];

		for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++)
			memcpy(&debugfs_cmd_hdr[j], &cmd_hdr[j],
			       queue_entry_size);
	}
}

/* Snapshot the per-phy (port) register window of every phy. */
static void hisi_sas_debugfs_snapshot_port_reg(struct hisi_hba *hisi_hba)
{
	int dump_index = hisi_hba->debugfs_dump_index;
	const struct hisi_sas_debugfs_reg *port =
		hisi_hba->hw->debugfs_reg_port;
	int i, phy_cnt;
	u32 offset;
	u32 *databuf;

	for (phy_cnt = 0; phy_cnt < hisi_hba->n_phy; phy_cnt++) {
		databuf = hisi_hba->debugfs_port_reg[dump_index][phy_cnt].data;
		for (i = 0; i < port->count; i++, databuf++) {
			offset = port->base_off + 4 * i;
			*databuf = port->read_port_reg(hisi_hba, phy_cnt,
						       offset);
		}
	}
}

/* Snapshot the global register window. */
static void hisi_sas_debugfs_snapshot_global_reg(struct hisi_hba *hisi_hba)
{
	int dump_index = hisi_hba->debugfs_dump_index;
	u32 *databuf =
 hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL].data;
	const struct hisi_sas_hw *hw = hisi_hba->hw;
	const struct hisi_sas_debugfs_reg *global =
		hw->debugfs_reg_array[DEBUGFS_GLOBAL];
	int i;

	for (i = 0; i < global->count; i++, databuf++)
		*databuf = global->read_global_reg(hisi_hba, 4 * i);
}

/* Snapshot the AXI register window (offset by base_off). */
static void hisi_sas_debugfs_snapshot_axi_reg(struct hisi_hba *hisi_hba)
{
	int dump_index = hisi_hba->debugfs_dump_index;
	u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI].data;
	const struct hisi_sas_hw *hw = hisi_hba->hw;
	const struct hisi_sas_debugfs_reg *axi =
		hw->debugfs_reg_array[DEBUGFS_AXI];
	int i;

	for (i = 0; i < axi->count; i++, databuf++)
		*databuf = axi->read_global_reg(hisi_hba,
						4 * i + axi->base_off);
}

/* Snapshot the RAS register window (offset by base_off). */
static void hisi_sas_debugfs_snapshot_ras_reg(struct hisi_hba *hisi_hba)
{
	int dump_index = hisi_hba->debugfs_dump_index;
	u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS].data;
	const struct hisi_sas_hw *hw = hisi_hba->hw;
	const struct hisi_sas_debugfs_reg *ras =
		hw->debugfs_reg_array[DEBUGFS_RAS];
	int i;

	for (i = 0; i < ras->count; i++, databuf++)
		*databuf = ras->read_global_reg(hisi_hba,
						4 * i + ras->base_off);
}

/*
 * Snapshot the ITCT cache (via the HW hook) and the in-memory ITCT table
 * into the current dump slot.
 */
static void hisi_sas_debugfs_snapshot_itct_reg(struct hisi_hba *hisi_hba)
{
	int dump_index = hisi_hba->debugfs_dump_index;
	void *cachebuf = hisi_hba->debugfs_itct_cache[dump_index].cache;
	void *databuf = hisi_hba->debugfs_itct[dump_index].itct;
	struct hisi_sas_itct *itct;
	int i;

	hisi_hba->hw->read_iost_itct_cache(hisi_hba, HISI_SAS_ITCT_CACHE,
					   cachebuf);

	itct = hisi_hba->itct;

	for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) {
		memcpy(databuf, itct, sizeof(struct hisi_sas_itct));
		databuf += sizeof(struct hisi_sas_itct);
	}
}

/*
 * Snapshot the IOST cache (via the HW hook) and the in-memory IOST table
 * into the current dump slot.
 */
static void
hisi_sas_debugfs_snapshot_iost_reg(struct hisi_hba *hisi_hba)
{
	int dump_index = hisi_hba->debugfs_dump_index;
	int max_command_entries = HISI_SAS_MAX_COMMANDS;
	void *cachebuf = hisi_hba->debugfs_iost_cache[dump_index].cache;
	void *databuf = hisi_hba->debugfs_iost[dump_index].iost;
	struct hisi_sas_iost *iost;
	int i;

	hisi_hba->hw->read_iost_itct_cache(hisi_hba, HISI_SAS_IOST_CACHE,
					   cachebuf);

	iost = hisi_hba->iost;

	for (i = 0; i < max_command_entries; i++, iost++) {
		memcpy(databuf, iost, sizeof(struct hisi_sas_iost));
		databuf += sizeof(struct hisi_sas_iost);
	}
}

/*
 * Map a register offset (relative to base_off) to its symbolic name via
 * the NULL-terminated lookup table; returns NULL when unnamed.
 */
static const char *
hisi_sas_debugfs_to_reg_name(int off, int base_off,
			     const struct hisi_sas_debugfs_reg_lu *lu)
{
	for (; lu->name; lu++) {
		if (off == lu->off - base_off)
			return lu->name;
	}

	return NULL;
}

/* Print "offset value [name]" lines for one snapshotted register window. */
static void hisi_sas_debugfs_print_reg(u32 *regs_val, const void *ptr,
				       struct seq_file *s)
{
	const struct hisi_sas_debugfs_reg *reg = ptr;
	int i;

	for (i = 0; i < reg->count; i++) {
		int off = i * 4;
		const char *name;

		name = hisi_sas_debugfs_to_reg_name(off, reg->base_off,
						    reg->lu);

		if (name)
			seq_printf(s, "0x%08x 0x%08x %s\n", off,
				   regs_val[i], name);
		else
			seq_printf(s, "0x%08x 0x%08x\n", off,
				   regs_val[i]);
	}
}

/* seq_file show for the "global" dump file */
static int hisi_sas_debugfs_global_show(struct seq_file *s, void *p)
{
	struct hisi_sas_debugfs_regs *global = s->private;
	struct hisi_hba *hisi_hba = global->hisi_hba;
	const struct hisi_sas_hw *hw = hisi_hba->hw;
	const void *reg_global = hw->debugfs_reg_array[DEBUGFS_GLOBAL];

	hisi_sas_debugfs_print_reg(global->data,
				   reg_global, s);

	return 0;
}

static int hisi_sas_debugfs_global_open(struct inode *inode, struct file *filp)
{
	return single_open(filp,
 hisi_sas_debugfs_global_show,
			   inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_global_fops = {
	.open = hisi_sas_debugfs_global_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};

/* seq_file show for the "axi" dump file */
static int hisi_sas_debugfs_axi_show(struct seq_file *s, void *p)
{
	struct hisi_sas_debugfs_regs *axi = s->private;
	struct hisi_hba *hisi_hba = axi->hisi_hba;
	const struct hisi_sas_hw *hw = hisi_hba->hw;
	const void *reg_axi = hw->debugfs_reg_array[DEBUGFS_AXI];

	hisi_sas_debugfs_print_reg(axi->data,
				   reg_axi, s);

	return 0;
}

static int hisi_sas_debugfs_axi_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_axi_show,
			   inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_axi_fops = {
	.open = hisi_sas_debugfs_axi_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};

/* seq_file show for the "ras" dump file */
static int hisi_sas_debugfs_ras_show(struct seq_file *s, void *p)
{
	struct hisi_sas_debugfs_regs *ras = s->private;
	struct hisi_hba *hisi_hba = ras->hisi_hba;
	const struct hisi_sas_hw *hw = hisi_hba->hw;
	const void *reg_ras = hw->debugfs_reg_array[DEBUGFS_RAS];

	hisi_sas_debugfs_print_reg(ras->data,
				   reg_ras, s);

	return 0;
}

static int hisi_sas_debugfs_ras_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_ras_show,
			   inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_ras_fops = {
	.open = hisi_sas_debugfs_ras_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};

/* seq_file show for a per-phy "port/<n>" dump file */
static int hisi_sas_debugfs_port_show(struct seq_file *s, void *p)
{
	struct hisi_sas_debugfs_port *port = s->private;
	struct hisi_sas_phy *phy = port->phy;
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	const struct hisi_sas_hw *hw = hisi_hba->hw;
	const struct hisi_sas_debugfs_reg *reg_port = hw->debugfs_reg_port;

	hisi_sas_debugfs_print_reg(port->data, reg_port, s);

	return 0;
}

static int hisi_sas_debugfs_port_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_port_show, inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_port_fops = {
	.open = hisi_sas_debugfs_port_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};

/* Dump one table entry as 64-bit words, two per output line. */
static void hisi_sas_show_row_64(struct seq_file *s, int index,
				 int sz, __le64 *ptr)
{
	int i;

	/* completion header size not fixed per HW version */
	seq_printf(s, "index %04d:\n\t", index);
	for (i = 1; i <= sz / 8; i++, ptr++) {
		seq_printf(s, " 0x%016llx", le64_to_cpu(*ptr));
		if (!(i % 2))
			seq_puts(s, "\n\t");
	}

	seq_puts(s, "\n");
}

/* Dump one table entry as 32-bit words, four per output line. */
static void hisi_sas_show_row_32(struct seq_file *s, int index,
				 int sz, __le32 *ptr)
{
	int i;

	/* completion header size not fixed per HW version */
	seq_printf(s, "index %04d:\n\t", index);
	for (i = 1; i <= sz / 4; i++, ptr++) {
		seq_printf(s, " 0x%08x", le32_to_cpu(*ptr));
		if (!(i % 4))
			seq_puts(s, "\n\t");
	}
	seq_puts(s, "\n");
}

/* Print one snapshotted completion-queue slot. */
static void hisi_sas_cq_show_slot(struct seq_file *s, int slot,
				  struct hisi_sas_debugfs_cq *debugfs_cq)
{
	struct hisi_sas_cq *cq = debugfs_cq->cq;
	struct hisi_hba *hisi_hba = cq->hisi_hba;
	__le32 *complete_hdr = debugfs_cq->complete_hdr +
		(hisi_hba->hw->complete_hdr_size * slot);

	hisi_sas_show_row_32(s, slot,
			     hisi_hba->hw->complete_hdr_size,
			     complete_hdr);
}

/* seq_file show for a "cq/<n>" dump file: every slot of one CQ */
static int hisi_sas_debugfs_cq_show(struct seq_file *s, void *p)
{
	struct hisi_sas_debugfs_cq *debugfs_cq = s->private;
	int slot;

	for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) {
		hisi_sas_cq_show_slot(s, slot, debugfs_cq);
	}
	return 0;
}

static int hisi_sas_debugfs_cq_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_cq_show, inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_cq_fops = {
	.open = hisi_sas_debugfs_cq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};

/* Print one snapshotted delivery-queue slot (fixed-size command header). */
static void hisi_sas_dq_show_slot(struct seq_file *s, int slot, void *dq_ptr)
{
	struct hisi_sas_debugfs_dq *debugfs_dq = dq_ptr;
	void *cmd_queue = debugfs_dq->hdr;
	__le32 *cmd_hdr = cmd_queue +
		sizeof(struct hisi_sas_cmd_hdr) * slot;

	hisi_sas_show_row_32(s, slot, sizeof(struct hisi_sas_cmd_hdr), cmd_hdr);
}

/* seq_file show for a "dq/<n>" dump file: every slot of one DQ */
static int hisi_sas_debugfs_dq_show(struct seq_file *s, void *p)
{
	int slot;

	for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) {
		hisi_sas_dq_show_slot(s, slot, s->private);
	}
	return 0;
}

static int hisi_sas_debugfs_dq_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_dq_show, inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_dq_fops = {
	.open = hisi_sas_debugfs_dq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};

/* seq_file show for the "iost" dump file: all IOST table entries */
static int hisi_sas_debugfs_iost_show(struct seq_file *s, void *p)
{
	struct hisi_sas_debugfs_iost *debugfs_iost = s->private;
	struct hisi_sas_iost *iost = debugfs_iost->iost;
	int i, max_command_entries = HISI_SAS_MAX_COMMANDS;

	for (i = 0; i < max_command_entries; i++, iost++) {
		__le64 *data = &iost->qw0;

		hisi_sas_show_row_64(s, i, sizeof(*iost), data);
	}

	return 0;
}

static int hisi_sas_debugfs_iost_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_iost_show, inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_iost_fops = {
	.open = hisi_sas_debugfs_iost_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};

/*
 * seq_file show for "iost_cache": each cache line is keyed by the table
 * index embedded in its second data word.
 */
static int hisi_sas_debugfs_iost_cache_show(struct seq_file *s, void *p)
{
	struct hisi_sas_debugfs_iost_cache *debugfs_iost_cache = s->private;
	struct hisi_sas_iost_itct_cache *iost_cache = debugfs_iost_cache->cache;
	u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4;
	int i, tab_idx;
	__le64 *iost;

	for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, iost_cache++) {
		/*
		 * Data struct of IOST cache:
		 * Data[1]: BIT0~15: Table index
		 *	    Bit16: Valid mask
		 * Data[2]~[9]: IOST table
		 */
		tab_idx = (iost_cache->data[1] & 0xffff);
		iost = (__le64 *)iost_cache;

		hisi_sas_show_row_64(s, tab_idx, cache_size, iost);
	}

	return 0;
}

static int hisi_sas_debugfs_iost_cache_open(struct inode *inode,
					    struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_iost_cache_show,
			   inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_iost_cache_fops = {
	.open = hisi_sas_debugfs_iost_cache_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};

/* seq_file show for the "itct" dump file: all ITCT table entries */
static int hisi_sas_debugfs_itct_show(struct seq_file *s, void *p)
{
	int i;
	struct hisi_sas_debugfs_itct *debugfs_itct = s->private;
	struct hisi_sas_itct *itct = debugfs_itct->itct;

	for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) {
		__le64 *data = &itct->qw0;

		hisi_sas_show_row_64(s, i, sizeof(*itct), data);
	}

	return 0;
}

static int hisi_sas_debugfs_itct_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_itct_show, inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_itct_fops = {
	.open = hisi_sas_debugfs_itct_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};

/*
 * seq_file show for "itct_cache": mirrors the IOST cache layout, keyed by
 * the table index in the second data word.
 */
static int hisi_sas_debugfs_itct_cache_show(struct seq_file *s, void *p)
{
	struct hisi_sas_debugfs_itct_cache *debugfs_itct_cache = s->private;
	struct hisi_sas_iost_itct_cache *itct_cache = debugfs_itct_cache->cache;
	u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4;
	int i, tab_idx;
	__le64 *itct;

	for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, itct_cache++) {
		/*
		 * Data struct of ITCT cache:
		 * Data[1]: BIT0~15: Table index
		 *	    Bit16: Valid mask
		 * Data[2]~[9]: ITCT table
		 */
		tab_idx = itct_cache->data[1] & 0xffff;
		itct = (__le64 *)itct_cache;

		hisi_sas_show_row_64(s, tab_idx, cache_size, itct);
	}

	return 0;
}

static int hisi_sas_debugfs_itct_cache_open(struct inode *inode,
					    struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_itct_cache_show,
			   inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_itct_cache_fops = {
	.open = hisi_sas_debugfs_itct_cache_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};

/*
 * Create the debugfs directory tree for the dump slot currently selected
 * by debugfs_dump_index and populate it with the files defined above.
 */
static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba)
{
	u64 *debugfs_timestamp;
	int dump_index = hisi_hba->debugfs_dump_index;
	struct dentry *dump_dentry;
	struct dentry *dentry;
	char name[256];
	int p;
	int c;
	int d;

	/* dump directory is named after its slot index */
	snprintf(name, 256, "%d", dump_index);

	dump_dentry = debugfs_create_dir(name, hisi_hba->debugfs_dump_dentry);

	debugfs_timestamp = &hisi_hba->debugfs_timestamp[dump_index];

	debugfs_create_u64("timestamp", 0400, dump_dentry,
			   debugfs_timestamp);

	debugfs_create_file("global", 0400, dump_dentry,
			    &hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL],
			    &hisi_sas_debugfs_global_fops);

	/* Create port dir and files */
	dentry = debugfs_create_dir("port", dump_dentry);
	for (p = 0; p < hisi_hba->n_phy; p++) {
		snprintf(name, 256, "%d", p);

		debugfs_create_file(name, 0400, dentry,
				    &hisi_hba->debugfs_port_reg[dump_index][p],
				    &hisi_sas_debugfs_port_fops);
	}

	/* Create CQ dir and files */
	dentry = debugfs_create_dir("cq", dump_dentry);
	for (c = 0; c < hisi_hba->queue_count; c++) {
		snprintf(name, 256, "%d", c);

		debugfs_create_file(name, 0400, dentry,
				    &hisi_hba->debugfs_cq[dump_index][c],
				    &hisi_sas_debugfs_cq_fops);
	}

	/* Create DQ dir and files */
	dentry = debugfs_create_dir("dq", dump_dentry);
	for (d = 0; d < hisi_hba->queue_count; d++) {
		snprintf(name, 256, "%d", d);

		debugfs_create_file(name, 0400, dentry,
				    &hisi_hba->debugfs_dq[dump_index][d],
				    &hisi_sas_debugfs_dq_fops);
	}

	debugfs_create_file("iost", 0400, dump_dentry,
			    &hisi_hba->debugfs_iost[dump_index],
			    &hisi_sas_debugfs_iost_fops);

	debugfs_create_file("iost_cache", 0400, dump_dentry,
			    &hisi_hba->debugfs_iost_cache[dump_index],
			    &hisi_sas_debugfs_iost_cache_fops);

	debugfs_create_file("itct", 0400, dump_dentry,
			    &hisi_hba->debugfs_itct[dump_index],
			    &hisi_sas_debugfs_itct_fops);

	debugfs_create_file("itct_cache", 0400, dump_dentry,
			    &hisi_hba->debugfs_itct_cache[dump_index],
			    &hisi_sas_debugfs_itct_cache_fops);

	debugfs_create_file("axi", 0400, dump_dentry,
			    &hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI],
			    &hisi_sas_debugfs_axi_fops);

	debugfs_create_file("ras", 0400, dump_dentry,
			    &hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS],
			    &hisi_sas_debugfs_ras_fops);

	return;
}

/*
 * Take a full controller snapshot: quiesce via the HW snapshot_prepare
 * hook, copy every register window and table into the current dump slot,
 * publish the debugfs files for it, then resume via snapshot_restore.
 */
static void hisi_sas_debugfs_snapshot_regs(struct hisi_hba *hisi_hba)
{
	hisi_hba->hw->snapshot_prepare(hisi_hba);

	hisi_sas_debugfs_snapshot_global_reg(hisi_hba);
	hisi_sas_debugfs_snapshot_port_reg(hisi_hba);
	hisi_sas_debugfs_snapshot_axi_reg(hisi_hba);
	hisi_sas_debugfs_snapshot_ras_reg(hisi_hba);
	hisi_sas_debugfs_snapshot_cq_reg(hisi_hba);
	hisi_sas_debugfs_snapshot_dq_reg(hisi_hba);
	hisi_sas_debugfs_snapshot_itct_reg(hisi_hba);
	hisi_sas_debugfs_snapshot_iost_reg(hisi_hba);

	hisi_sas_debugfs_create_files(hisi_hba);

	hisi_hba->hw->snapshot_restore(hisi_hba);
}

/*
 * Write handler for "trigger_dump": writing '1' queues a snapshot on the
 * host workqueue, provided a free dump slot remains.
 *
 * NOTE(review): -EFAULT is returned here for conditions that are not
 * user-memory faults (no free dump slot, oversized write, bad value);
 * -EINVAL/-EOVERFLOW would describe them better — confirm before changing,
 * as userspace may rely on the current codes.
 */
static ssize_t hisi_sas_debugfs_trigger_dump_write(struct file *file,
						   const char __user *user_buf,
						   size_t count, loff_t *ppos)
{
	struct hisi_hba *hisi_hba = file->f_inode->i_private;
	char buf[8];

	if (hisi_hba->debugfs_dump_index >= hisi_sas_debugfs_dump_count)
		return -EFAULT;

	if (count > 8)
		return -EFAULT;

	if (copy_from_user(buf, user_buf, count))
		return -EFAULT;

	if (buf[0] != '1')
		return -EFAULT;

	queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);

	return count;
}

static const struct file_operations hisi_sas_debugfs_trigger_dump_fops = {
	.write = &hisi_sas_debugfs_trigger_dump_write,
	.owner = THIS_MODULE,
};

/* BIST loopback point selection */
enum {
	HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL = 0,
	HISI_SAS_BIST_LOOPBACK_MODE_SERDES,
	HISI_SAS_BIST_LOOPBACK_MODE_REMOTE,
};

/* BIST test-pattern code modes */
enum {
	HISI_SAS_BIST_CODE_MODE_PRBS7 = 0,
	HISI_SAS_BIST_CODE_MODE_PRBS23,
	HISI_SAS_BIST_CODE_MODE_PRBS31,
	HISI_SAS_BIST_CODE_MODE_JTPAT,
	HISI_SAS_BIST_CODE_MODE_CJTPAT,
	HISI_SAS_BIST_CODE_MODE_SCRAMBED_0,
	HISI_SAS_BIST_CODE_MODE_TRAIN,
	HISI_SAS_BIST_CODE_MODE_TRAIN_DONE,
	HISI_SAS_BIST_CODE_MODE_HFTP,
	HISI_SAS_BIST_CODE_MODE_MFTP,
	HISI_SAS_BIST_CODE_MODE_LFTP,
	HISI_SAS_BIST_CODE_MODE_FIXED_DATA,
};

/* name <-> value table for the BIST linkrate debugfs file */
static const struct {
	int		value;
	char		*name;
} hisi_sas_debugfs_loop_linkrate[] = {
	{ SAS_LINK_RATE_1_5_GBPS, "1.5 Gbit" },
	{ SAS_LINK_RATE_3_0_GBPS, "3.0 Gbit" },
	{ SAS_LINK_RATE_6_0_GBPS, "6.0 Gbit" },
	{ SAS_LINK_RATE_12_0_GBPS, "12.0 Gbit" },
};

/* Show all supported linkrates, the selected one wrapped in brackets. */
static int hisi_sas_debugfs_bist_linkrate_show(struct seq_file *s, void *p)
{
	struct hisi_hba *hisi_hba = s->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_linkrate); i++) {
		int match = (hisi_hba->debugfs_bist_linkrate ==
			     hisi_sas_debugfs_loop_linkrate[i].value);

		seq_printf(s, "%s%s%s ", match ? "[" : "",
			   hisi_sas_debugfs_loop_linkrate[i].name,
			   match ? "]" : "");
	}
	seq_puts(s, "\n");

	return 0;
}

/*
 * Select the BIST linkrate by name.  Rejected with -EPERM while BIST is
 * enabled, -EOVERFLOW when the write does not fit the 16-byte scratch
 * buffer, and -EINVAL when the (whitespace-stripped) string matches no
 * table entry.
 */
static ssize_t hisi_sas_debugfs_bist_linkrate_write(struct file *filp,
						    const char __user *buf,
						    size_t count, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct hisi_hba *hisi_hba = m->private;
	char kbuf[16] = {}, *pkbuf;
	bool found = false;
	int i;

	if (hisi_hba->debugfs_bist_enable)
		return -EPERM;

	if (count >= sizeof(kbuf))
		return -EOVERFLOW;

	if (copy_from_user(kbuf, buf, count))
		return -EINVAL;

	pkbuf = strstrip(kbuf);

	for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_linkrate); i++) {
		if (!strncmp(hisi_sas_debugfs_loop_linkrate[i].name,
			     pkbuf, 16)) {
			hisi_hba->debugfs_bist_linkrate =
				hisi_sas_debugfs_loop_linkrate[i].value;
			found = true;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	return count;
}

static int hisi_sas_debugfs_bist_linkrate_open(struct inode *inode,
					       struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_bist_linkrate_show,
			   inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_bist_linkrate_ops = {
	.open = hisi_sas_debugfs_bist_linkrate_open,
	.read = seq_read,
	.write = hisi_sas_debugfs_bist_linkrate_write,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};

/* name <-> value table for the BIST code-mode debugfs file */
static const struct {
	int		value;
	char		*name;
} hisi_sas_debugfs_loop_code_mode[] = {
	{ HISI_SAS_BIST_CODE_MODE_PRBS7, "PRBS7" },
	{ HISI_SAS_BIST_CODE_MODE_PRBS23, "PRBS23" },
	{ HISI_SAS_BIST_CODE_MODE_PRBS31, "PRBS31" },
	{ HISI_SAS_BIST_CODE_MODE_JTPAT, "JTPAT" },
	{ HISI_SAS_BIST_CODE_MODE_CJTPAT, "CJTPAT" },
	{ HISI_SAS_BIST_CODE_MODE_SCRAMBED_0, "SCRAMBED_0" },
	{ HISI_SAS_BIST_CODE_MODE_TRAIN, "TRAIN" },
	{ HISI_SAS_BIST_CODE_MODE_TRAIN_DONE,
"TRAIN_DONE" }, 3441 { HISI_SAS_BIST_CODE_MODE_HFTP, "HFTP" }, 3442 { HISI_SAS_BIST_CODE_MODE_MFTP, "MFTP" }, 3443 { HISI_SAS_BIST_CODE_MODE_LFTP, "LFTP" }, 3444 { HISI_SAS_BIST_CODE_MODE_FIXED_DATA, "FIXED_DATA" }, 3445 }; 3446 3447 static int hisi_sas_debugfs_bist_code_mode_show(struct seq_file *s, void *p) 3448 { 3449 struct hisi_hba *hisi_hba = s->private; 3450 int i; 3451 3452 for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_code_mode); i++) { 3453 int match = (hisi_hba->debugfs_bist_code_mode == 3454 hisi_sas_debugfs_loop_code_mode[i].value); 3455 3456 seq_printf(s, "%s%s%s ", match ? "[" : "", 3457 hisi_sas_debugfs_loop_code_mode[i].name, 3458 match ? "]" : ""); 3459 } 3460 seq_puts(s, "\n"); 3461 3462 return 0; 3463 } 3464 3465 static ssize_t hisi_sas_debugfs_bist_code_mode_write(struct file *filp, 3466 const char __user *buf, 3467 size_t count, 3468 loff_t *ppos) 3469 { 3470 struct seq_file *m = filp->private_data; 3471 struct hisi_hba *hisi_hba = m->private; 3472 char kbuf[16] = {}, *pkbuf; 3473 bool found = false; 3474 int i; 3475 3476 if (hisi_hba->debugfs_bist_enable) 3477 return -EPERM; 3478 3479 if (count >= sizeof(kbuf)) 3480 return -EINVAL; 3481 3482 if (copy_from_user(kbuf, buf, count)) 3483 return -EOVERFLOW; 3484 3485 pkbuf = strstrip(kbuf); 3486 3487 for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_code_mode); i++) { 3488 if (!strncmp(hisi_sas_debugfs_loop_code_mode[i].name, 3489 pkbuf, 16)) { 3490 hisi_hba->debugfs_bist_code_mode = 3491 hisi_sas_debugfs_loop_code_mode[i].value; 3492 found = true; 3493 break; 3494 } 3495 } 3496 3497 if (!found) 3498 return -EINVAL; 3499 3500 return count; 3501 } 3502 3503 static int hisi_sas_debugfs_bist_code_mode_open(struct inode *inode, 3504 struct file *filp) 3505 { 3506 return single_open(filp, hisi_sas_debugfs_bist_code_mode_show, 3507 inode->i_private); 3508 } 3509 3510 static const struct file_operations hisi_sas_debugfs_bist_code_mode_ops = { 3511 .open = hisi_sas_debugfs_bist_code_mode_open, 3512 
.read = seq_read, 3513 .write = hisi_sas_debugfs_bist_code_mode_write, 3514 .llseek = seq_lseek, 3515 .release = single_release, 3516 .owner = THIS_MODULE, 3517 }; 3518 3519 static ssize_t hisi_sas_debugfs_bist_phy_write(struct file *filp, 3520 const char __user *buf, 3521 size_t count, loff_t *ppos) 3522 { 3523 struct seq_file *m = filp->private_data; 3524 struct hisi_hba *hisi_hba = m->private; 3525 unsigned int phy_no; 3526 int val; 3527 3528 if (hisi_hba->debugfs_bist_enable) 3529 return -EPERM; 3530 3531 val = kstrtouint_from_user(buf, count, 0, &phy_no); 3532 if (val) 3533 return val; 3534 3535 if (phy_no >= hisi_hba->n_phy) 3536 return -EINVAL; 3537 3538 hisi_hba->debugfs_bist_phy_no = phy_no; 3539 3540 return count; 3541 } 3542 3543 static int hisi_sas_debugfs_bist_phy_show(struct seq_file *s, void *p) 3544 { 3545 struct hisi_hba *hisi_hba = s->private; 3546 3547 seq_printf(s, "%d\n", hisi_hba->debugfs_bist_phy_no); 3548 3549 return 0; 3550 } 3551 3552 static int hisi_sas_debugfs_bist_phy_open(struct inode *inode, 3553 struct file *filp) 3554 { 3555 return single_open(filp, hisi_sas_debugfs_bist_phy_show, 3556 inode->i_private); 3557 } 3558 3559 static const struct file_operations hisi_sas_debugfs_bist_phy_ops = { 3560 .open = hisi_sas_debugfs_bist_phy_open, 3561 .read = seq_read, 3562 .write = hisi_sas_debugfs_bist_phy_write, 3563 .llseek = seq_lseek, 3564 .release = single_release, 3565 .owner = THIS_MODULE, 3566 }; 3567 3568 static const struct { 3569 int value; 3570 char *name; 3571 } hisi_sas_debugfs_loop_modes[] = { 3572 { HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL, "digital" }, 3573 { HISI_SAS_BIST_LOOPBACK_MODE_SERDES, "serdes" }, 3574 { HISI_SAS_BIST_LOOPBACK_MODE_REMOTE, "remote" }, 3575 }; 3576 3577 static int hisi_sas_debugfs_bist_mode_show(struct seq_file *s, void *p) 3578 { 3579 struct hisi_hba *hisi_hba = s->private; 3580 int i; 3581 3582 for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_modes); i++) { 3583 int match = (hisi_hba->debugfs_bist_mode == 
3584 hisi_sas_debugfs_loop_modes[i].value); 3585 3586 seq_printf(s, "%s%s%s ", match ? "[" : "", 3587 hisi_sas_debugfs_loop_modes[i].name, 3588 match ? "]" : ""); 3589 } 3590 seq_puts(s, "\n"); 3591 3592 return 0; 3593 } 3594 3595 static ssize_t hisi_sas_debugfs_bist_mode_write(struct file *filp, 3596 const char __user *buf, 3597 size_t count, loff_t *ppos) 3598 { 3599 struct seq_file *m = filp->private_data; 3600 struct hisi_hba *hisi_hba = m->private; 3601 char kbuf[16] = {}, *pkbuf; 3602 bool found = false; 3603 int i; 3604 3605 if (hisi_hba->debugfs_bist_enable) 3606 return -EPERM; 3607 3608 if (count >= sizeof(kbuf)) 3609 return -EINVAL; 3610 3611 if (copy_from_user(kbuf, buf, count)) 3612 return -EOVERFLOW; 3613 3614 pkbuf = strstrip(kbuf); 3615 3616 for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_modes); i++) { 3617 if (!strncmp(hisi_sas_debugfs_loop_modes[i].name, pkbuf, 16)) { 3618 hisi_hba->debugfs_bist_mode = 3619 hisi_sas_debugfs_loop_modes[i].value; 3620 found = true; 3621 break; 3622 } 3623 } 3624 3625 if (!found) 3626 return -EINVAL; 3627 3628 return count; 3629 } 3630 3631 static int hisi_sas_debugfs_bist_mode_open(struct inode *inode, 3632 struct file *filp) 3633 { 3634 return single_open(filp, hisi_sas_debugfs_bist_mode_show, 3635 inode->i_private); 3636 } 3637 3638 static const struct file_operations hisi_sas_debugfs_bist_mode_ops = { 3639 .open = hisi_sas_debugfs_bist_mode_open, 3640 .read = seq_read, 3641 .write = hisi_sas_debugfs_bist_mode_write, 3642 .llseek = seq_lseek, 3643 .release = single_release, 3644 .owner = THIS_MODULE, 3645 }; 3646 3647 static ssize_t hisi_sas_debugfs_bist_enable_write(struct file *filp, 3648 const char __user *buf, 3649 size_t count, loff_t *ppos) 3650 { 3651 struct seq_file *m = filp->private_data; 3652 struct hisi_hba *hisi_hba = m->private; 3653 unsigned int enable; 3654 int val; 3655 3656 val = kstrtouint_from_user(buf, count, 0, &enable); 3657 if (val) 3658 return val; 3659 3660 if (enable > 1) 3661 return 
-EINVAL; 3662 3663 if (enable == hisi_hba->debugfs_bist_enable) 3664 return count; 3665 3666 if (!hisi_hba->hw->set_bist) 3667 return -EPERM; 3668 3669 val = hisi_hba->hw->set_bist(hisi_hba, enable); 3670 if (val < 0) 3671 return val; 3672 3673 hisi_hba->debugfs_bist_enable = enable; 3674 3675 return count; 3676 } 3677 3678 static int hisi_sas_debugfs_bist_enable_show(struct seq_file *s, void *p) 3679 { 3680 struct hisi_hba *hisi_hba = s->private; 3681 3682 seq_printf(s, "%d\n", hisi_hba->debugfs_bist_enable); 3683 3684 return 0; 3685 } 3686 3687 static int hisi_sas_debugfs_bist_enable_open(struct inode *inode, 3688 struct file *filp) 3689 { 3690 return single_open(filp, hisi_sas_debugfs_bist_enable_show, 3691 inode->i_private); 3692 } 3693 3694 static const struct file_operations hisi_sas_debugfs_bist_enable_ops = { 3695 .open = hisi_sas_debugfs_bist_enable_open, 3696 .read = seq_read, 3697 .write = hisi_sas_debugfs_bist_enable_write, 3698 .llseek = seq_lseek, 3699 .release = single_release, 3700 .owner = THIS_MODULE, 3701 }; 3702 3703 static ssize_t hisi_sas_debugfs_phy_down_cnt_write(struct file *filp, 3704 const char __user *buf, 3705 size_t count, loff_t *ppos) 3706 { 3707 struct seq_file *s = filp->private_data; 3708 struct hisi_sas_phy *phy = s->private; 3709 unsigned int set_val; 3710 int res; 3711 3712 res = kstrtouint_from_user(buf, count, 0, &set_val); 3713 if (res) 3714 return res; 3715 3716 if (set_val > 0) 3717 return -EINVAL; 3718 3719 atomic_set(&phy->down_cnt, 0); 3720 3721 return count; 3722 } 3723 3724 static int hisi_sas_debugfs_phy_down_cnt_show(struct seq_file *s, void *p) 3725 { 3726 struct hisi_sas_phy *phy = s->private; 3727 3728 seq_printf(s, "%d\n", atomic_read(&phy->down_cnt)); 3729 3730 return 0; 3731 } 3732 3733 static int hisi_sas_debugfs_phy_down_cnt_open(struct inode *inode, 3734 struct file *filp) 3735 { 3736 return single_open(filp, hisi_sas_debugfs_phy_down_cnt_show, 3737 inode->i_private); 3738 } 3739 3740 static const struct 
file_operations hisi_sas_debugfs_phy_down_cnt_ops = { 3741 .open = hisi_sas_debugfs_phy_down_cnt_open, 3742 .read = seq_read, 3743 .write = hisi_sas_debugfs_phy_down_cnt_write, 3744 .llseek = seq_lseek, 3745 .release = single_release, 3746 .owner = THIS_MODULE, 3747 }; 3748 3749 void hisi_sas_debugfs_work_handler(struct work_struct *work) 3750 { 3751 struct hisi_hba *hisi_hba = 3752 container_of(work, struct hisi_hba, debugfs_work); 3753 int debugfs_dump_index = hisi_hba->debugfs_dump_index; 3754 struct device *dev = hisi_hba->dev; 3755 u64 timestamp = local_clock(); 3756 3757 if (debugfs_dump_index >= hisi_sas_debugfs_dump_count) { 3758 dev_warn(dev, "dump count exceeded!\n"); 3759 return; 3760 } 3761 3762 do_div(timestamp, NSEC_PER_MSEC); 3763 hisi_hba->debugfs_timestamp[debugfs_dump_index] = timestamp; 3764 3765 hisi_sas_debugfs_snapshot_regs(hisi_hba); 3766 hisi_hba->debugfs_dump_index++; 3767 } 3768 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_work_handler); 3769 3770 static void hisi_sas_debugfs_release(struct hisi_hba *hisi_hba, int dump_index) 3771 { 3772 struct device *dev = hisi_hba->dev; 3773 int i; 3774 3775 devm_kfree(dev, hisi_hba->debugfs_iost_cache[dump_index].cache); 3776 devm_kfree(dev, hisi_hba->debugfs_itct_cache[dump_index].cache); 3777 devm_kfree(dev, hisi_hba->debugfs_iost[dump_index].iost); 3778 devm_kfree(dev, hisi_hba->debugfs_itct[dump_index].itct); 3779 3780 for (i = 0; i < hisi_hba->queue_count; i++) 3781 devm_kfree(dev, hisi_hba->debugfs_dq[dump_index][i].hdr); 3782 3783 for (i = 0; i < hisi_hba->queue_count; i++) 3784 devm_kfree(dev, 3785 hisi_hba->debugfs_cq[dump_index][i].complete_hdr); 3786 3787 for (i = 0; i < DEBUGFS_REGS_NUM; i++) 3788 devm_kfree(dev, hisi_hba->debugfs_regs[dump_index][i].data); 3789 3790 for (i = 0; i < hisi_hba->n_phy; i++) 3791 devm_kfree(dev, hisi_hba->debugfs_port_reg[dump_index][i].data); 3792 } 3793 3794 static int hisi_sas_debugfs_alloc(struct hisi_hba *hisi_hba, int dump_index) 3795 { 3796 const struct 
hisi_sas_hw *hw = hisi_hba->hw; 3797 struct device *dev = hisi_hba->dev; 3798 int p, c, d, r, i; 3799 size_t sz; 3800 3801 for (r = 0; r < DEBUGFS_REGS_NUM; r++) { 3802 struct hisi_sas_debugfs_regs *regs = 3803 &hisi_hba->debugfs_regs[dump_index][r]; 3804 3805 sz = hw->debugfs_reg_array[r]->count * 4; 3806 regs->data = devm_kmalloc(dev, sz, GFP_KERNEL); 3807 if (!regs->data) 3808 goto fail; 3809 regs->hisi_hba = hisi_hba; 3810 } 3811 3812 sz = hw->debugfs_reg_port->count * 4; 3813 for (p = 0; p < hisi_hba->n_phy; p++) { 3814 struct hisi_sas_debugfs_port *port = 3815 &hisi_hba->debugfs_port_reg[dump_index][p]; 3816 3817 port->data = devm_kmalloc(dev, sz, GFP_KERNEL); 3818 if (!port->data) 3819 goto fail; 3820 port->phy = &hisi_hba->phy[p]; 3821 } 3822 3823 sz = hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; 3824 for (c = 0; c < hisi_hba->queue_count; c++) { 3825 struct hisi_sas_debugfs_cq *cq = 3826 &hisi_hba->debugfs_cq[dump_index][c]; 3827 3828 cq->complete_hdr = devm_kmalloc(dev, sz, GFP_KERNEL); 3829 if (!cq->complete_hdr) 3830 goto fail; 3831 cq->cq = &hisi_hba->cq[c]; 3832 } 3833 3834 sz = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS; 3835 for (d = 0; d < hisi_hba->queue_count; d++) { 3836 struct hisi_sas_debugfs_dq *dq = 3837 &hisi_hba->debugfs_dq[dump_index][d]; 3838 3839 dq->hdr = devm_kmalloc(dev, sz, GFP_KERNEL); 3840 if (!dq->hdr) 3841 goto fail; 3842 dq->dq = &hisi_hba->dq[d]; 3843 } 3844 3845 sz = HISI_SAS_MAX_COMMANDS * sizeof(struct hisi_sas_iost); 3846 3847 hisi_hba->debugfs_iost[dump_index].iost = 3848 devm_kmalloc(dev, sz, GFP_KERNEL); 3849 if (!hisi_hba->debugfs_iost[dump_index].iost) 3850 goto fail; 3851 3852 sz = HISI_SAS_IOST_ITCT_CACHE_NUM * 3853 sizeof(struct hisi_sas_iost_itct_cache); 3854 3855 hisi_hba->debugfs_iost_cache[dump_index].cache = 3856 devm_kmalloc(dev, sz, GFP_KERNEL); 3857 if (!hisi_hba->debugfs_iost_cache[dump_index].cache) 3858 goto fail; 3859 3860 sz = HISI_SAS_IOST_ITCT_CACHE_NUM * 3861 sizeof(struct 
hisi_sas_iost_itct_cache); 3862 3863 hisi_hba->debugfs_itct_cache[dump_index].cache = 3864 devm_kmalloc(dev, sz, GFP_KERNEL); 3865 if (!hisi_hba->debugfs_itct_cache[dump_index].cache) 3866 goto fail; 3867 3868 /* New memory allocation must be locate before itct */ 3869 sz = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct); 3870 3871 hisi_hba->debugfs_itct[dump_index].itct = 3872 devm_kmalloc(dev, sz, GFP_KERNEL); 3873 if (!hisi_hba->debugfs_itct[dump_index].itct) 3874 goto fail; 3875 3876 return 0; 3877 fail: 3878 for (i = 0; i < hisi_sas_debugfs_dump_count; i++) 3879 hisi_sas_debugfs_release(hisi_hba, i); 3880 return -ENOMEM; 3881 } 3882 3883 static void hisi_sas_debugfs_phy_down_cnt_init(struct hisi_hba *hisi_hba) 3884 { 3885 struct dentry *dir = debugfs_create_dir("phy_down_cnt", 3886 hisi_hba->debugfs_dir); 3887 char name[16]; 3888 int phy_no; 3889 3890 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { 3891 snprintf(name, 16, "%d", phy_no); 3892 debugfs_create_file(name, 0600, dir, 3893 &hisi_hba->phy[phy_no], 3894 &hisi_sas_debugfs_phy_down_cnt_ops); 3895 } 3896 } 3897 3898 static void hisi_sas_debugfs_bist_init(struct hisi_hba *hisi_hba) 3899 { 3900 hisi_hba->debugfs_bist_dentry = 3901 debugfs_create_dir("bist", hisi_hba->debugfs_dir); 3902 debugfs_create_file("link_rate", 0600, 3903 hisi_hba->debugfs_bist_dentry, hisi_hba, 3904 &hisi_sas_debugfs_bist_linkrate_ops); 3905 3906 debugfs_create_file("code_mode", 0600, 3907 hisi_hba->debugfs_bist_dentry, hisi_hba, 3908 &hisi_sas_debugfs_bist_code_mode_ops); 3909 3910 debugfs_create_file("phy_id", 0600, hisi_hba->debugfs_bist_dentry, 3911 hisi_hba, &hisi_sas_debugfs_bist_phy_ops); 3912 3913 debugfs_create_u32("cnt", 0600, hisi_hba->debugfs_bist_dentry, 3914 &hisi_hba->debugfs_bist_cnt); 3915 3916 debugfs_create_file("loopback_mode", 0600, 3917 hisi_hba->debugfs_bist_dentry, 3918 hisi_hba, &hisi_sas_debugfs_bist_mode_ops); 3919 3920 debugfs_create_file("enable", 0600, hisi_hba->debugfs_bist_dentry, 3921 
hisi_hba, &hisi_sas_debugfs_bist_enable_ops); 3922 3923 hisi_hba->debugfs_bist_linkrate = SAS_LINK_RATE_1_5_GBPS; 3924 } 3925 3926 void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba) 3927 { 3928 struct device *dev = hisi_hba->dev; 3929 int i; 3930 3931 hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev), 3932 hisi_sas_debugfs_dir); 3933 debugfs_create_file("trigger_dump", 0200, 3934 hisi_hba->debugfs_dir, 3935 hisi_hba, 3936 &hisi_sas_debugfs_trigger_dump_fops); 3937 3938 /* create bist structures */ 3939 hisi_sas_debugfs_bist_init(hisi_hba); 3940 3941 hisi_hba->debugfs_dump_dentry = 3942 debugfs_create_dir("dump", hisi_hba->debugfs_dir); 3943 3944 hisi_sas_debugfs_phy_down_cnt_init(hisi_hba); 3945 3946 for (i = 0; i < hisi_sas_debugfs_dump_count; i++) { 3947 if (hisi_sas_debugfs_alloc(hisi_hba, i)) { 3948 debugfs_remove_recursive(hisi_hba->debugfs_dir); 3949 dev_dbg(dev, "failed to init debugfs!\n"); 3950 break; 3951 } 3952 } 3953 } 3954 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_init); 3955 3956 void hisi_sas_debugfs_exit(struct hisi_hba *hisi_hba) 3957 { 3958 debugfs_remove_recursive(hisi_hba->debugfs_dir); 3959 } 3960 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_exit); 3961 3962 int hisi_sas_remove(struct platform_device *pdev) 3963 { 3964 struct sas_ha_struct *sha = platform_get_drvdata(pdev); 3965 struct hisi_hba *hisi_hba = sha->lldd_ha; 3966 struct Scsi_Host *shost = sha->core.shost; 3967 3968 if (timer_pending(&hisi_hba->timer)) 3969 del_timer(&hisi_hba->timer); 3970 3971 sas_unregister_ha(sha); 3972 sas_remove_host(sha->core.shost); 3973 3974 hisi_sas_free(hisi_hba); 3975 scsi_host_put(shost); 3976 return 0; 3977 } 3978 EXPORT_SYMBOL_GPL(hisi_sas_remove); 3979 3980 bool hisi_sas_debugfs_enable; 3981 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable); 3982 module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444); 3983 MODULE_PARM_DESC(hisi_sas_debugfs_enable, "Enable driver debugfs (default disabled)"); 3984 3985 u32 hisi_sas_debugfs_dump_count = 1; 3986 
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dump_count); 3987 module_param_named(debugfs_dump_count, hisi_sas_debugfs_dump_count, uint, 0444); 3988 MODULE_PARM_DESC(hisi_sas_debugfs_dump_count, "Number of debugfs dumps to allow"); 3989 3990 static __init int hisi_sas_init(void) 3991 { 3992 hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops); 3993 if (!hisi_sas_stt) 3994 return -ENOMEM; 3995 3996 if (hisi_sas_debugfs_enable) { 3997 hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL); 3998 if (hisi_sas_debugfs_dump_count > HISI_SAS_MAX_DEBUGFS_DUMP) { 3999 pr_info("hisi_sas: Limiting debugfs dump count\n"); 4000 hisi_sas_debugfs_dump_count = HISI_SAS_MAX_DEBUGFS_DUMP; 4001 } 4002 } 4003 4004 return 0; 4005 } 4006 4007 static __exit void hisi_sas_exit(void) 4008 { 4009 sas_release_transport(hisi_sas_stt); 4010 4011 debugfs_remove(hisi_sas_debugfs_dir); 4012 } 4013 4014 module_init(hisi_sas_init); 4015 module_exit(hisi_sas_exit); 4016 4017 MODULE_LICENSE("GPL"); 4018 MODULE_AUTHOR("John Garry <john.garry@huawei.com>"); 4019 MODULE_DESCRIPTION("HISILICON SAS controller driver"); 4020 MODULE_ALIAS("platform:" DRV_NAME); 4021