1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Copyright (c) 2015 Linaro Ltd. 4 * Copyright (c) 2015 Hisilicon Limited. 5 */ 6 7 #include "hisi_sas.h" 8 #define DRV_NAME "hisi_sas" 9 10 #define DEV_IS_GONE(dev) \ 11 ((!dev) || (dev->dev_type == SAS_PHY_UNUSED)) 12 13 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device, 14 u8 *lun, struct hisi_sas_tmf_task *tmf); 15 static int 16 hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, 17 struct domain_device *device, 18 int abort_flag, int tag); 19 static int hisi_sas_softreset_ata_disk(struct domain_device *device); 20 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, 21 void *funcdata); 22 static void hisi_sas_release_task(struct hisi_hba *hisi_hba, 23 struct domain_device *device); 24 static void hisi_sas_dev_gone(struct domain_device *device); 25 26 u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction) 27 { 28 switch (fis->command) { 29 case ATA_CMD_FPDMA_WRITE: 30 case ATA_CMD_FPDMA_READ: 31 case ATA_CMD_FPDMA_RECV: 32 case ATA_CMD_FPDMA_SEND: 33 case ATA_CMD_NCQ_NON_DATA: 34 return HISI_SAS_SATA_PROTOCOL_FPDMA; 35 36 case ATA_CMD_DOWNLOAD_MICRO: 37 case ATA_CMD_ID_ATA: 38 case ATA_CMD_PMP_READ: 39 case ATA_CMD_READ_LOG_EXT: 40 case ATA_CMD_PIO_READ: 41 case ATA_CMD_PIO_READ_EXT: 42 case ATA_CMD_PMP_WRITE: 43 case ATA_CMD_WRITE_LOG_EXT: 44 case ATA_CMD_PIO_WRITE: 45 case ATA_CMD_PIO_WRITE_EXT: 46 return HISI_SAS_SATA_PROTOCOL_PIO; 47 48 case ATA_CMD_DSM: 49 case ATA_CMD_DOWNLOAD_MICRO_DMA: 50 case ATA_CMD_PMP_READ_DMA: 51 case ATA_CMD_PMP_WRITE_DMA: 52 case ATA_CMD_READ: 53 case ATA_CMD_READ_EXT: 54 case ATA_CMD_READ_LOG_DMA_EXT: 55 case ATA_CMD_READ_STREAM_DMA_EXT: 56 case ATA_CMD_TRUSTED_RCV_DMA: 57 case ATA_CMD_TRUSTED_SND_DMA: 58 case ATA_CMD_WRITE: 59 case ATA_CMD_WRITE_EXT: 60 case ATA_CMD_WRITE_FUA_EXT: 61 case ATA_CMD_WRITE_QUEUED: 62 case ATA_CMD_WRITE_LOG_DMA_EXT: 63 case ATA_CMD_WRITE_STREAM_DMA_EXT: 64 case ATA_CMD_ZAC_MGMT_IN: 65 return HISI_SAS_SATA_PROTOCOL_DMA; 66 67 case ATA_CMD_CHK_POWER: 68 case ATA_CMD_DEV_RESET: 69 case ATA_CMD_EDD: 70 case ATA_CMD_FLUSH: 71 case ATA_CMD_FLUSH_EXT: 72 case ATA_CMD_VERIFY: 73 case ATA_CMD_VERIFY_EXT: 74 case ATA_CMD_SET_FEATURES: 75 case ATA_CMD_STANDBY: 76 case ATA_CMD_STANDBYNOW1: 77 case ATA_CMD_ZAC_MGMT_OUT: 78 return HISI_SAS_SATA_PROTOCOL_NONDATA; 79 80 case ATA_CMD_SET_MAX: 81 switch (fis->features) { 82 case ATA_SET_MAX_PASSWD: 83 case ATA_SET_MAX_LOCK: 84 return HISI_SAS_SATA_PROTOCOL_PIO; 85 86 case ATA_SET_MAX_PASSWD_DMA: 87 case ATA_SET_MAX_UNLOCK_DMA: 88 return HISI_SAS_SATA_PROTOCOL_DMA; 89 90 default: 91 return HISI_SAS_SATA_PROTOCOL_NONDATA; 92 } 93 94 default: 95 { 96 if (direction == DMA_NONE) 97 return HISI_SAS_SATA_PROTOCOL_NONDATA; 98 return HISI_SAS_SATA_PROTOCOL_PIO; 99 } 100 } 101 } 102 EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol); 103 104 void hisi_sas_sata_done(struct sas_task *task, 105 struct hisi_sas_slot *slot) 106 { 107 struct task_status_struct *ts = &task->task_status; 108 struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf; 109 struct hisi_sas_status_buffer *status_buf = 110 hisi_sas_status_buf_addr_mem(slot); 111 u8 *iu = &status_buf->iu[0]; 112 struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu; 113 114 resp->frame_len = sizeof(struct dev_to_host_fis); 115 memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis)); 116 117 ts->buf_valid_size = sizeof(*resp); 118 } 119 EXPORT_SYMBOL_GPL(hisi_sas_sata_done); 120 121 int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag) 122 { 123 struct ata_queued_cmd *qc = task->uldd_task; 124 125 if (qc) { 126 if (qc->tf.command == ATA_CMD_FPDMA_WRITE || 127 qc->tf.command == ATA_CMD_FPDMA_READ) { 128 *tag = qc->tag; 129 return 1; 130 } 131 } 132 return 0; 133 } 134 EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag); 135 136 /* 137 * This function assumes linkrate mask fits in 8 bits, which it 138 * does for all HW versions supported. 139 */ 140 u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max) 141 { 142 u8 rate = 0; 143 int i; 144 145 max -= SAS_LINK_RATE_1_5_GBPS; 146 for (i = 0; i <= max; i++) 147 rate |= 1 << (i * 2); 148 return rate; 149 } 150 EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask); 151 152 static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device) 153 { 154 return device->port->ha->lldd_ha; 155 } 156 157 struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port) 158 { 159 return container_of(sas_port, struct hisi_sas_port, sas_port); 160 } 161 EXPORT_SYMBOL_GPL(to_hisi_sas_port); 162 163 void hisi_sas_stop_phys(struct hisi_hba *hisi_hba) 164 { 165 int phy_no; 166 167 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) 168 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 169 } 170 EXPORT_SYMBOL_GPL(hisi_sas_stop_phys); 171 172 static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx) 173 { 174 void *bitmap = hisi_hba->slot_index_tags; 175 176 clear_bit(slot_idx, bitmap); 177 } 178 179 static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx) 180 { 181 unsigned long flags; 182 183 if (hisi_hba->hw->slot_index_alloc || (slot_idx >= 184 hisi_hba->hw->max_command_entries - HISI_SAS_RESERVED_IPTT_CNT)) { 185 spin_lock_irqsave(&hisi_hba->lock, flags); 186 hisi_sas_slot_index_clear(hisi_hba, slot_idx); 187 spin_unlock_irqrestore(&hisi_hba->lock, flags); 188 } 189 } 190 191 static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx) 192 { 193 void *bitmap = hisi_hba->slot_index_tags; 194 195 set_bit(slot_idx, bitmap); 196 } 197 198 static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, 199 struct scsi_cmnd *scsi_cmnd) 200 { 201 int index; 202 void *bitmap = hisi_hba->slot_index_tags; 203 unsigned long flags; 204 205 if (scsi_cmnd) 206 return scsi_cmnd->request->tag; 207 208 spin_lock_irqsave(&hisi_hba->lock, flags); 209 index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count, 210 hisi_hba->last_slot_index + 1); 211 if (index >= hisi_hba->slot_index_count) { 212 index = find_next_zero_bit(bitmap, 213 hisi_hba->slot_index_count, 214 hisi_hba->hw->max_command_entries - 215 HISI_SAS_RESERVED_IPTT_CNT); 216 if (index >= hisi_hba->slot_index_count) { 217 spin_unlock_irqrestore(&hisi_hba->lock, flags); 218 return -SAS_QUEUE_FULL; 219 } 220 } 221 hisi_sas_slot_index_set(hisi_hba, index); 222 hisi_hba->last_slot_index = index; 223 spin_unlock_irqrestore(&hisi_hba->lock, flags); 224 225 return index; 226 } 227 228 static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba) 229 { 230 int i; 231 232 for (i = 0; i < hisi_hba->slot_index_count; ++i) 233 hisi_sas_slot_index_clear(hisi_hba, i); 234 } 235 236 void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, 237 struct hisi_sas_slot *slot) 238 { 239 unsigned long flags; 240 int device_id = slot->device_id; 241 struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id]; 242 243 if (task) { 244 struct device *dev = hisi_hba->dev; 245 246 if (!task->lldd_task) 247 return; 248 249 task->lldd_task = NULL; 250 251 if (!sas_protocol_ata(task->task_proto)) { 252 struct sas_ssp_task *ssp_task = &task->ssp_task; 253 struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; 254 255 if (slot->n_elem) 256 dma_unmap_sg(dev, task->scatter, 257 task->num_scatter, 258 task->data_dir); 259 if (slot->n_elem_dif) 260 dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), 261 scsi_prot_sg_count(scsi_cmnd), 262 task->data_dir); 263 } 264 } 265 266 spin_lock_irqsave(&sas_dev->lock, flags); 267 list_del_init(&slot->entry); 268 spin_unlock_irqrestore(&sas_dev->lock, flags); 269 270 memset(slot, 0, offsetof(struct hisi_sas_slot, buf)); 271 272 hisi_sas_slot_index_free(hisi_hba, slot->idx); 273 } 274 EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free); 275 276 static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba, 277 struct hisi_sas_slot *slot) 278 { 279 hisi_hba->hw->prep_smp(hisi_hba, slot); 280 } 281 282 static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba, 283 struct hisi_sas_slot *slot) 284 { 285 hisi_hba->hw->prep_ssp(hisi_hba, slot); 286 } 287 288 static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba, 289 struct hisi_sas_slot *slot) 290 { 291 hisi_hba->hw->prep_stp(hisi_hba, slot); 292 } 293 294 static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba, 295 struct hisi_sas_slot *slot, 296 int device_id, int abort_flag, int tag_to_abort) 297 { 298 hisi_hba->hw->prep_abort(hisi_hba, slot, 299 device_id, abort_flag, tag_to_abort); 300 } 301 302 static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba, 303 struct sas_task *task, int n_elem, 304 int n_elem_req, int n_elem_resp) 305 { 306 struct device *dev = hisi_hba->dev; 307 308 if (!sas_protocol_ata(task->task_proto)) { 309 if (task->num_scatter) { 310 if (n_elem) 311 dma_unmap_sg(dev, task->scatter, 312 task->num_scatter, 313 task->data_dir); 314 } else if (task->task_proto & SAS_PROTOCOL_SMP) { 315 if (n_elem_req) 316 dma_unmap_sg(dev, &task->smp_task.smp_req, 317 1, DMA_TO_DEVICE); 318 if (n_elem_resp) 319 dma_unmap_sg(dev, &task->smp_task.smp_resp, 320 1, DMA_FROM_DEVICE); 321 } 322 } 323 } 324 325 static int hisi_sas_dma_map(struct hisi_hba *hisi_hba, 326 struct sas_task *task, int *n_elem, 327 int *n_elem_req, int *n_elem_resp) 328 { 329 struct device *dev = hisi_hba->dev; 330 int rc; 331 332 if (sas_protocol_ata(task->task_proto)) { 333 *n_elem = task->num_scatter; 334 } else { 335 unsigned int req_len, resp_len; 336 337 if (task->num_scatter) { 338 *n_elem = dma_map_sg(dev, task->scatter, 339 task->num_scatter, task->data_dir); 340 if (!*n_elem) { 341 rc = -ENOMEM; 342 goto prep_out; 343 } 344 } else if (task->task_proto & SAS_PROTOCOL_SMP) { 345 *n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req, 346 1, DMA_TO_DEVICE); 347 if (!*n_elem_req) { 348 rc = -ENOMEM; 349 goto prep_out; 350 } 351 req_len = sg_dma_len(&task->smp_task.smp_req); 352 if (req_len & 0x3) { 353 rc = -EINVAL; 354 goto err_out_dma_unmap; 355 } 356 *n_elem_resp = dma_map_sg(dev, &task->smp_task.smp_resp, 357 1, DMA_FROM_DEVICE); 358 if (!*n_elem_resp) { 359 rc = -ENOMEM; 360 goto err_out_dma_unmap; 361 } 362 resp_len = sg_dma_len(&task->smp_task.smp_resp); 363 if (resp_len & 0x3) { 364 rc = -EINVAL; 365 goto err_out_dma_unmap; 366 } 367 } 368 } 369 370 if (*n_elem > HISI_SAS_SGE_PAGE_CNT) { 371 dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT", 372 *n_elem); 373 rc = -EINVAL; 374 goto err_out_dma_unmap; 375 } 376 return 0; 377 378 err_out_dma_unmap: 379 /* It would be better to call dma_unmap_sg() here, but it's messy */ 380 hisi_sas_dma_unmap(hisi_hba, task, *n_elem, 381 *n_elem_req, *n_elem_resp); 382 prep_out: 383 return rc; 384 } 385 386 static void hisi_sas_dif_dma_unmap(struct hisi_hba *hisi_hba, 387 struct sas_task *task, int n_elem_dif) 388 { 389 struct device *dev = hisi_hba->dev; 390 391 if (n_elem_dif) { 392 struct sas_ssp_task *ssp_task = &task->ssp_task; 393 struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; 394 395 dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), 396 scsi_prot_sg_count(scsi_cmnd), 397 task->data_dir); 398 } 399 } 400 401 static int hisi_sas_dif_dma_map(struct hisi_hba *hisi_hba, 402 int *n_elem_dif, struct sas_task *task) 403 { 404 struct device *dev = hisi_hba->dev; 405 struct sas_ssp_task *ssp_task; 406 struct scsi_cmnd *scsi_cmnd; 407 int rc; 408 409 if (task->num_scatter) { 410 ssp_task = &task->ssp_task; 411 scsi_cmnd = ssp_task->cmd; 412 413 if (scsi_prot_sg_count(scsi_cmnd)) { 414 *n_elem_dif = dma_map_sg(dev, 415 scsi_prot_sglist(scsi_cmnd), 416 scsi_prot_sg_count(scsi_cmnd), 417 task->data_dir); 418 419 if (!*n_elem_dif) 420 return -ENOMEM; 421 422 if (*n_elem_dif > HISI_SAS_SGE_DIF_PAGE_CNT) { 423 dev_err(dev, "task prep: n_elem_dif(%d) too large\n", 424 *n_elem_dif); 425 rc = -EINVAL; 426 goto err_out_dif_dma_unmap; 427 } 428 } 429 } 430 431 return 0; 432 433 err_out_dif_dma_unmap: 434 dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), 435 scsi_prot_sg_count(scsi_cmnd), task->data_dir); 436 return rc; 437 } 438 439 static int hisi_sas_task_prep(struct sas_task *task, 440 struct hisi_sas_dq **dq_pointer, 441 bool is_tmf, struct hisi_sas_tmf_task *tmf, 442 int *pass) 443 { 444 struct domain_device *device = task->dev; 445 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 446 struct hisi_sas_device *sas_dev = device->lldd_dev; 447 struct hisi_sas_port *port; 448 struct hisi_sas_slot *slot; 449 struct hisi_sas_cmd_hdr *cmd_hdr_base; 450 struct asd_sas_port *sas_port = device->port; 451 struct device *dev = hisi_hba->dev; 452 int dlvry_queue_slot, dlvry_queue, rc, slot_idx; 453 int n_elem = 0, n_elem_dif = 0, n_elem_req = 0, n_elem_resp = 0; 454 struct hisi_sas_dq *dq; 455 unsigned long flags; 456 int wr_q_index; 457 458 if (DEV_IS_GONE(sas_dev)) { 459 if (sas_dev) 460 dev_info(dev, "task prep: device %d not ready\n", 461 sas_dev->device_id); 462 else 463 dev_info(dev, "task prep: device %016llx not ready\n", 464 SAS_ADDR(device->sas_addr)); 465 466 return -ECOMM; 467 } 468 469 if (hisi_hba->reply_map) { 470 int cpu = raw_smp_processor_id(); 471 unsigned int dq_index = hisi_hba->reply_map[cpu]; 472 473 *dq_pointer = dq = &hisi_hba->dq[dq_index]; 474 } else { 475 *dq_pointer = dq = sas_dev->dq; 476 } 477 478 port = to_hisi_sas_port(sas_port); 479 if (port && !port->port_attached) { 480 dev_info(dev, "task prep: %s port%d not attach device\n", 481 (dev_is_sata(device)) ? 482 "SATA/STP" : "SAS", 483 device->port->id); 484 485 return -ECOMM; 486 } 487 488 rc = hisi_sas_dma_map(hisi_hba, task, &n_elem, 489 &n_elem_req, &n_elem_resp); 490 if (rc < 0) 491 goto prep_out; 492 493 if (!sas_protocol_ata(task->task_proto)) { 494 rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task); 495 if (rc < 0) 496 goto err_out_dma_unmap; 497 } 498 499 if (hisi_hba->hw->slot_index_alloc) 500 rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device); 501 else { 502 struct scsi_cmnd *scsi_cmnd = NULL; 503 504 if (task->uldd_task) { 505 struct ata_queued_cmd *qc; 506 507 if (dev_is_sata(device)) { 508 qc = task->uldd_task; 509 scsi_cmnd = qc->scsicmd; 510 } else { 511 scsi_cmnd = task->uldd_task; 512 } 513 } 514 rc = hisi_sas_slot_index_alloc(hisi_hba, scsi_cmnd); 515 } 516 if (rc < 0) 517 goto err_out_dif_dma_unmap; 518 519 slot_idx = rc; 520 slot = &hisi_hba->slot_info[slot_idx]; 521 522 spin_lock_irqsave(&dq->lock, flags); 523 wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq); 524 if (wr_q_index < 0) { 525 spin_unlock_irqrestore(&dq->lock, flags); 526 rc = -EAGAIN; 527 goto err_out_tag; 528 } 529 530 list_add_tail(&slot->delivery, &dq->list); 531 spin_unlock_irqrestore(&dq->lock, flags); 532 spin_lock_irqsave(&sas_dev->lock, flags); 533 list_add_tail(&slot->entry, &sas_dev->list); 534 spin_unlock_irqrestore(&sas_dev->lock, flags); 535 536 dlvry_queue = dq->id; 537 dlvry_queue_slot = wr_q_index; 538 539 slot->device_id = sas_dev->device_id; 540 slot->n_elem = n_elem; 541 slot->n_elem_dif = n_elem_dif; 542 slot->dlvry_queue = dlvry_queue; 543 slot->dlvry_queue_slot = dlvry_queue_slot; 544 cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue]; 545 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot]; 546 slot->task = task; 547 slot->port = port; 548 slot->tmf = tmf; 549 slot->is_internal = is_tmf; 550 task->lldd_task = slot; 551 552 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr)); 553 memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ); 554 memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ); 555 556 switch (task->task_proto) { 557 case SAS_PROTOCOL_SMP: 558 hisi_sas_task_prep_smp(hisi_hba, slot); 559 break; 560 case SAS_PROTOCOL_SSP: 561 hisi_sas_task_prep_ssp(hisi_hba, slot); 562 break; 563 case SAS_PROTOCOL_SATA: 564 case SAS_PROTOCOL_STP: 565 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: 566 hisi_sas_task_prep_ata(hisi_hba, slot); 567 break; 568 default: 569 dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n", 570 task->task_proto); 571 break; 572 } 573 574 spin_lock_irqsave(&task->task_state_lock, flags); 575 task->task_state_flags |= SAS_TASK_AT_INITIATOR; 576 spin_unlock_irqrestore(&task->task_state_lock, flags); 577 578 ++(*pass); 579 WRITE_ONCE(slot->ready, 1); 580 581 return 0; 582 583 err_out_tag: 584 hisi_sas_slot_index_free(hisi_hba, slot_idx); 585 err_out_dif_dma_unmap: 586 if (!sas_protocol_ata(task->task_proto)) 587 hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif); 588 err_out_dma_unmap: 589 hisi_sas_dma_unmap(hisi_hba, task, n_elem, 590 n_elem_req, n_elem_resp); 591 prep_out: 592 dev_err(dev, "task prep: failed[%d]!\n", rc); 593 return rc; 594 } 595 596 static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags, 597 bool is_tmf, struct hisi_sas_tmf_task *tmf) 598 { 599 u32 rc; 600 u32 pass = 0; 601 unsigned long flags; 602 struct hisi_hba *hisi_hba; 603 struct device *dev; 604 struct domain_device *device = task->dev; 605 struct asd_sas_port *sas_port = device->port; 606 struct hisi_sas_dq *dq = NULL; 607 608 if (!sas_port) { 609 struct task_status_struct *ts = &task->task_status; 610 611 ts->resp = SAS_TASK_UNDELIVERED; 612 ts->stat = SAS_PHY_DOWN; 613 /* 614 * libsas will use dev->port, should 615 * not call task_done for sata 616 */ 617 if (device->dev_type != SAS_SATA_DEV) 618 task->task_done(task); 619 return -ECOMM; 620 } 621 622 hisi_hba = dev_to_hisi_hba(device); 623 dev = hisi_hba->dev; 624 625 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) { 626 if (in_softirq()) 627 return -EINVAL; 628 629 down(&hisi_hba->sem); 630 up(&hisi_hba->sem); 631 } 632 633 /* protect task_prep and start_delivery sequence */ 634 rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass); 635 if (rc) 636 dev_err(dev, "task exec: failed[%d]!\n", rc); 637 638 if (likely(pass)) { 639 spin_lock_irqsave(&dq->lock, flags); 640 hisi_hba->hw->start_delivery(dq); 641 spin_unlock_irqrestore(&dq->lock, flags); 642 } 643 644 return rc; 645 } 646 647 static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no) 648 { 649 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 650 struct asd_sas_phy *sas_phy = &phy->sas_phy; 651 struct sas_ha_struct *sas_ha; 652 653 if (!phy->phy_attached) 654 return; 655 656 sas_ha = &hisi_hba->sha; 657 sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE); 658 659 if (sas_phy->phy) { 660 struct sas_phy *sphy = sas_phy->phy; 661 662 sphy->negotiated_linkrate = sas_phy->linkrate; 663 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; 664 sphy->maximum_linkrate_hw = 665 hisi_hba->hw->phy_get_max_linkrate(); 666 if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) 667 sphy->minimum_linkrate = phy->minimum_linkrate; 668 669 if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) 670 sphy->maximum_linkrate = phy->maximum_linkrate; 671 } 672 673 if (phy->phy_type & PORT_TYPE_SAS) { 674 struct sas_identify_frame *id; 675 676 id = (struct sas_identify_frame *)phy->frame_rcvd; 677 id->dev_type = phy->identify.device_type; 678 id->initiator_bits = SAS_PROTOCOL_ALL; 679 id->target_bits = phy->identify.target_port_protocols; 680 } else if (phy->phy_type & PORT_TYPE_SATA) { 681 /* Nothing */ 682 } 683 684 sas_phy->frame_rcvd_size = phy->frame_rcvd_size; 685 sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED); 686 } 687 688 static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device) 689 { 690 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 691 struct hisi_sas_device *sas_dev = NULL; 692 unsigned long flags; 693 int last = hisi_hba->last_dev_id; 694 int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES; 695 int i; 696 697 spin_lock_irqsave(&hisi_hba->lock, flags); 698 for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) { 699 if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) { 700 int queue = i % hisi_hba->queue_count; 701 struct hisi_sas_dq *dq = &hisi_hba->dq[queue]; 702 703 hisi_hba->devices[i].device_id = i; 704 sas_dev = &hisi_hba->devices[i]; 705 sas_dev->dev_status = HISI_SAS_DEV_INIT; 706 sas_dev->dev_type = device->dev_type; 707 sas_dev->hisi_hba = hisi_hba; 708 sas_dev->sas_device = device; 709 sas_dev->dq = dq; 710 spin_lock_init(&sas_dev->lock); 711 INIT_LIST_HEAD(&hisi_hba->devices[i].list); 712 break; 713 } 714 i++; 715 } 716 hisi_hba->last_dev_id = i; 717 spin_unlock_irqrestore(&hisi_hba->lock, flags); 718 719 return sas_dev; 720 } 721 722 #define HISI_SAS_SRST_ATA_DISK_CNT 3 723 static int hisi_sas_init_device(struct domain_device *device) 724 { 725 int rc = TMF_RESP_FUNC_COMPLETE; 726 struct scsi_lun lun; 727 struct hisi_sas_tmf_task tmf_task; 728 int retry = HISI_SAS_SRST_ATA_DISK_CNT; 729 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 730 struct device *dev = hisi_hba->dev; 731 struct sas_phy *local_phy; 732 733 switch (device->dev_type) { 734 case SAS_END_DEVICE: 735 int_to_scsilun(0, &lun); 736 737 tmf_task.tmf = TMF_CLEAR_TASK_SET; 738 rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun, 739 &tmf_task); 740 if (rc == TMF_RESP_FUNC_COMPLETE) 741 hisi_sas_release_task(hisi_hba, device); 742 break; 743 case SAS_SATA_DEV: 744 case SAS_SATA_PM: 745 case SAS_SATA_PM_PORT: 746 case SAS_SATA_PENDING: 747 /* 748 * send HARD RESET to clear previous affiliation of 749 * STP target port 750 */ 751 local_phy = sas_get_local_phy(device); 752 if (!scsi_is_sas_phy_local(local_phy) && 753 !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) { 754 unsigned long deadline = ata_deadline(jiffies, 20000); 755 struct sata_device *sata_dev = &device->sata_dev; 756 struct ata_host *ata_host = sata_dev->ata_host; 757 struct ata_port_operations *ops = ata_host->ops; 758 struct ata_port *ap = sata_dev->ap; 759 struct ata_link *link; 760 unsigned int classes; 761 762 ata_for_each_link(link, ap, EDGE) 763 rc = ops->hardreset(link, &classes, 764 deadline); 765 } 766 sas_put_local_phy(local_phy); 767 if (rc) { 768 dev_warn(dev, "SATA disk hardreset fail: %d\n", rc); 769 return rc; 770 } 771 772 while (retry-- > 0) { 773 rc = hisi_sas_softreset_ata_disk(device); 774 if (!rc) 775 break; 776 } 777 break; 778 default: 779 break; 780 } 781 782 return rc; 783 } 784 785 static int hisi_sas_dev_found(struct domain_device *device) 786 { 787 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 788 struct domain_device *parent_dev = device->parent; 789 struct hisi_sas_device *sas_dev; 790 struct device *dev = hisi_hba->dev; 791 int rc; 792 793 if (hisi_hba->hw->alloc_dev) 794 sas_dev = hisi_hba->hw->alloc_dev(device); 795 else 796 sas_dev = hisi_sas_alloc_dev(device); 797 if (!sas_dev) { 798 dev_err(dev, "fail alloc dev: max support %d devices\n", 799 HISI_SAS_MAX_DEVICES); 800 return -EINVAL; 801 } 802 803 device->lldd_dev = sas_dev; 804 hisi_hba->hw->setup_itct(hisi_hba, sas_dev); 805 806 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) { 807 int phy_no; 808 u8 phy_num = parent_dev->ex_dev.num_phys; 809 struct ex_phy *phy; 810 811 for (phy_no = 0; phy_no < phy_num; phy_no++) { 812 phy = &parent_dev->ex_dev.ex_phy[phy_no]; 813 if (SAS_ADDR(phy->attached_sas_addr) == 814 SAS_ADDR(device->sas_addr)) 815 break; 816 } 817 818 if (phy_no == phy_num) { 819 dev_info(dev, "dev found: no attached " 820 "dev:%016llx at ex:%016llx\n", 821 SAS_ADDR(device->sas_addr), 822 SAS_ADDR(parent_dev->sas_addr)); 823 rc = -EINVAL; 824 goto err_out; 825 } 826 } 827 828 dev_info(dev, "dev[%d:%x] found\n", 829 sas_dev->device_id, sas_dev->dev_type); 830 831 rc = hisi_sas_init_device(device); 832 if (rc) 833 goto err_out; 834 sas_dev->dev_status = HISI_SAS_DEV_NORMAL; 835 return 0; 836 837 err_out: 838 hisi_sas_dev_gone(device); 839 return rc; 840 } 841 842 int hisi_sas_slave_configure(struct scsi_device *sdev) 843 { 844 struct domain_device *dev = sdev_to_domain_dev(sdev); 845 int ret = sas_slave_configure(sdev); 846 847 if (ret) 848 return ret; 849 if (!dev_is_sata(dev)) 850 sas_change_queue_depth(sdev, 64); 851 852 return 0; 853 } 854 EXPORT_SYMBOL_GPL(hisi_sas_slave_configure); 855 856 void hisi_sas_scan_start(struct Scsi_Host *shost) 857 { 858 struct hisi_hba *hisi_hba = shost_priv(shost); 859 860 hisi_hba->hw->phys_init(hisi_hba); 861 } 862 EXPORT_SYMBOL_GPL(hisi_sas_scan_start); 863 864 int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time) 865 { 866 struct hisi_hba *hisi_hba = shost_priv(shost); 867 struct sas_ha_struct *sha = &hisi_hba->sha; 868 869 /* Wait for PHY up interrupt to occur */ 870 if (time < HZ) 871 return 0; 872 873 sas_drain_work(sha); 874 return 1; 875 } 876 EXPORT_SYMBOL_GPL(hisi_sas_scan_finished); 877 878 static void hisi_sas_phyup_work(struct work_struct *work) 879 { 880 struct hisi_sas_phy *phy = 881 container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]); 882 struct hisi_hba *hisi_hba = phy->hisi_hba; 883 struct asd_sas_phy *sas_phy = &phy->sas_phy; 884 int phy_no = sas_phy->id; 885 886 if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP) 887 hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no); 888 hisi_sas_bytes_dmaed(hisi_hba, phy_no); 889 } 890 891 static void hisi_sas_linkreset_work(struct work_struct *work) 892 { 893 struct hisi_sas_phy *phy = 894 container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]); 895 struct asd_sas_phy *sas_phy = &phy->sas_phy; 896 897 hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL); 898 } 899 900 static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = { 901 [HISI_PHYE_PHY_UP] = hisi_sas_phyup_work, 902 [HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work, 903 }; 904 905 bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy, 906 enum hisi_sas_phy_event event) 907 { 908 struct hisi_hba *hisi_hba = phy->hisi_hba; 909 910 if (WARN_ON(event >= HISI_PHYES_NUM)) 911 return false; 912 913 return queue_work(hisi_hba->wq, &phy->works[event]); 914 } 915 EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event); 916 917 static void hisi_sas_wait_phyup_timedout(struct timer_list *t) 918 { 919 struct hisi_sas_phy *phy = from_timer(phy, t, timer); 920 struct hisi_hba *hisi_hba = phy->hisi_hba; 921 struct device *dev = hisi_hba->dev; 922 int phy_no = phy->sas_phy.id; 923 924 dev_warn(dev, "phy%d wait phyup timeout, issuing link reset\n", phy_no); 925 hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); 926 } 927 928 void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no) 929 { 930 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 931 struct device *dev = hisi_hba->dev; 932 933 if (!timer_pending(&phy->timer)) { 934 dev_dbg(dev, "phy%d OOB ready\n", phy_no); 935 phy->timer.expires = jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT * HZ; 936 add_timer(&phy->timer); 937 } 938 } 939 EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready); 940 941 static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no) 942 { 943 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 944 struct asd_sas_phy *sas_phy = &phy->sas_phy; 945 int i; 946 947 phy->hisi_hba = hisi_hba; 948 phy->port = NULL; 949 phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS; 950 phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate(); 951 sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0; 952 sas_phy->class = SAS; 953 sas_phy->iproto = SAS_PROTOCOL_ALL; 954 sas_phy->tproto = 0; 955 sas_phy->type = PHY_TYPE_PHYSICAL; 956 sas_phy->role = PHY_ROLE_INITIATOR; 957 sas_phy->oob_mode = OOB_NOT_CONNECTED; 958 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; 959 sas_phy->id = phy_no; 960 sas_phy->sas_addr = &hisi_hba->sas_addr[0]; 961 sas_phy->frame_rcvd = &phy->frame_rcvd[0]; 962 sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata; 963 sas_phy->lldd_phy = phy; 964 965 for (i = 0; i < HISI_PHYES_NUM; i++) 966 INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]); 967 968 spin_lock_init(&phy->lock); 969 970 timer_setup(&phy->timer, hisi_sas_wait_phyup_timedout, 0); 971 } 972 973 /* Wrapper to ensure we track hisi_sas_phy.enable properly */ 974 void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no, int enable) 975 { 976 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 977 struct asd_sas_phy *aphy = &phy->sas_phy; 978 struct sas_phy *sphy = aphy->phy; 979 unsigned long flags; 980 981 spin_lock_irqsave(&phy->lock, flags); 982 983 if (enable) { 984 /* We may have been enabled already; if so, don't touch */ 985 if (!phy->enable) 986 sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; 987 hisi_hba->hw->phy_start(hisi_hba, phy_no); 988 } else { 989 sphy->negotiated_linkrate = SAS_PHY_DISABLED; 990 hisi_hba->hw->phy_disable(hisi_hba, phy_no); 991 } 992 phy->enable = enable; 993 spin_unlock_irqrestore(&phy->lock, flags); 994 } 995 EXPORT_SYMBOL_GPL(hisi_sas_phy_enable); 996 997 static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy) 998 { 999 struct sas_ha_struct *sas_ha = sas_phy->ha; 1000 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; 1001 struct hisi_sas_phy *phy = sas_phy->lldd_phy; 1002 struct asd_sas_port *sas_port = sas_phy->port; 1003 struct hisi_sas_port *port = to_hisi_sas_port(sas_port); 1004 unsigned long flags; 1005 1006 if (!sas_port) 1007 return; 1008 1009 spin_lock_irqsave(&hisi_hba->lock, flags); 1010 port->port_attached = 1; 1011 port->id = phy->port_id; 1012 phy->port = port; 1013 sas_port->lldd_port = port; 1014 spin_unlock_irqrestore(&hisi_hba->lock, flags); 1015 } 1016 1017 static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task, 1018 struct hisi_sas_slot *slot) 1019 { 1020 if (task) { 1021 unsigned long flags; 1022 struct task_status_struct *ts; 1023 1024 ts = &task->task_status; 1025 1026 ts->resp = SAS_TASK_COMPLETE; 1027 ts->stat = SAS_ABORTED_TASK; 1028 spin_lock_irqsave(&task->task_state_lock, flags); 1029 task->task_state_flags &= 1030 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); 1031 if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP) 1032 task->task_state_flags |= SAS_TASK_STATE_DONE; 1033 spin_unlock_irqrestore(&task->task_state_lock, flags); 1034 } 1035 1036 hisi_sas_slot_task_free(hisi_hba, task, slot); 1037 } 1038 1039 static void hisi_sas_release_task(struct hisi_hba *hisi_hba, 1040 struct domain_device *device) 1041 { 1042 struct hisi_sas_slot *slot, *slot2; 1043 struct hisi_sas_device *sas_dev = device->lldd_dev; 1044 1045 list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry) 1046 hisi_sas_do_release_task(hisi_hba, slot->task, slot); 1047 } 1048 1049 void hisi_sas_release_tasks(struct hisi_hba *hisi_hba) 1050 { 1051 struct hisi_sas_device *sas_dev; 1052 struct domain_device *device; 1053 int i; 1054 1055 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1056 sas_dev = &hisi_hba->devices[i]; 1057 device = sas_dev->sas_device; 1058 1059 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || 1060 !device) 1061 continue; 1062 1063 hisi_sas_release_task(hisi_hba, device); 1064 } 1065 } 1066 EXPORT_SYMBOL_GPL(hisi_sas_release_tasks); 1067 1068 static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba, 1069 struct domain_device *device) 1070 { 1071 if (hisi_hba->hw->dereg_device) 1072 hisi_hba->hw->dereg_device(hisi_hba, device); 1073 } 1074 1075 static void hisi_sas_dev_gone(struct domain_device *device) 1076 { 1077 struct hisi_sas_device *sas_dev = device->lldd_dev; 1078 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1079 struct device *dev = hisi_hba->dev; 1080 1081 dev_info(dev, "dev[%d:%x] is gone\n", 1082 sas_dev->device_id, sas_dev->dev_type); 1083 1084 if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) { 1085 hisi_sas_internal_task_abort(hisi_hba, device, 1086 HISI_SAS_INT_ABT_DEV, 0); 1087 1088 hisi_sas_dereg_device(hisi_hba, device); 1089 1090 down(&hisi_hba->sem); 1091 hisi_hba->hw->clear_itct(hisi_hba, sas_dev); 1092 up(&hisi_hba->sem); 1093 device->lldd_dev = NULL; 1094 } 1095 1096 if (hisi_hba->hw->free_device) 1097 hisi_hba->hw->free_device(sas_dev); 1098 sas_dev->dev_type = SAS_PHY_UNUSED; 1099 } 1100 1101 static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags) 1102 { 1103 return hisi_sas_task_exec(task, gfp_flags, 0, NULL); 1104 } 1105 1106 static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no, 1107 struct sas_phy_linkrates *r) 1108 { 1109 struct sas_phy_linkrates _r; 1110 1111 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1112 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1113 enum sas_linkrate min, max; 1114 1115 if (r->minimum_linkrate > SAS_LINK_RATE_1_5_GBPS) 1116 return -EINVAL; 1117 1118 if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) { 1119 max = sas_phy->phy->maximum_linkrate; 1120 min = r->minimum_linkrate; 1121 } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) { 1122 max = r->maximum_linkrate; 1123 min = sas_phy->phy->minimum_linkrate; 1124 } else 1125 return -EINVAL; 1126 1127 _r.maximum_linkrate = max; 1128 _r.minimum_linkrate = min; 1129 1130 sas_phy->phy->maximum_linkrate = max; 1131 sas_phy->phy->minimum_linkrate = min; 1132 1133 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 1134 msleep(100); 1135 hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r); 1136 hisi_sas_phy_enable(hisi_hba, phy_no, 1); 1137 1138 return 0; 1139 } 1140 1141 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, 1142 void *funcdata) 1143 { 1144 struct sas_ha_struct *sas_ha = sas_phy->ha; 1145 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; 1146 int phy_no = sas_phy->id; 1147 1148 switch (func) { 1149 case PHY_FUNC_HARD_RESET: 1150 hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no); 1151 break; 1152 1153 case PHY_FUNC_LINK_RESET: 1154 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 1155 msleep(100); 1156 hisi_sas_phy_enable(hisi_hba, phy_no, 1); 1157 break; 1158 1159 case PHY_FUNC_DISABLE: 1160 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 1161 break; 1162 1163 case PHY_FUNC_SET_LINK_RATE: 1164 return hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata); 1165 case PHY_FUNC_GET_EVENTS: 1166 if (hisi_hba->hw->get_events) { 1167 hisi_hba->hw->get_events(hisi_hba, phy_no); 1168 break; 1169 } 1170 /* fallthru */ 1171 case PHY_FUNC_RELEASE_SPINUP_HOLD: 1172 default: 1173 return -EOPNOTSUPP; 1174 } 1175 return 0; 1176 } 1177 1178 static void hisi_sas_task_done(struct sas_task *task) 1179 { 1180 del_timer(&task->slow_task->timer); 1181 complete(&task->slow_task->completion); 1182 } 1183 1184 static void hisi_sas_tmf_timedout(struct timer_list *t) 1185 { 1186 struct sas_task_slow *slow = from_timer(slow, t, timer); 1187 struct sas_task *task = slow->task; 1188 unsigned long flags; 1189 bool is_completed = true; 1190 1191 spin_lock_irqsave(&task->task_state_lock, flags); 1192 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { 1193 task->task_state_flags |= SAS_TASK_STATE_ABORTED; 1194 is_completed = false; 1195 } 1196 spin_unlock_irqrestore(&task->task_state_lock, flags); 1197 1198 if (!is_completed) 1199 complete(&task->slow_task->completion); 1200 } 1201 1202 #define TASK_TIMEOUT 20 1203 #define TASK_RETRY 3 1204 #define INTERNAL_ABORT_TIMEOUT 6 1205 static int hisi_sas_exec_internal_tmf_task(struct domain_device *device, 1206 void *parameter, u32 para_len, 1207 struct hisi_sas_tmf_task *tmf) 1208 { 1209 struct hisi_sas_device *sas_dev = device->lldd_dev; 1210 struct hisi_hba *hisi_hba = sas_dev->hisi_hba; 1211 struct device *dev = hisi_hba->dev; 1212 struct sas_task *task; 1213 int res, retry; 1214 1215 for (retry = 0; retry < TASK_RETRY; retry++) { 1216 task = sas_alloc_slow_task(GFP_KERNEL); 1217 if (!task) 1218 return -ENOMEM; 1219 1220 task->dev = device; 1221 task->task_proto = device->tproto; 1222 1223 if (dev_is_sata(device)) { 1224 task->ata_task.device_control_reg_update = 1; 1225 memcpy(&task->ata_task.fis, parameter, para_len); 1226 } else { 1227 memcpy(&task->ssp_task, parameter, para_len); 1228 } 1229 task->task_done = hisi_sas_task_done; 1230 1231 task->slow_task->timer.function = hisi_sas_tmf_timedout; 1232 task->slow_task->timer.expires = jiffies + TASK_TIMEOUT * HZ; 1233 add_timer(&task->slow_task->timer); 1234 1235 res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf); 1236 1237 if (res) { 1238 del_timer(&task->slow_task->timer); 1239 dev_err(dev, "abort tmf: executing internal task failed: %d\n", 1240 res); 1241 goto ex_err; 1242 } 1243 1244 wait_for_completion(&task->slow_task->completion); 1245 res = TMF_RESP_FUNC_FAILED; 1246 /* Even TMF timed out, return direct. */ 1247 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { 1248 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { 1249 struct hisi_sas_slot *slot = task->lldd_task; 1250 1251 dev_err(dev, "abort tmf: TMF task timeout and not done\n"); 1252 if (slot) { 1253 struct hisi_sas_cq *cq = 1254 &hisi_hba->cq[slot->dlvry_queue]; 1255 /* 1256 * flush tasklet to avoid free'ing task 1257 * before using task in IO completion 1258 */ 1259 tasklet_kill(&cq->tasklet); 1260 slot->task = NULL; 1261 } 1262 1263 goto ex_err; 1264 } else 1265 dev_err(dev, "abort tmf: TMF task timeout\n"); 1266 } 1267 1268 if (task->task_status.resp == SAS_TASK_COMPLETE && 1269 task->task_status.stat == TMF_RESP_FUNC_COMPLETE) { 1270 res = TMF_RESP_FUNC_COMPLETE; 1271 break; 1272 } 1273 1274 if (task->task_status.resp == SAS_TASK_COMPLETE && 1275 task->task_status.stat == TMF_RESP_FUNC_SUCC) { 1276 res = TMF_RESP_FUNC_SUCC; 1277 break; 1278 } 1279 1280 if (task->task_status.resp == SAS_TASK_COMPLETE && 1281 task->task_status.stat == SAS_DATA_UNDERRUN) { 1282 /* no error, but return the number of bytes of 1283 * underrun 1284 */ 1285 dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x sts 0x%x underrun\n", 1286 SAS_ADDR(device->sas_addr), 1287 task->task_status.resp, 1288 task->task_status.stat); 1289 res = task->task_status.residual; 1290 break; 1291 } 1292 1293 if (task->task_status.resp == SAS_TASK_COMPLETE && 1294 task->task_status.stat == SAS_DATA_OVERRUN) { 1295 dev_warn(dev, "abort tmf: blocked task error\n"); 1296 res = -EMSGSIZE; 1297 break; 1298 } 1299 1300 if (task->task_status.resp == SAS_TASK_COMPLETE && 1301 task->task_status.stat == SAS_OPEN_REJECT) { 1302 dev_warn(dev, "abort tmf: open reject failed\n"); 1303 res = -EIO; 1304 } else { 1305 dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x status 0x%x\n", 1306 SAS_ADDR(device->sas_addr), 1307 task->task_status.resp, 1308 task->task_status.stat); 1309 } 1310 sas_free_task(task); 1311 task = NULL; 1312 } 1313 ex_err: 1314 if (retry == TASK_RETRY) 1315 dev_warn(dev, "abort tmf: executing internal task failed!\n"); 1316 sas_free_task(task); 1317 return res; 1318 } 1319 1320 static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev, 1321 bool reset, int pmp, u8 *fis) 1322 { 1323 struct ata_taskfile tf; 1324 1325 ata_tf_init(dev, &tf); 1326 if (reset) 1327 tf.ctl |= ATA_SRST; 1328 else 1329 tf.ctl &= ~ATA_SRST; 1330 tf.command = ATA_CMD_DEV_RESET; 1331 ata_tf_to_fis(&tf, pmp, 0, fis); 1332 } 1333 1334 static int hisi_sas_softreset_ata_disk(struct domain_device *device) 1335 { 1336 u8 fis[20] = {0}; 1337 struct ata_port *ap = device->sata_dev.ap; 1338 struct ata_link *link; 1339 int rc = TMF_RESP_FUNC_FAILED; 1340 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1341 struct device *dev = hisi_hba->dev; 1342 int s = sizeof(struct host_to_dev_fis); 1343 1344 ata_for_each_link(link, ap, EDGE) { 1345 int pmp = sata_srst_pmp(link); 1346 1347 hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis); 1348 rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL); 1349 if (rc != TMF_RESP_FUNC_COMPLETE) 1350 break; 1351 } 1352 1353 if (rc == TMF_RESP_FUNC_COMPLETE) { 1354 ata_for_each_link(link, ap, EDGE) { 1355 int pmp = sata_srst_pmp(link); 1356 1357 hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis); 1358 rc = hisi_sas_exec_internal_tmf_task(device, fis, 1359 s, NULL); 1360 if (rc != TMF_RESP_FUNC_COMPLETE) 1361 dev_err(dev, "ata disk de-reset failed\n"); 1362 } 1363 } else { 1364 dev_err(dev, "ata disk reset failed\n"); 1365 } 1366 1367 if (rc == TMF_RESP_FUNC_COMPLETE) 1368 hisi_sas_release_task(hisi_hba, device); 1369 1370 return rc; 1371 } 1372 1373 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device, 1374 u8 *lun, struct hisi_sas_tmf_task *tmf) 1375 { 1376 struct sas_ssp_task ssp_task; 1377 1378 if (!(device->tproto & SAS_PROTOCOL_SSP)) 1379 return TMF_RESP_FUNC_ESUPP; 1380 1381 memcpy(ssp_task.LUN, lun, 8); 1382 1383 return hisi_sas_exec_internal_tmf_task(device, &ssp_task, 1384 sizeof(ssp_task), tmf); 1385 } 1386 1387 static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba) 1388 { 1389 u32 state = hisi_hba->hw->get_phys_state(hisi_hba); 1390 int i; 1391 1392 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1393 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; 1394 struct domain_device *device = sas_dev->sas_device; 1395 struct asd_sas_port *sas_port; 1396 struct hisi_sas_port *port; 1397 struct hisi_sas_phy *phy = NULL; 1398 struct asd_sas_phy *sas_phy; 1399 1400 if ((sas_dev->dev_type == SAS_PHY_UNUSED) 1401 || !device || !device->port) 1402 continue; 1403 1404 sas_port = device->port; 1405 port = to_hisi_sas_port(sas_port); 1406 1407 list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) 1408 if (state & BIT(sas_phy->id)) { 1409 phy = sas_phy->lldd_phy; 1410 break; 1411 } 1412 1413 if (phy) { 1414 port->id = phy->port_id; 1415 1416 /* Update linkrate of directly attached device. */ 1417 if (!device->parent) 1418 device->linkrate = phy->sas_phy.linkrate; 1419 1420 hisi_hba->hw->setup_itct(hisi_hba, sas_dev); 1421 } else 1422 port->id = 0xff; 1423 } 1424 } 1425 1426 static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state, 1427 u32 state) 1428 { 1429 struct sas_ha_struct *sas_ha = &hisi_hba->sha; 1430 struct asd_sas_port *_sas_port = NULL; 1431 int phy_no; 1432 1433 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { 1434 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1435 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1436 struct asd_sas_port *sas_port = sas_phy->port; 1437 bool do_port_check = !!(_sas_port != sas_port); 1438 1439 if (!sas_phy->phy->enabled) 1440 continue; 1441 1442 /* Report PHY state change to libsas */ 1443 if (state & BIT(phy_no)) { 1444 if (do_port_check && sas_port && sas_port->port_dev) { 1445 struct domain_device *dev = sas_port->port_dev; 1446 1447 _sas_port = sas_port; 1448 1449 if (DEV_IS_EXPANDER(dev->dev_type)) 1450 sas_ha->notify_port_event(sas_phy, 1451 PORTE_BROADCAST_RCVD); 1452 } 1453 } else { 1454 hisi_sas_phy_down(hisi_hba, phy_no, 0); 1455 } 1456 1457 } 1458 } 1459 1460 static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba) 1461 { 1462 struct hisi_sas_device *sas_dev; 1463 struct domain_device *device; 1464 int i; 1465 1466 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1467 sas_dev = &hisi_hba->devices[i]; 1468 device = sas_dev->sas_device; 1469 1470 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device) 1471 continue; 1472 1473 hisi_sas_init_device(device); 1474 } 1475 } 1476 1477 static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba, 1478 struct asd_sas_port *sas_port, 1479 struct domain_device *device) 1480 { 1481 struct hisi_sas_tmf_task tmf_task = { .force_phy = 1 }; 1482 struct ata_port *ap = device->sata_dev.ap; 1483 struct device *dev = hisi_hba->dev; 1484 int s = sizeof(struct host_to_dev_fis); 1485 int rc = TMF_RESP_FUNC_FAILED; 1486 struct asd_sas_phy *sas_phy; 1487 struct ata_link *link; 1488 u8 fis[20] = {0}; 1489 u32 state; 1490 1491 state = hisi_hba->hw->get_phys_state(hisi_hba); 1492 list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) { 1493 if (!(state & BIT(sas_phy->id))) 1494 continue; 1495 1496 ata_for_each_link(link, ap, EDGE) { 1497 int pmp = sata_srst_pmp(link); 1498 1499 tmf_task.phy_id = sas_phy->id; 1500 hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis); 1501 rc = hisi_sas_exec_internal_tmf_task(device, fis, s, 1502 &tmf_task); 1503 if (rc != TMF_RESP_FUNC_COMPLETE) { 1504 dev_err(dev, "phy%d ata reset failed rc=%d\n", 1505 sas_phy->id, rc); 1506 break; 1507 } 1508 } 1509 } 1510 } 1511 1512 static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba) 1513 { 1514 struct device *dev = hisi_hba->dev; 1515 int port_no, rc, i; 1516 1517 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1518 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; 1519 struct domain_device *device = sas_dev->sas_device; 1520 1521 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device) 1522 continue; 1523 1524 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1525 HISI_SAS_INT_ABT_DEV, 0); 1526 if (rc < 0) 1527 dev_err(dev, "STP reject: abort dev failed %d\n", rc); 1528 } 1529 1530 for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) { 1531 struct hisi_sas_port *port = &hisi_hba->port[port_no]; 1532 struct asd_sas_port *sas_port = &port->sas_port; 1533 struct domain_device *port_dev = sas_port->port_dev; 1534 struct domain_device *device; 1535 1536 if (!port_dev || !DEV_IS_EXPANDER(port_dev->dev_type)) 1537 continue; 1538 1539 /* Try to find a SATA device */ 1540 list_for_each_entry(device, &sas_port->dev_list, 1541 dev_list_node) { 1542 if (dev_is_sata(device)) { 1543 hisi_sas_send_ata_reset_each_phy(hisi_hba, 1544 sas_port, 1545 device); 1546 break; 1547 } 1548 } 1549 } 1550 } 1551 1552 void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba) 1553 { 1554 struct Scsi_Host *shost = hisi_hba->shost; 1555 1556 down(&hisi_hba->sem); 1557 hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba); 1558 1559 scsi_block_requests(shost); 1560 hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000); 1561 1562 if (timer_pending(&hisi_hba->timer)) 1563 del_timer_sync(&hisi_hba->timer); 1564 1565 set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1566 } 1567 EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare); 1568 1569 void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba) 1570 { 1571 struct Scsi_Host *shost = hisi_hba->shost; 1572 u32 state; 1573 1574 /* Init and wait for PHYs to come up and all libsas event finished. */ 1575 hisi_hba->hw->phys_init(hisi_hba); 1576 msleep(1000); 1577 hisi_sas_refresh_port_id(hisi_hba); 1578 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1579 up(&hisi_hba->sem); 1580 1581 if (hisi_hba->reject_stp_links_msk) 1582 hisi_sas_terminate_stp_reject(hisi_hba); 1583 hisi_sas_reset_init_all_devices(hisi_hba); 1584 scsi_unblock_requests(shost); 1585 clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags); 1586 1587 state = hisi_hba->hw->get_phys_state(hisi_hba); 1588 hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state, state); 1589 } 1590 EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done); 1591 1592 static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba) 1593 { 1594 struct device *dev = hisi_hba->dev; 1595 struct Scsi_Host *shost = hisi_hba->shost; 1596 int rc; 1597 1598 if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct) 1599 queue_work(hisi_hba->wq, &hisi_hba->debugfs_work); 1600 1601 if (!hisi_hba->hw->soft_reset) 1602 return -1; 1603 1604 if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) 1605 return -1; 1606 1607 dev_info(dev, "controller resetting...\n"); 1608 hisi_sas_controller_reset_prepare(hisi_hba); 1609 1610 rc = hisi_hba->hw->soft_reset(hisi_hba); 1611 if (rc) { 1612 dev_warn(dev, "controller reset failed (%d)\n", rc); 1613 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1614 up(&hisi_hba->sem); 1615 scsi_unblock_requests(shost); 1616 clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags); 1617 return rc; 1618 } 1619 1620 hisi_sas_controller_reset_done(hisi_hba); 1621 dev_info(dev, "controller reset complete\n"); 1622 1623 return 0; 1624 } 1625 1626 static int hisi_sas_abort_task(struct sas_task *task) 1627 { 1628 struct scsi_lun lun; 1629 struct hisi_sas_tmf_task tmf_task; 1630 struct domain_device *device = task->dev; 1631 struct hisi_sas_device *sas_dev = device->lldd_dev; 1632 struct hisi_hba *hisi_hba; 1633 struct device *dev; 1634 int rc = TMF_RESP_FUNC_FAILED; 1635 unsigned long flags; 1636 1637 if (!sas_dev) 1638 return TMF_RESP_FUNC_FAILED; 1639 1640 hisi_hba = dev_to_hisi_hba(task->dev); 1641 dev = hisi_hba->dev; 1642 1643 spin_lock_irqsave(&task->task_state_lock, flags); 1644 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 1645 struct hisi_sas_slot *slot = task->lldd_task; 1646 struct hisi_sas_cq *cq; 1647 1648 if (slot) { 1649 /* 1650 * flush tasklet to avoid free'ing task 1651 * before using task in IO completion 1652 */ 1653 cq = &hisi_hba->cq[slot->dlvry_queue]; 1654 tasklet_kill(&cq->tasklet); 1655 } 1656 spin_unlock_irqrestore(&task->task_state_lock, flags); 1657 rc = TMF_RESP_FUNC_COMPLETE; 1658 goto out; 1659 } 1660 task->task_state_flags |= SAS_TASK_STATE_ABORTED; 1661 spin_unlock_irqrestore(&task->task_state_lock, flags); 1662 1663 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { 1664 struct scsi_cmnd *cmnd = task->uldd_task; 1665 struct hisi_sas_slot *slot = task->lldd_task; 1666 u16 tag = slot->idx; 1667 int rc2; 1668 1669 int_to_scsilun(cmnd->device->lun, &lun); 1670 tmf_task.tmf = TMF_ABORT_TASK; 1671 tmf_task.tag_of_task_to_be_managed = tag; 1672 1673 rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, 1674 &tmf_task); 1675 1676 rc2 = hisi_sas_internal_task_abort(hisi_hba, device, 1677 HISI_SAS_INT_ABT_CMD, tag); 1678 if (rc2 < 0) { 1679 dev_err(dev, "abort task: internal abort (%d)\n", rc2); 1680 return TMF_RESP_FUNC_FAILED; 1681 } 1682 1683 /* 1684 * If the TMF finds that the IO is not in the device and also 1685 * the internal abort does not succeed, then it is safe to 1686 * free the slot. 1687 * Note: if the internal abort succeeds then the slot 1688 * will have already been completed 1689 */ 1690 if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) { 1691 if (task->lldd_task) 1692 hisi_sas_do_release_task(hisi_hba, task, slot); 1693 } 1694 } else if (task->task_proto & SAS_PROTOCOL_SATA || 1695 task->task_proto & SAS_PROTOCOL_STP) { 1696 if (task->dev->dev_type == SAS_SATA_DEV) { 1697 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1698 HISI_SAS_INT_ABT_DEV, 1699 0); 1700 if (rc < 0) { 1701 dev_err(dev, "abort task: internal abort failed\n"); 1702 goto out; 1703 } 1704 hisi_sas_dereg_device(hisi_hba, device); 1705 rc = hisi_sas_softreset_ata_disk(device); 1706 } 1707 } else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) { 1708 /* SMP */ 1709 struct hisi_sas_slot *slot = task->lldd_task; 1710 u32 tag = slot->idx; 1711 struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue]; 1712 1713 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1714 HISI_SAS_INT_ABT_CMD, tag); 1715 if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) && 1716 task->lldd_task) { 1717 /* 1718 * flush tasklet to avoid free'ing task 1719 * before using task in IO completion 1720 */ 1721 tasklet_kill(&cq->tasklet); 1722 slot->task = NULL; 1723 } 1724 } 1725 1726 out: 1727 if (rc != TMF_RESP_FUNC_COMPLETE) 1728 dev_notice(dev, "abort task: rc=%d\n", rc); 1729 return rc; 1730 } 1731 1732 static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun) 1733 { 1734 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1735 struct device *dev = hisi_hba->dev; 1736 struct hisi_sas_tmf_task tmf_task; 1737 int rc; 1738 1739 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1740 HISI_SAS_INT_ABT_DEV, 0); 1741 if (rc < 0) { 1742 dev_err(dev, "abort task set: internal abort rc=%d\n", rc); 1743 return TMF_RESP_FUNC_FAILED; 1744 } 1745 hisi_sas_dereg_device(hisi_hba, device); 1746 1747 tmf_task.tmf = TMF_ABORT_TASK_SET; 1748 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task); 1749 1750 if (rc == TMF_RESP_FUNC_COMPLETE) 1751 hisi_sas_release_task(hisi_hba, device); 1752 1753 return rc; 1754 } 1755 1756 static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun) 1757 { 1758 struct hisi_sas_tmf_task tmf_task; 1759 int rc; 1760 1761 tmf_task.tmf = TMF_CLEAR_ACA; 1762 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task); 1763 1764 return rc; 1765 } 1766 1767 static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device) 1768 { 1769 struct sas_phy *local_phy = sas_get_local_phy(device); 1770 struct hisi_sas_device *sas_dev = device->lldd_dev; 1771 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1772 struct sas_ha_struct *sas_ha = &hisi_hba->sha; 1773 struct asd_sas_phy *sas_phy = sas_ha->sas_phy[local_phy->number]; 1774 struct hisi_sas_phy *phy = container_of(sas_phy, 1775 struct hisi_sas_phy, sas_phy); 1776 DECLARE_COMPLETION_ONSTACK(phyreset); 1777 int rc, reset_type; 1778 1779 if (scsi_is_sas_phy_local(local_phy)) { 1780 phy->in_reset = 1; 1781 phy->reset_completion = &phyreset; 1782 } 1783 1784 reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT || 1785 !dev_is_sata(device)) ? 1 : 0; 1786 1787 rc = sas_phy_reset(local_phy, reset_type); 1788 sas_put_local_phy(local_phy); 1789 1790 if (scsi_is_sas_phy_local(local_phy)) { 1791 int ret = wait_for_completion_timeout(&phyreset, 2 * HZ); 1792 unsigned long flags; 1793 1794 spin_lock_irqsave(&phy->lock, flags); 1795 phy->reset_completion = NULL; 1796 phy->in_reset = 0; 1797 spin_unlock_irqrestore(&phy->lock, flags); 1798 1799 /* report PHY down if timed out */ 1800 if (!ret) 1801 hisi_sas_phy_down(hisi_hba, sas_phy->id, 0); 1802 } else if (sas_dev->dev_status != HISI_SAS_DEV_INIT) { 1803 /* 1804 * If in init state, we rely on caller to wait for link to be 1805 * ready; otherwise, delay. 1806 */ 1807 msleep(2000); 1808 } 1809 1810 return rc; 1811 } 1812 1813 static int hisi_sas_I_T_nexus_reset(struct domain_device *device) 1814 { 1815 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1816 struct device *dev = hisi_hba->dev; 1817 int rc; 1818 1819 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1820 HISI_SAS_INT_ABT_DEV, 0); 1821 if (rc < 0) { 1822 dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc); 1823 return TMF_RESP_FUNC_FAILED; 1824 } 1825 hisi_sas_dereg_device(hisi_hba, device); 1826 1827 if (dev_is_sata(device)) { 1828 rc = hisi_sas_softreset_ata_disk(device); 1829 if (rc == TMF_RESP_FUNC_FAILED) 1830 return TMF_RESP_FUNC_FAILED; 1831 } 1832 1833 rc = hisi_sas_debug_I_T_nexus_reset(device); 1834 1835 if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) 1836 hisi_sas_release_task(hisi_hba, device); 1837 1838 return rc; 1839 } 1840 1841 static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun) 1842 { 1843 struct hisi_sas_device *sas_dev = device->lldd_dev; 1844 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1845 struct device *dev = hisi_hba->dev; 1846 int rc = TMF_RESP_FUNC_FAILED; 1847 1848 if (dev_is_sata(device)) { 1849 struct sas_phy *phy; 1850 1851 /* Clear internal IO and then hardreset */ 1852 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1853 HISI_SAS_INT_ABT_DEV, 0); 1854 if (rc < 0) { 1855 dev_err(dev, "lu_reset: internal abort failed\n"); 1856 goto out; 1857 } 1858 hisi_sas_dereg_device(hisi_hba, device); 1859 1860 phy = sas_get_local_phy(device); 1861 1862 rc = sas_phy_reset(phy, 1); 1863 1864 if (rc == 0) 1865 hisi_sas_release_task(hisi_hba, device); 1866 sas_put_local_phy(phy); 1867 } else { 1868 struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET }; 1869 1870 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1871 HISI_SAS_INT_ABT_DEV, 0); 1872 if (rc < 0) { 1873 dev_err(dev, "lu_reset: internal abort failed\n"); 1874 goto out; 1875 } 1876 hisi_sas_dereg_device(hisi_hba, device); 1877 1878 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task); 1879 if (rc == TMF_RESP_FUNC_COMPLETE) 1880 hisi_sas_release_task(hisi_hba, device); 1881 } 1882 out: 1883 if (rc != TMF_RESP_FUNC_COMPLETE) 1884 dev_err(dev, "lu_reset: for device[%d]:rc= %d\n", 1885 sas_dev->device_id, rc); 1886 return rc; 1887 } 1888 1889 static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha) 1890 { 1891 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; 1892 struct device *dev = hisi_hba->dev; 1893 HISI_SAS_DECLARE_RST_WORK_ON_STACK(r); 1894 int rc, i; 1895 1896 queue_work(hisi_hba->wq, &r.work); 1897 wait_for_completion(r.completion); 1898 if (!r.done) 1899 return TMF_RESP_FUNC_FAILED; 1900 1901 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1902 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; 1903 struct domain_device *device = sas_dev->sas_device; 1904 1905 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device || 1906 DEV_IS_EXPANDER(device->dev_type)) 1907 continue; 1908 1909 rc = hisi_sas_debug_I_T_nexus_reset(device); 1910 if (rc != TMF_RESP_FUNC_COMPLETE) 1911 dev_info(dev, "clear nexus ha: for device[%d] rc=%d\n", 1912 sas_dev->device_id, rc); 1913 } 1914 1915 hisi_sas_release_tasks(hisi_hba); 1916 1917 return TMF_RESP_FUNC_COMPLETE; 1918 } 1919 1920 static int hisi_sas_query_task(struct sas_task *task) 1921 { 1922 struct scsi_lun lun; 1923 struct hisi_sas_tmf_task tmf_task; 1924 int rc = TMF_RESP_FUNC_FAILED; 1925 1926 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { 1927 struct scsi_cmnd *cmnd = task->uldd_task; 1928 struct domain_device *device = task->dev; 1929 struct hisi_sas_slot *slot = task->lldd_task; 1930 u32 tag = slot->idx; 1931 1932 int_to_scsilun(cmnd->device->lun, &lun); 1933 tmf_task.tmf = TMF_QUERY_TASK; 1934 tmf_task.tag_of_task_to_be_managed = tag; 1935 1936 rc = hisi_sas_debug_issue_ssp_tmf(device, 1937 lun.scsi_lun, 1938 &tmf_task); 1939 switch (rc) { 1940 /* The task is still in Lun, release it then */ 1941 case TMF_RESP_FUNC_SUCC: 1942 /* The task is not in Lun or failed, reset the phy */ 1943 case TMF_RESP_FUNC_FAILED: 1944 case TMF_RESP_FUNC_COMPLETE: 1945 break; 1946 default: 1947 rc = TMF_RESP_FUNC_FAILED; 1948 break; 1949 } 1950 } 1951 return rc; 1952 } 1953 1954 static int 1955 hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, 1956 struct sas_task *task, int abort_flag, 1957 int task_tag, struct hisi_sas_dq *dq) 1958 { 1959 struct domain_device *device = task->dev; 1960 struct hisi_sas_device *sas_dev = device->lldd_dev; 1961 struct device *dev = hisi_hba->dev; 1962 struct hisi_sas_port *port; 1963 struct hisi_sas_slot *slot; 1964 struct asd_sas_port *sas_port = device->port; 1965 struct hisi_sas_cmd_hdr *cmd_hdr_base; 1966 int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx; 1967 unsigned long flags, flags_dq = 0; 1968 int wr_q_index; 1969 1970 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) 1971 return -EINVAL; 1972 1973 if (!device->port) 1974 return -1; 1975 1976 port = to_hisi_sas_port(sas_port); 1977 1978 /* simply get a slot and send abort command */ 1979 rc = hisi_sas_slot_index_alloc(hisi_hba, NULL); 1980 if (rc < 0) 1981 goto err_out; 1982 1983 slot_idx = rc; 1984 slot = &hisi_hba->slot_info[slot_idx]; 1985 1986 spin_lock_irqsave(&dq->lock, flags_dq); 1987 wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq); 1988 if (wr_q_index < 0) { 1989 spin_unlock_irqrestore(&dq->lock, flags_dq); 1990 rc = -EAGAIN; 1991 goto err_out_tag; 1992 } 1993 list_add_tail(&slot->delivery, &dq->list); 1994 spin_unlock_irqrestore(&dq->lock, flags_dq); 1995 spin_lock_irqsave(&sas_dev->lock, flags); 1996 list_add_tail(&slot->entry, &sas_dev->list); 1997 spin_unlock_irqrestore(&sas_dev->lock, flags); 1998 1999 dlvry_queue = dq->id; 2000 dlvry_queue_slot = wr_q_index; 2001 2002 slot->device_id = sas_dev->device_id; 2003 slot->n_elem = n_elem; 2004 slot->dlvry_queue = dlvry_queue; 2005 slot->dlvry_queue_slot = dlvry_queue_slot; 2006 cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue]; 2007 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot]; 2008 slot->task = task; 2009 slot->port = port; 2010 slot->is_internal = true; 2011 task->lldd_task = slot; 2012 2013 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr)); 2014 memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ); 2015 memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ); 2016 2017 hisi_sas_task_prep_abort(hisi_hba, slot, device_id, 2018 abort_flag, task_tag); 2019 2020 spin_lock_irqsave(&task->task_state_lock, flags); 2021 task->task_state_flags |= SAS_TASK_AT_INITIATOR; 2022 spin_unlock_irqrestore(&task->task_state_lock, flags); 2023 WRITE_ONCE(slot->ready, 1); 2024 /* send abort command to the chip */ 2025 spin_lock_irqsave(&dq->lock, flags); 2026 hisi_hba->hw->start_delivery(dq); 2027 spin_unlock_irqrestore(&dq->lock, flags); 2028 2029 return 0; 2030 2031 err_out_tag: 2032 hisi_sas_slot_index_free(hisi_hba, slot_idx); 2033 err_out: 2034 dev_err(dev, "internal abort task prep: failed[%d]!\n", rc); 2035 2036 return rc; 2037 } 2038 2039 /** 2040 * _hisi_sas_internal_task_abort -- execute an internal 2041 * abort command for single IO command or a device 2042 * @hisi_hba: host controller struct 2043 * @device: domain device 2044 * @abort_flag: mode of operation, device or single IO 2045 * @tag: tag of IO to be aborted (only relevant to single 2046 * IO mode) 2047 * @dq: delivery queue for this internal abort command 2048 */ 2049 static int 2050 _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, 2051 struct domain_device *device, int abort_flag, 2052 int tag, struct hisi_sas_dq *dq) 2053 { 2054 struct sas_task *task; 2055 struct hisi_sas_device *sas_dev = device->lldd_dev; 2056 struct device *dev = hisi_hba->dev; 2057 int res; 2058 2059 /* 2060 * The interface is not realized means this HW don't support internal 2061 * abort, or don't need to do internal abort. Then here, we return 2062 * TMF_RESP_FUNC_FAILED and let other steps go on, which depends that 2063 * the internal abort has been executed and returned CQ. 2064 */ 2065 if (!hisi_hba->hw->prep_abort) 2066 return TMF_RESP_FUNC_FAILED; 2067 2068 task = sas_alloc_slow_task(GFP_KERNEL); 2069 if (!task) 2070 return -ENOMEM; 2071 2072 task->dev = device; 2073 task->task_proto = device->tproto; 2074 task->task_done = hisi_sas_task_done; 2075 task->slow_task->timer.function = hisi_sas_tmf_timedout; 2076 task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT * HZ; 2077 add_timer(&task->slow_task->timer); 2078 2079 res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id, 2080 task, abort_flag, tag, dq); 2081 if (res) { 2082 del_timer(&task->slow_task->timer); 2083 dev_err(dev, "internal task abort: executing internal task failed: %d\n", 2084 res); 2085 goto exit; 2086 } 2087 wait_for_completion(&task->slow_task->completion); 2088 res = TMF_RESP_FUNC_FAILED; 2089 2090 /* Internal abort timed out */ 2091 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { 2092 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { 2093 struct hisi_sas_slot *slot = task->lldd_task; 2094 2095 if (slot) { 2096 struct hisi_sas_cq *cq = 2097 &hisi_hba->cq[slot->dlvry_queue]; 2098 /* 2099 * flush tasklet to avoid free'ing task 2100 * before using task in IO completion 2101 */ 2102 tasklet_kill(&cq->tasklet); 2103 slot->task = NULL; 2104 } 2105 dev_err(dev, "internal task abort: timeout and not done.\n"); 2106 2107 res = -EIO; 2108 goto exit; 2109 } else 2110 dev_err(dev, "internal task abort: timeout.\n"); 2111 } 2112 2113 if (task->task_status.resp == SAS_TASK_COMPLETE && 2114 task->task_status.stat == TMF_RESP_FUNC_COMPLETE) { 2115 res = TMF_RESP_FUNC_COMPLETE; 2116 goto exit; 2117 } 2118 2119 if (task->task_status.resp == SAS_TASK_COMPLETE && 2120 task->task_status.stat == TMF_RESP_FUNC_SUCC) { 2121 res = TMF_RESP_FUNC_SUCC; 2122 goto exit; 2123 } 2124 2125 exit: 2126 dev_dbg(dev, "internal task abort: task to dev %016llx task=%p resp: 0x%x sts 0x%x\n", 2127 SAS_ADDR(device->sas_addr), task, 2128 task->task_status.resp, /* 0 is complete, -1 is undelivered */ 2129 task->task_status.stat); 2130 sas_free_task(task); 2131 2132 return res; 2133 } 2134 2135 static int 2136 hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, 2137 struct domain_device *device, 2138 int abort_flag, int tag) 2139 { 2140 struct hisi_sas_slot *slot; 2141 struct device *dev = hisi_hba->dev; 2142 struct hisi_sas_dq *dq; 2143 int i, rc; 2144 2145 switch (abort_flag) { 2146 case HISI_SAS_INT_ABT_CMD: 2147 slot = &hisi_hba->slot_info[tag]; 2148 dq = &hisi_hba->dq[slot->dlvry_queue]; 2149 return _hisi_sas_internal_task_abort(hisi_hba, device, 2150 abort_flag, tag, dq); 2151 case HISI_SAS_INT_ABT_DEV: 2152 for (i = 0; i < hisi_hba->cq_nvecs; i++) { 2153 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 2154 const struct cpumask *mask = cq->pci_irq_mask; 2155 2156 if (mask && !cpumask_intersects(cpu_online_mask, mask)) 2157 continue; 2158 dq = &hisi_hba->dq[i]; 2159 rc = _hisi_sas_internal_task_abort(hisi_hba, device, 2160 abort_flag, tag, 2161 dq); 2162 if (rc) 2163 return rc; 2164 } 2165 break; 2166 default: 2167 dev_err(dev, "Unrecognised internal abort flag (%d)\n", 2168 abort_flag); 2169 return -EINVAL; 2170 } 2171 2172 return 0; 2173 } 2174 2175 static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy) 2176 { 2177 hisi_sas_port_notify_formed(sas_phy); 2178 } 2179 2180 static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type, 2181 u8 reg_index, u8 reg_count, u8 *write_data) 2182 { 2183 struct hisi_hba *hisi_hba = sha->lldd_ha; 2184 2185 if (!hisi_hba->hw->write_gpio) 2186 return -EOPNOTSUPP; 2187 2188 return hisi_hba->hw->write_gpio(hisi_hba, reg_type, 2189 reg_index, reg_count, write_data); 2190 } 2191 2192 static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy) 2193 { 2194 struct asd_sas_phy *sas_phy = &phy->sas_phy; 2195 struct sas_phy *sphy = sas_phy->phy; 2196 unsigned long flags; 2197 2198 phy->phy_attached = 0; 2199 phy->phy_type = 0; 2200 phy->port = NULL; 2201 2202 spin_lock_irqsave(&phy->lock, flags); 2203 if (phy->enable) 2204 sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; 2205 else 2206 sphy->negotiated_linkrate = SAS_PHY_DISABLED; 2207 spin_unlock_irqrestore(&phy->lock, flags); 2208 } 2209 2210 void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy) 2211 { 2212 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 2213 struct asd_sas_phy *sas_phy = &phy->sas_phy; 2214 struct sas_ha_struct *sas_ha = &hisi_hba->sha; 2215 struct device *dev = hisi_hba->dev; 2216 2217 if (rdy) { 2218 /* Phy down but ready */ 2219 hisi_sas_bytes_dmaed(hisi_hba, phy_no); 2220 hisi_sas_port_notify_formed(sas_phy); 2221 } else { 2222 struct hisi_sas_port *port = phy->port; 2223 2224 if (test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags) || 2225 phy->in_reset) { 2226 dev_info(dev, "ignore flutter phy%d down\n", phy_no); 2227 return; 2228 } 2229 /* Phy down and not ready */ 2230 sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL); 2231 sas_phy_disconnected(sas_phy); 2232 2233 if (port) { 2234 if (phy->phy_type & PORT_TYPE_SAS) { 2235 int port_id = port->id; 2236 2237 if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba, 2238 port_id)) 2239 port->port_attached = 0; 2240 } else if (phy->phy_type & PORT_TYPE_SATA) 2241 port->port_attached = 0; 2242 } 2243 hisi_sas_phy_disconnected(phy); 2244 } 2245 } 2246 EXPORT_SYMBOL_GPL(hisi_sas_phy_down); 2247 2248 void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba) 2249 { 2250 int i; 2251 2252 for (i = 0; i < hisi_hba->cq_nvecs; i++) { 2253 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 2254 2255 tasklet_kill(&cq->tasklet); 2256 } 2257 } 2258 EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets); 2259 2260 int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type) 2261 { 2262 struct hisi_hba *hisi_hba = shost_priv(shost); 2263 2264 if (reset_type != SCSI_ADAPTER_RESET) 2265 return -EOPNOTSUPP; 2266 2267 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 2268 2269 return 0; 2270 } 2271 EXPORT_SYMBOL_GPL(hisi_sas_host_reset); 2272 2273 struct scsi_transport_template *hisi_sas_stt; 2274 EXPORT_SYMBOL_GPL(hisi_sas_stt); 2275 2276 static struct sas_domain_function_template hisi_sas_transport_ops = { 2277 .lldd_dev_found = hisi_sas_dev_found, 2278 .lldd_dev_gone = hisi_sas_dev_gone, 2279 .lldd_execute_task = hisi_sas_queue_command, 2280 .lldd_control_phy = hisi_sas_control_phy, 2281 .lldd_abort_task = hisi_sas_abort_task, 2282 .lldd_abort_task_set = hisi_sas_abort_task_set, 2283 .lldd_clear_aca = hisi_sas_clear_aca, 2284 .lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset, 2285 .lldd_lu_reset = hisi_sas_lu_reset, 2286 .lldd_query_task = hisi_sas_query_task, 2287 .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha, 2288 .lldd_port_formed = hisi_sas_port_formed, 2289 .lldd_write_gpio = hisi_sas_write_gpio, 2290 }; 2291 2292 void hisi_sas_init_mem(struct hisi_hba *hisi_hba) 2293 { 2294 int i, s, j, max_command_entries = hisi_hba->hw->max_command_entries; 2295 struct hisi_sas_breakpoint *sata_breakpoint = hisi_hba->sata_breakpoint; 2296 2297 for (i = 0; i < hisi_hba->queue_count; i++) { 2298 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 2299 struct hisi_sas_dq *dq = &hisi_hba->dq[i]; 2300 struct hisi_sas_cmd_hdr *cmd_hdr = hisi_hba->cmd_hdr[i]; 2301 2302 s = sizeof(struct hisi_sas_cmd_hdr); 2303 for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++) 2304 memset(&cmd_hdr[j], 0, s); 2305 2306 dq->wr_point = 0; 2307 2308 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; 2309 memset(hisi_hba->complete_hdr[i], 0, s); 2310 cq->rd_point = 0; 2311 } 2312 2313 s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy; 2314 memset(hisi_hba->initial_fis, 0, s); 2315 2316 s = max_command_entries * sizeof(struct hisi_sas_iost); 2317 memset(hisi_hba->iost, 0, s); 2318 2319 s = max_command_entries * sizeof(struct hisi_sas_breakpoint); 2320 memset(hisi_hba->breakpoint, 0, s); 2321 2322 s = sizeof(struct hisi_sas_sata_breakpoint); 2323 for (j = 0; j < HISI_SAS_MAX_ITCT_ENTRIES; j++) 2324 memset(&sata_breakpoint[j], 0, s); 2325 } 2326 EXPORT_SYMBOL_GPL(hisi_sas_init_mem); 2327 2328 int hisi_sas_alloc(struct hisi_hba *hisi_hba) 2329 { 2330 struct device *dev = hisi_hba->dev; 2331 int i, j, s, max_command_entries = hisi_hba->hw->max_command_entries; 2332 int max_command_entries_ru, sz_slot_buf_ru; 2333 int blk_cnt, slots_per_blk; 2334 2335 sema_init(&hisi_hba->sem, 1); 2336 spin_lock_init(&hisi_hba->lock); 2337 for (i = 0; i < hisi_hba->n_phy; i++) { 2338 hisi_sas_phy_init(hisi_hba, i); 2339 hisi_hba->port[i].port_attached = 0; 2340 hisi_hba->port[i].id = -1; 2341 } 2342 2343 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 2344 hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED; 2345 hisi_hba->devices[i].device_id = i; 2346 hisi_hba->devices[i].dev_status = HISI_SAS_DEV_INIT; 2347 } 2348 2349 for (i = 0; i < hisi_hba->queue_count; i++) { 2350 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 2351 struct hisi_sas_dq *dq = &hisi_hba->dq[i]; 2352 2353 /* Completion queue structure */ 2354 cq->id = i; 2355 cq->hisi_hba = hisi_hba; 2356 2357 /* Delivery queue structure */ 2358 spin_lock_init(&dq->lock); 2359 INIT_LIST_HEAD(&dq->list); 2360 dq->id = i; 2361 dq->hisi_hba = hisi_hba; 2362 2363 /* Delivery queue */ 2364 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS; 2365 hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s, 2366 &hisi_hba->cmd_hdr_dma[i], 2367 GFP_KERNEL); 2368 if (!hisi_hba->cmd_hdr[i]) 2369 goto err_out; 2370 2371 /* Completion queue */ 2372 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; 2373 hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s, 2374 &hisi_hba->complete_hdr_dma[i], 2375 GFP_KERNEL); 2376 if (!hisi_hba->complete_hdr[i]) 2377 goto err_out; 2378 } 2379 2380 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct); 2381 hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma, 2382 GFP_KERNEL | __GFP_ZERO); 2383 if (!hisi_hba->itct) 2384 goto err_out; 2385 2386 hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries, 2387 sizeof(struct hisi_sas_slot), 2388 GFP_KERNEL); 2389 if (!hisi_hba->slot_info) 2390 goto err_out; 2391 2392 /* roundup to avoid overly large block size */ 2393 max_command_entries_ru = roundup(max_command_entries, 64); 2394 if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK) 2395 sz_slot_buf_ru = sizeof(struct hisi_sas_slot_dif_buf_table); 2396 else 2397 sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table); 2398 sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64); 2399 s = lcm(max_command_entries_ru, sz_slot_buf_ru); 2400 blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s; 2401 slots_per_blk = s / sz_slot_buf_ru; 2402 2403 for (i = 0; i < blk_cnt; i++) { 2404 int slot_index = i * slots_per_blk; 2405 dma_addr_t buf_dma; 2406 void *buf; 2407 2408 buf = dmam_alloc_coherent(dev, s, &buf_dma, 2409 GFP_KERNEL | __GFP_ZERO); 2410 if (!buf) 2411 goto err_out; 2412 2413 for (j = 0; j < slots_per_blk; j++, slot_index++) { 2414 struct hisi_sas_slot *slot; 2415 2416 slot = &hisi_hba->slot_info[slot_index]; 2417 slot->buf = buf; 2418 slot->buf_dma = buf_dma; 2419 slot->idx = slot_index; 2420 2421 buf += sz_slot_buf_ru; 2422 buf_dma += sz_slot_buf_ru; 2423 } 2424 } 2425 2426 s = max_command_entries * sizeof(struct hisi_sas_iost); 2427 hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma, 2428 GFP_KERNEL); 2429 if (!hisi_hba->iost) 2430 goto err_out; 2431 2432 s = max_command_entries * sizeof(struct hisi_sas_breakpoint); 2433 hisi_hba->breakpoint = dmam_alloc_coherent(dev, s, 2434 &hisi_hba->breakpoint_dma, 2435 GFP_KERNEL); 2436 if (!hisi_hba->breakpoint) 2437 goto err_out; 2438 2439 hisi_hba->slot_index_count = max_command_entries; 2440 s = hisi_hba->slot_index_count / BITS_PER_BYTE; 2441 hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL); 2442 if (!hisi_hba->slot_index_tags) 2443 goto err_out; 2444 2445 s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS; 2446 hisi_hba->initial_fis = dmam_alloc_coherent(dev, s, 2447 &hisi_hba->initial_fis_dma, 2448 GFP_KERNEL); 2449 if (!hisi_hba->initial_fis) 2450 goto err_out; 2451 2452 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint); 2453 hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s, 2454 &hisi_hba->sata_breakpoint_dma, 2455 GFP_KERNEL); 2456 if (!hisi_hba->sata_breakpoint) 2457 goto err_out; 2458 hisi_sas_init_mem(hisi_hba); 2459 2460 hisi_sas_slot_index_init(hisi_hba); 2461 hisi_hba->last_slot_index = hisi_hba->hw->max_command_entries - 2462 HISI_SAS_RESERVED_IPTT_CNT; 2463 2464 hisi_hba->wq = create_singlethread_workqueue(dev_name(dev)); 2465 if (!hisi_hba->wq) { 2466 dev_err(dev, "sas_alloc: failed to create workqueue\n"); 2467 goto err_out; 2468 } 2469 2470 return 0; 2471 err_out: 2472 return -ENOMEM; 2473 } 2474 EXPORT_SYMBOL_GPL(hisi_sas_alloc); 2475 2476 void hisi_sas_free(struct hisi_hba *hisi_hba) 2477 { 2478 if (hisi_hba->wq) 2479 destroy_workqueue(hisi_hba->wq); 2480 } 2481 EXPORT_SYMBOL_GPL(hisi_sas_free); 2482 2483 void hisi_sas_rst_work_handler(struct work_struct *work) 2484 { 2485 struct hisi_hba *hisi_hba = 2486 container_of(work, struct hisi_hba, rst_work); 2487 2488 hisi_sas_controller_reset(hisi_hba); 2489 } 2490 EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler); 2491 2492 void hisi_sas_sync_rst_work_handler(struct work_struct *work) 2493 { 2494 struct hisi_sas_rst *rst = 2495 container_of(work, struct hisi_sas_rst, work); 2496 2497 if (!hisi_sas_controller_reset(rst->hisi_hba)) 2498 rst->done = true; 2499 complete(rst->completion); 2500 } 2501 EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler); 2502 2503 int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba) 2504 { 2505 struct device *dev = hisi_hba->dev; 2506 struct platform_device *pdev = hisi_hba->platform_dev; 2507 struct device_node *np = pdev ? pdev->dev.of_node : NULL; 2508 struct clk *refclk; 2509 2510 if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr, 2511 SAS_ADDR_SIZE)) { 2512 dev_err(dev, "could not get property sas-addr\n"); 2513 return -ENOENT; 2514 } 2515 2516 if (np) { 2517 /* 2518 * These properties are only required for platform device-based 2519 * controller with DT firmware. 2520 */ 2521 hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np, 2522 "hisilicon,sas-syscon"); 2523 if (IS_ERR(hisi_hba->ctrl)) { 2524 dev_err(dev, "could not get syscon\n"); 2525 return -ENOENT; 2526 } 2527 2528 if (device_property_read_u32(dev, "ctrl-reset-reg", 2529 &hisi_hba->ctrl_reset_reg)) { 2530 dev_err(dev, "could not get property ctrl-reset-reg\n"); 2531 return -ENOENT; 2532 } 2533 2534 if (device_property_read_u32(dev, "ctrl-reset-sts-reg", 2535 &hisi_hba->ctrl_reset_sts_reg)) { 2536 dev_err(dev, "could not get property ctrl-reset-sts-reg\n"); 2537 return -ENOENT; 2538 } 2539 2540 if (device_property_read_u32(dev, "ctrl-clock-ena-reg", 2541 &hisi_hba->ctrl_clock_ena_reg)) { 2542 dev_err(dev, "could not get property ctrl-clock-ena-reg\n"); 2543 return -ENOENT; 2544 } 2545 } 2546 2547 refclk = devm_clk_get(dev, NULL); 2548 if (IS_ERR(refclk)) 2549 dev_dbg(dev, "no ref clk property\n"); 2550 else 2551 hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000; 2552 2553 if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) { 2554 dev_err(dev, "could not get property phy-count\n"); 2555 return -ENOENT; 2556 } 2557 2558 if (device_property_read_u32(dev, "queue-count", 2559 &hisi_hba->queue_count)) { 2560 dev_err(dev, "could not get property queue-count\n"); 2561 return -ENOENT; 2562 } 2563 2564 return 0; 2565 } 2566 EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info); 2567 2568 static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, 2569 const struct hisi_sas_hw *hw) 2570 { 2571 struct resource *res; 2572 struct Scsi_Host *shost; 2573 struct hisi_hba *hisi_hba; 2574 struct device *dev = &pdev->dev; 2575 int error; 2576 2577 shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba)); 2578 if (!shost) { 2579 dev_err(dev, "scsi host alloc failed\n"); 2580 return NULL; 2581 } 2582 hisi_hba = shost_priv(shost); 2583 2584 INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler); 2585 hisi_hba->hw = hw; 2586 hisi_hba->dev = dev; 2587 hisi_hba->platform_dev = pdev; 2588 hisi_hba->shost = shost; 2589 SHOST_TO_SAS_HA(shost) = &hisi_hba->sha; 2590 2591 timer_setup(&hisi_hba->timer, NULL, 0); 2592 2593 if (hisi_sas_get_fw_info(hisi_hba) < 0) 2594 goto err_out; 2595 2596 error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 2597 if (error) 2598 error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); 2599 2600 if (error) { 2601 dev_err(dev, "No usable DMA addressing method\n"); 2602 goto err_out; 2603 } 2604 2605 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2606 hisi_hba->regs = devm_ioremap_resource(dev, res); 2607 if (IS_ERR(hisi_hba->regs)) 2608 goto err_out; 2609 2610 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 2611 if (res) { 2612 hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res); 2613 if (IS_ERR(hisi_hba->sgpio_regs)) 2614 goto err_out; 2615 } 2616 2617 if (hisi_sas_alloc(hisi_hba)) { 2618 hisi_sas_free(hisi_hba); 2619 goto err_out; 2620 } 2621 2622 return shost; 2623 err_out: 2624 scsi_host_put(shost); 2625 dev_err(dev, "shost alloc failed\n"); 2626 return NULL; 2627 } 2628 2629 int hisi_sas_probe(struct platform_device *pdev, 2630 const struct hisi_sas_hw *hw) 2631 { 2632 struct Scsi_Host *shost; 2633 struct hisi_hba *hisi_hba; 2634 struct device *dev = &pdev->dev; 2635 struct asd_sas_phy **arr_phy; 2636 struct asd_sas_port **arr_port; 2637 struct sas_ha_struct *sha; 2638 int rc, phy_nr, port_nr, i; 2639 2640 shost = hisi_sas_shost_alloc(pdev, hw); 2641 if (!shost) 2642 return -ENOMEM; 2643 2644 sha = SHOST_TO_SAS_HA(shost); 2645 hisi_hba = shost_priv(shost); 2646 platform_set_drvdata(pdev, sha); 2647 2648 phy_nr = port_nr = hisi_hba->n_phy; 2649 2650 arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL); 2651 arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL); 2652 if (!arr_phy || !arr_port) { 2653 rc = -ENOMEM; 2654 goto err_out_ha; 2655 } 2656 2657 sha->sas_phy = arr_phy; 2658 sha->sas_port = arr_port; 2659 sha->lldd_ha = hisi_hba; 2660 2661 shost->transportt = hisi_sas_stt; 2662 shost->max_id = HISI_SAS_MAX_DEVICES; 2663 shost->max_lun = ~0; 2664 shost->max_channel = 1; 2665 shost->max_cmd_len = 16; 2666 if (hisi_hba->hw->slot_index_alloc) { 2667 shost->can_queue = hisi_hba->hw->max_command_entries; 2668 shost->cmd_per_lun = hisi_hba->hw->max_command_entries; 2669 } else { 2670 shost->can_queue = hisi_hba->hw->max_command_entries - 2671 HISI_SAS_RESERVED_IPTT_CNT; 2672 shost->cmd_per_lun = hisi_hba->hw->max_command_entries - 2673 HISI_SAS_RESERVED_IPTT_CNT; 2674 } 2675 2676 sha->sas_ha_name = DRV_NAME; 2677 sha->dev = hisi_hba->dev; 2678 sha->lldd_module = THIS_MODULE; 2679 sha->sas_addr = &hisi_hba->sas_addr[0]; 2680 sha->num_phys = hisi_hba->n_phy; 2681 sha->core.shost = hisi_hba->shost; 2682 2683 for (i = 0; i < hisi_hba->n_phy; i++) { 2684 sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy; 2685 sha->sas_port[i] = &hisi_hba->port[i].sas_port; 2686 } 2687 2688 rc = scsi_add_host(shost, &pdev->dev); 2689 if (rc) 2690 goto err_out_ha; 2691 2692 rc = sas_register_ha(sha); 2693 if (rc) 2694 goto err_out_register_ha; 2695 2696 rc = hisi_hba->hw->hw_init(hisi_hba); 2697 if (rc) 2698 goto err_out_register_ha; 2699 2700 scsi_scan_host(shost); 2701 2702 return 0; 2703 2704 err_out_register_ha: 2705 scsi_remove_host(shost); 2706 err_out_ha: 2707 hisi_sas_free(hisi_hba); 2708 scsi_host_put(shost); 2709 return rc; 2710 } 2711 EXPORT_SYMBOL_GPL(hisi_sas_probe); 2712 2713 struct dentry *hisi_sas_debugfs_dir; 2714 2715 static void hisi_sas_debugfs_snapshot_cq_reg(struct hisi_hba *hisi_hba) 2716 { 2717 int queue_entry_size = hisi_hba->hw->complete_hdr_size; 2718 int i; 2719 2720 for (i = 0; i < hisi_hba->queue_count; i++) 2721 memcpy(hisi_hba->debugfs_complete_hdr[i], 2722 hisi_hba->complete_hdr[i], 2723 HISI_SAS_QUEUE_SLOTS * queue_entry_size); 2724 } 2725 2726 static void hisi_sas_debugfs_snapshot_dq_reg(struct hisi_hba *hisi_hba) 2727 { 2728 int queue_entry_size = sizeof(struct hisi_sas_cmd_hdr); 2729 int i; 2730 2731 for (i = 0; i < hisi_hba->queue_count; i++) { 2732 struct hisi_sas_cmd_hdr *debugfs_cmd_hdr, *cmd_hdr; 2733 int j; 2734 2735 debugfs_cmd_hdr = hisi_hba->debugfs_cmd_hdr[i]; 2736 cmd_hdr = hisi_hba->cmd_hdr[i]; 2737 2738 for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++) 2739 memcpy(&debugfs_cmd_hdr[j], &cmd_hdr[j], 2740 queue_entry_size); 2741 } 2742 } 2743 2744 static void hisi_sas_debugfs_snapshot_port_reg(struct hisi_hba *hisi_hba) 2745 { 2746 const struct hisi_sas_debugfs_reg *port = 2747 hisi_hba->hw->debugfs_reg_port; 2748 int i, phy_cnt; 2749 u32 offset; 2750 u32 *databuf; 2751 2752 for (phy_cnt = 0; phy_cnt < hisi_hba->n_phy; phy_cnt++) { 2753 databuf = (u32 *)hisi_hba->debugfs_port_reg[phy_cnt]; 2754 for (i = 0; i < port->count; i++, databuf++) { 2755 offset = port->base_off + 4 * i; 2756 *databuf = port->read_port_reg(hisi_hba, phy_cnt, 2757 offset); 2758 } 2759 } 2760 } 2761 2762 static void hisi_sas_debugfs_snapshot_global_reg(struct hisi_hba *hisi_hba) 2763 { 2764 u32 *databuf = (u32 *)hisi_hba->debugfs_global_reg; 2765 const struct hisi_sas_debugfs_reg *global = 2766 hisi_hba->hw->debugfs_reg_global; 2767 int i; 2768 2769 for (i = 0; i < global->count; i++, databuf++) 2770 *databuf = global->read_global_reg(hisi_hba, 4 * i); 2771 } 2772 2773 static void hisi_sas_debugfs_snapshot_itct_reg(struct hisi_hba *hisi_hba) 2774 { 2775 void *databuf = hisi_hba->debugfs_itct; 2776 struct hisi_sas_itct *itct; 2777 int i; 2778 2779 itct = hisi_hba->itct; 2780 2781 for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) { 2782 memcpy(databuf, itct, sizeof(struct hisi_sas_itct)); 2783 databuf += sizeof(struct hisi_sas_itct); 2784 } 2785 } 2786 2787 static void hisi_sas_debugfs_snapshot_iost_reg(struct hisi_hba *hisi_hba) 2788 { 2789 int max_command_entries = hisi_hba->hw->max_command_entries; 2790 void *databuf = hisi_hba->debugfs_iost; 2791 struct hisi_sas_iost *iost; 2792 int i; 2793 2794 iost = hisi_hba->iost; 2795 2796 for (i = 0; i < max_command_entries; i++, iost++) { 2797 memcpy(databuf, iost, sizeof(struct hisi_sas_iost)); 2798 databuf += sizeof(struct hisi_sas_iost); 2799 } 2800 } 2801 2802 static const char * 2803 hisi_sas_debugfs_to_reg_name(int off, int base_off, 2804 const struct hisi_sas_debugfs_reg_lu *lu) 2805 { 2806 for (; lu->name; lu++) { 2807 if (off == lu->off - base_off) 2808 return lu->name; 2809 } 2810 2811 return NULL; 2812 } 2813 2814 static void hisi_sas_debugfs_print_reg(u32 *regs_val, const void *ptr, 2815 struct seq_file *s) 2816 { 2817 const struct hisi_sas_debugfs_reg *reg = ptr; 2818 int i; 2819 2820 for (i = 0; i < reg->count; i++) { 2821 int off = i * 4; 2822 const char *name; 2823 2824 name = hisi_sas_debugfs_to_reg_name(off, reg->base_off, 2825 reg->lu); 2826 2827 if (name) 2828 seq_printf(s, "0x%08x 0x%08x %s\n", off, 2829 regs_val[i], name); 2830 else 2831 seq_printf(s, "0x%08x 0x%08x\n", off, 2832 regs_val[i]); 2833 } 2834 } 2835 2836 static int hisi_sas_debugfs_global_show(struct seq_file *s, void *p) 2837 { 2838 struct hisi_hba *hisi_hba = s->private; 2839 const struct hisi_sas_hw *hw = hisi_hba->hw; 2840 const struct hisi_sas_debugfs_reg *reg_global = hw->debugfs_reg_global; 2841 2842 hisi_sas_debugfs_print_reg(hisi_hba->debugfs_global_reg, 2843 reg_global, s); 2844 2845 return 0; 2846 } 2847 2848 static int hisi_sas_debugfs_global_open(struct inode *inode, struct file *filp) 2849 { 2850 return single_open(filp, hisi_sas_debugfs_global_show, 2851 inode->i_private); 2852 } 2853 2854 static const struct file_operations hisi_sas_debugfs_global_fops = { 2855 .open = hisi_sas_debugfs_global_open, 2856 .read = seq_read, 2857 .llseek = seq_lseek, 2858 .release = single_release, 2859 .owner = THIS_MODULE, 2860 }; 2861 2862 static int hisi_sas_debugfs_port_show(struct seq_file *s, void *p) 2863 { 2864 struct hisi_sas_phy *phy = s->private; 2865 struct hisi_hba *hisi_hba = phy->hisi_hba; 2866 const struct hisi_sas_hw *hw = hisi_hba->hw; 2867 const struct hisi_sas_debugfs_reg *reg_port = hw->debugfs_reg_port; 2868 u32 *databuf = hisi_hba->debugfs_port_reg[phy->sas_phy.id]; 2869 2870 hisi_sas_debugfs_print_reg(databuf, reg_port, s); 2871 2872 return 0; 2873 } 2874 2875 static int hisi_sas_debugfs_port_open(struct inode *inode, struct file *filp) 2876 { 2877 return single_open(filp, hisi_sas_debugfs_port_show, inode->i_private); 2878 } 2879 2880 static const struct file_operations hisi_sas_debugfs_port_fops = { 2881 .open = hisi_sas_debugfs_port_open, 2882 .read = seq_read, 2883 .llseek = seq_lseek, 2884 .release = single_release, 2885 .owner = THIS_MODULE, 2886 }; 2887 2888 static int hisi_sas_show_row_64(struct seq_file *s, int index, 2889 int sz, __le64 *ptr) 2890 { 2891 int i; 2892 2893 /* completion header size not fixed per HW version */ 2894 seq_printf(s, "index %04d:\n\t", index); 2895 for (i = 1; i <= sz / 8; i++, ptr++) { 2896 seq_printf(s, " 0x%016llx", le64_to_cpu(*ptr)); 2897 if (!(i % 2)) 2898 seq_puts(s, "\n\t"); 2899 } 2900 2901 seq_puts(s, "\n"); 2902 2903 return 0; 2904 } 2905 2906 static int hisi_sas_show_row_32(struct seq_file *s, int index, 2907 int sz, __le32 *ptr) 2908 { 2909 int i; 2910 2911 /* completion header size not fixed per HW version */ 2912 seq_printf(s, "index %04d:\n\t", index); 2913 for (i = 1; i <= sz / 4; i++, ptr++) { 2914 seq_printf(s, " 0x%08x", le32_to_cpu(*ptr)); 2915 if (!(i % 4)) 2916 seq_puts(s, "\n\t"); 2917 } 2918 seq_puts(s, "\n"); 2919 2920 return 0; 2921 } 2922 2923 static int hisi_sas_cq_show_slot(struct seq_file *s, int slot, void *cq_ptr) 2924 { 2925 struct hisi_sas_cq *cq = cq_ptr; 2926 struct hisi_hba *hisi_hba = cq->hisi_hba; 2927 void *complete_queue = hisi_hba->debugfs_complete_hdr[cq->id]; 2928 __le32 *complete_hdr = complete_queue + 2929 (hisi_hba->hw->complete_hdr_size * slot); 2930 2931 return hisi_sas_show_row_32(s, slot, 2932 hisi_hba->hw->complete_hdr_size, 2933 complete_hdr); 2934 } 2935 2936 static int hisi_sas_debugfs_cq_show(struct seq_file *s, void *p) 2937 { 2938 struct hisi_sas_cq *cq = s->private; 2939 int slot, ret; 2940 2941 for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) { 2942 ret = hisi_sas_cq_show_slot(s, slot, cq); 2943 if (ret) 2944 return ret; 2945 } 2946 return 0; 2947 } 2948 2949 static int hisi_sas_debugfs_cq_open(struct inode *inode, struct file *filp) 2950 { 2951 return single_open(filp, hisi_sas_debugfs_cq_show, inode->i_private); 2952 } 2953 2954 static const struct file_operations hisi_sas_debugfs_cq_fops = { 2955 .open = hisi_sas_debugfs_cq_open, 2956 .read = seq_read, 2957 .llseek = seq_lseek, 2958 .release = single_release, 2959 .owner = THIS_MODULE, 2960 }; 2961 2962 static int hisi_sas_dq_show_slot(struct seq_file *s, int slot, void *dq_ptr) 2963 { 2964 struct hisi_sas_dq *dq = dq_ptr; 2965 struct hisi_hba *hisi_hba = dq->hisi_hba; 2966 void *cmd_queue = hisi_hba->debugfs_cmd_hdr[dq->id]; 2967 __le32 *cmd_hdr = cmd_queue + 2968 sizeof(struct hisi_sas_cmd_hdr) * slot; 2969 2970 return hisi_sas_show_row_32(s, slot, sizeof(struct hisi_sas_cmd_hdr), 2971 cmd_hdr); 2972 } 2973 2974 static int hisi_sas_debugfs_dq_show(struct seq_file *s, void *p) 2975 { 2976 int slot, ret; 2977 2978 for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) { 2979 ret = hisi_sas_dq_show_slot(s, slot, s->private); 2980 if (ret) 2981 return ret; 2982 } 2983 return 0; 2984 } 2985 2986 static int hisi_sas_debugfs_dq_open(struct inode *inode, struct file *filp) 2987 { 2988 return single_open(filp, hisi_sas_debugfs_dq_show, inode->i_private); 2989 } 2990 2991 static const struct file_operations hisi_sas_debugfs_dq_fops = { 2992 .open = hisi_sas_debugfs_dq_open, 2993 .read = seq_read, 2994 .llseek = seq_lseek, 2995 .release = single_release, 2996 .owner = THIS_MODULE, 2997 }; 2998 2999 static int hisi_sas_debugfs_iost_show(struct seq_file *s, void *p) 3000 { 3001 struct hisi_hba *hisi_hba = s->private; 3002 struct hisi_sas_iost *debugfs_iost = hisi_hba->debugfs_iost; 3003 int i, ret, max_command_entries = hisi_hba->hw->max_command_entries; 3004 __le64 *iost = &debugfs_iost->qw0; 3005 3006 for (i = 0; i < max_command_entries; i++, debugfs_iost++) { 3007 ret = hisi_sas_show_row_64(s, i, sizeof(*debugfs_iost), 3008 iost); 3009 if (ret) 3010 return ret; 3011 } 3012 3013 return 0; 3014 } 3015 3016 static int hisi_sas_debugfs_iost_open(struct inode *inode, struct file *filp) 3017 { 3018 return single_open(filp, hisi_sas_debugfs_iost_show, inode->i_private); 3019 } 3020 3021 static const struct file_operations hisi_sas_debugfs_iost_fops = { 3022 .open = hisi_sas_debugfs_iost_open, 3023 .read = seq_read, 3024 .llseek = seq_lseek, 3025 .release = single_release, 3026 .owner = THIS_MODULE, 3027 }; 3028 3029 static int hisi_sas_debugfs_itct_show(struct seq_file *s, void *p) 3030 { 3031 int i, ret; 3032 struct hisi_hba *hisi_hba = s->private; 3033 struct hisi_sas_itct *debugfs_itct = hisi_hba->debugfs_itct; 3034 __le64 *itct = &debugfs_itct->qw0; 3035 3036 for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, debugfs_itct++) { 3037 ret = hisi_sas_show_row_64(s, i, sizeof(*debugfs_itct), 3038 itct); 3039 if (ret) 3040 return ret; 3041 } 3042 3043 return 0; 3044 } 3045 3046 static int hisi_sas_debugfs_itct_open(struct inode *inode, struct file *filp) 3047 { 3048 return single_open(filp, hisi_sas_debugfs_itct_show, inode->i_private); 3049 } 3050 3051 static const struct file_operations hisi_sas_debugfs_itct_fops = { 3052 .open = hisi_sas_debugfs_itct_open, 3053 .read = seq_read, 3054 .llseek = seq_lseek, 3055 .release = single_release, 3056 .owner = THIS_MODULE, 3057 }; 3058 3059 static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba) 3060 { 3061 struct dentry *dump_dentry; 3062 struct dentry *dentry; 3063 char name[256]; 3064 int p; 3065 int c; 3066 int d; 3067 3068 /* Create dump dir inside device dir */ 3069 dump_dentry = debugfs_create_dir("dump", hisi_hba->debugfs_dir); 3070 hisi_hba->debugfs_dump_dentry = dump_dentry; 3071 3072 debugfs_create_file("global", 0400, dump_dentry, hisi_hba, 3073 &hisi_sas_debugfs_global_fops); 3074 3075 /* Create port dir and files */ 3076 dentry = debugfs_create_dir("port", dump_dentry); 3077 for (p = 0; p < hisi_hba->n_phy; p++) { 3078 snprintf(name, 256, "%d", p); 3079 3080 debugfs_create_file(name, 0400, dentry, &hisi_hba->phy[p], 3081 &hisi_sas_debugfs_port_fops); 3082 } 3083 3084 /* Create CQ dir and files */ 3085 dentry = debugfs_create_dir("cq", dump_dentry); 3086 for (c = 0; c < hisi_hba->queue_count; c++) { 3087 snprintf(name, 256, "%d", c); 3088 3089 debugfs_create_file(name, 0400, dentry, &hisi_hba->cq[c], 3090 &hisi_sas_debugfs_cq_fops); 3091 } 3092 3093 /* Create DQ dir and files */ 3094 dentry = debugfs_create_dir("dq", dump_dentry); 3095 for (d = 0; d < hisi_hba->queue_count; d++) { 3096 snprintf(name, 256, "%d", d); 3097 3098 debugfs_create_file(name, 0400, dentry, &hisi_hba->dq[d], 3099 &hisi_sas_debugfs_dq_fops); 3100 } 3101 3102 debugfs_create_file("iost", 0400, dump_dentry, hisi_hba, 3103 &hisi_sas_debugfs_iost_fops); 3104 3105 debugfs_create_file("itct", 0400, dump_dentry, hisi_hba, 3106 &hisi_sas_debugfs_itct_fops); 3107 3108 return; 3109 } 3110 3111 static void hisi_sas_debugfs_snapshot_regs(struct hisi_hba *hisi_hba) 3112 { 3113 hisi_hba->hw->snapshot_prepare(hisi_hba); 3114 3115 hisi_sas_debugfs_snapshot_global_reg(hisi_hba); 3116 hisi_sas_debugfs_snapshot_port_reg(hisi_hba); 3117 hisi_sas_debugfs_snapshot_cq_reg(hisi_hba); 3118 hisi_sas_debugfs_snapshot_dq_reg(hisi_hba); 3119 hisi_sas_debugfs_snapshot_itct_reg(hisi_hba); 3120 hisi_sas_debugfs_snapshot_iost_reg(hisi_hba); 3121 3122 hisi_sas_debugfs_create_files(hisi_hba); 3123 3124 hisi_hba->hw->snapshot_restore(hisi_hba); 3125 } 3126 3127 static ssize_t hisi_sas_debugfs_trigger_dump_write(struct file *file, 3128 const char __user *user_buf, 3129 size_t count, loff_t *ppos) 3130 { 3131 struct hisi_hba *hisi_hba = file->f_inode->i_private; 3132 char buf[8]; 3133 3134 /* A bit racy, but don't care too much since it's only debugfs */ 3135 if (hisi_hba->debugfs_snapshot) 3136 return -EFAULT; 3137 3138 if (count > 8) 3139 return -EFAULT; 3140 3141 if (copy_from_user(buf, user_buf, count)) 3142 return -EFAULT; 3143 3144 if (buf[0] != '1') 3145 return -EFAULT; 3146 3147 queue_work(hisi_hba->wq, &hisi_hba->debugfs_work); 3148 3149 return count; 3150 } 3151 3152 static const struct file_operations hisi_sas_debugfs_trigger_dump_fops = { 3153 .write = &hisi_sas_debugfs_trigger_dump_write, 3154 .owner = THIS_MODULE, 3155 }; 3156 3157 void hisi_sas_debugfs_work_handler(struct work_struct *work) 3158 { 3159 struct hisi_hba *hisi_hba = 3160 container_of(work, struct hisi_hba, debugfs_work); 3161 3162 if (hisi_hba->debugfs_snapshot) 3163 return; 3164 hisi_hba->debugfs_snapshot = true; 3165 3166 hisi_sas_debugfs_snapshot_regs(hisi_hba); 3167 } 3168 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_work_handler); 3169 3170 void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba) 3171 { 3172 int max_command_entries = hisi_hba->hw->max_command_entries; 3173 struct device *dev = hisi_hba->dev; 3174 int p, i, c, d; 3175 size_t sz; 3176 3177 hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev), 3178 hisi_sas_debugfs_dir); 3179 debugfs_create_file("trigger_dump", 0600, 3180 hisi_hba->debugfs_dir, 3181 hisi_hba, 3182 &hisi_sas_debugfs_trigger_dump_fops); 3183 3184 /* Alloc buffer for global */ 3185 sz = hisi_hba->hw->debugfs_reg_global->count * 4; 3186 hisi_hba->debugfs_global_reg = 3187 devm_kmalloc(dev, sz, GFP_KERNEL); 3188 3189 if (!hisi_hba->debugfs_global_reg) 3190 goto fail_global; 3191 3192 /* Alloc buffer for port */ 3193 sz = hisi_hba->hw->debugfs_reg_port->count * 4; 3194 for (p = 0; p < hisi_hba->n_phy; p++) { 3195 hisi_hba->debugfs_port_reg[p] = 3196 devm_kmalloc(dev, sz, GFP_KERNEL); 3197 3198 if (!hisi_hba->debugfs_port_reg[p]) 3199 goto fail_port; 3200 } 3201 3202 /* Alloc buffer for cq */ 3203 sz = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; 3204 for (c = 0; c < hisi_hba->queue_count; c++) { 3205 hisi_hba->debugfs_complete_hdr[c] = 3206 devm_kmalloc(dev, sz, GFP_KERNEL); 3207 3208 if (!hisi_hba->debugfs_complete_hdr[c]) 3209 goto fail_cq; 3210 } 3211 3212 /* Alloc buffer for dq */ 3213 sz = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS; 3214 for (d = 0; d < hisi_hba->queue_count; d++) { 3215 hisi_hba->debugfs_cmd_hdr[d] = 3216 devm_kmalloc(dev, sz, GFP_KERNEL); 3217 3218 if (!hisi_hba->debugfs_cmd_hdr[d]) 3219 goto fail_iost_dq; 3220 } 3221 3222 /* Alloc buffer for iost */ 3223 sz = max_command_entries * sizeof(struct hisi_sas_iost); 3224 3225 hisi_hba->debugfs_iost = devm_kmalloc(dev, sz, GFP_KERNEL); 3226 if (!hisi_hba->debugfs_iost) 3227 goto fail_iost_dq; 3228 3229 /* Alloc buffer for itct */ 3230 /* New memory allocation must be locate before itct */ 3231 sz = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct); 3232 3233 hisi_hba->debugfs_itct = devm_kmalloc(dev, sz, GFP_KERNEL); 3234 if (!hisi_hba->debugfs_itct) 3235 goto fail_itct; 3236 3237 return; 3238 fail_itct: 3239 devm_kfree(dev, hisi_hba->debugfs_iost); 3240 fail_iost_dq: 3241 for (i = 0; i < d; i++) 3242 devm_kfree(dev, hisi_hba->debugfs_cmd_hdr[i]); 3243 fail_cq: 3244 for (i = 0; i < c; i++) 3245 devm_kfree(dev, hisi_hba->debugfs_complete_hdr[i]); 3246 fail_port: 3247 for (i = 0; i < p; i++) 3248 devm_kfree(dev, hisi_hba->debugfs_port_reg[i]); 3249 devm_kfree(dev, hisi_hba->debugfs_global_reg); 3250 fail_global: 3251 debugfs_remove_recursive(hisi_hba->debugfs_dir); 3252 dev_dbg(dev, "failed to init debugfs!\n"); 3253 } 3254 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_init); 3255 3256 void hisi_sas_debugfs_exit(struct hisi_hba *hisi_hba) 3257 { 3258 debugfs_remove_recursive(hisi_hba->debugfs_dir); 3259 } 3260 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_exit); 3261 3262 int hisi_sas_remove(struct platform_device *pdev) 3263 { 3264 struct sas_ha_struct *sha = platform_get_drvdata(pdev); 3265 struct hisi_hba *hisi_hba = sha->lldd_ha; 3266 struct Scsi_Host *shost = sha->core.shost; 3267 3268 if (timer_pending(&hisi_hba->timer)) 3269 del_timer(&hisi_hba->timer); 3270 3271 sas_unregister_ha(sha); 3272 sas_remove_host(sha->core.shost); 3273 3274 hisi_sas_free(hisi_hba); 3275 scsi_host_put(shost); 3276 return 0; 3277 } 3278 EXPORT_SYMBOL_GPL(hisi_sas_remove); 3279 3280 bool hisi_sas_debugfs_enable; 3281 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable); 3282 module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444); 3283 MODULE_PARM_DESC(hisi_sas_debugfs_enable, "Enable driver debugfs (default disabled)"); 3284 3285 static __init int hisi_sas_init(void) 3286 { 3287 hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops); 3288 if (!hisi_sas_stt) 3289 return -ENOMEM; 3290 3291 if (hisi_sas_debugfs_enable) 3292 hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL); 3293 3294 return 0; 3295 } 3296 3297 static __exit void hisi_sas_exit(void) 3298 { 3299 sas_release_transport(hisi_sas_stt); 3300 3301 debugfs_remove(hisi_sas_debugfs_dir); 3302 } 3303 3304 module_init(hisi_sas_init); 3305 module_exit(hisi_sas_exit); 3306 3307 MODULE_LICENSE("GPL"); 3308 MODULE_AUTHOR("John Garry <john.garry@huawei.com>"); 3309 MODULE_DESCRIPTION("HISILICON SAS controller driver"); 3310 MODULE_ALIAS("platform:" DRV_NAME); 3311