1 /* 2 * Copyright (c) 2015 Linaro Ltd. 3 * Copyright (c) 2015 Hisilicon Limited. 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License as published by 7 * the Free Software Foundation; either version 2 of the License, or 8 * (at your option) any later version. 9 * 10 */ 11 12 #include "hisi_sas.h" 13 #define DRV_NAME "hisi_sas" 14 15 #define DEV_IS_GONE(dev) \ 16 ((!dev) || (dev->dev_type == SAS_PHY_UNUSED)) 17 18 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device, 19 u8 *lun, struct hisi_sas_tmf_task *tmf); 20 static int 21 hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, 22 struct domain_device *device, 23 int abort_flag, int tag); 24 static int hisi_sas_softreset_ata_disk(struct domain_device *device); 25 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, 26 void *funcdata); 27 static void hisi_sas_release_task(struct hisi_hba *hisi_hba, 28 struct domain_device *device); 29 static void hisi_sas_dev_gone(struct domain_device *device); 30 31 u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction) 32 { 33 switch (fis->command) { 34 case ATA_CMD_FPDMA_WRITE: 35 case ATA_CMD_FPDMA_READ: 36 case ATA_CMD_FPDMA_RECV: 37 case ATA_CMD_FPDMA_SEND: 38 case ATA_CMD_NCQ_NON_DATA: 39 return HISI_SAS_SATA_PROTOCOL_FPDMA; 40 41 case ATA_CMD_DOWNLOAD_MICRO: 42 case ATA_CMD_ID_ATA: 43 case ATA_CMD_PMP_READ: 44 case ATA_CMD_READ_LOG_EXT: 45 case ATA_CMD_PIO_READ: 46 case ATA_CMD_PIO_READ_EXT: 47 case ATA_CMD_PMP_WRITE: 48 case ATA_CMD_WRITE_LOG_EXT: 49 case ATA_CMD_PIO_WRITE: 50 case ATA_CMD_PIO_WRITE_EXT: 51 return HISI_SAS_SATA_PROTOCOL_PIO; 52 53 case ATA_CMD_DSM: 54 case ATA_CMD_DOWNLOAD_MICRO_DMA: 55 case ATA_CMD_PMP_READ_DMA: 56 case ATA_CMD_PMP_WRITE_DMA: 57 case ATA_CMD_READ: 58 case ATA_CMD_READ_EXT: 59 case ATA_CMD_READ_LOG_DMA_EXT: 60 case ATA_CMD_READ_STREAM_DMA_EXT: 61 case ATA_CMD_TRUSTED_RCV_DMA: 62 case 
ATA_CMD_TRUSTED_SND_DMA: 63 case ATA_CMD_WRITE: 64 case ATA_CMD_WRITE_EXT: 65 case ATA_CMD_WRITE_FUA_EXT: 66 case ATA_CMD_WRITE_QUEUED: 67 case ATA_CMD_WRITE_LOG_DMA_EXT: 68 case ATA_CMD_WRITE_STREAM_DMA_EXT: 69 case ATA_CMD_ZAC_MGMT_IN: 70 return HISI_SAS_SATA_PROTOCOL_DMA; 71 72 case ATA_CMD_CHK_POWER: 73 case ATA_CMD_DEV_RESET: 74 case ATA_CMD_EDD: 75 case ATA_CMD_FLUSH: 76 case ATA_CMD_FLUSH_EXT: 77 case ATA_CMD_VERIFY: 78 case ATA_CMD_VERIFY_EXT: 79 case ATA_CMD_SET_FEATURES: 80 case ATA_CMD_STANDBY: 81 case ATA_CMD_STANDBYNOW1: 82 case ATA_CMD_ZAC_MGMT_OUT: 83 return HISI_SAS_SATA_PROTOCOL_NONDATA; 84 85 case ATA_CMD_SET_MAX: 86 switch (fis->features) { 87 case ATA_SET_MAX_PASSWD: 88 case ATA_SET_MAX_LOCK: 89 return HISI_SAS_SATA_PROTOCOL_PIO; 90 91 case ATA_SET_MAX_PASSWD_DMA: 92 case ATA_SET_MAX_UNLOCK_DMA: 93 return HISI_SAS_SATA_PROTOCOL_DMA; 94 95 default: 96 return HISI_SAS_SATA_PROTOCOL_NONDATA; 97 } 98 99 default: 100 { 101 if (direction == DMA_NONE) 102 return HISI_SAS_SATA_PROTOCOL_NONDATA; 103 return HISI_SAS_SATA_PROTOCOL_PIO; 104 } 105 } 106 } 107 EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol); 108 109 void hisi_sas_sata_done(struct sas_task *task, 110 struct hisi_sas_slot *slot) 111 { 112 struct task_status_struct *ts = &task->task_status; 113 struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf; 114 struct hisi_sas_status_buffer *status_buf = 115 hisi_sas_status_buf_addr_mem(slot); 116 u8 *iu = &status_buf->iu[0]; 117 struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu; 118 119 resp->frame_len = sizeof(struct dev_to_host_fis); 120 memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis)); 121 122 ts->buf_valid_size = sizeof(*resp); 123 } 124 EXPORT_SYMBOL_GPL(hisi_sas_sata_done); 125 126 int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag) 127 { 128 struct ata_queued_cmd *qc = task->uldd_task; 129 130 if (qc) { 131 if (qc->tf.command == ATA_CMD_FPDMA_WRITE || 132 qc->tf.command == ATA_CMD_FPDMA_READ) { 133 *tag = 
qc->tag; 134 return 1; 135 } 136 } 137 return 0; 138 } 139 EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag); 140 141 /* 142 * This function assumes linkrate mask fits in 8 bits, which it 143 * does for all HW versions supported. 144 */ 145 u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max) 146 { 147 u8 rate = 0; 148 int i; 149 150 max -= SAS_LINK_RATE_1_5_GBPS; 151 for (i = 0; i <= max; i++) 152 rate |= 1 << (i * 2); 153 return rate; 154 } 155 EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask); 156 157 static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device) 158 { 159 return device->port->ha->lldd_ha; 160 } 161 162 struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port) 163 { 164 return container_of(sas_port, struct hisi_sas_port, sas_port); 165 } 166 EXPORT_SYMBOL_GPL(to_hisi_sas_port); 167 168 void hisi_sas_stop_phys(struct hisi_hba *hisi_hba) 169 { 170 int phy_no; 171 172 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) 173 hisi_sas_phy_enable(hisi_hba, phy_no, 0); 174 } 175 EXPORT_SYMBOL_GPL(hisi_sas_stop_phys); 176 177 static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx) 178 { 179 void *bitmap = hisi_hba->slot_index_tags; 180 181 clear_bit(slot_idx, bitmap); 182 } 183 184 static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx) 185 { 186 unsigned long flags; 187 188 if (hisi_hba->hw->slot_index_alloc || (slot_idx >= 189 hisi_hba->hw->max_command_entries - HISI_SAS_RESERVED_IPTT_CNT)) { 190 spin_lock_irqsave(&hisi_hba->lock, flags); 191 hisi_sas_slot_index_clear(hisi_hba, slot_idx); 192 spin_unlock_irqrestore(&hisi_hba->lock, flags); 193 } 194 } 195 196 static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx) 197 { 198 void *bitmap = hisi_hba->slot_index_tags; 199 200 set_bit(slot_idx, bitmap); 201 } 202 203 static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, 204 struct scsi_cmnd *scsi_cmnd) 205 { 206 int index; 207 void *bitmap = 
hisi_hba->slot_index_tags; 208 unsigned long flags; 209 210 if (scsi_cmnd) 211 return scsi_cmnd->request->tag; 212 213 spin_lock_irqsave(&hisi_hba->lock, flags); 214 index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count, 215 hisi_hba->last_slot_index + 1); 216 if (index >= hisi_hba->slot_index_count) { 217 index = find_next_zero_bit(bitmap, 218 hisi_hba->slot_index_count, 219 hisi_hba->hw->max_command_entries - 220 HISI_SAS_RESERVED_IPTT_CNT); 221 if (index >= hisi_hba->slot_index_count) { 222 spin_unlock_irqrestore(&hisi_hba->lock, flags); 223 return -SAS_QUEUE_FULL; 224 } 225 } 226 hisi_sas_slot_index_set(hisi_hba, index); 227 hisi_hba->last_slot_index = index; 228 spin_unlock_irqrestore(&hisi_hba->lock, flags); 229 230 return index; 231 } 232 233 static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba) 234 { 235 int i; 236 237 for (i = 0; i < hisi_hba->slot_index_count; ++i) 238 hisi_sas_slot_index_clear(hisi_hba, i); 239 } 240 241 void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, 242 struct hisi_sas_slot *slot) 243 { 244 unsigned long flags; 245 int device_id = slot->device_id; 246 struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id]; 247 248 if (task) { 249 struct device *dev = hisi_hba->dev; 250 251 if (!task->lldd_task) 252 return; 253 254 task->lldd_task = NULL; 255 256 if (!sas_protocol_ata(task->task_proto)) { 257 struct sas_ssp_task *ssp_task = &task->ssp_task; 258 struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; 259 260 if (slot->n_elem) 261 dma_unmap_sg(dev, task->scatter, 262 task->num_scatter, 263 task->data_dir); 264 if (slot->n_elem_dif) 265 dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), 266 scsi_prot_sg_count(scsi_cmnd), 267 task->data_dir); 268 } 269 } 270 271 spin_lock_irqsave(&sas_dev->lock, flags); 272 list_del_init(&slot->entry); 273 spin_unlock_irqrestore(&sas_dev->lock, flags); 274 275 memset(slot, 0, offsetof(struct hisi_sas_slot, buf)); 276 277 hisi_sas_slot_index_free(hisi_hba, 
slot->idx); 278 } 279 EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free); 280 281 static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba, 282 struct hisi_sas_slot *slot) 283 { 284 hisi_hba->hw->prep_smp(hisi_hba, slot); 285 } 286 287 static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba, 288 struct hisi_sas_slot *slot) 289 { 290 hisi_hba->hw->prep_ssp(hisi_hba, slot); 291 } 292 293 static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba, 294 struct hisi_sas_slot *slot) 295 { 296 hisi_hba->hw->prep_stp(hisi_hba, slot); 297 } 298 299 static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba, 300 struct hisi_sas_slot *slot, 301 int device_id, int abort_flag, int tag_to_abort) 302 { 303 hisi_hba->hw->prep_abort(hisi_hba, slot, 304 device_id, abort_flag, tag_to_abort); 305 } 306 307 static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba, 308 struct sas_task *task, int n_elem, 309 int n_elem_req, int n_elem_resp) 310 { 311 struct device *dev = hisi_hba->dev; 312 313 if (!sas_protocol_ata(task->task_proto)) { 314 if (task->num_scatter) { 315 if (n_elem) 316 dma_unmap_sg(dev, task->scatter, 317 task->num_scatter, 318 task->data_dir); 319 } else if (task->task_proto & SAS_PROTOCOL_SMP) { 320 if (n_elem_req) 321 dma_unmap_sg(dev, &task->smp_task.smp_req, 322 1, DMA_TO_DEVICE); 323 if (n_elem_resp) 324 dma_unmap_sg(dev, &task->smp_task.smp_resp, 325 1, DMA_FROM_DEVICE); 326 } 327 } 328 } 329 330 static int hisi_sas_dma_map(struct hisi_hba *hisi_hba, 331 struct sas_task *task, int *n_elem, 332 int *n_elem_req, int *n_elem_resp) 333 { 334 struct device *dev = hisi_hba->dev; 335 int rc; 336 337 if (sas_protocol_ata(task->task_proto)) { 338 *n_elem = task->num_scatter; 339 } else { 340 unsigned int req_len, resp_len; 341 342 if (task->num_scatter) { 343 *n_elem = dma_map_sg(dev, task->scatter, 344 task->num_scatter, task->data_dir); 345 if (!*n_elem) { 346 rc = -ENOMEM; 347 goto prep_out; 348 } 349 } else if (task->task_proto & SAS_PROTOCOL_SMP) { 350 *n_elem_req 
= dma_map_sg(dev, &task->smp_task.smp_req, 351 1, DMA_TO_DEVICE); 352 if (!*n_elem_req) { 353 rc = -ENOMEM; 354 goto prep_out; 355 } 356 req_len = sg_dma_len(&task->smp_task.smp_req); 357 if (req_len & 0x3) { 358 rc = -EINVAL; 359 goto err_out_dma_unmap; 360 } 361 *n_elem_resp = dma_map_sg(dev, &task->smp_task.smp_resp, 362 1, DMA_FROM_DEVICE); 363 if (!*n_elem_resp) { 364 rc = -ENOMEM; 365 goto err_out_dma_unmap; 366 } 367 resp_len = sg_dma_len(&task->smp_task.smp_resp); 368 if (resp_len & 0x3) { 369 rc = -EINVAL; 370 goto err_out_dma_unmap; 371 } 372 } 373 } 374 375 if (*n_elem > HISI_SAS_SGE_PAGE_CNT) { 376 dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT", 377 *n_elem); 378 rc = -EINVAL; 379 goto err_out_dma_unmap; 380 } 381 return 0; 382 383 err_out_dma_unmap: 384 /* It would be better to call dma_unmap_sg() here, but it's messy */ 385 hisi_sas_dma_unmap(hisi_hba, task, *n_elem, 386 *n_elem_req, *n_elem_resp); 387 prep_out: 388 return rc; 389 } 390 391 static void hisi_sas_dif_dma_unmap(struct hisi_hba *hisi_hba, 392 struct sas_task *task, int n_elem_dif) 393 { 394 struct device *dev = hisi_hba->dev; 395 396 if (n_elem_dif) { 397 struct sas_ssp_task *ssp_task = &task->ssp_task; 398 struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; 399 400 dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), 401 scsi_prot_sg_count(scsi_cmnd), 402 task->data_dir); 403 } 404 } 405 406 static int hisi_sas_dif_dma_map(struct hisi_hba *hisi_hba, 407 int *n_elem_dif, struct sas_task *task) 408 { 409 struct device *dev = hisi_hba->dev; 410 struct sas_ssp_task *ssp_task; 411 struct scsi_cmnd *scsi_cmnd; 412 int rc; 413 414 if (task->num_scatter) { 415 ssp_task = &task->ssp_task; 416 scsi_cmnd = ssp_task->cmd; 417 418 if (scsi_prot_sg_count(scsi_cmnd)) { 419 *n_elem_dif = dma_map_sg(dev, 420 scsi_prot_sglist(scsi_cmnd), 421 scsi_prot_sg_count(scsi_cmnd), 422 task->data_dir); 423 424 if (!*n_elem_dif) 425 return -ENOMEM; 426 427 if (*n_elem_dif > HISI_SAS_SGE_DIF_PAGE_CNT) { 428 
dev_err(dev, "task prep: n_elem_dif(%d) too large\n", 429 *n_elem_dif); 430 rc = -EINVAL; 431 goto err_out_dif_dma_unmap; 432 } 433 } 434 } 435 436 return 0; 437 438 err_out_dif_dma_unmap: 439 dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), 440 scsi_prot_sg_count(scsi_cmnd), task->data_dir); 441 return rc; 442 } 443 444 static int hisi_sas_task_prep(struct sas_task *task, 445 struct hisi_sas_dq **dq_pointer, 446 bool is_tmf, struct hisi_sas_tmf_task *tmf, 447 int *pass) 448 { 449 struct domain_device *device = task->dev; 450 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 451 struct hisi_sas_device *sas_dev = device->lldd_dev; 452 struct hisi_sas_port *port; 453 struct hisi_sas_slot *slot; 454 struct hisi_sas_cmd_hdr *cmd_hdr_base; 455 struct asd_sas_port *sas_port = device->port; 456 struct device *dev = hisi_hba->dev; 457 int dlvry_queue_slot, dlvry_queue, rc, slot_idx; 458 int n_elem = 0, n_elem_dif = 0, n_elem_req = 0, n_elem_resp = 0; 459 struct hisi_sas_dq *dq; 460 unsigned long flags; 461 int wr_q_index; 462 463 if (DEV_IS_GONE(sas_dev)) { 464 if (sas_dev) 465 dev_info(dev, "task prep: device %d not ready\n", 466 sas_dev->device_id); 467 else 468 dev_info(dev, "task prep: device %016llx not ready\n", 469 SAS_ADDR(device->sas_addr)); 470 471 return -ECOMM; 472 } 473 474 if (hisi_hba->reply_map) { 475 int cpu = raw_smp_processor_id(); 476 unsigned int dq_index = hisi_hba->reply_map[cpu]; 477 478 *dq_pointer = dq = &hisi_hba->dq[dq_index]; 479 } else { 480 *dq_pointer = dq = sas_dev->dq; 481 } 482 483 port = to_hisi_sas_port(sas_port); 484 if (port && !port->port_attached) { 485 dev_info(dev, "task prep: %s port%d not attach device\n", 486 (dev_is_sata(device)) ? 
487 "SATA/STP" : "SAS", 488 device->port->id); 489 490 return -ECOMM; 491 } 492 493 rc = hisi_sas_dma_map(hisi_hba, task, &n_elem, 494 &n_elem_req, &n_elem_resp); 495 if (rc < 0) 496 goto prep_out; 497 498 if (!sas_protocol_ata(task->task_proto)) { 499 rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task); 500 if (rc < 0) 501 goto err_out_dma_unmap; 502 } 503 504 if (hisi_hba->hw->slot_index_alloc) 505 rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device); 506 else { 507 struct scsi_cmnd *scsi_cmnd = NULL; 508 509 if (task->uldd_task) { 510 struct ata_queued_cmd *qc; 511 512 if (dev_is_sata(device)) { 513 qc = task->uldd_task; 514 scsi_cmnd = qc->scsicmd; 515 } else { 516 scsi_cmnd = task->uldd_task; 517 } 518 } 519 rc = hisi_sas_slot_index_alloc(hisi_hba, scsi_cmnd); 520 } 521 if (rc < 0) 522 goto err_out_dif_dma_unmap; 523 524 slot_idx = rc; 525 slot = &hisi_hba->slot_info[slot_idx]; 526 527 spin_lock_irqsave(&dq->lock, flags); 528 wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq); 529 if (wr_q_index < 0) { 530 spin_unlock_irqrestore(&dq->lock, flags); 531 rc = -EAGAIN; 532 goto err_out_tag; 533 } 534 535 list_add_tail(&slot->delivery, &dq->list); 536 spin_unlock_irqrestore(&dq->lock, flags); 537 spin_lock_irqsave(&sas_dev->lock, flags); 538 list_add_tail(&slot->entry, &sas_dev->list); 539 spin_unlock_irqrestore(&sas_dev->lock, flags); 540 541 dlvry_queue = dq->id; 542 dlvry_queue_slot = wr_q_index; 543 544 slot->device_id = sas_dev->device_id; 545 slot->n_elem = n_elem; 546 slot->n_elem_dif = n_elem_dif; 547 slot->dlvry_queue = dlvry_queue; 548 slot->dlvry_queue_slot = dlvry_queue_slot; 549 cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue]; 550 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot]; 551 slot->task = task; 552 slot->port = port; 553 slot->tmf = tmf; 554 slot->is_internal = is_tmf; 555 task->lldd_task = slot; 556 557 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr)); 558 memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ); 559 
memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ); 560 561 switch (task->task_proto) { 562 case SAS_PROTOCOL_SMP: 563 hisi_sas_task_prep_smp(hisi_hba, slot); 564 break; 565 case SAS_PROTOCOL_SSP: 566 hisi_sas_task_prep_ssp(hisi_hba, slot); 567 break; 568 case SAS_PROTOCOL_SATA: 569 case SAS_PROTOCOL_STP: 570 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: 571 hisi_sas_task_prep_ata(hisi_hba, slot); 572 break; 573 default: 574 dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n", 575 task->task_proto); 576 break; 577 } 578 579 spin_lock_irqsave(&task->task_state_lock, flags); 580 task->task_state_flags |= SAS_TASK_AT_INITIATOR; 581 spin_unlock_irqrestore(&task->task_state_lock, flags); 582 583 ++(*pass); 584 WRITE_ONCE(slot->ready, 1); 585 586 return 0; 587 588 err_out_tag: 589 hisi_sas_slot_index_free(hisi_hba, slot_idx); 590 err_out_dif_dma_unmap: 591 if (!sas_protocol_ata(task->task_proto)) 592 hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif); 593 err_out_dma_unmap: 594 hisi_sas_dma_unmap(hisi_hba, task, n_elem, 595 n_elem_req, n_elem_resp); 596 prep_out: 597 dev_err(dev, "task prep: failed[%d]!\n", rc); 598 return rc; 599 } 600 601 static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags, 602 bool is_tmf, struct hisi_sas_tmf_task *tmf) 603 { 604 u32 rc; 605 u32 pass = 0; 606 unsigned long flags; 607 struct hisi_hba *hisi_hba; 608 struct device *dev; 609 struct domain_device *device = task->dev; 610 struct asd_sas_port *sas_port = device->port; 611 struct hisi_sas_dq *dq = NULL; 612 613 if (!sas_port) { 614 struct task_status_struct *ts = &task->task_status; 615 616 ts->resp = SAS_TASK_UNDELIVERED; 617 ts->stat = SAS_PHY_DOWN; 618 /* 619 * libsas will use dev->port, should 620 * not call task_done for sata 621 */ 622 if (device->dev_type != SAS_SATA_DEV) 623 task->task_done(task); 624 return -ECOMM; 625 } 626 627 hisi_hba = dev_to_hisi_hba(device); 628 dev = hisi_hba->dev; 629 630 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, 
&hisi_hba->flags))) { 631 if (in_softirq()) 632 return -EINVAL; 633 634 down(&hisi_hba->sem); 635 up(&hisi_hba->sem); 636 } 637 638 /* protect task_prep and start_delivery sequence */ 639 rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass); 640 if (rc) 641 dev_err(dev, "task exec: failed[%d]!\n", rc); 642 643 if (likely(pass)) { 644 spin_lock_irqsave(&dq->lock, flags); 645 hisi_hba->hw->start_delivery(dq); 646 spin_unlock_irqrestore(&dq->lock, flags); 647 } 648 649 return rc; 650 } 651 652 static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no) 653 { 654 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 655 struct asd_sas_phy *sas_phy = &phy->sas_phy; 656 struct sas_ha_struct *sas_ha; 657 658 if (!phy->phy_attached) 659 return; 660 661 sas_ha = &hisi_hba->sha; 662 sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE); 663 664 if (sas_phy->phy) { 665 struct sas_phy *sphy = sas_phy->phy; 666 667 sphy->negotiated_linkrate = sas_phy->linkrate; 668 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; 669 sphy->maximum_linkrate_hw = 670 hisi_hba->hw->phy_get_max_linkrate(); 671 if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) 672 sphy->minimum_linkrate = phy->minimum_linkrate; 673 674 if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) 675 sphy->maximum_linkrate = phy->maximum_linkrate; 676 } 677 678 if (phy->phy_type & PORT_TYPE_SAS) { 679 struct sas_identify_frame *id; 680 681 id = (struct sas_identify_frame *)phy->frame_rcvd; 682 id->dev_type = phy->identify.device_type; 683 id->initiator_bits = SAS_PROTOCOL_ALL; 684 id->target_bits = phy->identify.target_port_protocols; 685 } else if (phy->phy_type & PORT_TYPE_SATA) { 686 /* Nothing */ 687 } 688 689 sas_phy->frame_rcvd_size = phy->frame_rcvd_size; 690 sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED); 691 } 692 693 static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device) 694 { 695 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 696 struct hisi_sas_device *sas_dev = 
NULL; 697 unsigned long flags; 698 int last = hisi_hba->last_dev_id; 699 int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES; 700 int i; 701 702 spin_lock_irqsave(&hisi_hba->lock, flags); 703 for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) { 704 if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) { 705 int queue = i % hisi_hba->queue_count; 706 struct hisi_sas_dq *dq = &hisi_hba->dq[queue]; 707 708 hisi_hba->devices[i].device_id = i; 709 sas_dev = &hisi_hba->devices[i]; 710 sas_dev->dev_status = HISI_SAS_DEV_INIT; 711 sas_dev->dev_type = device->dev_type; 712 sas_dev->hisi_hba = hisi_hba; 713 sas_dev->sas_device = device; 714 sas_dev->dq = dq; 715 spin_lock_init(&sas_dev->lock); 716 INIT_LIST_HEAD(&hisi_hba->devices[i].list); 717 break; 718 } 719 i++; 720 } 721 hisi_hba->last_dev_id = i; 722 spin_unlock_irqrestore(&hisi_hba->lock, flags); 723 724 return sas_dev; 725 } 726 727 #define HISI_SAS_SRST_ATA_DISK_CNT 3 728 static int hisi_sas_init_device(struct domain_device *device) 729 { 730 int rc = TMF_RESP_FUNC_COMPLETE; 731 struct scsi_lun lun; 732 struct hisi_sas_tmf_task tmf_task; 733 int retry = HISI_SAS_SRST_ATA_DISK_CNT; 734 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 735 struct device *dev = hisi_hba->dev; 736 struct sas_phy *local_phy; 737 738 switch (device->dev_type) { 739 case SAS_END_DEVICE: 740 int_to_scsilun(0, &lun); 741 742 tmf_task.tmf = TMF_CLEAR_TASK_SET; 743 rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun, 744 &tmf_task); 745 if (rc == TMF_RESP_FUNC_COMPLETE) 746 hisi_sas_release_task(hisi_hba, device); 747 break; 748 case SAS_SATA_DEV: 749 case SAS_SATA_PM: 750 case SAS_SATA_PM_PORT: 751 case SAS_SATA_PENDING: 752 /* 753 * send HARD RESET to clear previous affiliation of 754 * STP target port 755 */ 756 local_phy = sas_get_local_phy(device); 757 if (!scsi_is_sas_phy_local(local_phy) && 758 !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) { 759 unsigned long deadline = ata_deadline(jiffies, 20000); 760 struct 
sata_device *sata_dev = &device->sata_dev; 761 struct ata_host *ata_host = sata_dev->ata_host; 762 struct ata_port_operations *ops = ata_host->ops; 763 struct ata_port *ap = sata_dev->ap; 764 struct ata_link *link; 765 unsigned int classes; 766 767 ata_for_each_link(link, ap, EDGE) 768 rc = ops->hardreset(link, &classes, 769 deadline); 770 } 771 sas_put_local_phy(local_phy); 772 if (rc) { 773 dev_warn(dev, "SATA disk hardreset fail: %d\n", rc); 774 return rc; 775 } 776 777 while (retry-- > 0) { 778 rc = hisi_sas_softreset_ata_disk(device); 779 if (!rc) 780 break; 781 } 782 break; 783 default: 784 break; 785 } 786 787 return rc; 788 } 789 790 static int hisi_sas_dev_found(struct domain_device *device) 791 { 792 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 793 struct domain_device *parent_dev = device->parent; 794 struct hisi_sas_device *sas_dev; 795 struct device *dev = hisi_hba->dev; 796 int rc; 797 798 if (hisi_hba->hw->alloc_dev) 799 sas_dev = hisi_hba->hw->alloc_dev(device); 800 else 801 sas_dev = hisi_sas_alloc_dev(device); 802 if (!sas_dev) { 803 dev_err(dev, "fail alloc dev: max support %d devices\n", 804 HISI_SAS_MAX_DEVICES); 805 return -EINVAL; 806 } 807 808 device->lldd_dev = sas_dev; 809 hisi_hba->hw->setup_itct(hisi_hba, sas_dev); 810 811 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) { 812 int phy_no; 813 u8 phy_num = parent_dev->ex_dev.num_phys; 814 struct ex_phy *phy; 815 816 for (phy_no = 0; phy_no < phy_num; phy_no++) { 817 phy = &parent_dev->ex_dev.ex_phy[phy_no]; 818 if (SAS_ADDR(phy->attached_sas_addr) == 819 SAS_ADDR(device->sas_addr)) 820 break; 821 } 822 823 if (phy_no == phy_num) { 824 dev_info(dev, "dev found: no attached " 825 "dev:%016llx at ex:%016llx\n", 826 SAS_ADDR(device->sas_addr), 827 SAS_ADDR(parent_dev->sas_addr)); 828 rc = -EINVAL; 829 goto err_out; 830 } 831 } 832 833 dev_info(dev, "dev[%d:%x] found\n", 834 sas_dev->device_id, sas_dev->dev_type); 835 836 rc = hisi_sas_init_device(device); 837 if (rc) 838 goto 
err_out; 839 sas_dev->dev_status = HISI_SAS_DEV_NORMAL; 840 return 0; 841 842 err_out: 843 hisi_sas_dev_gone(device); 844 return rc; 845 } 846 847 int hisi_sas_slave_configure(struct scsi_device *sdev) 848 { 849 struct domain_device *dev = sdev_to_domain_dev(sdev); 850 int ret = sas_slave_configure(sdev); 851 852 if (ret) 853 return ret; 854 if (!dev_is_sata(dev)) 855 sas_change_queue_depth(sdev, 64); 856 857 return 0; 858 } 859 EXPORT_SYMBOL_GPL(hisi_sas_slave_configure); 860 861 void hisi_sas_scan_start(struct Scsi_Host *shost) 862 { 863 struct hisi_hba *hisi_hba = shost_priv(shost); 864 865 hisi_hba->hw->phys_init(hisi_hba); 866 } 867 EXPORT_SYMBOL_GPL(hisi_sas_scan_start); 868 869 int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time) 870 { 871 struct hisi_hba *hisi_hba = shost_priv(shost); 872 struct sas_ha_struct *sha = &hisi_hba->sha; 873 874 /* Wait for PHY up interrupt to occur */ 875 if (time < HZ) 876 return 0; 877 878 sas_drain_work(sha); 879 return 1; 880 } 881 EXPORT_SYMBOL_GPL(hisi_sas_scan_finished); 882 883 static void hisi_sas_phyup_work(struct work_struct *work) 884 { 885 struct hisi_sas_phy *phy = 886 container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]); 887 struct hisi_hba *hisi_hba = phy->hisi_hba; 888 struct asd_sas_phy *sas_phy = &phy->sas_phy; 889 int phy_no = sas_phy->id; 890 891 if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP) 892 hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no); 893 hisi_sas_bytes_dmaed(hisi_hba, phy_no); 894 } 895 896 static void hisi_sas_linkreset_work(struct work_struct *work) 897 { 898 struct hisi_sas_phy *phy = 899 container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]); 900 struct asd_sas_phy *sas_phy = &phy->sas_phy; 901 902 hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL); 903 } 904 905 static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = { 906 [HISI_PHYE_PHY_UP] = hisi_sas_phyup_work, 907 [HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work, 908 }; 909 910 bool 
hisi_sas_notify_phy_event(struct hisi_sas_phy *phy, 911 enum hisi_sas_phy_event event) 912 { 913 struct hisi_hba *hisi_hba = phy->hisi_hba; 914 915 if (WARN_ON(event >= HISI_PHYES_NUM)) 916 return false; 917 918 return queue_work(hisi_hba->wq, &phy->works[event]); 919 } 920 EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event); 921 922 static void hisi_sas_wait_phyup_timedout(struct timer_list *t) 923 { 924 struct hisi_sas_phy *phy = from_timer(phy, t, timer); 925 struct hisi_hba *hisi_hba = phy->hisi_hba; 926 struct device *dev = hisi_hba->dev; 927 int phy_no = phy->sas_phy.id; 928 929 dev_warn(dev, "phy%d wait phyup timeout, issuing link reset\n", phy_no); 930 hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); 931 } 932 933 void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no) 934 { 935 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 936 struct device *dev = hisi_hba->dev; 937 938 if (!timer_pending(&phy->timer)) { 939 dev_dbg(dev, "phy%d OOB ready\n", phy_no); 940 phy->timer.expires = jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT * HZ; 941 add_timer(&phy->timer); 942 } 943 } 944 EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready); 945 946 static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no) 947 { 948 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 949 struct asd_sas_phy *sas_phy = &phy->sas_phy; 950 int i; 951 952 phy->hisi_hba = hisi_hba; 953 phy->port = NULL; 954 phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS; 955 phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate(); 956 sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 
1 : 0; 957 sas_phy->class = SAS; 958 sas_phy->iproto = SAS_PROTOCOL_ALL; 959 sas_phy->tproto = 0; 960 sas_phy->type = PHY_TYPE_PHYSICAL; 961 sas_phy->role = PHY_ROLE_INITIATOR; 962 sas_phy->oob_mode = OOB_NOT_CONNECTED; 963 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; 964 sas_phy->id = phy_no; 965 sas_phy->sas_addr = &hisi_hba->sas_addr[0]; 966 sas_phy->frame_rcvd = &phy->frame_rcvd[0]; 967 sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata; 968 sas_phy->lldd_phy = phy; 969 970 for (i = 0; i < HISI_PHYES_NUM; i++) 971 INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]); 972 973 spin_lock_init(&phy->lock); 974 975 timer_setup(&phy->timer, hisi_sas_wait_phyup_timedout, 0); 976 } 977 978 /* Wrapper to ensure we track hisi_sas_phy.enable properly */ 979 void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no, int enable) 980 { 981 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 982 struct asd_sas_phy *aphy = &phy->sas_phy; 983 struct sas_phy *sphy = aphy->phy; 984 unsigned long flags; 985 986 spin_lock_irqsave(&phy->lock, flags); 987 988 if (enable) { 989 /* We may have been enabled already; if so, don't touch */ 990 if (!phy->enable) 991 sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; 992 hisi_hba->hw->phy_start(hisi_hba, phy_no); 993 } else { 994 sphy->negotiated_linkrate = SAS_PHY_DISABLED; 995 hisi_hba->hw->phy_disable(hisi_hba, phy_no); 996 } 997 phy->enable = enable; 998 spin_unlock_irqrestore(&phy->lock, flags); 999 } 1000 EXPORT_SYMBOL_GPL(hisi_sas_phy_enable); 1001 1002 static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy) 1003 { 1004 struct sas_ha_struct *sas_ha = sas_phy->ha; 1005 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; 1006 struct hisi_sas_phy *phy = sas_phy->lldd_phy; 1007 struct asd_sas_port *sas_port = sas_phy->port; 1008 struct hisi_sas_port *port = to_hisi_sas_port(sas_port); 1009 unsigned long flags; 1010 1011 if (!sas_port) 1012 return; 1013 1014 spin_lock_irqsave(&hisi_hba->lock, flags); 1015 
	port->port_attached = 1;
	port->id = phy->port_id;
	phy->port = port;
	sas_port->lldd_port = port;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
}

/*
 * Complete a single task back to libsas as aborted and free its slot.
 * Safe to call with task == NULL (slot-only cleanup).
 */
static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
				     struct hisi_sas_slot *slot)
{
	if (task) {
		unsigned long flags;
		struct task_status_struct *ts;

		ts = &task->task_status;

		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_ABORTED_TASK;
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags &=
			~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
		/* Internal and SMP tasks are not marked DONE here */
		if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP)
			task->task_state_flags |= SAS_TASK_STATE_DONE;
		spin_unlock_irqrestore(&task->task_state_lock, flags);
	}

	hisi_sas_slot_task_free(hisi_hba, task, slot);
}

/* Release every outstanding slot queued against @device. */
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
			struct domain_device *device)
{
	struct hisi_sas_slot *slot, *slot2;
	struct hisi_sas_device *sas_dev = device->lldd_dev;

	/* _safe variant: hisi_sas_do_release_task() unlinks the entry */
	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
		hisi_sas_do_release_task(hisi_hba, slot->task, slot);
}

/*
 * hisi_sas_release_tasks - release outstanding tasks for all attached
 * devices on this host (used on host-wide reset paths).
 */
void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
		    !device)
			continue;

		hisi_sas_release_task(hisi_hba, device);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);

/* De-register a device from the HW, if this HW generation supports it. */
static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
				struct domain_device *device)
{
	if (hisi_hba->hw->dereg_device)
		hisi_hba->hw->dereg_device(hisi_hba, device);
}

/*
 * libsas lldd_dev_gone callback: abort in-flight I/O for the device,
 * de-register it and clear its ITCT entry, then mark the LLDD device
 * slot free. The abort/ITCT work is skipped while a controller reset
 * is in progress (HISI_SAS_RESET_BIT set) - the reset path handles it.
 */
static void hisi_sas_dev_gone(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;

	dev_info(dev, "dev[%d:%x] is gone\n",
		 sas_dev->device_id, sas_dev->dev_type);

	if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
		hisi_sas_internal_task_abort(hisi_hba, device,
					     HISI_SAS_INT_ABT_DEV, 0);

		hisi_sas_dereg_device(hisi_hba, device);

		/* hisi_hba->sem serialises clear_itct against host reset */
		down(&hisi_hba->sem);
		hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
		up(&hisi_hba->sem);
		device->lldd_dev = NULL;
	}

	if (hisi_hba->hw->free_device)
		hisi_hba->hw->free_device(sas_dev);
	sas_dev->dev_type = SAS_PHY_UNUSED;
}

/* libsas lldd_execute_task callback: queue a normal (non-TMF) task. */
static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
}

/*
 * Apply a new min/max linkrate pair to a PHY. Exactly one of the two
 * rates in @r must be SAS_LINK_RATE_UNKNOWN (the other side is taken
 * from the current sysfs phy settings); otherwise -EINVAL.
 * The PHY is bounced (disable, 100ms, program rate, enable) to make
 * the new rate take effect.
 */
static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
			struct sas_phy_linkrates *r)
{
	struct sas_phy_linkrates _r;

	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	enum sas_linkrate min, max;

	if (r->minimum_linkrate > SAS_LINK_RATE_1_5_GBPS)
		return -EINVAL;

	if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = sas_phy->phy->maximum_linkrate;
		min = r->minimum_linkrate;
	} else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = r->maximum_linkrate;
		min = sas_phy->phy->minimum_linkrate;
	} else
		return -EINVAL;

	_r.maximum_linkrate = max;
	_r.minimum_linkrate = min;

	sas_phy->phy->maximum_linkrate = max;
	sas_phy->phy->minimum_linkrate = min;

	hisi_sas_phy_enable(hisi_hba, phy_no, 0);
	msleep(100);
	hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
	hisi_sas_phy_enable(hisi_hba, phy_no, 1);

	return 0;
}

/*
 * libsas lldd_control_phy callback: dispatch PHY control functions
 * (hard reset, link reset, disable, set linkrate, get events) to the
 * HW layer. Unsupported functions return -EOPNOTSUPP.
 */
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	int phy_no = sas_phy->id;

	switch (func) {
	case PHY_FUNC_HARD_RESET:
		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
		break;

	case PHY_FUNC_LINK_RESET:
		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
		msleep(100);
		hisi_sas_phy_enable(hisi_hba, phy_no, 1);
		break;

	case PHY_FUNC_DISABLE:
		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
		break;

	case PHY_FUNC_SET_LINK_RATE:
		return hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
	case PHY_FUNC_GET_EVENTS:
		if (hisi_hba->hw->get_events) {
			hisi_hba->hw->get_events(hisi_hba, phy_no);
			break;
		}
		/* fallthru */
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

/* Completion callback for internal slow tasks: stop timer, wake waiter. */
static void hisi_sas_task_done(struct sas_task *task)
{
	del_timer(&task->slow_task->timer);
	complete(&task->slow_task->completion);
}

/*
 * Slow-task timer handler: mark the task ABORTED if it has not already
 * completed, and complete it ourselves so the waiter in
 * hisi_sas_exec_internal_tmf_task() / the internal-abort path wakes up.
 */
static void hisi_sas_tmf_timedout(struct timer_list *t)
{
	struct sas_task_slow *slow = from_timer(slow, t, timer);
	struct sas_task *task = slow->task;
	unsigned long flags;
	bool is_completed = true;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
		is_completed = false;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	/* Only complete here if hisi_sas_task_done() did not run */
	if (!is_completed)
		complete(&task->slow_task->completion);
}

/* Timeouts in seconds; TASK_RETRY is the TMF resubmit count */
#define TASK_TIMEOUT 20
#define TASK_RETRY 3
#define INTERNAL_ABORT_TIMEOUT 6
/*
 * Build and execute an internal TMF task (SSP TMF IU or SATA reset FIS
 * in @parameter, @para_len bytes), retrying up to TASK_RETRY times.
 *
 * Return: TMF_RESP_FUNC_COMPLETE/SUCC on success, TMF_RESP_FUNC_FAILED,
 * a negative errno, or the residual byte count on SAS_DATA_UNDERRUN.
 */
static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
					   void *parameter, u32 para_len,
					   struct hisi_sas_tmf_task *tmf)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
	struct device *dev = hisi_hba->dev;
	struct sas_task *task;
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = device->tproto;

		if (dev_is_sata(device)) {
			task->ata_task.device_control_reg_update = 1;
			memcpy(&task->ata_task.fis, parameter, para_len);
		} else {
			memcpy(&task->ssp_task, parameter, para_len);
		}
		task->task_done = hisi_sas_task_done;

		task->slow_task->timer.function = hisi_sas_tmf_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT * HZ;
		add_timer(&task->slow_task->timer);

		res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);

		if (res) {
			del_timer(&task->slow_task->timer);
			dev_err(dev, "abort tmf: executing internal task failed: %d\n",
				res);
			goto ex_err;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even TMF timed out, return direct. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				struct hisi_sas_slot *slot = task->lldd_task;

				dev_err(dev, "abort tmf: TMF task timeout and not done\n");
				if (slot) {
					struct hisi_sas_cq *cq =
					       &hisi_hba->cq[slot->dlvry_queue];
					/*
					 * flush tasklet to avoid free'ing task
					 * before using task in IO completion
					 */
					tasklet_kill(&cq->tasklet);
					slot->task = NULL;
				}

				goto ex_err;
			} else
				dev_err(dev, "abort tmf: TMF task timeout\n");
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun
			 */
			dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x sts 0x%x underrun\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_OVERRUN) {
			dev_warn(dev, "abort tmf: blocked task error\n");
			res = -EMSGSIZE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_OPEN_REJECT) {
			dev_warn(dev, "abort tmf: open reject failed\n");
			res = -EIO;
		} else {
			dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x status 0x%x\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
		}
		/* Free this attempt's task and retry with a fresh one */
		sas_free_task(task);
		task = NULL;
	}
ex_err:
	if (retry == TASK_RETRY)
		dev_warn(dev, "abort tmf: executing internal task failed!\n");
	sas_free_task(task);
	return res;
}

/* Build a device-reset FIS (SRST asserted or de-asserted) for @dev. */
static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
		bool reset, int pmp, u8 *fis)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	if (reset)
		tf.ctl |= ATA_SRST;
	else
		tf.ctl &= ~ATA_SRST;
	tf.command = ATA_CMD_DEV_RESET;
	ata_tf_to_fis(&tf, pmp, 0, fis);
}

/*
 * Software-reset a SATA disk: send SRST-assert then SRST-deassert FISes
 * to each link, then release any remaining tasks on success.
 */
static int hisi_sas_softreset_ata_disk(struct domain_device *device)
{
	u8 fis[20] = {0};
	struct ata_port *ap = device->sata_dev.ap;
	struct ata_link *link;
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);

	ata_for_each_link(link, ap, EDGE) {
		int pmp = sata_srst_pmp(link);

		hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
		rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			break;
	}

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis,
							     s, NULL);
			if (rc != TMF_RESP_FUNC_COMPLETE)
				dev_err(dev, "ata disk de-reset failed\n");
		}
	} else {
		dev_err(dev, "ata disk reset failed\n");
	}

	if (rc == TMF_RESP_FUNC_COMPLETE)
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

/* Issue an SSP TMF to @lun on @device; SSP-capable targets only. */
static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
				u8 *lun, struct hisi_sas_tmf_task *tmf)
{
	struct sas_ssp_task ssp_task;

	if (!(device->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	memcpy(ssp_task.LUN, lun, 8);

	return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
				sizeof(ssp_task), tmf);
}

/*
 * After a controller reset, re-derive each registered device's port id
 * from the first still-up PHY in its port and re-program the ITCT;
 * ports with no live PHY are marked 0xff (invalid).
 */
static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
{
	u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;
		struct asd_sas_port *sas_port;
		struct hisi_sas_port *port;
		struct hisi_sas_phy *phy = NULL;
		struct asd_sas_phy *sas_phy;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED)
				|| !device || !device->port)
			continue;

		sas_port = device->port;
		port = to_hisi_sas_port(sas_port);

		list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
			if (state & BIT(sas_phy->id)) {
				phy = sas_phy->lldd_phy;
				break;
			}

		if (phy) {
			port->id = phy->port_id;

			/* Update linkrate of directly attached device. */
			if (!device->parent)
				device->linkrate = phy->sas_phy.linkrate;

			hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
		} else
			port->id = 0xff;
	}
}

/*
 * Compare pre-reset PHY state with current @state and notify libsas:
 * broadcast a rescan on each expander port whose PHY is still up
 * (once per port), and report PHY down for PHYs that did not return.
 */
static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
			      u32 state)
{
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct asd_sas_port *_sas_port = NULL;
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		struct asd_sas_port *sas_port = sas_phy->port;
		/* only notify once per port (wide ports share sas_port) */
		bool do_port_check = !!(_sas_port != sas_port);

		if (!sas_phy->phy->enabled)
			continue;

		/* Report PHY state change to libsas */
		if (state & BIT(phy_no)) {
			if (do_port_check && sas_port && sas_port->port_dev) {
				struct domain_device *dev = sas_port->port_dev;

				_sas_port = sas_port;

				if (DEV_IS_EXPANDER(dev->dev_type))
					sas_ha->notify_port_event(sas_phy,
							PORTE_BROADCAST_RCVD);
			}
		} else {
			hisi_sas_phy_down(hisi_hba, phy_no, 0);
		}

	}
}

/* Re-run LLDD device init for every registered device after reset. */
static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
			continue;

		hisi_sas_init_device(device);
	}
}

/*
 * Send an SRST reset FIS to @device through every live PHY of
 * @sas_port (force_phy pins the delivery PHY). Used to clear the
 * reject-STP-links state after a controller reset.
 */
static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba,
					     struct asd_sas_port *sas_port,
					     struct domain_device *device)
{
	struct hisi_sas_tmf_task tmf_task = { .force_phy = 1 };
	struct ata_port *ap = device->sata_dev.ap;
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);
	int rc = TMF_RESP_FUNC_FAILED;
	struct asd_sas_phy *sas_phy;
	struct ata_link *link;
	u8 fis[20] = {0};
	u32 state;

	state = hisi_hba->hw->get_phys_state(hisi_hba);
	list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) {
		if (!(state & BIT(sas_phy->id)))
			continue;

		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			tmf_task.phy_id = sas_phy->id;
			hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis, s,
							     &tmf_task);
			if (rc != TMF_RESP_FUNC_COMPLETE) {
				dev_err(dev, "phy%d ata reset failed rc=%d\n",
					sas_phy->id, rc);
				break;
			}
		}
	}
}

/*
 * For HW with reject_stp_links_msk set: abort internal I/O for every
 * device, then find one SATA device behind each expander port and
 * reset it through each PHY to unblock rejected STP links.
 */
static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int port_no, rc, i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
			continue;

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0)
			dev_err(dev, "STP reject: abort dev failed %d\n", rc);
	}

	for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) {
		struct hisi_sas_port *port = &hisi_hba->port[port_no];
		struct asd_sas_port *sas_port = &port->sas_port;
		struct domain_device *port_dev = sas_port->port_dev;
		struct domain_device *device;

		if (!port_dev || !DEV_IS_EXPANDER(port_dev->dev_type))
			continue;

		/* Try to find a SATA device */
		list_for_each_entry(device, &sas_port->dev_list,
				    dev_list_node) {
			if (dev_is_sata(device)) {
				hisi_sas_send_ata_reset_each_phy(hisi_hba,
								 sas_port,
								 device);
				break;
			}
		}
	}
}

/*
 * hisi_sas_controller_reset_prepare - quiesce the host before a
 * controller reset: take the host semaphore, snapshot PHY state,
 * block the SCSI midlayer, drain commands, and set REJECT_CMD so new
 * submissions are refused until reset_done.
 */
void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
{
	struct Scsi_Host *shost = hisi_hba->shost;

	down(&hisi_hba->sem);
	hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba);

	scsi_block_requests(shost);
	hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);

	if (timer_pending(&hisi_hba->timer))
		del_timer_sync(&hisi_hba->timer);

	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare);

/*
 * hisi_sas_controller_reset_done - counterpart of reset_prepare:
 * bring PHYs back up, refresh port ids/ITCTs, re-enable command
 * submission, re-init devices, and rescan the topology against the
 * pre-reset PHY state.
 */
void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
{
	struct Scsi_Host *shost = hisi_hba->shost;
	u32 state;

	/* Init and wait for PHYs to come up and all libsas event finished. */
	hisi_hba->hw->phys_init(hisi_hba);
	msleep(1000);
	hisi_sas_refresh_port_id(hisi_hba);
	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
	up(&hisi_hba->sem);

	if (hisi_hba->reject_stp_links_msk)
		hisi_sas_terminate_stp_reject(hisi_hba);
	hisi_sas_reset_init_all_devices(hisi_hba);
	scsi_unblock_requests(shost);
	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);

	state = hisi_hba->hw->get_phys_state(hisi_hba);
	hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state, state);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);

/*
 * Full controller soft reset. RESET_BIT provides single-entry; on
 * soft_reset() failure the prepare-side state (REJECT_CMD, sem,
 * blocked requests, RESET_BIT) is unwound manually here.
 * Returns 0 on success, -1 if unsupported/already in progress, or the
 * soft_reset() error code.
 */
static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct Scsi_Host *shost = hisi_hba->shost;
	int rc;

	if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct)
		queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);

	if (!hisi_hba->hw->soft_reset)
		return -1;

	if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
		return -1;

	dev_info(dev, "controller resetting...\n");
	hisi_sas_controller_reset_prepare(hisi_hba);

	rc = hisi_hba->hw->soft_reset(hisi_hba);
	if (rc) {
		dev_warn(dev, "controller reset failed (%d)\n", rc);
		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
		up(&hisi_hba->sem);
		scsi_unblock_requests(shost);
		clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
		return rc;
	}

	hisi_sas_controller_reset_done(hisi_hba);
	dev_info(dev, "controller reset complete\n");

	return 0;
}

/*
 * libsas lldd_abort_task callback. Strategy depends on protocol:
 * SSP - TMF ABORT_TASK plus a HW internal abort of the slot;
 * SATA/STP - internal device abort then disk softreset;
 * SMP - internal abort of the slot only.
 */
static int hisi_sas_abort_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba;
	struct device *dev;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	if (!sas_dev)
		return TMF_RESP_FUNC_FAILED;

	hisi_hba = dev_to_hisi_hba(task->dev);
	dev = hisi_hba->dev;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		struct hisi_sas_slot *slot = task->lldd_task;
		struct hisi_sas_cq *cq;

		if (slot) {
			/*
			 * flush tasklet to avoid free'ing task
			 * before using task in IO completion
			 */
			cq = &hisi_hba->cq[slot->dlvry_queue];
			tasklet_kill(&cq->tasklet);
		}
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct hisi_sas_slot *slot = task->lldd_task;
		u16 tag = slot->idx;
		int rc2;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = tag;

		rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
						  &tmf_task);

		rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
						   HISI_SAS_INT_ABT_CMD, tag);
		if (rc2 < 0) {
			dev_err(dev, "abort task: internal abort (%d)\n", rc2);
			return TMF_RESP_FUNC_FAILED;
		}

		/*
		 * If the TMF finds that the IO is not in the device and also
		 * the internal abort does not succeed, then it is safe to
		 * free the slot.
		 * Note: if the internal abort succeeds then the slot
		 * will have already been completed
		 */
		if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
			if (task->lldd_task)
				hisi_sas_do_release_task(hisi_hba, task, slot);
		}
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		   task->task_proto & SAS_PROTOCOL_STP) {
		if (task->dev->dev_type == SAS_SATA_DEV) {
			rc = hisi_sas_internal_task_abort(hisi_hba, device,
							  HISI_SAS_INT_ABT_DEV,
							  0);
			if (rc < 0) {
				dev_err(dev, "abort task: internal abort failed\n");
				goto out;
			}
			hisi_sas_dereg_device(hisi_hba, device);
			rc = hisi_sas_softreset_ata_disk(device);
		}
	} else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;
		struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_CMD, tag);
		if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
					task->lldd_task) {
			/*
			 * flush tasklet to avoid free'ing task
			 * before using task in IO completion
			 */
			tasklet_kill(&cq->tasklet);
			slot->task = NULL;
		}
	}

out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_notice(dev, "abort task: rc=%d\n", rc);
	return rc;
}

/*
 * libsas lldd_abort_task_set callback: internal device abort, then
 * TMF ABORT_TASK_SET; release all remaining tasks on full success.
 */
static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_tmf_task tmf_task;
	int rc;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	tmf_task.tmf = TMF_ABORT_TASK_SET;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	if (rc == TMF_RESP_FUNC_COMPLETE)
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

/* libsas lldd_clear_aca callback: plain TMF CLEAR_ACA. */
static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_tmf_task tmf_task;
	int rc;

	tmf_task.tmf = TMF_CLEAR_ACA;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	return rc;
}

/*
 * Reset the local/remote PHY the device is attached through. For a
 * directly attached PHY, arm a completion so the PHY-up interrupt can
 * signal us, and report PHY down if it never arrives within 2s.
 * Devices still in INIT state rely on the caller to wait for link-up.
 *
 * NOTE(review): local_phy is passed to scsi_is_sas_phy_local() after
 * sas_put_local_phy() has dropped our reference - confirm the phy
 * object cannot be released in that window.
 */
static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
{
	struct sas_phy *local_phy = sas_get_local_phy(device);
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct asd_sas_phy *sas_phy = sas_ha->sas_phy[local_phy->number];
	struct hisi_sas_phy *phy = container_of(sas_phy,
			struct hisi_sas_phy, sas_phy);
	DECLARE_COMPLETION_ONSTACK(phyreset);
	int rc, reset_type;

	if (scsi_is_sas_phy_local(local_phy)) {
		phy->in_reset = 1;
		phy->reset_completion = &phyreset;
	}

	/* hard reset (1) except for a SATA disk past init */
	reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT ||
		      !dev_is_sata(device)) ? 1 : 0;

	rc = sas_phy_reset(local_phy, reset_type);
	sas_put_local_phy(local_phy);

	if (scsi_is_sas_phy_local(local_phy)) {
		int ret = wait_for_completion_timeout(&phyreset, 2 * HZ);
		unsigned long flags;

		spin_lock_irqsave(&phy->lock, flags);
		phy->reset_completion = NULL;
		phy->in_reset = 0;
		spin_unlock_irqrestore(&phy->lock, flags);

		/* report PHY down if timed out */
		if (!ret)
			hisi_sas_phy_down(hisi_hba, sas_phy->id, 0);
	} else if (sas_dev->dev_status != HISI_SAS_DEV_INIT) {
		/*
		 * If in init state, we rely on caller to wait for link to be
		 * ready; otherwise, delay.
		 */
		msleep(2000);
	}

	return rc;
}

/*
 * libsas lldd_I_T_nexus_reset callback: internal device abort,
 * de-register, SATA softreset if applicable, then PHY reset; release
 * tasks on success (or if the device vanished, -ENODEV).
 */
static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	if (dev_is_sata(device)) {
		rc = hisi_sas_softreset_ata_disk(device);
		if (rc == TMF_RESP_FUNC_FAILED)
			return TMF_RESP_FUNC_FAILED;
	}

	rc = hisi_sas_debug_I_T_nexus_reset(device);

	if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

/*
 * libsas lldd_lu_reset callback. SATA devices get an internal abort
 * plus a hard PHY reset (no LU concept); SSP devices get an internal
 * abort then TMF LU_RESET. Tasks are released on success.
 */
static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;

	if (dev_is_sata(device)) {
		struct sas_phy *phy;

		/* Clear internal IO and then hardreset */
		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0) {
			dev_err(dev, "lu_reset: internal abort failed\n");
			goto out;
		}
		hisi_sas_dereg_device(hisi_hba, device);

		phy = sas_get_local_phy(device);

		rc = sas_phy_reset(phy, 1);

		if (rc == 0)
			hisi_sas_release_task(hisi_hba, device);
		sas_put_local_phy(phy);
	} else {
		struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0) {
			dev_err(dev, "lu_reset: internal abort failed\n");
			goto out;
		}
		hisi_sas_dereg_device(hisi_hba, device);

		rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
		if (rc == TMF_RESP_FUNC_COMPLETE)
			hisi_sas_release_task(hisi_hba, device);
	}
out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
			sas_dev->device_id, rc);
	return rc;
}

/*
 * libsas lldd_clear_nexus_ha callback: queue a full controller reset
 * on the host workqueue and wait for it, then I_T-reset every directly
 * registered (non-expander) device and release all tasks.
 */
static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct device *dev = hisi_hba->dev;
	HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
	int rc, i;

	queue_work(hisi_hba->wq, &r.work);
	wait_for_completion(r.completion);
	if (!r.done)
		return TMF_RESP_FUNC_FAILED;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device ||
		    DEV_IS_EXPANDER(device->dev_type))
			continue;

		rc = hisi_sas_debug_I_T_nexus_reset(device);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			dev_info(dev, "clear nexus ha: for device[%d] rc=%d\n",
				 sas_dev->device_id, rc);
	}

	hisi_sas_release_tasks(hisi_hba);

	return TMF_RESP_FUNC_COMPLETE;
}

/*
 * libsas lldd_query_task callback: issue TMF QUERY_TASK for an SSP
 * task; any response other than SUCC/FAILED/COMPLETE is normalised
 * to TMF_RESP_FUNC_FAILED.
 */
static int hisi_sas_query_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct domain_device *device = task->dev;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = tag;

		rc = hisi_sas_debug_issue_ssp_tmf(device,
						  lun.scsi_lun,
						  &tmf_task);
		switch (rc) {
		/* The task is still in Lun, release it then */
		case TMF_RESP_FUNC_SUCC:
		/* The task is not in Lun or failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			break;
		default:
			rc = TMF_RESP_FUNC_FAILED;
			break;
		}
	}
	return rc;
}

/*
 * Allocate a slot and delivery-queue entry on @dq, build an abort
 * command header for (@abort_flag, @task_tag) and kick delivery.
 * Returns 0 on success or a negative errno (queue full, no slot,
 * command submission rejected during reset).
 */
static int
hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
				  struct sas_task *task, int abort_flag,
				  int task_tag, struct hisi_sas_dq *dq)
{
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
	unsigned long flags, flags_dq = 0;
	int wr_q_index;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
		return -EINVAL;

	if (!device->port)
		return -1;

	port = to_hisi_sas_port(sas_port);

	/* simply get a slot and send abort command */
	rc = hisi_sas_slot_index_alloc(hisi_hba, NULL);
	if (rc < 0)
		goto err_out;

	slot_idx = rc;
	slot = &hisi_hba->slot_info[slot_idx];

	spin_lock_irqsave(&dq->lock, flags_dq);
	wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (wr_q_index < 0) {
		spin_unlock_irqrestore(&dq->lock, flags_dq);
		rc = -EAGAIN;
		goto err_out_tag;
	}
	list_add_tail(&slot->delivery, &dq->list);
	spin_unlock_irqrestore(&dq->lock, flags_dq);
	spin_lock_irqsave(&sas_dev->lock, flags);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock_irqrestore(&sas_dev->lock, flags);

	dlvry_queue = dq->id;
	dlvry_queue_slot = wr_q_index;

	slot->device_id = sas_dev->device_id;
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	slot->is_internal = true;
	task->lldd_task = slot;

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

	hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
				 abort_flag, task_tag);

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	/* slot memory must be fully initialised before marking ready */
	WRITE_ONCE(slot->ready, 1);
	/* send abort command to the chip */
	spin_lock_irqsave(&dq->lock, flags);
	hisi_hba->hw->start_delivery(dq);
	spin_unlock_irqrestore(&dq->lock, flags);

	return 0;

err_out_tag:
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
err_out:
	dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);

	return rc;
}

/**
 * _hisi_sas_internal_task_abort -- execute an internal
 * abort command for single IO command or a device
 * @hisi_hba: host controller struct
 * @device: domain device
 * @abort_flag: mode of operation, device or single IO
 * @tag: tag of IO to be aborted (only relevant to single
 *       IO mode)
 * @dq: delivery queue for this internal abort command
 */
static int
_hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			      struct domain_device *device, int abort_flag,
			      int tag, struct hisi_sas_dq *dq)
{
	struct sas_task *task;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	int res;

	/*
	 * The interface is not realized means this HW don't support internal
	 * abort, or don't need to do internal abort. Then here, we return
	 * TMF_RESP_FUNC_FAILED and let other steps go on, which depends that
	 * the internal abort has been executed and returned CQ.
	 */
	if (!hisi_hba->hw->prep_abort)
		return TMF_RESP_FUNC_FAILED;

	task = sas_alloc_slow_task(GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	task->dev = device;
	task->task_proto = device->tproto;
	task->task_done = hisi_sas_task_done;
	task->slow_task->timer.function = hisi_sas_tmf_timedout;
	task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT * HZ;
	add_timer(&task->slow_task->timer);

	res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
						task, abort_flag, tag, dq);
	if (res) {
		del_timer(&task->slow_task->timer);
		dev_err(dev, "internal task abort: executing internal task failed: %d\n",
			res);
		goto exit;
	}
	wait_for_completion(&task->slow_task->completion);
	res = TMF_RESP_FUNC_FAILED;

	/* Internal abort timed out */
	if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
		if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
			struct hisi_sas_slot *slot = task->lldd_task;

			if (slot) {
				struct hisi_sas_cq *cq =
					&hisi_hba->cq[slot->dlvry_queue];
				/*
				 * flush tasklet to avoid free'ing task
				 * before using task in IO completion
				 */
				tasklet_kill(&cq->tasklet);
				slot->task = NULL;
			}
			dev_err(dev, "internal task abort: timeout and not done.\n");

			res = -EIO;
			goto exit;
		} else
			dev_err(dev, "internal task abort: timeout.\n");
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
	    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
		res = TMF_RESP_FUNC_COMPLETE;
		goto exit;
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
	    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
		res = TMF_RESP_FUNC_SUCC;
		goto exit;
	}

exit:
	dev_dbg(dev, "internal task abort: task to dev %016llx task=%p resp: 0x%x sts 0x%x\n",
		SAS_ADDR(device->sas_addr), task,
		task->task_status.resp, /* 0 is complete, -1 is undelivered */
		task->task_status.stat);
	sas_free_task(task);

	return res;
}

/*
 * Front-end for internal aborts. Single-IO aborts (ABT_CMD) go to the
 * delivery queue the slot was issued on; device-wide aborts (ABT_DEV)
 * are broadcast to every delivery queue whose CQ interrupt can still
 * be serviced by an online CPU.
 */
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag)
{
	struct hisi_sas_slot *slot;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_dq *dq;
	int i, rc;

	switch (abort_flag) {
	case HISI_SAS_INT_ABT_CMD:
		slot = &hisi_hba->slot_info[tag];
		dq = &hisi_hba->dq[slot->dlvry_queue];
		return _hisi_sas_internal_task_abort(hisi_hba, device,
						     abort_flag, tag, dq);
	case HISI_SAS_INT_ABT_DEV:
		for (i = 0; i < hisi_hba->cq_nvecs; i++) {
			struct hisi_sas_cq *cq = &hisi_hba->cq[i];
			const struct cpumask *mask = cq->pci_irq_mask;

			/* skip queues whose irq has no online CPU */
			if (mask && !cpumask_intersects(cpu_online_mask, mask))
				continue;
			dq = &hisi_hba->dq[i];
			rc = _hisi_sas_internal_task_abort(hisi_hba, device,
							   abort_flag, tag,
							   dq);
			if (rc)
				return rc;
		}
		break;
	default:
		dev_err(dev, "Unrecognised internal abort flag (%d)\n",
			abort_flag);
		return -EINVAL;
	}

	return 0;
}

/* libsas lldd_port_formed callback. */
static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
{
	hisi_sas_port_notify_formed(sas_phy);
}

/* libsas lldd_write_gpio callback; optional per HW generation. */
static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
			u8 reg_index, u8 reg_count, u8 *write_data)
{
	struct hisi_hba *hisi_hba = sha->lldd_ha;

	if (!hisi_hba->hw->write_gpio)
		return -EOPNOTSUPP;

	return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
					reg_index, reg_count, write_data);
}

/*
 * Clear LLDD PHY bookkeeping after link loss and update the sysfs
 * negotiated linkrate (UNKNOWN if the PHY is still enabled, DISABLED
 * otherwise).
 */
static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
{
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_phy *sphy = sas_phy->phy;
	unsigned long flags;

	phy->phy_attached = 0;
	phy->phy_type = 0;
	phy->port = NULL;

	spin_lock_irqsave(&phy->lock, flags);
	if (phy->enable)
		sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
	else
		sphy->negotiated_linkrate = SAS_PHY_DISABLED;
	spin_unlock_irqrestore(&phy->lock, flags);
}

/*
 * hisi_sas_phy_down - handle a PHY-down event. @rdy means the PHY is
 * down but expected to come back (re-notify formed port); otherwise
 * report loss of signal to libsas and detach the PHY from its port.
 * Events during a controller reset or PHY reset are ignored as
 * "flutter".
 */
void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct device *dev = hisi_hba->dev;

	if (rdy) {
		/* Phy down but ready */
		hisi_sas_bytes_dmaed(hisi_hba, phy_no);
		hisi_sas_port_notify_formed(sas_phy);
	} else {
		struct hisi_sas_port *port = phy->port;

		if (test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags) ||
		    phy->in_reset) {
			dev_info(dev, "ignore flutter phy%d down\n", phy_no);
			return;
		}
		/* Phy down and not ready */
		sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
		sas_phy_disconnected(sas_phy);

		if (port) {
			if (phy->phy_type & PORT_TYPE_SAS) {
				int port_id = port->id;

				/* last PHY of a wide port gone? */
				if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
								       port_id))
					port->port_attached = 0;
			} else if (phy->phy_type & PORT_TYPE_SATA)
				port->port_attached = 0;
		}
		hisi_sas_phy_disconnected(phy);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);

/* hisi_sas_kill_tasklets - flush every CQ tasklet on this host. */
void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->cq_nvecs; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];

		tasklet_kill(&cq->tasklet);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);

int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	if (reset_type != SCSI_ADAPTER_RESET)
return -EOPNOTSUPP; 2271 2272 queue_work(hisi_hba->wq, &hisi_hba->rst_work); 2273 2274 return 0; 2275 } 2276 EXPORT_SYMBOL_GPL(hisi_sas_host_reset); 2277 2278 struct scsi_transport_template *hisi_sas_stt; 2279 EXPORT_SYMBOL_GPL(hisi_sas_stt); 2280 2281 static struct sas_domain_function_template hisi_sas_transport_ops = { 2282 .lldd_dev_found = hisi_sas_dev_found, 2283 .lldd_dev_gone = hisi_sas_dev_gone, 2284 .lldd_execute_task = hisi_sas_queue_command, 2285 .lldd_control_phy = hisi_sas_control_phy, 2286 .lldd_abort_task = hisi_sas_abort_task, 2287 .lldd_abort_task_set = hisi_sas_abort_task_set, 2288 .lldd_clear_aca = hisi_sas_clear_aca, 2289 .lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset, 2290 .lldd_lu_reset = hisi_sas_lu_reset, 2291 .lldd_query_task = hisi_sas_query_task, 2292 .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha, 2293 .lldd_port_formed = hisi_sas_port_formed, 2294 .lldd_write_gpio = hisi_sas_write_gpio, 2295 }; 2296 2297 void hisi_sas_init_mem(struct hisi_hba *hisi_hba) 2298 { 2299 int i, s, j, max_command_entries = hisi_hba->hw->max_command_entries; 2300 struct hisi_sas_breakpoint *sata_breakpoint = hisi_hba->sata_breakpoint; 2301 2302 for (i = 0; i < hisi_hba->queue_count; i++) { 2303 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 2304 struct hisi_sas_dq *dq = &hisi_hba->dq[i]; 2305 struct hisi_sas_cmd_hdr *cmd_hdr = hisi_hba->cmd_hdr[i]; 2306 2307 s = sizeof(struct hisi_sas_cmd_hdr); 2308 for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++) 2309 memset(&cmd_hdr[j], 0, s); 2310 2311 dq->wr_point = 0; 2312 2313 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; 2314 memset(hisi_hba->complete_hdr[i], 0, s); 2315 cq->rd_point = 0; 2316 } 2317 2318 s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy; 2319 memset(hisi_hba->initial_fis, 0, s); 2320 2321 s = max_command_entries * sizeof(struct hisi_sas_iost); 2322 memset(hisi_hba->iost, 0, s); 2323 2324 s = max_command_entries * sizeof(struct hisi_sas_breakpoint); 2325 memset(hisi_hba->breakpoint, 0, s); 
/* tail of hisi_sas_init_mem() */
	s = sizeof(struct hisi_sas_sata_breakpoint);
	for (j = 0; j < HISI_SAS_MAX_ITCT_ENTRIES; j++)
		memset(&sata_breakpoint[j], 0, s);
}
EXPORT_SYMBOL_GPL(hisi_sas_init_mem);

/*
 * Allocate all per-HBA memory: delivery/completion queues, ITCT, IOST,
 * breakpoint tables, slot buffers and bookkeeping, plus the reset
 * workqueue.  All DMA memory is managed (dmam_*/devm_*), so there is no
 * explicit free path here; -ENOMEM unwinding relies on devm teardown.
 *
 * Returns 0 on success, -ENOMEM on any allocation failure.
 */
int hisi_sas_alloc(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int i, j, s, max_command_entries = hisi_hba->hw->max_command_entries;
	int max_command_entries_ru, sz_slot_buf_ru;
	int blk_cnt, slots_per_blk;

	sema_init(&hisi_hba->sem, 1);
	spin_lock_init(&hisi_hba->lock);
	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_init(hisi_hba, i);
		hisi_hba->port[i].port_attached = 0;
		hisi_hba->port[i].id = -1;
	}

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
		hisi_hba->devices[i].device_id = i;
		hisi_hba->devices[i].dev_status = HISI_SAS_DEV_INIT;
	}

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];

		/* Completion queue structure */
		cq->id = i;
		cq->hisi_hba = hisi_hba;

		/* Delivery queue structure */
		spin_lock_init(&dq->lock);
		INIT_LIST_HEAD(&dq->list);
		dq->id = i;
		dq->hisi_hba = hisi_hba;

		/* Delivery queue */
		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s,
						&hisi_hba->cmd_hdr_dma[i],
						GFP_KERNEL);
		if (!hisi_hba->cmd_hdr[i])
			goto err_out;

		/* Completion queue */
		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s,
						&hisi_hba->complete_hdr_dma[i],
						GFP_KERNEL);
		if (!hisi_hba->complete_hdr[i])
			goto err_out;
	}

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
	hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma,
					     GFP_KERNEL | __GFP_ZERO);
	if (!hisi_hba->itct)
		goto err_out;

	hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
					   sizeof(struct hisi_sas_slot),
					   GFP_KERNEL);
	if (!hisi_hba->slot_info)
		goto err_out;

	/* roundup to avoid overly large block size */
	max_command_entries_ru = roundup(max_command_entries, 64);
	/* Slot buffer is bigger when DIX protection information is carried */
	if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK)
		sz_slot_buf_ru = sizeof(struct hisi_sas_slot_dif_buf_table);
	else
		sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table);
	sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64);
	/*
	 * Carve the per-slot buffers out of lcm-sized coherent blocks so
	 * each block holds a whole number of slots with no waste.
	 */
	s = lcm(max_command_entries_ru, sz_slot_buf_ru);
	blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s;
	slots_per_blk = s / sz_slot_buf_ru;

	for (i = 0; i < blk_cnt; i++) {
		int slot_index = i * slots_per_blk;
		dma_addr_t buf_dma;
		void *buf;

		buf = dmam_alloc_coherent(dev, s, &buf_dma,
					  GFP_KERNEL | __GFP_ZERO);
		if (!buf)
			goto err_out;

		for (j = 0; j < slots_per_blk; j++, slot_index++) {
			struct hisi_sas_slot *slot;

			slot = &hisi_hba->slot_info[slot_index];
			slot->buf = buf;
			slot->buf_dma = buf_dma;
			slot->idx = slot_index;

			buf += sz_slot_buf_ru;
			buf_dma += sz_slot_buf_ru;
		}
	}

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma,
					     GFP_KERNEL);
	if (!hisi_hba->iost)
		goto err_out;

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	hisi_hba->breakpoint = dmam_alloc_coherent(dev, s,
						   &hisi_hba->breakpoint_dma,
						   GFP_KERNEL);
	if (!hisi_hba->breakpoint)
		goto err_out;

	/* One bit per possible IPTT in the tag bitmap */
	hisi_hba->slot_index_count = max_command_entries;
	s = hisi_hba->slot_index_count / BITS_PER_BYTE;
	hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
	if (!hisi_hba->slot_index_tags)
		goto err_out;

	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
	hisi_hba->initial_fis = dmam_alloc_coherent(dev, s,
						    &hisi_hba->initial_fis_dma,
						    GFP_KERNEL);
	if (!hisi_hba->initial_fis)
		goto err_out;

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
	hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s,
						&hisi_hba->sata_breakpoint_dma,
						GFP_KERNEL);
	if (!hisi_hba->sata_breakpoint)
		goto err_out;
	hisi_sas_init_mem(hisi_hba);

	hisi_sas_slot_index_init(hisi_hba);
	/* Tags above last_slot_index are reserved for internal commands */
	hisi_hba->last_slot_index = hisi_hba->hw->max_command_entries -
		HISI_SAS_RESERVED_IPTT_CNT;

	hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
	if (!hisi_hba->wq) {
		dev_err(dev, "sas_alloc: failed to create workqueue\n");
		goto err_out;
	}

	return 0;
err_out:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(hisi_sas_alloc);

/* Only the workqueue needs explicit teardown; everything else is devm */
void hisi_sas_free(struct hisi_hba *hisi_hba)
{
	if (hisi_hba->wq)
		destroy_workqueue(hisi_hba->wq);
}
EXPORT_SYMBOL_GPL(hisi_sas_free);

/* Async controller reset, queued from hisi_sas_host_reset() et al. */
void hisi_sas_rst_work_handler(struct work_struct *work)
{
	struct hisi_hba *hisi_hba =
		container_of(work, struct hisi_hba, rst_work);

	hisi_sas_controller_reset(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);

/*
 * Synchronous variant: records success in rst->done and signals the
 * waiter's completion.
 */
void hisi_sas_sync_rst_work_handler(struct work_struct *work)
{
	struct hisi_sas_rst *rst =
		container_of(work, struct hisi_sas_rst, work);

	if (!hisi_sas_controller_reset(rst->hisi_hba))
		rst->done = true;
	complete(rst->completion);
}
EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);

/*
 * Read controller configuration from firmware (DT or ACPI) properties.
 * Returns 0 on success, -ENOENT when a mandatory property is missing.
 */
int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct platform_device *pdev = hisi_hba->platform_dev;
	struct device_node *np = pdev ?
pdev->dev.of_node : NULL; 2513 struct clk *refclk; 2514 2515 if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr, 2516 SAS_ADDR_SIZE)) { 2517 dev_err(dev, "could not get property sas-addr\n"); 2518 return -ENOENT; 2519 } 2520 2521 if (np) { 2522 /* 2523 * These properties are only required for platform device-based 2524 * controller with DT firmware. 2525 */ 2526 hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np, 2527 "hisilicon,sas-syscon"); 2528 if (IS_ERR(hisi_hba->ctrl)) { 2529 dev_err(dev, "could not get syscon\n"); 2530 return -ENOENT; 2531 } 2532 2533 if (device_property_read_u32(dev, "ctrl-reset-reg", 2534 &hisi_hba->ctrl_reset_reg)) { 2535 dev_err(dev, "could not get property ctrl-reset-reg\n"); 2536 return -ENOENT; 2537 } 2538 2539 if (device_property_read_u32(dev, "ctrl-reset-sts-reg", 2540 &hisi_hba->ctrl_reset_sts_reg)) { 2541 dev_err(dev, "could not get property ctrl-reset-sts-reg\n"); 2542 return -ENOENT; 2543 } 2544 2545 if (device_property_read_u32(dev, "ctrl-clock-ena-reg", 2546 &hisi_hba->ctrl_clock_ena_reg)) { 2547 dev_err(dev, "could not get property ctrl-clock-ena-reg\n"); 2548 return -ENOENT; 2549 } 2550 } 2551 2552 refclk = devm_clk_get(dev, NULL); 2553 if (IS_ERR(refclk)) 2554 dev_dbg(dev, "no ref clk property\n"); 2555 else 2556 hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000; 2557 2558 if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) { 2559 dev_err(dev, "could not get property phy-count\n"); 2560 return -ENOENT; 2561 } 2562 2563 if (device_property_read_u32(dev, "queue-count", 2564 &hisi_hba->queue_count)) { 2565 dev_err(dev, "could not get property queue-count\n"); 2566 return -ENOENT; 2567 } 2568 2569 return 0; 2570 } 2571 EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info); 2572 2573 static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, 2574 const struct hisi_sas_hw *hw) 2575 { 2576 struct resource *res; 2577 struct Scsi_Host *shost; 2578 struct hisi_hba *hisi_hba; 2579 
struct device *dev = &pdev->dev; 2580 int error; 2581 2582 shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba)); 2583 if (!shost) { 2584 dev_err(dev, "scsi host alloc failed\n"); 2585 return NULL; 2586 } 2587 hisi_hba = shost_priv(shost); 2588 2589 INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler); 2590 hisi_hba->hw = hw; 2591 hisi_hba->dev = dev; 2592 hisi_hba->platform_dev = pdev; 2593 hisi_hba->shost = shost; 2594 SHOST_TO_SAS_HA(shost) = &hisi_hba->sha; 2595 2596 timer_setup(&hisi_hba->timer, NULL, 0); 2597 2598 if (hisi_sas_get_fw_info(hisi_hba) < 0) 2599 goto err_out; 2600 2601 error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 2602 if (error) 2603 error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); 2604 2605 if (error) { 2606 dev_err(dev, "No usable DMA addressing method\n"); 2607 goto err_out; 2608 } 2609 2610 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2611 hisi_hba->regs = devm_ioremap_resource(dev, res); 2612 if (IS_ERR(hisi_hba->regs)) 2613 goto err_out; 2614 2615 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 2616 if (res) { 2617 hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res); 2618 if (IS_ERR(hisi_hba->sgpio_regs)) 2619 goto err_out; 2620 } 2621 2622 if (hisi_sas_alloc(hisi_hba)) { 2623 hisi_sas_free(hisi_hba); 2624 goto err_out; 2625 } 2626 2627 return shost; 2628 err_out: 2629 scsi_host_put(shost); 2630 dev_err(dev, "shost alloc failed\n"); 2631 return NULL; 2632 } 2633 2634 int hisi_sas_probe(struct platform_device *pdev, 2635 const struct hisi_sas_hw *hw) 2636 { 2637 struct Scsi_Host *shost; 2638 struct hisi_hba *hisi_hba; 2639 struct device *dev = &pdev->dev; 2640 struct asd_sas_phy **arr_phy; 2641 struct asd_sas_port **arr_port; 2642 struct sas_ha_struct *sha; 2643 int rc, phy_nr, port_nr, i; 2644 2645 shost = hisi_sas_shost_alloc(pdev, hw); 2646 if (!shost) 2647 return -ENOMEM; 2648 2649 sha = SHOST_TO_SAS_HA(shost); 2650 hisi_hba = shost_priv(shost); 2651 platform_set_drvdata(pdev, sha); 2652 2653 
phy_nr = port_nr = hisi_hba->n_phy; 2654 2655 arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL); 2656 arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL); 2657 if (!arr_phy || !arr_port) { 2658 rc = -ENOMEM; 2659 goto err_out_ha; 2660 } 2661 2662 sha->sas_phy = arr_phy; 2663 sha->sas_port = arr_port; 2664 sha->lldd_ha = hisi_hba; 2665 2666 shost->transportt = hisi_sas_stt; 2667 shost->max_id = HISI_SAS_MAX_DEVICES; 2668 shost->max_lun = ~0; 2669 shost->max_channel = 1; 2670 shost->max_cmd_len = 16; 2671 if (hisi_hba->hw->slot_index_alloc) { 2672 shost->can_queue = hisi_hba->hw->max_command_entries; 2673 shost->cmd_per_lun = hisi_hba->hw->max_command_entries; 2674 } else { 2675 shost->can_queue = hisi_hba->hw->max_command_entries - 2676 HISI_SAS_RESERVED_IPTT_CNT; 2677 shost->cmd_per_lun = hisi_hba->hw->max_command_entries - 2678 HISI_SAS_RESERVED_IPTT_CNT; 2679 } 2680 2681 sha->sas_ha_name = DRV_NAME; 2682 sha->dev = hisi_hba->dev; 2683 sha->lldd_module = THIS_MODULE; 2684 sha->sas_addr = &hisi_hba->sas_addr[0]; 2685 sha->num_phys = hisi_hba->n_phy; 2686 sha->core.shost = hisi_hba->shost; 2687 2688 for (i = 0; i < hisi_hba->n_phy; i++) { 2689 sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy; 2690 sha->sas_port[i] = &hisi_hba->port[i].sas_port; 2691 } 2692 2693 rc = scsi_add_host(shost, &pdev->dev); 2694 if (rc) 2695 goto err_out_ha; 2696 2697 rc = sas_register_ha(sha); 2698 if (rc) 2699 goto err_out_register_ha; 2700 2701 rc = hisi_hba->hw->hw_init(hisi_hba); 2702 if (rc) 2703 goto err_out_register_ha; 2704 2705 scsi_scan_host(shost); 2706 2707 return 0; 2708 2709 err_out_register_ha: 2710 scsi_remove_host(shost); 2711 err_out_ha: 2712 hisi_sas_free(hisi_hba); 2713 scsi_host_put(shost); 2714 return rc; 2715 } 2716 EXPORT_SYMBOL_GPL(hisi_sas_probe); 2717 2718 struct dentry *hisi_sas_debugfs_dir; 2719 2720 static void hisi_sas_debugfs_snapshot_cq_reg(struct hisi_hba *hisi_hba) 2721 { 2722 int queue_entry_size = hisi_hba->hw->complete_hdr_size; 
/* continuation of hisi_sas_debugfs_snapshot_cq_reg() */
	int i;

	for (i = 0; i < hisi_hba->queue_count; i++)
		memcpy(hisi_hba->debugfs_complete_hdr[i],
		       hisi_hba->complete_hdr[i],
		       HISI_SAS_QUEUE_SLOTS * queue_entry_size);
}

/* Copy the live delivery queues into the debugfs snapshot buffers */
static void hisi_sas_debugfs_snapshot_dq_reg(struct hisi_hba *hisi_hba)
{
	int queue_entry_size = sizeof(struct hisi_sas_cmd_hdr);
	int i;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cmd_hdr *debugfs_cmd_hdr, *cmd_hdr;
		int j;

		debugfs_cmd_hdr = hisi_hba->debugfs_cmd_hdr[i];
		cmd_hdr = hisi_hba->cmd_hdr[i];

		for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++)
			memcpy(&debugfs_cmd_hdr[j], &cmd_hdr[j],
			       queue_entry_size);
	}
}

/* Snapshot every per-port register bank via the HW-specific reader */
static void hisi_sas_debugfs_snapshot_port_reg(struct hisi_hba *hisi_hba)
{
	const struct hisi_sas_debugfs_reg *port =
		hisi_hba->hw->debugfs_reg_port;
	int i, phy_cnt;
	u32 offset;
	u32 *databuf;

	for (phy_cnt = 0; phy_cnt < hisi_hba->n_phy; phy_cnt++) {
		databuf = (u32 *)hisi_hba->debugfs_port_reg[phy_cnt];
		for (i = 0; i < port->count; i++, databuf++) {
			/* Registers are 4 bytes apart from base_off */
			offset = port->base_off + 4 * i;
			*databuf = port->read_port_reg(hisi_hba, phy_cnt,
						       offset);
		}
	}
}

/* Snapshot the global register bank */
static void hisi_sas_debugfs_snapshot_global_reg(struct hisi_hba *hisi_hba)
{
	u32 *databuf = (u32 *)hisi_hba->debugfs_global_reg;
	const struct hisi_sas_debugfs_reg *global =
		hisi_hba->hw->debugfs_reg_global;
	int i;

	for (i = 0; i < global->count; i++, databuf++)
		*databuf = global->read_global_reg(hisi_hba, 4 * i);
}

/* Snapshot the ITCT (device context) table */
static void hisi_sas_debugfs_snapshot_itct_reg(struct hisi_hba *hisi_hba)
{
	void *databuf = hisi_hba->debugfs_itct;
	struct hisi_sas_itct *itct;
	int i;

	itct = hisi_hba->itct;

	for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) {
		memcpy(databuf, itct, sizeof(struct hisi_sas_itct));
		databuf += sizeof(struct hisi_sas_itct);
	}
}

/* Snapshot the IOST (I/O status) table */
static void hisi_sas_debugfs_snapshot_iost_reg(struct hisi_hba *hisi_hba)
{
	int max_command_entries = hisi_hba->hw->max_command_entries;
	void *databuf = hisi_hba->debugfs_iost;
	struct hisi_sas_iost *iost;
	int i;

	iost = hisi_hba->iost;

	for (i = 0; i < max_command_entries; i++, iost++) {
		memcpy(databuf, iost, sizeof(struct hisi_sas_iost));
		databuf += sizeof(struct hisi_sas_iost);
	}
}

/*
 * Map a register offset (relative to base_off) to its symbolic name via
 * the per-HW lookup table; NULL when the offset has no named entry.
 */
static const char *
hisi_sas_debugfs_to_reg_name(int off, int base_off,
			     const struct hisi_sas_debugfs_reg_lu *lu)
{
	for (; lu->name; lu++) {
		if (off == lu->off - base_off)
			return lu->name;
	}

	return NULL;
}

/* Dump one snapshot register bank as "offset value [name]" lines */
static void hisi_sas_debugfs_print_reg(u32 *regs_val, const void *ptr,
				       struct seq_file *s)
{
	const struct hisi_sas_debugfs_reg *reg = ptr;
	int i;

	for (i = 0; i < reg->count; i++) {
		int off = i * 4;
		const char *name;

		name = hisi_sas_debugfs_to_reg_name(off, reg->base_off,
						    reg->lu);

		if (name)
			seq_printf(s, "0x%08x 0x%08x %s\n", off,
				   regs_val[i], name);
		else
			seq_printf(s, "0x%08x 0x%08x\n", off,
				   regs_val[i]);
	}
}

/* seq_file show for dump/global */
static int hisi_sas_debugfs_global_show(struct seq_file *s, void *p)
{
	struct hisi_hba *hisi_hba = s->private;
	const struct hisi_sas_hw *hw = hisi_hba->hw;
	const struct hisi_sas_debugfs_reg *reg_global = hw->debugfs_reg_global;

	hisi_sas_debugfs_print_reg(hisi_hba->debugfs_global_reg,
				   reg_global, s);

	return 0;
}

static int hisi_sas_debugfs_global_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_global_show,
			   inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_global_fops = {
	.open = hisi_sas_debugfs_global_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};

/* seq_file show for dump/port/<n> */
static int hisi_sas_debugfs_port_show(struct seq_file *s, void *p)
{
	struct hisi_sas_phy *phy = s->private;
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	const struct hisi_sas_hw *hw = hisi_hba->hw;
	const struct hisi_sas_debugfs_reg *reg_port = hw->debugfs_reg_port;
	u32 *databuf = hisi_hba->debugfs_port_reg[phy->sas_phy.id];

	hisi_sas_debugfs_print_reg(databuf, reg_port, s);

	return 0;
}

static int hisi_sas_debugfs_port_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_port_show, inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_port_fops = {
	.open = hisi_sas_debugfs_port_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};

/* Print one table row as 64-bit words, two per line */
static int hisi_sas_show_row_64(struct seq_file *s, int index,
				int sz, __le64 *ptr)
{
	int i;

	/* completion header size not fixed per HW version */
	seq_printf(s, "index %04d:\n\t", index);
	for (i = 1; i <= sz / 8; i++, ptr++) {
		seq_printf(s, " 0x%016llx", le64_to_cpu(*ptr));
		if (!(i % 2))
			seq_puts(s, "\n\t");
	}

	seq_puts(s, "\n");

	return 0;
}

/* Print one table row as 32-bit words, four per line */
static int hisi_sas_show_row_32(struct seq_file *s, int index,
				int sz, __le32 *ptr)
{
	int i;

	/* completion header size not fixed per HW version */
	seq_printf(s, "index %04d:\n\t", index);
	for (i = 1; i <= sz / 4; i++, ptr++) {
		seq_printf(s, " 0x%08x", le32_to_cpu(*ptr));
		if (!(i % 4))
			seq_puts(s, "\n\t");
	}
	seq_puts(s, "\n");

	return 0;
}

/* Dump one completion-queue slot from the snapshot buffer */
static int hisi_sas_cq_show_slot(struct seq_file *s, int slot, void *cq_ptr)
{
	struct hisi_sas_cq *cq = cq_ptr;
	struct hisi_hba *hisi_hba = cq->hisi_hba;
	void
*complete_queue = hisi_hba->debugfs_complete_hdr[cq->id]; 2933 __le32 *complete_hdr = complete_queue + 2934 (hisi_hba->hw->complete_hdr_size * slot); 2935 2936 return hisi_sas_show_row_32(s, slot, 2937 hisi_hba->hw->complete_hdr_size, 2938 complete_hdr); 2939 } 2940 2941 static int hisi_sas_debugfs_cq_show(struct seq_file *s, void *p) 2942 { 2943 struct hisi_sas_cq *cq = s->private; 2944 int slot, ret; 2945 2946 for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) { 2947 ret = hisi_sas_cq_show_slot(s, slot, cq); 2948 if (ret) 2949 return ret; 2950 } 2951 return 0; 2952 } 2953 2954 static int hisi_sas_debugfs_cq_open(struct inode *inode, struct file *filp) 2955 { 2956 return single_open(filp, hisi_sas_debugfs_cq_show, inode->i_private); 2957 } 2958 2959 static const struct file_operations hisi_sas_debugfs_cq_fops = { 2960 .open = hisi_sas_debugfs_cq_open, 2961 .read = seq_read, 2962 .llseek = seq_lseek, 2963 .release = single_release, 2964 .owner = THIS_MODULE, 2965 }; 2966 2967 static int hisi_sas_dq_show_slot(struct seq_file *s, int slot, void *dq_ptr) 2968 { 2969 struct hisi_sas_dq *dq = dq_ptr; 2970 struct hisi_hba *hisi_hba = dq->hisi_hba; 2971 void *cmd_queue = hisi_hba->debugfs_cmd_hdr[dq->id]; 2972 __le32 *cmd_hdr = cmd_queue + 2973 sizeof(struct hisi_sas_cmd_hdr) * slot; 2974 2975 return hisi_sas_show_row_32(s, slot, sizeof(struct hisi_sas_cmd_hdr), 2976 cmd_hdr); 2977 } 2978 2979 static int hisi_sas_debugfs_dq_show(struct seq_file *s, void *p) 2980 { 2981 int slot, ret; 2982 2983 for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) { 2984 ret = hisi_sas_dq_show_slot(s, slot, s->private); 2985 if (ret) 2986 return ret; 2987 } 2988 return 0; 2989 } 2990 2991 static int hisi_sas_debugfs_dq_open(struct inode *inode, struct file *filp) 2992 { 2993 return single_open(filp, hisi_sas_debugfs_dq_show, inode->i_private); 2994 } 2995 2996 static const struct file_operations hisi_sas_debugfs_dq_fops = { 2997 .open = hisi_sas_debugfs_dq_open, 2998 .read = seq_read, 2999 
.llseek = seq_lseek, 3000 .release = single_release, 3001 .owner = THIS_MODULE, 3002 }; 3003 3004 static int hisi_sas_debugfs_iost_show(struct seq_file *s, void *p) 3005 { 3006 struct hisi_hba *hisi_hba = s->private; 3007 struct hisi_sas_iost *debugfs_iost = hisi_hba->debugfs_iost; 3008 int i, ret, max_command_entries = hisi_hba->hw->max_command_entries; 3009 __le64 *iost = &debugfs_iost->qw0; 3010 3011 for (i = 0; i < max_command_entries; i++, debugfs_iost++) { 3012 ret = hisi_sas_show_row_64(s, i, sizeof(*debugfs_iost), 3013 iost); 3014 if (ret) 3015 return ret; 3016 } 3017 3018 return 0; 3019 } 3020 3021 static int hisi_sas_debugfs_iost_open(struct inode *inode, struct file *filp) 3022 { 3023 return single_open(filp, hisi_sas_debugfs_iost_show, inode->i_private); 3024 } 3025 3026 static const struct file_operations hisi_sas_debugfs_iost_fops = { 3027 .open = hisi_sas_debugfs_iost_open, 3028 .read = seq_read, 3029 .llseek = seq_lseek, 3030 .release = single_release, 3031 .owner = THIS_MODULE, 3032 }; 3033 3034 static int hisi_sas_debugfs_itct_show(struct seq_file *s, void *p) 3035 { 3036 int i, ret; 3037 struct hisi_hba *hisi_hba = s->private; 3038 struct hisi_sas_itct *debugfs_itct = hisi_hba->debugfs_itct; 3039 __le64 *itct = &debugfs_itct->qw0; 3040 3041 for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, debugfs_itct++) { 3042 ret = hisi_sas_show_row_64(s, i, sizeof(*debugfs_itct), 3043 itct); 3044 if (ret) 3045 return ret; 3046 } 3047 3048 return 0; 3049 } 3050 3051 static int hisi_sas_debugfs_itct_open(struct inode *inode, struct file *filp) 3052 { 3053 return single_open(filp, hisi_sas_debugfs_itct_show, inode->i_private); 3054 } 3055 3056 static const struct file_operations hisi_sas_debugfs_itct_fops = { 3057 .open = hisi_sas_debugfs_itct_open, 3058 .read = seq_read, 3059 .llseek = seq_lseek, 3060 .release = single_release, 3061 .owner = THIS_MODULE, 3062 }; 3063 3064 static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba) 3065 { 3066 struct 
dentry *dump_dentry; 3067 struct dentry *dentry; 3068 char name[256]; 3069 int p; 3070 int c; 3071 int d; 3072 3073 /* Create dump dir inside device dir */ 3074 dump_dentry = debugfs_create_dir("dump", hisi_hba->debugfs_dir); 3075 hisi_hba->debugfs_dump_dentry = dump_dentry; 3076 3077 debugfs_create_file("global", 0400, dump_dentry, hisi_hba, 3078 &hisi_sas_debugfs_global_fops); 3079 3080 /* Create port dir and files */ 3081 dentry = debugfs_create_dir("port", dump_dentry); 3082 for (p = 0; p < hisi_hba->n_phy; p++) { 3083 snprintf(name, 256, "%d", p); 3084 3085 debugfs_create_file(name, 0400, dentry, &hisi_hba->phy[p], 3086 &hisi_sas_debugfs_port_fops); 3087 } 3088 3089 /* Create CQ dir and files */ 3090 dentry = debugfs_create_dir("cq", dump_dentry); 3091 for (c = 0; c < hisi_hba->queue_count; c++) { 3092 snprintf(name, 256, "%d", c); 3093 3094 debugfs_create_file(name, 0400, dentry, &hisi_hba->cq[c], 3095 &hisi_sas_debugfs_cq_fops); 3096 } 3097 3098 /* Create DQ dir and files */ 3099 dentry = debugfs_create_dir("dq", dump_dentry); 3100 for (d = 0; d < hisi_hba->queue_count; d++) { 3101 snprintf(name, 256, "%d", d); 3102 3103 debugfs_create_file(name, 0400, dentry, &hisi_hba->dq[d], 3104 &hisi_sas_debugfs_dq_fops); 3105 } 3106 3107 debugfs_create_file("iost", 0400, dump_dentry, hisi_hba, 3108 &hisi_sas_debugfs_iost_fops); 3109 3110 debugfs_create_file("itct", 0400, dump_dentry, hisi_hba, 3111 &hisi_sas_debugfs_itct_fops); 3112 3113 return; 3114 } 3115 3116 static void hisi_sas_debugfs_snapshot_regs(struct hisi_hba *hisi_hba) 3117 { 3118 hisi_hba->hw->snapshot_prepare(hisi_hba); 3119 3120 hisi_sas_debugfs_snapshot_global_reg(hisi_hba); 3121 hisi_sas_debugfs_snapshot_port_reg(hisi_hba); 3122 hisi_sas_debugfs_snapshot_cq_reg(hisi_hba); 3123 hisi_sas_debugfs_snapshot_dq_reg(hisi_hba); 3124 hisi_sas_debugfs_snapshot_itct_reg(hisi_hba); 3125 hisi_sas_debugfs_snapshot_iost_reg(hisi_hba); 3126 3127 hisi_sas_debugfs_create_files(hisi_hba); 3128 3129 
hisi_hba->hw->snapshot_restore(hisi_hba); 3130 } 3131 3132 static ssize_t hisi_sas_debugfs_trigger_dump_write(struct file *file, 3133 const char __user *user_buf, 3134 size_t count, loff_t *ppos) 3135 { 3136 struct hisi_hba *hisi_hba = file->f_inode->i_private; 3137 char buf[8]; 3138 3139 /* A bit racy, but don't care too much since it's only debugfs */ 3140 if (hisi_hba->debugfs_snapshot) 3141 return -EFAULT; 3142 3143 if (count > 8) 3144 return -EFAULT; 3145 3146 if (copy_from_user(buf, user_buf, count)) 3147 return -EFAULT; 3148 3149 if (buf[0] != '1') 3150 return -EFAULT; 3151 3152 queue_work(hisi_hba->wq, &hisi_hba->debugfs_work); 3153 3154 return count; 3155 } 3156 3157 static const struct file_operations hisi_sas_debugfs_trigger_dump_fops = { 3158 .write = &hisi_sas_debugfs_trigger_dump_write, 3159 .owner = THIS_MODULE, 3160 }; 3161 3162 void hisi_sas_debugfs_work_handler(struct work_struct *work) 3163 { 3164 struct hisi_hba *hisi_hba = 3165 container_of(work, struct hisi_hba, debugfs_work); 3166 3167 if (hisi_hba->debugfs_snapshot) 3168 return; 3169 hisi_hba->debugfs_snapshot = true; 3170 3171 hisi_sas_debugfs_snapshot_regs(hisi_hba); 3172 } 3173 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_work_handler); 3174 3175 void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba) 3176 { 3177 int max_command_entries = hisi_hba->hw->max_command_entries; 3178 struct device *dev = hisi_hba->dev; 3179 int p, i, c, d; 3180 size_t sz; 3181 3182 hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev), 3183 hisi_sas_debugfs_dir); 3184 debugfs_create_file("trigger_dump", 0600, 3185 hisi_hba->debugfs_dir, 3186 hisi_hba, 3187 &hisi_sas_debugfs_trigger_dump_fops); 3188 3189 /* Alloc buffer for global */ 3190 sz = hisi_hba->hw->debugfs_reg_global->count * 4; 3191 hisi_hba->debugfs_global_reg = 3192 devm_kmalloc(dev, sz, GFP_KERNEL); 3193 3194 if (!hisi_hba->debugfs_global_reg) 3195 goto fail_global; 3196 3197 /* Alloc buffer for port */ 3198 sz = hisi_hba->hw->debugfs_reg_port->count * 4; 
3199 for (p = 0; p < hisi_hba->n_phy; p++) { 3200 hisi_hba->debugfs_port_reg[p] = 3201 devm_kmalloc(dev, sz, GFP_KERNEL); 3202 3203 if (!hisi_hba->debugfs_port_reg[p]) 3204 goto fail_port; 3205 } 3206 3207 /* Alloc buffer for cq */ 3208 sz = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; 3209 for (c = 0; c < hisi_hba->queue_count; c++) { 3210 hisi_hba->debugfs_complete_hdr[c] = 3211 devm_kmalloc(dev, sz, GFP_KERNEL); 3212 3213 if (!hisi_hba->debugfs_complete_hdr[c]) 3214 goto fail_cq; 3215 } 3216 3217 /* Alloc buffer for dq */ 3218 sz = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS; 3219 for (d = 0; d < hisi_hba->queue_count; d++) { 3220 hisi_hba->debugfs_cmd_hdr[d] = 3221 devm_kmalloc(dev, sz, GFP_KERNEL); 3222 3223 if (!hisi_hba->debugfs_cmd_hdr[d]) 3224 goto fail_iost_dq; 3225 } 3226 3227 /* Alloc buffer for iost */ 3228 sz = max_command_entries * sizeof(struct hisi_sas_iost); 3229 3230 hisi_hba->debugfs_iost = devm_kmalloc(dev, sz, GFP_KERNEL); 3231 if (!hisi_hba->debugfs_iost) 3232 goto fail_iost_dq; 3233 3234 /* Alloc buffer for itct */ 3235 /* New memory allocation must be locate before itct */ 3236 sz = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct); 3237 3238 hisi_hba->debugfs_itct = devm_kmalloc(dev, sz, GFP_KERNEL); 3239 if (!hisi_hba->debugfs_itct) 3240 goto fail_itct; 3241 3242 return; 3243 fail_itct: 3244 devm_kfree(dev, hisi_hba->debugfs_iost); 3245 fail_iost_dq: 3246 for (i = 0; i < d; i++) 3247 devm_kfree(dev, hisi_hba->debugfs_cmd_hdr[i]); 3248 fail_cq: 3249 for (i = 0; i < c; i++) 3250 devm_kfree(dev, hisi_hba->debugfs_complete_hdr[i]); 3251 fail_port: 3252 for (i = 0; i < p; i++) 3253 devm_kfree(dev, hisi_hba->debugfs_port_reg[i]); 3254 devm_kfree(dev, hisi_hba->debugfs_global_reg); 3255 fail_global: 3256 debugfs_remove_recursive(hisi_hba->debugfs_dir); 3257 dev_dbg(dev, "failed to init debugfs!\n"); 3258 } 3259 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_init); 3260 3261 void hisi_sas_debugfs_exit(struct hisi_hba *hisi_hba) 
3262 { 3263 debugfs_remove_recursive(hisi_hba->debugfs_dir); 3264 } 3265 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_exit); 3266 3267 int hisi_sas_remove(struct platform_device *pdev) 3268 { 3269 struct sas_ha_struct *sha = platform_get_drvdata(pdev); 3270 struct hisi_hba *hisi_hba = sha->lldd_ha; 3271 struct Scsi_Host *shost = sha->core.shost; 3272 3273 if (timer_pending(&hisi_hba->timer)) 3274 del_timer(&hisi_hba->timer); 3275 3276 sas_unregister_ha(sha); 3277 sas_remove_host(sha->core.shost); 3278 3279 hisi_sas_free(hisi_hba); 3280 scsi_host_put(shost); 3281 return 0; 3282 } 3283 EXPORT_SYMBOL_GPL(hisi_sas_remove); 3284 3285 bool hisi_sas_debugfs_enable; 3286 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable); 3287 module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444); 3288 MODULE_PARM_DESC(hisi_sas_debugfs_enable, "Enable driver debugfs (default disabled)"); 3289 3290 static __init int hisi_sas_init(void) 3291 { 3292 hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops); 3293 if (!hisi_sas_stt) 3294 return -ENOMEM; 3295 3296 if (hisi_sas_debugfs_enable) 3297 hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL); 3298 3299 return 0; 3300 } 3301 3302 static __exit void hisi_sas_exit(void) 3303 { 3304 sas_release_transport(hisi_sas_stt); 3305 3306 debugfs_remove(hisi_sas_debugfs_dir); 3307 } 3308 3309 module_init(hisi_sas_init); 3310 module_exit(hisi_sas_exit); 3311 3312 MODULE_LICENSE("GPL"); 3313 MODULE_AUTHOR("John Garry <john.garry@huawei.com>"); 3314 MODULE_DESCRIPTION("HISILICON SAS controller driver"); 3315 MODULE_ALIAS("platform:" DRV_NAME); 3316