1 /* 2 * Copyright (c) 2015 Linaro Ltd. 3 * Copyright (c) 2015 Hisilicon Limited. 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License as published by 7 * the Free Software Foundation; either version 2 of the License, or 8 * (at your option) any later version. 9 * 10 */ 11 12 #include "hisi_sas.h" 13 #define DRV_NAME "hisi_sas" 14 15 #define DEV_IS_GONE(dev) \ 16 ((!dev) || (dev->dev_type == SAS_PHY_UNUSED)) 17 18 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device, 19 u8 *lun, struct hisi_sas_tmf_task *tmf); 20 static int 21 hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, 22 struct domain_device *device, 23 int abort_flag, int tag); 24 static int hisi_sas_softreset_ata_disk(struct domain_device *device); 25 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, 26 void *funcdata); 27 28 u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction) 29 { 30 switch (fis->command) { 31 case ATA_CMD_FPDMA_WRITE: 32 case ATA_CMD_FPDMA_READ: 33 case ATA_CMD_FPDMA_RECV: 34 case ATA_CMD_FPDMA_SEND: 35 case ATA_CMD_NCQ_NON_DATA: 36 return HISI_SAS_SATA_PROTOCOL_FPDMA; 37 38 case ATA_CMD_DOWNLOAD_MICRO: 39 case ATA_CMD_ID_ATA: 40 case ATA_CMD_PMP_READ: 41 case ATA_CMD_READ_LOG_EXT: 42 case ATA_CMD_PIO_READ: 43 case ATA_CMD_PIO_READ_EXT: 44 case ATA_CMD_PMP_WRITE: 45 case ATA_CMD_WRITE_LOG_EXT: 46 case ATA_CMD_PIO_WRITE: 47 case ATA_CMD_PIO_WRITE_EXT: 48 return HISI_SAS_SATA_PROTOCOL_PIO; 49 50 case ATA_CMD_DSM: 51 case ATA_CMD_DOWNLOAD_MICRO_DMA: 52 case ATA_CMD_PMP_READ_DMA: 53 case ATA_CMD_PMP_WRITE_DMA: 54 case ATA_CMD_READ: 55 case ATA_CMD_READ_EXT: 56 case ATA_CMD_READ_LOG_DMA_EXT: 57 case ATA_CMD_READ_STREAM_DMA_EXT: 58 case ATA_CMD_TRUSTED_RCV_DMA: 59 case ATA_CMD_TRUSTED_SND_DMA: 60 case ATA_CMD_WRITE: 61 case ATA_CMD_WRITE_EXT: 62 case ATA_CMD_WRITE_FUA_EXT: 63 case ATA_CMD_WRITE_QUEUED: 64 case ATA_CMD_WRITE_LOG_DMA_EXT: 65 case ATA_CMD_WRITE_STREAM_DMA_EXT: 66 case ATA_CMD_ZAC_MGMT_IN: 67 return HISI_SAS_SATA_PROTOCOL_DMA; 68 69 case ATA_CMD_CHK_POWER: 70 case ATA_CMD_DEV_RESET: 71 case ATA_CMD_EDD: 72 case ATA_CMD_FLUSH: 73 case ATA_CMD_FLUSH_EXT: 74 case ATA_CMD_VERIFY: 75 case ATA_CMD_VERIFY_EXT: 76 case ATA_CMD_SET_FEATURES: 77 case ATA_CMD_STANDBY: 78 case ATA_CMD_STANDBYNOW1: 79 case ATA_CMD_ZAC_MGMT_OUT: 80 return HISI_SAS_SATA_PROTOCOL_NONDATA; 81 default: 82 { 83 if (fis->command == ATA_CMD_SET_MAX) { 84 switch (fis->features) { 85 case ATA_SET_MAX_PASSWD: 86 case ATA_SET_MAX_LOCK: 87 return HISI_SAS_SATA_PROTOCOL_PIO; 88 89 case ATA_SET_MAX_PASSWD_DMA: 90 case ATA_SET_MAX_UNLOCK_DMA: 91 return HISI_SAS_SATA_PROTOCOL_DMA; 92 93 default: 94 return HISI_SAS_SATA_PROTOCOL_NONDATA; 95 } 96 } 97 if (direction == DMA_NONE) 98 return HISI_SAS_SATA_PROTOCOL_NONDATA; 99 return HISI_SAS_SATA_PROTOCOL_PIO; 100 } 101 } 102 } 103 EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol); 104 105 void hisi_sas_sata_done(struct sas_task *task, 106 struct hisi_sas_slot *slot) 107 { 108 struct task_status_struct *ts = &task->task_status; 109 struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf; 110 struct hisi_sas_status_buffer *status_buf = 111 hisi_sas_status_buf_addr_mem(slot); 112 u8 *iu = &status_buf->iu[0]; 113 struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu; 114 115 resp->frame_len = sizeof(struct dev_to_host_fis); 116 memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis)); 117 118 ts->buf_valid_size = sizeof(*resp); 119 } 120 EXPORT_SYMBOL_GPL(hisi_sas_sata_done); 121 122 int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag) 123 { 124 struct ata_queued_cmd *qc = task->uldd_task; 125 126 if (qc) { 127 if (qc->tf.command == ATA_CMD_FPDMA_WRITE || 128 qc->tf.command == ATA_CMD_FPDMA_READ) { 129 *tag = qc->tag; 130 return 1; 131 } 132 } 133 return 0; 134 } 135 EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag); 136 137 static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device) 138 { 139 return device->port->ha->lldd_ha; 140 } 141 142 struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port) 143 { 144 return container_of(sas_port, struct hisi_sas_port, sas_port); 145 } 146 EXPORT_SYMBOL_GPL(to_hisi_sas_port); 147 148 void hisi_sas_stop_phys(struct hisi_hba *hisi_hba) 149 { 150 int phy_no; 151 152 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) 153 hisi_hba->hw->phy_disable(hisi_hba, phy_no); 154 } 155 EXPORT_SYMBOL_GPL(hisi_sas_stop_phys); 156 157 static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx) 158 { 159 void *bitmap = hisi_hba->slot_index_tags; 160 161 clear_bit(slot_idx, bitmap); 162 } 163 164 static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx) 165 { 166 hisi_sas_slot_index_clear(hisi_hba, slot_idx); 167 } 168 169 static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx) 170 { 171 void *bitmap = hisi_hba->slot_index_tags; 172 173 set_bit(slot_idx, bitmap); 174 } 175 176 static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx) 177 { 178 unsigned int index; 179 void *bitmap = hisi_hba->slot_index_tags; 180 181 index = find_first_zero_bit(bitmap, hisi_hba->slot_index_count); 182 if (index >= hisi_hba->slot_index_count) 183 return -SAS_QUEUE_FULL; 184 hisi_sas_slot_index_set(hisi_hba, index); 185 *slot_idx = index; 186 return 0; 187 } 188 189 static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba) 190 { 191 int i; 192 193 for (i = 0; i < hisi_hba->slot_index_count; ++i) 194 hisi_sas_slot_index_clear(hisi_hba, i); 195 } 196 197 void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, 198 struct hisi_sas_slot *slot) 199 { 200 201 if (task) { 202 struct device *dev = hisi_hba->dev; 203 struct domain_device *device = task->dev; 204 struct hisi_sas_device *sas_dev = device->lldd_dev; 205 206 if (!task->lldd_task) 207 return; 208 209 task->lldd_task = NULL; 210 211 if (!sas_protocol_ata(task->task_proto)) 212 if (slot->n_elem) 213 dma_unmap_sg(dev, task->scatter, 214 task->num_scatter, 215 task->data_dir); 216 217 if (sas_dev) 218 atomic64_dec(&sas_dev->running_req); 219 } 220 221 if (slot->buf) 222 dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma); 223 224 list_del_init(&slot->entry); 225 slot->buf = NULL; 226 slot->task = NULL; 227 slot->port = NULL; 228 hisi_sas_slot_index_free(hisi_hba, slot->idx); 229 230 /* slot memory is fully zeroed when it is reused */ 231 } 232 EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free); 233 234 static int hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba, 235 struct hisi_sas_slot *slot) 236 { 237 return hisi_hba->hw->prep_smp(hisi_hba, slot); 238 } 239 240 static int hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba, 241 struct hisi_sas_slot *slot, int is_tmf, 242 struct hisi_sas_tmf_task *tmf) 243 { 244 return hisi_hba->hw->prep_ssp(hisi_hba, slot, is_tmf, tmf); 245 } 246 247 static int hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba, 248 struct hisi_sas_slot *slot) 249 { 250 return hisi_hba->hw->prep_stp(hisi_hba, slot); 251 } 252 253 static int hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba, 254 struct hisi_sas_slot *slot, 255 int device_id, int abort_flag, int tag_to_abort) 256 { 257 return hisi_hba->hw->prep_abort(hisi_hba, slot, 258 device_id, abort_flag, tag_to_abort); 259 } 260 261 /* 262 * This function will issue an abort TMF regardless of whether the 263 * task is in the sdev or not. Then it will do the task complete 264 * cleanup and callbacks. 265 */ 266 static void hisi_sas_slot_abort(struct work_struct *work) 267 { 268 struct hisi_sas_slot *abort_slot = 269 container_of(work, struct hisi_sas_slot, abort_slot); 270 struct sas_task *task = abort_slot->task; 271 struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev); 272 struct scsi_cmnd *cmnd = task->uldd_task; 273 struct hisi_sas_tmf_task tmf_task; 274 struct scsi_lun lun; 275 struct device *dev = hisi_hba->dev; 276 int tag = abort_slot->idx; 277 unsigned long flags; 278 279 if (!(task->task_proto & SAS_PROTOCOL_SSP)) { 280 dev_err(dev, "cannot abort slot for non-ssp task\n"); 281 goto out; 282 } 283 284 int_to_scsilun(cmnd->device->lun, &lun); 285 tmf_task.tmf = TMF_ABORT_TASK; 286 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag); 287 288 hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task); 289 out: 290 /* Do cleanup for this task */ 291 spin_lock_irqsave(&hisi_hba->lock, flags); 292 hisi_sas_slot_task_free(hisi_hba, task, abort_slot); 293 spin_unlock_irqrestore(&hisi_hba->lock, flags); 294 if (task->task_done) 295 task->task_done(task); 296 } 297 298 static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq 299 *dq, int is_tmf, struct hisi_sas_tmf_task *tmf, 300 int *pass) 301 { 302 struct hisi_hba *hisi_hba = dq->hisi_hba; 303 struct domain_device *device = task->dev; 304 struct hisi_sas_device *sas_dev = device->lldd_dev; 305 struct hisi_sas_port *port; 306 struct hisi_sas_slot *slot; 307 struct hisi_sas_cmd_hdr *cmd_hdr_base; 308 struct asd_sas_port *sas_port = device->port; 309 struct device *dev = hisi_hba->dev; 310 int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx; 311 unsigned long flags; 312 313 if (!sas_port) { 314 struct task_status_struct *ts = &task->task_status; 315 316 ts->resp = SAS_TASK_UNDELIVERED; 317 ts->stat = SAS_PHY_DOWN; 318 /* 319 * libsas will use dev->port, should 320 * not call task_done for sata 321 */ 322 if (device->dev_type != SAS_SATA_DEV) 323 task->task_done(task); 324 return SAS_PHY_DOWN; 325 } 326 327 if (DEV_IS_GONE(sas_dev)) { 328 if (sas_dev) 329 dev_info(dev, "task prep: device %d not ready\n", 330 sas_dev->device_id); 331 else 332 dev_info(dev, "task prep: device %016llx not ready\n", 333 SAS_ADDR(device->sas_addr)); 334 335 return SAS_PHY_DOWN; 336 } 337 338 port = to_hisi_sas_port(sas_port); 339 if (port && !port->port_attached) { 340 dev_info(dev, "task prep: %s port%d not attach device\n", 341 (dev_is_sata(device)) ? 342 "SATA/STP" : "SAS", 343 device->port->id); 344 345 return SAS_PHY_DOWN; 346 } 347 348 if (!sas_protocol_ata(task->task_proto)) { 349 if (task->num_scatter) { 350 n_elem = dma_map_sg(dev, task->scatter, 351 task->num_scatter, task->data_dir); 352 if (!n_elem) { 353 rc = -ENOMEM; 354 goto prep_out; 355 } 356 } 357 } else 358 n_elem = task->num_scatter; 359 360 spin_lock_irqsave(&hisi_hba->lock, flags); 361 if (hisi_hba->hw->slot_index_alloc) 362 rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx, 363 device); 364 else 365 rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx); 366 if (rc) { 367 spin_unlock_irqrestore(&hisi_hba->lock, flags); 368 goto err_out; 369 } 370 spin_unlock_irqrestore(&hisi_hba->lock, flags); 371 372 rc = hisi_hba->hw->get_free_slot(hisi_hba, dq); 373 if (rc) 374 goto err_out_tag; 375 376 dlvry_queue = dq->id; 377 dlvry_queue_slot = dq->wr_point; 378 slot = &hisi_hba->slot_info[slot_idx]; 379 memset(slot, 0, sizeof(struct hisi_sas_slot)); 380 381 slot->idx = slot_idx; 382 slot->n_elem = n_elem; 383 slot->dlvry_queue = dlvry_queue; 384 slot->dlvry_queue_slot = dlvry_queue_slot; 385 cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue]; 386 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot]; 387 slot->task = task; 388 slot->port = port; 389 task->lldd_task = slot; 390 INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort); 391 392 slot->buf = dma_pool_alloc(hisi_hba->buffer_pool, 393 GFP_ATOMIC, &slot->buf_dma); 394 if (!slot->buf) { 395 rc = -ENOMEM; 396 goto err_out_slot_buf; 397 } 398 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr)); 399 memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ); 400 memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ); 401 402 switch (task->task_proto) { 403 case SAS_PROTOCOL_SMP: 404 rc = hisi_sas_task_prep_smp(hisi_hba, slot); 405 break; 406 case SAS_PROTOCOL_SSP: 407 rc = hisi_sas_task_prep_ssp(hisi_hba, slot, is_tmf, tmf); 408 break; 409 case SAS_PROTOCOL_SATA: 410 case SAS_PROTOCOL_STP: 411 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: 412 rc = hisi_sas_task_prep_ata(hisi_hba, slot); 413 break; 414 default: 415 dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n", 416 task->task_proto); 417 rc = -EINVAL; 418 break; 419 } 420 421 if (rc) { 422 dev_err(dev, "task prep: rc = 0x%x\n", rc); 423 goto err_out_buf; 424 } 425 426 spin_lock_irqsave(&hisi_hba->lock, flags); 427 list_add_tail(&slot->entry, &sas_dev->list); 428 spin_unlock_irqrestore(&hisi_hba->lock, flags); 429 spin_lock_irqsave(&task->task_state_lock, flags); 430 task->task_state_flags |= SAS_TASK_AT_INITIATOR; 431 spin_unlock_irqrestore(&task->task_state_lock, flags); 432 433 dq->slot_prep = slot; 434 435 atomic64_inc(&sas_dev->running_req); 436 ++(*pass); 437 438 return 0; 439 440 err_out_buf: 441 dma_pool_free(hisi_hba->buffer_pool, slot->buf, 442 slot->buf_dma); 443 err_out_slot_buf: 444 /* Nothing to be done */ 445 err_out_tag: 446 spin_lock_irqsave(&hisi_hba->lock, flags); 447 hisi_sas_slot_index_free(hisi_hba, slot_idx); 448 spin_unlock_irqrestore(&hisi_hba->lock, flags); 449 err_out: 450 dev_err(dev, "task prep: failed[%d]!\n", rc); 451 if (!sas_protocol_ata(task->task_proto)) 452 if (n_elem) 453 dma_unmap_sg(dev, task->scatter, 454 task->num_scatter, 455 task->data_dir); 456 prep_out: 457 return rc; 458 } 459 460 static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags, 461 int is_tmf, struct hisi_sas_tmf_task *tmf) 462 { 463 u32 rc; 464 u32 pass = 0; 465 unsigned long flags; 466 struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev); 467 struct device *dev = hisi_hba->dev; 468 struct domain_device *device = task->dev; 469 struct hisi_sas_device *sas_dev = device->lldd_dev; 470 struct hisi_sas_dq *dq = sas_dev->dq; 471 472 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) 473 return -EINVAL; 474 475 /* protect task_prep and start_delivery sequence */ 476 spin_lock_irqsave(&dq->lock, flags); 477 rc = hisi_sas_task_prep(task, dq, is_tmf, tmf, &pass); 478 if (rc) 479 dev_err(dev, "task exec: failed[%d]!\n", rc); 480 481 if (likely(pass)) 482 hisi_hba->hw->start_delivery(dq); 483 spin_unlock_irqrestore(&dq->lock, flags); 484 485 return rc; 486 } 487 488 static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no) 489 { 490 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 491 struct asd_sas_phy *sas_phy = &phy->sas_phy; 492 struct sas_ha_struct *sas_ha; 493 494 if (!phy->phy_attached) 495 return; 496 497 sas_ha = &hisi_hba->sha; 498 sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE); 499 500 if (sas_phy->phy) { 501 struct sas_phy *sphy = sas_phy->phy; 502 503 sphy->negotiated_linkrate = sas_phy->linkrate; 504 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; 505 sphy->maximum_linkrate_hw = 506 hisi_hba->hw->phy_get_max_linkrate(); 507 if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) 508 sphy->minimum_linkrate = phy->minimum_linkrate; 509 510 if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) 511 sphy->maximum_linkrate = phy->maximum_linkrate; 512 } 513 514 if (phy->phy_type & PORT_TYPE_SAS) { 515 struct sas_identify_frame *id; 516 517 id = (struct sas_identify_frame *)phy->frame_rcvd; 518 id->dev_type = phy->identify.device_type; 519 id->initiator_bits = SAS_PROTOCOL_ALL; 520 id->target_bits = phy->identify.target_port_protocols; 521 } else if (phy->phy_type & PORT_TYPE_SATA) { 522 /*Nothing*/ 523 } 524 525 sas_phy->frame_rcvd_size = phy->frame_rcvd_size; 526 sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED); 527 } 528 529 static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device) 530 { 531 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 532 struct hisi_sas_device *sas_dev = NULL; 533 unsigned long flags; 534 int i; 535 536 spin_lock_irqsave(&hisi_hba->lock, flags); 537 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 538 if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) { 539 int queue = i % hisi_hba->queue_count; 540 struct hisi_sas_dq *dq = &hisi_hba->dq[queue]; 541 542 hisi_hba->devices[i].device_id = i; 543 sas_dev = &hisi_hba->devices[i]; 544 sas_dev->dev_status = HISI_SAS_DEV_NORMAL; 545 sas_dev->dev_type = device->dev_type; 546 sas_dev->hisi_hba = hisi_hba; 547 sas_dev->sas_device = device; 548 sas_dev->dq = dq; 549 INIT_LIST_HEAD(&hisi_hba->devices[i].list); 550 break; 551 } 552 } 553 spin_unlock_irqrestore(&hisi_hba->lock, flags); 554 555 return sas_dev; 556 } 557 558 static int hisi_sas_dev_found(struct domain_device *device) 559 { 560 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 561 struct domain_device *parent_dev = device->parent; 562 struct hisi_sas_device *sas_dev; 563 struct device *dev = hisi_hba->dev; 564 565 if (hisi_hba->hw->alloc_dev) 566 sas_dev = hisi_hba->hw->alloc_dev(device); 567 else 568 sas_dev = hisi_sas_alloc_dev(device); 569 if (!sas_dev) { 570 dev_err(dev, "fail alloc dev: max support %d devices\n", 571 HISI_SAS_MAX_DEVICES); 572 return -EINVAL; 573 } 574 575 device->lldd_dev = sas_dev; 576 hisi_hba->hw->setup_itct(hisi_hba, sas_dev); 577 578 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) { 579 int phy_no; 580 u8 phy_num = parent_dev->ex_dev.num_phys; 581 struct ex_phy *phy; 582 583 for (phy_no = 0; phy_no < phy_num; phy_no++) { 584 phy = &parent_dev->ex_dev.ex_phy[phy_no]; 585 if (SAS_ADDR(phy->attached_sas_addr) == 586 SAS_ADDR(device->sas_addr)) { 587 sas_dev->attached_phy = phy_no; 588 break; 589 } 590 } 591 592 if (phy_no == phy_num) { 593 dev_info(dev, "dev found: no attached " 594 "dev:%016llx at ex:%016llx\n", 595 SAS_ADDR(device->sas_addr), 596 SAS_ADDR(parent_dev->sas_addr)); 597 return -EINVAL; 598 } 599 } 600 601 dev_info(dev, "dev[%d:%x] found\n", 602 sas_dev->device_id, sas_dev->dev_type); 603 604 return 0; 605 } 606 607 static int hisi_sas_slave_configure(struct scsi_device *sdev) 608 { 609 struct domain_device *dev = sdev_to_domain_dev(sdev); 610 int ret = sas_slave_configure(sdev); 611 612 if (ret) 613 return ret; 614 if (!dev_is_sata(dev)) 615 sas_change_queue_depth(sdev, 64); 616 617 return 0; 618 } 619 620 static void hisi_sas_scan_start(struct Scsi_Host *shost) 621 { 622 struct hisi_hba *hisi_hba = shost_priv(shost); 623 624 hisi_hba->hw->phys_init(hisi_hba); 625 } 626 627 static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time) 628 { 629 struct hisi_hba *hisi_hba = shost_priv(shost); 630 struct sas_ha_struct *sha = &hisi_hba->sha; 631 632 /* Wait for PHY up interrupt to occur */ 633 if (time < HZ) 634 return 0; 635 636 sas_drain_work(sha); 637 return 1; 638 } 639 640 static void hisi_sas_phyup_work(struct work_struct *work) 641 { 642 struct hisi_sas_phy *phy = 643 container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]); 644 struct hisi_hba *hisi_hba = phy->hisi_hba; 645 struct asd_sas_phy *sas_phy = &phy->sas_phy; 646 int phy_no = sas_phy->id; 647 648 hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */ 649 hisi_sas_bytes_dmaed(hisi_hba, phy_no); 650 } 651 652 static void hisi_sas_linkreset_work(struct work_struct *work) 653 { 654 struct hisi_sas_phy *phy = 655 container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]); 656 struct asd_sas_phy *sas_phy = &phy->sas_phy; 657 658 hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL); 659 } 660 661 static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = { 662 [HISI_PHYE_PHY_UP] = hisi_sas_phyup_work, 663 [HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work, 664 }; 665 666 bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy, 667 enum hisi_sas_phy_event event) 668 { 669 struct hisi_hba *hisi_hba = phy->hisi_hba; 670 671 if (WARN_ON(event >= HISI_PHYES_NUM)) 672 return false; 673 674 return queue_work(hisi_hba->wq, &phy->works[event]); 675 } 676 EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event); 677 678 static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no) 679 { 680 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 681 struct asd_sas_phy *sas_phy = &phy->sas_phy; 682 int i; 683 684 phy->hisi_hba = hisi_hba; 685 phy->port = NULL; 686 sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0; 687 sas_phy->class = SAS; 688 sas_phy->iproto = SAS_PROTOCOL_ALL; 689 sas_phy->tproto = 0; 690 sas_phy->type = PHY_TYPE_PHYSICAL; 691 sas_phy->role = PHY_ROLE_INITIATOR; 692 sas_phy->oob_mode = OOB_NOT_CONNECTED; 693 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; 694 sas_phy->id = phy_no; 695 sas_phy->sas_addr = &hisi_hba->sas_addr[0]; 696 sas_phy->frame_rcvd = &phy->frame_rcvd[0]; 697 sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata; 698 sas_phy->lldd_phy = phy; 699 700 for (i = 0; i < HISI_PHYES_NUM; i++) 701 INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]); 702 } 703 704 static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy) 705 { 706 struct sas_ha_struct *sas_ha = sas_phy->ha; 707 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; 708 struct hisi_sas_phy *phy = sas_phy->lldd_phy; 709 struct asd_sas_port *sas_port = sas_phy->port; 710 struct hisi_sas_port *port = to_hisi_sas_port(sas_port); 711 unsigned long flags; 712 713 if (!sas_port) 714 return; 715 716 spin_lock_irqsave(&hisi_hba->lock, flags); 717 port->port_attached = 1; 718 port->id = phy->port_id; 719 phy->port = port; 720 sas_port->lldd_port = port; 721 spin_unlock_irqrestore(&hisi_hba->lock, flags); 722 } 723 724 static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task, 725 struct hisi_sas_slot *slot) 726 { 727 if (task) { 728 unsigned long flags; 729 struct task_status_struct *ts; 730 731 ts = &task->task_status; 732 733 ts->resp = SAS_TASK_COMPLETE; 734 ts->stat = SAS_ABORTED_TASK; 735 spin_lock_irqsave(&task->task_state_lock, flags); 736 task->task_state_flags &= 737 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); 738 task->task_state_flags |= SAS_TASK_STATE_DONE; 739 spin_unlock_irqrestore(&task->task_state_lock, flags); 740 } 741 742 hisi_sas_slot_task_free(hisi_hba, task, slot); 743 } 744 745 /* hisi_hba.lock should be locked */ 746 static void hisi_sas_release_task(struct hisi_hba *hisi_hba, 747 struct domain_device *device) 748 { 749 struct hisi_sas_slot *slot, *slot2; 750 struct hisi_sas_device *sas_dev = device->lldd_dev; 751 752 list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry) 753 hisi_sas_do_release_task(hisi_hba, slot->task, slot); 754 } 755 756 void hisi_sas_release_tasks(struct hisi_hba *hisi_hba) 757 { 758 struct hisi_sas_device *sas_dev; 759 struct domain_device *device; 760 int i; 761 762 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 763 sas_dev = &hisi_hba->devices[i]; 764 device = sas_dev->sas_device; 765 766 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || 767 !device) 768 continue; 769 770 hisi_sas_release_task(hisi_hba, device); 771 } 772 } 773 EXPORT_SYMBOL_GPL(hisi_sas_release_tasks); 774 775 static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba, 776 struct domain_device *device) 777 { 778 if (hisi_hba->hw->dereg_device) 779 hisi_hba->hw->dereg_device(hisi_hba, device); 780 } 781 782 static void hisi_sas_dev_gone(struct domain_device *device) 783 { 784 struct hisi_sas_device *sas_dev = device->lldd_dev; 785 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 786 struct device *dev = hisi_hba->dev; 787 788 dev_info(dev, "dev[%d:%x] is gone\n", 789 sas_dev->device_id, sas_dev->dev_type); 790 791 if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) { 792 hisi_sas_internal_task_abort(hisi_hba, device, 793 HISI_SAS_INT_ABT_DEV, 0); 794 795 hisi_sas_dereg_device(hisi_hba, device); 796 797 hisi_hba->hw->clear_itct(hisi_hba, sas_dev); 798 device->lldd_dev = NULL; 799 } 800 801 if (hisi_hba->hw->free_device) 802 hisi_hba->hw->free_device(sas_dev); 803 sas_dev->dev_type = SAS_PHY_UNUSED; 804 } 805 806 static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags) 807 { 808 return hisi_sas_task_exec(task, gfp_flags, 0, NULL); 809 } 810 811 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, 812 void *funcdata) 813 { 814 struct sas_ha_struct *sas_ha = sas_phy->ha; 815 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; 816 int phy_no = sas_phy->id; 817 818 switch (func) { 819 case PHY_FUNC_HARD_RESET: 820 hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no); 821 break; 822 823 case PHY_FUNC_LINK_RESET: 824 hisi_hba->hw->phy_disable(hisi_hba, phy_no); 825 msleep(100); 826 hisi_hba->hw->phy_start(hisi_hba, phy_no); 827 break; 828 829 case PHY_FUNC_DISABLE: 830 hisi_hba->hw->phy_disable(hisi_hba, phy_no); 831 break; 832 833 case PHY_FUNC_SET_LINK_RATE: 834 hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, funcdata); 835 break; 836 case PHY_FUNC_GET_EVENTS: 837 if (hisi_hba->hw->get_events) { 838 hisi_hba->hw->get_events(hisi_hba, phy_no); 839 break; 840 } 841 /* fallthru */ 842 case PHY_FUNC_RELEASE_SPINUP_HOLD: 843 default: 844 return -EOPNOTSUPP; 845 } 846 return 0; 847 } 848 849 static void hisi_sas_task_done(struct sas_task *task) 850 { 851 if (!del_timer(&task->slow_task->timer)) 852 return; 853 complete(&task->slow_task->completion); 854 } 855 856 static void hisi_sas_tmf_timedout(struct timer_list *t) 857 { 858 struct sas_task_slow *slow = from_timer(slow, t, timer); 859 struct sas_task *task = slow->task; 860 unsigned long flags; 861 862 spin_lock_irqsave(&task->task_state_lock, flags); 863 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) 864 task->task_state_flags |= SAS_TASK_STATE_ABORTED; 865 spin_unlock_irqrestore(&task->task_state_lock, flags); 866 867 complete(&task->slow_task->completion); 868 } 869 870 #define TASK_TIMEOUT 20 871 #define TASK_RETRY 3 872 static int hisi_sas_exec_internal_tmf_task(struct domain_device *device, 873 void *parameter, u32 para_len, 874 struct hisi_sas_tmf_task *tmf) 875 { 876 struct hisi_sas_device *sas_dev = device->lldd_dev; 877 struct hisi_hba *hisi_hba = sas_dev->hisi_hba; 878 struct device *dev = hisi_hba->dev; 879 struct sas_task *task; 880 int res, retry; 881 882 for (retry = 0; retry < TASK_RETRY; retry++) { 883 task = sas_alloc_slow_task(GFP_KERNEL); 884 if (!task) 885 return -ENOMEM; 886 887 task->dev = device; 888 task->task_proto = device->tproto; 889 890 if (dev_is_sata(device)) { 891 task->ata_task.device_control_reg_update = 1; 892 memcpy(&task->ata_task.fis, parameter, para_len); 893 } else { 894 memcpy(&task->ssp_task, parameter, para_len); 895 } 896 task->task_done = hisi_sas_task_done; 897 898 task->slow_task->timer.function = hisi_sas_tmf_timedout; 899 task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ; 900 add_timer(&task->slow_task->timer); 901 902 res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf); 903 904 if (res) { 905 del_timer(&task->slow_task->timer); 906 dev_err(dev, "abort tmf: executing internal task failed: %d\n", 907 res); 908 goto ex_err; 909 } 910 911 wait_for_completion(&task->slow_task->completion); 912 res = TMF_RESP_FUNC_FAILED; 913 /* Even TMF timed out, return direct. */ 914 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { 915 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { 916 struct hisi_sas_slot *slot = task->lldd_task; 917 918 dev_err(dev, "abort tmf: TMF task timeout and not done\n"); 919 if (slot) 920 slot->task = NULL; 921 922 goto ex_err; 923 } else 924 dev_err(dev, "abort tmf: TMF task timeout\n"); 925 } 926 927 if (task->task_status.resp == SAS_TASK_COMPLETE && 928 task->task_status.stat == TMF_RESP_FUNC_COMPLETE) { 929 res = TMF_RESP_FUNC_COMPLETE; 930 break; 931 } 932 933 if (task->task_status.resp == SAS_TASK_COMPLETE && 934 task->task_status.stat == TMF_RESP_FUNC_SUCC) { 935 res = TMF_RESP_FUNC_SUCC; 936 break; 937 } 938 939 if (task->task_status.resp == SAS_TASK_COMPLETE && 940 task->task_status.stat == SAS_DATA_UNDERRUN) { 941 /* no error, but return the number of bytes of 942 * underrun 943 */ 944 dev_warn(dev, "abort tmf: task to dev %016llx " 945 "resp: 0x%x sts 0x%x underrun\n", 946 SAS_ADDR(device->sas_addr), 947 task->task_status.resp, 948 task->task_status.stat); 949 res = task->task_status.residual; 950 break; 951 } 952 953 if (task->task_status.resp == SAS_TASK_COMPLETE && 954 task->task_status.stat == SAS_DATA_OVERRUN) { 955 dev_warn(dev, "abort tmf: blocked task error\n"); 956 res = -EMSGSIZE; 957 break; 958 } 959 960 dev_warn(dev, "abort tmf: task to dev " 961 "%016llx resp: 0x%x status 0x%x\n", 962 SAS_ADDR(device->sas_addr), task->task_status.resp, 963 task->task_status.stat); 964 sas_free_task(task); 965 task = NULL; 966 } 967 ex_err: 968 if (retry == TASK_RETRY) 969 dev_warn(dev, "abort tmf: executing internal task failed!\n"); 970 sas_free_task(task); 971 return res; 972 } 973 974 static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev, 975 bool reset, int pmp, u8 *fis) 976 { 977 struct ata_taskfile tf; 978 979 ata_tf_init(dev, &tf); 980 if (reset) 981 tf.ctl |= ATA_SRST; 982 else 983 tf.ctl &= ~ATA_SRST; 984 tf.command = ATA_CMD_DEV_RESET; 985 ata_tf_to_fis(&tf, pmp, 0, fis); 986 } 987 988 static int hisi_sas_softreset_ata_disk(struct domain_device *device) 989 { 990 u8 fis[20] = {0}; 991 struct ata_port *ap = device->sata_dev.ap; 992 struct ata_link *link; 993 int rc = TMF_RESP_FUNC_FAILED; 994 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 995 struct device *dev = hisi_hba->dev; 996 int s = sizeof(struct host_to_dev_fis); 997 unsigned long flags; 998 999 ata_for_each_link(link, ap, EDGE) { 1000 int pmp = sata_srst_pmp(link); 1001 1002 hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis); 1003 rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL); 1004 if (rc != TMF_RESP_FUNC_COMPLETE) 1005 break; 1006 } 1007 1008 if (rc == TMF_RESP_FUNC_COMPLETE) { 1009 ata_for_each_link(link, ap, EDGE) { 1010 int pmp = sata_srst_pmp(link); 1011 1012 hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis); 1013 rc = hisi_sas_exec_internal_tmf_task(device, fis, 1014 s, NULL); 1015 if (rc != TMF_RESP_FUNC_COMPLETE) 1016 dev_err(dev, "ata disk de-reset failed\n"); 1017 } 1018 } else { 1019 dev_err(dev, "ata disk reset failed\n"); 1020 } 1021 1022 if (rc == TMF_RESP_FUNC_COMPLETE) { 1023 spin_lock_irqsave(&hisi_hba->lock, flags); 1024 hisi_sas_release_task(hisi_hba, device); 1025 spin_unlock_irqrestore(&hisi_hba->lock, flags); 1026 } 1027 1028 return rc; 1029 } 1030 1031 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device, 1032 u8 *lun, struct hisi_sas_tmf_task *tmf) 1033 { 1034 struct sas_ssp_task ssp_task; 1035 1036 if (!(device->tproto & SAS_PROTOCOL_SSP)) 1037 return TMF_RESP_FUNC_ESUPP; 1038 1039 memcpy(ssp_task.LUN, lun, 8); 1040 1041 return hisi_sas_exec_internal_tmf_task(device, &ssp_task, 1042 sizeof(ssp_task), tmf); 1043 } 1044 1045 static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba) 1046 { 1047 u32 state = hisi_hba->hw->get_phys_state(hisi_hba); 1048 int i; 1049 1050 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1051 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; 1052 struct domain_device *device = sas_dev->sas_device; 1053 struct asd_sas_port *sas_port; 1054 struct hisi_sas_port *port; 1055 struct hisi_sas_phy *phy = NULL; 1056 struct asd_sas_phy *sas_phy; 1057 1058 if ((sas_dev->dev_type == SAS_PHY_UNUSED) 1059 || !device || !device->port) 1060 continue; 1061 1062 sas_port = device->port; 1063 port = to_hisi_sas_port(sas_port); 1064 1065 list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) 1066 if (state & BIT(sas_phy->id)) { 1067 phy = sas_phy->lldd_phy; 1068 break; 1069 } 1070 1071 if (phy) { 1072 port->id = phy->port_id; 1073 1074 /* Update linkrate of directly attached device. */ 1075 if (!device->parent) 1076 device->linkrate = phy->sas_phy.linkrate; 1077 1078 hisi_hba->hw->setup_itct(hisi_hba, sas_dev); 1079 } else 1080 port->id = 0xff; 1081 } 1082 } 1083 1084 static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state, 1085 u32 state) 1086 { 1087 struct sas_ha_struct *sas_ha = &hisi_hba->sha; 1088 struct asd_sas_port *_sas_port = NULL; 1089 int phy_no; 1090 1091 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { 1092 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1093 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1094 struct asd_sas_port *sas_port = sas_phy->port; 1095 bool do_port_check = !!(_sas_port != sas_port); 1096 1097 if (!sas_phy->phy->enabled) 1098 continue; 1099 1100 /* Report PHY state change to libsas */ 1101 if (state & BIT(phy_no)) { 1102 if (do_port_check && sas_port && sas_port->port_dev) { 1103 struct domain_device *dev = sas_port->port_dev; 1104 1105 _sas_port = sas_port; 1106 1107 if (DEV_IS_EXPANDER(dev->dev_type)) 1108 sas_ha->notify_port_event(sas_phy, 1109 PORTE_BROADCAST_RCVD); 1110 } 1111 } else if (old_state & (1 << phy_no)) 1112 /* PHY down but was up before */ 1113 hisi_sas_phy_down(hisi_hba, phy_no, 0); 1114 1115 } 1116 } 1117 1118 static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba) 1119 { 1120 struct device *dev = hisi_hba->dev; 1121 struct Scsi_Host *shost = hisi_hba->shost; 1122 u32 old_state, state; 1123 unsigned long flags; 1124 int rc; 1125 1126 if (!hisi_hba->hw->soft_reset) 1127 return -1; 1128 1129 if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) 1130 return -1; 1131 1132 dev_info(dev, "controller resetting...\n"); 1133 old_state = hisi_hba->hw->get_phys_state(hisi_hba); 1134 1135 scsi_block_requests(shost); 1136 set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1137 rc = hisi_hba->hw->soft_reset(hisi_hba); 1138 if (rc) { 1139 dev_warn(dev, "controller reset failed (%d)\n", rc); 1140 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1141 scsi_unblock_requests(shost); 1142 goto out; 1143 } 1144 spin_lock_irqsave(&hisi_hba->lock, flags); 1145 hisi_sas_release_tasks(hisi_hba); 1146 spin_unlock_irqrestore(&hisi_hba->lock, flags); 1147 1148 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); 1149 1150 /* Init and wait for PHYs to come up and all libsas event finished. */ 1151 hisi_hba->hw->phys_init(hisi_hba); 1152 msleep(1000); 1153 hisi_sas_refresh_port_id(hisi_hba); 1154 scsi_unblock_requests(shost); 1155 1156 state = hisi_hba->hw->get_phys_state(hisi_hba); 1157 hisi_sas_rescan_topology(hisi_hba, old_state, state); 1158 dev_info(dev, "controller reset complete\n"); 1159 1160 out: 1161 clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags); 1162 1163 return rc; 1164 } 1165 1166 static int hisi_sas_abort_task(struct sas_task *task) 1167 { 1168 struct scsi_lun lun; 1169 struct hisi_sas_tmf_task tmf_task; 1170 struct domain_device *device = task->dev; 1171 struct hisi_sas_device *sas_dev = device->lldd_dev; 1172 struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev); 1173 struct device *dev = hisi_hba->dev; 1174 int rc = TMF_RESP_FUNC_FAILED; 1175 unsigned long flags; 1176 1177 if (!sas_dev) { 1178 dev_warn(dev, "Device has been removed\n"); 1179 return TMF_RESP_FUNC_FAILED; 1180 } 1181 1182 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 1183 rc = TMF_RESP_FUNC_COMPLETE; 1184 goto out; 1185 } 1186 1187 sas_dev->dev_status = HISI_SAS_DEV_EH; 1188 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { 1189 struct scsi_cmnd *cmnd = task->uldd_task; 1190 struct hisi_sas_slot *slot = task->lldd_task; 1191 u32 tag = slot->idx; 1192 int rc2; 1193 1194 int_to_scsilun(cmnd->device->lun, &lun); 1195 tmf_task.tmf = TMF_ABORT_TASK; 1196 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag); 1197 1198 rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, 1199 &tmf_task); 1200 1201 rc2 = hisi_sas_internal_task_abort(hisi_hba, device, 1202 HISI_SAS_INT_ABT_CMD, tag); 1203 if (rc2 < 0) { 1204 dev_err(dev, "abort task: internal abort (%d)\n", rc2); 1205 return TMF_RESP_FUNC_FAILED; 1206 } 1207 1208 /* 1209 * If the TMF finds that the IO is not in the device and also 1210 * the internal abort does not succeed, then it is safe to 1211 * free the slot. 1212 * Note: if the internal abort succeeds then the slot 1213 * will have already been completed 1214 */ 1215 if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) { 1216 if (task->lldd_task) { 1217 spin_lock_irqsave(&hisi_hba->lock, flags); 1218 hisi_sas_do_release_task(hisi_hba, task, slot); 1219 spin_unlock_irqrestore(&hisi_hba->lock, flags); 1220 } 1221 } 1222 } else if (task->task_proto & SAS_PROTOCOL_SATA || 1223 task->task_proto & SAS_PROTOCOL_STP) { 1224 if (task->dev->dev_type == SAS_SATA_DEV) { 1225 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1226 HISI_SAS_INT_ABT_DEV, 0); 1227 if (rc < 0) { 1228 dev_err(dev, "abort task: internal abort failed\n"); 1229 goto out; 1230 } 1231 hisi_sas_dereg_device(hisi_hba, device); 1232 rc = hisi_sas_softreset_ata_disk(device); 1233 } 1234 } else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) { 1235 /* SMP */ 1236 struct hisi_sas_slot *slot = task->lldd_task; 1237 u32 tag = slot->idx; 1238 1239 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1240 HISI_SAS_INT_ABT_CMD, tag); 1241 if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) && 1242 task->lldd_task) { 1243 spin_lock_irqsave(&hisi_hba->lock, flags); 1244 hisi_sas_do_release_task(hisi_hba, task, slot); 1245 spin_unlock_irqrestore(&hisi_hba->lock, flags); 1246 } 1247 } 1248 1249 out: 1250 if (rc != TMF_RESP_FUNC_COMPLETE) 1251 dev_notice(dev, "abort task: rc=%d\n", rc); 1252 return rc; 1253 } 1254 1255 static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun) 1256 { 1257 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1258 struct device *dev = hisi_hba->dev; 1259 struct hisi_sas_tmf_task tmf_task; 1260 int rc = TMF_RESP_FUNC_FAILED; 1261 unsigned long flags; 1262 1263 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1264 HISI_SAS_INT_ABT_DEV, 0); 1265 if (rc < 0) { 1266 dev_err(dev, "abort task set: internal abort rc=%d\n", rc); 1267 return TMF_RESP_FUNC_FAILED; 1268 } 1269 hisi_sas_dereg_device(hisi_hba, device); 1270 1271 tmf_task.tmf = TMF_ABORT_TASK_SET; 1272 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task); 1273 1274 if (rc == TMF_RESP_FUNC_COMPLETE) { 1275 spin_lock_irqsave(&hisi_hba->lock, flags); 1276 hisi_sas_release_task(hisi_hba, device); 1277 spin_unlock_irqrestore(&hisi_hba->lock, flags); 1278 } 1279 1280 return rc; 1281 } 1282 1283 static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun) 1284 { 1285 int rc = TMF_RESP_FUNC_FAILED; 1286 struct hisi_sas_tmf_task tmf_task; 1287 1288 tmf_task.tmf = TMF_CLEAR_ACA; 1289 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task); 1290 1291 return rc; 1292 } 1293 1294 static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device) 1295 { 1296 struct sas_phy *phy = sas_get_local_phy(device); 1297 int rc, reset_type = (device->dev_type == SAS_SATA_DEV || 1298 (device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1; 1299 rc = sas_phy_reset(phy, reset_type); 1300 sas_put_local_phy(phy); 1301 msleep(2000); 1302 return rc; 1303 } 1304 1305 static int hisi_sas_I_T_nexus_reset(struct domain_device *device) 1306 { 1307 struct hisi_sas_device *sas_dev = device->lldd_dev; 1308 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1309 struct device *dev = hisi_hba->dev; 1310 int rc = TMF_RESP_FUNC_FAILED; 1311 unsigned long flags; 1312 1313 if (sas_dev->dev_status != HISI_SAS_DEV_EH) 1314 return TMF_RESP_FUNC_FAILED; 1315 sas_dev->dev_status = HISI_SAS_DEV_NORMAL; 1316 1317 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1318 HISI_SAS_INT_ABT_DEV, 0); 1319 if (rc < 0) { 1320 dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc); 1321 return TMF_RESP_FUNC_FAILED; 1322 } 1323 hisi_sas_dereg_device(hisi_hba, device); 1324 1325 rc = hisi_sas_debug_I_T_nexus_reset(device); 1326 1327 if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) { 1328 spin_lock_irqsave(&hisi_hba->lock, flags); 1329 hisi_sas_release_task(hisi_hba, device); 1330 spin_unlock_irqrestore(&hisi_hba->lock, flags); 1331 } 1332 return rc; 1333 } 1334 1335 static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun) 1336 { 1337 struct hisi_sas_device *sas_dev = device->lldd_dev; 1338 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1339 struct device *dev = hisi_hba->dev; 1340 unsigned long flags; 1341 int rc = TMF_RESP_FUNC_FAILED; 1342 1343 sas_dev->dev_status = HISI_SAS_DEV_EH; 1344 if (dev_is_sata(device)) { 1345 struct sas_phy *phy; 1346 1347 /* Clear internal IO and then hardreset */ 1348 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1349 HISI_SAS_INT_ABT_DEV, 0); 1350 if (rc < 0) { 1351 dev_err(dev, "lu_reset: internal abort failed\n"); 1352 goto out; 1353 } 1354 hisi_sas_dereg_device(hisi_hba, device); 1355 1356 phy = sas_get_local_phy(device); 1357 1358 rc = sas_phy_reset(phy, 1); 1359 1360 if (rc == 0) { 1361 spin_lock_irqsave(&hisi_hba->lock, flags); 1362 hisi_sas_release_task(hisi_hba, device); 1363 spin_unlock_irqrestore(&hisi_hba->lock, flags); 1364 } 1365 sas_put_local_phy(phy); 1366 } else { 1367 struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET }; 1368 1369 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1370 HISI_SAS_INT_ABT_DEV, 0); 1371 if (rc < 0) { 1372 dev_err(dev, "lu_reset: internal abort failed\n"); 1373 goto out; 1374 } 1375 hisi_sas_dereg_device(hisi_hba, device); 1376 1377 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task); 1378 if (rc == TMF_RESP_FUNC_COMPLETE) { 1379 spin_lock_irqsave(&hisi_hba->lock, flags); 1380 hisi_sas_release_task(hisi_hba, device); 1381 spin_unlock_irqrestore(&hisi_hba->lock, flags); 1382 } 1383 } 1384 out: 1385 if (rc != TMF_RESP_FUNC_COMPLETE) 1386 dev_err(dev, "lu_reset: for device[%d]:rc= %d\n", 1387 sas_dev->device_id, rc); 1388 return rc; 1389 } 1390 1391 static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha) 1392 { 1393 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; 1394 HISI_SAS_DECLARE_RST_WORK_ON_STACK(r); 1395 1396 queue_work(hisi_hba->wq, &r.work); 1397 wait_for_completion(r.completion); 1398 if (r.done) 1399 return TMF_RESP_FUNC_COMPLETE; 1400 1401 return TMF_RESP_FUNC_FAILED; 1402 } 1403 1404 static int hisi_sas_query_task(struct sas_task *task) 1405 { 1406 struct scsi_lun lun; 1407 struct hisi_sas_tmf_task tmf_task; 1408 int rc = TMF_RESP_FUNC_FAILED; 1409 1410 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { 1411 struct scsi_cmnd *cmnd = task->uldd_task; 1412 struct domain_device *device = task->dev; 1413 struct hisi_sas_slot *slot = task->lldd_task; 1414 u32 tag = slot->idx; 1415 1416 int_to_scsilun(cmnd->device->lun, &lun); 1417 tmf_task.tmf = TMF_QUERY_TASK; 1418 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag); 1419 1420 rc = hisi_sas_debug_issue_ssp_tmf(device, 1421 lun.scsi_lun, 1422 &tmf_task); 1423 switch (rc) { 1424 /* The task is still in Lun, release it then */ 1425 case TMF_RESP_FUNC_SUCC: 1426 /* The task is not in Lun or failed, reset the phy */ 1427 case TMF_RESP_FUNC_FAILED: 1428 case TMF_RESP_FUNC_COMPLETE: 1429 break; 1430 default: 1431 rc = TMF_RESP_FUNC_FAILED; 1432 break; 1433 } 1434 } 1435 return rc; 1436 } 1437 1438 static int 1439 hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, 1440 struct sas_task *task, int abort_flag, 1441 int task_tag) 1442 { 1443 struct domain_device *device = task->dev; 1444 struct hisi_sas_device *sas_dev = device->lldd_dev; 1445 struct device *dev = hisi_hba->dev; 1446 struct hisi_sas_port *port; 1447 struct hisi_sas_slot *slot; 1448 struct asd_sas_port *sas_port = device->port; 1449 struct hisi_sas_cmd_hdr *cmd_hdr_base; 1450 struct hisi_sas_dq *dq = sas_dev->dq; 1451 int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx; 1452 unsigned long flags, flags_dq; 1453 1454 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) 1455 return -EINVAL; 1456 1457 if (!device->port) 1458 return -1; 1459 1460 port = to_hisi_sas_port(sas_port); 1461 1462 /* simply get a slot and send abort command */ 1463 spin_lock_irqsave(&hisi_hba->lock, flags); 1464 rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx); 1465 if (rc) { 1466 spin_unlock_irqrestore(&hisi_hba->lock, flags); 1467 goto err_out; 1468 } 1469 spin_unlock_irqrestore(&hisi_hba->lock, flags); 1470 1471 spin_lock_irqsave(&dq->lock, flags_dq); 1472 rc = hisi_hba->hw->get_free_slot(hisi_hba, dq); 1473 if (rc) 1474 goto err_out_tag; 1475 1476 dlvry_queue = dq->id; 1477 dlvry_queue_slot = dq->wr_point; 1478 1479 slot = &hisi_hba->slot_info[slot_idx]; 1480 memset(slot, 0, sizeof(struct hisi_sas_slot)); 1481 1482 slot->idx = slot_idx; 1483 slot->n_elem = n_elem; 1484 slot->dlvry_queue = dlvry_queue; 1485 slot->dlvry_queue_slot = dlvry_queue_slot; 1486 cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue]; 1487 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot]; 1488 slot->task = task; 1489 slot->port = port; 1490 task->lldd_task = slot; 1491 1492 slot->buf = dma_pool_alloc(hisi_hba->buffer_pool, 1493 GFP_ATOMIC, &slot->buf_dma); 1494 if (!slot->buf) { 1495 rc = -ENOMEM; 1496 goto err_out_tag; 1497 } 1498 1499 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr)); 1500 memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ); 1501 memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ); 1502 1503 rc = hisi_sas_task_prep_abort(hisi_hba, slot, device_id, 1504 abort_flag, task_tag); 1505 if (rc) 1506 goto err_out_buf; 1507 1508 spin_lock_irqsave(&hisi_hba->lock, flags); 1509 list_add_tail(&slot->entry, &sas_dev->list); 1510 spin_unlock_irqrestore(&hisi_hba->lock, flags); 1511 spin_lock_irqsave(&task->task_state_lock, flags); 1512 task->task_state_flags |= SAS_TASK_AT_INITIATOR; 1513 spin_unlock_irqrestore(&task->task_state_lock, flags); 1514 1515 dq->slot_prep = slot; 1516 1517 atomic64_inc(&sas_dev->running_req); 1518 1519 /* send abort command to the chip */ 1520 hisi_hba->hw->start_delivery(dq); 1521 spin_unlock_irqrestore(&dq->lock, flags_dq); 1522 1523 return 0; 1524 1525 err_out_buf: 1526 dma_pool_free(hisi_hba->buffer_pool, slot->buf, 1527 slot->buf_dma); 1528 err_out_tag: 1529 spin_lock_irqsave(&hisi_hba->lock, flags); 1530 hisi_sas_slot_index_free(hisi_hba, slot_idx); 1531 spin_unlock_irqrestore(&hisi_hba->lock, flags); 1532 spin_unlock_irqrestore(&dq->lock, flags_dq); 1533 err_out: 1534 dev_err(dev, "internal abort task prep: failed[%d]!\n", rc); 1535 1536 return rc; 1537 } 1538 1539 /** 1540 * hisi_sas_internal_task_abort -- execute an internal 1541 * abort command for single IO command or a device 1542 * @hisi_hba: host controller struct 1543 * @device: domain device 1544 * @abort_flag: mode of operation, device or single IO 1545 * @tag: tag of IO to be aborted (only relevant to single 1546 * IO mode) 1547 */ 1548 static int 1549 hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, 1550 struct domain_device *device, 1551 int abort_flag, int tag) 1552 { 1553 struct sas_task *task; 1554 struct hisi_sas_device *sas_dev = device->lldd_dev; 1555 struct device *dev = hisi_hba->dev; 1556 int res; 1557 1558 /* 1559 * The interface is not realized means this HW don't support internal 1560 * abort, or don't need to do internal abort. Then here, we return 1561 * TMF_RESP_FUNC_FAILED and let other steps go on, which depends that 1562 * the internal abort has been executed and returned CQ. 1563 */ 1564 if (!hisi_hba->hw->prep_abort) 1565 return TMF_RESP_FUNC_FAILED; 1566 1567 task = sas_alloc_slow_task(GFP_KERNEL); 1568 if (!task) 1569 return -ENOMEM; 1570 1571 task->dev = device; 1572 task->task_proto = device->tproto; 1573 task->task_done = hisi_sas_task_done; 1574 task->slow_task->timer.function = hisi_sas_tmf_timedout; 1575 task->slow_task->timer.expires = jiffies + msecs_to_jiffies(110); 1576 add_timer(&task->slow_task->timer); 1577 1578 res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id, 1579 task, abort_flag, tag); 1580 if (res) { 1581 del_timer(&task->slow_task->timer); 1582 dev_err(dev, "internal task abort: executing internal task failed: %d\n", 1583 res); 1584 goto exit; 1585 } 1586 wait_for_completion(&task->slow_task->completion); 1587 res = TMF_RESP_FUNC_FAILED; 1588 1589 /* Internal abort timed out */ 1590 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { 1591 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { 1592 struct hisi_sas_slot *slot = task->lldd_task; 1593 1594 if (slot) 1595 slot->task = NULL; 1596 dev_err(dev, "internal task abort: timeout and not done.\n"); 1597 res = -EIO; 1598 goto exit; 1599 } else 1600 dev_err(dev, "internal task abort: timeout.\n"); 1601 } 1602 1603 if (task->task_status.resp == SAS_TASK_COMPLETE && 1604 task->task_status.stat == TMF_RESP_FUNC_COMPLETE) { 1605 res = TMF_RESP_FUNC_COMPLETE; 1606 goto exit; 1607 } 1608 1609 if (task->task_status.resp == SAS_TASK_COMPLETE && 1610 task->task_status.stat == TMF_RESP_FUNC_SUCC) { 1611 res = TMF_RESP_FUNC_SUCC; 1612 goto exit; 1613 } 1614 1615 exit: 1616 dev_dbg(dev, "internal task abort: task to dev %016llx task=%p " 1617 "resp: 0x%x sts 0x%x\n", 1618 SAS_ADDR(device->sas_addr), 1619 task, 1620 task->task_status.resp, /* 0 is complete, -1 is undelivered */ 1621 task->task_status.stat); 1622 sas_free_task(task); 1623 1624 return res; 1625 } 1626 1627 static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy) 1628 { 1629 hisi_sas_port_notify_formed(sas_phy); 1630 } 1631 1632 static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy) 1633 { 1634 } 1635 1636 static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type, 1637 u8 reg_index, u8 reg_count, u8 *write_data) 1638 { 1639 struct hisi_hba *hisi_hba = sha->lldd_ha; 1640 1641 if (!hisi_hba->hw->write_gpio) 1642 return -EOPNOTSUPP; 1643 1644 return hisi_hba->hw->write_gpio(hisi_hba, reg_type, 1645 reg_index, reg_count, write_data); 1646 } 1647 1648 static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy) 1649 { 1650 phy->phy_attached = 0; 1651 phy->phy_type = 0; 1652 phy->port = NULL; 1653 } 1654 1655 void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy) 1656 { 1657 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1658 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1659 struct sas_ha_struct *sas_ha = &hisi_hba->sha; 1660 1661 if (rdy) { 1662 /* Phy down but ready */ 1663 hisi_sas_bytes_dmaed(hisi_hba, phy_no); 1664 hisi_sas_port_notify_formed(sas_phy); 1665 } else { 1666 struct hisi_sas_port *port = phy->port; 1667 1668 /* Phy down and not ready */ 1669 sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL); 1670 sas_phy_disconnected(sas_phy); 1671 1672 if (port) { 1673 if (phy->phy_type & PORT_TYPE_SAS) { 1674 int port_id = port->id; 1675 1676 if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba, 1677 port_id)) 1678 port->port_attached = 0; 1679 } else if (phy->phy_type & PORT_TYPE_SATA) 1680 port->port_attached = 0; 1681 } 1682 hisi_sas_phy_disconnected(phy); 1683 } 1684 } 1685 EXPORT_SYMBOL_GPL(hisi_sas_phy_down); 1686 1687 void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba) 1688 { 1689 int i; 1690 1691 for (i = 0; i < hisi_hba->queue_count; i++) { 1692 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 1693 1694 tasklet_kill(&cq->tasklet); 1695 } 1696 } 1697 EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets); 1698 1699 struct scsi_transport_template *hisi_sas_stt; 1700 EXPORT_SYMBOL_GPL(hisi_sas_stt); 1701 1702 static struct device_attribute *host_attrs[] = { 1703 &dev_attr_phy_event_threshold, 1704 NULL, 1705 }; 1706 1707 static struct scsi_host_template _hisi_sas_sht = { 1708 .module = THIS_MODULE, 1709 .name = DRV_NAME, 1710 .queuecommand = sas_queuecommand, 1711 .target_alloc = sas_target_alloc, 1712 .slave_configure = hisi_sas_slave_configure, 1713 .scan_finished = hisi_sas_scan_finished, 1714 .scan_start = hisi_sas_scan_start, 1715 .change_queue_depth = sas_change_queue_depth, 1716 .bios_param = sas_bios_param, 1717 .can_queue = 1, 1718 .this_id = -1, 1719 .sg_tablesize = SG_ALL, 1720 .max_sectors = SCSI_DEFAULT_MAX_SECTORS, 1721 .use_clustering = ENABLE_CLUSTERING, 1722 .eh_device_reset_handler = sas_eh_device_reset_handler, 1723 .eh_target_reset_handler = sas_eh_target_reset_handler, 1724 .target_destroy = sas_target_destroy, 1725 .ioctl = sas_ioctl, 1726 .shost_attrs = host_attrs, 1727 }; 1728 struct scsi_host_template *hisi_sas_sht = &_hisi_sas_sht; 1729 EXPORT_SYMBOL_GPL(hisi_sas_sht); 1730 1731 static struct sas_domain_function_template hisi_sas_transport_ops = { 1732 .lldd_dev_found = hisi_sas_dev_found, 1733 .lldd_dev_gone = hisi_sas_dev_gone, 1734 .lldd_execute_task = hisi_sas_queue_command, 1735 .lldd_control_phy = hisi_sas_control_phy, 1736 .lldd_abort_task = hisi_sas_abort_task, 1737 .lldd_abort_task_set = hisi_sas_abort_task_set, 1738 .lldd_clear_aca = hisi_sas_clear_aca, 1739 .lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset, 1740 .lldd_lu_reset = hisi_sas_lu_reset, 1741 .lldd_query_task = hisi_sas_query_task, 1742 .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha, 1743 .lldd_port_formed = hisi_sas_port_formed, 1744 .lldd_port_deformed = hisi_sas_port_deformed, 1745 .lldd_write_gpio = hisi_sas_write_gpio, 1746 }; 1747 1748 void hisi_sas_init_mem(struct hisi_hba *hisi_hba) 1749 { 1750 int i, s, max_command_entries = hisi_hba->hw->max_command_entries; 1751 1752 for (i = 0; i < hisi_hba->queue_count; i++) { 1753 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 1754 struct hisi_sas_dq *dq = &hisi_hba->dq[i]; 1755 1756 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS; 1757 memset(hisi_hba->cmd_hdr[i], 0, s); 1758 dq->wr_point = 0; 1759 1760 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; 1761 memset(hisi_hba->complete_hdr[i], 0, s); 1762 cq->rd_point = 0; 1763 } 1764 1765 s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy; 1766 memset(hisi_hba->initial_fis, 0, s); 1767 1768 s = max_command_entries * sizeof(struct hisi_sas_iost); 1769 memset(hisi_hba->iost, 0, s); 1770 1771 s = max_command_entries * sizeof(struct hisi_sas_breakpoint); 1772 memset(hisi_hba->breakpoint, 0, s); 1773 1774 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint); 1775 memset(hisi_hba->sata_breakpoint, 0, s); 1776 } 1777 EXPORT_SYMBOL_GPL(hisi_sas_init_mem); 1778 1779 int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost) 1780 { 1781 struct device *dev = hisi_hba->dev; 1782 int i, s, max_command_entries = hisi_hba->hw->max_command_entries; 1783 1784 spin_lock_init(&hisi_hba->lock); 1785 for (i = 0; i < hisi_hba->n_phy; i++) { 1786 hisi_sas_phy_init(hisi_hba, i); 1787 hisi_hba->port[i].port_attached = 0; 1788 hisi_hba->port[i].id = -1; 1789 } 1790 1791 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1792 hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED; 1793 hisi_hba->devices[i].device_id = i; 1794 hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL; 1795 } 1796 1797 for (i = 0; i < hisi_hba->queue_count; i++) { 1798 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 1799 struct hisi_sas_dq *dq = &hisi_hba->dq[i]; 1800 1801 /* Completion queue structure */ 1802 cq->id = i; 1803 cq->hisi_hba = hisi_hba; 1804 1805 /* Delivery queue structure */ 1806 spin_lock_init(&dq->lock); 1807 dq->id = i; 1808 dq->hisi_hba = hisi_hba; 1809 1810 /* Delivery queue */ 1811 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS; 1812 hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s, 1813 &hisi_hba->cmd_hdr_dma[i], GFP_KERNEL); 1814 if (!hisi_hba->cmd_hdr[i]) 1815 goto err_out; 1816 1817 /* Completion queue */ 1818 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; 1819 hisi_hba->complete_hdr[i] = dma_alloc_coherent(dev, s, 1820 &hisi_hba->complete_hdr_dma[i], GFP_KERNEL); 1821 if (!hisi_hba->complete_hdr[i]) 1822 goto err_out; 1823 } 1824 1825 s = sizeof(struct hisi_sas_slot_buf_table); 1826 hisi_hba->buffer_pool = dma_pool_create("dma_buffer", dev, s, 16, 0); 1827 if (!hisi_hba->buffer_pool) 1828 goto err_out; 1829 1830 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct); 1831 hisi_hba->itct = dma_alloc_coherent(dev, s, &hisi_hba->itct_dma, 1832 GFP_KERNEL); 1833 if (!hisi_hba->itct) 1834 goto err_out; 1835 1836 memset(hisi_hba->itct, 0, s); 1837 1838 hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries, 1839 sizeof(struct hisi_sas_slot), 1840 GFP_KERNEL); 1841 if (!hisi_hba->slot_info) 1842 goto err_out; 1843 1844 s = max_command_entries * sizeof(struct hisi_sas_iost); 1845 hisi_hba->iost = dma_alloc_coherent(dev, s, &hisi_hba->iost_dma, 1846 GFP_KERNEL); 1847 if (!hisi_hba->iost) 1848 goto err_out; 1849 1850 s = max_command_entries * sizeof(struct hisi_sas_breakpoint); 1851 hisi_hba->breakpoint = dma_alloc_coherent(dev, s, 1852 &hisi_hba->breakpoint_dma, GFP_KERNEL); 1853 if (!hisi_hba->breakpoint) 1854 goto err_out; 1855 1856 hisi_hba->slot_index_count = max_command_entries; 1857 s = hisi_hba->slot_index_count / BITS_PER_BYTE; 1858 hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL); 1859 if (!hisi_hba->slot_index_tags) 1860 goto err_out; 1861 1862 s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS; 1863 hisi_hba->initial_fis = dma_alloc_coherent(dev, s, 1864 &hisi_hba->initial_fis_dma, GFP_KERNEL); 1865 if (!hisi_hba->initial_fis) 1866 goto err_out; 1867 1868 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint); 1869 hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s, 1870 &hisi_hba->sata_breakpoint_dma, GFP_KERNEL); 1871 if (!hisi_hba->sata_breakpoint) 1872 goto err_out; 1873 hisi_sas_init_mem(hisi_hba); 1874 1875 hisi_sas_slot_index_init(hisi_hba); 1876 1877 hisi_hba->wq = create_singlethread_workqueue(dev_name(dev)); 1878 if (!hisi_hba->wq) { 1879 dev_err(dev, "sas_alloc: failed to create workqueue\n"); 1880 goto err_out; 1881 } 1882 1883 return 0; 1884 err_out: 1885 return -ENOMEM; 1886 } 1887 EXPORT_SYMBOL_GPL(hisi_sas_alloc); 1888 1889 void hisi_sas_free(struct hisi_hba *hisi_hba) 1890 { 1891 struct device *dev = hisi_hba->dev; 1892 int i, s, max_command_entries = hisi_hba->hw->max_command_entries; 1893 1894 for (i = 0; i < hisi_hba->queue_count; i++) { 1895 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS; 1896 if (hisi_hba->cmd_hdr[i]) 1897 dma_free_coherent(dev, s, 1898 hisi_hba->cmd_hdr[i], 1899 hisi_hba->cmd_hdr_dma[i]); 1900 1901 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; 1902 if (hisi_hba->complete_hdr[i]) 1903 dma_free_coherent(dev, s, 1904 hisi_hba->complete_hdr[i], 1905 hisi_hba->complete_hdr_dma[i]); 1906 } 1907 1908 dma_pool_destroy(hisi_hba->buffer_pool); 1909 1910 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct); 1911 if (hisi_hba->itct) 1912 dma_free_coherent(dev, s, 1913 hisi_hba->itct, hisi_hba->itct_dma); 1914 1915 s = max_command_entries * sizeof(struct hisi_sas_iost); 1916 if (hisi_hba->iost) 1917 dma_free_coherent(dev, s, 1918 hisi_hba->iost, hisi_hba->iost_dma); 1919 1920 s = max_command_entries * sizeof(struct hisi_sas_breakpoint); 1921 if (hisi_hba->breakpoint) 1922 dma_free_coherent(dev, s, 1923 hisi_hba->breakpoint, 1924 hisi_hba->breakpoint_dma); 1925 1926 1927 s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS; 1928 if (hisi_hba->initial_fis) 1929 dma_free_coherent(dev, s, 1930 hisi_hba->initial_fis, 1931 hisi_hba->initial_fis_dma); 1932 1933 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint); 1934 if (hisi_hba->sata_breakpoint) 1935 dma_free_coherent(dev, s, 1936 hisi_hba->sata_breakpoint, 1937 hisi_hba->sata_breakpoint_dma); 1938 1939 if (hisi_hba->wq) 1940 destroy_workqueue(hisi_hba->wq); 1941 } 1942 EXPORT_SYMBOL_GPL(hisi_sas_free); 1943 1944 void hisi_sas_rst_work_handler(struct work_struct *work) 1945 { 1946 struct hisi_hba *hisi_hba = 1947 container_of(work, struct hisi_hba, rst_work); 1948 1949 hisi_sas_controller_reset(hisi_hba); 1950 } 1951 EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler); 1952 1953 void hisi_sas_sync_rst_work_handler(struct work_struct *work) 1954 { 1955 struct hisi_sas_rst *rst = 1956 container_of(work, struct hisi_sas_rst, work); 1957 1958 if (!hisi_sas_controller_reset(rst->hisi_hba)) 1959 rst->done = true; 1960 complete(rst->completion); 1961 } 1962 EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler); 1963 1964 int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba) 1965 { 1966 struct device *dev = hisi_hba->dev; 1967 struct platform_device *pdev = hisi_hba->platform_dev; 1968 struct device_node *np = pdev ? pdev->dev.of_node : NULL; 1969 struct clk *refclk; 1970 1971 if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr, 1972 SAS_ADDR_SIZE)) { 1973 dev_err(dev, "could not get property sas-addr\n"); 1974 return -ENOENT; 1975 } 1976 1977 if (np) { 1978 /* 1979 * These properties are only required for platform device-based 1980 * controller with DT firmware. 1981 */ 1982 hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np, 1983 "hisilicon,sas-syscon"); 1984 if (IS_ERR(hisi_hba->ctrl)) { 1985 dev_err(dev, "could not get syscon\n"); 1986 return -ENOENT; 1987 } 1988 1989 if (device_property_read_u32(dev, "ctrl-reset-reg", 1990 &hisi_hba->ctrl_reset_reg)) { 1991 dev_err(dev, 1992 "could not get property ctrl-reset-reg\n"); 1993 return -ENOENT; 1994 } 1995 1996 if (device_property_read_u32(dev, "ctrl-reset-sts-reg", 1997 &hisi_hba->ctrl_reset_sts_reg)) { 1998 dev_err(dev, 1999 "could not get property ctrl-reset-sts-reg\n"); 2000 return -ENOENT; 2001 } 2002 2003 if (device_property_read_u32(dev, "ctrl-clock-ena-reg", 2004 &hisi_hba->ctrl_clock_ena_reg)) { 2005 dev_err(dev, 2006 "could not get property ctrl-clock-ena-reg\n"); 2007 return -ENOENT; 2008 } 2009 } 2010 2011 refclk = devm_clk_get(dev, NULL); 2012 if (IS_ERR(refclk)) 2013 dev_dbg(dev, "no ref clk property\n"); 2014 else 2015 hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000; 2016 2017 if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) { 2018 dev_err(dev, "could not get property phy-count\n"); 2019 return -ENOENT; 2020 } 2021 2022 if (device_property_read_u32(dev, "queue-count", 2023 &hisi_hba->queue_count)) { 2024 dev_err(dev, "could not get property queue-count\n"); 2025 return -ENOENT; 2026 } 2027 2028 return 0; 2029 } 2030 EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info); 2031 2032 static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, 2033 const struct hisi_sas_hw *hw) 2034 { 2035 struct resource *res; 2036 struct Scsi_Host *shost; 2037 struct hisi_hba *hisi_hba; 2038 struct device *dev = &pdev->dev; 2039 2040 shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba)); 2041 if (!shost) { 2042 dev_err(dev, "scsi host alloc failed\n"); 2043 return NULL; 2044 } 2045 hisi_hba = shost_priv(shost); 2046 2047 INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler); 2048 hisi_hba->hw = hw; 2049 hisi_hba->dev = dev; 2050 hisi_hba->platform_dev = pdev; 2051 hisi_hba->shost = shost; 2052 SHOST_TO_SAS_HA(shost) = &hisi_hba->sha; 2053 2054 timer_setup(&hisi_hba->timer, NULL, 0); 2055 2056 if (hisi_sas_get_fw_info(hisi_hba) < 0) 2057 goto err_out; 2058 2059 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) && 2060 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) { 2061 dev_err(dev, "No usable DMA addressing method\n"); 2062 goto err_out; 2063 } 2064 2065 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2066 hisi_hba->regs = devm_ioremap_resource(dev, res); 2067 if (IS_ERR(hisi_hba->regs)) 2068 goto err_out; 2069 2070 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 2071 if (res) { 2072 hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res); 2073 if (IS_ERR(hisi_hba->sgpio_regs)) 2074 goto err_out; 2075 } 2076 2077 if (hisi_sas_alloc(hisi_hba, shost)) { 2078 hisi_sas_free(hisi_hba); 2079 goto err_out; 2080 } 2081 2082 return shost; 2083 err_out: 2084 scsi_host_put(shost); 2085 dev_err(dev, "shost alloc failed\n"); 2086 return NULL; 2087 } 2088 2089 void hisi_sas_init_add(struct hisi_hba *hisi_hba) 2090 { 2091 int i; 2092 2093 for (i = 0; i < hisi_hba->n_phy; i++) 2094 memcpy(&hisi_hba->phy[i].dev_sas_addr, 2095 hisi_hba->sas_addr, 2096 SAS_ADDR_SIZE); 2097 } 2098 EXPORT_SYMBOL_GPL(hisi_sas_init_add); 2099 2100 int hisi_sas_probe(struct platform_device *pdev, 2101 const struct hisi_sas_hw *hw) 2102 { 2103 struct Scsi_Host *shost; 2104 struct hisi_hba *hisi_hba; 2105 struct device *dev = &pdev->dev; 2106 struct asd_sas_phy **arr_phy; 2107 struct asd_sas_port **arr_port; 2108 struct sas_ha_struct *sha; 2109 int rc, phy_nr, port_nr, i; 2110 2111 shost = hisi_sas_shost_alloc(pdev, hw); 2112 if (!shost) 2113 return -ENOMEM; 2114 2115 sha = SHOST_TO_SAS_HA(shost); 2116 hisi_hba = shost_priv(shost); 2117 platform_set_drvdata(pdev, sha); 2118 2119 phy_nr = port_nr = hisi_hba->n_phy; 2120 2121 arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL); 2122 arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL); 2123 if (!arr_phy || !arr_port) { 2124 rc = -ENOMEM; 2125 goto err_out_ha; 2126 } 2127 2128 sha->sas_phy = arr_phy; 2129 sha->sas_port = arr_port; 2130 sha->lldd_ha = hisi_hba; 2131 2132 shost->transportt = hisi_sas_stt; 2133 shost->max_id = HISI_SAS_MAX_DEVICES; 2134 shost->max_lun = ~0; 2135 shost->max_channel = 1; 2136 shost->max_cmd_len = 16; 2137 shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT); 2138 shost->can_queue = hisi_hba->hw->max_command_entries; 2139 shost->cmd_per_lun = hisi_hba->hw->max_command_entries; 2140 2141 sha->sas_ha_name = DRV_NAME; 2142 sha->dev = hisi_hba->dev; 2143 sha->lldd_module = THIS_MODULE; 2144 sha->sas_addr = &hisi_hba->sas_addr[0]; 2145 sha->num_phys = hisi_hba->n_phy; 2146 sha->core.shost = hisi_hba->shost; 2147 2148 for (i = 0; i < hisi_hba->n_phy; i++) { 2149 sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy; 2150 sha->sas_port[i] = &hisi_hba->port[i].sas_port; 2151 } 2152 2153 hisi_sas_init_add(hisi_hba); 2154 2155 rc = scsi_add_host(shost, &pdev->dev); 2156 if (rc) 2157 goto err_out_ha; 2158 2159 rc = sas_register_ha(sha); 2160 if (rc) 2161 goto err_out_register_ha; 2162 2163 rc = hisi_hba->hw->hw_init(hisi_hba); 2164 if (rc) 2165 goto err_out_register_ha; 2166 2167 scsi_scan_host(shost); 2168 2169 return 0; 2170 2171 err_out_register_ha: 2172 scsi_remove_host(shost); 2173 err_out_ha: 2174 hisi_sas_free(hisi_hba); 2175 scsi_host_put(shost); 2176 return rc; 2177 } 2178 EXPORT_SYMBOL_GPL(hisi_sas_probe); 2179 2180 int hisi_sas_remove(struct platform_device *pdev) 2181 { 2182 struct sas_ha_struct *sha = platform_get_drvdata(pdev); 2183 struct hisi_hba *hisi_hba = sha->lldd_ha; 2184 struct Scsi_Host *shost = sha->core.shost; 2185 2186 sas_unregister_ha(sha); 2187 sas_remove_host(sha->core.shost); 2188 2189 hisi_sas_free(hisi_hba); 2190 scsi_host_put(shost); 2191 return 0; 2192 } 2193 EXPORT_SYMBOL_GPL(hisi_sas_remove); 2194 2195 static __init int hisi_sas_init(void) 2196 { 2197 hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops); 2198 if (!hisi_sas_stt) 2199 return -ENOMEM; 2200 2201 return 0; 2202 } 2203 2204 static __exit void hisi_sas_exit(void) 2205 { 2206 sas_release_transport(hisi_sas_stt); 2207 } 2208 2209 module_init(hisi_sas_init); 2210 module_exit(hisi_sas_exit); 2211 2212 MODULE_LICENSE("GPL"); 2213 MODULE_AUTHOR("John Garry <john.garry@huawei.com>"); 2214 MODULE_DESCRIPTION("HISILICON SAS controller driver"); 2215 MODULE_ALIAS("platform:" DRV_NAME); 2216