1 /* 2 * Copyright (c) 2015 Linaro Ltd. 3 * Copyright (c) 2015 Hisilicon Limited. 4 * 5 * This program is free software; you can redistribute it and/or modify 6 * it under the terms of the GNU General Public License as published by 7 * the Free Software Foundation; either version 2 of the License, or 8 * (at your option) any later version. 9 * 10 */ 11 12 #include "hisi_sas.h" 13 #define DRV_NAME "hisi_sas" 14 15 #define DEV_IS_GONE(dev) \ 16 ((!dev) || (dev->dev_type == SAS_PHY_UNUSED)) 17 18 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device, 19 u8 *lun, struct hisi_sas_tmf_task *tmf); 20 static int 21 hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, 22 struct domain_device *device, 23 int abort_flag, int tag); 24 static int hisi_sas_softreset_ata_disk(struct domain_device *device); 25 26 u8 hisi_sas_get_ata_protocol(u8 cmd, int direction) 27 { 28 switch (cmd) { 29 case ATA_CMD_FPDMA_WRITE: 30 case ATA_CMD_FPDMA_READ: 31 case ATA_CMD_FPDMA_RECV: 32 case ATA_CMD_FPDMA_SEND: 33 case ATA_CMD_NCQ_NON_DATA: 34 return HISI_SAS_SATA_PROTOCOL_FPDMA; 35 36 case ATA_CMD_DOWNLOAD_MICRO: 37 case ATA_CMD_ID_ATA: 38 case ATA_CMD_PMP_READ: 39 case ATA_CMD_READ_LOG_EXT: 40 case ATA_CMD_PIO_READ: 41 case ATA_CMD_PIO_READ_EXT: 42 case ATA_CMD_PMP_WRITE: 43 case ATA_CMD_WRITE_LOG_EXT: 44 case ATA_CMD_PIO_WRITE: 45 case ATA_CMD_PIO_WRITE_EXT: 46 return HISI_SAS_SATA_PROTOCOL_PIO; 47 48 case ATA_CMD_DSM: 49 case ATA_CMD_DOWNLOAD_MICRO_DMA: 50 case ATA_CMD_PMP_READ_DMA: 51 case ATA_CMD_PMP_WRITE_DMA: 52 case ATA_CMD_READ: 53 case ATA_CMD_READ_EXT: 54 case ATA_CMD_READ_LOG_DMA_EXT: 55 case ATA_CMD_READ_STREAM_DMA_EXT: 56 case ATA_CMD_TRUSTED_RCV_DMA: 57 case ATA_CMD_TRUSTED_SND_DMA: 58 case ATA_CMD_WRITE: 59 case ATA_CMD_WRITE_EXT: 60 case ATA_CMD_WRITE_FUA_EXT: 61 case ATA_CMD_WRITE_QUEUED: 62 case ATA_CMD_WRITE_LOG_DMA_EXT: 63 case ATA_CMD_WRITE_STREAM_DMA_EXT: 64 return HISI_SAS_SATA_PROTOCOL_DMA; 65 66 case ATA_CMD_CHK_POWER: 67 case ATA_CMD_DEV_RESET: 68 case ATA_CMD_EDD: 69 case ATA_CMD_FLUSH: 70 case ATA_CMD_FLUSH_EXT: 71 case ATA_CMD_VERIFY: 72 case ATA_CMD_VERIFY_EXT: 73 case ATA_CMD_SET_FEATURES: 74 case ATA_CMD_STANDBY: 75 case ATA_CMD_STANDBYNOW1: 76 return HISI_SAS_SATA_PROTOCOL_NONDATA; 77 default: 78 if (direction == DMA_NONE) 79 return HISI_SAS_SATA_PROTOCOL_NONDATA; 80 return HISI_SAS_SATA_PROTOCOL_PIO; 81 } 82 } 83 EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol); 84 85 void hisi_sas_sata_done(struct sas_task *task, 86 struct hisi_sas_slot *slot) 87 { 88 struct task_status_struct *ts = &task->task_status; 89 struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf; 90 struct hisi_sas_status_buffer *status_buf = 91 hisi_sas_status_buf_addr_mem(slot); 92 u8 *iu = &status_buf->iu[0]; 93 struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu; 94 95 resp->frame_len = sizeof(struct dev_to_host_fis); 96 memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis)); 97 98 ts->buf_valid_size = sizeof(*resp); 99 } 100 EXPORT_SYMBOL_GPL(hisi_sas_sata_done); 101 102 int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag) 103 { 104 struct ata_queued_cmd *qc = task->uldd_task; 105 106 if (qc) { 107 if (qc->tf.command == ATA_CMD_FPDMA_WRITE || 108 qc->tf.command == ATA_CMD_FPDMA_READ) { 109 *tag = qc->tag; 110 return 1; 111 } 112 } 113 return 0; 114 } 115 EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag); 116 117 static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device) 118 { 119 return device->port->ha->lldd_ha; 120 } 121 122 struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port) 123 { 124 return container_of(sas_port, struct hisi_sas_port, sas_port); 125 } 126 EXPORT_SYMBOL_GPL(to_hisi_sas_port); 127 128 static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx) 129 { 130 void *bitmap = hisi_hba->slot_index_tags; 131 132 clear_bit(slot_idx, bitmap); 133 } 134 135 static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx) 136 { 137 hisi_sas_slot_index_clear(hisi_hba, slot_idx); 138 } 139 140 static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx) 141 { 142 void *bitmap = hisi_hba->slot_index_tags; 143 144 set_bit(slot_idx, bitmap); 145 } 146 147 static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx) 148 { 149 unsigned int index; 150 void *bitmap = hisi_hba->slot_index_tags; 151 152 index = find_first_zero_bit(bitmap, hisi_hba->slot_index_count); 153 if (index >= hisi_hba->slot_index_count) 154 return -SAS_QUEUE_FULL; 155 hisi_sas_slot_index_set(hisi_hba, index); 156 *slot_idx = index; 157 return 0; 158 } 159 160 static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba) 161 { 162 int i; 163 164 for (i = 0; i < hisi_hba->slot_index_count; ++i) 165 hisi_sas_slot_index_clear(hisi_hba, i); 166 } 167 168 void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, 169 struct hisi_sas_slot *slot) 170 { 171 172 if (task) { 173 struct device *dev = hisi_hba->dev; 174 struct domain_device *device = task->dev; 175 struct hisi_sas_device *sas_dev = device->lldd_dev; 176 177 if (!sas_protocol_ata(task->task_proto)) 178 if (slot->n_elem) 179 dma_unmap_sg(dev, task->scatter, slot->n_elem, 180 task->data_dir); 181 182 task->lldd_task = NULL; 183 184 if (sas_dev) 185 atomic64_dec(&sas_dev->running_req); 186 } 187 188 if (slot->buf) 189 dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma); 190 191 192 list_del_init(&slot->entry); 193 slot->task = NULL; 194 slot->port = NULL; 195 hisi_sas_slot_index_free(hisi_hba, slot->idx); 196 197 /* slot memory is fully zeroed when it is reused */ 198 } 199 EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free); 200 201 static int hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba, 202 struct hisi_sas_slot *slot) 203 { 204 return hisi_hba->hw->prep_smp(hisi_hba, slot); 205 } 206 207 static int hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba, 208 struct hisi_sas_slot *slot, int is_tmf, 209 struct hisi_sas_tmf_task *tmf) 210 { 211 return hisi_hba->hw->prep_ssp(hisi_hba, slot, is_tmf, tmf); 212 } 213 214 static int hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba, 215 struct hisi_sas_slot *slot) 216 { 217 return hisi_hba->hw->prep_stp(hisi_hba, slot); 218 } 219 220 static int hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba, 221 struct hisi_sas_slot *slot, 222 int device_id, int abort_flag, int tag_to_abort) 223 { 224 return hisi_hba->hw->prep_abort(hisi_hba, slot, 225 device_id, abort_flag, tag_to_abort); 226 } 227 228 /* 229 * This function will issue an abort TMF regardless of whether the 230 * task is in the sdev or not. Then it will do the task complete 231 * cleanup and callbacks. 232 */ 233 static void hisi_sas_slot_abort(struct work_struct *work) 234 { 235 struct hisi_sas_slot *abort_slot = 236 container_of(work, struct hisi_sas_slot, abort_slot); 237 struct sas_task *task = abort_slot->task; 238 struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev); 239 struct scsi_cmnd *cmnd = task->uldd_task; 240 struct hisi_sas_tmf_task tmf_task; 241 struct scsi_lun lun; 242 struct device *dev = hisi_hba->dev; 243 int tag = abort_slot->idx; 244 unsigned long flags; 245 246 if (!(task->task_proto & SAS_PROTOCOL_SSP)) { 247 dev_err(dev, "cannot abort slot for non-ssp task\n"); 248 goto out; 249 } 250 251 int_to_scsilun(cmnd->device->lun, &lun); 252 tmf_task.tmf = TMF_ABORT_TASK; 253 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag); 254 255 hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task); 256 out: 257 /* Do cleanup for this task */ 258 spin_lock_irqsave(&hisi_hba->lock, flags); 259 hisi_sas_slot_task_free(hisi_hba, task, abort_slot); 260 spin_unlock_irqrestore(&hisi_hba->lock, flags); 261 if (task->task_done) 262 task->task_done(task); 263 } 264 265 static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq 266 *dq, int is_tmf, struct hisi_sas_tmf_task *tmf, 267 int *pass) 268 { 269 struct hisi_hba *hisi_hba = dq->hisi_hba; 270 struct domain_device *device = task->dev; 271 struct hisi_sas_device *sas_dev = device->lldd_dev; 272 struct hisi_sas_port *port; 273 struct hisi_sas_slot *slot; 274 struct hisi_sas_cmd_hdr *cmd_hdr_base; 275 struct asd_sas_port *sas_port = device->port; 276 struct device *dev = hisi_hba->dev; 277 int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx; 278 unsigned long flags; 279 280 if (!sas_port) { 281 struct task_status_struct *ts = &task->task_status; 282 283 ts->resp = SAS_TASK_UNDELIVERED; 284 ts->stat = SAS_PHY_DOWN; 285 /* 286 * libsas will use dev->port, should 287 * not call task_done for sata 288 */ 289 if (device->dev_type != SAS_SATA_DEV) 290 task->task_done(task); 291 return SAS_PHY_DOWN; 292 } 293 294 if (DEV_IS_GONE(sas_dev)) { 295 if (sas_dev) 296 dev_info(dev, "task prep: device %d not ready\n", 297 sas_dev->device_id); 298 else 299 dev_info(dev, "task prep: device %016llx not ready\n", 300 SAS_ADDR(device->sas_addr)); 301 302 return SAS_PHY_DOWN; 303 } 304 305 port = to_hisi_sas_port(sas_port); 306 if (port && !port->port_attached) { 307 dev_info(dev, "task prep: %s port%d not attach device\n", 308 (dev_is_sata(device)) ? 309 "SATA/STP" : "SAS", 310 device->port->id); 311 312 return SAS_PHY_DOWN; 313 } 314 315 if (!sas_protocol_ata(task->task_proto)) { 316 if (task->num_scatter) { 317 n_elem = dma_map_sg(dev, task->scatter, 318 task->num_scatter, task->data_dir); 319 if (!n_elem) { 320 rc = -ENOMEM; 321 goto prep_out; 322 } 323 } 324 } else 325 n_elem = task->num_scatter; 326 327 spin_lock_irqsave(&hisi_hba->lock, flags); 328 if (hisi_hba->hw->slot_index_alloc) 329 rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx, 330 device); 331 else 332 rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx); 333 if (rc) { 334 spin_unlock_irqrestore(&hisi_hba->lock, flags); 335 goto err_out; 336 } 337 spin_unlock_irqrestore(&hisi_hba->lock, flags); 338 339 rc = hisi_hba->hw->get_free_slot(hisi_hba, dq); 340 if (rc) 341 goto err_out_tag; 342 343 dlvry_queue = dq->id; 344 dlvry_queue_slot = dq->wr_point; 345 slot = &hisi_hba->slot_info[slot_idx]; 346 memset(slot, 0, sizeof(struct hisi_sas_slot)); 347 348 slot->idx = slot_idx; 349 slot->n_elem = n_elem; 350 slot->dlvry_queue = dlvry_queue; 351 slot->dlvry_queue_slot = dlvry_queue_slot; 352 cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue]; 353 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot]; 354 slot->task = task; 355 slot->port = port; 356 task->lldd_task = slot; 357 INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort); 358 359 slot->buf = dma_pool_alloc(hisi_hba->buffer_pool, 360 GFP_ATOMIC, &slot->buf_dma); 361 if (!slot->buf) { 362 rc = -ENOMEM; 363 goto err_out_slot_buf; 364 } 365 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr)); 366 memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ); 367 memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ); 368 369 switch (task->task_proto) { 370 case SAS_PROTOCOL_SMP: 371 rc = hisi_sas_task_prep_smp(hisi_hba, slot); 372 break; 373 case SAS_PROTOCOL_SSP: 374 rc = hisi_sas_task_prep_ssp(hisi_hba, slot, is_tmf, tmf); 375 break; 376 case SAS_PROTOCOL_SATA: 377 case SAS_PROTOCOL_STP: 378 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: 379 rc = hisi_sas_task_prep_ata(hisi_hba, slot); 380 break; 381 default: 382 dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n", 383 task->task_proto); 384 rc = -EINVAL; 385 break; 386 } 387 388 if (rc) { 389 dev_err(dev, "task prep: rc = 0x%x\n", rc); 390 goto err_out_buf; 391 } 392 393 list_add_tail(&slot->entry, &sas_dev->list); 394 spin_lock_irqsave(&task->task_state_lock, flags); 395 task->task_state_flags |= SAS_TASK_AT_INITIATOR; 396 spin_unlock_irqrestore(&task->task_state_lock, flags); 397 398 dq->slot_prep = slot; 399 400 atomic64_inc(&sas_dev->running_req); 401 ++(*pass); 402 403 return 0; 404 405 err_out_buf: 406 dma_pool_free(hisi_hba->buffer_pool, slot->buf, 407 slot->buf_dma); 408 err_out_slot_buf: 409 /* Nothing to be done */ 410 err_out_tag: 411 spin_lock_irqsave(&hisi_hba->lock, flags); 412 hisi_sas_slot_index_free(hisi_hba, slot_idx); 413 spin_unlock_irqrestore(&hisi_hba->lock, flags); 414 err_out: 415 dev_err(dev, "task prep: failed[%d]!\n", rc); 416 if (!sas_protocol_ata(task->task_proto)) 417 if (n_elem) 418 dma_unmap_sg(dev, task->scatter, n_elem, 419 task->data_dir); 420 prep_out: 421 return rc; 422 } 423 424 static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags, 425 int is_tmf, struct hisi_sas_tmf_task *tmf) 426 { 427 u32 rc; 428 u32 pass = 0; 429 unsigned long flags; 430 struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev); 431 struct device *dev = hisi_hba->dev; 432 struct domain_device *device = task->dev; 433 struct hisi_sas_device *sas_dev = device->lldd_dev; 434 struct hisi_sas_dq *dq = sas_dev->dq; 435 436 if (unlikely(test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))) 437 return -EINVAL; 438 439 /* protect task_prep and start_delivery sequence */ 440 spin_lock_irqsave(&dq->lock, flags); 441 rc = hisi_sas_task_prep(task, dq, is_tmf, tmf, &pass); 442 if (rc) 443 dev_err(dev, "task exec: failed[%d]!\n", rc); 444 445 if (likely(pass)) 446 hisi_hba->hw->start_delivery(dq); 447 spin_unlock_irqrestore(&dq->lock, flags); 448 449 return rc; 450 } 451 452 static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no) 453 { 454 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 455 struct asd_sas_phy *sas_phy = &phy->sas_phy; 456 struct sas_ha_struct *sas_ha; 457 458 if (!phy->phy_attached) 459 return; 460 461 sas_ha = &hisi_hba->sha; 462 sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE); 463 464 if (sas_phy->phy) { 465 struct sas_phy *sphy = sas_phy->phy; 466 467 sphy->negotiated_linkrate = sas_phy->linkrate; 468 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; 469 sphy->maximum_linkrate_hw = 470 hisi_hba->hw->phy_get_max_linkrate(); 471 if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) 472 sphy->minimum_linkrate = phy->minimum_linkrate; 473 474 if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) 475 sphy->maximum_linkrate = phy->maximum_linkrate; 476 } 477 478 if (phy->phy_type & PORT_TYPE_SAS) { 479 struct sas_identify_frame *id; 480 481 id = (struct sas_identify_frame *)phy->frame_rcvd; 482 id->dev_type = phy->identify.device_type; 483 id->initiator_bits = SAS_PROTOCOL_ALL; 484 id->target_bits = phy->identify.target_port_protocols; 485 } else if (phy->phy_type & PORT_TYPE_SATA) { 486 /*Nothing*/ 487 } 488 489 sas_phy->frame_rcvd_size = phy->frame_rcvd_size; 490 sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED); 491 } 492 493 static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device) 494 { 495 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 496 struct hisi_sas_device *sas_dev = NULL; 497 int i; 498 499 spin_lock(&hisi_hba->lock); 500 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 501 if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) { 502 int queue = i % hisi_hba->queue_count; 503 struct hisi_sas_dq *dq = &hisi_hba->dq[queue]; 504 505 hisi_hba->devices[i].device_id = i; 506 sas_dev = &hisi_hba->devices[i]; 507 sas_dev->dev_status = HISI_SAS_DEV_NORMAL; 508 sas_dev->dev_type = device->dev_type; 509 sas_dev->hisi_hba = hisi_hba; 510 sas_dev->sas_device = device; 511 sas_dev->dq = dq; 512 INIT_LIST_HEAD(&hisi_hba->devices[i].list); 513 break; 514 } 515 } 516 spin_unlock(&hisi_hba->lock); 517 518 return sas_dev; 519 } 520 521 static int hisi_sas_dev_found(struct domain_device *device) 522 { 523 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 524 struct domain_device *parent_dev = device->parent; 525 struct hisi_sas_device *sas_dev; 526 struct device *dev = hisi_hba->dev; 527 528 if (hisi_hba->hw->alloc_dev) 529 sas_dev = hisi_hba->hw->alloc_dev(device); 530 else 531 sas_dev = hisi_sas_alloc_dev(device); 532 if (!sas_dev) { 533 dev_err(dev, "fail alloc dev: max support %d devices\n", 534 HISI_SAS_MAX_DEVICES); 535 return -EINVAL; 536 } 537 538 device->lldd_dev = sas_dev; 539 hisi_hba->hw->setup_itct(hisi_hba, sas_dev); 540 541 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) { 542 int phy_no; 543 u8 phy_num = parent_dev->ex_dev.num_phys; 544 struct ex_phy *phy; 545 546 for (phy_no = 0; phy_no < phy_num; phy_no++) { 547 phy = &parent_dev->ex_dev.ex_phy[phy_no]; 548 if (SAS_ADDR(phy->attached_sas_addr) == 549 SAS_ADDR(device->sas_addr)) { 550 sas_dev->attached_phy = phy_no; 551 break; 552 } 553 } 554 555 if (phy_no == phy_num) { 556 dev_info(dev, "dev found: no attached " 557 "dev:%016llx at ex:%016llx\n", 558 SAS_ADDR(device->sas_addr), 559 SAS_ADDR(parent_dev->sas_addr)); 560 return -EINVAL; 561 } 562 } 563 564 return 0; 565 } 566 567 static int hisi_sas_slave_configure(struct scsi_device *sdev) 568 { 569 struct domain_device *dev = sdev_to_domain_dev(sdev); 570 int ret = sas_slave_configure(sdev); 571 572 if (ret) 573 return ret; 574 if (!dev_is_sata(dev)) 575 sas_change_queue_depth(sdev, 64); 576 577 return 0; 578 } 579 580 static void hisi_sas_scan_start(struct Scsi_Host *shost) 581 { 582 struct hisi_hba *hisi_hba = shost_priv(shost); 583 584 hisi_hba->hw->phys_init(hisi_hba); 585 } 586 587 static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time) 588 { 589 struct hisi_hba *hisi_hba = shost_priv(shost); 590 struct sas_ha_struct *sha = &hisi_hba->sha; 591 592 /* Wait for PHY up interrupt to occur */ 593 if (time < HZ) 594 return 0; 595 596 sas_drain_work(sha); 597 return 1; 598 } 599 600 static void hisi_sas_phyup_work(struct work_struct *work) 601 { 602 struct hisi_sas_phy *phy = 603 container_of(work, struct hisi_sas_phy, phyup_ws); 604 struct hisi_hba *hisi_hba = phy->hisi_hba; 605 struct asd_sas_phy *sas_phy = &phy->sas_phy; 606 int phy_no = sas_phy->id; 607 608 hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */ 609 hisi_sas_bytes_dmaed(hisi_hba, phy_no); 610 } 611 612 static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no) 613 { 614 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 615 struct asd_sas_phy *sas_phy = &phy->sas_phy; 616 617 phy->hisi_hba = hisi_hba; 618 phy->port = NULL; 619 init_timer(&phy->timer); 620 sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0; 621 sas_phy->class = SAS; 622 sas_phy->iproto = SAS_PROTOCOL_ALL; 623 sas_phy->tproto = 0; 624 sas_phy->type = PHY_TYPE_PHYSICAL; 625 sas_phy->role = PHY_ROLE_INITIATOR; 626 sas_phy->oob_mode = OOB_NOT_CONNECTED; 627 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; 628 sas_phy->id = phy_no; 629 sas_phy->sas_addr = &hisi_hba->sas_addr[0]; 630 sas_phy->frame_rcvd = &phy->frame_rcvd[0]; 631 sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata; 632 sas_phy->lldd_phy = phy; 633 634 INIT_WORK(&phy->phyup_ws, hisi_sas_phyup_work); 635 } 636 637 static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy) 638 { 639 struct sas_ha_struct *sas_ha = sas_phy->ha; 640 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; 641 struct hisi_sas_phy *phy = sas_phy->lldd_phy; 642 struct asd_sas_port *sas_port = sas_phy->port; 643 struct hisi_sas_port *port = to_hisi_sas_port(sas_port); 644 unsigned long flags; 645 646 if (!sas_port) 647 return; 648 649 spin_lock_irqsave(&hisi_hba->lock, flags); 650 port->port_attached = 1; 651 port->id = phy->port_id; 652 phy->port = port; 653 sas_port->lldd_port = port; 654 spin_unlock_irqrestore(&hisi_hba->lock, flags); 655 } 656 657 static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task, 658 struct hisi_sas_slot *slot) 659 { 660 if (task) { 661 unsigned long flags; 662 struct task_status_struct *ts; 663 664 ts = &task->task_status; 665 666 ts->resp = SAS_TASK_COMPLETE; 667 ts->stat = SAS_ABORTED_TASK; 668 spin_lock_irqsave(&task->task_state_lock, flags); 669 task->task_state_flags &= 670 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); 671 task->task_state_flags |= SAS_TASK_STATE_DONE; 672 spin_unlock_irqrestore(&task->task_state_lock, flags); 673 } 674 675 hisi_sas_slot_task_free(hisi_hba, task, slot); 676 } 677 678 /* hisi_hba.lock should be locked */ 679 static void hisi_sas_release_task(struct hisi_hba *hisi_hba, 680 struct domain_device *device) 681 { 682 struct hisi_sas_slot *slot, *slot2; 683 struct hisi_sas_device *sas_dev = device->lldd_dev; 684 685 list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry) 686 hisi_sas_do_release_task(hisi_hba, slot->task, slot); 687 } 688 689 static void hisi_sas_release_tasks(struct hisi_hba *hisi_hba) 690 { 691 struct hisi_sas_device *sas_dev; 692 struct domain_device *device; 693 int i; 694 695 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 696 sas_dev = &hisi_hba->devices[i]; 697 device = sas_dev->sas_device; 698 699 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || 700 !device) 701 continue; 702 703 hisi_sas_release_task(hisi_hba, device); 704 } 705 } 706 707 static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba, 708 struct domain_device *device) 709 { 710 if (hisi_hba->hw->dereg_device) 711 hisi_hba->hw->dereg_device(hisi_hba, device); 712 } 713 714 static void hisi_sas_dev_gone(struct domain_device *device) 715 { 716 struct hisi_sas_device *sas_dev = device->lldd_dev; 717 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 718 struct device *dev = hisi_hba->dev; 719 int dev_id = sas_dev->device_id; 720 721 dev_info(dev, "found dev[%d:%x] is gone\n", 722 sas_dev->device_id, sas_dev->dev_type); 723 724 hisi_sas_internal_task_abort(hisi_hba, device, 725 HISI_SAS_INT_ABT_DEV, 0); 726 727 hisi_sas_dereg_device(hisi_hba, device); 728 729 hisi_hba->hw->free_device(hisi_hba, sas_dev); 730 device->lldd_dev = NULL; 731 memset(sas_dev, 0, sizeof(*sas_dev)); 732 sas_dev->device_id = dev_id; 733 sas_dev->dev_type = SAS_PHY_UNUSED; 734 sas_dev->dev_status = HISI_SAS_DEV_NORMAL; 735 } 736 737 static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags) 738 { 739 return hisi_sas_task_exec(task, gfp_flags, 0, NULL); 740 } 741 742 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, 743 void *funcdata) 744 { 745 struct sas_ha_struct *sas_ha = sas_phy->ha; 746 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; 747 int phy_no = sas_phy->id; 748 749 switch (func) { 750 case PHY_FUNC_HARD_RESET: 751 hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no); 752 break; 753 754 case PHY_FUNC_LINK_RESET: 755 hisi_hba->hw->phy_disable(hisi_hba, phy_no); 756 msleep(100); 757 hisi_hba->hw->phy_enable(hisi_hba, phy_no); 758 break; 759 760 case PHY_FUNC_DISABLE: 761 hisi_hba->hw->phy_disable(hisi_hba, phy_no); 762 break; 763 764 case PHY_FUNC_SET_LINK_RATE: 765 hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, funcdata); 766 break; 767 768 case PHY_FUNC_RELEASE_SPINUP_HOLD: 769 default: 770 return -EOPNOTSUPP; 771 } 772 return 0; 773 } 774 775 static void hisi_sas_task_done(struct sas_task *task) 776 { 777 if (!del_timer(&task->slow_task->timer)) 778 return; 779 complete(&task->slow_task->completion); 780 } 781 782 static void hisi_sas_tmf_timedout(unsigned long data) 783 { 784 struct sas_task *task = (struct sas_task *)data; 785 unsigned long flags; 786 787 spin_lock_irqsave(&task->task_state_lock, flags); 788 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) 789 task->task_state_flags |= SAS_TASK_STATE_ABORTED; 790 spin_unlock_irqrestore(&task->task_state_lock, flags); 791 792 complete(&task->slow_task->completion); 793 } 794 795 #define TASK_TIMEOUT 20 796 #define TASK_RETRY 3 797 static int hisi_sas_exec_internal_tmf_task(struct domain_device *device, 798 void *parameter, u32 para_len, 799 struct hisi_sas_tmf_task *tmf) 800 { 801 struct hisi_sas_device *sas_dev = device->lldd_dev; 802 struct hisi_hba *hisi_hba = sas_dev->hisi_hba; 803 struct device *dev = hisi_hba->dev; 804 struct sas_task *task; 805 int res, retry; 806 807 for (retry = 0; retry < TASK_RETRY; retry++) { 808 task = sas_alloc_slow_task(GFP_KERNEL); 809 if (!task) 810 return -ENOMEM; 811 812 task->dev = device; 813 task->task_proto = device->tproto; 814 815 if (dev_is_sata(device)) { 816 task->ata_task.device_control_reg_update = 1; 817 memcpy(&task->ata_task.fis, parameter, para_len); 818 } else { 819 memcpy(&task->ssp_task, parameter, para_len); 820 } 821 task->task_done = hisi_sas_task_done; 822 823 task->slow_task->timer.data = (unsigned long) task; 824 task->slow_task->timer.function = hisi_sas_tmf_timedout; 825 task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ; 826 add_timer(&task->slow_task->timer); 827 828 res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf); 829 830 if (res) { 831 del_timer(&task->slow_task->timer); 832 dev_err(dev, "abort tmf: executing internal task failed: %d\n", 833 res); 834 goto ex_err; 835 } 836 837 wait_for_completion(&task->slow_task->completion); 838 res = TMF_RESP_FUNC_FAILED; 839 /* Even TMF timed out, return direct. */ 840 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { 841 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { 842 struct hisi_sas_slot *slot = task->lldd_task; 843 844 dev_err(dev, "abort tmf: TMF task timeout\n"); 845 if (slot) 846 slot->task = NULL; 847 848 goto ex_err; 849 } 850 } 851 852 if (task->task_status.resp == SAS_TASK_COMPLETE && 853 task->task_status.stat == TMF_RESP_FUNC_COMPLETE) { 854 res = TMF_RESP_FUNC_COMPLETE; 855 break; 856 } 857 858 if (task->task_status.resp == SAS_TASK_COMPLETE && 859 task->task_status.stat == TMF_RESP_FUNC_SUCC) { 860 res = TMF_RESP_FUNC_SUCC; 861 break; 862 } 863 864 if (task->task_status.resp == SAS_TASK_COMPLETE && 865 task->task_status.stat == SAS_DATA_UNDERRUN) { 866 /* no error, but return the number of bytes of 867 * underrun 868 */ 869 dev_warn(dev, "abort tmf: task to dev %016llx " 870 "resp: 0x%x sts 0x%x underrun\n", 871 SAS_ADDR(device->sas_addr), 872 task->task_status.resp, 873 task->task_status.stat); 874 res = task->task_status.residual; 875 break; 876 } 877 878 if (task->task_status.resp == SAS_TASK_COMPLETE && 879 task->task_status.stat == SAS_DATA_OVERRUN) { 880 dev_warn(dev, "abort tmf: blocked task error\n"); 881 res = -EMSGSIZE; 882 break; 883 } 884 885 dev_warn(dev, "abort tmf: task to dev " 886 "%016llx resp: 0x%x status 0x%x\n", 887 SAS_ADDR(device->sas_addr), task->task_status.resp, 888 task->task_status.stat); 889 sas_free_task(task); 890 task = NULL; 891 } 892 ex_err: 893 if (retry == TASK_RETRY) 894 dev_warn(dev, "abort tmf: executing internal task failed!\n"); 895 sas_free_task(task); 896 return res; 897 } 898 899 static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev, 900 bool reset, int pmp, u8 *fis) 901 { 902 struct ata_taskfile tf; 903 904 ata_tf_init(dev, &tf); 905 if (reset) 906 tf.ctl |= ATA_SRST; 907 else 908 tf.ctl &= ~ATA_SRST; 909 tf.command = ATA_CMD_DEV_RESET; 910 ata_tf_to_fis(&tf, pmp, 0, fis); 911 } 912 913 static int hisi_sas_softreset_ata_disk(struct domain_device *device) 914 { 915 u8 fis[20] = {0}; 916 struct ata_port *ap = device->sata_dev.ap; 917 struct ata_link *link; 918 int rc = TMF_RESP_FUNC_FAILED; 919 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 920 struct device *dev = hisi_hba->dev; 921 int s = sizeof(struct host_to_dev_fis); 922 unsigned long flags; 923 924 ata_for_each_link(link, ap, EDGE) { 925 int pmp = sata_srst_pmp(link); 926 927 hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis); 928 rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL); 929 if (rc != TMF_RESP_FUNC_COMPLETE) 930 break; 931 } 932 933 if (rc == TMF_RESP_FUNC_COMPLETE) { 934 ata_for_each_link(link, ap, EDGE) { 935 int pmp = sata_srst_pmp(link); 936 937 hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis); 938 rc = hisi_sas_exec_internal_tmf_task(device, fis, 939 s, NULL); 940 if (rc != TMF_RESP_FUNC_COMPLETE) 941 dev_err(dev, "ata disk de-reset failed\n"); 942 } 943 } else { 944 dev_err(dev, "ata disk reset failed\n"); 945 } 946 947 if (rc == TMF_RESP_FUNC_COMPLETE) { 948 spin_lock_irqsave(&hisi_hba->lock, flags); 949 hisi_sas_release_task(hisi_hba, device); 950 spin_unlock_irqrestore(&hisi_hba->lock, flags); 951 } 952 953 return rc; 954 } 955 956 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device, 957 u8 *lun, struct hisi_sas_tmf_task *tmf) 958 { 959 struct sas_ssp_task ssp_task; 960 961 if (!(device->tproto & SAS_PROTOCOL_SSP)) 962 return TMF_RESP_FUNC_ESUPP; 963 964 memcpy(ssp_task.LUN, lun, 8); 965 966 return hisi_sas_exec_internal_tmf_task(device, &ssp_task, 967 sizeof(ssp_task), tmf); 968 } 969 970 static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba) 971 { 972 int rc; 973 974 if (!hisi_hba->hw->soft_reset) 975 return -1; 976 977 if (!test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) { 978 struct device *dev = hisi_hba->dev; 979 struct sas_ha_struct *sas_ha = &hisi_hba->sha; 980 unsigned long flags; 981 982 dev_dbg(dev, "controller reset begins!\n"); 983 scsi_block_requests(hisi_hba->shost); 984 rc = hisi_hba->hw->soft_reset(hisi_hba); 985 if (rc) { 986 dev_warn(dev, "controller reset failed (%d)\n", rc); 987 goto out; 988 } 989 spin_lock_irqsave(&hisi_hba->lock, flags); 990 hisi_sas_release_tasks(hisi_hba); 991 spin_unlock_irqrestore(&hisi_hba->lock, flags); 992 993 sas_ha->notify_ha_event(sas_ha, HAE_RESET); 994 dev_dbg(dev, "controller reset successful!\n"); 995 } else 996 return -1; 997 998 out: 999 scsi_unblock_requests(hisi_hba->shost); 1000 clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags); 1001 return rc; 1002 } 1003 1004 static int hisi_sas_abort_task(struct sas_task *task) 1005 { 1006 struct scsi_lun lun; 1007 struct hisi_sas_tmf_task tmf_task; 1008 struct domain_device *device = task->dev; 1009 struct hisi_sas_device *sas_dev = device->lldd_dev; 1010 struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev); 1011 struct device *dev = hisi_hba->dev; 1012 int rc = TMF_RESP_FUNC_FAILED; 1013 unsigned long flags; 1014 1015 if (!sas_dev) { 1016 dev_warn(dev, "Device has been removed\n"); 1017 return TMF_RESP_FUNC_FAILED; 1018 } 1019 1020 if (task->task_state_flags & SAS_TASK_STATE_DONE) { 1021 rc = TMF_RESP_FUNC_COMPLETE; 1022 goto out; 1023 } 1024 1025 sas_dev->dev_status = HISI_SAS_DEV_EH; 1026 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { 1027 struct scsi_cmnd *cmnd = task->uldd_task; 1028 struct hisi_sas_slot *slot = task->lldd_task; 1029 u32 tag = slot->idx; 1030 int rc2; 1031 1032 int_to_scsilun(cmnd->device->lun, &lun); 1033 tmf_task.tmf = TMF_ABORT_TASK; 1034 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag); 1035 1036 rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, 1037 &tmf_task); 1038 1039 rc2 = hisi_sas_internal_task_abort(hisi_hba, device, 1040 HISI_SAS_INT_ABT_CMD, tag); 1041 /* 1042 * If the TMF finds that the IO is not in the device and also 1043 * the internal abort does not succeed, then it is safe to 1044 * free the slot. 1045 * Note: if the internal abort succeeds then the slot 1046 * will have already been completed 1047 */ 1048 if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) { 1049 if (task->lldd_task) { 1050 spin_lock_irqsave(&hisi_hba->lock, flags); 1051 hisi_sas_do_release_task(hisi_hba, task, slot); 1052 spin_unlock_irqrestore(&hisi_hba->lock, flags); 1053 } 1054 } 1055 } else if (task->task_proto & SAS_PROTOCOL_SATA || 1056 task->task_proto & SAS_PROTOCOL_STP) { 1057 if (task->dev->dev_type == SAS_SATA_DEV) { 1058 hisi_sas_internal_task_abort(hisi_hba, device, 1059 HISI_SAS_INT_ABT_DEV, 0); 1060 hisi_sas_dereg_device(hisi_hba, device); 1061 rc = hisi_sas_softreset_ata_disk(device); 1062 } 1063 } else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) { 1064 /* SMP */ 1065 struct hisi_sas_slot *slot = task->lldd_task; 1066 u32 tag = slot->idx; 1067 1068 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1069 HISI_SAS_INT_ABT_CMD, tag); 1070 if (rc == TMF_RESP_FUNC_FAILED) { 1071 spin_lock_irqsave(&hisi_hba->lock, flags); 1072 hisi_sas_do_release_task(hisi_hba, task, slot); 1073 spin_unlock_irqrestore(&hisi_hba->lock, flags); 1074 } 1075 } 1076 1077 out: 1078 if (rc != TMF_RESP_FUNC_COMPLETE) 1079 dev_notice(dev, "abort task: rc=%d\n", rc); 1080 return rc; 1081 } 1082 1083 static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun) 1084 { 1085 struct hisi_sas_tmf_task tmf_task; 1086 int rc = TMF_RESP_FUNC_FAILED; 1087 1088 tmf_task.tmf = TMF_ABORT_TASK_SET; 1089 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task); 1090 1091 return rc; 1092 } 1093 1094 static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun) 1095 { 1096 int rc = TMF_RESP_FUNC_FAILED; 1097 struct hisi_sas_tmf_task tmf_task; 1098 1099 tmf_task.tmf = TMF_CLEAR_ACA; 1100 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task); 1101 1102 return rc; 1103 } 1104 1105 static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device) 1106 { 1107 struct sas_phy *phy = sas_get_local_phy(device); 1108 int rc, reset_type = (device->dev_type == SAS_SATA_DEV || 1109 (device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1; 1110 rc = sas_phy_reset(phy, reset_type); 1111 sas_put_local_phy(phy); 1112 msleep(2000); 1113 return rc; 1114 } 1115 1116 static int hisi_sas_I_T_nexus_reset(struct domain_device *device) 1117 { 1118 struct hisi_sas_device *sas_dev = device->lldd_dev; 1119 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1120 unsigned long flags; 1121 int rc = TMF_RESP_FUNC_FAILED; 1122 1123 if (sas_dev->dev_status != HISI_SAS_DEV_EH) 1124 return TMF_RESP_FUNC_FAILED; 1125 sas_dev->dev_status = HISI_SAS_DEV_NORMAL; 1126 1127 hisi_sas_internal_task_abort(hisi_hba, device, 1128 HISI_SAS_INT_ABT_DEV, 0); 1129 hisi_sas_dereg_device(hisi_hba, device); 1130 1131 rc = hisi_sas_debug_I_T_nexus_reset(device); 1132 1133 if (rc == TMF_RESP_FUNC_COMPLETE) { 1134 spin_lock_irqsave(&hisi_hba->lock, flags); 1135 hisi_sas_release_task(hisi_hba, device); 1136 spin_unlock_irqrestore(&hisi_hba->lock, flags); 1137 } 1138 return rc; 1139 } 1140 1141 static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun) 1142 { 1143 struct hisi_sas_device *sas_dev = device->lldd_dev; 1144 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1145 struct device *dev = hisi_hba->dev; 1146 unsigned long flags; 1147 int rc = TMF_RESP_FUNC_FAILED; 1148 1149 sas_dev->dev_status = HISI_SAS_DEV_EH; 1150 if (dev_is_sata(device)) { 1151 struct sas_phy *phy; 1152 1153 /* Clear internal IO and then hardreset */ 1154 rc = hisi_sas_internal_task_abort(hisi_hba, device, 1155 HISI_SAS_INT_ABT_DEV, 0); 1156 if (rc == TMF_RESP_FUNC_FAILED) 1157 goto out; 1158 hisi_sas_dereg_device(hisi_hba, device); 1159 1160 phy = sas_get_local_phy(device); 1161 1162 rc = sas_phy_reset(phy, 1); 1163 1164 if (rc == 0) { 1165 spin_lock_irqsave(&hisi_hba->lock, flags); 1166 hisi_sas_release_task(hisi_hba, device); 1167 spin_unlock_irqrestore(&hisi_hba->lock, flags); 1168 } 1169 sas_put_local_phy(phy); 1170 } else { 1171 struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET }; 1172 1173 rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task); 1174 if (rc == TMF_RESP_FUNC_COMPLETE) { 1175 spin_lock_irqsave(&hisi_hba->lock, flags); 1176 hisi_sas_release_task(hisi_hba, device); 1177 spin_unlock_irqrestore(&hisi_hba->lock, flags); 1178 } 1179 } 1180 out: 1181 if (rc != TMF_RESP_FUNC_COMPLETE) 1182 dev_err(dev, "lu_reset: for device[%d]:rc= %d\n", 1183 sas_dev->device_id, rc); 1184 return rc; 1185 } 1186 1187 static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha) 1188 { 1189 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; 1190 1191 return hisi_sas_controller_reset(hisi_hba); 1192 } 1193 1194 static int hisi_sas_query_task(struct sas_task *task) 1195 { 1196 struct scsi_lun lun; 1197 struct hisi_sas_tmf_task tmf_task; 1198 int rc = TMF_RESP_FUNC_FAILED; 1199 1200 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { 1201 struct scsi_cmnd *cmnd = task->uldd_task; 1202 struct domain_device *device = task->dev; 1203 struct hisi_sas_slot *slot = task->lldd_task; 1204 u32 tag = slot->idx; 1205 1206 int_to_scsilun(cmnd->device->lun, &lun); 1207 tmf_task.tmf = TMF_QUERY_TASK; 1208 tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag); 1209 1210 rc = hisi_sas_debug_issue_ssp_tmf(device, 1211 lun.scsi_lun, 1212 &tmf_task); 1213 switch (rc) { 1214 /* The task is still in Lun, release it then */ 1215 case TMF_RESP_FUNC_SUCC: 1216 /* The task is not in Lun or failed, reset the phy */ 1217 case TMF_RESP_FUNC_FAILED: 1218 case TMF_RESP_FUNC_COMPLETE: 1219 break; 1220 default: 1221 rc = TMF_RESP_FUNC_FAILED; 1222 break; 1223 } 1224 } 1225 return rc; 1226 } 1227 1228 static int 1229 hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, 1230 struct sas_task *task, int abort_flag, 1231 int task_tag) 1232 { 1233 struct domain_device *device = task->dev; 1234 struct hisi_sas_device *sas_dev = device->lldd_dev; 1235 struct device *dev = hisi_hba->dev; 1236 struct hisi_sas_port *port; 1237 struct hisi_sas_slot *slot; 1238 struct asd_sas_port *sas_port = device->port; 1239 struct hisi_sas_cmd_hdr *cmd_hdr_base; 1240 struct hisi_sas_dq *dq = sas_dev->dq; 1241 int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx; 1242 unsigned long flags, flags_dq; 1243 1244 if (unlikely(test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))) 1245 return -EINVAL; 1246 1247 if (!device->port) 1248 return -1; 1249 1250 port = to_hisi_sas_port(sas_port); 1251 1252 /* simply get a slot and send abort command */ 1253 spin_lock_irqsave(&hisi_hba->lock, flags); 1254 rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx); 1255 if (rc) { 1256 spin_unlock_irqrestore(&hisi_hba->lock, flags); 1257 goto err_out; 1258 } 1259 spin_unlock_irqrestore(&hisi_hba->lock, flags); 1260 1261 spin_lock_irqsave(&dq->lock, flags_dq); 1262 rc = hisi_hba->hw->get_free_slot(hisi_hba, dq); 1263 if (rc) 1264 goto err_out_tag; 1265 1266 dlvry_queue = dq->id; 1267 dlvry_queue_slot = dq->wr_point; 1268 1269 slot = &hisi_hba->slot_info[slot_idx]; 1270 memset(slot, 0, sizeof(struct hisi_sas_slot)); 1271 1272 slot->idx = slot_idx; 1273 slot->n_elem = n_elem; 1274 slot->dlvry_queue = dlvry_queue; 1275 slot->dlvry_queue_slot = dlvry_queue_slot; 1276 cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue]; 1277 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot]; 1278 slot->task = task; 1279 slot->port = port; 1280 task->lldd_task = slot; 1281 1282 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr)); 1283 1284 rc = hisi_sas_task_prep_abort(hisi_hba, slot, device_id, 1285 abort_flag, task_tag); 1286 if (rc) 1287 goto err_out_tag; 1288 1289 1290 list_add_tail(&slot->entry, &sas_dev->list); 1291 spin_lock_irqsave(&task->task_state_lock, flags); 1292 task->task_state_flags |= SAS_TASK_AT_INITIATOR; 1293 spin_unlock_irqrestore(&task->task_state_lock, flags); 1294 1295 dq->slot_prep = slot; 1296 1297 atomic64_inc(&sas_dev->running_req); 1298 1299 /* send abort command to the chip */ 1300 hisi_hba->hw->start_delivery(dq); 1301 spin_unlock_irqrestore(&dq->lock, flags_dq); 1302 1303 return 0; 1304 1305 err_out_tag: 1306 spin_lock_irqsave(&hisi_hba->lock, flags); 1307 hisi_sas_slot_index_free(hisi_hba, slot_idx); 1308 spin_unlock_irqrestore(&hisi_hba->lock, flags); 1309 spin_unlock_irqrestore(&dq->lock, flags_dq); 1310 err_out: 1311 dev_err(dev, "internal abort task prep: failed[%d]!\n", rc); 1312 1313 return rc; 1314 } 1315 1316 /** 1317 * hisi_sas_internal_task_abort -- execute an internal 1318 * abort command for single IO command or a device 1319 * @hisi_hba: host controller struct 1320 * @device: domain device 1321 * @abort_flag: mode of operation, device or single IO 1322 * @tag: tag of IO to be aborted (only relevant to single 1323 * IO mode) 1324 */ 1325 static int 1326 hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, 1327 struct domain_device *device, 1328 int abort_flag, int tag) 1329 { 1330 struct sas_task *task; 1331 struct hisi_sas_device *sas_dev = device->lldd_dev; 1332 struct device *dev = hisi_hba->dev; 1333 int res; 1334 1335 if (!hisi_hba->hw->prep_abort) 1336 return -EOPNOTSUPP; 1337 1338 task = sas_alloc_slow_task(GFP_KERNEL); 1339 if (!task) 1340 return -ENOMEM; 1341 1342 task->dev = device; 1343 task->task_proto = device->tproto; 1344 task->task_done = hisi_sas_task_done; 1345 task->slow_task->timer.data = (unsigned long)task; 1346 task->slow_task->timer.function = hisi_sas_tmf_timedout; 1347 task->slow_task->timer.expires = jiffies + msecs_to_jiffies(110); 1348 add_timer(&task->slow_task->timer); 1349 1350 res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id, 1351 task, abort_flag, tag); 1352 if (res) { 1353 del_timer(&task->slow_task->timer); 1354 dev_err(dev, "internal task abort: executing internal task failed: %d\n", 1355 res); 1356 goto exit; 1357 } 1358 wait_for_completion(&task->slow_task->completion); 1359 res = TMF_RESP_FUNC_FAILED; 1360 1361 /* Internal abort timed out */ 1362 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { 1363 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { 1364 struct hisi_sas_slot *slot = task->lldd_task; 1365 1366 if (slot) 1367 slot->task = NULL; 1368 dev_err(dev, "internal task abort: timeout.\n"); 1369 } 1370 } 1371 1372 if (task->task_status.resp == SAS_TASK_COMPLETE && 1373 task->task_status.stat == TMF_RESP_FUNC_COMPLETE) { 1374 res = TMF_RESP_FUNC_COMPLETE; 1375 goto exit; 1376 } 1377 1378 if (task->task_status.resp == SAS_TASK_COMPLETE && 1379 task->task_status.stat == TMF_RESP_FUNC_SUCC) { 1380 res = TMF_RESP_FUNC_SUCC; 1381 goto exit; 1382 } 1383 1384 exit: 1385 dev_dbg(dev, "internal task abort: task to dev %016llx task=%p " 1386 "resp: 0x%x sts 0x%x\n", 1387 SAS_ADDR(device->sas_addr), 1388 task, 1389 task->task_status.resp, /* 0 is complete, -1 is undelivered */ 1390 task->task_status.stat); 1391 sas_free_task(task); 1392 1393 return res; 1394 } 1395 1396 static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy) 1397 { 1398 hisi_sas_port_notify_formed(sas_phy); 1399 } 1400 1401 static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy) 1402 { 1403 phy->phy_attached = 0; 1404 phy->phy_type = 0; 1405 phy->port = NULL; 1406 } 1407 1408 void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy) 1409 { 1410 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1411 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1412 struct sas_ha_struct *sas_ha = &hisi_hba->sha; 1413 1414 if (rdy) { 1415 /* Phy down but ready */ 1416 hisi_sas_bytes_dmaed(hisi_hba, phy_no); 1417 hisi_sas_port_notify_formed(sas_phy); 1418 } else { 1419 struct hisi_sas_port *port = phy->port; 1420 1421 /* Phy down and not ready */ 1422 sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL); 1423 sas_phy_disconnected(sas_phy); 1424 1425 if (port) { 1426 if (phy->phy_type & PORT_TYPE_SAS) { 1427 int port_id = port->id; 1428 1429 if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba, 1430 port_id)) 1431 port->port_attached = 0; 1432 } else if (phy->phy_type & PORT_TYPE_SATA) 1433 port->port_attached = 0; 1434 } 1435 hisi_sas_phy_disconnected(phy); 1436 } 1437 } 1438 EXPORT_SYMBOL_GPL(hisi_sas_phy_down); 1439 1440 void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state, 1441 u32 state) 1442 { 1443 struct sas_ha_struct *sas_ha = &hisi_hba->sha; 1444 int phy_no; 1445 1446 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { 1447 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1448 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1449 struct asd_sas_port *sas_port = sas_phy->port; 1450 struct domain_device *dev; 1451 1452 if (sas_phy->enabled) { 1453 /* Report PHY state change to libsas */ 1454 if (state & (1 << phy_no)) 1455 continue; 1456 1457 if (old_state & (1 << phy_no)) 1458 /* PHY down but was up before */ 1459 hisi_sas_phy_down(hisi_hba, phy_no, 0); 1460 } 1461 if (!sas_port) 1462 continue; 1463 dev = sas_port->port_dev; 1464 1465 if (DEV_IS_EXPANDER(dev->dev_type)) 1466 sas_ha->notify_phy_event(sas_phy, PORTE_BROADCAST_RCVD); 1467 } 1468 } 1469 EXPORT_SYMBOL_GPL(hisi_sas_rescan_topology); 1470 1471 struct scsi_transport_template *hisi_sas_stt; 1472 EXPORT_SYMBOL_GPL(hisi_sas_stt); 1473 1474 static struct scsi_host_template _hisi_sas_sht = { 1475 .module = THIS_MODULE, 1476 .name = DRV_NAME, 1477 .queuecommand = sas_queuecommand, 1478 .target_alloc = sas_target_alloc, 1479 .slave_configure = hisi_sas_slave_configure, 1480 .scan_finished = hisi_sas_scan_finished, 1481 .scan_start = hisi_sas_scan_start, 1482 .change_queue_depth = sas_change_queue_depth, 1483 .bios_param = sas_bios_param, 1484 .can_queue = 1, 1485 .this_id = -1, 1486 .sg_tablesize = SG_ALL, 1487 .max_sectors = SCSI_DEFAULT_MAX_SECTORS, 1488 .use_clustering = ENABLE_CLUSTERING, 1489 .eh_device_reset_handler = sas_eh_device_reset_handler, 1490 .eh_bus_reset_handler = sas_eh_bus_reset_handler, 1491 .target_destroy = sas_target_destroy, 1492 .ioctl = sas_ioctl, 1493 }; 1494 struct scsi_host_template *hisi_sas_sht = &_hisi_sas_sht; 1495 EXPORT_SYMBOL_GPL(hisi_sas_sht); 1496 1497 static struct sas_domain_function_template hisi_sas_transport_ops = { 1498 .lldd_dev_found = hisi_sas_dev_found, 1499 .lldd_dev_gone = hisi_sas_dev_gone, 1500 .lldd_execute_task = hisi_sas_queue_command, 1501 .lldd_control_phy = hisi_sas_control_phy, 1502 .lldd_abort_task = hisi_sas_abort_task, 1503 .lldd_abort_task_set = hisi_sas_abort_task_set, 1504 .lldd_clear_aca = hisi_sas_clear_aca, 1505 .lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset, 1506 .lldd_lu_reset = hisi_sas_lu_reset, 1507 .lldd_query_task = hisi_sas_query_task, 1508 .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha, 1509 .lldd_port_formed = hisi_sas_port_formed, 1510 }; 1511 1512 void hisi_sas_init_mem(struct hisi_hba *hisi_hba) 1513 { 1514 int i, s, max_command_entries = hisi_hba->hw->max_command_entries; 1515 1516 for (i = 0; i < hisi_hba->queue_count; i++) { 1517 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 1518 struct hisi_sas_dq *dq = &hisi_hba->dq[i]; 1519 1520 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS; 1521 memset(hisi_hba->cmd_hdr[i], 0, s); 1522 dq->wr_point = 0; 1523 1524 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; 1525 memset(hisi_hba->complete_hdr[i], 0, s); 1526 cq->rd_point = 0; 1527 } 1528 1529 s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy; 1530 memset(hisi_hba->initial_fis, 0, s); 1531 1532 s = max_command_entries * sizeof(struct hisi_sas_iost); 1533 memset(hisi_hba->iost, 0, s); 1534 1535 s = max_command_entries * sizeof(struct hisi_sas_breakpoint); 1536 memset(hisi_hba->breakpoint, 0, s); 1537 1538 s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2; 1539 memset(hisi_hba->sata_breakpoint, 0, s); 1540 } 1541 EXPORT_SYMBOL_GPL(hisi_sas_init_mem); 1542 1543 int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost) 1544 { 1545 struct device *dev = hisi_hba->dev; 1546 int i, s, max_command_entries = hisi_hba->hw->max_command_entries; 1547 1548 spin_lock_init(&hisi_hba->lock); 1549 for (i = 0; i < hisi_hba->n_phy; i++) { 1550 hisi_sas_phy_init(hisi_hba, i); 1551 hisi_hba->port[i].port_attached = 0; 1552 hisi_hba->port[i].id = -1; 1553 } 1554 1555 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 1556 hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED; 1557 hisi_hba->devices[i].device_id = i; 1558 hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL; 1559 } 1560 1561 for (i = 0; i < hisi_hba->queue_count; i++) { 1562 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 1563 struct hisi_sas_dq *dq = &hisi_hba->dq[i]; 1564 1565 /* Completion queue structure */ 1566 cq->id = i; 1567 cq->hisi_hba = hisi_hba; 1568 1569 /* Delivery queue structure */ 1570 dq->id = i; 1571 dq->hisi_hba = hisi_hba; 1572 1573 /* Delivery queue */ 1574 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS; 1575 hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s, 1576 &hisi_hba->cmd_hdr_dma[i], GFP_KERNEL); 1577 if (!hisi_hba->cmd_hdr[i]) 1578 goto err_out; 1579 1580 /* Completion queue */ 1581 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; 1582 hisi_hba->complete_hdr[i] = dma_alloc_coherent(dev, s, 1583 &hisi_hba->complete_hdr_dma[i], GFP_KERNEL); 1584 if (!hisi_hba->complete_hdr[i]) 1585 goto err_out; 1586 } 1587 1588 s = sizeof(struct hisi_sas_slot_buf_table); 1589 hisi_hba->buffer_pool = dma_pool_create("dma_buffer", dev, s, 16, 0); 1590 if (!hisi_hba->buffer_pool) 1591 goto err_out; 1592 1593 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct); 1594 hisi_hba->itct = dma_alloc_coherent(dev, s, &hisi_hba->itct_dma, 1595 GFP_KERNEL); 1596 if (!hisi_hba->itct) 1597 goto err_out; 1598 1599 memset(hisi_hba->itct, 0, s); 1600 1601 hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries, 1602 sizeof(struct hisi_sas_slot), 1603 GFP_KERNEL); 1604 if (!hisi_hba->slot_info) 1605 goto err_out; 1606 1607 s = max_command_entries * sizeof(struct hisi_sas_iost); 1608 hisi_hba->iost = dma_alloc_coherent(dev, s, &hisi_hba->iost_dma, 1609 GFP_KERNEL); 1610 if (!hisi_hba->iost) 1611 goto err_out; 1612 1613 s = max_command_entries * sizeof(struct hisi_sas_breakpoint); 1614 hisi_hba->breakpoint = dma_alloc_coherent(dev, s, 1615 &hisi_hba->breakpoint_dma, GFP_KERNEL); 1616 if (!hisi_hba->breakpoint) 1617 goto err_out; 1618 1619 hisi_hba->slot_index_count = max_command_entries; 1620 s = hisi_hba->slot_index_count / BITS_PER_BYTE; 1621 hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL); 1622 if (!hisi_hba->slot_index_tags) 1623 goto err_out; 1624 1625 s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS; 1626 hisi_hba->initial_fis = dma_alloc_coherent(dev, s, 1627 &hisi_hba->initial_fis_dma, GFP_KERNEL); 1628 if (!hisi_hba->initial_fis) 1629 goto err_out; 1630 1631 s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2; 1632 hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s, 1633 &hisi_hba->sata_breakpoint_dma, GFP_KERNEL); 1634 if (!hisi_hba->sata_breakpoint) 1635 goto err_out; 1636 hisi_sas_init_mem(hisi_hba); 1637 1638 hisi_sas_slot_index_init(hisi_hba); 1639 1640 hisi_hba->wq = create_singlethread_workqueue(dev_name(dev)); 1641 if (!hisi_hba->wq) { 1642 dev_err(dev, "sas_alloc: failed to create workqueue\n"); 1643 goto err_out; 1644 } 1645 1646 return 0; 1647 err_out: 1648 return -ENOMEM; 1649 } 1650 EXPORT_SYMBOL_GPL(hisi_sas_alloc); 1651 1652 void hisi_sas_free(struct hisi_hba *hisi_hba) 1653 { 1654 struct device *dev = hisi_hba->dev; 1655 int i, s, max_command_entries = hisi_hba->hw->max_command_entries; 1656 1657 for (i = 0; i < hisi_hba->queue_count; i++) { 1658 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS; 1659 if (hisi_hba->cmd_hdr[i]) 1660 dma_free_coherent(dev, s, 1661 hisi_hba->cmd_hdr[i], 1662 hisi_hba->cmd_hdr_dma[i]); 1663 1664 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; 1665 if (hisi_hba->complete_hdr[i]) 1666 dma_free_coherent(dev, s, 1667 hisi_hba->complete_hdr[i], 1668 hisi_hba->complete_hdr_dma[i]); 1669 } 1670 1671 dma_pool_destroy(hisi_hba->buffer_pool); 1672 1673 s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct); 1674 if (hisi_hba->itct) 1675 dma_free_coherent(dev, s, 1676 hisi_hba->itct, hisi_hba->itct_dma); 1677 1678 s = max_command_entries * sizeof(struct hisi_sas_iost); 1679 if (hisi_hba->iost) 1680 dma_free_coherent(dev, s, 1681 hisi_hba->iost, hisi_hba->iost_dma); 1682 1683 s = max_command_entries * sizeof(struct hisi_sas_breakpoint); 1684 if (hisi_hba->breakpoint) 1685 dma_free_coherent(dev, s, 1686 hisi_hba->breakpoint, 1687 hisi_hba->breakpoint_dma); 1688 1689 1690 s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS; 1691 if (hisi_hba->initial_fis) 1692 dma_free_coherent(dev, s, 1693 hisi_hba->initial_fis, 1694 hisi_hba->initial_fis_dma); 1695 1696 s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2; 1697 if (hisi_hba->sata_breakpoint) 1698 dma_free_coherent(dev, s, 1699 hisi_hba->sata_breakpoint, 1700 hisi_hba->sata_breakpoint_dma); 1701 1702 if (hisi_hba->wq) 1703 destroy_workqueue(hisi_hba->wq); 1704 } 1705 EXPORT_SYMBOL_GPL(hisi_sas_free); 1706 1707 static void hisi_sas_rst_work_handler(struct work_struct *work) 1708 { 1709 struct hisi_hba *hisi_hba = 1710 container_of(work, struct hisi_hba, rst_work); 1711 1712 hisi_sas_controller_reset(hisi_hba); 1713 } 1714 1715 int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba) 1716 { 1717 struct device *dev = hisi_hba->dev; 1718 struct platform_device *pdev = hisi_hba->platform_dev; 1719 struct device_node *np = pdev ? pdev->dev.of_node : NULL; 1720 struct clk *refclk; 1721 1722 if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr, 1723 SAS_ADDR_SIZE)) { 1724 dev_err(dev, "could not get property sas-addr\n"); 1725 return -ENOENT; 1726 } 1727 1728 if (np) { 1729 /* 1730 * These properties are only required for platform device-based 1731 * controller with DT firmware. 1732 */ 1733 hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np, 1734 "hisilicon,sas-syscon"); 1735 if (IS_ERR(hisi_hba->ctrl)) { 1736 dev_err(dev, "could not get syscon\n"); 1737 return -ENOENT; 1738 } 1739 1740 if (device_property_read_u32(dev, "ctrl-reset-reg", 1741 &hisi_hba->ctrl_reset_reg)) { 1742 dev_err(dev, 1743 "could not get property ctrl-reset-reg\n"); 1744 return -ENOENT; 1745 } 1746 1747 if (device_property_read_u32(dev, "ctrl-reset-sts-reg", 1748 &hisi_hba->ctrl_reset_sts_reg)) { 1749 dev_err(dev, 1750 "could not get property ctrl-reset-sts-reg\n"); 1751 return -ENOENT; 1752 } 1753 1754 if (device_property_read_u32(dev, "ctrl-clock-ena-reg", 1755 &hisi_hba->ctrl_clock_ena_reg)) { 1756 dev_err(dev, 1757 "could not get property ctrl-clock-ena-reg\n"); 1758 return -ENOENT; 1759 } 1760 } 1761 1762 refclk = devm_clk_get(dev, NULL); 1763 if (IS_ERR(refclk)) 1764 dev_dbg(dev, "no ref clk property\n"); 1765 else 1766 hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000; 1767 1768 if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) { 1769 dev_err(dev, "could not get property phy-count\n"); 1770 return -ENOENT; 1771 } 1772 1773 if (device_property_read_u32(dev, "queue-count", 1774 &hisi_hba->queue_count)) { 1775 dev_err(dev, "could not get property queue-count\n"); 1776 return -ENOENT; 1777 } 1778 1779 return 0; 1780 } 1781 EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info); 1782 1783 static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, 1784 const struct hisi_sas_hw *hw) 1785 { 1786 struct resource *res; 1787 struct Scsi_Host *shost; 1788 struct hisi_hba *hisi_hba; 1789 struct device *dev = &pdev->dev; 1790 1791 shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba)); 1792 if (!shost) { 1793 dev_err(dev, "scsi host alloc failed\n"); 1794 return NULL; 1795 } 1796 hisi_hba = shost_priv(shost); 1797 1798 INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler); 1799 hisi_hba->hw = hw; 1800 hisi_hba->dev = dev; 1801 hisi_hba->platform_dev = pdev; 1802 hisi_hba->shost = shost; 1803 SHOST_TO_SAS_HA(shost) = &hisi_hba->sha; 1804 1805 init_timer(&hisi_hba->timer); 1806 1807 if (hisi_sas_get_fw_info(hisi_hba) < 0) 1808 goto err_out; 1809 1810 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) && 1811 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) { 1812 dev_err(dev, "No usable DMA addressing method\n"); 1813 goto err_out; 1814 } 1815 1816 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1817 hisi_hba->regs = devm_ioremap_resource(dev, res); 1818 if (IS_ERR(hisi_hba->regs)) 1819 goto err_out; 1820 1821 if (hisi_sas_alloc(hisi_hba, shost)) { 1822 hisi_sas_free(hisi_hba); 1823 goto err_out; 1824 } 1825 1826 return shost; 1827 err_out: 1828 kfree(shost); 1829 dev_err(dev, "shost alloc failed\n"); 1830 return NULL; 1831 } 1832 1833 void hisi_sas_init_add(struct hisi_hba *hisi_hba) 1834 { 1835 int i; 1836 1837 for (i = 0; i < hisi_hba->n_phy; i++) 1838 memcpy(&hisi_hba->phy[i].dev_sas_addr, 1839 hisi_hba->sas_addr, 1840 SAS_ADDR_SIZE); 1841 } 1842 EXPORT_SYMBOL_GPL(hisi_sas_init_add); 1843 1844 int hisi_sas_probe(struct platform_device *pdev, 1845 const struct hisi_sas_hw *hw) 1846 { 1847 struct Scsi_Host *shost; 1848 struct hisi_hba *hisi_hba; 1849 struct device *dev = &pdev->dev; 1850 struct asd_sas_phy **arr_phy; 1851 struct asd_sas_port **arr_port; 1852 struct sas_ha_struct *sha; 1853 int rc, phy_nr, port_nr, i; 1854 1855 shost = hisi_sas_shost_alloc(pdev, hw); 1856 if (!shost) 1857 return -ENOMEM; 1858 1859 sha = SHOST_TO_SAS_HA(shost); 1860 hisi_hba = shost_priv(shost); 1861 platform_set_drvdata(pdev, sha); 1862 1863 phy_nr = port_nr = hisi_hba->n_phy; 1864 1865 arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL); 1866 arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL); 1867 if (!arr_phy || !arr_port) { 1868 rc = -ENOMEM; 1869 goto err_out_ha; 1870 } 1871 1872 sha->sas_phy = arr_phy; 1873 sha->sas_port = arr_port; 1874 sha->lldd_ha = hisi_hba; 1875 1876 shost->transportt = hisi_sas_stt; 1877 shost->max_id = HISI_SAS_MAX_DEVICES; 1878 shost->max_lun = ~0; 1879 shost->max_channel = 1; 1880 shost->max_cmd_len = 16; 1881 shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT); 1882 shost->can_queue = hisi_hba->hw->max_command_entries; 1883 shost->cmd_per_lun = hisi_hba->hw->max_command_entries; 1884 1885 sha->sas_ha_name = DRV_NAME; 1886 sha->dev = hisi_hba->dev; 1887 sha->lldd_module = THIS_MODULE; 1888 sha->sas_addr = &hisi_hba->sas_addr[0]; 1889 sha->num_phys = hisi_hba->n_phy; 1890 sha->core.shost = hisi_hba->shost; 1891 1892 for (i = 0; i < hisi_hba->n_phy; i++) { 1893 sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy; 1894 sha->sas_port[i] = &hisi_hba->port[i].sas_port; 1895 } 1896 1897 hisi_sas_init_add(hisi_hba); 1898 1899 rc = scsi_add_host(shost, &pdev->dev); 1900 if (rc) 1901 goto err_out_ha; 1902 1903 rc = sas_register_ha(sha); 1904 if (rc) 1905 goto err_out_register_ha; 1906 1907 rc = hisi_hba->hw->hw_init(hisi_hba); 1908 if (rc) 1909 goto err_out_register_ha; 1910 1911 scsi_scan_host(shost); 1912 1913 return 0; 1914 1915 err_out_register_ha: 1916 scsi_remove_host(shost); 1917 err_out_ha: 1918 hisi_sas_free(hisi_hba); 1919 kfree(shost); 1920 return rc; 1921 } 1922 EXPORT_SYMBOL_GPL(hisi_sas_probe); 1923 1924 int hisi_sas_remove(struct platform_device *pdev) 1925 { 1926 struct sas_ha_struct *sha = platform_get_drvdata(pdev); 1927 struct hisi_hba *hisi_hba = sha->lldd_ha; 1928 struct Scsi_Host *shost = sha->core.shost; 1929 1930 sas_unregister_ha(sha); 1931 sas_remove_host(sha->core.shost); 1932 1933 hisi_sas_free(hisi_hba); 1934 kfree(shost); 1935 return 0; 1936 } 1937 EXPORT_SYMBOL_GPL(hisi_sas_remove); 1938 1939 static __init int hisi_sas_init(void) 1940 { 1941 pr_info("hisi_sas: driver version %s\n", DRV_VERSION); 1942 1943 hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops); 1944 if (!hisi_sas_stt) 1945 return -ENOMEM; 1946 1947 return 0; 1948 } 1949 1950 static __exit void hisi_sas_exit(void) 1951 { 1952 sas_release_transport(hisi_sas_stt); 1953 } 1954 1955 module_init(hisi_sas_init); 1956 module_exit(hisi_sas_exit); 1957 1958 MODULE_VERSION(DRV_VERSION); 1959 MODULE_LICENSE("GPL"); 1960 MODULE_AUTHOR("John Garry <john.garry@huawei.com>"); 1961 MODULE_DESCRIPTION("HISILICON SAS controller driver"); 1962 MODULE_ALIAS("platform:" DRV_NAME); 1963