/*
 * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
 *
 * Copyright (c) 2008-2009 USI Co., Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 */

#include <linux/slab.h>
#include "pm8001_sas.h"
#include "pm80xx_tracepoints.h"

/**
 * pm8001_find_tag - find the tag that belongs to a given sas task
 * @task: the task sent to the LLDD
 * @tag: the found tag associated with the task
 */
static int pm8001_find_tag(struct sas_task *task, u32 *tag)
{
	if (task->lldd_task) {
		struct pm8001_ccb_info *ccb;
		ccb = task->lldd_task;
		*tag = ccb->ccb_tag;
		return 1;
	}
	return 0;
}

/**
 * pm8001_tag_free - free a tag that is no longer needed
 * @pm8001_ha: our hba struct
 * @tag: the tag to be freed
 */
void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
{
	void *bitmap = pm8001_ha->tags;
	clear_bit(tag, bitmap);
}

/**
 * pm8001_tag_alloc - allocate an empty tag for a task to use
 * @pm8001_ha: our hba struct
 * @tag_out: the allocated tag
 */
inline int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out)
{
	unsigned int tag;
	void *bitmap = pm8001_ha->tags;
	unsigned long flags;

	spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags);
	tag = find_first_zero_bit(bitmap, pm8001_ha->tags_num);
	if (tag >= pm8001_ha->tags_num) {
		spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
		return -SAS_QUEUE_FULL;
	}
	set_bit(tag, bitmap);
	spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
	*tag_out = tag;
	return 0;
}

void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha)
{
	int i;
	for (i = 0; i < pm8001_ha->tags_num; ++i)
		pm8001_tag_free(pm8001_ha, i);
}

/**
 * pm8001_mem_alloc - allocate memory for pm8001.
 * @pdev: pci device.
 * @virt_addr: the allocated virtual address
 * @pphys_addr: DMA address for this device
 * @pphys_addr_hi: the high 32 bits of the aligned physical address
 * @pphys_addr_lo: the low 32 bits of the aligned physical address
 * @mem_size: memory size.
 * @align: requested byte alignment
 */
int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
	dma_addr_t *pphys_addr, u32 *pphys_addr_hi,
	u32 *pphys_addr_lo, u32 mem_size, u32 align)
{
	caddr_t mem_virt_alloc;
	dma_addr_t mem_dma_handle;
	u64 phys_align;
	u64 align_offset = 0;
	if (align)
		align_offset = (dma_addr_t)align - 1;
	mem_virt_alloc = dma_alloc_coherent(&pdev->dev, mem_size + align,
					    &mem_dma_handle, GFP_KERNEL);
	if (!mem_virt_alloc)
		return -ENOMEM;
	*pphys_addr = mem_dma_handle;
	/* Round the returned DMA address up to the requested alignment and
	 * offset the virtual address by the same amount.
	 */
	phys_align = (*pphys_addr + align_offset) & ~align_offset;
	*virt_addr = (void *)mem_virt_alloc + phys_align - *pphys_addr;
	*pphys_addr_hi = upper_32_bits(phys_align);
	*pphys_addr_lo = lower_32_bits(phys_align);
	return 0;
}

/**
 * pm8001_find_ha_by_dev - find our hba struct from a domain device provided
 * by the sas layer.
 * @dev: the domain device which came from the sas layer.
 */
static
struct pm8001_hba_info *pm8001_find_ha_by_dev(struct domain_device *dev)
{
	struct sas_ha_struct *sha = dev->port->ha;
	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
	return pm8001_ha;
}

/**
 * pm8001_phy_control - the phy control handler registered in
 * sas_domain_function_template for libsas to use. Note that this only
 * controls HBA phys, not expander phys; to control an expander phy, use an
 * SMP command instead.
 * @sas_phy: which phy in HBA phys.
 * @func: the operation.
 * @funcdata: always NULL.
 */
int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
	void *funcdata)
{
	int rc = 0, phy_id = sas_phy->id;
	struct pm8001_hba_info *pm8001_ha = NULL;
	struct sas_phy_linkrates *rates;
	struct pm8001_phy *phy;
	DECLARE_COMPLETION_ONSTACK(completion);
	unsigned long flags;
	pm8001_ha = sas_phy->ha->lldd_ha;
	phy = &pm8001_ha->phy[phy_id];
	pm8001_ha->phy[phy_id].enable_completion = &completion;
	switch (func) {
	case PHY_FUNC_SET_LINK_RATE:
		rates = funcdata;
		if (rates->minimum_linkrate) {
			pm8001_ha->phy[phy_id].minimum_linkrate =
				rates->minimum_linkrate;
		}
		if (rates->maximum_linkrate) {
			pm8001_ha->phy[phy_id].maximum_linkrate =
				rates->maximum_linkrate;
		}
		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
			wait_for_completion(&completion);
		}
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					      PHY_LINK_RESET);
		break;
	case PHY_FUNC_HARD_RESET:
		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
			wait_for_completion(&completion);
		}
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					      PHY_HARD_RESET);
		break;
	case PHY_FUNC_LINK_RESET:
		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
			wait_for_completion(&completion);
		}
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					      PHY_LINK_RESET);
		break;
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					      PHY_LINK_RESET);
		break;
	case PHY_FUNC_DISABLE:
		if (pm8001_ha->chip_id != chip_8001) {
			if (pm8001_ha->phy[phy_id].phy_state ==
				PHY_STATE_LINK_UP_SPCV) {
				sas_phy_disconnected(&phy->sas_phy);
				sas_notify_phy_event(&phy->sas_phy,
					PHYE_LOSS_OF_SIGNAL, GFP_KERNEL);
				phy->phy_attached = 0;
			}
		} else {
			if (pm8001_ha->phy[phy_id].phy_state ==
				PHY_STATE_LINK_UP_SPC) {
				sas_phy_disconnected(&phy->sas_phy);
				sas_notify_phy_event(&phy->sas_phy,
					PHYE_LOSS_OF_SIGNAL, GFP_KERNEL);
				phy->phy_attached = 0;
			}
		}
		PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id);
		break;
	case PHY_FUNC_GET_EVENTS:
		spin_lock_irqsave(&pm8001_ha->lock, flags);
		if (pm8001_ha->chip_id == chip_8001) {
			if (-1 == pm8001_bar4_shift(pm8001_ha,
					(phy_id < 4) ? 0x30000 : 0x40000)) {
				spin_unlock_irqrestore(&pm8001_ha->lock, flags);
				return -EINVAL;
			}
		}
		{
			struct sas_phy *phy = sas_phy->phy;
			uint32_t *qp = (uint32_t *)(((char *)
				pm8001_ha->io_mem[2].memvirtaddr)
				+ 0x1034 + (0x4000 * (phy_id & 3)));

			phy->invalid_dword_count = qp[0];
			phy->running_disparity_error_count = qp[1];
			phy->loss_of_dword_sync_count = qp[3];
			phy->phy_reset_problem_count = qp[4];
		}
		if (pm8001_ha->chip_id == chip_8001)
			pm8001_bar4_shift(pm8001_ha, 0);
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		return 0;
	default:
		pm8001_dbg(pm8001_ha, DEVIO, "func 0x%x\n", func);
		rc = -EOPNOTSUPP;
	}
	msleep(300);
	return rc;
}

/**
 * pm8001_scan_start - enable all HBA phys by sending a phy_start
 * command to the HBA.
 * @shost: the scsi host data.
 */
void pm8001_scan_start(struct Scsi_Host *shost)
{
	int i;
	struct pm8001_hba_info *pm8001_ha;
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
	DECLARE_COMPLETION_ONSTACK(completion);
	pm8001_ha = sha->lldd_ha;
	/* SAS_RE_INITIALIZATION not available in SPCv/ve */
	if (pm8001_ha->chip_id == chip_8001)
		PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
	for (i = 0; i < pm8001_ha->chip->n_phy; ++i) {
		pm8001_ha->phy[i].enable_completion = &completion;
		PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
		wait_for_completion(&completion);
		msleep(300);
	}
}

int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);

	/* give the phy enabling interrupt event time to come in (1s
	 * is empirically about all it takes) */
	if (time < HZ)
		return 0;
	/* Wait for discovery to finish */
	sas_drain_work(ha);
	return 1;
}

/**
 * pm8001_task_prep_smp - the dispatcher function, prepare data for an smp task
 * @pm8001_ha: our hba card information
 * @ccb: the ccb attached to the smp task
 */
static int pm8001_task_prep_smp(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	return PM8001_CHIP_DISP->smp_req(pm8001_ha, ccb);
}

u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag)
{
	struct ata_queued_cmd *qc = task->uldd_task;
	if (qc) {
		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
		    qc->tf.command == ATA_CMD_FPDMA_READ ||
		    qc->tf.command == ATA_CMD_FPDMA_RECV ||
		    qc->tf.command == ATA_CMD_FPDMA_SEND ||
		    qc->tf.command == ATA_CMD_NCQ_NON_DATA) {
			*tag = qc->tag;
			return 1;
		}
	}
	return 0;
}

/**
 * pm8001_task_prep_ata - the dispatcher function, prepare data for a sata task
 * @pm8001_ha: our hba card information
 * @ccb: the ccb attached to the sata task
 */
static int pm8001_task_prep_ata(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	return PM8001_CHIP_DISP->sata_req(pm8001_ha, ccb);
}

/**
 * pm8001_task_prep_ssp_tm - the dispatcher function, prepare task management data
 * @pm8001_ha: our hba card information
 * @ccb: the ccb attached to the TM
 * @tmf: the task management IU
 */
static int pm8001_task_prep_ssp_tm(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf)
{
	return PM8001_CHIP_DISP->ssp_tm_req(pm8001_ha, ccb, tmf);
}

/**
 * pm8001_task_prep_ssp - the dispatcher function, prepare ssp data for an ssp task
 * @pm8001_ha: our hba card information
 * @ccb: the ccb attached to the ssp task
 */
static int pm8001_task_prep_ssp(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	return PM8001_CHIP_DISP->ssp_io_req(pm8001_ha, ccb);
}

/* Find the local port id that's attached to this device */
static int sas_find_local_port_id(struct domain_device *dev)
{
	struct domain_device *pdev = dev->parent;

	/* Directly attached device */
	if (!pdev)
		return dev->port->id;
	/* Otherwise walk up to the topmost parent, which is attached to the
	 * local port.
	 */
	while (pdev) {
		struct domain_device *pdev_p = pdev->parent;
		if (!pdev_p)
			return pdev->port->id;
		pdev = pdev->parent;
	}
	return 0;
}

#define DEV_IS_GONE(pm8001_dev)	\
	((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)))
/**
 * pm8001_task_exec - queue the task (ssp, smp or ata) to the hardware.
 * @task: the task to be executed.
 * @gfp_flags: gfp_flags.
 * @is_tmf: whether this is a task management task.
 * @tmf: the task management IU
 */
static int pm8001_task_exec(struct sas_task *task,
	gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf)
{
	struct domain_device *dev = task->dev;
	struct pm8001_hba_info *pm8001_ha;
	struct pm8001_device *pm8001_dev;
	struct pm8001_port *port = NULL;
	struct sas_task *t = task;
	struct pm8001_ccb_info *ccb;
	u32 tag = 0xdeadbeef, rc = 0, n_elem = 0;
	unsigned long flags = 0;
	enum sas_protocol task_proto = t->task_proto;

	if (!dev->port) {
		struct task_status_struct *tsm = &t->task_status;
		tsm->resp = SAS_TASK_UNDELIVERED;
		tsm->stat = SAS_PHY_DOWN;
		if (dev->dev_type != SAS_SATA_DEV)
			t->task_done(t);
		return 0;
	}
	pm8001_ha = pm8001_find_ha_by_dev(task->dev);
	if (pm8001_ha->controller_fatal_error) {
		struct task_status_struct *ts = &t->task_status;

		ts->resp = SAS_TASK_UNDELIVERED;
		t->task_done(t);
		return 0;
	}
	pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec device\n");
	spin_lock_irqsave(&pm8001_ha->lock, flags);
	do {
		dev = t->dev;
		pm8001_dev = dev->lldd_dev;
		port = &pm8001_ha->port[sas_find_local_port_id(dev)];
		if (DEV_IS_GONE(pm8001_dev) || !port->port_attached) {
			if (sas_protocol_ata(task_proto)) {
				struct task_status_struct *ts = &t->task_status;
				ts->resp = SAS_TASK_UNDELIVERED;
				ts->stat = SAS_PHY_DOWN;

				spin_unlock_irqrestore(&pm8001_ha->lock, flags);
				t->task_done(t);
				spin_lock_irqsave(&pm8001_ha->lock, flags);
				continue;
			} else {
				struct task_status_struct *ts = &t->task_status;
				ts->resp = SAS_TASK_UNDELIVERED;
				ts->stat = SAS_PHY_DOWN;
				t->task_done(t);
				continue;
			}
		}
		rc = pm8001_tag_alloc(pm8001_ha, &tag);
		if (rc)
			goto err_out;
		ccb = &pm8001_ha->ccb_info[tag];

		if (!sas_protocol_ata(task_proto)) {
			if (t->num_scatter) {
				n_elem = dma_map_sg(pm8001_ha->dev,
						    t->scatter,
						    t->num_scatter,
						    t->data_dir);
				if (!n_elem) {
					rc = -ENOMEM;
					goto err_out_tag;
				}
			}
		} else {
			n_elem = t->num_scatter;
		}

		t->lldd_task = ccb;
		ccb->n_elem = n_elem;
		ccb->ccb_tag = tag;
		ccb->task = t;
		ccb->device = pm8001_dev;
		switch (task_proto) {
		case SAS_PROTOCOL_SMP:
			atomic_inc(&pm8001_dev->running_req);
			rc = pm8001_task_prep_smp(pm8001_ha, ccb);
			break;
		case SAS_PROTOCOL_SSP:
			atomic_inc(&pm8001_dev->running_req);
			if (is_tmf)
				rc = pm8001_task_prep_ssp_tm(pm8001_ha,
					ccb, tmf);
			else
				rc = pm8001_task_prep_ssp(pm8001_ha, ccb);
			break;
		case SAS_PROTOCOL_SATA:
		case SAS_PROTOCOL_STP:
			atomic_inc(&pm8001_dev->running_req);
			rc = pm8001_task_prep_ata(pm8001_ha, ccb);
			break;
		default:
			dev_printk(KERN_ERR, pm8001_ha->dev,
				"unknown sas_task proto: 0x%x\n", task_proto);
			rc = -EINVAL;
			break;
		}

		if (rc) {
			pm8001_dbg(pm8001_ha, IO, "rc is %x\n", rc);
			atomic_dec(&pm8001_dev->running_req);
			goto err_out_tag;
		}
		/* TODO: select normal or high priority */
		spin_lock(&t->task_state_lock);
		t->task_state_flags |= SAS_TASK_AT_INITIATOR;
		spin_unlock(&t->task_state_lock);
	} while (0);
	rc = 0;
	goto out_done;

err_out_tag:
	pm8001_tag_free(pm8001_ha, tag);
err_out:
	dev_printk(KERN_ERR, pm8001_ha->dev, "pm8001 exec failed[%d]!\n", rc);
	if (!sas_protocol_ata(task_proto))
		if (n_elem)
			dma_unmap_sg(pm8001_ha->dev, t->scatter, t->num_scatter,
				t->data_dir);
out_done:
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
	return rc;
}

/**
 * pm8001_queue_command - the entry point registered for the upper layer; all
 * IO commands sent to the HBA come through this interface.
 * @task: the task to be executed.
 * @gfp_flags: gfp_flags
 */
int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	return pm8001_task_exec(task, gfp_flags, 0, NULL);
}

/**
 * pm8001_ccb_task_free - free the sg for the ssp and smp command, free the ccb.
 * @pm8001_ha: our hba card information
 * @ccb: the ccb attached to the ssp task
 * @task: the task to be freed.
 * @ccb_idx: ccb index.
 */
void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
	struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx)
{
	struct ata_queued_cmd *qc;
	struct pm8001_device *pm8001_dev;

	if (!ccb->task)
		return;
	if (!sas_protocol_ata(task->task_proto))
		if (ccb->n_elem)
			dma_unmap_sg(pm8001_ha->dev, task->scatter,
				task->num_scatter, task->data_dir);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_resp, 1,
			DMA_FROM_DEVICE);
		dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_req, 1,
			DMA_TO_DEVICE);
		break;

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SSP:
	default:
		/* do nothing */
		break;
	}

	if (sas_protocol_ata(task->task_proto)) {
		// For SCSI/ATA commands uldd_task points to ata_queued_cmd
		qc = task->uldd_task;
		pm8001_dev = ccb->device;
		trace_pm80xx_request_complete(pm8001_ha->id,
			pm8001_dev ? pm8001_dev->attached_phy : PM8001_MAX_PHYS,
			ccb_idx, 0 /* ctlr_opcode not known */,
			qc ? qc->tf.command : 0, // ata opcode
			pm8001_dev ? atomic_read(&pm8001_dev->running_req) : -1);
	}

	task->lldd_task = NULL;
	ccb->task = NULL;
	ccb->ccb_tag = 0xFFFFFFFF;
	ccb->open_retry = 0;
	pm8001_tag_free(pm8001_ha, ccb_idx);
}

/**
 * pm8001_alloc_dev - find an empty pm8001_device
 * @pm8001_ha: our hba card information
 */
static struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
{
	u32 dev;
	for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
		if (pm8001_ha->devices[dev].dev_type == SAS_PHY_UNUSED) {
			pm8001_ha->devices[dev].id = dev;
			return &pm8001_ha->devices[dev];
		}
	}
	if (dev == PM8001_MAX_DEVICES) {
		pm8001_dbg(pm8001_ha, FAIL,
			   "max support %d devices, ignore ..\n",
			   PM8001_MAX_DEVICES);
	}
	return NULL;
}
/**
 * pm8001_find_dev - find a matching pm8001_device
 * @pm8001_ha: our hba card information
 * @device_id: device ID to match against
 */
struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
					u32 device_id)
{
	u32 dev;
	for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
		if (pm8001_ha->devices[dev].device_id == device_id)
			return &pm8001_ha->devices[dev];
	}
	if (dev == PM8001_MAX_DEVICES) {
		pm8001_dbg(pm8001_ha, FAIL, "NO MATCHING DEVICE FOUND !!!\n");
	}
	return NULL;
}

void pm8001_free_dev(struct pm8001_device *pm8001_dev)
{
	u32 id = pm8001_dev->id;
	memset(pm8001_dev, 0, sizeof(*pm8001_dev));
	pm8001_dev->id = id;
	pm8001_dev->dev_type = SAS_PHY_UNUSED;
	pm8001_dev->device_id = PM8001_MAX_DEVICES;
	pm8001_dev->sas_device = NULL;
}

/**
 * pm8001_dev_found_notify - libsas notifies us that a device has been found.
 * @dev: the device structure which the sas layer uses.
 *
 * When libsas finds a sas domain device, it tells the LLDD that the device
 * has been found, and the LLDD then registers the device with the HBA
 * firmware using the "OPC_INB_REG_DEV" command. The HBA assigns a device ID
 * (according to the device's sas address) and returns it to the LLDD. From
 * then on, we communicate with the HBA FW using the device ID the HBA
 * assigned rather than the sas address. This step is necessary for our HBA
 * but may be optional for other HBA drivers.
 */
static int pm8001_dev_found_notify(struct domain_device *dev)
{
	unsigned long flags = 0;
	int res = 0;
	struct pm8001_hba_info *pm8001_ha = NULL;
	struct domain_device *parent_dev = dev->parent;
	struct pm8001_device *pm8001_device;
	DECLARE_COMPLETION_ONSTACK(completion);
	u32 flag = 0;
	pm8001_ha = pm8001_find_ha_by_dev(dev);
	spin_lock_irqsave(&pm8001_ha->lock, flags);

	pm8001_device = pm8001_alloc_dev(pm8001_ha);
	if (!pm8001_device) {
		res = -1;
		goto found_out;
	}
	pm8001_device->sas_device = dev;
	dev->lldd_dev = pm8001_device;
	pm8001_device->dev_type = dev->dev_type;
	pm8001_device->dcompletion = &completion;
	if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
		int phy_id;
		struct ex_phy *phy;
		for (phy_id = 0; phy_id < parent_dev->ex_dev.num_phys;
		     phy_id++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_id];
			if (SAS_ADDR(phy->attached_sas_addr)
				== SAS_ADDR(dev->sas_addr)) {
				pm8001_device->attached_phy = phy_id;
				break;
			}
		}
		if (phy_id == parent_dev->ex_dev.num_phys) {
			pm8001_dbg(pm8001_ha, FAIL,
				   "Error: no attached dev:%016llx at ex:%016llx.\n",
				   SAS_ADDR(dev->sas_addr),
				   SAS_ADDR(parent_dev->sas_addr));
			res = -1;
		}
	} else {
		if (dev->dev_type == SAS_SATA_DEV) {
			pm8001_device->attached_phy =
				dev->rphy->identify.phy_identifier;
			flag = 1; /* directly sata */
		}
	} /* register this device to HBA */
	pm8001_dbg(pm8001_ha, DISC, "Found device\n");
	PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag);
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
	wait_for_completion(&completion);
	if (dev->dev_type == SAS_END_DEVICE)
		msleep(50);
	pm8001_ha->flags = PM8001F_RUN_TIME;
	return 0;
found_out:
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
	return res;
}

int pm8001_dev_found(struct domain_device *dev)
{
	return pm8001_dev_found_notify(dev);
}

void pm8001_task_done(struct sas_task *task)
{
	del_timer(&task->slow_task->timer);
	complete(&task->slow_task->completion);
}

static void pm8001_tmf_timedout(struct timer_list *t)
{
	struct sas_task_slow *slow = from_timer(slow, t, timer);
	struct sas_task *task = slow->task;
	unsigned long flags;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
		complete(&task->slow_task->completion);
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);
}

#define PM8001_TASK_TIMEOUT 20
/**
 * pm8001_exec_internal_tmf_task - execute some task management commands.
 * @dev: the wanted device.
 * @tmf: the task management function to be executed.
 * @para_len: para_len.
 * @parameter: ssp task parameter.
 *
 * When errors or exceptions happen, we may want to do something, for example
 * abort the issued task that resulted in this exception; that is done by
 * calling this function, note that it also uses the task execute interface.
 */
static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
	void *parameter, u32 para_len, struct pm8001_tmf_task *tmf)
{
	int res, retry;
	struct sas_task *task = NULL;
	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	DECLARE_COMPLETION_ONSTACK(completion_setstate);

	for (retry = 0; retry < 3; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = dev;
		task->task_proto = dev->tproto;
		memcpy(&task->ssp_task, parameter, para_len);
		task->task_done = pm8001_task_done;
		task->slow_task->timer.function = pm8001_tmf_timedout;
		task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ;
		add_timer(&task->slow_task->timer);

		res = pm8001_task_exec(task, GFP_KERNEL, 1, tmf);

		if (res) {
			del_timer(&task->slow_task->timer);
			pm8001_dbg(pm8001_ha, FAIL, "Executing internal task failed\n");
			goto ex_err;
		}
		wait_for_completion(&task->slow_task->completion);
		if (pm8001_ha->chip_id != chip_8001) {
			pm8001_dev->setds_completion = &completion_setstate;
			PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
				pm8001_dev, DS_OPERATIONAL);
			wait_for_completion(&completion_setstate);
		}
		res = -TMF_RESP_FUNC_FAILED;
		/* Even if the TMF timed out, return directly. */
		if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
			struct pm8001_ccb_info *ccb = task->lldd_task;

			pm8001_dbg(pm8001_ha, FAIL, "TMF task[%x]timeout.\n",
				   tmf->tmf);

			if (ccb)
				ccb->task = NULL;
			goto ex_err;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == SAS_SAM_STAT_GOOD) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun */
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == SAS_DATA_OVERRUN) {
			pm8001_dbg(pm8001_ha, FAIL, "Blocked task error.\n");
			res = -EMSGSIZE;
			break;
		} else {
			pm8001_dbg(pm8001_ha, EH,
				   " Task to dev %016llx response:0x%x status 0x%x\n",
				   SAS_ADDR(dev->sas_addr),
				   task->task_status.resp,
				   task->task_status.stat);
			sas_free_task(task);
			task = NULL;
		}
	}
ex_err:
	BUG_ON(retry == 3 && task != NULL);
	sas_free_task(task);
	return res;
}

static int
pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_device *pm8001_dev, struct domain_device *dev, u32 flag,
	u32 task_tag)
{
	int res, retry;
	u32 ccb_tag;
	struct pm8001_ccb_info *ccb;
	struct sas_task *task = NULL;

	for (retry = 0; retry < 3; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = dev;
		task->task_proto = dev->tproto;
		task->task_done = pm8001_task_done;
		task->slow_task->timer.function = pm8001_tmf_timedout;
		task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ;
		add_timer(&task->slow_task->timer);

		res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
		if (res)
			goto ex_err;
		ccb = &pm8001_ha->ccb_info[ccb_tag];
		ccb->device = pm8001_dev;
		ccb->ccb_tag = ccb_tag;
		ccb->task = task;
		ccb->n_elem = 0;

		res = PM8001_CHIP_DISP->task_abort(pm8001_ha,
			pm8001_dev, flag, task_tag, ccb_tag);

		if (res) {
			del_timer(&task->slow_task->timer);
			pm8001_dbg(pm8001_ha, FAIL, "Executing internal task failed\n");
			goto ex_err;
		}
		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even if the TMF timed out, return directly. */
		if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
			pm8001_dbg(pm8001_ha, FAIL, "TMF task timeout.\n");
			goto ex_err;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == SAS_SAM_STAT_GOOD) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;

		} else {
			pm8001_dbg(pm8001_ha, EH,
				   " Task to dev %016llx response: 0x%x status 0x%x\n",
				   SAS_ADDR(dev->sas_addr),
				   task->task_status.resp,
				   task->task_status.stat);
			sas_free_task(task);
			task = NULL;
		}
	}
ex_err:
	BUG_ON(retry == 3 && task != NULL);
	sas_free_task(task);
	return res;
}

/**
 * pm8001_dev_gone_notify - see the comments for "pm8001_dev_found_notify"
 * @dev: the device structure which the sas layer uses.
 */
static void pm8001_dev_gone_notify(struct domain_device *dev)
{
	unsigned long flags = 0;
	struct pm8001_hba_info *pm8001_ha;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;

	pm8001_ha = pm8001_find_ha_by_dev(dev);
	spin_lock_irqsave(&pm8001_ha->lock, flags);
	if (pm8001_dev) {
		u32 device_id = pm8001_dev->device_id;

		pm8001_dbg(pm8001_ha, DISC, "found dev[%d:%x] is gone.\n",
			   pm8001_dev->device_id, pm8001_dev->dev_type);
		if (atomic_read(&pm8001_dev->running_req)) {
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
				dev, 1, 0);
			while (atomic_read(&pm8001_dev->running_req))
				msleep(20);
			spin_lock_irqsave(&pm8001_ha->lock, flags);
		}
		PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
		pm8001_free_dev(pm8001_dev);
	} else {
		pm8001_dbg(pm8001_ha, DISC, "Found dev has gone.\n");
	}
	dev->lldd_dev = NULL;
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
}

void pm8001_dev_gone(struct domain_device *dev)
{
	pm8001_dev_gone_notify(dev);
}

static int pm8001_issue_ssp_tmf(struct domain_device *dev,
	u8 *lun, struct pm8001_tmf_task *tmf)
{
	struct sas_ssp_task ssp_task;
	if (!(dev->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	memcpy((u8 *)&ssp_task.LUN, lun, 8);
	return pm8001_exec_internal_tmf_task(dev, &ssp_task, sizeof(ssp_task),
		tmf);
}

/* retry commands by ha, by task and/or by device */
void pm8001_open_reject_retry(
	struct pm8001_hba_info *pm8001_ha,
	struct sas_task *task_to_close,
	struct pm8001_device *device_to_close)
{
	int i;
	unsigned long flags;

	if (pm8001_ha == NULL)
		return;

	spin_lock_irqsave(&pm8001_ha->lock, flags);

	for (i = 0; i < PM8001_MAX_CCB; i++) {
		struct sas_task *task;
		struct task_status_struct *ts;
		struct pm8001_device *pm8001_dev;
		unsigned long flags1;
		u32 tag;
		struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i];

		pm8001_dev = ccb->device;
		if (!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))
			continue;
		if (!device_to_close) {
			uintptr_t d = (uintptr_t)pm8001_dev
				- (uintptr_t)&pm8001_ha->devices;
			if (((d % sizeof(*pm8001_dev)) != 0)
			    || ((d / sizeof(*pm8001_dev)) >= PM8001_MAX_DEVICES))
				continue;
		} else if (pm8001_dev != device_to_close)
			continue;
		tag = ccb->ccb_tag;
		if (!tag || (tag == 0xFFFFFFFF))
			continue;
		task = ccb->task;
		if (!task || !task->task_done)
			continue;
		if (task_to_close && (task != task_to_close))
			continue;
		ts = &task->task_status;
		ts->resp = SAS_TASK_COMPLETE;
		/* Force the midlayer to retry */
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		if (pm8001_dev)
			atomic_dec(&pm8001_dev->running_req);
		spin_lock_irqsave(&task->task_state_lock, flags1);
		task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
		task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
		task->task_state_flags |= SAS_TASK_STATE_DONE;
		if (unlikely((task->task_state_flags
				& SAS_TASK_STATE_ABORTED))) {
			spin_unlock_irqrestore(&task->task_state_lock,
				flags1);
			pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
		} else {
			spin_unlock_irqrestore(&task->task_state_lock,
				flags1);
			pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
			mb();/* in order to force CPU ordering */
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			task->task_done(task);
			spin_lock_irqsave(&pm8001_ha->lock, flags);
		}
	}

	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
}

/**
 * pm8001_I_T_nexus_reset() - reset the initiator/target connection
 * @dev: the device structure for the device to reset.
 *
 * The standard mandates link reset for ATA (type 0) and hard reset for
 * SSP (type 1), only for RECOVERY.
 */
int pm8001_I_T_nexus_reset(struct domain_device *dev)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_device *pm8001_dev;
	struct pm8001_hba_info *pm8001_ha;
	struct sas_phy *phy;

	if (!dev || !dev->lldd_dev)
		return -ENODEV;

	pm8001_dev = dev->lldd_dev;
	pm8001_ha = pm8001_find_ha_by_dev(dev);
	phy = sas_get_local_phy(dev);

	if (dev_is_sata(dev)) {
		if (scsi_is_sas_phy_local(phy)) {
			rc = 0;
			goto out;
		}
		rc = sas_phy_reset(phy, 1);
		if (rc) {
			pm8001_dbg(pm8001_ha, EH,
				   "phy reset failed for device %x\n"
				   "with rc %d\n", pm8001_dev->device_id, rc);
			rc = TMF_RESP_FUNC_FAILED;
			goto out;
		}
		msleep(2000);
		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
			dev, 1, 0);
		if (rc) {
			pm8001_dbg(pm8001_ha, EH, "task abort failed %x\n"
				   "with rc %d\n", pm8001_dev->device_id, rc);
			rc = TMF_RESP_FUNC_FAILED;
		}
	} else {
		rc = sas_phy_reset(phy, 1);
		msleep(2000);
	}
	pm8001_dbg(pm8001_ha, EH, " for device[%x]:rc=%d\n",
		   pm8001_dev->device_id, rc);
out:
	sas_put_local_phy(phy);
	return rc;
}

/*
 * This function handles the IT_NEXUS_XXX event or completion
 * status code for SSP/SATA/SMP I/O requests.
 */
int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_device *pm8001_dev;
	struct pm8001_hba_info *pm8001_ha;
	struct sas_phy *phy;

	if (!dev || !dev->lldd_dev)
		return -1;

	pm8001_dev = dev->lldd_dev;
	pm8001_ha = pm8001_find_ha_by_dev(dev);

	pm8001_dbg(pm8001_ha, EH, "I_T_Nexus handler invoked !!\n");

	phy = sas_get_local_phy(dev);

	if (dev_is_sata(dev)) {
		DECLARE_COMPLETION_ONSTACK(completion_setstate);
		if (scsi_is_sas_phy_local(phy)) {
			rc = 0;
			goto out;
		}
		/* send internal ssp/sata/smp abort command to FW */
		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
			dev, 1, 0);
		msleep(100);

		/* deregister the target device */
		pm8001_dev_gone_notify(dev);
		msleep(200);

		/* send phy reset to hard reset target */
		rc = sas_phy_reset(phy, 1);
		msleep(2000);
		pm8001_dev->setds_completion = &completion_setstate;

		wait_for_completion(&completion_setstate);
	} else {
		/* send internal ssp/sata/smp abort command to FW */
		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
			dev, 1, 0);
		msleep(100);

		/* deregister the target device */
		pm8001_dev_gone_notify(dev);
		msleep(200);

		/* send phy reset to hard reset target */
		rc = sas_phy_reset(phy, 1);
		msleep(2000);
	}
	pm8001_dbg(pm8001_ha, EH, " for device[%x]:rc=%d\n",
		   pm8001_dev->device_id, rc);
out:
	sas_put_local_phy(phy);

	return rc;
}
/* mandatory SAM-3, this task resets the specified LUN */
int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_tmf_task tmf_task;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
	DECLARE_COMPLETION_ONSTACK(completion_setstate);
	if (dev_is_sata(dev)) {
		struct sas_phy *phy = sas_get_local_phy(dev);
		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
			dev, 1, 0);
		rc = sas_phy_reset(phy, 1);
		sas_put_local_phy(phy);
		pm8001_dev->setds_completion = &completion_setstate;
		rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
			pm8001_dev, DS_OPERATIONAL);
		wait_for_completion(&completion_setstate);
	} else {
		tmf_task.tmf = TMF_LU_RESET;
		rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
	}
	/* If failed, fall through to I_T_Nexus reset */
	pm8001_dbg(pm8001_ha, EH, "for device[%x]:rc=%d\n",
		   pm8001_dev->device_id, rc);
	return rc;
}

/* optional SAM-3 */
int pm8001_query_task(struct sas_task *task)
{
	u32 tag = 0xdeadbeef;
	struct scsi_lun lun;
	struct pm8001_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;
	if (unlikely(!task || !task->lldd_task || !task->dev))
		return rc;

	if (task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct domain_device *dev = task->dev;
		struct pm8001_hba_info *pm8001_ha =
			pm8001_find_ha_by_dev(dev);

		int_to_scsilun(cmnd->device->lun, &lun);
		rc = pm8001_find_tag(task, &tag);
		if (rc == 0) {
			rc = TMF_RESP_FUNC_FAILED;
			return rc;
		}
		pm8001_dbg(pm8001_ha, EH, "Query:[%16ph]\n", cmnd->cmnd);
		tmf_task.tmf = TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = tag;

		rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
		switch (rc) {
		/* The task is still in Lun, release it then */
		case TMF_RESP_FUNC_SUCC:
			pm8001_dbg(pm8001_ha, EH,
				   "The task is still in Lun\n");
			break;
		/* The task is not in Lun or failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			pm8001_dbg(pm8001_ha, EH,
				   "The task is not in Lun or failed, reset the phy\n");
			break;
		}
	}
	pr_err("pm80xx: rc= %d\n", rc);
	return rc;
}

/* mandatory SAM-3, still need to free the task/ccb info, abort the specified task */
int pm8001_abort_task(struct sas_task *task)
{
	unsigned long flags;
	u32 tag;
	struct domain_device *dev;
	struct pm8001_hba_info *pm8001_ha;
	struct scsi_lun lun;
	struct pm8001_device *pm8001_dev;
	struct pm8001_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED, ret;
	u32 phy_id, port_id;
	struct sas_task_slow slow_task;

	if (unlikely(!task || !task->lldd_task || !task->dev))
		return TMF_RESP_FUNC_FAILED;

	dev = task->dev;
	pm8001_dev = dev->lldd_dev;
	pm8001_ha = pm8001_find_ha_by_dev(dev);
	phy_id = pm8001_dev->attached_phy;

	if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
		// If the controller is seeing fatal errors
		// abort task will not get a response from the controller
		return TMF_RESP_FUNC_FAILED;
	}

	ret = pm8001_find_tag(task, &tag);
	if (ret == 0) {
		pm8001_info(pm8001_ha, "no tag for task:%p\n", task);
		return TMF_RESP_FUNC_FAILED;
	}
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		return TMF_RESP_FUNC_COMPLETE;
	}
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	if (task->slow_task == NULL) {
		init_completion(&slow_task.completion);
		task->slow_task = &slow_task;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	if (task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = tag;
		rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
		pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
			pm8001_dev->sas_device, 0, tag);
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		   task->task_proto & SAS_PROTOCOL_STP) {
		if (pm8001_ha->chip_id == chip_8006) {
			DECLARE_COMPLETION_ONSTACK(completion_reset);
			DECLARE_COMPLETION_ONSTACK(completion);
			struct pm8001_phy *phy = pm8001_ha->phy + phy_id;
			port_id = phy->port->port_id;

			/* 1. Set Device state as Recovery */
			pm8001_dev->setds_completion = &completion;
			PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
				pm8001_dev, DS_IN_RECOVERY);
			wait_for_completion(&completion);

			/* 2. Send Phy Control Hard Reset */
			reinit_completion(&completion);
			phy->port_reset_status = PORT_RESET_TMO;
			phy->reset_success = false;
			phy->enable_completion = &completion;
			phy->reset_completion = &completion_reset;
			ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
				PHY_HARD_RESET);
			if (ret) {
				phy->enable_completion = NULL;
				phy->reset_completion = NULL;
				goto out;
			}

			/* In the case of the reset timeout/fail we still
			 * abort the command at the firmware. The assumption
			 * here is that the drive is off doing something so
			 * that it's not processing requests, and we want to
			 * avoid getting a completion for this and either
			 * leaking the task in libsas or losing the race and
			 * getting a double free.
			 */
			pm8001_dbg(pm8001_ha, MSG,
				   "Waiting for local phy ctl\n");
			ret = wait_for_completion_timeout(&completion,
					PM8001_TASK_TIMEOUT * HZ);
			if (!ret || !phy->reset_success) {
				phy->enable_completion = NULL;
				phy->reset_completion = NULL;
			} else {
				/* 3. Wait for Port Reset complete or
				 * Port reset TMO
				 */
				pm8001_dbg(pm8001_ha, MSG,
					   "Waiting for Port reset\n");
				ret = wait_for_completion_timeout(
					&completion_reset,
					PM8001_TASK_TIMEOUT * HZ);
				if (!ret)
					phy->reset_completion = NULL;
				WARN_ON(phy->port_reset_status ==
					PORT_RESET_TMO);
				if (phy->port_reset_status == PORT_RESET_TMO) {
					pm8001_dev_gone_notify(dev);
					PM8001_CHIP_DISP->hw_event_ack_req(
						pm8001_ha, 0,
						0x07, /*HW_EVENT_PHY_DOWN ack*/
						port_id, phy_id, 0, 0);
					goto out;
				}
			}

			/*
			 * 4. SATA Abort ALL
			 * we wait for the task to be aborted so that the task
			 * is removed from the ccb. on success the caller is
			 * going to free the task.
			 */
			ret = pm8001_exec_internal_task_abort(pm8001_ha,
				pm8001_dev, pm8001_dev->sas_device, 1, tag);
			if (ret)
				goto out;
			ret = wait_for_completion_timeout(
				&task->slow_task->completion,
				PM8001_TASK_TIMEOUT * HZ);
			if (!ret)
				goto out;

			/* 5. Set Device State as Operational */
			reinit_completion(&completion);
			pm8001_dev->setds_completion = &completion;
			PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
				pm8001_dev, DS_OPERATIONAL);
			wait_for_completion(&completion);
		} else {
			rc = pm8001_exec_internal_task_abort(pm8001_ha,
				pm8001_dev, pm8001_dev->sas_device, 0, tag);
		}
		rc = TMF_RESP_FUNC_COMPLETE;
	} else if (task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
			pm8001_dev->sas_device, 0, tag);

	}
out:
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->slow_task == &slow_task)
		task->slow_task = NULL;
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	if (rc != TMF_RESP_FUNC_COMPLETE)
		pm8001_info(pm8001_ha, "rc= %d\n", rc);
	return rc;
}

int pm8001_abort_task_set(struct domain_device *dev, u8 *lun)
{
	struct pm8001_tmf_task tmf_task;

	tmf_task.tmf = TMF_ABORT_TASK_SET;
	return pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
}

int pm8001_clear_aca(struct domain_device *dev, u8 *lun)
{
	struct pm8001_tmf_task tmf_task;

	tmf_task.tmf = TMF_CLEAR_ACA;
	return pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
}

int pm8001_clear_task_set(struct domain_device *dev, u8 *lun)
{
	struct pm8001_tmf_task tmf_task;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);

	pm8001_dbg(pm8001_ha, EH, "I_T_L_Q clear task set[%x]\n",
		   pm8001_dev->device_id);
	tmf_task.tmf = TMF_CLEAR_TASK_SET;
	return pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
}

void pm8001_port_formed(struct asd_sas_phy *sas_phy)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct pm8001_hba_info *pm8001_ha = sas_ha->lldd_ha;
	struct pm8001_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct pm8001_port *port = phy->port;

	if (!sas_port) {
		pm8001_dbg(pm8001_ha, FAIL, "Received null port\n");
		return;
	}
	sas_port->lldd_port = port;
}