/*
 * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
 *
 * Copyright (c) 2008-2009 USI Co., Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 */

#include <linux/slab.h>
#include "pm8001_sas.h"

/**
 * pm8001_find_tag - find the tag that belongs to a sas task
 * @task: the task sent to the LLDD
 * @tag: the found tag associated with the task
 */
static int pm8001_find_tag(struct sas_task *task, u32 *tag)
{
	if (task->lldd_task) {
		struct pm8001_ccb_info *ccb;
		ccb = task->lldd_task;
		*tag = ccb->ccb_tag;
		return 1;
	}
	return 0;
}

/**
 * pm8001_tag_free - free a tag that is no longer needed
 * @pm8001_ha: our hba struct
 * @tag: the tag to release
 */
void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
{
	void *bitmap = pm8001_ha->tags;
	clear_bit(tag, bitmap);
}

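/*
 * Tag usage in this file (an informal sketch based on the helpers above and
 * below, not an additional API): a caller takes a free tag, uses it to index
 * pm8001_ha->ccb_info[] while the request is in flight, and returns it once
 * the ccb is torn down, e.g.:
 *
 *	u32 tag;
 *
 *	if (pm8001_tag_alloc(pm8001_ha, &tag))
 *		return -SAS_QUEUE_FULL;
 *	ccb = &pm8001_ha->ccb_info[tag];
 *	...
 *	pm8001_tag_free(pm8001_ha, tag);
 */
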
/**
 * pm8001_tag_alloc - allocate an empty tag for a task to use.
 * @pm8001_ha: our hba struct
 * @tag_out: the allocated tag.
 */
inline int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out)
{
	unsigned int tag;
	void *bitmap = pm8001_ha->tags;
	unsigned long flags;

	spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags);
	tag = find_first_zero_bit(bitmap, pm8001_ha->tags_num);
	if (tag >= pm8001_ha->tags_num) {
		spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
		return -SAS_QUEUE_FULL;
	}
	set_bit(tag, bitmap);
	spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
	*tag_out = tag;
	return 0;
}

void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha)
{
	int i;
	for (i = 0; i < pm8001_ha->tags_num; ++i)
		pm8001_tag_free(pm8001_ha, i);
}

/**
 * pm8001_mem_alloc - allocate memory for pm8001.
 * @pdev: pci device.
 * @virt_addr: the allocated virtual address
 * @pphys_addr: the allocated physical (DMA) address
 * @pphys_addr_hi: the high 32 bits of the physical address
 * @pphys_addr_lo: the low 32 bits of the physical address
 * @mem_size: memory size.
 * @align: requested alignment, in bytes.
 */
int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
	dma_addr_t *pphys_addr, u32 *pphys_addr_hi,
	u32 *pphys_addr_lo, u32 mem_size, u32 align)
{
	caddr_t mem_virt_alloc;
	dma_addr_t mem_dma_handle;
	u64 phys_align;
	u64 align_offset = 0;
	if (align)
		align_offset = (dma_addr_t)align - 1;
	mem_virt_alloc = dma_alloc_coherent(&pdev->dev, mem_size + align,
				&mem_dma_handle, GFP_KERNEL);
	if (!mem_virt_alloc) {
		pr_err("pm80xx: memory allocation error\n");
		return -1;
	}
	*pphys_addr = mem_dma_handle;
	/* Round the DMA address up to the requested alignment and shift the
	 * returned virtual address by the same amount. */
	phys_align = (*pphys_addr + align_offset) & ~align_offset;
	*virt_addr = (void *)mem_virt_alloc + phys_align - *pphys_addr;
	*pphys_addr_hi = upper_32_bits(phys_align);
	*pphys_addr_lo = lower_32_bits(phys_align);
	return 0;
}
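/*
 * A worked example of the round-up in pm8001_mem_alloc() (a sketch; the
 * arithmetic assumes a power-of-two @align):
 *
 *	align          = 0x100
 *	align_offset   = 0x0ff
 *	mem_dma_handle = 0x80001234
 *	phys_align     = (0x80001234 + 0x0ff) & ~0x0ff = 0x80001300
 *	*virt_addr     = mem_virt_alloc + (0x80001300 - 0x80001234)
 *
 * The extra @align bytes requested from dma_alloc_coherent() keep the
 * rounded-up address inside the allocation.
 */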
/**
 * pm8001_find_ha_by_dev - find our hba struct from a domain device that
 * comes from the sas layer.
 * @dev: the domain device from the sas layer.
 */
static
struct pm8001_hba_info *pm8001_find_ha_by_dev(struct domain_device *dev)
{
	struct sas_ha_struct *sha = dev->port->ha;
	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
	return pm8001_ha;
}

/**
 * pm8001_phy_control - registered in the sas_domain_function_template for
 * libsas to use. Note: this only controls HBA phys; to control an expander
 * phy, use an SMP command instead.
 * @sas_phy: which phy in the HBA phys.
 * @func: the operation.
 * @funcdata: operation-specific data (the requested link rates for
 * PHY_FUNC_SET_LINK_RATE), otherwise NULL.
 */
int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
	void *funcdata)
{
	int rc = 0, phy_id = sas_phy->id;
	struct pm8001_hba_info *pm8001_ha = NULL;
	struct sas_phy_linkrates *rates;
	struct sas_ha_struct *sas_ha;
	struct pm8001_phy *phy;
	DECLARE_COMPLETION_ONSTACK(completion);
	unsigned long flags;
	pm8001_ha = sas_phy->ha->lldd_ha;
	phy = &pm8001_ha->phy[phy_id];
	pm8001_ha->phy[phy_id].enable_completion = &completion;
	switch (func) {
	case PHY_FUNC_SET_LINK_RATE:
		rates = funcdata;
		if (rates->minimum_linkrate) {
			pm8001_ha->phy[phy_id].minimum_linkrate =
				rates->minimum_linkrate;
		}
		if (rates->maximum_linkrate) {
			pm8001_ha->phy[phy_id].maximum_linkrate =
				rates->maximum_linkrate;
		}
		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
			wait_for_completion(&completion);
		}
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
			PHY_LINK_RESET);
		break;
	case PHY_FUNC_HARD_RESET:
		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
			wait_for_completion(&completion);
		}
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
			PHY_HARD_RESET);
		break;
	case PHY_FUNC_LINK_RESET:
		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
			wait_for_completion(&completion);
		}
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
			PHY_LINK_RESET);
		break;
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
			PHY_LINK_RESET);
		break;
	case PHY_FUNC_DISABLE:
		if (pm8001_ha->chip_id != chip_8001) {
			if (pm8001_ha->phy[phy_id].phy_state ==
				PHY_STATE_LINK_UP_SPCV) {
				sas_ha = pm8001_ha->sas;
				sas_phy_disconnected(&phy->sas_phy);
				sas_ha->notify_phy_event(&phy->sas_phy,
					PHYE_LOSS_OF_SIGNAL);
				phy->phy_attached = 0;
			}
		} else {
			if (pm8001_ha->phy[phy_id].phy_state ==
				PHY_STATE_LINK_UP_SPC) {
				sas_ha = pm8001_ha->sas;
				sas_phy_disconnected(&phy->sas_phy);
				sas_ha->notify_phy_event(&phy->sas_phy,
					PHYE_LOSS_OF_SIGNAL);
				phy->phy_attached = 0;
			}
		}
		PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id);
		break;
	case PHY_FUNC_GET_EVENTS:
		spin_lock_irqsave(&pm8001_ha->lock, flags);
		if (pm8001_ha->chip_id == chip_8001) {
			if (-1 == pm8001_bar4_shift(pm8001_ha,
					(phy_id < 4) ? 0x30000 : 0x40000)) {
				spin_unlock_irqrestore(&pm8001_ha->lock, flags);
				return -EINVAL;
			}
		}
		{
			struct sas_phy *phy = sas_phy->phy;
			uint32_t *qp = (uint32_t *)(((char *)
				pm8001_ha->io_mem[2].memvirtaddr)
				+ 0x1034 + (0x4000 * (phy_id & 3)));

			phy->invalid_dword_count = qp[0];
			phy->running_disparity_error_count = qp[1];
			phy->loss_of_dword_sync_count = qp[3];
			phy->phy_reset_problem_count = qp[4];
		}
		if (pm8001_ha->chip_id == chip_8001)
			pm8001_bar4_shift(pm8001_ha, 0);
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		return 0;
	default:
		PM8001_DEVIO_DBG(pm8001_ha,
			pm8001_printk("func 0x%x\n", func));
		rc = -EOPNOTSUPP;
	}
	msleep(300);
	return rc;
}

/**
 * pm8001_scan_start - enable all HBA phys by sending phy_start commands
 * to the HBA.
 * @shost: the scsi host data.
 */
void pm8001_scan_start(struct Scsi_Host *shost)
{
	int i;
	struct pm8001_hba_info *pm8001_ha;
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
	pm8001_ha = sha->lldd_ha;
	/* SAS_RE_INITIALIZATION not available in SPCv/ve */
	if (pm8001_ha->chip_id == chip_8001)
		PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
	for (i = 0; i < pm8001_ha->chip->n_phy; ++i)
		PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
}

int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);

	/* give the phy enabling interrupt event time to come in (1s
	 * is empirically about all it takes) */
	if (time < HZ)
		return 0;
	/* Wait for discovery to finish */
	sas_drain_work(ha);
	return 1;
}

/**
 * pm8001_task_prep_smp - the dispatcher function, prepare data for an smp task
 * @pm8001_ha: our hba card information
 * @ccb: the ccb attached to the smp task
 */
static int pm8001_task_prep_smp(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	return PM8001_CHIP_DISP->smp_req(pm8001_ha, ccb);
}

u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag)
{
	struct ata_queued_cmd *qc = task->uldd_task;
	if (qc) {
		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
		    qc->tf.command == ATA_CMD_FPDMA_READ ||
		    qc->tf.command == ATA_CMD_FPDMA_RECV ||
		    qc->tf.command == ATA_CMD_FPDMA_SEND ||
		    qc->tf.command == ATA_CMD_NCQ_NON_DATA) {
			*tag = qc->tag;
			return 1;
		}
	}
	return 0;
}

/**
 * pm8001_task_prep_ata - the dispatcher function, prepare data for a sata task
 * @pm8001_ha: our hba card information
 * @ccb: the ccb attached to the sata task
 */
static int pm8001_task_prep_ata(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	return PM8001_CHIP_DISP->sata_req(pm8001_ha, ccb);
}

/**
 * pm8001_task_prep_ssp_tm - the dispatcher function, prepare task management data
 * @pm8001_ha: our hba card information
 * @ccb: the ccb attached to the TM
 * @tmf: the task management IU
 */
static int pm8001_task_prep_ssp_tm(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf)
{
	return PM8001_CHIP_DISP->ssp_tm_req(pm8001_ha, ccb, tmf);
}

/**
 * pm8001_task_prep_ssp - the dispatcher function, prepare ssp data for an ssp task
 * @pm8001_ha: our hba card information
 * @ccb: the ccb attached to the ssp task
 */
static int pm8001_task_prep_ssp(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	return PM8001_CHIP_DISP->ssp_io_req(pm8001_ha, ccb);
}

/* Find the local port id that's attached to this device */
static int sas_find_local_port_id(struct domain_device *dev)
{
	struct domain_device *pdev = dev->parent;

	/* Directly attached device */
	if (!pdev)
		return dev->port->id;
	while (pdev) {
		struct domain_device *pdev_p = pdev->parent;
		if (!pdev_p)
			return pdev->port->id;
		pdev = pdev->parent;
	}
	return 0;
}

/**
 * pm8001_task_exec - queue a task (ssp, smp or sata) to the hardware.
 * @task: the task to be executed.
 * @gfp_flags: gfp_flags.
 * @is_tmf: whether this is a task management task.
 * @tmf: the task management IU
 */
#define DEV_IS_GONE(pm8001_dev)	\
	((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)))
static int pm8001_task_exec(struct sas_task *task,
	gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf)
{
	struct domain_device *dev = task->dev;
	struct pm8001_hba_info *pm8001_ha;
	struct pm8001_device *pm8001_dev;
	struct pm8001_port *port = NULL;
	struct sas_task *t = task;
	struct pm8001_ccb_info *ccb;
	u32 tag = 0xdeadbeef, rc = 0, n_elem = 0;
	unsigned long flags = 0;
	enum sas_protocol task_proto = t->task_proto;

	if (!dev->port) {
		struct task_status_struct *tsm = &t->task_status;
		tsm->resp = SAS_TASK_UNDELIVERED;
		tsm->stat = SAS_PHY_DOWN;
		if (dev->dev_type != SAS_SATA_DEV)
			t->task_done(t);
		return 0;
	}
	pm8001_ha = pm8001_find_ha_by_dev(task->dev);
	if (pm8001_ha->controller_fatal_error) {
		struct task_status_struct *ts = &t->task_status;

		ts->resp = SAS_TASK_UNDELIVERED;
		t->task_done(t);
		return 0;
	}
	PM8001_IO_DBG(pm8001_ha, pm8001_printk("pm8001_task_exec device\n"));
	spin_lock_irqsave(&pm8001_ha->lock, flags);
	do {
		dev = t->dev;
		pm8001_dev = dev->lldd_dev;
		port = &pm8001_ha->port[sas_find_local_port_id(dev)];
		if (DEV_IS_GONE(pm8001_dev) || !port->port_attached) {
			if (sas_protocol_ata(task_proto)) {
				struct task_status_struct *ts = &t->task_status;
				ts->resp = SAS_TASK_UNDELIVERED;
				ts->stat = SAS_PHY_DOWN;

				spin_unlock_irqrestore(&pm8001_ha->lock, flags);
				t->task_done(t);
				spin_lock_irqsave(&pm8001_ha->lock, flags);
				continue;
			} else {
				struct task_status_struct *ts = &t->task_status;
				ts->resp = SAS_TASK_UNDELIVERED;
				ts->stat = SAS_PHY_DOWN;
				t->task_done(t);
				continue;
			}
		}
		rc = pm8001_tag_alloc(pm8001_ha, &tag);
		if (rc)
			goto err_out;
		ccb = &pm8001_ha->ccb_info[tag];

		if (!sas_protocol_ata(task_proto)) {
			if (t->num_scatter) {
				n_elem = dma_map_sg(pm8001_ha->dev,
						t->scatter,
						t->num_scatter,
						t->data_dir);
				if (!n_elem) {
					rc = -ENOMEM;
					goto err_out_tag;
				}
			}
		} else {
			n_elem = t->num_scatter;
		}

		t->lldd_task = ccb;
		ccb->n_elem = n_elem;
		ccb->ccb_tag = tag;
		ccb->task = t;
		ccb->device = pm8001_dev;
		switch (task_proto) {
		case SAS_PROTOCOL_SMP:
			rc = pm8001_task_prep_smp(pm8001_ha, ccb);
			break;
		case SAS_PROTOCOL_SSP:
			if (is_tmf)
				rc = pm8001_task_prep_ssp_tm(pm8001_ha,
					ccb, tmf);
			else
				rc = pm8001_task_prep_ssp(pm8001_ha, ccb);
			break;
		case SAS_PROTOCOL_SATA:
		case SAS_PROTOCOL_STP:
			rc = pm8001_task_prep_ata(pm8001_ha, ccb);
			break;
		default:
			dev_printk(KERN_ERR, pm8001_ha->dev,
				"unknown sas_task proto: 0x%x\n", task_proto);
			rc = -EINVAL;
			break;
		}

		if (rc) {
			PM8001_IO_DBG(pm8001_ha,
				pm8001_printk("rc is %x\n", rc));
			goto err_out_tag;
		}
		/* TODO: select normal or high priority */
		spin_lock(&t->task_state_lock);
		t->task_state_flags |= SAS_TASK_AT_INITIATOR;
		spin_unlock(&t->task_state_lock);
		pm8001_dev->running_req++;
	} while (0);
	rc = 0;
	goto out_done;

err_out_tag:
	pm8001_tag_free(pm8001_ha, tag);
err_out:
	dev_printk(KERN_ERR, pm8001_ha->dev, "pm8001 exec failed[%d]!\n", rc);
	if (!sas_protocol_ata(task_proto))
		if (n_elem)
			dma_unmap_sg(pm8001_ha->dev, t->scatter, t->num_scatter,
				t->data_dir);
out_done:
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
	return rc;
}
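
/*
 * Lifecycle note (descriptive only): pm8001_task_exec() above allocates the
 * tag, points task->lldd_task at the ccb and hands the request to the chip
 * dispatcher; the matching teardown is pm8001_ccb_task_free() below, which
 * the completion paths are expected to call to unmap the scatterlist, clear
 * task->lldd_task and return the tag to the bitmap.
 */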

/**
 * pm8001_queue_command - registered for the upper layer to use; all IO
 * commands sent to the HBA come through this interface.
 * @task: the task to be executed.
 * @gfp_flags: gfp_flags
 */
int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	return pm8001_task_exec(task, gfp_flags, 0, NULL);
}

/**
 * pm8001_ccb_task_free - free the sg for an ssp or smp command, free the ccb.
 * @pm8001_ha: our hba card information
 * @task: the task to be freed.
 * @ccb: the ccb attached to the task
 * @ccb_idx: ccb index.
 */
void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
	struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx)
{
	if (!ccb->task)
		return;
	if (!sas_protocol_ata(task->task_proto))
		if (ccb->n_elem)
			dma_unmap_sg(pm8001_ha->dev, task->scatter,
				task->num_scatter, task->data_dir);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_resp, 1,
			DMA_FROM_DEVICE);
		dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_req, 1,
			DMA_TO_DEVICE);
		break;

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SSP:
	default:
		/* do nothing */
		break;
	}
	task->lldd_task = NULL;
	ccb->task = NULL;
	ccb->ccb_tag = 0xFFFFFFFF;
	ccb->open_retry = 0;
	pm8001_tag_free(pm8001_ha, ccb_idx);
}

/**
 * pm8001_alloc_dev - find an empty pm8001_device
 * @pm8001_ha: our hba card information
 */
static struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
{
	u32 dev;
	for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
		if (pm8001_ha->devices[dev].dev_type == SAS_PHY_UNUSED) {
			pm8001_ha->devices[dev].id = dev;
			return &pm8001_ha->devices[dev];
		}
	}
	if (dev == PM8001_MAX_DEVICES) {
		PM8001_FAIL_DBG(pm8001_ha,
			pm8001_printk("max support %d devices, ignore ..\n",
				PM8001_MAX_DEVICES));
	}
	return NULL;
}
/**
 * pm8001_find_dev - find a matching pm8001_device
 * @pm8001_ha: our hba card information
 * @device_id: the device ID assigned by the HBA firmware
 */
struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
					u32 device_id)
{
	u32 dev;
	for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
		if (pm8001_ha->devices[dev].device_id == device_id)
			return &pm8001_ha->devices[dev];
	}
	if (dev == PM8001_MAX_DEVICES) {
		PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("NO MATCHING "
				"DEVICE FOUND !!!\n"));
	}
	return NULL;
}

static void pm8001_free_dev(struct pm8001_device *pm8001_dev)
{
	u32 id = pm8001_dev->id;
	memset(pm8001_dev, 0, sizeof(*pm8001_dev));
	pm8001_dev->id = id;
	pm8001_dev->dev_type = SAS_PHY_UNUSED;
	pm8001_dev->device_id = PM8001_MAX_DEVICES;
	pm8001_dev->sas_device = NULL;
}

/**
 * pm8001_dev_found_notify - libsas notifies us that a device has been found.
 * @dev: the device structure that the sas layer uses.
 *
 * When libsas finds a sas domain device, it tells the LLDD that the device
 * has been found, and the LLDD then registers the device with the HBA
 * firmware using the "OPC_INB_REG_DEV" command. The HBA assigns a device ID
 * (based on the device's sas address) and returns it to the LLDD. From then
 * on we talk to the HBA FW using the assigned device ID rather than the sas
 * address. This step is required for our HBA, although it is optional for
 * other HBA drivers.
 */
static int pm8001_dev_found_notify(struct domain_device *dev)
{
	unsigned long flags = 0;
	int res = 0;
	struct pm8001_hba_info *pm8001_ha = NULL;
	struct domain_device *parent_dev = dev->parent;
	struct pm8001_device *pm8001_device;
	DECLARE_COMPLETION_ONSTACK(completion);
	u32 flag = 0;
	pm8001_ha = pm8001_find_ha_by_dev(dev);
	spin_lock_irqsave(&pm8001_ha->lock, flags);

	pm8001_device = pm8001_alloc_dev(pm8001_ha);
	if (!pm8001_device) {
		res = -1;
		goto found_out;
	}
	pm8001_device->sas_device = dev;
	dev->lldd_dev = pm8001_device;
	pm8001_device->dev_type = dev->dev_type;
	pm8001_device->dcompletion = &completion;
	if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
		int phy_id;
		struct ex_phy *phy;
		for (phy_id = 0; phy_id < parent_dev->ex_dev.num_phys;
		     phy_id++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_id];
			if (SAS_ADDR(phy->attached_sas_addr)
				== SAS_ADDR(dev->sas_addr)) {
				pm8001_device->attached_phy = phy_id;
				break;
			}
		}
		if (phy_id == parent_dev->ex_dev.num_phys) {
			PM8001_FAIL_DBG(pm8001_ha,
				pm8001_printk("Error: no attached dev:%016llx"
				" at ex:%016llx.\n", SAS_ADDR(dev->sas_addr),
				SAS_ADDR(parent_dev->sas_addr)));
			res = -1;
		}
	} else {
		if (dev->dev_type == SAS_SATA_DEV) {
			pm8001_device->attached_phy =
				dev->rphy->identify.phy_identifier;
			flag = 1; /* directly attached sata */
		}
	} /* register this device to HBA */
	PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found device\n"));
	PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag);
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
	wait_for_completion(&completion);
	if (dev->dev_type == SAS_END_DEVICE)
		msleep(50);
	pm8001_ha->flags = PM8001F_RUN_TIME;
	return 0;
found_out:
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
	return res;
}

int pm8001_dev_found(struct domain_device *dev)
{
	return pm8001_dev_found_notify(dev);
}

void pm8001_task_done(struct sas_task *task)
{
	if (!del_timer(&task->slow_task->timer))
		return;
	complete(&task->slow_task->completion);
}

static void pm8001_tmf_timedout(struct timer_list *t)
{
	struct sas_task_slow *slow = from_timer(slow, t, timer);
	struct sas_task *task = slow->task;

	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	complete(&task->slow_task->completion);
}

#define PM8001_TASK_TIMEOUT 20
/**
 * pm8001_exec_internal_tmf_task - execute some task management commands.
 * @dev: the wanted device.
 * @parameter: ssp task parameter.
 * @para_len: para_len.
 * @tmf: the task management function to be executed.
 *
 * When an error or exception happens, we may want to do something, for
 * example abort the issued task that led to the exception. This is done by
 * calling this function; note that it goes through the same task execution
 * interface as normal I/O.
 */
static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
	void *parameter, u32 para_len, struct pm8001_tmf_task *tmf)
{
	int res, retry;
	struct sas_task *task = NULL;
	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	DECLARE_COMPLETION_ONSTACK(completion_setstate);

	for (retry = 0; retry < 3; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = dev;
		task->task_proto = dev->tproto;
		memcpy(&task->ssp_task, parameter, para_len);
		task->task_done = pm8001_task_done;
		task->slow_task->timer.function = pm8001_tmf_timedout;
		task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ;
		add_timer(&task->slow_task->timer);

		res = pm8001_task_exec(task, GFP_KERNEL, 1, tmf);

		if (res) {
			del_timer(&task->slow_task->timer);
			PM8001_FAIL_DBG(pm8001_ha,
				pm8001_printk("Executing internal task "
					"failed\n"));
			goto ex_err;
		}
		wait_for_completion(&task->slow_task->completion);
		if (pm8001_ha->chip_id != chip_8001) {
			pm8001_dev->setds_completion = &completion_setstate;
			PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
				pm8001_dev, 0x01);
			wait_for_completion(&completion_setstate);
		}
		res = -TMF_RESP_FUNC_FAILED;
		/* Even if the TMF timed out, return directly. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				PM8001_FAIL_DBG(pm8001_ha,
					pm8001_printk("TMF task[%x]timeout.\n",
						tmf->tmf));
				goto ex_err;
			}
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAM_STAT_GOOD) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun */
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_OVERRUN) {
			PM8001_FAIL_DBG(pm8001_ha,
				pm8001_printk("Blocked task error.\n"));
			res = -EMSGSIZE;
			break;
		} else {
			PM8001_EH_DBG(pm8001_ha,
				pm8001_printk(" Task to dev %016llx response:"
					"0x%x status 0x%x\n",
					SAS_ADDR(dev->sas_addr),
					task->task_status.resp,
					task->task_status.stat));
			sas_free_task(task);
			task = NULL;
		}
	}
ex_err:
	BUG_ON(retry == 3 && task != NULL);
	sas_free_task(task);
	return res;
}
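
/*
 * Result conventions of pm8001_exec_internal_tmf_task() as implemented above
 * (a summary, not an external contract): TMF_RESP_FUNC_COMPLETE on
 * SAM_STAT_GOOD, the residual byte count on SAS_DATA_UNDERRUN, -EMSGSIZE on
 * SAS_DATA_OVERRUN, and -TMF_RESP_FUNC_FAILED if the TMF timed out or every
 * retry ended with an unexpected response.
 */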

static int
pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_device *pm8001_dev, struct domain_device *dev, u32 flag,
	u32 task_tag)
{
	int res, retry;
	u32 ccb_tag;
	struct pm8001_ccb_info *ccb;
	struct sas_task *task = NULL;

	for (retry = 0; retry < 3; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = dev;
		task->task_proto = dev->tproto;
		task->task_done = pm8001_task_done;
		task->slow_task->timer.function = pm8001_tmf_timedout;
		task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ;
		add_timer(&task->slow_task->timer);

		res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
		if (res)
			return res;
		ccb = &pm8001_ha->ccb_info[ccb_tag];
		ccb->device = pm8001_dev;
		ccb->ccb_tag = ccb_tag;
		ccb->task = task;
		ccb->n_elem = 0;

		res = PM8001_CHIP_DISP->task_abort(pm8001_ha,
			pm8001_dev, flag, task_tag, ccb_tag);

		if (res) {
			del_timer(&task->slow_task->timer);
			PM8001_FAIL_DBG(pm8001_ha,
				pm8001_printk("Executing internal task "
					"failed\n"));
			goto ex_err;
		}
		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even if the TMF timed out, return directly. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				PM8001_FAIL_DBG(pm8001_ha,
					pm8001_printk("TMF task timeout.\n"));
				goto ex_err;
			}
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAM_STAT_GOOD) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;

		} else {
			PM8001_EH_DBG(pm8001_ha,
				pm8001_printk(" Task to dev %016llx response: "
					"0x%x status 0x%x\n",
					SAS_ADDR(dev->sas_addr),
					task->task_status.resp,
					task->task_status.stat));
			sas_free_task(task);
			task = NULL;
		}
	}
ex_err:
	BUG_ON(retry == 3 && task != NULL);
	sas_free_task(task);
	return res;
}

/**
 * pm8001_dev_gone_notify - see the comments for "pm8001_dev_found_notify"
 * @dev: the device structure that the sas layer uses.
 */
static void pm8001_dev_gone_notify(struct domain_device *dev)
{
	unsigned long flags = 0;
	struct pm8001_hba_info *pm8001_ha;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;

	pm8001_ha = pm8001_find_ha_by_dev(dev);
	spin_lock_irqsave(&pm8001_ha->lock, flags);
	if (pm8001_dev) {
		u32 device_id = pm8001_dev->device_id;

		PM8001_DISC_DBG(pm8001_ha,
			pm8001_printk("found dev[%d:%x] is gone.\n",
				pm8001_dev->device_id, pm8001_dev->dev_type));
		if (pm8001_dev->running_req) {
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
				dev, 1, 0);
			while (pm8001_dev->running_req)
				msleep(20);
			spin_lock_irqsave(&pm8001_ha->lock, flags);
		}
		PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
		pm8001_free_dev(pm8001_dev);
	} else {
		PM8001_DISC_DBG(pm8001_ha,
			pm8001_printk("Found dev has gone.\n"));
	}
	dev->lldd_dev = NULL;
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
}

void pm8001_dev_gone(struct domain_device *dev)
{
	pm8001_dev_gone_notify(dev);
}

static int pm8001_issue_ssp_tmf(struct domain_device *dev,
	u8 *lun, struct pm8001_tmf_task *tmf)
{
	struct sas_ssp_task ssp_task;
	if (!(dev->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	strncpy((u8 *)&ssp_task.LUN, lun, 8);
	return pm8001_exec_internal_tmf_task(dev, &ssp_task, sizeof(ssp_task),
		tmf);
}

/* retry commands by ha, by task and/or by device */
void pm8001_open_reject_retry(
	struct pm8001_hba_info *pm8001_ha,
	struct sas_task *task_to_close,
	struct pm8001_device *device_to_close)
{
	int i;
	unsigned long flags;

	if (pm8001_ha == NULL)
		return;

	spin_lock_irqsave(&pm8001_ha->lock, flags);

	for (i = 0; i < PM8001_MAX_CCB; i++) {
		struct sas_task *task;
		struct task_status_struct *ts;
		struct pm8001_device *pm8001_dev;
		unsigned long flags1;
		u32 tag;
		struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i];

		pm8001_dev = ccb->device;
		if (!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))
			continue;
		if (!device_to_close) {
			/* Closing all devices: only accept ccbs whose device
			 * pointer lies inside pm8001_ha->devices[]. */
			uintptr_t d = (uintptr_t)pm8001_dev
					- (uintptr_t)&pm8001_ha->devices;
			if (((d % sizeof(*pm8001_dev)) != 0)
			    || ((d / sizeof(*pm8001_dev)) >= PM8001_MAX_DEVICES))
				continue;
		} else if (pm8001_dev != device_to_close)
			continue;
		tag = ccb->ccb_tag;
		if (!tag || (tag == 0xFFFFFFFF))
			continue;
		task = ccb->task;
		if (!task || !task->task_done)
			continue;
		if (task_to_close && (task != task_to_close))
			continue;
		ts = &task->task_status;
		ts->resp = SAS_TASK_COMPLETE;
		/* Force the midlayer to retry */
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		if (pm8001_dev)
			pm8001_dev->running_req--;
		spin_lock_irqsave(&task->task_state_lock, flags1);
		task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
		task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
		task->task_state_flags |= SAS_TASK_STATE_DONE;
		if (unlikely((task->task_state_flags
				& SAS_TASK_STATE_ABORTED))) {
			spin_unlock_irqrestore(&task->task_state_lock,
				flags1);
			pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
		} else {
			spin_unlock_irqrestore(&task->task_state_lock,
				flags1);
			pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
			mb();/* in order to force CPU ordering */
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			task->task_done(task);
			spin_lock_irqsave(&pm8001_ha->lock, flags);
		}
	}

	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
}

/**
 * Standard mandates link reset for ATA (type 0) and hard reset for
 * SSP (type 1), only for RECOVERY
 */
int pm8001_I_T_nexus_reset(struct domain_device *dev)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_device *pm8001_dev;
	struct pm8001_hba_info *pm8001_ha;
	struct sas_phy *phy;

	if (!dev || !dev->lldd_dev)
		return -ENODEV;

	pm8001_dev = dev->lldd_dev;
	pm8001_ha = pm8001_find_ha_by_dev(dev);
	phy = sas_get_local_phy(dev);

	if (dev_is_sata(dev)) {
		if (scsi_is_sas_phy_local(phy)) {
			rc = 0;
			goto out;
		}
		rc = sas_phy_reset(phy, 1);
		if (rc) {
			PM8001_EH_DBG(pm8001_ha,
				pm8001_printk("phy reset failed for device %x\n"
					"with rc %d\n", pm8001_dev->device_id, rc));
			rc = TMF_RESP_FUNC_FAILED;
			goto out;
		}
		msleep(2000);
		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
			dev, 1, 0);
		if (rc) {
			PM8001_EH_DBG(pm8001_ha,
				pm8001_printk("task abort failed %x\n"
					"with rc %d\n", pm8001_dev->device_id, rc));
			rc = TMF_RESP_FUNC_FAILED;
		}
	} else {
		rc = sas_phy_reset(phy, 1);
		msleep(2000);
	}
	PM8001_EH_DBG(pm8001_ha, pm8001_printk(" for device[%x]:rc=%d\n",
		pm8001_dev->device_id, rc));
out:
	sas_put_local_phy(phy);
	return rc;
}

/*
 * pm8001_I_T_nexus_event_handler - handles an IT_NEXUS_XXX event or
 * completion status code for an SSP/SATA/SMP I/O request.
 */
int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_device *pm8001_dev;
	struct pm8001_hba_info *pm8001_ha;
	struct sas_phy *phy;

	if (!dev || !dev->lldd_dev)
		return -1;

	pm8001_dev = dev->lldd_dev;
	pm8001_ha = pm8001_find_ha_by_dev(dev);

	PM8001_EH_DBG(pm8001_ha,
		pm8001_printk("I_T_Nexus handler invoked !!"));

	phy = sas_get_local_phy(dev);

	if (dev_is_sata(dev)) {
		DECLARE_COMPLETION_ONSTACK(completion_setstate);
		if (scsi_is_sas_phy_local(phy)) {
			rc = 0;
			goto out;
		}
		/* send internal ssp/sata/smp abort command to FW */
		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
			dev, 1, 0);
		msleep(100);

		/* deregister the target device */
		pm8001_dev_gone_notify(dev);
		msleep(200);

		/* send phy reset to hard reset target */
		rc = sas_phy_reset(phy, 1);
		msleep(2000);
		pm8001_dev->setds_completion = &completion_setstate;

		wait_for_completion(&completion_setstate);
	} else {
		/* send internal ssp/sata/smp abort command to FW */
		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
			dev, 1, 0);
		msleep(100);

		/* deregister the target device */
		pm8001_dev_gone_notify(dev);
		msleep(200);

		/* send phy reset to hard reset target */
		rc = sas_phy_reset(phy, 1);
		msleep(2000);
	}
	PM8001_EH_DBG(pm8001_ha, pm8001_printk(" for device[%x]:rc=%d\n",
		pm8001_dev->device_id, rc));
out:
	sas_put_local_phy(phy);

	return rc;
}
/* mandatory SAM-3: reset the specified LUN */
int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_tmf_task tmf_task;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
	DECLARE_COMPLETION_ONSTACK(completion_setstate);
	if (dev_is_sata(dev)) {
		struct sas_phy *phy = sas_get_local_phy(dev);
		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
			dev, 1, 0);
		rc = sas_phy_reset(phy, 1);
		sas_put_local_phy(phy);
		pm8001_dev->setds_completion = &completion_setstate;
		rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
			pm8001_dev, 0x01);
		wait_for_completion(&completion_setstate);
	} else {
		tmf_task.tmf = TMF_LU_RESET;
		rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
	}
	/* If this failed, fall through to an I_T_Nexus reset */
	PM8001_EH_DBG(pm8001_ha, pm8001_printk("for device[%x]:rc=%d\n",
		pm8001_dev->device_id, rc));
	return rc;
}

/* optional SAM-3 */
int pm8001_query_task(struct sas_task *task)
{
	u32 tag = 0xdeadbeef;
	int i = 0;
	struct scsi_lun lun;
	struct pm8001_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;
	if (unlikely(!task || !task->lldd_task || !task->dev))
		return rc;

	if (task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct domain_device *dev = task->dev;
		struct pm8001_hba_info *pm8001_ha =
			pm8001_find_ha_by_dev(dev);

		int_to_scsilun(cmnd->device->lun, &lun);
		rc = pm8001_find_tag(task, &tag);
		if (rc == 0) {
			rc = TMF_RESP_FUNC_FAILED;
			return rc;
		}
		PM8001_EH_DBG(pm8001_ha, pm8001_printk("Query:["));
		for (i = 0; i < 16; i++)
			printk(KERN_INFO "%02x ", cmnd->cmnd[i]);
		printk(KERN_INFO "]\n");
		tmf_task.tmf = TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = tag;

		rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
		switch (rc) {
		/* The task is still in Lun, release it then */
		case TMF_RESP_FUNC_SUCC:
			PM8001_EH_DBG(pm8001_ha,
				pm8001_printk("The task is still in Lun\n"));
			break;
		/* The task is not in Lun or failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			PM8001_EH_DBG(pm8001_ha,
				pm8001_printk("The task is not in Lun or failed,"
					" reset the phy\n"));
			break;
		}
	}
	pr_err("pm80xx: rc= %d\n", rc);
	return rc;
}

/* mandatory SAM-3: abort the specified task; the task/ccb info still
 * needs to be freed afterwards */
int pm8001_abort_task(struct sas_task *task)
{
	unsigned long flags;
	u32 tag;
	struct domain_device *dev;
	struct pm8001_hba_info *pm8001_ha;
	struct scsi_lun lun;
	struct pm8001_device *pm8001_dev;
	struct pm8001_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED, ret;
	u32 phy_id;
	struct sas_task_slow slow_task;
	if (unlikely(!task || !task->lldd_task || !task->dev))
		return TMF_RESP_FUNC_FAILED;
	dev = task->dev;
	pm8001_dev = dev->lldd_dev;
	pm8001_ha = pm8001_find_ha_by_dev(dev);
	phy_id = pm8001_dev->attached_phy;
	ret = pm8001_find_tag(task, &tag);
	if (ret == 0) {
		pm8001_printk("no tag for task:%p\n", task);
		return TMF_RESP_FUNC_FAILED;
	}
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		return TMF_RESP_FUNC_COMPLETE;
	}
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	if (task->slow_task == NULL) {
		init_completion(&slow_task.completion);
		task->slow_task = &slow_task;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	if (task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = tag;
		rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
		pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
			pm8001_dev->sas_device, 0, tag);
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		   task->task_proto & SAS_PROTOCOL_STP) {
		if (pm8001_ha->chip_id == chip_8006) {
			DECLARE_COMPLETION_ONSTACK(completion_reset);
			DECLARE_COMPLETION_ONSTACK(completion);
			struct pm8001_phy *phy = pm8001_ha->phy + phy_id;

			/* 1. Set Device state as Recovery */
			pm8001_dev->setds_completion = &completion;
			PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
				pm8001_dev, 0x03);
			wait_for_completion(&completion);

			/* 2. Send Phy Control Hard Reset */
			reinit_completion(&completion);
			phy->port_reset_status = PORT_RESET_TMO;
			phy->reset_success = false;
			phy->enable_completion = &completion;
			phy->reset_completion = &completion_reset;
			ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
				PHY_HARD_RESET);
			if (ret) {
				phy->enable_completion = NULL;
				phy->reset_completion = NULL;
				goto out;
			}

			/* In the case of the reset timeout/fail we still
			 * abort the command at the firmware. The assumption
			 * here is that the drive is off doing something so
			 * that it's not processing requests, and we want to
			 * avoid getting a completion for this and either
			 * leaking the task in libsas or losing the race and
			 * getting a double free.
			 */
			PM8001_MSG_DBG(pm8001_ha,
				pm8001_printk("Waiting for local phy ctl\n"));
			ret = wait_for_completion_timeout(&completion,
				PM8001_TASK_TIMEOUT * HZ);
			if (!ret || !phy->reset_success) {
				phy->enable_completion = NULL;
				phy->reset_completion = NULL;
			} else {
				/* 3. Wait for Port Reset complete or
				 * Port reset TMO
				 */
				PM8001_MSG_DBG(pm8001_ha,
					pm8001_printk("Waiting for Port reset\n"));
				ret = wait_for_completion_timeout(
					&completion_reset,
					PM8001_TASK_TIMEOUT * HZ);
				if (!ret)
					phy->reset_completion = NULL;
				WARN_ON(phy->port_reset_status ==
					PORT_RESET_TMO);
				if (phy->port_reset_status == PORT_RESET_TMO) {
					pm8001_dev_gone_notify(dev);
					goto out;
				}
			}

			/*
			 * 4. SATA Abort ALL
			 * we wait for the task to be aborted so that the task
			 * is removed from the ccb. on success the caller is
			 * going to free the task.
			 */
			ret = pm8001_exec_internal_task_abort(pm8001_ha,
				pm8001_dev, pm8001_dev->sas_device, 1, tag);
			if (ret)
				goto out;
			ret = wait_for_completion_timeout(
				&task->slow_task->completion,
				PM8001_TASK_TIMEOUT * HZ);
			if (!ret)
				goto out;

			/* 5. Set Device State as Operational */
			reinit_completion(&completion);
			pm8001_dev->setds_completion = &completion;
			PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
				pm8001_dev, 0x01);
			wait_for_completion(&completion);
		} else {
			rc = pm8001_exec_internal_task_abort(pm8001_ha,
				pm8001_dev, pm8001_dev->sas_device, 0, tag);
		}
		rc = TMF_RESP_FUNC_COMPLETE;
	} else if (task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
			pm8001_dev->sas_device, 0, tag);

	}
out:
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->slow_task == &slow_task)
		task->slow_task = NULL;
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	if (rc != TMF_RESP_FUNC_COMPLETE)
		pm8001_printk("rc= %d\n", rc);
	return rc;
}

int pm8001_abort_task_set(struct domain_device *dev, u8 *lun)
{
	struct pm8001_tmf_task tmf_task;

	tmf_task.tmf = TMF_ABORT_TASK_SET;
	return pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
}

int pm8001_clear_aca(struct domain_device *dev, u8 *lun)
{
	struct pm8001_tmf_task tmf_task;

	tmf_task.tmf = TMF_CLEAR_ACA;
	return pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
}

int pm8001_clear_task_set(struct domain_device *dev, u8 *lun)
{
	struct pm8001_tmf_task tmf_task;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);

	PM8001_EH_DBG(pm8001_ha,
		pm8001_printk("I_T_L_Q clear task set[%x]\n",
			pm8001_dev->device_id));
	tmf_task.tmf = TMF_CLEAR_TASK_SET;
	return pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
}
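
/*
 * Note: the exported handlers in this file (pm8001_queue_command, the reset
 * and abort routines, and the TMF wrappers above) are not called directly;
 * libsas invokes them through the driver's struct
 * sas_domain_function_template, which is set up during host registration
 * (see pm8001_init.c).
 */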