/*
 * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
 *
 * Copyright (c) 2008-2009 USI Co., Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 */

#include <linux/slab.h>
#include "pm8001_sas.h"

/**
 * pm8001_find_tag - find the tag attached to a sas task
 * @task: the task sent to the LLDD
 * @tag: the found tag associated with the task
 */
static int pm8001_find_tag(struct sas_task *task, u32 *tag)
{
	if (task->lldd_task) {
		struct pm8001_ccb_info *ccb;
		ccb = task->lldd_task;
		*tag = ccb->ccb_tag;
		return 1;
	}
	return 0;
}

/**
 * pm8001_tag_free - free a tag that is no longer needed
 * @pm8001_ha: our hba struct
 * @tag: the tag to be freed
 */
void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
{
	void *bitmap = pm8001_ha->tags;
	clear_bit(tag, bitmap);
}
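
/*
 * Illustrative usage sketch (not driver code): callers pair
 * pm8001_tag_alloc() below with pm8001_tag_free() around a ccb, the way
 * pm8001_task_exec() does:
 *
 *	u32 tag;
 *
 *	if (pm8001_tag_alloc(pm8001_ha, &tag))
 *		return -SAS_QUEUE_FULL;
 *	ccb = &pm8001_ha->ccb_info[tag];
 *	...
 *	pm8001_tag_free(pm8001_ha, tag);
 */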

/**
 * pm8001_tag_alloc - allocate an empty tag for a task to use.
 * @pm8001_ha: our hba struct
 * @tag_out: the allocated tag.
 */
inline int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out)
{
	unsigned int tag;
	void *bitmap = pm8001_ha->tags;
	unsigned long flags;

	spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags);
	tag = find_first_zero_bit(bitmap, pm8001_ha->tags_num);
	if (tag >= pm8001_ha->tags_num) {
		spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
		return -SAS_QUEUE_FULL;
	}
	set_bit(tag, bitmap);
	spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
	*tag_out = tag;
	return 0;
}

void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha)
{
	int i;
	for (i = 0; i < pm8001_ha->tags_num; ++i)
		pm8001_tag_free(pm8001_ha, i);
}

/**
 * pm8001_mem_alloc - allocate memory for pm8001.
 * @pdev: pci device.
 * @virt_addr: the allocated virtual address
 * @pphys_addr: the allocated physical (DMA) address
 * @pphys_addr_hi: the high 32 bits of the physical address.
 * @pphys_addr_lo: the low 32 bits of the physical address.
 * @mem_size: memory size.
 * @align: requested alignment, in bytes.
 */
int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
	dma_addr_t *pphys_addr, u32 *pphys_addr_hi,
	u32 *pphys_addr_lo, u32 mem_size, u32 align)
{
	caddr_t mem_virt_alloc;
	dma_addr_t mem_dma_handle;
	u64 phys_align;
	u64 align_offset = 0;
	if (align)
		align_offset = (dma_addr_t)align - 1;
	mem_virt_alloc = dma_alloc_coherent(&pdev->dev, mem_size + align,
				&mem_dma_handle, GFP_KERNEL);
	if (!mem_virt_alloc) {
		pm8001_printk("memory allocation error\n");
		return -1;
	}
	*pphys_addr = mem_dma_handle;
	phys_align = (*pphys_addr + align_offset) & ~align_offset;
	*virt_addr = (void *)mem_virt_alloc + phys_align - *pphys_addr;
	*pphys_addr_hi = upper_32_bits(phys_align);
	*pphys_addr_lo = lower_32_bits(phys_align);
	return 0;
}
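
/*
 * Illustrative example of the alignment arithmetic above (made-up values,
 * not driver code): with align = 32 and a DMA handle ending in 0x104,
 * align_offset is 31, so phys_align = (0x104 + 31) & ~31 = 0x120 and both
 * the returned physical and virtual addresses advance by 0x1c bytes; the
 * extra "align" bytes requested from dma_alloc_coherent() guarantee the
 * aligned region still fits inside the allocation.
 */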

/**
 * pm8001_find_ha_by_dev - from the domain device that comes from the sas
 * layer, find out our hba struct.
 * @dev: the domain device from the sas layer.
 */
static
struct pm8001_hba_info *pm8001_find_ha_by_dev(struct domain_device *dev)
{
	struct sas_ha_struct *sha = dev->port->ha;
	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
	return pm8001_ha;
}

/**
 * pm8001_phy_control - this function is registered in the
 * sas_domain_function_template for libsas to use. Note that it only
 * controls the HBA phys; to control an expander phy, an SMP command must
 * be used instead.
 * @sas_phy: which phy in the HBA phys.
 * @func: the operation.
 * @funcdata: the argument of the operation (e.g. link rates); NULL for most
 * operations.
 */
int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
	void *funcdata)
{
	int rc = 0, phy_id = sas_phy->id;
	struct pm8001_hba_info *pm8001_ha = NULL;
	struct sas_phy_linkrates *rates;
	struct sas_ha_struct *sas_ha;
	struct pm8001_phy *phy;
	DECLARE_COMPLETION_ONSTACK(completion);
	unsigned long flags;
	pm8001_ha = sas_phy->ha->lldd_ha;
	phy = &pm8001_ha->phy[phy_id];
	pm8001_ha->phy[phy_id].enable_completion = &completion;
	switch (func) {
	case PHY_FUNC_SET_LINK_RATE:
		rates = funcdata;
		if (rates->minimum_linkrate) {
			pm8001_ha->phy[phy_id].minimum_linkrate =
				rates->minimum_linkrate;
		}
		if (rates->maximum_linkrate) {
			pm8001_ha->phy[phy_id].maximum_linkrate =
				rates->maximum_linkrate;
		}
		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
			wait_for_completion(&completion);
		}
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
			PHY_LINK_RESET);
		break;
	case PHY_FUNC_HARD_RESET:
		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
			wait_for_completion(&completion);
		}
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
			PHY_HARD_RESET);
		break;
	case PHY_FUNC_LINK_RESET:
		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
			wait_for_completion(&completion);
		}
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
			PHY_LINK_RESET);
		break;
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
			PHY_LINK_RESET);
		break;
	case PHY_FUNC_DISABLE:
		if (pm8001_ha->chip_id != chip_8001) {
			if (pm8001_ha->phy[phy_id].phy_state ==
				PHY_STATE_LINK_UP_SPCV) {
				sas_ha = pm8001_ha->sas;
				sas_phy_disconnected(&phy->sas_phy);
				sas_ha->notify_phy_event(&phy->sas_phy,
					PHYE_LOSS_OF_SIGNAL);
				phy->phy_attached = 0;
			}
		} else {
			if (pm8001_ha->phy[phy_id].phy_state ==
				PHY_STATE_LINK_UP_SPC) {
				sas_ha = pm8001_ha->sas;
				sas_phy_disconnected(&phy->sas_phy);
				sas_ha->notify_phy_event(&phy->sas_phy,
					PHYE_LOSS_OF_SIGNAL);
				phy->phy_attached = 0;
			}
		}
		PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id);
		break;
	case PHY_FUNC_GET_EVENTS:
		spin_lock_irqsave(&pm8001_ha->lock, flags);
		if (pm8001_ha->chip_id == chip_8001) {
			if (-1 == pm8001_bar4_shift(pm8001_ha,
					(phy_id < 4) ? 0x30000 : 0x40000)) {
				spin_unlock_irqrestore(&pm8001_ha->lock, flags);
				return -EINVAL;
			}
		}
		{
			struct sas_phy *phy = sas_phy->phy;
			uint32_t *qp = (uint32_t *)(((char *)
				pm8001_ha->io_mem[2].memvirtaddr)
				+ 0x1034 + (0x4000 * (phy_id & 3)));

			phy->invalid_dword_count = qp[0];
			phy->running_disparity_error_count = qp[1];
			phy->loss_of_dword_sync_count = qp[3];
			phy->phy_reset_problem_count = qp[4];
		}
		if (pm8001_ha->chip_id == chip_8001)
			pm8001_bar4_shift(pm8001_ha, 0);
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		return 0;
	default:
		rc = -EOPNOTSUPP;
	}
	msleep(300);
	return rc;
}
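
/*
 * Descriptive note (assumption about the libsas calling path, not stated in
 * this file): for an HBA-local phy the SAS transport class routes
 * sas_phy_reset(phy, 1) into pm8001_phy_control() above as
 * PHY_FUNC_HARD_RESET and sas_phy_reset(phy, 0) as PHY_FUNC_LINK_RESET;
 * expander phys are reached over SMP instead, as the comment above notes.
 */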

/**
 * pm8001_scan_start - we should enable all HBA phys by sending the phy_start
 * command to HBA.
 * @shost: the scsi host data.
 */
void pm8001_scan_start(struct Scsi_Host *shost)
{
	int i;
	struct pm8001_hba_info *pm8001_ha;
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
	pm8001_ha = sha->lldd_ha;
	/* SAS_RE_INITIALIZATION not available in SPCv/ve */
	if (pm8001_ha->chip_id == chip_8001)
		PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
	for (i = 0; i < pm8001_ha->chip->n_phy; ++i)
		PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
}

int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);

	/* give the phy enabling interrupt event time to come in (1s
	 * is empirically about all it takes) */
	if (time < HZ)
		return 0;
	/* Wait for discovery to finish */
	sas_drain_work(ha);
	return 1;
}

/**
 * pm8001_task_prep_smp - the dispatcher function, prepare data for smp task
 * @pm8001_ha: our hba card information
 * @ccb: the ccb which attached to smp task
 */
static int pm8001_task_prep_smp(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	return PM8001_CHIP_DISP->smp_req(pm8001_ha, ccb);
}

u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag)
{
	struct ata_queued_cmd *qc = task->uldd_task;
	if (qc) {
		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
		    qc->tf.command == ATA_CMD_FPDMA_READ ||
		    qc->tf.command == ATA_CMD_FPDMA_RECV ||
		    qc->tf.command == ATA_CMD_FPDMA_SEND ||
		    qc->tf.command == ATA_CMD_NCQ_NON_DATA) {
			*tag = qc->tag;
			return 1;
		}
	}
	return 0;
}

/**
 * pm8001_task_prep_ata - the dispatcher function, prepare data for sata task
 * @pm8001_ha: our hba card information
 * @ccb: the ccb which attached to sata task
 */
static int pm8001_task_prep_ata(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	return PM8001_CHIP_DISP->sata_req(pm8001_ha, ccb);
}

/**
 * pm8001_task_prep_ssp_tm - the dispatcher function, prepare task management data
 * @pm8001_ha: our hba card information
 * @ccb: the ccb which attached to TM
 * @tmf: the task management IU
 */
static int pm8001_task_prep_ssp_tm(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf)
{
	return PM8001_CHIP_DISP->ssp_tm_req(pm8001_ha, ccb, tmf);
}

/**
 * pm8001_task_prep_ssp - the dispatcher function, prepare ssp data for ssp task
 * @pm8001_ha: our hba card information
 * @ccb: the ccb which attached to ssp task
 */
static int pm8001_task_prep_ssp(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	return PM8001_CHIP_DISP->ssp_io_req(pm8001_ha, ccb);
}

/* Find the local port id that's attached to this device */
static int sas_find_local_port_id(struct domain_device *dev)
{
	struct domain_device *pdev = dev->parent;

	/* Directly attached device */
	if (!pdev)
		return dev->port->id;
	while (pdev) {
		struct domain_device *pdev_p = pdev->parent;
		if (!pdev_p)
			return pdev->port->id;
		pdev = pdev->parent;
	}
	return 0;
}
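
/*
 * Submission flow sketch (descriptive comment, not extra driver logic):
 * libsas submits I/O through pm8001_queue_command(), which calls
 * pm8001_task_exec() below. There a ccb tag is allocated, the scatterlist is
 * DMA-mapped for non-ATA protocols, and one of the pm8001_task_prep_*()
 * helpers above posts the protocol-specific request via PM8001_CHIP_DISP.
 */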

/**
 * pm8001_task_exec - queue a task (ssp, smp or sata) to the hardware.
 * @task: the task to be executed.
 * @gfp_flags: gfp_flags.
 * @is_tmf: if it is a task management task.
 * @tmf: the task management IU
 */
#define DEV_IS_GONE(pm8001_dev)	\
	((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)))
static int pm8001_task_exec(struct sas_task *task,
	gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf)
{
	struct domain_device *dev = task->dev;
	struct pm8001_hba_info *pm8001_ha;
	struct pm8001_device *pm8001_dev;
	struct pm8001_port *port = NULL;
	struct sas_task *t = task;
	struct pm8001_ccb_info *ccb;
	u32 tag = 0xdeadbeef, rc, n_elem = 0;
	unsigned long flags = 0;

	if (!dev->port) {
		struct task_status_struct *tsm = &t->task_status;
		tsm->resp = SAS_TASK_UNDELIVERED;
		tsm->stat = SAS_PHY_DOWN;
		if (dev->dev_type != SAS_SATA_DEV)
			t->task_done(t);
		return 0;
	}
	pm8001_ha = pm8001_find_ha_by_dev(task->dev);
	if (pm8001_ha->controller_fatal_error) {
		struct task_status_struct *ts = &t->task_status;

		ts->resp = SAS_TASK_UNDELIVERED;
		t->task_done(t);
		return 0;
	}
	PM8001_IO_DBG(pm8001_ha, pm8001_printk("pm8001_task_exec device \n "));
	spin_lock_irqsave(&pm8001_ha->lock, flags);
	do {
		dev = t->dev;
		pm8001_dev = dev->lldd_dev;
		port = &pm8001_ha->port[sas_find_local_port_id(dev)];
		if (DEV_IS_GONE(pm8001_dev) || !port->port_attached) {
			if (sas_protocol_ata(t->task_proto)) {
				struct task_status_struct *ts = &t->task_status;
				ts->resp = SAS_TASK_UNDELIVERED;
				ts->stat = SAS_PHY_DOWN;

				spin_unlock_irqrestore(&pm8001_ha->lock, flags);
				t->task_done(t);
				spin_lock_irqsave(&pm8001_ha->lock, flags);
				continue;
			} else {
				struct task_status_struct *ts = &t->task_status;
				ts->resp = SAS_TASK_UNDELIVERED;
				ts->stat = SAS_PHY_DOWN;
				t->task_done(t);
				continue;
			}
		}
		rc = pm8001_tag_alloc(pm8001_ha, &tag);
		if (rc)
			goto err_out;
		ccb = &pm8001_ha->ccb_info[tag];

		if (!sas_protocol_ata(t->task_proto)) {
			if (t->num_scatter) {
				n_elem = dma_map_sg(pm8001_ha->dev,
						t->scatter,
						t->num_scatter,
						t->data_dir);
				if (!n_elem) {
					rc = -ENOMEM;
					goto err_out_tag;
				}
			}
		} else {
			n_elem = t->num_scatter;
		}

		t->lldd_task = ccb;
		ccb->n_elem = n_elem;
		ccb->ccb_tag = tag;
		ccb->task = t;
		ccb->device = pm8001_dev;
		switch (t->task_proto) {
		case SAS_PROTOCOL_SMP:
			rc = pm8001_task_prep_smp(pm8001_ha, ccb);
			break;
		case SAS_PROTOCOL_SSP:
			if (is_tmf)
				rc = pm8001_task_prep_ssp_tm(pm8001_ha,
					ccb, tmf);
			else
				rc = pm8001_task_prep_ssp(pm8001_ha, ccb);
			break;
		case SAS_PROTOCOL_SATA:
		case SAS_PROTOCOL_STP:
			rc = pm8001_task_prep_ata(pm8001_ha, ccb);
			break;
		default:
			dev_printk(KERN_ERR, pm8001_ha->dev,
				"unknown sas_task proto: 0x%x\n",
				t->task_proto);
			rc = -EINVAL;
			break;
		}

		if (rc) {
			PM8001_IO_DBG(pm8001_ha,
				pm8001_printk("rc is %x\n", rc));
			goto err_out_tag;
		}
		/* TODO: select normal or high priority */
		spin_lock(&t->task_state_lock);
		t->task_state_flags |= SAS_TASK_AT_INITIATOR;
		spin_unlock(&t->task_state_lock);
		pm8001_dev->running_req++;
	} while (0);
	rc = 0;
	goto out_done;

err_out_tag:
	pm8001_tag_free(pm8001_ha, tag);
err_out:
	dev_printk(KERN_ERR, pm8001_ha->dev, "pm8001 exec failed[%d]!\n", rc);
	if (!sas_protocol_ata(t->task_proto))
		if (n_elem)
			dma_unmap_sg(pm8001_ha->dev, t->scatter, t->num_scatter,
				t->data_dir);
out_done:
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
	return rc;
}

/**
 * pm8001_queue_command - registered for the upper layer to use; all IO
 * commands sent to the HBA come through this interface.
 * @task: the task to be executed.
 * @gfp_flags: gfp_flags
 */
int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	return pm8001_task_exec(task, gfp_flags, 0, NULL);
}

/**
 * pm8001_ccb_task_free - free the sg for ssp and smp command, free the ccb.
 * @pm8001_ha: our hba card information
 * @ccb: the ccb attached to the task
 * @task: the task to be freed.
 * @ccb_idx: ccb index.
 */
void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
	struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx)
{
	if (!ccb->task)
		return;
	if (!sas_protocol_ata(task->task_proto))
		if (ccb->n_elem)
			dma_unmap_sg(pm8001_ha->dev, task->scatter,
				task->num_scatter, task->data_dir);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_resp, 1,
			DMA_FROM_DEVICE);
		dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_req, 1,
			DMA_TO_DEVICE);
		break;

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SSP:
	default:
		/* do nothing */
		break;
	}
	task->lldd_task = NULL;
	ccb->task = NULL;
	ccb->ccb_tag = 0xFFFFFFFF;
	ccb->open_retry = 0;
	pm8001_tag_free(pm8001_ha, ccb_idx);
}

/**
 * pm8001_alloc_dev - find an empty pm8001_device
 * @pm8001_ha: our hba card information
 */
static struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
{
	u32 dev;
	for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
		if (pm8001_ha->devices[dev].dev_type == SAS_PHY_UNUSED) {
			pm8001_ha->devices[dev].id = dev;
			return &pm8001_ha->devices[dev];
		}
	}
	if (dev == PM8001_MAX_DEVICES) {
		PM8001_FAIL_DBG(pm8001_ha,
			pm8001_printk("max support %d devices, ignore ..\n",
			PM8001_MAX_DEVICES));
	}
	return NULL;
}

/**
 * pm8001_find_dev - find a matching pm8001_device
 * @pm8001_ha: our hba card information
 * @device_id: the device ID assigned by the HBA firmware
 */
struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
					u32 device_id)
{
	u32 dev;
	for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
		if (pm8001_ha->devices[dev].device_id == device_id)
			return &pm8001_ha->devices[dev];
	}
	if (dev == PM8001_MAX_DEVICES) {
		PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("NO MATCHING "
				"DEVICE FOUND !!!\n"));
	}
	return NULL;
}

static void pm8001_free_dev(struct pm8001_device *pm8001_dev)
{
	u32 id = pm8001_dev->id;
	memset(pm8001_dev, 0, sizeof(*pm8001_dev));
	pm8001_dev->id = id;
	pm8001_dev->dev_type = SAS_PHY_UNUSED;
	pm8001_dev->device_id = PM8001_MAX_DEVICES;
	pm8001_dev->sas_device = NULL;
}
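
/*
 * Descriptive note: pm8001_device slots live in the fixed
 * pm8001_ha->devices[PM8001_MAX_DEVICES] array. pm8001_alloc_dev() above
 * hands out the first SAS_PHY_UNUSED slot and pm8001_free_dev() returns a
 * slot to that state, so both lookups are plain linear scans over the array.
 */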

/**
 * pm8001_dev_found_notify - libsas notifies us that a device has been found.
 * @dev: the device structure which the sas layer uses.
 *
 * When libsas finds a sas domain device, it tells the LLDD that the device
 * has been found, and the LLDD then registers the device with the HBA
 * firmware using the "OPC_INB_REG_DEV" command; the HBA assigns a device ID
 * (based on the device's sas address) and returns it to the LLDD. From then
 * on we talk to the HBA FW using the assigned device ID rather than the sas
 * address. This step is necessary for our HBA but optional for other HBA
 * drivers.
 */
static int pm8001_dev_found_notify(struct domain_device *dev)
{
	unsigned long flags = 0;
	int res = 0;
	struct pm8001_hba_info *pm8001_ha = NULL;
	struct domain_device *parent_dev = dev->parent;
	struct pm8001_device *pm8001_device;
	DECLARE_COMPLETION_ONSTACK(completion);
	u32 flag = 0;
	pm8001_ha = pm8001_find_ha_by_dev(dev);
	spin_lock_irqsave(&pm8001_ha->lock, flags);

	pm8001_device = pm8001_alloc_dev(pm8001_ha);
	if (!pm8001_device) {
		res = -1;
		goto found_out;
	}
	pm8001_device->sas_device = dev;
	dev->lldd_dev = pm8001_device;
	pm8001_device->dev_type = dev->dev_type;
	pm8001_device->dcompletion = &completion;
	if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
		int phy_id;
		struct ex_phy *phy;
		for (phy_id = 0; phy_id < parent_dev->ex_dev.num_phys;
			phy_id++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_id];
			if (SAS_ADDR(phy->attached_sas_addr)
				== SAS_ADDR(dev->sas_addr)) {
				pm8001_device->attached_phy = phy_id;
				break;
			}
		}
		if (phy_id == parent_dev->ex_dev.num_phys) {
			PM8001_FAIL_DBG(pm8001_ha,
				pm8001_printk("Error: no attached dev:%016llx"
				" at ex:%016llx.\n", SAS_ADDR(dev->sas_addr),
				SAS_ADDR(parent_dev->sas_addr)));
			res = -1;
		}
	} else {
		if (dev->dev_type == SAS_SATA_DEV) {
			pm8001_device->attached_phy =
				dev->rphy->identify.phy_identifier;
			flag = 1; /* directly sata */
		}
	} /*register this device to HBA*/
	PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found device\n"));
	PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag);
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
	wait_for_completion(&completion);
	if (dev->dev_type == SAS_END_DEVICE)
		msleep(50);
	pm8001_ha->flags = PM8001F_RUN_TIME;
	return 0;
found_out:
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
	return res;
}

int pm8001_dev_found(struct domain_device *dev)
{
	return pm8001_dev_found_notify(dev);
}

void pm8001_task_done(struct sas_task *task)
{
	if (!del_timer(&task->slow_task->timer))
		return;
	complete(&task->slow_task->completion);
}

static void pm8001_tmf_timedout(struct timer_list *t)
{
	struct sas_task_slow *slow = from_timer(slow, t, timer);
	struct sas_task *task = slow->task;

	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	complete(&task->slow_task->completion);
}

#define PM8001_TASK_TIMEOUT 20
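
/*
 * Descriptive note: the internal commands below reuse the libsas slow-task
 * machinery. pm8001_task_done() above completes ->slow_task->completion from
 * the normal completion path, while pm8001_tmf_timedout() fires if the
 * PM8001_TASK_TIMEOUT (20 second) timer expires first and marks the task
 * aborted, which the callers below treat as a timeout.
 */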

/**
 * pm8001_exec_internal_tmf_task - execute some task management commands.
 * @dev: the wanted device.
 * @tmf: which task management function to perform.
 * @para_len: para_len.
 * @parameter: ssp task parameter.
 *
 * When an error or exception happens, we may want to do something, for
 * example abort the issued task which resulted in this exception. That is
 * done by calling this function, which goes through the same task execution
 * interface as normal I/O.
 */
static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
	void *parameter, u32 para_len, struct pm8001_tmf_task *tmf)
{
	int res, retry;
	struct sas_task *task = NULL;
	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	DECLARE_COMPLETION_ONSTACK(completion_setstate);

	for (retry = 0; retry < 3; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = dev;
		task->task_proto = dev->tproto;
		memcpy(&task->ssp_task, parameter, para_len);
		task->task_done = pm8001_task_done;
		task->slow_task->timer.function = pm8001_tmf_timedout;
		task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ;
		add_timer(&task->slow_task->timer);

		res = pm8001_task_exec(task, GFP_KERNEL, 1, tmf);

		if (res) {
			del_timer(&task->slow_task->timer);
			PM8001_FAIL_DBG(pm8001_ha,
				pm8001_printk("Executing internal task "
					"failed\n"));
			goto ex_err;
		}
		wait_for_completion(&task->slow_task->completion);
		if (pm8001_ha->chip_id != chip_8001) {
			pm8001_dev->setds_completion = &completion_setstate;
			PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
				pm8001_dev, 0x01);
			wait_for_completion(&completion_setstate);
		}
		res = -TMF_RESP_FUNC_FAILED;
		/* Even if the TMF timed out, return directly. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				PM8001_FAIL_DBG(pm8001_ha,
					pm8001_printk("TMF task[%x]timeout.\n",
						tmf->tmf));
				goto ex_err;
			}
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == SAM_STAT_GOOD) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun */
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == SAS_DATA_OVERRUN) {
			PM8001_FAIL_DBG(pm8001_ha,
				pm8001_printk("Blocked task error.\n"));
			res = -EMSGSIZE;
			break;
		} else {
			PM8001_EH_DBG(pm8001_ha,
				pm8001_printk(" Task to dev %016llx response:"
					"0x%x status 0x%x\n",
					SAS_ADDR(dev->sas_addr),
					task->task_status.resp,
					task->task_status.stat));
			sas_free_task(task);
			task = NULL;
		}
	}
ex_err:
	BUG_ON(retry == 3 && task != NULL);
	sas_free_task(task);
	return res;
}
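
/*
 * Descriptive note (inferred from the callers in this file): the helper
 * below posts an internal abort through PM8001_CHIP_DISP->task_abort().
 * Callers pass @flag = 0 with a valid @task_tag to abort one outstanding
 * command, and @flag = 1 for an abort-all style request against the device
 * (see pm8001_dev_gone_notify() and the "SATA Abort ALL" step in
 * pm8001_abort_task()).
 */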

static int
pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_device *pm8001_dev, struct domain_device *dev, u32 flag,
	u32 task_tag)
{
	int res, retry;
	u32 ccb_tag;
	struct pm8001_ccb_info *ccb;
	struct sas_task *task = NULL;

	for (retry = 0; retry < 3; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = dev;
		task->task_proto = dev->tproto;
		task->task_done = pm8001_task_done;
		task->slow_task->timer.function = pm8001_tmf_timedout;
		task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ;
		add_timer(&task->slow_task->timer);

		res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
		if (res)
			return res;
		ccb = &pm8001_ha->ccb_info[ccb_tag];
		ccb->device = pm8001_dev;
		ccb->ccb_tag = ccb_tag;
		ccb->task = task;
		ccb->n_elem = 0;

		res = PM8001_CHIP_DISP->task_abort(pm8001_ha,
			pm8001_dev, flag, task_tag, ccb_tag);

		if (res) {
			del_timer(&task->slow_task->timer);
			PM8001_FAIL_DBG(pm8001_ha,
				pm8001_printk("Executing internal task "
					"failed\n"));
			goto ex_err;
		}
		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even if the TMF timed out, return directly. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				PM8001_FAIL_DBG(pm8001_ha,
					pm8001_printk("TMF task timeout.\n"));
				goto ex_err;
			}
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == SAM_STAT_GOOD) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;

		} else {
			PM8001_EH_DBG(pm8001_ha,
				pm8001_printk(" Task to dev %016llx response: "
					"0x%x status 0x%x\n",
					SAS_ADDR(dev->sas_addr),
					task->task_status.resp,
					task->task_status.stat));
			sas_free_task(task);
			task = NULL;
		}
	}
ex_err:
	BUG_ON(retry == 3 && task != NULL);
	sas_free_task(task);
	return res;
}

/**
 * pm8001_dev_gone_notify - see the comments for "pm8001_dev_found_notify"
 * @dev: the device structure which the sas layer uses.
 */
static void pm8001_dev_gone_notify(struct domain_device *dev)
{
	unsigned long flags = 0;
	struct pm8001_hba_info *pm8001_ha;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;

	pm8001_ha = pm8001_find_ha_by_dev(dev);
	spin_lock_irqsave(&pm8001_ha->lock, flags);
	if (pm8001_dev) {
		u32 device_id = pm8001_dev->device_id;

		PM8001_DISC_DBG(pm8001_ha,
			pm8001_printk("found dev[%d:%x] is gone.\n",
			pm8001_dev->device_id, pm8001_dev->dev_type));
		if (pm8001_dev->running_req) {
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
				dev, 1, 0);
			while (pm8001_dev->running_req)
				msleep(20);
			spin_lock_irqsave(&pm8001_ha->lock, flags);
		}
		PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
		pm8001_free_dev(pm8001_dev);
	} else {
		PM8001_DISC_DBG(pm8001_ha,
			pm8001_printk("Found dev has gone.\n"));
	}
	dev->lldd_dev = NULL;
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
}

void pm8001_dev_gone(struct domain_device *dev)
{
	pm8001_dev_gone_notify(dev);
}

static int pm8001_issue_ssp_tmf(struct domain_device *dev,
	u8 *lun, struct pm8001_tmf_task *tmf)
{
	struct sas_ssp_task ssp_task;
	if (!(dev->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	strncpy((u8 *)&ssp_task.LUN, lun, 8);
	return pm8001_exec_internal_tmf_task(dev, &ssp_task, sizeof(ssp_task),
		tmf);
}
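
/*
 * Illustrative usage sketch (mirrors pm8001_lu_reset() below, not extra
 * driver logic): an SSP task management function is issued by filling in a
 * pm8001_tmf_task and handing it to pm8001_issue_ssp_tmf():
 *
 *	struct pm8001_tmf_task tmf_task;
 *
 *	tmf_task.tmf = TMF_LU_RESET;
 *	rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
 */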

/* retry commands by ha, by task and/or by device */
void pm8001_open_reject_retry(
	struct pm8001_hba_info *pm8001_ha,
	struct sas_task *task_to_close,
	struct pm8001_device *device_to_close)
{
	int i;
	unsigned long flags;

	if (pm8001_ha == NULL)
		return;

	spin_lock_irqsave(&pm8001_ha->lock, flags);

	for (i = 0; i < PM8001_MAX_CCB; i++) {
		struct sas_task *task;
		struct task_status_struct *ts;
		struct pm8001_device *pm8001_dev;
		unsigned long flags1;
		u32 tag;
		struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i];

		pm8001_dev = ccb->device;
		if (!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))
			continue;
		if (!device_to_close) {
			uintptr_t d = (uintptr_t)pm8001_dev
					- (uintptr_t)&pm8001_ha->devices;
			if (((d % sizeof(*pm8001_dev)) != 0)
			 || ((d / sizeof(*pm8001_dev)) >= PM8001_MAX_DEVICES))
				continue;
		} else if (pm8001_dev != device_to_close)
			continue;
		tag = ccb->ccb_tag;
		if (!tag || (tag == 0xFFFFFFFF))
			continue;
		task = ccb->task;
		if (!task || !task->task_done)
			continue;
		if (task_to_close && (task != task_to_close))
			continue;
		ts = &task->task_status;
		ts->resp = SAS_TASK_COMPLETE;
		/* Force the midlayer to retry */
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		if (pm8001_dev)
			pm8001_dev->running_req--;
		spin_lock_irqsave(&task->task_state_lock, flags1);
		task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
		task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
		task->task_state_flags |= SAS_TASK_STATE_DONE;
		if (unlikely((task->task_state_flags
				& SAS_TASK_STATE_ABORTED))) {
			spin_unlock_irqrestore(&task->task_state_lock,
				flags1);
			pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
		} else {
			spin_unlock_irqrestore(&task->task_state_lock,
				flags1);
			pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
			mb();/* in order to force CPU ordering */
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			task->task_done(task);
			spin_lock_irqsave(&pm8001_ha->lock, flags);
		}
	}

	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
}

/**
 * Standard mandates link reset for ATA (type 0) and hard reset for
 * SSP (type 1), only for RECOVERY
 */
int pm8001_I_T_nexus_reset(struct domain_device *dev)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_device *pm8001_dev;
	struct pm8001_hba_info *pm8001_ha;
	struct sas_phy *phy;

	if (!dev || !dev->lldd_dev)
		return -ENODEV;

	pm8001_dev = dev->lldd_dev;
	pm8001_ha = pm8001_find_ha_by_dev(dev);
	phy = sas_get_local_phy(dev);

	if (dev_is_sata(dev)) {
		if (scsi_is_sas_phy_local(phy)) {
			rc = 0;
			goto out;
		}
		rc = sas_phy_reset(phy, 1);
		if (rc) {
			PM8001_EH_DBG(pm8001_ha,
				pm8001_printk("phy reset failed for device %x\n"
				"with rc %d\n", pm8001_dev->device_id, rc));
			rc = TMF_RESP_FUNC_FAILED;
			goto out;
		}
		msleep(2000);
		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
			dev, 1, 0);
		if (rc) {
			PM8001_EH_DBG(pm8001_ha,
				pm8001_printk("task abort failed %x\n"
				"with rc %d\n", pm8001_dev->device_id, rc));
			rc = TMF_RESP_FUNC_FAILED;
		}
	} else {
		rc = sas_phy_reset(phy, 1);
		msleep(2000);
	}
	PM8001_EH_DBG(pm8001_ha, pm8001_printk(" for device[%x]:rc=%d\n",
		pm8001_dev->device_id, rc));
out:
	sas_put_local_phy(phy);
	return rc;
}

/*
 * This function handles the IT_NEXUS_XXX event or completion
 * status code for SSP/SATA/SMP I/O requests.
 */
int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_device *pm8001_dev;
	struct pm8001_hba_info *pm8001_ha;
	struct sas_phy *phy;

	if (!dev || !dev->lldd_dev)
		return -1;

	pm8001_dev = dev->lldd_dev;
	pm8001_ha = pm8001_find_ha_by_dev(dev);

	PM8001_EH_DBG(pm8001_ha,
		pm8001_printk("I_T_Nexus handler invoked !!"));

	phy = sas_get_local_phy(dev);

	if (dev_is_sata(dev)) {
		DECLARE_COMPLETION_ONSTACK(completion_setstate);
		if (scsi_is_sas_phy_local(phy)) {
			rc = 0;
			goto out;
		}
		/* send internal ssp/sata/smp abort command to FW */
		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
			dev, 1, 0);
		msleep(100);

		/* deregister the target device */
		pm8001_dev_gone_notify(dev);
		msleep(200);

		/* send phy reset to hard reset target */
		rc = sas_phy_reset(phy, 1);
		msleep(2000);
		pm8001_dev->setds_completion = &completion_setstate;

		wait_for_completion(&completion_setstate);
	} else {
		/* send internal ssp/sata/smp abort command to FW */
		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
			dev, 1, 0);
		msleep(100);

		/* deregister the target device */
		pm8001_dev_gone_notify(dev);
		msleep(200);

		/* send phy reset to hard reset target */
		rc = sas_phy_reset(phy, 1);
		msleep(2000);
	}
	PM8001_EH_DBG(pm8001_ha, pm8001_printk(" for device[%x]:rc=%d\n",
		pm8001_dev->device_id, rc));
out:
	sas_put_local_phy(phy);

	return rc;
}

/* mandatory SAM-3, the task resets the specified LUN */
int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_tmf_task tmf_task;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
	DECLARE_COMPLETION_ONSTACK(completion_setstate);
	if (dev_is_sata(dev)) {
		struct sas_phy *phy = sas_get_local_phy(dev);
		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
			dev, 1, 0);
		rc = sas_phy_reset(phy, 1);
		sas_put_local_phy(phy);
		pm8001_dev->setds_completion = &completion_setstate;
		rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
			pm8001_dev, 0x01);
		wait_for_completion(&completion_setstate);
	} else {
		tmf_task.tmf = TMF_LU_RESET;
		rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
	}
	/* If failed, fall-through I_T_Nexus reset */
	PM8001_EH_DBG(pm8001_ha, pm8001_printk("for device[%x]:rc=%d\n",
		pm8001_dev->device_id, rc));
	return rc;
}

/* optional SAM-3 */
int pm8001_query_task(struct sas_task *task)
{
	u32 tag = 0xdeadbeef;
	int i = 0;
	struct scsi_lun lun;
	struct pm8001_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;
	if (unlikely(!task || !task->lldd_task || !task->dev))
		return rc;

	if (task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct domain_device *dev = task->dev;
		struct pm8001_hba_info *pm8001_ha =
			pm8001_find_ha_by_dev(dev);

		int_to_scsilun(cmnd->device->lun, &lun);
		rc = pm8001_find_tag(task, &tag);
		if (rc == 0) {
			rc = TMF_RESP_FUNC_FAILED;
			return rc;
		}
		PM8001_EH_DBG(pm8001_ha, pm8001_printk("Query:["));
		for (i = 0; i < 16; i++)
			printk(KERN_INFO "%02x ", cmnd->cmnd[i]);
		printk(KERN_INFO "]\n");
		tmf_task.tmf = TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = tag;

		rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
		switch (rc) {
		/* The task is still in Lun, release it then */
		case TMF_RESP_FUNC_SUCC:
			PM8001_EH_DBG(pm8001_ha,
				pm8001_printk("The task is still in Lun\n"));
			break;
		/* The task is not in Lun or failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			PM8001_EH_DBG(pm8001_ha,
				pm8001_printk("The task is not in Lun or failed,"
				" reset the phy\n"));
			break;
		}
	}
	pm8001_printk(":rc= %d\n", rc);
	return rc;
}

/* mandatory SAM-3, still need free task/ccb info, abort the specified task */
int pm8001_abort_task(struct sas_task *task)
{
	unsigned long flags;
	u32 tag;
	struct domain_device *dev;
	struct pm8001_hba_info *pm8001_ha;
	struct scsi_lun lun;
	struct pm8001_device *pm8001_dev;
	struct pm8001_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED, ret;
	u32 phy_id;
	struct sas_task_slow slow_task;
	if (unlikely(!task || !task->lldd_task || !task->dev))
		return TMF_RESP_FUNC_FAILED;
	dev = task->dev;
	pm8001_dev = dev->lldd_dev;
	pm8001_ha = pm8001_find_ha_by_dev(dev);
	phy_id = pm8001_dev->attached_phy;
	rc = pm8001_find_tag(task, &tag);
	if (rc == 0) {
		pm8001_printk("no tag for task:%p\n", task);
		return TMF_RESP_FUNC_FAILED;
	}
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		return TMF_RESP_FUNC_COMPLETE;
	}
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	if (task->slow_task == NULL) {
		init_completion(&slow_task.completion);
		task->slow_task = &slow_task;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	if (task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = tag;
		rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
		pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
			pm8001_dev->sas_device, 0, tag);
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		task->task_proto & SAS_PROTOCOL_STP) {
		if (pm8001_ha->chip_id == chip_8006) {
			DECLARE_COMPLETION_ONSTACK(completion_reset);
			DECLARE_COMPLETION_ONSTACK(completion);
			struct pm8001_phy *phy = pm8001_ha->phy + phy_id;

			/* 1. Set Device state as Recovery */
			pm8001_dev->setds_completion = &completion;
			PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
				pm8001_dev, 0x03);
			wait_for_completion(&completion);

			/* 2. Send Phy Control Hard Reset */
			reinit_completion(&completion);
			phy->reset_success = false;
			phy->enable_completion = &completion;
			phy->reset_completion = &completion_reset;
			ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
				PHY_HARD_RESET);
			if (ret)
				goto out;
			PM8001_MSG_DBG(pm8001_ha,
				pm8001_printk("Waiting for local phy ctl\n"));
			wait_for_completion(&completion);
			if (!phy->reset_success)
				goto out;

			/* 3. Wait for Port Reset complete / Port reset TMO */
			PM8001_MSG_DBG(pm8001_ha,
				pm8001_printk("Waiting for Port reset\n"));
			wait_for_completion(&completion_reset);
			if (phy->port_reset_status) {
				pm8001_dev_gone_notify(dev);
				goto out;
			}

			/*
			 * 4. SATA Abort ALL
			 * we wait for the task to be aborted so that the task
			 * is removed from the ccb. on success the caller is
			 * going to free the task.
			 */
			ret = pm8001_exec_internal_task_abort(pm8001_ha,
				pm8001_dev, pm8001_dev->sas_device, 1, tag);
			if (ret)
				goto out;
			ret = wait_for_completion_timeout(
				&task->slow_task->completion,
				PM8001_TASK_TIMEOUT * HZ);
			if (!ret)
				goto out;

			/* 5. Set Device State as Operational */
			reinit_completion(&completion);
			pm8001_dev->setds_completion = &completion;
			PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
				pm8001_dev, 0x01);
			wait_for_completion(&completion);
		} else {
			rc = pm8001_exec_internal_task_abort(pm8001_ha,
				pm8001_dev, pm8001_dev->sas_device, 0, tag);
		}
		rc = TMF_RESP_FUNC_COMPLETE;
	} else if (task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
			pm8001_dev->sas_device, 0, tag);

	}
out:
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->slow_task == &slow_task)
		task->slow_task = NULL;
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	if (rc != TMF_RESP_FUNC_COMPLETE)
		pm8001_printk("rc= %d\n", rc);
	return rc;
}

int pm8001_abort_task_set(struct domain_device *dev, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_tmf_task tmf_task;

	tmf_task.tmf = TMF_ABORT_TASK_SET;
	rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
	return rc;
}

int pm8001_clear_aca(struct domain_device *dev, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_tmf_task tmf_task;

	tmf_task.tmf = TMF_CLEAR_ACA;
	rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);

	return rc;
}

int pm8001_clear_task_set(struct domain_device *dev, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_tmf_task tmf_task;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);

	PM8001_EH_DBG(pm8001_ha,
		pm8001_printk("I_T_L_Q clear task set[%x]\n",
			pm8001_dev->device_id));
	tmf_task.tmf = TMF_CLEAR_TASK_SET;
	rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
	return rc;
}