// SPDX-License-Identifier: GPL-2.0-only
/*
 * Marvell 88SE64xx/88SE94xx main function
 *
 * Copyright 2007 Red Hat, Inc.
 * Copyright 2008 Marvell. <kewei@marvell.com>
 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
 */

#include "mv_sas.h"

static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
{
	if (task->lldd_task) {
		struct mvs_slot_info *slot;
		slot = task->lldd_task;
		*tag = slot->slot_tag;
		return 1;
	}
	return 0;
}

void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
{
	void *bitmap = mvi->tags;
	clear_bit(tag, bitmap);
}

void mvs_tag_free(struct mvs_info *mvi, u32 tag)
{
	mvs_tag_clear(mvi, tag);
}

void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
{
	void *bitmap = mvi->tags;
	set_bit(tag, bitmap);
}

inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
{
	unsigned int index, tag;
	void *bitmap = mvi->tags;

	index = find_first_zero_bit(bitmap, mvi->tags_num);
	tag = index;
	if (tag >= mvi->tags_num)
		return -SAS_QUEUE_FULL;
	mvs_tag_set(mvi, tag);
	*tag_out = tag;
	return 0;
}

void mvs_tag_init(struct mvs_info *mvi)
{
	int i;
	for (i = 0; i < mvi->tags_num; ++i)
		mvs_tag_clear(mvi, i);
}

static struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
{
	unsigned long i = 0, j = 0, hi = 0;
	struct sas_ha_struct *sha = dev->port->ha;
	struct mvs_info *mvi = NULL;
	struct asd_sas_phy *phy;

	while (sha->sas_port[i]) {
		if (sha->sas_port[i] == dev->port) {
			spin_lock(&sha->sas_port[i]->phy_list_lock);
			phy = container_of(sha->sas_port[i]->phy_list.next,
					   struct asd_sas_phy, port_phy_el);
			spin_unlock(&sha->sas_port[i]->phy_list_lock);
			j = 0;
			while (sha->sas_phy[j]) {
				if (sha->sas_phy[j] == phy)
					break;
				j++;
			}
			break;
		}
		i++;
	}
	hi = j/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];

	return mvi;
}

static int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
{
	unsigned long i = 0, j = 0, n = 0, num = 0;
	struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
	struct mvs_info *mvi = mvi_dev->mvi_info;
	struct sas_ha_struct *sha = dev->port->ha;

	while (sha->sas_port[i]) {
		if (sha->sas_port[i] == dev->port) {
			struct asd_sas_phy *phy;

			spin_lock(&sha->sas_port[i]->phy_list_lock);
			list_for_each_entry(phy,
				&sha->sas_port[i]->phy_list, port_phy_el) {
				j = 0;
				while (sha->sas_phy[j]) {
					if (sha->sas_phy[j] == phy)
						break;
					j++;
				}
				phyno[n] = (j >= mvi->chip->n_phy) ?
					   (j - mvi->chip->n_phy) : j;
				num++;
				n++;
			}
			spin_unlock(&sha->sas_port[i]->phy_list_lock);
			break;
		}
		i++;
	}
	return num;
}

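/*
 * Reverse lookup from a hardware register set (SATA taskfile set) back to
 * the mvs_device it was assigned to; returns NULL if no mapped device is
 * using the given reg_set.
 */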
struct mvs_device *mvs_find_dev_by_reg_set(struct mvs_info *mvi,
						u8 reg_set)
{
	u32 dev_no;
	for (dev_no = 0; dev_no < MVS_MAX_DEVICES; dev_no++) {
		if (mvi->devices[dev_no].taskfileset == MVS_ID_NOT_MAPPED)
			continue;

		if (mvi->devices[dev_no].taskfileset == reg_set)
			return &mvi->devices[dev_no];
	}
	return NULL;
}

static inline void mvs_free_reg_set(struct mvs_info *mvi,
				struct mvs_device *dev)
{
	if (!dev) {
		mv_printk("device has been freed.\n");
		return;
	}
	if (dev->taskfileset == MVS_ID_NOT_MAPPED)
		return;
	MVS_CHIP_DISP->free_reg_set(mvi, &dev->taskfileset);
}

static inline u8 mvs_assign_reg_set(struct mvs_info *mvi,
				struct mvs_device *dev)
{
	if (dev->taskfileset != MVS_ID_NOT_MAPPED)
		return 0;
	return MVS_CHIP_DISP->assign_reg_set(mvi, &dev->taskfileset);
}

void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard)
{
	u32 no;
	for_each_phy(phy_mask, phy_mask, no) {
		if (!(phy_mask & 1))
			continue;
		MVS_CHIP_DISP->phy_reset(mvi, no, hard);
	}
}

int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
			void *funcdata)
{
	int rc = 0, phy_id = sas_phy->id;
	u32 tmp, i = 0, hi;
	struct sas_ha_struct *sha = sas_phy->ha;
	struct mvs_info *mvi = NULL;

	while (sha->sas_phy[i]) {
		if (sha->sas_phy[i] == sas_phy)
			break;
		i++;
	}
	hi = i/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];

	switch (func) {
	case PHY_FUNC_SET_LINK_RATE:
		MVS_CHIP_DISP->phy_set_link_rate(mvi, phy_id, funcdata);
		break;

	case PHY_FUNC_HARD_RESET:
		tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id);
		if (tmp & PHY_RST_HARD)
			break;
		MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_HARD_RESET);
		break;

	case PHY_FUNC_LINK_RESET:
		MVS_CHIP_DISP->phy_enable(mvi, phy_id);
		MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_SOFT_RESET);
		break;

	case PHY_FUNC_DISABLE:
		MVS_CHIP_DISP->phy_disable(mvi, phy_id);
		break;
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		rc = -ENOSYS;
	}
	msleep(200);
	return rc;
}

void mvs_set_sas_addr(struct mvs_info *mvi, int port_id, u32 off_lo,
				u32 off_hi, u64 sas_addr)
{
	u32 lo = (u32)sas_addr;
	u32 hi = (u32)(sas_addr>>32);

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_lo);
	MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, lo);
	MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_hi);
	MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, hi);
}

static void mvs_bytes_dmaed(struct mvs_info *mvi, int i, gfp_t gfp_flags)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	if (!phy->phy_attached)
		return;

	if (!(phy->att_dev_info & PORT_DEV_TRGT_MASK)
		&& phy->phy_type & PORT_TYPE_SAS) {
		return;
	}

	sas_notify_phy_event(sas_phy, PHYE_OOB_DONE, gfp_flags);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate = phy->minimum_linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate = phy->maximum_linkrate;
		sphy->maximum_linkrate_hw = MVS_CHIP_DISP->phy_max_link_rate();
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;

		/* direct attached SAS device */
		if (phy->att_dev_info & PORT_SSP_TRGT_MASK) {
			MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
			MVS_CHIP_DISP->write_port_cfg_data(mvi, i, 0x00);
		}
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* Nothing */
	}
	mv_dprintk("phy %d bytes dmaed.\n", i + mvi->id * mvi->chip->n_phy);

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;

	sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, gfp_flags);
}

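/*
 * Async scan entry point: report the already-discovered phys on every
 * controller behind this SCSI host, then set scan_finished so that
 * mvs_scan_finished() below reports completion once libsas work has been
 * drained.
 */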
void mvs_scan_start(struct Scsi_Host *shost)
{
	int i, j;
	unsigned short core_nr;
	struct mvs_info *mvi;
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
	struct mvs_prv_info *mvs_prv = sha->lldd_ha;

	core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;

	for (j = 0; j < core_nr; j++) {
		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
		for (i = 0; i < mvi->chip->n_phy; ++i)
			mvs_bytes_dmaed(mvi, i, GFP_KERNEL);
	}
	mvs_prv->scan_finished = 1;
}

int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
	struct mvs_prv_info *mvs_prv = sha->lldd_ha;

	if (mvs_prv->scan_finished == 0)
		return 0;

	sas_drain_work(sha);
	return 1;
}

static int mvs_task_prep_smp(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei)
{
	int elem, rc, i;
	struct sas_ha_struct *sha = mvi->sas;
	struct sas_task *task = tei->task;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct domain_device *dev = task->dev;
	struct asd_sas_port *sas_port = dev->port;
	struct sas_phy *sphy = dev->phy;
	struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number];
	struct scatterlist *sg_req, *sg_resp;
	u32 req_len, resp_len, tag = tei->tag;
	void *buf_tmp;
	u8 *buf_oaf;
	dma_addr_t buf_tmp_dma;
	void *buf_prd;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];
	u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);

	/*
	 * DMA-map SMP request, response buffers
	 */
	sg_req = &task->smp_task.smp_req;
	elem = dma_map_sg(mvi->dev, sg_req, 1, DMA_TO_DEVICE);
	if (!elem)
		return -ENOMEM;
	req_len = sg_dma_len(sg_req);

	sg_resp = &task->smp_task.smp_resp;
	elem = dma_map_sg(mvi->dev, sg_resp, 1, DMA_FROM_DEVICE);
	if (!elem) {
		rc = -ENOMEM;
		goto err_out;
	}
	resp_len = SB_RFB_MAX;

	/* must be in dwords */
	if ((req_len & 0x3) || (resp_len & 0x3)) {
		rc = -EINVAL;
		goto err_out_2;
	}

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ***** */
	buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table *********************************** */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (the larger the PRD table, the smaller this buffer) */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
	if (mvi->flags & MVF_FLAG_SOC)
		hdr->reserved[0] = 0;

	/*
	 * Fill in TX ring and command slot header
	 */
	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
					TXQ_MODE_I | tag |
					(MVS_PHY_ID << TXQ_PHY_SHIFT));

	hdr->flags |= flags;
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
	hdr->tags = cpu_to_le32(tag);
	hdr->data_len = 0;

	/* generate open address frame hdr (first 12 bytes) */
	/* initiator, SMP, ftype 1h */
	buf_oaf[0] = (1 << 7) | (PROTOCOL_SMP << 4) | 0x01;
	buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf;
	*(u16 *)(buf_oaf + 2) = 0xFFFF;		/* SAS SPEC */
	memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in PRD (scatter/gather) table, if any */
	MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);

	return 0;

err_out_2:
	dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1,
		     DMA_FROM_DEVICE);
err_out:
	dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1,
		     DMA_TO_DEVICE);
	return rc;
}

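/*
 * For FPDMA (NCQ) commands the hardware tag must match the libata queued
 * command tag, which is carried in bits 7:3 of the FIS sector count (see
 * mvs_task_prep_ata() below); fetch it from the queued command, if any.
 */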
static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
{
	struct ata_queued_cmd *qc = task->uldd_task;

	if (qc) {
		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
		    qc->tf.command == ATA_CMD_FPDMA_READ ||
		    qc->tf.command == ATA_CMD_FPDMA_RECV ||
		    qc->tf.command == ATA_CMD_FPDMA_SEND ||
		    qc->tf.command == ATA_CMD_NCQ_NON_DATA) {
			*tag = qc->tag;
			return 1;
		}
	}

	return 0;
}

static int mvs_task_prep_ata(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei)
{
	struct sas_task *task = tei->task;
	struct domain_device *dev = task->dev;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct asd_sas_port *sas_port = dev->port;
	struct mvs_slot_info *slot;
	void *buf_prd;
	u32 tag = tei->tag, hdr_tag;
	u32 flags, del_q;
	void *buf_tmp;
	u8 *buf_cmd, *buf_oaf;
	dma_addr_t buf_tmp_dma;
	u32 i, req_len, resp_len;
	const u32 max_resp_len = SB_RFB_MAX;

	if (mvs_assign_reg_set(mvi, mvi_dev) == MVS_ID_NOT_MAPPED) {
		mv_dprintk("Not enough register sets for dev %d.\n",
			   mvi_dev->device_id);
		return -EBUSY;
	}
	slot = &mvi->slot_info[tag];
	slot->tx = mvi->tx_prod;
	del_q = TXQ_MODE_I | tag |
		(TXQ_CMD_STP << TXQ_CMD_SHIFT) |
		((sas_port->phy_mask & TXQ_PHY_MASK) << TXQ_PHY_SHIFT) |
		(mvi_dev->taskfileset << TXQ_SRS_SHIFT);
	mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);

	if (task->data_dir == DMA_FROM_DEVICE)
		flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT);
	else
		flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);

	if (task->ata_task.use_ncq)
		flags |= MCH_FPDMA;
	if (dev->sata_dev.class == ATA_DEV_ATAPI) {
		if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
			flags |= MCH_ATAPI;
	}

	hdr->flags = cpu_to_le32(flags);

	if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag))
		task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
	else
		hdr_tag = tag;

	hdr->tags = cpu_to_le32(hdr_tag);

	hdr->data_len = cpu_to_le32(task->total_xfer_len);

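	/*
	 * Per-slot DMA buffer layout, shared by the SMP/SSP/ATA prep
	 * routines (sizes shown are for the ATA case):
	 *
	 *   +----------------------------+ slot->buf_dma
	 *   | region 1: command table    | MVS_ATA_CMD_SZ
	 *   +----------------------------+
	 *   | region 2: open addr frame  | MVS_OAF_SZ
	 *   +----------------------------+
	 *   | region 3: PRD table        | prd_size() * prd_count()
	 *   +----------------------------+
	 *   | region 4: status buffer    | remainder of MVS_SLOT_BUF_SZ
	 *   +----------------------------+
	 */
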
	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
	buf_cmd = buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_ATA_CMD_SZ;
	buf_tmp_dma += MVS_ATA_CMD_SZ;

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	/* used for STP.  unused for SATA? */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;

	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;
	i = MVS_CHIP_DISP->prd_size() * MVS_CHIP_DISP->prd_count();

	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (the larger the PRD table, the smaller this buffer) */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
	if (mvi->flags & MVF_FLAG_SOC)
		hdr->reserved[0] = 0;

	req_len = sizeof(struct host_to_dev_fis);
	resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
		   sizeof(struct mvs_err_info) - i;

	/* request, response lengths */
	resp_len = min(resp_len, max_resp_len);
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));

	if (likely(!task->ata_task.device_control_reg_update))
		task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
	/* fill in command FIS and ATAPI CDB */
	memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
	if (dev->sata_dev.class == ATA_DEV_ATAPI)
		memcpy(buf_cmd + STP_ATAPI_CMD,
			task->ata_task.atapi_packet, 16);

	/* generate open address frame hdr (first 12 bytes) */
	/* initiator, STP, ftype 1h */
	buf_oaf[0] = (1 << 7) | (PROTOCOL_STP << 4) | 0x1;
	buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf;
	*(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
	memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in PRD (scatter/gather) table, if any */
	MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);

	if (task->data_dir == DMA_FROM_DEVICE)
		MVS_CHIP_DISP->dma_fix(mvi, sas_port->phy_mask,
				TRASH_BUCKET_SIZE, tei->n_elem, buf_prd);

	return 0;
}

static int mvs_task_prep_ssp(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei, int is_tmf,
			     struct mvs_tmf_task *tmf)
{
	struct sas_task *task = tei->task;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct mvs_port *port = tei->port;
	struct domain_device *dev = task->dev;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct asd_sas_port *sas_port = dev->port;
	struct mvs_slot_info *slot;
	void *buf_prd;
	struct ssp_frame_hdr *ssp_hdr;
	void *buf_tmp;
	u8 *buf_cmd, *buf_oaf, fburst = 0;
	dma_addr_t buf_tmp_dma;
	u32 flags;
	u32 resp_len, req_len, i, tag = tei->tag;
	const u32 max_resp_len = SB_RFB_MAX;
	u32 phy_mask;

	slot = &mvi->slot_info[tag];

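	/*
	 * Deliver on any phy of the wide port if one has been formed;
	 * otherwise fall back to the libsas port phy mask.
	 */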
	phy_mask = ((port->wide_port_phymap) ? port->wide_port_phymap :
		sas_port->phy_mask) & TXQ_PHY_MASK;

	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
				(TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
				(phy_mask << TXQ_PHY_SHIFT));

	flags = MCH_RETRY;
	if (task->ssp_task.enable_first_burst) {
		flags |= MCH_FBURST;
		fburst = (1 << 7);
	}
	if (is_tmf)
		flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT);
	else
		flags |= (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT);

	hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT));
	hdr->tags = cpu_to_le32(tag);
	hdr->data_len = cpu_to_le32(task->total_xfer_len);

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
	buf_cmd = buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_SSP_CMD_SZ;
	buf_tmp_dma += MVS_SSP_CMD_SZ;

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (the larger the PRD table, the smaller this buffer) */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
	if (mvi->flags & MVF_FLAG_SOC)
		hdr->reserved[0] = 0;

	resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
	    sizeof(struct mvs_err_info) - i;
	resp_len = min(resp_len, max_resp_len);

	req_len = sizeof(struct ssp_frame_hdr) + 28;

	/* request, response lengths */
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));

	/* generate open address frame hdr (first 12 bytes) */
	/* initiator, SSP, ftype 1h */
	buf_oaf[0] = (1 << 7) | (PROTOCOL_SSP << 4) | 0x1;
	buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf;
	*(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
	memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in SSP frame header (Command Table.SSP frame header) */
	ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;

	if (is_tmf)
		ssp_hdr->frame_type = SSP_TASK;
	else
		ssp_hdr->frame_type = SSP_COMMAND;

	memcpy(ssp_hdr->hashed_dest_addr, dev->hashed_sas_addr,
	       HASHED_SAS_ADDR_SIZE);
	memcpy(ssp_hdr->hashed_src_addr,
	       dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
	ssp_hdr->tag = cpu_to_be16(tag);

	/* fill in IU for TASK and Command Frame */
	buf_cmd += sizeof(*ssp_hdr);
	memcpy(buf_cmd, &task->ssp_task.LUN, 8);

	if (ssp_hdr->frame_type != SSP_TASK) {
		buf_cmd[9] = fburst | task->ssp_task.task_attr |
				(task->ssp_task.task_prio << 3);
		memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd,
		       task->ssp_task.cmd->cmd_len);
	} else {
		buf_cmd[10] = tmf->tmf;
		switch (tmf->tmf) {
		case TMF_ABORT_TASK:
		case TMF_QUERY_TASK:
			buf_cmd[12] =
				(tmf->tag_of_task_to_be_managed >> 8) & 0xff;
			buf_cmd[13] =
				tmf->tag_of_task_to_be_managed & 0xff;
			break;
		default:
			break;
		}
	}
	/* fill in PRD (scatter/gather) table, if any */
	MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
	return 0;
}

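/*
 * Common preparation path: validate the port and device, DMA-map the
 * scatterlist for non-ATA tasks, allocate a tag and a slot buffer, then
 * dispatch to the protocol-specific prep routine above.  On success,
 * *pass is bumped so the caller knows a slot was queued for delivery.
 */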
#define	DEV_IS_GONE(mvi_dev)	((!mvi_dev || (mvi_dev->dev_type == SAS_PHY_UNUSED)))
static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf,
				struct mvs_tmf_task *tmf, int *pass)
{
	struct domain_device *dev = task->dev;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct mvs_task_exec_info tei;
	struct mvs_slot_info *slot;
	u32 tag = 0xdeadbeef, n_elem = 0;
	int rc = 0;

	if (!dev->port) {
		struct task_status_struct *tsm = &task->task_status;

		tsm->resp = SAS_TASK_UNDELIVERED;
		tsm->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port; don't call task_done
		 * for SATA devices.
		 */
		if (dev->dev_type != SAS_SATA_DEV)
			task->task_done(task);
		return rc;
	}

	if (DEV_IS_GONE(mvi_dev)) {
		if (mvi_dev)
			mv_dprintk("device %d not ready.\n",
				   mvi_dev->device_id);
		else
			mv_dprintk("device %016llx not ready.\n",
				   SAS_ADDR(dev->sas_addr));

		rc = SAS_PHY_DOWN;
		return rc;
	}
	tei.port = dev->port->lldd_port;
	if (tei.port && !tei.port->port_attached && !tmf) {
		if (sas_protocol_ata(task->task_proto)) {
			struct task_status_struct *ts = &task->task_status;
			mv_dprintk("SATA/STP port %d has no attached device.\n",
				   dev->port->id);
			ts->resp = SAS_TASK_COMPLETE;
			ts->stat = SAS_PHY_DOWN;

			task->task_done(task);

		} else {
			struct task_status_struct *ts = &task->task_status;
			mv_dprintk("SAS port %d has no attached device.\n",
				   dev->port->id);
			ts->resp = SAS_TASK_UNDELIVERED;
			ts->stat = SAS_PHY_DOWN;
			task->task_done(task);
		}
		return rc;
	}

	if (!sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter) {
			n_elem = dma_map_sg(mvi->dev,
					    task->scatter,
					    task->num_scatter,
					    task->data_dir);
			if (!n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		}
	} else {
		n_elem = task->num_scatter;
	}

	rc = mvs_tag_alloc(mvi, &tag);
	if (rc)
		goto err_out;

	slot = &mvi->slot_info[tag];

	task->lldd_task = NULL;
	slot->n_elem = n_elem;
	slot->slot_tag = tag;

	slot->buf = dma_pool_zalloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma);
	if (!slot->buf) {
		rc = -ENOMEM;
		goto err_out_tag;
	}

	tei.task = task;
	tei.hdr = &mvi->slot[tag];
	tei.tag = tag;
	tei.n_elem = n_elem;
	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		rc = mvs_task_prep_smp(mvi, &tei);
		break;
	case SAS_PROTOCOL_SSP:
		rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		rc = mvs_task_prep_ata(mvi, &tei);
		break;
	default:
		dev_printk(KERN_ERR, mvi->dev,
			"unknown sas_task proto: 0x%x\n",
			task->task_proto);
		rc = -EINVAL;
		break;
	}

	if (rc) {
		mv_dprintk("rc is %x\n", rc);
		goto err_out_slot_buf;
	}
	slot->task = task;
	slot->port = tei.port;
	task->lldd_task = slot;
	list_add_tail(&slot->entry, &tei.port->list);
	spin_lock(&task->task_state_lock);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock(&task->task_state_lock);

	mvi_dev->running_req++;
	++(*pass);
	mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);

	return rc;

err_out_slot_buf:
	dma_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
err_out_tag:
	mvs_tag_free(mvi, tag);
err_out:

	dev_printk(KERN_ERR, mvi->dev, "mvsas prep failed[%d]!\n", rc);
	if (!sas_protocol_ata(task->task_proto))
		if (n_elem)
			dma_unmap_sg(mvi->dev, task->scatter, n_elem,
				     task->data_dir);
prep_out:
	return rc;
}

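/*
 * Queue one task under mvi->lock and, if preparation queued anything
 * (pass != 0), ring the delivery doorbell with the index of the last
 * produced TX slot.
 */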
static int mvs_task_exec(struct sas_task *task, gfp_t gfp_flags,
				struct completion *completion, int is_tmf,
				struct mvs_tmf_task *tmf)
{
	struct mvs_info *mvi = NULL;
	u32 rc = 0;
	u32 pass = 0;
	unsigned long flags = 0;

	mvi = ((struct mvs_device *)task->dev->lldd_dev)->mvi_info;

	spin_lock_irqsave(&mvi->lock, flags);
	rc = mvs_task_prep(task, mvi, is_tmf, tmf, &pass);
	if (rc)
		dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);

	if (likely(pass))
		MVS_CHIP_DISP->start_delivery(mvi, (mvi->tx_prod - 1) &
			(MVS_CHIP_SLOT_SZ - 1));
	spin_unlock_irqrestore(&mvi->lock, flags);

	return rc;
}

int mvs_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	return mvs_task_exec(task, gfp_flags, NULL, 0, NULL);
}

static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
{
	u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
	mvs_tag_clear(mvi, slot_idx);
}

static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
			  struct mvs_slot_info *slot, u32 slot_idx)
{
	if (!slot)
		return;
	if (!slot->task)
		return;
	if (!sas_protocol_ata(task->task_proto))
		if (slot->n_elem)
			dma_unmap_sg(mvi->dev, task->scatter,
				     slot->n_elem, task->data_dir);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1,
			     DMA_FROM_DEVICE);
		dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1,
			     DMA_TO_DEVICE);
		break;

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SSP:
	default:
		/* do nothing */
		break;
	}

	if (slot->buf) {
		dma_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
		slot->buf = NULL;
	}
	list_del_init(&slot->entry);
	task->lldd_task = NULL;
	slot->task = NULL;
	slot->port = NULL;
	slot->slot_tag = 0xFFFFFFFF;
	mvs_slot_free(mvi, slot_idx);
}

static void mvs_update_wideport(struct mvs_info *mvi, int phy_no)
{
	struct mvs_phy *phy = &mvi->phy[phy_no];
	struct mvs_port *port = phy->port;
	int j, no;

	for_each_phy(port->wide_port_phymap, j, no) {
		if (j & 1) {
			MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
						PHYR_WIDE_PORT);
			MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
						port->wide_port_phymap);
		} else {
			MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
						PHYR_WIDE_PORT);
			MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
						0);
		}
	}
}

static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
{
	u32 tmp;
	struct mvs_phy *phy = &mvi->phy[i];
	struct mvs_port *port = phy->port;

	tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, i);
	if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
		if (!port)
			phy->phy_attached = 1;
		return tmp;
	}

	if (port) {
		if (phy->phy_type & PORT_TYPE_SAS) {
			port->wide_port_phymap &= ~(1U << i);
			if (!port->wide_port_phymap)
				port->port_attached = 0;
			mvs_update_wideport(mvi, i);
		} else if (phy->phy_type & PORT_TYPE_SATA)
			port->port_attached = 0;
		phy->port = NULL;
		phy->phy_attached = 0;
		phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
	}
	return 0;
}

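/*
 * Read back the 16-byte SATA signature (D2H register) FIS that the chip
 * latched in the per-port PHYR_SATA_SIG0..3 config registers; the final
 * fixup appears to normalize an ATAPI device signature (0xEB1401).
 */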
static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
{
	u32 *s = (u32 *) buf;

	if (!s)
		return NULL;

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
	s[3] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
	s[2] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
	s[1] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
	s[0] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));

	if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01))
		s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10);

	return s;
}

static u32 mvs_is_sig_fis_received(u32 irq_status)
{
	return irq_status & PHYEV_SIG_FIS;
}

static void mvs_sig_remove_timer(struct mvs_phy *phy)
{
	if (phy->timer.function)
		del_timer(&phy->timer);
	phy->timer.function = NULL;
}

void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct sas_identify_frame *id;

	id = (struct sas_identify_frame *)phy->frame_rcvd;

	if (get_st) {
		phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, i);
		phy->phy_status = mvs_is_phy_ready(mvi, i);
	}

	if (phy->phy_status) {
		int oob_done = 0;
		struct asd_sas_phy *sas_phy = &mvi->phy[i].sas_phy;

		oob_done = MVS_CHIP_DISP->oob_done(mvi, i);

		MVS_CHIP_DISP->fix_phy_info(mvi, i, id);
		if (phy->phy_type & PORT_TYPE_SATA) {
			phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
			if (mvs_is_sig_fis_received(phy->irq_status)) {
				mvs_sig_remove_timer(phy);
				phy->phy_attached = 1;
				phy->att_dev_sas_addr =
					i + mvi->id * mvi->chip->n_phy;
				if (oob_done)
					sas_phy->oob_mode = SATA_OOB_MODE;
				phy->frame_rcvd_size =
					sizeof(struct dev_to_host_fis);
				mvs_get_d2h_reg(mvi, i, id);
			} else {
				u32 tmp;
				dev_printk(KERN_DEBUG, mvi->dev,
					"Phy%d : No sig fis\n", i);
				tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, i);
				MVS_CHIP_DISP->write_port_irq_mask(mvi, i,
						tmp | PHYEV_SIG_FIS);
				phy->phy_attached = 0;
				phy->phy_type &= ~PORT_TYPE_SATA;
				goto out_done;
			}
		} else if (phy->phy_type & PORT_TYPE_SAS
			|| phy->att_dev_info & PORT_SSP_INIT_MASK) {
			phy->phy_attached = 1;
			phy->identify.device_type =
				phy->att_dev_info & PORT_DEV_TYPE_MASK;

			if (phy->identify.device_type == SAS_END_DEVICE)
				phy->identify.target_port_protocols =
							SAS_PROTOCOL_SSP;
			else if (phy->identify.device_type != SAS_PHY_UNUSED)
				phy->identify.target_port_protocols =
							SAS_PROTOCOL_SMP;
			if (oob_done)
				sas_phy->oob_mode = SAS_OOB_MODE;
			phy->frame_rcvd_size =
			    sizeof(struct sas_identify_frame);
		}
		memcpy(sas_phy->attached_sas_addr,
			&phy->att_dev_sas_addr, SAS_ADDR_SIZE);

		if (MVS_CHIP_DISP->phy_work_around)
			MVS_CHIP_DISP->phy_work_around(mvi, i);
	}
	mv_dprintk("phy %d attach dev info is %x\n",
		i + mvi->id * mvi->chip->n_phy, phy->att_dev_info);
	mv_dprintk("phy %d attach sas addr is %llx\n",
		i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr);
out_done:
	if (get_st)
		MVS_CHIP_DISP->write_port_irq_stat(mvi, i, phy->irq_status);
}

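/*
 * Port formation callback (also called directly from the event work queue
 * with lock=0): resolve which controller and mvs_port this phy belongs
 * to, mark the port attached and, for SAS, program the wide-port phy map
 * into the chip.
 */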
static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct mvs_info *mvi = NULL;
	int i = 0, hi;
	struct mvs_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct mvs_port *port;
	unsigned long flags = 0;

	if (!sas_port)
		return;

	while (sas_ha->sas_phy[i]) {
		if (sas_ha->sas_phy[i] == sas_phy)
			break;
		i++;
	}
	hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy;
	mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi];
	if (i >= mvi->chip->n_phy)
		port = &mvi->port[i - mvi->chip->n_phy];
	else
		port = &mvi->port[i];
	if (lock)
		spin_lock_irqsave(&mvi->lock, flags);
	port->port_attached = 1;
	phy->port = port;
	sas_port->lldd_port = port;
	if (phy->phy_type & PORT_TYPE_SAS) {
		port->wide_port_phymap = sas_port->phy_mask;
		mv_printk("set wide port phy map %x\n", sas_port->phy_mask);
		mvs_update_wideport(mvi, sas_phy->id);

		/* direct attached SAS device */
		if (phy->att_dev_info & PORT_SSP_TRGT_MASK) {
			MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
			MVS_CHIP_DISP->write_port_cfg_data(mvi, i, 0x04);
		}
	}
	if (lock)
		spin_unlock_irqrestore(&mvi->lock, flags);
}

static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock)
{
	struct domain_device *dev;
	struct mvs_phy *phy = sas_phy->lldd_phy;
	struct mvs_info *mvi = phy->mvi;
	struct asd_sas_port *port = sas_phy->port;
	int phy_no = 0;

	while (phy != &mvi->phy[phy_no]) {
		phy_no++;
		if (phy_no >= MVS_MAX_PHYS)
			return;
	}
	list_for_each_entry(dev, &port->dev_list, dev_list_node)
		mvs_do_release_task(phy->mvi, phy_no, dev);
}

void mvs_port_formed(struct asd_sas_phy *sas_phy)
{
	mvs_port_notify_formed(sas_phy, 1);
}

void mvs_port_deformed(struct asd_sas_phy *sas_phy)
{
	mvs_port_notify_deformed(sas_phy, 1);
}

static struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
{
	u32 dev;
	for (dev = 0; dev < MVS_MAX_DEVICES; dev++) {
		if (mvi->devices[dev].dev_type == SAS_PHY_UNUSED) {
			mvi->devices[dev].device_id = dev;
			return &mvi->devices[dev];
		}
	}

	if (dev == MVS_MAX_DEVICES)
		mv_printk("at most %d devices are supported, ignoring.\n",
			  MVS_MAX_DEVICES);

	return NULL;
}

static void mvs_free_dev(struct mvs_device *mvi_dev)
{
	u32 id = mvi_dev->device_id;
	memset(mvi_dev, 0, sizeof(*mvi_dev));
	mvi_dev->device_id = id;
	mvi_dev->dev_type = SAS_PHY_UNUSED;
	mvi_dev->dev_status = MVS_DEV_NORMAL;
	mvi_dev->taskfileset = MVS_ID_NOT_MAPPED;
}

static int mvs_dev_found_notify(struct domain_device *dev, int lock)
{
	unsigned long flags = 0;
	int res = 0;
	struct mvs_info *mvi = NULL;
	struct domain_device *parent_dev = dev->parent;
	struct mvs_device *mvi_device;

	mvi = mvs_find_dev_mvi(dev);

	if (lock)
		spin_lock_irqsave(&mvi->lock, flags);

	mvi_device = mvs_alloc_dev(mvi);
	if (!mvi_device) {
		res = -1;
		goto found_out;
	}
	dev->lldd_dev = mvi_device;
	mvi_device->dev_status = MVS_DEV_NORMAL;
	mvi_device->dev_type = dev->dev_type;
	mvi_device->mvi_info = mvi;
	mvi_device->sas_device = dev;

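	/*
	 * For an expander-attached device, record which expander phy
	 * routes to it (mvi_device->attached_phy).
	 */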
	if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
		int phy_id;
		u8 phy_num = parent_dev->ex_dev.num_phys;
		struct ex_phy *phy;
		for (phy_id = 0; phy_id < phy_num; phy_id++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_id];
			if (SAS_ADDR(phy->attached_sas_addr) ==
				SAS_ADDR(dev->sas_addr)) {
				mvi_device->attached_phy = phy_id;
				break;
			}
		}

		if (phy_id == phy_num) {
			mv_printk("Error: no attached dev:%016llx "
				  "at ex:%016llx.\n",
				  SAS_ADDR(dev->sas_addr),
				  SAS_ADDR(parent_dev->sas_addr));
			res = -1;
		}
	}

found_out:
	if (lock)
		spin_unlock_irqrestore(&mvi->lock, flags);
	return res;
}

int mvs_dev_found(struct domain_device *dev)
{
	return mvs_dev_found_notify(dev, 1);
}

static void mvs_dev_gone_notify(struct domain_device *dev)
{
	unsigned long flags = 0;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct mvs_info *mvi;

	if (!mvi_dev) {
		mv_dprintk("device is already gone.\n");
		return;
	}

	mvi = mvi_dev->mvi_info;

	spin_lock_irqsave(&mvi->lock, flags);

	mv_dprintk("found dev[%d:%x] is gone.\n",
		mvi_dev->device_id, mvi_dev->dev_type);
	mvs_release_task(mvi, dev);
	mvs_free_reg_set(mvi, mvi_dev);
	mvs_free_dev(mvi_dev);

	dev->lldd_dev = NULL;
	mvi_dev->sas_device = NULL;

	spin_unlock_irqrestore(&mvi->lock, flags);
}

void mvs_dev_gone(struct domain_device *dev)
{
	mvs_dev_gone_notify(dev);
}

static void mvs_task_done(struct sas_task *task)
{
	if (!del_timer(&task->slow_task->timer))
		return;
	complete(&task->slow_task->completion);
}

static void mvs_tmf_timedout(struct timer_list *t)
{
	struct sas_task_slow *slow = from_timer(slow, t, timer);
	struct sas_task *task = slow->task;

	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	complete(&task->slow_task->completion);
}

#define MVS_TASK_TIMEOUT 20
/*
 * Issue an internal TMF task with a timeout, retrying up to three times
 * if the target returns a retryable response.
 */
static int mvs_exec_internal_tmf_task(struct domain_device *dev,
			void *parameter, u32 para_len, struct mvs_tmf_task *tmf)
{
	int res, retry;
	struct sas_task *task = NULL;

	for (retry = 0; retry < 3; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = dev;
		task->task_proto = dev->tproto;

		memcpy(&task->ssp_task, parameter, para_len);
		task->task_done = mvs_task_done;

		task->slow_task->timer.function = mvs_tmf_timedout;
		task->slow_task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
		add_timer(&task->slow_task->timer);

		res = mvs_task_exec(task, GFP_KERNEL, NULL, 1, tmf);

		if (res) {
			del_timer(&task->slow_task->timer);
			mv_printk("executing internal task failed: %d\n", res);
			goto ex_err;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even if the TMF timed out, return immediately. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				mv_printk("TMF task[%x] timeout.\n", tmf->tmf);
				goto ex_err;
			}
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_SAM_STAT_GOOD) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		      task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun */
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		      task->task_status.stat == SAS_DATA_OVERRUN) {
			mv_dprintk("blocked task error.\n");
			res = -EMSGSIZE;
			break;
		} else {
			mv_dprintk(" task to dev %016llx response: 0x%x "
				   "status 0x%x\n",
				   SAS_ADDR(dev->sas_addr),
				   task->task_status.resp,
				   task->task_status.stat);
			sas_free_task(task);
			task = NULL;
		}
	}
ex_err:
	BUG_ON(retry == 3 && task != NULL);
	sas_free_task(task);
	return res;
}

static int mvs_debug_issue_ssp_tmf(struct domain_device *dev,
				u8 *lun, struct mvs_tmf_task *tmf)
{
	struct sas_ssp_task ssp_task;
	if (!(dev->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	memcpy(ssp_task.LUN, lun, 8);

	return mvs_exec_internal_tmf_task(dev, &ssp_task,
				sizeof(ssp_task), tmf);
}

/*
 * The standard mandates a link reset for ATA (type 0) and a hard reset
 * for SSP (type 1); this is used only for RECOVERY.
 */
static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
{
	int rc;
	struct sas_phy *phy = sas_get_local_phy(dev);
	int reset_type = (dev->dev_type == SAS_SATA_DEV ||
			(dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
	rc = sas_phy_reset(phy, reset_type);
	sas_put_local_phy(phy);
	msleep(2000);
	return rc;
}

/* mandatory SAM-3 */
int mvs_lu_reset(struct domain_device *dev, u8 *lun)
{
	unsigned long flags;
	int rc = TMF_RESP_FUNC_FAILED;
	struct mvs_tmf_task tmf_task;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct mvs_info *mvi = mvi_dev->mvi_info;

	tmf_task.tmf = TMF_LU_RESET;
	mvi_dev->dev_status = MVS_DEV_EH;
	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
	if (rc == TMF_RESP_FUNC_COMPLETE) {
		spin_lock_irqsave(&mvi->lock, flags);
		mvs_release_task(mvi, dev);
		spin_unlock_irqrestore(&mvi->lock, flags);
	}
	/* If it failed, fall through to the I_T nexus reset. */
	mv_printk("%s for device[%x]:rc= %d\n", __func__,
			mvi_dev->device_id, rc);
	return rc;
}

int mvs_I_T_nexus_reset(struct domain_device *dev)
{
	unsigned long flags;
	int rc = TMF_RESP_FUNC_FAILED;
	struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
	struct mvs_info *mvi = mvi_dev->mvi_info;

	if (mvi_dev->dev_status != MVS_DEV_EH)
		return TMF_RESP_FUNC_COMPLETE;
	else
		mvi_dev->dev_status = MVS_DEV_NORMAL;
	rc = mvs_debug_I_T_nexus_reset(dev);
	mv_printk("%s for device[%x]:rc= %d\n",
		__func__, mvi_dev->device_id, rc);

	spin_lock_irqsave(&mvi->lock, flags);
	mvs_release_task(mvi, dev);
	spin_unlock_irqrestore(&mvi->lock, flags);

	return rc;
}

/* optional SAM-3 */
int mvs_query_task(struct sas_task *task)
{
	u32 tag;
	struct scsi_lun lun;
	struct mvs_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = (struct scsi_cmnd *)task->uldd_task;
		struct domain_device *dev = task->dev;
		struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
		struct mvs_info *mvi = mvi_dev->mvi_info;

		int_to_scsilun(cmnd->device->lun, &lun);
		rc = mvs_find_tag(mvi, task, &tag);
		if (rc == 0) {
			rc = TMF_RESP_FUNC_FAILED;
			return rc;
		}

		tmf_task.tmf = TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
		switch (rc) {
		/* The task is still in the LUN; release it then. */
		case TMF_RESP_FUNC_SUCC:
		/* The task is not in the LUN or failed; reset the phy. */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			break;
		}
	}
	mv_printk("%s:rc= %d\n", __func__, rc);
	return rc;
}

/* mandatory SAM-3; we still need to free the task/slot info */
int mvs_abort_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct mvs_tmf_task tmf_task;
	struct domain_device *dev = task->dev;
	struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
	struct mvs_info *mvi;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;
	u32 tag;

	if (!mvi_dev) {
		mv_printk("Device has been removed\n");
		return TMF_RESP_FUNC_FAILED;
	}

	mvi = mvi_dev->mvi_info;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	mvi_dev->dev_status = MVS_DEV_EH;

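	/*
	 * For SSP, try to abort via an ABORT TASK TMF addressed by tag;
	 * for SATA/STP the slot is simply torn down locally, since the
	 * TMF path only applies to SSP.
	 */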
	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = (struct scsi_cmnd *)task->uldd_task;

		int_to_scsilun(cmnd->device->lun, &lun);
		rc = mvs_find_tag(mvi, task, &tag);
		if (rc == 0) {
			mv_printk("No such tag in %s\n", __func__);
			rc = TMF_RESP_FUNC_FAILED;
			return rc;
		}

		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);

		/* if successful, clear the task and callback forwards. */
		if (rc == TMF_RESP_FUNC_COMPLETE) {
			u32 slot_no;
			struct mvs_slot_info *slot;

			if (task->lldd_task) {
				slot = task->lldd_task;
				slot_no = (u32) (slot - mvi->slot_info);
				spin_lock_irqsave(&mvi->lock, flags);
				mvs_slot_complete(mvi, slot_no, 1);
				spin_unlock_irqrestore(&mvi->lock, flags);
			}
		}

	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		task->task_proto & SAS_PROTOCOL_STP) {
		if (SAS_SATA_DEV == dev->dev_type) {
			struct mvs_slot_info *slot = task->lldd_task;
			u32 slot_idx = (u32)(slot - mvi->slot_info);
			mv_dprintk("mvs_abort_task() mvi=%p task=%p "
				   "slot=%p slot_idx=0x%x\n",
				   mvi, task, slot, slot_idx);
			task->task_state_flags |= SAS_TASK_STATE_ABORTED;
			mvs_slot_task_free(mvi, task, slot, slot_idx);
			rc = TMF_RESP_FUNC_COMPLETE;
			goto out;
		}

	}
out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		mv_printk("%s:rc= %d\n", __func__, rc);
	return rc;
}

int mvs_abort_task_set(struct domain_device *dev, u8 *lun)
{
	int rc;
	struct mvs_tmf_task tmf_task;

	tmf_task.tmf = TMF_ABORT_TASK_SET;
	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);

	return rc;
}

int mvs_clear_aca(struct domain_device *dev, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct mvs_tmf_task tmf_task;

	tmf_task.tmf = TMF_CLEAR_ACA;
	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);

	return rc;
}

int mvs_clear_task_set(struct domain_device *dev, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct mvs_tmf_task tmf_task;

	tmf_task.tmf = TMF_CLEAR_TASK_SET;
	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);

	return rc;
}

static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
			u32 slot_idx, int err)
{
	struct mvs_device *mvi_dev = task->dev->lldd_dev;
	struct task_status_struct *tstat = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
	int stat = SAM_STAT_GOOD;

	resp->frame_len = sizeof(struct dev_to_host_fis);
	memcpy(&resp->ending_fis[0],
	       SATA_RECEIVED_D2H_FIS(mvi_dev->taskfileset),
	       sizeof(struct dev_to_host_fis));
	tstat->buf_valid_size = sizeof(*resp);
	if (unlikely(err)) {
		if (unlikely(err & CMD_ISS_STPD))
			stat = SAS_OPEN_REJECT;
		else
			stat = SAS_PROTO_RESPONSE;
	}

	return stat;
}

static void mvs_set_sense(u8 *buffer, int len, int d_sense,
		int key, int asc, int ascq)
{
	memset(buffer, 0, len);

	if (d_sense) {
		/* Descriptor format */
		if (len < 4) {
			mv_printk("Length %d of sense buffer too small to "
				"fit sense %x:%x:%x", len, key, asc, ascq);
		}

		buffer[0] = 0x72;		/* Response Code */
		if (len > 1)
			buffer[1] = key;	/* Sense Key */
		if (len > 2)
			buffer[2] = asc;	/* ASC */
		if (len > 3)
			buffer[3] = ascq;	/* ASCQ */
	} else {
		if (len < 14) {
			mv_printk("Length %d of sense buffer too small to "
				"fit sense %x:%x:%x", len, key, asc, ascq);
		}

		buffer[0] = 0x70;		/* Response Code */
		if (len > 2)
			buffer[2] = key;	/* Sense Key */
		if (len > 7)
			buffer[7] = 0x0a;	/* Additional Sense Length */
		if (len > 12)
			buffer[12] = asc;	/* ASC */
		if (len > 13)
			buffer[13] = ascq;	/* ASCQ */
	}
}

static void mvs_fill_ssp_resp_iu(struct ssp_response_iu *iu,
				u8 key, u8 asc, u8 asc_q)
{
	iu->datapres = 2;
	iu->response_data_len = 0;
	iu->sense_data_len = 17;
	iu->status = 02;
	mvs_set_sense(iu->sense_data, 17, 0,
			key, asc, asc_q);
}

static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
			 u32 slot_idx)
{
	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
	int stat;
	u32 err_dw0 = le32_to_cpu(*(u32 *)slot->response);
	u32 err_dw1 = le32_to_cpu(*((u32 *)slot->response + 1));
	u32 tfs = 0;
	enum mvs_port_type type = PORT_TYPE_SAS;

	if (err_dw0 & CMD_ISS_STPD)
		MVS_CHIP_DISP->issue_stop(mvi, type, tfs);

	MVS_CHIP_DISP->command_active(mvi, slot_idx);

	stat = SAM_STAT_CHECK_CONDITION;
	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
	{
		stat = SAS_ABORTED_TASK;
		if ((err_dw0 & NO_DEST) || err_dw1 & bit(31)) {
			struct ssp_response_iu *iu = slot->response +
				sizeof(struct mvs_err_info);
			mvs_fill_ssp_resp_iu(iu, NOT_READY, 0x04, 01);
			sas_ssp_task_response(mvi->dev, task, iu);
			stat = SAM_STAT_CHECK_CONDITION;
		}
		if (err_dw1 & bit(31))
			mv_printk("reuse same slot, retry command.\n");
		break;
	}
	case SAS_PROTOCOL_SMP:
		stat = SAM_STAT_CHECK_CONDITION;
		break;

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
	{
		task->ata_task.use_ncq = 0;
		stat = SAS_PROTO_RESPONSE;
		mvs_sata_done(mvi, task, slot_idx, err_dw0);
	}
		break;
	default:
		break;
	}

	return stat;
}

int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
{
	u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
	struct sas_task *task = slot->task;
	struct mvs_device *mvi_dev = NULL;
	struct task_status_struct *tstat;
	struct domain_device *dev;
	u32 aborted;

	void *to;
	enum exec_status sts;

	if (unlikely(!task || !task->lldd_task || !task->dev))
		return -1;

	tstat = &task->task_status;
	dev = task->dev;
	mvi_dev = dev->lldd_dev;

	spin_lock(&task->task_state_lock);
	task->task_state_flags &=
		~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
	task->task_state_flags |= SAS_TASK_STATE_DONE;
	/* race condition */
	aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
	spin_unlock(&task->task_state_lock);

	memset(tstat, 0, sizeof(*tstat));
	tstat->resp = SAS_TASK_COMPLETE;

	if (unlikely(aborted)) {
		tstat->stat = SAS_ABORTED_TASK;
		if (mvi_dev && mvi_dev->running_req)
			mvi_dev->running_req--;
		if (sas_protocol_ata(task->task_proto))
			mvs_free_reg_set(mvi, mvi_dev);

		mvs_slot_task_free(mvi, task, slot, slot_idx);
		return -1;
	}
	/* when no device is attached, complete via error handling */
	if (unlikely(!mvi_dev || flags)) {
		if (!mvi_dev)
			mv_dprintk("port has no device.\n");
		tstat->stat = SAS_PHY_DOWN;
		goto out;
	}

	/*
	 * error info record present; slot->response is 32 bit aligned but may
	 * not be 64 bit aligned, so check for zero in two 32 bit reads
	 */
	if (unlikely((rx_desc & RXQ_ERR)
		     && (*((u32 *)slot->response)
			 || *(((u32 *)slot->response) + 1)))) {
		mv_dprintk("port %d slot %d rx_desc %X has error info "
			   "%016llX.\n", slot->port->sas_port.id, slot_idx,
			   rx_desc, get_unaligned_le64(slot->response));
		tstat->stat = mvs_slot_err(mvi, task, slot_idx);
		tstat->resp = SAS_TASK_COMPLETE;
		goto out;
	}

	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
		/* hw says status == 0, datapres == 0 */
		if (rx_desc & RXQ_GOOD) {
			tstat->stat = SAS_SAM_STAT_GOOD;
			tstat->resp = SAS_TASK_COMPLETE;
		}
		/* response frame present */
		else if (rx_desc & RXQ_RSP) {
			struct ssp_response_iu *iu = slot->response +
						sizeof(struct mvs_err_info);
			sas_ssp_task_response(mvi->dev, task, iu);
		} else
			tstat->stat = SAS_SAM_STAT_CHECK_CONDITION;
		break;

	case SAS_PROTOCOL_SMP: {
			struct scatterlist *sg_resp = &task->smp_task.smp_resp;
			tstat->stat = SAS_SAM_STAT_GOOD;
			to = kmap_atomic(sg_page(sg_resp));
			memcpy(to + sg_resp->offset,
				slot->response + sizeof(struct mvs_err_info),
				sg_dma_len(sg_resp));
			kunmap_atomic(to);
			break;
		}

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
			tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
			break;
		}

	default:
		tstat->stat = SAS_SAM_STAT_CHECK_CONDITION;
		break;
	}
	if (!slot->port->port_attached) {
		mv_dprintk("port %d has been removed.\n",
			   slot->port->sas_port.id);
		tstat->stat = SAS_PHY_DOWN;
	}

out:
	if (mvi_dev && mvi_dev->running_req) {
		mvi_dev->running_req--;
		if (sas_protocol_ata(task->task_proto) && !mvi_dev->running_req)
			mvs_free_reg_set(mvi, mvi_dev);
	}
	mvs_slot_task_free(mvi, task, slot, slot_idx);
	sts = tstat->stat;

	spin_unlock(&mvi->lock);
	if (task->task_done)
		task->task_done(task);

	spin_lock(&mvi->lock);

	return sts;
}

void mvs_do_release_task(struct mvs_info *mvi,
		int phy_no, struct domain_device *dev)
{
	u32 slot_idx;
	struct mvs_phy *phy;
	struct mvs_port *port;
	struct mvs_slot_info *slot, *slot2;

	phy = &mvi->phy[phy_no];
	port = phy->port;
	if (!port)
		return;
	/* clean cmpl queue in case request is already finished */
	mvs_int_rx(mvi, false);

	list_for_each_entry_safe(slot, slot2, &port->list, entry) {
		struct sas_task *task;
		slot_idx = (u32) (slot - mvi->slot_info);
		task = slot->task;

		if (dev && task->dev != dev)
			continue;

		mv_printk("Release slot [%x] tag[%x], task [%p]:\n",
			slot_idx, slot->slot_tag, task);
		MVS_CHIP_DISP->command_active(mvi, slot_idx);

		mvs_slot_complete(mvi, slot_idx, 1);
	}
}

void mvs_release_task(struct mvs_info *mvi,
		      struct domain_device *dev)
{
	int i, phyno[WIDE_PORT_MAX_PHY], num;
	num = mvs_find_dev_phyno(dev, phyno);
	for (i = 0; i < num; i++)
		mvs_do_release_task(mvi, phyno[i], dev);
}

static void mvs_phy_disconnected(struct mvs_phy *phy)
{
	phy->phy_attached = 0;
	phy->att_dev_info = 0;
	phy->att_dev_sas_addr = 0;
}

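/*
 * Deferred event handler: mvs_handle_event() below queues a
 * PHY_PLUG_EVENT or EXP_BRCT_CHG item, and this work function, scheduled
 * two seconds later, re-checks the phy state under mvi->lock and notifies
 * libsas accordingly.
 */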
static void mvs_work_queue(struct work_struct *work)
{
	struct delayed_work *dw = container_of(work, struct delayed_work, work);
	struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q);
	struct mvs_info *mvi = mwq->mvi;
	unsigned long flags;
	u32 phy_no = (unsigned long) mwq->data;
	struct mvs_phy *phy = &mvi->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	spin_lock_irqsave(&mvi->lock, flags);
	if (mwq->handler & PHY_PLUG_EVENT) {
		if (phy->phy_event & PHY_PLUG_OUT) {
			u32 tmp;

			tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no);
			phy->phy_event &= ~PHY_PLUG_OUT;
			if (!(tmp & PHY_READY_MASK)) {
				sas_phy_disconnected(sas_phy);
				mvs_phy_disconnected(phy);
				sas_notify_phy_event(sas_phy,
					PHYE_LOSS_OF_SIGNAL, GFP_ATOMIC);
				mv_dprintk("phy%d Removed Device\n", phy_no);
			} else {
				MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
				mvs_update_phyinfo(mvi, phy_no, 1);
				mvs_bytes_dmaed(mvi, phy_no, GFP_ATOMIC);
				mvs_port_notify_formed(sas_phy, 0);
				mv_dprintk("phy%d Attached Device\n", phy_no);
			}
		}
	} else if (mwq->handler & EXP_BRCT_CHG) {
		phy->phy_event &= ~EXP_BRCT_CHG;
		sas_notify_port_event(sas_phy,
				PORTE_BROADCAST_RCVD, GFP_ATOMIC);
		mv_dprintk("phy%d Got Broadcast Change\n", phy_no);
	}
	list_del(&mwq->entry);
	spin_unlock_irqrestore(&mvi->lock, flags);
	kfree(mwq);
}

static int mvs_handle_event(struct mvs_info *mvi, void *data, int handler)
{
	struct mvs_wq *mwq;
	int ret = 0;

	mwq = kmalloc(sizeof(struct mvs_wq), GFP_ATOMIC);
	if (mwq) {
		mwq->mvi = mvi;
		mwq->data = data;
		mwq->handler = handler;
		MV_INIT_DELAYED_WORK(&mwq->work_q, mvs_work_queue, mwq);
		list_add_tail(&mwq->entry, &mvi->wq_list);
		schedule_delayed_work(&mwq->work_q, HZ * 2);
	} else
		ret = -ENOMEM;

	return ret;
}

static void mvs_sig_time_out(struct timer_list *t)
{
	struct mvs_phy *phy = from_timer(phy, t, timer);
	struct mvs_info *mvi = phy->mvi;
	u8 phy_no;

	for (phy_no = 0; phy_no < mvi->chip->n_phy; phy_no++) {
		if (&mvi->phy[phy_no] == phy) {
			mv_dprintk("Get signature time out, reset phy %d\n",
				phy_no + mvi->id * mvi->chip->n_phy);
			MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_HARD_RESET);
		}
	}
}

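/*
 * Per-phy event interrupt handler: latch and clear the port IRQ status,
 * then react to STP decoding errors, phy-offline (POOF) plug-out events,
 * COMWAKE (arming the signature-FIS timeout above), signature FIS /
 * identify completion, and expander broadcast changes.
 */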
void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
{
	u32 tmp;
	struct mvs_phy *phy = &mvi->phy[phy_no];

	phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no);
	MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status);
	mv_dprintk("phy %d ctrl sts=0x%08X.\n", phy_no+mvi->id*mvi->chip->n_phy,
		MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no));
	mv_dprintk("phy %d irq sts = 0x%08X\n", phy_no+mvi->id*mvi->chip->n_phy,
		phy->irq_status);

	/*
	 * 'events' carries the port events; check the per-port interrupt
	 * status to see which ones apply.
	 */

	if (phy->irq_status & PHYEV_DCDR_ERR) {
		mv_dprintk("phy %d STP decoding error.\n",
		phy_no + mvi->id*mvi->chip->n_phy);
	}

	if (phy->irq_status & PHYEV_POOF) {
		mdelay(500);
		if (!(phy->phy_event & PHY_PLUG_OUT)) {
			int dev_sata = phy->phy_type & PORT_TYPE_SATA;
			int ready;
			mvs_do_release_task(mvi, phy_no, NULL);
			phy->phy_event |= PHY_PLUG_OUT;
			MVS_CHIP_DISP->clear_srs_irq(mvi, 0, 1);
			mvs_handle_event(mvi,
				(void *)(unsigned long)phy_no,
				PHY_PLUG_EVENT);
			ready = mvs_is_phy_ready(mvi, phy_no);
			if (ready || dev_sata) {
				if (MVS_CHIP_DISP->stp_reset)
					MVS_CHIP_DISP->stp_reset(mvi,
							phy_no);
				else
					MVS_CHIP_DISP->phy_reset(mvi,
							phy_no, MVS_SOFT_RESET);
				return;
			}
		}
	}

	if (phy->irq_status & PHYEV_COMWAKE) {
		tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, phy_no);
		MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no,
					tmp | PHYEV_SIG_FIS);
		if (phy->timer.function == NULL) {
			phy->timer.function = mvs_sig_time_out;
			phy->timer.expires = jiffies + 5*HZ;
			add_timer(&phy->timer);
		}
	}
	if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
		phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
		mv_dprintk("notify plug in on phy[%d]\n", phy_no);
		if (phy->phy_status) {
			mdelay(10);
			MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
			if (phy->phy_type & PORT_TYPE_SATA) {
				tmp = MVS_CHIP_DISP->read_port_irq_mask(
						mvi, phy_no);
				tmp &= ~PHYEV_SIG_FIS;
				MVS_CHIP_DISP->write_port_irq_mask(mvi,
							phy_no, tmp);
			}
			mvs_update_phyinfo(mvi, phy_no, 0);
			if (phy->phy_type & PORT_TYPE_SAS) {
				MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_PHY_TUNE);
				mdelay(10);
			}

			mvs_bytes_dmaed(mvi, phy_no, GFP_ATOMIC);
			/* whether driver is going to handle hot plug */
			if (phy->phy_event & PHY_PLUG_OUT) {
				mvs_port_notify_formed(&phy->sas_phy, 0);
				phy->phy_event &= ~PHY_PLUG_OUT;
			}
		} else {
			mv_dprintk("plugin interrupt but phy%d is gone\n",
				phy_no + mvi->id*mvi->chip->n_phy);
		}
	} else if (phy->irq_status & PHYEV_BROAD_CH) {
		mv_dprintk("phy %d broadcast change.\n",
			phy_no + mvi->id*mvi->chip->n_phy);
		mvs_handle_event(mvi, (void *)(unsigned long)phy_no,
				EXP_BRCT_CHG);
	}
}

int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
{
	u32 rx_prod_idx, rx_desc;
	bool attn = false;

	/* the first dword in the RX ring is special: it contains
	 * a mirror of the hardware's RX producer index, so that
	 * we don't have to stall the CPU reading that register.
	 * The actual RX ring is offset by one dword, due to this.
	 */
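	/*
	 * In other words, a sketch of the ring layout:
	 *
	 *   rx[0]     - shadow of the hardware RX producer index
	 *   rx[1 + i] - completion descriptor for ring entry i,
	 *               i = 0 .. MVS_RX_RING_SZ - 1
	 *
	 * mvi->rx_cons chases the producer; 0xfff means the hardware has
	 * not written the shadow index yet.
	 */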
2058 */ 2059 rx_prod_idx = mvi->rx_cons; 2060 mvi->rx_cons = le32_to_cpu(mvi->rx[0]); 2061 if (mvi->rx_cons == 0xfff) /* h/w hasn't touched RX ring yet */ 2062 return 0; 2063 2064 /* The CMPL_Q may come late, read from register and try again 2065 * note: if coalescing is enabled, 2066 * it will need to read from register every time for sure 2067 */ 2068 if (unlikely(mvi->rx_cons == rx_prod_idx)) 2069 mvi->rx_cons = MVS_CHIP_DISP->rx_update(mvi) & RX_RING_SZ_MASK; 2070 2071 if (mvi->rx_cons == rx_prod_idx) 2072 return 0; 2073 2074 while (mvi->rx_cons != rx_prod_idx) { 2075 /* increment our internal RX consumer pointer */ 2076 rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1); 2077 rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]); 2078 2079 if (likely(rx_desc & RXQ_DONE)) 2080 mvs_slot_complete(mvi, rx_desc, 0); 2081 if (rx_desc & RXQ_ATTN) { 2082 attn = true; 2083 } else if (rx_desc & RXQ_ERR) { 2084 if (!(rx_desc & RXQ_DONE)) 2085 mvs_slot_complete(mvi, rx_desc, 0); 2086 } else if (rx_desc & RXQ_SLOT_RESET) { 2087 mvs_slot_free(mvi, rx_desc); 2088 } 2089 } 2090 2091 if (attn && self_clear) 2092 MVS_CHIP_DISP->int_full(mvi); 2093 return 0; 2094 } 2095 2096 int mvs_gpio_write(struct sas_ha_struct *sha, u8 reg_type, u8 reg_index, 2097 u8 reg_count, u8 *write_data) 2098 { 2099 struct mvs_prv_info *mvs_prv = sha->lldd_ha; 2100 struct mvs_info *mvi = mvs_prv->mvi[0]; 2101 2102 if (MVS_CHIP_DISP->gpio_write) { 2103 return MVS_CHIP_DISP->gpio_write(mvs_prv, reg_type, 2104 reg_index, reg_count, write_data); 2105 } 2106 2107 return -ENOSYS; 2108 } 2109