// SPDX-License-Identifier: GPL-2.0-only
/*
 * Marvell 88SE64xx/88SE94xx main function
 *
 * Copyright 2007 Red Hat, Inc.
 * Copyright 2008 Marvell. <kewei@marvell.com>
 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
 */

#include "mv_sas.h"

static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
{
	if (task->lldd_task) {
		struct mvs_slot_info *slot;
		slot = task->lldd_task;
		*tag = slot->slot_tag;
		return 1;
	}
	return 0;
}

void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
{
	void *bitmap = mvi->tags;
	clear_bit(tag, bitmap);
}

void mvs_tag_free(struct mvs_info *mvi, u32 tag)
{
	mvs_tag_clear(mvi, tag);
}

void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
{
	void *bitmap = mvi->tags;
	set_bit(tag, bitmap);
}

inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
{
	unsigned int index, tag;
	void *bitmap = mvi->tags;

	index = find_first_zero_bit(bitmap, mvi->tags_num);
	tag = index;
	if (tag >= mvi->tags_num)
		return -SAS_QUEUE_FULL;
	mvs_tag_set(mvi, tag);
	*tag_out = tag;
	return 0;
}

void mvs_tag_init(struct mvs_info *mvi)
{
	int i;
	for (i = 0; i < mvi->tags_num; ++i)
		mvs_tag_clear(mvi, i);
}
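
/*
 * Walk the libsas phy table to find a phy behind @dev's port, then
 * derive which controller instance (mvi) owns that phy.
 */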
static struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
{
	unsigned long i = 0, j = 0, hi = 0;
	struct sas_ha_struct *sha = dev->port->ha;
	struct mvs_info *mvi = NULL;
	struct asd_sas_phy *phy;

	while (sha->sas_port[i]) {
		if (sha->sas_port[i] == dev->port) {
			phy = container_of(sha->sas_port[i]->phy_list.next,
				struct asd_sas_phy, port_phy_el);
			j = 0;
			while (sha->sas_phy[j]) {
				if (sha->sas_phy[j] == phy)
					break;
				j++;
			}
			break;
		}
		i++;
	}
	hi = j / ((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];

	return mvi;
}

static int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
{
	unsigned long i = 0, j = 0, n = 0, num = 0;
	struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
	struct mvs_info *mvi = mvi_dev->mvi_info;
	struct sas_ha_struct *sha = dev->port->ha;

	while (sha->sas_port[i]) {
		if (sha->sas_port[i] == dev->port) {
			struct asd_sas_phy *phy;

			list_for_each_entry(phy,
				&sha->sas_port[i]->phy_list, port_phy_el) {
				j = 0;
				while (sha->sas_phy[j]) {
					if (sha->sas_phy[j] == phy)
						break;
					j++;
				}
				phyno[n] = (j >= mvi->chip->n_phy) ?
						(j - mvi->chip->n_phy) : j;
				num++;
				n++;
			}
			break;
		}
		i++;
	}
	return num;
}

struct mvs_device *mvs_find_dev_by_reg_set(struct mvs_info *mvi,
						u8 reg_set)
{
	u32 dev_no;
	for (dev_no = 0; dev_no < MVS_MAX_DEVICES; dev_no++) {
		if (mvi->devices[dev_no].taskfileset == MVS_ID_NOT_MAPPED)
			continue;

		if (mvi->devices[dev_no].taskfileset == reg_set)
			return &mvi->devices[dev_no];
	}
	return NULL;
}

static inline void mvs_free_reg_set(struct mvs_info *mvi,
				struct mvs_device *dev)
{
	if (!dev) {
		mv_printk("device has been freed.\n");
		return;
	}
	if (dev->taskfileset == MVS_ID_NOT_MAPPED)
		return;
	MVS_CHIP_DISP->free_reg_set(mvi, &dev->taskfileset);
}

static inline u8 mvs_assign_reg_set(struct mvs_info *mvi,
				struct mvs_device *dev)
{
	if (dev->taskfileset != MVS_ID_NOT_MAPPED)
		return 0;
	return MVS_CHIP_DISP->assign_reg_set(mvi, &dev->taskfileset);
}

void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard)
{
	u32 no;
	for_each_phy(phy_mask, phy_mask, no) {
		if (!(phy_mask & 1))
			continue;
		MVS_CHIP_DISP->phy_reset(mvi, no, hard);
	}
}

int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
			void *funcdata)
{
	int rc = 0, phy_id = sas_phy->id;
	u32 tmp, i = 0, hi;
	struct sas_ha_struct *sha = sas_phy->ha;
	struct mvs_info *mvi = NULL;

	while (sha->sas_phy[i]) {
		if (sha->sas_phy[i] == sas_phy)
			break;
		i++;
	}
	hi = i / ((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];

	switch (func) {
	case PHY_FUNC_SET_LINK_RATE:
		MVS_CHIP_DISP->phy_set_link_rate(mvi, phy_id, funcdata);
		break;

	case PHY_FUNC_HARD_RESET:
		tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id);
		if (tmp & PHY_RST_HARD)
			break;
		MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_HARD_RESET);
		break;

	case PHY_FUNC_LINK_RESET:
		MVS_CHIP_DISP->phy_enable(mvi, phy_id);
		MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_SOFT_RESET);
		break;

	case PHY_FUNC_DISABLE:
		MVS_CHIP_DISP->phy_disable(mvi, phy_id);
		break;
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		rc = -ENOSYS;
	}
	msleep(200);
	return rc;
}

void mvs_set_sas_addr(struct mvs_info *mvi, int port_id, u32 off_lo,
				u32 off_hi, u64 sas_addr)
{
	u32 lo = (u32)sas_addr;
	u32 hi = (u32)(sas_addr >> 32);

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_lo);
	MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, lo);
	MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_hi);
	MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, hi);
}
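
/*
 * Tell libsas that OOB finished on phy @i and hand it the identify
 * frame (SAS) or signature FIS (SATA) captured for the attached device.
 */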
static void mvs_bytes_dmaed(struct mvs_info *mvi, int i, gfp_t gfp_flags)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	if (!phy->phy_attached)
		return;

	if (!(phy->att_dev_info & PORT_DEV_TRGT_MASK)
		&& phy->phy_type & PORT_TYPE_SAS) {
		return;
	}

	sas_notify_phy_event(sas_phy, PHYE_OOB_DONE, gfp_flags);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate = phy->minimum_linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate = phy->maximum_linkrate;
		sphy->maximum_linkrate_hw = MVS_CHIP_DISP->phy_max_link_rate();
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;

		/* direct attached SAS device */
		if (phy->att_dev_info & PORT_SSP_TRGT_MASK) {
			MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
			MVS_CHIP_DISP->write_port_cfg_data(mvi, i, 0x00);
		}
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* Nothing */
	}
	mv_dprintk("phy %d bytes dmaed.\n", i + mvi->id * mvi->chip->n_phy);

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;

	sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, gfp_flags);
}

void mvs_scan_start(struct Scsi_Host *shost)
{
	int i, j;
	unsigned short core_nr;
	struct mvs_info *mvi;
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
	struct mvs_prv_info *mvs_prv = sha->lldd_ha;

	core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;

	for (j = 0; j < core_nr; j++) {
		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
		for (i = 0; i < mvi->chip->n_phy; ++i)
			mvs_bytes_dmaed(mvi, i, GFP_KERNEL);
	}
	mvs_prv->scan_finished = 1;
}

int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
	struct mvs_prv_info *mvs_prv = sha->lldd_ha;

	if (mvs_prv->scan_finished == 0)
		return 0;

	sas_drain_work(sha);
	return 1;
}
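
/*
 * Each of the prep routines below carves its per-slot DMA buffer
 * (MVS_SLOT_BUF_SZ bytes at slot->buf/slot->buf_dma) into up to four
 * consecutive regions; a sketch of the layout used by the SSP/ATA
 * paths (for SMP the command table stays in the mapped request sg):
 *
 *	+--------------------------+ slot->buf
 *	| 1: command table         | MVS_SSP_CMD_SZ / MVS_ATA_CMD_SZ
 *	+--------------------------+
 *	| 2: open address frame    | MVS_OAF_SZ
 *	+--------------------------+
 *	| 3: PRD table             | prd_size() * nr of PRD entries
 *	+--------------------------+
 *	| 4: status buffer         | the remainder
 *	+--------------------------+
 */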
static int mvs_task_prep_smp(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei)
{
	int elem, rc, i;
	struct sas_ha_struct *sha = mvi->sas;
	struct sas_task *task = tei->task;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct domain_device *dev = task->dev;
	struct asd_sas_port *sas_port = dev->port;
	struct sas_phy *sphy = dev->phy;
	struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number];
	struct scatterlist *sg_req, *sg_resp;
	u32 req_len, resp_len, tag = tei->tag;
	void *buf_tmp;
	u8 *buf_oaf;
	dma_addr_t buf_tmp_dma;
	void *buf_prd;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];
	u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);

	/*
	 * DMA-map SMP request, response buffers
	 */
	sg_req = &task->smp_task.smp_req;
	elem = dma_map_sg(mvi->dev, sg_req, 1, DMA_TO_DEVICE);
	if (!elem)
		return -ENOMEM;
	req_len = sg_dma_len(sg_req);

	sg_resp = &task->smp_task.smp_resp;
	elem = dma_map_sg(mvi->dev, sg_resp, 1, DMA_FROM_DEVICE);
	if (!elem) {
		rc = -ENOMEM;
		goto err_out;
	}
	resp_len = SB_RFB_MAX;

	/* must be in dwords */
	if ((req_len & 0x3) || (resp_len & 0x3)) {
		rc = -EINVAL;
		goto err_out_2;
	}

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ***** */
	buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table *********************************** */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
	if (mvi->flags & MVF_FLAG_SOC)
		hdr->reserved[0] = 0;

	/*
	 * Fill in TX ring and command slot header
	 */
	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
					TXQ_MODE_I | tag |
					(MVS_PHY_ID << TXQ_PHY_SHIFT));

	hdr->flags |= flags;
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
	hdr->tags = cpu_to_le32(tag);
	hdr->data_len = 0;

	/* generate open address frame hdr (first 12 bytes) */
	/* initiator, SMP, ftype 1h */
	buf_oaf[0] = (1 << 7) | (PROTOCOL_SMP << 4) | 0x01;
	buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf;
	*(u16 *)(buf_oaf + 2) = 0xFFFF;		/* SAS SPEC */
	memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in PRD (scatter/gather) table, if any */
	MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);

	return 0;

err_out_2:
	dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1,
		     DMA_FROM_DEVICE);
err_out:
	dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1,
		     DMA_TO_DEVICE);
	return rc;
}

static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
{
	struct ata_queued_cmd *qc = task->uldd_task;

	if (qc) {
		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
		    qc->tf.command == ATA_CMD_FPDMA_READ ||
		    qc->tf.command == ATA_CMD_FPDMA_RECV ||
		    qc->tf.command == ATA_CMD_FPDMA_SEND ||
		    qc->tf.command == ATA_CMD_NCQ_NON_DATA) {
			*tag = qc->tag;
			return 1;
		}
	}

	return 0;
}
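
/*
 * Build the command header, delivery-queue entry, FIS and open address
 * frame for a SATA/STP task; NCQ commands reuse the libata queued
 * command tag, so the hardware slot tag and the device tag can differ.
 */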
static int mvs_task_prep_ata(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei)
{
	struct sas_task *task = tei->task;
	struct domain_device *dev = task->dev;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct asd_sas_port *sas_port = dev->port;
	struct mvs_slot_info *slot;
	void *buf_prd;
	u32 tag = tei->tag, hdr_tag;
	u32 flags, del_q;
	void *buf_tmp;
	u8 *buf_cmd, *buf_oaf;
	dma_addr_t buf_tmp_dma;
	u32 i, req_len, resp_len;
	const u32 max_resp_len = SB_RFB_MAX;

	if (mvs_assign_reg_set(mvi, mvi_dev) == MVS_ID_NOT_MAPPED) {
		mv_dprintk("No free register set for dev %d.\n",
			   mvi_dev->device_id);
		return -EBUSY;
	}
	slot = &mvi->slot_info[tag];
	slot->tx = mvi->tx_prod;
	del_q = TXQ_MODE_I | tag |
		(TXQ_CMD_STP << TXQ_CMD_SHIFT) |
		((sas_port->phy_mask & TXQ_PHY_MASK) << TXQ_PHY_SHIFT) |
		(mvi_dev->taskfileset << TXQ_SRS_SHIFT);
	mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);

	if (task->data_dir == DMA_FROM_DEVICE)
		flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT);
	else
		flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);

	if (task->ata_task.use_ncq)
		flags |= MCH_FPDMA;
	if (dev->sata_dev.class == ATA_DEV_ATAPI) {
		if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
			flags |= MCH_ATAPI;
	}

	hdr->flags = cpu_to_le32(flags);

	if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag))
		task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
	else
		hdr_tag = tag;

	hdr->tags = cpu_to_le32(hdr_tag);

	hdr->data_len = cpu_to_le32(task->total_xfer_len);

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
	buf_cmd = buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_ATA_CMD_SZ;
	buf_tmp_dma += MVS_ATA_CMD_SZ;

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	/* used for STP.  unused for SATA? */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;

	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;
	i = MVS_CHIP_DISP->prd_size() * MVS_CHIP_DISP->prd_count();

	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
	if (mvi->flags & MVF_FLAG_SOC)
		hdr->reserved[0] = 0;

	req_len = sizeof(struct host_to_dev_fis);
	resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
			sizeof(struct mvs_err_info) - i;

	/* request, response lengths */
	resp_len = min(resp_len, max_resp_len);
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));

	if (likely(!task->ata_task.device_control_reg_update))
		task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
	/* fill in command FIS and ATAPI CDB */
	memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
	if (dev->sata_dev.class == ATA_DEV_ATAPI)
		memcpy(buf_cmd + STP_ATAPI_CMD,
			task->ata_task.atapi_packet, 16);

	/* generate open address frame hdr (first 12 bytes) */
	/* initiator, STP, ftype 1h */
	buf_oaf[0] = (1 << 7) | (PROTOCOL_STP << 4) | 0x1;
	buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf;
	*(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
	memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in PRD (scatter/gather) table, if any */
	MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);

	if (task->data_dir == DMA_FROM_DEVICE)
		MVS_CHIP_DISP->dma_fix(mvi, sas_port->phy_mask,
				TRASH_BUCKET_SIZE, tei->n_elem, buf_prd);

	return 0;
}
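
/*
 * Build the command header, delivery-queue entry, open address frame
 * and SSP frame (COMMAND or TASK IU) for an SSP task; TMFs are routed
 * through here as well, with @is_tmf/@tmf selecting the TASK frame.
 */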
static int mvs_task_prep_ssp(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei, int is_tmf,
			     struct mvs_tmf_task *tmf)
{
	struct sas_task *task = tei->task;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct mvs_port *port = tei->port;
	struct domain_device *dev = task->dev;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct asd_sas_port *sas_port = dev->port;
	struct mvs_slot_info *slot;
	void *buf_prd;
	struct ssp_frame_hdr *ssp_hdr;
	void *buf_tmp;
	u8 *buf_cmd, *buf_oaf, fburst = 0;
	dma_addr_t buf_tmp_dma;
	u32 flags;
	u32 resp_len, req_len, i, tag = tei->tag;
	const u32 max_resp_len = SB_RFB_MAX;
	u32 phy_mask;

	slot = &mvi->slot_info[tag];

	phy_mask = ((port->wide_port_phymap) ? port->wide_port_phymap :
		sas_port->phy_mask) & TXQ_PHY_MASK;

	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
				(TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
				(phy_mask << TXQ_PHY_SHIFT));

	flags = MCH_RETRY;
	if (task->ssp_task.enable_first_burst) {
		flags |= MCH_FBURST;
		fburst = (1 << 7);
	}
	if (is_tmf)
		flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT);
	else
		flags |= (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT);

	hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT));
	hdr->tags = cpu_to_le32(tag);
	hdr->data_len = cpu_to_le32(task->total_xfer_len);

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
	buf_cmd = buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_SSP_CMD_SZ;
	buf_tmp_dma += MVS_SSP_CMD_SZ;

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
	if (mvi->flags & MVF_FLAG_SOC)
		hdr->reserved[0] = 0;

	resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
		   sizeof(struct mvs_err_info) - i;
	resp_len = min(resp_len, max_resp_len);

	req_len = sizeof(struct ssp_frame_hdr) + 28;

	/* request, response lengths */
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));

	/* generate open address frame hdr (first 12 bytes) */
	/* initiator, SSP, ftype 1h */
	buf_oaf[0] = (1 << 7) | (PROTOCOL_SSP << 4) | 0x1;
	buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf;
	*(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
	memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in SSP frame header (Command Table.SSP frame header) */
	ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;

	if (is_tmf)
		ssp_hdr->frame_type = SSP_TASK;
	else
		ssp_hdr->frame_type = SSP_COMMAND;

	memcpy(ssp_hdr->hashed_dest_addr, dev->hashed_sas_addr,
	       HASHED_SAS_ADDR_SIZE);
	memcpy(ssp_hdr->hashed_src_addr,
	       dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
	ssp_hdr->tag = cpu_to_be16(tag);

	/* fill in IU for TASK and Command Frame */
	buf_cmd += sizeof(*ssp_hdr);
	memcpy(buf_cmd, &task->ssp_task.LUN, 8);

	if (ssp_hdr->frame_type != SSP_TASK) {
		buf_cmd[9] = fburst | task->ssp_task.task_attr |
				(task->ssp_task.task_prio << 3);
		memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd,
		       task->ssp_task.cmd->cmd_len);
	} else {
		buf_cmd[10] = tmf->tmf;
		switch (tmf->tmf) {
		case TMF_ABORT_TASK:
		case TMF_QUERY_TASK:
			buf_cmd[12] =
				(tmf->tag_of_task_to_be_managed >> 8) & 0xff;
			buf_cmd[13] =
				tmf->tag_of_task_to_be_managed & 0xff;
			break;
		default:
			break;
		}
	}
	/* fill in PRD (scatter/gather) table, if any */
	MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
	return 0;
}
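
/*
 * Common preparation path: validate the port/device state, map the
 * scatterlist, allocate a slot tag and buffer, then dispatch to the
 * protocol-specific prep routine above.
 */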
#define DEV_IS_GONE(mvi_dev)	((!mvi_dev || (mvi_dev->dev_type == SAS_PHY_UNUSED)))

static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf,
				struct mvs_tmf_task *tmf, int *pass)
{
	struct domain_device *dev = task->dev;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct mvs_task_exec_info tei;
	struct mvs_slot_info *slot;
	u32 tag = 0xdeadbeef, n_elem = 0;
	int rc = 0;

	if (!dev->port) {
		struct task_status_struct *tsm = &task->task_status;

		tsm->resp = SAS_TASK_UNDELIVERED;
		tsm->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port, should
		 * not call task_done for sata
		 */
		if (dev->dev_type != SAS_SATA_DEV)
			task->task_done(task);
		return rc;
	}

	if (DEV_IS_GONE(mvi_dev)) {
		if (mvi_dev)
			mv_dprintk("device %d not ready.\n",
				   mvi_dev->device_id);
		else
			mv_dprintk("device %016llx not ready.\n",
				   SAS_ADDR(dev->sas_addr));

		rc = SAS_PHY_DOWN;
		return rc;
	}
	tei.port = dev->port->lldd_port;
	if (tei.port && !tei.port->port_attached && !tmf) {
		if (sas_protocol_ata(task->task_proto)) {
			struct task_status_struct *ts = &task->task_status;

			mv_dprintk("SATA/STP port %d does not attach "
				   "device.\n", dev->port->id);
			ts->resp = SAS_TASK_COMPLETE;
			ts->stat = SAS_PHY_DOWN;

			task->task_done(task);

		} else {
			struct task_status_struct *ts = &task->task_status;

			mv_dprintk("SAS port %d does not attach "
				   "device.\n", dev->port->id);
			ts->resp = SAS_TASK_UNDELIVERED;
			ts->stat = SAS_PHY_DOWN;
			task->task_done(task);
		}
		return rc;
	}

	if (!sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter) {
			n_elem = dma_map_sg(mvi->dev,
					    task->scatter,
					    task->num_scatter,
					    task->data_dir);
			if (!n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		}
	} else {
		n_elem = task->num_scatter;
	}

	rc = mvs_tag_alloc(mvi, &tag);
	if (rc)
		goto err_out;

	slot = &mvi->slot_info[tag];

	task->lldd_task = NULL;
	slot->n_elem = n_elem;
	slot->slot_tag = tag;

	slot->buf = dma_pool_zalloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma);
	if (!slot->buf) {
		rc = -ENOMEM;
		goto err_out_tag;
	}

	tei.task = task;
	tei.hdr = &mvi->slot[tag];
	tei.tag = tag;
	tei.n_elem = n_elem;
	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		rc = mvs_task_prep_smp(mvi, &tei);
		break;
	case SAS_PROTOCOL_SSP:
		rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		rc = mvs_task_prep_ata(mvi, &tei);
		break;
	default:
		dev_printk(KERN_ERR, mvi->dev,
			   "unknown sas_task proto: 0x%x\n",
			   task->task_proto);
		rc = -EINVAL;
		break;
	}

	if (rc) {
		mv_dprintk("rc is %x\n", rc);
		goto err_out_slot_buf;
	}
	slot->task = task;
	slot->port = tei.port;
	task->lldd_task = slot;
	list_add_tail(&slot->entry, &tei.port->list);
	spin_lock(&task->task_state_lock);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock(&task->task_state_lock);

	mvi_dev->running_req++;
	++(*pass);
	mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);

	return rc;

err_out_slot_buf:
	dma_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
err_out_tag:
	mvs_tag_free(mvi, tag);
err_out:
	dev_printk(KERN_ERR, mvi->dev, "mvsas prep failed[%d]!\n", rc);
	if (!sas_protocol_ata(task->task_proto))
		if (n_elem)
			dma_unmap_sg(mvi->dev, task->scatter, n_elem,
				     task->data_dir);
prep_out:
	return rc;
}
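
/*
 * Prepare @task under mvi->lock and, if anything was queued, ring the
 * delivery doorbell for the last slot written.
 */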
static int mvs_task_exec(struct sas_task *task, gfp_t gfp_flags,
				struct completion *completion, int is_tmf,
				struct mvs_tmf_task *tmf)
{
	struct mvs_info *mvi = NULL;
	u32 rc = 0;
	u32 pass = 0;
	unsigned long flags = 0;

	mvi = ((struct mvs_device *)task->dev->lldd_dev)->mvi_info;

	spin_lock_irqsave(&mvi->lock, flags);
	rc = mvs_task_prep(task, mvi, is_tmf, tmf, &pass);
	if (rc)
		dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);

	if (likely(pass))
		MVS_CHIP_DISP->start_delivery(mvi, (mvi->tx_prod - 1) &
				(MVS_CHIP_SLOT_SZ - 1));
	spin_unlock_irqrestore(&mvi->lock, flags);

	return rc;
}

int mvs_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	return mvs_task_exec(task, gfp_flags, NULL, 0, NULL);
}

static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
{
	u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
	mvs_tag_clear(mvi, slot_idx);
}

static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
			  struct mvs_slot_info *slot, u32 slot_idx)
{
	if (!slot)
		return;
	if (!slot->task)
		return;
	if (!sas_protocol_ata(task->task_proto))
		if (slot->n_elem)
			dma_unmap_sg(mvi->dev, task->scatter,
				     slot->n_elem, task->data_dir);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1,
			     DMA_FROM_DEVICE);
		dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1,
			     DMA_TO_DEVICE);
		break;

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SSP:
	default:
		/* do nothing */
		break;
	}

	if (slot->buf) {
		dma_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
		slot->buf = NULL;
	}
	list_del_init(&slot->entry);
	task->lldd_task = NULL;
	slot->task = NULL;
	slot->port = NULL;
	slot->slot_tag = 0xFFFFFFFF;
	mvs_slot_free(mvi, slot_idx);
}

static void mvs_update_wideport(struct mvs_info *mvi, int phy_no)
{
	struct mvs_phy *phy = &mvi->phy[phy_no];
	struct mvs_port *port = phy->port;
	int j, no;

	for_each_phy(port->wide_port_phymap, j, no) {
		if (j & 1) {
			MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
						PHYR_WIDE_PORT);
			MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
						port->wide_port_phymap);
		} else {
			MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
						PHYR_WIDE_PORT);
			MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
						0);
		}
	}
}

static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
{
	u32 tmp;
	struct mvs_phy *phy = &mvi->phy[i];
	struct mvs_port *port = phy->port;

	tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, i);
	if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
		if (!port)
			phy->phy_attached = 1;
		return tmp;
	}

	if (port) {
		if (phy->phy_type & PORT_TYPE_SAS) {
			port->wide_port_phymap &= ~(1U << i);
			if (!port->wide_port_phymap)
				port->port_attached = 0;
			mvs_update_wideport(mvi, i);
		} else if (phy->phy_type & PORT_TYPE_SATA)
			port->port_attached = 0;
		phy->port = NULL;
		phy->phy_attached = 0;
		phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
	}

	return 0;
}
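
/*
 * Read back the four SATA signature registers for phy @i and assemble
 * them into a D2H register FIS image in @buf.
 */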
static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
{
	u32 *s = (u32 *)buf;

	if (!s)
		return NULL;

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
	s[3] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
	s[2] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
	s[1] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));

	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
	s[0] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));

	if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01))
		s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10);

	return s;
}

static u32 mvs_is_sig_fis_received(u32 irq_status)
{
	return irq_status & PHYEV_SIG_FIS;
}

static void mvs_sig_remove_timer(struct mvs_phy *phy)
{
	if (phy->timer.function)
		del_timer(&phy->timer);
	phy->timer.function = NULL;
}

void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct sas_identify_frame *id;

	id = (struct sas_identify_frame *)phy->frame_rcvd;

	if (get_st) {
		phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, i);
		phy->phy_status = mvs_is_phy_ready(mvi, i);
	}

	if (phy->phy_status) {
		int oob_done = 0;
		struct asd_sas_phy *sas_phy = &mvi->phy[i].sas_phy;

		oob_done = MVS_CHIP_DISP->oob_done(mvi, i);

		MVS_CHIP_DISP->fix_phy_info(mvi, i, id);
		if (phy->phy_type & PORT_TYPE_SATA) {
			phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
			if (mvs_is_sig_fis_received(phy->irq_status)) {
				mvs_sig_remove_timer(phy);
				phy->phy_attached = 1;
				phy->att_dev_sas_addr =
					i + mvi->id * mvi->chip->n_phy;
				if (oob_done)
					sas_phy->oob_mode = SATA_OOB_MODE;
				phy->frame_rcvd_size =
					sizeof(struct dev_to_host_fis);
				mvs_get_d2h_reg(mvi, i, id);
			} else {
				u32 tmp;

				dev_printk(KERN_DEBUG, mvi->dev,
					   "Phy%d : No sig fis\n", i);
				tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, i);
				MVS_CHIP_DISP->write_port_irq_mask(mvi, i,
						tmp | PHYEV_SIG_FIS);
				phy->phy_attached = 0;
				phy->phy_type &= ~PORT_TYPE_SATA;
				goto out_done;
			}
		} else if (phy->phy_type & PORT_TYPE_SAS
			|| phy->att_dev_info & PORT_SSP_INIT_MASK) {
			phy->phy_attached = 1;
			phy->identify.device_type =
				phy->att_dev_info & PORT_DEV_TYPE_MASK;

			if (phy->identify.device_type == SAS_END_DEVICE)
				phy->identify.target_port_protocols =
							SAS_PROTOCOL_SSP;
			else if (phy->identify.device_type != SAS_PHY_UNUSED)
				phy->identify.target_port_protocols =
							SAS_PROTOCOL_SMP;
			if (oob_done)
				sas_phy->oob_mode = SAS_OOB_MODE;
			phy->frame_rcvd_size =
			    sizeof(struct sas_identify_frame);
		}
		memcpy(sas_phy->attached_sas_addr,
			&phy->att_dev_sas_addr, SAS_ADDR_SIZE);

		if (MVS_CHIP_DISP->phy_work_around)
			MVS_CHIP_DISP->phy_work_around(mvi, i);
	}
	mv_dprintk("phy %d attach dev info is %x\n",
		i + mvi->id * mvi->chip->n_phy, phy->att_dev_info);
	mv_dprintk("phy %d attach sas addr is %llx\n",
		i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr);
out_done:
	if (get_st)
		MVS_CHIP_DISP->write_port_irq_stat(mvi, i, phy->irq_status);
}
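
/*
 * Port formation callback: bind the new asd_sas_port to its mvs_port,
 * program the wide-port phy map and, for direct attached SSP targets,
 * the phy status register.
 */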
static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct mvs_info *mvi = NULL;
	int i = 0, hi;
	struct mvs_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct mvs_port *port;
	unsigned long flags = 0;

	if (!sas_port)
		return;

	while (sas_ha->sas_phy[i]) {
		if (sas_ha->sas_phy[i] == sas_phy)
			break;
		i++;
	}
	hi = i / ((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy;
	mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi];
	if (i >= mvi->chip->n_phy)
		port = &mvi->port[i - mvi->chip->n_phy];
	else
		port = &mvi->port[i];
	if (lock)
		spin_lock_irqsave(&mvi->lock, flags);
	port->port_attached = 1;
	phy->port = port;
	sas_port->lldd_port = port;
	if (phy->phy_type & PORT_TYPE_SAS) {
		port->wide_port_phymap = sas_port->phy_mask;
		mv_printk("set wide port phy map %x\n", sas_port->phy_mask);
		mvs_update_wideport(mvi, sas_phy->id);

		/* direct attached SAS device */
		if (phy->att_dev_info & PORT_SSP_TRGT_MASK) {
			MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
			MVS_CHIP_DISP->write_port_cfg_data(mvi, i, 0x04);
		}
	}
	if (lock)
		spin_unlock_irqrestore(&mvi->lock, flags);
}

static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock)
{
	struct domain_device *dev;
	struct mvs_phy *phy = sas_phy->lldd_phy;
	struct mvs_info *mvi = phy->mvi;
	struct asd_sas_port *port = sas_phy->port;
	int phy_no = 0;

	while (phy != &mvi->phy[phy_no]) {
		phy_no++;
		if (phy_no >= MVS_MAX_PHYS)
			return;
	}
	list_for_each_entry(dev, &port->dev_list, dev_list_node)
		mvs_do_release_task(phy->mvi, phy_no, dev);
}

void mvs_port_formed(struct asd_sas_phy *sas_phy)
{
	mvs_port_notify_formed(sas_phy, 1);
}

void mvs_port_deformed(struct asd_sas_phy *sas_phy)
{
	mvs_port_notify_deformed(sas_phy, 1);
}

static struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
{
	u32 dev;
	for (dev = 0; dev < MVS_MAX_DEVICES; dev++) {
		if (mvi->devices[dev].dev_type == SAS_PHY_UNUSED) {
			mvi->devices[dev].device_id = dev;
			return &mvi->devices[dev];
		}
	}

	if (dev == MVS_MAX_DEVICES)
		mv_printk("max support %d devices, ignore ..\n",
			  MVS_MAX_DEVICES);

	return NULL;
}

static void mvs_free_dev(struct mvs_device *mvi_dev)
{
	u32 id = mvi_dev->device_id;
	memset(mvi_dev, 0, sizeof(*mvi_dev));
	mvi_dev->device_id = id;
	mvi_dev->dev_type = SAS_PHY_UNUSED;
	mvi_dev->dev_status = MVS_DEV_NORMAL;
	mvi_dev->taskfileset = MVS_ID_NOT_MAPPED;
}
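
/*
 * Device discovery callback: allocate an mvs_device slot for @dev and,
 * for expander attached devices, record which expander phy leads to it.
 */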
static int mvs_dev_found_notify(struct domain_device *dev, int lock)
{
	unsigned long flags = 0;
	int res = 0;
	struct mvs_info *mvi = NULL;
	struct domain_device *parent_dev = dev->parent;
	struct mvs_device *mvi_device;

	mvi = mvs_find_dev_mvi(dev);

	if (lock)
		spin_lock_irqsave(&mvi->lock, flags);

	mvi_device = mvs_alloc_dev(mvi);
	if (!mvi_device) {
		res = -1;
		goto found_out;
	}
	dev->lldd_dev = mvi_device;
	mvi_device->dev_status = MVS_DEV_NORMAL;
	mvi_device->dev_type = dev->dev_type;
	mvi_device->mvi_info = mvi;
	mvi_device->sas_device = dev;
	if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
		int phy_id;
		u8 phy_num = parent_dev->ex_dev.num_phys;
		struct ex_phy *phy;

		for (phy_id = 0; phy_id < phy_num; phy_id++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_id];
			if (SAS_ADDR(phy->attached_sas_addr) ==
				SAS_ADDR(dev->sas_addr)) {
				mvi_device->attached_phy = phy_id;
				break;
			}
		}

		if (phy_id == phy_num) {
			mv_printk("Error: no attached dev:%016llx "
				  "at ex:%016llx.\n",
				  SAS_ADDR(dev->sas_addr),
				  SAS_ADDR(parent_dev->sas_addr));
			res = -1;
		}
	}

found_out:
	if (lock)
		spin_unlock_irqrestore(&mvi->lock, flags);
	return res;
}

int mvs_dev_found(struct domain_device *dev)
{
	return mvs_dev_found_notify(dev, 1);
}

static void mvs_dev_gone_notify(struct domain_device *dev)
{
	unsigned long flags = 0;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct mvs_info *mvi;

	if (!mvi_dev) {
		mv_dprintk("device is already gone.\n");
		return;
	}

	mvi = mvi_dev->mvi_info;

	spin_lock_irqsave(&mvi->lock, flags);

	mv_dprintk("found dev[%d:%x] is gone.\n",
		mvi_dev->device_id, mvi_dev->dev_type);
	mvs_release_task(mvi, dev);
	mvs_free_reg_set(mvi, mvi_dev);
	mvs_free_dev(mvi_dev);

	dev->lldd_dev = NULL;
	mvi_dev->sas_device = NULL;

	spin_unlock_irqrestore(&mvi->lock, flags);
}

void mvs_dev_gone(struct domain_device *dev)
{
	mvs_dev_gone_notify(dev);
}

static void mvs_task_done(struct sas_task *task)
{
	if (!del_timer(&task->slow_task->timer))
		return;
	complete(&task->slow_task->completion);
}

static void mvs_tmf_timedout(struct timer_list *t)
{
	struct sas_task_slow *slow = from_timer(slow, t, timer);
	struct sas_task *task = slow->task;

	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	complete(&task->slow_task->completion);
}
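
/*
 * Issue an internal task management function as a libsas slow task,
 * guarded by a MVS_TASK_TIMEOUT-second timer and retried up to three
 * times on failure.
 */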
#define MVS_TASK_TIMEOUT 20
static int mvs_exec_internal_tmf_task(struct domain_device *dev,
			void *parameter, u32 para_len, struct mvs_tmf_task *tmf)
{
	int res, retry;
	struct sas_task *task = NULL;

	for (retry = 0; retry < 3; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = dev;
		task->task_proto = dev->tproto;

		memcpy(&task->ssp_task, parameter, para_len);
		task->task_done = mvs_task_done;

		task->slow_task->timer.function = mvs_tmf_timedout;
		task->slow_task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
		add_timer(&task->slow_task->timer);

		res = mvs_task_exec(task, GFP_KERNEL, NULL, 1, tmf);

		if (res) {
			del_timer(&task->slow_task->timer);
			mv_printk("executing internal task failed:%d\n", res);
			goto ex_err;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even if the TMF timed out, return directly. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				mv_printk("TMF task[%x] timeout.\n", tmf->tmf);
				goto ex_err;
			}
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAM_STAT_GOOD) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun */
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_OVERRUN) {
			mv_dprintk("blocked task error.\n");
			res = -EMSGSIZE;
			break;
		} else {
			mv_dprintk(" task to dev %016llx response: 0x%x "
				   "status 0x%x\n",
				   SAS_ADDR(dev->sas_addr),
				   task->task_status.resp,
				   task->task_status.stat);
			sas_free_task(task);
			task = NULL;
		}
	}
ex_err:
	BUG_ON(retry == 3 && task != NULL);
	sas_free_task(task);
	return res;
}

static int mvs_debug_issue_ssp_tmf(struct domain_device *dev,
				u8 *lun, struct mvs_tmf_task *tmf)
{
	struct sas_ssp_task ssp_task;

	if (!(dev->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	memcpy(ssp_task.LUN, lun, 8);

	return mvs_exec_internal_tmf_task(dev, &ssp_task,
				sizeof(ssp_task), tmf);
}
/* Standard mandates link reset for ATA (type 0)
   and hard reset for SSP (type 1), only for RECOVERY */
static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
{
	int rc;
	struct sas_phy *phy = sas_get_local_phy(dev);
	int reset_type = (dev->dev_type == SAS_SATA_DEV ||
			(dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;

	rc = sas_phy_reset(phy, reset_type);
	sas_put_local_phy(phy);
	msleep(2000);
	return rc;
}

/* mandatory SAM-3 */
int mvs_lu_reset(struct domain_device *dev, u8 *lun)
{
	unsigned long flags;
	int rc = TMF_RESP_FUNC_FAILED;
	struct mvs_tmf_task tmf_task;
	struct mvs_device *mvi_dev = dev->lldd_dev;
	struct mvs_info *mvi = mvi_dev->mvi_info;

	tmf_task.tmf = TMF_LU_RESET;
	mvi_dev->dev_status = MVS_DEV_EH;
	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
	if (rc == TMF_RESP_FUNC_COMPLETE) {
		spin_lock_irqsave(&mvi->lock, flags);
		mvs_release_task(mvi, dev);
		spin_unlock_irqrestore(&mvi->lock, flags);
	}
	/* If failed, fall through to I_T_Nexus reset */
	mv_printk("%s for device[%x]:rc= %d\n", __func__,
			mvi_dev->device_id, rc);
	return rc;
}

int mvs_I_T_nexus_reset(struct domain_device *dev)
{
	unsigned long flags;
	int rc = TMF_RESP_FUNC_FAILED;
	struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
	struct mvs_info *mvi = mvi_dev->mvi_info;

	if (mvi_dev->dev_status != MVS_DEV_EH)
		return TMF_RESP_FUNC_COMPLETE;
	else
		mvi_dev->dev_status = MVS_DEV_NORMAL;
	rc = mvs_debug_I_T_nexus_reset(dev);
	mv_printk("%s for device[%x]:rc= %d\n",
		__func__, mvi_dev->device_id, rc);

	spin_lock_irqsave(&mvi->lock, flags);
	mvs_release_task(mvi, dev);
	spin_unlock_irqrestore(&mvi->lock, flags);

	return rc;
}

/* optional SAM-3 */
int mvs_query_task(struct sas_task *task)
{
	u32 tag;
	struct scsi_lun lun;
	struct mvs_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = (struct scsi_cmnd *)task->uldd_task;
		struct domain_device *dev = task->dev;
		struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
		struct mvs_info *mvi = mvi_dev->mvi_info;

		int_to_scsilun(cmnd->device->lun, &lun);
		rc = mvs_find_tag(mvi, task, &tag);
		if (rc == 0) {
			rc = TMF_RESP_FUNC_FAILED;
			return rc;
		}

		tmf_task.tmf = TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
		switch (rc) {
		/* The task is still in the LUN, release it then */
		case TMF_RESP_FUNC_SUCC:
		/* The task is not in the LUN or failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			break;
		}
	}
	mv_printk("%s:rc= %d\n", __func__, rc);
	return rc;
}
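
/*
 * Abort a single task: for SSP send TMF_ABORT_TASK and complete the
 * slot on success; for SATA/STP mark the task aborted and free its
 * slot directly.
 */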
/* mandatory SAM-3, still needs to free the task/slot info */
int mvs_abort_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct mvs_tmf_task tmf_task;
	struct domain_device *dev = task->dev;
	struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
	struct mvs_info *mvi;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;
	u32 tag;

	if (!mvi_dev) {
		mv_printk("Device has been removed\n");
		return TMF_RESP_FUNC_FAILED;
	}

	mvi = mvi_dev->mvi_info;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	mvi_dev->dev_status = MVS_DEV_EH;
	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = (struct scsi_cmnd *)task->uldd_task;

		int_to_scsilun(cmnd->device->lun, &lun);
		rc = mvs_find_tag(mvi, task, &tag);
		if (rc == 0) {
			mv_printk("No such tag in %s\n", __func__);
			rc = TMF_RESP_FUNC_FAILED;
			return rc;
		}

		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);

		/* if successful, clear the task and callback forwards.*/
		if (rc == TMF_RESP_FUNC_COMPLETE) {
			u32 slot_no;
			struct mvs_slot_info *slot;

			if (task->lldd_task) {
				slot = task->lldd_task;
				slot_no = (u32) (slot - mvi->slot_info);
				spin_lock_irqsave(&mvi->lock, flags);
				mvs_slot_complete(mvi, slot_no, 1);
				spin_unlock_irqrestore(&mvi->lock, flags);
			}
		}

	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		task->task_proto & SAS_PROTOCOL_STP) {
		if (SAS_SATA_DEV == dev->dev_type) {
			struct mvs_slot_info *slot = task->lldd_task;
			u32 slot_idx = (u32)(slot - mvi->slot_info);

			mv_dprintk("mvs_abort_task() mvi=%p task=%p "
				   "slot=%p slot_idx=x%x\n",
				   mvi, task, slot, slot_idx);
			task->task_state_flags |= SAS_TASK_STATE_ABORTED;
			mvs_slot_task_free(mvi, task, slot, slot_idx);
			rc = TMF_RESP_FUNC_COMPLETE;
			goto out;
		}

	}
out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		mv_printk("%s:rc= %d\n", __func__, rc);
	return rc;
}

int mvs_abort_task_set(struct domain_device *dev, u8 *lun)
{
	int rc;
	struct mvs_tmf_task tmf_task;

	tmf_task.tmf = TMF_ABORT_TASK_SET;
	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);

	return rc;
}

int mvs_clear_aca(struct domain_device *dev, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct mvs_tmf_task tmf_task;

	tmf_task.tmf = TMF_CLEAR_ACA;
	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);

	return rc;
}

int mvs_clear_task_set(struct domain_device *dev, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct mvs_tmf_task tmf_task;

	tmf_task.tmf = TMF_CLEAR_TASK_SET;
	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);

	return rc;
}

static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
			u32 slot_idx, int err)
{
	struct mvs_device *mvi_dev = task->dev->lldd_dev;
	struct task_status_struct *tstat = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
	int stat = SAM_STAT_GOOD;

	resp->frame_len = sizeof(struct dev_to_host_fis);
	memcpy(&resp->ending_fis[0],
	       SATA_RECEIVED_D2H_FIS(mvi_dev->taskfileset),
	       sizeof(struct dev_to_host_fis));
	tstat->buf_valid_size = sizeof(*resp);
	if (unlikely(err)) {
		if (unlikely(err & CMD_ISS_STPD))
			stat = SAS_OPEN_REJECT;
		else
			stat = SAS_PROTO_RESPONSE;
	}

	return stat;
}
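
/*
 * Build sense data in @buffer: descriptor format when @d_sense is set,
 * otherwise fixed format, for the given key/asc/ascq triple.
 */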
static void mvs_set_sense(u8 *buffer, int len, int d_sense,
		int key, int asc, int ascq)
{
	memset(buffer, 0, len);

	if (d_sense) {
		/* Descriptor format */
		if (len < 4) {
			mv_printk("Length %d of sense buffer too small to "
				"fit sense %x:%x:%x", len, key, asc, ascq);
		}

		buffer[0] = 0x72;		/* Response Code */
		if (len > 1)
			buffer[1] = key;	/* Sense Key */
		if (len > 2)
			buffer[2] = asc;	/* ASC */
		if (len > 3)
			buffer[3] = ascq;	/* ASCQ */
	} else {
		if (len < 14) {
			mv_printk("Length %d of sense buffer too small to "
				"fit sense %x:%x:%x", len, key, asc, ascq);
		}

		buffer[0] = 0x70;		/* Response Code */
		if (len > 2)
			buffer[2] = key;	/* Sense Key */
		if (len > 7)
			buffer[7] = 0x0a;	/* Additional Sense Length */
		if (len > 12)
			buffer[12] = asc;	/* ASC */
		if (len > 13)
			buffer[13] = ascq;	/* ASCQ */
	}

	return;
}

static void mvs_fill_ssp_resp_iu(struct ssp_response_iu *iu,
				u8 key, u8 asc, u8 asc_q)
{
	iu->datapres = 2;
	iu->response_data_len = 0;
	iu->sense_data_len = 17;
	iu->status = 02;
	mvs_set_sense(iu->sense_data, 17, 0,
			key, asc, asc_q);
}

static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
			u32 slot_idx)
{
	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
	int stat;
	u32 err_dw0 = le32_to_cpu(*(u32 *)slot->response);
	u32 err_dw1 = le32_to_cpu(*((u32 *)slot->response + 1));
	u32 tfs = 0;
	enum mvs_port_type type = PORT_TYPE_SAS;

	if (err_dw0 & CMD_ISS_STPD)
		MVS_CHIP_DISP->issue_stop(mvi, type, tfs);

	MVS_CHIP_DISP->command_active(mvi, slot_idx);

	stat = SAM_STAT_CHECK_CONDITION;
	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
	{
		stat = SAS_ABORTED_TASK;
		if ((err_dw0 & NO_DEST) || err_dw1 & bit(31)) {
			struct ssp_response_iu *iu = slot->response +
				sizeof(struct mvs_err_info);
			mvs_fill_ssp_resp_iu(iu, NOT_READY, 0x04, 01);
			sas_ssp_task_response(mvi->dev, task, iu);
			stat = SAM_STAT_CHECK_CONDITION;
		}
		if (err_dw1 & bit(31))
			mv_printk("reuse same slot, retry command.\n");
		break;
	}
	case SAS_PROTOCOL_SMP:
		stat = SAM_STAT_CHECK_CONDITION;
		break;

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
	{
		task->ata_task.use_ncq = 0;
		stat = SAS_PROTO_RESPONSE;
		mvs_sata_done(mvi, task, slot_idx, err_dw0);
	}
		break;
	default:
		break;
	}

	return stat;
}
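
/*
 * Completion path for one slot: decode the RX descriptor and error
 * info record, fill in the task status, free the slot, then call
 * task->task_done() with mvi->lock temporarily dropped.
 */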
int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
{
	u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
	struct sas_task *task = slot->task;
	struct mvs_device *mvi_dev = NULL;
	struct task_status_struct *tstat;
	struct domain_device *dev;
	u32 aborted;

	void *to;
	enum exec_status sts;

	if (unlikely(!task || !task->lldd_task || !task->dev))
		return -1;

	tstat = &task->task_status;
	dev = task->dev;
	mvi_dev = dev->lldd_dev;

	spin_lock(&task->task_state_lock);
	task->task_state_flags &=
		~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
	task->task_state_flags |= SAS_TASK_STATE_DONE;
	/* race condition */
	aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
	spin_unlock(&task->task_state_lock);

	memset(tstat, 0, sizeof(*tstat));
	tstat->resp = SAS_TASK_COMPLETE;

	if (unlikely(aborted)) {
		tstat->stat = SAS_ABORTED_TASK;
		if (mvi_dev && mvi_dev->running_req)
			mvi_dev->running_req--;
		if (sas_protocol_ata(task->task_proto))
			mvs_free_reg_set(mvi, mvi_dev);

		mvs_slot_task_free(mvi, task, slot, slot_idx);
		return -1;
	}

	/* when no device is attached, go ahead and complete by error handling */
	if (unlikely(!mvi_dev || flags)) {
		if (!mvi_dev)
			mv_dprintk("port has no device.\n");
		tstat->stat = SAS_PHY_DOWN;
		goto out;
	}

	/*
	 * error info record present; slot->response is 32 bit aligned but may
	 * not be 64 bit aligned, so check for zero in two 32 bit reads
	 */
	if (unlikely((rx_desc & RXQ_ERR)
		     && (*((u32 *)slot->response)
			 || *(((u32 *)slot->response) + 1)))) {
		mv_dprintk("port %d slot %d rx_desc %X has error info "
			   "%016llX.\n", slot->port->sas_port.id, slot_idx,
			   rx_desc, get_unaligned_le64(slot->response));
		tstat->stat = mvs_slot_err(mvi, task, slot_idx);
		tstat->resp = SAS_TASK_COMPLETE;
		goto out;
	}

	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
		/* hw says status == 0, datapres == 0 */
		if (rx_desc & RXQ_GOOD) {
			tstat->stat = SAM_STAT_GOOD;
			tstat->resp = SAS_TASK_COMPLETE;
		}
		/* response frame present */
		else if (rx_desc & RXQ_RSP) {
			struct ssp_response_iu *iu = slot->response +
						sizeof(struct mvs_err_info);
			sas_ssp_task_response(mvi->dev, task, iu);
		} else
			tstat->stat = SAM_STAT_CHECK_CONDITION;
		break;

	case SAS_PROTOCOL_SMP: {
		struct scatterlist *sg_resp = &task->smp_task.smp_resp;

		tstat->stat = SAM_STAT_GOOD;
		to = kmap_atomic(sg_page(sg_resp));
		memcpy(to + sg_resp->offset,
		       slot->response + sizeof(struct mvs_err_info),
		       sg_dma_len(sg_resp));
		kunmap_atomic(to);
		break;
	}

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
		tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
		break;
	}

	default:
		tstat->stat = SAM_STAT_CHECK_CONDITION;
		break;
	}
	if (!slot->port->port_attached) {
		mv_dprintk("port %d has been removed.\n",
			   slot->port->sas_port.id);
		tstat->stat = SAS_PHY_DOWN;
	}

out:
	if (mvi_dev && mvi_dev->running_req) {
		mvi_dev->running_req--;
		if (sas_protocol_ata(task->task_proto) && !mvi_dev->running_req)
			mvs_free_reg_set(mvi, mvi_dev);
	}
	mvs_slot_task_free(mvi, task, slot, slot_idx);
	sts = tstat->stat;

	spin_unlock(&mvi->lock);
	if (task->task_done)
		task->task_done(task);

	spin_lock(&mvi->lock);

	return sts;
}
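
/*
 * Force-complete every outstanding slot on @phy_no's port (optionally
 * only those belonging to @dev), draining the completion queue first.
 */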
void mvs_do_release_task(struct mvs_info *mvi,
		int phy_no, struct domain_device *dev)
{
	u32 slot_idx;
	struct mvs_phy *phy;
	struct mvs_port *port;
	struct mvs_slot_info *slot, *slot2;

	phy = &mvi->phy[phy_no];
	port = phy->port;
	if (!port)
		return;
	/* clean cmpl queue in case request is already finished */
	mvs_int_rx(mvi, false);

	list_for_each_entry_safe(slot, slot2, &port->list, entry) {
		struct sas_task *task;

		slot_idx = (u32) (slot - mvi->slot_info);
		task = slot->task;

		if (dev && task->dev != dev)
			continue;

		mv_printk("Release slot [%x] tag[%x], task [%p]:\n",
			slot_idx, slot->slot_tag, task);
		MVS_CHIP_DISP->command_active(mvi, slot_idx);

		mvs_slot_complete(mvi, slot_idx, 1);
	}
}

void mvs_release_task(struct mvs_info *mvi,
		      struct domain_device *dev)
{
	int i, phyno[WIDE_PORT_MAX_PHY], num;

	num = mvs_find_dev_phyno(dev, phyno);
	for (i = 0; i < num; i++)
		mvs_do_release_task(mvi, phyno[i], dev);
}

static void mvs_phy_disconnected(struct mvs_phy *phy)
{
	phy->phy_attached = 0;
	phy->att_dev_info = 0;
	phy->att_dev_sas_addr = 0;
}

static void mvs_work_queue(struct work_struct *work)
{
	struct delayed_work *dw = container_of(work, struct delayed_work, work);
	struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q);
	struct mvs_info *mvi = mwq->mvi;
	unsigned long flags;
	u32 phy_no = (unsigned long) mwq->data;
	struct mvs_phy *phy = &mvi->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	spin_lock_irqsave(&mvi->lock, flags);
	if (mwq->handler & PHY_PLUG_EVENT) {
		if (phy->phy_event & PHY_PLUG_OUT) {
			u32 tmp;

			tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no);
			phy->phy_event &= ~PHY_PLUG_OUT;
			if (!(tmp & PHY_READY_MASK)) {
				sas_phy_disconnected(sas_phy);
				mvs_phy_disconnected(phy);
				sas_notify_phy_event(sas_phy,
					PHYE_LOSS_OF_SIGNAL, GFP_ATOMIC);
				mv_dprintk("phy%d Removed Device\n", phy_no);
			} else {
				MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
				mvs_update_phyinfo(mvi, phy_no, 1);
				mvs_bytes_dmaed(mvi, phy_no, GFP_ATOMIC);
				mvs_port_notify_formed(sas_phy, 0);
				mv_dprintk("phy%d Attached Device\n", phy_no);
			}
		}
	} else if (mwq->handler & EXP_BRCT_CHG) {
		phy->phy_event &= ~EXP_BRCT_CHG;
		sas_notify_port_event(sas_phy,
				PORTE_BROADCAST_RCVD, GFP_ATOMIC);
		mv_dprintk("phy%d Got Broadcast Change\n", phy_no);
	}
	list_del(&mwq->entry);
	spin_unlock_irqrestore(&mvi->lock, flags);
	kfree(mwq);
}
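
/*
 * Queue a phy event for deferred handling by mvs_work_queue() above,
 * delayed by two seconds, presumably to let the link settle first.
 */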
static int mvs_handle_event(struct mvs_info *mvi, void *data, int handler)
{
	struct mvs_wq *mwq;
	int ret = 0;

	mwq = kmalloc(sizeof(struct mvs_wq), GFP_ATOMIC);
	if (mwq) {
		mwq->mvi = mvi;
		mwq->data = data;
		mwq->handler = handler;
		MV_INIT_DELAYED_WORK(&mwq->work_q, mvs_work_queue, mwq);
		list_add_tail(&mwq->entry, &mvi->wq_list);
		schedule_delayed_work(&mwq->work_q, HZ * 2);
	} else
		ret = -ENOMEM;

	return ret;
}

static void mvs_sig_time_out(struct timer_list *t)
{
	struct mvs_phy *phy = from_timer(phy, t, timer);
	struct mvs_info *mvi = phy->mvi;
	u8 phy_no;

	for (phy_no = 0; phy_no < mvi->chip->n_phy; phy_no++) {
		if (&mvi->phy[phy_no] == phy) {
			mv_dprintk("Get signature time out, reset phy %d\n",
				phy_no + mvi->id * mvi->chip->n_phy);
			MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_HARD_RESET);
		}
	}
}

void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
{
	u32 tmp;
	struct mvs_phy *phy = &mvi->phy[phy_no];

	phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no);
	MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status);
	mv_dprintk("phy %d ctrl sts=0x%08X.\n", phy_no+mvi->id*mvi->chip->n_phy,
		MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no));
	mv_dprintk("phy %d irq sts = 0x%08X\n", phy_no+mvi->id*mvi->chip->n_phy,
		phy->irq_status);

	/*
	 * @events is a port event; we still need to check the per-port
	 * interrupt status to decide what actually happened.
	 */
	if (phy->irq_status & PHYEV_DCDR_ERR) {
		mv_dprintk("phy %d STP decoding error.\n",
		phy_no + mvi->id*mvi->chip->n_phy);
	}

	if (phy->irq_status & PHYEV_POOF) {
		mdelay(500);
		if (!(phy->phy_event & PHY_PLUG_OUT)) {
			int dev_sata = phy->phy_type & PORT_TYPE_SATA;
			int ready;

			mvs_do_release_task(mvi, phy_no, NULL);
			phy->phy_event |= PHY_PLUG_OUT;
			MVS_CHIP_DISP->clear_srs_irq(mvi, 0, 1);
			mvs_handle_event(mvi,
				(void *)(unsigned long)phy_no,
				PHY_PLUG_EVENT);
			ready = mvs_is_phy_ready(mvi, phy_no);
			if (ready || dev_sata) {
				if (MVS_CHIP_DISP->stp_reset)
					MVS_CHIP_DISP->stp_reset(mvi,
							phy_no);
				else
					MVS_CHIP_DISP->phy_reset(mvi,
							phy_no, MVS_SOFT_RESET);
				return;
			}
		}
	}

	if (phy->irq_status & PHYEV_COMWAKE) {
		tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, phy_no);
		MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no,
					tmp | PHYEV_SIG_FIS);
		if (phy->timer.function == NULL) {
			phy->timer.function = mvs_sig_time_out;
			phy->timer.expires = jiffies + 5*HZ;
			add_timer(&phy->timer);
		}
	}
	if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
		phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
		mv_dprintk("notify plug in on phy[%d]\n", phy_no);
		if (phy->phy_status) {
			mdelay(10);
			MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
			if (phy->phy_type & PORT_TYPE_SATA) {
				tmp = MVS_CHIP_DISP->read_port_irq_mask(
						mvi, phy_no);
				tmp &= ~PHYEV_SIG_FIS;
				MVS_CHIP_DISP->write_port_irq_mask(mvi,
							phy_no, tmp);
			}
			mvs_update_phyinfo(mvi, phy_no, 0);
			if (phy->phy_type & PORT_TYPE_SAS) {
				MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_PHY_TUNE);
				mdelay(10);
			}

			mvs_bytes_dmaed(mvi, phy_no, GFP_ATOMIC);
			/* whether driver is going to handle hot plug */
			if (phy->phy_event & PHY_PLUG_OUT) {
				mvs_port_notify_formed(&phy->sas_phy, 0);
				phy->phy_event &= ~PHY_PLUG_OUT;
			}
		} else {
			mv_dprintk("plugin interrupt but phy%d is gone\n",
				phy_no + mvi->id*mvi->chip->n_phy);
		}
	} else if (phy->irq_status & PHYEV_BROAD_CH) {
		mv_dprintk("phy %d broadcast change.\n",
			phy_no + mvi->id*mvi->chip->n_phy);
		mvs_handle_event(mvi, (void *)(unsigned long)phy_no,
				EXP_BRCT_CHG);
	}
}
int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
{
	u32 rx_prod_idx, rx_desc;
	bool attn = false;

	/* the first dword in the RX ring is special: it contains
	 * a mirror of the hardware's RX producer index, so that
	 * we don't have to stall the CPU reading that register.
	 * The actual RX ring is offset by one dword, due to this.
	 */
	rx_prod_idx = mvi->rx_cons;
	mvi->rx_cons = le32_to_cpu(mvi->rx[0]);
	if (mvi->rx_cons == 0xfff)	/* h/w hasn't touched RX ring yet */
		return 0;

	/* The CMPL_Q may come late, read from register and try again
	 * note: if coalescing is enabled,
	 * it will need to read from register every time for sure
	 */
	if (unlikely(mvi->rx_cons == rx_prod_idx))
		mvi->rx_cons = MVS_CHIP_DISP->rx_update(mvi) & RX_RING_SZ_MASK;

	if (mvi->rx_cons == rx_prod_idx)
		return 0;

	while (mvi->rx_cons != rx_prod_idx) {
		/* increment our internal RX consumer pointer */
		rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1);
		rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]);

		if (likely(rx_desc & RXQ_DONE))
			mvs_slot_complete(mvi, rx_desc, 0);
		if (rx_desc & RXQ_ATTN) {
			attn = true;
		} else if (rx_desc & RXQ_ERR) {
			if (!(rx_desc & RXQ_DONE))
				mvs_slot_complete(mvi, rx_desc, 0);
		} else if (rx_desc & RXQ_SLOT_RESET) {
			mvs_slot_free(mvi, rx_desc);
		}
	}

	if (attn && self_clear)
		MVS_CHIP_DISP->int_full(mvi);
	return 0;
}

int mvs_gpio_write(struct sas_ha_struct *sha, u8 reg_type, u8 reg_index,
			u8 reg_count, u8 *write_data)
{
	struct mvs_prv_info *mvs_prv = sha->lldd_ha;
	struct mvs_info *mvi = mvs_prv->mvi[0];

	if (MVS_CHIP_DISP->gpio_write) {
		return MVS_CHIP_DISP->gpio_write(mvs_prv, reg_type,
			reg_index, reg_count, write_data);
	}

	return -ENOSYS;
}