// SPDX-License-Identifier: GPL-2.0-or-later
/* Driver for Realtek PCI-Express card reader
 *
 * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
 *
 * Author:
 *   Wei WANG <wei_wang@realsil.com.cn>
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
#include <linux/rtsx_pci.h>
#include <linux/mmc/card.h>
#include <asm/unaligned.h>

#include "rtsx_pcr.h"
#include "rts5261.h"

static bool msi_en = true;
module_param(msi_en, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(msi_en, "Enable MSI");

static DEFINE_IDR(rtsx_pci_idr);
static DEFINE_SPINLOCK(rtsx_pci_lock);

static struct mfd_cell rtsx_pcr_cells[] = {
	[RTSX_SD_CARD] = {
		.name = DRV_NAME_RTSX_PCI_SDMMC,
	},
};

static const struct pci_device_id rtsx_pci_ids[] = {
	{ PCI_DEVICE(0x10EC, 0x5209), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5229), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5289), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5227), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x522A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5249), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5287), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5286), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x524A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5261), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, rtsx_pci_ids);

static inline void rtsx_pci_disable_aspm(struct rtsx_pcr *pcr)
{
	pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
					   PCI_EXP_LNKCTL_ASPMC, 0);
}

static int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
{
	rtsx_pci_write_register(pcr, MSGTXDATA0,
				MASK_8_BIT_DEF, (u8) (latency & 0xFF));
	rtsx_pci_write_register(pcr, MSGTXDATA1,
				MASK_8_BIT_DEF, (u8)((latency >> 8) & 0xFF));
	rtsx_pci_write_register(pcr, MSGTXDATA2,
				MASK_8_BIT_DEF, (u8)((latency >> 16) & 0xFF));
	rtsx_pci_write_register(pcr, MSGTXDATA3,
				MASK_8_BIT_DEF, (u8)((latency >> 24) & 0xFF));
	rtsx_pci_write_register(pcr, LTR_CTL, LTR_TX_EN_MASK |
		LTR_LATENCY_MODE_MASK, LTR_TX_EN_1 | LTR_LATENCY_MODE_SW);

	return 0;
}

int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
{
	return rtsx_comm_set_ltr_latency(pcr, latency);
}

static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
{
	if (pcr->aspm_enabled == enable)
		return;

	pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
					   PCI_EXP_LNKCTL_ASPMC,
					   enable ? pcr->aspm_en : 0);

	pcr->aspm_enabled = enable;
}

static void rtsx_disable_aspm(struct rtsx_pcr *pcr)
{
	if (pcr->ops->set_aspm)
		pcr->ops->set_aspm(pcr, false);
	else
		rtsx_comm_set_aspm(pcr, false);
}

int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val)
{
	rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, val);

	return 0;
}

static void rtsx_set_l1off_sub_cfg_d0(struct rtsx_pcr *pcr, int active)
{
	if (pcr->ops->set_l1off_cfg_sub_d0)
		pcr->ops->set_l1off_cfg_sub_d0(pcr, active);
}

static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr)
{
	struct rtsx_cr_option *option = &pcr->option;

	rtsx_disable_aspm(pcr);

	/* Fixes DMA transfer timeout issue after disabling ASPM on RTS5260 */
	msleep(1);

	if (option->ltr_enabled)
		rtsx_set_ltr_latency(pcr, option->ltr_active_latency);

	if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
		rtsx_set_l1off_sub_cfg_d0(pcr, 1);
}

static void rtsx_pm_full_on(struct rtsx_pcr *pcr)
{
	rtsx_comm_pm_full_on(pcr);
}

void rtsx_pci_start_run(struct rtsx_pcr *pcr)
{
	/* If pci device removed, don't queue idle work any more */
	if (pcr->remove_pci)
		return;

	if (pcr->state != PDEV_STAT_RUN) {
		pcr->state = PDEV_STAT_RUN;
		if (pcr->ops->enable_auto_blink)
			pcr->ops->enable_auto_blink(pcr);
		rtsx_pm_full_on(pcr);
	}

	mod_delayed_work(system_wq, &pcr->idle_work, msecs_to_jiffies(200));
}
EXPORT_SYMBOL_GPL(rtsx_pci_start_run);

int rtsx_pci_write_register(struct rtsx_pcr *pcr, u16 addr, u8 mask, u8 data)
{
	int i;
	u32 val = HAIMR_WRITE_START;

	val |= (u32)(addr & 0x3FFF) << 16;
	val |= (u32)mask << 8;
	val |= (u32)data;

	rtsx_pci_writel(pcr, RTSX_HAIMR, val);

	for (i = 0; i < MAX_RW_REG_CNT; i++) {
		val = rtsx_pci_readl(pcr, RTSX_HAIMR);
		if ((val & HAIMR_TRANS_END) == 0) {
			if (data != (u8)val)
				return -EIO;
			return 0;
		}
	}

	return -ETIMEDOUT;
}
EXPORT_SYMBOL_GPL(rtsx_pci_write_register);

int rtsx_pci_read_register(struct rtsx_pcr *pcr, u16 addr, u8 *data)
{
	u32 val = HAIMR_READ_START;
	int i;

	val |= (u32)(addr & 0x3FFF) << 16;
	rtsx_pci_writel(pcr, RTSX_HAIMR, val);

	for (i = 0; i < MAX_RW_REG_CNT; i++) {
		val = rtsx_pci_readl(pcr, RTSX_HAIMR);
		if ((val & HAIMR_TRANS_END) == 0)
			break;
	}

	if (i >= MAX_RW_REG_CNT)
		return -ETIMEDOUT;

	if (data)
		*data = (u8)(val & 0xFF);

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_read_register);

int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
{
	int err, i, finished = 0;
	u8 tmp;

	rtsx_pci_init_cmd(pcr);

	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYDATA0, 0xFF, (u8)val);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYDATA1, 0xFF, (u8)(val >> 8));
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYADDR, 0xFF, addr);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYRWCTL, 0xFF, 0x81);

	err = rtsx_pci_send_cmd(pcr, 100);
	if (err < 0)
		return err;

	for (i = 0; i < 100000; i++) {
		err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
		if (err < 0)
			return err;

		if (!(tmp & 0x80)) {
			finished = 1;
			break;
		}
	}

	if (!finished)
		return -ETIMEDOUT;

	return 0;
}

int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
{
	if (pcr->ops->write_phy)
		return pcr->ops->write_phy(pcr, addr, val);

	return __rtsx_pci_write_phy_register(pcr, addr, val);
}
EXPORT_SYMBOL_GPL(rtsx_pci_write_phy_register);

int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
{
	int err, i, finished = 0;
	u16 data;
	u8 *ptr, tmp;

	rtsx_pci_init_cmd(pcr);

	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYADDR, 0xFF, addr);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYRWCTL, 0xFF, 0x80);

	err = rtsx_pci_send_cmd(pcr, 100);
	if (err < 0)
		return err;

	for (i = 0; i < 100000; i++) {
		err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
		if (err < 0)
			return err;

		if (!(tmp & 0x80)) {
			finished = 1;
			break;
		}
	}

	if (!finished)
		return -ETIMEDOUT;

	rtsx_pci_init_cmd(pcr);

	rtsx_pci_add_cmd(pcr, READ_REG_CMD, PHYDATA0, 0, 0);
	rtsx_pci_add_cmd(pcr, READ_REG_CMD, PHYDATA1, 0, 0);

	err = rtsx_pci_send_cmd(pcr, 100);
	if (err < 0)
		return err;

	ptr = rtsx_pci_get_cmd_data(pcr);
	data = ((u16)ptr[1] << 8) | ptr[0];

	if (val)
		*val = data;

	return 0;
}

int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
{
	if (pcr->ops->read_phy)
		return pcr->ops->read_phy(pcr, addr, val);

	return __rtsx_pci_read_phy_register(pcr, addr, val);
}
EXPORT_SYMBOL_GPL(rtsx_pci_read_phy_register);

void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr)
{
	if (pcr->ops->stop_cmd)
		return pcr->ops->stop_cmd(pcr);

	rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
	rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);

	rtsx_pci_write_register(pcr, DMACTL, 0x80, 0x80);
	rtsx_pci_write_register(pcr, RBCTL, 0x80, 0x80);
}
EXPORT_SYMBOL_GPL(rtsx_pci_stop_cmd);

void rtsx_pci_add_cmd(struct rtsx_pcr *pcr,
		      u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
{
	unsigned long flags;
	u32 val = 0;
	u32 *ptr = (u32 *)(pcr->host_cmds_ptr);

	val |= (u32)(cmd_type & 0x03) << 30;
	val |= (u32)(reg_addr & 0x3FFF) << 16;
	val |= (u32)mask << 8;
	val |= (u32)data;

	spin_lock_irqsave(&pcr->lock, flags);
	ptr += pcr->ci;
	if (pcr->ci < (HOST_CMDS_BUF_LEN / 4)) {
		put_unaligned_le32(val, ptr);
		ptr++;
		pcr->ci++;
	}
	spin_unlock_irqrestore(&pcr->lock, flags);
}
EXPORT_SYMBOL_GPL(rtsx_pci_add_cmd);

void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr)
{
	u32 val = 1 << 31;

	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);

	val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
}
EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd_no_wait);

int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout)
{
	struct completion trans_done;
	u32 val = 1 << 31;
	long timeleft;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&pcr->lock, flags);

	/* set up data structures for the wakeup system */
	pcr->done = &trans_done;
	pcr->trans_result = TRANS_NOT_READY;
	init_completion(&trans_done);

	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);

	val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);

	spin_unlock_irqrestore(&pcr->lock, flags);

	/* Wait for TRANS_OK_INT */
	timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
		err = -ETIMEDOUT;
		goto finish_send_cmd;
	}

	spin_lock_irqsave(&pcr->lock, flags);
	if (pcr->trans_result == TRANS_RESULT_FAIL)
		err = -EINVAL;
	else if (pcr->trans_result == TRANS_RESULT_OK)
		err = 0;
	else if (pcr->trans_result == TRANS_NO_DEVICE)
		err = -ENODEV;
	spin_unlock_irqrestore(&pcr->lock, flags);

finish_send_cmd:
	spin_lock_irqsave(&pcr->lock, flags);
	pcr->done = NULL;
	spin_unlock_irqrestore(&pcr->lock, flags);

	if ((err < 0) && (err != -ENODEV))
		rtsx_pci_stop_cmd(pcr);

	if (pcr->finish_me)
		complete(pcr->finish_me);

	return err;
}
EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd);

static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
		dma_addr_t addr, unsigned int len, int end)
{
	u64 *ptr = (u64 *)(pcr->host_sg_tbl_ptr) + pcr->sgi;
	u64 val;
	u8 option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;

	pcr_dbg(pcr, "DMA addr: 0x%x, Len: 0x%x\n", (unsigned int)addr, len);

	if (end)
		option |= RTSX_SG_END;

	if (PCI_PID(pcr) == PID_5261) {
		/* RTS5261 splits lengths above 0xFFFF across two fields */
		if (len > 0xFFFF)
			val = ((u64)addr << 32) | (((u64)len & 0xFFFF) << 16)
				| (((u64)len >> 16) << 6) | option;
		else
			val = ((u64)addr << 32) | ((u64)len << 16) | option;
	} else {
		val = ((u64)addr << 32) | ((u64)len << 12) | option;
	}
	put_unaligned_le64(val, ptr);
	pcr->sgi++;
}

int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
		int num_sg, bool read, int timeout)
{
	int err = 0, count;

	pcr_dbg(pcr, "--> %s: num_sg = %d\n", __func__, num_sg);
	count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read);
	if (count < 1)
		return -EINVAL;
	pcr_dbg(pcr, "DMA mapping count: %d\n", count);

	err = rtsx_pci_dma_transfer(pcr, sglist, count, read, timeout);

	rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read);

	return err;
}
EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data);

int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
		int num_sg, bool read)
{
	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	if (pcr->remove_pci)
		return -EINVAL;

	if ((sglist == NULL) || (num_sg <= 0))
		return -EINVAL;

	return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir);
}
EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg);

void rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
		int num_sg, bool read)
{
	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir);
}
EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg);

int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
		int count, bool read, int timeout)
{
	struct completion trans_done;
	struct scatterlist *sg;
	dma_addr_t addr;
	long timeleft;
	unsigned long flags;
	unsigned int len;
	int i, err = 0;
	u32 val;
	u8 dir = read ? DEVICE_TO_HOST : HOST_TO_DEVICE;

	if (pcr->remove_pci)
		return -ENODEV;

	if ((sglist == NULL) || (count < 1))
		return -EINVAL;

	val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
	pcr->sgi = 0;
	for_each_sg(sglist, sg, count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
		rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1);
	}

	spin_lock_irqsave(&pcr->lock, flags);

	pcr->done = &trans_done;
	pcr->trans_result = TRANS_NOT_READY;
	init_completion(&trans_done);
	rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
	rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);

	spin_unlock_irqrestore(&pcr->lock, flags);

	timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
		err = -ETIMEDOUT;
		goto out;
	}

	spin_lock_irqsave(&pcr->lock, flags);
	if (pcr->trans_result == TRANS_RESULT_FAIL) {
		err = -EILSEQ;
		if (pcr->dma_error_count < RTS_MAX_TIMES_FREQ_REDUCTION)
			pcr->dma_error_count++;
	} else if (pcr->trans_result == TRANS_NO_DEVICE)
		err = -ENODEV;
	spin_unlock_irqrestore(&pcr->lock, flags);

out:
	spin_lock_irqsave(&pcr->lock, flags);
	pcr->done = NULL;
	spin_unlock_irqrestore(&pcr->lock, flags);

	if ((err < 0) && (err != -ENODEV))
		rtsx_pci_stop_cmd(pcr);

	if (pcr->finish_me)
		complete(pcr->finish_me);

	return err;
}
EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer);

int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
{
	int err;
	int i, j;
	u16 reg;
	u8 *ptr;

	if (buf_len > 512)
		buf_len = 512;

	ptr = buf;
	reg = PPBUF_BASE2;
	for (i = 0; i < buf_len / 256; i++) {
		rtsx_pci_init_cmd(pcr);

		for (j = 0; j < 256; j++)
			rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);

		err = rtsx_pci_send_cmd(pcr, 250);
		if (err < 0)
			return err;

		memcpy(ptr, rtsx_pci_get_cmd_data(pcr), 256);
		ptr += 256;
	}

	if (buf_len % 256) {
		rtsx_pci_init_cmd(pcr);

		for (j = 0; j < buf_len % 256; j++)
			rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);

		err = rtsx_pci_send_cmd(pcr, 250);
		if (err < 0)
			return err;
	}

	memcpy(ptr, rtsx_pci_get_cmd_data(pcr), buf_len % 256);

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_read_ppbuf);

int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
{
	int err;
	int i, j;
	u16 reg;
	u8 *ptr;

	if (buf_len > 512)
		buf_len = 512;

	ptr = buf;
	reg = PPBUF_BASE2;
	for (i = 0; i < buf_len / 256; i++) {
		rtsx_pci_init_cmd(pcr);

		for (j = 0; j < 256; j++) {
			rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
					reg++, 0xFF, *ptr);
			ptr++;
		}

		err = rtsx_pci_send_cmd(pcr, 250);
		if (err < 0)
			return err;
	}

	if (buf_len % 256) {
		rtsx_pci_init_cmd(pcr);

		for (j = 0; j < buf_len % 256; j++) {
			rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
					reg++, 0xFF, *ptr);
			ptr++;
		}

		err = rtsx_pci_send_cmd(pcr, 250);
		if (err < 0)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_write_ppbuf);

static int rtsx_pci_set_pull_ctl(struct rtsx_pcr *pcr, const u32 *tbl)
{
	rtsx_pci_init_cmd(pcr);

	while (*tbl & 0xFFFF0000) {
		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
				(u16)(*tbl >> 16), 0xFF, (u8)(*tbl));
		tbl++;
	}

	return rtsx_pci_send_cmd(pcr, 100);
}

int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card)
{
	const u32 *tbl;

	if (card == RTSX_SD_CARD)
		tbl = pcr->sd_pull_ctl_enable_tbl;
	else if (card == RTSX_MS_CARD)
		tbl = pcr->ms_pull_ctl_enable_tbl;
	else
		return -EINVAL;

	return rtsx_pci_set_pull_ctl(pcr, tbl);
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_enable);

int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card)
{
	const u32 *tbl;

	if (card == RTSX_SD_CARD)
		tbl = pcr->sd_pull_ctl_disable_tbl;
	else if (card == RTSX_MS_CARD)
		tbl = pcr->ms_pull_ctl_disable_tbl;
	else
		return -EINVAL;

	return rtsx_pci_set_pull_ctl(pcr, tbl);
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable);

static void rtsx_pci_enable_bus_int(struct rtsx_pcr *pcr)
{
	struct rtsx_hw_param *hw_param = &pcr->hw_param;

	pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN
		| hw_param->interrupt_en;

	if (pcr->num_slots > 1)
		pcr->bier |= MS_INT_EN;

	/* Enable Bus Interrupt */
	rtsx_pci_writel(pcr, RTSX_BIER, pcr->bier);

	pcr_dbg(pcr, "RTSX_BIER: 0x%08x\n", pcr->bier);
}

static inline u8 double_ssc_depth(u8 depth)
{
	return ((depth > 1) ? (depth - 1) : depth);
}

static u8 revise_ssc_depth(u8 ssc_depth, u8 div)
{
	if (div > CLK_DIV_1) {
		if (ssc_depth > (div - 1))
			ssc_depth -= (div - 1);
		else
			ssc_depth = SSC_DEPTH_4M;
	}

	return ssc_depth;
}

int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
		u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
{
	int err, clk;
	u8 n, clk_divider, mcu_cnt, div;
	static const u8 depth[] = {
		[RTSX_SSC_DEPTH_4M] = SSC_DEPTH_4M,
		[RTSX_SSC_DEPTH_2M] = SSC_DEPTH_2M,
		[RTSX_SSC_DEPTH_1M] = SSC_DEPTH_1M,
		[RTSX_SSC_DEPTH_500K] = SSC_DEPTH_500K,
		[RTSX_SSC_DEPTH_250K] = SSC_DEPTH_250K,
	};

	if (PCI_PID(pcr) == PID_5261)
		return rts5261_pci_switch_clock(pcr, card_clock,
				ssc_depth, initial_mode, double_clk, vpclk);

	if (initial_mode) {
		/* We use around 250kHz in the initial stage */
		clk_divider = SD_CLK_DIVIDE_128;
		card_clock = 30000000;
	} else {
		clk_divider = SD_CLK_DIVIDE_0;
	}
	err = rtsx_pci_write_register(pcr, SD_CFG1,
			SD_CLK_DIVIDE_MASK, clk_divider);
	if (err < 0)
		return err;

	/* Reduce card clock by 20MHz each time a DMA transfer error occurs */
	if (card_clock == UHS_SDR104_MAX_DTR &&
	    pcr->dma_error_count &&
	    PCI_PID(pcr) == RTS5227_DEVICE_ID)
		card_clock = UHS_SDR104_MAX_DTR -
			(pcr->dma_error_count * 20000000);

	card_clock /= 1000000;
	pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);

	clk = card_clock;
	if (!initial_mode && double_clk)
		clk = card_clock * 2;
	pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
		clk, pcr->cur_clock);

	if (clk == pcr->cur_clock)
		return 0;

	if (pcr->ops->conv_clk_and_div_n)
		n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
	else
		n = (u8)(clk - 2);
	if ((clk <= 2) || (n > MAX_DIV_N_PCR))
		return -EINVAL;

	mcu_cnt = (u8)(125/clk + 3);
	if (mcu_cnt > 15)
		mcu_cnt = 15;

	/* Make sure that the SSC clock div_n is not less than MIN_DIV_N_PCR */
	div = CLK_DIV_1;
	while ((n < MIN_DIV_N_PCR) && (div < CLK_DIV_8)) {
		if (pcr->ops->conv_clk_and_div_n) {
			int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
					DIV_N_TO_CLK) * 2;
			n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
					CLK_TO_DIV_N);
		} else {
			n = (n + 2) * 2 - 2;
		}
		div++;
	}
	pcr_dbg(pcr, "n = %d, div = %d\n", n, div);

	ssc_depth = depth[ssc_depth];
	if (double_clk)
		ssc_depth = double_ssc_depth(ssc_depth);

	ssc_depth = revise_ssc_depth(ssc_depth, div);
	pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);

	rtsx_pci_init_cmd(pcr);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
			CLK_LOW_FREQ, CLK_LOW_FREQ);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
			0xFF, (div << 4) | mcu_cnt);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
			SSC_DEPTH_MASK, ssc_depth);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
	if (vpclk) {
		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
				PHASE_NOT_RESET, 0);
		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
				PHASE_NOT_RESET, PHASE_NOT_RESET);
	}

	err = rtsx_pci_send_cmd(pcr, 2000);
	if (err < 0)
		return err;

	/* Wait SSC clock stable */
	udelay(SSC_CLOCK_STABLE_WAIT);
	err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
	if (err < 0)
		return err;

	pcr->cur_clock = clk;
	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_switch_clock);

int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card)
{
	if (pcr->ops->card_power_on)
		return pcr->ops->card_power_on(pcr, card);

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_power_on);

int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card)
{
	if (pcr->ops->card_power_off)
		return pcr->ops->card_power_off(pcr, card);

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off);

int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card)
{
	static const unsigned int cd_mask[] = {
		[RTSX_SD_CARD] = SD_EXIST,
		[RTSX_MS_CARD] = MS_EXIST
	};

	if (!(pcr->flags & PCR_MS_PMOS)) {
		/* When using single PMOS, accessing card is not permitted
		 * if the existing card is not the designated one.
		 */
		if (pcr->card_exist & (~cd_mask[card]))
			return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_exclusive_check);

int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
{
	if (pcr->ops->switch_output_voltage)
		return pcr->ops->switch_output_voltage(pcr, voltage);

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_switch_output_voltage);

unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr)
{
	unsigned int val;

	val = rtsx_pci_readl(pcr, RTSX_BIPR);
	if (pcr->ops->cd_deglitch)
		val = pcr->ops->cd_deglitch(pcr);

	return val;
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_exist);

void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr)
{
	struct completion finish;

	pcr->finish_me = &finish;
	init_completion(&finish);

	if (pcr->done)
		complete(pcr->done);

	if (!pcr->remove_pci)
		rtsx_pci_stop_cmd(pcr);

	wait_for_completion_interruptible_timeout(&finish,
			msecs_to_jiffies(2));
	pcr->finish_me = NULL;
}
EXPORT_SYMBOL_GPL(rtsx_pci_complete_unfinished_transfer);

static void rtsx_pci_card_detect(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct rtsx_pcr *pcr;
	unsigned long flags;
	unsigned int card_detect = 0, card_inserted, card_removed;
	u32 irq_status;

	dwork = to_delayed_work(work);
	pcr = container_of(dwork, struct rtsx_pcr, carddet_work);

	pcr_dbg(pcr, "--> %s\n", __func__);

	mutex_lock(&pcr->pcr_mutex);
	spin_lock_irqsave(&pcr->lock, flags);

	irq_status = rtsx_pci_readl(pcr, RTSX_BIPR);
	pcr_dbg(pcr, "irq_status: 0x%08x\n", irq_status);

	irq_status &= CARD_EXIST;
	card_inserted = pcr->card_inserted & irq_status;
	card_removed = pcr->card_removed;
	pcr->card_inserted = 0;
	pcr->card_removed = 0;

	spin_unlock_irqrestore(&pcr->lock, flags);

	if (card_inserted || card_removed) {
		pcr_dbg(pcr, "card_inserted: 0x%x, card_removed: 0x%x\n",
			card_inserted, card_removed);

		if (pcr->ops->cd_deglitch)
			card_inserted = pcr->ops->cd_deglitch(pcr);

		card_detect = card_inserted | card_removed;

		pcr->card_exist |= card_inserted;
		pcr->card_exist &= ~card_removed;
	}

	mutex_unlock(&pcr->pcr_mutex);

	if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event)
		pcr->slots[RTSX_SD_CARD].card_event(
				pcr->slots[RTSX_SD_CARD].p_dev);
	if ((card_detect & MS_EXIST) && pcr->slots[RTSX_MS_CARD].card_event)
		pcr->slots[RTSX_MS_CARD].card_event(
				pcr->slots[RTSX_MS_CARD].p_dev);
}

static void rtsx_pci_process_ocp(struct rtsx_pcr *pcr)
{
	if (pcr->ops->process_ocp) {
		pcr->ops->process_ocp(pcr);
	} else {
		if (!pcr->option.ocp_en)
			return;
		rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
		if (pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
			rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
			rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
			rtsx_pci_clear_ocpstat(pcr);
			pcr->ocp_stat = 0;
		}
	}
}

static int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr)
{
	if (pcr->option.ocp_en)
		rtsx_pci_process_ocp(pcr);

	return 0;
}

static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
{
	struct rtsx_pcr *pcr = dev_id;
	u32 int_reg;

	if (!pcr)
		return IRQ_NONE;

	spin_lock(&pcr->lock);

	int_reg = rtsx_pci_readl(pcr, RTSX_BIPR);
	/* Clear interrupt flag */
	rtsx_pci_writel(pcr, RTSX_BIPR, int_reg);
	if ((int_reg & pcr->bier) == 0) {
		spin_unlock(&pcr->lock);
		return IRQ_NONE;
	}
	if (int_reg == 0xFFFFFFFF) {
		spin_unlock(&pcr->lock);
		return IRQ_HANDLED;
	}

	int_reg &= (pcr->bier | 0x7FFFFF);

	if (int_reg & SD_OC_INT)
		rtsx_pci_process_ocp_interrupt(pcr);

	if (int_reg & SD_INT) {
		if (int_reg & SD_EXIST) {
			pcr->card_inserted |= SD_EXIST;
		} else {
			pcr->card_removed |= SD_EXIST;
			pcr->card_inserted &= ~SD_EXIST;
		}
		pcr->dma_error_count = 0;
	}

	if (int_reg & MS_INT) {
		if (int_reg & MS_EXIST) {
			pcr->card_inserted |= MS_EXIST;
		} else {
			pcr->card_removed |= MS_EXIST;
			pcr->card_inserted &= ~MS_EXIST;
		}
	}

	if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) {
		if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) {
			pcr->trans_result = TRANS_RESULT_FAIL;
			if (pcr->done)
				complete(pcr->done);
		} else if (int_reg & TRANS_OK_INT) {
			pcr->trans_result = TRANS_RESULT_OK;
			if (pcr->done)
				complete(pcr->done);
		}
	}

	if ((pcr->card_inserted || pcr->card_removed) && !(int_reg & SD_OC_INT))
		schedule_delayed_work(&pcr->carddet_work,
				msecs_to_jiffies(200));

	spin_unlock(&pcr->lock);
	return IRQ_HANDLED;
}

static int rtsx_pci_acquire_irq(struct rtsx_pcr *pcr)
{
	pcr_dbg(pcr, "%s: pcr->msi_en = %d, pci->irq = %d\n",
		__func__, pcr->msi_en, pcr->pci->irq);

	if (request_irq(pcr->pci->irq, rtsx_pci_isr,
			pcr->msi_en ? 0 : IRQF_SHARED,
			DRV_NAME_RTSX_PCI, pcr)) {
		dev_err(&(pcr->pci->dev),
			"rtsx_sdmmc: unable to grab IRQ %d, disabling device\n",
			pcr->pci->irq);
		return -1;
	}

	pcr->irq = pcr->pci->irq;
	pci_intx(pcr->pci, !pcr->msi_en);

	return 0;
}

static void rtsx_enable_aspm(struct rtsx_pcr *pcr)
{
	if (pcr->ops->set_aspm)
		pcr->ops->set_aspm(pcr, true);
	else
		rtsx_comm_set_aspm(pcr, true);
}

static void rtsx_comm_pm_power_saving(struct rtsx_pcr *pcr)
{
	struct rtsx_cr_option *option = &pcr->option;

	if (option->ltr_enabled) {
		u32 latency = option->ltr_l1off_latency;

		if (rtsx_check_dev_flag(pcr, L1_SNOOZE_TEST_EN))
			mdelay(option->l1_snooze_delay);

		rtsx_set_ltr_latency(pcr, latency);
	}

	if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
		rtsx_set_l1off_sub_cfg_d0(pcr, 0);

	rtsx_enable_aspm(pcr);
}

static void rtsx_pm_power_saving(struct rtsx_pcr *pcr)
{
	rtsx_comm_pm_power_saving(pcr);
}

static void rtsx_pci_idle_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct rtsx_pcr *pcr = container_of(dwork, struct rtsx_pcr, idle_work);

	pcr_dbg(pcr, "--> %s\n", __func__);

	mutex_lock(&pcr->pcr_mutex);

	pcr->state = PDEV_STAT_IDLE;

	if (pcr->ops->disable_auto_blink)
		pcr->ops->disable_auto_blink(pcr);
	if (pcr->ops->turn_off_led)
		pcr->ops->turn_off_led(pcr);

	rtsx_pm_power_saving(pcr);

	mutex_unlock(&pcr->pcr_mutex);
}

#ifdef CONFIG_PM
static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
{
	if (pcr->ops->turn_off_led)
		pcr->ops->turn_off_led(pcr);

	rtsx_pci_writel(pcr, RTSX_BIER, 0);
	pcr->bier = 0;

	rtsx_pci_write_register(pcr, PETXCFG, 0x08, 0x08);
	rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, pm_state);

	if (pcr->ops->force_power_down)
		pcr->ops->force_power_down(pcr, pm_state);
}
#endif

void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr)
{
	u8 val = SD_OCP_INT_EN | SD_DETECT_EN;

	if (pcr->ops->enable_ocp) {
		pcr->ops->enable_ocp(pcr);
	} else {
		rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
		rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
	}
}

void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr)
{
	u8 mask = SD_OCP_INT_EN | SD_DETECT_EN;

	if (pcr->ops->disable_ocp) {
		pcr->ops->disable_ocp(pcr);
	} else {
		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
		rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
				OC_POWER_DOWN);
	}
}

void rtsx_pci_init_ocp(struct rtsx_pcr *pcr)
{
	if (pcr->ops->init_ocp) {
		pcr->ops->init_ocp(pcr);
	} else {
		struct rtsx_cr_option *option = &(pcr->option);

		if (option->ocp_en) {
			u8 val = option->sd_800mA_ocp_thd;

			rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
			rtsx_pci_write_register(pcr, REG_OCPPARA1,
					SD_OCP_TIME_MASK, SD_OCP_TIME_800);
			rtsx_pci_write_register(pcr, REG_OCPPARA2,
					SD_OCP_THD_MASK, val);
			rtsx_pci_write_register(pcr, REG_OCPGLITCH,
					SD_OCP_GLITCH_MASK,
					pcr->hw_param.ocp_glitch);
			rtsx_pci_enable_ocp(pcr);
		} else {
			/* OC power down */
			rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
					OC_POWER_DOWN);
		}
	}
}

int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val)
{
	if (pcr->ops->get_ocpstat)
		return pcr->ops->get_ocpstat(pcr, val);
	else
		return rtsx_pci_read_register(pcr, REG_OCPSTAT, val);
}

void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr)
{
	if (pcr->ops->clear_ocpstat) {
		pcr->ops->clear_ocpstat(pcr);
	} else {
		u8 mask = SD_OCP_INT_CLR | SD_OC_CLR;
		u8 val = SD_OCP_INT_CLR | SD_OC_CLR;

		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
		udelay(100);
		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
	}
}

int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr)
{
	rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
		MS_CLK_EN | SD40_CLK_EN, 0);
	rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
	rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);

	msleep(50);

	rtsx_pci_card_pull_ctl_disable(pcr, RTSX_SD_CARD);

	return 0;
}

int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr)
{
	rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
		MS_CLK_EN | SD40_CLK_EN, 0);

	rtsx_pci_card_pull_ctl_disable(pcr, RTSX_MS_CARD);

	rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0);
	rtsx_pci_card_power_off(pcr, RTSX_MS_CARD);

	return 0;
}

static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
{
	int err;

	pcr->pcie_cap = pci_find_capability(pcr->pci, PCI_CAP_ID_EXP);
	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);

	rtsx_pci_enable_bus_int(pcr);

	/* Power on SSC */
	if (PCI_PID(pcr) == PID_5261) {
		/* Gating real mcu clock */
		err = rtsx_pci_write_register(pcr, RTS5261_FW_CFG1,
				RTS5261_MCU_CLOCK_GATING, 0);
		err = rtsx_pci_write_register(pcr, RTS5261_REG_FPDCTL,
				SSC_POWER_DOWN, 0);
	} else {
		err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0);
	}
	if (err < 0)
		return err;

	/* Wait SSC power stable */
	udelay(200);

	rtsx_pci_disable_aspm(pcr);
	if (pcr->ops->optimize_phy) {
		err = pcr->ops->optimize_phy(pcr);
		if (err < 0)
			return err;
	}

	rtsx_pci_init_cmd(pcr);

	/* Set mcu_cnt to 7 to ensure data can be sampled properly */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, 0x07, 0x07);

	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, HOST_SLEEP_STATE, 0x03, 0x00);
	/* Disable card clock */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, 0x1E, 0);
	/* Reset delink mode */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x0A, 0);
	/* Card driving select */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DRIVE_SEL,
			0xFF, pcr->card_drive_sel);
	/* Enable SSC Clock */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1,
			0xFF, SSC_8X_EN | SSC_SEL_4M);
	if (PCI_PID(pcr) == PID_5261)
		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
				RTS5261_SSC_DEPTH_2M);
	else
		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);

	/* Disable cd_pwr_save */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x16, 0x10);
	/* Clear Link Ready Interrupt */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
			LINK_RDY_INT, LINK_RDY_INT);
	/* Enlarge the estimation window of PERST# glitch
	 * to reduce the chance of invalid card interrupt
	 */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PERST_GLITCH_WIDTH, 0xFF, 0x80);
	/* Update RC oscillator to 400k
	 * bit[0] F_HIGH: for RC oscillator, Rst_value is 1'b1
	 *                1: 2M  0: 400k
	 */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RCCTL, 0x01, 0x00);
	/* Set interrupt write clear
	 * bit 1: U_elbi_if_rd_clr_en
	 *	1: Enable ELBI interrupt[31:22] & [7:0] flag read clear
	 *	0: ELBI interrupt flag[31:22] & [7:0] only can be write clear
	 */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, NFTS_TX_CTRL, 0x02, 0);

	err = rtsx_pci_send_cmd(pcr, 100);
	if (err < 0)
		return err;

	switch (PCI_PID(pcr)) {
	case PID_5250:
	case PID_524A:
	case PID_525A:
	case PID_5260:
	case PID_5261:
		rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
		break;
	default:
		break;
	}

	/* init ocp */
	rtsx_pci_init_ocp(pcr);

	/* Enable clk_request_n to enable clock power management */
	rtsx_pci_write_config_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL + 1, 1);
	/* Enter L1 when host tx idle */
	rtsx_pci_write_config_byte(pcr, 0x70F, 0x5B);

	if (pcr->ops->extra_init_hw) {
		err = pcr->ops->extra_init_hw(pcr);
		if (err < 0)
			return err;
	}

	/* No CD interrupt if probing driver with card inserted.
	 * So we need to initialize pcr->card_exist here.
	 */
	if (pcr->ops->cd_deglitch)
		pcr->card_exist = pcr->ops->cd_deglitch(pcr);
	else
		pcr->card_exist = rtsx_pci_readl(pcr, RTSX_BIPR) & CARD_EXIST;

	return 0;
}

static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
{
	int err;

	spin_lock_init(&pcr->lock);
	mutex_init(&pcr->pcr_mutex);

	switch (PCI_PID(pcr)) {
	default:
	case 0x5209:
		rts5209_init_params(pcr);
		break;

	case 0x5229:
		rts5229_init_params(pcr);
		break;

	case 0x5289:
		rtl8411_init_params(pcr);
		break;

	case 0x5227:
		rts5227_init_params(pcr);
		break;

	case 0x522A:
		rts522a_init_params(pcr);
		break;

	case 0x5249:
		rts5249_init_params(pcr);
		break;

	case 0x524A:
		rts524a_init_params(pcr);
		break;

	case 0x525A:
		rts525a_init_params(pcr);
		break;

	case 0x5287:
		rtl8411b_init_params(pcr);
		break;

	case 0x5286:
		rtl8402_init_params(pcr);
		break;

	case 0x5260:
		rts5260_init_params(pcr);
		break;

	case 0x5261:
		rts5261_init_params(pcr);
		break;
	}

	pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
		PCI_PID(pcr), pcr->ic_version);

	pcr->slots = kcalloc(pcr->num_slots, sizeof(struct rtsx_slot),
			GFP_KERNEL);
	if (!pcr->slots)
		return -ENOMEM;

	if (pcr->ops->fetch_vendor_settings)
		pcr->ops->fetch_vendor_settings(pcr);

	pcr_dbg(pcr, "pcr->aspm_en = 0x%x\n", pcr->aspm_en);
	pcr_dbg(pcr, "pcr->sd30_drive_sel_1v8 = 0x%x\n",
		pcr->sd30_drive_sel_1v8);
	pcr_dbg(pcr, "pcr->sd30_drive_sel_3v3 = 0x%x\n",
		pcr->sd30_drive_sel_3v3);
	pcr_dbg(pcr, "pcr->card_drive_sel = 0x%x\n",
		pcr->card_drive_sel);
	pcr_dbg(pcr, "pcr->flags = 0x%x\n", pcr->flags);

	pcr->state = PDEV_STAT_IDLE;
	err = rtsx_pci_init_hw(pcr);
	if (err < 0) {
		kfree(pcr->slots);
		return err;
	}

	return 0;
}

static int rtsx_pci_probe(struct pci_dev *pcidev,
			  const struct pci_device_id *id)
{
	struct rtsx_pcr *pcr;
	struct pcr_handle *handle;
	u32 base, len;
	int ret, i, bar = 0;

	dev_dbg(&(pcidev->dev),
		": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
		(int)pcidev->revision);

	ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
	if (ret < 0)
		return ret;

	ret = pci_enable_device(pcidev);
	if (ret)
		return ret;

	ret = pci_request_regions(pcidev, DRV_NAME_RTSX_PCI);
	if (ret)
		goto disable;

	pcr = kzalloc(sizeof(*pcr), GFP_KERNEL);
	if (!pcr) {
		ret = -ENOMEM;
		goto release_pci;
	}

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle) {
		ret = -ENOMEM;
		goto free_pcr;
	}
	handle->pcr = pcr;

	idr_preload(GFP_KERNEL);
	spin_lock(&rtsx_pci_lock);
	ret = idr_alloc(&rtsx_pci_idr, pcr, 0, 0, GFP_NOWAIT);
	if (ret >= 0)
		pcr->id = ret;
	spin_unlock(&rtsx_pci_lock);
	idr_preload_end();
	if (ret < 0)
		goto free_handle;

	pcr->pci = pcidev;
	dev_set_drvdata(&pcidev->dev, handle);

	if (CHK_PCI_PID(pcr, 0x525A))
		bar = 1;
	len = pci_resource_len(pcidev, bar);
	base = pci_resource_start(pcidev, bar);
	pcr->remap_addr = ioremap(base, len);
	if (!pcr->remap_addr) {
		ret = -ENOMEM;
		goto free_handle;
	}

	pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev),
			RTSX_RESV_BUF_LEN, &(pcr->rtsx_resv_buf_addr),
			GFP_KERNEL);
	if (pcr->rtsx_resv_buf == NULL) {
		ret = -ENXIO;
		goto unmap;
	}
	pcr->host_cmds_ptr = pcr->rtsx_resv_buf;
	pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
	pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
	pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;

	pcr->card_inserted = 0;
	pcr->card_removed = 0;
	INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
	INIT_DELAYED_WORK(&pcr->idle_work, rtsx_pci_idle_work);

	pcr->msi_en = msi_en;
	if (pcr->msi_en) {
		ret = pci_enable_msi(pcidev);
		if (ret)
			pcr->msi_en = false;
	}

	ret = rtsx_pci_acquire_irq(pcr);
	if (ret < 0)
		goto disable_msi;

	pci_set_master(pcidev);
	synchronize_irq(pcr->irq);

	ret = rtsx_pci_init_chip(pcr);
	if (ret < 0)
		goto disable_irq;

	for (i = 0; i < ARRAY_SIZE(rtsx_pcr_cells); i++) {
		rtsx_pcr_cells[i].platform_data = handle;
		rtsx_pcr_cells[i].pdata_size = sizeof(*handle);
	}
	ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
			ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
	if (ret < 0)
		goto disable_irq;

	schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));

	return 0;

disable_irq:
	free_irq(pcr->irq, (void *)pcr);
disable_msi:
	if (pcr->msi_en)
		pci_disable_msi(pcr->pci);
	dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
			pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
unmap:
	iounmap(pcr->remap_addr);
free_handle:
	kfree(handle);
free_pcr:
	kfree(pcr);
release_pci:
	pci_release_regions(pcidev);
disable:
	pci_disable_device(pcidev);

	return ret;
}

static void rtsx_pci_remove(struct pci_dev *pcidev)
{
	struct pcr_handle *handle = pci_get_drvdata(pcidev);
	struct rtsx_pcr *pcr = handle->pcr;

	pcr->remove_pci = true;

	/* Disable interrupts at the pcr level */
	spin_lock_irq(&pcr->lock);
	rtsx_pci_writel(pcr, RTSX_BIER, 0);
	pcr->bier = 0;
	spin_unlock_irq(&pcr->lock);

	cancel_delayed_work_sync(&pcr->carddet_work);
	cancel_delayed_work_sync(&pcr->idle_work);

	mfd_remove_devices(&pcidev->dev);

	dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
			pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
	free_irq(pcr->irq, (void *)pcr);
	if (pcr->msi_en)
		pci_disable_msi(pcr->pci);
	iounmap(pcr->remap_addr);

	pci_release_regions(pcidev);
	pci_disable_device(pcidev);

	spin_lock(&rtsx_pci_lock);
	idr_remove(&rtsx_pci_idr, pcr->id);
	spin_unlock(&rtsx_pci_lock);

	kfree(pcr->slots);
	kfree(pcr);
	kfree(handle);

	dev_dbg(&(pcidev->dev),
		": Realtek PCI-E Card Reader at %s [%04x:%04x] has been removed\n",
		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
}

#ifdef CONFIG_PM

static int rtsx_pci_suspend(struct pci_dev *pcidev, pm_message_t state)
{
	struct pcr_handle *handle;
	struct rtsx_pcr *pcr;

	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);

	handle = pci_get_drvdata(pcidev);
	pcr = handle->pcr;

	cancel_delayed_work(&pcr->carddet_work);
	cancel_delayed_work(&pcr->idle_work);

	mutex_lock(&pcr->pcr_mutex);

	rtsx_pci_power_off(pcr, HOST_ENTER_S3);

	pci_save_state(pcidev);
	pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0);
	pci_disable_device(pcidev);
	pci_set_power_state(pcidev, pci_choose_state(pcidev, state));

	mutex_unlock(&pcr->pcr_mutex);
	return 0;
}

static int rtsx_pci_resume(struct pci_dev *pcidev)
{
	struct pcr_handle *handle;
	struct rtsx_pcr *pcr;
	int ret = 0;

	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);

	handle = pci_get_drvdata(pcidev);
	pcr = handle->pcr;

	mutex_lock(&pcr->pcr_mutex);

	pci_set_power_state(pcidev, PCI_D0);
	pci_restore_state(pcidev);
	ret = pci_enable_device(pcidev);
	if (ret)
		goto out;
	pci_set_master(pcidev);

	ret = rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
	if (ret)
		goto out;

	ret = rtsx_pci_init_hw(pcr);
	if (ret)
		goto out;

	schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));

out:
	mutex_unlock(&pcr->pcr_mutex);
	return ret;
}

static void rtsx_pci_shutdown(struct pci_dev *pcidev)
{
	struct pcr_handle *handle;
	struct rtsx_pcr *pcr;

	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);

	handle = pci_get_drvdata(pcidev);
	pcr = handle->pcr;
	rtsx_pci_power_off(pcr, HOST_ENTER_S1);

	pci_disable_device(pcidev);
	free_irq(pcr->irq, (void *)pcr);
	if (pcr->msi_en)
		pci_disable_msi(pcr->pci);
}

#else /* CONFIG_PM */

#define rtsx_pci_suspend NULL
#define rtsx_pci_resume NULL
#define rtsx_pci_shutdown NULL

#endif /* CONFIG_PM */

static struct pci_driver rtsx_pci_driver = {
	.name = DRV_NAME_RTSX_PCI,
	.id_table = rtsx_pci_ids,
	.probe = rtsx_pci_probe,
	.remove = rtsx_pci_remove,
	.suspend = rtsx_pci_suspend,
	.resume = rtsx_pci_resume,
	.shutdown = rtsx_pci_shutdown,
};
module_pci_driver(rtsx_pci_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Wei WANG <wei_wang@realsil.com.cn>");
MODULE_DESCRIPTION("Realtek PCI-E Card Reader Driver");