// SPDX-License-Identifier: GPL-2.0-or-later
/* Driver for Realtek PCI-Express card reader
 *
 * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
 *
 * Author:
 *   Wei WANG <wei_wang@realsil.com.cn>
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
#include <linux/rtsx_pci.h>
#include <linux/mmc/card.h>
#include <asm/unaligned.h>

#include "rtsx_pcr.h"
#include "rts5261.h"
#include "rts5228.h"

static bool msi_en = true;
module_param(msi_en, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(msi_en, "Enable MSI");

static DEFINE_IDR(rtsx_pci_idr);
static DEFINE_SPINLOCK(rtsx_pci_lock);

static struct mfd_cell rtsx_pcr_cells[] = {
	[RTSX_SD_CARD] = {
		.name = DRV_NAME_RTSX_PCI_SDMMC,
	},
};

static const struct pci_device_id rtsx_pci_ids[] = {
	{ PCI_DEVICE(0x10EC, 0x5209), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5229), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5289), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5227), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x522A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5249), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5287), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5286), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x524A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5261), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5228), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, rtsx_pci_ids);

static inline void rtsx_pci_disable_aspm(struct rtsx_pcr *pcr)
{
	pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
					   PCI_EXP_LNKCTL_ASPMC, 0);
}

static int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
{
	rtsx_pci_write_register(pcr, MSGTXDATA0,
				MASK_8_BIT_DEF, (u8) (latency & 0xFF));
	rtsx_pci_write_register(pcr, MSGTXDATA1,
				MASK_8_BIT_DEF, (u8)((latency >> 8) & 0xFF));
	rtsx_pci_write_register(pcr, MSGTXDATA2,
				MASK_8_BIT_DEF, (u8)((latency >> 16) & 0xFF));
	rtsx_pci_write_register(pcr, MSGTXDATA3,
				MASK_8_BIT_DEF, (u8)((latency >> 24) & 0xFF));
	rtsx_pci_write_register(pcr, LTR_CTL, LTR_TX_EN_MASK |
		LTR_LATENCY_MODE_MASK, LTR_TX_EN_1 | LTR_LATENCY_MODE_SW);

	return 0;
}

int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
{
	return rtsx_comm_set_ltr_latency(pcr, latency);
}

static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
{
	if (pcr->aspm_enabled == enable)
		return;

	pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
					   PCI_EXP_LNKCTL_ASPMC,
					   enable ? pcr->aspm_en : 0);
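
	/* Cache the requested state so a repeated call with the same setting is a no-op */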
	pcr->aspm_enabled = enable;
}

static void rtsx_disable_aspm(struct rtsx_pcr *pcr)
{
	if (pcr->ops->set_aspm)
		pcr->ops->set_aspm(pcr, false);
	else
		rtsx_comm_set_aspm(pcr, false);
}

int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val)
{
	rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, val);

	return 0;
}

static void rtsx_set_l1off_sub_cfg_d0(struct rtsx_pcr *pcr, int active)
{
	if (pcr->ops->set_l1off_cfg_sub_d0)
		pcr->ops->set_l1off_cfg_sub_d0(pcr, active);
}

static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr)
{
	struct rtsx_cr_option *option = &pcr->option;

	rtsx_disable_aspm(pcr);

	/* Fixes DMA transfer timeout issue after disabling ASPM on RTS5260 */
	msleep(1);

	if (option->ltr_enabled)
		rtsx_set_ltr_latency(pcr, option->ltr_active_latency);

	if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
		rtsx_set_l1off_sub_cfg_d0(pcr, 1);
}

static void rtsx_pm_full_on(struct rtsx_pcr *pcr)
{
	rtsx_comm_pm_full_on(pcr);
}

void rtsx_pci_start_run(struct rtsx_pcr *pcr)
{
	/* If pci device removed, don't queue idle work any more */
	if (pcr->remove_pci)
		return;

	if (pcr->state != PDEV_STAT_RUN) {
		pcr->state = PDEV_STAT_RUN;
		if (pcr->ops->enable_auto_blink)
			pcr->ops->enable_auto_blink(pcr);
		rtsx_pm_full_on(pcr);
	}

	mod_delayed_work(system_wq, &pcr->idle_work, msecs_to_jiffies(200));
}
EXPORT_SYMBOL_GPL(rtsx_pci_start_run);

int rtsx_pci_write_register(struct rtsx_pcr *pcr, u16 addr, u8 mask, u8 data)
{
	int i;
	u32 val = HAIMR_WRITE_START;

	val |= (u32)(addr & 0x3FFF) << 16;
	val |= (u32)mask << 8;
	val |= (u32)data;

	rtsx_pci_writel(pcr, RTSX_HAIMR, val);

	for (i = 0; i < MAX_RW_REG_CNT; i++) {
		val = rtsx_pci_readl(pcr, RTSX_HAIMR);
		if ((val & HAIMR_TRANS_END) == 0) {
			if (data != (u8)val)
				return -EIO;
			return 0;
		}
	}

	return -ETIMEDOUT;
}
EXPORT_SYMBOL_GPL(rtsx_pci_write_register);

int rtsx_pci_read_register(struct rtsx_pcr *pcr, u16 addr, u8 *data)
{
	u32 val = HAIMR_READ_START;
	int i;

	val |= (u32)(addr & 0x3FFF) << 16;
	rtsx_pci_writel(pcr, RTSX_HAIMR, val);

	for (i = 0; i < MAX_RW_REG_CNT; i++) {
		val = rtsx_pci_readl(pcr, RTSX_HAIMR);
		if ((val & HAIMR_TRANS_END) == 0)
			break;
	}

	if (i >= MAX_RW_REG_CNT)
		return -ETIMEDOUT;

	if (data)
		*data = (u8)(val & 0xFF);

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_read_register);

int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
{
	int err, i, finished = 0;
	u8 tmp;

	rtsx_pci_write_register(pcr, PHYDATA0, 0xFF, (u8)val);
	rtsx_pci_write_register(pcr, PHYDATA1, 0xFF, (u8)(val >> 8));
	rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
	rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x81);

	for (i = 0; i < 100000; i++) {
		err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
		if (err < 0)
			return err;

		if (!(tmp & 0x80)) {
			finished = 1;
			break;
		}
	}

	if (!finished)
		return -ETIMEDOUT;

	return 0;
}

int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
{
	if (pcr->ops->write_phy)
		return pcr->ops->write_phy(pcr, addr, val);

	return __rtsx_pci_write_phy_register(pcr, addr, val);
}
EXPORT_SYMBOL_GPL(rtsx_pci_write_phy_register);

int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
{
	int err, i, finished = 0;
	u16 data;
	u8 tmp, val1, val2;

	rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
	rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x80);

	for (i = 0; i < 100000; i++) {
		err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
		if (err < 0)
			return err;

		if (!(tmp & 0x80)) {
			finished = 1;
			break;
		}
	}

	if (!finished)
		return -ETIMEDOUT;

	rtsx_pci_read_register(pcr, PHYDATA0, &val1);
	rtsx_pci_read_register(pcr, PHYDATA1, &val2);
	data = val1 | (val2 << 8);

	if (val)
		*val = data;

	return 0;
}

int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
{
	if (pcr->ops->read_phy)
		return pcr->ops->read_phy(pcr, addr, val);

	return __rtsx_pci_read_phy_register(pcr, addr, val);
}
EXPORT_SYMBOL_GPL(rtsx_pci_read_phy_register);

void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr)
{
	if (pcr->ops->stop_cmd)
		return pcr->ops->stop_cmd(pcr);

	rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
	rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);

	rtsx_pci_write_register(pcr, DMACTL, 0x80, 0x80);
	rtsx_pci_write_register(pcr, RBCTL, 0x80, 0x80);
}
EXPORT_SYMBOL_GPL(rtsx_pci_stop_cmd);

void rtsx_pci_add_cmd(struct rtsx_pcr *pcr,
		u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
{
	unsigned long flags;
	u32 val = 0;
	u32 *ptr = (u32 *)(pcr->host_cmds_ptr);

	val |= (u32)(cmd_type & 0x03) << 30;
	val |= (u32)(reg_addr & 0x3FFF) << 16;
	val |= (u32)mask << 8;
	val |= (u32)data;

	spin_lock_irqsave(&pcr->lock, flags);
	ptr += pcr->ci;
	if (pcr->ci < (HOST_CMDS_BUF_LEN / 4)) {
		put_unaligned_le32(val, ptr);
		ptr++;
		pcr->ci++;
	}
	spin_unlock_irqrestore(&pcr->lock, flags);
}
EXPORT_SYMBOL_GPL(rtsx_pci_add_cmd);

void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr)
{
	u32 val = 1 << 31;

	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);

	val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
}
EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd_no_wait);

int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout)
{
	struct completion trans_done;
	u32 val = 1 << 31;
	long timeleft;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&pcr->lock, flags);

	/* set up data structures for the wakeup system */
	pcr->done = &trans_done;
	pcr->trans_result = TRANS_NOT_READY;
	init_completion(&trans_done);

	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);

	val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);

	spin_unlock_irqrestore(&pcr->lock, flags);

	/* Wait for TRANS_OK_INT */
	timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
		err = -ETIMEDOUT;
		goto finish_send_cmd;
	}

	spin_lock_irqsave(&pcr->lock, flags);
	if (pcr->trans_result == TRANS_RESULT_FAIL)
		err = -EINVAL;
	else if (pcr->trans_result == TRANS_RESULT_OK)
		err = 0;
	else if (pcr->trans_result == TRANS_NO_DEVICE)
		err = -ENODEV;
	spin_unlock_irqrestore(&pcr->lock, flags);

finish_send_cmd:
	spin_lock_irqsave(&pcr->lock, flags);
	pcr->done = NULL;
	spin_unlock_irqrestore(&pcr->lock, flags);

	if ((err < 0) && (err != -ENODEV))
		rtsx_pci_stop_cmd(pcr);

	if (pcr->finish_me)
		complete(pcr->finish_me);

	return err;
}
EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd);

static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
		dma_addr_t addr, unsigned int len, int end)
{
	u64 *ptr = (u64 *)(pcr->host_sg_tbl_ptr) + pcr->sgi;
	u64 val;
	u8 option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;

	pcr_dbg(pcr, "DMA addr: 0x%x, Len: 0x%x\n", (unsigned int)addr, len);

	if (end)
		option |= RTSX_SG_END;

	if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5228)) {
		if (len > 0xFFFF)
			val = ((u64)addr << 32) | (((u64)len & 0xFFFF) << 16)
				| (((u64)len >> 16) << 6) | option;
		else
			val = ((u64)addr << 32) | ((u64)len << 16) | option;
	} else {
		val = ((u64)addr << 32) | ((u64)len << 12) | option;
	}
	put_unaligned_le64(val, ptr);
	pcr->sgi++;
}

int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
		int num_sg, bool read, int timeout)
{
	int err = 0, count;

	pcr_dbg(pcr, "--> %s: num_sg = %d\n", __func__, num_sg);
	count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read);
	if (count < 1)
		return -EINVAL;
	pcr_dbg(pcr, "DMA mapping count: %d\n", count);

	err = rtsx_pci_dma_transfer(pcr, sglist, count, read, timeout);

	rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read);

	return err;
}
EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data);

int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
		int num_sg, bool read)
{
	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	if (pcr->remove_pci)
		return -EINVAL;

	if ((sglist == NULL) || (num_sg <= 0))
		return -EINVAL;

	return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir);
}
EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg);

void rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
		int num_sg, bool read)
{
	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir);
}
EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg);

int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
		int count, bool read, int timeout)
{
	struct completion trans_done;
	struct scatterlist *sg;
	dma_addr_t addr;
	long timeleft;
	unsigned long flags;
	unsigned int len;
	int i, err = 0;
	u32 val;
	u8 dir = read ? DEVICE_TO_HOST : HOST_TO_DEVICE;
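
	/* Reject the transfer outright if the device is gone or the scatterlist is empty */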
	if (pcr->remove_pci)
		return -ENODEV;

	if ((sglist == NULL) || (count < 1))
		return -EINVAL;

	val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
	pcr->sgi = 0;
	for_each_sg(sglist, sg, count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
		rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1);
	}

	spin_lock_irqsave(&pcr->lock, flags);

	pcr->done = &trans_done;
	pcr->trans_result = TRANS_NOT_READY;
	init_completion(&trans_done);
	rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
	rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);

	spin_unlock_irqrestore(&pcr->lock, flags);

	timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
		err = -ETIMEDOUT;
		goto out;
	}

	spin_lock_irqsave(&pcr->lock, flags);
	if (pcr->trans_result == TRANS_RESULT_FAIL) {
		err = -EILSEQ;
		if (pcr->dma_error_count < RTS_MAX_TIMES_FREQ_REDUCTION)
			pcr->dma_error_count++;
	} else if (pcr->trans_result == TRANS_NO_DEVICE) {
		err = -ENODEV;
	}
	spin_unlock_irqrestore(&pcr->lock, flags);

out:
	spin_lock_irqsave(&pcr->lock, flags);
	pcr->done = NULL;
	spin_unlock_irqrestore(&pcr->lock, flags);

	if ((err < 0) && (err != -ENODEV))
		rtsx_pci_stop_cmd(pcr);

	if (pcr->finish_me)
		complete(pcr->finish_me);

	return err;
}
EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer);

int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
{
	int err;
	int i, j;
	u16 reg;
	u8 *ptr;

	if (buf_len > 512)
		buf_len = 512;

	ptr = buf;
	reg = PPBUF_BASE2;
	for (i = 0; i < buf_len / 256; i++) {
		rtsx_pci_init_cmd(pcr);

		for (j = 0; j < 256; j++)
			rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);

		err = rtsx_pci_send_cmd(pcr, 250);
		if (err < 0)
			return err;

		memcpy(ptr, rtsx_pci_get_cmd_data(pcr), 256);
		ptr += 256;
	}

	if (buf_len % 256) {
		rtsx_pci_init_cmd(pcr);

		for (j = 0; j < buf_len % 256; j++)
			rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);

		err = rtsx_pci_send_cmd(pcr, 250);
		if (err < 0)
			return err;
	}

	memcpy(ptr, rtsx_pci_get_cmd_data(pcr), buf_len % 256);

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_read_ppbuf);

int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
{
	int err;
	int i, j;
	u16 reg;
	u8 *ptr;

	if (buf_len > 512)
		buf_len = 512;

	ptr = buf;
	reg = PPBUF_BASE2;
	for (i = 0; i < buf_len / 256; i++) {
		rtsx_pci_init_cmd(pcr);

		for (j = 0; j < 256; j++) {
			rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
					reg++, 0xFF, *ptr);
			ptr++;
		}

		err = rtsx_pci_send_cmd(pcr, 250);
		if (err < 0)
			return err;
	}

	if (buf_len % 256) {
		rtsx_pci_init_cmd(pcr);

		for (j = 0; j < buf_len % 256; j++) {
			rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
					reg++, 0xFF, *ptr);
			ptr++;
		}

		err = rtsx_pci_send_cmd(pcr, 250);
		if (err < 0)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_write_ppbuf);

static int rtsx_pci_set_pull_ctl(struct rtsx_pcr *pcr, const u32 *tbl)
{
	rtsx_pci_init_cmd(pcr);

	while (*tbl & 0xFFFF0000) {
		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
				(u16)(*tbl >> 16), 0xFF, (u8)(*tbl));
		tbl++;
	}

	return rtsx_pci_send_cmd(pcr, 100);
}

int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card)
{
	const u32 *tbl;

	if (card == RTSX_SD_CARD)
		tbl = pcr->sd_pull_ctl_enable_tbl;
	else if (card == RTSX_MS_CARD)
		tbl = pcr->ms_pull_ctl_enable_tbl;
	else
		return -EINVAL;

	return rtsx_pci_set_pull_ctl(pcr, tbl);
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_enable);

int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card)
{
	const u32 *tbl;

	if (card == RTSX_SD_CARD)
		tbl = pcr->sd_pull_ctl_disable_tbl;
	else if (card == RTSX_MS_CARD)
		tbl = pcr->ms_pull_ctl_disable_tbl;
	else
		return -EINVAL;

	return rtsx_pci_set_pull_ctl(pcr, tbl);
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable);

static void rtsx_pci_enable_bus_int(struct rtsx_pcr *pcr)
{
	struct rtsx_hw_param *hw_param = &pcr->hw_param;

	pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN
		| hw_param->interrupt_en;

	if (pcr->num_slots > 1)
		pcr->bier |= MS_INT_EN;

	/* Enable Bus Interrupt */
	rtsx_pci_writel(pcr, RTSX_BIER, pcr->bier);

	pcr_dbg(pcr, "RTSX_BIER: 0x%08x\n", pcr->bier);
}

static inline u8 double_ssc_depth(u8 depth)
{
	return ((depth > 1) ? (depth - 1) : depth);
}

static u8 revise_ssc_depth(u8 ssc_depth, u8 div)
{
	if (div > CLK_DIV_1) {
		if (ssc_depth > (div - 1))
			ssc_depth -= (div - 1);
		else
			ssc_depth = SSC_DEPTH_4M;
	}

	return ssc_depth;
}

int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
		u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
{
	int err, clk;
	u8 n, clk_divider, mcu_cnt, div;
	static const u8 depth[] = {
		[RTSX_SSC_DEPTH_4M] = SSC_DEPTH_4M,
		[RTSX_SSC_DEPTH_2M] = SSC_DEPTH_2M,
		[RTSX_SSC_DEPTH_1M] = SSC_DEPTH_1M,
		[RTSX_SSC_DEPTH_500K] = SSC_DEPTH_500K,
		[RTSX_SSC_DEPTH_250K] = SSC_DEPTH_250K,
	};

	if (PCI_PID(pcr) == PID_5261)
		return rts5261_pci_switch_clock(pcr, card_clock,
				ssc_depth, initial_mode, double_clk, vpclk);
	if (PCI_PID(pcr) == PID_5228)
		return rts5228_pci_switch_clock(pcr, card_clock,
				ssc_depth, initial_mode, double_clk, vpclk);

	if (initial_mode) {
		/* Use a card clock of roughly 250 kHz in the initial stage */
		clk_divider = SD_CLK_DIVIDE_128;
		card_clock = 30000000;
	} else {
		clk_divider = SD_CLK_DIVIDE_0;
	}
	err = rtsx_pci_write_register(pcr, SD_CFG1,
			SD_CLK_DIVIDE_MASK, clk_divider);
	if (err < 0)
		return err;

	/* Reduce card clock by 20MHz each time a DMA transfer error occurs */
	if (card_clock == UHS_SDR104_MAX_DTR &&
	    pcr->dma_error_count &&
	    PCI_PID(pcr) == RTS5227_DEVICE_ID)
		card_clock = UHS_SDR104_MAX_DTR -
			(pcr->dma_error_count * 20000000);

	card_clock /= 1000000;
	pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);

	clk = card_clock;
	if (!initial_mode && double_clk)
		clk = card_clock * 2;
	pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
		clk, pcr->cur_clock);

	if (clk == pcr->cur_clock)
		return 0;

	if (pcr->ops->conv_clk_and_div_n)
		n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
	else
		n = (u8)(clk - 2);
	if ((clk <= 2) || (n > MAX_DIV_N_PCR))
		return -EINVAL;

	mcu_cnt = (u8)(125/clk + 3);
	if (mcu_cnt > 15)
		mcu_cnt = 15;

	/* Make sure that the SSC clock div_n is not less than MIN_DIV_N_PCR */
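	/*
	 * While div_n is below MIN_DIV_N_PCR, bump the post-divider one step
	 * (halving the output) and recompute n for the doubled SSC clock so
	 * the resulting card clock stays the same.
	 */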
	div = CLK_DIV_1;
	while ((n < MIN_DIV_N_PCR) && (div < CLK_DIV_8)) {
		if (pcr->ops->conv_clk_and_div_n) {
			int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
					DIV_N_TO_CLK) * 2;
			n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
					CLK_TO_DIV_N);
		} else {
			n = (n + 2) * 2 - 2;
		}
		div++;
	}
	pcr_dbg(pcr, "n = %d, div = %d\n", n, div);

	ssc_depth = depth[ssc_depth];
	if (double_clk)
		ssc_depth = double_ssc_depth(ssc_depth);

	ssc_depth = revise_ssc_depth(ssc_depth, div);
	pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);

	rtsx_pci_init_cmd(pcr);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
			CLK_LOW_FREQ, CLK_LOW_FREQ);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
			0xFF, (div << 4) | mcu_cnt);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
			SSC_DEPTH_MASK, ssc_depth);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
	if (vpclk) {
		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
				PHASE_NOT_RESET, 0);
		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
				PHASE_NOT_RESET, PHASE_NOT_RESET);
	}

	err = rtsx_pci_send_cmd(pcr, 2000);
	if (err < 0)
		return err;

	/* Wait for the SSC clock to become stable */
	udelay(SSC_CLOCK_STABLE_WAIT);
	err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
	if (err < 0)
		return err;

	pcr->cur_clock = clk;
	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_switch_clock);

int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card)
{
	if (pcr->ops->card_power_on)
		return pcr->ops->card_power_on(pcr, card);

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_power_on);

int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card)
{
	if (pcr->ops->card_power_off)
		return pcr->ops->card_power_off(pcr, card);

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off);

int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card)
{
	static const unsigned int cd_mask[] = {
		[RTSX_SD_CARD] = SD_EXIST,
		[RTSX_MS_CARD] = MS_EXIST
	};

	if (!(pcr->flags & PCR_MS_PMOS)) {
		/* When using single PMOS, accessing card is not permitted
		 * if the existing card is not the designated one.
		 */
		if (pcr->card_exist & (~cd_mask[card]))
			return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_exclusive_check);

int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
{
	if (pcr->ops->switch_output_voltage)
		return pcr->ops->switch_output_voltage(pcr, voltage);

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_switch_output_voltage);

unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr)
{
	unsigned int val;

	val = rtsx_pci_readl(pcr, RTSX_BIPR);
	if (pcr->ops->cd_deglitch)
		val = pcr->ops->cd_deglitch(pcr);

	return val;
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_exist);

void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr)
{
	struct completion finish;

	pcr->finish_me = &finish;
	init_completion(&finish);

	if (pcr->done)
		complete(pcr->done);

	if (!pcr->remove_pci)
		rtsx_pci_stop_cmd(pcr);

	wait_for_completion_interruptible_timeout(&finish,
			msecs_to_jiffies(2));
	pcr->finish_me = NULL;
}
EXPORT_SYMBOL_GPL(rtsx_pci_complete_unfinished_transfer);

static void rtsx_pci_card_detect(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct rtsx_pcr *pcr;
	unsigned long flags;
	unsigned int card_detect = 0, card_inserted, card_removed;
	u32 irq_status;

	dwork = to_delayed_work(work);
	pcr = container_of(dwork, struct rtsx_pcr, carddet_work);

	pcr_dbg(pcr, "--> %s\n", __func__);

	mutex_lock(&pcr->pcr_mutex);
	spin_lock_irqsave(&pcr->lock, flags);

	irq_status = rtsx_pci_readl(pcr, RTSX_BIPR);
	pcr_dbg(pcr, "irq_status: 0x%08x\n", irq_status);

	irq_status &= CARD_EXIST;
	card_inserted = pcr->card_inserted & irq_status;
	card_removed = pcr->card_removed;
	pcr->card_inserted = 0;
	pcr->card_removed = 0;

	spin_unlock_irqrestore(&pcr->lock, flags);

	if (card_inserted || card_removed) {
		pcr_dbg(pcr, "card_inserted: 0x%x, card_removed: 0x%x\n",
			card_inserted, card_removed);

		if (pcr->ops->cd_deglitch)
			card_inserted = pcr->ops->cd_deglitch(pcr);

		card_detect = card_inserted | card_removed;

		pcr->card_exist |= card_inserted;
		pcr->card_exist &= ~card_removed;
	}

	mutex_unlock(&pcr->pcr_mutex);

	if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event)
		pcr->slots[RTSX_SD_CARD].card_event(
				pcr->slots[RTSX_SD_CARD].p_dev);
	if ((card_detect & MS_EXIST) && pcr->slots[RTSX_MS_CARD].card_event)
		pcr->slots[RTSX_MS_CARD].card_event(
				pcr->slots[RTSX_MS_CARD].p_dev);
}

static void rtsx_pci_process_ocp(struct rtsx_pcr *pcr)
{
	if (pcr->ops->process_ocp) {
		pcr->ops->process_ocp(pcr);
	} else {
		if (!pcr->option.ocp_en)
			return;
		rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
		if (pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
			rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
			rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
			rtsx_pci_clear_ocpstat(pcr);
			pcr->ocp_stat = 0;
		}
	}
}

static int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr)
{
	if (pcr->option.ocp_en)
		rtsx_pci_process_ocp(pcr);

	return 0;
}

static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
{
	struct rtsx_pcr *pcr = dev_id;
	u32 int_reg;

	if (!pcr)
		return IRQ_NONE;

	spin_lock(&pcr->lock);

	int_reg = rtsx_pci_readl(pcr, RTSX_BIPR);
	/* Clear interrupt flag */
	rtsx_pci_writel(pcr, RTSX_BIPR, int_reg);
	if ((int_reg & pcr->bier) == 0) {
		spin_unlock(&pcr->lock);
		return IRQ_NONE;
	}
	if (int_reg == 0xFFFFFFFF) {
		spin_unlock(&pcr->lock);
		return IRQ_HANDLED;
	}

	int_reg &= (pcr->bier | 0x7FFFFF);

	if (int_reg & SD_OC_INT)
		rtsx_pci_process_ocp_interrupt(pcr);

	if (int_reg & SD_INT) {
		if (int_reg & SD_EXIST) {
			pcr->card_inserted |= SD_EXIST;
		} else {
			pcr->card_removed |= SD_EXIST;
			pcr->card_inserted &= ~SD_EXIST;
		}
		pcr->dma_error_count = 0;
	}

	if (int_reg & MS_INT) {
		if (int_reg & MS_EXIST) {
			pcr->card_inserted |= MS_EXIST;
		} else {
			pcr->card_removed |= MS_EXIST;
			pcr->card_inserted &= ~MS_EXIST;
		}
	}

	if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) {
		if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) {
			pcr->trans_result = TRANS_RESULT_FAIL;
			if (pcr->done)
				complete(pcr->done);
		} else if (int_reg & TRANS_OK_INT) {
			pcr->trans_result = TRANS_RESULT_OK;
			if (pcr->done)
				complete(pcr->done);
		}
	}

	if ((pcr->card_inserted || pcr->card_removed) && !(int_reg & SD_OC_INT))
		schedule_delayed_work(&pcr->carddet_work,
				msecs_to_jiffies(200));

	spin_unlock(&pcr->lock);
	return IRQ_HANDLED;
}

static int rtsx_pci_acquire_irq(struct rtsx_pcr *pcr)
{
	pcr_dbg(pcr, "%s: pcr->msi_en = %d, pci->irq = %d\n",
		__func__, pcr->msi_en, pcr->pci->irq);

	if (request_irq(pcr->pci->irq, rtsx_pci_isr,
			pcr->msi_en ? 0 : IRQF_SHARED,
			DRV_NAME_RTSX_PCI, pcr)) {
		dev_err(&(pcr->pci->dev),
			"rtsx_sdmmc: unable to grab IRQ %d, disabling device\n",
			pcr->pci->irq);
		return -1;
	}

	pcr->irq = pcr->pci->irq;
	pci_intx(pcr->pci, !pcr->msi_en);

	return 0;
}

static void rtsx_enable_aspm(struct rtsx_pcr *pcr)
{
	if (pcr->ops->set_aspm)
		pcr->ops->set_aspm(pcr, true);
	else
		rtsx_comm_set_aspm(pcr, true);
}

static void rtsx_comm_pm_power_saving(struct rtsx_pcr *pcr)
{
	struct rtsx_cr_option *option = &pcr->option;

	if (option->ltr_enabled) {
		u32 latency = option->ltr_l1off_latency;

		if (rtsx_check_dev_flag(pcr, L1_SNOOZE_TEST_EN))
			mdelay(option->l1_snooze_delay);

		rtsx_set_ltr_latency(pcr, latency);
	}

	if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
		rtsx_set_l1off_sub_cfg_d0(pcr, 0);

	rtsx_enable_aspm(pcr);
}

static void rtsx_pm_power_saving(struct rtsx_pcr *pcr)
{
	rtsx_comm_pm_power_saving(pcr);
}

static void rtsx_pci_idle_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct rtsx_pcr *pcr = container_of(dwork, struct rtsx_pcr, idle_work);

	pcr_dbg(pcr, "--> %s\n", __func__);

	mutex_lock(&pcr->pcr_mutex);

	pcr->state = PDEV_STAT_IDLE;

	if (pcr->ops->disable_auto_blink)
		pcr->ops->disable_auto_blink(pcr);
	if (pcr->ops->turn_off_led)
		pcr->ops->turn_off_led(pcr);

	rtsx_pm_power_saving(pcr);

	mutex_unlock(&pcr->pcr_mutex);
}

static void __maybe_unused rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
{
	if (pcr->ops->turn_off_led)
		pcr->ops->turn_off_led(pcr);

	rtsx_pci_writel(pcr, RTSX_BIER, 0);
	pcr->bier = 0;

	rtsx_pci_write_register(pcr, PETXCFG, 0x08, 0x08);
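	/* Tell the controller which host sleep state is being entered */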
	rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, pm_state);

	if (pcr->ops->force_power_down)
		pcr->ops->force_power_down(pcr, pm_state);
}

void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr)
{
	u8 val = SD_OCP_INT_EN | SD_DETECT_EN;

	if (pcr->ops->enable_ocp) {
		pcr->ops->enable_ocp(pcr);
	} else {
		rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
		rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
	}
}

void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr)
{
	u8 mask = SD_OCP_INT_EN | SD_DETECT_EN;

	if (pcr->ops->disable_ocp) {
		pcr->ops->disable_ocp(pcr);
	} else {
		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
		rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
				OC_POWER_DOWN);
	}
}

void rtsx_pci_init_ocp(struct rtsx_pcr *pcr)
{
	if (pcr->ops->init_ocp) {
		pcr->ops->init_ocp(pcr);
	} else {
		struct rtsx_cr_option *option = &(pcr->option);

		if (option->ocp_en) {
			u8 val = option->sd_800mA_ocp_thd;

			rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
			rtsx_pci_write_register(pcr, REG_OCPPARA1,
				SD_OCP_TIME_MASK, SD_OCP_TIME_800);
			rtsx_pci_write_register(pcr, REG_OCPPARA2,
				SD_OCP_THD_MASK, val);
			rtsx_pci_write_register(pcr, REG_OCPGLITCH,
				SD_OCP_GLITCH_MASK, pcr->hw_param.ocp_glitch);
			rtsx_pci_enable_ocp(pcr);
		} else {
			/* OC power down */
			rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
				OC_POWER_DOWN);
		}
	}
}

int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val)
{
	if (pcr->ops->get_ocpstat)
		return pcr->ops->get_ocpstat(pcr, val);
	else
		return rtsx_pci_read_register(pcr, REG_OCPSTAT, val);
}

void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr)
{
	if (pcr->ops->clear_ocpstat) {
		pcr->ops->clear_ocpstat(pcr);
	} else {
		u8 mask = SD_OCP_INT_CLR | SD_OC_CLR;
		u8 val = SD_OCP_INT_CLR | SD_OC_CLR;

		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
		udelay(100);
		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
	}
}

void rtsx_pci_enable_oobs_polling(struct rtsx_pcr *pcr)
{
	u16 val;

	if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) {
		rtsx_pci_read_phy_register(pcr, 0x01, &val);
		val |= 1<<9;
		rtsx_pci_write_phy_register(pcr, 0x01, val);
	}
	rtsx_pci_write_register(pcr, REG_CFG_OOBS_OFF_TIMER, 0xFF, 0x32);
	rtsx_pci_write_register(pcr, REG_CFG_OOBS_ON_TIMER, 0xFF, 0x05);
	rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x83);
	rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0xDE);
}

void rtsx_pci_disable_oobs_polling(struct rtsx_pcr *pcr)
{
	u16 val;

	if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) {
		rtsx_pci_read_phy_register(pcr, 0x01, &val);
		val &= ~(1<<9);
		rtsx_pci_write_phy_register(pcr, 0x01, val);
	}
	rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x03);
	rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0x00);
}

int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr)
{
	rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
		MS_CLK_EN | SD40_CLK_EN, 0);
	rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
	rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);

	msleep(50);
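
	/* Release the SD pad pull control now that card power has been switched off */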
	rtsx_pci_card_pull_ctl_disable(pcr, RTSX_SD_CARD);

	return 0;
}

int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr)
{
	rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
		MS_CLK_EN | SD40_CLK_EN, 0);

	rtsx_pci_card_pull_ctl_disable(pcr, RTSX_MS_CARD);

	rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0);
	rtsx_pci_card_power_off(pcr, RTSX_MS_CARD);

	return 0;
}

static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
{
	struct pci_dev *pdev = pcr->pci;
	int err;

	if (PCI_PID(pcr) == PID_5228)
		rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG1,
				RTS5228_LDO1_SR_TIME_MASK, RTS5228_LDO1_SR_0_5);

	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);

	rtsx_pci_enable_bus_int(pcr);

	/* Power on SSC */
	if (PCI_PID(pcr) == PID_5261) {
		/* Gate the real MCU clock */
		err = rtsx_pci_write_register(pcr, RTS5261_FW_CFG1,
				RTS5261_MCU_CLOCK_GATING, 0);
		err = rtsx_pci_write_register(pcr, RTS5261_REG_FPDCTL,
				SSC_POWER_DOWN, 0);
	} else {
		err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0);
	}
	if (err < 0)
		return err;

	/* Wait for SSC power to become stable */
	udelay(200);

	rtsx_pci_disable_aspm(pcr);
	if (pcr->ops->optimize_phy) {
		err = pcr->ops->optimize_phy(pcr);
		if (err < 0)
			return err;
	}

	rtsx_pci_init_cmd(pcr);

	/* Set mcu_cnt to 7 to ensure data can be sampled properly */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, 0x07, 0x07);

	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, HOST_SLEEP_STATE, 0x03, 0x00);
	/* Disable card clock */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, 0x1E, 0);
	/* Reset delink mode */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x0A, 0);
	/* Card driving select */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DRIVE_SEL,
			0xFF, pcr->card_drive_sel);
	/* Enable SSC Clock */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1,
			0xFF, SSC_8X_EN | SSC_SEL_4M);
	if (PCI_PID(pcr) == PID_5261)
		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
				RTS5261_SSC_DEPTH_2M);
	else if (PCI_PID(pcr) == PID_5228)
		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
				RTS5228_SSC_DEPTH_2M);
	else
		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);

	/* Disable cd_pwr_save */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x16, 0x10);
	/* Clear Link Ready Interrupt */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
			LINK_RDY_INT, LINK_RDY_INT);
	/* Enlarge the estimation window of PERST# glitch
	 * to reduce the chance of invalid card interrupt
	 */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PERST_GLITCH_WIDTH, 0xFF, 0x80);
	/* Update RC oscillator to 400k
	 * bit[0] F_HIGH: for RC oscillator, Rst_value is 1'b1
	 * 1: 2M  0: 400k
	 */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RCCTL, 0x01, 0x00);
	/* Set interrupt write clear
	 * bit 1: U_elbi_if_rd_clr_en
	 * 1: Enable ELBI interrupt[31:22] & [7:0] flag read clear
	 * 0: ELBI interrupt flag[31:22] & [7:0] only can be write clear
	 */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, NFTS_TX_CTRL, 0x02, 0);

	err = rtsx_pci_send_cmd(pcr, 100);
	if (err < 0)
		return err;

	switch (PCI_PID(pcr)) {
	case PID_5250:
	case PID_524A:
	case PID_525A:
	case PID_5260:
	case PID_5261:
	case PID_5228:
		rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
		break;
	default:
		break;
	}

	/* Initialize OCP (over-current protection) */
	rtsx_pci_init_ocp(pcr);

	/* Enable clk_request_n to enable clock power management */
	pcie_capability_write_word(pdev, PCI_EXP_LNKCTL,
			PCI_EXP_LNKCTL_CLKREQ_EN);
	/* Enter L1 when host tx idle */
	pci_write_config_byte(pdev, 0x70F, 0x5B);

	if (pcr->ops->extra_init_hw) {
		err = pcr->ops->extra_init_hw(pcr);
		if (err < 0)
			return err;
	}

	/* No CD interrupt if probing driver with card inserted.
	 * So we need to initialize pcr->card_exist here.
	 */
	if (pcr->ops->cd_deglitch)
		pcr->card_exist = pcr->ops->cd_deglitch(pcr);
	else
		pcr->card_exist = rtsx_pci_readl(pcr, RTSX_BIPR) & CARD_EXIST;

	return 0;
}

static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
{
	int err;

	spin_lock_init(&pcr->lock);
	mutex_init(&pcr->pcr_mutex);

	switch (PCI_PID(pcr)) {
	default:
	case 0x5209:
		rts5209_init_params(pcr);
		break;

	case 0x5229:
		rts5229_init_params(pcr);
		break;

	case 0x5289:
		rtl8411_init_params(pcr);
		break;

	case 0x5227:
		rts5227_init_params(pcr);
		break;

	case 0x522A:
		rts522a_init_params(pcr);
		break;

	case 0x5249:
		rts5249_init_params(pcr);
		break;

	case 0x524A:
		rts524a_init_params(pcr);
		break;

	case 0x525A:
		rts525a_init_params(pcr);
		break;

	case 0x5287:
		rtl8411b_init_params(pcr);
		break;

	case 0x5286:
		rtl8402_init_params(pcr);
		break;

	case 0x5260:
		rts5260_init_params(pcr);
		break;

	case 0x5261:
		rts5261_init_params(pcr);
		break;

	case 0x5228:
		rts5228_init_params(pcr);
		break;
	}

	pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
		PCI_PID(pcr), pcr->ic_version);

	pcr->slots = kcalloc(pcr->num_slots, sizeof(struct rtsx_slot),
			GFP_KERNEL);
	if (!pcr->slots)
		return -ENOMEM;

	if (pcr->ops->fetch_vendor_settings)
		pcr->ops->fetch_vendor_settings(pcr);

	pcr_dbg(pcr, "pcr->aspm_en = 0x%x\n", pcr->aspm_en);
	pcr_dbg(pcr, "pcr->sd30_drive_sel_1v8 = 0x%x\n",
		pcr->sd30_drive_sel_1v8);
	pcr_dbg(pcr, "pcr->sd30_drive_sel_3v3 = 0x%x\n",
		pcr->sd30_drive_sel_3v3);
	pcr_dbg(pcr, "pcr->card_drive_sel = 0x%x\n",
		pcr->card_drive_sel);
	pcr_dbg(pcr, "pcr->flags = 0x%x\n", pcr->flags);

	pcr->state = PDEV_STAT_IDLE;
	err = rtsx_pci_init_hw(pcr);
	if (err < 0) {
		kfree(pcr->slots);
		return err;
	}

	return 0;
}

static int rtsx_pci_probe(struct pci_dev *pcidev,
			  const struct pci_device_id *id)
{
	struct rtsx_pcr *pcr;
	struct pcr_handle *handle;
	u32 base, len;
	int ret, i, bar = 0;

	dev_dbg(&(pcidev->dev),
		": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
		(int)pcidev->revision);

	ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
	if (ret < 0)
		return ret;

	ret = pci_enable_device(pcidev);
	if (ret)
		return ret;

	ret = pci_request_regions(pcidev, DRV_NAME_RTSX_PCI);
	if (ret)
		goto disable;

	pcr = kzalloc(sizeof(*pcr), GFP_KERNEL);
	if (!pcr) {
		ret = -ENOMEM;
		goto release_pci;
	}
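
	/* The handle links the MFD child cells back to this pcr instance */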
	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle) {
		ret = -ENOMEM;
		goto free_pcr;
	}
	handle->pcr = pcr;

	idr_preload(GFP_KERNEL);
	spin_lock(&rtsx_pci_lock);
	ret = idr_alloc(&rtsx_pci_idr, pcr, 0, 0, GFP_NOWAIT);
	if (ret >= 0)
		pcr->id = ret;
	spin_unlock(&rtsx_pci_lock);
	idr_preload_end();
	if (ret < 0)
		goto free_handle;

	pcr->pci = pcidev;
	dev_set_drvdata(&pcidev->dev, handle);

	if (CHK_PCI_PID(pcr, 0x525A))
		bar = 1;
	len = pci_resource_len(pcidev, bar);
	base = pci_resource_start(pcidev, bar);
	pcr->remap_addr = ioremap(base, len);
	if (!pcr->remap_addr) {
		ret = -ENOMEM;
		goto free_handle;
	}

	pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev),
			RTSX_RESV_BUF_LEN, &(pcr->rtsx_resv_buf_addr),
			GFP_KERNEL);
	if (pcr->rtsx_resv_buf == NULL) {
		ret = -ENXIO;
		goto unmap;
	}
	pcr->host_cmds_ptr = pcr->rtsx_resv_buf;
	pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
	pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
	pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;

	pcr->card_inserted = 0;
	pcr->card_removed = 0;
	INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
	INIT_DELAYED_WORK(&pcr->idle_work, rtsx_pci_idle_work);

	pcr->msi_en = msi_en;
	if (pcr->msi_en) {
		ret = pci_enable_msi(pcidev);
		if (ret)
			pcr->msi_en = false;
	}

	ret = rtsx_pci_acquire_irq(pcr);
	if (ret < 0)
		goto disable_msi;

	pci_set_master(pcidev);
	synchronize_irq(pcr->irq);

	ret = rtsx_pci_init_chip(pcr);
	if (ret < 0)
		goto disable_irq;

	for (i = 0; i < ARRAY_SIZE(rtsx_pcr_cells); i++) {
		rtsx_pcr_cells[i].platform_data = handle;
		rtsx_pcr_cells[i].pdata_size = sizeof(*handle);
	}
	ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
			ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
	if (ret < 0)
		goto disable_irq;

	schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));

	return 0;

disable_irq:
	free_irq(pcr->irq, (void *)pcr);
disable_msi:
	if (pcr->msi_en)
		pci_disable_msi(pcr->pci);
	dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
			pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
unmap:
	iounmap(pcr->remap_addr);
free_handle:
	kfree(handle);
free_pcr:
	kfree(pcr);
release_pci:
	pci_release_regions(pcidev);
disable:
	pci_disable_device(pcidev);

	return ret;
}

static void rtsx_pci_remove(struct pci_dev *pcidev)
{
	struct pcr_handle *handle = pci_get_drvdata(pcidev);
	struct rtsx_pcr *pcr = handle->pcr;

	pcr->remove_pci = true;

	/* Disable interrupts at the pcr level */
	spin_lock_irq(&pcr->lock);
	rtsx_pci_writel(pcr, RTSX_BIER, 0);
	pcr->bier = 0;
	spin_unlock_irq(&pcr->lock);

	cancel_delayed_work_sync(&pcr->carddet_work);
	cancel_delayed_work_sync(&pcr->idle_work);

	mfd_remove_devices(&pcidev->dev);

	dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
			pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
	free_irq(pcr->irq, (void *)pcr);
	if (pcr->msi_en)
		pci_disable_msi(pcr->pci);
	iounmap(pcr->remap_addr);

	pci_release_regions(pcidev);
	pci_disable_device(pcidev);

	spin_lock(&rtsx_pci_lock);
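	/* Return the id allocated from rtsx_pci_idr during probe */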
	idr_remove(&rtsx_pci_idr, pcr->id);
	spin_unlock(&rtsx_pci_lock);

	kfree(pcr->slots);
	kfree(pcr);
	kfree(handle);

	dev_dbg(&(pcidev->dev),
		": Realtek PCI-E Card Reader at %s [%04x:%04x] has been removed\n",
		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
}

static int __maybe_unused rtsx_pci_suspend(struct device *dev_d)
{
	struct pci_dev *pcidev = to_pci_dev(dev_d);
	struct pcr_handle *handle;
	struct rtsx_pcr *pcr;

	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);

	handle = pci_get_drvdata(pcidev);
	pcr = handle->pcr;

	cancel_delayed_work(&pcr->carddet_work);
	cancel_delayed_work(&pcr->idle_work);

	mutex_lock(&pcr->pcr_mutex);

	rtsx_pci_power_off(pcr, HOST_ENTER_S3);

	device_wakeup_disable(dev_d);

	mutex_unlock(&pcr->pcr_mutex);
	return 0;
}

static int __maybe_unused rtsx_pci_resume(struct device *dev_d)
{
	struct pci_dev *pcidev = to_pci_dev(dev_d);
	struct pcr_handle *handle;
	struct rtsx_pcr *pcr;
	int ret = 0;

	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);

	handle = pci_get_drvdata(pcidev);
	pcr = handle->pcr;

	mutex_lock(&pcr->pcr_mutex);

	ret = rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
	if (ret)
		goto out;

	ret = rtsx_pci_init_hw(pcr);
	if (ret)
		goto out;

	schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));

out:
	mutex_unlock(&pcr->pcr_mutex);
	return ret;
}

#ifdef CONFIG_PM

static void rtsx_pci_shutdown(struct pci_dev *pcidev)
{
	struct pcr_handle *handle;
	struct rtsx_pcr *pcr;

	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);

	handle = pci_get_drvdata(pcidev);
	pcr = handle->pcr;
	rtsx_pci_power_off(pcr, HOST_ENTER_S1);

	pci_disable_device(pcidev);
	free_irq(pcr->irq, (void *)pcr);
	if (pcr->msi_en)
		pci_disable_msi(pcr->pci);
}

#else /* CONFIG_PM */

#define rtsx_pci_shutdown NULL

#endif /* CONFIG_PM */

static SIMPLE_DEV_PM_OPS(rtsx_pci_pm_ops, rtsx_pci_suspend, rtsx_pci_resume);

static struct pci_driver rtsx_pci_driver = {
	.name = DRV_NAME_RTSX_PCI,
	.id_table = rtsx_pci_ids,
	.probe = rtsx_pci_probe,
	.remove = rtsx_pci_remove,
	.driver.pm = &rtsx_pci_pm_ops,
	.shutdown = rtsx_pci_shutdown,
};
module_pci_driver(rtsx_pci_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Wei WANG <wei_wang@realsil.com.cn>");
MODULE_DESCRIPTION("Realtek PCI-E Card Reader Driver");