// SPDX-License-Identifier: GPL-2.0-or-later
/* Driver for Realtek PCI-Express card reader
 *
 * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
 *
 * Author:
 *   Wei WANG <wei_wang@realsil.com.cn>
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
#include <linux/rtsx_pci.h>
#include <linux/mmc/card.h>
#include <asm/unaligned.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

#include "rtsx_pcr.h"
#include "rts5261.h"
#include "rts5228.h"

static bool msi_en = true;
module_param(msi_en, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(msi_en, "Enable MSI");

static DEFINE_IDR(rtsx_pci_idr);
static DEFINE_SPINLOCK(rtsx_pci_lock);

static struct mfd_cell rtsx_pcr_cells[] = {
	[RTSX_SD_CARD] = {
		.name = DRV_NAME_RTSX_PCI_SDMMC,
	},
};

static const struct pci_device_id rtsx_pci_ids[] = {
	{ PCI_DEVICE(0x10EC, 0x5209), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5229), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5289), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5227), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x522A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5249), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5287), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5286), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x524A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5261), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ PCI_DEVICE(0x10EC, 0x5228), PCI_CLASS_OTHERS << 16, 0xFF0000 },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, rtsx_pci_ids);

static inline void rtsx_pci_disable_aspm(struct rtsx_pcr *pcr)
{
	pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
					   PCI_EXP_LNKCTL_ASPMC, 0);
}

static int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
{
	rtsx_pci_write_register(pcr, MSGTXDATA0,
				MASK_8_BIT_DEF, (u8) (latency & 0xFF));
	rtsx_pci_write_register(pcr, MSGTXDATA1,
				MASK_8_BIT_DEF, (u8)((latency >> 8) & 0xFF));
	rtsx_pci_write_register(pcr, MSGTXDATA2,
				MASK_8_BIT_DEF, (u8)((latency >> 16) & 0xFF));
	rtsx_pci_write_register(pcr, MSGTXDATA3,
				MASK_8_BIT_DEF, (u8)((latency >> 24) & 0xFF));
	rtsx_pci_write_register(pcr, LTR_CTL, LTR_TX_EN_MASK |
		LTR_LATENCY_MODE_MASK, LTR_TX_EN_1 | LTR_LATENCY_MODE_SW);

	return 0;
}

int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
{
	return rtsx_comm_set_ltr_latency(pcr, latency);
}

static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
{
	if (pcr->aspm_enabled == enable)
		return;

	if (pcr->aspm_en & 0x02)
		rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
			FORCE_ASPM_CTL1, enable ? 0 : FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
	else
		rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
			FORCE_ASPM_CTL1, FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);

	if (!enable && (pcr->aspm_en & 0x02))
		mdelay(10);

	pcr->aspm_enabled = enable;
}

static void rtsx_disable_aspm(struct rtsx_pcr *pcr)
{
	if (pcr->ops->set_aspm)
		pcr->ops->set_aspm(pcr, false);
	else
		rtsx_comm_set_aspm(pcr, false);
}

int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val)
{
	rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, val);

	return 0;
}

static void rtsx_set_l1off_sub_cfg_d0(struct rtsx_pcr *pcr, int active)
{
	if (pcr->ops->set_l1off_cfg_sub_d0)
		pcr->ops->set_l1off_cfg_sub_d0(pcr, active);
}

static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr)
{
	struct rtsx_cr_option *option = &pcr->option;

	rtsx_disable_aspm(pcr);

	/* Fixes DMA transfer timeout issue after disabling ASPM on RTS5260 */
	msleep(1);

	if (option->ltr_enabled)
		rtsx_set_ltr_latency(pcr, option->ltr_active_latency);

	if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
		rtsx_set_l1off_sub_cfg_d0(pcr, 1);
}

static void rtsx_pm_full_on(struct rtsx_pcr *pcr)
{
	rtsx_comm_pm_full_on(pcr);
}

void rtsx_pci_start_run(struct rtsx_pcr *pcr)
{
	/* If pci device removed, don't queue idle work any more */
	if (pcr->remove_pci)
		return;

	if (pcr->rtd3_en)
		if (pcr->is_runtime_suspended) {
			pm_runtime_get(&(pcr->pci->dev));
			pcr->is_runtime_suspended = false;
		}

	if (pcr->state != PDEV_STAT_RUN) {
		pcr->state = PDEV_STAT_RUN;
		if (pcr->ops->enable_auto_blink)
			pcr->ops->enable_auto_blink(pcr);
		rtsx_pm_full_on(pcr);
	}

	mod_delayed_work(system_wq, &pcr->idle_work, msecs_to_jiffies(200));
}
EXPORT_SYMBOL_GPL(rtsx_pci_start_run);

int rtsx_pci_write_register(struct rtsx_pcr *pcr, u16 addr, u8 mask, u8 data)
{
	int i;
	u32 val = HAIMR_WRITE_START;

	val |= (u32)(addr & 0x3FFF) << 16;
	val |= (u32)mask << 8;
	val |= (u32)data;

	rtsx_pci_writel(pcr, RTSX_HAIMR, val);

	for (i = 0; i < MAX_RW_REG_CNT; i++) {
		val = rtsx_pci_readl(pcr, RTSX_HAIMR);
		if ((val & HAIMR_TRANS_END) == 0) {
			if (data != (u8)val)
				return -EIO;
			return 0;
		}
	}

	return -ETIMEDOUT;
}
EXPORT_SYMBOL_GPL(rtsx_pci_write_register);

int rtsx_pci_read_register(struct rtsx_pcr *pcr, u16 addr, u8 *data)
{
	u32 val = HAIMR_READ_START;
	int i;

	val |= (u32)(addr & 0x3FFF) << 16;
	rtsx_pci_writel(pcr, RTSX_HAIMR, val);

	for (i = 0; i < MAX_RW_REG_CNT; i++) {
		val = rtsx_pci_readl(pcr, RTSX_HAIMR);
		if ((val & HAIMR_TRANS_END) == 0)
			break;
	}

	if (i >= MAX_RW_REG_CNT)
		return -ETIMEDOUT;

	if (data)
		*data = (u8)(val & 0xFF);

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_read_register);

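/*
 * PHY register access (sketch of the default path, used when the chip's
 * pcr->ops does not override ->write_phy/->read_phy): the 16-bit value is
 * staged in PHYDATA0/PHYDATA1, the PHY address goes to PHYADDR, and the
 * transfer is kicked off through PHYRWCTL (0x81 = write, 0x80 = read).
 * The busy bit (0x80) in PHYRWCTL is then polled until the transaction
 * completes; reads finish by picking the result out of PHYDATA0/PHYDATA1.
 */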
int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
{
	int err, i, finished = 0;
	u8 tmp;

	rtsx_pci_write_register(pcr, PHYDATA0, 0xFF, (u8)val);
	rtsx_pci_write_register(pcr, PHYDATA1, 0xFF, (u8)(val >> 8));
	rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
	rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x81);

	for (i = 0; i < 100000; i++) {
		err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
		if (err < 0)
			return err;

		if (!(tmp & 0x80)) {
			finished = 1;
			break;
		}
	}

	if (!finished)
		return -ETIMEDOUT;

	return 0;
}

int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
{
	if (pcr->ops->write_phy)
		return pcr->ops->write_phy(pcr, addr, val);

	return __rtsx_pci_write_phy_register(pcr, addr, val);
}
EXPORT_SYMBOL_GPL(rtsx_pci_write_phy_register);

int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
{
	int err, i, finished = 0;
	u16 data;
	u8 tmp, val1, val2;

	rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
	rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x80);

	for (i = 0; i < 100000; i++) {
		err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
		if (err < 0)
			return err;

		if (!(tmp & 0x80)) {
			finished = 1;
			break;
		}
	}

	if (!finished)
		return -ETIMEDOUT;

	rtsx_pci_read_register(pcr, PHYDATA0, &val1);
	rtsx_pci_read_register(pcr, PHYDATA1, &val2);
	data = val1 | (val2 << 8);

	if (val)
		*val = data;

	return 0;
}

int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
{
	if (pcr->ops->read_phy)
		return pcr->ops->read_phy(pcr, addr, val);

	return __rtsx_pci_read_phy_register(pcr, addr, val);
}
EXPORT_SYMBOL_GPL(rtsx_pci_read_phy_register);

void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr)
{
	if (pcr->ops->stop_cmd)
		return pcr->ops->stop_cmd(pcr);

	rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
	rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);

	rtsx_pci_write_register(pcr, DMACTL, 0x80, 0x80);
	rtsx_pci_write_register(pcr, RBCTL, 0x80, 0x80);
}
EXPORT_SYMBOL_GPL(rtsx_pci_stop_cmd);

void rtsx_pci_add_cmd(struct rtsx_pcr *pcr,
		u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
{
	unsigned long flags;
	u32 val = 0;
	u32 *ptr = (u32 *)(pcr->host_cmds_ptr);

	val |= (u32)(cmd_type & 0x03) << 30;
	val |= (u32)(reg_addr & 0x3FFF) << 16;
	val |= (u32)mask << 8;
	val |= (u32)data;

	spin_lock_irqsave(&pcr->lock, flags);
	ptr += pcr->ci;
	if (pcr->ci < (HOST_CMDS_BUF_LEN / 4)) {
		put_unaligned_le32(val, ptr);
		ptr++;
		pcr->ci++;
	}
	spin_unlock_irqrestore(&pcr->lock, flags);
}
EXPORT_SYMBOL_GPL(rtsx_pci_add_cmd);

void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr)
{
	u32 val = 1 << 31;

	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);

	val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
}
EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd_no_wait);

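/*
 * Command-buffer send path (as implemented above and below): each
 * rtsx_pci_add_cmd() call packs one register operation into a 32-bit
 * little-endian word - command type in bits [31:30], register address in
 * [29:16], mask in [15:8] and data in [7:0] - appended to the host command
 * buffer.  Sending writes the buffer's DMA address to HCBAR, then the byte
 * count (pcr->ci * 4) together with the start bit (1 << 31) and the
 * hardware auto-response bit (0x40000000) to HCBCTLR.  rtsx_pci_send_cmd()
 * additionally waits for the transfer-done interrupt to complete pcr->done.
 */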
int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout)
{
	struct completion trans_done;
	u32 val = 1 << 31;
	long timeleft;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&pcr->lock, flags);

	/* set up data structures for the wakeup system */
	pcr->done = &trans_done;
	pcr->trans_result = TRANS_NOT_READY;
	init_completion(&trans_done);

	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);

	val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
	/* Hardware Auto Response */
	val |= 0x40000000;
	rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);

	spin_unlock_irqrestore(&pcr->lock, flags);

	/* Wait for TRANS_OK_INT */
	timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
		err = -ETIMEDOUT;
		goto finish_send_cmd;
	}

	spin_lock_irqsave(&pcr->lock, flags);
	if (pcr->trans_result == TRANS_RESULT_FAIL)
		err = -EINVAL;
	else if (pcr->trans_result == TRANS_RESULT_OK)
		err = 0;
	else if (pcr->trans_result == TRANS_NO_DEVICE)
		err = -ENODEV;
	spin_unlock_irqrestore(&pcr->lock, flags);

finish_send_cmd:
	spin_lock_irqsave(&pcr->lock, flags);
	pcr->done = NULL;
	spin_unlock_irqrestore(&pcr->lock, flags);

	if ((err < 0) && (err != -ENODEV))
		rtsx_pci_stop_cmd(pcr);

	if (pcr->finish_me)
		complete(pcr->finish_me);

	return err;
}
EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd);

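/*
 * Scatter-gather descriptors are 64-bit little-endian words: the DMA
 * address lives in the upper 32 bits and the option bits (RTSX_SG_VALID |
 * RTSX_SG_TRANS_DATA, plus RTSX_SG_END on the last entry) in the low bits.
 * RTS5261/RTS5228 keep the low 16 bits of the length at bit 16 and fold
 * any upper length bits in at bit 6; the older readers simply store the
 * length at bit 12.
 */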
static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
		dma_addr_t addr, unsigned int len, int end)
{
	u64 *ptr = (u64 *)(pcr->host_sg_tbl_ptr) + pcr->sgi;
	u64 val;
	u8 option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;

	pcr_dbg(pcr, "DMA addr: 0x%x, Len: 0x%x\n", (unsigned int)addr, len);

	if (end)
		option |= RTSX_SG_END;

	if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5228)) {
		if (len > 0xFFFF)
			val = ((u64)addr << 32) | (((u64)len & 0xFFFF) << 16)
				| (((u64)len >> 16) << 6) | option;
		else
			val = ((u64)addr << 32) | ((u64)len << 16) | option;
	} else {
		val = ((u64)addr << 32) | ((u64)len << 12) | option;
	}
	put_unaligned_le64(val, ptr);
	pcr->sgi++;
}

int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
		int num_sg, bool read, int timeout)
{
	int err = 0, count;

	pcr_dbg(pcr, "--> %s: num_sg = %d\n", __func__, num_sg);
	count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read);
	if (count < 1)
		return -EINVAL;
	pcr_dbg(pcr, "DMA mapping count: %d\n", count);

	err = rtsx_pci_dma_transfer(pcr, sglist, count, read, timeout);

	rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read);

	return err;
}
EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data);

int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
		int num_sg, bool read)
{
	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	if (pcr->remove_pci)
		return -EINVAL;

	if ((sglist == NULL) || (num_sg <= 0))
		return -EINVAL;

	return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir);
}
EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg);

void rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
		int num_sg, bool read)
{
	enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir);
}
EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg);

int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
		int count, bool read, int timeout)
{
	struct completion trans_done;
	struct scatterlist *sg;
	dma_addr_t addr;
	long timeleft;
	unsigned long flags;
	unsigned int len;
	int i, err = 0;
	u32 val;
	u8 dir = read ? DEVICE_TO_HOST : HOST_TO_DEVICE;

	if (pcr->remove_pci)
		return -ENODEV;

	if ((sglist == NULL) || (count < 1))
		return -EINVAL;

	val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
	pcr->sgi = 0;
	for_each_sg(sglist, sg, count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
		rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1);
	}

	spin_lock_irqsave(&pcr->lock, flags);

	pcr->done = &trans_done;
	pcr->trans_result = TRANS_NOT_READY;
	init_completion(&trans_done);
	rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
	rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);

	spin_unlock_irqrestore(&pcr->lock, flags);

	timeleft = wait_for_completion_interruptible_timeout(
			&trans_done, msecs_to_jiffies(timeout));
	if (timeleft <= 0) {
		pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
		err = -ETIMEDOUT;
		goto out;
	}

	spin_lock_irqsave(&pcr->lock, flags);
	if (pcr->trans_result == TRANS_RESULT_FAIL) {
		err = -EILSEQ;
		if (pcr->dma_error_count < RTS_MAX_TIMES_FREQ_REDUCTION)
			pcr->dma_error_count++;
	}

	else if (pcr->trans_result == TRANS_NO_DEVICE)
		err = -ENODEV;
	spin_unlock_irqrestore(&pcr->lock, flags);

out:
	spin_lock_irqsave(&pcr->lock, flags);
	pcr->done = NULL;
	spin_unlock_irqrestore(&pcr->lock, flags);

	if ((err < 0) && (err != -ENODEV))
		rtsx_pci_stop_cmd(pcr);

	if (pcr->finish_me)
		complete(pcr->finish_me);

	return err;
}
EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer);

int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
{
	int err;
	int i, j;
	u16 reg;
	u8 *ptr;

	if (buf_len > 512)
		buf_len = 512;

	ptr = buf;
	reg = PPBUF_BASE2;
	for (i = 0; i < buf_len / 256; i++) {
		rtsx_pci_init_cmd(pcr);

		for (j = 0; j < 256; j++)
			rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);

		err = rtsx_pci_send_cmd(pcr, 250);
		if (err < 0)
			return err;

		memcpy(ptr, rtsx_pci_get_cmd_data(pcr), 256);
		ptr += 256;
	}

	if (buf_len % 256) {
		rtsx_pci_init_cmd(pcr);

		for (j = 0; j < buf_len % 256; j++)
			rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);

		err = rtsx_pci_send_cmd(pcr, 250);
		if (err < 0)
			return err;
	}

	memcpy(ptr, rtsx_pci_get_cmd_data(pcr), buf_len % 256);

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_read_ppbuf);

int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
{
	int err;
	int i, j;
	u16 reg;
	u8 *ptr;

	if (buf_len > 512)
		buf_len = 512;

	ptr = buf;
	reg = PPBUF_BASE2;
	for (i = 0; i < buf_len / 256; i++) {
		rtsx_pci_init_cmd(pcr);

		for (j = 0; j < 256; j++) {
			rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
					reg++, 0xFF, *ptr);
			ptr++;
		}

		err = rtsx_pci_send_cmd(pcr, 250);
		if (err < 0)
			return err;
	}

	if (buf_len % 256) {
		rtsx_pci_init_cmd(pcr);

		for (j = 0; j < buf_len % 256; j++) {
			rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
					reg++, 0xFF, *ptr);
			ptr++;
		}

		err = rtsx_pci_send_cmd(pcr, 250);
		if (err < 0)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_write_ppbuf);

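/*
 * Pull-control tables are arrays of u32 entries with the register address
 * in the upper 16 bits and the value to program in the low byte; a zero
 * upper half terminates the table.  The per-chip init code supplies
 * sd/ms_pull_ctl_enable_tbl and *_disable_tbl, and the helpers below walk
 * whichever table matches the requested card type.
 */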
static int rtsx_pci_set_pull_ctl(struct rtsx_pcr *pcr, const u32 *tbl)
{
	rtsx_pci_init_cmd(pcr);

	while (*tbl & 0xFFFF0000) {
		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
				(u16)(*tbl >> 16), 0xFF, (u8)(*tbl));
		tbl++;
	}

	return rtsx_pci_send_cmd(pcr, 100);
}

int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card)
{
	const u32 *tbl;

	if (card == RTSX_SD_CARD)
		tbl = pcr->sd_pull_ctl_enable_tbl;
	else if (card == RTSX_MS_CARD)
		tbl = pcr->ms_pull_ctl_enable_tbl;
	else
		return -EINVAL;

	return rtsx_pci_set_pull_ctl(pcr, tbl);
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_enable);

int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card)
{
	const u32 *tbl;

	if (card == RTSX_SD_CARD)
		tbl = pcr->sd_pull_ctl_disable_tbl;
	else if (card == RTSX_MS_CARD)
		tbl = pcr->ms_pull_ctl_disable_tbl;
	else
		return -EINVAL;

	return rtsx_pci_set_pull_ctl(pcr, tbl);
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable);

static void rtsx_pci_enable_bus_int(struct rtsx_pcr *pcr)
{
	struct rtsx_hw_param *hw_param = &pcr->hw_param;

	pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN
		| hw_param->interrupt_en;

	if (pcr->num_slots > 1)
		pcr->bier |= MS_INT_EN;

	/* Enable Bus Interrupt */
	rtsx_pci_writel(pcr, RTSX_BIER, pcr->bier);

	pcr_dbg(pcr, "RTSX_BIER: 0x%08x\n", pcr->bier);
}

static inline u8 double_ssc_depth(u8 depth)
{
	return ((depth > 1) ? (depth - 1) : depth);
}

static u8 revise_ssc_depth(u8 ssc_depth, u8 div)
{
	if (div > CLK_DIV_1) {
		if (ssc_depth > (div - 1))
			ssc_depth -= (div - 1);
		else
			ssc_depth = SSC_DEPTH_4M;
	}

	return ssc_depth;
}

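/*
 * Clock switching (default path, when the chip does not provide its own
 * ->conv_clk_and_div_n): the SSC divider is n = clk - 2, and the MCU count
 * is 125/clk + 3, capped at 15.  For example, a 50 MHz target gives n = 48
 * and mcu_cnt = 5.  If n would fall below MIN_DIV_N_PCR, the SSC clock is
 * doubled and the output divider (CLK_DIV_*) is raised until n is back in
 * range; the SSC depth is then reduced to match the divider.
 */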
int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
		u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
{
	int err, clk;
	u8 n, clk_divider, mcu_cnt, div;
	static const u8 depth[] = {
		[RTSX_SSC_DEPTH_4M] = SSC_DEPTH_4M,
		[RTSX_SSC_DEPTH_2M] = SSC_DEPTH_2M,
		[RTSX_SSC_DEPTH_1M] = SSC_DEPTH_1M,
		[RTSX_SSC_DEPTH_500K] = SSC_DEPTH_500K,
		[RTSX_SSC_DEPTH_250K] = SSC_DEPTH_250K,
	};

	if (PCI_PID(pcr) == PID_5261)
		return rts5261_pci_switch_clock(pcr, card_clock,
				ssc_depth, initial_mode, double_clk, vpclk);
	if (PCI_PID(pcr) == PID_5228)
		return rts5228_pci_switch_clock(pcr, card_clock,
				ssc_depth, initial_mode, double_clk, vpclk);

	if (initial_mode) {
		/* Use a clock of around 250 kHz in the initial stage */
		clk_divider = SD_CLK_DIVIDE_128;
		card_clock = 30000000;
	} else {
		clk_divider = SD_CLK_DIVIDE_0;
	}
	err = rtsx_pci_write_register(pcr, SD_CFG1,
			SD_CLK_DIVIDE_MASK, clk_divider);
	if (err < 0)
		return err;

	/* Reduce card clock by 20MHz each time a DMA transfer error occurs */
	if (card_clock == UHS_SDR104_MAX_DTR &&
	    pcr->dma_error_count &&
	    PCI_PID(pcr) == RTS5227_DEVICE_ID)
		card_clock = UHS_SDR104_MAX_DTR -
			(pcr->dma_error_count * 20000000);

	card_clock /= 1000000;
	pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);

	clk = card_clock;
	if (!initial_mode && double_clk)
		clk = card_clock * 2;
	pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
		clk, pcr->cur_clock);

	if (clk == pcr->cur_clock)
		return 0;

	if (pcr->ops->conv_clk_and_div_n)
		n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
	else
		n = (u8)(clk - 2);
	if ((clk <= 2) || (n > MAX_DIV_N_PCR))
		return -EINVAL;

	mcu_cnt = (u8)(125/clk + 3);
	if (mcu_cnt > 15)
		mcu_cnt = 15;

	/* Make sure that the SSC clock div_n is not less than MIN_DIV_N_PCR */
	div = CLK_DIV_1;
	while ((n < MIN_DIV_N_PCR) && (div < CLK_DIV_8)) {
		if (pcr->ops->conv_clk_and_div_n) {
			int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
					DIV_N_TO_CLK) * 2;
			n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
					CLK_TO_DIV_N);
		} else {
			n = (n + 2) * 2 - 2;
		}
		div++;
	}
	pcr_dbg(pcr, "n = %d, div = %d\n", n, div);

	ssc_depth = depth[ssc_depth];
	if (double_clk)
		ssc_depth = double_ssc_depth(ssc_depth);

	ssc_depth = revise_ssc_depth(ssc_depth, div);
	pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);

	rtsx_pci_init_cmd(pcr);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
			CLK_LOW_FREQ, CLK_LOW_FREQ);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
			0xFF, (div << 4) | mcu_cnt);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
			SSC_DEPTH_MASK, ssc_depth);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
	if (vpclk) {
		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
				PHASE_NOT_RESET, 0);
		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
				PHASE_NOT_RESET, PHASE_NOT_RESET);
	}

	err = rtsx_pci_send_cmd(pcr, 2000);
	if (err < 0)
		return err;

	/* Wait SSC clock stable */
	udelay(SSC_CLOCK_STABLE_WAIT);
	err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
	if (err < 0)
		return err;

	pcr->cur_clock = clk;
	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_switch_clock);

int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card)
{
	if (pcr->ops->card_power_on)
		return pcr->ops->card_power_on(pcr, card);

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_power_on);

int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card)
{
	if (pcr->ops->card_power_off)
		return pcr->ops->card_power_off(pcr, card);

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off);

int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card)
{
	static const unsigned int cd_mask[] = {
		[RTSX_SD_CARD] = SD_EXIST,
		[RTSX_MS_CARD] = MS_EXIST
	};

	if (!(pcr->flags & PCR_MS_PMOS)) {
		/* When using single PMOS, accessing card is not permitted
		 * if the existing card is not the designated one.
		 */
		if (pcr->card_exist & (~cd_mask[card]))
			return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_exclusive_check);

int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
{
	if (pcr->ops->switch_output_voltage)
		return pcr->ops->switch_output_voltage(pcr, voltage);

	return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_switch_output_voltage);

unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr)
{
	unsigned int val;

	val = rtsx_pci_readl(pcr, RTSX_BIPR);
	if (pcr->ops->cd_deglitch)
		val = pcr->ops->cd_deglitch(pcr);

	return val;
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_exist);

void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr)
{
	struct completion finish;

	pcr->finish_me = &finish;
	init_completion(&finish);

	if (pcr->done)
		complete(pcr->done);

	if (!pcr->remove_pci)
		rtsx_pci_stop_cmd(pcr);

	wait_for_completion_interruptible_timeout(&finish,
			msecs_to_jiffies(2));
	pcr->finish_me = NULL;
}
EXPORT_SYMBOL_GPL(rtsx_pci_complete_unfinished_transfer);

static void rtsx_pci_card_detect(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct rtsx_pcr *pcr;
	unsigned long flags;
	unsigned int card_detect = 0, card_inserted, card_removed;
	u32 irq_status;

	dwork = to_delayed_work(work);
	pcr = container_of(dwork, struct rtsx_pcr, carddet_work);

	pcr_dbg(pcr, "--> %s\n", __func__);

	mutex_lock(&pcr->pcr_mutex);
	spin_lock_irqsave(&pcr->lock, flags);

	irq_status = rtsx_pci_readl(pcr, RTSX_BIPR);
	pcr_dbg(pcr, "irq_status: 0x%08x\n", irq_status);

	irq_status &= CARD_EXIST;
	card_inserted = pcr->card_inserted & irq_status;
	card_removed = pcr->card_removed;
	pcr->card_inserted = 0;
	pcr->card_removed = 0;

	spin_unlock_irqrestore(&pcr->lock, flags);

	if (card_inserted || card_removed) {
		pcr_dbg(pcr, "card_inserted: 0x%x, card_removed: 0x%x\n",
			card_inserted, card_removed);

		if (pcr->ops->cd_deglitch)
			card_inserted = pcr->ops->cd_deglitch(pcr);

		card_detect = card_inserted | card_removed;

		pcr->card_exist |= card_inserted;
		pcr->card_exist &= ~card_removed;
	}

	mutex_unlock(&pcr->pcr_mutex);

	if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event)
		pcr->slots[RTSX_SD_CARD].card_event(
				pcr->slots[RTSX_SD_CARD].p_dev);
	if ((card_detect & MS_EXIST) && pcr->slots[RTSX_MS_CARD].card_event)
		pcr->slots[RTSX_MS_CARD].card_event(
				pcr->slots[RTSX_MS_CARD].p_dev);
}

static void rtsx_pci_process_ocp(struct rtsx_pcr *pcr)
{
	if (pcr->ops->process_ocp) {
		pcr->ops->process_ocp(pcr);
	} else {
		if (!pcr->option.ocp_en)
			return;
		rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
		if (pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
			rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
			rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
			rtsx_pci_clear_ocpstat(pcr);
			pcr->ocp_stat = 0;
		}
	}
}

static int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr)
{
	if (pcr->option.ocp_en)
		rtsx_pci_process_ocp(pcr);

	return 0;
}

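/*
 * Interrupt handling: the ISR reads RTSX_BIPR and writes the value back to
 * acknowledge it.  Card insert/remove bits are accumulated in
 * pcr->card_inserted/card_removed and handed to the delayed carddet_work,
 * an over-current interrupt is routed to the OCP handler, and transfer
 * completion (TRANS_OK_INT/TRANS_FAIL_INT/DELINK_INT) records the result
 * and completes pcr->done for the waiters in rtsx_pci_send_cmd() and
 * rtsx_pci_dma_transfer().
 */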
static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
{
	struct rtsx_pcr *pcr = dev_id;
	u32 int_reg;

	if (!pcr)
		return IRQ_NONE;

	spin_lock(&pcr->lock);

	int_reg = rtsx_pci_readl(pcr, RTSX_BIPR);
	/* Clear interrupt flag */
	rtsx_pci_writel(pcr, RTSX_BIPR, int_reg);
	if ((int_reg & pcr->bier) == 0) {
		spin_unlock(&pcr->lock);
		return IRQ_NONE;
	}
	if (int_reg == 0xFFFFFFFF) {
		spin_unlock(&pcr->lock);
		return IRQ_HANDLED;
	}

	int_reg &= (pcr->bier | 0x7FFFFF);

	if (int_reg & SD_OC_INT)
		rtsx_pci_process_ocp_interrupt(pcr);

	if (int_reg & SD_INT) {
		if (int_reg & SD_EXIST) {
			pcr->card_inserted |= SD_EXIST;
		} else {
			pcr->card_removed |= SD_EXIST;
			pcr->card_inserted &= ~SD_EXIST;
			if (PCI_PID(pcr) == PID_5261) {
				rtsx_pci_write_register(pcr, RTS5261_FW_STATUS,
					RTS5261_EXPRESS_LINK_FAIL_MASK, 0);
				pcr->extra_caps |= EXTRA_CAPS_SD_EXPRESS;
			}
		}
		pcr->dma_error_count = 0;
	}

	if (int_reg & MS_INT) {
		if (int_reg & MS_EXIST) {
			pcr->card_inserted |= MS_EXIST;
		} else {
			pcr->card_removed |= MS_EXIST;
			pcr->card_inserted &= ~MS_EXIST;
		}
	}

	if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) {
		if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) {
			pcr->trans_result = TRANS_RESULT_FAIL;
			if (pcr->done)
				complete(pcr->done);
		} else if (int_reg & TRANS_OK_INT) {
			pcr->trans_result = TRANS_RESULT_OK;
			if (pcr->done)
				complete(pcr->done);
		}
	}

	if ((pcr->card_inserted || pcr->card_removed) && !(int_reg & SD_OC_INT))
		schedule_delayed_work(&pcr->carddet_work,
				msecs_to_jiffies(200));

	spin_unlock(&pcr->lock);
	return IRQ_HANDLED;
}

static int rtsx_pci_acquire_irq(struct rtsx_pcr *pcr)
{
	pcr_dbg(pcr, "%s: pcr->msi_en = %d, pci->irq = %d\n",
			__func__, pcr->msi_en, pcr->pci->irq);

	if (request_irq(pcr->pci->irq, rtsx_pci_isr,
			pcr->msi_en ? 0 : IRQF_SHARED,
			DRV_NAME_RTSX_PCI, pcr)) {
		dev_err(&(pcr->pci->dev),
			"rtsx_sdmmc: unable to grab IRQ %d, disabling device\n",
			pcr->pci->irq);
		return -1;
	}

	pcr->irq = pcr->pci->irq;
	pci_intx(pcr->pci, !pcr->msi_en);

	return 0;
}

static void rtsx_enable_aspm(struct rtsx_pcr *pcr)
{
	if (pcr->ops->set_aspm)
		pcr->ops->set_aspm(pcr, true);
	else
		rtsx_comm_set_aspm(pcr, true);
}

static void rtsx_comm_pm_power_saving(struct rtsx_pcr *pcr)
{
	struct rtsx_cr_option *option = &pcr->option;

	if (option->ltr_enabled) {
		u32 latency = option->ltr_l1off_latency;

		if (rtsx_check_dev_flag(pcr, L1_SNOOZE_TEST_EN))
			mdelay(option->l1_snooze_delay);

		rtsx_set_ltr_latency(pcr, latency);
	}

	if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
		rtsx_set_l1off_sub_cfg_d0(pcr, 0);

	rtsx_enable_aspm(pcr);
}

static void rtsx_pm_power_saving(struct rtsx_pcr *pcr)
{
	rtsx_comm_pm_power_saving(pcr);
}

static void rtsx_pci_rtd3_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct rtsx_pcr *pcr = container_of(dwork, struct rtsx_pcr, rtd3_work);

	pcr_dbg(pcr, "--> %s\n", __func__);
	if (!pcr->is_runtime_suspended)
		pm_runtime_put(&(pcr->pci->dev));
}

static void rtsx_pci_idle_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct rtsx_pcr *pcr = container_of(dwork, struct rtsx_pcr, idle_work);

	pcr_dbg(pcr, "--> %s\n", __func__);

	mutex_lock(&pcr->pcr_mutex);

	pcr->state = PDEV_STAT_IDLE;

	if (pcr->ops->disable_auto_blink)
		pcr->ops->disable_auto_blink(pcr);
	if (pcr->ops->turn_off_led)
		pcr->ops->turn_off_led(pcr);

	rtsx_pm_power_saving(pcr);

	mutex_unlock(&pcr->pcr_mutex);

	if (pcr->rtd3_en)
		mod_delayed_work(system_wq, &pcr->rtd3_work, msecs_to_jiffies(10000));
}

static void rtsx_base_force_power_down(struct rtsx_pcr *pcr, u8 pm_state)
{
	/* Set relink_time to 0 */
	rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
	rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0);
	rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3,
			RELINK_TIME_MASK, 0);

	rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
			D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);

	rtsx_pci_write_register(pcr, FPDCTL, ALL_POWER_DOWN, ALL_POWER_DOWN);
}

static void __maybe_unused rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
{
	if (pcr->ops->turn_off_led)
		pcr->ops->turn_off_led(pcr);

	rtsx_pci_writel(pcr, RTSX_BIER, 0);
	pcr->bier = 0;

	rtsx_pci_write_register(pcr, PETXCFG, 0x08, 0x08);
	rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, pm_state);

	if (pcr->ops->force_power_down)
		pcr->ops->force_power_down(pcr, pm_state);
	else
		rtsx_base_force_power_down(pcr, pm_state);
}

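/*
 * Over-current protection (default implementation, used when the chip does
 * not override the ocp callbacks): the OCP block is powered by clearing
 * OC_POWER_DOWN in FPDCTL, the detection time and current threshold are
 * programmed through REG_OCPPARA1/REG_OCPPARA2, the glitch filter through
 * REG_OCPGLITCH, and detection plus its interrupt are switched on via
 * REG_OCPCTL.  Status is read back from REG_OCPSTAT and cleared by pulsing
 * SD_OCP_INT_CLR | SD_OC_CLR.
 */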
void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr)
{
	u8 val = SD_OCP_INT_EN | SD_DETECT_EN;

	if (pcr->ops->enable_ocp) {
		pcr->ops->enable_ocp(pcr);
	} else {
		rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
		rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
	}
}

void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr)
{
	u8 mask = SD_OCP_INT_EN | SD_DETECT_EN;

	if (pcr->ops->disable_ocp) {
		pcr->ops->disable_ocp(pcr);
	} else {
		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
		rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
				OC_POWER_DOWN);
	}
}

void rtsx_pci_init_ocp(struct rtsx_pcr *pcr)
{
	if (pcr->ops->init_ocp) {
		pcr->ops->init_ocp(pcr);
	} else {
		struct rtsx_cr_option *option = &(pcr->option);

		if (option->ocp_en) {
			u8 val = option->sd_800mA_ocp_thd;

			rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
			rtsx_pci_write_register(pcr, REG_OCPPARA1,
				SD_OCP_TIME_MASK, SD_OCP_TIME_800);
			rtsx_pci_write_register(pcr, REG_OCPPARA2,
				SD_OCP_THD_MASK, val);
			rtsx_pci_write_register(pcr, REG_OCPGLITCH,
				SD_OCP_GLITCH_MASK, pcr->hw_param.ocp_glitch);
			rtsx_pci_enable_ocp(pcr);
		}
	}
}

int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val)
{
	if (pcr->ops->get_ocpstat)
		return pcr->ops->get_ocpstat(pcr, val);
	else
		return rtsx_pci_read_register(pcr, REG_OCPSTAT, val);
}

void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr)
{
	if (pcr->ops->clear_ocpstat) {
		pcr->ops->clear_ocpstat(pcr);
	} else {
		u8 mask = SD_OCP_INT_CLR | SD_OC_CLR;
		u8 val = SD_OCP_INT_CLR | SD_OC_CLR;

		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
		udelay(100);
		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
	}
}

void rtsx_pci_enable_oobs_polling(struct rtsx_pcr *pcr)
{
	u16 val;

	if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) {
		rtsx_pci_read_phy_register(pcr, 0x01, &val);
		val |= 1<<9;
		rtsx_pci_write_phy_register(pcr, 0x01, val);
	}
	rtsx_pci_write_register(pcr, REG_CFG_OOBS_OFF_TIMER, 0xFF, 0x32);
	rtsx_pci_write_register(pcr, REG_CFG_OOBS_ON_TIMER, 0xFF, 0x05);
	rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x83);
	rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0xDE);
}

void rtsx_pci_disable_oobs_polling(struct rtsx_pcr *pcr)
{
	u16 val;

	if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) {
		rtsx_pci_read_phy_register(pcr, 0x01, &val);
		val &= ~(1<<9);
		rtsx_pci_write_phy_register(pcr, 0x01, val);
	}
	rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x03);
	rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0x00);
}

int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr)
{
	rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
		MS_CLK_EN | SD40_CLK_EN, 0);
	rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
	rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);

	msleep(50);

	rtsx_pci_card_pull_ctl_disable(pcr, RTSX_SD_CARD);

	return 0;
}

int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr)
{
	rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
		MS_CLK_EN | SD40_CLK_EN, 0);

	rtsx_pci_card_pull_ctl_disable(pcr, RTSX_MS_CARD);

	rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0);
	rtsx_pci_card_power_off(pcr, RTSX_MS_CARD);

	return 0;
}

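/*
 * One-time/resume hardware bring-up.  In outline: power up the SSC block
 * and wait for it to stabilise, disable ASPM and run any per-chip PHY
 * optimisation, then issue a command batch that programs the clock
 * divider, host sleep state, card clock gating, delink mode, drive
 * selects, SSC depth, the PERST# glitch window, the RC oscillator and the
 * interrupt write-clear behaviour.  Afterwards the OCP logic is set up,
 * CLKREQ# handling and L1 entry are enabled, the per-chip extra_init_hw
 * hook runs, and pcr->card_exist is seeded because no card-detect
 * interrupt is generated for a card already present at probe time.
 */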
static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
{
	struct pci_dev *pdev = pcr->pci;
	int err;

	if (PCI_PID(pcr) == PID_5228)
		rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG1, RTS5228_LDO1_SR_TIME_MASK,
				RTS5228_LDO1_SR_0_5);

	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);

	rtsx_pci_enable_bus_int(pcr);

	/* Power on SSC */
	if (PCI_PID(pcr) == PID_5261) {
		/* Gating real mcu clock */
		err = rtsx_pci_write_register(pcr, RTS5261_FW_CFG1,
				RTS5261_MCU_CLOCK_GATING, 0);
		err = rtsx_pci_write_register(pcr, RTS5261_REG_FPDCTL,
				SSC_POWER_DOWN, 0);
	} else {
		err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0);
	}
	if (err < 0)
		return err;

	/* Wait SSC power stable */
	udelay(200);

	rtsx_disable_aspm(pcr);
	if (pcr->ops->optimize_phy) {
		err = pcr->ops->optimize_phy(pcr);
		if (err < 0)
			return err;
	}

	rtsx_pci_init_cmd(pcr);

	/* Set mcu_cnt to 7 to ensure data can be sampled properly */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, 0x07, 0x07);

	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, HOST_SLEEP_STATE, 0x03, 0x00);
	/* Disable card clock */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, 0x1E, 0);
	/* Reset delink mode */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x0A, 0);
	/* Card driving select */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DRIVE_SEL,
			0xFF, pcr->card_drive_sel);
	/* Enable SSC Clock */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1,
			0xFF, SSC_8X_EN | SSC_SEL_4M);
	if (PCI_PID(pcr) == PID_5261)
		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
				RTS5261_SSC_DEPTH_2M);
	else if (PCI_PID(pcr) == PID_5228)
		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
				RTS5228_SSC_DEPTH_2M);
	else
		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);

	/* Disable cd_pwr_save */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x16, 0x10);
	/* Clear Link Ready Interrupt */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
			LINK_RDY_INT, LINK_RDY_INT);
	/* Enlarge the estimation window of PERST# glitch
	 * to reduce the chance of invalid card interrupt
	 */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PERST_GLITCH_WIDTH, 0xFF, 0x80);
	/* Update RC oscillator to 400k
	 * bit[0] F_HIGH: for RC oscillator, Rst_value is 1'b1
	 *                1: 2M  0: 400k
	 */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RCCTL, 0x01, 0x00);
	/* Set interrupt write clear
	 * bit 1: U_elbi_if_rd_clr_en
	 *	1: Enable ELBI interrupt[31:22] & [7:0] flag read clear
	 *	0: ELBI interrupt flag[31:22] & [7:0] only can be write clear
	 */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, NFTS_TX_CTRL, 0x02, 0);

	err = rtsx_pci_send_cmd(pcr, 100);
	if (err < 0)
		return err;

	switch (PCI_PID(pcr)) {
	case PID_5250:
	case PID_524A:
	case PID_525A:
	case PID_5260:
	case PID_5261:
	case PID_5228:
		rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
		break;
	default:
		break;
	}

	/* init OCP */
	rtsx_pci_init_ocp(pcr);

	/* Enable clk_request_n to enable clock power management */
	pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
			0, PCI_EXP_LNKCTL_CLKREQ_EN);
	/* Enter L1 when host tx idle */
	pci_write_config_byte(pdev, 0x70F, 0x5B);

	if (pcr->ops->extra_init_hw) {
		err = pcr->ops->extra_init_hw(pcr);
		if (err < 0)
			return err;
	}

	rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30);

	/* No CD interrupt if probing driver with card inserted.
	 * So we need to initialize pcr->card_exist here.
	 */
	if (pcr->ops->cd_deglitch)
		pcr->card_exist = pcr->ops->cd_deglitch(pcr);
	else
		pcr->card_exist = rtsx_pci_readl(pcr, RTSX_BIPR) & CARD_EXIST;

	return 0;
}

static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
{
	int err;

	spin_lock_init(&pcr->lock);
	mutex_init(&pcr->pcr_mutex);

	switch (PCI_PID(pcr)) {
	default:
	case 0x5209:
		rts5209_init_params(pcr);
		break;

	case 0x5229:
		rts5229_init_params(pcr);
		break;

	case 0x5289:
		rtl8411_init_params(pcr);
		break;

	case 0x5227:
		rts5227_init_params(pcr);
		break;

	case 0x522A:
		rts522a_init_params(pcr);
		break;

	case 0x5249:
		rts5249_init_params(pcr);
		break;

	case 0x524A:
		rts524a_init_params(pcr);
		break;

	case 0x525A:
		rts525a_init_params(pcr);
		break;

	case 0x5287:
		rtl8411b_init_params(pcr);
		break;

	case 0x5286:
		rtl8402_init_params(pcr);
		break;

	case 0x5260:
		rts5260_init_params(pcr);
		break;

	case 0x5261:
		rts5261_init_params(pcr);
		break;

	case 0x5228:
		rts5228_init_params(pcr);
		break;
	}

	pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
			PCI_PID(pcr), pcr->ic_version);

	pcr->slots = kcalloc(pcr->num_slots, sizeof(struct rtsx_slot),
			GFP_KERNEL);
	if (!pcr->slots)
		return -ENOMEM;

	if (pcr->ops->fetch_vendor_settings)
		pcr->ops->fetch_vendor_settings(pcr);

	pcr_dbg(pcr, "pcr->aspm_en = 0x%x\n", pcr->aspm_en);
	pcr_dbg(pcr, "pcr->sd30_drive_sel_1v8 = 0x%x\n",
			pcr->sd30_drive_sel_1v8);
	pcr_dbg(pcr, "pcr->sd30_drive_sel_3v3 = 0x%x\n",
			pcr->sd30_drive_sel_3v3);
	pcr_dbg(pcr, "pcr->card_drive_sel = 0x%x\n",
			pcr->card_drive_sel);
	pcr_dbg(pcr, "pcr->flags = 0x%x\n", pcr->flags);

	pcr->state = PDEV_STAT_IDLE;
	err = rtsx_pci_init_hw(pcr);
	if (err < 0) {
		kfree(pcr->slots);
		return err;
	}

	return 0;
}

static int rtsx_pci_probe(struct pci_dev *pcidev,
			  const struct pci_device_id *id)
{
	struct rtsx_pcr *pcr;
	struct pcr_handle *handle;
	u32 base, len;
	int ret, i, bar = 0;

	dev_dbg(&(pcidev->dev),
		": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
		(int)pcidev->revision);

	ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
	if (ret < 0)
		return ret;

	ret = pci_enable_device(pcidev);
	if (ret)
		return ret;

	ret = pci_request_regions(pcidev, DRV_NAME_RTSX_PCI);
	if (ret)
		goto disable;

	pcr = kzalloc(sizeof(*pcr), GFP_KERNEL);
	if (!pcr) {
		ret = -ENOMEM;
		goto release_pci;
	}

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle) {
		ret = -ENOMEM;
		goto free_pcr;
	}
	handle->pcr = pcr;

	idr_preload(GFP_KERNEL);
	spin_lock(&rtsx_pci_lock);
	ret = idr_alloc(&rtsx_pci_idr, pcr, 0, 0, GFP_NOWAIT);
	if (ret >= 0)
		pcr->id = ret;
	spin_unlock(&rtsx_pci_lock);
	idr_preload_end();
	if (ret < 0)
		goto free_handle;

	pcr->pci = pcidev;
	dev_set_drvdata(&pcidev->dev, handle);

	if (CHK_PCI_PID(pcr, 0x525A))
		bar = 1;
	len = pci_resource_len(pcidev, bar);
	base = pci_resource_start(pcidev, bar);
	pcr->remap_addr = ioremap(base, len);
	if (!pcr->remap_addr) {
		ret = -ENOMEM;
		goto free_handle;
	}

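	/*
	 * A single coherent buffer of RTSX_RESV_BUF_LEN bytes backs both
	 * host structures: the first HOST_CMDS_BUF_LEN bytes hold the
	 * command buffer programmed into HCBAR, and the remainder holds the
	 * scatter-gather descriptor table programmed into HDBAR.
	 */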
	pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev),
			RTSX_RESV_BUF_LEN, &(pcr->rtsx_resv_buf_addr),
			GFP_KERNEL);
	if (pcr->rtsx_resv_buf == NULL) {
		ret = -ENXIO;
		goto unmap;
	}
	pcr->host_cmds_ptr = pcr->rtsx_resv_buf;
	pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
	pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
	pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;

	pcr->card_inserted = 0;
	pcr->card_removed = 0;
	INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
	INIT_DELAYED_WORK(&pcr->idle_work, rtsx_pci_idle_work);

	pcr->msi_en = msi_en;
	if (pcr->msi_en) {
		ret = pci_enable_msi(pcidev);
		if (ret)
			pcr->msi_en = false;
	}

	ret = rtsx_pci_acquire_irq(pcr);
	if (ret < 0)
		goto disable_msi;

	pci_set_master(pcidev);
	synchronize_irq(pcr->irq);

	ret = rtsx_pci_init_chip(pcr);
	if (ret < 0)
		goto disable_irq;

	for (i = 0; i < ARRAY_SIZE(rtsx_pcr_cells); i++) {
		rtsx_pcr_cells[i].platform_data = handle;
		rtsx_pcr_cells[i].pdata_size = sizeof(*handle);
	}

	if (pcr->rtd3_en) {
		INIT_DELAYED_WORK(&pcr->rtd3_work, rtsx_pci_rtd3_work);
		pm_runtime_allow(&pcidev->dev);
		pm_runtime_enable(&pcidev->dev);
		pcr->is_runtime_suspended = false;
	}

	ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
			ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
	if (ret < 0)
		goto free_slots;

	schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));

	return 0;

free_slots:
	kfree(pcr->slots);
disable_irq:
	free_irq(pcr->irq, (void *)pcr);
disable_msi:
	if (pcr->msi_en)
		pci_disable_msi(pcr->pci);
	dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
			pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
unmap:
	iounmap(pcr->remap_addr);
free_handle:
	kfree(handle);
free_pcr:
	kfree(pcr);
release_pci:
	pci_release_regions(pcidev);
disable:
	pci_disable_device(pcidev);

	return ret;
}

static void rtsx_pci_remove(struct pci_dev *pcidev)
{
	struct pcr_handle *handle = pci_get_drvdata(pcidev);
	struct rtsx_pcr *pcr = handle->pcr;

	if (pcr->rtd3_en)
		pm_runtime_get_noresume(&pcr->pci->dev);

	pcr->remove_pci = true;

	/* Disable interrupts at the pcr level */
	spin_lock_irq(&pcr->lock);
	rtsx_pci_writel(pcr, RTSX_BIER, 0);
	pcr->bier = 0;
	spin_unlock_irq(&pcr->lock);

	cancel_delayed_work_sync(&pcr->carddet_work);
	cancel_delayed_work_sync(&pcr->idle_work);
	if (pcr->rtd3_en)
		cancel_delayed_work_sync(&pcr->rtd3_work);

	mfd_remove_devices(&pcidev->dev);

	dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
			pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
	free_irq(pcr->irq, (void *)pcr);
	if (pcr->msi_en)
		pci_disable_msi(pcr->pci);
	iounmap(pcr->remap_addr);

	pci_release_regions(pcidev);
	pci_disable_device(pcidev);

	spin_lock(&rtsx_pci_lock);
	idr_remove(&rtsx_pci_idr, pcr->id);
	spin_unlock(&rtsx_pci_lock);

	if (pcr->rtd3_en) {
		pm_runtime_disable(&pcr->pci->dev);
		pm_runtime_put_noidle(&pcr->pci->dev);
	}

	kfree(pcr->slots);
	kfree(pcr);
	kfree(handle);

	dev_dbg(&(pcidev->dev),
		": Realtek PCI-E Card Reader at %s [%04x:%04x] has been removed\n",
		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
}

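/*
 * System sleep support: rtsx_pci_suspend() cancels the card-detect and
 * idle work and powers the reader down via rtsx_pci_power_off(pcr,
 * HOST_ENTER_S3); rtsx_pci_resume() clears HOST_SLEEP_STATE, reruns
 * rtsx_pci_init_hw() and restarts the idle work.
 */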
static int __maybe_unused rtsx_pci_suspend(struct device *dev_d)
{
	struct pci_dev *pcidev = to_pci_dev(dev_d);
	struct pcr_handle *handle;
	struct rtsx_pcr *pcr;

	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);

	handle = pci_get_drvdata(pcidev);
	pcr = handle->pcr;

	cancel_delayed_work(&pcr->carddet_work);
	cancel_delayed_work(&pcr->idle_work);

	mutex_lock(&pcr->pcr_mutex);

	rtsx_pci_power_off(pcr, HOST_ENTER_S3);

	device_wakeup_disable(dev_d);

	mutex_unlock(&pcr->pcr_mutex);
	return 0;
}

static int __maybe_unused rtsx_pci_resume(struct device *dev_d)
{
	struct pci_dev *pcidev = to_pci_dev(dev_d);
	struct pcr_handle *handle;
	struct rtsx_pcr *pcr;
	int ret = 0;

	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);

	handle = pci_get_drvdata(pcidev);
	pcr = handle->pcr;

	mutex_lock(&pcr->pcr_mutex);

	ret = rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
	if (ret)
		goto out;

	ret = rtsx_pci_init_hw(pcr);
	if (ret)
		goto out;

	schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));

out:
	mutex_unlock(&pcr->pcr_mutex);
	return ret;
}

#ifdef CONFIG_PM

static void rtsx_pci_shutdown(struct pci_dev *pcidev)
{
	struct pcr_handle *handle;
	struct rtsx_pcr *pcr;

	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);

	handle = pci_get_drvdata(pcidev);
	pcr = handle->pcr;
	rtsx_pci_power_off(pcr, HOST_ENTER_S1);

	pci_disable_device(pcidev);
	free_irq(pcr->irq, (void *)pcr);
	if (pcr->msi_en)
		pci_disable_msi(pcr->pci);
}

static int rtsx_pci_runtime_suspend(struct device *device)
{
	struct pci_dev *pcidev = to_pci_dev(device);
	struct pcr_handle *handle;
	struct rtsx_pcr *pcr;

	handle = pci_get_drvdata(pcidev);
	pcr = handle->pcr;
	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);

	cancel_delayed_work(&pcr->carddet_work);
	cancel_delayed_work(&pcr->rtd3_work);
	cancel_delayed_work(&pcr->idle_work);

	mutex_lock(&pcr->pcr_mutex);
	rtsx_pci_power_off(pcr, HOST_ENTER_S3);

	free_irq(pcr->irq, (void *)pcr);

	mutex_unlock(&pcr->pcr_mutex);

	pcr->is_runtime_suspended = true;

	return 0;
}

static int rtsx_pci_runtime_resume(struct device *device)
{
	struct pci_dev *pcidev = to_pci_dev(device);
	struct pcr_handle *handle;
	struct rtsx_pcr *pcr;
	int ret = 0;

	handle = pci_get_drvdata(pcidev);
	pcr = handle->pcr;
	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);

	mutex_lock(&pcr->pcr_mutex);

	rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
	rtsx_pci_acquire_irq(pcr);
	synchronize_irq(pcr->irq);

	if (pcr->ops->fetch_vendor_settings)
		pcr->ops->fetch_vendor_settings(pcr);

	rtsx_pci_init_hw(pcr);

	if (pcr->slots[RTSX_SD_CARD].p_dev != NULL) {
		pcr->slots[RTSX_SD_CARD].card_event(
				pcr->slots[RTSX_SD_CARD].p_dev);
	}

	schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));

	mutex_unlock(&pcr->pcr_mutex);
	return ret;
}

#else /* CONFIG_PM */

#define rtsx_pci_shutdown NULL
#define rtsx_pci_runtime_suspend NULL
#define rtsx_pci_runtime_resume NULL

#endif /* CONFIG_PM */

static const struct dev_pm_ops rtsx_pci_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(rtsx_pci_suspend, rtsx_pci_resume)
	SET_RUNTIME_PM_OPS(rtsx_pci_runtime_suspend, rtsx_pci_runtime_resume, NULL)
};

static struct pci_driver rtsx_pci_driver = {
	.name = DRV_NAME_RTSX_PCI,
	.id_table = rtsx_pci_ids,
	.probe = rtsx_pci_probe,
	.remove = rtsx_pci_remove,
	.driver.pm = &rtsx_pci_pm_ops,
	.shutdown = rtsx_pci_shutdown,
};
module_pci_driver(rtsx_pci_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Wei WANG <wei_wang@realsil.com.cn>");
MODULE_DESCRIPTION("Realtek PCI-E Card Reader Driver");