1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* Driver for Realtek PCI-Express card reader 3 * 4 * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved. 5 * 6 * Author: 7 * Wei WANG <wei_wang@realsil.com.cn> 8 */ 9 10 #include <linux/pci.h> 11 #include <linux/module.h> 12 #include <linux/slab.h> 13 #include <linux/dma-mapping.h> 14 #include <linux/highmem.h> 15 #include <linux/interrupt.h> 16 #include <linux/delay.h> 17 #include <linux/idr.h> 18 #include <linux/platform_device.h> 19 #include <linux/mfd/core.h> 20 #include <linux/rtsx_pci.h> 21 #include <linux/mmc/card.h> 22 #include <asm/unaligned.h> 23 24 #include "rtsx_pcr.h" 25 #include "rts5261.h" 26 27 static bool msi_en = true; 28 module_param(msi_en, bool, S_IRUGO | S_IWUSR); 29 MODULE_PARM_DESC(msi_en, "Enable MSI"); 30 31 static DEFINE_IDR(rtsx_pci_idr); 32 static DEFINE_SPINLOCK(rtsx_pci_lock); 33 34 static struct mfd_cell rtsx_pcr_cells[] = { 35 [RTSX_SD_CARD] = { 36 .name = DRV_NAME_RTSX_PCI_SDMMC, 37 }, 38 }; 39 40 static const struct pci_device_id rtsx_pci_ids[] = { 41 { PCI_DEVICE(0x10EC, 0x5209), PCI_CLASS_OTHERS << 16, 0xFF0000 }, 42 { PCI_DEVICE(0x10EC, 0x5229), PCI_CLASS_OTHERS << 16, 0xFF0000 }, 43 { PCI_DEVICE(0x10EC, 0x5289), PCI_CLASS_OTHERS << 16, 0xFF0000 }, 44 { PCI_DEVICE(0x10EC, 0x5227), PCI_CLASS_OTHERS << 16, 0xFF0000 }, 45 { PCI_DEVICE(0x10EC, 0x522A), PCI_CLASS_OTHERS << 16, 0xFF0000 }, 46 { PCI_DEVICE(0x10EC, 0x5249), PCI_CLASS_OTHERS << 16, 0xFF0000 }, 47 { PCI_DEVICE(0x10EC, 0x5287), PCI_CLASS_OTHERS << 16, 0xFF0000 }, 48 { PCI_DEVICE(0x10EC, 0x5286), PCI_CLASS_OTHERS << 16, 0xFF0000 }, 49 { PCI_DEVICE(0x10EC, 0x524A), PCI_CLASS_OTHERS << 16, 0xFF0000 }, 50 { PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 }, 51 { PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 }, 52 { PCI_DEVICE(0x10EC, 0x5261), PCI_CLASS_OTHERS << 16, 0xFF0000 }, 53 { 0, } 54 }; 55 56 MODULE_DEVICE_TABLE(pci, rtsx_pci_ids); 57 58 static inline void 
rtsx_pci_enable_aspm(struct rtsx_pcr *pcr) 59 { 60 rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL, 61 0xFC, pcr->aspm_en); 62 } 63 64 static inline void rtsx_pci_disable_aspm(struct rtsx_pcr *pcr) 65 { 66 rtsx_pci_update_cfg_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL, 67 0xFC, 0); 68 } 69 70 static int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency) 71 { 72 rtsx_pci_write_register(pcr, MSGTXDATA0, 73 MASK_8_BIT_DEF, (u8) (latency & 0xFF)); 74 rtsx_pci_write_register(pcr, MSGTXDATA1, 75 MASK_8_BIT_DEF, (u8)((latency >> 8) & 0xFF)); 76 rtsx_pci_write_register(pcr, MSGTXDATA2, 77 MASK_8_BIT_DEF, (u8)((latency >> 16) & 0xFF)); 78 rtsx_pci_write_register(pcr, MSGTXDATA3, 79 MASK_8_BIT_DEF, (u8)((latency >> 24) & 0xFF)); 80 rtsx_pci_write_register(pcr, LTR_CTL, LTR_TX_EN_MASK | 81 LTR_LATENCY_MODE_MASK, LTR_TX_EN_1 | LTR_LATENCY_MODE_SW); 82 83 return 0; 84 } 85 86 int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency) 87 { 88 if (pcr->ops->set_ltr_latency) 89 return pcr->ops->set_ltr_latency(pcr, latency); 90 else 91 return rtsx_comm_set_ltr_latency(pcr, latency); 92 } 93 94 static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable) 95 { 96 struct rtsx_cr_option *option = &pcr->option; 97 98 if (pcr->aspm_enabled == enable) 99 return; 100 101 if (option->dev_aspm_mode == DEV_ASPM_DYNAMIC) { 102 if (enable) 103 rtsx_pci_enable_aspm(pcr); 104 else 105 rtsx_pci_disable_aspm(pcr); 106 } else if (option->dev_aspm_mode == DEV_ASPM_BACKDOOR) { 107 u8 mask = FORCE_ASPM_VAL_MASK; 108 u8 val = 0; 109 110 if (enable) 111 val = pcr->aspm_en; 112 rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val); 113 } 114 115 pcr->aspm_enabled = enable; 116 } 117 118 static void rtsx_disable_aspm(struct rtsx_pcr *pcr) 119 { 120 if (pcr->ops->set_aspm) 121 pcr->ops->set_aspm(pcr, false); 122 else 123 rtsx_comm_set_aspm(pcr, false); 124 } 125 126 int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val) 127 { 128 rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, 
val); 129 130 return 0; 131 } 132 133 static void rtsx_set_l1off_sub_cfg_d0(struct rtsx_pcr *pcr, int active) 134 { 135 if (pcr->ops->set_l1off_cfg_sub_d0) 136 pcr->ops->set_l1off_cfg_sub_d0(pcr, active); 137 } 138 139 static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr) 140 { 141 struct rtsx_cr_option *option = &pcr->option; 142 143 rtsx_disable_aspm(pcr); 144 145 /* Fixes DMA transfer timout issue after disabling ASPM on RTS5260 */ 146 msleep(1); 147 148 if (option->ltr_enabled) 149 rtsx_set_ltr_latency(pcr, option->ltr_active_latency); 150 151 if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN)) 152 rtsx_set_l1off_sub_cfg_d0(pcr, 1); 153 } 154 155 static void rtsx_pm_full_on(struct rtsx_pcr *pcr) 156 { 157 if (pcr->ops->full_on) 158 pcr->ops->full_on(pcr); 159 else 160 rtsx_comm_pm_full_on(pcr); 161 } 162 163 void rtsx_pci_start_run(struct rtsx_pcr *pcr) 164 { 165 /* If pci device removed, don't queue idle work any more */ 166 if (pcr->remove_pci) 167 return; 168 169 if (pcr->state != PDEV_STAT_RUN) { 170 pcr->state = PDEV_STAT_RUN; 171 if (pcr->ops->enable_auto_blink) 172 pcr->ops->enable_auto_blink(pcr); 173 rtsx_pm_full_on(pcr); 174 } 175 176 mod_delayed_work(system_wq, &pcr->idle_work, msecs_to_jiffies(200)); 177 } 178 EXPORT_SYMBOL_GPL(rtsx_pci_start_run); 179 180 int rtsx_pci_write_register(struct rtsx_pcr *pcr, u16 addr, u8 mask, u8 data) 181 { 182 int i; 183 u32 val = HAIMR_WRITE_START; 184 185 val |= (u32)(addr & 0x3FFF) << 16; 186 val |= (u32)mask << 8; 187 val |= (u32)data; 188 189 rtsx_pci_writel(pcr, RTSX_HAIMR, val); 190 191 for (i = 0; i < MAX_RW_REG_CNT; i++) { 192 val = rtsx_pci_readl(pcr, RTSX_HAIMR); 193 if ((val & HAIMR_TRANS_END) == 0) { 194 if (data != (u8)val) 195 return -EIO; 196 return 0; 197 } 198 } 199 200 return -ETIMEDOUT; 201 } 202 EXPORT_SYMBOL_GPL(rtsx_pci_write_register); 203 204 int rtsx_pci_read_register(struct rtsx_pcr *pcr, u16 addr, u8 *data) 205 { 206 u32 val = HAIMR_READ_START; 207 int i; 208 209 val |= (u32)(addr & 
0x3FFF) << 16; 210 rtsx_pci_writel(pcr, RTSX_HAIMR, val); 211 212 for (i = 0; i < MAX_RW_REG_CNT; i++) { 213 val = rtsx_pci_readl(pcr, RTSX_HAIMR); 214 if ((val & HAIMR_TRANS_END) == 0) 215 break; 216 } 217 218 if (i >= MAX_RW_REG_CNT) 219 return -ETIMEDOUT; 220 221 if (data) 222 *data = (u8)(val & 0xFF); 223 224 return 0; 225 } 226 EXPORT_SYMBOL_GPL(rtsx_pci_read_register); 227 228 int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val) 229 { 230 int err, i, finished = 0; 231 u8 tmp; 232 233 rtsx_pci_init_cmd(pcr); 234 235 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYDATA0, 0xFF, (u8)val); 236 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYDATA1, 0xFF, (u8)(val >> 8)); 237 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYADDR, 0xFF, addr); 238 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYRWCTL, 0xFF, 0x81); 239 240 err = rtsx_pci_send_cmd(pcr, 100); 241 if (err < 0) 242 return err; 243 244 for (i = 0; i < 100000; i++) { 245 err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp); 246 if (err < 0) 247 return err; 248 249 if (!(tmp & 0x80)) { 250 finished = 1; 251 break; 252 } 253 } 254 255 if (!finished) 256 return -ETIMEDOUT; 257 258 return 0; 259 } 260 261 int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val) 262 { 263 if (pcr->ops->write_phy) 264 return pcr->ops->write_phy(pcr, addr, val); 265 266 return __rtsx_pci_write_phy_register(pcr, addr, val); 267 } 268 EXPORT_SYMBOL_GPL(rtsx_pci_write_phy_register); 269 270 int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val) 271 { 272 int err, i, finished = 0; 273 u16 data; 274 u8 *ptr, tmp; 275 276 rtsx_pci_init_cmd(pcr); 277 278 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYADDR, 0xFF, addr); 279 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PHYRWCTL, 0xFF, 0x80); 280 281 err = rtsx_pci_send_cmd(pcr, 100); 282 if (err < 0) 283 return err; 284 285 for (i = 0; i < 100000; i++) { 286 err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp); 287 if (err < 0) 288 return err; 289 290 if (!(tmp & 0x80)) { 291 finished = 
1; 292 break; 293 } 294 } 295 296 if (!finished) 297 return -ETIMEDOUT; 298 299 rtsx_pci_init_cmd(pcr); 300 301 rtsx_pci_add_cmd(pcr, READ_REG_CMD, PHYDATA0, 0, 0); 302 rtsx_pci_add_cmd(pcr, READ_REG_CMD, PHYDATA1, 0, 0); 303 304 err = rtsx_pci_send_cmd(pcr, 100); 305 if (err < 0) 306 return err; 307 308 ptr = rtsx_pci_get_cmd_data(pcr); 309 data = ((u16)ptr[1] << 8) | ptr[0]; 310 311 if (val) 312 *val = data; 313 314 return 0; 315 } 316 317 int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val) 318 { 319 if (pcr->ops->read_phy) 320 return pcr->ops->read_phy(pcr, addr, val); 321 322 return __rtsx_pci_read_phy_register(pcr, addr, val); 323 } 324 EXPORT_SYMBOL_GPL(rtsx_pci_read_phy_register); 325 326 void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr) 327 { 328 if (pcr->ops->stop_cmd) 329 return pcr->ops->stop_cmd(pcr); 330 331 rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD); 332 rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA); 333 334 rtsx_pci_write_register(pcr, DMACTL, 0x80, 0x80); 335 rtsx_pci_write_register(pcr, RBCTL, 0x80, 0x80); 336 } 337 EXPORT_SYMBOL_GPL(rtsx_pci_stop_cmd); 338 339 void rtsx_pci_add_cmd(struct rtsx_pcr *pcr, 340 u8 cmd_type, u16 reg_addr, u8 mask, u8 data) 341 { 342 unsigned long flags; 343 u32 val = 0; 344 u32 *ptr = (u32 *)(pcr->host_cmds_ptr); 345 346 val |= (u32)(cmd_type & 0x03) << 30; 347 val |= (u32)(reg_addr & 0x3FFF) << 16; 348 val |= (u32)mask << 8; 349 val |= (u32)data; 350 351 spin_lock_irqsave(&pcr->lock, flags); 352 ptr += pcr->ci; 353 if (pcr->ci < (HOST_CMDS_BUF_LEN / 4)) { 354 put_unaligned_le32(val, ptr); 355 ptr++; 356 pcr->ci++; 357 } 358 spin_unlock_irqrestore(&pcr->lock, flags); 359 } 360 EXPORT_SYMBOL_GPL(rtsx_pci_add_cmd); 361 362 void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr) 363 { 364 u32 val = 1 << 31; 365 366 rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr); 367 368 val |= (u32)(pcr->ci * 4) & 0x00FFFFFF; 369 /* Hardware Auto Response */ 370 val |= 0x40000000; 371 rtsx_pci_writel(pcr, RTSX_HCBCTLR, 
val); 372 } 373 EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd_no_wait); 374 375 int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout) 376 { 377 struct completion trans_done; 378 u32 val = 1 << 31; 379 long timeleft; 380 unsigned long flags; 381 int err = 0; 382 383 spin_lock_irqsave(&pcr->lock, flags); 384 385 /* set up data structures for the wakeup system */ 386 pcr->done = &trans_done; 387 pcr->trans_result = TRANS_NOT_READY; 388 init_completion(&trans_done); 389 390 rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr); 391 392 val |= (u32)(pcr->ci * 4) & 0x00FFFFFF; 393 /* Hardware Auto Response */ 394 val |= 0x40000000; 395 rtsx_pci_writel(pcr, RTSX_HCBCTLR, val); 396 397 spin_unlock_irqrestore(&pcr->lock, flags); 398 399 /* Wait for TRANS_OK_INT */ 400 timeleft = wait_for_completion_interruptible_timeout( 401 &trans_done, msecs_to_jiffies(timeout)); 402 if (timeleft <= 0) { 403 pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__); 404 err = -ETIMEDOUT; 405 goto finish_send_cmd; 406 } 407 408 spin_lock_irqsave(&pcr->lock, flags); 409 if (pcr->trans_result == TRANS_RESULT_FAIL) 410 err = -EINVAL; 411 else if (pcr->trans_result == TRANS_RESULT_OK) 412 err = 0; 413 else if (pcr->trans_result == TRANS_NO_DEVICE) 414 err = -ENODEV; 415 spin_unlock_irqrestore(&pcr->lock, flags); 416 417 finish_send_cmd: 418 spin_lock_irqsave(&pcr->lock, flags); 419 pcr->done = NULL; 420 spin_unlock_irqrestore(&pcr->lock, flags); 421 422 if ((err < 0) && (err != -ENODEV)) 423 rtsx_pci_stop_cmd(pcr); 424 425 if (pcr->finish_me) 426 complete(pcr->finish_me); 427 428 return err; 429 } 430 EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd); 431 432 static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr, 433 dma_addr_t addr, unsigned int len, int end) 434 { 435 u64 *ptr = (u64 *)(pcr->host_sg_tbl_ptr) + pcr->sgi; 436 u64 val; 437 u8 option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA; 438 439 pcr_dbg(pcr, "DMA addr: 0x%x, Len: 0x%x\n", (unsigned int)addr, len); 440 441 if (end) 442 option |= RTSX_SG_END; 443 444 if 
(PCI_PID(pcr) == PID_5261) { 445 if (len > 0xFFFF) 446 val = ((u64)addr << 32) | (((u64)len & 0xFFFF) << 16) 447 | (((u64)len >> 16) << 6) | option; 448 else 449 val = ((u64)addr << 32) | ((u64)len << 16) | option; 450 } else { 451 val = ((u64)addr << 32) | ((u64)len << 12) | option; 452 } 453 put_unaligned_le64(val, ptr); 454 pcr->sgi++; 455 } 456 457 int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist, 458 int num_sg, bool read, int timeout) 459 { 460 int err = 0, count; 461 462 pcr_dbg(pcr, "--> %s: num_sg = %d\n", __func__, num_sg); 463 count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read); 464 if (count < 1) 465 return -EINVAL; 466 pcr_dbg(pcr, "DMA mapping count: %d\n", count); 467 468 err = rtsx_pci_dma_transfer(pcr, sglist, count, read, timeout); 469 470 rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read); 471 472 return err; 473 } 474 EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data); 475 476 int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist, 477 int num_sg, bool read) 478 { 479 enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE; 480 481 if (pcr->remove_pci) 482 return -EINVAL; 483 484 if ((sglist == NULL) || (num_sg <= 0)) 485 return -EINVAL; 486 487 return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir); 488 } 489 EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg); 490 491 void rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist, 492 int num_sg, bool read) 493 { 494 enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE; 495 496 dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir); 497 } 498 EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg); 499 500 int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist, 501 int count, bool read, int timeout) 502 { 503 struct completion trans_done; 504 struct scatterlist *sg; 505 dma_addr_t addr; 506 long timeleft; 507 unsigned long flags; 508 unsigned int len; 509 int i, err = 0; 510 u32 val; 511 u8 dir = read ? 
DEVICE_TO_HOST : HOST_TO_DEVICE; 512 513 if (pcr->remove_pci) 514 return -ENODEV; 515 516 if ((sglist == NULL) || (count < 1)) 517 return -EINVAL; 518 519 val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE; 520 pcr->sgi = 0; 521 for_each_sg(sglist, sg, count, i) { 522 addr = sg_dma_address(sg); 523 len = sg_dma_len(sg); 524 rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1); 525 } 526 527 spin_lock_irqsave(&pcr->lock, flags); 528 529 pcr->done = &trans_done; 530 pcr->trans_result = TRANS_NOT_READY; 531 init_completion(&trans_done); 532 rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr); 533 rtsx_pci_writel(pcr, RTSX_HDBCTLR, val); 534 535 spin_unlock_irqrestore(&pcr->lock, flags); 536 537 timeleft = wait_for_completion_interruptible_timeout( 538 &trans_done, msecs_to_jiffies(timeout)); 539 if (timeleft <= 0) { 540 pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__); 541 err = -ETIMEDOUT; 542 goto out; 543 } 544 545 spin_lock_irqsave(&pcr->lock, flags); 546 if (pcr->trans_result == TRANS_RESULT_FAIL) { 547 err = -EILSEQ; 548 if (pcr->dma_error_count < RTS_MAX_TIMES_FREQ_REDUCTION) 549 pcr->dma_error_count++; 550 } 551 552 else if (pcr->trans_result == TRANS_NO_DEVICE) 553 err = -ENODEV; 554 spin_unlock_irqrestore(&pcr->lock, flags); 555 556 out: 557 spin_lock_irqsave(&pcr->lock, flags); 558 pcr->done = NULL; 559 spin_unlock_irqrestore(&pcr->lock, flags); 560 561 if ((err < 0) && (err != -ENODEV)) 562 rtsx_pci_stop_cmd(pcr); 563 564 if (pcr->finish_me) 565 complete(pcr->finish_me); 566 567 return err; 568 } 569 EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer); 570 571 int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len) 572 { 573 int err; 574 int i, j; 575 u16 reg; 576 u8 *ptr; 577 578 if (buf_len > 512) 579 buf_len = 512; 580 581 ptr = buf; 582 reg = PPBUF_BASE2; 583 for (i = 0; i < buf_len / 256; i++) { 584 rtsx_pci_init_cmd(pcr); 585 586 for (j = 0; j < 256; j++) 587 rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0); 588 589 err = 
rtsx_pci_send_cmd(pcr, 250); 590 if (err < 0) 591 return err; 592 593 memcpy(ptr, rtsx_pci_get_cmd_data(pcr), 256); 594 ptr += 256; 595 } 596 597 if (buf_len % 256) { 598 rtsx_pci_init_cmd(pcr); 599 600 for (j = 0; j < buf_len % 256; j++) 601 rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0); 602 603 err = rtsx_pci_send_cmd(pcr, 250); 604 if (err < 0) 605 return err; 606 } 607 608 memcpy(ptr, rtsx_pci_get_cmd_data(pcr), buf_len % 256); 609 610 return 0; 611 } 612 EXPORT_SYMBOL_GPL(rtsx_pci_read_ppbuf); 613 614 int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len) 615 { 616 int err; 617 int i, j; 618 u16 reg; 619 u8 *ptr; 620 621 if (buf_len > 512) 622 buf_len = 512; 623 624 ptr = buf; 625 reg = PPBUF_BASE2; 626 for (i = 0; i < buf_len / 256; i++) { 627 rtsx_pci_init_cmd(pcr); 628 629 for (j = 0; j < 256; j++) { 630 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, 631 reg++, 0xFF, *ptr); 632 ptr++; 633 } 634 635 err = rtsx_pci_send_cmd(pcr, 250); 636 if (err < 0) 637 return err; 638 } 639 640 if (buf_len % 256) { 641 rtsx_pci_init_cmd(pcr); 642 643 for (j = 0; j < buf_len % 256; j++) { 644 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, 645 reg++, 0xFF, *ptr); 646 ptr++; 647 } 648 649 err = rtsx_pci_send_cmd(pcr, 250); 650 if (err < 0) 651 return err; 652 } 653 654 return 0; 655 } 656 EXPORT_SYMBOL_GPL(rtsx_pci_write_ppbuf); 657 658 static int rtsx_pci_set_pull_ctl(struct rtsx_pcr *pcr, const u32 *tbl) 659 { 660 rtsx_pci_init_cmd(pcr); 661 662 while (*tbl & 0xFFFF0000) { 663 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, 664 (u16)(*tbl >> 16), 0xFF, (u8)(*tbl)); 665 tbl++; 666 } 667 668 return rtsx_pci_send_cmd(pcr, 100); 669 } 670 671 int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card) 672 { 673 const u32 *tbl; 674 675 if (card == RTSX_SD_CARD) 676 tbl = pcr->sd_pull_ctl_enable_tbl; 677 else if (card == RTSX_MS_CARD) 678 tbl = pcr->ms_pull_ctl_enable_tbl; 679 else 680 return -EINVAL; 681 682 return rtsx_pci_set_pull_ctl(pcr, tbl); 683 } 684 
EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_enable); 685 686 int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card) 687 { 688 const u32 *tbl; 689 690 if (card == RTSX_SD_CARD) 691 tbl = pcr->sd_pull_ctl_disable_tbl; 692 else if (card == RTSX_MS_CARD) 693 tbl = pcr->ms_pull_ctl_disable_tbl; 694 else 695 return -EINVAL; 696 697 return rtsx_pci_set_pull_ctl(pcr, tbl); 698 } 699 EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable); 700 701 static void rtsx_pci_enable_bus_int(struct rtsx_pcr *pcr) 702 { 703 struct rtsx_hw_param *hw_param = &pcr->hw_param; 704 705 pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN 706 | hw_param->interrupt_en; 707 708 if (pcr->num_slots > 1) 709 pcr->bier |= MS_INT_EN; 710 711 /* Enable Bus Interrupt */ 712 rtsx_pci_writel(pcr, RTSX_BIER, pcr->bier); 713 714 pcr_dbg(pcr, "RTSX_BIER: 0x%08x\n", pcr->bier); 715 } 716 717 static inline u8 double_ssc_depth(u8 depth) 718 { 719 return ((depth > 1) ? (depth - 1) : depth); 720 } 721 722 static u8 revise_ssc_depth(u8 ssc_depth, u8 div) 723 { 724 if (div > CLK_DIV_1) { 725 if (ssc_depth > (div - 1)) 726 ssc_depth -= (div - 1); 727 else 728 ssc_depth = SSC_DEPTH_4M; 729 } 730 731 return ssc_depth; 732 } 733 734 int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock, 735 u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk) 736 { 737 int err, clk; 738 u8 n, clk_divider, mcu_cnt, div; 739 static const u8 depth[] = { 740 [RTSX_SSC_DEPTH_4M] = SSC_DEPTH_4M, 741 [RTSX_SSC_DEPTH_2M] = SSC_DEPTH_2M, 742 [RTSX_SSC_DEPTH_1M] = SSC_DEPTH_1M, 743 [RTSX_SSC_DEPTH_500K] = SSC_DEPTH_500K, 744 [RTSX_SSC_DEPTH_250K] = SSC_DEPTH_250K, 745 }; 746 747 if (PCI_PID(pcr) == PID_5261) 748 return rts5261_pci_switch_clock(pcr, card_clock, 749 ssc_depth, initial_mode, double_clk, vpclk); 750 751 if (initial_mode) { 752 /* We use 250k(around) here, in initial stage */ 753 clk_divider = SD_CLK_DIVIDE_128; 754 card_clock = 30000000; 755 } else { 756 clk_divider = SD_CLK_DIVIDE_0; 757 } 
758 err = rtsx_pci_write_register(pcr, SD_CFG1, 759 SD_CLK_DIVIDE_MASK, clk_divider); 760 if (err < 0) 761 return err; 762 763 /* Reduce card clock by 20MHz each time a DMA transfer error occurs */ 764 if (card_clock == UHS_SDR104_MAX_DTR && 765 pcr->dma_error_count && 766 PCI_PID(pcr) == RTS5227_DEVICE_ID) 767 card_clock = UHS_SDR104_MAX_DTR - 768 (pcr->dma_error_count * 20000000); 769 770 card_clock /= 1000000; 771 pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock); 772 773 clk = card_clock; 774 if (!initial_mode && double_clk) 775 clk = card_clock * 2; 776 pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n", 777 clk, pcr->cur_clock); 778 779 if (clk == pcr->cur_clock) 780 return 0; 781 782 if (pcr->ops->conv_clk_and_div_n) 783 n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N); 784 else 785 n = (u8)(clk - 2); 786 if ((clk <= 2) || (n > MAX_DIV_N_PCR)) 787 return -EINVAL; 788 789 mcu_cnt = (u8)(125/clk + 3); 790 if (mcu_cnt > 15) 791 mcu_cnt = 15; 792 793 /* Make sure that the SSC clock div_n is not less than MIN_DIV_N_PCR */ 794 div = CLK_DIV_1; 795 while ((n < MIN_DIV_N_PCR) && (div < CLK_DIV_8)) { 796 if (pcr->ops->conv_clk_and_div_n) { 797 int dbl_clk = pcr->ops->conv_clk_and_div_n(n, 798 DIV_N_TO_CLK) * 2; 799 n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk, 800 CLK_TO_DIV_N); 801 } else { 802 n = (n + 2) * 2 - 2; 803 } 804 div++; 805 } 806 pcr_dbg(pcr, "n = %d, div = %d\n", n, div); 807 808 ssc_depth = depth[ssc_depth]; 809 if (double_clk) 810 ssc_depth = double_ssc_depth(ssc_depth); 811 812 ssc_depth = revise_ssc_depth(ssc_depth, div); 813 pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth); 814 815 rtsx_pci_init_cmd(pcr); 816 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, 817 CLK_LOW_FREQ, CLK_LOW_FREQ); 818 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, 819 0xFF, (div << 4) | mcu_cnt); 820 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0); 821 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 822 SSC_DEPTH_MASK, ssc_depth); 823 
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n); 824 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB); 825 if (vpclk) { 826 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL, 827 PHASE_NOT_RESET, 0); 828 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL, 829 PHASE_NOT_RESET, PHASE_NOT_RESET); 830 } 831 832 err = rtsx_pci_send_cmd(pcr, 2000); 833 if (err < 0) 834 return err; 835 836 /* Wait SSC clock stable */ 837 udelay(SSC_CLOCK_STABLE_WAIT); 838 err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0); 839 if (err < 0) 840 return err; 841 842 pcr->cur_clock = clk; 843 return 0; 844 } 845 EXPORT_SYMBOL_GPL(rtsx_pci_switch_clock); 846 847 int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card) 848 { 849 if (pcr->ops->card_power_on) 850 return pcr->ops->card_power_on(pcr, card); 851 852 return 0; 853 } 854 EXPORT_SYMBOL_GPL(rtsx_pci_card_power_on); 855 856 int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card) 857 { 858 if (pcr->ops->card_power_off) 859 return pcr->ops->card_power_off(pcr, card); 860 861 return 0; 862 } 863 EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off); 864 865 int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card) 866 { 867 static const unsigned int cd_mask[] = { 868 [RTSX_SD_CARD] = SD_EXIST, 869 [RTSX_MS_CARD] = MS_EXIST 870 }; 871 872 if (!(pcr->flags & PCR_MS_PMOS)) { 873 /* When using single PMOS, accessing card is not permitted 874 * if the existing card is not the designated one. 
875 */ 876 if (pcr->card_exist & (~cd_mask[card])) 877 return -EIO; 878 } 879 880 return 0; 881 } 882 EXPORT_SYMBOL_GPL(rtsx_pci_card_exclusive_check); 883 884 int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage) 885 { 886 if (pcr->ops->switch_output_voltage) 887 return pcr->ops->switch_output_voltage(pcr, voltage); 888 889 return 0; 890 } 891 EXPORT_SYMBOL_GPL(rtsx_pci_switch_output_voltage); 892 893 unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr) 894 { 895 unsigned int val; 896 897 val = rtsx_pci_readl(pcr, RTSX_BIPR); 898 if (pcr->ops->cd_deglitch) 899 val = pcr->ops->cd_deglitch(pcr); 900 901 return val; 902 } 903 EXPORT_SYMBOL_GPL(rtsx_pci_card_exist); 904 905 void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr) 906 { 907 struct completion finish; 908 909 pcr->finish_me = &finish; 910 init_completion(&finish); 911 912 if (pcr->done) 913 complete(pcr->done); 914 915 if (!pcr->remove_pci) 916 rtsx_pci_stop_cmd(pcr); 917 918 wait_for_completion_interruptible_timeout(&finish, 919 msecs_to_jiffies(2)); 920 pcr->finish_me = NULL; 921 } 922 EXPORT_SYMBOL_GPL(rtsx_pci_complete_unfinished_transfer); 923 924 static void rtsx_pci_card_detect(struct work_struct *work) 925 { 926 struct delayed_work *dwork; 927 struct rtsx_pcr *pcr; 928 unsigned long flags; 929 unsigned int card_detect = 0, card_inserted, card_removed; 930 u32 irq_status; 931 932 dwork = to_delayed_work(work); 933 pcr = container_of(dwork, struct rtsx_pcr, carddet_work); 934 935 pcr_dbg(pcr, "--> %s\n", __func__); 936 937 mutex_lock(&pcr->pcr_mutex); 938 spin_lock_irqsave(&pcr->lock, flags); 939 940 irq_status = rtsx_pci_readl(pcr, RTSX_BIPR); 941 pcr_dbg(pcr, "irq_status: 0x%08x\n", irq_status); 942 943 irq_status &= CARD_EXIST; 944 card_inserted = pcr->card_inserted & irq_status; 945 card_removed = pcr->card_removed; 946 pcr->card_inserted = 0; 947 pcr->card_removed = 0; 948 949 spin_unlock_irqrestore(&pcr->lock, flags); 950 951 if (card_inserted || card_removed) { 952 
pcr_dbg(pcr, "card_inserted: 0x%x, card_removed: 0x%x\n", 953 card_inserted, card_removed); 954 955 if (pcr->ops->cd_deglitch) 956 card_inserted = pcr->ops->cd_deglitch(pcr); 957 958 card_detect = card_inserted | card_removed; 959 960 pcr->card_exist |= card_inserted; 961 pcr->card_exist &= ~card_removed; 962 } 963 964 mutex_unlock(&pcr->pcr_mutex); 965 966 if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event) 967 pcr->slots[RTSX_SD_CARD].card_event( 968 pcr->slots[RTSX_SD_CARD].p_dev); 969 if ((card_detect & MS_EXIST) && pcr->slots[RTSX_MS_CARD].card_event) 970 pcr->slots[RTSX_MS_CARD].card_event( 971 pcr->slots[RTSX_MS_CARD].p_dev); 972 } 973 974 static void rtsx_pci_process_ocp(struct rtsx_pcr *pcr) 975 { 976 if (pcr->ops->process_ocp) { 977 pcr->ops->process_ocp(pcr); 978 } else { 979 if (!pcr->option.ocp_en) 980 return; 981 rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat); 982 if (pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) { 983 rtsx_pci_card_power_off(pcr, RTSX_SD_CARD); 984 rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0); 985 rtsx_pci_clear_ocpstat(pcr); 986 pcr->ocp_stat = 0; 987 } 988 } 989 } 990 991 static int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr) 992 { 993 if (pcr->option.ocp_en) 994 rtsx_pci_process_ocp(pcr); 995 996 return 0; 997 } 998 999 static irqreturn_t rtsx_pci_isr(int irq, void *dev_id) 1000 { 1001 struct rtsx_pcr *pcr = dev_id; 1002 u32 int_reg; 1003 1004 if (!pcr) 1005 return IRQ_NONE; 1006 1007 spin_lock(&pcr->lock); 1008 1009 int_reg = rtsx_pci_readl(pcr, RTSX_BIPR); 1010 /* Clear interrupt flag */ 1011 rtsx_pci_writel(pcr, RTSX_BIPR, int_reg); 1012 if ((int_reg & pcr->bier) == 0) { 1013 spin_unlock(&pcr->lock); 1014 return IRQ_NONE; 1015 } 1016 if (int_reg == 0xFFFFFFFF) { 1017 spin_unlock(&pcr->lock); 1018 return IRQ_HANDLED; 1019 } 1020 1021 int_reg &= (pcr->bier | 0x7FFFFF); 1022 1023 if (int_reg & SD_OC_INT) 1024 rtsx_pci_process_ocp_interrupt(pcr); 1025 1026 if (int_reg & SD_INT) { 1027 if (int_reg & 
SD_EXIST) { 1028 pcr->card_inserted |= SD_EXIST; 1029 } else { 1030 pcr->card_removed |= SD_EXIST; 1031 pcr->card_inserted &= ~SD_EXIST; 1032 } 1033 pcr->dma_error_count = 0; 1034 } 1035 1036 if (int_reg & MS_INT) { 1037 if (int_reg & MS_EXIST) { 1038 pcr->card_inserted |= MS_EXIST; 1039 } else { 1040 pcr->card_removed |= MS_EXIST; 1041 pcr->card_inserted &= ~MS_EXIST; 1042 } 1043 } 1044 1045 if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) { 1046 if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) { 1047 pcr->trans_result = TRANS_RESULT_FAIL; 1048 if (pcr->done) 1049 complete(pcr->done); 1050 } else if (int_reg & TRANS_OK_INT) { 1051 pcr->trans_result = TRANS_RESULT_OK; 1052 if (pcr->done) 1053 complete(pcr->done); 1054 } 1055 } 1056 1057 if ((pcr->card_inserted || pcr->card_removed) && !(int_reg & SD_OC_INT)) 1058 schedule_delayed_work(&pcr->carddet_work, 1059 msecs_to_jiffies(200)); 1060 1061 spin_unlock(&pcr->lock); 1062 return IRQ_HANDLED; 1063 } 1064 1065 static int rtsx_pci_acquire_irq(struct rtsx_pcr *pcr) 1066 { 1067 pcr_dbg(pcr, "%s: pcr->msi_en = %d, pci->irq = %d\n", 1068 __func__, pcr->msi_en, pcr->pci->irq); 1069 1070 if (request_irq(pcr->pci->irq, rtsx_pci_isr, 1071 pcr->msi_en ? 
0 : IRQF_SHARED, 1072 DRV_NAME_RTSX_PCI, pcr)) { 1073 dev_err(&(pcr->pci->dev), 1074 "rtsx_sdmmc: unable to grab IRQ %d, disabling device\n", 1075 pcr->pci->irq); 1076 return -1; 1077 } 1078 1079 pcr->irq = pcr->pci->irq; 1080 pci_intx(pcr->pci, !pcr->msi_en); 1081 1082 return 0; 1083 } 1084 1085 static void rtsx_enable_aspm(struct rtsx_pcr *pcr) 1086 { 1087 if (pcr->ops->set_aspm) 1088 pcr->ops->set_aspm(pcr, true); 1089 else 1090 rtsx_comm_set_aspm(pcr, true); 1091 } 1092 1093 static void rtsx_comm_pm_power_saving(struct rtsx_pcr *pcr) 1094 { 1095 struct rtsx_cr_option *option = &pcr->option; 1096 1097 if (option->ltr_enabled) { 1098 u32 latency = option->ltr_l1off_latency; 1099 1100 if (rtsx_check_dev_flag(pcr, L1_SNOOZE_TEST_EN)) 1101 mdelay(option->l1_snooze_delay); 1102 1103 rtsx_set_ltr_latency(pcr, latency); 1104 } 1105 1106 if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN)) 1107 rtsx_set_l1off_sub_cfg_d0(pcr, 0); 1108 1109 rtsx_enable_aspm(pcr); 1110 } 1111 1112 static void rtsx_pm_power_saving(struct rtsx_pcr *pcr) 1113 { 1114 if (pcr->ops->power_saving) 1115 pcr->ops->power_saving(pcr); 1116 else 1117 rtsx_comm_pm_power_saving(pcr); 1118 } 1119 1120 static void rtsx_pci_idle_work(struct work_struct *work) 1121 { 1122 struct delayed_work *dwork = to_delayed_work(work); 1123 struct rtsx_pcr *pcr = container_of(dwork, struct rtsx_pcr, idle_work); 1124 1125 pcr_dbg(pcr, "--> %s\n", __func__); 1126 1127 mutex_lock(&pcr->pcr_mutex); 1128 1129 pcr->state = PDEV_STAT_IDLE; 1130 1131 if (pcr->ops->disable_auto_blink) 1132 pcr->ops->disable_auto_blink(pcr); 1133 if (pcr->ops->turn_off_led) 1134 pcr->ops->turn_off_led(pcr); 1135 1136 rtsx_pm_power_saving(pcr); 1137 1138 mutex_unlock(&pcr->pcr_mutex); 1139 } 1140 1141 #ifdef CONFIG_PM 1142 static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state) 1143 { 1144 if (pcr->ops->turn_off_led) 1145 pcr->ops->turn_off_led(pcr); 1146 1147 rtsx_pci_writel(pcr, RTSX_BIER, 0); 1148 pcr->bier = 0; 1149 1150 
rtsx_pci_write_register(pcr, PETXCFG, 0x08, 0x08); 1151 rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, pm_state); 1152 1153 if (pcr->ops->force_power_down) 1154 pcr->ops->force_power_down(pcr, pm_state); 1155 } 1156 #endif 1157 1158 void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr) 1159 { 1160 u8 val = SD_OCP_INT_EN | SD_DETECT_EN; 1161 1162 if (pcr->ops->enable_ocp) { 1163 pcr->ops->enable_ocp(pcr); 1164 } else { 1165 rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0); 1166 rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val); 1167 } 1168 1169 } 1170 1171 void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr) 1172 { 1173 u8 mask = SD_OCP_INT_EN | SD_DETECT_EN; 1174 1175 if (pcr->ops->disable_ocp) { 1176 pcr->ops->disable_ocp(pcr); 1177 } else { 1178 rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0); 1179 rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 1180 OC_POWER_DOWN); 1181 } 1182 } 1183 1184 void rtsx_pci_init_ocp(struct rtsx_pcr *pcr) 1185 { 1186 if (pcr->ops->init_ocp) { 1187 pcr->ops->init_ocp(pcr); 1188 } else { 1189 struct rtsx_cr_option *option = &(pcr->option); 1190 1191 if (option->ocp_en) { 1192 u8 val = option->sd_800mA_ocp_thd; 1193 1194 rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0); 1195 rtsx_pci_write_register(pcr, REG_OCPPARA1, 1196 SD_OCP_TIME_MASK, SD_OCP_TIME_800); 1197 rtsx_pci_write_register(pcr, REG_OCPPARA2, 1198 SD_OCP_THD_MASK, val); 1199 rtsx_pci_write_register(pcr, REG_OCPGLITCH, 1200 SD_OCP_GLITCH_MASK, pcr->hw_param.ocp_glitch); 1201 rtsx_pci_enable_ocp(pcr); 1202 } else { 1203 /* OC power down */ 1204 rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 1205 OC_POWER_DOWN); 1206 } 1207 } 1208 } 1209 1210 int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val) 1211 { 1212 if (pcr->ops->get_ocpstat) 1213 return pcr->ops->get_ocpstat(pcr, val); 1214 else 1215 return rtsx_pci_read_register(pcr, REG_OCPSTAT, val); 1216 } 1217 1218 void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr) 1219 { 1220 if (pcr->ops->clear_ocpstat) { 1221 
		pcr->ops->clear_ocpstat(pcr);
	} else {
		u8 mask = SD_OCP_INT_CLR | SD_OC_CLR;
		u8 val = SD_OCP_INT_CLR | SD_OC_CLR;

		/* Pulse the clear bits: assert, hold 100us, release. */
		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
		udelay(100);
		rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
	}
}

/*
 * rtsx_sd_power_off_card3v3 - power down the SD card slot.
 *
 * Gates all card clocks, disables the SD output driver, cuts slot power,
 * waits 50 ms for the rail to settle, then releases the SD pull controls.
 * Always returns 0.
 */
int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr)
{
	rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
		MS_CLK_EN | SD40_CLK_EN, 0);
	rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
	rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);

	msleep(50);

	rtsx_pci_card_pull_ctl_disable(pcr, RTSX_SD_CARD);

	return 0;
}

/*
 * rtsx_ms_power_off_card3v3 - power down the MemoryStick slot.
 *
 * Same idea as the SD variant, but the pull controls are released before
 * the output driver and power are cut, and no settle delay is needed.
 * Always returns 0.
 */
int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr)
{
	rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
		MS_CLK_EN | SD40_CLK_EN, 0);

	rtsx_pci_card_pull_ctl_disable(pcr, RTSX_MS_CARD);

	rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0);
	rtsx_pci_card_power_off(pcr, RTSX_MS_CARD);

	return 0;
}

/*
 * rtsx_pci_init_hw - bring the card-reader hardware to its initial state.
 *
 * Called from probe and from resume.  Powers up the SSC clock, runs the
 * chip's PHY optimization hook, then queues and sends a batch of register
 * writes that configure clocks, link state and interrupt behavior.
 * Returns 0 on success or a negative error code.
 */
static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
{
	int err;

	pcr->pcie_cap = pci_find_capability(pcr->pci, PCI_CAP_ID_EXP);
	/* Tell the chip where the host command buffer lives. */
	rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);

	rtsx_pci_enable_bus_int(pcr);

	/* Power on SSC */
	if (PCI_PID(pcr) == PID_5261) {
		/* Gating real mcu clock */
		err = rtsx_pci_write_register(pcr, RTS5261_FW_CFG1,
			RTS5261_MCU_CLOCK_GATING, 0);
		/* NOTE(review): err from the clock-gating write above is
		 * overwritten by the next write, so a failure of that first
		 * write is never reported to the caller.
		 */
		err = rtsx_pci_write_register(pcr, RTS5261_REG_FPDCTL,
			SSC_POWER_DOWN, 0);
	} else {
		err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0);
	}
	if (err < 0)
		return err;

	/* Wait SSC power stable */
	udelay(200);

	rtsx_pci_disable_aspm(pcr);
	if (pcr->ops->optimize_phy) {
		err = pcr->ops->optimize_phy(pcr);
		if (err < 0)
			return err;
	}

	/* Start batching register writes into the host command buffer. */
	rtsx_pci_init_cmd(pcr);

	/* Set mcu_cnt to 7 to ensure data can be sampled properly */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, 0x07, 0x07);

	/* Leave any host sleep state. */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, HOST_SLEEP_STATE, 0x03, 0x00);
	/* Disable card clock */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, 0x1E, 0);
	/* Reset delink mode */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x0A, 0);
	/* Card driving select */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DRIVE_SEL,
			0xFF, pcr->card_drive_sel);
	/* Enable SSC Clock */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1,
			0xFF, SSC_8X_EN | SSC_SEL_4M);
	/* SSC modulation depth differs on the RTS5261. */
	if (PCI_PID(pcr) == PID_5261)
		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
				RTS5261_SSC_DEPTH_2M);
	else
		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);

	/* Disable cd_pwr_save */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x16, 0x10);
	/* Clear Link Ready Interrupt */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
			LINK_RDY_INT, LINK_RDY_INT);
	/* Enlarge the estimation window of PERST# glitch
	 * to reduce the chance of invalid card interrupt
	 */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PERST_GLITCH_WIDTH, 0xFF, 0x80);
	/* Update RC oscillator to 400k
	 * bit[0] F_HIGH: for RC oscillator, Rst_value is 1'b1
	 *                1: 2M  0: 400k
	 */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RCCTL, 0x01, 0x00);
	/* Set interrupt write clear
	 * bit 1: U_elbi_if_rd_clr_en
	 *	1: Enable ELBI interrupt[31:22] & [7:0] flag read clear
	 *	0: ELBI interrupt flag[31:22] & [7:0] only can be write clear
	 */
	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, NFTS_TX_CTRL, 0x02, 0);

	/* Flush the queued writes to the chip (100 ms timeout). */
	err = rtsx_pci_send_cmd(pcr, 100);
	if (err < 0)
		return err;

	/* Newer chips need PM clock forcing enabled. */
	switch (PCI_PID(pcr)) {
	case PID_5250:
	case PID_524A:
	case PID_525A:
	case PID_5260:
	case PID_5261:
		rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
		break;
	default:
		break;
	}

	/* Initialize over-current protection. */
	rtsx_pci_init_ocp(pcr);

	/* Enable clk_request_n to enable clock power management */
	rtsx_pci_write_config_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL + 1, 1);
	/* Enter L1 when host tx idle */
	rtsx_pci_write_config_byte(pcr, 0x70F, 0x5B);

	/* Chip-specific late init hook (may fail the whole init). */
	if (pcr->ops->extra_init_hw) {
		err = pcr->ops->extra_init_hw(pcr);
		if (err < 0)
			return err;
	}

	/* No CD interrupt if probing driver with card inserted.
	 * So we need to initialize pcr->card_exist here.
	 */
	if (pcr->ops->cd_deglitch)
		pcr->card_exist = pcr->ops->cd_deglitch(pcr);
	else
		pcr->card_exist = rtsx_pci_readl(pcr, RTSX_BIPR) & CARD_EXIST;

	return 0;
}

/*
 * rtsx_pci_init_chip - per-chip parameter setup and first hardware init.
 *
 * Selects the chip-specific parameter initializer by PCI device ID,
 * allocates the slot table and runs rtsx_pci_init_hw().  Returns 0 on
 * success or a negative error code (slot table is freed on failure).
 */
static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
{
	int err;

	spin_lock_init(&pcr->lock);
	mutex_init(&pcr->pcr_mutex);

	/* Unknown PIDs deliberately fall into the 5209 defaults. */
	switch (PCI_PID(pcr)) {
	default:
	case 0x5209:
		rts5209_init_params(pcr);
		break;

	case 0x5229:
		rts5229_init_params(pcr);
		break;

	case 0x5289:
		rtl8411_init_params(pcr);
		break;

	case 0x5227:
		rts5227_init_params(pcr);
		break;

	case 0x522A:
		rts522a_init_params(pcr);
		break;

	case 0x5249:
		rts5249_init_params(pcr);
		break;

	case 0x524A:
		rts524a_init_params(pcr);
		break;

	case 0x525A:
		rts525a_init_params(pcr);
		break;

	case 0x5287:
		rtl8411b_init_params(pcr);
		break;

	case 0x5286:
		rtl8402_init_params(pcr);
		break;

	case 0x5260:
		rts5260_init_params(pcr);
		break;

	case 0x5261:
		rts5261_init_params(pcr);
		break;
	}

	pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
			PCI_PID(pcr), pcr->ic_version);

	pcr->slots = kcalloc(pcr->num_slots, sizeof(struct rtsx_slot),
			GFP_KERNEL);
	if (!pcr->slots)
		return -ENOMEM;

	/* Let the chip driver read vendor settings from config space. */
	if (pcr->ops->fetch_vendor_settings)
		pcr->ops->fetch_vendor_settings(pcr);

	pcr_dbg(pcr, "pcr->aspm_en = 0x%x\n", pcr->aspm_en);
	pcr_dbg(pcr, "pcr->sd30_drive_sel_1v8 = 0x%x\n",
			pcr->sd30_drive_sel_1v8);
	pcr_dbg(pcr, "pcr->sd30_drive_sel_3v3 = 0x%x\n",
			pcr->sd30_drive_sel_3v3);
	pcr_dbg(pcr, "pcr->card_drive_sel = 0x%x\n",
			pcr->card_drive_sel);
	pcr_dbg(pcr, "pcr->flags = 0x%x\n", pcr->flags);

	pcr->state = PDEV_STAT_IDLE;
	err = rtsx_pci_init_hw(pcr);
	if (err < 0) {
		/* Undo the slot table allocation made above. */
		kfree(pcr->slots);
		return err;
	}

	return 0;
}

/*
 * rtsx_pci_probe - PCI probe entry point.
 *
 * Enables the device, maps its BAR, allocates the shared command/sg DMA
 * buffer, sets up IRQ/MSI, initializes the chip and registers the MFD
 * child cells.  Resources are unwound in reverse order on failure.
 */
static int rtsx_pci_probe(struct pci_dev *pcidev,
			  const struct pci_device_id *id)
{
	struct rtsx_pcr *pcr;
	struct pcr_handle *handle;
	u32 base, len;
	int ret, i, bar = 0;

	dev_dbg(&(pcidev->dev),
		": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
		(int)pcidev->revision);

	/* NOTE(review): pci_set_dma_mask() is the legacy DMA mask API;
	 * newer kernels prefer dma_set_mask_and_coherent() — confirm
	 * against the target kernel version.
	 */
	ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
	if (ret < 0)
		return ret;

	ret = pci_enable_device(pcidev);
	if (ret)
		return ret;

	ret = pci_request_regions(pcidev, DRV_NAME_RTSX_PCI);
	if (ret)
		goto disable;

	pcr = kzalloc(sizeof(*pcr), GFP_KERNEL);
	if (!pcr) {
		ret = -ENOMEM;
		goto release_pci;
	}

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle) {
		ret = -ENOMEM;
		goto free_pcr;
	}
	handle->pcr = pcr;

	/* Allocate a unique instance id under the global IDR lock. */
	idr_preload(GFP_KERNEL);
	spin_lock(&rtsx_pci_lock);
	ret = idr_alloc(&rtsx_pci_idr, pcr, 0, 0, GFP_NOWAIT);
	if (ret >= 0)
		pcr->id = ret;
	spin_unlock(&rtsx_pci_lock);
	idr_preload_end();
	if (ret < 0)
		goto free_handle;

	pcr->pci = pcidev;
	dev_set_drvdata(&pcidev->dev, handle);

	/* The RTS525A exposes its registers on BAR 1, not BAR 0. */
	if (CHK_PCI_PID(pcr, 0x525A))
		bar = 1;
	len = pci_resource_len(pcidev, bar);
	base = pci_resource_start(pcidev, bar);
	pcr->remap_addr = ioremap(base, len);
	if (!pcr->remap_addr) {
		ret = -ENOMEM;
		goto free_handle;
	}

	/* One coherent buffer shared by the host command queue and the
	 * scatter-gather table; split below at HOST_CMDS_BUF_LEN.
	 */
	pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev),
			RTSX_RESV_BUF_LEN, &(pcr->rtsx_resv_buf_addr),
			GFP_KERNEL);
	if (pcr->rtsx_resv_buf == NULL) {
		ret = -ENXIO;
		goto unmap;
	}
	pcr->host_cmds_ptr = pcr->rtsx_resv_buf;
	pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
	pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
	pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;

	pcr->card_inserted = 0;
	pcr->card_removed = 0;
	INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
	INIT_DELAYED_WORK(&pcr->idle_work, rtsx_pci_idle_work);

	/* Try MSI if requested; fall back to legacy IRQ on failure. */
	pcr->msi_en = msi_en;
	if (pcr->msi_en) {
		ret = pci_enable_msi(pcidev);
		if (ret)
			pcr->msi_en = false;
	}

	ret = rtsx_pci_acquire_irq(pcr);
	if (ret < 0)
		goto disable_msi;

	pci_set_master(pcidev);
	synchronize_irq(pcr->irq);

	ret = rtsx_pci_init_chip(pcr);
	if (ret < 0)
		goto disable_irq;

	/* Hand the pcr handle to every MFD child cell. */
	for (i = 0; i < ARRAY_SIZE(rtsx_pcr_cells); i++) {
		rtsx_pcr_cells[i].platform_data = handle;
		rtsx_pcr_cells[i].pdata_size = sizeof(*handle);
	}
	ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
			ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
	if (ret < 0)
		goto disable_irq;

	schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));

	return 0;

	/* Error unwind: labels release resources in reverse order of
	 * acquisition; execution falls through each stage.
	 */
disable_irq:
	free_irq(pcr->irq, (void *)pcr);
disable_msi:
	if (pcr->msi_en)
		pci_disable_msi(pcr->pci);
	dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
			pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
unmap:
	iounmap(pcr->remap_addr);
free_handle:
	kfree(handle);
free_pcr:
	kfree(pcr);
release_pci:
	pci_release_regions(pcidev);
disable:
	pci_disable_device(pcidev);

	return ret;
}

/*
 * rtsx_pci_remove - PCI remove entry point.
 *
 * Masks chip interrupts, stops the deferred work, removes the MFD
 * children, then releases everything probe acquired in reverse order.
 */
static void rtsx_pci_remove(struct pci_dev *pcidev)
{
	struct pcr_handle *handle = pci_get_drvdata(pcidev);
	struct rtsx_pcr *pcr = handle->pcr;

	/* Flag teardown so in-flight paths can bail out. */
	pcr->remove_pci = true;

	/* Disable interrupts at the pcr level */
	spin_lock_irq(&pcr->lock);
	rtsx_pci_writel(pcr, RTSX_BIER, 0);
	pcr->bier = 0;
	spin_unlock_irq(&pcr->lock);

	cancel_delayed_work_sync(&pcr->carddet_work);
	cancel_delayed_work_sync(&pcr->idle_work);

	mfd_remove_devices(&pcidev->dev);

	dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
			pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
	free_irq(pcr->irq, (void *)pcr);
	if (pcr->msi_en)
		pci_disable_msi(pcr->pci);
	iounmap(pcr->remap_addr);

	pci_release_regions(pcidev);
	pci_disable_device(pcidev);

	spin_lock(&rtsx_pci_lock);
	idr_remove(&rtsx_pci_idr, pcr->id);
	spin_unlock(&rtsx_pci_lock);

	kfree(pcr->slots);
	kfree(pcr);
	kfree(handle);

	dev_dbg(&(pcidev->dev),
		": Realtek PCI-E Card Reader at %s [%04x:%04x] has been removed\n",
		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
}

#ifdef CONFIG_PM

/*
 * rtsx_pci_suspend - legacy PCI suspend hook (S3 entry).
 *
 * Stops deferred work, powers the reader off and puts the PCI device
 * into the state chosen for @state.  Always returns 0.
 */
static int rtsx_pci_suspend(struct pci_dev *pcidev, pm_message_t state)
{
	struct pcr_handle *handle;
	struct rtsx_pcr *pcr;

	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);

	handle = pci_get_drvdata(pcidev);
	pcr = handle->pcr;

	/* NOTE(review): these are the non-sync cancel variants — a work
	 * item already running may still be in flight here; confirm that
	 * is safe against the power-off below.
	 */
	cancel_delayed_work(&pcr->carddet_work);
	cancel_delayed_work(&pcr->idle_work);

	mutex_lock(&pcr->pcr_mutex);

	rtsx_pci_power_off(pcr, HOST_ENTER_S3);

	pci_save_state(pcidev);
	pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0);
	pci_disable_device(pcidev);
	pci_set_power_state(pcidev, pci_choose_state(pcidev, state));

	mutex_unlock(&pcr->pcr_mutex);
	return 0;
}

/*
 * rtsx_pci_resume - legacy PCI resume hook.
 *
 * Restores PCI state, re-enables the device and reruns the full
 * hardware init sequence.  Returns 0 or a negative error code.
 */
static int rtsx_pci_resume(struct pci_dev *pcidev)
{
	struct pcr_handle *handle;
	struct rtsx_pcr *pcr;
	int ret = 0;

	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);

	handle = pci_get_drvdata(pcidev);
	pcr = handle->pcr;

	mutex_lock(&pcr->pcr_mutex);

	pci_set_power_state(pcidev, PCI_D0);
	pci_restore_state(pcidev);
	ret = pci_enable_device(pcidev);
	if (ret)
		goto out;
	pci_set_master(pcidev);

	/* Leave any host sleep state before re-initializing. */
	ret = rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
	if (ret)
		goto out;

	ret = rtsx_pci_init_hw(pcr);
	if (ret)
		goto out;

	schedule_delayed_work(&pcr->idle_work, msecs_to_jiffies(200));

out:
	mutex_unlock(&pcr->pcr_mutex);
	return ret;
}

/*
 * rtsx_pci_shutdown - power the reader off on system shutdown/reboot
 * and release the interrupt resources.
 */
static void rtsx_pci_shutdown(struct pci_dev *pcidev)
{
	struct pcr_handle *handle;
	struct rtsx_pcr *pcr;

	dev_dbg(&(pcidev->dev), "--> %s\n", __func__);

	handle = pci_get_drvdata(pcidev);
	pcr = handle->pcr;
	rtsx_pci_power_off(pcr, HOST_ENTER_S1);

	pci_disable_device(pcidev);
	free_irq(pcr->irq, (void *)pcr);
	if (pcr->msi_en)
		pci_disable_msi(pcr->pci);
}

#else /* CONFIG_PM */

#define rtsx_pci_suspend NULL
#define rtsx_pci_resume NULL
#define rtsx_pci_shutdown NULL

#endif /* CONFIG_PM */

/* NOTE(review): .suspend/.resume are the legacy PCI PM hooks; newer
 * kernels use dev_pm_ops — confirm against the target kernel version.
 */
static struct pci_driver rtsx_pci_driver = {
	.name = DRV_NAME_RTSX_PCI,
	.id_table = rtsx_pci_ids,
	.probe = rtsx_pci_probe,
	.remove = rtsx_pci_remove,
	.suspend = rtsx_pci_suspend,
	.resume = rtsx_pci_resume,
	.shutdown = rtsx_pci_shutdown,
};
module_pci_driver(rtsx_pci_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Wei WANG <wei_wang@realsil.com.cn>");
MODULE_DESCRIPTION("Realtek PCI-E Card Reader Driver");