// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2011-2017, The Linux Foundation
 */

#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include "slimbus.h"

/* Manager registers */
#define MGR_CFG		0x200
#define MGR_STATUS	0x204
#define MGR_INT_EN	0x210
#define MGR_INT_STAT	0x214
#define MGR_INT_CLR	0x218
#define MGR_TX_MSG	0x230
#define MGR_RX_MSG	0x270
#define MGR_IE_STAT	0x2F0
#define MGR_VE_STAT	0x300
#define MGR_CFG_ENABLE	1

/* Framer registers */
#define FRM_CFG		0x400
#define FRM_STAT	0x404
#define FRM_INT_EN	0x410
#define FRM_INT_STAT	0x414
#define FRM_INT_CLR	0x418
#define FRM_WAKEUP	0x41C
#define FRM_CLKCTL_DONE	0x420
#define FRM_IE_STAT	0x430
#define FRM_VE_STAT	0x440

/* Interface registers */
#define INTF_CFG	0x600
#define INTF_STAT	0x604
#define INTF_INT_EN	0x610
#define INTF_INT_STAT	0x614
#define INTF_INT_CLR	0x618
#define INTF_IE_STAT	0x630
#define INTF_VE_STAT	0x640

/* Interrupt status bits */
#define MGR_INT_TX_NACKED_2	BIT(25)
#define MGR_INT_MSG_BUF_CONTE	BIT(26)
#define MGR_INT_RX_MSG_RCVD	BIT(30)
#define MGR_INT_TX_MSG_SENT	BIT(31)

/* Framer config register settings */
#define FRM_ACTIVE	1
#define CLK_GEAR	7
#define ROOT_FREQ	11
#define REF_CLK_GEAR	15
#define INTR_WAKE	19

#define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \
		((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16))

#define SLIM_ROOT_FREQ		24576000
#define QCOM_SLIM_AUTOSUSPEND	1000

/* MAX message size over control channel */
#define SLIM_MSGQ_BUF_LEN	40
#define QCOM_TX_MSGS		2
#define QCOM_RX_MSGS		8
#define QCOM_BUF_ALLOC_RETRIES	10

#define CFG_PORT(r, v) ((v) ? CFG_PORT_V2(r) : CFG_PORT_V1(r))

/* V2 Component registers */
#define CFG_PORT_V2(r)		((r ## _V2))
#define COMP_CFG_V2		4
#define COMP_TRUST_CFG_V2	0x3000

/* V1 Component registers */
#define CFG_PORT_V1(r)		((r ## _V1))
#define COMP_CFG_V1		0
#define COMP_TRUST_CFG_V1	0x14

/* Resource group info for manager, and non-ported generic device-components */
#define EE_MGR_RSC_GRP	(1 << 10)
#define EE_NGD_2	(2 << 6)
#define EE_NGD_1	0

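/**
 * struct slim_ctrl_buf - circular buffer of fixed-size message slots
 * @base: buffer memory holding @n slots of @sl_sz bytes each
 * @lock: protects @head and @tail
 * @head: index of the oldest occupied slot (consumer side)
 * @tail: index at which the next slot is allocated (producer side)
 * @sl_sz: size of one message slot in bytes
 * @n: number of slots in the ring
 */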
struct slim_ctrl_buf {
	void		*base;
	spinlock_t	lock;
	int		head;
	int		tail;
	int		sl_sz;
	int		n;
};

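/**
 * struct qcom_slim_ctrl - Qualcomm SLIMbus manager controller state
 * @ctrl: SLIMbus core controller
 * @framer: framer advertised as the active framer for this bus
 * @dev: device backing this controller
 * @base: MMIO base of the controller ("ctrl" resource)
 * @slew_reg: optional slew-rate register mapping ("slew" resource)
 * @rx: ring of received messages handed off to the RX workqueue
 * @tx: ring of messages being assembled for transmission
 * @wr_comp: per-TX-slot completions signalled from the TX interrupt
 * @irq: manager interrupt line
 * @rxwq: workqueue that processes received messages
 * @wd: work item executed on @rxwq
 * @rclk: core (root) clock, set to SLIM_ROOT_FREQ
 * @hclk: interface clock
 */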
struct qcom_slim_ctrl {
	struct slim_controller	ctrl;
	struct slim_framer	framer;
	struct device		*dev;
	void __iomem		*base;
	void __iomem		*slew_reg;

	struct slim_ctrl_buf	rx;
	struct slim_ctrl_buf	tx;

	struct completion	**wr_comp;
	int			irq;
	struct workqueue_struct *rxwq;
	struct work_struct	wd;
	struct clk		*rclk;
	struct clk		*hclk;
};

static void qcom_slim_queue_tx(struct qcom_slim_ctrl *ctrl, void *buf,
			       u8 len, u32 tx_reg)
{
	int count = (len + 3) >> 2;

	__iowrite32_copy(ctrl->base + tx_reg, buf, count);

	/* Ensure order of subsequent writes */
	mb();
}

static void *slim_alloc_rxbuf(struct qcom_slim_ctrl *ctrl)
{
	unsigned long flags;
	int idx;

	spin_lock_irqsave(&ctrl->rx.lock, flags);
	if ((ctrl->rx.tail + 1) % ctrl->rx.n == ctrl->rx.head) {
		spin_unlock_irqrestore(&ctrl->rx.lock, flags);
		dev_err(ctrl->dev, "RX QUEUE full!");
		return NULL;
	}
	idx = ctrl->rx.tail;
	ctrl->rx.tail = (ctrl->rx.tail + 1) % ctrl->rx.n;
	spin_unlock_irqrestore(&ctrl->rx.lock, flags);

	return ctrl->rx.base + (idx * ctrl->rx.sl_sz);
}

static void slim_ack_txn(struct qcom_slim_ctrl *ctrl, int err)
{
	struct completion *comp;
	unsigned long flags;
	int idx;

	spin_lock_irqsave(&ctrl->tx.lock, flags);
	idx = ctrl->tx.head;
	ctrl->tx.head = (ctrl->tx.head + 1) % ctrl->tx.n;
	spin_unlock_irqrestore(&ctrl->tx.lock, flags);

	comp = ctrl->wr_comp[idx];
	ctrl->wr_comp[idx] = NULL;

	complete(comp);
}

static irqreturn_t qcom_slim_handle_tx_irq(struct qcom_slim_ctrl *ctrl,
					   u32 stat)
{
	int err = 0;

	if (stat & MGR_INT_TX_MSG_SENT)
		writel_relaxed(MGR_INT_TX_MSG_SENT,
			       ctrl->base + MGR_INT_CLR);

	if (stat & MGR_INT_TX_NACKED_2) {
		u32 mgr_stat = readl_relaxed(ctrl->base + MGR_STATUS);
		u32 mgr_ie_stat = readl_relaxed(ctrl->base + MGR_IE_STAT);
		u32 frm_stat = readl_relaxed(ctrl->base + FRM_STAT);
		u32 frm_cfg = readl_relaxed(ctrl->base + FRM_CFG);
		u32 frm_intr_stat = readl_relaxed(ctrl->base + FRM_INT_STAT);
		u32 frm_ie_stat = readl_relaxed(ctrl->base + FRM_IE_STAT);
		u32 intf_stat = readl_relaxed(ctrl->base + INTF_STAT);
		u32 intf_intr_stat = readl_relaxed(ctrl->base + INTF_INT_STAT);
		u32 intf_ie_stat = readl_relaxed(ctrl->base + INTF_IE_STAT);

		writel_relaxed(MGR_INT_TX_NACKED_2, ctrl->base + MGR_INT_CLR);

		dev_err(ctrl->dev, "TX Nack MGR:int:0x%x, stat:0x%x\n",
			stat, mgr_stat);
		dev_err(ctrl->dev, "TX Nack MGR:ie:0x%x\n", mgr_ie_stat);
		dev_err(ctrl->dev, "TX Nack FRM:int:0x%x, stat:0x%x\n",
			frm_intr_stat, frm_stat);
		dev_err(ctrl->dev, "TX Nack FRM:cfg:0x%x, ie:0x%x\n",
			frm_cfg, frm_ie_stat);
		dev_err(ctrl->dev, "TX Nack INTF:intr:0x%x, stat:0x%x\n",
			intf_intr_stat, intf_stat);
		dev_err(ctrl->dev, "TX Nack INTF:ie:0x%x\n",
			intf_ie_stat);
		err = -ENOTCONN;
	}

	slim_ack_txn(ctrl, err);

	return IRQ_HANDLED;
}

static irqreturn_t qcom_slim_handle_rx_irq(struct qcom_slim_ctrl *ctrl,
					   u32 stat)
{
	u32 *rx_buf, pkt[10];
	bool q_rx = false;
	u8 mc, mt, len;

	pkt[0] = readl_relaxed(ctrl->base + MGR_RX_MSG);
	mt = SLIM_HEADER_GET_MT(pkt[0]);
	len = SLIM_HEADER_GET_RL(pkt[0]);
	mc = SLIM_HEADER_GET_MC(pkt[0] >> 8);

	/*
	 * this message cannot be handled by ISR, so
	 * let work-queue handle it
	 */
	if (mt == SLIM_MSG_MT_CORE && mc == SLIM_MSG_MC_REPORT_PRESENT) {
		rx_buf = (u32 *)slim_alloc_rxbuf(ctrl);
		if (!rx_buf) {
			dev_err(ctrl->dev, "dropping RX:0x%x due to RX full\n",
				pkt[0]);
			goto rx_ret_irq;
		}
		rx_buf[0] = pkt[0];
	} else {
		rx_buf = pkt;
	}

	__ioread32_copy(rx_buf + 1, ctrl->base + MGR_RX_MSG + 4,
			DIV_ROUND_UP(len, 4));

	switch (mc) {
	case SLIM_MSG_MC_REPORT_PRESENT:
		q_rx = true;
		break;
	case SLIM_MSG_MC_REPLY_INFORMATION:
	case SLIM_MSG_MC_REPLY_VALUE:
		slim_msg_response(&ctrl->ctrl, (u8 *)(rx_buf + 1),
				  (u8)(*rx_buf >> 24), (len - 4));
		break;
	default:
		dev_err(ctrl->dev, "unsupported MC,%x MT:%x\n",
			mc, mt);
		break;
	}
rx_ret_irq:
	writel(MGR_INT_RX_MSG_RCVD, ctrl->base + MGR_INT_CLR);
	if (q_rx)
		queue_work(ctrl->rxwq, &ctrl->wd);

	return IRQ_HANDLED;
}

static irqreturn_t qcom_slim_interrupt(int irq, void *d)
{
	struct qcom_slim_ctrl *ctrl = d;
	u32 stat = readl_relaxed(ctrl->base + MGR_INT_STAT);
	int ret = IRQ_NONE;

	if (stat & MGR_INT_TX_MSG_SENT || stat & MGR_INT_TX_NACKED_2)
		ret = qcom_slim_handle_tx_irq(ctrl, stat);

	if (stat & MGR_INT_RX_MSG_RCVD)
		ret = qcom_slim_handle_rx_irq(ctrl, stat);

	return ret;
}

static int qcom_clk_pause_wakeup(struct slim_controller *sctrl)
{
	struct qcom_slim_ctrl *ctrl = dev_get_drvdata(sctrl->dev);

	clk_prepare_enable(ctrl->hclk);
	clk_prepare_enable(ctrl->rclk);
	enable_irq(ctrl->irq);

	writel_relaxed(1, ctrl->base + FRM_WAKEUP);
	/* Make sure framer wakeup write goes through before ISR fires */
	mb();
	/*
	 * HW workaround: currently the slave reports lost-sync messages
	 * after the SLIMbus comes out of clock pause. Transactions with the
	 * slave fail before the slave sends that report, so give the report
	 * some time to arrive. The SLIMbus wakes up in clock gear 10 at
	 * 24.576 MHz; with each superframe being 250 usecs, waiting for
	 * 5-10 superframes here ensures we get the message.
	 */
	usleep_range(1250, 2500);
	return 0;
}

static void *slim_alloc_txbuf(struct qcom_slim_ctrl *ctrl,
			      struct slim_msg_txn *txn,
			      struct completion *done)
{
	unsigned long flags;
	int idx;

	spin_lock_irqsave(&ctrl->tx.lock, flags);
	if (((ctrl->tx.head + 1) % ctrl->tx.n) == ctrl->tx.tail) {
		spin_unlock_irqrestore(&ctrl->tx.lock, flags);
		dev_err(ctrl->dev, "controller TX buf unavailable");
		return NULL;
	}
	idx = ctrl->tx.tail;
	ctrl->wr_comp[idx] = done;
	ctrl->tx.tail = (ctrl->tx.tail + 1) % ctrl->tx.n;

	spin_unlock_irqrestore(&ctrl->tx.lock, flags);

	return ctrl->tx.base + (idx * ctrl->tx.sl_sz);
}

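/*
 * Assemble a SLIMbus message in a TX ring slot and push it into the manager
 * TX FIFO. The first word packs RL, MT, MC, destination type and address
 * (see SLIM_MSG_ASM_FIRST_WORD); the on-stack completion is signalled from
 * the TX interrupt handler once the hardware reports the message as sent or
 * NACKed.
 */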
static int qcom_xfer_msg(struct slim_controller *sctrl,
			 struct slim_msg_txn *txn)
{
	struct qcom_slim_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
	DECLARE_COMPLETION_ONSTACK(done);
	void *pbuf = slim_alloc_txbuf(ctrl, txn, &done);
	unsigned long ms = txn->rl + HZ;
	u8 *puc;
	int ret = 0, timeout, retries = QCOM_BUF_ALLOC_RETRIES;
	u8 la = txn->la;
	u32 *head;

	/* HW expects length field to be excluded */
	txn->rl--;

	/* spin till buffer is made available */
	if (!pbuf) {
		while (retries--) {
			usleep_range(10000, 15000);
			pbuf = slim_alloc_txbuf(ctrl, txn, &done);
			if (pbuf)
				break;
		}
	}

	if (retries < 0 && !pbuf)
		return -ENOMEM;

	puc = (u8 *)pbuf;
	head = (u32 *)pbuf;

	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR) {
		*head = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt,
						txn->mc, 0, la);
		puc += 3;
	} else {
		*head = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt,
						txn->mc, 1, la);
		puc += 2;
	}

	if (slim_tid_txn(txn->mt, txn->mc))
		*(puc++) = txn->tid;

	if (slim_ec_txn(txn->mt, txn->mc)) {
		*(puc++) = (txn->ec & 0xFF);
		*(puc++) = (txn->ec >> 8) & 0xFF;
	}

	if (txn->msg && txn->msg->wbuf)
		memcpy(puc, txn->msg->wbuf, txn->msg->num_bytes);

	qcom_slim_queue_tx(ctrl, head, txn->rl, MGR_TX_MSG);
	timeout = wait_for_completion_timeout(&done, msecs_to_jiffies(ms));

	if (!timeout) {
		dev_err(ctrl->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
			txn->mt);
		ret = -ETIMEDOUT;
	}

	return ret;
}

static int qcom_set_laddr(struct slim_controller *sctrl,
			  struct slim_eaddr *ead, u8 laddr)
{
	struct qcom_slim_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
	struct {
		__be16 manf_id;
		__be16 prod_code;
		u8 dev_index;
		u8 instance;
		u8 laddr;
	} __packed p;
	struct slim_val_inf msg = {0};
	DEFINE_SLIM_EDEST_TXN(txn, SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS,
			      10, laddr, &msg);
	int ret;

	p.manf_id = cpu_to_be16(ead->manf_id);
	p.prod_code = cpu_to_be16(ead->prod_code);
	p.dev_index = ead->dev_index;
	p.instance = ead->instance;
	p.laddr = laddr;

	msg.wbuf = (void *)&p;
	msg.num_bytes = 7;
	ret = slim_do_transfer(&ctrl->ctrl, &txn);

	if (ret)
		dev_err(ctrl->dev, "set LA:0x%x failed:ret:%d\n",
			laddr, ret);
	return ret;
}

static int slim_get_current_rxbuf(struct qcom_slim_ctrl *ctrl, void *buf)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrl->rx.lock, flags);
	if (ctrl->rx.tail == ctrl->rx.head) {
		spin_unlock_irqrestore(&ctrl->rx.lock, flags);
		return -ENODATA;
	}
	memcpy(buf, ctrl->rx.base + (ctrl->rx.head * ctrl->rx.sl_sz),
	       ctrl->rx.sl_sz);

	ctrl->rx.head = (ctrl->rx.head + 1) % ctrl->rx.n;
	spin_unlock_irqrestore(&ctrl->rx.lock, flags);

	return 0;
}

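/*
 * Work function that drains the RX ring filled by the interrupt handler and
 * reports newly present devices to the SLIMbus core outside of IRQ context.
 */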
static void qcom_slim_rxwq(struct work_struct *work)
{
	u8 buf[SLIM_MSGQ_BUF_LEN];
	u8 mc, mt;
	int ret;
	struct qcom_slim_ctrl *ctrl = container_of(work, struct qcom_slim_ctrl,
						   wd);

	while ((slim_get_current_rxbuf(ctrl, buf)) != -ENODATA) {
		mt = SLIM_HEADER_GET_MT(buf[0]);
		mc = SLIM_HEADER_GET_MC(buf[1]);
		if (mt == SLIM_MSG_MT_CORE &&
		    mc == SLIM_MSG_MC_REPORT_PRESENT) {
			struct slim_eaddr ea;
			u8 laddr;

			ea.manf_id = be16_to_cpup((__be16 *)&buf[2]);
			ea.prod_code = be16_to_cpup((__be16 *)&buf[4]);
			ea.dev_index = buf[6];
			ea.instance = buf[7];

			ret = slim_device_report_present(&ctrl->ctrl, &ea,
							 &laddr);
			if (ret < 0)
				dev_err(ctrl->dev, "assign laddr failed:%d\n",
					ret);
		} else {
			dev_err(ctrl->dev, "unexpected message:mc:%x, mt:%x\n",
				mc, mt);
		}
	}
}

static void qcom_slim_prg_slew(struct platform_device *pdev,
			       struct qcom_slim_ctrl *ctrl)
{
	if (!ctrl->slew_reg) {
		/* SLEW RATE register for this SLIMbus */
		ctrl->slew_reg = devm_platform_ioremap_resource_byname(pdev, "slew");
		if (IS_ERR(ctrl->slew_reg))
			return;
	}

	writel_relaxed(1, ctrl->slew_reg);
	/* Make sure SLIMbus-slew rate enabling goes through */
	wmb();
}

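/*
 * Bring-up sequence: acquire clocks and the IRQ, set up the TX/RX message
 * rings, register with the SLIMbus core, then program the component,
 * manager, framer and interface blocks and enable runtime PM.
 */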
static int qcom_slim_probe(struct platform_device *pdev)
{
	struct qcom_slim_ctrl *ctrl;
	struct slim_controller *sctrl;
	int ret, ver;

	ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;

	ctrl->hclk = devm_clk_get(&pdev->dev, "iface");
	if (IS_ERR(ctrl->hclk))
		return PTR_ERR(ctrl->hclk);

	ctrl->rclk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(ctrl->rclk))
		return PTR_ERR(ctrl->rclk);

	ret = clk_set_rate(ctrl->rclk, SLIM_ROOT_FREQ);
	if (ret) {
		dev_err(&pdev->dev, "ref-clock set-rate failed:%d\n", ret);
		return ret;
	}

	ctrl->irq = platform_get_irq(pdev, 0);
	if (ctrl->irq < 0)
		return ctrl->irq;

	sctrl = &ctrl->ctrl;
	sctrl->dev = &pdev->dev;
	ctrl->dev = &pdev->dev;
	platform_set_drvdata(pdev, ctrl);
	dev_set_drvdata(ctrl->dev, ctrl);

	ctrl->base = devm_platform_ioremap_resource_byname(pdev, "ctrl");
	if (IS_ERR(ctrl->base))
		return PTR_ERR(ctrl->base);

	sctrl->set_laddr = qcom_set_laddr;
	sctrl->xfer_msg = qcom_xfer_msg;
	sctrl->wakeup = qcom_clk_pause_wakeup;
	ctrl->tx.n = QCOM_TX_MSGS;
	ctrl->tx.sl_sz = SLIM_MSGQ_BUF_LEN;
	ctrl->rx.n = QCOM_RX_MSGS;
	ctrl->rx.sl_sz = SLIM_MSGQ_BUF_LEN;
	ctrl->wr_comp = kcalloc(QCOM_TX_MSGS, sizeof(struct completion *),
				GFP_KERNEL);
	if (!ctrl->wr_comp)
		return -ENOMEM;

	spin_lock_init(&ctrl->rx.lock);
	spin_lock_init(&ctrl->tx.lock);
	INIT_WORK(&ctrl->wd, qcom_slim_rxwq);
	ctrl->rxwq = create_singlethread_workqueue("qcom_slim_rx");
	if (!ctrl->rxwq) {
		dev_err(ctrl->dev, "Failed to start Rx WQ\n");
		return -ENOMEM;
	}

	ctrl->framer.rootfreq = SLIM_ROOT_FREQ / 8;
	ctrl->framer.superfreq =
		ctrl->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
	sctrl->a_framer = &ctrl->framer;
	sctrl->clkgear = SLIM_MAX_CLK_GEAR;

	qcom_slim_prg_slew(pdev, ctrl);

	ret = devm_request_irq(&pdev->dev, ctrl->irq, qcom_slim_interrupt,
			       IRQF_TRIGGER_HIGH, "qcom_slim_irq", ctrl);
	if (ret) {
		dev_err(&pdev->dev, "request IRQ failed\n");
		goto err_request_irq_failed;
	}

	ret = clk_prepare_enable(ctrl->hclk);
	if (ret)
		goto err_hclk_enable_failed;

	ret = clk_prepare_enable(ctrl->rclk);
	if (ret)
		goto err_rclk_enable_failed;

	ctrl->tx.base = devm_kcalloc(&pdev->dev, ctrl->tx.n, ctrl->tx.sl_sz,
				     GFP_KERNEL);
	if (!ctrl->tx.base) {
		ret = -ENOMEM;
		goto err;
	}

	ctrl->rx.base = devm_kcalloc(&pdev->dev, ctrl->rx.n, ctrl->rx.sl_sz,
				     GFP_KERNEL);
	if (!ctrl->rx.base) {
		ret = -ENOMEM;
		goto err;
	}

	/* Register with framework before enabling frame, clock */
	ret = slim_register_controller(&ctrl->ctrl);
	if (ret) {
		dev_err(ctrl->dev, "error adding controller\n");
		goto err;
	}

	ver = readl_relaxed(ctrl->base);
	/* Version info in 16 MSbits */
	ver >>= 16;
	/* Component register initialization */
	writel(1, ctrl->base + CFG_PORT(COMP_CFG, ver));
	writel((EE_MGR_RSC_GRP | EE_NGD_2 | EE_NGD_1),
	       ctrl->base + CFG_PORT(COMP_TRUST_CFG, ver));

	writel((MGR_INT_TX_NACKED_2 |
		MGR_INT_MSG_BUF_CONTE | MGR_INT_RX_MSG_RCVD |
		MGR_INT_TX_MSG_SENT), ctrl->base + MGR_INT_EN);
	writel(1, ctrl->base + MGR_CFG);
	/* Framer register initialization */
	writel((1 << INTR_WAKE) | (0xA << REF_CLK_GEAR) |
	       (0xA << CLK_GEAR) | (1 << ROOT_FREQ) | (1 << FRM_ACTIVE) | 1,
	       ctrl->base + FRM_CFG);
	writel(MGR_CFG_ENABLE, ctrl->base + MGR_CFG);
	writel(1, ctrl->base + INTF_CFG);
	writel(1, ctrl->base + CFG_PORT(COMP_CFG, ver));

	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, QCOM_SLIM_AUTOSUSPEND);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	dev_dbg(ctrl->dev, "QCOM SB controller is up:ver:0x%x!\n", ver);
	return 0;

err:
	clk_disable_unprepare(ctrl->rclk);
err_rclk_enable_failed:
	clk_disable_unprepare(ctrl->hclk);
err_hclk_enable_failed:
err_request_irq_failed:
	destroy_workqueue(ctrl->rxwq);
	return ret;
}

static int qcom_slim_remove(struct platform_device *pdev)
{
	struct qcom_slim_ctrl *ctrl = platform_get_drvdata(pdev);

	pm_runtime_disable(&pdev->dev);
	slim_unregister_controller(&ctrl->ctrl);
	clk_disable_unprepare(ctrl->rclk);
	clk_disable_unprepare(ctrl->hclk);
	destroy_workqueue(ctrl->rxwq);
	return 0;
}

/*
 * If PM_RUNTIME is not defined, these 2 functions become helper
 * functions to be called from system suspend/resume.
 */
#ifdef CONFIG_PM
static int qcom_slim_runtime_suspend(struct device *device)
{
	struct qcom_slim_ctrl *ctrl = dev_get_drvdata(device);
	int ret;

	dev_dbg(device, "pm_runtime: suspending...\n");
	ret = slim_ctrl_clk_pause(&ctrl->ctrl, false, SLIM_CLK_UNSPECIFIED);
	if (ret) {
		dev_err(device, "clk pause not entered:%d", ret);
	} else {
		disable_irq(ctrl->irq);
		clk_disable_unprepare(ctrl->hclk);
		clk_disable_unprepare(ctrl->rclk);
	}
	return ret;
}

static int qcom_slim_runtime_resume(struct device *device)
{
	struct qcom_slim_ctrl *ctrl = dev_get_drvdata(device);
	int ret = 0;

	dev_dbg(device, "pm_runtime: resuming...\n");
	ret = slim_ctrl_clk_pause(&ctrl->ctrl, true, 0);
	if (ret)
		dev_err(device, "clk pause not exited:%d", ret);
	return ret;
}
#endif

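/*
 * System sleep hooks only touch the controller when runtime PM has not
 * already suspended it (or runtime PM is disabled); otherwise the hardware
 * is left in its runtime-PM state.
 */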
#ifdef CONFIG_PM_SLEEP
static int qcom_slim_suspend(struct device *dev)
{
	int ret = 0;

	if (!pm_runtime_enabled(dev) ||
	    (!pm_runtime_suspended(dev))) {
		dev_dbg(dev, "system suspend");
		ret = qcom_slim_runtime_suspend(dev);
	}

	return ret;
}

static int qcom_slim_resume(struct device *dev)
{
	if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
		int ret;

		dev_dbg(dev, "system resume");
		ret = qcom_slim_runtime_resume(dev);
		if (!ret) {
			pm_runtime_mark_last_busy(dev);
			pm_request_autosuspend(dev);
		}
		return ret;
	}
	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops qcom_slim_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(qcom_slim_suspend, qcom_slim_resume)
	SET_RUNTIME_PM_OPS(
			   qcom_slim_runtime_suspend,
			   qcom_slim_runtime_resume,
			   NULL
	)
};

static const struct of_device_id qcom_slim_dt_match[] = {
	{ .compatible = "qcom,slim", },
	{}
};

static struct platform_driver qcom_slim_driver = {
	.probe = qcom_slim_probe,
	.remove = qcom_slim_remove,
	.driver	= {
		.name = "qcom_slim_ctrl",
		.of_match_table = qcom_slim_dt_match,
		.pm = &qcom_slim_dev_pm_ops,
	},
};
module_platform_driver(qcom_slim_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm SLIMbus Controller");