// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2011-2017, The Linux Foundation
 */

#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include "slimbus.h"

/* Manager registers */
#define	MGR_CFG		0x200
#define	MGR_STATUS	0x204
#define	MGR_INT_EN	0x210
#define	MGR_INT_STAT	0x214
#define	MGR_INT_CLR	0x218
#define	MGR_TX_MSG	0x230
#define	MGR_RX_MSG	0x270
#define	MGR_IE_STAT	0x2F0
#define	MGR_VE_STAT	0x300
#define	MGR_CFG_ENABLE	1

/* Framer registers */
#define	FRM_CFG		0x400
#define	FRM_STAT	0x404
#define	FRM_INT_EN	0x410
#define	FRM_INT_STAT	0x414
#define	FRM_INT_CLR	0x418
#define	FRM_WAKEUP	0x41C
#define	FRM_CLKCTL_DONE	0x420
#define	FRM_IE_STAT	0x430
#define	FRM_VE_STAT	0x440

/* Interface registers */
#define	INTF_CFG	0x600
#define	INTF_STAT	0x604
#define	INTF_INT_EN	0x610
#define	INTF_INT_STAT	0x614
#define	INTF_INT_CLR	0x618
#define	INTF_IE_STAT	0x630
#define	INTF_VE_STAT	0x640

/* Interrupt status bits */
#define	MGR_INT_TX_NACKED_2	BIT(25)
#define	MGR_INT_MSG_BUF_CONTE	BIT(26)
#define	MGR_INT_RX_MSG_RCVD	BIT(30)
#define	MGR_INT_TX_MSG_SENT	BIT(31)

/* Framer config register settings */
#define	FRM_ACTIVE	1
#define	CLK_GEAR	7
#define	ROOT_FREQ	11
#define	REF_CLK_GEAR	15
#define	INTR_WAKE	19

#define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \
		((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16))

#define SLIM_ROOT_FREQ		24576000
#define QCOM_SLIM_AUTOSUSPEND	1000

/* MAX message size over control channel */
#define SLIM_MSGQ_BUF_LEN	40
#define QCOM_TX_MSGS		2
#define QCOM_RX_MSGS		8
#define QCOM_BUF_ALLOC_RETRIES	10
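
/*
 * Component register offsets differ between controller versions. CFG_PORT()
 * resolves a register name to its V1 or V2 offset using the version field
 * read from the component at probe time.
 */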
#define CFG_PORT(r, v) ((v) ? CFG_PORT_V2(r) : CFG_PORT_V1(r))

/* V2 Component registers */
#define CFG_PORT_V2(r) ((r ## _V2))
#define	COMP_CFG_V2		4
#define	COMP_TRUST_CFG_V2	0x3000

/* V1 Component registers */
#define CFG_PORT_V1(r) ((r ## _V1))
#define	COMP_CFG_V1		0
#define	COMP_TRUST_CFG_V1	0x14

/* Resource group info for manager, and non-ported generic device-components */
#define EE_MGR_RSC_GRP	(1 << 10)
#define EE_NGD_2	(2 << 6)
#define EE_NGD_1	0

struct slim_ctrl_buf {
	void		*base;
	spinlock_t	lock;
	int		head;
	int		tail;
	int		sl_sz;
	int		n;
};

struct qcom_slim_ctrl {
	struct slim_controller  ctrl;
	struct slim_framer	framer;
	struct device		*dev;
	void __iomem		*base;
	void __iomem		*slew_reg;

	struct slim_ctrl_buf	rx;
	struct slim_ctrl_buf	tx;

	struct completion	**wr_comp;
	int			irq;
	struct workqueue_struct *rxwq;
	struct work_struct	wd;
	struct clk		*rclk;
	struct clk		*hclk;
};

static void qcom_slim_queue_tx(struct qcom_slim_ctrl *ctrl, void *buf,
			       u8 len, u32 tx_reg)
{
	int count = (len + 3) >> 2;

	__iowrite32_copy(ctrl->base + tx_reg, buf, count);

	/* Ensure ordering of subsequent writes */
	mb();
}

static void *slim_alloc_rxbuf(struct qcom_slim_ctrl *ctrl)
{
	unsigned long flags;
	int idx;

	spin_lock_irqsave(&ctrl->rx.lock, flags);
	if ((ctrl->rx.tail + 1) % ctrl->rx.n == ctrl->rx.head) {
		spin_unlock_irqrestore(&ctrl->rx.lock, flags);
		dev_err(ctrl->dev, "RX QUEUE full!");
		return NULL;
	}
	idx = ctrl->rx.tail;
	ctrl->rx.tail = (ctrl->rx.tail + 1) % ctrl->rx.n;
	spin_unlock_irqrestore(&ctrl->rx.lock, flags);

	return ctrl->rx.base + (idx * ctrl->rx.sl_sz);
}

static void slim_ack_txn(struct qcom_slim_ctrl *ctrl, int err)
{
	struct completion *comp;
	unsigned long flags;
	int idx;

	spin_lock_irqsave(&ctrl->tx.lock, flags);
	idx = ctrl->tx.head;
	ctrl->tx.head = (ctrl->tx.head + 1) % ctrl->tx.n;
	spin_unlock_irqrestore(&ctrl->tx.lock, flags);

	comp = ctrl->wr_comp[idx];
	ctrl->wr_comp[idx] = NULL;

	complete(comp);
}

static irqreturn_t qcom_slim_handle_tx_irq(struct qcom_slim_ctrl *ctrl,
					   u32 stat)
{
	int err = 0;

	if (stat & MGR_INT_TX_MSG_SENT)
		writel_relaxed(MGR_INT_TX_MSG_SENT,
			       ctrl->base + MGR_INT_CLR);

	if (stat & MGR_INT_TX_NACKED_2) {
		u32 mgr_stat = readl_relaxed(ctrl->base + MGR_STATUS);
		u32 mgr_ie_stat = readl_relaxed(ctrl->base + MGR_IE_STAT);
		u32 frm_stat = readl_relaxed(ctrl->base + FRM_STAT);
		u32 frm_cfg = readl_relaxed(ctrl->base + FRM_CFG);
		u32 frm_intr_stat = readl_relaxed(ctrl->base + FRM_INT_STAT);
		u32 frm_ie_stat = readl_relaxed(ctrl->base + FRM_IE_STAT);
		u32 intf_stat = readl_relaxed(ctrl->base + INTF_STAT);
		u32 intf_intr_stat = readl_relaxed(ctrl->base + INTF_INT_STAT);
		u32 intf_ie_stat = readl_relaxed(ctrl->base + INTF_IE_STAT);

		writel_relaxed(MGR_INT_TX_NACKED_2, ctrl->base + MGR_INT_CLR);

		dev_err(ctrl->dev, "TX Nack MGR:int:0x%x, stat:0x%x\n",
			stat, mgr_stat);
		dev_err(ctrl->dev, "TX Nack MGR:ie:0x%x\n", mgr_ie_stat);
		dev_err(ctrl->dev, "TX Nack FRM:int:0x%x, stat:0x%x\n",
			frm_intr_stat, frm_stat);
		dev_err(ctrl->dev, "TX Nack FRM:cfg:0x%x, ie:0x%x\n",
			frm_cfg, frm_ie_stat);
		dev_err(ctrl->dev, "TX Nack INTF:intr:0x%x, stat:0x%x\n",
			intf_intr_stat, intf_stat);
		dev_err(ctrl->dev, "TX Nack INTF:ie:0x%x\n",
			intf_ie_stat);
		err = -ENOTCONN;
	}

	slim_ack_txn(ctrl, err);

	return IRQ_HANDLED;
}
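
/*
 * RX interrupt handler: pull the received message out of the manager RX FIFO.
 * Replies are handed straight to the SLIMbus core; REPORT_PRESENT is copied
 * into the RX ring and deferred to the workqueue, since assigning a logical
 * address may sleep.
 */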
static irqreturn_t qcom_slim_handle_rx_irq(struct qcom_slim_ctrl *ctrl,
					   u32 stat)
{
	u32 *rx_buf, pkt[10];
	bool q_rx = false;
	u8 mc, mt, len;

	pkt[0] = readl_relaxed(ctrl->base + MGR_RX_MSG);
	mt = SLIM_HEADER_GET_MT(pkt[0]);
	len = SLIM_HEADER_GET_RL(pkt[0]);
	mc = SLIM_HEADER_GET_MC(pkt[0]>>8);

	/*
	 * This message cannot be handled by the ISR, so
	 * let the work-queue handle it.
	 */
	if (mt == SLIM_MSG_MT_CORE && mc == SLIM_MSG_MC_REPORT_PRESENT) {
		rx_buf = (u32 *)slim_alloc_rxbuf(ctrl);
		if (!rx_buf) {
			dev_err(ctrl->dev, "dropping RX:0x%x due to RX full\n",
				pkt[0]);
			goto rx_ret_irq;
		}
		rx_buf[0] = pkt[0];

	} else {
		rx_buf = pkt;
	}

	__ioread32_copy(rx_buf + 1, ctrl->base + MGR_RX_MSG + 4,
			DIV_ROUND_UP(len, 4));

	switch (mc) {

	case SLIM_MSG_MC_REPORT_PRESENT:
		q_rx = true;
		break;
	case SLIM_MSG_MC_REPLY_INFORMATION:
	case SLIM_MSG_MC_REPLY_VALUE:
		slim_msg_response(&ctrl->ctrl, (u8 *)(rx_buf + 1),
				  (u8)(*rx_buf >> 24), (len - 4));
		break;
	default:
		dev_err(ctrl->dev, "unsupported MC,%x MT:%x\n",
			mc, mt);
		break;
	}
rx_ret_irq:
	writel(MGR_INT_RX_MSG_RCVD, ctrl->base + MGR_INT_CLR);
	if (q_rx)
		queue_work(ctrl->rxwq, &ctrl->wd);

	return IRQ_HANDLED;
}

static irqreturn_t qcom_slim_interrupt(int irq, void *d)
{
	struct qcom_slim_ctrl *ctrl = d;
	u32 stat = readl_relaxed(ctrl->base + MGR_INT_STAT);
	int ret = IRQ_NONE;

	if (stat & MGR_INT_TX_MSG_SENT || stat & MGR_INT_TX_NACKED_2)
		ret = qcom_slim_handle_tx_irq(ctrl, stat);

	if (stat & MGR_INT_RX_MSG_RCVD)
		ret = qcom_slim_handle_rx_irq(ctrl, stat);

	return ret;
}

static int qcom_clk_pause_wakeup(struct slim_controller *sctrl)
{
	struct qcom_slim_ctrl *ctrl = dev_get_drvdata(sctrl->dev);

	clk_prepare_enable(ctrl->hclk);
	clk_prepare_enable(ctrl->rclk);
	enable_irq(ctrl->irq);

	writel_relaxed(1, ctrl->base + FRM_WAKEUP);
	/* Make sure framer wakeup write goes through before ISR fires */
	mb();
	/*
	 * HW workaround: currently, slaves report lost-sync messages after
	 * the SLIMbus comes out of clock pause. Transactions with a slave
	 * fail before the slave sends that report, so give the report some
	 * time to arrive. The SLIMbus wakes up in clock gear 10 at
	 * 24.576 MHz. With each superframe being 250 usecs, wait for 5-10
	 * superframes here to ensure we get the message.
	 */
	usleep_range(1250, 2500);
	return 0;
}
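
/*
 * Reserve the next free slot in the TX ring and remember the caller's
 * completion so that the TX-done/NACK interrupt path can signal it.
 */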
static void *slim_alloc_txbuf(struct qcom_slim_ctrl *ctrl,
			      struct slim_msg_txn *txn,
			      struct completion *done)
{
	unsigned long flags;
	int idx;

	spin_lock_irqsave(&ctrl->tx.lock, flags);
	if (((ctrl->tx.head + 1) % ctrl->tx.n) == ctrl->tx.tail) {
		spin_unlock_irqrestore(&ctrl->tx.lock, flags);
		dev_err(ctrl->dev, "controller TX buf unavailable");
		return NULL;
	}
	idx = ctrl->tx.tail;
	ctrl->wr_comp[idx] = done;
	ctrl->tx.tail = (ctrl->tx.tail + 1) % ctrl->tx.n;

	spin_unlock_irqrestore(&ctrl->tx.lock, flags);

	return ctrl->tx.base + (idx * ctrl->tx.sl_sz);
}

static int qcom_xfer_msg(struct slim_controller *sctrl,
			 struct slim_msg_txn *txn)
{
	struct qcom_slim_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
	DECLARE_COMPLETION_ONSTACK(done);
	void *pbuf = slim_alloc_txbuf(ctrl, txn, &done);
	unsigned long ms = txn->rl + HZ;
	u8 *puc;
	int ret = 0, timeout, retries = QCOM_BUF_ALLOC_RETRIES;
	u8 la = txn->la;
	u32 *head;
	/* HW expects length field to be excluded */
	txn->rl--;

	/* Retry until a TX buffer becomes available */
	if (!pbuf) {
		while (retries--) {
			usleep_range(10000, 15000);
			pbuf = slim_alloc_txbuf(ctrl, txn, &done);
			if (pbuf)
				break;
		}
	}

	if (retries < 0 && !pbuf)
		return -ENOMEM;

	puc = (u8 *)pbuf;
	head = (u32 *)pbuf;

	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR) {
		*head = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt,
						txn->mc, 0, la);
		puc += 3;
	} else {
		*head = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt,
						txn->mc, 1, la);
		puc += 2;
	}

	if (slim_tid_txn(txn->mt, txn->mc))
		*(puc++) = txn->tid;

	if (slim_ec_txn(txn->mt, txn->mc)) {
		*(puc++) = (txn->ec & 0xFF);
		*(puc++) = (txn->ec >> 8) & 0xFF;
	}

	if (txn->msg && txn->msg->wbuf)
		memcpy(puc, txn->msg->wbuf, txn->msg->num_bytes);

	qcom_slim_queue_tx(ctrl, head, txn->rl, MGR_TX_MSG);
	timeout = wait_for_completion_timeout(&done, msecs_to_jiffies(ms));

	if (!timeout) {
		dev_err(ctrl->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
			txn->mt);
		ret = -ETIMEDOUT;
	}

	return ret;
}

static int qcom_set_laddr(struct slim_controller *sctrl,
			  struct slim_eaddr *ead, u8 laddr)
{
	struct qcom_slim_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
	struct {
		__be16 manf_id;
		__be16 prod_code;
		u8 dev_index;
		u8 instance;
		u8 laddr;
	} __packed p;
	struct slim_val_inf msg = {0};
	DEFINE_SLIM_EDEST_TXN(txn, SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS,
			      10, laddr, &msg);
	int ret;

	p.manf_id = cpu_to_be16(ead->manf_id);
	p.prod_code = cpu_to_be16(ead->prod_code);
	p.dev_index = ead->dev_index;
	p.instance = ead->instance;
	p.laddr = laddr;

	msg.wbuf = (void *)&p;
	msg.num_bytes = 7;
	ret = slim_do_transfer(&ctrl->ctrl, &txn);

	if (ret)
		dev_err(ctrl->dev, "set LA:0x%x failed:ret:%d\n",
			laddr, ret);
	return ret;
}

static int slim_get_current_rxbuf(struct qcom_slim_ctrl *ctrl, void *buf)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrl->rx.lock, flags);
	if (ctrl->rx.tail == ctrl->rx.head) {
		spin_unlock_irqrestore(&ctrl->rx.lock, flags);
		return -ENODATA;
	}
	memcpy(buf, ctrl->rx.base + (ctrl->rx.head * ctrl->rx.sl_sz),
	       ctrl->rx.sl_sz);

	ctrl->rx.head = (ctrl->rx.head + 1) % ctrl->rx.n;
	spin_unlock_irqrestore(&ctrl->rx.lock, flags);

	return 0;
}
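
/*
 * Workqueue handler: drain the RX ring and report newly present devices to
 * the SLIMbus core so they get a logical address assigned.
 */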
static void qcom_slim_rxwq(struct work_struct *work)
{
	u8 buf[SLIM_MSGQ_BUF_LEN];
	u8 mc, mt;
	int ret;
	struct qcom_slim_ctrl *ctrl = container_of(work, struct qcom_slim_ctrl,
						   wd);

	while ((slim_get_current_rxbuf(ctrl, buf)) != -ENODATA) {
		mt = SLIM_HEADER_GET_MT(buf[0]);
		mc = SLIM_HEADER_GET_MC(buf[1]);
		if (mt == SLIM_MSG_MT_CORE &&
		    mc == SLIM_MSG_MC_REPORT_PRESENT) {
			struct slim_eaddr ea;
			u8 laddr;

			ea.manf_id = be16_to_cpup((__be16 *)&buf[2]);
			ea.prod_code = be16_to_cpup((__be16 *)&buf[4]);
			ea.dev_index = buf[6];
			ea.instance = buf[7];

			ret = slim_device_report_present(&ctrl->ctrl, &ea,
							 &laddr);
			if (ret < 0)
				dev_err(ctrl->dev, "assign laddr failed:%d\n",
					ret);
		} else {
			dev_err(ctrl->dev, "unexpected message:mc:%x, mt:%x\n",
				mc, mt);
		}
	}
}

static void qcom_slim_prg_slew(struct platform_device *pdev,
			       struct qcom_slim_ctrl *ctrl)
{
	if (!ctrl->slew_reg) {
		/* SLEW RATE register for this SLIMbus */
		ctrl->slew_reg = devm_platform_ioremap_resource_byname(pdev, "slew");
		if (IS_ERR(ctrl->slew_reg))
			return;
	}

	writel_relaxed(1, ctrl->slew_reg);
	/* Make sure SLIMbus-slew rate enabling goes through */
	wmb();
}

static int qcom_slim_probe(struct platform_device *pdev)
{
	struct qcom_slim_ctrl *ctrl;
	struct slim_controller *sctrl;
	struct resource *slim_mem;
	int ret, ver;

	ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;

	ctrl->hclk = devm_clk_get(&pdev->dev, "iface");
	if (IS_ERR(ctrl->hclk))
		return PTR_ERR(ctrl->hclk);

	ctrl->rclk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(ctrl->rclk))
		return PTR_ERR(ctrl->rclk);

	ret = clk_set_rate(ctrl->rclk, SLIM_ROOT_FREQ);
	if (ret) {
		dev_err(&pdev->dev, "ref-clock set-rate failed:%d\n", ret);
		return ret;
	}

	ctrl->irq = platform_get_irq(pdev, 0);
	if (ctrl->irq < 0)
		return ctrl->irq;

	sctrl = &ctrl->ctrl;
	sctrl->dev = &pdev->dev;
	ctrl->dev = &pdev->dev;
	platform_set_drvdata(pdev, ctrl);
	dev_set_drvdata(ctrl->dev, ctrl);

	slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
	ctrl->base = devm_ioremap_resource(ctrl->dev, slim_mem);
	if (IS_ERR(ctrl->base))
		return PTR_ERR(ctrl->base);

	sctrl->set_laddr = qcom_set_laddr;
	sctrl->xfer_msg = qcom_xfer_msg;
	sctrl->wakeup = qcom_clk_pause_wakeup;
	ctrl->tx.n = QCOM_TX_MSGS;
	ctrl->tx.sl_sz = SLIM_MSGQ_BUF_LEN;
	ctrl->rx.n = QCOM_RX_MSGS;
	ctrl->rx.sl_sz = SLIM_MSGQ_BUF_LEN;
	ctrl->wr_comp = kcalloc(QCOM_TX_MSGS, sizeof(struct completion *),
				GFP_KERNEL);
	if (!ctrl->wr_comp)
		return -ENOMEM;

	spin_lock_init(&ctrl->rx.lock);
	spin_lock_init(&ctrl->tx.lock);
	INIT_WORK(&ctrl->wd, qcom_slim_rxwq);
	ctrl->rxwq = create_singlethread_workqueue("qcom_slim_rx");
	if (!ctrl->rxwq) {
		dev_err(ctrl->dev, "Failed to start Rx WQ\n");
		return -ENOMEM;
	}
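
	/*
	 * This controller provides the bus framer: advertise the root and
	 * superframe frequencies derived from the 24.576 MHz root clock and
	 * start at the maximum clock gear.
	 */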
	ctrl->framer.rootfreq = SLIM_ROOT_FREQ / 8;
	ctrl->framer.superfreq =
		ctrl->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
	sctrl->a_framer = &ctrl->framer;
	sctrl->clkgear = SLIM_MAX_CLK_GEAR;

	qcom_slim_prg_slew(pdev, ctrl);

	ret = devm_request_irq(&pdev->dev, ctrl->irq, qcom_slim_interrupt,
			       IRQF_TRIGGER_HIGH, "qcom_slim_irq", ctrl);
	if (ret) {
		dev_err(&pdev->dev, "request IRQ failed\n");
		goto err_request_irq_failed;
	}

	ret = clk_prepare_enable(ctrl->hclk);
	if (ret)
		goto err_hclk_enable_failed;

	ret = clk_prepare_enable(ctrl->rclk);
	if (ret)
		goto err_rclk_enable_failed;

	ctrl->tx.base = devm_kcalloc(&pdev->dev, ctrl->tx.n, ctrl->tx.sl_sz,
				     GFP_KERNEL);
	if (!ctrl->tx.base) {
		ret = -ENOMEM;
		goto err;
	}

	ctrl->rx.base = devm_kcalloc(&pdev->dev, ctrl->rx.n, ctrl->rx.sl_sz,
				     GFP_KERNEL);
	if (!ctrl->rx.base) {
		ret = -ENOMEM;
		goto err;
	}

	/* Register with framework before enabling frame, clock */
	ret = slim_register_controller(&ctrl->ctrl);
	if (ret) {
		dev_err(ctrl->dev, "error adding controller\n");
		goto err;
	}

	ver = readl_relaxed(ctrl->base);
	/* Version info in 16 MSbits */
	ver >>= 16;
	/* Component register initialization */
	writel(1, ctrl->base + CFG_PORT(COMP_CFG, ver));
	writel((EE_MGR_RSC_GRP | EE_NGD_2 | EE_NGD_1),
	       ctrl->base + CFG_PORT(COMP_TRUST_CFG, ver));

	writel((MGR_INT_TX_NACKED_2 |
		MGR_INT_MSG_BUF_CONTE | MGR_INT_RX_MSG_RCVD |
		MGR_INT_TX_MSG_SENT), ctrl->base + MGR_INT_EN);
	writel(1, ctrl->base + MGR_CFG);
	/* Framer register initialization */
	writel((1 << INTR_WAKE) | (0xA << REF_CLK_GEAR) |
	       (0xA << CLK_GEAR) | (1 << ROOT_FREQ) | (1 << FRM_ACTIVE) | 1,
	       ctrl->base + FRM_CFG);
	writel(MGR_CFG_ENABLE, ctrl->base + MGR_CFG);
	writel(1, ctrl->base + INTF_CFG);
	writel(1, ctrl->base + CFG_PORT(COMP_CFG, ver));

	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, QCOM_SLIM_AUTOSUSPEND);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	dev_dbg(ctrl->dev, "QCOM SB controller is up:ver:0x%x!\n", ver);
	return 0;

err:
	clk_disable_unprepare(ctrl->rclk);
err_rclk_enable_failed:
	clk_disable_unprepare(ctrl->hclk);
err_hclk_enable_failed:
err_request_irq_failed:
	destroy_workqueue(ctrl->rxwq);
	return ret;
}

static int qcom_slim_remove(struct platform_device *pdev)
{
	struct qcom_slim_ctrl *ctrl = platform_get_drvdata(pdev);

	pm_runtime_disable(&pdev->dev);
	slim_unregister_controller(&ctrl->ctrl);
	clk_disable_unprepare(ctrl->rclk);
	clk_disable_unprepare(ctrl->hclk);
	destroy_workqueue(ctrl->rxwq);
	return 0;
}

/*
 * If PM_RUNTIME is not defined, these 2 functions become helper
 * functions to be called from system suspend/resume.
 */
#ifdef CONFIG_PM
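/*
 * Runtime suspend asks the SLIMbus core to enter clock pause; only when that
 * succeeds are the interrupt and the interface/core clocks turned off.
 */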
static int qcom_slim_runtime_suspend(struct device *device)
{
	struct qcom_slim_ctrl *ctrl = dev_get_drvdata(device);
	int ret;

	dev_dbg(device, "pm_runtime: suspending...\n");
	ret = slim_ctrl_clk_pause(&ctrl->ctrl, false, SLIM_CLK_UNSPECIFIED);
	if (ret) {
		dev_err(device, "clk pause not entered:%d", ret);
	} else {
		disable_irq(ctrl->irq);
		clk_disable_unprepare(ctrl->hclk);
		clk_disable_unprepare(ctrl->rclk);
	}
	return ret;
}

static int qcom_slim_runtime_resume(struct device *device)
{
	struct qcom_slim_ctrl *ctrl = dev_get_drvdata(device);
	int ret = 0;

	dev_dbg(device, "pm_runtime: resuming...\n");
	ret = slim_ctrl_clk_pause(&ctrl->ctrl, true, 0);
	if (ret)
		dev_err(device, "clk pause not exited:%d", ret);
	return ret;
}
#endif

#ifdef CONFIG_PM_SLEEP
static int qcom_slim_suspend(struct device *dev)
{
	int ret = 0;

	if (!pm_runtime_enabled(dev) ||
	    (!pm_runtime_suspended(dev))) {
		dev_dbg(dev, "system suspend");
		ret = qcom_slim_runtime_suspend(dev);
	}

	return ret;
}

static int qcom_slim_resume(struct device *dev)
{
	if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
		int ret;

		dev_dbg(dev, "system resume");
		ret = qcom_slim_runtime_resume(dev);
		if (!ret) {
			pm_runtime_mark_last_busy(dev);
			pm_request_autosuspend(dev);
		}
		return ret;
	}
	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops qcom_slim_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(qcom_slim_suspend, qcom_slim_resume)
	SET_RUNTIME_PM_OPS(
		qcom_slim_runtime_suspend,
		qcom_slim_runtime_resume,
		NULL
	)
};

static const struct of_device_id qcom_slim_dt_match[] = {
	{ .compatible = "qcom,slim", },
	{ .compatible = "qcom,apq8064-slim", },
	{}
};

static struct platform_driver qcom_slim_driver = {
	.probe = qcom_slim_probe,
	.remove = qcom_slim_remove,
	.driver	= {
		.name = "qcom_slim_ctrl",
		.of_match_table = qcom_slim_dt_match,
		.pm = &qcom_slim_dev_pm_ops,
	},
};
module_platform_driver(qcom_slim_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm SLIMbus Controller");