// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME

#include <linux/atomic.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

#include <soc/qcom/cmd-db.h>
#include <soc/qcom/tcs.h>
#include <dt-bindings/soc/qcom,rpmh-rsc.h>

#include "rpmh-internal.h"

#define CREATE_TRACE_POINTS
#include "trace-rpmh.h"

#define RSC_DRV_TCS_OFFSET	672
#define RSC_DRV_CMD_OFFSET	20

/* DRV HW Solver Configuration Information Register */
#define DRV_SOLVER_CONFIG	0x04
#define DRV_HW_SOLVER_MASK	1
#define DRV_HW_SOLVER_SHIFT	24

/* DRV TCS Configuration Information Register */
#define DRV_PRNT_CHLD_CONFIG	0x0C
#define DRV_NUM_TCS_MASK	0x3F
#define DRV_NUM_TCS_SHIFT	6
#define DRV_NCPT_MASK		0x1F
#define DRV_NCPT_SHIFT		27

/* Offsets for common TCS Registers, one bit per TCS */
#define RSC_DRV_IRQ_ENABLE	0x00
#define RSC_DRV_IRQ_STATUS	0x04
#define RSC_DRV_IRQ_CLEAR	0x08	/* w/o; write 1 to clear */

/*
 * Offsets for per TCS Registers.
 *
 * TCSes start at 0x10 from tcs_base and are stored one after another.
 * Multiply tcs_id by RSC_DRV_TCS_OFFSET to find a given TCS and add one
 * of the below to find a register.
 */
#define RSC_DRV_CMD_WAIT_FOR_CMPL	0x10	/* 1 bit per command */
#define RSC_DRV_CONTROL			0x14
#define RSC_DRV_STATUS			0x18	/* zero if tcs is busy */
#define RSC_DRV_CMD_ENABLE		0x1C	/* 1 bit per command */

/*
 * Offsets for per command in a TCS.
 *
 * Commands (up to 16) start at 0x30 in a TCS; multiply command index
 * by RSC_DRV_CMD_OFFSET and add one of the below to find a register.
 */
#define RSC_DRV_CMD_MSGID	0x30
#define RSC_DRV_CMD_ADDR	0x34
#define RSC_DRV_CMD_DATA	0x38
#define RSC_DRV_CMD_STATUS	0x3C
#define RSC_DRV_CMD_RESP_DATA	0x40

#define TCS_AMC_MODE_ENABLE	BIT(16)
#define TCS_AMC_MODE_TRIGGER	BIT(24)

/* TCS CMD register bit mask */
#define CMD_MSGID_LEN		8
#define CMD_MSGID_RESP_REQ	BIT(8)
#define CMD_MSGID_WRITE		BIT(16)
#define CMD_STATUS_ISSUED	BIT(8)
#define CMD_STATUS_COMPL	BIT(16)

/*
 * Here's a high level overview of how all the registers in RPMH work
 * together:
 *
 * - The main rpmh-rsc address is the base of a register space that can
 *   be used to find overall configuration of the hardware
 *   (DRV_PRNT_CHLD_CONFIG). Also found within the rpmh-rsc register
 *   space are all the TCS blocks. The offset of the TCS blocks is
 *   specified in the device tree by "qcom,tcs-offset" and used to
 *   compute tcs_base.
 * - TCS blocks come one after another. Type, count, and order are
 *   specified by the device tree as "qcom,tcs-config".
 * - Each TCS block has some registers, then space for up to 16 commands.
 *   Note that though address space is reserved for 16 commands, fewer
 *   might be present. See ncpt (num cmds per TCS).
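 *
 * As a worked example of the layout above (purely illustrative; the
 * numbers follow from the RSC_DRV_* constants and tcs_cmd_addr()
 * below): command 2 of TCS 3 has its RSC_DRV_CMD_ADDR register at
 * tcs_base + 3 * 672 + 0x34 + 2 * 20 = tcs_base + 0x83c.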
 *
 * Here's a picture:
 *
 * +--------------------------------------------------+
 * |RSC                                               |
 * |  ctrl                                            |
 * |                                                  |
 * | Drvs:                                            |
 * | +----------------------------------------------+ |
 * | |DRV0                                          | |
 * | |  ctrl/config                                 | |
 * | |  IRQ                                         | |
 * | |                                              | |
 * | | TCSes:                                       | |
 * | | +------------------------------------------+ | |
 * | | |TCS0  |  |  |  |  |  |  |  |  |  |  |  |  | | |
 * | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | |
 * | | |      |  |  |  |  |  |  |  |  |  |  |  |  | | |
 * | | +------------------------------------------+ | |
 * | | +------------------------------------------+ | |
 * | | |TCS1  |  |  |  |  |  |  |  |  |  |  |  |  | | |
 * | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | |
 * | | |      |  |  |  |  |  |  |  |  |  |  |  |  | | |
 * | | +------------------------------------------+ | |
 * | | +------------------------------------------+ | |
 * | | |TCS2  |  |  |  |  |  |  |  |  |  |  |  |  | | |
 * | | | ctrl | 0| 1| 2| 3| 4| 5| .| .| .| .|14|15| | |
 * | | |      |  |  |  |  |  |  |  |  |  |  |  |  | | |
 * | | +------------------------------------------+ | |
 * | | ......                                       | |
 * | +----------------------------------------------+ |
 * | +----------------------------------------------+ |
 * | |DRV1                                          | |
 * | | (same as DRV0)                               | |
 * | +----------------------------------------------+ |
 * | ......                                           |
 * +--------------------------------------------------+
 */

static inline void __iomem *
tcs_reg_addr(const struct rsc_drv *drv, int reg, int tcs_id)
{
	return drv->tcs_base + RSC_DRV_TCS_OFFSET * tcs_id + reg;
}

static inline void __iomem *
tcs_cmd_addr(const struct rsc_drv *drv, int reg, int tcs_id, int cmd_id)
{
	return tcs_reg_addr(drv, reg, tcs_id) + RSC_DRV_CMD_OFFSET * cmd_id;
}

static u32 read_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id,
			int cmd_id)
{
	return readl_relaxed(tcs_cmd_addr(drv, reg, tcs_id, cmd_id));
}

static u32 read_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id)
{
	return readl_relaxed(tcs_reg_addr(drv, reg, tcs_id));
}

static void write_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id,
			  int cmd_id, u32 data)
{
	writel_relaxed(data, tcs_cmd_addr(drv, reg, tcs_id, cmd_id));
}

static void write_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id,
			  u32 data)
{
	writel_relaxed(data, tcs_reg_addr(drv, reg, tcs_id));
}

static void write_tcs_reg_sync(const struct rsc_drv *drv, int reg, int tcs_id,
			       u32 data)
{
	int i;

	writel(data, tcs_reg_addr(drv, reg, tcs_id));

	/*
	 * Wait until we read back the same value. Use a counter rather than
	 * ktime for timeout since this may be called after timekeeping stops.
	 */
	for (i = 0; i < USEC_PER_SEC; i++) {
		if (readl(tcs_reg_addr(drv, reg, tcs_id)) == data)
			return;
		udelay(1);
	}
	pr_err("%s: error writing %#x to %d:%#x\n", drv->name,
	       data, tcs_id, reg);
}
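
/*
 * A small illustration of the common IRQ registers above (one bit per
 * TCS): mirroring what tcs_tx_done() does, the pending status of TCS 5
 * would be cleared with:
 *
 *	writel_relaxed(BIT(5), drv->tcs_base + RSC_DRV_IRQ_CLEAR);
 */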

/**
 * tcs_is_free() - Return if a TCS is totally free.
 * @drv:    The RSC controller.
 * @tcs_id: The global ID of this TCS.
 *
 * Returns true if nobody has claimed this TCS (by setting tcs_in_use).
 *
 * Context: Must be called with the drv->lock held.
 *
 * Return: true if the given TCS is free.
 */
static bool tcs_is_free(struct rsc_drv *drv, int tcs_id)
{
	return !test_bit(tcs_id, drv->tcs_in_use);
}

/**
 * tcs_invalidate() - Invalidate all TCSes of the given type (sleep or wake).
 * @drv:  The RSC controller.
 * @type: SLEEP_TCS or WAKE_TCS
 *
 * This will clear the "slots" variable of the given tcs_group and also
 * tell the hardware to forget about all entries.
 *
 * The caller must ensure that no other RPMH actions are happening when this
 * function is called, since otherwise the device may immediately become
 * used again even before this function exits.
 */
static void tcs_invalidate(struct rsc_drv *drv, int type)
{
	int m;
	struct tcs_group *tcs = &drv->tcs[type];

	/* Caller ensures nobody else is running so no lock */
	if (bitmap_empty(tcs->slots, MAX_TCS_SLOTS))
		return;

	for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++)
		write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, m, 0);

	bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
}

/**
 * rpmh_rsc_invalidate() - Invalidate sleep and wake TCSes.
 * @drv: The RSC controller.
 *
 * The caller must ensure that no other RPMH actions are happening when this
 * function is called, since otherwise the device may immediately become
 * used again even before this function exits.
 */
void rpmh_rsc_invalidate(struct rsc_drv *drv)
{
	tcs_invalidate(drv, SLEEP_TCS);
	tcs_invalidate(drv, WAKE_TCS);
}

/**
 * get_tcs_for_msg() - Get the tcs_group used to send the given message.
 * @drv: The RSC controller.
 * @msg: The message we want to send.
 *
 * This is normally pretty straightforward except if we are trying to send
 * an ACTIVE_ONLY message but don't have any active_only TCSes.
 *
 * Return: A pointer to a tcs_group or an ERR_PTR.
 */
static struct tcs_group *get_tcs_for_msg(struct rsc_drv *drv,
					 const struct tcs_request *msg)
{
	int type;
	struct tcs_group *tcs;

	switch (msg->state) {
	case RPMH_ACTIVE_ONLY_STATE:
		type = ACTIVE_TCS;
		break;
	case RPMH_WAKE_ONLY_STATE:
		type = WAKE_TCS;
		break;
	case RPMH_SLEEP_STATE:
		type = SLEEP_TCS;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	/*
	 * If we are making an active request on a RSC that does not have a
	 * dedicated TCS for active state use, then re-purpose a wake TCS to
	 * send active votes. This is safe because we ensure any active-only
	 * transfers have finished before we use it (maybe by running from
	 * the last CPU in PM code).
	 */
	tcs = &drv->tcs[type];
	if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs)
		tcs = &drv->tcs[WAKE_TCS];

	return tcs;
}
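
/*
 * To make the tcs_group mask/offset bookkeeping concrete (a worked
 * example with illustrative numbers, matching the math in
 * rpmh_probe_tcs_config()): a group with tcs->offset = 2 and
 * tcs->num_tcs = 3 gets tcs->mask = ((1 << 3) - 1) << 2 = 0b11100, so
 * in get_req_from_tcs() below global tcs_id 3 belongs to that group
 * and maps to tcs->req[3 - 2] = tcs->req[1].
 */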

/**
 * get_req_from_tcs() - Get a stashed request that was xfering on the given TCS.
 * @drv:    The RSC controller.
 * @tcs_id: The global ID of this TCS.
 *
 * For ACTIVE_ONLY transfers we want to call back into the client when the
 * transfer finishes. To do this we need the "request" that the client
 * originally provided us. This function grabs the request that we stashed
 * when we started the transfer.
 *
 * This only makes sense for ACTIVE_ONLY transfers since those are the only
 * ones we track sending (the only ones we enable interrupts for and the only
 * ones we call back to the client for).
 *
 * Return: The stashed request.
 */
static const struct tcs_request *get_req_from_tcs(struct rsc_drv *drv,
						  int tcs_id)
{
	struct tcs_group *tcs;
	int i;

	for (i = 0; i < TCS_TYPE_NR; i++) {
		tcs = &drv->tcs[i];
		if (tcs->mask & BIT(tcs_id))
			return tcs->req[tcs_id - tcs->offset];
	}

	return NULL;
}

/**
 * __tcs_set_trigger() - Start xfer on a TCS or unset trigger on a borrowed TCS
 * @drv:     The controller.
 * @tcs_id:  The global ID of this TCS.
 * @trigger: If true then untrigger/retrigger. If false then just untrigger.
 *
 * In the normal case we only ever call with "trigger=true" to start a
 * transfer. That will un-trigger/disable the TCS from the last transfer
 * then trigger/enable for this transfer.
 *
 * If we borrowed a wake TCS for an active-only transfer we'll also call
 * this function with "trigger=false" to just do the un-trigger/disable
 * before using the TCS for wake purposes again.
 *
 * Note that the AP is only in charge of triggering active-only transfers.
 * The AP never triggers sleep/wake values using this function.
 */
static void __tcs_set_trigger(struct rsc_drv *drv, int tcs_id, bool trigger)
{
	u32 enable;

	/*
	 * HW req: Clear the DRV_CONTROL and enable TCS again.
	 * While clearing, ensure that the AMC mode trigger is cleared first
	 * and the mode enable is cleared after it.
	 */
	enable = read_tcs_reg(drv, RSC_DRV_CONTROL, tcs_id);
	enable &= ~TCS_AMC_MODE_TRIGGER;
	write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
	enable &= ~TCS_AMC_MODE_ENABLE;
	write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);

	if (trigger) {
		/* Enable the AMC mode on the TCS and then trigger the TCS */
		enable = TCS_AMC_MODE_ENABLE;
		write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
		enable |= TCS_AMC_MODE_TRIGGER;
		write_tcs_reg(drv, RSC_DRV_CONTROL, tcs_id, enable);
	}
}

/**
 * enable_tcs_irq() - Enable or disable interrupts on the given TCS.
 * @drv:    The controller.
 * @tcs_id: The global ID of this TCS.
 * @enable: If true then enable; if false then disable.
 *
 * We only ever call this when we borrow a wake TCS for an active-only
 * transfer. For active-only TCSes interrupts are always left enabled.
 */
static void enable_tcs_irq(struct rsc_drv *drv, int tcs_id, bool enable)
{
	u32 data;

	data = readl_relaxed(drv->tcs_base + RSC_DRV_IRQ_ENABLE);
	if (enable)
		data |= BIT(tcs_id);
	else
		data &= ~BIT(tcs_id);
	writel_relaxed(data, drv->tcs_base + RSC_DRV_IRQ_ENABLE);
}
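
/*
 * For reference when reading tcs_tx_done() below, a worked example of
 * the RSC_DRV_CMD_STATUS bits (values follow from the CMD_STATUS_*
 * masks above): a command that was both issued and completed reads
 * back as CMD_STATUS_ISSUED | CMD_STATUS_COMPL = 0x100 | 0x10000 =
 * 0x10100.
 */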

/**
 * tcs_tx_done() - TX Done interrupt handler.
 * @irq: The IRQ number (ignored).
 * @p:   Pointer to "struct rsc_drv".
 *
 * Called for ACTIVE_ONLY transfers (those are the only ones we enable the
 * IRQ for) when a transfer is done.
 *
 * Return: IRQ_HANDLED
 */
static irqreturn_t tcs_tx_done(int irq, void *p)
{
	struct rsc_drv *drv = p;
	int i, j, err = 0;
	unsigned long irq_status;
	const struct tcs_request *req;
	struct tcs_cmd *cmd;

	irq_status = readl_relaxed(drv->tcs_base + RSC_DRV_IRQ_STATUS);

	for_each_set_bit(i, &irq_status, BITS_PER_LONG) {
		req = get_req_from_tcs(drv, i);
		if (!req) {
			WARN_ON(1);
			goto skip;
		}

		err = 0;
		for (j = 0; j < req->num_cmds; j++) {
			u32 sts;

			cmd = &req->cmds[j];
			sts = read_tcs_cmd(drv, RSC_DRV_CMD_STATUS, i, j);
			if (!(sts & CMD_STATUS_ISSUED) ||
			    ((req->wait_for_compl || cmd->wait) &&
			     !(sts & CMD_STATUS_COMPL))) {
				pr_err("Incomplete request: %s: addr=%#x data=%#x\n",
				       drv->name, cmd->addr, cmd->data);
				err = -EIO;
			}
		}

		trace_rpmh_tx_done(drv, i, req, err);

		/*
		 * If a wake TCS was re-purposed for sending active
		 * votes, clear the AMC trigger & enable modes and
		 * disable the interrupt for this TCS.
		 */
		if (!drv->tcs[ACTIVE_TCS].num_tcs)
			__tcs_set_trigger(drv, i, false);
skip:
		/* Reclaim the TCS */
		write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, i, 0);
		writel_relaxed(BIT(i), drv->tcs_base + RSC_DRV_IRQ_CLEAR);
		spin_lock(&drv->lock);
		clear_bit(i, drv->tcs_in_use);
		/*
		 * Disable interrupt for WAKE TCS to avoid being
		 * spammed with interrupts coming when the solver
		 * sends its wake votes.
		 */
		if (!drv->tcs[ACTIVE_TCS].num_tcs)
			enable_tcs_irq(drv, i, false);
		spin_unlock(&drv->lock);
		wake_up(&drv->tcs_wait);
		if (req)
			rpmh_tx_done(req, err);
	}

	return IRQ_HANDLED;
}

/**
 * __tcs_buffer_write() - Write to TCS hardware from a request; don't trigger.
 * @drv:    The controller.
 * @tcs_id: The global ID of this TCS.
 * @cmd_id: The index within the TCS to start writing.
 * @msg:    The message we want to send, which will contain several addr/data
 *          pairs to program (but few enough that they all fit in one TCS).
 *
 * This is used for all types of transfers (active, sleep, and wake).
 */
static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
			       const struct tcs_request *msg)
{
	u32 msgid;
	u32 cmd_msgid = CMD_MSGID_LEN | CMD_MSGID_WRITE;
	u32 cmd_enable = 0;
	struct tcs_cmd *cmd;
	int i, j;

	/* Convert all commands to RR when the request has wait_for_compl set */
	cmd_msgid |= msg->wait_for_compl ? CMD_MSGID_RESP_REQ : 0;

	for (i = 0, j = cmd_id; i < msg->num_cmds; i++, j++) {
		cmd = &msg->cmds[i];
		cmd_enable |= BIT(j);
		msgid = cmd_msgid;
		/*
		 * Additionally, if the cmd->wait is set, make the command
		 * response reqd even if the overall request was fire-n-forget.
		 */
		msgid |= cmd->wait ? CMD_MSGID_RESP_REQ : 0;

		write_tcs_cmd(drv, RSC_DRV_CMD_MSGID, tcs_id, j, msgid);
		write_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j, cmd->addr);
		write_tcs_cmd(drv, RSC_DRV_CMD_DATA, tcs_id, j, cmd->data);
		trace_rpmh_send_msg(drv, tcs_id, j, msgid, cmd);
	}

	cmd_enable |= read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id);
	write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, cmd_enable);
}
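
/*
 * A worked example of the msgid composition in __tcs_buffer_write()
 * above (values follow from the CMD_MSGID_* masks): a write command in
 * a request with wait_for_compl set is programmed with
 * msgid = CMD_MSGID_LEN | CMD_MSGID_WRITE | CMD_MSGID_RESP_REQ
 *       = 0x8 | 0x10000 | 0x100 = 0x10108.
 */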

/**
 * check_for_req_inflight() - Look to see if conflicting cmds are in flight.
 * @drv: The controller.
 * @tcs: A pointer to the tcs_group used for ACTIVE_ONLY transfers.
 * @msg: The message we want to send, which will contain several addr/data
 *       pairs to program (but few enough that they all fit in one TCS).
 *
 * This will walk through the TCSes in the group and check if any of them
 * appear to be sending to addresses referenced in the message. If it finds
 * one it'll return -EBUSY.
 *
 * Only for use for active-only transfers.
 *
 * Must be called with the drv->lock held since that protects tcs_in_use.
 *
 * Return: 0 if nothing in flight or -EBUSY if we should try again later.
 *         The caller must re-enable interrupts between tries since that's
 *         the only way tcs_is_free() will ever return true and the only way
 *         RSC_DRV_CMD_ENABLE will ever be cleared.
 */
static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
				  const struct tcs_request *msg)
{
	unsigned long curr_enabled;
	u32 addr;
	int i, j, k;
	int tcs_id = tcs->offset;

	for (i = 0; i < tcs->num_tcs; i++, tcs_id++) {
		if (tcs_is_free(drv, tcs_id))
			continue;

		curr_enabled = read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id);

		for_each_set_bit(j, &curr_enabled, MAX_CMDS_PER_TCS) {
			addr = read_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j);
			for (k = 0; k < msg->num_cmds; k++) {
				if (addr == msg->cmds[k].addr)
					return -EBUSY;
			}
		}
	}

	return 0;
}

/**
 * find_free_tcs() - Find free tcs in the given tcs_group; only for active.
 * @tcs: A pointer to the active-only tcs_group (or the wake tcs_group if
 *       we borrowed it because there are zero active-only ones).
 *
 * Must be called with the drv->lock held since that protects tcs_in_use.
 *
 * Return: The first tcs that's free.
 */
static int find_free_tcs(struct tcs_group *tcs)
{
	int i;

	for (i = 0; i < tcs->num_tcs; i++) {
		if (tcs_is_free(tcs->drv, tcs->offset + i))
			return tcs->offset + i;
	}

	return -EBUSY;
}

/**
 * claim_tcs_for_req() - Claim a tcs in the given tcs_group; only for active.
 * @drv: The controller.
 * @tcs: The tcs_group used for ACTIVE_ONLY transfers.
 * @msg: The data to be sent.
 *
 * Claims a tcs in the given tcs_group while making sure that no existing cmd
 * is in flight that would conflict with the one in @msg.
 *
 * Context: Must be called with the drv->lock held since that protects
 *          tcs_in_use.
 *
 * Return: The id of the claimed tcs or -EBUSY if a matching msg is in flight
 *         or the tcs_group is full.
 */
static int claim_tcs_for_req(struct rsc_drv *drv, struct tcs_group *tcs,
			     const struct tcs_request *msg)
{
	int ret;

	/*
	 * The hardware does not like it if we send a request to an address
	 * while another request to the same address is in flight or being
	 * processed.
	 */
	ret = check_for_req_inflight(drv, tcs, msg);
	if (ret)
		return ret;

	return find_free_tcs(tcs);
}
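
/*
 * For orientation, a minimal sketch of how a caller (normally the rpmh
 * core) might drive rpmh_rsc_send_data() below; "addr" and "data" are
 * placeholders that would really come from cmd-db:
 *
 *	struct tcs_cmd cmd = { .addr = addr, .data = data, .wait = 0 };
 *	struct tcs_request msg = {
 *		.state = RPMH_ACTIVE_ONLY_STATE,
 *		.wait_for_compl = true,
 *		.num_cmds = 1,
 *		.cmds = &cmd,
 *	};
 *
 *	ret = rpmh_rsc_send_data(drv, &msg);
 */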

/**
 * rpmh_rsc_send_data() - Write / trigger active-only message.
 * @drv: The controller.
 * @msg: The data to be sent.
 *
 * NOTES:
 * - This is only used for "ACTIVE_ONLY" since the limitations of this
 *   function don't make sense for sleep/wake cases.
 * - To do the transfer, we will grab a whole TCS for ourselves--we don't
 *   try to share. If there are none available we'll wait indefinitely
 *   for a free one.
 * - This function will not wait for the commands to be finished, only for
 *   data to be programmed into the RPMh. See rpmh_tx_done() which will
 *   be called when the transfer is fully complete.
 * - This function must be called with interrupts enabled. If the hardware
 *   is busy doing someone else's transfer we need that transfer to fully
 *   finish so that we can have the hardware, and to fully finish it needs
 *   the interrupt handler to run. If the interrupt is routed to the CPU
 *   doing the waiting, the handler can never run while that CPU has
 *   interrupts disabled.
 *
 * Return: 0 on success, -EINVAL on error.
 */
int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
{
	struct tcs_group *tcs;
	int tcs_id;
	unsigned long flags;

	tcs = get_tcs_for_msg(drv, msg);
	if (IS_ERR(tcs))
		return PTR_ERR(tcs);

	spin_lock_irqsave(&drv->lock, flags);

	/* Wait forever for a free tcs. It better be there eventually! */
	wait_event_lock_irq(drv->tcs_wait,
			    (tcs_id = claim_tcs_for_req(drv, tcs, msg)) >= 0,
			    drv->lock);

	tcs->req[tcs_id - tcs->offset] = msg;
	set_bit(tcs_id, drv->tcs_in_use);
	if (msg->state == RPMH_ACTIVE_ONLY_STATE && tcs->type != ACTIVE_TCS) {
		/*
		 * Clear previously programmed WAKE commands in the selected
		 * repurposed TCS to avoid triggering them. tcs->slots will be
		 * cleaned from rpmh_flush() by invoking rpmh_rsc_invalidate().
		 */
		write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
		enable_tcs_irq(drv, tcs_id, true);
	}
	spin_unlock_irqrestore(&drv->lock, flags);

	/*
	 * These two can be done after the lock is released because:
	 * - We marked "tcs_in_use" under lock.
	 * - Once "tcs_in_use" has been marked nobody else could be writing
	 *   to these registers until the interrupt goes off.
	 * - The interrupt can't go off until we trigger w/ the last line
	 *   of __tcs_set_trigger() below.
	 */
	__tcs_buffer_write(drv, tcs_id, 0, msg);
	__tcs_set_trigger(drv, tcs_id, true);

	return 0;
}

/**
 * find_slots() - Find a place to write the given message.
 * @tcs:    The tcs group to search.
 * @msg:    The message we want to find room for.
 * @tcs_id: If we return 0 from the function, we return the global ID of the
 *          TCS to write to here.
 * @cmd_id: If we return 0 from the function, we return the index of
 *          the command array of the returned TCS where the client should
 *          start writing the message.
 *
 * Only for use on sleep/wake TCSes since those are the only ones we maintain
 * tcs->slots for.
 *
 * Return: -ENOMEM if there was no room, else 0.
 */
static int find_slots(struct tcs_group *tcs, const struct tcs_request *msg,
		      int *tcs_id, int *cmd_id)
{
	int slot, offset;
	int i = 0;

	/* Do over, until we can fit the full payload in a single TCS */
	do {
		slot = bitmap_find_next_zero_area(tcs->slots, MAX_TCS_SLOTS,
						  i, msg->num_cmds, 0);
		if (slot >= tcs->num_tcs * tcs->ncpt)
			return -ENOMEM;
		i += tcs->ncpt;
	} while (slot + msg->num_cmds - 1 >= i);

	bitmap_set(tcs->slots, slot, msg->num_cmds);

	offset = slot / tcs->ncpt;
	*tcs_id = offset + tcs->offset;
	*cmd_id = slot % tcs->ncpt;

	return 0;
}
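
/*
 * A worked example of the slot arithmetic in find_slots() above
 * (numbers are illustrative): with ncpt = 16, a 3-command message that
 * lands at slot = 35 occupies slots 35..37 inside the third TCS window
 * (slots 32..47), so tcs_id = tcs->offset + 35 / 16 = tcs->offset + 2
 * and cmd_id = 35 % 16 = 3.
 */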

/**
 * rpmh_rsc_write_ctrl_data() - Write request to controller but don't trigger.
 * @drv: The controller.
 * @msg: The data to be written to the controller.
 *
 * This should only be called for sleep/wake state, never active-only
 * state.
 *
 * The caller must ensure that no other RPMH actions are happening and the
 * controller is idle when this function is called since it runs lockless.
 *
 * Return: 0 if no error; else -error.
 */
int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, const struct tcs_request *msg)
{
	struct tcs_group *tcs;
	int tcs_id = 0, cmd_id = 0;
	int ret;

	tcs = get_tcs_for_msg(drv, msg);
	if (IS_ERR(tcs))
		return PTR_ERR(tcs);

	/* find the TCS id and the command in the TCS to write to */
	ret = find_slots(tcs, msg, &tcs_id, &cmd_id);
	if (!ret)
		__tcs_buffer_write(drv, tcs_id, cmd_id, msg);

	return ret;
}

/**
 * rpmh_rsc_ctrlr_is_busy() - Check if any of the AMCs are busy.
 * @drv: The controller.
 *
 * Checks if any of the AMCs are busy handling ACTIVE sets. This is called
 * from the last CPU powering down, before flushing the SLEEP and WAKE sets.
 * If the AMCs are busy, the controller cannot enter power collapse, so the
 * last CPU's PM notification is denied.
 *
 * Context: Must be called with the drv->lock held.
 *
 * Return:
 * * False	- AMCs are idle
 * * True	- AMCs are busy
 */
static bool rpmh_rsc_ctrlr_is_busy(struct rsc_drv *drv)
{
	int m;
	struct tcs_group *tcs = &drv->tcs[ACTIVE_TCS];

	/*
	 * If we made an active request on a RSC that does not have a
	 * dedicated TCS for active state use, then we re-purposed wake
	 * TCSes for those active requests, so it is the wake TCSes that
	 * must be checked for being busy.
	 */
	if (!tcs->num_tcs)
		tcs = &drv->tcs[WAKE_TCS];

	for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++) {
		if (!tcs_is_free(drv, m))
			return true;
	}

	return false;
}
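
/*
 * To illustrate the last-CPU accounting in rpmh_rsc_cpu_pm_callback()
 * below (a sketch of the logic, not extra behavior): with four online
 * CPUs, the first three CPU_PM_ENTER notifications see cpus_in_pm <
 * num_online_cpus() and return early; only the fourth falls through
 * and attempts to flush the sleep and wake sets.
 */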

/**
 * rpmh_rsc_cpu_pm_callback() - Check if any of the AMCs are busy.
 * @nfb:    Pointer to the notifier block in struct rsc_drv.
 * @action: CPU_PM_ENTER, CPU_PM_ENTER_FAILED, or CPU_PM_EXIT.
 * @v:      Unused.
 *
 * This function is given to cpu_pm_register_notifier so we can be informed
 * about when CPUs go down. When all CPUs go down we know no more active
 * transfers will be started so we write sleep/wake sets. This function gets
 * called from cpuidle code paths and also at system suspend time.
 *
 * If it's the last CPU going down and the AMCs are not busy, this writes
 * the cached sleep and wake messages to the TCSes. The firmware then takes
 * care of triggering them when entering the deepest low power modes.
 *
 * Return: See cpu_pm_register_notifier()
 */
static int rpmh_rsc_cpu_pm_callback(struct notifier_block *nfb,
				    unsigned long action, void *v)
{
	struct rsc_drv *drv = container_of(nfb, struct rsc_drv, rsc_pm);
	int ret = NOTIFY_OK;
	int cpus_in_pm;

	switch (action) {
	case CPU_PM_ENTER:
		cpus_in_pm = atomic_inc_return(&drv->cpus_in_pm);
		/*
		 * NOTE: comments for num_online_cpus() point out that it's
		 * only a snapshot so we need to be careful. It should be OK
		 * for us to use, though. It's important for us not to miss
		 * if we're the last CPU going down so it would only be a
		 * problem if a CPU went offline right after we did the check
		 * AND that CPU was not idle AND that CPU was the last non-idle
		 * CPU. That can't happen. CPUs would have to come out of idle
		 * before the CPU could go offline.
		 */
		if (cpus_in_pm < num_online_cpus())
			return NOTIFY_OK;
		break;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		atomic_dec(&drv->cpus_in_pm);
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}

	/*
	 * It's likely we're on the last CPU. Grab the drv->lock and write
	 * out the sleep/wake commands to RPMH hardware. Grabbing the lock
	 * means that if we race with another CPU coming up we are still
	 * guaranteed to be safe. If another CPU came up just after we checked
	 * and has grabbed the lock or started an active transfer then we'll
	 * notice we're busy and abort. If another CPU comes up after we start
	 * flushing it will be blocked from starting an active transfer until
	 * we're done flushing. If another CPU starts an active transfer after
	 * we release the lock we're still OK because we're no longer the last
	 * CPU.
	 */
	if (spin_trylock(&drv->lock)) {
		if (rpmh_rsc_ctrlr_is_busy(drv) || rpmh_flush(&drv->client))
			ret = NOTIFY_BAD;
		spin_unlock(&drv->lock);
	} else {
		/* Another CPU must be up */
		return NOTIFY_OK;
	}

	if (ret == NOTIFY_BAD) {
		/* Double-check if we're here because someone else is up */
		if (cpus_in_pm < num_online_cpus())
			ret = NOTIFY_OK;
		else
			/* We won't be called w/ CPU_PM_ENTER_FAILED */
			atomic_dec(&drv->cpus_in_pm);
	}

	return ret;
}

static int rpmh_probe_tcs_config(struct platform_device *pdev,
				 struct rsc_drv *drv, void __iomem *base)
{
	struct tcs_type_config {
		u32 type;
		u32 n;
	} tcs_cfg[TCS_TYPE_NR] = { { 0 } };
	struct device_node *dn = pdev->dev.of_node;
	u32 config, max_tcs, ncpt, offset;
	int i, ret, n, st = 0;
	struct tcs_group *tcs;

	ret = of_property_read_u32(dn, "qcom,tcs-offset", &offset);
	if (ret)
		return ret;
	drv->tcs_base = base + offset;

	config = readl_relaxed(base + DRV_PRNT_CHLD_CONFIG);

	max_tcs = config;
	max_tcs &= DRV_NUM_TCS_MASK << (DRV_NUM_TCS_SHIFT * drv->id);
	max_tcs = max_tcs >> (DRV_NUM_TCS_SHIFT * drv->id);

	ncpt = config & (DRV_NCPT_MASK << DRV_NCPT_SHIFT);
	ncpt = ncpt >> DRV_NCPT_SHIFT;

	n = of_property_count_u32_elems(dn, "qcom,tcs-config");
	if (n != 2 * TCS_TYPE_NR)
		return -EINVAL;

	for (i = 0; i < TCS_TYPE_NR; i++) {
		ret = of_property_read_u32_index(dn, "qcom,tcs-config",
						 i * 2, &tcs_cfg[i].type);
		if (ret)
			return ret;
		if (tcs_cfg[i].type >= TCS_TYPE_NR)
			return -EINVAL;

		ret = of_property_read_u32_index(dn, "qcom,tcs-config",
						 i * 2 + 1, &tcs_cfg[i].n);
		if (ret)
			return ret;
		if (tcs_cfg[i].n > MAX_TCS_PER_TYPE)
			return -EINVAL;
	}

	for (i = 0; i < TCS_TYPE_NR; i++) {
		tcs = &drv->tcs[tcs_cfg[i].type];
		if (tcs->drv)
			return -EINVAL;
		tcs->drv = drv;
		tcs->type = tcs_cfg[i].type;
		tcs->num_tcs = tcs_cfg[i].n;
		tcs->ncpt = ncpt;

		if (!tcs->num_tcs || tcs->type == CONTROL_TCS)
			continue;

		if (st + tcs->num_tcs > max_tcs ||
		    st + tcs->num_tcs >= BITS_PER_BYTE * sizeof(tcs->mask))
			return -EINVAL;

		tcs->mask = ((1 << tcs->num_tcs) - 1) << st;
		tcs->offset = st;
		st += tcs->num_tcs;
	}

	drv->num_tcs = st;

	return 0;
}
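
/*
 * For reference, a device tree fragment that rpmh_rsc_probe() below
 * would parse might look like this (modeled on SDM845; the values are
 * illustrative, not normative):
 *
 *	apps_rsc: rsc@179c0000 {
 *		compatible = "qcom,rpmh-rsc";
 *		reg = <0x179c0000 0x10000>,
 *		      <0x179d0000 0x10000>,
 *		      <0x179e0000 0x10000>;
 *		reg-names = "drv-0", "drv-1", "drv-2";
 *		label = "apps_rsc";
 *		qcom,drv-id = <2>;
 *		qcom,tcs-offset = <0xd00>;
 *		qcom,tcs-config = <ACTIVE_TCS 2>, <SLEEP_TCS 3>,
 *				  <WAKE_TCS 3>, <CONTROL_TCS 1>;
 *	};
 */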

static int rpmh_rsc_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct rsc_drv *drv;
	struct resource *res;
	char drv_id[10] = {0};
	int ret, irq;
	u32 solver_config;
	void __iomem *base;

	/*
	 * Even though RPMh doesn't directly use cmd-db, all of its children
	 * do. To avoid adding this check to our children we'll do it now.
	 */
	ret = cmd_db_ready();
	if (ret) {
		if (ret != -EPROBE_DEFER)
			dev_err(&pdev->dev, "Command DB not available (%d)\n",
				ret);
		return ret;
	}

	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;

	ret = of_property_read_u32(dn, "qcom,drv-id", &drv->id);
	if (ret)
		return ret;

	drv->name = of_get_property(dn, "label", NULL);
	if (!drv->name)
		drv->name = dev_name(&pdev->dev);

	snprintf(drv_id, ARRAY_SIZE(drv_id), "drv-%d", drv->id);
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, drv_id);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	ret = rpmh_probe_tcs_config(pdev, drv, base);
	if (ret)
		return ret;

	spin_lock_init(&drv->lock);
	init_waitqueue_head(&drv->tcs_wait);
	bitmap_zero(drv->tcs_in_use, MAX_TCS_NR);

	irq = platform_get_irq(pdev, drv->id);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, tcs_tx_done,
			       IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND,
			       drv->name, drv);
	if (ret)
		return ret;

	/*
	 * CPU PM notifications are not required for controllers that support
	 * 'HW solver' mode, where the hardware can autonomously execute low
	 * power modes and power down on its own.
	 */
	solver_config = readl_relaxed(base + DRV_SOLVER_CONFIG);
	solver_config &= DRV_HW_SOLVER_MASK << DRV_HW_SOLVER_SHIFT;
	solver_config = solver_config >> DRV_HW_SOLVER_SHIFT;
	if (!solver_config) {
		drv->rsc_pm.notifier_call = rpmh_rsc_cpu_pm_callback;
		cpu_pm_register_notifier(&drv->rsc_pm);
	}

	/* Enable the active TCS to send requests immediately */
	writel_relaxed(drv->tcs[ACTIVE_TCS].mask,
		       drv->tcs_base + RSC_DRV_IRQ_ENABLE);

	spin_lock_init(&drv->client.cache_lock);
	INIT_LIST_HEAD(&drv->client.cache);
	INIT_LIST_HEAD(&drv->client.batch_cache);

	dev_set_drvdata(&pdev->dev, drv);

	return devm_of_platform_populate(&pdev->dev);
}

static const struct of_device_id rpmh_drv_match[] = {
	{ .compatible = "qcom,rpmh-rsc", },
	{ }
};
MODULE_DEVICE_TABLE(of, rpmh_drv_match);

static struct platform_driver rpmh_driver = {
	.probe = rpmh_rsc_probe,
	.driver = {
		.name = "rpmh",
		.of_match_table = rpmh_drv_match,
		.suppress_bind_attrs = true,
	},
};

static int __init rpmh_driver_init(void)
{
	return platform_driver_register(&rpmh_driver);
}
arch_initcall(rpmh_driver_init);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. RPMh Driver");
MODULE_LICENSE("GPL v2");