// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <soc/qcom/rpmh.h>

#include "rpmh-internal.h"

#define RPMH_TIMEOUT_MS		msecs_to_jiffies(10000)

#define DEFINE_RPMH_MSG_ONSTACK(device, s, q, name)	\
	struct rpmh_request name = {			\
		.msg = {				\
			.state = s,			\
			.cmds = name.cmd,		\
			.num_cmds = 0,			\
			.wait_for_compl = true,		\
		},					\
		.cmd = { { 0 } },			\
		.completion = q,			\
		.dev = device,				\
		.needs_free = false,			\
	}

#define ctrlr_to_drv(ctrlr) container_of(ctrlr, struct rsc_drv, client)

/**
 * struct cache_req: the request object for caching
 *
 * @addr: the address of the resource
 * @sleep_val: the sleep vote
 * @wake_val: the wake vote
 * @list: linked list obj
 */
struct cache_req {
	u32 addr;
	u32 sleep_val;
	u32 wake_val;
	struct list_head list;
};

/**
 * struct batch_cache_req - An entry in our batch cache
 *
 * @list: linked list obj
 * @count: number of messages
 * @rpm_msgs: the messages
 */
struct batch_cache_req {
	struct list_head list;
	int count;
	struct rpmh_request rpm_msgs[];
};

static struct rpmh_ctrlr *get_rpmh_ctrlr(const struct device *dev)
{
	struct rsc_drv *drv = dev_get_drvdata(dev->parent);

	return &drv->client;
}

void rpmh_tx_done(const struct tcs_request *msg)
{
	struct rpmh_request *rpm_msg = container_of(msg, struct rpmh_request,
						    msg);
	struct completion *compl = rpm_msg->completion;
	bool free = rpm_msg->needs_free;

	if (!compl)
		goto exit;

	/* Signal the blocking thread we are done */
	complete(compl);

exit:
	if (free)
		kfree(rpm_msg);
}

static struct cache_req *__find_req(struct rpmh_ctrlr *ctrlr, u32 addr)
{
	struct cache_req *p, *req = NULL;

	list_for_each_entry(p, &ctrlr->cache, list) {
		if (p->addr == addr) {
			req = p;
			break;
		}
	}

	return req;
}

static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,
					   enum rpmh_state state,
					   struct tcs_cmd *cmd)
{
	struct cache_req *req;
	unsigned long flags;
	u32 old_sleep_val, old_wake_val;

	spin_lock_irqsave(&ctrlr->cache_lock, flags);
	req = __find_req(ctrlr, cmd->addr);
	if (req)
		goto existing;

	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		req = ERR_PTR(-ENOMEM);
		goto unlock;
	}

	req->addr = cmd->addr;
	req->sleep_val = req->wake_val = UINT_MAX;
	list_add_tail(&req->list, &ctrlr->cache);

existing:
	old_sleep_val = req->sleep_val;
	old_wake_val = req->wake_val;

	switch (state) {
	case RPMH_ACTIVE_ONLY_STATE:
	case RPMH_WAKE_ONLY_STATE:
		req->wake_val = cmd->data;
		break;
	case RPMH_SLEEP_STATE:
		req->sleep_val = cmd->data;
		break;
	}

	ctrlr->dirty |= (req->sleep_val != old_sleep_val ||
			 req->wake_val != old_wake_val) &&
			 req->sleep_val != UINT_MAX &&
			 req->wake_val != UINT_MAX;

unlock:
	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);

	return req;
}

/**
 * __rpmh_write: Cache and send the RPMH request
 *
 * @dev: The device making the request
 * @state: Active/Sleep request type
 * @rpm_msg: The data that needs to be sent (cmds).
 *
 * Cache the RPMH request and send if the state is ACTIVE_ONLY.
 * SLEEP/WAKE_ONLY requests are not sent to the controller at
 * this time. Use rpmh_flush() to send them to the controller.
 */
static int __rpmh_write(const struct device *dev, enum rpmh_state state,
			struct rpmh_request *rpm_msg)
{
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	int ret = -EINVAL;
	struct cache_req *req;
	int i;

	/* Cache the request in our store and link the payload */
	for (i = 0; i < rpm_msg->msg.num_cmds; i++) {
		req = cache_rpm_request(ctrlr, state, &rpm_msg->msg.cmds[i]);
		if (IS_ERR(req))
			return PTR_ERR(req);
	}

	if (state == RPMH_ACTIVE_ONLY_STATE) {
		ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
	} else {
		/* Clean up our call by spoofing tx_done */
		ret = 0;
		rpmh_tx_done(&rpm_msg->msg);
	}

	return ret;
}

static int __fill_rpmh_msg(struct rpmh_request *req, enum rpmh_state state,
			   const struct tcs_cmd *cmd, u32 n)
{
	if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
		return -EINVAL;

	memcpy(req->cmd, cmd, n * sizeof(*cmd));

	req->msg.state = state;
	req->msg.cmds = req->cmd;
	req->msg.num_cmds = n;

	return 0;
}

/**
 * rpmh_write_async: Write a set of RPMH commands
 *
 * @dev: The device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The number of elements in payload
 *
 * Write a set of RPMH commands without waiting for an ack. The order of
 * commands is maintained and they are sent as a single shot. The request
 * is freed from rpmh_tx_done() once the controller acknowledges it.
 */
int rpmh_write_async(const struct device *dev, enum rpmh_state state,
		     const struct tcs_cmd *cmd, u32 n)
{
	struct rpmh_request *rpm_msg;
	int ret;

	rpm_msg = kzalloc(sizeof(*rpm_msg), GFP_ATOMIC);
	if (!rpm_msg)
		return -ENOMEM;
	rpm_msg->needs_free = true;

	ret = __fill_rpmh_msg(rpm_msg, state, cmd, n);
	if (ret) {
		kfree(rpm_msg);
		return ret;
	}

	return __rpmh_write(dev, state, rpm_msg);
}
EXPORT_SYMBOL(rpmh_write_async);

/**
 * rpmh_write: Write a set of RPMH commands and block until response
 *
 * @dev: The device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The number of elements in @cmd
 *
 * May sleep. Do not call from atomic contexts.
 */
int rpmh_write(const struct device *dev, enum rpmh_state state,
	       const struct tcs_cmd *cmd, u32 n)
{
	DECLARE_COMPLETION_ONSTACK(compl);
	DEFINE_RPMH_MSG_ONSTACK(dev, state, &compl, rpm_msg);
	int ret;

	ret = __fill_rpmh_msg(&rpm_msg, state, cmd, n);
	if (ret)
		return ret;

	ret = __rpmh_write(dev, state, &rpm_msg);
	if (ret)
		return ret;

	ret = wait_for_completion_timeout(&compl, RPMH_TIMEOUT_MS);
	WARN_ON(!ret);
	return (ret > 0) ? 0 : -ETIMEDOUT;
}
EXPORT_SYMBOL(rpmh_write);
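
/*
 * Example (illustrative sketch only, not part of this driver): a client
 * whose parent device is the RSC controller might vote for a shared
 * resource as below. The address and data values here are hypothetical;
 * real clients typically look the address up in the command DB.
 *
 *	static int example_active_vote(const struct device *dev)
 *	{
 *		struct tcs_cmd cmd = {
 *			.addr = 0x30000,	// hypothetical resource address
 *			.data = 0x1,		// hypothetical vote value
 *		};
 *
 *		// Blocks until the controller acks, so call only from a
 *		// sleepable context; use rpmh_write_async() otherwise.
 *		return rpmh_write(dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
 *	}
 */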

static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrlr->cache_lock, flags);
	list_add_tail(&req->list, &ctrlr->batch_cache);
	ctrlr->dirty = true;
	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
}

static int flush_batch(struct rpmh_ctrlr *ctrlr)
{
	struct batch_cache_req *req;
	const struct rpmh_request *rpm_msg;
	int ret = 0;
	int i;

	/* Send Sleep/Wake requests to the controller, expect no response */
	list_for_each_entry(req, &ctrlr->batch_cache, list) {
		for (i = 0; i < req->count; i++) {
			rpm_msg = req->rpm_msgs + i;
			ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
						       &rpm_msg->msg);
			if (ret)
				break;
		}
	}

	return ret;
}

/**
 * rpmh_write_batch: Write multiple sets of RPMH commands and wait for the
 * batch to finish.
 *
 * @dev: the device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The array of counts of elements in each batch, 0 terminated.
 *
 * Write a request to the RSC controller. If the request state is ACTIVE,
 * the requests are treated as completion requests and sent to the
 * controller immediately; the function waits until all the commands are
 * complete. SLEEP and WAKE_ONLY batches are cached as fire-n-forget and
 * no ack is expected; rpmh_flush() sends them to the controller later.
 *
 * May sleep. Do not call from atomic contexts for ACTIVE_ONLY requests.
 */
int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
		     const struct tcs_cmd *cmd, u32 *n)
{
	struct batch_cache_req *req;
	struct rpmh_request *rpm_msgs;
	struct completion *compls;
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	unsigned long time_left;
	int count = 0;
	int ret, i;
	void *ptr;

	if (!cmd || !n)
		return -EINVAL;

	while (n[count] > 0)
		count++;
	if (!count)
		return -EINVAL;

	ptr = kzalloc(sizeof(*req) +
		      count * (sizeof(req->rpm_msgs[0]) + sizeof(*compls)),
		      GFP_ATOMIC);
	if (!ptr)
		return -ENOMEM;

	req = ptr;
	compls = ptr + sizeof(*req) + count * sizeof(*rpm_msgs);

	req->count = count;
	rpm_msgs = req->rpm_msgs;

	for (i = 0; i < count; i++) {
		/* Reject oversized batches instead of overflowing req->cmd */
		ret = __fill_rpmh_msg(rpm_msgs + i, state, cmd, n[i]);
		if (ret) {
			kfree(ptr);
			return ret;
		}
		cmd += n[i];
	}

	if (state != RPMH_ACTIVE_ONLY_STATE) {
		cache_batch(ctrlr, req);
		return 0;
	}

	for (i = 0; i < count; i++) {
		struct completion *compl = &compls[i];

		init_completion(compl);
		rpm_msgs[i].completion = compl;
		ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg);
		if (ret) {
			pr_err("Error(%d) sending RPMH message addr=%#x\n",
			       ret, rpm_msgs[i].msg.cmds[0].addr);
			break;
		}
	}

	time_left = RPMH_TIMEOUT_MS;
	while (i--) {
		time_left = wait_for_completion_timeout(&compls[i], time_left);
		if (!time_left) {
			/*
			 * Better hope they never finish because they'll signal
			 * the completion that we're going to free once
			 * we've returned from this function.
			 */
			WARN_ON(1);
			ret = -ETIMEDOUT;
			goto exit;
		}
	}

exit:
	kfree(ptr);

	return ret;
}
EXPORT_SYMBOL(rpmh_write_batch);
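
/*
 * Example (illustrative sketch only): rpmh_write_batch() takes a flat
 * command array plus a zero-terminated array of per-batch counts. Below,
 * the first two commands form one batch and the third forms a second
 * batch. Addresses and data values are hypothetical.
 *
 *	static int example_batch(const struct device *dev)
 *	{
 *		struct tcs_cmd cmds[] = {
 *			{ .addr = 0x30000, .data = 0x1 },
 *			{ .addr = 0x30004, .data = 0x2 },
 *			{ .addr = 0x30010, .data = 0x0 },
 *		};
 *		u32 n[] = { 2, 1, 0 };	// two batches, zero terminated
 *
 *		return rpmh_write_batch(dev, RPMH_ACTIVE_ONLY_STATE, cmds, n);
 *	}
 */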

static int is_req_valid(struct cache_req *req)
{
	return (req->sleep_val != UINT_MAX &&
		req->wake_val != UINT_MAX &&
		req->sleep_val != req->wake_val);
}

static int send_single(struct rpmh_ctrlr *ctrlr, enum rpmh_state state,
		       u32 addr, u32 data)
{
	DEFINE_RPMH_MSG_ONSTACK(NULL, state, NULL, rpm_msg);

	/* Wake sets are always complete and sleep sets are not */
	rpm_msg.msg.wait_for_compl = (state == RPMH_WAKE_ONLY_STATE);
	rpm_msg.cmd[0].addr = addr;
	rpm_msg.cmd[0].data = data;
	rpm_msg.msg.num_cmds = 1;

	return rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr), &rpm_msg.msg);
}

/**
 * rpmh_flush() - Flushes the buffered sleep and wake sets to TCSes
 *
 * @ctrlr: Controller making request to flush cached data
 *
 * Return:
 * * 0          - Success
 * * Error code - Otherwise
 */
int rpmh_flush(struct rpmh_ctrlr *ctrlr)
{
	struct cache_req *p;
	int ret = 0;

	lockdep_assert_irqs_disabled();

	/*
	 * Currently rpmh_flush() is only called when we think we're running
	 * on the last processor. If the lock is busy it means another
	 * processor is up and it's better to abort than spin.
	 */
	if (!spin_trylock(&ctrlr->cache_lock))
		return -EBUSY;

	if (!ctrlr->dirty) {
		pr_debug("Skipping flush, TCS has latest data.\n");
		goto write_next_wakeup;
	}

	/* Invalidate the TCSes first to avoid stale data */
	rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));

	/* First flush the cached batch requests */
	ret = flush_batch(ctrlr);
	if (ret)
		goto exit;

	list_for_each_entry(p, &ctrlr->cache, list) {
		if (!is_req_valid(p)) {
			pr_debug("%s: skipping RPMH req: a:%#x s:%#x w:%#x",
				 __func__, p->addr, p->sleep_val, p->wake_val);
			continue;
		}
		ret = send_single(ctrlr, RPMH_SLEEP_STATE, p->addr,
				  p->sleep_val);
		if (ret)
			goto exit;
		ret = send_single(ctrlr, RPMH_WAKE_ONLY_STATE, p->addr,
				  p->wake_val);
		if (ret)
			goto exit;
	}

	ctrlr->dirty = false;

write_next_wakeup:
	rpmh_rsc_write_next_wakeup(ctrlr_to_drv(ctrlr));
exit:
	spin_unlock(&ctrlr->cache_lock);
	return ret;
}
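
/*
 * Note: clients do not call rpmh_flush() themselves; the RSC driver invokes
 * it, with IRQs disabled, when it believes the last CPU is entering a low
 * power mode. A client only stages the votes it wants applied, e.g.
 * (hypothetical commands):
 *
 *	rpmh_write(dev, RPMH_SLEEP_STATE, &sleep_cmd, 1);	// cached only
 *	rpmh_write(dev, RPMH_WAKE_ONLY_STATE, &wake_cmd, 1);	// cached only
 *	// ...later, rpmh_flush() writes both votes to the sleep/wake TCSes.
 */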

/**
 * rpmh_invalidate: Invalidate sleep and wake sets in batch_cache
 *
 * @dev: The device making the request
 *
 * Invalidate the sleep and wake values in batch_cache.
 */
void rpmh_invalidate(const struct device *dev)
{
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	struct batch_cache_req *req, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&ctrlr->cache_lock, flags);
	list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
		kfree(req);
	INIT_LIST_HEAD(&ctrlr->batch_cache);
	ctrlr->dirty = true;
	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
}
EXPORT_SYMBOL(rpmh_invalidate);
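
/*
 * Example (illustrative sketch only): a client rebuilding its sleep and
 * wake votes from scratch would drop its stale cached batches first
 * (cmds/n arrays are hypothetical):
 *
 *	rpmh_invalidate(dev);			// empty the batch cache
 *	rpmh_write_batch(dev, RPMH_SLEEP_STATE, sleep_cmds, n);
 *	rpmh_write_batch(dev, RPMH_WAKE_ONLY_STATE, wake_cmds, n);
 */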