// SPDX-License-Identifier: GPL-2.0
/*
 * Hardware spinlock framework
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
 */

#define pr_fmt(fmt)	"%s: " fmt, __func__

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/radix-tree.h>
#include <linux/hwspinlock.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
#include <linux/of.h>

#include "hwspinlock_internal.h"

/* retry delay used in atomic context */
#define HWSPINLOCK_RETRY_DELAY_US	100

/* radix tree tags */
#define HWSPINLOCK_UNUSED	(0) /* tags an hwspinlock as unused */

/*
 * A radix tree is used to maintain the available hwspinlock instances.
 * The tree associates hwspinlock pointers with their integer key id,
 * and provides an easy-to-use API which makes the hwspinlock core code
 * simple and easy to read.
 *
 * Radix trees are quick on lookups, and reasonably efficient in terms of
 * storage, especially with high-density usage such as this framework
 * requires (a continuous range of integer keys, beginning with zero, is
 * used as the IDs of the hwspinlock instances).
 *
 * The radix tree API supports tagging items in the tree, which this
 * framework uses to mark unused hwspinlock instances (see the
 * HWSPINLOCK_UNUSED tag above). As a result, the process of querying the
 * tree, looking for an unused hwspinlock instance, is reduced to a
 * single radix tree API call.
 */
static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);

/*
 * Synchronization of access to the tree is achieved using this mutex,
 * as the radix-tree API requires that users provide all synchronization.
 * A mutex is needed because we're using non-atomic radix tree allocations.
 */
static DEFINE_MUTEX(hwspinlock_tree_lock);


/**
 * __hwspin_trylock() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 * @mode: controls whether local interrupts are disabled or not
 * @flags: a pointer where the caller's interrupt state will be saved at (if
 *         requested)
 *
 * This function attempts to lock an hwspinlock, and will immediately
 * fail if the hwspinlock is already taken.
 *
 * Caution: if the mode is HWLOCK_RAW, the caller must protect the hardware
 * lock acquisition with a mutex or spinlock. In some scenarios, users need
 * to perform time-consuming or sleepable operations while holding the
 * hardware lock, so they need a sleepable lock (such as a mutex) to protect
 * those operations.
 *
 * If the mode is neither HWLOCK_IN_ATOMIC nor HWLOCK_RAW, upon a successful
 * return from this function, preemption (and possibly interrupts) is disabled,
 * so the caller must not sleep, and is advised to release the hwspinlock as
 * soon as possible. This is required in order to minimize remote cores polling
 * on the hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if yes,
 * whether they want their previous state to be saved. It is up to the user
 * to choose the appropriate @mode of operation, exactly the same way users
 * should decide between spin_trylock, spin_trylock_irq and
 * spin_trylock_irqsave.
 *
 * Returns 0 if we successfully locked the hwspinlock or -EBUSY if
 * the hwspinlock was already taken.
 * This function will never sleep.
 */
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	int ret;

	if (WARN_ON(!hwlock || (!flags && mode == HWLOCK_IRQSTATE)))
		return -EINVAL;

	/*
	 * This spin_lock{_irq, _irqsave} serves three purposes:
	 *
	 * 1. Disable preemption, in order to minimize the period of time
	 *    in which the hwspinlock is taken. This is important in order
	 *    to minimize the possible polling on the hardware interconnect
	 *    by a remote user of this lock.
	 * 2. Make the hwspinlock SMP-safe (so we can take it from
	 *    additional contexts on the local host).
	 * 3. Ensure that in_atomic/might_sleep checks catch potential
	 *    problems with hwspinlock usage (e.g. scheduler checks like
	 *    'scheduling while atomic' etc.)
	 */
	switch (mode) {
	case HWLOCK_IRQSTATE:
		ret = spin_trylock_irqsave(&hwlock->lock, *flags);
		break;
	case HWLOCK_IRQ:
		ret = spin_trylock_irq(&hwlock->lock);
		break;
	case HWLOCK_RAW:
	case HWLOCK_IN_ATOMIC:
		ret = 1;
		break;
	default:
		ret = spin_trylock(&hwlock->lock);
		break;
	}

	/* is lock already taken by another context on the local cpu? */
	if (!ret)
		return -EBUSY;

	/* try to take the hwspinlock device */
	ret = hwlock->bank->ops->trylock(hwlock);

	/* if hwlock is already taken, undo spin_trylock_* and exit */
	if (!ret) {
		switch (mode) {
		case HWLOCK_IRQSTATE:
			spin_unlock_irqrestore(&hwlock->lock, *flags);
			break;
		case HWLOCK_IRQ:
			spin_unlock_irq(&hwlock->lock);
			break;
		case HWLOCK_RAW:
		case HWLOCK_IN_ATOMIC:
			/* Nothing to do */
			break;
		default:
			spin_unlock(&hwlock->lock);
			break;
		}

		return -EBUSY;
	}

	/*
	 * We can be sure the other core's memory operations
	 * are observable to us only _after_ we successfully take
	 * the hwspinlock, and we must make sure that subsequent memory
	 * operations (both reads and writes) will not be reordered before
	 * we actually took the hwspinlock.
	 *
	 * Note: the implicit memory barrier of the spinlock above is too
	 * early, so we need this additional explicit memory barrier.
	 */
	mb();

	return 0;
}
EXPORT_SYMBOL_GPL(__hwspin_trylock);
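
/*
 * Example (illustrative only, not part of the framework): a user that has
 * already obtained an hwspinlock handle via one of the request APIs below
 * can poll for the lock with the hwspin_trylock_irqsave() wrapper declared
 * in <linux/hwspinlock.h>:
 *
 *	unsigned long flags;
 *
 *	if (!hwspin_trylock_irqsave(hwlock, &flags)) {
 *		... access the resource shared with the remote core ...
 *		hwspin_unlock_irqrestore(hwlock, &flags);
 *	}
 */
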
/**
 * __hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 * @mode: mode which controls whether local interrupts are disabled or not
 * @flags: a pointer to where the caller's interrupt state will be saved at (if
 *         requested)
 *
 * This function locks the given @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up after @to msecs have elapsed.
 *
 * Caution: if the mode is HWLOCK_RAW, the caller must protect the hardware
 * lock acquisition with a mutex or spinlock. In some scenarios, users need
 * to perform time-consuming or sleepable operations while holding the
 * hardware lock, so they need a sleepable lock (such as a mutex) to protect
 * those operations.
 *
 * If the mode is HWLOCK_IN_ATOMIC (called from an atomic context) the timeout
 * is handled with busy-waiting delays, hence it shall not exceed a few msecs.
 *
 * If the mode is neither HWLOCK_IN_ATOMIC nor HWLOCK_RAW, upon a successful
 * return from this function, preemption (and possibly interrupts) is disabled,
 * so the caller must not sleep, and is advised to release the hwspinlock as
 * soon as possible. This is required in order to minimize remote cores polling
 * on the hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if yes,
 * whether they want their previous state to be saved. It is up to the user
 * to choose the appropriate @mode of operation, exactly the same way users
 * should decide between spin_lock, spin_lock_irq and spin_lock_irqsave.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
					int mode, unsigned long *flags)
{
	int ret;
	unsigned long expire, atomic_delay = 0;

	expire = msecs_to_jiffies(to) + jiffies;

	for (;;) {
		/* Try to take the hwspinlock */
		ret = __hwspin_trylock(hwlock, mode, flags);
		if (ret != -EBUSY)
			break;

		/*
		 * The lock is already taken, let's check if the user wants
		 * us to try again
		 */
		if (mode == HWLOCK_IN_ATOMIC) {
			udelay(HWSPINLOCK_RETRY_DELAY_US);
			atomic_delay += HWSPINLOCK_RETRY_DELAY_US;
			if (atomic_delay > to * 1000)
				return -ETIMEDOUT;
		} else {
			if (time_is_before_eq_jiffies(expire))
				return -ETIMEDOUT;
		}

		/*
		 * Allow platform-specific relax handlers to prevent
		 * hogging the interconnect (no sleeping, though)
		 */
		if (hwlock->bank->ops->relax)
			hwlock->bank->ops->relax(hwlock);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(__hwspin_lock_timeout);
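
/*
 * Example (illustrative sketch, assuming @hwlock was obtained earlier via
 * one of the request APIs below): the hwspin_lock_timeout_irqsave() wrapper
 * from <linux/hwspinlock.h> busy-waits for the lock for up to the given
 * number of msecs:
 *
 *	unsigned long flags;
 *	int ret;
 *
 *	ret = hwspin_lock_timeout_irqsave(hwlock, 10, &flags);
 *	if (ret)
 *		return ret;
 *	... access the shared resource ...
 *	hwspin_unlock_irqrestore(hwlock, &flags);
 */
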
/**
 * __hwspin_unlock() - unlock a specific hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 * @mode: controls whether local interrupts need to be restored or not
 * @flags: previous caller's interrupt state to restore (if requested)
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * (possibly) enable interrupts or restore their previous state.
 * @hwlock must be already locked before calling this function: it is a bug
 * to call unlock on a @hwlock that is already unlocked.
 *
 * The user decides whether local interrupts should be enabled or not, and
 * if yes, whether they want their previous state to be restored. It is up
 * to the user to choose the appropriate @mode of operation, exactly the
 * same way users decide between spin_unlock, spin_unlock_irq and
 * spin_unlock_irqrestore.
 *
 * The function will never sleep.
 */
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	if (WARN_ON(!hwlock || (!flags && mode == HWLOCK_IRQSTATE)))
		return;

	/*
	 * We must make sure that memory operations (both reads and writes),
	 * done before unlocking the hwspinlock, will not be reordered
	 * after the lock is released.
	 *
	 * That's the purpose of this explicit memory barrier.
	 *
	 * Note: the memory barrier induced by the spin_unlock below is too
	 * late; the other core is going to access memory soon after it will
	 * take the hwspinlock, and by then we want to be sure our memory
	 * operations are already observable.
	 */
	mb();

	hwlock->bank->ops->unlock(hwlock);

	/* Undo the spin_trylock{_irq, _irqsave} called while locking */
	switch (mode) {
	case HWLOCK_IRQSTATE:
		spin_unlock_irqrestore(&hwlock->lock, *flags);
		break;
	case HWLOCK_IRQ:
		spin_unlock_irq(&hwlock->lock);
		break;
	case HWLOCK_RAW:
	case HWLOCK_IN_ATOMIC:
		/* Nothing to do */
		break;
	default:
		spin_unlock(&hwlock->lock);
		break;
	}
}
EXPORT_SYMBOL_GPL(__hwspin_unlock);
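
/*
 * Example (illustrative): the unlock flavour must match the flavour used to
 * take the lock, exactly like the spin_unlock() variants. A sketch using the
 * IRQ-disabling wrappers from <linux/hwspinlock.h>:
 *
 *	ret = hwspin_lock_timeout_irq(hwlock, 5);
 *	if (!ret) {
 *		... update data shared with the remote core ...
 *		hwspin_unlock_irq(hwlock);
 *	}
 */
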
/**
 * hwspin_lock_bust() - bust a specific hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to bust
 * @id: identifier of the remote lock holder, if applicable
 *
 * This function will bust a hwspinlock that was previously acquired as
 * long as the current owner of the lock matches the id given by the caller.
 *
 * Context: Process context.
 *
 * Returns: 0 on success, -EINVAL if the hwspinlock does not exist or
 * the bust operation fails, and -EOPNOTSUPP if the bust operation is not
 * defined for the hwspinlock.
 */
int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id)
{
	if (WARN_ON(!hwlock))
		return -EINVAL;

	if (!hwlock->bank->ops->bust) {
		pr_err("bust operation not defined\n");
		return -EOPNOTSUPP;
	}

	return hwlock->bank->ops->bust(hwlock, id);
}
EXPORT_SYMBOL_GPL(hwspin_lock_bust);

/**
 * of_hwspin_lock_simple_xlate - translate hwlock_spec to return a lock id
 * @hwlock_spec: hwlock specifier as found in the device tree
 *
 * This is a simple translation function, suitable for hwspinlock platform
 * drivers that only have a lock specifier length of 1.
 *
 * Returns a relative index of the lock within a specified bank on success,
 * or -EINVAL on invalid specifier cell count.
 */
static inline int
of_hwspin_lock_simple_xlate(const struct of_phandle_args *hwlock_spec)
{
	if (WARN_ON(hwlock_spec->args_count != 1))
		return -EINVAL;

	return hwlock_spec->args[0];
}

/**
 * of_hwspin_lock_get_id() - get lock id for an OF phandle-based specific lock
 * @np: device node from which to request the specific hwlock
 * @index: index of the hwlock in the list of values
 *
 * This function provides a means for DT users of the hwspinlock module to
 * get the global lock id of a specific hwspinlock using the phandle of the
 * hwspinlock device, so that it can be requested using the normal
 * hwspin_lock_request_specific() API.
 *
 * Returns the global lock id number on success, -EPROBE_DEFER if the
 * hwspinlock device is not yet registered, -EINVAL on an invalid args
 * specifier value, or an appropriate error as returned from the OF parsing
 * of the DT client node.
 */
int of_hwspin_lock_get_id(struct device_node *np, int index)
{
	struct of_phandle_args args;
	struct hwspinlock *hwlock;
	struct radix_tree_iter iter;
	void **slot;
	int id;
	int ret;

	ret = of_parse_phandle_with_args(np, "hwlocks", "#hwlock-cells", index,
					 &args);
	if (ret)
		return ret;

	if (!of_device_is_available(args.np)) {
		ret = -ENOENT;
		goto out;
	}

	/* Find the hwspinlock device: we need its base_id */
	ret = -EPROBE_DEFER;
	rcu_read_lock();
	radix_tree_for_each_slot(slot, &hwspinlock_tree, &iter, 0) {
		hwlock = radix_tree_deref_slot(slot);
		if (unlikely(!hwlock))
			continue;
		if (radix_tree_deref_retry(hwlock)) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}

		if (device_match_of_node(hwlock->bank->dev, args.np)) {
			ret = 0;
			break;
		}
	}
	rcu_read_unlock();
	if (ret < 0)
		goto out;

	id = of_hwspin_lock_simple_xlate(&args);
	if (id < 0 || id >= hwlock->bank->num_locks) {
		ret = -EINVAL;
		goto out;
	}
	id += hwlock->bank->base_id;

out:
	of_node_put(args.np);
	return ret ? ret : id;
}
EXPORT_SYMBOL_GPL(of_hwspin_lock_get_id);
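
/*
 * Example (illustrative; node names and the specifier value are made up):
 * a DT client node referencing lock 2 of an hwspinlock bank:
 *
 *	hwlock_bank: hwspinlock {
 *		#hwlock-cells = <1>;
 *	};
 *
 *	client {
 *		hwlocks = <&hwlock_bank 2>;
 *	};
 *
 * of_hwspin_lock_get_id(client_np, 0) would then resolve to the bank's
 * base_id + 2.
 */
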
/**
 * of_hwspin_lock_get_id_byname() - get lock id for a specified hwlock name
 * @np: device node from which to request the specific hwlock
 * @name: hwlock name
 *
 * This function provides a means for DT users of the hwspinlock module to
 * get the global lock id of a specific hwspinlock using the specified name of
 * the hwspinlock device, so that it can be requested using the normal
 * hwspin_lock_request_specific() API.
 *
 * Returns the global lock id number on success, -EPROBE_DEFER if the
 * hwspinlock device is not yet registered, -EINVAL on an invalid args
 * specifier value, or an appropriate error as returned from the OF parsing
 * of the DT client node.
 */
int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name)
{
	int index;

	if (!name)
		return -EINVAL;

	index = of_property_match_string(np, "hwlock-names", name);
	if (index < 0)
		return index;

	return of_hwspin_lock_get_id(np, index);
}
EXPORT_SYMBOL_GPL(of_hwspin_lock_get_id_byname);
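
/*
 * Example (illustrative; property values are made up): with
 *
 *	client {
 *		hwlocks = <&hwlock_bank 0>, <&hwlock_bank 7>;
 *		hwlock-names = "tx", "rx";
 *	};
 *
 * of_hwspin_lock_get_id_byname(np, "rx") returns the same global id as
 * of_hwspin_lock_get_id(np, 1).
 */
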
static int hwspin_lock_register_single(struct hwspinlock *hwlock, int id)
{
	struct hwspinlock *tmp;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	ret = radix_tree_insert(&hwspinlock_tree, id, hwlock);
	if (ret) {
		if (ret == -EEXIST)
			pr_err("hwspinlock id %d already exists!\n", id);
		goto out;
	}

	/* mark this hwspinlock as available */
	tmp = radix_tree_tag_set(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);

	/* self-sanity check which should never fail */
	WARN_ON(tmp != hwlock);

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return ret;
}

static struct hwspinlock *hwspin_lock_unregister_single(unsigned int id)
{
	struct hwspinlock *hwlock = NULL;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* make sure the hwspinlock is not in use (tag is set) */
	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_err("hwspinlock %d still in use (or not present)\n", id);
		goto out;
	}

	hwlock = radix_tree_delete(&hwspinlock_tree, id);
	if (!hwlock) {
		pr_err("failed to delete hwspinlock %d\n", id);
		goto out;
	}

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}

/**
 * hwspin_lock_register() - register a new hw spinlock device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 * @dev: the backing device
 * @ops: hwspinlock handlers for this device
 * @base_id: id of the first hardware spinlock in this bank
 * @num_locks: number of hwspinlocks provided by this device
 *
 * This function should be called from the underlying platform-specific
 * implementation, to register a new hwspinlock device instance.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
		const struct hwspinlock_ops *ops, int base_id, int num_locks)
{
	struct hwspinlock *hwlock;
	int ret = 0, i;

	if (!bank || !ops || !dev || !num_locks || !ops->trylock ||
							!ops->unlock) {
		pr_err("invalid parameters\n");
		return -EINVAL;
	}

	bank->dev = dev;
	bank->ops = ops;
	bank->base_id = base_id;
	bank->num_locks = num_locks;

	for (i = 0; i < num_locks; i++) {
		hwlock = &bank->lock[i];

		spin_lock_init(&hwlock->lock);
		hwlock->bank = bank;

		ret = hwspin_lock_register_single(hwlock, base_id + i);
		if (ret)
			goto reg_failed;
	}

	return 0;

reg_failed:
	while (--i >= 0)
		hwspin_lock_unregister_single(base_id + i);
	return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_register);

/**
 * hwspin_lock_unregister() - unregister an hw spinlock device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 *
 * This function should be called from the underlying platform-specific
 * implementation, to unregister an existing (and unused) hwspinlock.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_unregister(struct hwspinlock_device *bank)
{
	struct hwspinlock *hwlock, *tmp;
	int i;

	for (i = 0; i < bank->num_locks; i++) {
		hwlock = &bank->lock[i];

		tmp = hwspin_lock_unregister_single(bank->base_id + i);
		if (!tmp)
			return -EBUSY;

		/* self-sanity check that should never fail */
		WARN_ON(tmp != hwlock);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hwspin_lock_unregister);
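
/*
 * Example (illustrative sketch of a platform driver probe; my_hwspinlock_ops
 * and NUM_LOCKS are hypothetical): since struct hwspinlock_device embeds a
 * flexible array of locks, a driver would typically do something like
 *
 *	bank = devm_kzalloc(&pdev->dev, struct_size(bank, lock, NUM_LOCKS),
 *			    GFP_KERNEL);
 *	if (!bank)
 *		return -ENOMEM;
 *	ret = hwspin_lock_register(bank, &pdev->dev, &my_hwspinlock_ops,
 *				   base_id, NUM_LOCKS);
 *
 * and call hwspin_lock_unregister(bank) from its remove path (or use the
 * devm_ variants below).
 */
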
static void devm_hwspin_lock_unreg(struct device *dev, void *res)
{
	hwspin_lock_unregister(*(struct hwspinlock_device **)res);
}

static int devm_hwspin_lock_device_match(struct device *dev, void *res,
					 void *data)
{
	struct hwspinlock_device **bank = res;

	if (WARN_ON(!bank || !*bank))
		return 0;

	return *bank == data;
}

/**
 * devm_hwspin_lock_unregister() - unregister an hw spinlock device for
 *				   a managed device
 * @dev: the backing device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 *
 * This function should be called from the underlying platform-specific
 * implementation, to unregister an existing (and unused) hwspinlock.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int devm_hwspin_lock_unregister(struct device *dev,
				struct hwspinlock_device *bank)
{
	int ret;

	ret = devres_release(dev, devm_hwspin_lock_unreg,
			     devm_hwspin_lock_device_match, bank);
	WARN_ON(ret);

	return ret;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_unregister);

/**
 * devm_hwspin_lock_register() - register a new hw spinlock device for
 *				 a managed device
 * @dev: the backing device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 * @ops: hwspinlock handlers for this device
 * @base_id: id of the first hardware spinlock in this bank
 * @num_locks: number of hwspinlocks provided by this device
 *
 * This function should be called from the underlying platform-specific
 * implementation, to register a new hwspinlock device instance.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int devm_hwspin_lock_register(struct device *dev,
			      struct hwspinlock_device *bank,
			      const struct hwspinlock_ops *ops,
			      int base_id, int num_locks)
{
	struct hwspinlock_device **ptr;
	int ret;

	ptr = devres_alloc(devm_hwspin_lock_unreg, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = hwspin_lock_register(bank, dev, ops, base_id, num_locks);
	if (!ret) {
		*ptr = bank;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_register);
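
/*
 * Example (illustrative): with the managed variant, the explicit unregister
 * call in the driver's remove path goes away:
 *
 *	ret = devm_hwspin_lock_register(&pdev->dev, bank, &my_hwspinlock_ops,
 *					base_id, num_locks);
 *	if (ret)
 *		return ret;
 */
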
/**
 * __hwspin_lock_request() - tag an hwspinlock as used and power it up
 * @hwlock: the target hwspinlock instance
 *
 * This is an internal function that prepares an hwspinlock instance
 * before it is given to the user. The function assumes that
 * hwspinlock_tree_lock is taken.
 *
 * Returns 0 or positive to indicate success, and a negative value to
 * indicate an error (with the appropriate error code)
 */
static int __hwspin_lock_request(struct hwspinlock *hwlock)
{
	struct device *dev = hwlock->bank->dev;
	struct hwspinlock *tmp;
	int ret;

	/* prevent underlying implementation from being removed */
	if (!try_module_get(dev->driver->owner)) {
		dev_err(dev, "%s: can't get owner\n", __func__);
		return -EINVAL;
	}

	/* notify PM core that power is now needed */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0 && ret != -EACCES) {
		dev_err(dev, "%s: can't power on device\n", __func__);
		pm_runtime_put_noidle(dev);
		module_put(dev->driver->owner);
		return ret;
	}

	ret = 0;

	/* mark hwspinlock as used, should not fail */
	tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock_to_id(hwlock),
				   HWSPINLOCK_UNUSED);

	/* self-sanity check that should never fail */
	WARN_ON(tmp != hwlock);

	return ret;
}

/**
 * hwspin_lock_get_id() - retrieve id number of a given hwspinlock
 * @hwlock: a valid hwspinlock instance
 *
 * Returns the id number of a given @hwlock, or -EINVAL if @hwlock is invalid.
 */
int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
	if (!hwlock) {
		pr_err("invalid hwlock\n");
		return -EINVAL;
	}

	return hwlock_to_id(hwlock);
}
EXPORT_SYMBOL_GPL(hwspin_lock_get_id);

/**
 * hwspin_lock_request() - request an hwspinlock
 *
 * This function should be called by users of the hwspinlock device,
 * in order to dynamically assign them an unused hwspinlock.
 * Usually the user of this lock will then have to communicate the lock's id
 * to the remote core before it can be used for synchronization (to get the
 * id of a given hwlock, use hwspin_lock_get_id()).
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *hwspin_lock_request(void)
{
	struct hwspinlock *hwlock;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* look for an unused lock */
	ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
					 0, 1, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_warn("a free hwspinlock is not available\n");
		hwlock = NULL;
		goto out;
	}

	/* sanity check that should never fail */
	WARN_ON(ret > 1);

	/* mark as used and power up */
	ret = __hwspin_lock_request(hwlock);
	if (ret < 0)
		hwlock = NULL;

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request);
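
/*
 * Example (illustrative): dynamically grab a free lock and publish its id to
 * the remote core (how the id is communicated is entirely up to the user):
 *
 *	struct hwspinlock *hwlock = hwspin_lock_request();
 *
 *	if (!hwlock)
 *		return -EBUSY;
 *	id = hwspin_lock_get_id(hwlock);
 *	... communicate id to the remote core ...
 */
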
/**
 * hwspin_lock_request_specific() - request for a specific hwspinlock
 * @id: index of the specific hwspinlock that is requested
 *
 * This function should be called by users of the hwspinlock module,
 * in order to assign them a specific hwspinlock.
 * Usually early board code will be calling this function in order to
 * reserve specific hwspinlock ids for predefined purposes.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
	struct hwspinlock *hwlock;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* make sure this hwspinlock exists */
	hwlock = radix_tree_lookup(&hwspinlock_tree, id);
	if (!hwlock) {
		pr_warn("hwspinlock %u does not exist\n", id);
		goto out;
	}

	/* sanity check (this shouldn't happen) */
	WARN_ON(hwlock_to_id(hwlock) != id);

	/* make sure this hwspinlock is unused */
	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_warn("hwspinlock %u is already in use\n", id);
		hwlock = NULL;
		goto out;
	}

	/* mark as used and power up */
	ret = __hwspin_lock_request(hwlock);
	if (ret < 0)
		hwlock = NULL;

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);

/**
 * hwspin_lock_free() - free a specific hwspinlock
 * @hwlock: the specific hwspinlock to free
 *
 * This function marks @hwlock as free again.
 * Should only be called with an @hwlock that was retrieved from
 * an earlier call to hwspin_lock_request{_specific}.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_free(struct hwspinlock *hwlock)
{
	struct device *dev;
	struct hwspinlock *tmp;
	int ret;

	if (!hwlock) {
		pr_err("invalid hwlock\n");
		return -EINVAL;
	}

	dev = hwlock->bank->dev;
	mutex_lock(&hwspinlock_tree_lock);

	/* make sure the hwspinlock is used */
	ret = radix_tree_tag_get(&hwspinlock_tree, hwlock_to_id(hwlock),
				 HWSPINLOCK_UNUSED);
	if (ret == 1) {
		dev_err(dev, "%s: hwlock is already free\n", __func__);
		dump_stack();
		ret = -EINVAL;
		goto out;
	}

	/* notify the underlying device that power is not needed */
	pm_runtime_put(dev);

	/* mark this hwspinlock as available */
	tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock_to_id(hwlock),
				 HWSPINLOCK_UNUSED);

	/* sanity check (this shouldn't happen) */
	WARN_ON(tmp != hwlock);

	module_put(dev->driver->owner);

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_free);
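
/*
 * Example (illustrative; MY_LOCK_ID is a made-up constant that both sides
 * of the shared resource must agree on in advance):
 *
 *	struct hwspinlock *hwlock = hwspin_lock_request_specific(MY_LOCK_ID);
 *
 *	if (!hwlock)
 *		return -EBUSY;
 *	...
 *	hwspin_lock_free(hwlock);
 */
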
static int devm_hwspin_lock_match(struct device *dev, void *res, void *data)
{
	struct hwspinlock **hwlock = res;

	if (WARN_ON(!hwlock || !*hwlock))
		return 0;

	return *hwlock == data;
}

static void devm_hwspin_lock_release(struct device *dev, void *res)
{
	hwspin_lock_free(*(struct hwspinlock **)res);
}

/**
 * devm_hwspin_lock_free() - free a specific hwspinlock for a managed device
 * @dev: the device to free the specific hwspinlock
 * @hwlock: the specific hwspinlock to free
 *
 * This function marks @hwlock as free again.
 * Should only be called with an @hwlock that was retrieved from
 * an earlier call to hwspin_lock_request{_specific}.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock)
{
	int ret;

	ret = devres_release(dev, devm_hwspin_lock_release,
			     devm_hwspin_lock_match, hwlock);
	WARN_ON(ret);

	return ret;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_free);

/**
 * devm_hwspin_lock_request() - request an hwspinlock for a managed device
 * @dev: the device to request an hwspinlock
 *
 * This function should be called by users of the hwspinlock device,
 * in order to dynamically assign them an unused hwspinlock.
 * Usually the user of this lock will then have to communicate the lock's id
 * to the remote core before it can be used for synchronization (to get the
 * id of a given hwlock, use hwspin_lock_get_id()).
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *devm_hwspin_lock_request(struct device *dev)
{
	struct hwspinlock **ptr, *hwlock;

	ptr = devres_alloc(devm_hwspin_lock_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	hwlock = hwspin_lock_request();
	if (hwlock) {
		*ptr = hwlock;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return hwlock;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_request);

/**
 * devm_hwspin_lock_request_specific() - request for a specific hwspinlock for
 *					 a managed device
 * @dev: the device to request the specific hwspinlock
 * @id: index of the specific hwspinlock that is requested
 *
 * This function should be called by users of the hwspinlock module,
 * in order to assign them a specific hwspinlock.
 * Usually early board code will be calling this function in order to
 * reserve specific hwspinlock ids for predefined purposes.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
						     unsigned int id)
{
	struct hwspinlock **ptr, *hwlock;

	ptr = devres_alloc(devm_hwspin_lock_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	hwlock = hwspin_lock_request_specific(id);
	if (hwlock) {
		*ptr = hwlock;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return hwlock;
}
EXPORT_SYMBOL_GPL(devm_hwspin_lock_request_specific);

MODULE_DESCRIPTION("Hardware spinlock interface");
MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");