/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "intel_runtime_pm.h"
#include "intel_wakeref.h"

static void rpm_get(struct intel_runtime_pm *rpm, struct intel_wakeref *wf)
{
	wf->wakeref = intel_runtime_pm_get(rpm);
}

static void rpm_put(struct intel_runtime_pm *rpm, struct intel_wakeref *wf)
{
	intel_wakeref_t wakeref = fetch_and_zero(&wf->wakeref);

	intel_runtime_pm_put(rpm, wakeref);
	INTEL_WAKEREF_BUG_ON(!wakeref);
}

int __intel_wakeref_get_first(struct intel_runtime_pm *rpm,
			      struct intel_wakeref *wf,
			      int (*fn)(struct intel_wakeref *wf))
{
	/*
	 * Treat get/put as different subclasses, as we may need to run
	 * the put callback from under the shrinker and do not want to
	 * cross-contaminate that callback with any extra work performed
	 * upon acquiring the wakeref.
	 */
	mutex_lock_nested(&wf->mutex, SINGLE_DEPTH_NESTING);
	if (!atomic_read(&wf->count)) {
		int err;

		rpm_get(rpm, wf);

		err = fn(wf);
		if (unlikely(err)) {
			rpm_put(rpm, wf);
			mutex_unlock(&wf->mutex);
			return err;
		}

		smp_mb__before_atomic(); /* release wf->count */
	}
	atomic_inc(&wf->count);
	mutex_unlock(&wf->mutex);

	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	return 0;
}

int __intel_wakeref_put_last(struct intel_runtime_pm *rpm,
			     struct intel_wakeref *wf,
			     int (*fn)(struct intel_wakeref *wf))
{
	int err;

	err = fn(wf);
	if (likely(!err))
		rpm_put(rpm, wf);
	else
		atomic_inc(&wf->count);
	mutex_unlock(&wf->mutex);

	return err;
}

void __intel_wakeref_init(struct intel_wakeref *wf, struct lock_class_key *key)
{
	__mutex_init(&wf->mutex, "wakeref", key);
	atomic_set(&wf->count, 0);
	wf->wakeref = 0;
}

static void wakeref_auto_timeout(struct timer_list *t)
{
	struct intel_wakeref_auto *wf = from_timer(wf, t, timer);
	intel_wakeref_t wakeref;
	unsigned long flags;

	if (!refcount_dec_and_lock_irqsave(&wf->count, &wf->lock, &flags))
		return;

	wakeref = fetch_and_zero(&wf->wakeref);
	spin_unlock_irqrestore(&wf->lock, flags);

	intel_runtime_pm_put(wf->rpm, wakeref);
}

void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
			     struct intel_runtime_pm *rpm)
{
	spin_lock_init(&wf->lock);
	timer_setup(&wf->timer, wakeref_auto_timeout, 0);
	refcount_set(&wf->count, 0);
	wf->wakeref = 0;
	wf->rpm = rpm;
}

void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout)
{
	unsigned long flags;

	if (!timeout) {
		if (del_timer_sync(&wf->timer))
			wakeref_auto_timeout(&wf->timer);
		return;
	}

	/* Our mission is that we only extend an already active wakeref */
	assert_rpm_wakelock_held(wf->rpm);

	if (!refcount_inc_not_zero(&wf->count)) {
		spin_lock_irqsave(&wf->lock, flags);
		if (!refcount_inc_not_zero(&wf->count)) {
			INTEL_WAKEREF_BUG_ON(wf->wakeref);
			wf->wakeref = intel_runtime_pm_get_if_in_use(wf->rpm);
			refcount_set(&wf->count, 1);
		}
		spin_unlock_irqrestore(&wf->lock, flags);
	}

	/*
	 * If we extend a pending timer, we will only get a single timer
	 * callback and so need to cancel the local inc by running the
	 * elided callback to keep the wf->count balanced.
	 */
	if (mod_timer(&wf->timer, jiffies + timeout))
		wakeref_auto_timeout(&wf->timer);
}

void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf)
{
	intel_wakeref_auto(wf, 0);
	INTEL_WAKEREF_BUG_ON(wf->wakeref);
}
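
/*
 * Illustrative sketch, not part of the original file and not compiled: the
 * slow paths above are designed to sit behind lock-free fast paths in the
 * caller (normally inline helpers in intel_wakeref.h). The wrapper names
 * below are hypothetical stand-ins showing the intended pairing only: bump
 * wf->count without taking wf->mutex when a wakeref is already held, and
 * fall back to __intel_wakeref_get_first()/__intel_wakeref_put_last() to
 * acquire or release the runtime-pm reference under wf->mutex. The
 * smp_mb__before_atomic() in __intel_wakeref_get_first() orders the setup
 * done by fn() before wf->count becomes visible to the get fast path.
 */
#if 0 /* example only */
static inline int
example_wakeref_get(struct intel_runtime_pm *rpm, struct intel_wakeref *wf,
		    int (*fn)(struct intel_wakeref *wf))
{
	/* Fast path: a wakeref is already held, just add a reference. */
	if (atomic_inc_not_zero(&wf->count))
		return 0;

	/* Slow path: take wf->mutex, acquire the wakeref and run fn(). */
	return __intel_wakeref_get_first(rpm, wf, fn);
}

static inline int
example_wakeref_put(struct intel_runtime_pm *rpm, struct intel_wakeref *wf,
		    int (*fn)(struct intel_wakeref *wf))
{
	/*
	 * Drop a reference; wf->mutex is taken only when the count hits zero,
	 * and __intel_wakeref_put_last() releases it after running fn().
	 */
	if (atomic_dec_and_mutex_lock(&wf->count, &wf->mutex))
		return __intel_wakeref_put_last(rpm, wf, fn);

	return 0;
}
#endif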