/* lib/once.c (xref: /openbmc/linux/lib/once.c, revision 3f2fb9a834cb1fcddbae22deca7fde136944dc89) */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/once.h>
#include <linux/random.h>

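/*
 * Deferred work item that disables the static key of a DO_ONCE() call
 * site once its "once" function has run.
 */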
struct once_work {
	struct work_struct work;
	struct static_key *key;
};

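/*
 * Runs from the system workqueue in process context: the key is still
 * expected to be enabled here, and static_key_slow_dec() disables it so
 * the corresponding DO_ONCE() call site skips the slow path from then on.
 */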
static void once_deferred(struct work_struct *w)
{
	struct once_work *work;

	work = container_of(w, struct once_work, work);
	BUG_ON(!static_key_enabled(work->key));
	static_key_slow_dec(work->key);
	kfree(work);
}

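/*
 * Called from __do_once_done() after once_lock has been dropped.
 * static_key_slow_dec() cannot be called from atomic context, so the
 * disable is deferred to the system workqueue; the allocation uses
 * GFP_ATOMIC because DO_ONCE() users may run in atomic context.  If the
 * allocation fails, the key simply stays enabled: *done still prevents
 * the "once" function from running again, so only the fast-path
 * optimization is lost.
 */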
static void once_disable_jump(struct static_key *key)
{
	struct once_work *w;

	w = kmalloc(sizeof(*w), GFP_ATOMIC);
	if (!w)
		return;

	INIT_WORK(&w->work, once_deferred);
	w->key = key;
	schedule_work(&w->work);
}

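/* A single global lock serializes the *done checks of all DO_ONCE() users. */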
static DEFINE_SPINLOCK(once_lock);

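/*
 * Returns true if the caller is the first one through and must run its
 * "once" function followed by __do_once_done(), with once_lock held
 * across that window.  Returns false, with the lock dropped again, if
 * another caller has already completed the work.
 */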
bool __do_once_start(bool *done, unsigned long *flags)
	__acquires(once_lock)
{
	spin_lock_irqsave(&once_lock, *flags);
	if (*done) {
		spin_unlock_irqrestore(&once_lock, *flags);
		/* Keep sparse happy by restoring an even lock count on
		 * this lock. In case we return here, we don't call into
		 * __do_once_done but return early in the DO_ONCE() macro.
		 */
		__acquire(once_lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(__do_once_start);

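/*
 * Marks the call site as done, releases the lock taken in
 * __do_once_start() and schedules the deferred disabling of the
 * static key.
 */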
void __do_once_done(bool *done, struct static_key *once_key,
		    unsigned long *flags)
	__releases(once_lock)
{
	*done = true;
	spin_unlock_irqrestore(&once_lock, *flags);
	once_disable_jump(once_key);
}
EXPORT_SYMBOL(__do_once_done);
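
/*
 * Illustrative usage sketch (not part of this file): these helpers are
 * intended to be driven by the DO_ONCE() machinery in <linux/once.h>,
 * for example through get_random_once().  The names example_secret and
 * example_init_secret below are hypothetical and only show the calling
 * pattern; the snippet is guarded out so it is not built here.
 */
#if 0
static u32 example_secret[4];

static void example_init_secret(void)
{
	/*
	 * Fills example_secret with random bytes exactly once across all
	 * callers: the first one runs get_random_bytes() between
	 * __do_once_start() and __do_once_done(); everyone else returns
	 * early, first via the *done flag and, once once_deferred() has
	 * run, via the disabled static key.
	 */
	get_random_once(example_secret, sizeof(example_secret));
}
#endif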