xref: /openbmc/linux/lib/klist.c (revision 6a6d6681ac1add9655b7ab5dd0b46b54aeb1b44f)
1  /*
2   * klist.c - Routines for manipulating klists.
3   *
4   * Copyright (C) 2005 Patrick Mochel
5   *
6   * This file is released under the GPL v2.
7   *
8   * This klist interface provides a couple of structures that wrap around
9   * struct list_head to provide explicit list "head" (struct klist) and list
10   * "node" (struct klist_node) objects. For struct klist, a spinlock is
11   * included that protects access to the actual list itself. struct
12   * klist_node provides a pointer to the klist that owns it and a kref
13   * reference count that indicates the number of current users of that node
14   * in the list.
15   *
16   * The entire point is to provide an interface for iterating over a list
17   * that is safe and allows for modification of the list during the
18   * iteration (e.g. insertion and removal), including modification of the
19   * current node on the list.
20   *
21   * It works using a 3rd object type - struct klist_iter - that is declared
22   * and initialized before an iteration. klist_next() is used to acquire the
23   * next element in the list. It returns NULL if there are no more items.
24   * Internally, that routine takes the klist's lock, decrements the
25   * reference count of the previous klist_node and increments the count of
26   * the next klist_node. It then drops the lock and returns.
27   *
28   * There are primitives for adding and removing nodes to/from a klist.
29   * When deleting, klist_del() will simply decrement the reference count.
30   * Only when the count goes to 0 is the node removed from the list.
31   * klist_remove() will try to delete the node from the list and block until
32   * it is actually removed. This is useful for objects (like devices) that
33   * have been removed from the system and must be freed (but must wait until
34   * all accessors have finished).
35   */
36  
37  #include <linux/klist.h>
38  #include <linux/export.h>
39  #include <linux/sched.h>
40  
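/*
 * Usage sketch (illustrative only, not part of the original file): a
 * hypothetical embedding object "struct my_obj" and helper do_work()
 * show the pattern described in the header comment above.
 *
 *	struct my_obj {
 *		struct klist_node knode;
 *	};
 *
 *	static struct klist my_list;
 *	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *	struct klist_iter iter;
 *	struct klist_node *n;
 *
 *	klist_init(&my_list, NULL, NULL);
 *	klist_add_tail(&obj->knode, &my_list);
 *
 *	klist_iter_init(&my_list, &iter);
 *	while ((n = klist_next(&iter)))
 *		do_work(container_of(n, struct my_obj, knode));
 *	klist_iter_exit(&iter);
 *
 *	klist_del(&obj->knode);
 */
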
41  /*
42   * Use the lowest bit of n_klist to mark deleted nodes and exclude dead
43   * ones from iteration; n_klist is at least word-aligned, so bit 0 is free.
44   */
45  #define KNODE_DEAD		1LU
46  #define KNODE_KLIST_MASK	~KNODE_DEAD
47  
48  static struct klist *knode_klist(struct klist_node *knode)
49  {
50  	return (struct klist *)
51  		((unsigned long)knode->n_klist & KNODE_KLIST_MASK);
52  }
53  
54  static bool knode_dead(struct klist_node *knode)
55  {
56  	return (unsigned long)knode->n_klist & KNODE_DEAD;
57  }
58  
59  static void knode_set_klist(struct klist_node *knode, struct klist *klist)
60  {
61  	knode->n_klist = klist;
62  	/* no knode deserves to start its life dead */
63  	WARN_ON(knode_dead(knode));
64  }
65  
66  static void knode_kill(struct klist_node *knode)
67  {
68  	/* and no knode should die twice ever either, see we're very humane */
69  	WARN_ON(knode_dead(knode));
70  	*(unsigned long *)&knode->n_klist |= KNODE_DEAD;
71  }
72  
73  /**
74   * klist_init - Initialize a klist structure.
75   * @k: The klist we're initializing.
76   * @get: The get function for the embedding object (NULL if none)
77   * @put: The put function for the embedding object (NULL if none)
78   *
79   * Initialises the klist structure.  If the klist_node structures are
80   * going to be embedded in refcounted objects (necessary for safe
81   * deletion) then the get/put arguments are used to initialise
82   * functions that take and release references on the embedding
83   * objects.
84   */
85  void klist_init(struct klist *k, void (*get)(struct klist_node *),
86  		void (*put)(struct klist_node *))
87  {
88  	INIT_LIST_HEAD(&k->k_list);
89  	spin_lock_init(&k->k_lock);
90  	k->get = get;
91  	k->put = put;
92  }
93  EXPORT_SYMBOL_GPL(klist_init);
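
/*
 * Sketch of get/put hooks for a kref-counted embedding object
 * (illustrative only; "struct my_obj", its "ref" member and
 * my_obj_release() are hypothetical):
 *
 *	static void my_obj_get(struct klist_node *n)
 *	{
 *		kref_get(&container_of(n, struct my_obj, knode)->ref);
 *	}
 *
 *	static void my_obj_put(struct klist_node *n)
 *	{
 *		kref_put(&container_of(n, struct my_obj, knode)->ref,
 *			 my_obj_release);
 *	}
 *
 *	klist_init(&my_list, my_obj_get, my_obj_put);
 *
 * With these hooks in place, every node on the list pins its embedding
 * object from klist_add_*() until the node's last reference is dropped.
 */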
94  
95  static void add_head(struct klist *k, struct klist_node *n)
96  {
97  	spin_lock(&k->k_lock);
98  	list_add(&n->n_node, &k->k_list);
99  	spin_unlock(&k->k_lock);
100  }
101  
102  static void add_tail(struct klist *k, struct klist_node *n)
103  {
104  	spin_lock(&k->k_lock);
105  	list_add_tail(&n->n_node, &k->k_list);
106  	spin_unlock(&k->k_lock);
107  }
108  
109  static void klist_node_init(struct klist *k, struct klist_node *n)
110  {
111  	INIT_LIST_HEAD(&n->n_node);
112  	kref_init(&n->n_ref);
113  	knode_set_klist(n, k);
114  	if (k->get)
115  		k->get(n);
116  }
117  
118  /**
119   * klist_add_head - Initialize a klist_node and add it to front.
120   * @n: node we're adding.
121   * @k: klist it's going on.
122   */
123  void klist_add_head(struct klist_node *n, struct klist *k)
124  {
125  	klist_node_init(k, n);
126  	add_head(k, n);
127  }
128  EXPORT_SYMBOL_GPL(klist_add_head);
129  
130  /**
131   * klist_add_tail - Initialize a klist_node and add it to back.
132   * @n: node we're adding.
133   * @k: klist it's going on.
134   */
135  void klist_add_tail(struct klist_node *n, struct klist *k)
136  {
137  	klist_node_init(k, n);
138  	add_tail(k, n);
139  }
140  EXPORT_SYMBOL_GPL(klist_add_tail);
141  
142  /**
143   * klist_add_behind - Init a klist_node and add it after an existing node
144   * @n: node we're adding.
145   * @pos: node to put @n after
146   */
147  void klist_add_behind(struct klist_node *n, struct klist_node *pos)
148  {
149  	struct klist *k = knode_klist(pos);
150  
151  	klist_node_init(k, n);
152  	spin_lock(&k->k_lock);
153  	list_add(&n->n_node, &pos->n_node);
154  	spin_unlock(&k->k_lock);
155  }
156  EXPORT_SYMBOL_GPL(klist_add_behind);
157  
158  /**
159   * klist_add_before - Init a klist_node and add it before an existing node
160   * @n: node we're adding.
161   * @pos: node to put @n before
162   */
163  void klist_add_before(struct klist_node *n, struct klist_node *pos)
164  {
165  	struct klist *k = knode_klist(pos);
166  
167  	klist_node_init(k, n);
168  	spin_lock(&k->k_lock);
169  	list_add_tail(&n->n_node, &pos->n_node);
170  	spin_unlock(&k->k_lock);
171  }
172  EXPORT_SYMBOL_GPL(klist_add_before);
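
/*
 * Positional insertion sketch (illustrative; "pos" is assumed to be a
 * klist_node already on some klist):
 *
 *	klist_add_behind(&obj->knode, pos);	// obj ends up after pos
 *	klist_add_before(&obj->knode, pos);	// obj ends up before pos
 *
 * Both helpers take the owning klist from @pos, so the caller does not
 * pass a struct klist explicitly.
 */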
173  
174  struct klist_waiter {
175  	struct list_head list;
176  	struct klist_node *node;
177  	struct task_struct *process;
178  	int woken;
179  };
180  
181  static DEFINE_SPINLOCK(klist_remove_lock);
182  static LIST_HEAD(klist_remove_waiters);
183  
184  static void klist_release(struct kref *kref)
185  {
186  	struct klist_waiter *waiter, *tmp;
187  	struct klist_node *n = container_of(kref, struct klist_node, n_ref);
188  
189  	WARN_ON(!knode_dead(n));
190  	list_del(&n->n_node);
191  	spin_lock(&klist_remove_lock);
192  	list_for_each_entry_safe(waiter, tmp, &klist_remove_waiters, list) {
193  		if (waiter->node != n)
194  			continue;
195  
196  		list_del(&waiter->list);
197  		waiter->woken = 1;
198  		mb();
199  		wake_up_process(waiter->process);
200  	}
201  	spin_unlock(&klist_remove_lock);
202  	knode_set_klist(n, NULL);
203  }
204  
205  static int klist_dec_and_del(struct klist_node *n)
206  {
207  	return kref_put(&n->n_ref, klist_release);
208  }
209  
210  static void klist_put(struct klist_node *n, bool kill)
211  {
212  	struct klist *k = knode_klist(n);
213  	void (*put)(struct klist_node *) = k->put;
214  
215  	spin_lock(&k->k_lock);
216  	if (kill)
217  		knode_kill(n);
218  	if (!klist_dec_and_del(n))
219  		put = NULL;
220  	spin_unlock(&k->k_lock);
221  	if (put)
222  		put(n);
223  }
224  
225  /**
226   * klist_del - Decrement the reference count of node and try to remove.
227   * @n: node we're deleting.
228   */
229  void klist_del(struct klist_node *n)
230  {
231  	klist_put(n, true);
232  }
233  EXPORT_SYMBOL_GPL(klist_del);
234  
235  /**
236   * klist_remove - Decrement the refcount of node and wait for it to go away.
237   * @n: node we're removing.
238   */
239  void klist_remove(struct klist_node *n)
240  {
241  	struct klist_waiter waiter;
242  
243  	waiter.node = n;
244  	waiter.process = current;
245  	waiter.woken = 0;
246  	spin_lock(&klist_remove_lock);
247  	list_add(&waiter.list, &klist_remove_waiters);
248  	spin_unlock(&klist_remove_lock);
249  
250  	klist_del(n);
251  
252  	for (;;) {
253  		set_current_state(TASK_UNINTERRUPTIBLE);
254  		if (waiter.woken)
255  			break;
256  		schedule();
257  	}
258  	__set_current_state(TASK_RUNNING);
259  }
260  EXPORT_SYMBOL_GPL(klist_remove);
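
/*
 * Teardown sketch (illustrative; my_obj_unregister() is hypothetical):
 *
 *	static void my_obj_unregister(struct my_obj *obj)
 *	{
 *		// blocks until every iterator has dropped obj->knode
 *		klist_remove(&obj->knode);
 *		kfree(obj);
 *	}
 *
 * Because klist_remove() sleeps until the node's reference count reaches
 * zero, it must not be called while the caller itself still holds a
 * reference to that node (e.g. from within an iteration positioned on
 * it), or it would wait forever.
 */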
261  
262  /**
263   * klist_node_attached - Say whether a node is bound to a list or not.
264   * @n: Node that we're testing.
265   */
266  int klist_node_attached(struct klist_node *n)
267  {
268  	return (n->n_klist != NULL);
269  }
270  EXPORT_SYMBOL_GPL(klist_node_attached);
271  
272  /**
273   * klist_iter_init_node - Initialize a klist_iter structure.
274   * @k: klist we're iterating.
275   * @i: klist_iter we're filling.
276   * @n: node to start with.
277   *
278   * Similar to klist_iter_init(), but starts the action off with @n,
279   * instead of with the list head.
280   */
281  void klist_iter_init_node(struct klist *k, struct klist_iter *i,
282  			  struct klist_node *n)
283  {
284  	i->i_klist = k;
285  	i->i_cur = NULL;
286  	if (n && kref_get_unless_zero(&n->n_ref))
287  		i->i_cur = n;
288  }
289  EXPORT_SYMBOL_GPL(klist_iter_init_node);
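
/*
 * Resume sketch (illustrative; "start" is a node that may or may not
 * still be live on the list):
 *
 *	klist_iter_init_node(&my_list, &iter, start);
 *	while ((n = klist_next(&iter)))
 *		do_work(container_of(n, struct my_obj, knode));
 *	klist_iter_exit(&iter);
 *
 * If @start's reference count has already dropped to zero, the
 * kref_get_unless_zero() above leaves i_cur NULL and the subsequent
 * klist_next() simply begins at the head of the list.
 */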
290  
291  /**
292   * klist_iter_init - Initialize a klist_iter structure.
293   * @k: klist we're iterating.
294   * @i: klist_iter structure we're filling.
295   *
296   * Similar to klist_iter_init_node(), but starts with the list head.
297   */
298  void klist_iter_init(struct klist *k, struct klist_iter *i)
299  {
300  	klist_iter_init_node(k, i, NULL);
301  }
302  EXPORT_SYMBOL_GPL(klist_iter_init);
303  
304  /**
305   * klist_iter_exit - Finish a list iteration.
306   * @i: Iterator structure.
307   *
308   * Must be called when done iterating over list, as it decrements the
309   * refcount of the current node. Necessary in case iteration exited before
310   * the end of the list was reached, and always good form.
311   */
312  void klist_iter_exit(struct klist_iter *i)
313  {
314  	if (i->i_cur) {
315  		klist_put(i->i_cur, false);
316  		i->i_cur = NULL;
317  	}
318  }
319  EXPORT_SYMBOL_GPL(klist_iter_exit);
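
/*
 * Early-exit sketch (illustrative; found() is hypothetical): the final
 * klist_iter_exit() is what drops the reference on the node the walk
 * stopped at, so it is needed even when the loop breaks out early:
 *
 *	klist_iter_init(&my_list, &iter);
 *	while ((n = klist_next(&iter))) {
 *		if (found(n))
 *			break;
 *	}
 *	klist_iter_exit(&iter);
 */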
320  
321  static struct klist_node *to_klist_node(struct list_head *n)
322  {
323  	return container_of(n, struct klist_node, n_node);
324  }
325  
326  /**
327   * klist_prev - Ante up prev node in list.
328   * @i: Iterator structure.
329   *
330   * First grab list lock. Decrement the reference count of the previous
331   * node, if there was one. Grab the prev node, increment its reference
332   * count, drop the lock, and return that prev node.
333   */
334  struct klist_node *klist_prev(struct klist_iter *i)
335  {
336  	void (*put)(struct klist_node *) = i->i_klist->put;
337  	struct klist_node *last = i->i_cur;
338  	struct klist_node *prev;
339  	unsigned long flags;
340  
341  	spin_lock_irqsave(&i->i_klist->k_lock, flags);
342  
343  	if (last) {
344  		prev = to_klist_node(last->n_node.prev);
345  		if (!klist_dec_and_del(last))
346  			put = NULL;
347  	} else
348  		prev = to_klist_node(i->i_klist->k_list.prev);
349  
350  	i->i_cur = NULL;
351  	while (prev != to_klist_node(&i->i_klist->k_list)) {
352  		if (likely(!knode_dead(prev))) {
353  			kref_get(&prev->n_ref);
354  			i->i_cur = prev;
355  			break;
356  		}
357  		prev = to_klist_node(prev->n_node.prev);
358  	}
359  
360  	spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
361  
362  	if (put && last)
363  		put(last);
364  	return i->i_cur;
365  }
366  EXPORT_SYMBOL_GPL(klist_prev);
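
/*
 * Reverse-walk sketch (illustrative): with a freshly initialized
 * iterator, klist_prev() starts from the tail and steps backwards:
 *
 *	klist_iter_init(&my_list, &iter);
 *	while ((n = klist_prev(&iter)))
 *		do_work(container_of(n, struct my_obj, knode));
 *	klist_iter_exit(&iter);
 */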
367  
368  /**
369   * klist_next - Ante up next node in list.
370   * @i: Iterator structure.
371   *
372   * First grab list lock. Decrement the reference count of the previous
373   * node, if there was one. Grab the next node, increment its reference
374   * count, drop the lock, and return that next node.
375   */
376  struct klist_node *klist_next(struct klist_iter *i)
377  {
378  	void (*put)(struct klist_node *) = i->i_klist->put;
379  	struct klist_node *last = i->i_cur;
380  	struct klist_node *next;
381  	unsigned long flags;
382  
383  	spin_lock_irqsave(&i->i_klist->k_lock, flags);
384  
385  	if (last) {
386  		next = to_klist_node(last->n_node.next);
387  		if (!klist_dec_and_del(last))
388  			put = NULL;
389  	} else
390  		next = to_klist_node(i->i_klist->k_list.next);
391  
392  	i->i_cur = NULL;
393  	while (next != to_klist_node(&i->i_klist->k_list)) {
394  		if (likely(!knode_dead(next))) {
395  			kref_get(&next->n_ref);
396  			i->i_cur = next;
397  			break;
398  		}
399  		next = to_klist_node(next->n_node.next);
400  	}
401  
402  	spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
403  
404  	if (put && last)
405  		put(last);
406  	return i->i_cur;
407  }
408  EXPORT_SYMBOL_GPL(klist_next);
409