// SPDX-License-Identifier: GPL-2.0
/* Watch queue and general notification mechanism, built on pipes
 *
 * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * See Documentation/core-api/watch_queue.rst
 */

#define pr_fmt(fmt) "watchq: " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/printk.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/sched/signal.h>
#include <linux/watch_queue.h>
#include <linux/pipe_fs_i.h>

MODULE_DESCRIPTION("Watch queue");
MODULE_AUTHOR("Red Hat, Inc.");

#define WATCH_QUEUE_NOTE_SIZE 128
#define WATCH_QUEUE_NOTES_PER_PAGE (PAGE_SIZE / WATCH_QUEUE_NOTE_SIZE)
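/* With the common 4 KiB PAGE_SIZE this works out to 32 notes per page, each
 * note being at most 128 bytes long.
 */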

/*
 * This must be called under the RCU read-lock, which makes
 * sure that the wqueue still exists. It can then take the lock,
 * and check that the wqueue hasn't been destroyed, which in
 * turn makes sure that the notification pipe still exists.
 */
static inline bool lock_wqueue(struct watch_queue *wqueue)
{
	spin_lock_bh(&wqueue->lock);
	if (unlikely(!wqueue->pipe)) {
		spin_unlock_bh(&wqueue->lock);
		return false;
	}
	return true;
}

static inline void unlock_wqueue(struct watch_queue *wqueue)
{
	spin_unlock_bh(&wqueue->lock);
}
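
/* The canonical calling pattern, used by __post_watch_notification() below:
 * take the RCU read lock, dereference the queue pointer, then let
 * lock_wqueue() decide whether the queue is still live:
 *
 *	rcu_read_lock();
 *	wqueue = rcu_dereference(watch->queue);
 *	if (lock_wqueue(wqueue)) {
 *		post_one_notification(wqueue, n);
 *		unlock_wqueue(wqueue);
 *	}
 *	rcu_read_unlock();
 */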

static void watch_queue_pipe_buf_release(struct pipe_inode_info *pipe,
					 struct pipe_buffer *buf)
{
	struct watch_queue *wqueue = (struct watch_queue *)buf->private;
	struct page *page;
	unsigned int bit;

	/* We need to work out which note within the page this refers to, but
	 * the note might have been maximum size, so merely ANDing the offset
	 * off doesn't work.  OTOH, the note must've been more than zero size.
	 */
	bit = buf->offset + buf->len;
	if ((bit & (WATCH_QUEUE_NOTE_SIZE - 1)) == 0)
		bit -= WATCH_QUEUE_NOTE_SIZE;
	bit /= WATCH_QUEUE_NOTE_SIZE;

	page = buf->page;
	bit += page->index;

	set_bit(bit, wqueue->notes_bitmap);
	generic_pipe_buf_release(pipe, buf);
}
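
/* Worked example of the slot recovery above: offset + len is used rather
 * than offset alone because the pipe reader advances buf->offset as data is
 * consumed, whereas the end of the note stays put.  A maximum-sized note in
 * slot 0 ends at byte 128, and 128 / WATCH_QUEUE_NOTE_SIZE would wrongly
 * give slot 1, so an end that lands exactly on a note boundary is first
 * stepped back by one note.  A 40-byte note in slot 2 ends at byte 296,
 * which divides directly to 2.
 */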

// No try_steal function => no stealing
#define watch_queue_pipe_buf_try_steal NULL

/* New data written to a pipe may be appended to a buffer with this type. */
static const struct pipe_buf_operations watch_queue_pipe_buf_ops = {
	.release = watch_queue_pipe_buf_release,
	.try_steal = watch_queue_pipe_buf_try_steal,
	.get = generic_pipe_buf_get,
};

/*
 * Post a notification to a watch queue.
 *
 * Must be called with the RCU lock for reading, and the
 * watch_queue lock held, which guarantees that the pipe
 * hasn't been released.
 */
static bool post_one_notification(struct watch_queue *wqueue,
				  struct watch_notification *n)
{
	void *p;
	struct pipe_inode_info *pipe = wqueue->pipe;
	struct pipe_buffer *buf;
	struct page *page;
	unsigned int head, tail, mask, note, offset, len;
	bool done = false;

	spin_lock_irq(&pipe->rd_wait.lock);

	mask = pipe->ring_size - 1;
	head = pipe->head;
	tail = pipe->tail;
	if (pipe_full(head, tail, pipe->ring_size))
		goto lost;

	note = find_first_bit(wqueue->notes_bitmap, wqueue->nr_notes);
	if (note >= wqueue->nr_notes)
		goto lost;

	page = wqueue->notes[note / WATCH_QUEUE_NOTES_PER_PAGE];
	offset = note % WATCH_QUEUE_NOTES_PER_PAGE * WATCH_QUEUE_NOTE_SIZE;
	get_page(page);
	len = n->info & WATCH_INFO_LENGTH;
	p = kmap_atomic(page);
	memcpy(p + offset, n, len);
	kunmap_atomic(p);

	buf = &pipe->bufs[head & mask];
	buf->page = page;
	buf->private = (unsigned long)wqueue;
	buf->ops = &watch_queue_pipe_buf_ops;
	buf->offset = offset;
	buf->len = len;
	buf->flags = PIPE_BUF_FLAG_WHOLE;
	smp_store_release(&pipe->head, head + 1); /* vs pipe_read() */

	if (!test_and_clear_bit(note, wqueue->notes_bitmap)) {
		spin_unlock_irq(&pipe->rd_wait.lock);
		BUG();
	}
	wake_up_interruptible_sync_poll_locked(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
	done = true;

out:
	spin_unlock_irq(&pipe->rd_wait.lock);
	if (done)
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	return done;

lost:
	buf = &pipe->bufs[(head - 1) & mask];
	buf->flags |= PIPE_BUF_FLAG_LOSS;
	goto out;
}
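
/* Note on the "lost" path above: PIPE_BUF_FLAG_LOSS is set on the most
 * recently committed buffer.  The intent is that the read side of the pipe
 * (see fs/pipe.c) turns this flag into a WATCH_TYPE_META /
 * WATCH_META_LOSS_NOTIFICATION message, so userspace can tell that one or
 * more notifications were dropped.
 */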

/*
 * Apply filter rules to a notification.
 */
static bool filter_watch_notification(const struct watch_filter *wf,
				      const struct watch_notification *n)
{
	const struct watch_type_filter *wt;
	unsigned int st_bits = sizeof(wt->subtype_filter[0]) * 8;
	unsigned int st_index = n->subtype / st_bits;
	unsigned int st_bit = 1U << (n->subtype % st_bits);
	int i;

	if (!test_bit(n->type, wf->type_filter))
		return false;

	for (i = 0; i < wf->nr_filters; i++) {
		wt = &wf->filters[i];
		if (n->type == wt->type &&
		    (wt->subtype_filter[st_index] & st_bit) &&
		    (n->info & wt->info_mask) == wt->info_filter)
			return true;
	}

	return false; /* If there is a filter, the default is to reject. */
}
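
/* Worked example of the subtype lookup above: if subtype_filter[0] is a
 * 32-bit word, st_bits is 32, so a notification with subtype 35 yields
 * st_index 1 and st_bit 1 << 3, and it passes a filter whose
 * subtype_filter[1] has bit 3 set.
 */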

/**
 * __post_watch_notification - Post an event notification
 * @wlist: The watch list to post the event to.
 * @n: The notification record to post.
 * @cred: The creds of the process that triggered the notification.
 * @id: The ID to match on the watch.
 *
 * Post a notification of an event into a set of watch queues and let the users
 * know.
 *
 * The size of the notification should be set in n->info & WATCH_INFO_LENGTH
 * and should be in units of sizeof(*n).
 */
void __post_watch_notification(struct watch_list *wlist,
			       struct watch_notification *n,
			       const struct cred *cred,
			       u64 id)
{
	const struct watch_filter *wf;
	struct watch_queue *wqueue;
	struct watch *watch;

	if (((n->info & WATCH_INFO_LENGTH) >> WATCH_INFO_LENGTH__SHIFT) == 0) {
		WARN_ON(1);
		return;
	}

	rcu_read_lock();

	hlist_for_each_entry_rcu(watch, &wlist->watchers, list_node) {
		if (watch->id != id)
			continue;
		n->info &= ~WATCH_INFO_ID;
		n->info |= watch->info_id;

		wqueue = rcu_dereference(watch->queue);
		wf = rcu_dereference(wqueue->filter);
		if (wf && !filter_watch_notification(wf, n))
			continue;

		if (security_post_notification(watch->cred, cred, n) < 0)
			continue;

		if (lock_wqueue(wqueue)) {
			post_one_notification(wqueue, n);
			unlock_wqueue(wqueue);
		}
	}

	rcu_read_unlock();
}
EXPORT_SYMBOL(__post_watch_notification);
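
/* A minimal sketch of a poster, assuming a subsystem with its own watch_list
 * (my_obj->watchers is hypothetical, and WATCH_TYPE_META stands in for the
 * subsystem's own type).  The record length is encoded into ->info with
 * watch_sizeof(); most callers go through the post_watch_notification()
 * wrapper in <linux/watch_queue.h>, which skips the call when no watch list
 * exists:
 *
 *	struct watch_notification n = {
 *		.type	 = WATCH_TYPE_META,
 *		.subtype = WATCH_META_REMOVAL_NOTIFICATION,
 *		.info	 = watch_sizeof(n),
 *	};
 *
 *	post_watch_notification(my_obj->watchers, &n, current_cred(), 0);
 */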

/*
 * Allocate sufficient pages to preallocate space for the requested number of
 * notifications.
 */
long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes)
{
	struct watch_queue *wqueue = pipe->watch_queue;
	struct page **pages;
	unsigned long *bitmap;
	unsigned long user_bufs;
	int ret, i, nr_pages;

	if (!wqueue)
		return -ENODEV;
	if (wqueue->notes)
		return -EBUSY;

	if (nr_notes < 1 ||
	    nr_notes > 512) /* TODO: choose a better hard limit */
		return -EINVAL;

	nr_pages = (nr_notes + WATCH_QUEUE_NOTES_PER_PAGE - 1);
	nr_pages /= WATCH_QUEUE_NOTES_PER_PAGE;
	user_bufs = account_pipe_buffers(pipe->user, pipe->nr_accounted, nr_pages);

	if (nr_pages > pipe->max_usage &&
	    (too_many_pipe_buffers_hard(user_bufs) ||
	     too_many_pipe_buffers_soft(user_bufs)) &&
	    pipe_is_unprivileged_user()) {
		ret = -EPERM;
		goto error;
	}

	nr_notes = nr_pages * WATCH_QUEUE_NOTES_PER_PAGE;
	ret = pipe_resize_ring(pipe, roundup_pow_of_two(nr_notes));
	if (ret < 0)
		goto error;

	/*
	 * pipe_resize_ring() does not update nr_accounted for watch_queue
	 * pipes, because the above vastly overprovisions.  Set nr_accounted
	 * and max_usage on this pipe to the number of pages that was actually
	 * charged to the user above via account_pipe_buffers().
	 */
	pipe->max_usage = nr_pages;
	pipe->nr_accounted = nr_pages;

	ret = -ENOMEM;
	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto error;

	for (i = 0; i < nr_pages; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto error_p;
		pages[i]->index = i * WATCH_QUEUE_NOTES_PER_PAGE;
	}

	bitmap = bitmap_alloc(nr_notes, GFP_KERNEL);
	if (!bitmap)
		goto error_p;

	bitmap_fill(bitmap, nr_notes);
	wqueue->notes = pages;
	wqueue->notes_bitmap = bitmap;
	wqueue->nr_pages = nr_pages;
	wqueue->nr_notes = nr_notes;
	return 0;

error_p:
	while (--i >= 0)
		__free_page(pages[i]);
	kfree(pages);
error:
	(void) account_pipe_buffers(pipe->user, nr_pages, pipe->nr_accounted);
	return ret;
}
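
/* Sizing example (a sketch of the userspace side; see
 * Documentation/core-api/watch_queue.rst): the queue is sized with an ioctl
 * on a pipe created with O_NOTIFICATION_PIPE.  Asking for 300 notes on a
 * 4 KiB-page system rounds up to 10 pages (320 notes), and the ring is
 * resized to the next power of two, 512 slots, though only 10 pages are
 * accounted to the user:
 *
 *	int fds[2];
 *
 *	pipe2(fds, O_NOTIFICATION_PIPE);
 *	ioctl(fds[0], IOC_WATCH_QUEUE_SET_SIZE, 300);
 */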

/*
 * Set the filter on a watch queue.
 */
long watch_queue_set_filter(struct pipe_inode_info *pipe,
			    struct watch_notification_filter __user *_filter)
{
	struct watch_notification_type_filter *tf;
	struct watch_notification_filter filter;
	struct watch_type_filter *q;
	struct watch_filter *wfilter;
	struct watch_queue *wqueue = pipe->watch_queue;
	int ret, nr_filter = 0, i;

	if (!wqueue)
		return -ENODEV;

	if (!_filter) {
		/* Remove the old filter */
		wfilter = NULL;
		goto set;
	}

	/* Grab the user's filter specification */
	if (copy_from_user(&filter, _filter, sizeof(filter)) != 0)
		return -EFAULT;
	if (filter.nr_filters == 0 ||
	    filter.nr_filters > 16 ||
	    filter.__reserved != 0)
		return -EINVAL;

	tf = memdup_array_user(_filter->filters, filter.nr_filters, sizeof(*tf));
	if (IS_ERR(tf))
		return PTR_ERR(tf);

	ret = -EINVAL;
	for (i = 0; i < filter.nr_filters; i++) {
		if ((tf[i].info_filter & ~tf[i].info_mask) ||
		    tf[i].info_mask & WATCH_INFO_LENGTH)
			goto err_filter;
		/* Ignore any unknown types */
		if (tf[i].type >= WATCH_TYPE__NR)
			continue;
		nr_filter++;
	}

	/* Now we need to build the internal filter from only the relevant
	 * user-specified filters.
	 */
	ret = -ENOMEM;
	wfilter = kzalloc(struct_size(wfilter, filters, nr_filter), GFP_KERNEL);
	if (!wfilter)
		goto err_filter;
	wfilter->nr_filters = nr_filter;

	q = wfilter->filters;
	for (i = 0; i < filter.nr_filters; i++) {
		if (tf[i].type >= WATCH_TYPE__NR)
			continue;

		q->type = tf[i].type;
		q->info_filter = tf[i].info_filter;
		q->info_mask = tf[i].info_mask;
		q->subtype_filter[0] = tf[i].subtype_filter[0];
		__set_bit(q->type, wfilter->type_filter);
		q++;
	}

	kfree(tf);
set:
	pipe_lock(pipe);
	wfilter = rcu_replace_pointer(wqueue->filter, wfilter,
				      lockdep_is_held(&pipe->mutex));
	pipe_unlock(pipe);
	if (wfilter)
		kfree_rcu(wfilter, rcu);
	return 0;

err_filter:
	kfree(tf);
	return ret;
}
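
/* A sketch of installing a filter from userspace (the subtype bits chosen
 * here are illustrative): accept only WATCH_TYPE_META notifications with
 * subtypes 0 and 1, rejecting everything else on the queue:
 *
 *	struct watch_notification_filter *f;
 *
 *	f = calloc(1, sizeof(*f) + sizeof(f->filters[0]));
 *	f->nr_filters = 1;
 *	f->filters[0].type = WATCH_TYPE_META;
 *	f->filters[0].subtype_filter[0] = 0x3;
 *	ioctl(fds[0], IOC_WATCH_QUEUE_SET_FILTER, f);
 */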

static void __put_watch_queue(struct kref *kref)
{
	struct watch_queue *wqueue =
		container_of(kref, struct watch_queue, usage);
	struct watch_filter *wfilter;
	int i;

	for (i = 0; i < wqueue->nr_pages; i++)
		__free_page(wqueue->notes[i]);
	kfree(wqueue->notes);
	bitmap_free(wqueue->notes_bitmap);

	wfilter = rcu_access_pointer(wqueue->filter);
	if (wfilter)
		kfree_rcu(wfilter, rcu);
	kfree_rcu(wqueue, rcu);
}

/**
 * put_watch_queue - Dispose of a ref on a watchqueue.
 * @wqueue: The watch queue to unref.
 */
void put_watch_queue(struct watch_queue *wqueue)
{
	kref_put(&wqueue->usage, __put_watch_queue);
}
EXPORT_SYMBOL(put_watch_queue);

static void free_watch(struct rcu_head *rcu)
{
	struct watch *watch = container_of(rcu, struct watch, rcu);

	put_watch_queue(rcu_access_pointer(watch->queue));
	atomic_dec(&watch->cred->user->nr_watches);
	put_cred(watch->cred);
	kfree(watch);
}

static void __put_watch(struct kref *kref)
{
	struct watch *watch = container_of(kref, struct watch, usage);

	call_rcu(&watch->rcu, free_watch);
}

/*
 * Discard a watch.
 */
static void put_watch(struct watch *watch)
{
	kref_put(&watch->usage, __put_watch);
}

/**
 * init_watch - Initialise a watch
 * @watch: The watch to initialise.
 * @wqueue: The queue to assign.
 *
 * Initialise a watch and set the watch queue.
 */
void init_watch(struct watch *watch, struct watch_queue *wqueue)
{
	kref_init(&watch->usage);
	INIT_HLIST_NODE(&watch->list_node);
	INIT_HLIST_NODE(&watch->queue_node);
	rcu_assign_pointer(watch->queue, wqueue);
}

static int add_one_watch(struct watch *watch, struct watch_list *wlist, struct watch_queue *wqueue)
{
	const struct cred *cred;
	struct watch *w;

	hlist_for_each_entry(w, &wlist->watchers, list_node) {
		struct watch_queue *wq = rcu_access_pointer(w->queue);
		if (wqueue == wq && watch->id == w->id)
			return -EBUSY;
	}

	cred = current_cred();
	if (atomic_inc_return(&cred->user->nr_watches) > task_rlimit(current, RLIMIT_NOFILE)) {
		atomic_dec(&cred->user->nr_watches);
		return -EAGAIN;
	}

	watch->cred = get_cred(cred);
	rcu_assign_pointer(watch->watch_list, wlist);

	kref_get(&wqueue->usage);
	kref_get(&watch->usage);
	hlist_add_head(&watch->queue_node, &wqueue->watches);
	hlist_add_head_rcu(&watch->list_node, &wlist->watchers);
	return 0;
}

/**
 * add_watch_to_object - Add a watch on an object to a watch list
 * @watch: The watch to add
 * @wlist: The watch list to add to
 *
 * @watch->queue must have been set to point to the queue to post
 * notifications to, typically with init_watch(), and @wlist is the watch
 * list of the object to be watched.  The watch's credentials are taken from
 * the current task and a ref is taken on them.
 *
 * The caller must pin the queue and the list both and must hold the list
 * locked against racing watch additions/removals.
 */
int add_watch_to_object(struct watch *watch, struct watch_list *wlist)
{
	struct watch_queue *wqueue;
	int ret = -ENOENT;

	rcu_read_lock();

	wqueue = rcu_access_pointer(watch->queue);
	if (lock_wqueue(wqueue)) {
		spin_lock(&wlist->lock);
		ret = add_one_watch(watch, wlist, wqueue);
		spin_unlock(&wlist->lock);
		unlock_wqueue(wqueue);
	}

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(add_watch_to_object);
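
/* A minimal kernel-side sketch of setting up a watch, assuming the caller
 * holds a ref on @wqueue (e.g. from get_watch_queue()) and pins the watched
 * object; my_obj and its ->watchers list are hypothetical:
 *
 *	struct watch *watch;
 *
 *	watch = kzalloc(sizeof(*watch), GFP_KERNEL);
 *	if (!watch)
 *		return -ENOMEM;
 *	init_watch(watch, wqueue);
 *	watch->id = my_obj->id;
 *	watch->info_id = (u32)my_obj->id << WATCH_INFO_ID__SHIFT;
 *	ret = add_watch_to_object(watch, &my_obj->watchers);
 *	if (ret < 0)
 *		kfree(watch);
 */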

/**
 * remove_watch_from_object - Remove a watch or all watches from an object.
 * @wlist: The watch list to remove from
 * @wq: The watch queue of interest (ignored if @all is true)
 * @id: The ID of the watch to remove (ignored if @all is true)
 * @all: True to remove all watches
 *
 * Remove a specific watch or all watches from an object.  A notification is
 * sent to the watcher to tell them that this happened.
 */
int remove_watch_from_object(struct watch_list *wlist, struct watch_queue *wq,
			     u64 id, bool all)
{
	struct watch_notification_removal n;
	struct watch_queue *wqueue;
	struct watch *watch;
	int ret = -EBADSLT;

	rcu_read_lock();

again:
	spin_lock(&wlist->lock);
	hlist_for_each_entry(watch, &wlist->watchers, list_node) {
		if (all ||
		    (watch->id == id && rcu_access_pointer(watch->queue) == wq))
			goto found;
	}
	spin_unlock(&wlist->lock);
	goto out;

found:
	ret = 0;
	hlist_del_init_rcu(&watch->list_node);
	rcu_assign_pointer(watch->watch_list, NULL);
	spin_unlock(&wlist->lock);

	/* We now own the reference on watch that used to belong to wlist. */

	n.watch.type = WATCH_TYPE_META;
	n.watch.subtype = WATCH_META_REMOVAL_NOTIFICATION;
	n.watch.info = watch->info_id | watch_sizeof(n.watch);
	n.id = id;
	if (id != 0)
		n.watch.info = watch->info_id | watch_sizeof(n);

	wqueue = rcu_dereference(watch->queue);

	if (lock_wqueue(wqueue)) {
		post_one_notification(wqueue, &n.watch);

		if (!hlist_unhashed(&watch->queue_node)) {
			hlist_del_init_rcu(&watch->queue_node);
			put_watch(watch);
		}

		unlock_wqueue(wqueue);
	}

	if (wlist->release_watch) {
		void (*release_watch)(struct watch *);

		release_watch = wlist->release_watch;
		rcu_read_unlock();
		(*release_watch)(watch);
		rcu_read_lock();
	}
	put_watch(watch);

	if (all && !hlist_empty(&wlist->watchers))
		goto again;
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(remove_watch_from_object);
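
/* Note on the removal message built above: struct watch_notification_removal
 * is the base watch_notification plus a 64-bit id.  When no id is being
 * reported, only the base record is sent (watch_sizeof(n.watch)); when @id
 * is non-zero, the length covers the whole structure (watch_sizeof(n)) so
 * that the watcher also receives the id.
 */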

/*
 * Remove all the watches that are contributory to a queue.  This has the
 * potential to race with removal of the watches by the destruction of the
 * objects being watched or with the distribution of notifications.
 */
void watch_queue_clear(struct watch_queue *wqueue)
{
	struct watch_list *wlist;
	struct watch *watch;
	bool release;

	rcu_read_lock();
	spin_lock_bh(&wqueue->lock);

	/*
	 * This pipe can be freed by callers like free_pipe_info().
	 * Removing this reference also prevents new notifications.
	 */
	wqueue->pipe = NULL;

	while (!hlist_empty(&wqueue->watches)) {
		watch = hlist_entry(wqueue->watches.first, struct watch, queue_node);
		hlist_del_init_rcu(&watch->queue_node);
		/* We now own a ref on the watch. */
		spin_unlock_bh(&wqueue->lock);

		/* We can't do the next bit under the queue lock as we need to
		 * get the list lock - which would cause a deadlock if someone
		 * was removing from the opposite direction at the same time or
		 * posting a notification.
		 */
		wlist = rcu_dereference(watch->watch_list);
		if (wlist) {
			void (*release_watch)(struct watch *);

			spin_lock(&wlist->lock);

			release = !hlist_unhashed(&watch->list_node);
			if (release) {
				hlist_del_init_rcu(&watch->list_node);
				rcu_assign_pointer(watch->watch_list, NULL);

				/* We now own a second ref on the watch. */
			}

			release_watch = wlist->release_watch;
			spin_unlock(&wlist->lock);

			if (release) {
				if (release_watch) {
					rcu_read_unlock();
					/* This might need to call dput(), so
					 * we have to drop all the locks.
					 */
					(*release_watch)(watch);
					rcu_read_lock();
				}
				put_watch(watch);
			}
		}

		put_watch(watch);
		spin_lock_bh(&wqueue->lock);
	}

	spin_unlock_bh(&wqueue->lock);
	rcu_read_unlock();
}

/**
 * get_watch_queue - Get a watch queue from its file descriptor.
 * @fd: The fd to query.
 */
struct watch_queue *get_watch_queue(int fd)
{
	struct pipe_inode_info *pipe;
	struct watch_queue *wqueue = ERR_PTR(-EINVAL);
	struct fd f;

	f = fdget(fd);
	if (f.file) {
		pipe = get_pipe_info(f.file, false);
		if (pipe && pipe->watch_queue) {
			wqueue = pipe->watch_queue;
			kref_get(&wqueue->usage);
		}
		fdput(f);
	}

	return wqueue;
}
EXPORT_SYMBOL(get_watch_queue);
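
/* The expected calling pattern: check for an error pointer first, and drop
 * the acquired ref with put_watch_queue() when done.  This mirrors how
 * KEYCTL_WATCH_KEY consumes a notification pipe fd:
 *
 *	struct watch_queue *wqueue;
 *
 *	wqueue = get_watch_queue(watch_fd);
 *	if (IS_ERR(wqueue))
 *		return PTR_ERR(wqueue);
 *	...
 *	put_watch_queue(wqueue);
 */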

/*
 * Initialise a watch queue
 */
int watch_queue_init(struct pipe_inode_info *pipe)
{
	struct watch_queue *wqueue;

	wqueue = kzalloc(sizeof(*wqueue), GFP_KERNEL);
	if (!wqueue)
		return -ENOMEM;

	wqueue->pipe = pipe;
	kref_init(&wqueue->usage);
	spin_lock_init(&wqueue->lock);
	INIT_HLIST_HEAD(&wqueue->watches);

	pipe->watch_queue = wqueue;
	return 0;
}
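
/* For context: watch_queue_init() is called from the pipe creation path in
 * fs/pipe.c when userspace opens a pipe with pipe2(fds, O_NOTIFICATION_PIPE),
 * attaching an empty, unsized watch queue that watch_queue_set_size() and
 * watch_queue_set_filter() then configure.
 */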