// SPDX-License-Identifier: GPL-2.0
/* Watch queue and general notification mechanism, built on pipes
 *
 * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * See Documentation/core-api/watch_queue.rst
 */

#define pr_fmt(fmt) "watchq: " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/printk.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/sched/signal.h>
#include <linux/watch_queue.h>
#include <linux/pipe_fs_i.h>

MODULE_DESCRIPTION("Watch queue");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");

#define WATCH_QUEUE_NOTE_SIZE 128
#define WATCH_QUEUE_NOTES_PER_PAGE (PAGE_SIZE / WATCH_QUEUE_NOTE_SIZE)

/*
 * This must be called under the RCU read-lock, which makes
 * sure that the wqueue still exists. It can then take the lock,
 * and check that the wqueue hasn't been destroyed, which in
 * turn makes sure that the notification pipe still exists.
 */
static inline bool lock_wqueue(struct watch_queue *wqueue)
{
	spin_lock_bh(&wqueue->lock);
	if (unlikely(wqueue->defunct)) {
		spin_unlock_bh(&wqueue->lock);
		return false;
	}
	return true;
}

static inline void unlock_wqueue(struct watch_queue *wqueue)
{
	spin_unlock_bh(&wqueue->lock);
}

static void watch_queue_pipe_buf_release(struct pipe_inode_info *pipe,
					 struct pipe_buffer *buf)
{
	struct watch_queue *wqueue = (struct watch_queue *)buf->private;
	struct page *page;
	unsigned int bit;

	/* We need to work out which note within the page this refers to, but
	 * the note might have been maximum size, so merely ANDing the offset
	 * off doesn't work.  OTOH, the note must've been more than zero size.
	 */
	bit = buf->offset + buf->len;
	if ((bit & (WATCH_QUEUE_NOTE_SIZE - 1)) == 0)
		bit -= WATCH_QUEUE_NOTE_SIZE;
	bit /= WATCH_QUEUE_NOTE_SIZE;

	page = buf->page;
	bit += page->index;

	set_bit(bit, wqueue->notes_bitmap);
	generic_pipe_buf_release(pipe, buf);
}

// No try_steal function => no stealing
#define watch_queue_pipe_buf_try_steal NULL

/* New data written to a pipe may be appended to a buffer with this type. */
static const struct pipe_buf_operations watch_queue_pipe_buf_ops = {
	.release	= watch_queue_pipe_buf_release,
	.try_steal	= watch_queue_pipe_buf_try_steal,
	.get		= generic_pipe_buf_get,
};

/*
 * Post a notification to a watch queue.
 *
 * Must be called with the RCU lock for reading, and the
 * watch_queue lock held, which guarantees that the pipe
 * hasn't been released.
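 *
 * The expected calling pattern (mirrored by __post_watch_notification() and
 * remove_watch_from_object() below) is roughly:
 *
 *	rcu_read_lock();
 *	if (lock_wqueue(wqueue)) {
 *		post_one_notification(wqueue, n);
 *		unlock_wqueue(wqueue);
 *	}
 *	rcu_read_unlock();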
 */
static bool post_one_notification(struct watch_queue *wqueue,
				  struct watch_notification *n)
{
	void *p;
	struct pipe_inode_info *pipe = wqueue->pipe;
	struct pipe_buffer *buf;
	struct page *page;
	unsigned int head, tail, mask, note, offset, len;
	bool done = false;

	if (!pipe)
		return false;

	spin_lock_irq(&pipe->rd_wait.lock);

	mask = pipe->ring_size - 1;
	head = pipe->head;
	tail = pipe->tail;
	if (pipe_full(head, tail, pipe->ring_size))
		goto lost;

	note = find_first_bit(wqueue->notes_bitmap, wqueue->nr_notes);
	if (note >= wqueue->nr_notes)
		goto lost;

	page = wqueue->notes[note / WATCH_QUEUE_NOTES_PER_PAGE];
	offset = note % WATCH_QUEUE_NOTES_PER_PAGE * WATCH_QUEUE_NOTE_SIZE;
	get_page(page);
	len = n->info & WATCH_INFO_LENGTH;
	p = kmap_atomic(page);
	memcpy(p + offset, n, len);
	kunmap_atomic(p);

	buf = &pipe->bufs[head & mask];
	buf->page = page;
	buf->private = (unsigned long)wqueue;
	buf->ops = &watch_queue_pipe_buf_ops;
	buf->offset = offset;
	buf->len = len;
	buf->flags = PIPE_BUF_FLAG_WHOLE;
	smp_store_release(&pipe->head, head + 1); /* vs pipe_read() */

	if (!test_and_clear_bit(note, wqueue->notes_bitmap)) {
		spin_unlock_irq(&pipe->rd_wait.lock);
		BUG();
	}
	wake_up_interruptible_sync_poll_locked(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM);
	done = true;

out:
	spin_unlock_irq(&pipe->rd_wait.lock);
	if (done)
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	return done;

lost:
	buf = &pipe->bufs[(head - 1) & mask];
	buf->flags |= PIPE_BUF_FLAG_LOSS;
	goto out;
}

/*
 * Apply filter rules to a notification.
 */
static bool filter_watch_notification(const struct watch_filter *wf,
				      const struct watch_notification *n)
{
	const struct watch_type_filter *wt;
	unsigned int st_bits = sizeof(wt->subtype_filter[0]) * 8;
	unsigned int st_index = n->subtype / st_bits;
	unsigned int st_bit = 1U << (n->subtype % st_bits);
	int i;

	if (!test_bit(n->type, wf->type_filter))
		return false;

	for (i = 0; i < wf->nr_filters; i++) {
		wt = &wf->filters[i];
		if (n->type == wt->type &&
		    (wt->subtype_filter[st_index] & st_bit) &&
		    (n->info & wt->info_mask) == wt->info_filter)
			return true;
	}

	return false; /* If there is a filter, the default is to reject. */
}

/**
 * __post_watch_notification - Post an event notification
 * @wlist: The watch list to post the event to.
 * @n: The notification record to post.
 * @cred: The creds of the process that triggered the notification.
 * @id: The ID to match on the watch.
 *
 * Post a notification of an event into a set of watch queues and let the users
 * know.
 *
 * The size of the notification should be set in n->info & WATCH_INFO_LENGTH and
 * should be in units of sizeof(*n).
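 *
 * For example, a watched subsystem might post a removal event much as
 * remove_watch_from_object() below does (illustrative sketch only, using the
 * post_watch_notification() wrapper from <linux/watch_queue.h>):
 *
 *	struct watch_notification n = {
 *		.type		= WATCH_TYPE_META,
 *		.subtype	= WATCH_META_REMOVAL_NOTIFICATION,
 *		.info		= watch_sizeof(n),
 *	};
 *
 *	post_watch_notification(wlist, &n, current_cred(), 0);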
 */
void __post_watch_notification(struct watch_list *wlist,
			       struct watch_notification *n,
			       const struct cred *cred,
			       u64 id)
{
	const struct watch_filter *wf;
	struct watch_queue *wqueue;
	struct watch *watch;

	if (((n->info & WATCH_INFO_LENGTH) >> WATCH_INFO_LENGTH__SHIFT) == 0) {
		WARN_ON(1);
		return;
	}

	rcu_read_lock();

	hlist_for_each_entry_rcu(watch, &wlist->watchers, list_node) {
		if (watch->id != id)
			continue;
		n->info &= ~WATCH_INFO_ID;
		n->info |= watch->info_id;

		wqueue = rcu_dereference(watch->queue);
		wf = rcu_dereference(wqueue->filter);
		if (wf && !filter_watch_notification(wf, n))
			continue;

		if (security_post_notification(watch->cred, cred, n) < 0)
			continue;

		if (lock_wqueue(wqueue)) {
			post_one_notification(wqueue, n);
			unlock_wqueue(wqueue);
		}
	}

	rcu_read_unlock();
}
EXPORT_SYMBOL(__post_watch_notification);

/*
 * Allocate sufficient pages to preallocate buffers for the requested number
 * of notifications.
 */
long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes)
{
	struct watch_queue *wqueue = pipe->watch_queue;
	struct page **pages;
	unsigned long *bitmap;
	unsigned long user_bufs;
	int ret, i, nr_pages;

	if (!wqueue)
		return -ENODEV;
	if (wqueue->notes)
		return -EBUSY;

	if (nr_notes < 1 ||
	    nr_notes > 512) /* TODO: choose a better hard limit */
		return -EINVAL;

	nr_pages = (nr_notes + WATCH_QUEUE_NOTES_PER_PAGE - 1);
	nr_pages /= WATCH_QUEUE_NOTES_PER_PAGE;
	user_bufs = account_pipe_buffers(pipe->user, pipe->nr_accounted, nr_pages);

	if (nr_pages > pipe->max_usage &&
	    (too_many_pipe_buffers_hard(user_bufs) ||
	     too_many_pipe_buffers_soft(user_bufs)) &&
	    pipe_is_unprivileged_user()) {
		ret = -EPERM;
		goto error;
	}

	nr_notes = nr_pages * WATCH_QUEUE_NOTES_PER_PAGE;
	ret = pipe_resize_ring(pipe, roundup_pow_of_two(nr_notes));
	if (ret < 0)
		goto error;

	ret = -ENOMEM;
	pages = kcalloc(sizeof(struct page *), nr_pages, GFP_KERNEL);
	if (!pages)
		goto error;

	for (i = 0; i < nr_pages; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto error_p;
		pages[i]->index = i * WATCH_QUEUE_NOTES_PER_PAGE;
	}

	bitmap = bitmap_alloc(nr_notes, GFP_KERNEL);
	if (!bitmap)
		goto error_p;

	bitmap_fill(bitmap, nr_notes);
	wqueue->notes = pages;
	wqueue->notes_bitmap = bitmap;
	wqueue->nr_pages = nr_pages;
	wqueue->nr_notes = nr_notes;
	return 0;

error_p:
	while (--i >= 0)
		__free_page(pages[i]);
	kfree(pages);
error:
	(void) account_pipe_buffers(pipe->user, nr_pages, pipe->nr_accounted);
	return ret;
}

/*
 * Set the filter on a watch queue.
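 *
 * This is reached from userspace via ioctl(IOC_WATCH_QUEUE_SET_FILTER) on the
 * read end of a notification pipe.  A rough sketch of the userspace side
 * (assuming a pipe created with pipe2(fds, O_NOTIFICATION_PIPE)) might be:
 *
 *	struct watch_notification_filter *f;
 *
 *	f = calloc(1, sizeof(*f) + sizeof(f->filters[0]));
 *	f->nr_filters = 1;
 *	f->filters[0].type = WATCH_TYPE_KEY_NOTIFY;
 *	f->filters[0].subtype_filter[0] = ~0U;
 *	ioctl(fds[0], IOC_WATCH_QUEUE_SET_FILTER, f);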
 */
long watch_queue_set_filter(struct pipe_inode_info *pipe,
			    struct watch_notification_filter __user *_filter)
{
	struct watch_notification_type_filter *tf;
	struct watch_notification_filter filter;
	struct watch_type_filter *q;
	struct watch_filter *wfilter;
	struct watch_queue *wqueue = pipe->watch_queue;
	int ret, nr_filter = 0, i;

	if (!wqueue)
		return -ENODEV;

	if (!_filter) {
		/* Remove the old filter */
		wfilter = NULL;
		goto set;
	}

	/* Grab the user's filter specification */
	if (copy_from_user(&filter, _filter, sizeof(filter)) != 0)
		return -EFAULT;
	if (filter.nr_filters == 0 ||
	    filter.nr_filters > 16 ||
	    filter.__reserved != 0)
		return -EINVAL;

	tf = memdup_user(_filter->filters, filter.nr_filters * sizeof(*tf));
	if (IS_ERR(tf))
		return PTR_ERR(tf);

	ret = -EINVAL;
	for (i = 0; i < filter.nr_filters; i++) {
		if ((tf[i].info_filter & ~tf[i].info_mask) ||
		    tf[i].info_mask & WATCH_INFO_LENGTH)
			goto err_filter;
		/* Ignore any unknown types */
		if (tf[i].type >= WATCH_TYPE__NR)
			continue;
		nr_filter++;
	}

	/* Now we need to build the internal filter from only the relevant
	 * user-specified filters.
	 */
	ret = -ENOMEM;
	wfilter = kzalloc(struct_size(wfilter, filters, nr_filter), GFP_KERNEL);
	if (!wfilter)
		goto err_filter;
	wfilter->nr_filters = nr_filter;

	q = wfilter->filters;
	for (i = 0; i < filter.nr_filters; i++) {
		if (tf[i].type >= WATCH_TYPE__NR)
			continue;

		q->type			= tf[i].type;
		q->info_filter		= tf[i].info_filter;
		q->info_mask		= tf[i].info_mask;
		q->subtype_filter[0]	= tf[i].subtype_filter[0];
		__set_bit(q->type, wfilter->type_filter);
		q++;
	}

	kfree(tf);
set:
	pipe_lock(pipe);
	wfilter = rcu_replace_pointer(wqueue->filter, wfilter,
				      lockdep_is_held(&pipe->mutex));
	pipe_unlock(pipe);
	if (wfilter)
		kfree_rcu(wfilter, rcu);
	return 0;

err_filter:
	kfree(tf);
	return ret;
}

static void __put_watch_queue(struct kref *kref)
{
	struct watch_queue *wqueue =
		container_of(kref, struct watch_queue, usage);
	struct watch_filter *wfilter;
	int i;

	for (i = 0; i < wqueue->nr_pages; i++)
		__free_page(wqueue->notes[i]);
	kfree(wqueue->notes);
	bitmap_free(wqueue->notes_bitmap);

	wfilter = rcu_access_pointer(wqueue->filter);
	if (wfilter)
		kfree_rcu(wfilter, rcu);
	kfree_rcu(wqueue, rcu);
}

/**
 * put_watch_queue - Dispose of a ref on a watchqueue.
 * @wqueue: The watch queue to unref.
 */
void put_watch_queue(struct watch_queue *wqueue)
{
	kref_put(&wqueue->usage, __put_watch_queue);
}
EXPORT_SYMBOL(put_watch_queue);

static void free_watch(struct rcu_head *rcu)
{
	struct watch *watch = container_of(rcu, struct watch, rcu);

	put_watch_queue(rcu_access_pointer(watch->queue));
	atomic_dec(&watch->cred->user->nr_watches);
	put_cred(watch->cred);
	kfree(watch);
}

static void __put_watch(struct kref *kref)
{
	struct watch *watch = container_of(kref, struct watch, usage);

	call_rcu(&watch->rcu, free_watch);
}

/*
 * Discard a watch.
 */
static void put_watch(struct watch *watch)
{
	kref_put(&watch->usage, __put_watch);
}

/**
 * init_watch - Initialise a watch
 * @watch: The watch to initialise.
 * @wqueue: The queue to assign.
 *
 * Initialise a watch and set the watch queue.
 */
void init_watch(struct watch *watch, struct watch_queue *wqueue)
{
	kref_init(&watch->usage);
	INIT_HLIST_NODE(&watch->list_node);
	INIT_HLIST_NODE(&watch->queue_node);
	rcu_assign_pointer(watch->queue, wqueue);
}

static int add_one_watch(struct watch *watch, struct watch_list *wlist, struct watch_queue *wqueue)
{
	const struct cred *cred;
	struct watch *w;

	hlist_for_each_entry(w, &wlist->watchers, list_node) {
		struct watch_queue *wq = rcu_access_pointer(w->queue);
		if (wqueue == wq && watch->id == w->id)
			return -EBUSY;
	}

	cred = current_cred();
	if (atomic_inc_return(&cred->user->nr_watches) > task_rlimit(current, RLIMIT_NOFILE)) {
		atomic_dec(&cred->user->nr_watches);
		return -EAGAIN;
	}

	watch->cred = get_cred(cred);
	rcu_assign_pointer(watch->watch_list, wlist);

	kref_get(&wqueue->usage);
	kref_get(&watch->usage);
	hlist_add_head(&watch->queue_node, &wqueue->watches);
	hlist_add_head_rcu(&watch->list_node, &wlist->watchers);
	return 0;
}

/**
 * add_watch_to_object - Add a watch on an object to a watch list
 * @watch: The watch to add
 * @wlist: The watch list to add to
 *
 * @watch->queue must have been set to point to the queue to post notifications
 * to and @wlist must be the watch list of the object to be watched.  The
 * watch's credentials are taken from the current task and a ref is taken on
 * them.
 *
 * The caller must pin the queue and the list both and must hold the list
 * locked against racing watch additions/removals.
 */
int add_watch_to_object(struct watch *watch, struct watch_list *wlist)
{
	struct watch_queue *wqueue;
	int ret = -ENOENT;

	rcu_read_lock();

	wqueue = rcu_access_pointer(watch->queue);
	if (lock_wqueue(wqueue)) {
		spin_lock(&wlist->lock);
		ret = add_one_watch(watch, wlist, wqueue);
		spin_unlock(&wlist->lock);
		unlock_wqueue(wqueue);
	}

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(add_watch_to_object);

/**
 * remove_watch_from_object - Remove a watch or all watches from an object.
 * @wlist: The watch list to remove from
 * @wq: The watch queue of interest (ignored if @all is true)
 * @id: The ID of the watch to remove (ignored if @all is true)
 * @all: True to remove all watches
 *
 * Remove a specific watch or all watches from an object.  A notification is
 * sent to the watcher to tell them that this happened.
 */
int remove_watch_from_object(struct watch_list *wlist, struct watch_queue *wq,
			     u64 id, bool all)
{
	struct watch_notification_removal n;
	struct watch_queue *wqueue;
	struct watch *watch;
	int ret = -EBADSLT;

	rcu_read_lock();

again:
	spin_lock(&wlist->lock);
	hlist_for_each_entry(watch, &wlist->watchers, list_node) {
		if (all ||
		    (watch->id == id && rcu_access_pointer(watch->queue) == wq))
			goto found;
	}
	spin_unlock(&wlist->lock);
	goto out;

found:
	ret = 0;
	hlist_del_init_rcu(&watch->list_node);
	rcu_assign_pointer(watch->watch_list, NULL);
	spin_unlock(&wlist->lock);

	/* We now own the reference on watch that used to belong to wlist.
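	 * A removal notification is posted to the watcher's queue below before
	 * that ref is finally dropped with put_watch().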
	 */

	n.watch.type = WATCH_TYPE_META;
	n.watch.subtype = WATCH_META_REMOVAL_NOTIFICATION;
	n.watch.info = watch->info_id | watch_sizeof(n.watch);
	n.id = id;
	if (id != 0)
		n.watch.info = watch->info_id | watch_sizeof(n);

	wqueue = rcu_dereference(watch->queue);

	if (lock_wqueue(wqueue)) {
		post_one_notification(wqueue, &n.watch);

		if (!hlist_unhashed(&watch->queue_node)) {
			hlist_del_init_rcu(&watch->queue_node);
			put_watch(watch);
		}

		unlock_wqueue(wqueue);
	}

	if (wlist->release_watch) {
		void (*release_watch)(struct watch *);

		release_watch = wlist->release_watch;
		rcu_read_unlock();
		(*release_watch)(watch);
		rcu_read_lock();
	}
	put_watch(watch);

	if (all && !hlist_empty(&wlist->watchers))
		goto again;
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(remove_watch_from_object);

/*
 * Remove all the watches that are contributory to a queue.  This has the
 * potential to race with removal of the watches by the destruction of the
 * objects being watched or with the distribution of notifications.
 */
void watch_queue_clear(struct watch_queue *wqueue)
{
	struct watch_list *wlist;
	struct watch *watch;
	bool release;

	rcu_read_lock();
	spin_lock_bh(&wqueue->lock);

	/* Prevent new notifications from being stored. */
	wqueue->defunct = true;

	while (!hlist_empty(&wqueue->watches)) {
		watch = hlist_entry(wqueue->watches.first, struct watch, queue_node);
		hlist_del_init_rcu(&watch->queue_node);
		/* We now own a ref on the watch. */
		spin_unlock_bh(&wqueue->lock);

		/* We can't do the next bit under the queue lock as we need to
		 * get the list lock - which would cause a deadlock if someone
		 * was removing from the opposite direction at the same time or
		 * posting a notification.
		 */
		wlist = rcu_dereference(watch->watch_list);
		if (wlist) {
			void (*release_watch)(struct watch *);

			spin_lock(&wlist->lock);

			release = !hlist_unhashed(&watch->list_node);
			if (release) {
				hlist_del_init_rcu(&watch->list_node);
				rcu_assign_pointer(watch->watch_list, NULL);

				/* We now own a second ref on the watch. */
			}

			release_watch = wlist->release_watch;
			spin_unlock(&wlist->lock);

			if (release) {
				if (release_watch) {
					rcu_read_unlock();
					/* This might need to call dput(), so
					 * we have to drop all the locks.
					 */
					(*release_watch)(watch);
					rcu_read_lock();
				}
				put_watch(watch);
			}
		}

		put_watch(watch);
		spin_lock_bh(&wqueue->lock);
	}

	spin_unlock_bh(&wqueue->lock);
	rcu_read_unlock();
}

/**
 * get_watch_queue - Get a watch queue from its file descriptor.
 * @fd: The fd to query.
 */
struct watch_queue *get_watch_queue(int fd)
{
	struct pipe_inode_info *pipe;
	struct watch_queue *wqueue = ERR_PTR(-EINVAL);
	struct fd f;

	f = fdget(fd);
	if (f.file) {
		pipe = get_pipe_info(f.file, false);
		if (pipe && pipe->watch_queue) {
			wqueue = pipe->watch_queue;
			kref_get(&wqueue->usage);
		}
		fdput(f);
	}

	return wqueue;
}
EXPORT_SYMBOL(get_watch_queue);

/*
 * Initialise a watch queue
 */
int watch_queue_init(struct pipe_inode_info *pipe)
{
	struct watch_queue *wqueue;

	wqueue = kzalloc(sizeof(*wqueue), GFP_KERNEL);
	if (!wqueue)
		return -ENOMEM;

	wqueue->pipe = pipe;
	kref_init(&wqueue->usage);
	spin_lock_init(&wqueue->lock);
	INIT_HLIST_HEAD(&wqueue->watches);

	pipe->watch_queue = wqueue;
	return 0;
}
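
/*
 * Illustrative sketch (not part of this API) of how a subsystem owning a
 * watchable object might wire these helpers together.  The "obj" structure,
 * its ->watchers list, its ->lock and the watch_fd/tag parameters are
 * hypothetical; the pattern loosely mirrors the keyring watch code:
 *
 *	struct watch_queue *wqueue;
 *	struct watch *watch;
 *	int ret;
 *
 *	wqueue = get_watch_queue(watch_fd);
 *	if (IS_ERR(wqueue))
 *		return PTR_ERR(wqueue);
 *
 *	ret = -ENOMEM;
 *	watch = kzalloc(sizeof(*watch), GFP_KERNEL);
 *	if (watch) {
 *		init_watch(watch, wqueue);
 *		watch->id = obj->id;
 *		watch->info_id = (u32)tag << WATCH_INFO_ID__SHIFT;
 *
 *		mutex_lock(&obj->lock);		// pin the list as required
 *		ret = add_watch_to_object(watch, &obj->watchers);
 *		mutex_unlock(&obj->lock);
 *		if (ret < 0)
 *			kfree(watch);		// refs not taken on failure
 *	}
 *	put_watch_queue(wqueue);
 *
 * Removal would later go through remove_watch_from_object(&obj->watchers,
 * wqueue, obj->id, false), which also posts a WATCH_META_REMOVAL_NOTIFICATION
 * to the watcher.
 */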