// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/proc/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/cache.h>
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/pid_namespace.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/completion.h>
#include <linux/poll.h>
#include <linux/printk.h>
#include <linux/file.h>
#include <linux/limits.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/mount.h>

#include <linux/uaccess.h>

#include "internal.h"

static void proc_evict_inode(struct inode *inode)
{
	struct proc_dir_entry *de;
	struct ctl_table_header *head;

	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);

	/* Stop tracking associated processes */
	put_pid(PROC_I(inode)->pid);

	/* Let go of any associated proc directory entry */
	de = PDE(inode);
	if (de)
		pde_put(de);

	head = PROC_I(inode)->sysctl;
	if (head) {
		RCU_INIT_POINTER(PROC_I(inode)->sysctl, NULL);
		proc_sys_evict_inode(inode, head);
	}
}

static struct kmem_cache *proc_inode_cachep __ro_after_init;
static struct kmem_cache *pde_opener_cache __ro_after_init;

static struct inode *proc_alloc_inode(struct super_block *sb)
{
	struct proc_inode *ei;

	ei = kmem_cache_alloc(proc_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	ei->pid = NULL;
	ei->fd = 0;
	ei->op.proc_get_link = NULL;
	ei->pde = NULL;
	ei->sysctl = NULL;
	ei->sysctl_entry = NULL;
	ei->ns_ops = NULL;
	return &ei->vfs_inode;
}

static void proc_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(proc_inode_cachep, PROC_I(inode));
}

static void proc_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, proc_i_callback);
}
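/*
 * Note: the proc_inode is not freed immediately but via call_rcu(), so
 * that lockless users (RCU-walk path lookup and readers that reach
 * PROC_I(inode) fields under rcu_read_lock()) never see the memory
 * recycled under them; the actual kmem_cache_free() happens only after a
 * grace period, in proc_i_callback() above.  This is the standard pattern
 * for filesystems with their own inode cache.
 */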
static void init_once(void *foo)
{
	struct proc_inode *ei = (struct proc_inode *) foo;

	inode_init_once(&ei->vfs_inode);
}

void __init proc_init_kmemcache(void)
{
	proc_inode_cachep = kmem_cache_create("proc_inode_cache",
					      sizeof(struct proc_inode),
					      0, (SLAB_RECLAIM_ACCOUNT|
						  SLAB_MEM_SPREAD|SLAB_ACCOUNT|
						  SLAB_PANIC),
					      init_once);
	pde_opener_cache =
		kmem_cache_create("pde_opener", sizeof(struct pde_opener), 0,
				  SLAB_ACCOUNT|SLAB_PANIC, NULL);
	proc_dir_entry_cache = kmem_cache_create_usercopy(
		"proc_dir_entry", SIZEOF_PDE, 0, SLAB_PANIC,
		offsetof(struct proc_dir_entry, inline_name),
		SIZEOF_PDE_INLINE_NAME, NULL);
	BUILD_BUG_ON(sizeof(struct proc_dir_entry) >= SIZEOF_PDE);
}

static int proc_show_options(struct seq_file *seq, struct dentry *root)
{
	struct super_block *sb = root->d_sb;
	struct pid_namespace *pid = sb->s_fs_info;

	if (!gid_eq(pid->pid_gid, GLOBAL_ROOT_GID))
		seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, pid->pid_gid));
	if (pid->hide_pid != HIDEPID_OFF)
		seq_printf(seq, ",hidepid=%u", pid->hide_pid);

	return 0;
}

const struct super_operations proc_sops = {
	.alloc_inode	= proc_alloc_inode,
	.destroy_inode	= proc_destroy_inode,
	.drop_inode	= generic_delete_inode,
	.evict_inode	= proc_evict_inode,
	.statfs		= simple_statfs,
	.show_options	= proc_show_options,
};

enum {BIAS = -1U<<31};

static inline int use_pde(struct proc_dir_entry *pde)
{
	return likely(atomic_inc_unless_negative(&pde->in_use));
}

static void unuse_pde(struct proc_dir_entry *pde)
{
	if (unlikely(atomic_dec_return(&pde->in_use) == BIAS))
		complete(pde->pde_unload_completion);
}

/* pde is locked on entry, unlocked on exit */
static void close_pdeo(struct proc_dir_entry *pde, struct pde_opener *pdeo)
{
	/*
	 * close() (proc_reg_release()) can't delete an entry and proceed:
	 * ->release hook needs to be available at the right moment.
	 *
	 * rmmod (remove_proc_entry() et al) can't delete an entry and proceed:
	 * "struct file" needs to be available at the right moment.
	 *
	 * Therefore, first process to enter this function does ->release() and
	 * signals its completion to the other process which does nothing.
	 */
	if (pdeo->closing) {
		/* somebody else is doing that, just wait */
		DECLARE_COMPLETION_ONSTACK(c);
		pdeo->c = &c;
		spin_unlock(&pde->pde_unload_lock);
		wait_for_completion(&c);
	} else {
		struct file *file;
		struct completion *c;

		pdeo->closing = true;
		spin_unlock(&pde->pde_unload_lock);
		file = pdeo->file;
		pde->proc_fops->release(file_inode(file), file);
		spin_lock(&pde->pde_unload_lock);
		/* After ->release. */
		list_del(&pdeo->lh);
		c = pdeo->c;
		spin_unlock(&pde->pde_unload_lock);
		if (unlikely(c))
			complete(c);
		kmem_cache_free(pde_opener_cache, pdeo);
	}
}

void proc_entry_rundown(struct proc_dir_entry *de)
{
	DECLARE_COMPLETION_ONSTACK(c);
	/* Wait until all existing callers into module are done. */
	de->pde_unload_completion = &c;
	if (atomic_add_return(BIAS, &de->in_use) != BIAS)
		wait_for_completion(&c);

	/* ->pde_openers list can't grow from now on. */

	spin_lock(&de->pde_unload_lock);
	while (!list_empty(&de->pde_openers)) {
		struct pde_opener *pdeo;
		pdeo = list_first_entry(&de->pde_openers, struct pde_opener, lh);
		close_pdeo(de, pdeo);
		spin_lock(&de->pde_unload_lock);
	}
	spin_unlock(&de->pde_unload_lock);
}
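/*
 * A worked example of the biased counter scheme above: with two callers
 * still inside ->proc_fops when proc_entry_rundown() runs, ->in_use goes
 * from 2 to BIAS + 2, which is negative, so use_pde() now fails for every
 * new caller (atomic_inc_unless_negative()).  Each unuse_pde() then
 * decrements the counter; the caller that brings it back to exactly BIAS
 * is the last one out and fires ->pde_unload_completion, letting the
 * rundown proceed to force-close the remaining openers.
 */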
static loff_t proc_reg_llseek(struct file *file, loff_t offset, int whence)
{
	struct proc_dir_entry *pde = PDE(file_inode(file));
	loff_t rv = -EINVAL;
	if (use_pde(pde)) {
		loff_t (*llseek)(struct file *, loff_t, int);
		llseek = pde->proc_fops->llseek;
		if (!llseek)
			llseek = default_llseek;
		rv = llseek(file, offset, whence);
		unuse_pde(pde);
	}
	return rv;
}

static ssize_t proc_reg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	ssize_t (*read)(struct file *, char __user *, size_t, loff_t *);
	struct proc_dir_entry *pde = PDE(file_inode(file));
	ssize_t rv = -EIO;
	if (use_pde(pde)) {
		read = pde->proc_fops->read;
		if (read)
			rv = read(file, buf, count, ppos);
		unuse_pde(pde);
	}
	return rv;
}
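/*
 * Illustrative sketch (not built as part of this file): the kind of
 * ->read hook that proc_reg_read() dispatches to once the PDE has been
 * pinned with use_pde().  The function name and message are made up for
 * the example; simple_read_from_buffer() is the real libfs helper.
 */
#if 0
static ssize_t example_proc_read(struct file *file, char __user *buf,
				 size_t count, loff_t *ppos)
{
	static const char msg[] = "hello from procfs\n";

	/* Copy as much of msg as fits, honouring *ppos like a regular file. */
	return simple_read_from_buffer(buf, count, ppos, msg, sizeof(msg) - 1);
}
#endif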
static ssize_t proc_reg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *);
	struct proc_dir_entry *pde = PDE(file_inode(file));
	ssize_t rv = -EIO;
	if (use_pde(pde)) {
		write = pde->proc_fops->write;
		if (write)
			rv = write(file, buf, count, ppos);
		unuse_pde(pde);
	}
	return rv;
}

static __poll_t proc_reg_poll(struct file *file, struct poll_table_struct *pts)
{
	struct proc_dir_entry *pde = PDE(file_inode(file));
	__poll_t rv = DEFAULT_POLLMASK;
	__poll_t (*poll)(struct file *, struct poll_table_struct *);
	if (use_pde(pde)) {
		poll = pde->proc_fops->poll;
		if (poll)
			rv = poll(file, pts);
		unuse_pde(pde);
	}
	return rv;
}

static long proc_reg_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct proc_dir_entry *pde = PDE(file_inode(file));
	long rv = -ENOTTY;
	long (*ioctl)(struct file *, unsigned int, unsigned long);
	if (use_pde(pde)) {
		ioctl = pde->proc_fops->unlocked_ioctl;
		if (ioctl)
			rv = ioctl(file, cmd, arg);
		unuse_pde(pde);
	}
	return rv;
}

#ifdef CONFIG_COMPAT
static long proc_reg_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct proc_dir_entry *pde = PDE(file_inode(file));
	long rv = -ENOTTY;
	long (*compat_ioctl)(struct file *, unsigned int, unsigned long);
	if (use_pde(pde)) {
		compat_ioctl = pde->proc_fops->compat_ioctl;
		if (compat_ioctl)
			rv = compat_ioctl(file, cmd, arg);
		unuse_pde(pde);
	}
	return rv;
}
#endif

static int proc_reg_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct proc_dir_entry *pde = PDE(file_inode(file));
	int rv = -EIO;
	int (*mmap)(struct file *, struct vm_area_struct *);
	if (use_pde(pde)) {
		mmap = pde->proc_fops->mmap;
		if (mmap)
			rv = mmap(file, vma);
		unuse_pde(pde);
	}
	return rv;
}

static unsigned long
proc_reg_get_unmapped_area(struct file *file, unsigned long orig_addr,
			   unsigned long len, unsigned long pgoff,
			   unsigned long flags)
{
	struct proc_dir_entry *pde = PDE(file_inode(file));
	unsigned long rv = -EIO;

	if (use_pde(pde)) {
		typeof(proc_reg_get_unmapped_area) *get_area;

		get_area = pde->proc_fops->get_unmapped_area;
#ifdef CONFIG_MMU
		if (!get_area)
			get_area = current->mm->get_unmapped_area;
#endif

		if (get_area)
			rv = get_area(file, orig_addr, len, pgoff, flags);
		else
			rv = orig_addr;
		unuse_pde(pde);
	}
	return rv;
}

static int proc_reg_open(struct inode *inode, struct file *file)
{
	struct proc_dir_entry *pde = PDE(inode);
	int rv = 0;
	int (*open)(struct inode *, struct file *);
	int (*release)(struct inode *, struct file *);
	struct pde_opener *pdeo;

	/*
	 * Ensure that
	 * 1) PDE's ->release hook will be called no matter what
	 *    either normally by close()/->release, or forcefully by
	 *    rmmod/remove_proc_entry.
	 *
	 * 2) rmmod isn't blocked by opening file in /proc and sitting on
	 *    the descriptor (including "rmmod foo </proc/foo" scenario).
	 *
	 * Save every "struct file" with custom ->release hook.
	 */
	if (!use_pde(pde))
		return -ENOENT;

	release = pde->proc_fops->release;
	if (release) {
		pdeo = kmem_cache_alloc(pde_opener_cache, GFP_KERNEL);
		if (!pdeo) {
			rv = -ENOMEM;
			goto out_unuse;
		}
	}

	open = pde->proc_fops->open;
	if (open)
		rv = open(inode, file);

	if (release) {
		if (rv == 0) {
			/* To know what to release. */
			pdeo->file = file;
			pdeo->closing = false;
			pdeo->c = NULL;
			spin_lock(&pde->pde_unload_lock);
			list_add(&pdeo->lh, &pde->pde_openers);
			spin_unlock(&pde->pde_unload_lock);
		} else
			kmem_cache_free(pde_opener_cache, pdeo);
	}

out_unuse:
	unuse_pde(pde);
	return rv;
}

static int proc_reg_release(struct inode *inode, struct file *file)
{
	struct proc_dir_entry *pde = PDE(inode);
	struct pde_opener *pdeo;
	spin_lock(&pde->pde_unload_lock);
	list_for_each_entry(pdeo, &pde->pde_openers, lh) {
		if (pdeo->file == file) {
			close_pdeo(pde, pdeo);
			return 0;
		}
	}
	spin_unlock(&pde->pde_unload_lock);
	return 0;
}

static const struct file_operations proc_reg_file_ops = {
	.llseek		= proc_reg_llseek,
	.read		= proc_reg_read,
	.write		= proc_reg_write,
	.poll		= proc_reg_poll,
	.unlocked_ioctl	= proc_reg_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= proc_reg_compat_ioctl,
#endif
	.mmap		= proc_reg_mmap,
	.get_unmapped_area = proc_reg_get_unmapped_area,
	.open		= proc_reg_open,
	.release	= proc_reg_release,
};

#ifdef CONFIG_COMPAT
static const struct file_operations proc_reg_file_ops_no_compat = {
	.llseek		= proc_reg_llseek,
	.read		= proc_reg_read,
	.write		= proc_reg_write,
	.poll		= proc_reg_poll,
	.unlocked_ioctl	= proc_reg_unlocked_ioctl,
	.mmap		= proc_reg_mmap,
	.get_unmapped_area = proc_reg_get_unmapped_area,
	.open		= proc_reg_open,
	.release	= proc_reg_release,
};
#endif

static void proc_put_link(void *p)
{
	unuse_pde(p);
}

static const char *proc_get_link(struct dentry *dentry,
				 struct inode *inode,
				 struct delayed_call *done)
{
	struct proc_dir_entry *pde = PDE(inode);
	if (!use_pde(pde))
		return ERR_PTR(-EINVAL);
	set_delayed_call(done, proc_put_link, pde);
	return pde->data;
}

const struct inode_operations proc_link_inode_operations = {
	.get_link	= proc_get_link,
};
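/*
 * Illustrative sketch (not built as part of this file): entries created
 * with proc_symlink() are the users of proc_link_inode_operations above;
 * the target string is stored in pde->data, which proc_get_link() hands
 * back to the VFS while the PDE stays pinned until the delayed call runs.
 * The entry and target names below are made up for the example.
 */
#if 0
static void __init example_make_link(void)	/* hypothetical helper */
{
	/* e.g. make /proc/example_link point at /proc/self/mounts */
	proc_symlink("example_link", NULL, "self/mounts");
}
#endif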
struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
{
	struct inode *inode = new_inode_pseudo(sb);

	if (inode) {
		inode->i_ino = de->low_ino;
		inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
		PROC_I(inode)->pde = de;

		if (is_empty_pde(de)) {
			make_empty_dir_inode(inode);
			return inode;
		}
		if (de->mode) {
			inode->i_mode = de->mode;
			inode->i_uid = de->uid;
			inode->i_gid = de->gid;
		}
		if (de->size)
			inode->i_size = de->size;
		if (de->nlink)
			set_nlink(inode, de->nlink);
		WARN_ON(!de->proc_iops);
		inode->i_op = de->proc_iops;
		if (de->proc_fops) {
			if (S_ISREG(inode->i_mode)) {
#ifdef CONFIG_COMPAT
				if (!de->proc_fops->compat_ioctl)
					inode->i_fop =
						&proc_reg_file_ops_no_compat;
				else
#endif
					inode->i_fop = &proc_reg_file_ops;
			} else {
				inode->i_fop = de->proc_fops;
			}
		}
	} else
		pde_put(de);
	return inode;
}
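/*
 * Illustrative sketch (not built as part of this file): a typical module
 * creating a regular /proc file.  Because the entry is S_IFREG and has
 * ->proc_fops set, proc_get_inode() above installs proc_reg_file_ops as
 * i_fop, so every operation is bracketed by use_pde()/unuse_pde(); and
 * since example_fops has a ->release hook, proc_reg_open() tracks each
 * opener so that remove_proc_entry() can force the release even if a
 * descriptor stays open.  All "example_*" names are made up for the
 * sketch.
 */
#if 0
static int example_show(struct seq_file *m, void *v)
{
	seq_puts(m, "hello from procfs\n");
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, NULL);
}

static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.open		= example_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init example_init(void)
{
	return proc_create("example", 0444, NULL, &example_fops) ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
	remove_proc_entry("example", NULL);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
#endif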