/*
 *  linux/fs/proc/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/completion.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/limits.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/slab.h>

#include <asm/system.h>
#include <asm/uaccess.h>

#include "internal.h"

static void proc_evict_inode(struct inode *inode)
{
	struct proc_dir_entry *de;

	truncate_inode_pages(&inode->i_data, 0);
	end_writeback(inode);

	/* Stop tracking associated processes */
	put_pid(PROC_I(inode)->pid);

	/* Let go of any associated proc directory entry */
	de = PROC_I(inode)->pde;
	if (de)
		pde_put(de);
	if (PROC_I(inode)->sysctl)
		sysctl_head_put(PROC_I(inode)->sysctl);
}

struct vfsmount *proc_mnt;

static struct kmem_cache *proc_inode_cachep;

static struct inode *proc_alloc_inode(struct super_block *sb)
{
	struct proc_inode *ei;
	struct inode *inode;

	ei = (struct proc_inode *)kmem_cache_alloc(proc_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	ei->pid = NULL;
	ei->fd = 0;
	ei->op.proc_get_link = NULL;
	ei->pde = NULL;
	ei->sysctl = NULL;
	ei->sysctl_entry = NULL;
	inode = &ei->vfs_inode;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	return inode;
}

static void proc_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	INIT_LIST_HEAD(&inode->i_dentry);
	kmem_cache_free(proc_inode_cachep, PROC_I(inode));
}

static void proc_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, proc_i_callback);
}

static void init_once(void *foo)
{
	struct proc_inode *ei = (struct proc_inode *) foo;

	inode_init_once(&ei->vfs_inode);
}

void __init proc_init_inodecache(void)
{
	proc_inode_cachep = kmem_cache_create("proc_inode_cache",
					      sizeof(struct proc_inode),
					      0, (SLAB_RECLAIM_ACCOUNT|
						  SLAB_MEM_SPREAD|SLAB_PANIC),
					      init_once);
}

static const struct super_operations proc_sops = {
	.alloc_inode	= proc_alloc_inode,
	.destroy_inode	= proc_destroy_inode,
	.drop_inode	= generic_delete_inode,
	.evict_inode	= proc_evict_inode,
	.statfs		= simple_statfs,
};

static void __pde_users_dec(struct proc_dir_entry *pde)
{
	pde->pde_users--;
	if (pde->pde_unload_completion && pde->pde_users == 0)
		complete(pde->pde_unload_completion);
}

void pde_users_dec(struct proc_dir_entry *pde)
{
	spin_lock(&pde->pde_unload_lock);
	__pde_users_dec(pde);
	spin_unlock(&pde->pde_unload_lock);
}

static loff_t proc_reg_llseek(struct file *file, loff_t offset, int whence)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	loff_t rv = -EINVAL;
	loff_t (*llseek)(struct file *, loff_t, int);

	spin_lock(&pde->pde_unload_lock);
	/*
	 * remove_proc_entry() is going to delete PDE (as part of module
	 * cleanup sequence). No new callers into module allowed.
	 */
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	/*
	 * Bump refcount so that remove_proc_entry will wait for ->llseek to
	 * complete.
	 */
	pde->pde_users++;
	/*
	 * Save function pointer under lock, to protect against ->proc_fops
	 * NULL'ifying right after ->pde_unload_lock is dropped.
	 */
	llseek = pde->proc_fops->llseek;
	spin_unlock(&pde->pde_unload_lock);

	if (!llseek)
		llseek = default_llseek;
	rv = llseek(file, offset, whence);

	pde_users_dec(pde);
	return rv;
}
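
/*
 * All of the proc_reg_* wrappers below follow the same pattern as
 * proc_reg_llseek(): take ->pde_unload_lock, bail out if ->proc_fops has
 * already been cleared, bump ->pde_users, sample the method pointer, drop
 * the lock, call the method, then drop the reference via pde_users_dec().
 *
 * For orientation only: the removal side (remove_proc_entry(), in
 * fs/proc/generic.c) roughly pairs with this by clearing ->proc_fops under
 * the same lock and, if ->pde_users is still non-zero, setting
 * ->pde_unload_completion and sleeping until the last in-flight wrapper
 * calls complete() from __pde_users_dec() above. A simplified sketch of
 * that wait, not the exact code:
 *
 *	DECLARE_COMPLETION_ONSTACK(c);
 *
 *	spin_lock(&de->pde_unload_lock);
 *	de->proc_fops = NULL;
 *	if (de->pde_users > 0) {
 *		de->pde_unload_completion = &c;
 *		spin_unlock(&de->pde_unload_lock);
 *		wait_for_completion(&c);
 *	} else
 *		spin_unlock(&de->pde_unload_lock);
 */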

static ssize_t proc_reg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	ssize_t rv = -EIO;
	ssize_t (*read)(struct file *, char __user *, size_t, loff_t *);

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	read = pde->proc_fops->read;
	spin_unlock(&pde->pde_unload_lock);

	if (read)
		rv = read(file, buf, count, ppos);

	pde_users_dec(pde);
	return rv;
}

static ssize_t proc_reg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	ssize_t rv = -EIO;
	ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *);

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	write = pde->proc_fops->write;
	spin_unlock(&pde->pde_unload_lock);

	if (write)
		rv = write(file, buf, count, ppos);

	pde_users_dec(pde);
	return rv;
}

static unsigned int proc_reg_poll(struct file *file, struct poll_table_struct *pts)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	unsigned int rv = DEFAULT_POLLMASK;
	unsigned int (*poll)(struct file *, struct poll_table_struct *);

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	poll = pde->proc_fops->poll;
	spin_unlock(&pde->pde_unload_lock);

	if (poll)
		rv = poll(file, pts);

	pde_users_dec(pde);
	return rv;
}

static long proc_reg_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	long rv = -ENOTTY;
	long (*ioctl)(struct file *, unsigned int, unsigned long);

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	ioctl = pde->proc_fops->unlocked_ioctl;
	spin_unlock(&pde->pde_unload_lock);

	if (ioctl)
		rv = ioctl(file, cmd, arg);

	pde_users_dec(pde);
	return rv;
}

#ifdef CONFIG_COMPAT
static long proc_reg_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	long rv = -ENOTTY;
	long (*compat_ioctl)(struct file *, unsigned int, unsigned long);

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	compat_ioctl = pde->proc_fops->compat_ioctl;
	spin_unlock(&pde->pde_unload_lock);

	if (compat_ioctl)
		rv = compat_ioctl(file, cmd, arg);

	pde_users_dec(pde);
	return rv;
}
#endif

static int proc_reg_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	int rv = -EIO;
	int (*mmap)(struct file *, struct vm_area_struct *);

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	mmap = pde->proc_fops->mmap;
	spin_unlock(&pde->pde_unload_lock);

	if (mmap)
		rv = mmap(file, vma);

	pde_users_dec(pde);
	return rv;
}

static int proc_reg_open(struct inode *inode, struct file *file)
{
	struct proc_dir_entry *pde = PDE(inode);
	int rv = 0;
	int (*open)(struct inode *, struct file *);
	int (*release)(struct inode *, struct file *);
	struct pde_opener *pdeo;

	/*
	 * What for, you ask? Well, we can have open, rmmod, remove_proc_entry
	 * sequence. ->release won't be called because ->proc_fops will be
	 * cleared. Depending on complexity of ->release, consequences vary.
	 *
	 * We can't wait for mercy when close will be done for real, it's
	 * deadlockable: rmmod foo </proc/foo . So, we're going to do ->release
	 * by hand in remove_proc_entry(). For this, save opener's credentials
	 * for later.
	 */
	pdeo = kmalloc(sizeof(struct pde_opener), GFP_KERNEL);
	if (!pdeo)
		return -ENOMEM;

	spin_lock(&pde->pde_unload_lock);
	if (!pde->proc_fops) {
		spin_unlock(&pde->pde_unload_lock);
		kfree(pdeo);
		return -EINVAL;
	}
	pde->pde_users++;
	open = pde->proc_fops->open;
	release = pde->proc_fops->release;
	spin_unlock(&pde->pde_unload_lock);

	if (open)
		rv = open(inode, file);

	spin_lock(&pde->pde_unload_lock);
	if (rv == 0 && release) {
		/* To know what to release. */
		pdeo->inode = inode;
		pdeo->file = file;
		/* Strictly for "too late" ->release in proc_reg_release(). */
		pdeo->release = release;
		list_add(&pdeo->lh, &pde->pde_openers);
	} else
		kfree(pdeo);
	__pde_users_dec(pde);
	spin_unlock(&pde->pde_unload_lock);
	return rv;
}

static struct pde_opener *find_pde_opener(struct proc_dir_entry *pde,
					  struct inode *inode, struct file *file)
{
	struct pde_opener *pdeo;

	list_for_each_entry(pdeo, &pde->pde_openers, lh) {
		if (pdeo->inode == inode && pdeo->file == file)
			return pdeo;
	}
	return NULL;
}

static int proc_reg_release(struct inode *inode, struct file *file)
{
	struct proc_dir_entry *pde = PDE(inode);
	int rv = 0;
	int (*release)(struct inode *, struct file *);
	struct pde_opener *pdeo;

	spin_lock(&pde->pde_unload_lock);
	pdeo = find_pde_opener(pde, inode, file);
	if (!pde->proc_fops) {
		/*
		 * Can't simply exit, __fput() will think that everything is OK,
		 * and move on to freeing struct file. remove_proc_entry() will
		 * find slacker in opener's list and will try to do non-trivial
		 * things with struct file. Therefore, remove opener from list.
		 *
		 * But if opener is removed from list, who will ->release it?
		 */
		if (pdeo) {
			list_del(&pdeo->lh);
			spin_unlock(&pde->pde_unload_lock);
			rv = pdeo->release(inode, file);
			kfree(pdeo);
		} else
			spin_unlock(&pde->pde_unload_lock);
		return rv;
	}
	pde->pde_users++;
	release = pde->proc_fops->release;
	if (pdeo) {
		list_del(&pdeo->lh);
		kfree(pdeo);
	}
	spin_unlock(&pde->pde_unload_lock);

	if (release)
		rv = release(inode, file);

	pde_users_dec(pde);
	return rv;
}
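
/*
 * Counterpart to the pde_openers bookkeeping above, for orientation only:
 * remove_proc_entry() (fs/proc/generic.c) walks ->pde_openers after
 * clearing ->proc_fops and calls each saved ->release() by hand, since
 * proc_reg_release() can no longer reach the module's fops at that point.
 * A simplified sketch of that loop, not the exact code:
 *
 *	spin_lock(&de->pde_unload_lock);
 *	while (!list_empty(&de->pde_openers)) {
 *		struct pde_opener *pdeo;
 *
 *		pdeo = list_first_entry(&de->pde_openers,
 *					struct pde_opener, lh);
 *		list_del(&pdeo->lh);
 *		spin_unlock(&de->pde_unload_lock);
 *		pdeo->release(pdeo->inode, pdeo->file);
 *		kfree(pdeo);
 *		spin_lock(&de->pde_unload_lock);
 *	}
 *	spin_unlock(&de->pde_unload_lock);
 */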

static const struct file_operations proc_reg_file_ops = {
	.llseek		= proc_reg_llseek,
	.read		= proc_reg_read,
	.write		= proc_reg_write,
	.poll		= proc_reg_poll,
	.unlocked_ioctl	= proc_reg_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= proc_reg_compat_ioctl,
#endif
	.mmap		= proc_reg_mmap,
	.open		= proc_reg_open,
	.release	= proc_reg_release,
};

#ifdef CONFIG_COMPAT
static const struct file_operations proc_reg_file_ops_no_compat = {
	.llseek		= proc_reg_llseek,
	.read		= proc_reg_read,
	.write		= proc_reg_write,
	.poll		= proc_reg_poll,
	.unlocked_ioctl	= proc_reg_unlocked_ioctl,
	.mmap		= proc_reg_mmap,
	.open		= proc_reg_open,
	.release	= proc_reg_release,
};
#endif

struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
{
	struct inode *inode;

	inode = iget_locked(sb, de->low_ino);
	if (!inode)
		return NULL;
	if (inode->i_state & I_NEW) {
		inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
		PROC_I(inode)->fd = 0;
		PROC_I(inode)->pde = de;

		if (de->mode) {
			inode->i_mode = de->mode;
			inode->i_uid = de->uid;
			inode->i_gid = de->gid;
		}
		if (de->size)
			inode->i_size = de->size;
		if (de->nlink)
			inode->i_nlink = de->nlink;
		if (de->proc_iops)
			inode->i_op = de->proc_iops;
		if (de->proc_fops) {
			if (S_ISREG(inode->i_mode)) {
#ifdef CONFIG_COMPAT
				if (!de->proc_fops->compat_ioctl)
					inode->i_fop =
						&proc_reg_file_ops_no_compat;
				else
#endif
					inode->i_fop = &proc_reg_file_ops;
			} else {
				inode->i_fop = de->proc_fops;
			}
		}
		unlock_new_inode(inode);
	} else
		pde_put(de);
	return inode;
}

int proc_fill_super(struct super_block *s)
{
	struct inode *root_inode;

	s->s_flags |= MS_NODIRATIME | MS_NOSUID | MS_NOEXEC;
	s->s_blocksize = 1024;
	s->s_blocksize_bits = 10;
	s->s_magic = PROC_SUPER_MAGIC;
	s->s_op = &proc_sops;
	s->s_time_gran = 1;

	pde_get(&proc_root);
	root_inode = proc_get_inode(s, &proc_root);
	if (!root_inode)
		goto out_no_root;
	root_inode->i_uid = 0;
	root_inode->i_gid = 0;
	s->s_root = d_alloc_root(root_inode);
	if (!s->s_root)
		goto out_no_root;
	return 0;

out_no_root:
	printk("proc_read_super: get root inode failed\n");
	iput(root_inode);
	pde_put(&proc_root);
	return -ENOMEM;
}
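
/*
 * Usage note, illustrative only (the names "foo", foo_fops and foo_open
 * below are made up): a module that registers a regular /proc file with
 *
 *	static const struct file_operations foo_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= foo_open,
 *		.read		= seq_read,
 *		.llseek		= seq_lseek,
 *		.release	= single_release,
 *	};
 *
 *	proc_create("foo", 0444, NULL, &foo_fops);
 *
 * never has its fops installed in the inode directly: proc_get_inode()
 * above points ->i_fop at proc_reg_file_ops (or the no-compat variant) for
 * S_ISREG entries, so every VFS call is funnelled through the proc_reg_*
 * wrappers and the ->pde_users / ->pde_openers machinery, which is what
 * lets remove_proc_entry("foo", NULL) wait out or finish off any callers
 * still inside the module.
 */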