/*
 * linux/fs/file.c
 *
 * Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 * Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/time.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

struct fdtable_defer {
        spinlock_t lock;
        struct work_struct wq;
        struct fdtable *next;
};

int sysctl_nr_open __read_mostly = 1024*1024;
int sysctl_nr_open_min = BITS_PER_LONG;
int sysctl_nr_open_max = 1024 * 1024; /* raised later */

/*
 * We use this list to defer the freeing of fdtables that have vmalloced
 * sets/arrays. By keeping a per-cpu list, we avoid having to embed the
 * work_struct in the fdtable itself, which would mean a 64 byte (i386)
 * increase in this per-task structure.
 */
static DEFINE_PER_CPU(struct fdtable_defer, fdtable_defer_list);

static inline void *alloc_fdmem(unsigned int size)
{
        if (size <= PAGE_SIZE)
                return kmalloc(size, GFP_KERNEL);
        else
                return vmalloc(size);
}

static inline void free_fdarr(struct fdtable *fdt)
{
        if (fdt->max_fds <= (PAGE_SIZE / sizeof(struct file *)))
                kfree(fdt->fd);
        else
                vfree(fdt->fd);
}

static inline void free_fdset(struct fdtable *fdt)
{
        if (fdt->max_fds <= (PAGE_SIZE * BITS_PER_BYTE / 2))
                kfree(fdt->open_fds);
        else
                vfree(fdt->open_fds);
}

static void free_fdtable_work(struct work_struct *work)
{
        struct fdtable_defer *f =
                container_of(work, struct fdtable_defer, wq);
        struct fdtable *fdt;

        spin_lock_bh(&f->lock);
        fdt = f->next;
        f->next = NULL;
        spin_unlock_bh(&f->lock);
        while (fdt) {
                struct fdtable *next = fdt->next;
                vfree(fdt->fd);
                free_fdset(fdt);
                kfree(fdt);
                fdt = next;
        }
}

void free_fdtable_rcu(struct rcu_head *rcu)
{
        struct fdtable *fdt = container_of(rcu, struct fdtable, rcu);
        struct fdtable_defer *fddef;

        BUG_ON(!fdt);

        if (fdt->max_fds <= NR_OPEN_DEFAULT) {
                /*
                 * This fdtable is embedded in the files structure and that
                 * structure itself is getting destroyed.
                 */
                kmem_cache_free(files_cachep,
                                container_of(fdt, struct files_struct, fdtab));
                return;
        }
        if (fdt->max_fds <= (PAGE_SIZE / sizeof(struct file *))) {
                kfree(fdt->fd);
                kfree(fdt->open_fds);
                kfree(fdt);
        } else {
                fddef = &get_cpu_var(fdtable_defer_list);
                spin_lock(&fddef->lock);
                fdt->next = fddef->next;
                fddef->next = fdt;
                /* vmallocs are handled from the workqueue context */
                schedule_work(&fddef->wq);
                spin_unlock(&fddef->lock);
                put_cpu_var(fdtable_defer_list);
        }
}
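/*
 * Worked example of the size thresholds above (a sketch assuming a
 * 4096-byte PAGE_SIZE and 8-byte pointers, i.e. a typical 64-bit box):
 *
 *      max_fds <= 512 : fd array fits in a page  -> kmalloc()/kfree()
 *      max_fds >  512 : fd array exceeds a page  -> vmalloc()/vfree()
 *
 * The two bitmaps cost 2 bits per fd, so they only outgrow a page once
 * 2 * max_fds / BITS_PER_BYTE > PAGE_SIZE, i.e. max_fds > 16384 - which
 * is exactly the PAGE_SIZE * BITS_PER_BYTE / 2 threshold in free_fdset().
 * This is also why free_fdtable_work() may vfree() ->fd unconditionally:
 * a table is only deferred when max_fds > 512, so its fd array is always
 * vmalloced, while the bitmaps still need free_fdset() to pick the right
 * allocator.
 */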
/*
 * Copy the existing fd arrays and fd sets over into the new fdtable.
 * Called with the files spinlock held.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
        unsigned int cpy, set;

        BUG_ON(nfdt->max_fds < ofdt->max_fds);

        cpy = ofdt->max_fds * sizeof(struct file *);
        set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
        memcpy(nfdt->fd, ofdt->fd, cpy);
        memset((char *)(nfdt->fd) + cpy, 0, set);

        cpy = ofdt->max_fds / BITS_PER_BYTE;
        set = (nfdt->max_fds - ofdt->max_fds) / BITS_PER_BYTE;
        memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
        memset((char *)(nfdt->open_fds) + cpy, 0, set);
        memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
        memset((char *)(nfdt->close_on_exec) + cpy, 0, set);
}

static struct fdtable *alloc_fdtable(unsigned int nr)
{
        struct fdtable *fdt;
        char *data;

        /*
         * Figure out how many fds we actually want to support in this fdtable.
         * Allocation steps are keyed to the size of the fdarray, since it
         * grows far faster than any of the other dynamic data. We try to fit
         * the fdarray into comfortable page-tuned chunks: starting at 1024B
         * and growing in powers of two from there on.
         */
        nr /= (1024 / sizeof(struct file *));
        nr = roundup_pow_of_two(nr + 1);
        nr *= (1024 / sizeof(struct file *));
        /*
         * Note that this can drive nr *below* what we had passed if
         * sysctl_nr_open had been set lower between the check in
         * expand_files() and here. Deal with that in the caller; it's
         * cheaper that way.
         *
         * We make sure that nr remains a multiple of BITS_PER_LONG -
         * otherwise the bitmap handling below becomes unpleasant, to put
         * it mildly...
         */
        if (unlikely(nr > sysctl_nr_open))
                nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;

        fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL);
        if (!fdt)
                goto out;
        fdt->max_fds = nr;
        data = alloc_fdmem(nr * sizeof(struct file *));
        if (!data)
                goto out_fdt;
        fdt->fd = (struct file **)data;
        /* One buffer holds both bitmaps: open_fds first, close_on_exec after */
        data = alloc_fdmem(max_t(unsigned int,
                                 2 * nr / BITS_PER_BYTE, L1_CACHE_BYTES));
        if (!data)
                goto out_arr;
        fdt->open_fds = (fd_set *)data;
        data += nr / BITS_PER_BYTE;
        fdt->close_on_exec = (fd_set *)data;
        INIT_RCU_HEAD(&fdt->rcu);
        fdt->next = NULL;

        return fdt;

out_arr:
        free_fdarr(fdt);
out_fdt:
        kfree(fdt);
out:
        return NULL;
}
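/*
 * Worked example of the sizing logic in alloc_fdtable() (a sketch
 * assuming 8-byte pointers and a 4096-byte PAGE_SIZE): a request for
 * fd 1500 gives
 *
 *      nr = 1500 / (1024 / 8)          = 11
 *      nr = roundup_pow_of_two(11 + 1) = 16
 *      nr = 16 * (1024 / 8)            = 2048 fds
 *
 * so the fd array is 2048 * 8 = 16KB and comes from vmalloc(), while
 * both bitmaps share a single 2 * 2048 / 8 = 512-byte kmalloced buffer
 * that the "data += nr / BITS_PER_BYTE" line splits down the middle.
 */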
/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, int nr)
        __releases(files->file_lock)
        __acquires(files->file_lock)
{
        struct fdtable *new_fdt, *cur_fdt;

        spin_unlock(&files->file_lock);
        new_fdt = alloc_fdtable(nr);
        spin_lock(&files->file_lock);
        if (!new_fdt)
                return -ENOMEM;
        /*
         * Extremely unlikely race - sysctl_nr_open decreased between the
         * check in the caller and alloc_fdtable(). Cheaper to catch it
         * here...
         */
        if (unlikely(new_fdt->max_fds <= nr)) {
                free_fdarr(new_fdt);
                free_fdset(new_fdt);
                kfree(new_fdt);
                return -EMFILE;
        }
        /*
         * Check again since another task may have expanded the fd table
         * while we dropped the lock.
         */
        cur_fdt = files_fdtable(files);
        if (nr >= cur_fdt->max_fds) {
                /* Continue as planned */
                copy_fdtable(new_fdt, cur_fdt);
                rcu_assign_pointer(files->fdt, new_fdt);
                if (cur_fdt->max_fds > NR_OPEN_DEFAULT)
                        free_fdtable(cur_fdt);
        } else {
                /* Somebody else expanded, so undo our attempt */
                free_fdarr(new_fdt);
                free_fdset(new_fdt);
                kfree(new_fdt);
        }
        return 1;
}

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
int expand_files(struct files_struct *files, int nr)
{
        struct fdtable *fdt;

        fdt = files_fdtable(files);

        /*
         * N.B. For clone tasks sharing a files structure, this test
         * will limit the total number of files that can be opened.
         */
        if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
                return -EMFILE;

        /* Do we need to expand? */
        if (nr < fdt->max_fds)
                return 0;

        /* Can we expand? */
        if (nr >= sysctl_nr_open)
                return -EMFILE;

        /* All good, so we try */
        return expand_fdtable(files, nr);
}

static int count_open_files(struct fdtable *fdt)
{
        int size = fdt->max_fds;
        int i;

        /* Find the last open fd */
        for (i = size / (8 * sizeof(long)); i > 0; ) {
                if (fdt->open_fds->fds_bits[--i])
                        break;
        }
        i = (i + 1) * 8 * sizeof(long);
        return i;
}
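/*
 * Example of the scan above (a sketch assuming 64-bit longs): with
 * max_fds = 256 there are 256 / 64 = 4 bitmap words. If the highest
 * open fd is 70, word 1 (fds 64..127) is the last non-zero word, so
 * the loop stops at i = 1 and the function returns (1 + 1) * 64 = 128.
 * The result is therefore rounded up to a multiple of BITS_PER_LONG
 * rather than being the exact highest fd + 1 - precisely what dup_fd()
 * below relies on when it memcpy()s open_files / 8 whole bytes of each
 * bitmap.
 */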
/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
        struct files_struct *newf;
        struct file **old_fds, **new_fds;
        int open_files, size, i;
        struct fdtable *old_fdt, *new_fdt;

        *errorp = -ENOMEM;
        newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
        if (!newf)
                goto out;

        atomic_set(&newf->count, 1);

        spin_lock_init(&newf->file_lock);
        newf->next_fd = 0;
        new_fdt = &newf->fdtab;
        new_fdt->max_fds = NR_OPEN_DEFAULT;
        new_fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init;
        new_fdt->open_fds = (fd_set *)&newf->open_fds_init;
        new_fdt->fd = &newf->fd_array[0];
        INIT_RCU_HEAD(&new_fdt->rcu);
        new_fdt->next = NULL;

        spin_lock(&oldf->file_lock);
        old_fdt = files_fdtable(oldf);
        open_files = count_open_files(old_fdt);

        /*
         * Check whether we need to allocate a larger fd array and fd set.
         */
        while (unlikely(open_files > new_fdt->max_fds)) {
                spin_unlock(&oldf->file_lock);

                if (new_fdt != &newf->fdtab) {
                        free_fdarr(new_fdt);
                        free_fdset(new_fdt);
                        kfree(new_fdt);
                }

                new_fdt = alloc_fdtable(open_files - 1);
                if (!new_fdt) {
                        *errorp = -ENOMEM;
                        goto out_release;
                }

                /* beyond sysctl_nr_open; nothing more we can do */
                if (unlikely(new_fdt->max_fds < open_files)) {
                        free_fdarr(new_fdt);
                        free_fdset(new_fdt);
                        kfree(new_fdt);
                        *errorp = -EMFILE;
                        goto out_release;
                }

                /*
                 * Reacquire the oldf lock and a pointer to its fd table:
                 * another task may have installed a new, bigger table while
                 * we dropped the lock, so we need the latest pointer.
                 */
                spin_lock(&oldf->file_lock);
                old_fdt = files_fdtable(oldf);
                open_files = count_open_files(old_fdt);
        }

        old_fds = old_fdt->fd;
        new_fds = new_fdt->fd;

        memcpy(new_fdt->open_fds->fds_bits,
               old_fdt->open_fds->fds_bits, open_files / 8);
        memcpy(new_fdt->close_on_exec->fds_bits,
               old_fdt->close_on_exec->fds_bits, open_files / 8);

        for (i = open_files; i != 0; i--) {
                struct file *f = *old_fds++;
                if (f) {
                        get_file(f);
                } else {
                        /*
                         * The fd may be claimed in the fd bitmap but not yet
                         * instantiated in the files array if a sibling thread
                         * is partway through open(). So make sure that this
                         * fd is available to the new process.
                         */
                        FD_CLR(open_files - i, new_fdt->open_fds);
                }
                rcu_assign_pointer(*new_fds++, f);
        }
        spin_unlock(&oldf->file_lock);

        /* compute the remainder to be cleared */
        size = (new_fdt->max_fds - open_files) * sizeof(struct file *);

        /* This is long word aligned thus could use an optimized version */
        memset(new_fds, 0, size);

        if (new_fdt->max_fds > open_files) {
                int left = (new_fdt->max_fds - open_files) / 8;
                int start = open_files / (8 * sizeof(unsigned long));

                memset(&new_fdt->open_fds->fds_bits[start], 0, left);
                memset(&new_fdt->close_on_exec->fds_bits[start], 0, left);
        }

        rcu_assign_pointer(newf->fdt, new_fdt);

        return newf;

out_release:
        kmem_cache_free(files_cachep, newf);
out:
        return NULL;
}
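/*
 * A sketch of the caller's side of dup_fd(), modelled on copy_files()
 * in kernel/fork.c (simplified; the real function also handles the
 * CLONE_FILES case, where it just bumps oldf->count instead):
 *
 *      struct files_struct *newf;
 *      int error = 0;
 *
 *      newf = dup_fd(current->files, &error);
 *      if (!newf)
 *              return error;   (errorp is only valid on a NULL return)
 *      tsk->files = newf;
 *      return 0;
 */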
static void __devinit fdtable_defer_list_init(int cpu)
{
        struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu);
        spin_lock_init(&fddef->lock);
        INIT_WORK(&fddef->wq, free_fdtable_work);
        fddef->next = NULL;
}

void __init files_defer_init(void)
{
        int i;
        for_each_possible_cpu(i)
                fdtable_defer_list_init(i);
        sysctl_nr_open_max = min((size_t)INT_MAX, ~(size_t)0/sizeof(void *)) &
                             -BITS_PER_LONG;
}

struct files_struct init_files = {
        .count          = ATOMIC_INIT(1),
        .fdt            = &init_files.fdtab,
        .fdtab          = {
                .max_fds        = NR_OPEN_DEFAULT,
                .fd             = &init_files.fd_array[0],
                .close_on_exec  = (fd_set *)&init_files.close_on_exec_init,
                .open_fds       = (fd_set *)&init_files.open_fds_init,
                .rcu            = RCU_HEAD_INIT,
        },
        .file_lock      = __SPIN_LOCK_UNLOCKED(init_task.file_lock),
};

/*
 * allocate a file descriptor, mark it busy.
 */
int alloc_fd(unsigned start, unsigned flags)
{
        struct files_struct *files = current->files;
        unsigned int fd;
        int error;
        struct fdtable *fdt;

        spin_lock(&files->file_lock);
repeat:
        fdt = files_fdtable(files);
        fd = start;
        if (fd < files->next_fd)
                fd = files->next_fd;

        if (fd < fdt->max_fds)
                fd = find_next_zero_bit(fdt->open_fds->fds_bits,
                                        fdt->max_fds, fd);

        error = expand_files(files, fd);
        if (error < 0)
                goto out;

        /*
         * If we needed to expand the fd array we
         * might have blocked - try again.
         */
        if (error)
                goto repeat;

        if (start <= files->next_fd)
                files->next_fd = fd + 1;

        FD_SET(fd, fdt->open_fds);
        if (flags & O_CLOEXEC)
                FD_SET(fd, fdt->close_on_exec);
        else
                FD_CLR(fd, fdt->close_on_exec);
        error = fd;
#if 1
        /* Sanity check */
        if (rcu_dereference(fdt->fd[fd]) != NULL) {
                printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
                rcu_assign_pointer(fdt->fd[fd], NULL);
        }
#endif

out:
        spin_unlock(&files->file_lock);
        return error;
}

int get_unused_fd(void)
{
        return alloc_fd(0, 0);
}
EXPORT_SYMBOL(get_unused_fd);
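/*
 * Typical usage of the allocator above - a sketch modelled loosely on
 * the do_sys_open() path (details such as flag handling differ in the
 * real code):
 *
 *      int fd = get_unused_fd();
 *      if (fd >= 0) {
 *              struct file *f = do_filp_open(dfd, pathname, flags, mode);
 *              if (IS_ERR(f)) {
 *                      put_unused_fd(fd);
 *                      fd = PTR_ERR(f);
 *              } else {
 *                      fd_install(fd, f);
 *              }
 *      }
 *      return fd;
 *
 * Between alloc_fd() and fd_install() the descriptor is reserved in
 * open_fds but fdt->fd[fd] is still NULL - the invariant the "Sanity
 * check" in alloc_fd() enforces, and the one dup_fd() must cope with
 * when it copies a table out from under a sibling thread's open().
 */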