/*
 *  linux/fs/file.c
 *
 *  Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 *  Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

struct fdtable_defer {
	spinlock_t lock;
	struct work_struct wq;
	struct fdtable *next;
};

int sysctl_nr_open __read_mostly = 1024*1024;

/*
 * We use this list to defer freeing fdtables that have vmalloced
 * sets/arrays. By keeping a per-cpu list, we avoid having to embed
 * the work_struct in the fdtable itself, which would mean a 64 byte
 * (i386) increase in this per-task structure.
 */
static DEFINE_PER_CPU(struct fdtable_defer, fdtable_defer_list);

static inline void *alloc_fdmem(unsigned int size)
{
	if (size <= PAGE_SIZE)
		return kmalloc(size, GFP_KERNEL);
	else
		return vmalloc(size);
}

static inline void free_fdarr(struct fdtable *fdt)
{
	/* The fd array was kmalloced iff it fit within a single page. */
	if (fdt->max_fds <= (PAGE_SIZE / sizeof(struct file *)))
		kfree(fdt->fd);
	else
		vfree(fdt->fd);
}

static inline void free_fdset(struct fdtable *fdt)
{
	/*
	 * The two bitmaps (open_fds and close_on_exec) share one
	 * allocation; it was kmalloced iff their combined size,
	 * 2 * max_fds / BITS_PER_BYTE, fit within a single page.
	 */
	if (fdt->max_fds <= (PAGE_SIZE * BITS_PER_BYTE / 2))
		kfree(fdt->open_fds);
	else
		vfree(fdt->open_fds);
}

static void free_fdtable_work(struct work_struct *work)
{
	struct fdtable_defer *f =
		container_of(work, struct fdtable_defer, wq);
	struct fdtable *fdt;

	spin_lock_bh(&f->lock);
	fdt = f->next;
	f->next = NULL;
	spin_unlock_bh(&f->lock);
	while (fdt) {
		struct fdtable *next = fdt->next;
		vfree(fdt->fd);
		free_fdset(fdt);
		kfree(fdt);
		fdt = next;
	}
}

void free_fdtable_rcu(struct rcu_head *rcu)
{
	struct fdtable *fdt = container_of(rcu, struct fdtable, rcu);
	struct fdtable_defer *fddef;

	BUG_ON(!fdt);

	if (fdt->max_fds <= NR_OPEN_DEFAULT) {
		/*
		 * This fdtable is embedded in the files structure and that
		 * structure itself is getting destroyed.
		 */
		kmem_cache_free(files_cachep,
				container_of(fdt, struct files_struct, fdtab));
		return;
	}
	if (fdt->max_fds <= (PAGE_SIZE / sizeof(struct file *))) {
		/*
		 * If the fd array fit in a single page, the (smaller)
		 * bitmap allocation did too, so everything was kmalloced
		 * and can be freed right here.
		 */
		kfree(fdt->fd);
		kfree(fdt->open_fds);
		kfree(fdt);
	} else {
		fddef = &get_cpu_var(fdtable_defer_list);
		spin_lock(&fddef->lock);
		fdt->next = fddef->next;
		fddef->next = fdt;
		/* vmallocs are handled from the workqueue context */
		schedule_work(&fddef->wq);
		spin_unlock(&fddef->lock);
		put_cpu_var(fdtable_defer_list);
	}
}
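/*
 * Worked example of the three free paths above (illustrative only;
 * assumes 4K pages, 8-byte pointers and BITS_PER_LONG == 64, in which
 * case NR_OPEN_DEFAULT is 64):
 *
 *   - max_fds <= 64:  this is the fdtable embedded in files_struct;
 *     the whole files_struct goes back to its slab cache.
 *   - max_fds <= 512: the fd array (512 * 8 = 4096 bytes) and the
 *     bitmaps (2 * 512 / 8 = 128 bytes) were both kmalloced and can
 *     be freed inline.
 *   - max_fds > 512:  the fd array was vmalloced, and vfree() may not
 *     be called from the softirq context this RCU callback runs in,
 *     so the fdtable is queued to the per-cpu workqueue instead.
 */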
/*
 * Copy the existing fd array and fdsets into a new, larger fdtable,
 * zeroing the newly added slots. Called with files->file_lock held.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	unsigned int cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);
	if (ofdt->max_fds == 0)
		return;

	/* Copy the old fd array, then clear the newly added slots. */
	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)(nfdt->fd) + cpy, 0, set);

	/* Likewise for the open_fds and close_on_exec bitmaps. */
	cpy = ofdt->max_fds / BITS_PER_BYTE;
	set = (nfdt->max_fds - ofdt->max_fds) / BITS_PER_BYTE;
	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
	memset((char *)(nfdt->open_fds) + cpy, 0, set);
	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
	memset((char *)(nfdt->close_on_exec) + cpy, 0, set);
}

static struct fdtable *alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	char *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on.
	 */
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
	/*
	 * Note that this can drive nr *below* what we had passed if
	 * sysctl_nr_open had been set lower between the check in
	 * expand_files() and here. Deal with that in the caller; it's
	 * cheaper that way.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG -
	 * otherwise the bitmap handling below becomes unpleasant, to put
	 * it mildly...
	 */
	if (unlikely(nr > sysctl_nr_open))
		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;

	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = alloc_fdmem(nr * sizeof(struct file *));
	if (!data)
		goto out_fdt;
	fdt->fd = (struct file **)data;
	/* One allocation holds both bitmaps, open_fds first. */
	data = alloc_fdmem(max_t(unsigned int,
				 2 * nr / BITS_PER_BYTE, L1_CACHE_BYTES));
	if (!data)
		goto out_arr;
	fdt->open_fds = (fd_set *)data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = (fd_set *)data;
	INIT_RCU_HEAD(&fdt->rcu);
	fdt->next = NULL;

	return fdt;

out_arr:
	free_fdarr(fdt);
out_fdt:
	kfree(fdt);
out:
	return NULL;
}
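/*
 * Example of the sizing policy in alloc_fdtable() (illustrative only;
 * assumes 8-byte pointers, so 1024 / sizeof(struct file *) == 128):
 *
 *	nr = 200:   200 / 128 = 1,  roundup_pow_of_two(2) = 2,
 *	            2 * 128  =  256 slots (2 KB fd array)
 *	nr = 300:   300 / 128 = 2,  roundup_pow_of_two(3) = 4,
 *	            4 * 128  =  512 slots (4 KB fd array)
 *	nr = 1000: 1000 / 128 = 7,  roundup_pow_of_two(8) = 8,
 *	            8 * 128  = 1024 slots (8 KB fd array)
 *
 * The result is always a multiple of 128, hence of BITS_PER_LONG, so
 * copy_fdtable() can copy and clear the bitmaps in whole bytes without
 * any partial-word fixups.
 */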
/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);
	spin_lock(&files->file_lock);
	if (!new_fdt)
		return -ENOMEM;
	/*
	 * Extremely unlikely race - sysctl_nr_open decreased between the
	 * check in the caller and alloc_fdtable(). Cheaper to catch it
	 * here...
	 */
	if (unlikely(new_fdt->max_fds <= nr)) {
		free_fdarr(new_fdt);
		free_fdset(new_fdt);
		kfree(new_fdt);
		return -EMFILE;
	}
	/*
	 * Check again since another task may have expanded the fd table
	 * while we dropped the lock.
	 */
	cur_fdt = files_fdtable(files);
	if (nr >= cur_fdt->max_fds) {
		/* Continue as planned */
		copy_fdtable(new_fdt, cur_fdt);
		rcu_assign_pointer(files->fdt, new_fdt);
		if (cur_fdt->max_fds > NR_OPEN_DEFAULT)
			free_fdtable(cur_fdt);
	} else {
		/* Somebody else expanded, so undo our attempt */
		free_fdarr(new_fdt);
		free_fdset(new_fdt);
		kfree(new_fdt);
	}
	return 1;
}

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
int expand_files(struct files_struct *files, int nr)
{
	struct fdtable *fdt;

	fdt = files_fdtable(files);
	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return 0;
	/* Can we expand? */
	if (nr >= sysctl_nr_open)
		return -EMFILE;

	/* All good, so we try */
	return expand_fdtable(files, nr);
}

static void __devinit fdtable_defer_list_init(int cpu)
{
	struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu);
	spin_lock_init(&fddef->lock);
	INIT_WORK(&fddef->wq, free_fdtable_work);
	fddef->next = NULL;
}

void __init files_defer_init(void)
{
	int i;
	for_each_possible_cpu(i)
		fdtable_defer_list_init(i);
}
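/*
 * Sketch of the expected calling convention for expand_files() (not
 * part of this file; fd allocators such as get_unused_fd() follow this
 * pattern). The caller holds files->file_lock and must rescan for a
 * free fd whenever expand_files() returns 1, since the lock was
 * dropped and retaken while the table grew. Here "start" stands for
 * whatever lower bound the caller tracks, e.g. files->next_fd:
 *
 *	spin_lock(&files->file_lock);
 * repeat:
 *	fdt = files_fdtable(files);
 *	fd = find_next_zero_bit(fdt->open_fds->fds_bits, fdt->max_fds,
 *				start);
 *	error = expand_files(files, fd);
 *	if (error < 0)
 *		goto out;	// -EMFILE or -ENOMEM, give up
 *	if (error)
 *		goto repeat;	// table was replaced - rescan the bitmap
 *	FD_SET(fd, fdt->open_fds);
 *	...
 *	spin_unlock(&files->file_lock);
 */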