#include <linux/mm.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs_struct.h>
#include <linux/mount.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include "internal.h"

/*
 * Logic: we've got two memory sums for each process: "shared" and
 * "non-shared".  Shared memory may get counted more than once, for
 * each process that owns it.  Non-shared memory is counted
 * accurately.
 */
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *p;
	unsigned long bytes = 0, sbytes = 0, slack = 0, size;

	down_read(&mm->mmap_sem);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);

		bytes += kobjsize(vma);

		region = vma->vm_region;
		if (region) {
			size = kobjsize(region);
			size += region->vm_end - region->vm_start;
		} else {
			size = vma->vm_end - vma->vm_start;
		}

		if (atomic_read(&mm->mm_count) > 1 ||
		    vma->vm_flags & VM_MAYSHARE) {
			sbytes += size;
		} else {
			bytes += size;
			if (region)
				slack = region->vm_end - vma->vm_end;
		}
	}

	if (atomic_read(&mm->mm_count) > 1)
		sbytes += kobjsize(mm);
	else
		bytes += kobjsize(mm);

	if (current->fs && current->fs->users > 1)
		sbytes += kobjsize(current->fs);
	else
		bytes += kobjsize(current->fs);

	if (current->files && atomic_read(&current->files->count) > 1)
		sbytes += kobjsize(current->files);
	else
		bytes += kobjsize(current->files);

	if (current->sighand && atomic_read(&current->sighand->count) > 1)
		sbytes += kobjsize(current->sighand);
	else
		bytes += kobjsize(current->sighand);

	bytes += kobjsize(current); /* includes kernel stack */

	seq_printf(m,
		"Mem:\t%8lu bytes\n"
		"Slack:\t%8lu bytes\n"
		"Shared:\t%8lu bytes\n",
		bytes, slack, sbytes);

	up_read(&mm->mmap_sem);
}

/* sum the sizes of all the process's VMAs, in bytes */
unsigned long task_vsize(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct rb_node *p;
	unsigned long vsize = 0;

	down_read(&mm->mmap_sem);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);
		vsize += vma->vm_end - vma->vm_start;
	}
	up_read(&mm->mmap_sem);
	return vsize;
}

/* report memory usage for /proc/<pid>/statm, in pages */
unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *p;
	unsigned long size = kobjsize(mm);

	down_read(&mm->mmap_sem);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);
		size += kobjsize(vma);
		region = vma->vm_region;
		if (region) {
			size += kobjsize(region);
			size += region->vm_end - region->vm_start;
		}
	}

	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
		>> PAGE_SHIFT;
	*data = (PAGE_ALIGN(mm->start_stack) - (mm->start_data & PAGE_MASK))
		>> PAGE_SHIFT;
	up_read(&mm->mmap_sem);
	size >>= PAGE_SHIFT;	/* size is now in pages */
	size += *text + *data;
	*resident = size;
	return size;
}

/*
 * display a single VMA to a seq_file
 */
static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
			  int is_pid)
{
	struct mm_struct *mm = vma->vm_mm;
	struct proc_maps_private *priv = m->private;
	unsigned long ino = 0;
	struct file *file;
	dev_t dev = 0;
	int flags;
	unsigned long long pgoff = 0;

	flags = vma->vm_flags;
	file = vma->vm_file;

	if (file) {
		struct inode *inode = file_inode(vma->vm_file);
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	}

	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
	seq_printf(m,
		   "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
		   vma->vm_start,
		   vma->vm_end,
		   flags & VM_READ ? 'r' : '-',
		   flags & VM_WRITE ? 'w' : '-',
		   flags & VM_EXEC ? 'x' : '-',
		   flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
		   pgoff,
		   MAJOR(dev), MINOR(dev), ino);

	if (file) {
		seq_pad(m, ' ');
		seq_path(m, &file->f_path, "");
	} else if (mm) {
		pid_t tid = vm_is_stack(priv->task, vma, is_pid);

		if (tid != 0) {
			seq_pad(m, ' ');
			/*
			 * Thread stack in /proc/PID/task/TID/maps or
			 * the main process stack.
			 */
			if (!is_pid || (vma->vm_start <= mm->start_stack &&
			    vma->vm_end >= mm->start_stack))
				seq_printf(m, "[stack]");
			else
				seq_printf(m, "[stack:%d]", tid);
		}
	}

	seq_putc(m, '\n');
	return 0;
}

/*
 * display mapping lines for a particular process's /proc/pid/maps
 */
static int show_map(struct seq_file *m, void *_p, int is_pid)
{
	struct rb_node *p = _p;

	return nommu_vma_show(m, rb_entry(p, struct vm_area_struct, vm_rb),
			      is_pid);
}

static int show_pid_map(struct seq_file *m, void *_p)
{
	return show_map(m, _p, 1);
}

static int show_tid_map(struct seq_file *m, void *_p)
{
	return show_map(m, _p, 0);
}

static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct mm_struct *mm;
	struct rb_node *p;
	loff_t n = *pos;

	/* pin the task and mm whilst we play with them */
	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = priv->mm;
	if (!mm || !atomic_inc_not_zero(&mm->mm_users))
		return NULL;

	down_read(&mm->mmap_sem);
	/* start from the Nth VMA */
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
		if (n-- == 0)
			return p;

	up_read(&mm->mmap_sem);
	mmput(mm);
	return NULL;
}

static void m_stop(struct seq_file *m, void *_vml)
{
	struct proc_maps_private *priv = m->private;

	if (!IS_ERR_OR_NULL(_vml)) {
		up_read(&priv->mm->mmap_sem);
		mmput(priv->mm);
	}
	if (priv->task) {
		put_task_struct(priv->task);
		priv->task = NULL;
	}
}

static void *m_next(struct seq_file *m, void *_p, loff_t *pos)
{
	struct rb_node *p = _p;

	(*pos)++;
	return p ? rb_next(p) : NULL;
}
static const struct seq_operations proc_pid_maps_ops = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_map
};

static const struct seq_operations proc_tid_maps_ops = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_map
};

static int maps_open(struct inode *inode, struct file *file,
		     const struct seq_operations *ops)
{
	struct proc_maps_private *priv;

	priv = __seq_open_private(file, ops, sizeof(*priv));
	if (!priv)
		return -ENOMEM;

	priv->inode = inode;
	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(priv->mm)) {
		int err = PTR_ERR(priv->mm);

		seq_release_private(inode, file);
		return err;
	}

	return 0;
}

static int map_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

	if (priv->mm)
		mmdrop(priv->mm);

	return seq_release_private(inode, file);
}

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return maps_open(inode, file, &proc_pid_maps_ops);
}

static int tid_maps_open(struct inode *inode, struct file *file)
{
	return maps_open(inode, file, &proc_tid_maps_ops);
}

const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= map_release,
};

const struct file_operations proc_tid_maps_operations = {
	.open		= tid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= map_release,
};
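/*
 * Illustration only -- not part of this kernel source.  A minimal
 * userspace sketch showing how the per-VMA lines emitted by
 * nommu_vma_show() above can be consumed: each line of
 * /proc/<pid>/maps begins with "start-end perms", matching the
 * seq_printf() format "%08lx-%08lx %c%c%c%c ..." used there.  It
 * assumes only that procfs is mounted at /proc; everything else is
 * standard C.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/self/maps", "r");
	char line[512];
	char perms[5];
	unsigned long start, end;

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* parse the leading "start-end perms" fields */
		if (sscanf(line, "%lx-%lx %4s", &start, &end, perms) == 3)
			printf("%#010lx..%#010lx %s %lu bytes\n",
			       start, end, perms, end - start);
	}
	fclose(f);
	return 0;
}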