#include <linux/mm.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs_struct.h>
#include <linux/mount.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/sched/mm.h>

#include "internal.h"

/*
 * Logic: we've got two memory sums for each process, "shared", and
 * "non-shared".  Shared memory may get counted more than once, for
 * each process that owns it.  Non-shared memory is counted
 * accurately.
 */
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *p;
	unsigned long bytes = 0, sbytes = 0, slack = 0, size;

	down_read(&mm->mmap_sem);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);

		bytes += kobjsize(vma);

		region = vma->vm_region;
		if (region) {
			size = kobjsize(region);
			size += region->vm_end - region->vm_start;
		} else {
			size = vma->vm_end - vma->vm_start;
		}

		if (atomic_read(&mm->mm_count) > 1 ||
		    vma->vm_flags & VM_MAYSHARE) {
			sbytes += size;
		} else {
			bytes += size;
			if (region)
				slack = region->vm_end - vma->vm_end;
		}
	}

	if (atomic_read(&mm->mm_count) > 1)
		sbytes += kobjsize(mm);
	else
		bytes += kobjsize(mm);

	if (current->fs && current->fs->users > 1)
		sbytes += kobjsize(current->fs);
	else
		bytes += kobjsize(current->fs);

	if (current->files && atomic_read(&current->files->count) > 1)
		sbytes += kobjsize(current->files);
	else
		bytes += kobjsize(current->files);

	if (current->sighand && atomic_read(&current->sighand->count) > 1)
		sbytes += kobjsize(current->sighand);
	else
		bytes += kobjsize(current->sighand);

	bytes += kobjsize(current); /* includes kernel stack */

	seq_printf(m,
		"Mem:\t%8lu bytes\n"
		"Slack:\t%8lu bytes\n"
		"Shared:\t%8lu bytes\n",
		bytes, slack, sbytes);

	up_read(&mm->mmap_sem);
}
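/*
 * Illustrative example, not part of the original file: with a shared
 * mm (mm_count > 1) every VMA's size lands in "Shared" and "Slack"
 * stays zero, so the seq_printf() above would emit something like
 * (hypothetical numbers):
 *
 *	Mem:	    8192 bytes
 *	Slack:	       0 bytes
 *	Shared:	  262144 bytes
 */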
unsigned long task_vsize(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct rb_node *p;
	unsigned long vsize = 0;

	down_read(&mm->mmap_sem);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);
		vsize += vma->vm_end - vma->vm_start;
	}
	up_read(&mm->mmap_sem);
	return vsize;
}

unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *p;
	unsigned long size = kobjsize(mm);

	down_read(&mm->mmap_sem);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);
		size += kobjsize(vma);
		region = vma->vm_region;
		if (region) {
			size += kobjsize(region);
			size += region->vm_end - region->vm_start;
		}
	}

	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
		>> PAGE_SHIFT;
	*data = (PAGE_ALIGN(mm->start_stack) - (mm->start_data & PAGE_MASK))
		>> PAGE_SHIFT;
	up_read(&mm->mmap_sem);
	size >>= PAGE_SHIFT;
	size += *text + *data;
	*resident = size;
	return size;
}

static int is_stack(struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;

	/*
	 * We make no effort to guess what a given thread considers to be
	 * its "stack".  It's not even well-defined for programs written
	 * in languages like Go.
	 */
	return vma->vm_start <= mm->start_stack &&
		vma->vm_end >= mm->start_stack;
}

/*
 * display a single VMA to a sequenced file
 */
static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
			  int is_pid)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long ino = 0;
	struct file *file;
	dev_t dev = 0;
	int flags;
	unsigned long long pgoff = 0;

	flags = vma->vm_flags;
	file = vma->vm_file;

	if (file) {
		struct inode *inode = file_inode(vma->vm_file);
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	}

	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
	seq_printf(m,
		   "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
		   vma->vm_start,
		   vma->vm_end,
		   flags & VM_READ ? 'r' : '-',
		   flags & VM_WRITE ? 'w' : '-',
		   flags & VM_EXEC ? 'x' : '-',
		   flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
		   pgoff,
		   MAJOR(dev), MINOR(dev), ino);

	if (file) {
		seq_pad(m, ' ');
		seq_file_path(m, file, "");
	} else if (mm && is_stack(vma)) {
		seq_pad(m, ' ');
		seq_printf(m, "[stack]");
	}

	seq_putc(m, '\n');
	return 0;
}
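/*
 * Illustrative example, not from a real system: for a private,
 * executable file mapping the seq_printf() above produces a line like
 *
 *	c0a80000-c0a9b000 r-xp 00000000 00:0b 1042       /bin/busybox
 *
 * i.e. start-end, r/w/x permissions plus 'S' (shared), 's' (may
 * share) or 'p' (private), file offset, device major:minor (hex),
 * inode, then the backing path, or "[stack]" for the stack VMA.
 */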
/*
 * display mapping lines for a particular process's /proc/pid/maps
 */
static int show_map(struct seq_file *m, void *_p, int is_pid)
{
	struct rb_node *p = _p;

	return nommu_vma_show(m, rb_entry(p, struct vm_area_struct, vm_rb),
			      is_pid);
}

static int show_pid_map(struct seq_file *m, void *_p)
{
	return show_map(m, _p, 1);
}

static int show_tid_map(struct seq_file *m, void *_p)
{
	return show_map(m, _p, 0);
}

static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct mm_struct *mm;
	struct rb_node *p;
	loff_t n = *pos;

	/* pin the task and mm whilst we play with them */
	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = priv->mm;
	if (!mm || !mmget_not_zero(mm))
		return NULL;

	down_read(&mm->mmap_sem);
	/* start from the Nth VMA */
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
		if (n-- == 0)
			return p;

	up_read(&mm->mmap_sem);
	mmput(mm);
	return NULL;
}

static void m_stop(struct seq_file *m, void *_vml)
{
	struct proc_maps_private *priv = m->private;

	/* release the lock and mm reference taken in m_start() */
	if (!IS_ERR_OR_NULL(_vml)) {
		up_read(&priv->mm->mmap_sem);
		mmput(priv->mm);
	}
	if (priv->task) {
		put_task_struct(priv->task);
		priv->task = NULL;
	}
}

static void *m_next(struct seq_file *m, void *_p, loff_t *pos)
{
	struct rb_node *p = _p;

	(*pos)++;
	return p ? rb_next(p) : NULL;
}

static const struct seq_operations proc_pid_maps_ops = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_map
};

static const struct seq_operations proc_tid_maps_ops = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_map
};

static int maps_open(struct inode *inode, struct file *file,
		     const struct seq_operations *ops)
{
	struct proc_maps_private *priv;

	priv = __seq_open_private(file, ops, sizeof(*priv));
	if (!priv)
		return -ENOMEM;

	priv->inode = inode;
	/*
	 * proc_mem_open() takes only an mm_count reference on the mm
	 * (no mm_users), which is why m_start() must mmget_not_zero()
	 * before walking it; map_release() pairs this with mmdrop().
	 */
	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(priv->mm)) {
		int err = PTR_ERR(priv->mm);

		seq_release_private(inode, file);
		return err;
	}

	return 0;
}

static int map_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

	if (priv->mm)
		mmdrop(priv->mm);

	return seq_release_private(inode, file);
}

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return maps_open(inode, file, &proc_pid_maps_ops);
}

static int tid_maps_open(struct inode *inode, struct file *file)
{
	return maps_open(inode, file, &proc_tid_maps_ops);
}

const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= map_release,
};

const struct file_operations proc_tid_maps_operations = {
	.open		= tid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= map_release,
};
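/*
 * Illustrative note on how the pieces above fit together (an
 * assumption about typical use, not part of the original file):
 *
 *	open()  -> pid_maps_open() -> maps_open()
 *	read()  -> m_start() -> ->show() -> m_next() -> ... -> m_stop()
 *	close() -> map_release()
 *
 * m_start() pins the task and, when there are VMAs to show, takes an
 * mm_users reference and mmap_sem for reading; m_stop() releases them.
 * The mm_count reference from proc_mem_open() is dropped last, in
 * map_release().
 */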