xref: /openbmc/linux/fs/proc/task_nommu.c (revision 25985edc)
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs_struct.h>
#include <linux/mount.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include "internal.h"

/*
 * Logic: we've got two memory sums for each process: "shared" and
 * "non-shared". Shared memory may get counted more than once: once
 * for each process that owns it. Non-shared memory is counted
 * accurately.
 */
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *p;
	unsigned long bytes = 0, sbytes = 0, slack = 0, size;

	down_read(&mm->mmap_sem);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);

		bytes += kobjsize(vma);

		region = vma->vm_region;
		if (region) {
			size = kobjsize(region);
			size += region->vm_end - region->vm_start;
		} else {
			size = vma->vm_end - vma->vm_start;
		}

		if (atomic_read(&mm->mm_count) > 1 ||
		    vma->vm_flags & VM_MAYSHARE) {
			sbytes += size;
		} else {
			bytes += size;
			if (region)
				slack = region->vm_end - vma->vm_end;
		}
	}

	if (atomic_read(&mm->mm_count) > 1)
		sbytes += kobjsize(mm);
	else
		bytes += kobjsize(mm);

	if (current->fs && current->fs->users > 1)
		sbytes += kobjsize(current->fs);
	else
		bytes += kobjsize(current->fs);

	if (current->files && atomic_read(&current->files->count) > 1)
		sbytes += kobjsize(current->files);
	else
		bytes += kobjsize(current->files);

	if (current->sighand && atomic_read(&current->sighand->count) > 1)
		sbytes += kobjsize(current->sighand);
	else
		bytes += kobjsize(current->sighand);

	bytes += kobjsize(current); /* includes kernel stack */

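	/*
	 * Report the three totals.  The numbers below are purely
	 * illustrative of the output format, e.g.:
	 *
	 *	Mem:	  491520 bytes
	 *	Slack:	    4096 bytes
	 *	Shared:	   65536 bytes
	 */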
	seq_printf(m,
		"Mem:\t%8lu bytes\n"
		"Slack:\t%8lu bytes\n"
		"Shared:\t%8lu bytes\n",
		bytes, slack, sbytes);

	up_read(&mm->mmap_sem);
}

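/*
 * Report the total address space size: the sum of the sizes of all the
 * VMAs in the mm, in bytes.
 */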
unsigned long task_vsize(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct rb_node *p;
	unsigned long vsize = 0;

	down_read(&mm->mmap_sem);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);
		vsize += vma->vm_end - vma->vm_start;
	}
	up_read(&mm->mmap_sem);
	return vsize;
}

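/*
 * Compute the counters reported through /proc/<pid>/statm: kernel object
 * overhead plus region sizes, converted to pages, with the text and data
 * segment sizes returned separately.  Note that *shared is not touched
 * here.
 */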
unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *p;
	unsigned long size = kobjsize(mm);

	down_read(&mm->mmap_sem);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);
		size += kobjsize(vma);
		region = vma->vm_region;
		if (region) {
			size += kobjsize(region);
			size += region->vm_end - region->vm_start;
		}
	}

	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
		>> PAGE_SHIFT;
	*data = (PAGE_ALIGN(mm->start_stack) - (mm->start_data & PAGE_MASK))
		>> PAGE_SHIFT;
	up_read(&mm->mmap_sem);
	size >>= PAGE_SHIFT;
	size += *text + *data;
	*resident = size;
	return size;
}

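/*
 * Pad the current output line with spaces out to a fixed column so that
 * the file name / "[stack]" annotation lines up; len is the number of
 * characters already written, as captured by the %n in the format string
 * below.
 */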
static void pad_len_spaces(struct seq_file *m, int len)
{
	len = 25 + sizeof(void*) * 6 - len;
	if (len < 1)
		len = 1;
	seq_printf(m, "%*c", len, ' ');
}

/*
 * display a single VMA to a sequenced file
 */
static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long ino = 0;
	struct file *file;
	dev_t dev = 0;
	int flags, len;
	unsigned long long pgoff = 0;

	flags = vma->vm_flags;
	file = vma->vm_file;

	if (file) {
		struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	}

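	/*
	 * Emit one maps line: start-end, permissions, offset, device,
	 * inode, then (further down) the backing file or "[stack]".
	 * An illustrative example (values made up):
	 *
	 *	b6f02000-b6f23000 r-xs 00000000 00:0c 1234      /bin/busybox
	 */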
	seq_printf(m,
		   "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
		   vma->vm_start,
		   vma->vm_end,
		   flags & VM_READ ? 'r' : '-',
		   flags & VM_WRITE ? 'w' : '-',
		   flags & VM_EXEC ? 'x' : '-',
		   flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
		   pgoff,
		   MAJOR(dev), MINOR(dev), ino, &len);

	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, &file->f_path, "");
	} else if (mm) {
		if (vma->vm_start <= mm->start_stack &&
			vma->vm_end >= mm->start_stack) {
			pad_len_spaces(m, len);
			seq_puts(m, "[stack]");
		}
	}

	seq_putc(m, '\n');
	return 0;
}

/*
 * display mapping lines for a particular process's /proc/pid/maps
 */
static int show_map(struct seq_file *m, void *_p)
{
	struct rb_node *p = _p;

	return nommu_vma_show(m, rb_entry(p, struct vm_area_struct, vm_rb));
}

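/*
 * seq_file ->start() operation: look up and pin the target task and its
 * mm, take mmap_sem for reading, and return the rb_node of the *pos'th
 * VMA (or NULL once the end of the tree is reached).
 */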
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct mm_struct *mm;
	struct rb_node *p;
	loff_t n = *pos;

	/* pin the task and mm whilst we play with them */
	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = mm_for_maps(priv->task);
	if (!mm || IS_ERR(mm)) {
		put_task_struct(priv->task);
		priv->task = NULL;
		return mm;
	}
	down_read(&mm->mmap_sem);

	/* start from the Nth VMA */
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
		if (n-- == 0)
			return p;
	return NULL;
}

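/*
 * seq_file ->stop() operation: drop the lock and the references taken in
 * m_start() once iteration is finished.
 */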
static void m_stop(struct seq_file *m, void *_vml)
{
	struct proc_maps_private *priv = m->private;

	if (priv->task) {
		struct mm_struct *mm = priv->task->mm;
		up_read(&mm->mmap_sem);
		mmput(mm);
		put_task_struct(priv->task);
	}
}

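/*
 * seq_file ->next() operation: advance to the next VMA in the rbtree.
 */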
static void *m_next(struct seq_file *m, void *_p, loff_t *pos)
{
	struct rb_node *p = _p;

	(*pos)++;
	return p ? rb_next(p) : NULL;
}

static const struct seq_operations proc_pid_maps_ops = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

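/*
 * Open /proc/<pid>/maps: allocate the per-open private data recording
 * which pid to report on, and attach it to the seq_file.
 */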
static int maps_open(struct inode *inode, struct file *file)
{
	struct proc_maps_private *priv;
	int ret = -ENOMEM;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->pid = proc_pid(inode);
		ret = seq_open(file, &proc_pid_maps_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}

const struct file_operations proc_maps_operations = {
	.open		= maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};