/* fs/proc/task_nommu.c (revision 545e4006) */

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mount.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include "internal.h"

/*
 * Logic: we've got two memory sums for each process: "shared" and
 * "non-shared". Shared memory may get counted more than once, for
 * each process that owns it. Non-shared memory is counted
 * accurately.
 */
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	struct vm_list_struct *vml;
	unsigned long bytes = 0, sbytes = 0, slack = 0;

	down_read(&mm->mmap_sem);
	for (vml = mm->context.vmlist; vml; vml = vml->next) {
		if (!vml->vma)
			continue;

		bytes += kobjsize(vml);
		if (atomic_read(&mm->mm_count) > 1 ||
		    atomic_read(&vml->vma->vm_usage) > 1
		    ) {
			sbytes += kobjsize((void *) vml->vma->vm_start);
			sbytes += kobjsize(vml->vma);
		} else {
			bytes += kobjsize((void *) vml->vma->vm_start);
			bytes += kobjsize(vml->vma);
			slack += kobjsize((void *) vml->vma->vm_start) -
				(vml->vma->vm_end - vml->vma->vm_start);
		}
	}

	if (atomic_read(&mm->mm_count) > 1)
		sbytes += kobjsize(mm);
	else
		bytes += kobjsize(mm);

	if (current->fs && atomic_read(&current->fs->count) > 1)
		sbytes += kobjsize(current->fs);
	else
		bytes += kobjsize(current->fs);

	if (current->files && atomic_read(&current->files->count) > 1)
		sbytes += kobjsize(current->files);
	else
		bytes += kobjsize(current->files);

	if (current->sighand && atomic_read(&current->sighand->count) > 1)
		sbytes += kobjsize(current->sighand);
	else
		bytes += kobjsize(current->sighand);

	bytes += kobjsize(current); /* includes kernel stack */

	seq_printf(m,
		"Mem:\t%8lu bytes\n"
		"Slack:\t%8lu bytes\n"
		"Shared:\t%8lu bytes\n",
		bytes, slack, sbytes);

	up_read(&mm->mmap_sem);
}

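/*
 * Sum the sizes of the regions mapped by the process, using the size of
 * each region's backing allocation as reported by kobjsize().
 */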
unsigned long task_vsize(struct mm_struct *mm)
{
	struct vm_list_struct *tbp;
	unsigned long vsize = 0;

	down_read(&mm->mmap_sem);
	for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
		if (tbp->vma)
			vsize += kobjsize((void *) tbp->vma->vm_start);
	}
	up_read(&mm->mmap_sem);
	return vsize;
}

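/*
 * Work up rough /proc/pid/statm figures: the total covers the mm, the VMA
 * bookkeeping and the region allocations, plus the text and data segment
 * sizes; on a no-MMU system everything counted here is resident.
 */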
int task_statm(struct mm_struct *mm, int *shared, int *text,
	       int *data, int *resident)
{
	struct vm_list_struct *tbp;
	int size = kobjsize(mm);

	down_read(&mm->mmap_sem);
	for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
		size += kobjsize(tbp);
		if (tbp->vma) {
			size += kobjsize(tbp->vma);
			size += kobjsize((void *) tbp->vma->vm_start);
		}
	}

	size += (*text = mm->end_code - mm->start_code);
	size += (*data = mm->start_stack - mm->start_data);
	up_read(&mm->mmap_sem);
	*resident = size;
	return size;
}

/*
 * display mapping lines for a particular process's /proc/pid/maps
 */
static int show_map(struct seq_file *m, void *_vml)
{
	struct vm_list_struct *vml = _vml;
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;

	if (maps_protect && !ptrace_may_access(task, PTRACE_MODE_READ))
		return -EACCES;

	return nommu_vma_show(m, vml->vma);
}

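/*
 * seq_file iterator start: pin the task and its mm, then return the Nth
 * entry of the VMA list.  mm_for_maps() returns with mmap_sem held for
 * reading; it is dropped again in m_stop().
 */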
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_list_struct *vml;
	struct mm_struct *mm;
	loff_t n = *pos;

	/* pin the task and mm whilst we play with them */
	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return NULL;

	mm = mm_for_maps(priv->task);
	if (!mm) {
		put_task_struct(priv->task);
		priv->task = NULL;
		return NULL;
	}

	/* start from the Nth VMA */
	for (vml = mm->context.vmlist; vml; vml = vml->next)
		if (n-- == 0)
			return vml;
	return NULL;
}

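/*
 * seq_file iterator stop: drop the mmap_sem taken in m_start() and release
 * the mm and task references.
 */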
static void m_stop(struct seq_file *m, void *_vml)
{
	struct proc_maps_private *priv = m->private;

	if (priv->task) {
		struct mm_struct *mm = priv->task->mm;
		up_read(&mm->mmap_sem);
		mmput(mm);
		put_task_struct(priv->task);
	}
}

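/* seq_file iterator next: advance to the next entry in the VMA list */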
static void *m_next(struct seq_file *m, void *_vml, loff_t *pos)
{
	struct vm_list_struct *vml = _vml;

	(*pos)++;
	return vml ? vml->next : NULL;
}

static const struct seq_operations proc_pid_maps_ops = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

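/*
 * Open /proc/pid/maps: allocate the per-open private data, record the
 * target pid and hang the private data off the seq_file.
 */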
static int maps_open(struct inode *inode, struct file *file)
{
	struct proc_maps_private *priv;
	int ret = -ENOMEM;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->pid = proc_pid(inode);
		ret = seq_open(file, &proc_pid_maps_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}

const struct file_operations proc_maps_operations = {
	.open		= maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};