/*
 * block/ioprio.c
 *
 * Copyright (C) 2004 Jens Axboe <axboe@kernel.dk>
 *
 * Helper functions for setting/querying io priorities of processes. The
 * system calls closely mimic getpriority/setpriority; see the man page for
 * those. The prio argument is a composite of prio class and prio data, where
 * the data argument has meaning within that class. The standard scheduling
 * classes have 8 distinct prio levels, with 0 being the highest prio and 7
 * being the lowest.
 *
 * IOW, setting the BE scheduling class with prio 2 is done like so:
 *
 * unsigned int prio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 2;
 *
 * ioprio_set(PRIO_PROCESS, pid, prio);
 *
 * See also Documentation/block/ioprio.txt
 *
 */
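/*
 * A fuller userspace sketch of the call above (editor's example, not part of
 * the original source): put the calling thread into the best-effort class at
 * level 2 via the raw syscall, since glibc provides no ioprio_set() wrapper.
 * It assumes the IOPRIO_* class and shift constants are available to the
 * program (typically copied from include/linux/ioprio.h when no uapi header
 * is installed):
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <stdio.h>
 *
 *	int prio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 2;
 *
 *	if (syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0, prio) < 0)
 *		perror("ioprio_set");
 */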
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ioprio.h>
#include <linux/cred.h>
#include <linux/blkdev.h>
#include <linux/capability.h>
#include <linux/sched/user.h>
#include <linux/sched/task.h>
#include <linux/syscalls.h>
#include <linux/security.h>
#include <linux/pid_namespace.h>

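/*
 * set_task_ioprio - change @task's io priority to @ioprio.
 *
 * The target's real uid must match the caller's uid or euid, or the caller
 * must hold CAP_SYS_NICE; the security module is also consulted.  The new
 * value is stored in the task's io_context, allocating one if the task does
 * not have one yet.
 */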
int set_task_ioprio(struct task_struct *task, int ioprio)
{
	int err;
	struct io_context *ioc;
	const struct cred *cred = current_cred(), *tcred;

	rcu_read_lock();
	tcred = __task_cred(task);
	if (!uid_eq(tcred->uid, cred->euid) &&
	    !uid_eq(tcred->uid, cred->uid) && !capable(CAP_SYS_NICE)) {
		rcu_read_unlock();
		return -EPERM;
	}
	rcu_read_unlock();

	err = security_task_setioprio(task, ioprio);
	if (err)
		return err;

	ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
	if (ioc) {
		ioc->ioprio = ioprio;
		put_io_context(ioc);
	}

	return err;
}
EXPORT_SYMBOL_GPL(set_task_ioprio);

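/*
 * ioprio_set(2) - validate the requested class/data pair, then apply it to a
 * single task, a process group, or all tasks of a user, depending on @which.
 * The RT class requires CAP_SYS_ADMIN; RT and BE take a level in
 * [0, IOPRIO_BE_NR); NONE must carry no data.
 */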
SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
{
	int class = IOPRIO_PRIO_CLASS(ioprio);
	int data = IOPRIO_PRIO_DATA(ioprio);
	struct task_struct *p, *g;
	struct user_struct *user;
	struct pid *pgrp;
	kuid_t uid;
	int ret;

	switch (class) {
		case IOPRIO_CLASS_RT:
			if (!capable(CAP_SYS_ADMIN))
				return -EPERM;
			/* fall through, rt has prio field too */
		case IOPRIO_CLASS_BE:
			if (data >= IOPRIO_BE_NR || data < 0)
				return -EINVAL;

			break;
		case IOPRIO_CLASS_IDLE:
			break;
		case IOPRIO_CLASS_NONE:
			if (data)
				return -EINVAL;
			break;
		default:
			return -EINVAL;
	}

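	/*
	 * Resolve the target task(s) under RCU and apply the new priority;
	 * if no matching target is found, ret stays at -ESRCH.
	 */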
	ret = -ESRCH;
	rcu_read_lock();
	switch (which) {
		case IOPRIO_WHO_PROCESS:
			if (!who)
				p = current;
			else
				p = find_task_by_vpid(who);
			if (p)
				ret = set_task_ioprio(p, ioprio);
			break;
		case IOPRIO_WHO_PGRP:
			if (!who)
				pgrp = task_pgrp(current);
			else
				pgrp = find_vpid(who);
			do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
				ret = set_task_ioprio(p, ioprio);
				if (ret)
					break;
			} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
			break;
		case IOPRIO_WHO_USER:
			uid = make_kuid(current_user_ns(), who);
			if (!uid_valid(uid))
				break;
			if (!who)
				user = current_user();
			else
				user = find_user(uid);

			if (!user)
				break;

			for_each_process_thread(g, p) {
				if (!uid_eq(task_uid(p), uid) ||
				    !task_pid_vnr(p))
					continue;
				ret = set_task_ioprio(p, ioprio);
				if (ret)
					goto free_uid;
			}
free_uid:
			if (who)
				free_uid(user);
			break;
		default:
			ret = -EINVAL;
	}

	rcu_read_unlock();
	return ret;
}

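/*
 * get_task_ioprio - read @p's io priority, if the security module allows it.
 * A task that never had an io_context reports the default
 * IOPRIO_CLASS_NONE/IOPRIO_NORM value.
 */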
static int get_task_ioprio(struct task_struct *p)
{
	int ret;

	ret = security_task_getioprio(p);
	if (ret)
		goto out;
	ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
	task_lock(p);
	if (p->io_context)
		ret = p->io_context->ioprio;
	task_unlock(p);
out:
	return ret;
}

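/*
 * ioprio_best - pick the stronger of two io priorities.  Invalid values are
 * first normalized to the BE/IOPRIO_NORM default; since lower numeric values
 * mean higher priority, the minimum wins.
 */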
int ioprio_best(unsigned short aprio, unsigned short bprio)
{
	if (!ioprio_valid(aprio))
		aprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
	if (!ioprio_valid(bprio))
		bprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);

	return min(aprio, bprio);
}

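/*
 * ioprio_get(2) - report the io priority of a task, or the highest priority
 * found in a process group or among a user's tasks, depending on @which.
 * Returns -ESRCH when no matching task is found.
 */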
SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	struct pid *pgrp;
	kuid_t uid;
	int ret = -ESRCH;
	int tmpio;

	rcu_read_lock();
	switch (which) {
		case IOPRIO_WHO_PROCESS:
			if (!who)
				p = current;
			else
				p = find_task_by_vpid(who);
			if (p)
				ret = get_task_ioprio(p);
			break;
		case IOPRIO_WHO_PGRP:
			if (!who)
				pgrp = task_pgrp(current);
			else
				pgrp = find_vpid(who);
			do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
				tmpio = get_task_ioprio(p);
				if (tmpio < 0)
					continue;
				if (ret == -ESRCH)
					ret = tmpio;
				else
					ret = ioprio_best(ret, tmpio);
			} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
			break;
		case IOPRIO_WHO_USER:
			uid = make_kuid(current_user_ns(), who);
			if (!who)
				user = current_user();
			else
				user = find_user(uid);

			if (!user)
				break;

			for_each_process_thread(g, p) {
				if (!uid_eq(task_uid(p), user->uid) ||
				    !task_pid_vnr(p))
					continue;
				tmpio = get_task_ioprio(p);
				if (tmpio < 0)
					continue;
				if (ret == -ESRCH)
					ret = tmpio;
				else
					ret = ioprio_best(ret, tmpio);
			}

			if (who)
				free_uid(user);
			break;
		default:
			ret = -EINVAL;
	}

	rcu_read_unlock();
	return ret;
}