xref: /openbmc/linux/kernel/bpf/log.c (revision a5961bed)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 */
#include <uapi/linux/btf.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/math64.h>

static bool bpf_verifier_log_attr_valid(const struct bpf_verifier_log *log)
{
	/* ubuf and len_total should both be specified (or not) together */
	if (!!log->ubuf != !!log->len_total)
		return false;
	/* log buf without log_level is meaningless */
	if (log->ubuf && log->level == 0)
		return false;
	if (log->level & ~BPF_LOG_MASK)
		return false;
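	/* cap the buffer size, presumably to leave headroom so the u32/u64
	 * position arithmetic below can never overflow (the rationale is an
	 * assumption; the check itself is from the original code)
	 */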
	if (log->len_total > UINT_MAX >> 2)
		return false;
	return true;
}

int bpf_vlog_init(struct bpf_verifier_log *log, u32 log_level,
		  char __user *log_buf, u32 log_size)
{
	log->level = log_level;
	log->ubuf = log_buf;
	log->len_total = log_size;

	/* log attributes have to be sane */
	if (!bpf_verifier_log_attr_valid(log))
		return -EINVAL;

	return 0;
}
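
/* Sketched caller (hypothetical; real call sites live in the verifier
 * and BTF code, which pass the attributes from the bpf() syscall):
 *
 *	err = bpf_vlog_init(&env->log, attr->log_level,
 *			    u64_to_user_ptr(attr->log_buf),
 *			    attr->log_size);
 *	if (err)
 *		return err;
 */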

static void bpf_vlog_update_len_max(struct bpf_verifier_log *log, u32 add_len)
{
	/* add_len includes terminal \0, so no need for +1. */
	u64 len = log->end_pos + add_len;

	/* log->len_max could be larger than our current len due to
	 * bpf_vlog_reset() calls, so we maintain the max of any length at any
	 * previous point
	 */
	if (len > UINT_MAX)
		log->len_max = UINT_MAX;
	else if (len > log->len_max)
		log->len_max = len;
}

void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
		       va_list args)
{
	u64 cur_pos;
	u32 new_n, n;

	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);

	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
		  "verifier log line truncated - local buffer too short\n");

	if (log->level == BPF_LOG_KERNEL) {
		bool newline = n > 0 && log->kbuf[n - 1] == '\n';

		pr_err("BPF: %s%s", log->kbuf, newline ? "" : "\n");
		return;
	}

	n += 1; /* include terminating zero */
	bpf_vlog_update_len_max(log, n);

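	/* Two user-visible modes follow: with BPF_LOG_FIXED, ubuf is filled
	 * from the beginning and, once full, further output is only counted,
	 * not written; in the default rotating mode, ubuf acts as a ring
	 * buffer that always keeps the last len_total bytes of output.
	 */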
	if (log->level & BPF_LOG_FIXED) {
		/* check if we have at least something to put into user buf */
		new_n = 0;
		if (log->end_pos < log->len_total) {
			new_n = min_t(u32, log->len_total - log->end_pos, n);
			log->kbuf[new_n - 1] = '\0';
		}

		cur_pos = log->end_pos;
		log->end_pos += n - 1; /* don't count terminating '\0' */

		if (log->ubuf && new_n &&
		    copy_to_user(log->ubuf + cur_pos, log->kbuf, new_n))
			goto fail;
	} else {
		u64 new_end, new_start;
		u32 buf_start, buf_end, new_n;

		new_end = log->end_pos + n;
		if (new_end - log->start_pos >= log->len_total)
			new_start = new_end - log->len_total;
		else
			new_start = log->start_pos;

		log->start_pos = new_start;
		log->end_pos = new_end - 1; /* don't count terminating '\0' */

		if (!log->ubuf)
			return;

		new_n = min(n, log->len_total);
		cur_pos = new_end - new_n;
		div_u64_rem(cur_pos, log->len_total, &buf_start);
		div_u64_rem(new_end, log->len_total, &buf_end);
		/* new_end and buf_end are exclusive indices, so if buf_end is
		 * exactly zero, then it actually points right to the end of
		 * ubuf and there is no wrap around
		 */
		if (buf_end == 0)
			buf_end = log->len_total;

		/* if buf_start > buf_end, we wrapped around;
		 * if buf_start == buf_end, we filled ubuf completely; we
		 * can't have buf_start == buf_end meaning that there is
		 * nothing to write, because we always write at least
		 * something, even if it is just the terminating '\0'
		 */
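		/* Worked example (hypothetical numbers): len_total = 8,
		 * end_pos = 7, n = 5 (incl. '\0') gives new_end = 12,
		 * new_n = 5, cur_pos = 7, so buf_start = 7 % 8 = 7 and
		 * buf_end = 12 % 8 = 4; buf_start > buf_end, so we copy
		 * 1 byte to ubuf[7] and the remaining 4 bytes to ubuf[0..3].
		 */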
		if (buf_start < buf_end) {
			/* message fits within contiguous chunk of ubuf */
			if (copy_to_user(log->ubuf + buf_start,
					 log->kbuf + n - new_n,
					 buf_end - buf_start))
				goto fail;
		} else {
			/* message wraps around the end of ubuf, copy in two chunks */
			if (copy_to_user(log->ubuf + buf_start,
					 log->kbuf + n - new_n,
					 log->len_total - buf_start))
				goto fail;
			if (copy_to_user(log->ubuf,
					 log->kbuf + n - buf_end,
					 buf_end))
				goto fail;
		}
	}

	return;
fail:
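	/* on a copy fault, poison the log by clearing ubuf; later writes
	 * become no-ops and bpf_vlog_finalize() will report -EFAULT
	 */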
	log->ubuf = NULL;
}

void bpf_vlog_reset(struct bpf_verifier_log *log, u64 new_pos)
{
	char zero = 0;
	u32 pos;

	if (WARN_ON_ONCE(new_pos > log->end_pos))
		return;

	if (!bpf_verifier_log_needed(log) || log->level == BPF_LOG_KERNEL)
		return;

	/* if the position we reset to precedes the current log window,
	 * then we didn't preserve any useful content and should adjust
	 * start_pos to end up with an empty log (start_pos == end_pos)
	 */
	log->end_pos = new_pos;
	if (log->end_pos < log->start_pos)
		log->start_pos = log->end_pos;

	if (!log->ubuf)
		return;

	if (log->level & BPF_LOG_FIXED)
		pos = log->end_pos + 1;
	else
		div_u64_rem(new_pos, log->len_total, &pos);

	if (pos < log->len_total && put_user(zero, log->ubuf + pos))
		log->ubuf = NULL;
}
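
/* Sketched use (hypothetical; the verifier does something similar when
 * it discards speculative output):
 *
 *	u64 saved_pos = log->end_pos;
 *	... emit tentative log lines ...
 *	bpf_vlog_reset(log, saved_pos);
 */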

static void bpf_vlog_reverse_kbuf(char *buf, int len)
{
	int i, j;

	for (i = 0, j = len - 1; i < j; i++, j--)
		swap(buf[i], buf[j]);
}

static int bpf_vlog_reverse_ubuf(struct bpf_verifier_log *log, int start, int end)
{
	/* we split log->kbuf into two equal parts for both ends of array */
	int n = sizeof(log->kbuf) / 2, nn;
	char *lbuf = log->kbuf, *rbuf = log->kbuf + n;

	/* Read ubuf's section [start, end) two chunks at a time, from left
	 * and right side; within each chunk, swap all the bytes; after that
	 * reverse the order of lbuf and rbuf and write result back to ubuf.
	 * This way we'll end up with swapped contents of specified
	 * [start, end) ubuf segment.
	 */
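	/* Worked example (hypothetical, with tiny chunks n = 2): reversing
	 * "ABCDEF" copies lbuf = "AB" and rbuf = "EF", reverses them to "BA"
	 * and "FE", and writes them back swapped, giving "FECDBA"; the next
	 * iteration (nn = 1) swaps "C" and "D", giving "FEDCBA".
	 */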
	while (end - start > 1) {
		nn = min(n, (end - start) / 2);

		if (copy_from_user(lbuf, log->ubuf + start, nn))
			return -EFAULT;
		if (copy_from_user(rbuf, log->ubuf + end - nn, nn))
			return -EFAULT;

		bpf_vlog_reverse_kbuf(lbuf, nn);
		bpf_vlog_reverse_kbuf(rbuf, nn);

		/* we write lbuf to the right end of ubuf, while rbuf to the
		 * left one to end up with properly reversed overall ubuf
		 */
		if (copy_to_user(log->ubuf + start, rbuf, nn))
			return -EFAULT;
		if (copy_to_user(log->ubuf + end - nn, lbuf, nn))
			return -EFAULT;

		start += nn;
		end -= nn;
	}

	return 0;
}

int bpf_vlog_finalize(struct bpf_verifier_log *log, u32 *log_size_actual)
{
	u32 sublen;
	int err;

	*log_size_actual = 0;
	if (!log || log->level == 0 || log->level == BPF_LOG_KERNEL)
		return 0;

	if (!log->ubuf)
		goto skip_log_rotate;
	/* If we never truncated log, there is nothing to move around. */
	if (log->start_pos == 0)
		goto skip_log_rotate;

	/* Otherwise we need to rotate log contents to make it start from the
	 * buffer beginning and be a continuous zero-terminated string. Note
	 * that if log->start_pos != 0 then we definitely filled up the entire
	 * log buffer with no gaps, and we just need to shift buffer contents
	 * to the left by (log->start_pos % log->len_total) bytes.
	 *
	 * Unfortunately, the user buffer could be huge and we don't want to
	 * allocate temporary kernel memory of the same size just to shift
	 * contents in a straightforward fashion. Instead, we'll be clever and
	 * do in-place array rotation. This is a leetcode-style problem, which
	 * can be solved with three reversals.
	 *
	 * Let's say we have a log buffer that has to be shifted left by 7
	 * bytes (spaces and the vertical bar are just for demonstration
	 * purposes):
	 *   E F G H I J K | A B C D
	 *
	 * First, we reverse the entire array:
	 *   D C B A | K J I H G F E
	 *
	 * Then we reverse the first 4 bytes (DCBA) and separately the last 7
	 * bytes (KJIHGFE), resulting in a properly rotated array:
	 *   A B C D | E F G H I J K
	 *
	 * We'll utilize log->kbuf to read user memory chunk by chunk, swap
	 * bytes, and write them back. Doing it byte-by-byte would be
	 * unnecessarily inefficient. Altogether we are going to read and
	 * write each byte twice, for a total of 4 memory copies between
	 * kernel and user space.
	 */

	/* length of the chopped off part that will be the beginning;
	 * len(ABCD) in the example above
	 */
	div_u64_rem(log->start_pos, log->len_total, &sublen);
	sublen = log->len_total - sublen;
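	/* e.g., with len_total = 11 and start_pos = 7 (the example above):
	 * the remainder is 7, so sublen = 11 - 7 = 4, i.e. len(ABCD)
	 */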

	err = bpf_vlog_reverse_ubuf(log, 0, log->len_total);
	err = err ?: bpf_vlog_reverse_ubuf(log, 0, sublen);
	err = err ?: bpf_vlog_reverse_ubuf(log, sublen, log->len_total);
	if (err)
		log->ubuf = NULL;

skip_log_rotate:
	*log_size_actual = log->len_max;

	/* properly initialized log has either both ubuf!=NULL and len_total>0
	 * or ubuf==NULL and len_total==0, so if this condition doesn't hold,
	 * we got a fault somewhere along the way and report it back
	 */
	if (!!log->ubuf != !!log->len_total)
		return -EFAULT;

	/* did truncation actually happen? */
	if (log->ubuf && log->len_max > log->len_total)
		return -ENOSPC;

	return 0;
}
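
/* Sketched caller (hypothetical; the syscall path does something similar
 * and copies the value back to userspace as log_true_size):
 *
 *	u32 log_true_size;
 *	err = err ?: bpf_vlog_finalize(&env->log, &log_true_size);
 */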

/* log_level controls the verbosity level of the eBPF verifier.
 * bpf_verifier_log_write() is used to dump the verification trace to the
 * log, so the user can figure out what's wrong with the program.
 */
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
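
/* Most in-tree callers use a thin wrapper; sketched usage, assuming the
 * verbose() macro from include/linux/bpf_verifier.h and a hypothetical
 * message:
 *
 *	verbose(env, "invalid stack off=%d\n", off);
 */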

__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
			    const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(bpf_log);
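
/* Sketched usage from a BTF-side caller (hypothetical message text):
 *
 *	bpf_log(log, "arg#%d type is not a struct\n", arg);
 */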
331