/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Traceprobe fetch helper inlines
 */

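/*
 * Store @val into @buf, truncated to the size requested by the store
 * instruction (code->size).  A size of 0 (or any unlisted size) falls back
 * to storing a full unsigned long.
 */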
static nokprobe_inline void
fetch_store_raw(unsigned long val, struct fetch_insn *code, void *buf)
{
	switch (code->size) {
	case 1:
		*(u8 *)buf = (u8)val;
		break;
	case 2:
		*(u16 *)buf = (u16)val;
		break;
	case 4:
		*(u32 *)buf = (u32)val;
		break;
	case 8:
		//TBD: 32bit signed
		*(u64 *)buf = (u64)val;
		break;
	default:
		*(unsigned long *)buf = val;
	}
}

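/*
 * Extract a bitfield from the value already stored in *buf: shift out the
 * bits above the field (code->lshift), then shift the field down to bit 0
 * (code->rshift).  Assuming the parser sets
 * lshift = basesize * 8 - (bit_width + bit_offset) and
 * rshift = basesize * 8 - bit_width, a 4-bit field at bit offset 2 of a
 * u32 gets lshift = 26 and rshift = 28.
 */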
static nokprobe_inline void
fetch_apply_bitfield(struct fetch_insn *code, void *buf)
{
	switch (code->basesize) {
	case 1:
		*(u8 *)buf <<= code->lshift;
		*(u8 *)buf >>= code->rshift;
		break;
	case 2:
		*(u16 *)buf <<= code->lshift;
		*(u16 *)buf >>= code->rshift;
		break;
	case 4:
		*(u32 *)buf <<= code->lshift;
		*(u32 *)buf >>= code->rshift;
		break;
	case 8:
		*(u64 *)buf <<= code->lshift;
		*(u64 *)buf >>= code->rshift;
		break;
	}
}
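
/*
 * Bitfield arguments come from the "b<bit-width>@<bit-offset>/<container-size>"
 * fetcharg type (see Documentation/trace/kprobetrace.rst), which the parser
 * compiles into a FETCH_OP_MOD_BF instruction applied after the store stage.
 */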

/*
 * These functions must be defined for each callsite.
 * Return consumed dynamic data size (>= 0), or error (< 0).
 * If dest is NULL, don't store result and return required dynamic data size.
 */
static int
process_fetch_insn(struct fetch_insn *code, void *rec,
		   void *dest, void *base);
static nokprobe_inline int fetch_store_strlen(unsigned long addr);
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base);
static nokprobe_inline int fetch_store_strlen_user(unsigned long addr);
static nokprobe_inline int
fetch_store_string_user(unsigned long addr, void *dest, void *base);
static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size);
static nokprobe_inline int
probe_mem_read_user(void *dest, void *src, size_t size);
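
/*
 * As an illustration (not part of this header), the kprobe-event callsite in
 * trace_kprobe.c typically implements probe_mem_read()/probe_mem_read_user()
 * as thin wrappers around copy_from_kernel_nofault()/copy_from_user_nofault(),
 * while the uprobe callsite reads everything from user memory.
 */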

/*
 * Return the length of the "symbol+offset" string for @addr, including the
 * trailing '\0', or 0 if sprint_symbol() reports an error.
 */
static nokprobe_inline int
fetch_store_symstrlen(unsigned long addr)
{
	char namebuf[KSYM_SYMBOL_LEN];
	int ret;

	ret = sprint_symbol(namebuf, addr);
	if (ret < 0)
		return 0;

	return ret + 1;
}

/*
 * Fetch a null-terminated "symbol+offset" string for @addr. The caller MUST
 * set *(u32 *)dest with the max length and the relative data location
 * beforehand.
 */
static nokprobe_inline int
fetch_store_symstring(unsigned long addr, void *dest, void *base)
{
	int maxlen = get_loc_len(*(u32 *)dest);
	void *__dest;

	if (unlikely(!maxlen))
		return -ENOMEM;

	__dest = get_loc_data(dest, base);

	return sprint_symbol(__dest, addr);
}

/*
 * From the 2nd stage on, the routine is the same for every probe type: the
 * per-callsite process_fetch_insn() resolves the 1st-stage operand into @val
 * and then calls this helper to dereference memory, store the value, apply
 * modifiers, and loop over array elements.
 */
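/*
 * Illustrative shape of a compiled fetch program (the exact ops depend on
 * the argument syntax and the parser in trace_probe.c):
 *
 *	[1st stage] FETCH_OP_REG / FETCH_OP_STACK / ...  (handled by the caller)
 *	[2nd stage] zero or more FETCH_OP_DEREF / FETCH_OP_UDEREF
 *	[3rd stage] one FETCH_OP_ST_* store operation
 *	[4th stage] optional FETCH_OP_MOD_BF modifier
 *	[last]      optional FETCH_OP_LP_ARRAY, terminated by FETCH_OP_END
 */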
static nokprobe_inline int
process_fetch_insn_bottom(struct fetch_insn *code, unsigned long val,
			   void *dest, void *base)
{
	struct fetch_insn *s3 = NULL;
	int total = 0, ret = 0, i = 0;
	u32 loc = 0;
	unsigned long lval = val;

stage2:
	/* 2nd stage: dereference memory if needed */
	do {
		if (code->op == FETCH_OP_DEREF) {
			lval = val;
			ret = probe_mem_read(&val, (void *)val + code->offset,
					     sizeof(val));
		} else if (code->op == FETCH_OP_UDEREF) {
			lval = val;
			ret = probe_mem_read_user(&val,
				 (void *)val + code->offset, sizeof(val));
		} else
			break;
		if (ret)
			return ret;
		code++;
	} while (1);

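	/* Remember the start of the store stage so the array loop can replay it */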
	s3 = code;
stage3:
	/* 3rd stage: store value to buffer */
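	/*
	 * A NULL @dest means this is the sizing pass (see __get_data_size()):
	 * only report how much dynamic data the string operations would need.
	 */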
	if (unlikely(!dest)) {
		switch (code->op) {
		case FETCH_OP_ST_STRING:
			ret = fetch_store_strlen(val + code->offset);
			code++;
			goto array;
		case FETCH_OP_ST_USTRING:
			ret = fetch_store_strlen_user(val + code->offset);
			code++;
			goto array;
		case FETCH_OP_ST_SYMSTR:
			ret = fetch_store_symstrlen(val + code->offset);
			code++;
			goto array;
		default:
			return -EILSEQ;
		}
	}

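	/*
	 * @dest is valid: actually store the value.  For string operations,
	 * remember the current data_loc word so the array loop below can
	 * update it for the next element.
	 */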
	switch (code->op) {
	case FETCH_OP_ST_RAW:
		fetch_store_raw(val, code, dest);
		break;
	case FETCH_OP_ST_MEM:
		probe_mem_read(dest, (void *)val + code->offset, code->size);
		break;
	case FETCH_OP_ST_UMEM:
		probe_mem_read_user(dest, (void *)val + code->offset, code->size);
		break;
	case FETCH_OP_ST_STRING:
		loc = *(u32 *)dest;
		ret = fetch_store_string(val + code->offset, dest, base);
		break;
	case FETCH_OP_ST_USTRING:
		loc = *(u32 *)dest;
		ret = fetch_store_string_user(val + code->offset, dest, base);
		break;
	case FETCH_OP_ST_SYMSTR:
		loc = *(u32 *)dest;
		ret = fetch_store_symstring(val + code->offset, dest, base);
		break;
	default:
		return -EILSEQ;
	}
	code++;

	/* 4th stage: modify stored value if needed */
	if (code->op == FETCH_OP_MOD_BF) {
		fetch_apply_bitfield(code, dest);
		code++;
	}

array:
	/* the last stage: Loop on array */
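	/*
	 * The store stage runs code->param times: fixed-size elements simply
	 * advance @dest and @val by the element size, while string elements
	 * go back through the dereference stage to fetch the next pointer
	 * and get a fresh data_loc entry.
	 */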
	if (code->op == FETCH_OP_LP_ARRAY) {
		total += ret;
		if (++i < code->param) {
			code = s3;
			if (s3->op != FETCH_OP_ST_STRING &&
			    s3->op != FETCH_OP_ST_USTRING) {
				dest += s3->size;
				val += s3->size;
				goto stage3;
			}
			code--;
			val = lval + sizeof(char *);
			if (dest) {
				dest += sizeof(u32);
				*(u32 *)dest = update_data_loc(loc, ret);
			}
			goto stage2;
		}
		code++;
		ret = total;
	}

	return code->op == FETCH_OP_END ? ret : -EILSEQ;
}

/* Sum up total data length for dynamic arrays (strings) */
static nokprobe_inline int
__get_data_size(struct trace_probe *tp, struct pt_regs *regs)
{
	struct probe_arg *arg;
	int i, len, ret = 0;

	for (i = 0; i < tp->nr_args; i++) {
		arg = tp->args + i;
		if (unlikely(arg->dynamic)) {
			len = process_fetch_insn(arg->code, regs, NULL, NULL);
			if (len > 0)
				ret += len;
		}
	}

	return ret;
}

/* Store the value of each argument */
static nokprobe_inline void
store_trace_args(void *data, struct trace_probe *tp, void *rec,
		 int header_size, int maxlen)
{
	struct probe_arg *arg;
	void *base = data - header_size;
	void *dyndata = data + tp->size;
	u32 *dl;	/* Data location */
	int ret, i;

	for (i = 0; i < tp->nr_args; i++) {
		arg = tp->args + i;
		dl = data + arg->offset;
		/* Point to the dynamic data area if needed */
		if (unlikely(arg->dynamic))
			*dl = make_data_loc(maxlen, dyndata - base);
		ret = process_fetch_insn(arg->code, rec, dl, base);
		if (unlikely(ret < 0 && arg->dynamic)) {
			*dl = make_data_loc(0, dyndata - base);
		} else {
			dyndata += ret;
			maxlen -= ret;
		}
	}
}
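
/*
 * Typical callsite usage (illustrative, simplified from trace_kprobe.c; the
 * local variable names are examples only):
 *
 *	dsize = __get_data_size(&tk->tp, regs);
 *	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
 *					   sizeof(*entry) + tk->tp.size + dsize);
 *	...
 *	store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
 */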

/*
 * Render each argument as " name=value" into @s for the trace output; an
 * array argument is printed as " name={v1,v2,...}".  Returns 0 on success,
 * or -ENOMEM if the trace_seq overflows.
 */
static inline int
print_probe_args(struct trace_seq *s, struct probe_arg *args, int nr_args,
		 u8 *data, void *field)
{
	void *p;
	int i, j;

	for (i = 0; i < nr_args; i++) {
		struct probe_arg *a = args + i;

		trace_seq_printf(s, " %s=", a->name);
		if (likely(!a->count)) {
			if (!a->type->print(s, data + a->offset, field))
				return -ENOMEM;
			continue;
		}
		trace_seq_putc(s, '{');
		p = data + a->offset;
		for (j = 0; j < a->count; j++) {
			if (!a->type->print(s, p, field))
				return -ENOMEM;
			trace_seq_putc(s, j == a->count - 1 ? '}' : ',');
			p += a->type->size;
		}
	}
	return 0;
}