// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtual Processor Dispatch Trace Log
 *
 * (C) Copyright IBM Corporation 2009
 *
 * Author: Jeremy Kerr <jk@ozlabs.org>
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/smp.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <asm/lppaca.h>
#include <asm/debugfs.h>
#include <asm/plpar_wrappers.h>
#include <asm/machdep.h>

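/*
 * Per-cpu state for the debugfs interface: the buffer we hand out to
 * userspace, its debugfs file, the owning cpu, the buffer size in
 * entries, and the index of the next entry to give to the reader. The
 * lock serialises buffer setup/teardown against readers.
 */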
struct dtl {
	struct dtl_entry	*buf;
	struct dentry		*file;
	int			cpu;
	int			buf_entries;
	u64			last_idx;
	spinlock_t		lock;
};
static DEFINE_PER_CPU(struct dtl, cpu_dtl);

/*
 * Dispatch trace log event mask:
 * 0x7: 0x1: voluntary virtual processor waits
 *      0x2: time-slice preempts
 *      0x4: virtual partition memory page faults
 */
static u8 dtl_event_mask = 0x7;

/*
 * Size of the per-cpu log buffers, in entries. Firmware requires that
 * each buffer not cross a 4k boundary.
 */
static int dtl_buf_entries = N_DISPATCH_LOG;

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
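/*
 * With native vtime accounting, the accounting code owns the
 * hypervisor-facing DTL buffer; we keep a per-cpu shadow ring that
 * consume_dtle() below fills with a copy of each entry as it is
 * processed.
 */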
struct dtl_ring {
	u64	write_index;
	struct dtl_entry *write_ptr;
	struct dtl_entry *buf;
	struct dtl_entry *buf_end;
	u8	saved_dtl_mask;
};

static DEFINE_PER_CPU(struct dtl_ring, dtl_rings);

static atomic_t dtl_count;

/*
 * The cpu accounting code controls the DTL ring buffer, and we get
 * given entries as they are processed.
 */
static void consume_dtle(struct dtl_entry *dtle, u64 index)
{
	struct dtl_ring *dtlr = this_cpu_ptr(&dtl_rings);
	struct dtl_entry *wp = dtlr->write_ptr;
	struct lppaca *vpa = local_paca->lppaca_ptr;

	if (!wp)
		return;

	*wp = *dtle;
	barrier();

	/* check for hypervisor ring buffer overflow, ignore this entry if so */
	if (index + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx))
		return;

	++wp;
	if (wp == dtlr->buf_end)
		wp = dtlr->buf;
	dtlr->write_ptr = wp;

	/* incrementing write_index makes the new entry visible */
	smp_wmb();
	++dtlr->write_index;
}

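/*
 * Point the shadow ring at the reader's buffer, enable the requested
 * events in the target cpu's lppaca, and install consume_dtle() as the
 * dtl_consumer hook so the accounting code starts feeding us entries.
 */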
static int dtl_start(struct dtl *dtl)
{
	struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);

	dtlr->buf = dtl->buf;
	dtlr->buf_end = dtl->buf + dtl->buf_entries;
	dtlr->write_index = 0;

	/* setting write_ptr enables logging into our buffer */
	smp_wmb();
	dtlr->write_ptr = dtl->buf;

	/* enable event logging */
	dtlr->saved_dtl_mask = lppaca_of(dtl->cpu).dtl_enable_mask;
	lppaca_of(dtl->cpu).dtl_enable_mask |= dtl_event_mask;

	dtl_consumer = consume_dtle;
	atomic_inc(&dtl_count);
	return 0;
}

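/*
 * Detach the shadow ring: clearing write_ptr stops consume_dtle() from
 * copying entries. Restore the saved event mask and drop the
 * dtl_consumer hook once the last reader has gone away.
 */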
static void dtl_stop(struct dtl *dtl)
{
	struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);

	dtlr->write_ptr = NULL;
	smp_wmb();

	dtlr->buf = NULL;

	/* restore dtl_enable_mask */
	lppaca_of(dtl->cpu).dtl_enable_mask = dtlr->saved_dtl_mask;

	if (atomic_dec_and_test(&dtl_count))
		dtl_consumer = NULL;
}

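/* Index that consume_dtle() will write next, i.e. entries seen so far. */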
static u64 dtl_current_index(struct dtl *dtl)
{
	return per_cpu(dtl_rings, dtl->cpu).write_index;
}

#else /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

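/*
 * Without the accounting code in the way, register the reader's buffer
 * directly with the hypervisor for this cpu and enable the requested
 * events in the lppaca.
 */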
static int dtl_start(struct dtl *dtl)
{
	unsigned long addr;
	int ret, hwcpu;

	/* Register our dtl buffer with the hypervisor. The HV expects the
	 * buffer size to be passed in the second word of the buffer */
	((u32 *)dtl->buf)[1] = cpu_to_be32(DISPATCH_LOG_BYTES);

	hwcpu = get_hard_smp_processor_id(dtl->cpu);
	addr = __pa(dtl->buf);
	ret = register_dtl(hwcpu, addr);
	if (ret) {
		printk(KERN_WARNING "%s: DTL registration for cpu %d (hw %d) "
		       "failed with %d\n", __func__, dtl->cpu, hwcpu, ret);
		return -EIO;
	}

	/* set our initial buffer indices */
	lppaca_of(dtl->cpu).dtl_idx = 0;

	/* ensure that our updates to the lppaca fields have occurred before
	 * we actually enable the logging */
	smp_wmb();

	/* enable event logging */
	lppaca_of(dtl->cpu).dtl_enable_mask = dtl_event_mask;

	return 0;
}

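/* Mask all events and hand the buffer back to the hypervisor. */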
static void dtl_stop(struct dtl *dtl)
{
	int hwcpu = get_hard_smp_processor_id(dtl->cpu);

	lppaca_of(dtl->cpu).dtl_enable_mask = 0x0;

	unregister_dtl(hwcpu);
}

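/* The hypervisor maintains the current index in the lppaca for us. */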
static u64 dtl_current_index(struct dtl *dtl)
{
	return be64_to_cpu(lppaca_of(dtl->cpu).dtl_idx);
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

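/*
 * Allocate a per-cpu log buffer from dtl_cache and start logging into
 * it. Only one reader may have a given cpu's log open at a time; a
 * racing open fails with -EBUSY.
 */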
static int dtl_enable(struct dtl *dtl)
{
	long int n_entries;
	long int rc;
	struct dtl_entry *buf = NULL;

	if (!dtl_cache)
		return -ENOMEM;

	/* only allow one reader */
	if (dtl->buf)
		return -EBUSY;

	n_entries = dtl_buf_entries;
	buf = kmem_cache_alloc_node(dtl_cache, GFP_KERNEL, cpu_to_node(dtl->cpu));
	if (!buf) {
		printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n",
				__func__, dtl->cpu);
		return -ENOMEM;
	}

	spin_lock(&dtl->lock);
	rc = -EBUSY;
	if (!dtl->buf) {
		/* store the original allocation size for use during read */
		dtl->buf_entries = n_entries;
		dtl->buf = buf;
		dtl->last_idx = 0;
		rc = dtl_start(dtl);
		if (rc)
			dtl->buf = NULL;
	}
	spin_unlock(&dtl->lock);

	if (rc)
		kmem_cache_free(dtl_cache, buf);
	return rc;
}

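/* Stop logging and release the buffer allocated by dtl_enable(). */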
static void dtl_disable(struct dtl *dtl)
{
	spin_lock(&dtl->lock);
	dtl_stop(dtl);
	kmem_cache_free(dtl_cache, dtl->buf);
	dtl->buf = NULL;
	dtl->buf_entries = 0;
	spin_unlock(&dtl->lock);
}

/* file interface */

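/* debugfs open/release: start and stop logging for this file's cpu. */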
static int dtl_file_open(struct inode *inode, struct file *filp)
{
	struct dtl *dtl = inode->i_private;
	int rc;

	rc = dtl_enable(dtl);
	if (rc)
		return rc;

	filp->private_data = dtl;
	return 0;
}

static int dtl_file_release(struct inode *inode, struct file *filp)
{
	struct dtl *dtl = inode->i_private;
	dtl_disable(dtl);
	return 0;
}

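/*
 * Copy dispatch trace log entries out to userspace.
 *
 * Reads must be a whole number of struct dtl_entry records, otherwise
 * -EINVAL is returned. If the log has wrapped since the last read, we
 * skip ahead so that only entries still present in the buffer are
 * returned, copying the tail and then the head of the ring as needed.
 *
 * A minimal userspace sketch (assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *	struct dtl_entry log[32];
 *	int fd = open("/sys/kernel/debug/powerpc/dtl/cpu-0", O_RDONLY);
 *	ssize_t n = read(fd, log, sizeof(log));
 *
 * n will be a multiple of sizeof(struct dtl_entry), or 0 if there are
 * no new entries.
 */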
static ssize_t dtl_file_read(struct file *filp, char __user *buf, size_t len,
		loff_t *pos)
{
	long int rc, n_read, n_req, read_size;
	struct dtl *dtl;
	u64 cur_idx, last_idx, i;

	if ((len % sizeof(struct dtl_entry)) != 0)
		return -EINVAL;

	dtl = filp->private_data;

	/* requested number of entries to read */
	n_req = len / sizeof(struct dtl_entry);

	/* actual number of entries read */
	n_read = 0;

	spin_lock(&dtl->lock);

	cur_idx = dtl_current_index(dtl);
	last_idx = dtl->last_idx;

	if (last_idx + dtl->buf_entries <= cur_idx)
		last_idx = cur_idx - dtl->buf_entries + 1;

	if (last_idx + n_req > cur_idx)
		n_req = cur_idx - last_idx;

	if (n_req > 0)
		dtl->last_idx = last_idx + n_req;

	spin_unlock(&dtl->lock);

	if (n_req <= 0)
		return 0;

	i = last_idx % dtl->buf_entries;

	/* read the tail of the buffer if we've wrapped */
	if (i + n_req > dtl->buf_entries) {
		read_size = dtl->buf_entries - i;

		rc = copy_to_user(buf, &dtl->buf[i],
				read_size * sizeof(struct dtl_entry));
		if (rc)
			return -EFAULT;

		i = 0;
		n_req -= read_size;
		n_read += read_size;
		buf += read_size * sizeof(struct dtl_entry);
	}

	/* .. and now the head */
	rc = copy_to_user(buf, &dtl->buf[i], n_req * sizeof(struct dtl_entry));
	if (rc)
		return -EFAULT;

	n_read += n_req;

	return n_read * sizeof(struct dtl_entry);
}

static const struct file_operations dtl_fops = {
	.open		= dtl_file_open,
	.release	= dtl_file_release,
	.read		= dtl_file_read,
	.llseek		= no_llseek,
};

static struct dentry *dtl_dir;

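/* Create the per-cpu debugfs file, e.g. "cpu-0", in the dtl directory. */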
static int dtl_setup_file(struct dtl *dtl)
{
	char name[10];

	sprintf(name, "cpu-%d", dtl->cpu);

	dtl->file = debugfs_create_file(name, 0400, dtl_dir, dtl, &dtl_fops);
	if (!dtl->file)
		return -ENOMEM;

	return 0;
}

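/*
 * Create the debugfs hierarchy: a "dtl" directory (typically
 * /sys/kernel/debug/powerpc/dtl) holding the dtl_event_mask and
 * dtl_buf_entries controls plus one log file per possible cpu. Only
 * shared-processor LPARs (FW_FEATURE_SPLPAR) provide a dispatch trace
 * log.
 */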
static int dtl_init(void)
{
	struct dentry *event_mask_file, *buf_entries_file;
	int rc, i;

	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return -ENODEV;

	/* set up common debugfs structure */

	rc = -ENOMEM;
	dtl_dir = debugfs_create_dir("dtl", powerpc_debugfs_root);
	if (!dtl_dir) {
		printk(KERN_WARNING "%s: can't create dtl root dir\n",
				__func__);
		goto err;
	}

	event_mask_file = debugfs_create_x8("dtl_event_mask", 0600,
				dtl_dir, &dtl_event_mask);
	buf_entries_file = debugfs_create_u32("dtl_buf_entries", 0400,
				dtl_dir, &dtl_buf_entries);

	if (!event_mask_file || !buf_entries_file) {
		printk(KERN_WARNING "%s: can't create dtl files\n", __func__);
		goto err_remove_dir;
	}

	/* set up the per-cpu log structures */
	for_each_possible_cpu(i) {
		struct dtl *dtl = &per_cpu(cpu_dtl, i);
		spin_lock_init(&dtl->lock);
		dtl->cpu = i;

		rc = dtl_setup_file(dtl);
		if (rc)
			goto err_remove_dir;
	}

	return 0;

err_remove_dir:
	debugfs_remove_recursive(dtl_dir);
err:
	return rc;
}
machine_arch_initcall(pseries, dtl_init);