// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtual Processor Dispatch Trace Log
 *
 * (C) Copyright IBM Corporation 2009
 *
 * Author: Jeremy Kerr <jk@ozlabs.org>
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/smp.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <asm/lppaca.h>
#include <asm/debugfs.h>
#include <asm/plpar_wrappers.h>
#include <asm/machdep.h>

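/*
 * Per-CPU state for the debugfs interface: the entry buffer handed to the
 * log producer, its size in entries, and the index of the next entry to be
 * copied to userspace. The lock serialises enable/read/disable against each
 * other.
 */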
struct dtl {
	struct dtl_entry	*buf;
	int			cpu;
	int			buf_entries;
	u64			last_idx;
	spinlock_t		lock;
};
static DEFINE_PER_CPU(struct dtl, cpu_dtl);

static u8 dtl_event_mask = DTL_LOG_ALL;


/*
 * Size of per-cpu log buffers. Firmware requires that the buffer does
 * not cross a 4k boundary.
 */
static u32 dtl_buf_entries = N_DISPATCH_LOG;

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
struct dtl_ring {
	u64	write_index;
	struct dtl_entry *write_ptr;
	struct dtl_entry *buf;
	struct dtl_entry *buf_end;
};

static DEFINE_PER_CPU(struct dtl_ring, dtl_rings);

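/*
 * Number of CPUs with debugfs tracing currently enabled. The shared
 * dtl_consumer hook is installed by the first dtl_start() and cleared
 * again only when the last user calls dtl_stop().
 */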
static atomic_t dtl_count;

/*
 * The cpu accounting code controls the DTL ring buffer, and we get
 * given entries as they are processed.
 */
static void consume_dtle(struct dtl_entry *dtle, u64 index)
{
	struct dtl_ring *dtlr = this_cpu_ptr(&dtl_rings);
	struct dtl_entry *wp = dtlr->write_ptr;
	struct lppaca *vpa = local_paca->lppaca_ptr;

	if (!wp)
		return;

	*wp = *dtle;
	barrier();

	/* check for hypervisor ring buffer overflow, ignore this entry if so */
	if (index + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx))
		return;

	++wp;
	if (wp == dtlr->buf_end)
		wp = dtlr->buf;
	dtlr->write_ptr = wp;

	/* incrementing write_index makes the new entry visible */
	smp_wmb();
	++dtlr->write_index;
}
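
/*
 * Note: the reader side is dtl_file_read(), which samples write_index via
 * dtl_current_index() under dtl->lock and then copies entries out of the
 * ring. The smp_wmb() above orders the entry store before the index
 * increment, so a reader that observes the new index also sees the
 * completed entry.
 */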

static int dtl_start(struct dtl *dtl)
{
	struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);

	dtlr->buf = dtl->buf;
	dtlr->buf_end = dtl->buf + dtl->buf_entries;
	dtlr->write_index = 0;

	/* setting write_ptr enables logging into our buffer */
	smp_wmb();
	dtlr->write_ptr = dtl->buf;

	/* enable event logging */
	lppaca_of(dtl->cpu).dtl_enable_mask |= dtl_event_mask;

	dtl_consumer = consume_dtle;
	atomic_inc(&dtl_count);
	return 0;
}

static void dtl_stop(struct dtl *dtl)
{
	struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);

	dtlr->write_ptr = NULL;
	smp_wmb();

	dtlr->buf = NULL;

	/* restore dtl_enable_mask */
	lppaca_of(dtl->cpu).dtl_enable_mask = DTL_LOG_PREEMPT;

	if (atomic_dec_and_test(&dtl_count))
		dtl_consumer = NULL;
}

static u64 dtl_current_index(struct dtl *dtl)
{
	return per_cpu(dtl_rings, dtl->cpu).write_index;
}

#else /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

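/*
 * Without CONFIG_VIRT_CPU_ACCOUNTING_NATIVE there is no in-kernel consumer,
 * so the buffer allocated in dtl_enable() is registered with the hypervisor
 * directly and the HV writes dispatch trace entries straight into it.
 */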
static int dtl_start(struct dtl *dtl)
{
	unsigned long addr;
	int ret, hwcpu;

	/* Register our dtl buffer with the hypervisor. The HV expects the
	 * buffer size to be passed in the second word of the buffer */
	((u32 *)dtl->buf)[1] = cpu_to_be32(DISPATCH_LOG_BYTES);

	hwcpu = get_hard_smp_processor_id(dtl->cpu);
	addr = __pa(dtl->buf);
	ret = register_dtl(hwcpu, addr);
	if (ret) {
		printk(KERN_WARNING "%s: DTL registration for cpu %d (hw %d) "
		       "failed with %d\n", __func__, dtl->cpu, hwcpu, ret);
		return -EIO;
	}

	/* set our initial buffer indices */
	lppaca_of(dtl->cpu).dtl_idx = 0;

	/* ensure that our updates to the lppaca fields have occurred before
	 * we actually enable the logging */
	smp_wmb();

	/* enable event logging */
	lppaca_of(dtl->cpu).dtl_enable_mask = dtl_event_mask;

	return 0;
}

static void dtl_stop(struct dtl *dtl)
{
	int hwcpu = get_hard_smp_processor_id(dtl->cpu);

	lppaca_of(dtl->cpu).dtl_enable_mask = 0x0;

	unregister_dtl(hwcpu);
}

static u64 dtl_current_index(struct dtl *dtl)
{
	return be64_to_cpu(lppaca_of(dtl->cpu).dtl_idx);
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

static int dtl_enable(struct dtl *dtl)
{
	long int n_entries;
	long int rc;
	struct dtl_entry *buf = NULL;

	if (!dtl_cache)
		return -ENOMEM;

	/* only allow one reader */
	if (dtl->buf)
		return -EBUSY;

	/* ensure there are no other conflicting dtl users */
	if (!read_trylock(&dtl_access_lock))
		return -EBUSY;

	n_entries = dtl_buf_entries;
	buf = kmem_cache_alloc_node(dtl_cache, GFP_KERNEL, cpu_to_node(dtl->cpu));
	if (!buf) {
		printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n",
				__func__, dtl->cpu);
		read_unlock(&dtl_access_lock);
		return -ENOMEM;
	}

	spin_lock(&dtl->lock);
	rc = -EBUSY;
	if (!dtl->buf) {
		/* store the original allocation size for use during read */
		dtl->buf_entries = n_entries;
		dtl->buf = buf;
		dtl->last_idx = 0;
		rc = dtl_start(dtl);
		if (rc)
			dtl->buf = NULL;
	}
	spin_unlock(&dtl->lock);

	if (rc) {
		read_unlock(&dtl_access_lock);
		kmem_cache_free(dtl_cache, buf);
	}

	return rc;
}

static void dtl_disable(struct dtl *dtl)
{
	spin_lock(&dtl->lock);
	dtl_stop(dtl);
	kmem_cache_free(dtl_cache, dtl->buf);
	dtl->buf = NULL;
	dtl->buf_entries = 0;
	spin_unlock(&dtl->lock);
	read_unlock(&dtl_access_lock);
}

/* file interface */
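
/*
 * Each per-CPU debugfs file hands out raw struct dtl_entry records, so read
 * lengths must be a multiple of sizeof(struct dtl_entry), and only one
 * reader per file is allowed at a time.
 *
 * A minimal userspace sketch, assuming debugfs is mounted at
 * /sys/kernel/debug (so the files appear as
 * /sys/kernel/debug/powerpc/dtl/cpu-N):
 *
 *	struct dtl_entry entries[16];
 *	int fd = open("/sys/kernel/debug/powerpc/dtl/cpu-0", O_RDONLY);
 *	ssize_t n = read(fd, entries, sizeof(entries));
 *
 * On success, n / sizeof(struct dtl_entry) entries are valid.
 */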

static int dtl_file_open(struct inode *inode, struct file *filp)
{
	struct dtl *dtl = inode->i_private;
	int rc;

	rc = dtl_enable(dtl);
	if (rc)
		return rc;

	filp->private_data = dtl;
	return 0;
}

static int dtl_file_release(struct inode *inode, struct file *filp)
{
	struct dtl *dtl = inode->i_private;
	dtl_disable(dtl);
	return 0;
}

static ssize_t dtl_file_read(struct file *filp, char __user *buf, size_t len,
		loff_t *pos)
{
	long int rc, n_read, n_req, read_size;
	struct dtl *dtl;
	u64 cur_idx, last_idx, i;

	if ((len % sizeof(struct dtl_entry)) != 0)
		return -EINVAL;

	dtl = filp->private_data;

	/* requested number of entries to read */
	n_req = len / sizeof(struct dtl_entry);

	/* actual number of entries read */
	n_read = 0;

	spin_lock(&dtl->lock);

	cur_idx = dtl_current_index(dtl);
	last_idx = dtl->last_idx;

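	/*
	 * If the producer has lapped us, entries we have not read yet have
	 * been overwritten; skip ahead to the oldest entry still present in
	 * the buffer.
	 */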
	if (last_idx + dtl->buf_entries <= cur_idx)
		last_idx = cur_idx - dtl->buf_entries + 1;

	if (last_idx + n_req > cur_idx)
		n_req = cur_idx - last_idx;

	if (n_req > 0)
		dtl->last_idx = last_idx + n_req;

	spin_unlock(&dtl->lock);

	if (n_req <= 0)
		return 0;

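	/* map the absolute log index onto a slot in the circular buffer */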
	i = last_idx % dtl->buf_entries;

	/* read the tail of the buffer if we've wrapped */
	if (i + n_req > dtl->buf_entries) {
		read_size = dtl->buf_entries - i;

		rc = copy_to_user(buf, &dtl->buf[i],
				read_size * sizeof(struct dtl_entry));
		if (rc)
			return -EFAULT;

		i = 0;
		n_req -= read_size;
		n_read += read_size;
		buf += read_size * sizeof(struct dtl_entry);
	}

	/* .. and now the head */
	rc = copy_to_user(buf, &dtl->buf[i], n_req * sizeof(struct dtl_entry));
	if (rc)
		return -EFAULT;

	n_read += n_req;

	return n_read * sizeof(struct dtl_entry);
}

static const struct file_operations dtl_fops = {
	.open		= dtl_file_open,
	.release	= dtl_file_release,
	.read		= dtl_file_read,
	.llseek		= no_llseek,
};

static struct dentry *dtl_dir;

static void dtl_setup_file(struct dtl *dtl)
{
	char name[10];

	sprintf(name, "cpu-%d", dtl->cpu);

	debugfs_create_file(name, 0400, dtl_dir, dtl, &dtl_fops);
}

static int dtl_init(void)
{
	int i;

	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return -ENODEV;

	/* set up common debugfs structure */

	dtl_dir = debugfs_create_dir("dtl", powerpc_debugfs_root);

	debugfs_create_x8("dtl_event_mask", 0600, dtl_dir, &dtl_event_mask);
	debugfs_create_u32("dtl_buf_entries", 0400, dtl_dir, &dtl_buf_entries);

	/* set up the per-cpu log structures */
	for_each_possible_cpu(i) {
		struct dtl *dtl = &per_cpu(cpu_dtl, i);
		spin_lock_init(&dtl->lock);
		dtl->cpu = i;

		dtl_setup_file(dtl);
	}

	return 0;
}
machine_arch_initcall(pseries, dtl_init);