1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Generic Counter character device interface
4  * Copyright (C) 2020 William Breathitt Gray
5  */
6 #include <linux/atomic.h>
7 #include <linux/cdev.h>
8 #include <linux/counter.h>
9 #include <linux/err.h>
10 #include <linux/errno.h>
11 #include <linux/export.h>
12 #include <linux/fs.h>
13 #include <linux/kfifo.h>
14 #include <linux/list.h>
15 #include <linux/mutex.h>
16 #include <linux/nospec.h>
17 #include <linux/poll.h>
18 #include <linux/slab.h>
19 #include <linux/spinlock.h>
20 #include <linux/timekeeping.h>
21 #include <linux/types.h>
22 #include <linux/uaccess.h>
23 #include <linux/wait.h>
24 
25 #include "counter-chrdev.h"
26 
/*
 * struct counter_comp_node - per-watch component bookkeeping node
 * @l:		entry in a counter_event_node's comp_list
 * @component:	component description as supplied by userspace in the watch
 * @comp:	component data type and read callback used to sample the value
 * @parent:	owning Signal/Count structure for signal/count-scope
 *		components; left NULL for device-scope components (see
 *		counter_add_watch())
 */
struct counter_comp_node {
	struct list_head l;
	struct counter_component component;
	struct counter_comp comp;
	void *parent;
};
33 
/*
 * Evaluates true if @a and @b reference the same read callback.
 * NOTE(review): the read callbacks appear to live in a union within
 * struct counter_comp (confirm against linux/counter.h); under that
 * assumption every member comparison below tests the same storage, so
 * OR-ing them is equivalent to a single pointer comparison. If they were
 * distinct members, two comps with different callbacks could still
 * compare equal via a shared NULL member.
 */
#define counter_comp_read_is_equal(a, b) \
	(a.action_read == b.action_read || \
	a.device_u8_read == b.device_u8_read || \
	a.count_u8_read == b.count_u8_read || \
	a.signal_u8_read == b.signal_u8_read || \
	a.device_u32_read == b.device_u32_read || \
	a.count_u32_read == b.count_u32_read || \
	a.signal_u32_read == b.signal_u32_read || \
	a.device_u64_read == b.device_u64_read || \
	a.count_u64_read == b.count_u64_read || \
	a.signal_u64_read == b.signal_u64_read)
45 
/*
 * Evaluates true if @comp has any read callback set (non-NULL); used by
 * counter_add_watch() to reject watches on components that cannot be read.
 */
#define counter_comp_read_is_set(comp) \
	(comp.action_read || \
	comp.device_u8_read || \
	comp.count_u8_read || \
	comp.signal_u8_read || \
	comp.device_u32_read || \
	comp.count_u32_read || \
	comp.signal_u32_read || \
	comp.device_u64_read || \
	comp.count_u64_read || \
	comp.signal_u64_read)
57 
/*
 * counter_chrdev_read - copy queued Counter events to userspace
 * @filp:	file from which to read
 * @buf:	userspace destination buffer
 * @len:	size of @buf in bytes; must hold at least one counter_event
 * @f_ps:	file position (unused; device is non-seekable)
 *
 * Blocks until at least one event is available unless O_NONBLOCK is set.
 * Returns the number of bytes copied, or a negative error code.
 */
static ssize_t counter_chrdev_read(struct file *filp, char __user *buf,
				   size_t len, loff_t *f_ps)
{
	struct counter_device *const counter = filp->private_data;
	int err;
	unsigned int copied;

	/* ops is cleared when the underlying device goes away */
	if (!counter->ops)
		return -ENODEV;

	/* Refuse buffers too small for even a single event record */
	if (len < sizeof(struct counter_event))
		return -EINVAL;

	do {
		if (kfifo_is_empty(&counter->events)) {
			if (filp->f_flags & O_NONBLOCK)
				return -EAGAIN;

			/* Sleep until an event arrives or the device is removed */
			err = wait_event_interruptible(counter->events_wait,
					!kfifo_is_empty(&counter->events) ||
					!counter->ops);
			if (err < 0)
				return err;
			if (!counter->ops)
				return -ENODEV;
		}

		/* events_lock serializes concurrent readers of the kfifo */
		if (mutex_lock_interruptible(&counter->events_lock))
			return -ERESTARTSYS;
		err = kfifo_to_user(&counter->events, buf, len, &copied);
		mutex_unlock(&counter->events_lock);
		if (err < 0)
			return err;
	} while (!copied);	/* another reader may have drained the fifo */

	return copied;
}
95 
96 static __poll_t counter_chrdev_poll(struct file *filp,
97 				    struct poll_table_struct *pollt)
98 {
99 	struct counter_device *const counter = filp->private_data;
100 	__poll_t events = 0;
101 
102 	if (!counter->ops)
103 		return events;
104 
105 	poll_wait(filp, &counter->events_wait, pollt);
106 
107 	if (!kfifo_is_empty(&counter->events))
108 		events = EPOLLIN | EPOLLRDNORM;
109 
110 	return events;
111 }
112 
113 static void counter_events_list_free(struct list_head *const events_list)
114 {
115 	struct counter_event_node *p, *n;
116 	struct counter_comp_node *q, *o;
117 
118 	list_for_each_entry_safe(p, n, events_list, l) {
119 		/* Free associated component nodes */
120 		list_for_each_entry_safe(q, o, &p->comp_list, l) {
121 			list_del(&q->l);
122 			kfree(q);
123 		}
124 
125 		/* Free event node */
126 		list_del(&p->l);
127 		kfree(p);
128 	}
129 }
130 
/*
 * counter_set_event_node - attach a component watch to the pending events list
 * @counter:	Counter device to watch
 * @watch:	watch request (event id and channel) from userspace
 * @cfg:	template component node describing what to read on the event
 *
 * Finds or creates the event node for @watch on counter->next_events_list
 * and appends a copy of @cfg to its component list. Returns 0 on success,
 * -EINVAL if an identical watch is already registered, or -ENOMEM on
 * allocation failure. Caller holds counter->n_events_list_lock.
 */
static int counter_set_event_node(struct counter_device *const counter,
				  struct counter_watch *const watch,
				  const struct counter_comp_node *const cfg)
{
	struct counter_event_node *event_node;
	int err = 0;
	struct counter_comp_node *comp_node;

	/* Search for event in the list */
	list_for_each_entry(event_node, &counter->next_events_list, l)
		if (event_node->event == watch->event &&
		    event_node->channel == watch->channel)
			break;

	/*
	 * If event is not already in the list (the loop ran to completion,
	 * leaving the iterator at the list head sentinel)
	 */
	if (&event_node->l == &counter->next_events_list) {
		/* Allocate new event node */
		event_node = kmalloc(sizeof(*event_node), GFP_KERNEL);
		if (!event_node)
			return -ENOMEM;

		/* Configure event node and add to the list */
		event_node->event = watch->event;
		event_node->channel = watch->channel;
		INIT_LIST_HEAD(&event_node->comp_list);
		list_add(&event_node->l, &counter->next_events_list);
	}

	/* Check if component watch has already been set before */
	list_for_each_entry(comp_node, &event_node->comp_list, l)
		if (comp_node->parent == cfg->parent &&
		    counter_comp_read_is_equal(comp_node->comp, cfg->comp)) {
			err = -EINVAL;
			goto exit_free_event_node;
		}

	/* Allocate component node */
	comp_node = kmalloc(sizeof(*comp_node), GFP_KERNEL);
	if (!comp_node) {
		err = -ENOMEM;
		goto exit_free_event_node;
	}
	*comp_node = *cfg;

	/* Add component node to event node */
	list_add_tail(&comp_node->l, &event_node->comp_list);

exit_free_event_node:
	/*
	 * Free event node if no one else is watching; this only triggers
	 * on the error paths for a freshly allocated event node -- a
	 * successful attach or a pre-existing node leaves comp_list
	 * non-empty.
	 */
	if (list_empty(&event_node->comp_list)) {
		list_del(&event_node->l);
		kfree(event_node);
	}

	return err;
}
187 
/*
 * counter_enable_events - activate the pending watch configuration
 * @counter:	Counter device whose events to enable
 *
 * Atomically replaces the active events list with next_events_list and
 * asks the driver (if it implements events_configure) to apply the new
 * configuration. Lock order: n_events_list_lock (vs. counter_add_watch)
 * is taken before events_list_lock (vs. counter_push_event, which may
 * run in interrupt context).
 */
static int counter_enable_events(struct counter_device *const counter)
{
	unsigned long flags;
	int err = 0;

	mutex_lock(&counter->n_events_list_lock);
	spin_lock_irqsave(&counter->events_list_lock, flags);

	counter_events_list_free(&counter->events_list);
	list_replace_init(&counter->next_events_list,
			  &counter->events_list);

	if (counter->ops->events_configure)
		err = counter->ops->events_configure(counter);

	spin_unlock_irqrestore(&counter->events_list_lock, flags);
	mutex_unlock(&counter->n_events_list_lock);

	return err;
}
208 
/*
 * counter_disable_events - deactivate and discard all event watches
 * @counter:	Counter device whose events to disable
 *
 * Frees the active events list under the irq-safe spinlock, notifies the
 * driver of the (now empty) configuration, then frees the pending
 * next_events_list under its mutex. Returns the driver callback's error
 * code, or 0.
 */
static int counter_disable_events(struct counter_device *const counter)
{
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&counter->events_list_lock, flags);

	counter_events_list_free(&counter->events_list);

	if (counter->ops->events_configure)
		err = counter->ops->events_configure(counter);

	spin_unlock_irqrestore(&counter->events_list_lock, flags);

	mutex_lock(&counter->n_events_list_lock);

	counter_events_list_free(&counter->next_events_list);

	mutex_unlock(&counter->n_events_list_lock);

	return err;
}
231 
/*
 * counter_add_watch - register an event watch requested by userspace
 * @counter:	Counter device to watch
 * @arg:	userspace pointer to a struct counter_watch
 *
 * Validates the requested component (scope, parent index, component id),
 * resolves the matching read callback, and queues the watch on
 * next_events_list via counter_set_event_node(). Returns 0 on success or
 * a negative error code (-EFAULT, -EINVAL, -EOPNOTSUPP, -ENOMEM).
 */
static int counter_add_watch(struct counter_device *const counter,
			     const unsigned long arg)
{
	void __user *const uwatch = (void __user *)arg;
	struct counter_watch watch;
	struct counter_comp_node comp_node = {};
	size_t parent, id;
	struct counter_comp *ext;
	size_t num_ext;
	int err = 0;

	if (copy_from_user(&watch, uwatch, sizeof(watch)))
		return -EFAULT;

	/* A NONE component means "report the event with no data read" */
	if (watch.component.type == COUNTER_COMPONENT_NONE)
		goto no_component;

	parent = watch.component.parent;

	/*
	 * Configure parent component info for comp node; parent indices
	 * come from userspace, so sanitize them with array_index_nospec()
	 * to prevent speculative out-of-bounds access.
	 */
	switch (watch.component.scope) {
	case COUNTER_SCOPE_DEVICE:
		ext = counter->ext;
		num_ext = counter->num_ext;
		break;
	case COUNTER_SCOPE_SIGNAL:
		if (parent >= counter->num_signals)
			return -EINVAL;
		parent = array_index_nospec(parent, counter->num_signals);

		comp_node.parent = counter->signals + parent;

		ext = counter->signals[parent].ext;
		num_ext = counter->signals[parent].num_ext;
		break;
	case COUNTER_SCOPE_COUNT:
		if (parent >= counter->num_counts)
			return -EINVAL;
		parent = array_index_nospec(parent, counter->num_counts);

		comp_node.parent = counter->counts + parent;

		ext = counter->counts[parent].ext;
		num_ext = counter->counts[parent].num_ext;
		break;
	default:
		return -EINVAL;
	}

	id = watch.component.id;

	/* Configure component info for comp node */
	switch (watch.component.type) {
	case COUNTER_COMPONENT_SIGNAL:
		/* Signal level only makes sense at Signal scope */
		if (watch.component.scope != COUNTER_SCOPE_SIGNAL)
			return -EINVAL;

		comp_node.comp.type = COUNTER_COMP_SIGNAL_LEVEL;
		comp_node.comp.signal_u32_read = counter->ops->signal_read;
		break;
	case COUNTER_COMPONENT_COUNT:
		if (watch.component.scope != COUNTER_SCOPE_COUNT)
			return -EINVAL;

		comp_node.comp.type = COUNTER_COMP_U64;
		comp_node.comp.count_u64_read = counter->ops->count_read;
		break;
	case COUNTER_COMPONENT_FUNCTION:
		if (watch.component.scope != COUNTER_SCOPE_COUNT)
			return -EINVAL;

		comp_node.comp.type = COUNTER_COMP_FUNCTION;
		comp_node.comp.count_u32_read = counter->ops->function_read;
		break;
	case COUNTER_COMPONENT_SYNAPSE_ACTION:
		if (watch.component.scope != COUNTER_SCOPE_COUNT)
			return -EINVAL;
		/* id selects a Synapse of the parent Count */
		if (id >= counter->counts[parent].num_synapses)
			return -EINVAL;
		id = array_index_nospec(id, counter->counts[parent].num_synapses);

		comp_node.comp.type = COUNTER_COMP_SYNAPSE_ACTION;
		comp_node.comp.action_read = counter->ops->action_read;
		comp_node.comp.priv = counter->counts[parent].synapses + id;
		break;
	case COUNTER_COMPONENT_EXTENSION:
		/* id selects an extension of the scope chosen above */
		if (id >= num_ext)
			return -EINVAL;
		id = array_index_nospec(id, num_ext);

		comp_node.comp = ext[id];
		break;
	default:
		return -EINVAL;
	}
	/* Reject components that supply no way to read a value */
	if (!counter_comp_read_is_set(comp_node.comp))
		return -EOPNOTSUPP;

no_component:
	mutex_lock(&counter->n_events_list_lock);

	/* Give the driver a chance to veto the watch */
	if (counter->ops->watch_validate) {
		err = counter->ops->watch_validate(counter, &watch);
		if (err < 0)
			goto err_exit;
	}

	comp_node.component = watch.component;

	err = counter_set_event_node(counter, &watch, &comp_node);

err_exit:
	mutex_unlock(&counter->n_events_list_lock);

	return err;
}
348 
349 static long counter_chrdev_ioctl(struct file *filp, unsigned int cmd,
350 				 unsigned long arg)
351 {
352 	struct counter_device *const counter = filp->private_data;
353 	int ret = -ENODEV;
354 
355 	mutex_lock(&counter->ops_exist_lock);
356 
357 	if (!counter->ops)
358 		goto out_unlock;
359 
360 	switch (cmd) {
361 	case COUNTER_ADD_WATCH_IOCTL:
362 		ret = counter_add_watch(counter, arg);
363 		break;
364 	case COUNTER_ENABLE_EVENTS_IOCTL:
365 		ret = counter_enable_events(counter);
366 		break;
367 	case COUNTER_DISABLE_EVENTS_IOCTL:
368 		ret = counter_disable_events(counter);
369 		break;
370 	default:
371 		ret = -ENOIOCTLCMD;
372 		break;
373 	}
374 
375 out_unlock:
376 	mutex_unlock(&counter->ops_exist_lock);
377 
378 	return ret;
379 }
380 
381 static int counter_chrdev_open(struct inode *inode, struct file *filp)
382 {
383 	struct counter_device *const counter = container_of(inode->i_cdev,
384 							    typeof(*counter),
385 							    chrdev);
386 
387 	get_device(&counter->dev);
388 	filp->private_data = counter;
389 
390 	return nonseekable_open(inode, filp);
391 }
392 
393 static int counter_chrdev_release(struct inode *inode, struct file *filp)
394 {
395 	struct counter_device *const counter = filp->private_data;
396 	int ret = 0;
397 
398 	mutex_lock(&counter->ops_exist_lock);
399 
400 	if (!counter->ops) {
401 		/* Free any lingering held memory */
402 		counter_events_list_free(&counter->events_list);
403 		counter_events_list_free(&counter->next_events_list);
404 		ret = -ENODEV;
405 		goto out_unlock;
406 	}
407 
408 	ret = counter_disable_events(counter);
409 	if (ret < 0) {
410 		mutex_unlock(&counter->ops_exist_lock);
411 		return ret;
412 	}
413 
414 out_unlock:
415 	mutex_unlock(&counter->ops_exist_lock);
416 
417 	put_device(&counter->dev);
418 
419 	return ret;
420 }
421 
/* File operations for the Counter character device */
static const struct file_operations counter_fops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.read = counter_chrdev_read,
	.poll = counter_chrdev_poll,
	.unlocked_ioctl = counter_chrdev_ioctl,
	.open = counter_chrdev_open,
	.release = counter_chrdev_release,
};
431 
432 int counter_chrdev_add(struct counter_device *const counter)
433 {
434 	/* Initialize Counter events lists */
435 	INIT_LIST_HEAD(&counter->events_list);
436 	INIT_LIST_HEAD(&counter->next_events_list);
437 	spin_lock_init(&counter->events_list_lock);
438 	mutex_init(&counter->n_events_list_lock);
439 	init_waitqueue_head(&counter->events_wait);
440 	mutex_init(&counter->events_lock);
441 
442 	/* Initialize character device */
443 	cdev_init(&counter->chrdev, &counter_fops);
444 
445 	/* Allocate Counter events queue */
446 	return kfifo_alloc(&counter->events, 64, GFP_KERNEL);
447 }
448 
/*
 * counter_chrdev_remove - tear down the Counter character device state
 * @counter:	Counter device to tear down
 *
 * Frees the events kfifo allocated by counter_chrdev_add().
 */
void counter_chrdev_remove(struct counter_device *const counter)
{
	kfifo_free(&counter->events);
}
453 
454 static int counter_get_data(struct counter_device *const counter,
455 			    const struct counter_comp_node *const comp_node,
456 			    u64 *const value)
457 {
458 	const struct counter_comp *const comp = &comp_node->comp;
459 	void *const parent = comp_node->parent;
460 	u8 value_u8 = 0;
461 	u32 value_u32 = 0;
462 	int ret;
463 
464 	if (comp_node->component.type == COUNTER_COMPONENT_NONE)
465 		return 0;
466 
467 	switch (comp->type) {
468 	case COUNTER_COMP_U8:
469 	case COUNTER_COMP_BOOL:
470 		switch (comp_node->component.scope) {
471 		case COUNTER_SCOPE_DEVICE:
472 			ret = comp->device_u8_read(counter, &value_u8);
473 			break;
474 		case COUNTER_SCOPE_SIGNAL:
475 			ret = comp->signal_u8_read(counter, parent, &value_u8);
476 			break;
477 		case COUNTER_SCOPE_COUNT:
478 			ret = comp->count_u8_read(counter, parent, &value_u8);
479 			break;
480 		}
481 		*value = value_u8;
482 		return ret;
483 	case COUNTER_COMP_SIGNAL_LEVEL:
484 	case COUNTER_COMP_FUNCTION:
485 	case COUNTER_COMP_ENUM:
486 	case COUNTER_COMP_COUNT_DIRECTION:
487 	case COUNTER_COMP_COUNT_MODE:
488 		switch (comp_node->component.scope) {
489 		case COUNTER_SCOPE_DEVICE:
490 			ret = comp->device_u32_read(counter, &value_u32);
491 			break;
492 		case COUNTER_SCOPE_SIGNAL:
493 			ret = comp->signal_u32_read(counter, parent,
494 						    &value_u32);
495 			break;
496 		case COUNTER_SCOPE_COUNT:
497 			ret = comp->count_u32_read(counter, parent, &value_u32);
498 			break;
499 		}
500 		*value = value_u32;
501 		return ret;
502 	case COUNTER_COMP_U64:
503 		switch (comp_node->component.scope) {
504 		case COUNTER_SCOPE_DEVICE:
505 			return comp->device_u64_read(counter, value);
506 		case COUNTER_SCOPE_SIGNAL:
507 			return comp->signal_u64_read(counter, parent, value);
508 		case COUNTER_SCOPE_COUNT:
509 			return comp->count_u64_read(counter, parent, value);
510 		default:
511 			return -EINVAL;
512 		}
513 	case COUNTER_COMP_SYNAPSE_ACTION:
514 		ret = comp->action_read(counter, parent, comp->priv,
515 					&value_u32);
516 		*value = value_u32;
517 		return ret;
518 	default:
519 		return -EINVAL;
520 	}
521 }
522 
523 /**
524  * counter_push_event - queue event for userspace reading
525  * @counter:	pointer to Counter structure
526  * @event:	triggered event
527  * @channel:	event channel
528  *
529  * Note: If no one is watching for the respective event, it is silently
530  * discarded.
531  */
532 void counter_push_event(struct counter_device *const counter, const u8 event,
533 			const u8 channel)
534 {
535 	struct counter_event ev;
536 	unsigned int copied = 0;
537 	unsigned long flags;
538 	struct counter_event_node *event_node;
539 	struct counter_comp_node *comp_node;
540 
541 	ev.timestamp = ktime_get_ns();
542 	ev.watch.event = event;
543 	ev.watch.channel = channel;
544 
545 	/* Could be in an interrupt context, so use a spin lock */
546 	spin_lock_irqsave(&counter->events_list_lock, flags);
547 
548 	/* Search for event in the list */
549 	list_for_each_entry(event_node, &counter->events_list, l)
550 		if (event_node->event == event &&
551 		    event_node->channel == channel)
552 			break;
553 
554 	/* If event is not in the list */
555 	if (&event_node->l == &counter->events_list)
556 		goto exit_early;
557 
558 	/* Read and queue relevant comp for userspace */
559 	list_for_each_entry(comp_node, &event_node->comp_list, l) {
560 		ev.watch.component = comp_node->component;
561 		ev.status = -counter_get_data(counter, comp_node, &ev.value);
562 
563 		copied += kfifo_in(&counter->events, &ev, 1);
564 	}
565 
566 exit_early:
567 	spin_unlock_irqrestore(&counter->events_list_lock, flags);
568 
569 	if (copied)
570 		wake_up_poll(&counter->events_wait, EPOLLIN);
571 }
572 EXPORT_SYMBOL_GPL(counter_push_event);
573