// SPDX-License-Identifier: GPL-2.0
/*
 * most_cdev.c - Character device component for Mostcore
 *
 * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/poll.h>
#include <linux/kfifo.h>
#include <linux/uaccess.h>
#include <linux/idr.h>
#include <linux/most.h>

#define CHRDEV_REGION_SIZE 50

static struct cdev_component {
	dev_t devno;
	struct ida minor_id;
	unsigned int major;
	struct class *class;
	struct most_component cc;
} comp;

struct comp_channel {
	wait_queue_head_t wq;
	spinlock_t unlink;	/* synchronization lock to unlink channels */
	struct cdev cdev;
	struct device *dev;
	struct mutex io_mutex;
	struct most_interface *iface;
	struct most_channel_config *cfg;
	unsigned int channel_id;
	dev_t devno;
	size_t mbo_offs;
	DECLARE_KFIFO_PTR(fifo, typeof(struct mbo *));
	int access_ref;
	struct list_head list;
};

#define to_channel(d) container_of(d, struct comp_channel, cdev)
static struct list_head channel_list;
static spinlock_t ch_list_lock;

static inline bool ch_has_mbo(struct comp_channel *c)
{
	return channel_has_mbo(c->iface, c->channel_id, &comp.cc) > 0;
}

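/*
 * Return the MBO at the head of the channel's fifo, or fetch a fresh one
 * from the core and queue it if the fifo is empty.  Keeping the partially
 * filled MBO at the fifo head lets comp_write() accumulate data across
 * several write() calls before the buffer is submitted.
 */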
static inline struct mbo *ch_get_mbo(struct comp_channel *c, struct mbo **mbo)
{
	if (!kfifo_peek(&c->fifo, mbo)) {
		*mbo = most_get_mbo(c->iface, c->channel_id, &comp.cc);
		if (*mbo)
			kfifo_in(&c->fifo, mbo, 1);
	}
	return *mbo;
}

static struct comp_channel *get_channel(struct most_interface *iface, int id)
{
	struct comp_channel *c, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&ch_list_lock, flags);
	list_for_each_entry_safe(c, tmp, &channel_list, list) {
		if ((c->iface == iface) && (c->channel_id == id)) {
			spin_unlock_irqrestore(&ch_list_lock, flags);
			return c;
		}
	}
	spin_unlock_irqrestore(&ch_list_lock, flags);
	return NULL;
}

static void stop_channel(struct comp_channel *c)
{
	struct mbo *mbo;

	while (kfifo_out((struct kfifo *)&c->fifo, &mbo, 1))
		most_put_mbo(mbo);
	most_stop_channel(c->iface, c->channel_id, &comp.cc);
}

static void destroy_cdev(struct comp_channel *c)
{
	unsigned long flags;

	device_destroy(comp.class, c->devno);
	cdev_del(&c->cdev);
	spin_lock_irqsave(&ch_list_lock, flags);
	list_del(&c->list);
	spin_unlock_irqrestore(&ch_list_lock, flags);
}

static void destroy_channel(struct comp_channel *c)
{
	ida_simple_remove(&comp.minor_id, MINOR(c->devno));
	kfifo_free(&c->fifo);
	kfree(c);
}

/**
 * comp_open - implements the syscall to open the device
 * @inode: inode pointer
 * @filp: file pointer
 *
 * This stores the channel pointer in the private data field of
 * the file structure and activates the channel within the core.
 */
static int comp_open(struct inode *inode, struct file *filp)
{
	struct comp_channel *c;
	int ret;

	c = to_channel(inode->i_cdev);
	filp->private_data = c;

	if (((c->cfg->direction == MOST_CH_RX) &&
	     ((filp->f_flags & O_ACCMODE) != O_RDONLY)) ||
	     ((c->cfg->direction == MOST_CH_TX) &&
		((filp->f_flags & O_ACCMODE) != O_WRONLY))) {
		return -EACCES;
	}

	mutex_lock(&c->io_mutex);
	if (!c->dev) {
		mutex_unlock(&c->io_mutex);
		return -ENODEV;
	}

	if (c->access_ref) {
		mutex_unlock(&c->io_mutex);
		return -EBUSY;
	}

	c->mbo_offs = 0;
	ret = most_start_channel(c->iface, c->channel_id, &comp.cc);
	if (!ret)
		c->access_ref = 1;
	mutex_unlock(&c->io_mutex);
	return ret;
}
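
/*
 * Illustrative user-space usage of the node created by this component (not
 * part of the driver): an RX channel must be opened read-only and a TX
 * channel write-only, otherwise comp_open() returns -EACCES, and only one
 * opener is allowed at a time (-EBUSY).  A minimal sketch, assuming a TX
 * channel was registered under the hypothetical name "most-tx":
 *
 *	int fd = open("/dev/most-tx", O_WRONLY);
 *
 *	if (fd < 0)
 *		perror("open most-tx");
 */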

/**
 * comp_close - implements the syscall to close the device
 * @inode: inode pointer
 * @filp: file pointer
 *
 * This stops the channel within the core.
 */
static int comp_close(struct inode *inode, struct file *filp)
{
	struct comp_channel *c = to_channel(inode->i_cdev);

	mutex_lock(&c->io_mutex);
	spin_lock(&c->unlink);
	c->access_ref = 0;
	spin_unlock(&c->unlink);
	if (c->dev) {
		stop_channel(c);
		mutex_unlock(&c->io_mutex);
	} else {
		mutex_unlock(&c->io_mutex);
		destroy_channel(c);
	}
	return 0;
}

/**
 * comp_write - implements the syscall to write to the device
 * @filp: file pointer
 * @buf: pointer to user buffer
 * @count: number of bytes to write
 * @offset: offset from where to start writing
 */
static ssize_t comp_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *offset)
{
	int ret;
	size_t to_copy, left;
	struct mbo *mbo = NULL;
	struct comp_channel *c = filp->private_data;

	mutex_lock(&c->io_mutex);
	while (c->dev && !ch_get_mbo(c, &mbo)) {
		mutex_unlock(&c->io_mutex);

		if ((filp->f_flags & O_NONBLOCK))
			return -EAGAIN;
		if (wait_event_interruptible(c->wq, ch_has_mbo(c) || !c->dev))
			return -ERESTARTSYS;
		mutex_lock(&c->io_mutex);
	}

	if (unlikely(!c->dev)) {
		ret = -ENODEV;
		goto unlock;
	}

	to_copy = min(count, c->cfg->buffer_size - c->mbo_offs);
	left = copy_from_user(mbo->virt_address + c->mbo_offs, buf, to_copy);
	if (left == to_copy) {
		ret = -EFAULT;
		goto unlock;
	}

	c->mbo_offs += to_copy - left;
	if (c->mbo_offs >= c->cfg->buffer_size ||
	    c->cfg->data_type == MOST_CH_CONTROL ||
	    c->cfg->data_type == MOST_CH_ASYNC) {
		kfifo_skip(&c->fifo);
		mbo->buffer_length = c->mbo_offs;
		c->mbo_offs = 0;
		most_submit_mbo(mbo);
	}

	ret = to_copy - left;
unlock:
	mutex_unlock(&c->io_mutex);
	return ret;
}
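
/*
 * Note on write semantics, derived from the code above: user data is copied
 * into the MBO at the fifo head and only submitted to the core once
 * c->cfg->buffer_size bytes have accumulated, except for control and
 * asynchronous channels, which submit on every write.  A write() may also
 * accept fewer bytes than requested, so callers should handle short writes,
 * e.g. (user-space sketch):
 *
 *	ssize_t n = write(fd, payload, len);
 *
 *	if (n >= 0 && (size_t)n < len)
 *		... resubmit the remaining len - n bytes ...
 */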

/**
 * comp_read - implements the syscall to read from the device
 * @filp: file pointer
 * @buf: pointer to user buffer
 * @count: number of bytes to read
 * @offset: offset from where to start reading
 */
static ssize_t
comp_read(struct file *filp, char __user *buf, size_t count, loff_t *offset)
{
	size_t to_copy, not_copied, copied;
	struct mbo *mbo = NULL;
	struct comp_channel *c = filp->private_data;

	mutex_lock(&c->io_mutex);
	while (c->dev && !kfifo_peek(&c->fifo, &mbo)) {
		mutex_unlock(&c->io_mutex);
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		if (wait_event_interruptible(c->wq,
					     (!kfifo_is_empty(&c->fifo) ||
					      (!c->dev))))
			return -ERESTARTSYS;
		mutex_lock(&c->io_mutex);
	}

	/* make sure we don't submit to gone devices */
	if (unlikely(!c->dev)) {
		mutex_unlock(&c->io_mutex);
		return -ENODEV;
	}

	to_copy = min_t(size_t,
			count,
			mbo->processed_length - c->mbo_offs);

	not_copied = copy_to_user(buf,
				  mbo->virt_address + c->mbo_offs,
				  to_copy);

	copied = to_copy - not_copied;

	c->mbo_offs += copied;
	if (c->mbo_offs >= mbo->processed_length) {
		kfifo_skip(&c->fifo);
		most_put_mbo(mbo);
		c->mbo_offs = 0;
	}
	mutex_unlock(&c->io_mutex);
	return copied;
}
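
/*
 * Note on read semantics, derived from the code above: read() drains the MBO
 * at the fifo head and may return fewer bytes than requested; the MBO is
 * returned to the core only after all of its processed_length bytes have been
 * consumed.  Illustrative user-space loop (consume() stands in for
 * application code, the buffer size is arbitrary):
 *
 *	char buf[2048];
 *	ssize_t n;
 *
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		consume(buf, n);
 */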

static __poll_t comp_poll(struct file *filp, poll_table *wait)
{
	struct comp_channel *c = filp->private_data;
	__poll_t mask = 0;

	poll_wait(filp, &c->wq, wait);

	mutex_lock(&c->io_mutex);
	if (c->cfg->direction == MOST_CH_RX) {
		if (!c->dev || !kfifo_is_empty(&c->fifo))
			mask |= EPOLLIN | EPOLLRDNORM;
	} else {
		if (!c->dev || !kfifo_is_empty(&c->fifo) || ch_has_mbo(c))
			mask |= EPOLLOUT | EPOLLWRNORM;
	}
	mutex_unlock(&c->io_mutex);
	return mask;
}
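
/*
 * Poll semantics, as implemented above: RX channels report
 * EPOLLIN/EPOLLRDNORM when MBOs are queued, TX channels report
 * EPOLLOUT/EPOLLWRNORM when a buffer can be obtained; both also report ready
 * once the device is gone so a subsequent read()/write() fails with -ENODEV
 * instead of blocking.  Sketch of a user-space poll() caller (illustrative
 * only):
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		n = read(fd, buf, sizeof(buf));
 */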

/*
 * Initialization of struct file_operations
 */
static const struct file_operations channel_fops = {
	.owner = THIS_MODULE,
	.read = comp_read,
	.write = comp_write,
	.open = comp_open,
	.release = comp_close,
	.poll = comp_poll,
};

/**
 * comp_disconnect_channel - disconnect a channel
 * @iface: pointer to interface instance
 * @channel_id: channel index
 *
 * This frees allocated memory and removes the cdev that represents this
 * channel in user space.
 */
static int comp_disconnect_channel(struct most_interface *iface, int channel_id)
{
	struct comp_channel *c;

	c = get_channel(iface, channel_id);
	if (!c)
		return -EINVAL;

	mutex_lock(&c->io_mutex);
	spin_lock(&c->unlink);
	c->dev = NULL;
	spin_unlock(&c->unlink);
	destroy_cdev(c);
	if (c->access_ref) {
		stop_channel(c);
		wake_up_interruptible(&c->wq);
		mutex_unlock(&c->io_mutex);
	} else {
		mutex_unlock(&c->io_mutex);
		destroy_channel(c);
	}
	return 0;
}
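
/*
 * Teardown ordering, as implemented above: the device node is removed first;
 * if user space still holds the file open (access_ref is set), the channel is
 * stopped and sleepers are woken so pending reads/writes return -ENODEV, and
 * the channel object itself is freed later from comp_close().  Otherwise it
 * is freed here.
 */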

/**
 * comp_rx_completion - completion handler for rx channels
 * @mbo: pointer to buffer object that has completed
 *
 * This searches for the channel linked to this MBO and stores the MBO in
 * that channel's local fifo buffer.
 */
static int comp_rx_completion(struct mbo *mbo)
{
	struct comp_channel *c;

	if (!mbo)
		return -EINVAL;

	c = get_channel(mbo->ifp, mbo->hdm_channel_id);
	if (!c)
		return -EINVAL;

	spin_lock(&c->unlink);
	if (!c->access_ref || !c->dev) {
		spin_unlock(&c->unlink);
		return -ENODEV;
	}
	kfifo_in(&c->fifo, &mbo, 1);
	spin_unlock(&c->unlink);
#ifdef DEBUG_MESG
	if (kfifo_is_full(&c->fifo))
		dev_warn(c->dev, "Fifo is full\n");
#endif
	wake_up_interruptible(&c->wq);
	return 0;
}

/**
 * comp_tx_completion - completion handler for tx channels
 * @iface: pointer to interface instance
 * @channel_id: channel index/ID
 *
 * This wakes sleeping processes in the wait-queue.
 */
static int comp_tx_completion(struct most_interface *iface, int channel_id)
{
	struct comp_channel *c;

	c = get_channel(iface, channel_id);
	if (!c)
		return -EINVAL;

	if ((channel_id < 0) || (channel_id >= iface->num_channels)) {
		dev_warn(c->dev, "Channel ID out of range\n");
		return -EINVAL;
	}

	wake_up_interruptible(&c->wq);
	return 0;
}

/**
 * comp_probe - probe function of the driver module
 * @iface: pointer to interface instance
 * @channel_id: channel index/ID
 * @cfg: pointer to actual channel configuration
 * @name: name of the device to be created
 * @args: pointer to channel arguments (not used by this component)
 *
 * This allocates a channel object and creates the device node in /dev
 *
 * Returns 0 on success or error code otherwise.
 */
static int comp_probe(struct most_interface *iface, int channel_id,
		      struct most_channel_config *cfg, char *name, char *args)
{
	struct comp_channel *c;
	unsigned long cl_flags;
	int retval;
	int current_minor;

	if (!cfg || !name)
		return -EINVAL;

	c = get_channel(iface, channel_id);
	if (c)
		return -EEXIST;

	current_minor = ida_simple_get(&comp.minor_id, 0, 0, GFP_KERNEL);
	if (current_minor < 0)
		return current_minor;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c) {
		retval = -ENOMEM;
		goto err_remove_ida;
	}

	c->devno = MKDEV(comp.major, current_minor);
	cdev_init(&c->cdev, &channel_fops);
	c->cdev.owner = THIS_MODULE;
	retval = cdev_add(&c->cdev, c->devno, 1);
	if (retval < 0)
		goto err_free_c;
	c->iface = iface;
	c->cfg = cfg;
	c->channel_id = channel_id;
	c->access_ref = 0;
	spin_lock_init(&c->unlink);
	INIT_KFIFO(c->fifo);
	retval = kfifo_alloc(&c->fifo, cfg->num_buffers, GFP_KERNEL);
	if (retval)
		goto err_del_cdev_and_free_channel;
	init_waitqueue_head(&c->wq);
	mutex_init(&c->io_mutex);
	spin_lock_irqsave(&ch_list_lock, cl_flags);
	list_add_tail(&c->list, &channel_list);
	spin_unlock_irqrestore(&ch_list_lock, cl_flags);
	c->dev = device_create(comp.class, NULL, c->devno, NULL, "%s", name);

	if (IS_ERR(c->dev)) {
		retval = PTR_ERR(c->dev);
		goto err_free_kfifo_and_del_list;
	}
	kobject_uevent(&c->dev->kobj, KOBJ_ADD);
	return 0;

err_free_kfifo_and_del_list:
	kfifo_free(&c->fifo);
	list_del(&c->list);
err_del_cdev_and_free_channel:
	cdev_del(&c->cdev);
err_free_c:
	kfree(c);
err_remove_ida:
	ida_simple_remove(&comp.minor_id, current_minor);
	return retval;
}
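
/*
 * Probe path summary, derived from the code above: a minor number is taken
 * from the IDA, the cdev is registered, a kfifo with one slot per configured
 * buffer is allocated, and the device is created under the supplied name,
 * which typically appears as /dev/<name> via udev.  The name itself is
 * provided by mostcore when a channel is linked to this component (usually
 * through mostcore's configfs interface).
 */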

static struct cdev_component comp = {
	.cc = {
		.mod = THIS_MODULE,
		.name = "cdev",
		.probe_channel = comp_probe,
		.disconnect_channel = comp_disconnect_channel,
		.rx_completion = comp_rx_completion,
		.tx_completion = comp_tx_completion,
	},
};

static int __init mod_init(void)
{
	int err;

	comp.class = class_create(THIS_MODULE, "most_cdev");
	if (IS_ERR(comp.class))
		return PTR_ERR(comp.class);

	INIT_LIST_HEAD(&channel_list);
	spin_lock_init(&ch_list_lock);
	ida_init(&comp.minor_id);

	err = alloc_chrdev_region(&comp.devno, 0, CHRDEV_REGION_SIZE, "cdev");
	if (err < 0)
		goto dest_ida;
	comp.major = MAJOR(comp.devno);
	err = most_register_component(&comp.cc);
	if (err)
		goto free_cdev;
	err = most_register_configfs_subsys(&comp.cc);
	if (err)
		goto deregister_comp;
	return 0;

deregister_comp:
	most_deregister_component(&comp.cc);
free_cdev:
	unregister_chrdev_region(comp.devno, CHRDEV_REGION_SIZE);
dest_ida:
	ida_destroy(&comp.minor_id);
	class_destroy(comp.class);
	return err;
}

static void __exit mod_exit(void)
{
	struct comp_channel *c, *tmp;

	most_deregister_configfs_subsys(&comp.cc);
	most_deregister_component(&comp.cc);

	list_for_each_entry_safe(c, tmp, &channel_list, list) {
		destroy_cdev(c);
		destroy_channel(c);
	}
	unregister_chrdev_region(comp.devno, CHRDEV_REGION_SIZE);
	ida_destroy(&comp.minor_id);
	class_destroy(comp.class);
}

module_init(mod_init);
module_exit(mod_exit);
MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("character device component for mostcore");