// SPDX-License-Identifier: GPL-2.0+
/*
 * Provides user-space access to the SSAM EC via the /dev/surface/aggregator
 * misc device. Intended for debugging and development.
 *
 * Copyright (C) 2020-2021 Maximilian Luz <luzmaximilian@gmail.com>
 */

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/kref.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

#include <linux/surface_aggregator/cdev.h>
#include <linux/surface_aggregator/controller.h>
#include <linux/surface_aggregator/serial_hub.h>

#define SSAM_CDEV_DEVICE_NAME	"surface_aggregator_cdev"


/* -- Main structures. ------------------------------------------------------ */

enum ssam_cdev_device_state {
	SSAM_CDEV_DEVICE_SHUTDOWN_BIT = BIT(0),
};

struct ssam_cdev {
	struct kref kref;
	struct rw_semaphore lock;

	struct device *dev;
	struct ssam_controller *ctrl;
	struct miscdevice mdev;
	unsigned long flags;

	struct rw_semaphore client_lock;  /* Guards client list. */
	struct list_head client_list;
};

struct ssam_cdev_client;

struct ssam_cdev_notifier {
	struct ssam_cdev_client *client;
	struct ssam_event_notifier nf;
};

struct ssam_cdev_client {
	struct ssam_cdev *cdev;
	struct list_head node;

	struct mutex notifier_lock;	/* Guards notifier access for registration */
	struct ssam_cdev_notifier *notifier[SSH_NUM_EVENTS];

	struct mutex read_lock;		/* Guards FIFO buffer read access */
	struct mutex write_lock;	/* Guards FIFO buffer write access */
	DECLARE_KFIFO(buffer, u8, 4096);

	wait_queue_head_t waitq;
	struct fasync_struct *fasync;
};
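
/*
 * Note on locking, as used below: cdev->lock guards the validity of
 * cdev->ctrl and cdev->dev and is held (as reader) around any controller
 * access, while cdev->client_lock guards the client list. Per client,
 * notifier_lock guards the notifier table, and read_lock/write_lock guard
 * the reader and writer side of the event FIFO, respectively.
 */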

static void __ssam_cdev_release(struct kref *kref)
{
	kfree(container_of(kref, struct ssam_cdev, kref));
}

static struct ssam_cdev *ssam_cdev_get(struct ssam_cdev *cdev)
{
	if (cdev)
		kref_get(&cdev->kref);

	return cdev;
}

static void ssam_cdev_put(struct ssam_cdev *cdev)
{
	if (cdev)
		kref_put(&cdev->kref, __ssam_cdev_release);
}


/* -- Notifier handling. ---------------------------------------------------- */

static u32 ssam_cdev_notifier(struct ssam_event_notifier *nf, const struct ssam_event *in)
{
	struct ssam_cdev_notifier *cdev_nf = container_of(nf, struct ssam_cdev_notifier, nf);
	struct ssam_cdev_client *client = cdev_nf->client;
	struct ssam_cdev_event event;
	size_t n = struct_size(&event, data, in->length);

	/* Translate event. */
	event.target_category = in->target_category;
	event.target_id = in->target_id;
	event.command_id = in->command_id;
	event.instance_id = in->instance_id;
	event.length = in->length;

	mutex_lock(&client->write_lock);

	/* Make sure we have enough space. */
	if (kfifo_avail(&client->buffer) < n) {
		dev_warn(client->cdev->dev,
			 "buffer full, dropping event (tc: %#04x, tid: %#04x, cid: %#04x, iid: %#04x)\n",
			 in->target_category, in->target_id, in->command_id, in->instance_id);
		mutex_unlock(&client->write_lock);
		return 0;
	}

	/* Copy event header and payload. */
	kfifo_in(&client->buffer, (const u8 *)&event, struct_size(&event, data, 0));
	kfifo_in(&client->buffer, &in->data[0], in->length);

	mutex_unlock(&client->write_lock);

	/* Notify waiting readers. */
	kill_fasync(&client->fasync, SIGIO, POLL_IN);
	wake_up_interruptible(&client->waitq);

	/*
	 * Don't mark events as handled; this is the job of a proper driver
	 * and not the debugging interface.
	 */
	return 0;
}
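
/*
 * Note on the stream format produced above: each event is pushed into the
 * client FIFO as a struct ssam_cdev_event header immediately followed by
 * event.length payload bytes. Both parts are written under write_lock after
 * checking for sufficient space, so records always enter the buffer whole.
 * Readers consume this stream via read() below.
 */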

static int ssam_cdev_notifier_register(struct ssam_cdev_client *client, u8 tc, int priority)
{
	const u16 rqid = ssh_tc_to_rqid(tc);
	const u16 event = ssh_rqid_to_event(rqid);
	struct ssam_cdev_notifier *nf;
	int status;

	/* Validate notifier target category. */
	if (!ssh_rqid_is_event(rqid))
		return -EINVAL;

	mutex_lock(&client->notifier_lock);

	/* Check if the notifier has already been registered. */
	if (client->notifier[event]) {
		mutex_unlock(&client->notifier_lock);
		return -EEXIST;
	}

	/* Allocate new notifier. */
	nf = kzalloc(sizeof(*nf), GFP_KERNEL);
	if (!nf) {
		mutex_unlock(&client->notifier_lock);
		return -ENOMEM;
	}

	/*
	 * Create a dummy notifier with the minimal required fields for
	 * observer registration. Note that we can skip fully specifying event
	 * and registry here as we do not need any matching and use silent
	 * registration, which does not enable the corresponding event.
	 */
	nf->client = client;
	nf->nf.base.fn = ssam_cdev_notifier;
	nf->nf.base.priority = priority;
	nf->nf.event.id.target_category = tc;
	nf->nf.event.mask = 0;	/* Do not do any matching. */
	nf->nf.flags = SSAM_EVENT_NOTIFIER_OBSERVER;

	/* Register notifier. */
	status = ssam_notifier_register(client->cdev->ctrl, &nf->nf);
	if (status)
		kfree(nf);
	else
		client->notifier[event] = nf;

	mutex_unlock(&client->notifier_lock);
	return status;
}

static int ssam_cdev_notifier_unregister(struct ssam_cdev_client *client, u8 tc)
{
	const u16 rqid = ssh_tc_to_rqid(tc);
	const u16 event = ssh_rqid_to_event(rqid);
	int status;

	/* Validate notifier target category. */
	if (!ssh_rqid_is_event(rqid))
		return -EINVAL;

	mutex_lock(&client->notifier_lock);

	/* Check if the notifier is currently registered. */
	if (!client->notifier[event]) {
		mutex_unlock(&client->notifier_lock);
		return -ENOENT;
	}

	/* Unregister and free notifier. */
	status = ssam_notifier_unregister(client->cdev->ctrl, &client->notifier[event]->nf);
	kfree(client->notifier[event]);
	client->notifier[event] = NULL;

	mutex_unlock(&client->notifier_lock);
	return status;
}

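/*
 * Forcibly unregister all notifiers of a client. This runs both when a
 * client closes its file and when the driver is unbound. Holding cdev->lock
 * as reader below keeps the controller from being torn down concurrently;
 * ssam_dbg_device_remove() takes the same semaphore as writer before
 * clearing cdev->ctrl.
 */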
static void ssam_cdev_notifier_unregister_all(struct ssam_cdev_client *client)
{
	int i;

	down_read(&client->cdev->lock);

	/*
	 * This function may be used during shutdown, so we need to test for
	 * cdev->ctrl instead of the SSAM_CDEV_DEVICE_SHUTDOWN_BIT bit.
	 */
	if (client->cdev->ctrl) {
		for (i = 0; i < SSH_NUM_EVENTS; i++)
			ssam_cdev_notifier_unregister(client, i + 1);

	} else {
		int count = 0;

		/*
		 * The device has been shut down. Any remaining notifiers are
		 * a bug, so warn about them as this would otherwise hardly be
		 * noticeable. Nevertheless, free them as well.
		 */
		mutex_lock(&client->notifier_lock);
		for (i = 0; i < SSH_NUM_EVENTS; i++) {
			count += !!(client->notifier[i]);
			kfree(client->notifier[i]);
			client->notifier[i] = NULL;
		}
		mutex_unlock(&client->notifier_lock);

		WARN_ON(count > 0);
	}

	up_read(&client->cdev->lock);
}


/* -- IOCTL functions. ------------------------------------------------------ */

static long ssam_cdev_request(struct ssam_cdev_client *client, struct ssam_cdev_request __user *r)
{
	struct ssam_cdev_request rqst;
	struct ssam_request spec = {};
	struct ssam_response rsp = {};
	const void __user *plddata;
	void __user *rspdata;
	int status = 0, ret = 0, tmp;

	ret = copy_struct_from_user(&rqst, sizeof(rqst), r, sizeof(*r));
	if (ret)
		goto out;

	plddata = u64_to_user_ptr(rqst.payload.data);
	rspdata = u64_to_user_ptr(rqst.response.data);

	/* Setup basic request fields. */
	spec.target_category = rqst.target_category;
	spec.target_id = rqst.target_id;
	spec.command_id = rqst.command_id;
	spec.instance_id = rqst.instance_id;
	spec.flags = 0;
	spec.length = rqst.payload.length;
	spec.payload = NULL;

	if (rqst.flags & SSAM_CDEV_REQUEST_HAS_RESPONSE)
		spec.flags |= SSAM_REQUEST_HAS_RESPONSE;

	if (rqst.flags & SSAM_CDEV_REQUEST_UNSEQUENCED)
		spec.flags |= SSAM_REQUEST_UNSEQUENCED;

	rsp.capacity = rqst.response.length;
	rsp.length = 0;
	rsp.pointer = NULL;

	/* Get request payload from user-space. */
	if (spec.length) {
		if (!plddata) {
			ret = -EINVAL;
			goto out;
		}

		/*
		 * Note: spec.length is limited to U16_MAX bytes via struct
		 * ssam_cdev_request. This is slightly larger than the
		 * theoretical maximum (SSH_COMMAND_MAX_PAYLOAD_SIZE) of the
		 * underlying protocol (note that nothing remotely this size
		 * should ever be allocated in any normal case). This size is
		 * validated later in ssam_request_sync(); for allocation, the
		 * bound imposed by u16 should be enough.
		 */
		spec.payload = kzalloc(spec.length, GFP_KERNEL);
		if (!spec.payload) {
			ret = -ENOMEM;
			goto out;
		}

		if (copy_from_user((void *)spec.payload, plddata, spec.length)) {
			ret = -EFAULT;
			goto out;
		}
	}

	/* Allocate response buffer. */
	if (rsp.capacity) {
		if (!rspdata) {
			ret = -EINVAL;
			goto out;
		}

		/*
		 * Note: rsp.capacity is limited to U16_MAX bytes via struct
		 * ssam_cdev_request. This is slightly larger than the
		 * theoretical maximum (SSH_COMMAND_MAX_PAYLOAD_SIZE) of the
		 * underlying protocol (note that nothing remotely this size
		 * should ever be allocated in any normal case). In later use,
		 * this capacity does not have to be strictly bounded, as it
		 * is only used as an output buffer to be written to. For
		 * allocation, the bound imposed by u16 should be enough.
		 */
		rsp.pointer = kzalloc(rsp.capacity, GFP_KERNEL);
		if (!rsp.pointer) {
			ret = -ENOMEM;
			goto out;
		}
	}

	/* Perform request. */
	status = ssam_request_sync(client->cdev->ctrl, &spec, &rsp);
	if (status)
		goto out;

	/* Copy response to user-space. */
	if (rsp.length && copy_to_user(rspdata, rsp.pointer, rsp.length))
		ret = -EFAULT;

out:
	/* Always try to set response-length and status. */
	tmp = put_user(rsp.length, &r->response.length);
	if (tmp)
		ret = tmp;

	tmp = put_user(status, &r->status);
	if (tmp)
		ret = tmp;

	/* Cleanup. */
	kfree(spec.payload);
	kfree(rsp.pointer);

	return ret;
}
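
/*
 * A rough user-space usage sketch for the SSAM_CDEV_REQUEST ioctl handled
 * above (types and flags come from the UAPI header
 * <linux/surface_aggregator/cdev.h>; the target/command IDs and the "buf"
 * response buffer are made-up examples, and error handling is omitted):
 *
 *	struct ssam_cdev_request rqst = {
 *		.target_category = 0x01,
 *		.target_id       = 0x01,
 *		.command_id      = 0x13,
 *		.instance_id     = 0x00,
 *		.flags           = SSAM_CDEV_REQUEST_HAS_RESPONSE,
 *		.response.data   = (__u64)(uintptr_t)buf,
 *		.response.length = sizeof(buf),
 *	};
 *
 *	ioctl(fd, SSAM_CDEV_REQUEST, &rqst);
 *
 * On return, rqst.status holds the request status set above and
 * rqst.response.length the number of valid response bytes written to buf.
 */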

static long ssam_cdev_notif_register(struct ssam_cdev_client *client,
				     const struct ssam_cdev_notifier_desc __user *d)
{
	struct ssam_cdev_notifier_desc desc;
	long ret;

	ret = copy_struct_from_user(&desc, sizeof(desc), d, sizeof(*d));
	if (ret)
		return ret;

	return ssam_cdev_notifier_register(client, desc.target_category, desc.priority);
}

static long ssam_cdev_notif_unregister(struct ssam_cdev_client *client,
				       const struct ssam_cdev_notifier_desc __user *d)
{
	struct ssam_cdev_notifier_desc desc;
	long ret;

	ret = copy_struct_from_user(&desc, sizeof(desc), d, sizeof(*d));
	if (ret)
		return ret;

	return ssam_cdev_notifier_unregister(client, desc.target_category);
}

static long ssam_cdev_event_enable(struct ssam_cdev_client *client,
				   const struct ssam_cdev_event_desc __user *d)
{
	struct ssam_cdev_event_desc desc;
	struct ssam_event_registry reg;
	struct ssam_event_id id;
	long ret;

	/* Read descriptor from user-space. */
	ret = copy_struct_from_user(&desc, sizeof(desc), d, sizeof(*d));
	if (ret)
		return ret;

	/* Translate descriptor. */
	reg.target_category = desc.reg.target_category;
	reg.target_id = desc.reg.target_id;
	reg.cid_enable = desc.reg.cid_enable;
	reg.cid_disable = desc.reg.cid_disable;

	id.target_category = desc.id.target_category;
	id.instance = desc.id.instance;

	/* Enable event. */
	return ssam_controller_event_enable(client->cdev->ctrl, reg, id, desc.flags);
}

static long ssam_cdev_event_disable(struct ssam_cdev_client *client,
				    const struct ssam_cdev_event_desc __user *d)
{
	struct ssam_cdev_event_desc desc;
	struct ssam_event_registry reg;
	struct ssam_event_id id;
	long ret;

	/* Read descriptor from user-space. */
	ret = copy_struct_from_user(&desc, sizeof(desc), d, sizeof(*d));
	if (ret)
		return ret;

	/* Translate descriptor. */
	reg.target_category = desc.reg.target_category;
	reg.target_id = desc.reg.target_id;
	reg.cid_enable = desc.reg.cid_enable;
	reg.cid_disable = desc.reg.cid_disable;

	id.target_category = desc.id.target_category;
	id.instance = desc.id.instance;

	/* Disable event. */
	return ssam_controller_event_disable(client->cdev->ctrl, reg, id, desc.flags);
}
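
/*
 * Note that registering a notifier (SSAM_CDEV_NOTIF_REGISTER) only installs
 * an observer and, as noted above, does not enable the event source itself.
 * To actually receive events, a client will typically both register a
 * notifier for the target category and enable the corresponding event via
 * SSAM_CDEV_EVENT_ENABLE, and disable/unregister both again when done.
 */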


/* -- File operations. ------------------------------------------------------ */

static int ssam_cdev_device_open(struct inode *inode, struct file *filp)
{
	struct miscdevice *mdev = filp->private_data;
	struct ssam_cdev_client *client;
	struct ssam_cdev *cdev = container_of(mdev, struct ssam_cdev, mdev);

	/* Initialize client */
	client = vzalloc(sizeof(*client));
	if (!client)
		return -ENOMEM;

	client->cdev = ssam_cdev_get(cdev);

	INIT_LIST_HEAD(&client->node);

	mutex_init(&client->notifier_lock);

	mutex_init(&client->read_lock);
	mutex_init(&client->write_lock);
	INIT_KFIFO(client->buffer);
	init_waitqueue_head(&client->waitq);

	filp->private_data = client;

	/* Attach client. */
	down_write(&cdev->client_lock);

	if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &cdev->flags)) {
		up_write(&cdev->client_lock);
		mutex_destroy(&client->write_lock);
		mutex_destroy(&client->read_lock);
		mutex_destroy(&client->notifier_lock);
		ssam_cdev_put(client->cdev);
		vfree(client);
		return -ENODEV;
	}
	list_add_tail(&client->node, &cdev->client_list);

	up_write(&cdev->client_lock);

	stream_open(inode, filp);
	return 0;
}

static int ssam_cdev_device_release(struct inode *inode, struct file *filp)
{
	struct ssam_cdev_client *client = filp->private_data;

	/* Force-unregister all remaining notifiers of this client. */
	ssam_cdev_notifier_unregister_all(client);

	/* Detach client. */
	down_write(&client->cdev->client_lock);
	list_del(&client->node);
	up_write(&client->cdev->client_lock);

	/* Free client. */
	mutex_destroy(&client->write_lock);
	mutex_destroy(&client->read_lock);

	mutex_destroy(&client->notifier_lock);

	ssam_cdev_put(client->cdev);
	vfree(client);

	return 0;
}

static long __ssam_cdev_device_ioctl(struct ssam_cdev_client *client, unsigned int cmd,
				     unsigned long arg)
{
	switch (cmd) {
	case SSAM_CDEV_REQUEST:
		return ssam_cdev_request(client, (struct ssam_cdev_request __user *)arg);

	case SSAM_CDEV_NOTIF_REGISTER:
		return ssam_cdev_notif_register(client,
						(struct ssam_cdev_notifier_desc __user *)arg);

	case SSAM_CDEV_NOTIF_UNREGISTER:
		return ssam_cdev_notif_unregister(client,
						  (struct ssam_cdev_notifier_desc __user *)arg);

	case SSAM_CDEV_EVENT_ENABLE:
		return ssam_cdev_event_enable(client, (struct ssam_cdev_event_desc __user *)arg);

	case SSAM_CDEV_EVENT_DISABLE:
		return ssam_cdev_event_disable(client, (struct ssam_cdev_event_desc __user *)arg);

	default:
		return -ENOTTY;
	}
}

static long ssam_cdev_device_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ssam_cdev_client *client = file->private_data;
	long status;

	/* Ensure that controller is valid for as long as we need it. */
	if (down_read_killable(&client->cdev->lock))
		return -ERESTARTSYS;

	if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &client->cdev->flags)) {
		up_read(&client->cdev->lock);
		return -ENODEV;
	}

	status = __ssam_cdev_device_ioctl(client, cmd, arg);

	up_read(&client->cdev->lock);
	return status;
}

static ssize_t ssam_cdev_read(struct file *file, char __user *buf, size_t count, loff_t *offs)
{
	struct ssam_cdev_client *client = file->private_data;
	struct ssam_cdev *cdev = client->cdev;
	unsigned int copied;
	int status = 0;

	if (down_read_killable(&cdev->lock))
		return -ERESTARTSYS;

	/* Make sure we're not shut down. */
	if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &cdev->flags)) {
		up_read(&cdev->lock);
		return -ENODEV;
	}

	do {
		/* Check availability, wait if necessary. */
		if (kfifo_is_empty(&client->buffer)) {
			up_read(&cdev->lock);

			if (file->f_flags & O_NONBLOCK)
				return -EAGAIN;

			status = wait_event_interruptible(client->waitq,
							  !kfifo_is_empty(&client->buffer) ||
							  test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT,
								   &cdev->flags));
			if (status < 0)
				return status;

			if (down_read_killable(&cdev->lock))
				return -ERESTARTSYS;

			/* Need to check that we're not shut down again. */
			if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &cdev->flags)) {
				up_read(&cdev->lock);
				return -ENODEV;
			}
		}

		/* Try to read from FIFO. */
		if (mutex_lock_interruptible(&client->read_lock)) {
			up_read(&cdev->lock);
			return -ERESTARTSYS;
		}

		status = kfifo_to_user(&client->buffer, buf, count, &copied);
		mutex_unlock(&client->read_lock);

		if (status < 0) {
			up_read(&cdev->lock);
			return status;
		}

		/* We might not have gotten anything; check this here. */
		if (copied == 0 && (file->f_flags & O_NONBLOCK)) {
			up_read(&cdev->lock);
			return -EAGAIN;
		}
	} while (copied == 0);

	up_read(&cdev->lock);
	return copied;
}
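
/*
 * read() hands out the raw byte stream of the event FIFO, i.e. a sequence of
 * struct ssam_cdev_event headers, each followed by its payload. A minimal
 * blocking user-space reader (sketch only; "fd" is an open
 * /dev/surface/aggregator file descriptor, short reads and errors are not
 * handled) might look like:
 *
 *	struct ssam_cdev_event ev;
 *	unsigned char payload[USHRT_MAX];
 *
 *	read(fd, &ev, sizeof(ev));	// fixed-size header
 *	read(fd, payload, ev.length);	// ev.length payload bytes
 */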

static __poll_t ssam_cdev_poll(struct file *file, struct poll_table_struct *pt)
{
	struct ssam_cdev_client *client = file->private_data;
	__poll_t events = 0;

	if (test_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &client->cdev->flags))
		return EPOLLHUP | EPOLLERR;

	poll_wait(file, &client->waitq, pt);

	if (!kfifo_is_empty(&client->buffer))
		events |= EPOLLIN | EPOLLRDNORM;

	return events;
}

static int ssam_cdev_fasync(int fd, struct file *file, int on)
{
	struct ssam_cdev_client *client = file->private_data;

	return fasync_helper(fd, file, on, &client->fasync);
}

static const struct file_operations ssam_controller_fops = {
	.owner          = THIS_MODULE,
	.open           = ssam_cdev_device_open,
	.release        = ssam_cdev_device_release,
	.read           = ssam_cdev_read,
	.poll           = ssam_cdev_poll,
	.fasync         = ssam_cdev_fasync,
	.unlocked_ioctl = ssam_cdev_device_ioctl,
	.compat_ioctl   = ssam_cdev_device_ioctl,
	.llseek         = no_llseek,
};


/* -- Device and driver setup ----------------------------------------------- */

static int ssam_dbg_device_probe(struct platform_device *pdev)
{
	struct ssam_controller *ctrl;
	struct ssam_cdev *cdev;
	int status;

	ctrl = ssam_client_bind(&pdev->dev);
	if (IS_ERR(ctrl))
		return PTR_ERR(ctrl) == -ENODEV ? -EPROBE_DEFER : PTR_ERR(ctrl);

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return -ENOMEM;

	kref_init(&cdev->kref);
	init_rwsem(&cdev->lock);
	cdev->ctrl = ctrl;
	cdev->dev = &pdev->dev;

	cdev->mdev.parent   = &pdev->dev;
	cdev->mdev.minor    = MISC_DYNAMIC_MINOR;
	cdev->mdev.name     = "surface_aggregator";
	cdev->mdev.nodename = "surface/aggregator";
	cdev->mdev.fops     = &ssam_controller_fops;

	init_rwsem(&cdev->client_lock);
	INIT_LIST_HEAD(&cdev->client_list);

	status = misc_register(&cdev->mdev);
	if (status) {
		kfree(cdev);
		return status;
	}

	platform_set_drvdata(pdev, cdev);
	return 0;
}

static int ssam_dbg_device_remove(struct platform_device *pdev)
{
	struct ssam_cdev *cdev = platform_get_drvdata(pdev);
	struct ssam_cdev_client *client;

	/*
	 * Mark device as shut-down. Prevent new clients from being added and
	 * new operations from being executed.
	 */
	set_bit(SSAM_CDEV_DEVICE_SHUTDOWN_BIT, &cdev->flags);

	down_write(&cdev->client_lock);

	/* Remove all notifiers registered by us. */
	list_for_each_entry(client, &cdev->client_list, node) {
		ssam_cdev_notifier_unregister_all(client);
	}

	/* Wake up async clients. */
	list_for_each_entry(client, &cdev->client_list, node) {
		kill_fasync(&client->fasync, SIGIO, POLL_HUP);
	}

	/* Wake up blocking clients. */
	list_for_each_entry(client, &cdev->client_list, node) {
		wake_up_interruptible(&client->waitq);
	}

	up_write(&cdev->client_lock);

	/*
	 * The controller is only guaranteed to be valid for as long as the
	 * driver is bound. Remove controller so that any lingering open files
	 * cannot access it any more after we're gone.
	 */
	down_write(&cdev->lock);
	cdev->ctrl = NULL;
	cdev->dev = NULL;
	up_write(&cdev->lock);

	misc_deregister(&cdev->mdev);

	ssam_cdev_put(cdev);
	return 0;
}
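
/*
 * Note on lifetime: the cdev structure is reference-counted, and each open
 * file holds a reference taken in ssam_cdev_device_open(). The kfree() in
 * __ssam_cdev_release() therefore only happens once the driver has dropped
 * its reference above and the last client has closed its file. Until then,
 * operations on lingering file descriptors fail cleanly with -ENODEV, since
 * the shutdown bit is set and cdev->ctrl has been cleared.
 */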

static struct platform_device *ssam_cdev_device;

static struct platform_driver ssam_cdev_driver = {
	.probe = ssam_dbg_device_probe,
	.remove = ssam_dbg_device_remove,
	.driver = {
		.name = SSAM_CDEV_DEVICE_NAME,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};

static int __init ssam_debug_init(void)
{
	int status;

	ssam_cdev_device = platform_device_alloc(SSAM_CDEV_DEVICE_NAME,
						 PLATFORM_DEVID_NONE);
	if (!ssam_cdev_device)
		return -ENOMEM;

	status = platform_device_add(ssam_cdev_device);
	if (status)
		goto err_device;

	status = platform_driver_register(&ssam_cdev_driver);
	if (status)
		goto err_driver;

	return 0;

err_driver:
	platform_device_del(ssam_cdev_device);
err_device:
	platform_device_put(ssam_cdev_device);
	return status;
}
module_init(ssam_debug_init);

static void __exit ssam_debug_exit(void)
{
	platform_driver_unregister(&ssam_cdev_driver);
	platform_device_unregister(ssam_cdev_device);
}
module_exit(ssam_debug_exit);

MODULE_AUTHOR("Maximilian Luz <luzmaximilian@gmail.com>");
MODULE_DESCRIPTION("User-space interface for Surface System Aggregator Module");
MODULE_LICENSE("GPL");