// SPDX-License-Identifier: GPL-2.0-only
/*
 * cec-api.c - HDMI Consumer Electronics Control framework - API
 *
 * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 */

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/version.h>

#include <media/cec-pin.h>
#include "cec-priv.h"
#include "cec-pin-priv.h"

static inline struct cec_devnode *cec_devnode_data(struct file *filp)
{
	struct cec_fh *fh = filp->private_data;

	return &fh->adap->devnode;
}

/* CEC file operations */

static __poll_t cec_poll(struct file *filp,
			     struct poll_table_struct *poll)
{
	struct cec_fh *fh = filp->private_data;
	struct cec_adapter *adap = fh->adap;
	__poll_t res = 0;

	poll_wait(filp, &fh->wait, poll);
	if (!cec_is_registered(adap))
		return EPOLLERR | EPOLLHUP | EPOLLPRI;
	mutex_lock(&adap->lock);
	if (adap->is_configured &&
	    adap->transmit_queue_sz < CEC_MAX_MSG_TX_QUEUE_SZ)
		res |= EPOLLOUT | EPOLLWRNORM;
	if (fh->queued_msgs)
		res |= EPOLLIN | EPOLLRDNORM;
	if (fh->total_queued_events)
		res |= EPOLLPRI;
	mutex_unlock(&adap->lock);
	return res;
}
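
/*
 * Informative sketch (not part of the driver): how a userspace client
 * typically polls a /dev/cecX file descriptor. EPOLLIN/EPOLLRDNORM means
 * messages are queued for CEC_RECEIVE, EPOLLPRI means events are queued
 * for CEC_DQEVENT, and EPOLLOUT/EPOLLWRNORM means there is room in the
 * transmit queue. cec_fd is a placeholder for an open /dev/cecX fd.
 *
 *	struct pollfd pfd = { .fd = cec_fd, .events = POLLIN | POLLPRI };
 *
 *	if (poll(&pfd, 1, -1) > 0) {
 *		if (pfd.revents & POLLPRI)
 *			;	// drain events with CEC_DQEVENT
 *		if (pfd.revents & POLLIN)
 *			;	// drain messages with CEC_RECEIVE
 *	}
 */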

static bool cec_is_busy(const struct cec_adapter *adap,
			const struct cec_fh *fh)
{
	bool valid_initiator = adap->cec_initiator && adap->cec_initiator == fh;
	bool valid_follower = adap->cec_follower && adap->cec_follower == fh;

	/*
	 * Exclusive initiators and followers can always access the CEC adapter
	 */
	if (valid_initiator || valid_follower)
		return false;
	/*
	 * All others can only access the CEC adapter if there is no
	 * exclusive initiator and they are in INITIATOR mode.
	 */
	return adap->cec_initiator ||
	       fh->mode_initiator == CEC_MODE_NO_INITIATOR;
}

static long cec_adap_g_caps(struct cec_adapter *adap,
			    struct cec_caps __user *parg)
{
	struct cec_caps caps = {};

	strscpy(caps.driver, adap->devnode.dev.parent->driver->name,
		sizeof(caps.driver));
	strscpy(caps.name, adap->name, sizeof(caps.name));
	caps.available_log_addrs = adap->available_log_addrs;
	caps.capabilities = adap->capabilities;
	caps.version = LINUX_VERSION_CODE;
	if (copy_to_user(parg, &caps, sizeof(caps)))
		return -EFAULT;
	return 0;
}
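
/*
 * Informative sketch (userspace): the capabilities query every CEC
 * application starts with. cec_fd is a placeholder for an open /dev/cecX.
 *
 *	struct cec_caps caps;
 *
 *	if (ioctl(cec_fd, CEC_ADAP_G_CAPS, &caps) == 0)
 *		printf("%s (%s), caps 0x%08x\n",
 *		       caps.name, caps.driver, caps.capabilities);
 */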

static long cec_adap_g_phys_addr(struct cec_adapter *adap,
				 __u16 __user *parg)
{
	u16 phys_addr;

	mutex_lock(&adap->lock);
	phys_addr = adap->phys_addr;
	mutex_unlock(&adap->lock);
	if (copy_to_user(parg, &phys_addr, sizeof(phys_addr)))
		return -EFAULT;
	return 0;
}

static int cec_validate_phys_addr(u16 phys_addr)
{
	int i;

	if (phys_addr == CEC_PHYS_ADDR_INVALID)
		return 0;
	for (i = 0; i < 16; i += 4)
		if (phys_addr & (0xf << i))
			break;
	if (i == 16)
		return 0;
	for (i += 4; i < 16; i += 4)
		if ((phys_addr & (0xf << i)) == 0)
			return -EINVAL;
	return 0;
}
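
/*
 * Informative examples for the validity rule above: reading the address
 * a.b.c.d from left to right, no non-zero nibble may follow a zero one.
 *
 *	0.0.0.0	-> valid (root)
 *	1.0.0.0	-> valid
 *	1.2.1.0	-> valid
 *	1.0.2.0	-> -EINVAL (non-zero nibble after a zero nibble)
 *	f.f.f.f	-> valid (CEC_PHYS_ADDR_INVALID, i.e. no address)
 */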

static long cec_adap_s_phys_addr(struct cec_adapter *adap, struct cec_fh *fh,
				 bool block, __u16 __user *parg)
{
	u16 phys_addr;
	long err;

	if (!(adap->capabilities & CEC_CAP_PHYS_ADDR))
		return -ENOTTY;
	if (copy_from_user(&phys_addr, parg, sizeof(phys_addr)))
		return -EFAULT;

	err = cec_validate_phys_addr(phys_addr);
	if (err)
		return err;
	mutex_lock(&adap->lock);
	if (cec_is_busy(adap, fh))
		err = -EBUSY;
	else
		__cec_s_phys_addr(adap, phys_addr, block);
	mutex_unlock(&adap->lock);
	return err;
}
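
/*
 * Informative sketch (userspace, cec_fd is a placeholder): setting the
 * physical address. Adapters without CEC_CAP_PHYS_ADDR reject this with
 * ENOTTY.
 *
 *	__u16 pa = CEC_PHYS_ADDR_INVALID;	// or e.g. 0x1000 for 1.0.0.0
 *
 *	if (ioctl(cec_fd, CEC_ADAP_S_PHYS_ADDR, &pa))
 *		perror("CEC_ADAP_S_PHYS_ADDR");
 */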

static long cec_adap_g_log_addrs(struct cec_adapter *adap,
				 struct cec_log_addrs __user *parg)
{
	struct cec_log_addrs log_addrs;

	mutex_lock(&adap->lock);
	/*
	 * Use memcpy here instead of assignment: an assignment need not
	 * copy the padding hole at the end of struct cec_log_addrs, so
	 * the subsequent copy_to_user() could then leak one byte of
	 * uninitialized stack memory.
	 */
	memcpy(&log_addrs, &adap->log_addrs, sizeof(log_addrs));
	if (!adap->is_configured)
		memset(log_addrs.log_addr, CEC_LOG_ADDR_INVALID,
		       sizeof(log_addrs.log_addr));
	mutex_unlock(&adap->lock);

	if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
		return -EFAULT;
	return 0;
}

static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh,
				 bool block, struct cec_log_addrs __user *parg)
{
	struct cec_log_addrs log_addrs;
	long err = -EBUSY;

	if (!(adap->capabilities & CEC_CAP_LOG_ADDRS))
		return -ENOTTY;
	if (copy_from_user(&log_addrs, parg, sizeof(log_addrs)))
		return -EFAULT;
	log_addrs.flags &= CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK |
			   CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU |
			   CEC_LOG_ADDRS_FL_CDC_ONLY;
	mutex_lock(&adap->lock);
	if (!adap->is_claiming_log_addrs && !adap->is_configuring &&
	    (!log_addrs.num_log_addrs || !adap->is_configured) &&
	    !cec_is_busy(adap, fh)) {
		err = __cec_s_log_addrs(adap, &log_addrs, block);
		if (!err)
			log_addrs = adap->log_addrs;
	}
	mutex_unlock(&adap->lock);
	if (err)
		return err;
	if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
		return -EFAULT;
	return 0;
}
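
/*
 * Informative sketch (userspace): claiming a single logical address as a
 * playback device. Field values come from the CEC uapi headers; the OSD
 * string and cec_fd are placeholders.
 *
 *	struct cec_log_addrs laddrs = {};
 *
 *	laddrs.cec_version = CEC_OP_CEC_VERSION_2_0;
 *	laddrs.num_log_addrs = 1;
 *	laddrs.vendor_id = CEC_VENDOR_ID_NONE;
 *	laddrs.primary_device_type[0] = CEC_OP_PRIM_DEVTYPE_PLAYBACK;
 *	laddrs.log_addr_type[0] = CEC_LOG_ADDR_TYPE_PLAYBACK;
 *	strcpy(laddrs.osd_name, "Example");
 *
 *	if (ioctl(cec_fd, CEC_ADAP_S_LOG_ADDRS, &laddrs))
 *		perror("CEC_ADAP_S_LOG_ADDRS");
 */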

static long cec_adap_g_connector_info(struct cec_adapter *adap,
				      struct cec_connector_info __user *parg)
{
	int ret = 0;

	if (!(adap->capabilities & CEC_CAP_CONNECTOR_INFO))
		return -ENOTTY;

	mutex_lock(&adap->lock);
	if (copy_to_user(parg, &adap->conn_info, sizeof(adap->conn_info)))
		ret = -EFAULT;
	mutex_unlock(&adap->lock);
	return ret;
}
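
/*
 * Informative sketch (userspace): querying connector information, only
 * available when the adapter sets CEC_CAP_CONNECTOR_INFO.
 *
 *	struct cec_connector_info conn;
 *
 *	if (ioctl(cec_fd, CEC_ADAP_G_CONNECTOR_INFO, &conn) == 0 &&
 *	    conn.type == CEC_CONNECTOR_TYPE_DRM)
 *		;	// conn.drm.card_no / conn.drm.connector_id are valid
 */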

static long cec_transmit(struct cec_adapter *adap, struct cec_fh *fh,
			 bool block, struct cec_msg __user *parg)
{
	struct cec_msg msg = {};
	long err = 0;

	if (!(adap->capabilities & CEC_CAP_TRANSMIT))
		return -ENOTTY;
	if (copy_from_user(&msg, parg, sizeof(msg)))
		return -EFAULT;

	mutex_lock(&adap->lock);
	if (adap->log_addrs.num_log_addrs == 0)
		err = -EPERM;
	else if (adap->is_configuring)
		err = -ENONET;
	else if (cec_is_busy(adap, fh))
		err = -EBUSY;
	else
		err = cec_transmit_msg_fh(adap, &msg, fh, block);
	mutex_unlock(&adap->lock);
	if (err)
		return err;
	if (copy_to_user(parg, &msg, sizeof(msg)))
		return -EFAULT;
	return 0;
}
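
/*
 * Informative sketch (userspace): a blocking CEC_TRANSMIT of a Standby
 * message to the TV, using the cec_msg_*() helpers from the uapi header
 * linux/cec-funcs.h. Assumes logical addresses have been claimed first;
 * own_log_addr is a placeholder (normally taken from CEC_ADAP_G_LOG_ADDRS).
 *
 *	struct cec_msg msg;
 *
 *	cec_msg_init(&msg, own_log_addr, CEC_LOG_ADDR_TV);
 *	cec_msg_standby(&msg);
 *	if (ioctl(cec_fd, CEC_TRANSMIT, &msg) == 0 &&
 *	    (msg.tx_status & CEC_TX_STATUS_OK))
 *		;	// message was acked on the bus
 */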

/* Called by CEC_RECEIVE: wait for a message to arrive */
static int cec_receive_msg(struct cec_fh *fh, struct cec_msg *msg, bool block)
{
	u32 timeout = msg->timeout;
	int res;

	do {
		mutex_lock(&fh->lock);
		/* Are there received messages queued up? */
		if (fh->queued_msgs) {
			/* Yes, return the first one */
			struct cec_msg_entry *entry =
				list_first_entry(&fh->msgs,
						 struct cec_msg_entry, list);

			list_del(&entry->list);
			*msg = entry->msg;
			kfree(entry);
			fh->queued_msgs--;
			mutex_unlock(&fh->lock);
			/* restore original timeout value */
			msg->timeout = timeout;
			return 0;
		}

		/* No, return EAGAIN in non-blocking mode or wait */
		mutex_unlock(&fh->lock);

		/* Return when in non-blocking mode */
		if (!block)
			return -EAGAIN;

		if (msg->timeout) {
			/* The user specified a timeout */
			res = wait_event_interruptible_timeout(fh->wait,
							       fh->queued_msgs,
				msecs_to_jiffies(msg->timeout));
			if (res == 0)
				res = -ETIMEDOUT;
			else if (res > 0)
				res = 0;
		} else {
			/* Wait indefinitely */
			res = wait_event_interruptible(fh->wait,
						       fh->queued_msgs);
		}
		/* Exit on error, otherwise loop to get the new message */
	} while (!res);
	return res;
}

static long cec_receive(struct cec_adapter *adap, struct cec_fh *fh,
			bool block, struct cec_msg __user *parg)
{
	struct cec_msg msg = {};
	long err;

	if (copy_from_user(&msg, parg, sizeof(msg)))
		return -EFAULT;

	err = cec_receive_msg(fh, &msg, block);
	if (err)
		return err;
	msg.flags = 0;
	if (copy_to_user(parg, &msg, sizeof(msg)))
		return -EFAULT;
	return 0;
}
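
/*
 * Informative sketch (userspace): a CEC_RECEIVE with a one second timeout.
 * msg.timeout is in milliseconds; 0 waits indefinitely. In non-blocking
 * mode the call fails with EAGAIN instead of waiting.
 *
 *	struct cec_msg msg = { .timeout = 1000 };
 *
 *	if (ioctl(cec_fd, CEC_RECEIVE, &msg) == 0)
 *		;	// msg.msg[0..msg.len - 1] holds the received message
 */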

static long cec_dqevent(struct cec_adapter *adap, struct cec_fh *fh,
			bool block, struct cec_event __user *parg)
{
	struct cec_event_entry *ev = NULL;
	u64 ts = ~0ULL;
	unsigned int i;
	unsigned int ev_idx;
	long err = 0;

	mutex_lock(&fh->lock);
	while (!fh->total_queued_events && block) {
		mutex_unlock(&fh->lock);
		err = wait_event_interruptible(fh->wait,
					       fh->total_queued_events);
		if (err)
			return err;
		mutex_lock(&fh->lock);
	}

	/* Find the oldest event */
	for (i = 0; i < CEC_NUM_EVENTS; i++) {
		struct cec_event_entry *entry =
			list_first_entry_or_null(&fh->events[i],
						 struct cec_event_entry, list);

		if (entry && entry->ev.ts <= ts) {
			ev = entry;
			ev_idx = i;
			ts = ev->ev.ts;
		}
	}

	if (!ev) {
		err = -EAGAIN;
		goto unlock;
	}
	list_del(&ev->list);

	if (copy_to_user(parg, &ev->ev, sizeof(ev->ev)))
		err = -EFAULT;
	if (ev_idx >= CEC_NUM_CORE_EVENTS)
		kfree(ev);
	fh->queued_events[ev_idx]--;
	fh->total_queued_events--;

unlock:
	mutex_unlock(&fh->lock);
	return err;
}
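
/*
 * Informative sketch (userspace): dequeuing one event, typically after
 * poll() reported EPOLLPRI.
 *
 *	struct cec_event ev;
 *
 *	if (ioctl(cec_fd, CEC_DQEVENT, &ev) == 0 &&
 *	    ev.event == CEC_EVENT_STATE_CHANGE)
 *		;	// ev.state_change.phys_addr / log_addr_mask changed
 */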

static long cec_g_mode(struct cec_adapter *adap, struct cec_fh *fh,
		       u32 __user *parg)
{
	u32 mode = fh->mode_initiator | fh->mode_follower;

	if (copy_to_user(parg, &mode, sizeof(mode)))
		return -EFAULT;
	return 0;
}

static long cec_s_mode(struct cec_adapter *adap, struct cec_fh *fh,
		       u32 __user *parg)
{
	u32 mode;
	u8 mode_initiator;
	u8 mode_follower;
	bool send_pin_event = false;
	long err = 0;

	if (copy_from_user(&mode, parg, sizeof(mode)))
		return -EFAULT;
	if (mode & ~(CEC_MODE_INITIATOR_MSK | CEC_MODE_FOLLOWER_MSK)) {
		dprintk(1, "%s: invalid mode bits set\n", __func__);
		return -EINVAL;
	}

	mode_initiator = mode & CEC_MODE_INITIATOR_MSK;
	mode_follower = mode & CEC_MODE_FOLLOWER_MSK;

	if (mode_initiator > CEC_MODE_EXCL_INITIATOR ||
	    mode_follower > CEC_MODE_MONITOR_ALL) {
		dprintk(1, "%s: unknown mode\n", __func__);
		return -EINVAL;
	}

	if (mode_follower == CEC_MODE_MONITOR_ALL &&
	    !(adap->capabilities & CEC_CAP_MONITOR_ALL)) {
		dprintk(1, "%s: MONITOR_ALL not supported\n", __func__);
		return -EINVAL;
	}

	if (mode_follower == CEC_MODE_MONITOR_PIN &&
	    !(adap->capabilities & CEC_CAP_MONITOR_PIN)) {
		dprintk(1, "%s: MONITOR_PIN not supported\n", __func__);
		return -EINVAL;
	}

	/* Follower modes should always be able to send CEC messages */
	if ((mode_initiator == CEC_MODE_NO_INITIATOR ||
	     !(adap->capabilities & CEC_CAP_TRANSMIT)) &&
	    mode_follower >= CEC_MODE_FOLLOWER &&
	    mode_follower <= CEC_MODE_EXCL_FOLLOWER_PASSTHRU) {
		dprintk(1, "%s: cannot transmit\n", __func__);
		return -EINVAL;
	}

	/* Monitor modes require CEC_MODE_NO_INITIATOR */
	if (mode_initiator && mode_follower >= CEC_MODE_MONITOR_PIN) {
		dprintk(1, "%s: monitor modes require NO_INITIATOR\n",
			__func__);
		return -EINVAL;
	}

	/* Monitor modes require CAP_NET_ADMIN */
	if (mode_follower >= CEC_MODE_MONITOR_PIN && !capable(CAP_NET_ADMIN))
		return -EPERM;

	mutex_lock(&adap->lock);
	/*
	 * You can't become the exclusive follower if someone else already
	 * has that job.
	 */
	if ((mode_follower == CEC_MODE_EXCL_FOLLOWER ||
	     mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) &&
	    adap->cec_follower && adap->cec_follower != fh)
		err = -EBUSY;
	/*
	 * You can't become the exclusive initiator if someone else already
	 * has that job.
	 */
	if (mode_initiator == CEC_MODE_EXCL_INITIATOR &&
	    adap->cec_initiator && adap->cec_initiator != fh)
		err = -EBUSY;

	if (!err) {
		bool old_mon_all = fh->mode_follower == CEC_MODE_MONITOR_ALL;
		bool new_mon_all = mode_follower == CEC_MODE_MONITOR_ALL;

		if (old_mon_all != new_mon_all) {
			if (new_mon_all)
				err = cec_monitor_all_cnt_inc(adap);
			else
				cec_monitor_all_cnt_dec(adap);
		}
	}

	if (!err) {
		bool old_mon_pin = fh->mode_follower == CEC_MODE_MONITOR_PIN;
		bool new_mon_pin = mode_follower == CEC_MODE_MONITOR_PIN;

		if (old_mon_pin != new_mon_pin) {
			send_pin_event = new_mon_pin;
			if (new_mon_pin)
				err = cec_monitor_pin_cnt_inc(adap);
			else
				cec_monitor_pin_cnt_dec(adap);
		}
	}

	if (err) {
		mutex_unlock(&adap->lock);
		return err;
	}

	if (fh->mode_follower == CEC_MODE_FOLLOWER)
		adap->follower_cnt--;
	if (mode_follower == CEC_MODE_FOLLOWER)
		adap->follower_cnt++;
	if (send_pin_event) {
		struct cec_event ev = {
			.flags = CEC_EVENT_FL_INITIAL_STATE,
		};

		ev.event = adap->cec_pin_is_high ? CEC_EVENT_PIN_CEC_HIGH :
						   CEC_EVENT_PIN_CEC_LOW;
		cec_queue_event_fh(fh, &ev, 0);
	}
	if (mode_follower == CEC_MODE_EXCL_FOLLOWER ||
	    mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) {
		adap->passthrough =
			mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU;
		adap->cec_follower = fh;
	} else if (adap->cec_follower == fh) {
		adap->passthrough = false;
		adap->cec_follower = NULL;
	}
	if (mode_initiator == CEC_MODE_EXCL_INITIATOR)
		adap->cec_initiator = fh;
	else if (adap->cec_initiator == fh)
		adap->cec_initiator = NULL;
	fh->mode_initiator = mode_initiator;
	fh->mode_follower = mode_follower;
	mutex_unlock(&adap->lock);
	return 0;
}
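
/*
 * Informative sketch (userspace): becoming the exclusive follower while
 * remaining a regular initiator. Monitor modes would additionally require
 * CAP_NET_ADMIN; the current mode can be read back with CEC_G_MODE.
 *
 *	__u32 mode = CEC_MODE_INITIATOR | CEC_MODE_EXCL_FOLLOWER;
 *
 *	if (ioctl(cec_fd, CEC_S_MODE, &mode))
 *		perror("CEC_S_MODE");
 */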

static long cec_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct cec_fh *fh = filp->private_data;
	struct cec_adapter *adap = fh->adap;
	bool block = !(filp->f_flags & O_NONBLOCK);
	void __user *parg = (void __user *)arg;

	if (!cec_is_registered(adap))
		return -ENODEV;

	switch (cmd) {
	case CEC_ADAP_G_CAPS:
		return cec_adap_g_caps(adap, parg);

	case CEC_ADAP_G_PHYS_ADDR:
		return cec_adap_g_phys_addr(adap, parg);

	case CEC_ADAP_S_PHYS_ADDR:
		return cec_adap_s_phys_addr(adap, fh, block, parg);

	case CEC_ADAP_G_LOG_ADDRS:
		return cec_adap_g_log_addrs(adap, parg);

	case CEC_ADAP_S_LOG_ADDRS:
		return cec_adap_s_log_addrs(adap, fh, block, parg);

	case CEC_ADAP_G_CONNECTOR_INFO:
		return cec_adap_g_connector_info(adap, parg);

	case CEC_TRANSMIT:
		return cec_transmit(adap, fh, block, parg);

	case CEC_RECEIVE:
		return cec_receive(adap, fh, block, parg);

	case CEC_DQEVENT:
		return cec_dqevent(adap, fh, block, parg);

	case CEC_G_MODE:
		return cec_g_mode(adap, fh, parg);

	case CEC_S_MODE:
		return cec_s_mode(adap, fh, parg);

	default:
		return -ENOTTY;
	}
}

static int cec_open(struct inode *inode, struct file *filp)
{
	struct cec_devnode *devnode =
		container_of(inode->i_cdev, struct cec_devnode, cdev);
	struct cec_adapter *adap = to_cec_adapter(devnode);
	struct cec_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);
	/*
	 * Initial events that are automatically sent when the cec device is
	 * opened.
	 */
	struct cec_event ev = {
		.event = CEC_EVENT_STATE_CHANGE,
		.flags = CEC_EVENT_FL_INITIAL_STATE,
	};
	unsigned int i;
	int err;

	if (!fh)
		return -ENOMEM;

	INIT_LIST_HEAD(&fh->msgs);
	INIT_LIST_HEAD(&fh->xfer_list);
	for (i = 0; i < CEC_NUM_EVENTS; i++)
		INIT_LIST_HEAD(&fh->events[i]);
	mutex_init(&fh->lock);
	init_waitqueue_head(&fh->wait);

	fh->mode_initiator = CEC_MODE_INITIATOR;
	fh->adap = adap;

	err = cec_get_device(devnode);
	if (err) {
		kfree(fh);
		return err;
	}

	filp->private_data = fh;

	/* Queue up initial state events */
	ev.state_change.phys_addr = adap->phys_addr;
	ev.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
	ev.state_change.have_conn_info =
		adap->conn_info.type != CEC_CONNECTOR_TYPE_NO_CONNECTOR;
	cec_queue_event_fh(fh, &ev, 0);
#ifdef CONFIG_CEC_PIN
	if (adap->pin && adap->pin->ops->read_hpd &&
	    !adap->devnode.unregistered) {
		err = adap->pin->ops->read_hpd(adap);
		if (err >= 0) {
			ev.event = err ? CEC_EVENT_PIN_HPD_HIGH :
					 CEC_EVENT_PIN_HPD_LOW;
			cec_queue_event_fh(fh, &ev, 0);
		}
	}
	if (adap->pin && adap->pin->ops->read_5v &&
	    !adap->devnode.unregistered) {
		err = adap->pin->ops->read_5v(adap);
		if (err >= 0) {
			ev.event = err ? CEC_EVENT_PIN_5V_HIGH :
					 CEC_EVENT_PIN_5V_LOW;
			cec_queue_event_fh(fh, &ev, 0);
		}
	}
#endif

	mutex_lock(&devnode->lock);
	mutex_lock(&devnode->lock_fhs);
	list_add(&fh->list, &devnode->fhs);
	mutex_unlock(&devnode->lock_fhs);
	mutex_unlock(&devnode->lock);

	return 0;
}
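
/*
 * Informative sketch (userspace): every open() queues an initial
 * CEC_EVENT_STATE_CHANGE with CEC_EVENT_FL_INITIAL_STATE set, so a new
 * client can learn the current state right away. Device path is an
 * example.
 *
 *	int cec_fd = open("/dev/cec0", O_RDWR);
 *	struct cec_event ev;
 *
 *	if (cec_fd >= 0 && ioctl(cec_fd, CEC_DQEVENT, &ev) == 0 &&
 *	    (ev.flags & CEC_EVENT_FL_INITIAL_STATE))
 *		;	// ev.state_change.phys_addr is the current PA
 */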

/* Called when the last reference to the file is released */
static int cec_release(struct inode *inode, struct file *filp)
{
	struct cec_devnode *devnode = cec_devnode_data(filp);
	struct cec_adapter *adap = to_cec_adapter(devnode);
	struct cec_fh *fh = filp->private_data;
	unsigned int i;

	mutex_lock(&adap->lock);
	if (adap->cec_initiator == fh)
		adap->cec_initiator = NULL;
	if (adap->cec_follower == fh) {
		adap->cec_follower = NULL;
		adap->passthrough = false;
	}
	if (fh->mode_follower == CEC_MODE_FOLLOWER)
		adap->follower_cnt--;
	if (fh->mode_follower == CEC_MODE_MONITOR_PIN)
		cec_monitor_pin_cnt_dec(adap);
	if (fh->mode_follower == CEC_MODE_MONITOR_ALL)
		cec_monitor_all_cnt_dec(adap);
	mutex_unlock(&adap->lock);

	mutex_lock(&devnode->lock);
	mutex_lock(&devnode->lock_fhs);
	list_del(&fh->list);
	mutex_unlock(&devnode->lock_fhs);
	mutex_unlock(&devnode->lock);

	/* Unhook pending transmits from this filehandle. */
	mutex_lock(&adap->lock);
	while (!list_empty(&fh->xfer_list)) {
		struct cec_data *data =
			list_first_entry(&fh->xfer_list, struct cec_data, xfer_list);

		data->blocking = false;
		data->fh = NULL;
		list_del_init(&data->xfer_list);
	}
	mutex_unlock(&adap->lock);

	mutex_lock(&fh->lock);
	while (!list_empty(&fh->msgs)) {
		struct cec_msg_entry *entry =
			list_first_entry(&fh->msgs, struct cec_msg_entry, list);

		list_del(&entry->list);
		kfree(entry);
	}
	for (i = CEC_NUM_CORE_EVENTS; i < CEC_NUM_EVENTS; i++) {
		while (!list_empty(&fh->events[i])) {
			struct cec_event_entry *entry =
				list_first_entry(&fh->events[i],
						 struct cec_event_entry, list);

			list_del(&entry->list);
			kfree(entry);
		}
	}
	mutex_unlock(&fh->lock);
	kfree(fh);

	cec_put_device(devnode);
	filp->private_data = NULL;
	return 0;
}

const struct file_operations cec_devnode_fops = {
	.owner = THIS_MODULE,
	.open = cec_open,
	.unlocked_ioctl = cec_ioctl,
	.compat_ioctl = cec_ioctl,
	.release = cec_release,
	.poll = cec_poll,
	.llseek = no_llseek,
};