xref: /openbmc/linux/drivers/acpi/acpi_dbg.c (revision cfbb9be8)
/*
 * ACPI AML interfacing support
 *
 * Copyright (C) 2015, Intel Corporation
 * Authors: Lv Zheng <lv.zheng@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
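/*
 * Usage sketch (editorial note, not part of the original sources): the
 * debugger is driven through the <debugfs>/acpi/acpidbg file created in
 * acpi_aml_init(), typically via the tools/power/acpi/tools/acpidbg
 * utility, or roughly by hand:
 *
 *	# mount -t debugfs none /sys/kernel/debug
 *	# cat /sys/kernel/debug/acpi/acpidbg &		# drain log output
 *	# echo "help" > /sys/kernel/debug/acpi/acpidbg	# send a command
 *
 * The first reader initializes the ACPICA debugger (see acpi_aml_open()),
 * and writers are only admitted while such a reader holds the file open.
 */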

/* #define DEBUG */
#define pr_fmt(fmt) "ACPI: AML: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/circ_buf.h>
#include <linux/acpi.h>
#include "internal.h"

#define ACPI_AML_BUF_ALIGN	(sizeof (acpi_size))
#define ACPI_AML_BUF_SIZE	PAGE_SIZE

#define circ_count(circ) \
	(CIRC_CNT((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
#define circ_count_to_end(circ) \
	(CIRC_CNT_TO_END((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
#define circ_space(circ) \
	(CIRC_SPACE((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
#define circ_space_to_end(circ) \
	(CIRC_SPACE_TO_END((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
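/*
 * Note: the CIRC_* helpers from <linux/circ_buf.h> assume a power-of-two
 * buffer size, which PAGE_SIZE satisfies; head and tail are therefore
 * advanced with "& (ACPI_AML_BUF_SIZE - 1)" throughout this file.
 */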

#define ACPI_AML_OPENED		0x0001
#define ACPI_AML_CLOSED		0x0002
#define ACPI_AML_IN_USER	0x0004 /* user space is writing cmd */
#define ACPI_AML_IN_KERN	0x0008 /* kernel space is reading cmd */
#define ACPI_AML_OUT_USER	0x0010 /* user space is reading log */
#define ACPI_AML_OUT_KERN	0x0020 /* kernel space is writing log */
#define ACPI_AML_USER		(ACPI_AML_IN_USER | ACPI_AML_OUT_USER)
#define ACPI_AML_KERN		(ACPI_AML_IN_KERN | ACPI_AML_OUT_KERN)
#define ACPI_AML_BUSY		(ACPI_AML_USER | ACPI_AML_KERN)
#define ACPI_AML_OPEN		(ACPI_AML_OPENED | ACPI_AML_CLOSED)

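/*
 * struct acpi_aml_io - state of the AML debugger interface
 * @wait:	wait queue for blocked readers/writers of both FIFOs
 * @flags:	ACPI_AML_* state and per-direction busy bits
 * @users:	number of openers of the debugfs file
 * @lock:	protects the flags and counters below
 * @thread:	kthread running the ACPICA debugger command loop
 * @out_buf:	backing storage for @out_crc
 * @out_crc:	log FIFO, kernel (debugger output) -> user space
 * @in_buf:	backing storage for @in_crc
 * @in_crc:	command FIFO, user space -> kernel (debugger input)
 * @function:	debugger callback handed in via acpi_os_execute()
 * @context:	context argument for @function
 * @usages:	count of in-flight debugger callbacks, used to synchronize
 *		thread termination in acpi_aml_release()
 */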
struct acpi_aml_io {
	wait_queue_head_t wait;
	unsigned long flags;
	unsigned long users;
	struct mutex lock;
	struct task_struct *thread;
	char out_buf[ACPI_AML_BUF_SIZE] __aligned(ACPI_AML_BUF_ALIGN);
	struct circ_buf out_crc;
	char in_buf[ACPI_AML_BUF_SIZE] __aligned(ACPI_AML_BUF_ALIGN);
	struct circ_buf in_crc;
	acpi_osd_exec_callback function;
	void *context;
	unsigned long usages;
};

static struct acpi_aml_io acpi_aml_io;
static bool acpi_aml_initialized;
static struct file *acpi_aml_active_reader;
static struct dentry *acpi_aml_dentry;

static inline bool __acpi_aml_running(void)
{
	return acpi_aml_io.thread ? true : false;
}

static inline bool __acpi_aml_access_ok(unsigned long flag)
{
	/*
	 * If the debugger interface is in the opened state
	 * (OPENED && !CLOSED), the debugger buffers may be accessed from
	 * either user space or kernel space.
	 * In addition, for kernel space, only the debugger thread
	 * (matched by thread ID) is allowed to access them.
	 */
	if (!(acpi_aml_io.flags & ACPI_AML_OPENED) ||
	    (acpi_aml_io.flags & ACPI_AML_CLOSED) ||
	    !__acpi_aml_running())
		return false;
	if ((flag & ACPI_AML_KERN) &&
	    current != acpi_aml_io.thread)
		return false;
	return true;
}

static inline bool __acpi_aml_readable(struct circ_buf *circ, unsigned long flag)
{
	/*
	 * No other read is in progress and there is data available in
	 * the buffer.
	 */
	if (!(acpi_aml_io.flags & flag) && circ_count(circ))
		return true;
	return false;
}

static inline bool __acpi_aml_writable(struct circ_buf *circ, unsigned long flag)
{
	/*
	 * No other write is in progress and there is buffer space
	 * available for the write.
	 */
	if (!(acpi_aml_io.flags & flag) && circ_space(circ))
		return true;
	return false;
}

static inline bool __acpi_aml_busy(void)
{
	if (acpi_aml_io.flags & ACPI_AML_BUSY)
		return true;
	return false;
}

static inline bool __acpi_aml_opened(void)
{
	if (acpi_aml_io.flags & ACPI_AML_OPEN)
		return true;
	return false;
}

static inline bool __acpi_aml_used(void)
{
	return acpi_aml_io.usages ? true : false;
}

static inline bool acpi_aml_running(void)
{
	bool ret;

	mutex_lock(&acpi_aml_io.lock);
	ret = __acpi_aml_running();
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static bool acpi_aml_busy(void)
{
	bool ret;

	mutex_lock(&acpi_aml_io.lock);
	ret = __acpi_aml_busy();
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static bool acpi_aml_used(void)
{
	bool ret;

	/*
	 * The usage count is used to avoid race conditions between the
	 * starting and the stopping of the debugger thread.
	 */
	mutex_lock(&acpi_aml_io.lock);
	ret = __acpi_aml_used();
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

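/*
 * Wait conditions for wait_event_interruptible() below: note that these
 * helpers also return true when the interface is no longer accessible
 * (__acpi_aml_access_ok() fails), so that blocked readers/writers are
 * released instead of sleeping forever across a close.
 */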
static bool acpi_aml_kern_readable(void)
{
	bool ret;

	mutex_lock(&acpi_aml_io.lock);
	ret = !__acpi_aml_access_ok(ACPI_AML_IN_KERN) ||
	      __acpi_aml_readable(&acpi_aml_io.in_crc, ACPI_AML_IN_KERN);
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static bool acpi_aml_kern_writable(void)
{
	bool ret;

	mutex_lock(&acpi_aml_io.lock);
	ret = !__acpi_aml_access_ok(ACPI_AML_OUT_KERN) ||
	      __acpi_aml_writable(&acpi_aml_io.out_crc, ACPI_AML_OUT_KERN);
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static bool acpi_aml_user_readable(void)
{
	bool ret;

	mutex_lock(&acpi_aml_io.lock);
	ret = !__acpi_aml_access_ok(ACPI_AML_OUT_USER) ||
	      __acpi_aml_readable(&acpi_aml_io.out_crc, ACPI_AML_OUT_USER);
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static bool acpi_aml_user_writable(void)
{
	bool ret;

	mutex_lock(&acpi_aml_io.lock);
	ret = !__acpi_aml_access_ok(ACPI_AML_IN_USER) ||
	      __acpi_aml_writable(&acpi_aml_io.in_crc, ACPI_AML_IN_USER);
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

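/*
 * The ACPI_AML_IN/OUT_USER/KERN bits act as per-direction busy locks:
 * acpi_aml_lock_read()/acpi_aml_lock_write() take the bit under the mutex
 * and fail with -EFAULT if the interface is not accessible or -EAGAIN if
 * the FIFO is empty/full; acpi_aml_unlock_fifo() drops the bit and
 * optionally wakes up waiters.
 */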
static int acpi_aml_lock_write(struct circ_buf *circ, unsigned long flag)
{
	int ret = 0;

	mutex_lock(&acpi_aml_io.lock);
	if (!__acpi_aml_access_ok(flag)) {
		ret = -EFAULT;
		goto out;
	}
	if (!__acpi_aml_writable(circ, flag)) {
		ret = -EAGAIN;
		goto out;
	}
	acpi_aml_io.flags |= flag;
out:
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static int acpi_aml_lock_read(struct circ_buf *circ, unsigned long flag)
{
	int ret = 0;

	mutex_lock(&acpi_aml_io.lock);
	if (!__acpi_aml_access_ok(flag)) {
		ret = -EFAULT;
		goto out;
	}
	if (!__acpi_aml_readable(circ, flag)) {
		ret = -EAGAIN;
		goto out;
	}
	acpi_aml_io.flags |= flag;
out:
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static void acpi_aml_unlock_fifo(unsigned long flag, bool wakeup)
{
	mutex_lock(&acpi_aml_io.lock);
	acpi_aml_io.flags &= ~flag;
	if (wakeup)
		wake_up_interruptible(&acpi_aml_io.wait);
	mutex_unlock(&acpi_aml_io.lock);
}

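/*
 * Store up to @len bytes of debugger output into the out FIFO, limited to
 * the contiguous space up to the end of the buffer.  Returns the number of
 * bytes stored or a negative error code; acpi_aml_write_log() loops over
 * the remainder.
 */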
static int acpi_aml_write_kern(const char *buf, int len)
{
	int ret;
	struct circ_buf *crc = &acpi_aml_io.out_crc;
	int n;
	char *p;

	ret = acpi_aml_lock_write(crc, ACPI_AML_OUT_KERN);
	if (ret < 0)
		return ret;
	/* sync tail before inserting logs */
	smp_mb();
	p = &crc->buf[crc->head];
	n = min(len, circ_space_to_end(crc));
	memcpy(p, buf, n);
	/* sync head after inserting logs */
	smp_wmb();
	crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
	acpi_aml_unlock_fifo(ACPI_AML_OUT_KERN, true);
	return n;
}

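/*
 * Pop a single command byte from the in FIFO.  Returns the byte value,
 * -EAGAIN if the FIFO is currently empty, or another negative error code.
 */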
static int acpi_aml_readb_kern(void)
{
	int ret;
	struct circ_buf *crc = &acpi_aml_io.in_crc;
	char *p;

	ret = acpi_aml_lock_read(crc, ACPI_AML_IN_KERN);
	if (ret < 0)
		return ret;
	/* sync head before removing cmds */
	smp_rmb();
	p = &crc->buf[crc->tail];
	ret = (int)*p;
	/* sync tail before inserting cmds */
	smp_mb();
	crc->tail = (crc->tail + 1) & (ACPI_AML_BUF_SIZE - 1);
	acpi_aml_unlock_fifo(ACPI_AML_IN_KERN, true);
	return ret;
}

/*
 * acpi_aml_write_log() - Capture debugger output
 * @msg: the debugger output
 *
 * This function should be used to implement acpi_os_printf() to filter out
 * the debugger output and store the output into the debugger interface
 * buffer. Returns the number of bytes stored or a negative error code.
 */
static ssize_t acpi_aml_write_log(const char *msg)
{
	int ret = 0;
	int count = 0, size = 0;

	if (!acpi_aml_initialized)
		return -ENODEV;
	if (msg)
		count = strlen(msg);
	while (count > 0) {
again:
		ret = acpi_aml_write_kern(msg + size, count);
		if (ret == -EAGAIN) {
			ret = wait_event_interruptible(acpi_aml_io.wait,
				acpi_aml_kern_writable());
			/*
			 * We need to retry when the condition
			 * becomes true.
			 */
			if (ret == 0)
				goto again;
			break;
		}
		if (ret < 0)
			break;
		size += ret;
		count -= ret;
	}
	return size > 0 ? size : ret;
}

/*
 * acpi_aml_read_cmd() - Capture debugger input
 * @msg: the debugger input
 * @count: the size of the debugger input
 *
 * This function should be used to implement acpi_os_get_line() to capture
 * the debugger input commands and store the input commands into the
 * debugger interface buffer. Returns the number of bytes stored or a
 * negative error code.
 */
static ssize_t acpi_aml_read_cmd(char *msg, size_t count)
{
	int ret = 0;
	int size = 0;

	/*
	 * This is ensured by the fact that the debugger thread is
	 * running, unless a bug is introduced.
	 */
	BUG_ON(!acpi_aml_initialized);
	while (count > 0) {
again:
		/*
		 * Check each input byte to find the end of the command.
		 */
		ret = acpi_aml_readb_kern();
		if (ret == -EAGAIN) {
			ret = wait_event_interruptible(acpi_aml_io.wait,
				acpi_aml_kern_readable());
			/*
			 * We need to retry when the condition becomes
			 * true.
			 */
			if (ret == 0)
				goto again;
		}
		if (ret < 0)
			break;
		*(msg + size) = (char)ret;
		size++;
		count--;
		if (ret == '\n') {
			/*
			 * acpi_os_get_line() requires a zero-terminated
			 * command string.
			 */
			*(msg + size - 1) = '\0';
			break;
		}
	}
	return size > 0 ? size : ret;
}

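/*
 * kthread body wrapping the debugger callback handed in by ACPICA via
 * acpi_os_execute()/acpi_aml_create_thread().  The usage counter lets
 * acpi_aml_release() wait for the callback to finish before the interface
 * is torn down.
 */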
static int acpi_aml_thread(void *unused)
{
	acpi_osd_exec_callback function = NULL;
	void *context;

	mutex_lock(&acpi_aml_io.lock);
	if (acpi_aml_io.function) {
		acpi_aml_io.usages++;
		function = acpi_aml_io.function;
		context = acpi_aml_io.context;
	}
	mutex_unlock(&acpi_aml_io.lock);

	if (function)
		function(context);

	mutex_lock(&acpi_aml_io.lock);
	acpi_aml_io.usages--;
	if (!__acpi_aml_used()) {
		acpi_aml_io.thread = NULL;
		wake_up(&acpi_aml_io.wait);
	}
	mutex_unlock(&acpi_aml_io.lock);

	return 0;
}

/*
 * acpi_aml_create_thread() - Create AML debugger thread
 * @function: the debugger thread callback
 * @context: the context to be passed to the debugger thread
 *
 * This function should be used to implement acpi_os_execute() which is
 * used by the ACPICA debugger to create the debugger thread.
 */
static int acpi_aml_create_thread(acpi_osd_exec_callback function, void *context)
{
	struct task_struct *t;

	mutex_lock(&acpi_aml_io.lock);
	acpi_aml_io.function = function;
	acpi_aml_io.context = context;
	mutex_unlock(&acpi_aml_io.lock);

	t = kthread_create(acpi_aml_thread, NULL, "aml");
	if (IS_ERR(t)) {
		pr_err("Failed to create AML debugger thread.\n");
		return PTR_ERR(t);
	}

	mutex_lock(&acpi_aml_io.lock);
	acpi_aml_io.thread = t;
	acpi_set_debugger_thread_id((acpi_thread_id)(unsigned long)t);
	wake_up_process(t);
	mutex_unlock(&acpi_aml_io.lock);
	return 0;
}

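/*
 * Print the command or single-step prompt and block until a complete
 * command line has been fetched through acpi_os_get_line(), which is
 * backed by acpi_aml_read_cmd() via the registered debugger ops.
 */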
static int acpi_aml_wait_command_ready(bool single_step,
				       char *buffer, size_t length)
{
	acpi_status status;

	if (single_step)
		acpi_os_printf("\n%1c ", ACPI_DEBUGGER_EXECUTE_PROMPT);
	else
		acpi_os_printf("\n%1c ", ACPI_DEBUGGER_COMMAND_PROMPT);

	status = acpi_os_get_line(buffer, length, NULL);
	if (ACPI_FAILURE(status))
		return -EINVAL;
	return 0;
}

static int acpi_aml_notify_command_complete(void)
{
	return 0;
}

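/*
 * Open semantics: the first (and only) reader initializes the ACPICA
 * debugger and marks the interface OPENED; additional write-only openers
 * are admitted only while the interface is OPENED and not being CLOSED.
 */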
static int acpi_aml_open(struct inode *inode, struct file *file)
{
	int ret = 0;
	acpi_status status;

	mutex_lock(&acpi_aml_io.lock);
	/*
	 * The debugger interface is being closed; no new user is allowed
	 * during this period.
	 */
	if (acpi_aml_io.flags & ACPI_AML_CLOSED) {
		ret = -EBUSY;
		goto err_lock;
	}
	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * Only one reader is allowed to initiate the debugger
		 * thread.
		 */
		if (acpi_aml_active_reader) {
			ret = -EBUSY;
			goto err_lock;
		} else {
			pr_debug("Opening debugger reader.\n");
			acpi_aml_active_reader = file;
		}
	} else {
		/*
		 * No writer is allowed unless the debugger thread is
		 * ready.
		 */
		if (!(acpi_aml_io.flags & ACPI_AML_OPENED)) {
			ret = -ENODEV;
			goto err_lock;
		}
	}
	if (acpi_aml_active_reader == file) {
		pr_debug("Opening debugger interface.\n");
		mutex_unlock(&acpi_aml_io.lock);

		pr_debug("Initializing debugger thread.\n");
		status = acpi_initialize_debugger();
		if (ACPI_FAILURE(status)) {
			pr_err("Failed to initialize debugger.\n");
			ret = -EINVAL;
			goto err_exit;
		}
		pr_debug("Debugger thread initialized.\n");

		mutex_lock(&acpi_aml_io.lock);
		acpi_aml_io.flags |= ACPI_AML_OPENED;
		acpi_aml_io.out_crc.head = acpi_aml_io.out_crc.tail = 0;
		acpi_aml_io.in_crc.head = acpi_aml_io.in_crc.tail = 0;
		pr_debug("Debugger interface opened.\n");
	}
	acpi_aml_io.users++;
err_lock:
	if (ret < 0) {
		if (acpi_aml_active_reader == file)
			acpi_aml_active_reader = NULL;
	}
	mutex_unlock(&acpi_aml_io.lock);
err_exit:
	return ret;
}

static int acpi_aml_release(struct inode *inode, struct file *file)
{
	mutex_lock(&acpi_aml_io.lock);
	acpi_aml_io.users--;
	if (file == acpi_aml_active_reader) {
		pr_debug("Closing debugger reader.\n");
		acpi_aml_active_reader = NULL;

		pr_debug("Closing debugger interface.\n");
		acpi_aml_io.flags |= ACPI_AML_CLOSED;

		/*
		 * Wake up all user space/kernel space blocked
		 * readers/writers.
		 */
		wake_up_interruptible(&acpi_aml_io.wait);
		mutex_unlock(&acpi_aml_io.lock);
		/*
		 * Wait for all user space/kernel space readers/writers to
		 * stop so that the ACPICA command loop of the debugger
		 * thread fails all of its command line reads after this
		 * point.
		 */
		wait_event(acpi_aml_io.wait, !acpi_aml_busy());

		/*
		 * Then try to terminate the debugger thread if it has not
		 * terminated yet.
		 */
		pr_debug("Terminating debugger thread.\n");
		acpi_terminate_debugger();
		wait_event(acpi_aml_io.wait, !acpi_aml_used());
		pr_debug("Debugger thread terminated.\n");

		mutex_lock(&acpi_aml_io.lock);
		acpi_aml_io.flags &= ~ACPI_AML_OPENED;
	}
	if (acpi_aml_io.users == 0) {
		pr_debug("Debugger interface closed.\n");
		acpi_aml_io.flags &= ~ACPI_AML_CLOSED;
	}
	mutex_unlock(&acpi_aml_io.lock);
	return 0;
}

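/*
 * Copy up to @len bytes of buffered debugger output to user space,
 * limited to the contiguous data up to the end of the buffer.  Returns
 * the number of bytes copied or a negative error code.
 */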
static int acpi_aml_read_user(char __user *buf, int len)
{
	int ret;
	struct circ_buf *crc = &acpi_aml_io.out_crc;
	int n;
	char *p;

	ret = acpi_aml_lock_read(crc, ACPI_AML_OUT_USER);
	if (ret < 0)
		return ret;
	/* sync head before removing logs */
	smp_rmb();
	p = &crc->buf[crc->tail];
	n = min(len, circ_count_to_end(crc));
	if (copy_to_user(buf, p, n)) {
		ret = -EFAULT;
		goto out;
	}
	/* sync tail after removing logs */
	smp_mb();
	crc->tail = (crc->tail + n) & (ACPI_AML_BUF_SIZE - 1);
	ret = n;
out:
	acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, ret >= 0);
	return ret;
}

static ssize_t acpi_aml_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
{
	int ret = 0;
	int size = 0;

	if (!count)
		return 0;
	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	while (count > 0) {
again:
		ret = acpi_aml_read_user(buf + size, count);
		if (ret == -EAGAIN) {
			if (file->f_flags & O_NONBLOCK)
				break;
			else {
				ret = wait_event_interruptible(acpi_aml_io.wait,
					acpi_aml_user_readable());
				/*
				 * We need to retry when the condition
				 * becomes true.
				 */
				if (ret == 0)
					goto again;
			}
		}
		if (ret < 0) {
			if (!acpi_aml_running())
				ret = 0;
			break;
		}
		if (ret) {
			size += ret;
			count -= ret;
			*ppos += ret;
			break;
		}
	}
	return size > 0 ? size : ret;
}

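/*
 * Copy up to @len command bytes from user space into the in FIFO, limited
 * to the contiguous space up to the end of the buffer.  Returns the number
 * of bytes stored or a negative error code.
 */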
static int acpi_aml_write_user(const char __user *buf, int len)
{
	int ret;
	struct circ_buf *crc = &acpi_aml_io.in_crc;
	int n;
	char *p;

	ret = acpi_aml_lock_write(crc, ACPI_AML_IN_USER);
	if (ret < 0)
		return ret;
	/* sync tail before inserting cmds */
	smp_mb();
	p = &crc->buf[crc->head];
	n = min(len, circ_space_to_end(crc));
	if (copy_from_user(p, buf, n)) {
		ret = -EFAULT;
		goto out;
	}
	/* sync head after inserting cmds */
	smp_wmb();
	crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
	ret = n;
out:
	acpi_aml_unlock_fifo(ACPI_AML_IN_USER, ret >= 0);
	return ret;
}

static ssize_t acpi_aml_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	int ret = 0;
	int size = 0;

	if (!count)
		return 0;
	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;

	while (count > 0) {
again:
		ret = acpi_aml_write_user(buf + size, count);
		if (ret == -EAGAIN) {
			if (file->f_flags & O_NONBLOCK)
				break;
			else {
				ret = wait_event_interruptible(acpi_aml_io.wait,
					acpi_aml_user_writable());
				/*
				 * We need to retry when the condition
				 * becomes true.
				 */
				if (ret == 0)
					goto again;
			}
		}
		if (ret < 0) {
			if (!acpi_aml_running())
				ret = 0;
			break;
		}
		if (ret) {
			size += ret;
			count -= ret;
			*ppos += ret;
		}
	}
	return size > 0 ? size : ret;
}

static __poll_t acpi_aml_poll(struct file *file, poll_table *wait)
{
	__poll_t masks = 0;

	poll_wait(file, &acpi_aml_io.wait, wait);
	if (acpi_aml_user_readable())
		masks |= EPOLLIN | EPOLLRDNORM;
	if (acpi_aml_user_writable())
		masks |= EPOLLOUT | EPOLLWRNORM;

	return masks;
}

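/*
 * File operations backing the <debugfs>/acpi/acpidbg node created in
 * acpi_aml_init(), and the debugger callbacks registered with the ACPI
 * core via acpi_register_debugger().
 */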
static const struct file_operations acpi_aml_operations = {
	.read		= acpi_aml_read,
	.write		= acpi_aml_write,
	.poll		= acpi_aml_poll,
	.open		= acpi_aml_open,
	.release	= acpi_aml_release,
	.llseek		= generic_file_llseek,
};

static const struct acpi_debugger_ops acpi_aml_debugger = {
	.create_thread		 = acpi_aml_create_thread,
	.read_cmd		 = acpi_aml_read_cmd,
	.write_log		 = acpi_aml_write_log,
	.wait_command_ready	 = acpi_aml_wait_command_ready,
	.notify_command_complete = acpi_aml_notify_command_complete,
};

int __init acpi_aml_init(void)
{
	int ret = 0;

	if (!acpi_debugfs_dir) {
		ret = -ENOENT;
		goto err_exit;
	}

	/* Initialize AML IO interface */
	mutex_init(&acpi_aml_io.lock);
	init_waitqueue_head(&acpi_aml_io.wait);
	acpi_aml_io.out_crc.buf = acpi_aml_io.out_buf;
	acpi_aml_io.in_crc.buf = acpi_aml_io.in_buf;
	acpi_aml_dentry = debugfs_create_file("acpidbg",
					      S_IFREG | S_IRUGO | S_IWUSR,
					      acpi_debugfs_dir, NULL,
					      &acpi_aml_operations);
	if (acpi_aml_dentry == NULL) {
		ret = -ENODEV;
		goto err_exit;
	}
	ret = acpi_register_debugger(THIS_MODULE, &acpi_aml_debugger);
	if (ret)
		goto err_fs;
	acpi_aml_initialized = true;

err_fs:
	if (ret) {
		debugfs_remove(acpi_aml_dentry);
		acpi_aml_dentry = NULL;
	}
err_exit:
	return ret;
}

void __exit acpi_aml_exit(void)
{
	if (acpi_aml_initialized) {
		acpi_unregister_debugger(&acpi_aml_debugger);
		if (acpi_aml_dentry) {
			debugfs_remove(acpi_aml_dentry);
			acpi_aml_dentry = NULL;
		}
		acpi_aml_initialized = false;
	}
}

module_init(acpi_aml_init);
module_exit(acpi_aml_exit);

MODULE_AUTHOR("Lv Zheng");
MODULE_DESCRIPTION("ACPI debugger userspace IO driver");
MODULE_LICENSE("GPL");