// SPDX-License-Identifier: GPL-2.0-only
/*
 * ACPI AML interfacing support
 *
 * Copyright (C) 2015, Intel Corporation
 * Authors: Lv Zheng <lv.zheng@intel.com>
 */

/* #define DEBUG */
#define pr_fmt(fmt) "ACPI: AML: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/circ_buf.h>
#include <linux/acpi.h>
#include "internal.h"

#define ACPI_AML_BUF_ALIGN	(sizeof (acpi_size))
#define ACPI_AML_BUF_SIZE	PAGE_SIZE

#define circ_count(circ) \
	(CIRC_CNT((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
#define circ_count_to_end(circ) \
	(CIRC_CNT_TO_END((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
#define circ_space(circ) \
	(CIRC_SPACE((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
#define circ_space_to_end(circ) \
	(CIRC_SPACE_TO_END((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))

#define ACPI_AML_OPENED		0x0001
#define ACPI_AML_CLOSED		0x0002
#define ACPI_AML_IN_USER	0x0004 /* user space is writing cmd */
#define ACPI_AML_IN_KERN	0x0008 /* kernel space is reading cmd */
#define ACPI_AML_OUT_USER	0x0010 /* user space is reading log */
#define ACPI_AML_OUT_KERN	0x0020 /* kernel space is writing log */
#define ACPI_AML_USER		(ACPI_AML_IN_USER | ACPI_AML_OUT_USER)
#define ACPI_AML_KERN		(ACPI_AML_IN_KERN | ACPI_AML_OUT_KERN)
#define ACPI_AML_BUSY		(ACPI_AML_USER | ACPI_AML_KERN)
#define ACPI_AML_OPEN		(ACPI_AML_OPENED | ACPI_AML_CLOSED)
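
/*
 * Data flow between the two circular buffers defined below:
 *
 *   user write() -> in_buf  -> acpi_aml_read_cmd()  -> ACPICA debugger
 *   ACPICA debugger -> acpi_aml_write_log() -> out_buf -> user read()
 *
 * The CIRC_* helpers from <linux/circ_buf.h> require a power-of-two buffer
 * size; ACPI_AML_BUF_SIZE is PAGE_SIZE and the head/tail indices are
 * wrapped with (ACPI_AML_BUF_SIZE - 1) in the FIFO accessors below.
 */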

struct acpi_aml_io {
	wait_queue_head_t wait;
	unsigned long flags;
	unsigned long users;
	struct mutex lock;
	struct task_struct *thread;
	char out_buf[ACPI_AML_BUF_SIZE] __aligned(ACPI_AML_BUF_ALIGN);
	struct circ_buf out_crc;
	char in_buf[ACPI_AML_BUF_SIZE] __aligned(ACPI_AML_BUF_ALIGN);
	struct circ_buf in_crc;
	acpi_osd_exec_callback function;
	void *context;
	unsigned long usages;
};

static struct acpi_aml_io acpi_aml_io;
static bool acpi_aml_initialized;
static struct file *acpi_aml_active_reader;
static struct dentry *acpi_aml_dentry;

static inline bool __acpi_aml_running(void)
{
	return acpi_aml_io.thread ? true : false;
}

static inline bool __acpi_aml_access_ok(unsigned long flag)
{
	/*
	 * When the debugger interface is in the opened state
	 * (OPENED && !CLOSED), the debugger buffers may be accessed from
	 * either user space or kernel space.
	 * In addition, for kernel space, only the debugger thread
	 * (matching thread ID) is allowed to access them.
	 */
	if (!(acpi_aml_io.flags & ACPI_AML_OPENED) ||
	    (acpi_aml_io.flags & ACPI_AML_CLOSED) ||
	    !__acpi_aml_running())
		return false;
	if ((flag & ACPI_AML_KERN) &&
	    current != acpi_aml_io.thread)
		return false;
	return true;
}

static inline bool __acpi_aml_readable(struct circ_buf *circ, unsigned long flag)
{
	/*
	 * No other read is in progress and there is data in the buffer
	 * available for read.
	 */
	if (!(acpi_aml_io.flags & flag) && circ_count(circ))
		return true;
	return false;
}

static inline bool __acpi_aml_writable(struct circ_buf *circ, unsigned long flag)
{
	/*
	 * No other write is in progress and there is buffer space
	 * available for write.
	 */
	if (!(acpi_aml_io.flags & flag) && circ_space(circ))
		return true;
	return false;
}

static inline bool __acpi_aml_busy(void)
{
	if (acpi_aml_io.flags & ACPI_AML_BUSY)
		return true;
	return false;
}

static inline bool __acpi_aml_opened(void)
{
	if (acpi_aml_io.flags & ACPI_AML_OPEN)
		return true;
	return false;
}

static inline bool __acpi_aml_used(void)
{
	return acpi_aml_io.usages ? true : false;
}

static inline bool acpi_aml_running(void)
{
	bool ret;

	mutex_lock(&acpi_aml_io.lock);
	ret = __acpi_aml_running();
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static bool acpi_aml_busy(void)
{
	bool ret;

	mutex_lock(&acpi_aml_io.lock);
	ret = __acpi_aml_busy();
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static bool acpi_aml_used(void)
{
	bool ret;

	/*
	 * The usage count is used to avoid races between starting and
	 * stopping the debugger thread.
	 */
	mutex_lock(&acpi_aml_io.lock);
	ret = __acpi_aml_used();
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static bool acpi_aml_kern_readable(void)
{
	bool ret;

	mutex_lock(&acpi_aml_io.lock);
	ret = !__acpi_aml_access_ok(ACPI_AML_IN_KERN) ||
	      __acpi_aml_readable(&acpi_aml_io.in_crc, ACPI_AML_IN_KERN);
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static bool acpi_aml_kern_writable(void)
{
	bool ret;

	mutex_lock(&acpi_aml_io.lock);
	ret = !__acpi_aml_access_ok(ACPI_AML_OUT_KERN) ||
	      __acpi_aml_writable(&acpi_aml_io.out_crc, ACPI_AML_OUT_KERN);
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static bool acpi_aml_user_readable(void)
{
	bool ret;

	mutex_lock(&acpi_aml_io.lock);
	ret = !__acpi_aml_access_ok(ACPI_AML_OUT_USER) ||
	      __acpi_aml_readable(&acpi_aml_io.out_crc, ACPI_AML_OUT_USER);
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static bool acpi_aml_user_writable(void)
{
	bool ret;

	mutex_lock(&acpi_aml_io.lock);
	ret = !__acpi_aml_access_ok(ACPI_AML_IN_USER) ||
	      __acpi_aml_writable(&acpi_aml_io.in_crc, ACPI_AML_IN_USER);
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static int acpi_aml_lock_write(struct circ_buf *circ, unsigned long flag)
{
	int ret = 0;

	mutex_lock(&acpi_aml_io.lock);
	if (!__acpi_aml_access_ok(flag)) {
		ret = -EFAULT;
		goto out;
	}
	if (!__acpi_aml_writable(circ, flag)) {
		ret = -EAGAIN;
		goto out;
	}
	acpi_aml_io.flags |= flag;
out:
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static int acpi_aml_lock_read(struct circ_buf *circ, unsigned long flag)
{
	int ret = 0;

	mutex_lock(&acpi_aml_io.lock);
	if (!__acpi_aml_access_ok(flag)) {
		ret = -EFAULT;
		goto out;
	}
	if (!__acpi_aml_readable(circ, flag)) {
		ret = -EAGAIN;
		goto out;
	}
	acpi_aml_io.flags |= flag;
out:
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static void acpi_aml_unlock_fifo(unsigned long flag, bool wakeup)
{
	mutex_lock(&acpi_aml_io.lock);
	acpi_aml_io.flags &= ~flag;
	if (wakeup)
		wake_up_interruptible(&acpi_aml_io.wait);
	mutex_unlock(&acpi_aml_io.lock);
}

static int acpi_aml_write_kern(const char *buf, int len)
{
	int ret;
	struct circ_buf *crc = &acpi_aml_io.out_crc;
	int n;
	char *p;

	ret = acpi_aml_lock_write(crc, ACPI_AML_OUT_KERN);
	if (ret < 0)
		return ret;
	/* sync tail before inserting logs */
	smp_mb();
	p = &crc->buf[crc->head];
	n = min(len, circ_space_to_end(crc));
	memcpy(p, buf, n);
	/* sync head after inserting logs */
	smp_wmb();
	crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
	acpi_aml_unlock_fifo(ACPI_AML_OUT_KERN, true);
	return n;
}

static int acpi_aml_readb_kern(void)
{
	int ret;
	struct circ_buf *crc = &acpi_aml_io.in_crc;
	char *p;

	ret = acpi_aml_lock_read(crc, ACPI_AML_IN_KERN);
	if (ret < 0)
		return ret;
	/* sync head before removing cmds */
	smp_rmb();
	p = &crc->buf[crc->tail];
	ret = (int)*p;
	/* sync tail after removing cmds */
	smp_mb();
	crc->tail = (crc->tail + 1) & (ACPI_AML_BUF_SIZE - 1);
	acpi_aml_unlock_fifo(ACPI_AML_IN_KERN, true);
	return ret;
}
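
/*
 * Producer/consumer pairing for the barriers used above and in the
 * user-space transfer helpers further below:
 *   out_crc: produced by acpi_aml_write_kern() (debugger log),
 *            consumed by acpi_aml_read_user() (user read()).
 *   in_crc:  produced by acpi_aml_write_user() (user write()),
 *            consumed by acpi_aml_readb_kern() (debugger command input).
 */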

/*
 * acpi_aml_write_log() - Capture debugger output
 * @msg: the debugger output
 *
 * This function should be used to implement acpi_os_printf() to filter out
 * the debugger output and store the output into the debugger interface
 * buffer. Return the size of stored logs or errno.
 */
static ssize_t acpi_aml_write_log(const char *msg)
{
	int ret = 0;
	int count = 0, size = 0;

	if (!acpi_aml_initialized)
		return -ENODEV;
	if (msg)
		count = strlen(msg);
	while (count > 0) {
again:
		ret = acpi_aml_write_kern(msg + size, count);
		if (ret == -EAGAIN) {
			ret = wait_event_interruptible(acpi_aml_io.wait,
				acpi_aml_kern_writable());
			/*
			 * We need to retry when the condition
			 * becomes true.
			 */
			if (ret == 0)
				goto again;
			break;
		}
		if (ret < 0)
			break;
		size += ret;
		count -= ret;
	}
	return size > 0 ? size : ret;
}

/*
 * acpi_aml_read_cmd() - Capture debugger input
 * @msg: the buffer receiving the debugger input
 * @count: the size of the buffer
 *
 * This function should be used to implement acpi_os_get_line() to capture
 * the debugger input commands and store them into the debugger interface
 * buffer. Return the size of stored commands or errno.
 */
static ssize_t acpi_aml_read_cmd(char *msg, size_t count)
{
	int ret = 0;
	int size = 0;

	/*
	 * Initialization is guaranteed here by the fact that the debugger
	 * thread is running, unless a bug is introduced elsewhere.
	 */
	BUG_ON(!acpi_aml_initialized);
	while (count > 0) {
again:
		/*
		 * Check each input byte to find the end of the command.
		 */
		ret = acpi_aml_readb_kern();
		if (ret == -EAGAIN) {
			ret = wait_event_interruptible(acpi_aml_io.wait,
				acpi_aml_kern_readable());
			/*
			 * We need to retry when the condition becomes
			 * true.
			 */
			if (ret == 0)
				goto again;
		}
		if (ret < 0)
			break;
		*(msg + size) = (char)ret;
		size++;
		count--;
		if (ret == '\n') {
			/*
			 * acpi_os_get_line() requires a zero terminated
			 * command string.
			 */
			*(msg + size - 1) = '\0';
			break;
		}
	}
	return size > 0 ? size : ret;
}
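
/*
 * The "usages" counter below is bumped while the ACPICA debugger callback
 * runs in the kernel thread and dropped when it returns, so that
 * acpi_aml_release() can wait for acpi_aml_used() to become false before
 * the interface is torn down.
 */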

static int acpi_aml_thread(void *unused)
{
	acpi_osd_exec_callback function = NULL;
	void *context;

	mutex_lock(&acpi_aml_io.lock);
	if (acpi_aml_io.function) {
		acpi_aml_io.usages++;
		function = acpi_aml_io.function;
		context = acpi_aml_io.context;
	}
	mutex_unlock(&acpi_aml_io.lock);

	if (function)
		function(context);

	mutex_lock(&acpi_aml_io.lock);
	acpi_aml_io.usages--;
	if (!__acpi_aml_used()) {
		acpi_aml_io.thread = NULL;
		wake_up(&acpi_aml_io.wait);
	}
	mutex_unlock(&acpi_aml_io.lock);

	return 0;
}

/*
 * acpi_aml_create_thread() - Create AML debugger thread
 * @function: the debugger thread callback
 * @context: the context to be passed to the debugger thread
 *
 * This function should be used to implement acpi_os_execute() which is
 * used by the ACPICA debugger to create the debugger thread.
 */
static int acpi_aml_create_thread(acpi_osd_exec_callback function, void *context)
{
	struct task_struct *t;

	mutex_lock(&acpi_aml_io.lock);
	acpi_aml_io.function = function;
	acpi_aml_io.context = context;
	mutex_unlock(&acpi_aml_io.lock);

	t = kthread_create(acpi_aml_thread, NULL, "aml");
	if (IS_ERR(t)) {
		pr_err("Failed to create AML debugger thread.\n");
		return PTR_ERR(t);
	}

	mutex_lock(&acpi_aml_io.lock);
	acpi_aml_io.thread = t;
	acpi_set_debugger_thread_id((acpi_thread_id)(unsigned long)t);
	wake_up_process(t);
	mutex_unlock(&acpi_aml_io.lock);
	return 0;
}

static int acpi_aml_wait_command_ready(bool single_step,
				       char *buffer, size_t length)
{
	acpi_status status;

	if (single_step)
		acpi_os_printf("\n%1c ", ACPI_DEBUGGER_EXECUTE_PROMPT);
	else
		acpi_os_printf("\n%1c ", ACPI_DEBUGGER_COMMAND_PROMPT);

	status = acpi_os_get_line(buffer, length, NULL);
	if (ACPI_FAILURE(status))
		return -EINVAL;
	return 0;
}

static int acpi_aml_notify_command_complete(void)
{
	return 0;
}
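
/*
 * Open semantics: any open that is not write-only may become the single
 * "active reader", which initializes the ACPICA debugger and resets both
 * FIFOs. Write-only opens are only admitted once the interface is in the
 * OPENED state, and all opens are rejected with -EBUSY while CLOSED is set
 * (i.e. while teardown is in progress).
 */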

static int acpi_aml_open(struct inode *inode, struct file *file)
{
	int ret = 0;
	acpi_status status;

	mutex_lock(&acpi_aml_io.lock);
	/*
	 * The debugger interface is being closed; no new users are
	 * allowed during this period.
	 */
	if (acpi_aml_io.flags & ACPI_AML_CLOSED) {
		ret = -EBUSY;
		goto err_lock;
	}
	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * Only one reader is allowed to initiate the debugger
		 * thread.
		 */
		if (acpi_aml_active_reader) {
			ret = -EBUSY;
			goto err_lock;
		} else {
			pr_debug("Opening debugger reader.\n");
			acpi_aml_active_reader = file;
		}
	} else {
		/*
		 * No writer is allowed unless the debugger thread is
		 * ready.
		 */
		if (!(acpi_aml_io.flags & ACPI_AML_OPENED)) {
			ret = -ENODEV;
			goto err_lock;
		}
	}
	if (acpi_aml_active_reader == file) {
		pr_debug("Opening debugger interface.\n");
		mutex_unlock(&acpi_aml_io.lock);

		pr_debug("Initializing debugger thread.\n");
		status = acpi_initialize_debugger();
		if (ACPI_FAILURE(status)) {
			pr_err("Failed to initialize debugger.\n");
			ret = -EINVAL;
			goto err_exit;
		}
		pr_debug("Debugger thread initialized.\n");

		mutex_lock(&acpi_aml_io.lock);
		acpi_aml_io.flags |= ACPI_AML_OPENED;
		acpi_aml_io.out_crc.head = acpi_aml_io.out_crc.tail = 0;
		acpi_aml_io.in_crc.head = acpi_aml_io.in_crc.tail = 0;
		pr_debug("Debugger interface opened.\n");
	}
	acpi_aml_io.users++;
err_lock:
	if (ret < 0) {
		if (acpi_aml_active_reader == file)
			acpi_aml_active_reader = NULL;
	}
	mutex_unlock(&acpi_aml_io.lock);
err_exit:
	return ret;
}

static int acpi_aml_release(struct inode *inode, struct file *file)
{
	mutex_lock(&acpi_aml_io.lock);
	acpi_aml_io.users--;
	if (file == acpi_aml_active_reader) {
		pr_debug("Closing debugger reader.\n");
		acpi_aml_active_reader = NULL;

		pr_debug("Closing debugger interface.\n");
		acpi_aml_io.flags |= ACPI_AML_CLOSED;

		/*
		 * Wake up all blocked user space/kernel space
		 * readers/writers.
		 */
		wake_up_interruptible(&acpi_aml_io.wait);
		mutex_unlock(&acpi_aml_io.lock);
		/*
		 * Wait for all user space/kernel space readers/writers to
		 * stop so that the ACPICA command loop of the debugger
		 * thread fails all of its command line reads after this
		 * point.
		 */
		wait_event(acpi_aml_io.wait, !acpi_aml_busy());

		/*
		 * Then try to terminate the debugger thread if it has not
		 * terminated yet.
		 */
		pr_debug("Terminating debugger thread.\n");
		acpi_terminate_debugger();
		wait_event(acpi_aml_io.wait, !acpi_aml_used());
		pr_debug("Debugger thread terminated.\n");

		mutex_lock(&acpi_aml_io.lock);
		acpi_aml_io.flags &= ~ACPI_AML_OPENED;
	}
	if (acpi_aml_io.users == 0) {
		pr_debug("Debugger interface closed.\n");
		acpi_aml_io.flags &= ~ACPI_AML_CLOSED;
	}
	mutex_unlock(&acpi_aml_io.lock);
	return 0;
}
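
/*
 * The helpers below are the user-space counterparts of acpi_aml_write_kern()
 * and acpi_aml_readb_kern(): they copy between the FIFOs and user memory and
 * may transfer fewer bytes than requested; the read()/write() loops below
 * handle partial transfers and O_NONBLOCK.
 */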

static int acpi_aml_read_user(char __user *buf, int len)
{
	int ret;
	struct circ_buf *crc = &acpi_aml_io.out_crc;
	int n;
	char *p;

	ret = acpi_aml_lock_read(crc, ACPI_AML_OUT_USER);
	if (ret < 0)
		return ret;
	/* sync head before removing logs */
	smp_rmb();
	p = &crc->buf[crc->tail];
	n = min(len, circ_count_to_end(crc));
	if (copy_to_user(buf, p, n)) {
		ret = -EFAULT;
		goto out;
	}
	/* sync tail after removing logs */
	smp_mb();
	crc->tail = (crc->tail + n) & (ACPI_AML_BUF_SIZE - 1);
	ret = n;
out:
	acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, ret >= 0);
	return ret;
}

static ssize_t acpi_aml_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
{
	int ret = 0;
	int size = 0;

	if (!count)
		return 0;
	if (!access_ok(buf, count))
		return -EFAULT;

	while (count > 0) {
again:
		ret = acpi_aml_read_user(buf + size, count);
		if (ret == -EAGAIN) {
			if (file->f_flags & O_NONBLOCK)
				break;
			else {
				ret = wait_event_interruptible(acpi_aml_io.wait,
					acpi_aml_user_readable());
				/*
				 * We need to retry when the condition
				 * becomes true.
				 */
				if (ret == 0)
					goto again;
			}
		}
		if (ret < 0) {
			if (!acpi_aml_running())
				ret = 0;
			break;
		}
		if (ret) {
			size += ret;
			count -= ret;
			*ppos += ret;
			break;
		}
	}
	return size > 0 ? size : ret;
}

static int acpi_aml_write_user(const char __user *buf, int len)
{
	int ret;
	struct circ_buf *crc = &acpi_aml_io.in_crc;
	int n;
	char *p;

	ret = acpi_aml_lock_write(crc, ACPI_AML_IN_USER);
	if (ret < 0)
		return ret;
	/* sync tail before inserting cmds */
	smp_mb();
	p = &crc->buf[crc->head];
	n = min(len, circ_space_to_end(crc));
	if (copy_from_user(p, buf, n)) {
		ret = -EFAULT;
		goto out;
	}
	/* sync head after inserting cmds */
	smp_wmb();
	crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
	ret = n;
out:
	acpi_aml_unlock_fifo(ACPI_AML_IN_USER, ret >= 0);
	/* propagate -EFAULT from copy_from_user() instead of returning n */
	return ret;
}

static ssize_t acpi_aml_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	int ret = 0;
	int size = 0;

	if (!count)
		return 0;
	if (!access_ok(buf, count))
		return -EFAULT;

	while (count > 0) {
again:
		ret = acpi_aml_write_user(buf + size, count);
		if (ret == -EAGAIN) {
			if (file->f_flags & O_NONBLOCK)
				break;
			else {
				ret = wait_event_interruptible(acpi_aml_io.wait,
					acpi_aml_user_writable());
				/*
				 * We need to retry when the condition
				 * becomes true.
				 */
				if (ret == 0)
					goto again;
			}
		}
		if (ret < 0) {
			if (!acpi_aml_running())
				ret = 0;
			break;
		}
		if (ret) {
			size += ret;
			count -= ret;
			*ppos += ret;
		}
	}
	return size > 0 ? size : ret;
}

static __poll_t acpi_aml_poll(struct file *file, poll_table *wait)
{
	__poll_t masks = 0;

	poll_wait(file, &acpi_aml_io.wait, wait);
	if (acpi_aml_user_readable())
		masks |= EPOLLIN | EPOLLRDNORM;
	if (acpi_aml_user_writable())
		masks |= EPOLLOUT | EPOLLWRNORM;

	return masks;
}

static const struct file_operations acpi_aml_operations = {
	.read		= acpi_aml_read,
	.write		= acpi_aml_write,
	.poll		= acpi_aml_poll,
	.open		= acpi_aml_open,
	.release	= acpi_aml_release,
	.llseek		= generic_file_llseek,
};

static const struct acpi_debugger_ops acpi_aml_debugger = {
	.create_thread		 = acpi_aml_create_thread,
	.read_cmd		 = acpi_aml_read_cmd,
	.write_log		 = acpi_aml_write_log,
	.wait_command_ready	 = acpi_aml_wait_command_ready,
	.notify_command_complete = acpi_aml_notify_command_complete,
};
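
/*
 * Typical usage (an illustration, assuming debugfs is mounted at
 * /sys/kernel/debug and acpi_debugfs_dir corresponds to the "acpi"
 * directory there):
 *
 *   fd = open("/sys/kernel/debug/acpi/acpidbg", O_RDWR);  starts the debugger
 *   write(fd, "help\n", 5);                               queue a command line
 *   read(fd, buf, sizeof(buf));                           fetch debugger output
 *
 * The userspace acpidbg tool shipped under tools/power/acpi is built around
 * this interface.
 */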

static int __init acpi_aml_init(void)
{
	int ret;

	/* Initialize AML IO interface */
	mutex_init(&acpi_aml_io.lock);
	init_waitqueue_head(&acpi_aml_io.wait);
	acpi_aml_io.out_crc.buf = acpi_aml_io.out_buf;
	acpi_aml_io.in_crc.buf = acpi_aml_io.in_buf;

	acpi_aml_dentry = debugfs_create_file("acpidbg",
					      S_IFREG | S_IRUGO | S_IWUSR,
					      acpi_debugfs_dir, NULL,
					      &acpi_aml_operations);

	ret = acpi_register_debugger(THIS_MODULE, &acpi_aml_debugger);
	if (ret) {
		debugfs_remove(acpi_aml_dentry);
		acpi_aml_dentry = NULL;
		return ret;
	}

	acpi_aml_initialized = true;
	return 0;
}

static void __exit acpi_aml_exit(void)
{
	if (acpi_aml_initialized) {
		acpi_unregister_debugger(&acpi_aml_debugger);
		debugfs_remove(acpi_aml_dentry);
		acpi_aml_dentry = NULL;
		acpi_aml_initialized = false;
	}
}

module_init(acpi_aml_init);
module_exit(acpi_aml_exit);

MODULE_AUTHOR("Lv Zheng");
MODULE_DESCRIPTION("ACPI debugger userspace IO driver");
MODULE_LICENSE("GPL");