// SPDX-License-Identifier: GPL-2.0
/*
 * ACPI event handling for Wilco Embedded Controller
 *
 * Copyright 2019 Google LLC
 *
 * The Wilco Embedded Controller can create custom events that
 * are not handled as standard ACPI objects. These events can
 * contain information about changes in EC controlled features,
 * such as errors and events in the dock or display. For example,
 * an event is triggered if the dock is plugged into a display
 * incorrectly. These events are needed for telemetry and
 * diagnostics, and possibly for alerting the user.
 *
 * These events are triggered by the EC with an ACPI Notify(0x90),
 * and then the BIOS reads the event buffer from EC RAM via an
 * ACPI method. When the OS receives these events via ACPI,
 * it passes them along to this driver. The events are put into
 * a queue which can be read by a userspace daemon via a char device
 * that implements read() and poll(). The event queue acts as a
 * circular buffer of bounded size (64 events by default, see the
 * queue_size module parameter), so if there are no userspace
 * consumers the kernel will not run out of memory. The char device
 * will appear at /dev/wilco_event{n}, where n is some small
 * non-negative integer, starting from 0. Standard ACPI events such
 * as the battery getting plugged/unplugged can also come through
 * this path, but they are dealt with via other paths, and are
 * ignored here.
 *
 * To test, you can tail the binary data with
 * $ cat /dev/wilco_event0 | hexdump -ve '1/1 "%x\n"'
 * and then create an event by plugging/unplugging the battery.
 */
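
/*
 * A minimal userspace reader might look like the sketch below. This is
 * illustrative only and is not part of the driver; it assumes only the
 * interface described above: read() must be passed a buffer of at least
 * EC_ACPI_MAX_EVENT_SIZE (16) bytes and returns exactly one event.
 *
 *	#include <fcntl.h>
 *	#include <poll.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char ev[16];	// >= EC_ACPI_MAX_EVENT_SIZE
 *		int fd = open("/dev/wilco_event0", O_RDONLY);
 *		struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *		if (fd < 0)
 *			return 1;
 *		while (poll(&pfd, 1, -1) > 0) {
 *			ssize_t n = read(fd, ev, sizeof(ev));
 *
 *			if (n <= 0)
 *				break;
 *			// First u16 is the size word, second is the type word,
 *			// followed by (size - 1) 16-bit data words.
 *			printf("event: %zd bytes\n", n);
 *		}
 *		return 0;
 *	}
 */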

#include <linux/acpi.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/wait.h>

/* ACPI Notify event code indicating event data is available. */
#define EC_ACPI_NOTIFY_EVENT		0x90
/* ACPI method to execute to retrieve event data buffer from the EC. */
#define EC_ACPI_GET_EVENT		"QSET"
/* Maximum number of words in event data returned by the EC. */
#define EC_ACPI_MAX_EVENT_WORDS		6
#define EC_ACPI_MAX_EVENT_SIZE \
	(sizeof(struct ec_event) + (EC_ACPI_MAX_EVENT_WORDS) * sizeof(u16))

/* Node will appear in /dev/EVENT_DEV_NAME */
#define EVENT_DEV_NAME		"wilco_event"
#define EVENT_CLASS_NAME	EVENT_DEV_NAME
#define DRV_NAME		EVENT_DEV_NAME
#define EVENT_DEV_NAME_FMT	(EVENT_DEV_NAME "%d")
static struct class event_class = {
	.name = EVENT_CLASS_NAME,
};

/* Keep track of all the device numbers used. */
#define EVENT_MAX_DEV 128
static int event_major;
static DEFINE_IDA(event_ida);

/* Default size of the circular queue of events. */
#define MAX_NUM_EVENTS 64

/**
 * struct ec_event - Extended event returned by the EC.
 * @size: Number of 16-bit words in the structure after the size word.
 * @type: Extended event type, meaningless for us.
 * @event: Event data words. Max count is %EC_ACPI_MAX_EVENT_WORDS.
 */
struct ec_event {
	u16 size;
	u16 type;
	u16 event[];
} __packed;

#define ec_event_num_words(ev) (ev->size - 1)
#define ec_event_size(ev) (sizeof(*ev) + (ec_event_num_words(ev) * sizeof(u16)))
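
/*
 * Layout example (illustrative only): an event carrying three data words
 * arrives as five packed u16s { size = 4, type, w0, w1, w2 }, since @size
 * counts every word after itself. ec_event_num_words() is then 3 and
 * ec_event_size() is 4 + 3 * 2 = 10 bytes.
 */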

/**
 * struct ec_event_queue - Circular queue for events.
 * @capacity: Number of elements the queue can hold.
 * @head: Next index to write to.
 * @tail: Next index to read from.
 * @entries: Array of events.
 */
struct ec_event_queue {
	int capacity;
	int head;
	int tail;
	struct ec_event *entries[];
};

/* Maximum number of events to store in the queue, settable at module load */
static int queue_size = MAX_NUM_EVENTS;
module_param(queue_size, int, 0644);

static struct ec_event_queue *event_queue_new(int capacity)
{
	struct ec_event_queue *q;

	q = kzalloc(struct_size(q, entries, capacity), GFP_KERNEL);
	if (!q)
		return NULL;

	q->capacity = capacity;

	return q;
}

static inline bool event_queue_empty(struct ec_event_queue *q)
{
	/*
	 * head==tail when both full and empty, but entries[head]
	 * is NULL only when empty.
	 */
	return q->head == q->tail && !q->entries[q->head];
}

static inline bool event_queue_full(struct ec_event_queue *q)
{
	/*
	 * head==tail when both full and empty, but entries[head]
	 * is non-NULL only when full.
	 */
	return q->head == q->tail && q->entries[q->head];
}

static struct ec_event *event_queue_pop(struct ec_event_queue *q)
{
	struct ec_event *ev;

	if (event_queue_empty(q))
		return NULL;

	ev = q->entries[q->tail];
	q->entries[q->tail] = NULL;
	q->tail = (q->tail + 1) % q->capacity;

	return ev;
}

/*
 * If full, overwrite the oldest event and return it so the caller
 * can kfree it. If not full, return NULL.
 */
static struct ec_event *event_queue_push(struct ec_event_queue *q,
					 struct ec_event *ev)
{
	struct ec_event *popped = NULL;

	if (event_queue_full(q))
		popped = event_queue_pop(q);
	q->entries[q->head] = ev;
	q->head = (q->head + 1) % q->capacity;

	return popped;
}

static void event_queue_free(struct ec_event_queue *q)
{
	struct ec_event *event;

	while ((event = event_queue_pop(q)) != NULL)
		kfree(event);

	kfree(q);
}

/**
 * struct event_device_data - Data for a Wilco EC device that responds to ACPI.
 * @events: Circular queue of EC events to be provided to userspace.
 * @queue_lock: Protect the queue from simultaneous read/writes.
 * @wq: Wait queue to notify processes when events are available or the
 *	device has been removed.
 * @cdev: Char dev that userspace reads() and polls() from.
 * @dev: Device associated with the %cdev.
 * @exist: Has the device not been removed? Once a device has been removed,
 *	reads, polls, and new opens will fail.
 * @available: Guarantee only one client can open() the file and read from
 *	the queue.
 *
 * There will be one of these structs for each ACPI device registered. This
 * data is the queue of events received from ACPI that still need to be read
 * by userspace, the device and char device that userspace is using, a wait
 * queue used to notify different threads when something has changed, plus a
 * flag indicating whether the ACPI device has been removed.
 */
struct event_device_data {
	struct ec_event_queue *events;
	spinlock_t queue_lock;
	wait_queue_head_t wq;
	struct device dev;
	struct cdev cdev;
	bool exist;
	atomic_t available;
};

/**
 * enqueue_events() - Place EC events in queue to be read by userspace.
 * @adev: Device the events came from.
 * @buf: Buffer of event data.
 * @length: Length of event data buffer.
 *
 * @buf contains a number of ec_events, packed one after the other.
 * Each ec_event is of variable length. Start with the first event, copy it
 * into a persistent ec_event, store that entry in the queue, move on
 * to the next ec_event in @buf, and repeat.
 *
 * Return: 0 on success or negative error code on failure.
 */
static int enqueue_events(struct acpi_device *adev, const u8 *buf, u32 length)
{
	struct event_device_data *dev_data = adev->driver_data;
	struct ec_event *event, *queue_event, *old_event;
	size_t num_words, event_size;
	u32 offset = 0;

	while (offset < length) {
		event = (struct ec_event *)(buf + offset);

		num_words = ec_event_num_words(event);
		event_size = ec_event_size(event);
		if (num_words > EC_ACPI_MAX_EVENT_WORDS) {
			dev_err(&adev->dev, "Too many event words: %zu > %d\n",
				num_words, EC_ACPI_MAX_EVENT_WORDS);
			return -EOVERFLOW;
		}

		/* Ensure event does not overflow the available buffer */
		if ((offset + event_size) > length) {
			dev_err(&adev->dev, "Event exceeds buffer: %zu > %u\n",
				offset + event_size, length);
			return -EOVERFLOW;
		}

		/* Point to the next event in the buffer */
		offset += event_size;

		/* Copy event into the queue */
		queue_event = kmemdup(event, event_size, GFP_KERNEL);
		if (!queue_event)
			return -ENOMEM;
		spin_lock(&dev_data->queue_lock);
		old_event = event_queue_push(dev_data->events, queue_event);
		spin_unlock(&dev_data->queue_lock);
		kfree(old_event);
		wake_up_interruptible(&dev_data->wq);
	}

	return 0;
}

/**
 * event_device_notify() - Callback when EC generates an event over ACPI.
 * @adev: The device that the event is coming from.
 * @value: Value passed to Notify() in ACPI.
 *
 * This function will read the events from the device and enqueue them.
 */
static void event_device_notify(struct acpi_device *adev, u32 value)
{
	struct acpi_buffer event_buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	acpi_status status;

	if (value != EC_ACPI_NOTIFY_EVENT) {
		dev_err(&adev->dev, "Invalid event: 0x%08x\n", value);
		return;
	}

	/* Execute ACPI method to get event data buffer. */
	status = acpi_evaluate_object(adev->handle, EC_ACPI_GET_EVENT,
				      NULL, &event_buffer);
	if (ACPI_FAILURE(status)) {
		dev_err(&adev->dev, "Error executing ACPI method %s()\n",
			EC_ACPI_GET_EVENT);
		return;
	}

	obj = (union acpi_object *)event_buffer.pointer;
	if (!obj) {
		dev_err(&adev->dev, "Nothing returned from %s()\n",
			EC_ACPI_GET_EVENT);
		return;
	}
	if (obj->type != ACPI_TYPE_BUFFER) {
		dev_err(&adev->dev, "Invalid object returned from %s()\n",
			EC_ACPI_GET_EVENT);
		kfree(obj);
		return;
	}
	if (obj->buffer.length < sizeof(struct ec_event)) {
		dev_err(&adev->dev, "Invalid buffer length %u from %s()\n",
			obj->buffer.length, EC_ACPI_GET_EVENT);
		kfree(obj);
		return;
	}

	enqueue_events(adev, obj->buffer.pointer, obj->buffer.length);
	kfree(obj);
}

static int event_open(struct inode *inode, struct file *filp)
{
	struct event_device_data *dev_data;

	dev_data = container_of(inode->i_cdev, struct event_device_data, cdev);
	if (!dev_data->exist)
		return -ENODEV;

	if (atomic_cmpxchg(&dev_data->available, 1, 0) == 0)
		return -EBUSY;

	/* Increase refcount on device so dev_data is not freed */
	get_device(&dev_data->dev);
	stream_open(inode, filp);
	filp->private_data = dev_data;

	return 0;
}

static __poll_t event_poll(struct file *filp, poll_table *wait)
{
	struct event_device_data *dev_data = filp->private_data;
	__poll_t mask = 0;

	poll_wait(filp, &dev_data->wq, wait);
	if (!dev_data->exist)
		return EPOLLHUP;
	if (!event_queue_empty(dev_data->events))
		mask |= EPOLLIN | EPOLLRDNORM | EPOLLPRI;
	return mask;
}

/**
 * event_read() - Callback for passing event data to userspace via read().
 * @filp: The file we are reading from.
 * @buf: Pointer to userspace buffer to fill with one event.
 * @count: Number of bytes requested. Must be at least EC_ACPI_MAX_EVENT_SIZE.
 * @pos: File position pointer, irrelevant since we don't support seeking.
 *
 * Removes the first event from the queue and places it in the passed buffer.
 *
 * If there are no events in the queue, then one of two things happens,
 * depending on if the file was opened in nonblocking mode: If in nonblocking
 * mode, then return -EAGAIN to say there's no data. If in blocking mode, then
 * block until an event is available.
 *
 * Return: Number of bytes placed in buffer, negative error code on failure.
 */
static ssize_t event_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct event_device_data *dev_data = filp->private_data;
	struct ec_event *event;
	ssize_t n_bytes_written = 0;
	int err;

	/* We will only give them the entire event at once */
	if (count != 0 && count < EC_ACPI_MAX_EVENT_SIZE)
		return -EINVAL;

	spin_lock(&dev_data->queue_lock);
	while (event_queue_empty(dev_data->events)) {
		spin_unlock(&dev_data->queue_lock);
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		err = wait_event_interruptible(dev_data->wq,
					!event_queue_empty(dev_data->events) ||
					!dev_data->exist);
		if (err)
			return err;

		/* Device was removed as we waited? */
		if (!dev_data->exist)
			return -ENODEV;
		spin_lock(&dev_data->queue_lock);
	}
	event = event_queue_pop(dev_data->events);
	spin_unlock(&dev_data->queue_lock);
	n_bytes_written = ec_event_size(event);
	if (copy_to_user(buf, event, n_bytes_written))
		n_bytes_written = -EFAULT;
	kfree(event);

	return n_bytes_written;
}

static int event_release(struct inode *inode, struct file *filp)
{
	struct event_device_data *dev_data = filp->private_data;

	atomic_set(&dev_data->available, 1);
	put_device(&dev_data->dev);

	return 0;
}

static const struct file_operations event_fops = {
	.open = event_open,
	.poll = event_poll,
	.read = event_read,
	.release = event_release,
	.llseek = no_llseek,
	.owner = THIS_MODULE,
};

/**
 * free_device_data() - Callback to free the event_device_data structure.
 * @d: The device embedded in our device data, which we have been ref counting.
 *
 * This is called only after event_device_remove() has been called and all
 * userspace programs have called event_release() on all the open file
 * descriptors.
 */
static void free_device_data(struct device *d)
{
	struct event_device_data *dev_data;

	dev_data = container_of(d, struct event_device_data, dev);
	event_queue_free(dev_data->events);
	kfree(dev_data);
}

static void hangup_device(struct event_device_data *dev_data)
{
	dev_data->exist = false;
	/* Wake up the waiting processes so they can close. */
	wake_up_interruptible(&dev_data->wq);
	put_device(&dev_data->dev);
}

/**
 * event_device_add() - Callback when creating a new device.
 * @adev: ACPI device that we will be receiving events from.
 *
 * This finds a free minor number for the device, allocates and initializes
 * some device data, and creates a new device and char dev node.
 *
 * The device data is freed in free_device_data(), which is called when
 * %dev_data->dev is release()ed. This happens after all references to
 * %dev_data->dev are dropped, which happens once both event_device_remove()
 * has been called and every open()ed file descriptor has been release()ed.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int event_device_add(struct acpi_device *adev)
{
	struct event_device_data *dev_data;
	int error, minor;

	minor = ida_alloc_max(&event_ida, EVENT_MAX_DEV - 1, GFP_KERNEL);
	if (minor < 0) {
		error = minor;
		dev_err(&adev->dev, "Failed to find minor number: %d\n", error);
		return error;
	}

	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data) {
		error = -ENOMEM;
		goto free_minor;
	}

	/* Initialize the device data. */
	adev->driver_data = dev_data;
	dev_data->events = event_queue_new(queue_size);
	if (!dev_data->events) {
		kfree(dev_data);
		error = -ENOMEM;
		goto free_minor;
	}
	spin_lock_init(&dev_data->queue_lock);
	init_waitqueue_head(&dev_data->wq);
	dev_data->exist = true;
	atomic_set(&dev_data->available, 1);

	/* Initialize the device. */
	dev_data->dev.devt = MKDEV(event_major, minor);
	dev_data->dev.class = &event_class;
	dev_data->dev.release = free_device_data;
	dev_set_name(&dev_data->dev, EVENT_DEV_NAME_FMT, minor);
	device_initialize(&dev_data->dev);

	/* Initialize the character device, and add it to userspace. */
	cdev_init(&dev_data->cdev, &event_fops);
	error = cdev_device_add(&dev_data->cdev, &dev_data->dev);
	if (error)
		goto free_dev_data;

	return 0;

free_dev_data:
	hangup_device(dev_data);
free_minor:
	ida_simple_remove(&event_ida, minor);
	return error;
}

static void event_device_remove(struct acpi_device *adev)
{
	struct event_device_data *dev_data = adev->driver_data;

	cdev_device_del(&dev_data->cdev, &dev_data->dev);
	ida_simple_remove(&event_ida, MINOR(dev_data->dev.devt));
	hangup_device(dev_data);
}

static const struct acpi_device_id event_acpi_ids[] = {
	{ "GOOG000D", 0 },
	{ }
};
MODULE_DEVICE_TABLE(acpi, event_acpi_ids);

static struct acpi_driver event_driver = {
	.name = DRV_NAME,
	.class = DRV_NAME,
	.ids = event_acpi_ids,
	.ops = {
		.add = event_device_add,
		.notify = event_device_notify,
		.remove = event_device_remove,
	},
	.owner = THIS_MODULE,
};

static int __init event_module_init(void)
{
	dev_t dev_num = 0;
	int ret;

	ret = class_register(&event_class);
	if (ret) {
		pr_err(DRV_NAME ": Failed registering class: %d\n", ret);
		return ret;
	}

	/* Request device numbers, starting with minor=0. Save the major num. */
	ret = alloc_chrdev_region(&dev_num, 0, EVENT_MAX_DEV, EVENT_DEV_NAME);
	if (ret) {
		pr_err(DRV_NAME ": Failed allocating dev numbers: %d\n", ret);
		goto destroy_class;
	}
	event_major = MAJOR(dev_num);

	ret = acpi_bus_register_driver(&event_driver);
	if (ret < 0) {
		pr_err(DRV_NAME ": Failed registering driver: %d\n", ret);
		goto unregister_region;
	}

	return 0;

unregister_region:
	unregister_chrdev_region(MKDEV(event_major, 0), EVENT_MAX_DEV);
destroy_class:
	class_unregister(&event_class);
	ida_destroy(&event_ida);
	return ret;
}

static void __exit event_module_exit(void)
{
	acpi_bus_unregister_driver(&event_driver);
	unregister_chrdev_region(MKDEV(event_major, 0), EVENT_MAX_DEV);
	class_unregister(&event_class);
	ida_destroy(&event_ida);
}

module_init(event_module_init);
module_exit(event_module_exit);

MODULE_AUTHOR("Nick Crews <ncrews@chromium.org>");
MODULE_DESCRIPTION("Wilco EC ACPI event driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);