// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2006 - 2007 Ivo van Doorn
 * Copyright (C) 2007 Dmitry Torokhov
 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rfkill.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/slab.h>

#include "rfkill.h"

#define POLL_INTERVAL		(5 * HZ)

#define RFKILL_BLOCK_HW		BIT(0)
#define RFKILL_BLOCK_SW		BIT(1)
#define RFKILL_BLOCK_SW_PREV	BIT(2)
#define RFKILL_BLOCK_ANY	(RFKILL_BLOCK_HW |\
				 RFKILL_BLOCK_SW |\
				 RFKILL_BLOCK_SW_PREV)
#define RFKILL_BLOCK_SW_SETCALL	BIT(31)

struct rfkill {
	spinlock_t		lock;

	enum rfkill_type	type;

	unsigned long		state;
	unsigned long		hard_block_reasons;

	u32			idx;

	bool			registered;
	bool			persistent;
	bool			polling_paused;
	bool			suspended;

	const struct rfkill_ops	*ops;
	void			*data;

#ifdef CONFIG_RFKILL_LEDS
	struct led_trigger	led_trigger;
	const char		*ledtrigname;
#endif

	struct device		dev;
	struct list_head	node;

	struct delayed_work	poll_work;
	struct work_struct	uevent_work;
	struct work_struct	sync_work;
	char			name[];
};
#define to_rfkill(d)	container_of(d, struct rfkill, dev)

struct rfkill_int_event {
	struct list_head	list;
	struct rfkill_event	ev;
};

struct rfkill_data {
	struct list_head	list;
	struct list_head	events;
	struct mutex		mtx;
	wait_queue_head_t	read_wait;
	bool			input_handler;
};


MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>");
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_DESCRIPTION("RF switch support");
MODULE_LICENSE("GPL");


/*
 * The locking here should be made much smarter, we currently have
 * a bit of a stupid situation because drivers might want to register
 * the rfkill struct under their own lock, and take this lock during
 * rfkill method calls -- which will cause an AB-BA deadlock situation.
 *
 * To fix that, we need to rework this code here to be mostly lock-free
 * and only use the mutex for list manipulations, not to protect the
 * various other global variables. Then we can avoid holding the mutex
 * around driver operations, and all is happy.
 */
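
/*
 * Illustrative sketch of the AB-BA ordering described above (the
 * "driver_lock" and "wdev" here are hypothetical, not from a real
 * driver):
 *
 *	// CPU 0: driver registers under its own lock
 *	mutex_lock(&driver_lock);		// takes A
 *	rfkill_register(wdev->rfkill);		// wants rfkill_global_mutex (B)
 *
 *	// CPU 1: core changes state while holding the global mutex,
 *	// e.g. via the sysfs "soft" store
 *	mutex_lock(&rfkill_global_mutex);	// takes B
 *	rfkill_set_block(rfkill, blocked);
 *	  -> rfkill->ops->set_block()		// driver wants A
 *
 * If both run concurrently, each side waits on the lock the other
 * holds -- hence the desire to call driver operations without holding
 * rfkill_global_mutex.
 */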

static LIST_HEAD(rfkill_list);	/* list of registered rf switches */
static DEFINE_MUTEX(rfkill_global_mutex);
static LIST_HEAD(rfkill_fds);	/* list of open fds of /dev/rfkill */

static unsigned int rfkill_default_state = 1;
module_param_named(default_state, rfkill_default_state, uint, 0444);
MODULE_PARM_DESC(default_state,
		 "Default initial state for all radio types, 0 = radio off");

static struct {
	bool cur, sav;
} rfkill_global_states[NUM_RFKILL_TYPES];

static bool rfkill_epo_lock_active;


#ifdef CONFIG_RFKILL_LEDS
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
	struct led_trigger *trigger;

	if (!rfkill->registered)
		return;

	trigger = &rfkill->led_trigger;

	if (rfkill->state & RFKILL_BLOCK_ANY)
		led_trigger_event(trigger, LED_OFF);
	else
		led_trigger_event(trigger, LED_FULL);
}

static int rfkill_led_trigger_activate(struct led_classdev *led)
{
	struct rfkill *rfkill;

	rfkill = container_of(led->trigger, struct rfkill, led_trigger);

	rfkill_led_trigger_event(rfkill);

	return 0;
}

const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
{
	return rfkill->led_trigger.name;
}
EXPORT_SYMBOL(rfkill_get_led_trigger_name);

void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
{
	BUG_ON(!rfkill);

	rfkill->ledtrigname = name;
}
EXPORT_SYMBOL(rfkill_set_led_trigger_name);

static int rfkill_led_trigger_register(struct rfkill *rfkill)
{
	rfkill->led_trigger.name = rfkill->ledtrigname
					? : dev_name(&rfkill->dev);
	rfkill->led_trigger.activate = rfkill_led_trigger_activate;
	return led_trigger_register(&rfkill->led_trigger);
}

static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
	led_trigger_unregister(&rfkill->led_trigger);
}

static struct led_trigger rfkill_any_led_trigger;
static struct led_trigger rfkill_none_led_trigger;
static struct work_struct rfkill_global_led_trigger_work;

static void rfkill_global_led_trigger_worker(struct work_struct *work)
{
	enum led_brightness brightness = LED_OFF;
	struct rfkill *rfkill;

	mutex_lock(&rfkill_global_mutex);
	list_for_each_entry(rfkill, &rfkill_list, node) {
		if (!(rfkill->state & RFKILL_BLOCK_ANY)) {
			brightness = LED_FULL;
			break;
		}
	}
	mutex_unlock(&rfkill_global_mutex);

	led_trigger_event(&rfkill_any_led_trigger, brightness);
	led_trigger_event(&rfkill_none_led_trigger,
			  brightness == LED_OFF ? LED_FULL : LED_OFF);
}

static void rfkill_global_led_trigger_event(void)
{
	schedule_work(&rfkill_global_led_trigger_work);
}

static int rfkill_global_led_trigger_register(void)
{
	int ret;

	INIT_WORK(&rfkill_global_led_trigger_work,
			rfkill_global_led_trigger_worker);

	rfkill_any_led_trigger.name = "rfkill-any";
	ret = led_trigger_register(&rfkill_any_led_trigger);
	if (ret)
		return ret;

	rfkill_none_led_trigger.name = "rfkill-none";
	ret = led_trigger_register(&rfkill_none_led_trigger);
	if (ret)
		led_trigger_unregister(&rfkill_any_led_trigger);
	else
		/* Delay activation until all global triggers are registered */
		rfkill_global_led_trigger_event();

	return ret;
}

static void rfkill_global_led_trigger_unregister(void)
{
	led_trigger_unregister(&rfkill_none_led_trigger);
	led_trigger_unregister(&rfkill_any_led_trigger);
	cancel_work_sync(&rfkill_global_led_trigger_work);
}
#else
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
}

static inline int rfkill_led_trigger_register(struct rfkill *rfkill)
{
	return 0;
}

static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
}

static void rfkill_global_led_trigger_event(void)
{
}

static int rfkill_global_led_trigger_register(void)
{
	return 0;
}

static void rfkill_global_led_trigger_unregister(void)
{
}
#endif /* CONFIG_RFKILL_LEDS */
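
/*
 * Sketch of how a driver might pick its LED trigger name ("wdev" and
 * the "foo-wlan" name are hypothetical).  By default the per-device
 * trigger is named after the rfkill device; a driver can override
 * that, but must do so before rfkill_register(), since the trigger
 * is registered from there:
 *
 *	rfkill_set_led_trigger_name(wdev->rfkill, "foo-wlan");
 *	err = rfkill_register(wdev->rfkill);
 *
 * The global "rfkill-any" and "rfkill-none" triggers reflect whether
 * any transmitter at all is unblocked, independent of type.
 */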

static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill,
			      enum rfkill_operation op)
{
	unsigned long flags;

	ev->idx = rfkill->idx;
	ev->type = rfkill->type;
	ev->op = op;

	spin_lock_irqsave(&rfkill->lock, flags);
	ev->hard = !!(rfkill->state & RFKILL_BLOCK_HW);
	ev->soft = !!(rfkill->state & (RFKILL_BLOCK_SW |
					RFKILL_BLOCK_SW_PREV));
	ev->hard_block_reasons = rfkill->hard_block_reasons;
	spin_unlock_irqrestore(&rfkill->lock, flags);
}

static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op)
{
	struct rfkill_data *data;
	struct rfkill_int_event *ev;

	list_for_each_entry(data, &rfkill_fds, list) {
		ev = kzalloc(sizeof(*ev), GFP_KERNEL);
		if (!ev)
			continue;
		rfkill_fill_event(&ev->ev, rfkill, op);
		mutex_lock(&data->mtx);
		list_add_tail(&ev->list, &data->events);
		mutex_unlock(&data->mtx);
		wake_up_interruptible(&data->read_wait);
	}
}

static void rfkill_event(struct rfkill *rfkill)
{
	if (!rfkill->registered)
		return;

	kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);

	/* also send event to /dev/rfkill */
	rfkill_send_events(rfkill, RFKILL_OP_CHANGE);
}

/**
 * rfkill_set_block - wrapper for set_block method
 *
 * @rfkill: the rfkill struct to use
 * @blocked: the new software state
 *
 * Calls the set_block method (when applicable) and handles notifications
 * etc. as well.
 */
static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;
	bool prev, curr;
	int err;

	if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
		return;

	/*
	 * Some platforms (...!) generate input events which affect the
	 * _hard_ kill state -- whenever something tries to change the
	 * current software state query the hardware state too.
	 */
	if (rfkill->ops->query)
		rfkill->ops->query(rfkill, rfkill->data);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = rfkill->state & RFKILL_BLOCK_SW;

	if (prev)
		rfkill->state |= RFKILL_BLOCK_SW_PREV;
	else
		rfkill->state &= ~RFKILL_BLOCK_SW_PREV;

	if (blocked)
		rfkill->state |= RFKILL_BLOCK_SW;
	else
		rfkill->state &= ~RFKILL_BLOCK_SW;

	rfkill->state |= RFKILL_BLOCK_SW_SETCALL;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	err = rfkill->ops->set_block(rfkill->data, blocked);

	spin_lock_irqsave(&rfkill->lock, flags);
	if (err) {
		/*
		 * Failed -- reset status to _PREV, which may be different
		 * from what we have set _PREV to earlier in this function
		 * if rfkill_set_sw_state was invoked.
		 */
		if (rfkill->state & RFKILL_BLOCK_SW_PREV)
			rfkill->state |= RFKILL_BLOCK_SW;
		else
			rfkill->state &= ~RFKILL_BLOCK_SW;
	}
	rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL;
	rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
	curr = rfkill->state & RFKILL_BLOCK_SW;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	rfkill_led_trigger_event(rfkill);
	rfkill_global_led_trigger_event();

	if (prev != curr)
		rfkill_event(rfkill);
}
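
/*
 * Why the _SETCALL and _PREV bits above: ops->set_block() runs with
 * rfkill->lock dropped, so rfkill_set_sw_state() can execute in the
 * middle of it.  A rough timeline of the race being handled:
 *
 *	rfkill_set_block()			rfkill_set_sw_state()
 *	  save old SW in _PREV, set _SETCALL
 *	  ops->set_block(...)
 *						  sees _SETCALL, writes the
 *						  reported state into _PREV
 *						  instead of SW
 *	  set_block failed: restore SW
 *	  from _PREV (now the freshly
 *	  reported state, not the value
 *	  saved above)
 *	  clear _SETCALL and _PREV
 *
 * This way a state report arriving while a set_block call fails is
 * not lost.
 */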

static void rfkill_update_global_state(enum rfkill_type type, bool blocked)
{
	int i;

	if (type != RFKILL_TYPE_ALL) {
		rfkill_global_states[type].cur = blocked;
		return;
	}

	for (i = 0; i < NUM_RFKILL_TYPES; i++)
		rfkill_global_states[i].cur = blocked;
}

#ifdef CONFIG_RFKILL_INPUT
static atomic_t rfkill_input_disabled = ATOMIC_INIT(0);

/**
 * __rfkill_switch_all - Toggle state of all switches of given type
 * @type: type of interfaces to be affected
 * @blocked: the new state
 *
 * This function sets the state of all switches of given type,
 * unless a specific switch is suspended.
 *
 * Caller must have acquired rfkill_global_mutex.
 */
static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
{
	struct rfkill *rfkill;

	rfkill_update_global_state(type, blocked);
	list_for_each_entry(rfkill, &rfkill_list, node) {
		if (rfkill->type != type && type != RFKILL_TYPE_ALL)
			continue;

		rfkill_set_block(rfkill, blocked);
	}
}

/**
 * rfkill_switch_all - Toggle state of all switches of given type
 * @type: type of interfaces to be affected
 * @blocked: the new state
 *
 * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @blocked).
 * Please refer to __rfkill_switch_all() for details.
 *
 * Does nothing if the EPO lock is active.
 */
void rfkill_switch_all(enum rfkill_type type, bool blocked)
{
	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	if (!rfkill_epo_lock_active)
		__rfkill_switch_all(type, blocked);

	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_epo - emergency power off all transmitters
 *
 * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED,
 * ignoring everything in its path but rfkill_global_mutex and rfkill->mutex.
 *
 * The global state before the EPO is saved and can be restored later
 * using rfkill_restore_states().
 */
void rfkill_epo(void)
{
	struct rfkill *rfkill;
	int i;

	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	rfkill_epo_lock_active = true;
	list_for_each_entry(rfkill, &rfkill_list, node)
		rfkill_set_block(rfkill, true);

	for (i = 0; i < NUM_RFKILL_TYPES; i++) {
		rfkill_global_states[i].sav = rfkill_global_states[i].cur;
		rfkill_global_states[i].cur = true;
	}

	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_restore_states - restore global states
 *
 * Restore (and sync switches to) the global states saved in
 * rfkill_global_states. This can undo the effects of a call
 * to rfkill_epo().
 */
void rfkill_restore_states(void)
{
	int i;

	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	rfkill_epo_lock_active = false;
	for (i = 0; i < NUM_RFKILL_TYPES; i++)
		__rfkill_switch_all(i, rfkill_global_states[i].sav);
	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_remove_epo_lock - unlock state changes
 *
 * Used by rfkill-input to manually unlock state changes, when
 * the EPO switch is deactivated.
 */
void rfkill_remove_epo_lock(void)
{
	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);
	rfkill_epo_lock_active = false;
	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_is_epo_lock_active - returns true if EPO is active
 *
 * Returns 0 (false) if there is NOT an active EPO condition,
 * and 1 (true) if there is an active EPO condition, which
 * locks all radios in one of the BLOCKED states.
 *
 * Can be called in atomic context.
 */
bool rfkill_is_epo_lock_active(void)
{
	return rfkill_epo_lock_active;
}

/**
 * rfkill_get_global_sw_state - returns global state for a type
 * @type: the type to get the global state of
 *
 * Returns the current global state for a given wireless
 * device type.
 */
bool rfkill_get_global_sw_state(const enum rfkill_type type)
{
	return rfkill_global_states[type].cur;
}
#endif

bool rfkill_set_hw_state_reason(struct rfkill *rfkill,
				bool blocked, unsigned long reason)
{
	unsigned long flags;
	bool ret, prev;

	BUG_ON(!rfkill);

	if (WARN(reason &
		 ~(RFKILL_HARD_BLOCK_SIGNAL | RFKILL_HARD_BLOCK_NOT_OWNER),
		 "hw_state reason not supported: 0x%lx", reason))
		return blocked;

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = !!(rfkill->hard_block_reasons & reason);
	if (blocked) {
		rfkill->state |= RFKILL_BLOCK_HW;
		rfkill->hard_block_reasons |= reason;
	} else {
		rfkill->hard_block_reasons &= ~reason;
		if (!rfkill->hard_block_reasons)
			rfkill->state &= ~RFKILL_BLOCK_HW;
	}
	ret = !!(rfkill->state & RFKILL_BLOCK_ANY);
	spin_unlock_irqrestore(&rfkill->lock, flags);

	rfkill_led_trigger_event(rfkill);
	rfkill_global_led_trigger_event();

	if (rfkill->registered && prev != blocked)
		schedule_work(&rfkill->uevent_work);

	return ret;
}
EXPORT_SYMBOL(rfkill_set_hw_state_reason);
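
/*
 * Sketch of a driver reporting a hardware kill switch from an
 * interrupt handler (the "foo_wdev" structure, its fields and the
 * IRQ wiring are hypothetical):
 *
 *	static irqreturn_t foo_rfkill_irq(int irq, void *ptr)
 *	{
 *		struct foo_wdev *wdev = ptr;
 *		bool blocked = gpiod_get_value(wdev->kill_gpio);
 *
 *		// return value is the combined (hw + sw) block state
 *		rfkill_set_hw_state_reason(wdev->rfkill, blocked,
 *					   RFKILL_HARD_BLOCK_SIGNAL);
 *		return IRQ_HANDLED;
 *	}
 *
 * Each reason bit is tracked separately; RFKILL_BLOCK_HW is only
 * cleared once no reason remains set.
 */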

static void __rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
	u32 bit = RFKILL_BLOCK_SW;

	/* if in a ops->set_block right now, use other bit */
	if (rfkill->state & RFKILL_BLOCK_SW_SETCALL)
		bit = RFKILL_BLOCK_SW_PREV;

	if (blocked)
		rfkill->state |= bit;
	else
		rfkill->state &= ~bit;
}

bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;
	bool prev, hwblock;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = !!(rfkill->state & RFKILL_BLOCK_SW);
	__rfkill_set_sw_state(rfkill, blocked);
	hwblock = !!(rfkill->state & RFKILL_BLOCK_HW);
	blocked = blocked || hwblock;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	if (!rfkill->registered)
		return blocked;

	if (prev != blocked && !hwblock)
		schedule_work(&rfkill->uevent_work);

	rfkill_led_trigger_event(rfkill);
	rfkill_global_led_trigger_event();

	return blocked;
}
EXPORT_SYMBOL(rfkill_set_sw_state);

void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;

	BUG_ON(!rfkill);
	BUG_ON(rfkill->registered);

	spin_lock_irqsave(&rfkill->lock, flags);
	__rfkill_set_sw_state(rfkill, blocked);
	rfkill->persistent = true;
	spin_unlock_irqrestore(&rfkill->lock, flags);
}
EXPORT_SYMBOL(rfkill_init_sw_state);
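
/*
 * Sketch of the intended split between these two calls (the "wdev"
 * names are hypothetical): a driver whose firmware remembers the
 * soft-block state across reboots reports it once before
 * registration, and then reports spontaneous changes as they happen:
 *
 *	// in probe, before rfkill_register(): also marks the device
 *	// persistent, so the core won't overwrite firmware state
 *	rfkill_init_sw_state(wdev->rfkill, wdev->fw_soft_blocked);
 *
 *	// later, from a firmware event handler:
 *	rfkill_set_sw_state(wdev->rfkill, new_soft_blocked);
 */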

void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
{
	unsigned long flags;
	bool swprev, hwprev;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);

	/*
	 * No need to care about prev/setblock ... this is for uevent only
	 * and that will get triggered by rfkill_set_block anyway.
	 */
	swprev = !!(rfkill->state & RFKILL_BLOCK_SW);
	hwprev = !!(rfkill->state & RFKILL_BLOCK_HW);
	__rfkill_set_sw_state(rfkill, sw);
	if (hw)
		rfkill->state |= RFKILL_BLOCK_HW;
	else
		rfkill->state &= ~RFKILL_BLOCK_HW;

	spin_unlock_irqrestore(&rfkill->lock, flags);

	if (!rfkill->registered) {
		rfkill->persistent = true;
	} else {
		if (swprev != sw || hwprev != hw)
			schedule_work(&rfkill->uevent_work);

		rfkill_led_trigger_event(rfkill);
		rfkill_global_led_trigger_event();
	}
}
EXPORT_SYMBOL(rfkill_set_states);

static const char * const rfkill_types[] = {
	NULL, /* RFKILL_TYPE_ALL */
	"wlan",
	"bluetooth",
	"ultrawideband",
	"wimax",
	"wwan",
	"gps",
	"fm",
	"nfc",
};

enum rfkill_type rfkill_find_type(const char *name)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(rfkill_types) != NUM_RFKILL_TYPES);

	if (!name)
		return RFKILL_TYPE_ALL;

	for (i = 1; i < NUM_RFKILL_TYPES; i++)
		if (!strcmp(name, rfkill_types[i]))
			return i;
	return RFKILL_TYPE_ALL;
}
EXPORT_SYMBOL(rfkill_find_type);

static ssize_t name_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%s\n", rfkill->name);
}
static DEVICE_ATTR_RO(name);

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%s\n", rfkill_types[rfkill->type]);
}
static DEVICE_ATTR_RO(type);

static ssize_t index_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", rfkill->idx);
}
static DEVICE_ATTR_RO(index);

static ssize_t persistent_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", rfkill->persistent);
}
static DEVICE_ATTR_RO(persistent);

static ssize_t hard_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_HW) ? 1 : 0);
}
static DEVICE_ATTR_RO(hard);

static ssize_t soft_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0);
}

static ssize_t soft_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long state;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	err = kstrtoul(buf, 0, &state);
	if (err)
		return err;

	if (state > 1)
		return -EINVAL;

	mutex_lock(&rfkill_global_mutex);
	rfkill_set_block(rfkill, state);
	mutex_unlock(&rfkill_global_mutex);

	return count;
}
static DEVICE_ATTR_RW(soft);

static ssize_t hard_block_reasons_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "0x%lx\n", rfkill->hard_block_reasons);
}
static DEVICE_ATTR_RO(hard_block_reasons);

static u8 user_state_from_blocked(unsigned long state)
{
	if (state & RFKILL_BLOCK_HW)
		return RFKILL_USER_STATE_HARD_BLOCKED;
	if (state & RFKILL_BLOCK_SW)
		return RFKILL_USER_STATE_SOFT_BLOCKED;

	return RFKILL_USER_STATE_UNBLOCKED;
}

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", user_state_from_blocked(rfkill->state));
}

static ssize_t state_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long state;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	err = kstrtoul(buf, 0, &state);
	if (err)
		return err;

	if (state != RFKILL_USER_STATE_SOFT_BLOCKED &&
	    state != RFKILL_USER_STATE_UNBLOCKED)
		return -EINVAL;

	mutex_lock(&rfkill_global_mutex);
	rfkill_set_block(rfkill, state == RFKILL_USER_STATE_SOFT_BLOCKED);
	mutex_unlock(&rfkill_global_mutex);

	return count;
}
static DEVICE_ATTR_RW(state);

static struct attribute *rfkill_dev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_type.attr,
	&dev_attr_index.attr,
	&dev_attr_persistent.attr,
	&dev_attr_state.attr,
	&dev_attr_soft.attr,
	&dev_attr_hard.attr,
	&dev_attr_hard_block_reasons.attr,
	NULL,
};
ATTRIBUTE_GROUPS(rfkill_dev);

static void rfkill_release(struct device *dev)
{
	struct rfkill *rfkill = to_rfkill(dev);

	kfree(rfkill);
}

static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long flags;
	unsigned long reasons;
	u32 state;
	int error;

	error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name);
	if (error)
		return error;
	error = add_uevent_var(env, "RFKILL_TYPE=%s",
			       rfkill_types[rfkill->type]);
	if (error)
		return error;
	spin_lock_irqsave(&rfkill->lock, flags);
	state = rfkill->state;
	reasons = rfkill->hard_block_reasons;
	spin_unlock_irqrestore(&rfkill->lock, flags);
	error = add_uevent_var(env, "RFKILL_STATE=%d",
			       user_state_from_blocked(state));
	if (error)
		return error;
	return add_uevent_var(env, "RFKILL_HW_BLOCK_REASON=0x%lx", reasons);
}

void rfkill_pause_polling(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (!rfkill->ops->poll)
		return;

	rfkill->polling_paused = true;
	cancel_delayed_work_sync(&rfkill->poll_work);
}
EXPORT_SYMBOL(rfkill_pause_polling);

void rfkill_resume_polling(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (!rfkill->ops->poll)
		return;

	rfkill->polling_paused = false;

	if (rfkill->suspended)
		return;

	queue_delayed_work(system_power_efficient_wq,
			   &rfkill->poll_work, 0);
}
EXPORT_SYMBOL(rfkill_resume_polling);
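
/*
 * Sketch of a driver pausing polling around an operation that makes
 * the hardware state unreadable, e.g. a firmware reset (the helper
 * and "wdev" are hypothetical):
 *
 *	rfkill_pause_polling(wdev->rfkill);
 *	foo_firmware_reset(wdev);
 *	rfkill_resume_polling(wdev->rfkill);
 *
 * The core also stops polling across system sleep on its own, see
 * rfkill_suspend()/rfkill_resume() below.
 */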

#ifdef CONFIG_PM_SLEEP
static int rfkill_suspend(struct device *dev)
{
	struct rfkill *rfkill = to_rfkill(dev);

	rfkill->suspended = true;
	cancel_delayed_work_sync(&rfkill->poll_work);

	return 0;
}

static int rfkill_resume(struct device *dev)
{
	struct rfkill *rfkill = to_rfkill(dev);
	bool cur;

	rfkill->suspended = false;

	if (!rfkill->registered)
		return 0;

	if (!rfkill->persistent) {
		cur = !!(rfkill->state & RFKILL_BLOCK_SW);
		rfkill_set_block(rfkill, cur);
	}

	if (rfkill->ops->poll && !rfkill->polling_paused)
		queue_delayed_work(system_power_efficient_wq,
				   &rfkill->poll_work, 0);

	return 0;
}

static SIMPLE_DEV_PM_OPS(rfkill_pm_ops, rfkill_suspend, rfkill_resume);
#define RFKILL_PM_OPS (&rfkill_pm_ops)
#else
#define RFKILL_PM_OPS NULL
#endif

static struct class rfkill_class = {
	.name		= "rfkill",
	.dev_release	= rfkill_release,
	.dev_groups	= rfkill_dev_groups,
	.dev_uevent	= rfkill_dev_uevent,
	.pm		= RFKILL_PM_OPS,
};

bool rfkill_blocked(struct rfkill *rfkill)
{
	unsigned long flags;
	u32 state;

	spin_lock_irqsave(&rfkill->lock, flags);
	state = rfkill->state;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	return !!(state & RFKILL_BLOCK_ANY);
}
EXPORT_SYMBOL(rfkill_blocked);
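
/*
 * Sketch of a typical rfkill_blocked() use in a driver's transmit or
 * power-up path (names hypothetical):
 *
 *	if (rfkill_blocked(wdev->rfkill))
 *		return -ERFKILL;	// refuse to power up the radio
 *
 * The result is only a snapshot; the state can change right after the
 * check, which is why drivers still get ops->set_block() calls.
 */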

struct rfkill * __must_check rfkill_alloc(const char *name,
					  struct device *parent,
					  const enum rfkill_type type,
					  const struct rfkill_ops *ops,
					  void *ops_data)
{
	struct rfkill *rfkill;
	struct device *dev;

	if (WARN_ON(!ops))
		return NULL;

	if (WARN_ON(!ops->set_block))
		return NULL;

	if (WARN_ON(!name))
		return NULL;

	if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES))
		return NULL;

	rfkill = kzalloc(sizeof(*rfkill) + strlen(name) + 1, GFP_KERNEL);
	if (!rfkill)
		return NULL;

	spin_lock_init(&rfkill->lock);
	INIT_LIST_HEAD(&rfkill->node);
	rfkill->type = type;
	strcpy(rfkill->name, name);
	rfkill->ops = ops;
	rfkill->data = ops_data;

	dev = &rfkill->dev;
	dev->class = &rfkill_class;
	dev->parent = parent;
	device_initialize(dev);

	return rfkill;
}
EXPORT_SYMBOL(rfkill_alloc);

static void rfkill_poll(struct work_struct *work)
{
	struct rfkill *rfkill;

	rfkill = container_of(work, struct rfkill, poll_work.work);

	/*
	 * Poll hardware state -- driver will use one of the
	 * rfkill_set{,_hw,_sw}_state functions and use its
	 * return value to update the current status.
	 */
	rfkill->ops->poll(rfkill, rfkill->data);

	queue_delayed_work(system_power_efficient_wq,
		&rfkill->poll_work,
		round_jiffies_relative(POLL_INTERVAL));
}

static void rfkill_uevent_work(struct work_struct *work)
{
	struct rfkill *rfkill;

	rfkill = container_of(work, struct rfkill, uevent_work);

	mutex_lock(&rfkill_global_mutex);
	rfkill_event(rfkill);
	mutex_unlock(&rfkill_global_mutex);
}

static void rfkill_sync_work(struct work_struct *work)
{
	struct rfkill *rfkill;
	bool cur;

	rfkill = container_of(work, struct rfkill, sync_work);

	mutex_lock(&rfkill_global_mutex);
	cur = rfkill_global_states[rfkill->type].cur;
	rfkill_set_block(rfkill, cur);
	mutex_unlock(&rfkill_global_mutex);
}

int __must_check rfkill_register(struct rfkill *rfkill)
{
	static unsigned long rfkill_no;
	struct device *dev;
	int error;

	if (!rfkill)
		return -EINVAL;

	dev = &rfkill->dev;

	mutex_lock(&rfkill_global_mutex);

	if (rfkill->registered) {
		error = -EALREADY;
		goto unlock;
	}

	rfkill->idx = rfkill_no;
	dev_set_name(dev, "rfkill%lu", rfkill_no);
	rfkill_no++;

	list_add_tail(&rfkill->node, &rfkill_list);

	error = device_add(dev);
	if (error)
		goto remove;

	error = rfkill_led_trigger_register(rfkill);
	if (error)
		goto devdel;

	rfkill->registered = true;

	INIT_DELAYED_WORK(&rfkill->poll_work, rfkill_poll);
	INIT_WORK(&rfkill->uevent_work, rfkill_uevent_work);
	INIT_WORK(&rfkill->sync_work, rfkill_sync_work);

	if (rfkill->ops->poll)
		queue_delayed_work(system_power_efficient_wq,
				   &rfkill->poll_work,
				   round_jiffies_relative(POLL_INTERVAL));

	if (!rfkill->persistent || rfkill_epo_lock_active) {
		schedule_work(&rfkill->sync_work);
	} else {
#ifdef CONFIG_RFKILL_INPUT
		bool soft_blocked = !!(rfkill->state & RFKILL_BLOCK_SW);

		if (!atomic_read(&rfkill_input_disabled))
			__rfkill_switch_all(rfkill->type, soft_blocked);
#endif
	}

	rfkill_global_led_trigger_event();
	rfkill_send_events(rfkill, RFKILL_OP_ADD);

	mutex_unlock(&rfkill_global_mutex);
	return 0;

 devdel:
	device_del(&rfkill->dev);
 remove:
	list_del_init(&rfkill->node);
 unlock:
	mutex_unlock(&rfkill_global_mutex);
	return error;
}
EXPORT_SYMBOL(rfkill_register);

void rfkill_unregister(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (rfkill->ops->poll)
		cancel_delayed_work_sync(&rfkill->poll_work);

	cancel_work_sync(&rfkill->uevent_work);
	cancel_work_sync(&rfkill->sync_work);

	rfkill->registered = false;

	device_del(&rfkill->dev);

	mutex_lock(&rfkill_global_mutex);
	rfkill_send_events(rfkill, RFKILL_OP_DEL);
	list_del_init(&rfkill->node);
	rfkill_global_led_trigger_event();
	mutex_unlock(&rfkill_global_mutex);

	rfkill_led_trigger_unregister(rfkill);
}
EXPORT_SYMBOL(rfkill_unregister);

void rfkill_destroy(struct rfkill *rfkill)
{
	if (rfkill)
		put_device(&rfkill->dev);
}
EXPORT_SYMBOL(rfkill_destroy);
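
/*
 * Sketch of the full driver-side lifecycle (all "foo" names are
 * hypothetical; error handling trimmed to the rfkill calls):
 *
 *	static int foo_set_block(void *data, bool blocked)
 *	{
 *		struct foo_wdev *wdev = data;
 *
 *		return foo_radio_power(wdev, !blocked);
 *	}
 *
 *	static const struct rfkill_ops foo_rfkill_ops = {
 *		.set_block = foo_set_block,	// the only mandatory op
 *	};
 *
 *	// probe:
 *	wdev->rfkill = rfkill_alloc("foo-wlan", dev, RFKILL_TYPE_WLAN,
 *				    &foo_rfkill_ops, wdev);
 *	if (!wdev->rfkill)
 *		return -ENOMEM;
 *	err = rfkill_register(wdev->rfkill);
 *	if (err) {
 *		rfkill_destroy(wdev->rfkill);
 *		return err;
 *	}
 *
 *	// remove:
 *	rfkill_unregister(wdev->rfkill);
 *	rfkill_destroy(wdev->rfkill);
 */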

static int rfkill_fop_open(struct inode *inode, struct file *file)
{
	struct rfkill_data *data;
	struct rfkill *rfkill;
	struct rfkill_int_event *ev, *tmp;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	INIT_LIST_HEAD(&data->events);
	mutex_init(&data->mtx);
	init_waitqueue_head(&data->read_wait);

	mutex_lock(&rfkill_global_mutex);
	mutex_lock(&data->mtx);
	/*
	 * start getting events from elsewhere but hold mtx to get
	 * startup events added first
	 */

	list_for_each_entry(rfkill, &rfkill_list, node) {
		ev = kzalloc(sizeof(*ev), GFP_KERNEL);
		if (!ev)
			goto free;
		rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
		list_add_tail(&ev->list, &data->events);
	}
	list_add(&data->list, &rfkill_fds);
	mutex_unlock(&data->mtx);
	mutex_unlock(&rfkill_global_mutex);

	file->private_data = data;

	return stream_open(inode, file);

 free:
	mutex_unlock(&data->mtx);
	mutex_unlock(&rfkill_global_mutex);
	mutex_destroy(&data->mtx);
	list_for_each_entry_safe(ev, tmp, &data->events, list)
		kfree(ev);
	kfree(data);
	return -ENOMEM;
}

static __poll_t rfkill_fop_poll(struct file *file, poll_table *wait)
{
	struct rfkill_data *data = file->private_data;
	__poll_t res = EPOLLOUT | EPOLLWRNORM;

	poll_wait(file, &data->read_wait, wait);

	mutex_lock(&data->mtx);
	if (!list_empty(&data->events))
		res = EPOLLIN | EPOLLRDNORM;
	mutex_unlock(&data->mtx);

	return res;
}

static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
			       size_t count, loff_t *pos)
{
	struct rfkill_data *data = file->private_data;
	struct rfkill_int_event *ev;
	unsigned long sz;
	int ret;

	mutex_lock(&data->mtx);

	while (list_empty(&data->events)) {
		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto out;
		}
		mutex_unlock(&data->mtx);
		/* since we re-check and it just compares pointers,
		 * using !list_empty() without locking isn't a problem
		 */
		ret = wait_event_interruptible(data->read_wait,
					       !list_empty(&data->events));
		mutex_lock(&data->mtx);

		if (ret)
			goto out;
	}

	ev = list_first_entry(&data->events, struct rfkill_int_event,
			      list);

	sz = min_t(unsigned long, sizeof(ev->ev), count);
	ret = sz;
	if (copy_to_user(buf, &ev->ev, sz))
		ret = -EFAULT;

	list_del(&ev->list);
	kfree(ev);
 out:
	mutex_unlock(&data->mtx);
	return ret;
}

static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
				size_t count, loff_t *pos)
{
	struct rfkill *rfkill;
	struct rfkill_event ev;
	int ret;

	/* we don't need the 'hard' variable but accept it */
	if (count < RFKILL_EVENT_SIZE_V1 - 1)
		return -EINVAL;

	/*
	 * Copy as much data as we can accept into our 'ev' buffer,
	 * but tell userspace how much we've copied so it can determine
	 * our API version even in a write() call, if it cares.
	 */
	count = min(count, sizeof(ev));
	if (copy_from_user(&ev, buf, count))
		return -EFAULT;

	if (ev.type >= NUM_RFKILL_TYPES)
		return -EINVAL;

	mutex_lock(&rfkill_global_mutex);

	switch (ev.op) {
	case RFKILL_OP_CHANGE_ALL:
		rfkill_update_global_state(ev.type, ev.soft);
		list_for_each_entry(rfkill, &rfkill_list, node)
			if (rfkill->type == ev.type ||
			    ev.type == RFKILL_TYPE_ALL)
				rfkill_set_block(rfkill, ev.soft);
		ret = 0;
		break;
	case RFKILL_OP_CHANGE:
		list_for_each_entry(rfkill, &rfkill_list, node)
			if (rfkill->idx == ev.idx &&
			    (rfkill->type == ev.type ||
			     ev.type == RFKILL_TYPE_ALL))
				rfkill_set_block(rfkill, ev.soft);
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	mutex_unlock(&rfkill_global_mutex);

	return ret ?: count;
}
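
/*
 * Sketch of the userspace side of this API (plain C, error handling
 * omitted): soft-block all wlan devices, then listen for events.
 * struct rfkill_event and the RFKILL_OP_* values come from the uapi
 * header <linux/rfkill.h>:
 *
 *	int fd = open("/dev/rfkill", O_RDWR);
 *	struct rfkill_event ev = {
 *		.op = RFKILL_OP_CHANGE_ALL,
 *		.type = RFKILL_TYPE_WLAN,
 *		.soft = 1,
 *	};
 *	write(fd, &ev, sizeof(ev));
 *
 *	while (read(fd, &ev, sizeof(ev)) > 0)	// one event per read()
 *		printf("idx %u soft %u hard %u\n",
 *		       ev.idx, ev.soft, ev.hard);
 */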

static int rfkill_fop_release(struct inode *inode, struct file *file)
{
	struct rfkill_data *data = file->private_data;
	struct rfkill_int_event *ev, *tmp;

	mutex_lock(&rfkill_global_mutex);
	list_del(&data->list);
	mutex_unlock(&rfkill_global_mutex);

	mutex_destroy(&data->mtx);
	list_for_each_entry_safe(ev, tmp, &data->events, list)
		kfree(ev);

#ifdef CONFIG_RFKILL_INPUT
	if (data->input_handler)
		if (atomic_dec_return(&rfkill_input_disabled) == 0)
			printk(KERN_DEBUG "rfkill: input handler enabled\n");
#endif

	kfree(data);

	return 0;
}

#ifdef CONFIG_RFKILL_INPUT
static long rfkill_fop_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct rfkill_data *data = file->private_data;

	if (_IOC_TYPE(cmd) != RFKILL_IOC_MAGIC)
		return -ENOSYS;

	if (_IOC_NR(cmd) != RFKILL_IOC_NOINPUT)
		return -ENOSYS;

	mutex_lock(&data->mtx);

	if (!data->input_handler) {
		if (atomic_inc_return(&rfkill_input_disabled) == 1)
			printk(KERN_DEBUG "rfkill: input handler disabled\n");
		data->input_handler = true;
	}

	mutex_unlock(&data->mtx);

	return 0;
}
#endif

static const struct file_operations rfkill_fops = {
	.owner		= THIS_MODULE,
	.open		= rfkill_fop_open,
	.read		= rfkill_fop_read,
	.write		= rfkill_fop_write,
	.poll		= rfkill_fop_poll,
	.release	= rfkill_fop_release,
#ifdef CONFIG_RFKILL_INPUT
	.unlocked_ioctl	= rfkill_fop_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
#endif
	.llseek		= no_llseek,
};

#define RFKILL_NAME "rfkill"

static struct miscdevice rfkill_miscdev = {
	.fops	= &rfkill_fops,
	.name	= RFKILL_NAME,
	.minor	= RFKILL_MINOR,
};

static int __init rfkill_init(void)
{
	int error;

	rfkill_update_global_state(RFKILL_TYPE_ALL, !rfkill_default_state);

	error = class_register(&rfkill_class);
	if (error)
		goto error_class;

	error = misc_register(&rfkill_miscdev);
	if (error)
		goto error_misc;

	error = rfkill_global_led_trigger_register();
	if (error)
		goto error_led_trigger;

#ifdef CONFIG_RFKILL_INPUT
	error = rfkill_handler_init();
	if (error)
		goto error_input;
#endif

	return 0;

#ifdef CONFIG_RFKILL_INPUT
error_input:
	rfkill_global_led_trigger_unregister();
#endif
error_led_trigger:
	misc_deregister(&rfkill_miscdev);
error_misc:
	class_unregister(&rfkill_class);
error_class:
	return error;
}
subsys_initcall(rfkill_init);

static void __exit rfkill_exit(void)
{
#ifdef CONFIG_RFKILL_INPUT
	rfkill_handler_exit();
#endif
	rfkill_global_led_trigger_unregister();
	misc_deregister(&rfkill_miscdev);
	class_unregister(&rfkill_class);
}
module_exit(rfkill_exit);

MODULE_ALIAS_MISCDEV(RFKILL_MINOR);
MODULE_ALIAS("devname:" RFKILL_NAME);