xref: /openbmc/linux/net/rfkill/core.c (revision d6d5d5c4)
/*
 * Copyright (C) 2006 - 2007 Ivo van Doorn
 * Copyright (C) 2007 Dmitry Torokhov
 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the
 * Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rfkill.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/miscdevice.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/slab.h>

#include "rfkill.h"

#define POLL_INTERVAL		(5 * HZ)

#define RFKILL_BLOCK_HW		BIT(0)
#define RFKILL_BLOCK_SW		BIT(1)
#define RFKILL_BLOCK_SW_PREV	BIT(2)
#define RFKILL_BLOCK_ANY	(RFKILL_BLOCK_HW |\
				 RFKILL_BLOCK_SW |\
				 RFKILL_BLOCK_SW_PREV)
#define RFKILL_BLOCK_SW_SETCALL	BIT(31)

struct rfkill {
	spinlock_t		lock;

	const char		*name;
	enum rfkill_type	type;

	unsigned long		state;

	u32			idx;

	bool			registered;
	bool			persistent;

	const struct rfkill_ops	*ops;
	void			*data;

#ifdef CONFIG_RFKILL_LEDS
	struct led_trigger	led_trigger;
	const char		*ledtrigname;
#endif

	struct device		dev;
	struct list_head	node;

	struct delayed_work	poll_work;
	struct work_struct	uevent_work;
	struct work_struct	sync_work;
};
#define to_rfkill(d)	container_of(d, struct rfkill, dev)

struct rfkill_int_event {
	struct list_head	list;
	struct rfkill_event	ev;
};

struct rfkill_data {
	struct list_head	list;
	struct list_head	events;
	struct mutex		mtx;
	wait_queue_head_t	read_wait;
	bool			input_handler;
};


MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>");
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_DESCRIPTION("RF switch support");
MODULE_LICENSE("GPL");


/*
 * The locking here should be made much smarter, we currently have
 * a bit of a stupid situation because drivers might want to register
 * the rfkill struct under their own lock, and take this lock during
 * rfkill method calls -- which will cause an AB-BA deadlock situation.
 *
 * To fix that, we need to rework this code here to be mostly lock-free
 * and only use the mutex for list manipulations, not to protect the
 * various other global variables. Then we can avoid holding the mutex
 * around driver operations, and all is happy.
 */
static LIST_HEAD(rfkill_list);	/* list of registered rf switches */
static DEFINE_MUTEX(rfkill_global_mutex);
static LIST_HEAD(rfkill_fds);	/* list of open fds of /dev/rfkill */

static unsigned int rfkill_default_state = 1;
module_param_named(default_state, rfkill_default_state, uint, 0444);
MODULE_PARM_DESC(default_state,
		 "Default initial state for all radio types, 0 = radio off");

static struct {
	bool cur, sav;
} rfkill_global_states[NUM_RFKILL_TYPES];

static bool rfkill_epo_lock_active;


#ifdef CONFIG_RFKILL_LEDS
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
	struct led_trigger *trigger;

	if (!rfkill->registered)
		return;

	trigger = &rfkill->led_trigger;

	if (rfkill->state & RFKILL_BLOCK_ANY)
		led_trigger_event(trigger, LED_OFF);
	else
		led_trigger_event(trigger, LED_FULL);
}

static void rfkill_led_trigger_activate(struct led_classdev *led)
{
	struct rfkill *rfkill;

	rfkill = container_of(led->trigger, struct rfkill, led_trigger);

	rfkill_led_trigger_event(rfkill);
}

static int rfkill_led_trigger_register(struct rfkill *rfkill)
{
	rfkill->led_trigger.name = rfkill->ledtrigname
					? : dev_name(&rfkill->dev);
	rfkill->led_trigger.activate = rfkill_led_trigger_activate;
	return led_trigger_register(&rfkill->led_trigger);
}

static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
	led_trigger_unregister(&rfkill->led_trigger);
}
#else
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
}

static inline int rfkill_led_trigger_register(struct rfkill *rfkill)
{
	return 0;
}

static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
}
#endif /* CONFIG_RFKILL_LEDS */

static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill,
			      enum rfkill_operation op)
{
	unsigned long flags;

	ev->idx = rfkill->idx;
	ev->type = rfkill->type;
	ev->op = op;

	spin_lock_irqsave(&rfkill->lock, flags);
	ev->hard = !!(rfkill->state & RFKILL_BLOCK_HW);
	ev->soft = !!(rfkill->state & (RFKILL_BLOCK_SW |
					RFKILL_BLOCK_SW_PREV));
	spin_unlock_irqrestore(&rfkill->lock, flags);
}

static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op)
{
	struct rfkill_data *data;
	struct rfkill_int_event *ev;

	list_for_each_entry(data, &rfkill_fds, list) {
		ev = kzalloc(sizeof(*ev), GFP_KERNEL);
		if (!ev)
			continue;
		rfkill_fill_event(&ev->ev, rfkill, op);
		mutex_lock(&data->mtx);
		list_add_tail(&ev->list, &data->events);
		mutex_unlock(&data->mtx);
		wake_up_interruptible(&data->read_wait);
	}
}

static void rfkill_event(struct rfkill *rfkill)
{
	if (!rfkill->registered)
		return;

	kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);

	/* also send event to /dev/rfkill */
	rfkill_send_events(rfkill, RFKILL_OP_CHANGE);
}

static bool __rfkill_set_hw_state(struct rfkill *rfkill,
				  bool blocked, bool *change)
{
	unsigned long flags;
	bool prev, any;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = !!(rfkill->state & RFKILL_BLOCK_HW);
	if (blocked)
		rfkill->state |= RFKILL_BLOCK_HW;
	else
		rfkill->state &= ~RFKILL_BLOCK_HW;
	*change = prev != blocked;
	any = rfkill->state & RFKILL_BLOCK_ANY;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	rfkill_led_trigger_event(rfkill);

	return any;
}

/**
 * rfkill_set_block - wrapper for set_block method
 *
 * @rfkill: the rfkill struct to use
 * @blocked: the new software state
 *
 * Calls the set_block method (when applicable) and handles notifications
 * etc. as well.
 */
static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;
	int err;

	if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
		return;

	/*
	 * Some platforms (...!) generate input events which affect the
	 * _hard_ kill state -- whenever something tries to change the
	 * current software state, query the hardware state too.
	 */
	if (rfkill->ops->query)
		rfkill->ops->query(rfkill, rfkill->data);

	spin_lock_irqsave(&rfkill->lock, flags);
	if (rfkill->state & RFKILL_BLOCK_SW)
		rfkill->state |= RFKILL_BLOCK_SW_PREV;
	else
		rfkill->state &= ~RFKILL_BLOCK_SW_PREV;

	if (blocked)
		rfkill->state |= RFKILL_BLOCK_SW;
	else
		rfkill->state &= ~RFKILL_BLOCK_SW;

	rfkill->state |= RFKILL_BLOCK_SW_SETCALL;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	err = rfkill->ops->set_block(rfkill->data, blocked);

	spin_lock_irqsave(&rfkill->lock, flags);
	if (err) {
		/*
		 * Failed -- reset status to _PREV, which may be different
		 * from what we set _PREV to earlier in this function
		 * if rfkill_set_sw_state was invoked.
		 */
		if (rfkill->state & RFKILL_BLOCK_SW_PREV)
			rfkill->state |= RFKILL_BLOCK_SW;
		else
			rfkill->state &= ~RFKILL_BLOCK_SW;
	}
	rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL;
	rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	rfkill_led_trigger_event(rfkill);
	rfkill_event(rfkill);
}
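
/*
 * Example: a driver-side ops->set_block implementation, as called by
 * rfkill_set_block() above. This is an illustrative sketch, not part
 * of this file; the acme_radio device and its acme_radio_set_power()
 * helper are made up. set_block may sleep and must return 0 on
 * success or a negative errno, in which case the core rolls the SW
 * bit back to its previous value.
 *
 *	static int acme_rfkill_set_block(void *data, bool blocked)
 *	{
 *		struct acme_radio *radio = data;
 *
 *		(turn the transmitter off when blocked, on otherwise)
 *		return acme_radio_set_power(radio, !blocked);
 *	}
 *
 *	static const struct rfkill_ops acme_rfkill_ops = {
 *		.set_block = acme_rfkill_set_block,
 *	};
 */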

#ifdef CONFIG_RFKILL_INPUT
static atomic_t rfkill_input_disabled = ATOMIC_INIT(0);

/**
 * __rfkill_switch_all - Toggle state of all switches of given type
 * @type: type of interfaces to be affected
 * @blocked: the new state
 *
 * This function sets the state of all switches of the given type,
 * unless a specific switch is claimed by userspace (in which case,
 * that switch is left alone) or suspended.
 *
 * Caller must have acquired rfkill_global_mutex.
 */
static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
{
	struct rfkill *rfkill;

	rfkill_global_states[type].cur = blocked;
	list_for_each_entry(rfkill, &rfkill_list, node) {
		if (rfkill->type != type)
			continue;

		rfkill_set_block(rfkill, blocked);
	}
}

/**
 * rfkill_switch_all - Toggle state of all switches of given type
 * @type: type of interfaces to be affected
 * @blocked: the new state
 *
 * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @blocked).
 * Please refer to __rfkill_switch_all() for details.
 *
 * Does nothing if the EPO lock is active.
 */
void rfkill_switch_all(enum rfkill_type type, bool blocked)
{
	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	if (!rfkill_epo_lock_active)
		__rfkill_switch_all(type, blocked);

	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_epo - emergency power off all transmitters
 *
 * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED,
 * ignoring everything in its path but rfkill_global_mutex.
 *
 * The global state before the EPO is saved and can be restored later
 * using rfkill_restore_states().
 */
void rfkill_epo(void)
{
	struct rfkill *rfkill;
	int i;

	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	rfkill_epo_lock_active = true;
	list_for_each_entry(rfkill, &rfkill_list, node)
		rfkill_set_block(rfkill, true);

	for (i = 0; i < NUM_RFKILL_TYPES; i++) {
		rfkill_global_states[i].sav = rfkill_global_states[i].cur;
		rfkill_global_states[i].cur = true;
	}

	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_restore_states - restore global states
 *
 * Restore (and sync switches to) the global states saved in
 * rfkill_global_states.  This can undo the effects of
 * a call to rfkill_epo().
 */
void rfkill_restore_states(void)
{
	int i;

	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	rfkill_epo_lock_active = false;
	for (i = 0; i < NUM_RFKILL_TYPES; i++)
		__rfkill_switch_all(i, rfkill_global_states[i].sav);
	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_remove_epo_lock - unlock state changes
 *
 * Used by rfkill-input to manually unlock state changes when
 * the EPO switch is deactivated.
 */
void rfkill_remove_epo_lock(void)
{
	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);
	rfkill_epo_lock_active = false;
	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_is_epo_lock_active - returns true when EPO is active
 *
 * Returns 0 (false) if there is NOT an active EPO condition,
 * and 1 (true) if there is an active EPO condition, which
 * locks all radios in one of the BLOCKED states.
 *
 * Can be called in atomic context.
 */
bool rfkill_is_epo_lock_active(void)
{
	return rfkill_epo_lock_active;
}

/**
 * rfkill_get_global_sw_state - returns global state for a type
 * @type: the type to get the global state of
 *
 * Returns the current global state for a given wireless
 * device type.
 */
bool rfkill_get_global_sw_state(const enum rfkill_type type)
{
	return rfkill_global_states[type].cur;
}
#endif


bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
{
	bool ret, change;

	ret = __rfkill_set_hw_state(rfkill, blocked, &change);

	if (!rfkill->registered)
		return ret;

	if (change)
		schedule_work(&rfkill->uevent_work);

	return ret;
}
EXPORT_SYMBOL(rfkill_set_hw_state);
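
/*
 * Example: reporting the hardware kill switch from an interrupt
 * handler. An illustrative sketch, not part of this file; the
 * acme_radio device, its ->kill_gpio field and ->rfkill pointer are
 * made up. rfkill_set_hw_state() only updates state under a spinlock
 * and schedules a work item, so it is usable from atomic context; the
 * return value says whether the radio should now be blocked overall.
 *
 *	static irqreturn_t acme_killswitch_irq(int irq, void *dev_id)
 *	{
 *		struct acme_radio *radio = dev_id;
 *		bool hw_blocked = gpio_get_value(radio->kill_gpio);
 *
 *		rfkill_set_hw_state(radio->rfkill, hw_blocked);
 *		return IRQ_HANDLED;
 *	}
 */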

static void __rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
	u32 bit = RFKILL_BLOCK_SW;

	/* if we are in an ops->set_block call right now, use the other bit */
	if (rfkill->state & RFKILL_BLOCK_SW_SETCALL)
		bit = RFKILL_BLOCK_SW_PREV;

	if (blocked)
		rfkill->state |= bit;
	else
		rfkill->state &= ~bit;
}

bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;
	bool prev, hwblock;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = !!(rfkill->state & RFKILL_BLOCK_SW);
	__rfkill_set_sw_state(rfkill, blocked);
	hwblock = !!(rfkill->state & RFKILL_BLOCK_HW);
	blocked = blocked || hwblock;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	if (!rfkill->registered)
		return blocked;

	if (prev != blocked && !hwblock)
		schedule_work(&rfkill->uevent_work);

	rfkill_led_trigger_event(rfkill);

	return blocked;
}
EXPORT_SYMBOL(rfkill_set_sw_state);

void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;

	BUG_ON(!rfkill);
	BUG_ON(rfkill->registered);

	spin_lock_irqsave(&rfkill->lock, flags);
	__rfkill_set_sw_state(rfkill, blocked);
	rfkill->persistent = true;
	spin_unlock_irqrestore(&rfkill->lock, flags);
}
EXPORT_SYMBOL(rfkill_init_sw_state);
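
/*
 * Example: seeding the software state before registration, for a
 * device whose block state survives reboots (e.g. kept in EEPROM).
 * An illustrative sketch, not part of this file; the
 * acme_read_saved_block() helper is made up. Because this marks the
 * rfkill as persistent, the core adopts the reported state instead of
 * forcing the global default onto the device at registration time.
 *
 *	rfkill_init_sw_state(radio->rfkill, acme_read_saved_block(radio));
 *	err = rfkill_register(radio->rfkill);
 */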

void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
{
	unsigned long flags;
	bool swprev, hwprev;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);

	/*
	 * No need to care about prev/setblock ... this is for uevent only
	 * and that will get triggered by rfkill_set_block anyway.
	 */
	swprev = !!(rfkill->state & RFKILL_BLOCK_SW);
	hwprev = !!(rfkill->state & RFKILL_BLOCK_HW);
	__rfkill_set_sw_state(rfkill, sw);
	if (hw)
		rfkill->state |= RFKILL_BLOCK_HW;
	else
		rfkill->state &= ~RFKILL_BLOCK_HW;

	spin_unlock_irqrestore(&rfkill->lock, flags);

	if (!rfkill->registered) {
		rfkill->persistent = true;
	} else {
		if (swprev != sw || hwprev != hw)
			schedule_work(&rfkill->uevent_work);

		rfkill_led_trigger_event(rfkill);
	}
}
EXPORT_SYMBOL(rfkill_set_states);

static ssize_t rfkill_name_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%s\n", rfkill->name);
}

static const char *rfkill_get_type_str(enum rfkill_type type)
{
	BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_FM + 1);

	switch (type) {
	case RFKILL_TYPE_WLAN:
		return "wlan";
	case RFKILL_TYPE_BLUETOOTH:
		return "bluetooth";
	case RFKILL_TYPE_UWB:
		return "ultrawideband";
	case RFKILL_TYPE_WIMAX:
		return "wimax";
	case RFKILL_TYPE_WWAN:
		return "wwan";
	case RFKILL_TYPE_GPS:
		return "gps";
	case RFKILL_TYPE_FM:
		return "fm";
	default:
		BUG();
	}
}

static ssize_t rfkill_type_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type));
}

static ssize_t rfkill_idx_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", rfkill->idx);
}

static ssize_t rfkill_persistent_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", rfkill->persistent);
}

static ssize_t rfkill_hard_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_HW) ? 1 : 0);
}

static ssize_t rfkill_soft_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0);
}

static ssize_t rfkill_soft_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long state;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	err = strict_strtoul(buf, 0, &state);
	if (err)
		return err;

	if (state > 1)
		return -EINVAL;

	mutex_lock(&rfkill_global_mutex);
	rfkill_set_block(rfkill, state);
	mutex_unlock(&rfkill_global_mutex);

	return err ?: count;
}

static u8 user_state_from_blocked(unsigned long state)
{
	if (state & RFKILL_BLOCK_HW)
		return RFKILL_USER_STATE_HARD_BLOCKED;
	if (state & RFKILL_BLOCK_SW)
		return RFKILL_USER_STATE_SOFT_BLOCKED;

	return RFKILL_USER_STATE_UNBLOCKED;
}

static ssize_t rfkill_state_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", user_state_from_blocked(rfkill->state));
}

static ssize_t rfkill_state_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long state;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	err = strict_strtoul(buf, 0, &state);
	if (err)
		return err;

	if (state != RFKILL_USER_STATE_SOFT_BLOCKED &&
	    state != RFKILL_USER_STATE_UNBLOCKED)
		return -EINVAL;

	mutex_lock(&rfkill_global_mutex);
	rfkill_set_block(rfkill, state == RFKILL_USER_STATE_SOFT_BLOCKED);
	mutex_unlock(&rfkill_global_mutex);

	return err ?: count;
}

static ssize_t rfkill_claim_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	return sprintf(buf, "%d\n", 0);
}

static ssize_t rfkill_claim_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	return -EOPNOTSUPP;
}

static struct device_attribute rfkill_dev_attrs[] = {
	__ATTR(name, S_IRUGO, rfkill_name_show, NULL),
	__ATTR(type, S_IRUGO, rfkill_type_show, NULL),
	__ATTR(index, S_IRUGO, rfkill_idx_show, NULL),
	__ATTR(persistent, S_IRUGO, rfkill_persistent_show, NULL),
	__ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store),
	__ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store),
	__ATTR(soft, S_IRUGO|S_IWUSR, rfkill_soft_show, rfkill_soft_store),
	__ATTR(hard, S_IRUGO, rfkill_hard_show, NULL),
	__ATTR_NULL
};

static void rfkill_release(struct device *dev)
{
	struct rfkill *rfkill = to_rfkill(dev);

	kfree(rfkill);
}

static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long flags;
	u32 state;
	int error;

	error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name);
	if (error)
		return error;
	error = add_uevent_var(env, "RFKILL_TYPE=%s",
			       rfkill_get_type_str(rfkill->type));
	if (error)
		return error;
	spin_lock_irqsave(&rfkill->lock, flags);
	state = rfkill->state;
	spin_unlock_irqrestore(&rfkill->lock, flags);
	error = add_uevent_var(env, "RFKILL_STATE=%d",
			       user_state_from_blocked(state));
	return error;
}

void rfkill_pause_polling(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (!rfkill->ops->poll)
		return;

	cancel_delayed_work_sync(&rfkill->poll_work);
}
EXPORT_SYMBOL(rfkill_pause_polling);

void rfkill_resume_polling(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (!rfkill->ops->poll)
		return;

	schedule_work(&rfkill->poll_work.work);
}
EXPORT_SYMBOL(rfkill_resume_polling);
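
/*
 * Example: quiescing the poll worker around an operation that cannot
 * tolerate concurrent ops->poll calls, such as a firmware reload. An
 * illustrative sketch, not part of this file; acme_reload_firmware()
 * is made up. The same pairing is what the class suspend/resume hooks
 * below do across system sleep.
 *
 *	rfkill_pause_polling(radio->rfkill);
 *	err = acme_reload_firmware(radio);
 *	rfkill_resume_polling(radio->rfkill);
 */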

static int rfkill_suspend(struct device *dev, pm_message_t state)
{
	struct rfkill *rfkill = to_rfkill(dev);

	rfkill_pause_polling(rfkill);

	return 0;
}

static int rfkill_resume(struct device *dev)
{
	struct rfkill *rfkill = to_rfkill(dev);
	bool cur;

	if (!rfkill->persistent) {
		cur = !!(rfkill->state & RFKILL_BLOCK_SW);
		rfkill_set_block(rfkill, cur);
	}

	rfkill_resume_polling(rfkill);

	return 0;
}

static struct class rfkill_class = {
	.name		= "rfkill",
	.dev_release	= rfkill_release,
	.dev_attrs	= rfkill_dev_attrs,
	.dev_uevent	= rfkill_dev_uevent,
	.suspend	= rfkill_suspend,
	.resume		= rfkill_resume,
};

bool rfkill_blocked(struct rfkill *rfkill)
{
	unsigned long flags;
	u32 state;

	spin_lock_irqsave(&rfkill->lock, flags);
	state = rfkill->state;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	return !!(state & RFKILL_BLOCK_ANY);
}
EXPORT_SYMBOL(rfkill_blocked);


struct rfkill * __must_check rfkill_alloc(const char *name,
					  struct device *parent,
					  const enum rfkill_type type,
					  const struct rfkill_ops *ops,
					  void *ops_data)
{
	struct rfkill *rfkill;
	struct device *dev;

	if (WARN_ON(!ops))
		return NULL;

	if (WARN_ON(!ops->set_block))
		return NULL;

	if (WARN_ON(!name))
		return NULL;

	if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES))
		return NULL;

	rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL);
	if (!rfkill)
		return NULL;

	spin_lock_init(&rfkill->lock);
	INIT_LIST_HEAD(&rfkill->node);
	rfkill->type = type;
	rfkill->name = name;
	rfkill->ops = ops;
	rfkill->data = ops_data;

	dev = &rfkill->dev;
	dev->class = &rfkill_class;
	dev->parent = parent;
	device_initialize(dev);

	return rfkill;
}
EXPORT_SYMBOL(rfkill_alloc);
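
/*
 * Example: the usual driver life cycle around rfkill_alloc(). An
 * illustrative sketch, not part of this file; the acme_* names are
 * made up. rfkill_alloc() only initializes the struct device, so an
 * rfkill that fails to register, and one that has been unregistered,
 * must both be disposed of with rfkill_destroy().
 *
 *	radio->rfkill = rfkill_alloc("acme-wlan", &pdev->dev,
 *				     RFKILL_TYPE_WLAN,
 *				     &acme_rfkill_ops, radio);
 *	if (!radio->rfkill)
 *		return -ENOMEM;
 *
 *	err = rfkill_register(radio->rfkill);
 *	if (err) {
 *		rfkill_destroy(radio->rfkill);
 *		return err;
 *	}
 *
 *	(and later, on remove:)
 *	rfkill_unregister(radio->rfkill);
 *	rfkill_destroy(radio->rfkill);
 */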

static void rfkill_poll(struct work_struct *work)
{
	struct rfkill *rfkill;

	rfkill = container_of(work, struct rfkill, poll_work.work);

	/*
	 * Poll hardware state -- driver will use one of the
	 * rfkill_set{,_hw,_sw}_state functions and use its
	 * return value to update the current status.
	 */
	rfkill->ops->poll(rfkill, rfkill->data);

	schedule_delayed_work(&rfkill->poll_work,
		round_jiffies_relative(POLL_INTERVAL));
}
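
/*
 * Example: a driver-side ops->poll implementation for hardware that
 * cannot interrupt on kill-switch changes. An illustrative sketch,
 * not part of this file; the acme_* names are made up. The core calls
 * this every POLL_INTERVAL (5 seconds) while the device is registered.
 *
 *	static void acme_rfkill_poll(struct rfkill *rfkill, void *data)
 *	{
 *		struct acme_radio *radio = data;
 *
 *		rfkill_set_hw_state(rfkill,
 *				    acme_radio_killswitch_on(radio));
 *	}
 */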

static void rfkill_uevent_work(struct work_struct *work)
{
	struct rfkill *rfkill;

	rfkill = container_of(work, struct rfkill, uevent_work);

	mutex_lock(&rfkill_global_mutex);
	rfkill_event(rfkill);
	mutex_unlock(&rfkill_global_mutex);
}

static void rfkill_sync_work(struct work_struct *work)
{
	struct rfkill *rfkill;
	bool cur;

	rfkill = container_of(work, struct rfkill, sync_work);

	mutex_lock(&rfkill_global_mutex);
	cur = rfkill_global_states[rfkill->type].cur;
	rfkill_set_block(rfkill, cur);
	mutex_unlock(&rfkill_global_mutex);
}

int __must_check rfkill_register(struct rfkill *rfkill)
{
	static unsigned long rfkill_no;
	struct device *dev = &rfkill->dev;
	int error;

	BUG_ON(!rfkill);

	mutex_lock(&rfkill_global_mutex);

	if (rfkill->registered) {
		error = -EALREADY;
		goto unlock;
	}

	rfkill->idx = rfkill_no;
	dev_set_name(dev, "rfkill%lu", rfkill_no);
	rfkill_no++;

	list_add_tail(&rfkill->node, &rfkill_list);

	error = device_add(dev);
	if (error)
		goto remove;

	error = rfkill_led_trigger_register(rfkill);
	if (error)
		goto devdel;

	rfkill->registered = true;

	INIT_DELAYED_WORK(&rfkill->poll_work, rfkill_poll);
	INIT_WORK(&rfkill->uevent_work, rfkill_uevent_work);
	INIT_WORK(&rfkill->sync_work, rfkill_sync_work);

	if (rfkill->ops->poll)
		schedule_delayed_work(&rfkill->poll_work,
			round_jiffies_relative(POLL_INTERVAL));

	if (!rfkill->persistent || rfkill_epo_lock_active) {
		schedule_work(&rfkill->sync_work);
	} else {
#ifdef CONFIG_RFKILL_INPUT
		bool soft_blocked = !!(rfkill->state & RFKILL_BLOCK_SW);

		if (!atomic_read(&rfkill_input_disabled))
			__rfkill_switch_all(rfkill->type, soft_blocked);
#endif
	}

	rfkill_send_events(rfkill, RFKILL_OP_ADD);

	mutex_unlock(&rfkill_global_mutex);
	return 0;

 devdel:
	device_del(&rfkill->dev);
 remove:
	list_del_init(&rfkill->node);
 unlock:
	mutex_unlock(&rfkill_global_mutex);
	return error;
}
EXPORT_SYMBOL(rfkill_register);

void rfkill_unregister(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (rfkill->ops->poll)
		cancel_delayed_work_sync(&rfkill->poll_work);

	cancel_work_sync(&rfkill->uevent_work);
	cancel_work_sync(&rfkill->sync_work);

	rfkill->registered = false;

	device_del(&rfkill->dev);

	mutex_lock(&rfkill_global_mutex);
	rfkill_send_events(rfkill, RFKILL_OP_DEL);
	list_del_init(&rfkill->node);
	mutex_unlock(&rfkill_global_mutex);

	rfkill_led_trigger_unregister(rfkill);
}
EXPORT_SYMBOL(rfkill_unregister);

void rfkill_destroy(struct rfkill *rfkill)
{
	if (rfkill)
		put_device(&rfkill->dev);
}
EXPORT_SYMBOL(rfkill_destroy);

static int rfkill_fop_open(struct inode *inode, struct file *file)
{
	struct rfkill_data *data;
	struct rfkill *rfkill;
	struct rfkill_int_event *ev, *tmp;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	INIT_LIST_HEAD(&data->events);
	mutex_init(&data->mtx);
	init_waitqueue_head(&data->read_wait);

	mutex_lock(&rfkill_global_mutex);
	mutex_lock(&data->mtx);
	/*
	 * start getting events from elsewhere but hold mtx to get
	 * startup events added first
	 */
	list_add(&data->list, &rfkill_fds);

	list_for_each_entry(rfkill, &rfkill_list, node) {
		ev = kzalloc(sizeof(*ev), GFP_KERNEL);
		if (!ev)
			goto free;
		rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
		list_add_tail(&ev->list, &data->events);
	}
	mutex_unlock(&data->mtx);
	mutex_unlock(&rfkill_global_mutex);

	file->private_data = data;

	return nonseekable_open(inode, file);

 free:
	mutex_unlock(&data->mtx);
	mutex_unlock(&rfkill_global_mutex);
	mutex_destroy(&data->mtx);
	list_for_each_entry_safe(ev, tmp, &data->events, list)
		kfree(ev);
	kfree(data);
	return -ENOMEM;
}

static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait)
{
	struct rfkill_data *data = file->private_data;
	unsigned int res = POLLOUT | POLLWRNORM;

	poll_wait(file, &data->read_wait, wait);

	mutex_lock(&data->mtx);
	if (!list_empty(&data->events))
		res = POLLIN | POLLRDNORM;
	mutex_unlock(&data->mtx);

	return res;
}

static bool rfkill_readable(struct rfkill_data *data)
{
	bool r;

	mutex_lock(&data->mtx);
	r = !list_empty(&data->events);
	mutex_unlock(&data->mtx);

	return r;
}

static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
			       size_t count, loff_t *pos)
{
	struct rfkill_data *data = file->private_data;
	struct rfkill_int_event *ev;
	unsigned long sz;
	int ret;

	mutex_lock(&data->mtx);

	while (list_empty(&data->events)) {
		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto out;
		}
		mutex_unlock(&data->mtx);
		ret = wait_event_interruptible(data->read_wait,
					       rfkill_readable(data));
		mutex_lock(&data->mtx);

		if (ret)
			goto out;
	}

	ev = list_first_entry(&data->events, struct rfkill_int_event,
				list);

	sz = min_t(unsigned long, sizeof(ev->ev), count);
	ret = sz;
	if (copy_to_user(buf, &ev->ev, sz))
		ret = -EFAULT;

	list_del(&ev->list);
	kfree(ev);
 out:
	mutex_unlock(&data->mtx);
	return ret;
}

static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
				size_t count, loff_t *pos)
{
	struct rfkill *rfkill;
	struct rfkill_event ev;

	/* we don't need the 'hard' variable but accept it */
	if (count < RFKILL_EVENT_SIZE_V1 - 1)
		return -EINVAL;

	/*
	 * Copy as much data as we can accept into our 'ev' buffer,
	 * but tell userspace how much we've copied so it can determine
	 * our API version even in a write() call, if it cares.
	 */
	count = min(count, sizeof(ev));
	if (copy_from_user(&ev, buf, count))
		return -EFAULT;

	if (ev.op != RFKILL_OP_CHANGE && ev.op != RFKILL_OP_CHANGE_ALL)
		return -EINVAL;

	if (ev.type >= NUM_RFKILL_TYPES)
		return -EINVAL;

	mutex_lock(&rfkill_global_mutex);

	if (ev.op == RFKILL_OP_CHANGE_ALL) {
		if (ev.type == RFKILL_TYPE_ALL) {
			enum rfkill_type i;
			for (i = 0; i < NUM_RFKILL_TYPES; i++)
				rfkill_global_states[i].cur = ev.soft;
		} else {
			rfkill_global_states[ev.type].cur = ev.soft;
		}
	}

	list_for_each_entry(rfkill, &rfkill_list, node) {
		if (rfkill->idx != ev.idx && ev.op != RFKILL_OP_CHANGE_ALL)
			continue;

		if (rfkill->type != ev.type && ev.type != RFKILL_TYPE_ALL)
			continue;

		rfkill_set_block(rfkill, ev.soft);
	}
	mutex_unlock(&rfkill_global_mutex);

	return count;
}

static int rfkill_fop_release(struct inode *inode, struct file *file)
{
	struct rfkill_data *data = file->private_data;
	struct rfkill_int_event *ev, *tmp;

	mutex_lock(&rfkill_global_mutex);
	list_del(&data->list);
	mutex_unlock(&rfkill_global_mutex);

	mutex_destroy(&data->mtx);
	list_for_each_entry_safe(ev, tmp, &data->events, list)
		kfree(ev);

#ifdef CONFIG_RFKILL_INPUT
	if (data->input_handler)
		if (atomic_dec_return(&rfkill_input_disabled) == 0)
			printk(KERN_DEBUG "rfkill: input handler enabled\n");
#endif

	kfree(data);

	return 0;
}

#ifdef CONFIG_RFKILL_INPUT
static long rfkill_fop_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct rfkill_data *data = file->private_data;

	if (_IOC_TYPE(cmd) != RFKILL_IOC_MAGIC)
		return -ENOSYS;

	if (_IOC_NR(cmd) != RFKILL_IOC_NOINPUT)
		return -ENOSYS;

	mutex_lock(&data->mtx);

	if (!data->input_handler) {
		if (atomic_inc_return(&rfkill_input_disabled) == 1)
			printk(KERN_DEBUG "rfkill: input handler disabled\n");
		data->input_handler = true;
	}

	mutex_unlock(&data->mtx);

	return 0;
}
#endif

static const struct file_operations rfkill_fops = {
	.owner		= THIS_MODULE,
	.open		= rfkill_fop_open,
	.read		= rfkill_fop_read,
	.write		= rfkill_fop_write,
	.poll		= rfkill_fop_poll,
	.release	= rfkill_fop_release,
#ifdef CONFIG_RFKILL_INPUT
	.unlocked_ioctl	= rfkill_fop_ioctl,
	.compat_ioctl	= rfkill_fop_ioctl,
#endif
	.llseek		= no_llseek,
};
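
/*
 * Example: a userspace client of /dev/rfkill, matching the fops
 * above. An illustrative sketch, not part of this file; it needs
 * <linux/rfkill.h> for struct rfkill_event. Each read() returns one
 * event (the initial reads deliver an RFKILL_OP_ADD per registered
 * switch), and a write() of an RFKILL_OP_CHANGE_ALL event soft-blocks
 * or unblocks every radio of the given type.
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <linux/rfkill.h>
 *
 *	int main(void)
 *	{
 *		struct rfkill_event ev = {
 *			.op   = RFKILL_OP_CHANGE_ALL,
 *			.type = RFKILL_TYPE_WLAN,
 *			.soft = 1,	(soft-block all wlan radios)
 *		};
 *		int fd = open("/dev/rfkill", O_RDWR);
 *
 *		if (fd < 0)
 *			return 1;
 *		if (write(fd, &ev, sizeof(ev)) < 0)
 *			perror("write");
 *		while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
 *			printf("idx %u type %u soft %u hard %u\n",
 *			       ev.idx, ev.type, ev.soft, ev.hard);
 *		close(fd);
 *		return 0;
 *	}
 */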

static struct miscdevice rfkill_miscdev = {
	.name	= "rfkill",
	.fops	= &rfkill_fops,
	.minor	= MISC_DYNAMIC_MINOR,
};

static int __init rfkill_init(void)
{
	int error;
	int i;

	for (i = 0; i < NUM_RFKILL_TYPES; i++)
		rfkill_global_states[i].cur = !rfkill_default_state;

	error = class_register(&rfkill_class);
	if (error)
		goto out;

	error = misc_register(&rfkill_miscdev);
	if (error) {
		class_unregister(&rfkill_class);
		goto out;
	}

#ifdef CONFIG_RFKILL_INPUT
	error = rfkill_handler_init();
	if (error) {
		misc_deregister(&rfkill_miscdev);
		class_unregister(&rfkill_class);
		goto out;
	}
#endif

 out:
	return error;
}
subsys_initcall(rfkill_init);

static void __exit rfkill_exit(void)
{
#ifdef CONFIG_RFKILL_INPUT
	rfkill_handler_exit();
#endif
	misc_deregister(&rfkill_miscdev);
	class_unregister(&rfkill_class);
}
module_exit(rfkill_exit);