xref: /openbmc/linux/net/rfkill/core.c (revision f2ac54eb)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2006 - 2007 Ivo van Doorn
 * Copyright (C) 2007 Dmitry Torokhov
 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rfkill.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/slab.h>

#include "rfkill.h"

#define POLL_INTERVAL		(5 * HZ)

#define RFKILL_BLOCK_HW		BIT(0)
#define RFKILL_BLOCK_SW		BIT(1)
#define RFKILL_BLOCK_SW_PREV	BIT(2)
#define RFKILL_BLOCK_ANY	(RFKILL_BLOCK_HW |\
				 RFKILL_BLOCK_SW |\
				 RFKILL_BLOCK_SW_PREV)
#define RFKILL_BLOCK_SW_SETCALL	BIT(31)
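
/*
 * Summary of how the RFKILL_BLOCK_* bits above are used (drawn from
 * the code below): _HW mirrors the hardware kill state, _SW the
 * software block state, _SW_PREV preserves the previous software
 * state while an ops->set_block() call is in flight so a failed call
 * can be rolled back, and _SW_SETCALL marks that such a call is
 * currently in progress.
 */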

struct rfkill {
	spinlock_t		lock;

	enum rfkill_type	type;

	unsigned long		state;
	unsigned long		hard_block_reasons;

	u32			idx;

	bool			registered;
	bool			persistent;
	bool			polling_paused;
	bool			suspended;
	bool			need_sync;

	const struct rfkill_ops	*ops;
	void			*data;

#ifdef CONFIG_RFKILL_LEDS
	struct led_trigger	led_trigger;
	const char		*ledtrigname;
#endif

	struct device		dev;
	struct list_head	node;

	struct delayed_work	poll_work;
	struct work_struct	uevent_work;
	struct work_struct	sync_work;
	char			name[];
};
#define to_rfkill(d)	container_of(d, struct rfkill, dev)

struct rfkill_int_event {
	struct list_head	list;
	struct rfkill_event_ext	ev;
};

struct rfkill_data {
	struct list_head	list;
	struct list_head	events;
	struct mutex		mtx;
	wait_queue_head_t	read_wait;
	bool			input_handler;
	u8			max_size;
};


MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>");
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_DESCRIPTION("RF switch support");
MODULE_LICENSE("GPL");


/*
 * The locking here should be made much smarter; we currently have
 * a bit of a stupid situation because drivers might want to register
 * the rfkill struct under their own lock, and take this lock during
 * rfkill method calls -- which will cause an AB-BA deadlock situation.
 *
 * To fix that, we need to rework this code here to be mostly lock-free
 * and only use the mutex for list manipulations, not to protect the
 * various other global variables. Then we can avoid holding the mutex
 * around driver operations, and all is happy.
 */
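/*
 * Illustrative sketch of the AB-BA hazard described above, with a
 * hypothetical driver lock: the driver calls rfkill_register() while
 * holding its own lock (driver lock, then rfkill_global_mutex), and
 * the core later calls ops->set_block() with rfkill_global_mutex held
 * while the driver's callback takes the driver lock again
 * (rfkill_global_mutex, then driver lock) -- the opposite order.
 */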
static LIST_HEAD(rfkill_list);	/* list of registered rf switches */
static DEFINE_MUTEX(rfkill_global_mutex);
static LIST_HEAD(rfkill_fds);	/* list of open fds of /dev/rfkill */

static unsigned int rfkill_default_state = 1;
module_param_named(default_state, rfkill_default_state, uint, 0444);
MODULE_PARM_DESC(default_state,
		 "Default initial state for all radio types, 0 = radio off");

static struct {
	bool cur, sav;
} rfkill_global_states[NUM_RFKILL_TYPES];

static bool rfkill_epo_lock_active;


#ifdef CONFIG_RFKILL_LEDS
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
	struct led_trigger *trigger;

	if (!rfkill->registered)
		return;

	trigger = &rfkill->led_trigger;

	if (rfkill->state & RFKILL_BLOCK_ANY)
		led_trigger_event(trigger, LED_OFF);
	else
		led_trigger_event(trigger, LED_FULL);
}

static int rfkill_led_trigger_activate(struct led_classdev *led)
{
	struct rfkill *rfkill;

	rfkill = container_of(led->trigger, struct rfkill, led_trigger);

	rfkill_led_trigger_event(rfkill);

	return 0;
}

const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
{
	return rfkill->led_trigger.name;
}
EXPORT_SYMBOL(rfkill_get_led_trigger_name);

void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
{
	BUG_ON(!rfkill);

	rfkill->ledtrigname = name;
}
EXPORT_SYMBOL(rfkill_set_led_trigger_name);

static int rfkill_led_trigger_register(struct rfkill *rfkill)
{
	rfkill->led_trigger.name = rfkill->ledtrigname
					? : dev_name(&rfkill->dev);
	rfkill->led_trigger.activate = rfkill_led_trigger_activate;
	return led_trigger_register(&rfkill->led_trigger);
}

static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
	led_trigger_unregister(&rfkill->led_trigger);
}

static struct led_trigger rfkill_any_led_trigger;
static struct led_trigger rfkill_none_led_trigger;
static struct work_struct rfkill_global_led_trigger_work;

static void rfkill_global_led_trigger_worker(struct work_struct *work)
{
	enum led_brightness brightness = LED_OFF;
	struct rfkill *rfkill;

	mutex_lock(&rfkill_global_mutex);
	list_for_each_entry(rfkill, &rfkill_list, node) {
		if (!(rfkill->state & RFKILL_BLOCK_ANY)) {
			brightness = LED_FULL;
			break;
		}
	}
	mutex_unlock(&rfkill_global_mutex);

	led_trigger_event(&rfkill_any_led_trigger, brightness);
	led_trigger_event(&rfkill_none_led_trigger,
			  brightness == LED_OFF ? LED_FULL : LED_OFF);
}

static void rfkill_global_led_trigger_event(void)
{
	schedule_work(&rfkill_global_led_trigger_work);
}

static int rfkill_global_led_trigger_register(void)
{
	int ret;

	INIT_WORK(&rfkill_global_led_trigger_work,
			rfkill_global_led_trigger_worker);

	rfkill_any_led_trigger.name = "rfkill-any";
	ret = led_trigger_register(&rfkill_any_led_trigger);
	if (ret)
		return ret;

	rfkill_none_led_trigger.name = "rfkill-none";
	ret = led_trigger_register(&rfkill_none_led_trigger);
	if (ret)
		led_trigger_unregister(&rfkill_any_led_trigger);
	else
		/* Delay activation until all global triggers are registered */
		rfkill_global_led_trigger_event();

	return ret;
}

static void rfkill_global_led_trigger_unregister(void)
{
	led_trigger_unregister(&rfkill_none_led_trigger);
	led_trigger_unregister(&rfkill_any_led_trigger);
	cancel_work_sync(&rfkill_global_led_trigger_work);
}
#else
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
}

static inline int rfkill_led_trigger_register(struct rfkill *rfkill)
{
	return 0;
}

static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
}

static void rfkill_global_led_trigger_event(void)
{
}

static int rfkill_global_led_trigger_register(void)
{
	return 0;
}

static void rfkill_global_led_trigger_unregister(void)
{
}
#endif /* CONFIG_RFKILL_LEDS */
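
/*
 * Usage note (illustrative, via the standard LED trigger sysfs
 * interface): an LED can be bound to the global triggers registered
 * above, e.g.
 *
 *	# echo rfkill-any > /sys/class/leds/<led>/trigger
 *
 * which lights the LED while at least one radio is unblocked;
 * "rfkill-none" is the inverse.  Per-device triggers are named after
 * the rfkill device, see rfkill_led_trigger_register().
 */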

static void rfkill_fill_event(struct rfkill_event_ext *ev,
			      struct rfkill *rfkill,
			      enum rfkill_operation op)
{
	unsigned long flags;

	ev->idx = rfkill->idx;
	ev->type = rfkill->type;
	ev->op = op;

	spin_lock_irqsave(&rfkill->lock, flags);
	ev->hard = !!(rfkill->state & RFKILL_BLOCK_HW);
	ev->soft = !!(rfkill->state & (RFKILL_BLOCK_SW |
					RFKILL_BLOCK_SW_PREV));
	ev->hard_block_reasons = rfkill->hard_block_reasons;
	spin_unlock_irqrestore(&rfkill->lock, flags);
}

static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op)
{
	struct rfkill_data *data;
	struct rfkill_int_event *ev;

	list_for_each_entry(data, &rfkill_fds, list) {
		ev = kzalloc(sizeof(*ev), GFP_KERNEL);
		if (!ev)
			continue;
		rfkill_fill_event(&ev->ev, rfkill, op);
		mutex_lock(&data->mtx);
		list_add_tail(&ev->list, &data->events);
		mutex_unlock(&data->mtx);
		wake_up_interruptible(&data->read_wait);
	}
}

static void rfkill_event(struct rfkill *rfkill)
{
	if (!rfkill->registered)
		return;

	kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);

	/* also send event to /dev/rfkill */
	rfkill_send_events(rfkill, RFKILL_OP_CHANGE);
}

/**
 * rfkill_set_block - wrapper for set_block method
 *
 * @rfkill: the rfkill struct to use
 * @blocked: the new software state
 *
 * Calls the set_block method (when applicable) and handles notifications
 * etc. as well.
 */
static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;
	bool prev, curr;
	int err;

	if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
		return;

	/*
	 * Some platforms (...!) generate input events which affect the
	 * _hard_ kill state -- whenever something tries to change the
	 * current software state query the hardware state too.
	 */
	if (rfkill->ops->query)
		rfkill->ops->query(rfkill, rfkill->data);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = rfkill->state & RFKILL_BLOCK_SW;

	if (prev)
		rfkill->state |= RFKILL_BLOCK_SW_PREV;
	else
		rfkill->state &= ~RFKILL_BLOCK_SW_PREV;

	if (blocked)
		rfkill->state |= RFKILL_BLOCK_SW;
	else
		rfkill->state &= ~RFKILL_BLOCK_SW;

	rfkill->state |= RFKILL_BLOCK_SW_SETCALL;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	err = rfkill->ops->set_block(rfkill->data, blocked);

	spin_lock_irqsave(&rfkill->lock, flags);
	if (err) {
		/*
		 * Failed -- reset status to _PREV, which may be different
		 * from what we have set _PREV to earlier in this function
		 * if rfkill_set_sw_state was invoked.
		 */
		if (rfkill->state & RFKILL_BLOCK_SW_PREV)
			rfkill->state |= RFKILL_BLOCK_SW;
		else
			rfkill->state &= ~RFKILL_BLOCK_SW;
	}
	rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL;
	rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
	curr = rfkill->state & RFKILL_BLOCK_SW;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	rfkill_led_trigger_event(rfkill);
	rfkill_global_led_trigger_event();

	if (prev != curr)
		rfkill_event(rfkill);
}
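
/*
 * Driver-side sketch of the set_block contract used above; the "foo"
 * names are hypothetical and not part of this file.  The callback
 * receives the ops_data pointer given to rfkill_alloc() and returns
 * 0 on success; a nonzero return makes rfkill_set_block() roll the
 * software state back, as seen above:
 *
 *	static int foo_rfkill_set_block(void *data, bool blocked)
 *	{
 *		struct foo_priv *foo = data;
 *
 *		return foo_write_radio_enable(foo, !blocked);
 *	}
 */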

static void rfkill_sync(struct rfkill *rfkill)
{
	lockdep_assert_held(&rfkill_global_mutex);

	if (!rfkill->need_sync)
		return;

	rfkill_set_block(rfkill, rfkill_global_states[rfkill->type].cur);
	rfkill->need_sync = false;
}

static void rfkill_update_global_state(enum rfkill_type type, bool blocked)
{
	int i;

	if (type != RFKILL_TYPE_ALL) {
		rfkill_global_states[type].cur = blocked;
		return;
	}

	for (i = 0; i < NUM_RFKILL_TYPES; i++)
		rfkill_global_states[i].cur = blocked;
}

#ifdef CONFIG_RFKILL_INPUT
static atomic_t rfkill_input_disabled = ATOMIC_INIT(0);

/**
 * __rfkill_switch_all - Toggle state of all switches of given type
 * @type: type of interfaces to be affected
 * @blocked: the new state
 *
 * This function sets the state of all switches of given type,
 * unless a specific switch is suspended.
 *
 * Caller must have acquired rfkill_global_mutex.
 */
static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
{
	struct rfkill *rfkill;

	rfkill_update_global_state(type, blocked);
	list_for_each_entry(rfkill, &rfkill_list, node) {
		if (rfkill->type != type && type != RFKILL_TYPE_ALL)
			continue;

		rfkill_set_block(rfkill, blocked);
	}
}

/**
 * rfkill_switch_all - Toggle state of all switches of given type
 * @type: type of interfaces to be affected
 * @blocked: the new state
 *
 * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @blocked).
 * Please refer to __rfkill_switch_all() for details.
 *
 * Does nothing if the EPO lock is active.
 */
void rfkill_switch_all(enum rfkill_type type, bool blocked)
{
	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	if (!rfkill_epo_lock_active)
		__rfkill_switch_all(type, blocked);

	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_epo - emergency power off all transmitters
 *
 * This kicks all non-suspended rfkill devices into the soft-blocked
 * state, ignoring everything in its path but rfkill_global_mutex and
 * the per-device spinlock.
 *
 * The global state before the EPO is saved and can be restored later
 * using rfkill_restore_states().
 */
void rfkill_epo(void)
{
	struct rfkill *rfkill;
	int i;

	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	rfkill_epo_lock_active = true;
	list_for_each_entry(rfkill, &rfkill_list, node)
		rfkill_set_block(rfkill, true);

	for (i = 0; i < NUM_RFKILL_TYPES; i++) {
		rfkill_global_states[i].sav = rfkill_global_states[i].cur;
		rfkill_global_states[i].cur = true;
	}

	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_restore_states - restore global states
 *
 * Restore (and sync switches to) the global state from the
 * states saved in rfkill_global_states.  This can undo the effects of
 * a call to rfkill_epo().
 */
void rfkill_restore_states(void)
{
	int i;

	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	rfkill_epo_lock_active = false;
	for (i = 0; i < NUM_RFKILL_TYPES; i++)
		__rfkill_switch_all(i, rfkill_global_states[i].sav);
	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_remove_epo_lock - unlock state changes
 *
 * Used by rfkill-input to manually unlock state changes, when
 * the EPO switch is deactivated.
 */
void rfkill_remove_epo_lock(void)
{
	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);
	rfkill_epo_lock_active = false;
	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_is_epo_lock_active - returns true if the EPO lock is active
 *
 * Returns 0 (false) if there is NOT an active EPO condition,
 * and 1 (true) if there is an active EPO condition, which
 * locks all radios in one of the BLOCKED states.
 *
 * Can be called in atomic context.
 */
bool rfkill_is_epo_lock_active(void)
{
	return rfkill_epo_lock_active;
}

/**
 * rfkill_get_global_sw_state - returns global state for a type
 * @type: the type to get the global state of
 *
 * Returns the current global state for a given wireless
 * device type.
 */
bool rfkill_get_global_sw_state(const enum rfkill_type type)
{
	return rfkill_global_states[type].cur;
}
#endif

bool rfkill_set_hw_state_reason(struct rfkill *rfkill,
				bool blocked, unsigned long reason)
{
	unsigned long flags;
	bool ret, prev;

	BUG_ON(!rfkill);

	if (WARN(reason &
	    ~(RFKILL_HARD_BLOCK_SIGNAL | RFKILL_HARD_BLOCK_NOT_OWNER),
	    "hw_state reason not supported: 0x%lx", reason))
		return blocked;

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = !!(rfkill->hard_block_reasons & reason);
	if (blocked) {
		rfkill->state |= RFKILL_BLOCK_HW;
		rfkill->hard_block_reasons |= reason;
	} else {
		rfkill->hard_block_reasons &= ~reason;
		if (!rfkill->hard_block_reasons)
			rfkill->state &= ~RFKILL_BLOCK_HW;
	}
	ret = !!(rfkill->state & RFKILL_BLOCK_ANY);
	spin_unlock_irqrestore(&rfkill->lock, flags);

	rfkill_led_trigger_event(rfkill);
	rfkill_global_led_trigger_event();

	if (rfkill->registered && prev != blocked)
		schedule_work(&rfkill->uevent_work);

	return ret;
}
EXPORT_SYMBOL(rfkill_set_hw_state_reason);
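
/*
 * Driver-side sketch (hypothetical "foo" names): a driver that senses
 * a hardware kill line can report it through the function above; only
 * rfkill->lock is taken and uevent notification is deferred to a work
 * item, so this is usable even from interrupt context:
 *
 *	static irqreturn_t foo_kill_switch_irq(int irq, void *ctx)
 *	{
 *		struct foo_priv *foo = ctx;
 *
 *		rfkill_set_hw_state_reason(foo->rfkill,
 *					   foo_read_kill_gpio(foo),
 *					   RFKILL_HARD_BLOCK_SIGNAL);
 *		return IRQ_HANDLED;
 *	}
 */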

static void __rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
	u32 bit = RFKILL_BLOCK_SW;

	/* if in an ops->set_block right now, use the other bit */
	if (rfkill->state & RFKILL_BLOCK_SW_SETCALL)
		bit = RFKILL_BLOCK_SW_PREV;

	if (blocked)
		rfkill->state |= bit;
	else
		rfkill->state &= ~bit;
}

bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;
	bool prev, hwblock;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = !!(rfkill->state & RFKILL_BLOCK_SW);
	__rfkill_set_sw_state(rfkill, blocked);
	hwblock = !!(rfkill->state & RFKILL_BLOCK_HW);
	blocked = blocked || hwblock;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	if (!rfkill->registered)
		return blocked;

	if (prev != blocked && !hwblock)
		schedule_work(&rfkill->uevent_work);

	rfkill_led_trigger_event(rfkill);
	rfkill_global_led_trigger_event();

	return blocked;
}
EXPORT_SYMBOL(rfkill_set_sw_state);

void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;

	BUG_ON(!rfkill);
	BUG_ON(rfkill->registered);

	spin_lock_irqsave(&rfkill->lock, flags);
	__rfkill_set_sw_state(rfkill, blocked);
	rfkill->persistent = true;
	spin_unlock_irqrestore(&rfkill->lock, flags);
}
EXPORT_SYMBOL(rfkill_init_sw_state);
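
/*
 * Illustrative use of rfkill_init_sw_state() (hypothetical "foo"
 * names): a driver whose hardware or firmware preserves the block
 * state across suspend sets the initial state, and thereby the
 * persistent flag, before registration:
 *
 *	foo->rfkill = rfkill_alloc("foo-wlan", dev, RFKILL_TYPE_WLAN,
 *				   &foo_rfkill_ops, foo);
 *	rfkill_init_sw_state(foo->rfkill, foo_fw_radio_blocked(foo));
 *	err = rfkill_register(foo->rfkill);
 */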

void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
{
	unsigned long flags;
	bool swprev, hwprev;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);

	/*
	 * No need to care about prev/setblock ... this is for uevent only
	 * and that will get triggered by rfkill_set_block anyway.
	 */
	swprev = !!(rfkill->state & RFKILL_BLOCK_SW);
	hwprev = !!(rfkill->state & RFKILL_BLOCK_HW);
	__rfkill_set_sw_state(rfkill, sw);
	if (hw)
		rfkill->state |= RFKILL_BLOCK_HW;
	else
		rfkill->state &= ~RFKILL_BLOCK_HW;

	spin_unlock_irqrestore(&rfkill->lock, flags);

	if (!rfkill->registered) {
		rfkill->persistent = true;
	} else {
		if (swprev != sw || hwprev != hw)
			schedule_work(&rfkill->uevent_work);

		rfkill_led_trigger_event(rfkill);
		rfkill_global_led_trigger_event();
	}
}
EXPORT_SYMBOL(rfkill_set_states);

static const char * const rfkill_types[] = {
	NULL, /* RFKILL_TYPE_ALL */
	"wlan",
	"bluetooth",
	"ultrawideband",
	"wimax",
	"wwan",
	"gps",
	"fm",
	"nfc",
};

enum rfkill_type rfkill_find_type(const char *name)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(rfkill_types) != NUM_RFKILL_TYPES);

	if (!name)
		return RFKILL_TYPE_ALL;

	for (i = 1; i < NUM_RFKILL_TYPES; i++)
		if (!strcmp(name, rfkill_types[i]))
			return i;
	return RFKILL_TYPE_ALL;
}
EXPORT_SYMBOL(rfkill_find_type);

static ssize_t name_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sysfs_emit(buf, "%s\n", rfkill->name);
}
static DEVICE_ATTR_RO(name);

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sysfs_emit(buf, "%s\n", rfkill_types[rfkill->type]);
}
static DEVICE_ATTR_RO(type);

static ssize_t index_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sysfs_emit(buf, "%d\n", rfkill->idx);
}
static DEVICE_ATTR_RO(index);

static ssize_t persistent_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sysfs_emit(buf, "%d\n", rfkill->persistent);
}
static DEVICE_ATTR_RO(persistent);

static ssize_t hard_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sysfs_emit(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_HW) ? 1 : 0);
}
static DEVICE_ATTR_RO(hard);

static ssize_t soft_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	mutex_lock(&rfkill_global_mutex);
	rfkill_sync(rfkill);
	mutex_unlock(&rfkill_global_mutex);

	return sysfs_emit(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0);
}

static ssize_t soft_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long state;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	err = kstrtoul(buf, 0, &state);
	if (err)
		return err;

	if (state > 1)
		return -EINVAL;

	mutex_lock(&rfkill_global_mutex);
	rfkill_sync(rfkill);
	rfkill_set_block(rfkill, state);
	mutex_unlock(&rfkill_global_mutex);

	return count;
}
static DEVICE_ATTR_RW(soft);

static ssize_t hard_block_reasons_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sysfs_emit(buf, "0x%lx\n", rfkill->hard_block_reasons);
}
static DEVICE_ATTR_RO(hard_block_reasons);

static u8 user_state_from_blocked(unsigned long state)
{
	if (state & RFKILL_BLOCK_HW)
		return RFKILL_USER_STATE_HARD_BLOCKED;
	if (state & RFKILL_BLOCK_SW)
		return RFKILL_USER_STATE_SOFT_BLOCKED;

	return RFKILL_USER_STATE_UNBLOCKED;
}

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	mutex_lock(&rfkill_global_mutex);
	rfkill_sync(rfkill);
	mutex_unlock(&rfkill_global_mutex);

	return sysfs_emit(buf, "%d\n", user_state_from_blocked(rfkill->state));
}

static ssize_t state_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long state;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	err = kstrtoul(buf, 0, &state);
	if (err)
		return err;

	if (state != RFKILL_USER_STATE_SOFT_BLOCKED &&
	    state != RFKILL_USER_STATE_UNBLOCKED)
		return -EINVAL;

	mutex_lock(&rfkill_global_mutex);
	rfkill_sync(rfkill);
	rfkill_set_block(rfkill, state == RFKILL_USER_STATE_SOFT_BLOCKED);
	mutex_unlock(&rfkill_global_mutex);

	return count;
}
static DEVICE_ATTR_RW(state);

static struct attribute *rfkill_dev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_type.attr,
	&dev_attr_index.attr,
	&dev_attr_persistent.attr,
	&dev_attr_state.attr,
	&dev_attr_soft.attr,
	&dev_attr_hard.attr,
	&dev_attr_hard_block_reasons.attr,
	NULL,
};
ATTRIBUTE_GROUPS(rfkill_dev);
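
/*
 * Userspace view of the attributes above (illustrative): each
 * registered device appears as /sys/class/rfkill/rfkill<N>/, e.g.
 *
 *	$ cat /sys/class/rfkill/rfkill0/type
 *	wlan
 *	$ cat /sys/class/rfkill/rfkill0/soft
 *	0
 *	# echo 1 > /sys/class/rfkill/rfkill0/soft
 *
 * where the last write soft-blocks the radio and requires
 * CAP_NET_ADMIN, as enforced in soft_store() above.
 */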

static void rfkill_release(struct device *dev)
{
	struct rfkill *rfkill = to_rfkill(dev);

	kfree(rfkill);
}

static int rfkill_dev_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long flags;
	unsigned long reasons;
	u32 state;
	int error;

	error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name);
	if (error)
		return error;
	error = add_uevent_var(env, "RFKILL_TYPE=%s",
			       rfkill_types[rfkill->type]);
	if (error)
		return error;
	spin_lock_irqsave(&rfkill->lock, flags);
	state = rfkill->state;
	reasons = rfkill->hard_block_reasons;
	spin_unlock_irqrestore(&rfkill->lock, flags);
	error = add_uevent_var(env, "RFKILL_STATE=%d",
			       user_state_from_blocked(state));
	if (error)
		return error;
	return add_uevent_var(env, "RFKILL_HW_BLOCK_REASON=0x%lx", reasons);
}

void rfkill_pause_polling(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (!rfkill->ops->poll)
		return;

	rfkill->polling_paused = true;
	cancel_delayed_work_sync(&rfkill->poll_work);
}
EXPORT_SYMBOL(rfkill_pause_polling);

void rfkill_resume_polling(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (!rfkill->ops->poll)
		return;

	rfkill->polling_paused = false;

	if (rfkill->suspended)
		return;

	queue_delayed_work(system_power_efficient_wq,
			   &rfkill->poll_work, 0);
}
EXPORT_SYMBOL(rfkill_resume_polling);

#ifdef CONFIG_PM_SLEEP
static int rfkill_suspend(struct device *dev)
{
	struct rfkill *rfkill = to_rfkill(dev);

	rfkill->suspended = true;
	cancel_delayed_work_sync(&rfkill->poll_work);

	return 0;
}

static int rfkill_resume(struct device *dev)
{
	struct rfkill *rfkill = to_rfkill(dev);
	bool cur;

	rfkill->suspended = false;

	if (!rfkill->registered)
		return 0;

	if (!rfkill->persistent) {
		cur = !!(rfkill->state & RFKILL_BLOCK_SW);
		rfkill_set_block(rfkill, cur);
	}

	if (rfkill->ops->poll && !rfkill->polling_paused)
		queue_delayed_work(system_power_efficient_wq,
				   &rfkill->poll_work, 0);

	return 0;
}

static SIMPLE_DEV_PM_OPS(rfkill_pm_ops, rfkill_suspend, rfkill_resume);
#define RFKILL_PM_OPS (&rfkill_pm_ops)
#else
#define RFKILL_PM_OPS NULL
#endif

static struct class rfkill_class = {
	.name		= "rfkill",
	.dev_release	= rfkill_release,
	.dev_groups	= rfkill_dev_groups,
	.dev_uevent	= rfkill_dev_uevent,
	.pm		= RFKILL_PM_OPS,
};

bool rfkill_blocked(struct rfkill *rfkill)
{
	unsigned long flags;
	u32 state;

	spin_lock_irqsave(&rfkill->lock, flags);
	state = rfkill->state;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	return !!(state & RFKILL_BLOCK_ANY);
}
EXPORT_SYMBOL(rfkill_blocked);

bool rfkill_soft_blocked(struct rfkill *rfkill)
{
	unsigned long flags;
	u32 state;

	spin_lock_irqsave(&rfkill->lock, flags);
	state = rfkill->state;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	return !!(state & RFKILL_BLOCK_SW);
}
EXPORT_SYMBOL(rfkill_soft_blocked);

struct rfkill * __must_check rfkill_alloc(const char *name,
					  struct device *parent,
					  const enum rfkill_type type,
					  const struct rfkill_ops *ops,
					  void *ops_data)
{
	struct rfkill *rfkill;
	struct device *dev;

	if (WARN_ON(!ops))
		return NULL;

	if (WARN_ON(!ops->set_block))
		return NULL;

	if (WARN_ON(!name))
		return NULL;

	if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES))
		return NULL;

	rfkill = kzalloc(sizeof(*rfkill) + strlen(name) + 1, GFP_KERNEL);
	if (!rfkill)
		return NULL;

	spin_lock_init(&rfkill->lock);
	INIT_LIST_HEAD(&rfkill->node);
	rfkill->type = type;
	strcpy(rfkill->name, name);
	rfkill->ops = ops;
	rfkill->data = ops_data;

	dev = &rfkill->dev;
	dev->class = &rfkill_class;
	dev->parent = parent;
	device_initialize(dev);

	return rfkill;
}
EXPORT_SYMBOL(rfkill_alloc);
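
/*
 * A minimal sketch of the allocation/registration lifecycle, under
 * hypothetical "foo" names and with error handling trimmed:
 *
 *	foo->rfkill = rfkill_alloc("foo-wlan", &pdev->dev,
 *				   RFKILL_TYPE_WLAN, &foo_rfkill_ops, foo);
 *	if (!foo->rfkill)
 *		return -ENOMEM;
 *
 *	err = rfkill_register(foo->rfkill);
 *	if (err) {
 *		rfkill_destroy(foo->rfkill);
 *		return err;
 *	}
 *
 * and on teardown:
 *
 *	rfkill_unregister(foo->rfkill);
 *	rfkill_destroy(foo->rfkill);
 */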

static void rfkill_poll(struct work_struct *work)
{
	struct rfkill *rfkill;

	rfkill = container_of(work, struct rfkill, poll_work.work);

	/*
	 * Poll hardware state -- driver will use one of the
	 * rfkill_set{,_hw,_sw}_state functions and use its
	 * return value to update the current status.
	 */
	rfkill->ops->poll(rfkill, rfkill->data);

	queue_delayed_work(system_power_efficient_wq,
		&rfkill->poll_work,
		round_jiffies_relative(POLL_INTERVAL));
}
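
/*
 * Driver-side sketch of the poll contract noted above (hypothetical
 * "foo" names): the callback samples the hardware and reports the
 * result back through the rfkill_set_*_state helpers:
 *
 *	static void foo_rfkill_poll(struct rfkill *rfkill, void *data)
 *	{
 *		struct foo_priv *foo = data;
 *
 *		rfkill_set_hw_state(rfkill, foo_read_kill_gpio(foo));
 *	}
 */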

static void rfkill_uevent_work(struct work_struct *work)
{
	struct rfkill *rfkill;

	rfkill = container_of(work, struct rfkill, uevent_work);

	mutex_lock(&rfkill_global_mutex);
	rfkill_event(rfkill);
	mutex_unlock(&rfkill_global_mutex);
}

static void rfkill_sync_work(struct work_struct *work)
{
	struct rfkill *rfkill = container_of(work, struct rfkill, sync_work);

	mutex_lock(&rfkill_global_mutex);
	rfkill_sync(rfkill);
	mutex_unlock(&rfkill_global_mutex);
}

int __must_check rfkill_register(struct rfkill *rfkill)
{
	static unsigned long rfkill_no;
	struct device *dev;
	int error;

	if (!rfkill)
		return -EINVAL;

	dev = &rfkill->dev;

	mutex_lock(&rfkill_global_mutex);

	if (rfkill->registered) {
		error = -EALREADY;
		goto unlock;
	}

	rfkill->idx = rfkill_no;
	dev_set_name(dev, "rfkill%lu", rfkill_no);
	rfkill_no++;

	list_add_tail(&rfkill->node, &rfkill_list);

	error = device_add(dev);
	if (error)
		goto remove;

	error = rfkill_led_trigger_register(rfkill);
	if (error)
		goto devdel;

	rfkill->registered = true;

	INIT_DELAYED_WORK(&rfkill->poll_work, rfkill_poll);
	INIT_WORK(&rfkill->uevent_work, rfkill_uevent_work);
	INIT_WORK(&rfkill->sync_work, rfkill_sync_work);

	if (rfkill->ops->poll)
		queue_delayed_work(system_power_efficient_wq,
			&rfkill->poll_work,
			round_jiffies_relative(POLL_INTERVAL));

	if (!rfkill->persistent || rfkill_epo_lock_active) {
		rfkill->need_sync = true;
		schedule_work(&rfkill->sync_work);
	} else {
#ifdef CONFIG_RFKILL_INPUT
		bool soft_blocked = !!(rfkill->state & RFKILL_BLOCK_SW);

		if (!atomic_read(&rfkill_input_disabled))
			__rfkill_switch_all(rfkill->type, soft_blocked);
#endif
	}

	rfkill_global_led_trigger_event();
	rfkill_send_events(rfkill, RFKILL_OP_ADD);

	mutex_unlock(&rfkill_global_mutex);
	return 0;

 devdel:
	device_del(&rfkill->dev);
 remove:
	list_del_init(&rfkill->node);
 unlock:
	mutex_unlock(&rfkill_global_mutex);
	return error;
}
EXPORT_SYMBOL(rfkill_register);

void rfkill_unregister(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (rfkill->ops->poll)
		cancel_delayed_work_sync(&rfkill->poll_work);

	cancel_work_sync(&rfkill->uevent_work);
	cancel_work_sync(&rfkill->sync_work);

	rfkill->registered = false;

	device_del(&rfkill->dev);

	mutex_lock(&rfkill_global_mutex);
	rfkill_send_events(rfkill, RFKILL_OP_DEL);
	list_del_init(&rfkill->node);
	rfkill_global_led_trigger_event();
	mutex_unlock(&rfkill_global_mutex);

	rfkill_led_trigger_unregister(rfkill);
}
EXPORT_SYMBOL(rfkill_unregister);

void rfkill_destroy(struct rfkill *rfkill)
{
	if (rfkill)
		put_device(&rfkill->dev);
}
EXPORT_SYMBOL(rfkill_destroy);

static int rfkill_fop_open(struct inode *inode, struct file *file)
{
	struct rfkill_data *data;
	struct rfkill *rfkill;
	struct rfkill_int_event *ev, *tmp;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->max_size = RFKILL_EVENT_SIZE_V1;

	INIT_LIST_HEAD(&data->events);
	mutex_init(&data->mtx);
	init_waitqueue_head(&data->read_wait);

	mutex_lock(&rfkill_global_mutex);
	/*
	 * start getting events from elsewhere but hold mtx to get
	 * startup events added first
	 */

	list_for_each_entry(rfkill, &rfkill_list, node) {
		ev = kzalloc(sizeof(*ev), GFP_KERNEL);
		if (!ev)
			goto free;
		rfkill_sync(rfkill);
		rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
		mutex_lock(&data->mtx);
		list_add_tail(&ev->list, &data->events);
		mutex_unlock(&data->mtx);
	}
	list_add(&data->list, &rfkill_fds);
	mutex_unlock(&rfkill_global_mutex);

	file->private_data = data;

	return stream_open(inode, file);

 free:
	mutex_unlock(&rfkill_global_mutex);
	mutex_destroy(&data->mtx);
	list_for_each_entry_safe(ev, tmp, &data->events, list)
		kfree(ev);
	kfree(data);
	return -ENOMEM;
}

static __poll_t rfkill_fop_poll(struct file *file, poll_table *wait)
{
	struct rfkill_data *data = file->private_data;
	__poll_t res = EPOLLOUT | EPOLLWRNORM;

	poll_wait(file, &data->read_wait, wait);

	mutex_lock(&data->mtx);
	if (!list_empty(&data->events))
		res = EPOLLIN | EPOLLRDNORM;
	mutex_unlock(&data->mtx);

	return res;
}

static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
			       size_t count, loff_t *pos)
{
	struct rfkill_data *data = file->private_data;
	struct rfkill_int_event *ev;
	unsigned long sz;
	int ret;

	mutex_lock(&data->mtx);

	while (list_empty(&data->events)) {
		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto out;
		}
		mutex_unlock(&data->mtx);
		/* since we re-check and it just compares pointers,
		 * using !list_empty() without locking isn't a problem
		 */
		ret = wait_event_interruptible(data->read_wait,
					       !list_empty(&data->events));
		mutex_lock(&data->mtx);

		if (ret)
			goto out;
	}

	ev = list_first_entry(&data->events, struct rfkill_int_event,
				list);

	sz = min_t(unsigned long, sizeof(ev->ev), count);
	sz = min_t(unsigned long, sz, data->max_size);
	ret = sz;
	if (copy_to_user(buf, &ev->ev, sz))
		ret = -EFAULT;

	list_del(&ev->list);
	kfree(ev);
 out:
	mutex_unlock(&data->mtx);
	return ret;
}

static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
				size_t count, loff_t *pos)
{
	struct rfkill_data *data = file->private_data;
	struct rfkill *rfkill;
	struct rfkill_event_ext ev;
	int ret;

	/* we don't need the 'hard' variable but accept it */
	if (count < RFKILL_EVENT_SIZE_V1 - 1)
		return -EINVAL;

	/*
	 * Copy as much data as we can accept into our 'ev' buffer,
	 * but tell userspace how much we've copied so it can determine
	 * our API version even in a write() call, if it cares.
	 */
	count = min(count, sizeof(ev));
	count = min_t(size_t, count, data->max_size);
	if (copy_from_user(&ev, buf, count))
		return -EFAULT;

	if (ev.type >= NUM_RFKILL_TYPES)
		return -EINVAL;

	mutex_lock(&rfkill_global_mutex);

	switch (ev.op) {
	case RFKILL_OP_CHANGE_ALL:
		rfkill_update_global_state(ev.type, ev.soft);
		list_for_each_entry(rfkill, &rfkill_list, node)
			if (rfkill->type == ev.type ||
			    ev.type == RFKILL_TYPE_ALL)
				rfkill_set_block(rfkill, ev.soft);
		ret = 0;
		break;
	case RFKILL_OP_CHANGE:
		list_for_each_entry(rfkill, &rfkill_list, node)
			if (rfkill->idx == ev.idx &&
			    (rfkill->type == ev.type ||
			     ev.type == RFKILL_TYPE_ALL))
				rfkill_set_block(rfkill, ev.soft);
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	mutex_unlock(&rfkill_global_mutex);

	return ret ?: count;
}

static int rfkill_fop_release(struct inode *inode, struct file *file)
{
	struct rfkill_data *data = file->private_data;
	struct rfkill_int_event *ev, *tmp;

	mutex_lock(&rfkill_global_mutex);
	list_del(&data->list);
	mutex_unlock(&rfkill_global_mutex);

	mutex_destroy(&data->mtx);
	list_for_each_entry_safe(ev, tmp, &data->events, list)
		kfree(ev);

#ifdef CONFIG_RFKILL_INPUT
	if (data->input_handler)
		if (atomic_dec_return(&rfkill_input_disabled) == 0)
			printk(KERN_DEBUG "rfkill: input handler enabled\n");
#endif

	kfree(data);

	return 0;
}

static long rfkill_fop_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct rfkill_data *data = file->private_data;
	int ret = -ENOSYS;
	u32 size;

	if (_IOC_TYPE(cmd) != RFKILL_IOC_MAGIC)
		return -ENOSYS;

	mutex_lock(&data->mtx);
	switch (_IOC_NR(cmd)) {
#ifdef CONFIG_RFKILL_INPUT
	case RFKILL_IOC_NOINPUT:
		if (!data->input_handler) {
			if (atomic_inc_return(&rfkill_input_disabled) == 1)
				printk(KERN_DEBUG "rfkill: input handler disabled\n");
			data->input_handler = true;
		}
		ret = 0;
		break;
#endif
	case RFKILL_IOC_MAX_SIZE:
		if (get_user(size, (__u32 __user *)arg)) {
			ret = -EFAULT;
			break;
		}
		if (size < RFKILL_EVENT_SIZE_V1 || size > U8_MAX) {
			ret = -EINVAL;
			break;
		}
		data->max_size = size;
		ret = 0;
		break;
	default:
		break;
	}
	mutex_unlock(&data->mtx);

	return ret;
}

static const struct file_operations rfkill_fops = {
	.owner		= THIS_MODULE,
	.open		= rfkill_fop_open,
	.read		= rfkill_fop_read,
	.write		= rfkill_fop_write,
	.poll		= rfkill_fop_poll,
	.release	= rfkill_fop_release,
	.unlocked_ioctl	= rfkill_fop_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.llseek		= no_llseek,
};
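
/*
 * Userspace sketch of the /dev/rfkill ABI implemented above
 * (illustrative; struct rfkill_event and the constants come from
 * include/uapi/linux/rfkill.h):
 *
 *	struct rfkill_event ev = {
 *		.op   = RFKILL_OP_CHANGE_ALL,
 *		.type = RFKILL_TYPE_WLAN,
 *		.soft = 1,
 *	};
 *	int fd = open("/dev/rfkill", O_RDWR);
 *
 *	if (fd >= 0 && write(fd, &ev, sizeof(ev)) < 0)
 *		perror("rfkill write");
 *
 * soft-blocks every wlan radio.  Reads return one event per call: an
 * RFKILL_OP_ADD for each existing device at open time, then changes
 * as they happen.  The RFKILL_IOC_MAX_SIZE ioctl lets newer userspace
 * opt in to the extended event size.
 */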

#define RFKILL_NAME "rfkill"

static struct miscdevice rfkill_miscdev = {
	.fops	= &rfkill_fops,
	.name	= RFKILL_NAME,
	.minor	= RFKILL_MINOR,
};

static int __init rfkill_init(void)
{
	int error;

	rfkill_update_global_state(RFKILL_TYPE_ALL, !rfkill_default_state);

	error = class_register(&rfkill_class);
	if (error)
		goto error_class;

	error = misc_register(&rfkill_miscdev);
	if (error)
		goto error_misc;

	error = rfkill_global_led_trigger_register();
	if (error)
		goto error_led_trigger;

#ifdef CONFIG_RFKILL_INPUT
	error = rfkill_handler_init();
	if (error)
		goto error_input;
#endif

	return 0;

#ifdef CONFIG_RFKILL_INPUT
error_input:
	rfkill_global_led_trigger_unregister();
#endif
error_led_trigger:
	misc_deregister(&rfkill_miscdev);
error_misc:
	class_unregister(&rfkill_class);
error_class:
	return error;
}
subsys_initcall(rfkill_init);

static void __exit rfkill_exit(void)
{
#ifdef CONFIG_RFKILL_INPUT
	rfkill_handler_exit();
#endif
	rfkill_global_led_trigger_unregister();
	misc_deregister(&rfkill_miscdev);
	class_unregister(&rfkill_class);
}
module_exit(rfkill_exit);

MODULE_ALIAS_MISCDEV(RFKILL_MINOR);
MODULE_ALIAS("devname:" RFKILL_NAME);