// SPDX-License-Identifier: GPL-2.0-only
/*
 * Off-channel operation helpers
 *
 * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi>
 * Copyright 2004, Instant802 Networks, Inc.
 * Copyright 2005, Devicescape Software, Inc.
 * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
 * Copyright 2009	Johannes Berg <johannes@sipsolutions.net>
 * Copyright (C) 2019 Intel Corporation
 */
#include <linux/export.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"

/*
 * Tell our hardware to disable PS.
 * Optionally inform the AP that we will go to sleep so that it buffers
 * frames for us while we are doing off-channel work.  This is optional
 * because we *may* be doing work on the operating channel and want the
 * hardware unconditionally awake, while still letting the AP send us
 * normal frames.
 */
static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
	bool offchannel_ps_enabled = false;

	/* FIXME: what to do when local->pspolling is true? */

	del_timer_sync(&local->dynamic_ps_timer);
	del_timer_sync(&ifmgd->bcn_mon_timer);
	del_timer_sync(&ifmgd->conn_mon_timer);

	cancel_work_sync(&local->dynamic_ps_enable_work);

	if (local->hw.conf.flags & IEEE80211_CONF_PS) {
		offchannel_ps_enabled = true;
		local->hw.conf.flags &= ~IEEE80211_CONF_PS;
		ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
	}

	if (!offchannel_ps_enabled ||
	    !ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK))
		/*
		 * If power save was enabled and mac80211 builds the nullfunc
		 * frames itself (IEEE80211_HW_PS_NULLFUNC_STACK), the AP
		 * already knows that we are sleeping and nothing more needs
		 * to be done.  Otherwise either power save was not enabled
		 * (so the AP does not know we are going to sleep), or the
		 * hardware builds the nullfunc frames for power save status
		 * and, when PS was disabled above, the firmware just sent a
		 * nullfunc with the power-save bit cleared.  In both cases
		 * send a new nullfunc frame to tell the AP that we are
		 * sleeping.
		 */
		ieee80211_send_nullfunc(local, sdata, true);
}

/* inform AP that we are awake again */
static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;

	if (!local->ps_sdata)
		ieee80211_send_nullfunc(local, sdata, false);
	else if (local->hw.conf.dynamic_ps_timeout > 0) {
		/*
		 * the dynamic_ps_timer had been running before leaving the
		 * operating channel, restart the timer now and send a nullfunc
		 * frame to inform the AP that we are awake so that AP sends
		 * the buffered packets (if any).
		 */
		ieee80211_send_nullfunc(local, sdata, false);
		mod_timer(&local->dynamic_ps_timer, jiffies +
			  msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
	}

	ieee80211_sta_reset_beacon_monitor(sdata);
	ieee80211_sta_reset_conn_monitor(sdata);
}

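/*
 * Take all (non-chanctx) interfaces off-channel: stop the TX queues,
 * flush anything the driver still has pending, pause beaconing and put
 * associated STA interfaces into off-channel powersave.
 */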
void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local)
{
	struct ieee80211_sub_if_data *sdata;

	if (WARN_ON(local->use_chanctx))
		return;

	/*
	 * notify the AP about us leaving the channel and stop all
	 * STA interfaces.
	 */

	/*
	 * Stop queues and transmit all frames queued by the driver
	 * before sending nullfunc to enable powersave at the AP.
	 */
	ieee80211_stop_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP,
					IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL,
					false);
	ieee80211_flush_queues(local, NULL, false);

	mutex_lock(&local->iflist_mtx);
	list_for_each_entry(sdata, &local->interfaces, list) {
		if (!ieee80211_sdata_running(sdata))
			continue;

		if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE ||
		    sdata->vif.type == NL80211_IFTYPE_NAN)
			continue;

		if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
			set_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);

		/* Check to see if we should disable beaconing. */
		if (sdata->vif.bss_conf.enable_beacon) {
			set_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED,
				&sdata->state);
			sdata->vif.bss_conf.enable_beacon = false;
			ieee80211_bss_info_change_notify(
				sdata, BSS_CHANGED_BEACON_ENABLED);
		}

		if (sdata->vif.type == NL80211_IFTYPE_STATION &&
		    sdata->u.mgd.associated)
			ieee80211_offchannel_ps_enable(sdata);
	}
	mutex_unlock(&local->iflist_mtx);
}

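/*
 * Undo ieee80211_offchannel_stop_vifs(): tell the AP we are awake
 * again, re-enable beaconing where it was stopped and wake the queues.
 */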
void ieee80211_offchannel_return(struct ieee80211_local *local)
{
	struct ieee80211_sub_if_data *sdata;

	if (WARN_ON(local->use_chanctx))
		return;

	mutex_lock(&local->iflist_mtx);
	list_for_each_entry(sdata, &local->interfaces, list) {
		if (sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE)
			continue;

		if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
			clear_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);

		if (!ieee80211_sdata_running(sdata))
			continue;

		/* Tell AP we're back */
		if (sdata->vif.type == NL80211_IFTYPE_STATION &&
		    sdata->u.mgd.associated)
			ieee80211_offchannel_ps_disable(sdata);

		if (test_and_clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED,
				       &sdata->state)) {
			sdata->vif.bss_conf.enable_beacon = true;
			ieee80211_bss_info_change_notify(
				sdata, BSS_CHANGED_BEACON_ENABLED);
		}
	}
	mutex_unlock(&local->iflist_mtx);

	ieee80211_wake_queues_by_reason(&local->hw, IEEE80211_MAX_QUEUE_MAP,
					IEEE80211_QUEUE_STOP_REASON_OFFCHANNEL,
					false);
}

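/*
 * Report the final state of a ROC item to cfg80211 (TX status for a
 * never-transmitted mgmt frame, ROC or mgmt-TX expiry) and free it.
 */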
static void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc)
{
	/* was never transmitted */
	if (roc->frame) {
		cfg80211_mgmt_tx_status(&roc->sdata->wdev, roc->mgmt_tx_cookie,
					roc->frame->data, roc->frame->len,
					false, GFP_KERNEL);
		ieee80211_free_txskb(&roc->sdata->local->hw, roc->frame);
	}

	if (!roc->mgmt_tx_cookie)
		cfg80211_remain_on_channel_expired(&roc->sdata->wdev,
						   roc->cookie, roc->chan,
						   GFP_KERNEL);
	else
		cfg80211_tx_mgmt_expired(&roc->sdata->wdev,
					 roc->mgmt_tx_cookie,
					 roc->chan, GFP_KERNEL);

	list_del(&roc->list);
	kfree(roc);
}

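/*
 * Destroy the started ROC items that are finished (aborted, expired,
 * or already ended by the hardware) and return the shortest remaining
 * duration in jiffies, or LONG_MAX if no started item remains.
 */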
static unsigned long ieee80211_end_finished_rocs(struct ieee80211_local *local,
						 unsigned long now)
{
	struct ieee80211_roc_work *roc, *tmp;
	long remaining_dur_min = LONG_MAX;

	lockdep_assert_held(&local->mtx);

	list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
		long remaining;

		if (!roc->started)
			break;

		remaining = roc->start_time +
			    msecs_to_jiffies(roc->duration) -
			    now;

		/* In case of HW ROC, it is possible that the HW finished the
		 * ROC session before the actual requested time. In such a case
		 * end the ROC session (disregarding the remaining time).
		 */
		if (roc->abort || roc->hw_begun || remaining <= 0)
			ieee80211_roc_notify_destroy(roc);
		else
			remaining_dur_min = min(remaining_dur_min, remaining);
	}

	return remaining_dur_min;
}

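/*
 * Clean up finished software ROCs and, if any started item remains,
 * re-arm the ROC work for the shortest remaining duration.  Returns
 * false when there is nothing left to wait for.
 */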
static bool ieee80211_recalc_sw_work(struct ieee80211_local *local,
				     unsigned long now)
{
	long dur = ieee80211_end_finished_rocs(local, now);

	if (dur == LONG_MAX)
		return false;

	mod_delayed_work(local->workqueue, &local->roc_work, dur);
	return true;
}

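/*
 * Mark a ROC item as started: transmit the pending mgmt frame for
 * off-channel TX, or notify userspace that we are ready on channel.
 */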
static void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc,
					 unsigned long start_time)
{
	if (WARN_ON(roc->notified))
		return;

	roc->start_time = start_time;
	roc->started = true;

	if (roc->mgmt_tx_cookie) {
		if (!WARN_ON(!roc->frame)) {
			ieee80211_tx_skb_tid_band(roc->sdata, roc->frame, 7,
						  roc->chan->band);
			roc->frame = NULL;
		}
	} else {
		cfg80211_ready_on_channel(&roc->sdata->wdev, roc->cookie,
					  roc->chan, roc->req_duration,
					  GFP_KERNEL);
	}

	roc->notified = true;
}

static void ieee80211_hw_roc_start(struct work_struct *work)
{
	struct ieee80211_local *local =
		container_of(work, struct ieee80211_local, hw_roc_start);
	struct ieee80211_roc_work *roc;

	mutex_lock(&local->mtx);

	list_for_each_entry(roc, &local->roc_list, list) {
		if (!roc->started)
			break;

		roc->hw_begun = true;
		ieee80211_handle_roc_started(roc, local->hw_roc_start_time);
	}

	mutex_unlock(&local->mtx);
}

void ieee80211_ready_on_channel(struct ieee80211_hw *hw)
{
	struct ieee80211_local *local = hw_to_local(hw);

	local->hw_roc_start_time = jiffies;

	trace_api_ready_on_channel(local);

	ieee80211_queue_work(hw, &local->hw_roc_start);
}
EXPORT_SYMBOL_GPL(ieee80211_ready_on_channel);

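/*
 * Actually start the first pending ROC item, combining it with any
 * immediately following items for the same interface and channel:
 * either hand it to the driver (HW ROC) or go off-channel in software
 * and schedule the ROC work for the shortest requested duration.
 */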
static void _ieee80211_start_next_roc(struct ieee80211_local *local)
{
	struct ieee80211_roc_work *roc, *tmp;
	enum ieee80211_roc_type type;
	u32 min_dur, max_dur;

	lockdep_assert_held(&local->mtx);

	if (WARN_ON(list_empty(&local->roc_list)))
		return;

	roc = list_first_entry(&local->roc_list, struct ieee80211_roc_work,
			       list);

	if (WARN_ON(roc->started))
		return;

	min_dur = roc->duration;
	max_dur = roc->duration;
	type = roc->type;

	list_for_each_entry(tmp, &local->roc_list, list) {
		if (tmp == roc)
			continue;
		if (tmp->sdata != roc->sdata || tmp->chan != roc->chan)
			break;
		max_dur = max(tmp->duration, max_dur);
		min_dur = min(tmp->duration, min_dur);
		type = max(tmp->type, type);
	}

	if (local->ops->remain_on_channel) {
		int ret = drv_remain_on_channel(local, roc->sdata, roc->chan,
						max_dur, type);

		if (ret) {
			wiphy_warn(local->hw.wiphy,
				   "failed to start next HW ROC (%d)\n", ret);
			/*
			 * queue the work struct again to avoid recursion
			 * when multiple failures occur
			 */
			list_for_each_entry(tmp, &local->roc_list, list) {
				if (tmp->sdata != roc->sdata ||
				    tmp->chan != roc->chan)
					break;
				tmp->started = true;
				tmp->abort = true;
			}
			ieee80211_queue_work(&local->hw, &local->hw_roc_done);
			return;
		}

		/* we'll notify about the start once the HW calls back */
		list_for_each_entry(tmp, &local->roc_list, list) {
			if (tmp->sdata != roc->sdata || tmp->chan != roc->chan)
				break;
			tmp->started = true;
		}
	} else {
		/* If actually operating on the desired channel (with at least
		 * 20 MHz channel width) don't stop all the operations but still
		 * treat it as though the ROC operation started properly, so
		 * other ROC operations won't interfere with this one.
		 */
		roc->on_channel = roc->chan == local->_oper_chandef.chan &&
				  local->_oper_chandef.width != NL80211_CHAN_WIDTH_5 &&
				  local->_oper_chandef.width != NL80211_CHAN_WIDTH_10;

		/* start this ROC */
		ieee80211_recalc_idle(local);

		if (!roc->on_channel) {
			ieee80211_offchannel_stop_vifs(local);

			local->tmp_channel = roc->chan;
			ieee80211_hw_config(local, 0);
		}

		ieee80211_queue_delayed_work(&local->hw, &local->roc_work,
					     msecs_to_jiffies(min_dur));

		/* tell userspace or send frame(s) */
		list_for_each_entry(tmp, &local->roc_list, list) {
			if (tmp->sdata != roc->sdata || tmp->chan != roc->chan)
				break;

			tmp->on_channel = roc->on_channel;
			ieee80211_handle_roc_started(tmp, jiffies);
		}
	}
}

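/*
 * Start or schedule the next pending ROC item if there is one (and we
 * are not in the middle of a reconfig), otherwise kick off any
 * deferred scan.  Must be called with local->mtx held.
 */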
void ieee80211_start_next_roc(struct ieee80211_local *local)
{
	struct ieee80211_roc_work *roc;

	lockdep_assert_held(&local->mtx);

	if (list_empty(&local->roc_list)) {
		ieee80211_run_deferred_scan(local);
		return;
	}

	/* defer roc if driver is not started (i.e. during reconfig) */
	if (local->in_reconfig)
		return;

	roc = list_first_entry(&local->roc_list, struct ieee80211_roc_work,
			       list);

	if (WARN_ON_ONCE(roc->started))
		return;

	if (local->ops->remain_on_channel) {
		_ieee80211_start_next_roc(local);
	} else {
		/* delay it a bit */
		ieee80211_queue_delayed_work(&local->hw, &local->roc_work,
					     round_jiffies_relative(HZ/2));
	}
}

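/*
 * Software ROC state machine: start the first pending item or, when
 * the current item has finished, return to the operating channel and
 * move on to the next one.
 */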
static void __ieee80211_roc_work(struct ieee80211_local *local)
{
	struct ieee80211_roc_work *roc;
	bool on_channel;

	lockdep_assert_held(&local->mtx);

	if (WARN_ON(local->ops->remain_on_channel))
		return;

	roc = list_first_entry_or_null(&local->roc_list,
				       struct ieee80211_roc_work, list);
	if (!roc)
		return;

	if (!roc->started) {
		WARN_ON(local->use_chanctx);
		_ieee80211_start_next_roc(local);
	} else {
		on_channel = roc->on_channel;
		if (ieee80211_recalc_sw_work(local, jiffies))
			return;

		/* careful - roc pointer became invalid during recalc */

		if (!on_channel) {
			ieee80211_flush_queues(local, NULL, false);

			local->tmp_channel = NULL;
			ieee80211_hw_config(local, 0);

			ieee80211_offchannel_return(local);
		}

		ieee80211_recalc_idle(local);
		ieee80211_start_next_roc(local);
	}
}

static void ieee80211_roc_work(struct work_struct *work)
{
	struct ieee80211_local *local =
		container_of(work, struct ieee80211_local, roc_work.work);

	mutex_lock(&local->mtx);
	__ieee80211_roc_work(local);
	mutex_unlock(&local->mtx);
}

static void ieee80211_hw_roc_done(struct work_struct *work)
{
	struct ieee80211_local *local =
		container_of(work, struct ieee80211_local, hw_roc_done);

	mutex_lock(&local->mtx);

	ieee80211_end_finished_rocs(local, jiffies);

	/* if there's another roc, start it now */
	ieee80211_start_next_roc(local);

	mutex_unlock(&local->mtx);
}

void ieee80211_remain_on_channel_expired(struct ieee80211_hw *hw)
{
	struct ieee80211_local *local = hw_to_local(hw);

	trace_api_remain_on_channel_expired(local);

	ieee80211_queue_work(hw, &local->hw_roc_done);
}
EXPORT_SYMBOL_GPL(ieee80211_remain_on_channel_expired);

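/*
 * Try to combine a new ROC request with an already-started HW ROC on
 * the same channel; returns true if the new item could be attached to
 * (and will finish together with) the current one.
 */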
static bool
ieee80211_coalesce_hw_started_roc(struct ieee80211_local *local,
				  struct ieee80211_roc_work *new_roc,
				  struct ieee80211_roc_work *cur_roc)
{
	unsigned long now = jiffies;
	unsigned long remaining;

	if (WARN_ON(!cur_roc->started))
		return false;

	/* if it was scheduled in the hardware, but not started yet,
	 * we can only combine if the older one had a longer duration
	 */
	if (!cur_roc->hw_begun && new_roc->duration > cur_roc->duration)
		return false;

	remaining = cur_roc->start_time +
		    msecs_to_jiffies(cur_roc->duration) -
		    now;

	/* if it doesn't fit entirely, schedule a new one */
	if (new_roc->duration > jiffies_to_msecs(remaining))
		return false;

	/* add just after the current one so we combine their finish later */
	list_add(&new_roc->list, &cur_roc->list);

	/* if the existing one has already begun then let this one also
	 * begin, otherwise they'll both be marked properly by the work
	 * struct that runs once the driver notifies us of the beginning
	 */
	if (cur_roc->hw_begun) {
		new_roc->hw_begun = true;
		ieee80211_handle_roc_started(new_roc, now);
	}

	return true;
}

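/*
 * Queue up a new ROC or off-channel mgmt-TX request, starting it
 * immediately when nothing else is pending and combining it with
 * existing items on the same channel where possible.  Called with
 * local->mtx held.
 */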
static int ieee80211_start_roc_work(struct ieee80211_local *local,
				    struct ieee80211_sub_if_data *sdata,
				    struct ieee80211_channel *channel,
				    unsigned int duration, u64 *cookie,
				    struct sk_buff *txskb,
				    enum ieee80211_roc_type type)
{
	struct ieee80211_roc_work *roc, *tmp;
	bool queued = false, combine_started = true;
	int ret;

	lockdep_assert_held(&local->mtx);

	if (channel->freq_offset)
		/* this may work, but is untested */
		return -EOPNOTSUPP;

	if (local->use_chanctx && !local->ops->remain_on_channel)
		return -EOPNOTSUPP;

	roc = kzalloc(sizeof(*roc), GFP_KERNEL);
	if (!roc)
		return -ENOMEM;

	/*
	 * If the duration is zero, then the driver
	 * wouldn't actually do anything. Set it to
	 * 10 ms for now.
	 *
	 * TODO: cancel the off-channel operation
	 *       when we get the SKB's TX status and
	 *       the wait time was zero before.
	 */
	if (!duration)
		duration = 10;

	roc->chan = channel;
	roc->duration = duration;
	roc->req_duration = duration;
	roc->frame = txskb;
	roc->type = type;
	roc->sdata = sdata;

	/*
	 * cookie is either the roc cookie (for normal roc)
	 * or the SKB (for mgmt TX)
	 */
	if (!txskb) {
		roc->cookie = ieee80211_mgmt_tx_cookie(local);
		*cookie = roc->cookie;
	} else {
		roc->mgmt_tx_cookie = *cookie;
	}

	/* if there's no need to queue, handle it immediately */
	if (list_empty(&local->roc_list) &&
	    !local->scanning && !ieee80211_is_radar_required(local)) {
		/* if not HW assist, just queue & schedule work */
		if (!local->ops->remain_on_channel) {
			list_add_tail(&roc->list, &local->roc_list);
			ieee80211_queue_delayed_work(&local->hw,
						     &local->roc_work, 0);
		} else {
			/* otherwise actually kick it off here
			 * (for error handling)
			 */
			ret = drv_remain_on_channel(local, sdata, channel,
						    duration, type);
			if (ret) {
				kfree(roc);
				return ret;
			}
			roc->started = true;
			list_add_tail(&roc->list, &local->roc_list);
		}

		return 0;
	}

	/* otherwise handle queueing */

	list_for_each_entry(tmp, &local->roc_list, list) {
		if (tmp->chan != channel || tmp->sdata != sdata)
			continue;

		/*
		 * Extend this ROC if possible: if it hasn't started yet,
		 * add the new one right after it so they get combined.
		 */
		if (!tmp->started) {
			list_add(&roc->list, &tmp->list);
			queued = true;
			break;
		}

		if (!combine_started)
			continue;

		if (!local->ops->remain_on_channel) {
			/* If there's no hardware remain-on-channel, and
			 * combining won't push us over the maximum ROC
			 * duration we allow, then we can just add the new
			 * one to the list and mark it as having started
			 * now.  If it would push over the limit, don't try
			 * to combine with other started ones (that haven't
			 * been running as long) but potentially sort it
			 * with others that had the same fate.
			 */
			unsigned long now = jiffies;
			u32 elapsed = jiffies_to_msecs(now - tmp->start_time);
			struct wiphy *wiphy = local->hw.wiphy;
			u32 max_roc = wiphy->max_remain_on_channel_duration;

			if (elapsed + roc->duration > max_roc) {
				combine_started = false;
				continue;
			}

			list_add(&roc->list, &tmp->list);
			queued = true;
			roc->on_channel = tmp->on_channel;
			ieee80211_handle_roc_started(roc, now);
			ieee80211_recalc_sw_work(local, now);
			break;
		}

		queued = ieee80211_coalesce_hw_started_roc(local, roc, tmp);
		if (queued)
			break;
		/* if it wasn't queued, perhaps it can be combined with
		 * another that also couldn't get combined previously,
		 * but no need to check for already started ones, since
		 * that can't work.
		 */
		combine_started = false;
	}

	if (!queued)
		list_add_tail(&roc->list, &local->roc_list);

	return 0;
}

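/* cfg80211 remain-on-channel request handler */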
int ieee80211_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
				struct ieee80211_channel *chan,
				unsigned int duration, u64 *cookie)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
	struct ieee80211_local *local = sdata->local;
	int ret;

	mutex_lock(&local->mtx);
	ret = ieee80211_start_roc_work(local, sdata, chan,
				       duration, cookie, NULL,
				       IEEE80211_ROC_TYPE_NORMAL);
	mutex_unlock(&local->mtx);

	return ret;
}

static int ieee80211_cancel_roc(struct ieee80211_local *local,
				u64 cookie, bool mgmt_tx)
{
	struct ieee80211_roc_work *roc, *tmp, *found = NULL;
	int ret;

	if (!cookie)
		return -ENOENT;

	flush_work(&local->hw_roc_start);

	mutex_lock(&local->mtx);
	list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
		if (!mgmt_tx && roc->cookie != cookie)
			continue;
		else if (mgmt_tx && roc->mgmt_tx_cookie != cookie)
			continue;

		found = roc;
		break;
	}

	if (!found) {
		mutex_unlock(&local->mtx);
		return -ENOENT;
	}

	if (!found->started) {
		ieee80211_roc_notify_destroy(found);
		goto out_unlock;
	}

	if (local->ops->remain_on_channel) {
		ret = drv_cancel_remain_on_channel(local, roc->sdata);
		if (WARN_ON_ONCE(ret)) {
			mutex_unlock(&local->mtx);
			return ret;
		}

		/* TODO:
		 * if multiple items were combined here then we really shouldn't
		 * cancel them all - we should wait for as much time as needed
		 * for the longest remaining one, and only then cancel ...
		 */
		list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
			if (!roc->started)
				break;
			if (roc == found)
				found = NULL;
			ieee80211_roc_notify_destroy(roc);
		}

		/* that really must not happen - it was started */
		WARN_ON(found);

		ieee80211_start_next_roc(local);
	} else {
		/* go through work struct to return to the operating channel */
		found->abort = true;
		mod_delayed_work(local->workqueue, &local->roc_work, 0);
	}

 out_unlock:
	mutex_unlock(&local->mtx);

	return 0;
}

int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy,
				       struct wireless_dev *wdev, u64 cookie)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
	struct ieee80211_local *local = sdata->local;

	return ieee80211_cancel_roc(local, cookie, false);
}

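/*
 * cfg80211 mgmt-TX handler: decide whether the frame can go out on the
 * current channel or needs to be sent off-channel via the ROC code,
 * then build the frame and hand it off accordingly.
 */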
int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
		      struct cfg80211_mgmt_tx_params *params, u64 *cookie)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct sta_info *sta;
	const struct ieee80211_mgmt *mgmt = (void *)params->buf;
	bool need_offchan = false;
	u32 flags;
	int ret;
	u8 *data;

	if (params->dont_wait_for_ack)
		flags = IEEE80211_TX_CTL_NO_ACK;
	else
		flags = IEEE80211_TX_INTFL_NL80211_FRAME_TX |
			IEEE80211_TX_CTL_REQ_TX_STATUS;

	if (params->no_cck)
		flags |= IEEE80211_TX_CTL_NO_CCK_RATE;

	switch (sdata->vif.type) {
	case NL80211_IFTYPE_ADHOC:
		if (!sdata->vif.bss_conf.ibss_joined)
			need_offchan = true;
#ifdef CONFIG_MAC80211_MESH
		fallthrough;
	case NL80211_IFTYPE_MESH_POINT:
		if (ieee80211_vif_is_mesh(&sdata->vif) &&
		    !sdata->u.mesh.mesh_id_len)
			need_offchan = true;
#endif
		fallthrough;
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_AP_VLAN:
	case NL80211_IFTYPE_P2P_GO:
		if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
		    !ieee80211_vif_is_mesh(&sdata->vif) &&
		    !rcu_access_pointer(sdata->bss->beacon))
			need_offchan = true;
		if (!ieee80211_is_action(mgmt->frame_control) ||
		    mgmt->u.action.category == WLAN_CATEGORY_PUBLIC ||
		    mgmt->u.action.category == WLAN_CATEGORY_SELF_PROTECTED ||
		    mgmt->u.action.category == WLAN_CATEGORY_SPECTRUM_MGMT)
			break;
		rcu_read_lock();
		sta = sta_info_get_bss(sdata, mgmt->da);
		rcu_read_unlock();
		if (!sta)
			return -ENOLINK;
		break;
	case NL80211_IFTYPE_STATION:
	case NL80211_IFTYPE_P2P_CLIENT:
		sdata_lock(sdata);
		if (!sdata->u.mgd.associated ||
		    (params->offchan && params->wait &&
		     local->ops->remain_on_channel &&
		     memcmp(sdata->u.mgd.bssid,
			    mgmt->bssid, ETH_ALEN)))
			need_offchan = true;
		sdata_unlock(sdata);
		break;
	case NL80211_IFTYPE_P2P_DEVICE:
		need_offchan = true;
		break;
	case NL80211_IFTYPE_NAN:
	default:
		return -EOPNOTSUPP;
	}

	/* configurations requiring offchan cannot work if no channel has been
	 * specified
	 */
	if (need_offchan && !params->chan)
		return -EINVAL;

	mutex_lock(&local->mtx);

	/* Check if the operating channel is the requested channel */
	if (!need_offchan) {
		struct ieee80211_chanctx_conf *chanctx_conf;

		rcu_read_lock();
		chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);

		if (chanctx_conf) {
			need_offchan = params->chan &&
				       (params->chan !=
					chanctx_conf->def.chan);
		} else if (!params->chan) {
			ret = -EINVAL;
			rcu_read_unlock();
			goto out_unlock;
		} else {
			need_offchan = true;
		}
		rcu_read_unlock();
	}

	if (need_offchan && !params->offchan) {
		ret = -EBUSY;
		goto out_unlock;
	}

	skb = dev_alloc_skb(local->hw.extra_tx_headroom + params->len);
	if (!skb) {
		ret = -ENOMEM;
		goto out_unlock;
	}
	skb_reserve(skb, local->hw.extra_tx_headroom);

	data = skb_put_data(skb, params->buf, params->len);

	/* Update CSA counters */
	if (sdata->vif.csa_active &&
	    (sdata->vif.type == NL80211_IFTYPE_AP ||
	     sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
	     sdata->vif.type == NL80211_IFTYPE_ADHOC) &&
	    params->n_csa_offsets) {
		int i;
		struct beacon_data *beacon = NULL;

		rcu_read_lock();

		if (sdata->vif.type == NL80211_IFTYPE_AP)
			beacon = rcu_dereference(sdata->u.ap.beacon);
		else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
			beacon = rcu_dereference(sdata->u.ibss.presp);
		else if (ieee80211_vif_is_mesh(&sdata->vif))
			beacon = rcu_dereference(sdata->u.mesh.beacon);

		if (beacon)
			for (i = 0; i < params->n_csa_offsets; i++)
				data[params->csa_offsets[i]] =
					beacon->cntdwn_current_counter;

		rcu_read_unlock();
	}

	IEEE80211_SKB_CB(skb)->flags = flags;

	skb->dev = sdata->dev;

	if (!params->dont_wait_for_ack) {
		/* make a copy to preserve the frame contents
		 * in case of encryption.
		 */
		ret = ieee80211_attach_ack_skb(local, skb, cookie, GFP_KERNEL);
		if (ret) {
			kfree_skb(skb);
			goto out_unlock;
		}
	} else {
		/* Assign a dummy non-zero cookie, it's not sent to
		 * userspace in this case but we rely on its value
		 * internally in the need_offchan case to distinguish
		 * mgmt-tx from remain-on-channel.
		 */
		*cookie = 0xffffffff;
	}

	if (!need_offchan) {
		ieee80211_tx_skb(sdata, skb);
		ret = 0;
		goto out_unlock;
	}

	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN |
					IEEE80211_TX_INTFL_OFFCHAN_TX_OK;
	if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
		IEEE80211_SKB_CB(skb)->hw_queue =
			local->hw.offchannel_tx_hw_queue;

	/* This will handle all kinds of coalescing and immediate TX */
	ret = ieee80211_start_roc_work(local, sdata, params->chan,
				       params->wait, cookie, skb,
				       IEEE80211_ROC_TYPE_MGMT_TX);
	if (ret)
		ieee80211_free_txskb(&local->hw, skb);
 out_unlock:
	mutex_unlock(&local->mtx);
	return ret;
}

int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
				  struct wireless_dev *wdev, u64 cookie)
{
	struct ieee80211_local *local = wiphy_priv(wiphy);

	return ieee80211_cancel_roc(local, cookie, true);
}

void ieee80211_roc_setup(struct ieee80211_local *local)
{
	INIT_WORK(&local->hw_roc_start, ieee80211_hw_roc_start);
	INIT_WORK(&local->hw_roc_done, ieee80211_hw_roc_done);
	INIT_DELAYED_WORK(&local->roc_work, ieee80211_roc_work);
	INIT_LIST_HEAD(&local->roc_list);
}

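/*
 * Cancel and destroy all pending and started ROC items, or only the
 * ones belonging to @sdata if it is non-NULL.
 */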
void ieee80211_roc_purge(struct ieee80211_local *local,
			 struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_roc_work *roc, *tmp;
	bool work_to_do = false;

	mutex_lock(&local->mtx);
	list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
		if (sdata && roc->sdata != sdata)
			continue;

		if (roc->started) {
			if (local->ops->remain_on_channel) {
				/* can race, so ignore return value */
				drv_cancel_remain_on_channel(local, sdata);
				ieee80211_roc_notify_destroy(roc);
			} else {
				roc->abort = true;
				work_to_do = true;
			}
		} else {
			ieee80211_roc_notify_destroy(roc);
		}
	}
	if (work_to_do)
		__ieee80211_roc_work(local);
	mutex_unlock(&local->mtx);
}