// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2020 The Linux Foundation. All rights reserved.
 */

#include <linux/delay.h>

#include "mac.h"

#include <net/mac80211.h>
#include "core.h"
#include "hif.h"
#include "debug.h"
#include "wmi.h"
#include "wow.h"
#include "dp_rx.h"

static const struct wiphy_wowlan_support ath11k_wowlan_support = {
	.flags = WIPHY_WOWLAN_DISCONNECT |
		 WIPHY_WOWLAN_MAGIC_PKT |
		 WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
		 WIPHY_WOWLAN_GTK_REKEY_FAILURE,
	.pattern_min_len = WOW_MIN_PATTERN_SIZE,
	.pattern_max_len = WOW_MAX_PATTERN_SIZE,
	.max_pkt_offset = WOW_MAX_PKT_OFFSET,
};

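/* Enable WoW in firmware. ATH11K_FLAG_HTC_SUSPEND_COMPLETE is cleared here
 * and set elsewhere once firmware signals over HTC that the suspend has
 * completed, so after issuing the WMI WoW enable command wait for that
 * indication and retry a few times if it does not arrive in time.
 */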
int ath11k_wow_enable(struct ath11k_base *ab)
{
	struct ath11k *ar = ath11k_ab_to_ar(ab, 0);
	int i, ret;

	clear_bit(ATH11K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags);

	for (i = 0; i < ATH11K_WOW_RETRY_NUM; i++) {
		reinit_completion(&ab->htc_suspend);

		ret = ath11k_wmi_wow_enable(ar);
		if (ret) {
			ath11k_warn(ab, "failed to issue wow enable: %d\n", ret);
			return ret;
		}

		ret = wait_for_completion_timeout(&ab->htc_suspend, 3 * HZ);
		if (ret == 0) {
			ath11k_warn(ab,
				    "timed out while waiting for htc suspend completion\n");
			return -ETIMEDOUT;
		}

		if (test_bit(ATH11K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags))
			/* success, suspend complete received */
			return 0;

		ath11k_warn(ab, "htc suspend not complete, retrying (try %d)\n",
			    i);
		msleep(ATH11K_WOW_RETRY_WAIT_MS);
	}

	ath11k_warn(ab, "htc suspend not complete, failing after %d tries\n", i);

	return -ETIMEDOUT;
}

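/* Wake the target up from WoW: send the host wakeup indication and wait for
 * firmware to confirm via the wakeup-completed event.
 */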
int ath11k_wow_wakeup(struct ath11k_base *ab)
{
	struct ath11k *ar = ath11k_ab_to_ar(ab, 0);
	int ret;

	reinit_completion(&ab->wow.wakeup_completed);

	ret = ath11k_wmi_wow_host_wakeup_ind(ar);
	if (ret) {
		ath11k_warn(ab, "failed to send wow wakeup indication: %d\n",
			    ret);
		return ret;
	}

	ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ);
	if (ret == 0) {
		ath11k_warn(ab, "timed out while waiting for wow wakeup completion\n");
		return -ETIMEDOUT;
	}

	return 0;
}

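/* Drop any previously programmed WoW state on the vdev: disable every wakeup
 * event and delete every pattern slot so each suspend starts from a clean
 * configuration.
 */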
static int ath11k_wow_vif_cleanup(struct ath11k_vif *arvif)
{
	struct ath11k *ar = arvif->ar;
	int i, ret;

	for (i = 0; i < WOW_EVENT_MAX; i++) {
		ret = ath11k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0);
		if (ret) {
			ath11k_warn(ar->ab, "failed to issue wow wakeup for event %s on vdev %i: %d\n",
				    wow_wakeup_event(i), arvif->vdev_id, ret);
			return ret;
		}
	}

	for (i = 0; i < ar->wow.max_num_patterns; i++) {
		ret = ath11k_wmi_wow_del_pattern(ar, arvif->vdev_id, i);
		if (ret) {
			ath11k_warn(ar->ab, "failed to delete wow pattern %d for vdev %i: %d\n",
				    i, arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

static int ath11k_wow_cleanup(struct ath11k *ar)
{
	struct ath11k_vif *arvif;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		ret = ath11k_wow_vif_cleanup(arvif);
		if (ret) {
			ath11k_warn(ar->ab, "failed to clean wow wakeups on vdev %i: %d\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

/* Convert an 802.3 format to an 802.11 format.
 *         +------------+-----------+--------+----------------+
 * 802.3:  |dest mac(6B)|src mac(6B)|type(2B)|     body...    |
 *         +------------+-----------+--------+----------------+
 *                |__         |_______    |____________  |________
 *                   |                |                |          |
 *         +--+------------+----+-----------+------+--------+-----------+
 * 802.11: |4B|dest mac(6B)| 6B |src mac(6B)|  8B  |type(2B)|  body...  |
 *         +--+------------+----+-----------+------+--------+-----------+
 */
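/* For example (illustrative offsets): a pattern byte at 802.3 offset 0
 * (start of the destination MAC) lands at 802.11 offset 4 (addr1), a byte
 * at offset 6 (source MAC) lands at offset 16 (addr3), and the EtherType at
 * offset 12 lands at offset 30, i.e. the snap_type field of the LLC/SNAP
 * header that follows the 24-byte 802.11 header.
 */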
static void ath11k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new,
					     const struct cfg80211_pkt_pattern *old)
{
	u8 hdr_8023_pattern[ETH_HLEN] = {};
	u8 hdr_8023_bit_mask[ETH_HLEN] = {};
	u8 hdr_80211_pattern[WOW_HDR_LEN] = {};
	u8 hdr_80211_bit_mask[WOW_HDR_LEN] = {};

	int total_len = old->pkt_offset + old->pattern_len;
	int hdr_80211_end_offset;

	struct ieee80211_hdr_3addr *new_hdr_pattern =
		(struct ieee80211_hdr_3addr *)hdr_80211_pattern;
	struct ieee80211_hdr_3addr *new_hdr_mask =
		(struct ieee80211_hdr_3addr *)hdr_80211_bit_mask;
	struct ethhdr *old_hdr_pattern = (struct ethhdr *)hdr_8023_pattern;
	struct ethhdr *old_hdr_mask = (struct ethhdr *)hdr_8023_bit_mask;
	int hdr_len = sizeof(*new_hdr_pattern);

	struct rfc1042_hdr *new_rfc_pattern =
		(struct rfc1042_hdr *)(hdr_80211_pattern + hdr_len);
	struct rfc1042_hdr *new_rfc_mask =
		(struct rfc1042_hdr *)(hdr_80211_bit_mask + hdr_len);
	int rfc_len = sizeof(*new_rfc_pattern);

	memcpy(hdr_8023_pattern + old->pkt_offset,
	       old->pattern, ETH_HLEN - old->pkt_offset);
	memcpy(hdr_8023_bit_mask + old->pkt_offset,
	       old->mask, ETH_HLEN - old->pkt_offset);

	/* Copy destination address */
	memcpy(new_hdr_pattern->addr1, old_hdr_pattern->h_dest, ETH_ALEN);
	memcpy(new_hdr_mask->addr1, old_hdr_mask->h_dest, ETH_ALEN);

	/* Copy source address */
	memcpy(new_hdr_pattern->addr3, old_hdr_pattern->h_source, ETH_ALEN);
	memcpy(new_hdr_mask->addr3, old_hdr_mask->h_source, ETH_ALEN);

	/* Copy logical link type */
	memcpy(&new_rfc_pattern->snap_type,
	       &old_hdr_pattern->h_proto,
	       sizeof(old_hdr_pattern->h_proto));
	memcpy(&new_rfc_mask->snap_type,
	       &old_hdr_mask->h_proto,
	       sizeof(old_hdr_mask->h_proto));

	/* Compute new pkt_offset */
	if (old->pkt_offset < ETH_ALEN)
		new->pkt_offset = old->pkt_offset +
			offsetof(struct ieee80211_hdr_3addr, addr1);
	else if (old->pkt_offset < offsetof(struct ethhdr, h_proto))
		new->pkt_offset = old->pkt_offset +
			offsetof(struct ieee80211_hdr_3addr, addr3) -
			offsetof(struct ethhdr, h_source);
	else
		new->pkt_offset = old->pkt_offset + hdr_len + rfc_len - ETH_HLEN;

	/* Compute new hdr end offset */
	if (total_len > ETH_HLEN)
		hdr_80211_end_offset = hdr_len + rfc_len;
	else if (total_len > offsetof(struct ethhdr, h_proto))
		hdr_80211_end_offset = hdr_len + rfc_len + total_len - ETH_HLEN;
	else if (total_len > ETH_ALEN)
		hdr_80211_end_offset = total_len - ETH_ALEN +
			offsetof(struct ieee80211_hdr_3addr, addr3);
	else
		hdr_80211_end_offset = total_len +
			offsetof(struct ieee80211_hdr_3addr, addr1);

	new->pattern_len = hdr_80211_end_offset - new->pkt_offset;

	memcpy((u8 *)new->pattern,
	       hdr_80211_pattern + new->pkt_offset,
	       new->pattern_len);
	memcpy((u8 *)new->mask,
	       hdr_80211_bit_mask + new->pkt_offset,
	       new->pattern_len);

	if (total_len > ETH_HLEN) {
		/* Copy frame body */
		memcpy((u8 *)new->pattern + new->pattern_len,
		       (void *)old->pattern + ETH_HLEN - old->pkt_offset,
		       total_len - ETH_HLEN);
		memcpy((u8 *)new->mask + new->pattern_len,
		       (void *)old->mask + ETH_HLEN - old->pkt_offset,
		       total_len - ETH_HLEN);

		new->pattern_len += total_len - ETH_HLEN;
	}
}

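/* Translate a cfg80211 scheduled scan (net-detect) request into a WMI PNO
 * scan request: copy the match-set SSIDs, channel list and scan plan
 * intervals, mark hidden networks that also appear in the probed SSID list,
 * and pass through MAC address randomization when requested.
 */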
static int ath11k_wmi_pno_check_and_convert(struct ath11k *ar, u32 vdev_id,
					    struct cfg80211_sched_scan_request *nd_config,
					    struct wmi_pno_scan_req *pno)
{
	int i, j;
	u8 ssid_len;

	pno->enable = 1;
	pno->vdev_id = vdev_id;
	pno->uc_networks_count = nd_config->n_match_sets;

	if (!pno->uc_networks_count ||
	    pno->uc_networks_count > WMI_PNO_MAX_SUPP_NETWORKS)
		return -EINVAL;

	if (nd_config->n_channels > WMI_PNO_MAX_NETW_CHANNELS_EX)
		return -EINVAL;

	/* Fill in the per-profile parameters */
	for (i = 0; i < pno->uc_networks_count; i++) {
		ssid_len = nd_config->match_sets[i].ssid.ssid_len;

		if (ssid_len == 0 || ssid_len > 32)
			return -EINVAL;

		pno->a_networks[i].ssid.ssid_len = ssid_len;

		memcpy(pno->a_networks[i].ssid.ssid,
		       nd_config->match_sets[i].ssid.ssid,
		       nd_config->match_sets[i].ssid.ssid_len);
		pno->a_networks[i].authentication = 0;
		pno->a_networks[i].encryption     = 0;
		pno->a_networks[i].bcast_nw_type  = 0;

		/* Copy the list of valid channels into the request */
		pno->a_networks[i].channel_count = nd_config->n_channels;
		pno->a_networks[i].rssi_threshold = nd_config->match_sets[i].rssi_thold;

		for (j = 0; j < nd_config->n_channels; j++) {
			pno->a_networks[i].channels[j] =
					nd_config->channels[j]->center_freq;
		}
	}

	/* set scan to passive if no SSIDs are specified in the request */
	if (nd_config->n_ssids == 0)
		pno->do_passive_scan = true;
	else
		pno->do_passive_scan = false;

	for (i = 0; i < nd_config->n_ssids; i++) {
		j = 0;
		while (j < pno->uc_networks_count) {
			if (pno->a_networks[j].ssid.ssid_len ==
				nd_config->ssids[i].ssid_len &&
			(memcmp(pno->a_networks[j].ssid.ssid,
				nd_config->ssids[i].ssid,
				pno->a_networks[j].ssid.ssid_len) == 0)) {
				pno->a_networks[j].bcast_nw_type = BCAST_HIDDEN;
				break;
			}
			j++;
		}
	}

	if (nd_config->n_scan_plans == 2) {
		pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
		pno->fast_scan_max_cycles = nd_config->scan_plans[0].iterations;
		pno->slow_scan_period =
			nd_config->scan_plans[1].interval * MSEC_PER_SEC;
	} else if (nd_config->n_scan_plans == 1) {
		pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
		pno->fast_scan_max_cycles = 1;
		pno->slow_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
	} else {
		ath11k_warn(ar->ab, "invalid number of scan plans %d\n",
			    nd_config->n_scan_plans);
	}

	if (nd_config->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
		/* enable mac randomization */
		pno->enable_pno_scan_randomization = 1;
		memcpy(pno->mac_addr, nd_config->mac_addr, ETH_ALEN);
		memcpy(pno->mac_addr_mask, nd_config->mac_addr_mask, ETH_ALEN);
	}

	pno->delay_start_time = nd_config->delay;

	/* Current FW does not support min-max range for dwell time */
	pno->active_max_time = WMI_ACTIVE_MAX_CHANNEL_TIME;
	pno->passive_max_time = WMI_PASSIVE_MAX_CHANNEL_TIME;

	return 0;
}

static int ath11k_vif_wow_set_wakeups(struct ath11k_vif *arvif,
				      struct cfg80211_wowlan *wowlan)
{
	int ret, i;
	unsigned long wow_mask = 0;
	struct ath11k *ar = arvif->ar;
	const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
	int pattern_id = 0;

	/* Setup requested WOW features */
	switch (arvif->vdev_type) {
	case WMI_VDEV_TYPE_IBSS:
		__set_bit(WOW_BEACON_EVENT, &wow_mask);
		fallthrough;
	case WMI_VDEV_TYPE_AP:
		__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
		__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
		__set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask);
		__set_bit(WOW_AUTH_REQ_EVENT, &wow_mask);
		__set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask);
		__set_bit(WOW_HTT_EVENT, &wow_mask);
		__set_bit(WOW_RA_MATCH_EVENT, &wow_mask);
		break;
	case WMI_VDEV_TYPE_STA:
		if (wowlan->disconnect) {
			__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
			__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
			__set_bit(WOW_BMISS_EVENT, &wow_mask);
			__set_bit(WOW_CSA_IE_EVENT, &wow_mask);
		}

		if (wowlan->magic_pkt)
			__set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);

		if (wowlan->nd_config) {
			struct wmi_pno_scan_req *pno;
			int ret;

			pno = kzalloc(sizeof(*pno), GFP_KERNEL);
			if (!pno)
				return -ENOMEM;

			ar->nlo_enabled = true;

			ret = ath11k_wmi_pno_check_and_convert(ar, arvif->vdev_id,
							       wowlan->nd_config, pno);
			if (!ret) {
				ath11k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
				__set_bit(WOW_NLO_DETECTED_EVENT, &wow_mask);
			}

			kfree(pno);
		}
		break;
	default:
		break;
	}

	for (i = 0; i < wowlan->n_patterns; i++) {
		u8 bitmask[WOW_MAX_PATTERN_SIZE] = {};
		u8 ath_pattern[WOW_MAX_PATTERN_SIZE] = {};
		u8 ath_bitmask[WOW_MAX_PATTERN_SIZE] = {};
		struct cfg80211_pkt_pattern new_pattern = {};
		struct cfg80211_pkt_pattern old_pattern = patterns[i];
		int j;

		new_pattern.pattern = ath_pattern;
		new_pattern.mask = ath_bitmask;
		if (patterns[i].pattern_len > WOW_MAX_PATTERN_SIZE)
			continue;
		/* Expand cfg80211's mask, which packs one bit per pattern
		 * byte, into the per-byte mask firmware expects: e.g. a mask
		 * byte of 0x05 selects pattern bytes 0 and 2, so bitmask[0]
		 * and bitmask[2] become 0xff.
		 */
		for (j = 0; j < patterns[i].pattern_len; j++)
			if (patterns[i].mask[j / 8] & BIT(j % 8))
				bitmask[j] = 0xff;
		old_pattern.mask = bitmask;

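		/* In native wifi rx decap mode the frames matched by the
		 * pattern engine are expected to be 802.11 formatted, so
		 * patterns that overlap the Ethernet header must be converted
		 * to the 802.11 layout; patterns that start beyond it only
		 * need their offset shifted by the header size difference
		 * (WOW_HDR_LEN - ETH_HLEN).
		 */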
		if (ar->wmi->wmi_ab->wlan_resource_config.rx_decap_mode ==
		    ATH11K_HW_TXRX_NATIVE_WIFI) {
			if (patterns[i].pkt_offset < ETH_HLEN) {
				u8 pattern_ext[WOW_MAX_PATTERN_SIZE] = {};

				memcpy(pattern_ext, old_pattern.pattern,
				       old_pattern.pattern_len);
				old_pattern.pattern = pattern_ext;
				ath11k_wow_convert_8023_to_80211(&new_pattern,
								 &old_pattern);
			} else {
				new_pattern = old_pattern;
				new_pattern.pkt_offset += WOW_HDR_LEN - ETH_HLEN;
			}
		}

		if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE))
			return -EINVAL;

		ret = ath11k_wmi_wow_add_pattern(ar, arvif->vdev_id,
						 pattern_id,
						 new_pattern.pattern,
						 new_pattern.mask,
						 new_pattern.pattern_len,
						 new_pattern.pkt_offset);
		if (ret) {
			ath11k_warn(ar->ab, "failed to add pattern %i to vdev %i: %d\n",
				    pattern_id,
				    arvif->vdev_id, ret);
			return ret;
		}

		pattern_id++;
		__set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask);
	}

	for (i = 0; i < WOW_EVENT_MAX; i++) {
		if (!test_bit(i, &wow_mask))
			continue;
		ret = ath11k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1);
		if (ret) {
			ath11k_warn(ar->ab, "failed to enable wakeup event %s on vdev %i: %d\n",
				    wow_wakeup_event(i), arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

static int ath11k_wow_set_wakeups(struct ath11k *ar,
				  struct cfg80211_wowlan *wowlan)
{
	struct ath11k_vif *arvif;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		ret = ath11k_vif_wow_set_wakeups(arvif, wowlan);
		if (ret) {
			ath11k_warn(ar->ab, "failed to set wow wakeups on vdev %i: %d\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

static int ath11k_vif_wow_clean_nlo(struct ath11k_vif *arvif)
{
	int ret = 0;
	struct ath11k *ar = arvif->ar;

	switch (arvif->vdev_type) {
	case WMI_VDEV_TYPE_STA:
		if (ar->nlo_enabled) {
			struct wmi_pno_scan_req *pno;

			pno = kzalloc(sizeof(*pno), GFP_KERNEL);
			if (!pno)
				return -ENOMEM;

			pno->enable = 0;
			ar->nlo_enabled = false;
			ret = ath11k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
			kfree(pno);
		}
		break;
	default:
		break;
	}
	return ret;
}

static int ath11k_wow_nlo_cleanup(struct ath11k *ar)
{
	struct ath11k_vif *arvif;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		ret = ath11k_vif_wow_clean_nlo(arvif);
		if (ret) {
			ath11k_warn(ar->ab, "failed to clean nlo settings on vdev %i: %d\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

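/* Program the hardware data filter on every vdev so that, while in WoW,
 * firmware drops multicast frames other than ICMPv6 and broadcast frames
 * other than ARP.
 */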
static int ath11k_wow_set_hw_filter(struct ath11k *ar)
{
	struct ath11k_vif *arvif;
	u32 bitmap;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		bitmap = WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC |
			WMI_HW_DATA_FILTER_DROP_NON_ARP_BC;
		ret = ath11k_wmi_hw_data_filter_cmd(ar, arvif->vdev_id,
						    bitmap,
						    true);
		if (ret) {
			ath11k_warn(ar->ab, "failed to set hw data filter on vdev %i: %d\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

static int ath11k_wow_clear_hw_filter(struct ath11k *ar)
{
	struct ath11k_vif *arvif;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		ret = ath11k_wmi_hw_data_filter_cmd(ar, arvif->vdev_id, 0, false);

		if (ret) {
			ath11k_warn(ar->ab, "failed to clear hw data filter on vdev %i: %d\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

static int ath11k_wow_arp_ns_offload(struct ath11k *ar, bool enable)
{
	struct ath11k_vif *arvif;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
			continue;

		ret = ath11k_wmi_arp_ns_offload(ar, arvif, enable);

		if (ret) {
			ath11k_warn(ar->ab, "failed to set arp ns offload vdev %i: enable %d, ret %d\n",
				    arvif->vdev_id, enable, ret);
			return ret;
		}
	}

	return 0;
}

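/* Enable or disable GTK rekey offload on every station vdev that is up and
 * supplied rekey data. When disabling (on resume), the current rekey info is
 * requested from firmware first, before the offload is turned off.
 */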
static int ath11k_gtk_rekey_offload(struct ath11k *ar, bool enable)
{
	struct ath11k_vif *arvif;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->vdev_type != WMI_VDEV_TYPE_STA ||
		    !arvif->is_up ||
		    !arvif->rekey_data.enable_offload)
			continue;

		/* get rekey info before disabling rekey offload */
		if (!enable) {
			ret = ath11k_wmi_gtk_rekey_getinfo(ar, arvif);
			if (ret) {
				ath11k_warn(ar->ab, "failed to request rekey info vdev %i, ret %d\n",
					    arvif->vdev_id, ret);
				return ret;
			}
		}

		ret = ath11k_wmi_gtk_rekey_offload(ar, arvif, enable);

		if (ret) {
			ath11k_warn(ar->ab, "failed to offload gtk rekey vdev %i: enable %d, ret %d\n",
				    arvif->vdev_id, enable, ret);
			return ret;
		}
	}

	return 0;
}

static int ath11k_wow_protocol_offload(struct ath11k *ar, bool enable)
{
	int ret;

	ret = ath11k_wow_arp_ns_offload(ar, enable);
	if (ret) {
		ath11k_warn(ar->ab, "failed to offload ARP and NS %d %d\n",
			    enable, ret);
		return ret;
	}

	ret = ath11k_gtk_rekey_offload(ar, enable);
	if (ret) {
		ath11k_warn(ar->ab, "failed to offload gtk rekey %d %d\n",
			    enable, ret);
		return ret;
	}

	return 0;
}

static int ath11k_wow_set_keepalive(struct ath11k *ar,
				    enum wmi_sta_keepalive_method method,
				    u32 interval)
{
	struct ath11k_vif *arvif;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		ret = ath11k_mac_vif_set_keepalive(arvif, method, interval);
		if (ret)
			return ret;
	}

	return 0;
}

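/* mac80211 suspend handler used when WoWLAN is configured. Ordering matters:
 * stop pktlog and clear any stale WoW state first, then program wakeup events
 * and protocol offloads, drain tx, set the hw data filter and keepalive,
 * enable WoW in firmware, and finally disable interrupts and suspend the bus.
 * Failures unwind through the wakeup/cleanup labels; returning 1 makes
 * mac80211 fall back to a full deconfigure/reconfigure suspend instead.
 */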
int ath11k_wow_op_suspend(struct ieee80211_hw *hw,
			  struct cfg80211_wowlan *wowlan)
{
	struct ath11k *ar = hw->priv;
	int ret;

	mutex_lock(&ar->conf_mutex);

	ret = ath11k_dp_rx_pktlog_stop(ar->ab, true);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to stop dp rx (and timer) pktlog during wow suspend: %d\n",
			    ret);
		goto exit;
	}

	ret = ath11k_wow_cleanup(ar);
	if (ret) {
		ath11k_warn(ar->ab, "failed to clear wow wakeup events: %d\n",
			    ret);
		goto exit;
	}

	ret = ath11k_wow_set_wakeups(ar, wowlan);
	if (ret) {
		ath11k_warn(ar->ab, "failed to set wow wakeup events: %d\n",
			    ret);
		goto cleanup;
	}

	ret = ath11k_wow_protocol_offload(ar, true);
	if (ret) {
		ath11k_warn(ar->ab, "failed to set wow protocol offload events: %d\n",
			    ret);
		goto cleanup;
	}

	ath11k_mac_drain_tx(ar);
	ret = ath11k_mac_wait_tx_complete(ar);
	if (ret) {
		ath11k_warn(ar->ab, "failed to wait tx complete: %d\n", ret);
		goto cleanup;
	}

	ret = ath11k_wow_set_hw_filter(ar);
	if (ret) {
		ath11k_warn(ar->ab, "failed to set hw filter: %d\n",
			    ret);
		goto cleanup;
	}

	ret = ath11k_wow_set_keepalive(ar,
				       WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
				       WMI_STA_KEEPALIVE_INTERVAL_DEFAULT);
	if (ret) {
		ath11k_warn(ar->ab, "failed to enable wow keepalive: %d\n", ret);
		goto cleanup;
	}

	ret = ath11k_wow_enable(ar->ab);
	if (ret) {
		ath11k_warn(ar->ab, "failed to start wow: %d\n", ret);
		goto cleanup;
	}

	ret = ath11k_dp_rx_pktlog_stop(ar->ab, false);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to stop dp rx pktlog during wow suspend: %d\n",
			    ret);
		goto cleanup;
	}

	ath11k_ce_stop_shadow_timers(ar->ab);
	ath11k_dp_stop_shadow_timers(ar->ab);

	ath11k_hif_irq_disable(ar->ab);
	ath11k_hif_ce_irq_disable(ar->ab);

	ret = ath11k_hif_suspend(ar->ab);
	if (ret) {
		ath11k_warn(ar->ab, "failed to suspend hif: %d\n", ret);
		goto wakeup;
	}

	goto exit;

wakeup:
	ath11k_wow_wakeup(ar->ab);

cleanup:
	ath11k_wow_cleanup(ar);

exit:
	mutex_unlock(&ar->conf_mutex);
	return ret ? 1 : 0;
}

void ath11k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled)
{
	struct ath11k *ar = hw->priv;

	mutex_lock(&ar->conf_mutex);
	device_set_wakeup_enable(ar->ab->dev, enabled);
	mutex_unlock(&ar->conf_mutex);
}

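/* mac80211 resume handler, the reverse of ath11k_wow_op_suspend: resume the
 * bus and re-enable interrupts before sending the host wakeup indication,
 * then restart pktlog and tear down PNO state, the hw data filter, protocol
 * offloads and the keepalive. On failure the device is marked
 * ATH11K_STATE_RESTARTING and 1 is returned so that mac80211 goes through a
 * full hardware restart.
 */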
int ath11k_wow_op_resume(struct ieee80211_hw *hw)
{
	struct ath11k *ar = hw->priv;
	int ret;

	mutex_lock(&ar->conf_mutex);

	ret = ath11k_hif_resume(ar->ab);
	if (ret) {
		ath11k_warn(ar->ab, "failed to resume hif: %d\n", ret);
		goto exit;
	}

	ath11k_hif_ce_irq_enable(ar->ab);
	ath11k_hif_irq_enable(ar->ab);

	ret = ath11k_dp_rx_pktlog_start(ar->ab);
	if (ret) {
		ath11k_warn(ar->ab, "failed to start rx pktlog from wow: %d\n", ret);
		goto exit;
	}

	ret = ath11k_wow_wakeup(ar->ab);
	if (ret) {
		ath11k_warn(ar->ab, "failed to wakeup from wow: %d\n", ret);
		goto exit;
	}

	ret = ath11k_wow_nlo_cleanup(ar);
	if (ret) {
		ath11k_warn(ar->ab, "failed to cleanup nlo: %d\n", ret);
		goto exit;
	}

	ret = ath11k_wow_clear_hw_filter(ar);
	if (ret) {
		ath11k_warn(ar->ab, "failed to clear hw filter: %d\n", ret);
		goto exit;
	}

	ret = ath11k_wow_protocol_offload(ar, false);
	if (ret) {
		ath11k_warn(ar->ab, "failed to clear wow protocol offload events: %d\n",
			    ret);
		goto exit;
	}

	ret = ath11k_wow_set_keepalive(ar,
				       WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
				       WMI_STA_KEEPALIVE_INTERVAL_DISABLE);
	if (ret) {
		ath11k_warn(ar->ab, "failed to disable wow keepalive: %d\n", ret);
		goto exit;
	}

exit:
	if (ret) {
		switch (ar->state) {
		case ATH11K_STATE_ON:
			ar->state = ATH11K_STATE_RESTARTING;
			ret = 1;
			break;
		case ATH11K_STATE_OFF:
		case ATH11K_STATE_RESTARTING:
		case ATH11K_STATE_RESTARTED:
		case ATH11K_STATE_WEDGED:
			ath11k_warn(ar->ab, "encountered unexpected device state %d on resume, cannot recover\n",
				    ar->state);
			ret = -EIO;
			break;
		}
	}

	mutex_unlock(&ar->conf_mutex);
	return ret;
}

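/* Advertise WoWLAN support to mac80211 based on the firmware service bits.
 * In native wifi rx decap mode the maximum pattern length and packet offset
 * are reduced by WOW_MAX_REDUCE, leaving room for the 802.3 to 802.11 header
 * conversion done at suspend time, and net-detect is only advertised when
 * firmware reports the NLO service.
 */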
int ath11k_wow_init(struct ath11k *ar)
{
	if (!test_bit(WMI_TLV_SERVICE_WOW, ar->wmi->wmi_ab->svc_map))
		return 0;

	ar->wow.wowlan_support = ath11k_wowlan_support;

	if (ar->wmi->wmi_ab->wlan_resource_config.rx_decap_mode ==
	    ATH11K_HW_TXRX_NATIVE_WIFI) {
		ar->wow.wowlan_support.pattern_max_len -= WOW_MAX_REDUCE;
		ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE;
	}

	if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) {
		ar->wow.wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT;
		ar->wow.wowlan_support.max_nd_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
	}

	ar->wow.max_num_patterns = ATH11K_WOW_PATTERNS;
	ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
	ar->hw->wiphy->wowlan = &ar->wow.wowlan_support;

	device_set_wakeup_capable(ar->ab->dev, true);

	return 0;
}