xref: /openbmc/linux/drivers/net/wireless/ath/ath10k/wow.c (revision bdf2bd9aa684511bcb4271f185f735525ca27a70)
1 /*
2  * Copyright (c) 2015-2017 Qualcomm Atheros, Inc.
3  * Copyright (c) 2018, The Linux Foundation. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include "mac.h"
19 
20 #include <net/mac80211.h>
21 #include "hif.h"
22 #include "core.h"
23 #include "debug.h"
24 #include "wmi.h"
25 #include "wmi-ops.h"
26 
/* WoWLAN capabilities advertised to cfg80211: wake on disconnect and on
 * magic packet, plus the pattern length/offset limits the firmware WoW
 * engine accepts. ath10k_wow_init() copies and may narrow these limits
 * per-device before publishing them.
 */
static const struct wiphy_wowlan_support ath10k_wowlan_support = {
	.flags = WIPHY_WOWLAN_DISCONNECT |
		 WIPHY_WOWLAN_MAGIC_PKT,
	.pattern_min_len = WOW_MIN_PATTERN_SIZE,
	.pattern_max_len = WOW_MAX_PATTERN_SIZE,
	.max_pkt_offset = WOW_MAX_PKT_OFFSET,
};
34 
35 static int ath10k_wow_vif_cleanup(struct ath10k_vif *arvif)
36 {
37 	struct ath10k *ar = arvif->ar;
38 	int i, ret;
39 
40 	for (i = 0; i < WOW_EVENT_MAX; i++) {
41 		ret = ath10k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0);
42 		if (ret) {
43 			ath10k_warn(ar, "failed to issue wow wakeup for event %s on vdev %i: %d\n",
44 				    wow_wakeup_event(i), arvif->vdev_id, ret);
45 			return ret;
46 		}
47 	}
48 
49 	for (i = 0; i < ar->wow.max_num_patterns; i++) {
50 		ret = ath10k_wmi_wow_del_pattern(ar, arvif->vdev_id, i);
51 		if (ret) {
52 			ath10k_warn(ar, "failed to delete wow pattern %d for vdev %i: %d\n",
53 				    i, arvif->vdev_id, ret);
54 			return ret;
55 		}
56 	}
57 
58 	return 0;
59 }
60 
61 static int ath10k_wow_cleanup(struct ath10k *ar)
62 {
63 	struct ath10k_vif *arvif;
64 	int ret;
65 
66 	lockdep_assert_held(&ar->conf_mutex);
67 
68 	list_for_each_entry(arvif, &ar->arvifs, list) {
69 		ret = ath10k_wow_vif_cleanup(arvif);
70 		if (ret) {
71 			ath10k_warn(ar, "failed to clean wow wakeups on vdev %i: %d\n",
72 				    arvif->vdev_id, ret);
73 			return ret;
74 		}
75 	}
76 
77 	return 0;
78 }
79 
/*
 * Convert a 802.3 format to a 802.11 format.
 *         +------------+-----------+--------+----------------+
 * 802.3:  |dest mac(6B)|src mac(6B)|type(2B)|     body...    |
 *         +------------+-----------+--------+----------------+
 *                |__         |_______    |____________  |________
 *                   |                |                |          |
 *         +--+------------+----+-----------+---------------+-----------+
 * 802.11: |4B|dest mac(6B)| 6B |src mac(6B)|  8B  |type(2B)|  body...  |
 *         +--+------------+----+-----------+---------------+-----------+
 *
 * Translates a WoW packet pattern expressed against an Ethernet (802.3)
 * header into the equivalent pattern against an 802.11 3-addr header +
 * RFC1042 SNAP header, adjusting both the pattern/mask bytes and the
 * pkt_offset/pattern_len of @new.
 *
 * NOTE(review): writes through new->pattern and new->mask — the caller
 * must point them at writable buffers of at least WOW_MAX_PATTERN_SIZE
 * bytes that do not alias old->pattern/old->mask.
 */
static void ath10k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new,
					     const struct cfg80211_pkt_pattern *old)
{
	/* Scratch copies of the 802.3 header portion of the old pattern
	 * and the rebuilt 802.11+SNAP header portion of the new one.
	 */
	u8 hdr_8023_pattern[ETH_HLEN] = {};
	u8 hdr_8023_bit_mask[ETH_HLEN] = {};
	u8 hdr_80211_pattern[WOW_HDR_LEN] = {};
	u8 hdr_80211_bit_mask[WOW_HDR_LEN] = {};

	/* End of the old pattern relative to the start of the frame. */
	int total_len = old->pkt_offset + old->pattern_len;
	int hdr_80211_end_offset;

	struct ieee80211_hdr_3addr *new_hdr_pattern =
		(struct ieee80211_hdr_3addr *)hdr_80211_pattern;
	struct ieee80211_hdr_3addr *new_hdr_mask =
		(struct ieee80211_hdr_3addr *)hdr_80211_bit_mask;
	struct ethhdr *old_hdr_pattern = (struct ethhdr *)hdr_8023_pattern;
	struct ethhdr *old_hdr_mask = (struct ethhdr *)hdr_8023_bit_mask;
	int hdr_len = sizeof(*new_hdr_pattern);

	/* SNAP header immediately follows the 802.11 3-addr header. */
	struct rfc1042_hdr *new_rfc_pattern =
		(struct rfc1042_hdr *)(hdr_80211_pattern + hdr_len);
	struct rfc1042_hdr *new_rfc_mask =
		(struct rfc1042_hdr *)(hdr_80211_bit_mask + hdr_len);
	int rfc_len = sizeof(*new_rfc_pattern);

	/* Place the header part of the old pattern at its real offset
	 * within a full 802.3 header (caller guarantees
	 * old->pkt_offset < ETH_HLEN).
	 */
	memcpy(hdr_8023_pattern + old->pkt_offset,
	       old->pattern, ETH_HLEN - old->pkt_offset);
	memcpy(hdr_8023_bit_mask + old->pkt_offset,
	       old->mask, ETH_HLEN - old->pkt_offset);

	/* Copy destination address */
	memcpy(new_hdr_pattern->addr1, old_hdr_pattern->h_dest, ETH_ALEN);
	memcpy(new_hdr_mask->addr1, old_hdr_mask->h_dest, ETH_ALEN);

	/* Copy source address */
	memcpy(new_hdr_pattern->addr3, old_hdr_pattern->h_source, ETH_ALEN);
	memcpy(new_hdr_mask->addr3, old_hdr_mask->h_source, ETH_ALEN);

	/* Copy logic link type */
	memcpy(&new_rfc_pattern->snap_type,
	       &old_hdr_pattern->h_proto,
	       sizeof(old_hdr_pattern->h_proto));
	memcpy(&new_rfc_mask->snap_type,
	       &old_hdr_mask->h_proto,
	       sizeof(old_hdr_mask->h_proto));

	/* Calculate new pkt_offset: map the start of the old pattern
	 * (dest MAC / src MAC / ethertype region) onto the matching
	 * field of the 802.11 + SNAP layout.
	 */
	if (old->pkt_offset < ETH_ALEN)
		new->pkt_offset = old->pkt_offset +
			offsetof(struct ieee80211_hdr_3addr, addr1);
	else if (old->pkt_offset < offsetof(struct ethhdr, h_proto))
		new->pkt_offset = old->pkt_offset +
			offsetof(struct ieee80211_hdr_3addr, addr3) -
			offsetof(struct ethhdr, h_source);
	else
		new->pkt_offset = old->pkt_offset + hdr_len + rfc_len - ETH_HLEN;

	/* Calculate new hdr end offset: map the end of the old pattern
	 * the same way, saturating at the end of the rebuilt header.
	 */
	if (total_len > ETH_HLEN)
		hdr_80211_end_offset = hdr_len + rfc_len;
	else if (total_len > offsetof(struct ethhdr, h_proto))
		hdr_80211_end_offset = hdr_len + rfc_len + total_len - ETH_HLEN;
	else if (total_len > ETH_ALEN)
		hdr_80211_end_offset = total_len - ETH_ALEN +
			offsetof(struct ieee80211_hdr_3addr, addr3);
	else
		hdr_80211_end_offset = total_len +
			offsetof(struct ieee80211_hdr_3addr, addr1);

	new->pattern_len = hdr_80211_end_offset - new->pkt_offset;

	/* Emit the translated header slice of the pattern/mask. */
	memcpy((u8 *)new->pattern,
	       hdr_80211_pattern + new->pkt_offset,
	       new->pattern_len);
	memcpy((u8 *)new->mask,
	       hdr_80211_bit_mask + new->pkt_offset,
	       new->pattern_len);

	if (total_len > ETH_HLEN) {
		/* Copy frame body (unchanged past the header). */
		memcpy((u8 *)new->pattern + new->pattern_len,
		       (void *)old->pattern + ETH_HLEN - old->pkt_offset,
		       total_len - ETH_HLEN);
		memcpy((u8 *)new->mask + new->pattern_len,
		       (void *)old->mask + ETH_HLEN - old->pkt_offset,
		       total_len - ETH_HLEN);

		new->pattern_len += total_len - ETH_HLEN;
	}
}
181 
/* Validate a cfg80211 net-detect (sched scan) request and translate it
 * into a WMI PNO scan request for firmware.
 *
 * Returns 0 on success, -EINVAL if the request exceeds firmware limits
 * (match set count, channel count, SSID length).
 */
static int ath10k_wmi_pno_check(struct ath10k *ar, u32 vdev_id,
				struct cfg80211_sched_scan_request *nd_config,
				struct wmi_pno_scan_req *pno)
{
	int i, j, ret = 0;
	u8 ssid_len;

	pno->enable = 1;
	pno->vdev_id = vdev_id;
	pno->uc_networks_count = nd_config->n_match_sets;

	/* Firmware needs at least one and at most
	 * WMI_PNO_MAX_SUPP_NETWORKS match sets.
	 */
	if (!pno->uc_networks_count ||
	    pno->uc_networks_count > WMI_PNO_MAX_SUPP_NETWORKS)
		return -EINVAL;

	if (nd_config->n_channels > WMI_PNO_MAX_NETW_CHANNELS_EX)
		return -EINVAL;

	/* Filling per profile  params */
	for (i = 0; i < pno->uc_networks_count; i++) {
		ssid_len = nd_config->match_sets[i].ssid.ssid_len;

		if (ssid_len == 0 || ssid_len > 32)
			return -EINVAL;

		/* ssid_len is carried little-endian on the wire. */
		pno->a_networks[i].ssid.ssid_len = __cpu_to_le32(ssid_len);

		memcpy(pno->a_networks[i].ssid.ssid,
		       nd_config->match_sets[i].ssid.ssid,
		       nd_config->match_sets[i].ssid.ssid_len);
		pno->a_networks[i].authentication = 0;
		pno->a_networks[i].encryption     = 0;
		pno->a_networks[i].bcast_nw_type  = 0;

		/*Copying list of valid channel into request */
		pno->a_networks[i].channel_count = nd_config->n_channels;
		pno->a_networks[i].rssi_threshold = nd_config->match_sets[i].rssi_thold;

		for (j = 0; j < nd_config->n_channels; j++) {
			pno->a_networks[i].channels[j] =
					nd_config->channels[j]->center_freq;
		}
	}

	/* set scan to passive if no SSIDs are specified in the request */
	if (nd_config->n_ssids == 0)
		pno->do_passive_scan = true;
	else
		pno->do_passive_scan = false;

	/* Mark match sets that also appear in the probed-SSID list as
	 * hidden networks so firmware probes for them actively.
	 */
	for (i = 0; i < nd_config->n_ssids; i++) {
		j = 0;
		while (j < pno->uc_networks_count) {
			if (__le32_to_cpu(pno->a_networks[j].ssid.ssid_len) ==
				nd_config->ssids[i].ssid_len &&
			(memcmp(pno->a_networks[j].ssid.ssid,
				nd_config->ssids[i].ssid,
				__le32_to_cpu(pno->a_networks[j].ssid.ssid_len)) == 0)) {
				pno->a_networks[j].bcast_nw_type = BCAST_HIDDEN;
				break;
			}
			j++;
		}
	}

	/* Two scan plans: fast plan (interval + iteration count) then a
	 * slow plan firmware falls back to. One plan: same period for
	 * both, single fast cycle. Intervals are seconds; firmware wants
	 * milliseconds.
	 */
	if (nd_config->n_scan_plans == 2) {
		pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
		pno->fast_scan_max_cycles = nd_config->scan_plans[0].iterations;
		pno->slow_scan_period =
			nd_config->scan_plans[1].interval * MSEC_PER_SEC;
	} else if (nd_config->n_scan_plans == 1) {
		pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
		pno->fast_scan_max_cycles = 1;
		pno->slow_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
	} else {
		/* NOTE(review): only warns — scan periods stay zero from
		 * the caller's kzalloc and 0 is still returned.
		 */
		ath10k_warn(ar, "Invalid number of scan plans %d !!",
			    nd_config->n_scan_plans);
	}

	if (nd_config->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
		/* enable mac randomization */
		pno->enable_pno_scan_randomization = 1;
		memcpy(pno->mac_addr, nd_config->mac_addr, ETH_ALEN);
		memcpy(pno->mac_addr_mask, nd_config->mac_addr_mask, ETH_ALEN);
	}

	pno->delay_start_time = nd_config->delay;

	/* Current FW does not support min-max range for dwell time */
	pno->active_max_time = WMI_ACTIVE_MAX_CHANNEL_TIME;
	pno->passive_max_time = WMI_PASSIVE_MAX_CHANNEL_TIME;
	return ret;
}
275 
276 static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif,
277 				      struct cfg80211_wowlan *wowlan)
278 {
279 	int ret, i;
280 	unsigned long wow_mask = 0;
281 	struct ath10k *ar = arvif->ar;
282 	const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
283 	int pattern_id = 0;
284 
285 	/* Setup requested WOW features */
286 	switch (arvif->vdev_type) {
287 	case WMI_VDEV_TYPE_IBSS:
288 		__set_bit(WOW_BEACON_EVENT, &wow_mask);
289 		 /* fall through */
290 	case WMI_VDEV_TYPE_AP:
291 		__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
292 		__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
293 		__set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask);
294 		__set_bit(WOW_AUTH_REQ_EVENT, &wow_mask);
295 		__set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask);
296 		__set_bit(WOW_HTT_EVENT, &wow_mask);
297 		__set_bit(WOW_RA_MATCH_EVENT, &wow_mask);
298 		break;
299 	case WMI_VDEV_TYPE_STA:
300 		if (wowlan->disconnect) {
301 			__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
302 			__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
303 			__set_bit(WOW_BMISS_EVENT, &wow_mask);
304 			__set_bit(WOW_CSA_IE_EVENT, &wow_mask);
305 		}
306 
307 		if (wowlan->magic_pkt)
308 			__set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);
309 
310 		if (wowlan->nd_config) {
311 			struct wmi_pno_scan_req *pno;
312 			int ret;
313 
314 			pno = kzalloc(sizeof(*pno), GFP_KERNEL);
315 			if (!pno)
316 				return -ENOMEM;
317 
318 			ar->nlo_enabled = true;
319 
320 			ret = ath10k_wmi_pno_check(ar, arvif->vdev_id,
321 						   wowlan->nd_config, pno);
322 			if (!ret) {
323 				ath10k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
324 				__set_bit(WOW_NLO_DETECTED_EVENT, &wow_mask);
325 			}
326 
327 			kfree(pno);
328 		}
329 		break;
330 	default:
331 		break;
332 	}
333 
334 	for (i = 0; i < wowlan->n_patterns; i++) {
335 		u8 bitmask[WOW_MAX_PATTERN_SIZE] = {};
336 		u8 ath_pattern[WOW_MAX_PATTERN_SIZE] = {};
337 		u8 ath_bitmask[WOW_MAX_PATTERN_SIZE] = {};
338 		struct cfg80211_pkt_pattern new_pattern = {};
339 		struct cfg80211_pkt_pattern old_pattern = patterns[i];
340 		int j;
341 
342 		new_pattern.pattern = ath_pattern;
343 		new_pattern.mask = ath_bitmask;
344 		if (patterns[i].pattern_len > WOW_MAX_PATTERN_SIZE)
345 			continue;
346 		/* convert bytemask to bitmask */
347 		for (j = 0; j < patterns[i].pattern_len; j++)
348 			if (patterns[i].mask[j / 8] & BIT(j % 8))
349 				bitmask[j] = 0xff;
350 		old_pattern.mask = bitmask;
351 		new_pattern = old_pattern;
352 
353 		if (ar->wmi.rx_decap_mode == ATH10K_HW_TXRX_NATIVE_WIFI) {
354 			if (patterns[i].pkt_offset < ETH_HLEN)
355 				ath10k_wow_convert_8023_to_80211(&new_pattern,
356 								 &old_pattern);
357 			else
358 				new_pattern.pkt_offset += WOW_HDR_LEN - ETH_HLEN;
359 		}
360 
361 		if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE))
362 			return -EINVAL;
363 
364 		ret = ath10k_wmi_wow_add_pattern(ar, arvif->vdev_id,
365 						 pattern_id,
366 						 new_pattern.pattern,
367 						 new_pattern.mask,
368 						 new_pattern.pattern_len,
369 						 new_pattern.pkt_offset);
370 		if (ret) {
371 			ath10k_warn(ar, "failed to add pattern %i to vdev %i: %d\n",
372 				    pattern_id,
373 				    arvif->vdev_id, ret);
374 			return ret;
375 		}
376 
377 		pattern_id++;
378 		__set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask);
379 	}
380 
381 	for (i = 0; i < WOW_EVENT_MAX; i++) {
382 		if (!test_bit(i, &wow_mask))
383 			continue;
384 		ret = ath10k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1);
385 		if (ret) {
386 			ath10k_warn(ar, "failed to enable wakeup event %s on vdev %i: %d\n",
387 				    wow_wakeup_event(i), arvif->vdev_id, ret);
388 			return ret;
389 		}
390 	}
391 
392 	return 0;
393 }
394 
395 static int ath10k_wow_set_wakeups(struct ath10k *ar,
396 				  struct cfg80211_wowlan *wowlan)
397 {
398 	struct ath10k_vif *arvif;
399 	int ret;
400 
401 	lockdep_assert_held(&ar->conf_mutex);
402 
403 	list_for_each_entry(arvif, &ar->arvifs, list) {
404 		ret = ath10k_vif_wow_set_wakeups(arvif, wowlan);
405 		if (ret) {
406 			ath10k_warn(ar, "failed to set wow wakeups on vdev %i: %d\n",
407 				    arvif->vdev_id, ret);
408 			return ret;
409 		}
410 	}
411 
412 	return 0;
413 }
414 
415 static int ath10k_vif_wow_clean_nlo(struct ath10k_vif *arvif)
416 {
417 	int ret = 0;
418 	struct ath10k *ar = arvif->ar;
419 
420 	switch (arvif->vdev_type) {
421 	case WMI_VDEV_TYPE_STA:
422 		if (ar->nlo_enabled) {
423 			struct wmi_pno_scan_req *pno;
424 
425 			pno = kzalloc(sizeof(*pno), GFP_KERNEL);
426 			if (!pno)
427 				return -ENOMEM;
428 
429 			pno->enable = 0;
430 			ar->nlo_enabled = false;
431 			ret = ath10k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
432 			kfree(pno);
433 		}
434 		break;
435 	default:
436 		break;
437 	}
438 	return ret;
439 }
440 
441 static int ath10k_wow_nlo_cleanup(struct ath10k *ar)
442 {
443 	struct ath10k_vif *arvif;
444 	int ret = 0;
445 
446 	lockdep_assert_held(&ar->conf_mutex);
447 
448 	list_for_each_entry(arvif, &ar->arvifs, list) {
449 		ret = ath10k_vif_wow_clean_nlo(arvif);
450 		if (ret) {
451 			ath10k_warn(ar, "failed to clean nlo settings on vdev %i: %d\n",
452 				    arvif->vdev_id, ret);
453 			return ret;
454 		}
455 	}
456 
457 	return 0;
458 }
459 
460 static int ath10k_wow_enable(struct ath10k *ar)
461 {
462 	int ret;
463 
464 	lockdep_assert_held(&ar->conf_mutex);
465 
466 	reinit_completion(&ar->target_suspend);
467 
468 	ret = ath10k_wmi_wow_enable(ar);
469 	if (ret) {
470 		ath10k_warn(ar, "failed to issue wow enable: %d\n", ret);
471 		return ret;
472 	}
473 
474 	ret = wait_for_completion_timeout(&ar->target_suspend, 3 * HZ);
475 	if (ret == 0) {
476 		ath10k_warn(ar, "timed out while waiting for suspend completion\n");
477 		return -ETIMEDOUT;
478 	}
479 
480 	return 0;
481 }
482 
483 static int ath10k_wow_wakeup(struct ath10k *ar)
484 {
485 	int ret;
486 
487 	lockdep_assert_held(&ar->conf_mutex);
488 
489 	reinit_completion(&ar->wow.wakeup_completed);
490 
491 	ret = ath10k_wmi_wow_host_wakeup_ind(ar);
492 	if (ret) {
493 		ath10k_warn(ar, "failed to send wow wakeup indication: %d\n",
494 			    ret);
495 		return ret;
496 	}
497 
498 	ret = wait_for_completion_timeout(&ar->wow.wakeup_completed, 3 * HZ);
499 	if (ret == 0) {
500 		ath10k_warn(ar, "timed out while waiting for wow wakeup completion\n");
501 		return -ETIMEDOUT;
502 	}
503 
504 	return 0;
505 }
506 
/* mac80211 suspend hook: clear stale WoW state, program the requested
 * wakeups, drain tx, put firmware into WoW mode and suspend the HIF.
 *
 * Returns 0 on success or 1 on failure (mac80211 interprets 1 as
 * "reject suspend / reconfigure needed").
 */
int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
			  struct cfg80211_wowlan *wowlan)
{
	struct ath10k *ar = hw->priv;
	int ret;

	mutex_lock(&ar->conf_mutex);

	/* mac80211 should never call this without firmware WoW support
	 * since the wiphy wowlan capability is only set when present.
	 */
	if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
			      ar->running_fw->fw_file.fw_features))) {
		ret = 1;
		goto exit;
	}

	/* Start from a clean slate: no leftover events or patterns. */
	ret =  ath10k_wow_cleanup(ar);
	if (ret) {
		ath10k_warn(ar, "failed to clear wow wakeup events: %d\n",
			    ret);
		goto exit;
	}

	ret = ath10k_wow_set_wakeups(ar, wowlan);
	if (ret) {
		ath10k_warn(ar, "failed to set wow wakeup events: %d\n",
			    ret);
		goto cleanup;
	}

	/* Make sure no frames are in flight before suspending. */
	ath10k_mac_wait_tx_complete(ar);

	ret = ath10k_wow_enable(ar);
	if (ret) {
		ath10k_warn(ar, "failed to start wow: %d\n", ret);
		goto cleanup;
	}

	ret = ath10k_hif_suspend(ar);
	if (ret) {
		ath10k_warn(ar, "failed to suspend hif: %d\n", ret);
		goto wakeup;
	}

	goto exit;

wakeup:
	/* HIF suspend failed after WoW was enabled: wake firmware back up. */
	ath10k_wow_wakeup(ar);

cleanup:
	ath10k_wow_cleanup(ar);

exit:
	mutex_unlock(&ar->conf_mutex);
	return ret ? 1 : 0;
}
561 
562 void ath10k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled)
563 {
564 	struct ath10k *ar = hw->priv;
565 
566 	mutex_lock(&ar->conf_mutex);
567 	if (test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
568 		     ar->running_fw->fw_file.fw_features)) {
569 		device_set_wakeup_enable(ar->dev, enabled);
570 	}
571 	mutex_unlock(&ar->conf_mutex);
572 }
573 
/* mac80211 resume hook: resume the HIF, wake firmware out of WoW mode
 * and tear down net-detect state.
 *
 * Returns 0 on success, 1 to request a mac80211-driven restart, or a
 * negative errno when the device is in an unrecoverable state.
 */
int ath10k_wow_op_resume(struct ieee80211_hw *hw)
{
	struct ath10k *ar = hw->priv;
	int ret;

	mutex_lock(&ar->conf_mutex);

	/* Resume without firmware WoW support should be impossible;
	 * the suspend path has the same guard.
	 */
	if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
			      ar->running_fw->fw_file.fw_features))) {
		ret = 1;
		goto exit;
	}

	ret = ath10k_hif_resume(ar);
	if (ret) {
		ath10k_warn(ar, "failed to resume hif: %d\n", ret);
		goto exit;
	}

	ret = ath10k_wow_wakeup(ar);
	if (ret)
		ath10k_warn(ar, "failed to wakeup from wow: %d\n", ret);

	ret = ath10k_wow_nlo_cleanup(ar);
	if (ret)
		ath10k_warn(ar, "failed to cleanup nlo: %d\n", ret);

exit:
	if (ret) {
		/* On failure decide whether a firmware restart can
		 * recover the device (only from the ON state).
		 */
		switch (ar->state) {
		case ATH10K_STATE_ON:
			ar->state = ATH10K_STATE_RESTARTING;
			ret = 1;
			break;
		case ATH10K_STATE_OFF:
		case ATH10K_STATE_RESTARTING:
		case ATH10K_STATE_RESTARTED:
		case ATH10K_STATE_UTF:
		case ATH10K_STATE_WEDGED:
			ath10k_warn(ar, "encountered unexpected device state %d on resume, cannot recover\n",
				    ar->state);
			ret = -EIO;
			break;
		}
	}

	mutex_unlock(&ar->conf_mutex);
	return ret;
}
623 
624 int ath10k_wow_init(struct ath10k *ar)
625 {
626 	if (!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
627 		      ar->running_fw->fw_file.fw_features))
628 		return 0;
629 
630 	if (WARN_ON(!test_bit(WMI_SERVICE_WOW, ar->wmi.svc_map)))
631 		return -EINVAL;
632 
633 	ar->wow.wowlan_support = ath10k_wowlan_support;
634 
635 	if (ar->wmi.rx_decap_mode == ATH10K_HW_TXRX_NATIVE_WIFI) {
636 		ar->wow.wowlan_support.pattern_max_len -= WOW_MAX_REDUCE;
637 		ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE;
638 	}
639 
640 	if (test_bit(WMI_SERVICE_NLO, ar->wmi.svc_map)) {
641 		ar->wow.wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT;
642 		ar->wow.wowlan_support.max_nd_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
643 	}
644 
645 	ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
646 	ar->hw->wiphy->wowlan = &ar->wow.wowlan_support;
647 
648 	device_set_wakeup_capable(ar->dev, true);
649 
650 	return 0;
651 }
652