1 /*
2  * Copyright (c) 2016 Broadcom
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11  * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13  * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14  * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16 #include <linux/netdevice.h>
17 #include <linux/gcd.h>
18 #include <net/cfg80211.h>
19 
20 #include "core.h"
21 #include "debug.h"
22 #include "fwil.h"
23 #include "fwil_types.h"
24 #include "cfg80211.h"
25 #include "pno.h"
26 
#define BRCMF_PNO_VERSION		2
#define BRCMF_PNO_REPEAT		4
#define BRCMF_PNO_FREQ_EXPO_MAX		3
/* bit positions OR'ed into the "pfn_set" flags word (see brcmf_pno_config) */
#define BRCMF_PNO_IMMEDIATE_SCAN_BIT	3
#define BRCMF_PNO_ENABLE_BD_SCAN_BIT	5
#define BRCMF_PNO_ENABLE_ADAPTSCAN_BIT	6
#define BRCMF_PNO_REPORT_SEPARATELY_BIT	11
#define BRCMF_PNO_SCAN_INCOMPLETE	0
/* wildcard: match any WPA auth mode in a PFN network entry */
#define BRCMF_PNO_WPA_AUTH_ANY		0xFFFFFFFF
/* flag bit marking a network as hidden, forcing an active probe */
#define BRCMF_PNO_HIDDEN_BIT		2
/* default scan period; not referenced in this chunk — presumably used elsewhere */
#define BRCMF_PNO_SCHED_SCAN_PERIOD	30

/* each stored scheduled scan request becomes one gscan bucket */
#define BRCMF_PNO_MAX_BUCKETS		16
/* buffer_threshold value; per its name, presumably "no threshold set" */
#define GSCAN_BATCH_NO_THR_SET			101
#define GSCAN_RETRY_THRESHOLD			3
42 
/**
 * struct brcmf_pno_info - preferred network offload (PNO) state.
 *
 * @n_reqs: number of entries currently used in @reqs.
 * @reqs: stored scheduled scan requests, one per firmware bucket.
 * @req_lock: serializes access to @n_reqs and @reqs.
 */
struct brcmf_pno_info {
	int n_reqs;
	struct cfg80211_sched_scan_request *reqs[BRCMF_PNO_MAX_BUCKETS];
	struct mutex req_lock;
};

/* fetch the pno state hanging off an interface's driver config */
#define ifp_to_pno(_ifp)	((_ifp)->drvr->config->pno)
50 
51 static int brcmf_pno_store_request(struct brcmf_pno_info *pi,
52 				   struct cfg80211_sched_scan_request *req)
53 {
54 	if (WARN(pi->n_reqs == BRCMF_PNO_MAX_BUCKETS,
55 		 "pno request storage full\n"))
56 		return -ENOSPC;
57 
58 	brcmf_dbg(SCAN, "reqid=%llu\n", req->reqid);
59 	mutex_lock(&pi->req_lock);
60 	pi->reqs[pi->n_reqs++] = req;
61 	mutex_unlock(&pi->req_lock);
62 	return 0;
63 }
64 
65 static int brcmf_pno_remove_request(struct brcmf_pno_info *pi, u64 reqid)
66 {
67 	int i, err = 0;
68 
69 	mutex_lock(&pi->req_lock);
70 
71 	/* find request */
72 	for (i = 0; i < pi->n_reqs; i++) {
73 		if (pi->reqs[i]->reqid == reqid)
74 			break;
75 	}
76 	/* request not found */
77 	if (WARN(i == pi->n_reqs, "reqid not found\n")) {
78 		err = -ENOENT;
79 		goto done;
80 	}
81 
82 	brcmf_dbg(SCAN, "reqid=%llu\n", reqid);
83 	pi->n_reqs--;
84 
85 	/* if last we are done */
86 	if (!pi->n_reqs || i == pi->n_reqs)
87 		goto done;
88 
89 	/* fill the gap with remaining requests */
90 	while (i <= pi->n_reqs - 1) {
91 		pi->reqs[i] = pi->reqs[i + 1];
92 		i++;
93 	}
94 
95 done:
96 	mutex_unlock(&pi->req_lock);
97 	return err;
98 }
99 
100 static int brcmf_pno_channel_config(struct brcmf_if *ifp,
101 				    struct brcmf_pno_config_le *cfg)
102 {
103 	cfg->reporttype = 0;
104 	cfg->flags = 0;
105 
106 	return brcmf_fil_iovar_data_set(ifp, "pfn_cfg", cfg, sizeof(*cfg));
107 }
108 
/**
 * brcmf_pno_config() - program base PFN parameters into firmware.
 *
 * @ifp: interface to configure.
 * @scan_freq: base scan period passed to firmware (taken from the
 *	cfg80211 scan_plans interval — presumably seconds; confirm
 *	against "pfn_set" firmware documentation).
 * @mscan: requested number of firmware-buffered scans; 0 disables
 *	batching and skips the "pfnmem" negotiation entirely.
 * @bestn: best-network count per buffered scan; only used when @mscan.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int brcmf_pno_config(struct brcmf_if *ifp, u32 scan_freq,
			    u32 mscan, u32 bestn)
{
	struct brcmf_pub *drvr = ifp->drvr;
	struct brcmf_pno_param_le pfn_param;
	u16 flags;
	u32 pfnmem;
	s32 err;

	memset(&pfn_param, 0, sizeof(pfn_param));
	pfn_param.version = cpu_to_le32(BRCMF_PNO_VERSION);

	/* set extra pno params */
	flags = BIT(BRCMF_PNO_IMMEDIATE_SCAN_BIT) |
		BIT(BRCMF_PNO_ENABLE_ADAPTSCAN_BIT);
	pfn_param.repeat = BRCMF_PNO_REPEAT;
	pfn_param.exp = BRCMF_PNO_FREQ_EXPO_MAX;

	/* set up pno scan fr */
	pfn_param.scan_freq = cpu_to_le32(scan_freq);

	if (mscan) {
		pfnmem = bestn;

		/* set bestn in firmware */
		err = brcmf_fil_iovar_int_set(ifp, "pfnmem", pfnmem);
		if (err < 0) {
			bphy_err(drvr, "failed to set pfnmem\n");
			goto exit;
		}
		/* get max mscan which the firmware supports */
		err = brcmf_fil_iovar_int_get(ifp, "pfnmem", &pfnmem);
		if (err < 0) {
			bphy_err(drvr, "failed to get pfnmem\n");
			goto exit;
		}
		/* firmware may cap the requested value; honor its limit */
		mscan = min_t(u32, mscan, pfnmem);
		pfn_param.mscan = mscan;
		pfn_param.bestn = bestn;
		/* batching data enables best-distance scan reporting */
		flags |= BIT(BRCMF_PNO_ENABLE_BD_SCAN_BIT);
		brcmf_dbg(INFO, "mscan=%d, bestn=%d\n", mscan, bestn);
	}

	pfn_param.flags = cpu_to_le16(flags);
	err = brcmf_fil_iovar_data_set(ifp, "pfn_set", &pfn_param,
				       sizeof(pfn_param));
	if (err)
		bphy_err(drvr, "pfn_set failed, err=%d\n", err);

exit:
	return err;
}
161 
162 static int brcmf_pno_set_random(struct brcmf_if *ifp, struct brcmf_pno_info *pi)
163 {
164 	struct brcmf_pub *drvr = ifp->drvr;
165 	struct brcmf_pno_macaddr_le pfn_mac;
166 	u8 *mac_addr = NULL;
167 	u8 *mac_mask = NULL;
168 	int err, i;
169 
170 	for (i = 0; i < pi->n_reqs; i++)
171 		if (pi->reqs[i]->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
172 			mac_addr = pi->reqs[i]->mac_addr;
173 			mac_mask = pi->reqs[i]->mac_addr_mask;
174 			break;
175 		}
176 
177 	/* no random mac requested */
178 	if (!mac_addr)
179 		return 0;
180 
181 	pfn_mac.version = BRCMF_PFN_MACADDR_CFG_VER;
182 	pfn_mac.flags = BRCMF_PFN_MAC_OUI_ONLY | BRCMF_PFN_SET_MAC_UNASSOC;
183 
184 	memcpy(pfn_mac.mac, mac_addr, ETH_ALEN);
185 	for (i = 0; i < ETH_ALEN; i++) {
186 		pfn_mac.mac[i] &= mac_mask[i];
187 		pfn_mac.mac[i] |= get_random_int() & ~(mac_mask[i]);
188 	}
189 	/* Clear multi bit */
190 	pfn_mac.mac[0] &= 0xFE;
191 	/* Set locally administered */
192 	pfn_mac.mac[0] |= 0x02;
193 
194 	brcmf_dbg(SCAN, "enabling random mac: reqid=%llu mac=%pM\n",
195 		  pi->reqs[i]->reqid, pfn_mac.mac);
196 	err = brcmf_fil_iovar_data_set(ifp, "pfn_macaddr", &pfn_mac,
197 				       sizeof(pfn_mac));
198 	if (err)
199 		bphy_err(drvr, "pfn_macaddr failed, err=%d\n", err);
200 
201 	return err;
202 }
203 
204 static int brcmf_pno_add_ssid(struct brcmf_if *ifp, struct cfg80211_ssid *ssid,
205 			      bool active)
206 {
207 	struct brcmf_pub *drvr = ifp->drvr;
208 	struct brcmf_pno_net_param_le pfn;
209 	int err;
210 
211 	pfn.auth = cpu_to_le32(WLAN_AUTH_OPEN);
212 	pfn.wpa_auth = cpu_to_le32(BRCMF_PNO_WPA_AUTH_ANY);
213 	pfn.wsec = cpu_to_le32(0);
214 	pfn.infra = cpu_to_le32(1);
215 	pfn.flags = 0;
216 	if (active)
217 		pfn.flags = cpu_to_le32(1 << BRCMF_PNO_HIDDEN_BIT);
218 	pfn.ssid.SSID_len = cpu_to_le32(ssid->ssid_len);
219 	memcpy(pfn.ssid.SSID, ssid->ssid, ssid->ssid_len);
220 
221 	brcmf_dbg(SCAN, "adding ssid=%.32s (active=%d)\n", ssid->ssid, active);
222 	err = brcmf_fil_iovar_data_set(ifp, "pfn_add", &pfn, sizeof(pfn));
223 	if (err < 0)
224 		bphy_err(drvr, "adding failed: err=%d\n", err);
225 	return err;
226 }
227 
228 static int brcmf_pno_add_bssid(struct brcmf_if *ifp, const u8 *bssid)
229 {
230 	struct brcmf_pub *drvr = ifp->drvr;
231 	struct brcmf_pno_bssid_le bssid_cfg;
232 	int err;
233 
234 	memcpy(bssid_cfg.bssid, bssid, ETH_ALEN);
235 	bssid_cfg.flags = 0;
236 
237 	brcmf_dbg(SCAN, "adding bssid=%pM\n", bssid);
238 	err = brcmf_fil_iovar_data_set(ifp, "pfn_add_bssid", &bssid_cfg,
239 				       sizeof(bssid_cfg));
240 	if (err < 0)
241 		bphy_err(drvr, "adding failed: err=%d\n", err);
242 	return err;
243 }
244 
245 static bool brcmf_is_ssid_active(struct cfg80211_ssid *ssid,
246 				 struct cfg80211_sched_scan_request *req)
247 {
248 	int i;
249 
250 	if (!ssid || !req->ssids || !req->n_ssids)
251 		return false;
252 
253 	for (i = 0; i < req->n_ssids; i++) {
254 		if (ssid->ssid_len == req->ssids[i].ssid_len) {
255 			if (!strncmp(ssid->ssid, req->ssids[i].ssid,
256 				     ssid->ssid_len))
257 				return true;
258 		}
259 	}
260 	return false;
261 }
262 
263 static int brcmf_pno_clean(struct brcmf_if *ifp)
264 {
265 	struct brcmf_pub *drvr = ifp->drvr;
266 	int ret;
267 
268 	/* Disable pfn */
269 	ret = brcmf_fil_iovar_int_set(ifp, "pfn", 0);
270 	if (ret == 0) {
271 		/* clear pfn */
272 		ret = brcmf_fil_iovar_data_set(ifp, "pfnclear", NULL, 0);
273 	}
274 	if (ret < 0)
275 		bphy_err(drvr, "failed code %d\n", ret);
276 
277 	return ret;
278 }
279 
280 static int brcmf_pno_get_bucket_channels(struct cfg80211_sched_scan_request *r,
281 					 struct brcmf_pno_config_le *pno_cfg)
282 {
283 	u32 n_chan = le32_to_cpu(pno_cfg->channel_num);
284 	u16 chan;
285 	int i, err = 0;
286 
287 	for (i = 0; i < r->n_channels; i++) {
288 		if (n_chan >= BRCMF_NUMCHANNELS) {
289 			err = -ENOSPC;
290 			goto done;
291 		}
292 		chan = r->channels[i]->hw_value;
293 		brcmf_dbg(SCAN, "[%d] Chan : %u\n", n_chan, chan);
294 		pno_cfg->channel_list[n_chan++] = cpu_to_le16(chan);
295 	}
296 	/* return number of channels */
297 	err = n_chan;
298 done:
299 	pno_cfg->channel_num = cpu_to_le32(n_chan);
300 	return err;
301 }
302 
/**
 * brcmf_pno_prep_fwconfig() - derive firmware bucket configuration from
 *	the stored scheduled scan requests.
 *
 * @pi: pno state holding the stored requests.
 * @pno_cfg: filled with the union of all requests' channel lists.
 * @buckets: on success points to a kcalloc()ed array of bucket configs,
 *	one per request; caller owns it and must kfree() it.
 * @scan_freq: set to the base scan period — the gcd of all requests'
 *	first scan-plan intervals, clamped to the minimum.
 *
 * Return: number of buckets (pi->n_reqs) on success, negative error
 * code otherwise.
 */
static int brcmf_pno_prep_fwconfig(struct brcmf_pno_info *pi,
				   struct brcmf_pno_config_le *pno_cfg,
				   struct brcmf_gscan_bucket_config **buckets,
				   u32 *scan_freq)
{
	struct cfg80211_sched_scan_request *sr;
	struct brcmf_gscan_bucket_config *fw_buckets;
	int i, err, chidx;

	brcmf_dbg(SCAN, "n_reqs=%d\n", pi->n_reqs);
	if (WARN_ON(!pi->n_reqs))
		return -ENODATA;

	/*
	 * actual scan period is determined using gcd() for each
	 * scheduled scan period.
	 */
	*scan_freq = pi->reqs[0]->scan_plans[0].interval;
	for (i = 1; i < pi->n_reqs; i++) {
		sr = pi->reqs[i];
		*scan_freq = gcd(sr->scan_plans[0].interval, *scan_freq);
	}
	if (*scan_freq < BRCMF_PNO_SCHED_SCAN_MIN_PERIOD) {
		brcmf_dbg(SCAN, "scan period too small, using minimum\n");
		*scan_freq = BRCMF_PNO_SCHED_SCAN_MIN_PERIOD;
	}

	*buckets = NULL;
	fw_buckets = kcalloc(pi->n_reqs, sizeof(*fw_buckets), GFP_KERNEL);
	if (!fw_buckets)
		return -ENOMEM;

	memset(pno_cfg, 0, sizeof(*pno_cfg));
	for (i = 0; i < pi->n_reqs; i++) {
		sr = pi->reqs[i];
		/* chidx is the running channel total after this request */
		chidx = brcmf_pno_get_bucket_channels(sr, pno_cfg);
		if (chidx < 0) {
			err = chidx;
			goto fail;
		}
		fw_buckets[i].bucket_end_index = chidx - 1;
		/* each bucket scans at a multiple of the base period */
		fw_buckets[i].bucket_freq_multiple =
			sr->scan_plans[0].interval / *scan_freq;
		/* assure period is non-zero */
		if (!fw_buckets[i].bucket_freq_multiple)
			fw_buckets[i].bucket_freq_multiple = 1;
		fw_buckets[i].flag = BRCMF_PNO_REPORT_NO_BATCH;
	}

	/* dump the resulting bucket table when scan debugging is on */
	if (BRCMF_SCAN_ON()) {
		brcmf_err("base period=%u\n", *scan_freq);
		for (i = 0; i < pi->n_reqs; i++) {
			brcmf_err("[%d] period %u max %u repeat %u flag %x idx %u\n",
				  i, fw_buckets[i].bucket_freq_multiple,
				  le16_to_cpu(fw_buckets[i].max_freq_multiple),
				  fw_buckets[i].repeat, fw_buckets[i].flag,
				  fw_buckets[i].bucket_end_index);
		}
	}
	*buckets = fw_buckets;
	return pi->n_reqs;

fail:
	kfree(fw_buckets);
	return err;
}
369 
370 static int brcmf_pno_config_networks(struct brcmf_if *ifp,
371 				     struct brcmf_pno_info *pi)
372 {
373 	struct cfg80211_sched_scan_request *r;
374 	struct cfg80211_match_set *ms;
375 	bool active;
376 	int i, j, err = 0;
377 
378 	for (i = 0; i < pi->n_reqs; i++) {
379 		r = pi->reqs[i];
380 
381 		for (j = 0; j < r->n_match_sets; j++) {
382 			ms = &r->match_sets[j];
383 			if (ms->ssid.ssid_len) {
384 				active = brcmf_is_ssid_active(&ms->ssid, r);
385 				err = brcmf_pno_add_ssid(ifp, &ms->ssid,
386 							 active);
387 			}
388 			if (!err && is_valid_ether_addr(ms->bssid))
389 				err = brcmf_pno_add_bssid(ifp, ms->bssid);
390 
391 			if (err < 0)
392 				return err;
393 		}
394 	}
395 	return 0;
396 }
397 
/**
 * brcmf_pno_config_sched_scans() - (re)program all stored scheduled
 *	scans into the firmware.
 *
 * Derives bucket/channel configuration from the stored requests, wipes
 * any previous pfn state, then configures pno, channels, gscan buckets,
 * mac randomization and networks in that order before enabling pfn.
 * On any failure after the wipe, firmware pfn state is cleaned again.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int brcmf_pno_config_sched_scans(struct brcmf_if *ifp)
{
	struct brcmf_pub *drvr = ifp->drvr;
	struct brcmf_pno_info *pi;
	struct brcmf_gscan_config *gscan_cfg;
	struct brcmf_gscan_bucket_config *buckets;
	struct brcmf_pno_config_le pno_cfg;
	size_t gsz;
	u32 scan_freq;
	int err, n_buckets;

	pi = ifp_to_pno(ifp);
	n_buckets = brcmf_pno_prep_fwconfig(pi, &pno_cfg, &buckets,
					    &scan_freq);
	if (n_buckets < 0)
		return n_buckets;

	/* n_buckets - 1 because gscan_cfg appears to already contain one
	 * bucket element — confirm against struct brcmf_gscan_config in
	 * fwil_types.h
	 */
	gsz = sizeof(*gscan_cfg) + (n_buckets - 1) * sizeof(*buckets);
	gscan_cfg = kzalloc(gsz, GFP_KERNEL);
	if (!gscan_cfg) {
		err = -ENOMEM;
		goto free_buckets;
	}

	/* clean up everything */
	err = brcmf_pno_clean(ifp);
	if  (err < 0) {
		bphy_err(drvr, "failed error=%d\n", err);
		goto free_gscan;
	}

	/* configure pno */
	err = brcmf_pno_config(ifp, scan_freq, 0, 0);
	if (err < 0)
		goto free_gscan;

	err = brcmf_pno_channel_config(ifp, &pno_cfg);
	if (err < 0)
		goto clean;

	gscan_cfg->version = cpu_to_le16(BRCMF_GSCAN_CFG_VERSION);
	gscan_cfg->retry_threshold = GSCAN_RETRY_THRESHOLD;
	gscan_cfg->buffer_threshold = GSCAN_BATCH_NO_THR_SET;
	gscan_cfg->flags = BRCMF_GSCAN_CFG_ALL_BUCKETS_IN_1ST_SCAN;

	gscan_cfg->count_of_channel_buckets = n_buckets;
	memcpy(&gscan_cfg->bucket[0], buckets,
	       n_buckets * sizeof(*buckets));

	err = brcmf_fil_iovar_data_set(ifp, "pfn_gscan_cfg", gscan_cfg, gsz);

	if (err < 0)
		goto clean;

	/* configure random mac */
	err = brcmf_pno_set_random(ifp, pi);
	if (err < 0)
		goto clean;

	err = brcmf_pno_config_networks(ifp, pi);
	if (err < 0)
		goto clean;

	/* Enable the PNO */
	err = brcmf_fil_iovar_int_set(ifp, "pfn", 1);

clean:
	/* leave no partial pfn configuration behind on failure */
	if (err < 0)
		brcmf_pno_clean(ifp);
free_gscan:
	kfree(gscan_cfg);
free_buckets:
	kfree(buckets);
	return err;
}
473 
474 int brcmf_pno_start_sched_scan(struct brcmf_if *ifp,
475 			       struct cfg80211_sched_scan_request *req)
476 {
477 	struct brcmf_pno_info *pi;
478 	int ret;
479 
480 	brcmf_dbg(TRACE, "reqid=%llu\n", req->reqid);
481 
482 	pi = ifp_to_pno(ifp);
483 	ret = brcmf_pno_store_request(pi, req);
484 	if (ret < 0)
485 		return ret;
486 
487 	ret = brcmf_pno_config_sched_scans(ifp);
488 	if (ret < 0) {
489 		brcmf_pno_remove_request(pi, req->reqid);
490 		if (pi->n_reqs)
491 			(void)brcmf_pno_config_sched_scans(ifp);
492 		return ret;
493 	}
494 	return 0;
495 }
496 
497 int brcmf_pno_stop_sched_scan(struct brcmf_if *ifp, u64 reqid)
498 {
499 	struct brcmf_pno_info *pi;
500 	int err;
501 
502 	brcmf_dbg(TRACE, "reqid=%llu\n", reqid);
503 
504 	pi = ifp_to_pno(ifp);
505 
506 	/* No PNO request */
507 	if (!pi->n_reqs)
508 		return 0;
509 
510 	err = brcmf_pno_remove_request(pi, reqid);
511 	if (err)
512 		return err;
513 
514 	brcmf_pno_clean(ifp);
515 
516 	if (pi->n_reqs)
517 		(void)brcmf_pno_config_sched_scans(ifp);
518 
519 	return 0;
520 }
521 
522 int brcmf_pno_attach(struct brcmf_cfg80211_info *cfg)
523 {
524 	struct brcmf_pno_info *pi;
525 
526 	brcmf_dbg(TRACE, "enter\n");
527 	pi = kzalloc(sizeof(*pi), GFP_KERNEL);
528 	if (!pi)
529 		return -ENOMEM;
530 
531 	cfg->pno = pi;
532 	mutex_init(&pi->req_lock);
533 	return 0;
534 }
535 
536 void brcmf_pno_detach(struct brcmf_cfg80211_info *cfg)
537 {
538 	struct brcmf_pno_info *pi;
539 
540 	brcmf_dbg(TRACE, "enter\n");
541 	pi = cfg->pno;
542 	cfg->pno = NULL;
543 
544 	WARN_ON(pi->n_reqs);
545 	mutex_destroy(&pi->req_lock);
546 	kfree(pi);
547 }
548 
549 void brcmf_pno_wiphy_params(struct wiphy *wiphy, bool gscan)
550 {
551 	/* scheduled scan settings */
552 	wiphy->max_sched_scan_reqs = gscan ? BRCMF_PNO_MAX_BUCKETS : 1;
553 	wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT;
554 	wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT;
555 	wiphy->max_sched_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
556 	wiphy->max_sched_scan_plan_interval = BRCMF_PNO_SCHED_SCAN_MAX_PERIOD;
557 }
558 
559 u64 brcmf_pno_find_reqid_by_bucket(struct brcmf_pno_info *pi, u32 bucket)
560 {
561 	u64 reqid = 0;
562 
563 	mutex_lock(&pi->req_lock);
564 
565 	if (bucket < pi->n_reqs)
566 		reqid = pi->reqs[bucket]->reqid;
567 
568 	mutex_unlock(&pi->req_lock);
569 	return reqid;
570 }
571 
572 u32 brcmf_pno_get_bucket_map(struct brcmf_pno_info *pi,
573 			     struct brcmf_pno_net_info_le *ni)
574 {
575 	struct cfg80211_sched_scan_request *req;
576 	struct cfg80211_match_set *ms;
577 	u32 bucket_map = 0;
578 	int i, j;
579 
580 	mutex_lock(&pi->req_lock);
581 	for (i = 0; i < pi->n_reqs; i++) {
582 		req = pi->reqs[i];
583 
584 		if (!req->n_match_sets)
585 			continue;
586 		for (j = 0; j < req->n_match_sets; j++) {
587 			ms = &req->match_sets[j];
588 			if (ms->ssid.ssid_len == ni->SSID_len &&
589 			    !memcmp(ms->ssid.ssid, ni->SSID, ni->SSID_len)) {
590 				bucket_map |= BIT(i);
591 				break;
592 			}
593 			if (is_valid_ether_addr(ms->bssid) &&
594 			    !memcmp(ms->bssid, ni->bssid, ETH_ALEN)) {
595 				bucket_map |= BIT(i);
596 				break;
597 			}
598 		}
599 	}
600 	mutex_unlock(&pi->req_lock);
601 	return bucket_map;
602 }
603