1 /*
2  * Copyright (c) 2012 Qualcomm Atheros, Inc.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16 
17 #include <linux/etherdevice.h>
18 #include <linux/if_arp.h>
19 
20 #include "wil6210.h"
21 #include "txrx.h"
22 #include "wmi.h"
23 #include "trace.h"
24 
/**
 * WMI event receiving - theory of operations
 *
 * When the firmware is about to report a WMI event, it fills a memory area
 * in the mailbox and raises the misc. IRQ. The threaded interrupt handler
 * for the misc IRQ calls @wmi_recv_cmd.
 *
 * @wmi_recv_cmd reads the event, allocates a memory chunk and attaches it to
 * the event list @wil->pending_wmi_ev. Then the work queue @wil->wmi_wq wakes
 * up and handles events within @wmi_event_worker. Every event gets detached
 * from the list, processed and deleted.
 *
 * The purpose of this mechanism is to release the IRQ thread; otherwise,
 * if WMI event handling involves another WMI command flow, this second flow
 * would never complete because the IRQ thread is blocked.
 */
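
/*
 * In short: misc IRQ -> wmi_recv_cmd() -> @wil->pending_wmi_ev ->
 * wmi_event_worker() -> wmi_event_handle() -> per-event handler
 */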
41 
/**
 * Addressing - theory of operations
 *
 * There are several buses present on the WIL6210 card.
 * The same memory areas are visible at different addresses on
 * the different buses. There are 3 main bus masters:
 *  - MAC CPU (ucode)
 *  - User CPU (firmware)
 *  - AHB (host)
 *
 * On the PCI bus, there is one BAR (BAR0) of 2MB size, exposing
 * AHB addresses starting from 0x880000.
 *
 * Internally, the firmware uses addresses that allow faster access but
 * are invisible to the host. To read from these addresses, the alternative
 * AHB address must be used.
 *
 * Memory mapping
 * Linker address         PCI/Host address
 *                        0x880000 .. 0xa80000  2MB BAR0
 * 0x800000 .. 0x807000   0x900000 .. 0x907000  28k DCCM
 * 0x840000 .. 0x857000   0x908000 .. 0x91f000  92k PERIPH
 */
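
/*
 * Worked example (illustrative): a firmware data address such as 0x800100
 * lies in the DCCM window above, so wmi_addr_remap() below maps it to AHB
 * address 0x900100. Assuming HOSTADDR() subtracts the 0x880000 BAR0 base,
 * the host then accesses it at BAR0 offset 0x900100 - 0x880000 = 0x80100,
 * i.e. at wil->csr + 0x80100.
 */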
65 
/**
 * @fw_mapping provides the memory remapping table
 */
69 static const struct {
70 	u32 from; /* linker address - from, inclusive */
71 	u32 to;   /* linker address - to, exclusive */
72 	u32 host; /* PCI/Host address - BAR0 + 0x880000 */
73 } fw_mapping[] = {
74 	{0x000000, 0x040000, 0x8c0000}, /* FW code RAM 256k */
75 	{0x800000, 0x808000, 0x900000}, /* FW data RAM 32k */
76 	{0x840000, 0x860000, 0x908000}, /* peripheral data RAM 128k/96k used */
77 	{0x880000, 0x88a000, 0x880000}, /* various RGF */
78 	{0x8c0000, 0x932000, 0x8c0000}, /* trivial mapping for upper area */
79 	/*
80 	 * 920000..930000 ucode code RAM
81 	 * 930000..932000 ucode data RAM
82 	 */
83 };
84 
/**
 * Return the AHB address for a given firmware/ucode internal (linker) address
 * @x - internal address
 * If the address has no valid AHB mapping, return 0
 */
90 static u32 wmi_addr_remap(u32 x)
91 {
92 	uint i;
93 
94 	for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
95 		if ((x >= fw_mapping[i].from) && (x < fw_mapping[i].to))
96 			return x + fw_mapping[i].host - fw_mapping[i].from;
97 	}
98 
99 	return 0;
100 }
101 
/**
 * Check address validity for a WMI buffer; remap if needed
 * @ptr_ - internal (linker) fw/ucode address, little-endian
 *
 * A valid buffer must be DWORD aligned
 *
 * Return the address for accessing the buffer from the host;
 * if the buffer is not valid, return NULL.
 */
111 void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
112 {
113 	u32 off;
114 	u32 ptr = le32_to_cpu(ptr_);
115 
116 	if (ptr % 4)
117 		return NULL;
118 
119 	ptr = wmi_addr_remap(ptr);
120 	if (ptr < WIL6210_FW_HOST_OFF)
121 		return NULL;
122 
123 	off = HOSTADDR(ptr);
124 	if (off > WIL6210_MEM_SIZE - 4)
125 		return NULL;
126 
127 	return wil->csr + off;
128 }
129 
/**
 * Check validity of a host (AHB) address; no linker-address remapping is done
 */
133 void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr)
134 {
135 	u32 off;
136 
137 	if (ptr % 4)
138 		return NULL;
139 
140 	if (ptr < WIL6210_FW_HOST_OFF)
141 		return NULL;
142 
143 	off = HOSTADDR(ptr);
144 	if (off > WIL6210_MEM_SIZE - 4)
145 		return NULL;
146 
147 	return wil->csr + off;
148 }
149 
150 int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr,
151 		 struct wil6210_mbox_hdr *hdr)
152 {
153 	void __iomem *src = wmi_buffer(wil, ptr);
154 	if (!src)
155 		return -EINVAL;
156 
157 	wil_memcpy_fromio_32(hdr, src, sizeof(*hdr));
158 
159 	return 0;
160 }
161 
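/*
 * Write one WMI command into the Tx mailbox ring and signal the firmware
 * via the SW interrupt register. Polls briefly if the head descriptor is
 * still owned by the firmware or the ring is full. Callers must hold
 * @wil->wmi_mutex; wmi_send() and wmi_call() are the locking wrappers.
 */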
162 static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
163 {
164 	struct {
165 		struct wil6210_mbox_hdr hdr;
166 		struct wil6210_mbox_hdr_wmi wmi;
167 	} __packed cmd = {
168 		.hdr = {
169 			.type = WIL_MBOX_HDR_TYPE_WMI,
170 			.flags = 0,
171 			.len = cpu_to_le16(sizeof(cmd.wmi) + len),
172 		},
173 		.wmi = {
174 			.id = cpu_to_le16(cmdid),
175 			.info1 = 0,
176 		},
177 	};
178 	struct wil6210_mbox_ring *r = &wil->mbox_ctl.tx;
179 	struct wil6210_mbox_ring_desc d_head;
180 	u32 next_head;
181 	void __iomem *dst;
182 	void __iomem *head = wmi_addr(wil, r->head);
183 	uint retry;
184 
185 	if (sizeof(cmd) + len > r->entry_size) {
186 		wil_err(wil, "WMI size too large: %d bytes, max is %d\n",
187 			(int)(sizeof(cmd) + len), r->entry_size);
188 		return -ERANGE;
189 	}
190 
191 	might_sleep();
192 
193 	if (!test_bit(wil_status_fwready, &wil->status)) {
194 		wil_err(wil, "FW not ready\n");
195 		return -EAGAIN;
196 	}
197 
198 	if (!head) {
199 		wil_err(wil, "WMI head is garbage: 0x%08x\n", r->head);
200 		return -EINVAL;
201 	}
	/* poll the Tx head descriptor until it is not busy */
203 	for (retry = 5; retry > 0; retry--) {
204 		wil_memcpy_fromio_32(&d_head, head, sizeof(d_head));
205 		if (d_head.sync == 0)
206 			break;
207 		msleep(20);
208 	}
209 	if (d_head.sync != 0) {
210 		wil_err(wil, "WMI head busy\n");
211 		return -EBUSY;
212 	}
213 	/* next head */
214 	next_head = r->base + ((r->head - r->base + sizeof(d_head)) % r->size);
215 	wil_dbg_wmi(wil, "Head 0x%08x -> 0x%08x\n", r->head, next_head);
	/* wait until the FW finishes with the previous command */
217 	for (retry = 5; retry > 0; retry--) {
218 		r->tail = ioread32(wil->csr + HOST_MBOX +
219 				   offsetof(struct wil6210_mbox_ctl, tx.tail));
220 		if (next_head != r->tail)
221 			break;
222 		msleep(20);
223 	}
224 	if (next_head == r->tail) {
225 		wil_err(wil, "WMI ring full\n");
226 		return -EBUSY;
227 	}
228 	dst = wmi_buffer(wil, d_head.addr);
229 	if (!dst) {
230 		wil_err(wil, "invalid WMI buffer: 0x%08x\n",
231 			le32_to_cpu(d_head.addr));
232 		return -EINVAL;
233 	}
234 	cmd.hdr.seq = cpu_to_le16(++wil->wmi_seq);
235 	/* set command */
236 	wil_dbg_wmi(wil, "WMI command 0x%04x [%d]\n", cmdid, len);
237 	wil_hex_dump_wmi("Cmd ", DUMP_PREFIX_OFFSET, 16, 1, &cmd,
238 			 sizeof(cmd), true);
239 	wil_hex_dump_wmi("cmd ", DUMP_PREFIX_OFFSET, 16, 1, buf,
240 			 len, true);
241 	wil_memcpy_toio_32(dst, &cmd, sizeof(cmd));
242 	wil_memcpy_toio_32(dst + sizeof(cmd), buf, len);
243 	/* mark entry as full */
244 	iowrite32(1, wil->csr + HOSTADDR(r->head) +
245 		  offsetof(struct wil6210_mbox_ring_desc, sync));
246 	/* advance next ptr */
247 	iowrite32(r->head = next_head, wil->csr + HOST_MBOX +
248 		  offsetof(struct wil6210_mbox_ctl, tx.head));
249 
250 	trace_wil6210_wmi_cmd(cmdid, buf, len);
251 
252 	/* interrupt to FW */
253 	iowrite32(SW_INT_MBOX, wil->csr + HOST_SW_INT);
254 
255 	return 0;
256 }
257 
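/*
 * Locked wrapper around __wmi_send(); serializes WMI commands
 * on @wil->wmi_mutex
 */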
258 int wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
259 {
260 	int rc;
261 
262 	mutex_lock(&wil->wmi_mutex);
263 	rc = __wmi_send(wil, cmdid, buf, len);
264 	mutex_unlock(&wil->wmi_mutex);
265 
266 	return rc;
267 }
268 
269 /*=== Event handlers ===*/
270 static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
271 {
272 	struct net_device *ndev = wil_to_ndev(wil);
273 	struct wireless_dev *wdev = wil->wdev;
274 	struct wmi_ready_event *evt = d;
275 	wil->fw_version = le32_to_cpu(evt->sw_version);
276 	wil->n_mids = evt->numof_additional_mids;
277 
278 	wil_dbg_wmi(wil, "FW ver. %d; MAC %pM; %d MID's\n", wil->fw_version,
279 		    evt->mac, wil->n_mids);
280 
281 	if (!is_valid_ether_addr(ndev->dev_addr)) {
282 		memcpy(ndev->dev_addr, evt->mac, ETH_ALEN);
283 		memcpy(ndev->perm_addr, evt->mac, ETH_ALEN);
284 	}
285 	snprintf(wdev->wiphy->fw_version, sizeof(wdev->wiphy->fw_version),
286 		 "%d", wil->fw_version);
287 }
288 
289 static void wmi_evt_fw_ready(struct wil6210_priv *wil, int id, void *d,
290 			     int len)
291 {
292 	wil_dbg_wmi(wil, "WMI: FW ready\n");
293 
294 	set_bit(wil_status_fwready, &wil->status);
295 	/* reuse wmi_ready for the firmware ready indication */
296 	complete(&wil->wmi_ready);
297 }
298 
299 static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
300 {
301 	struct wmi_rx_mgmt_packet_event *data = d;
302 	struct wiphy *wiphy = wil_to_wiphy(wil);
303 	struct ieee80211_mgmt *rx_mgmt_frame =
304 			(struct ieee80211_mgmt *)data->payload;
	int ch_no = data->info.channel + 1;
306 	u32 freq = ieee80211_channel_to_frequency(ch_no,
307 			IEEE80211_BAND_60GHZ);
308 	struct ieee80211_channel *channel = ieee80211_get_channel(wiphy, freq);
309 	/* TODO convert LE to CPU */
310 	s32 signal = 0; /* TODO */
311 	__le16 fc = rx_mgmt_frame->frame_control;
312 	u32 d_len = le32_to_cpu(data->info.len);
313 	u16 d_status = le16_to_cpu(data->info.status);
314 
315 	wil_dbg_wmi(wil, "MGMT: channel %d MCS %d SNR %d\n",
316 		    data->info.channel, data->info.mcs, data->info.snr);
317 	wil_dbg_wmi(wil, "status 0x%04x len %d stype %04x\n", d_status, d_len,
318 		    le16_to_cpu(data->info.stype));
319 	wil_dbg_wmi(wil, "qid %d mid %d cid %d\n",
320 		    data->info.qid, data->info.mid, data->info.cid);
321 
322 	if (!channel) {
323 		wil_err(wil, "Frame on unsupported channel\n");
324 		return;
325 	}
326 
327 	if (ieee80211_is_beacon(fc) || ieee80211_is_probe_resp(fc)) {
328 		struct cfg80211_bss *bss;
329 
330 		bss = cfg80211_inform_bss_frame(wiphy, channel, rx_mgmt_frame,
331 						d_len, signal, GFP_KERNEL);
332 		if (bss) {
333 			wil_dbg_wmi(wil, "Added BSS %pM\n",
334 				    rx_mgmt_frame->bssid);
335 			cfg80211_put_bss(wiphy, bss);
336 		} else {
337 			wil_err(wil, "cfg80211_inform_bss() failed\n");
338 		}
339 	} else {
340 		cfg80211_rx_mgmt(wil->wdev, freq, signal,
341 				 (void *)rx_mgmt_frame, d_len, GFP_KERNEL);
342 	}
343 }
344 
345 static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
346 				  void *d, int len)
347 {
348 	if (wil->scan_request) {
349 		struct wmi_scan_complete_event *data = d;
350 		bool aborted = (data->status != 0);
351 
352 		wil_dbg_wmi(wil, "SCAN_COMPLETE(0x%08x)\n", data->status);
353 		cfg80211_scan_done(wil->scan_request, aborted);
354 		wil->scan_request = NULL;
355 	} else {
356 		wil_err(wil, "SCAN_COMPLETE while not scanning\n");
357 	}
358 }
359 
360 static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
361 {
362 	struct net_device *ndev = wil_to_ndev(wil);
363 	struct wireless_dev *wdev = wil->wdev;
364 	struct wmi_connect_event *evt = d;
365 	int ch; /* channel number */
366 	struct station_info sinfo;
367 	u8 *assoc_req_ie, *assoc_resp_ie;
368 	size_t assoc_req_ielen, assoc_resp_ielen;
369 	/* capinfo(u16) + listen_interval(u16) + IEs */
370 	const size_t assoc_req_ie_offset = sizeof(u16) * 2;
371 	/* capinfo(u16) + status_code(u16) + associd(u16) + IEs */
372 	const size_t assoc_resp_ie_offset = sizeof(u16) * 3;
373 
374 	if (len < sizeof(*evt)) {
		wil_err(wil, "Connect event too short: %d bytes\n", len);
376 		return;
377 	}
378 	if (len != sizeof(*evt) + evt->beacon_ie_len + evt->assoc_req_len +
379 		   evt->assoc_resp_len) {
380 		wil_err(wil,
			"Connect event corrupted: %d != %d + %d + %d + %d\n",
382 			len, (int)sizeof(*evt), evt->beacon_ie_len,
383 			evt->assoc_req_len, evt->assoc_resp_len);
384 		return;
385 	}
386 	ch = evt->channel + 1;
387 	wil_dbg_wmi(wil, "Connect %pM channel [%d] cid %d\n",
388 		    evt->bssid, ch, evt->cid);
389 	wil_hex_dump_wmi("connect AI : ", DUMP_PREFIX_OFFSET, 16, 1,
390 			 evt->assoc_info, len - sizeof(*evt), true);
391 
	/* figure out the IEs */
393 	assoc_req_ie = &evt->assoc_info[evt->beacon_ie_len +
394 					assoc_req_ie_offset];
395 	assoc_req_ielen = evt->assoc_req_len - assoc_req_ie_offset;
396 	if (evt->assoc_req_len <= assoc_req_ie_offset) {
397 		assoc_req_ie = NULL;
398 		assoc_req_ielen = 0;
399 	}
400 
401 	assoc_resp_ie = &evt->assoc_info[evt->beacon_ie_len +
402 					 evt->assoc_req_len +
403 					 assoc_resp_ie_offset];
404 	assoc_resp_ielen = evt->assoc_resp_len - assoc_resp_ie_offset;
405 	if (evt->assoc_resp_len <= assoc_resp_ie_offset) {
406 		assoc_resp_ie = NULL;
407 		assoc_resp_ielen = 0;
408 	}
409 
410 	if ((wdev->iftype == NL80211_IFTYPE_STATION) ||
411 	    (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) {
412 		if (wdev->sme_state != CFG80211_SME_CONNECTING) {
413 			wil_err(wil, "Not in connecting state\n");
414 			return;
415 		}
416 		del_timer_sync(&wil->connect_timer);
417 		cfg80211_connect_result(ndev, evt->bssid,
418 					assoc_req_ie, assoc_req_ielen,
419 					assoc_resp_ie, assoc_resp_ielen,
420 					WLAN_STATUS_SUCCESS, GFP_KERNEL);
421 
422 	} else if ((wdev->iftype == NL80211_IFTYPE_AP) ||
423 		   (wdev->iftype == NL80211_IFTYPE_P2P_GO)) {
424 		memset(&sinfo, 0, sizeof(sinfo));
425 
426 		sinfo.generation = wil->sinfo_gen++;
427 
428 		if (assoc_req_ie) {
429 			sinfo.assoc_req_ies = assoc_req_ie;
430 			sinfo.assoc_req_ies_len = assoc_req_ielen;
431 			sinfo.filled |= STATION_INFO_ASSOC_REQ_IES;
432 		}
433 
434 		cfg80211_new_sta(ndev, evt->bssid, &sinfo, GFP_KERNEL);
435 	}
436 	set_bit(wil_status_fwconnected, &wil->status);
437 
438 	/* FIXME FW can transmit only ucast frames to peer */
439 	/* FIXME real ring_id instead of hard coded 0 */
440 	memcpy(wil->dst_addr[0], evt->bssid, ETH_ALEN);
441 
442 	wil->pending_connect_cid = evt->cid;
443 	queue_work(wil->wmi_wq_conn, &wil->connect_worker);
444 }
445 
446 static void wmi_evt_disconnect(struct wil6210_priv *wil, int id,
447 			       void *d, int len)
448 {
449 	struct wmi_disconnect_event *evt = d;
450 
451 	wil_dbg_wmi(wil, "Disconnect %pM reason %d proto %d wmi\n",
452 		    evt->bssid,
453 		    evt->protocol_reason_status, evt->disconnect_reason);
454 
455 	wil->sinfo_gen++;
456 
457 	wil6210_disconnect(wil, evt->bssid);
458 }
459 
460 static void wmi_evt_notify(struct wil6210_priv *wil, int id, void *d, int len)
461 {
462 	struct wmi_notify_req_done_event *evt = d;
463 
464 	if (len < sizeof(*evt)) {
465 		wil_err(wil, "Short NOTIFY event\n");
466 		return;
467 	}
468 
469 	wil->stats.tsf = le64_to_cpu(evt->tsf);
470 	wil->stats.snr = le32_to_cpu(evt->snr_val);
471 	wil->stats.bf_mcs = le16_to_cpu(evt->bf_mcs);
472 	wil->stats.my_rx_sector = le16_to_cpu(evt->my_rx_sector);
473 	wil->stats.my_tx_sector = le16_to_cpu(evt->my_tx_sector);
474 	wil->stats.peer_rx_sector = le16_to_cpu(evt->other_rx_sector);
475 	wil->stats.peer_tx_sector = le16_to_cpu(evt->other_tx_sector);
476 	wil_dbg_wmi(wil, "Link status, MCS %d TSF 0x%016llx\n"
477 		    "BF status 0x%08x SNR 0x%08x\n"
478 		    "Tx Tpt %d goodput %d Rx goodput %d\n"
479 		    "Sectors(rx:tx) my %d:%d peer %d:%d\n",
480 		    wil->stats.bf_mcs, wil->stats.tsf, evt->status,
481 		    wil->stats.snr, le32_to_cpu(evt->tx_tpt),
482 		    le32_to_cpu(evt->tx_goodput), le32_to_cpu(evt->rx_goodput),
483 		    wil->stats.my_rx_sector, wil->stats.my_tx_sector,
484 		    wil->stats.peer_rx_sector, wil->stats.peer_tx_sector);
485 }
486 
/*
 * Firmware reports an EAPOL frame using a WMI event.
 * Reconstruct the Ethernet frame and deliver it via the normal Rx path.
 */
491 static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id,
492 			     void *d, int len)
493 {
494 	struct net_device *ndev = wil_to_ndev(wil);
495 	struct wmi_eapol_rx_event *evt = d;
496 	u16 eapol_len = le16_to_cpu(evt->eapol_len);
497 	int sz = eapol_len + ETH_HLEN;
498 	struct sk_buff *skb;
499 	struct ethhdr *eth;
500 
501 	wil_dbg_wmi(wil, "EAPOL len %d from %pM\n", eapol_len,
502 		    evt->src_mac);
503 
504 	if (eapol_len > 196) { /* TODO: revisit size limit */
505 		wil_err(wil, "EAPOL too large\n");
506 		return;
507 	}
508 
509 	skb = alloc_skb(sz, GFP_KERNEL);
510 	if (!skb) {
511 		wil_err(wil, "Failed to allocate skb\n");
512 		return;
513 	}
514 	eth = (struct ethhdr *)skb_put(skb, ETH_HLEN);
515 	memcpy(eth->h_dest, ndev->dev_addr, ETH_ALEN);
516 	memcpy(eth->h_source, evt->src_mac, ETH_ALEN);
517 	eth->h_proto = cpu_to_be16(ETH_P_PAE);
518 	memcpy(skb_put(skb, eapol_len), evt->eapol, eapol_len);
519 	skb->protocol = eth_type_trans(skb, ndev);
520 	if (likely(netif_rx_ni(skb) == NET_RX_SUCCESS)) {
521 		ndev->stats.rx_packets++;
522 		ndev->stats.rx_bytes += skb->len;
523 	} else {
524 		ndev->stats.rx_dropped++;
525 	}
526 }
527 
528 static void wmi_evt_linkup(struct wil6210_priv *wil, int id, void *d, int len)
529 {
530 	struct net_device *ndev = wil_to_ndev(wil);
531 	struct wmi_data_port_open_event *evt = d;
532 
533 	wil_dbg_wmi(wil, "Link UP for CID %d\n", evt->cid);
534 
535 	netif_carrier_on(ndev);
536 }
537 
538 static void wmi_evt_linkdown(struct wil6210_priv *wil, int id, void *d, int len)
539 {
540 	struct net_device *ndev = wil_to_ndev(wil);
541 	struct wmi_wbe_link_down_event *evt = d;
542 
543 	wil_dbg_wmi(wil, "Link DOWN for CID %d, reason %d\n",
544 		    evt->cid, le32_to_cpu(evt->reason));
545 
546 	netif_carrier_off(ndev);
547 }
548 
549 static void wmi_evt_ba_status(struct wil6210_priv *wil, int id, void *d,
550 			      int len)
551 {
552 	struct wmi_vring_ba_status_event *evt = d;
553 
554 	wil_dbg_wmi(wil, "BACK[%d] %s {%d} timeout %d\n",
555 		    evt->ringid, evt->status ? "N/A" : "OK", evt->agg_wsize,
556 		    __le16_to_cpu(evt->ba_timeout));
557 }
558 
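/*
 * Dispatch table mapping WMI event ids to their handlers; consulted by
 * wmi_evt_call_handler(). Unsolicited events with no entry here are
 * logged as unhandled by wmi_event_handle().
 */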
559 static const struct {
560 	int eventid;
561 	void (*handler)(struct wil6210_priv *wil, int eventid,
562 			void *data, int data_len);
563 } wmi_evt_handlers[] = {
564 	{WMI_READY_EVENTID,		wmi_evt_ready},
565 	{WMI_FW_READY_EVENTID,		wmi_evt_fw_ready},
566 	{WMI_RX_MGMT_PACKET_EVENTID,	wmi_evt_rx_mgmt},
567 	{WMI_SCAN_COMPLETE_EVENTID,	wmi_evt_scan_complete},
568 	{WMI_CONNECT_EVENTID,		wmi_evt_connect},
569 	{WMI_DISCONNECT_EVENTID,	wmi_evt_disconnect},
570 	{WMI_NOTIFY_REQ_DONE_EVENTID,	wmi_evt_notify},
571 	{WMI_EAPOL_RX_EVENTID,		wmi_evt_eapol_rx},
572 	{WMI_DATA_PORT_OPEN_EVENTID,	wmi_evt_linkup},
573 	{WMI_WBE_LINKDOWN_EVENTID,	wmi_evt_linkdown},
574 	{WMI_BA_STATUS_EVENTID,		wmi_evt_ba_status},
575 };
576 
/*
 * Called from the threaded IRQ handler (process context).
 * Extract WMI events from the mailbox and queue them to @wil->pending_wmi_ev,
 * to be handled later by @wmi_event_worker running in the context of the
 * "wil6210_wmi" workqueue.
 */
583 void wmi_recv_cmd(struct wil6210_priv *wil)
584 {
585 	struct wil6210_mbox_ring_desc d_tail;
586 	struct wil6210_mbox_hdr hdr;
587 	struct wil6210_mbox_ring *r = &wil->mbox_ctl.rx;
588 	struct pending_wmi_event *evt;
589 	u8 *cmd;
590 	void __iomem *src;
591 	ulong flags;
592 
593 	if (!test_bit(wil_status_reset_done, &wil->status)) {
594 		wil_err(wil, "Reset not completed\n");
595 		return;
596 	}
597 
598 	for (;;) {
599 		u16 len;
600 
601 		r->head = ioread32(wil->csr + HOST_MBOX +
602 				   offsetof(struct wil6210_mbox_ctl, rx.head));
603 		if (r->tail == r->head)
604 			return;
605 
606 		/* read cmd from tail */
607 		wil_memcpy_fromio_32(&d_tail, wil->csr + HOSTADDR(r->tail),
608 				     sizeof(struct wil6210_mbox_ring_desc));
609 		if (d_tail.sync == 0) {
610 			wil_err(wil, "Mbox evt not owned by FW?\n");
611 			return;
612 		}
613 
614 		if (0 != wmi_read_hdr(wil, d_tail.addr, &hdr)) {
615 			wil_err(wil, "Mbox evt at 0x%08x?\n",
616 				le32_to_cpu(d_tail.addr));
617 			return;
618 		}
619 
620 		len = le16_to_cpu(hdr.len);
621 		src = wmi_buffer(wil, d_tail.addr) +
622 		      sizeof(struct wil6210_mbox_hdr);
623 		evt = kmalloc(ALIGN(offsetof(struct pending_wmi_event,
624 					     event.wmi) + len, 4),
625 			      GFP_KERNEL);
626 		if (!evt)
627 			return;
628 
629 		evt->event.hdr = hdr;
630 		cmd = (void *)&evt->event.wmi;
631 		wil_memcpy_fromio_32(cmd, src, len);
632 		/* mark entry as empty */
633 		iowrite32(0, wil->csr + HOSTADDR(r->tail) +
634 			  offsetof(struct wil6210_mbox_ring_desc, sync));
635 		/* indicate */
636 		wil_dbg_wmi(wil, "Mbox evt %04x %04x %04x %02x\n",
637 			    le16_to_cpu(hdr.seq), len, le16_to_cpu(hdr.type),
638 			    hdr.flags);
639 		if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) &&
640 		    (len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
641 			u16 id = le16_to_cpu(evt->event.wmi.id);
642 			wil_dbg_wmi(wil, "WMI event 0x%04x\n", id);
643 			trace_wil6210_wmi_event(id, &evt->event.wmi, len);
644 		}
645 		wil_hex_dump_wmi("evt ", DUMP_PREFIX_OFFSET, 16, 1,
646 				 &evt->event.hdr, sizeof(hdr) + len, true);
647 
648 		/* advance tail */
649 		r->tail = r->base + ((r->tail - r->base +
650 			  sizeof(struct wil6210_mbox_ring_desc)) % r->size);
651 		iowrite32(r->tail, wil->csr + HOST_MBOX +
652 			  offsetof(struct wil6210_mbox_ctl, rx.tail));
653 
654 		/* add to the pending list */
655 		spin_lock_irqsave(&wil->wmi_ev_lock, flags);
656 		list_add_tail(&evt->list, &wil->pending_wmi_ev);
657 		spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
658 		{
659 			int q =	queue_work(wil->wmi_wq,
660 					   &wil->wmi_event_worker);
661 			wil_dbg_wmi(wil, "queue_work -> %d\n", q);
662 		}
663 	}
664 }
665 
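/*
 * Send a WMI command and wait up to @to_msec for the matching @reply_id
 * event. Illustrative usage (this mirrors wmi_echo() below):
 *
 *	struct wmi_echo_cmd cmd = {
 *		.value = cpu_to_le32(0x12345678),
 *	};
 *	rc = wmi_call(wil, WMI_ECHO_CMDID, &cmd, sizeof(cmd),
 *		      WMI_ECHO_RSP_EVENTID, NULL, 0, 20);
 */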
666 int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len,
667 	     u16 reply_id, void *reply, u8 reply_size, int to_msec)
668 {
669 	int rc;
670 	int remain;
671 
672 	mutex_lock(&wil->wmi_mutex);
673 
674 	rc = __wmi_send(wil, cmdid, buf, len);
675 	if (rc)
676 		goto out;
677 
678 	wil->reply_id = reply_id;
679 	wil->reply_buf = reply;
680 	wil->reply_size = reply_size;
681 	remain = wait_for_completion_timeout(&wil->wmi_ready,
682 			msecs_to_jiffies(to_msec));
683 	if (0 == remain) {
684 		wil_err(wil, "wmi_call(0x%04x->0x%04x) timeout %d msec\n",
685 			cmdid, reply_id, to_msec);
686 		rc = -ETIME;
687 	} else {
688 		wil_dbg_wmi(wil,
689 			    "wmi_call(0x%04x->0x%04x) completed in %d msec\n",
690 			    cmdid, reply_id,
691 			    to_msec - jiffies_to_msecs(remain));
692 	}
693 	wil->reply_id = 0;
694 	wil->reply_buf = NULL;
695 	wil->reply_size = 0;
696  out:
697 	mutex_unlock(&wil->wmi_mutex);
698 
699 	return rc;
700 }
701 
702 int wmi_echo(struct wil6210_priv *wil)
703 {
704 	struct wmi_echo_cmd cmd = {
705 		.value = cpu_to_le32(0x12345678),
706 	};
707 
708 	return wmi_call(wil, WMI_ECHO_CMDID, &cmd, sizeof(cmd),
709 			 WMI_ECHO_RSP_EVENTID, NULL, 0, 20);
710 }
711 
712 int wmi_set_mac_address(struct wil6210_priv *wil, void *addr)
713 {
714 	struct wmi_set_mac_address_cmd cmd;
715 
716 	memcpy(cmd.mac, addr, ETH_ALEN);
717 
718 	wil_dbg_wmi(wil, "Set MAC %pM\n", addr);
719 
720 	return wmi_send(wil, WMI_SET_MAC_ADDRESS_CMDID, &cmd, sizeof(cmd));
721 }
722 
723 int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan)
724 {
725 	int rc;
726 
727 	struct wmi_pcp_start_cmd cmd = {
728 		.bcon_interval = cpu_to_le16(bi),
729 		.network_type = wmi_nettype,
730 		.disable_sec_offload = 1,
731 		.channel = chan - 1,
732 	};
733 	struct {
734 		struct wil6210_mbox_hdr_wmi wmi;
735 		struct wmi_pcp_started_event evt;
736 	} __packed reply;
737 
738 	if (!wil->secure_pcp)
739 		cmd.disable_sec = 1;
740 
741 	rc = wmi_call(wil, WMI_PCP_START_CMDID, &cmd, sizeof(cmd),
742 		      WMI_PCP_STARTED_EVENTID, &reply, sizeof(reply), 100);
743 	if (rc)
744 		return rc;
745 
746 	if (reply.evt.status != WMI_FW_STATUS_SUCCESS)
747 		rc = -EINVAL;
748 
749 	return rc;
750 }
751 
752 int wmi_pcp_stop(struct wil6210_priv *wil)
753 {
754 	return wmi_call(wil, WMI_PCP_STOP_CMDID, NULL, 0,
755 			WMI_PCP_STOPPED_EVENTID, NULL, 0, 20);
756 }
757 
758 int wmi_set_ssid(struct wil6210_priv *wil, u8 ssid_len, const void *ssid)
759 {
760 	struct wmi_set_ssid_cmd cmd = {
761 		.ssid_len = cpu_to_le32(ssid_len),
762 	};
763 
764 	if (ssid_len > sizeof(cmd.ssid))
765 		return -EINVAL;
766 
767 	memcpy(cmd.ssid, ssid, ssid_len);
768 
769 	return wmi_send(wil, WMI_SET_SSID_CMDID, &cmd, sizeof(cmd));
770 }
771 
772 int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid)
773 {
774 	int rc;
775 	struct {
776 		struct wil6210_mbox_hdr_wmi wmi;
777 		struct wmi_set_ssid_cmd cmd;
778 	} __packed reply;
779 	int len; /* reply.cmd.ssid_len in CPU order */
780 
781 	rc = wmi_call(wil, WMI_GET_SSID_CMDID, NULL, 0, WMI_GET_SSID_EVENTID,
782 		      &reply, sizeof(reply), 20);
783 	if (rc)
784 		return rc;
785 
786 	len = le32_to_cpu(reply.cmd.ssid_len);
787 	if (len > sizeof(reply.cmd.ssid))
788 		return -EINVAL;
789 
790 	*ssid_len = len;
791 	memcpy(ssid, reply.cmd.ssid, len);
792 
793 	return 0;
794 }
795 
796 int wmi_set_channel(struct wil6210_priv *wil, int channel)
797 {
798 	struct wmi_set_pcp_channel_cmd cmd = {
799 		.channel = channel - 1,
800 	};
801 
802 	return wmi_send(wil, WMI_SET_PCP_CHANNEL_CMDID, &cmd, sizeof(cmd));
803 }
804 
805 int wmi_get_channel(struct wil6210_priv *wil, int *channel)
806 {
807 	int rc;
808 	struct {
809 		struct wil6210_mbox_hdr_wmi wmi;
810 		struct wmi_set_pcp_channel_cmd cmd;
811 	} __packed reply;
812 
813 	rc = wmi_call(wil, WMI_GET_PCP_CHANNEL_CMDID, NULL, 0,
814 		      WMI_GET_PCP_CHANNEL_EVENTID, &reply, sizeof(reply), 20);
815 	if (rc)
816 		return rc;
817 
818 	if (reply.cmd.channel > 3)
819 		return -EINVAL;
820 
821 	*channel = reply.cmd.channel + 1;
822 
823 	return 0;
824 }
825 
826 int wmi_p2p_cfg(struct wil6210_priv *wil, int channel)
827 {
828 	struct wmi_p2p_cfg_cmd cmd = {
829 		.discovery_mode = WMI_DISCOVERY_MODE_NON_OFFLOAD,
830 		.channel = channel - 1,
831 	};
832 
833 	return wmi_send(wil, WMI_P2P_CFG_CMDID, &cmd, sizeof(cmd));
834 }
835 
836 int wmi_tx_eapol(struct wil6210_priv *wil, struct sk_buff *skb)
837 {
838 	struct wmi_eapol_tx_cmd *cmd;
839 	struct ethhdr *eth;
840 	u16 eapol_len = skb->len - ETH_HLEN;
841 	void *eapol = skb->data + ETH_HLEN;
842 	uint i;
843 	int rc;
844 
845 	skb_set_mac_header(skb, 0);
846 	eth = eth_hdr(skb);
847 	wil_dbg_wmi(wil, "EAPOL %d bytes to %pM\n", eapol_len, eth->h_dest);
848 	for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
849 		if (memcmp(wil->dst_addr[i], eth->h_dest, ETH_ALEN) == 0)
850 			goto found_dest;
851 	}
852 
853 	return -EINVAL;
854 
855  found_dest:
	/* build the WMI command carrying the EAPOL payload */
	cmd = kzalloc(sizeof(*cmd) + eapol_len, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;
860 
861 	memcpy(cmd->dst_mac, eth->h_dest, ETH_ALEN);
862 	cmd->eapol_len = cpu_to_le16(eapol_len);
863 	memcpy(cmd->eapol, eapol, eapol_len);
864 	rc = wmi_send(wil, WMI_EAPOL_TX_CMDID, cmd, sizeof(*cmd) + eapol_len);
865 	kfree(cmd);
866 
867 	return rc;
868 }
869 
870 int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
871 		       const void *mac_addr)
872 {
873 	struct wmi_delete_cipher_key_cmd cmd = {
874 		.key_index = key_index,
875 	};
876 
877 	if (mac_addr)
878 		memcpy(cmd.mac, mac_addr, WMI_MAC_LEN);
879 
880 	return wmi_send(wil, WMI_DELETE_CIPHER_KEY_CMDID, &cmd, sizeof(cmd));
881 }
882 
883 int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
884 		       const void *mac_addr, int key_len, const void *key)
885 {
886 	struct wmi_add_cipher_key_cmd cmd = {
887 		.key_index = key_index,
888 		.key_usage = WMI_KEY_USE_PAIRWISE,
889 		.key_len = key_len,
890 	};
891 
892 	if (!key || (key_len > sizeof(cmd.key)))
893 		return -EINVAL;
894 
895 	memcpy(cmd.key, key, key_len);
896 	if (mac_addr)
897 		memcpy(cmd.mac, mac_addr, WMI_MAC_LEN);
898 
899 	return wmi_send(wil, WMI_ADD_CIPHER_KEY_CMDID, &cmd, sizeof(cmd));
900 }
901 
902 int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
903 {
904 	int rc;
905 	u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len;
906 	struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL);
907 	if (!cmd)
908 		return -ENOMEM;
909 
910 	cmd->mgmt_frm_type = type;
	/* BUG: FW API defines ieLen as u8. Will fix in FW */
912 	cmd->ie_len = cpu_to_le16(ie_len);
913 	memcpy(cmd->ie_info, ie, ie_len);
914 	rc = wmi_send(wil, WMI_SET_APPIE_CMDID, cmd, len);
915 	kfree(cmd);
916 
917 	return rc;
918 }
919 
920 int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
921 {
922 	struct wireless_dev *wdev = wil->wdev;
923 	struct net_device *ndev = wil_to_ndev(wil);
924 	struct wmi_cfg_rx_chain_cmd cmd = {
925 		.action = WMI_RX_CHAIN_ADD,
926 		.rx_sw_ring = {
927 			.max_mpdu_size = cpu_to_le16(RX_BUF_LEN),
928 			.ring_mem_base = cpu_to_le64(vring->pa),
929 			.ring_size = cpu_to_le16(vring->size),
930 		},
931 		.mid = 0, /* TODO - what is it? */
932 		.decap_trans_type = WMI_DECAP_TYPE_802_3,
933 	};
934 	struct {
935 		struct wil6210_mbox_hdr_wmi wmi;
936 		struct wmi_cfg_rx_chain_done_event evt;
937 	} __packed evt;
938 	int rc;
939 
940 	if (wdev->iftype == NL80211_IFTYPE_MONITOR) {
941 		struct ieee80211_channel *ch = wdev->preset_chandef.chan;
942 
943 		cmd.sniffer_cfg.mode = cpu_to_le32(WMI_SNIFFER_ON);
944 		if (ch)
945 			cmd.sniffer_cfg.channel = ch->hw_value - 1;
946 		cmd.sniffer_cfg.phy_info_mode =
947 			cpu_to_le32(ndev->type == ARPHRD_IEEE80211_RADIOTAP);
948 		cmd.sniffer_cfg.phy_support =
949 			cpu_to_le32((wil->monitor_flags & MONITOR_FLAG_CONTROL)
950 				    ? WMI_SNIFFER_CP : WMI_SNIFFER_DP);
951 	}
952 	/* typical time for secure PCP is 840ms */
953 	rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd),
954 		      WMI_CFG_RX_CHAIN_DONE_EVENTID, &evt, sizeof(evt), 2000);
955 	if (rc)
956 		return rc;
957 
958 	vring->hwtail = le32_to_cpu(evt.evt.rx_ring_tail_ptr);
959 
960 	wil_dbg_misc(wil, "Rx init: status %d tail 0x%08x\n",
961 		     le32_to_cpu(evt.evt.status), vring->hwtail);
962 
963 	if (le32_to_cpu(evt.evt.status) != WMI_CFG_RX_CHAIN_SUCCESS)
964 		rc = -EINVAL;
965 
966 	return rc;
967 }
968 
969 int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r)
970 {
971 	int rc;
972 	struct wmi_temp_sense_cmd cmd = {
973 		.measure_marlon_m_en = cpu_to_le32(!!t_m),
974 		.measure_marlon_r_en = cpu_to_le32(!!t_r),
975 	};
976 	struct {
977 		struct wil6210_mbox_hdr_wmi wmi;
978 		struct wmi_temp_sense_done_event evt;
979 	} __packed reply;
980 
981 	rc = wmi_call(wil, WMI_TEMP_SENSE_CMDID, &cmd, sizeof(cmd),
982 		      WMI_TEMP_SENSE_DONE_EVENTID, &reply, sizeof(reply), 100);
983 	if (rc)
984 		return rc;
985 
986 	if (t_m)
987 		*t_m = le32_to_cpu(reply.evt.marlon_m_t1000);
988 	if (t_r)
989 		*t_r = le32_to_cpu(reply.evt.marlon_r_t1000);
990 
991 	return 0;
992 }
993 
994 void wmi_event_flush(struct wil6210_priv *wil)
995 {
996 	struct pending_wmi_event *evt, *t;
997 
998 	wil_dbg_wmi(wil, "%s()\n", __func__);
999 
1000 	list_for_each_entry_safe(evt, t, &wil->pending_wmi_ev, list) {
1001 		list_del(&evt->list);
1002 		kfree(evt);
1003 	}
1004 }
1005 
1006 static bool wmi_evt_call_handler(struct wil6210_priv *wil, int id,
1007 				 void *d, int len)
1008 {
1009 	uint i;
1010 
1011 	for (i = 0; i < ARRAY_SIZE(wmi_evt_handlers); i++) {
1012 		if (wmi_evt_handlers[i].eventid == id) {
1013 			wmi_evt_handlers[i].handler(wil, id, d, len);
1014 			return true;
1015 		}
1016 	}
1017 
1018 	return false;
1019 }
1020 
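/*
 * Handle a single mailbox entry. If a wmi_call() is waiting for this event
 * id (@wil->reply_id), copy it into the caller's buffer (or invoke its
 * handler when no buffer was supplied) and complete @wil->wmi_ready;
 * otherwise treat it as unsolicited and dispatch via @wmi_evt_handlers.
 */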
1021 static void wmi_event_handle(struct wil6210_priv *wil,
1022 			     struct wil6210_mbox_hdr *hdr)
1023 {
1024 	u16 len = le16_to_cpu(hdr->len);
1025 
1026 	if ((hdr->type == WIL_MBOX_HDR_TYPE_WMI) &&
1027 	    (len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
1028 		struct wil6210_mbox_hdr_wmi *wmi = (void *)(&hdr[1]);
1029 		void *evt_data = (void *)(&wmi[1]);
1030 		u16 id = le16_to_cpu(wmi->id);
1031 		/* check if someone waits for this event */
1032 		if (wil->reply_id && wil->reply_id == id) {
1033 			if (wil->reply_buf) {
1034 				memcpy(wil->reply_buf, wmi,
1035 				       min(len, wil->reply_size));
1036 			} else {
1037 				wmi_evt_call_handler(wil, id, evt_data,
1038 						     len - sizeof(*wmi));
1039 			}
1040 			wil_dbg_wmi(wil, "Complete WMI 0x%04x\n", id);
1041 			complete(&wil->wmi_ready);
1042 			return;
1043 		}
1044 		/* unsolicited event */
1045 		/* search for handler */
1046 		if (!wmi_evt_call_handler(wil, id, evt_data,
1047 					  len - sizeof(*wmi))) {
1048 			wil_err(wil, "Unhandled event 0x%04x\n", id);
1049 		}
1050 	} else {
1051 		wil_err(wil, "Unknown event type\n");
1052 		print_hex_dump(KERN_ERR, "evt?? ", DUMP_PREFIX_OFFSET, 16, 1,
1053 			       hdr, sizeof(*hdr) + len, true);
1054 	}
1055 }
1056 
1057 /*
1058  * Retrieve next WMI event from the pending list
1059  */
1060 static struct list_head *next_wmi_ev(struct wil6210_priv *wil)
1061 {
1062 	ulong flags;
1063 	struct list_head *ret = NULL;
1064 
1065 	spin_lock_irqsave(&wil->wmi_ev_lock, flags);
1066 
1067 	if (!list_empty(&wil->pending_wmi_ev)) {
1068 		ret = wil->pending_wmi_ev.next;
1069 		list_del(ret);
1070 	}
1071 
1072 	spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
1073 
1074 	return ret;
1075 }
1076 
1077 /*
1078  * Handler for the WMI events
1079  */
1080 void wmi_event_worker(struct work_struct *work)
1081 {
1082 	struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
1083 						 wmi_event_worker);
1084 	struct pending_wmi_event *evt;
1085 	struct list_head *lh;
1086 
1087 	while ((lh = next_wmi_ev(wil)) != NULL) {
1088 		evt = list_entry(lh, struct pending_wmi_event, list);
1089 		wmi_event_handle(wil, &evt->event.hdr);
1090 		kfree(evt);
1091 	}
1092 }
1093