1 /*
2  * Copyright (c) 2012 Qualcomm Atheros, Inc.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16 
17 #include <linux/etherdevice.h>
18 #include <linux/if_arp.h>
19 
20 #include "wil6210.h"
21 #include "txrx.h"
22 #include "wmi.h"
23 
24 /**
25  * WMI event receiving - theory of operations
26  *
27  * When firmware about to report WMI event, it fills memory area
28  * in the mailbox and raises misc. IRQ. Thread interrupt handler invoked for
29  * the misc IRQ, function @wmi_recv_cmd called by thread IRQ handler.
30  *
31  * @wmi_recv_cmd reads event, allocates memory chunk  and attaches it to the
32  * event list @wil->pending_wmi_ev. Then, work queue @wil->wmi_wq wakes up
33  * and handles events within the @wmi_event_worker. Every event get detached
34  * from list, processed and deleted.
35  *
36  * Purpose for this mechanism is to release IRQ thread; otherwise,
37  * if WMI event handling involves another WMI command flow, this 2-nd flow
38  * won't be completed because of blocked IRQ thread.
39  */
40 
41 /**
42  * Addressing - theory of operations
43  *
44  * There are several buses present on the WIL6210 card.
45  * Same memory areas are visible at different address on
46  * the different busses. There are 3 main bus masters:
47  *  - MAC CPU (ucode)
48  *  - User CPU (firmware)
49  *  - AHB (host)
50  *
51  * On the PCI bus, there is one BAR (BAR0) of 2Mb size, exposing
52  * AHB addresses starting from 0x880000
53  *
54  * Internally, firmware uses addresses that allows faster access but
55  * are invisible from the host. To read from these addresses, alternative
56  * AHB address must be used.
57  *
58  * Memory mapping
59  * Linker address         PCI/Host address
60  *                        0x880000 .. 0xa80000  2Mb BAR0
61  * 0x800000 .. 0x807000   0x900000 .. 0x907000  28k DCCM
62  * 0x840000 .. 0x857000   0x908000 .. 0x91f000  92k PERIPH
63  */
64 
65 /**
66  * @fw_mapping provides memory remapping table
67  */
68 static const struct {
69 	u32 from; /* linker address - from, inclusive */
70 	u32 to;   /* linker address - to, exclusive */
71 	u32 host; /* PCI/Host address - BAR0 + 0x880000 */
72 } fw_mapping[] = {
73 	{0x000000, 0x040000, 0x8c0000}, /* FW code RAM 256k */
74 	{0x800000, 0x808000, 0x900000}, /* FW data RAM 32k */
75 	{0x840000, 0x860000, 0x908000}, /* peripheral data RAM 128k/96k used */
76 	{0x880000, 0x88a000, 0x880000}, /* various RGF */
77 	{0x8c0000, 0x932000, 0x8c0000}, /* trivial mapping for upper area */
78 	/*
79 	 * 920000..930000 ucode code RAM
80 	 * 930000..932000 ucode data RAM
81 	 */
82 };
83 
84 /**
85  * return AHB address for given firmware/ucode internal (linker) address
86  * @x - internal address
87  * If address have no valid AHB mapping, return 0
88  */
89 static u32 wmi_addr_remap(u32 x)
90 {
91 	uint i;
92 
93 	for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
94 		if ((x >= fw_mapping[i].from) && (x < fw_mapping[i].to))
95 			return x + fw_mapping[i].host - fw_mapping[i].from;
96 	}
97 
98 	return 0;
99 }
100 
101 /**
102  * Check address validity for WMI buffer; remap if needed
103  * @ptr - internal (linker) fw/ucode address
104  *
105  * Valid buffer should be DWORD aligned
106  *
107  * return address for accessing buffer from the host;
108  * if buffer is not valid, return NULL.
109  */
110 void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
111 {
112 	u32 off;
113 	u32 ptr = le32_to_cpu(ptr_);
114 
115 	if (ptr % 4)
116 		return NULL;
117 
118 	ptr = wmi_addr_remap(ptr);
119 	if (ptr < WIL6210_FW_HOST_OFF)
120 		return NULL;
121 
122 	off = HOSTADDR(ptr);
123 	if (off > WIL6210_MEM_SIZE - 4)
124 		return NULL;
125 
126 	return wil->csr + off;
127 }
128 
129 /**
130  * Check address validity
131  */
132 void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr)
133 {
134 	u32 off;
135 
136 	if (ptr % 4)
137 		return NULL;
138 
139 	if (ptr < WIL6210_FW_HOST_OFF)
140 		return NULL;
141 
142 	off = HOSTADDR(ptr);
143 	if (off > WIL6210_MEM_SIZE - 4)
144 		return NULL;
145 
146 	return wil->csr + off;
147 }
148 
149 int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr,
150 		 struct wil6210_mbox_hdr *hdr)
151 {
152 	void __iomem *src = wmi_buffer(wil, ptr);
153 	if (!src)
154 		return -EINVAL;
155 
156 	wil_memcpy_fromio_32(hdr, src, sizeof(*hdr));
157 
158 	return 0;
159 }
160 
161 static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
162 {
163 	struct {
164 		struct wil6210_mbox_hdr hdr;
165 		struct wil6210_mbox_hdr_wmi wmi;
166 	} __packed cmd = {
167 		.hdr = {
168 			.type = WIL_MBOX_HDR_TYPE_WMI,
169 			.flags = 0,
170 			.len = cpu_to_le16(sizeof(cmd.wmi) + len),
171 		},
172 		.wmi = {
173 			.id = cpu_to_le16(cmdid),
174 			.info1 = 0,
175 		},
176 	};
177 	struct wil6210_mbox_ring *r = &wil->mbox_ctl.tx;
178 	struct wil6210_mbox_ring_desc d_head;
179 	u32 next_head;
180 	void __iomem *dst;
181 	void __iomem *head = wmi_addr(wil, r->head);
182 	uint retry;
183 
184 	if (sizeof(cmd) + len > r->entry_size) {
185 		wil_err(wil, "WMI size too large: %d bytes, max is %d\n",
186 			(int)(sizeof(cmd) + len), r->entry_size);
187 		return -ERANGE;
188 	}
189 
190 	might_sleep();
191 
192 	if (!test_bit(wil_status_fwready, &wil->status)) {
193 		wil_err(wil, "FW not ready\n");
194 		return -EAGAIN;
195 	}
196 
197 	if (!head) {
198 		wil_err(wil, "WMI head is garbage: 0x%08x\n", r->head);
199 		return -EINVAL;
200 	}
	/* read the Tx head until it is no longer busy */
202 	for (retry = 5; retry > 0; retry--) {
203 		wil_memcpy_fromio_32(&d_head, head, sizeof(d_head));
204 		if (d_head.sync == 0)
205 			break;
206 		msleep(20);
207 	}
208 	if (d_head.sync != 0) {
209 		wil_err(wil, "WMI head busy\n");
210 		return -EBUSY;
211 	}
	/* next head: advance by one descriptor, wrapping around the ring */
213 	next_head = r->base + ((r->head - r->base + sizeof(d_head)) % r->size);
214 	wil_dbg_wmi(wil, "Head 0x%08x -> 0x%08x\n", r->head, next_head);
	/* wait until the FW finishes with the previous command */
216 	for (retry = 5; retry > 0; retry--) {
217 		r->tail = ioread32(wil->csr + HOST_MBOX +
218 				   offsetof(struct wil6210_mbox_ctl, tx.tail));
219 		if (next_head != r->tail)
220 			break;
221 		msleep(20);
222 	}
223 	if (next_head == r->tail) {
224 		wil_err(wil, "WMI ring full\n");
225 		return -EBUSY;
226 	}
227 	dst = wmi_buffer(wil, d_head.addr);
228 	if (!dst) {
229 		wil_err(wil, "invalid WMI buffer: 0x%08x\n",
230 			le32_to_cpu(d_head.addr));
231 		return -EINVAL;
232 	}
233 	cmd.hdr.seq = cpu_to_le16(++wil->wmi_seq);
234 	/* set command */
235 	wil_dbg_wmi(wil, "WMI command 0x%04x [%d]\n", cmdid, len);
236 	wil_hex_dump_wmi("Cmd ", DUMP_PREFIX_OFFSET, 16, 1, &cmd,
237 			 sizeof(cmd), true);
238 	wil_hex_dump_wmi("cmd ", DUMP_PREFIX_OFFSET, 16, 1, buf,
239 			 len, true);
240 	wil_memcpy_toio_32(dst, &cmd, sizeof(cmd));
241 	wil_memcpy_toio_32(dst + sizeof(cmd), buf, len);
242 	/* mark entry as full */
243 	iowrite32(1, wil->csr + HOSTADDR(r->head) +
244 		  offsetof(struct wil6210_mbox_ring_desc, sync));
245 	/* advance next ptr */
246 	iowrite32(r->head = next_head, wil->csr + HOST_MBOX +
247 		  offsetof(struct wil6210_mbox_ctl, tx.head));
248 
249 	/* interrupt to FW */
250 	iowrite32(SW_INT_MBOX, wil->csr + HOST_SW_INT);
251 
252 	return 0;
253 }
254 
255 int wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
256 {
257 	int rc;
258 
259 	mutex_lock(&wil->wmi_mutex);
260 	rc = __wmi_send(wil, cmdid, buf, len);
261 	mutex_unlock(&wil->wmi_mutex);
262 
263 	return rc;
264 }
265 
266 /*=== Event handlers ===*/
267 static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
268 {
269 	struct net_device *ndev = wil_to_ndev(wil);
270 	struct wireless_dev *wdev = wil->wdev;
271 	struct wmi_ready_event *evt = d;
272 	u32 ver = le32_to_cpu(evt->sw_version);
273 
274 	wil_dbg_wmi(wil, "FW ver. %d; MAC %pM\n", ver, evt->mac);
275 
276 	if (!is_valid_ether_addr(ndev->dev_addr)) {
277 		memcpy(ndev->dev_addr, evt->mac, ETH_ALEN);
278 		memcpy(ndev->perm_addr, evt->mac, ETH_ALEN);
279 	}
280 	snprintf(wdev->wiphy->fw_version, sizeof(wdev->wiphy->fw_version),
281 		 "%d", ver);
282 }
283 
284 static void wmi_evt_fw_ready(struct wil6210_priv *wil, int id, void *d,
285 			     int len)
286 {
287 	wil_dbg_wmi(wil, "WMI: FW ready\n");
288 
289 	set_bit(wil_status_fwready, &wil->status);
290 	/* reuse wmi_ready for the firmware ready indication */
291 	complete(&wil->wmi_ready);
292 }
293 
294 static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
295 {
296 	struct wmi_rx_mgmt_packet_event *data = d;
297 	struct wiphy *wiphy = wil_to_wiphy(wil);
298 	struct ieee80211_mgmt *rx_mgmt_frame =
299 			(struct ieee80211_mgmt *)data->payload;
	int ch_no = data->info.channel + 1;
301 	u32 freq = ieee80211_channel_to_frequency(ch_no,
302 			IEEE80211_BAND_60GHZ);
303 	struct ieee80211_channel *channel = ieee80211_get_channel(wiphy, freq);
304 	/* TODO convert LE to CPU */
305 	s32 signal = 0; /* TODO */
306 	__le16 fc = rx_mgmt_frame->frame_control;
307 	u32 d_len = le32_to_cpu(data->info.len);
308 	u16 d_status = le16_to_cpu(data->info.status);
309 
310 	wil_dbg_wmi(wil, "MGMT: channel %d MCS %d SNR %d\n",
311 		    data->info.channel, data->info.mcs, data->info.snr);
312 	wil_dbg_wmi(wil, "status 0x%04x len %d stype %04x\n", d_status, d_len,
313 		    le16_to_cpu(data->info.stype));
314 	wil_dbg_wmi(wil, "qid %d mid %d cid %d\n",
315 		    data->info.qid, data->info.mid, data->info.cid);
316 
317 	if (!channel) {
318 		wil_err(wil, "Frame on unsupported channel\n");
319 		return;
320 	}
321 
322 	if (ieee80211_is_beacon(fc) || ieee80211_is_probe_resp(fc)) {
323 		struct cfg80211_bss *bss;
324 
325 		bss = cfg80211_inform_bss_frame(wiphy, channel, rx_mgmt_frame,
326 						d_len, signal, GFP_KERNEL);
327 		if (bss) {
328 			wil_dbg_wmi(wil, "Added BSS %pM\n",
329 				    rx_mgmt_frame->bssid);
330 			cfg80211_put_bss(wiphy, bss);
331 		} else {
332 			wil_err(wil, "cfg80211_inform_bss() failed\n");
333 		}
334 	} else {
335 		cfg80211_rx_mgmt(wil->wdev, freq, signal,
336 				 (void *)rx_mgmt_frame, d_len, GFP_KERNEL);
337 	}
338 }
339 
340 static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
341 				  void *d, int len)
342 {
343 	if (wil->scan_request) {
344 		struct wmi_scan_complete_event *data = d;
345 		bool aborted = (data->status != 0);
346 
347 		wil_dbg_wmi(wil, "SCAN_COMPLETE(0x%08x)\n", data->status);
348 		cfg80211_scan_done(wil->scan_request, aborted);
349 		wil->scan_request = NULL;
350 	} else {
351 		wil_err(wil, "SCAN_COMPLETE while not scanning\n");
352 	}
353 }
354 
355 static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
356 {
357 	struct net_device *ndev = wil_to_ndev(wil);
358 	struct wireless_dev *wdev = wil->wdev;
359 	struct wmi_connect_event *evt = d;
360 	int ch; /* channel number */
361 	struct station_info sinfo;
362 	u8 *assoc_req_ie, *assoc_resp_ie;
363 	size_t assoc_req_ielen, assoc_resp_ielen;
364 	/* capinfo(u16) + listen_interval(u16) + IEs */
365 	const size_t assoc_req_ie_offset = sizeof(u16) * 2;
366 	/* capinfo(u16) + status_code(u16) + associd(u16) + IEs */
367 	const size_t assoc_resp_ie_offset = sizeof(u16) * 3;
368 
369 	if (len < sizeof(*evt)) {
370 		wil_err(wil, "Connect event too short : %d bytes\n", len);
371 		return;
372 	}
373 	if (len != sizeof(*evt) + evt->beacon_ie_len + evt->assoc_req_len +
374 		   evt->assoc_resp_len) {
375 		wil_err(wil,
376 			"Connect event corrupted : %d != %d + %d + %d + %d\n",
377 			len, (int)sizeof(*evt), evt->beacon_ie_len,
378 			evt->assoc_req_len, evt->assoc_resp_len);
379 		return;
380 	}
381 	ch = evt->channel + 1;
382 	wil_dbg_wmi(wil, "Connect %pM channel [%d] cid %d\n",
383 		    evt->bssid, ch, evt->cid);
384 	wil_hex_dump_wmi("connect AI : ", DUMP_PREFIX_OFFSET, 16, 1,
385 			 evt->assoc_info, len - sizeof(*evt), true);
386 
	/* figure out the IEs: assoc_info holds the beacon IEs, followed by
	 * the association request and then the association response
	 */
388 	assoc_req_ie = &evt->assoc_info[evt->beacon_ie_len +
389 					assoc_req_ie_offset];
390 	assoc_req_ielen = evt->assoc_req_len - assoc_req_ie_offset;
391 	if (evt->assoc_req_len <= assoc_req_ie_offset) {
392 		assoc_req_ie = NULL;
393 		assoc_req_ielen = 0;
394 	}
395 
396 	assoc_resp_ie = &evt->assoc_info[evt->beacon_ie_len +
397 					 evt->assoc_req_len +
398 					 assoc_resp_ie_offset];
399 	assoc_resp_ielen = evt->assoc_resp_len - assoc_resp_ie_offset;
400 	if (evt->assoc_resp_len <= assoc_resp_ie_offset) {
401 		assoc_resp_ie = NULL;
402 		assoc_resp_ielen = 0;
403 	}
404 
405 	if ((wdev->iftype == NL80211_IFTYPE_STATION) ||
406 	    (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) {
407 		if (wdev->sme_state != CFG80211_SME_CONNECTING) {
408 			wil_err(wil, "Not in connecting state\n");
409 			return;
410 		}
411 		del_timer_sync(&wil->connect_timer);
412 		cfg80211_connect_result(ndev, evt->bssid,
413 					assoc_req_ie, assoc_req_ielen,
414 					assoc_resp_ie, assoc_resp_ielen,
415 					WLAN_STATUS_SUCCESS, GFP_KERNEL);
416 
417 	} else if ((wdev->iftype == NL80211_IFTYPE_AP) ||
418 		   (wdev->iftype == NL80211_IFTYPE_P2P_GO)) {
419 		memset(&sinfo, 0, sizeof(sinfo));
420 
421 		sinfo.generation = wil->sinfo_gen++;
422 
423 		if (assoc_req_ie) {
424 			sinfo.assoc_req_ies = assoc_req_ie;
425 			sinfo.assoc_req_ies_len = assoc_req_ielen;
426 			sinfo.filled |= STATION_INFO_ASSOC_REQ_IES;
427 		}
428 
429 		cfg80211_new_sta(ndev, evt->bssid, &sinfo, GFP_KERNEL);
430 	}
431 	set_bit(wil_status_fwconnected, &wil->status);
432 
433 	/* FIXME FW can transmit only ucast frames to peer */
434 	/* FIXME real ring_id instead of hard coded 0 */
435 	memcpy(wil->dst_addr[0], evt->bssid, ETH_ALEN);
436 
437 	wil->pending_connect_cid = evt->cid;
438 	queue_work(wil->wmi_wq_conn, &wil->connect_worker);
439 }
440 
441 static void wmi_evt_disconnect(struct wil6210_priv *wil, int id,
442 			       void *d, int len)
443 {
444 	struct wmi_disconnect_event *evt = d;
445 
446 	wil_dbg_wmi(wil, "Disconnect %pM reason %d proto %d wmi\n",
447 		    evt->bssid,
448 		    evt->protocol_reason_status, evt->disconnect_reason);
449 
450 	wil->sinfo_gen++;
451 
452 	wil6210_disconnect(wil, evt->bssid);
453 }
454 
455 static void wmi_evt_notify(struct wil6210_priv *wil, int id, void *d, int len)
456 {
457 	struct wmi_notify_req_done_event *evt = d;
458 
459 	if (len < sizeof(*evt)) {
460 		wil_err(wil, "Short NOTIFY event\n");
461 		return;
462 	}
463 
464 	wil->stats.tsf = le64_to_cpu(evt->tsf);
465 	wil->stats.snr = le32_to_cpu(evt->snr_val);
466 	wil->stats.bf_mcs = le16_to_cpu(evt->bf_mcs);
467 	wil->stats.my_rx_sector = le16_to_cpu(evt->my_rx_sector);
468 	wil->stats.my_tx_sector = le16_to_cpu(evt->my_tx_sector);
469 	wil->stats.peer_rx_sector = le16_to_cpu(evt->other_rx_sector);
470 	wil->stats.peer_tx_sector = le16_to_cpu(evt->other_tx_sector);
471 	wil_dbg_wmi(wil, "Link status, MCS %d TSF 0x%016llx\n"
472 		    "BF status 0x%08x SNR 0x%08x\n"
473 		    "Tx Tpt %d goodput %d Rx goodput %d\n"
474 		    "Sectors(rx:tx) my %d:%d peer %d:%d\n",
475 		    wil->stats.bf_mcs, wil->stats.tsf, evt->status,
476 		    wil->stats.snr, le32_to_cpu(evt->tx_tpt),
477 		    le32_to_cpu(evt->tx_goodput), le32_to_cpu(evt->rx_goodput),
478 		    wil->stats.my_rx_sector, wil->stats.my_tx_sector,
479 		    wil->stats.peer_rx_sector, wil->stats.peer_tx_sector);
480 }
481 
482 /*
483  * Firmware reports EAPOL frame using WME event.
484  * Reconstruct Ethernet frame and deliver it via normal Rx
485  */
486 static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id,
487 			     void *d, int len)
488 {
489 	struct net_device *ndev = wil_to_ndev(wil);
490 	struct wmi_eapol_rx_event *evt = d;
491 	u16 eapol_len = le16_to_cpu(evt->eapol_len);
492 	int sz = eapol_len + ETH_HLEN;
493 	struct sk_buff *skb;
494 	struct ethhdr *eth;
495 
496 	wil_dbg_wmi(wil, "EAPOL len %d from %pM\n", eapol_len,
497 		    evt->src_mac);
498 
499 	if (eapol_len > 196) { /* TODO: revisit size limit */
500 		wil_err(wil, "EAPOL too large\n");
501 		return;
502 	}
503 
504 	skb = alloc_skb(sz, GFP_KERNEL);
505 	if (!skb) {
506 		wil_err(wil, "Failed to allocate skb\n");
507 		return;
508 	}
509 	eth = (struct ethhdr *)skb_put(skb, ETH_HLEN);
510 	memcpy(eth->h_dest, ndev->dev_addr, ETH_ALEN);
511 	memcpy(eth->h_source, evt->src_mac, ETH_ALEN);
512 	eth->h_proto = cpu_to_be16(ETH_P_PAE);
513 	memcpy(skb_put(skb, eapol_len), evt->eapol, eapol_len);
514 	skb->protocol = eth_type_trans(skb, ndev);
515 	if (likely(netif_rx_ni(skb) == NET_RX_SUCCESS)) {
516 		ndev->stats.rx_packets++;
517 		ndev->stats.rx_bytes += skb->len;
518 	} else {
519 		ndev->stats.rx_dropped++;
520 	}
521 }
522 
523 static void wmi_evt_linkup(struct wil6210_priv *wil, int id, void *d, int len)
524 {
525 	struct net_device *ndev = wil_to_ndev(wil);
526 	struct wmi_data_port_open_event *evt = d;
527 
528 	wil_dbg_wmi(wil, "Link UP for CID %d\n", evt->cid);
529 
530 	netif_carrier_on(ndev);
531 }
532 
533 static void wmi_evt_linkdown(struct wil6210_priv *wil, int id, void *d, int len)
534 {
535 	struct net_device *ndev = wil_to_ndev(wil);
536 	struct wmi_wbe_link_down_event *evt = d;
537 
538 	wil_dbg_wmi(wil, "Link DOWN for CID %d, reason %d\n",
539 		    evt->cid, le32_to_cpu(evt->reason));
540 
541 	netif_carrier_off(ndev);
542 }
543 
544 static void wmi_evt_ba_status(struct wil6210_priv *wil, int id, void *d,
545 			      int len)
546 {
547 	struct wmi_vring_ba_status_event *evt = d;
548 
549 	wil_dbg_wmi(wil, "BACK[%d] %s {%d} timeout %d\n",
550 		    evt->ringid, evt->status ? "N/A" : "OK", evt->agg_wsize,
551 		    __le16_to_cpu(evt->ba_timeout));
552 }
553 
554 static const struct {
555 	int eventid;
556 	void (*handler)(struct wil6210_priv *wil, int eventid,
557 			void *data, int data_len);
558 } wmi_evt_handlers[] = {
559 	{WMI_READY_EVENTID,		wmi_evt_ready},
560 	{WMI_FW_READY_EVENTID,		wmi_evt_fw_ready},
561 	{WMI_RX_MGMT_PACKET_EVENTID,	wmi_evt_rx_mgmt},
562 	{WMI_SCAN_COMPLETE_EVENTID,	wmi_evt_scan_complete},
563 	{WMI_CONNECT_EVENTID,		wmi_evt_connect},
564 	{WMI_DISCONNECT_EVENTID,	wmi_evt_disconnect},
565 	{WMI_NOTIFY_REQ_DONE_EVENTID,	wmi_evt_notify},
566 	{WMI_EAPOL_RX_EVENTID,		wmi_evt_eapol_rx},
567 	{WMI_DATA_PORT_OPEN_EVENTID,	wmi_evt_linkup},
568 	{WMI_WBE_LINKDOWN_EVENTID,	wmi_evt_linkdown},
569 	{WMI_BA_STATUS_EVENTID,		wmi_evt_ba_status},
570 };
571 
572 /*
573  * Run in IRQ context
574  * Extract WMI command from mailbox. Queue it to the @wil->pending_wmi_ev
575  * that will be eventually handled by the @wmi_event_worker in the thread
576  * context of thread "wil6210_wmi"
577  */
578 void wmi_recv_cmd(struct wil6210_priv *wil)
579 {
580 	struct wil6210_mbox_ring_desc d_tail;
581 	struct wil6210_mbox_hdr hdr;
582 	struct wil6210_mbox_ring *r = &wil->mbox_ctl.rx;
583 	struct pending_wmi_event *evt;
584 	u8 *cmd;
585 	void __iomem *src;
586 	ulong flags;
587 
588 	if (!test_bit(wil_status_reset_done, &wil->status)) {
589 		wil_err(wil, "Reset not completed\n");
590 		return;
591 	}
592 
593 	for (;;) {
594 		u16 len;
595 
596 		r->head = ioread32(wil->csr + HOST_MBOX +
597 				   offsetof(struct wil6210_mbox_ctl, rx.head));
598 		if (r->tail == r->head)
599 			return;
600 
		/* read the event descriptor at the tail */
602 		wil_memcpy_fromio_32(&d_tail, wil->csr + HOSTADDR(r->tail),
603 				     sizeof(struct wil6210_mbox_ring_desc));
604 		if (d_tail.sync == 0) {
605 			wil_err(wil, "Mbox evt not owned by FW?\n");
606 			return;
607 		}
608 
609 		if (0 != wmi_read_hdr(wil, d_tail.addr, &hdr)) {
610 			wil_err(wil, "Mbox evt at 0x%08x?\n",
611 				le32_to_cpu(d_tail.addr));
612 			return;
613 		}
614 
615 		len = le16_to_cpu(hdr.len);
616 		src = wmi_buffer(wil, d_tail.addr) +
617 		      sizeof(struct wil6210_mbox_hdr);
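		/* size the list entry to hold the WMI payload, rounded up to
		 * a 4-byte boundary since wil_memcpy_fromio_32() copies
		 * 32-bit words
		 */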
618 		evt = kmalloc(ALIGN(offsetof(struct pending_wmi_event,
619 					     event.wmi) + len, 4),
620 			      GFP_KERNEL);
621 		if (!evt)
622 			return;
623 
624 		evt->event.hdr = hdr;
625 		cmd = (void *)&evt->event.wmi;
626 		wil_memcpy_fromio_32(cmd, src, len);
627 		/* mark entry as empty */
628 		iowrite32(0, wil->csr + HOSTADDR(r->tail) +
629 			  offsetof(struct wil6210_mbox_ring_desc, sync));
630 		/* indicate */
631 		wil_dbg_wmi(wil, "Mbox evt %04x %04x %04x %02x\n",
632 			    le16_to_cpu(hdr.seq), len, le16_to_cpu(hdr.type),
633 			    hdr.flags);
634 		if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) &&
635 		    (len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
636 			wil_dbg_wmi(wil, "WMI event 0x%04x\n",
637 				    evt->event.wmi.id);
638 		}
639 		wil_hex_dump_wmi("evt ", DUMP_PREFIX_OFFSET, 16, 1,
640 				 &evt->event.hdr, sizeof(hdr) + len, true);
641 
642 		/* advance tail */
643 		r->tail = r->base + ((r->tail - r->base +
644 			  sizeof(struct wil6210_mbox_ring_desc)) % r->size);
645 		iowrite32(r->tail, wil->csr + HOST_MBOX +
646 			  offsetof(struct wil6210_mbox_ctl, rx.tail));
647 
648 		/* add to the pending list */
649 		spin_lock_irqsave(&wil->wmi_ev_lock, flags);
650 		list_add_tail(&evt->list, &wil->pending_wmi_ev);
651 		spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
652 		{
653 			int q =	queue_work(wil->wmi_wq,
654 					   &wil->wmi_event_worker);
655 			wil_dbg_wmi(wil, "queue_work -> %d\n", q);
656 		}
657 	}
658 }
659 
660 int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len,
661 	     u16 reply_id, void *reply, u8 reply_size, int to_msec)
662 {
663 	int rc;
664 	int remain;
665 
666 	mutex_lock(&wil->wmi_mutex);
667 
668 	rc = __wmi_send(wil, cmdid, buf, len);
669 	if (rc)
670 		goto out;
671 
672 	wil->reply_id = reply_id;
673 	wil->reply_buf = reply;
674 	wil->reply_size = reply_size;
675 	remain = wait_for_completion_timeout(&wil->wmi_ready,
676 			msecs_to_jiffies(to_msec));
677 	if (0 == remain) {
678 		wil_err(wil, "wmi_call(0x%04x->0x%04x) timeout %d msec\n",
679 			cmdid, reply_id, to_msec);
680 		rc = -ETIME;
681 	} else {
682 		wil_dbg_wmi(wil,
683 			    "wmi_call(0x%04x->0x%04x) completed in %d msec\n",
684 			    cmdid, reply_id,
685 			    to_msec - jiffies_to_msecs(remain));
686 	}
687 	wil->reply_id = 0;
688 	wil->reply_buf = NULL;
689 	wil->reply_size = 0;
690  out:
691 	mutex_unlock(&wil->wmi_mutex);
692 
693 	return rc;
694 }
695 
696 int wmi_echo(struct wil6210_priv *wil)
697 {
698 	struct wmi_echo_cmd cmd = {
699 		.value = cpu_to_le32(0x12345678),
700 	};
701 
702 	return wmi_call(wil, WMI_ECHO_CMDID, &cmd, sizeof(cmd),
703 			 WMI_ECHO_RSP_EVENTID, NULL, 0, 20);
704 }
705 
706 int wmi_set_mac_address(struct wil6210_priv *wil, void *addr)
707 {
708 	struct wmi_set_mac_address_cmd cmd;
709 
710 	memcpy(cmd.mac, addr, ETH_ALEN);
711 
712 	wil_dbg_wmi(wil, "Set MAC %pM\n", addr);
713 
714 	return wmi_send(wil, WMI_SET_MAC_ADDRESS_CMDID, &cmd, sizeof(cmd));
715 }
716 
717 int wmi_set_bcon(struct wil6210_priv *wil, int bi, u8 wmi_nettype)
718 {
719 	struct wmi_bcon_ctrl_cmd cmd = {
720 		.bcon_interval = cpu_to_le16(bi),
721 		.network_type = wmi_nettype,
722 		.disable_sec_offload = 1,
723 	};
724 
725 	if (!wil->secure_pcp)
726 		cmd.disable_sec = 1;
727 
728 	return wmi_send(wil, WMI_BCON_CTRL_CMDID, &cmd, sizeof(cmd));
729 }
730 
731 int wmi_set_ssid(struct wil6210_priv *wil, u8 ssid_len, const void *ssid)
732 {
733 	struct wmi_set_ssid_cmd cmd = {
734 		.ssid_len = cpu_to_le32(ssid_len),
735 	};
736 
737 	if (ssid_len > sizeof(cmd.ssid))
738 		return -EINVAL;
739 
740 	memcpy(cmd.ssid, ssid, ssid_len);
741 
742 	return wmi_send(wil, WMI_SET_SSID_CMDID, &cmd, sizeof(cmd));
743 }
744 
745 int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid)
746 {
747 	int rc;
748 	struct {
749 		struct wil6210_mbox_hdr_wmi wmi;
750 		struct wmi_set_ssid_cmd cmd;
751 	} __packed reply;
752 	int len; /* reply.cmd.ssid_len in CPU order */
753 
754 	rc = wmi_call(wil, WMI_GET_SSID_CMDID, NULL, 0, WMI_GET_SSID_EVENTID,
755 		      &reply, sizeof(reply), 20);
756 	if (rc)
757 		return rc;
758 
759 	len = le32_to_cpu(reply.cmd.ssid_len);
760 	if (len > sizeof(reply.cmd.ssid))
761 		return -EINVAL;
762 
763 	*ssid_len = len;
764 	memcpy(ssid, reply.cmd.ssid, len);
765 
766 	return 0;
767 }
768 
769 int wmi_set_channel(struct wil6210_priv *wil, int channel)
770 {
771 	struct wmi_set_pcp_channel_cmd cmd = {
772 		.channel = channel - 1,
773 	};
774 
775 	return wmi_send(wil, WMI_SET_PCP_CHANNEL_CMDID, &cmd, sizeof(cmd));
776 }
777 
778 int wmi_get_channel(struct wil6210_priv *wil, int *channel)
779 {
780 	int rc;
781 	struct {
782 		struct wil6210_mbox_hdr_wmi wmi;
783 		struct wmi_set_pcp_channel_cmd cmd;
784 	} __packed reply;
785 
786 	rc = wmi_call(wil, WMI_GET_PCP_CHANNEL_CMDID, NULL, 0,
787 		      WMI_GET_PCP_CHANNEL_EVENTID, &reply, sizeof(reply), 20);
788 	if (rc)
789 		return rc;
790 
791 	if (reply.cmd.channel > 3)
792 		return -EINVAL;
793 
794 	*channel = reply.cmd.channel + 1;
795 
796 	return 0;
797 }
798 
799 int wmi_tx_eapol(struct wil6210_priv *wil, struct sk_buff *skb)
800 {
801 	struct wmi_eapol_tx_cmd *cmd;
802 	struct ethhdr *eth;
803 	u16 eapol_len = skb->len - ETH_HLEN;
804 	void *eapol = skb->data + ETH_HLEN;
805 	uint i;
806 	int rc;
807 
808 	skb_set_mac_header(skb, 0);
809 	eth = eth_hdr(skb);
810 	wil_dbg_wmi(wil, "EAPOL %d bytes to %pM\n", eapol_len, eth->h_dest);
811 	for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
812 		if (memcmp(wil->dst_addr[i], eth->h_dest, ETH_ALEN) == 0)
813 			goto found_dest;
814 	}
815 
816 	return -EINVAL;
817 
818  found_dest:
819 	/* find out eapol data & len */
820 	cmd = kzalloc(sizeof(*cmd) + eapol_len, GFP_KERNEL);
821 	if (!cmd)
		return -ENOMEM;
823 
824 	memcpy(cmd->dst_mac, eth->h_dest, ETH_ALEN);
825 	cmd->eapol_len = cpu_to_le16(eapol_len);
826 	memcpy(cmd->eapol, eapol, eapol_len);
827 	rc = wmi_send(wil, WMI_EAPOL_TX_CMDID, cmd, sizeof(*cmd) + eapol_len);
828 	kfree(cmd);
829 
830 	return rc;
831 }
832 
833 int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
834 		       const void *mac_addr)
835 {
836 	struct wmi_delete_cipher_key_cmd cmd = {
837 		.key_index = key_index,
838 	};
839 
840 	if (mac_addr)
841 		memcpy(cmd.mac, mac_addr, WMI_MAC_LEN);
842 
843 	return wmi_send(wil, WMI_DELETE_CIPHER_KEY_CMDID, &cmd, sizeof(cmd));
844 }
845 
846 int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
847 		       const void *mac_addr, int key_len, const void *key)
848 {
849 	struct wmi_add_cipher_key_cmd cmd = {
850 		.key_index = key_index,
851 		.key_usage = WMI_KEY_USE_PAIRWISE,
852 		.key_len = key_len,
853 	};
854 
855 	if (!key || (key_len > sizeof(cmd.key)))
856 		return -EINVAL;
857 
858 	memcpy(cmd.key, key, key_len);
859 	if (mac_addr)
860 		memcpy(cmd.mac, mac_addr, WMI_MAC_LEN);
861 
862 	return wmi_send(wil, WMI_ADD_CIPHER_KEY_CMDID, &cmd, sizeof(cmd));
863 }
864 
865 int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
866 {
867 	int rc;
868 	u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len;
869 	struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL);
870 	if (!cmd)
871 		return -ENOMEM;
872 
873 	cmd->mgmt_frm_type = type;
	/* BUG: FW API defines ieLen as u8. Will fix FW */
875 	cmd->ie_len = cpu_to_le16(ie_len);
876 	memcpy(cmd->ie_info, ie, ie_len);
877 	rc = wmi_send(wil, WMI_SET_APPIE_CMDID, cmd, len);
878 	kfree(cmd);
879 
880 	return rc;
881 }
882 
883 int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
884 {
885 	struct wireless_dev *wdev = wil->wdev;
886 	struct net_device *ndev = wil_to_ndev(wil);
887 	struct wmi_cfg_rx_chain_cmd cmd = {
888 		.action = WMI_RX_CHAIN_ADD,
889 		.rx_sw_ring = {
890 			.max_mpdu_size = cpu_to_le16(RX_BUF_LEN),
891 			.ring_mem_base = cpu_to_le64(vring->pa),
892 			.ring_size = cpu_to_le16(vring->size),
893 		},
894 		.mid = 0, /* TODO - what is it? */
895 		.decap_trans_type = WMI_DECAP_TYPE_802_3,
896 	};
897 	struct {
898 		struct wil6210_mbox_hdr_wmi wmi;
899 		struct wmi_cfg_rx_chain_done_event evt;
900 	} __packed evt;
901 	int rc;
902 
903 	if (wdev->iftype == NL80211_IFTYPE_MONITOR) {
904 		struct ieee80211_channel *ch = wdev->preset_chandef.chan;
905 
906 		cmd.sniffer_cfg.mode = cpu_to_le32(WMI_SNIFFER_ON);
907 		if (ch)
908 			cmd.sniffer_cfg.channel = ch->hw_value - 1;
909 		cmd.sniffer_cfg.phy_info_mode =
910 			cpu_to_le32(ndev->type == ARPHRD_IEEE80211_RADIOTAP);
911 		cmd.sniffer_cfg.phy_support =
912 			cpu_to_le32((wil->monitor_flags & MONITOR_FLAG_CONTROL)
913 				    ? WMI_SNIFFER_CP : WMI_SNIFFER_DP);
914 	}
915 	/* typical time for secure PCP is 840ms */
916 	rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd),
917 		      WMI_CFG_RX_CHAIN_DONE_EVENTID, &evt, sizeof(evt), 2000);
918 	if (rc)
919 		return rc;
920 
921 	vring->hwtail = le32_to_cpu(evt.evt.rx_ring_tail_ptr);
922 
923 	wil_dbg_misc(wil, "Rx init: status %d tail 0x%08x\n",
924 		     le32_to_cpu(evt.evt.status), vring->hwtail);
925 
926 	if (le32_to_cpu(evt.evt.status) != WMI_CFG_RX_CHAIN_SUCCESS)
927 		rc = -EINVAL;
928 
929 	return rc;
930 }
931 
932 void wmi_event_flush(struct wil6210_priv *wil)
933 {
934 	struct pending_wmi_event *evt, *t;
935 
936 	wil_dbg_wmi(wil, "%s()\n", __func__);
937 
938 	list_for_each_entry_safe(evt, t, &wil->pending_wmi_ev, list) {
939 		list_del(&evt->list);
940 		kfree(evt);
941 	}
942 }
943 
944 static bool wmi_evt_call_handler(struct wil6210_priv *wil, int id,
945 				 void *d, int len)
946 {
947 	uint i;
948 
949 	for (i = 0; i < ARRAY_SIZE(wmi_evt_handlers); i++) {
950 		if (wmi_evt_handlers[i].eventid == id) {
951 			wmi_evt_handlers[i].handler(wil, id, d, len);
952 			return true;
953 		}
954 	}
955 
956 	return false;
957 }
958 
959 static void wmi_event_handle(struct wil6210_priv *wil,
960 			     struct wil6210_mbox_hdr *hdr)
961 {
962 	u16 len = le16_to_cpu(hdr->len);
963 
964 	if ((hdr->type == WIL_MBOX_HDR_TYPE_WMI) &&
965 	    (len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
966 		struct wil6210_mbox_hdr_wmi *wmi = (void *)(&hdr[1]);
967 		void *evt_data = (void *)(&wmi[1]);
968 		u16 id = le16_to_cpu(wmi->id);
		/* check if someone is waiting for this event */
970 		if (wil->reply_id && wil->reply_id == id) {
971 			if (wil->reply_buf) {
972 				memcpy(wil->reply_buf, wmi,
973 				       min(len, wil->reply_size));
974 			} else {
975 				wmi_evt_call_handler(wil, id, evt_data,
976 						     len - sizeof(*wmi));
977 			}
978 			wil_dbg_wmi(wil, "Complete WMI 0x%04x\n", id);
979 			complete(&wil->wmi_ready);
980 			return;
981 		}
982 		/* unsolicited event */
983 		/* search for handler */
984 		if (!wmi_evt_call_handler(wil, id, evt_data,
985 					  len - sizeof(*wmi))) {
986 			wil_err(wil, "Unhandled event 0x%04x\n", id);
987 		}
988 	} else {
989 		wil_err(wil, "Unknown event type\n");
990 		print_hex_dump(KERN_ERR, "evt?? ", DUMP_PREFIX_OFFSET, 16, 1,
991 			       hdr, sizeof(*hdr) + len, true);
992 	}
993 }
994 
995 /*
996  * Retrieve next WMI event from the pending list
997  */
998 static struct list_head *next_wmi_ev(struct wil6210_priv *wil)
999 {
1000 	ulong flags;
1001 	struct list_head *ret = NULL;
1002 
1003 	spin_lock_irqsave(&wil->wmi_ev_lock, flags);
1004 
1005 	if (!list_empty(&wil->pending_wmi_ev)) {
1006 		ret = wil->pending_wmi_ev.next;
1007 		list_del(ret);
1008 	}
1009 
1010 	spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
1011 
1012 	return ret;
1013 }
1014 
1015 /*
1016  * Handler for the WMI events
1017  */
1018 void wmi_event_worker(struct work_struct *work)
1019 {
1020 	struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
1021 						 wmi_event_worker);
1022 	struct pending_wmi_event *evt;
1023 	struct list_head *lh;
1024 
1025 	while ((lh = next_wmi_ev(wil)) != NULL) {
1026 		evt = list_entry(lh, struct pending_wmi_event, list);
1027 		wmi_event_handle(wil, &evt->event.hdr);
1028 		kfree(evt);
1029 	}
1030 }
1031