1 /* 2 * Copyright (c) 2012-2014 Qualcomm Atheros, Inc. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for any 5 * purpose with or without fee is hereby granted, provided that the above 6 * copyright notice and this permission notice appear in all copies. 7 * 8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 */ 16 17 #include <linux/moduleparam.h> 18 #include <linux/etherdevice.h> 19 #include <linux/if_arp.h> 20 21 #include "wil6210.h" 22 #include "txrx.h" 23 #include "wmi.h" 24 #include "trace.h" 25 26 static uint max_assoc_sta = 1; 27 module_param(max_assoc_sta, uint, S_IRUGO | S_IWUSR); 28 MODULE_PARM_DESC(max_assoc_sta, " Max number of stations associated to the AP"); 29 30 /** 31 * WMI event receiving - theory of operations 32 * 33 * When firmware about to report WMI event, it fills memory area 34 * in the mailbox and raises misc. IRQ. Thread interrupt handler invoked for 35 * the misc IRQ, function @wmi_recv_cmd called by thread IRQ handler. 36 * 37 * @wmi_recv_cmd reads event, allocates memory chunk and attaches it to the 38 * event list @wil->pending_wmi_ev. Then, work queue @wil->wmi_wq wakes up 39 * and handles events within the @wmi_event_worker. Every event get detached 40 * from list, processed and deleted. 41 * 42 * Purpose for this mechanism is to release IRQ thread; otherwise, 43 * if WMI event handling involves another WMI command flow, this 2-nd flow 44 * won't be completed because of blocked IRQ thread. 45 */ 46 47 /** 48 * Addressing - theory of operations 49 * 50 * There are several buses present on the WIL6210 card. 51 * Same memory areas are visible at different address on 52 * the different busses. There are 3 main bus masters: 53 * - MAC CPU (ucode) 54 * - User CPU (firmware) 55 * - AHB (host) 56 * 57 * On the PCI bus, there is one BAR (BAR0) of 2Mb size, exposing 58 * AHB addresses starting from 0x880000 59 * 60 * Internally, firmware uses addresses that allows faster access but 61 * are invisible from the host. To read from these addresses, alternative 62 * AHB address must be used. 63 * 64 * Memory mapping 65 * Linker address PCI/Host address 66 * 0x880000 .. 0xa80000 2Mb BAR0 67 * 0x800000 .. 0x807000 0x900000 .. 0x907000 28k DCCM 68 * 0x840000 .. 0x857000 0x908000 .. 0x91f000 92k PERIPH 69 */ 70 71 /** 72 * @fw_mapping provides memory remapping table 73 * 74 * array size should be in sync with the declaration in the wil6210.h 75 */ 76 const struct fw_map fw_mapping[] = { 77 {0x000000, 0x040000, 0x8c0000, "fw_code"}, /* FW code RAM 256k */ 78 {0x800000, 0x808000, 0x900000, "fw_data"}, /* FW data RAM 32k */ 79 {0x840000, 0x860000, 0x908000, "fw_peri"}, /* periph. data RAM 128k */ 80 {0x880000, 0x88a000, 0x880000, "rgf"}, /* various RGF 40k */ 81 {0x88a000, 0x88b000, 0x88a000, "AGC_tbl"}, /* AGC table 4k */ 82 {0x88b000, 0x88c000, 0x88b000, "rgf_ext"}, /* Pcie_ext_rgf 4k */ 83 {0x8c0000, 0x949000, 0x8c0000, "upper"}, /* upper area 548k */ 84 /* 85 * 920000..930000 ucode code RAM 86 * 930000..932000 ucode data RAM 87 * 932000..949000 back-door debug data 88 */ 89 }; 90 91 /** 92 * return AHB address for given firmware/ucode internal (linker) address 93 * @x - internal address 94 * If address have no valid AHB mapping, return 0 95 */ 96 static u32 wmi_addr_remap(u32 x) 97 { 98 uint i; 99 100 for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) { 101 if ((x >= fw_mapping[i].from) && (x < fw_mapping[i].to)) 102 return x + fw_mapping[i].host - fw_mapping[i].from; 103 } 104 105 return 0; 106 } 107 108 /** 109 * Check address validity for WMI buffer; remap if needed 110 * @ptr - internal (linker) fw/ucode address 111 * 112 * Valid buffer should be DWORD aligned 113 * 114 * return address for accessing buffer from the host; 115 * if buffer is not valid, return NULL. 116 */ 117 void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_) 118 { 119 u32 off; 120 u32 ptr = le32_to_cpu(ptr_); 121 122 if (ptr % 4) 123 return NULL; 124 125 ptr = wmi_addr_remap(ptr); 126 if (ptr < WIL6210_FW_HOST_OFF) 127 return NULL; 128 129 off = HOSTADDR(ptr); 130 if (off > WIL6210_MEM_SIZE - 4) 131 return NULL; 132 133 return wil->csr + off; 134 } 135 136 /** 137 * Check address validity 138 */ 139 void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr) 140 { 141 u32 off; 142 143 if (ptr % 4) 144 return NULL; 145 146 if (ptr < WIL6210_FW_HOST_OFF) 147 return NULL; 148 149 off = HOSTADDR(ptr); 150 if (off > WIL6210_MEM_SIZE - 4) 151 return NULL; 152 153 return wil->csr + off; 154 } 155 156 int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr, 157 struct wil6210_mbox_hdr *hdr) 158 { 159 void __iomem *src = wmi_buffer(wil, ptr); 160 161 if (!src) 162 return -EINVAL; 163 164 wil_memcpy_fromio_32(hdr, src, sizeof(*hdr)); 165 166 return 0; 167 } 168 169 static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len) 170 { 171 struct { 172 struct wil6210_mbox_hdr hdr; 173 struct wil6210_mbox_hdr_wmi wmi; 174 } __packed cmd = { 175 .hdr = { 176 .type = WIL_MBOX_HDR_TYPE_WMI, 177 .flags = 0, 178 .len = cpu_to_le16(sizeof(cmd.wmi) + len), 179 }, 180 .wmi = { 181 .mid = 0, 182 .id = cpu_to_le16(cmdid), 183 }, 184 }; 185 struct wil6210_mbox_ring *r = &wil->mbox_ctl.tx; 186 struct wil6210_mbox_ring_desc d_head; 187 u32 next_head; 188 void __iomem *dst; 189 void __iomem *head = wmi_addr(wil, r->head); 190 uint retry; 191 192 if (sizeof(cmd) + len > r->entry_size) { 193 wil_err(wil, "WMI size too large: %d bytes, max is %d\n", 194 (int)(sizeof(cmd) + len), r->entry_size); 195 return -ERANGE; 196 } 197 198 might_sleep(); 199 200 if (!test_bit(wil_status_fwready, &wil->status)) { 201 wil_err(wil, "WMI: cannot send command while FW not ready\n"); 202 return -EAGAIN; 203 } 204 205 if (!head) { 206 wil_err(wil, "WMI head is garbage: 0x%08x\n", r->head); 207 return -EINVAL; 208 } 209 /* read Tx head till it is not busy */ 210 for (retry = 5; retry > 0; retry--) { 211 wil_memcpy_fromio_32(&d_head, head, sizeof(d_head)); 212 if (d_head.sync == 0) 213 break; 214 msleep(20); 215 } 216 if (d_head.sync != 0) { 217 wil_err(wil, "WMI head busy\n"); 218 return -EBUSY; 219 } 220 /* next head */ 221 next_head = r->base + ((r->head - r->base + sizeof(d_head)) % r->size); 222 wil_dbg_wmi(wil, "Head 0x%08x -> 0x%08x\n", r->head, next_head); 223 /* wait till FW finish with previous command */ 224 for (retry = 5; retry > 0; retry--) { 225 r->tail = ioread32(wil->csr + HOST_MBOX + 226 offsetof(struct wil6210_mbox_ctl, tx.tail)); 227 if (next_head != r->tail) 228 break; 229 msleep(20); 230 } 231 if (next_head == r->tail) { 232 wil_err(wil, "WMI ring full\n"); 233 return -EBUSY; 234 } 235 dst = wmi_buffer(wil, d_head.addr); 236 if (!dst) { 237 wil_err(wil, "invalid WMI buffer: 0x%08x\n", 238 le32_to_cpu(d_head.addr)); 239 return -EINVAL; 240 } 241 cmd.hdr.seq = cpu_to_le16(++wil->wmi_seq); 242 /* set command */ 243 wil_dbg_wmi(wil, "WMI command 0x%04x [%d]\n", cmdid, len); 244 wil_hex_dump_wmi("Cmd ", DUMP_PREFIX_OFFSET, 16, 1, &cmd, 245 sizeof(cmd), true); 246 wil_hex_dump_wmi("cmd ", DUMP_PREFIX_OFFSET, 16, 1, buf, 247 len, true); 248 wil_memcpy_toio_32(dst, &cmd, sizeof(cmd)); 249 wil_memcpy_toio_32(dst + sizeof(cmd), buf, len); 250 /* mark entry as full */ 251 iowrite32(1, wil->csr + HOSTADDR(r->head) + 252 offsetof(struct wil6210_mbox_ring_desc, sync)); 253 /* advance next ptr */ 254 iowrite32(r->head = next_head, wil->csr + HOST_MBOX + 255 offsetof(struct wil6210_mbox_ctl, tx.head)); 256 257 trace_wil6210_wmi_cmd(&cmd.wmi, buf, len); 258 259 /* interrupt to FW */ 260 iowrite32(SW_INT_MBOX, wil->csr + HOST_SW_INT); 261 262 return 0; 263 } 264 265 int wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len) 266 { 267 int rc; 268 269 mutex_lock(&wil->wmi_mutex); 270 rc = __wmi_send(wil, cmdid, buf, len); 271 mutex_unlock(&wil->wmi_mutex); 272 273 return rc; 274 } 275 276 /*=== Event handlers ===*/ 277 static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len) 278 { 279 struct net_device *ndev = wil_to_ndev(wil); 280 struct wireless_dev *wdev = wil->wdev; 281 struct wmi_ready_event *evt = d; 282 283 wil->fw_version = le32_to_cpu(evt->sw_version); 284 wil->n_mids = evt->numof_additional_mids; 285 286 wil_info(wil, "FW ver. %d; MAC %pM; %d MID's\n", wil->fw_version, 287 evt->mac, wil->n_mids); 288 289 if (!is_valid_ether_addr(ndev->dev_addr)) { 290 memcpy(ndev->dev_addr, evt->mac, ETH_ALEN); 291 memcpy(ndev->perm_addr, evt->mac, ETH_ALEN); 292 } 293 snprintf(wdev->wiphy->fw_version, sizeof(wdev->wiphy->fw_version), 294 "%d", wil->fw_version); 295 } 296 297 static void wmi_evt_fw_ready(struct wil6210_priv *wil, int id, void *d, 298 int len) 299 { 300 wil_dbg_wmi(wil, "WMI: got FW ready event\n"); 301 302 wil_set_recovery_state(wil, fw_recovery_idle); 303 set_bit(wil_status_fwready, &wil->status); 304 /* let the reset sequence continue */ 305 complete(&wil->wmi_ready); 306 } 307 308 static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len) 309 { 310 struct wmi_rx_mgmt_packet_event *data = d; 311 struct wiphy *wiphy = wil_to_wiphy(wil); 312 struct ieee80211_mgmt *rx_mgmt_frame = 313 (struct ieee80211_mgmt *)data->payload; 314 int ch_no = data->info.channel+1; 315 u32 freq = ieee80211_channel_to_frequency(ch_no, 316 IEEE80211_BAND_60GHZ); 317 struct ieee80211_channel *channel = ieee80211_get_channel(wiphy, freq); 318 s32 signal = data->info.sqi; 319 __le16 fc = rx_mgmt_frame->frame_control; 320 u32 d_len = le32_to_cpu(data->info.len); 321 u16 d_status = le16_to_cpu(data->info.status); 322 323 wil_dbg_wmi(wil, "MGMT: channel %d MCS %d SNR %d SQI %d%%\n", 324 data->info.channel, data->info.mcs, data->info.snr, 325 data->info.sqi); 326 wil_dbg_wmi(wil, "status 0x%04x len %d fc 0x%04x\n", d_status, d_len, 327 le16_to_cpu(fc)); 328 wil_dbg_wmi(wil, "qid %d mid %d cid %d\n", 329 data->info.qid, data->info.mid, data->info.cid); 330 331 if (!channel) { 332 wil_err(wil, "Frame on unsupported channel\n"); 333 return; 334 } 335 336 if (ieee80211_is_beacon(fc) || ieee80211_is_probe_resp(fc)) { 337 struct cfg80211_bss *bss; 338 u64 tsf = le64_to_cpu(rx_mgmt_frame->u.beacon.timestamp); 339 u16 cap = le16_to_cpu(rx_mgmt_frame->u.beacon.capab_info); 340 u16 bi = le16_to_cpu(rx_mgmt_frame->u.beacon.beacon_int); 341 const u8 *ie_buf = rx_mgmt_frame->u.beacon.variable; 342 size_t ie_len = d_len - offsetof(struct ieee80211_mgmt, 343 u.beacon.variable); 344 wil_dbg_wmi(wil, "Capability info : 0x%04x\n", cap); 345 wil_dbg_wmi(wil, "TSF : 0x%016llx\n", tsf); 346 wil_dbg_wmi(wil, "Beacon interval : %d\n", bi); 347 wil_hex_dump_wmi("IE ", DUMP_PREFIX_OFFSET, 16, 1, ie_buf, 348 ie_len, true); 349 350 bss = cfg80211_inform_bss_frame(wiphy, channel, rx_mgmt_frame, 351 d_len, signal, GFP_KERNEL); 352 if (bss) { 353 wil_dbg_wmi(wil, "Added BSS %pM\n", 354 rx_mgmt_frame->bssid); 355 cfg80211_put_bss(wiphy, bss); 356 } else { 357 wil_err(wil, "cfg80211_inform_bss_frame() failed\n"); 358 } 359 } else { 360 cfg80211_rx_mgmt(wil->wdev, freq, signal, 361 (void *)rx_mgmt_frame, d_len, 0); 362 } 363 } 364 365 static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id, 366 void *d, int len) 367 { 368 if (wil->scan_request) { 369 struct wmi_scan_complete_event *data = d; 370 bool aborted = (data->status != WMI_SCAN_SUCCESS); 371 372 wil_dbg_wmi(wil, "SCAN_COMPLETE(0x%08x)\n", data->status); 373 wil_dbg_misc(wil, "Complete scan_request 0x%p aborted %d\n", 374 wil->scan_request, aborted); 375 376 del_timer_sync(&wil->scan_timer); 377 cfg80211_scan_done(wil->scan_request, aborted); 378 wil->scan_request = NULL; 379 } else { 380 wil_err(wil, "SCAN_COMPLETE while not scanning\n"); 381 } 382 } 383 384 static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) 385 { 386 struct net_device *ndev = wil_to_ndev(wil); 387 struct wireless_dev *wdev = wil->wdev; 388 struct wmi_connect_event *evt = d; 389 int ch; /* channel number */ 390 struct station_info sinfo; 391 u8 *assoc_req_ie, *assoc_resp_ie; 392 size_t assoc_req_ielen, assoc_resp_ielen; 393 /* capinfo(u16) + listen_interval(u16) + IEs */ 394 const size_t assoc_req_ie_offset = sizeof(u16) * 2; 395 /* capinfo(u16) + status_code(u16) + associd(u16) + IEs */ 396 const size_t assoc_resp_ie_offset = sizeof(u16) * 3; 397 398 if (len < sizeof(*evt)) { 399 wil_err(wil, "Connect event too short : %d bytes\n", len); 400 return; 401 } 402 if (len != sizeof(*evt) + evt->beacon_ie_len + evt->assoc_req_len + 403 evt->assoc_resp_len) { 404 wil_err(wil, 405 "Connect event corrupted : %d != %d + %d + %d + %d\n", 406 len, (int)sizeof(*evt), evt->beacon_ie_len, 407 evt->assoc_req_len, evt->assoc_resp_len); 408 return; 409 } 410 if (evt->cid >= WIL6210_MAX_CID) { 411 wil_err(wil, "Connect CID invalid : %d\n", evt->cid); 412 return; 413 } 414 415 ch = evt->channel + 1; 416 wil_dbg_wmi(wil, "Connect %pM channel [%d] cid %d\n", 417 evt->bssid, ch, evt->cid); 418 wil_hex_dump_wmi("connect AI : ", DUMP_PREFIX_OFFSET, 16, 1, 419 evt->assoc_info, len - sizeof(*evt), true); 420 421 /* figure out IE's */ 422 assoc_req_ie = &evt->assoc_info[evt->beacon_ie_len + 423 assoc_req_ie_offset]; 424 assoc_req_ielen = evt->assoc_req_len - assoc_req_ie_offset; 425 if (evt->assoc_req_len <= assoc_req_ie_offset) { 426 assoc_req_ie = NULL; 427 assoc_req_ielen = 0; 428 } 429 430 assoc_resp_ie = &evt->assoc_info[evt->beacon_ie_len + 431 evt->assoc_req_len + 432 assoc_resp_ie_offset]; 433 assoc_resp_ielen = evt->assoc_resp_len - assoc_resp_ie_offset; 434 if (evt->assoc_resp_len <= assoc_resp_ie_offset) { 435 assoc_resp_ie = NULL; 436 assoc_resp_ielen = 0; 437 } 438 439 if ((wdev->iftype == NL80211_IFTYPE_STATION) || 440 (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) { 441 if (!test_bit(wil_status_fwconnecting, &wil->status)) { 442 wil_err(wil, "Not in connecting state\n"); 443 return; 444 } 445 del_timer_sync(&wil->connect_timer); 446 cfg80211_connect_result(ndev, evt->bssid, 447 assoc_req_ie, assoc_req_ielen, 448 assoc_resp_ie, assoc_resp_ielen, 449 WLAN_STATUS_SUCCESS, GFP_KERNEL); 450 451 } else if ((wdev->iftype == NL80211_IFTYPE_AP) || 452 (wdev->iftype == NL80211_IFTYPE_P2P_GO)) { 453 memset(&sinfo, 0, sizeof(sinfo)); 454 455 sinfo.generation = wil->sinfo_gen++; 456 457 if (assoc_req_ie) { 458 sinfo.assoc_req_ies = assoc_req_ie; 459 sinfo.assoc_req_ies_len = assoc_req_ielen; 460 sinfo.filled |= STATION_INFO_ASSOC_REQ_IES; 461 } 462 463 cfg80211_new_sta(ndev, evt->bssid, &sinfo, GFP_KERNEL); 464 } 465 clear_bit(wil_status_fwconnecting, &wil->status); 466 set_bit(wil_status_fwconnected, &wil->status); 467 468 /* FIXME FW can transmit only ucast frames to peer */ 469 /* FIXME real ring_id instead of hard coded 0 */ 470 memcpy(wil->sta[evt->cid].addr, evt->bssid, ETH_ALEN); 471 wil->sta[evt->cid].status = wil_sta_conn_pending; 472 473 wil->pending_connect_cid = evt->cid; 474 queue_work(wil->wmi_wq_conn, &wil->connect_worker); 475 } 476 477 static void wmi_evt_disconnect(struct wil6210_priv *wil, int id, 478 void *d, int len) 479 { 480 struct wmi_disconnect_event *evt = d; 481 u16 reason_code = le16_to_cpu(evt->protocol_reason_status); 482 483 wil_dbg_wmi(wil, "Disconnect %pM reason [proto %d wmi %d]\n", 484 evt->bssid, reason_code, evt->disconnect_reason); 485 486 wil->sinfo_gen++; 487 488 mutex_lock(&wil->mutex); 489 wil6210_disconnect(wil, evt->bssid, reason_code, true); 490 mutex_unlock(&wil->mutex); 491 } 492 493 /* 494 * Firmware reports EAPOL frame using WME event. 495 * Reconstruct Ethernet frame and deliver it via normal Rx 496 */ 497 static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id, 498 void *d, int len) 499 { 500 struct net_device *ndev = wil_to_ndev(wil); 501 struct wmi_eapol_rx_event *evt = d; 502 u16 eapol_len = le16_to_cpu(evt->eapol_len); 503 int sz = eapol_len + ETH_HLEN; 504 struct sk_buff *skb; 505 struct ethhdr *eth; 506 int cid; 507 struct wil_net_stats *stats = NULL; 508 509 wil_dbg_wmi(wil, "EAPOL len %d from %pM\n", eapol_len, 510 evt->src_mac); 511 512 cid = wil_find_cid(wil, evt->src_mac); 513 if (cid >= 0) 514 stats = &wil->sta[cid].stats; 515 516 if (eapol_len > 196) { /* TODO: revisit size limit */ 517 wil_err(wil, "EAPOL too large\n"); 518 return; 519 } 520 521 skb = alloc_skb(sz, GFP_KERNEL); 522 if (!skb) { 523 wil_err(wil, "Failed to allocate skb\n"); 524 return; 525 } 526 527 eth = (struct ethhdr *)skb_put(skb, ETH_HLEN); 528 memcpy(eth->h_dest, ndev->dev_addr, ETH_ALEN); 529 memcpy(eth->h_source, evt->src_mac, ETH_ALEN); 530 eth->h_proto = cpu_to_be16(ETH_P_PAE); 531 memcpy(skb_put(skb, eapol_len), evt->eapol, eapol_len); 532 skb->protocol = eth_type_trans(skb, ndev); 533 if (likely(netif_rx_ni(skb) == NET_RX_SUCCESS)) { 534 ndev->stats.rx_packets++; 535 ndev->stats.rx_bytes += sz; 536 if (stats) { 537 stats->rx_packets++; 538 stats->rx_bytes += sz; 539 } 540 } else { 541 ndev->stats.rx_dropped++; 542 if (stats) 543 stats->rx_dropped++; 544 } 545 } 546 547 static void wmi_evt_linkup(struct wil6210_priv *wil, int id, void *d, int len) 548 { 549 struct net_device *ndev = wil_to_ndev(wil); 550 struct wmi_data_port_open_event *evt = d; 551 u8 cid = evt->cid; 552 553 wil_dbg_wmi(wil, "Link UP for CID %d\n", cid); 554 555 if (cid >= ARRAY_SIZE(wil->sta)) { 556 wil_err(wil, "Link UP for invalid CID %d\n", cid); 557 return; 558 } 559 560 wil->sta[cid].data_port_open = true; 561 netif_carrier_on(ndev); 562 } 563 564 static void wmi_evt_linkdown(struct wil6210_priv *wil, int id, void *d, int len) 565 { 566 struct net_device *ndev = wil_to_ndev(wil); 567 struct wmi_wbe_link_down_event *evt = d; 568 u8 cid = evt->cid; 569 570 wil_dbg_wmi(wil, "Link DOWN for CID %d, reason %d\n", 571 cid, le32_to_cpu(evt->reason)); 572 573 if (cid >= ARRAY_SIZE(wil->sta)) { 574 wil_err(wil, "Link DOWN for invalid CID %d\n", cid); 575 return; 576 } 577 578 wil->sta[cid].data_port_open = false; 579 netif_carrier_off(ndev); 580 } 581 582 static void wmi_evt_ba_status(struct wil6210_priv *wil, int id, void *d, 583 int len) 584 { 585 struct wmi_vring_ba_status_event *evt = d; 586 struct wil_sta_info *sta; 587 uint i, cid; 588 589 /* TODO: use Rx BA status, not Tx one */ 590 591 wil_dbg_wmi(wil, "BACK[%d] %s {%d} timeout %d\n", 592 evt->ringid, 593 evt->status == WMI_BA_AGREED ? "OK" : "N/A", 594 evt->agg_wsize, __le16_to_cpu(evt->ba_timeout)); 595 596 if (evt->ringid >= WIL6210_MAX_TX_RINGS) { 597 wil_err(wil, "invalid ring id %d\n", evt->ringid); 598 return; 599 } 600 601 mutex_lock(&wil->mutex); 602 603 cid = wil->vring2cid_tid[evt->ringid][0]; 604 if (cid >= WIL6210_MAX_CID) { 605 wil_err(wil, "invalid CID %d for vring %d\n", cid, evt->ringid); 606 goto out; 607 } 608 609 sta = &wil->sta[cid]; 610 if (sta->status == wil_sta_unused) { 611 wil_err(wil, "CID %d unused\n", cid); 612 goto out; 613 } 614 615 wil_dbg_wmi(wil, "BACK for CID %d %pM\n", cid, sta->addr); 616 for (i = 0; i < WIL_STA_TID_NUM; i++) { 617 struct wil_tid_ampdu_rx *r; 618 unsigned long flags; 619 620 spin_lock_irqsave(&sta->tid_rx_lock, flags); 621 622 r = sta->tid_rx[i]; 623 sta->tid_rx[i] = NULL; 624 wil_tid_ampdu_rx_free(wil, r); 625 626 spin_unlock_irqrestore(&sta->tid_rx_lock, flags); 627 628 if ((evt->status == WMI_BA_AGREED) && evt->agg_wsize) 629 sta->tid_rx[i] = wil_tid_ampdu_rx_alloc(wil, 630 evt->agg_wsize, 0); 631 } 632 633 out: 634 mutex_unlock(&wil->mutex); 635 } 636 637 static const struct { 638 int eventid; 639 void (*handler)(struct wil6210_priv *wil, int eventid, 640 void *data, int data_len); 641 } wmi_evt_handlers[] = { 642 {WMI_READY_EVENTID, wmi_evt_ready}, 643 {WMI_FW_READY_EVENTID, wmi_evt_fw_ready}, 644 {WMI_RX_MGMT_PACKET_EVENTID, wmi_evt_rx_mgmt}, 645 {WMI_SCAN_COMPLETE_EVENTID, wmi_evt_scan_complete}, 646 {WMI_CONNECT_EVENTID, wmi_evt_connect}, 647 {WMI_DISCONNECT_EVENTID, wmi_evt_disconnect}, 648 {WMI_EAPOL_RX_EVENTID, wmi_evt_eapol_rx}, 649 {WMI_DATA_PORT_OPEN_EVENTID, wmi_evt_linkup}, 650 {WMI_WBE_LINKDOWN_EVENTID, wmi_evt_linkdown}, 651 {WMI_BA_STATUS_EVENTID, wmi_evt_ba_status}, 652 }; 653 654 /* 655 * Run in IRQ context 656 * Extract WMI command from mailbox. Queue it to the @wil->pending_wmi_ev 657 * that will be eventually handled by the @wmi_event_worker in the thread 658 * context of thread "wil6210_wmi" 659 */ 660 void wmi_recv_cmd(struct wil6210_priv *wil) 661 { 662 struct wil6210_mbox_ring_desc d_tail; 663 struct wil6210_mbox_hdr hdr; 664 struct wil6210_mbox_ring *r = &wil->mbox_ctl.rx; 665 struct pending_wmi_event *evt; 666 u8 *cmd; 667 void __iomem *src; 668 ulong flags; 669 unsigned n; 670 671 if (!test_bit(wil_status_reset_done, &wil->status)) { 672 wil_err(wil, "Reset in progress. Cannot handle WMI event\n"); 673 return; 674 } 675 676 for (n = 0;; n++) { 677 u16 len; 678 bool q; 679 680 r->head = ioread32(wil->csr + HOST_MBOX + 681 offsetof(struct wil6210_mbox_ctl, rx.head)); 682 if (r->tail == r->head) 683 break; 684 685 wil_dbg_wmi(wil, "Mbox head %08x tail %08x\n", 686 r->head, r->tail); 687 /* read cmd descriptor from tail */ 688 wil_memcpy_fromio_32(&d_tail, wil->csr + HOSTADDR(r->tail), 689 sizeof(struct wil6210_mbox_ring_desc)); 690 if (d_tail.sync == 0) { 691 wil_err(wil, "Mbox evt not owned by FW?\n"); 692 break; 693 } 694 695 /* read cmd header from descriptor */ 696 if (0 != wmi_read_hdr(wil, d_tail.addr, &hdr)) { 697 wil_err(wil, "Mbox evt at 0x%08x?\n", 698 le32_to_cpu(d_tail.addr)); 699 break; 700 } 701 len = le16_to_cpu(hdr.len); 702 wil_dbg_wmi(wil, "Mbox evt %04x %04x %04x %02x\n", 703 le16_to_cpu(hdr.seq), len, le16_to_cpu(hdr.type), 704 hdr.flags); 705 706 /* read cmd buffer from descriptor */ 707 src = wmi_buffer(wil, d_tail.addr) + 708 sizeof(struct wil6210_mbox_hdr); 709 evt = kmalloc(ALIGN(offsetof(struct pending_wmi_event, 710 event.wmi) + len, 4), 711 GFP_KERNEL); 712 if (!evt) 713 break; 714 715 evt->event.hdr = hdr; 716 cmd = (void *)&evt->event.wmi; 717 wil_memcpy_fromio_32(cmd, src, len); 718 /* mark entry as empty */ 719 iowrite32(0, wil->csr + HOSTADDR(r->tail) + 720 offsetof(struct wil6210_mbox_ring_desc, sync)); 721 /* indicate */ 722 if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) && 723 (len >= sizeof(struct wil6210_mbox_hdr_wmi))) { 724 struct wil6210_mbox_hdr_wmi *wmi = &evt->event.wmi; 725 u16 id = le16_to_cpu(wmi->id); 726 u32 tstamp = le32_to_cpu(wmi->timestamp); 727 728 wil_dbg_wmi(wil, "WMI event 0x%04x MID %d @%d msec\n", 729 id, wmi->mid, tstamp); 730 trace_wil6210_wmi_event(wmi, &wmi[1], 731 len - sizeof(*wmi)); 732 } 733 wil_hex_dump_wmi("evt ", DUMP_PREFIX_OFFSET, 16, 1, 734 &evt->event.hdr, sizeof(hdr) + len, true); 735 736 /* advance tail */ 737 r->tail = r->base + ((r->tail - r->base + 738 sizeof(struct wil6210_mbox_ring_desc)) % r->size); 739 iowrite32(r->tail, wil->csr + HOST_MBOX + 740 offsetof(struct wil6210_mbox_ctl, rx.tail)); 741 742 /* add to the pending list */ 743 spin_lock_irqsave(&wil->wmi_ev_lock, flags); 744 list_add_tail(&evt->list, &wil->pending_wmi_ev); 745 spin_unlock_irqrestore(&wil->wmi_ev_lock, flags); 746 q = queue_work(wil->wmi_wq, &wil->wmi_event_worker); 747 wil_dbg_wmi(wil, "queue_work -> %d\n", q); 748 } 749 /* normally, 1 event per IRQ should be processed */ 750 wil_dbg_wmi(wil, "%s -> %d events queued\n", __func__, n); 751 } 752 753 int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len, 754 u16 reply_id, void *reply, u8 reply_size, int to_msec) 755 { 756 int rc; 757 int remain; 758 759 mutex_lock(&wil->wmi_mutex); 760 761 rc = __wmi_send(wil, cmdid, buf, len); 762 if (rc) 763 goto out; 764 765 wil->reply_id = reply_id; 766 wil->reply_buf = reply; 767 wil->reply_size = reply_size; 768 remain = wait_for_completion_timeout(&wil->wmi_call, 769 msecs_to_jiffies(to_msec)); 770 if (0 == remain) { 771 wil_err(wil, "wmi_call(0x%04x->0x%04x) timeout %d msec\n", 772 cmdid, reply_id, to_msec); 773 rc = -ETIME; 774 } else { 775 wil_dbg_wmi(wil, 776 "wmi_call(0x%04x->0x%04x) completed in %d msec\n", 777 cmdid, reply_id, 778 to_msec - jiffies_to_msecs(remain)); 779 } 780 wil->reply_id = 0; 781 wil->reply_buf = NULL; 782 wil->reply_size = 0; 783 out: 784 mutex_unlock(&wil->wmi_mutex); 785 786 return rc; 787 } 788 789 int wmi_echo(struct wil6210_priv *wil) 790 { 791 struct wmi_echo_cmd cmd = { 792 .value = cpu_to_le32(0x12345678), 793 }; 794 795 return wmi_call(wil, WMI_ECHO_CMDID, &cmd, sizeof(cmd), 796 WMI_ECHO_RSP_EVENTID, NULL, 0, 20); 797 } 798 799 int wmi_set_mac_address(struct wil6210_priv *wil, void *addr) 800 { 801 struct wmi_set_mac_address_cmd cmd; 802 803 memcpy(cmd.mac, addr, ETH_ALEN); 804 805 wil_dbg_wmi(wil, "Set MAC %pM\n", addr); 806 807 return wmi_send(wil, WMI_SET_MAC_ADDRESS_CMDID, &cmd, sizeof(cmd)); 808 } 809 810 int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan) 811 { 812 int rc; 813 814 struct wmi_pcp_start_cmd cmd = { 815 .bcon_interval = cpu_to_le16(bi), 816 .network_type = wmi_nettype, 817 .disable_sec_offload = 1, 818 .channel = chan - 1, 819 .pcp_max_assoc_sta = max_assoc_sta, 820 }; 821 struct { 822 struct wil6210_mbox_hdr_wmi wmi; 823 struct wmi_pcp_started_event evt; 824 } __packed reply; 825 826 if (!wil->secure_pcp) 827 cmd.disable_sec = 1; 828 829 if ((cmd.pcp_max_assoc_sta > WIL6210_MAX_CID) || 830 (cmd.pcp_max_assoc_sta <= 0)) { 831 wil_info(wil, 832 "Requested connection limit %u, valid values are 1 - %d. Setting to %d\n", 833 max_assoc_sta, WIL6210_MAX_CID, WIL6210_MAX_CID); 834 cmd.pcp_max_assoc_sta = WIL6210_MAX_CID; 835 } 836 837 /* 838 * Processing time may be huge, in case of secure AP it takes about 839 * 3500ms for FW to start AP 840 */ 841 rc = wmi_call(wil, WMI_PCP_START_CMDID, &cmd, sizeof(cmd), 842 WMI_PCP_STARTED_EVENTID, &reply, sizeof(reply), 5000); 843 if (rc) 844 return rc; 845 846 if (reply.evt.status != WMI_FW_STATUS_SUCCESS) 847 rc = -EINVAL; 848 849 return rc; 850 } 851 852 int wmi_pcp_stop(struct wil6210_priv *wil) 853 { 854 return wmi_call(wil, WMI_PCP_STOP_CMDID, NULL, 0, 855 WMI_PCP_STOPPED_EVENTID, NULL, 0, 20); 856 } 857 858 int wmi_set_ssid(struct wil6210_priv *wil, u8 ssid_len, const void *ssid) 859 { 860 struct wmi_set_ssid_cmd cmd = { 861 .ssid_len = cpu_to_le32(ssid_len), 862 }; 863 864 if (ssid_len > sizeof(cmd.ssid)) 865 return -EINVAL; 866 867 memcpy(cmd.ssid, ssid, ssid_len); 868 869 return wmi_send(wil, WMI_SET_SSID_CMDID, &cmd, sizeof(cmd)); 870 } 871 872 int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid) 873 { 874 int rc; 875 struct { 876 struct wil6210_mbox_hdr_wmi wmi; 877 struct wmi_set_ssid_cmd cmd; 878 } __packed reply; 879 int len; /* reply.cmd.ssid_len in CPU order */ 880 881 rc = wmi_call(wil, WMI_GET_SSID_CMDID, NULL, 0, WMI_GET_SSID_EVENTID, 882 &reply, sizeof(reply), 20); 883 if (rc) 884 return rc; 885 886 len = le32_to_cpu(reply.cmd.ssid_len); 887 if (len > sizeof(reply.cmd.ssid)) 888 return -EINVAL; 889 890 *ssid_len = len; 891 memcpy(ssid, reply.cmd.ssid, len); 892 893 return 0; 894 } 895 896 int wmi_set_channel(struct wil6210_priv *wil, int channel) 897 { 898 struct wmi_set_pcp_channel_cmd cmd = { 899 .channel = channel - 1, 900 }; 901 902 return wmi_send(wil, WMI_SET_PCP_CHANNEL_CMDID, &cmd, sizeof(cmd)); 903 } 904 905 int wmi_get_channel(struct wil6210_priv *wil, int *channel) 906 { 907 int rc; 908 struct { 909 struct wil6210_mbox_hdr_wmi wmi; 910 struct wmi_set_pcp_channel_cmd cmd; 911 } __packed reply; 912 913 rc = wmi_call(wil, WMI_GET_PCP_CHANNEL_CMDID, NULL, 0, 914 WMI_GET_PCP_CHANNEL_EVENTID, &reply, sizeof(reply), 20); 915 if (rc) 916 return rc; 917 918 if (reply.cmd.channel > 3) 919 return -EINVAL; 920 921 *channel = reply.cmd.channel + 1; 922 923 return 0; 924 } 925 926 int wmi_p2p_cfg(struct wil6210_priv *wil, int channel) 927 { 928 struct wmi_p2p_cfg_cmd cmd = { 929 .discovery_mode = WMI_DISCOVERY_MODE_NON_OFFLOAD, 930 .channel = channel - 1, 931 }; 932 933 return wmi_send(wil, WMI_P2P_CFG_CMDID, &cmd, sizeof(cmd)); 934 } 935 936 int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index, 937 const void *mac_addr) 938 { 939 struct wmi_delete_cipher_key_cmd cmd = { 940 .key_index = key_index, 941 }; 942 943 if (mac_addr) 944 memcpy(cmd.mac, mac_addr, WMI_MAC_LEN); 945 946 return wmi_send(wil, WMI_DELETE_CIPHER_KEY_CMDID, &cmd, sizeof(cmd)); 947 } 948 949 int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index, 950 const void *mac_addr, int key_len, const void *key) 951 { 952 struct wmi_add_cipher_key_cmd cmd = { 953 .key_index = key_index, 954 .key_usage = WMI_KEY_USE_PAIRWISE, 955 .key_len = key_len, 956 }; 957 958 if (!key || (key_len > sizeof(cmd.key))) 959 return -EINVAL; 960 961 memcpy(cmd.key, key, key_len); 962 if (mac_addr) 963 memcpy(cmd.mac, mac_addr, WMI_MAC_LEN); 964 965 return wmi_send(wil, WMI_ADD_CIPHER_KEY_CMDID, &cmd, sizeof(cmd)); 966 } 967 968 int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie) 969 { 970 int rc; 971 u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len; 972 struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL); 973 974 if (!cmd) 975 return -ENOMEM; 976 if (!ie) 977 ie_len = 0; 978 979 cmd->mgmt_frm_type = type; 980 /* BUG: FW API define ieLen as u8. Will fix FW */ 981 cmd->ie_len = cpu_to_le16(ie_len); 982 memcpy(cmd->ie_info, ie, ie_len); 983 rc = wmi_send(wil, WMI_SET_APPIE_CMDID, cmd, len); 984 kfree(cmd); 985 986 return rc; 987 } 988 989 /** 990 * wmi_rxon - turn radio on/off 991 * @on: turn on if true, off otherwise 992 * 993 * Only switch radio. Channel should be set separately. 994 * No timeout for rxon - radio turned on forever unless some other call 995 * turns it off 996 */ 997 int wmi_rxon(struct wil6210_priv *wil, bool on) 998 { 999 int rc; 1000 struct { 1001 struct wil6210_mbox_hdr_wmi wmi; 1002 struct wmi_listen_started_event evt; 1003 } __packed reply; 1004 1005 wil_info(wil, "%s(%s)\n", __func__, on ? "on" : "off"); 1006 1007 if (on) { 1008 rc = wmi_call(wil, WMI_START_LISTEN_CMDID, NULL, 0, 1009 WMI_LISTEN_STARTED_EVENTID, 1010 &reply, sizeof(reply), 100); 1011 if ((rc == 0) && (reply.evt.status != WMI_FW_STATUS_SUCCESS)) 1012 rc = -EINVAL; 1013 } else { 1014 rc = wmi_call(wil, WMI_DISCOVERY_STOP_CMDID, NULL, 0, 1015 WMI_DISCOVERY_STOPPED_EVENTID, NULL, 0, 20); 1016 } 1017 1018 return rc; 1019 } 1020 1021 int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring) 1022 { 1023 struct wireless_dev *wdev = wil->wdev; 1024 struct net_device *ndev = wil_to_ndev(wil); 1025 struct wmi_cfg_rx_chain_cmd cmd = { 1026 .action = WMI_RX_CHAIN_ADD, 1027 .rx_sw_ring = { 1028 .max_mpdu_size = cpu_to_le16(mtu_max + ETH_HLEN), 1029 .ring_mem_base = cpu_to_le64(vring->pa), 1030 .ring_size = cpu_to_le16(vring->size), 1031 }, 1032 .mid = 0, /* TODO - what is it? */ 1033 .decap_trans_type = WMI_DECAP_TYPE_802_3, 1034 .reorder_type = WMI_RX_SW_REORDER, 1035 }; 1036 struct { 1037 struct wil6210_mbox_hdr_wmi wmi; 1038 struct wmi_cfg_rx_chain_done_event evt; 1039 } __packed evt; 1040 int rc; 1041 1042 if (wdev->iftype == NL80211_IFTYPE_MONITOR) { 1043 struct ieee80211_channel *ch = wdev->preset_chandef.chan; 1044 1045 cmd.sniffer_cfg.mode = cpu_to_le32(WMI_SNIFFER_ON); 1046 if (ch) 1047 cmd.sniffer_cfg.channel = ch->hw_value - 1; 1048 cmd.sniffer_cfg.phy_info_mode = 1049 cpu_to_le32(ndev->type == ARPHRD_IEEE80211_RADIOTAP); 1050 cmd.sniffer_cfg.phy_support = 1051 cpu_to_le32((wil->monitor_flags & MONITOR_FLAG_CONTROL) 1052 ? WMI_SNIFFER_CP : WMI_SNIFFER_DP); 1053 } else { 1054 /* Initialize offload (in non-sniffer mode). 1055 * Linux IP stack always calculates IP checksum 1056 * HW always calculate TCP/UDP checksum 1057 */ 1058 cmd.l3_l4_ctrl |= (1 << L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS); 1059 } 1060 /* typical time for secure PCP is 840ms */ 1061 rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd), 1062 WMI_CFG_RX_CHAIN_DONE_EVENTID, &evt, sizeof(evt), 2000); 1063 if (rc) 1064 return rc; 1065 1066 vring->hwtail = le32_to_cpu(evt.evt.rx_ring_tail_ptr); 1067 1068 wil_dbg_misc(wil, "Rx init: status %d tail 0x%08x\n", 1069 le32_to_cpu(evt.evt.status), vring->hwtail); 1070 1071 if (le32_to_cpu(evt.evt.status) != WMI_CFG_RX_CHAIN_SUCCESS) 1072 rc = -EINVAL; 1073 1074 return rc; 1075 } 1076 1077 int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r) 1078 { 1079 int rc; 1080 struct wmi_temp_sense_cmd cmd = { 1081 .measure_marlon_m_en = cpu_to_le32(!!t_m), 1082 .measure_marlon_r_en = cpu_to_le32(!!t_r), 1083 }; 1084 struct { 1085 struct wil6210_mbox_hdr_wmi wmi; 1086 struct wmi_temp_sense_done_event evt; 1087 } __packed reply; 1088 1089 rc = wmi_call(wil, WMI_TEMP_SENSE_CMDID, &cmd, sizeof(cmd), 1090 WMI_TEMP_SENSE_DONE_EVENTID, &reply, sizeof(reply), 100); 1091 if (rc) 1092 return rc; 1093 1094 if (t_m) 1095 *t_m = le32_to_cpu(reply.evt.marlon_m_t1000); 1096 if (t_r) 1097 *t_r = le32_to_cpu(reply.evt.marlon_r_t1000); 1098 1099 return 0; 1100 } 1101 1102 int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason) 1103 { 1104 struct wmi_disconnect_sta_cmd cmd = { 1105 .disconnect_reason = cpu_to_le16(reason), 1106 }; 1107 memcpy(cmd.dst_mac, mac, ETH_ALEN); 1108 1109 wil_dbg_wmi(wil, "%s(%pM, reason %d)\n", __func__, mac, reason); 1110 1111 return wmi_send(wil, WMI_DISCONNECT_STA_CMDID, &cmd, sizeof(cmd)); 1112 } 1113 1114 void wmi_event_flush(struct wil6210_priv *wil) 1115 { 1116 struct pending_wmi_event *evt, *t; 1117 1118 wil_dbg_wmi(wil, "%s()\n", __func__); 1119 1120 list_for_each_entry_safe(evt, t, &wil->pending_wmi_ev, list) { 1121 list_del(&evt->list); 1122 kfree(evt); 1123 } 1124 } 1125 1126 static bool wmi_evt_call_handler(struct wil6210_priv *wil, int id, 1127 void *d, int len) 1128 { 1129 uint i; 1130 1131 for (i = 0; i < ARRAY_SIZE(wmi_evt_handlers); i++) { 1132 if (wmi_evt_handlers[i].eventid == id) { 1133 wmi_evt_handlers[i].handler(wil, id, d, len); 1134 return true; 1135 } 1136 } 1137 1138 return false; 1139 } 1140 1141 static void wmi_event_handle(struct wil6210_priv *wil, 1142 struct wil6210_mbox_hdr *hdr) 1143 { 1144 u16 len = le16_to_cpu(hdr->len); 1145 1146 if ((hdr->type == WIL_MBOX_HDR_TYPE_WMI) && 1147 (len >= sizeof(struct wil6210_mbox_hdr_wmi))) { 1148 struct wil6210_mbox_hdr_wmi *wmi = (void *)(&hdr[1]); 1149 void *evt_data = (void *)(&wmi[1]); 1150 u16 id = le16_to_cpu(wmi->id); 1151 1152 wil_dbg_wmi(wil, "Handle WMI 0x%04x (reply_id 0x%04x)\n", 1153 id, wil->reply_id); 1154 /* check if someone waits for this event */ 1155 if (wil->reply_id && wil->reply_id == id) { 1156 if (wil->reply_buf) { 1157 memcpy(wil->reply_buf, wmi, 1158 min(len, wil->reply_size)); 1159 } else { 1160 wmi_evt_call_handler(wil, id, evt_data, 1161 len - sizeof(*wmi)); 1162 } 1163 wil_dbg_wmi(wil, "Complete WMI 0x%04x\n", id); 1164 complete(&wil->wmi_call); 1165 return; 1166 } 1167 /* unsolicited event */ 1168 /* search for handler */ 1169 if (!wmi_evt_call_handler(wil, id, evt_data, 1170 len - sizeof(*wmi))) { 1171 wil_err(wil, "Unhandled event 0x%04x\n", id); 1172 } 1173 } else { 1174 wil_err(wil, "Unknown event type\n"); 1175 print_hex_dump(KERN_ERR, "evt?? ", DUMP_PREFIX_OFFSET, 16, 1, 1176 hdr, sizeof(*hdr) + len, true); 1177 } 1178 } 1179 1180 /* 1181 * Retrieve next WMI event from the pending list 1182 */ 1183 static struct list_head *next_wmi_ev(struct wil6210_priv *wil) 1184 { 1185 ulong flags; 1186 struct list_head *ret = NULL; 1187 1188 spin_lock_irqsave(&wil->wmi_ev_lock, flags); 1189 1190 if (!list_empty(&wil->pending_wmi_ev)) { 1191 ret = wil->pending_wmi_ev.next; 1192 list_del(ret); 1193 } 1194 1195 spin_unlock_irqrestore(&wil->wmi_ev_lock, flags); 1196 1197 return ret; 1198 } 1199 1200 /* 1201 * Handler for the WMI events 1202 */ 1203 void wmi_event_worker(struct work_struct *work) 1204 { 1205 struct wil6210_priv *wil = container_of(work, struct wil6210_priv, 1206 wmi_event_worker); 1207 struct pending_wmi_event *evt; 1208 struct list_head *lh; 1209 1210 wil_dbg_wmi(wil, "Start %s\n", __func__); 1211 while ((lh = next_wmi_ev(wil)) != NULL) { 1212 evt = list_entry(lh, struct pending_wmi_event, list); 1213 wmi_event_handle(wil, &evt->event.hdr); 1214 kfree(evt); 1215 } 1216 wil_dbg_wmi(wil, "Finished %s\n", __func__); 1217 } 1218