1 /* 2 * Copyright (c) 2012 Qualcomm Atheros, Inc. 3 * 4 * Permission to use, copy, modify, and/or distribute this software for any 5 * purpose with or without fee is hereby granted, provided that the above 6 * copyright notice and this permission notice appear in all copies. 7 * 8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 */ 16 17 #include <linux/pci.h> 18 #include <linux/io.h> 19 #include <linux/list.h> 20 #include <linux/etherdevice.h> 21 #include <linux/if_arp.h> 22 23 #include "wil6210.h" 24 #include "txrx.h" 25 #include "wmi.h" 26 27 /** 28 * WMI event receiving - theory of operations 29 * 30 * When firmware about to report WMI event, it fills memory area 31 * in the mailbox and raises misc. IRQ. Thread interrupt handler invoked for 32 * the misc IRQ, function @wmi_recv_cmd called by thread IRQ handler. 33 * 34 * @wmi_recv_cmd reads event, allocates memory chunk and attaches it to the 35 * event list @wil->pending_wmi_ev. Then, work queue @wil->wmi_wq wakes up 36 * and handles events within the @wmi_event_worker. Every event get detached 37 * from list, processed and deleted. 38 * 39 * Purpose for this mechanism is to release IRQ thread; otherwise, 40 * if WMI event handling involves another WMI command flow, this 2-nd flow 41 * won't be completed because of blocked IRQ thread. 42 */ 43 44 /** 45 * Addressing - theory of operations 46 * 47 * There are several buses present on the WIL6210 card. 48 * Same memory areas are visible at different address on 49 * the different busses. There are 3 main bus masters: 50 * - MAC CPU (ucode) 51 * - User CPU (firmware) 52 * - AHB (host) 53 * 54 * On the PCI bus, there is one BAR (BAR0) of 2Mb size, exposing 55 * AHB addresses starting from 0x880000 56 * 57 * Internally, firmware uses addresses that allows faster access but 58 * are invisible from the host. To read from these addresses, alternative 59 * AHB address must be used. 60 * 61 * Memory mapping 62 * Linker address PCI/Host address 63 * 0x880000 .. 0xa80000 2Mb BAR0 64 * 0x800000 .. 0x807000 0x900000 .. 0x907000 28k DCCM 65 * 0x840000 .. 0x857000 0x908000 .. 0x91f000 92k PERIPH 66 */ 67 68 /** 69 * @fw_mapping provides memory remapping table 70 */ 71 static const struct { 72 u32 from; /* linker address - from, inclusive */ 73 u32 to; /* linker address - to, exclusive */ 74 u32 host; /* PCI/Host address - BAR0 + 0x880000 */ 75 } fw_mapping[] = { 76 {0x000000, 0x040000, 0x8c0000}, /* FW code RAM 256k */ 77 {0x800000, 0x808000, 0x900000}, /* FW data RAM 32k */ 78 {0x840000, 0x860000, 0x908000}, /* peripheral data RAM 128k/96k used */ 79 {0x880000, 0x88a000, 0x880000}, /* various RGF */ 80 {0x8c0000, 0x932000, 0x8c0000}, /* trivial mapping for upper area */ 81 /* 82 * 920000..930000 ucode code RAM 83 * 930000..932000 ucode data RAM 84 */ 85 }; 86 87 /** 88 * return AHB address for given firmware/ucode internal (linker) address 89 * @x - internal address 90 * If address have no valid AHB mapping, return 0 91 */ 92 static u32 wmi_addr_remap(u32 x) 93 { 94 uint i; 95 96 for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) { 97 if ((x >= fw_mapping[i].from) && (x < fw_mapping[i].to)) 98 return x + fw_mapping[i].host - fw_mapping[i].from; 99 } 100 101 return 0; 102 } 103 104 /** 105 * Check address validity for WMI buffer; remap if needed 106 * @ptr - internal (linker) fw/ucode address 107 * 108 * Valid buffer should be DWORD aligned 109 * 110 * return address for accessing buffer from the host; 111 * if buffer is not valid, return NULL. 112 */ 113 void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_) 114 { 115 u32 off; 116 u32 ptr = le32_to_cpu(ptr_); 117 118 if (ptr % 4) 119 return NULL; 120 121 ptr = wmi_addr_remap(ptr); 122 if (ptr < WIL6210_FW_HOST_OFF) 123 return NULL; 124 125 off = HOSTADDR(ptr); 126 if (off > WIL6210_MEM_SIZE - 4) 127 return NULL; 128 129 return wil->csr + off; 130 } 131 132 /** 133 * Check address validity 134 */ 135 void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr) 136 { 137 u32 off; 138 139 if (ptr % 4) 140 return NULL; 141 142 if (ptr < WIL6210_FW_HOST_OFF) 143 return NULL; 144 145 off = HOSTADDR(ptr); 146 if (off > WIL6210_MEM_SIZE - 4) 147 return NULL; 148 149 return wil->csr + off; 150 } 151 152 int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr, 153 struct wil6210_mbox_hdr *hdr) 154 { 155 void __iomem *src = wmi_buffer(wil, ptr); 156 if (!src) 157 return -EINVAL; 158 159 wil_memcpy_fromio_32(hdr, src, sizeof(*hdr)); 160 161 return 0; 162 } 163 164 static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len) 165 { 166 struct { 167 struct wil6210_mbox_hdr hdr; 168 struct wil6210_mbox_hdr_wmi wmi; 169 } __packed cmd = { 170 .hdr = { 171 .type = WIL_MBOX_HDR_TYPE_WMI, 172 .flags = 0, 173 .len = cpu_to_le16(sizeof(cmd.wmi) + len), 174 }, 175 .wmi = { 176 .id = cpu_to_le16(cmdid), 177 .info1 = 0, 178 }, 179 }; 180 struct wil6210_mbox_ring *r = &wil->mbox_ctl.tx; 181 struct wil6210_mbox_ring_desc d_head; 182 u32 next_head; 183 void __iomem *dst; 184 void __iomem *head = wmi_addr(wil, r->head); 185 uint retry; 186 187 if (sizeof(cmd) + len > r->entry_size) { 188 wil_err(wil, "WMI size too large: %d bytes, max is %d\n", 189 (int)(sizeof(cmd) + len), r->entry_size); 190 return -ERANGE; 191 } 192 193 might_sleep(); 194 195 if (!test_bit(wil_status_fwready, &wil->status)) { 196 wil_err(wil, "FW not ready\n"); 197 return -EAGAIN; 198 } 199 200 if (!head) { 201 wil_err(wil, "WMI head is garbage: 0x%08x\n", r->head); 202 return -EINVAL; 203 } 204 /* read Tx head till it is not busy */ 205 for (retry = 5; retry > 0; retry--) { 206 wil_memcpy_fromio_32(&d_head, head, sizeof(d_head)); 207 if (d_head.sync == 0) 208 break; 209 msleep(20); 210 } 211 if (d_head.sync != 0) { 212 wil_err(wil, "WMI head busy\n"); 213 return -EBUSY; 214 } 215 /* next head */ 216 next_head = r->base + ((r->head - r->base + sizeof(d_head)) % r->size); 217 wil_dbg_wmi(wil, "Head 0x%08x -> 0x%08x\n", r->head, next_head); 218 /* wait till FW finish with previous command */ 219 for (retry = 5; retry > 0; retry--) { 220 r->tail = ioread32(wil->csr + HOST_MBOX + 221 offsetof(struct wil6210_mbox_ctl, tx.tail)); 222 if (next_head != r->tail) 223 break; 224 msleep(20); 225 } 226 if (next_head == r->tail) { 227 wil_err(wil, "WMI ring full\n"); 228 return -EBUSY; 229 } 230 dst = wmi_buffer(wil, d_head.addr); 231 if (!dst) { 232 wil_err(wil, "invalid WMI buffer: 0x%08x\n", 233 le32_to_cpu(d_head.addr)); 234 return -EINVAL; 235 } 236 cmd.hdr.seq = cpu_to_le16(++wil->wmi_seq); 237 /* set command */ 238 wil_dbg_wmi(wil, "WMI command 0x%04x [%d]\n", cmdid, len); 239 wil_hex_dump_wmi("Cmd ", DUMP_PREFIX_OFFSET, 16, 1, &cmd, 240 sizeof(cmd), true); 241 wil_hex_dump_wmi("cmd ", DUMP_PREFIX_OFFSET, 16, 1, buf, 242 len, true); 243 wil_memcpy_toio_32(dst, &cmd, sizeof(cmd)); 244 wil_memcpy_toio_32(dst + sizeof(cmd), buf, len); 245 /* mark entry as full */ 246 iowrite32(1, wil->csr + HOSTADDR(r->head) + 247 offsetof(struct wil6210_mbox_ring_desc, sync)); 248 /* advance next ptr */ 249 iowrite32(r->head = next_head, wil->csr + HOST_MBOX + 250 offsetof(struct wil6210_mbox_ctl, tx.head)); 251 252 /* interrupt to FW */ 253 iowrite32(SW_INT_MBOX, wil->csr + HOST_SW_INT); 254 255 return 0; 256 } 257 258 int wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len) 259 { 260 int rc; 261 262 mutex_lock(&wil->wmi_mutex); 263 rc = __wmi_send(wil, cmdid, buf, len); 264 mutex_unlock(&wil->wmi_mutex); 265 266 return rc; 267 } 268 269 /*=== Event handlers ===*/ 270 static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len) 271 { 272 struct net_device *ndev = wil_to_ndev(wil); 273 struct wireless_dev *wdev = wil->wdev; 274 struct wmi_ready_event *evt = d; 275 u32 ver = le32_to_cpu(evt->sw_version); 276 277 wil_dbg_wmi(wil, "FW ver. %d; MAC %pM\n", ver, evt->mac); 278 279 if (!is_valid_ether_addr(ndev->dev_addr)) { 280 memcpy(ndev->dev_addr, evt->mac, ETH_ALEN); 281 memcpy(ndev->perm_addr, evt->mac, ETH_ALEN); 282 } 283 snprintf(wdev->wiphy->fw_version, sizeof(wdev->wiphy->fw_version), 284 "%d", ver); 285 } 286 287 static void wmi_evt_fw_ready(struct wil6210_priv *wil, int id, void *d, 288 int len) 289 { 290 wil_dbg_wmi(wil, "WMI: FW ready\n"); 291 292 set_bit(wil_status_fwready, &wil->status); 293 /* reuse wmi_ready for the firmware ready indication */ 294 complete(&wil->wmi_ready); 295 } 296 297 static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len) 298 { 299 struct wmi_rx_mgmt_packet_event *data = d; 300 struct wiphy *wiphy = wil_to_wiphy(wil); 301 struct ieee80211_mgmt *rx_mgmt_frame = 302 (struct ieee80211_mgmt *)data->payload; 303 int ch_no = data->info.channel+1; 304 u32 freq = ieee80211_channel_to_frequency(ch_no, 305 IEEE80211_BAND_60GHZ); 306 struct ieee80211_channel *channel = ieee80211_get_channel(wiphy, freq); 307 /* TODO convert LE to CPU */ 308 s32 signal = 0; /* TODO */ 309 __le16 fc = rx_mgmt_frame->frame_control; 310 u32 d_len = le32_to_cpu(data->info.len); 311 u16 d_status = le16_to_cpu(data->info.status); 312 313 wil_dbg_wmi(wil, "MGMT: channel %d MCS %d SNR %d\n", 314 data->info.channel, data->info.mcs, data->info.snr); 315 wil_dbg_wmi(wil, "status 0x%04x len %d stype %04x\n", d_status, d_len, 316 le16_to_cpu(data->info.stype)); 317 wil_dbg_wmi(wil, "qid %d mid %d cid %d\n", 318 data->info.qid, data->info.mid, data->info.cid); 319 320 if (!channel) { 321 wil_err(wil, "Frame on unsupported channel\n"); 322 return; 323 } 324 325 if (ieee80211_is_beacon(fc) || ieee80211_is_probe_resp(fc)) { 326 struct cfg80211_bss *bss; 327 u64 tsf = le64_to_cpu(rx_mgmt_frame->u.beacon.timestamp); 328 u16 cap = le16_to_cpu(rx_mgmt_frame->u.beacon.capab_info); 329 u16 bi = le16_to_cpu(rx_mgmt_frame->u.beacon.beacon_int); 330 const u8 *ie_buf = rx_mgmt_frame->u.beacon.variable; 331 size_t ie_len = d_len - offsetof(struct ieee80211_mgmt, 332 u.beacon.variable); 333 wil_dbg_wmi(wil, "Capability info : 0x%04x\n", cap); 334 335 bss = cfg80211_inform_bss(wiphy, channel, rx_mgmt_frame->bssid, 336 tsf, cap, bi, ie_buf, ie_len, 337 signal, GFP_KERNEL); 338 if (bss) { 339 wil_dbg_wmi(wil, "Added BSS %pM\n", 340 rx_mgmt_frame->bssid); 341 cfg80211_put_bss(wiphy, bss); 342 } else { 343 wil_err(wil, "cfg80211_inform_bss() failed\n"); 344 } 345 } 346 } 347 348 static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id, 349 void *d, int len) 350 { 351 if (wil->scan_request) { 352 struct wmi_scan_complete_event *data = d; 353 bool aborted = (data->status != 0); 354 355 wil_dbg_wmi(wil, "SCAN_COMPLETE(0x%08x)\n", data->status); 356 cfg80211_scan_done(wil->scan_request, aborted); 357 wil->scan_request = NULL; 358 } else { 359 wil_err(wil, "SCAN_COMPLETE while not scanning\n"); 360 } 361 } 362 363 static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) 364 { 365 struct net_device *ndev = wil_to_ndev(wil); 366 struct wireless_dev *wdev = wil->wdev; 367 struct wmi_connect_event *evt = d; 368 int ch; /* channel number */ 369 struct station_info sinfo; 370 u8 *assoc_req_ie, *assoc_resp_ie; 371 size_t assoc_req_ielen, assoc_resp_ielen; 372 /* capinfo(u16) + listen_interval(u16) + IEs */ 373 const size_t assoc_req_ie_offset = sizeof(u16) * 2; 374 /* capinfo(u16) + status_code(u16) + associd(u16) + IEs */ 375 const size_t assoc_resp_ie_offset = sizeof(u16) * 3; 376 377 if (len < sizeof(*evt)) { 378 wil_err(wil, "Connect event too short : %d bytes\n", len); 379 return; 380 } 381 if (len != sizeof(*evt) + evt->beacon_ie_len + evt->assoc_req_len + 382 evt->assoc_resp_len) { 383 wil_err(wil, 384 "Connect event corrupted : %d != %d + %d + %d + %d\n", 385 len, (int)sizeof(*evt), evt->beacon_ie_len, 386 evt->assoc_req_len, evt->assoc_resp_len); 387 return; 388 } 389 ch = evt->channel + 1; 390 wil_dbg_wmi(wil, "Connect %pM channel [%d] cid %d\n", 391 evt->bssid, ch, evt->cid); 392 wil_hex_dump_wmi("connect AI : ", DUMP_PREFIX_OFFSET, 16, 1, 393 evt->assoc_info, len - sizeof(*evt), true); 394 395 /* figure out IE's */ 396 assoc_req_ie = &evt->assoc_info[evt->beacon_ie_len + 397 assoc_req_ie_offset]; 398 assoc_req_ielen = evt->assoc_req_len - assoc_req_ie_offset; 399 if (evt->assoc_req_len <= assoc_req_ie_offset) { 400 assoc_req_ie = NULL; 401 assoc_req_ielen = 0; 402 } 403 404 assoc_resp_ie = &evt->assoc_info[evt->beacon_ie_len + 405 evt->assoc_req_len + 406 assoc_resp_ie_offset]; 407 assoc_resp_ielen = evt->assoc_resp_len - assoc_resp_ie_offset; 408 if (evt->assoc_resp_len <= assoc_resp_ie_offset) { 409 assoc_resp_ie = NULL; 410 assoc_resp_ielen = 0; 411 } 412 413 if ((wdev->iftype == NL80211_IFTYPE_STATION) || 414 (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) { 415 if (wdev->sme_state != CFG80211_SME_CONNECTING) { 416 wil_err(wil, "Not in connecting state\n"); 417 return; 418 } 419 del_timer_sync(&wil->connect_timer); 420 cfg80211_connect_result(ndev, evt->bssid, 421 assoc_req_ie, assoc_req_ielen, 422 assoc_resp_ie, assoc_resp_ielen, 423 WLAN_STATUS_SUCCESS, GFP_KERNEL); 424 425 } else if ((wdev->iftype == NL80211_IFTYPE_AP) || 426 (wdev->iftype == NL80211_IFTYPE_P2P_GO)) { 427 memset(&sinfo, 0, sizeof(sinfo)); 428 429 sinfo.generation = wil->sinfo_gen++; 430 431 if (assoc_req_ie) { 432 sinfo.assoc_req_ies = assoc_req_ie; 433 sinfo.assoc_req_ies_len = assoc_req_ielen; 434 sinfo.filled |= STATION_INFO_ASSOC_REQ_IES; 435 } 436 437 cfg80211_new_sta(ndev, evt->bssid, &sinfo, GFP_KERNEL); 438 } 439 set_bit(wil_status_fwconnected, &wil->status); 440 441 /* FIXME FW can transmit only ucast frames to peer */ 442 /* FIXME real ring_id instead of hard coded 0 */ 443 memcpy(wil->dst_addr[0], evt->bssid, ETH_ALEN); 444 445 wil->pending_connect_cid = evt->cid; 446 queue_work(wil->wmi_wq_conn, &wil->wmi_connect_worker); 447 } 448 449 static void wmi_evt_disconnect(struct wil6210_priv *wil, int id, 450 void *d, int len) 451 { 452 struct wmi_disconnect_event *evt = d; 453 454 wil_dbg_wmi(wil, "Disconnect %pM reason %d proto %d wmi\n", 455 evt->bssid, 456 evt->protocol_reason_status, evt->disconnect_reason); 457 458 wil->sinfo_gen++; 459 460 wil6210_disconnect(wil, evt->bssid); 461 } 462 463 static void wmi_evt_notify(struct wil6210_priv *wil, int id, void *d, int len) 464 { 465 struct wmi_notify_req_done_event *evt = d; 466 467 if (len < sizeof(*evt)) { 468 wil_err(wil, "Short NOTIFY event\n"); 469 return; 470 } 471 472 wil->stats.tsf = le64_to_cpu(evt->tsf); 473 wil->stats.snr = le32_to_cpu(evt->snr_val); 474 wil->stats.bf_mcs = le16_to_cpu(evt->bf_mcs); 475 wil->stats.my_rx_sector = le16_to_cpu(evt->my_rx_sector); 476 wil->stats.my_tx_sector = le16_to_cpu(evt->my_tx_sector); 477 wil->stats.peer_rx_sector = le16_to_cpu(evt->other_rx_sector); 478 wil->stats.peer_tx_sector = le16_to_cpu(evt->other_tx_sector); 479 wil_dbg_wmi(wil, "Link status, MCS %d TSF 0x%016llx\n" 480 "BF status 0x%08x SNR 0x%08x\n" 481 "Tx Tpt %d goodput %d Rx goodput %d\n" 482 "Sectors(rx:tx) my %d:%d peer %d:%d\n", 483 wil->stats.bf_mcs, wil->stats.tsf, evt->status, 484 wil->stats.snr, le32_to_cpu(evt->tx_tpt), 485 le32_to_cpu(evt->tx_goodput), le32_to_cpu(evt->rx_goodput), 486 wil->stats.my_rx_sector, wil->stats.my_tx_sector, 487 wil->stats.peer_rx_sector, wil->stats.peer_tx_sector); 488 } 489 490 /* 491 * Firmware reports EAPOL frame using WME event. 492 * Reconstruct Ethernet frame and deliver it via normal Rx 493 */ 494 static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id, 495 void *d, int len) 496 { 497 struct net_device *ndev = wil_to_ndev(wil); 498 struct wmi_eapol_rx_event *evt = d; 499 u16 eapol_len = le16_to_cpu(evt->eapol_len); 500 int sz = eapol_len + ETH_HLEN; 501 struct sk_buff *skb; 502 struct ethhdr *eth; 503 504 wil_dbg_wmi(wil, "EAPOL len %d from %pM\n", eapol_len, 505 evt->src_mac); 506 507 if (eapol_len > 196) { /* TODO: revisit size limit */ 508 wil_err(wil, "EAPOL too large\n"); 509 return; 510 } 511 512 skb = alloc_skb(sz, GFP_KERNEL); 513 if (!skb) { 514 wil_err(wil, "Failed to allocate skb\n"); 515 return; 516 } 517 eth = (struct ethhdr *)skb_put(skb, ETH_HLEN); 518 memcpy(eth->h_dest, ndev->dev_addr, ETH_ALEN); 519 memcpy(eth->h_source, evt->src_mac, ETH_ALEN); 520 eth->h_proto = cpu_to_be16(ETH_P_PAE); 521 memcpy(skb_put(skb, eapol_len), evt->eapol, eapol_len); 522 skb->protocol = eth_type_trans(skb, ndev); 523 if (likely(netif_rx_ni(skb) == NET_RX_SUCCESS)) { 524 ndev->stats.rx_packets++; 525 ndev->stats.rx_bytes += skb->len; 526 } else { 527 ndev->stats.rx_dropped++; 528 } 529 } 530 531 static const struct { 532 int eventid; 533 void (*handler)(struct wil6210_priv *wil, int eventid, 534 void *data, int data_len); 535 } wmi_evt_handlers[] = { 536 {WMI_READY_EVENTID, wmi_evt_ready}, 537 {WMI_FW_READY_EVENTID, wmi_evt_fw_ready}, 538 {WMI_RX_MGMT_PACKET_EVENTID, wmi_evt_rx_mgmt}, 539 {WMI_SCAN_COMPLETE_EVENTID, wmi_evt_scan_complete}, 540 {WMI_CONNECT_EVENTID, wmi_evt_connect}, 541 {WMI_DISCONNECT_EVENTID, wmi_evt_disconnect}, 542 {WMI_NOTIFY_REQ_DONE_EVENTID, wmi_evt_notify}, 543 {WMI_EAPOL_RX_EVENTID, wmi_evt_eapol_rx}, 544 }; 545 546 /* 547 * Run in IRQ context 548 * Extract WMI command from mailbox. Queue it to the @wil->pending_wmi_ev 549 * that will be eventually handled by the @wmi_event_worker in the thread 550 * context of thread "wil6210_wmi" 551 */ 552 void wmi_recv_cmd(struct wil6210_priv *wil) 553 { 554 struct wil6210_mbox_ring_desc d_tail; 555 struct wil6210_mbox_hdr hdr; 556 struct wil6210_mbox_ring *r = &wil->mbox_ctl.rx; 557 struct pending_wmi_event *evt; 558 u8 *cmd; 559 void __iomem *src; 560 ulong flags; 561 562 for (;;) { 563 u16 len; 564 565 r->head = ioread32(wil->csr + HOST_MBOX + 566 offsetof(struct wil6210_mbox_ctl, rx.head)); 567 if (r->tail == r->head) 568 return; 569 570 /* read cmd from tail */ 571 wil_memcpy_fromio_32(&d_tail, wil->csr + HOSTADDR(r->tail), 572 sizeof(struct wil6210_mbox_ring_desc)); 573 if (d_tail.sync == 0) { 574 wil_err(wil, "Mbox evt not owned by FW?\n"); 575 return; 576 } 577 578 if (0 != wmi_read_hdr(wil, d_tail.addr, &hdr)) { 579 wil_err(wil, "Mbox evt at 0x%08x?\n", 580 le32_to_cpu(d_tail.addr)); 581 return; 582 } 583 584 len = le16_to_cpu(hdr.len); 585 src = wmi_buffer(wil, d_tail.addr) + 586 sizeof(struct wil6210_mbox_hdr); 587 evt = kmalloc(ALIGN(offsetof(struct pending_wmi_event, 588 event.wmi) + len, 4), 589 GFP_KERNEL); 590 if (!evt) 591 return; 592 593 evt->event.hdr = hdr; 594 cmd = (void *)&evt->event.wmi; 595 wil_memcpy_fromio_32(cmd, src, len); 596 /* mark entry as empty */ 597 iowrite32(0, wil->csr + HOSTADDR(r->tail) + 598 offsetof(struct wil6210_mbox_ring_desc, sync)); 599 /* indicate */ 600 wil_dbg_wmi(wil, "Mbox evt %04x %04x %04x %02x\n", 601 le16_to_cpu(hdr.seq), len, le16_to_cpu(hdr.type), 602 hdr.flags); 603 if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) && 604 (len >= sizeof(struct wil6210_mbox_hdr_wmi))) { 605 wil_dbg_wmi(wil, "WMI event 0x%04x\n", 606 evt->event.wmi.id); 607 } 608 wil_hex_dump_wmi("evt ", DUMP_PREFIX_OFFSET, 16, 1, 609 &evt->event.hdr, sizeof(hdr) + len, true); 610 611 /* advance tail */ 612 r->tail = r->base + ((r->tail - r->base + 613 sizeof(struct wil6210_mbox_ring_desc)) % r->size); 614 iowrite32(r->tail, wil->csr + HOST_MBOX + 615 offsetof(struct wil6210_mbox_ctl, rx.tail)); 616 617 /* add to the pending list */ 618 spin_lock_irqsave(&wil->wmi_ev_lock, flags); 619 list_add_tail(&evt->list, &wil->pending_wmi_ev); 620 spin_unlock_irqrestore(&wil->wmi_ev_lock, flags); 621 { 622 int q = queue_work(wil->wmi_wq, 623 &wil->wmi_event_worker); 624 wil_dbg_wmi(wil, "queue_work -> %d\n", q); 625 } 626 } 627 } 628 629 int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len, 630 u16 reply_id, void *reply, u8 reply_size, int to_msec) 631 { 632 int rc; 633 int remain; 634 635 mutex_lock(&wil->wmi_mutex); 636 637 rc = __wmi_send(wil, cmdid, buf, len); 638 if (rc) 639 goto out; 640 641 wil->reply_id = reply_id; 642 wil->reply_buf = reply; 643 wil->reply_size = reply_size; 644 remain = wait_for_completion_timeout(&wil->wmi_ready, 645 msecs_to_jiffies(to_msec)); 646 if (0 == remain) { 647 wil_err(wil, "wmi_call(0x%04x->0x%04x) timeout %d msec\n", 648 cmdid, reply_id, to_msec); 649 rc = -ETIME; 650 } else { 651 wil_dbg_wmi(wil, 652 "wmi_call(0x%04x->0x%04x) completed in %d msec\n", 653 cmdid, reply_id, 654 to_msec - jiffies_to_msecs(remain)); 655 } 656 wil->reply_id = 0; 657 wil->reply_buf = NULL; 658 wil->reply_size = 0; 659 out: 660 mutex_unlock(&wil->wmi_mutex); 661 662 return rc; 663 } 664 665 int wmi_echo(struct wil6210_priv *wil) 666 { 667 struct wmi_echo_cmd cmd = { 668 .value = cpu_to_le32(0x12345678), 669 }; 670 671 return wmi_call(wil, WMI_ECHO_CMDID, &cmd, sizeof(cmd), 672 WMI_ECHO_RSP_EVENTID, NULL, 0, 20); 673 } 674 675 int wmi_set_mac_address(struct wil6210_priv *wil, void *addr) 676 { 677 struct wmi_set_mac_address_cmd cmd; 678 679 memcpy(cmd.mac, addr, ETH_ALEN); 680 681 wil_dbg_wmi(wil, "Set MAC %pM\n", addr); 682 683 return wmi_send(wil, WMI_SET_MAC_ADDRESS_CMDID, &cmd, sizeof(cmd)); 684 } 685 686 int wmi_set_bcon(struct wil6210_priv *wil, int bi, u8 wmi_nettype) 687 { 688 struct wmi_bcon_ctrl_cmd cmd = { 689 .bcon_interval = cpu_to_le16(bi), 690 .network_type = wmi_nettype, 691 .disable_sec_offload = 1, 692 }; 693 694 if (!wil->secure_pcp) 695 cmd.disable_sec = 1; 696 697 return wmi_send(wil, WMI_BCON_CTRL_CMDID, &cmd, sizeof(cmd)); 698 } 699 700 int wmi_set_ssid(struct wil6210_priv *wil, u8 ssid_len, const void *ssid) 701 { 702 struct wmi_set_ssid_cmd cmd = { 703 .ssid_len = cpu_to_le32(ssid_len), 704 }; 705 706 if (ssid_len > sizeof(cmd.ssid)) 707 return -EINVAL; 708 709 memcpy(cmd.ssid, ssid, ssid_len); 710 711 return wmi_send(wil, WMI_SET_SSID_CMDID, &cmd, sizeof(cmd)); 712 } 713 714 int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid) 715 { 716 int rc; 717 struct { 718 struct wil6210_mbox_hdr_wmi wmi; 719 struct wmi_set_ssid_cmd cmd; 720 } __packed reply; 721 int len; /* reply.cmd.ssid_len in CPU order */ 722 723 rc = wmi_call(wil, WMI_GET_SSID_CMDID, NULL, 0, WMI_GET_SSID_EVENTID, 724 &reply, sizeof(reply), 20); 725 if (rc) 726 return rc; 727 728 len = le32_to_cpu(reply.cmd.ssid_len); 729 if (len > sizeof(reply.cmd.ssid)) 730 return -EINVAL; 731 732 *ssid_len = len; 733 memcpy(ssid, reply.cmd.ssid, len); 734 735 return 0; 736 } 737 738 int wmi_set_channel(struct wil6210_priv *wil, int channel) 739 { 740 struct wmi_set_pcp_channel_cmd cmd = { 741 .channel = channel - 1, 742 }; 743 744 return wmi_send(wil, WMI_SET_PCP_CHANNEL_CMDID, &cmd, sizeof(cmd)); 745 } 746 747 int wmi_get_channel(struct wil6210_priv *wil, int *channel) 748 { 749 int rc; 750 struct { 751 struct wil6210_mbox_hdr_wmi wmi; 752 struct wmi_set_pcp_channel_cmd cmd; 753 } __packed reply; 754 755 rc = wmi_call(wil, WMI_GET_PCP_CHANNEL_CMDID, NULL, 0, 756 WMI_GET_PCP_CHANNEL_EVENTID, &reply, sizeof(reply), 20); 757 if (rc) 758 return rc; 759 760 if (reply.cmd.channel > 3) 761 return -EINVAL; 762 763 *channel = reply.cmd.channel + 1; 764 765 return 0; 766 } 767 768 int wmi_tx_eapol(struct wil6210_priv *wil, struct sk_buff *skb) 769 { 770 struct wmi_eapol_tx_cmd *cmd; 771 struct ethhdr *eth; 772 u16 eapol_len = skb->len - ETH_HLEN; 773 void *eapol = skb->data + ETH_HLEN; 774 uint i; 775 int rc; 776 777 skb_set_mac_header(skb, 0); 778 eth = eth_hdr(skb); 779 wil_dbg_wmi(wil, "EAPOL %d bytes to %pM\n", eapol_len, eth->h_dest); 780 for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) { 781 if (memcmp(wil->dst_addr[i], eth->h_dest, ETH_ALEN) == 0) 782 goto found_dest; 783 } 784 785 return -EINVAL; 786 787 found_dest: 788 /* find out eapol data & len */ 789 cmd = kzalloc(sizeof(*cmd) + eapol_len, GFP_KERNEL); 790 if (!cmd) 791 return -EINVAL; 792 793 memcpy(cmd->dst_mac, eth->h_dest, ETH_ALEN); 794 cmd->eapol_len = cpu_to_le16(eapol_len); 795 memcpy(cmd->eapol, eapol, eapol_len); 796 rc = wmi_send(wil, WMI_EAPOL_TX_CMDID, cmd, sizeof(*cmd) + eapol_len); 797 kfree(cmd); 798 799 return rc; 800 } 801 802 int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index, 803 const void *mac_addr) 804 { 805 struct wmi_delete_cipher_key_cmd cmd = { 806 .key_index = key_index, 807 }; 808 809 if (mac_addr) 810 memcpy(cmd.mac, mac_addr, WMI_MAC_LEN); 811 812 return wmi_send(wil, WMI_DELETE_CIPHER_KEY_CMDID, &cmd, sizeof(cmd)); 813 } 814 815 int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index, 816 const void *mac_addr, int key_len, const void *key) 817 { 818 struct wmi_add_cipher_key_cmd cmd = { 819 .key_index = key_index, 820 .key_usage = WMI_KEY_USE_PAIRWISE, 821 .key_len = key_len, 822 }; 823 824 if (!key || (key_len > sizeof(cmd.key))) 825 return -EINVAL; 826 827 memcpy(cmd.key, key, key_len); 828 if (mac_addr) 829 memcpy(cmd.mac, mac_addr, WMI_MAC_LEN); 830 831 return wmi_send(wil, WMI_ADD_CIPHER_KEY_CMDID, &cmd, sizeof(cmd)); 832 } 833 834 int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie) 835 { 836 int rc; 837 u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len; 838 struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL); 839 if (!cmd) 840 return -ENOMEM; 841 842 cmd->mgmt_frm_type = type; 843 /* BUG: FW API define ieLen as u8. Will fix FW */ 844 cmd->ie_len = cpu_to_le16(ie_len); 845 memcpy(cmd->ie_info, ie, ie_len); 846 rc = wmi_send(wil, WMI_SET_APPIE_CMDID, &cmd, len); 847 kfree(cmd); 848 849 return rc; 850 } 851 852 int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring) 853 { 854 struct wireless_dev *wdev = wil->wdev; 855 struct net_device *ndev = wil_to_ndev(wil); 856 struct wmi_cfg_rx_chain_cmd cmd = { 857 .action = WMI_RX_CHAIN_ADD, 858 .rx_sw_ring = { 859 .max_mpdu_size = cpu_to_le16(RX_BUF_LEN), 860 .ring_mem_base = cpu_to_le64(vring->pa), 861 .ring_size = cpu_to_le16(vring->size), 862 }, 863 .mid = 0, /* TODO - what is it? */ 864 .decap_trans_type = WMI_DECAP_TYPE_802_3, 865 }; 866 struct { 867 struct wil6210_mbox_hdr_wmi wmi; 868 struct wmi_cfg_rx_chain_done_event evt; 869 } __packed evt; 870 int rc; 871 872 if (wdev->iftype == NL80211_IFTYPE_MONITOR) { 873 struct ieee80211_channel *ch = wdev->preset_chandef.chan; 874 875 cmd.sniffer_cfg.mode = cpu_to_le32(WMI_SNIFFER_ON); 876 if (ch) 877 cmd.sniffer_cfg.channel = ch->hw_value - 1; 878 cmd.sniffer_cfg.phy_info_mode = 879 cpu_to_le32(ndev->type == ARPHRD_IEEE80211_RADIOTAP); 880 cmd.sniffer_cfg.phy_support = 881 cpu_to_le32((wil->monitor_flags & MONITOR_FLAG_CONTROL) 882 ? WMI_SNIFFER_CP : WMI_SNIFFER_DP); 883 } 884 /* typical time for secure PCP is 840ms */ 885 rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd), 886 WMI_CFG_RX_CHAIN_DONE_EVENTID, &evt, sizeof(evt), 2000); 887 if (rc) 888 return rc; 889 890 vring->hwtail = le32_to_cpu(evt.evt.rx_ring_tail_ptr); 891 892 wil_dbg_misc(wil, "Rx init: status %d tail 0x%08x\n", 893 le32_to_cpu(evt.evt.status), vring->hwtail); 894 895 if (le32_to_cpu(evt.evt.status) != WMI_CFG_RX_CHAIN_SUCCESS) 896 rc = -EINVAL; 897 898 return rc; 899 } 900 901 void wmi_event_flush(struct wil6210_priv *wil) 902 { 903 struct pending_wmi_event *evt, *t; 904 905 wil_dbg_wmi(wil, "%s()\n", __func__); 906 907 list_for_each_entry_safe(evt, t, &wil->pending_wmi_ev, list) { 908 list_del(&evt->list); 909 kfree(evt); 910 } 911 } 912 913 static bool wmi_evt_call_handler(struct wil6210_priv *wil, int id, 914 void *d, int len) 915 { 916 uint i; 917 918 for (i = 0; i < ARRAY_SIZE(wmi_evt_handlers); i++) { 919 if (wmi_evt_handlers[i].eventid == id) { 920 wmi_evt_handlers[i].handler(wil, id, d, len); 921 return true; 922 } 923 } 924 925 return false; 926 } 927 928 static void wmi_event_handle(struct wil6210_priv *wil, 929 struct wil6210_mbox_hdr *hdr) 930 { 931 u16 len = le16_to_cpu(hdr->len); 932 933 if ((hdr->type == WIL_MBOX_HDR_TYPE_WMI) && 934 (len >= sizeof(struct wil6210_mbox_hdr_wmi))) { 935 struct wil6210_mbox_hdr_wmi *wmi = (void *)(&hdr[1]); 936 void *evt_data = (void *)(&wmi[1]); 937 u16 id = le16_to_cpu(wmi->id); 938 /* check if someone waits for this event */ 939 if (wil->reply_id && wil->reply_id == id) { 940 if (wil->reply_buf) { 941 memcpy(wil->reply_buf, wmi, 942 min(len, wil->reply_size)); 943 } else { 944 wmi_evt_call_handler(wil, id, evt_data, 945 len - sizeof(*wmi)); 946 } 947 wil_dbg_wmi(wil, "Complete WMI 0x%04x\n", id); 948 complete(&wil->wmi_ready); 949 return; 950 } 951 /* unsolicited event */ 952 /* search for handler */ 953 if (!wmi_evt_call_handler(wil, id, evt_data, 954 len - sizeof(*wmi))) { 955 wil_err(wil, "Unhandled event 0x%04x\n", id); 956 } 957 } else { 958 wil_err(wil, "Unknown event type\n"); 959 print_hex_dump(KERN_ERR, "evt?? ", DUMP_PREFIX_OFFSET, 16, 1, 960 hdr, sizeof(*hdr) + len, true); 961 } 962 } 963 964 /* 965 * Retrieve next WMI event from the pending list 966 */ 967 static struct list_head *next_wmi_ev(struct wil6210_priv *wil) 968 { 969 ulong flags; 970 struct list_head *ret = NULL; 971 972 spin_lock_irqsave(&wil->wmi_ev_lock, flags); 973 974 if (!list_empty(&wil->pending_wmi_ev)) { 975 ret = wil->pending_wmi_ev.next; 976 list_del(ret); 977 } 978 979 spin_unlock_irqrestore(&wil->wmi_ev_lock, flags); 980 981 return ret; 982 } 983 984 /* 985 * Handler for the WMI events 986 */ 987 void wmi_event_worker(struct work_struct *work) 988 { 989 struct wil6210_priv *wil = container_of(work, struct wil6210_priv, 990 wmi_event_worker); 991 struct pending_wmi_event *evt; 992 struct list_head *lh; 993 994 while ((lh = next_wmi_ev(wil)) != NULL) { 995 evt = list_entry(lh, struct pending_wmi_event, list); 996 wmi_event_handle(wil, &evt->event.hdr); 997 kfree(evt); 998 } 999 } 1000 1001 void wmi_connect_worker(struct work_struct *work) 1002 { 1003 int rc; 1004 struct wil6210_priv *wil = container_of(work, struct wil6210_priv, 1005 wmi_connect_worker); 1006 1007 if (wil->pending_connect_cid < 0) { 1008 wil_err(wil, "No connection pending\n"); 1009 return; 1010 } 1011 1012 wil_dbg_wmi(wil, "Configure for connection CID %d\n", 1013 wil->pending_connect_cid); 1014 1015 rc = wil_vring_init_tx(wil, 0, WIL6210_TX_RING_SIZE, 1016 wil->pending_connect_cid, 0); 1017 wil->pending_connect_cid = -1; 1018 if (rc == 0) 1019 wil_link_on(wil); 1020 } 1021