/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "core.h"
#include "hif-ops.h"
#include "cfg80211.h"
#include "target.h"
#include "debug.h"

struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 *node_addr)
{
	struct ath6kl *ar = vif->ar;
	struct ath6kl_sta *conn = NULL;
	u8 i, max_conn;

	max_conn = (vif->nw_type == AP_NETWORK) ? AP_MAX_NUM_STA : 0;

	for (i = 0; i < max_conn; i++) {
		if (memcmp(node_addr, ar->sta_list[i].mac, ETH_ALEN) == 0) {
			conn = &ar->sta_list[i];
			break;
		}
	}

	return conn;
}

struct ath6kl_sta *ath6kl_find_sta_by_aid(struct ath6kl *ar, u8 aid)
{
	struct ath6kl_sta *conn = NULL;
	u8 ctr;

	for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
		if (ar->sta_list[ctr].aid == aid) {
			conn = &ar->sta_list[ctr];
			break;
		}
	}
	return conn;
}

static void ath6kl_add_new_sta(struct ath6kl_vif *vif, u8 *mac, u16 aid,
			       u8 *wpaie, size_t ielen, u8 keymgmt,
			       u8 ucipher, u8 auth, u8 apsd_info)
{
	struct ath6kl *ar = vif->ar;
	struct ath6kl_sta *sta;
	u8 free_slot;

	free_slot = aid - 1;

	sta = &ar->sta_list[free_slot];
	memcpy(sta->mac, mac, ETH_ALEN);
	if (ielen <= ATH6KL_MAX_IE)
		memcpy(sta->wpa_ie, wpaie, ielen);
	sta->aid = aid;
	sta->keymgmt = keymgmt;
	sta->ucipher = ucipher;
	sta->auth = auth;
	sta->apsd_info = apsd_info;

	ar->sta_list_index = ar->sta_list_index | (1 << free_slot);
	ar->ap_stats.sta[free_slot].aid = cpu_to_le32(aid);
	aggr_conn_init(vif, vif->aggr_cntxt, sta->aggr_conn);
}

static void ath6kl_sta_cleanup(struct ath6kl *ar, u8 i)
{
	struct ath6kl_sta *sta = &ar->sta_list[i];
	struct ath6kl_mgmt_buff *entry, *tmp;

	/* empty the queued pkts in the PS queue if any */
	spin_lock_bh(&sta->psq_lock);
	skb_queue_purge(&sta->psq);
	skb_queue_purge(&sta->apsdq);

	if (sta->mgmt_psq_len != 0) {
		list_for_each_entry_safe(entry, tmp, &sta->mgmt_psq, list) {
			kfree(entry);
		}
		INIT_LIST_HEAD(&sta->mgmt_psq);
		sta->mgmt_psq_len = 0;
	}

	spin_unlock_bh(&sta->psq_lock);

	memset(&ar->ap_stats.sta[sta->aid - 1], 0,
	       sizeof(struct wmi_per_sta_stat));
	memset(sta->mac, 0, ETH_ALEN);
	memset(sta->wpa_ie, 0, ATH6KL_MAX_IE);
	sta->aid = 0;
	sta->sta_flags = 0;

	ar->sta_list_index = ar->sta_list_index & ~(1 << i);
	aggr_reset_state(sta->aggr_conn);
}
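
/*
 * Remove the station matching the given MAC address from the AP's
 * station list, or every station when the broadcast address is passed.
 * Returns 1 if at least one entry was cleaned up, 0 otherwise.
 */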
static u8 ath6kl_remove_sta(struct ath6kl *ar, u8 *mac, u16 reason)
{
	u8 i, removed = 0;

	if (is_zero_ether_addr(mac))
		return removed;

	if (is_broadcast_ether_addr(mac)) {
		ath6kl_dbg(ATH6KL_DBG_TRC, "deleting all stations\n");

		for (i = 0; i < AP_MAX_NUM_STA; i++) {
			if (!is_zero_ether_addr(ar->sta_list[i].mac)) {
				ath6kl_sta_cleanup(ar, i);
				removed = 1;
			}
		}
	} else {
		for (i = 0; i < AP_MAX_NUM_STA; i++) {
			if (memcmp(ar->sta_list[i].mac, mac, ETH_ALEN) == 0) {
				ath6kl_dbg(ATH6KL_DBG_TRC,
					   "deleting station %pM aid=%d reason=%d\n",
					   mac, ar->sta_list[i].aid, reason);
				ath6kl_sta_cleanup(ar, i);
				removed = 1;
				break;
			}
		}
	}

	return removed;
}

enum htc_endpoint_id ath6kl_ac2_endpoint_id(void *devt, u8 ac)
{
	struct ath6kl *ar = devt;

	return ar->ac2ep_map[ac];
}
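
/*
 * The cookie pool is a simple singly linked free list built over the
 * statically allocated cookie_mem[] array: ath6kl_alloc_cookie() pops
 * the head of the list (or returns NULL when the pool is exhausted)
 * and ath6kl_free_cookie() pushes an entry back onto it.
 */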
struct ath6kl_cookie *ath6kl_alloc_cookie(struct ath6kl *ar)
{
	struct ath6kl_cookie *cookie;

	cookie = ar->cookie_list;
	if (cookie != NULL) {
		ar->cookie_list = cookie->arc_list_next;
		ar->cookie_count--;
	}

	return cookie;
}

void ath6kl_cookie_init(struct ath6kl *ar)
{
	u32 i;

	ar->cookie_list = NULL;
	ar->cookie_count = 0;

	memset(ar->cookie_mem, 0, sizeof(ar->cookie_mem));

	for (i = 0; i < MAX_COOKIE_NUM; i++)
		ath6kl_free_cookie(ar, &ar->cookie_mem[i]);
}

void ath6kl_cookie_cleanup(struct ath6kl *ar)
{
	ar->cookie_list = NULL;
	ar->cookie_count = 0;
}

void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie)
{
	/* Insert first */

	if (!ar || !cookie)
		return;

	cookie->arc_list_next = ar->cookie_list;
	ar->cookie_list = cookie;
	ar->cookie_count++;
}

/*
 * Read from the hardware through its diagnostic window. No cooperation
 * from the firmware is required for this.
 */
int ath6kl_diag_read32(struct ath6kl *ar, u32 address, u32 *value)
{
	int ret;

	ret = ath6kl_hif_diag_read32(ar, address, value);
	if (ret) {
		ath6kl_warn("failed to read32 through diagnose window: %d\n",
			    ret);
		return ret;
	}

	return 0;
}

/*
 * Write to the ATH6KL through its diagnostic window. No cooperation from
 * the Target is required for this.
 */
int ath6kl_diag_write32(struct ath6kl *ar, u32 address, __le32 value)
{
	int ret;

	ret = ath6kl_hif_diag_write32(ar, address, value);
	if (ret) {
		ath6kl_err("failed to write 0x%x to 0x%x through diagnose window\n",
			   le32_to_cpu(value), address);
		return ret;
	}

	return 0;
}

int ath6kl_diag_read(struct ath6kl *ar, u32 address, void *data, u32 length)
{
	u32 count, *buf = data;
	int ret;

	if (WARN_ON(length % 4))
		return -EINVAL;

	for (count = 0; count < length / 4; count++, address += 4) {
		ret = ath6kl_diag_read32(ar, address, &buf[count]);
		if (ret)
			return ret;
	}

	return 0;
}

int ath6kl_diag_write(struct ath6kl *ar, u32 address, void *data, u32 length)
{
	u32 count;
	__le32 *buf = data;
	int ret;

	if (WARN_ON(length % 4))
		return -EINVAL;

	for (count = 0; count < length / 4; count++, address += 4) {
		ret = ath6kl_diag_write32(ar, address, buf[count]);
		if (ret)
			return ret;
	}

	return 0;
}
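
/*
 * Pull the firmware debug log out of target memory. The log header
 * address is read from the hi_dbglog_hdr host interest item; from there
 * the chain of debug buffers is walked (bounded to 100 iterations) and
 * each valid buffer is handed to ath6kl_debug_fwlog_event().
 */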
int ath6kl_read_fwlogs(struct ath6kl *ar)
{
	struct ath6kl_dbglog_hdr debug_hdr;
	struct ath6kl_dbglog_buf debug_buf;
	u32 address, length, dropped, firstbuf, debug_hdr_addr;
	int ret, loop;
	u8 *buf;

	buf = kmalloc(ATH6KL_FWLOG_PAYLOAD_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	address = TARG_VTOP(ar->target_type,
			    ath6kl_get_hi_item_addr(ar,
						    HI_ITEM(hi_dbglog_hdr)));

	ret = ath6kl_diag_read32(ar, address, &debug_hdr_addr);
	if (ret)
		goto out;

	/* Get the contents of the ring buffer */
	if (debug_hdr_addr == 0) {
		ath6kl_warn("Invalid address for debug_hdr_addr\n");
		ret = -EINVAL;
		goto out;
	}

	address = TARG_VTOP(ar->target_type, debug_hdr_addr);
	ret = ath6kl_diag_read(ar, address, &debug_hdr, sizeof(debug_hdr));
	if (ret)
		goto out;

	address = TARG_VTOP(ar->target_type,
			    le32_to_cpu(debug_hdr.dbuf_addr));
	firstbuf = address;
	dropped = le32_to_cpu(debug_hdr.dropped);
	ret = ath6kl_diag_read(ar, address, &debug_buf, sizeof(debug_buf));
	if (ret)
		goto out;

	loop = 100;

	do {
		address = TARG_VTOP(ar->target_type,
				    le32_to_cpu(debug_buf.buffer_addr));
		length = le32_to_cpu(debug_buf.length);

		if (length != 0 && (le32_to_cpu(debug_buf.length) <=
				    le32_to_cpu(debug_buf.bufsize))) {
			length = ALIGN(length, 4);

			ret = ath6kl_diag_read(ar, address,
					       buf, length);
			if (ret)
				goto out;

			ath6kl_debug_fwlog_event(ar, buf, length);
		}

		address = TARG_VTOP(ar->target_type,
				    le32_to_cpu(debug_buf.next));
		ret = ath6kl_diag_read(ar, address, &debug_buf,
				       sizeof(debug_buf));
		if (ret)
			goto out;

		loop--;

		if (WARN_ON(loop == 0)) {
			ret = -ETIMEDOUT;
			goto out;
		}
	} while (address != firstbuf);

out:
	kfree(buf);

	return ret;
}

/* FIXME: move to a better place, target.h? */
#define AR6003_RESET_CONTROL_ADDRESS 0x00004000
#define AR6004_RESET_CONTROL_ADDRESS 0x00004000

void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
			 bool wait_fot_compltn, bool cold_reset)
{
	int status = 0;
	u32 address;
	__le32 data;

	if (target_type != TARGET_TYPE_AR6003 &&
	    target_type != TARGET_TYPE_AR6004)
		return;

	data = cold_reset ? cpu_to_le32(RESET_CONTROL_COLD_RST) :
			    cpu_to_le32(RESET_CONTROL_MBOX_RST);

	switch (target_type) {
	case TARGET_TYPE_AR6003:
		address = AR6003_RESET_CONTROL_ADDRESS;
		break;
	case TARGET_TYPE_AR6004:
		address = AR6004_RESET_CONTROL_ADDRESS;
		break;
	}

	status = ath6kl_diag_write32(ar, address, data);

	if (status)
		ath6kl_err("failed to reset target\n");
}

static void ath6kl_install_static_wep_keys(struct ath6kl_vif *vif)
{
	u8 index;
	u8 keyusage;

	for (index = 0; index <= WMI_MAX_KEY_INDEX; index++) {
		if (vif->wep_key_list[index].key_len) {
			keyusage = GROUP_USAGE;
			if (index == vif->def_txkey_index)
				keyusage |= TX_USAGE;

			ath6kl_wmi_addkey_cmd(vif->ar->wmi, vif->fw_vif_idx,
					      index,
					      WEP_CRYPT,
					      keyusage,
					      vif->wep_key_list[index].key_len,
					      NULL, 0,
					      vif->wep_key_list[index].key,
					      KEY_OP_INIT_VAL, NULL,
					      NO_SYNC_WMIFLAG);
		}
	}
}

void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel)
{
	struct ath6kl *ar = vif->ar;
	struct ath6kl_req_key *ik;
	int res;
	u8 key_rsc[ATH6KL_KEY_SEQ_LEN];

	ik = &ar->ap_mode_bkey;

	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "AP mode started on %u MHz\n", channel);

	switch (vif->auth_mode) {
	case NONE_AUTH:
		if (vif->prwise_crypto == WEP_CRYPT)
			ath6kl_install_static_wep_keys(vif);
		if (!ik->valid || ik->key_type != WAPI_CRYPT)
			break;
		/* for WAPI, we need to set the delayed group key, fall through */
	case WPA_PSK_AUTH:
	case WPA2_PSK_AUTH:
	case (WPA_PSK_AUTH | WPA2_PSK_AUTH):
		if (!ik->valid)
			break;

		ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
			   "Delayed addkey for the initial group key for AP mode\n");
		memset(key_rsc, 0, sizeof(key_rsc));
		res = ath6kl_wmi_addkey_cmd(
			ar->wmi, vif->fw_vif_idx, ik->key_index, ik->key_type,
			GROUP_USAGE, ik->key_len, key_rsc, ATH6KL_KEY_SEQ_LEN,
			ik->key,
			KEY_OP_INIT_VAL, NULL, SYNC_BOTH_WMIFLAG);
		if (res) {
			ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
				   "Delayed addkey failed: %d\n", res);
		}
		break;
	}

	if (ar->want_ch_switch & (1 << vif->fw_vif_idx)) {
		ar->want_ch_switch &= ~(1 << vif->fw_vif_idx);
		/* we actually don't know the phymode, default to HT20 */
		ath6kl_cfg80211_ch_switch_notify(vif, channel, WMI_11G_HT20);
	}

	ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, NONE_BSS_FILTER, 0);
	set_bit(CONNECTED, &vif->flags);
	netif_carrier_on(vif->ndev);
}
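
/*
 * A new station has associated in AP mode. Locate the WPA/RSN, WPS or
 * WAPI IE in the (re)association request, record the station in the
 * local list and report it to cfg80211 so user space (e.g. hostapd)
 * sees the new peer.
 */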
void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
				u8 keymgmt, u8 ucipher, u8 auth,
				u8 assoc_req_len, u8 *assoc_info, u8 apsd_info)
{
	u8 *ies = NULL, *wpa_ie = NULL, *pos;
	size_t ies_len = 0;
	struct station_info sinfo;

	ath6kl_dbg(ATH6KL_DBG_TRC, "new station %pM aid=%d\n", mac_addr, aid);

	if (assoc_req_len > sizeof(struct ieee80211_hdr_3addr)) {
		struct ieee80211_mgmt *mgmt =
			(struct ieee80211_mgmt *) assoc_info;
		if (ieee80211_is_assoc_req(mgmt->frame_control) &&
		    assoc_req_len >= sizeof(struct ieee80211_hdr_3addr) +
		    sizeof(mgmt->u.assoc_req)) {
			ies = mgmt->u.assoc_req.variable;
			ies_len = assoc_info + assoc_req_len - ies;
		} else if (ieee80211_is_reassoc_req(mgmt->frame_control) &&
			   assoc_req_len >= sizeof(struct ieee80211_hdr_3addr)
			   + sizeof(mgmt->u.reassoc_req)) {
			ies = mgmt->u.reassoc_req.variable;
			ies_len = assoc_info + assoc_req_len - ies;
		}
	}

	pos = ies;
	while (pos && pos + 1 < ies + ies_len) {
		if (pos + 2 + pos[1] > ies + ies_len)
			break;
		if (pos[0] == WLAN_EID_RSN)
			wpa_ie = pos; /* RSN IE */
		else if (pos[0] == WLAN_EID_VENDOR_SPECIFIC &&
			 pos[1] >= 4 &&
			 pos[2] == 0x00 && pos[3] == 0x50 && pos[4] == 0xf2) {
			if (pos[5] == 0x01)
				wpa_ie = pos; /* WPA IE */
			else if (pos[5] == 0x04) {
				wpa_ie = pos; /* WPS IE */
				break; /* overrides WPA/RSN IE */
			}
		} else if (pos[0] == 0x44 && wpa_ie == NULL) {
			/*
			 * Note: WAPI Parameter Set IE re-uses Element ID that
			 * was officially allocated for BSS AC Access Delay. As
			 * such, we need to be a bit more careful on when
			 * parsing the frame. However, BSS AC Access Delay
			 * element is not supposed to be included in
			 * (Re)Association Request frames, so this should not
			 * cause problems.
			 */
			wpa_ie = pos; /* WAPI IE */
			break;
		}
		pos += 2 + pos[1];
	}

	ath6kl_add_new_sta(vif, mac_addr, aid, wpa_ie,
			   wpa_ie ? 2 + wpa_ie[1] : 0,
			   keymgmt, ucipher, auth, apsd_info);

	/* send event to application */
	memset(&sinfo, 0, sizeof(sinfo));

	/* TODO: sinfo.generation */

	sinfo.assoc_req_ies = ies;
	sinfo.assoc_req_ies_len = ies_len;
	sinfo.filled |= STATION_INFO_ASSOC_REQ_IES;

	cfg80211_new_sta(vif->ndev, mac_addr, &sinfo, GFP_KERNEL);

	netif_wake_queue(vif->ndev);
}

void disconnect_timer_handler(unsigned long ptr)
{
	struct net_device *dev = (struct net_device *)ptr;
	struct ath6kl_vif *vif = netdev_priv(dev);

	ath6kl_init_profile_info(vif);
	ath6kl_disconnect(vif);
}

void ath6kl_disconnect(struct ath6kl_vif *vif)
{
	if (test_bit(CONNECTED, &vif->flags) ||
	    test_bit(CONNECT_PEND, &vif->flags)) {
		ath6kl_wmi_disconnect_cmd(vif->ar->wmi, vif->fw_vif_idx);
		/*
		 * Disconnect command is issued, clear the connect pending
		 * flag. The connected flag will be cleared in
		 * disconnect event notification.
		 */
		clear_bit(CONNECT_PEND, &vif->flags);
	}
}

/* WMI Event handlers */
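
/*
 * The target reports its MAC address, firmware/ABI versions and PHY
 * capabilities in the WMI ready event. Record them and wake up the
 * thread waiting on WMI_READY during initialization.
 */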
void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver,
			enum wmi_phy_cap cap)
{
	struct ath6kl *ar = devt;

	memcpy(ar->mac_addr, datap, ETH_ALEN);

	ath6kl_dbg(ATH6KL_DBG_BOOT,
		   "ready event mac addr %pM sw_ver 0x%x abi_ver 0x%x cap 0x%x\n",
		   ar->mac_addr, sw_ver, abi_ver, cap);

	ar->version.wlan_ver = sw_ver;
	ar->version.abi_ver = abi_ver;
	ar->hw.cap = cap;

	if (strlen(ar->wiphy->fw_version) == 0) {
		snprintf(ar->wiphy->fw_version,
			 sizeof(ar->wiphy->fw_version),
			 "%u.%u.%u.%u",
			 (ar->version.wlan_ver & 0xf0000000) >> 28,
			 (ar->version.wlan_ver & 0x0f000000) >> 24,
			 (ar->version.wlan_ver & 0x00ff0000) >> 16,
			 (ar->version.wlan_ver & 0x0000ffff));
	}

	/* indicate to the waiting thread that the ready event was received */
	set_bit(WMI_READY, &ar->flag);
	wake_up(&ar->event_wq);
}

void ath6kl_scan_complete_evt(struct ath6kl_vif *vif, int status)
{
	struct ath6kl *ar = vif->ar;
	bool aborted = false;

	if (status != WMI_SCAN_STATUS_SUCCESS)
		aborted = true;

	ath6kl_cfg80211_scan_complete_event(vif, aborted);

	if (!ar->usr_bss_filter) {
		clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
		ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
					 NONE_BSS_FILTER, 0);
	}

	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "scan complete: %d\n", status);
}

static int ath6kl_commit_ch_switch(struct ath6kl_vif *vif, u16 channel)
{
	struct ath6kl *ar = vif->ar;

	vif->profile.ch = cpu_to_le16(channel);

	switch (vif->nw_type) {
	case AP_NETWORK:
		return ath6kl_wmi_ap_profile_commit(ar->wmi, vif->fw_vif_idx,
						    &vif->profile);
	default:
		ath6kl_err("won't switch channels nw_type=%d\n", vif->nw_type);
		return -ENOTSUPP;
	}
}

static void ath6kl_check_ch_switch(struct ath6kl *ar, u16 channel)
{
	struct ath6kl_vif *vif;
	int res = 0;

	if (!ar->want_ch_switch)
		return;

	spin_lock_bh(&ar->list_lock);
	list_for_each_entry(vif, &ar->vif_list, list) {
		if (ar->want_ch_switch & (1 << vif->fw_vif_idx))
			res = ath6kl_commit_ch_switch(vif, channel);

		if (res)
			ath6kl_err("channel switch failed nw_type %d res %d\n",
				   vif->nw_type, res);
	}
	spin_unlock_bh(&ar->list_lock);
}
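
/*
 * Handle a WMI connect event: report the connection to cfg80211,
 * remember the BSSID and channel, reset aggregation state and re-arm
 * the BSS filter unless user space controls it.
 */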
void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel, u8 *bssid,
			  u16 listen_int, u16 beacon_int,
			  enum network_type net_type, u8 beacon_ie_len,
			  u8 assoc_req_len, u8 assoc_resp_len,
			  u8 *assoc_info)
{
	struct ath6kl *ar = vif->ar;

	ath6kl_cfg80211_connect_event(vif, channel, bssid,
				      listen_int, beacon_int,
				      net_type, beacon_ie_len,
				      assoc_req_len, assoc_resp_len,
				      assoc_info);

	memcpy(vif->bssid, bssid, sizeof(vif->bssid));
	vif->bss_ch = channel;

	if (vif->nw_type == INFRA_NETWORK) {
		ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx,
					      vif->listen_intvl_t, 0);
		ath6kl_check_ch_switch(ar, channel);
	}

	netif_wake_queue(vif->ndev);

	/* Update connect & link status atomically */
	spin_lock_bh(&vif->if_lock);
	set_bit(CONNECTED, &vif->flags);
	clear_bit(CONNECT_PEND, &vif->flags);
	netif_carrier_on(vif->ndev);
	spin_unlock_bh(&vif->if_lock);

	aggr_reset_state(vif->aggr_cntxt->aggr_conn);
	vif->reconnect_flag = 0;

	if ((vif->nw_type == ADHOC_NETWORK) && ar->ibss_ps_enable) {
		memset(ar->node_map, 0, sizeof(ar->node_map));
		ar->node_num = 0;
		ar->next_ep_id = ENDPOINT_2;
	}

	if (!ar->usr_bss_filter) {
		set_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
		ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
					 CURRENT_BSS_FILTER, 0);
	}
}

void ath6kl_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, bool ismcast)
{
	struct ath6kl_sta *sta;
	struct ath6kl *ar = vif->ar;
	u8 tsc[6];

	/*
	 * For AP case, keyid will have aid of STA which sent pkt with
	 * MIC error. Use this aid to get MAC & send it to hostapd.
	 */
	if (vif->nw_type == AP_NETWORK) {
		sta = ath6kl_find_sta_by_aid(ar, (keyid >> 2));
		if (!sta)
			return;

		ath6kl_dbg(ATH6KL_DBG_TRC,
			   "ap tkip mic error received from aid=%d\n", keyid);

		memset(tsc, 0, sizeof(tsc)); /* FIX: get correct TSC */
		cfg80211_michael_mic_failure(vif->ndev, sta->mac,
					     NL80211_KEYTYPE_PAIRWISE, keyid,
					     tsc, GFP_KERNEL);
	} else {
		ath6kl_cfg80211_tkip_micerr_event(vif, keyid, ismcast);
	}
}
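
/*
 * Fold one WMI target statistics report into the vif's running
 * counters, converting fields from the target's little-endian layout,
 * and wake any thread waiting for a pending stats update.
 */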
static void ath6kl_update_target_stats(struct ath6kl_vif *vif, u8 *ptr, u32 len)
{
	struct wmi_target_stats *tgt_stats =
		(struct wmi_target_stats *) ptr;
	struct ath6kl *ar = vif->ar;
	struct target_stats *stats = &vif->target_stats;
	struct tkip_ccmp_stats *ccmp_stats;
	u8 ac;

	if (len < sizeof(*tgt_stats))
		return;

	ath6kl_dbg(ATH6KL_DBG_TRC, "updating target stats\n");

	stats->tx_pkt += le32_to_cpu(tgt_stats->stats.tx.pkt);
	stats->tx_byte += le32_to_cpu(tgt_stats->stats.tx.byte);
	stats->tx_ucast_pkt += le32_to_cpu(tgt_stats->stats.tx.ucast_pkt);
	stats->tx_ucast_byte += le32_to_cpu(tgt_stats->stats.tx.ucast_byte);
	stats->tx_mcast_pkt += le32_to_cpu(tgt_stats->stats.tx.mcast_pkt);
	stats->tx_mcast_byte += le32_to_cpu(tgt_stats->stats.tx.mcast_byte);
	stats->tx_bcast_pkt += le32_to_cpu(tgt_stats->stats.tx.bcast_pkt);
	stats->tx_bcast_byte += le32_to_cpu(tgt_stats->stats.tx.bcast_byte);
	stats->tx_rts_success_cnt +=
		le32_to_cpu(tgt_stats->stats.tx.rts_success_cnt);

	for (ac = 0; ac < WMM_NUM_AC; ac++)
		stats->tx_pkt_per_ac[ac] +=
			le32_to_cpu(tgt_stats->stats.tx.pkt_per_ac[ac]);

	stats->tx_err += le32_to_cpu(tgt_stats->stats.tx.err);
	stats->tx_fail_cnt += le32_to_cpu(tgt_stats->stats.tx.fail_cnt);
	stats->tx_retry_cnt += le32_to_cpu(tgt_stats->stats.tx.retry_cnt);
	stats->tx_mult_retry_cnt +=
		le32_to_cpu(tgt_stats->stats.tx.mult_retry_cnt);
	stats->tx_rts_fail_cnt +=
		le32_to_cpu(tgt_stats->stats.tx.rts_fail_cnt);
	stats->tx_ucast_rate =
		ath6kl_wmi_get_rate(a_sle32_to_cpu(tgt_stats->stats.tx.ucast_rate));

	stats->rx_pkt += le32_to_cpu(tgt_stats->stats.rx.pkt);
	stats->rx_byte += le32_to_cpu(tgt_stats->stats.rx.byte);
	stats->rx_ucast_pkt += le32_to_cpu(tgt_stats->stats.rx.ucast_pkt);
	stats->rx_ucast_byte += le32_to_cpu(tgt_stats->stats.rx.ucast_byte);
	stats->rx_mcast_pkt += le32_to_cpu(tgt_stats->stats.rx.mcast_pkt);
	stats->rx_mcast_byte += le32_to_cpu(tgt_stats->stats.rx.mcast_byte);
	stats->rx_bcast_pkt += le32_to_cpu(tgt_stats->stats.rx.bcast_pkt);
	stats->rx_bcast_byte += le32_to_cpu(tgt_stats->stats.rx.bcast_byte);
	stats->rx_frgment_pkt += le32_to_cpu(tgt_stats->stats.rx.frgment_pkt);
	stats->rx_err += le32_to_cpu(tgt_stats->stats.rx.err);
	stats->rx_crc_err += le32_to_cpu(tgt_stats->stats.rx.crc_err);
	stats->rx_key_cache_miss +=
		le32_to_cpu(tgt_stats->stats.rx.key_cache_miss);
	stats->rx_decrypt_err += le32_to_cpu(tgt_stats->stats.rx.decrypt_err);
	stats->rx_dupl_frame += le32_to_cpu(tgt_stats->stats.rx.dupl_frame);
	stats->rx_ucast_rate =
		ath6kl_wmi_get_rate(a_sle32_to_cpu(tgt_stats->stats.rx.ucast_rate));

	ccmp_stats = &tgt_stats->stats.tkip_ccmp_stats;

	stats->tkip_local_mic_fail +=
		le32_to_cpu(ccmp_stats->tkip_local_mic_fail);
	stats->tkip_cnter_measures_invoked +=
		le32_to_cpu(ccmp_stats->tkip_cnter_measures_invoked);
	stats->tkip_fmt_err += le32_to_cpu(ccmp_stats->tkip_fmt_err);

	stats->ccmp_fmt_err += le32_to_cpu(ccmp_stats->ccmp_fmt_err);
	stats->ccmp_replays += le32_to_cpu(ccmp_stats->ccmp_replays);

	stats->pwr_save_fail_cnt +=
		le32_to_cpu(tgt_stats->pm_stats.pwr_save_failure_cnt);
	stats->noise_floor_calib =
		a_sle32_to_cpu(tgt_stats->noise_floor_calib);

	stats->cs_bmiss_cnt +=
		le32_to_cpu(tgt_stats->cserv_stats.cs_bmiss_cnt);
	stats->cs_low_rssi_cnt +=
		le32_to_cpu(tgt_stats->cserv_stats.cs_low_rssi_cnt);
	stats->cs_connect_cnt +=
		le16_to_cpu(tgt_stats->cserv_stats.cs_connect_cnt);
	stats->cs_discon_cnt +=
		le16_to_cpu(tgt_stats->cserv_stats.cs_discon_cnt);

	stats->cs_ave_beacon_rssi =
		a_sle16_to_cpu(tgt_stats->cserv_stats.cs_ave_beacon_rssi);

	stats->cs_last_roam_msec =
		tgt_stats->cserv_stats.cs_last_roam_msec;
	stats->cs_snr = tgt_stats->cserv_stats.cs_snr;
	stats->cs_rssi = a_sle16_to_cpu(tgt_stats->cserv_stats.cs_rssi);

	stats->lq_val = le32_to_cpu(tgt_stats->lq_val);

	stats->wow_pkt_dropped +=
		le32_to_cpu(tgt_stats->wow_stats.wow_pkt_dropped);
	stats->wow_host_pkt_wakeups +=
		tgt_stats->wow_stats.wow_host_pkt_wakeups;
	stats->wow_host_evt_wakeups +=
		tgt_stats->wow_stats.wow_host_evt_wakeups;
	stats->wow_evt_discarded +=
		le16_to_cpu(tgt_stats->wow_stats.wow_evt_discarded);

	stats->arp_received = le32_to_cpu(tgt_stats->arp_stats.arp_received);
	stats->arp_replied = le32_to_cpu(tgt_stats->arp_stats.arp_replied);
	stats->arp_matched = le32_to_cpu(tgt_stats->arp_stats.arp_matched);

	if (test_bit(STATS_UPDATE_PEND, &vif->flags)) {
		clear_bit(STATS_UPDATE_PEND, &vif->flags);
		wake_up(&ar->event_wq);
	}
}

static void ath6kl_add_le32(__le32 *var, __le32 val)
{
	*var = cpu_to_le32(le32_to_cpu(*var) + le32_to_cpu(val));
}

void ath6kl_tgt_stats_event(struct ath6kl_vif *vif, u8 *ptr, u32 len)
{
	struct wmi_ap_mode_stat *p = (struct wmi_ap_mode_stat *) ptr;
	struct ath6kl *ar = vif->ar;
	struct wmi_ap_mode_stat *ap = &ar->ap_stats;
	struct wmi_per_sta_stat *st_ap, *st_p;
	u8 ac;

	if (vif->nw_type == AP_NETWORK) {
		if (len < sizeof(*p))
			return;

		for (ac = 0; ac < AP_MAX_NUM_STA; ac++) {
			st_ap = &ap->sta[ac];
			st_p = &p->sta[ac];

			ath6kl_add_le32(&st_ap->tx_bytes, st_p->tx_bytes);
			ath6kl_add_le32(&st_ap->tx_pkts, st_p->tx_pkts);
			ath6kl_add_le32(&st_ap->tx_error, st_p->tx_error);
			ath6kl_add_le32(&st_ap->tx_discard, st_p->tx_discard);
			ath6kl_add_le32(&st_ap->rx_bytes, st_p->rx_bytes);
			ath6kl_add_le32(&st_ap->rx_pkts, st_p->rx_pkts);
			ath6kl_add_le32(&st_ap->rx_error, st_p->rx_error);
			ath6kl_add_le32(&st_ap->rx_discard, st_p->rx_discard);
		}
	} else {
		ath6kl_update_target_stats(vif, ptr, len);
	}
}

void ath6kl_wakeup_event(void *dev)
{
	struct ath6kl *ar = (struct ath6kl *) dev;

	wake_up(&ar->event_wq);
}

void ath6kl_txpwr_rx_evt(void *devt, u8 tx_pwr)
{
	struct ath6kl *ar = (struct ath6kl *) devt;

	ar->tx_pwr = tx_pwr;
	wake_up(&ar->event_wq);
}
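
/*
 * A PS-Poll was received from a station in power save. Send out one
 * queued management or data frame for that station and clear its TIM
 * bit (PVB) once both power-save queues are empty.
 */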
void ath6kl_pspoll_event(struct ath6kl_vif *vif, u8 aid)
{
	struct ath6kl_sta *conn;
	struct sk_buff *skb;
	bool psq_empty = false;
	struct ath6kl *ar = vif->ar;
	struct ath6kl_mgmt_buff *mgmt_buf;

	conn = ath6kl_find_sta_by_aid(ar, aid);
	if (!conn)
		return;

	/*
	 * Send out a packet queued on ps queue. When the ps queue
	 * becomes empty update the PVB for this station.
	 */
	spin_lock_bh(&conn->psq_lock);
	psq_empty = skb_queue_empty(&conn->psq) && (conn->mgmt_psq_len == 0);
	spin_unlock_bh(&conn->psq_lock);

	if (psq_empty)
		/* TODO: Send out a NULL data frame */
		return;

	spin_lock_bh(&conn->psq_lock);
	if (conn->mgmt_psq_len > 0) {
		mgmt_buf = list_first_entry(&conn->mgmt_psq,
					    struct ath6kl_mgmt_buff, list);
		list_del(&mgmt_buf->list);
		conn->mgmt_psq_len--;
		spin_unlock_bh(&conn->psq_lock);

		conn->sta_flags |= STA_PS_POLLED;
		ath6kl_wmi_send_mgmt_cmd(ar->wmi, vif->fw_vif_idx,
					 mgmt_buf->id, mgmt_buf->freq,
					 mgmt_buf->wait, mgmt_buf->buf,
					 mgmt_buf->len, mgmt_buf->no_cck);
		conn->sta_flags &= ~STA_PS_POLLED;
		kfree(mgmt_buf);
	} else {
		skb = skb_dequeue(&conn->psq);
		spin_unlock_bh(&conn->psq_lock);

		conn->sta_flags |= STA_PS_POLLED;
		ath6kl_data_tx(skb, vif->ndev);
		conn->sta_flags &= ~STA_PS_POLLED;
	}

	spin_lock_bh(&conn->psq_lock);
	psq_empty = skb_queue_empty(&conn->psq) && (conn->mgmt_psq_len == 0);
	spin_unlock_bh(&conn->psq_lock);

	if (psq_empty)
		ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, conn->aid, 0);
}
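
/*
 * DTIM expiry in AP mode: flush all frames queued on the multicast
 * power-save queue and then clear the multicast bit in the TIM IE.
 */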
void ath6kl_dtimexpiry_event(struct ath6kl_vif *vif)
{
	bool mcastq_empty = false;
	struct sk_buff *skb;
	struct ath6kl *ar = vif->ar;

	/*
	 * If there are no associated STAs, ignore the DTIM expiry event.
	 * There can be potential race conditions where the last associated
	 * STA may disconnect & before the host could clear the 'Indicate
	 * DTIM' request to the firmware, the firmware would have just
	 * indicated a DTIM expiry event. The race is between 'clear DTIM
	 * expiry cmd' going from the host to the firmware & the DTIM
	 * expiry event happening from the firmware to the host.
	 */
	if (!ar->sta_list_index)
		return;

	spin_lock_bh(&ar->mcastpsq_lock);
	mcastq_empty = skb_queue_empty(&ar->mcastpsq);
	spin_unlock_bh(&ar->mcastpsq_lock);

	if (mcastq_empty)
		return;

	/* set the STA flag to dtim_expired for the frame to go out */
	set_bit(DTIM_EXPIRED, &vif->flags);

	spin_lock_bh(&ar->mcastpsq_lock);
	while ((skb = skb_dequeue(&ar->mcastpsq)) != NULL) {
		spin_unlock_bh(&ar->mcastpsq_lock);

		ath6kl_data_tx(skb, vif->ndev);

		spin_lock_bh(&ar->mcastpsq_lock);
	}
	spin_unlock_bh(&ar->mcastpsq_lock);

	clear_bit(DTIM_EXPIRED, &vif->flags);

	/* clear the LSB of the BitMapCtl field of the TIM IE */
	ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, MCAST_AID, 0);
}
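
/*
 * Handle a WMI disconnect event. In AP mode this means a station left
 * (or all stations, for a broadcast address); otherwise tear down the
 * local connection state and decide, based on the disconnect reason,
 * whether the target will keep trying to reconnect.
 */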
void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason, u8 *bssid,
			     u8 assoc_resp_len, u8 *assoc_info,
			     u16 prot_reason_status)
{
	struct ath6kl *ar = vif->ar;

	if (vif->nw_type == AP_NETWORK) {
		/* disconnect due to other STA vif switching channels */
		if (reason == BSS_DISCONNECTED &&
		    prot_reason_status == WMI_AP_REASON_STA_ROAM)
			ar->want_ch_switch |= 1 << vif->fw_vif_idx;

		if (!ath6kl_remove_sta(ar, bssid, prot_reason_status))
			return;

		/* if no more associated STAs, empty the mcast PS q */
		if (ar->sta_list_index == 0) {
			spin_lock_bh(&ar->mcastpsq_lock);
			skb_queue_purge(&ar->mcastpsq);
			spin_unlock_bh(&ar->mcastpsq_lock);

			/* clear the LSB of the TIM IE's BitMapCtl field */
			if (test_bit(WMI_READY, &ar->flag))
				ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
						       MCAST_AID, 0);
		}

		if (!is_broadcast_ether_addr(bssid)) {
			/* send event to application */
			cfg80211_del_sta(vif->ndev, bssid, GFP_KERNEL);
		}

		if (memcmp(vif->ndev->dev_addr, bssid, ETH_ALEN) == 0) {
			memset(vif->wep_key_list, 0, sizeof(vif->wep_key_list));
			clear_bit(CONNECTED, &vif->flags);
		}
		return;
	}

	ath6kl_cfg80211_disconnect_event(vif, reason, bssid,
					 assoc_resp_len, assoc_info,
					 prot_reason_status);

	aggr_reset_state(vif->aggr_cntxt->aggr_conn);

	del_timer(&vif->disconnect_timer);

	ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "disconnect reason is %d\n", reason);

	/*
	 * If the event is due to a disconnect cmd from the host, only then
	 * will the target stop trying to connect. Under any other
	 * condition, the target keeps trying to connect.
	 */
	if (reason == DISCONNECT_CMD) {
		if (!ar->usr_bss_filter && test_bit(WMI_READY, &ar->flag))
			ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
						 NONE_BSS_FILTER, 0);
	} else {
		set_bit(CONNECT_PEND, &vif->flags);
		if (((reason == ASSOC_FAILED) &&
		     (prot_reason_status == 0x11)) ||
		    ((reason == ASSOC_FAILED) && (prot_reason_status == 0x0) &&
		     (vif->reconnect_flag == 1))) {
			set_bit(CONNECTED, &vif->flags);
			return;
		}
	}

	/* update connect & link status atomically */
	spin_lock_bh(&vif->if_lock);
	clear_bit(CONNECTED, &vif->flags);
	netif_carrier_off(vif->ndev);
	spin_unlock_bh(&vif->if_lock);

	if ((reason != CSERV_DISCONNECT) || (vif->reconnect_flag != 1))
		vif->reconnect_flag = 0;

	if (reason != CSERV_DISCONNECT)
		ar->user_key_ctrl = 0;

	netif_stop_queue(vif->ndev);
	memset(vif->bssid, 0, sizeof(vif->bssid));
	vif->bss_ch = 0;

	ath6kl_tx_data_cleanup(ar);
}

struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar)
{
	struct ath6kl_vif *vif;

	spin_lock_bh(&ar->list_lock);
	if (list_empty(&ar->vif_list)) {
		spin_unlock_bh(&ar->list_lock);
		return NULL;
	}

	vif = list_first_entry(&ar->vif_list, struct ath6kl_vif, list);

	spin_unlock_bh(&ar->list_lock);

	return vif;
}

static int ath6kl_open(struct net_device *dev)
{
	struct ath6kl_vif *vif = netdev_priv(dev);

	set_bit(WLAN_ENABLED, &vif->flags);

	if (test_bit(CONNECTED, &vif->flags)) {
		netif_carrier_on(dev);
		netif_wake_queue(dev);
	} else {
		netif_carrier_off(dev);
	}

	return 0;
}

static int ath6kl_close(struct net_device *dev)
{
	struct ath6kl_vif *vif = netdev_priv(dev);

	netif_stop_queue(dev);

	ath6kl_cfg80211_stop(vif);

	clear_bit(WLAN_ENABLED, &vif->flags);

	return 0;
}

static struct net_device_stats *ath6kl_get_stats(struct net_device *dev)
{
	struct ath6kl_vif *vif = netdev_priv(dev);

	return &vif->net_stats;
}

static int ath6kl_set_features(struct net_device *dev,
			       netdev_features_t features)
{
	struct ath6kl_vif *vif = netdev_priv(dev);
	struct ath6kl *ar = vif->ar;
	int err = 0;

	if ((features & NETIF_F_RXCSUM) &&
	    (ar->rx_meta_ver != WMI_META_VERSION_2)) {
		ar->rx_meta_ver = WMI_META_VERSION_2;
		err = ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi,
							 vif->fw_vif_idx,
							 ar->rx_meta_ver, 0, 0);
		if (err) {
			dev->features = features & ~NETIF_F_RXCSUM;
			return err;
		}
	} else if (!(features & NETIF_F_RXCSUM) &&
		   (ar->rx_meta_ver == WMI_META_VERSION_2)) {
		ar->rx_meta_ver = 0;
		err = ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi,
							 vif->fw_vif_idx,
							 ar->rx_meta_ver, 0, 0);
		if (err) {
			dev->features = features | NETIF_F_RXCSUM;
			return err;
		}
	}

	return err;
}
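
/*
 * ndo_set_rx_mode handler: decide whether the firmware's
 * "multicast-all" filter should be enabled and, when individual
 * filtering is in use, synchronize the driver's multicast filter list
 * with the addresses currently configured on the net_device.
 */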
static void ath6kl_set_multicast_list(struct net_device *ndev)
{
	struct ath6kl_vif *vif = netdev_priv(ndev);
	bool mc_all_on = false;
	int mc_count = netdev_mc_count(ndev);
	struct netdev_hw_addr *ha;
	bool found;
	struct ath6kl_mc_filter *mc_filter, *tmp;
	struct list_head mc_filter_new;
	int ret;

	if (!test_bit(WMI_READY, &vif->ar->flag) ||
	    !test_bit(WLAN_ENABLED, &vif->flags))
		return;

	/* Enable multicast-all filter. */
	mc_all_on = !!(ndev->flags & IFF_PROMISC) ||
		    !!(ndev->flags & IFF_ALLMULTI) ||
		    !!(mc_count > ATH6K_MAX_MC_FILTERS_PER_LIST);

	if (mc_all_on)
		set_bit(NETDEV_MCAST_ALL_ON, &vif->flags);
	else
		clear_bit(NETDEV_MCAST_ALL_ON, &vif->flags);

	if (test_bit(ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER,
		     vif->ar->fw_capabilities)) {
		mc_all_on = mc_all_on || (vif->ar->state == ATH6KL_STATE_ON);
	}

	if (!(ndev->flags & IFF_MULTICAST)) {
		mc_all_on = false;
		set_bit(NETDEV_MCAST_ALL_OFF, &vif->flags);
	} else {
		clear_bit(NETDEV_MCAST_ALL_OFF, &vif->flags);
	}

	/* Enable/disable "multicast-all" filter*/
	ath6kl_dbg(ATH6KL_DBG_TRC, "%s multicast-all filter\n",
		   mc_all_on ? "enabling" : "disabling");

	ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi, vif->fw_vif_idx,
					  mc_all_on);
	if (ret) {
		ath6kl_warn("Failed to %s multicast-all receive\n",
			    mc_all_on ? "enable" : "disable");
		return;
	}

	if (test_bit(NETDEV_MCAST_ALL_ON, &vif->flags))
		return;

	/* Keep the driver and firmware mcast list in sync. */
	list_for_each_entry_safe(mc_filter, tmp, &vif->mc_filter, list) {
		found = false;
		netdev_for_each_mc_addr(ha, ndev) {
			if (memcmp(ha->addr, mc_filter->hw_addr,
				   ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE) == 0) {
				found = true;
				break;
			}
		}

		if (!found) {
			/*
			 * Delete the filter which was previously set
			 * but not in the new request.
			 */
			ath6kl_dbg(ATH6KL_DBG_TRC,
				   "Removing %pM from multicast filter\n",
				   mc_filter->hw_addr);
			ret = ath6kl_wmi_add_del_mcast_filter_cmd(vif->ar->wmi,
					vif->fw_vif_idx, mc_filter->hw_addr,
					false);
			if (ret) {
				ath6kl_warn("Failed to remove multicast filter:%pM\n",
					    mc_filter->hw_addr);
				return;
			}

			list_del(&mc_filter->list);
			kfree(mc_filter);
		}
	}

	INIT_LIST_HEAD(&mc_filter_new);

	netdev_for_each_mc_addr(ha, ndev) {
		found = false;
		list_for_each_entry(mc_filter, &vif->mc_filter, list) {
			if (memcmp(ha->addr, mc_filter->hw_addr,
				   ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE) == 0) {
				found = true;
				break;
			}
		}

		if (!found) {
			mc_filter = kzalloc(sizeof(struct ath6kl_mc_filter),
					    GFP_ATOMIC);
			if (!mc_filter) {
				WARN_ON(1);
				goto out;
			}

			memcpy(mc_filter->hw_addr, ha->addr,
			       ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE);
			/* Set the multicast filter */
			ath6kl_dbg(ATH6KL_DBG_TRC,
				   "Adding %pM to multicast filter list\n",
				   mc_filter->hw_addr);
			ret = ath6kl_wmi_add_del_mcast_filter_cmd(vif->ar->wmi,
					vif->fw_vif_idx, mc_filter->hw_addr,
					true);
			if (ret) {
				ath6kl_warn("Failed to add multicast filter :%pM\n",
					    mc_filter->hw_addr);
				kfree(mc_filter);
				goto out;
			}

			list_add_tail(&mc_filter->list, &mc_filter_new);
		}
	}

out:
	list_splice_tail(&mc_filter_new, &vif->mc_filter);
}

static const struct net_device_ops ath6kl_netdev_ops = {
	.ndo_open               = ath6kl_open,
	.ndo_stop               = ath6kl_close,
	.ndo_start_xmit         = ath6kl_data_tx,
	.ndo_get_stats          = ath6kl_get_stats,
	.ndo_set_features       = ath6kl_set_features,
	.ndo_set_rx_mode        = ath6kl_set_multicast_list,
};
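
/*
 * Set up netdev ops and reserve enough headroom for the LLC/SNAP,
 * WMI and HTC headers prepended on transmit.
 */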
void init_netdev(struct net_device *dev)
{
	dev->netdev_ops = &ath6kl_netdev_ops;
	dev->destructor = free_netdev;
	dev->watchdog_timeo = ATH6KL_TX_TIMEOUT;

	dev->needed_headroom = ETH_HLEN;
	dev->needed_headroom += sizeof(struct ath6kl_llc_snap_hdr) +
				sizeof(struct wmi_data_hdr) + HTC_HDR_LENGTH
				+ WMI_MAX_TX_META_SZ + ATH6KL_HTC_ALIGN_BYTES;

	dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

	return;
}