/*
 * Copyright (c) 2010-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include "ath9k.h"
#include "mci.h"

static const u8 ath_mci_duty_cycle[] = { 55, 50, 60, 70, 80, 85, 90, 95, 98 };

static struct ath_mci_profile_info *
ath_mci_find_profile(struct ath_mci_profile *mci,
		     struct ath_mci_profile_info *info)
{
	struct ath_mci_profile_info *entry;

	if (list_empty(&mci->info))
		return NULL;

	list_for_each_entry(entry, &mci->info, list) {
		if (entry->conn_handle == info->conn_handle)
			return entry;
	}
	return NULL;
}

static bool ath_mci_add_profile(struct ath_common *common,
				struct ath_mci_profile *mci,
				struct ath_mci_profile_info *info)
{
	struct ath_mci_profile_info *entry;

	if ((mci->num_sco == ATH_MCI_MAX_SCO_PROFILE) &&
	    (info->type == MCI_GPM_COEX_PROFILE_VOICE))
		return false;

	if (((NUM_PROF(mci) - mci->num_sco) == ATH_MCI_MAX_ACL_PROFILE) &&
	    (info->type != MCI_GPM_COEX_PROFILE_VOICE))
		return false;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return false;

	memcpy(entry, info, 10);
	INC_PROF(mci, info);
	list_add_tail(&entry->list, &mci->info);

	return true;
}

static void ath_mci_del_profile(struct ath_common *common,
				struct ath_mci_profile *mci,
				struct ath_mci_profile_info *entry)
{
	if (!entry)
		return;

	DEC_PROF(mci, entry);
	list_del(&entry->list);
	kfree(entry);
}

void ath_mci_flush_profile(struct ath_mci_profile *mci)
{
	struct ath_mci_profile_info *info, *tinfo;

	mci->aggr_limit = 0;

	if (list_empty(&mci->info))
		return;

	list_for_each_entry_safe(info, tinfo, &mci->info, list) {
		list_del(&info->list);
		DEC_PROF(mci, info);
		kfree(info);
	}
}

static void ath_mci_adjust_aggr_limit(struct ath_btcoex *btcoex)
{
	struct ath_mci_profile *mci = &btcoex->mci;
	u32 wlan_airtime = btcoex->btcoex_period *
				(100 - btcoex->duty_cycle) / 100;

	/*
	 * Scale: wlan_airtime is in ms, aggr_limit is in 0.25 ms.
	 * When wlan_airtime is less than 4 ms, the aggregation limit has to
	 * be adjusted to half of wlan_airtime to ensure that the aggregation
	 * can fit without collision with BT traffic.
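	 *
	 * For example (illustrative numbers, not necessarily the defaults):
	 * with btcoex_period = 40 ms and duty_cycle = 90, wlan_airtime is
	 * 40 * (100 - 90) / 100 = 4 ms, so aggr_limit becomes 2 * 4 = 8
	 * units of 0.25 ms, i.e. 2 ms, half of the WLAN airtime.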
	 */
	if ((wlan_airtime <= 4) &&
	    (!mci->aggr_limit || (mci->aggr_limit > (2 * wlan_airtime))))
		mci->aggr_limit = 2 * wlan_airtime;
}

static void ath_mci_update_scheme(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_btcoex *btcoex = &sc->btcoex;
	struct ath_mci_profile *mci = &btcoex->mci;
	struct ath9k_hw_mci *mci_hw = &sc->sc_ah->btcoex_hw.mci;
	struct ath_mci_profile_info *info;
	u32 num_profile = NUM_PROF(mci);

	if (mci_hw->config & ATH_MCI_CONFIG_DISABLE_TUNING)
		goto skip_tuning;

	btcoex->duty_cycle = ath_mci_duty_cycle[num_profile];

	if (num_profile == 1) {
		info = list_first_entry(&mci->info,
					struct ath_mci_profile_info,
					list);
		if (mci->num_sco) {
			if (info->T == 12)
				mci->aggr_limit = 8;
			else if (info->T == 6) {
				mci->aggr_limit = 6;
				btcoex->duty_cycle = 30;
			}
			ath_dbg(common, MCI,
				"Single SCO, aggregation limit %d 1/4 ms\n",
				mci->aggr_limit);
		} else if (mci->num_pan || mci->num_other_acl) {
			/*
			 * For a single PAN/FTP profile, allocate 35% for BT
			 * to improve WLAN throughput.
			 */
			btcoex->duty_cycle = 35;
			btcoex->btcoex_period = 53;
			ath_dbg(common, MCI,
				"Single PAN/FTP bt period %d ms dutycycle %d\n",
				btcoex->btcoex_period, btcoex->duty_cycle);
		} else if (mci->num_hid) {
			btcoex->duty_cycle = 30;
			mci->aggr_limit = 6;
			ath_dbg(common, MCI,
				"Multiple attempt/timeout single HID "
				"aggregation limit 1.5 ms dutycycle 30%%\n");
		}
	} else if (num_profile == 2) {
		if (mci->num_hid == 2)
			btcoex->duty_cycle = 30;
		mci->aggr_limit = 6;
		ath_dbg(common, MCI,
			"Two BT profiles aggr limit 1.5 ms dutycycle %d%%\n",
			btcoex->duty_cycle);
	} else if (num_profile >= 3) {
		mci->aggr_limit = 4;
		ath_dbg(common, MCI,
			"Three or more profiles aggregation limit 1 ms\n");
	}

skip_tuning:
	if (IS_CHAN_2GHZ(sc->sc_ah->curchan)) {
		if (IS_CHAN_HT(sc->sc_ah->curchan))
			ath_mci_adjust_aggr_limit(btcoex);
		else
			btcoex->btcoex_period >>= 1;
	}

	ath9k_btcoex_timer_pause(sc);
	ath9k_hw_btcoex_disable(sc->sc_ah);

	if (IS_CHAN_5GHZ(sc->sc_ah->curchan))
		return;

	btcoex->duty_cycle += (mci->num_bdr ?
			       ATH_MCI_BDR_DUTY_CYCLE : 0);
	if (btcoex->duty_cycle > ATH_MCI_MAX_DUTY_CYCLE)
		btcoex->duty_cycle = ATH_MCI_MAX_DUTY_CYCLE;

	btcoex->btcoex_no_stomp = btcoex->btcoex_period * 1000 *
		(100 - btcoex->duty_cycle) / 100;

	ath9k_hw_btcoex_enable(sc->sc_ah);
	ath9k_btcoex_timer_resume(sc);
}

static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
	u32 payload[4] = {0, 0, 0, 0};

	switch (opcode) {
	case MCI_GPM_BT_CAL_REQ:
		if (mci_hw->bt_state == MCI_BT_AWAKE) {
			ar9003_mci_state(ah, MCI_STATE_SET_BT_CAL_START);
			ath9k_queue_reset(sc, RESET_TYPE_MCI);
		}
		ath_dbg(common, MCI, "MCI State : %d\n", mci_hw->bt_state);
		break;
	case MCI_GPM_BT_CAL_GRANT:
		MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_DONE);
		ar9003_mci_send_message(sc->sc_ah, MCI_GPM, 0, payload,
					16, false, true);
		break;
	default:
		ath_dbg(common, MCI, "Unknown GPM CAL message\n");
		break;
	}
}

static void ath9k_mci_work(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc, mci_work);

	ath_mci_update_scheme(sc);
}

static void ath_mci_process_profile(struct ath_softc *sc,
				    struct ath_mci_profile_info *info)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_btcoex *btcoex = &sc->btcoex;
	struct ath_mci_profile *mci = &btcoex->mci;
	struct ath_mci_profile_info *entry = NULL;

	entry = ath_mci_find_profile(mci, info);
	if (entry) {
		/*
		 * Two MCI interrupts are generated while connecting to
		 * headset and A2DP profiles, but only one MCI interrupt
		 * is generated (with the last added profile type) while
		 * disconnecting both profiles. So when the second profile
		 * type is added, decrement the count of the first one.
		 */
		if (entry->type != info->type) {
			DEC_PROF(mci, entry);
			INC_PROF(mci, info);
		}
		memcpy(entry, info, 10);
	}

	if (info->start) {
		if (!entry && !ath_mci_add_profile(common, mci, info))
			return;
	} else
		ath_mci_del_profile(common, mci, entry);

	btcoex->btcoex_period = ATH_MCI_DEF_BT_PERIOD;
	mci->aggr_limit = mci->num_sco ? 6 : 0;

	btcoex->duty_cycle = ath_mci_duty_cycle[NUM_PROF(mci)];
	if (NUM_PROF(mci))
		btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
	else
		btcoex->bt_stomp_type = mci->num_mgmt ?
					ATH_BTCOEX_STOMP_ALL :
					ATH_BTCOEX_STOMP_LOW;

	ieee80211_queue_work(sc->hw, &sc->mci_work);
}

static void ath_mci_process_status(struct ath_softc *sc,
				   struct ath_mci_profile_status *status)
{
	struct ath_btcoex *btcoex = &sc->btcoex;
	struct ath_mci_profile *mci = &btcoex->mci;
	struct ath_mci_profile_info info;
	int i = 0, old_num_mgmt = mci->num_mgmt;

	/* Link status types are not handled. */
	if (status->is_link)
		return;

	info.conn_handle = status->conn_handle;
	if (ath_mci_find_profile(mci, &info))
		return;

	if (status->conn_handle >= ATH_MCI_MAX_PROFILE)
		return;

	if (status->is_critical)
		__set_bit(status->conn_handle, mci->status);
	else
		__clear_bit(status->conn_handle, mci->status);

	mci->num_mgmt = 0;
	do {
		if (test_bit(i, mci->status))
			mci->num_mgmt++;
	} while (++i < ATH_MCI_MAX_PROFILE);

	if (old_num_mgmt != mci->num_mgmt)
		ieee80211_queue_work(sc->hw, &sc->mci_work);
}

static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_mci_profile_info profile_info;
	struct ath_mci_profile_status profile_status;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u8 major, minor;
	u32 seq_num;

	switch (opcode) {
	case MCI_GPM_COEX_VERSION_QUERY:
		ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_COEX_VERSION);
		break;
	case MCI_GPM_COEX_VERSION_RESPONSE:
		major = *(rx_payload + MCI_GPM_COEX_B_MAJOR_VERSION);
		minor = *(rx_payload + MCI_GPM_COEX_B_MINOR_VERSION);
		ar9003_mci_set_bt_version(ah, major, minor);
		break;
	case MCI_GPM_COEX_STATUS_QUERY:
		ar9003_mci_send_wlan_channels(ah);
		break;
	case MCI_GPM_COEX_BT_PROFILE_INFO:
		memcpy(&profile_info,
		       (rx_payload + MCI_GPM_COEX_B_PROFILE_TYPE), 10);

		if ((profile_info.type == MCI_GPM_COEX_PROFILE_UNKNOWN) ||
		    (profile_info.type >= MCI_GPM_COEX_PROFILE_MAX)) {
			ath_dbg(common, MCI,
				"Illegal profile type = %d, state = %d\n",
				profile_info.type,
				profile_info.start);
			break;
		}

		ath_mci_process_profile(sc, &profile_info);
		break;
	case MCI_GPM_COEX_BT_STATUS_UPDATE:
		profile_status.is_link = *(rx_payload +
					   MCI_GPM_COEX_B_STATUS_TYPE);
		profile_status.conn_handle = *(rx_payload +
					       MCI_GPM_COEX_B_STATUS_LINKID);
		profile_status.is_critical = *(rx_payload +
					       MCI_GPM_COEX_B_STATUS_STATE);

		seq_num = *((u32 *)(rx_payload + 12));
		ath_dbg(common, MCI,
			"BT_Status_Update: is_link=%d, linkId=%d, state=%d, SEQ=%u\n",
			profile_status.is_link, profile_status.conn_handle,
			profile_status.is_critical, seq_num);

		ath_mci_process_status(sc, &profile_status);
		break;
	default:
		ath_dbg(common, MCI, "Unknown GPM COEX message = 0x%02x\n",
			opcode);
		break;
	}
}

int ath_mci_setup(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_mci_coex *mci = &sc->mci_coex;
	struct ath_mci_buf *buf = &mci->sched_buf;

	buf->bf_addr = dma_alloc_coherent(sc->dev,
					  ATH_MCI_SCHED_BUF_SIZE +
					  ATH_MCI_GPM_BUF_SIZE,
					  &buf->bf_paddr, GFP_KERNEL);

	if (buf->bf_addr == NULL) {
		ath_dbg(common, FATAL, "MCI buffer alloc failed\n");
		return -ENOMEM;
	}

	memset(buf->bf_addr, MCI_GPM_RSVD_PATTERN,
	       ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE);

	mci->sched_buf.bf_len = ATH_MCI_SCHED_BUF_SIZE;

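	/*
	 * The GPM buffer shares the coherent allocation made above: it is
	 * placed immediately after the scheduling buffer, and its length is
	 * handed to ar9003_mci_setup() as bf_len >> 4, i.e. in units of
	 * 16 bytes (matching the 16-byte GPM messages sent elsewhere in
	 * this file).
	 */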
	mci->gpm_buf.bf_len = ATH_MCI_GPM_BUF_SIZE;
	mci->gpm_buf.bf_addr = (u8 *)mci->sched_buf.bf_addr +
			       mci->sched_buf.bf_len;
	mci->gpm_buf.bf_paddr = mci->sched_buf.bf_paddr + mci->sched_buf.bf_len;

	ar9003_mci_setup(sc->sc_ah, mci->gpm_buf.bf_paddr,
			 mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4),
			 mci->sched_buf.bf_paddr);

	INIT_WORK(&sc->mci_work, ath9k_mci_work);
	ath_dbg(common, MCI, "MCI Initialized\n");

	return 0;
}

void ath_mci_cleanup(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct ath_mci_coex *mci = &sc->mci_coex;
	struct ath_mci_buf *buf = &mci->sched_buf;

	if (buf->bf_addr)
		dma_free_coherent(sc->dev,
				  ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE,
				  buf->bf_addr, buf->bf_paddr);

	ar9003_mci_cleanup(ah);

	ath_dbg(common, MCI, "MCI De-Initialized\n");
}

void ath_mci_intr(struct ath_softc *sc)
{
	struct ath_mci_coex *mci = &sc->mci_coex;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
	u32 mci_int, mci_int_rxmsg;
	u32 offset, subtype, opcode;
	u32 *pgpm;
	u32 more_data = MCI_GPM_MORE;
	bool skip_gpm = false;

	ar9003_mci_get_interrupt(sc->sc_ah, &mci_int, &mci_int_rxmsg);

	if (ar9003_mci_state(ah, MCI_STATE_ENABLE) == 0) {
		ar9003_mci_get_next_gpm_offset(ah, true, NULL);
		return;
	}

	if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE) {
		u32 payload[4] = { 0xffffffff, 0xffffffff,
				   0xffffffff, 0xffffff00};

		/*
		 * The following REMOTE_RESET and SYS_WAKING used to be sent
		 * only when BT woke up. Now they are always sent, as a
		 * recovery method to reset BT MCI's RX alignment.
		 */
		ar9003_mci_send_message(ah, MCI_REMOTE_RESET, 0,
					payload, 16, true, false);
		ar9003_mci_send_message(ah, MCI_SYS_WAKING, 0,
					NULL, 0, true, false);

		mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE;
		ar9003_mci_state(ah, MCI_STATE_RESET_REQ_WAKE);

		/*
		 * Always do this for recovery, 2G/5G toggling and LNA_TRANS.
		 */
		ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE);
	}

	if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING) {
		mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING;

		if ((mci_hw->bt_state == MCI_BT_SLEEP) &&
		    (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP) !=
		     MCI_BT_SLEEP))
			ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE);
	}

	if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) {
		mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING;

		if ((mci_hw->bt_state == MCI_BT_AWAKE) &&
		    (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP) !=
		     MCI_BT_AWAKE))
			mci_hw->bt_state = MCI_BT_SLEEP;
	}

	if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
	    (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) {
		ar9003_mci_state(ah, MCI_STATE_RECOVER_RX);
		skip_gpm = true;
	}

	if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO) {
		mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO;
		offset = ar9003_mci_state(ah, MCI_STATE_LAST_SCHD_MSG_OFFSET);
	}

	if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_GPM) {
		mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_GPM;

		while (more_data == MCI_GPM_MORE) {
			pgpm = mci->gpm_buf.bf_addr;
			offset = ar9003_mci_get_next_gpm_offset(ah, false,
								&more_data);
			if (offset == MCI_GPM_INVALID)
				break;

			pgpm += (offset >> 2);

			/*
			 * The first dword is a timer; the real data starts
			 * from the second dword.
			 */
			subtype = MCI_GPM_TYPE(pgpm);
			opcode = MCI_GPM_OPCODE(pgpm);

			if (skip_gpm)
				goto recycle;

			if (MCI_GPM_IS_CAL_TYPE(subtype)) {
				ath_mci_cal_msg(sc, subtype, (u8 *)pgpm);
			} else {
				switch (subtype) {
				case MCI_GPM_COEX_AGENT:
					ath_mci_msg(sc, opcode, (u8 *)pgpm);
					break;
				default:
					break;
				}
			}
recycle:
			MCI_GPM_RECYCLE(pgpm);
		}
	}

	if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_HW_MSG_MASK) {
		if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL)
			mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL;

		if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_LNA_INFO)
			mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_INFO;

		if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO) {
			int value_dbm = MS(mci_hw->cont_status,
					   AR_MCI_CONT_RSSI_POWER);

			mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_INFO;

			ath_dbg(common, MCI,
				"MCI CONT_INFO: (%s) pri = %d pwr = %d dBm\n",
				MS(mci_hw->cont_status, AR_MCI_CONT_TXRX) ?
				"tx" : "rx",
				MS(mci_hw->cont_status, AR_MCI_CONT_PRIORITY),
				value_dbm);
		}

		if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_NACK)
			mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_NACK;

		if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_RST)
			mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_RST;
	}

	if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
	    (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT))
		mci_int &= ~(AR_MCI_INTERRUPT_RX_INVALID_HDR |
			     AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT);
}

void ath_mci_enable(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	if (!common->btcoex_enabled)
		return;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
		sc->sc_ah->imask |= ATH9K_INT_MCI;
}