// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019  Realtek Corporation
 */

#include "main.h"
#include "coex.h"
#include "fw.h"
#include "tx.h"
#include "reg.h"
#include "sec.h"
#include "debug.h"
#include "util.h"
#include "wow.h"

static void rtw_fw_c2h_cmd_handle_ext(struct rtw_dev *rtwdev,
				      struct sk_buff *skb)
{
	struct rtw_c2h_cmd *c2h;
	u8 sub_cmd_id;

	c2h = get_c2h_from_skb(skb);
	sub_cmd_id = c2h->payload[0];

	switch (sub_cmd_id) {
	case C2H_CCX_RPT:
		rtw_tx_report_handle(rtwdev, skb);
		break;
	default:
		break;
	}
}

static u16 get_max_amsdu_len(u32 bit_rate)
{
	/* lower than ofdm, do not aggregate */
	if (bit_rate < 550)
		return 1;

	/* lower than 20M 2ss mcs8, make it small */
	if (bit_rate < 1800)
		return 1200;

	/* lower than 40M 2ss mcs9, make it medium */
	if (bit_rate < 4000)
		return 2600;

	/* not yet 80M 2ss mcs8/9, make it twice regular packet size */
	if (bit_rate < 7000)
		return 3500;

	/* unlimited */
	return 0;
}

struct rtw_fw_iter_ra_data {
	struct rtw_dev *rtwdev;
	u8 *payload;
};

static void rtw_fw_ra_report_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw_fw_iter_ra_data *ra_data = data;
	struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
	u8 mac_id, rate, sgi, bw;
	u8 mcs, nss;
	u32 bit_rate;

	mac_id = GET_RA_REPORT_MACID(ra_data->payload);
	if (si->mac_id != mac_id)
		return;

	si->ra_report.txrate.flags = 0;

	rate = GET_RA_REPORT_RATE(ra_data->payload);
	sgi = GET_RA_REPORT_SGI(ra_data->payload);
	bw = GET_RA_REPORT_BW(ra_data->payload);

	if (rate < DESC_RATEMCS0) {
		si->ra_report.txrate.legacy = rtw_desc_to_bitrate(rate);
		goto legacy;
	}

	rtw_desc_to_mcsrate(rate, &mcs, &nss);
	if (rate >= DESC_RATEVHT1SS_MCS0)
		si->ra_report.txrate.flags |= RATE_INFO_FLAGS_VHT_MCS;
	else if (rate >= DESC_RATEMCS0)
		si->ra_report.txrate.flags |= RATE_INFO_FLAGS_MCS;

	if (rate >= DESC_RATEMCS0) {
		si->ra_report.txrate.mcs = mcs;
		si->ra_report.txrate.nss = nss;
	}

	if (sgi)
		si->ra_report.txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;

	if (bw == RTW_CHANNEL_WIDTH_80)
		si->ra_report.txrate.bw = RATE_INFO_BW_80;
	else if (bw == RTW_CHANNEL_WIDTH_40)
		si->ra_report.txrate.bw = RATE_INFO_BW_40;
	else
		si->ra_report.txrate.bw = RATE_INFO_BW_20;

legacy:
	bit_rate = cfg80211_calculate_bitrate(&si->ra_report.txrate);

	si->ra_report.desc_rate = rate;
	si->ra_report.bit_rate = bit_rate;

	sta->max_rc_amsdu_len = get_max_amsdu_len(bit_rate);
}

static void rtw_fw_ra_report_handle(struct rtw_dev *rtwdev, u8 *payload,
				    u8 length)
{
	struct rtw_fw_iter_ra_data ra_data;

	if (WARN(length < 7, "invalid ra report c2h length\n"))
		return;

	rtwdev->dm_info.tx_rate = GET_RA_REPORT_RATE(payload);
	ra_data.rtwdev = rtwdev;
	ra_data.payload = payload;
	rtw_iterate_stas_atomic(rtwdev, rtw_fw_ra_report_iter, &ra_data);
}
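
/* Process-context handler for C2H (chip-to-host) events. The RX path
 * stores the payload offset in skb->cb before queueing the skb to
 * c2h_queue; events are dispatched under rtwdev->mutex and ignored
 * once RTW_FLAG_RUNNING is cleared.
 */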
void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
{
	struct rtw_c2h_cmd *c2h;
	u32 pkt_offset;
	u8 len;

	pkt_offset = *((u32 *)skb->cb);
	c2h = (struct rtw_c2h_cmd *)(skb->data + pkt_offset);
	len = skb->len - pkt_offset - 2;

	mutex_lock(&rtwdev->mutex);

	if (!test_bit(RTW_FLAG_RUNNING, rtwdev->flags))
		goto unlock;

	switch (c2h->id) {
	case C2H_BT_INFO:
		rtw_coex_bt_info_notify(rtwdev, c2h->payload, len);
		break;
	case C2H_WLAN_INFO:
		rtw_coex_wl_fwdbginfo_notify(rtwdev, c2h->payload, len);
		break;
	case C2H_HALMAC:
		rtw_fw_c2h_cmd_handle_ext(rtwdev, skb);
		break;
	case C2H_RA_RPT:
		rtw_fw_ra_report_handle(rtwdev, c2h->payload, len);
		break;
	default:
		break;
	}

unlock:
	mutex_unlock(&rtwdev->mutex);
}

void rtw_fw_c2h_cmd_rx_irqsafe(struct rtw_dev *rtwdev, u32 pkt_offset,
			       struct sk_buff *skb)
{
	struct rtw_c2h_cmd *c2h;
	u8 len;

	c2h = (struct rtw_c2h_cmd *)(skb->data + pkt_offset);
	len = skb->len - pkt_offset - 2;
	*((u32 *)skb->cb) = pkt_offset;

	rtw_dbg(rtwdev, RTW_DBG_FW, "recv C2H, id=0x%02x, seq=0x%02x, len=%d\n",
		c2h->id, c2h->seq, len);

	switch (c2h->id) {
	case C2H_BT_MP_INFO:
		rtw_coex_info_response(rtwdev, skb);
		break;
	default:
		/* pass offset for further operation */
		*((u32 *)skb->cb) = pkt_offset;
		skb_queue_tail(&rtwdev->c2h_queue, skb);
		ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work);
		break;
	}
}
EXPORT_SYMBOL(rtw_fw_c2h_cmd_rx_irqsafe);
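
/* Send an 8-byte H2C (host-to-chip) command through one of the four
 * HMEBOX mailbox register pairs, used in round-robin order. The
 * REG_HMETFR busy bit of the chosen box is polled up to 20 times; if
 * the firmware has not consumed the previous command by then, this one
 * is dropped with an error.
 */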
static void rtw_fw_send_h2c_command(struct rtw_dev *rtwdev,
				    u8 *h2c)
{
	u8 box;
	u8 box_state;
	u32 box_reg, box_ex_reg;
	u32 h2c_wait;
	int idx;

	rtw_dbg(rtwdev, RTW_DBG_FW,
		"send H2C content %02x%02x%02x%02x %02x%02x%02x%02x\n",
		h2c[3], h2c[2], h2c[1], h2c[0],
		h2c[7], h2c[6], h2c[5], h2c[4]);

	spin_lock(&rtwdev->h2c.lock);

	box = rtwdev->h2c.last_box_num;
	switch (box) {
	case 0:
		box_reg = REG_HMEBOX0;
		box_ex_reg = REG_HMEBOX0_EX;
		break;
	case 1:
		box_reg = REG_HMEBOX1;
		box_ex_reg = REG_HMEBOX1_EX;
		break;
	case 2:
		box_reg = REG_HMEBOX2;
		box_ex_reg = REG_HMEBOX2_EX;
		break;
	case 3:
		box_reg = REG_HMEBOX3;
		box_ex_reg = REG_HMEBOX3_EX;
		break;
	default:
		WARN(1, "invalid h2c mail box number\n");
		goto out;
	}

	h2c_wait = 20;
	do {
		box_state = rtw_read8(rtwdev, REG_HMETFR);
	} while ((box_state >> box) & 0x1 && --h2c_wait > 0);

	if (!h2c_wait) {
		rtw_err(rtwdev, "failed to send h2c command\n");
		goto out;
	}

	for (idx = 0; idx < 4; idx++)
		rtw_write8(rtwdev, box_reg + idx, h2c[idx]);
	for (idx = 0; idx < 4; idx++)
		rtw_write8(rtwdev, box_ex_reg + idx, h2c[idx + 4]);

	if (++rtwdev->h2c.last_box_num >= 4)
		rtwdev->h2c.last_box_num = 0;

out:
	spin_unlock(&rtwdev->h2c.lock);
}
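
/* Send a full-size H2C offload packet (H2C_PKT_SIZE bytes, carrying a
 * packet header and sequence number) through the HCI data path, as
 * opposed to the 8-byte mailbox commands sent above.
 */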
static void rtw_fw_send_h2c_packet(struct rtw_dev *rtwdev, u8 *h2c_pkt)
{
	int ret;

	spin_lock(&rtwdev->h2c.lock);

	FW_OFFLOAD_H2C_SET_SEQ_NUM(h2c_pkt, rtwdev->h2c.seq);
	ret = rtw_hci_write_data_h2c(rtwdev, h2c_pkt, H2C_PKT_SIZE);
	if (ret)
		rtw_err(rtwdev, "failed to send h2c packet\n");
	rtwdev->h2c.seq++;

	spin_unlock(&rtwdev->h2c.lock);
}

void
rtw_fw_send_general_info(struct rtw_dev *rtwdev)
{
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u16 total_size = H2C_PKT_HDR_SIZE + 4;

	rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_GENERAL_INFO);

	SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);

	GENERAL_INFO_SET_FW_TX_BOUNDARY(h2c_pkt,
					fifo->rsvd_fw_txbuf_addr -
					fifo->rsvd_boundary);

	rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}

void
rtw_fw_send_phydm_info(struct rtw_dev *rtwdev)
{
	struct rtw_hal *hal = &rtwdev->hal;
	struct rtw_efuse *efuse = &rtwdev->efuse;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u16 total_size = H2C_PKT_HDR_SIZE + 8;
	u8 fw_rf_type = 0;

	if (hal->rf_type == RF_1T1R)
		fw_rf_type = FW_RF_1T1R;
	else if (hal->rf_type == RF_2T2R)
		fw_rf_type = FW_RF_2T2R;

	rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_PHYDM_INFO);

	SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
	PHYDM_INFO_SET_REF_TYPE(h2c_pkt, efuse->rfe_option);
	PHYDM_INFO_SET_RF_TYPE(h2c_pkt, fw_rf_type);
	PHYDM_INFO_SET_CUT_VER(h2c_pkt, hal->cut_version);
	PHYDM_INFO_SET_RX_ANT_STATUS(h2c_pkt, hal->antenna_tx);
	PHYDM_INFO_SET_TX_ANT_STATUS(h2c_pkt, hal->antenna_rx);

	rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}

void rtw_fw_do_iqk(struct rtw_dev *rtwdev, struct rtw_iqk_para *para)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u16 total_size = H2C_PKT_HDR_SIZE + 1;

	rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_IQK);
	SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
	IQK_SET_CLEAR(h2c_pkt, para->clear);
	IQK_SET_SEGMENT_IQK(h2c_pkt, para->segment_iqk);

	rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}
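
/* H2C commands for the BT coexistence mechanism: query BT status,
 * report the WLAN channel in use, and program TDMA, TX power and
 * ignore-WLAN parameters into the firmware.
 */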
void rtw_fw_query_bt_info(struct rtw_dev *rtwdev)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_QUERY_BT_INFO);

	SET_QUERY_BT_INFO(h2c_pkt, true);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_wl_ch_info(struct rtw_dev *rtwdev, u8 link, u8 ch, u8 bw)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WL_CH_INFO);

	SET_WL_CH_INFO_LINK(h2c_pkt, link);
	SET_WL_CH_INFO_CHNL(h2c_pkt, ch);
	SET_WL_CH_INFO_BW(h2c_pkt, bw);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_query_bt_mp_info(struct rtw_dev *rtwdev,
			     struct rtw_coex_info_req *req)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_QUERY_BT_MP_INFO);

	SET_BT_MP_INFO_SEQ(h2c_pkt, req->seq);
	SET_BT_MP_INFO_OP_CODE(h2c_pkt, req->op_code);
	SET_BT_MP_INFO_PARA1(h2c_pkt, req->para1);
	SET_BT_MP_INFO_PARA2(h2c_pkt, req->para2);
	SET_BT_MP_INFO_PARA3(h2c_pkt, req->para3);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_force_bt_tx_power(struct rtw_dev *rtwdev, u8 bt_pwr_dec_lvl)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u8 index = 0 - bt_pwr_dec_lvl;

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_FORCE_BT_TX_POWER);

	SET_BT_TX_POWER_INDEX(h2c_pkt, index);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_bt_ignore_wlan_action(struct rtw_dev *rtwdev, bool enable)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_IGNORE_WLAN_ACTION);

	SET_IGNORE_WLAN_ACTION_EN(h2c_pkt, enable);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_coex_tdma_type(struct rtw_dev *rtwdev,
			   u8 para1, u8 para2, u8 para3, u8 para4, u8 para5)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_COEX_TDMA_TYPE);

	SET_COEX_TDMA_TYPE_PARA1(h2c_pkt, para1);
	SET_COEX_TDMA_TYPE_PARA2(h2c_pkt, para2);
	SET_COEX_TDMA_TYPE_PARA3(h2c_pkt, para3);
	SET_COEX_TDMA_TYPE_PARA4(h2c_pkt, para4);
	SET_COEX_TDMA_TYPE_PARA5(h2c_pkt, para5);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_bt_wifi_control(struct rtw_dev *rtwdev, u8 op_code, u8 *data)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BT_WIFI_CONTROL);

	SET_BT_WIFI_CONTROL_OP_CODE(h2c_pkt, op_code);

	SET_BT_WIFI_CONTROL_DATA1(h2c_pkt, *data);
	SET_BT_WIFI_CONTROL_DATA2(h2c_pkt, *(data + 1));
	SET_BT_WIFI_CONTROL_DATA3(h2c_pkt, *(data + 2));
	SET_BT_WIFI_CONTROL_DATA4(h2c_pkt, *(data + 3));
	SET_BT_WIFI_CONTROL_DATA5(h2c_pkt, *(data + 4));

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u8 rssi = ewma_rssi_read(&si->avg_rssi);
	bool stbc_en = si->stbc_en ? true : false;

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RSSI_MONITOR);

	SET_RSSI_INFO_MACID(h2c_pkt, si->mac_id);
	SET_RSSI_INFO_RSSI(h2c_pkt, rssi);
	SET_RSSI_INFO_STBC(h2c_pkt, stbc_en);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	bool no_update = si->updated;
	bool disable_pt = true;

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RA_INFO);

	SET_RA_INFO_MACID(h2c_pkt, si->mac_id);
	SET_RA_INFO_RATE_ID(h2c_pkt, si->rate_id);
	SET_RA_INFO_INIT_RA_LVL(h2c_pkt, si->init_ra_lv);
	SET_RA_INFO_SGI_EN(h2c_pkt, si->sgi_enable);
	SET_RA_INFO_BW_MODE(h2c_pkt, si->bw_mode);
	SET_RA_INFO_LDPC(h2c_pkt, si->ldpc_en);
	SET_RA_INFO_NO_UPDATE(h2c_pkt, no_update);
	SET_RA_INFO_VHT_EN(h2c_pkt, si->vht_enable);
	SET_RA_INFO_DIS_PT(h2c_pkt, disable_pt);
	SET_RA_INFO_RA_MASK0(h2c_pkt, (si->ra_mask & 0xff));
	SET_RA_INFO_RA_MASK1(h2c_pkt, (si->ra_mask & 0xff00) >> 8);
	SET_RA_INFO_RA_MASK2(h2c_pkt, (si->ra_mask & 0xff0000) >> 16);
	SET_RA_INFO_RA_MASK3(h2c_pkt, (si->ra_mask & 0xff000000) >> 24);

	si->init_ra_lv = 0;
	si->updated = true;

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool connect)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_MEDIA_STATUS_RPT);
	MEDIA_STATUS_RPT_SET_OP_MODE(h2c_pkt, connect);
	MEDIA_STATUS_RPT_SET_MACID(h2c_pkt, mac_id);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
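
/* Program the firmware power-save parameters from the current LPS
 * configuration.
 */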
void rtw_fw_set_pwr_mode(struct rtw_dev *rtwdev)
{
	struct rtw_lps_conf *conf = &rtwdev->lps_conf;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_SET_PWR_MODE);

	SET_PWR_MODE_SET_MODE(h2c_pkt, conf->mode);
	SET_PWR_MODE_SET_RLBM(h2c_pkt, conf->rlbm);
	SET_PWR_MODE_SET_SMART_PS(h2c_pkt, conf->smart_ps);
	SET_PWR_MODE_SET_AWAKE_INTERVAL(h2c_pkt, conf->awake_interval);
	SET_PWR_MODE_SET_PORT_ID(h2c_pkt, conf->port_id);
	SET_PWR_MODE_SET_PWR_STATE(h2c_pkt, conf->state);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_set_keep_alive_cmd(struct rtw_dev *rtwdev, bool enable)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	struct rtw_fw_wow_keep_alive_para mode = {
		.adopt = true,
		.pkt_type = KEEP_ALIVE_NULL_PKT,
		.period = 5,
	};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_KEEP_ALIVE);
	SET_KEEP_ALIVE_ENABLE(h2c_pkt, enable);
	SET_KEEP_ALIVE_ADOPT(h2c_pkt, mode.adopt);
	SET_KEEP_ALIVE_PKT_TYPE(h2c_pkt, mode.pkt_type);
	SET_KEEP_ALIVE_CHECK_PERIOD(h2c_pkt, mode.period);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_set_disconnect_decision_cmd(struct rtw_dev *rtwdev, bool enable)
{
	struct rtw_wow_param *rtw_wow = &rtwdev->wow;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	struct rtw_fw_wow_disconnect_para mode = {
		.adopt = true,
		.period = 30,
		.retry_count = 5,
	};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_DISCONNECT_DECISION);

	if (test_bit(RTW_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) {
		SET_DISCONNECT_DECISION_ENABLE(h2c_pkt, enable);
		SET_DISCONNECT_DECISION_ADOPT(h2c_pkt, mode.adopt);
		SET_DISCONNECT_DECISION_CHECK_PERIOD(h2c_pkt, mode.period);
		SET_DISCONNECT_DECISION_TRY_PKT_NUM(h2c_pkt, mode.retry_count);
	}

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_set_wowlan_ctrl_cmd(struct rtw_dev *rtwdev, bool enable)
{
	struct rtw_wow_param *rtw_wow = &rtwdev->wow;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WOWLAN);

	SET_WOWLAN_FUNC_ENABLE(h2c_pkt, enable);
	if (rtw_wow_mgd_linked(rtwdev)) {
		if (test_bit(RTW_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags))
			SET_WOWLAN_MAGIC_PKT_ENABLE(h2c_pkt, enable);
		if (test_bit(RTW_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags))
			SET_WOWLAN_DEAUTH_WAKEUP_ENABLE(h2c_pkt, enable);
		if (test_bit(RTW_WOW_FLAG_EN_REKEY_PKT, rtw_wow->flags))
			SET_WOWLAN_REKEY_WAKEUP_ENABLE(h2c_pkt, enable);
		if (rtw_wow->pattern_cnt)
			SET_WOWLAN_PATTERN_MATCH_ENABLE(h2c_pkt, enable);
	}

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_set_aoac_global_info_cmd(struct rtw_dev *rtwdev,
				     u8 pairwise_key_enc,
				     u8 group_key_enc)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_AOAC_GLOBAL_INFO);

	SET_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG(h2c_pkt, pairwise_key_enc);
	SET_AOAC_GLOBAL_INFO_GROUP_ENC_ALG(h2c_pkt, group_key_enc);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_set_remote_wake_ctrl_cmd(struct rtw_dev *rtwdev, bool enable)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_REMOTE_WAKE_CTRL);

	SET_REMOTE_WAKECTRL_ENABLE(h2c_pkt, enable);

	if (rtw_wow_no_link(rtwdev))
		SET_REMOTE_WAKE_CTRL_NLO_OFFLOAD_EN(h2c_pkt, enable);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

static u8 rtw_get_rsvd_page_location(struct rtw_dev *rtwdev,
				     enum rtw_rsvd_packet_type type)
{
	struct rtw_rsvd_page *rsvd_pkt;
	u8 location = 0;

	list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
		if (type == rsvd_pkt->type)
			location = rsvd_pkt->page;
	}

	return location;
}

void rtw_fw_set_nlo_info(struct rtw_dev *rtwdev, bool enable)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u8 loc_nlo;

	loc_nlo = rtw_get_rsvd_page_location(rtwdev, RSVD_NLO_INFO);

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_NLO_INFO);

	SET_NLO_FUN_EN(h2c_pkt, enable);
	if (enable) {
		if (rtw_fw_lps_deep_mode)
			SET_NLO_PS_32K(h2c_pkt, enable);
		SET_NLO_IGNORE_SECURITY(h2c_pkt, enable);
		SET_NLO_LOC_NLO_INFO(h2c_pkt, loc_nlo);
	}

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

void rtw_fw_set_pg_info(struct rtw_dev *rtwdev)
{
	struct rtw_lps_conf *conf = &rtwdev->lps_conf;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u8 loc_pg, loc_dpk;

	loc_pg = rtw_get_rsvd_page_location(rtwdev, RSVD_LPS_PG_INFO);
	loc_dpk = rtw_get_rsvd_page_location(rtwdev, RSVD_LPS_PG_DPK);

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_LPS_PG_INFO);

	LPS_PG_INFO_LOC(h2c_pkt, loc_pg);
	LPS_PG_DPK_LOC(h2c_pkt, loc_dpk);
	LPS_PG_SEC_CAM_EN(h2c_pkt, conf->sec_cam_backup);
	LPS_PG_PATTERN_CAM_EN(h2c_pkt, conf->pattern_cam_backup);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
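
/* Find the reserved-page location of the probe request built for a
 * given SSID; a NULL ssid matches the wildcard probe request. Callers
 * treat a returned location of 0 as "not found".
 */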
u8 rtw_get_rsvd_page_probe_req_location(struct rtw_dev *rtwdev,
					struct cfg80211_ssid *ssid)
{
	struct rtw_rsvd_page *rsvd_pkt;
	u8 location = 0;

	list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
		if (rsvd_pkt->type != RSVD_PROBE_REQ)
			continue;
		if ((!ssid && !rsvd_pkt->ssid) ||
		    rtw_ssid_equal(rsvd_pkt->ssid, ssid))
			location = rsvd_pkt->page;
	}

	return location;
}

u16 rtw_get_rsvd_page_probe_req_size(struct rtw_dev *rtwdev,
				     struct cfg80211_ssid *ssid)
{
	struct rtw_rsvd_page *rsvd_pkt;
	u16 size = 0;

	list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
		if (rsvd_pkt->type != RSVD_PROBE_REQ)
			continue;
		if ((!ssid && !rsvd_pkt->ssid) ||
		    rtw_ssid_equal(rsvd_pkt->ssid, ssid))
			size = rsvd_pkt->skb->len;
	}

	return size;
}

void rtw_send_rsvd_page_h2c(struct rtw_dev *rtwdev)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u8 location = 0;

	SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RSVD_PAGE);

	location = rtw_get_rsvd_page_location(rtwdev, RSVD_PROBE_RESP);
	*(h2c_pkt + 1) = location;
	rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_PROBE_RESP loc: %d\n", location);

	location = rtw_get_rsvd_page_location(rtwdev, RSVD_PS_POLL);
	*(h2c_pkt + 2) = location;
	rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_PS_POLL loc: %d\n", location);

	location = rtw_get_rsvd_page_location(rtwdev, RSVD_NULL);
	*(h2c_pkt + 3) = location;
	rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_NULL loc: %d\n", location);

	location = rtw_get_rsvd_page_location(rtwdev, RSVD_QOS_NULL);
	*(h2c_pkt + 4) = location;
	rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_QOS_NULL loc: %d\n", location);

	rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}

static struct sk_buff *
rtw_beacon_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct sk_buff *skb_new;

	if (vif->type != NL80211_IFTYPE_AP &&
	    vif->type != NL80211_IFTYPE_ADHOC &&
	    !ieee80211_vif_is_mesh(vif)) {
		skb_new = alloc_skb(1, GFP_KERNEL);
		if (!skb_new)
			return NULL;
		skb_put(skb_new, 1);
	} else {
		skb_new = ieee80211_beacon_get(hw, vif);
	}

	return skb_new;
}
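
/* Build the NLO (network list offload) reserved page for WoWLAN
 * net-detect: a header carrying the SSID lengths and the reserved-page
 * locations of the matching probe requests, followed by the SSIDs
 * themselves, each padded to IEEE80211_MAX_SSID_LEN.
 */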
static struct sk_buff *rtw_nlo_info_get(struct ieee80211_hw *hw)
{
	struct rtw_dev *rtwdev = hw->priv;
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pno_request *pno_req = &rtwdev->wow.pno_req;
	struct rtw_nlo_info_hdr *nlo_hdr;
	struct cfg80211_ssid *ssid;
	struct sk_buff *skb;
	u8 *pos, loc;
	u32 size;
	int i;

	if (!pno_req->inited || !pno_req->match_set_cnt)
		return NULL;

	size = sizeof(struct rtw_nlo_info_hdr) + pno_req->match_set_cnt *
		      IEEE80211_MAX_SSID_LEN + chip->tx_pkt_desc_sz;

	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_reserve(skb, chip->tx_pkt_desc_sz);

	nlo_hdr = skb_put_zero(skb, sizeof(struct rtw_nlo_info_hdr));

	nlo_hdr->nlo_count = pno_req->match_set_cnt;
	nlo_hdr->hidden_ap_count = pno_req->match_set_cnt;

	/* pattern check for firmware */
	memset(nlo_hdr->pattern_check, 0xA5, FW_NLO_INFO_CHECK_SIZE);

	for (i = 0; i < pno_req->match_set_cnt; i++)
		nlo_hdr->ssid_len[i] = pno_req->match_sets[i].ssid.ssid_len;

	for (i = 0; i < pno_req->match_set_cnt; i++) {
		ssid = &pno_req->match_sets[i].ssid;
		loc = rtw_get_rsvd_page_probe_req_location(rtwdev, ssid);
		if (!loc) {
			rtw_err(rtwdev, "failed to get probe req rsvd loc\n");
			kfree_skb(skb);
			return NULL;
		}
		nlo_hdr->location[i] = loc;
	}

	for (i = 0; i < pno_req->match_set_cnt; i++) {
		pos = skb_put_zero(skb, IEEE80211_MAX_SSID_LEN);
		memcpy(pos, pno_req->match_sets[i].ssid.ssid,
		       pno_req->match_sets[i].ssid.ssid_len);
	}

	return skb;
}

static struct sk_buff *rtw_cs_channel_info_get(struct ieee80211_hw *hw)
{
	struct rtw_dev *rtwdev = hw->priv;
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pno_request *pno_req = &rtwdev->wow.pno_req;
	struct ieee80211_channel *channels = pno_req->channels;
	struct sk_buff *skb;
	int count = pno_req->channel_cnt;
	u8 *pos;
	int i = 0;

	skb = alloc_skb(4 * count + chip->tx_pkt_desc_sz, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_reserve(skb, chip->tx_pkt_desc_sz);

	for (i = 0; i < count; i++) {
		pos = skb_put_zero(skb, 4);

		CHSW_INFO_SET_CH(pos, channels[i].hw_value);

		if (channels[i].flags & IEEE80211_CHAN_RADAR)
			CHSW_INFO_SET_ACTION_ID(pos, 0);
		else
			CHSW_INFO_SET_ACTION_ID(pos, 1);
		CHSW_INFO_SET_TIMEOUT(pos, 1);
		CHSW_INFO_SET_PRI_CH_IDX(pos, 1);
		CHSW_INFO_SET_BW(pos, 0);
	}

	return skb;
}

static struct sk_buff *rtw_lps_pg_dpk_get(struct ieee80211_hw *hw)
{
	struct rtw_dev *rtwdev = hw->priv;
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_dpk_info *dpk_info = &rtwdev->dm_info.dpk_info;
	struct rtw_lps_pg_dpk_hdr *dpk_hdr;
	struct sk_buff *skb;
	u32 size;

	size = chip->tx_pkt_desc_sz + sizeof(*dpk_hdr);
	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_reserve(skb, chip->tx_pkt_desc_sz);
	dpk_hdr = skb_put_zero(skb, sizeof(*dpk_hdr));
	dpk_hdr->dpk_ch = dpk_info->dpk_ch;
	dpk_hdr->dpk_path_ok = dpk_info->dpk_path_ok[0];
	memcpy(dpk_hdr->dpk_txagc, dpk_info->dpk_txagc, 2);
	memcpy(dpk_hdr->dpk_gs, dpk_info->dpk_gs, 4);
	memcpy(dpk_hdr->coef, dpk_info->coef, 160);

	return skb;
}

static struct sk_buff *rtw_lps_pg_info_get(struct ieee80211_hw *hw)
{
	struct rtw_dev *rtwdev = hw->priv;
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_lps_conf *conf = &rtwdev->lps_conf;
	struct rtw_lps_pg_info_hdr *pg_info_hdr;
	struct rtw_wow_param *rtw_wow = &rtwdev->wow;
	struct sk_buff *skb;
	u32 size;

	size = chip->tx_pkt_desc_sz + sizeof(*pg_info_hdr);
	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_reserve(skb, chip->tx_pkt_desc_sz);
	pg_info_hdr = skb_put_zero(skb, sizeof(*pg_info_hdr));
	pg_info_hdr->tx_bu_page_count = rtwdev->fifo.rsvd_drv_pg_num;
	pg_info_hdr->macid = find_first_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM);
	pg_info_hdr->sec_cam_count =
		rtw_sec_cam_pg_backup(rtwdev, pg_info_hdr->sec_cam);
	pg_info_hdr->pattern_count = rtw_wow->pattern_cnt;

	conf->sec_cam_backup = pg_info_hdr->sec_cam_count != 0;
	conf->pattern_cam_backup = rtw_wow->pattern_cnt != 0;

	return skb;
}

static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
					     struct ieee80211_vif *vif,
					     struct rtw_rsvd_page *rsvd_pkt)
{
	struct sk_buff *skb_new;
	struct cfg80211_ssid *ssid;

	switch (rsvd_pkt->type) {
	case RSVD_BEACON:
		skb_new = rtw_beacon_get(hw, vif);
		break;
	case RSVD_PS_POLL:
		skb_new = ieee80211_pspoll_get(hw, vif);
		break;
	case RSVD_PROBE_RESP:
		skb_new = ieee80211_proberesp_get(hw, vif);
		break;
	case RSVD_NULL:
		skb_new = ieee80211_nullfunc_get(hw, vif, false);
		break;
	case RSVD_QOS_NULL:
		skb_new = ieee80211_nullfunc_get(hw, vif, true);
		break;
	case RSVD_LPS_PG_DPK:
		skb_new = rtw_lps_pg_dpk_get(hw);
		break;
	case RSVD_LPS_PG_INFO:
		skb_new = rtw_lps_pg_info_get(hw);
		break;
	case RSVD_PROBE_REQ:
		ssid = (struct cfg80211_ssid *)rsvd_pkt->ssid;
		if (ssid)
			skb_new = ieee80211_probereq_get(hw, vif->addr,
							 ssid->ssid,
							 ssid->ssid_len, 0);
		else
			skb_new = ieee80211_probereq_get(hw, vif->addr, NULL, 0, 0);
		break;
	case RSVD_NLO_INFO:
		skb_new = rtw_nlo_info_get(hw);
		break;
	case RSVD_CH_INFO:
		skb_new = rtw_cs_channel_info_get(hw);
		break;
	default:
		return NULL;
	}

	if (!skb_new)
		return NULL;

	return skb_new;
}
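
/* Prepend and fill a TX descriptor for reserved packets that carry
 * their own tx_desc (see rsvd_pkt->add_txdesc), growing the skb by
 * tx_pkt_desc_sz bytes.
 */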
static void rtw_fill_rsvd_page_desc(struct rtw_dev *rtwdev, struct sk_buff *skb)
{
	struct rtw_tx_pkt_info pkt_info;
	struct rtw_chip_info *chip = rtwdev->chip;
	u8 *pkt_desc;

	memset(&pkt_info, 0, sizeof(pkt_info));
	rtw_rsvd_page_pkt_info_update(rtwdev, &pkt_info, skb);
	pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
	memset(pkt_desc, 0, chip->tx_pkt_desc_sz);
	rtw_tx_fill_tx_desc(&pkt_info, skb);
}

static inline u8 rtw_len_to_page(unsigned int len, u8 page_size)
{
	return DIV_ROUND_UP(len, page_size);
}

static void rtw_rsvd_page_list_to_buf(struct rtw_dev *rtwdev, u8 page_size,
				      u8 page_margin, u32 page, u8 *buf,
				      struct rtw_rsvd_page *rsvd_pkt)
{
	struct sk_buff *skb = rsvd_pkt->skb;

	if (page >= 1)
		memcpy(buf + page_margin + page_size * (page - 1),
		       skb->data, skb->len);
	else
		memcpy(buf, skb->data, skb->len);
}

static struct rtw_rsvd_page *rtw_alloc_rsvd_page(struct rtw_dev *rtwdev,
						 enum rtw_rsvd_packet_type type,
						 bool txdesc)
{
	struct rtw_rsvd_page *rsvd_pkt = NULL;

	rsvd_pkt = kzalloc(sizeof(*rsvd_pkt), GFP_KERNEL);
	if (!rsvd_pkt)
		return NULL;

	rsvd_pkt->type = type;
	rsvd_pkt->add_txdesc = txdesc;

	return rsvd_pkt;
}

static void rtw_insert_rsvd_page(struct rtw_dev *rtwdev,
				 struct rtw_rsvd_page *rsvd_pkt)
{
	lockdep_assert_held(&rtwdev->mutex);
	list_add_tail(&rsvd_pkt->list, &rtwdev->rsvd_page_list);
}

void rtw_add_rsvd_page(struct rtw_dev *rtwdev, enum rtw_rsvd_packet_type type,
		       bool txdesc)
{
	struct rtw_rsvd_page *rsvd_pkt;

	rsvd_pkt = rtw_alloc_rsvd_page(rtwdev, type, txdesc);
	if (!rsvd_pkt)
		return;

	rtw_insert_rsvd_page(rtwdev, rsvd_pkt);
}

void rtw_add_rsvd_page_probe_req(struct rtw_dev *rtwdev,
				 struct cfg80211_ssid *ssid)
{
	struct rtw_rsvd_page *rsvd_pkt;

	rsvd_pkt = rtw_alloc_rsvd_page(rtwdev, RSVD_PROBE_REQ, true);
	if (!rsvd_pkt)
		return;

	rsvd_pkt->ssid = ssid;
	rtw_insert_rsvd_page(rtwdev, rsvd_pkt);
}

void rtw_reset_rsvd_page(struct rtw_dev *rtwdev)
{
	struct rtw_rsvd_page *rsvd_pkt, *tmp;

	lockdep_assert_held(&rtwdev->mutex);

	list_for_each_entry_safe(rsvd_pkt, tmp, &rtwdev->rsvd_page_list, list) {
		if (rsvd_pkt->type == RSVD_BEACON)
			continue;
		list_del(&rsvd_pkt->list);
		kfree(rsvd_pkt);
	}
}
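
/* Write a buffer into the reserved page area through the beacon queue:
 * back up the relevant REG_CR and REG_FWHW_TXQ_CTRL bits, point
 * REG_FIFOPAGE_CTRL_2 at pg_addr, push the buffer over HCI, poll
 * BIT_BCN_VALID_V1 to confirm the download, then restore the registers.
 */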
int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
				u8 *buf, u32 size)
{
	u8 bckp[2];
	u8 val;
	u16 rsvd_pg_head;
	int ret;

	lockdep_assert_held(&rtwdev->mutex);

	if (!size)
		return -EINVAL;

	pg_addr &= BIT_MASK_BCN_HEAD_1_V1;
	rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, pg_addr | BIT_BCN_VALID_V1);

	val = rtw_read8(rtwdev, REG_CR + 1);
	bckp[0] = val;
	val |= BIT_ENSWBCN >> 8;
	rtw_write8(rtwdev, REG_CR + 1, val);

	val = rtw_read8(rtwdev, REG_FWHW_TXQ_CTRL + 2);
	bckp[1] = val;
	val &= ~(BIT_EN_BCNQ_DL >> 16);
	rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, val);

	ret = rtw_hci_write_data_rsvd_page(rtwdev, buf, size);
	if (ret) {
		rtw_err(rtwdev, "failed to write data to rsvd page\n");
		goto restore;
	}

	if (!check_hw_ready(rtwdev, REG_FIFOPAGE_CTRL_2, BIT_BCN_VALID_V1, 1)) {
		rtw_err(rtwdev, "error beacon valid\n");
		ret = -EBUSY;
	}

restore:
	rsvd_pg_head = rtwdev->fifo.rsvd_boundary;
	rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2,
		    rsvd_pg_head | BIT_BCN_VALID_V1);
	rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, bckp[1]);
	rtw_write8(rtwdev, REG_CR + 1, bckp[0]);

	return ret;
}

static int rtw_download_drv_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
	u32 pg_size;
	u32 pg_num = 0;
	u16 pg_addr = 0;

	pg_size = rtwdev->chip->page_size;
	pg_num = size / pg_size + ((size & (pg_size - 1)) ? 1 : 0);
	if (pg_num > rtwdev->fifo.rsvd_drv_pg_num)
		return -ENOMEM;

	pg_addr = rtwdev->fifo.rsvd_drv_addr;

	return rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, size);
}
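
/* Assemble every packet on rsvd_page_list into one page-aligned buffer.
 * The first packet must be the beacon; see the comments below for why
 * the beacon's tx_desc size is counted into the first page.
 */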
static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev,
			       struct ieee80211_vif *vif, u32 *size)
{
	struct ieee80211_hw *hw = rtwdev->hw;
	struct rtw_chip_info *chip = rtwdev->chip;
	struct sk_buff *iter;
	struct rtw_rsvd_page *rsvd_pkt;
	u32 page = 0;
	u8 total_page = 0;
	u8 page_size, page_margin, tx_desc_sz;
	u8 *buf;

	page_size = chip->page_size;
	tx_desc_sz = chip->tx_pkt_desc_sz;
	page_margin = page_size - tx_desc_sz;

	list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
		iter = rtw_get_rsvd_page_skb(hw, vif, rsvd_pkt);
		if (!iter) {
			rtw_err(rtwdev, "failed to build rsvd packet\n");
			goto release_skb;
		}

		/* Fill the tx_desc for the rsvd pkt that requires one,
		 * which also grows iter->len by tx_desc_sz bytes.
		 */
		if (rsvd_pkt->add_txdesc)
			rtw_fill_rsvd_page_desc(rtwdev, iter);

		rsvd_pkt->skb = iter;
		rsvd_pkt->page = total_page;

		/* A reserved page is downloaded via the TX path, and the
		 * TX path generates a tx_desc at the head to describe the
		 * length of the buffer. If we did not count the size of
		 * that tx_desc into the page number of the first rsvd_pkt
		 * (always a beacon; by default the firmware treats the
		 * first page as the beacon content), we could end up with
		 * a buffer smaller than the actual size of the whole
		 * rsvd_page.
		 */
		if (total_page == 0) {
			if (rsvd_pkt->type != RSVD_BEACON) {
				rtw_err(rtwdev, "first page should be a beacon\n");
				goto release_skb;
			}
			total_page += rtw_len_to_page(iter->len + tx_desc_sz,
						      page_size);
		} else {
			total_page += rtw_len_to_page(iter->len, page_size);
		}
	}

	if (total_page > rtwdev->fifo.rsvd_drv_pg_num) {
		rtw_err(rtwdev, "rsvd page over size: %d\n", total_page);
		goto release_skb;
	}

	*size = (total_page - 1) * page_size + page_margin;
	buf = kzalloc(*size, GFP_KERNEL);
	if (!buf)
		goto release_skb;

	/* Copy the content of each rsvd_pkt into buf, aligned to the
	 * pages.
	 *
	 * Note that the first rsvd_pkt is a beacon no matter what
	 * vif->type is. That rsvd_pkt does not carry a tx_desc, because
	 * the TX path will generate one for it when it is downloaded.
	 */
	list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
		rtw_rsvd_page_list_to_buf(rtwdev, page_size, page_margin,
					  page, buf, rsvd_pkt);
		if (page == 0)
			page += rtw_len_to_page(rsvd_pkt->skb->len +
						tx_desc_sz, page_size);
		else
			page += rtw_len_to_page(rsvd_pkt->skb->len, page_size);

		kfree_skb(rsvd_pkt->skb);
		rsvd_pkt->skb = NULL;
	}

	return buf;

release_skb:
	list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
		kfree_skb(rsvd_pkt->skb);
		rsvd_pkt->skb = NULL;
	}

	return NULL;
}

static int
rtw_download_beacon(struct rtw_dev *rtwdev, struct ieee80211_vif *vif)
{
	struct ieee80211_hw *hw = rtwdev->hw;
	struct sk_buff *skb;
	int ret = 0;

	skb = rtw_beacon_get(hw, vif);
	if (!skb) {
		rtw_err(rtwdev, "failed to get beacon skb\n");
		ret = -ENOMEM;
		goto out;
	}

	ret = rtw_download_drv_rsvd_page(rtwdev, skb->data, skb->len);
	if (ret)
		rtw_err(rtwdev, "failed to download drv rsvd page\n");

	dev_kfree_skb(skb);

out:
	return ret;
}

int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev, struct ieee80211_vif *vif)
{
	u8 *buf;
	u32 size;
	int ret;

	buf = rtw_build_rsvd_page(rtwdev, vif, &size);
	if (!buf) {
		rtw_err(rtwdev, "failed to build rsvd page pkt\n");
		return -ENOMEM;
	}

	ret = rtw_download_drv_rsvd_page(rtwdev, buf, size);
	if (ret) {
		rtw_err(rtwdev, "failed to download drv rsvd page\n");
		goto free;
	}

	/* The last step is to download *only* the beacon again, because
	 * the previous download's tx_desc described the whole rsvd page
	 * buffer. Downloading the beacon once more replaces that tx_desc
	 * header, leaving a correct tx_desc for the beacon in the rsvd
	 * page.
	 */
	ret = rtw_download_beacon(rtwdev, vif);
	if (ret) {
		rtw_err(rtwdev, "failed to download beacon\n");
		goto free;
	}

free:
	kfree(buf);

	return ret;
}
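
/* Read back 'size' bytes of the reserved page area at 'offset' through
 * the packet buffer debug port, 4 bytes at a time; 'size' must be a
 * multiple of 4.
 */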
int rtw_dump_drv_rsvd_page(struct rtw_dev *rtwdev,
			   u32 offset, u32 size, u32 *buf)
{
	struct rtw_fifo_conf *fifo = &rtwdev->fifo;
	u32 residue, i;
	u16 start_pg;
	u16 idx = 0;
	u16 ctl;
	u8 rcr;

	if (size & 0x3) {
		rtw_warn(rtwdev, "should be 4-byte aligned\n");
		return -EINVAL;
	}

	offset += fifo->rsvd_boundary << TX_PAGE_SIZE_SHIFT;
	residue = offset & (FIFO_PAGE_SIZE - 1);
	start_pg = offset >> FIFO_PAGE_SIZE_SHIFT;
	start_pg += RSVD_PAGE_START_ADDR;

	rcr = rtw_read8(rtwdev, REG_RCR + 2);
	ctl = rtw_read16(rtwdev, REG_PKTBUF_DBG_CTRL) & 0xf000;

	/* disable rx clock gate */
	rtw_write8(rtwdev, REG_RCR, rcr | BIT(3));

	do {
		rtw_write16(rtwdev, REG_PKTBUF_DBG_CTRL, start_pg | ctl);

		for (i = FIFO_DUMP_ADDR + residue;
		     i < FIFO_DUMP_ADDR + FIFO_PAGE_SIZE; i += 4) {
			buf[idx++] = rtw_read32(rtwdev, i);
			size -= 4;
			if (size == 0)
				goto out;
		}

		residue = 0;
		start_pg++;
	} while (size);

out:
	rtw_write16(rtwdev, REG_PKTBUF_DBG_CTRL, ctl);
	rtw_write8(rtwdev, REG_RCR + 2, rcr);
	return 0;
}

static void __rtw_fw_update_pkt(struct rtw_dev *rtwdev, u8 pkt_id, u16 size,
				u8 location)
{
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u16 total_size = H2C_PKT_HDR_SIZE + H2C_PKT_UPDATE_PKT_LEN;

	rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_UPDATE_PKT);

	SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
	UPDATE_PKT_SET_PKT_ID(h2c_pkt, pkt_id);
	UPDATE_PKT_SET_LOCATION(h2c_pkt, location);

	/* include txdesc size */
	UPDATE_PKT_SET_SIZE(h2c_pkt, size);

	rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}

void rtw_fw_update_pkt_probe_req(struct rtw_dev *rtwdev,
				 struct cfg80211_ssid *ssid)
{
	u8 loc;
	u32 size;

	loc = rtw_get_rsvd_page_probe_req_location(rtwdev, ssid);
	if (!loc) {
		rtw_err(rtwdev, "failed to get probe_req rsvd loc\n");
		return;
	}

	size = rtw_get_rsvd_page_probe_req_size(rtwdev, ssid);
	if (!size) {
		rtw_err(rtwdev, "failed to get probe_req rsvd size\n");
		return;
	}

	__rtw_fw_update_pkt(rtwdev, RTW_PACKET_PROBE_REQ, size, loc);
}

void rtw_fw_channel_switch(struct rtw_dev *rtwdev, bool enable)
{
	struct rtw_pno_request *rtw_pno_req = &rtwdev->wow.pno_req;
	u8 h2c_pkt[H2C_PKT_SIZE] = {0};
	u16 total_size = H2C_PKT_HDR_SIZE + H2C_PKT_CH_SWITCH_LEN;
	u8 loc_ch_info;
	const struct rtw_ch_switch_option cs_option = {
		.dest_ch_en = 1,
		.dest_ch = 1,
		.periodic_option = 2,
		.normal_period = 5,
		.normal_period_sel = 0,
		.normal_cycle = 10,
		.slow_period = 1,
		.slow_period_sel = 1,
	};

	rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_CH_SWITCH);
	SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);

	CH_SWITCH_SET_START(h2c_pkt, enable);
	CH_SWITCH_SET_DEST_CH_EN(h2c_pkt, cs_option.dest_ch_en);
	CH_SWITCH_SET_DEST_CH(h2c_pkt, cs_option.dest_ch);
	CH_SWITCH_SET_NORMAL_PERIOD(h2c_pkt, cs_option.normal_period);
	CH_SWITCH_SET_NORMAL_PERIOD_SEL(h2c_pkt, cs_option.normal_period_sel);
	CH_SWITCH_SET_SLOW_PERIOD(h2c_pkt, cs_option.slow_period);
	CH_SWITCH_SET_SLOW_PERIOD_SEL(h2c_pkt, cs_option.slow_period_sel);
	CH_SWITCH_SET_NORMAL_CYCLE(h2c_pkt, cs_option.normal_cycle);
	CH_SWITCH_SET_PERIODIC_OPT(h2c_pkt, cs_option.periodic_option);

	CH_SWITCH_SET_CH_NUM(h2c_pkt, rtw_pno_req->channel_cnt);
	CH_SWITCH_SET_INFO_SIZE(h2c_pkt, rtw_pno_req->channel_cnt * 4);

	loc_ch_info = rtw_get_rsvd_page_location(rtwdev, RSVD_CH_INFO);
	CH_SWITCH_SET_INFO_LOC(h2c_pkt, loc_ch_info);

	rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}