/*
 * Copyright (c) 2018 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/platform_device.h>
#include <linux/qcom_scm.h>
#include <linux/string.h>
#include <net/sock.h>

#include "debug.h"
#include "snoc.h"

#define ATH10K_QMI_CLIENT_ID		0x4b4e454c
#define ATH10K_QMI_TIMEOUT		30

static int ath10k_qmi_map_msa_permission(struct ath10k_qmi *qmi,
					 struct ath10k_msa_mem_info *mem_info)
{
	struct qcom_scm_vmperm dst_perms[3];
	struct ath10k *ar = qmi->ar;
	unsigned int src_perms;
	u32 perm_count;
	int ret;

	src_perms = BIT(QCOM_SCM_VMID_HLOS);

	dst_perms[0].vmid = QCOM_SCM_VMID_MSS_MSA;
	dst_perms[0].perm = QCOM_SCM_PERM_RW;
	dst_perms[1].vmid = QCOM_SCM_VMID_WLAN;
	dst_perms[1].perm = QCOM_SCM_PERM_RW;

	if (mem_info->secure) {
		perm_count = 2;
	} else {
		dst_perms[2].vmid = QCOM_SCM_VMID_WLAN_CE;
		dst_perms[2].perm = QCOM_SCM_PERM_RW;
		perm_count = 3;
	}

	ret = qcom_scm_assign_mem(mem_info->addr, mem_info->size,
				  &src_perms, dst_perms, perm_count);
	if (ret < 0)
		ath10k_err(ar, "failed to assign msa map permissions: %d\n", ret);

	return ret;
}

static int ath10k_qmi_unmap_msa_permission(struct ath10k_qmi *qmi,
					   struct ath10k_msa_mem_info *mem_info)
{
	struct qcom_scm_vmperm dst_perms;
	struct ath10k *ar = qmi->ar;
	unsigned int src_perms;
	int ret;

	src_perms = BIT(QCOM_SCM_VMID_MSS_MSA) | BIT(QCOM_SCM_VMID_WLAN);

	if (!mem_info->secure)
		src_perms |= BIT(QCOM_SCM_VMID_WLAN_CE);

	dst_perms.vmid = QCOM_SCM_VMID_HLOS;
	dst_perms.perm = QCOM_SCM_PERM_RW;

	ret = qcom_scm_assign_mem(mem_info->addr, mem_info->size,
				  &src_perms, &dst_perms, 1);
	if (ret < 0)
		ath10k_err(ar, "failed to unmap msa permissions: %d\n", ret);

	return ret;
}

static int ath10k_qmi_setup_msa_permissions(struct ath10k_qmi *qmi)
{
	int ret;
	int i;

	for (i = 0; i < qmi->nr_mem_region; i++) {
		ret = ath10k_qmi_map_msa_permission(qmi, &qmi->mem_region[i]);
		if (ret)
			goto err_unmap;
	}

	return 0;

err_unmap:
	for (i--; i >= 0; i--)
		ath10k_qmi_unmap_msa_permission(qmi, &qmi->mem_region[i]);
	return ret;
}

static void ath10k_qmi_remove_msa_permission(struct ath10k_qmi *qmi)
{
	int i;

	for (i = 0; i < qmi->nr_mem_region; i++)
		ath10k_qmi_unmap_msa_permission(qmi, &qmi->mem_region[i]);
}

static int ath10k_qmi_msa_mem_info_send_sync_msg(struct ath10k_qmi *qmi)
{
	struct wlfw_msa_info_resp_msg_v01 resp = {};
	struct wlfw_msa_info_req_msg_v01 req = {};
	struct ath10k *ar = qmi->ar;
	struct qmi_txn txn;
	int ret;
	int i;

	req.msa_addr = qmi->msa_pa;
	req.size = qmi->msa_mem_size;

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
			   wlfw_msa_info_resp_msg_v01_ei, &resp);
	if (ret < 0)
		goto out;

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_MSA_INFO_REQ_V01,
			       WLFW_MSA_INFO_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_msa_info_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send msa mem info req: %d\n", ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		ath10k_err(ar, "msa info req rejected: %d\n", resp.resp.error);
		ret = -EINVAL;
		goto out;
	}

	if (resp.mem_region_info_len > QMI_WLFW_MAX_MEM_REG_V01) {
		ath10k_err(ar, "invalid memory region length received: %d\n",
			   resp.mem_region_info_len);
		ret = -EINVAL;
		goto out;
	}

	qmi->nr_mem_region = resp.mem_region_info_len;
	for (i = 0; i < resp.mem_region_info_len; i++) {
		qmi->mem_region[i].addr = resp.mem_region_info[i].region_addr;
		qmi->mem_region[i].size = resp.mem_region_info[i].size;
		qmi->mem_region[i].secure = resp.mem_region_info[i].secure_flag;
		ath10k_dbg(ar, ATH10K_DBG_QMI,
			   "qmi msa mem region %d addr 0x%pa size 0x%x flag 0x%08x\n",
			   i, &qmi->mem_region[i].addr,
			   qmi->mem_region[i].size,
			   qmi->mem_region[i].secure);
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa mem info request completed\n");
	return 0;

out:
	return ret;
}

static int ath10k_qmi_msa_ready_send_sync_msg(struct ath10k_qmi *qmi)
{
	struct wlfw_msa_ready_resp_msg_v01 resp = {};
	struct wlfw_msa_ready_req_msg_v01 req = {};
	struct ath10k *ar = qmi->ar;
	struct qmi_txn txn;
	int ret;

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
			   wlfw_msa_ready_resp_msg_v01_ei, &resp);
	if (ret < 0)
		goto out;

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_MSA_READY_REQ_V01,
			       WLFW_MSA_READY_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_msa_ready_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send msa mem ready request: %d\n", ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		ath10k_err(ar, "msa ready request rejected: %d\n", resp.resp.error);
		ret = -EINVAL;
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa mem ready request completed\n");
	return 0;

out:
	return ret;
}

static int ath10k_qmi_bdf_dnld_send_sync(struct ath10k_qmi *qmi)
{
	struct wlfw_bdf_download_resp_msg_v01 resp = {};
	struct wlfw_bdf_download_req_msg_v01 *req;
	struct ath10k *ar = qmi->ar;
	unsigned int remaining;
	struct qmi_txn txn;
	const u8 *temp;
	int ret;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	temp = ar->normal_mode_fw.board_data;
	remaining = ar->normal_mode_fw.board_len;

	while (remaining) {
		req->valid = 1;
		req->file_id_valid = 1;
		req->file_id = 0;
		req->total_size_valid = 1;
		req->total_size = ar->normal_mode_fw.board_len;
		req->seg_id_valid = 1;
		req->data_valid = 1;
		req->end_valid = 1;

		if (remaining > QMI_WLFW_MAX_DATA_SIZE_V01) {
			req->data_len = QMI_WLFW_MAX_DATA_SIZE_V01;
		} else {
			req->data_len = remaining;
			req->end = 1;
		}

		memcpy(req->data, temp, req->data_len);

		ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
				   wlfw_bdf_download_resp_msg_v01_ei,
				   &resp);
		if (ret < 0)
			goto out;

		ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
				       QMI_WLFW_BDF_DOWNLOAD_REQ_V01,
				       WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
				       wlfw_bdf_download_req_msg_v01_ei, req);
		if (ret < 0) {
			qmi_txn_cancel(&txn);
			goto out;
		}

		ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
		if (ret < 0)
			goto out;

		if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
			ath10k_err(ar, "failed to download board data file: %d\n",
				   resp.resp.error);
			ret = -EINVAL;
			goto out;
		}

		remaining -= req->data_len;
		temp += req->data_len;
		req->seg_id++;
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi bdf download request completed\n");

	kfree(req);
	return 0;

out:
	kfree(req);
	return ret;
}

static int ath10k_qmi_send_cal_report_req(struct ath10k_qmi *qmi)
{
	struct wlfw_cal_report_resp_msg_v01 resp = {};
	struct wlfw_cal_report_req_msg_v01 req = {};
	struct ath10k *ar = qmi->ar;
	struct qmi_txn txn;
	int i, j = 0;
	int ret;

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_cal_report_resp_msg_v01_ei,
			   &resp);
	if (ret < 0)
		goto out;

	for (i = 0; i < QMI_WLFW_MAX_NUM_CAL_V01; i++) {
		if (qmi->cal_data[i].total_size &&
		    qmi->cal_data[i].data) {
			req.meta_data[j] = qmi->cal_data[i].cal_id;
			j++;
		}
	}
	req.meta_data_len = j;

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_CAL_REPORT_REQ_V01,
			       WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_cal_report_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send calibration request: %d\n", ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		ath10k_err(ar, "calibration request rejected: %d\n", resp.resp.error);
		ret = -EINVAL;
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi cal report request completed\n");
	return 0;

out:
	return ret;
}

static int
ath10k_qmi_mode_send_sync_msg(struct ath10k *ar, enum wlfw_driver_mode_enum_v01 mode)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_qmi *qmi = ar_snoc->qmi;
	struct wlfw_wlan_mode_resp_msg_v01 resp = {};
	struct wlfw_wlan_mode_req_msg_v01 req = {};
	struct qmi_txn txn;
	int ret;

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
			   wlfw_wlan_mode_resp_msg_v01_ei,
			   &resp);
	if (ret < 0)
		goto out;

	req.mode = mode;
	req.hw_debug_valid = 1;
	req.hw_debug = 0;

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_WLAN_MODE_REQ_V01,
			       WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_wlan_mode_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send wlan mode %d request: %d\n", mode, ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		ath10k_err(ar, "mode request rejected: %d\n", resp.resp.error);
		ret = -EINVAL;
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi wlan mode req completed: %d\n", mode);
	return 0;

out:
	return ret;
}

static int
ath10k_qmi_cfg_send_sync_msg(struct ath10k *ar,
			     struct ath10k_qmi_wlan_enable_cfg *config,
			     const char *version)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_qmi *qmi = ar_snoc->qmi;
	struct wlfw_wlan_cfg_resp_msg_v01 resp = {};
	struct wlfw_wlan_cfg_req_msg_v01 *req;
	struct qmi_txn txn;
	int ret;
	u32 i;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
			   wlfw_wlan_cfg_resp_msg_v01_ei,
			   &resp);
	if (ret < 0)
		goto out;

	req->host_version_valid = 0;

	req->tgt_cfg_valid = 1;
	if (config->num_ce_tgt_cfg > QMI_WLFW_MAX_NUM_CE_V01)
		req->tgt_cfg_len = QMI_WLFW_MAX_NUM_CE_V01;
	else
		req->tgt_cfg_len = config->num_ce_tgt_cfg;
	for (i = 0; i < req->tgt_cfg_len; i++) {
		req->tgt_cfg[i].pipe_num = config->ce_tgt_cfg[i].pipe_num;
		req->tgt_cfg[i].pipe_dir = config->ce_tgt_cfg[i].pipe_dir;
		req->tgt_cfg[i].nentries = config->ce_tgt_cfg[i].nentries;
		req->tgt_cfg[i].nbytes_max = config->ce_tgt_cfg[i].nbytes_max;
		req->tgt_cfg[i].flags = config->ce_tgt_cfg[i].flags;
	}

	req->svc_cfg_valid = 1;
	if (config->num_ce_svc_pipe_cfg > QMI_WLFW_MAX_NUM_SVC_V01)
		req->svc_cfg_len = QMI_WLFW_MAX_NUM_SVC_V01;
	else
		req->svc_cfg_len = config->num_ce_svc_pipe_cfg;
	for (i = 0; i < req->svc_cfg_len; i++) {
		req->svc_cfg[i].service_id = config->ce_svc_cfg[i].service_id;
		req->svc_cfg[i].pipe_dir = config->ce_svc_cfg[i].pipe_dir;
		req->svc_cfg[i].pipe_num = config->ce_svc_cfg[i].pipe_num;
	}

	req->shadow_reg_valid = 1;
	if (config->num_shadow_reg_cfg > QMI_WLFW_MAX_NUM_SHADOW_REG_V01)
		req->shadow_reg_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V01;
	else
		req->shadow_reg_len = config->num_shadow_reg_cfg;

	memcpy(req->shadow_reg, config->shadow_reg_cfg,
	       sizeof(struct wlfw_shadow_reg_cfg_s_v01) * req->shadow_reg_len);

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_WLAN_CFG_REQ_V01,
			       WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_wlan_cfg_req_msg_v01_ei, req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send config request: %d\n", ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		ath10k_err(ar, "config request rejected: %d\n", resp.resp.error);
		ret = -EINVAL;
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi config request completed\n");
	kfree(req);
	return 0;

out:
	kfree(req);
	return ret;
}

int ath10k_qmi_wlan_enable(struct ath10k *ar,
			   struct ath10k_qmi_wlan_enable_cfg *config,
			   enum wlfw_driver_mode_enum_v01 mode,
			   const char *version)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi mode %d config %p\n",
		   mode, config);

	ret = ath10k_qmi_cfg_send_sync_msg(ar, config, version);
	if (ret) {
		ath10k_err(ar, "failed to send qmi config: %d\n", ret);
		return ret;
	}

	ret = ath10k_qmi_mode_send_sync_msg(ar, mode);
	if (ret) {
		ath10k_err(ar, "failed to send qmi mode: %d\n", ret);
		return ret;
	}

	return 0;
}

int ath10k_qmi_wlan_disable(struct ath10k *ar)
{
	return ath10k_qmi_mode_send_sync_msg(ar, QMI_WLFW_OFF_V01);
}

static int ath10k_qmi_cap_send_sync_msg(struct ath10k_qmi *qmi)
{
	struct wlfw_cap_resp_msg_v01 *resp;
	struct wlfw_cap_req_msg_v01 req = {};
	struct ath10k *ar = qmi->ar;
	struct qmi_txn txn;
	int ret;

	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_cap_resp_msg_v01_ei, resp);
	if (ret < 0)
		goto out;

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_CAP_REQ_V01,
			       WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_cap_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send capability request: %d\n", ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
		ath10k_err(ar, "capability req rejected: %d\n", resp->resp.error);
		ret = -EINVAL;
		goto out;
	}

	if (resp->chip_info_valid) {
		qmi->chip_info.chip_id = resp->chip_info.chip_id;
		qmi->chip_info.chip_family = resp->chip_info.chip_family;
	}

	if (resp->board_info_valid)
		qmi->board_info.board_id = resp->board_info.board_id;
	else
		qmi->board_info.board_id = 0xFF;

	if (resp->soc_info_valid)
		qmi->soc_info.soc_id = resp->soc_info.soc_id;

	if (resp->fw_version_info_valid) {
		qmi->fw_version = resp->fw_version_info.fw_version;
		strlcpy(qmi->fw_build_timestamp, resp->fw_version_info.fw_build_timestamp,
			sizeof(qmi->fw_build_timestamp));
	}

	if (resp->fw_build_id_valid)
		strlcpy(qmi->fw_build_id, resp->fw_build_id,
			MAX_BUILD_ID_LEN + 1);

	ath10k_dbg(ar, ATH10K_DBG_QMI,
		   "qmi chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x",
		   qmi->chip_info.chip_id, qmi->chip_info.chip_family,
		   qmi->board_info.board_id, qmi->soc_info.soc_id);
	ath10k_dbg(ar, ATH10K_DBG_QMI,
		   "qmi fw_version 0x%x fw_build_timestamp %s fw_build_id %s",
		   qmi->fw_version, qmi->fw_build_timestamp, qmi->fw_build_id);

	kfree(resp);
	return 0;

out:
	kfree(resp);
	return ret;
}

static int ath10k_qmi_host_cap_send_sync(struct ath10k_qmi *qmi)
{
	struct wlfw_host_cap_resp_msg_v01 resp = {};
	struct wlfw_host_cap_req_msg_v01 req = {};
	struct ath10k *ar = qmi->ar;
	struct qmi_txn txn;
	int ret;

	req.daemon_support_valid = 1;
	req.daemon_support = 0;

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
			   wlfw_host_cap_resp_msg_v01_ei, &resp);
	if (ret < 0)
		goto out;

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_HOST_CAP_REQ_V01,
			       WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_host_cap_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send host capability request: %d\n", ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		ath10k_err(ar, "host capability request rejected: %d\n", resp.resp.error);
		ret = -EINVAL;
		goto out;
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi host capability request completed\n");
	return 0;

out:
	return ret;
}

static int
ath10k_qmi_ind_register_send_sync_msg(struct ath10k_qmi *qmi)
{
	struct wlfw_ind_register_resp_msg_v01 resp = {};
	struct wlfw_ind_register_req_msg_v01 req = {};
	struct ath10k *ar = qmi->ar;
	struct qmi_txn txn;
	int ret;

	req.client_id_valid = 1;
	req.client_id = ATH10K_QMI_CLIENT_ID;
	req.fw_ready_enable_valid = 1;
	req.fw_ready_enable = 1;
	req.msa_ready_enable_valid = 1;
	req.msa_ready_enable = 1;

	ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
			   wlfw_ind_register_resp_msg_v01_ei, &resp);
	if (ret < 0)
		goto out;

	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
			       QMI_WLFW_IND_REGISTER_REQ_V01,
			       WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN,
			       wlfw_ind_register_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		ath10k_err(ar, "failed to send indication register request: %d\n", ret);
		goto out;
	}

	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
	if (ret < 0)
		goto out;

	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		ath10k_err(ar, "indication request rejected: %d\n", resp.resp.error);
		ret = -EINVAL;
		goto out;
	}

	if (resp.fw_status_valid) {
		if (resp.fw_status & QMI_WLFW_FW_READY_V01)
			qmi->fw_ready = true;
	}
	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi indication register request completed\n");
	return 0;

out:
	return ret;
}

static void ath10k_qmi_event_server_arrive(struct ath10k_qmi *qmi)
{
	struct ath10k *ar = qmi->ar;
	int ret;

	ret = ath10k_qmi_ind_register_send_sync_msg(qmi);
	if (ret)
		return;

	if (qmi->fw_ready) {
		ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_READY_IND);
		return;
	}

	ret = ath10k_qmi_host_cap_send_sync(qmi);
	if (ret)
		return;

	ret = ath10k_qmi_msa_mem_info_send_sync_msg(qmi);
	if (ret)
		return;

	ret = ath10k_qmi_setup_msa_permissions(qmi);
	if (ret)
		return;

	ret = ath10k_qmi_msa_ready_send_sync_msg(qmi);
	if (ret)
		goto err_setup_msa;

	ret = ath10k_qmi_cap_send_sync_msg(qmi);
	if (ret)
		goto err_setup_msa;

	return;

err_setup_msa:
	ath10k_qmi_remove_msa_permission(qmi);
}

static int ath10k_qmi_fetch_board_file(struct ath10k_qmi *qmi)
{
	struct ath10k *ar = qmi->ar;

	ar->hif.bus = ATH10K_BUS_SNOC;
	ar->id.qmi_ids_valid = true;
	ar->id.qmi_board_id = qmi->board_info.board_id;
	ar->hw_params.fw.dir = WCN3990_HW_1_0_FW_DIR;

	return ath10k_core_fetch_board_file(qmi->ar, ATH10K_BD_IE_BOARD);
}

static int
ath10k_qmi_driver_event_post(struct ath10k_qmi *qmi,
			     enum ath10k_qmi_driver_event_type type,
			     void *data)
{
	struct ath10k_qmi_driver_event *event;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return -ENOMEM;

	event->type = type;
	event->data = data;

	spin_lock(&qmi->event_lock);
	list_add_tail(&event->list, &qmi->event_list);
	spin_unlock(&qmi->event_lock);

	queue_work(qmi->event_wq, &qmi->event_work);

	return 0;
}

static void ath10k_qmi_event_server_exit(struct ath10k_qmi *qmi)
{
	struct ath10k *ar = qmi->ar;

	ath10k_qmi_remove_msa_permission(qmi);
	ath10k_core_free_board_files(ar);
	ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_DOWN_IND);
	ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw qmi service disconnected\n");
}

static void ath10k_qmi_event_msa_ready(struct ath10k_qmi *qmi)
{
	int ret;

	ret = ath10k_qmi_fetch_board_file(qmi);
	if (ret)
		goto out;

	ret = ath10k_qmi_bdf_dnld_send_sync(qmi);
	if (ret)
		goto out;

	ret = ath10k_qmi_send_cal_report_req(qmi);

out:
	return;
}

static int ath10k_qmi_event_fw_ready_ind(struct ath10k_qmi *qmi)
{
	struct ath10k *ar = qmi->ar;

	ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw ready event received\n");
	ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_READY_IND);

	return 0;
}

static void ath10k_qmi_fw_ready_ind(struct qmi_handle *qmi_hdl,
				    struct sockaddr_qrtr *sq,
				    struct qmi_txn *txn, const void *data)
{
	struct ath10k_qmi *qmi = container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);

	ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_FW_READY_IND, NULL);
}

static void ath10k_qmi_msa_ready_ind(struct qmi_handle *qmi_hdl,
				     struct sockaddr_qrtr *sq,
				     struct qmi_txn *txn, const void *data)
{
	struct ath10k_qmi *qmi = container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);

	ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_MSA_READY_IND, NULL);
}

static struct qmi_msg_handler qmi_msg_handler[] = {
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_WLFW_FW_READY_IND_V01,
		.ei = wlfw_fw_ready_ind_msg_v01_ei,
		.decoded_size = sizeof(struct wlfw_fw_ready_ind_msg_v01),
		.fn = ath10k_qmi_fw_ready_ind,
	},
	{
		.type = QMI_INDICATION,
		.msg_id = QMI_WLFW_MSA_READY_IND_V01,
		.ei = wlfw_msa_ready_ind_msg_v01_ei,
		.decoded_size = sizeof(struct wlfw_msa_ready_ind_msg_v01),
		.fn = ath10k_qmi_msa_ready_ind,
	},
	{}
};

static int ath10k_qmi_new_server(struct qmi_handle *qmi_hdl,
				 struct qmi_service *service)
{
	struct ath10k_qmi *qmi = container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);
	struct sockaddr_qrtr *sq = &qmi->sq;
	struct ath10k *ar = qmi->ar;
	int ret;

	sq->sq_family = AF_QIPCRTR;
	sq->sq_node = service->node;
	sq->sq_port = service->port;

	ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw qmi service found\n");

	ret = kernel_connect(qmi_hdl->sock, (struct sockaddr *)&qmi->sq,
			     sizeof(qmi->sq), 0);
	if (ret) {
		ath10k_err(ar, "failed to connect to a remote QMI service port\n");
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi wifi fw qmi service connected\n");
	ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_SERVER_ARRIVE, NULL);

	return ret;
}

static void ath10k_qmi_del_server(struct qmi_handle *qmi_hdl,
				  struct qmi_service *service)
{
	struct ath10k_qmi *qmi =
		container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);

	qmi->fw_ready = false;
	ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_SERVER_EXIT, NULL);
}

static struct qmi_ops ath10k_qmi_ops = {
	.new_server = ath10k_qmi_new_server,
	.del_server = ath10k_qmi_del_server,
};

static void ath10k_qmi_driver_event_work(struct work_struct *work)
{
	struct ath10k_qmi *qmi = container_of(work, struct ath10k_qmi,
					      event_work);
	struct ath10k_qmi_driver_event *event;
	struct ath10k *ar = qmi->ar;

	spin_lock(&qmi->event_lock);
	while (!list_empty(&qmi->event_list)) {
		event = list_first_entry(&qmi->event_list,
					 struct ath10k_qmi_driver_event, list);
		list_del(&event->list);
		spin_unlock(&qmi->event_lock);

		switch (event->type) {
		case ATH10K_QMI_EVENT_SERVER_ARRIVE:
			ath10k_qmi_event_server_arrive(qmi);
			break;
		case ATH10K_QMI_EVENT_SERVER_EXIT:
			ath10k_qmi_event_server_exit(qmi);
			break;
		case ATH10K_QMI_EVENT_FW_READY_IND:
			ath10k_qmi_event_fw_ready_ind(qmi);
			break;
		case ATH10K_QMI_EVENT_MSA_READY_IND:
			ath10k_qmi_event_msa_ready(qmi);
			break;
		default:
			ath10k_warn(ar, "invalid event type: %d", event->type);
			break;
		}
		kfree(event);
		spin_lock(&qmi->event_lock);
	}
	spin_unlock(&qmi->event_lock);
}

static int ath10k_qmi_setup_msa_resources(struct ath10k_qmi *qmi, u32 msa_size)
{
	struct ath10k *ar = qmi->ar;
	struct device *dev = ar->dev;
	struct device_node *node;
	struct resource r;
	int ret;

	node = of_parse_phandle(dev->of_node, "memory-region", 0);
	if (node) {
		ret = of_address_to_resource(node, 0, &r);
		if (ret) {
			dev_err(dev, "failed to resolve msa fixed region\n");
			return ret;
		}
		of_node_put(node);

		qmi->msa_pa = r.start;
		qmi->msa_mem_size = resource_size(&r);
		qmi->msa_va = devm_memremap(dev, qmi->msa_pa, qmi->msa_mem_size,
					    MEMREMAP_WT);
		if (IS_ERR(qmi->msa_va)) {
			dev_err(dev, "failed to map memory region: %pa\n", &r.start);
			return PTR_ERR(qmi->msa_va);
		}
	} else {
		qmi->msa_va = dmam_alloc_coherent(dev, msa_size,
						  &qmi->msa_pa, GFP_KERNEL);
		if (!qmi->msa_va) {
			ath10k_err(ar, "failed to allocate dma memory for msa region\n");
			return -ENOMEM;
		}
		qmi->msa_mem_size = msa_size;
	}

	ath10k_dbg(ar, ATH10K_DBG_QMI, "msa pa: %pad , msa va: 0x%p\n",
		   &qmi->msa_pa,
		   qmi->msa_va);

	return 0;
}

int ath10k_qmi_init(struct ath10k *ar, u32 msa_size)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_qmi *qmi;
	int ret;

	qmi = kzalloc(sizeof(*qmi), GFP_KERNEL);
	if (!qmi)
		return -ENOMEM;

	qmi->ar = ar;
	ar_snoc->qmi = qmi;

	ret = ath10k_qmi_setup_msa_resources(qmi, msa_size);
	if (ret)
		goto err;

	ret = qmi_handle_init(&qmi->qmi_hdl,
			      WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
			      &ath10k_qmi_ops, qmi_msg_handler);
	if (ret)
		goto err;

	qmi->event_wq = alloc_workqueue("ath10k_qmi_driver_event",
					WQ_UNBOUND, 1);
	if (!qmi->event_wq) {
		ath10k_err(ar, "failed to allocate workqueue\n");
		ret = -EFAULT;
		goto err_release_qmi_handle;
	}

	INIT_LIST_HEAD(&qmi->event_list);
	spin_lock_init(&qmi->event_lock);
	INIT_WORK(&qmi->event_work, ath10k_qmi_driver_event_work);

	ret = qmi_add_lookup(&qmi->qmi_hdl, WLFW_SERVICE_ID_V01,
			     WLFW_SERVICE_VERS_V01, 0);
	if (ret)
		goto err_qmi_lookup;

	return 0;

err_qmi_lookup:
	destroy_workqueue(qmi->event_wq);

err_release_qmi_handle:
	qmi_handle_release(&qmi->qmi_hdl);

err:
	kfree(qmi);
	return ret;
}

int ath10k_qmi_deinit(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_qmi *qmi = ar_snoc->qmi;

	qmi_handle_release(&qmi->qmi_hdl);
	cancel_work_sync(&qmi->event_work);
	destroy_workqueue(qmi->event_wq);
	ar_snoc->qmi = NULL;

	return 0;
}