/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include "iwl-trans.h"
#include "iwl-fh.h"
#include "iwl-context-info.h"
#include "internal.h"
#include "iwl-prph.h"

/*
 * Count the contiguous firmware sections that belong to one image,
 * starting at @start and stopping at the first CPU or paging separator.
 */
static int iwl_pcie_get_num_sections(const struct fw_img *fw,
				     int start)
{
	int i = 0;

	while (start < fw->num_sec &&
	       fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
	       fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
		start++;
		i++;
	}

	return i;
}

/*
 * Allocate a coherent DMA block large enough for one firmware section
 * and copy the section data into it.
 */
static int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
					const struct fw_desc *sec,
					struct iwl_dram_data *dram)
{
	dram->block = dma_alloc_coherent(trans->dev, sec->len,
					 &dram->physical,
					 GFP_KERNEL);
	if (!dram->block)
		return -ENOMEM;

	dram->size = sec->len;
	memcpy(dram->block, sec->data, sec->len);

	return 0;
}

static void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
	int i;

	if (!dram->fw) {
		WARN_ON(dram->fw_cnt);
		return;
	}

	for (i = 0; i < dram->fw_cnt; i++)
		dma_free_coherent(trans->dev, dram->fw[i].size,
				  dram->fw[i].block, dram->fw[i].physical);

	kfree(dram->fw);
	dram->fw_cnt = 0;
}

void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
	int i;

	if (!dram->paging) {
		WARN_ON(dram->paging_cnt);
		return;
	}

	/* free paging */
	for (i = 0; i < dram->paging_cnt; i++)
		dma_free_coherent(trans->dev, dram->paging[i].size,
				  dram->paging[i].block,
				  dram->paging[i].physical);

	kfree(dram->paging);
	dram->paging_cnt = 0;
}

static int iwl_pcie_ctxt_info_init_fw_sec(struct iwl_trans *trans,
					  const struct fw_img *fw,
					  struct iwl_context_info *ctxt_info)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
	struct iwl_context_info_dram *ctxt_dram = &ctxt_info->dram;
	int i, ret, lmac_cnt, umac_cnt, paging_cnt;

	lmac_cnt = iwl_pcie_get_num_sections(fw, 0);
	/* add 1 due to separator */
	umac_cnt = iwl_pcie_get_num_sections(fw, lmac_cnt + 1);
	/* add 2 due to separators */
	paging_cnt = iwl_pcie_get_num_sections(fw, lmac_cnt + umac_cnt + 2);

	dram->fw = kcalloc(umac_cnt + lmac_cnt, sizeof(*dram->fw), GFP_KERNEL);
	if (!dram->fw)
		return -ENOMEM;
	dram->paging = kcalloc(paging_cnt, sizeof(*dram->paging), GFP_KERNEL);
	if (!dram->paging)
		return -ENOMEM;

	/* initialize lmac sections */
	for (i = 0; i < lmac_cnt; i++) {
		ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[i],
						   &dram->fw[dram->fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->lmac_img[i] =
			cpu_to_le64(dram->fw[dram->fw_cnt].physical);
		dram->fw_cnt++;
	}

	/* initialize umac sections */
	for (i = 0; i < umac_cnt; i++) {
		/* access FW with +1 to make up for lmac separator */
		ret = iwl_pcie_ctxt_info_alloc_dma(trans,
						   &fw->sec[dram->fw_cnt + 1],
						   &dram->fw[dram->fw_cnt]);
		if (ret)
			return ret;
		ctxt_dram->umac_img[i] =
			cpu_to_le64(dram->fw[dram->fw_cnt].physical);
		dram->fw_cnt++;
	}
	/*
	 * Initialize paging.
	 * Paging memory isn't stored in dram->fw like the umac and lmac
	 * sections - it is stored separately.
	 * This is because the timing of its release is different -
	 * while fw memory can be released on alive, the paging memory can be
	 * freed only when the device goes down.
	 * Given that, the logic here in accessing the fw image is a bit
	 * different - fw_cnt isn't changing, so the loop counter is added
	 * to it.
	 */
	for (i = 0; i < paging_cnt; i++) {
		/* access FW with +2 to make up for lmac & umac separators */
		int fw_idx = dram->fw_cnt + i + 2;

		ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[fw_idx],
						   &dram->paging[i]);
		if (ret)
			return ret;

		ctxt_dram->virtual_img[i] =
			cpu_to_le64(dram->paging[i].physical);
		dram->paging_cnt++;
	}

	return 0;
}

/*
 * Build the context info structure that the firmware reads during self
 * init, point it at the RX/TX queues and ucode sections in DRAM, and
 * kick the firmware self load.
 */
int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
			    const struct fw_img *fw)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_context_info *ctxt_info;
	struct iwl_context_info_rbd_cfg *rx_cfg;
	u32 control_flags = 0;
	int ret;

	ctxt_info = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info),
				       &trans_pcie->ctxt_info_dma_addr,
				       GFP_KERNEL);
	if (!ctxt_info)
		return -ENOMEM;

	ctxt_info->version.version = 0;
	ctxt_info->version.mac_id =
		cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
	/* size is in DWs */
	ctxt_info->version.size = cpu_to_le16(sizeof(*ctxt_info) / 4);

	BUILD_BUG_ON(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) > 0xF);
	control_flags = IWL_CTXT_INFO_RB_SIZE_4K |
			IWL_CTXT_INFO_TFD_FORMAT_LONG |
			RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) <<
			IWL_CTXT_INFO_RB_CB_SIZE_POS;
	ctxt_info->control.control_flags = cpu_to_le32(control_flags);

	/* initialize RX default queue */
	rx_cfg = &ctxt_info->rbd_cfg;
	rx_cfg->free_rbd_addr = cpu_to_le64(trans_pcie->rxq->bd_dma);
	rx_cfg->used_rbd_addr = cpu_to_le64(trans_pcie->rxq->used_bd_dma);
	rx_cfg->status_wr_ptr = cpu_to_le64(trans_pcie->rxq->rb_stts_dma);

	/* initialize TX command queue */
	ctxt_info->hcmd_cfg.cmd_queue_addr =
		cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
	ctxt_info->hcmd_cfg.cmd_queue_size =
		TFD_QUEUE_CB_SIZE(TFD_QUEUE_SIZE_MAX);

	/* allocate ucode sections in dram and set addresses */
	ret = iwl_pcie_ctxt_info_init_fw_sec(trans, fw, ctxt_info);
	if (ret) {
		dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info),
				  ctxt_info, trans_pcie->ctxt_info_dma_addr);
		return ret;
	}

	trans_pcie->ctxt_info = ctxt_info;

	iwl_enable_interrupts(trans);

	/* Configure debug, if exists */
	if (trans->dbg_dest_tlv)
		iwl_pcie_apply_destination(trans);

	/* kick FW self load */
	iwl_write64(trans, CSR_CTXT_INFO_BA, trans_pcie->ctxt_info_dma_addr);
	iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);

	/* Context info will be released upon alive or failure to get one */

	return 0;
}

void iwl_pcie_ctxt_info_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans_pcie->ctxt_info)
		return;

	dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info),
			  trans_pcie->ctxt_info,
			  trans_pcie->ctxt_info_dma_addr);
	trans_pcie->ctxt_info_dma_addr = 0;
	trans_pcie->ctxt_info = NULL;

	iwl_pcie_ctxt_info_free_fw_img(trans);
}