/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bmi.h"
#include "hif.h"
#include "debug.h"
#include "htc.h"

void ath10k_bmi_start(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi start\n");

	ar->bmi.done_sent = false;
}

int ath10k_bmi_done(struct ath10k *ar)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi done\n");

	if (ar->bmi.done_sent) {
		ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi skipped\n");
		return 0;
	}

	ar->bmi.done_sent = true;
	cmd.id = __cpu_to_le32(BMI_DONE);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "unable to write to the device: %d\n", ret);
		return ret;
	}

	return 0;
}

int ath10k_bmi_get_target_info(struct ath10k *ar,
			       struct bmi_target_info *target_info)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
	u32 resplen = sizeof(resp.get_target_info);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info\n");

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to get target info from device\n");
		return ret;
	}

	if (resplen < sizeof(resp.get_target_info)) {
		ath10k_warn(ar, "invalid get_target_info response length (%d)\n",
			    resplen);
		return -EIO;
	}

	target_info->version = __le32_to_cpu(resp.get_target_info.version);
	target_info->type = __le32_to_cpu(resp.get_target_info.type);

	return 0;
}

int ath10k_bmi_read_memory(struct ath10k *ar,
			   u32 address, void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_mem);
	u32 rxlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
		   address, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE);

		cmd.id = __cpu_to_le32(BMI_READ_MEMORY);
		cmd.read_mem.addr = __cpu_to_le32(address);
		cmd.read_mem.len = __cpu_to_le32(rxlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
						  &resp, &rxlen);
		if (ret) {
			ath10k_warn(ar, "unable to read from the device (%d)\n",
				    ret);
			return ret;
		}

		memcpy(buffer, resp.read_mem.payload, rxlen);
		address += rxlen;
		buffer += rxlen;
		length -= rxlen;
	}

	return 0;
}

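/*
 * Writes are chunked so that each BMI command (header plus payload) fits
 * within BMI_MAX_DATA_SIZE. The payload is copied before roundup() so no
 * bytes past the caller's buffer are read; the transfer length is then
 * rounded up to a 4-byte boundary, which the target-side BMI handler
 * appears to expect, and trimmed back with min() afterwards so the loop
 * counter still reaches zero on the final chunk.
 */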
int ath10k_bmi_write_memory(struct ath10k *ar,
			    u32 address, const void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.write_mem);
	u32 txlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
		   address, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);

		/* copy before roundup to avoid reading beyond buffer */
		memcpy(cmd.write_mem.payload, buffer, txlen);
		txlen = roundup(txlen, 4);

		cmd.id = __cpu_to_le32(BMI_WRITE_MEMORY);
		cmd.write_mem.addr = __cpu_to_le32(address);
		cmd.write_mem.len = __cpu_to_le32(txlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn(ar, "unable to write to the device (%d)\n",
				    ret);
			return ret;
		}

		/* fixup roundup() so `length` zeroes out for last chunk */
		txlen = min(txlen, length);

		address += txlen;
		buffer += txlen;
		length -= txlen;
	}

	return 0;
}

int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.execute);
	u32 resplen = sizeof(resp.execute);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
		   address, param);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_EXECUTE);
	cmd.execute.addr = __cpu_to_le32(address);
	cmd.execute.param = __cpu_to_le32(param);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to read from the device\n");
		return ret;
	}

	if (resplen < sizeof(resp.execute)) {
		ath10k_warn(ar, "invalid execute response length (%d)\n",
			    resplen);
		return -EIO;
	}

	*result = __le32_to_cpu(resp.execute.result);

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);

	return 0;
}

int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.lz_data);
	u32 txlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%pK length %d\n",
		   buffer, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);

		WARN_ON_ONCE(txlen & 3);

		cmd.id = __cpu_to_le32(BMI_LZ_DATA);
		cmd.lz_data.len = __cpu_to_le32(txlen);
		memcpy(cmd.lz_data.payload, buffer, txlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn(ar, "unable to write to the device\n");
			return ret;
		}

		buffer += txlen;
		length -= txlen;
	}

	return 0;
}

int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
		   address);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_LZ_STREAM_START);
	cmd.lz_start.addr = __cpu_to_le32(address);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "unable to start LZ stream on the device\n");
		return ret;
	}

	return 0;
}

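/*
 * Fast download pushes an image through the target's LZ stream interface:
 * a stream is opened at the load address, the 4-byte-aligned head of the
 * image is sent with ath10k_bmi_lz_data(), and any trailing 1-3 bytes are
 * sent as a single zero-padded word. Finally a dummy stream is started at
 * address 0, which (per the comment below) mainly serves to flush the
 * target caches.
 */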
int ath10k_bmi_fast_download(struct ath10k *ar,
			     u32 address, const void *buffer, u32 length)
{
	u8 trailer[4] = {};
	u32 head_len = rounddown(length, 4);
	u32 trailer_len = length - head_len;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI,
		   "bmi fast download address 0x%x buffer 0x%pK length %d\n",
		   address, buffer, length);

	ret = ath10k_bmi_lz_stream_start(ar, address);
	if (ret)
		return ret;

	/* copy the last word into a zero padded buffer */
	if (trailer_len > 0)
		memcpy(trailer, buffer + head_len, trailer_len);

	ret = ath10k_bmi_lz_data(ar, buffer, head_len);
	if (ret)
		return ret;

	if (trailer_len > 0)
		ret = ath10k_bmi_lz_data(ar, trailer, 4);

	if (ret != 0)
		return ret;

	/*
	 * Close compressed stream and open a new (fake) one.
	 * This serves mainly to flush Target caches.
	 */
	ret = ath10k_bmi_lz_stream_start(ar, 0x00);

	return ret;
}