// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */

#include <linux/completion.h>
#include <linux/circ_buf.h>
#include <linux/list.h>

#include "a6xx_gmu.h"
#include "a6xx_gmu.xml.h"

#define HFI_MSG_ID(val) [val] = #val

static const char * const a6xx_hfi_msg_id[] = {
	HFI_MSG_ID(HFI_H2F_MSG_INIT),
	HFI_MSG_ID(HFI_H2F_MSG_FW_VERSION),
	HFI_MSG_ID(HFI_H2F_MSG_BW_TABLE),
	HFI_MSG_ID(HFI_H2F_MSG_PERF_TABLE),
	HFI_MSG_ID(HFI_H2F_MSG_TEST),
};

static int a6xx_hfi_queue_read(struct a6xx_hfi_queue *queue, u32 *data,
		u32 dwords)
{
	struct a6xx_hfi_queue_header *header = queue->header;
	u32 i, hdr, index = header->read_index;

	if (header->read_index == header->write_index) {
		header->rx_request = 1;
		return 0;
	}

	hdr = queue->data[index];

	/*
	 * If we are to assume that the GMU firmware is in fact a rational actor
	 * and is programmed to not send us a larger response than we expect,
	 * then we can also assume that an unexpectedly large header size is due
	 * to memory corruption and/or hardware failure. In that case the only
	 * reasonable course of action is to BUG() so the corruption is caught
	 * immediately instead of overrunning the caller's buffer.
	 */
	BUG_ON(HFI_HEADER_SIZE(hdr) > dwords);

	for (i = 0; i < HFI_HEADER_SIZE(hdr); i++) {
		data[i] = queue->data[index];
		index = (index + 1) % header->size;
	}

	header->read_index = index;
	return HFI_HEADER_SIZE(hdr);
}

static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,
	struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
{
	struct a6xx_hfi_queue_header *header = queue->header;
	u32 i, space, index = header->write_index;

	spin_lock(&queue->lock);

	space = CIRC_SPACE(header->write_index, header->read_index,
		header->size);
	if (space < dwords) {
		header->dropped++;
		spin_unlock(&queue->lock);
		return -ENOSPC;
	}

	for (i = 0; i < dwords; i++) {
		queue->data[index] = data[i];
		index = (index + 1) % header->size;
	}

	header->write_index = index;
	spin_unlock(&queue->lock);

	/* Raise the host-to-GMU interrupt to tell the GMU a message is waiting */
	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01);
	return 0;
}

struct a6xx_hfi_response {
	u32 id;
	u32 seqnum;
	struct list_head node;
	struct completion complete;

	u32 error;
	u32 payload[16];
};

/*
 * Incoming HFI ack messages can come in out of order so we need to store all
 * the pending messages on a list until they are handled.
 */
static spinlock_t hfi_ack_lock = __SPIN_LOCK_UNLOCKED(message_lock);
static LIST_HEAD(hfi_ack_list);

static void a6xx_hfi_handle_ack(struct a6xx_gmu *gmu,
		struct a6xx_hfi_msg_response *msg)
{
	struct a6xx_hfi_response *resp;
	u32 id, seqnum;

	/* msg->ret_header contains the header of the message being acked */
	id = HFI_HEADER_ID(msg->ret_header);
	seqnum = HFI_HEADER_SEQNUM(msg->ret_header);

	spin_lock(&hfi_ack_lock);
	list_for_each_entry(resp, &hfi_ack_list, node) {
		if (resp->id == id && resp->seqnum == seqnum) {
			resp->error = msg->error;
			memcpy(resp->payload, msg->payload,
				sizeof(resp->payload));

			complete(&resp->complete);
			spin_unlock(&hfi_ack_lock);
			return;
		}
	}
	spin_unlock(&hfi_ack_lock);

	dev_err(gmu->dev, "Nobody was waiting for HFI message %d\n", seqnum);
}

static void a6xx_hfi_handle_error(struct a6xx_gmu *gmu,
		struct a6xx_hfi_msg_response *msg)
{
	struct a6xx_hfi_msg_error *error = (struct a6xx_hfi_msg_error *) msg;

	dev_err(gmu->dev, "GMU firmware error %d\n", error->code);
}

void a6xx_hfi_task(unsigned long data)
{
	struct a6xx_gmu *gmu = (struct a6xx_gmu *) data;
	struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
	struct a6xx_hfi_msg_response resp;

	for (;;) {
		u32 id;
		int ret = a6xx_hfi_queue_read(queue, (u32 *) &resp,
			sizeof(resp) >> 2);

		/* Returns the number of dwords copied or negative on error */
		if (ret <= 0) {
			if (ret < 0)
				dev_err(gmu->dev,
					"Unable to read the HFI message queue\n");
			break;
		}

		id = HFI_HEADER_ID(resp.header);

		if (id == HFI_F2H_MSG_ACK)
			a6xx_hfi_handle_ack(gmu, &resp);
		else if (id == HFI_F2H_MSG_ERROR)
			a6xx_hfi_handle_error(gmu, &resp);
	}
}

static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id,
		void *data, u32 size, u32 *payload, u32 payload_size)
{
	struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE];
	struct a6xx_hfi_response resp = { 0 };
	int ret, dwords = size >> 2;
	u32 seqnum;

	seqnum = atomic_inc_return(&queue->seqnum) % 0xfff;

	/*
	 * First dword of the message is the message header - fill it in:
	 * sequence number in bits 31:20, message type in 19:16, size in
	 * dwords in 15:8 and the message id in bits 7:0.
	 */
	*((u32 *) data) = (seqnum << 20) | (HFI_MSG_CMD << 16) |
		(dwords << 8) | id;

	init_completion(&resp.complete);
	resp.id = id;
	resp.seqnum = seqnum;

	spin_lock_bh(&hfi_ack_lock);
	list_add_tail(&resp.node, &hfi_ack_list);
	spin_unlock_bh(&hfi_ack_lock);

	ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
	if (ret) {
		dev_err(gmu->dev, "Unable to send message %s id %d\n",
			a6xx_hfi_msg_id[id], seqnum);
		goto out;
	}

	/* Wait up to 5 seconds for the response */
	ret = wait_for_completion_timeout(&resp.complete,
		msecs_to_jiffies(5000));
	if (!ret) {
		dev_err(gmu->dev,
			"Message %s id %d timed out waiting for response\n",
			a6xx_hfi_msg_id[id], seqnum);
		ret = -ETIMEDOUT;
	} else
		ret = 0;

out:
	spin_lock_bh(&hfi_ack_lock);
	list_del(&resp.node);
	spin_unlock_bh(&hfi_ack_lock);

	if (ret)
		return ret;

	if (resp.error) {
		dev_err(gmu->dev, "Message %s id %d returned error %d\n",
			a6xx_hfi_msg_id[id], seqnum, resp.error);
		return -EINVAL;
	}

	if (payload && payload_size) {
		int copy = min_t(u32, payload_size, sizeof(resp.payload));

		memcpy(payload, resp.payload, copy);
	}

	return 0;
}
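
/*
 * Each of the a6xx_hfi_send_*() helpers below fills in one fixed-size HFI
 * message structure and hands it to a6xx_hfi_send_msg(), which stamps the
 * header into the first dword and then blocks for up to five seconds waiting
 * for the GMU to ack the message on the response queue.
 */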

static int a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state)
{
	struct a6xx_hfi_msg_gmu_init_cmd msg = { 0 };

	msg.dbg_buffer_addr = (u32) gmu->debug->iova;
	msg.dbg_buffer_size = (u32) gmu->debug->size;
	msg.boot_state = boot_state;

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_INIT, &msg, sizeof(msg),
		NULL, 0);
}

static int a6xx_hfi_get_fw_version(struct a6xx_gmu *gmu, u32 *version)
{
	struct a6xx_hfi_msg_fw_version msg = { 0 };

	/* Currently supporting version 1.1 */
	msg.supported_version = (1 << 28) | (1 << 16);

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_FW_VERSION, &msg, sizeof(msg),
		version, sizeof(*version));
}

static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_perf_table msg = { 0 };
	int i;

	msg.num_gpu_levels = gmu->nr_gpu_freqs;
	msg.num_gmu_levels = gmu->nr_gmu_freqs;

	for (i = 0; i < gmu->nr_gpu_freqs; i++) {
		msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
		msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
	}

	for (i = 0; i < gmu->nr_gmu_freqs; i++) {
		msg.cx_votes[i].vote = gmu->cx_arc_votes[i];
		msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
	}

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg),
		NULL, 0);
}

static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_bw_table msg = { 0 };

	/*
	 * The sdm845 GMU doesn't do bus frequency scaling on its own but it
	 * does need at least one entry in the list because it might be accessed
	 * when the GMU is shutting down. Send a single "off" entry.
	 */
	msg.bw_level_num = 1;

	msg.ddr_cmds_num = 3;
	msg.ddr_wait_bitmask = 0x07;

	msg.ddr_cmds_addrs[0] = 0x50000;
	msg.ddr_cmds_addrs[1] = 0x5005c;
	msg.ddr_cmds_addrs[2] = 0x5000c;

	msg.ddr_cmds_data[0][0] = 0x40000000;
	msg.ddr_cmds_data[0][1] = 0x40000000;
	msg.ddr_cmds_data[0][2] = 0x40000000;

	/*
	 * These are the CX (CNOC) votes. These are used by the GMU, but the
	 * values for the sdm845 are known and fixed so we can hard code them.
	 */
	msg.cnoc_cmds_num = 3;
	msg.cnoc_wait_bitmask = 0x05;

	msg.cnoc_cmds_addrs[0] = 0x50034;
	msg.cnoc_cmds_addrs[1] = 0x5007c;
	msg.cnoc_cmds_addrs[2] = 0x5004c;

	msg.cnoc_cmds_data[0][0] = 0x40000000;
	msg.cnoc_cmds_data[0][1] = 0x00000000;
	msg.cnoc_cmds_data[0][2] = 0x40000000;

	msg.cnoc_cmds_data[1][0] = 0x60000001;
	msg.cnoc_cmds_data[1][1] = 0x20000001;
	msg.cnoc_cmds_data[1][2] = 0x60000001;

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, &msg, sizeof(msg),
		NULL, 0);
}

static int a6xx_hfi_send_test(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_test msg = { 0 };

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_TEST, &msg, sizeof(msg),
		NULL, 0);
}

int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
{
	int ret;

	ret = a6xx_hfi_send_gmu_init(gmu, boot_state);
	if (ret)
		return ret;

	ret = a6xx_hfi_get_fw_version(gmu, NULL);
	if (ret)
		return ret;

	/*
	 * We have to exchange version numbers per the init sequence, but at
	 * this point the kernel driver doesn't need to know the exact version
	 * of the GMU firmware.
	 */

	ret = a6xx_hfi_send_perf_table(gmu);
	if (ret)
		return ret;

	ret = a6xx_hfi_send_bw_table(gmu);
	if (ret)
		return ret;

	/*
	 * Let the GMU know that there won't be any more HFI messages until the
	 * next boot.
	 */
	a6xx_hfi_send_test(gmu);

	return 0;
}

void a6xx_hfi_stop(struct a6xx_gmu *gmu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) {
		struct a6xx_hfi_queue *queue = &gmu->queues[i];

		if (!queue->header)
			continue;

		if (queue->header->read_index != queue->header->write_index)
			dev_err(gmu->dev, "HFI queue %d is not empty\n", i);

		queue->header->read_index = 0;
		queue->header->write_index = 0;
	}
}

static void a6xx_hfi_queue_init(struct a6xx_hfi_queue *queue,
		struct a6xx_hfi_queue_header *header, void *virt, u64 iova,
		u32 id)
{
	spin_lock_init(&queue->lock);
	queue->header = header;
	queue->data = virt;
	atomic_set(&queue->seqnum, 0);

	/* Set up the shared memory header */
	header->iova = iova;
	header->type = 10 << 8 | id;
	header->status = 1;
	header->size = SZ_4K >> 2;
	header->msg_size = 0;
	header->dropped = 0;
	header->rx_watermark = 1;
	header->tx_watermark = 1;
	header->rx_request = 1;
	header->tx_request = 0;
	header->read_index = 0;
	header->write_index = 0;
}

void a6xx_hfi_init(struct a6xx_gmu *gmu)
{
	struct a6xx_gmu_bo *hfi = gmu->hfi;
	struct a6xx_hfi_queue_table_header *table = hfi->virt;
	struct a6xx_hfi_queue_header *headers = hfi->virt + sizeof(*table);
	u64 offset;
	int table_size;

	/*
	 * The table size is the size of the table header plus all of the queue
	 * headers
	 */
	table_size = sizeof(*table);
	table_size += (ARRAY_SIZE(gmu->queues) *
		sizeof(struct a6xx_hfi_queue_header));

	table->version = 0;
	table->size = table_size;
	/* First queue header is located immediately after the table header */
	table->qhdr0_offset = sizeof(*table) >> 2;
	table->qhdr_size = sizeof(struct a6xx_hfi_queue_header) >> 2;
	table->num_queues = ARRAY_SIZE(gmu->queues);
	table->active_queues = ARRAY_SIZE(gmu->queues);

	/* Command queue */
	offset = SZ_4K;
	a6xx_hfi_queue_init(&gmu->queues[0], &headers[0], hfi->virt + offset,
		hfi->iova + offset, 0);

	/* GMU response queue */
	offset += SZ_4K;
	a6xx_hfi_queue_init(&gmu->queues[1], &headers[1], hfi->virt + offset,
		hfi->iova + offset, 4);
}