// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */

#include <linux/completion.h>
#include <linux/circ_buf.h>
#include <linux/list.h>

#include "a6xx_gmu.h"
#include "a6xx_gmu.xml.h"
#include "a6xx_gpu.h"

#define HFI_MSG_ID(val) [val] = #val

static const char * const a6xx_hfi_msg_id[] = {
	HFI_MSG_ID(HFI_H2F_MSG_INIT),
	HFI_MSG_ID(HFI_H2F_MSG_FW_VERSION),
	HFI_MSG_ID(HFI_H2F_MSG_BW_TABLE),
	HFI_MSG_ID(HFI_H2F_MSG_PERF_TABLE),
	HFI_MSG_ID(HFI_H2F_MSG_TEST),
	HFI_MSG_ID(HFI_H2F_MSG_START),
	HFI_MSG_ID(HFI_H2F_MSG_CORE_FW_START),
	HFI_MSG_ID(HFI_H2F_MSG_GX_BW_PERF_VOTE),
	HFI_MSG_ID(HFI_H2F_MSG_PREPARE_SLUMBER),
};

static int a6xx_hfi_queue_read(struct a6xx_gmu *gmu,
	struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
{
	struct a6xx_hfi_queue_header *header = queue->header;
	u32 i, hdr, index = header->read_index;

	if (header->read_index == header->write_index) {
		header->rx_request = 1;
		return 0;
	}

	hdr = queue->data[index];

	/*
	 * If we assume that the GMU firmware is a rational actor that never
	 * sends a larger response than we expect, then an unexpectedly large
	 * header size can only mean memory corruption and/or hardware failure.
	 * In that case the only reasonable course of action is to BUG() so the
	 * corruption is caught immediately instead of silently overrunning the
	 * caller's buffer.
	 */
	BUG_ON(HFI_HEADER_SIZE(hdr) > dwords);

	for (i = 0; i < HFI_HEADER_SIZE(hdr); i++) {
		data[i] = queue->data[index];
		index = (index + 1) % header->size;
	}

	/* Skip the padding that non-legacy firmware writes after each message */
	if (!gmu->legacy)
		index = ALIGN(index, 4) % header->size;

	header->read_index = index;
	return HFI_HEADER_SIZE(hdr);
}

static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,
	struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
{
	struct a6xx_hfi_queue_header *header = queue->header;
	u32 i, space, index;

	spin_lock(&queue->lock);

	/*
	 * Sample the write index under the lock so that concurrent writers
	 * can't start from the same slot
	 */
	index = header->write_index;

	space = CIRC_SPACE(header->write_index, header->read_index,
		header->size);
	if (space < dwords) {
		header->dropped++;
		spin_unlock(&queue->lock);
		return -ENOSPC;
	}

	for (i = 0; i < dwords; i++) {
		queue->data[index] = data[i];
		index = (index + 1) % header->size;
	}

	/* Pad any unused space at the end of the message with a marker value */
	if (!gmu->legacy) {
		for (; index % 4; index = (index + 1) % header->size)
			queue->data[index] = 0xfafafafa;
	}

	header->write_index = index;
	spin_unlock(&queue->lock);

	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01);
	return 0;
}
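
/*
 * A sketch of the HFI message header layout, inferred from the encode in
 * a6xx_hfi_send_msg() below and the HFI_HEADER_ID/SIZE/SEQNUM accessors
 * used throughout this file (the authoritative field definitions live in
 * a6xx_gmu.h):
 *
 *   31         20 19    16 15        8 7         0
 *  +-------------+--------+-----------+-----------+
 *  |   seqnum    |  type  |   size    |    id     |
 *  +-------------+--------+-----------+-----------+
 *
 * size is in dwords and counts the header itself, which is why
 * a6xx_hfi_queue_read() can trust HFI_HEADER_SIZE() to bound its copy loop.
 */
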
static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
		u32 *payload, u32 payload_size)
{
	struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
	u32 val;
	int ret;

	/* Wait for a response */
	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
		val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 5000);

	if (ret) {
		DRM_DEV_ERROR(gmu->dev,
			"Message %s id %d timed out waiting for response\n",
			a6xx_hfi_msg_id[id], seqnum);
		return -ETIMEDOUT;
	}

	/* Clear the interrupt */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR,
		A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ);

	for (;;) {
		struct a6xx_hfi_msg_response resp;

		/* Get the next packet */
		ret = a6xx_hfi_queue_read(gmu, queue, (u32 *) &resp,
			sizeof(resp) >> 2);

		/* If the queue is empty our response never made it */
		if (!ret) {
			DRM_DEV_ERROR(gmu->dev,
				"The HFI response queue is unexpectedly empty\n");

			return -ENOENT;
		}

		if (HFI_HEADER_ID(resp.header) == HFI_F2H_MSG_ERROR) {
			struct a6xx_hfi_msg_error *error =
				(struct a6xx_hfi_msg_error *) &resp;

			DRM_DEV_ERROR(gmu->dev, "GMU firmware error %d\n",
				error->code);
			continue;
		}

		if (seqnum != HFI_HEADER_SEQNUM(resp.ret_header)) {
			DRM_DEV_ERROR(gmu->dev,
				"Unexpected message seqnum %d on the response queue\n",
				HFI_HEADER_SEQNUM(resp.ret_header));
			continue;
		}

		if (resp.error) {
			DRM_DEV_ERROR(gmu->dev,
				"Message %s id %d returned error %d\n",
				a6xx_hfi_msg_id[id], seqnum, resp.error);
			return -EINVAL;
		}

		/* All is well, copy over the buffer */
		if (payload && payload_size)
			memcpy(payload, resp.payload,
				min_t(u32, payload_size, sizeof(resp.payload)));

		return 0;
	}
}

static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id,
		void *data, u32 size, u32 *payload, u32 payload_size)
{
	struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE];
	int ret, dwords = size >> 2;
	u32 seqnum;

	seqnum = atomic_inc_return(&queue->seqnum) % 0xfff;

	/* First dword of the message is the message header - fill it in */
	*((u32 *) data) = (seqnum << 20) | (HFI_MSG_CMD << 16) |
		(dwords << 8) | id;

	ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to send message %s id %d\n",
			a6xx_hfi_msg_id[id], seqnum);
		return ret;
	}

	return a6xx_hfi_wait_for_ack(gmu, id, seqnum, payload, payload_size);
}

static int a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state)
{
	struct a6xx_hfi_msg_gmu_init_cmd msg = { 0 };

	msg.dbg_buffer_addr = (u32) gmu->debug.iova;
	msg.dbg_buffer_size = (u32) gmu->debug.size;
	msg.boot_state = boot_state;

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_INIT, &msg, sizeof(msg),
		NULL, 0);
}

static int a6xx_hfi_get_fw_version(struct a6xx_gmu *gmu, u32 *version)
{
	struct a6xx_hfi_msg_fw_version msg = { 0 };

	/* Currently supporting version 1.1 */
	msg.supported_version = (1 << 28) | (1 << 16);

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_FW_VERSION, &msg, sizeof(msg),
		version, sizeof(*version));
}
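
/*
 * Two variants of the perf table follow: the v1 layout used by legacy GMU
 * firmware, and the current layout, which differs here only by the
 * per-level acd word carried in each GX vote. Both send one vote from
 * gmu->gx_arc_votes[]/cx_arc_votes[] plus a frequency (in kHz, hence the
 * division by 1000) for every GPU and GMU power level.
 */
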
static int a6xx_hfi_send_perf_table_v1(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_perf_table_v1 msg = { 0 };
	int i;

	msg.num_gpu_levels = gmu->nr_gpu_freqs;
	msg.num_gmu_levels = gmu->nr_gmu_freqs;

	for (i = 0; i < gmu->nr_gpu_freqs; i++) {
		msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
		msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
	}

	for (i = 0; i < gmu->nr_gmu_freqs; i++) {
		msg.cx_votes[i].vote = gmu->cx_arc_votes[i];
		msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
	}

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg),
		NULL, 0);
}

static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_perf_table msg = { 0 };
	int i;

	msg.num_gpu_levels = gmu->nr_gpu_freqs;
	msg.num_gmu_levels = gmu->nr_gmu_freqs;

	for (i = 0; i < gmu->nr_gpu_freqs; i++) {
		msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
		msg.gx_votes[i].acd = 0xffffffff;
		msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
	}

	for (i = 0; i < gmu->nr_gmu_freqs; i++) {
		msg.cx_votes[i].vote = gmu->cx_arc_votes[i];
		msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
	}

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg),
		NULL, 0);
}
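
/*
 * The bw_table builders below all follow the same pattern, inferred from
 * the message fields they fill in: *_cmds_addrs[] name the registers to
 * vote through, *_cmds_num counts them, and *_cmds_data[level][cmd] holds
 * one value per command for each bandwidth level. Every builder advertises
 * bw_level_num = 1 (a single "off" level), though each also fills a second
 * CNOC level in cnoc_cmds_data[1][...], presumably a fixed "on" vote for
 * the firmware.
 */
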
static void a618_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/* Send a single "off" entry since the 618 GMU doesn't do bus scaling */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x01;

	msg->ddr_cmds_addrs[0] = 0x50000;
	msg->ddr_cmds_addrs[1] = 0x5003c;
	msg->ddr_cmds_addrs[2] = 0x5000c;

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x01;

	msg->cnoc_cmds_addrs[0] = 0x5007c;
	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[1][0] = 0x60000001;
}

static void a640_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/*
	 * Send a single "off" entry just to get things running
	 * TODO: bus scaling
	 */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x01;

	msg->ddr_cmds_addrs[0] = 0x50000;
	msg->ddr_cmds_addrs[1] = 0x5003c;
	msg->ddr_cmds_addrs[2] = 0x5000c;

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	msg->cnoc_cmds_num = 3;
	msg->cnoc_wait_bitmask = 0x01;

	msg->cnoc_cmds_addrs[0] = 0x50034;
	msg->cnoc_cmds_addrs[1] = 0x5007c;
	msg->cnoc_cmds_addrs[2] = 0x5004c;

	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[0][1] = 0x00000000;
	msg->cnoc_cmds_data[0][2] = 0x40000000;

	msg->cnoc_cmds_data[1][0] = 0x60000001;
	msg->cnoc_cmds_data[1][1] = 0x20000001;
	msg->cnoc_cmds_data[1][2] = 0x60000001;
}

static void a650_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/*
	 * Send a single "off" entry just to get things running
	 * TODO: bus scaling
	 */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x01;

	msg->ddr_cmds_addrs[0] = 0x50000;
	msg->ddr_cmds_addrs[1] = 0x50004;
	msg->ddr_cmds_addrs[2] = 0x5007c;

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x01;

	msg->cnoc_cmds_addrs[0] = 0x500a4;
	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[1][0] = 0x60000001;
}

static void a660_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/*
	 * Send a single "off" entry just to get things running
	 * TODO: bus scaling
	 */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x01;

	msg->ddr_cmds_addrs[0] = 0x50004;
	msg->ddr_cmds_addrs[1] = 0x500a0;
	msg->ddr_cmds_addrs[2] = 0x50000;

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;

	/*
	 * These are the CX (CNOC) votes - these are used by the GMU but the
	 * votes are known and fixed for the target
	 */
	msg->cnoc_cmds_num = 1;
	msg->cnoc_wait_bitmask = 0x01;

	msg->cnoc_cmds_addrs[0] = 0x50070;
	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[1][0] = 0x60000001;
}

static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
	/* Send a single "off" entry since the 630 GMU doesn't do bus scaling */
	msg->bw_level_num = 1;

	msg->ddr_cmds_num = 3;
	msg->ddr_wait_bitmask = 0x07;

	msg->ddr_cmds_addrs[0] = 0x50000;
	msg->ddr_cmds_addrs[1] = 0x5005c;
	msg->ddr_cmds_addrs[2] = 0x5000c;

	msg->ddr_cmds_data[0][0] = 0x40000000;
	msg->ddr_cmds_data[0][1] = 0x40000000;
	msg->ddr_cmds_data[0][2] = 0x40000000;

	/*
	 * These are the CX (CNOC) votes. They are used by the GMU, but the
	 * values for the sdm845 GMU are known and fixed, so we can hard-code
	 * them.
	 */
	msg->cnoc_cmds_num = 3;
	msg->cnoc_wait_bitmask = 0x05;

	msg->cnoc_cmds_addrs[0] = 0x50034;
	msg->cnoc_cmds_addrs[1] = 0x5007c;
	msg->cnoc_cmds_addrs[2] = 0x5004c;

	msg->cnoc_cmds_data[0][0] = 0x40000000;
	msg->cnoc_cmds_data[0][1] = 0x00000000;
	msg->cnoc_cmds_data[0][2] = 0x40000000;

	msg->cnoc_cmds_data[1][0] = 0x60000001;
	msg->cnoc_cmds_data[1][1] = 0x20000001;
	msg->cnoc_cmds_data[1][2] = 0x60000001;
}

static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_bw_table msg = { 0 };
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;

	if (adreno_is_a618(adreno_gpu))
		a618_build_bw_table(&msg);
	else if (adreno_is_a640(adreno_gpu))
		a640_build_bw_table(&msg);
	else if (adreno_is_a650(adreno_gpu))
		a650_build_bw_table(&msg);
	else if (adreno_is_a660(adreno_gpu))
		a660_build_bw_table(&msg);
	else
		a6xx_build_bw_table(&msg);

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, &msg, sizeof(msg),
		NULL, 0);
}

static int a6xx_hfi_send_test(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_test msg = { 0 };

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_TEST, &msg, sizeof(msg),
		NULL, 0);
}

static int a6xx_hfi_send_start(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_start msg = { 0 };

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_START, &msg, sizeof(msg),
		NULL, 0);
}

static int a6xx_hfi_send_core_fw_start(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_msg_core_fw_start msg = { 0 };

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_CORE_FW_START, &msg,
		sizeof(msg), NULL, 0);
}

int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index)
{
	struct a6xx_hfi_gx_bw_perf_vote_cmd msg = { 0 };

	msg.ack_type = 1; /* blocking */
	msg.freq = index;
	msg.bw = 0; /* TODO: bus scaling */

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_GX_BW_PERF_VOTE, &msg,
		sizeof(msg), NULL, 0);
}
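
/*
 * A note on the vote above: freq is an index into the perf table sent by
 * a6xx_hfi_send_perf_table(), not a frequency in Hz, and ack_type = 1
 * requests a blocking acknowledgment from the firmware. bw stays 0 until
 * bus scaling is implemented (see the TODOs in this file).
 */
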
int a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu)
{
	struct a6xx_hfi_prep_slumber_cmd msg = { 0 };

	/* TODO: should freq and bw fields be non-zero? */

	return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PREPARE_SLUMBER, &msg,
		sizeof(msg), NULL, 0);
}

static int a6xx_hfi_start_v1(struct a6xx_gmu *gmu, int boot_state)
{
	int ret;

	ret = a6xx_hfi_send_gmu_init(gmu, boot_state);
	if (ret)
		return ret;

	ret = a6xx_hfi_get_fw_version(gmu, NULL);
	if (ret)
		return ret;

	/*
	 * We have to exchange version numbers as part of the init sequence,
	 * but at this point the kernel driver doesn't need to know the exact
	 * version of the GMU firmware
	 */

	ret = a6xx_hfi_send_perf_table_v1(gmu);
	if (ret)
		return ret;

	ret = a6xx_hfi_send_bw_table(gmu);
	if (ret)
		return ret;

	/*
	 * Let the GMU know that there won't be any more HFI messages until the
	 * next boot
	 */
	a6xx_hfi_send_test(gmu);

	return 0;
}

int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
{
	int ret;

	if (gmu->legacy)
		return a6xx_hfi_start_v1(gmu, boot_state);

	ret = a6xx_hfi_send_perf_table(gmu);
	if (ret)
		return ret;

	ret = a6xx_hfi_send_bw_table(gmu);
	if (ret)
		return ret;

	ret = a6xx_hfi_send_core_fw_start(gmu);
	if (ret)
		return ret;

	/*
	 * The downstream driver sends this in its "a6xx_hw_init" equivalent,
	 * but there seems to be no harm in sending it here
	 */
	ret = a6xx_hfi_send_start(gmu);
	if (ret)
		return ret;

	return 0;
}

void a6xx_hfi_stop(struct a6xx_gmu *gmu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) {
		struct a6xx_hfi_queue *queue = &gmu->queues[i];

		if (!queue->header)
			continue;

		if (queue->header->read_index != queue->header->write_index)
			DRM_DEV_ERROR(gmu->dev, "HFI queue %d is not empty\n", i);

		queue->header->read_index = 0;
		queue->header->write_index = 0;
	}
}

static void a6xx_hfi_queue_init(struct a6xx_hfi_queue *queue,
		struct a6xx_hfi_queue_header *header, void *virt, u64 iova,
		u32 id)
{
	spin_lock_init(&queue->lock);
	queue->header = header;
	queue->data = virt;
	atomic_set(&queue->seqnum, 0);

	/* Set up the shared memory header */
	header->iova = iova;
	header->type = 10 << 8 | id;
	header->status = 1;
	header->size = SZ_4K >> 2;
	header->msg_size = 0;
	header->dropped = 0;
	header->rx_watermark = 1;
	header->tx_watermark = 1;
	header->rx_request = 1;
	header->tx_request = 0;
	header->read_index = 0;
	header->write_index = 0;
}
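
/*
 * A sketch of the HFI shared memory region that a6xx_hfi_init() below
 * carves up; the offsets follow from the code, and the 4K spacing matches
 * the header->size = SZ_4K >> 2 dwords that each queue advertises:
 *
 *   0x0000: queue table header followed by one queue header per queue
 *   0x1000: command queue data (host to GMU)
 *   0x2000: response queue data (GMU to host)
 */
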
void a6xx_hfi_init(struct a6xx_gmu *gmu)
{
	struct a6xx_gmu_bo *hfi = &gmu->hfi;
	struct a6xx_hfi_queue_table_header *table = hfi->virt;
	struct a6xx_hfi_queue_header *headers = hfi->virt + sizeof(*table);
	u64 offset;
	int table_size;

	/*
	 * The table size is the size of the table header plus all of the queue
	 * headers
	 */
	table_size = sizeof(*table);
	table_size += (ARRAY_SIZE(gmu->queues) *
		sizeof(struct a6xx_hfi_queue_header));

	table->version = 0;
	table->size = table_size;
	/* First queue header is located immediately after the table header */
	table->qhdr0_offset = sizeof(*table) >> 2;
	table->qhdr_size = sizeof(struct a6xx_hfi_queue_header) >> 2;
	table->num_queues = ARRAY_SIZE(gmu->queues);
	table->active_queues = ARRAY_SIZE(gmu->queues);

	/* Command queue */
	offset = SZ_4K;
	a6xx_hfi_queue_init(&gmu->queues[0], &headers[0], hfi->virt + offset,
		hfi->iova + offset, 0);

	/* GMU response queue */
	offset += SZ_4K;
	a6xx_hfi_queue_init(&gmu->queues[1], &headers[1], hfi->virt + offset,
		hfi->iova + offset, gmu->legacy ? 4 : 1);
}