// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016-2019 Intel Corporation
 */

#include <linux/circ_buf.h>
#include <linux/ktime.h>
#include <linux/time64.h>
#include <linux/string_helpers.h>
#include <linux/timekeeping.h>

#include "i915_drv.h"
#include "intel_guc_ct.h"
#include "intel_guc_print.h"

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC)
enum {
	CT_DEAD_ALIVE = 0,
	CT_DEAD_SETUP,
	CT_DEAD_WRITE,
	CT_DEAD_DEADLOCK,
	CT_DEAD_H2G_HAS_ROOM,
	CT_DEAD_READ,
	CT_DEAD_PROCESS_FAILED,
};

static void ct_dead_ct_worker_func(struct work_struct *w);

#define CT_DEAD(ct, reason)	\
	do { \
		if (!(ct)->dead_ct_reported) { \
			(ct)->dead_ct_reason |= 1 << CT_DEAD_##reason; \
			queue_work(system_unbound_wq, &(ct)->dead_ct_worker); \
		} \
	} while (0)
#else
#define CT_DEAD(ct, reason)	do { } while (0)
#endif

static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
{
	return container_of(ct, struct intel_guc, ct);
}

#define CT_ERROR(_ct, _fmt, ...) \
	guc_err(ct_to_guc(_ct), "CT: " _fmt, ##__VA_ARGS__)
#ifdef CONFIG_DRM_I915_DEBUG_GUC
#define CT_DEBUG(_ct, _fmt, ...) \
	guc_dbg(ct_to_guc(_ct), "CT: " _fmt, ##__VA_ARGS__)
#else
#define CT_DEBUG(...)	do { } while (0)
#endif
#define CT_PROBE_ERROR(_ct, _fmt, ...) \
	guc_probe_error(ct_to_guc(_ct), "CT: " _fmt, ##__VA_ARGS__)

/**
 * DOC: CTB Blob
 *
 * We allocate a single blob to hold both CTB descriptors and buffers:
 *
 *      +--------+-------------------------------+------+
 *      | offset | contents                      | size |
 *      +========+===============================+======+
 *      | 0x0000 | H2G `CTB Descriptor`_ (send)  |      |
 *      +--------+-------------------------------+  4K  |
 *      | 0x0800 | G2H `CTB Descriptor`_ (recv)  |      |
 *      +--------+-------------------------------+------+
 *      | 0x1000 | H2G `CT Buffer`_ (send)       | n*4K |
 *      |        |                               |      |
 *      +--------+-------------------------------+------+
 *      | 0x1000 | G2H `CT Buffer`_ (recv)       | m*4K |
 *      | + n*4K |                               |      |
 *      +--------+-------------------------------+------+
 *
 * Size of each `CT Buffer`_ must be a multiple of 4K.
 * We don't expect too many messages in flight at any time, unless we are
 * using GuC submission. In that case each request requires a minimum of
 * 2 dwords, which gives us a maximum of 256 queued requests. Hopefully
 * this is enough space to avoid backpressure on the driver. We increase
 * the size of the receive buffer (relative to the send) to ensure a G2H
 * response CTB has a landing spot.
 */
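/*
 * With the sizes defined below (and assuming the descriptor struct fits in
 * 2K, as the 0x0800 offset in the table above implies), the blob works out
 * to 24K: two 2K-aligned descriptors at 0x0000 and 0x0800, the 4K H2G
 * buffer at 0x1000 and the 16K G2H buffer at 0x2000, a quarter of which is
 * held back as G2H_ROOM_BUFFER_SIZE for unexpected G2H messages.
 */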
#define CTB_DESC_SIZE		ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
#define CTB_H2G_BUFFER_SIZE	(SZ_4K)
#define CTB_G2H_BUFFER_SIZE	(4 * CTB_H2G_BUFFER_SIZE)
#define G2H_ROOM_BUFFER_SIZE	(CTB_G2H_BUFFER_SIZE / 4)

struct ct_request {
	struct list_head link;
	u32 fence;
	u32 status;
	u32 response_len;
	u32 *response_buf;
};

struct ct_incoming_msg {
	struct list_head link;
	u32 size;
	u32 msg[];
};

enum { CTB_SEND = 0, CTB_RECV = 1 };

enum { CTB_OWNER_HOST = 0 };

static void ct_receive_tasklet_func(struct tasklet_struct *t);
static void ct_incoming_request_worker_func(struct work_struct *w);

/**
 * intel_guc_ct_init_early - Initialize CT state without requiring device access
 * @ct: pointer to CT struct
 */
void intel_guc_ct_init_early(struct intel_guc_ct *ct)
{
	spin_lock_init(&ct->ctbs.send.lock);
	spin_lock_init(&ct->ctbs.recv.lock);
	spin_lock_init(&ct->requests.lock);
	INIT_LIST_HEAD(&ct->requests.pending);
	INIT_LIST_HEAD(&ct->requests.incoming);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC)
	INIT_WORK(&ct->dead_ct_worker, ct_dead_ct_worker_func);
#endif
	INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);
	tasklet_setup(&ct->receive_tasklet, ct_receive_tasklet_func);
	init_waitqueue_head(&ct->wq);
}

static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc)
{
	memset(desc, 0, sizeof(*desc));
}

static void guc_ct_buffer_reset(struct intel_guc_ct_buffer *ctb)
{
	u32 space;

	ctb->broken = false;
	ctb->tail = 0;
	ctb->head = 0;
	space = CIRC_SPACE(ctb->tail, ctb->head, ctb->size) - ctb->resv_space;
	atomic_set(&ctb->space, space);

	guc_ct_buffer_desc_init(ctb->desc);
}

static void guc_ct_buffer_init(struct intel_guc_ct_buffer *ctb,
			       struct guc_ct_buffer_desc *desc,
			       u32 *cmds, u32 size_in_bytes, u32 resv_space)
{
	GEM_BUG_ON(size_in_bytes % 4);

	ctb->desc = desc;
	ctb->cmds = cmds;
	ctb->size = size_in_bytes / 4;
	ctb->resv_space = resv_space / 4;

	guc_ct_buffer_reset(ctb);
}

static int guc_action_control_ctb(struct intel_guc *guc, u32 control)
{
	u32 request[HOST2GUC_CONTROL_CTB_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_HOST2GUC_CONTROL_CTB),
		FIELD_PREP(HOST2GUC_CONTROL_CTB_REQUEST_MSG_1_CONTROL, control),
	};
	int ret;

	GEM_BUG_ON(control != GUC_CTB_CONTROL_DISABLE && control != GUC_CTB_CONTROL_ENABLE);

	/* CT control must go over MMIO */
	ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);

	return ret > 0 ? -EPROTO : ret;
}

static int ct_control_enable(struct intel_guc_ct *ct, bool enable)
{
	int err;

	err = guc_action_control_ctb(ct_to_guc(ct), enable ?
				     GUC_CTB_CONTROL_ENABLE : GUC_CTB_CONTROL_DISABLE);
	if (unlikely(err))
		CT_PROBE_ERROR(ct, "Failed to control/%s CTB (%pe)\n",
			       str_enable_disable(enable), ERR_PTR(err));

	return err;
}

static int ct_register_buffer(struct intel_guc_ct *ct, bool send,
			      u32 desc_addr, u32 buff_addr, u32 size)
{
	int err;

	err = intel_guc_self_cfg64(ct_to_guc(ct), send ?
				   GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR_KEY :
				   GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR_KEY,
				   desc_addr);
	if (unlikely(err))
		goto failed;

	err = intel_guc_self_cfg64(ct_to_guc(ct), send ?
				   GUC_KLV_SELF_CFG_H2G_CTB_ADDR_KEY :
				   GUC_KLV_SELF_CFG_G2H_CTB_ADDR_KEY,
				   buff_addr);
	if (unlikely(err))
		goto failed;

	err = intel_guc_self_cfg32(ct_to_guc(ct), send ?
				   GUC_KLV_SELF_CFG_H2G_CTB_SIZE_KEY :
				   GUC_KLV_SELF_CFG_G2H_CTB_SIZE_KEY,
				   size);
	if (unlikely(err))
failed:
		CT_PROBE_ERROR(ct, "Failed to register %s buffer (%pe)\n",
			       send ? "SEND" : "RECV", ERR_PTR(err));

	return err;
}

/**
 * intel_guc_ct_init - Init buffer-based communication
 * @ct: pointer to CT struct
 *
 * Allocate memory required for buffer-based communication.
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_guc_ct_init(struct intel_guc_ct *ct)
{
	struct intel_guc *guc = ct_to_guc(ct);
	struct guc_ct_buffer_desc *desc;
	u32 blob_size;
	u32 cmds_size;
	u32 resv_space;
	void *blob;
	u32 *cmds;
	int err;

	err = i915_inject_probe_error(guc_to_gt(guc)->i915, -ENXIO);
	if (err)
		return err;

	GEM_BUG_ON(ct->vma);

	blob_size = 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE + CTB_G2H_BUFFER_SIZE;
	err = intel_guc_allocate_and_map_vma(guc, blob_size, &ct->vma, &blob);
	if (unlikely(err)) {
		CT_PROBE_ERROR(ct, "Failed to allocate %u for CTB data (%pe)\n",
			       blob_size, ERR_PTR(err));
		return err;
	}

	CT_DEBUG(ct, "base=%#x size=%u\n", intel_guc_ggtt_offset(guc, ct->vma), blob_size);

	/* store pointers to desc and cmds for send ctb */
	desc = blob;
	cmds = blob + 2 * CTB_DESC_SIZE;
	cmds_size = CTB_H2G_BUFFER_SIZE;
	resv_space = 0;
	CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u/%u\n", "send",
		 ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size,
		 resv_space);

	guc_ct_buffer_init(&ct->ctbs.send, desc, cmds, cmds_size, resv_space);

	/* store pointers to desc and cmds for recv ctb */
	desc = blob + CTB_DESC_SIZE;
	cmds = blob + 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE;
	cmds_size = CTB_G2H_BUFFER_SIZE;
	resv_space = G2H_ROOM_BUFFER_SIZE;
	CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u/%u\n", "recv",
		 ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size,
		 resv_space);

	guc_ct_buffer_init(&ct->ctbs.recv, desc, cmds, cmds_size, resv_space);

	return 0;
}

/**
 * intel_guc_ct_fini - Fini buffer-based communication
 * @ct: pointer to CT struct
 *
 * Deallocate memory required for buffer-based communication.
 */
void intel_guc_ct_fini(struct intel_guc_ct *ct)
{
	GEM_BUG_ON(ct->enabled);

	tasklet_kill(&ct->receive_tasklet);
	i915_vma_unpin_and_release(&ct->vma, I915_VMA_RELEASE_MAP);
	memset(ct, 0, sizeof(*ct));
}

/**
 * intel_guc_ct_enable - Enable buffer based command transport.
 * @ct: pointer to CT struct
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_guc_ct_enable(struct intel_guc_ct *ct)
{
	struct intel_guc *guc = ct_to_guc(ct);
	u32 base, desc, cmds, size;
	void *blob;
	int err;

	GEM_BUG_ON(ct->enabled);

	/* vma should be already allocated and mapped */
	GEM_BUG_ON(!ct->vma);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(ct->vma->obj));
	base = intel_guc_ggtt_offset(guc, ct->vma);

	/* blob should start with send descriptor */
	blob = __px_vaddr(ct->vma->obj);
	GEM_BUG_ON(blob != ct->ctbs.send.desc);

	/* (re)initialize descriptors */
	guc_ct_buffer_reset(&ct->ctbs.send);
	guc_ct_buffer_reset(&ct->ctbs.recv);

	/*
	 * Register both CT buffers starting with RECV buffer.
	 * Descriptors are in first half of the blob.
	 */
	desc = base + ptrdiff(ct->ctbs.recv.desc, blob);
	cmds = base + ptrdiff(ct->ctbs.recv.cmds, blob);
	size = ct->ctbs.recv.size * 4;
	err = ct_register_buffer(ct, false, desc, cmds, size);
	if (unlikely(err))
		goto err_out;

	desc = base + ptrdiff(ct->ctbs.send.desc, blob);
	cmds = base + ptrdiff(ct->ctbs.send.cmds, blob);
	size = ct->ctbs.send.size * 4;
	err = ct_register_buffer(ct, true, desc, cmds, size);
	if (unlikely(err))
		goto err_out;

	err = ct_control_enable(ct, true);
	if (unlikely(err))
		goto err_out;

	ct->enabled = true;
	ct->stall_time = KTIME_MAX;
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC)
	ct->dead_ct_reported = false;
	ct->dead_ct_reason = CT_DEAD_ALIVE;
#endif

	return 0;

err_out:
	CT_PROBE_ERROR(ct, "Failed to enable CTB (%pe)\n", ERR_PTR(err));
	CT_DEAD(ct, SETUP);
	return err;
}

/**
 * intel_guc_ct_disable - Disable buffer based command transport.
 * @ct: pointer to CT struct
 */
void intel_guc_ct_disable(struct intel_guc_ct *ct)
{
	struct intel_guc *guc = ct_to_guc(ct);

	GEM_BUG_ON(!ct->enabled);

	ct->enabled = false;

	if (intel_guc_is_fw_running(guc))
		ct_control_enable(ct, false);
}

static u32 ct_get_next_fence(struct intel_guc_ct *ct)
{
	/* For now it's trivial */
	return ++ct->requests.last_fence;
}

static int ct_write(struct intel_guc_ct *ct,
		    const u32 *action,
		    u32 len /* in dwords */,
		    u32 fence, u32 flags)
{
	struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
	struct guc_ct_buffer_desc *desc = ctb->desc;
	u32 tail = ctb->tail;
	u32 size = ctb->size;
	u32 header;
	u32 hxg;
	u32 type;
	u32 *cmds = ctb->cmds;
	unsigned int i;

	if (unlikely(desc->status))
		goto corrupted;

	GEM_BUG_ON(tail > size);

#ifdef CONFIG_DRM_I915_DEBUG_GUC
	if (unlikely(tail != READ_ONCE(desc->tail))) {
		CT_ERROR(ct, "Tail was modified %u != %u\n",
			 desc->tail, tail);
		desc->status |= GUC_CTB_STATUS_MISMATCH;
		goto corrupted;
	}
	if (unlikely(READ_ONCE(desc->head) >= size)) {
		CT_ERROR(ct, "Invalid head offset %u >= %u\n",
			 desc->head, size);
		desc->status |= GUC_CTB_STATUS_OVERFLOW;
		goto corrupted;
	}
#endif

	/*
	 * dw0: CT header (including fence)
	 * dw1: HXG header (including action code)
	 * dw2+: action data
	 */
	header = FIELD_PREP(GUC_CTB_MSG_0_FORMAT, GUC_CTB_FORMAT_HXG) |
		 FIELD_PREP(GUC_CTB_MSG_0_NUM_DWORDS, len) |
		 FIELD_PREP(GUC_CTB_MSG_0_FENCE, fence);

	type = (flags & INTEL_GUC_CT_SEND_NB) ?
		GUC_HXG_TYPE_EVENT :
		GUC_HXG_TYPE_REQUEST;
	hxg = FIELD_PREP(GUC_HXG_MSG_0_TYPE, type) |
	      FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
			 GUC_HXG_EVENT_MSG_0_DATA0, action[0]);

	CT_DEBUG(ct, "writing (tail %u) %*ph %*ph %*ph\n",
		 tail, 4, &header, 4, &hxg, 4 * (len - 1), &action[1]);

	cmds[tail] = header;
	tail = (tail + 1) % size;

	cmds[tail] = hxg;
	tail = (tail + 1) % size;

	for (i = 1; i < len; i++) {
		cmds[tail] = action[i];
		tail = (tail + 1) % size;
	}
	GEM_BUG_ON(tail > size);

	/*
	 * make sure the H2G buffer update and the LRC tail update (if this is
	 * triggering a submission) are visible before updating the descriptor
	 * tail
	 */
	intel_guc_write_barrier(ct_to_guc(ct));

	/* update local copies */
	ctb->tail = tail;
	GEM_BUG_ON(atomic_read(&ctb->space) < len + GUC_CTB_HDR_LEN);
	atomic_sub(len + GUC_CTB_HDR_LEN, &ctb->space);

	/* now update descriptor */
	WRITE_ONCE(desc->tail, tail);

	return 0;

corrupted:
	CT_ERROR(ct, "Corrupted descriptor head=%u tail=%u status=%#x\n",
		 desc->head, desc->tail, desc->status);
	CT_DEAD(ct, WRITE);
	ctb->broken = true;
	return -EPIPE;
}

/**
 * wait_for_ct_request_update - Wait for CT request state update.
 * @ct: pointer to CT
 * @req: pointer to pending request
 * @status: placeholder for status
 *
 * For each sent request, GuC shall send back a CT response message.
 * Our message handler will update the status of the tracked request once
 * a response message with the given fence is received. Wait here and
 * check for a valid response status value.
 *
 * Return:
 * * 0 response received (status is valid)
 * * -ETIMEDOUT no response within hardcoded timeout
 */
static int wait_for_ct_request_update(struct intel_guc_ct *ct, struct ct_request *req, u32 *status)
{
	int err;
	bool ct_enabled;

	/*
	 * Fast commands should complete in less than 10us, so sample quickly
	 * up to that length of time, then switch to a slower sleep-wait loop.
	 * No GuC command should ever take longer than 10ms but many GuC
	 * commands can be in flight at a time, so use a 1s timeout on the
	 * slower sleep-wait loop.
	 */
#define GUC_CTB_RESPONSE_TIMEOUT_SHORT_MS 10
#define GUC_CTB_RESPONSE_TIMEOUT_LONG_MS 1000
#define done \
	(!(ct_enabled = intel_guc_ct_enabled(ct)) || \
	 FIELD_GET(GUC_HXG_MSG_0_ORIGIN, READ_ONCE(req->status)) == \
	 GUC_HXG_ORIGIN_GUC)
	err = wait_for_us(done, GUC_CTB_RESPONSE_TIMEOUT_SHORT_MS);
	if (err)
		err = wait_for(done, GUC_CTB_RESPONSE_TIMEOUT_LONG_MS);
#undef done
	if (!ct_enabled)
		err = -ENODEV;

	*status = req->status;
	return err;
}

#define GUC_CTB_TIMEOUT_MS	1500
static inline bool ct_deadlocked(struct intel_guc_ct *ct)
{
	long timeout = GUC_CTB_TIMEOUT_MS;
	bool ret = ktime_ms_delta(ktime_get(), ct->stall_time) > timeout;

	if (unlikely(ret)) {
		struct guc_ct_buffer_desc *send = ct->ctbs.send.desc;
		struct guc_ct_buffer_desc *recv = ct->ctbs.recv.desc;

		CT_ERROR(ct, "Communication stalled for %lld ms, desc status=%#x,%#x\n",
			 ktime_ms_delta(ktime_get(), ct->stall_time),
			 send->status, recv->status);
		CT_ERROR(ct, "H2G Space: %u (Bytes)\n",
			 atomic_read(&ct->ctbs.send.space) * 4);
		CT_ERROR(ct, "Head: %u (Dwords)\n", ct->ctbs.send.desc->head);
		CT_ERROR(ct, "Tail: %u (Dwords)\n", ct->ctbs.send.desc->tail);
		CT_ERROR(ct, "G2H Space: %u (Bytes)\n",
			 atomic_read(&ct->ctbs.recv.space) * 4);
		CT_ERROR(ct, "Head: %u (Dwords)\n", ct->ctbs.recv.desc->head);
		CT_ERROR(ct, "Tail: %u (Dwords)\n", ct->ctbs.recv.desc->tail);

		CT_DEAD(ct, DEADLOCK);
		ct->ctbs.send.broken = true;
	}

	return ret;
}

static inline bool g2h_has_room(struct intel_guc_ct *ct, u32 g2h_len_dw)
{
	struct intel_guc_ct_buffer *ctb = &ct->ctbs.recv;

	/*
	 * We leave a certain amount of space in the G2H CTB buffer for
	 * unexpected G2H CTBs (e.g. logging, engine hang, etc...)
	 */
	return !g2h_len_dw || atomic_read(&ctb->space) >= g2h_len_dw;
}

static inline void g2h_reserve_space(struct intel_guc_ct *ct, u32 g2h_len_dw)
{
	lockdep_assert_held(&ct->ctbs.send.lock);

	GEM_BUG_ON(!g2h_has_room(ct, g2h_len_dw));

	if (g2h_len_dw)
		atomic_sub(g2h_len_dw, &ct->ctbs.recv.space);
}

static inline void g2h_release_space(struct intel_guc_ct *ct, u32 g2h_len_dw)
{
	atomic_add(g2h_len_dw, &ct->ctbs.recv.space);
}

static inline bool h2g_has_room(struct intel_guc_ct *ct, u32 len_dw)
{
	struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
	struct guc_ct_buffer_desc *desc = ctb->desc;
	u32 head;
	u32 space;

	if (atomic_read(&ctb->space) >= len_dw)
		return true;

	head = READ_ONCE(desc->head);
	if (unlikely(head > ctb->size)) {
		CT_ERROR(ct, "Invalid head offset %u > %u\n",
			 head, ctb->size);
		desc->status |= GUC_CTB_STATUS_OVERFLOW;
		ctb->broken = true;
		CT_DEAD(ct, H2G_HAS_ROOM);
		return false;
	}

	space = CIRC_SPACE(ctb->tail, head, ctb->size);
	atomic_set(&ctb->space, space);

	return space >= len_dw;
}

static int has_room_nb(struct intel_guc_ct *ct, u32 h2g_dw, u32 g2h_dw)
{
	bool h2g = h2g_has_room(ct, h2g_dw);
	bool g2h = g2h_has_room(ct, g2h_dw);

	lockdep_assert_held(&ct->ctbs.send.lock);

	if (unlikely(!h2g || !g2h)) {
		if (ct->stall_time == KTIME_MAX)
			ct->stall_time = ktime_get();

		/* Be paranoid and kick G2H tasklet to free credits */
		if (!g2h)
			tasklet_hi_schedule(&ct->receive_tasklet);

		if (unlikely(ct_deadlocked(ct)))
			return -EPIPE;
		else
			return -EBUSY;
	}

	ct->stall_time = KTIME_MAX;
	return 0;
}

#define G2H_LEN_DW(f) ({ \
	typeof(f) f_ = (f); \
	FIELD_GET(INTEL_GUC_CT_SEND_G2H_DW_MASK, f_) ? \
	FIELD_GET(INTEL_GUC_CT_SEND_G2H_DW_MASK, f_) + \
	GUC_CTB_HXG_MSG_MIN_LEN : 0; \
})
static int ct_send_nb(struct intel_guc_ct *ct,
		      const u32 *action,
		      u32 len,
		      u32 flags)
{
	struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
	unsigned long spin_flags;
	u32 g2h_len_dw = G2H_LEN_DW(flags);
	u32 fence;
	int ret;

	spin_lock_irqsave(&ctb->lock, spin_flags);

	ret = has_room_nb(ct, len + GUC_CTB_HDR_LEN, g2h_len_dw);
	if (unlikely(ret))
		goto out;

	fence = ct_get_next_fence(ct);
	ret = ct_write(ct, action, len, fence, flags);
	if (unlikely(ret))
		goto out;

	g2h_reserve_space(ct, g2h_len_dw);
	intel_guc_notify(ct_to_guc(ct));

out:
	spin_unlock_irqrestore(&ctb->lock, spin_flags);

	return ret;
}

static int ct_send(struct intel_guc_ct *ct,
		   const u32 *action,
		   u32 len,
		   u32 *response_buf,
		   u32 response_buf_size,
		   u32 *status)
{
	struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
	struct ct_request request;
	unsigned long flags;
	unsigned int sleep_period_ms = 1;
	bool send_again;
	u32 fence;
	int err;

	GEM_BUG_ON(!ct->enabled);
	GEM_BUG_ON(!len);
	GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
	GEM_BUG_ON(!response_buf && response_buf_size);
	might_sleep();

resend:
	send_again = false;

	/*
	 * We use a lazy spin wait loop here as we believe that if the CT
	 * buffers are sized correctly the flow control condition should be
	 * rare. Reserve the maximum size in the G2H credits as we don't know
	 * how big the response is going to be.
	 */
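	/*
	 * Note: sleep_period_ms doubles on every pass through the retry loop
	 * below (1, 2, 4, ... ms), so polling overhead stays bounded while
	 * ct_deadlocked() provides the hard GUC_CTB_TIMEOUT_MS cut-off for a
	 * stalled CTB.
	 */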
retry:
	spin_lock_irqsave(&ctb->lock, flags);
	if (unlikely(!h2g_has_room(ct, len + GUC_CTB_HDR_LEN) ||
		     !g2h_has_room(ct, GUC_CTB_HXG_MSG_MAX_LEN))) {
		if (ct->stall_time == KTIME_MAX)
			ct->stall_time = ktime_get();
		spin_unlock_irqrestore(&ctb->lock, flags);

		if (unlikely(ct_deadlocked(ct)))
			return -EPIPE;

		if (msleep_interruptible(sleep_period_ms))
			return -EINTR;
		sleep_period_ms = sleep_period_ms << 1;

		goto retry;
	}

	ct->stall_time = KTIME_MAX;

	fence = ct_get_next_fence(ct);
	request.fence = fence;
	request.status = 0;
	request.response_len = response_buf_size;
	request.response_buf = response_buf;

	spin_lock(&ct->requests.lock);
	list_add_tail(&request.link, &ct->requests.pending);
	spin_unlock(&ct->requests.lock);

	err = ct_write(ct, action, len, fence, 0);
	g2h_reserve_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);

	spin_unlock_irqrestore(&ctb->lock, flags);

	if (unlikely(err))
		goto unlink;

	intel_guc_notify(ct_to_guc(ct));

	err = wait_for_ct_request_update(ct, &request, status);
	g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
	if (unlikely(err)) {
		if (err == -ENODEV)
			/*
			 * wait_for_ct_request_update returns -ENODEV on
			 * reset/suspend in progress. In this case, output
			 * is debug rather than error info.
			 */
			CT_DEBUG(ct, "Request %#x (fence %u) cancelled as CTB is disabled\n",
				 action[0], request.fence);
		else
			CT_ERROR(ct, "No response for request %#x (fence %u)\n",
				 action[0], request.fence);
		goto unlink;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, *status) == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
		CT_DEBUG(ct, "retrying request %#x (%u)\n", *action,
			 FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, *status));
		send_again = true;
		goto unlink;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, *status) != GUC_HXG_TYPE_RESPONSE_SUCCESS) {
		err = -EIO;
		goto unlink;
	}

	if (response_buf) {
		/* There shall be no data in the status */
		WARN_ON(FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, request.status));
		/* Return actual response len */
		err = request.response_len;
	} else {
		/* There shall be no response payload */
		WARN_ON(request.response_len);
		/* Return data decoded from the status dword */
		err = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, *status);
	}

unlink:
	spin_lock_irqsave(&ct->requests.lock, flags);
	list_del(&request.link);
	spin_unlock_irqrestore(&ct->requests.lock, flags);

	if (unlikely(send_again))
		goto resend;

	return err;
}

/*
 * Command Transport (CT) buffer based GuC send function.
 */
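/*
 * Blocking sends return either the length of the response copied into
 * response_buf or the data dword decoded from the response status;
 * INTEL_GUC_CT_SEND_NB sends are posted as HXG events, only reserve G2H
 * credits per G2H_LEN_DW(flags) and do not wait for a reply.
 */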
int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
		      u32 *response_buf, u32 response_buf_size, u32 flags)
{
	u32 status = ~0; /* undefined */
	int ret;

	if (unlikely(!ct->enabled)) {
		struct intel_guc *guc = ct_to_guc(ct);
		struct intel_uc *uc = container_of(guc, struct intel_uc, guc);

		WARN(!uc->reset_in_progress, "Unexpected send: action=%#x\n", *action);
		return -ENODEV;
	}

	if (unlikely(ct->ctbs.send.broken))
		return -EPIPE;

	if (flags & INTEL_GUC_CT_SEND_NB)
		return ct_send_nb(ct, action, len, flags);

	ret = ct_send(ct, action, len, response_buf, response_buf_size, &status);
	if (unlikely(ret < 0)) {
		if (ret != -ENODEV)
			CT_ERROR(ct, "Sending action %#x failed (%pe) status=%#X\n",
				 action[0], ERR_PTR(ret), status);
	} else if (unlikely(ret)) {
		CT_DEBUG(ct, "send action %#x returned %d (%#x)\n",
			 action[0], ret, ret);
	}

	return ret;
}

static struct ct_incoming_msg *ct_alloc_msg(u32 num_dwords)
{
	struct ct_incoming_msg *msg;

	msg = kmalloc(struct_size(msg, msg, num_dwords), GFP_ATOMIC);
	if (msg)
		msg->size = num_dwords;
	return msg;
}

static void ct_free_msg(struct ct_incoming_msg *msg)
{
	kfree(msg);
}

/*
 * Return: number of available remaining dwords to read (0 if empty)
 * or a negative error code on failure
 */
static int ct_read(struct intel_guc_ct *ct, struct ct_incoming_msg **msg)
{
	struct intel_guc_ct_buffer *ctb = &ct->ctbs.recv;
	struct guc_ct_buffer_desc *desc = ctb->desc;
	u32 head = ctb->head;
	u32 tail = READ_ONCE(desc->tail);
	u32 size = ctb->size;
	u32 *cmds = ctb->cmds;
	s32 available;
	unsigned int len;
	unsigned int i;
	u32 header;

	if (unlikely(ctb->broken))
		return -EPIPE;

	if (unlikely(desc->status)) {
		u32 status = desc->status;

		if (status & GUC_CTB_STATUS_UNUSED) {
			/*
			 * Potentially valid if a CLIENT_RESET request resulted in
			 * contexts/engines being reset. But should never happen as
			 * no contexts should be active when CLIENT_RESET is sent.
			 */
			CT_ERROR(ct, "Unexpected G2H after GuC has stopped!\n");
			status &= ~GUC_CTB_STATUS_UNUSED;
		}

		if (status)
			goto corrupted;
	}

	GEM_BUG_ON(head > size);

#ifdef CONFIG_DRM_I915_DEBUG_GUC
	if (unlikely(head != READ_ONCE(desc->head))) {
		CT_ERROR(ct, "Head was modified %u != %u\n",
			 desc->head, head);
		desc->status |= GUC_CTB_STATUS_MISMATCH;
		goto corrupted;
	}
#endif
	if (unlikely(tail >= size)) {
		CT_ERROR(ct, "Invalid tail offset %u >= %u\n",
			 tail, size);
		desc->status |= GUC_CTB_STATUS_OVERFLOW;
		goto corrupted;
	}

	/* tail == head condition indicates empty */
	available = tail - head;
	if (unlikely(available == 0)) {
		*msg = NULL;
		return 0;
	}

	/* beware of buffer wrap case */
	if (unlikely(available < 0))
		available += size;
	CT_DEBUG(ct, "available %d (%u:%u:%u)\n", available, head, tail, size);
	GEM_BUG_ON(available < 0);

	header = cmds[head];
	head = (head + 1) % size;

	/* message len with header */
	len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, header) + GUC_CTB_MSG_MIN_LEN;
	if (unlikely(len > (u32)available)) {
		CT_ERROR(ct, "Incomplete message %*ph %*ph %*ph\n",
			 4, &header,
			 4 * (head + available - 1 > size ?
			      size - head : available - 1), &cmds[head],
			 4 * (head + available - 1 > size ?
			      available - 1 - size + head : 0), &cmds[0]);
		desc->status |= GUC_CTB_STATUS_UNDERFLOW;
		goto corrupted;
	}

	*msg = ct_alloc_msg(len);
	if (!*msg) {
		CT_ERROR(ct, "No memory for message %*ph %*ph %*ph\n",
			 4, &header,
			 4 * (head + available - 1 > size ?
			      size - head : available - 1), &cmds[head],
			 4 * (head + available - 1 > size ?
			      available - 1 - size + head : 0), &cmds[0]);
		return available;
	}

	(*msg)->msg[0] = header;

	for (i = 1; i < len; i++) {
		(*msg)->msg[i] = cmds[head];
		head = (head + 1) % size;
	}
	CT_DEBUG(ct, "received %*ph\n", 4 * len, (*msg)->msg);

	/* update local copies */
	ctb->head = head;

	/* now update descriptor */
	WRITE_ONCE(desc->head, head);

	/*
	 * Wa_22016122933: Making sure the head update is
	 * visible to GuC right away
	 */
	intel_guc_write_barrier(ct_to_guc(ct));

	return available - len;

corrupted:
	CT_ERROR(ct, "Corrupted descriptor head=%u tail=%u status=%#x\n",
		 desc->head, desc->tail, desc->status);
	ctb->broken = true;
	CT_DEAD(ct, READ);
	return -EPIPE;
}

static int ct_handle_response(struct intel_guc_ct *ct, struct ct_incoming_msg *response)
{
	u32 len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, response->msg[0]);
	u32 fence = FIELD_GET(GUC_CTB_MSG_0_FENCE, response->msg[0]);
	const u32 *hxg = &response->msg[GUC_CTB_MSG_MIN_LEN];
	const u32 *data = &hxg[GUC_HXG_MSG_MIN_LEN];
	u32 datalen = len - GUC_HXG_MSG_MIN_LEN;
	struct ct_request *req;
	unsigned long flags;
	bool found = false;
	int err = 0;

	GEM_BUG_ON(len < GUC_HXG_MSG_MIN_LEN);
	GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]) != GUC_HXG_ORIGIN_GUC);
	GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_RESPONSE_SUCCESS &&
		   FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_NO_RESPONSE_RETRY &&
		   FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_RESPONSE_FAILURE);

	CT_DEBUG(ct, "response fence %u status %#x\n", fence, hxg[0]);

	spin_lock_irqsave(&ct->requests.lock, flags);
	list_for_each_entry(req, &ct->requests.pending, link) {
		if (unlikely(fence != req->fence)) {
			CT_DEBUG(ct, "request %u awaits response\n",
				 req->fence);
			continue;
		}
		if (unlikely(datalen > req->response_len)) {
			CT_ERROR(ct, "Response %u too long (datalen %u > %u)\n",
				 req->fence, datalen, req->response_len);
			datalen = min(datalen, req->response_len);
			err = -EMSGSIZE;
		}
		if (datalen)
			memcpy(req->response_buf, data, 4 * datalen);
		req->response_len = datalen;
		WRITE_ONCE(req->status, hxg[0]);
		found = true;
		break;
	}
	if (!found) {
		CT_ERROR(ct, "Unsolicited response (fence %u)\n", fence);
		CT_ERROR(ct, "Could not find fence=%u, last_fence=%u\n", fence,
			 ct->requests.last_fence);
		list_for_each_entry(req, &ct->requests.pending, link)
			CT_ERROR(ct, "request %u awaits response\n",
				 req->fence);
		err = -ENOKEY;
	}
	spin_unlock_irqrestore(&ct->requests.lock, flags);

	if (unlikely(err))
		return err;

	ct_free_msg(response);
	return 0;
}

static int ct_process_request(struct intel_guc_ct *ct, struct ct_incoming_msg *request)
{
	struct intel_guc *guc = ct_to_guc(ct);
	const u32 *hxg;
	const u32 *payload;
	u32 hxg_len, action, len;
	int ret;

	hxg = &request->msg[GUC_CTB_MSG_MIN_LEN];
	hxg_len = request->size - GUC_CTB_MSG_MIN_LEN;
	payload = &hxg[GUC_HXG_MSG_MIN_LEN];
	action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
	len = hxg_len - GUC_HXG_MSG_MIN_LEN;

	CT_DEBUG(ct, "request %x %*ph\n", action, 4 * len, payload);

	switch (action) {
	case INTEL_GUC_ACTION_DEFAULT:
		ret = intel_guc_to_host_process_recv_msg(guc, payload, len);
		break;
	case INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
		ret = intel_guc_deregister_done_process_msg(guc, payload,
							    len);
		break;
	case INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
		ret = intel_guc_sched_done_process_msg(guc, payload, len);
		break;
	case INTEL_GUC_ACTION_CONTEXT_RESET_NOTIFICATION:
		ret = intel_guc_context_reset_process_msg(guc, payload, len);
		break;
	case INTEL_GUC_ACTION_STATE_CAPTURE_NOTIFICATION:
		ret = intel_guc_error_capture_process_msg(guc, payload, len);
		if (unlikely(ret))
			CT_ERROR(ct, "error capture notification failed %x %*ph\n",
				 action, 4 * len, payload);
		break;
	case INTEL_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION:
		ret = intel_guc_engine_failure_process_msg(guc, payload, len);
		break;
	case INTEL_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE:
		intel_guc_log_handle_flush_event(&guc->log);
		ret = 0;
		break;
	case INTEL_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED:
		CT_ERROR(ct, "Received GuC crash dump notification!\n");
		ret = 0;
		break;
	case INTEL_GUC_ACTION_NOTIFY_EXCEPTION:
		CT_ERROR(ct, "Received GuC exception notification!\n");
		ret = 0;
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	if (unlikely(ret)) {
		CT_ERROR(ct, "Failed to process request %04x (%pe)\n",
			 action, ERR_PTR(ret));
		return ret;
	}

	ct_free_msg(request);
	return 0;
}

static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
{
	unsigned long flags;
	struct ct_incoming_msg *request;
	bool done;
	int err;

	spin_lock_irqsave(&ct->requests.lock, flags);
	request = list_first_entry_or_null(&ct->requests.incoming,
					   struct ct_incoming_msg, link);
	if (request)
		list_del(&request->link);
	done = !!list_empty(&ct->requests.incoming);
	spin_unlock_irqrestore(&ct->requests.lock, flags);

	if (!request)
		return true;

	err = ct_process_request(ct, request);
	if (unlikely(err)) {
		CT_ERROR(ct, "Failed to process CT message (%pe) %*ph\n",
			 ERR_PTR(err), 4 * request->size, request->msg);
		CT_DEAD(ct, PROCESS_FAILED);
		ct_free_msg(request);
	}

	return done;
}

static void ct_incoming_request_worker_func(struct work_struct *w)
{
	struct intel_guc_ct *ct =
		container_of(w, struct intel_guc_ct, requests.worker);
	bool done;

	do {
		done = ct_process_incoming_requests(ct);
	} while (!done);
}

static int ct_handle_event(struct intel_guc_ct *ct, struct ct_incoming_msg *request)
{
	const u32 *hxg = &request->msg[GUC_CTB_MSG_MIN_LEN];
	u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
	unsigned long flags;

	GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT);

	/*
	 * Adjusting the space must be done in IRQ or deadlock can occur as the
	 * CTB processing in the below workqueue can send CTBs which creates a
	 * circular dependency if the space was returned there.
	 */
	switch (action) {
	case INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
	case INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
		g2h_release_space(ct, request->size);
	}

	spin_lock_irqsave(&ct->requests.lock, flags);
	list_add_tail(&request->link, &ct->requests.incoming);
	spin_unlock_irqrestore(&ct->requests.lock, flags);

	queue_work(system_unbound_wq, &ct->requests.worker);
	return 0;
}

static int ct_handle_hxg(struct intel_guc_ct *ct, struct ct_incoming_msg *msg)
{
	u32 origin, type;
	u32 *hxg;
	int err;

	if (unlikely(msg->size < GUC_CTB_HXG_MSG_MIN_LEN))
		return -EBADMSG;

	hxg = &msg->msg[GUC_CTB_MSG_MIN_LEN];

	origin = FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]);
	if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) {
		err = -EPROTO;
		goto failed;
	}

	type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
	switch (type) {
	case GUC_HXG_TYPE_EVENT:
		err = ct_handle_event(ct, msg);
		break;
	case GUC_HXG_TYPE_RESPONSE_SUCCESS:
	case GUC_HXG_TYPE_RESPONSE_FAILURE:
	case GUC_HXG_TYPE_NO_RESPONSE_RETRY:
		err = ct_handle_response(ct, msg);
		break;
	default:
		err = -EOPNOTSUPP;
	}

	if (unlikely(err)) {
failed:
		CT_ERROR(ct, "Failed to handle HXG message (%pe) %*ph\n",
			 ERR_PTR(err), 4 * GUC_HXG_MSG_MIN_LEN, hxg);
	}
	return err;
}

static void ct_handle_msg(struct intel_guc_ct *ct, struct ct_incoming_msg *msg)
{
	u32 format = FIELD_GET(GUC_CTB_MSG_0_FORMAT, msg->msg[0]);
	int err;

	if (format == GUC_CTB_FORMAT_HXG)
		err = ct_handle_hxg(ct, msg);
	else
		err = -EOPNOTSUPP;

	if (unlikely(err)) {
		CT_ERROR(ct, "Failed to process CT message (%pe) %*ph\n",
			 ERR_PTR(err), 4 * msg->size, msg->msg);
		ct_free_msg(msg);
	}
}

/*
 * Return: number of available remaining dwords to read (0 if empty)
 * or a negative error code on failure
 */
static int ct_receive(struct intel_guc_ct *ct)
{
	struct ct_incoming_msg *msg = NULL;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ct->ctbs.recv.lock, flags);
	ret = ct_read(ct, &msg);
	spin_unlock_irqrestore(&ct->ctbs.recv.lock, flags);
	if (ret < 0)
		return ret;

	if (msg)
		ct_handle_msg(ct, msg);

	return ret;
}

static void ct_try_receive_message(struct intel_guc_ct *ct)
{
	int ret;

	if (GEM_WARN_ON(!ct->enabled))
		return;

	ret = ct_receive(ct);
	if (ret > 0)
		tasklet_hi_schedule(&ct->receive_tasklet);
}

static void ct_receive_tasklet_func(struct tasklet_struct *t)
{
	struct intel_guc_ct *ct = from_tasklet(ct, t, receive_tasklet);

	ct_try_receive_message(ct);
}

/*
 * When we're communicating with the GuC over CT, GuC uses events
 * to notify us about new messages being posted on the RECV buffer.
 */
void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
{
	if (unlikely(!ct->enabled)) {
		WARN(1, "Unexpected GuC event received while CT disabled!\n");
		return;
	}

	ct_try_receive_message(ct);
}

void intel_guc_ct_print_info(struct intel_guc_ct *ct,
			     struct drm_printer *p)
{
	drm_printf(p, "CT %s\n", str_enabled_disabled(ct->enabled));

	if (!ct->enabled)
		return;

	drm_printf(p, "H2G Space: %u\n",
		   atomic_read(&ct->ctbs.send.space) * 4);
	drm_printf(p, "Head: %u\n",
		   ct->ctbs.send.desc->head);
	drm_printf(p, "Tail: %u\n",
		   ct->ctbs.send.desc->tail);
	drm_printf(p, "G2H Space: %u\n",
		   atomic_read(&ct->ctbs.recv.space) * 4);
	drm_printf(p, "Head: %u\n",
		   ct->ctbs.recv.desc->head);
	drm_printf(p, "Tail: %u\n",
		   ct->ctbs.recv.desc->tail);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC)
static void ct_dead_ct_worker_func(struct work_struct *w)
{
	struct intel_guc_ct *ct = container_of(w, struct intel_guc_ct, dead_ct_worker);
	struct intel_guc *guc = ct_to_guc(ct);

	if (ct->dead_ct_reported)
		return;

	ct->dead_ct_reported = true;

	guc_info(guc, "CTB is dead - reason=0x%X\n", ct->dead_ct_reason);
	intel_klog_error_capture(guc_to_gt(guc), (intel_engine_mask_t)~0U);
}
#endif