1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2016-2019 Intel Corporation
4  */
5 
6 #include <linux/circ_buf.h>
7 #include <linux/ktime.h>
8 #include <linux/time64.h>
9 #include <linux/timekeeping.h>
10 
11 #include "i915_drv.h"
12 #include "intel_guc_ct.h"
13 #include "gt/intel_gt.h"
14 
15 static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
16 {
17 	return container_of(ct, struct intel_guc, ct);
18 }
19 
20 static inline struct intel_gt *ct_to_gt(struct intel_guc_ct *ct)
21 {
22 	return guc_to_gt(ct_to_guc(ct));
23 }
24 
25 static inline struct drm_i915_private *ct_to_i915(struct intel_guc_ct *ct)
26 {
27 	return ct_to_gt(ct)->i915;
28 }
29 
30 static inline struct drm_device *ct_to_drm(struct intel_guc_ct *ct)
31 {
32 	return &ct_to_i915(ct)->drm;
33 }
34 
35 #define CT_ERROR(_ct, _fmt, ...) \
36 	drm_err(ct_to_drm(_ct), "CT: " _fmt, ##__VA_ARGS__)
37 #ifdef CONFIG_DRM_I915_DEBUG_GUC
38 #define CT_DEBUG(_ct, _fmt, ...) \
39 	drm_dbg(ct_to_drm(_ct), "CT: " _fmt, ##__VA_ARGS__)
40 #else
41 #define CT_DEBUG(...)	do { } while (0)
42 #endif
#define CT_PROBE_ERROR(_ct, _fmt, ...) \
	i915_probe_error(ct_to_i915(_ct), "CT: " _fmt, ##__VA_ARGS__)
45 
46 /**
47  * DOC: CTB Blob
48  *
 * We allocate a single blob to hold both CTB descriptors and buffers:
50  *
51  *      +--------+-----------------------------------------------+------+
52  *      | offset | contents                                      | size |
53  *      +========+===============================================+======+
54  *      | 0x0000 | H2G `CTB Descriptor`_ (send)                  |      |
55  *      +--------+-----------------------------------------------+  4K  |
56  *      | 0x0800 | G2H `CTB Descriptor`_ (recv)                  |      |
57  *      +--------+-----------------------------------------------+------+
58  *      | 0x1000 | H2G `CT Buffer`_ (send)                       | n*4K |
59  *      |        |                                               |      |
60  *      +--------+-----------------------------------------------+------+
61  *      | 0x1000 | G2H `CT Buffer`_ (recv)                       | m*4K |
62  *      | + n*4K |                                               |      |
63  *      +--------+-----------------------------------------------+------+
64  *
 * The size of each `CT Buffer`_ must be a multiple of 4K.
 * We don't expect too many messages in flight at any time, unless we are
 * using GuC submission. In that case each request requires a minimum of
 * 2 dwords, which gives us a maximum of 256 queued requests. Hopefully this
 * is enough space to avoid backpressure on the driver. We increase the size
70  * of the receive buffer (relative to the send) to ensure a G2H response
71  * CTB has a landing spot.
72  */
73 #define CTB_DESC_SIZE		ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
74 #define CTB_H2G_BUFFER_SIZE	(SZ_4K)
75 #define CTB_G2H_BUFFER_SIZE	(4 * CTB_H2G_BUFFER_SIZE)
76 #define G2H_ROOM_BUFFER_SIZE	(CTB_G2H_BUFFER_SIZE / 4)
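
/*
 * A worked example with the sizes above (assuming struct guc_ct_buffer_desc
 * fits in 2K, so that CTB_DESC_SIZE is 2K): intel_guc_ct_init() allocates a
 * 2 * 2K + 4K + 16K = 24K blob, matching the table above with n = 1 and
 * m = 4: send descriptor at 0x0000, recv descriptor at 0x0800, send commands
 * at 0x1000 and recv commands at 0x2000. Additionally, G2H_ROOM_BUFFER_SIZE
 * (4K) of the recv buffer is kept reserved for unsolicited G2H messages, see
 * g2h_has_room().
 */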
77 
78 struct ct_request {
79 	struct list_head link;
80 	u32 fence;
81 	u32 status;
82 	u32 response_len;
83 	u32 *response_buf;
84 };
85 
86 struct ct_incoming_msg {
87 	struct list_head link;
88 	u32 size;
89 	u32 msg[];
90 };
91 
92 enum { CTB_SEND = 0, CTB_RECV = 1 };
93 
94 enum { CTB_OWNER_HOST = 0 };
95 
96 static void ct_receive_tasklet_func(struct tasklet_struct *t);
97 static void ct_incoming_request_worker_func(struct work_struct *w);
98 
99 /**
100  * intel_guc_ct_init_early - Initialize CT state without requiring device access
101  * @ct: pointer to CT struct
102  */
103 void intel_guc_ct_init_early(struct intel_guc_ct *ct)
104 {
105 	spin_lock_init(&ct->ctbs.send.lock);
106 	spin_lock_init(&ct->ctbs.recv.lock);
107 	spin_lock_init(&ct->requests.lock);
108 	INIT_LIST_HEAD(&ct->requests.pending);
109 	INIT_LIST_HEAD(&ct->requests.incoming);
110 	INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);
111 	tasklet_setup(&ct->receive_tasklet, ct_receive_tasklet_func);
112 	init_waitqueue_head(&ct->wq);
113 }
114 
115 static inline const char *guc_ct_buffer_type_to_str(u32 type)
116 {
117 	switch (type) {
118 	case GUC_CTB_TYPE_HOST2GUC:
119 		return "SEND";
120 	case GUC_CTB_TYPE_GUC2HOST:
121 		return "RECV";
122 	default:
123 		return "<invalid>";
124 	}
125 }
126 
127 static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc)
128 {
129 	memset(desc, 0, sizeof(*desc));
130 }
131 
132 static void guc_ct_buffer_reset(struct intel_guc_ct_buffer *ctb)
133 {
134 	u32 space;
135 
136 	ctb->broken = false;
137 	ctb->tail = 0;
138 	ctb->head = 0;
139 	space = CIRC_SPACE(ctb->tail, ctb->head, ctb->size) - ctb->resv_space;
140 	atomic_set(&ctb->space, space);
141 
142 	guc_ct_buffer_desc_init(ctb->desc);
143 }
144 
145 static void guc_ct_buffer_init(struct intel_guc_ct_buffer *ctb,
146 			       struct guc_ct_buffer_desc *desc,
147 			       u32 *cmds, u32 size_in_bytes, u32 resv_space)
148 {
149 	GEM_BUG_ON(size_in_bytes % 4);
150 
151 	ctb->desc = desc;
152 	ctb->cmds = cmds;
153 	ctb->size = size_in_bytes / 4;
154 	ctb->resv_space = resv_space / 4;
155 
156 	guc_ct_buffer_reset(ctb);
157 }
158 
159 static int guc_action_register_ct_buffer(struct intel_guc *guc, u32 type,
160 					 u32 desc_addr, u32 buff_addr, u32 size)
161 {
162 	u32 request[HOST2GUC_REGISTER_CTB_REQUEST_MSG_LEN] = {
163 		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
164 		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
165 		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_HOST2GUC_REGISTER_CTB),
166 		FIELD_PREP(HOST2GUC_REGISTER_CTB_REQUEST_MSG_1_SIZE, size / SZ_4K - 1) |
167 		FIELD_PREP(HOST2GUC_REGISTER_CTB_REQUEST_MSG_1_TYPE, type),
168 		FIELD_PREP(HOST2GUC_REGISTER_CTB_REQUEST_MSG_2_DESC_ADDR, desc_addr),
169 		FIELD_PREP(HOST2GUC_REGISTER_CTB_REQUEST_MSG_3_BUFF_ADDR, buff_addr),
170 	};
171 	int ret;
172 
173 	GEM_BUG_ON(type != GUC_CTB_TYPE_HOST2GUC && type != GUC_CTB_TYPE_GUC2HOST);
174 	GEM_BUG_ON(size % SZ_4K);
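
	/*
	 * Note that the SIZE field above is encoded as the number of 4K pages
	 * minus one, e.g. the default 16K G2H buffer is sent as 3 and the 4K
	 * H2G buffer as 0.
	 */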
175 
176 	/* CT registration must go over MMIO */
177 	ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
178 
179 	return ret > 0 ? -EPROTO : ret;
180 }
181 
182 static int ct_register_buffer(struct intel_guc_ct *ct, u32 type,
183 			      u32 desc_addr, u32 buff_addr, u32 size)
184 {
185 	int err;
186 
187 	err = i915_inject_probe_error(guc_to_gt(ct_to_guc(ct))->i915, -ENXIO);
188 	if (unlikely(err))
189 		return err;
190 
191 	err = guc_action_register_ct_buffer(ct_to_guc(ct), type,
192 					    desc_addr, buff_addr, size);
193 	if (unlikely(err))
194 		CT_ERROR(ct, "Failed to register %s buffer (%pe)\n",
195 			 guc_ct_buffer_type_to_str(type), ERR_PTR(err));
196 	return err;
197 }
198 
199 static int guc_action_deregister_ct_buffer(struct intel_guc *guc, u32 type)
200 {
201 	u32 request[HOST2GUC_DEREGISTER_CTB_REQUEST_MSG_LEN] = {
202 		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
203 		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
204 		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_HOST2GUC_DEREGISTER_CTB),
205 		FIELD_PREP(HOST2GUC_DEREGISTER_CTB_REQUEST_MSG_1_TYPE, type),
206 	};
207 	int ret;
208 
209 	GEM_BUG_ON(type != GUC_CTB_TYPE_HOST2GUC && type != GUC_CTB_TYPE_GUC2HOST);
210 
211 	/* CT deregistration must go over MMIO */
212 	ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
213 
214 	return ret > 0 ? -EPROTO : ret;
215 }
216 
217 static int ct_deregister_buffer(struct intel_guc_ct *ct, u32 type)
218 {
219 	int err = guc_action_deregister_ct_buffer(ct_to_guc(ct), type);
220 
221 	if (unlikely(err))
222 		CT_ERROR(ct, "Failed to deregister %s buffer (%pe)\n",
223 			 guc_ct_buffer_type_to_str(type), ERR_PTR(err));
224 	return err;
225 }
226 
227 /**
228  * intel_guc_ct_init - Init buffer-based communication
229  * @ct: pointer to CT struct
230  *
231  * Allocate memory required for buffer-based communication.
232  *
233  * Return: 0 on success, a negative errno code on failure.
234  */
235 int intel_guc_ct_init(struct intel_guc_ct *ct)
236 {
237 	struct intel_guc *guc = ct_to_guc(ct);
238 	struct guc_ct_buffer_desc *desc;
239 	u32 blob_size;
240 	u32 cmds_size;
241 	u32 resv_space;
242 	void *blob;
243 	u32 *cmds;
244 	int err;
245 
246 	err = i915_inject_probe_error(guc_to_gt(guc)->i915, -ENXIO);
247 	if (err)
248 		return err;
249 
250 	GEM_BUG_ON(ct->vma);
251 
252 	blob_size = 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE + CTB_G2H_BUFFER_SIZE;
253 	err = intel_guc_allocate_and_map_vma(guc, blob_size, &ct->vma, &blob);
254 	if (unlikely(err)) {
255 		CT_PROBE_ERROR(ct, "Failed to allocate %u for CTB data (%pe)\n",
256 			       blob_size, ERR_PTR(err));
257 		return err;
258 	}
259 
260 	CT_DEBUG(ct, "base=%#x size=%u\n", intel_guc_ggtt_offset(guc, ct->vma), blob_size);
261 
262 	/* store pointers to desc and cmds for send ctb */
263 	desc = blob;
264 	cmds = blob + 2 * CTB_DESC_SIZE;
265 	cmds_size = CTB_H2G_BUFFER_SIZE;
266 	resv_space = 0;
267 	CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u/%u\n", "send",
268 		 ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size,
269 		 resv_space);
270 
271 	guc_ct_buffer_init(&ct->ctbs.send, desc, cmds, cmds_size, resv_space);
272 
273 	/* store pointers to desc and cmds for recv ctb */
274 	desc = blob + CTB_DESC_SIZE;
275 	cmds = blob + 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE;
276 	cmds_size = CTB_G2H_BUFFER_SIZE;
277 	resv_space = G2H_ROOM_BUFFER_SIZE;
278 	CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u/%u\n", "recv",
279 		 ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size,
280 		 resv_space);
281 
282 	guc_ct_buffer_init(&ct->ctbs.recv, desc, cmds, cmds_size, resv_space);
283 
284 	return 0;
285 }
286 
287 /**
288  * intel_guc_ct_fini - Fini buffer-based communication
289  * @ct: pointer to CT struct
290  *
291  * Deallocate memory required for buffer-based communication.
292  */
293 void intel_guc_ct_fini(struct intel_guc_ct *ct)
294 {
295 	GEM_BUG_ON(ct->enabled);
296 
297 	tasklet_kill(&ct->receive_tasklet);
298 	i915_vma_unpin_and_release(&ct->vma, I915_VMA_RELEASE_MAP);
299 	memset(ct, 0, sizeof(*ct));
300 }
301 
302 /**
303  * intel_guc_ct_enable - Enable buffer based command transport.
304  * @ct: pointer to CT struct
305  *
306  * Return: 0 on success, a negative errno code on failure.
307  */
308 int intel_guc_ct_enable(struct intel_guc_ct *ct)
309 {
310 	struct intel_guc *guc = ct_to_guc(ct);
311 	u32 base, desc, cmds;
312 	void *blob;
313 	int err;
314 
315 	GEM_BUG_ON(ct->enabled);
316 
	/* vma should be already allocated and mapped */
318 	GEM_BUG_ON(!ct->vma);
319 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(ct->vma->obj));
320 	base = intel_guc_ggtt_offset(guc, ct->vma);
321 
322 	/* blob should start with send descriptor */
323 	blob = __px_vaddr(ct->vma->obj);
324 	GEM_BUG_ON(blob != ct->ctbs.send.desc);
325 
326 	/* (re)initialize descriptors */
327 	guc_ct_buffer_reset(&ct->ctbs.send);
328 	guc_ct_buffer_reset(&ct->ctbs.recv);
329 
330 	/*
	 * Register both CT buffers, starting with the RECV buffer.
	 * Descriptors are in the first half of the blob.
333 	 */
334 	desc = base + ptrdiff(ct->ctbs.recv.desc, blob);
335 	cmds = base + ptrdiff(ct->ctbs.recv.cmds, blob);
336 	err = ct_register_buffer(ct, GUC_CTB_TYPE_GUC2HOST,
337 				 desc, cmds, ct->ctbs.recv.size * 4);
338 
339 	if (unlikely(err))
340 		goto err_out;
341 
342 	desc = base + ptrdiff(ct->ctbs.send.desc, blob);
343 	cmds = base + ptrdiff(ct->ctbs.send.cmds, blob);
344 	err = ct_register_buffer(ct, GUC_CTB_TYPE_HOST2GUC,
345 				 desc, cmds, ct->ctbs.send.size * 4);
346 
347 	if (unlikely(err))
348 		goto err_deregister;
349 
350 	ct->enabled = true;
351 	ct->stall_time = KTIME_MAX;
352 
353 	return 0;
354 
355 err_deregister:
356 	ct_deregister_buffer(ct, GUC_CTB_TYPE_GUC2HOST);
357 err_out:
358 	CT_PROBE_ERROR(ct, "Failed to enable CTB (%pe)\n", ERR_PTR(err));
359 	return err;
360 }
361 
362 /**
363  * intel_guc_ct_disable - Disable buffer based command transport.
364  * @ct: pointer to CT struct
365  */
366 void intel_guc_ct_disable(struct intel_guc_ct *ct)
367 {
368 	struct intel_guc *guc = ct_to_guc(ct);
369 
370 	GEM_BUG_ON(!ct->enabled);
371 
372 	ct->enabled = false;
373 
374 	if (intel_guc_is_fw_running(guc)) {
375 		ct_deregister_buffer(ct, GUC_CTB_TYPE_HOST2GUC);
376 		ct_deregister_buffer(ct, GUC_CTB_TYPE_GUC2HOST);
377 	}
378 }
379 
380 static u32 ct_get_next_fence(struct intel_guc_ct *ct)
381 {
382 	/* For now it's trivial */
383 	return ++ct->requests.last_fence;
384 }
385 
386 static int ct_write(struct intel_guc_ct *ct,
387 		    const u32 *action,
388 		    u32 len /* in dwords */,
389 		    u32 fence, u32 flags)
390 {
391 	struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
392 	struct guc_ct_buffer_desc *desc = ctb->desc;
393 	u32 tail = ctb->tail;
394 	u32 size = ctb->size;
395 	u32 header;
396 	u32 hxg;
397 	u32 type;
398 	u32 *cmds = ctb->cmds;
399 	unsigned int i;
400 
401 	if (unlikely(desc->status))
402 		goto corrupted;
403 
404 	GEM_BUG_ON(tail > size);
405 
406 #ifdef CONFIG_DRM_I915_DEBUG_GUC
407 	if (unlikely(tail != READ_ONCE(desc->tail))) {
408 		CT_ERROR(ct, "Tail was modified %u != %u\n",
409 			 desc->tail, tail);
410 		desc->status |= GUC_CTB_STATUS_MISMATCH;
411 		goto corrupted;
412 	}
413 	if (unlikely(READ_ONCE(desc->head) >= size)) {
414 		CT_ERROR(ct, "Invalid head offset %u >= %u)\n",
415 			 desc->head, size);
416 		desc->status |= GUC_CTB_STATUS_OVERFLOW;
417 		goto corrupted;
418 	}
419 #endif
420 
421 	/*
422 	 * dw0: CT header (including fence)
423 	 * dw1: HXG header (including action code)
424 	 * dw2+: action data
425 	 */
426 	header = FIELD_PREP(GUC_CTB_MSG_0_FORMAT, GUC_CTB_FORMAT_HXG) |
427 		 FIELD_PREP(GUC_CTB_MSG_0_NUM_DWORDS, len) |
428 		 FIELD_PREP(GUC_CTB_MSG_0_FENCE, fence);
429 
430 	type = (flags & INTEL_GUC_CT_SEND_NB) ? GUC_HXG_TYPE_EVENT :
431 		GUC_HXG_TYPE_REQUEST;
432 	hxg = FIELD_PREP(GUC_HXG_MSG_0_TYPE, type) |
433 		FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
434 			   GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
435 
436 	CT_DEBUG(ct, "writing (tail %u) %*ph %*ph %*ph\n",
437 		 tail, 4, &header, 4, &hxg, 4 * (len - 1), &action[1]);
438 
439 	cmds[tail] = header;
440 	tail = (tail + 1) % size;
441 
442 	cmds[tail] = hxg;
443 	tail = (tail + 1) % size;
444 
445 	for (i = 1; i < len; i++) {
446 		cmds[tail] = action[i];
447 		tail = (tail + 1) % size;
448 	}
449 	GEM_BUG_ON(tail > size);
450 
451 	/*
	 * make sure H2G buffer update and LRC tail update (if this is triggering
	 * a submission) are visible before updating the descriptor tail
454 	 */
455 	intel_guc_write_barrier(ct_to_guc(ct));
456 
457 	/* update local copies */
458 	ctb->tail = tail;
459 	GEM_BUG_ON(atomic_read(&ctb->space) < len + GUC_CTB_HDR_LEN);
460 	atomic_sub(len + GUC_CTB_HDR_LEN, &ctb->space);
461 
462 	/* now update descriptor */
463 	WRITE_ONCE(desc->tail, tail);
464 
465 	return 0;
466 
467 corrupted:
468 	CT_ERROR(ct, "Corrupted descriptor head=%u tail=%u status=%#x\n",
469 		 desc->head, desc->tail, desc->status);
470 	ctb->broken = true;
471 	return -EPIPE;
472 }
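
/*
 * An illustrative ct_write() example (hypothetical action values, not from
 * the GuC ABI): a 3-dword action array { A0, A1, A2 } sent with fence F
 * lands in the ring as 4 dwords,
 *
 *	cmds[tail + 0] = CTB header (HXG format, NUM_DWORDS = 3, FENCE = F)
 *	cmds[tail + 1] = HXG header (type plus ACTION/DATA0 taken from A0)
 *	cmds[tail + 2] = A1
 *	cmds[tail + 3] = A2
 *
 * The H2G space debited from ctb->space is len + GUC_CTB_HDR_LEN dwords, and
 * any wrap is handled by the modulo arithmetic on tail.
 */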
473 
474 /**
475  * wait_for_ct_request_update - Wait for CT request state update.
476  * @req:	pointer to pending request
477  * @status:	placeholder for status
478  *
 * For each sent request, the GuC shall send back a CT response message.
 * Our message handler will update the status of the tracked request once
 * a response message with the given fence is received. Wait here and
 * check for a valid response status value.
483  *
484  * Return:
485  * *	0 response received (status is valid)
486  * *	-ETIMEDOUT no response within hardcoded timeout
487  */
488 static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
489 {
490 	int err;
491 
492 	/*
493 	 * Fast commands should complete in less than 10us, so sample quickly
494 	 * up to that length of time, then switch to a slower sleep-wait loop.
	 * No GuC command should ever take longer than 10ms, but many GuC
	 * commands can be in flight at a time, so use a 1s timeout on the slower
497 	 * sleep-wait loop.
498 	 */
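	/*
	 * The "done" test below relies on ct_handle_response() overwriting
	 * req->status (initialized to 0 by the sender, i.e. not GuC-originated)
	 * with the received HXG header, whose ORIGIN field is
	 * GUC_HXG_ORIGIN_GUC.
	 */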
499 #define GUC_CTB_RESPONSE_TIMEOUT_SHORT_MS 10
500 #define GUC_CTB_RESPONSE_TIMEOUT_LONG_MS 1000
501 #define done \
502 	(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, READ_ONCE(req->status)) == \
503 	 GUC_HXG_ORIGIN_GUC)
504 	err = wait_for_us(done, GUC_CTB_RESPONSE_TIMEOUT_SHORT_MS);
505 	if (err)
506 		err = wait_for(done, GUC_CTB_RESPONSE_TIMEOUT_LONG_MS);
507 #undef done
508 
509 	*status = req->status;
510 	return err;
511 }
512 
513 #define GUC_CTB_TIMEOUT_MS	1500
514 static inline bool ct_deadlocked(struct intel_guc_ct *ct)
515 {
516 	long timeout = GUC_CTB_TIMEOUT_MS;
517 	bool ret = ktime_ms_delta(ktime_get(), ct->stall_time) > timeout;
518 
519 	if (unlikely(ret)) {
520 		struct guc_ct_buffer_desc *send = ct->ctbs.send.desc;
		struct guc_ct_buffer_desc *recv = ct->ctbs.recv.desc;
522 
523 		CT_ERROR(ct, "Communication stalled for %lld ms, desc status=%#x,%#x\n",
524 			 ktime_ms_delta(ktime_get(), ct->stall_time),
525 			 send->status, recv->status);
526 		CT_ERROR(ct, "H2G Space: %u (Bytes)\n",
527 			 atomic_read(&ct->ctbs.send.space) * 4);
528 		CT_ERROR(ct, "Head: %u (Dwords)\n", ct->ctbs.send.desc->head);
529 		CT_ERROR(ct, "Tail: %u (Dwords)\n", ct->ctbs.send.desc->tail);
530 		CT_ERROR(ct, "G2H Space: %u (Bytes)\n",
531 			 atomic_read(&ct->ctbs.recv.space) * 4);
532 		CT_ERROR(ct, "Head: %u\n (Dwords)", ct->ctbs.recv.desc->head);
533 		CT_ERROR(ct, "Tail: %u\n (Dwords)", ct->ctbs.recv.desc->tail);
534 
535 		ct->ctbs.send.broken = true;
536 	}
537 
538 	return ret;
539 }
540 
541 static inline bool g2h_has_room(struct intel_guc_ct *ct, u32 g2h_len_dw)
542 {
543 	struct intel_guc_ct_buffer *ctb = &ct->ctbs.recv;
544 
545 	/*
546 	 * We leave a certain amount of space in the G2H CTB buffer for
547 	 * unexpected G2H CTBs (e.g. logging, engine hang, etc...)
548 	 */
549 	return !g2h_len_dw || atomic_read(&ctb->space) >= g2h_len_dw;
550 }
551 
552 static inline void g2h_reserve_space(struct intel_guc_ct *ct, u32 g2h_len_dw)
553 {
554 	lockdep_assert_held(&ct->ctbs.send.lock);
555 
556 	GEM_BUG_ON(!g2h_has_room(ct, g2h_len_dw));
557 
558 	if (g2h_len_dw)
559 		atomic_sub(g2h_len_dw, &ct->ctbs.recv.space);
560 }
561 
562 static inline void g2h_release_space(struct intel_guc_ct *ct, u32 g2h_len_dw)
563 {
564 	atomic_add(g2h_len_dw, &ct->ctbs.recv.space);
565 }
566 
567 static inline bool h2g_has_room(struct intel_guc_ct *ct, u32 len_dw)
568 {
569 	struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
570 	struct guc_ct_buffer_desc *desc = ctb->desc;
571 	u32 head;
572 	u32 space;
573 
574 	if (atomic_read(&ctb->space) >= len_dw)
575 		return true;
576 
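	/*
	 * The cached space was too small: re-read the head (advanced by the
	 * GuC as it consumes messages) and recompute the free space. As an
	 * example, with the default 4K (1024 dword) send buffer, tail == head
	 * means CIRC_SPACE() reports 1023 free dwords.
	 */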
577 	head = READ_ONCE(desc->head);
578 	if (unlikely(head > ctb->size)) {
579 		CT_ERROR(ct, "Invalid head offset %u >= %u)\n",
580 			 head, ctb->size);
581 		desc->status |= GUC_CTB_STATUS_OVERFLOW;
582 		ctb->broken = true;
583 		return false;
584 	}
585 
586 	space = CIRC_SPACE(ctb->tail, head, ctb->size);
587 	atomic_set(&ctb->space, space);
588 
589 	return space >= len_dw;
590 }
591 
592 static int has_room_nb(struct intel_guc_ct *ct, u32 h2g_dw, u32 g2h_dw)
593 {
594 	bool h2g = h2g_has_room(ct, h2g_dw);
595 	bool g2h = g2h_has_room(ct, g2h_dw);
596 
597 	lockdep_assert_held(&ct->ctbs.send.lock);
598 
599 	if (unlikely(!h2g || !g2h)) {
600 		if (ct->stall_time == KTIME_MAX)
601 			ct->stall_time = ktime_get();
602 
603 		/* Be paranoid and kick G2H tasklet to free credits */
604 		if (!g2h)
605 			tasklet_hi_schedule(&ct->receive_tasklet);
606 
607 		if (unlikely(ct_deadlocked(ct)))
608 			return -EPIPE;
609 		else
610 			return -EBUSY;
611 	}
612 
613 	ct->stall_time = KTIME_MAX;
614 	return 0;
615 }
616 
617 #define G2H_LEN_DW(f) ({ \
618 	typeof(f) f_ = (f); \
619 	FIELD_GET(INTEL_GUC_CT_SEND_G2H_DW_MASK, f_) ? \
620 	FIELD_GET(INTEL_GUC_CT_SEND_G2H_DW_MASK, f_) + \
621 	GUC_CTB_HXG_MSG_MIN_LEN : 0; \
622 })
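
/*
 * G2H_LEN_DW() above extracts the caller-requested number of G2H payload
 * dwords from the send flags and, when non-zero, adds GUC_CTB_HXG_MSG_MIN_LEN
 * on top, presumably so that the credits reserved for the reply also cover
 * its CTB and HXG headers.
 */
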
623 static int ct_send_nb(struct intel_guc_ct *ct,
624 		      const u32 *action,
625 		      u32 len,
626 		      u32 flags)
627 {
628 	struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
629 	unsigned long spin_flags;
630 	u32 g2h_len_dw = G2H_LEN_DW(flags);
631 	u32 fence;
632 	int ret;
633 
634 	spin_lock_irqsave(&ctb->lock, spin_flags);
635 
636 	ret = has_room_nb(ct, len + GUC_CTB_HDR_LEN, g2h_len_dw);
637 	if (unlikely(ret))
638 		goto out;
639 
640 	fence = ct_get_next_fence(ct);
641 	ret = ct_write(ct, action, len, fence, flags);
642 	if (unlikely(ret))
643 		goto out;
644 
645 	g2h_reserve_space(ct, g2h_len_dw);
646 	intel_guc_notify(ct_to_guc(ct));
647 
648 out:
649 	spin_unlock_irqrestore(&ctb->lock, spin_flags);
650 
651 	return ret;
652 }
653 
654 static int ct_send(struct intel_guc_ct *ct,
655 		   const u32 *action,
656 		   u32 len,
657 		   u32 *response_buf,
658 		   u32 response_buf_size,
659 		   u32 *status)
660 {
661 	struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
662 	struct ct_request request;
663 	unsigned long flags;
664 	unsigned int sleep_period_ms = 1;
665 	u32 fence;
666 	int err;
667 
668 	GEM_BUG_ON(!ct->enabled);
669 	GEM_BUG_ON(!len);
670 	GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
671 	GEM_BUG_ON(!response_buf && response_buf_size);
672 	might_sleep();
673 
674 	/*
	 * We use a lazy spin wait loop here as we believe that if the CT
	 * buffers are sized correctly the flow control condition should be
	 * rare. We reserve the maximum size in the G2H credits as we don't
	 * know how big the response is going to be.
679 	 */
680 retry:
681 	spin_lock_irqsave(&ctb->lock, flags);
682 	if (unlikely(!h2g_has_room(ct, len + GUC_CTB_HDR_LEN) ||
683 		     !g2h_has_room(ct, GUC_CTB_HXG_MSG_MAX_LEN))) {
684 		if (ct->stall_time == KTIME_MAX)
685 			ct->stall_time = ktime_get();
686 		spin_unlock_irqrestore(&ctb->lock, flags);
687 
688 		if (unlikely(ct_deadlocked(ct)))
689 			return -EPIPE;
690 
691 		if (msleep_interruptible(sleep_period_ms))
692 			return -EINTR;
693 		sleep_period_ms = sleep_period_ms << 1;
694 
695 		goto retry;
696 	}
697 
698 	ct->stall_time = KTIME_MAX;
699 
700 	fence = ct_get_next_fence(ct);
701 	request.fence = fence;
702 	request.status = 0;
703 	request.response_len = response_buf_size;
704 	request.response_buf = response_buf;
705 
706 	spin_lock(&ct->requests.lock);
707 	list_add_tail(&request.link, &ct->requests.pending);
708 	spin_unlock(&ct->requests.lock);
709 
710 	err = ct_write(ct, action, len, fence, 0);
711 	g2h_reserve_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
712 
713 	spin_unlock_irqrestore(&ctb->lock, flags);
714 
715 	if (unlikely(err))
716 		goto unlink;
717 
718 	intel_guc_notify(ct_to_guc(ct));
719 
720 	err = wait_for_ct_request_update(&request, status);
721 	g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
722 	if (unlikely(err)) {
723 		CT_ERROR(ct, "No response for request %#x (fence %u)\n",
724 			 action[0], request.fence);
725 		goto unlink;
726 	}
727 
728 	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, *status) != GUC_HXG_TYPE_RESPONSE_SUCCESS) {
729 		err = -EIO;
730 		goto unlink;
731 	}
732 
733 	if (response_buf) {
734 		/* There shall be no data in the status */
735 		WARN_ON(FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, request.status));
736 		/* Return actual response len */
737 		err = request.response_len;
738 	} else {
739 		/* There shall be no response payload */
740 		WARN_ON(request.response_len);
741 		/* Return data decoded from the status dword */
742 		err = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, *status);
743 	}
744 
745 unlink:
746 	spin_lock_irqsave(&ct->requests.lock, flags);
747 	list_del(&request.link);
748 	spin_unlock_irqrestore(&ct->requests.lock, flags);
749 
750 	return err;
751 }
752 
753 /*
754  * Command Transport (CT) buffer based GuC send function.
755  */
756 int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
757 		      u32 *response_buf, u32 response_buf_size, u32 flags)
758 {
759 	u32 status = ~0; /* undefined */
760 	int ret;
761 
762 	if (unlikely(!ct->enabled)) {
763 		struct intel_guc *guc = ct_to_guc(ct);
764 		struct intel_uc *uc = container_of(guc, struct intel_uc, guc);
765 
766 		WARN(!uc->reset_in_progress, "Unexpected send: action=%#x\n", *action);
767 		return -ENODEV;
768 	}
769 
770 	if (unlikely(ct->ctbs.send.broken))
771 		return -EPIPE;
772 
773 	if (flags & INTEL_GUC_CT_SEND_NB)
774 		return ct_send_nb(ct, action, len, flags);
775 
776 	ret = ct_send(ct, action, len, response_buf, response_buf_size, &status);
777 	if (unlikely(ret < 0)) {
778 		CT_ERROR(ct, "Sending action %#x failed (%pe) status=%#X\n",
779 			 action[0], ERR_PTR(ret), status);
780 	} else if (unlikely(ret)) {
781 		CT_DEBUG(ct, "send action %#x returned %d (%#x)\n",
782 			 action[0], ret, ret);
783 	}
784 
785 	return ret;
786 }
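
/*
 * A minimal usage sketch for the blocking path above (hypothetical action and
 * parameter names, not taken from the GuC ABI):
 *
 *	u32 action[] = { SOME_H2G_ACTION, param0, param1 };
 *	u32 response[8];
 *	int ret;
 *
 *	ret = intel_guc_ct_send(ct, action, ARRAY_SIZE(action),
 *				response, ARRAY_SIZE(response), 0);
 *
 * A negative ret is an errno; otherwise ret is the response length in dwords
 * when a response buffer was supplied, or DATA0 decoded from the status dword
 * when it was not. Passing INTEL_GUC_CT_SEND_NB in flags selects the
 * non-blocking ct_send_nb() path instead, which never returns response data.
 */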
787 
788 static struct ct_incoming_msg *ct_alloc_msg(u32 num_dwords)
789 {
790 	struct ct_incoming_msg *msg;
791 
792 	msg = kmalloc(sizeof(*msg) + sizeof(u32) * num_dwords, GFP_ATOMIC);
793 	if (msg)
794 		msg->size = num_dwords;
795 	return msg;
796 }
797 
798 static void ct_free_msg(struct ct_incoming_msg *msg)
799 {
800 	kfree(msg);
801 }
802 
803 /*
 * Return: number of remaining dwords available to read (0 if empty)
805  *         or a negative error code on failure
806  */
807 static int ct_read(struct intel_guc_ct *ct, struct ct_incoming_msg **msg)
808 {
809 	struct intel_guc_ct_buffer *ctb = &ct->ctbs.recv;
810 	struct guc_ct_buffer_desc *desc = ctb->desc;
811 	u32 head = ctb->head;
812 	u32 tail = READ_ONCE(desc->tail);
813 	u32 size = ctb->size;
814 	u32 *cmds = ctb->cmds;
815 	s32 available;
816 	unsigned int len;
817 	unsigned int i;
818 	u32 header;
819 
820 	if (unlikely(ctb->broken))
821 		return -EPIPE;
822 
823 	if (unlikely(desc->status))
824 		goto corrupted;
825 
826 	GEM_BUG_ON(head > size);
827 
828 #ifdef CONFIG_DRM_I915_DEBUG_GUC
829 	if (unlikely(head != READ_ONCE(desc->head))) {
830 		CT_ERROR(ct, "Head was modified %u != %u\n",
831 			 desc->head, head);
832 		desc->status |= GUC_CTB_STATUS_MISMATCH;
833 		goto corrupted;
834 	}
835 #endif
836 	if (unlikely(tail >= size)) {
837 		CT_ERROR(ct, "Invalid tail offset %u >= %u)\n",
838 			 tail, size);
839 		desc->status |= GUC_CTB_STATUS_OVERFLOW;
840 		goto corrupted;
841 	}
842 
843 	/* tail == head condition indicates empty */
844 	available = tail - head;
845 	if (unlikely(available == 0)) {
846 		*msg = NULL;
847 		return 0;
848 	}
849 
850 	/* beware of buffer wrap case */
851 	if (unlikely(available < 0))
852 		available += size;
853 	CT_DEBUG(ct, "available %d (%u:%u:%u)\n", available, head, tail, size);
854 	GEM_BUG_ON(available < 0);
855 
856 	header = cmds[head];
857 	head = (head + 1) % size;
858 
859 	/* message len with header */
860 	len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, header) + GUC_CTB_MSG_MIN_LEN;
861 	if (unlikely(len > (u32)available)) {
862 		CT_ERROR(ct, "Incomplete message %*ph %*ph %*ph\n",
863 			 4, &header,
864 			 4 * (head + available - 1 > size ?
865 			      size - head : available - 1), &cmds[head],
866 			 4 * (head + available - 1 > size ?
867 			      available - 1 - size + head : 0), &cmds[0]);
868 		desc->status |= GUC_CTB_STATUS_UNDERFLOW;
869 		goto corrupted;
870 	}
871 
872 	*msg = ct_alloc_msg(len);
873 	if (!*msg) {
874 		CT_ERROR(ct, "No memory for message %*ph %*ph %*ph\n",
875 			 4, &header,
876 			 4 * (head + available - 1 > size ?
877 			      size - head : available - 1), &cmds[head],
878 			 4 * (head + available - 1 > size ?
879 			      available - 1 - size + head : 0), &cmds[0]);
880 		return available;
881 	}
882 
883 	(*msg)->msg[0] = header;
884 
885 	for (i = 1; i < len; i++) {
886 		(*msg)->msg[i] = cmds[head];
887 		head = (head + 1) % size;
888 	}
889 	CT_DEBUG(ct, "received %*ph\n", 4 * len, (*msg)->msg);
890 
891 	/* update local copies */
892 	ctb->head = head;
893 
894 	/* now update descriptor */
895 	WRITE_ONCE(desc->head, head);
896 
897 	return available - len;
898 
899 corrupted:
900 	CT_ERROR(ct, "Corrupted descriptor head=%u tail=%u status=%#x\n",
901 		 desc->head, desc->tail, desc->status);
902 	ctb->broken = true;
903 	return -EPIPE;
904 }
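
/*
 * A worked wrap-around example for ct_read() above: with the default 16K
 * (4096 dword) recv buffer, head == 4090 and tail == 6 gives
 * available = 6 - 4090 = -4084, corrected to 12 by adding the ring size;
 * the copy loop then wraps back to cmds[0] via the modulo on head.
 */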
905 
906 static int ct_handle_response(struct intel_guc_ct *ct, struct ct_incoming_msg *response)
907 {
908 	u32 len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, response->msg[0]);
909 	u32 fence = FIELD_GET(GUC_CTB_MSG_0_FENCE, response->msg[0]);
910 	const u32 *hxg = &response->msg[GUC_CTB_MSG_MIN_LEN];
911 	const u32 *data = &hxg[GUC_HXG_MSG_MIN_LEN];
912 	u32 datalen = len - GUC_HXG_MSG_MIN_LEN;
913 	struct ct_request *req;
914 	unsigned long flags;
915 	bool found = false;
916 	int err = 0;
917 
918 	GEM_BUG_ON(len < GUC_HXG_MSG_MIN_LEN);
919 	GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]) != GUC_HXG_ORIGIN_GUC);
920 	GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_RESPONSE_SUCCESS &&
921 		   FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_RESPONSE_FAILURE);
922 
923 	CT_DEBUG(ct, "response fence %u status %#x\n", fence, hxg[0]);
924 
925 	spin_lock_irqsave(&ct->requests.lock, flags);
926 	list_for_each_entry(req, &ct->requests.pending, link) {
927 		if (unlikely(fence != req->fence)) {
928 			CT_DEBUG(ct, "request %u awaits response\n",
929 				 req->fence);
930 			continue;
931 		}
932 		if (unlikely(datalen > req->response_len)) {
933 			CT_ERROR(ct, "Response %u too long (datalen %u > %u)\n",
934 				 req->fence, datalen, req->response_len);
935 			datalen = min(datalen, req->response_len);
936 			err = -EMSGSIZE;
937 		}
938 		if (datalen)
939 			memcpy(req->response_buf, data, 4 * datalen);
940 		req->response_len = datalen;
941 		WRITE_ONCE(req->status, hxg[0]);
942 		found = true;
943 		break;
944 	}
945 	if (!found) {
946 		CT_ERROR(ct, "Unsolicited response (fence %u)\n", fence);
947 		CT_ERROR(ct, "Could not find fence=%u, last_fence=%u\n", fence,
948 			 ct->requests.last_fence);
949 		list_for_each_entry(req, &ct->requests.pending, link)
950 			CT_ERROR(ct, "request %u awaits response\n",
951 				 req->fence);
952 		err = -ENOKEY;
953 	}
954 	spin_unlock_irqrestore(&ct->requests.lock, flags);
955 
956 	if (unlikely(err))
957 		return err;
958 
959 	ct_free_msg(response);
960 	return 0;
961 }
962 
963 static int ct_process_request(struct intel_guc_ct *ct, struct ct_incoming_msg *request)
964 {
965 	struct intel_guc *guc = ct_to_guc(ct);
966 	const u32 *hxg;
967 	const u32 *payload;
968 	u32 hxg_len, action, len;
969 	int ret;
970 
971 	hxg = &request->msg[GUC_CTB_MSG_MIN_LEN];
972 	hxg_len = request->size - GUC_CTB_MSG_MIN_LEN;
973 	payload = &hxg[GUC_HXG_MSG_MIN_LEN];
974 	action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
975 	len = hxg_len - GUC_HXG_MSG_MIN_LEN;
976 
977 	CT_DEBUG(ct, "request %x %*ph\n", action, 4 * len, payload);
978 
979 	switch (action) {
980 	case INTEL_GUC_ACTION_DEFAULT:
981 		ret = intel_guc_to_host_process_recv_msg(guc, payload, len);
982 		break;
983 	case INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
984 		ret = intel_guc_deregister_done_process_msg(guc, payload,
985 							    len);
986 		break;
987 	case INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
988 		ret = intel_guc_sched_done_process_msg(guc, payload, len);
989 		break;
990 	case INTEL_GUC_ACTION_CONTEXT_RESET_NOTIFICATION:
991 		ret = intel_guc_context_reset_process_msg(guc, payload, len);
992 		break;
993 	case INTEL_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION:
994 		ret = intel_guc_engine_failure_process_msg(guc, payload, len);
995 		break;
996 	default:
997 		ret = -EOPNOTSUPP;
998 		break;
999 	}
1000 
1001 	if (unlikely(ret)) {
1002 		CT_ERROR(ct, "Failed to process request %04x (%pe)\n",
1003 			 action, ERR_PTR(ret));
1004 		return ret;
1005 	}
1006 
1007 	ct_free_msg(request);
1008 	return 0;
1009 }
1010 
1011 static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
1012 {
1013 	unsigned long flags;
1014 	struct ct_incoming_msg *request;
1015 	bool done;
1016 	int err;
1017 
1018 	spin_lock_irqsave(&ct->requests.lock, flags);
1019 	request = list_first_entry_or_null(&ct->requests.incoming,
1020 					   struct ct_incoming_msg, link);
1021 	if (request)
1022 		list_del(&request->link);
1023 	done = !!list_empty(&ct->requests.incoming);
1024 	spin_unlock_irqrestore(&ct->requests.lock, flags);
1025 
1026 	if (!request)
1027 		return true;
1028 
1029 	err = ct_process_request(ct, request);
1030 	if (unlikely(err)) {
1031 		CT_ERROR(ct, "Failed to process CT message (%pe) %*ph\n",
1032 			 ERR_PTR(err), 4 * request->size, request->msg);
1033 		ct_free_msg(request);
1034 	}
1035 
1036 	return done;
1037 }
1038 
1039 static void ct_incoming_request_worker_func(struct work_struct *w)
1040 {
1041 	struct intel_guc_ct *ct =
1042 		container_of(w, struct intel_guc_ct, requests.worker);
1043 	bool done;
1044 
1045 	do {
1046 		done = ct_process_incoming_requests(ct);
1047 	} while (!done);
1048 }
1049 
1050 static int ct_handle_event(struct intel_guc_ct *ct, struct ct_incoming_msg *request)
1051 {
1052 	const u32 *hxg = &request->msg[GUC_CTB_MSG_MIN_LEN];
1053 	u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
1054 	unsigned long flags;
1055 
1056 	GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT);
1057 
1058 	/*
	 * Adjusting the space must be done here, in IRQ context, or a deadlock
	 * can occur: the CTB processing in the workqueue below can itself send
	 * CTBs, which creates a circular dependency if the space were only
	 * returned there.
1062 	 */
1063 	switch (action) {
1064 	case INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
1065 	case INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
1066 		g2h_release_space(ct, request->size);
1067 	}
1068 
1069 	spin_lock_irqsave(&ct->requests.lock, flags);
1070 	list_add_tail(&request->link, &ct->requests.incoming);
1071 	spin_unlock_irqrestore(&ct->requests.lock, flags);
1072 
1073 	queue_work(system_unbound_wq, &ct->requests.worker);
1074 	return 0;
1075 }
1076 
1077 static int ct_handle_hxg(struct intel_guc_ct *ct, struct ct_incoming_msg *msg)
1078 {
1079 	u32 origin, type;
1080 	u32 *hxg;
1081 	int err;
1082 
1083 	if (unlikely(msg->size < GUC_CTB_HXG_MSG_MIN_LEN))
1084 		return -EBADMSG;
1085 
1086 	hxg = &msg->msg[GUC_CTB_MSG_MIN_LEN];
1087 
1088 	origin = FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]);
1089 	if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) {
1090 		err = -EPROTO;
1091 		goto failed;
1092 	}
1093 
1094 	type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
1095 	switch (type) {
1096 	case GUC_HXG_TYPE_EVENT:
1097 		err = ct_handle_event(ct, msg);
1098 		break;
1099 	case GUC_HXG_TYPE_RESPONSE_SUCCESS:
1100 	case GUC_HXG_TYPE_RESPONSE_FAILURE:
1101 		err = ct_handle_response(ct, msg);
1102 		break;
1103 	default:
1104 		err = -EOPNOTSUPP;
1105 	}
1106 
1107 	if (unlikely(err)) {
1108 failed:
1109 		CT_ERROR(ct, "Failed to handle HXG message (%pe) %*ph\n",
1110 			 ERR_PTR(err), 4 * GUC_HXG_MSG_MIN_LEN, hxg);
1111 	}
1112 	return err;
1113 }
1114 
1115 static void ct_handle_msg(struct intel_guc_ct *ct, struct ct_incoming_msg *msg)
1116 {
1117 	u32 format = FIELD_GET(GUC_CTB_MSG_0_FORMAT, msg->msg[0]);
1118 	int err;
1119 
1120 	if (format == GUC_CTB_FORMAT_HXG)
1121 		err = ct_handle_hxg(ct, msg);
1122 	else
1123 		err = -EOPNOTSUPP;
1124 
1125 	if (unlikely(err)) {
1126 		CT_ERROR(ct, "Failed to process CT message (%pe) %*ph\n",
1127 			 ERR_PTR(err), 4 * msg->size, msg->msg);
1128 		ct_free_msg(msg);
1129 	}
1130 }
1131 
1132 /*
 * Return: number of remaining dwords available to read (0 if empty)
1134  *         or a negative error code on failure
1135  */
1136 static int ct_receive(struct intel_guc_ct *ct)
1137 {
1138 	struct ct_incoming_msg *msg = NULL;
1139 	unsigned long flags;
1140 	int ret;
1141 
1142 	spin_lock_irqsave(&ct->ctbs.recv.lock, flags);
1143 	ret = ct_read(ct, &msg);
1144 	spin_unlock_irqrestore(&ct->ctbs.recv.lock, flags);
1145 	if (ret < 0)
1146 		return ret;
1147 
1148 	if (msg)
1149 		ct_handle_msg(ct, msg);
1150 
1151 	return ret;
1152 }
1153 
1154 static void ct_try_receive_message(struct intel_guc_ct *ct)
1155 {
1156 	int ret;
1157 
1158 	if (GEM_WARN_ON(!ct->enabled))
1159 		return;
1160 
1161 	ret = ct_receive(ct);
1162 	if (ret > 0)
1163 		tasklet_hi_schedule(&ct->receive_tasklet);
1164 }
1165 
1166 static void ct_receive_tasklet_func(struct tasklet_struct *t)
1167 {
1168 	struct intel_guc_ct *ct = from_tasklet(ct, t, receive_tasklet);
1169 
1170 	ct_try_receive_message(ct);
1171 }
1172 
1173 /*
1174  * When we're communicating with the GuC over CT, GuC uses events
1175  * to notify us about new messages being posted on the RECV buffer.
1176  */
1177 void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
1178 {
1179 	if (unlikely(!ct->enabled)) {
1180 		WARN(1, "Unexpected GuC event received while CT disabled!\n");
1181 		return;
1182 	}
1183 
1184 	ct_try_receive_message(ct);
1185 }
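
/*
 * To recap the receive flow: the GuC interrupt ends up here, ct_receive()
 * pops one message from the recv ring under its lock, and ct_handle_msg()
 * dispatches it: responses update the waiting ct_request inline, while
 * EVENT-type requests are queued on requests.incoming and handled by
 * requests.worker. The tasklet only re-runs reception when more dwords
 * remain in the ring or when G2H credits need to be freed up.
 */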
1186 
1187 void intel_guc_ct_print_info(struct intel_guc_ct *ct,
1188 			     struct drm_printer *p)
1189 {
1190 	drm_printf(p, "CT %s\n", enableddisabled(ct->enabled));
1191 
1192 	if (!ct->enabled)
1193 		return;
1194 
1195 	drm_printf(p, "H2G Space: %u\n",
1196 		   atomic_read(&ct->ctbs.send.space) * 4);
1197 	drm_printf(p, "Head: %u\n",
1198 		   ct->ctbs.send.desc->head);
1199 	drm_printf(p, "Tail: %u\n",
1200 		   ct->ctbs.send.desc->tail);
1201 	drm_printf(p, "G2H Space: %u\n",
1202 		   atomic_read(&ct->ctbs.recv.space) * 4);
1203 	drm_printf(p, "Head: %u\n",
1204 		   ct->ctbs.recv.desc->head);
1205 	drm_printf(p, "Tail: %u\n",
1206 		   ct->ctbs.recv.desc->tail);
1207 }
1208