// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016-2019 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_guc_ct.h"
#include "gt/intel_gt.h"

static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
{
	return container_of(ct, struct intel_guc, ct);
}

static inline struct intel_gt *ct_to_gt(struct intel_guc_ct *ct)
{
	return guc_to_gt(ct_to_guc(ct));
}

static inline struct drm_i915_private *ct_to_i915(struct intel_guc_ct *ct)
{
	return ct_to_gt(ct)->i915;
}

static inline struct drm_device *ct_to_drm(struct intel_guc_ct *ct)
{
	return &ct_to_i915(ct)->drm;
}

#define CT_ERROR(_ct, _fmt, ...) \
	drm_err(ct_to_drm(_ct), "CT: " _fmt, ##__VA_ARGS__)
#ifdef CONFIG_DRM_I915_DEBUG_GUC
#define CT_DEBUG(_ct, _fmt, ...) \
	drm_dbg(ct_to_drm(_ct), "CT: " _fmt, ##__VA_ARGS__)
#else
#define CT_DEBUG(...)	do { } while (0)
#endif
#define CT_PROBE_ERROR(_ct, _fmt, ...) \
	i915_probe_error(ct_to_i915(_ct), "CT: " _fmt, ##__VA_ARGS__)

/**
 * DOC: CTB Blob
 *
 * We allocate a single blob to hold both CTB descriptors and buffers:
 *
 *      +--------+-----------------------------------------------+------+
 *      | offset | contents                                      | size |
 *      +========+===============================================+======+
 *      | 0x0000 | H2G `CTB Descriptor`_ (send)                  |      |
 *      +--------+-----------------------------------------------+  4K  |
 *      | 0x0800 | G2H `CTB Descriptor`_ (recv)                  |      |
 *      +--------+-----------------------------------------------+------+
 *      | 0x1000 | H2G `CT Buffer`_ (send)                       | n*4K |
 *      |        |                                               |      |
 *      +--------+-----------------------------------------------+------+
 *      | 0x1000 | G2H `CT Buffer`_ (recv)                       | m*4K |
 *      | + n*4K |                                               |      |
 *      +--------+-----------------------------------------------+------+
 *
 * The size of each `CT Buffer`_ must be a multiple of 4K.
 * As we don't expect too many messages, use the minimum sizes for now.
 */
#define CTB_DESC_SIZE		ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
#define CTB_H2G_BUFFER_SIZE	(SZ_4K)
#define CTB_G2H_BUFFER_SIZE	(SZ_4K)
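
/*
 * Purely illustrative sketch (not used by the code): with the minimum sizes
 * above, and assuming sizeof(struct guc_ct_buffer_desc) fits within SZ_2K,
 * the offsets computed in intel_guc_ct_init() resolve to:
 *
 *	send desc = blob + 0x0000
 *	recv desc = blob + CTB_DESC_SIZE                           = blob + 0x0800
 *	send cmds = blob + 2 * CTB_DESC_SIZE                       = blob + 0x1000
 *	recv cmds = blob + 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE = blob + 0x2000
 */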

struct ct_request {
	struct list_head link;
	u32 fence;
	u32 status;
	u32 response_len;
	u32 *response_buf;
};

struct ct_incoming_msg {
	struct list_head link;
	u32 size;
	u32 msg[];
};

enum { CTB_SEND = 0, CTB_RECV = 1 };

enum { CTB_OWNER_HOST = 0 };

static void ct_receive_tasklet_func(struct tasklet_struct *t);
static void ct_incoming_request_worker_func(struct work_struct *w);

/**
 * intel_guc_ct_init_early - Initialize CT state without requiring device access
 * @ct: pointer to CT struct
 */
void intel_guc_ct_init_early(struct intel_guc_ct *ct)
{
	spin_lock_init(&ct->ctbs.send.lock);
	spin_lock_init(&ct->ctbs.recv.lock);
	spin_lock_init(&ct->requests.lock);
	INIT_LIST_HEAD(&ct->requests.pending);
	INIT_LIST_HEAD(&ct->requests.incoming);
	INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);
	tasklet_setup(&ct->receive_tasklet, ct_receive_tasklet_func);
}

static inline const char *guc_ct_buffer_type_to_str(u32 type)
{
	switch (type) {
	case INTEL_GUC_CT_BUFFER_TYPE_SEND:
		return "SEND";
	case INTEL_GUC_CT_BUFFER_TYPE_RECV:
		return "RECV";
	default:
		return "<invalid>";
	}
}

static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
				    u32 cmds_addr, u32 size)
{
	memset(desc, 0, sizeof(*desc));
	desc->addr = cmds_addr;
	desc->size = size;
	desc->owner = CTB_OWNER_HOST;
}

static void guc_ct_buffer_reset(struct intel_guc_ct_buffer *ctb, u32 cmds_addr)
{
	guc_ct_buffer_desc_init(ctb->desc, cmds_addr, ctb->size);
}

static void guc_ct_buffer_init(struct intel_guc_ct_buffer *ctb,
			       struct guc_ct_buffer_desc *desc,
			       u32 *cmds, u32 size)
{
	GEM_BUG_ON(size % 4);

	ctb->desc = desc;
	ctb->cmds = cmds;
	ctb->size = size;

	guc_ct_buffer_reset(ctb, 0);
}

static int guc_action_register_ct_buffer(struct intel_guc *guc,
					 u32 desc_addr,
					 u32 type)
{
	u32 action[] = {
		INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER,
		desc_addr,
		sizeof(struct guc_ct_buffer_desc),
		type
	};

	/* Can't use generic send(), CT registration must go over MMIO */
	return intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
}

static int ct_register_buffer(struct intel_guc_ct *ct, u32 desc_addr, u32 type)
{
	int err = guc_action_register_ct_buffer(ct_to_guc(ct), desc_addr, type);

	if (unlikely(err))
		CT_ERROR(ct, "Failed to register %s buffer (err=%d)\n",
			 guc_ct_buffer_type_to_str(type), err);
	return err;
}

static int guc_action_deregister_ct_buffer(struct intel_guc *guc, u32 type)
{
	u32 action[] = {
		INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER,
		CTB_OWNER_HOST,
		type
	};

	/* Can't use generic send(), CT deregistration must go over MMIO */
	return intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
}

static int ct_deregister_buffer(struct intel_guc_ct *ct, u32 type)
{
	int err = guc_action_deregister_ct_buffer(ct_to_guc(ct), type);

	if (unlikely(err))
		CT_ERROR(ct, "Failed to deregister %s buffer (err=%d)\n",
			 guc_ct_buffer_type_to_str(type), err);
	return err;
}

/**
 * intel_guc_ct_init - Init buffer-based communication
 * @ct: pointer to CT struct
 *
 * Allocate memory required for buffer-based communication.
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_guc_ct_init(struct intel_guc_ct *ct)
{
	struct intel_guc *guc = ct_to_guc(ct);
	struct guc_ct_buffer_desc *desc;
	u32 blob_size;
	u32 cmds_size;
	void *blob;
	u32 *cmds;
	int err;

	GEM_BUG_ON(ct->vma);

	blob_size = 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE + CTB_G2H_BUFFER_SIZE;
	err = intel_guc_allocate_and_map_vma(guc, blob_size, &ct->vma, &blob);
	if (unlikely(err)) {
		CT_PROBE_ERROR(ct, "Failed to allocate %u for CTB data (%pe)\n",
			       blob_size, ERR_PTR(err));
		return err;
	}

	CT_DEBUG(ct, "base=%#x size=%u\n", intel_guc_ggtt_offset(guc, ct->vma), blob_size);

	/* store pointers to desc and cmds for send ctb */
	desc = blob;
	cmds = blob + 2 * CTB_DESC_SIZE;
	cmds_size = CTB_H2G_BUFFER_SIZE;
	CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u\n", "send",
		 ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size);

	guc_ct_buffer_init(&ct->ctbs.send, desc, cmds, cmds_size);

	/* store pointers to desc and cmds for recv ctb */
	desc = blob + CTB_DESC_SIZE;
	cmds = blob + 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE;
	cmds_size = CTB_G2H_BUFFER_SIZE;
	CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u\n", "recv",
		 ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size);

	guc_ct_buffer_init(&ct->ctbs.recv, desc, cmds, cmds_size);

	return 0;
}

/**
 * intel_guc_ct_fini - Fini buffer-based communication
 * @ct: pointer to CT struct
 *
 * Deallocate memory required for buffer-based communication.
 */
void intel_guc_ct_fini(struct intel_guc_ct *ct)
{
	GEM_BUG_ON(ct->enabled);

	tasklet_kill(&ct->receive_tasklet);
	i915_vma_unpin_and_release(&ct->vma, I915_VMA_RELEASE_MAP);
	memset(ct, 0, sizeof(*ct));
}

/**
 * intel_guc_ct_enable - Enable buffer based command transport.
 * @ct: pointer to CT struct
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_guc_ct_enable(struct intel_guc_ct *ct)
{
	struct intel_guc *guc = ct_to_guc(ct);
	u32 base, cmds;
	void *blob;
	int err;

	GEM_BUG_ON(ct->enabled);

	/* vma should already be allocated and mapped */
	GEM_BUG_ON(!ct->vma);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(ct->vma->obj));
	base = intel_guc_ggtt_offset(guc, ct->vma);

	/* blob should start with the send descriptor */
	blob = __px_vaddr(ct->vma->obj);
	GEM_BUG_ON(blob != ct->ctbs.send.desc);

	/* (re)initialize descriptors */
	cmds = base + ptrdiff(ct->ctbs.send.cmds, blob);
	guc_ct_buffer_reset(&ct->ctbs.send, cmds);

	cmds = base + ptrdiff(ct->ctbs.recv.cmds, blob);
	guc_ct_buffer_reset(&ct->ctbs.recv, cmds);

	/*
	 * Register both CT buffers, starting with the RECV buffer.
	 * Descriptors are in the first half of the blob.
	 */
	err = ct_register_buffer(ct, base + ptrdiff(ct->ctbs.recv.desc, blob),
				 INTEL_GUC_CT_BUFFER_TYPE_RECV);
	if (unlikely(err))
		goto err_out;

	err = ct_register_buffer(ct, base + ptrdiff(ct->ctbs.send.desc, blob),
				 INTEL_GUC_CT_BUFFER_TYPE_SEND);
	if (unlikely(err))
		goto err_deregister;

	ct->enabled = true;

	return 0;

err_deregister:
	ct_deregister_buffer(ct, INTEL_GUC_CT_BUFFER_TYPE_RECV);
err_out:
	CT_PROBE_ERROR(ct, "Failed to enable CTB (%pe)\n", ERR_PTR(err));
	return err;
}

/**
 * intel_guc_ct_disable - Disable buffer based command transport.
 * @ct: pointer to CT struct
 */
void intel_guc_ct_disable(struct intel_guc_ct *ct)
{
	struct intel_guc *guc = ct_to_guc(ct);

	GEM_BUG_ON(!ct->enabled);

	ct->enabled = false;

	if (intel_guc_is_fw_running(guc)) {
		ct_deregister_buffer(ct, INTEL_GUC_CT_BUFFER_TYPE_SEND);
		ct_deregister_buffer(ct, INTEL_GUC_CT_BUFFER_TYPE_RECV);
	}
}

static u32 ct_get_next_fence(struct intel_guc_ct *ct)
{
	/* For now it's trivial */
	return ++ct->requests.last_fence;
}

static void write_barrier(struct intel_guc_ct *ct)
{
	struct intel_guc *guc = ct_to_guc(ct);
	struct intel_gt *gt = guc_to_gt(guc);

	if (i915_gem_object_is_lmem(guc->ct.vma->obj)) {
		GEM_BUG_ON(guc->send_regs.fw_domains);
		/*
		 * This register is used by the i915 and GuC for MMIO based
		 * communication. Once we are in this code CTBs are the only
		 * method the i915 uses to communicate with the GuC so it is
		 * safe to write to this register (a value of 0 is NOP for MMIO
		 * communication). If we ever start mixing CTBs and MMIOs a new
		 * register will have to be chosen.
		 */
		intel_uncore_write_fw(gt->uncore, GEN11_SOFT_SCRATCH(0), 0);
	} else {
		/* wmb() is sufficient as a barrier if in smem */
		wmb();
	}
}

/**
 * DOC: CTB Host to GuC request
 *
 * Format of the CTB Host to GuC request message is as follows::
 *
 *      +------------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                 |
 *      +   HEADER   +---------+---------+---------+---------+
 *      |            |    0    |    1    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+
 *      |  len >= 1  |  FENCE  |     request specific data   |
 *      +------+-----+---------+---------+---------+---------+
 *
 *                   ^-----------------len-------------------^
 */
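
/*
 * Purely illustrative example of the layout produced by ct_write() below
 * (the action code and payload values are made up): sending a two dword
 * action[] = { 0x123, 0xabcd } with fence 42 writes three dwords into the
 * send buffer:
 *
 *	DW0: (2 << GUC_CT_MSG_LEN_SHIFT) | GUC_CT_MSG_SEND_STATUS |
 *	     (0x123 << GUC_CT_MSG_ACTION_SHIFT)
 *	DW1: 42		(fence)
 *	DW2: 0xabcd	(request specific data)
 */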

static int ct_write(struct intel_guc_ct *ct,
		    const u32 *action,
		    u32 len /* in dwords */,
		    u32 fence)
{
	struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
	struct guc_ct_buffer_desc *desc = ctb->desc;
	u32 head = desc->head;
	u32 tail = desc->tail;
	u32 size = ctb->size;
	u32 used;
	u32 header;
	u32 *cmds = ctb->cmds;
	unsigned int i;

	if (unlikely(desc->is_in_error))
		return -EPIPE;

	if (unlikely(!IS_ALIGNED(head | tail, 4) ||
		     (tail | head) >= size))
		goto corrupted;

	/* later calculations will be done in dwords */
	head /= 4;
	tail /= 4;
	size /= 4;

	/*
	 * tail == head condition indicates empty. GuC FW does not support
	 * using up the entire buffer to get tail == head meaning full.
	 */
	if (tail < head)
		used = (size - head) + tail;
	else
		used = tail - head;

	/* make sure there is space, including an extra dw for the fence */
	if (unlikely(used + len + 1 >= size))
		return -ENOSPC;

	/*
	 * Write the message. The format is the following:
	 * DW0: header (including action code)
	 * DW1: fence
	 * DW2+: action data
	 */
	header = (len << GUC_CT_MSG_LEN_SHIFT) |
		 GUC_CT_MSG_SEND_STATUS |
		 (action[0] << GUC_CT_MSG_ACTION_SHIFT);

	CT_DEBUG(ct, "writing %*ph %*ph %*ph\n",
		 4, &header, 4, &fence, 4 * (len - 1), &action[1]);

	cmds[tail] = header;
	tail = (tail + 1) % size;

	cmds[tail] = fence;
	tail = (tail + 1) % size;

	for (i = 1; i < len; i++) {
		cmds[tail] = action[i];
		tail = (tail + 1) % size;
	}
	GEM_BUG_ON(tail > size);

	/*
	 * make sure the H2G buffer update and LRC tail update (if this is
	 * triggering a submission) are visible before updating the
	 * descriptor tail
	 */
	write_barrier(ct);

	/* now update desc tail (back in bytes) */
	desc->tail = tail * 4;
	return 0;

corrupted:
	CT_ERROR(ct, "Corrupted descriptor addr=%#x head=%u tail=%u size=%u\n",
		 desc->addr, desc->head, desc->tail, desc->size);
	desc->is_in_error = 1;
	return -EPIPE;
}

/**
 * wait_for_ct_request_update - Wait for CT request state update.
 * @req:	pointer to pending request
 * @status:	placeholder for status
 *
 * For each sent request, GuC shall send back a CT response message.
 * Our message handler will update the status of the tracked request once
 * a response message with the given fence is received. Wait here and
 * check for a valid response status value.
 *
 * Return:
 * *	0 response received (status is valid)
 * *	-ETIMEDOUT no response within hardcoded timeout
 */
static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
{
	int err;

	/*
	 * Fast commands should complete in less than 10us, so sample quickly
	 * up to that length of time, then switch to a slower sleep-wait loop.
	 * No GuC command should ever take longer than 10ms.
	 */
#define done INTEL_GUC_MSG_IS_RESPONSE(READ_ONCE(req->status))
	err = wait_for_us(done, 10);
	if (err)
		err = wait_for(done, 10);
#undef done

	if (unlikely(err))
		DRM_ERROR("CT: fence %u err %d\n", req->fence, err);

	*status = req->status;
	return err;
}

static int ct_send(struct intel_guc_ct *ct,
		   const u32 *action,
		   u32 len,
		   u32 *response_buf,
		   u32 response_buf_size,
		   u32 *status)
{
	struct ct_request request;
	unsigned long flags;
	u32 fence;
	int err;

	GEM_BUG_ON(!ct->enabled);
	GEM_BUG_ON(!len);
	GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
	GEM_BUG_ON(!response_buf && response_buf_size);

	spin_lock_irqsave(&ct->ctbs.send.lock, flags);

	fence = ct_get_next_fence(ct);
	request.fence = fence;
	request.status = 0;
	request.response_len = response_buf_size;
	request.response_buf = response_buf;

	spin_lock(&ct->requests.lock);
	list_add_tail(&request.link, &ct->requests.pending);
	spin_unlock(&ct->requests.lock);

	err = ct_write(ct, action, len, fence);

	spin_unlock_irqrestore(&ct->ctbs.send.lock, flags);

	if (unlikely(err))
		goto unlink;

	intel_guc_notify(ct_to_guc(ct));

	err = wait_for_ct_request_update(&request, status);
	if (unlikely(err))
		goto unlink;

	if (!INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(*status)) {
		err = -EIO;
		goto unlink;
	}

	if (response_buf) {
		/* There shall be no data in the status */
		WARN_ON(INTEL_GUC_MSG_TO_DATA(request.status));
		/* Return actual response len */
		err = request.response_len;
	} else {
		/* There shall be no response payload */
		WARN_ON(request.response_len);
		/* Return data decoded from the status dword */
		err = INTEL_GUC_MSG_TO_DATA(*status);
	}

unlink:
	spin_lock_irqsave(&ct->requests.lock, flags);
	list_del(&request.link);
	spin_unlock_irqrestore(&ct->requests.lock, flags);

	return err;
}

/*
 * Command Transport (CT) buffer based GuC send function.
 */
int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
		      u32 *response_buf, u32 response_buf_size)
{
	u32 status = ~0; /* undefined */
	int ret;

	if (unlikely(!ct->enabled)) {
		WARN(1, "Unexpected send: action=%#x\n", *action);
		return -ENODEV;
	}

	ret = ct_send(ct, action, len, response_buf, response_buf_size, &status);
	if (unlikely(ret < 0)) {
		CT_ERROR(ct, "Sending action %#x failed (err=%d status=%#X)\n",
			 action[0], ret, status);
	} else if (unlikely(ret)) {
		CT_DEBUG(ct, "send action %#x returned %d (%#x)\n",
			 action[0], ret, ret);
	}

	return ret;
}
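
/*
 * Usage sketch (illustrative only; SOME_H2G_ACTION and param are hypothetical,
 * real callers pass the INTEL_GUC_ACTION_* code they need):
 *
 *	u32 action[] = { SOME_H2G_ACTION, param };
 *	u32 response[8];
 *	int ret;
 *
 *	ret = intel_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action),
 *				response, ARRAY_SIZE(response));
 *
 * A negative ret is an error; otherwise, with a response buffer provided,
 * ret is the response length in dwords, and without one it is the data
 * decoded from the response status dword.
 */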

static inline unsigned int ct_header_get_len(u32 header)
{
	return (header >> GUC_CT_MSG_LEN_SHIFT) & GUC_CT_MSG_LEN_MASK;
}

static inline unsigned int ct_header_get_action(u32 header)
{
	return (header >> GUC_CT_MSG_ACTION_SHIFT) & GUC_CT_MSG_ACTION_MASK;
}

static inline bool ct_header_is_response(u32 header)
{
	return !!(header & GUC_CT_MSG_IS_RESPONSE);
}

static struct ct_incoming_msg *ct_alloc_msg(u32 num_dwords)
{
	struct ct_incoming_msg *msg;

	msg = kmalloc(sizeof(*msg) + sizeof(u32) * num_dwords, GFP_ATOMIC);
	if (msg)
		msg->size = num_dwords;
	return msg;
}

static void ct_free_msg(struct ct_incoming_msg *msg)
{
	kfree(msg);
}

/*
 * Return: number of remaining dwords available to read (0 if empty),
 *         or a negative error code on failure
 */
static int ct_read(struct intel_guc_ct *ct, struct ct_incoming_msg **msg)
{
	struct intel_guc_ct_buffer *ctb = &ct->ctbs.recv;
	struct guc_ct_buffer_desc *desc = ctb->desc;
	u32 head = desc->head;
	u32 tail = desc->tail;
	u32 size = ctb->size;
	u32 *cmds = ctb->cmds;
	s32 available;
	unsigned int len;
	unsigned int i;
	u32 header;

	if (unlikely(desc->is_in_error))
		return -EPIPE;

	if (unlikely(!IS_ALIGNED(head | tail, 4) ||
		     (tail | head) >= size))
		goto corrupted;

	/* later calculations will be done in dwords */
	head /= 4;
	tail /= 4;
	size /= 4;

	/* tail == head condition indicates empty */
	available = tail - head;
	if (unlikely(available == 0)) {
		*msg = NULL;
		return 0;
	}

	/* beware of buffer wrap case */
	if (unlikely(available < 0))
		available += size;
	CT_DEBUG(ct, "available %d (%u:%u)\n", available, head, tail);
	GEM_BUG_ON(available < 0);

	header = cmds[head];
	head = (head + 1) % size;

	/* message length, including the header */
	len = ct_header_get_len(header) + 1;
	if (unlikely(len > (u32)available)) {
		CT_ERROR(ct, "Incomplete message %*ph %*ph %*ph\n",
			 4, &header,
			 4 * (head + available - 1 > size ?
			      size - head : available - 1), &cmds[head],
			 4 * (head + available - 1 > size ?
			      available - 1 - size + head : 0), &cmds[0]);
		goto corrupted;
	}

	*msg = ct_alloc_msg(len);
	if (!*msg) {
		CT_ERROR(ct, "No memory for message %*ph %*ph %*ph\n",
			 4, &header,
			 4 * (head + available - 1 > size ?
			      size - head : available - 1), &cmds[head],
			 4 * (head + available - 1 > size ?
			      available - 1 - size + head : 0), &cmds[0]);
		return available;
	}

	(*msg)->msg[0] = header;

	for (i = 1; i < len; i++) {
		(*msg)->msg[i] = cmds[head];
		head = (head + 1) % size;
	}
	CT_DEBUG(ct, "received %*ph\n", 4 * len, (*msg)->msg);

	desc->head = head * 4;
	return available - len;

corrupted:
	CT_ERROR(ct, "Corrupted descriptor addr=%#x head=%u tail=%u size=%u\n",
		 desc->addr, desc->head, desc->tail, desc->size);
	desc->is_in_error = 1;
	return -EPIPE;
}

/**
 * DOC: CTB GuC to Host response
 *
 * Format of the CTB GuC to Host response message is as follows::
 *
 *      +------------+---------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   [3]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                           |
 *      +   HEADER   +---------+---------+---------+---------+---------+
 *      |            |    0    |    1    |    2    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+=========+
 *      |  len >= 2  |  FENCE  |  STATUS |   response specific data    |
 *      +------+-----+---------+---------+---------+---------+---------+
 *
 *                   ^-----------------------len-----------------------^
 */
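
/*
 * Purely illustrative walk-through of ct_handle_response() below: for a
 * received msg[] = { header, 42, status, d0 } (len field in the header = 3),
 * the handler looks up the pending request with fence 42, verifies that
 * INTEL_GUC_MSG_IS_RESPONSE(status) holds, copies datalen = len - 2 = 1
 * dword (d0) into the request's response_buf and stores the status, which
 * wakes the waiter in wait_for_ct_request_update().
 */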

static int ct_handle_response(struct intel_guc_ct *ct, struct ct_incoming_msg *response)
{
	u32 header = response->msg[0];
	u32 len = ct_header_get_len(header);
	u32 fence;
	u32 status;
	u32 datalen;
	struct ct_request *req;
	unsigned long flags;
	bool found = false;
	int err = 0;

	GEM_BUG_ON(!ct_header_is_response(header));

	/* Response payload shall at least include fence and status */
	if (unlikely(len < 2)) {
		CT_ERROR(ct, "Corrupted response (len %u)\n", len);
		return -EPROTO;
	}

	fence = response->msg[1];
	status = response->msg[2];
	datalen = len - 2;

	/* The status dword shall follow the RESPONSE message format */
	if (unlikely(!INTEL_GUC_MSG_IS_RESPONSE(status))) {
		CT_ERROR(ct, "Corrupted response (status %#x)\n", status);
		return -EPROTO;
	}

	CT_DEBUG(ct, "response fence %u status %#x\n", fence, status);

	spin_lock_irqsave(&ct->requests.lock, flags);
	list_for_each_entry(req, &ct->requests.pending, link) {
		if (unlikely(fence != req->fence)) {
			CT_DEBUG(ct, "request %u awaits response\n",
				 req->fence);
			continue;
		}
		if (unlikely(datalen > req->response_len)) {
			CT_ERROR(ct, "Response %u too long (datalen %u > %u)\n",
				 req->fence, datalen, req->response_len);
			datalen = min(datalen, req->response_len);
			err = -EMSGSIZE;
		}
		if (datalen)
			memcpy(req->response_buf, response->msg + 3, 4 * datalen);
		req->response_len = datalen;
		WRITE_ONCE(req->status, status);
		found = true;
		break;
	}
	spin_unlock_irqrestore(&ct->requests.lock, flags);

	if (!found) {
		CT_ERROR(ct, "Unsolicited response (fence %u)\n", fence);
		return -ENOKEY;
	}

	if (unlikely(err))
		return err;

	ct_free_msg(response);
	return 0;
}

static int ct_process_request(struct intel_guc_ct *ct, struct ct_incoming_msg *request)
{
	struct intel_guc *guc = ct_to_guc(ct);
	u32 header, action, len;
	const u32 *payload;
	int ret;

	header = request->msg[0];
	payload = &request->msg[1];
	action = ct_header_get_action(header);
	len = ct_header_get_len(header);

	CT_DEBUG(ct, "request %x %*ph\n", action, 4 * len, payload);

	switch (action) {
	case INTEL_GUC_ACTION_DEFAULT:
		ret = intel_guc_to_host_process_recv_msg(guc, payload, len);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	if (unlikely(ret)) {
		CT_ERROR(ct, "Failed to process request %04x (%pe)\n",
			 action, ERR_PTR(ret));
		return ret;
	}

	ct_free_msg(request);
	return 0;
}

static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
{
	unsigned long flags;
	struct ct_incoming_msg *request;
	bool done;
	int err;

	spin_lock_irqsave(&ct->requests.lock, flags);
	request = list_first_entry_or_null(&ct->requests.incoming,
					   struct ct_incoming_msg, link);
	if (request)
		list_del(&request->link);
	done = !!list_empty(&ct->requests.incoming);
	spin_unlock_irqrestore(&ct->requests.lock, flags);

	if (!request)
		return true;

	err = ct_process_request(ct, request);
	if (unlikely(err)) {
		CT_ERROR(ct, "Failed to process CT message (%pe) %*ph\n",
			 ERR_PTR(err), 4 * request->size, request->msg);
		ct_free_msg(request);
	}

	return done;
}

static void ct_incoming_request_worker_func(struct work_struct *w)
{
	struct intel_guc_ct *ct =
		container_of(w, struct intel_guc_ct, requests.worker);
	bool done;

	done = ct_process_incoming_requests(ct);
	if (!done)
		queue_work(system_unbound_wq, &ct->requests.worker);
}

/**
 * DOC: CTB GuC to Host request
 *
 * Format of the CTB GuC to Host request message is as follows::
 *
 *      +------------+---------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   [3]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                           |
 *      +   HEADER   +---------+---------+---------+---------+---------+
 *      |            |    0    |    1    |    2    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+=========+
 *      |     len    |            request specific data                |
 *      +------+-----+---------+---------+---------+---------+---------+
 *
 *                   ^-----------------------len-----------------------^
 */
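
/*
 * Purely illustrative sketch of the unsolicited request flow implemented
 * below: ct_handle_request() queues the message on ct->requests.incoming and
 * schedules ct->requests.worker; the worker dequeues one message at a time
 * and hands it to ct_process_request(), which dispatches on the action code
 * (currently only INTEL_GUC_ACTION_DEFAULT is handled, via
 * intel_guc_to_host_process_recv_msg()).
 */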

static int ct_handle_request(struct intel_guc_ct *ct, struct ct_incoming_msg *request)
{
	unsigned long flags;

	GEM_BUG_ON(ct_header_is_response(request->msg[0]));

	spin_lock_irqsave(&ct->requests.lock, flags);
	list_add_tail(&request->link, &ct->requests.incoming);
	spin_unlock_irqrestore(&ct->requests.lock, flags);

	queue_work(system_unbound_wq, &ct->requests.worker);
	return 0;
}

static void ct_handle_msg(struct intel_guc_ct *ct, struct ct_incoming_msg *msg)
{
	u32 header = msg->msg[0];
	int err;

	if (ct_header_is_response(header))
		err = ct_handle_response(ct, msg);
	else
		err = ct_handle_request(ct, msg);

	if (unlikely(err)) {
		CT_ERROR(ct, "Failed to process CT message (%pe) %*ph\n",
			 ERR_PTR(err), 4 * msg->size, msg->msg);
		ct_free_msg(msg);
	}
}

/*
 * Return: number of remaining dwords available to read (0 if empty),
 *         or a negative error code on failure
 */
static int ct_receive(struct intel_guc_ct *ct)
{
	struct ct_incoming_msg *msg = NULL;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ct->ctbs.recv.lock, flags);
	ret = ct_read(ct, &msg);
	spin_unlock_irqrestore(&ct->ctbs.recv.lock, flags);
	if (ret < 0)
		return ret;

	if (msg)
		ct_handle_msg(ct, msg);

	return ret;
}

static void ct_try_receive_message(struct intel_guc_ct *ct)
{
	int ret;

	if (GEM_WARN_ON(!ct->enabled))
		return;

	ret = ct_receive(ct);
	if (ret > 0)
		tasklet_hi_schedule(&ct->receive_tasklet);
}

static void ct_receive_tasklet_func(struct tasklet_struct *t)
{
	struct intel_guc_ct *ct = from_tasklet(ct, t, receive_tasklet);

	ct_try_receive_message(ct);
}

/*
 * When we're communicating with the GuC over CT, GuC uses events
 * to notify us about new messages being posted on the RECV buffer.
 */
void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
{
	if (unlikely(!ct->enabled)) {
		WARN(1, "Unexpected GuC event received while CT disabled!\n");
		return;
	}

	ct_try_receive_message(ct);
}