// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016-2019 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_guc_ct.h"

#ifdef CONFIG_DRM_I915_DEBUG_GUC
#define CT_DEBUG_DRIVER(...)	DRM_DEBUG_DRIVER(__VA_ARGS__)
#else
#define CT_DEBUG_DRIVER(...)	do { } while (0)
#endif

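/*
 * Tracks a single in-flight H2G request: @link chains it on
 * ct->requests.pending, @fence identifies the request on the wire,
 * @status receives the response status dword, and @response_buf of
 * @response_len dwords optionally receives the response payload
 * (ct_handle_response() rewrites @response_len to the actual length).
 */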
struct ct_request {
	struct list_head link;
	u32 fence;
	u32 status;
	u32 response_len;
	u32 *response_buf;
};

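/* A G2H message copied out of the RECV buffer for deferred processing. */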
struct ct_incoming_request {
	struct list_head link;
	u32 msg[];
};

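/* Indices into ct->ctbs[]: SEND carries H2G traffic, RECV carries G2H. */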
enum { CTB_SEND = 0, CTB_RECV = 1 };

enum { CTB_OWNER_HOST = 0 };

static void ct_incoming_request_worker_func(struct work_struct *w);

/**
 * intel_guc_ct_init_early - Initialize CT state without requiring device access
 * @ct: pointer to CT struct
 */
void intel_guc_ct_init_early(struct intel_guc_ct *ct)
{
	spin_lock_init(&ct->requests.lock);
	INIT_LIST_HEAD(&ct->requests.pending);
	INIT_LIST_HEAD(&ct->requests.incoming);
	INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);
}

static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
{
	return container_of(ct, struct intel_guc, ct);
}

static inline const char *guc_ct_buffer_type_to_str(u32 type)
{
	switch (type) {
	case INTEL_GUC_CT_BUFFER_TYPE_SEND:
		return "SEND";
	case INTEL_GUC_CT_BUFFER_TYPE_RECV:
		return "RECV";
	default:
		return "<invalid>";
	}
}

static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
				    u32 cmds_addr, u32 size)
{
	CT_DEBUG_DRIVER("CT: init addr=%#x size=%u\n", cmds_addr, size);
	memset(desc, 0, sizeof(*desc));
	desc->addr = cmds_addr;
	desc->size = size;
	desc->owner = CTB_OWNER_HOST;
}

static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc)
{
	CT_DEBUG_DRIVER("CT: desc %p reset head=%u tail=%u\n",
			desc, desc->head, desc->tail);
	desc->head = 0;
	desc->tail = 0;
	desc->is_in_error = 0;
}

static int guc_action_register_ct_buffer(struct intel_guc *guc,
					 u32 desc_addr,
					 u32 type)
{
	u32 action[] = {
		INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER,
		desc_addr,
		sizeof(struct guc_ct_buffer_desc),
		type
	};
	int err;

	/* Can't use generic send(), CT registration must go over MMIO */
	err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
	if (err)
		DRM_ERROR("CT: register %s buffer failed; err=%d\n",
			  guc_ct_buffer_type_to_str(type), err);
	return err;
}

static int guc_action_deregister_ct_buffer(struct intel_guc *guc,
					   u32 type)
{
	u32 action[] = {
		INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER,
		CTB_OWNER_HOST,
		type
	};
	int err;

	/* Can't use generic send(), CT deregistration must go over MMIO */
	err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
	if (err)
		DRM_ERROR("CT: deregister %s buffer failed; err=%d\n",
			  guc_ct_buffer_type_to_str(type), err);
	return err;
}

/**
 * intel_guc_ct_init - Init buffer-based communication
 * @ct: pointer to CT struct
 *
 * Allocate memory required for buffer-based communication.
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_guc_ct_init(struct intel_guc_ct *ct)
{
	struct intel_guc *guc = ct_to_guc(ct);
	void *blob;
	int err;
	int i;

	GEM_BUG_ON(ct->vma);

	/* We allocate 1 page to hold both descriptors and both buffers.
	 *       ___________.....................
	 *      |desc (SEND)|                   :
	 *      |___________|                   PAGE/4
	 *      :___________....................:
	 *      |desc (RECV)|                   :
	 *      |___________|                   PAGE/4
	 *      :_______________________________:
	 *      |cmds (SEND)                    |
	 *      |                               PAGE/4
	 *      |_______________________________|
	 *      |cmds (RECV)                    |
	 *      |                               PAGE/4
	 *      |_______________________________|
	 *
	 * Each message can use a maximum of 32 dwords and we don't expect to
	 * have more than 1 in flight at any time, so we have enough space.
	 * Some logic further ahead will rely on the fact that there is only 1
	 * page and that it is always mapped, so if the size is changed the
	 * other code will need updating as well.
	 */

	err = intel_guc_allocate_and_map_vma(guc, PAGE_SIZE, &ct->vma, &blob);
	if (err) {
		DRM_ERROR("CT: channel allocation failed; err=%d\n", err);
		return err;
	}

	CT_DEBUG_DRIVER("CT: vma base=%#x\n",
			intel_guc_ggtt_offset(guc, ct->vma));

	/*
	 * Store pointers to the desc and cmds areas: the two descriptors
	 * occupy the first half of the blob (a quarter page each) and the
	 * two command buffers the second half, as in the diagram above.
	 */
	for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
		GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
		ct->ctbs[i].desc = blob + PAGE_SIZE/4 * i;
		ct->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2;
	}

	return 0;
}

/**
 * intel_guc_ct_fini - Fini buffer-based communication
 * @ct: pointer to CT struct
 *
 * Deallocate memory required for buffer-based communication.
 */
void intel_guc_ct_fini(struct intel_guc_ct *ct)
{
	GEM_BUG_ON(ct->enabled);

	i915_vma_unpin_and_release(&ct->vma, I915_VMA_RELEASE_MAP);
}

/**
 * intel_guc_ct_enable - Enable buffer based command transport.
 * @ct: pointer to CT struct
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_guc_ct_enable(struct intel_guc_ct *ct)
{
	struct intel_guc *guc = ct_to_guc(ct);
	u32 base;
	int err;
	int i;

	GEM_BUG_ON(ct->enabled);

	/* vma should already be allocated and mapped */
	GEM_BUG_ON(!ct->vma);
	base = intel_guc_ggtt_offset(guc, ct->vma);

	/* (re)initialize descriptors
	 * cmds buffers are in the second half of the blob page
	 */
	for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
		GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
		guc_ct_buffer_desc_init(ct->ctbs[i].desc,
					base + PAGE_SIZE/4 * i + PAGE_SIZE/2,
					PAGE_SIZE/4);
	}

	/* Register buffers, starting with the RECV buffer.
	 * Descriptors are in the first half of the blob.
	 */
	err = guc_action_register_ct_buffer(guc,
					    base + PAGE_SIZE/4 * CTB_RECV,
					    INTEL_GUC_CT_BUFFER_TYPE_RECV);
	if (unlikely(err))
		goto err_out;

	err = guc_action_register_ct_buffer(guc,
					    base + PAGE_SIZE/4 * CTB_SEND,
					    INTEL_GUC_CT_BUFFER_TYPE_SEND);
	if (unlikely(err))
		goto err_deregister;

	ct->enabled = true;

	return 0;

err_deregister:
	guc_action_deregister_ct_buffer(guc,
					INTEL_GUC_CT_BUFFER_TYPE_RECV);
err_out:
	DRM_ERROR("CT: can't open channel; err=%d\n", err);
	return err;
}

/**
 * intel_guc_ct_disable - Disable buffer based command transport.
 * @ct: pointer to CT struct
 */
void intel_guc_ct_disable(struct intel_guc_ct *ct)
{
	struct intel_guc *guc = ct_to_guc(ct);

	GEM_BUG_ON(!ct->enabled);

	ct->enabled = false;

	if (intel_guc_is_running(guc)) {
		guc_action_deregister_ct_buffer(guc,
						INTEL_GUC_CT_BUFFER_TYPE_SEND);
		guc_action_deregister_ct_buffer(guc,
						INTEL_GUC_CT_BUFFER_TYPE_RECV);
	}
}

static u32 ct_get_next_fence(struct intel_guc_ct *ct)
{
	/* For now it's trivial */
	return ++ct->requests.next_fence;
}

/**
 * DOC: CTB Host to GuC request
 *
 * Format of the CTB Host to GuC request message is as follows::
 *
 *      +------------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                 |
 *      +   HEADER   +---------+---------+---------+---------+
 *      |            |    0    |    1    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+
 *      |  len >= 1  |  FENCE  |     request specific data   |
 *      +------+-----+---------+---------+---------+---------+
 *
 *                   ^-----------------len-------------------^
 */
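
/*
 * Example with illustrative values: to send action A with parameters P1
 * and P2, the caller passes action[] = { A, P1, P2 } and len = 3, and
 * ctb_write() below emits four dwords into the SEND buffer:
 *
 *	msg[0] = header (len = 3, action = A)
 *	msg[1] = FENCE
 *	msg[2] = P1
 *	msg[3] = P2
 */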

static int ctb_write(struct intel_guc_ct_buffer *ctb,
		     const u32 *action,
		     u32 len /* in dwords */,
		     u32 fence,
		     bool want_response)
{
	struct guc_ct_buffer_desc *desc = ctb->desc;
	u32 head = desc->head / 4;	/* in dwords */
	u32 tail = desc->tail / 4;	/* in dwords */
	u32 size = desc->size / 4;	/* in dwords */
	u32 used;			/* in dwords */
	u32 header;
	u32 *cmds = ctb->cmds;
	unsigned int i;

	GEM_BUG_ON(desc->size % 4);
	GEM_BUG_ON(desc->head % 4);
	GEM_BUG_ON(desc->tail % 4);
	GEM_BUG_ON(tail >= size);

	/*
	 * A tail == head condition indicates an empty buffer. The GuC FW
	 * does not support filling the buffer completely, as that would
	 * make tail == head ambiguous with the empty case.
	 */
	if (tail < head)
		used = (size - head) + tail;
	else
		used = tail - head;

	/* make sure there is a space including extra dw for the fence */
	if (unlikely(used + len + 1 >= size))
		return -ENOSPC;

	/*
	 * Write the message. The format is the following:
	 * DW0: header (including action code)
	 * DW1: fence
	 * DW2+: action data
	 */
	header = (len << GUC_CT_MSG_LEN_SHIFT) |
		 (GUC_CT_MSG_WRITE_FENCE_TO_DESC) |
		 (want_response ? GUC_CT_MSG_SEND_STATUS : 0) |
		 (action[0] << GUC_CT_MSG_ACTION_SHIFT);

	CT_DEBUG_DRIVER("CT: writing %*ph %*ph %*ph\n",
			4, &header, 4, &fence,
			4 * (len - 1), &action[1]);

	cmds[tail] = header;
	tail = (tail + 1) % size;

	cmds[tail] = fence;
	tail = (tail + 1) % size;

	for (i = 1; i < len; i++) {
		cmds[tail] = action[i];
		tail = (tail + 1) % size;
	}

	/* now update desc tail (back in bytes) */
	desc->tail = tail * 4;
	GEM_BUG_ON(desc->tail > desc->size);

	return 0;
}

/**
 * wait_for_ctb_desc_update - Wait for the CT buffer descriptor update.
 * @desc:	buffer descriptor
 * @fence:	response fence
 * @status:	placeholder for status
 *
 * GuC will update the CT buffer descriptor with a new fence and status
 * after processing the command identified by the fence. Wait for the
 * specified fence and then read the command's status from the
 * descriptor.
 *
 * Return:
 * *	0 response received (status is valid)
 * *	-ETIMEDOUT no response within hardcoded timeout
 * *	-EPROTO no response, CT buffer is in error
 */
static int wait_for_ctb_desc_update(struct guc_ct_buffer_desc *desc,
				    u32 fence,
				    u32 *status)
{
	int err;

	/*
	 * Fast commands should complete in less than 10us, so sample quickly
	 * up to that length of time, then switch to a slower sleep-wait loop.
	 * No GuC command should ever take longer than 10ms.
	 */
#define done (READ_ONCE(desc->fence) == fence)
	err = wait_for_us(done, 10);
	if (err)
		err = wait_for(done, 10);
#undef done

	if (unlikely(err)) {
		DRM_ERROR("CT: fence %u failed; reported fence=%u\n",
			  fence, desc->fence);

		if (WARN_ON(desc->is_in_error)) {
			/* Something went wrong with the messaging, try to reset
			 * the buffer and hope for the best
			 */
			guc_ct_buffer_desc_reset(desc);
			err = -EPROTO;
		}
	}

	*status = desc->status;
	return err;
}

/**
 * wait_for_ct_request_update - Wait for CT request state update.
 * @req:	pointer to pending request
 * @status:	placeholder for status
 *
 * For each request sent, GuC shall send back a CT response message.
 * Our message handler will update the status of the tracked request
 * once a response message with the given fence is received. Wait here
 * and check for a valid response status value.
 *
 * Return:
 * *	0 response received (status is valid)
 * *	-ETIMEDOUT no response within hardcoded timeout
 */
static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
{
	int err;

	/*
	 * Fast commands should complete in less than 10us, so sample quickly
	 * up to that length of time, then switch to a slower sleep-wait loop.
	 * No GuC command should ever take longer than 10ms.
	 */
#define done INTEL_GUC_MSG_IS_RESPONSE(READ_ONCE(req->status))
	err = wait_for_us(done, 10);
	if (err)
		err = wait_for(done, 10);
#undef done

	if (unlikely(err))
		DRM_ERROR("CT: fence %u err %d\n", req->fence, err);

	*status = req->status;
	return err;
}

static int ct_send(struct intel_guc_ct *ct,
		   const u32 *action,
		   u32 len,
		   u32 *response_buf,
		   u32 response_buf_size,
		   u32 *status)
{
	struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_SEND];
	struct guc_ct_buffer_desc *desc = ctb->desc;
	struct ct_request request;
	unsigned long flags;
	u32 fence;
	int err;

	GEM_BUG_ON(!ct->enabled);
	GEM_BUG_ON(!len);
	GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
	GEM_BUG_ON(!response_buf && response_buf_size);

	fence = ct_get_next_fence(ct);
	request.fence = fence;
	request.status = 0;
	request.response_len = response_buf_size;
	request.response_buf = response_buf;

	spin_lock_irqsave(&ct->requests.lock, flags);
	list_add_tail(&request.link, &ct->requests.pending);
	spin_unlock_irqrestore(&ct->requests.lock, flags);

	err = ctb_write(ctb, action, len, fence, !!response_buf);
	if (unlikely(err))
		goto unlink;

	intel_guc_notify(ct_to_guc(ct));

	if (response_buf)
		err = wait_for_ct_request_update(&request, status);
	else
		err = wait_for_ctb_desc_update(desc, fence, status);
	if (unlikely(err))
		goto unlink;

	if (!INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(*status)) {
		err = -EIO;
		goto unlink;
	}

	if (response_buf) {
		/* There shall be no data in the status */
		WARN_ON(INTEL_GUC_MSG_TO_DATA(request.status));
		/* Return actual response len */
		err = request.response_len;
	} else {
		/* There shall be no response payload */
		WARN_ON(request.response_len);
		/* Return data decoded from the status dword */
		err = INTEL_GUC_MSG_TO_DATA(*status);
	}

unlink:
	spin_lock_irqsave(&ct->requests.lock, flags);
	list_del(&request.link);
	spin_unlock_irqrestore(&ct->requests.lock, flags);

	return err;
}

/*
 * Command Transport (CT) buffer based GuC send function.
 */
int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
		      u32 *response_buf, u32 response_buf_size)
{
	struct intel_guc *guc = ct_to_guc(ct);
	u32 status = ~0; /* undefined */
	int ret;

	if (unlikely(!ct->enabled)) {
		WARN(1, "Unexpected send: action=%#x\n", *action);
		return -ENODEV;
	}

	mutex_lock(&guc->send_mutex);

	ret = ct_send(ct, action, len, response_buf, response_buf_size, &status);
	if (unlikely(ret < 0)) {
		DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n",
			  action[0], ret, status);
	} else if (unlikely(ret)) {
		CT_DEBUG_DRIVER("CT: send action %#x returned %d (%#x)\n",
				action[0], ret, ret);
	}

	mutex_unlock(&guc->send_mutex);
	return ret;
}
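
/*
 * Illustrative call, with hypothetical action code and parameter values:
 *
 *	u32 action[] = { action_code, param };
 *	int ret = intel_guc_ct_send(ct, action, ARRAY_SIZE(action), NULL, 0);
 *
 * With no response buffer, a non-negative ret carries the data decoded
 * from the response status dword; a negative ret is an errno.
 */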

static inline unsigned int ct_header_get_len(u32 header)
{
	return (header >> GUC_CT_MSG_LEN_SHIFT) & GUC_CT_MSG_LEN_MASK;
}

static inline unsigned int ct_header_get_action(u32 header)
{
	return (header >> GUC_CT_MSG_ACTION_SHIFT) & GUC_CT_MSG_ACTION_MASK;
}

static inline bool ct_header_is_response(u32 header)
{
	return !!(header & GUC_CT_MSG_IS_RESPONSE);
}

static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data)
{
	struct guc_ct_buffer_desc *desc = ctb->desc;
	u32 head = desc->head / 4;	/* in dwords */
	u32 tail = desc->tail / 4;	/* in dwords */
	u32 size = desc->size / 4;	/* in dwords */
	u32 *cmds = ctb->cmds;
	s32 available;			/* in dwords */
	unsigned int len;
	unsigned int i;

	GEM_BUG_ON(desc->size % 4);
	GEM_BUG_ON(desc->head % 4);
	GEM_BUG_ON(desc->tail % 4);
	GEM_BUG_ON(tail >= size);
	GEM_BUG_ON(head >= size);

	/* tail == head condition indicates empty */
	available = tail - head;
	if (unlikely(available == 0))
		return -ENODATA;

	/* beware of buffer wrap case */
	if (unlikely(available < 0))
		available += size;
	CT_DEBUG_DRIVER("CT: available %d (%u:%u)\n", available, head, tail);
	GEM_BUG_ON(available < 0);

	data[0] = cmds[head];
	head = (head + 1) % size;

	/* message len with header */
	len = ct_header_get_len(data[0]) + 1;
	if (unlikely(len > (u32)available)) {
		DRM_ERROR("CT: incomplete message %*ph %*ph %*ph\n",
			  4, data,
			  4 * (head + available - 1 > size ?
			       size - head : available - 1), &cmds[head],
			  4 * (head + available - 1 > size ?
			       available - 1 - size + head : 0), &cmds[0]);
		return -EPROTO;
	}

	for (i = 1; i < len; i++) {
		data[i] = cmds[head];
		head = (head + 1) % size;
	}
	CT_DEBUG_DRIVER("CT: received %*ph\n", 4 * len, data);

	desc->head = head * 4;
	return 0;
}

/**
 * DOC: CTB GuC to Host response
 *
 * Format of the CTB GuC to Host response message is as follows::
 *
 *      +------------+---------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   [3]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                           |
 *      +   HEADER   +---------+---------+---------+---------+---------+
 *      |            |    0    |    1    |    2    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+=========+
 *      |  len >= 2  |  FENCE  |  STATUS |   response specific data    |
 *      +------+-----+---------+---------+---------+---------+---------+
 *
 *                   ^-----------------------len-----------------------^
 */
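
/*
 * Example with illustrative values: a successful response to the request
 * sent with fence F, carrying no extra data, arrives as three dwords:
 *
 *	msg[] = { header (len = 2, IS_RESPONSE), F, STATUS }
 *
 * ct_handle_response() below matches F against ct->requests.pending and
 * stores STATUS in the tracked request.
 */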

static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
{
	u32 header = msg[0];
	u32 len = ct_header_get_len(header);
	u32 msglen = len + 1; /* total message length including header */
	u32 fence;
	u32 status;
	u32 datalen;
	struct ct_request *req;
	bool found = false;

	GEM_BUG_ON(!ct_header_is_response(header));
	GEM_BUG_ON(!in_irq());

	/* Response payload shall at least include fence and status */
	if (unlikely(len < 2)) {
		DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
		return -EPROTO;
	}

	fence = msg[1];
	status = msg[2];
	datalen = len - 2;

	/* Format of the status follows RESPONSE message */
	if (unlikely(!INTEL_GUC_MSG_IS_RESPONSE(status))) {
		DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
		return -EPROTO;
	}

	CT_DEBUG_DRIVER("CT: response fence %u status %#x\n", fence, status);

	spin_lock(&ct->requests.lock);
	list_for_each_entry(req, &ct->requests.pending, link) {
		if (unlikely(fence != req->fence)) {
			CT_DEBUG_DRIVER("CT: request %u awaits response\n",
					req->fence);
			continue;
		}
		if (unlikely(datalen > req->response_len)) {
			DRM_ERROR("CT: response %u too long %*ph\n",
				  req->fence, 4 * msglen, msg);
			datalen = 0;
		}
		if (datalen)
			memcpy(req->response_buf, msg + 3, 4 * datalen);
		req->response_len = datalen;
		WRITE_ONCE(req->status, status);
		found = true;
		break;
	}
	spin_unlock(&ct->requests.lock);

	if (!found)
		DRM_ERROR("CT: unsolicited response %*ph\n", 4 * msglen, msg);
	return 0;
}

static void ct_process_request(struct intel_guc_ct *ct,
			       u32 action, u32 len, const u32 *payload)
{
	struct intel_guc *guc = ct_to_guc(ct);
	int ret;

	CT_DEBUG_DRIVER("CT: request %x %*ph\n", action, 4 * len, payload);

	switch (action) {
	case INTEL_GUC_ACTION_DEFAULT:
		ret = intel_guc_to_host_process_recv_msg(guc, payload, len);
		if (unlikely(ret))
			goto fail_unexpected;
		break;

	default:
fail_unexpected:
		DRM_ERROR("CT: unexpected request %x %*ph\n",
			  action, 4 * len, payload);
		break;
	}
}

static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
{
	unsigned long flags;
	struct ct_incoming_request *request;
	u32 header;
	u32 *payload;
	bool done;

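	/* Dequeue one request per call; the worker requeues itself while more remain */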
	spin_lock_irqsave(&ct->requests.lock, flags);
	request = list_first_entry_or_null(&ct->requests.incoming,
					   struct ct_incoming_request, link);
	if (request)
		list_del(&request->link);
	done = !!list_empty(&ct->requests.incoming);
	spin_unlock_irqrestore(&ct->requests.lock, flags);

	if (!request)
		return true;

	header = request->msg[0];
	payload = &request->msg[1];
	ct_process_request(ct,
			   ct_header_get_action(header),
			   ct_header_get_len(header),
			   payload);

	kfree(request);
	return done;
}

static void ct_incoming_request_worker_func(struct work_struct *w)
{
	struct intel_guc_ct *ct =
		container_of(w, struct intel_guc_ct, requests.worker);
	bool done;

	done = ct_process_incoming_requests(ct);
	if (!done)
		queue_work(system_unbound_wq, &ct->requests.worker);
}

/**
 * DOC: CTB GuC to Host request
 *
 * Format of the CTB GuC to Host request message is as follows::
 *
 *      +------------+---------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   [3]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                           |
 *      +   HEADER   +---------+---------+---------+---------+---------+
 *      |            |    0    |    1    |    2    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+=========+
 *      |     len    |            request specific data                |
 *      +------+-----+---------+---------+---------+---------+---------+
 *
 *                   ^-----------------------len-----------------------^
 */

static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
{
	u32 header = msg[0];
	u32 len = ct_header_get_len(header);
	u32 msglen = len + 1; /* total message length including header */
	struct ct_incoming_request *request;
	unsigned long flags;

	GEM_BUG_ON(ct_header_is_response(header));

	request = kmalloc(sizeof(*request) + 4 * msglen, GFP_ATOMIC);
	if (unlikely(!request)) {
		DRM_ERROR("CT: dropping request %*ph\n", 4 * msglen, msg);
		return 0; /* XXX: -ENOMEM ? */
	}
	memcpy(request->msg, msg, 4 * msglen);

	spin_lock_irqsave(&ct->requests.lock, flags);
	list_add_tail(&request->link, &ct->requests.incoming);
	spin_unlock_irqrestore(&ct->requests.lock, flags);

	queue_work(system_unbound_wq, &ct->requests.worker);
	return 0;
}

/*
 * When we're communicating with the GuC over CT, GuC uses events
 * to notify us about new messages being posted on the RECV buffer.
 */
void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
{
	struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_RECV];
	u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
	int err = 0;

	if (unlikely(!ct->enabled)) {
		WARN(1, "Unexpected GuC event received while CT disabled!\n");
		return;
	}

	do {
		err = ctb_read(ctb, msg);
		if (err)
			break;

		if (ct_header_is_response(msg[0]))
			err = ct_handle_response(ct, msg);
		else
			err = ct_handle_request(ct, msg);
	} while (!err);

	if (GEM_WARN_ON(err == -EPROTO)) {
		DRM_ERROR("CT: corrupted message detected!\n");
		ctb->desc->is_in_error = 1;
	}
}