// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016-2019 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_guc_ct.h"
#include "gt/intel_gt.h"

#define CT_ERROR(_ct, _fmt, ...) \
	DRM_DEV_ERROR(ct_to_dev(_ct), "CT: " _fmt, ##__VA_ARGS__)
#ifdef CONFIG_DRM_I915_DEBUG_GUC
#define CT_DEBUG(_ct, _fmt, ...) \
	DRM_DEV_DEBUG_DRIVER(ct_to_dev(_ct), "CT: " _fmt, ##__VA_ARGS__)
#else
#define CT_DEBUG(...)	do { } while (0)
#endif

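/**
 * struct ct_request - host-to-GuC request awaiting a response
 * @link: node in the requests.pending list
 * @fence: fence used to match this request with its response message
 * @status: response status, updated by the response handler
 * @response_len: on send, capacity of @response_buf in dwords; on
 *	completion, actual length of the received response payload
 * @response_buf: optional caller-provided buffer for the response payload
 */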
struct ct_request {
	struct list_head link;
	u32 fence;
	u32 status;
	u32 response_len;
	u32 *response_buf;
};

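/**
 * struct ct_incoming_request - GuC-to-host request queued for the worker
 * @link: node in the requests.incoming list
 * @msg: copy of the raw CT message, including the header
 */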
struct ct_incoming_request {
	struct list_head link;
	u32 msg[];
};

enum { CTB_SEND = 0, CTB_RECV = 1 };

enum { CTB_OWNER_HOST = 0 };

static void ct_incoming_request_worker_func(struct work_struct *w);

/**
 * intel_guc_ct_init_early - Initialize CT state without requiring device access
 * @ct: pointer to CT struct
 */
void intel_guc_ct_init_early(struct intel_guc_ct *ct)
{
	spin_lock_init(&ct->requests.lock);
	INIT_LIST_HEAD(&ct->requests.pending);
	INIT_LIST_HEAD(&ct->requests.incoming);
	INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);
}

static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
{
	return container_of(ct, struct intel_guc, ct);
}

static inline struct intel_gt *ct_to_gt(struct intel_guc_ct *ct)
{
	return guc_to_gt(ct_to_guc(ct));
}

static inline struct drm_i915_private *ct_to_i915(struct intel_guc_ct *ct)
{
	return ct_to_gt(ct)->i915;
}

static inline struct device *ct_to_dev(struct intel_guc_ct *ct)
{
	return ct_to_i915(ct)->drm.dev;
}

static inline const char *guc_ct_buffer_type_to_str(u32 type)
{
	switch (type) {
	case INTEL_GUC_CT_BUFFER_TYPE_SEND:
		return "SEND";
	case INTEL_GUC_CT_BUFFER_TYPE_RECV:
		return "RECV";
	default:
		return "<invalid>";
	}
}

static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
				    u32 cmds_addr, u32 size)
{
	memset(desc, 0, sizeof(*desc));
	desc->addr = cmds_addr;
	desc->size = size;
	desc->owner = CTB_OWNER_HOST;
}

static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc)
{
	desc->head = 0;
	desc->tail = 0;
	desc->is_in_error = 0;
}

static int guc_action_register_ct_buffer(struct intel_guc *guc,
					 u32 desc_addr,
					 u32 type)
{
	u32 action[] = {
		INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER,
		desc_addr,
		sizeof(struct guc_ct_buffer_desc),
		type
	};

	/* Can't use generic send(), CT registration must go over MMIO */
	return intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
}

static int ct_register_buffer(struct intel_guc_ct *ct, u32 desc_addr, u32 type)
{
	int err = guc_action_register_ct_buffer(ct_to_guc(ct), desc_addr, type);

	if (unlikely(err))
		CT_ERROR(ct, "Failed to register %s buffer (err=%d)\n",
			 guc_ct_buffer_type_to_str(type), err);
	return err;
}

static int guc_action_deregister_ct_buffer(struct intel_guc *guc, u32 type)
{
	u32 action[] = {
		INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER,
		CTB_OWNER_HOST,
		type
	};

	/* Can't use generic send(), CT deregistration must go over MMIO */
	return intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
}

static int ct_deregister_buffer(struct intel_guc_ct *ct, u32 type)
{
	int err = guc_action_deregister_ct_buffer(ct_to_guc(ct), type);

	if (unlikely(err))
		CT_ERROR(ct, "Failed to deregister %s buffer (err=%d)\n",
			 guc_ct_buffer_type_to_str(type), err);
	return err;
}

/**
 * intel_guc_ct_init - Init buffer-based communication
 * @ct: pointer to CT struct
 *
 * Allocate memory required for buffer-based communication.
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_guc_ct_init(struct intel_guc_ct *ct)
{
	struct intel_guc *guc = ct_to_guc(ct);
	void *blob;
	int err;
	int i;

	GEM_BUG_ON(ct->vma);

	/* We allocate 1 page to hold both descriptors and both buffers.
	 *       ___________.....................
	 *      |desc (SEND)|                   :
	 *      |___________|                   PAGE/4
	 *      :___________....................:
	 *      |desc (RECV)|                   :
	 *      |___________|                   PAGE/4
	 *      :_______________________________:
	 *      |cmds (SEND)                    |
	 *      |                               PAGE/4
	 *      |_______________________________|
	 *      |cmds (RECV)                    |
	 *      |                               PAGE/4
	 *      |_______________________________|
	 *
	 * Each message can use a maximum of 32 dwords and we don't expect to
	 * have more than 1 in flight at any time, so we have enough space.
	 * Some logic further ahead will rely on the fact that there is only 1
	 * page and that it is always mapped, so if the size is changed the
	 * other code will need updating as well.
	 */

	err = intel_guc_allocate_and_map_vma(guc, PAGE_SIZE, &ct->vma, &blob);
	if (unlikely(err)) {
		CT_ERROR(ct, "Failed to allocate CT channel (err=%d)\n", err);
		return err;
	}

	CT_DEBUG(ct, "vma base=%#x\n", intel_guc_ggtt_offset(guc, ct->vma));

	/* store pointers to desc and cmds */
	for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
		GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
		ct->ctbs[i].desc = blob + PAGE_SIZE / 4 * i;
		ct->ctbs[i].cmds = blob + PAGE_SIZE / 4 * i + PAGE_SIZE / 2;
	}

	return 0;
}

/**
 * intel_guc_ct_fini - Fini buffer-based communication
 * @ct: pointer to CT struct
 *
 * Deallocate memory required for buffer-based communication.
 */
void intel_guc_ct_fini(struct intel_guc_ct *ct)
{
	GEM_BUG_ON(ct->enabled);

	i915_vma_unpin_and_release(&ct->vma, I915_VMA_RELEASE_MAP);
	memset(ct, 0, sizeof(*ct));
}

/**
 * intel_guc_ct_enable - Enable buffer based command transport.
 * @ct: pointer to CT struct
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_guc_ct_enable(struct intel_guc_ct *ct)
{
	struct intel_guc *guc = ct_to_guc(ct);
	u32 base, cmds, size;
	int err;
	int i;

	GEM_BUG_ON(ct->enabled);

	/* vma should be already allocated and mapped */
	GEM_BUG_ON(!ct->vma);
	base = intel_guc_ggtt_offset(guc, ct->vma);

	/*
	 * (Re)initialize descriptors. The cmds buffers are in the second
	 * half of the blob page.
	 */
	for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
		GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
		cmds = base + PAGE_SIZE / 4 * i + PAGE_SIZE / 2;
		size = PAGE_SIZE / 4;
		CT_DEBUG(ct, "%d: addr=%#x size=%u\n", i, cmds, size);
		guc_ct_buffer_desc_init(ct->ctbs[i].desc, cmds, size);
	}

	/*
	 * Register both CT buffers starting with RECV buffer.
	 * Descriptors are in first half of the blob.
	 */
	err = ct_register_buffer(ct, base + PAGE_SIZE / 4 * CTB_RECV,
				 INTEL_GUC_CT_BUFFER_TYPE_RECV);
	if (unlikely(err))
		goto err_out;

	err = ct_register_buffer(ct, base + PAGE_SIZE / 4 * CTB_SEND,
				 INTEL_GUC_CT_BUFFER_TYPE_SEND);
	if (unlikely(err))
		goto err_deregister;

	ct->enabled = true;

	return 0;

err_deregister:
	ct_deregister_buffer(ct, INTEL_GUC_CT_BUFFER_TYPE_RECV);
err_out:
	CT_ERROR(ct, "Failed to open CT channel (err=%d)\n", err);
	return err;
}

/**
 * intel_guc_ct_disable - Disable buffer based command transport.
 * @ct: pointer to CT struct
 */
void intel_guc_ct_disable(struct intel_guc_ct *ct)
{
	struct intel_guc *guc = ct_to_guc(ct);

	GEM_BUG_ON(!ct->enabled);

	ct->enabled = false;

	if (intel_guc_is_fw_running(guc)) {
		ct_deregister_buffer(ct, INTEL_GUC_CT_BUFFER_TYPE_SEND);
		ct_deregister_buffer(ct, INTEL_GUC_CT_BUFFER_TYPE_RECV);
	}
}

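/*
 * Note that fences are only ever compared for equality (against the
 * descriptor fence or a pending request's fence), so plain u32
 * wrap-around of last_fence is harmless here.
 */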
static u32 ct_get_next_fence(struct intel_guc_ct *ct)
{
	/* For now it's trivial */
	return ++ct->requests.last_fence;
}

/**
 * DOC: CTB Host to GuC request
 *
 * Format of the CTB Host to GuC request message is as follows::
 *
 *      +------------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                 |
 *      +   HEADER   +---------+---------+---------+---------+
 *      |            |    0    |    1    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+
 *      |  len >= 1  |  FENCE  |     request specific data   |
 *      +------+-----+---------+---------+---------+---------+
 *
 *                   ^-----------------len-------------------^
 */
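
/*
 * Worked example (values illustrative only): sending a two-dword action
 * { code, param } with want_response produces the header
 *
 *	(2 << GUC_CT_MSG_LEN_SHIFT) |
 *	GUC_CT_MSG_WRITE_FENCE_TO_DESC |
 *	GUC_CT_MSG_SEND_STATUS |
 *	(code << GUC_CT_MSG_ACTION_SHIFT)
 *
 * and ct_write() below posts three dwords to the ring: header, fence,
 * param. Note that the len field counts the fence plus the action data,
 * not the header itself.
 */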

static int ct_write(struct intel_guc_ct *ct,
		    const u32 *action,
		    u32 len /* in dwords */,
		    u32 fence,
		    bool want_response)
{
	struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_SEND];
	struct guc_ct_buffer_desc *desc = ctb->desc;
	u32 head = desc->head;
	u32 tail = desc->tail;
	u32 size = desc->size;
	u32 used;
	u32 header;
	u32 *cmds = ctb->cmds;
	unsigned int i;

	if (unlikely(desc->is_in_error))
		return -EPIPE;

	if (unlikely(!IS_ALIGNED(head | tail | size, 4) ||
		     (tail | head) >= size))
		goto corrupted;

	/* later calculations will be done in dwords */
	head /= 4;
	tail /= 4;
	size /= 4;

	/*
	 * tail == head condition indicates empty. GuC FW does not support
	 * using up the entire buffer to get tail == head meaning full.
	 */
	if (tail < head)
		used = (size - head) + tail;
	else
		used = tail - head;

	/* make sure there is space, including an extra dword for the fence */
	if (unlikely(used + len + 1 >= size))
		return -ENOSPC;

	/*
	 * Write the message. The format is the following:
	 * DW0: header (including action code)
	 * DW1: fence
	 * DW2+: action data
	 */
	header = (len << GUC_CT_MSG_LEN_SHIFT) |
		 (GUC_CT_MSG_WRITE_FENCE_TO_DESC) |
		 (want_response ? GUC_CT_MSG_SEND_STATUS : 0) |
		 (action[0] << GUC_CT_MSG_ACTION_SHIFT);

	CT_DEBUG(ct, "writing %*ph %*ph %*ph\n",
		 4, &header, 4, &fence, 4 * (len - 1), &action[1]);

	cmds[tail] = header;
	tail = (tail + 1) % size;

	cmds[tail] = fence;
	tail = (tail + 1) % size;

	for (i = 1; i < len; i++) {
		cmds[tail] = action[i];
		tail = (tail + 1) % size;
	}
	GEM_BUG_ON(tail > size);

	/* now update desc tail (back in bytes) */
	desc->tail = tail * 4;
	return 0;

corrupted:
	CT_ERROR(ct, "Corrupted descriptor addr=%#x head=%u tail=%u size=%u\n",
		 desc->addr, desc->head, desc->tail, desc->size);
	desc->is_in_error = 1;
	return -EPIPE;
}

/**
 * wait_for_ctb_desc_update - Wait for the CT buffer descriptor update.
 * @desc:	buffer descriptor
 * @fence:	response fence
 * @status:	placeholder for status
 *
 * GuC will update the CT buffer descriptor with the new fence and status
 * after processing the command identified by the fence. Wait for the
 * specified fence and then read the command status from the descriptor.
 *
 * Return:
 * *	0 response received (status is valid)
 * *	-ETIMEDOUT no response within hardcoded timeout
 * *	-EPROTO no response, CT buffer is in error
 */
static int wait_for_ctb_desc_update(struct guc_ct_buffer_desc *desc,
				    u32 fence,
				    u32 *status)
{
	int err;

	/*
	 * Fast commands should complete in less than 10us, so sample quickly
	 * up to that length of time, then switch to a slower sleep-wait loop.
	 * No GuC command should ever take longer than 10ms.
	 */
#define done (READ_ONCE(desc->fence) == fence)
	err = wait_for_us(done, 10);
	if (err)
		err = wait_for(done, 10);
#undef done

	if (unlikely(err)) {
		DRM_ERROR("CT: fence %u failed; reported fence=%u\n",
			  fence, desc->fence);

		if (WARN_ON(desc->is_in_error)) {
			/*
			 * Something went wrong with the messaging, try to
			 * reset the buffer and hope for the best.
			 */
			guc_ct_buffer_desc_reset(desc);
			err = -EPROTO;
		}
	}

	*status = desc->status;
	return err;
}

/**
 * wait_for_ct_request_update - Wait for CT request state update.
 * @req:	pointer to pending request
 * @status:	placeholder for status
 *
 * For each sent request, GuC shall send back a CT response message.
 * Our message handler will update the status of the tracked request
 * once a response message with the given fence is received. Wait here
 * and check for a valid response status value.
 *
 * Return:
 * *	0 response received (status is valid)
 * *	-ETIMEDOUT no response within hardcoded timeout
 */
static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
{
	int err;

	/*
	 * Fast commands should complete in less than 10us, so sample quickly
	 * up to that length of time, then switch to a slower sleep-wait loop.
	 * No GuC command should ever take longer than 10ms.
	 */
#define done INTEL_GUC_MSG_IS_RESPONSE(READ_ONCE(req->status))
	err = wait_for_us(done, 10);
	if (err)
		err = wait_for(done, 10);
#undef done

	if (unlikely(err))
		DRM_ERROR("CT: fence %u err %d\n", req->fence, err);

	*status = req->status;
	return err;
}

static int ct_send(struct intel_guc_ct *ct,
		   const u32 *action,
		   u32 len,
		   u32 *response_buf,
		   u32 response_buf_size,
		   u32 *status)
{
	struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_SEND];
	struct guc_ct_buffer_desc *desc = ctb->desc;
	struct ct_request request;
	unsigned long flags;
	u32 fence;
	int err;

	GEM_BUG_ON(!ct->enabled);
	GEM_BUG_ON(!len);
	GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
	GEM_BUG_ON(!response_buf && response_buf_size);

	fence = ct_get_next_fence(ct);
	request.fence = fence;
	request.status = 0;
	request.response_len = response_buf_size;
	request.response_buf = response_buf;

	spin_lock_irqsave(&ct->requests.lock, flags);
	list_add_tail(&request.link, &ct->requests.pending);
	spin_unlock_irqrestore(&ct->requests.lock, flags);

	err = ct_write(ct, action, len, fence, !!response_buf);
	if (unlikely(err))
		goto unlink;

	intel_guc_notify(ct_to_guc(ct));

	if (response_buf)
		err = wait_for_ct_request_update(&request, status);
	else
		err = wait_for_ctb_desc_update(desc, fence, status);
	if (unlikely(err))
		goto unlink;

	if (!INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(*status)) {
		err = -EIO;
		goto unlink;
	}

	if (response_buf) {
		/* There shall be no data in the status */
		WARN_ON(INTEL_GUC_MSG_TO_DATA(request.status));
		/* Return actual response len */
		err = request.response_len;
	} else {
		/* There shall be no response payload */
		WARN_ON(request.response_len);
		/* Return data decoded from the status dword */
		err = INTEL_GUC_MSG_TO_DATA(*status);
	}

unlink:
	spin_lock_irqsave(&ct->requests.lock, flags);
	list_del(&request.link);
	spin_unlock_irqrestore(&ct->requests.lock, flags);

	return err;
}

/*
 * Command Transport (CT) buffer based GuC send function.
 */
int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
		      u32 *response_buf, u32 response_buf_size)
{
	struct intel_guc *guc = ct_to_guc(ct);
	u32 status = ~0; /* undefined */
	int ret;

	if (unlikely(!ct->enabled)) {
		WARN(1, "Unexpected send: action=%#x\n", *action);
		return -ENODEV;
	}

	mutex_lock(&guc->send_mutex);

	ret = ct_send(ct, action, len, response_buf, response_buf_size, &status);
	if (unlikely(ret < 0)) {
		CT_ERROR(ct, "Sending action %#x failed (err=%d status=%#X)\n",
			 action[0], ret, status);
	} else if (unlikely(ret)) {
		CT_DEBUG(ct, "send action %#x returned %d (%#x)\n",
			 action[0], ret, ret);
	}

	mutex_unlock(&guc->send_mutex);
	return ret;
}
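
/*
 * Example usage, as a rough sketch only (the action code shown is just
 * an illustrative assumption; most callers are expected to go through
 * the intel_guc_send() wrapper rather than calling this directly):
 *
 *	u32 action[] = { INTEL_GUC_ACTION_AUTHENTICATE_HUC, rsa_offset };
 *
 *	err = intel_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action),
 *				NULL, 0);
 */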

static inline unsigned int ct_header_get_len(u32 header)
{
	return (header >> GUC_CT_MSG_LEN_SHIFT) & GUC_CT_MSG_LEN_MASK;
}

static inline unsigned int ct_header_get_action(u32 header)
{
	return (header >> GUC_CT_MSG_ACTION_SHIFT) & GUC_CT_MSG_ACTION_MASK;
}

static inline bool ct_header_is_response(u32 header)
{
	return !!(header & GUC_CT_MSG_IS_RESPONSE);
}

static int ct_read(struct intel_guc_ct *ct, u32 *data)
{
	struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_RECV];
	struct guc_ct_buffer_desc *desc = ctb->desc;
	u32 head = desc->head;
	u32 tail = desc->tail;
	u32 size = desc->size;
	u32 *cmds = ctb->cmds;
	s32 available;
	unsigned int len;
	unsigned int i;

	if (unlikely(desc->is_in_error))
		return -EPIPE;

	if (unlikely(!IS_ALIGNED(head | tail | size, 4) ||
		     (tail | head) >= size))
		goto corrupted;

	/* later calculations will be done in dwords */
	head /= 4;
	tail /= 4;
	size /= 4;

	/* tail == head condition indicates empty */
	available = tail - head;
	if (unlikely(available == 0))
		return -ENODATA;

	/* beware of buffer wrap case */
	if (unlikely(available < 0))
		available += size;
	CT_DEBUG(ct, "available %d (%u:%u)\n", available, head, tail);
	GEM_BUG_ON(available < 0);

	data[0] = cmds[head];
	head = (head + 1) % size;

	/* message len with header */
	len = ct_header_get_len(data[0]) + 1;
	if (unlikely(len > (u32)available)) {
		CT_ERROR(ct, "Incomplete message %*ph %*ph %*ph\n",
			 4, data,
			 4 * (head + available - 1 > size ?
			      size - head : available - 1), &cmds[head],
			 4 * (head + available - 1 > size ?
			      available - 1 - size + head : 0), &cmds[0]);
		goto corrupted;
	}

	for (i = 1; i < len; i++) {
		data[i] = cmds[head];
		head = (head + 1) % size;
	}
	CT_DEBUG(ct, "received %*ph\n", 4 * len, data);

	desc->head = head * 4;
	return 0;

corrupted:
	CT_ERROR(ct, "Corrupted descriptor addr=%#x head=%u tail=%u size=%u\n",
		 desc->addr, desc->head, desc->tail, desc->size);
	desc->is_in_error = 1;
	return -EPIPE;
}

/**
 * DOC: CTB GuC to Host response
 *
 * Format of the CTB GuC to Host response message is as follows::
 *
 *      +------------+---------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   [3]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                           |
 *      +   HEADER   +---------+---------+---------+---------+---------+
 *      |            |    0    |    1    |    2    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+=========+
 *      |  len >= 2  |  FENCE  |  STATUS |   response specific data    |
 *      +------+-----+---------+---------+---------+---------+---------+
 *
 *                   ^-----------------------len-----------------------^
 */
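
/*
 * For example, a successful response with no extra payload arrives as
 * three dwords: a header with len == 2 and GUC_CT_MSG_IS_RESPONSE set,
 * the FENCE of the originating request, and a STATUS dword for which
 * INTEL_GUC_MSG_IS_RESPONSE_SUCCESS() is true.
 */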

static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
{
	u32 header = msg[0];
	u32 len = ct_header_get_len(header);
	u32 msgsize = (len + 1) * sizeof(u32); /* msg size in bytes w/header */
	u32 fence;
	u32 status;
	u32 datalen;
	struct ct_request *req;
	bool found = false;

	GEM_BUG_ON(!ct_header_is_response(header));
	GEM_BUG_ON(!in_irq());

	/* Response payload shall at least include fence and status */
	if (unlikely(len < 2)) {
		CT_ERROR(ct, "Corrupted response %*ph\n", msgsize, msg);
		return -EPROTO;
	}

	fence = msg[1];
	status = msg[2];
	datalen = len - 2;

	/* Format of the status follows RESPONSE message */
	if (unlikely(!INTEL_GUC_MSG_IS_RESPONSE(status))) {
		CT_ERROR(ct, "Corrupted response %*ph\n", msgsize, msg);
		return -EPROTO;
	}

	CT_DEBUG(ct, "response fence %u status %#x\n", fence, status);

	spin_lock(&ct->requests.lock);
	list_for_each_entry(req, &ct->requests.pending, link) {
		if (unlikely(fence != req->fence)) {
			CT_DEBUG(ct, "request %u awaits response\n",
				 req->fence);
			continue;
		}
		if (unlikely(datalen > req->response_len)) {
			CT_ERROR(ct, "Response for %u is too long %*ph\n",
				 req->fence, msgsize, msg);
			datalen = 0;
		}
		if (datalen)
			memcpy(req->response_buf, msg + 3, 4 * datalen);
		req->response_len = datalen;
		WRITE_ONCE(req->status, status);
		found = true;
		break;
	}
	spin_unlock(&ct->requests.lock);

	if (!found)
		CT_ERROR(ct, "Unsolicited response %*ph\n", msgsize, msg);
	return 0;
}

static void ct_process_request(struct intel_guc_ct *ct,
			       u32 action, u32 len, const u32 *payload)
{
	struct intel_guc *guc = ct_to_guc(ct);
	int ret;

	CT_DEBUG(ct, "request %x %*ph\n", action, 4 * len, payload);

	switch (action) {
	case INTEL_GUC_ACTION_DEFAULT:
		ret = intel_guc_to_host_process_recv_msg(guc, payload, len);
		if (unlikely(ret))
			goto fail_unexpected;
		break;

	default:
fail_unexpected:
		CT_ERROR(ct, "Unexpected request %x %*ph\n",
			 action, 4 * len, payload);
		break;
	}
}

static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
{
	unsigned long flags;
	struct ct_incoming_request *request;
	u32 header;
	u32 *payload;
	bool done;

	spin_lock_irqsave(&ct->requests.lock, flags);
	request = list_first_entry_or_null(&ct->requests.incoming,
					   struct ct_incoming_request, link);
	if (request)
		list_del(&request->link);
	done = !!list_empty(&ct->requests.incoming);
	spin_unlock_irqrestore(&ct->requests.lock, flags);

	if (!request)
		return true;

	header = request->msg[0];
	payload = &request->msg[1];
	ct_process_request(ct,
			   ct_header_get_action(header),
			   ct_header_get_len(header),
			   payload);

	kfree(request);
	return done;
}

static void ct_incoming_request_worker_func(struct work_struct *w)
{
	struct intel_guc_ct *ct =
		container_of(w, struct intel_guc_ct, requests.worker);
	bool done;

	done = ct_process_incoming_requests(ct);
	if (!done)
		queue_work(system_unbound_wq, &ct->requests.worker);
}

/**
 * DOC: CTB GuC to Host request
 *
 * Format of the CTB GuC to Host request message is as follows::
 *
 *      +------------+---------+---------+---------+---------+---------+
 *      |   msg[0]   |   [1]   |   [2]   |   [3]   |   ...   |  [n-1]  |
 *      +------------+---------+---------+---------+---------+---------+
 *      |   MESSAGE  |       MESSAGE PAYLOAD                           |
 *      +   HEADER   +---------+---------+---------+---------+---------+
 *      |            |    0    |    1    |    2    |   ...   |    n    |
 *      +============+=========+=========+=========+=========+=========+
 *      |     len    |            request specific data                |
 *      +------+-----+---------+---------+---------+---------+---------+
 *
 *                   ^-----------------------len-----------------------^
 */
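
/*
 * Unlike responses, which are consumed directly in the event handler,
 * incoming requests are copied (with GFP_ATOMIC, as we may be running
 * in interrupt context) onto the requests.incoming list and processed
 * later from a worker running in process context.
 */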

static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
{
	u32 header = msg[0];
	u32 len = ct_header_get_len(header);
	u32 msgsize = (len + 1) * sizeof(u32); /* msg size in bytes w/header */
	struct ct_incoming_request *request;
	unsigned long flags;

	GEM_BUG_ON(ct_header_is_response(header));

	request = kmalloc(sizeof(*request) + msgsize, GFP_ATOMIC);
	if (unlikely(!request)) {
		CT_ERROR(ct, "Dropping request %*ph\n", msgsize, msg);
		return 0; /* XXX: -ENOMEM ? */
	}
	memcpy(request->msg, msg, msgsize);

	spin_lock_irqsave(&ct->requests.lock, flags);
	list_add_tail(&request->link, &ct->requests.incoming);
	spin_unlock_irqrestore(&ct->requests.lock, flags);

	queue_work(system_unbound_wq, &ct->requests.worker);
	return 0;
}

/*
 * When we're communicating with the GuC over CT, GuC uses events
 * to notify us about new messages being posted on the RECV buffer.
 */
void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
{
	u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
	int err = 0;

	if (unlikely(!ct->enabled)) {
		WARN(1, "Unexpected GuC event received while CT disabled!\n");
		return;
	}

	do {
		err = ct_read(ct, msg);
		if (err)
			break;

		if (ct_header_is_response(msg[0]))
			err = ct_handle_response(ct, msg);
		else
			err = ct_handle_request(ct, msg);
	} while (!err);
}