/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2016-2019 Intel Corporation
 */

#ifndef _INTEL_GUC_CT_H_
#define _INTEL_GUC_CT_H_

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "intel_guc_fwif.h"

struct i915_vma;
struct intel_guc;

/**
 * DOC: Command Transport (CT)
 *
 * Buffer-based command transport is a replacement for the MMIO-based
 * mechanism. It can be used to perform both host-to-GuC and GuC-to-host
 * communication.
 */

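/*
 * Illustrative sketch only, not part of the driver: the two directions map
 * onto two entry points. A host-to-GuC request is built as an array of
 * dwords, action code first, and handed to intel_guc_ct_send(); GuC-to-host
 * traffic is picked up by calling intel_guc_ct_event_handler() from the GuC
 * interrupt path. The action name below is a made-up placeholder; real
 * action codes come from the GuC ABI headers.
 *
 *	u32 action[] = { HYPOTHETICAL_H2G_ACTION, param };
 *
 *	if (intel_guc_ct_enabled(ct))
 *		intel_guc_ct_send(ct, action, ARRAY_SIZE(action), NULL, 0);
 *
 *	... and from the interrupt handler, when the GuC raises an event ...
 *	intel_guc_ct_event_handler(ct);
 */
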
/**
 * struct intel_guc_ct_buffer - Single command transport buffer
 *
 * A single command transport buffer consists of two parts, the header
 * record (command transport buffer descriptor) and the actual buffer which
 * holds the commands.
 *
 * @lock: protects access to the commands buffer and buffer descriptor
 * @desc: pointer to the buffer descriptor
 * @cmds: pointer to the commands buffer
 * @size: size of the commands buffer
 */
struct intel_guc_ct_buffer {
	spinlock_t lock;
	struct guc_ct_buffer_desc *desc;
	u32 *cmds;
	u32 size;
};

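/*
 * Illustrative sketch only: writers are expected to serialize on @lock while
 * they touch the dword buffer behind @cmds and the shared descriptor behind
 * @desc. The helper name is hypothetical and the head/tail bookkeeping is
 * deliberately elided; the real descriptor layout lives in intel_guc_fwif.h.
 *
 *	static void example_ctb_write(struct intel_guc_ct_buffer *ctb,
 *				      const u32 *msg, u32 len)
 *	{
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&ctb->lock, flags);
 *		if (len <= ctb->size)
 *			memcpy(ctb->cmds, msg, len * sizeof(u32));
 *		spin_unlock_irqrestore(&ctb->lock, flags);
 *	}
 */
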
/**
 * struct intel_guc_ct - Top-level structure for Command Transport related data
 *
 * Includes a pair of CT buffers for bi-directional communication and tracking
 * for the H2G and G2H requests sent and received through the buffers.
 */
struct intel_guc_ct {
	struct i915_vma *vma;
	bool enabled;

	/* buffers for sending and receiving commands */
	struct {
		struct intel_guc_ct_buffer send;
		struct intel_guc_ct_buffer recv;
	} ctbs;

	struct tasklet_struct receive_tasklet;

	struct {
		u32 last_fence; /* last fence used to send a request */

		spinlock_t lock; /* protects the pending requests list */
		struct list_head pending; /* requests waiting for a response */

		struct list_head incoming; /* incoming requests */
		struct work_struct worker; /* handler for incoming requests */
	} requests;
};

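/*
 * Illustrative sketch only: ctbs.send carries H2G traffic written by
 * intel_guc_ct_send(), while ctbs.recv is drained by receive_tasklet after
 * intel_guc_ct_event_handler() has run. Incoming G2H requests are queued on
 * requests.incoming and handled from requests.worker; responses are matched
 * against requests.pending by the fence assigned from requests.last_fence
 * when the request was sent, roughly:
 *
 *	spin_lock_irqsave(&ct->requests.lock, flags);
 *	list_for_each_entry(req, &ct->requests.pending, link)
 *		if (req->fence == fence)
 *			... complete this request with the response data ...
 *	spin_unlock_irqrestore(&ct->requests.lock, flags);
 *
 * where "req" stands in for the per-request bookkeeping kept in
 * intel_guc_ct.c.
 */
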
void intel_guc_ct_init_early(struct intel_guc_ct *ct);
int intel_guc_ct_init(struct intel_guc_ct *ct);
void intel_guc_ct_fini(struct intel_guc_ct *ct);
int intel_guc_ct_enable(struct intel_guc_ct *ct);
void intel_guc_ct_disable(struct intel_guc_ct *ct);

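/*
 * Illustrative sketch of the expected calling sequence (the real call sites
 * live in the GuC/uc setup and teardown code):
 *
 *	intel_guc_ct_init_early(ct);	- once, at driver load, no HW access
 *	intel_guc_ct_init(ct);		- allocates the backing buffers
 *	intel_guc_ct_enable(ct);	- once the GuC is up, opens the channel
 *	intel_guc_ct_disable(ct);	- before the GuC goes down
 *	intel_guc_ct_fini(ct);		- at driver unload, frees the buffers
 */
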
static inline void intel_guc_ct_sanitize(struct intel_guc_ct *ct)
{
	ct->enabled = false;
}

static inline bool intel_guc_ct_enabled(struct intel_guc_ct *ct)
{
	return ct->enabled;
}

int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
		      u32 *response_buf, u32 response_buf_size);
void intel_guc_ct_event_handler(struct intel_guc_ct *ct);

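/*
 * Illustrative sketch only: a request that expects data back also passes a
 * response buffer; a negative return value means the request could not be
 * sent or the GuC reported a failure. The action name is a placeholder for
 * a real GuC ABI action code.
 *
 *	u32 request[] = { HYPOTHETICAL_H2G_QUERY, param };
 *	u32 response[8];
 *	int ret;
 *
 *	ret = intel_guc_ct_send(ct, request, ARRAY_SIZE(request),
 *				response, ARRAY_SIZE(response));
 *	if (ret < 0)
 *		... handle the error ...
 */
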
#endif /* _INTEL_GUC_CT_H_ */