xref: /openbmc/linux/drivers/gpu/drm/lima/lima_sched.h (revision 022db5d6)
1 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
2 /* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
3 
4 #ifndef __LIMA_SCHED_H__
5 #define __LIMA_SCHED_H__
6 
7 #include <drm/gpu_scheduler.h>
8 #include <linux/list.h>
9 #include <linux/xarray.h>
10 
11 struct lima_device;
12 struct lima_vm;
13 
/*
 * One captured blob of task state, kept on a linked list.
 * NOTE(review): judging by the name, this records tasks that hit a GPU
 * error for later dumping — confirm against lima_sched.c users.
 */
struct lima_sched_error_task {
	struct list_head list;	/* list linkage; head lives elsewhere */
	void *data;		/* opaque payload, @size bytes */
	u32 size;		/* byte length of @data */
};
19 
/*
 * A single unit of GPU work submitted through the DRM GPU scheduler.
 * Embeds drm_sched_job as its base so it can be handed to drm_sched.
 */
struct lima_sched_task {
	struct drm_sched_job base;	/* must stay first: container_of() upcasts */

	struct lima_vm *vm;		/* VM this task executes under */
	void *frame;			/* hw frame descriptor blob; size is pipe->frame_size */

	struct lima_bo **bos;		/* BOs referenced by the task */
	int num_bos;			/* number of entries in @bos */

	bool recoverable;		/* task may be retried via pipe->task_recover */
	struct lima_bo *heap;		/* heap BO used on recovery — assumed, confirm in lima_sched.c */

	/* pipe fence */
	struct dma_fence *fence;	/* signalled when the hw finishes this task */
};
35 
/*
 * Per-open-file scheduling context: a thin wrapper around a DRM
 * scheduler entity (the queue tasks are submitted into).
 */
struct lima_sched_context {
	struct drm_sched_entity base;	/* must stay first: container_of() upcasts */
};
39 
/* Upper bounds on the per-pipe hardware IP arrays in struct lima_sched_pipe. */
#define LIMA_SCHED_PIPE_MAX_MMU       8
#define LIMA_SCHED_PIPE_MAX_L2_CACHE  2
#define LIMA_SCHED_PIPE_MAX_PROCESSOR 8

struct lima_ip;
45 
/*
 * One hardware execution pipe (e.g. GP or PP on Mali-4x0) wrapping a DRM
 * GPU scheduler instance plus the set of IP blocks the pipe drives.
 * The task_* function pointers form the backend vtable implemented by
 * the pipe-specific code.
 */
struct lima_sched_pipe {
	struct drm_gpu_scheduler base;	/* must stay first: container_of() upcasts */

	u64 fence_context;		/* dma_fence context id for this pipe */
	u32 fence_seqno;		/* next fence sequence number */
	spinlock_t fence_lock;		/* protects fence state — confirm exact scope in lima_sched.c */

	struct lima_device *ldev;	/* owning device */

	struct lima_sched_task *current_task;	/* task currently on the hw, if any */
	struct lima_vm *current_vm;		/* VM currently bound to the hw */

	/* IP blocks owned by this pipe; counts give valid entries */
	struct lima_ip *mmu[LIMA_SCHED_PIPE_MAX_MMU];
	int num_mmu;

	struct lima_ip *l2_cache[LIMA_SCHED_PIPE_MAX_L2_CACHE];
	int num_l2_cache;

	struct lima_ip *processor[LIMA_SCHED_PIPE_MAX_PROCESSOR];
	int num_processor;

	/* broadcast units (multi-PP setups); NULL when absent — confirm */
	struct lima_ip *bcast_processor;
	struct lima_ip *bcast_mmu;

	u32 done;		/* completion bookkeeping — see lima_sched_pipe_task_done() */
	bool error;		/* set on hw/MMU error (see lima_sched_pipe_mmu_error()) */
	atomic_t task;		/* outstanding-task counter — assumed, verify users */

	int frame_size;			/* byte size of a task's frame descriptor */
	struct kmem_cache *task_slab;	/* slab for task allocations */

	/* backend vtable, filled in by the pipe implementation */
	int (*task_validate)(struct lima_sched_pipe *pipe, struct lima_sched_task *task);
	void (*task_run)(struct lima_sched_pipe *pipe, struct lima_sched_task *task);
	void (*task_fini)(struct lima_sched_pipe *pipe);
	void (*task_error)(struct lima_sched_pipe *pipe);
	void (*task_mmu_error)(struct lima_sched_pipe *pipe);
	int (*task_recover)(struct lima_sched_pipe *pipe);
	void (*task_mask_irq)(struct lima_sched_pipe *pipe);

	struct work_struct recover_work;	/* deferred recovery after an error */
};
87 
/* Task lifecycle: init binds the task to a context/VM and takes BO refs
 * (ownership details in lima_sched.c); fini releases them.
 */
int lima_sched_task_init(struct lima_sched_task *task,
			 struct lima_sched_context *context,
			 struct lima_bo **bos, int num_bos,
			 struct lima_vm *vm);
void lima_sched_task_fini(struct lima_sched_task *task);

/* Context lifecycle on a given pipe; @guilty feeds drm_sched guilty tracking. */
int lima_sched_context_init(struct lima_sched_pipe *pipe,
			    struct lima_sched_context *context,
			    atomic_t *guilty);
void lima_sched_context_fini(struct lima_sched_pipe *pipe,
			     struct lima_sched_context *context);
/* Queue a task; returns the fence that signals on completion. */
struct dma_fence *lima_sched_context_queue_task(struct lima_sched_task *task);

/* Pipe lifecycle and hw-completion notification (called from IRQ path — confirm). */
int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name);
void lima_sched_pipe_fini(struct lima_sched_pipe *pipe);
void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe);
104 
lima_sched_pipe_mmu_error(struct lima_sched_pipe * pipe)105 static inline void lima_sched_pipe_mmu_error(struct lima_sched_pipe *pipe)
106 {
107 	pipe->error = true;
108 	pipe->task_mmu_error(pipe);
109 }
110 
/* Module-wide setup/teardown — presumably creates/destroys the task
 * kmem_cache (see pipe->task_slab); confirm in lima_sched.c.
 */
int lima_sched_slab_init(void);
void lima_sched_slab_fini(void);
113 
114 #endif
115