1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2018 Intel Corporation
5  */
6 
7 #ifndef _I915_SCHEDULER_H_
8 #define _I915_SCHEDULER_H_
9 
10 #include <linux/bitops.h>
11 #include <linux/kernel.h>
12 
13 #include <uapi/drm/i915_drm.h>
14 
15 struct drm_i915_private;
16 struct i915_request;
17 struct intel_engine_cs;
18 
enum {
	/* One step below the lowest priority userspace may request */
	I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
	I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
	/* One step above the highest priority userspace may request */
	I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,

	/* Sentinel (INT_MIN), deliberately outside the valid range above */
	I915_PRIORITY_INVALID = INT_MIN
};
26 
/*
 * The effective priority of a request is its user (context) priority
 * shifted up by I915_USER_PRIORITY_SHIFT, leaving the low bits free for
 * the internal priority bonuses defined below.
 */
#define I915_USER_PRIORITY_SHIFT 3
#define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)

/* Number of internal sub-levels per user priority, and a mask for them */
#define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
#define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1)

/* Internal priority bonus bits, stored below I915_USER_PRIORITY_SHIFT */
#define I915_PRIORITY_WAIT		((u8)BIT(0))
#define I915_PRIORITY_NEWCLIENT		((u8)BIT(1))
#define I915_PRIORITY_NOSEMAPHORE	((u8)BIT(2))

/*
 * NOTE(review): presumably a priority adjustment small enough not to
 * trigger preemption of the running request — confirm against the users
 * of __NO_PREEMPTION in the scheduler/submission code.
 */
#define __NO_PREEMPTION (I915_PRIORITY_WAIT)
38 
/* Scheduling attributes carried by each &i915_sched_node. */
struct i915_sched_attr {
	/**
	 * @priority: execution and service priority
	 *
	 * All clients are equal, but some are more equal than others!
	 *
	 * Requests from a context with a greater (more positive) value of
	 * @priority will be executed before those with a lower @priority
	 * value, forming a simple QoS.
	 *
	 * The &drm_i915_private.kernel_context is assigned the lowest priority.
	 */
	int priority;
};
53 
54 /*
55  * "People assume that time is a strict progression of cause to effect, but
56  * actually, from a nonlinear, non-subjective viewpoint, it's more like a big
57  * ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015
58  *
59  * Requests exist in a complex web of interdependencies. Each request
60  * has to wait for some other request to complete before it is ready to be run
61  * (e.g. we have to wait until the pixels have been rendering into a texture
62  * before we can copy from it). We track the readiness of a request in terms
63  * of fences, but we also need to keep the dependency tree for the lifetime
64  * of the request (beyond the life of an individual fence). We use the tree
65  * at various points to reorder the requests whilst keeping the requests
66  * in order with respect to their various dependencies.
67  *
68  * There is no active component to the "scheduler". As we know the dependency
69  * DAG of each request, we are able to insert it into a sorted queue when it
70  * is ready, and are able to reorder its portion of the graph to accommodate
71  * dynamic priority changes.
72  */
/* Per-request vertex in the dependency DAG described above. */
struct i915_sched_node {
	struct list_head signalers_list; /* those before us, we depend upon */
	struct list_head waiters_list; /* those after us, they depend upon us */
	struct list_head link; /* our slot in an i915_priolist bucket (see priolist_for_each_request) */
	struct i915_sched_attr attr; /* current scheduling attributes (priority) */
	unsigned int flags;
#define I915_SCHED_HAS_SEMAPHORE	BIT(0)
};
81 
/*
 * An edge in the dependency DAG: the node owning this dependency waits
 * for @signaler to complete before it is ready to run.
 */
struct i915_dependency {
	struct i915_sched_node *signaler; /* the node we wait upon */
	struct list_head signal_link; /* NOTE(review): presumably on signaler's waiters_list — confirm in i915_scheduler.c */
	struct list_head wait_link; /* NOTE(review): presumably on the waiter's signalers_list — confirm */
	struct list_head dfs_link; /* scratch linkage, presumably for the priority-bump DFS */
	unsigned long flags;
#define I915_DEPENDENCY_ALLOC BIT(0) /* dep was dynamically allocated; free on fini */
};
90 
/*
 * One level of the execution queue: requests sharing a user priority,
 * subdivided into I915_PRIORITY_COUNT buckets by their internal
 * priority bonus bits.
 */
struct i915_priolist {
	struct list_head requests[I915_PRIORITY_COUNT];
	struct rb_node node; /* position in the per-engine rbtree */
	unsigned long used; /* bitmask of non-empty @requests buckets, indexed via __ffs() */
	int priority;
};
97 
/* Iterate @it over every request in @plist, bucket by bucket. */
#define priolist_for_each_request(it, plist, idx) \
	for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
		list_for_each_entry(it, &(plist)->requests[idx], sched.link)

/*
 * Destructive variant, safe against removal of @it: each outer pass
 * selects the first non-empty bucket (lowest set bit of @plist->used),
 * walks it with the _safe iterator, then clears that bit once the
 * bucket has been drained.  Terminates when @used reaches zero.
 * Callers are expected to remove the requests as they consume them.
 */
#define priolist_for_each_request_consume(it, n, plist, idx) \
	for (; \
	     (plist)->used ? (idx = __ffs((plist)->used)), 1 : 0; \
	     (plist)->used &= ~BIT(idx)) \
		list_for_each_entry_safe(it, n, \
					 &(plist)->requests[idx], \
					 sched.link)
109 
/* Initialise @node before first use. */
void i915_sched_node_init(struct i915_sched_node *node);

/*
 * Record that @node depends on @signal, using caller-provided storage
 * @dep.  NOTE(review): return value presumably indicates whether the
 * edge was newly added — confirm in i915_scheduler.c.
 */
bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
				      struct i915_sched_node *signal,
				      struct i915_dependency *dep,
				      unsigned long flags);

/* As above, but allocates the i915_dependency; returns a negative errno on failure. */
int i915_sched_node_add_dependency(struct i915_sched_node *node,
				   struct i915_sched_node *signal);

/* Release the dependency lists of @node. */
void i915_sched_node_fini(struct i915_sched_node *node);

/* (Re)apply scheduling attributes @attr to @request and its dependency graph. */
void i915_schedule(struct i915_request *request,
		   const struct i915_sched_attr *attr);

/* Raise the priority of @rq by the internal bonus bits in @bump. */
void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump);

/* Find (or create) the request list for priority @prio on @engine. */
struct list_head *
i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio);
129 
130 void __i915_priolist_free(struct i915_priolist *p);
131 static inline void i915_priolist_free(struct i915_priolist *p)
132 {
133 	if (p->priority != I915_PRIORITY_NORMAL)
134 		__i915_priolist_free(p);
135 }
136 
137 #endif /* _I915_SCHEDULER_H_ */
138