13a891a62SChris Wilson /*
23a891a62SChris Wilson  * SPDX-License-Identifier: MIT
33a891a62SChris Wilson  *
43a891a62SChris Wilson  * Copyright © 2018 Intel Corporation
53a891a62SChris Wilson  */
63a891a62SChris Wilson 
73a891a62SChris Wilson #ifndef _I915_SCHEDULER_TYPES_H_
83a891a62SChris Wilson #define _I915_SCHEDULER_TYPES_H_
93a891a62SChris Wilson 
103a891a62SChris Wilson #include <linux/list.h>
113a891a62SChris Wilson 
12112ed2d3SChris Wilson #include "gt/intel_engine_types.h"
138b74594aSChris Wilson #include "i915_priolist_types.h"
143a891a62SChris Wilson 
153a891a62SChris Wilson struct drm_i915_private;
163a891a62SChris Wilson struct i915_request;
173a891a62SChris Wilson struct intel_engine_cs;
183a891a62SChris Wilson 
193a891a62SChris Wilson struct i915_sched_attr {
203a891a62SChris Wilson 	/**
213a891a62SChris Wilson 	 * @priority: execution and service priority
223a891a62SChris Wilson 	 *
233a891a62SChris Wilson 	 * All clients are equal, but some are more equal than others!
243a891a62SChris Wilson 	 *
253a891a62SChris Wilson 	 * Requests from a context with a greater (more positive) value of
263a891a62SChris Wilson 	 * @priority will be executed before those with a lower @priority
273a891a62SChris Wilson 	 * value, forming a simple QoS.
283a891a62SChris Wilson 	 *
293a891a62SChris Wilson 	 * The &drm_i915_private.kernel_context is assigned the lowest priority.
303a891a62SChris Wilson 	 */
313a891a62SChris Wilson 	int priority; /* standard levels (e.g. I915_PRIORITY_NORMAL) come from i915_priolist_types.h */
323a891a62SChris Wilson };
333a891a62SChris Wilson 
343a891a62SChris Wilson /*
353a891a62SChris Wilson  * "People assume that time is a strict progression of cause to effect, but
363a891a62SChris Wilson  * actually, from a nonlinear, non-subjective viewpoint, it's more like a big
373a891a62SChris Wilson  * ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015
383a891a62SChris Wilson  *
393a891a62SChris Wilson  * Requests exist in a complex web of interdependencies. Each request
403a891a62SChris Wilson  * has to wait for some other request to complete before it is ready to be run
413a891a62SChris Wilson  * (e.g. we have to wait until the pixels have been rendered into a texture
423a891a62SChris Wilson  * before we can copy from it). We track the readiness of a request in terms
433a891a62SChris Wilson  * of fences, but we also need to keep the dependency tree for the lifetime
443a891a62SChris Wilson  * of the request (beyond the life of an individual fence). We use the tree
453a891a62SChris Wilson  * at various points to reorder the requests whilst keeping the requests
463a891a62SChris Wilson  * in order with respect to their various dependencies.
473a891a62SChris Wilson  *
483a891a62SChris Wilson  * There is no active component to the "scheduler". As we know the dependency
493a891a62SChris Wilson  * DAG of each request, we are able to insert it into a sorted queue when it
503a891a62SChris Wilson  * is ready, and are able to reorder its portion of the graph to accommodate
513a891a62SChris Wilson  * dynamic priority changes.
529c4a14f8SChris Wilson  *
539c4a14f8SChris Wilson  * Ok, there is now one active element to the "scheduler" in the backends.
549c4a14f8SChris Wilson  * We let a new context run for a small amount of time before re-evaluating
559c4a14f8SChris Wilson  * the run order. As we re-evaluate, we maintain the strict ordering of
569c4a14f8SChris Wilson  * dependencies, but attempt to rotate the active contexts (the current context
579c4a14f8SChris Wilson  * is put to the back of its priority queue, then reshuffling its dependents).
589c4a14f8SChris Wilson  * This provides minimal timeslicing and prevents a userspace hog (e.g.
599c4a14f8SChris Wilson  * something waiting on a user semaphore [VkEvent]) from denying service to
609c4a14f8SChris Wilson  * others.
613a891a62SChris Wilson  */
623a891a62SChris Wilson struct i915_sched_node {
633a891a62SChris Wilson 	struct list_head signalers_list; /* those before us, we depend upon */
643a891a62SChris Wilson 	struct list_head waiters_list; /* those after us, they depend upon us */
653a891a62SChris Wilson 	struct list_head link; /* entry in a scheduling list — NOTE(review): confirm which list owns this */
663a891a62SChris Wilson 	struct i915_sched_attr attr; /* current scheduling attributes (priority) */
673a891a62SChris Wilson 	unsigned int flags; /* I915_SCHED_* bits, see below */
6818e4af04SChris Wilson #define I915_SCHED_HAS_EXTERNAL_CHAIN	BIT(0)
697881e605SChris Wilson 	intel_engine_mask_t semaphores; /* engine mask — NOTE(review): presumably engines signalled via semaphores; confirm */
703a891a62SChris Wilson };
713a891a62SChris Wilson 
723a891a62SChris Wilson struct i915_dependency {
733a891a62SChris Wilson 	struct i915_sched_node *signaler; /* the node we must wait upon */
748ee36e04SChris Wilson 	struct i915_sched_node *waiter; /* the node that waits upon the signaler */
753a891a62SChris Wilson 	struct list_head signal_link; /* entry in waiter's signalers_list (see for_each_signaler) */
763a891a62SChris Wilson 	struct list_head wait_link; /* entry in signaler's waiters_list (see for_each_waiter) */
773a891a62SChris Wilson 	struct list_head dfs_link; /* scratch link — presumably for DFS during rescheduling; confirm against users */
783a891a62SChris Wilson 	unsigned long flags; /* I915_DEPENDENCY_* bits, see below */
793a891a62SChris Wilson #define I915_DEPENDENCY_ALLOC		BIT(0)
806e7eb7a8SChris Wilson #define I915_DEPENDENCY_EXTERNAL	BIT(1)
816b6cd2ebSChris Wilson #define I915_DEPENDENCY_WEAK		BIT(2)
823a891a62SChris Wilson };
833a891a62SChris Wilson 
/* Iterate over every dependency waiting on @rq__ (lockless list walk). */
84b5b349b9SChris Wilson #define for_each_waiter(p__, rq__) \
85b5b349b9SChris Wilson 	list_for_each_entry_lockless(p__, \
86b5b349b9SChris Wilson 				     &(rq__)->sched.waiters_list, \
87b5b349b9SChris Wilson 				     wait_link)
88b5b349b9SChris Wilson 
/* Iterate over every dependency that @rq__ waits upon (RCU-safe list walk). */
89b5b349b9SChris Wilson #define for_each_signaler(p__, rq__) \
90b5b349b9SChris Wilson 	list_for_each_entry_rcu(p__, \
91b5b349b9SChris Wilson 				&(rq__)->sched.signalers_list, \
92b5b349b9SChris Wilson 				signal_link)
93b5b349b9SChris Wilson 
943e28d371SMatthew Brost /**
953e28d371SMatthew Brost  * struct i915_sched_engine - scheduler engine
963e28d371SMatthew Brost  *
973e28d371SMatthew Brost  * A schedule engine represents a submission queue with different priority
983e28d371SMatthew Brost  * bands. It contains all the common state (relative to the backend) to queue,
993e28d371SMatthew Brost  * track, and submit a request.
1003e28d371SMatthew Brost  *
1013e28d371SMatthew Brost  * This object at the moment is quite i915 specific but will transition into a
1023e28d371SMatthew Brost  * container for the drm_gpu_scheduler plus a few other variables once the i915
1033e28d371SMatthew Brost  * is integrated with the DRM scheduler.
1043e28d371SMatthew Brost  */
1053e28d371SMatthew Brost struct i915_sched_engine {
1063e28d371SMatthew Brost 	/**
1073e28d371SMatthew Brost 	 * @ref: reference count of schedule engine object
1083e28d371SMatthew Brost 	 */
1093e28d371SMatthew Brost 	struct kref ref;
1103e28d371SMatthew Brost 
1113e28d371SMatthew Brost 	/**
112349a2bc5SMatthew Brost 	 * @lock: protects requests in priority lists, requests, hold and
113349a2bc5SMatthew Brost 	 * tasklet while running
114349a2bc5SMatthew Brost 	 */
115349a2bc5SMatthew Brost 	spinlock_t lock;
116349a2bc5SMatthew Brost 
117349a2bc5SMatthew Brost 	/**
118349a2bc5SMatthew Brost 	 * @requests: list of requests inflight on this schedule engine
119349a2bc5SMatthew Brost 	 */
120349a2bc5SMatthew Brost 	struct list_head requests;
121349a2bc5SMatthew Brost 
122349a2bc5SMatthew Brost 	/**
123349a2bc5SMatthew Brost 	 * @hold: list of ready requests, but on hold
124349a2bc5SMatthew Brost 	 */
125349a2bc5SMatthew Brost 	struct list_head hold;
126349a2bc5SMatthew Brost 
127349a2bc5SMatthew Brost 	/**
12822916badSMatthew Brost 	 * @tasklet: softirq tasklet for submission
12922916badSMatthew Brost 	 */
13022916badSMatthew Brost 	struct tasklet_struct tasklet;
13122916badSMatthew Brost 
13222916badSMatthew Brost 	/**
1333e28d371SMatthew Brost 	 * @default_priolist: priority list for I915_PRIORITY_NORMAL
1343e28d371SMatthew Brost 	 */
1353e28d371SMatthew Brost 	struct i915_priolist default_priolist;
1363e28d371SMatthew Brost 
1373e28d371SMatthew Brost 	/**
1383e28d371SMatthew Brost 	 * @queue_priority_hint: Highest pending priority.
1393e28d371SMatthew Brost 	 *
1403e28d371SMatthew Brost 	 * When we add requests into the queue, or adjust the priority of
1413e28d371SMatthew Brost 	 * executing requests, we compute the maximum priority of those
1423e28d371SMatthew Brost 	 * pending requests. We can then use this value to determine if
1433e28d371SMatthew Brost 	 * we need to preempt the executing requests to service the queue.
1443e28d371SMatthew Brost 	 * However, since we may have recorded the priority of an inflight
1453e28d371SMatthew Brost 	 * request we wanted to preempt but which has since completed, at the
1463e28d371SMatthew Brost 	 * time of dequeuing the priority hint may no longer match the highest
1473e28d371SMatthew Brost 	 * available request priority.
1483e28d371SMatthew Brost 	 */
1493e28d371SMatthew Brost 	int queue_priority_hint;
1503e28d371SMatthew Brost 
1513e28d371SMatthew Brost 	/**
1523e28d371SMatthew Brost 	 * @queue: queue of requests, in priority lists
1533e28d371SMatthew Brost 	 */
1543e28d371SMatthew Brost 	struct rb_root_cached queue;
1553e28d371SMatthew Brost 
1563e28d371SMatthew Brost 	/**
1573e28d371SMatthew Brost 	 * @no_priolist: priority lists disabled
1583e28d371SMatthew Brost 	 */
1593e28d371SMatthew Brost 	bool no_priolist;
1603f623e06SMatthew Brost 
1613f623e06SMatthew Brost 	/**
16222916badSMatthew Brost 	 * @private_data: private data of the submission backend
16322916badSMatthew Brost 	 */
16422916badSMatthew Brost 	void *private_data;
16522916badSMatthew Brost 
16622916badSMatthew Brost 	/**
16727466222SMatthew Brost 	 * @destroy: destroy schedule engine / cleanup in backend
16827466222SMatthew Brost 	 */
16927466222SMatthew Brost 	void	(*destroy)(struct kref *kref);
17027466222SMatthew Brost 
17127466222SMatthew Brost 	/**
172c41ee287SMatthew Brost 	 * @disabled: check if backend has disabled submission
173c41ee287SMatthew Brost 	 */
174c41ee287SMatthew Brost 	bool	(*disabled)(struct i915_sched_engine *sched_engine);
175c41ee287SMatthew Brost 
176c41ee287SMatthew Brost 	/**
17771ed6011SMatthew Brost 	 * @kick_backend: kick backend after a request's priority has changed
17871ed6011SMatthew Brost 	 */
17971ed6011SMatthew Brost 	void	(*kick_backend)(const struct i915_request *rq,
18071ed6011SMatthew Brost 				int prio);
18171ed6011SMatthew Brost 
18271ed6011SMatthew Brost 	/**
183*ee242ca7SMatthew Brost 	 * @bump_inflight_request_prio: update priority of an inflight request
184*ee242ca7SMatthew Brost 	 */
185*ee242ca7SMatthew Brost 	void	(*bump_inflight_request_prio)(struct i915_request *rq,
186*ee242ca7SMatthew Brost 					      int prio);
187*ee242ca7SMatthew Brost 
188*ee242ca7SMatthew Brost 	/**
189*ee242ca7SMatthew Brost 	 * @retire_inflight_request_prio: indicate request is retired to
190*ee242ca7SMatthew Brost 	 * priority tracking
191*ee242ca7SMatthew Brost 	 */
192*ee242ca7SMatthew Brost 	void	(*retire_inflight_request_prio)(struct i915_request *rq);
193*ee242ca7SMatthew Brost 
194*ee242ca7SMatthew Brost 	/**
1953f623e06SMatthew Brost 	 * @schedule: adjust priority of request
1963f623e06SMatthew Brost 	 *
1973f623e06SMatthew Brost 	 * Call when the priority on a request has changed and it and its
1983f623e06SMatthew Brost 	 * dependencies may need rescheduling. Note the request itself may
1993f623e06SMatthew Brost 	 * not be ready to run!
2003f623e06SMatthew Brost 	 */
2013f623e06SMatthew Brost 	void	(*schedule)(struct i915_request *request,
2023f623e06SMatthew Brost 			    const struct i915_sched_attr *attr);
2033e28d371SMatthew Brost };
2043e28d371SMatthew Brost 
2053a891a62SChris Wilson #endif /* _I915_SCHEDULER_TYPES_H_ */
206