/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_BREADCRUMBS_TYPES__
#define __INTEL_BREADCRUMBS_TYPES__

#include <linux/irq_work.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include "intel_engine_types.h"

/*
 * Rather than have every client wait upon all user interrupts,
 * with the herd waking after every interrupt and each doing the
 * heavyweight seqno dance, we delegate the task (of being the
 * bottom-half of the user interrupt) to the first client. After
 * every interrupt, we wake up one client, who does the heavyweight
 * coherent seqno read and either goes back to sleep (if incomplete),
 * or wakes up all the completed clients in parallel, before then
 * transferring the bottom-half status to the next client in the queue.
 *
 * Compared to walking the entire list of waiters in a single dedicated
 * bottom-half, we reduce the latency of the first waiter by avoiding
 * a context switch, but incur additional coherent seqno reads when
 * following the chain of request breadcrumbs. Since it is most likely
 * that we have a single client waiting on each seqno, reducing
 * the overhead of waking that client is much preferred.
 */
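
/*
 * A rough sketch of the flow these types support; the entry points live
 * in intel_breadcrumbs.c, and the sequence below is illustrative of that
 * code rather than a stable API:
 *
 *   b = intel_breadcrumbs_create(engine);
 *	- one set of breadcrumbs per engine (or shared between engines,
 *	  hence the kref and engine_mask below)
 *   i915_request_enable_breadcrumb(rq);
 *	- adds the request to b->signalers and arms the user interrupt
 *   user interrupt
 *	- intel_engine_signal_breadcrumbs() queues b->irq_work
 *   signal_irq_work()
 *	- does the coherent seqno read, signals every completed request
 *	  and disarms the interrupt once b->signalers is empty
 */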
struct intel_breadcrumbs {
	struct kref ref;
	atomic_t active;

	spinlock_t signalers_lock; /* protects the list of signalers */
	struct list_head signalers;
	struct llist_head signaled_requests;
	atomic_t signaler_active;

	spinlock_t irq_lock; /* protects the interrupt from hardirq context */
	struct irq_work irq_work; /* for use from inside irq_lock */
	unsigned int irq_enabled;
	bool irq_armed;

	/* Not all breadcrumbs are attached to physical HW */
	intel_engine_mask_t	engine_mask;
	struct intel_engine_cs *irq_engine;
	bool	(*irq_enable)(struct intel_breadcrumbs *b);
	void	(*irq_disable)(struct intel_breadcrumbs *b);
};

#endif /* __INTEL_BREADCRUMBS_TYPES__ */