/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright 2016,2017 IBM Corporation.
 */
#ifndef __XIVE_INTERNAL_H
#define __XIVE_INTERNAL_H

/* Each CPU carries one of these with various per-CPU state */
struct xive_cpu {
#ifdef CONFIG_SMP
	/* HW irq number and data of IPI */
	u32 hw_ipi;
	struct xive_irq_data ipi_data;
#endif /* CONFIG_SMP */

	int chip_id;

	/* Queue data. Only one queue is populated */
#define XIVE_MAX_QUEUES	8
	struct xive_q queue[XIVE_MAX_QUEUES];

	/*
	 * Pending mask. Each bit corresponds to a priority that
	 * potentially has pending interrupts.
	 */
	u8 pending_prio;

	/* Cache of HW CPPR */
	u8 cppr;
};
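
/*
 * Illustration only (a hypothetical helper, not part of this header):
 * a consumer of pending_prio would scan for the most favoured pending
 * priority. On XIVE, lower priority numbers are more favoured, so the
 * lowest set bit wins; ffs() is 1-based, hence the "- 1".
 *
 *	static int xive_pick_pending_prio(struct xive_cpu *xc)
 *	{
 *		if (!xc->pending_prio)
 *			return -1;
 *		return ffs(xc->pending_prio) - 1;
 *	}
 */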

/* Backend ops */
struct xive_ops {
	int	(*populate_irq_data)(u32 hw_irq, struct xive_irq_data *data);
	int	(*configure_irq)(u32 hw_irq, u32 target, u8 prio, u32 sw_irq);
	int	(*setup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
	void	(*cleanup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
	void	(*setup_cpu)(unsigned int cpu, struct xive_cpu *xc);
	void	(*teardown_cpu)(unsigned int cpu, struct xive_cpu *xc);
	bool	(*match)(struct device_node *np);
	void	(*shutdown)(void);

	void	(*update_pending)(struct xive_cpu *xc);
	void	(*eoi)(u32 hw_irq);
	void	(*sync_source)(u32 hw_irq);
	u64	(*esb_rw)(u32 hw_irq, u32 offset, u64 data, bool write);
#ifdef CONFIG_SMP
	int	(*get_ipi)(unsigned int cpu, struct xive_cpu *xc);
	void	(*put_ipi)(unsigned int cpu, struct xive_cpu *xc);
#endif
	const char *name;
};
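
/*
 * Hypothetical backend skeleton (all "foo" names are illustrative,
 * not in-tree): a backend fills in a struct xive_ops and hands it to
 * xive_core_init() below, along with its interrupt management MMIO
 * area, the offset into that area, and the highest usable priority:
 *
 *	static const struct xive_ops xive_foo_ops = {
 *		.populate_irq_data	= xive_foo_populate_irq_data,
 *		.configure_irq		= xive_foo_configure_irq,
 *		.setup_queue		= xive_foo_setup_queue,
 *		.cleanup_queue		= xive_foo_cleanup_queue,
 *		.eoi			= xive_foo_eoi,
 *		.name			= "foo",
 *	};
 *
 *	if (!xive_core_init(&xive_foo_ops, tima, offset, max_prio))
 *		pr_err("foo: XIVE core initialization failed\n");
 */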

bool xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
		    u8 max_prio);
__be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift);

static inline u32 xive_alloc_order(u32 queue_shift)
{
	return (queue_shift > PAGE_SHIFT) ? (queue_shift - PAGE_SHIFT) : 0;
}
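
/*
 * For example, with 64K queues (queue_shift = 16) on a 4K-page kernel
 * (PAGE_SHIFT = 12), the order is 16 - 12 = 4, i.e. a 16-page
 * allocation; a queue that fits in a single page needs order 0. A
 * backend would typically pair this with xive_queue_page_alloc()
 * above, which is assumed here to return an ERR_PTR() on failure:
 *
 *	qpage = xive_queue_page_alloc(cpu, queue_shift);
 *	if (IS_ERR(qpage))
 *		return PTR_ERR(qpage);
 */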

extern bool xive_cmdline_disabled;

#endif /* __XIVE_INTERNAL_H */