1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3  * Copyright 2016,2017 IBM Corporation.
4  */
5 #ifndef __XIVE_INTERNAL_H
6 #define __XIVE_INTERNAL_H
7 
8 #define XIVE_IPI_HW_IRQ		0 /* interrupt source # for IPIs */
9 
10 /*
11  * A "disabled" interrupt should never fire, to catch problems
12  * we set its logical number to this
13  */
14 #define XIVE_BAD_IRQ		0x7fffffff
15 #define XIVE_MAX_IRQ		(XIVE_BAD_IRQ - 1)
16 
/* Each CPU carries one of these with various per-CPU state */
struct xive_cpu {
#ifdef CONFIG_SMP
	/* HW irq number and data of IPI */
	u32 hw_ipi;
	struct xive_irq_data ipi_data;
#endif /* CONFIG_SMP */

	/* Chip id of this CPU */
	int chip_id;

	/* Queue data. Only one is populated */
#define XIVE_MAX_QUEUES	8
	struct xive_q queue[XIVE_MAX_QUEUES];

	/*
	 * Pending mask. Each bit corresponds to a priority that
	 * potentially has pending interrupts.
	 */
	u8 pending_prio;

	/* Cache of HW CPPR (Current Processor Priority Register) */
	u8 cppr;
};
40 
/*
 * Backend ops. One implementation is selected at boot via ->match()
 * and handed to xive_core_init(); "name" identifies the backend
 * (e.g. in diagnostics).
 */
struct xive_ops {
	/* Fill in the xive_irq_data for a HW interrupt source */
	int	(*populate_irq_data)(u32 hw_irq, struct xive_irq_data *data);
	/* Route a HW interrupt to a target at the given priority */
	int 	(*configure_irq)(u32 hw_irq, u32 target, u8 prio, u32 sw_irq);
	/* Read back the current routing of a HW interrupt */
	int	(*get_irq_config)(u32 hw_irq, u32 *target, u8 *prio,
				  u32 *sw_irq);
	/* Per-CPU, per-priority event queue setup/teardown */
	int	(*setup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
	void	(*cleanup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
	/* Per-CPU bring-up/teardown hooks */
	void	(*setup_cpu)(unsigned int cpu, struct xive_cpu *xc);
	void	(*teardown_cpu)(unsigned int cpu, struct xive_cpu *xc);
	/* Returns true if this backend drives the given device node */
	bool	(*match)(struct device_node *np);
	void	(*shutdown)(void);

	void	(*update_pending)(struct xive_cpu *xc);
	void	(*sync_source)(u32 hw_irq);
	/* ESB MMIO access; "write" selects store vs. load at "offset" */
	u64	(*esb_rw)(u32 hw_irq, u32 offset, u64 data, bool write);
#ifdef CONFIG_SMP
	/* Allocate/release the per-CPU IPI HW interrupt */
	int	(*get_ipi)(unsigned int cpu, struct xive_cpu *xc);
	void	(*put_ipi)(unsigned int cpu, struct xive_cpu *xc);
#endif
	/* Backend-specific debugfs/seq_file dump hook */
	int	(*debug_show)(struct seq_file *m, void *private);
	const char *name;
};
64 
65 bool xive_core_init(struct device_node *np, const struct xive_ops *ops,
66 		    void __iomem *area, u32 offset, u8 max_prio);
67 __be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift);
68 int xive_core_debug_init(void);
69 
70 static inline u32 xive_alloc_order(u32 queue_shift)
71 {
72 	return (queue_shift > PAGE_SHIFT) ? (queue_shift - PAGE_SHIFT) : 0;
73 }
74 
75 extern bool xive_cmdline_disabled;
76 
77 #endif /*  __XIVE_INTERNAL_H */
78