/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright 2016,2017 IBM Corporation.
 */
#ifndef __XIVE_INTERNAL_H
#define __XIVE_INTERNAL_H

/* Each CPU carries one of these with various per-CPU state */
struct xive_cpu {
#ifdef CONFIG_SMP
	/* HW irq number and data of IPI */
	u32 hw_ipi;
	struct xive_irq_data ipi_data;
#endif /* CONFIG_SMP */

	int chip_id;

	/* Queue data. Only one is populated */
#define XIVE_MAX_QUEUES	8
	struct xive_q queue[XIVE_MAX_QUEUES];

	/*
	 * Pending mask. Each bit corresponds to a priority that
	 * potentially has pending interrupts.
	 */
	u8 pending_prio;

	/* Cache of HW CPPR */
	u8 cppr;
};

/* Backend ops */
struct xive_ops {
	int	(*populate_irq_data)(u32 hw_irq, struct xive_irq_data *data);
	int	(*configure_irq)(u32 hw_irq, u32 target, u8 prio, u32 sw_irq);
	int	(*get_irq_config)(u32 hw_irq, u32 *target, u8 *prio,
				  u32 *sw_irq);
	int	(*setup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
	void	(*cleanup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
	void	(*setup_cpu)(unsigned int cpu, struct xive_cpu *xc);
	void	(*teardown_cpu)(unsigned int cpu, struct xive_cpu *xc);
	bool	(*match)(struct device_node *np);
	void	(*shutdown)(void);

	void	(*update_pending)(struct xive_cpu *xc);
	void	(*eoi)(u32 hw_irq);
	void	(*sync_source)(u32 hw_irq);
	u64	(*esb_rw)(u32 hw_irq, u32 offset, u64 data, bool write);
#ifdef CONFIG_SMP
	int	(*get_ipi)(unsigned int cpu, struct xive_cpu *xc);
	void	(*put_ipi)(unsigned int cpu, struct xive_cpu *xc);
#endif
	const char *name;
};

bool xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
		    u8 max_prio);
__be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift);

static inline u32 xive_alloc_order(u32 queue_shift)
{
	return (queue_shift > PAGE_SHIFT) ? (queue_shift - PAGE_SHIFT) : 0;
}

extern bool xive_cmdline_disabled;

#endif /* __XIVE_INTERNAL_H */
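
/*
 * Illustrative sketch only: a rough guess at how a backend might wire up
 * the ops declared above and hand them to the core.  Every name prefixed
 * "fake_" is hypothetical, and the MMIO area/offset arguments are
 * placeholders for whatever mapping a real backend (e.g. the native/OPAL
 * or sPAPR one) would pass to xive_core_init().
 */
#if 0
static int fake_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
	/* Fill in per-source data (ESB pages, flags, ...) for hw_irq */
	return 0;
}

static int fake_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
{
	/* Route hw_irq to the given target at the given priority */
	return 0;
}

static const struct xive_ops fake_ops = {
	.populate_irq_data	= fake_populate_irq_data,
	.configure_irq		= fake_configure_irq,
	/* ... remaining callbacks would be filled in here ... */
	.name			= "fake",
};

static bool __init fake_xive_probe(void __iomem *fake_area, u32 fake_offset)
{
	/* Register this backend with the core; 7 is an arbitrary max prio */
	return xive_core_init(&fake_ops, fake_area, fake_offset, 7);
}
#endif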