/*
 * QEMU PowerPC XIVE internal structure definitions
 *
 *
 * The XIVE structures are accessed by the HW and their format is
 * architected to be big-endian. Some macros are provided to ease
 * access to the different fields.
 *
 *
 * Copyright (c) 2016-2018, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#ifndef PPC_XIVE_REGS_H
#define PPC_XIVE_REGS_H

#include "qemu/bswap.h"
#include "qemu/host-utils.h"

/*
 * Interrupt source number encoding on PowerBUS
 */
#define XIVE_SRCNO_BLOCK(srcno)  (((srcno) >> 28) & 0xf)
#define XIVE_SRCNO_INDEX(srcno)  ((srcno) & 0x0fffffff)
#define XIVE_SRCNO(blk, idx)     ((uint32_t)(blk) << 28 | (idx))

#define TM_SHIFT                 16

/* TM register offsets */
#define TM_QW0_USER              0x000 /* All rings */
#define TM_QW1_OS                0x010 /* Ring 0..2 */
#define TM_QW2_HV_POOL           0x020 /* Ring 0..1 */
#define TM_QW3_HV_PHYS           0x030 /* Ring 0..1 */

/* Byte offsets inside a QW            QW0 QW1 QW2 QW3 */
#define TM_NSR                   0x0 /*  +   +   -   +  */
#define TM_CPPR                  0x1 /*  -   +   -   +  */
#define TM_IPB                   0x2 /*  -   +   +   +  */
#define TM_LSMFB                 0x3 /*  -   +   +   +  */
#define TM_ACK_CNT               0x4 /*  -   +   -   -  */
#define TM_INC                   0x5 /*  -   +   -   +  */
#define TM_AGE                   0x6 /*  -   +   -   +  */
#define TM_PIPR                  0x7 /*  -   +   -   +  */

#define TM_WORD0                 0x0
#define TM_WORD1                 0x4

/*
 * QW word 2 contains the valid bit at the top and other fields
 * depending on the QW.
 */
#define TM_WORD2                 0x8
#define TM_QW0W2_VU              PPC_BIT32(0)
#define TM_QW0W2_LOGIC_SERV      PPC_BITMASK32(1, 31) /* XX 2,31 ? */
#define TM_QW1W2_VO              PPC_BIT32(0)
#define TM_QW1W2_OS_CAM          PPC_BITMASK32(8, 31)
#define TM_QW2W2_VP              PPC_BIT32(0)
#define TM_QW2W2_POOL_CAM        PPC_BITMASK32(8, 31)
#define TM_QW3W2_VT              PPC_BIT32(0)
#define TM_QW3W2_LP              PPC_BIT32(6)
#define TM_QW3W2_LE              PPC_BIT32(7)
#define TM_QW3W2_T               PPC_BIT32(31)

/*
 * In addition to normal loads to "peek" and writes (only when invalid)
 * using 4 and 8 byte accesses, the above registers support these
 * "special" byte operations:
 *
 *   - Byte load from QW0[NSR] - User level NSR (EBB)
 *   - Byte store to QW0[NSR] - User level NSR (EBB)
 *   - Byte load/store to QW1[CPPR] and QW3[CPPR] - CPPR access
 *   - Byte load from QW3[TM_WORD2] - Read VT||00000||LP||LE on thrd 0
 *     otherwise VT||0000000
 *   - Byte store to QW3[TM_WORD2] - Set VT bit (and LP/LE if present)
 *
 * Then we have all these "special" CI ops at these offsets that trigger
 * all sorts of side effects:
 */
#define TM_SPC_ACK_EBB           0x800 /* Load8 ack EBB to reg */
#define TM_SPC_ACK_OS_REG        0x810 /* Load16 ack OS irq to reg */
#define TM_SPC_PUSH_USR_CTX      0x808 /* Store32 Push/Validate user context */
#define TM_SPC_PULL_USR_CTX      0x808 /* Load32 Pull/Invalidate user
                                        * context */
#define TM_SPC_SET_OS_PENDING    0x812 /* Store8 Set OS irq pending bit */
#define TM_SPC_PULL_OS_CTX       0x818 /* Load32/Load64 Pull/Invalidate OS
                                        * context to reg */
#define TM_SPC_PULL_POOL_CTX     0x828 /* Load32/Load64 Pull/Invalidate Pool
                                        * context to reg */
#define TM_SPC_ACK_HV_REG        0x830 /* Load16 ack HV irq to reg */
#define TM_SPC_PULL_USR_CTX_OL   0xc08 /* Store8 Pull/Inval usr ctx to odd
                                        * line */
#define TM_SPC_ACK_OS_EL         0xc10 /* Store8 ack OS irq to even line */
#define TM_SPC_ACK_HV_POOL_EL    0xc20 /* Store8 ack HV evt pool to even
                                        * line */
#define TM_SPC_ACK_HV_EL         0xc30 /* Store8 ack HV irq to even line */
/* XXX more... */
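
/*
 * Example (illustrative only, not used elsewhere): a byte access to one
 * of the registers above combines a ring page offset (TM_QWn_*) with a
 * byte offset (TM_*). The hypothetical helper below simply shows how the
 * defines compose for an OS ring CPPR access.
 */
static inline uint32_t xive_tm_example_os_cppr_offset(void)
{
    /* 0x010 (TM_QW1_OS) + 0x1 (TM_CPPR) = 0x011 */
    return TM_QW1_OS + TM_CPPR;
}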

/* NSR fields for the various QW ack types */
#define TM_QW0_NSR_EB            PPC_BIT8(0)
#define TM_QW1_NSR_EO            PPC_BIT8(0)
#define TM_QW3_NSR_HE            PPC_BITMASK8(0, 1)
#define TM_QW3_NSR_HE_NONE       0
#define TM_QW3_NSR_HE_POOL       1
#define TM_QW3_NSR_HE_PHYS       2
#define TM_QW3_NSR_HE_LSI        3
#define TM_QW3_NSR_I             PPC_BIT8(2)
#define TM_QW3_NSR_GRP_LVL       PPC_BITMASK8(3, 7)

/*
 * EAS (Event Assignment Structure)
 *
 * One per interrupt source. Targets an interrupt to a given Event
 * Notification Descriptor (END) and provides the corresponding
 * logical interrupt number (END data)
 */
typedef struct XiveEAS {
    /*
     * Use a single 64-bit definition to make it easier to perform
     * atomic updates
     */
    uint64_t        w;
#define EAS_VALID                PPC_BIT(0)
#define EAS_END_BLOCK            PPC_BITMASK(4, 7)   /* Destination END block# */
#define EAS_END_INDEX            PPC_BITMASK(8, 31)  /* Destination END index */
#define EAS_MASKED               PPC_BIT(32)         /* Masked */
#define EAS_END_DATA             PPC_BITMASK(33, 63) /* Data written to the END */
} XiveEAS;

#define xive_eas_is_valid(eas)   (be64_to_cpu((eas)->w) & EAS_VALID)
#define xive_eas_is_masked(eas)  (be64_to_cpu((eas)->w) & EAS_MASKED)

void xive_eas_pic_print_info(XiveEAS *eas, uint32_t lisn, Monitor *mon);

static inline uint64_t xive_get_field64(uint64_t mask, uint64_t word)
{
    return (be64_to_cpu(word) & mask) >> ctz64(mask);
}

static inline uint64_t xive_set_field64(uint64_t mask, uint64_t word,
                                        uint64_t value)
{
    uint64_t tmp =
        (be64_to_cpu(word) & ~mask) | ((value << ctz64(mask)) & mask);
    return cpu_to_be64(tmp);
}

static inline uint32_t xive_get_field32(uint32_t mask, uint32_t word)
{
    return (be32_to_cpu(word) & mask) >> ctz32(mask);
}

static inline uint32_t xive_set_field32(uint32_t mask, uint32_t word,
                                        uint32_t value)
{
    uint32_t tmp =
        (be32_to_cpu(word) & ~mask) | ((value << ctz32(mask)) & mask);
    return cpu_to_be32(tmp);
}
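
/*
 * Example (illustrative only, not used elsewhere): the accessors above
 * operate on the in-memory big-endian words using the field masks defined
 * alongside the structures. The hypothetical helper below re-targets an
 * EAS to a new END index and returns the value read back, shifted down
 * to bit 0.
 */
static inline uint64_t xive_eas_example_set_end_index(XiveEAS *eas,
                                                      uint32_t end_idx)
{
    /* the accessors handle the byte swap; eas->w stays big-endian */
    eas->w = xive_set_field64(EAS_END_INDEX, eas->w, end_idx);

    return xive_get_field64(EAS_END_INDEX, eas->w);
}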

/* Event Notification Descriptor (END) */
typedef struct XiveEND {
    uint32_t        w0;
#define END_W0_VALID             PPC_BIT32(0) /* "v" bit */
#define END_W0_ENQUEUE           PPC_BIT32(1) /* "q" bit */
#define END_W0_UCOND_NOTIFY      PPC_BIT32(2) /* "n" bit */
#define END_W0_BACKLOG           PPC_BIT32(3) /* "b" bit */
#define END_W0_PRECL_ESC_CTL     PPC_BIT32(4) /* "p" bit */
#define END_W0_ESCALATE_CTL      PPC_BIT32(5) /* "e" bit */
#define END_W0_UNCOND_ESCALATE   PPC_BIT32(6) /* "u" bit - DD2.0 */
#define END_W0_SILENT_ESCALATE   PPC_BIT32(7) /* "s" bit - DD2.0 */
#define END_W0_QSIZE             PPC_BITMASK32(12, 15)
#define END_W0_SW0               PPC_BIT32(16)
#define END_W0_FIRMWARE          END_W0_SW0 /* Owned by FW */
#define END_QSIZE_4K             0
#define END_QSIZE_64K            4
#define END_W0_HWDEP             PPC_BITMASK32(24, 31)
    uint32_t        w1;
#define END_W1_ESn               PPC_BITMASK32(0, 1)
#define END_W1_ESn_P             PPC_BIT32(0)
#define END_W1_ESn_Q             PPC_BIT32(1)
#define END_W1_ESe               PPC_BITMASK32(2, 3)
#define END_W1_ESe_P             PPC_BIT32(2)
#define END_W1_ESe_Q             PPC_BIT32(3)
#define END_W1_GENERATION        PPC_BIT32(9)
#define END_W1_PAGE_OFF          PPC_BITMASK32(10, 31)
    uint32_t        w2;
#define END_W2_MIGRATION_REG     PPC_BITMASK32(0, 3)
#define END_W2_OP_DESC_HI        PPC_BITMASK32(4, 31)
    uint32_t        w3;
#define END_W3_OP_DESC_LO        PPC_BITMASK32(0, 31)
    uint32_t        w4;
#define END_W4_ESC_END_BLOCK     PPC_BITMASK32(4, 7)
#define END_W4_ESC_END_INDEX     PPC_BITMASK32(8, 31)
    uint32_t        w5;
#define END_W5_ESC_END_DATA      PPC_BITMASK32(1, 31)
    uint32_t        w6;
#define END_W6_FORMAT_BIT        PPC_BIT32(8)
#define END_W6_NVT_BLOCK         PPC_BITMASK32(9, 12)
#define END_W6_NVT_INDEX         PPC_BITMASK32(13, 31)
    uint32_t        w7;
#define END_W7_F0_IGNORE         PPC_BIT32(0)
#define END_W7_F0_BLK_GROUPING   PPC_BIT32(1)
#define END_W7_F0_PRIORITY       PPC_BITMASK32(8, 15)
#define END_W7_F1_WAKEZ          PPC_BIT32(0)
#define END_W7_F1_LOG_SERVER_ID  PPC_BITMASK32(1, 31)
} XiveEND;

#define xive_end_is_valid(end)    (be32_to_cpu((end)->w0) & END_W0_VALID)
#define xive_end_is_enqueue(end)  (be32_to_cpu((end)->w0) & END_W0_ENQUEUE)
#define xive_end_is_notify(end)   (be32_to_cpu((end)->w0) & END_W0_UCOND_NOTIFY)
#define xive_end_is_backlog(end)  (be32_to_cpu((end)->w0) & END_W0_BACKLOG)
#define xive_end_is_escalate(end) (be32_to_cpu((end)->w0) & END_W0_ESCALATE_CTL)
#define xive_end_is_uncond_escalation(end)              \
    (be32_to_cpu((end)->w0) & END_W0_UNCOND_ESCALATE)
#define xive_end_is_silent_escalation(end)              \
    (be32_to_cpu((end)->w0) & END_W0_SILENT_ESCALATE)

static inline uint64_t xive_end_qaddr(XiveEND *end)
{
    return ((uint64_t) be32_to_cpu(end->w2) & 0x0fffffff) << 32 |
        be32_to_cpu(end->w3);
}

void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, Monitor *mon);
void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, Monitor *mon);
void xive_end_eas_pic_print_info(XiveEND *end, uint32_t end_idx, Monitor *mon);

/* Notification Virtual Target (NVT) */
typedef struct XiveNVT {
    uint32_t        w0;
#define NVT_W0_VALID             PPC_BIT32(0)
    uint32_t        w1;
    uint32_t        w2;
    uint32_t        w3;
    uint32_t        w4;
    uint32_t        w5;
    uint32_t        w6;
    uint32_t        w7;
    uint32_t        w8;
#define NVT_W8_GRP_VALID         PPC_BIT32(0)
    uint32_t        w9;
    uint32_t        wa;
    uint32_t        wb;
    uint32_t        wc;
    uint32_t        wd;
    uint32_t        we;
    uint32_t        wf;
} XiveNVT;

#define xive_nvt_is_valid(nvt)    (be32_to_cpu((nvt)->w0) & NVT_W0_VALID)

#endif /* PPC_XIVE_REGS_H */