/*
 * QEMU PowerPC XIVE2 internal structure definitions (POWER10)
 *
 * Copyright (c) 2019-2022, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#ifndef PPC_XIVE2_REGS_H
#define PPC_XIVE2_REGS_H

/*
 * Thread Interrupt Management Area (TIMA)
 *
 * In Gen1 mode (P9 compat mode) word 2 is the same. However in Gen2
 * mode (P10), the CAM line is slightly different as the VP space was
 * increased.
 */
#define TM2_QW0W2_VU             PPC_BIT32(0)
#define TM2_QW0W2_LOGIC_SERV     PPC_BITMASK32(4, 31)
#define TM2_QW1W2_VO             PPC_BIT32(0)
#define TM2_QW1W2_HO             PPC_BIT32(1)
#define TM2_QW1W2_OS_CAM         PPC_BITMASK32(4, 31)
#define TM2_QW2W2_VP             PPC_BIT32(0)
#define TM2_QW2W2_HP             PPC_BIT32(1)
#define TM2_QW2W2_POOL_CAM       PPC_BITMASK32(4, 31)
#define TM2_QW3W2_VT             PPC_BIT32(0)
#define TM2_QW3W2_HT             PPC_BIT32(1)
#define TM2_QW3W2_LP             PPC_BIT32(6)
#define TM2_QW3W2_LE             PPC_BIT32(7)

/*
 * Event Assignment Structure (EAS)
 */

typedef struct Xive2Eas {
    uint64_t w;
#define EAS2_VALID      PPC_BIT(0)
#define EAS2_END_BLOCK  PPC_BITMASK(4, 7)   /* Destination EQ block# */
#define EAS2_END_INDEX  PPC_BITMASK(8, 31)  /* Destination EQ index */
#define EAS2_MASKED     PPC_BIT(32)         /* Masked */
#define EAS2_END_DATA   PPC_BITMASK(33, 63) /* written to the EQ */
} Xive2Eas;

#define xive2_eas_is_valid(eas)   (be64_to_cpu((eas)->w) & EAS2_VALID)
#define xive2_eas_is_masked(eas)  (be64_to_cpu((eas)->w) & EAS2_MASKED)

void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, Monitor *mon);
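
/*
 * Illustrative sketch only, not part of the EAS definition above: how
 * the EAS word can be decoded to find the destination END. The
 * "_example" helpers are hypothetical names used for illustration;
 * only the EAS2_* masks and be64_to_cpu() come from this header, and
 * the shift counts follow from the IBM (MSB 0) bit numbering used by
 * PPC_BITMASK(). The XIVE code normally extracts these fields with a
 * generic field accessor; the shifts are spelled out here for clarity.
 */
static inline uint8_t xive2_eas_end_block_example(const Xive2Eas *eas)
{
    /* EAS2_END_BLOCK covers IBM bits 4-7, i.e. LSB 0 bits 59-56 */
    return (be64_to_cpu(eas->w) & EAS2_END_BLOCK) >> 56;
}

static inline uint32_t xive2_eas_end_index_example(const Xive2Eas *eas)
{
    /* EAS2_END_INDEX covers IBM bits 8-31, i.e. LSB 0 bits 55-32 */
    return (be64_to_cpu(eas->w) & EAS2_END_INDEX) >> 32;
}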

/*
 * Event Notification Descriptor (END)
 */

typedef struct Xive2End {
    uint32_t w0;
#define END2_W0_VALID              PPC_BIT32(0)  /* "v" bit */
#define END2_W0_ENQUEUE            PPC_BIT32(5)  /* "q" bit */
#define END2_W0_UCOND_NOTIFY       PPC_BIT32(6)  /* "n" bit */
#define END2_W0_SILENT_ESCALATE    PPC_BIT32(7)  /* "s" bit */
#define END2_W0_BACKLOG            PPC_BIT32(8)  /* "b" bit */
#define END2_W0_PRECL_ESC_CTL      PPC_BIT32(9)  /* "p" bit */
#define END2_W0_UNCOND_ESCALATE    PPC_BIT32(10) /* "u" bit */
#define END2_W0_ESCALATE_CTL       PPC_BIT32(11) /* "e" bit */
#define END2_W0_ADAPTIVE_ESC       PPC_BIT32(12) /* "a" bit */
#define END2_W0_ESCALATE_END       PPC_BIT32(13) /* "N" bit */
#define END2_W0_FIRMWARE1          PPC_BIT32(16) /* Owned by FW */
#define END2_W0_FIRMWARE2          PPC_BIT32(17) /* Owned by FW */
#define END2_W0_AEC_SIZE           PPC_BITMASK32(18, 19)
#define END2_W0_AEG_SIZE           PPC_BITMASK32(20, 23)
#define END2_W0_EQ_VG_PREDICT      PPC_BITMASK32(24, 31) /* Owned by HW */
    uint32_t w1;
#define END2_W1_ESn                PPC_BITMASK32(0, 1)
#define END2_W1_ESn_P              PPC_BIT32(0)
#define END2_W1_ESn_Q              PPC_BIT32(1)
#define END2_W1_ESe                PPC_BITMASK32(2, 3)
#define END2_W1_ESe_P              PPC_BIT32(2)
#define END2_W1_ESe_Q              PPC_BIT32(3)
#define END2_W1_GEN_FLIPPED        PPC_BIT32(8)
#define END2_W1_GENERATION         PPC_BIT32(9)
#define END2_W1_PAGE_OFF           PPC_BITMASK32(10, 31)
    uint32_t w2;
#define END2_W2_RESERVED           PPC_BITMASK32(4, 7)
#define END2_W2_EQ_ADDR_HI         PPC_BITMASK32(8, 31)
    uint32_t w3;
#define END2_W3_EQ_ADDR_LO         PPC_BITMASK32(0, 24)
#define END2_W3_QSIZE              PPC_BITMASK32(28, 31)
    uint32_t w4;
#define END2_W4_END_BLOCK          PPC_BITMASK32(4, 7)
#define END2_W4_ESC_END_INDEX      PPC_BITMASK32(8, 31)
#define END2_W4_ESB_BLOCK          PPC_BITMASK32(0, 3)
#define END2_W4_ESC_ESB_INDEX      PPC_BITMASK32(4, 31)
    uint32_t w5;
#define END2_W5_ESC_END_DATA       PPC_BITMASK32(1, 31)
    uint32_t w6;
#define END2_W6_FORMAT_BIT         PPC_BIT32(0)
#define END2_W6_IGNORE             PPC_BIT32(1)
#define END2_W6_VP_BLOCK           PPC_BITMASK32(4, 7)
#define END2_W6_VP_OFFSET          PPC_BITMASK32(8, 31)
#define END2_W6_VP_OFFSET_GEN1     PPC_BITMASK32(13, 31)
    uint32_t w7;
#define END2_W7_TOPO               PPC_BITMASK32(0, 3) /* Owned by HW */
#define END2_W7_F0_PRIORITY        PPC_BITMASK32(8, 15)
#define END2_W7_F1_LOG_SERVER_ID   PPC_BITMASK32(4, 31)
} Xive2End;

#define xive2_end_is_valid(end)    (be32_to_cpu((end)->w0) & END2_W0_VALID)
#define xive2_end_is_enqueue(end)  (be32_to_cpu((end)->w0) & END2_W0_ENQUEUE)
#define xive2_end_is_notify(end)                     \
    (be32_to_cpu((end)->w0) & END2_W0_UCOND_NOTIFY)
#define xive2_end_is_backlog(end)  (be32_to_cpu((end)->w0) & END2_W0_BACKLOG)
#define xive2_end_is_escalate(end)                   \
    (be32_to_cpu((end)->w0) & END2_W0_ESCALATE_CTL)
#define xive2_end_is_uncond_escalation(end)          \
    (be32_to_cpu((end)->w0) & END2_W0_UNCOND_ESCALATE)
#define xive2_end_is_silent_escalation(end)          \
    (be32_to_cpu((end)->w0) & END2_W0_SILENT_ESCALATE)
#define xive2_end_is_escalate_end(end)               \
    (be32_to_cpu((end)->w0) & END2_W0_ESCALATE_END)
#define xive2_end_is_firmware1(end)                  \
    (be32_to_cpu((end)->w0) & END2_W0_FIRMWARE1)
#define xive2_end_is_firmware2(end)                  \
    (be32_to_cpu((end)->w0) & END2_W0_FIRMWARE2)

static inline uint64_t xive2_end_qaddr(Xive2End *end)
{
    return ((uint64_t) be32_to_cpu(end->w2) & END2_W2_EQ_ADDR_HI) << 32 |
        (be32_to_cpu(end->w3) & END2_W3_EQ_ADDR_LO);
}

void xive2_end_pic_print_info(Xive2End *end, uint32_t end_idx, Monitor *mon);
void xive2_end_queue_pic_print_info(Xive2End *end, uint32_t width,
                                    Monitor *mon);
void xive2_end_eas_pic_print_info(Xive2End *end, uint32_t end_idx,
                                  Monitor *mon);

/*
 * Notification Virtual Processor (NVP)
 */
typedef struct Xive2Nvp {
    uint32_t w0;
#define NVP2_W0_VALID              PPC_BIT32(0)
#define NVP2_W0_HW                 PPC_BIT32(7)
#define NVP2_W0_ESC_END            PPC_BIT32(25) /* 'N' bit 0:ESB 1:END */
    uint32_t w1;
#define NVP2_W1_CO                 PPC_BIT32(13)
#define NVP2_W1_CO_PRIV            PPC_BITMASK32(14, 15)
#define NVP2_W1_CO_THRID_VALID     PPC_BIT32(16)
#define NVP2_W1_CO_THRID           PPC_BITMASK32(17, 31)
    uint32_t w2;
#define NVP2_W2_CPPR               PPC_BITMASK32(0, 7)
#define NVP2_W2_IPB                PPC_BITMASK32(8, 15)
#define NVP2_W2_LSMFB              PPC_BITMASK32(16, 23)
    uint32_t w3;
    uint32_t w4;
#define NVP2_W4_ESC_ESB_BLOCK      PPC_BITMASK32(0, 3)  /* N:0 */
#define NVP2_W4_ESC_ESB_INDEX      PPC_BITMASK32(4, 31) /* N:0 */
#define NVP2_W4_ESC_END_BLOCK      PPC_BITMASK32(4, 7)  /* N:1 */
#define NVP2_W4_ESC_END_INDEX      PPC_BITMASK32(8, 31) /* N:1 */
    uint32_t w5;
#define NVP2_W5_PSIZE              PPC_BITMASK32(0, 1)
#define NVP2_W5_VP_END_BLOCK       PPC_BITMASK32(4, 7)
#define NVP2_W5_VP_END_INDEX       PPC_BITMASK32(8, 31)
    uint32_t w6;
    uint32_t w7;
} Xive2Nvp;

#define xive2_nvp_is_valid(nvp)    (be32_to_cpu((nvp)->w0) & NVP2_W0_VALID)
#define xive2_nvp_is_hw(nvp)       (be32_to_cpu((nvp)->w0) & NVP2_W0_HW)
#define xive2_nvp_is_co(nvp)       (be32_to_cpu((nvp)->w1) & NVP2_W1_CO)

/*
 * The VP number space in a block is defined by the END2_W6_VP_OFFSET
 * field of the XIVE END. When running in Gen1 mode (P9 compat mode),
 * the VP space is reduced to (1 << 19) VPs per block.
 */
#define XIVE2_NVP_SHIFT 24
#define XIVE2_NVP_COUNT (1 << XIVE2_NVP_SHIFT)

static inline uint32_t xive2_nvp_cam_line(uint8_t nvp_blk, uint32_t nvp_idx)
{
    return (nvp_blk << XIVE2_NVP_SHIFT) | nvp_idx;
}

static inline uint32_t xive2_nvp_idx(uint32_t cam_line)
{
    return cam_line & ((1 << XIVE2_NVP_SHIFT) - 1);
}

static inline uint32_t xive2_nvp_blk(uint32_t cam_line)
{
    return (cam_line >> XIVE2_NVP_SHIFT) & 0xf;
}
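
/*
 * Illustrative sketch only: how a Gen2 OS CAM line value in TIMA QW1
 * word 2 relates to an NVP block/index. xive2_tm_os_cam_example() is a
 * hypothetical helper; the actual context push/pull logic lives in the
 * XIVE2 presenter code, not in this header. The packed CAM value fits
 * in the 28-bit TM2_QW1W2_OS_CAM field and the "VO" bit marks the
 * context as valid.
 */
static inline uint32_t xive2_tm_os_cam_example(uint8_t nvp_blk,
                                               uint32_t nvp_idx)
{
    /* Logical (host-order) value of QW1 word 2: valid bit + CAM line */
    return TM2_QW1W2_VO | xive2_nvp_cam_line(nvp_blk, nvp_idx);
}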

/*
 * Notification Virtual Group or Crowd (NVG/NVC)
 */
typedef struct Xive2Nvgc {
    uint32_t w0;
#define NVGC2_W0_VALID             PPC_BIT32(0)
    uint32_t w1;
    uint32_t w2;
    uint32_t w3;
    uint32_t w4;
    uint32_t w5;
    uint32_t w6;
    uint32_t w7;
} Xive2Nvgc;

#endif /* PPC_XIVE2_REGS_H */