/*
 * PMU register read/write functions for TCG IBM POWER chips
 *
 * Copyright IBM Corp. 2021
 *
 * Authors:
 *  Daniel Henrique Barboza <danielhb413@gmail.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)

/*
 * Checks whether the Group A SPRs (MMCR0, MMCR2, MMCRA, and the
 * PMCs) have problem state read access.
 *
 * Read access is granted for all PMCC values except 0b01, which
 * causes a Facility Unavailable Interrupt.
 */
static bool spr_groupA_read_allowed(DisasContext *ctx)
{
    if (!ctx->mmcr0_pmcc0 && ctx->mmcr0_pmcc1) {
        gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_PMU);
        return false;
    }

    return true;
}

/*
 * Checks whether the Group A SPRs (MMCR0, MMCR2, MMCRA, and the
 * PMCs) have problem state write access.
 *
 * Write access is granted for PMCC values 0b10 and 0b11. Userspace
 * writing with PMCC 0b00 will generate a Hypervisor Emulation
 * Assistance Interrupt. Userspace writing with PMCC 0b01 will
 * generate a Facility Unavailable Interrupt.
 */
static bool spr_groupA_write_allowed(DisasContext *ctx)
{
    if (ctx->mmcr0_pmcc0) {
        return true;
    }

    if (ctx->mmcr0_pmcc1) {
        /* PMCC = 0b01 */
        gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_PMU);
    } else {
        /* PMCC = 0b00 */
        gen_hvpriv_exception(ctx, POWERPC_EXCP_PRIV_REG);
    }

    return false;
}

/*
 * Helper function to avoid code repetition between the MMCR0 and
 * MMCR2 problem state write functions.
 */
static TCGv masked_gprn_for_spr_write(int gprn, int sprn,
                                      uint64_t spr_mask)
{
    TCGv ret = tcg_temp_new();
    TCGv t0 = tcg_temp_new();

    /* 'ret' starts with all mask bits cleared */
    gen_load_spr(ret, sprn);
    tcg_gen_andi_tl(ret, ret, ~(spr_mask));

    /* Apply the mask to 'gprn' in a temp var */
    tcg_gen_andi_tl(t0, cpu_gpr[gprn], spr_mask);

    /* OR the masked gprn bits into 'ret' */
    tcg_gen_or_tl(ret, ret, t0);

    return ret;
}

void spr_read_MMCR0_ureg(DisasContext *ctx, int gprn, int sprn)
{
    TCGv t0;

    if (!spr_groupA_read_allowed(ctx)) {
        return;
    }

    t0 = tcg_temp_new();

    /*
     * Filter out all bits but FC, PMAO, and PMAE, according
     * to ISA v3.1, in 10.4.4 Monitor Mode Control Register 0,
     * fourth paragraph.
     */
    gen_load_spr(t0, SPR_POWER_MMCR0);
    tcg_gen_andi_tl(t0, t0, MMCR0_UREG_MASK);
    tcg_gen_mov_tl(cpu_gpr[gprn], t0);
}

static void write_MMCR0_common(DisasContext *ctx, TCGv val)
{
    /*
     * helper_store_mmcr0 performs clock based operations that
     * will cause 'bad icount read' errors if translator_io_start()
     * is not executed beforehand.
     */
    translator_io_start(&ctx->base);
    gen_helper_store_mmcr0(cpu_env, val);

    /*
     * End the translation block because MMCR0 writes can change
     * ctx->pmu_insn_cnt.
     */
    ctx->base.is_jmp = DISAS_EXIT_UPDATE;
}

void spr_write_MMCR0_ureg(DisasContext *ctx, int sprn, int gprn)
{
    TCGv masked_gprn;

    if (!spr_groupA_write_allowed(ctx)) {
        return;
    }

    /*
     * Filter out all bits but FC, PMAO, and PMAE, according
     * to ISA v3.1, in 10.4.4 Monitor Mode Control Register 0,
     * fourth paragraph.
     */
    masked_gprn = masked_gprn_for_spr_write(gprn, SPR_POWER_MMCR0,
                                            MMCR0_UREG_MASK);
    write_MMCR0_common(ctx, masked_gprn);
}

void spr_read_MMCR2_ureg(DisasContext *ctx, int gprn, int sprn)
{
    TCGv t0;

    if (!spr_groupA_read_allowed(ctx)) {
        return;
    }

    t0 = tcg_temp_new();

    /*
     * On read, filter out all bits that are not FCnP0 bits.
     * When MMCR0[PMCC] is set to 0b10 or 0b11, giving problem
     * state programs read/write access to MMCR2, only the FCnP0
     * bits can be accessed. All other bits are not changed when
     * mtspr is executed in problem state, and all other bits
     * return 0s when mfspr is executed in problem state,
     * according to ISA v3.1, section 10.4.6 Monitor Mode
     * Control Register 2, p. 1316, third paragraph.
     */
    gen_load_spr(t0, SPR_POWER_MMCR2);
    tcg_gen_andi_tl(t0, t0, MMCR2_UREG_MASK);
    tcg_gen_mov_tl(cpu_gpr[gprn], t0);
}

void spr_write_MMCR2_ureg(DisasContext *ctx, int sprn, int gprn)
{
    TCGv masked_gprn;

    if (!spr_groupA_write_allowed(ctx)) {
        return;
    }

    /*
     * Filter the bits that can be written using MMCR2_UREG_MASK,
     * similar to what is done in spr_write_MMCR0_ureg().
     */
    masked_gprn = masked_gprn_for_spr_write(gprn, SPR_POWER_MMCR2,
                                            MMCR2_UREG_MASK);
    gen_store_spr(SPR_POWER_MMCR2, masked_gprn);
}

/*
 * PMC reads go through a helper because the counters may need to be
 * brought up to date from the clock first, hence the
 * translator_io_start() call (see the comment in write_MMCR0_common()).
 */
void spr_read_PMC(DisasContext *ctx, int gprn, int sprn)
{
    TCGv_i32 t_sprn = tcg_constant_i32(sprn);

    translator_io_start(&ctx->base);
    gen_helper_read_pmc(cpu_gpr[gprn], cpu_env, t_sprn);
}

void spr_read_PMC14_ureg(DisasContext *ctx, int gprn, int sprn)
{
    if (!spr_groupA_read_allowed(ctx)) {
        return;
    }

    /* The privileged PMC SPR numbers are the userspace numbers + 0x10 */
    spr_read_PMC(ctx, gprn, sprn + 0x10);
}

void spr_read_PMC56_ureg(DisasContext *ctx, int gprn, int sprn)
{
    /*
     * If PMCC = 0b11, PMC5 and PMC6 aren't included in the Performance
     * Monitor, and a read attempt results in a Facility Unavailable
     * Interrupt.
     */
    if (ctx->mmcr0_pmcc0 && ctx->mmcr0_pmcc1) {
        gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_PMU);
        return;
    }

    /* The remaining steps are similar to the PMC1-4 userspace read */
    spr_read_PMC14_ureg(ctx, gprn, sprn);
}

/*
 * Like spr_read_PMC(), PMC writes go through a helper and require
 * translator_io_start() because of clock based operations.
 */
void spr_write_PMC(DisasContext *ctx, int sprn, int gprn)
{
    TCGv_i32 t_sprn = tcg_constant_i32(sprn);

    translator_io_start(&ctx->base);
    gen_helper_store_pmc(cpu_env, t_sprn, cpu_gpr[gprn]);
}

void spr_write_PMC14_ureg(DisasContext *ctx, int sprn, int gprn)
{
    if (!spr_groupA_write_allowed(ctx)) {
        return;
    }

    spr_write_PMC(ctx, sprn + 0x10, gprn);
}

void spr_write_PMC56_ureg(DisasContext *ctx, int sprn, int gprn)
{
    /*
     * If PMCC = 0b11, PMC5 and PMC6 aren't included in the Performance
     * Monitor, and a write attempt results in a Facility Unavailable
     * Interrupt.
     */
    if (ctx->mmcr0_pmcc0 && ctx->mmcr0_pmcc1) {
        gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_PMU);
        return;
    }

    /* The remaining steps are similar to the PMC1-4 userspace write */
    spr_write_PMC14_ureg(ctx, sprn, gprn);
}

/* Privileged MMCR0 write: the unfiltered value is passed to the helper */
void spr_write_MMCR0(DisasContext *ctx, int sprn, int gprn)
{
    write_MMCR0_common(ctx, cpu_gpr[gprn]);
}

/*
 * As with MMCR0 writes, the MMCR1 store helper does clock based work,
 * so translator_io_start() must be called first.
 */
void spr_write_MMCR1(DisasContext *ctx, int sprn, int gprn)
{
    translator_io_start(&ctx->base);
    gen_helper_store_mmcr1(cpu_env, cpu_gpr[gprn]);
}
#else
/*
 * Stubs for builds without PMU emulation (32-bit targets or user-only
 * mode): fall back to the generic ureg/noaccess/generic SPR accessors.
 */
void spr_read_MMCR0_ureg(DisasContext *ctx, int gprn, int sprn)
{
    spr_read_ureg(ctx, gprn, sprn);
}

void spr_write_MMCR0_ureg(DisasContext *ctx, int sprn, int gprn)
{
    spr_noaccess(ctx, gprn, sprn);
}

void spr_read_MMCR2_ureg(DisasContext *ctx, int gprn, int sprn)
{
    spr_read_ureg(ctx, gprn, sprn);
}

void spr_write_MMCR2_ureg(DisasContext *ctx, int sprn, int gprn)
{
    spr_noaccess(ctx, gprn, sprn);
}

void spr_read_PMC14_ureg(DisasContext *ctx, int gprn, int sprn)
{
    spr_read_ureg(ctx, gprn, sprn);
}

void spr_read_PMC56_ureg(DisasContext *ctx, int gprn, int sprn)
{
    spr_read_ureg(ctx, gprn, sprn);
}

void spr_write_PMC14_ureg(DisasContext *ctx, int sprn, int gprn)
{
    spr_noaccess(ctx, gprn, sprn);
}

void spr_write_PMC56_ureg(DisasContext *ctx, int sprn, int gprn)
{
    spr_noaccess(ctx, gprn, sprn);
}

void spr_write_MMCR0(DisasContext *ctx, int sprn, int gprn)
{
    spr_write_generic(ctx, sprn, gprn);
}

void spr_write_MMCR1(DisasContext *ctx, int sprn, int gprn)
{
    spr_write_generic(ctx, sprn, gprn);
}

void spr_write_PMC(DisasContext *ctx, int sprn, int gprn)
{
    spr_write_generic(ctx, sprn, gprn);
}
#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */
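
/*
 * Usage note: these accessors are wired into the SPR table by the CPU
 * init code. The sketch below is illustrative only; the spr_register()
 * argument order and the exact supervisor-state accessors used for
 * UMMCR0 are assumptions that may differ between QEMU versions:
 *
 *   spr_register(env, SPR_POWER_UMMCR0, "UMMCR0",
 *                &spr_read_MMCR0_ureg, &spr_write_MMCR0_ureg,
 *                &spr_read_ureg, &spr_write_ureg,
 *                0x00000000);
 */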