/*
 * x86 gdb server stub
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 * Copyright (c) 2013 SUSE LINUX Products GmbH
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "accel/tcg/vcpu-state.h"
#include "cpu.h"
#include "exec/gdbstub.h"
#include "gdbstub/helpers.h"
#ifdef CONFIG_LINUX_USER
#include "linux-user/qemu.h"
#endif

/*
 * Map GDB's general-purpose register numbering onto env->regs[] indices.
 * GDB orders the GPRs rax, rbx, rcx, rdx, rsi, rdi, rbp, rsp (then
 * r8..r15 on 64-bit), which differs from the R_E* enum order.
 */
#ifdef TARGET_X86_64
static const int gpr_map[16] = {
    R_EAX, R_EBX, R_ECX, R_EDX, R_ESI, R_EDI, R_EBP, R_ESP,
    8, 9, 10, 11, 12, 13, 14, 15
};
#else
#define gpr_map gpr_map32
#endif
static const int gpr_map32[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };

/*
 * Keep these in sync with assignment to
 * gdb_num_core_regs in target/i386/cpu.c
 * and with the machine description
 */

/*
 * SEG: 6 segments, plus fs_base, gs_base, kernel_gs_base
 */

/*
 * general regs -----> 8 or 16
 */
#define IDX_NB_IP       1
#define IDX_NB_FLAGS    1
#define IDX_NB_SEG      (6 + 3)
#define IDX_NB_CTL      6
#define IDX_NB_FP       16
/*
 * fpu regs ----------> 8 or 16
 */
#define IDX_NB_MXCSR    1
/*
 * total ----> 8+1+1+9+6+16+8+1=50 or 16+1+1+9+6+16+16+1=66
 */

/* First GDB register number of each group, laid out back to back. */
#define IDX_IP_REG      CPU_NB_REGS
#define IDX_FLAGS_REG   (IDX_IP_REG + IDX_NB_IP)
#define IDX_SEG_REGS    (IDX_FLAGS_REG + IDX_NB_FLAGS)
#define IDX_CTL_REGS    (IDX_SEG_REGS + IDX_NB_SEG)
#define IDX_FP_REGS     (IDX_CTL_REGS + IDX_NB_CTL)
#define IDX_XMM_REGS    (IDX_FP_REGS + IDX_NB_FP)
#define IDX_MXCSR_REG   (IDX_XMM_REGS + CPU_NB_REGS)

#define IDX_CTL_CR0_REG     (IDX_CTL_REGS + 0)
#define IDX_CTL_CR2_REG     (IDX_CTL_REGS + 1)
#define IDX_CTL_CR3_REG     (IDX_CTL_REGS + 2)
#define IDX_CTL_CR4_REG     (IDX_CTL_REGS + 3)
#define IDX_CTL_CR8_REG     (IDX_CTL_REGS + 4)
#define IDX_CTL_EFER_REG    (IDX_CTL_REGS + 5)

/*
 * On a 64-bit target always present registers at their 64-bit width,
 * regardless of the CPU's current execution mode; GDB cannot cope with
 * register sizes changing mid-session (see the N.B. comments below).
 */
#ifdef TARGET_X86_64
#define GDB_FORCE_64 1
#else
#define GDB_FORCE_64 0
#endif

/*
 * Emit @val into @buf as a 64-bit value when the CPU is in a 64-bit code
 * segment or the target is x86_64 (GDB_FORCE_64), otherwise as 32 bits.
 * Returns the number of bytes written.
 */
static int gdb_read_reg_cs64(uint32_t hflags, GByteArray *buf, target_ulong val)
{
    if ((hflags & HF_CS64_MASK) || GDB_FORCE_64) {
        return gdb_get_reg64(buf, val);
    }
    return gdb_get_reg32(buf, val);
}

/*
 * Counterpart of gdb_read_reg_cs64 for writes: consume 8 bytes from @buf
 * when the CPU is in a 64-bit code segment, else 4.  Returns the number
 * of bytes consumed.
 *
 * NOTE(review): unlike the read side, this does not honour GDB_FORCE_64,
 * so on a 64-bit target in 32-bit mode reads produce 8 bytes while writes
 * consume 4.  This matches upstream behaviour — confirm intent before
 * changing.
 */
static int gdb_write_reg_cs64(uint32_t hflags, uint8_t *buf, target_ulong *val)
{
    if (hflags & HF_CS64_MASK) {
        *val = ldq_p(buf);
        return 8;
    }
    *val = ldl_p(buf);
    return 4;
}

/*
 * Emit a target_ulong register value.  64-bit targets always send 8 bytes,
 * zero-extending the value when the CPU is not in a 64-bit code segment;
 * 32-bit targets send 4 bytes.
 */
static int gdb_get_reg(CPUX86State *env, GByteArray *mem_buf, target_ulong val)
{
    if (TARGET_LONG_BITS == 64) {
        if (env->hflags & HF_CS64_MASK) {
            return gdb_get_reg64(mem_buf, val);
        } else {
            return gdb_get_reg64(mem_buf, val & 0xffffffffUL);
        }
    } else {
        return gdb_get_reg32(mem_buf, val);
    }
}

/*
 * Read GDB register @n into @mem_buf.  Returns the number of bytes
 * appended, or 0 for an unrecognised register number.
 */
int x86_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    uint64_t tpr;

    /* N.B. GDB can't deal with changes in registers or sizes in the middle
       of a session. So if we're in 32-bit mode on a 64-bit cpu, still act
       as if we're on a 64-bit cpu. */

    if (n < CPU_NB_REGS) {
        if (TARGET_LONG_BITS == 64) {
            if (env->hflags & HF_CS64_MASK) {
                return gdb_get_reg64(mem_buf, env->regs[gpr_map[n]]);
            } else if (n < CPU_NB_REGS32) {
                /* 32-bit mode: zero-extend the low 8 GPRs. */
                return gdb_get_reg64(mem_buf,
                                     env->regs[gpr_map[n]] & 0xffffffffUL);
            } else {
                /* r8..r15 don't exist outside 64-bit mode; report 0. */
                return gdb_get_regl(mem_buf, 0);
            }
        } else {
            return gdb_get_reg32(mem_buf, env->regs[gpr_map32[n]]);
        }
    } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
        /*
         * ST(i) is relative to the FPU stack top (fpstt); convert the
         * logical index to the physical fpregs[] slot.  Each register is
         * sent as an 80-bit little-endian value (64-bit mantissa +
         * 16-bit sign/exponent).
         */
        int st_index = n - IDX_FP_REGS;
        int r_index = (st_index + env->fpstt) % 8;
        floatx80 *fp = &env->fpregs[r_index].d;
        int len = gdb_get_reg64(mem_buf, cpu_to_le64(fp->low));
        len += gdb_get_reg16(mem_buf, cpu_to_le16(fp->high));
        return len;
    } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
        n -= IDX_XMM_REGS;
        if (n < CPU_NB_REGS32 || TARGET_LONG_BITS == 64) {
            return gdb_get_reg128(mem_buf,
                                  env->xmm_regs[n].ZMM_Q(1),
                                  env->xmm_regs[n].ZMM_Q(0));
        }
    } else {
        switch (n) {
        case IDX_IP_REG:
            return gdb_get_reg(env, mem_buf, env->eip);
        case IDX_FLAGS_REG:
            return gdb_get_reg32(mem_buf, env->eflags);

        case IDX_SEG_REGS:
            return gdb_get_reg32(mem_buf, env->segs[R_CS].selector);
        case IDX_SEG_REGS + 1:
            return gdb_get_reg32(mem_buf, env->segs[R_SS].selector);
        case IDX_SEG_REGS + 2:
            return gdb_get_reg32(mem_buf, env->segs[R_DS].selector);
        case IDX_SEG_REGS + 3:
            return gdb_get_reg32(mem_buf, env->segs[R_ES].selector);
        case IDX_SEG_REGS + 4:
            return gdb_get_reg32(mem_buf, env->segs[R_FS].selector);
        case IDX_SEG_REGS + 5:
            return gdb_get_reg32(mem_buf, env->segs[R_GS].selector);
        case IDX_SEG_REGS + 6:
            return gdb_read_reg_cs64(env->hflags, mem_buf, env->segs[R_FS].base);
        case IDX_SEG_REGS + 7:
            return gdb_read_reg_cs64(env->hflags, mem_buf, env->segs[R_GS].base);

        case IDX_SEG_REGS + 8:
#ifdef TARGET_X86_64
            return gdb_read_reg_cs64(env->hflags, mem_buf, env->kernelgsbase);
#else
            return gdb_get_reg32(mem_buf, 0);
#endif

        case IDX_FP_REGS + 8:
            return gdb_get_reg32(mem_buf, env->fpuc);
        case IDX_FP_REGS + 9:
            /* Re-pack the stack-top field (bits 13..11) into fpus. */
            return gdb_get_reg32(mem_buf, (env->fpus & ~0x3800) |
                                          (env->fpstt & 0x7) << 11);
        case IDX_FP_REGS + 10:
            return gdb_get_reg32(mem_buf, 0); /* ftag */
        case IDX_FP_REGS + 11:
            return gdb_get_reg32(mem_buf, 0); /* fiseg */
        case IDX_FP_REGS + 12:
            return gdb_get_reg32(mem_buf, 0); /* fioff */
        case IDX_FP_REGS + 13:
            return gdb_get_reg32(mem_buf, 0); /* foseg */
        case IDX_FP_REGS + 14:
            return gdb_get_reg32(mem_buf, 0); /* fooff */
        case IDX_FP_REGS + 15:
            return gdb_get_reg32(mem_buf, 0); /* fop */

        case IDX_MXCSR_REG:
            /* mxcsr is lazily maintained; sync it before reporting. */
            update_mxcsr_from_sse_status(env);
            return gdb_get_reg32(mem_buf, env->mxcsr);

        case IDX_CTL_CR0_REG:
            return gdb_read_reg_cs64(env->hflags, mem_buf, env->cr[0]);
        case IDX_CTL_CR2_REG:
            return gdb_read_reg_cs64(env->hflags, mem_buf, env->cr[2]);
        case IDX_CTL_CR3_REG:
            return gdb_read_reg_cs64(env->hflags, mem_buf, env->cr[3]);
        case IDX_CTL_CR4_REG:
            return gdb_read_reg_cs64(env->hflags, mem_buf, env->cr[4]);
        case IDX_CTL_CR8_REG:
            /* CR8 mirrors the APIC TPR; user-only builds have no APIC. */
#ifndef CONFIG_USER_ONLY
            tpr = cpu_get_apic_tpr(cpu->apic_state);
#else
            tpr = 0;
#endif
            return gdb_read_reg_cs64(env->hflags, mem_buf, tpr);

        case IDX_CTL_EFER_REG:
            return gdb_read_reg_cs64(env->hflags, mem_buf, env->efer);
        }
    }
    return 0;
}

/*
 * Load a segment selector written by GDB into segment register @sreg,
 * refreshing the hidden descriptor cache.  Always consumes 4 bytes.
 */
static int x86_cpu_gdb_load_seg(X86CPU *cpu, X86Seg sreg, uint8_t *mem_buf)
{
    CPUX86State *env = &cpu->env;
    uint16_t selector = ldl_p(mem_buf); /* GDB sends 32 bits; low 16 used */

    if (selector != env->segs[sreg].selector) {
#if defined(CONFIG_USER_ONLY)
        cpu_x86_load_seg(env, sreg, selector);
#else
        unsigned int limit, flags;
        target_ulong base;

        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
            /* Real or vm86 mode: synthesize a flat 64K segment. */
            int dpl = (env->eflags & VM_MASK) ? 3 : 0;
            base = selector << 4;
            limit = 0xffff;
            flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                    DESC_A_MASK | (dpl << DESC_DPL_SHIFT);
        } else {
            /* Protected mode: look the descriptor up in the GDT/LDT. */
            if (!cpu_x86_get_descr_debug(env, selector, &base, &limit,
                                         &flags)) {
                return 4;
            }
        }
        cpu_x86_load_seg_cache(env, sreg, selector, base, limit, flags);
#endif
    }
    return 4;
}

/*
 * Write a target_ulong register from @mem_buf into *@val; counterpart of
 * gdb_get_reg.  64-bit targets always consume 8 bytes (masking to 32 bits
 * outside a 64-bit code segment); 32-bit targets consume 4.  Returns the
 * number of bytes consumed.
 */
static int gdb_write_reg(CPUX86State *env, uint8_t *mem_buf, target_ulong *val)
{
    if (TARGET_LONG_BITS == 64) {
        if (env->hflags & HF_CS64_MASK) {
            *val = ldq_p(mem_buf);
        } else {
            *val = ldq_p(mem_buf) & 0xffffffffUL;
        }
        return 8;
    } else {
        *val = (uint32_t)ldl_p(mem_buf);
        return 4;
    }
}

/*
 * Write GDB register @n from @mem_buf into the CPU state.  Returns the
 * number of bytes consumed, or 0 for an unrecognised register number.
 */
int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    target_ulong tmp;
    int len;

    /* N.B. GDB can't deal with changes in registers or sizes in the middle
       of a session. So if we're in 32-bit mode on a 64-bit cpu, still act
       as if we're on a 64-bit cpu. */

    if (n < CPU_NB_REGS) {
        if (TARGET_LONG_BITS == 64) {
            if (env->hflags & HF_CS64_MASK) {
                env->regs[gpr_map[n]] = ldtul_p(mem_buf);
            } else if (n < CPU_NB_REGS32) {
                env->regs[gpr_map[n]] = ldtul_p(mem_buf) & 0xffffffffUL;
            }
            /* Writes to r8..r15 outside 64-bit mode are silently dropped. */
            return sizeof(target_ulong);
        } else if (n < CPU_NB_REGS32) {
            n = gpr_map32[n];
            env->regs[n] &= ~0xffffffffUL;
            env->regs[n] |= (uint32_t)ldl_p(mem_buf);
            return 4;
        }
    } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
        /*
         * NOTE(review): unlike the read path, this indexes fpregs[]
         * directly without rotating by fpstt; matches upstream — confirm
         * before changing.
         */
        floatx80 *fp = (floatx80 *) &env->fpregs[n - IDX_FP_REGS];
        fp->low = le64_to_cpu(* (uint64_t *) mem_buf);
        fp->high = le16_to_cpu(* (uint16_t *) (mem_buf + 8));
        return 10;
    } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
        n -= IDX_XMM_REGS;
        if (n < CPU_NB_REGS32 || TARGET_LONG_BITS == 64) {
            env->xmm_regs[n].ZMM_Q(0) = ldq_p(mem_buf);
            env->xmm_regs[n].ZMM_Q(1) = ldq_p(mem_buf + 8);
            return 16;
        }
    } else {
        switch (n) {
        case IDX_IP_REG:
            return gdb_write_reg(env, mem_buf, &env->eip);
        case IDX_FLAGS_REG:
            env->eflags = ldl_p(mem_buf);
            return 4;

        case IDX_SEG_REGS:
            return x86_cpu_gdb_load_seg(cpu, R_CS, mem_buf);
        case IDX_SEG_REGS + 1:
            return x86_cpu_gdb_load_seg(cpu, R_SS, mem_buf);
        case IDX_SEG_REGS + 2:
            return x86_cpu_gdb_load_seg(cpu, R_DS, mem_buf);
        case IDX_SEG_REGS + 3:
            return x86_cpu_gdb_load_seg(cpu, R_ES, mem_buf);
        case IDX_SEG_REGS + 4:
            return x86_cpu_gdb_load_seg(cpu, R_FS, mem_buf);
        case IDX_SEG_REGS + 5:
            return x86_cpu_gdb_load_seg(cpu, R_GS, mem_buf);
        case IDX_SEG_REGS + 6:
            return gdb_write_reg_cs64(env->hflags, mem_buf, &env->segs[R_FS].base);
        case IDX_SEG_REGS + 7:
            return gdb_write_reg_cs64(env->hflags, mem_buf, &env->segs[R_GS].base);
        case IDX_SEG_REGS + 8:
#ifdef TARGET_X86_64
            return gdb_write_reg_cs64(env->hflags, mem_buf, &env->kernelgsbase);
#endif
            return 4;

        case IDX_FP_REGS + 8:
            cpu_set_fpuc(env, ldl_p(mem_buf));
            return 4;
        case IDX_FP_REGS + 9:
            /* Split fsw back into the fpus flags and the fpstt stack top. */
            tmp = ldl_p(mem_buf);
            env->fpstt = (tmp >> 11) & 7;
            env->fpus = tmp & ~0x3800;
            return 4;
        /* The remaining x87 state words are accepted but ignored. */
        case IDX_FP_REGS + 10: /* ftag */
            return 4;
        case IDX_FP_REGS + 11: /* fiseg */
            return 4;
        case IDX_FP_REGS + 12: /* fioff */
            return 4;
        case IDX_FP_REGS + 13: /* foseg */
            return 4;
        case IDX_FP_REGS + 14: /* fooff */
            return 4;
        case IDX_FP_REGS + 15: /* fop */
            return 4;

        case IDX_MXCSR_REG:
            cpu_set_mxcsr(env, ldl_p(mem_buf));
            return 4;

        /*
         * Control-register writes: always consume the bytes so the packet
         * stays in sync, but only apply them in softmmu builds.
         */
        case IDX_CTL_CR0_REG:
            len = gdb_write_reg_cs64(env->hflags, mem_buf, &tmp);
#ifndef CONFIG_USER_ONLY
            cpu_x86_update_cr0(env, tmp);
#endif
            return len;

        case IDX_CTL_CR2_REG:
            len = gdb_write_reg_cs64(env->hflags, mem_buf, &tmp);
#ifndef CONFIG_USER_ONLY
            env->cr[2] = tmp;
#endif
            return len;

        case IDX_CTL_CR3_REG:
            len = gdb_write_reg_cs64(env->hflags, mem_buf, &tmp);
#ifndef CONFIG_USER_ONLY
            cpu_x86_update_cr3(env, tmp);
#endif
            return len;

        case IDX_CTL_CR4_REG:
            len = gdb_write_reg_cs64(env->hflags, mem_buf, &tmp);
#ifndef CONFIG_USER_ONLY
            cpu_x86_update_cr4(env, tmp);
#endif
            return len;

        case IDX_CTL_CR8_REG:
            len = gdb_write_reg_cs64(env->hflags, mem_buf, &tmp);
#ifndef CONFIG_USER_ONLY
            cpu_set_apic_tpr(cpu->apic_state, tmp);
#endif
            return len;

        case IDX_CTL_EFER_REG:
            len = gdb_write_reg_cs64(env->hflags, mem_buf, &tmp);
#ifndef CONFIG_USER_ONLY
            cpu_load_efer(env, tmp);
#endif
            return len;
        }
    }
    /* Unrecognised register. */
    return 0;
}

#ifdef CONFIG_LINUX_USER

/* Index of orig_ax in the linux-user extra register set. */
#define IDX_ORIG_AX 0

/*
 * Read a register from the linux-user coprocessor set: currently only
 * orig_ax (the syscall number saved on kernel entry), taken from the
 * task state.
 */
static int x86_cpu_gdb_read_linux_register(CPUState *cs, GByteArray *mem_buf,
                                           int n)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    switch (n) {
    case IDX_ORIG_AX:
        return gdb_get_reg(env, mem_buf, get_task_state(cs)->orig_ax);
    }
    return 0;
}

/* Write counterpart of x86_cpu_gdb_read_linux_register. */
static int x86_cpu_gdb_write_linux_register(CPUState *cs, uint8_t *mem_buf,
                                            int n)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    switch (n) {
    case IDX_ORIG_AX:
        return gdb_write_reg(env, mem_buf, &get_task_state(cs)->orig_ax);
    }
    return 0;
}

#endif

/*
 * Register the linux-user-only extra register set (orig_ax) with the
 * gdbstub core; a no-op for other configurations.
 */
void x86_cpu_gdb_init(CPUState *cs)
{
#ifdef CONFIG_LINUX_USER
    gdb_register_coprocessor(cs, x86_cpu_gdb_read_linux_register,
                             x86_cpu_gdb_write_linux_register,
#ifdef TARGET_X86_64
                             gdb_find_static_feature("i386-64bit-linux.xml"),
#else
                             gdb_find_static_feature("i386-32bit-linux.xml"),
#endif
                             0);
#endif
}