/*
 * x86 gdb server stub
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 * Copyright (c) 2013 SUSE LINUX Products GmbH
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/gdbstub.h"

#ifdef TARGET_X86_64
static const int gpr_map[16] = {
    R_EAX, R_EBX, R_ECX, R_EDX, R_ESI, R_EDI, R_EBP, R_ESP,
    8, 9, 10, 11, 12, 13, 14, 15
};
#else
#define gpr_map gpr_map32
#endif
static const int gpr_map32[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };

/*
 * Keep these in sync with the assignment to gdb_num_core_regs in
 * target/i386/cpu.c and with the machine description.
 *
 * Register counts (32-bit or 64-bit):
 *   general regs ----->  8 or 16
 *   IP --------------->  1
 *   FLAGS ------------>  1
 *   segment regs ----->  9 (6 segments, plus fs_base, gs_base,
 *                           kernel_gs_base)
 *   control regs ----->  6
 *   fpu regs --------->  16 (8 data regs + 8 control/status words)
 *   xmm regs --------->  8 or 16
 *   MXCSR ------------>  1
 *
 *   total ----> 8+1+1+9+6+16+8+1=50 or 16+1+1+9+6+16+16+1=66
 */
#define IDX_NB_IP       1
#define IDX_NB_FLAGS    1
#define IDX_NB_SEG      (6 + 3)
#define IDX_NB_CTL      6
#define IDX_NB_FP       16
#define IDX_NB_MXCSR    1

#define IDX_IP_REG      CPU_NB_REGS
#define IDX_FLAGS_REG   (IDX_IP_REG + IDX_NB_IP)
#define IDX_SEG_REGS    (IDX_FLAGS_REG + IDX_NB_FLAGS)
#define IDX_CTL_REGS    (IDX_SEG_REGS + IDX_NB_SEG)
#define IDX_FP_REGS     (IDX_CTL_REGS + IDX_NB_CTL)
#define IDX_XMM_REGS    (IDX_FP_REGS + IDX_NB_FP)
#define IDX_MXCSR_REG   (IDX_XMM_REGS + CPU_NB_REGS)

#define IDX_CTL_CR0_REG  (IDX_CTL_REGS + 0)
#define IDX_CTL_CR2_REG  (IDX_CTL_REGS + 1)
#define IDX_CTL_CR3_REG  (IDX_CTL_REGS + 2)
#define IDX_CTL_CR4_REG  (IDX_CTL_REGS + 3)
#define IDX_CTL_CR8_REG  (IDX_CTL_REGS + 4)
#define IDX_CTL_EFER_REG (IDX_CTL_REGS + 5)

#ifdef TARGET_X86_64
#define GDB_FORCE_64 1
#else
#define GDB_FORCE_64 0
#endif


int x86_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    uint64_t tpr;

    /* N.B. GDB can't deal with changes in registers or sizes in the middle
       of a session. So if we're in 32-bit mode on a 64-bit cpu, still act
       as if we're on a 64-bit cpu. */

    if (n < CPU_NB_REGS) {
        if (TARGET_LONG_BITS == 64) {
            if (env->hflags & HF_CS64_MASK) {
                return gdb_get_reg64(mem_buf, env->regs[gpr_map[n]]);
            } else if (n < CPU_NB_REGS32) {
                return gdb_get_reg64(mem_buf,
                                     env->regs[gpr_map[n]] & 0xffffffffUL);
            } else {
                memset(mem_buf, 0, sizeof(target_ulong));
                return sizeof(target_ulong);
            }
        } else {
            return gdb_get_reg32(mem_buf, env->regs[gpr_map32[n]]);
        }
    } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
#ifdef USE_X86LDOUBLE
        /* FIXME: byteswap float values - after fixing fpregs layout. */
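        /*
         * Each x87 register is transferred as its raw 10-byte (80-bit
         * extended precision) image, which is the layout GDB expects
         * for the i387 stack registers st0-st7.
         */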
        memcpy(mem_buf, &env->fpregs[n - IDX_FP_REGS], 10);
#else
        memset(mem_buf, 0, 10);
#endif
        return 10;
    } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
        n -= IDX_XMM_REGS;
        if (n < CPU_NB_REGS32 || TARGET_LONG_BITS == 64) {
            stq_p(mem_buf, env->xmm_regs[n].ZMM_Q(0));
            stq_p(mem_buf + 8, env->xmm_regs[n].ZMM_Q(1));
            return 16;
        }
    } else {
        switch (n) {
        case IDX_IP_REG:
            if (TARGET_LONG_BITS == 64) {
                if (env->hflags & HF_CS64_MASK) {
                    return gdb_get_reg64(mem_buf, env->eip);
                } else {
                    return gdb_get_reg64(mem_buf, env->eip & 0xffffffffUL);
                }
            } else {
                return gdb_get_reg32(mem_buf, env->eip);
            }
        case IDX_FLAGS_REG:
            return gdb_get_reg32(mem_buf, env->eflags);

        case IDX_SEG_REGS:
            return gdb_get_reg32(mem_buf, env->segs[R_CS].selector);
        case IDX_SEG_REGS + 1:
            return gdb_get_reg32(mem_buf, env->segs[R_SS].selector);
        case IDX_SEG_REGS + 2:
            return gdb_get_reg32(mem_buf, env->segs[R_DS].selector);
        case IDX_SEG_REGS + 3:
            return gdb_get_reg32(mem_buf, env->segs[R_ES].selector);
        case IDX_SEG_REGS + 4:
            return gdb_get_reg32(mem_buf, env->segs[R_FS].selector);
        case IDX_SEG_REGS + 5:
            return gdb_get_reg32(mem_buf, env->segs[R_GS].selector);

        case IDX_SEG_REGS + 6:
            if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
                return gdb_get_reg64(mem_buf, env->segs[R_FS].base);
            }
            return gdb_get_reg32(mem_buf, env->segs[R_FS].base);

        case IDX_SEG_REGS + 7:
            if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
                return gdb_get_reg64(mem_buf, env->segs[R_GS].base);
            }
            return gdb_get_reg32(mem_buf, env->segs[R_GS].base);

        case IDX_SEG_REGS + 8:
#ifdef TARGET_X86_64
            if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
                return gdb_get_reg64(mem_buf, env->kernelgsbase);
            }
            return gdb_get_reg32(mem_buf, env->kernelgsbase);
#else
            return gdb_get_reg32(mem_buf, 0);
#endif

        case IDX_FP_REGS + 8:
            return gdb_get_reg32(mem_buf, env->fpuc);
        case IDX_FP_REGS + 9:
            return gdb_get_reg32(mem_buf, (env->fpus & ~0x3800) |
                                          (env->fpstt & 0x7) << 11);
        case IDX_FP_REGS + 10:
            return gdb_get_reg32(mem_buf, 0); /* ftag */
        case IDX_FP_REGS + 11:
            return gdb_get_reg32(mem_buf, 0); /* fiseg */
        case IDX_FP_REGS + 12:
            return gdb_get_reg32(mem_buf, 0); /* fioff */
        case IDX_FP_REGS + 13:
            return gdb_get_reg32(mem_buf, 0); /* foseg */
        case IDX_FP_REGS + 14:
            return gdb_get_reg32(mem_buf, 0); /* fooff */
        case IDX_FP_REGS + 15:
            return gdb_get_reg32(mem_buf, 0); /* fop */

        case IDX_MXCSR_REG:
            return gdb_get_reg32(mem_buf, env->mxcsr);

        case IDX_CTL_CR0_REG:
            if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
                return gdb_get_reg64(mem_buf, env->cr[0]);
            }
            return gdb_get_reg32(mem_buf, env->cr[0]);

        case IDX_CTL_CR2_REG:
            if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
                return gdb_get_reg64(mem_buf, env->cr[2]);
            }
            return gdb_get_reg32(mem_buf, env->cr[2]);

        case IDX_CTL_CR3_REG:
            if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
                return gdb_get_reg64(mem_buf, env->cr[3]);
            }
            return gdb_get_reg32(mem_buf, env->cr[3]);

        case IDX_CTL_CR4_REG:
            if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
                return gdb_get_reg64(mem_buf, env->cr[4]);
            }
            return gdb_get_reg32(mem_buf, env->cr[4]);
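        /*
         * CR8 has no storage of its own: architecturally it is a view of
         * the local APIC task-priority register, so fetch it from the
         * APIC when one is modelled (softmmu) and report 0 otherwise.
         */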
        case IDX_CTL_CR8_REG:
#ifdef CONFIG_SOFTMMU
            tpr = cpu_get_apic_tpr(cpu->apic_state);
#else
            tpr = 0;
#endif
            if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
                return gdb_get_reg64(mem_buf, tpr);
            }
            return gdb_get_reg32(mem_buf, tpr);

        case IDX_CTL_EFER_REG:
            if ((env->hflags & HF_CS64_MASK) || GDB_FORCE_64) {
                return gdb_get_reg64(mem_buf, env->efer);
            }
            return gdb_get_reg32(mem_buf, env->efer);
        }
    }
    return 0;
}

static int x86_cpu_gdb_load_seg(X86CPU *cpu, int sreg, uint8_t *mem_buf)
{
    CPUX86State *env = &cpu->env;
    /* GDB transfers the selector as a 32-bit value; only the low 16 bits
       form the actual segment selector. */
    uint16_t selector = ldl_p(mem_buf);

    if (selector != env->segs[sreg].selector) {
#if defined(CONFIG_USER_ONLY)
        cpu_x86_load_seg(env, sreg, selector);
#else
        unsigned int limit, flags;
        target_ulong base;

        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
            int dpl = (env->eflags & VM_MASK) ? 3 : 0;
            base = selector << 4;
            limit = 0xffff;
            flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                    DESC_A_MASK | (dpl << DESC_DPL_SHIFT);
        } else {
            if (!cpu_x86_get_descr_debug(env, selector, &base, &limit,
                                         &flags)) {
                return 4;
            }
        }
        cpu_x86_load_seg_cache(env, sreg, selector, base, limit, flags);
#endif
    }
    return 4;
}

int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint32_t tmp;

    /* N.B. GDB can't deal with changes in registers or sizes in the middle
       of a session. So if we're in 32-bit mode on a 64-bit cpu, still act
       as if we're on a 64-bit cpu. */

    if (n < CPU_NB_REGS) {
        if (TARGET_LONG_BITS == 64) {
            if (env->hflags & HF_CS64_MASK) {
                env->regs[gpr_map[n]] = ldtul_p(mem_buf);
            } else if (n < CPU_NB_REGS32) {
                env->regs[gpr_map[n]] = ldtul_p(mem_buf) & 0xffffffffUL;
            }
            return sizeof(target_ulong);
        } else if (n < CPU_NB_REGS32) {
            n = gpr_map32[n];
            env->regs[n] &= ~0xffffffffUL;
            env->regs[n] |= (uint32_t)ldl_p(mem_buf);
            return 4;
        }
    } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
#ifdef USE_X86LDOUBLE
        /* FIXME: byteswap float values - after fixing fpregs layout. */
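        /*
         * Mirror of the read path: accept the raw 10-byte x87 image.
         * Without USE_X86LDOUBLE the value is discarded, but the size is
         * still reported so the register packet stays aligned.
         */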
        memcpy(&env->fpregs[n - IDX_FP_REGS], mem_buf, 10);
#endif
        return 10;
    } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
        n -= IDX_XMM_REGS;
        if (n < CPU_NB_REGS32 || TARGET_LONG_BITS == 64) {
            env->xmm_regs[n].ZMM_Q(0) = ldq_p(mem_buf);
            env->xmm_regs[n].ZMM_Q(1) = ldq_p(mem_buf + 8);
            return 16;
        }
    } else {
        switch (n) {
        case IDX_IP_REG:
            if (TARGET_LONG_BITS == 64) {
                if (env->hflags & HF_CS64_MASK) {
                    env->eip = ldq_p(mem_buf);
                } else {
                    env->eip = ldq_p(mem_buf) & 0xffffffffUL;
                }
                return 8;
            } else {
                env->eip &= ~0xffffffffUL;
                env->eip |= (uint32_t)ldl_p(mem_buf);
                return 4;
            }
        case IDX_FLAGS_REG:
            env->eflags = ldl_p(mem_buf);
            return 4;

        case IDX_SEG_REGS:
            return x86_cpu_gdb_load_seg(cpu, R_CS, mem_buf);
        case IDX_SEG_REGS + 1:
            return x86_cpu_gdb_load_seg(cpu, R_SS, mem_buf);
        case IDX_SEG_REGS + 2:
            return x86_cpu_gdb_load_seg(cpu, R_DS, mem_buf);
        case IDX_SEG_REGS + 3:
            return x86_cpu_gdb_load_seg(cpu, R_ES, mem_buf);
        case IDX_SEG_REGS + 4:
            return x86_cpu_gdb_load_seg(cpu, R_FS, mem_buf);
        case IDX_SEG_REGS + 5:
            return x86_cpu_gdb_load_seg(cpu, R_GS, mem_buf);

        case IDX_SEG_REGS + 6:
            if (env->hflags & HF_CS64_MASK) {
                env->segs[R_FS].base = ldq_p(mem_buf);
                return 8;
            }
            env->segs[R_FS].base = ldl_p(mem_buf);
            return 4;

        case IDX_SEG_REGS + 7:
            if (env->hflags & HF_CS64_MASK) {
                env->segs[R_GS].base = ldq_p(mem_buf);
                return 8;
            }
            env->segs[R_GS].base = ldl_p(mem_buf);
            return 4;

#ifdef TARGET_X86_64
        case IDX_SEG_REGS + 8:
            if (env->hflags & HF_CS64_MASK) {
                env->kernelgsbase = ldq_p(mem_buf);
                return 8;
            }
            env->kernelgsbase = ldl_p(mem_buf);
            return 4;
#endif

        case IDX_FP_REGS + 8:
            cpu_set_fpuc(env, ldl_p(mem_buf));
            return 4;
        case IDX_FP_REGS + 9:
            tmp = ldl_p(mem_buf);
            env->fpstt = (tmp >> 11) & 7;
            env->fpus = tmp & ~0x3800;
            return 4;
        case IDX_FP_REGS + 10: /* ftag */
            return 4;
        case IDX_FP_REGS + 11: /* fiseg */
            return 4;
        case IDX_FP_REGS + 12: /* fioff */
            return 4;
        case IDX_FP_REGS + 13: /* foseg */
            return 4;
        case IDX_FP_REGS + 14: /* fooff */
            return 4;
        case IDX_FP_REGS + 15: /* fop */
            return 4;

        case IDX_MXCSR_REG:
            cpu_set_mxcsr(env, ldl_p(mem_buf));
            return 4;

        case IDX_CTL_CR0_REG:
            if (env->hflags & HF_CS64_MASK) {
                cpu_x86_update_cr0(env, ldq_p(mem_buf));
                return 8;
            }
            cpu_x86_update_cr0(env, ldl_p(mem_buf));
            return 4;

        case IDX_CTL_CR2_REG:
            if (env->hflags & HF_CS64_MASK) {
                env->cr[2] = ldq_p(mem_buf);
                return 8;
            }
            env->cr[2] = ldl_p(mem_buf);
            return 4;

        case IDX_CTL_CR3_REG:
            if (env->hflags & HF_CS64_MASK) {
                cpu_x86_update_cr3(env, ldq_p(mem_buf));
                return 8;
            }
            cpu_x86_update_cr3(env, ldl_p(mem_buf));
            return 4;

        case IDX_CTL_CR4_REG:
            if (env->hflags & HF_CS64_MASK) {
                cpu_x86_update_cr4(env, ldq_p(mem_buf));
                return 8;
            }
            cpu_x86_update_cr4(env, ldl_p(mem_buf));
            return 4;

        case IDX_CTL_CR8_REG:
            if (env->hflags & HF_CS64_MASK) {
#ifdef CONFIG_SOFTMMU
                cpu_set_apic_tpr(cpu->apic_state, ldq_p(mem_buf));
#endif
                return 8;
            }
#ifdef CONFIG_SOFTMMU
            cpu_set_apic_tpr(cpu->apic_state, ldl_p(mem_buf));
#endif
            return 4;
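        /*
         * EFER writes go through cpu_load_efer() rather than a plain
         * assignment so that state derived from EFER (such as the
         * long-mode hflags) can be recomputed to match.
         */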
        case IDX_CTL_EFER_REG:
            if (env->hflags & HF_CS64_MASK) {
                cpu_load_efer(env, ldq_p(mem_buf));
                return 8;
            }
            cpu_load_efer(env, ldl_p(mem_buf));
            return 4;
        }
    }
    /* Unrecognised register. */
    return 0;
}