/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "sysemu/cpus.h"
#include "sysemu/tcg.h"
#include "exec/exec-all.h"

bool tcg_allowed;

/* exit the current TB, but without causing any exception to be raised */
void cpu_loop_exit_noexc(CPUState *cpu)
{
    cpu->exception_index = -1;
    cpu_loop_exit(cpu);
}

#if defined(CONFIG_SOFTMMU)
void cpu_reloading_memory_map(void)
{
    if (qemu_in_vcpu_thread() && current_cpu->running) {
        /* The guest can in theory prolong the RCU critical section as long
         * as it feels like. The major problem with this is that because it
         * can do multiple reconfigurations of the memory map within the
         * critical section, we could potentially accumulate an unbounded
         * collection of memory data structures awaiting reclamation.
         *
         * Because the only thing we're currently protecting with RCU is the
         * memory data structures, it's sufficient to break the critical
         * section in this callback, which we know will get called every
         * time the memory map is rearranged.
         *
         * (If we add anything else in the system that uses RCU to protect
         * its data structures, we will need to implement some other
         * mechanism to force TCG CPUs to exit the critical section, at
         * which point this part of this callback might become unnecessary.)
         *
         * This pair matches cpu_exec's rcu_read_lock()/rcu_read_unlock(),
         * which only protects cpu->as->dispatch. Since we know our caller
         * is about to reload it, it's safe to split the critical section.
         */
        rcu_read_unlock();
        rcu_read_lock();
    }
}
#endif

/* Unconditionally exit the current TB, back to the sigsetjmp in cpu_exec(). */
void cpu_loop_exit(CPUState *cpu)
{
    /* Undo the setting in cpu_tb_exec.  */
    cpu->can_do_io = 1;
    siglongjmp(cpu->jmp_env, 1);
}

/*
 * Exit the current TB, first restoring the guest CPU state from the host
 * return address @pc (if non-zero) so the exit is reported against the
 * correct guest instruction.
 */
void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc)
{
    if (pc) {
        cpu_restore_state(cpu, pc, true);
    }
    cpu_loop_exit(cpu);
}

/*
 * Exit to the main loop with EXCP_ATOMIC so the current instruction is
 * re-executed under the exclusive (serialized) execution path; used when
 * an atomic operation cannot be handled inline by the generated code.
 */
void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc)
{
    cpu->exception_index = EXCP_ATOMIC;
    cpu_loop_exit_restore(cpu, pc);
}
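
/*
 * Illustrative sketch, not part of the original file: targets typically
 * reach cpu_loop_exit_restore() from a TCG helper, passing GETPC() as the
 * host return address so guest state can be rolled back to the faulting
 * instruction.  The helper name and exception value below are hypothetical;
 * real targets use their own EXCP_* numbers and raise_exception helpers.
 */
static inline void example_raise_exception(CPUState *cpu, int excp,
                                           uintptr_t host_retaddr)
{
    /* Record which guest exception should be delivered ... */
    cpu->exception_index = excp;
    /*
     * ... then unwind to cpu_exec(), restoring guest state from the host
     * return address (GETPC() when called directly from a helper).
     */
    cpu_loop_exit_restore(cpu, host_retaddr);
}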