--- cputlb.c (31e213e30617b986a6e8ab4d9a0646eb4e6a4227)
+++ cputlb.c (53d284554cfb476a43807fe94fa59909ed5d9ff8)
 /*
  * Common CPU TLB handling
  *
  * Copyright (c) 2003 Fabrice Bellard
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
  * License as published by the Free Software Foundation; either
--- 64 unchanged lines hidden ---
  */
 QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
 #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

 void tlb_init(CPUState *cpu)
 {
     CPUArchState *env = cpu->env_ptr;

-    qemu_spin_init(&env->tlb_lock);
+    qemu_spin_init(&env->tlb_c.lock);
 }

 /* flush_all_helper: run fn across all cpus
  *
  * If the wait flag is set then the src cpu's helper will be queued as
  * "safe" work and the loop exited creating a synchronisation point
  * where all queued work will be finished before execution starts
  * again.
--- 39 unchanged lines hidden ---
         return;
     }

     assert_cpu_is_self(cpu);
     atomic_set(&env->tlb_flush_count, env->tlb_flush_count + 1);
     tlb_debug("(count: %zu)\n", tlb_flush_count());

     /*
-     * tlb_table/tlb_v_table updates from any thread must hold tlb_lock.
+     * tlb_table/tlb_v_table updates from any thread must hold tlb_c.lock.
      * However, updates from the owner thread (as is the case here; see the
      * above assert_cpu_is_self) do not need atomic_set because all reads
      * that do not hold the lock are performed by the same owner thread.
      */
-    qemu_spin_lock(&env->tlb_lock);
+    qemu_spin_lock(&env->tlb_c.lock);
     memset(env->tlb_table, -1, sizeof(env->tlb_table));
     memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
-    qemu_spin_unlock(&env->tlb_lock);
+    qemu_spin_unlock(&env->tlb_c.lock);

     cpu_tb_jmp_cache_clear(cpu);

     env->vtlb_index = 0;
     env->tlb_flush_addr = -1;
     env->tlb_flush_mask = 0;

     atomic_mb_set(&cpu->pending_tlb_flush, 0);
--- 36 unchanged lines hidden ---
     CPUArchState *env = cpu->env_ptr;
     unsigned long mmu_idx_bitmask = data.host_int;
     int mmu_idx;

     assert_cpu_is_self(cpu);

     tlb_debug("start: mmu_idx:0x%04lx\n", mmu_idx_bitmask);

-    qemu_spin_lock(&env->tlb_lock);
+    qemu_spin_lock(&env->tlb_c.lock);
     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {

         if (test_bit(mmu_idx, &mmu_idx_bitmask)) {
             tlb_debug("%d\n", mmu_idx);

             memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
             memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
         }
     }
-    qemu_spin_unlock(&env->tlb_lock);
+    qemu_spin_unlock(&env->tlb_c.lock);

     cpu_tb_jmp_cache_clear(cpu);

     tlb_debug("done\n");
 }

 void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
 {
--- 40 unchanged lines hidden ---
 static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                         target_ulong page)
 {
     return tlb_hit_page(tlb_entry->addr_read, page) ||
            tlb_hit_page(tlb_addr_write(tlb_entry), page) ||
            tlb_hit_page(tlb_entry->addr_code, page);
 }

-/* Called with tlb_lock held */
+/* Called with tlb_c.lock held */
 static inline void tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
                                           target_ulong page)
 {
     if (tlb_hit_page_anyprot(tlb_entry, page)) {
         memset(tlb_entry, -1, sizeof(*tlb_entry));
     }
 }

-/* Called with tlb_lock held */
+/* Called with tlb_c.lock held */
 static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                               target_ulong page)
 {
     int k;

     assert_cpu_is_self(ENV_GET_CPU(env));
     for (k = 0; k < CPU_VTLB_SIZE; k++) {
         tlb_flush_entry_locked(&env->tlb_v_table[mmu_idx][k], page);
--- 16 unchanged lines hidden ---
                   TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                   env->tlb_flush_addr, env->tlb_flush_mask);

         tlb_flush(cpu);
         return;
     }

     addr &= TARGET_PAGE_MASK;
-    qemu_spin_lock(&env->tlb_lock);
+    qemu_spin_lock(&env->tlb_c.lock);
     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
         tlb_flush_entry_locked(tlb_entry(env, mmu_idx, addr), addr);
         tlb_flush_vtlb_page_locked(env, mmu_idx, addr);
     }
-    qemu_spin_unlock(&env->tlb_lock);
+    qemu_spin_unlock(&env->tlb_c.lock);

     tb_flush_jmp_cache(cpu, addr);
 }

 void tlb_flush_page(CPUState *cpu, target_ulong addr)
 {
     tlb_debug("page :" TARGET_FMT_lx "\n", addr);

--- 19 unchanged lines hidden ---
     unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS;
     int mmu_idx;

     assert_cpu_is_self(cpu);

     tlb_debug("flush page addr:"TARGET_FMT_lx" mmu_idx:0x%lx\n",
               addr, mmu_idx_bitmap);

-    qemu_spin_lock(&env->tlb_lock);
+    qemu_spin_lock(&env->tlb_c.lock);
     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
         if (test_bit(mmu_idx, &mmu_idx_bitmap)) {
             tlb_flush_entry_locked(tlb_entry(env, mmu_idx, addr), addr);
             tlb_flush_vtlb_page_locked(env, mmu_idx, addr);
         }
     }
-    qemu_spin_unlock(&env->tlb_lock);
+    qemu_spin_unlock(&env->tlb_c.lock);

     tb_flush_jmp_cache(cpu, addr);
 }

 static void tlb_check_page_and_flush_by_mmuidx_async_work(CPUState *cpu,
                                                           run_on_cpu_data data)
 {
     CPUArchState *env = cpu->env_ptr;
--- 110 unchanged lines hidden ---
  * There are a number of reasons to do this but for normal RAM the
  * most usual is detecting writes to code regions which may invalidate
  * generated code.
  *
  * Other vCPUs might be reading their TLBs during guest execution, so we update
  * te->addr_write with atomic_set. We don't need to worry about this for
  * oversized guests as MTTCG is disabled for them.
  *
- * Called with tlb_lock held.
+ * Called with tlb_c.lock held.
  */
 static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                          uintptr_t start, uintptr_t length)
 {
     uintptr_t addr = tlb_entry->addr_write;

     if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
         addr &= TARGET_PAGE_MASK;
--- 5 unchanged lines hidden ---
             atomic_set(&tlb_entry->addr_write,
                        tlb_entry->addr_write | TLB_NOTDIRTY);
 #endif
         }
     }
 }

 /*
- * Called with tlb_lock held.
+ * Called with tlb_c.lock held.
  * Called only from the vCPU context, i.e. the TLB's owner thread.
  */
 static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
 {
     *d = *s;
 }

 /* This is a cross vCPU call (i.e. another vCPU resetting the flags of
  * the target vCPU).
- * We must take tlb_lock to avoid racing with another vCPU update. The only
+ * We must take tlb_c.lock to avoid racing with another vCPU update. The only
  * thing actually updated is the target TLB entry ->addr_write flags.
  */
 void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
 {
     CPUArchState *env;

     int mmu_idx;

     env = cpu->env_ptr;
-    qemu_spin_lock(&env->tlb_lock);
+    qemu_spin_lock(&env->tlb_c.lock);
     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
         unsigned int i;

         for (i = 0; i < CPU_TLB_SIZE; i++) {
             tlb_reset_dirty_range_locked(&env->tlb_table[mmu_idx][i], start1,
                                          length);
         }

         for (i = 0; i < CPU_VTLB_SIZE; i++) {
             tlb_reset_dirty_range_locked(&env->tlb_v_table[mmu_idx][i], start1,
                                          length);
         }
     }
-    qemu_spin_unlock(&env->tlb_lock);
+    qemu_spin_unlock(&env->tlb_c.lock);
 }

-/* Called with tlb_lock held */
+/* Called with tlb_c.lock held */
 static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
                                          target_ulong vaddr)
 {
     if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
         tlb_entry->addr_write = vaddr;
     }
 }

 /* update the TLB corresponding to virtual page vaddr
    so that it is no longer dirty */
 void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
 {
     CPUArchState *env = cpu->env_ptr;
     int mmu_idx;

     assert_cpu_is_self(cpu);

     vaddr &= TARGET_PAGE_MASK;
-    qemu_spin_lock(&env->tlb_lock);
+    qemu_spin_lock(&env->tlb_c.lock);
     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
         tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
     }

     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
         int k;
         for (k = 0; k < CPU_VTLB_SIZE; k++) {
             tlb_set_dirty1_locked(&env->tlb_v_table[mmu_idx][k], vaddr);
         }
     }
-    qemu_spin_unlock(&env->tlb_lock);
+    qemu_spin_unlock(&env->tlb_c.lock);
 }

 /* Our TLB does not support large pages, so remember the area covered by
    large pages and trigger a full TLB flush if these are invalidated. */
 static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                                target_ulong size)
 {
     target_ulong mask = ~(size - 1);
--- 84 unchanged lines hidden ---

     /*
      * Hold the TLB lock for the rest of the function. We could acquire/release
      * the lock several times in the function, but it is faster to amortize the
      * acquisition cost by acquiring it just once. Note that this leads to
      * a longer critical section, but this is not a concern since the TLB lock
      * is unlikely to be contended.
      */
-    qemu_spin_lock(&env->tlb_lock);
+    qemu_spin_lock(&env->tlb_c.lock);

     /* Make sure there's no cached translation for the new page. */
     tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);

     /*
      * Only evict the old entry to the victim tlb if it's for a
      * different page; otherwise just overwrite the stale data.
      */
--- 50 unchanged lines hidden ---
             tn.addr_write = address;
         }
         if (prot & PAGE_WRITE_INV) {
             tn.addr_write |= TLB_INVALID_MASK;
         }
     }

     copy_tlb_helper_locked(te, &tn);
-    qemu_spin_unlock(&env->tlb_lock);
+    qemu_spin_unlock(&env->tlb_c.lock);
 }

 /* Add a new TLB entry, but without specifying the memory
  * transaction attributes to be used.
  */
 void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                   hwaddr paddr, int prot,
                   int mmu_idx, target_ulong size)
--- 164 unchanged lines hidden ---
 #else
         cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
 #endif

         if (cmp == page) {
             /* Found entry in victim tlb, swap tlb and iotlb. */
             CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index];

-            qemu_spin_lock(&env->tlb_lock);
+            qemu_spin_lock(&env->tlb_c.lock);
             copy_tlb_helper_locked(&tmptlb, tlb);
             copy_tlb_helper_locked(tlb, vtlb);
             copy_tlb_helper_locked(vtlb, &tmptlb);
-            qemu_spin_unlock(&env->tlb_lock);
+            qemu_spin_unlock(&env->tlb_c.lock);

             CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index];
             CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx];
             tmpio = *io; *io = *vio; *vio = tmpio;
             return true;
         }
     }
     return false;
--- 237 unchanged lines hidden ---
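
Summary of the change: every access to the TLB spinlock now goes through env->tlb_c.lock instead of env->tlb_lock, i.e. the lock has been moved into a sub-structure of the per-CPU state that is shared across all MMU modes. The sketch below shows the layout this implies; it assumes the sub-structure is a CPUTLBCommon type defined in the cpu-defs headers (that definition is not part of this file, so the struct name and placement here are illustrative, not the authoritative declaration).

#include "qemu/thread.h"    /* QemuSpin */

/*
 * Illustrative sketch only: the field name "lock" matches the accesses
 * shown in the diff above; the struct name and any other members are
 * assumptions.
 */
typedef struct CPUTLBCommon {
    /* Serializes updates to tlb_table/tlb_v_table from any thread. */
    QemuSpin lock;
} CPUTLBCommon;

/*
 * In CPUArchState, roughly:
 *   before:  QemuSpin tlb_lock;    ->  qemu_spin_lock(&env->tlb_lock);
 *   after:   CPUTLBCommon tlb_c;   ->  qemu_spin_lock(&env->tlb_c.lock);
 */

Grouping the lock with other TLB-wide state keeps the locking discipline in one place while leaving the per-mode tlb_table/tlb_v_table arrays untouched, which is why the diff is a pure rename of the lock accesses and comments.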