--- a/cputlb.c (600b819f235d6b6eb33fc33e09fe64f53eb9a9a6)
+++ b/cputlb.c (c13b27d826797ee12dcf4e4c289a7a6c401e620b)
 /*
  * Common CPU TLB handling
  *
  * Copyright (c) 2003 Fabrice Bellard
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
  * License as published by the Free Software Foundation; either

--- 873 unchanged lines hidden ---

 void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
                                             target_ulong addr,
                                             uint16_t idxmap, unsigned bits)
 {
     tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
                                        idxmap, bits);
 }

-void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
-                                                   target_ulong addr,
-                                                   uint16_t idxmap,
-                                                   unsigned bits)
+void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
+                                               target_ulong addr,
+                                               target_ulong len,
+                                               uint16_t idxmap,
+                                               unsigned bits)
 {
     TLBFlushRangeData d, *p;
     CPUState *dst_cpu;

-    /* If all bits are significant, this devolves to tlb_flush_page. */
-    if (bits >= TARGET_LONG_BITS) {
+    /*
+     * If all bits are significant, and len is small,
+     * this devolves to tlb_flush_page.
+     */
+    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
         tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
         return;
     }
     /* If no page bits are significant, this devolves to tlb_flush. */
     if (bits < TARGET_PAGE_BITS) {
         tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
         return;
     }

     /* This should already be page aligned */
     d.addr = addr & TARGET_PAGE_MASK;
-    d.len = TARGET_PAGE_SIZE;
+    d.len = len;
     d.idxmap = idxmap;
     d.bits = bits;

     /* Allocate a separate data block for each destination cpu. */
     CPU_FOREACH(dst_cpu) {
         if (dst_cpu != src_cpu) {
             p = g_memdup(&d, sizeof(d));
             async_run_on_cpu(dst_cpu, tlb_flush_page_bits_by_mmuidx_async_2,
                              RUN_ON_CPU_HOST_PTR(p));
         }
     }

     p = g_memdup(&d, sizeof(d));
     async_safe_run_on_cpu(src_cpu, tlb_flush_page_bits_by_mmuidx_async_2,
                           RUN_ON_CPU_HOST_PTR(p));
 }

+void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
+                                                   target_ulong addr,
+                                                   uint16_t idxmap,
+                                                   unsigned bits)
+{
+    tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
+                                              idxmap, bits);
+}
+
 /* update the TLBs so that writes to code in the virtual page 'addr'
    can be detected */
 void tlb_protect_code(ram_addr_t ram_addr)
 {
     cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                              DIRTY_MEMORY_CODE);
 }

--- 1821 unchanged lines hidden ---
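
For context, a minimal usage sketch of the range interface introduced above. The caller, the CPUState pointer cs, the base address addr, the choice of a 16-page length, and the use of MMU index 0 are all illustrative assumptions, not taken from this patch; only the function itself and the TARGET_* macros come from the code shown.

/*
 * Hypothetical caller: flush a 16-page virtually contiguous range in
 * mmu_idx 0 on all vCPUs and wait for the flush to complete.  The base
 * address is page aligned as the function expects, and passing
 * bits == TARGET_LONG_BITS means every address bit is significant.
 */
tlb_flush_range_by_mmuidx_all_cpus_synced(cs,
                                          addr & TARGET_PAGE_MASK,
                                          16 * TARGET_PAGE_SIZE,
                                          1u << 0,           /* idxmap: mmu_idx 0 only */
                                          TARGET_LONG_BITS);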