--- mmu.c (a02001086bbfb4da35d1228bebc2f1b442db455f)
+++ mmu.c (4ed89f2228061422ce5f62545fd0b6f6648bd2cc)
/*
 * linux/arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.

--- 178 unchanged lines hidden ---

	}
	return 0;
}
early_param("cachepolicy", early_cachepolicy);

static int __init early_nocache(char *__unused)
{
	char *p = "buffered";
-	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
+	pr_warn("nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nocache", early_nocache);

static int __init early_nowrite(char *__unused)
{
	char *p = "uncached";
-	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
+	pr_warn("nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nowb", early_nowrite);

#ifndef CONFIG_ARM_LPAE
static int __init early_ecc(char *p)
{

--- 568 unchanged lines hidden ---
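Every visible hunk in this diff makes the same two-part change: printk() calls that pass an explicit KERN_* level become the matching pr_err()/pr_warn()/pr_notice() helper, and user-visible strings that had been split across source lines are re-joined so the messages can be found with grep. The helpers are thin macros over printk(); a simplified sketch of their shape (the real definitions in <linux/printk.h> also wrap the format in a file-local pr_fmt() prefix, omitted here):

	/* Simplified sketch of the level-specific printk() helpers;
	 * the real macros also apply pr_fmt(fmt). */
	#define pr_err(fmt, ...)	printk(KERN_ERR fmt, ##__VA_ARGS__)
	#define pr_warn(fmt, ...)	printk(KERN_WARNING fmt, ##__VA_ARGS__)
	#define pr_notice(fmt, ...)	printk(KERN_NOTICE fmt, ##__VA_ARGS__)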

	phys_addr_t phys;
	pgd_t *pgd;

	addr = md->virtual;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length);

	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
-		printk(KERN_ERR "MM: CPU does not support supersection "
-		       "mapping for 0x%08llx at 0x%08lx\n",
+		pr_err("MM: CPU does not support supersection mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to insure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if (type->domain) {
-		printk(KERN_ERR "MM: invalid domain in supersection "
-		       "mapping for 0x%08llx at 0x%08lx\n",
+		pr_err("MM: invalid domain in supersection mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
-		printk(KERN_ERR "MM: cannot create mapping for 0x%08llx"
-		       " at 0x%08lx invalid alignment\n",
+		pr_err("MM: cannot create mapping for 0x%08llx at 0x%08lx invalid alignment\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/*
	 * Shift bits [35:32] of address into bits [23:20] of PMD
	 * (See ARMv6 spec).
	 */

--- 26 unchanged lines hidden ---
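The hidden lines implement the comment above. As a hedged sketch (not the kernel's exact code) of the ARMv6 supersection encoding it describes, with the 16 MiB-aligned base in descriptor bits [31:24] and bits [35:32] of a 36-bit physical address folded into bits [23:20]:

	#include <stdint.h>

	/* Hypothetical illustration of the supersection PMD layout. */
	static uint32_t supersection_pmd(uint64_t phys, uint32_t prot)
	{
		uint32_t pmd = (uint32_t)phys & 0xff000000;	/* PA[31:24]: 16 MiB base */

		pmd |= (uint32_t)((phys >> 32) & 0xf) << 20;	/* PA[35:32] -> bits [23:20] */
		return pmd | prot;	/* section type/AP bits supplied by the caller */
	}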

static void __init create_mapping(struct map_desc *md)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	const struct mem_type *type;
	pgd_t *pgd;

	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
-		printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx"
-		       " at 0x%08lx in user region\n",
-		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
+		pr_warn("BUG: not creating mapping for 0x%08llx at 0x%08lx in user region\n",
+			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET &&
	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
-		printk(KERN_WARNING "BUG: mapping for 0x%08llx"
-		       " at 0x%08lx out of vmalloc space\n",
-		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
+		pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
+			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
	}

	type = &mem_types[md->type];

#ifndef CONFIG_ARM_LPAE
	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		create_36bit_mapping(md, type);
		return;
	}
#endif

	addr = md->virtual & PAGE_MASK;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));

	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
-		printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx can not "
-		       "be mapped using pages, ignoring.\n",
-		       (long long)__pfn_to_phys(md->pfn), addr);
+		pr_warn("BUG: map for 0x%08llx at 0x%08lx can not be mapped using pages, ignoring.\n",
+			(long long)__pfn_to_phys(md->pfn), addr);
		return;
	}

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		unsigned long next = pgd_addr_end(addr, end);

--- 153 unchanged lines hidden ---
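An aside on the test that closes the create_mapping() hunk above: OR-ing addr, phys and length together lets a single mask test catch a misaligned bit in any of the three values. A standalone illustration, assuming the classic non-LPAE 1 MiB section size:

	#include <stdint.h>
	#include <stdio.h>

	#define SECTION_SIZE	(1UL << 20)		/* 1 MiB, non-LPAE */
	#define SECTION_MASK	(~(SECTION_SIZE - 1))

	int main(void)
	{
		/* phys is not 1 MiB aligned, so the combined test fires */
		uint32_t addr = 0x00100000, phys = 0x80180000, len = 0x00200000;

		if ((addr | phys | len) & ~SECTION_MASK)
			printf("cannot use section mappings\n");
		return 0;
	}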

 * area - the default is 240m.
 */
static int __init early_vmalloc(char *arg)
{
	unsigned long vmalloc_reserve = memparse(arg, NULL);

	if (vmalloc_reserve < SZ_16M) {
		vmalloc_reserve = SZ_16M;
-		printk(KERN_WARNING
-		       "vmalloc area too small, limiting to %luMB\n",
+		pr_warn("vmalloc area too small, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
		vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
-		printk(KERN_WARNING
-		       "vmalloc area is too big, limiting to %luMB\n",
+		pr_warn("vmalloc area is too big, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
	return 0;
}
early_param("vmalloc", early_vmalloc);

--- 16 unchanged lines hidden ---
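For context: early_vmalloc() above feeds the vmalloc= boot parameter through memparse(), which accepts the usual K/M/G suffixes, and clamps the result between SZ_16M and VMALLOC_END - (PAGE_OFFSET + SZ_32M), emitting the warnings just converted to pr_warn(). A hypothetical command-line usage:

	vmalloc=384M	reserve a 384 MiB vmalloc area
	vmalloc=8M	below the 16 MiB floor, so it is raised to 16 MiB with a warning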

		else
			size_limit = vmalloc_limit - reg->base;


		if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) {

			if (highmem) {
				pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n",
					  &block_start, &block_end);
				memblock_remove(reg->base, reg->size);
				continue;
			}

			if (reg->size > size_limit) {
				phys_addr_t overlap_size = reg->size - size_limit;

				pr_notice("Truncating RAM at %pa-%pa to -%pa",
					  &block_start, &block_end, &vmalloc_limit);
				memblock_remove(vmalloc_limit, overlap_size);
				block_end = vmalloc_limit;
			}
		}

		if (!highmem) {
			if (block_end > arm_lowmem_limit) {
				if (reg->size > size_limit)

--- 421 unchanged lines hidden ---