/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_S390_MEM_DETECT_H
#define _ASM_S390_MEM_DETECT_H

#include <linux/types.h>
#include <asm/page.h>

enum physmem_info_source {
	MEM_DETECT_NONE = 0,
	MEM_DETECT_SCLP_STOR_INFO,
	MEM_DETECT_DIAG260,
	MEM_DETECT_SCLP_READ_INFO,
	MEM_DETECT_BIN_SEARCH
};

struct physmem_range {
	u64 start;
	u64 end;
};

enum reserved_range_type {
	RR_DECOMPRESSOR,
	RR_INITRD,
	RR_VMLINUX,
	RR_AMODE31,
	RR_IPLREPORT,
	RR_CERT_COMP_LIST,
	RR_MEM_DETECT_EXTENDED,
	RR_VMEM,
	RR_MAX
};

struct reserved_range {
	unsigned long start;
	unsigned long end;
	struct reserved_range *chain;
};

/*
 * Storage element id is defined as 1 byte (up to 256 storage elements).
 * In practice only storage element ids 0 and 1 are used.
 * According to the architecture one storage element can have as many as
 * 1020 subincrements. 255 physmem_ranges are embedded in physmem_info.
 * If more physmem_ranges are required, a block of memory from an already
 * known physmem_range is taken (online_extended points to it).
 */
#define MEM_INLINED_ENTRIES 255 /* (PAGE_SIZE - 16) / 16 */

struct physmem_info {
	u32 range_count;
	u8 info_source;
	unsigned long usable;
	struct reserved_range reserved[RR_MAX];
	struct physmem_range online[MEM_INLINED_ENTRIES];
	struct physmem_range *online_extended;
};
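
/*
 * Illustrative sketch (not part of this header, n and r are hypothetical
 * locals): ranges with an index below MEM_INLINED_ENTRIES live in the
 * embedded online[] array, all further ranges live in the block that
 * online_extended points to. A lookup of range n therefore boils down to:
 *
 *	struct physmem_range *r;
 *
 *	if (n < MEM_INLINED_ENTRIES)
 *		r = &physmem_info.online[n];
 *	else
 *		r = &physmem_info.online_extended[n - MEM_INLINED_ENTRIES];
 *
 * __get_physmem_range() below implements exactly this split.
 */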

extern struct physmem_info physmem_info;

void add_physmem_online_range(u64 start, u64 end);
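
/*
 * Usage sketch (illustrative only, the backend choice and the range values
 * are hypothetical): a memory detection backend records each range it finds
 * and the caller tags which source provided the information, e.g.:
 *
 *	add_physmem_online_range(0, 0x80000000UL);
 *	physmem_info.info_source = MEM_DETECT_DIAG260;
 */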

static inline int __get_physmem_range(u32 n, unsigned long *start,
				      unsigned long *end, bool respect_usable_limit)
{
	if (n >= physmem_info.range_count) {
		*start = 0;
		*end = 0;
		return -1;
	}

	if (n < MEM_INLINED_ENTRIES) {
		*start = (unsigned long)physmem_info.online[n].start;
		*end = (unsigned long)physmem_info.online[n].end;
	} else {
		*start = (unsigned long)physmem_info.online_extended[n - MEM_INLINED_ENTRIES].start;
		*end = (unsigned long)physmem_info.online_extended[n - MEM_INLINED_ENTRIES].end;
	}

	if (respect_usable_limit && physmem_info.usable) {
		if (*start >= physmem_info.usable)
			return -1;
		if (*end > physmem_info.usable)
			*end = physmem_info.usable;
	}
	return 0;
}

/**
 * for_each_physmem_usable_range - early online memory range iterator
 * @i: an integer used as loop variable
 * @p_start: ptr to unsigned long for start address of the range
 * @p_end: ptr to unsigned long for end address of the range
 *
 * Walks over detected online memory ranges below the usable limit.
 */
#define for_each_physmem_usable_range(i, p_start, p_end)		\
	for (i = 0; !__get_physmem_range(i, p_start, p_end, true); i++)

/* Walks over all detected online memory ranges disregarding the usable limit. */
#define for_each_physmem_online_range(i, p_start, p_end)		\
	for (i = 0; !__get_physmem_range(i, p_start, p_end, false); i++)
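
/*
 * Usage sketch (illustrative only, the locals are hypothetical): walk all
 * usable online ranges, e.g. to print them or to feed them into memblock:
 *
 *	unsigned long start, end;
 *	u32 i;
 *
 *	for_each_physmem_usable_range(i, &start, &end)
 *		pr_info("online range: 0x%lx-0x%lx\n", start, end);
 */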

static inline const char *get_physmem_info_source(void)
{
	switch (physmem_info.info_source) {
	case MEM_DETECT_SCLP_STOR_INFO:
		return "sclp storage info";
	case MEM_DETECT_DIAG260:
		return "diag260";
	case MEM_DETECT_SCLP_READ_INFO:
		return "sclp read info";
	case MEM_DETECT_BIN_SEARCH:
		return "binary search";
	}
	return "none";
}

#define RR_TYPE_NAME(t) case RR_ ## t: return #t
static inline const char *get_rr_type_name(enum reserved_range_type t)
{
	switch (t) {
	RR_TYPE_NAME(DECOMPRESSOR);
	RR_TYPE_NAME(INITRD);
	RR_TYPE_NAME(VMLINUX);
	RR_TYPE_NAME(AMODE31);
	RR_TYPE_NAME(IPLREPORT);
	RR_TYPE_NAME(CERT_COMP_LIST);
	RR_TYPE_NAME(MEM_DETECT_EXTENDED);
	RR_TYPE_NAME(VMEM);
	default:
		return "UNKNOWN";
	}
}

#define for_each_physmem_reserved_type_range(t, range, p_start, p_end)				\
	for (range = &physmem_info.reserved[t], *p_start = range->start, *p_end = range->end;	\
	     range && range->end; range = range->chain ? __va(range->chain) : NULL,		\
	     *p_start = range ? range->start : 0, *p_end = range ? range->end : 0)
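
/*
 * Usage sketch (illustrative only, "range", "start" and "end" are
 * hypothetical locals): walk all chunks of one reserved range type,
 * e.g. the initrd:
 *
 *	struct reserved_range *range;
 *	unsigned long start, end;
 *
 *	for_each_physmem_reserved_type_range(RR_INITRD, range, &start, &end)
 *		pr_info("initrd chunk: 0x%lx-0x%lx\n", start, end);
 */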

static inline struct reserved_range *__physmem_reserved_next(enum reserved_range_type *t,
							     struct reserved_range *range)
{
	if (!range) {
		range = &physmem_info.reserved[*t];
		if (range->end)
			return range;
	}
	if (range->chain)
		return __va(range->chain);
	while (++*t < RR_MAX) {
		range = &physmem_info.reserved[*t];
		if (range->end)
			return range;
	}
	return NULL;
}

#define for_each_physmem_reserved_range(t, range, p_start, p_end)			\
	for (t = 0, range = __physmem_reserved_next(&t, NULL),			\
	    *p_start = range ? range->start : 0, *p_end = range ? range->end : 0;	\
	     range; range = __physmem_reserved_next(&t, range),			\
	    *p_start = range ? range->start : 0, *p_end = range ? range->end : 0)
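
/*
 * Usage sketch (illustrative only, locals are hypothetical): walk every
 * reserved range of every type, e.g. to report what is excluded from
 * allocations:
 *
 *	struct reserved_range *range;
 *	enum reserved_range_type t;
 *	unsigned long start, end;
 *
 *	for_each_physmem_reserved_range(t, range, &start, &end)
 *		pr_info("%s: 0x%lx-0x%lx\n", get_rr_type_name(t), start, end);
 */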

static inline unsigned long get_physmem_reserved(enum reserved_range_type type,
						 unsigned long *addr, unsigned long *size)
{
	*addr = physmem_info.reserved[type].start;
	*size = physmem_info.reserved[type].end - physmem_info.reserved[type].start;
	return *size;
}
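
/*
 * Usage sketch (illustrative only, locals are hypothetical): query one
 * reserved region; the return value doubles as a "present" check, since a
 * missing region has start == end and therefore size 0:
 *
 *	unsigned long addr, size;
 *
 *	if (get_physmem_reserved(RR_INITRD, &addr, &size))
 *		pr_info("initrd at 0x%lx, %lu bytes\n", addr, size);
 */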

#endif