xref: /openbmc/linux/block/partitions/aix.c (revision 275876e2)
/*
 *  block/partitions/aix.c
 *
 *  Copyright (C) 2012-2013 Philippe De Muyter <phdm@macqel.be>
 */

#include "check.h"
#include "aix.h"

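/*
 * On-disk layout as read by this parser (offsets in 512-byte sectors):
 * the LVM record in sector 7 gives the physical partition (pp) size and
 * the location of the volume group descriptor area (VGDA); the VGDA
 * header gives the number of logical volumes (LVs); the sector after the
 * VGDA header holds the LV descriptors; the LV name table sits 33 sectors
 * before the end of the VGDA; and the physical volume descriptor (PVD)
 * starts 17 sectors into the VGDA.
 */
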
struct lvm_rec {
	char lvm_id[4]; /* "_LVM" */
	char reserved4[16];
	__be32 lvmarea_len;
	__be32 vgda_len;
	__be32 vgda_psn[2];
	char reserved36[10];
	__be16 pp_size; /* log2(pp_size) */
	char reserved46[12];
	__be16 version;
	};

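/*
 * Volume group descriptor area (VGDA) header; only numlvs is used below,
 * to bound the scan of the logical volume descriptors.
 */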
struct vgda {
	__be32 secs;
	__be32 usec;
	char reserved8[16];
	__be16 numlvs;
	__be16 maxlvs;
	__be16 pp_size;
	__be16 numpvs;
	__be16 total_vgdas;
	__be16 vgda_size;
	};

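/*
 * Per-logical-volume descriptor; num_lps is the number of logical
 * partitions making up the LV, recorded below as pps_per_lv.
 */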
struct lvd {
	__be16 lv_ix;
	__be16 res2;
	__be16 res4;
	__be16 maxsize;
	__be16 lv_state;
	__be16 mirror;
	__be16 mirror_policy;
	__be16 num_lps;
	__be16 res10[8];
	};

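/*
 * Fixed-width LV name as stored on disk; not guaranteed to be
 * NUL-terminated, hence the bounded "%.*s" prints further down.
 */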
struct lvname {
	char name[64];
	};

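/*
 * Physical partition entry: maps one physical partition to a logical
 * volume index (lv_ix) and a logical partition index (lp_ix) within it.
 */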
struct ppe {
	__be16 lv_ix;
	unsigned short res2;
	unsigned short res4;
	__be16 lp_ix;
	unsigned short res8[12];
	};

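/*
 * Physical volume descriptor: pp_count physical partitions, the first of
 * which starts at physical sector psn_part1, each described by an entry
 * in ppe[].
 */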
struct pvd {
	char reserved0[16];
	__be16 pp_count;
	char reserved18[2];
	__be32 psn_part1;
	char reserved24[8];
	struct ppe ppe[1016];
	};

#define LVM_MAXLVS 256

/**
 * last_lba(): return number of last logical block of device
 * @bdev: block device
 *
 * Description: Returns the last addressable LBA of @bdev on success,
 * 0 on error.  The value is derived from the size of the backing inode
 * and assumes 512-byte logical blocks.
 */
static u64 last_lba(struct block_device *bdev)
{
	if (!bdev || !bdev->bd_inode)
		return 0;
	return (bdev->bd_inode->i_size >> 9) - 1ULL;
}

/**
 * read_lba(): Read bytes from disk, starting at given LBA
 * @state: partition-parsing state (provides the block device)
 * @lba: starting Logical Block Address
 * @buffer: destination buffer
 * @count: number of bytes to read
 *
 * Description: Reads @count bytes from @state->bdev into @buffer.
 * Returns the number of bytes actually read (which may be less than
 * @count), or 0 on error.
 */
static size_t read_lba(struct parsed_partitions *state, u64 lba, u8 *buffer,
			size_t count)
{
	size_t totalreadcount = 0;

	if (!buffer || lba + count / 512 > last_lba(state->bdev))
		return 0;

	while (count) {
		int copied = 512;
		Sector sect;
		unsigned char *data = read_part_sector(state, lba++, &sect);
		if (!data)
			break;
		if (copied > count)
			copied = count;
		memcpy(buffer, data, copied);
		put_dev_sector(sect);
		buffer += copied;
		totalreadcount += copied;
		count -= copied;
	}
	return totalreadcount;
}

/**
 * alloc_pvd(): reads physical volume descriptor
 * @state: partition-parsing state
 * @lba: LBA of the physical volume descriptor
 *
 * Description: Returns pvd on success, NULL on error.
 * Allocates space for pvd and fills it with the disk blocks at @lba.
 * Notes: remember to free pvd when you're done!
 */
static struct pvd *alloc_pvd(struct parsed_partitions *state, u32 lba)
{
	size_t count = sizeof(struct pvd);
	struct pvd *p;

	p = kmalloc(count, GFP_KERNEL);
	if (!p)
		return NULL;

	if (read_lba(state, lba, (u8 *) p, count) < count) {
		kfree(p);
		return NULL;
	}
	return p;
}

/**
 * alloc_lvn(): reads logical volume names
 * @state: partition-parsing state
 * @lba: LBA of the logical volume name table
 *
 * Description: Returns lvn on success, NULL on error.
 * Allocates space for lvn and fills it with the disk blocks at @lba.
 * Notes: remember to free lvn when you're done!
 */
static struct lvname *alloc_lvn(struct parsed_partitions *state, u32 lba)
{
	size_t count = sizeof(struct lvname) * LVM_MAXLVS;
	struct lvname *p;

	p = kmalloc(count, GFP_KERNEL);
	if (!p)
		return NULL;

	if (read_lba(state, lba, (u8 *) p, count) < count) {
		kfree(p);
		return NULL;
	}
	return p;
}

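/**
 * aix_partition(): parse AIX LVM metadata and report logical volumes
 * @state: partition-parsing state
 *
 * Description: Reads the LVM record in sector 7, locates the VGDA, loads
 * the LV descriptors and names, then walks the PVD's physical partition
 * map.  Each LV whose physical partitions are contiguous on this disk is
 * reported via put_partition(); non-contiguous LVs only get a warning.
 * Returns 1 if at least one partition was added, 0 otherwise.
 */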
int aix_partition(struct parsed_partitions *state)
{
	int ret = 0;
	Sector sect;
	unsigned char *d;
	u32 pp_bytes_size;
	u32 pp_blocks_size = 0;
	u32 vgda_sector = 0;
	u32 vgda_len = 0;
	int numlvs = 0;
	struct pvd *pvd = NULL;
	struct lv_info {
		unsigned short pps_per_lv;
		unsigned short pps_found;
		unsigned char lv_is_contiguous;
	} *lvip;
	struct lvname *n = NULL;

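	/* The LVM record in sector 7 gives the pp size and the VGDA location. */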
	d = read_part_sector(state, 7, &sect);
	if (d) {
		struct lvm_rec *p = (struct lvm_rec *)d;
		u16 lvm_version = be16_to_cpu(p->version);
		char tmp[64];

		if (lvm_version == 1) {
			int pp_size_log2 = be16_to_cpu(p->pp_size);

			pp_bytes_size = 1 << pp_size_log2;
			pp_blocks_size = pp_bytes_size / 512;
			snprintf(tmp, sizeof(tmp),
				" AIX LVM header version %u found\n",
				lvm_version);
			vgda_len = be32_to_cpu(p->vgda_len);
			vgda_sector = be32_to_cpu(p->vgda_psn[0]);
		} else {
			snprintf(tmp, sizeof(tmp),
				" unsupported AIX LVM version %d found\n",
				lvm_version);
		}
		strlcat(state->pp_buf, tmp, PAGE_SIZE);
		put_dev_sector(sect);
	}
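	/* The VGDA header tells us how many logical volumes to expect. */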
	if (vgda_sector && (d = read_part_sector(state, vgda_sector, &sect))) {
		struct vgda *p = (struct vgda *)d;

		numlvs = be16_to_cpu(p->numlvs);
		put_dev_sector(sect);
	}
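	/*
	 * Load the per-LV descriptors and the LV name table; pps_per_lv is
	 * needed to recognize a complete, contiguous LV in the PVD scan.
	 */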
	lvip = kcalloc(state->limit, sizeof(struct lv_info), GFP_KERNEL);
	if (!lvip)
		return 0;
	if (numlvs && (d = read_part_sector(state, vgda_sector + 1, &sect))) {
		struct lvd *p = (struct lvd *)d;
		int i;

		n = alloc_lvn(state, vgda_sector + vgda_len - 33);
		if (n) {
			int foundlvs = 0;

			for (i = 0; foundlvs < numlvs && i < state->limit; i += 1) {
				lvip[i].pps_per_lv = be16_to_cpu(p[i].num_lps);
				if (lvip[i].pps_per_lv)
					foundlvs += 1;
			}
			/* the PVD walk needs n[].name and lvip[].pps_per_lv */
			pvd = alloc_pvd(state, vgda_sector + 17);
		}
		put_dev_sector(sect);
	}
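	/*
	 * Walk the physical partition map.  An LV is reported as a partition
	 * only if its logical partitions appear in order and back to back,
	 * i.e. the LV is contiguous on this physical volume.
	 */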
	if (pvd) {
		int numpps = be16_to_cpu(pvd->pp_count);
		int psn_part1 = be32_to_cpu(pvd->psn_part1);
		int i;
		int cur_lv_ix = -1;
		int next_lp_ix = 1;
		int lp_ix;

		for (i = 0; i < numpps; i += 1) {
			struct ppe *p = pvd->ppe + i;
			unsigned int lv_ix;

			lp_ix = be16_to_cpu(p->lp_ix);
			if (!lp_ix) {
				next_lp_ix = 1;
				continue;
			}
			lv_ix = be16_to_cpu(p->lv_ix) - 1;
			if (lv_ix >= state->limit) {
				cur_lv_ix = -1;
				continue;
			}
			lvip[lv_ix].pps_found += 1;
			if (lp_ix == 1) {
				cur_lv_ix = lv_ix;
				next_lp_ix = 1;
			} else if (lv_ix != cur_lv_ix || lp_ix != next_lp_ix) {
				next_lp_ix = 1;
				continue;
			}
			if (lp_ix == lvip[lv_ix].pps_per_lv) {
				char tmp[70];

				put_partition(state, lv_ix + 1,
				  (i + 1 - lp_ix) * pp_blocks_size + psn_part1,
				  lvip[lv_ix].pps_per_lv * pp_blocks_size);
				/* names come from disk and may lack a NUL */
				snprintf(tmp, sizeof(tmp), " <%.*s>\n",
					 (int)sizeof(n[lv_ix].name),
					 n[lv_ix].name);
				strlcat(state->pp_buf, tmp, PAGE_SIZE);
				lvip[lv_ix].lv_is_contiguous = 1;
				ret = 1;
				next_lp_ix = 1;
			} else
				next_lp_ix += 1;
		}
		for (i = 0; i < state->limit; i += 1)
			if (lvip[i].pps_found && !lvip[i].lv_is_contiguous)
				pr_warn("partition %.*s (%u pp's found) is not contiguous\n",
					(int)sizeof(n[i].name), n[i].name,
					lvip[i].pps_found);
		kfree(pvd);
	}
	kfree(n);
	kfree(lvip);
	return ret;
}