/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/fs/ufs/util.h
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 */

#include <linux/buffer_head.h>
#include <linux/fs.h>
#include "swab.h"


/*
 * some useful macros
 */
#define in_range(b,first,len)	((b)>=(first)&&(b)<(first)+(len))
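/*
 * For illustration: in_range() tests a half-open interval, i.e.
 * in_range(b, first, len) is true exactly when first <= b < first + len.
 */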

/*
 * functions used for retyping
 */
static inline struct ufs_buffer_head *UCPI_UBH(struct ufs_cg_private_info *cpi)
{
	return &cpi->c_ubh;
}
static inline struct ufs_buffer_head *USPI_UBH(struct ufs_sb_private_info *spi)
{
	return &spi->s_ubh;
}



/*
 * macros used for accessing structures
 */
static inline s32
ufs_get_fs_state(struct super_block *sb, struct ufs_super_block_first *usb1,
		 struct ufs_super_block_third *usb3)
{
	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
	case UFS_ST_SUNOS:
		if (fs32_to_cpu(sb, usb3->fs_postblformat) == UFS_42POSTBLFMT)
			return fs32_to_cpu(sb, usb1->fs_u0.fs_sun.fs_state);
		fallthrough;	/* to UFS_ST_SUN */
	case UFS_ST_SUN:
		return fs32_to_cpu(sb, usb3->fs_un2.fs_sun.fs_state);
	case UFS_ST_SUNx86:
		return fs32_to_cpu(sb, usb1->fs_u1.fs_sunx86.fs_state);
	case UFS_ST_44BSD:
	default:
		return fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_state);
	}
}

static inline void
ufs_set_fs_state(struct super_block *sb, struct ufs_super_block_first *usb1,
		 struct ufs_super_block_third *usb3, s32 value)
{
	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
	case UFS_ST_SUNOS:
		if (fs32_to_cpu(sb, usb3->fs_postblformat) == UFS_42POSTBLFMT) {
			usb1->fs_u0.fs_sun.fs_state = cpu_to_fs32(sb, value);
			break;
		}
		fallthrough;	/* to UFS_ST_SUN */
	case UFS_ST_SUN:
		usb3->fs_un2.fs_sun.fs_state = cpu_to_fs32(sb, value);
		break;
	case UFS_ST_SUNx86:
		usb1->fs_u1.fs_sunx86.fs_state = cpu_to_fs32(sb, value);
		break;
	case UFS_ST_44BSD:
		usb3->fs_un2.fs_44.fs_state = cpu_to_fs32(sb, value);
		break;
	}
}
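
/*
 * Illustrative usage sketch (not lifted from an actual caller): the state
 * accessors above expect the first and third pieces of the on-disk super
 * block, which can be fetched through the ubh_get_usb_first()/
 * ubh_get_usb_third() macros defined later in this header, roughly like:
 *
 *	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 *	struct ufs_super_block_first *usb1 = ubh_get_usb_first(uspi);
 *	struct ufs_super_block_third *usb3 = ubh_get_usb_third(uspi);
 *	s32 state = ufs_get_fs_state(sb, usb1, usb3);
 *
 *	ufs_set_fs_state(sb, usb1, usb3, state);
 */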

static inline u32
ufs_get_fs_npsect(struct super_block *sb, struct ufs_super_block_first *usb1,
		  struct ufs_super_block_third *usb3)
{
	if ((UFS_SB(sb)->s_flags & UFS_ST_MASK) == UFS_ST_SUNx86)
		return fs32_to_cpu(sb, usb3->fs_un2.fs_sunx86.fs_npsect);
	else
		return fs32_to_cpu(sb, usb1->fs_u1.fs_sun.fs_npsect);
}

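/*
 * The 64-bit qbmask/qfmask values below are stored on disk as two 32-bit
 * words, so the helpers copy the two __fs32 halves as-is and only then run
 * the assembled value through fs64_to_cpu(), preserving the on-disk byte
 * order of each half until the final conversion.
 */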
static inline u64
ufs_get_fs_qbmask(struct super_block *sb, struct ufs_super_block_third *usb3)
{
	__fs64 tmp;

	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
	case UFS_ST_SUNOS:
	case UFS_ST_SUN:
		((__fs32 *)&tmp)[0] = usb3->fs_un2.fs_sun.fs_qbmask[0];
		((__fs32 *)&tmp)[1] = usb3->fs_un2.fs_sun.fs_qbmask[1];
		break;
	case UFS_ST_SUNx86:
		((__fs32 *)&tmp)[0] = usb3->fs_un2.fs_sunx86.fs_qbmask[0];
		((__fs32 *)&tmp)[1] = usb3->fs_un2.fs_sunx86.fs_qbmask[1];
		break;
	case UFS_ST_44BSD:
		((__fs32 *)&tmp)[0] = usb3->fs_un2.fs_44.fs_qbmask[0];
		((__fs32 *)&tmp)[1] = usb3->fs_un2.fs_44.fs_qbmask[1];
		break;
	}

	return fs64_to_cpu(sb, tmp);
}

static inline u64
ufs_get_fs_qfmask(struct super_block *sb, struct ufs_super_block_third *usb3)
{
	__fs64 tmp;

	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
	case UFS_ST_SUNOS:
	case UFS_ST_SUN:
		((__fs32 *)&tmp)[0] = usb3->fs_un2.fs_sun.fs_qfmask[0];
		((__fs32 *)&tmp)[1] = usb3->fs_un2.fs_sun.fs_qfmask[1];
		break;
	case UFS_ST_SUNx86:
		((__fs32 *)&tmp)[0] = usb3->fs_un2.fs_sunx86.fs_qfmask[0];
		((__fs32 *)&tmp)[1] = usb3->fs_un2.fs_sunx86.fs_qfmask[1];
		break;
	case UFS_ST_44BSD:
		((__fs32 *)&tmp)[0] = usb3->fs_un2.fs_44.fs_qfmask[0];
		((__fs32 *)&tmp)[1] = usb3->fs_un2.fs_44.fs_qfmask[1];
		break;
	}

	return fs64_to_cpu(sb, tmp);
}

static inline u16
ufs_get_de_namlen(struct super_block *sb, struct ufs_dir_entry *de)
{
	if ((UFS_SB(sb)->s_flags & UFS_DE_MASK) == UFS_DE_OLD)
		return fs16_to_cpu(sb, de->d_u.d_namlen);
	else
		return de->d_u.d_44.d_namlen; /* XXX this seems wrong */
}

static inline void
ufs_set_de_namlen(struct super_block *sb, struct ufs_dir_entry *de, u16 value)
{
	if ((UFS_SB(sb)->s_flags & UFS_DE_MASK) == UFS_DE_OLD)
		de->d_u.d_namlen = cpu_to_fs16(sb, value);
	else
		de->d_u.d_44.d_namlen = value; /* XXX this seems wrong */
}

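/*
 * Only the 4.4BSD directory layout stores a d_type byte in the directory
 * entry, so ufs_set_de_type() below is a no-op for the other flavours.
 * Callers are expected to pass the mode of the inode being linked, e.g.
 * something like ufs_set_de_type(dir->i_sb, de, inode->i_mode) (shown only
 * as an illustration).
 */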
static inline void
ufs_set_de_type(struct super_block *sb, struct ufs_dir_entry *de, int mode)
{
	if ((UFS_SB(sb)->s_flags & UFS_DE_MASK) != UFS_DE_44BSD)
		return;

	/*
	 * TODO turn this into a table lookup
	 */
	switch (mode & S_IFMT) {
	case S_IFSOCK:
		de->d_u.d_44.d_type = DT_SOCK;
		break;
	case S_IFLNK:
		de->d_u.d_44.d_type = DT_LNK;
		break;
	case S_IFREG:
		de->d_u.d_44.d_type = DT_REG;
		break;
	case S_IFBLK:
		de->d_u.d_44.d_type = DT_BLK;
		break;
	case S_IFDIR:
		de->d_u.d_44.d_type = DT_DIR;
		break;
	case S_IFCHR:
		de->d_u.d_44.d_type = DT_CHR;
		break;
	case S_IFIFO:
		de->d_u.d_44.d_type = DT_FIFO;
		break;
	default:
		de->d_u.d_44.d_type = DT_UNKNOWN;
	}
}

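/*
 * On-disk inodes carry both the historic 16-bit uid/gid fields and, on the
 * newer flavours, separate 32-bit fields.  With UFS_UID_44BSD the 32-bit
 * fields are authoritative; with UFS_UID_EFT the 16-bit field is used
 * unless it holds the 0xFFFF sentinel, which redirects to the 32-bit one.
 * The setters keep both copies up to date (the EFT variant clamps the
 * 16-bit copy to 0xFFFF when the value does not fit).
 */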
static inline u32
ufs_get_inode_uid(struct super_block *sb, struct ufs_inode *inode)
{
	switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
	case UFS_UID_44BSD:
		return fs32_to_cpu(sb, inode->ui_u3.ui_44.ui_uid);
	case UFS_UID_EFT:
		if (inode->ui_u1.oldids.ui_suid == 0xFFFF)
			return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_uid);
		fallthrough;
	default:
		return fs16_to_cpu(sb, inode->ui_u1.oldids.ui_suid);
	}
}

static inline void
ufs_set_inode_uid(struct super_block *sb, struct ufs_inode *inode, u32 value)
{
	switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
	case UFS_UID_44BSD:
		inode->ui_u3.ui_44.ui_uid = cpu_to_fs32(sb, value);
		inode->ui_u1.oldids.ui_suid = cpu_to_fs16(sb, value);
		break;
	case UFS_UID_EFT:
		inode->ui_u3.ui_sun.ui_uid = cpu_to_fs32(sb, value);
		if (value > 0xFFFF)
			value = 0xFFFF;
		fallthrough;
	default:
		inode->ui_u1.oldids.ui_suid = cpu_to_fs16(sb, value);
		break;
	}
}

static inline u32
ufs_get_inode_gid(struct super_block *sb, struct ufs_inode *inode)
{
	switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
	case UFS_UID_44BSD:
		return fs32_to_cpu(sb, inode->ui_u3.ui_44.ui_gid);
	case UFS_UID_EFT:
		if (inode->ui_u1.oldids.ui_sgid == 0xFFFF)
			return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_gid);
		fallthrough;
	default:
		return fs16_to_cpu(sb, inode->ui_u1.oldids.ui_sgid);
	}
}

static inline void
ufs_set_inode_gid(struct super_block *sb, struct ufs_inode *inode, u32 value)
{
	switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
	case UFS_UID_44BSD:
		inode->ui_u3.ui_44.ui_gid = cpu_to_fs32(sb, value);
		inode->ui_u1.oldids.ui_sgid = cpu_to_fs16(sb, value);
		break;
	case UFS_UID_EFT:
		inode->ui_u3.ui_sun.ui_gid = cpu_to_fs32(sb, value);
		if (value > 0xFFFF)
			value = 0xFFFF;
		fallthrough;
	default:
		inode->ui_u1.oldids.ui_sgid = cpu_to_fs16(sb, value);
		break;
	}
}

extern dev_t ufs_get_inode_dev(struct super_block *, struct ufs_inode_info *);
extern void ufs_set_inode_dev(struct super_block *, struct ufs_inode_info *, dev_t);
extern int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len);

/*
 * These functions manipulate ufs buffers
 */
#define ubh_bread(sb,fragment,size) _ubh_bread_(uspi,sb,fragment,size)
extern struct ufs_buffer_head * _ubh_bread_(struct ufs_sb_private_info *, struct super_block *, u64, u64);
extern struct ufs_buffer_head * ubh_bread_uspi(struct ufs_sb_private_info *, struct super_block *, u64, u64);
extern void ubh_brelse (struct ufs_buffer_head *);
extern void ubh_brelse_uspi (struct ufs_sb_private_info *);
extern void ubh_mark_buffer_dirty (struct ufs_buffer_head *);
extern void ubh_mark_buffer_uptodate (struct ufs_buffer_head *, int);
extern void ubh_sync_block(struct ufs_buffer_head *);
extern void ubh_bforget (struct ufs_buffer_head *);
extern int ubh_buffer_dirty (struct ufs_buffer_head *);
#define ubh_ubhcpymem(mem,ubh,size) _ubh_ubhcpymem_(uspi,mem,ubh,size)
extern void _ubh_ubhcpymem_(struct ufs_sb_private_info *, unsigned char *, struct ufs_buffer_head *, unsigned);
#define ubh_memcpyubh(ubh,mem,size) _ubh_memcpyubh_(uspi,ubh,mem,size)
extern void _ubh_memcpyubh_(struct ufs_sb_private_info *, struct ufs_buffer_head *, unsigned char *, unsigned);

/* These functions work with cache pages */
extern struct page *ufs_get_locked_page(struct address_space *mapping,
					pgoff_t index);
static inline void ufs_put_locked_page(struct page *page)
{
	unlock_page(page);
	put_page(page);
}


/*
 * macros and inline function to get important structures from ufs_sb_private_info
 */

static inline void *get_usb_offset(struct ufs_sb_private_info *uspi,
				   unsigned int offset)
{
	unsigned int index;

	index = offset >> uspi->s_fshift;
	offset &= ~uspi->s_fmask;
	return uspi->s_ubh.bh[index]->b_data + offset;
}

#define ubh_get_usb_first(uspi) \
	((struct ufs_super_block_first *)get_usb_offset((uspi), 0))

#define ubh_get_usb_second(uspi) \
	((struct ufs_super_block_second *)get_usb_offset((uspi), UFS_SECTOR_SIZE))

#define ubh_get_usb_third(uspi)	\
	((struct ufs_super_block_third *)get_usb_offset((uspi), 2*UFS_SECTOR_SIZE))


#define ubh_get_ucg(ubh) \
	((struct ufs_cylinder_group *)((ubh)->bh[0]->b_data))


/*
 * Extract byte from ufs_buffer_head
 * Extract the bits for a block from a map inside ufs_buffer_head
 */
#define ubh_get_addr8(ubh,begin) \
	((u8*)(ubh)->bh[(begin) >> uspi->s_fshift]->b_data + \
	((begin) & ~uspi->s_fmask))

#define ubh_get_addr16(ubh,begin) \
	(((__fs16*)((ubh)->bh[(begin) >> (uspi->s_fshift-1)]->b_data)) + \
	((begin) & ((uspi->s_fsize>>1) - 1)))

#define ubh_get_addr32(ubh,begin) \
	(((__fs32*)((ubh)->bh[(begin) >> (uspi->s_fshift-2)]->b_data)) + \
	((begin) & ((uspi->s_fsize>>2) - 1)))

#define ubh_get_addr64(ubh,begin) \
	(((__fs64*)((ubh)->bh[(begin) >> (uspi->s_fshift-3)]->b_data)) + \
	((begin) & ((uspi->s_fsize>>3) - 1)))

#define ubh_get_addr ubh_get_addr8

static inline void *ubh_get_data_ptr(struct ufs_sb_private_info *uspi,
				     struct ufs_buffer_head *ubh,
				     u64 blk)
{
	if (uspi->fs_magic == UFS2_MAGIC)
		return ubh_get_addr64(ubh, blk);
	else
		return ubh_get_addr32(ubh, blk);
}

#define ubh_blkmap(ubh,begin,bit) \
	((*ubh_get_addr(ubh, (begin) + ((bit) >> 3)) >> ((bit) & 7)) & (0xff >> (UFS_MAXFRAG - uspi->s_fpb)))

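/*
 * Total number of free fragments: whole free blocks converted to fragments
 * plus the leftover free fragments that do not form a complete block.
 */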
static inline u64
ufs_freefrags(struct ufs_sb_private_info *uspi)
{
	return ufs_blkstofrags(uspi->cs_total.cs_nbfree) +
		uspi->cs_total.cs_nffree;
}

/*
 * Macros to access cylinder group array structures
 */
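/*
 * c_btotoff locates the per-cylinder block totals inside the cylinder
 * group buffer and c_boff locates the free-block counts per rotational
 * position, which is why the two macros below index by (cylno) and by
 * (cylno, rpos) respectively.
 */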
#define ubh_cg_blktot(ucpi,cylno) \
	(*((__fs32*)ubh_get_addr(UCPI_UBH(ucpi), (ucpi)->c_btotoff + ((cylno) << 2))))

#define ubh_cg_blks(ucpi,cylno,rpos) \
	(*((__fs16*)ubh_get_addr(UCPI_UBH(ucpi), \
	(ucpi)->c_boff + (((cylno) * uspi->s_nrpos + (rpos)) << 1 ))))

/*
 * Bitmap operations
 * These work like classical bitmap operations.  The difference is that we
 * don't have the whole bitmap in one contiguous chunk of memory, but spread
 * over several buffers.  Each helper takes the ufs_buffer_head, the byte
 * offset of the beginning of the bitmap inside it, and the bit position(s)
 * to operate on.
 */
#define ubh_setbit(ubh,begin,bit) \
	(*ubh_get_addr(ubh, (begin) + ((bit) >> 3)) |= (1 << ((bit) & 7)))

#define ubh_clrbit(ubh,begin,bit) \
	(*ubh_get_addr (ubh, (begin) + ((bit) >> 3)) &= ~(1 << ((bit) & 7)))

#define ubh_isset(ubh,begin,bit) \
	(*ubh_get_addr (ubh, (begin) + ((bit) >> 3)) & (1 << ((bit) & 7)))

#define ubh_isclr(ubh,begin,bit) (!ubh_isset(ubh,begin,bit))
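
/*
 * Illustrative sketch (hypothetical variables, uspi must be in scope as the
 * macros above require): setting and re-testing a fragment bit in a
 * cylinder group's free-fragment map might look like
 *
 *	ubh_setbit(UCPI_UBH(ucpi), ucpi->c_freeoff, bit);
 *	if (ubh_isset(UCPI_UBH(ucpi), ucpi->c_freeoff, bit))
 *		...;
 *
 * where c_freeoff is the byte offset of the free-fragment map inside the
 * cylinder-group buffer.
 */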

#define ubh_find_first_zero_bit(ubh,begin,size) _ubh_find_next_zero_bit_(uspi,ubh,begin,size,0)

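/*
 * The search below walks the bitmap one underlying buffer_head at a time:
 * s_bpf is the number of bits held by one fragment-sized buffer, while
 * s_bpfshift and s_bpfmask split a global bit number into a (buffer index,
 * bit offset) pair; the loop moves on to the next buffer whenever
 * find_next_zero_bit_le() finds nothing in the current one.
 */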
#define ubh_find_next_zero_bit(ubh,begin,size,offset) _ubh_find_next_zero_bit_(uspi,ubh,begin,size,offset)
static inline unsigned _ubh_find_next_zero_bit_(
	struct ufs_sb_private_info * uspi, struct ufs_buffer_head * ubh,
	unsigned begin, unsigned size, unsigned offset)
{
	unsigned base, count, pos;

	size -= offset;
	begin <<= 3;
	offset += begin;
	base = offset >> uspi->s_bpfshift;
	offset &= uspi->s_bpfmask;
	for (;;) {
		count = min_t(unsigned int, size + offset, uspi->s_bpf);
		size -= count - offset;
		pos = find_next_zero_bit_le(ubh->bh[base]->b_data, count, offset);
		if (pos < count || !size)
			break;
		base++;
		offset = 0;
	}
	return (base << uspi->s_bpfshift) + pos - begin;
}

static inline unsigned find_last_zero_bit (unsigned char * bitmap,
	unsigned size, unsigned offset)
{
	unsigned bit, i;
	unsigned char * mapp;
	unsigned char map;

	mapp = bitmap + (size >> 3);
	map = *mapp--;
	bit = 1 << (size & 7);
	for (i = size; i > offset; i--) {
		if ((map & bit) == 0)
			break;
		if ((i & 7) != 0) {
			bit >>= 1;
		} else {
			map = *mapp--;
			bit = 1 << 7;
		}
	}
	return i;
}

#define ubh_find_last_zero_bit(ubh,begin,size,offset) _ubh_find_last_zero_bit_(uspi,ubh,begin,size,offset)
static inline unsigned _ubh_find_last_zero_bit_(
	struct ufs_sb_private_info * uspi, struct ufs_buffer_head * ubh,
	unsigned begin, unsigned start, unsigned end)
{
	unsigned base, count, pos, size;

	size = start - end;
	begin <<= 3;
	start += begin;
	base = start >> uspi->s_bpfshift;
	start &= uspi->s_bpfmask;
	for (;;) {
		count = min_t(unsigned int,
			    size + (uspi->s_bpf - start), uspi->s_bpf)
			- (uspi->s_bpf - start);
		size -= count;
		pos = find_last_zero_bit (ubh->bh[base]->b_data,
			start, start - count);
		if (pos > start - count || !size)
			break;
		base--;
		start = uspi->s_bpf;
	}
	return (base << uspi->s_bpfshift) + pos - begin;
}

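/*
 * A block occupies s_fpb (fragments per block) consecutive bits in the
 * fragment bitmap, so the three helpers below test, clear and set a whole
 * byte when s_fpb == 8, a nibble when s_fpb == 4, two bits when s_fpb == 2
 * and a single bit when s_fpb == 1.
 */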
#define ubh_isblockclear(ubh,begin,block) (!_ubh_isblockset_(uspi,ubh,begin,block))

#define ubh_isblockset(ubh,begin,block) _ubh_isblockset_(uspi,ubh,begin,block)
static inline int _ubh_isblockset_(struct ufs_sb_private_info * uspi,
	struct ufs_buffer_head * ubh, unsigned begin, unsigned block)
{
	u8 mask;
	switch (uspi->s_fpb) {
	case 8:
		return (*ubh_get_addr (ubh, begin + block) == 0xff);
	case 4:
		mask = 0x0f << ((block & 0x01) << 2);
		return (*ubh_get_addr (ubh, begin + (block >> 1)) & mask) == mask;
	case 2:
		mask = 0x03 << ((block & 0x03) << 1);
		return (*ubh_get_addr (ubh, begin + (block >> 2)) & mask) == mask;
	case 1:
		mask = 0x01 << (block & 0x07);
		return (*ubh_get_addr (ubh, begin + (block >> 3)) & mask) == mask;
	}
	return 0;
}

#define ubh_clrblock(ubh,begin,block) _ubh_clrblock_(uspi,ubh,begin,block)
static inline void _ubh_clrblock_(struct ufs_sb_private_info * uspi,
	struct ufs_buffer_head * ubh, unsigned begin, unsigned block)
{
	switch (uspi->s_fpb) {
	case 8:
		*ubh_get_addr (ubh, begin + block) = 0x00;
		return;
	case 4:
		*ubh_get_addr (ubh, begin + (block >> 1)) &= ~(0x0f << ((block & 0x01) << 2));
		return;
	case 2:
		*ubh_get_addr (ubh, begin + (block >> 2)) &= ~(0x03 << ((block & 0x03) << 1));
		return;
	case 1:
		*ubh_get_addr (ubh, begin + (block >> 3)) &= ~(0x01 << ((block & 0x07)));
		return;
	}
}

#define ubh_setblock(ubh,begin,block) _ubh_setblock_(uspi,ubh,begin,block)
static inline void _ubh_setblock_(struct ufs_sb_private_info * uspi,
	struct ufs_buffer_head * ubh, unsigned begin, unsigned block)
{
	switch (uspi->s_fpb) {
	case 8:
		*ubh_get_addr(ubh, begin + block) = 0xff;
		return;
	case 4:
		*ubh_get_addr(ubh, begin + (block >> 1)) |= (0x0f << ((block & 0x01) << 2));
		return;
	case 2:
		*ubh_get_addr(ubh, begin + (block >> 2)) |= (0x03 << ((block & 0x03) << 1));
		return;
	case 1:
		*ubh_get_addr(ubh, begin + (block >> 3)) |= (0x01 << ((block & 0x07)));
		return;
	}
}

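/*
 * ufs_fragacct() scans a one-block fragment map and, for every run of
 * consecutive set (free) bits shorter than a full block, adds cnt to the
 * counter for that run length in fraglist[].  For example, with s_fpb == 8
 * and blockmap == 0b01100111 it applies fs32_add() to fraglist[3] (bits
 * 0-2) and fraglist[2] (bits 5-6); a completely free block leaves fraglist
 * untouched.
 */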
static inline void ufs_fragacct (struct super_block * sb, unsigned blockmap,
	__fs32 * fraglist, int cnt)
{
	struct ufs_sb_private_info * uspi;
	unsigned fragsize, pos;

	uspi = UFS_SB(sb)->s_uspi;

	fragsize = 0;
	for (pos = 0; pos < uspi->s_fpb; pos++) {
		if (blockmap & (1 << pos)) {
			fragsize++;
		}
		else if (fragsize > 0) {
			fs32_add(sb, &fraglist[fragsize], cnt);
			fragsize = 0;
		}
	}
	if (fragsize > 0 && fragsize < uspi->s_fpb)
		fs32_add(sb, &fraglist[fragsize], cnt);
}

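/*
 * UFS1 stores block pointers as 32-bit quantities and UFS2 as 64-bit ones,
 * so the helpers below take void * and choose the width from
 * uspi->fs_magic (UFS2_MAGIC) rather than hard-coding either layout.
 */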
static inline void *ufs_get_direct_data_ptr(struct ufs_sb_private_info *uspi,
					    struct ufs_inode_info *ufsi,
					    unsigned blk)
{
	BUG_ON(blk > UFS_TIND_BLOCK);
	return uspi->fs_magic == UFS2_MAGIC ?
		(void *)&ufsi->i_u1.u2_i_data[blk] :
		(void *)&ufsi->i_u1.i_data[blk];
}

static inline u64 ufs_data_ptr_to_cpu(struct super_block *sb, void *p)
{
	return UFS_SB(sb)->s_uspi->fs_magic == UFS2_MAGIC ?
		fs64_to_cpu(sb, *(__fs64 *)p) :
		fs32_to_cpu(sb, *(__fs32 *)p);
}

static inline void ufs_cpu_to_data_ptr(struct super_block *sb, void *p, u64 val)
{
	if (UFS_SB(sb)->s_uspi->fs_magic == UFS2_MAGIC)
		*(__fs64 *)p = cpu_to_fs64(sb, val);
	else
		*(__fs32 *)p = cpu_to_fs32(sb, val);
}

static inline void ufs_data_ptr_clear(struct ufs_sb_private_info *uspi,
				      void *p)
{
	if (uspi->fs_magic == UFS2_MAGIC)
		*(__fs64 *)p = 0;
	else
		*(__fs32 *)p = 0;
}

static inline int ufs_is_data_ptr_zero(struct ufs_sb_private_info *uspi,
				       void *p)
{
	if (uspi->fs_magic == UFS2_MAGIC)
		return *(__fs64 *)p == 0;
	else
		return *(__fs32 *)p == 0;
}

static inline __fs32 ufs_get_seconds(struct super_block *sbp)
{
	time64_t now = ktime_get_real_seconds();

	/* Signed 32-bit interpretation wraps around in 2038, which
	 * happens in ufs1 inode stamps but not in ufs2, which uses
	 * 64-bit stamps. For superblock and blockgroup, let's assume
	 * unsigned 32-bit stamps, which are good until y2106.
	 * Wrap around rather than clamp here to make the dirty
	 * file system detection work in the superblock stamp.
	 */
	return cpu_to_fs32(sbp, lower_32_bits(now));
}
607