/*
 *  linux/fs/ufs/balloc.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
 */

#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/capability.h>
#include <linux/bitops.h>
#include <asm/byteorder.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

#define INVBLOCK ((u64)-1L)
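/*
 * Sentinel meaning "no block": returned by the search and allocation
 * helpers below when they fail.
 */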

static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned, int *);
static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);

/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
		ufs_error (sb, "ufs_free_fragments", "internal error");

	lock_super(sb);

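	/*
	 * ufs_dtog() yields the cylinder group that holds the fragment and
	 * ufs_dtogd() its offset within that group: e.g., assuming a
	 * hypothetical s_fpg of 32768, fragment 70000 maps to cg 2,
	 * bit 4464 (70000 - 2 * 32768).
	 */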
	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
		goto failed;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}

	end_bit = bit + count;
	bbase = ufs_blknum (bit);
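	/*
	 * The two ufs_fragacct() calls below bracket the bit flips: the
	 * block's old free-run contribution is subtracted from the
	 * per-size counts in cg_frsum before the bits change, and its new
	 * contribution is added back afterwards.
	 */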
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
	for (i = bit; i < end_bit; i++) {
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
		else
			ufs_error (sb, "ufs_free_fragments",
				   "bit already cleared for fragment %u", i);
	}

	vfs_dq_free_block(inode, count);

	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree += count;
	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

	/*
	 * Try to reassemble the free fragments into a whole block
	 */
	blkno = ufs_fragstoblks (bbase);
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
		uspi->cs_total.cs_nffree -= uspi->s_fpb;
		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno (bbase);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(bbase)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	sb->s_dirt = 1;

	unlock_super (sb);
	UFSD("EXIT\n");
	return;

failed:
	unlock_super (sb);
	UFSD("EXIT (FAILED)\n");
	return;
}

/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			   "fragment %llu, count %u\n",
			   (unsigned long long)fragment, count);
		goto failed;
	}

	lock_super(sb);

do_more:
	overflow = 0;
	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
		goto failed_unlock;
	}
	end_bit = bit + count;
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}
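	/*
	 * A run that crosses the end of this cylinder group is clamped
	 * here; the remainder is picked up by the "goto do_more" below,
	 * which re-enters with 'fragment' advanced past what was just
	 * freed.
	 */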

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed_unlock;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed_unlock;
	}

	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		vfs_dq_free_block(inode, uspi->s_fpb);

		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);

		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno(i);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(i)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}

	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	sb->s_dirt = 1;
	unlock_super (sb);
	UFSD("EXIT\n");
	return;

failed_unlock:
	unlock_super (sb);
failed:
	UFSD("EXIT (FAILED)\n");
	return;
}

/*
 * Modify the inode's page cache so that the buffers whose b_blocknr was
 * oldb...oldb+count-1 end up with b_blocknr newb...newb+count-1. We also
 * assume that blocks oldb...oldb+count-1 sit at the end of the file.
 *
 * We can get here from ufs_writepage or ufs_prepare_write; locked_page is
 * an argument of those functions, so it is already locked.
 */
static void ufs_change_blocknr(struct inode *inode, sector_t beg,
			       unsigned int count, sector_t oldb,
			       sector_t newb, struct page *locked_page)
{
	const unsigned blks_per_page =
		1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	const unsigned mask = blks_per_page - 1;
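	/*
	 * E.g., with 4K pages and 1K filesystem blocks, blks_per_page is 4
	 * and mask is 3, so "i & mask" below is a block's position within
	 * its page.
	 */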
	struct address_space * const mapping = inode->i_mapping;
	pgoff_t index, cur_index, last_index;
	unsigned pos, j, lblock;
	sector_t end, i;
	struct page *page;
	struct buffer_head *head, *bh;

	UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
	      inode->i_ino, count,
	     (unsigned long long)oldb, (unsigned long long)newb);

	BUG_ON(!locked_page);
	BUG_ON(!PageLocked(locked_page));

	cur_index = locked_page->index;
	end = count + beg;
	last_index = end >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
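	/* "(i | mask) + 1" advances i to the first block of the next page. */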
	for (i = beg; i < end; i = (i | mask) + 1) {
		index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits);

		if (likely(cur_index != index)) {
			page = ufs_get_locked_page(mapping, index);
			if (!page) /* it was truncated */
				continue;
			if (IS_ERR(page)) { /* or EIO */
				ufs_error(inode->i_sb, __func__,
					  "read of page %llu failed\n",
					  (unsigned long long)index);
				continue;
			}
		} else
			page = locked_page;

		head = page_buffers(page);
		bh = head;
		pos = i & mask;
		for (j = 0; j < pos; ++j)
			bh = bh->b_this_page;

		if (unlikely(index == last_index))
			lblock = end & mask;
		else
			lblock = blks_per_page;

		do {
			if (j >= lblock)
				break;
			pos = (i - beg) + j;

			if (!buffer_mapped(bh))
				map_bh(bh, inode->i_sb, oldb + pos);
			if (!buffer_uptodate(bh)) {
				ll_rw_block(READ, 1, &bh);
				wait_on_buffer(bh);
				if (!buffer_uptodate(bh)) {
					ufs_error(inode->i_sb, __func__,
						  "read of block failed\n");
					break;
				}
			}

			UFSD(" change from %llu to %llu, pos %u\n",
			     (unsigned long long)(pos + oldb),
			     (unsigned long long)(pos + newb), pos);

			bh->b_blocknr = newb + pos;
			unmap_underlying_metadata(bh->b_bdev,
						  bh->b_blocknr);
			mark_buffer_dirty(bh);
			++j;
			bh = bh->b_this_page;
		} while (bh != head);

		if (likely(cur_index != index))
			ufs_put_locked_page(page);
	}
	UFSD("EXIT\n");
}

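/*
 * Zero 'n' fresh fragments starting at 'beg' through the buffer cache so
 * that stale on-disk contents of newly allocated space never become
 * visible; the buffers are written out synchronously when the inode (or
 * the caller, via 'sync') requires it.
 */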
static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
			    int sync)
{
	struct buffer_head *bh;
	sector_t end = beg + n;

	for (; beg < end; ++beg) {
		bh = sb_getblk(inode->i_sb, beg);
		lock_buffer(bh);
		memset(bh->b_data, 0, inode->i_sb->s_blocksize);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		if (IS_SYNC(inode) || sync)
			sync_dirty_buffer(bh);
		brelse(bh);
	}
}

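/*
 * Central allocation entry point. Depending on what is already mapped at
 * *p, this either allocates a fresh run of fragments, grows the existing
 * tail fragments in place, or allocates a whole new block and migrates
 * the old data into it. Returns the first fragment number on success,
 * 0 on failure and INVBLOCK on internal error, with *err set accordingly.
 */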
u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
			   u64 goal, unsigned count, int *err,
			   struct page *locked_page)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	unsigned cgno, oldcount, newcount;
	u64 tmp, request, result;

	UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
	     inode->i_ino, (unsigned long long)fragment,
	     (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	*err = -ENOSPC;

	lock_super (sb);
	tmp = ufs_data_ptr_to_cpu(sb, p);

	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
		ufs_warning(sb, "ufs_new_fragments", "internal warning"
			    " fragment %llu, count %u",
			    (unsigned long long)fragment, count);
		count = uspi->s_fpb - ufs_fragnum(fragment);
	}
	oldcount = ufs_fragnum (fragment);
	newcount = oldcount + count;

	/*
	 * Somebody else has just allocated our fragments
	 */
	if (oldcount) {
		if (!tmp) {
			ufs_error(sb, "ufs_new_fragments", "internal error, "
				  "fragment %llu, tmp %llu\n",
				  (unsigned long long)fragment,
				  (unsigned long long)tmp);
			unlock_super(sb);
			return INVBLOCK;
		}
		if (fragment < UFS_I(inode)->i_lastfrag) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			unlock_super (sb);
			return 0;
		}
	} else {
		if (tmp) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			unlock_super(sb);
			return 0;
		}
	}

	/*
	 * There is not enough space for the user on the device
	 */
	if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
		unlock_super (sb);
		UFSD("EXIT (FAILED)\n");
		return 0;
	}

	if (goal >= uspi->s_size)
		goal = 0;
	if (goal == 0)
		cgno = ufs_inotocg (inode->i_ino);
	else
		cgno = ufs_dtog(uspi, goal);

	/*
	 * allocate new fragment
	 */
	if (oldcount == 0) {
		result = ufs_alloc_fragments (inode, cgno, goal, count, err);
		if (result) {
			ufs_cpu_to_data_ptr(sb, p, result);
			*err = 0;
			UFS_I(inode)->i_lastfrag =
				max_t(u32, UFS_I(inode)->i_lastfrag,
				      fragment + count);
			ufs_clear_frags(inode, result + oldcount,
					newcount - oldcount, locked_page != NULL);
		}
		unlock_super(sb);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	/*
	 * resize block
	 */
	result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
	if (result) {
		*err = 0;
		UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		unlock_super(sb);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	/*
	 * allocate new block and move data
	 */
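	/*
	 * fs_optim selects the allocation policy, as in BSD's
	 * ffs_realloccg(): UFS_OPTSPACE requests exactly the fragments
	 * needed, while UFS_OPTTIME rounds the request up to a full block
	 * and frees the unused tail afterwards. The thresholds below flip
	 * the policy once the amount of free fragments suggests the
	 * trade-off has changed.
	 */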
	switch (fs32_to_cpu(sb, usb1->fs_optim)) {
	    case UFS_OPTSPACE:
		request = newcount;
		if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
		    > uspi->s_dsize * uspi->s_minfree / (2 * 100))
			break;
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		break;
	    default:
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		/* fall through */
	    case UFS_OPTTIME:
		request = uspi->s_fpb;
		if (uspi->cs_total.cs_nffree < uspi->s_dsize *
		    (uspi->s_minfree - 2) / 100)
			break;
		/* fragmentation is low again: optimize for space, per BSD */
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE);
		break;
	}
	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
	if (result) {
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		ufs_change_blocknr(inode, fragment - oldcount, oldcount,
				   uspi->s_sbbase + tmp,
				   uspi->s_sbbase + result, locked_page);
		ufs_cpu_to_data_ptr(sb, p, result);
		*err = 0;
		UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
		unlock_super(sb);
		if (newcount < request)
			ufs_free_fragments (inode, result + newcount, request - newcount);
		ufs_free_fragments (inode, tmp, oldcount);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	unlock_super(sb);
	UFSD("EXIT (FAILED)\n");
	return 0;
}

static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
			     unsigned oldcount, unsigned newcount, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, fragno, fragoff, count, fragsize, i;

	UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
	     (unsigned long long)fragment, oldcount, newcount);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first (uspi);
	count = newcount - oldcount;

	cgno = ufs_dtog(uspi, fragment);
	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
		return 0;
	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
		return 0;
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_add_fragments",
			"internal error, bad magic number on cg %u", cgno);
		return 0;
	}

	fragno = ufs_dtogd(uspi, fragment);
	fragoff = ufs_fragnum (fragno);
	for (i = oldcount; i < newcount; i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			return 0;
	/*
	 * Block can be extended
	 */
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			break;
	fragsize = i - oldcount;
	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
		ufs_panic (sb, "ufs_add_fragments",
			"internal error or corrupted bitmap on cg %u", cgno);
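	/*
	 * The free run being eaten into has length 'fragsize'; it loses
	 * 'count' fragments, so decrement the bucket for the old length
	 * and, if anything is left over, credit the bucket for the
	 * shorter remaining run.
	 */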
	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
	if (fragsize != count)
		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
	for (i = oldcount; i < newcount; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
	if (vfs_dq_alloc_block(inode, count)) {
		*err = -EDQUOT;
		return 0;
	}

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	sb->s_dirt = 1;

	UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);

	return fragment;
}

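/*
 * Check cylinder group 'cgno' for a free whole block, or for at least one
 * free run of 'count' or more fragments; jumps to cg_found on success.
 * Kept as a macro so the three search phases below can share it.
 */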
#define UFS_TEST_FREE_SPACE_CG \
	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
		goto cg_found; \
	for (k = count; k < uspi->s_fpb; k++) \
		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
			goto cg_found;

static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
			       u64 goal, unsigned count, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned oldcg, i, j, k, allocsize;
	u64 result;

	UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
	     inode->i_ino, cgno, (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	oldcg = cgno;

	/*
	 * 1. search the preferred cylinder group
	 */
	UFS_TEST_FREE_SPACE_CG

	/*
	 * 2. quadratic rehash
	 */
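	/*
	 * This probes groups at offsets 1, 3, 7, 15, ... (2^k - 1) from
	 * the preferred group, wrapping modulo s_ncg, to spread
	 * allocations cheaply before the linear sweep below.
	 */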
	for (j = 1; j < uspi->s_ncg; j *= 2) {
		cgno += j;
		if (cgno >= uspi->s_ncg)
			cgno -= uspi->s_ncg;
		UFS_TEST_FREE_SPACE_CG
	}

	/*
	 * 3. brute force search
	 * We start at j = 2: offset 0 (the preferred group) was covered by
	 * step 1 and offset 1 by the first probe of step 2.
	 */
	cgno = (oldcg + 1) % uspi->s_ncg;
	for (j = 2; j < uspi->s_ncg; j++) {
		cgno++;
		if (cgno >= uspi->s_ncg)
			cgno = 0;
		UFS_TEST_FREE_SPACE_CG
	}

	UFSD("EXIT (FAILED)\n");
	return 0;

cg_found:
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg))
		ufs_panic (sb, "ufs_alloc_fragments",
			"internal error, bad magic number on cg %u", cgno);
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());

	if (count == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goto succeed;
	}

	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
			break;

	if (allocsize == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goal = ufs_dtogd(uspi, result);
		for (i = count; i < uspi->s_fpb; i++)
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
		i = uspi->s_fpb - count;
		vfs_dq_free_block(inode, i);

		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
		uspi->cs_total.cs_nffree += i;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
		fs32_add(sb, &ucg->cg_frsum[i], 1);
		goto succeed;
	}

	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
	if (result == INVBLOCK)
		return 0;
	if (vfs_dq_alloc_block(inode, count)) {
		*err = -EDQUOT;
		return 0;
	}
	for (i = 0; i < count; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

	if (count != allocsize)
		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succeed:
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	sb->s_dirt = 1;

	result += cgno * uspi->s_fpg;
	UFSD("EXIT3, result %llu\n", (unsigned long long)result);
	return result;
}

static u64 ufs_alloccg_block(struct inode *inode,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cylinder_group * ucg;
	u64 result, blkno;

	UFSD("ENTER, goal %llu\n", (unsigned long long)goal);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	if (goal == 0) {
		goal = ucpi->c_rotor;
		goto norot;
	}
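	/*
	 * Round the goal down to a block boundary (ufs_blknum()) and make
	 * it group-relative (ufs_dtogd()) before consulting the bitmap.
	 */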
	goal = ufs_blknum (goal);
	goal = ufs_dtogd(uspi, goal);

	/*
	 * If the requested block is available, use it.
	 */
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
		result = goal;
		goto gotit;
	}

norot:
	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
	if (result == INVBLOCK)
		return INVBLOCK;
	ucpi->c_rotor = result;
gotit:
	blkno = ufs_fragstoblks(result);
	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
		ufs_clusteracct (sb, ucpi, blkno, -1);
	if (vfs_dq_alloc_block(inode, uspi->s_fpb)) {
		*err = -EDQUOT;
		return INVBLOCK;
	}

	fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
	uspi->cs_total.cs_nbfree--;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);

	if (uspi->fs_magic != UFS2_MAGIC) {
		unsigned cylno = ufs_cbtocylno((unsigned)result);

		fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
					  ufs_cbtorpos((unsigned)result)), 1);
		fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}

	UFSD("EXIT, result %llu\n", (unsigned long long)result);

	return result;
}

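/*
 * Scan 'size' bytes of a cylinder-group bitmap, starting 'begin' bytes
 * into the buffer head, for a byte whose fragment pattern (looked up in
 * 'table') has a bit of 'mask' set. Returns the number of bytes left
 * unscanned, counting the matching byte, so 0 means no match; this
 * mirrors the classic BSD scanc() routine.
 */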
static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
			  struct ufs_buffer_head *ubh,
			  unsigned begin, unsigned size,
			  unsigned char *table, unsigned char mask)
{
	unsigned rest, offset;
	unsigned char *cp;

	offset = begin & ~uspi->s_fmask;
	begin >>= uspi->s_fshift;
	for (;;) {
		if ((offset + size) < uspi->s_fsize)
			rest = size;
		else
			rest = uspi->s_fsize - offset;
		size -= rest;
		cp = ubh->bh[begin]->b_data + offset;
		while ((table[*cp++] & mask) == 0 && --rest)
			;
		if (rest || !size)
			break;
		begin++;
		offset = 0;
	}
	return (size + rest);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 * @sb: pointer to super block
 * @ucpi: pointer to cylinder group info
 * @goal: near which block we want to find a new one
 * @count: specified size
 */
static u64 ufs_bitmap_search(struct super_block *sb,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, unsigned count)
{
	/*
	 * Bit patterns for identifying fragments in the block map
	 * used as ((map & mask_arr) == want_arr)
	 */
	static const int mask_arr[9] = {
		0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
	};
	static const int want_arr[9] = {
		0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
	};
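	/*
	 * E.g., for count == 2 the pair is mask 0xf / want 0x6: after the
	 * block map is shifted left by one below, this matches the pattern
	 * used-free-free-used, i.e. a free run of exactly the requested
	 * length rather than a piece of a longer one.
	 */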
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct ufs_super_block_first *usb1;
	struct ufs_cylinder_group *ucg;
	unsigned start, length, loc;
	unsigned pos, want, blockmap, mask, end;
	u64 result;

	UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
	     (unsigned long long)goal, count);

	usb1 = ubh_get_usb_first (uspi);
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	if (goal)
		start = ufs_dtogd(uspi, goal) >> 3;
	else
		start = ucpi->c_frotor >> 3;

	length = ((uspi->s_fpg + 7) >> 3) - start;
	loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
		(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
		1 << (count - 1 + (uspi->s_fpb & 7)));
	if (loc == 0) {
		length = start + 1;
		loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
				(uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
				ufs_fragtable_other,
				1 << (count - 1 + (uspi->s_fpb & 7)));
		if (loc == 0) {
			ufs_error(sb, "ufs_bitmap_search",
				  "bitmap corrupted on cg %u, start %u,"
				  " length %u, count %u, freeoff %u\n",
				  ucpi->c_cgx, start, length, count,
				  ucpi->c_freeoff);
			return INVBLOCK;
		}
		start = 0;
	}
	result = (start + length - loc) << 3;
	ucpi->c_frotor = result;

	/*
	 * found the byte in the map
	 */

	for (end = result + 8; result < end; result += uspi->s_fpb) {
		blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
		blockmap <<= 1;
		mask = mask_arr[count];
		want = want_arr[count];
		for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
			if ((blockmap & mask) == want) {
				UFSD("EXIT, result %llu\n",
				     (unsigned long long)result);
				return result + pos;
			}
			mask <<= 1;
			want <<= 1;
		}
	}

	ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
		  ucpi->c_cgx);
	UFSD("EXIT (FAILED)\n");
	return INVBLOCK;
}

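/*
 * Maintain the 44BSD cluster summary: toggle 'blkno' in the per-group
 * cluster bitmap and move the surrounding run of free blocks between the
 * cluster-size buckets. Only filesystems carrying contiguous-block
 * summaries (s_contigsumsize > 0) are affected.
 */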
static void ufs_clusteracct(struct super_block * sb,
	struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
{
	struct ufs_sb_private_info * uspi;
	int i, start, end, forw, back;

	uspi = UFS_SB(sb)->s_uspi;
	if (uspi->s_contigsumsize <= 0)
		return;

	if (cnt > 0)
		ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
	else
		ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);

	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + uspi->s_contigsumsize;
	if (end >= ucpi->c_nclusterblks)
		end = ucpi->c_nclusterblks;
	i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
	if (i > end)
		i = end;
	forw = i - start;

	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - uspi->s_contigsumsize;
	if (end < 0)
		end = -1;
	i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
	if (i < end)
		i = end;
	back = start - i;

	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > uspi->s_contigsumsize)
		i = uspi->s_contigsumsize;
	fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
	if (back > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
	if (forw > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
}

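/*
 * For every possible byte of the free-fragment bitmap these tables encode,
 * as a bit mask, which lengths of free-fragment run occur within it:
 * ufs_fragtable_8fpb serves the 8-fragments-per-block case (bit k-1 set
 * means a run of length k is present), while ufs_fragtable_other packs the
 * patterns for the smaller fragments-per-block values. The bit to test is
 * selected by the "1 << (count - 1 + (s_fpb & 7))" expression in
 * ufs_bitmap_search().
 */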
static unsigned char ufs_fragtable_8fpb[] = {
	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};

static unsigned char ufs_fragtable_other[] = {
	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};
979