xref: /openbmc/linux/fs/ufs/balloc.c (revision acc6a093)
1 /*
2  *  linux/fs/ufs/balloc.c
3  *
4  * Copyright (C) 1998
5  * Daniel Pirkl <daniel.pirkl@email.cz>
6  * Charles University, Faculty of Mathematics and Physics
7  *
8  * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
9  */
10 
11 #include <linux/fs.h>
12 #include <linux/stat.h>
13 #include <linux/time.h>
14 #include <linux/string.h>
15 #include <linux/quotaops.h>
16 #include <linux/buffer_head.h>
17 #include <linux/capability.h>
18 #include <linux/bitops.h>
19 #include <asm/byteorder.h>
20 
21 #include "ufs_fs.h"
22 #include "ufs.h"
23 #include "swab.h"
24 #include "util.h"
25 
26 #define INVBLOCK ((u64)-1L)
27 
28 static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned, int *);
29 static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
30 static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
31 static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
32 static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
33 static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
34 
35 /*
36  * Free 'count' fragments from fragment number 'fragment'
37  */
38 void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
39 {
40 	struct super_block * sb;
41 	struct ufs_sb_private_info * uspi;
42 	struct ufs_super_block_first * usb1;
43 	struct ufs_cg_private_info * ucpi;
44 	struct ufs_cylinder_group * ucg;
45 	unsigned cgno, bit, end_bit, bbase, blkmap, i;
46 	u64 blkno;
47 
48 	sb = inode->i_sb;
49 	uspi = UFS_SB(sb)->s_uspi;
50 	usb1 = ubh_get_usb_first(uspi);
51 
52 	UFSD("ENTER, fragment %llu, count %u\n",
53 	     (unsigned long long)fragment, count);
54 
55 	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
56 		ufs_error (sb, "ufs_free_fragments", "internal error");
57 
58 	lock_super(sb);
59 
60 	cgno = ufs_dtog(uspi, fragment);
61 	bit = ufs_dtogd(uspi, fragment);
62 	if (cgno >= uspi->s_ncg) {
63 		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
64 		goto failed;
65 	}
66 
67 	ucpi = ufs_load_cylinder (sb, cgno);
68 	if (!ucpi)
69 		goto failed;
70 	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
71 	if (!ufs_cg_chkmagic(sb, ucg)) {
72 		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
73 		goto failed;
74 	}
75 
76 	end_bit = bit + count;
77 	bbase = ufs_blknum (bit);
78 	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
79 	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
80 	for (i = bit; i < end_bit; i++) {
81 		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
82 			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
83 		else
84 			ufs_error (sb, "ufs_free_fragments",
85 				   "bit already cleared for fragment %u", i);
86 	}
87 
88 	dquot_free_block(inode, count);
89 
90 
91 	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
92 	uspi->cs_total.cs_nffree += count;
93 	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
94 	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
95 	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);
96 
97 	/*
98 	 * Trying to reassemble free fragments into block
99 	 */
100 	blkno = ufs_fragstoblks (bbase);
101 	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
102 		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
103 		uspi->cs_total.cs_nffree -= uspi->s_fpb;
104 		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
105 		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
106 			ufs_clusteracct (sb, ucpi, blkno, 1);
107 		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
108 		uspi->cs_total.cs_nbfree++;
109 		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
110 		if (uspi->fs_magic != UFS2_MAGIC) {
111 			unsigned cylno = ufs_cbtocylno (bbase);
112 
113 			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
114 						  ufs_cbtorpos(bbase)), 1);
115 			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
116 		}
117 	}
118 
119 	ubh_mark_buffer_dirty (USPI_UBH(uspi));
120 	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
121 	if (sb->s_flags & MS_SYNCHRONOUS) {
122 		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
123 		ubh_wait_on_buffer (UCPI_UBH(ucpi));
124 	}
125 	sb->s_dirt = 1;
126 
127 	unlock_super (sb);
128 	UFSD("EXIT\n");
129 	return;
130 
131 failed:
132 	unlock_super (sb);
133 	UFSD("EXIT (FAILED)\n");
134 	return;
135 }
136 
137 /*
138  * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
139  */
/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	/* both start and length must be whole-block aligned */
	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			   "fragment %llu, count %u\n",
			   (unsigned long long)fragment, count);
		goto failed;
	}

	lock_super(sb);

do_more:
	overflow = 0;
	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
		goto failed_unlock;
	}
	end_bit = bit + count;
	/*
	 * The run may extend past this cylinder group: clamp it here and
	 * remember the remainder in 'overflow' so a later do_more pass
	 * handles the next group.
	 */
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed_unlock;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed_unlock;
	}

	/* mark each whole block free and update the counters */
	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		dquot_free_block(inode, uspi->s_fpb);

		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);

		/* UFS1 additionally keeps per-cylinder rotational tables */
		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno(i);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(i)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}

	/* continue with the part that spilled into the next cylinder group */
	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	sb->s_dirt = 1;
	unlock_super (sb);
	UFSD("EXIT\n");
	return;

failed_unlock:
	unlock_super (sb);
failed:
	UFSD("EXIT (FAILED)\n");
	return;
}
237 
/*
 * Modify the inode page cache in the following way:
 * have - buffers with b_blocknr equal to oldb...oldb+count-1
 * get  - buffers with b_blocknr equal to newb...newb+count-1
 * We also assume that blocks oldb...oldb+count-1 are
 * situated at the end of the file.
 *
 * We can come here from ufs_writepage or ufs_prepare_write;
 * locked_page is an argument of these functions, so it is already locked.
 */
static void ufs_change_blocknr(struct inode *inode, sector_t beg,
			       unsigned int count, sector_t oldb,
			       sector_t newb, struct page *locked_page)
{
	/* number of filesystem blocks per page-cache page */
	const unsigned blks_per_page =
		1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	const unsigned mask = blks_per_page - 1;
	struct address_space * const mapping = inode->i_mapping;
	pgoff_t index, cur_index, last_index;
	unsigned pos, j, lblock;
	sector_t end, i;
	struct page *page;
	struct buffer_head *head, *bh;

	UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
	      inode->i_ino, count,
	     (unsigned long long)oldb, (unsigned long long)newb);

	BUG_ON(!locked_page);
	BUG_ON(!PageLocked(locked_page));

	cur_index = locked_page->index;
	end = count + beg;
	last_index = end >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
	/* walk page by page: (i | mask) + 1 jumps to the next page boundary */
	for (i = beg; i < end; i = (i | mask) + 1) {
		index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits);

		if (likely(cur_index != index)) {
			page = ufs_get_locked_page(mapping, index);
			if (!page)/* it was truncated */
				continue;
			if (IS_ERR(page)) {/* or EIO */
				ufs_error(inode->i_sb, __func__,
					  "read of page %llu failed\n",
					  (unsigned long long)index);
				continue;
			}
		} else
			page = locked_page;	/* already locked by the caller */

		head = page_buffers(page);
		bh = head;
		pos = i & mask;
		/* advance to the buffer corresponding to block i */
		for (j = 0; j < pos; ++j)
			bh = bh->b_this_page;


		/* the last page may be only partially covered by the range */
		if (unlikely(index == last_index))
			lblock = end & mask;
		else
			lblock = blks_per_page;

		do {
			if (j >= lblock)
				break;
			pos = (i - beg) + j;

			if (!buffer_mapped(bh))
					map_bh(bh, inode->i_sb, oldb + pos);
			if (!buffer_uptodate(bh)) {
				ll_rw_block(READ, 1, &bh);
				wait_on_buffer(bh);
				if (!buffer_uptodate(bh)) {
					ufs_error(inode->i_sb, __func__,
						  "read of block failed\n");
					break;
				}
			}

			UFSD(" change from %llu to %llu, pos %u\n",
			     (unsigned long long)(pos + oldb),
			     (unsigned long long)(pos + newb), pos);

			/* retarget the buffer to its new on-disk location */
			bh->b_blocknr = newb + pos;
			unmap_underlying_metadata(bh->b_bdev,
						  bh->b_blocknr);
			mark_buffer_dirty(bh);
			++j;
			bh = bh->b_this_page;
		} while (bh != head);

		if (likely(cur_index != index))
			ufs_put_locked_page(page);
	}
	UFSD("EXIT\n");
}
334 
335 static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
336 			    int sync)
337 {
338 	struct buffer_head *bh;
339 	sector_t end = beg + n;
340 
341 	for (; beg < end; ++beg) {
342 		bh = sb_getblk(inode->i_sb, beg);
343 		lock_buffer(bh);
344 		memset(bh->b_data, 0, inode->i_sb->s_blocksize);
345 		set_buffer_uptodate(bh);
346 		mark_buffer_dirty(bh);
347 		unlock_buffer(bh);
348 		if (IS_SYNC(inode) || sync)
349 			sync_dirty_buffer(bh);
350 		brelse(bh);
351 	}
352 }
353 
354 u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
355 			   u64 goal, unsigned count, int *err,
356 			   struct page *locked_page)
357 {
358 	struct super_block * sb;
359 	struct ufs_sb_private_info * uspi;
360 	struct ufs_super_block_first * usb1;
361 	unsigned cgno, oldcount, newcount;
362 	u64 tmp, request, result;
363 
364 	UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
365 	     inode->i_ino, (unsigned long long)fragment,
366 	     (unsigned long long)goal, count);
367 
368 	sb = inode->i_sb;
369 	uspi = UFS_SB(sb)->s_uspi;
370 	usb1 = ubh_get_usb_first(uspi);
371 	*err = -ENOSPC;
372 
373 	lock_super (sb);
374 	tmp = ufs_data_ptr_to_cpu(sb, p);
375 
376 	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
377 		ufs_warning(sb, "ufs_new_fragments", "internal warning"
378 			    " fragment %llu, count %u",
379 			    (unsigned long long)fragment, count);
380 		count = uspi->s_fpb - ufs_fragnum(fragment);
381 	}
382 	oldcount = ufs_fragnum (fragment);
383 	newcount = oldcount + count;
384 
385 	/*
386 	 * Somebody else has just allocated our fragments
387 	 */
388 	if (oldcount) {
389 		if (!tmp) {
390 			ufs_error(sb, "ufs_new_fragments", "internal error, "
391 				  "fragment %llu, tmp %llu\n",
392 				  (unsigned long long)fragment,
393 				  (unsigned long long)tmp);
394 			unlock_super(sb);
395 			return INVBLOCK;
396 		}
397 		if (fragment < UFS_I(inode)->i_lastfrag) {
398 			UFSD("EXIT (ALREADY ALLOCATED)\n");
399 			unlock_super (sb);
400 			return 0;
401 		}
402 	}
403 	else {
404 		if (tmp) {
405 			UFSD("EXIT (ALREADY ALLOCATED)\n");
406 			unlock_super(sb);
407 			return 0;
408 		}
409 	}
410 
411 	/*
412 	 * There is not enough space for user on the device
413 	 */
414 	if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
415 		unlock_super (sb);
416 		UFSD("EXIT (FAILED)\n");
417 		return 0;
418 	}
419 
420 	if (goal >= uspi->s_size)
421 		goal = 0;
422 	if (goal == 0)
423 		cgno = ufs_inotocg (inode->i_ino);
424 	else
425 		cgno = ufs_dtog(uspi, goal);
426 
427 	/*
428 	 * allocate new fragment
429 	 */
430 	if (oldcount == 0) {
431 		result = ufs_alloc_fragments (inode, cgno, goal, count, err);
432 		if (result) {
433 			ufs_cpu_to_data_ptr(sb, p, result);
434 			*err = 0;
435 			UFS_I(inode)->i_lastfrag =
436 				max_t(u32, UFS_I(inode)->i_lastfrag,
437 				      fragment + count);
438 			ufs_clear_frags(inode, result + oldcount,
439 					newcount - oldcount, locked_page != NULL);
440 		}
441 		unlock_super(sb);
442 		UFSD("EXIT, result %llu\n", (unsigned long long)result);
443 		return result;
444 	}
445 
446 	/*
447 	 * resize block
448 	 */
449 	result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
450 	if (result) {
451 		*err = 0;
452 		UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
453 		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
454 				locked_page != NULL);
455 		unlock_super(sb);
456 		UFSD("EXIT, result %llu\n", (unsigned long long)result);
457 		return result;
458 	}
459 
460 	/*
461 	 * allocate new block and move data
462 	 */
463 	switch (fs32_to_cpu(sb, usb1->fs_optim)) {
464 	    case UFS_OPTSPACE:
465 		request = newcount;
466 		if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
467 		    > uspi->s_dsize * uspi->s_minfree / (2 * 100))
468 			break;
469 		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
470 		break;
471 	    default:
472 		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
473 
474 	    case UFS_OPTTIME:
475 		request = uspi->s_fpb;
476 		if (uspi->cs_total.cs_nffree < uspi->s_dsize *
477 		    (uspi->s_minfree - 2) / 100)
478 			break;
479 		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
480 		break;
481 	}
482 	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
483 	if (result) {
484 		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
485 				locked_page != NULL);
486 		ufs_change_blocknr(inode, fragment - oldcount, oldcount,
487 				   uspi->s_sbbase + tmp,
488 				   uspi->s_sbbase + result, locked_page);
489 		ufs_cpu_to_data_ptr(sb, p, result);
490 		*err = 0;
491 		UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
492 		unlock_super(sb);
493 		if (newcount < request)
494 			ufs_free_fragments (inode, result + newcount, request - newcount);
495 		ufs_free_fragments (inode, tmp, oldcount);
496 		UFSD("EXIT, result %llu\n", (unsigned long long)result);
497 		return result;
498 	}
499 
500 	unlock_super(sb);
501 	UFSD("EXIT (FAILED)\n");
502 	return 0;
503 }
504 
/*
 * Try to extend an existing tail of 'oldcount' fragments at 'fragment'
 * in place to 'newcount' fragments.  Returns 'fragment' on success, or
 * 0 when the following fragments are not free (the caller then falls
 * back to relocating the block).  Caller holds the superblock lock.
 */
static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
			     unsigned oldcount, unsigned newcount, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, fragno, fragoff, count, fragsize, i;
	int ret;

	UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
	     (unsigned long long)fragment, oldcount, newcount);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first (uspi);
	count = newcount - oldcount;	/* number of extra fragments needed */

	cgno = ufs_dtog(uspi, fragment);
	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
		return 0;
	/* an in-place extension may never cross a block boundary */
	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
		return 0;
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_add_fragments",
			"internal error, bad magic number on cg %u", cgno);
		return 0;
	}

	fragno = ufs_dtogd(uspi, fragment);
	fragoff = ufs_fragnum (fragno);
	/* every fragment we want to grow into must currently be free */
	for (i = oldcount; i < newcount; i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			return 0;
	/*
	 * Block can be extended
	 */
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
	/* measure the free run following the tail to fix up cg_frsum */
	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			break;
	fragsize = i - oldcount;
	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
		ufs_panic (sb, "ufs_add_fragments",
			"internal error or corrupted bitmap on cg %u", cgno);
	/* the old free run disappears; a shorter one may remain */
	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
	if (fragsize != count)
		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
	for (i = oldcount; i < newcount; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
	/*
	 * NOTE(review): on quota failure the bitmap/frsum changes above are
	 * not rolled back -- confirm against later upstream fixes.
	 */
	ret = dquot_alloc_block(inode, count);
	if (ret) {
		*err = ret;
		return 0;
	}

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	sb->s_dirt = 1;

	UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);

	return fragment;
}
582 
/*
 * Quick test whether cylinder group 'cgno' can satisfy the request:
 * either a whole free block, or (via the cg_frsum histogram of free
 * run lengths) a free run of at least 'count' fragments.  Expands
 * inside ufs_alloc_fragments() and uses its locals sb, uspi, ucg,
 * cgno, count and k; jumps to cg_found on success.
 */
#define UFS_TEST_FREE_SPACE_CG \
	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
		goto cg_found; \
	for (k = count; k < uspi->s_fpb; k++) \
		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
			goto cg_found;

/*
 * Allocate 'count' fragments near 'goal', starting the search in
 * cylinder group 'cgno'.  Returns the filesystem-wide number of the
 * first allocated fragment, or 0 on failure (*err is set on quota
 * errors).  Caller holds the superblock lock.
 */
static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
			       u64 goal, unsigned count, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned oldcg, i, j, k, allocsize;
	u64 result;
	int ret;

	UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
	     inode->i_ino, cgno, (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	oldcg = cgno;

	/*
	 * 1. searching on preferred cylinder group
	 */
	UFS_TEST_FREE_SPACE_CG

	/*
	 * 2. quadratic rehash
	 */
	for (j = 1; j < uspi->s_ncg; j *= 2) {
		cgno += j;
		if (cgno >= uspi->s_ncg)
			cgno -= uspi->s_ncg;
		UFS_TEST_FREE_SPACE_CG
	}

	/*
	 * 3. brute force search
	 * We start at i = 2 ( 0 is checked at 1.step, 1 at 2.step )
	 */
	cgno = (oldcg + 1) % uspi->s_ncg;
	for (j = 2; j < uspi->s_ncg; j++) {
		cgno++;
		if (cgno >= uspi->s_ncg)
			cgno = 0;
		UFS_TEST_FREE_SPACE_CG
	}

	UFSD("EXIT (FAILED)\n");
	return 0;

cg_found:
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg))
		ufs_panic (sb, "ufs_alloc_fragments",
			"internal error, bad magic number on cg %u", cgno);
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());

	/* a full block was requested: take a whole free block */
	if (count == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goto succed;
	}

	/* find the smallest available free-run size >= count */
	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
			break;

	if (allocsize == uspi->s_fpb) {
		/*
		 * No suitable partial run: break up a whole block and give
		 * back the (s_fpb - count) fragments we do not need.
		 */
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goal = ufs_dtogd(uspi, result);
		for (i = count; i < uspi->s_fpb; i++)
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
		i = uspi->s_fpb - count;
		dquot_free_block(inode, i);

		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
		uspi->cs_total.cs_nffree += i;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
		fs32_add(sb, &ucg->cg_frsum[i], 1);
		goto succed;
	}

	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
	if (result == INVBLOCK)
		return 0;
	ret = dquot_alloc_block(inode, count);
	if (ret) {
		*err = ret;
		return 0;
	}
	/* claim the fragments and update the free-run statistics */
	for (i = 0; i < count; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

	if (count != allocsize)
		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succed:
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_ll_rw_block(SWRITE, UCPI_UBH(ucpi));
		ubh_wait_on_buffer (UCPI_UBH(ucpi));
	}
	sb->s_dirt = 1;

	/* turn the cg-relative fragment number into a fs-wide one */
	result += cgno * uspi->s_fpg;
	UFSD("EXIT3, result %llu\n", (unsigned long long)result);
	return result;
}
711 
/*
 * Allocate one whole block inside cylinder group 'ucpi', preferably the
 * block containing fragment 'goal' (0 means "no preference": continue
 * from the cg rotor).  Returns the cg-relative fragment number of the
 * allocated block, or INVBLOCK on failure.
 */
static u64 ufs_alloccg_block(struct inode *inode,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	struct ufs_cylinder_group * ucg;
	u64 result, blkno;
	int ret;

	UFSD("ENTER, goal %llu\n", (unsigned long long)goal);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	if (goal == 0) {
		goal = ucpi->c_rotor;	/* continue from the last allocation */
		goto norot;
	}
	goal = ufs_blknum (goal);	/* round down to a block boundary */
	goal = ufs_dtogd(uspi, goal);

	/*
	 * If the requested block is available, use it.
	 */
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
		result = goal;
		goto gotit;
	}

norot:
	/* otherwise take the next free whole block after 'goal' */
	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
	if (result == INVBLOCK)
		return INVBLOCK;
	ucpi->c_rotor = result;
gotit:
	blkno = ufs_fragstoblks(result);
	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
		ufs_clusteracct (sb, ucpi, blkno, -1);
	ret = dquot_alloc_block(inode, uspi->s_fpb);
	if (ret) {
		*err = ret;
		return INVBLOCK;
	}

	fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
	uspi->cs_total.cs_nbfree--;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);

	/* UFS1 additionally keeps per-cylinder rotational tables */
	if (uspi->fs_magic != UFS2_MAGIC) {
		unsigned cylno = ufs_cbtocylno((unsigned)result);

		fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
					  ufs_cbtorpos((unsigned)result)), 1);
		fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}

	UFSD("EXIT, result %llu\n", (unsigned long long)result);

	return result;
}
777 
778 static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
779 			  struct ufs_buffer_head *ubh,
780 			  unsigned begin, unsigned size,
781 			  unsigned char *table, unsigned char mask)
782 {
783 	unsigned rest, offset;
784 	unsigned char *cp;
785 
786 
787 	offset = begin & ~uspi->s_fmask;
788 	begin >>= uspi->s_fshift;
789 	for (;;) {
790 		if ((offset + size) < uspi->s_fsize)
791 			rest = size;
792 		else
793 			rest = uspi->s_fsize - offset;
794 		size -= rest;
795 		cp = ubh->bh[begin]->b_data + offset;
796 		while ((table[*cp++] & mask) == 0 && --rest)
797 			;
798 		if (rest || !size)
799 			break;
800 		begin++;
801 		offset = 0;
802 	}
803 	return (size + rest);
804 }
805 
/*
 * Find a run of free fragments of the specified size in the specified
 * cylinder group.
 * @sb: pointer to super block
 * @ucpi: pointer to cylinder group info
 * @goal: near which block we want find new one
 * @count: specified size
 *
 * Returns the cg-relative number of the first fragment of the run, or
 * INVBLOCK when no suitable run exists.
 */
static u64 ufs_bitmap_search(struct super_block *sb,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, unsigned count)
{
	/*
	 * Bit patterns for identifying fragments in the block map
	 * used as ((map & mask_arr) == want_arr)
	 */
	static const int mask_arr[9] = {
		0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
	};
	static const int want_arr[9] = {
		0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
	};
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct ufs_super_block_first *usb1;
	struct ufs_cylinder_group *ucg;
	unsigned start, length, loc;
	unsigned pos, want, blockmap, mask, end;
	u64 result;

	UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
	     (unsigned long long)goal, count);

	usb1 = ubh_get_usb_first (uspi);
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	/* start the byte-wise scan at the goal, or at the cg frotor */
	if (goal)
		start = ufs_dtogd(uspi, goal) >> 3;
	else
		start = ucpi->c_frotor >> 3;

	/* scan forward from 'start'; on a miss retry from the beginning */
	length = ((uspi->s_fpg + 7) >> 3) - start;
	loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
		(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
		1 << (count - 1 + (uspi->s_fpb & 7)));
	if (loc == 0) {
		length = start + 1;
		loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
				(uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
				ufs_fragtable_other,
				1 << (count - 1 + (uspi->s_fpb & 7)));
		if (loc == 0) {
			ufs_error(sb, "ufs_bitmap_search",
				  "bitmap corrupted on cg %u, start %u,"
				  " length %u, count %u, freeoff %u\n",
				  ucpi->c_cgx, start, length, count,
				  ucpi->c_freeoff);
			return INVBLOCK;
		}
		start = 0;
	}
	/* ubh_scanc counts backwards from the end of the scanned range */
	result = (start + length - loc) << 3;
	ucpi->c_frotor = result;

	/*
	 * found the byte in the map
	 */

	/* locate the exact bit position of the free run within that byte */
	for (end = result + 8; result < end; result += uspi->s_fpb) {
		blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
		blockmap <<= 1;
		mask = mask_arr[count];
		want = want_arr[count];
		for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
			if ((blockmap & mask) == want) {
				UFSD("EXIT, result %llu\n",
				     (unsigned long long)result);
				return result + pos;
			}
			mask <<= 1;
			want <<= 1;
		}
	}

	ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
		  ucpi->c_cgx);
	UFSD("EXIT (FAILED)\n");
	return INVBLOCK;
}
893 
/*
 * Maintain the 44BSD cluster accounting data after block 'blkno'
 * changes state: cnt == 1 when the block was freed, cnt == -1 when it
 * was allocated.  Updates the cluster bitmap and the summary array of
 * free-cluster lengths (lengths are capped at s_contigsumsize).
 */
static void ufs_clusteracct(struct super_block * sb,
	struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
{
	struct ufs_sb_private_info * uspi;
	int i, start, end, forw, back;

	uspi = UFS_SB(sb)->s_uspi;
	if (uspi->s_contigsumsize <= 0)
		return;

	/* in the cluster map a set bit means the block is free */
	if (cnt > 0)
		ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
	else
		ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);

	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + uspi->s_contigsumsize;
	if ( end >= ucpi->c_nclusterblks)
		end = ucpi->c_nclusterblks;
	i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
	if (i > end)
		i = end;
	forw = i - start;

	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - uspi->s_contigsumsize;
	if (end < 0 )
		end = -1;
	i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
	if ( i < end)
		i = end;
	back = start - i;

	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > uspi->s_contigsumsize)
		i = uspi->s_contigsumsize;
	fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
	if (back > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
	if (forw > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
}
946 
947 
/*
 * Lookup tables used by ufs_fragacct() and ubh_scanc(): each entry
 * maps one free-map byte value to a bit set describing the free
 * fragment runs it contains.  This variant is for 8 fragments per
 * block (one block per bitmap byte).
 */
static unsigned char ufs_fragtable_8fpb[] = {
	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};

/*
 * Same idea for filesystems with fewer than 8 fragments per block
 * (several blocks share one bitmap byte).
 */
static unsigned char ufs_fragtable_other[] = {
	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};
985