/*
 *  linux/fs/ufs/balloc.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
 */

#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/capability.h>
#include <linux/bitops.h>
#include <asm/byteorder.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

#define INVBLOCK ((u64)-1L)
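/*
 * INVBLOCK is the "no such block" sentinel returned by the allocation
 * helpers below (ufs_alloccg_block, ufs_bitmap_search) on failure.
 */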

static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned);
static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);

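/*
 * Note on the bitmaps: a set bit in a cylinder group's free-fragment
 * map at c_freeoff means "free" and a clear bit means "allocated";
 * freeing sets bits, allocating clears them.
 */
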
/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);
	if (ufs_fragnum(fragment) + count > uspi->s_fpb)
		ufs_error (sb, "ufs_free_fragments", "internal error");

	lock_ufs(sb);

	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_fragments", "freeing blocks outside the device");
		goto failed;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}

	end_bit = bit + count;
	bbase = ufs_blknum (bit);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
	for (i = bit; i < end_bit; i++) {
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
		else
			ufs_error (sb, "ufs_free_fragments",
				   "bit already set for fragment %u", i);
	}

	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree += count;
	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

	/*
	 * Try to reassemble the freed fragments into a whole block: if
	 * every fragment of the containing block is now free, convert
	 * the accounting from s_fpb free fragments to one free block.
	 */
	blkno = ufs_fragstoblks (bbase);
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
		uspi->cs_total.cs_nffree -= uspi->s_fpb;
		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno (bbase);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(bbase)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	unlock_ufs(sb);
	UFSD("EXIT\n");
	return;

failed:
	unlock_ufs(sb);
	UFSD("EXIT (FAILED)\n");
	return;
}

/*
 * Free 'count' fragments starting at fragment number 'fragment';
 * the range must consist of whole blocks.
 */
void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			   "fragment %llu, count %u\n",
			   (unsigned long long)fragment, count);
		goto failed;
	}

	lock_ufs(sb);

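	/*
	 * The range may span a cylinder-group boundary: handle the part
	 * that fits in the current group, then loop back via 'overflow'
	 * for the remainder.
	 */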
do_more:
	overflow = 0;
	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks outside the device");
		goto failed_unlock;
	}
	end_bit = bit + count;
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed_unlock;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed_unlock;
	}

	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);

		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);

		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno(i);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(i)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));

	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	ufs_mark_sb_dirty(sb);
	unlock_ufs(sb);
	UFSD("EXIT\n");
	return;

failed_unlock:
	unlock_ufs(sb);
failed:
	UFSD("EXIT (FAILED)\n");
	return;
}

/*
 * Modify the inode page cache in the following way:
 * we have blocks with b_blocknr equal to oldb...oldb+count-1 and we
 * want blocks with b_blocknr equal to newb...newb+count-1; we also
 * assume that the blocks oldb...oldb+count-1 are situated at the end
 * of the file.
 *
 * We can get here from ufs_writepage or ufs_prepare_write;
 * locked_page is an argument of those functions, so it is already
 * locked.
 */
static void ufs_change_blocknr(struct inode *inode, sector_t beg,
			       unsigned int count, sector_t oldb,
			       sector_t newb, struct page *locked_page)
{
	const unsigned blks_per_page =
		1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	const unsigned mask = blks_per_page - 1;
	struct address_space * const mapping = inode->i_mapping;
	pgoff_t index, cur_index, last_index;
	unsigned pos, j, lblock;
	sector_t end, i;
	struct page *page;
	struct buffer_head *head, *bh;

	UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
	     inode->i_ino, count,
	     (unsigned long long)oldb, (unsigned long long)newb);

	BUG_ON(!locked_page);
	BUG_ON(!PageLocked(locked_page));

	cur_index = locked_page->index;
	end = count + beg;
	last_index = end >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
	for (i = beg; i < end; i = (i | mask) + 1) {
		index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits);

		if (likely(cur_index != index)) {
			page = ufs_get_locked_page(mapping, index);
			if (!page) /* it was truncated */
				continue;
			if (IS_ERR(page)) { /* or EIO */
				ufs_error(inode->i_sb, __func__,
					  "read of page %llu failed\n",
					  (unsigned long long)index);
				continue;
			}
		} else
			page = locked_page;

		head = page_buffers(page);
		bh = head;
		pos = i & mask;
		for (j = 0; j < pos; ++j)
			bh = bh->b_this_page;

		if (unlikely(index == last_index))
			lblock = end & mask;
		else
			lblock = blks_per_page;

		do {
			if (j >= lblock)
				break;
			pos = (i - beg) + j;

			if (!buffer_mapped(bh))
				map_bh(bh, inode->i_sb, oldb + pos);
			if (!buffer_uptodate(bh)) {
				ll_rw_block(READ, 1, &bh);
				wait_on_buffer(bh);
				if (!buffer_uptodate(bh)) {
					ufs_error(inode->i_sb, __func__,
						  "read of block failed\n");
					break;
				}
			}

			UFSD(" change from %llu to %llu, pos %u\n",
			     (unsigned long long)(pos + oldb),
			     (unsigned long long)(pos + newb), pos);

			bh->b_blocknr = newb + pos;
			unmap_underlying_metadata(bh->b_bdev,
						  bh->b_blocknr);
			mark_buffer_dirty(bh);
			++j;
			bh = bh->b_this_page;
		} while (bh != head);

		if (likely(cur_index != index))
			ufs_put_locked_page(page);
	}
	UFSD("EXIT\n");
}

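/*
 * Zero-fill the 'n' fragments starting at 'beg' through the buffer
 * cache, so that newly allocated fragments never expose stale
 * on-disk data.
 */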
static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
			    int sync)
{
	struct buffer_head *bh;
	sector_t end = beg + n;

	for (; beg < end; ++beg) {
		bh = sb_getblk(inode->i_sb, beg);
		lock_buffer(bh);
		memset(bh->b_data, 0, inode->i_sb->s_blocksize);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		if (IS_SYNC(inode) || sync)
			sync_dirty_buffer(bh);
		brelse(bh);
	}
}

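/*
 * Allocate 'count' fragments for the data pointer 'p' of 'inode',
 * preferably near 'goal'.  Three cases are handled below:
 *   1) 'fragment' starts a new block: allocate a fresh run of
 *      fragments (ufs_alloc_fragments);
 *   2) the tail of an existing block can grow in place
 *      (ufs_add_fragments);
 *   3) otherwise allocate a new area, move the old fragments there
 *      (ufs_change_blocknr) and free the old ones.
 * On success the new location is stored through 'p' and *err is set
 * to 0.
 */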
u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
			   u64 goal, unsigned count, int *err,
			   struct page *locked_page)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	unsigned cgno, oldcount, newcount;
	u64 tmp, request, result;

	UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
	     inode->i_ino, (unsigned long long)fragment,
	     (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	*err = -ENOSPC;

	lock_ufs(sb);
	tmp = ufs_data_ptr_to_cpu(sb, p);

	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
		ufs_warning(sb, "ufs_new_fragments", "internal warning"
			    " fragment %llu, count %u",
			    (unsigned long long)fragment, count);
		count = uspi->s_fpb - ufs_fragnum(fragment);
	}
	oldcount = ufs_fragnum (fragment);
	newcount = oldcount + count;

	/*
	 * Somebody else has just allocated our fragments
	 */
	if (oldcount) {
		if (!tmp) {
			ufs_error(sb, "ufs_new_fragments", "internal error, "
				  "fragment %llu, tmp %llu\n",
				  (unsigned long long)fragment,
				  (unsigned long long)tmp);
			unlock_ufs(sb);
			return INVBLOCK;
		}
		if (fragment < UFS_I(inode)->i_lastfrag) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			unlock_ufs(sb);
			return 0;
		}
	} else {
		if (tmp) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			unlock_ufs(sb);
			return 0;
		}
	}

	/*
	 * There is not enough space for the user on the device
	 */
	if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
		unlock_ufs(sb);
		UFSD("EXIT (FAILED)\n");
		return 0;
	}

	if (goal >= uspi->s_size)
		goal = 0;
	if (goal == 0)
		cgno = ufs_inotocg (inode->i_ino);
	else
		cgno = ufs_dtog(uspi, goal);

	/*
	 * allocate new fragment
	 */
	if (oldcount == 0) {
		result = ufs_alloc_fragments (inode, cgno, goal, count, err);
		if (result) {
			ufs_cpu_to_data_ptr(sb, p, result);
			*err = 0;
			UFS_I(inode)->i_lastfrag =
				max(UFS_I(inode)->i_lastfrag, fragment + count);
			ufs_clear_frags(inode, result + oldcount,
					newcount - oldcount, locked_page != NULL);
		}
		unlock_ufs(sb);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	/*
	 * resize block
	 */
	result = ufs_add_fragments(inode, tmp, oldcount, newcount);
	if (result) {
		*err = 0;
		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
						fragment + count);
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		unlock_ufs(sb);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	/*
	 * allocate new block and move data
	 */
	switch (fs32_to_cpu(sb, usb1->fs_optim)) {
	    case UFS_OPTSPACE:
		request = newcount;
		if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
		    > uspi->s_dsize * uspi->s_minfree / (2 * 100))
			break;
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		break;
	    default:
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);

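		/* fall through: an unknown value is treated as UFS_OPTTIME */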
	    case UFS_OPTTIME:
		request = uspi->s_fpb;
		if (uspi->cs_total.cs_nffree < uspi->s_dsize *
		    (uspi->s_minfree - 2) / 100)
			break;
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE);
		break;
	}
	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
	if (result) {
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		ufs_change_blocknr(inode, fragment - oldcount, oldcount,
				   uspi->s_sbbase + tmp,
				   uspi->s_sbbase + result, locked_page);
		ufs_cpu_to_data_ptr(sb, p, result);
		*err = 0;
		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
						fragment + count);
		unlock_ufs(sb);
		if (newcount < request)
			ufs_free_fragments (inode, result + newcount, request - newcount);
		ufs_free_fragments (inode, tmp, oldcount);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	unlock_ufs(sb);
	UFSD("EXIT (FAILED)\n");
	return 0;
}

static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
			     unsigned oldcount, unsigned newcount)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, fragno, fragoff, count, fragsize, i;

	UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
	     (unsigned long long)fragment, oldcount, newcount);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	count = newcount - oldcount;

	cgno = ufs_dtog(uspi, fragment);
	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
		return 0;
	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
		return 0;
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_add_fragments",
			"internal error, bad magic number on cg %u", cgno);
		return 0;
	}

	fragno = ufs_dtogd(uspi, fragment);
	fragoff = ufs_fragnum (fragno);
	for (i = oldcount; i < newcount; i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			return 0;
	/*
	 * The block can be extended in place: the fragments just past
	 * the current tail are free
	 */
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			break;
	fragsize = i - oldcount;
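	/*
	 * The tail sits at the start of a free run of 'fragsize'
	 * fragments; consuming 'count' of them shrinks that run, so
	 * move one run from the frsum[] bucket for 'fragsize' to the
	 * bucket for the remainder (if any).
	 */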
	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
		ufs_panic (sb, "ufs_add_fragments",
			"internal error or corrupted bitmap on cg %u", cgno);
	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
	if (fragsize != count)
		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
	for (i = oldcount; i < newcount; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);

	return fragment;
}

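/*
 * Check whether cylinder group 'cgno' can satisfy the allocation: it
 * must contain either a whole free block or a free run of at least
 * 'count' fragments.
 */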
#define UFS_TEST_FREE_SPACE_CG \
	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
		goto cg_found; \
	for (k = count; k < uspi->s_fpb; k++) \
		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
			goto cg_found;

static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
			       u64 goal, unsigned count, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned oldcg, i, j, k, allocsize;
	u64 result;

	UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
	     inode->i_ino, cgno, (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	oldcg = cgno;

	/*
	 * 1. search the preferred cylinder group
	 */
	UFS_TEST_FREE_SPACE_CG

	/*
	 * 2. quadratic rehash: probe the groups at offsets 1, 3, 7,
	 * 15, ... from the preferred one
	 */
	for (j = 1; j < uspi->s_ncg; j *= 2) {
		cgno += j;
		if (cgno >= uspi->s_ncg)
			cgno -= uspi->s_ncg;
		UFS_TEST_FREE_SPACE_CG
	}

	/*
	 * 3. brute force search over the remaining groups, starting at
	 * offset 2 from the preferred one (offset 0 was checked in
	 * step 1, offset 1 in step 2)
	 */
	cgno = (oldcg + 1) % uspi->s_ncg;
	for (j = 2; j < uspi->s_ncg; j++) {
		cgno++;
		if (cgno >= uspi->s_ncg)
			cgno = 0;
		UFS_TEST_FREE_SPACE_CG
	}

	UFSD("EXIT (FAILED)\n");
	return 0;

cg_found:
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg))
		ufs_panic (sb, "ufs_alloc_fragments",
			"internal error, bad magic number on cg %u", cgno);
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());

	if (count == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goto succeed;
	}

	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
			break;

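	/*
	 * No free run of 'count' or more fragments exists in this
	 * group, but a whole free block does: take a block and return
	 * its unused tail to the free-fragment pool.
	 */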
	if (allocsize == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goal = ufs_dtogd(uspi, result);
		for (i = count; i < uspi->s_fpb; i++)
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
		i = uspi->s_fpb - count;

		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
		uspi->cs_total.cs_nffree += i;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
		fs32_add(sb, &ucg->cg_frsum[i], 1);
		goto succeed;
	}

	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
	if (result == INVBLOCK)
		return 0;
	for (i = 0; i < count; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

	if (count != allocsize)
		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succeed:
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	result += cgno * uspi->s_fpg;
	UFSD("EXIT3, result %llu\n", (unsigned long long)result);
	return result;
}

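/*
 * Allocate a whole block in cylinder group 'ucpi', preferably the one
 * containing 'goal'; returns the group-relative fragment number of
 * the block (the caller adds the group's base), or INVBLOCK if no
 * block is free.
 */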
static u64 ufs_alloccg_block(struct inode *inode,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cylinder_group * ucg;
	u64 result, blkno;

	UFSD("ENTER, goal %llu\n", (unsigned long long)goal);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	if (goal == 0) {
		goal = ucpi->c_rotor;
		goto norot;
	}
	goal = ufs_blknum (goal);
	goal = ufs_dtogd(uspi, goal);

	/*
	 * If the requested block is available, use it.
	 */
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
		result = goal;
		goto gotit;
	}

norot:
	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
	if (result == INVBLOCK)
		return INVBLOCK;
	ucpi->c_rotor = result;
gotit:
	blkno = ufs_fragstoblks(result);
	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
		ufs_clusteracct (sb, ucpi, blkno, -1);

	fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
	uspi->cs_total.cs_nbfree--;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);

	if (uspi->fs_magic != UFS2_MAGIC) {
		unsigned cylno = ufs_cbtocylno((unsigned)result);

		fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
					  ufs_cbtorpos((unsigned)result)), 1);
		fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}

	UFSD("EXIT, result %llu\n", (unsigned long long)result);

	return result;
}

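/*
 * Scan 'size' bytes of a bitmap, starting at byte offset 'begin' and
 * walking the buffer heads that make up 'ubh'.  A byte matches when
 * (table[byte] & mask) is non-zero.  Returns the number of bytes left
 * in the scan range at the match, counting the matching byte itself,
 * or 0 if nothing matched.
 */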
static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
			  struct ufs_buffer_head *ubh,
			  unsigned begin, unsigned size,
			  unsigned char *table, unsigned char mask)
{
	unsigned rest, offset;
	unsigned char *cp;

	offset = begin & ~uspi->s_fmask;
	begin >>= uspi->s_fshift;
	for (;;) {
		if ((offset + size) < uspi->s_fsize)
			rest = size;
		else
			rest = uspi->s_fsize - offset;
		size -= rest;
		cp = ubh->bh[begin]->b_data + offset;
		while ((table[*cp++] & mask) == 0 && --rest)
			;
		if (rest || !size)
			break;
		begin++;
		offset = 0;
	}
	return (size + rest);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 * @sb: pointer to super block
 * @ucpi: pointer to cylinder group info
 * @goal: near which block we want to find a new one
 * @count: specified size
 */
static u64 ufs_bitmap_search(struct super_block *sb,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, unsigned count)
{
	/*
	 * Bit patterns for identifying fragments in the block map
	 * used as ((map & mask_arr) == want_arr)
	 */
	static const int mask_arr[9] = {
		0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
	};
	static const int want_arr[9] = {
		0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
	};
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned start, length, loc;
	unsigned pos, want, blockmap, mask, end;
	u64 result;

	UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
	     (unsigned long long)goal, count);

	if (goal)
		start = ufs_dtogd(uspi, goal) >> 3;
	else
		start = ucpi->c_frotor >> 3;

	length = ((uspi->s_fpg + 7) >> 3) - start;
	loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
		(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
		1 << (count - 1 + (uspi->s_fpb & 7)));
	if (loc == 0) {
		length = start + 1;
		loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
				(uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
				ufs_fragtable_other,
				1 << (count - 1 + (uspi->s_fpb & 7)));
		if (loc == 0) {
			ufs_error(sb, "ufs_bitmap_search",
				  "bitmap corrupted on cg %u, start %u,"
				  " length %u, count %u, freeoff %u\n",
				  ucpi->c_cgx, start, length, count,
				  ucpi->c_freeoff);
			return INVBLOCK;
		}
		start = 0;
	}
	result = (start + length - loc) << 3;
	ucpi->c_frotor = result;

	/*
	 * found the byte in the map
	 */

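	/*
	 * Example: a whole-block search with s_fpb == 8 uses
	 * mask_arr[8] == 0x3ff and want_arr[8] == 0x1fe; a fully free
	 * block map of 0xff becomes 0x1fe after the shift below and
	 * matches at pos 0.
	 */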
	for (end = result + 8; result < end; result += uspi->s_fpb) {
		blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
		blockmap <<= 1;
		mask = mask_arr[count];
		want = want_arr[count];
		for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
			if ((blockmap & mask) == want) {
				UFSD("EXIT, result %llu\n",
				     (unsigned long long)result);
				return result + pos;
			}
			mask <<= 1;
			want <<= 1;
		}
	}

	ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
		  ucpi->c_cgx);
	UFSD("EXIT (FAILED)\n");
	return INVBLOCK;
}

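/*
 * Maintain the per-cylinder-group cluster summary: the bitmap at
 * c_clusteroff tracks free blocks and the array at c_clustersumoff
 * counts free clusters by length (capped at s_contigsumsize).
 * 'cnt' is +1 when a block is freed, -1 when one is allocated.
 */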
static void ufs_clusteracct(struct super_block * sb,
	struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
{
	struct ufs_sb_private_info * uspi;
	int i, start, end, forw, back;

	uspi = UFS_SB(sb)->s_uspi;
	if (uspi->s_contigsumsize <= 0)
		return;

	if (cnt > 0)
		ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
	else
		ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);

	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + uspi->s_contigsumsize;
	if (end >= ucpi->c_nclusterblks)
		end = ucpi->c_nclusterblks;
	i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
	if (i > end)
		i = end;
	forw = i - start;

	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - uspi->s_contigsumsize;
	if (end < 0)
		end = -1;
	i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
	if (i < end)
		i = end;
	back = start - i;

	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > uspi->s_contigsumsize)
		i = uspi->s_contigsumsize;
	fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
	if (back > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
	if (forw > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
}

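/*
 * For every possible byte of the free-fragment map,
 * ufs_fragtable_8fpb[b] has bit (k - 1) set when byte b contains a
 * run of exactly k free (set) bits: 0x00 -> 0x00, 0x01 -> 0x01 (one
 * run of length 1), 0x03 -> 0x02 (one run of length 2), 0xff -> 0x80
 * (one run of length 8).  ubh_scanc() consults these tables with
 * mask 1 << (count - 1 + (s_fpb & 7)) to find a byte containing a
 * free run of the wanted length; ufs_fragtable_other encodes the
 * same information, shifted, for filesystems whose blocks hold fewer
 * than eight fragments.
 */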
static unsigned char ufs_fragtable_8fpb[] = {
	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};

static unsigned char ufs_fragtable_other[] = {
	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};
939