/*
 *  linux/fs/ufs/balloc.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
 */

#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/capability.h>
#include <linux/bitops.h>
#include <linux/bio.h>
#include <asm/byteorder.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

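/* Returned by the allocation helpers when no suitable block or run exists. */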
#define INVBLOCK ((u64)-1L)

static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned);
static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);

/*
 * Free 'count' fragments from fragment number 'fragment'
 */
void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, bit, end_bit, bbase, blkmap, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
		ufs_error (sb, "ufs_free_fragments", "internal error");

	mutex_lock(&UFS_SB(sb)->s_lock);

	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
		goto failed;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
		goto failed;
	}

	end_bit = bit + count;
	bbase = ufs_blknum (bit);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
	for (i = bit; i < end_bit; i++) {
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
		else
			ufs_error (sb, "ufs_free_fragments",
				   "bit already cleared for fragment %u", i);
	}

	inode_sub_bytes(inode, count << uspi->s_fshift);
	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree += count;
	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

	/*
	 * Try to reassemble the freed fragments back into a whole free block
	 */
	blkno = ufs_fragstoblks (bbase);
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
		uspi->cs_total.cs_nffree -= uspi->s_fpb;
		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);
		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno (bbase);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(bbase)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT\n");
	return;

failed:
	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT (FAILED)\n");
	return;
}

/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned overflow, cgno, bit, end_bit, i;
	u64 blkno;

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	UFSD("ENTER, fragment %llu, count %u\n",
	     (unsigned long long)fragment, count);

	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
		ufs_error (sb, "ufs_free_blocks", "internal error, "
			   "fragment %llu, count %u\n",
			   (unsigned long long)fragment, count);
		goto failed;
	}

	mutex_lock(&UFS_SB(sb)->s_lock);

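	/*
	 * The range may cross a cylinder group boundary: each pass frees
	 * what fits into one group and loops back via 'do_more' for the
	 * remainder.
	 */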
do_more:
	overflow = 0;
	cgno = ufs_dtog(uspi, fragment);
	bit = ufs_dtogd(uspi, fragment);
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
		goto failed_unlock;
	}
	end_bit = bit + count;
	if (end_bit > uspi->s_fpg) {
		overflow = bit + count - uspi->s_fpg;
		count -= overflow;
		end_bit -= overflow;
	}

	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		goto failed_unlock;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
		goto failed_unlock;
	}

	for (i = bit; i < end_bit; i += uspi->s_fpb) {
		blkno = ufs_fragstoblks(i);
		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
		}
		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
		inode_sub_bytes(inode, uspi->s_fpb << uspi->s_fshift);
		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
			ufs_clusteracct (sb, ucpi, blkno, 1);

		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
		uspi->cs_total.cs_nbfree++;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);

		if (uspi->fs_magic != UFS2_MAGIC) {
			unsigned cylno = ufs_cbtocylno(i);

			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
						  ufs_cbtorpos(i)), 1);
			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
		}
	}

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));

	if (overflow) {
		fragment += count;
		count = overflow;
		goto do_more;
	}

	ufs_mark_sb_dirty(sb);
	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT\n");
	return;

failed_unlock:
	mutex_unlock(&UFS_SB(sb)->s_lock);
failed:
	UFSD("EXIT (FAILED)\n");
	return;
}

/*
 * Modify the inode page cache so that buffers that currently have
 * b_blocknr equal to oldb...oldb+count-1 end up with b_blocknr equal
 * to newb...newb+count-1.  We also assume that the blocks
 * oldb...oldb+count-1 sit at the end of the file.
 *
 * We can get here from ufs_writepage or ufs_prepare_write;
 * locked_page is an argument of those functions, so it is already
 * locked.
 */
static void ufs_change_blocknr(struct inode *inode, sector_t beg,
			       unsigned int count, sector_t oldb,
			       sector_t newb, struct page *locked_page)
{
	const unsigned blks_per_page =
		1 << (PAGE_SHIFT - inode->i_blkbits);
	const unsigned mask = blks_per_page - 1;
	struct address_space * const mapping = inode->i_mapping;
	pgoff_t index, cur_index, last_index;
	unsigned pos, j, lblock;
	sector_t end, i;
	struct page *page;
	struct buffer_head *head, *bh;

	UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
	      inode->i_ino, count,
	     (unsigned long long)oldb, (unsigned long long)newb);

	BUG_ON(!locked_page);
	BUG_ON(!PageLocked(locked_page));

	cur_index = locked_page->index;
	end = count + beg;
	last_index = end >> (PAGE_SHIFT - inode->i_blkbits);
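	/*
	 * Walk the range one page at a time: (i | mask) + 1 advances
	 * from block i to the first block of the following page.
	 */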
	for (i = beg; i < end; i = (i | mask) + 1) {
		index = i >> (PAGE_SHIFT - inode->i_blkbits);

		if (likely(cur_index != index)) {
			page = ufs_get_locked_page(mapping, index);
			if (!page)/* it was truncated */
				continue;
			if (IS_ERR(page)) {/* or EIO */
				ufs_error(inode->i_sb, __func__,
					  "read of page %llu failed\n",
					  (unsigned long long)index);
				continue;
			}
		} else
			page = locked_page;

		head = page_buffers(page);
		bh = head;
		pos = i & mask;
		for (j = 0; j < pos; ++j)
			bh = bh->b_this_page;

		if (unlikely(index == last_index))
			lblock = end & mask;
		else
			lblock = blks_per_page;

		do {
			if (j >= lblock)
				break;
			pos = (i - beg) + j;

			if (!buffer_mapped(bh))
				map_bh(bh, inode->i_sb, oldb + pos);
			if (!buffer_uptodate(bh)) {
				ll_rw_block(REQ_OP_READ, 0, 1, &bh);
				wait_on_buffer(bh);
				if (!buffer_uptodate(bh)) {
					ufs_error(inode->i_sb, __func__,
						  "read of block failed\n");
					break;
				}
			}

			UFSD(" change from %llu to %llu, pos %u\n",
			     (unsigned long long)(pos + oldb),
			     (unsigned long long)(pos + newb), pos);

			bh->b_blocknr = newb + pos;
			clean_bdev_bh_alias(bh);
			mark_buffer_dirty(bh);
			++j;
			bh = bh->b_this_page;
		} while (bh != head);

		if (likely(cur_index != index))
			ufs_put_locked_page(page);
	}
	UFSD("EXIT\n");
}

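/*
 * Zero-fill 'n' fragments starting at 'beg' through the buffer cache,
 * so that newly allocated fragments never expose stale on-disk data;
 * the buffers are written out synchronously when the inode or the
 * caller requires it.
 */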
static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
			    int sync)
{
	struct buffer_head *bh;
	sector_t end = beg + n;

	for (; beg < end; ++beg) {
		bh = sb_getblk(inode->i_sb, beg);
		lock_buffer(bh);
		memset(bh->b_data, 0, inode->i_sb->s_blocksize);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		if (IS_SYNC(inode) || sync)
			sync_dirty_buffer(bh);
		brelse(bh);
	}
}

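/*
 * Allocate 'count' fragments for the block pointer at 'p', near 'goal'
 * when possible.  Three cases are handled below: a brand-new
 * allocation (oldcount == 0), in-place extension of the existing tail
 * fragments (ufs_add_fragments), and relocation to a bigger run with
 * the old contents moved over (ufs_change_blocknr).  Returns the first
 * fragment number, or 0 on failure with *err set.
 */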
u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
			   u64 goal, unsigned count, int *err,
			   struct page *locked_page)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_super_block_first * usb1;
	unsigned cgno, oldcount, newcount;
	u64 tmp, request, result;

	UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
	     inode->i_ino, (unsigned long long)fragment,
	     (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	usb1 = ubh_get_usb_first(uspi);
	*err = -ENOSPC;

	mutex_lock(&UFS_SB(sb)->s_lock);
	tmp = ufs_data_ptr_to_cpu(sb, p);

	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
		ufs_warning(sb, "ufs_new_fragments", "internal warning"
			    " fragment %llu, count %u",
			    (unsigned long long)fragment, count);
		count = uspi->s_fpb - ufs_fragnum(fragment);
	}
	oldcount = ufs_fragnum (fragment);
	newcount = oldcount + count;

	/*
	 * Somebody else has just allocated our fragments
	 */
	if (oldcount) {
		if (!tmp) {
			ufs_error(sb, "ufs_new_fragments", "internal error, "
				  "fragment %llu, tmp %llu\n",
				  (unsigned long long)fragment,
				  (unsigned long long)tmp);
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return INVBLOCK;
		}
		if (fragment < UFS_I(inode)->i_lastfrag) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return 0;
		}
	}
	else {
		if (tmp) {
			UFSD("EXIT (ALREADY ALLOCATED)\n");
			mutex_unlock(&UFS_SB(sb)->s_lock);
			return 0;
		}
	}

	/*
	 * Running into the reserved space: ordinary users may not
	 * allocate from it, only CAP_SYS_RESOURCE holders can.
	 */
	if (unlikely(ufs_freefrags(uspi) <= uspi->s_root_blocks)) {
		if (!capable(CAP_SYS_RESOURCE)) {
			mutex_unlock(&UFS_SB(sb)->s_lock);
			UFSD("EXIT (FAILED)\n");
			return 0;
		}
	}

	if (goal >= uspi->s_size)
		goal = 0;
	if (goal == 0)
		cgno = ufs_inotocg (inode->i_ino);
	else
		cgno = ufs_dtog(uspi, goal);

	/*
	 * allocate new fragment
	 */
	if (oldcount == 0) {
		result = ufs_alloc_fragments (inode, cgno, goal, count, err);
		if (result) {
			ufs_clear_frags(inode, result + oldcount,
					newcount - oldcount, locked_page != NULL);
			*err = 0;
			write_seqlock(&UFS_I(inode)->meta_lock);
			ufs_cpu_to_data_ptr(sb, p, result);
			UFS_I(inode)->i_lastfrag =
				max(UFS_I(inode)->i_lastfrag, fragment + count);
			write_sequnlock(&UFS_I(inode)->meta_lock);
		}
		mutex_unlock(&UFS_SB(sb)->s_lock);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	/*
	 * resize block
	 */
	result = ufs_add_fragments(inode, tmp, oldcount, newcount);
	if (result) {
		*err = 0;
		read_seqlock_excl(&UFS_I(inode)->meta_lock);
		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
						fragment + count);
		read_sequnlock_excl(&UFS_I(inode)->meta_lock);
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		mutex_unlock(&UFS_SB(sb)->s_lock);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

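	/*
	 * The superblock's fs_optim policy decides how much to request:
	 * UFS_OPTSPACE asks for exactly the fragments needed, keeping
	 * free space unfragmented, while UFS_OPTTIME asks for a whole
	 * block to avoid repeated relocation; the policy flips as free
	 * space crosses the minfree thresholds below.
	 */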
	/*
	 * allocate new block and move data
	 */
	switch (fs32_to_cpu(sb, usb1->fs_optim)) {
	    case UFS_OPTSPACE:
		request = newcount;
		if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
		    > uspi->s_dsize * uspi->s_minfree / (2 * 100))
			break;
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
		break;
	    default:
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);

	    case UFS_OPTTIME:
		request = uspi->s_fpb;
		if (uspi->cs_total.cs_nffree < uspi->s_dsize *
		    (uspi->s_minfree - 2) / 100)
			break;
		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE);
		break;
	}
	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
	if (result) {
		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
				locked_page != NULL);
		mutex_unlock(&UFS_SB(sb)->s_lock);
		ufs_change_blocknr(inode, fragment - oldcount, oldcount,
				   uspi->s_sbbase + tmp,
				   uspi->s_sbbase + result, locked_page);
		*err = 0;
		write_seqlock(&UFS_I(inode)->meta_lock);
		ufs_cpu_to_data_ptr(sb, p, result);
		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
						fragment + count);
		write_sequnlock(&UFS_I(inode)->meta_lock);
		if (newcount < request)
			ufs_free_fragments (inode, result + newcount, request - newcount);
		ufs_free_fragments (inode, tmp, oldcount);
		UFSD("EXIT, result %llu\n", (unsigned long long)result);
		return result;
	}

	mutex_unlock(&UFS_SB(sb)->s_lock);
	UFSD("EXIT (FAILED)\n");
	return 0;
}

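/*
 * Charge 'frags' fragments worth of bytes to i_blocks, backing the
 * charge out again if the result would no longer fit in 32 bits.
 */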
static bool try_add_frags(struct inode *inode, unsigned frags)
{
	unsigned size = frags * i_blocksize(inode);
	spin_lock(&inode->i_lock);
	__inode_add_bytes(inode, size);
	if (unlikely((u32)inode->i_blocks != inode->i_blocks)) {
		__inode_sub_bytes(inode, size);
		spin_unlock(&inode->i_lock);
		return false;
	}
	spin_unlock(&inode->i_lock);
	return true;
}

static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
			     unsigned oldcount, unsigned newcount)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned cgno, fragno, fragoff, count, fragsize, i;

	UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
	     (unsigned long long)fragment, oldcount, newcount);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	count = newcount - oldcount;

	cgno = ufs_dtog(uspi, fragment);
	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
		return 0;
	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
		return 0;
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg)) {
		ufs_panic (sb, "ufs_add_fragments",
			"internal error, bad magic number on cg %u", cgno);
		return 0;
	}

	fragno = ufs_dtogd(uspi, fragment);
	fragoff = ufs_fragnum (fragno);
	for (i = oldcount; i < newcount; i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			return 0;

	if (!try_add_frags(inode, count))
		return 0;
	/*
	 * Block can be extended
	 */
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
			break;
	fragsize = i - oldcount;
	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
		ufs_panic (sb, "ufs_add_fragments",
			"internal error or corrupted bitmap on cg %u", cgno);
	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
	if (fragsize != count)
		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
	for (i = oldcount; i < newcount; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;

	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);

	return fragment;
}

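/*
 * Check whether cylinder group 'cgno' can satisfy the request: either
 * a whole free block is available, or cg_frsum[k] records a free run
 * of at least 'count' fragments.  Jumps to cg_found on success.
 */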
#define UFS_TEST_FREE_SPACE_CG \
	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
		goto cg_found; \
	for (k = count; k < uspi->s_fpb; k++) \
		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
			goto cg_found;

static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
			       u64 goal, unsigned count, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned oldcg, i, j, k, allocsize;
	u64 result;

	UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
	     inode->i_ino, cgno, (unsigned long long)goal, count);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	oldcg = cgno;

	/*
	 * 1. searching on preferred cylinder group
	 */
	UFS_TEST_FREE_SPACE_CG

	/*
	 * 2. quadratic rehash
	 */
	for (j = 1; j < uspi->s_ncg; j *= 2) {
		cgno += j;
		if (cgno >= uspi->s_ncg)
			cgno -= uspi->s_ncg;
		UFS_TEST_FREE_SPACE_CG
	}

	/*
	 * 3. brute force search
	 * Start at oldcg + 2: offset 0 was checked in step 1 and
	 * offset 1 in step 2.
	 */
	cgno = (oldcg + 1) % uspi->s_ncg;
	for (j = 2; j < uspi->s_ncg; j++) {
		cgno++;
		if (cgno >= uspi->s_ncg)
			cgno = 0;
		UFS_TEST_FREE_SPACE_CG
	}

	UFSD("EXIT (FAILED)\n");
	return 0;

cg_found:
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
	if (!ufs_cg_chkmagic(sb, ucg))
		ufs_panic (sb, "ufs_alloc_fragments",
			"internal error, bad magic number on cg %u", cgno);
	ucg->cg_time = cpu_to_fs32(sb, get_seconds());

	if (count == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goto succed;
	}

	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
			break;

	if (allocsize == uspi->s_fpb) {
		result = ufs_alloccg_block (inode, ucpi, goal, err);
		if (result == INVBLOCK)
			return 0;
		goal = ufs_dtogd(uspi, result);
		for (i = count; i < uspi->s_fpb; i++)
			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
		i = uspi->s_fpb - count;

		inode_sub_bytes(inode, i << uspi->s_fshift);
		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
		uspi->cs_total.cs_nffree += i;
		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
		fs32_add(sb, &ucg->cg_frsum[i], 1);
		goto succed;
	}

	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
	if (result == INVBLOCK)
		return 0;
	if (!try_add_frags(inode, count))
		return 0;
	for (i = 0; i < count; i++)
		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);

	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
	uspi->cs_total.cs_nffree -= count;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

	if (count != allocsize)
		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succed:
	ubh_mark_buffer_dirty (USPI_UBH(uspi));
	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
	if (sb->s_flags & MS_SYNCHRONOUS)
		ubh_sync_block(UCPI_UBH(ucpi));
	ufs_mark_sb_dirty(sb);

	result += cgno * uspi->s_fpg;
	UFSD("EXIT3, result %llu\n", (unsigned long long)result);
	return result;
}

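/*
 * Allocate one whole block in cylinder group 'ucpi'.  The block
 * containing 'goal' is used when it is free; otherwise the per-group
 * rotor (the point where the last search ended) seeds a fresh bitmap
 * search.
 */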
static u64 ufs_alloccg_block(struct inode *inode,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, int *err)
{
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	struct ufs_cylinder_group * ucg;
	u64 result, blkno;

	UFSD("ENTER, goal %llu\n", (unsigned long long)goal);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;
	ucg = ubh_get_ucg(UCPI_UBH(ucpi));

	if (goal == 0) {
		goal = ucpi->c_rotor;
		goto norot;
	}
	goal = ufs_blknum (goal);
	goal = ufs_dtogd(uspi, goal);

	/*
	 * If the requested block is available, use it.
	 */
	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
		result = goal;
		goto gotit;
	}

norot:
	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
	if (result == INVBLOCK)
		return INVBLOCK;
	ucpi->c_rotor = result;
gotit:
	if (!try_add_frags(inode, uspi->s_fpb))
		return 0;
	blkno = ufs_fragstoblks(result);
	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
		ufs_clusteracct (sb, ucpi, blkno, -1);

	fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
	uspi->cs_total.cs_nbfree--;
	fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);

	if (uspi->fs_magic != UFS2_MAGIC) {
		unsigned cylno = ufs_cbtocylno((unsigned)result);

		fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
					  ufs_cbtorpos((unsigned)result)), 1);
		fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
	}

	UFSD("EXIT, result %llu\n", (unsigned long long)result);

	return result;
}

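/*
 * Scan 'size' bytes of the free-fragment map for a byte whose
 * fragment-table entry has a bit of 'mask' set, crossing the buffers
 * of the ufs_buffer_head as needed.  Returns the number of bytes from
 * the matching byte to the end of the scanned area; 0 means no match.
 */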
static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
			  struct ufs_buffer_head *ubh,
			  unsigned begin, unsigned size,
			  unsigned char *table, unsigned char mask)
{
	unsigned rest, offset;
	unsigned char *cp;

	offset = begin & ~uspi->s_fmask;
	begin >>= uspi->s_fshift;
	for (;;) {
		if ((offset + size) < uspi->s_fsize)
			rest = size;
		else
			rest = uspi->s_fsize - offset;
		size -= rest;
		cp = ubh->bh[begin]->b_data + offset;
		while ((table[*cp++] & mask) == 0 && --rest)
			;
		if (rest || !size)
			break;
		begin++;
		offset = 0;
	}
	return (size + rest);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 * @sb: pointer to super block
 * @ucpi: pointer to cylinder group info
 * @goal: near which block we want to find a new one
 * @count: specified size
 */
static u64 ufs_bitmap_search(struct super_block *sb,
			     struct ufs_cg_private_info *ucpi,
			     u64 goal, unsigned count)
{
	/*
	 * Bit patterns for identifying fragments in the block map
	 * used as ((map & mask_arr) == want_arr)
	 */
	static const int mask_arr[9] = {
		0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
	};
	static const int want_arr[9] = {
		0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
	};
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned start, length, loc;
	unsigned pos, want, blockmap, mask, end;
	u64 result;

	UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
	     (unsigned long long)goal, count);

	if (goal)
		start = ufs_dtogd(uspi, goal) >> 3;
	else
		start = ucpi->c_frotor >> 3;

	length = ((uspi->s_fpg + 7) >> 3) - start;
	loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
		(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
		1 << (count - 1 + (uspi->s_fpb & 7)));
	if (loc == 0) {
		length = start + 1;
		loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
				(uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
				ufs_fragtable_other,
				1 << (count - 1 + (uspi->s_fpb & 7)));
		if (loc == 0) {
			ufs_error(sb, "ufs_bitmap_search",
				  "bitmap corrupted on cg %u, start %u,"
				  " length %u, count %u, freeoff %u\n",
				  ucpi->c_cgx, start, length, count,
				  ucpi->c_freeoff);
			return INVBLOCK;
		}
		start = 0;
	}
	result = (start + length - loc) << 3;
	ucpi->c_frotor = result;

	/*
	 * found the byte in the map
	 */

	for (end = result + 8; result < end; result += uspi->s_fpb) {
		blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
		blockmap <<= 1;
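		/*
		 * The shift guarantees bit 0 of the tested window is
		 * zero, so (blockmap & mask) == want matches 'count'
		 * consecutive free fragments that start right after an
		 * allocated fragment (or the edge of the window).
		 */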
		mask = mask_arr[count];
		want = want_arr[count];
		for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
			if ((blockmap & mask) == want) {
				UFSD("EXIT, result %llu\n",
				     (unsigned long long)result);
				return result + pos;
			}
			mask <<= 1;
			want <<= 1;
		}
	}

	ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
		  ucpi->c_cgx);
	UFSD("EXIT (FAILED)\n");
	return INVBLOCK;
}

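/*
 * Maintain the cluster bitmap and summary counts for this cylinder
 * group: a cluster is a run of contiguous free blocks, and the summary
 * array counts clusters of each length up to s_contigsumsize.  Freeing
 * (cnt > 0) or allocating (cnt < 0) 'blkno' may merge or split the
 * neighbouring runs, so the old forward and backward clusters are
 * re-counted below.
 */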
static void ufs_clusteracct(struct super_block * sb,
	struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
{
	struct ufs_sb_private_info * uspi;
	int i, start, end, forw, back;

	uspi = UFS_SB(sb)->s_uspi;
	if (uspi->s_contigsumsize <= 0)
		return;

	if (cnt > 0)
		ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
	else
		ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);

	/*
	 * Find the size of the cluster going forward.
	 */
	start = blkno + 1;
	end = start + uspi->s_contigsumsize;
	if (end >= ucpi->c_nclusterblks)
		end = ucpi->c_nclusterblks;
	i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
	if (i > end)
		i = end;
	forw = i - start;

	/*
	 * Find the size of the cluster going backward.
	 */
	start = blkno - 1;
	end = start - uspi->s_contigsumsize;
	if (end < 0)
		end = -1;
	i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
	if (i < end)
		i = end;
	back = start - i;

	/*
	 * Account for old cluster and the possibly new forward and
	 * back clusters.
	 */
	i = back + forw + 1;
	if (i > uspi->s_contigsumsize)
		i = uspi->s_contigsumsize;
	fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
	if (back > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
	if (forw > 0)
		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
}

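/*
 * ufs_fragtable_8fpb[B] has bit (k - 1) set when the free-map byte B
 * contains a maximal run of exactly k free fragments, letting
 * ubh_scanc() test a whole byte with a single table lookup (e.g.
 * B = 0x0b, binary 00001011, yields 0x03: one run of two and one run
 * of one).  ufs_fragtable_other packs the analogous information for
 * layouts with fewer than 8 fragments per block, indexed through the
 * "count - 1 + (s_fpb & 7)" bit position used in ufs_bitmap_search().
 */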
static unsigned char ufs_fragtable_8fpb[] = {
	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};

static unsigned char ufs_fragtable_other[] = {
	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};