/*
 * linux/fs/hfsplus/bitmap.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handling of allocation file
 */

#include <linux/pagemap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

#define PAGE_CACHE_BITS	(PAGE_CACHE_SIZE * 8)

int hfsplus_block_allocate(struct super_block *sb, u32 size,
		u32 offset, u32 *max)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct page *page;
	struct address_space *mapping;
	__be32 *pptr, *curr, *end;
	u32 mask, start, len, n;
	__be32 val;
	int i;

	len = *max;
	if (!len)
		return size;

	dprint(DBG_BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
	mutex_lock(&sbi->alloc_mutex);
	mapping = sbi->alloc_file->i_mapping;
	page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
	if (IS_ERR(page)) {
		start = size;
		goto out;
	}
	pptr = kmap(page);
	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
	i = offset % 32;
	offset &= ~(PAGE_CACHE_BITS - 1);
	if ((size ^ offset) / PAGE_CACHE_BITS)
		end = pptr + PAGE_CACHE_BITS / 32;
	else
		end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;

	/* scan the first partial u32 for zero bits */
	val = *curr;
	if (~val) {
		n = be32_to_cpu(val);
		mask = (1U << 31) >> i;
		for (; i < 32; mask >>= 1, i++) {
			if (!(n & mask))
				goto found;
		}
	}
	curr++;

	/* scan complete u32s for the first zero bit */
	while (1) {
		while (curr < end) {
			val = *curr;
			if (~val) {
				n = be32_to_cpu(val);
				mask = 1 << 31;
				for (i = 0; i < 32; mask >>= 1, i++) {
					if (!(n & mask))
						goto found;
				}
			}
			curr++;
		}
		kunmap(page);
		offset += PAGE_CACHE_BITS;
		if (offset >= size)
			break;
		page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
					 NULL);
		if (IS_ERR(page)) {
			start = size;
			goto out;
		}
		curr = pptr = kmap(page);
		if ((size ^ offset) / PAGE_CACHE_BITS)
			end = pptr + PAGE_CACHE_BITS / 32;
		else
			end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
	}
	dprint(DBG_BITMAP, "bitmap full\n");
	start = size;
	goto out;

found:
	start = offset + (curr - pptr) * 32 + i;
	if (start >= size) {
		dprint(DBG_BITMAP, "bitmap full\n");
		goto out;
	}
	/* do any partial u32 at the start */
	len = min(size - start, len);
	while (1) {
		n |= mask;
		if (++i >= 32)
			break;
		mask >>= 1;
		if (!--len || n & mask)
			goto done;
	}
	if (!--len)
		goto done;
	*curr++ = cpu_to_be32(n);
	/* do full u32s */
	while (1) {
		while (curr < end) {
			n = be32_to_cpu(*curr);
			if (len < 32)
				goto last;
			if (n) {
				len = 32;
				goto last;
			}
			*curr++ = cpu_to_be32(0xffffffff);
			len -= 32;
		}
		set_page_dirty(page);
		kunmap(page);
		offset += PAGE_CACHE_BITS;
		page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
					 NULL);
		if (IS_ERR(page)) {
			start = size;
			goto out;
		}
		pptr = kmap(page);
		curr = pptr;
		end = pptr + PAGE_CACHE_BITS / 32;
	}
last:
	/* do any partial u32 at end */
	mask = 1U << 31;
	for (i = 0; i < len; i++) {
		if (n & mask)
			break;
		n |= mask;
		mask >>= 1;
	}
done:
	*curr = cpu_to_be32(n);
	set_page_dirty(page);
	kunmap(page);
	*max = offset + (curr - pptr) * 32 + i - start;
	sbi->free_blocks -= *max;
	hfsplus_mark_mdb_dirty(sb);
	dprint(DBG_BITMAP, "-> %u,%u\n", start, *max);
out:
	mutex_unlock(&sbi->alloc_mutex);
	return start;
}
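
/*
 * The allocation file is treated above as an array of big-endian 32-bit
 * words: within each word the most significant bit corresponds to the
 * lowest-numbered block, and a set bit means the block is in use.
 *
 * Hypothetical caller sketch, for illustration only ("goal" and the
 * requested length of 8 are made-up values, not taken from a real caller):
 *
 *	u32 len = 8;
 *	u32 start = hfsplus_block_allocate(sb, sbi->total_blocks, goal, &len);
 *	if (start >= sbi->total_blocks)
 *		nothing was allocated;
 *
 * On success the return value is the first block of the run and *max
 * (here "len") is trimmed to the number of blocks actually marked in use,
 * which may be fewer than requested; a return value >= "size" means the
 * allocation failed.
 */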

int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct page *page;
	struct address_space *mapping;
	__be32 *pptr, *curr, *end;
	u32 mask, len, pnr;
	int i;

	/* is there any actual work to be done? */
	if (!count)
		return 0;

	dprint(DBG_BITMAP, "block_free: %u,%u\n", offset, count);
	/* are all of the bits in range? */
	if ((offset + count) > sbi->total_blocks)
		return -2;

	mutex_lock(&sbi->alloc_mutex);
	mapping = sbi->alloc_file->i_mapping;
	pnr = offset / PAGE_CACHE_BITS;
	page = read_mapping_page(mapping, pnr, NULL);
	pptr = kmap(page);
	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
	end = pptr + PAGE_CACHE_BITS / 32;
	len = count;

	/* do any partial u32 at the start */
	i = offset % 32;
	if (i) {
		int j = 32 - i;
		mask = 0xffffffffU << j;
		if (j > count) {
			mask |= 0xffffffffU >> (i + count);
			*curr++ &= cpu_to_be32(mask);
			goto out;
		}
		*curr++ &= cpu_to_be32(mask);
		count -= j;
	}

	/* do full u32s */
	while (1) {
		while (curr < end) {
			if (count < 32)
				goto done;
			*curr++ = 0;
			count -= 32;
		}
		if (!count)
			break;
		set_page_dirty(page);
		kunmap(page);
		page = read_mapping_page(mapping, ++pnr, NULL);
		pptr = kmap(page);
		curr = pptr;
		end = pptr + PAGE_CACHE_BITS / 32;
	}
done:
	/* do any partial u32 at end */
	if (count) {
		mask = 0xffffffffU >> count;
		*curr &= cpu_to_be32(mask);
	}
out:
	set_page_dirty(page);
	kunmap(page);
	sbi->free_blocks += len;
	hfsplus_mark_mdb_dirty(sb);
	mutex_unlock(&sbi->alloc_mutex);

	return 0;
}
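
/*
 * Worked example of the partial-word arithmetic in hfsplus_block_free(),
 * with numbers chosen purely for illustration: freeing count = 10 blocks
 * starting at bit i = 4 of a word gives j = 32 - i = 28.  Since j > count
 * the whole run lies inside this word, so
 *
 *	mask = (0xffffffff << 28) | (0xffffffff >> (4 + 10)) = 0xf003ffff
 *
 * and the "&= cpu_to_be32(mask)" clears exactly bits 4..13 (counting from
 * the most significant bit), i.e. the 10 freed blocks, while leaving the
 * neighbouring allocations in that word untouched.
 */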