/*
 *  linux/fs/minix/dir.c
 *
 *  Copyright (C) 1991, 1992 Linus Torvalds
 *
 *  minix directory handling functions
 */

#include "minix.h"
#include <linux/highmem.h>
#include <linux/smp_lock.h>

typedef struct minix_dir_entry minix_dirent;

static int minix_readdir(struct file *, void *, filldir_t);

struct file_operations minix_dir_operations = {
        .read           = generic_read_dir,
        .readdir        = minix_readdir,
        .fsync          = minix_sync_file,
};

static inline void dir_put_page(struct page *page)
{
        kunmap(page);
        page_cache_release(page);
}

/*
 * Return the offset into page `page_nr' of the last valid
 * byte in that page, plus one.
 */
static unsigned
minix_last_byte(struct inode *inode, unsigned long page_nr)
{
        unsigned last_byte = PAGE_CACHE_SIZE;

        if (page_nr == (inode->i_size >> PAGE_CACHE_SHIFT))
                last_byte = inode->i_size & (PAGE_CACHE_SIZE - 1);
        return last_byte;
}

static inline unsigned long dir_pages(struct inode *inode)
{
        return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
}

static int dir_commit_chunk(struct page *page, unsigned from, unsigned to)
{
        struct inode *dir = (struct inode *)page->mapping->host;
        int err = 0;
        page->mapping->a_ops->commit_write(NULL, page, from, to);
        if (IS_DIRSYNC(dir))
                err = write_one_page(page, 1);
        else
                unlock_page(page);
        return err;
}

static struct page * dir_get_page(struct inode *dir, unsigned long n)
{
        struct address_space *mapping = dir->i_mapping;
        struct page *page = read_cache_page(mapping, n,
                                (filler_t*)mapping->a_ops->readpage, NULL);
        if (!IS_ERR(page)) {
                wait_on_page_locked(page);
                kmap(page);
                if (!PageUptodate(page))
                        goto fail;
        }
        return page;

fail:
        dir_put_page(page);
        return ERR_PTR(-EIO);
}

static inline void *minix_next_entry(void *de, struct minix_sb_info *sbi)
{
        return (void*)((char*)de + sbi->s_dirsize);
}

static int minix_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
        unsigned long pos = filp->f_pos;
        struct inode *inode = filp->f_dentry->d_inode;
        struct super_block *sb = inode->i_sb;
        unsigned offset = pos & ~PAGE_CACHE_MASK;
        unsigned long n = pos >> PAGE_CACHE_SHIFT;
        unsigned long npages = dir_pages(inode);
        struct minix_sb_info *sbi = minix_sb(sb);
        unsigned chunk_size = sbi->s_dirsize;

        lock_kernel();

        pos = (pos + chunk_size-1) & ~(chunk_size-1);
        if (pos >= inode->i_size)
                goto done;

        for ( ; n < npages; n++, offset = 0) {
                char *p, *kaddr, *limit;
                struct page *page = dir_get_page(inode, n);

                if (IS_ERR(page))
                        continue;
                kaddr = (char *)page_address(page);
                p = kaddr+offset;
                limit = kaddr + minix_last_byte(inode, n) - chunk_size;
                for ( ; p <= limit ; p = minix_next_entry(p, sbi)) {
                        minix_dirent *de = (minix_dirent *)p;
                        if (de->inode) {
                                int over;
                                unsigned l = strnlen(de->name,sbi->s_namelen);

                                offset = p - kaddr;
                                over = filldir(dirent, de->name, l,
                                                (n<<PAGE_CACHE_SHIFT) | offset,
                                                de->inode, DT_UNKNOWN);
                                if (over) {
                                        dir_put_page(page);
                                        goto done;
                                }
                        }
                }
                dir_put_page(page);
        }

done:
        filp->f_pos = (n << PAGE_CACHE_SHIFT) | offset;
        unlock_kernel();
        return 0;
}

static inline int namecompare(int len, int maxlen,
        const char * name, const char * buffer)
{
        if (len < maxlen && buffer[len])
                return 0;
        return !memcmp(name, buffer, len);
}

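/*
 * For illustration (assuming the usual 14-character name field): if the
 * on-disk name is "foo", NUL-padded to 14 bytes, then
 *
 *      namecompare(3, 14, "foo", name)   is true
 *      namecompare(2, 14, "fo", name)    is false  (name[2] is 'o')
 *      namecompare(4, 14, "fool", name)  is false  (memcmp() differs at byte 3)
 *
 * The buffer[len] test rejects lookups that merely match a prefix of the
 * stored name, without needing a strlen() of the on-disk entry.
 */
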
/*
 *      minix_find_entry()
 *
 * finds an entry in the specified directory with the wanted name. It
 * returns the page in which the entry was found (via *res_page), and
 * the entry itself as the return value. It does NOT read the inode of
 * the entry - you'll have to do that yourself if you want to.
 */
minix_dirent *minix_find_entry(struct dentry *dentry, struct page **res_page)
{
        const char * name = dentry->d_name.name;
        int namelen = dentry->d_name.len;
        struct inode * dir = dentry->d_parent->d_inode;
        struct super_block * sb = dir->i_sb;
        struct minix_sb_info * sbi = minix_sb(sb);
        unsigned long n;
        unsigned long npages = dir_pages(dir);
        struct page *page = NULL;
        struct minix_dir_entry *de;

        *res_page = NULL;

        for (n = 0; n < npages; n++) {
                char *kaddr;
                page = dir_get_page(dir, n);
                if (IS_ERR(page))
                        continue;

                kaddr = (char*)page_address(page);
                de = (struct minix_dir_entry *) kaddr;
                kaddr += minix_last_byte(dir, n) - sbi->s_dirsize;
                for ( ; (char *) de <= kaddr ; de = minix_next_entry(de,sbi)) {
                        if (!de->inode)
                                continue;
                        if (namecompare(namelen,sbi->s_namelen,name,de->name))
                                goto found;
                }
                dir_put_page(page);
        }
        return NULL;

found:
        *res_page = page;
        return de;
}

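/*
 * Usage sketch (see minix_inode_by_name() at the end of this file for
 * the real thing):
 *
 *      struct page *page;
 *      struct minix_dir_entry *de = minix_find_entry(dentry, &page);
 *
 *      if (de) {
 *              ... use de->inode, de->name ...
 *              dir_put_page(page);
 *      }
 *
 * On success the page returned via *res_page is still kmapped and holds
 * a page cache reference; the caller must drop both with dir_put_page().
 */
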
int minix_add_link(struct dentry *dentry, struct inode *inode)
{
        struct inode *dir = dentry->d_parent->d_inode;
        const char * name = dentry->d_name.name;
        int namelen = dentry->d_name.len;
        struct super_block * sb = dir->i_sb;
        struct minix_sb_info * sbi = minix_sb(sb);
        struct page *page = NULL;
        struct minix_dir_entry * de;
        unsigned long npages = dir_pages(dir);
        unsigned long n;
        char *kaddr;
        unsigned from, to;
        int err;

        /*
         * We take care of directory expansion in the same loop.
         * This code plays outside i_size, so it locks the page
         * to protect that region.  Note the `<=' below: the page just
         * past i_size is used when the directory has to grow.
         */
        for (n = 0; n <= npages; n++) {
                char *dir_end;

                page = dir_get_page(dir, n);
                err = PTR_ERR(page);
                if (IS_ERR(page))
                        goto out;
                lock_page(page);
                kaddr = (char*)page_address(page);
                dir_end = kaddr + minix_last_byte(dir, n);
                de = (minix_dirent *)kaddr;
                kaddr += PAGE_CACHE_SIZE - sbi->s_dirsize;
                while ((char *)de <= kaddr) {
                        if ((char *)de == dir_end) {
                                /* We hit i_size */
                                de->inode = 0;
                                goto got_it;
                        }
                        if (!de->inode)
                                goto got_it;
                        err = -EEXIST;
                        if (namecompare(namelen,sbi->s_namelen,name,de->name))
                                goto out_unlock;
                        de = minix_next_entry(de, sbi);
                }
                unlock_page(page);
                dir_put_page(page);
        }
        BUG();
        return -EINVAL;

got_it:
        from = (char*)de - (char*)page_address(page);
        to = from + sbi->s_dirsize;
        err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
        if (err)
                goto out_unlock;
        memcpy (de->name, name, namelen);
        /*
         * Zero-pad the rest of the name; s_dirsize includes the
         * 2-byte inode number, hence the "- 2".
         */
        memset (de->name + namelen, 0, sbi->s_dirsize - namelen - 2);
        de->inode = inode->i_ino;
        err = dir_commit_chunk(page, from, to);
        dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
        mark_inode_dirty(dir);
out_put:
        dir_put_page(page);
out:
        return err;
out_unlock:
        unlock_page(page);
        goto out_put;
}

int minix_delete_entry(struct minix_dir_entry *de, struct page *page)
{
        struct address_space *mapping = page->mapping;
        struct inode *inode = (struct inode*)mapping->host;
        char *kaddr = page_address(page);
        unsigned from = (char*)de - kaddr;
        unsigned to = from + minix_sb(inode->i_sb)->s_dirsize;
        int err;

        lock_page(page);
        err = mapping->a_ops->prepare_write(NULL, page, from, to);
        if (err == 0) {
                de->inode = 0;
                err = dir_commit_chunk(page, from, to);
        } else {
                unlock_page(page);
        }
        dir_put_page(page);
        inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
        mark_inode_dirty(inode);
        return err;
}

int minix_make_empty(struct inode *inode, struct inode *dir)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page = grab_cache_page(mapping, 0);
        struct minix_sb_info * sbi = minix_sb(inode->i_sb);
        struct minix_dir_entry * de;
        char *kaddr;
        int err;

        if (!page)
                return -ENOMEM;
        err = mapping->a_ops->prepare_write(NULL, page, 0, 2 * sbi->s_dirsize);
        if (err) {
                unlock_page(page);
                goto fail;
        }

        kaddr = kmap_atomic(page, KM_USER0);
        memset(kaddr, 0, PAGE_CACHE_SIZE);

        de = (struct minix_dir_entry *)kaddr;
        de->inode = inode->i_ino;
        strcpy(de->name,".");
        de = minix_next_entry(de, sbi);
        de->inode = dir->i_ino;
        strcpy(de->name,"..");
        kunmap_atomic(kaddr, KM_USER0);

        err = dir_commit_chunk(page, 0, 2 * sbi->s_dirsize);
fail:
        page_cache_release(page);
        return err;
}

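/*
 * Resulting layout of the chunk committed by minix_make_empty() above,
 * assuming the classic 16-byte entries (s_dirsize == 16, s_namelen == 14);
 * with 30-character names each entry takes 32 bytes instead:
 *
 *      offset  0:  16-bit inode number of the new directory, name "."
 *      offset 16:  16-bit inode number of the parent,         name ".."
 *
 * The rest of the page was zeroed, so every other slot has a zero inode
 * number and is treated as free by the routines above.
 */
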
/*
 * routine to check that the specified directory is empty (for rmdir)
 */
int minix_empty_dir(struct inode * inode)
{
        struct page *page = NULL;
        unsigned long i, npages = dir_pages(inode);
        struct minix_sb_info *sbi = minix_sb(inode->i_sb);

        for (i = 0; i < npages; i++) {
                char *kaddr;
                minix_dirent * de;
                page = dir_get_page(inode, i);

                if (IS_ERR(page))
                        continue;

                kaddr = (char *)page_address(page);
                de = (minix_dirent *)kaddr;
                kaddr += minix_last_byte(inode, i) - sbi->s_dirsize;

                while ((char *)de <= kaddr) {
                        if (de->inode != 0) {
                                /* check for . and .. */
                                if (de->name[0] != '.')
                                        goto not_empty;
                                if (!de->name[1]) {
                                        if (de->inode != inode->i_ino)
                                                goto not_empty;
                                } else if (de->name[1] != '.')
                                        goto not_empty;
                                else if (de->name[2])
                                        goto not_empty;
                        }
                        de = minix_next_entry(de, sbi);
                }
                dir_put_page(page);
        }
        return 1;

not_empty:
        dir_put_page(page);
        return 0;
}

/* Releases the page */
void minix_set_link(struct minix_dir_entry *de, struct page *page,
        struct inode *inode)
{
        struct inode *dir = (struct inode*)page->mapping->host;
        struct minix_sb_info *sbi = minix_sb(dir->i_sb);
        unsigned from = (char *)de-(char*)page_address(page);
        unsigned to = from + sbi->s_dirsize;
        int err;

        lock_page(page);
        err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
        if (err == 0) {
                de->inode = inode->i_ino;
                err = dir_commit_chunk(page, from, to);
        } else {
                unlock_page(page);
        }
        dir_put_page(page);
        dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
        mark_inode_dirty(dir);
}

struct minix_dir_entry * minix_dotdot (struct inode *dir, struct page **p)
{
        struct page *page = dir_get_page(dir, 0);
        struct minix_sb_info *sbi = minix_sb(dir->i_sb);
        struct minix_dir_entry *de = NULL;

        if (!IS_ERR(page)) {
                de = minix_next_entry(page_address(page), sbi);
                *p = page;
        }
        return de;
}

ino_t minix_inode_by_name(struct dentry *dentry)
{
        struct page *page;
        struct minix_dir_entry *de = minix_find_entry(dentry, &page);
        ino_t res = 0;

        if (de) {
                res = de->inode;
                dir_put_page(page);
        }
        return res;
}