4 * Copyright (C) 1992, 1993, 1994, 1995
5 * Remy Card (card@masi.ibp.fr)
6 * Laboratoire MASI - Institut Blaise Pascal
7 * Universite Pierre et Marie Curie (Paris VI)
11 * linux/fs/minix/dir.c
13 * Copyright (C) 1991, 1992 Linus Torvalds
15 * ext2 directory handling functions
17 * Big-endian to little-endian byte-swapping/bitmaps by
18 * David S. Miller (davem@caip.rutgers.edu), 1995
20 * All code that works with directory layout had been switched to pagecache
25 #include <linux/pagemap.h>
26 #include <linux/smp_lock.h>
/* Shorthand for the on-disk directory entry record used throughout this file. */
28 typedef struct ext2_dir_entry_2 ext2_dirent;
31 * ext2 uses block-sized chunks. Arguably, sector-sized ones would be
32 * more robust, but we have what we have
/*
 * A directory "chunk" is one filesystem block; entries never cross a chunk
 * boundary (enforced in ext2_check_page() below).
 */
34 static inline unsigned ext2_chunk_size(struct inode *inode)
36 return inode->i_sb->s_blocksize;
/*
 * Drop the pagecache reference taken by ext2_get_page().  NOTE(review):
 * lines between the signature and the release are not visible here — the
 * page is presumably also kunmap'ed before the release; confirm in full tree.
 */
39 static inline void ext2_put_page(struct page *page)
42 page_cache_release(page);
/* Number of pagecache pages needed to cover this directory's i_size (rounded up). */
45 static inline unsigned long dir_pages(struct inode *inode)
47 return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
51 * Return the offset into page `page_nr' of the last valid
52 * byte in that page, plus one.
/*
 * For every page but the one containing EOF this is PAGE_CACHE_SIZE;
 * for the final page it is i_size modulo the page size.
 */
55 ext2_last_byte(struct inode *inode, unsigned long page_nr)
57 unsigned last_byte = inode->i_size;
59 last_byte -= page_nr << PAGE_CACHE_SHIFT;
60 if (last_byte > PAGE_CACHE_SIZE)
61 last_byte = PAGE_CACHE_SIZE;
/*
 * Finish a modification of the byte range [from, to) in a directory page:
 * commit_write() completes the prepare_write/commit_write pair started by
 * the caller, then write_one_page() writes the page out (the `1` argument
 * presumably requests waiting for the write — confirm against
 * write_one_page()'s prototype).  Returns the writeback error code.
 * NOTE(review): declarations and any inode-size handling between these
 * lines are not visible in this view.
 */
65 static int ext2_commit_chunk(struct page *page, unsigned from, unsigned to)
67 struct inode *dir = page->mapping->host;
70 page->mapping->a_ops->commit_write(NULL, page, from, to);
72 err = write_one_page(page, 1);
/*
 * Validate every directory entry in a freshly read page, once per page
 * (the result is cached via the PageChecked bit).  Per entry it verifies:
 * rec_len is at least the minimal record size, large enough for the
 * claimed name_len, does not make the entry cross a chunk (block)
 * boundary, and the inode number does not exceed s_inodes_count.  On the
 * page containing EOF it additionally verifies that i_size is a multiple
 * of the chunk size.  Corruption is reported via ext2_error(); each check
 * jumps to an error label whose code is below (labels themselves fall in
 * lines not visible in this view).
 */
78 static void ext2_check_page(struct page *page)
80 struct inode *dir = page->mapping->host;
81 struct super_block *sb = dir->i_sb;
82 unsigned chunk_size = ext2_chunk_size(dir);
83 char *kaddr = page_address(page);
/* Highest valid inode number on this filesystem, from the superblock. */
84 u32 max_inumber = le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count);
85 unsigned offs, rec_len;
86 unsigned limit = PAGE_CACHE_SIZE;
/* Only bytes below i_size are valid on the page containing EOF. */
90 if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
91 limit = dir->i_size & ~PAGE_CACHE_MASK;
/* Directory size must be chunk-aligned, else the directory is corrupt. */
92 if (limit & (chunk_size - 1))
/* Walk the entries; `rec_len` of each entry advances the cursor. */
97 for (offs = 0; offs <= limit - EXT2_DIR_REC_LEN(1); offs += rec_len) {
98 p = (ext2_dirent *)(kaddr + offs);
99 rec_len = le16_to_cpu(p->rec_len);
/* rec_len cannot hold even a one-character entry */
101 if (rec_len < EXT2_DIR_REC_LEN(1))
/* rec_len too small for the name it claims to contain */
105 if (rec_len < EXT2_DIR_REC_LEN(p->name_len))
/* first and last byte of the entry must lie in the same chunk */
107 if (((offs + rec_len - 1) ^ offs) & ~(chunk_size-1))
/* inode number beyond the filesystem's inode count */
109 if (le32_to_cpu(p->inode) > max_inumber)
/* All entries OK — remember so this page is never re-checked. */
115 SetPageChecked(page);
118 /* Too bad, we had an error */
/* --- error reporting; one label per corruption kind --- */
121 ext2_error(sb, "ext2_check_page",
122 "size of directory #%lu is not a multiple of chunk size",
127 error = "rec_len is smaller than minimal";
130 error = "unaligned directory entry";
133 error = "rec_len is too small for name_len";
136 error = "directory entry across blocks";
139 error = "inode out of bounds";
141 ext2_error (sb, "ext2_check_page", "bad entry in directory #%lu: %s - "
142 "offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
143 dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
144 (unsigned long) le32_to_cpu(p->inode),
145 rec_len, p->name_len);
/* Separate case: an entry claims to extend past the end of the page. */
148 p = (ext2_dirent *)(kaddr + offs);
149 ext2_error (sb, "ext2_check_page",
150 "entry in directory #%lu spans the page boundary"
151 "offset=%lu, inode=%lu",
152 dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs,
153 (unsigned long) le32_to_cpu(p->inode));
/* Mark checked even on failure so the complaint is logged only once. */
155 SetPageChecked(page);
/*
 * Read directory page `n' of `dir' through the pagecache, wait for it to
 * be unlocked, and validate its contents (once per page, via PageChecked /
 * ext2_check_page).  On success the page is returned with a reference held
 * (released by ext2_put_page()); on any failure returns ERR_PTR(-EIO).
 * NOTE(review): the error-path lines between PageChecked and the final
 * return are not visible in this view.
 */
159 static struct page * ext2_get_page(struct inode *dir, unsigned long n)
161 struct address_space *mapping = dir->i_mapping;
162 struct page *page = read_cache_page(mapping, n,
163 (filler_t*)mapping->a_ops->readpage, NULL);
165 wait_on_page_locked(page);
/* Read failed — the page never became up to date. */
167 if (!PageUptodate(page))
/* Validate entries the first time this page is seen. */
169 if (!PageChecked(page))
170 ext2_check_page(page);
178 return ERR_PTR(-EIO);
182 * NOTE! unlike strncmp, ext2_match returns 1 for success, 0 for failure.
184 * len <= EXT2_NAME_LEN and de != NULL are guaranteed by caller.
/* Compare a candidate name against a directory entry: length first, then bytes. */
186 static inline int ext2_match (int len, const char * const name,
187 struct ext2_dir_entry_2 * de)
189 if (len != de->name_len)
/* NOTE(review): a `de->inode == 0` (unused entry) test likely sits in the
 * unseen lines here — confirm in the full tree. */
193 return !memcmp(name, de->name, len);
197 * p is at least 6 bytes before the end of page
/* Advance to the next on-disk entry by this entry's little-endian rec_len. */
199 static inline ext2_dirent *ext2_next_entry(ext2_dirent *p)
201 return (ext2_dirent *)((char*)p + le16_to_cpu(p->rec_len));
/*
 * Re-synchronise a readdir offset after the directory changed under us
 * (i_version mismatch): round `offset' down to its chunk start via `mask',
 * then walk entry-by-entry until reaching (or passing) the original
 * position.  Returns the offset of the first valid entry at or after it.
 * NOTE(review): the loop body between the comparison and ext2_next_entry()
 * (likely a break on p->rec_len == 0) is not visible here.
 */
204 static inline unsigned
205 ext2_validate_entry(char *base, unsigned offset, unsigned mask)
207 ext2_dirent *de = (ext2_dirent*)(base + offset);
208 ext2_dirent *p = (ext2_dirent*)(base + (offset&mask));
209 while ((char*)p < (char*)de) {
212 p = ext2_next_entry(p);
214 return (char *)p - base;
/* Map on-disk EXT2_FT_* file-type codes to the DT_* values filldir expects. */
217 static unsigned char ext2_filetype_table[EXT2_FT_MAX] = {
218 [EXT2_FT_UNKNOWN] = DT_UNKNOWN,
219 [EXT2_FT_REG_FILE] = DT_REG,
220 [EXT2_FT_DIR] = DT_DIR,
221 [EXT2_FT_CHRDEV] = DT_CHR,
222 [EXT2_FT_BLKDEV] = DT_BLK,
223 [EXT2_FT_FIFO] = DT_FIFO,
224 [EXT2_FT_SOCK] = DT_SOCK,
225 [EXT2_FT_SYMLINK] = DT_LNK,
/* Inverse map: i_mode's S_IFMT bits to the on-disk EXT2_FT_* code. */
229 static unsigned char ext2_type_by_mode[S_IFMT >> S_SHIFT] = {
230 [S_IFREG >> S_SHIFT] = EXT2_FT_REG_FILE,
231 [S_IFDIR >> S_SHIFT] = EXT2_FT_DIR,
232 [S_IFCHR >> S_SHIFT] = EXT2_FT_CHRDEV,
233 [S_IFBLK >> S_SHIFT] = EXT2_FT_BLKDEV,
234 [S_IFIFO >> S_SHIFT] = EXT2_FT_FIFO,
235 [S_IFSOCK >> S_SHIFT] = EXT2_FT_SOCK,
236 [S_IFLNK >> S_SHIFT] = EXT2_FT_SYMLINK,
/*
 * Record the inode's file type in the directory entry — but only when the
 * filesystem carries the INCOMPAT_FILETYPE feature; otherwise that on-disk
 * byte is left alone (the else branch is in lines not visible here).
 */
239 static inline void ext2_set_de_type(ext2_dirent *de, struct inode *inode)
241 mode_t mode = inode->i_mode;
242 if (EXT2_HAS_INCOMPAT_FEATURE(inode->i_sb, EXT2_FEATURE_INCOMPAT_FILETYPE))
243 de->file_type = ext2_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
/*
 * readdir for ext2 directories: iterate pagecache pages from f_pos to EOF,
 * emitting each live entry through filldir().  f_pos encodes (page << shift
 * | offset-in-page).  If the directory changed since last call (i_version
 * mismatch), the in-page offset is re-validated first so we never start
 * mid-entry.
 */
249 ext2_readdir (struct file * filp, void * dirent, filldir_t filldir)
251 loff_t pos = filp->f_pos;
252 struct inode *inode = filp->f_dentry->d_inode;
253 struct super_block *sb = inode->i_sb;
/* split f_pos into page index `n' and byte offset within that page */
254 unsigned int offset = pos & ~PAGE_CACHE_MASK;
255 unsigned long n = pos >> PAGE_CACHE_SHIFT;
256 unsigned long npages = dir_pages(inode);
257 unsigned chunk_mask = ~(ext2_chunk_size(inode)-1);
258 unsigned char *types = NULL;
259 int need_revalidate = filp->f_version != inode->i_version;
/* Past the last possible entry: nothing to emit. */
261 if (pos > inode->i_size - EXT2_DIR_REC_LEN(1))
/* file_type bytes are only meaningful with the FILETYPE feature */
264 if (EXT2_HAS_INCOMPAT_FEATURE(sb, EXT2_FEATURE_INCOMPAT_FILETYPE))
265 types = ext2_filetype_table;
267 for ( ; n < npages; n++, offset = 0) {
270 struct page *page = ext2_get_page(inode, n);
/* Bad page: report, skip the rest of it, and keep going. */
273 ext2_error(sb, __FUNCTION__,
276 filp->f_pos += PAGE_CACHE_SIZE - offset;
279 kaddr = page_address(page);
/* Directory was modified since our last readdir: resync offset. */
280 if (unlikely(need_revalidate)) {
282 offset = ext2_validate_entry(kaddr, offset, chunk_mask);
283 filp->f_pos = (n<<PAGE_CACHE_SHIFT) + offset;
285 filp->f_version = inode->i_version;
288 de = (ext2_dirent *)(kaddr+offset);
/* last address at which a minimal entry could still start */
289 limit = kaddr + ext2_last_byte(inode, n) - EXT2_DIR_REC_LEN(1);
290 for ( ;(char*)de <= limit; de = ext2_next_entry(de)) {
/* a zero rec_len would loop forever — treat as corruption */
291 if (de->rec_len == 0) {
292 ext2_error(sb, __FUNCTION__,
293 "zero-length directory entry");
299 unsigned char d_type = DT_UNKNOWN;
301 if (types && de->file_type < EXT2_FT_MAX)
302 d_type = types[de->file_type];
304 offset = (char *)de - kaddr;
/* `over` != 0 means the user buffer is full — stop here. */
305 over = filldir(dirent, de->name, de->name_len,
306 (n<<PAGE_CACHE_SHIFT) | offset,
307 le32_to_cpu(de->inode), d_type);
313 filp->f_pos += le16_to_cpu(de->rec_len);
323 * finds an entry in the specified directory with the wanted name. It
324 * returns the page in which the entry was found, and the entry itself
325 * (as a parameter - res_dir). Page is returned mapped and unlocked.
326 * Entry is guaranteed to be valid.
328 struct ext2_dir_entry_2 * ext2_find_entry (struct inode * dir,
329 struct dentry *dentry, struct page ** res_page)
331 const char *name = dentry->d_name.name;
332 int namelen = dentry->d_name.len;
333 unsigned reclen = EXT2_DIR_REC_LEN(namelen);
334 unsigned long start, n;
335 unsigned long npages = dir_pages(dir);
336 struct page *page = NULL;
337 struct ext2_inode_info *ei = EXT2_I(dir);
/* Start where the previous lookup in this directory succeeded — cheap
 * locality optimisation; the scan wraps around until n == start again. */
346 start = ei->i_dir_start_lookup;
352 page = ext2_get_page(dir, n);
354 kaddr = page_address(page);
355 de = (ext2_dirent *) kaddr;
/* last address at which an entry of the wanted length could start */
356 kaddr += ext2_last_byte(dir, n) - reclen;
357 while ((char *) de <= kaddr) {
/* zero rec_len would loop forever — corruption */
358 if (de->rec_len == 0) {
359 ext2_error(dir->i_sb, __FUNCTION__,
360 "zero-length directory entry");
364 if (ext2_match (namelen, name, de))
366 de = ext2_next_entry(de);
372 /* next page is past the blocks we've got */
373 if (unlikely(n > (dir->i_blocks >> (PAGE_CACHE_SHIFT - 9)))) {
374 ext2_error(dir->i_sb, __FUNCTION__,
375 "dir %lu size %lld exceeds block count %llu",
376 dir->i_ino, dir->i_size,
377 (unsigned long long)dir->i_blocks);
/* wrap-around scan terminates when we return to the start page */
380 } while (n != start);
/* remember the page of the hit to seed the next lookup */
386 ei->i_dir_start_lookup = n;
/*
 * Return the ".." entry of `dir': it is always the second entry of the
 * first directory page (immediately after "."); *p receives the page so
 * the caller can release it.
 */
390 struct ext2_dir_entry_2 * ext2_dotdot (struct inode *dir, struct page **p)
392 struct page *page = ext2_get_page(dir, 0);
393 ext2_dirent *de = NULL;
396 de = ext2_next_entry((ext2_dirent *) page_address(page));
/*
 * Look up `dentry' in `dir' and return the inode number of the match,
 * releasing the page that held the entry.  The not-found return value
 * (presumably 0, set on an unseen line) is not visible in this view.
 */
402 ino_t ext2_inode_by_name(struct inode * dir, struct dentry *dentry)
405 struct ext2_dir_entry_2 * de;
408 de = ext2_find_entry (dir, dentry, &page);
410 res = le32_to_cpu(de->inode);
412 page_cache_release(page);
417 /* Releases the page */
/*
 * Repoint an existing directory entry `de' (located in `page') at `inode':
 * used by rename to overwrite a target entry in place.  The rewrite covers
 * exactly the entry's record, bracketed by prepare_write/commit_chunk, and
 * the directory's times are bumped and BTREE flag cleared on the way out.
 */
418 void ext2_set_link(struct inode *dir, struct ext2_dir_entry_2 *de,
419 struct page *page, struct inode *inode)
421 unsigned from = (char *) de - (char *) page_address(page);
422 unsigned to = from + le16_to_cpu(de->rec_len);
426 err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
429 de->inode = cpu_to_le32(inode->i_ino);
430 ext2_set_de_type (de, inode);
431 err = ext2_commit_chunk(page, from, to);
433 dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
/* entry layout changed: any indexed-dir hint is now stale */
434 EXT2_I(dir)->i_flags &= ~EXT2_BTREE_FL;
435 mark_inode_dirty(dir);
/*
 * Insert a new entry for `dentry' -> `inode' into the parent directory.
 * Scans every page (plus one past EOF, to grow the directory) for either
 * a hole at EOF, an unused entry big enough, or a live entry with enough
 * slack after its name to be split in two.  Returns -EEXIST if the name
 * is already present (the error-return lines fall outside this view).
 */
441 int ext2_add_link (struct dentry *dentry, struct inode *inode)
443 struct inode *dir = dentry->d_parent->d_inode;
444 const char *name = dentry->d_name.name;
445 int namelen = dentry->d_name.len;
446 unsigned chunk_size = ext2_chunk_size(dir);
447 unsigned reclen = EXT2_DIR_REC_LEN(namelen);
448 unsigned short rec_len, name_len;
449 struct page *page = NULL;
451 unsigned long npages = dir_pages(dir);
458 * We take care of directory expansion in the same loop.
459 * This code plays outside i_size, so it locks the page
460 * to protect that region.
/* <= npages: the extra iteration appends a fresh chunk past EOF */
462 for (n = 0; n <= npages; n++) {
465 page = ext2_get_page(dir, n);
470 kaddr = page_address(page);
471 dir_end = kaddr + ext2_last_byte(dir, n);
472 de = (ext2_dirent *)kaddr;
473 kaddr += PAGE_CACHE_SIZE - reclen;
474 while ((char *)de <= kaddr) {
/* hit EOF inside this page: start a brand-new chunk-sized entry */
475 if ((char *)de == dir_end) {
478 rec_len = chunk_size;
479 de->rec_len = cpu_to_le16(chunk_size);
/* zero rec_len would loop forever — corruption */
483 if (de->rec_len == 0) {
484 ext2_error(dir->i_sb, __FUNCTION__,
485 "zero-length directory entry");
/* name already exists (error path on unseen lines) */
490 if (ext2_match (namelen, name, de))
/* space actually used by the current entry's name */
492 name_len = EXT2_DIR_REC_LEN(de->name_len);
493 rec_len = le16_to_cpu(de->rec_len);
/* an unused (inode == 0) entry big enough: reuse it whole */
494 if (!de->inode && rec_len >= reclen)
/* live entry with enough slack after its name: split it */
496 if (rec_len >= name_len + reclen)
498 de = (ext2_dirent *) ((char *) de + rec_len);
/* got_it: write the new entry under prepare_write/commit_chunk */
507 from = (char*)de - (char*)page_address(page);
509 err = page->mapping->a_ops->prepare_write(NULL, page, from, to);
/* splitting a live entry: de keeps name_len bytes, de1 gets the rest */
513 ext2_dirent *de1 = (ext2_dirent *) ((char *) de + name_len);
514 de1->rec_len = cpu_to_le16(rec_len - name_len);
515 de->rec_len = cpu_to_le16(name_len);
518 de->name_len = namelen;
519 memcpy (de->name, name, namelen);
520 de->inode = cpu_to_le32(inode->i_ino);
521 ext2_set_de_type (de, inode);
522 err = ext2_commit_chunk(page, from, to);
523 dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
/* entry layout changed: any indexed-dir hint is now stale */
524 EXT2_I(dir)->i_flags &= ~EXT2_BTREE_FL;
525 mark_inode_dirty(dir);
537 * ext2_delete_entry deletes a directory entry by merging it with the
538 * previous entry. Page is up-to-date. Releases the page.
540 int ext2_delete_entry (struct ext2_dir_entry_2 * dir, struct page * page )
542 struct address_space *mapping = page->mapping;
543 struct inode *inode = mapping->host;
544 char *kaddr = page_address(page);
/* scan window: from the start of the victim's chunk to the victim's end */
545 unsigned from = ((char*)dir - kaddr) & ~(ext2_chunk_size(inode)-1);
546 unsigned to = ((char*)dir - kaddr) + le16_to_cpu(dir->rec_len);
547 ext2_dirent * pde = NULL;
548 ext2_dirent * de = (ext2_dirent *) (kaddr + from);
/* Walk the chunk to find pde, the entry immediately before the victim
 * (pde is presumably updated to `de' on an unseen line in this loop). */
551 while ((char*)de < (char*)dir) {
552 if (de->rec_len == 0) {
553 ext2_error(inode->i_sb, __FUNCTION__,
554 "zero-length directory entry");
559 de = ext2_next_entry(de);
562 from = (char*)pde - (char*)page_address(page);
564 err = mapping->a_ops->prepare_write(NULL, page, from, to);
/* Absorb the victim: predecessor's rec_len now spans through its record. */
568 pde->rec_len = cpu_to_le16(to-from);
570 err = ext2_commit_chunk(page, from, to);
571 inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
/* entry layout changed: any indexed-dir hint is now stale */
572 EXT2_I(inode)->i_flags &= ~EXT2_BTREE_FL;
573 mark_inode_dirty(inode);
580 * Set the first fragment of directory.
/*
 * Initialise a freshly created directory: write one chunk containing the
 * "." entry (pointing at `inode' itself) followed by a ".." entry
 * (pointing at `parent') whose rec_len consumes the rest of the chunk.
 * The kmap_atomic section only builds the entries; the page is then
 * committed and released.
 */
582 int ext2_make_empty(struct inode *inode, struct inode *parent)
584 struct address_space *mapping = inode->i_mapping;
585 struct page *page = grab_cache_page(mapping, 0);
586 unsigned chunk_size = ext2_chunk_size(inode);
587 struct ext2_dir_entry_2 * de;
593 err = mapping->a_ops->prepare_write(NULL, page, 0, chunk_size);
598 kaddr = kmap_atomic(page, KM_USER0);
599 memset(kaddr, 0, chunk_size);
/* "." — first entry, minimal record for a 1-char name */
600 de = (struct ext2_dir_entry_2 *)kaddr;
602 de->rec_len = cpu_to_le16(EXT2_DIR_REC_LEN(1));
603 memcpy (de->name, ".\0\0", 4);
604 de->inode = cpu_to_le32(inode->i_ino);
605 ext2_set_de_type (de, inode);
/* ".." — second entry, rec_len stretched to the end of the chunk */
607 de = (struct ext2_dir_entry_2 *)(kaddr + EXT2_DIR_REC_LEN(1));
609 de->rec_len = cpu_to_le16(chunk_size - EXT2_DIR_REC_LEN(1));
610 de->inode = cpu_to_le32(parent->i_ino);
611 memcpy (de->name, "..\0", 4);
612 ext2_set_de_type (de, inode);
613 kunmap_atomic(kaddr, KM_USER0);
614 err = ext2_commit_chunk(page, 0, chunk_size);
616 page_cache_release(page);
621 * routine to check that the specified directory is empty (for rmdir)
/* Returns nonzero iff the only live entries are "." (pointing at the
 * directory itself) and "..".  Any other live entry, or a malformed "."
 * entry, makes the directory non-empty (the return statements themselves
 * fall on lines outside this view). */
623 int ext2_empty_dir (struct inode * inode)
625 struct page *page = NULL;
626 unsigned long i, npages = dir_pages(inode);
628 for (i = 0; i < npages; i++) {
631 page = ext2_get_page(inode, i);
636 kaddr = page_address(page);
637 de = (ext2_dirent *)kaddr;
/* last address at which a minimal entry could still start */
638 kaddr += ext2_last_byte(inode, i) - EXT2_DIR_REC_LEN(1);
640 while ((char *)de <= kaddr) {
/* zero rec_len would loop forever — corruption */
641 if (de->rec_len == 0) {
642 ext2_error(inode->i_sb, __FUNCTION__,
643 "zero-length directory entry");
644 printk("kaddr=%p, de=%p\n", kaddr, de);
/* inode == 0 marks an unused entry; only live ones matter */
647 if (de->inode != 0) {
648 /* check for . and .. */
649 if (de->name[0] != '.')
651 if (de->name_len > 2)
/* name_len == 1, i.e. "." — must reference this very inode */
653 if (de->name_len < 2) {
655 cpu_to_le32(inode->i_ino))
657 } else if (de->name[1] != '.')
660 de = ext2_next_entry(de);
/*
 * Directory file operations: generic llseek/read plus the readdir and
 * fsync implementations above (ext2_sync_file is defined elsewhere).
 */
671 struct file_operations ext2_dir_operations = {
672 .llseek = generic_file_llseek,
673 .read = generic_read_dir,
674 .readdir = ext2_readdir,
676 .fsync = ext2_sync_file,