Diffstat (limited to 'fs/jffs2')
50 files changed, 11450 insertions, 5321 deletions
diff --git a/fs/jffs2/Kconfig b/fs/jffs2/Kconfig new file mode 100644 index 00000000000..d8bb6c411e9 --- /dev/null +++ b/fs/jffs2/Kconfig @@ -0,0 +1,188 @@ +config JFFS2_FS + tristate "Journalling Flash File System v2 (JFFS2) support" + select CRC32 + depends on MTD + help + JFFS2 is the second generation of the Journalling Flash File System + for use on diskless embedded devices. It provides improved wear + levelling, compression and support for hard links. You cannot use + this on normal block devices, only on 'MTD' devices. + + Further information on the design and implementation of JFFS2 is + available at <http://sources.redhat.com/jffs2/>. + +config JFFS2_FS_DEBUG + int "JFFS2 debugging verbosity (0 = quiet, 2 = noisy)" + depends on JFFS2_FS + default "0" + help + This controls the amount of debugging messages produced by the JFFS2 + code. Set it to zero for use in production systems. For evaluation, + testing and debugging, it's advisable to set it to one. This will + enable a few assertions and will print debugging messages at the + KERN_DEBUG loglevel, where they won't normally be visible. Level 2 + is unlikely to be useful - it enables extra debugging in certain + areas which at one point needed debugging, but when the bugs were + located and fixed, the detailed messages were relegated to level 2. + + If reporting bugs, please try to have available a full dump of the + messages at debug level 1 while the misbehaviour was occurring. + +config JFFS2_FS_WRITEBUFFER + bool "JFFS2 write-buffering support" + depends on JFFS2_FS + default y + help + This enables the write-buffering support in JFFS2. + + This functionality is required to support JFFS2 on the following + types of flash devices: + - NAND flash + - NOR flash with transparent ECC + - DataFlash + +config JFFS2_FS_WBUF_VERIFY + bool "Verify JFFS2 write-buffer reads" + depends on JFFS2_FS_WRITEBUFFER + default n + help + This causes JFFS2 to read back every page written through the + write-buffer, and check for errors. + +config JFFS2_SUMMARY + bool "JFFS2 summary support" + depends on JFFS2_FS + default n + help + This feature makes it possible to use summary information + for faster filesystem mount. + + The summary information can be inserted into a filesystem image + by the utility 'sumtool'. + + If unsure, say 'N'. + +config JFFS2_FS_XATTR + bool "JFFS2 XATTR support" + depends on JFFS2_FS + default n + help + Extended attributes are name:value pairs associated with inodes by + the kernel or by users (see the attr(5) manual page, or visit + <http://acl.bestbits.at/> for details). + + If unsure, say N. + +config JFFS2_FS_POSIX_ACL + bool "JFFS2 POSIX Access Control Lists" + depends on JFFS2_FS_XATTR + default y + select FS_POSIX_ACL + help + Posix Access Control Lists (ACLs) support permissions for users and + groups beyond the owner/group/world scheme. + + To learn more about Access Control Lists, visit the Posix ACLs for + Linux website <http://acl.bestbits.at/>. + + If you don't know what Access Control Lists are, say N + +config JFFS2_FS_SECURITY + bool "JFFS2 Security Labels" + depends on JFFS2_FS_XATTR + default y + help + Security labels support alternative access control models + implemented by security modules like SELinux. This option + enables an extended attribute handler for file security + labels in the jffs2 filesystem. + + If you are not using a security module that requires using + extended attributes for file security labels, say N. 
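A side note on the CONFIG_JFFS2_FS_DEBUG level described above: the jffs2_dbg(level, ...) calls introduced throughout this diff are only emitted when the configured verbosity is at least that level. The macro itself lives in fs/jffs2/debug.h, which is not part of this excerpt; the following is only an illustrative userspace sketch of that gating (the names and the fprintf sink are assumptions, not the kernel definition), built e.g. with -DCONFIG_JFFS2_FS_DEBUG=1.

#include <stdio.h>

#ifndef CONFIG_JFFS2_FS_DEBUG
#define CONFIG_JFFS2_FS_DEBUG 0            /* 0 = quiet, 1 = verbose, 2 = noisy */
#endif

/* Compile-time verbosity gate, analogous in spirit to the kernel's jffs2_dbg() */
#define jffs2_dbg(level, fmt, ...)                                      \
        do {                                                            \
                if (CONFIG_JFFS2_FS_DEBUG >= (level))                   \
                        fprintf(stderr, "jffs2: " fmt, ##__VA_ARGS__);  \
        } while (0)

int main(void)
{
        jffs2_dbg(1, "printed at debug level 1 and above\n");
        jffs2_dbg(2, "printed only at the noisy level 2\n");
        return 0;
}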
+ +config JFFS2_COMPRESSION_OPTIONS + bool "Advanced compression options for JFFS2" + depends on JFFS2_FS + default n + help + Enabling this option allows you to explicitly choose which + compression modules, if any, are enabled in JFFS2. Removing + compressors can mean you cannot read existing file systems, + and enabling experimental compressors can mean that you + write a file system which cannot be read by a standard kernel. + + If unsure, you should _definitely_ say 'N'. + +config JFFS2_ZLIB + bool "JFFS2 ZLIB compression support" if JFFS2_COMPRESSION_OPTIONS + select ZLIB_INFLATE + select ZLIB_DEFLATE + depends on JFFS2_FS + default y + help + Zlib is designed to be a free, general-purpose, legally unencumbered, + lossless data-compression library for use on virtually any computer + hardware and operating system. See <http://www.gzip.org/zlib/> for + further information. + + Say 'Y' if unsure. + +config JFFS2_LZO + bool "JFFS2 LZO compression support" if JFFS2_COMPRESSION_OPTIONS + select LZO_COMPRESS + select LZO_DECOMPRESS + depends on JFFS2_FS + default n + help + minilzo-based compression. Generally works better than Zlib. + + This feature was added in July, 2007. Say 'N' if you need + compatibility with older bootloaders or kernels. + +config JFFS2_RTIME + bool "JFFS2 RTIME compression support" if JFFS2_COMPRESSION_OPTIONS + depends on JFFS2_FS + default y + help + Rtime does manage to recompress already-compressed data. Say 'Y' if unsure. + +config JFFS2_RUBIN + bool "JFFS2 RUBIN compression support" if JFFS2_COMPRESSION_OPTIONS + depends on JFFS2_FS + default n + help + RUBINMIPS and DYNRUBIN compressors. Say 'N' if unsure. + +choice + prompt "JFFS2 default compression mode" if JFFS2_COMPRESSION_OPTIONS + default JFFS2_CMODE_PRIORITY + depends on JFFS2_FS + help + You can set here the default compression mode of JFFS2 from + the available compression modes. Don't touch if unsure. + +config JFFS2_CMODE_NONE + bool "no compression" + help + Uses no compression. + +config JFFS2_CMODE_PRIORITY + bool "priority" + help + Tries the compressors in a predefined order and chooses the first + successful one. + +config JFFS2_CMODE_SIZE + bool "size" + help + Tries all compressors and chooses the one which has the smallest + result. + +config JFFS2_CMODE_FAVOURLZO + bool "Favour LZO" + help + Tries all compressors and chooses the one which has the smallest + result but gives some preference to LZO (which has faster + decompression) at the expense of size. + +endchoice diff --git a/fs/jffs2/LICENCE b/fs/jffs2/LICENCE index cd81d83e4ad..56288590813 100644 --- a/fs/jffs2/LICENCE +++ b/fs/jffs2/LICENCE @@ -1,7 +1,7 @@ The files in this directory and elsewhere which refer to this LICENCE file are part of JFFS2, the Journalling Flash File System v2. - Copyright (C) 2001, 2002 Red Hat, Inc. + Copyright © 2001-2007 Red Hat, Inc. and others JFFS2 is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free @@ -28,8 +28,3 @@ of the GNU General Public License. This exception does not invalidate any other reasons why a work based on this file might be covered by the GNU General Public License. 
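The "JFFS2 default compression mode" choice above is implemented later in this diff by jffs2_compress() in fs/jffs2/compr.c: priority mode takes the first compressor that succeeds, size mode tries them all and keeps the smallest output, and FAVOURLZO weights LZO's result by FAVOUR_LZO_PERCENT. A minimal userspace sketch of the first two policies, assuming a hypothetical compressor table (the demo_* names are illustrative, not JFFS2 APIs):

#include <stddef.h>
#include <stdint.h>

struct demo_compressor {
        const char *name;
        /* returns 0 on success and stores the compressed length in *out_len */
        int (*compress)(const uint8_t *in, size_t in_len, size_t *out_len);
};

enum demo_mode { DEMO_MODE_PRIORITY, DEMO_MODE_SIZE };

/* Pick a compressor from a priority-ordered table:
 *  - DEMO_MODE_PRIORITY: the first one that succeeds wins
 *  - DEMO_MODE_SIZE:     try them all, keep the smallest result
 * Returns NULL if none succeeded (i.e. store the data uncompressed).
 */
const struct demo_compressor *
demo_pick(const struct demo_compressor *tbl, int n, enum demo_mode mode,
          const uint8_t *in, size_t in_len)
{
        const struct demo_compressor *best = NULL;
        size_t best_len = (size_t)-1;
        int i;

        for (i = 0; i < n; i++) {
                size_t out_len;

                if (tbl[i].compress(in, in_len, &out_len))
                        continue;               /* this compressor failed */
                if (mode == DEMO_MODE_PRIORITY)
                        return &tbl[i];         /* first success wins */
                if (out_len < best_len) {       /* size mode: keep smallest */
                        best = &tbl[i];
                        best_len = out_len;
                }
        }
        return best;
}

/* Toy compressor used only to exercise the picker: pretends to halve the data. */
static int toy_half(const uint8_t *in, size_t in_len, size_t *out_len)
{
        (void)in;
        *out_len = in_len / 2;
        return 0;
}

int main(void)
{
        const struct demo_compressor tbl[] = { { "toy", toy_half } };
        const uint8_t buf[64] = { 0 };

        /* Exit code 0 means a compressor was selected for the buffer. */
        return demo_pick(tbl, 1, DEMO_MODE_PRIORITY, buf, sizeof(buf)) ? 0 : 1;
}

The real jffs2_compress() shown further down additionally rejects results that are not strictly smaller than the input, falls back to JFFS2_COMPR_NONE when nothing wins, and in FAVOURLZO mode compares candidate sizes scaled by FAVOUR_LZO_PERCENT so that LZO's faster decompression is preferred at a small cost in size.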
-For information on obtaining alternative licences for JFFS2, see -http://sources.redhat.com/jffs2/jffs2-licence.html - - - $Id: LICENCE,v 1.1 2002/05/20 14:56:37 dwmw2 Exp $ diff --git a/fs/jffs2/Makefile b/fs/jffs2/Makefile index f1afe681ecd..60e5d49ca03 100644 --- a/fs/jffs2/Makefile +++ b/fs/jffs2/Makefile @@ -1,7 +1,6 @@ # # Makefile for the Linux Journalling Flash File System v2 (JFFS2) # -# $Id: Makefile.common,v 1.9 2005/02/09 09:23:53 pavlov Exp $ # obj-$(CONFIG_JFFS2_FS) += jffs2.o @@ -9,9 +8,14 @@ obj-$(CONFIG_JFFS2_FS) += jffs2.o jffs2-y := compr.o dir.o file.o ioctl.o nodelist.o malloc.o jffs2-y += read.o nodemgmt.o readinode.o write.o scan.o gc.o jffs2-y += symlink.o build.o erase.o background.o fs.o writev.o -jffs2-y += super.o +jffs2-y += super.o debug.o jffs2-$(CONFIG_JFFS2_FS_WRITEBUFFER) += wbuf.o +jffs2-$(CONFIG_JFFS2_FS_XATTR) += xattr.o xattr_trusted.o xattr_user.o +jffs2-$(CONFIG_JFFS2_FS_SECURITY) += security.o +jffs2-$(CONFIG_JFFS2_FS_POSIX_ACL) += acl.o jffs2-$(CONFIG_JFFS2_RUBIN) += compr_rubin.o jffs2-$(CONFIG_JFFS2_RTIME) += compr_rtime.o jffs2-$(CONFIG_JFFS2_ZLIB) += compr_zlib.o +jffs2-$(CONFIG_JFFS2_LZO) += compr_lzo.o +jffs2-$(CONFIG_JFFS2_SUMMARY) += summary.o diff --git a/fs/jffs2/README.Locking b/fs/jffs2/README.Locking index b7943439b6e..3ea36554107 100644 --- a/fs/jffs2/README.Locking +++ b/fs/jffs2/README.Locking @@ -1,4 +1,3 @@ - $Id: README.Locking,v 1.12 2005/04/13 13:22:35 dwmw2 Exp $ JFFS2 LOCKING DOCUMENTATION --------------------------- @@ -15,7 +14,7 @@ be fairly close. alloc_sem --------- -The alloc_sem is a per-filesystem semaphore, used primarily to ensure +The alloc_sem is a per-filesystem mutex, used primarily to ensure contiguous allocation of space on the medium. It is automatically obtained during space allocations (jffs2_reserve_space()) and freed upon write completion (jffs2_complete_reservation()). Note that @@ -42,10 +41,10 @@ if the wbuf is currently holding any data is permitted, though. Ordering constraints: See f->sem. - File Semaphore f->sem + File Mutex f->sem --------------------- -This is the JFFS2-internal equivalent of the inode semaphore i->i_sem. +This is the JFFS2-internal equivalent of the inode mutex i->i_sem. It protects the contents of the jffs2_inode_info private inode data, including the linked list of node fragments (but see the notes below on erase_completion_lock), etc. @@ -61,14 +60,14 @@ lead to deadlock, unless we played games with unlocking the i_sem before calling the space allocation functions. Instead of playing such games, we just have an extra internal -semaphore, which is obtained by the garbage collection code and also +mutex, which is obtained by the garbage collection code and also by the normal file system code _after_ allocation of space. Ordering constraints: 1. Never attempt to allocate space or lock alloc_sem with any f->sem held. - 2. Never attempt to lock two file semaphores in one thread. + 2. Never attempt to lock two file mutexes in one thread. No ordering rules have been made for doing so. @@ -87,8 +86,8 @@ a simple spin_lock() rather than spin_lock_bh(). Note that the per-inode list of physical nodes (f->nodes) is a special case. Any changes to _valid_ nodes (i.e. ->flash_offset & 1 == 0) in -the list are protected by the file semaphore f->sem. But the erase -code may remove _obsolete_ nodes from the list while holding only the +the list are protected by the file mutex f->sem. But the erase code +may remove _obsolete_ nodes from the list while holding only the erase_completion_lock. 
So you can walk the list only while holding the erase_completion_lock, and can drop the lock temporarily mid-walk as long as the pointer you're holding is to a _valid_ node, not an @@ -125,10 +124,10 @@ Ordering constraints: erase_free_sem -------------- -This semaphore is only used by the erase code which frees obsolete -node references and the jffs2_garbage_collect_deletion_dirent() -function. The latter function on NAND flash must read _obsolete_ nodes -to determine whether the 'deletion dirent' under consideration can be +This mutex is only used by the erase code which frees obsolete node +references and the jffs2_garbage_collect_deletion_dirent() function. +The latter function on NAND flash must read _obsolete_ nodes to +determine whether the 'deletion dirent' under consideration can be discarded or whether it is still required to show that an inode has been unlinked. Because reading from the flash may sleep, the erase_completion_lock cannot be held, so an alternative, more @@ -150,3 +149,24 @@ the buffer. Ordering constraints: Lock wbuf_sem last, after the alloc_sem or and f->sem. + + + c->xattr_sem + ------------ + +This read/write semaphore protects against concurrent access to the +xattr related objects which include stuff in superblock and ic->xref. +In read-only path, write-semaphore is too much exclusion. It's enough +by read-semaphore. But you must hold write-semaphore when updating, +creating or deleting any xattr related object. + +Once xattr_sem released, there would be no assurance for the existence +of those objects. Thus, a series of processes is often required to retry, +when updating such a object is necessary under holding read semaphore. +For example, do_jffs2_getxattr() holds read-semaphore to scan xref and +xdatum at first. But it retries this process with holding write-semaphore +after release read-semaphore, if it's necessary to load name/value pair +from medium. + +Ordering constraints: + Lock xattr_sem last, after the alloc_sem. diff --git a/fs/jffs2/TODO b/fs/jffs2/TODO index 2bff82fd221..ca28964abd4 100644 --- a/fs/jffs2/TODO +++ b/fs/jffs2/TODO @@ -1,40 +1,37 @@ -$Id: TODO,v 1.10 2002/09/09 16:31:21 dwmw2 Exp $ + - support asynchronous operation -- add a per-fs 'reserved_space' count, + let each outstanding write reserve the _maximum_ amount of physical + space it could take. Let GC flush the outstanding writes because the + reservations will necessarily be pessimistic. With this we could even + do shared writable mmap, if we can have a fs hook for do_wp_page() to + make the reservation. - disable compression in commit_write()? - fine-tune the allocation / GC thresholds - chattr support - turning on/off and tuning compression per-inode - checkpointing (do we need this? scan is quite fast) - make the scan code populate real inodes so read_inode just after mount doesn't have to read the flash twice for large files. - Make this a per-inode option, changable with chattr, so you can + Make this a per-inode option, changeable with chattr, so you can decide which inodes should be in-core immediately after mount. - test, test, test - NAND flash support: - - flush_wbuf using GC to fill it, don't just pad. - - Deal with write errors. Data don't get lost - we just have to write - the affected node(s) out again somewhere else. - - make fsync flush only if actually required - - make sys_sync() work. - - reboot notifier - - timed flush of old wbuf - - fix magical second arg of jffs2_flush_wbuf(). Split into two or more functions instead. 
- + - almost done :) + - use bad block check instead of the hardwired byte check - Optimisations: - - Stop GC from decompressing and immediately recompressing nodes which could - just be copied intact. (We now keep track of REF_PRISTINE flag. Easy now.) - - Furthermore, in the case where it could be copied intact we don't even need - to call iget() for it -- if we use (raw_node_raw->flash_offset & 2) as a flag - to show a node can be copied intact and it's _not_ in icache, we could just do - it, fix up the next_in_ino list and move on. We would need a way to find out - _whether_ it's in icache though -- if it's in icache we also need to do the - fragment lists, etc. P'raps a flag or pointer in the jffs2_inode_cache could - help. (We have half of this now.) + - Split writes so they go to two separate blocks rather than just c->nextblock. + By writing _new_ nodes to one block, and garbage-collected REF_PRISTINE + nodes to a different one, we can separate clean nodes from those which + are likely to become dirty, and end up with blocks which are each far + closer to 100% or 0% clean, hence speeding up later GC progress dramatically. - Stop keeping name in-core with struct jffs2_full_dirent. If we keep the hash in the full dirent, we only need to go to the flash in lookup() when we think we've got a match, and in readdir(). - Doubly-linked next_in_ino list to allow us to free obsoleted raw_node_refs immediately? - - Remove totlen from jffs2_raw_node_ref? Need to have totlen passed into - jffs2_mark_node_obsolete(). Can all callers work it out? - Remove size from jffs2_raw_node_frag. + +dedekind: +1. __jffs2_flush_wbuf() has a strange 'pad' parameter. Eliminate. +2. get_sb()->build_fs()->scan() path... Why get_sb() removes scan()'s crap in + case of failure? scan() does not clean everything. Fix. diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c new file mode 100644 index 00000000000..009ec0b5993 --- /dev/null +++ b/fs/jffs2/acl.c @@ -0,0 +1,310 @@ +/* + * JFFS2 -- Journalling Flash File System, Version 2. + * + * Copyright © 2006 NEC Corporation + * + * Created by KaiGai Kohei <kaigai@ak.jp.nec.com> + * + * For licensing information, see the file 'LICENCE' in this directory. 
+ * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/fs.h> +#include <linux/sched.h> +#include <linux/time.h> +#include <linux/crc32.h> +#include <linux/jffs2.h> +#include <linux/xattr.h> +#include <linux/posix_acl_xattr.h> +#include <linux/mtd/mtd.h> +#include "nodelist.h" + +static size_t jffs2_acl_size(int count) +{ + if (count <= 4) { + return sizeof(struct jffs2_acl_header) + + count * sizeof(struct jffs2_acl_entry_short); + } else { + return sizeof(struct jffs2_acl_header) + + 4 * sizeof(struct jffs2_acl_entry_short) + + (count - 4) * sizeof(struct jffs2_acl_entry); + } +} + +static int jffs2_acl_count(size_t size) +{ + size_t s; + + size -= sizeof(struct jffs2_acl_header); + if (size < 4 * sizeof(struct jffs2_acl_entry_short)) { + if (size % sizeof(struct jffs2_acl_entry_short)) + return -1; + return size / sizeof(struct jffs2_acl_entry_short); + } else { + s = size - 4 * sizeof(struct jffs2_acl_entry_short); + if (s % sizeof(struct jffs2_acl_entry)) + return -1; + return s / sizeof(struct jffs2_acl_entry) + 4; + } +} + +static struct posix_acl *jffs2_acl_from_medium(void *value, size_t size) +{ + void *end = value + size; + struct jffs2_acl_header *header = value; + struct jffs2_acl_entry *entry; + struct posix_acl *acl; + uint32_t ver; + int i, count; + + if (!value) + return NULL; + if (size < sizeof(struct jffs2_acl_header)) + return ERR_PTR(-EINVAL); + ver = je32_to_cpu(header->a_version); + if (ver != JFFS2_ACL_VERSION) { + JFFS2_WARNING("Invalid ACL version. (=%u)\n", ver); + return ERR_PTR(-EINVAL); + } + + value += sizeof(struct jffs2_acl_header); + count = jffs2_acl_count(size); + if (count < 0) + return ERR_PTR(-EINVAL); + if (count == 0) + return NULL; + + acl = posix_acl_alloc(count, GFP_KERNEL); + if (!acl) + return ERR_PTR(-ENOMEM); + + for (i=0; i < count; i++) { + entry = value; + if (value + sizeof(struct jffs2_acl_entry_short) > end) + goto fail; + acl->a_entries[i].e_tag = je16_to_cpu(entry->e_tag); + acl->a_entries[i].e_perm = je16_to_cpu(entry->e_perm); + switch (acl->a_entries[i].e_tag) { + case ACL_USER_OBJ: + case ACL_GROUP_OBJ: + case ACL_MASK: + case ACL_OTHER: + value += sizeof(struct jffs2_acl_entry_short); + break; + + case ACL_USER: + value += sizeof(struct jffs2_acl_entry); + if (value > end) + goto fail; + acl->a_entries[i].e_uid = + make_kuid(&init_user_ns, + je32_to_cpu(entry->e_id)); + break; + case ACL_GROUP: + value += sizeof(struct jffs2_acl_entry); + if (value > end) + goto fail; + acl->a_entries[i].e_gid = + make_kgid(&init_user_ns, + je32_to_cpu(entry->e_id)); + break; + + default: + goto fail; + } + } + if (value != end) + goto fail; + return acl; + fail: + posix_acl_release(acl); + return ERR_PTR(-EINVAL); +} + +static void *jffs2_acl_to_medium(const struct posix_acl *acl, size_t *size) +{ + struct jffs2_acl_header *header; + struct jffs2_acl_entry *entry; + void *e; + size_t i; + + *size = jffs2_acl_size(acl->a_count); + header = kmalloc(sizeof(*header) + acl->a_count * sizeof(*entry), GFP_KERNEL); + if (!header) + return ERR_PTR(-ENOMEM); + header->a_version = cpu_to_je32(JFFS2_ACL_VERSION); + e = header + 1; + for (i=0; i < acl->a_count; i++) { + const struct posix_acl_entry *acl_e = &acl->a_entries[i]; + entry = e; + entry->e_tag = cpu_to_je16(acl_e->e_tag); + entry->e_perm = cpu_to_je16(acl_e->e_perm); + switch(acl_e->e_tag) { + case ACL_USER: + entry->e_id = cpu_to_je32( + from_kuid(&init_user_ns, acl_e->e_uid)); + e += sizeof(struct jffs2_acl_entry); + break; 
+ case ACL_GROUP: + entry->e_id = cpu_to_je32( + from_kgid(&init_user_ns, acl_e->e_gid)); + e += sizeof(struct jffs2_acl_entry); + break; + + case ACL_USER_OBJ: + case ACL_GROUP_OBJ: + case ACL_MASK: + case ACL_OTHER: + e += sizeof(struct jffs2_acl_entry_short); + break; + + default: + goto fail; + } + } + return header; + fail: + kfree(header); + return ERR_PTR(-EINVAL); +} + +struct posix_acl *jffs2_get_acl(struct inode *inode, int type) +{ + struct posix_acl *acl; + char *value = NULL; + int rc, xprefix; + + switch (type) { + case ACL_TYPE_ACCESS: + xprefix = JFFS2_XPREFIX_ACL_ACCESS; + break; + case ACL_TYPE_DEFAULT: + xprefix = JFFS2_XPREFIX_ACL_DEFAULT; + break; + default: + BUG(); + } + rc = do_jffs2_getxattr(inode, xprefix, "", NULL, 0); + if (rc > 0) { + value = kmalloc(rc, GFP_KERNEL); + if (!value) + return ERR_PTR(-ENOMEM); + rc = do_jffs2_getxattr(inode, xprefix, "", value, rc); + } + if (rc > 0) { + acl = jffs2_acl_from_medium(value, rc); + } else if (rc == -ENODATA || rc == -ENOSYS) { + acl = NULL; + } else { + acl = ERR_PTR(rc); + } + if (value) + kfree(value); + if (!IS_ERR(acl)) + set_cached_acl(inode, type, acl); + return acl; +} + +static int __jffs2_set_acl(struct inode *inode, int xprefix, struct posix_acl *acl) +{ + char *value = NULL; + size_t size = 0; + int rc; + + if (acl) { + value = jffs2_acl_to_medium(acl, &size); + if (IS_ERR(value)) + return PTR_ERR(value); + } + rc = do_jffs2_setxattr(inode, xprefix, "", value, size, 0); + if (!value && rc == -ENODATA) + rc = 0; + kfree(value); + + return rc; +} + +int jffs2_set_acl(struct inode *inode, struct posix_acl *acl, int type) +{ + int rc, xprefix; + + switch (type) { + case ACL_TYPE_ACCESS: + xprefix = JFFS2_XPREFIX_ACL_ACCESS; + if (acl) { + umode_t mode = inode->i_mode; + rc = posix_acl_equiv_mode(acl, &mode); + if (rc < 0) + return rc; + if (inode->i_mode != mode) { + struct iattr attr; + + attr.ia_valid = ATTR_MODE | ATTR_CTIME; + attr.ia_mode = mode; + attr.ia_ctime = CURRENT_TIME_SEC; + rc = jffs2_do_setattr(inode, &attr); + if (rc < 0) + return rc; + } + if (rc == 0) + acl = NULL; + } + break; + case ACL_TYPE_DEFAULT: + xprefix = JFFS2_XPREFIX_ACL_DEFAULT; + if (!S_ISDIR(inode->i_mode)) + return acl ? -EACCES : 0; + break; + default: + return -EINVAL; + } + rc = __jffs2_set_acl(inode, xprefix, acl); + if (!rc) + set_cached_acl(inode, type, acl); + return rc; +} + +int jffs2_init_acl_pre(struct inode *dir_i, struct inode *inode, umode_t *i_mode) +{ + struct posix_acl *default_acl, *acl; + int rc; + + cache_no_acl(inode); + + rc = posix_acl_create(dir_i, i_mode, &default_acl, &acl); + if (rc) + return rc; + + if (default_acl) { + set_cached_acl(inode, ACL_TYPE_DEFAULT, default_acl); + posix_acl_release(default_acl); + } + if (acl) { + set_cached_acl(inode, ACL_TYPE_ACCESS, acl); + posix_acl_release(acl); + } + return 0; +} + +int jffs2_init_acl_post(struct inode *inode) +{ + int rc; + + if (inode->i_default_acl) { + rc = __jffs2_set_acl(inode, JFFS2_XPREFIX_ACL_DEFAULT, inode->i_default_acl); + if (rc) + return rc; + } + + if (inode->i_acl) { + rc = __jffs2_set_acl(inode, JFFS2_XPREFIX_ACL_ACCESS, inode->i_acl); + if (rc) + return rc; + } + + return 0; +} diff --git a/fs/jffs2/acl.h b/fs/jffs2/acl.h new file mode 100644 index 00000000000..2e2b5745c3b --- /dev/null +++ b/fs/jffs2/acl.h @@ -0,0 +1,41 @@ +/* + * JFFS2 -- Journalling Flash File System, Version 2. 
+ * + * Copyright © 2006 NEC Corporation + * + * Created by KaiGai Kohei <kaigai@ak.jp.nec.com> + * + * For licensing information, see the file 'LICENCE' in this directory. + * + */ + +struct jffs2_acl_entry { + jint16_t e_tag; + jint16_t e_perm; + jint32_t e_id; +}; + +struct jffs2_acl_entry_short { + jint16_t e_tag; + jint16_t e_perm; +}; + +struct jffs2_acl_header { + jint32_t a_version; +}; + +#ifdef CONFIG_JFFS2_FS_POSIX_ACL + +struct posix_acl *jffs2_get_acl(struct inode *inode, int type); +int jffs2_set_acl(struct inode *inode, struct posix_acl *acl, int type); +extern int jffs2_init_acl_pre(struct inode *, struct inode *, umode_t *); +extern int jffs2_init_acl_post(struct inode *); + +#else + +#define jffs2_get_acl (NULL) +#define jffs2_set_acl (NULL) +#define jffs2_init_acl_pre(dir_i,inode,mode) (0) +#define jffs2_init_acl_post(inode) (0) + +#endif /* CONFIG_JFFS2_FS_POSIX_ACL */ diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c index 0f224384f17..bb9cebc9ca8 100644 --- a/fs/jffs2/background.c +++ b/fs/jffs2/background.c @@ -1,20 +1,24 @@ /* * JFFS2 -- Journalling Flash File System, Version 2. * - * Copyright (C) 2001-2003 Red Hat, Inc. + * Copyright © 2001-2007 Red Hat, Inc. + * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org> * * Created by David Woodhouse <dwmw2@infradead.org> * * For licensing information, see the file 'LICENCE' in this directory. * - * $Id: background.c,v 1.54 2005/05/20 21:37:12 gleixner Exp $ - * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/kernel.h> #include <linux/jffs2.h> #include <linux/mtd/mtd.h> #include <linux/completion.h> +#include <linux/sched.h> +#include <linux/freezer.h> +#include <linux/kthread.h> #include "nodelist.h" @@ -22,35 +26,35 @@ static int jffs2_garbage_collect_thread(void *); void jffs2_garbage_collect_trigger(struct jffs2_sb_info *c) { - spin_lock(&c->erase_completion_lock); - if (c->gc_task && jffs2_thread_should_wake(c)) - send_sig(SIGHUP, c->gc_task, 1); - spin_unlock(&c->erase_completion_lock); + assert_spin_locked(&c->erase_completion_lock); + if (c->gc_task && jffs2_thread_should_wake(c)) + send_sig(SIGHUP, c->gc_task, 1); } /* This must only ever be called when no GC thread is currently running */ int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c) { - pid_t pid; + struct task_struct *tsk; int ret = 0; - if (c->gc_task) - BUG(); + BUG_ON(c->gc_task); init_completion(&c->gc_thread_start); init_completion(&c->gc_thread_exit); - pid = kernel_thread(jffs2_garbage_collect_thread, c, CLONE_FS|CLONE_FILES); - if (pid < 0) { - printk(KERN_WARNING "fork failed for JFFS2 garbage collect thread: %d\n", -pid); + tsk = kthread_run(jffs2_garbage_collect_thread, c, "jffs2_gcd_mtd%d", c->mtd->index); + if (IS_ERR(tsk)) { + pr_warn("fork failed for JFFS2 garbage collect thread: %ld\n", + -PTR_ERR(tsk)); complete(&c->gc_thread_exit); - ret = pid; + ret = PTR_ERR(tsk); } else { /* Wait for it... 
*/ - D1(printk(KERN_DEBUG "JFFS2: Garbage collect thread is pid %d\n", pid)); + jffs2_dbg(1, "Garbage collect thread is pid %d\n", tsk->pid); wait_for_completion(&c->gc_thread_start); + ret = tsk->pid; } - + return ret; } @@ -59,7 +63,7 @@ void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c) int wait = 0; spin_lock(&c->erase_completion_lock); if (c->gc_task) { - D1(printk(KERN_DEBUG "jffs2: Killing GC task %d\n", c->gc_task->pid)); + jffs2_dbg(1, "Killing GC task %d\n", c->gc_task->pid); send_sig(SIGKILL, c->gc_task, 1); wait = 1; } @@ -71,67 +75,88 @@ void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c) static int jffs2_garbage_collect_thread(void *_c) { struct jffs2_sb_info *c = _c; + sigset_t hupmask; - daemonize("jffs2_gcd_mtd%d", c->mtd->index); + siginitset(&hupmask, sigmask(SIGHUP)); allow_signal(SIGKILL); allow_signal(SIGSTOP); allow_signal(SIGCONT); + allow_signal(SIGHUP); c->gc_task = current; complete(&c->gc_thread_start); set_user_nice(current, 10); + set_freezable(); for (;;) { - allow_signal(SIGHUP); - + sigprocmask(SIG_UNBLOCK, &hupmask, NULL); + again: + spin_lock(&c->erase_completion_lock); if (!jffs2_thread_should_wake(c)) { set_current_state (TASK_INTERRUPTIBLE); - D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread sleeping...\n")); - /* Yes, there's a race here; we checked jffs2_thread_should_wake() - before setting current->state to TASK_INTERRUPTIBLE. But it doesn't - matter - We don't care if we miss a wakeup, because the GC thread - is only an optimisation anyway. */ + spin_unlock(&c->erase_completion_lock); + jffs2_dbg(1, "%s(): sleeping...\n", __func__); schedule(); + } else { + spin_unlock(&c->erase_completion_lock); + } + /* Problem - immediately after bootup, the GCD spends a lot + * of time in places like jffs2_kill_fragtree(); so much so + * that userspace processes (like gdm and X) are starved + * despite plenty of cond_resched()s and renicing. Yield() + * doesn't help, either (presumably because userspace and GCD + * are generally competing for a higher latency resource - + * disk). + * This forces the GCD to slow the hell down. Pulling an + * inode in with read_inode() is much preferable to having + * the GC thread get there first. */ + schedule_timeout_interruptible(msecs_to_jiffies(50)); + + if (kthread_should_stop()) { + jffs2_dbg(1, "%s(): kthread_stop() called\n", __func__); + goto die; } - if (try_to_freeze()) - continue; - - cond_resched(); - - /* Put_super will send a SIGKILL and then wait on the sem. + /* Put_super will send a SIGKILL and then wait on the sem. */ - while (signal_pending(current)) { + while (signal_pending(current) || freezing(current)) { siginfo_t info; unsigned long signr; + if (try_to_freeze()) + goto again; + signr = dequeue_signal_lock(current, ¤t->blocked, &info); switch(signr) { case SIGSTOP: - D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGSTOP received.\n")); + jffs2_dbg(1, "%s(): SIGSTOP received\n", + __func__); set_current_state(TASK_STOPPED); schedule(); break; case SIGKILL: - D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGKILL received.\n")); + jffs2_dbg(1, "%s(): SIGKILL received\n", + __func__); goto die; case SIGHUP: - D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGHUP received.\n")); + jffs2_dbg(1, "%s(): SIGHUP received\n", + __func__); break; default: - D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): signal %ld received\n", signr)); + jffs2_dbg(1, "%s(): signal %ld received\n", + __func__, signr); } } /* We don't want SIGHUP to interrupt us. 
STOP and KILL are OK though. */ - disallow_signal(SIGHUP); + sigprocmask(SIG_BLOCK, &hupmask, NULL); - D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): pass\n")); + jffs2_dbg(1, "%s(): pass\n", __func__); if (jffs2_garbage_collect_pass(c) == -ENOSPC) { - printk(KERN_NOTICE "No space for garbage collection. Aborting GC thread\n"); + pr_notice("No space for garbage collection. Aborting GC thread\n"); goto die; } } diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c index 97dc39796e2..a3750f902ad 100644 --- a/fs/jffs2/build.c +++ b/fs/jffs2/build.c @@ -1,16 +1,17 @@ /* * JFFS2 -- Journalling Flash File System, Version 2. * - * Copyright (C) 2001-2003 Red Hat, Inc. + * Copyright © 2001-2007 Red Hat, Inc. + * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org> * * Created by David Woodhouse <dwmw2@infradead.org> * * For licensing information, see the file 'LICENCE' in this directory. * - * $Id: build.c,v 1.71 2005/07/12 16:37:08 dedekind Exp $ - * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/kernel.h> #include <linux/sched.h> #include <linux/slab.h> @@ -18,12 +19,13 @@ #include <linux/mtd/mtd.h> #include "nodelist.h" -static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *, struct jffs2_inode_cache *, struct jffs2_full_dirent **); +static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *, + struct jffs2_inode_cache *, struct jffs2_full_dirent **); static inline struct jffs2_inode_cache * first_inode_chain(int *i, struct jffs2_sb_info *c) { - for (; *i < INOCACHE_HASHSIZE; (*i)++) { + for (; *i < c->inocache_hashsize; (*i)++) { if (c->inocache_list[*i]) return c->inocache_list[*i]; } @@ -46,11 +48,12 @@ next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c) ic = next_inode(&i, ic, (c))) -static inline void jffs2_build_inode_pass1(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) +static void jffs2_build_inode_pass1(struct jffs2_sb_info *c, + struct jffs2_inode_cache *ic) { struct jffs2_full_dirent *fd; - D1(printk(KERN_DEBUG "jffs2_build_inode building directory inode #%u\n", ic->ino)); + dbg_fsbuild("building directory inode #%u\n", ic->ino); /* For each child, increase nlink */ for(fd = ic->scan_dents; fd; fd = fd->next) { @@ -58,26 +61,29 @@ static inline void jffs2_build_inode_pass1(struct jffs2_sb_info *c, struct jffs2 if (!fd->ino) continue; - /* XXX: Can get high latency here with huge directories */ + /* we can get high latency here with huge directories */ child_ic = jffs2_get_ino_cache(c, fd->ino); if (!child_ic) { - printk(KERN_NOTICE "Eep. Child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n", + dbg_fsbuild("child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n", fd->name, fd->ino, ic->ino); jffs2_mark_node_obsolete(c, fd->raw); continue; } - if (child_ic->nlink++ && fd->type == DT_DIR) { - printk(KERN_NOTICE "Child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n", fd->name, fd->ino, ic->ino); - if (fd->ino == 1 && ic->ino == 1) { - printk(KERN_NOTICE "This is mostly harmless, and probably caused by creating a JFFS2 image\n"); - printk(KERN_NOTICE "using a buggy version of mkfs.jffs2. Use at least v1.17.\n"); + if (fd->type == DT_DIR) { + if (child_ic->pino_nlink) { + JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n", + fd->name, fd->ino, ic->ino); + /* TODO: What do we do about it? */ + } else { + child_ic->pino_nlink = ic->ino; } - /* What do we do about it? 
*/ - } - D1(printk(KERN_DEBUG "Increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino)); - /* Can't free them. We might need them in pass 2 */ + } else + child_ic->pino_nlink++; + + dbg_fsbuild("increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino); + /* Can't free scan_dents so far. We might need them in pass 2 */ } } @@ -94,6 +100,8 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c) struct jffs2_full_dirent *fd; struct jffs2_full_dirent *dead_fds = NULL; + dbg_fsbuild("build FS data structures\n"); + /* First, scan the medium and build all the inode caches with lists of physical nodes */ @@ -103,60 +111,54 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c) if (ret) goto exit; - D1(printk(KERN_DEBUG "Scanned flash completely\n")); - D2(jffs2_dump_block_lists(c)); + dbg_fsbuild("scanned flash completely\n"); + jffs2_dbg_dump_block_lists_nolock(c); + dbg_fsbuild("pass 1 starting\n"); c->flags |= JFFS2_SB_FLAG_BUILDING; /* Now scan the directory tree, increasing nlink according to every dirent found. */ for_each_inode(i, c, ic) { - D1(printk(KERN_DEBUG "Pass 1: ino #%u\n", ic->ino)); - - D1(BUG_ON(ic->ino > c->highest_ino)); - if (ic->scan_dents) { jffs2_build_inode_pass1(c, ic); cond_resched(); } } - D1(printk(KERN_DEBUG "Pass 1 complete\n")); + dbg_fsbuild("pass 1 complete\n"); /* Next, scan for inodes with nlink == 0 and remove them. If they were directories, then decrement the nlink of their children too, and repeat the scan. As that's going to be a fairly uncommon occurrence, it's not so evil to do it this way. Recursion bad. */ - D1(printk(KERN_DEBUG "Pass 2 starting\n")); + dbg_fsbuild("pass 2 starting\n"); for_each_inode(i, c, ic) { - D1(printk(KERN_DEBUG "Pass 2: ino #%u, nlink %d, ic %p, nodes %p\n", ic->ino, ic->nlink, ic, ic->nodes)); - if (ic->nlink) + if (ic->pino_nlink) continue; - + jffs2_build_remove_unlinked_inode(c, ic, &dead_fds); cond_resched(); - } + } - D1(printk(KERN_DEBUG "Pass 2a starting\n")); + dbg_fsbuild("pass 2a starting\n"); while (dead_fds) { fd = dead_fds; dead_fds = fd->next; ic = jffs2_get_ino_cache(c, fd->ino); - D1(printk(KERN_DEBUG "Removing dead_fd ino #%u (\"%s\"), ic at %p\n", fd->ino, fd->name, ic)); if (ic) jffs2_build_remove_unlinked_inode(c, ic, &dead_fds); jffs2_free_full_dirent(fd); } - D1(printk(KERN_DEBUG "Pass 2 complete\n")); - + dbg_fsbuild("pass 2a complete\n"); + dbg_fsbuild("freeing temporary data structures\n"); + /* Finally, we can scan again and free the dirent structs */ for_each_inode(i, c, ic) { - D1(printk(KERN_DEBUG "Pass 3: ino #%u, ic %p, nodes %p\n", ic->ino, ic, ic->nodes)); - while(ic->scan_dents) { fd = ic->scan_dents; ic->scan_dents = fd->next; @@ -165,10 +167,10 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c) ic->scan_dents = NULL; cond_resched(); } + jffs2_build_xattr_subsystem(c); c->flags &= ~JFFS2_SB_FLAG_BUILDING; - - D1(printk(KERN_DEBUG "Pass 3 complete\n")); - D2(jffs2_dump_block_lists(c)); + + dbg_fsbuild("FS build complete\n"); /* Rotate the lists by some number to ensure wear levelling */ jffs2_rotate_lists(c); @@ -184,29 +186,32 @@ exit: jffs2_free_full_dirent(fd); } } + jffs2_clear_xattr_subsystem(c); } return ret; } -static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, struct jffs2_full_dirent **dead_fds) +static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c, + struct jffs2_inode_cache *ic, + struct jffs2_full_dirent **dead_fds) { struct jffs2_raw_node_ref *raw; struct jffs2_full_dirent 
*fd; - D1(printk(KERN_DEBUG "JFFS2: Removing ino #%u with nlink == zero.\n", ic->ino)); - + dbg_fsbuild("removing ino #%u with nlink == zero.\n", ic->ino); + raw = ic->nodes; while (raw != (void *)ic) { struct jffs2_raw_node_ref *next = raw->next_in_ino; - D1(printk(KERN_DEBUG "obsoleting node at 0x%08x\n", ref_offset(raw))); + dbg_fsbuild("obsoleting node at 0x%08x\n", ref_offset(raw)); jffs2_mark_node_obsolete(c, raw); raw = next; } if (ic->scan_dents) { int whinged = 0; - D1(printk(KERN_DEBUG "Inode #%u was a directory which may have children...\n", ic->ino)); + dbg_fsbuild("inode #%u was a directory which may have children...\n", ic->ino); while(ic->scan_dents) { struct jffs2_inode_cache *child_ic; @@ -216,45 +221,46 @@ static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c, struct jf if (!fd->ino) { /* It's a deletion dirent. Ignore it */ - D1(printk(KERN_DEBUG "Child \"%s\" is a deletion dirent, skipping...\n", fd->name)); + dbg_fsbuild("child \"%s\" is a deletion dirent, skipping...\n", fd->name); jffs2_free_full_dirent(fd); continue; } - if (!whinged) { + if (!whinged) whinged = 1; - printk(KERN_NOTICE "Inode #%u was a directory with children - removing those too...\n", ic->ino); - } - D1(printk(KERN_DEBUG "Removing child \"%s\", ino #%u\n", - fd->name, fd->ino)); - + dbg_fsbuild("removing child \"%s\", ino #%u\n", fd->name, fd->ino); + child_ic = jffs2_get_ino_cache(c, fd->ino); if (!child_ic) { - printk(KERN_NOTICE "Cannot remove child \"%s\", ino #%u, because it doesn't exist\n", fd->name, fd->ino); + dbg_fsbuild("cannot remove child \"%s\", ino #%u, because it doesn't exist\n", + fd->name, fd->ino); jffs2_free_full_dirent(fd); continue; } - /* Reduce nlink of the child. If it's now zero, stick it on the + /* Reduce nlink of the child. If it's now zero, stick it on the dead_fds list to be cleaned up later. Else just free the fd */ - child_ic->nlink--; - - if (!child_ic->nlink) { - D1(printk(KERN_DEBUG "Inode #%u (\"%s\") has now got zero nlink. Adding to dead_fds list.\n", - fd->ino, fd->name)); + if (fd->type == DT_DIR) + child_ic->pino_nlink = 0; + else + child_ic->pino_nlink--; + + if (!child_ic->pino_nlink) { + dbg_fsbuild("inode #%u (\"%s\") now has no links; adding to dead_fds list.\n", + fd->ino, fd->name); fd->next = *dead_fds; *dead_fds = fd; } else { - D1(printk(KERN_DEBUG "Inode #%u (\"%s\") has now got nlink %d. Ignoring.\n", - fd->ino, fd->name, child_ic->nlink)); + dbg_fsbuild("inode #%u (\"%s\") has now got nlink %d. Ignoring.\n", + fd->ino, fd->name, child_ic->pino_nlink); jffs2_free_full_dirent(fd); } } } /* - We don't delete the inocache from the hash list and free it yet. + We don't delete the inocache from the hash list and free it yet. The erase code will do that, when all the nodes are completely gone. */ } @@ -268,7 +274,7 @@ static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c) because there's not enough free space... */ c->resv_blocks_deletion = 2; - /* Be conservative about how much space we need before we allow writes. + /* Be conservative about how much space we need before we allow writes. On top of that which is required for deletia, require an extra 2% of the medium to be available, for overhead caused by nodes being split across blocks, etc. 
*/ @@ -283,7 +289,7 @@ static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c) c->resv_blocks_gctrigger = c->resv_blocks_write + 1; - /* When do we allow garbage collection to merge nodes to make + /* When do we allow garbage collection to merge nodes to make long-term progress at the expense of short-term space exhaustion? */ c->resv_blocks_gcmerge = c->resv_blocks_deletion + 1; @@ -291,49 +297,58 @@ static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c) than actually making progress? */ c->resv_blocks_gcbad = 0;//c->resv_blocks_deletion + 2; + /* What number of 'very dirty' eraseblocks do we allow before we + trigger the GC thread even if we don't _need_ the space. When we + can't mark nodes obsolete on the medium, the old dirty nodes cause + performance problems because we have to inspect and discard them. */ + c->vdirty_blocks_gctrigger = c->resv_blocks_gctrigger; + if (jffs2_can_mark_obsolete(c)) + c->vdirty_blocks_gctrigger *= 10; + /* If there's less than this amount of dirty space, don't bother trying to GC to make more space. It'll be a fruitless task */ c->nospc_dirty_size = c->sector_size + (c->flash_size / 100); - D1(printk(KERN_DEBUG "JFFS2 trigger levels (size %d KiB, block size %d KiB, %d blocks)\n", - c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks)); - D1(printk(KERN_DEBUG "Blocks required to allow deletion: %d (%d KiB)\n", - c->resv_blocks_deletion, c->resv_blocks_deletion*c->sector_size/1024)); - D1(printk(KERN_DEBUG "Blocks required to allow writes: %d (%d KiB)\n", - c->resv_blocks_write, c->resv_blocks_write*c->sector_size/1024)); - D1(printk(KERN_DEBUG "Blocks required to quiesce GC thread: %d (%d KiB)\n", - c->resv_blocks_gctrigger, c->resv_blocks_gctrigger*c->sector_size/1024)); - D1(printk(KERN_DEBUG "Blocks required to allow GC merges: %d (%d KiB)\n", - c->resv_blocks_gcmerge, c->resv_blocks_gcmerge*c->sector_size/1024)); - D1(printk(KERN_DEBUG "Blocks required to GC bad blocks: %d (%d KiB)\n", - c->resv_blocks_gcbad, c->resv_blocks_gcbad*c->sector_size/1024)); - D1(printk(KERN_DEBUG "Amount of dirty space required to GC: %d bytes\n", - c->nospc_dirty_size)); -} + dbg_fsbuild("trigger levels (size %d KiB, block size %d KiB, %d blocks)\n", + c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks); + dbg_fsbuild("Blocks required to allow deletion: %d (%d KiB)\n", + c->resv_blocks_deletion, c->resv_blocks_deletion*c->sector_size/1024); + dbg_fsbuild("Blocks required to allow writes: %d (%d KiB)\n", + c->resv_blocks_write, c->resv_blocks_write*c->sector_size/1024); + dbg_fsbuild("Blocks required to quiesce GC thread: %d (%d KiB)\n", + c->resv_blocks_gctrigger, c->resv_blocks_gctrigger*c->sector_size/1024); + dbg_fsbuild("Blocks required to allow GC merges: %d (%d KiB)\n", + c->resv_blocks_gcmerge, c->resv_blocks_gcmerge*c->sector_size/1024); + dbg_fsbuild("Blocks required to GC bad blocks: %d (%d KiB)\n", + c->resv_blocks_gcbad, c->resv_blocks_gcbad*c->sector_size/1024); + dbg_fsbuild("Amount of dirty space required to GC: %d bytes\n", + c->nospc_dirty_size); + dbg_fsbuild("Very dirty blocks before GC triggered: %d\n", + c->vdirty_blocks_gctrigger); +} int jffs2_do_mount_fs(struct jffs2_sb_info *c) { + int ret; int i; + int size; c->free_size = c->flash_size; c->nr_blocks = c->flash_size / c->sector_size; - if (c->mtd->flags & MTD_NO_VIRTBLOCKS) - c->blocks = vmalloc(sizeof(struct jffs2_eraseblock) * c->nr_blocks); + size = sizeof(struct jffs2_eraseblock) * c->nr_blocks; +#ifndef __ECOS + if (jffs2_blocks_use_vmalloc(c)) + c->blocks = 
vzalloc(size); else - c->blocks = kmalloc(sizeof(struct jffs2_eraseblock) * c->nr_blocks, GFP_KERNEL); +#endif + c->blocks = kzalloc(size, GFP_KERNEL); if (!c->blocks) return -ENOMEM; + for (i=0; i<c->nr_blocks; i++) { INIT_LIST_HEAD(&c->blocks[i].list); c->blocks[i].offset = i * c->sector_size; c->blocks[i].free_size = c->sector_size; - c->blocks[i].dirty_size = 0; - c->blocks[i].wasted_size = 0; - c->blocks[i].unchecked_size = 0; - c->blocks[i].used_size = 0; - c->blocks[i].first_node = NULL; - c->blocks[i].last_node = NULL; - c->blocks[i].bad_count = 0; } INIT_LIST_HEAD(&c->clean_list); @@ -341,6 +356,7 @@ int jffs2_do_mount_fs(struct jffs2_sb_info *c) INIT_LIST_HEAD(&c->dirty_list); INIT_LIST_HEAD(&c->erasable_list); INIT_LIST_HEAD(&c->erasing_list); + INIT_LIST_HEAD(&c->erase_checking_list); INIT_LIST_HEAD(&c->erase_pending_list); INIT_LIST_HEAD(&c->erasable_pending_wbuf_list); INIT_LIST_HEAD(&c->erase_complete_list); @@ -348,20 +364,31 @@ int jffs2_do_mount_fs(struct jffs2_sb_info *c) INIT_LIST_HEAD(&c->bad_list); INIT_LIST_HEAD(&c->bad_used_list); c->highest_ino = 1; + c->summary = NULL; + + ret = jffs2_sum_init(c); + if (ret) + goto out_free; if (jffs2_build_filesystem(c)) { - D1(printk(KERN_DEBUG "build_fs failed\n")); + dbg_fsbuild("build_fs failed\n"); jffs2_free_ino_caches(c); jffs2_free_raw_node_refs(c); - if (c->mtd->flags & MTD_NO_VIRTBLOCKS) { - vfree(c->blocks); - } else { - kfree(c->blocks); - } - return -EIO; + ret = -EIO; + goto out_free; } jffs2_calc_trigger_levels(c); return 0; + + out_free: +#ifndef __ECOS + if (jffs2_blocks_use_vmalloc(c)) + vfree(c->blocks); + else +#endif + kfree(c->blocks); + + return ret; } diff --git a/fs/jffs2/compr.c b/fs/jffs2/compr.c index af922a9618a..4849a4c9a0e 100644 --- a/fs/jffs2/compr.c +++ b/fs/jffs2/compr.c @@ -1,18 +1,19 @@ /* * JFFS2 -- Journalling Flash File System, Version 2. * - * Copyright (C) 2001-2003 Red Hat, Inc. - * Created by Arjan van de Ven <arjanv@redhat.com> + * Copyright © 2001-2007 Red Hat, Inc. + * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org> + * Copyright © 2004 Ferenc Havasi <havasi@inf.u-szeged.hu>, + * University of Szeged, Hungary * - * Copyright (C) 2004 Ferenc Havasi <havasi@inf.u-szeged.hu>, - * University of Szeged, Hungary + * Created by Arjan van de Ven <arjan@infradead.org> * * For licensing information, see the file 'LICENCE' in this directory. 
* - * $Id: compr.c,v 1.42 2004/08/07 21:56:08 dwmw2 Exp $ - * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include "compr.h" static DEFINE_SPINLOCK(jffs2_compressor_list_lock); @@ -26,9 +27,109 @@ static int jffs2_compression_mode = JFFS2_COMPR_MODE_PRIORITY; /* Statistics for blocks stored without compression */ static uint32_t none_stat_compr_blocks=0,none_stat_decompr_blocks=0,none_stat_compr_size=0; + +/* + * Return 1 to use this compression + */ +static int jffs2_is_best_compression(struct jffs2_compressor *this, + struct jffs2_compressor *best, uint32_t size, uint32_t bestsize) +{ + switch (jffs2_compression_mode) { + case JFFS2_COMPR_MODE_SIZE: + if (bestsize > size) + return 1; + return 0; + case JFFS2_COMPR_MODE_FAVOURLZO: + if ((this->compr == JFFS2_COMPR_LZO) && (bestsize > size)) + return 1; + if ((best->compr != JFFS2_COMPR_LZO) && (bestsize > size)) + return 1; + if ((this->compr == JFFS2_COMPR_LZO) && (bestsize > (size * FAVOUR_LZO_PERCENT / 100))) + return 1; + if ((bestsize * FAVOUR_LZO_PERCENT / 100) > size) + return 1; + + return 0; + } + /* Shouldn't happen */ + return 0; +} + +/* + * jffs2_selected_compress: + * @compr: Explicit compression type to use (ie, JFFS2_COMPR_ZLIB). + * If 0, just take the first available compression mode. + * @data_in: Pointer to uncompressed data + * @cpage_out: Pointer to returned pointer to buffer for compressed data + * @datalen: On entry, holds the amount of data available for compression. + * On exit, expected to hold the amount of data actually compressed. + * @cdatalen: On entry, holds the amount of space available for compressed + * data. On exit, expected to hold the actual size of the compressed + * data. + * + * Returns: the compression type used. Zero is used to show that the data + * could not be compressed; probably because we couldn't find the requested + * compression mode. + */ +static int jffs2_selected_compress(u8 compr, unsigned char *data_in, + unsigned char **cpage_out, u32 *datalen, u32 *cdatalen) +{ + struct jffs2_compressor *this; + int err, ret = JFFS2_COMPR_NONE; + uint32_t orig_slen, orig_dlen; + char *output_buf; + + output_buf = kmalloc(*cdatalen, GFP_KERNEL); + if (!output_buf) { + pr_warn("No memory for compressor allocation. Compression failed.\n"); + return ret; + } + orig_slen = *datalen; + orig_dlen = *cdatalen; + spin_lock(&jffs2_compressor_list_lock); + list_for_each_entry(this, &jffs2_compressor_list, list) { + /* Skip decompress-only and disabled modules */ + if (!this->compress || this->disabled) + continue; + + /* Skip if not the desired compression type */ + if (compr && (compr != this->compr)) + continue; + + /* + * Either compression type was unspecified, or we found our + * compressor; either way, we're good to go. 
+ */ + this->usecount++; + spin_unlock(&jffs2_compressor_list_lock); + + *datalen = orig_slen; + *cdatalen = orig_dlen; + err = this->compress(data_in, output_buf, datalen, cdatalen); + + spin_lock(&jffs2_compressor_list_lock); + this->usecount--; + if (!err) { + /* Success */ + ret = this->compr; + this->stat_compr_blocks++; + this->stat_compr_orig_size += *datalen; + this->stat_compr_new_size += *cdatalen; + break; + } + } + spin_unlock(&jffs2_compressor_list_lock); + if (ret == JFFS2_COMPR_NONE) + kfree(output_buf); + else + *cpage_out = output_buf; + + return ret; +} + /* jffs2_compress: - * @data: Pointer to uncompressed data - * @cdata: Pointer to returned pointer to buffer for compressed data + * @data_in: Pointer to uncompressed data + * @cpage_out: Pointer to returned pointer to buffer for compressed data * @datalen: On entry, holds the amount of data available for compression. * On exit, expected to hold the amount of data actually compressed. * @cdatalen: On entry, holds the amount of space available for compressed @@ -36,139 +137,125 @@ static uint32_t none_stat_compr_blocks=0,none_stat_decompr_blocks=0,none_stat_co * data. * * Returns: Lower byte to be stored with data indicating compression type used. - * Zero is used to show that the data could not be compressed - the + * Zero is used to show that the data could not be compressed - the * compressed version was actually larger than the original. * Upper byte will be used later. (soon) * * If the cdata buffer isn't large enough to hold all the uncompressed data, - * jffs2_compress should compress as much as will fit, and should set + * jffs2_compress should compress as much as will fit, and should set * *datalen accordingly to show the amount of data which were compressed. */ uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f, - unsigned char *data_in, unsigned char **cpage_out, - uint32_t *datalen, uint32_t *cdatalen) + unsigned char *data_in, unsigned char **cpage_out, + uint32_t *datalen, uint32_t *cdatalen) { int ret = JFFS2_COMPR_NONE; - int compr_ret; - struct jffs2_compressor *this, *best=NULL; - unsigned char *output_buf = NULL, *tmp_buf; - uint32_t orig_slen, orig_dlen; - uint32_t best_slen=0, best_dlen=0; - - switch (jffs2_compression_mode) { - case JFFS2_COMPR_MODE_NONE: - break; - case JFFS2_COMPR_MODE_PRIORITY: - output_buf = kmalloc(*cdatalen,GFP_KERNEL); - if (!output_buf) { - printk(KERN_WARNING "JFFS2: No memory for compressor allocation. 
Compression failed.\n"); - goto out; - } - orig_slen = *datalen; - orig_dlen = *cdatalen; - spin_lock(&jffs2_compressor_list_lock); - list_for_each_entry(this, &jffs2_compressor_list, list) { - /* Skip decompress-only backwards-compatibility and disabled modules */ - if ((!this->compress)||(this->disabled)) - continue; - - this->usecount++; - spin_unlock(&jffs2_compressor_list_lock); - *datalen = orig_slen; - *cdatalen = orig_dlen; - compr_ret = this->compress(data_in, output_buf, datalen, cdatalen, NULL); - spin_lock(&jffs2_compressor_list_lock); - this->usecount--; - if (!compr_ret) { - ret = this->compr; - this->stat_compr_blocks++; - this->stat_compr_orig_size += *datalen; - this->stat_compr_new_size += *cdatalen; - break; - } - } - spin_unlock(&jffs2_compressor_list_lock); - if (ret == JFFS2_COMPR_NONE) kfree(output_buf); - break; - case JFFS2_COMPR_MODE_SIZE: - orig_slen = *datalen; - orig_dlen = *cdatalen; - spin_lock(&jffs2_compressor_list_lock); - list_for_each_entry(this, &jffs2_compressor_list, list) { - /* Skip decompress-only backwards-compatibility and disabled modules */ - if ((!this->compress)||(this->disabled)) - continue; - /* Allocating memory for output buffer if necessary */ - if ((this->compr_buf_size<orig_dlen)&&(this->compr_buf)) { - spin_unlock(&jffs2_compressor_list_lock); - kfree(this->compr_buf); - spin_lock(&jffs2_compressor_list_lock); - this->compr_buf_size=0; - this->compr_buf=NULL; - } - if (!this->compr_buf) { - spin_unlock(&jffs2_compressor_list_lock); - tmp_buf = kmalloc(orig_dlen,GFP_KERNEL); - spin_lock(&jffs2_compressor_list_lock); - if (!tmp_buf) { - printk(KERN_WARNING "JFFS2: No memory for compressor allocation. (%d bytes)\n",orig_dlen); - continue; - } - else { - this->compr_buf = tmp_buf; - this->compr_buf_size = orig_dlen; - } - } - this->usecount++; - spin_unlock(&jffs2_compressor_list_lock); - *datalen = orig_slen; - *cdatalen = orig_dlen; - compr_ret = this->compress(data_in, this->compr_buf, datalen, cdatalen, NULL); - spin_lock(&jffs2_compressor_list_lock); - this->usecount--; - if (!compr_ret) { - if ((!best_dlen)||(best_dlen>*cdatalen)) { - best_dlen = *cdatalen; - best_slen = *datalen; - best = this; - } - } - } - if (best_dlen) { - *cdatalen = best_dlen; - *datalen = best_slen; - output_buf = best->compr_buf; - best->compr_buf = NULL; - best->compr_buf_size = 0; - best->stat_compr_blocks++; - best->stat_compr_orig_size += best_slen; - best->stat_compr_new_size += best_dlen; - ret = best->compr; - } - spin_unlock(&jffs2_compressor_list_lock); - break; - default: - printk(KERN_ERR "JFFS2: unknow compression mode.\n"); - } - out: - if (ret == JFFS2_COMPR_NONE) { - *cpage_out = data_in; - *datalen = *cdatalen; - none_stat_compr_blocks++; - none_stat_compr_size += *datalen; - } - else { - *cpage_out = output_buf; - } + int mode, compr_ret; + struct jffs2_compressor *this, *best=NULL; + unsigned char *output_buf = NULL, *tmp_buf; + uint32_t orig_slen, orig_dlen; + uint32_t best_slen=0, best_dlen=0; + + if (c->mount_opts.override_compr) + mode = c->mount_opts.compr; + else + mode = jffs2_compression_mode; + + switch (mode) { + case JFFS2_COMPR_MODE_NONE: + break; + case JFFS2_COMPR_MODE_PRIORITY: + ret = jffs2_selected_compress(0, data_in, cpage_out, datalen, + cdatalen); + break; + case JFFS2_COMPR_MODE_SIZE: + case JFFS2_COMPR_MODE_FAVOURLZO: + orig_slen = *datalen; + orig_dlen = *cdatalen; + spin_lock(&jffs2_compressor_list_lock); + list_for_each_entry(this, &jffs2_compressor_list, list) { + /* Skip decompress-only backwards-compatibility 
and disabled modules */ + if ((!this->compress)||(this->disabled)) + continue; + /* Allocating memory for output buffer if necessary */ + if ((this->compr_buf_size < orig_slen) && (this->compr_buf)) { + spin_unlock(&jffs2_compressor_list_lock); + kfree(this->compr_buf); + spin_lock(&jffs2_compressor_list_lock); + this->compr_buf_size=0; + this->compr_buf=NULL; + } + if (!this->compr_buf) { + spin_unlock(&jffs2_compressor_list_lock); + tmp_buf = kmalloc(orig_slen, GFP_KERNEL); + spin_lock(&jffs2_compressor_list_lock); + if (!tmp_buf) { + pr_warn("No memory for compressor allocation. (%d bytes)\n", + orig_slen); + continue; + } + else { + this->compr_buf = tmp_buf; + this->compr_buf_size = orig_slen; + } + } + this->usecount++; + spin_unlock(&jffs2_compressor_list_lock); + *datalen = orig_slen; + *cdatalen = orig_dlen; + compr_ret = this->compress(data_in, this->compr_buf, datalen, cdatalen); + spin_lock(&jffs2_compressor_list_lock); + this->usecount--; + if (!compr_ret) { + if (((!best_dlen) || jffs2_is_best_compression(this, best, *cdatalen, best_dlen)) + && (*cdatalen < *datalen)) { + best_dlen = *cdatalen; + best_slen = *datalen; + best = this; + } + } + } + if (best_dlen) { + *cdatalen = best_dlen; + *datalen = best_slen; + output_buf = best->compr_buf; + best->compr_buf = NULL; + best->compr_buf_size = 0; + best->stat_compr_blocks++; + best->stat_compr_orig_size += best_slen; + best->stat_compr_new_size += best_dlen; + ret = best->compr; + *cpage_out = output_buf; + } + spin_unlock(&jffs2_compressor_list_lock); + break; + case JFFS2_COMPR_MODE_FORCELZO: + ret = jffs2_selected_compress(JFFS2_COMPR_LZO, data_in, + cpage_out, datalen, cdatalen); + break; + case JFFS2_COMPR_MODE_FORCEZLIB: + ret = jffs2_selected_compress(JFFS2_COMPR_ZLIB, data_in, + cpage_out, datalen, cdatalen); + break; + default: + pr_err("unknown compression mode\n"); + } + + if (ret == JFFS2_COMPR_NONE) { + *cpage_out = data_in; + *datalen = *cdatalen; + none_stat_compr_blocks++; + none_stat_compr_size += *datalen; + } return ret; } int jffs2_decompress(struct jffs2_sb_info *c, struct jffs2_inode_info *f, - uint16_t comprtype, unsigned char *cdata_in, + uint16_t comprtype, unsigned char *cdata_in, unsigned char *data_out, uint32_t cdatalen, uint32_t datalen) { - struct jffs2_compressor *this; - int ret; + struct jffs2_compressor *this; + int ret; /* Older code had a bug where it would write non-zero 'usercompr' fields. Deal with it. 
*/ @@ -179,32 +266,33 @@ int jffs2_decompress(struct jffs2_sb_info *c, struct jffs2_inode_info *f, case JFFS2_COMPR_NONE: /* This should be special-cased elsewhere, but we might as well deal with it */ memcpy(data_out, cdata_in, datalen); - none_stat_decompr_blocks++; + none_stat_decompr_blocks++; break; case JFFS2_COMPR_ZERO: memset(data_out, 0, datalen); break; default: - spin_lock(&jffs2_compressor_list_lock); - list_for_each_entry(this, &jffs2_compressor_list, list) { - if (comprtype == this->compr) { - this->usecount++; - spin_unlock(&jffs2_compressor_list_lock); - ret = this->decompress(cdata_in, data_out, cdatalen, datalen, NULL); - spin_lock(&jffs2_compressor_list_lock); - if (ret) { - printk(KERN_WARNING "Decompressor \"%s\" returned %d\n", this->name, ret); - } - else { - this->stat_decompr_blocks++; - } - this->usecount--; - spin_unlock(&jffs2_compressor_list_lock); - return ret; - } - } - printk(KERN_WARNING "JFFS2 compression type 0x%02x not available.\n", comprtype); - spin_unlock(&jffs2_compressor_list_lock); + spin_lock(&jffs2_compressor_list_lock); + list_for_each_entry(this, &jffs2_compressor_list, list) { + if (comprtype == this->compr) { + this->usecount++; + spin_unlock(&jffs2_compressor_list_lock); + ret = this->decompress(cdata_in, data_out, cdatalen, datalen); + spin_lock(&jffs2_compressor_list_lock); + if (ret) { + pr_warn("Decompressor \"%s\" returned %d\n", + this->name, ret); + } + else { + this->stat_decompr_blocks++; + } + this->usecount--; + spin_unlock(&jffs2_compressor_list_lock); + return ret; + } + } + pr_warn("compression type 0x%02x not available\n", comprtype); + spin_unlock(&jffs2_compressor_list_lock); return -EIO; } return 0; @@ -212,258 +300,119 @@ int jffs2_decompress(struct jffs2_sb_info *c, struct jffs2_inode_info *f, int jffs2_register_compressor(struct jffs2_compressor *comp) { - struct jffs2_compressor *this; - - if (!comp->name) { - printk(KERN_WARNING "NULL compressor name at registering JFFS2 compressor. Failed.\n"); - return -1; - } - comp->compr_buf_size=0; - comp->compr_buf=NULL; - comp->usecount=0; - comp->stat_compr_orig_size=0; - comp->stat_compr_new_size=0; - comp->stat_compr_blocks=0; - comp->stat_decompr_blocks=0; - D1(printk(KERN_DEBUG "Registering JFFS2 compressor \"%s\"\n", comp->name)); - - spin_lock(&jffs2_compressor_list_lock); - - list_for_each_entry(this, &jffs2_compressor_list, list) { - if (this->priority < comp->priority) { - list_add(&comp->list, this->list.prev); - goto out; - } - } - list_add_tail(&comp->list, &jffs2_compressor_list); + struct jffs2_compressor *this; + + if (!comp->name) { + pr_warn("NULL compressor name at registering JFFS2 compressor. 
Failed.\n"); + return -1; + } + comp->compr_buf_size=0; + comp->compr_buf=NULL; + comp->usecount=0; + comp->stat_compr_orig_size=0; + comp->stat_compr_new_size=0; + comp->stat_compr_blocks=0; + comp->stat_decompr_blocks=0; + jffs2_dbg(1, "Registering JFFS2 compressor \"%s\"\n", comp->name); + + spin_lock(&jffs2_compressor_list_lock); + + list_for_each_entry(this, &jffs2_compressor_list, list) { + if (this->priority < comp->priority) { + list_add(&comp->list, this->list.prev); + goto out; + } + } + list_add_tail(&comp->list, &jffs2_compressor_list); out: - D2(list_for_each_entry(this, &jffs2_compressor_list, list) { - printk(KERN_DEBUG "Compressor \"%s\", prio %d\n", this->name, this->priority); - }) + D2(list_for_each_entry(this, &jffs2_compressor_list, list) { + printk(KERN_DEBUG "Compressor \"%s\", prio %d\n", this->name, this->priority); + }) - spin_unlock(&jffs2_compressor_list_lock); + spin_unlock(&jffs2_compressor_list_lock); - return 0; + return 0; } int jffs2_unregister_compressor(struct jffs2_compressor *comp) { - D2(struct jffs2_compressor *this;) - - D1(printk(KERN_DEBUG "Unregistering JFFS2 compressor \"%s\"\n", comp->name)); - - spin_lock(&jffs2_compressor_list_lock); - - if (comp->usecount) { - spin_unlock(&jffs2_compressor_list_lock); - printk(KERN_WARNING "JFFS2: Compressor modul is in use. Unregister failed.\n"); - return -1; - } - list_del(&comp->list); - - D2(list_for_each_entry(this, &jffs2_compressor_list, list) { - printk(KERN_DEBUG "Compressor \"%s\", prio %d\n", this->name, this->priority); - }) - spin_unlock(&jffs2_compressor_list_lock); - return 0; -} + D2(struct jffs2_compressor *this); -#ifdef CONFIG_JFFS2_PROC + jffs2_dbg(1, "Unregistering JFFS2 compressor \"%s\"\n", comp->name); -#define JFFS2_STAT_BUF_SIZE 16000 + spin_lock(&jffs2_compressor_list_lock); -char *jffs2_list_compressors(void) -{ - struct jffs2_compressor *this; - char *buf, *act_buf; - - act_buf = buf = kmalloc(JFFS2_STAT_BUF_SIZE,GFP_KERNEL); - list_for_each_entry(this, &jffs2_compressor_list, list) { - act_buf += sprintf(act_buf, "%10s priority:%d ", this->name, this->priority); - if ((this->disabled)||(!this->compress)) - act_buf += sprintf(act_buf,"disabled"); - else - act_buf += sprintf(act_buf,"enabled"); - act_buf += sprintf(act_buf,"\n"); - } - return buf; -} - -char *jffs2_stats(void) -{ - struct jffs2_compressor *this; - char *buf, *act_buf; - - act_buf = buf = kmalloc(JFFS2_STAT_BUF_SIZE,GFP_KERNEL); - - act_buf += sprintf(act_buf,"JFFS2 compressor statistics:\n"); - act_buf += sprintf(act_buf,"%10s ","none"); - act_buf += sprintf(act_buf,"compr: %d blocks (%d) decompr: %d blocks\n", none_stat_compr_blocks, - none_stat_compr_size, none_stat_decompr_blocks); - spin_lock(&jffs2_compressor_list_lock); - list_for_each_entry(this, &jffs2_compressor_list, list) { - act_buf += sprintf(act_buf,"%10s ",this->name); - if ((this->disabled)||(!this->compress)) - act_buf += sprintf(act_buf,"- "); - else - act_buf += sprintf(act_buf,"+ "); - act_buf += sprintf(act_buf,"compr: %d blocks (%d/%d) decompr: %d blocks ", this->stat_compr_blocks, - this->stat_compr_new_size, this->stat_compr_orig_size, - this->stat_decompr_blocks); - act_buf += sprintf(act_buf,"\n"); - } - spin_unlock(&jffs2_compressor_list_lock); - - return buf; -} - -char *jffs2_get_compression_mode_name(void) -{ - switch (jffs2_compression_mode) { - case JFFS2_COMPR_MODE_NONE: - return "none"; - case JFFS2_COMPR_MODE_PRIORITY: - return "priority"; - case JFFS2_COMPR_MODE_SIZE: - return "size"; - } - return "unkown"; -} - -int 
jffs2_set_compression_mode_name(const char *name) -{ - if (!strcmp("none",name)) { - jffs2_compression_mode = JFFS2_COMPR_MODE_NONE; - return 0; - } - if (!strcmp("priority",name)) { - jffs2_compression_mode = JFFS2_COMPR_MODE_PRIORITY; - return 0; - } - if (!strcmp("size",name)) { - jffs2_compression_mode = JFFS2_COMPR_MODE_SIZE; - return 0; - } - return 1; -} - -static int jffs2_compressor_Xable(const char *name, int disabled) -{ - struct jffs2_compressor *this; - spin_lock(&jffs2_compressor_list_lock); - list_for_each_entry(this, &jffs2_compressor_list, list) { - if (!strcmp(this->name, name)) { - this->disabled = disabled; - spin_unlock(&jffs2_compressor_list_lock); - return 0; - } - } - spin_unlock(&jffs2_compressor_list_lock); - printk(KERN_WARNING "JFFS2: compressor %s not found.\n",name); - return 1; -} - -int jffs2_enable_compressor_name(const char *name) -{ - return jffs2_compressor_Xable(name, 0); -} - -int jffs2_disable_compressor_name(const char *name) -{ - return jffs2_compressor_Xable(name, 1); -} + if (comp->usecount) { + spin_unlock(&jffs2_compressor_list_lock); + pr_warn("Compressor module is in use. Unregister failed.\n"); + return -1; + } + list_del(&comp->list); -int jffs2_set_compressor_priority(const char *name, int priority) -{ - struct jffs2_compressor *this,*comp; - spin_lock(&jffs2_compressor_list_lock); - list_for_each_entry(this, &jffs2_compressor_list, list) { - if (!strcmp(this->name, name)) { - this->priority = priority; - comp = this; - goto reinsert; - } - } - spin_unlock(&jffs2_compressor_list_lock); - printk(KERN_WARNING "JFFS2: compressor %s not found.\n",name); - return 1; -reinsert: - /* list is sorted in the order of priority, so if - we change it we have to reinsert it into the - good place */ - list_del(&comp->list); - list_for_each_entry(this, &jffs2_compressor_list, list) { - if (this->priority < comp->priority) { - list_add(&comp->list, this->list.prev); - spin_unlock(&jffs2_compressor_list_lock); - return 0; - } - } - list_add_tail(&comp->list, &jffs2_compressor_list); - spin_unlock(&jffs2_compressor_list_lock); - return 0; + D2(list_for_each_entry(this, &jffs2_compressor_list, list) { + printk(KERN_DEBUG "Compressor \"%s\", prio %d\n", this->name, this->priority); + }) + spin_unlock(&jffs2_compressor_list_lock); + return 0; } -#endif - void jffs2_free_comprbuf(unsigned char *comprbuf, unsigned char *orig) { - if (orig != comprbuf) - kfree(comprbuf); + if (orig != comprbuf) + kfree(comprbuf); } -int jffs2_compressors_init(void) +int __init jffs2_compressors_init(void) { /* Registering compressors */ #ifdef CONFIG_JFFS2_ZLIB - jffs2_zlib_init(); + jffs2_zlib_init(); #endif #ifdef CONFIG_JFFS2_RTIME - jffs2_rtime_init(); + jffs2_rtime_init(); #endif #ifdef CONFIG_JFFS2_RUBIN - jffs2_rubinmips_init(); - jffs2_dynrubin_init(); -#endif -#ifdef CONFIG_JFFS2_LZARI - jffs2_lzari_init(); + jffs2_rubinmips_init(); + jffs2_dynrubin_init(); #endif #ifdef CONFIG_JFFS2_LZO - jffs2_lzo_init(); + jffs2_lzo_init(); #endif /* Setting default compression mode */ #ifdef CONFIG_JFFS2_CMODE_NONE - jffs2_compression_mode = JFFS2_COMPR_MODE_NONE; - D1(printk(KERN_INFO "JFFS2: default compression mode: none\n");) + jffs2_compression_mode = JFFS2_COMPR_MODE_NONE; + jffs2_dbg(1, "default compression mode: none\n"); #else #ifdef CONFIG_JFFS2_CMODE_SIZE - jffs2_compression_mode = JFFS2_COMPR_MODE_SIZE; - D1(printk(KERN_INFO "JFFS2: default compression mode: size\n");) + jffs2_compression_mode = JFFS2_COMPR_MODE_SIZE; + jffs2_dbg(1, "default compression mode: size\n"); 
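The PRIORITY, FORCELZO and FORCEZLIB branches of the rewritten jffs2_compress() above all funnel into a jffs2_selected_compress() helper that this hunk only calls, never defines. Judging purely from those call sites (a type argument of 0 appears to mean "first usable compressor in priority order", while JFFS2_COMPR_LZO / JFFS2_COMPR_ZLIB restrict the walk to a single compressor), a helper of that shape could look roughly like the sketch below. The buffer sizing, locking detail and statistics bookkeeping here are assumptions modelled on the old priority loop that this hunk removes, not the actual kernel implementation; it also assumes compr.c's existing context (jffs2_compressor_list, its spinlock, struct jffs2_compressor).

/* Hedged sketch only: a "selected or first-in-priority-order" compress helper
 * as implied by the call sites above. */
static int jffs2_selected_compress(int compr, unsigned char *data_in,
				   unsigned char **cpage_out,
				   uint32_t *datalen, uint32_t *cdatalen)
{
	int ret = JFFS2_COMPR_NONE, compr_ret;
	struct jffs2_compressor *this;
	unsigned char *output_buf;
	uint32_t orig_slen = *datalen, orig_dlen = *cdatalen;

	output_buf = kmalloc(*cdatalen, GFP_KERNEL);
	if (!output_buf)
		return JFFS2_COMPR_NONE;

	spin_lock(&jffs2_compressor_list_lock);
	list_for_each_entry(this, &jffs2_compressor_list, list) {
		/* Skip decompress-only and disabled compressors */
		if (!this->compress || this->disabled)
			continue;
		/* A non-zero 'compr' restricts the walk to that one type */
		if (compr && this->compr != compr)
			continue;

		this->usecount++;
		spin_unlock(&jffs2_compressor_list_lock);
		*datalen = orig_slen;
		*cdatalen = orig_dlen;
		compr_ret = this->compress(data_in, output_buf, datalen, cdatalen);
		spin_lock(&jffs2_compressor_list_lock);
		this->usecount--;
		if (!compr_ret) {
			ret = this->compr;
			this->stat_compr_blocks++;
			this->stat_compr_orig_size += *datalen;
			this->stat_compr_new_size += *cdatalen;
			break;
		}
	}
	spin_unlock(&jffs2_compressor_list_lock);

	if (ret == JFFS2_COMPR_NONE)
		kfree(output_buf);
	else
		*cpage_out = output_buf;
	return ret;
}

Factoring the walk out this way would let the forced modes reuse the same bookkeeping instead of duplicating the loop once per mode, which is consistent with how the switch above reads.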
+#else +#ifdef CONFIG_JFFS2_CMODE_FAVOURLZO + jffs2_compression_mode = JFFS2_COMPR_MODE_FAVOURLZO; + jffs2_dbg(1, "default compression mode: favourlzo\n"); #else - D1(printk(KERN_INFO "JFFS2: default compression mode: priority\n");) + jffs2_dbg(1, "default compression mode: priority\n"); #endif #endif - return 0; +#endif + return 0; } -int jffs2_compressors_exit(void) +int jffs2_compressors_exit(void) { /* Unregistering compressors */ #ifdef CONFIG_JFFS2_LZO - jffs2_lzo_exit(); -#endif -#ifdef CONFIG_JFFS2_LZARI - jffs2_lzari_exit(); + jffs2_lzo_exit(); #endif #ifdef CONFIG_JFFS2_RUBIN - jffs2_dynrubin_exit(); - jffs2_rubinmips_exit(); + jffs2_dynrubin_exit(); + jffs2_rubinmips_exit(); #endif #ifdef CONFIG_JFFS2_RTIME - jffs2_rtime_exit(); + jffs2_rtime_exit(); #endif #ifdef CONFIG_JFFS2_ZLIB - jffs2_zlib_exit(); + jffs2_zlib_exit(); #endif - return 0; + return 0; } diff --git a/fs/jffs2/compr.h b/fs/jffs2/compr.h index 89ceeed201e..5e91d578f4e 100644 --- a/fs/jffs2/compr.h +++ b/fs/jffs2/compr.h @@ -1,13 +1,11 @@ /* * JFFS2 -- Journalling Flash File System, Version 2. * - * Copyright (C) 2004 Ferenc Havasi <havasi@inf.u-szeged.hu>, - * University of Szeged, Hungary + * Copyright © 2004 Ferenc Havasi <havasi@inf.u-szeged.hu>, + * University of Szeged, Hungary + * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org> * - * For licensing information, see the file 'LICENCE' in the - * jffs2 directory. - * - * $Id: compr.h,v 1.6 2004/07/16 15:17:57 dwmw2 Exp $ + * For licensing information, see the file 'LICENCE' in this directory. * */ @@ -23,41 +21,47 @@ #include <linux/errno.h> #include <linux/fs.h> #include <linux/jffs2.h> -#include <linux/jffs2_fs_i.h> -#include <linux/jffs2_fs_sb.h> +#include "jffs2_fs_i.h" +#include "jffs2_fs_sb.h" #include "nodelist.h" #define JFFS2_RUBINMIPS_PRIORITY 10 #define JFFS2_DYNRUBIN_PRIORITY 20 #define JFFS2_LZARI_PRIORITY 30 -#define JFFS2_LZO_PRIORITY 40 #define JFFS2_RTIME_PRIORITY 50 #define JFFS2_ZLIB_PRIORITY 60 +#define JFFS2_LZO_PRIORITY 80 + #define JFFS2_RUBINMIPS_DISABLED /* RUBINs will be used only */ -#define JFFS2_DYNRUBIN_DISABLED /* for decompression */ +#define JFFS2_DYNRUBIN_DISABLED /* for decompression */ #define JFFS2_COMPR_MODE_NONE 0 #define JFFS2_COMPR_MODE_PRIORITY 1 #define JFFS2_COMPR_MODE_SIZE 2 +#define JFFS2_COMPR_MODE_FAVOURLZO 3 +#define JFFS2_COMPR_MODE_FORCELZO 4 +#define JFFS2_COMPR_MODE_FORCEZLIB 5 + +#define FAVOUR_LZO_PERCENT 80 struct jffs2_compressor { - struct list_head list; - int priority; /* used by prirority comr. mode */ - char *name; - char compr; /* JFFS2_COMPR_XXX */ - int (*compress)(unsigned char *data_in, unsigned char *cpage_out, - uint32_t *srclen, uint32_t *destlen, void *model); - int (*decompress)(unsigned char *cdata_in, unsigned char *data_out, - uint32_t cdatalen, uint32_t datalen, void *model); - int usecount; - int disabled; /* if seted the compressor won't compress */ - unsigned char *compr_buf; /* used by size compr. mode */ - uint32_t compr_buf_size; /* used by size compr. mode */ - uint32_t stat_compr_orig_size; - uint32_t stat_compr_new_size; - uint32_t stat_compr_blocks; - uint32_t stat_decompr_blocks; + struct list_head list; + int priority; /* used by prirority comr. 
mode */ + char *name; + char compr; /* JFFS2_COMPR_XXX */ + int (*compress)(unsigned char *data_in, unsigned char *cpage_out, + uint32_t *srclen, uint32_t *destlen); + int (*decompress)(unsigned char *cdata_in, unsigned char *data_out, + uint32_t cdatalen, uint32_t datalen); + int usecount; + int disabled; /* if set the compressor won't compress */ + unsigned char *compr_buf; /* used by size compr. mode */ + uint32_t compr_buf_size; /* used by size compr. mode */ + uint32_t stat_compr_orig_size; + uint32_t stat_compr_new_size; + uint32_t stat_compr_blocks; + uint32_t stat_decompr_blocks; }; int jffs2_register_compressor(struct jffs2_compressor *comp); @@ -67,25 +71,15 @@ int jffs2_compressors_init(void); int jffs2_compressors_exit(void); uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f, - unsigned char *data_in, unsigned char **cpage_out, - uint32_t *datalen, uint32_t *cdatalen); + unsigned char *data_in, unsigned char **cpage_out, + uint32_t *datalen, uint32_t *cdatalen); int jffs2_decompress(struct jffs2_sb_info *c, struct jffs2_inode_info *f, - uint16_t comprtype, unsigned char *cdata_in, - unsigned char *data_out, uint32_t cdatalen, uint32_t datalen); + uint16_t comprtype, unsigned char *cdata_in, + unsigned char *data_out, uint32_t cdatalen, uint32_t datalen); void jffs2_free_comprbuf(unsigned char *comprbuf, unsigned char *orig); -#ifdef CONFIG_JFFS2_PROC -int jffs2_enable_compressor_name(const char *name); -int jffs2_disable_compressor_name(const char *name); -int jffs2_set_compression_mode_name(const char *mode_name); -char *jffs2_get_compression_mode_name(void); -int jffs2_set_compressor_priority(const char *mode_name, int priority); -char *jffs2_list_compressors(void); -char *jffs2_stats(void); -#endif - /* Compressor modules */ /* These functions will be called by jffs2_compressors_init/exit */ @@ -103,10 +97,6 @@ void jffs2_rtime_exit(void); int jffs2_zlib_init(void); void jffs2_zlib_exit(void); #endif -#ifdef CONFIG_JFFS2_LZARI -int jffs2_lzari_init(void); -void jffs2_lzari_exit(void); -#endif #ifdef CONFIG_JFFS2_LZO int jffs2_lzo_init(void); void jffs2_lzo_exit(void); diff --git a/fs/jffs2/compr_lzo.c b/fs/jffs2/compr_lzo.c new file mode 100644 index 00000000000..c553bd6506d --- /dev/null +++ b/fs/jffs2/compr_lzo.c @@ -0,0 +1,110 @@ +/* + * JFFS2 -- Journalling Flash File System, Version 2. + * + * Copyright © 2007 Nokia Corporation. All rights reserved. + * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org> + * + * Created by Richard Purdie <rpurdie@openedhand.com> + * + * For licensing information, see the file 'LICENCE' in this directory. 
+ * + */ + +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/vmalloc.h> +#include <linux/init.h> +#include <linux/lzo.h> +#include "compr.h" + +static void *lzo_mem; +static void *lzo_compress_buf; +static DEFINE_MUTEX(deflate_mutex); /* for lzo_mem and lzo_compress_buf */ + +static void free_workspace(void) +{ + vfree(lzo_mem); + vfree(lzo_compress_buf); +} + +static int __init alloc_workspace(void) +{ + lzo_mem = vmalloc(LZO1X_MEM_COMPRESS); + lzo_compress_buf = vmalloc(lzo1x_worst_compress(PAGE_SIZE)); + + if (!lzo_mem || !lzo_compress_buf) { + free_workspace(); + return -ENOMEM; + } + + return 0; +} + +static int jffs2_lzo_compress(unsigned char *data_in, unsigned char *cpage_out, + uint32_t *sourcelen, uint32_t *dstlen) +{ + size_t compress_size; + int ret; + + mutex_lock(&deflate_mutex); + ret = lzo1x_1_compress(data_in, *sourcelen, lzo_compress_buf, &compress_size, lzo_mem); + if (ret != LZO_E_OK) + goto fail; + + if (compress_size > *dstlen) + goto fail; + + memcpy(cpage_out, lzo_compress_buf, compress_size); + mutex_unlock(&deflate_mutex); + + *dstlen = compress_size; + return 0; + + fail: + mutex_unlock(&deflate_mutex); + return -1; +} + +static int jffs2_lzo_decompress(unsigned char *data_in, unsigned char *cpage_out, + uint32_t srclen, uint32_t destlen) +{ + size_t dl = destlen; + int ret; + + ret = lzo1x_decompress_safe(data_in, srclen, cpage_out, &dl); + + if (ret != LZO_E_OK || dl != destlen) + return -1; + + return 0; +} + +static struct jffs2_compressor jffs2_lzo_comp = { + .priority = JFFS2_LZO_PRIORITY, + .name = "lzo", + .compr = JFFS2_COMPR_LZO, + .compress = &jffs2_lzo_compress, + .decompress = &jffs2_lzo_decompress, + .disabled = 0, +}; + +int __init jffs2_lzo_init(void) +{ + int ret; + + ret = alloc_workspace(); + if (ret < 0) + return ret; + + ret = jffs2_register_compressor(&jffs2_lzo_comp); + if (ret) + free_workspace(); + + return ret; +} + +void jffs2_lzo_exit(void) +{ + jffs2_unregister_compressor(&jffs2_lzo_comp); + free_workspace(); +} diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c index 39312941866..406d9cc84ba 100644 --- a/fs/jffs2/compr_rtime.c +++ b/fs/jffs2/compr_rtime.c @@ -1,13 +1,13 @@ /* * JFFS2 -- Journalling Flash File System, Version 2. * - * Copyright (C) 2001-2003 Red Hat, Inc. + * Copyright © 2001-2007 Red Hat, Inc. + * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org> * * Created by Arjan van de Ven <arjanv@redhat.com> * * For licensing information, see the file 'LICENCE' in this directory. * - * $Id: compr_rtime.c,v 1.14 2004/06/23 16:34:40 havasi Exp $ * * * Very simple lz77-ish encoder. 
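The next hunk touches the "very simple lz77-ish" rtime encoder. The scheme is tiny: every output token is a <literal byte, run length> pair, and a positions[256] table remembers where each byte value last occurred so the following run can be copied from just past that earlier occurrence; the hunk also widens positions[] from short to unsigned short, presumably so offsets beyond 32767 cannot go negative. The userspace sketch below is not the kernel code, just a self-contained illustration of the same pair format (the function names and the demo string are invented):

/* Round-trip demo of the rtime pair format: <literal, run-length>,
 * where the run is copied from just past the previous occurrence of
 * the literal's byte value. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int rtime_compress(const unsigned char *in, unsigned char *out,
			  uint32_t *srclen, uint32_t *dstlen)
{
	unsigned short positions[256] = { 0 };
	uint32_t outpos = 0, pos = 0;

	while (pos < *srclen && outpos <= *dstlen - 2) {
		unsigned char value = in[pos];
		int backpos, runlen = 0;

		out[outpos++] = in[pos++];		/* literal byte */
		backpos = positions[value];
		positions[value] = pos;
		while (backpos < (int)pos && pos < *srclen &&
		       in[pos] == in[backpos++] && runlen < 255) {
			pos++;
			runlen++;
		}
		out[outpos++] = runlen;			/* run length */
	}
	if (outpos >= pos)
		return -1;				/* did not shrink */
	*srclen = pos;
	*dstlen = outpos;
	return 0;
}

static void rtime_decompress(const unsigned char *in, unsigned char *out,
			     uint32_t destlen)
{
	unsigned short positions[256] = { 0 };
	uint32_t outpos = 0, pos = 0;

	while (outpos < destlen) {
		unsigned char value = in[pos++];
		int backoffs, repeat;

		out[outpos++] = value;
		repeat = in[pos++];
		backoffs = positions[value];
		positions[value] = outpos;
		while (repeat--)			/* byte-wise: runs may overlap */
			out[outpos++] = out[backoffs++];
	}
}

int main(void)
{
	const char *msg = "aaaaaaaaaabcbcbcbcbcbcbc-aaaaaaaaaa";
	unsigned char comp[128], decomp[128];
	uint32_t slen = (uint32_t)strlen(msg), dlen = sizeof(comp);

	assert(rtime_compress((const unsigned char *)msg, comp, &slen, &dlen) == 0);
	rtime_decompress(comp, decomp, slen);
	assert(memcmp(decomp, msg, slen) == 0);
	printf("%u bytes -> %u bytes\n", (unsigned)slen, (unsigned)dlen);
	return 0;
}

Compiled with a plain C compiler, the demo prints the original and compressed sizes and asserts that the round trip reproduces the input; repetitive data like the demo string compresses well, incompressible data makes rtime_compress() report failure just as the kernel version does.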
@@ -24,33 +24,32 @@ #include <linux/kernel.h> #include <linux/types.h> #include <linux/errno.h> -#include <linux/string.h> -#include <linux/jffs2.h> +#include <linux/string.h> +#include <linux/jffs2.h> #include "compr.h" /* _compress returns the compressed size, -1 if bigger */ static int jffs2_rtime_compress(unsigned char *data_in, unsigned char *cpage_out, - uint32_t *sourcelen, uint32_t *dstlen, - void *model) + uint32_t *sourcelen, uint32_t *dstlen) { - short positions[256]; + unsigned short positions[256]; int outpos = 0; int pos=0; - memset(positions,0,sizeof(positions)); - + memset(positions,0,sizeof(positions)); + while (pos < (*sourcelen) && outpos <= (*dstlen)-2) { int backpos, runlen=0; unsigned char value; - + value = data_in[pos]; cpage_out[outpos++] = data_in[pos++]; - + backpos = positions[value]; positions[value]=pos; - + while ((backpos < pos) && (pos < (*sourcelen)) && (data_in[pos]==data_in[backpos++]) && (runlen<255)) { pos++; @@ -63,35 +62,34 @@ static int jffs2_rtime_compress(unsigned char *data_in, /* We failed */ return -1; } - + /* Tell the caller how much we managed to compress, and how much space it took */ *sourcelen = pos; *dstlen = outpos; return 0; -} +} static int jffs2_rtime_decompress(unsigned char *data_in, unsigned char *cpage_out, - uint32_t srclen, uint32_t destlen, - void *model) + uint32_t srclen, uint32_t destlen) { - short positions[256]; + unsigned short positions[256]; int outpos = 0; int pos=0; - - memset(positions,0,sizeof(positions)); - + + memset(positions,0,sizeof(positions)); + while (outpos<destlen) { unsigned char value; int backoffs; int repeat; - + value = data_in[pos++]; cpage_out[outpos++] = value; /* first the verbatim copied byte */ repeat = data_in[pos++]; backoffs = positions[value]; - + positions[value]=outpos; if (repeat) { if (backoffs + repeat >= outpos) { @@ -101,12 +99,12 @@ static int jffs2_rtime_decompress(unsigned char *data_in, } } else { memcpy(&cpage_out[outpos],&cpage_out[backoffs],repeat); - outpos+=repeat; + outpos+=repeat; } } } - return 0; -} + return 0; +} static struct jffs2_compressor jffs2_rtime_comp = { .priority = JFFS2_RTIME_PRIORITY, diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c index 09422388fb9..92e0644bf86 100644 --- a/fs/jffs2/compr_rubin.c +++ b/fs/jffs2/compr_rubin.c @@ -1,32 +1,104 @@ /* * JFFS2 -- Journalling Flash File System, Version 2. * - * Copyright (C) 2001, 2002 Red Hat, Inc. + * Copyright © 2001-2007 Red Hat, Inc. + * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org> * * Created by Arjan van de Ven <arjanv@redhat.com> * * For licensing information, see the file 'LICENCE' in this directory. 
* - * $Id: compr_rubin.c,v 1.20 2004/06/23 16:34:40 havasi Exp $ - * */ - +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/string.h> #include <linux/types.h> #include <linux/jffs2.h> -#include "compr_rubin.h" -#include "histo_mips.h" +#include <linux/errno.h> #include "compr.h" + +#define RUBIN_REG_SIZE 16 +#define UPPER_BIT_RUBIN (((long) 1)<<(RUBIN_REG_SIZE-1)) +#define LOWER_BITS_RUBIN ((((long) 1)<<(RUBIN_REG_SIZE-1))-1) + + +#define BIT_DIVIDER_MIPS 1043 +static int bits_mips[8] = { 277, 249, 290, 267, 229, 341, 212, 241}; + +struct pushpull { + unsigned char *buf; + unsigned int buflen; + unsigned int ofs; + unsigned int reserve; +}; + +struct rubin_state { + unsigned long p; + unsigned long q; + unsigned long rec_q; + long bit_number; + struct pushpull pp; + int bit_divider; + int bits[8]; +}; + +static inline void init_pushpull(struct pushpull *pp, char *buf, + unsigned buflen, unsigned ofs, + unsigned reserve) +{ + pp->buf = buf; + pp->buflen = buflen; + pp->ofs = ofs; + pp->reserve = reserve; +} + +static inline int pushbit(struct pushpull *pp, int bit, int use_reserved) +{ + if (pp->ofs >= pp->buflen - (use_reserved?0:pp->reserve)) + return -ENOSPC; + + if (bit) + pp->buf[pp->ofs >> 3] |= (1<<(7-(pp->ofs & 7))); + else + pp->buf[pp->ofs >> 3] &= ~(1<<(7-(pp->ofs & 7))); + + pp->ofs++; + + return 0; +} + +static inline int pushedbits(struct pushpull *pp) +{ + return pp->ofs; +} + +static inline int pullbit(struct pushpull *pp) +{ + int bit; + + bit = (pp->buf[pp->ofs >> 3] >> (7-(pp->ofs & 7))) & 1; + + pp->ofs++; + return bit; +} + +static inline int pulledbits(struct pushpull *pp) +{ + return pp->ofs; +} + + static void init_rubin(struct rubin_state *rs, int div, int *bits) -{ +{ int c; rs->q = 0; rs->p = (long) (2 * UPPER_BIT_RUBIN); rs->bit_number = (long) 0; rs->bit_divider = div; + for (c=0; c<8; c++) rs->bits[c] = bits[c]; } @@ -38,9 +110,10 @@ static int encode(struct rubin_state *rs, long A, long B, int symbol) long i0, i1; int ret; - while ((rs->q >= UPPER_BIT_RUBIN) || ((rs->p + rs->q) <= UPPER_BIT_RUBIN)) { + while ((rs->q >= UPPER_BIT_RUBIN) || + ((rs->p + rs->q) <= UPPER_BIT_RUBIN)) { rs->bit_number++; - + ret = pushbit(&rs->pp, (rs->q & UPPER_BIT_RUBIN) ? 
1 : 0, 0); if (ret) return ret; @@ -49,12 +122,12 @@ static int encode(struct rubin_state *rs, long A, long B, int symbol) rs->p <<= 1; } i0 = A * rs->p / (A + B); - if (i0 <= 0) { + if (i0 <= 0) i0 = 1; - } - if (i0 >= rs->p) { + + if (i0 >= rs->p) i0 = rs->p - 1; - } + i1 = rs->p - i0; if (symbol == 0) @@ -68,7 +141,7 @@ static int encode(struct rubin_state *rs, long A, long B, int symbol) static void end_rubin(struct rubin_state *rs) -{ +{ int i; @@ -82,16 +155,18 @@ static void end_rubin(struct rubin_state *rs) static void init_decode(struct rubin_state *rs, int div, int *bits) { - init_rubin(rs, div, bits); + init_rubin(rs, div, bits); /* behalve lower */ rs->rec_q = 0; - for (rs->bit_number = 0; rs->bit_number++ < RUBIN_REG_SIZE; rs->rec_q = rs->rec_q * 2 + (long) (pullbit(&rs->pp))) + for (rs->bit_number = 0; rs->bit_number++ < RUBIN_REG_SIZE; + rs->rec_q = rs->rec_q * 2 + (long) (pullbit(&rs->pp))) ; } -static void __do_decode(struct rubin_state *rs, unsigned long p, unsigned long q) +static void __do_decode(struct rubin_state *rs, unsigned long p, + unsigned long q) { register unsigned long lower_bits_rubin = LOWER_BITS_RUBIN; unsigned long rec_q; @@ -137,12 +212,11 @@ static int decode(struct rubin_state *rs, long A, long B) __do_decode(rs, p, q); i0 = A * rs->p / (A + B); - if (i0 <= 0) { + if (i0 <= 0) i0 = 1; - } - if (i0 >= rs->p) { + + if (i0 >= rs->p) i0 = rs->p - 1; - } threshold = rs->q + i0; symbol = rs->rec_q >= threshold; @@ -164,14 +238,15 @@ static int out_byte(struct rubin_state *rs, unsigned char byte) struct rubin_state rs_copy; rs_copy = *rs; - for (i=0;i<8;i++) { - ret = encode(rs, rs->bit_divider-rs->bits[i],rs->bits[i],byte&1); + for (i=0; i<8; i++) { + ret = encode(rs, rs->bit_divider-rs->bits[i], + rs->bits[i], byte & 1); if (ret) { /* Failed. 
Restore old state */ *rs = rs_copy; return ret; } - byte=byte>>1; + byte >>= 1 ; } return 0; } @@ -181,15 +256,17 @@ static int in_byte(struct rubin_state *rs) int i, result = 0, bit_divider = rs->bit_divider; for (i = 0; i < 8; i++) - result |= decode(rs, bit_divider - rs->bits[i], rs->bits[i]) << i; + result |= decode(rs, bit_divider - rs->bits[i], + rs->bits[i]) << i; return result; } -static int rubin_do_compress(int bit_divider, int *bits, unsigned char *data_in, - unsigned char *cpage_out, uint32_t *sourcelen, uint32_t *dstlen) +static int rubin_do_compress(int bit_divider, int *bits, unsigned char *data_in, + unsigned char *cpage_out, uint32_t *sourcelen, + uint32_t *dstlen) { int outpos = 0; int pos=0; @@ -198,40 +275,40 @@ static int rubin_do_compress(int bit_divider, int *bits, unsigned char *data_in, init_pushpull(&rs.pp, cpage_out, *dstlen * 8, 0, 32); init_rubin(&rs, bit_divider, bits); - + while (pos < (*sourcelen) && !out_byte(&rs, data_in[pos])) pos++; - + end_rubin(&rs); if (outpos > pos) { /* We failed */ return -1; } - - /* Tell the caller how much we managed to compress, + + /* Tell the caller how much we managed to compress, * and how much space it took */ - + outpos = (pushedbits(&rs.pp)+7)/8; - + if (outpos >= pos) return -1; /* We didn't actually compress */ *sourcelen = pos; *dstlen = outpos; return 0; -} +} #if 0 /* _compress returns the compressed size, -1 if bigger */ -int jffs2_rubinmips_compress(unsigned char *data_in, unsigned char *cpage_out, - uint32_t *sourcelen, uint32_t *dstlen, void *model) +int jffs2_rubinmips_compress(unsigned char *data_in, unsigned char *cpage_out, + uint32_t *sourcelen, uint32_t *dstlen) { - return rubin_do_compress(BIT_DIVIDER_MIPS, bits_mips, data_in, cpage_out, sourcelen, dstlen); + return rubin_do_compress(BIT_DIVIDER_MIPS, bits_mips, data_in, + cpage_out, sourcelen, dstlen); } #endif static int jffs2_dynrubin_compress(unsigned char *data_in, unsigned char *cpage_out, - uint32_t *sourcelen, uint32_t *dstlen, - void *model) + uint32_t *sourcelen, uint32_t *dstlen) { int bits[8]; unsigned char histo[256]; @@ -246,9 +323,8 @@ static int jffs2_dynrubin_compress(unsigned char *data_in, return -1; memset(histo, 0, 256); - for (i=0; i<mysrclen; i++) { + for (i=0; i<mysrclen; i++) histo[data_in[i]]++; - } memset(bits, 0, sizeof(int)*8); for (i=0; i<256; i++) { if (i&128) @@ -276,8 +352,9 @@ static int jffs2_dynrubin_compress(unsigned char *data_in, cpage_out[i] = bits[i]; } - ret = rubin_do_compress(256, bits, data_in, cpage_out+8, &mysrclen, &mydstlen); - if (ret) + ret = rubin_do_compress(256, bits, data_in, cpage_out+8, &mysrclen, + &mydstlen); + if (ret) return ret; /* Add back the 8 bytes we took for the probabilities */ @@ -293,34 +370,34 @@ static int jffs2_dynrubin_compress(unsigned char *data_in, return 0; } -static void rubin_do_decompress(int bit_divider, int *bits, unsigned char *cdata_in, - unsigned char *page_out, uint32_t srclen, uint32_t destlen) +static void rubin_do_decompress(int bit_divider, int *bits, + unsigned char *cdata_in, + unsigned char *page_out, uint32_t srclen, + uint32_t destlen) { int outpos = 0; struct rubin_state rs; - + init_pushpull(&rs.pp, cdata_in, srclen, 0, 0); init_decode(&rs, bit_divider, bits); - - while (outpos < destlen) { + + while (outpos < destlen) page_out[outpos++] = in_byte(&rs); - } -} +} static int jffs2_rubinmips_decompress(unsigned char *data_in, unsigned char *cpage_out, - uint32_t sourcelen, uint32_t dstlen, - void *model) + uint32_t sourcelen, uint32_t dstlen) { - 
rubin_do_decompress(BIT_DIVIDER_MIPS, bits_mips, data_in, cpage_out, sourcelen, dstlen); - return 0; + rubin_do_decompress(BIT_DIVIDER_MIPS, bits_mips, data_in, + cpage_out, sourcelen, dstlen); + return 0; } static int jffs2_dynrubin_decompress(unsigned char *data_in, unsigned char *cpage_out, - uint32_t sourcelen, uint32_t dstlen, - void *model) + uint32_t sourcelen, uint32_t dstlen) { int bits[8]; int c; @@ -328,52 +405,53 @@ static int jffs2_dynrubin_decompress(unsigned char *data_in, for (c=0; c<8; c++) bits[c] = data_in[c]; - rubin_do_decompress(256, bits, data_in+8, cpage_out, sourcelen-8, dstlen); - return 0; + rubin_do_decompress(256, bits, data_in+8, cpage_out, sourcelen-8, + dstlen); + return 0; } static struct jffs2_compressor jffs2_rubinmips_comp = { - .priority = JFFS2_RUBINMIPS_PRIORITY, - .name = "rubinmips", - .compr = JFFS2_COMPR_DYNRUBIN, - .compress = NULL, /*&jffs2_rubinmips_compress,*/ - .decompress = &jffs2_rubinmips_decompress, + .priority = JFFS2_RUBINMIPS_PRIORITY, + .name = "rubinmips", + .compr = JFFS2_COMPR_DYNRUBIN, + .compress = NULL, /*&jffs2_rubinmips_compress,*/ + .decompress = &jffs2_rubinmips_decompress, #ifdef JFFS2_RUBINMIPS_DISABLED - .disabled = 1, + .disabled = 1, #else - .disabled = 0, + .disabled = 0, #endif }; int jffs2_rubinmips_init(void) { - return jffs2_register_compressor(&jffs2_rubinmips_comp); + return jffs2_register_compressor(&jffs2_rubinmips_comp); } void jffs2_rubinmips_exit(void) { - jffs2_unregister_compressor(&jffs2_rubinmips_comp); + jffs2_unregister_compressor(&jffs2_rubinmips_comp); } static struct jffs2_compressor jffs2_dynrubin_comp = { - .priority = JFFS2_DYNRUBIN_PRIORITY, - .name = "dynrubin", - .compr = JFFS2_COMPR_RUBINMIPS, - .compress = jffs2_dynrubin_compress, - .decompress = &jffs2_dynrubin_decompress, + .priority = JFFS2_DYNRUBIN_PRIORITY, + .name = "dynrubin", + .compr = JFFS2_COMPR_RUBINMIPS, + .compress = jffs2_dynrubin_compress, + .decompress = &jffs2_dynrubin_decompress, #ifdef JFFS2_DYNRUBIN_DISABLED - .disabled = 1, + .disabled = 1, #else - .disabled = 0, + .disabled = 0, #endif }; int jffs2_dynrubin_init(void) { - return jffs2_register_compressor(&jffs2_dynrubin_comp); + return jffs2_register_compressor(&jffs2_dynrubin_comp); } void jffs2_dynrubin_exit(void) { - jffs2_unregister_compressor(&jffs2_dynrubin_comp); + jffs2_unregister_compressor(&jffs2_dynrubin_comp); } diff --git a/fs/jffs2/compr_rubin.h b/fs/jffs2/compr_rubin.h deleted file mode 100644 index cf51e34f657..00000000000 --- a/fs/jffs2/compr_rubin.h +++ /dev/null @@ -1,21 +0,0 @@ -/* Rubin encoder/decoder header */ -/* work started at : aug 3, 1994 */ -/* last modification : aug 15, 1994 */ -/* $Id: compr_rubin.h,v 1.6 2002/01/25 01:49:26 dwmw2 Exp $ */ - -#include "pushpull.h" - -#define RUBIN_REG_SIZE 16 -#define UPPER_BIT_RUBIN (((long) 1)<<(RUBIN_REG_SIZE-1)) -#define LOWER_BITS_RUBIN ((((long) 1)<<(RUBIN_REG_SIZE-1))-1) - - -struct rubin_state { - unsigned long p; - unsigned long q; - unsigned long rec_q; - long bit_number; - struct pushpull pp; - int bit_divider; - int bits[8]; -}; diff --git a/fs/jffs2/compr_zlib.c b/fs/jffs2/compr_zlib.c index 83f7e0788fd..0b9a1e44e83 100644 --- a/fs/jffs2/compr_zlib.c +++ b/fs/jffs2/compr_zlib.c @@ -1,61 +1,61 @@ /* * JFFS2 -- Journalling Flash File System, Version 2. * - * Copyright (C) 2001-2003 Red Hat, Inc. + * Copyright © 2001-2007 Red Hat, Inc. 
+ * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org> * * Created by David Woodhouse <dwmw2@infradead.org> * * For licensing information, see the file 'LICENCE' in this directory. * - * $Id: compr_zlib.c,v 1.31 2005/05/20 19:30:06 gleixner Exp $ - * */ #if !defined(__KERNEL__) && !defined(__ECOS) #error "The userspace support got too messy and was removed. Update your mkfs.jffs2" #endif -#include <linux/config.h> +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/slab.h> #include <linux/zlib.h> #include <linux/zutil.h> #include "nodelist.h" #include "compr.h" - /* Plan: call deflate() with avail_in == *sourcelen, - avail_out = *dstlen - 12 and flush == Z_FINISH. + /* Plan: call deflate() with avail_in == *sourcelen, + avail_out = *dstlen - 12 and flush == Z_FINISH. If it doesn't manage to finish, call it again with avail_in == 0 and avail_out set to the remaining 12 - bytes for it to clean up. + bytes for it to clean up. Q: Is 12 bytes sufficient? */ #define STREAM_END_SPACE 12 -static DECLARE_MUTEX(deflate_sem); -static DECLARE_MUTEX(inflate_sem); +static DEFINE_MUTEX(deflate_mutex); +static DEFINE_MUTEX(inflate_mutex); static z_stream inf_strm, def_strm; #ifdef __KERNEL__ /* Linux-only */ #include <linux/vmalloc.h> #include <linux/init.h> +#include <linux/mutex.h> static int __init alloc_workspaces(void) { - def_strm.workspace = vmalloc(zlib_deflate_workspacesize()); - if (!def_strm.workspace) { - printk(KERN_WARNING "Failed to allocate %d bytes for deflate workspace\n", zlib_deflate_workspacesize()); + def_strm.workspace = vmalloc(zlib_deflate_workspacesize(MAX_WBITS, + MAX_MEM_LEVEL)); + if (!def_strm.workspace) return -ENOMEM; - } - D1(printk(KERN_DEBUG "Allocated %d bytes for deflate workspace\n", zlib_deflate_workspacesize())); + + jffs2_dbg(1, "Allocated %d bytes for deflate workspace\n", + zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL)); inf_strm.workspace = vmalloc(zlib_inflate_workspacesize()); if (!inf_strm.workspace) { - printk(KERN_WARNING "Failed to allocate %d bytes for inflate workspace\n", zlib_inflate_workspacesize()); vfree(def_strm.workspace); return -ENOMEM; } - D1(printk(KERN_DEBUG "Allocated %d bytes for inflate workspace\n", zlib_inflate_workspacesize())); + jffs2_dbg(1, "Allocated %d bytes for inflate workspace\n", + zlib_inflate_workspacesize()); return 0; } @@ -71,40 +71,40 @@ static void free_workspaces(void) static int jffs2_zlib_compress(unsigned char *data_in, unsigned char *cpage_out, - uint32_t *sourcelen, uint32_t *dstlen, - void *model) + uint32_t *sourcelen, uint32_t *dstlen) { int ret; if (*dstlen <= STREAM_END_SPACE) return -1; - down(&deflate_sem); + mutex_lock(&deflate_mutex); if (Z_OK != zlib_deflateInit(&def_strm, 3)) { - printk(KERN_WARNING "deflateInit failed\n"); - up(&deflate_sem); + pr_warn("deflateInit failed\n"); + mutex_unlock(&deflate_mutex); return -1; } def_strm.next_in = data_in; def_strm.total_in = 0; - + def_strm.next_out = cpage_out; def_strm.total_out = 0; while (def_strm.total_out < *dstlen - STREAM_END_SPACE && def_strm.total_in < *sourcelen) { def_strm.avail_out = *dstlen - (def_strm.total_out + STREAM_END_SPACE); def_strm.avail_in = min((unsigned)(*sourcelen-def_strm.total_in), def_strm.avail_out); - D1(printk(KERN_DEBUG "calling deflate with avail_in %d, avail_out %d\n", - def_strm.avail_in, def_strm.avail_out)); + jffs2_dbg(1, "calling deflate with avail_in %d, avail_out %d\n", + def_strm.avail_in, def_strm.avail_out); ret = 
zlib_deflate(&def_strm, Z_PARTIAL_FLUSH); - D1(printk(KERN_DEBUG "deflate returned with avail_in %d, avail_out %d, total_in %ld, total_out %ld\n", - def_strm.avail_in, def_strm.avail_out, def_strm.total_in, def_strm.total_out)); + jffs2_dbg(1, "deflate returned with avail_in %d, avail_out %d, total_in %ld, total_out %ld\n", + def_strm.avail_in, def_strm.avail_out, + def_strm.total_in, def_strm.total_out); if (ret != Z_OK) { - D1(printk(KERN_DEBUG "deflate in loop returned %d\n", ret)); + jffs2_dbg(1, "deflate in loop returned %d\n", ret); zlib_deflateEnd(&def_strm); - up(&deflate_sem); + mutex_unlock(&deflate_mutex); return -1; } } @@ -114,43 +114,42 @@ static int jffs2_zlib_compress(unsigned char *data_in, zlib_deflateEnd(&def_strm); if (ret != Z_STREAM_END) { - D1(printk(KERN_DEBUG "final deflate returned %d\n", ret)); + jffs2_dbg(1, "final deflate returned %d\n", ret); ret = -1; goto out; } if (def_strm.total_out >= def_strm.total_in) { - D1(printk(KERN_DEBUG "zlib compressed %ld bytes into %ld; failing\n", - def_strm.total_in, def_strm.total_out)); + jffs2_dbg(1, "zlib compressed %ld bytes into %ld; failing\n", + def_strm.total_in, def_strm.total_out); ret = -1; goto out; } - D1(printk(KERN_DEBUG "zlib compressed %ld bytes into %ld\n", - def_strm.total_in, def_strm.total_out)); + jffs2_dbg(1, "zlib compressed %ld bytes into %ld\n", + def_strm.total_in, def_strm.total_out); *dstlen = def_strm.total_out; *sourcelen = def_strm.total_in; ret = 0; out: - up(&deflate_sem); + mutex_unlock(&deflate_mutex); return ret; } static int jffs2_zlib_decompress(unsigned char *data_in, unsigned char *cpage_out, - uint32_t srclen, uint32_t destlen, - void *model) + uint32_t srclen, uint32_t destlen) { int ret; int wbits = MAX_WBITS; - down(&inflate_sem); + mutex_lock(&inflate_mutex); inf_strm.next_in = data_in; inf_strm.avail_in = srclen; inf_strm.total_in = 0; - + inf_strm.next_out = cpage_out; inf_strm.avail_out = destlen; inf_strm.total_out = 0; @@ -161,30 +160,30 @@ static int jffs2_zlib_decompress(unsigned char *data_in, ((data_in[0] & 0x0f) == Z_DEFLATED) && !(((data_in[0]<<8) + data_in[1]) % 31)) { - D2(printk(KERN_DEBUG "inflate skipping adler32\n")); + jffs2_dbg(2, "inflate skipping adler32\n"); wbits = -((data_in[0] >> 4) + 8); inf_strm.next_in += 2; inf_strm.avail_in -= 2; } else { /* Let this remain D1 for now -- it should never happen */ - D1(printk(KERN_DEBUG "inflate not skipping adler32\n")); + jffs2_dbg(1, "inflate not skipping adler32\n"); } if (Z_OK != zlib_inflateInit2(&inf_strm, wbits)) { - printk(KERN_WARNING "inflateInit failed\n"); - up(&inflate_sem); + pr_warn("inflateInit failed\n"); + mutex_unlock(&inflate_mutex); return 1; } while((ret = zlib_inflate(&inf_strm, Z_FINISH)) == Z_OK) ; if (ret != Z_STREAM_END) { - printk(KERN_NOTICE "inflate returned %d\n", ret); + pr_notice("inflate returned %d\n", ret); } zlib_inflateEnd(&inf_strm); - up(&inflate_sem); - return 0; + mutex_unlock(&inflate_mutex); + return 0; } static struct jffs2_compressor jffs2_zlib_comp = { @@ -206,11 +205,11 @@ int __init jffs2_zlib_init(void) ret = alloc_workspaces(); if (ret) - return ret; + return ret; ret = jffs2_register_compressor(&jffs2_zlib_comp); if (ret) - free_workspaces(); + free_workspaces(); return ret; } diff --git a/fs/jffs2/comprtest.c b/fs/jffs2/comprtest.c deleted file mode 100644 index cf51f091d0e..00000000000 --- a/fs/jffs2/comprtest.c +++ /dev/null @@ -1,307 +0,0 @@ -/* $Id: comprtest.c,v 1.5 2002/01/03 15:20:44 dwmw2 Exp $ */ - -#include <linux/kernel.h> -#include <linux/string.h> 
-#include <linux/module.h> -#include <asm/types.h> -#if 0 -#define TESTDATA_LEN 512 -static unsigned char testdata[TESTDATA_LEN] = { - 0x7f, 0x45, 0x4c, 0x46, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x02, 0x00, 0x03, 0x00, 0x01, 0x00, 0x00, 0x00, 0x60, 0x83, 0x04, 0x08, 0x34, 0x00, 0x00, 0x00, - 0xb0, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x00, 0x20, 0x00, 0x06, 0x00, 0x28, 0x00, - 0x1e, 0x00, 0x1b, 0x00, 0x06, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x34, 0x80, 0x04, 0x08, - 0x34, 0x80, 0x04, 0x08, 0xc0, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0xf4, 0x00, 0x00, 0x00, 0xf4, 0x80, 0x04, 0x08, - 0xf4, 0x80, 0x04, 0x08, 0x13, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x04, 0x08, - 0x00, 0x80, 0x04, 0x08, 0x0d, 0x05, 0x00, 0x00, 0x0d, 0x05, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, - 0x00, 0x10, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x10, 0x05, 0x00, 0x00, 0x10, 0x95, 0x04, 0x08, - 0x10, 0x95, 0x04, 0x08, 0xe8, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, - 0x00, 0x10, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x58, 0x05, 0x00, 0x00, 0x58, 0x95, 0x04, 0x08, - 0x58, 0x95, 0x04, 0x08, 0xa0, 0x00, 0x00, 0x00, 0xa0, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x08, 0x01, 0x00, 0x00, 0x08, 0x81, 0x04, 0x08, - 0x08, 0x81, 0x04, 0x08, 0x20, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x2f, 0x6c, 0x69, 0x62, 0x2f, 0x6c, 0x64, 0x2d, 0x6c, 0x69, 0x6e, 0x75, - 0x78, 0x2e, 0x73, 0x6f, 0x2e, 0x32, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, - 0x01, 0x00, 0x00, 0x00, 0x47, 0x4e, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, - 0x07, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x04, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x69, 0x00, 0x00, 0x00, - 0x0c, 0x83, 0x04, 0x08, 0x81, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, - 0x1c, 0x83, 0x04, 0x08, 0xac, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x57, 0x00, 0x00, 0x00, - 0x2c, 0x83, 0x04, 0x08, 0xdd, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x1a, 0x00, 0x00, 0x00, - 0x3c, 0x83, 0x04, 0x08, 0x2e, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x21, 0x00, 0x00, 0x00, - 0x4c, 0x83, 0x04, 0x08, 0x7d, 0x00, 0x00, 0x00, 0x22, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00, - 0x00, 0x85, 0x04, 0x08, 0x04, 0x00, 0x00, 0x00, 0x11, 0x00, 0x0e, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x5f, 0x5f, 0x67, - 0x6d, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x5f, 0x00, 0x6c, 0x69, 0x62, 0x63, - 0x2e, 0x73, 0x6f, 0x2e, 0x36, 0x00, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x66, 0x00, 0x5f, 0x5f, 0x63}; -#else -#define TESTDATA_LEN 3481 -static unsigned char testdata[TESTDATA_LEN] = { - 0x23, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x20, 0x22, 0x64, 0x62, 0x65, 0x6e, 0x63, 0x68, - 0x2e, 0x68, 0x22, 0x0a, 0x0a, 0x23, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x20, 0x4d, 0x41, 0x58, - 
0x5f, 0x46, 0x49, 0x4c, 0x45, 0x53, 0x20, 0x31, 0x30, 0x30, 0x30, 0x0a, 0x0a, 0x73, 0x74, 0x61, - 0x74, 0x69, 0x63, 0x20, 0x63, 0x68, 0x61, 0x72, 0x20, 0x62, 0x75, 0x66, 0x5b, 0x37, 0x30, 0x30, - 0x30, 0x30, 0x5d, 0x3b, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x20, 0x69, 0x6e, 0x74, 0x20, - 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x3b, 0x0a, 0x0a, 0x73, 0x74, 0x61, - 0x74, 0x69, 0x63, 0x20, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x20, 0x7b, 0x0a, 0x09, 0x69, 0x6e, - 0x74, 0x20, 0x66, 0x64, 0x3b, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, - 0x65, 0x3b, 0x0a, 0x7d, 0x20, 0x66, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5b, 0x4d, 0x41, 0x58, 0x5f, - 0x46, 0x49, 0x4c, 0x45, 0x53, 0x5d, 0x3b, 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f, - 0x5f, 0x75, 0x6e, 0x6c, 0x69, 0x6e, 0x6b, 0x28, 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x66, 0x6e, - 0x61, 0x6d, 0x65, 0x29, 0x0a, 0x7b, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x75, 0x70, 0x70, 0x65, 0x72, - 0x28, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x29, 0x3b, 0x0a, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x75, - 0x6e, 0x6c, 0x69, 0x6e, 0x6b, 0x28, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x29, 0x20, 0x21, 0x3d, 0x20, - 0x30, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, 0x28, - 0x25, 0x64, 0x29, 0x20, 0x75, 0x6e, 0x6c, 0x69, 0x6e, 0x6b, 0x20, 0x25, 0x73, 0x20, 0x66, 0x61, - 0x69, 0x6c, 0x65, 0x64, 0x20, 0x28, 0x25, 0x73, 0x29, 0x5c, 0x6e, 0x22, 0x2c, 0x20, 0x0a, 0x09, - 0x09, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x2c, 0x20, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x73, 0x74, 0x72, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x28, 0x65, 0x72, 0x72, 0x6e, 0x6f, 0x29, 0x29, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, - 0x7d, 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x65, 0x78, 0x70, 0x61, 0x6e, 0x64, 0x5f, 0x66, - 0x69, 0x6c, 0x65, 0x28, 0x69, 0x6e, 0x74, 0x20, 0x66, 0x64, 0x2c, 0x20, 0x69, 0x6e, 0x74, 0x20, - 0x73, 0x69, 0x7a, 0x65, 0x29, 0x0a, 0x7b, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x20, 0x73, 0x3b, 0x0a, - 0x09, 0x77, 0x68, 0x69, 0x6c, 0x65, 0x20, 0x28, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x20, 0x7b, 0x0a, - 0x09, 0x09, 0x73, 0x20, 0x3d, 0x20, 0x4d, 0x49, 0x4e, 0x28, 0x73, 0x69, 0x7a, 0x65, 0x6f, 0x66, - 0x28, 0x62, 0x75, 0x66, 0x29, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x3b, 0x0a, 0x09, 0x09, - 0x77, 0x72, 0x69, 0x74, 0x65, 0x28, 0x66, 0x64, 0x2c, 0x20, 0x62, 0x75, 0x66, 0x2c, 0x20, 0x73, - 0x29, 0x3b, 0x0a, 0x09, 0x09, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x2d, 0x3d, 0x20, 0x73, 0x3b, 0x0a, - 0x09, 0x7d, 0x0a, 0x7d, 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f, 0x5f, 0x6f, 0x70, - 0x65, 0x6e, 0x28, 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, - 0x69, 0x6e, 0x74, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x2c, 0x20, 0x69, 0x6e, 0x74, 0x20, - 0x73, 0x69, 0x7a, 0x65, 0x29, 0x0a, 0x7b, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x20, 0x66, 0x64, 0x2c, - 0x20, 0x69, 0x3b, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x20, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x20, 0x3d, - 0x20, 0x4f, 0x5f, 0x52, 0x44, 0x57, 0x52, 0x7c, 0x4f, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x3b, - 0x0a, 0x09, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x20, 0x73, 0x74, 0x61, 0x74, 0x20, 0x73, 0x74, - 0x3b, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x20, 0x69, 0x6e, 0x74, 0x20, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x3b, 0x0a, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x75, 0x70, 0x70, 0x65, 0x72, 0x28, - 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x29, 0x3b, 0x0a, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x73, 0x69, - 0x7a, 0x65, 0x20, 0x3d, 
0x3d, 0x20, 0x30, 0x29, 0x20, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x20, 0x7c, - 0x3d, 0x20, 0x4f, 0x5f, 0x54, 0x52, 0x55, 0x4e, 0x43, 0x3b, 0x0a, 0x0a, 0x09, 0x66, 0x64, 0x20, - 0x3d, 0x20, 0x6f, 0x70, 0x65, 0x6e, 0x28, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x66, 0x6c, - 0x61, 0x67, 0x73, 0x2c, 0x20, 0x30, 0x36, 0x30, 0x30, 0x29, 0x3b, 0x0a, 0x09, 0x69, 0x66, 0x20, - 0x28, 0x66, 0x64, 0x20, 0x3d, 0x3d, 0x20, 0x2d, 0x31, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, - 0x72, 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, 0x28, 0x25, 0x64, 0x29, 0x20, 0x6f, 0x70, 0x65, 0x6e, - 0x20, 0x25, 0x73, 0x20, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x68, - 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x20, 0x25, 0x64, 0x20, 0x28, 0x25, 0x73, 0x29, 0x5c, 0x6e, 0x22, - 0x2c, 0x20, 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x69, 0x6e, 0x65, - 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x2c, 0x20, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x68, - 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x2c, 0x20, 0x73, 0x74, 0x72, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x28, - 0x65, 0x72, 0x72, 0x6e, 0x6f, 0x29, 0x29, 0x3b, 0x0a, 0x09, 0x09, 0x72, 0x65, 0x74, 0x75, 0x72, - 0x6e, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x09, 0x66, 0x73, 0x74, 0x61, 0x74, 0x28, 0x66, 0x64, 0x2c, - 0x20, 0x26, 0x73, 0x74, 0x29, 0x3b, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x73, 0x69, 0x7a, 0x65, - 0x20, 0x3e, 0x20, 0x73, 0x74, 0x2e, 0x73, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x20, 0x7b, - 0x0a, 0x23, 0x69, 0x66, 0x20, 0x44, 0x45, 0x42, 0x55, 0x47, 0x0a, 0x09, 0x09, 0x70, 0x72, 0x69, - 0x6e, 0x74, 0x66, 0x28, 0x22, 0x28, 0x25, 0x64, 0x29, 0x20, 0x65, 0x78, 0x70, 0x61, 0x6e, 0x64, - 0x69, 0x6e, 0x67, 0x20, 0x25, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x25, 0x64, 0x20, 0x66, 0x72, 0x6f, - 0x6d, 0x20, 0x25, 0x64, 0x5c, 0x6e, 0x22, 0x2c, 0x20, 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x2c, 0x20, 0x66, - 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x2c, 0x20, 0x28, 0x69, 0x6e, 0x74, - 0x29, 0x73, 0x74, 0x2e, 0x73, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x3b, 0x0a, 0x23, 0x65, - 0x6e, 0x64, 0x69, 0x66, 0x0a, 0x09, 0x09, 0x65, 0x78, 0x70, 0x61, 0x6e, 0x64, 0x5f, 0x66, 0x69, - 0x6c, 0x65, 0x28, 0x66, 0x64, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x74, - 0x2e, 0x73, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x3b, 0x0a, 0x09, 0x7d, 0x20, 0x65, 0x6c, - 0x73, 0x65, 0x20, 0x69, 0x66, 0x20, 0x28, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x3c, 0x20, 0x73, 0x74, - 0x2e, 0x73, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, 0x72, - 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x69, 0x6e, 0x67, - 0x20, 0x25, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x25, 0x64, 0x20, 0x66, 0x72, 0x6f, 0x6d, 0x20, 0x25, - 0x64, 0x5c, 0x6e, 0x22, 0x2c, 0x20, 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x2c, 0x20, 0x28, 0x69, 0x6e, - 0x74, 0x29, 0x73, 0x74, 0x2e, 0x73, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x3b, 0x0a, 0x09, - 0x09, 0x66, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x28, 0x66, 0x64, 0x2c, 0x20, 0x73, - 0x69, 0x7a, 0x65, 0x29, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x09, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x69, - 0x3d, 0x30, 0x3b, 0x69, 0x3c, 0x4d, 0x41, 0x58, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x53, 0x3b, 0x69, - 0x2b, 0x2b, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x69, 0x66, 0x20, 0x28, 0x66, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x68, 0x61, 
0x6e, 0x64, 0x6c, 0x65, 0x20, 0x3d, 0x3d, 0x20, - 0x30, 0x29, 0x20, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x09, 0x69, 0x66, - 0x20, 0x28, 0x69, 0x20, 0x3d, 0x3d, 0x20, 0x4d, 0x41, 0x58, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x53, - 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, 0x66, 0x69, - 0x6c, 0x65, 0x20, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x66, 0x75, 0x6c, 0x6c, 0x20, 0x66, 0x6f, - 0x72, 0x20, 0x25, 0x73, 0x5c, 0x6e, 0x22, 0x2c, 0x20, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x29, 0x3b, - 0x0a, 0x09, 0x09, 0x65, 0x78, 0x69, 0x74, 0x28, 0x31, 0x29, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x09, - 0x66, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, - 0x20, 0x3d, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x3b, 0x0a, 0x09, 0x66, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x66, 0x64, 0x20, 0x3d, 0x20, 0x66, 0x64, 0x3b, 0x0a, 0x09, - 0x69, 0x66, 0x20, 0x28, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x2b, 0x2b, 0x20, 0x25, 0x20, 0x31, 0x30, - 0x30, 0x20, 0x3d, 0x3d, 0x20, 0x30, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, 0x72, 0x69, 0x6e, - 0x74, 0x66, 0x28, 0x22, 0x2e, 0x22, 0x29, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x7d, 0x0a, 0x0a, 0x76, - 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x28, 0x69, 0x6e, 0x74, - 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x2c, 0x20, 0x69, 0x6e, 0x74, 0x20, 0x73, 0x69, 0x7a, - 0x65, 0x2c, 0x20, 0x69, 0x6e, 0x74, 0x20, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x29, 0x0a, 0x7b, - 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x20, 0x69, 0x3b, 0x0a, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x62, - 0x75, 0x66, 0x5b, 0x30, 0x5d, 0x20, 0x3d, 0x3d, 0x20, 0x30, 0x29, 0x20, 0x6d, 0x65, 0x6d, 0x73, - 0x65, 0x74, 0x28, 0x62, 0x75, 0x66, 0x2c, 0x20, 0x31, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x6f, - 0x66, 0x28, 0x62, 0x75, 0x66, 0x29, 0x29, 0x3b, 0x0a, 0x0a, 0x09, 0x66, 0x6f, 0x72, 0x20, 0x28, - 0x69, 0x3d, 0x30, 0x3b, 0x69, 0x3c, 0x4d, 0x41, 0x58, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x53, 0x3b, - 0x69, 0x2b, 0x2b, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x69, 0x66, 0x20, 0x28, 0x66, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x20, 0x3d, 0x3d, - 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x29, 0x20, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x3b, 0x0a, - 0x09, 0x7d, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x69, 0x20, 0x3d, 0x3d, 0x20, 0x4d, 0x41, 0x58, - 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x53, 0x29, 0x20, 0x7b, 0x0a, 0x23, 0x69, 0x66, 0x20, 0x31, 0x0a, - 0x09, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, 0x28, 0x25, 0x64, 0x29, 0x20, 0x64, - 0x6f, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x3a, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x20, - 0x25, 0x64, 0x20, 0x77, 0x61, 0x73, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x6f, 0x70, 0x65, 0x6e, 0x20, - 0x73, 0x69, 0x7a, 0x65, 0x3d, 0x25, 0x64, 0x20, 0x6f, 0x66, 0x73, 0x3d, 0x25, 0x64, 0x5c, 0x6e, - 0x22, 0x2c, 0x20, 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x69, 0x6e, - 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x2c, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x2c, - 0x20, 0x73, 0x69, 0x7a, 0x65, 0x2c, 0x20, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x29, 0x3b, 0x0a, - 0x23, 0x65, 0x6e, 0x64, 0x69, 0x66, 0x0a, 0x09, 0x09, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x3b, - 0x0a, 0x09, 0x7d, 0x0a, 0x09, 0x6c, 0x73, 0x65, 0x65, 0x6b, 0x28, 0x66, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x66, 0x64, 0x2c, 0x20, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x2c, - 0x20, 0x53, 0x45, 0x45, 0x4b, 0x5f, 0x53, 0x45, 0x54, 0x29, 0x3b, 0x0a, 
0x09, 0x69, 0x66, 0x20, - 0x28, 0x77, 0x72, 0x69, 0x74, 0x65, 0x28, 0x66, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5b, 0x69, 0x5d, - 0x2e, 0x66, 0x64, 0x2c, 0x20, 0x62, 0x75, 0x66, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x20, - 0x21, 0x3d, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, 0x72, 0x69, - 0x6e, 0x74, 0x66, 0x28, 0x22, 0x77, 0x72, 0x69, 0x74, 0x65, 0x20, 0x66, 0x61, 0x69, 0x6c, 0x65, - 0x64, 0x20, 0x6f, 0x6e, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x20, 0x25, 0x64, 0x5c, 0x6e, - 0x22, 0x2c, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x29, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x7d, - 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x28, 0x69, - 0x6e, 0x74, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x2c, 0x20, 0x69, 0x6e, 0x74, 0x20, 0x73, - 0x69, 0x7a, 0x65, 0x2c, 0x20, 0x69, 0x6e, 0x74, 0x20, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x29, - 0x0a, 0x7b, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x20, 0x69, 0x3b, 0x0a, 0x09, 0x66, 0x6f, 0x72, 0x20, - 0x28, 0x69, 0x3d, 0x30, 0x3b, 0x69, 0x3c, 0x4d, 0x41, 0x58, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x53, - 0x3b, 0x69, 0x2b, 0x2b, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x69, 0x66, 0x20, 0x28, 0x66, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x20, 0x3d, - 0x3d, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x29, 0x20, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x3b, - 0x0a, 0x09, 0x7d, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x69, 0x20, 0x3d, 0x3d, 0x20, 0x4d, 0x41, - 0x58, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x53, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, 0x72, 0x69, - 0x6e, 0x74, 0x66, 0x28, 0x22, 0x28, 0x25, 0x64, 0x29, 0x20, 0x64, 0x6f, 0x5f, 0x72, 0x65, 0x61, - 0x64, 0x3a, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x20, 0x25, 0x64, 0x20, 0x77, 0x61, 0x73, - 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x6f, 0x70, 0x65, 0x6e, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x3d, 0x25, - 0x64, 0x20, 0x6f, 0x66, 0x73, 0x3d, 0x25, 0x64, 0x5c, 0x6e, 0x22, 0x2c, 0x20, 0x0a, 0x09, 0x09, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, - 0x74, 0x2c, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x2c, - 0x20, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x29, 0x3b, 0x0a, 0x09, 0x09, 0x72, 0x65, 0x74, 0x75, - 0x72, 0x6e, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x09, 0x6c, 0x73, 0x65, 0x65, 0x6b, 0x28, 0x66, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x66, 0x64, 0x2c, 0x20, 0x6f, 0x66, 0x66, 0x73, - 0x65, 0x74, 0x2c, 0x20, 0x53, 0x45, 0x45, 0x4b, 0x5f, 0x53, 0x45, 0x54, 0x29, 0x3b, 0x0a, 0x09, - 0x72, 0x65, 0x61, 0x64, 0x28, 0x66, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x66, - 0x64, 0x2c, 0x20, 0x62, 0x75, 0x66, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x3b, 0x0a, 0x7d, - 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x28, - 0x69, 0x6e, 0x74, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x29, 0x0a, 0x7b, 0x0a, 0x09, 0x69, - 0x6e, 0x74, 0x20, 0x69, 0x3b, 0x0a, 0x09, 0x66, 0x6f, 0x72, 0x20, 0x28, 0x69, 0x3d, 0x30, 0x3b, - 0x69, 0x3c, 0x4d, 0x41, 0x58, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x53, 0x3b, 0x69, 0x2b, 0x2b, 0x29, - 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x69, 0x66, 0x20, 0x28, 0x66, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5b, - 0x69, 0x5d, 0x2e, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x20, 0x3d, 0x3d, 0x20, 0x68, 0x61, 0x6e, - 0x64, 0x6c, 0x65, 0x29, 0x20, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x09, - 0x69, 0x66, 0x20, 0x28, 0x69, 0x20, 0x3d, 0x3d, 0x20, 0x4d, 0x41, 0x58, 0x5f, 0x46, 0x49, 0x4c, - 
0x45, 0x53, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, - 0x28, 0x25, 0x64, 0x29, 0x20, 0x64, 0x6f, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x3a, 0x20, 0x68, - 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x20, 0x25, 0x64, 0x20, 0x77, 0x61, 0x73, 0x20, 0x6e, 0x6f, 0x74, - 0x20, 0x6f, 0x70, 0x65, 0x6e, 0x5c, 0x6e, 0x22, 0x2c, 0x20, 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20, - 0x20, 0x20, 0x20, 0x20, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x2c, 0x20, - 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x29, 0x3b, 0x0a, 0x09, 0x09, 0x72, 0x65, 0x74, 0x75, 0x72, - 0x6e, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x09, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x28, 0x66, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x66, 0x64, 0x29, 0x3b, 0x0a, 0x09, 0x66, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x5b, 0x69, 0x5d, 0x2e, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x20, 0x3d, 0x20, - 0x30, 0x3b, 0x0a, 0x7d, 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f, 0x5f, 0x6d, 0x6b, - 0x64, 0x69, 0x72, 0x28, 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x29, - 0x0a, 0x7b, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x75, 0x70, 0x70, 0x65, 0x72, 0x28, 0x66, 0x6e, 0x61, - 0x6d, 0x65, 0x29, 0x3b, 0x0a, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x6d, 0x6b, 0x64, 0x69, 0x72, - 0x28, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x30, 0x37, 0x30, 0x30, 0x29, 0x20, 0x21, 0x3d, - 0x20, 0x30, 0x29, 0x20, 0x7b, 0x0a, 0x23, 0x69, 0x66, 0x20, 0x44, 0x45, 0x42, 0x55, 0x47, 0x0a, - 0x09, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, 0x6d, 0x6b, 0x64, 0x69, 0x72, 0x20, - 0x25, 0x73, 0x20, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x20, 0x28, 0x25, 0x73, 0x29, 0x5c, 0x6e, - 0x22, 0x2c, 0x20, 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x66, 0x6e, 0x61, - 0x6d, 0x65, 0x2c, 0x20, 0x73, 0x74, 0x72, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x28, 0x65, 0x72, 0x72, - 0x6e, 0x6f, 0x29, 0x29, 0x3b, 0x0a, 0x23, 0x65, 0x6e, 0x64, 0x69, 0x66, 0x0a, 0x09, 0x7d, 0x0a, - 0x7d, 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f, 0x5f, 0x72, 0x6d, 0x64, 0x69, 0x72, - 0x28, 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x29, 0x0a, 0x7b, 0x0a, - 0x09, 0x73, 0x74, 0x72, 0x75, 0x70, 0x70, 0x65, 0x72, 0x28, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x29, - 0x3b, 0x0a, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x72, 0x6d, 0x64, 0x69, 0x72, 0x28, 0x66, 0x6e, - 0x61, 0x6d, 0x65, 0x29, 0x20, 0x21, 0x3d, 0x20, 0x30, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, - 0x72, 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, 0x72, 0x6d, 0x64, 0x69, 0x72, 0x20, 0x25, 0x73, 0x20, - 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x20, 0x28, 0x25, 0x73, 0x29, 0x5c, 0x6e, 0x22, 0x2c, 0x20, - 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, - 0x20, 0x73, 0x74, 0x72, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x28, 0x65, 0x72, 0x72, 0x6e, 0x6f, 0x29, - 0x29, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x7d, 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f, - 0x5f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x28, 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x6f, 0x6c, - 0x64, 0x2c, 0x20, 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x6e, 0x65, 0x77, 0x29, 0x0a, 0x7b, 0x0a, - 0x09, 0x73, 0x74, 0x72, 0x75, 0x70, 0x70, 0x65, 0x72, 0x28, 0x6f, 0x6c, 0x64, 0x29, 0x3b, 0x0a, - 0x09, 0x73, 0x74, 0x72, 0x75, 0x70, 0x70, 0x65, 0x72, 0x28, 0x6e, 0x65, 0x77, 0x29, 0x3b, 0x0a, - 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x28, 0x6f, 0x6c, 0x64, - 0x2c, 0x20, 0x6e, 0x65, 0x77, 0x29, 0x20, 0x21, 0x3d, 0x20, 0x30, 0x29, 0x20, 0x7b, 0x0a, 0x09, - 0x09, 0x70, 0x72, 0x69, 
0x6e, 0x74, 0x66, 0x28, 0x22, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x20, - 0x25, 0x73, 0x20, 0x25, 0x73, 0x20, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x20, 0x28, 0x25, 0x73, - 0x29, 0x5c, 0x6e, 0x22, 0x2c, 0x20, 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x6f, 0x6c, 0x64, 0x2c, 0x20, 0x6e, 0x65, 0x77, 0x2c, 0x20, 0x73, 0x74, 0x72, 0x65, 0x72, 0x72, - 0x6f, 0x72, 0x28, 0x65, 0x72, 0x72, 0x6e, 0x6f, 0x29, 0x29, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x7d, - 0x0a, 0x0a, 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x28, - 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x69, 0x6e, 0x74, - 0x20, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x0a, 0x7b, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, - 0x20, 0x73, 0x74, 0x61, 0x74, 0x20, 0x73, 0x74, 0x3b, 0x0a, 0x0a, 0x09, 0x73, 0x74, 0x72, 0x75, - 0x70, 0x70, 0x65, 0x72, 0x28, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x29, 0x3b, 0x0a, 0x0a, 0x09, 0x69, - 0x66, 0x20, 0x28, 0x73, 0x74, 0x61, 0x74, 0x28, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x26, - 0x73, 0x74, 0x29, 0x20, 0x21, 0x3d, 0x20, 0x30, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, 0x72, - 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, 0x28, 0x25, 0x64, 0x29, 0x20, 0x64, 0x6f, 0x5f, 0x73, 0x74, - 0x61, 0x74, 0x3a, 0x20, 0x25, 0x73, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x3d, 0x25, 0x64, 0x20, 0x25, - 0x73, 0x5c, 0x6e, 0x22, 0x2c, 0x20, 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x2c, 0x20, 0x66, 0x6e, 0x61, 0x6d, - 0x65, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x2c, 0x20, 0x73, 0x74, 0x72, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x28, 0x65, 0x72, 0x72, 0x6e, 0x6f, 0x29, 0x29, 0x3b, 0x0a, 0x09, 0x09, 0x72, 0x65, 0x74, - 0x75, 0x72, 0x6e, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, 0x53, 0x5f, 0x49, - 0x53, 0x44, 0x49, 0x52, 0x28, 0x73, 0x74, 0x2e, 0x73, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x29, - 0x29, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x3b, 0x0a, 0x0a, 0x09, 0x69, 0x66, 0x20, 0x28, - 0x73, 0x74, 0x2e, 0x73, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x21, 0x3d, 0x20, 0x73, 0x69, - 0x7a, 0x65, 0x29, 0x20, 0x7b, 0x0a, 0x09, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x66, 0x28, 0x22, - 0x28, 0x25, 0x64, 0x29, 0x20, 0x64, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x3a, 0x20, 0x25, 0x73, - 0x20, 0x77, 0x72, 0x6f, 0x6e, 0x67, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x25, 0x64, 0x20, 0x25, - 0x64, 0x5c, 0x6e, 0x22, 0x2c, 0x20, 0x0a, 0x09, 0x09, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, - 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x2c, 0x20, 0x66, 0x6e, 0x61, 0x6d, - 0x65, 0x2c, 0x20, 0x28, 0x69, 0x6e, 0x74, 0x29, 0x73, 0x74, 0x2e, 0x73, 0x74, 0x5f, 0x73, 0x69, - 0x7a, 0x65, 0x2c, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x3b, 0x0a, 0x09, 0x7d, 0x0a, 0x7d, 0x0a, - 0x0a, 0x76, 0x6f, 0x69, 0x64, 0x20, 0x64, 0x6f, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x28, - 0x63, 0x68, 0x61, 0x72, 0x20, 0x2a, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x69, 0x6e, 0x74, - 0x20, 0x73, 0x69, 0x7a, 0x65, 0x29, 0x0a, 0x7b, 0x0a, 0x09, 0x64, 0x6f, 0x5f, 0x6f, 0x70, 0x65, - 0x6e, 0x28, 0x66, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x35, 0x30, 0x30, 0x30, 0x2c, 0x20, 0x73, - 0x69, 0x7a, 0x65, 0x29, 0x3b, 0x0a, 0x09, 0x64, 0x6f, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x28, - 0x35, 0x30, 0x30, 0x30, 0x29, 0x3b, 0x0a, 0x7d, 0x0a -}; -#endif -static unsigned char comprbuf[TESTDATA_LEN]; -static unsigned char decomprbuf[TESTDATA_LEN]; - -int jffs2_decompress(unsigned char comprtype, unsigned char *cdata_in, - unsigned 
char *data_out, uint32_t cdatalen, uint32_t datalen); -unsigned char jffs2_compress(unsigned char *data_in, unsigned char *cpage_out, - uint32_t *datalen, uint32_t *cdatalen); - -int init_module(void ) { - unsigned char comprtype; - uint32_t c, d; - int ret; - - printk("Original data: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", - testdata[0],testdata[1],testdata[2],testdata[3], - testdata[4],testdata[5],testdata[6],testdata[7], - testdata[8],testdata[9],testdata[10],testdata[11], - testdata[12],testdata[13],testdata[14],testdata[15]); - d = TESTDATA_LEN; - c = TESTDATA_LEN; - comprtype = jffs2_compress(testdata, comprbuf, &d, &c); - - printk("jffs2_compress used compression type %d. Compressed size %d, uncompressed size %d\n", - comprtype, c, d); - printk("Compressed data: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", - comprbuf[0],comprbuf[1],comprbuf[2],comprbuf[3], - comprbuf[4],comprbuf[5],comprbuf[6],comprbuf[7], - comprbuf[8],comprbuf[9],comprbuf[10],comprbuf[11], - comprbuf[12],comprbuf[13],comprbuf[14],comprbuf[15]); - - ret = jffs2_decompress(comprtype, comprbuf, decomprbuf, c, d); - printk("jffs2_decompress returned %d\n", ret); - printk("Decompressed data: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", - decomprbuf[0],decomprbuf[1],decomprbuf[2],decomprbuf[3], - decomprbuf[4],decomprbuf[5],decomprbuf[6],decomprbuf[7], - decomprbuf[8],decomprbuf[9],decomprbuf[10],decomprbuf[11], - decomprbuf[12],decomprbuf[13],decomprbuf[14],decomprbuf[15]); - if (memcmp(decomprbuf, testdata, d)) - printk("Compression and decompression corrupted data\n"); - else - printk("Compression good for %d bytes\n", d); - return 1; -} diff --git a/fs/jffs2/debug.c b/fs/jffs2/debug.c new file mode 100644 index 00000000000..1090eb64b90 --- /dev/null +++ b/fs/jffs2/debug.c @@ -0,0 +1,866 @@ +/* + * JFFS2 -- Journalling Flash File System, Version 2. + * + * Copyright © 2001-2007 Red Hat, Inc. + * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org> + * + * Created by David Woodhouse <dwmw2@infradead.org> + * + * For licensing information, see the file 'LICENCE' in this directory. 
+ * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/pagemap.h> +#include <linux/crc32.h> +#include <linux/jffs2.h> +#include <linux/mtd/mtd.h> +#include <linux/slab.h> +#include "nodelist.h" +#include "debug.h" + +#ifdef JFFS2_DBG_SANITY_CHECKS + +void +__jffs2_dbg_acct_sanity_check_nolock(struct jffs2_sb_info *c, + struct jffs2_eraseblock *jeb) +{ + if (unlikely(jeb && jeb->used_size + jeb->dirty_size + + jeb->free_size + jeb->wasted_size + + jeb->unchecked_size != c->sector_size)) { + JFFS2_ERROR("eeep, space accounting for block at 0x%08x is screwed.\n", jeb->offset); + JFFS2_ERROR("free %#08x + dirty %#08x + used %#08x + wasted %#08x + unchecked %#08x != total %#08x.\n", + jeb->free_size, jeb->dirty_size, jeb->used_size, + jeb->wasted_size, jeb->unchecked_size, c->sector_size); + BUG(); + } + + if (unlikely(c->used_size + c->dirty_size + c->free_size + c->erasing_size + c->bad_size + + c->wasted_size + c->unchecked_size != c->flash_size)) { + JFFS2_ERROR("eeep, space accounting superblock info is screwed.\n"); + JFFS2_ERROR("free %#08x + dirty %#08x + used %#08x + erasing %#08x + bad %#08x + wasted %#08x + unchecked %#08x != total %#08x.\n", + c->free_size, c->dirty_size, c->used_size, c->erasing_size, c->bad_size, + c->wasted_size, c->unchecked_size, c->flash_size); + BUG(); + } +} + +void +__jffs2_dbg_acct_sanity_check(struct jffs2_sb_info *c, + struct jffs2_eraseblock *jeb) +{ + spin_lock(&c->erase_completion_lock); + jffs2_dbg_acct_sanity_check_nolock(c, jeb); + spin_unlock(&c->erase_completion_lock); +} + +#endif /* JFFS2_DBG_SANITY_CHECKS */ + +#ifdef JFFS2_DBG_PARANOIA_CHECKS +/* + * Check the fragtree. + */ +void +__jffs2_dbg_fragtree_paranoia_check(struct jffs2_inode_info *f) +{ + mutex_lock(&f->sem); + __jffs2_dbg_fragtree_paranoia_check_nolock(f); + mutex_unlock(&f->sem); +} + +void +__jffs2_dbg_fragtree_paranoia_check_nolock(struct jffs2_inode_info *f) +{ + struct jffs2_node_frag *frag; + int bitched = 0; + + for (frag = frag_first(&f->fragtree); frag; frag = frag_next(frag)) { + struct jffs2_full_dnode *fn = frag->node; + + if (!fn || !fn->raw) + continue; + + if (ref_flags(fn->raw) == REF_PRISTINE) { + if (fn->frags > 1) { + JFFS2_ERROR("REF_PRISTINE node at 0x%08x had %d frags. Tell dwmw2.\n", + ref_offset(fn->raw), fn->frags); + bitched = 1; + } + + /* A hole node which isn't multi-page should be garbage-collected + and merged anyway, so we just check for the frag size here, + rather than mucking around with actually reading the node + and checking the compression type, which is the real way + to tell a hole node. */ + if (frag->ofs & (PAGE_CACHE_SIZE-1) && frag_prev(frag) + && frag_prev(frag)->size < PAGE_CACHE_SIZE && frag_prev(frag)->node) { + JFFS2_ERROR("REF_PRISTINE node at 0x%08x had a previous non-hole frag in the same page. Tell dwmw2.\n", + ref_offset(fn->raw)); + bitched = 1; + } + + if ((frag->ofs+frag->size) & (PAGE_CACHE_SIZE-1) && frag_next(frag) + && frag_next(frag)->size < PAGE_CACHE_SIZE && frag_next(frag)->node) { + JFFS2_ERROR("REF_PRISTINE node at 0x%08x (%08x-%08x) had a following non-hole frag in the same page. Tell dwmw2.\n", + ref_offset(fn->raw), frag->ofs, frag->ofs+frag->size); + bitched = 1; + } + } + } + + if (bitched) { + JFFS2_ERROR("fragtree is corrupted.\n"); + __jffs2_dbg_dump_fragtree_nolock(f); + BUG(); + } +} + +/* + * Check if the flash contains all 0xFF before we start writing. 
+ */ +void +__jffs2_dbg_prewrite_paranoia_check(struct jffs2_sb_info *c, + uint32_t ofs, int len) +{ + size_t retlen; + int ret, i; + unsigned char *buf; + + buf = kmalloc(len, GFP_KERNEL); + if (!buf) + return; + + ret = jffs2_flash_read(c, ofs, len, &retlen, buf); + if (ret || (retlen != len)) { + JFFS2_WARNING("read %d bytes failed or short. ret %d, retlen %zd.\n", + len, ret, retlen); + kfree(buf); + return; + } + + ret = 0; + for (i = 0; i < len; i++) + if (buf[i] != 0xff) + ret = 1; + + if (ret) { + JFFS2_ERROR("argh, about to write node to %#08x on flash, but there are data already there. The first corrupted byte is at %#08x offset.\n", + ofs, ofs + i); + __jffs2_dbg_dump_buffer(buf, len, ofs); + kfree(buf); + BUG(); + } + + kfree(buf); +} + +void __jffs2_dbg_superblock_counts(struct jffs2_sb_info *c) +{ + struct jffs2_eraseblock *jeb; + uint32_t free = 0, dirty = 0, used = 0, wasted = 0, + erasing = 0, bad = 0, unchecked = 0; + int nr_counted = 0; + int dump = 0; + + if (c->gcblock) { + nr_counted++; + free += c->gcblock->free_size; + dirty += c->gcblock->dirty_size; + used += c->gcblock->used_size; + wasted += c->gcblock->wasted_size; + unchecked += c->gcblock->unchecked_size; + } + if (c->nextblock) { + nr_counted++; + free += c->nextblock->free_size; + dirty += c->nextblock->dirty_size; + used += c->nextblock->used_size; + wasted += c->nextblock->wasted_size; + unchecked += c->nextblock->unchecked_size; + } + list_for_each_entry(jeb, &c->clean_list, list) { + nr_counted++; + free += jeb->free_size; + dirty += jeb->dirty_size; + used += jeb->used_size; + wasted += jeb->wasted_size; + unchecked += jeb->unchecked_size; + } + list_for_each_entry(jeb, &c->very_dirty_list, list) { + nr_counted++; + free += jeb->free_size; + dirty += jeb->dirty_size; + used += jeb->used_size; + wasted += jeb->wasted_size; + unchecked += jeb->unchecked_size; + } + list_for_each_entry(jeb, &c->dirty_list, list) { + nr_counted++; + free += jeb->free_size; + dirty += jeb->dirty_size; + used += jeb->used_size; + wasted += jeb->wasted_size; + unchecked += jeb->unchecked_size; + } + list_for_each_entry(jeb, &c->erasable_list, list) { + nr_counted++; + free += jeb->free_size; + dirty += jeb->dirty_size; + used += jeb->used_size; + wasted += jeb->wasted_size; + unchecked += jeb->unchecked_size; + } + list_for_each_entry(jeb, &c->erasable_pending_wbuf_list, list) { + nr_counted++; + free += jeb->free_size; + dirty += jeb->dirty_size; + used += jeb->used_size; + wasted += jeb->wasted_size; + unchecked += jeb->unchecked_size; + } + list_for_each_entry(jeb, &c->erase_pending_list, list) { + nr_counted++; + free += jeb->free_size; + dirty += jeb->dirty_size; + used += jeb->used_size; + wasted += jeb->wasted_size; + unchecked += jeb->unchecked_size; + } + list_for_each_entry(jeb, &c->free_list, list) { + nr_counted++; + free += jeb->free_size; + dirty += jeb->dirty_size; + used += jeb->used_size; + wasted += jeb->wasted_size; + unchecked += jeb->unchecked_size; + } + list_for_each_entry(jeb, &c->bad_used_list, list) { + nr_counted++; + free += jeb->free_size; + dirty += jeb->dirty_size; + used += jeb->used_size; + wasted += jeb->wasted_size; + unchecked += jeb->unchecked_size; + } + + list_for_each_entry(jeb, &c->erasing_list, list) { + nr_counted++; + erasing += c->sector_size; + } + list_for_each_entry(jeb, &c->erase_checking_list, list) { + nr_counted++; + erasing += c->sector_size; + } + list_for_each_entry(jeb, &c->erase_complete_list, list) { + nr_counted++; + erasing += c->sector_size; + } + 
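/*
 * [Editor's illustration -- not part of the patch.] The list walks in
 * __jffs2_dbg_superblock_counts() re-derive the per-category totals from
 * every eraseblock, and the check() macro further down compares each
 * derived sum against the cached c->*_size counter. Taken together, the
 * fields must satisfy the same superblock invariant that
 * __jffs2_dbg_acct_sanity_check_nolock() asserts in one expression:
 *
 *	free + dirty + used + wasted + unchecked + erasing + bad
 *		== c->flash_size
 *
 * Any per-field mismatch (or a block count that disagrees with
 * c->nr_blocks) sets 'dump', which ends in a block-list dump and BUG().
 */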
list_for_each_entry(jeb, &c->bad_list, list) { + nr_counted++; + bad += c->sector_size; + } + +#define check(sz) \ +do { \ + if (sz != c->sz##_size) { \ + pr_warn("%s_size mismatch counted 0x%x, c->%s_size 0x%x\n", \ + #sz, sz, #sz, c->sz##_size); \ + dump = 1; \ + } \ +} while (0) + + check(free); + check(dirty); + check(used); + check(wasted); + check(unchecked); + check(bad); + check(erasing); + +#undef check + + if (nr_counted != c->nr_blocks) { + pr_warn("%s counted only 0x%x blocks of 0x%x. Where are the others?\n", + __func__, nr_counted, c->nr_blocks); + dump = 1; + } + + if (dump) { + __jffs2_dbg_dump_block_lists_nolock(c); + BUG(); + } +} + +/* + * Check the space accounting and node_ref list correctness for the JFFS2 erasable block 'jeb'. + */ +void +__jffs2_dbg_acct_paranoia_check(struct jffs2_sb_info *c, + struct jffs2_eraseblock *jeb) +{ + spin_lock(&c->erase_completion_lock); + __jffs2_dbg_acct_paranoia_check_nolock(c, jeb); + spin_unlock(&c->erase_completion_lock); +} + +void +__jffs2_dbg_acct_paranoia_check_nolock(struct jffs2_sb_info *c, + struct jffs2_eraseblock *jeb) +{ + uint32_t my_used_size = 0; + uint32_t my_unchecked_size = 0; + uint32_t my_dirty_size = 0; + struct jffs2_raw_node_ref *ref2 = jeb->first_node; + + while (ref2) { + uint32_t totlen = ref_totlen(c, jeb, ref2); + + if (ref_offset(ref2) < jeb->offset || + ref_offset(ref2) > jeb->offset + c->sector_size) { + JFFS2_ERROR("node_ref %#08x shouldn't be in block at %#08x.\n", + ref_offset(ref2), jeb->offset); + goto error; + + } + if (ref_flags(ref2) == REF_UNCHECKED) + my_unchecked_size += totlen; + else if (!ref_obsolete(ref2)) + my_used_size += totlen; + else + my_dirty_size += totlen; + + if ((!ref_next(ref2)) != (ref2 == jeb->last_node)) { + JFFS2_ERROR("node_ref for node at %#08x (mem %p) has next at %#08x (mem %p), last_node is at %#08x (mem %p).\n", + ref_offset(ref2), ref2, ref_offset(ref_next(ref2)), ref_next(ref2), + ref_offset(jeb->last_node), jeb->last_node); + goto error; + } + ref2 = ref_next(ref2); + } + + if (my_used_size != jeb->used_size) { + JFFS2_ERROR("Calculated used size %#08x != stored used size %#08x.\n", + my_used_size, jeb->used_size); + goto error; + } + + if (my_unchecked_size != jeb->unchecked_size) { + JFFS2_ERROR("Calculated unchecked size %#08x != stored unchecked size %#08x.\n", + my_unchecked_size, jeb->unchecked_size); + goto error; + } + +#if 0 + /* This should work when we implement ref->__totlen elemination */ + if (my_dirty_size != jeb->dirty_size + jeb->wasted_size) { + JFFS2_ERROR("Calculated dirty+wasted size %#08x != stored dirty + wasted size %#08x\n", + my_dirty_size, jeb->dirty_size + jeb->wasted_size); + goto error; + } + + if (jeb->free_size == 0 + && my_used_size + my_unchecked_size + my_dirty_size != c->sector_size) { + JFFS2_ERROR("The sum of all nodes in block (%#x) != size of block (%#x)\n", + my_used_size + my_unchecked_size + my_dirty_size, + c->sector_size); + goto error; + } +#endif + + if (!(c->flags & (JFFS2_SB_FLAG_BUILDING|JFFS2_SB_FLAG_SCANNING))) + __jffs2_dbg_superblock_counts(c); + + return; + +error: + __jffs2_dbg_dump_node_refs_nolock(c, jeb); + __jffs2_dbg_dump_jeb_nolock(jeb); + __jffs2_dbg_dump_block_lists_nolock(c); + BUG(); + +} +#endif /* JFFS2_DBG_PARANOIA_CHECKS */ + +#if defined(JFFS2_DBG_DUMPS) || defined(JFFS2_DBG_PARANOIA_CHECKS) +/* + * Dump the node_refs of the 'jeb' JFFS2 eraseblock. 
+ */ +void +__jffs2_dbg_dump_node_refs(struct jffs2_sb_info *c, + struct jffs2_eraseblock *jeb) +{ + spin_lock(&c->erase_completion_lock); + __jffs2_dbg_dump_node_refs_nolock(c, jeb); + spin_unlock(&c->erase_completion_lock); +} + +void +__jffs2_dbg_dump_node_refs_nolock(struct jffs2_sb_info *c, + struct jffs2_eraseblock *jeb) +{ + struct jffs2_raw_node_ref *ref; + int i = 0; + + printk(JFFS2_DBG_MSG_PREFIX " Dump node_refs of the eraseblock %#08x\n", jeb->offset); + if (!jeb->first_node) { + printk(JFFS2_DBG_MSG_PREFIX " no nodes in the eraseblock %#08x\n", jeb->offset); + return; + } + + printk(JFFS2_DBG); + for (ref = jeb->first_node; ; ref = ref_next(ref)) { + printk("%#08x", ref_offset(ref)); +#ifdef TEST_TOTLEN + printk("(%x)", ref->__totlen); +#endif + if (ref_next(ref)) + printk("->"); + else + break; + if (++i == 4) { + i = 0; + printk("\n" JFFS2_DBG); + } + } + printk("\n"); +} + +/* + * Dump an eraseblock's space accounting. + */ +void +__jffs2_dbg_dump_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) +{ + spin_lock(&c->erase_completion_lock); + __jffs2_dbg_dump_jeb_nolock(jeb); + spin_unlock(&c->erase_completion_lock); +} + +void +__jffs2_dbg_dump_jeb_nolock(struct jffs2_eraseblock *jeb) +{ + if (!jeb) + return; + + printk(JFFS2_DBG_MSG_PREFIX " dump space accounting for the eraseblock at %#08x:\n", + jeb->offset); + + printk(JFFS2_DBG "used_size: %#08x\n", jeb->used_size); + printk(JFFS2_DBG "dirty_size: %#08x\n", jeb->dirty_size); + printk(JFFS2_DBG "wasted_size: %#08x\n", jeb->wasted_size); + printk(JFFS2_DBG "unchecked_size: %#08x\n", jeb->unchecked_size); + printk(JFFS2_DBG "free_size: %#08x\n", jeb->free_size); +} + +void +__jffs2_dbg_dump_block_lists(struct jffs2_sb_info *c) +{ + spin_lock(&c->erase_completion_lock); + __jffs2_dbg_dump_block_lists_nolock(c); + spin_unlock(&c->erase_completion_lock); +} + +void +__jffs2_dbg_dump_block_lists_nolock(struct jffs2_sb_info *c) +{ + printk(JFFS2_DBG_MSG_PREFIX " dump JFFS2 blocks lists:\n"); + + printk(JFFS2_DBG "flash_size: %#08x\n", c->flash_size); + printk(JFFS2_DBG "used_size: %#08x\n", c->used_size); + printk(JFFS2_DBG "dirty_size: %#08x\n", c->dirty_size); + printk(JFFS2_DBG "wasted_size: %#08x\n", c->wasted_size); + printk(JFFS2_DBG "unchecked_size: %#08x\n", c->unchecked_size); + printk(JFFS2_DBG "free_size: %#08x\n", c->free_size); + printk(JFFS2_DBG "erasing_size: %#08x\n", c->erasing_size); + printk(JFFS2_DBG "bad_size: %#08x\n", c->bad_size); + printk(JFFS2_DBG "sector_size: %#08x\n", c->sector_size); + printk(JFFS2_DBG "jffs2_reserved_blocks size: %#08x\n", + c->sector_size * c->resv_blocks_write); + + if (c->nextblock) + printk(JFFS2_DBG "nextblock: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", + c->nextblock->offset, c->nextblock->used_size, + c->nextblock->dirty_size, c->nextblock->wasted_size, + c->nextblock->unchecked_size, c->nextblock->free_size); + else + printk(JFFS2_DBG "nextblock: NULL\n"); + + if (c->gcblock) + printk(JFFS2_DBG "gcblock: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", + c->gcblock->offset, c->gcblock->used_size, c->gcblock->dirty_size, + c->gcblock->wasted_size, c->gcblock->unchecked_size, c->gcblock->free_size); + else + printk(JFFS2_DBG "gcblock: NULL\n"); + + if (list_empty(&c->clean_list)) { + printk(JFFS2_DBG "clean_list: empty\n"); + } else { + struct list_head *this; + int numblocks = 0; + uint32_t dirty = 0; + + list_for_each(this, &c->clean_list) { + struct jffs2_eraseblock *jeb = list_entry(this, struct 
jffs2_eraseblock, list); + numblocks ++; + dirty += jeb->wasted_size; + if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { + printk(JFFS2_DBG "clean_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", + jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, + jeb->unchecked_size, jeb->free_size); + } + } + + printk (JFFS2_DBG "Contains %d blocks with total wasted size %u, average wasted size: %u\n", + numblocks, dirty, dirty / numblocks); + } + + if (list_empty(&c->very_dirty_list)) { + printk(JFFS2_DBG "very_dirty_list: empty\n"); + } else { + struct list_head *this; + int numblocks = 0; + uint32_t dirty = 0; + + list_for_each(this, &c->very_dirty_list) { + struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); + + numblocks ++; + dirty += jeb->dirty_size; + if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { + printk(JFFS2_DBG "very_dirty_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", + jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, + jeb->unchecked_size, jeb->free_size); + } + } + + printk (JFFS2_DBG "Contains %d blocks with total dirty size %u, average dirty size: %u\n", + numblocks, dirty, dirty / numblocks); + } + + if (list_empty(&c->dirty_list)) { + printk(JFFS2_DBG "dirty_list: empty\n"); + } else { + struct list_head *this; + int numblocks = 0; + uint32_t dirty = 0; + + list_for_each(this, &c->dirty_list) { + struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); + + numblocks ++; + dirty += jeb->dirty_size; + if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { + printk(JFFS2_DBG "dirty_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", + jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, + jeb->unchecked_size, jeb->free_size); + } + } + + printk (JFFS2_DBG "contains %d blocks with total dirty size %u, average dirty size: %u\n", + numblocks, dirty, dirty / numblocks); + } + + if (list_empty(&c->erasable_list)) { + printk(JFFS2_DBG "erasable_list: empty\n"); + } else { + struct list_head *this; + + list_for_each(this, &c->erasable_list) { + struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); + + if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { + printk(JFFS2_DBG "erasable_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", + jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, + jeb->unchecked_size, jeb->free_size); + } + } + } + + if (list_empty(&c->erasing_list)) { + printk(JFFS2_DBG "erasing_list: empty\n"); + } else { + struct list_head *this; + + list_for_each(this, &c->erasing_list) { + struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); + + if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { + printk(JFFS2_DBG "erasing_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", + jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, + jeb->unchecked_size, jeb->free_size); + } + } + } + if (list_empty(&c->erase_checking_list)) { + printk(JFFS2_DBG "erase_checking_list: empty\n"); + } else { + struct list_head *this; + + list_for_each(this, &c->erase_checking_list) { + struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); + + if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && 
jeb->wasted_size == 0)) { + printk(JFFS2_DBG "erase_checking_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", + jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, + jeb->unchecked_size, jeb->free_size); + } + } + } + + if (list_empty(&c->erase_pending_list)) { + printk(JFFS2_DBG "erase_pending_list: empty\n"); + } else { + struct list_head *this; + + list_for_each(this, &c->erase_pending_list) { + struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); + + if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { + printk(JFFS2_DBG "erase_pending_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", + jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, + jeb->unchecked_size, jeb->free_size); + } + } + } + + if (list_empty(&c->erasable_pending_wbuf_list)) { + printk(JFFS2_DBG "erasable_pending_wbuf_list: empty\n"); + } else { + struct list_head *this; + + list_for_each(this, &c->erasable_pending_wbuf_list) { + struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); + + if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { + printk(JFFS2_DBG "erasable_pending_wbuf_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", + jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, + jeb->unchecked_size, jeb->free_size); + } + } + } + + if (list_empty(&c->free_list)) { + printk(JFFS2_DBG "free_list: empty\n"); + } else { + struct list_head *this; + + list_for_each(this, &c->free_list) { + struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); + + if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { + printk(JFFS2_DBG "free_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", + jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, + jeb->unchecked_size, jeb->free_size); + } + } + } + + if (list_empty(&c->bad_list)) { + printk(JFFS2_DBG "bad_list: empty\n"); + } else { + struct list_head *this; + + list_for_each(this, &c->bad_list) { + struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); + + if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { + printk(JFFS2_DBG "bad_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", + jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, + jeb->unchecked_size, jeb->free_size); + } + } + } + + if (list_empty(&c->bad_used_list)) { + printk(JFFS2_DBG "bad_used_list: empty\n"); + } else { + struct list_head *this; + + list_for_each(this, &c->bad_used_list) { + struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); + + if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) { + printk(JFFS2_DBG "bad_used_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n", + jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, + jeb->unchecked_size, jeb->free_size); + } + } + } +} + +void +__jffs2_dbg_dump_fragtree(struct jffs2_inode_info *f) +{ + mutex_lock(&f->sem); + jffs2_dbg_dump_fragtree_nolock(f); + mutex_unlock(&f->sem); +} + +void +__jffs2_dbg_dump_fragtree_nolock(struct jffs2_inode_info *f) +{ + struct jffs2_node_frag *this = frag_first(&f->fragtree); + uint32_t lastofs = 0; + int buggy = 0; + + printk(JFFS2_DBG_MSG_PREFIX " dump fragtree of ino #%u\n", f->inocache->ino); + 
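/*
 * [Editor's note -- not part of the patch.] The loop below walks the
 * fragment list in file order (frag_first()/frag_next()) and checks that
 * the fragments tile the inode's data with no gaps: each frag must begin
 * exactly where the previous one ended (this->ofs == lastofs). A hole in
 * the *file* is still a frag -- one whose ->node is NULL, printed as
 * "hole" by the loop -- so only a genuine gap in the tree sets 'buggy'
 * and triggers the "frag tree got a hole in it" BUG().
 */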
while(this) { + if (this->node) + printk(JFFS2_DBG "frag %#04x-%#04x: %#08x(%d) on flash (*%p), left (%p), right (%p), parent (%p)\n", + this->ofs, this->ofs+this->size, ref_offset(this->node->raw), + ref_flags(this->node->raw), this, frag_left(this), frag_right(this), + frag_parent(this)); + else + printk(JFFS2_DBG "frag %#04x-%#04x: hole (*%p). left (%p), right (%p), parent (%p)\n", + this->ofs, this->ofs+this->size, this, frag_left(this), + frag_right(this), frag_parent(this)); + if (this->ofs != lastofs) + buggy = 1; + lastofs = this->ofs + this->size; + this = frag_next(this); + } + + if (f->metadata) + printk(JFFS2_DBG "metadata at 0x%08x\n", ref_offset(f->metadata->raw)); + + if (buggy) { + JFFS2_ERROR("frag tree got a hole in it.\n"); + BUG(); + } +} + +#define JFFS2_BUFDUMP_BYTES_PER_LINE 32 +void +__jffs2_dbg_dump_buffer(unsigned char *buf, int len, uint32_t offs) +{ + int skip; + int i; + + printk(JFFS2_DBG_MSG_PREFIX " dump from offset %#08x to offset %#08x (%x bytes).\n", + offs, offs + len, len); + i = skip = offs % JFFS2_BUFDUMP_BYTES_PER_LINE; + offs = offs & ~(JFFS2_BUFDUMP_BYTES_PER_LINE - 1); + + if (skip != 0) + printk(JFFS2_DBG "%#08x: ", offs); + + while (skip--) + printk(" "); + + while (i < len) { + if ((i % JFFS2_BUFDUMP_BYTES_PER_LINE) == 0 && i != len -1) { + if (i != 0) + printk("\n"); + offs += JFFS2_BUFDUMP_BYTES_PER_LINE; + printk(JFFS2_DBG "%0#8x: ", offs); + } + + printk("%02x ", buf[i]); + + i += 1; + } + + printk("\n"); +} + +/* + * Dump a JFFS2 node. + */ +void +__jffs2_dbg_dump_node(struct jffs2_sb_info *c, uint32_t ofs) +{ + union jffs2_node_union node; + int len = sizeof(union jffs2_node_union); + size_t retlen; + uint32_t crc; + int ret; + + printk(JFFS2_DBG_MSG_PREFIX " dump node at offset %#08x.\n", ofs); + + ret = jffs2_flash_read(c, ofs, len, &retlen, (unsigned char *)&node); + if (ret || (retlen != len)) { + JFFS2_ERROR("read %d bytes failed or short. 
ret %d, retlen %zd.\n", + len, ret, retlen); + return; + } + + printk(JFFS2_DBG "magic:\t%#04x\n", je16_to_cpu(node.u.magic)); + printk(JFFS2_DBG "nodetype:\t%#04x\n", je16_to_cpu(node.u.nodetype)); + printk(JFFS2_DBG "totlen:\t%#08x\n", je32_to_cpu(node.u.totlen)); + printk(JFFS2_DBG "hdr_crc:\t%#08x\n", je32_to_cpu(node.u.hdr_crc)); + + crc = crc32(0, &node.u, sizeof(node.u) - 4); + if (crc != je32_to_cpu(node.u.hdr_crc)) { + JFFS2_ERROR("wrong common header CRC.\n"); + return; + } + + if (je16_to_cpu(node.u.magic) != JFFS2_MAGIC_BITMASK && + je16_to_cpu(node.u.magic) != JFFS2_OLD_MAGIC_BITMASK) + { + JFFS2_ERROR("wrong node magic: %#04x instead of %#04x.\n", + je16_to_cpu(node.u.magic), JFFS2_MAGIC_BITMASK); + return; + } + + switch(je16_to_cpu(node.u.nodetype)) { + + case JFFS2_NODETYPE_INODE: + + printk(JFFS2_DBG "the node is inode node\n"); + printk(JFFS2_DBG "ino:\t%#08x\n", je32_to_cpu(node.i.ino)); + printk(JFFS2_DBG "version:\t%#08x\n", je32_to_cpu(node.i.version)); + printk(JFFS2_DBG "mode:\t%#08x\n", node.i.mode.m); + printk(JFFS2_DBG "uid:\t%#04x\n", je16_to_cpu(node.i.uid)); + printk(JFFS2_DBG "gid:\t%#04x\n", je16_to_cpu(node.i.gid)); + printk(JFFS2_DBG "isize:\t%#08x\n", je32_to_cpu(node.i.isize)); + printk(JFFS2_DBG "atime:\t%#08x\n", je32_to_cpu(node.i.atime)); + printk(JFFS2_DBG "mtime:\t%#08x\n", je32_to_cpu(node.i.mtime)); + printk(JFFS2_DBG "ctime:\t%#08x\n", je32_to_cpu(node.i.ctime)); + printk(JFFS2_DBG "offset:\t%#08x\n", je32_to_cpu(node.i.offset)); + printk(JFFS2_DBG "csize:\t%#08x\n", je32_to_cpu(node.i.csize)); + printk(JFFS2_DBG "dsize:\t%#08x\n", je32_to_cpu(node.i.dsize)); + printk(JFFS2_DBG "compr:\t%#02x\n", node.i.compr); + printk(JFFS2_DBG "usercompr:\t%#02x\n", node.i.usercompr); + printk(JFFS2_DBG "flags:\t%#04x\n", je16_to_cpu(node.i.flags)); + printk(JFFS2_DBG "data_crc:\t%#08x\n", je32_to_cpu(node.i.data_crc)); + printk(JFFS2_DBG "node_crc:\t%#08x\n", je32_to_cpu(node.i.node_crc)); + + crc = crc32(0, &node.i, sizeof(node.i) - 8); + if (crc != je32_to_cpu(node.i.node_crc)) { + JFFS2_ERROR("wrong node header CRC.\n"); + return; + } + break; + + case JFFS2_NODETYPE_DIRENT: + + printk(JFFS2_DBG "the node is dirent node\n"); + printk(JFFS2_DBG "pino:\t%#08x\n", je32_to_cpu(node.d.pino)); + printk(JFFS2_DBG "version:\t%#08x\n", je32_to_cpu(node.d.version)); + printk(JFFS2_DBG "ino:\t%#08x\n", je32_to_cpu(node.d.ino)); + printk(JFFS2_DBG "mctime:\t%#08x\n", je32_to_cpu(node.d.mctime)); + printk(JFFS2_DBG "nsize:\t%#02x\n", node.d.nsize); + printk(JFFS2_DBG "type:\t%#02x\n", node.d.type); + printk(JFFS2_DBG "node_crc:\t%#08x\n", je32_to_cpu(node.d.node_crc)); + printk(JFFS2_DBG "name_crc:\t%#08x\n", je32_to_cpu(node.d.name_crc)); + + node.d.name[node.d.nsize] = '\0'; + printk(JFFS2_DBG "name:\t\"%s\"\n", node.d.name); + + crc = crc32(0, &node.d, sizeof(node.d) - 8); + if (crc != je32_to_cpu(node.d.node_crc)) { + JFFS2_ERROR("wrong node header CRC.\n"); + return; + } + break; + + default: + printk(JFFS2_DBG "node type is unknown\n"); + break; + } +} +#endif /* JFFS2_DBG_DUMPS || JFFS2_DBG_PARANOIA_CHECKS */ diff --git a/fs/jffs2/debug.h b/fs/jffs2/debug.h new file mode 100644 index 00000000000..4fd9be4cbc9 --- /dev/null +++ b/fs/jffs2/debug.h @@ -0,0 +1,275 @@ +/* + * JFFS2 -- Journalling Flash File System, Version 2. + * + * Copyright © 2001-2007 Red Hat, Inc. + * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org> + * + * Created by David Woodhouse <dwmw2@infradead.org> + * + * For licensing information, see the file 'LICENCE' in this directory. 
+ * + */ + +#ifndef _JFFS2_DEBUG_H_ +#define _JFFS2_DEBUG_H_ + +#include <linux/sched.h> + +#ifndef CONFIG_JFFS2_FS_DEBUG +#define CONFIG_JFFS2_FS_DEBUG 0 +#endif + +#if CONFIG_JFFS2_FS_DEBUG > 0 +/* Enable "paranoia" checks and dumps */ +#define JFFS2_DBG_PARANOIA_CHECKS +#define JFFS2_DBG_DUMPS + +/* + * By defining/undefining the below macros one may select debugging messages + * fro specific JFFS2 subsystems. + */ +#define JFFS2_DBG_READINODE_MESSAGES +#define JFFS2_DBG_FRAGTREE_MESSAGES +#define JFFS2_DBG_DENTLIST_MESSAGES +#define JFFS2_DBG_NODEREF_MESSAGES +#define JFFS2_DBG_INOCACHE_MESSAGES +#define JFFS2_DBG_SUMMARY_MESSAGES +#define JFFS2_DBG_FSBUILD_MESSAGES +#endif + +#if CONFIG_JFFS2_FS_DEBUG > 1 +#define JFFS2_DBG_FRAGTREE2_MESSAGES +#define JFFS2_DBG_READINODE2_MESSAGES +#define JFFS2_DBG_MEMALLOC_MESSAGES +#endif + +/* Sanity checks are supposed to be light-weight and enabled by default */ +#define JFFS2_DBG_SANITY_CHECKS + +/* + * Dx() are mainly used for debugging messages, they must go away and be + * superseded by nicer dbg_xxx() macros... + */ +#if CONFIG_JFFS2_FS_DEBUG > 0 +#define DEBUG +#define D1(x) x +#else +#define D1(x) +#endif + +#if CONFIG_JFFS2_FS_DEBUG > 1 +#define D2(x) x +#else +#define D2(x) +#endif + +#define jffs2_dbg(level, fmt, ...) \ +do { \ + if (CONFIG_JFFS2_FS_DEBUG >= level) \ + pr_debug(fmt, ##__VA_ARGS__); \ +} while (0) + +/* The prefixes of JFFS2 messages */ +#define JFFS2_DBG KERN_DEBUG +#define JFFS2_DBG_PREFIX "[JFFS2 DBG]" +#define JFFS2_DBG_MSG_PREFIX JFFS2_DBG JFFS2_DBG_PREFIX + +/* JFFS2 message macros */ +#define JFFS2_ERROR(fmt, ...) \ + pr_err("error: (%d) %s: " fmt, \ + task_pid_nr(current), __func__, ##__VA_ARGS__) + +#define JFFS2_WARNING(fmt, ...) \ + pr_warn("warning: (%d) %s: " fmt, \ + task_pid_nr(current), __func__, ##__VA_ARGS__) + +#define JFFS2_NOTICE(fmt, ...) \ + pr_notice("notice: (%d) %s: " fmt, \ + task_pid_nr(current), __func__, ##__VA_ARGS__) + +#define JFFS2_DEBUG(fmt, ...) \ + printk(KERN_DEBUG "[JFFS2 DBG] (%d) %s: " fmt, \ + task_pid_nr(current), __func__, ##__VA_ARGS__) + +/* + * We split our debugging messages on several parts, depending on the JFFS2 + * subsystem the message belongs to. + */ +/* Read inode debugging messages */ +#ifdef JFFS2_DBG_READINODE_MESSAGES +#define dbg_readinode(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) +#else +#define dbg_readinode(fmt, ...) +#endif +#ifdef JFFS2_DBG_READINODE2_MESSAGES +#define dbg_readinode2(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) +#else +#define dbg_readinode2(fmt, ...) +#endif + +/* Fragtree build debugging messages */ +#ifdef JFFS2_DBG_FRAGTREE_MESSAGES +#define dbg_fragtree(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) +#else +#define dbg_fragtree(fmt, ...) +#endif +#ifdef JFFS2_DBG_FRAGTREE2_MESSAGES +#define dbg_fragtree2(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) +#else +#define dbg_fragtree2(fmt, ...) +#endif + +/* Directory entry list manilulation debugging messages */ +#ifdef JFFS2_DBG_DENTLIST_MESSAGES +#define dbg_dentlist(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) +#else +#define dbg_dentlist(fmt, ...) +#endif + +/* Print the messages about manipulating node_refs */ +#ifdef JFFS2_DBG_NODEREF_MESSAGES +#define dbg_noderef(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) +#else +#define dbg_noderef(fmt, ...) +#endif + +/* Manipulations with the list of inodes (JFFS2 inocache) */ +#ifdef JFFS2_DBG_INOCACHE_MESSAGES +#define dbg_inocache(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) +#else +#define dbg_inocache(fmt, ...) 
+#endif + +/* Summary debugging messages */ +#ifdef JFFS2_DBG_SUMMARY_MESSAGES +#define dbg_summary(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) +#else +#define dbg_summary(fmt, ...) +#endif + +/* File system build messages */ +#ifdef JFFS2_DBG_FSBUILD_MESSAGES +#define dbg_fsbuild(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) +#else +#define dbg_fsbuild(fmt, ...) +#endif + +/* Watch the object allocations */ +#ifdef JFFS2_DBG_MEMALLOC_MESSAGES +#define dbg_memalloc(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) +#else +#define dbg_memalloc(fmt, ...) +#endif + +/* Watch the XATTR subsystem */ +#ifdef JFFS2_DBG_XATTR_MESSAGES +#define dbg_xattr(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) +#else +#define dbg_xattr(fmt, ...) +#endif + +/* "Sanity" checks */ +void +__jffs2_dbg_acct_sanity_check_nolock(struct jffs2_sb_info *c, + struct jffs2_eraseblock *jeb); +void +__jffs2_dbg_acct_sanity_check(struct jffs2_sb_info *c, + struct jffs2_eraseblock *jeb); + +/* "Paranoia" checks */ +void +__jffs2_dbg_fragtree_paranoia_check(struct jffs2_inode_info *f); +void +__jffs2_dbg_fragtree_paranoia_check_nolock(struct jffs2_inode_info *f); +void +__jffs2_dbg_acct_paranoia_check(struct jffs2_sb_info *c, + struct jffs2_eraseblock *jeb); +void +__jffs2_dbg_acct_paranoia_check_nolock(struct jffs2_sb_info *c, + struct jffs2_eraseblock *jeb); +void +__jffs2_dbg_prewrite_paranoia_check(struct jffs2_sb_info *c, + uint32_t ofs, int len); + +/* "Dump" functions */ +void +__jffs2_dbg_dump_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); +void +__jffs2_dbg_dump_jeb_nolock(struct jffs2_eraseblock *jeb); +void +__jffs2_dbg_dump_block_lists(struct jffs2_sb_info *c); +void +__jffs2_dbg_dump_block_lists_nolock(struct jffs2_sb_info *c); +void +__jffs2_dbg_dump_node_refs(struct jffs2_sb_info *c, + struct jffs2_eraseblock *jeb); +void +__jffs2_dbg_dump_node_refs_nolock(struct jffs2_sb_info *c, + struct jffs2_eraseblock *jeb); +void +__jffs2_dbg_dump_fragtree(struct jffs2_inode_info *f); +void +__jffs2_dbg_dump_fragtree_nolock(struct jffs2_inode_info *f); +void +__jffs2_dbg_dump_buffer(unsigned char *buf, int len, uint32_t offs); +void +__jffs2_dbg_dump_node(struct jffs2_sb_info *c, uint32_t ofs); + +#ifdef JFFS2_DBG_PARANOIA_CHECKS +#define jffs2_dbg_fragtree_paranoia_check(f) \ + __jffs2_dbg_fragtree_paranoia_check(f) +#define jffs2_dbg_fragtree_paranoia_check_nolock(f) \ + __jffs2_dbg_fragtree_paranoia_check_nolock(f) +#define jffs2_dbg_acct_paranoia_check(c, jeb) \ + __jffs2_dbg_acct_paranoia_check(c,jeb) +#define jffs2_dbg_acct_paranoia_check_nolock(c, jeb) \ + __jffs2_dbg_acct_paranoia_check_nolock(c,jeb) +#define jffs2_dbg_prewrite_paranoia_check(c, ofs, len) \ + __jffs2_dbg_prewrite_paranoia_check(c, ofs, len) +#else +#define jffs2_dbg_fragtree_paranoia_check(f) +#define jffs2_dbg_fragtree_paranoia_check_nolock(f) +#define jffs2_dbg_acct_paranoia_check(c, jeb) +#define jffs2_dbg_acct_paranoia_check_nolock(c, jeb) +#define jffs2_dbg_prewrite_paranoia_check(c, ofs, len) +#endif /* !JFFS2_PARANOIA_CHECKS */ + +#ifdef JFFS2_DBG_DUMPS +#define jffs2_dbg_dump_jeb(c, jeb) \ + __jffs2_dbg_dump_jeb(c, jeb); +#define jffs2_dbg_dump_jeb_nolock(jeb) \ + __jffs2_dbg_dump_jeb_nolock(jeb); +#define jffs2_dbg_dump_block_lists(c) \ + __jffs2_dbg_dump_block_lists(c) +#define jffs2_dbg_dump_block_lists_nolock(c) \ + __jffs2_dbg_dump_block_lists_nolock(c) +#define jffs2_dbg_dump_fragtree(f) \ + __jffs2_dbg_dump_fragtree(f); +#define jffs2_dbg_dump_fragtree_nolock(f) \ + __jffs2_dbg_dump_fragtree_nolock(f); +#define jffs2_dbg_dump_buffer(buf, 
len, offs) \ + __jffs2_dbg_dump_buffer(*buf, len, offs); +#define jffs2_dbg_dump_node(c, ofs) \ + __jffs2_dbg_dump_node(c, ofs); +#else +#define jffs2_dbg_dump_jeb(c, jeb) +#define jffs2_dbg_dump_jeb_nolock(jeb) +#define jffs2_dbg_dump_block_lists(c) +#define jffs2_dbg_dump_block_lists_nolock(c) +#define jffs2_dbg_dump_fragtree(f) +#define jffs2_dbg_dump_fragtree_nolock(f) +#define jffs2_dbg_dump_buffer(buf, len, offs) +#define jffs2_dbg_dump_node(c, ofs) +#endif /* !JFFS2_DBG_DUMPS */ + +#ifdef JFFS2_DBG_SANITY_CHECKS +#define jffs2_dbg_acct_sanity_check(c, jeb) \ + __jffs2_dbg_acct_sanity_check(c, jeb) +#define jffs2_dbg_acct_sanity_check_nolock(c, jeb) \ + __jffs2_dbg_acct_sanity_check_nolock(c, jeb) +#else +#define jffs2_dbg_acct_sanity_check(c, jeb) +#define jffs2_dbg_acct_sanity_check_nolock(c, jeb) +#endif /* !JFFS2_DBG_SANITY_CHECKS */ + +#endif /* _JFFS2_DEBUG_H_ */ diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c index 3ca0d25eef1..938556025d6 100644 --- a/fs/jffs2/dir.c +++ b/fs/jffs2/dir.c @@ -1,52 +1,53 @@ /* * JFFS2 -- Journalling Flash File System, Version 2. * - * Copyright (C) 2001-2003 Red Hat, Inc. + * Copyright © 2001-2007 Red Hat, Inc. + * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org> * * Created by David Woodhouse <dwmw2@infradead.org> * * For licensing information, see the file 'LICENCE' in this directory. * - * $Id: dir.c,v 1.86 2005/07/06 12:13:09 dwmw2 Exp $ - * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/kernel.h> #include <linux/slab.h> -#include <linux/sched.h> #include <linux/fs.h> #include <linux/crc32.h> #include <linux/jffs2.h> -#include <linux/jffs2_fs_i.h> -#include <linux/jffs2_fs_sb.h> +#include "jffs2_fs_i.h" +#include "jffs2_fs_sb.h" #include <linux/time.h> #include "nodelist.h" -static int jffs2_readdir (struct file *, void *, filldir_t); +static int jffs2_readdir (struct file *, struct dir_context *); -static int jffs2_create (struct inode *,struct dentry *,int, - struct nameidata *); +static int jffs2_create (struct inode *,struct dentry *,umode_t, + bool); static struct dentry *jffs2_lookup (struct inode *,struct dentry *, - struct nameidata *); + unsigned int); static int jffs2_link (struct dentry *,struct inode *,struct dentry *); static int jffs2_unlink (struct inode *,struct dentry *); static int jffs2_symlink (struct inode *,struct dentry *,const char *); -static int jffs2_mkdir (struct inode *,struct dentry *,int); +static int jffs2_mkdir (struct inode *,struct dentry *,umode_t); static int jffs2_rmdir (struct inode *,struct dentry *); -static int jffs2_mknod (struct inode *,struct dentry *,int,dev_t); +static int jffs2_mknod (struct inode *,struct dentry *,umode_t,dev_t); static int jffs2_rename (struct inode *, struct dentry *, - struct inode *, struct dentry *); + struct inode *, struct dentry *); -struct file_operations jffs2_dir_operations = +const struct file_operations jffs2_dir_operations = { .read = generic_read_dir, - .readdir = jffs2_readdir, - .ioctl = jffs2_ioctl, - .fsync = jffs2_fsync + .iterate = jffs2_readdir, + .unlocked_ioctl=jffs2_ioctl, + .fsync = jffs2_fsync, + .llseek = generic_file_llseek, }; -struct inode_operations jffs2_dir_inode_operations = +const struct inode_operations jffs2_dir_inode_operations = { .create = jffs2_create, .lookup = jffs2_lookup, @@ -57,35 +58,42 @@ struct inode_operations jffs2_dir_inode_operations = .rmdir = jffs2_rmdir, .mknod = jffs2_mknod, .rename = jffs2_rename, + .get_acl = jffs2_get_acl, + .set_acl = jffs2_set_acl, .setattr = jffs2_setattr, + .setxattr = 
jffs2_setxattr, + .getxattr = jffs2_getxattr, + .listxattr = jffs2_listxattr, + .removexattr = jffs2_removexattr }; /***********************************************************************/ /* We keep the dirent list sorted in increasing order of name hash, - and we use the same hash function as the dentries. Makes this + and we use the same hash function as the dentries. Makes this nice and simple */ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target, - struct nameidata *nd) + unsigned int flags) { struct jffs2_inode_info *dir_f; - struct jffs2_sb_info *c; struct jffs2_full_dirent *fd = NULL, *fd_list; uint32_t ino = 0; struct inode *inode = NULL; - D1(printk(KERN_DEBUG "jffs2_lookup()\n")); + jffs2_dbg(1, "jffs2_lookup()\n"); + + if (target->d_name.len > JFFS2_MAX_NAME_LEN) + return ERR_PTR(-ENAMETOOLONG); dir_f = JFFS2_INODE_INFO(dir_i); - c = JFFS2_SB_INFO(dir_i->i_sb); - down(&dir_f->sem); + mutex_lock(&dir_f->sem); /* NB: The 2.2 backport will need to explicitly check for '.' and '..' here */ for (fd_list = dir_f->dents; fd_list && fd_list->nhash <= target->d_name.hash; fd_list = fd_list->next) { - if (fd_list->nhash == target->d_name.hash && + if (fd_list->nhash == target->d_name.hash && (!fd || fd_list->version > fd->version) && strlen(fd_list->name) == target->d_name.len && !strncmp(fd_list->name, target->d_name.name, target->d_name.len)) { @@ -94,84 +102,61 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target, } if (fd) ino = fd->ino; - up(&dir_f->sem); + mutex_unlock(&dir_f->sem); if (ino) { - inode = iget(dir_i->i_sb, ino); - if (!inode) { - printk(KERN_WARNING "iget() failed for ino #%u\n", ino); - return (ERR_PTR(-EIO)); - } + inode = jffs2_iget(dir_i->i_sb, ino); + if (IS_ERR(inode)) + pr_warn("iget() failed for ino #%u\n", ino); } - d_add(target, inode); - - return NULL; + return d_splice_alias(inode, target); } /***********************************************************************/ -static int jffs2_readdir(struct file *filp, void *dirent, filldir_t filldir) +static int jffs2_readdir(struct file *file, struct dir_context *ctx) { - struct jffs2_inode_info *f; - struct jffs2_sb_info *c; - struct inode *inode = filp->f_dentry->d_inode; + struct inode *inode = file_inode(file); + struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); struct jffs2_full_dirent *fd; - unsigned long offset, curofs; - - D1(printk(KERN_DEBUG "jffs2_readdir() for dir_i #%lu\n", filp->f_dentry->d_inode->i_ino)); + unsigned long curofs = 1; - f = JFFS2_INODE_INFO(inode); - c = JFFS2_SB_INFO(inode->i_sb); + jffs2_dbg(1, "jffs2_readdir() for dir_i #%lu\n", inode->i_ino); - offset = filp->f_pos; + if (!dir_emit_dots(file, ctx)) + return 0; - if (offset == 0) { - D1(printk(KERN_DEBUG "Dirent 0: \".\", ino #%lu\n", inode->i_ino)); - if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0) - goto out; - offset++; - } - if (offset == 1) { - unsigned long pino = parent_ino(filp->f_dentry); - D1(printk(KERN_DEBUG "Dirent 1: \"..\", ino #%lu\n", pino)); - if (filldir(dirent, "..", 2, 1, pino, DT_DIR) < 0) - goto out; - offset++; - } - - curofs=1; - down(&f->sem); + mutex_lock(&f->sem); for (fd = f->dents; fd; fd = fd->next) { - curofs++; - /* First loop: curofs = 2; offset = 2 */ - if (curofs < offset) { - D2(printk(KERN_DEBUG "Skipping dirent: \"%s\", ino #%u, type %d, because curofs %ld < offset %ld\n", - fd->name, fd->ino, fd->type, curofs, offset)); + /* First loop: curofs = 2; pos = 2 */ + if (curofs < ctx->pos) { + jffs2_dbg(2, "Skipping dirent: 
\"%s\", ino #%u, type %d, because curofs %ld < offset %ld\n", + fd->name, fd->ino, fd->type, curofs, (unsigned long)ctx->pos); continue; } if (!fd->ino) { - D2(printk(KERN_DEBUG "Skipping deletion dirent \"%s\"\n", fd->name)); - offset++; + jffs2_dbg(2, "Skipping deletion dirent \"%s\"\n", + fd->name); + ctx->pos++; continue; } - D2(printk(KERN_DEBUG "Dirent %ld: \"%s\", ino #%u, type %d\n", offset, fd->name, fd->ino, fd->type)); - if (filldir(dirent, fd->name, strlen(fd->name), offset, fd->ino, fd->type) < 0) + jffs2_dbg(2, "Dirent %ld: \"%s\", ino #%u, type %d\n", + (unsigned long)ctx->pos, fd->name, fd->ino, fd->type); + if (!dir_emit(ctx, fd->name, strlen(fd->name), fd->ino, fd->type)) break; - offset++; + ctx->pos++; } - up(&f->sem); - out: - filp->f_pos = offset; + mutex_unlock(&f->sem); return 0; } /***********************************************************************/ -static int jffs2_create(struct inode *dir_i, struct dentry *dentry, int mode, - struct nameidata *nd) +static int jffs2_create(struct inode *dir_i, struct dentry *dentry, + umode_t mode, bool excl) { struct jffs2_raw_inode *ri; struct jffs2_inode_info *f, *dir_f; @@ -182,15 +167,15 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry, int mode, ri = jffs2_alloc_raw_inode(); if (!ri) return -ENOMEM; - + c = JFFS2_SB_INFO(dir_i->i_sb); - D1(printk(KERN_DEBUG "jffs2_create()\n")); + jffs2_dbg(1, "%s()\n", __func__); inode = jffs2_new_inode(dir_i, mode, ri); if (IS_ERR(inode)) { - D1(printk(KERN_DEBUG "jffs2_new_inode() failed\n")); + jffs2_dbg(1, "jffs2_new_inode() failed\n"); jffs2_free_raw_inode(ri); return PTR_ERR(inode); } @@ -203,24 +188,33 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry, int mode, f = JFFS2_INODE_INFO(inode); dir_f = JFFS2_INODE_INFO(dir_i); - ret = jffs2_do_create(c, dir_f, f, ri, - dentry->d_name.name, dentry->d_name.len); + /* jffs2_do_create() will want to lock it, _after_ reserving + space and taking c-alloc_sem. If we keep it locked here, + lockdep gets unhappy (although it's a false positive; + nothing else will be looking at this inode yet so there's + no chance of AB-BA deadlock involving its f->sem). */ + mutex_unlock(&f->sem); - if (ret) { - make_bad_inode(inode); - iput(inode); - jffs2_free_raw_inode(ri); - return ret; - } + ret = jffs2_do_create(c, dir_f, f, ri, &dentry->d_name); + if (ret) + goto fail; dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(ri->ctime)); jffs2_free_raw_inode(ri); - d_instantiate(dentry, inode); - D1(printk(KERN_DEBUG "jffs2_create: Created ino #%lu with mode %o, nlink %d(%d). nrpages %ld\n", - inode->i_ino, inode->i_mode, inode->i_nlink, f->inocache->nlink, inode->i_mapping->nrpages)); + jffs2_dbg(1, "%s(): Created ino #%lu with mode %o, nlink %d(%d). 
nrpages %ld\n", + __func__, inode->i_ino, inode->i_mode, inode->i_nlink, + f->inocache->pino_nlink, inode->i_mapping->nrpages); + + unlock_new_inode(inode); + d_instantiate(dentry, inode); return 0; + + fail: + iget_failed(inode); + jffs2_free_raw_inode(ri); + return ret; } /***********************************************************************/ @@ -232,11 +226,14 @@ static int jffs2_unlink(struct inode *dir_i, struct dentry *dentry) struct jffs2_inode_info *dir_f = JFFS2_INODE_INFO(dir_i); struct jffs2_inode_info *dead_f = JFFS2_INODE_INFO(dentry->d_inode); int ret; + uint32_t now = get_seconds(); - ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name, - dentry->d_name.len, dead_f); + ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name, + dentry->d_name.len, dead_f, now); if (dead_f->inocache) - dentry->d_inode->i_nlink = dead_f->inocache->nlink; + set_nlink(dentry->d_inode, dead_f->inocache->pino_nlink); + if (!ret) + dir_i->i_mtime = dir_i->i_ctime = ITIME(now); return ret; } /***********************************************************************/ @@ -249,6 +246,7 @@ static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct de struct jffs2_inode_info *dir_f = JFFS2_INODE_INFO(dir_i); int ret; uint8_t type; + uint32_t now; /* Don't let people make hard links to bad inodes. */ if (!f->inocache) @@ -261,14 +259,16 @@ static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct de type = (old_dentry->d_inode->i_mode & S_IFMT) >> 12; if (!type) type = DT_REG; - ret = jffs2_do_link(c, dir_f, f->inocache->ino, type, dentry->d_name.name, dentry->d_name.len); + now = get_seconds(); + ret = jffs2_do_link(c, dir_f, f->inocache->ino, type, dentry->d_name.name, dentry->d_name.len, now); if (!ret) { - down(&f->sem); - old_dentry->d_inode->i_nlink = ++f->inocache->nlink; - up(&f->sem); + mutex_lock(&f->sem); + set_nlink(old_dentry->d_inode, ++f->inocache->pino_nlink); + mutex_unlock(&f->sem); d_instantiate(dentry, old_dentry->d_inode); - atomic_inc(&old_dentry->d_inode->i_count); + dir_i->i_mtime = dir_i->i_ctime = ITIME(now); + ihold(old_dentry->d_inode); } return ret; } @@ -285,26 +285,27 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char struct jffs2_full_dnode *fn; struct jffs2_full_dirent *fd; int namelen; - uint32_t alloclen, phys_ofs; + uint32_t alloclen; int ret, targetlen = strlen(target); /* FIXME: If you care. We'd need to use frags for the target if it grows much more than this */ if (targetlen > 254) - return -EINVAL; + return -ENAMETOOLONG; ri = jffs2_alloc_raw_inode(); if (!ri) return -ENOMEM; - + c = JFFS2_SB_INFO(dir_i->i_sb); - - /* Try to reserve enough space for both node and dirent. - * Just the node will do for now, though + + /* Try to reserve enough space for both node and dirent. 
+ * Just the node will do for now, though */ namelen = dentry->d_name.len; - ret = jffs2_reserve_space(c, sizeof(*ri) + targetlen, &phys_ofs, &alloclen, ALLOC_NORMAL); + ret = jffs2_reserve_space(c, sizeof(*ri) + targetlen, &alloclen, + ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); if (ret) { jffs2_free_raw_inode(ri); @@ -331,56 +332,63 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char ri->compr = JFFS2_COMPR_NONE; ri->data_crc = cpu_to_je32(crc32(0, target, targetlen)); ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); - - fn = jffs2_write_dnode(c, f, ri, target, targetlen, phys_ofs, ALLOC_NORMAL); + + fn = jffs2_write_dnode(c, f, ri, target, targetlen, ALLOC_NORMAL); jffs2_free_raw_inode(ri); if (IS_ERR(fn)) { /* Eeek. Wave bye bye */ - up(&f->sem); + mutex_unlock(&f->sem); jffs2_complete_reservation(c); - jffs2_clear_inode(inode); - return PTR_ERR(fn); + ret = PTR_ERR(fn); + goto fail; } - /* We use f->dents field to store the target path. */ - f->dents = kmalloc(targetlen + 1, GFP_KERNEL); - if (!f->dents) { - printk(KERN_WARNING "Can't allocate %d bytes of memory\n", targetlen + 1); - up(&f->sem); + /* We use f->target field to store the target path. */ + f->target = kmemdup(target, targetlen + 1, GFP_KERNEL); + if (!f->target) { + pr_warn("Can't allocate %d bytes of memory\n", targetlen + 1); + mutex_unlock(&f->sem); jffs2_complete_reservation(c); - jffs2_clear_inode(inode); - return -ENOMEM; + ret = -ENOMEM; + goto fail; } - memcpy(f->dents, target, targetlen + 1); - D1(printk(KERN_DEBUG "jffs2_symlink: symlink's target '%s' cached\n", (char *)f->dents)); + jffs2_dbg(1, "%s(): symlink's target '%s' cached\n", + __func__, (char *)f->target); - /* No data here. Only a metadata node, which will be + /* No data here. Only a metadata node, which will be obsoleted by the first data write */ f->metadata = fn; - up(&f->sem); + mutex_unlock(&f->sem); jffs2_complete_reservation(c); - ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_NORMAL); - if (ret) { - /* Eep. */ - jffs2_clear_inode(inode); - return ret; - } + + ret = jffs2_init_security(inode, dir_i, &dentry->d_name); + if (ret) + goto fail; + + ret = jffs2_init_acl_post(inode); + if (ret) + goto fail; + + ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen, + ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); + if (ret) + goto fail; rd = jffs2_alloc_raw_dirent(); if (!rd) { /* Argh. Now we treat it like a normal delete */ jffs2_complete_reservation(c); - jffs2_clear_inode(inode); - return -ENOMEM; + ret = -ENOMEM; + goto fail; } dir_f = JFFS2_INODE_INFO(dir_i); - down(&dir_f->sem); + mutex_lock(&dir_f->sem); rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT); @@ -396,16 +404,16 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen)); - fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, phys_ofs, ALLOC_NORMAL); + fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, ALLOC_NORMAL); if (IS_ERR(fd)) { - /* dirent failed to write. Delete the inode normally + /* dirent failed to write. 
Delete the inode normally as if it were the final unlink() */ jffs2_complete_reservation(c); jffs2_free_raw_dirent(rd); - up(&dir_f->sem); - jffs2_clear_inode(inode); - return PTR_ERR(fd); + mutex_unlock(&dir_f->sem); + ret = PTR_ERR(fd); + goto fail; } dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime)); @@ -416,15 +424,20 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char one if necessary. */ jffs2_add_fd_to_list(c, fd, &dir_f->dents); - up(&dir_f->sem); + mutex_unlock(&dir_f->sem); jffs2_complete_reservation(c); + unlock_new_inode(inode); d_instantiate(dentry, inode); return 0; + + fail: + iget_failed(inode); + return ret; } -static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode) +static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, umode_t mode) { struct jffs2_inode_info *f, *dir_f; struct jffs2_sb_info *c; @@ -434,7 +447,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode) struct jffs2_full_dnode *fn; struct jffs2_full_dirent *fd; int namelen; - uint32_t alloclen, phys_ofs; + uint32_t alloclen; int ret; mode |= S_IFDIR; @@ -442,14 +455,15 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode) ri = jffs2_alloc_raw_inode(); if (!ri) return -ENOMEM; - + c = JFFS2_SB_INFO(dir_i->i_sb); - /* Try to reserve enough space for both node and dirent. - * Just the node will do for now, though + /* Try to reserve enough space for both node and dirent. + * Just the node will do for now, though */ namelen = dentry->d_name.len; - ret = jffs2_reserve_space(c, sizeof(*ri), &phys_ofs, &alloclen, ALLOC_NORMAL); + ret = jffs2_reserve_space(c, sizeof(*ri), &alloclen, ALLOC_NORMAL, + JFFS2_SUMMARY_INODE_SIZE); if (ret) { jffs2_free_raw_inode(ri); @@ -466,49 +480,59 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode) inode->i_op = &jffs2_dir_inode_operations; inode->i_fop = &jffs2_dir_operations; - /* Directories get nlink 2 at start */ - inode->i_nlink = 2; f = JFFS2_INODE_INFO(inode); + /* Directories get nlink 2 at start */ + set_nlink(inode, 2); + /* but ic->pino_nlink is the parent ino# */ + f->inocache->pino_nlink = dir_i->i_ino; + ri->data_crc = cpu_to_je32(0); ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); - - fn = jffs2_write_dnode(c, f, ri, NULL, 0, phys_ofs, ALLOC_NORMAL); + + fn = jffs2_write_dnode(c, f, ri, NULL, 0, ALLOC_NORMAL); jffs2_free_raw_inode(ri); if (IS_ERR(fn)) { /* Eeek. Wave bye bye */ - up(&f->sem); + mutex_unlock(&f->sem); jffs2_complete_reservation(c); - jffs2_clear_inode(inode); - return PTR_ERR(fn); + ret = PTR_ERR(fn); + goto fail; } - /* No data here. Only a metadata node, which will be + /* No data here. Only a metadata node, which will be obsoleted by the first data write */ f->metadata = fn; - up(&f->sem); + mutex_unlock(&f->sem); jffs2_complete_reservation(c); - ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_NORMAL); - if (ret) { - /* Eep. */ - jffs2_clear_inode(inode); - return ret; - } - + + ret = jffs2_init_security(inode, dir_i, &dentry->d_name); + if (ret) + goto fail; + + ret = jffs2_init_acl_post(inode); + if (ret) + goto fail; + + ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen, + ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); + if (ret) + goto fail; + rd = jffs2_alloc_raw_dirent(); if (!rd) { /* Argh. 
Now we treat it like a normal delete */ jffs2_complete_reservation(c); - jffs2_clear_inode(inode); - return -ENOMEM; + ret = -ENOMEM; + goto fail; } dir_f = JFFS2_INODE_INFO(dir_i); - down(&dir_f->sem); + mutex_lock(&dir_f->sem); rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT); @@ -524,20 +548,20 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode) rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen)); - fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, phys_ofs, ALLOC_NORMAL); - + fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, ALLOC_NORMAL); + if (IS_ERR(fd)) { - /* dirent failed to write. Delete the inode normally + /* dirent failed to write. Delete the inode normally as if it were the final unlink() */ jffs2_complete_reservation(c); jffs2_free_raw_dirent(rd); - up(&dir_f->sem); - jffs2_clear_inode(inode); - return PTR_ERR(fd); + mutex_unlock(&dir_f->sem); + ret = PTR_ERR(fd); + goto fail; } dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime)); - dir_i->i_nlink++; + inc_nlink(dir_i); jffs2_free_raw_dirent(rd); @@ -545,30 +569,43 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode) one if necessary. */ jffs2_add_fd_to_list(c, fd, &dir_f->dents); - up(&dir_f->sem); + mutex_unlock(&dir_f->sem); jffs2_complete_reservation(c); + unlock_new_inode(inode); d_instantiate(dentry, inode); return 0; + + fail: + iget_failed(inode); + return ret; } static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry) { + struct jffs2_sb_info *c = JFFS2_SB_INFO(dir_i->i_sb); + struct jffs2_inode_info *dir_f = JFFS2_INODE_INFO(dir_i); struct jffs2_inode_info *f = JFFS2_INODE_INFO(dentry->d_inode); struct jffs2_full_dirent *fd; int ret; + uint32_t now = get_seconds(); for (fd = f->dents ; fd; fd = fd->next) { if (fd->ino) return -ENOTEMPTY; } - ret = jffs2_unlink(dir_i, dentry); - if (!ret) - dir_i->i_nlink--; + + ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name, + dentry->d_name.len, f, now); + if (!ret) { + dir_i->i_mtime = dir_i->i_ctime = ITIME(now); + clear_nlink(dentry->d_inode); + drop_nlink(dir_i); + } return ret; } -static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, dev_t rdev) +static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, umode_t mode, dev_t rdev) { struct jffs2_inode_info *f, *dir_f; struct jffs2_sb_info *c; @@ -578,30 +615,29 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de struct jffs2_full_dnode *fn; struct jffs2_full_dirent *fd; int namelen; - jint16_t dev; + union jffs2_device_node dev; int devlen = 0; - uint32_t alloclen, phys_ofs; + uint32_t alloclen; int ret; - if (!old_valid_dev(rdev)) + if (!new_valid_dev(rdev)) return -EINVAL; ri = jffs2_alloc_raw_inode(); if (!ri) return -ENOMEM; - + c = JFFS2_SB_INFO(dir_i->i_sb); - - if (S_ISBLK(mode) || S_ISCHR(mode)) { - dev = cpu_to_je16(old_encode_dev(rdev)); - devlen = sizeof(dev); - } - - /* Try to reserve enough space for both node and dirent. - * Just the node will do for now, though + + if (S_ISBLK(mode) || S_ISCHR(mode)) + devlen = jffs2_encode_dev(&dev, rdev); + + /* Try to reserve enough space for both node and dirent. 
+ * Just the node will do for now, though */ namelen = dentry->d_name.len; - ret = jffs2_reserve_space(c, sizeof(*ri) + devlen, &phys_ofs, &alloclen, ALLOC_NORMAL); + ret = jffs2_reserve_space(c, sizeof(*ri) + devlen, &alloclen, + ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); if (ret) { jffs2_free_raw_inode(ri); @@ -627,42 +663,49 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de ri->compr = JFFS2_COMPR_NONE; ri->data_crc = cpu_to_je32(crc32(0, &dev, devlen)); ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); - - fn = jffs2_write_dnode(c, f, ri, (char *)&dev, devlen, phys_ofs, ALLOC_NORMAL); + + fn = jffs2_write_dnode(c, f, ri, (char *)&dev, devlen, ALLOC_NORMAL); jffs2_free_raw_inode(ri); if (IS_ERR(fn)) { /* Eeek. Wave bye bye */ - up(&f->sem); + mutex_unlock(&f->sem); jffs2_complete_reservation(c); - jffs2_clear_inode(inode); - return PTR_ERR(fn); + ret = PTR_ERR(fn); + goto fail; } - /* No data here. Only a metadata node, which will be + /* No data here. Only a metadata node, which will be obsoleted by the first data write */ f->metadata = fn; - up(&f->sem); + mutex_unlock(&f->sem); jffs2_complete_reservation(c); - ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_NORMAL); - if (ret) { - /* Eep. */ - jffs2_clear_inode(inode); - return ret; - } + + ret = jffs2_init_security(inode, dir_i, &dentry->d_name); + if (ret) + goto fail; + + ret = jffs2_init_acl_post(inode); + if (ret) + goto fail; + + ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen, + ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); + if (ret) + goto fail; rd = jffs2_alloc_raw_dirent(); if (!rd) { /* Argh. Now we treat it like a normal delete */ jffs2_complete_reservation(c); - jffs2_clear_inode(inode); - return -ENOMEM; + ret = -ENOMEM; + goto fail; } dir_f = JFFS2_INODE_INFO(dir_i); - down(&dir_f->sem); + mutex_lock(&dir_f->sem); rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT); @@ -681,16 +724,16 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen)); - fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, phys_ofs, ALLOC_NORMAL); - + fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, ALLOC_NORMAL); + if (IS_ERR(fd)) { - /* dirent failed to write. Delete the inode normally + /* dirent failed to write. Delete the inode normally as if it were the final unlink() */ jffs2_complete_reservation(c); jffs2_free_raw_dirent(rd); - up(&dir_f->sem); - jffs2_clear_inode(inode); - return PTR_ERR(fd); + mutex_unlock(&dir_f->sem); + ret = PTR_ERR(fd); + goto fail; } dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime)); @@ -701,23 +744,28 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de one if necessary. 
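jffs2_mknod() above now stores device numbers through union jffs2_device_node and jffs2_encode_dev(): a 16-bit record when the major/minor pair fits the legacy format, a 32-bit record otherwise, and jffs2_iget() later tells the two apart purely by the node's data size. The sketch below models that size-based scheme; the bit layout and the toy_* names are invented for illustration and are not the kernel's old_encode_dev()/new_encode_dev() encoding.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative model of the size-based device-number format: the reader
     * distinguishes the formats by the stored length, as jffs2_iget() does
     * with f->metadata->size.  The bit layout here is simplified. */
    union toy_device_node {
        uint16_t old_id;
        uint32_t new_id;
    };

    static int toy_encode_dev(union toy_device_node *d, unsigned major, unsigned minor)
    {
        if (major < 256 && minor < 256) {
            d->old_id = (uint16_t)((major << 8) | minor);
            return sizeof(d->old_id);
        }
        d->new_id = ((uint32_t)major << 20) | (minor & 0xfffff); /* toy layout */
        return sizeof(d->new_id);
    }

    static void toy_decode_dev(const union toy_device_node *d, int len,
                               unsigned *major, unsigned *minor)
    {
        if (len == sizeof(d->old_id)) {
            *major = d->old_id >> 8;
            *minor = d->old_id & 0xff;
        } else {
            *major = d->new_id >> 20;
            *minor = d->new_id & 0xfffff;
        }
    }

    int main(void)
    {
        union toy_device_node d;
        unsigned major, minor;
        int len = toy_encode_dev(&d, 259, 70000);
        toy_decode_dev(&d, len, &major, &minor);
        printf("stored %d bytes -> %u:%u\n", len, major, minor);
        return 0;
    }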
*/ jffs2_add_fd_to_list(c, fd, &dir_f->dents); - up(&dir_f->sem); + mutex_unlock(&dir_f->sem); jffs2_complete_reservation(c); + unlock_new_inode(inode); d_instantiate(dentry, inode); - return 0; + + fail: + iget_failed(inode); + return ret; } static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry, - struct inode *new_dir_i, struct dentry *new_dentry) + struct inode *new_dir_i, struct dentry *new_dentry) { int ret; struct jffs2_sb_info *c = JFFS2_SB_INFO(old_dir_i->i_sb); struct jffs2_inode_info *victim_f = NULL; uint8_t type; + uint32_t now; - /* The VFS will check for us and prevent trying to rename a + /* The VFS will check for us and prevent trying to rename a * file over a directory and vice versa, but if it's a directory, * the VFS can't check whether the victim is empty. The filesystem * needs to do that for itself. @@ -727,76 +775,87 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry, if (S_ISDIR(new_dentry->d_inode->i_mode)) { struct jffs2_full_dirent *fd; - down(&victim_f->sem); + mutex_lock(&victim_f->sem); for (fd = victim_f->dents; fd; fd = fd->next) { if (fd->ino) { - up(&victim_f->sem); + mutex_unlock(&victim_f->sem); return -ENOTEMPTY; } } - up(&victim_f->sem); + mutex_unlock(&victim_f->sem); } } /* XXX: We probably ought to alloc enough space for - both nodes at the same time. Writing the new link, + both nodes at the same time. Writing the new link, then getting -ENOSPC, is quite bad :) */ /* Make a hard link */ - + /* XXX: This is ugly */ type = (old_dentry->d_inode->i_mode & S_IFMT) >> 12; if (!type) type = DT_REG; - ret = jffs2_do_link(c, JFFS2_INODE_INFO(new_dir_i), + now = get_seconds(); + ret = jffs2_do_link(c, JFFS2_INODE_INFO(new_dir_i), old_dentry->d_inode->i_ino, type, - new_dentry->d_name.name, new_dentry->d_name.len); + new_dentry->d_name.name, new_dentry->d_name.len, now); if (ret) return ret; if (victim_f) { /* There was a victim. Kill it off nicely */ - new_dentry->d_inode->i_nlink--; + if (S_ISDIR(new_dentry->d_inode->i_mode)) + clear_nlink(new_dentry->d_inode); + else + drop_nlink(new_dentry->d_inode); /* Don't oops if the victim was a dirent pointing to an inode which didn't exist. */ if (victim_f->inocache) { - down(&victim_f->sem); - victim_f->inocache->nlink--; - up(&victim_f->sem); + mutex_lock(&victim_f->sem); + if (S_ISDIR(new_dentry->d_inode->i_mode)) + victim_f->inocache->pino_nlink = 0; + else + victim_f->inocache->pino_nlink--; + mutex_unlock(&victim_f->sem); } } - /* If it was a directory we moved, and there was no victim, + /* If it was a directory we moved, and there was no victim, increase i_nlink on its new parent */ if (S_ISDIR(old_dentry->d_inode->i_mode) && !victim_f) - new_dir_i->i_nlink++; + inc_nlink(new_dir_i); /* Unlink the original */ - ret = jffs2_do_unlink(c, JFFS2_INODE_INFO(old_dir_i), - old_dentry->d_name.name, old_dentry->d_name.len, NULL); + ret = jffs2_do_unlink(c, JFFS2_INODE_INFO(old_dir_i), + old_dentry->d_name.name, old_dentry->d_name.len, NULL, now); /* We don't touch inode->i_nlink */ if (ret) { /* Oh shit. We really ought to make a single node which can do both atomically */ struct jffs2_inode_info *f = JFFS2_INODE_INFO(old_dentry->d_inode); - down(&f->sem); - old_dentry->d_inode->i_nlink++; - if (f->inocache) - f->inocache->nlink++; - up(&f->sem); - - printk(KERN_NOTICE "jffs2_rename(): Link succeeded, unlink failed (err %d). 
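As the comments in jffs2_rename() above note, rename is implemented as "write the new dirent, then obsolete the old one", and a failure in the second step simply leaves an extra hard link behind, which is exactly what the notice printed below reports. A toy model of that two-step rename and its failure mode; all toy_* names are invented, and the injected -1 stands in for an -ENOSPC-style flash write error.

    #include <stdio.h>
    #include <string.h>

    #define TOY_DIRSIZE 8

    struct toy_dirent { char name[16]; unsigned ino; };

    static int toy_add(struct toy_dirent *dir, const char *name, unsigned ino)
    {
        for (int i = 0; i < TOY_DIRSIZE; i++)
            if (!dir[i].ino) {
                snprintf(dir[i].name, sizeof(dir[i].name), "%s", name);
                dir[i].ino = ino;
                return 0;
            }
        return -1; /* stands in for a failed dirent write */
    }

    static int toy_del(struct toy_dirent *dir, const char *name, int inject_fail)
    {
        if (inject_fail)
            return -1;
        for (int i = 0; i < TOY_DIRSIZE; i++)
            if (dir[i].ino && !strcmp(dir[i].name, name)) {
                dir[i].ino = 0;
                return 0;
            }
        return -1;
    }

    static int toy_rename(struct toy_dirent *dir, const char *old, const char *new,
                          unsigned ino, int inject_fail)
    {
        if (toy_add(dir, new, ino))
            return -1;  /* nothing changed */
        if (toy_del(dir, old, inject_fail))
            return 1;   /* both names now link to ino: an extra hard link */
        return 0;
    }

    int main(void)
    {
        struct toy_dirent dir[TOY_DIRSIZE] = { { "a", 10 } };
        printf("rename ret=%d\n", toy_rename(dir, "a", "b", 10, 1));
        for (int i = 0; i < TOY_DIRSIZE; i++)
            if (dir[i].ino)
                printf("  %s -> %u\n", dir[i].name, dir[i].ino);
        return 0;
    }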
You now have a hard link\n", ret); + mutex_lock(&f->sem); + inc_nlink(old_dentry->d_inode); + if (f->inocache && !S_ISDIR(old_dentry->d_inode->i_mode)) + f->inocache->pino_nlink++; + mutex_unlock(&f->sem); + + pr_notice("%s(): Link succeeded, unlink failed (err %d). You now have a hard link\n", + __func__, ret); /* Might as well let the VFS know */ d_instantiate(new_dentry, old_dentry->d_inode); - atomic_inc(&old_dentry->d_inode->i_count); + ihold(old_dentry->d_inode); + new_dir_i->i_mtime = new_dir_i->i_ctime = ITIME(now); return ret; } if (S_ISDIR(old_dentry->d_inode->i_mode)) - old_dir_i->i_nlink--; + drop_nlink(old_dir_i); + + new_dir_i->i_mtime = new_dir_i->i_ctime = old_dir_i->i_mtime = old_dir_i->i_ctime = ITIME(now); return 0; } diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c index 787d84ac2bc..4a6cf289be2 100644 --- a/fs/jffs2/erase.c +++ b/fs/jffs2/erase.c @@ -1,16 +1,17 @@ /* * JFFS2 -- Journalling Flash File System, Version 2. * - * Copyright (C) 2001-2003 Red Hat, Inc. + * Copyright © 2001-2007 Red Hat, Inc. + * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org> * * Created by David Woodhouse <dwmw2@infradead.org> * * For licensing information, see the file 'LICENCE' in this directory. * - * $Id: erase.c,v 1.80 2005/07/14 19:46:24 joern Exp $ - * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/kernel.h> #include <linux/slab.h> #include <linux/mtd/mtd.h> @@ -24,13 +25,12 @@ struct erase_priv_struct { struct jffs2_eraseblock *jeb; struct jffs2_sb_info *c; }; - + #ifndef __ECOS static void jffs2_erase_callback(struct erase_info *); #endif static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset); static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); -static void jffs2_free_all_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); static void jffs2_erase_block(struct jffs2_sb_info *c, @@ -41,24 +41,27 @@ static void jffs2_erase_block(struct jffs2_sb_info *c, #ifdef __ECOS ret = jffs2_flash_erase(c, jeb); if (!ret) { - jffs2_erase_succeeded(c, jeb); - return; + jffs2_erase_succeeded(c, jeb); + return; } bad_offset = jeb->offset; #else /* Linux */ struct erase_info *instr; - D1(printk(KERN_DEBUG "jffs2_erase_block(): erase block %#x (range %#x-%#x)\n", jeb->offset, jeb->offset, jeb->offset + c->sector_size)); + jffs2_dbg(1, "%s(): erase block %#08x (range %#08x-%#08x)\n", + __func__, + jeb->offset, jeb->offset, jeb->offset + c->sector_size); instr = kmalloc(sizeof(struct erase_info) + sizeof(struct erase_priv_struct), GFP_KERNEL); if (!instr) { - printk(KERN_WARNING "kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n"); + pr_warn("kmalloc for struct erase_info in jffs2_erase_block failed. 
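Several hunks in erase.c above replace back-to-back list_del()/list_add() calls with list_move() or list_move_tail(). A minimal re-creation of the helpers involved, modelled on the kernel's <linux/list.h>, shows why this is a behaviour-preserving cleanup: list_move() is exactly delete-then-add-at-head.

    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    static void list_init(struct list_head *h) { h->next = h->prev = h; }

    static void __list_add(struct list_head *n, struct list_head *prev,
                           struct list_head *next)
    {
        next->prev = n;
        n->next = next;
        n->prev = prev;
        prev->next = n;
    }

    static void list_add(struct list_head *n, struct list_head *h)
    {
        __list_add(n, h, h->next);
    }

    static void list_del(struct list_head *e)
    {
        e->prev->next = e->next;
        e->next->prev = e->prev;
    }

    static void list_move(struct list_head *e, struct list_head *h)
    {
        list_del(e);    /* drop from whatever list it is on... */
        list_add(e, h); /* ...and refile it at the head of another */
    }

    static int list_len(struct list_head *h)
    {
        int n = 0;
        for (struct list_head *p = h->next; p != h; p = p->next)
            n++;
        return n;
    }

    int main(void)
    {
        struct list_head erase_pending, erasing, block;
        list_init(&erase_pending);
        list_init(&erasing);
        list_init(&block);
        list_add(&block, &erasing);
        list_move(&block, &erase_pending); /* refile, as the error paths do */
        printf("erasing=%d pending=%d\n", list_len(&erasing), list_len(&erase_pending));
        return 0;
    }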
Refiling block for later\n"); + mutex_lock(&c->erase_free_sem); spin_lock(&c->erase_completion_lock); - list_del(&jeb->list); - list_add(&jeb->list, &c->erase_pending_list); + list_move(&jeb->list, &c->erase_pending_list); c->erasing_size -= c->sector_size; c->dirty_size += c->sector_size; jeb->dirty_size = c->sector_size; spin_unlock(&c->erase_completion_lock); + mutex_unlock(&c->erase_free_sem); return; } @@ -69,12 +72,11 @@ static void jffs2_erase_block(struct jffs2_sb_info *c, instr->len = c->sector_size; instr->callback = jffs2_erase_callback; instr->priv = (unsigned long)(&instr[1]); - instr->fail_addr = 0xffffffff; - + ((struct erase_priv_struct *)instr->priv)->jeb = jeb; ((struct erase_priv_struct *)instr->priv)->c = c; - ret = c->mtd->erase(c->mtd, instr); + ret = mtd_erase(c->mtd, instr); if (!ret) return; @@ -84,30 +86,35 @@ static void jffs2_erase_block(struct jffs2_sb_info *c, if (ret == -ENOMEM || ret == -EAGAIN) { /* Erase failed immediately. Refile it on the list */ - D1(printk(KERN_DEBUG "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n", jeb->offset, ret)); + jffs2_dbg(1, "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n", + jeb->offset, ret); + mutex_lock(&c->erase_free_sem); spin_lock(&c->erase_completion_lock); - list_del(&jeb->list); - list_add(&jeb->list, &c->erase_pending_list); + list_move(&jeb->list, &c->erase_pending_list); c->erasing_size -= c->sector_size; c->dirty_size += c->sector_size; jeb->dirty_size = c->sector_size; spin_unlock(&c->erase_completion_lock); + mutex_unlock(&c->erase_free_sem); return; } - if (ret == -EROFS) - printk(KERN_WARNING "Erase at 0x%08x failed immediately: -EROFS. Is the sector locked?\n", jeb->offset); + if (ret == -EROFS) + pr_warn("Erase at 0x%08x failed immediately: -EROFS. Is the sector locked?\n", + jeb->offset); else - printk(KERN_WARNING "Erase at 0x%08x failed immediately: errno %d\n", jeb->offset, ret); + pr_warn("Erase at 0x%08x failed immediately: errno %d\n", + jeb->offset, ret); jffs2_erase_failed(c, jeb, bad_offset); } -void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count) +int jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count) { struct jffs2_eraseblock *jeb; + int work_done = 0; - down(&c->erase_free_sem); + mutex_lock(&c->erase_free_sem); spin_lock(&c->erase_completion_lock); @@ -116,18 +123,21 @@ void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count) if (!list_empty(&c->erase_complete_list)) { jeb = list_entry(c->erase_complete_list.next, struct jffs2_eraseblock, list); - list_del(&jeb->list); + list_move(&jeb->list, &c->erase_checking_list); spin_unlock(&c->erase_completion_lock); + mutex_unlock(&c->erase_free_sem); jffs2_mark_erased_block(c, jeb); + work_done++; if (!--count) { - D1(printk(KERN_DEBUG "Count reached. jffs2_erase_pending_blocks leaving\n")); + jffs2_dbg(1, "Count reached. 
jffs2_erase_pending_blocks leaving\n"); goto done; } } else if (!list_empty(&c->erase_pending_list)) { jeb = list_entry(c->erase_pending_list.next, struct jffs2_eraseblock, list); - D1(printk(KERN_DEBUG "Starting erase of pending block 0x%08x\n", jeb->offset)); + jffs2_dbg(1, "Starting erase of pending block 0x%08x\n", + jeb->offset); list_del(&jeb->list); c->erasing_size += c->sector_size; c->wasted_size -= jeb->wasted_size; @@ -135,9 +145,10 @@ void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count) c->used_size -= jeb->used_size; c->dirty_size -= jeb->dirty_size; jeb->wasted_size = jeb->used_size = jeb->dirty_size = jeb->free_size = 0; - jffs2_free_all_node_refs(c, jeb); + jffs2_free_jeb_node_refs(c, jeb); list_add(&jeb->list, &c->erasing_list); spin_unlock(&c->erase_completion_lock); + mutex_unlock(&c->erase_free_sem); jffs2_erase_block(c, jeb); @@ -147,56 +158,61 @@ void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count) /* Be nice */ cond_resched(); + mutex_lock(&c->erase_free_sem); spin_lock(&c->erase_completion_lock); } spin_unlock(&c->erase_completion_lock); + mutex_unlock(&c->erase_free_sem); done: - D1(printk(KERN_DEBUG "jffs2_erase_pending_blocks completed\n")); - - up(&c->erase_free_sem); + jffs2_dbg(1, "jffs2_erase_pending_blocks completed\n"); + return work_done; } static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) { - D1(printk(KERN_DEBUG "Erase completed successfully at 0x%08x\n", jeb->offset)); + jffs2_dbg(1, "Erase completed successfully at 0x%08x\n", jeb->offset); + mutex_lock(&c->erase_free_sem); spin_lock(&c->erase_completion_lock); - list_del(&jeb->list); - list_add_tail(&jeb->list, &c->erase_complete_list); + list_move_tail(&jeb->list, &c->erase_complete_list); + /* Wake the GC thread to mark them clean */ + jffs2_garbage_collect_trigger(c); spin_unlock(&c->erase_completion_lock); - /* Ensure that kupdated calls us again to mark them clean */ - jffs2_erase_pending_trigger(c); + mutex_unlock(&c->erase_free_sem); + wake_up(&c->erase_wait); } static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset) { /* For NAND, if the failure did not occur at the device level for a specific physical page, don't bother updating the bad block table. */ - if (jffs2_cleanmarker_oob(c) && (bad_offset != 0xffffffff)) { + if (jffs2_cleanmarker_oob(c) && (bad_offset != (uint32_t)MTD_FAIL_ADDR_UNKNOWN)) { /* We had a device-level failure to erase. Let's see if we've failed too many times. */ if (!jffs2_write_nand_badblock(c, jeb, bad_offset)) { /* We'd like to give this block another try. 
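The erase path above shuffles each eraseblock between several lists: erase_pending_list, erasing_list, erase_complete_list, the new erase_checking_list (held while a block is verified and its cleanmarker written), and finally free_list or bad_list, with failures refiling back to erase_pending_list. The sketch below is a deliberately simplified state machine of that flow, not the kernel's data structures; the EB_*/EV_* names are invented.

    #include <stdio.h>

    enum eb_list { EB_PENDING, EB_ERASING, EB_COMPLETE, EB_CHECKING, EB_FREE, EB_BAD };

    enum eb_event { EV_START_ERASE, EV_ERASE_OK, EV_ERASE_FAIL_RETRY,
                    EV_ERASE_FAIL_FATAL, EV_CHECK, EV_MARKER_OK, EV_CHECK_RETRY };

    static enum eb_list eb_next(enum eb_list cur, enum eb_event ev)
    {
        switch (ev) {
        case EV_START_ERASE:      return EB_ERASING;   /* jffs2_erase_block() */
        case EV_ERASE_OK:         return EB_COMPLETE;  /* jffs2_erase_succeeded() */
        case EV_ERASE_FAIL_RETRY: return EB_PENDING;   /* refile and try again */
        case EV_ERASE_FAIL_FATAL: return EB_BAD;       /* jffs2_erase_failed() */
        case EV_CHECK:            return EB_CHECKING;  /* being verified/marked */
        case EV_MARKER_OK:        return EB_FREE;      /* cleanmarker written */
        case EV_CHECK_RETRY:      return EB_COMPLETE;  /* the "refile:" label */
        }
        return cur;
    }

    int main(void)
    {
        enum eb_list s = EB_PENDING;
        enum eb_event path[] = { EV_START_ERASE, EV_ERASE_OK, EV_CHECK, EV_MARKER_OK };
        for (unsigned i = 0; i < sizeof(path) / sizeof(path[0]); i++)
            s = eb_next(s, path[i]);
        printf("final list id = %d (4 == free)\n", s);
        return 0;
    }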
*/ + mutex_lock(&c->erase_free_sem); spin_lock(&c->erase_completion_lock); - list_del(&jeb->list); - list_add(&jeb->list, &c->erase_pending_list); + list_move(&jeb->list, &c->erase_pending_list); c->erasing_size -= c->sector_size; c->dirty_size += c->sector_size; jeb->dirty_size = c->sector_size; spin_unlock(&c->erase_completion_lock); + mutex_unlock(&c->erase_free_sem); return; } } + mutex_lock(&c->erase_free_sem); spin_lock(&c->erase_completion_lock); c->erasing_size -= c->sector_size; c->bad_size += c->sector_size; - list_del(&jeb->list); - list_add(&jeb->list, &c->bad_list); + list_move(&jeb->list, &c->bad_list); c->nr_erasing_blocks--; spin_unlock(&c->erase_completion_lock); + mutex_unlock(&c->erase_free_sem); wake_up(&c->erase_wait); -} +} #ifndef __ECOS static void jffs2_erase_callback(struct erase_info *instr) @@ -204,11 +220,12 @@ static void jffs2_erase_callback(struct erase_info *instr) struct erase_priv_struct *priv = (void *)instr->priv; if(instr->state != MTD_ERASE_DONE) { - printk(KERN_WARNING "Erase at 0x%08x finished, but state != MTD_ERASE_DONE. State is 0x%x instead.\n", instr->addr, instr->state); + pr_warn("Erase at 0x%08llx finished, but state != MTD_ERASE_DONE. State is 0x%x instead.\n", + (unsigned long long)instr->addr, instr->state); jffs2_erase_failed(priv->c, priv->jeb, instr->fail_addr); } else { jffs2_erase_succeeded(priv->c, priv->jeb); - } + } kfree(instr); } #endif /* !__ECOS */ @@ -226,13 +243,13 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c, /* Walk the inode's list once, removing any nodes from this eraseblock */ while (1) { if (!(*prev)->next_in_ino) { - /* We're looking at the jffs2_inode_cache, which is + /* We're looking at the jffs2_inode_cache, which is at the end of the linked list. 
Stash it and continue from the beginning of the list */ ic = (struct jffs2_inode_cache *)(*prev); prev = &ic->nodes; continue; - } + } if (SECTOR_ADDR((*prev)->flash_offset) == jeb->offset) { /* It's in the block we're erasing */ @@ -253,51 +270,71 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c, /* PARANOIA */ if (!ic) { - printk(KERN_WARNING "inode_cache not found in remove_node_refs()!!\n"); + JFFS2_WARNING("inode_cache/xattr_datum/xattr_ref" + " not found in remove_node_refs()!!\n"); return; } - D1(printk(KERN_DEBUG "Removed nodes in range 0x%08x-0x%08x from ino #%u\n", - jeb->offset, jeb->offset + c->sector_size, ic->ino)); + jffs2_dbg(1, "Removed nodes in range 0x%08x-0x%08x from ino #%u\n", + jeb->offset, jeb->offset + c->sector_size, ic->ino); D2({ int i=0; struct jffs2_raw_node_ref *this; - printk(KERN_DEBUG "After remove_node_refs_from_ino_list: \n" KERN_DEBUG); + printk(KERN_DEBUG "After remove_node_refs_from_ino_list: \n"); this = ic->nodes; - + + printk(KERN_DEBUG); while(this) { - printk( "0x%08x(%d)->", ref_offset(this), ref_flags(this)); + pr_cont("0x%08x(%d)->", + ref_offset(this), ref_flags(this)); if (++i == 5) { - printk("\n" KERN_DEBUG); + printk(KERN_DEBUG); i=0; } this = this->next_in_ino; } - printk("\n"); + pr_cont("\n"); }); - if (ic->nodes == (void *)ic && ic->nlink == 0) - jffs2_del_ino_cache(c, ic); + switch (ic->class) { +#ifdef CONFIG_JFFS2_FS_XATTR + case RAWNODE_CLASS_XATTR_DATUM: + jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic); + break; + case RAWNODE_CLASS_XATTR_REF: + jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic); + break; +#endif + default: + if (ic->nodes == (void *)ic && ic->pino_nlink == 0) + jffs2_del_ino_cache(c, ic); + } } -static void jffs2_free_all_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) +void jffs2_free_jeb_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) { - struct jffs2_raw_node_ref *ref; - D1(printk(KERN_DEBUG "Freeing all node refs for eraseblock offset 0x%08x\n", jeb->offset)); - while(jeb->first_node) { - ref = jeb->first_node; - jeb->first_node = ref->next_phys; - - /* Remove from the inode-list */ - if (ref->next_in_ino) + struct jffs2_raw_node_ref *block, *ref; + jffs2_dbg(1, "Freeing all node refs for eraseblock offset 0x%08x\n", + jeb->offset); + + block = ref = jeb->first_node; + + while (ref) { + if (ref->flash_offset == REF_LINK_NODE) { + ref = ref->next_in_ino; + jffs2_free_refblock(block); + block = ref; + continue; + } + if (ref->flash_offset != REF_EMPTY_NODE && ref->next_in_ino) jffs2_remove_node_refs_from_ino_list(c, ref, jeb); /* else it was a non-inode node or already removed, so don't bother */ - jffs2_free_raw_node_ref(ref); + ref++; } - jeb->last_node = NULL; + jeb->first_node = jeb->last_node = NULL; } static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t *bad_offset) @@ -305,15 +342,48 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl void *ebuf; uint32_t ofs; size_t retlen; - int ret = -EIO; - + int ret; + unsigned long *wordebuf; + + ret = mtd_point(c->mtd, jeb->offset, c->sector_size, &retlen, + &ebuf, NULL); + if (ret != -EOPNOTSUPP) { + if (ret) { + jffs2_dbg(1, "MTD point failed %d\n", ret); + goto do_flash_read; + } + if (retlen < c->sector_size) { + /* Don't muck about if it won't let us point to the whole erase sector */ + jffs2_dbg(1, "MTD point returned len too short: 0x%zx\n", + retlen); + mtd_unpoint(c->mtd, jeb->offset, 
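jffs2_free_jeb_node_refs() above reflects the newer node-reference storage: refs live in fixed-size arrays, the last slot of each array is a link entry (REF_LINK_NODE) whose pointer chains to the next array, unused slots are marked REF_EMPTY_NODE, and teardown frees whole arrays with jffs2_free_refblock() instead of freeing refs one by one. A runnable toy version of that walk; the sentinel values, slot count and toy_* names are invented.

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    #define TOY_REFS_PER_BLOCK 4
    #define TOY_REF_LINK  0xffffffffu
    #define TOY_REF_EMPTY 0xfffffffeu

    struct toy_ref {
        uint32_t flash_offset;   /* or one of the sentinels above */
        struct toy_ref *next;    /* only meaningful for the link slot */
    };

    static struct toy_ref *toy_new_block(struct toy_ref *chain_to)
    {
        struct toy_ref *b = calloc(TOY_REFS_PER_BLOCK, sizeof(*b));
        for (int i = 0; i < TOY_REFS_PER_BLOCK - 1; i++)
            b[i].flash_offset = TOY_REF_EMPTY;
        b[TOY_REFS_PER_BLOCK - 1].flash_offset = TOY_REF_LINK;
        b[TOY_REFS_PER_BLOCK - 1].next = chain_to;
        return b;
    }

    static void toy_free_all(struct toy_ref *first)
    {
        struct toy_ref *block = first, *ref = first;
        int freed_blocks = 0, seen_nodes = 0;

        while (ref) {
            if (ref->flash_offset == TOY_REF_LINK) {
                ref = ref->next;     /* hop to the next array... */
                free(block);         /* ...and release this one whole */
                freed_blocks++;
                block = ref;
                continue;
            }
            if (ref->flash_offset != TOY_REF_EMPTY)
                seen_nodes++;        /* kernel: unlink from the ino list */
            ref++;                   /* next slot in the same array */
        }
        printf("freed %d blocks, walked %d real refs\n", freed_blocks, seen_nodes);
    }

    int main(void)
    {
        struct toy_ref *second = toy_new_block(NULL);
        struct toy_ref *first = toy_new_block(second);
        first[0].flash_offset = 0x100;  /* pretend two real node refs */
        second[1].flash_offset = 0x200;
        toy_free_all(first);
        return 0;
    }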
retlen); + goto do_flash_read; + } + wordebuf = ebuf-sizeof(*wordebuf); + retlen /= sizeof(*wordebuf); + do { + if (*++wordebuf != ~0) + break; + } while(--retlen); + mtd_unpoint(c->mtd, jeb->offset, c->sector_size); + if (retlen) { + pr_warn("Newly-erased block contained word 0x%lx at offset 0x%08tx\n", + *wordebuf, + jeb->offset + + c->sector_size-retlen * sizeof(*wordebuf)); + return -EIO; + } + return 0; + } + do_flash_read: ebuf = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!ebuf) { - printk(KERN_WARNING "Failed to allocate page buffer for verifying erase at 0x%08x. Refiling\n", jeb->offset); + pr_warn("Failed to allocate page buffer for verifying erase at 0x%08x. Refiling\n", + jeb->offset); return -EAGAIN; } - D1(printk(KERN_DEBUG "Verifying erase at 0x%08x\n", jeb->offset)); + jffs2_dbg(1, "Verifying erase at 0x%08x\n", jeb->offset); for (ofs = jeb->offset; ofs < jeb->offset + c->sector_size; ) { uint32_t readlen = min((uint32_t)PAGE_SIZE, jeb->offset + c->sector_size - ofs); @@ -321,13 +391,17 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl *bad_offset = ofs; - ret = jffs2_flash_read(c, ofs, readlen, &retlen, ebuf); + ret = mtd_read(c->mtd, ofs, readlen, &retlen, ebuf); if (ret) { - printk(KERN_WARNING "Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n", ofs, ret); + pr_warn("Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n", + ofs, ret); + ret = -EIO; goto fail; } if (retlen != readlen) { - printk(KERN_WARNING "Short read from newly-erased block at 0x%08x. Wanted %d, got %zd\n", ofs, readlen, retlen); + pr_warn("Short read from newly-erased block at 0x%08x. Wanted %d, got %zd\n", + ofs, readlen, retlen); + ret = -EIO; goto fail; } for (i=0; i<readlen; i += sizeof(unsigned long)) { @@ -335,7 +409,9 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl unsigned long *datum = ebuf + i; if (*datum + 1) { *bad_offset += i; - printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08x\n", *datum, *bad_offset); + pr_warn("Newly-erased block contained word 0x%lx at offset 0x%08x\n", + *datum, *bad_offset); + ret = -EIO; goto fail; } } @@ -350,18 +426,17 @@ fail: static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) { - struct jffs2_raw_node_ref *marker_ref = NULL; size_t retlen; int ret; - uint32_t bad_offset; + uint32_t uninitialized_var(bad_offset); switch (jffs2_block_check_erase(c, jeb, &bad_offset)) { case -EAGAIN: goto refile; case -EIO: goto filebad; } - /* Write the erase complete marker */ - D1(printk(KERN_DEBUG "Writing erased marker to block at 0x%08x\n", jeb->offset)); + /* Write the erase complete marker */ + jffs2_dbg(1, "Writing erased marker to block at 0x%08x\n", jeb->offset); bad_offset = jeb->offset; /* Cleanmarker in oob area or no cleanmarker at all ? */ @@ -371,13 +446,6 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb if (jffs2_write_nand_cleanmarker(c, jeb)) goto filebad; } - - jeb->first_node = jeb->last_node = NULL; - jeb->free_size = c->sector_size; - jeb->used_size = 0; - jeb->dirty_size = 0; - jeb->wasted_size = 0; - } else { struct kvec vecs[1]; @@ -387,72 +455,61 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb .totlen = cpu_to_je32(c->cleanmarker_size) }; - marker_ref = jffs2_alloc_raw_node_ref(); - if (!marker_ref) { - printk(KERN_WARNING "Failed to allocate raw node ref for clean marker. 
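The mtd_point() fast path above verifies an erase by mapping the block and scanning it a machine word at a time for anything other than all-ones, falling back to buffered mtd_read() only when pointing is unsupported or partial. A standalone version of that scan; the function name and buffer size here are arbitrary.

    #include <stdio.h>
    #include <string.h>
    #include <stddef.h>

    /* A freshly erased block must read back as all 0xFF, so any word that is
     * not ~0 means the erase did not "take".  Returns the byte offset of the
     * first bad word, or -1 if the block is clean. */
    static long scan_erased(const unsigned long *buf, size_t words)
    {
        for (size_t i = 0; i < words; i++)
            if (buf[i] != ~0UL)
                return (long)(i * sizeof(buf[0]));
        return -1;
    }

    int main(void)
    {
        unsigned long block[1024 / sizeof(unsigned long)];

        memset(block, 0xff, sizeof(block));
        printf("clean block: %ld\n",
               scan_erased(block, sizeof(block) / sizeof(block[0])));

        block[17] = 0xdeadbeefUL;   /* simulate a stuck bit pattern */
        printf("dirty block: byte offset %ld\n",
               scan_erased(block, sizeof(block) / sizeof(block[0])));
        return 0;
    }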
Refiling\n"); - goto refile; - } + jffs2_prealloc_raw_node_refs(c, jeb, 1); marker.hdr_crc = cpu_to_je32(crc32(0, &marker, sizeof(struct jffs2_unknown_node)-4)); vecs[0].iov_base = (unsigned char *) &marker; vecs[0].iov_len = sizeof(marker); ret = jffs2_flash_direct_writev(c, vecs, 1, jeb->offset, &retlen); - + if (ret || retlen != sizeof(marker)) { if (ret) - printk(KERN_WARNING "Write clean marker to block at 0x%08x failed: %d\n", + pr_warn("Write clean marker to block at 0x%08x failed: %d\n", jeb->offset, ret); else - printk(KERN_WARNING "Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n", + pr_warn("Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n", jeb->offset, sizeof(marker), retlen); - jffs2_free_raw_node_ref(marker_ref); goto filebad; } - - marker_ref->next_in_ino = NULL; - marker_ref->next_phys = NULL; - marker_ref->flash_offset = jeb->offset | REF_NORMAL; - marker_ref->__totlen = c->cleanmarker_size; - - jeb->first_node = jeb->last_node = marker_ref; - - jeb->free_size = c->sector_size - c->cleanmarker_size; - jeb->used_size = c->cleanmarker_size; - jeb->dirty_size = 0; - jeb->wasted_size = 0; } + /* Everything else got zeroed before the erase */ + jeb->free_size = c->sector_size; + mutex_lock(&c->erase_free_sem); spin_lock(&c->erase_completion_lock); + c->erasing_size -= c->sector_size; - c->free_size += jeb->free_size; - c->used_size += jeb->used_size; + c->free_size += c->sector_size; - ACCT_SANITY_CHECK(c,jeb); - D1(ACCT_PARANOIA_CHECK(jeb)); + /* Account for cleanmarker now, if it's in-band */ + if (c->cleanmarker_size && !jffs2_cleanmarker_oob(c)) + jffs2_link_node_ref(c, jeb, jeb->offset | REF_NORMAL, c->cleanmarker_size, NULL); - list_add_tail(&jeb->list, &c->free_list); + list_move_tail(&jeb->list, &c->free_list); c->nr_erasing_blocks--; c->nr_free_blocks++; + + jffs2_dbg_acct_sanity_check_nolock(c, jeb); + jffs2_dbg_acct_paranoia_check_nolock(c, jeb); + spin_unlock(&c->erase_completion_lock); + mutex_unlock(&c->erase_free_sem); wake_up(&c->erase_wait); return; filebad: - spin_lock(&c->erase_completion_lock); - /* Stick it on a list (any list) so erase_failed can take it - right off again. Silly, but shouldn't happen often. */ - list_add(&jeb->list, &c->erasing_list); - spin_unlock(&c->erase_completion_lock); jffs2_erase_failed(c, jeb, bad_offset); return; refile: /* Stick it back on the list from whence it came and come back later */ - jffs2_erase_pending_trigger(c); + mutex_lock(&c->erase_free_sem); spin_lock(&c->erase_completion_lock); - list_add(&jeb->list, &c->erase_complete_list); + jffs2_garbage_collect_trigger(c); + list_move(&jeb->list, &c->erase_complete_list); spin_unlock(&c->erase_completion_lock); + mutex_unlock(&c->erase_free_sem); return; } diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c index bd9ed9b0247..64989ca9ba9 100644 --- a/fs/jffs2/file.c +++ b/fs/jffs2/file.c @@ -1,18 +1,18 @@ /* * JFFS2 -- Journalling Flash File System, Version 2. * - * Copyright (C) 2001-2003 Red Hat, Inc. + * Copyright © 2001-2007 Red Hat, Inc. + * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org> * * Created by David Woodhouse <dwmw2@infradead.org> * * For licensing information, see the file 'LICENCE' in this directory.
* - * $Id: file.c,v 1.102 2005/07/06 12:13:09 dwmw2 Exp $ - * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/kernel.h> -#include <linux/slab.h> #include <linux/fs.h> #include <linux/time.h> #include <linux/pagemap.h> @@ -21,50 +21,64 @@ #include <linux/jffs2.h> #include "nodelist.h" -extern int generic_file_open(struct inode *, struct file *) __attribute__((weak)); -extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin) __attribute__((weak)); - -static int jffs2_commit_write (struct file *filp, struct page *pg, - unsigned start, unsigned end); -static int jffs2_prepare_write (struct file *filp, struct page *pg, - unsigned start, unsigned end); +static int jffs2_write_end(struct file *filp, struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct page *pg, void *fsdata); +static int jffs2_write_begin(struct file *filp, struct address_space *mapping, + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata); static int jffs2_readpage (struct file *filp, struct page *pg); -int jffs2_fsync(struct file *filp, struct dentry *dentry, int datasync) +int jffs2_fsync(struct file *filp, loff_t start, loff_t end, int datasync) { - struct inode *inode = dentry->d_inode; + struct inode *inode = filp->f_mapping->host; struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); + int ret; + + ret = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (ret) + return ret; + mutex_lock(&inode->i_mutex); /* Trigger GC to flush any pending writes for this inode */ jffs2_flush_wbuf_gc(c, inode->i_ino); - - return 0; + mutex_unlock(&inode->i_mutex); + + return 0; } -struct file_operations jffs2_file_operations = +const struct file_operations jffs2_file_operations = { .llseek = generic_file_llseek, .open = generic_file_open, - .read = generic_file_read, - .write = generic_file_write, - .ioctl = jffs2_ioctl, + .read = new_sync_read, + .read_iter = generic_file_read_iter, + .write = new_sync_write, + .write_iter = generic_file_write_iter, + .unlocked_ioctl=jffs2_ioctl, .mmap = generic_file_readonly_mmap, .fsync = jffs2_fsync, - .sendfile = generic_file_sendfile + .splice_read = generic_file_splice_read, }; /* jffs2_file_inode_operations */ -struct inode_operations jffs2_file_inode_operations = +const struct inode_operations jffs2_file_inode_operations = { - .setattr = jffs2_setattr + .get_acl = jffs2_get_acl, + .set_acl = jffs2_set_acl, + .setattr = jffs2_setattr, + .setxattr = jffs2_setxattr, + .getxattr = jffs2_getxattr, + .listxattr = jffs2_listxattr, + .removexattr = jffs2_removexattr }; -struct address_space_operations jffs2_file_address_operations = +const struct address_space_operations jffs2_file_address_operations = { .readpage = jffs2_readpage, - .prepare_write =jffs2_prepare_write, - .commit_write = jffs2_commit_write + .write_begin = jffs2_write_begin, + .write_end = jffs2_write_end, }; static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg) @@ -74,7 +88,8 @@ static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg) unsigned char *pg_buf; int ret; - D2(printk(KERN_DEBUG "jffs2_do_readpage_nolock(): ino #%lu, page at offset 0x%lx\n", inode->i_ino, pg->index << PAGE_CACHE_SHIFT)); + jffs2_dbg(2, "%s(): ino #%lu, page at offset 0x%lx\n", + __func__, inode->i_ino, pg->index << PAGE_CACHE_SHIFT); BUG_ON(!PageLocked(pg)); @@ -94,8 +109,8 @@ static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg) flush_dcache_page(pg); kunmap(pg); - D2(printk(KERN_DEBUG 
"readpage finished\n")); - return 0; + jffs2_dbg(2, "readpage finished\n"); + return ret; } int jffs2_do_readpage_unlock(struct inode *inode, struct page *pg) @@ -110,38 +125,53 @@ static int jffs2_readpage (struct file *filp, struct page *pg) { struct jffs2_inode_info *f = JFFS2_INODE_INFO(pg->mapping->host); int ret; - - down(&f->sem); + + mutex_lock(&f->sem); ret = jffs2_do_readpage_unlock(pg->mapping->host, pg); - up(&f->sem); + mutex_unlock(&f->sem); return ret; } -static int jffs2_prepare_write (struct file *filp, struct page *pg, - unsigned start, unsigned end) +static int jffs2_write_begin(struct file *filp, struct address_space *mapping, + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata) { - struct inode *inode = pg->mapping->host; + struct page *pg; + struct inode *inode = mapping->host; struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); - uint32_t pageofs = pg->index << PAGE_CACHE_SHIFT; + struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); + struct jffs2_raw_inode ri; + uint32_t alloc_len = 0; + pgoff_t index = pos >> PAGE_CACHE_SHIFT; + uint32_t pageofs = index << PAGE_CACHE_SHIFT; int ret = 0; - D1(printk(KERN_DEBUG "jffs2_prepare_write()\n")); + jffs2_dbg(1, "%s()\n", __func__); if (pageofs > inode->i_size) { + ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len, + ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); + if (ret) + return ret; + } + + mutex_lock(&f->sem); + pg = grab_cache_page_write_begin(mapping, index, flags); + if (!pg) { + if (alloc_len) + jffs2_complete_reservation(c); + mutex_unlock(&f->sem); + return -ENOMEM; + } + *pagep = pg; + + if (alloc_len) { /* Make new hole frag from old EOF to new page */ - struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); - struct jffs2_raw_inode ri; struct jffs2_full_dnode *fn; - uint32_t phys_ofs, alloc_len; - - D1(printk(KERN_DEBUG "Writing new hole frag 0x%x-0x%x between current EOF and new page\n", - (unsigned int)inode->i_size, pageofs)); - ret = jffs2_reserve_space(c, sizeof(ri), &phys_ofs, &alloc_len, ALLOC_NORMAL); - if (ret) - return ret; + jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n", + (unsigned int)inode->i_size, pageofs); - down(&f->sem); memset(&ri, 0, sizeof(ri)); ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); @@ -152,8 +182,8 @@ static int jffs2_prepare_write (struct file *filp, struct page *pg, ri.ino = cpu_to_je32(f->inocache->ino); ri.version = cpu_to_je32(++f->highest_version); ri.mode = cpu_to_jemode(inode->i_mode); - ri.uid = cpu_to_je16(inode->i_uid); - ri.gid = cpu_to_je16(inode->i_gid); + ri.uid = cpu_to_je16(i_uid_read(inode)); + ri.gid = cpu_to_je16(i_gid_read(inode)); ri.isize = cpu_to_je32(max((uint32_t)inode->i_size, pageofs)); ri.atime = ri.ctime = ri.mtime = cpu_to_je32(get_seconds()); ri.offset = cpu_to_je32(inode->i_size); @@ -162,14 +192,13 @@ static int jffs2_prepare_write (struct file *filp, struct page *pg, ri.compr = JFFS2_COMPR_ZERO; ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8)); ri.data_crc = cpu_to_je32(0); - - fn = jffs2_write_dnode(c, f, &ri, NULL, 0, phys_ofs, ALLOC_NORMAL); + + fn = jffs2_write_dnode(c, f, &ri, NULL, 0, ALLOC_NORMAL); if (IS_ERR(fn)) { ret = PTR_ERR(fn); jffs2_complete_reservation(c); - up(&f->sem); - return ret; + goto out_page; } ret = jffs2_add_full_dnode_to_inode(c, f, fn); if (f->metadata) { @@ -178,65 +207,88 @@ static int jffs2_prepare_write (struct file *filp, struct page *pg, f->metadata = NULL; } if (ret) { - D1(printk(KERN_DEBUG "Eep. 
add_full_dnode_to_inode() failed in prepare_write, returned %d\n", ret)); + jffs2_dbg(1, "Eep. add_full_dnode_to_inode() failed in write_begin, returned %d\n", + ret); jffs2_mark_node_obsolete(c, fn->raw); jffs2_free_full_dnode(fn); jffs2_complete_reservation(c); - up(&f->sem); - return ret; + goto out_page; } jffs2_complete_reservation(c); inode->i_size = pageofs; - up(&f->sem); } - - /* Read in the page if it wasn't already present, unless it's a whole page */ - if (!PageUptodate(pg) && (start || end < PAGE_CACHE_SIZE)) { - down(&f->sem); + + /* + * Read in the page if it wasn't already present. Cannot optimize away + * the whole page write case until jffs2_write_end can handle the + * case of a short-copy. + */ + if (!PageUptodate(pg)) { ret = jffs2_do_readpage_nolock(inode, pg); - up(&f->sem); + if (ret) + goto out_page; } - D1(printk(KERN_DEBUG "end prepare_write(). pg->flags %lx\n", pg->flags)); + mutex_unlock(&f->sem); + jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags); + return ret; + +out_page: + unlock_page(pg); + page_cache_release(pg); + mutex_unlock(&f->sem); return ret; } -static int jffs2_commit_write (struct file *filp, struct page *pg, - unsigned start, unsigned end) +static int jffs2_write_end(struct file *filp, struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct page *pg, void *fsdata) { /* Actually commit the write from the page cache page we're looking at. * For now, we write the full page out each time. It sucks, but it's simple */ - struct inode *inode = pg->mapping->host; + struct inode *inode = mapping->host; struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); struct jffs2_raw_inode *ri; + unsigned start = pos & (PAGE_CACHE_SIZE - 1); + unsigned end = start + copied; unsigned aligned_start = start & ~3; int ret = 0; uint32_t writtenlen = 0; - D1(printk(KERN_DEBUG "jffs2_commit_write(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n", - inode->i_ino, pg->index << PAGE_CACHE_SHIFT, start, end, pg->flags)); - - if (!start && end == PAGE_CACHE_SIZE) { - /* We need to avoid deadlock with page_cache_read() in - jffs2_garbage_collect_pass(). So we have to mark the - page up to date, to prevent page_cache_read() from - trying to re-lock it. */ - SetPageUptodate(pg); + jffs2_dbg(1, "%s(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n", + __func__, inode->i_ino, pg->index << PAGE_CACHE_SHIFT, + start, end, pg->flags); + + /* We need to avoid deadlock with page_cache_read() in + jffs2_garbage_collect_pass(). So the page must be + up to date to prevent page_cache_read() from trying + to re-lock it. */ + BUG_ON(!PageUptodate(pg)); + + if (end == PAGE_CACHE_SIZE) { + /* When writing out the end of a page, write out the + _whole_ page. This helps to reduce the number of + nodes in files which have many short writes, like + syslog files. 
*/ + aligned_start = 0; } ri = jffs2_alloc_raw_inode(); if (!ri) { - D1(printk(KERN_DEBUG "jffs2_commit_write(): Allocation of raw inode failed\n")); + jffs2_dbg(1, "%s(): Allocation of raw inode failed\n", + __func__); + unlock_page(pg); + page_cache_release(pg); return -ENOMEM; } /* Set the fields that the generic jffs2_write_inode_range() code can't find */ ri->ino = cpu_to_je32(inode->i_ino); ri->mode = cpu_to_jemode(inode->i_mode); - ri->uid = cpu_to_je16(inode->i_uid); - ri->gid = cpu_to_je16(inode->i_gid); + ri->uid = cpu_to_je16(i_uid_read(inode)); + ri->gid = cpu_to_je16(i_gid_read(inode)); ri->isize = cpu_to_je32((uint32_t)inode->i_size); ri->atime = ri->ctime = ri->mtime = cpu_to_je32(get_seconds()); @@ -254,18 +306,15 @@ static int jffs2_commit_write (struct file *filp, struct page *pg, /* There was an error writing. */ SetPageError(pg); } - + /* Adjust writtenlen for the padding we did, so we don't confuse our caller */ - if (writtenlen < (start&3)) - writtenlen = 0; - else - writtenlen -= (start&3); + writtenlen -= min(writtenlen, (start - aligned_start)); if (writtenlen) { - if (inode->i_size < (pg->index << PAGE_CACHE_SHIFT) + start + writtenlen) { - inode->i_size = (pg->index << PAGE_CACHE_SHIFT) + start + writtenlen; + if (inode->i_size < pos + writtenlen) { + inode->i_size = pos + writtenlen; inode->i_blocks = (inode->i_size + 511) >> 9; - + inode->i_ctime = inode->i_mtime = ITIME(je32_to_cpu(ri->ctime)); } } @@ -274,13 +323,17 @@ static int jffs2_commit_write (struct file *filp, struct page *pg, if (start+writtenlen < end) { /* generic_file_write has written more to the page cache than we've - actually written to the medium. Mark the page !Uptodate so that + actually written to the medium. Mark the page !Uptodate so that it gets reread */ - D1(printk(KERN_DEBUG "jffs2_commit_write(): Not all bytes written. Marking page !uptodate\n")); + jffs2_dbg(1, "%s(): Not all bytes written. Marking page !uptodate\n", + __func__); SetPageError(pg); ClearPageUptodate(pg); } - D1(printk(KERN_DEBUG "jffs2_commit_write() returning %d\n",writtenlen?writtenlen:ret)); - return writtenlen?writtenlen:ret; + jffs2_dbg(1, "%s() returning %d\n", + __func__, writtenlen > 0 ? writtenlen : ret); + unlock_page(pg); + page_cache_release(pg); + return writtenlen > 0 ? writtenlen : ret; } diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c index 5687c3f4200..601afd1afdd 100644 --- a/fs/jffs2/fs.c +++ b/fs/jffs2/fs.c @@ -1,17 +1,18 @@ /* * JFFS2 -- Journalling Flash File System, Version 2. * - * Copyright (C) 2001-2003 Red Hat, Inc. + * Copyright © 2001-2007 Red Hat, Inc. + * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org> * * Created by David Woodhouse <dwmw2@infradead.org> * * For licensing information, see the file 'LICENCE' in this directory. 
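The jffs2_write_end() hunks above do a little offset arithmetic worth spelling out: the data node starts at a 4-byte-aligned offset within the page (or at offset 0 when the copy reaches the end of the page, so that short appends such as syslog writes do not fragment the file into many tiny nodes), and the padding bytes are subtracted again before reporting back to the VFS. The sketch below reproduces that arithmetic under the assumption that the flash write succeeds in full; the toy_* names are invented, and the real code also handles short writes.

    #include <stdio.h>

    #define TOY_PAGE_SIZE 4096u

    struct toy_write {
        unsigned aligned_start; /* first byte written to flash within the page */
        unsigned node_len;      /* bytes that go into the data node */
        unsigned reported;      /* what write_end() reports back to the VFS */
    };

    static struct toy_write toy_write_end(unsigned long long pos, unsigned copied)
    {
        unsigned start = (unsigned)(pos & (TOY_PAGE_SIZE - 1));
        unsigned end = start + copied;
        unsigned aligned_start = start & ~3u;
        unsigned written, pad;

        if (end == TOY_PAGE_SIZE)
            aligned_start = 0;  /* write the whole page out */

        written = end - aligned_start;  /* assume the write succeeds in full */
        pad = start - aligned_start;
        struct toy_write w = {
            .aligned_start = aligned_start,
            .node_len = written,
            .reported = written - (pad < written ? pad : written),
        };
        return w;
    }

    int main(void)
    {
        struct toy_write a = toy_write_end(10, 5);   /* small mid-page write */
        struct toy_write b = toy_write_end(4090, 6); /* write reaching page end */
        printf("a: start=%u len=%u reported=%u\n", a.aligned_start, a.node_len, a.reported);
        printf("b: start=%u len=%u reported=%u\n", b.aligned_start, b.node_len, b.reported);
        return 0;
    }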
* - * $Id: fs.c,v 1.56 2005/07/06 12:13:09 dwmw2 Exp $ - * */ -#include <linux/config.h> +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/capability.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/fs.h> @@ -26,22 +27,21 @@ static int jffs2_flash_setup(struct jffs2_sb_info *c); -static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) +int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) { struct jffs2_full_dnode *old_metadata, *new_metadata; struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); struct jffs2_raw_inode *ri; - unsigned short dev; + union jffs2_device_node dev; unsigned char *mdata = NULL; int mdatalen = 0; unsigned int ivalid; - uint32_t phys_ofs, alloclen; + uint32_t alloclen; int ret; - D1(printk(KERN_DEBUG "jffs2_setattr(): ino #%lu\n", inode->i_ino)); - ret = inode_change_ok(inode, iattr); - if (ret) - return ret; + int alloc_type = ALLOC_NORMAL; + + jffs2_dbg(1, "%s(): ino #%lu\n", __func__, inode->i_ino); /* Special cases - we don't want more than one data node for these types on the medium at any time. So setattr @@ -50,21 +50,27 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) it out again with the appropriate data attached */ if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) { /* For these, we don't actually need to read the old node */ - dev = old_encode_dev(inode->i_rdev); + mdatalen = jffs2_encode_dev(&dev, inode->i_rdev); mdata = (char *)&dev; - mdatalen = sizeof(dev); - D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of kdev_t\n", mdatalen)); + jffs2_dbg(1, "%s(): Writing %d bytes of kdev_t\n", + __func__, mdatalen); } else if (S_ISLNK(inode->i_mode)) { + mutex_lock(&f->sem); mdatalen = f->metadata->size; mdata = kmalloc(f->metadata->size, GFP_USER); - if (!mdata) + if (!mdata) { + mutex_unlock(&f->sem); return -ENOMEM; + } ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen); if (ret) { + mutex_unlock(&f->sem); kfree(mdata); return ret; } - D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of symlink target\n", mdatalen)); + mutex_unlock(&f->sem); + jffs2_dbg(1, "%s(): Writing %d bytes of symlink target\n", + __func__, mdatalen); } ri = jffs2_alloc_raw_inode(); @@ -73,17 +79,18 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) kfree(mdata); return -ENOMEM; } - - ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &phys_ofs, &alloclen, ALLOC_NORMAL); + + ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &alloclen, + ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); if (ret) { jffs2_free_raw_inode(ri); - if (S_ISLNK(inode->i_mode & S_IFMT)) + if (S_ISLNK(inode->i_mode)) kfree(mdata); return ret; } - down(&f->sem); + mutex_lock(&f->sem); ivalid = iattr->ia_valid; - + ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE); ri->totlen = cpu_to_je32(sizeof(*ri) + mdatalen); @@ -92,15 +99,13 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) ri->ino = cpu_to_je32(inode->i_ino); ri->version = cpu_to_je32(++f->highest_version); - ri->uid = cpu_to_je16((ivalid & ATTR_UID)?iattr->ia_uid:inode->i_uid); - ri->gid = cpu_to_je16((ivalid & ATTR_GID)?iattr->ia_gid:inode->i_gid); + ri->uid = cpu_to_je16((ivalid & ATTR_UID)? + from_kuid(&init_user_ns, iattr->ia_uid):i_uid_read(inode)); + ri->gid = cpu_to_je16((ivalid & ATTR_GID)? 
+ from_kgid(&init_user_ns, iattr->ia_gid):i_gid_read(inode)); if (ivalid & ATTR_MODE) - if (iattr->ia_mode & S_ISGID && - !in_group_p(je16_to_cpu(ri->gid)) && !capable(CAP_FSETID)) - ri->mode = cpu_to_jemode(iattr->ia_mode & ~S_ISGID); - else - ri->mode = cpu_to_jemode(iattr->ia_mode); + ri->mode = cpu_to_jemode(iattr->ia_mode); else ri->mode = cpu_to_jemode(inode->i_mode); @@ -118,6 +123,10 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) ri->compr = JFFS2_COMPR_ZERO; ri->dsize = cpu_to_je32(iattr->ia_size - inode->i_size); ri->offset = cpu_to_je32(inode->i_size); + } else if (ivalid & ATTR_SIZE && !iattr->ia_size) { + /* For truncate-to-zero, treat it as deletion because + it'll always be obsoleting all previous nodes */ + alloc_type = ALLOC_DELETION; } ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); if (mdatalen) @@ -125,14 +134,14 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) else ri->data_crc = cpu_to_je32(0); - new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, phys_ofs, ALLOC_NORMAL); + new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, alloc_type); if (S_ISLNK(inode->i_mode)) kfree(mdata); - + if (IS_ERR(new_metadata)) { jffs2_complete_reservation(c); jffs2_free_raw_inode(ri); - up(&f->sem); + mutex_unlock(&f->sem); return PTR_ERR(new_metadata); } /* It worked. Update the inode */ @@ -140,18 +149,19 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) inode->i_ctime = ITIME(je32_to_cpu(ri->ctime)); inode->i_mtime = ITIME(je32_to_cpu(ri->mtime)); inode->i_mode = jemode_to_cpu(ri->mode); - inode->i_uid = je16_to_cpu(ri->uid); - inode->i_gid = je16_to_cpu(ri->gid); + i_uid_write(inode, je16_to_cpu(ri->uid)); + i_gid_write(inode, je16_to_cpu(ri->gid)); old_metadata = f->metadata; if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) - jffs2_truncate_fraglist (c, &f->fragtree, iattr->ia_size); + jffs2_truncate_fragtree (c, &f->fragtree, iattr->ia_size); if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) { jffs2_add_full_dnode_to_inode(c, f, new_metadata); inode->i_size = iattr->ia_size; + inode->i_blocks = (inode->i_size + 511) >> 9; f->metadata = NULL; } else { f->metadata = new_metadata; @@ -162,28 +172,41 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) } jffs2_free_raw_inode(ri); - up(&f->sem); + mutex_unlock(&f->sem); jffs2_complete_reservation(c); - /* We have to do the vmtruncate() without f->sem held, since - some pages may be locked and waiting for it in readpage(). + /* We have to do the truncate_setsize() without f->sem held, since + some pages may be locked and waiting for it in readpage(). We are protected from a simultaneous write() extending i_size back past iattr->ia_size, because do_truncate() holds the generic inode semaphore. 
*/ - if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) - vmtruncate(inode, iattr->ia_size); + if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) { + truncate_setsize(inode, iattr->ia_size); + inode->i_blocks = (inode->i_size + 511) >> 9; + } return 0; } int jffs2_setattr(struct dentry *dentry, struct iattr *iattr) { - return jffs2_do_setattr(dentry->d_inode, iattr); + struct inode *inode = dentry->d_inode; + int rc; + + rc = inode_change_ok(inode, iattr); + if (rc) + return rc; + + rc = jffs2_do_setattr(inode, iattr); + if (!rc && (iattr->ia_valid & ATTR_MODE)) + rc = posix_acl_chmod(inode, inode->i_mode); + + return rc; } -int jffs2_statfs(struct super_block *sb, struct kstatfs *buf) +int jffs2_statfs(struct dentry *dentry, struct kstatfs *buf) { - struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); + struct jffs2_sb_info *c = JFFS2_SB_INFO(dentry->d_sb); unsigned long avail; buf->f_type = JFFS2_SUPER_MAGIC; @@ -192,92 +215,99 @@ int jffs2_statfs(struct super_block *sb, struct kstatfs *buf) buf->f_files = 0; buf->f_ffree = 0; buf->f_namelen = JFFS2_MAX_NAME_LEN; + buf->f_fsid.val[0] = JFFS2_SUPER_MAGIC; + buf->f_fsid.val[1] = c->mtd->index; spin_lock(&c->erase_completion_lock); - avail = c->dirty_size + c->free_size; if (avail > c->sector_size * c->resv_blocks_write) avail -= c->sector_size * c->resv_blocks_write; else avail = 0; + spin_unlock(&c->erase_completion_lock); buf->f_bavail = buf->f_bfree = avail >> PAGE_SHIFT; - D2(jffs2_dump_block_lists(c)); - - spin_unlock(&c->erase_completion_lock); - return 0; } -void jffs2_clear_inode (struct inode *inode) +void jffs2_evict_inode (struct inode *inode) { - /* We can forget about this inode for now - drop all + /* We can forget about this inode for now - drop all * the nodelists associated with it, etc. 
*/ struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); - - D1(printk(KERN_DEBUG "jffs2_clear_inode(): ino #%lu mode %o\n", inode->i_ino, inode->i_mode)); + jffs2_dbg(1, "%s(): ino #%lu mode %o\n", + __func__, inode->i_ino, inode->i_mode); + truncate_inode_pages_final(&inode->i_data); + clear_inode(inode); jffs2_do_clear_inode(c, f); } -void jffs2_read_inode (struct inode *inode) +struct inode *jffs2_iget(struct super_block *sb, unsigned long ino) { struct jffs2_inode_info *f; struct jffs2_sb_info *c; struct jffs2_raw_inode latest_node; + union jffs2_device_node jdev; + struct inode *inode; + dev_t rdev = 0; int ret; - D1(printk(KERN_DEBUG "jffs2_read_inode(): inode->i_ino == %lu\n", inode->i_ino)); + jffs2_dbg(1, "%s(): ino == %lu\n", __func__, ino); + + inode = iget_locked(sb, ino); + if (!inode) + return ERR_PTR(-ENOMEM); + if (!(inode->i_state & I_NEW)) + return inode; f = JFFS2_INODE_INFO(inode); c = JFFS2_SB_INFO(inode->i_sb); jffs2_init_inode_info(f); - + mutex_lock(&f->sem); + ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node); if (ret) { - make_bad_inode(inode); - up(&f->sem); - return; + mutex_unlock(&f->sem); + iget_failed(inode); + return ERR_PTR(ret); } inode->i_mode = jemode_to_cpu(latest_node.mode); - inode->i_uid = je16_to_cpu(latest_node.uid); - inode->i_gid = je16_to_cpu(latest_node.gid); + i_uid_write(inode, je16_to_cpu(latest_node.uid)); + i_gid_write(inode, je16_to_cpu(latest_node.gid)); inode->i_size = je32_to_cpu(latest_node.isize); inode->i_atime = ITIME(je32_to_cpu(latest_node.atime)); inode->i_mtime = ITIME(je32_to_cpu(latest_node.mtime)); inode->i_ctime = ITIME(je32_to_cpu(latest_node.ctime)); - inode->i_nlink = f->inocache->nlink; + set_nlink(inode, f->inocache->pino_nlink); - inode->i_blksize = PAGE_SIZE; inode->i_blocks = (inode->i_size + 511) >> 9; - + switch (inode->i_mode & S_IFMT) { - jint16_t rdev; case S_IFLNK: inode->i_op = &jffs2_symlink_inode_operations; break; - + case S_IFDIR: { struct jffs2_full_dirent *fd; + set_nlink(inode, 2); /* parent and '.' */ for (fd=f->dents; fd; fd = fd->next) { if (fd->type == DT_DIR && fd->ino) - inode->i_nlink++; + inc_nlink(inode); } - /* and '..' 
*/ - inode->i_nlink++; /* Root dir gets i_nlink 3 for some reason */ if (inode->i_ino == 1) - inode->i_nlink++; + inc_nlink(inode); inode->i_op = &jffs2_dir_inode_operations; inode->i_fop = &jffs2_dir_operations; @@ -293,42 +323,63 @@ void jffs2_read_inode (struct inode *inode) case S_IFBLK: case S_IFCHR: /* Read the device numbers from the media */ - D1(printk(KERN_DEBUG "Reading device numbers from flash\n")); - if (jffs2_read_dnode(c, f, f->metadata, (char *)&rdev, 0, sizeof(rdev)) < 0) { + if (f->metadata->size != sizeof(jdev.old_id) && + f->metadata->size != sizeof(jdev.new_id)) { + pr_notice("Device node has strange size %d\n", + f->metadata->size); + goto error_io; + } + jffs2_dbg(1, "Reading device numbers from flash\n"); + ret = jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size); + if (ret < 0) { /* Eep */ - printk(KERN_NOTICE "Read device numbers for inode %lu failed\n", (unsigned long)inode->i_ino); - up(&f->sem); - jffs2_do_clear_inode(c, f); - make_bad_inode(inode); - return; - } + pr_notice("Read device numbers for inode %lu failed\n", + (unsigned long)inode->i_ino); + goto error; + } + if (f->metadata->size == sizeof(jdev.old_id)) + rdev = old_decode_dev(je16_to_cpu(jdev.old_id)); + else + rdev = new_decode_dev(je32_to_cpu(jdev.new_id)); case S_IFSOCK: case S_IFIFO: inode->i_op = &jffs2_file_inode_operations; - init_special_inode(inode, inode->i_mode, - old_decode_dev((je16_to_cpu(rdev)))); + init_special_inode(inode, inode->i_mode, rdev); break; default: - printk(KERN_WARNING "jffs2_read_inode(): Bogus imode %o for ino %lu\n", inode->i_mode, (unsigned long)inode->i_ino); + pr_warn("%s(): Bogus i_mode %o for ino %lu\n", + __func__, inode->i_mode, (unsigned long)inode->i_ino); } - up(&f->sem); + mutex_unlock(&f->sem); + + jffs2_dbg(1, "jffs2_read_inode() returning\n"); + unlock_new_inode(inode); + return inode; - D1(printk(KERN_DEBUG "jffs2_read_inode() returning\n")); +error_io: + ret = -EIO; +error: + mutex_unlock(&f->sem); + jffs2_do_clear_inode(c, f); + iget_failed(inode); + return ERR_PTR(ret); } -void jffs2_dirty_inode(struct inode *inode) +void jffs2_dirty_inode(struct inode *inode, int flags) { struct iattr iattr; if (!(inode->i_state & I_DIRTY_DATASYNC)) { - D2(printk(KERN_DEBUG "jffs2_dirty_inode() not calling setattr() for ino #%lu\n", inode->i_ino)); + jffs2_dbg(2, "%s(): not calling setattr() for ino #%lu\n", + __func__, inode->i_ino); return; } - D1(printk(KERN_DEBUG "jffs2_dirty_inode() calling setattr() for ino #%lu\n", inode->i_ino)); + jffs2_dbg(1, "%s(): calling setattr() for ino #%lu\n", + __func__, inode->i_ino); iattr.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_MTIME|ATTR_CTIME; iattr.ia_mode = inode->i_mode; @@ -341,7 +392,7 @@ void jffs2_dirty_inode(struct inode *inode) jffs2_do_setattr(inode, &iattr); } -int jffs2_remount_fs (struct super_block *sb, int *flags, char *data) +int jffs2_do_remount_fs(struct super_block *sb, int *flags, char *data) { struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); @@ -354,37 +405,21 @@ int jffs2_remount_fs (struct super_block *sb, int *flags, char *data) Flush the writebuffer, if neccecary, else we loose it */ if (!(sb->s_flags & MS_RDONLY)) { jffs2_stop_garbage_collect_thread(c); - down(&c->alloc_sem); + mutex_lock(&c->alloc_sem); jffs2_flush_wbuf_pad(c); - up(&c->alloc_sem); - } + mutex_unlock(&c->alloc_sem); + } if (!(*flags & MS_RDONLY)) jffs2_start_garbage_collect_thread(c); - - *flags |= MS_NOATIME; + *flags |= MS_NOATIME; return 0; } -void jffs2_write_super (struct super_block *sb) -{ 
- struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); - sb->s_dirt = 0; - - if (sb->s_flags & MS_RDONLY) - return; - - D1(printk(KERN_DEBUG "jffs2_write_super()\n")); - jffs2_garbage_collect_trigger(c); - jffs2_erase_pending_blocks(c, 0); - jffs2_flush_wbuf_gc(c, 0); -} - - /* jffs2_new_inode: allocate a new inode and inocache, add it to the hash, fill in the raw_inode while you're at it. */ -struct inode *jffs2_new_inode (struct inode *dir_i, int mode, struct jffs2_raw_inode *ri) +struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode, struct jffs2_raw_inode *ri) { struct inode *inode; struct super_block *sb = dir_i->i_sb; @@ -392,53 +427,88 @@ struct inode *jffs2_new_inode (struct inode *dir_i, int mode, struct jffs2_raw_i struct jffs2_inode_info *f; int ret; - D1(printk(KERN_DEBUG "jffs2_new_inode(): dir_i %ld, mode 0x%x\n", dir_i->i_ino, mode)); + jffs2_dbg(1, "%s(): dir_i %ld, mode 0x%x\n", + __func__, dir_i->i_ino, mode); c = JFFS2_SB_INFO(sb); - + inode = new_inode(sb); - + if (!inode) return ERR_PTR(-ENOMEM); f = JFFS2_INODE_INFO(inode); jffs2_init_inode_info(f); + mutex_lock(&f->sem); memset(ri, 0, sizeof(*ri)); /* Set OS-specific defaults for new inodes */ - ri->uid = cpu_to_je16(current->fsuid); + ri->uid = cpu_to_je16(from_kuid(&init_user_ns, current_fsuid())); if (dir_i->i_mode & S_ISGID) { - ri->gid = cpu_to_je16(dir_i->i_gid); + ri->gid = cpu_to_je16(i_gid_read(dir_i)); if (S_ISDIR(mode)) mode |= S_ISGID; } else { - ri->gid = cpu_to_je16(current->fsgid); + ri->gid = cpu_to_je16(from_kgid(&init_user_ns, current_fsgid())); + } + + /* POSIX ACLs have to be processed now, at least partly. + The umask is only applied if there's no default ACL */ + ret = jffs2_init_acl_pre(dir_i, inode, &mode); + if (ret) { + mutex_unlock(&f->sem); + make_bad_inode(inode); + iput(inode); + return ERR_PTR(ret); } - ri->mode = cpu_to_jemode(mode); ret = jffs2_do_new_inode (c, f, mode, ri); if (ret) { + mutex_unlock(&f->sem); make_bad_inode(inode); iput(inode); return ERR_PTR(ret); } - inode->i_nlink = 1; + set_nlink(inode, 1); inode->i_ino = je32_to_cpu(ri->ino); inode->i_mode = jemode_to_cpu(ri->mode); - inode->i_gid = je16_to_cpu(ri->gid); - inode->i_uid = je16_to_cpu(ri->uid); + i_gid_write(inode, je16_to_cpu(ri->gid)); + i_uid_write(inode, je16_to_cpu(ri->uid)); inode->i_atime = inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC; ri->atime = ri->mtime = ri->ctime = cpu_to_je32(I_SEC(inode->i_mtime)); - inode->i_blksize = PAGE_SIZE; inode->i_blocks = 0; inode->i_size = 0; - insert_inode_hash(inode); + if (insert_inode_locked(inode) < 0) { + mutex_unlock(&f->sem); + make_bad_inode(inode); + iput(inode); + return ERR_PTR(-EINVAL); + } return inode; } +static int calculate_inocache_hashsize(uint32_t flash_size) +{ + /* + * Pick a inocache hash size based on the size of the medium. + * Count how many megabytes we're dealing with, apply a hashsize twice + * that size, but rounding down to the usual big powers of 2. And keep + * to sensible bounds. 
+ */ + + int size_mb = flash_size / 1024 / 1024; + int hashsize = (size_mb * 2) & ~0x3f; + + if (hashsize < INOCACHE_HASHSIZE_MIN) + return INOCACHE_HASHSIZE_MIN; + if (hashsize > INOCACHE_HASHSIZE_MAX) + return INOCACHE_HASHSIZE_MAX; + + return hashsize; +} int jffs2_do_fill_super(struct super_block *sb, void *data, int silent) { @@ -449,81 +519,73 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent) c = JFFS2_SB_INFO(sb); + /* Do not support the MLC nand */ + if (c->mtd->type == MTD_MLCNANDFLASH) + return -EINVAL; + #ifndef CONFIG_JFFS2_FS_WRITEBUFFER if (c->mtd->type == MTD_NANDFLASH) { - printk(KERN_ERR "jffs2: Cannot operate on NAND flash unless jffs2 NAND support is compiled in.\n"); + pr_err("Cannot operate on NAND flash unless jffs2 NAND support is compiled in\n"); return -EINVAL; } if (c->mtd->type == MTD_DATAFLASH) { - printk(KERN_ERR "jffs2: Cannot operate on DataFlash unless jffs2 DataFlash support is compiled in.\n"); + pr_err("Cannot operate on DataFlash unless jffs2 DataFlash support is compiled in\n"); return -EINVAL; } #endif c->flash_size = c->mtd->size; - - /* - * Check, if we have to concatenate physical blocks to larger virtual blocks - * to reduce the memorysize for c->blocks. (kmalloc allows max. 128K allocation) - */ - c->sector_size = c->mtd->erasesize; + c->sector_size = c->mtd->erasesize; blocks = c->flash_size / c->sector_size; - if (!(c->mtd->flags & MTD_NO_VIRTBLOCKS)) { - while ((blocks * sizeof (struct jffs2_eraseblock)) > (128 * 1024)) { - blocks >>= 1; - c->sector_size <<= 1; - } - } /* * Size alignment check */ if ((c->sector_size * blocks) != c->flash_size) { - c->flash_size = c->sector_size * blocks; - printk(KERN_INFO "jffs2: Flash size not aligned to erasesize, reducing to %dKiB\n", + c->flash_size = c->sector_size * blocks; + pr_info("Flash size not aligned to erasesize, reducing to %dKiB\n", c->flash_size / 1024); } - if (c->sector_size != c->mtd->erasesize) - printk(KERN_INFO "jffs2: Erase block size too small (%dKiB). Using virtual blocks size (%dKiB) instead\n", - c->mtd->erasesize / 1024, c->sector_size / 1024); - if (c->flash_size < 5*c->sector_size) { - printk(KERN_ERR "jffs2: Too few erase blocks (%d)\n", c->flash_size / c->sector_size); + pr_err("Too few erase blocks (%d)\n", + c->flash_size / c->sector_size); return -EINVAL; } c->cleanmarker_size = sizeof(struct jffs2_unknown_node); - /* Joern -- stick alignment for weird 8-byte-page flash here */ /* NAND (or other bizarre) flash... 
do setup accordingly */ ret = jffs2_flash_setup(c); if (ret) return ret; - c->inocache_list = kmalloc(INOCACHE_HASHSIZE * sizeof(struct jffs2_inode_cache *), GFP_KERNEL); + c->inocache_hashsize = calculate_inocache_hashsize(c->flash_size); + c->inocache_list = kcalloc(c->inocache_hashsize, sizeof(struct jffs2_inode_cache *), GFP_KERNEL); if (!c->inocache_list) { ret = -ENOMEM; goto out_wbuf; } - memset(c->inocache_list, 0, INOCACHE_HASHSIZE * sizeof(struct jffs2_inode_cache *)); + + jffs2_init_xattr_subsystem(c); if ((ret = jffs2_do_mount_fs(c))) goto out_inohash; - ret = -EINVAL; - - D1(printk(KERN_DEBUG "jffs2_do_fill_super(): Getting root inode\n")); - root_i = iget(sb, 1); - if (is_bad_inode(root_i)) { - D1(printk(KERN_WARNING "get root inode failed\n")); - goto out_nodes; + jffs2_dbg(1, "%s(): Getting root inode\n", __func__); + root_i = jffs2_iget(sb, 1); + if (IS_ERR(root_i)) { + jffs2_dbg(1, "get root inode failed\n"); + ret = PTR_ERR(root_i); + goto out_root; } - D1(printk(KERN_DEBUG "jffs2_do_fill_super(): d_alloc_root()\n")); - sb->s_root = d_alloc_root(root_i); + ret = -ENOMEM; + + jffs2_dbg(1, "%s(): d_make_root()\n", __func__); + sb->s_root = d_make_root(root_i); if (!sb->s_root) - goto out_root_i; + goto out_root; sb->s_maxbytes = 0xFFFFFFFF; sb->s_blocksize = PAGE_CACHE_SIZE; @@ -533,16 +595,15 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent) jffs2_start_garbage_collect_thread(c); return 0; - out_root_i: - iput(root_i); - out_nodes: +out_root: jffs2_free_ino_caches(c); jffs2_free_raw_node_refs(c); - if (c->mtd->flags & MTD_NO_VIRTBLOCKS) + if (jffs2_blocks_use_vmalloc(c)) vfree(c->blocks); else kfree(c->blocks); out_inohash: + jffs2_clear_xattr_subsystem(c); kfree(c->inocache_list); out_wbuf: jffs2_flash_cleanup(c); @@ -557,41 +618,43 @@ void jffs2_gc_release_inode(struct jffs2_sb_info *c, } struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c, - int inum, int nlink) + int inum, int unlinked) { struct inode *inode; struct jffs2_inode_cache *ic; - if (!nlink) { + + if (unlinked) { /* The inode has zero nlink but its nodes weren't yet marked - obsolete. This has to be because we're still waiting for + obsolete. This has to be because we're still waiting for the final (close() and) iput() to happen. - There's a possibility that the final iput() could have + There's a possibility that the final iput() could have happened while we were contemplating. In order to ensure that we don't cause a new read_inode() (which would fail) for the inode in question, we use ilookup() in this case instead of iget(). - The nlink can't _become_ zero at this point because we're + The nlink can't _become_ zero at this point because we're holding the alloc_sem, and jffs2_do_unlink() would also need that while decrementing nlink on any inode. */ inode = ilookup(OFNI_BS_2SFFJ(c), inum); if (!inode) { - D1(printk(KERN_DEBUG "ilookup() failed for ino #%u; inode is probably deleted.\n", - inum)); + jffs2_dbg(1, "ilookup() failed for ino #%u; inode is probably deleted.\n", + inum); spin_lock(&c->inocache_lock); ic = jffs2_get_ino_cache(c, inum); if (!ic) { - D1(printk(KERN_DEBUG "Inode cache for ino #%u is gone.\n", inum)); + jffs2_dbg(1, "Inode cache for ino #%u is gone\n", + inum); spin_unlock(&c->inocache_lock); return NULL; } if (ic->state != INO_STATE_CHECKEDABSENT) { /* Wait for progress. 
Don't just loop */ - D1(printk(KERN_DEBUG "Waiting for ino #%u in state %d\n", - ic->ino, ic->state)); + jffs2_dbg(1, "Waiting for ino #%u in state %d\n", + ic->ino, ic->state); sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); } else { spin_unlock(&c->inocache_lock); @@ -604,13 +667,13 @@ struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c, jffs2_do_unlink() would need the alloc_sem and we have it. Just iget() it, and if read_inode() is necessary that's OK. */ - inode = iget(OFNI_BS_2SFFJ(c), inum); - if (!inode) - return ERR_PTR(-ENOMEM); + inode = jffs2_iget(OFNI_BS_2SFFJ(c), inum); + if (IS_ERR(inode)) + return ERR_CAST(inode); } if (is_bad_inode(inode)) { - printk(KERN_NOTICE "Eep. read_inode() failed for ino #%u. nlink %d\n", - inum, nlink); + pr_notice("Eep. read_inode() failed for ino #%u. unlinked %d\n", + inum, unlinked); /* NB. This will happen again. We need to do something appropriate here. */ iput(inode); return ERR_PTR(-EIO); @@ -619,19 +682,19 @@ struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c, return JFFS2_INODE_INFO(inode); } -unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c, - struct jffs2_inode_info *f, +unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c, + struct jffs2_inode_info *f, unsigned long offset, unsigned long *priv) { struct inode *inode = OFNI_EDONI_2SFFJ(f); struct page *pg; - pg = read_cache_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT, + pg = read_cache_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT, (void *)jffs2_do_readpage_unlock, inode); if (IS_ERR(pg)) return (void *)pg; - + *priv = (unsigned long)pg; return kmap(pg); } @@ -648,7 +711,7 @@ void jffs2_gc_release_page(struct jffs2_sb_info *c, static int jffs2_flash_setup(struct jffs2_sb_info *c) { int ret = 0; - + if (jffs2_cleanmarker_oob(c)) { /* NAND flash... do setup accordingly */ ret = jffs2_nand_flash_setup(c); @@ -656,20 +719,27 @@ static int jffs2_flash_setup(struct jffs2_sb_info *c) { return ret; } - /* add setups for other bizarre flashes here... */ - if (jffs2_nor_ecc(c)) { - ret = jffs2_nor_ecc_flash_setup(c); - if (ret) - return ret; - } - /* and Dataflash */ if (jffs2_dataflash(c)) { ret = jffs2_dataflash_setup(c); if (ret) return ret; } - + + /* and Intel "Sibley" flash */ + if (jffs2_nor_wbuf_flash(c)) { + ret = jffs2_nor_wbuf_flash_setup(c); + if (ret) + return ret; + } + + /* and an UBI volume */ + if (jffs2_ubivol(c)) { + ret = jffs2_ubivol_setup(c); + if (ret) + return ret; + } + return ret; } @@ -679,13 +749,18 @@ void jffs2_flash_cleanup(struct jffs2_sb_info *c) { jffs2_nand_flash_cleanup(c); } - /* add cleanups for other bizarre flashes here... */ - if (jffs2_nor_ecc(c)) { - jffs2_nor_ecc_flash_cleanup(c); - } - /* and DataFlash */ if (jffs2_dataflash(c)) { jffs2_dataflash_cleanup(c); } + + /* and Intel "Sibley" flash */ + if (jffs2_nor_wbuf_flash(c)) { + jffs2_nor_wbuf_flash_cleanup(c); + } + + /* and an UBI volume */ + if (jffs2_ubivol(c)) { + jffs2_ubivol_cleanup(c); + } } diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c index 7086cd63450..5a2dec2b064 100644 --- a/fs/jffs2/gc.c +++ b/fs/jffs2/gc.c @@ -1,16 +1,17 @@ /* * JFFS2 -- Journalling Flash File System, Version 2. * - * Copyright (C) 2001-2003 Red Hat, Inc. + * Copyright © 2001-2007 Red Hat, Inc. + * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org> * * Created by David Woodhouse <dwmw2@infradead.org> * * For licensing information, see the file 'LICENCE' in this directory. 
* - * $Id: gc.c,v 1.148 2005/04/09 10:47:00 dedekind Exp $ - * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/kernel.h> #include <linux/mtd/mtd.h> #include <linux/slab.h> @@ -21,14 +22,14 @@ #include "nodelist.h" #include "compr.h" -static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, +static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, struct jffs2_raw_node_ref *raw); -static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, +static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, struct jffs2_inode_info *f, struct jffs2_full_dnode *fd); -static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, +static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, struct jffs2_inode_info *f, struct jffs2_full_dirent *fd); -static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, +static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, struct jffs2_inode_info *f, struct jffs2_full_dirent *fd); static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn, @@ -52,44 +53,44 @@ static struct jffs2_eraseblock *jffs2_find_gc_block(struct jffs2_sb_info *c) number of free blocks is low. */ again: if (!list_empty(&c->bad_used_list) && c->nr_free_blocks > c->resv_blocks_gcbad) { - D1(printk(KERN_DEBUG "Picking block from bad_used_list to GC next\n")); + jffs2_dbg(1, "Picking block from bad_used_list to GC next\n"); nextlist = &c->bad_used_list; } else if (n < 50 && !list_empty(&c->erasable_list)) { - /* Note that most of them will have gone directly to be erased. + /* Note that most of them will have gone directly to be erased. So don't favour the erasable_list _too_ much. 
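The thresholds used in this selection (comparisons against 50, 110 and 126, presumably over a draw in the 0..127 range) amount to a weighted choice between the candidate lists. A rough host-side model of the policy only, using rand() purely as a stand-in for whatever source the kernel draws from:

#include <stdio.h>
#include <stdlib.h>

/* Policy model: roughly 39% erasable, 47% very dirty, 12.5% dirty and
 * a small clean-list fallback. The real code additionally skips any
 * list that happens to be empty and always prefers bad_used_list. */
static const char *pick_gc_list(int n)
{
	if (n < 50)
		return "erasable_list";
	if (n < 110)
		return "very_dirty_list";
	if (n < 126)
		return "dirty_list";
	return "clean_list";
}

int main(void)
{
	for (int i = 0; i < 8; i++) {
		int n = rand() % 128;
		printf("draw %3d -> %s\n", n, pick_gc_list(n));
	}
	return 0;
}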
*/ - D1(printk(KERN_DEBUG "Picking block from erasable_list to GC next\n")); + jffs2_dbg(1, "Picking block from erasable_list to GC next\n"); nextlist = &c->erasable_list; } else if (n < 110 && !list_empty(&c->very_dirty_list)) { /* Most of the time, pick one off the very_dirty list */ - D1(printk(KERN_DEBUG "Picking block from very_dirty_list to GC next\n")); + jffs2_dbg(1, "Picking block from very_dirty_list to GC next\n"); nextlist = &c->very_dirty_list; } else if (n < 126 && !list_empty(&c->dirty_list)) { - D1(printk(KERN_DEBUG "Picking block from dirty_list to GC next\n")); + jffs2_dbg(1, "Picking block from dirty_list to GC next\n"); nextlist = &c->dirty_list; } else if (!list_empty(&c->clean_list)) { - D1(printk(KERN_DEBUG "Picking block from clean_list to GC next\n")); + jffs2_dbg(1, "Picking block from clean_list to GC next\n"); nextlist = &c->clean_list; } else if (!list_empty(&c->dirty_list)) { - D1(printk(KERN_DEBUG "Picking block from dirty_list to GC next (clean_list was empty)\n")); + jffs2_dbg(1, "Picking block from dirty_list to GC next (clean_list was empty)\n"); nextlist = &c->dirty_list; } else if (!list_empty(&c->very_dirty_list)) { - D1(printk(KERN_DEBUG "Picking block from very_dirty_list to GC next (clean_list and dirty_list were empty)\n")); + jffs2_dbg(1, "Picking block from very_dirty_list to GC next (clean_list and dirty_list were empty)\n"); nextlist = &c->very_dirty_list; } else if (!list_empty(&c->erasable_list)) { - D1(printk(KERN_DEBUG "Picking block from erasable_list to GC next (clean_list and {very_,}dirty_list were empty)\n")); + jffs2_dbg(1, "Picking block from erasable_list to GC next (clean_list and {very_,}dirty_list were empty)\n"); nextlist = &c->erasable_list; } else if (!list_empty(&c->erasable_pending_wbuf_list)) { /* There are blocks are wating for the wbuf sync */ - D1(printk(KERN_DEBUG "Synching wbuf in order to reuse erasable_pending_wbuf_list blocks\n")); + jffs2_dbg(1, "Synching wbuf in order to reuse erasable_pending_wbuf_list blocks\n"); spin_unlock(&c->erase_completion_lock); jffs2_flush_wbuf_pad(c); spin_lock(&c->erase_completion_lock); goto again; } else { /* Eep. All were empty */ - D1(printk(KERN_NOTICE "jffs2: No clean, dirty _or_ erasable blocks to GC from! Where are they all?\n")); + jffs2_dbg(1, "No clean, dirty _or_ erasable blocks to GC from! Where are they all?\n"); return NULL; } @@ -98,20 +99,21 @@ again: c->gcblock = ret; ret->gc_node = ret->first_node; if (!ret->gc_node) { - printk(KERN_WARNING "Eep. ret->gc_node for block at 0x%08x is NULL\n", ret->offset); + pr_warn("Eep. ret->gc_node for block at 0x%08x is NULL\n", + ret->offset); BUG(); } - + /* Have we accidentally picked a clean block with wasted space ? 
*/ if (ret->wasted_size) { - D1(printk(KERN_DEBUG "Converting wasted_size %08x to dirty_size\n", ret->wasted_size)); + jffs2_dbg(1, "Converting wasted_size %08x to dirty_size\n", + ret->wasted_size); ret->dirty_size += ret->wasted_size; c->wasted_size -= ret->wasted_size; c->dirty_size += ret->wasted_size; ret->wasted_size = 0; } - D2(jffs2_dump_block_lists(c)); return ret; } @@ -125,9 +127,11 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) struct jffs2_inode_cache *ic; struct jffs2_eraseblock *jeb; struct jffs2_raw_node_ref *raw; + uint32_t gcblock_dirty; int ret = 0, inum, nlink; + int xattr = 0; - if (down_interruptible(&c->alloc_sem)) + if (mutex_lock_interruptible(&c->alloc_sem)) return -EINTR; for (;;) { @@ -137,18 +141,22 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) /* We can't start doing GC yet. We haven't finished checking the node CRCs etc. Do it now. */ - + /* checked_ino is protected by the alloc_sem */ - if (c->checked_ino > c->highest_ino) { - printk(KERN_CRIT "Checked all inodes but still 0x%x bytes of unchecked space?\n", - c->unchecked_size); - D2(jffs2_dump_block_lists(c)); + if (c->checked_ino > c->highest_ino && xattr) { + pr_crit("Checked all inodes but still 0x%x bytes of unchecked space?\n", + c->unchecked_size); + jffs2_dbg_dump_block_lists_nolock(c); spin_unlock(&c->erase_completion_lock); - BUG(); + mutex_unlock(&c->alloc_sem); + return -ENOSPC; } spin_unlock(&c->erase_completion_lock); + if (!xattr) + xattr = jffs2_verify_xattr(c); + spin_lock(&c->inocache_lock); ic = jffs2_get_ino_cache(c, c->checked_ino++); @@ -158,31 +166,39 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) continue; } - if (!ic->nlink) { - D1(printk(KERN_DEBUG "Skipping check of ino #%d with nlink zero\n", - ic->ino)); + if (!ic->pino_nlink) { + jffs2_dbg(1, "Skipping check of ino #%d with nlink/pino zero\n", + ic->ino); spin_unlock(&c->inocache_lock); + jffs2_xattr_delete_inode(c, ic); continue; } switch(ic->state) { case INO_STATE_CHECKEDABSENT: case INO_STATE_PRESENT: - D1(printk(KERN_DEBUG "Skipping ino #%u already checked\n", ic->ino)); + jffs2_dbg(1, "Skipping ino #%u already checked\n", + ic->ino); spin_unlock(&c->inocache_lock); continue; case INO_STATE_GC: case INO_STATE_CHECKING: - printk(KERN_WARNING "Inode #%u is in state %d during CRC check phase!\n", ic->ino, ic->state); + pr_warn("Inode #%u is in state %d during CRC check phase!\n", + ic->ino, ic->state); spin_unlock(&c->inocache_lock); BUG(); case INO_STATE_READING: /* We need to wait for it to finish, lest we move on - and trigger the BUG() above while we haven't yet + and trigger the BUG() above while we haven't yet finished checking all its nodes */ - D1(printk(KERN_DEBUG "Waiting for ino #%u to finish reading\n", ic->ino)); - up(&c->alloc_sem); + jffs2_dbg(1, "Waiting for ino #%u to finish reading\n", + ic->ino); + /* We need to come back again for the _same_ inode. We've + made no progress in this case, but that should be OK */ + c->checked_ino--; + + mutex_unlock(&c->alloc_sem); sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); return 0; @@ -195,17 +211,33 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) ic->state = INO_STATE_CHECKING; spin_unlock(&c->inocache_lock); - D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() triggering inode scan of ino#%u\n", ic->ino)); + jffs2_dbg(1, "%s(): triggering inode scan of ino#%u\n", + __func__, ic->ino); ret = jffs2_do_crccheck_inode(c, ic); if (ret) - printk(KERN_WARNING "Returned error for crccheck of ino #%u. 
Expect badness...\n", ic->ino); + pr_warn("Returned error for crccheck of ino #%u. Expect badness...\n", + ic->ino); jffs2_set_inocache_state(c, ic, INO_STATE_CHECKEDABSENT); - up(&c->alloc_sem); + mutex_unlock(&c->alloc_sem); return ret; } + /* If there are any blocks which need erasing, erase them now */ + if (!list_empty(&c->erase_complete_list) || + !list_empty(&c->erase_pending_list)) { + spin_unlock(&c->erase_completion_lock); + mutex_unlock(&c->alloc_sem); + jffs2_dbg(1, "%s(): erasing pending blocks\n", __func__); + if (jffs2_erase_pending_blocks(c, 1)) + return 0; + + jffs2_dbg(1, "No progress from erasing block; doing GC anyway\n"); + mutex_lock(&c->alloc_sem); + spin_lock(&c->erase_completion_lock); + } + /* First, work out which block we're garbage-collecting */ jeb = c->gcblock; @@ -213,60 +245,93 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) jeb = jffs2_find_gc_block(c); if (!jeb) { - D1 (printk(KERN_NOTICE "jffs2: Couldn't find erase block to garbage collect!\n")); + /* Couldn't find a free block. But maybe we can just erase one and make 'progress'? */ + if (c->nr_erasing_blocks) { + spin_unlock(&c->erase_completion_lock); + mutex_unlock(&c->alloc_sem); + return -EAGAIN; + } + jffs2_dbg(1, "Couldn't find erase block to garbage collect!\n"); spin_unlock(&c->erase_completion_lock); - up(&c->alloc_sem); + mutex_unlock(&c->alloc_sem); return -EIO; } - D1(printk(KERN_DEBUG "GC from block %08x, used_size %08x, dirty_size %08x, free_size %08x\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->free_size)); + jffs2_dbg(1, "GC from block %08x, used_size %08x, dirty_size %08x, free_size %08x\n", + jeb->offset, jeb->used_size, jeb->dirty_size, jeb->free_size); D1(if (c->nextblock) printk(KERN_DEBUG "Nextblock at %08x, used_size %08x, dirty_size %08x, wasted_size %08x, free_size %08x\n", c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->free_size)); if (!jeb->used_size) { - up(&c->alloc_sem); + mutex_unlock(&c->alloc_sem); goto eraseit; } raw = jeb->gc_node; - + gcblock_dirty = jeb->dirty_size; + while(ref_obsolete(raw)) { - D1(printk(KERN_DEBUG "Node at 0x%08x is obsolete... skipping\n", ref_offset(raw))); - raw = raw->next_phys; + jffs2_dbg(1, "Node at 0x%08x is obsolete... skipping\n", + ref_offset(raw)); + raw = ref_next(raw); if (unlikely(!raw)) { - printk(KERN_WARNING "eep. End of raw list while still supposedly nodes to GC\n"); - printk(KERN_WARNING "erase block at 0x%08x. free_size 0x%08x, dirty_size 0x%08x, used_size 0x%08x\n", - jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size); + pr_warn("eep. End of raw list while still supposedly nodes to GC\n"); + pr_warn("erase block at 0x%08x. free_size 0x%08x, dirty_size 0x%08x, used_size 0x%08x\n", + jeb->offset, jeb->free_size, + jeb->dirty_size, jeb->used_size); jeb->gc_node = raw; spin_unlock(&c->erase_completion_lock); - up(&c->alloc_sem); + mutex_unlock(&c->alloc_sem); BUG(); } } jeb->gc_node = raw; - D1(printk(KERN_DEBUG "Going to garbage collect node at 0x%08x\n", ref_offset(raw))); + jffs2_dbg(1, "Going to garbage collect node at 0x%08x\n", + ref_offset(raw)); if (!raw->next_in_ino) { /* Inode-less node. 
Clean marker, snapshot or something like that */ - /* FIXME: If it's something that needs to be copied, including something - we don't grok that has JFFS2_NODETYPE_RWCOMPAT_COPY, we should do so */ spin_unlock(&c->erase_completion_lock); - jffs2_mark_node_obsolete(c, raw); - up(&c->alloc_sem); + if (ref_flags(raw) == REF_PRISTINE) { + /* It's an unknown node with JFFS2_FEATURE_RWCOMPAT_COPY */ + jffs2_garbage_collect_pristine(c, NULL, raw); + } else { + /* Just mark it obsolete */ + jffs2_mark_node_obsolete(c, raw); + } + mutex_unlock(&c->alloc_sem); goto eraseit_lock; } ic = jffs2_raw_ref_to_ic(raw); +#ifdef CONFIG_JFFS2_FS_XATTR + /* When 'ic' refers xattr_datum/xattr_ref, this node is GCed as xattr. + * We can decide whether this node is inode or xattr by ic->class. */ + if (ic->class == RAWNODE_CLASS_XATTR_DATUM + || ic->class == RAWNODE_CLASS_XATTR_REF) { + spin_unlock(&c->erase_completion_lock); + + if (ic->class == RAWNODE_CLASS_XATTR_DATUM) { + ret = jffs2_garbage_collect_xattr_datum(c, (struct jffs2_xattr_datum *)ic, raw); + } else { + ret = jffs2_garbage_collect_xattr_ref(c, (struct jffs2_xattr_ref *)ic, raw); + } + goto test_gcnode; + } +#endif + /* We need to hold the inocache. Either the erase_completion_lock or - the inocache_lock are sufficient; we trade down since the inocache_lock + the inocache_lock are sufficient; we trade down since the inocache_lock causes less contention. */ spin_lock(&c->inocache_lock); spin_unlock(&c->erase_completion_lock); - D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass collecting from block @0x%08x. Node @0x%08x(%d), ino #%u\n", jeb->offset, ref_offset(raw), ref_flags(raw), ic->ino)); + jffs2_dbg(1, "%s(): collecting from block @0x%08x. Node @0x%08x(%d), ino #%u\n", + __func__, jeb->offset, ref_offset(raw), ref_flags(raw), + ic->ino); /* Three possibilities: 1. Inode is already in-core. We must iget it and do proper @@ -279,15 +344,15 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) switch(ic->state) { case INO_STATE_CHECKEDABSENT: - /* It's been checked, but it's not currently in-core. + /* It's been checked, but it's not currently in-core. We can just copy any pristine nodes, but have to prevent anyone else from doing read_inode() while we're at it, so we set the state accordingly */ if (ref_flags(raw) == REF_PRISTINE) ic->state = INO_STATE_GC; else { - D1(printk(KERN_DEBUG "Ino #%u is absent but node not REF_PRISTINE. Reading.\n", - ic->ino)); + jffs2_dbg(1, "Ino #%u is absent but node not REF_PRISTINE. Reading.\n", + ic->ino); } break; @@ -299,13 +364,13 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) case INO_STATE_CHECKING: case INO_STATE_GC: /* Should never happen. We should have finished checking - by the time we actually start doing any GC, and since - we're holding the alloc_sem, no other garbage collection + by the time we actually start doing any GC, and since + we're holding the alloc_sem, no other garbage collection can happen. */ - printk(KERN_CRIT "Inode #%u already in state %d in jffs2_garbage_collect_pass()!\n", - ic->ino, ic->state); - up(&c->alloc_sem); + pr_crit("Inode #%u already in state %d in jffs2_garbage_collect_pass()!\n", + ic->ino, ic->state); + mutex_unlock(&c->alloc_sem); spin_unlock(&c->inocache_lock); BUG(); @@ -316,25 +381,25 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) the alloc_sem() (for marking nodes invalid) so we must drop the alloc_sem before sleeping. 
*/ - up(&c->alloc_sem); - D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() waiting for ino #%u in state %d\n", - ic->ino, ic->state)); + mutex_unlock(&c->alloc_sem); + jffs2_dbg(1, "%s(): waiting for ino #%u in state %d\n", + __func__, ic->ino, ic->state); sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); - /* And because we dropped the alloc_sem we must start again from the + /* And because we dropped the alloc_sem we must start again from the beginning. Ponder chance of livelock here -- we're returning success without actually making any progress. - Q: What are the chances that the inode is back in INO_STATE_READING + Q: What are the chances that the inode is back in INO_STATE_READING again by the time we next enter this function? And that this happens enough times to cause a real delay? - A: Small enough that I don't care :) + A: Small enough that I don't care :) */ return 0; } /* OK. Now if the inode is in state INO_STATE_GC, we are going to copy the - node intact, and we don't have to muck about with the fragtree etc. + node intact, and we don't have to muck about with the fragtree etc. because we know it's not in-core. If it _was_ in-core, we go through all the iget() crap anyway */ @@ -349,7 +414,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) if (ret != -EBADFD) { spin_unlock(&c->inocache_lock); - goto release_sem; + goto test_gcnode; } /* Fall through if it wanted us to, with inocache_lock held */ @@ -363,10 +428,10 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) it's vaguely possible. */ inum = ic->ino; - nlink = ic->nlink; + nlink = ic->pino_nlink; spin_unlock(&c->inocache_lock); - f = jffs2_gc_fetch_inode(c, inum, nlink); + f = jffs2_gc_fetch_inode(c, inum, !nlink); if (IS_ERR(f)) { ret = PTR_ERR(f); goto release_sem; @@ -380,8 +445,15 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) jffs2_gc_release_inode(c, f); + test_gcnode: + if (jeb->dirty_size == gcblock_dirty && !ref_obsolete(jeb->gc_node)) { + /* Eep. This really should never happen. GC is broken */ + pr_err("Error garbage collecting node at %08x!\n", + ref_offset(jeb->gc_node)); + ret = -ENOSPC; + } release_sem: - up(&c->alloc_sem); + mutex_unlock(&c->alloc_sem); eraseit_lock: /* If we've finished this block, start it erasing */ @@ -389,12 +461,13 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) eraseit: if (c->gcblock && !c->gcblock->used_size) { - D1(printk(KERN_DEBUG "Block at 0x%08x completely obsoleted by GC. Moving to erase_pending_list\n", c->gcblock->offset)); + jffs2_dbg(1, "Block at 0x%08x completely obsoleted by GC. Moving to erase_pending_list\n", + c->gcblock->offset); /* We're GC'ing an empty block? */ list_add_tail(&c->gcblock->list, &c->erase_pending_list); c->gcblock = NULL; c->nr_erasing_blocks++; - jffs2_erase_pending_trigger(c); + jffs2_garbage_collect_trigger(c); } spin_unlock(&c->erase_completion_lock); @@ -410,7 +483,7 @@ static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_era uint32_t start = 0, end = 0, nrfrags = 0; int ret = 0; - down(&f->sem); + mutex_lock(&f->sem); /* Now we have the lock for this inode. Check that it's still the one at the head of the list. */ @@ -419,12 +492,12 @@ static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_era if (c->gcblock != jeb) { spin_unlock(&c->erase_completion_lock); - D1(printk(KERN_DEBUG "GC block is no longer gcblock. Restart\n")); + jffs2_dbg(1, "GC block is no longer gcblock. 
Restart\n"); goto upnout; } if (ref_obsolete(raw)) { spin_unlock(&c->erase_completion_lock); - D1(printk(KERN_DEBUG "node to be GC'd was obsoleted in the meantime.\n")); + jffs2_dbg(1, "node to be GC'd was obsoleted in the meantime.\n"); /* They'll call again */ goto upnout; } @@ -454,7 +527,7 @@ static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_era if (!ret) { /* Urgh. Return it sensibly. */ frag->node->raw = f->inocache->nodes; - } + } if (ret != -EBADFD) goto upnout; } @@ -468,7 +541,7 @@ static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_era } goto upnout; } - + /* Wasn't a dnode. Try dirent */ for (fd = f->dents; fd; fd=fd->next) { if (fd->raw == raw) @@ -480,41 +553,46 @@ static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_era } else if (fd) { ret = jffs2_garbage_collect_deletion_dirent(c, jeb, f, fd); } else { - printk(KERN_WARNING "Raw node at 0x%08x wasn't in node lists for ino #%u\n", - ref_offset(raw), f->inocache->ino); + pr_warn("Raw node at 0x%08x wasn't in node lists for ino #%u\n", + ref_offset(raw), f->inocache->ino); if (ref_obsolete(raw)) { - printk(KERN_WARNING "But it's obsolete so we don't mind too much\n"); + pr_warn("But it's obsolete so we don't mind too much\n"); } else { - ret = -EIO; + jffs2_dbg_dump_node(c, ref_offset(raw)); + BUG(); } } upnout: - up(&f->sem); + mutex_unlock(&f->sem); return ret; } -static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, +static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, struct jffs2_raw_node_ref *raw) { union jffs2_node_union *node; - struct jffs2_raw_node_ref *nraw; size_t retlen; int ret; uint32_t phys_ofs, alloclen; uint32_t crc, rawlen; int retried = 0; - D1(printk(KERN_DEBUG "Going to GC REF_PRISTINE node at 0x%08x\n", ref_offset(raw))); + jffs2_dbg(1, "Going to GC REF_PRISTINE node at 0x%08x\n", + ref_offset(raw)); - rawlen = ref_totlen(c, c->gcblock, raw); + alloclen = rawlen = ref_totlen(c, c->gcblock, raw); /* Ask for a small amount of space (or the totlen if smaller) because we don't want to force wastage of the end of a block if splitting would work. 
*/ - ret = jffs2_reserve_space_gc(c, min_t(uint32_t, sizeof(struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN, - rawlen), &phys_ofs, &alloclen); + if (ic && alloclen > sizeof(struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN) + alloclen = sizeof(struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN; + + ret = jffs2_reserve_space_gc(c, alloclen, &alloclen, rawlen); + /* 'rawlen' is not the exact summary size; it is only an upper estimation */ + if (ret) return ret; @@ -525,7 +603,7 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, node = kmalloc(rawlen, GFP_KERNEL); if (!node) - return -ENOMEM; + return -ENOMEM; ret = jffs2_flash_read(c, ref_offset(raw), rawlen, &retlen, (char *)node); if (!ret && retlen != rawlen) @@ -535,8 +613,8 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, crc = crc32(0, node, sizeof(struct jffs2_unknown_node)-4); if (je32_to_cpu(node->u.hdr_crc) != crc) { - printk(KERN_WARNING "Header CRC failed on REF_PRISTINE node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", - ref_offset(raw), je32_to_cpu(node->u.hdr_crc), crc); + pr_warn("Header CRC failed on REF_PRISTINE node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", + ref_offset(raw), je32_to_cpu(node->u.hdr_crc), crc); goto bail; } @@ -544,16 +622,18 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, case JFFS2_NODETYPE_INODE: crc = crc32(0, node, sizeof(node->i)-8); if (je32_to_cpu(node->i.node_crc) != crc) { - printk(KERN_WARNING "Node CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", - ref_offset(raw), je32_to_cpu(node->i.node_crc), crc); + pr_warn("Node CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", + ref_offset(raw), je32_to_cpu(node->i.node_crc), + crc); goto bail; } if (je32_to_cpu(node->i.dsize)) { crc = crc32(0, node->i.data, je32_to_cpu(node->i.csize)); if (je32_to_cpu(node->i.data_crc) != crc) { - printk(KERN_WARNING "Data CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", - ref_offset(raw), je32_to_cpu(node->i.data_crc), crc); + pr_warn("Data CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", + ref_offset(raw), + je32_to_cpu(node->i.data_crc), crc); goto bail; } } @@ -562,99 +642,90 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, case JFFS2_NODETYPE_DIRENT: crc = crc32(0, node, sizeof(node->d)-8); if (je32_to_cpu(node->d.node_crc) != crc) { - printk(KERN_WARNING "Node CRC failed on REF_PRISTINE dirent node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", - ref_offset(raw), je32_to_cpu(node->d.node_crc), crc); + pr_warn("Node CRC failed on REF_PRISTINE dirent node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", + ref_offset(raw), + je32_to_cpu(node->d.node_crc), crc); + goto bail; + } + + if (strnlen(node->d.name, node->d.nsize) != node->d.nsize) { + pr_warn("Name in dirent node at 0x%08x contains zeroes\n", + ref_offset(raw)); goto bail; } if (node->d.nsize) { crc = crc32(0, node->d.name, node->d.nsize); if (je32_to_cpu(node->d.name_crc) != crc) { - printk(KERN_WARNING "Name CRC failed on REF_PRISTINE dirent ode at 0x%08x: Read 0x%08x, calculated 0x%08x\n", - ref_offset(raw), je32_to_cpu(node->d.name_crc), crc); + pr_warn("Name CRC failed on REF_PRISTINE dirent node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", + ref_offset(raw), + je32_to_cpu(node->d.name_crc), crc); goto bail; } } break; default: - printk(KERN_WARNING "Unknown node type for REF_PRISTINE node at 0x%08x: 0x%04x\n", - ref_offset(raw), 
je16_to_cpu(node->u.nodetype)); - goto bail; - } - - nraw = jffs2_alloc_raw_node_ref(); - if (!nraw) { - ret = -ENOMEM; - goto out_node; + /* If it's inode-less, we don't _know_ what it is. Just copy it intact */ + if (ic) { + pr_warn("Unknown node type for REF_PRISTINE node at 0x%08x: 0x%04x\n", + ref_offset(raw), je16_to_cpu(node->u.nodetype)); + goto bail; + } } /* OK, all the CRCs are good; this node can just be copied as-is. */ retry: - nraw->flash_offset = phys_ofs; - nraw->__totlen = rawlen; - nraw->next_phys = NULL; + phys_ofs = write_ofs(c); ret = jffs2_flash_write(c, phys_ofs, rawlen, &retlen, (char *)node); if (ret || (retlen != rawlen)) { - printk(KERN_NOTICE "Write of %d bytes at 0x%08x failed. returned %d, retlen %zd\n", - rawlen, phys_ofs, ret, retlen); + pr_notice("Write of %d bytes at 0x%08x failed. returned %d, retlen %zd\n", + rawlen, phys_ofs, ret, retlen); if (retlen) { - /* Doesn't belong to any inode */ - nraw->next_in_ino = NULL; - - nraw->flash_offset |= REF_OBSOLETE; - jffs2_add_physical_node_ref(c, nraw); - jffs2_mark_node_obsolete(c, nraw); + jffs2_add_physical_node_ref(c, phys_ofs | REF_OBSOLETE, rawlen, NULL); } else { - printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", nraw->flash_offset); - jffs2_free_raw_node_ref(nraw); + pr_notice("Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", + phys_ofs); } - if (!retried && (nraw = jffs2_alloc_raw_node_ref())) { + if (!retried) { /* Try to reallocate space and retry */ uint32_t dummy; struct jffs2_eraseblock *jeb = &c->blocks[phys_ofs / c->sector_size]; retried = 1; - D1(printk(KERN_DEBUG "Retrying failed write of REF_PRISTINE node.\n")); - - ACCT_SANITY_CHECK(c,jeb); - D1(ACCT_PARANOIA_CHECK(jeb)); + jffs2_dbg(1, "Retrying failed write of REF_PRISTINE node.\n"); + + jffs2_dbg_acct_sanity_check(c,jeb); + jffs2_dbg_acct_paranoia_check(c, jeb); - ret = jffs2_reserve_space_gc(c, rawlen, &phys_ofs, &dummy); + ret = jffs2_reserve_space_gc(c, rawlen, &dummy, rawlen); + /* this is not the exact summary size of it, + it is only an upper estimation */ if (!ret) { - D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", phys_ofs)); + jffs2_dbg(1, "Allocated space at 0x%08x to retry failed write.\n", + phys_ofs); - ACCT_SANITY_CHECK(c,jeb); - D1(ACCT_PARANOIA_CHECK(jeb)); + jffs2_dbg_acct_sanity_check(c,jeb); + jffs2_dbg_acct_paranoia_check(c, jeb); goto retry; } - D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret)); - jffs2_free_raw_node_ref(nraw); + jffs2_dbg(1, "Failed to allocate space to retry failed write: %d!\n", + ret); } - jffs2_free_raw_node_ref(nraw); if (!ret) ret = -EIO; goto out_node; } - nraw->flash_offset |= REF_PRISTINE; - jffs2_add_physical_node_ref(c, nraw); - - /* Link into per-inode list. This is safe because of the ic - state being INO_STATE_GC. Note that if we're doing this - for an inode which is in-core, the 'nraw' pointer is then - going to be fetched from ic->nodes by our caller. */ - spin_lock(&c->erase_completion_lock); - nraw->next_in_ino = ic->nodes; - ic->nodes = nraw; - spin_unlock(&c->erase_completion_lock); + jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, rawlen, ic); jffs2_mark_node_obsolete(c, raw); - D1(printk(KERN_DEBUG "WHEEE! GC REF_PRISTINE node at 0x%08x succeeded\n", ref_offset(raw))); + jffs2_dbg(1, "WHEEE! 
GC REF_PRISTINE node at 0x%08x succeeded\n", + ref_offset(raw)); out_node: kfree(node); @@ -664,50 +735,52 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, goto out_node; } -static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, +static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn) { struct jffs2_full_dnode *new_fn; struct jffs2_raw_inode ri; struct jffs2_node_frag *last_frag; - jint16_t dev; - char *mdata = NULL, mdatalen = 0; - uint32_t alloclen, phys_ofs, ilen; + union jffs2_device_node dev; + char *mdata = NULL; + int mdatalen = 0; + uint32_t alloclen, ilen; int ret; if (S_ISBLK(JFFS2_F_I_MODE(f)) || S_ISCHR(JFFS2_F_I_MODE(f)) ) { /* For these, we don't actually need to read the old node */ - /* FIXME: for minor or major > 255. */ - dev = cpu_to_je16(((JFFS2_F_I_RDEV_MAJ(f) << 8) | - JFFS2_F_I_RDEV_MIN(f))); + mdatalen = jffs2_encode_dev(&dev, JFFS2_F_I_RDEV(f)); mdata = (char *)&dev; - mdatalen = sizeof(dev); - D1(printk(KERN_DEBUG "jffs2_garbage_collect_metadata(): Writing %d bytes of kdev_t\n", mdatalen)); + jffs2_dbg(1, "%s(): Writing %d bytes of kdev_t\n", + __func__, mdatalen); } else if (S_ISLNK(JFFS2_F_I_MODE(f))) { mdatalen = fn->size; mdata = kmalloc(fn->size, GFP_KERNEL); if (!mdata) { - printk(KERN_WARNING "kmalloc of mdata failed in jffs2_garbage_collect_metadata()\n"); + pr_warn("kmalloc of mdata failed in jffs2_garbage_collect_metadata()\n"); return -ENOMEM; } ret = jffs2_read_dnode(c, f, fn, mdata, 0, mdatalen); if (ret) { - printk(KERN_WARNING "read of old metadata failed in jffs2_garbage_collect_metadata(): %d\n", ret); + pr_warn("read of old metadata failed in jffs2_garbage_collect_metadata(): %d\n", + ret); kfree(mdata); return ret; } - D1(printk(KERN_DEBUG "jffs2_garbage_collect_metadata(): Writing %d bites of symlink target\n", mdatalen)); + jffs2_dbg(1, "%s(): Writing %d bites of symlink target\n", + __func__, mdatalen); } - - ret = jffs2_reserve_space_gc(c, sizeof(ri) + mdatalen, &phys_ofs, &alloclen); + + ret = jffs2_reserve_space_gc(c, sizeof(ri) + mdatalen, &alloclen, + JFFS2_SUMMARY_INODE_SIZE); if (ret) { - printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_metadata failed: %d\n", - sizeof(ri)+ mdatalen, ret); + pr_warn("jffs2_reserve_space_gc of %zd bytes for garbage_collect_metadata failed: %d\n", + sizeof(ri) + mdatalen, ret); goto out; } - + last_frag = frag_last(&f->fragtree); if (last_frag) /* Fetch the inode length from the fragtree rather then @@ -715,7 +788,7 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_ ilen = last_frag->ofs + last_frag->size; else ilen = JFFS2_F_I_SIZE(f); - + memset(&ri, 0, sizeof(ri)); ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE); @@ -738,10 +811,10 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_ ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8)); ri.data_crc = cpu_to_je32(crc32(0, mdata, mdatalen)); - new_fn = jffs2_write_dnode(c, f, &ri, mdata, mdatalen, phys_ofs, ALLOC_GC); + new_fn = jffs2_write_dnode(c, f, &ri, mdata, mdatalen, ALLOC_GC); if (IS_ERR(new_fn)) { - printk(KERN_WARNING "Error writing new dnode: %ld\n", PTR_ERR(new_fn)); + pr_warn("Error writing new dnode: %ld\n", PTR_ERR(new_fn)); ret = PTR_ERR(new_fn); goto out; } @@ -754,12 +827,12 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info 
*c, struct jffs2_ return ret; } -static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, +static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, struct jffs2_inode_info *f, struct jffs2_full_dirent *fd) { struct jffs2_full_dirent *new_fd; struct jffs2_raw_dirent rd; - uint32_t alloclen, phys_ofs; + uint32_t alloclen; int ret; rd.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); @@ -771,28 +844,35 @@ static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_er rd.pino = cpu_to_je32(f->inocache->ino); rd.version = cpu_to_je32(++f->highest_version); rd.ino = cpu_to_je32(fd->ino); - rd.mctime = cpu_to_je32(max(JFFS2_F_I_MTIME(f), JFFS2_F_I_CTIME(f))); + /* If the times on this inode were set by explicit utime() they can be different, + so refrain from splatting them. */ + if (JFFS2_F_I_MTIME(f) == JFFS2_F_I_CTIME(f)) + rd.mctime = cpu_to_je32(JFFS2_F_I_MTIME(f)); + else + rd.mctime = cpu_to_je32(0); rd.type = fd->type; rd.node_crc = cpu_to_je32(crc32(0, &rd, sizeof(rd)-8)); rd.name_crc = cpu_to_je32(crc32(0, fd->name, rd.nsize)); - - ret = jffs2_reserve_space_gc(c, sizeof(rd)+rd.nsize, &phys_ofs, &alloclen); + + ret = jffs2_reserve_space_gc(c, sizeof(rd)+rd.nsize, &alloclen, + JFFS2_SUMMARY_DIRENT_SIZE(rd.nsize)); if (ret) { - printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_dirent failed: %d\n", - sizeof(rd)+rd.nsize, ret); + pr_warn("jffs2_reserve_space_gc of %zd bytes for garbage_collect_dirent failed: %d\n", + sizeof(rd)+rd.nsize, ret); return ret; } - new_fd = jffs2_write_dirent(c, f, &rd, fd->name, rd.nsize, phys_ofs, ALLOC_GC); + new_fd = jffs2_write_dirent(c, f, &rd, fd->name, rd.nsize, ALLOC_GC); if (IS_ERR(new_fd)) { - printk(KERN_WARNING "jffs2_write_dirent in garbage_collect_dirent failed: %ld\n", PTR_ERR(new_fd)); + pr_warn("jffs2_write_dirent in garbage_collect_dirent failed: %ld\n", + PTR_ERR(new_fd)); return PTR_ERR(new_fd); } jffs2_add_fd_to_list(c, new_fd, &f->dents); return 0; } -static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, +static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, struct jffs2_inode_info *f, struct jffs2_full_dirent *fd) { struct jffs2_full_dirent **fdp = &f->dents; @@ -819,10 +899,12 @@ static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct /* Prevent the erase code from nicking the obsolete node refs while we're looking at them. I really don't like this extra lock but can't see any alternative. Suggestions on a postcard to... */ - down(&c->erase_free_sem); + mutex_lock(&c->erase_free_sem); for (raw = f->inocache->nodes; raw != (void *)f->inocache; raw = raw->next_in_ino) { + cond_resched(); + /* We only care about obsolete ones */ if (!(ref_obsolete(raw))) continue; @@ -831,24 +913,27 @@ static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct if (ref_totlen(c, NULL, raw) != rawlen) continue; - /* Doesn't matter if there's one in the same erase block. We're going to + /* Doesn't matter if there's one in the same erase block. We're going to delete it too at the same time. 
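The device-number write-out above is the counterpart of the read path earlier in the patch, where a 2-byte id is decoded with old_decode_dev() and a 4-byte id with new_decode_dev(). A simplified user-space model of that dual on-media format, with the jint16/jint32 wrappers replaced by plain host integers and endianness conversion left out:

#include <stdint.h>
#include <stdio.h>

/* Simplified model of the two layouts: a legacy 16-bit id with 8-bit
 * major/minor, or a 32-bit id in the new_encode_dev() arrangement. */
union device_node {
	uint16_t old_id;
	uint32_t new_id;
};

static int encode_dev(union device_node *jdev, unsigned major, unsigned minor)
{
	if (major < 256 && minor < 256) {	/* old_valid_dev() case */
		jdev->old_id = (uint16_t)((major << 8) | minor);
		return sizeof(jdev->old_id);	/* 2 bytes on the medium */
	}
	jdev->new_id = (minor & 0xffu) | (major << 8) | ((minor & ~0xffu) << 12);
	return sizeof(jdev->new_id);		/* 4 bytes on the medium */
}

int main(void)
{
	union device_node d;
	int len;

	len = encode_dev(&d, 8, 1);
	printf("8:1   -> %d bytes, old_id=0x%04x\n", len, d.old_id);
	len = encode_dev(&d, 259, 7);
	printf("259:7 -> %d bytes, new_id=0x%08x\n", len, d.new_id);
	return 0;
}

The stored length is what the read path checks f->metadata->size against when choosing which decode to apply.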
*/ if (SECTOR_ADDR(raw->flash_offset) == SECTOR_ADDR(fd->raw->flash_offset)) continue; - D1(printk(KERN_DEBUG "Check potential deletion dirent at %08x\n", ref_offset(raw))); + jffs2_dbg(1, "Check potential deletion dirent at %08x\n", + ref_offset(raw)); /* This is an obsolete node belonging to the same directory, and it's of the right length. We need to take a closer look...*/ ret = jffs2_flash_read(c, ref_offset(raw), rawlen, &retlen, (char *)rd); if (ret) { - printk(KERN_WARNING "jffs2_g_c_deletion_dirent(): Read error (%d) reading obsolete node at %08x\n", ret, ref_offset(raw)); + pr_warn("%s(): Read error (%d) reading obsolete node at %08x\n", + __func__, ret, ref_offset(raw)); /* If we can't read it, we don't need to continue to obsolete it. Continue */ continue; } if (retlen != rawlen) { - printk(KERN_WARNING "jffs2_g_c_deletion_dirent(): Short read (%zd not %u) reading header from obsolete node at %08x\n", - retlen, rawlen, ref_offset(raw)); + pr_warn("%s(): Short read (%zd not %u) reading header from obsolete node at %08x\n", + __func__, retlen, rawlen, + ref_offset(raw)); continue; } @@ -870,19 +955,23 @@ static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct /* OK. The name really does match. There really is still an older node on the flash which our deletion dirent obsoletes. So we have to write out a new deletion dirent to replace it */ - up(&c->erase_free_sem); + mutex_unlock(&c->erase_free_sem); - D1(printk(KERN_DEBUG "Deletion dirent at %08x still obsoletes real dirent \"%s\" at %08x for ino #%u\n", - ref_offset(fd->raw), fd->name, ref_offset(raw), je32_to_cpu(rd->ino))); + jffs2_dbg(1, "Deletion dirent at %08x still obsoletes real dirent \"%s\" at %08x for ino #%u\n", + ref_offset(fd->raw), fd->name, + ref_offset(raw), je32_to_cpu(rd->ino)); kfree(rd); return jffs2_garbage_collect_dirent(c, jeb, f, fd); } - up(&c->erase_free_sem); + mutex_unlock(&c->erase_free_sem); kfree(rd); } + /* FIXME: If we're deleting a dirent which contains the current mtime and ctime, + we should update the metadata node with those times accordingly */ + /* No need for it any more. Just mark it obsolete and remove it from the list */ while (*fdp) { if ((*fdp) == fd) { @@ -893,7 +982,8 @@ static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct fdp = &(*fdp)->next; } if (!found) { - printk(KERN_WARNING "Deletion dirent \"%s\" not found in list for ino #%u\n", fd->name, f->inocache->ino); + pr_warn("Deletion dirent \"%s\" not found in list for ino #%u\n", + fd->name, f->inocache->ino); } jffs2_mark_node_obsolete(c, fd->raw); jffs2_free_full_dirent(fd); @@ -907,50 +997,52 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras struct jffs2_raw_inode ri; struct jffs2_node_frag *frag; struct jffs2_full_dnode *new_fn; - uint32_t alloclen, phys_ofs, ilen; + uint32_t alloclen, ilen; int ret; - D1(printk(KERN_DEBUG "Writing replacement hole node for ino #%u from offset 0x%x to 0x%x\n", - f->inocache->ino, start, end)); - + jffs2_dbg(1, "Writing replacement hole node for ino #%u from offset 0x%x to 0x%x\n", + f->inocache->ino, start, end); + memset(&ri, 0, sizeof(ri)); if(fn->frags > 1) { size_t readlen; uint32_t crc; - /* It's partially obsoleted by a later write. So we have to + /* It's partially obsoleted by a later write. 
So we have to write it out again with the _same_ version as before */ ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(ri), &readlen, (char *)&ri); if (readlen != sizeof(ri) || ret) { - printk(KERN_WARNING "Node read failed in jffs2_garbage_collect_hole. Ret %d, retlen %zd. Data will be lost by writing new hole node\n", ret, readlen); + pr_warn("Node read failed in jffs2_garbage_collect_hole. Ret %d, retlen %zd. Data will be lost by writing new hole node\n", + ret, readlen); goto fill; } if (je16_to_cpu(ri.nodetype) != JFFS2_NODETYPE_INODE) { - printk(KERN_WARNING "jffs2_garbage_collect_hole: Node at 0x%08x had node type 0x%04x instead of JFFS2_NODETYPE_INODE(0x%04x)\n", - ref_offset(fn->raw), - je16_to_cpu(ri.nodetype), JFFS2_NODETYPE_INODE); + pr_warn("%s(): Node at 0x%08x had node type 0x%04x instead of JFFS2_NODETYPE_INODE(0x%04x)\n", + __func__, ref_offset(fn->raw), + je16_to_cpu(ri.nodetype), JFFS2_NODETYPE_INODE); return -EIO; } if (je32_to_cpu(ri.totlen) != sizeof(ri)) { - printk(KERN_WARNING "jffs2_garbage_collect_hole: Node at 0x%08x had totlen 0x%x instead of expected 0x%zx\n", - ref_offset(fn->raw), - je32_to_cpu(ri.totlen), sizeof(ri)); + pr_warn("%s(): Node at 0x%08x had totlen 0x%x instead of expected 0x%zx\n", + __func__, ref_offset(fn->raw), + je32_to_cpu(ri.totlen), sizeof(ri)); return -EIO; } crc = crc32(0, &ri, sizeof(ri)-8); if (crc != je32_to_cpu(ri.node_crc)) { - printk(KERN_WARNING "jffs2_garbage_collect_hole: Node at 0x%08x had CRC 0x%08x which doesn't match calculated CRC 0x%08x\n", - ref_offset(fn->raw), - je32_to_cpu(ri.node_crc), crc); + pr_warn("%s: Node at 0x%08x had CRC 0x%08x which doesn't match calculated CRC 0x%08x\n", + __func__, ref_offset(fn->raw), + je32_to_cpu(ri.node_crc), crc); /* FIXME: We could possibly deal with this by writing new holes for each frag */ - printk(KERN_WARNING "Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n", - start, end, f->inocache->ino); + pr_warn("Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n", + start, end, f->inocache->ino); goto fill; } if (ri.compr != JFFS2_COMPR_ZERO) { - printk(KERN_WARNING "jffs2_garbage_collect_hole: Node 0x%08x wasn't a hole node!\n", ref_offset(fn->raw)); - printk(KERN_WARNING "Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n", - start, end, f->inocache->ino); + pr_warn("%s(): Node 0x%08x wasn't a hole node!\n", + __func__, ref_offset(fn->raw)); + pr_warn("Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n", + start, end, f->inocache->ino); goto fill; } } else { @@ -967,7 +1059,7 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras ri.csize = cpu_to_je32(0); ri.compr = JFFS2_COMPR_ZERO; } - + frag = frag_last(&f->fragtree); if (frag) /* Fetch the inode length from the fragtree rather then @@ -986,16 +1078,17 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras ri.data_crc = cpu_to_je32(0); ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8)); - ret = jffs2_reserve_space_gc(c, sizeof(ri), &phys_ofs, &alloclen); + ret = jffs2_reserve_space_gc(c, sizeof(ri), &alloclen, + JFFS2_SUMMARY_INODE_SIZE); if (ret) { - printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_hole failed: %d\n", - sizeof(ri), ret); + pr_warn("jffs2_reserve_space_gc of %zd bytes for garbage_collect_hole failed: %d\n", + sizeof(ri), ret); return ret; } - new_fn = jffs2_write_dnode(c, f, &ri, NULL, 0, phys_ofs, ALLOC_GC); + new_fn = jffs2_write_dnode(c, f, &ri, NULL, 0, ALLOC_GC); if 
(IS_ERR(new_fn)) { - printk(KERN_WARNING "Error writing new hole node: %ld\n", PTR_ERR(new_fn)); + pr_warn("Error writing new hole node: %ld\n", PTR_ERR(new_fn)); return PTR_ERR(new_fn); } if (je32_to_cpu(ri.version) == f->highest_version) { @@ -1008,22 +1101,22 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras return 0; } - /* + /* * We should only get here in the case where the node we are * replacing had more than one frag, so we kept the same version - * number as before. (Except in case of error -- see 'goto fill;' + * number as before. (Except in case of error -- see 'goto fill;' * above.) */ D1(if(unlikely(fn->frags <= 1)) { - printk(KERN_WARNING "jffs2_garbage_collect_hole: Replacing fn with %d frag(s) but new ver %d != highest_version %d of ino #%d\n", - fn->frags, je32_to_cpu(ri.version), f->highest_version, - je32_to_cpu(ri.ino)); + pr_warn("%s(): Replacing fn with %d frag(s) but new ver %d != highest_version %d of ino #%d\n", + __func__, fn->frags, je32_to_cpu(ri.version), + f->highest_version, je32_to_cpu(ri.ino)); }); /* This is a partially-overlapped hole node. Mark it REF_NORMAL not REF_PRISTINE */ mark_ref_normal(new_fn->raw); - for (frag = jffs2_lookup_node_frag(&f->fragtree, fn->ofs); + for (frag = jffs2_lookup_node_frag(&f->fragtree, fn->ofs); frag; frag = frag_next(frag)) { if (frag->ofs > fn->size + fn->ofs) break; @@ -1034,36 +1127,36 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras } } if (fn->frags) { - printk(KERN_WARNING "jffs2_garbage_collect_hole: Old node still has frags!\n"); + pr_warn("%s(): Old node still has frags!\n", __func__); BUG(); } if (!new_fn->frags) { - printk(KERN_WARNING "jffs2_garbage_collect_hole: New node has no frags!\n"); + pr_warn("%s(): New node has no frags!\n", __func__); BUG(); } - + jffs2_mark_node_obsolete(c, fn->raw); jffs2_free_full_dnode(fn); - + return 0; } -static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, +static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *orig_jeb, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn, uint32_t start, uint32_t end) { struct jffs2_full_dnode *new_fn; struct jffs2_raw_inode ri; - uint32_t alloclen, phys_ofs, offset, orig_end, orig_start; + uint32_t alloclen, offset, orig_end, orig_start; int ret = 0; unsigned char *comprbuf = NULL, *writebuf; unsigned long pg; unsigned char *pg_ptr; - + memset(&ri, 0, sizeof(ri)); - D1(printk(KERN_DEBUG "Writing replacement dnode for ino #%u from offset 0x%x to 0x%x\n", - f->inocache->ino, start, end)); + jffs2_dbg(1, "Writing replacement dnode for ino #%u from offset 0x%x to 0x%x\n", + f->inocache->ino, start, end); orig_end = end; orig_start = start; @@ -1071,8 +1164,8 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era if (c->nr_free_blocks + c->nr_erasing_blocks > c->resv_blocks_gcmerge) { /* Attempt to do some merging. But only expand to cover logically adjacent frags if the block containing them is already considered - to be dirty. Otherwise we end up with GC just going round in - circles dirtying the nodes it already wrote out, especially + to be dirty. Otherwise we end up with GC just going round in + circles dirtying the nodes it already wrote out, especially on NAND where we have small eraseblocks and hence a much higher chance of nodes having to be split to cross boundaries. 
*/ @@ -1094,19 +1187,19 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era /* If the previous frag doesn't even reach the beginning, there's excessive fragmentation. Just merge. */ if (frag->ofs > min) { - D1(printk(KERN_DEBUG "Expanding down to cover partial frag (0x%x-0x%x)\n", - frag->ofs, frag->ofs+frag->size)); + jffs2_dbg(1, "Expanding down to cover partial frag (0x%x-0x%x)\n", + frag->ofs, frag->ofs+frag->size); start = frag->ofs; continue; } /* OK. This frag holds the first byte of the page. */ if (!frag->node || !frag->node->raw) { - D1(printk(KERN_DEBUG "First frag in page is hole (0x%x-0x%x). Not expanding down.\n", - frag->ofs, frag->ofs+frag->size)); + jffs2_dbg(1, "First frag in page is hole (0x%x-0x%x). Not expanding down.\n", + frag->ofs, frag->ofs+frag->size); break; } else { - /* OK, it's a frag which extends to the beginning of the page. Does it live + /* OK, it's a frag which extends to the beginning of the page. Does it live in a block which is still considered clean? If so, don't obsolete it. If not, cover it anyway. */ @@ -1116,19 +1209,25 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era jeb = &c->blocks[raw->flash_offset / c->sector_size]; if (jeb == c->gcblock) { - D1(printk(KERN_DEBUG "Expanding down to cover frag (0x%x-0x%x) in gcblock at %08x\n", - frag->ofs, frag->ofs+frag->size, ref_offset(raw))); + jffs2_dbg(1, "Expanding down to cover frag (0x%x-0x%x) in gcblock at %08x\n", + frag->ofs, + frag->ofs + frag->size, + ref_offset(raw)); start = frag->ofs; break; } if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) { - D1(printk(KERN_DEBUG "Not expanding down to cover frag (0x%x-0x%x) in clean block %08x\n", - frag->ofs, frag->ofs+frag->size, jeb->offset)); + jffs2_dbg(1, "Not expanding down to cover frag (0x%x-0x%x) in clean block %08x\n", + frag->ofs, + frag->ofs + frag->size, + jeb->offset); break; } - D1(printk(KERN_DEBUG "Expanding down to cover frag (0x%x-0x%x) in dirty block %08x\n", - frag->ofs, frag->ofs+frag->size, jeb->offset)); + jffs2_dbg(1, "Expanding down to cover frag (0x%x-0x%x) in dirty block %08x\n", + frag->ofs, + frag->ofs + frag->size, + jeb->offset); start = frag->ofs; break; } @@ -1144,19 +1243,19 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era /* If the previous frag doesn't even reach the beginning, there's lots of fragmentation. Just merge. */ if (frag->ofs+frag->size < max) { - D1(printk(KERN_DEBUG "Expanding up to cover partial frag (0x%x-0x%x)\n", - frag->ofs, frag->ofs+frag->size)); + jffs2_dbg(1, "Expanding up to cover partial frag (0x%x-0x%x)\n", + frag->ofs, frag->ofs+frag->size); end = frag->ofs + frag->size; continue; } if (!frag->node || !frag->node->raw) { - D1(printk(KERN_DEBUG "Last frag in page is hole (0x%x-0x%x). Not expanding up.\n", - frag->ofs, frag->ofs+frag->size)); + jffs2_dbg(1, "Last frag in page is hole (0x%x-0x%x). Not expanding up.\n", + frag->ofs, frag->ofs+frag->size); break; } else { - /* OK, it's a frag which extends to the beginning of the page. Does it live + /* OK, it's a frag which extends to the beginning of the page. Does it live in a block which is still considered clean? If so, don't obsolete it. If not, cover it anyway. 
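The rule applied in these expansion hunks reduces to a small predicate: a neighbouring frag is only absorbed when rewriting it cannot dirty an otherwise-clean block, i.e. when it already lives in the gcblock or in a block that is dirty enough to be collected soon anyway. A hedged sketch, with a placeholder threshold standing in for the real ISDIRTY() test:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder for the threshold behind ISDIRTY() in the JFFS2 headers. */
#define DIRTY_THRESHOLD 256u

struct eraseblock {
	uint32_t dirty_size;
	uint32_t wasted_size;
	bool is_gcblock;	/* the block currently being collected */
};

/* Absorb a neighbouring frag into the node being rewritten only if
 * that cannot turn a clean block dirty. Hole frags have no backing
 * node, so there is nothing to absorb and expansion stops there. */
static bool should_expand_over(const struct eraseblock *jeb, bool is_hole)
{
	if (is_hole)
		return false;
	if (jeb->is_gcblock)
		return true;
	return jeb->dirty_size + jeb->wasted_size >= DIRTY_THRESHOLD;
}

int main(void)
{
	struct eraseblock clean = { 0, 0, false };
	struct eraseblock dirty = { 4096, 0, false };

	printf("clean: %d  dirty: %d\n",
	       should_expand_over(&clean, false),
	       should_expand_over(&dirty, false));
	return 0;
}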
*/ @@ -1166,31 +1265,37 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era jeb = &c->blocks[raw->flash_offset / c->sector_size]; if (jeb == c->gcblock) { - D1(printk(KERN_DEBUG "Expanding up to cover frag (0x%x-0x%x) in gcblock at %08x\n", - frag->ofs, frag->ofs+frag->size, ref_offset(raw))); + jffs2_dbg(1, "Expanding up to cover frag (0x%x-0x%x) in gcblock at %08x\n", + frag->ofs, + frag->ofs + frag->size, + ref_offset(raw)); end = frag->ofs + frag->size; break; } if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) { - D1(printk(KERN_DEBUG "Not expanding up to cover frag (0x%x-0x%x) in clean block %08x\n", - frag->ofs, frag->ofs+frag->size, jeb->offset)); + jffs2_dbg(1, "Not expanding up to cover frag (0x%x-0x%x) in clean block %08x\n", + frag->ofs, + frag->ofs + frag->size, + jeb->offset); break; } - D1(printk(KERN_DEBUG "Expanding up to cover frag (0x%x-0x%x) in dirty block %08x\n", - frag->ofs, frag->ofs+frag->size, jeb->offset)); + jffs2_dbg(1, "Expanding up to cover frag (0x%x-0x%x) in dirty block %08x\n", + frag->ofs, + frag->ofs + frag->size, + jeb->offset); end = frag->ofs + frag->size; break; } } - D1(printk(KERN_DEBUG "Expanded dnode to write from (0x%x-0x%x) to (0x%x-0x%x)\n", - orig_start, orig_end, start, end)); + jffs2_dbg(1, "Expanded dnode to write from (0x%x-0x%x) to (0x%x-0x%x)\n", + orig_start, orig_end, start, end); D1(BUG_ON(end > frag_last(&f->fragtree)->ofs + frag_last(&f->fragtree)->size)); BUG_ON(end < orig_end); BUG_ON(start > orig_start); } - + /* First, use readpage() to read the appropriate page into the page cache */ /* Q: What happens if we actually try to GC the _same_ page for which commit_write() * triggered garbage collection in the first place? @@ -1201,7 +1306,8 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era pg_ptr = jffs2_gc_fetch_page(c, f, start, &pg); if (IS_ERR(pg_ptr)) { - printk(KERN_WARNING "read_cache_page() returned error: %ld\n", PTR_ERR(pg_ptr)); + pr_warn("read_cache_page() returned error: %ld\n", + PTR_ERR(pg_ptr)); return PTR_ERR(pg_ptr); } @@ -1211,11 +1317,12 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era uint32_t cdatalen; uint16_t comprtype = JFFS2_COMPR_NONE; - ret = jffs2_reserve_space_gc(c, sizeof(ri) + JFFS2_MIN_DATA_LEN, &phys_ofs, &alloclen); + ret = jffs2_reserve_space_gc(c, sizeof(ri) + JFFS2_MIN_DATA_LEN, + &alloclen, JFFS2_SUMMARY_INODE_SIZE); if (ret) { - printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_dnode failed: %d\n", - sizeof(ri)+ JFFS2_MIN_DATA_LEN, ret); + pr_warn("jffs2_reserve_space_gc of %zd bytes for garbage_collect_dnode failed: %d\n", + sizeof(ri) + JFFS2_MIN_DATA_LEN, ret); break; } cdatalen = min_t(uint32_t, alloclen - sizeof(ri), end - offset); @@ -1246,13 +1353,14 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era ri.usercompr = (comprtype >> 8) & 0xff; ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8)); ri.data_crc = cpu_to_je32(crc32(0, comprbuf, cdatalen)); - - new_fn = jffs2_write_dnode(c, f, &ri, comprbuf, cdatalen, phys_ofs, ALLOC_GC); + + new_fn = jffs2_write_dnode(c, f, &ri, comprbuf, cdatalen, ALLOC_GC); jffs2_free_comprbuf(comprbuf, writebuf); if (IS_ERR(new_fn)) { - printk(KERN_WARNING "Error writing new dnode: %ld\n", PTR_ERR(new_fn)); + pr_warn("Error writing new dnode: %ld\n", + PTR_ERR(new_fn)); ret = PTR_ERR(new_fn); break; } @@ -1268,4 +1376,3 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct 
jffs2_era jffs2_gc_release_page(c, pg_ptr, &pg); return ret; } - diff --git a/fs/jffs2/histo.h b/fs/jffs2/histo.h deleted file mode 100644 index 84f184f0836..00000000000 --- a/fs/jffs2/histo.h +++ /dev/null @@ -1,3 +0,0 @@ -/* This file provides the bit-probabilities for the input file */ -#define BIT_DIVIDER 629 -static int bits[9] = { 179,167,183,165,159,198,178,119,}; /* ia32 .so files */ diff --git a/fs/jffs2/histo_mips.h b/fs/jffs2/histo_mips.h deleted file mode 100644 index 9a443268d88..00000000000 --- a/fs/jffs2/histo_mips.h +++ /dev/null @@ -1,2 +0,0 @@ -#define BIT_DIVIDER_MIPS 1043 -static int bits_mips[8] = { 277,249,290,267,229,341,212,241}; /* mips32 */ diff --git a/fs/jffs2/ioctl.c b/fs/jffs2/ioctl.c index 238c7992064..859a598af02 100644 --- a/fs/jffs2/ioctl.c +++ b/fs/jffs2/ioctl.c @@ -1,23 +1,22 @@ /* * JFFS2 -- Journalling Flash File System, Version 2. * - * Copyright (C) 2001-2003 Red Hat, Inc. + * Copyright © 2001-2007 Red Hat, Inc. + * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org> * * Created by David Woodhouse <dwmw2@infradead.org> * * For licensing information, see the file 'LICENCE' in this directory. * - * $Id: ioctl.c,v 1.9 2004/11/16 20:36:11 dwmw2 Exp $ - * */ #include <linux/fs.h> +#include "nodelist.h" -int jffs2_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, - unsigned long arg) +long jffs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { /* Later, this will provide for lsattr.jffs2 and chattr.jffs2, which will include compression support etc. */ return -ENOTTY; } - + diff --git a/fs/jffs2/jffs2_fs_i.h b/fs/jffs2/jffs2_fs_i.h new file mode 100644 index 00000000000..2e4a86763c0 --- /dev/null +++ b/fs/jffs2/jffs2_fs_i.h @@ -0,0 +1,56 @@ +/* + * JFFS2 -- Journalling Flash File System, Version 2. + * + * Copyright © 2001-2007 Red Hat, Inc. + * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org> + * + * Created by David Woodhouse <dwmw2@infradead.org> + * + * For licensing information, see the file 'LICENCE' in this directory. + * + */ + +#ifndef _JFFS2_FS_I +#define _JFFS2_FS_I + +#include <linux/rbtree.h> +#include <linux/posix_acl.h> +#include <linux/mutex.h> + +struct jffs2_inode_info { + /* We need an internal mutex similar to inode->i_mutex. + Unfortunately, we can't used the existing one, because + either the GC would deadlock, or we'd have to release it + before letting GC proceed. Or we'd have to put ugliness + into the GC code so it didn't attempt to obtain the i_mutex + for the inode(s) which are already locked */ + struct mutex sem; + + /* The highest (datanode) version number used for this ino */ + uint32_t highest_version; + + /* List of data fragments which make up the file */ + struct rb_root fragtree; + + /* There may be one datanode which isn't referenced by any of the + above fragments, if it contains a metadata update but no actual + data - or if this is a directory inode */ + /* This also holds the _only_ dnode for symlinks/device nodes, + etc. */ + struct jffs2_full_dnode *metadata; + + /* Directory entries */ + struct jffs2_full_dirent *dents; + + /* The target path if this is the inode of a symlink */ + unsigned char *target; + + /* Some stuff we just have to keep in-core at all times, for each inode. 
*/ + struct jffs2_inode_cache *inocache; + + uint16_t flags; + uint8_t usercompr; + struct inode vfs_inode; +}; + +#endif /* _JFFS2_FS_I */ diff --git a/fs/jffs2/jffs2_fs_sb.h b/fs/jffs2/jffs2_fs_sb.h new file mode 100644 index 00000000000..413ef89c2d1 --- /dev/null +++ b/fs/jffs2/jffs2_fs_sb.h @@ -0,0 +1,164 @@ +/* + * JFFS2 -- Journalling Flash File System, Version 2. + * + * Copyright © 2001-2007 Red Hat, Inc. + * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org> + * + * Created by David Woodhouse <dwmw2@infradead.org> + * + * For licensing information, see the file 'LICENCE' in this directory. + * + */ + +#ifndef _JFFS2_FS_SB +#define _JFFS2_FS_SB + +#include <linux/types.h> +#include <linux/spinlock.h> +#include <linux/workqueue.h> +#include <linux/completion.h> +#include <linux/mutex.h> +#include <linux/timer.h> +#include <linux/wait.h> +#include <linux/list.h> +#include <linux/rwsem.h> + +#define JFFS2_SB_FLAG_RO 1 +#define JFFS2_SB_FLAG_SCANNING 2 /* Flash scanning is in progress */ +#define JFFS2_SB_FLAG_BUILDING 4 /* File system building is in progress */ + +struct jffs2_inodirty; + +struct jffs2_mount_opts { + bool override_compr; + unsigned int compr; + + /* The size of the reserved pool. The reserved pool is the JFFS2 flash + * space which may only be used by root cannot be used by the other + * users. This is implemented simply by means of not allowing the + * latter users to write to the file system if the amount if the + * available space is less then 'rp_size'. */ + unsigned int rp_size; +}; + +/* A struct for the overall file system control. Pointers to + jffs2_sb_info structs are named `c' in the source code. + Nee jffs_control +*/ +struct jffs2_sb_info { + struct mtd_info *mtd; + + uint32_t highest_ino; + uint32_t checked_ino; + + unsigned int flags; + + struct task_struct *gc_task; /* GC task struct */ + struct completion gc_thread_start; /* GC thread start completion */ + struct completion gc_thread_exit; /* GC thread exit completion port */ + + struct mutex alloc_sem; /* Used to protect all the following + fields, and also to protect against + out-of-order writing of nodes. And GC. */ + uint32_t cleanmarker_size; /* Size of an _inline_ CLEANMARKER + (i.e. zero for OOB CLEANMARKER */ + + uint32_t flash_size; + uint32_t used_size; + uint32_t dirty_size; + uint32_t wasted_size; + uint32_t free_size; + uint32_t erasing_size; + uint32_t bad_size; + uint32_t sector_size; + uint32_t unchecked_size; + + uint32_t nr_free_blocks; + uint32_t nr_erasing_blocks; + + /* Number of free blocks there must be before we... */ + uint8_t resv_blocks_write; /* ... allow a normal filesystem write */ + uint8_t resv_blocks_deletion; /* ... allow a normal filesystem deletion */ + uint8_t resv_blocks_gctrigger; /* ... wake up the GC thread */ + uint8_t resv_blocks_gcbad; /* ... pick a block from the bad_list to GC */ + uint8_t resv_blocks_gcmerge; /* ... merge pages when garbage collecting */ + /* Number of 'very dirty' blocks before we trigger immediate GC */ + uint8_t vdirty_blocks_gctrigger; + + uint32_t nospc_dirty_size; + + uint32_t nr_blocks; + struct jffs2_eraseblock *blocks; /* The whole array of blocks. 
Used for getting blocks + * from the offset (blocks[ofs / sector_size]) */ + struct jffs2_eraseblock *nextblock; /* The block we're currently filling */ + + struct jffs2_eraseblock *gcblock; /* The block we're currently garbage-collecting */ + + struct list_head clean_list; /* Blocks 100% full of clean data */ + struct list_head very_dirty_list; /* Blocks with lots of dirty space */ + struct list_head dirty_list; /* Blocks with some dirty space */ + struct list_head erasable_list; /* Blocks which are completely dirty, and need erasing */ + struct list_head erasable_pending_wbuf_list; /* Blocks which need erasing but only after the current wbuf is flushed */ + struct list_head erasing_list; /* Blocks which are currently erasing */ + struct list_head erase_checking_list; /* Blocks which are being checked and marked */ + struct list_head erase_pending_list; /* Blocks which need erasing now */ + struct list_head erase_complete_list; /* Blocks which are erased and need the clean marker written to them */ + struct list_head free_list; /* Blocks which are free and ready to be used */ + struct list_head bad_list; /* Bad blocks. */ + struct list_head bad_used_list; /* Bad blocks with valid data in. */ + + spinlock_t erase_completion_lock; /* Protect free_list and erasing_list + against erase completion handler */ + wait_queue_head_t erase_wait; /* For waiting for erases to complete */ + + wait_queue_head_t inocache_wq; + int inocache_hashsize; + struct jffs2_inode_cache **inocache_list; + spinlock_t inocache_lock; + + /* Sem to allow jffs2_garbage_collect_deletion_dirent to + drop the erase_completion_lock while it's holding a pointer + to an obsoleted node. I don't like this. Alternatives welcomed. */ + struct mutex erase_free_sem; + + uint32_t wbuf_pagesize; /* 0 for NOR and other flashes with no wbuf */ + +#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY + unsigned char *wbuf_verify; /* read-back buffer for verification */ +#endif +#ifdef CONFIG_JFFS2_FS_WRITEBUFFER + unsigned char *wbuf; /* Write-behind buffer for NAND flash */ + uint32_t wbuf_ofs; + uint32_t wbuf_len; + struct jffs2_inodirty *wbuf_inodes; + struct rw_semaphore wbuf_sem; /* Protects the write buffer */ + + struct delayed_work wbuf_dwork; /* write-buffer write-out work */ + int wbuf_queued; /* non-zero delayed work is queued */ + spinlock_t wbuf_dwork_lock; /* protects wbuf_dwork and and wbuf_queued */ + + unsigned char *oobbuf; + int oobavail; /* How many bytes are available for JFFS2 in OOB */ +#endif + + struct jffs2_summary *summary; /* Summary information */ + struct jffs2_mount_opts mount_opts; + +#ifdef CONFIG_JFFS2_FS_XATTR +#define XATTRINDEX_HASHSIZE (57) + uint32_t highest_xid; + uint32_t highest_xseqno; + struct list_head xattrindex[XATTRINDEX_HASHSIZE]; + struct list_head xattr_unchecked; + struct list_head xattr_dead_list; + struct jffs2_xattr_ref *xref_dead_list; + struct jffs2_xattr_ref *xref_temp; + struct rw_semaphore xattr_sem; + uint32_t xdatum_mem_usage; + uint32_t xdatum_mem_threshold; +#endif + /* OS-private pointer for getting back to master superblock info */ + void *os_priv; +}; + +#endif /* _JFFS2_FS_SB */ diff --git a/fs/jffs2/malloc.c b/fs/jffs2/malloc.c index 5abb431c2a0..b8fd651307a 100644 --- a/fs/jffs2/malloc.c +++ b/fs/jffs2/malloc.c @@ -1,84 +1,95 @@ /* * JFFS2 -- Journalling Flash File System, Version 2. * - * Copyright (C) 2001-2003 Red Hat, Inc. + * Copyright © 2001-2007 Red Hat, Inc. 
* * Created by David Woodhouse <dwmw2@infradead.org> * * For licensing information, see the file 'LICENCE' in this directory. * - * $Id: malloc.c,v 1.28 2004/11/16 20:36:11 dwmw2 Exp $ - * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/kernel.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/jffs2.h> #include "nodelist.h" -#if 0 -#define JFFS2_SLAB_POISON SLAB_POISON -#else -#define JFFS2_SLAB_POISON 0 -#endif - -// replace this by #define D3 (x) x for cache debugging -#define D3(x) - /* These are initialised to NULL in the kernel startup code. If you're porting to other operating systems, beware */ -static kmem_cache_t *full_dnode_slab; -static kmem_cache_t *raw_dirent_slab; -static kmem_cache_t *raw_inode_slab; -static kmem_cache_t *tmp_dnode_info_slab; -static kmem_cache_t *raw_node_ref_slab; -static kmem_cache_t *node_frag_slab; -static kmem_cache_t *inode_cache_slab; +static struct kmem_cache *full_dnode_slab; +static struct kmem_cache *raw_dirent_slab; +static struct kmem_cache *raw_inode_slab; +static struct kmem_cache *tmp_dnode_info_slab; +static struct kmem_cache *raw_node_ref_slab; +static struct kmem_cache *node_frag_slab; +static struct kmem_cache *inode_cache_slab; +#ifdef CONFIG_JFFS2_FS_XATTR +static struct kmem_cache *xattr_datum_cache; +static struct kmem_cache *xattr_ref_cache; +#endif int __init jffs2_create_slab_caches(void) { - full_dnode_slab = kmem_cache_create("jffs2_full_dnode", + full_dnode_slab = kmem_cache_create("jffs2_full_dnode", sizeof(struct jffs2_full_dnode), - 0, JFFS2_SLAB_POISON, NULL, NULL); + 0, 0, NULL); if (!full_dnode_slab) goto err; raw_dirent_slab = kmem_cache_create("jffs2_raw_dirent", sizeof(struct jffs2_raw_dirent), - 0, JFFS2_SLAB_POISON, NULL, NULL); + 0, SLAB_HWCACHE_ALIGN, NULL); if (!raw_dirent_slab) goto err; raw_inode_slab = kmem_cache_create("jffs2_raw_inode", sizeof(struct jffs2_raw_inode), - 0, JFFS2_SLAB_POISON, NULL, NULL); + 0, SLAB_HWCACHE_ALIGN, NULL); if (!raw_inode_slab) goto err; tmp_dnode_info_slab = kmem_cache_create("jffs2_tmp_dnode", sizeof(struct jffs2_tmp_dnode_info), - 0, JFFS2_SLAB_POISON, NULL, NULL); + 0, 0, NULL); if (!tmp_dnode_info_slab) goto err; - raw_node_ref_slab = kmem_cache_create("jffs2_raw_node_ref", - sizeof(struct jffs2_raw_node_ref), - 0, JFFS2_SLAB_POISON, NULL, NULL); + raw_node_ref_slab = kmem_cache_create("jffs2_refblock", + sizeof(struct jffs2_raw_node_ref) * (REFS_PER_BLOCK + 1), + 0, 0, NULL); if (!raw_node_ref_slab) goto err; node_frag_slab = kmem_cache_create("jffs2_node_frag", sizeof(struct jffs2_node_frag), - 0, JFFS2_SLAB_POISON, NULL, NULL); + 0, 0, NULL); if (!node_frag_slab) goto err; inode_cache_slab = kmem_cache_create("jffs2_inode_cache", sizeof(struct jffs2_inode_cache), - 0, JFFS2_SLAB_POISON, NULL, NULL); - if (inode_cache_slab) - return 0; + 0, 0, NULL); + if (!inode_cache_slab) + goto err; + +#ifdef CONFIG_JFFS2_FS_XATTR + xattr_datum_cache = kmem_cache_create("jffs2_xattr_datum", + sizeof(struct jffs2_xattr_datum), + 0, 0, NULL); + if (!xattr_datum_cache) + goto err; + + xattr_ref_cache = kmem_cache_create("jffs2_xattr_ref", + sizeof(struct jffs2_xattr_ref), + 0, 0, NULL); + if (!xattr_ref_cache) + goto err; +#endif + + return 0; err: jffs2_destroy_slab_caches(); return -ENOMEM; @@ -100,106 +111,214 @@ void jffs2_destroy_slab_caches(void) kmem_cache_destroy(node_frag_slab); if(inode_cache_slab) kmem_cache_destroy(inode_cache_slab); +#ifdef CONFIG_JFFS2_FS_XATTR + if (xattr_datum_cache) + kmem_cache_destroy(xattr_datum_cache); + if 
(xattr_ref_cache) + kmem_cache_destroy(xattr_ref_cache); +#endif } struct jffs2_full_dirent *jffs2_alloc_full_dirent(int namesize) { - return kmalloc(sizeof(struct jffs2_full_dirent) + namesize, GFP_KERNEL); + struct jffs2_full_dirent *ret; + ret = kmalloc(sizeof(struct jffs2_full_dirent) + namesize, GFP_KERNEL); + dbg_memalloc("%p\n", ret); + return ret; } void jffs2_free_full_dirent(struct jffs2_full_dirent *x) { + dbg_memalloc("%p\n", x); kfree(x); } struct jffs2_full_dnode *jffs2_alloc_full_dnode(void) { - struct jffs2_full_dnode *ret = kmem_cache_alloc(full_dnode_slab, GFP_KERNEL); - D3 (printk (KERN_DEBUG "alloc_full_dnode at %p\n", ret)); + struct jffs2_full_dnode *ret; + ret = kmem_cache_alloc(full_dnode_slab, GFP_KERNEL); + dbg_memalloc("%p\n", ret); return ret; } void jffs2_free_full_dnode(struct jffs2_full_dnode *x) { - D3 (printk (KERN_DEBUG "free full_dnode at %p\n", x)); + dbg_memalloc("%p\n", x); kmem_cache_free(full_dnode_slab, x); } struct jffs2_raw_dirent *jffs2_alloc_raw_dirent(void) { - struct jffs2_raw_dirent *ret = kmem_cache_alloc(raw_dirent_slab, GFP_KERNEL); - D3 (printk (KERN_DEBUG "alloc_raw_dirent\n", ret)); + struct jffs2_raw_dirent *ret; + ret = kmem_cache_alloc(raw_dirent_slab, GFP_KERNEL); + dbg_memalloc("%p\n", ret); return ret; } void jffs2_free_raw_dirent(struct jffs2_raw_dirent *x) { - D3 (printk (KERN_DEBUG "free_raw_dirent at %p\n", x)); + dbg_memalloc("%p\n", x); kmem_cache_free(raw_dirent_slab, x); } struct jffs2_raw_inode *jffs2_alloc_raw_inode(void) { - struct jffs2_raw_inode *ret = kmem_cache_alloc(raw_inode_slab, GFP_KERNEL); - D3 (printk (KERN_DEBUG "alloc_raw_inode at %p\n", ret)); + struct jffs2_raw_inode *ret; + ret = kmem_cache_alloc(raw_inode_slab, GFP_KERNEL); + dbg_memalloc("%p\n", ret); return ret; } void jffs2_free_raw_inode(struct jffs2_raw_inode *x) { - D3 (printk (KERN_DEBUG "free_raw_inode at %p\n", x)); + dbg_memalloc("%p\n", x); kmem_cache_free(raw_inode_slab, x); } struct jffs2_tmp_dnode_info *jffs2_alloc_tmp_dnode_info(void) { - struct jffs2_tmp_dnode_info *ret = kmem_cache_alloc(tmp_dnode_info_slab, GFP_KERNEL); - D3 (printk (KERN_DEBUG "alloc_tmp_dnode_info at %p\n", ret)); + struct jffs2_tmp_dnode_info *ret; + ret = kmem_cache_alloc(tmp_dnode_info_slab, GFP_KERNEL); + dbg_memalloc("%p\n", + ret); return ret; } void jffs2_free_tmp_dnode_info(struct jffs2_tmp_dnode_info *x) { - D3 (printk (KERN_DEBUG "free_tmp_dnode_info at %p\n", x)); + dbg_memalloc("%p\n", x); kmem_cache_free(tmp_dnode_info_slab, x); } -struct jffs2_raw_node_ref *jffs2_alloc_raw_node_ref(void) +static struct jffs2_raw_node_ref *jffs2_alloc_refblock(void) { - struct jffs2_raw_node_ref *ret = kmem_cache_alloc(raw_node_ref_slab, GFP_KERNEL); - D3 (printk (KERN_DEBUG "alloc_raw_node_ref at %p\n", ret)); + struct jffs2_raw_node_ref *ret; + + ret = kmem_cache_alloc(raw_node_ref_slab, GFP_KERNEL); + if (ret) { + int i = 0; + for (i=0; i < REFS_PER_BLOCK; i++) { + ret[i].flash_offset = REF_EMPTY_NODE; + ret[i].next_in_ino = NULL; + } + ret[i].flash_offset = REF_LINK_NODE; + ret[i].next_in_ino = NULL; + } return ret; } -void jffs2_free_raw_node_ref(struct jffs2_raw_node_ref *x) +int jffs2_prealloc_raw_node_refs(struct jffs2_sb_info *c, + struct jffs2_eraseblock *jeb, int nr) +{ + struct jffs2_raw_node_ref **p, *ref; + int i = nr; + + dbg_memalloc("%d\n", nr); + + p = &jeb->last_node; + ref = *p; + + dbg_memalloc("Reserving %d refs for block @0x%08x\n", nr, jeb->offset); + + /* If jeb->last_node is really a valid node then skip over it */ + if (ref && ref->flash_offset 
!= REF_EMPTY_NODE) + ref++; + + while (i) { + if (!ref) { + dbg_memalloc("Allocating new refblock linked from %p\n", p); + ref = *p = jffs2_alloc_refblock(); + if (!ref) + return -ENOMEM; + } + if (ref->flash_offset == REF_LINK_NODE) { + p = &ref->next_in_ino; + ref = *p; + continue; + } + i--; + ref++; + } + jeb->allocated_refs = nr; + + dbg_memalloc("Reserved %d refs for block @0x%08x, last_node is %p (%08x,%p)\n", + nr, jeb->offset, jeb->last_node, jeb->last_node->flash_offset, + jeb->last_node->next_in_ino); + + return 0; +} + +void jffs2_free_refblock(struct jffs2_raw_node_ref *x) { - D3 (printk (KERN_DEBUG "free_raw_node_ref at %p\n", x)); + dbg_memalloc("%p\n", x); kmem_cache_free(raw_node_ref_slab, x); } struct jffs2_node_frag *jffs2_alloc_node_frag(void) { - struct jffs2_node_frag *ret = kmem_cache_alloc(node_frag_slab, GFP_KERNEL); - D3 (printk (KERN_DEBUG "alloc_node_frag at %p\n", ret)); + struct jffs2_node_frag *ret; + ret = kmem_cache_alloc(node_frag_slab, GFP_KERNEL); + dbg_memalloc("%p\n", ret); return ret; } void jffs2_free_node_frag(struct jffs2_node_frag *x) { - D3 (printk (KERN_DEBUG "free_node_frag at %p\n", x)); + dbg_memalloc("%p\n", x); kmem_cache_free(node_frag_slab, x); } struct jffs2_inode_cache *jffs2_alloc_inode_cache(void) { - struct jffs2_inode_cache *ret = kmem_cache_alloc(inode_cache_slab, GFP_KERNEL); - D3 (printk(KERN_DEBUG "Allocated inocache at %p\n", ret)); + struct jffs2_inode_cache *ret; + ret = kmem_cache_alloc(inode_cache_slab, GFP_KERNEL); + dbg_memalloc("%p\n", ret); return ret; } void jffs2_free_inode_cache(struct jffs2_inode_cache *x) { - D3 (printk(KERN_DEBUG "Freeing inocache at %p\n", x)); + dbg_memalloc("%p\n", x); kmem_cache_free(inode_cache_slab, x); } +#ifdef CONFIG_JFFS2_FS_XATTR +struct jffs2_xattr_datum *jffs2_alloc_xattr_datum(void) +{ + struct jffs2_xattr_datum *xd; + xd = kmem_cache_zalloc(xattr_datum_cache, GFP_KERNEL); + dbg_memalloc("%p\n", xd); + if (!xd) + return NULL; + + xd->class = RAWNODE_CLASS_XATTR_DATUM; + xd->node = (void *)xd; + INIT_LIST_HEAD(&xd->xindex); + return xd; +} + +void jffs2_free_xattr_datum(struct jffs2_xattr_datum *xd) +{ + dbg_memalloc("%p\n", xd); + kmem_cache_free(xattr_datum_cache, xd); +} + +struct jffs2_xattr_ref *jffs2_alloc_xattr_ref(void) +{ + struct jffs2_xattr_ref *ref; + ref = kmem_cache_zalloc(xattr_ref_cache, GFP_KERNEL); + dbg_memalloc("%p\n", ref); + if (!ref) + return NULL; + + ref->class = RAWNODE_CLASS_XATTR_REF; + ref->node = (void *)ref; + return ref; +} + +void jffs2_free_xattr_ref(struct jffs2_xattr_ref *ref) +{ + dbg_memalloc("%p\n", ref); + kmem_cache_free(xattr_ref_cache, ref); +} +#endif diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c index 4991c348f6e..9a5449bc3af 100644 --- a/fs/jffs2/nodelist.c +++ b/fs/jffs2/nodelist.c @@ -1,492 +1,408 @@ /* * JFFS2 -- Journalling Flash File System, Version 2. * - * Copyright (C) 2001-2003 Red Hat, Inc. + * Copyright © 2001-2007 Red Hat, Inc. * * Created by David Woodhouse <dwmw2@infradead.org> * * For licensing information, see the file 'LICENCE' in this directory. 
* - * $Id: nodelist.c,v 1.98 2005/07/10 15:15:32 dedekind Exp $ - * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/kernel.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/mtd/mtd.h> #include <linux/rbtree.h> #include <linux/crc32.h> -#include <linux/slab.h> #include <linux/pagemap.h> #include "nodelist.h" +static void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, + struct jffs2_node_frag *this); + void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new, struct jffs2_full_dirent **list) { struct jffs2_full_dirent **prev = list; - D1(printk(KERN_DEBUG "jffs2_add_fd_to_list( %p, %p (->%p))\n", new, list, *list)); + + dbg_dentlist("add dirent \"%s\", ino #%u\n", new->name, new->ino); while ((*prev) && (*prev)->nhash <= new->nhash) { if ((*prev)->nhash == new->nhash && !strcmp((*prev)->name, new->name)) { /* Duplicate. Free one */ if (new->version < (*prev)->version) { - D1(printk(KERN_DEBUG "Eep! Marking new dirent node obsolete\n")); - D1(printk(KERN_DEBUG "New dirent is \"%s\"->ino #%u. Old is \"%s\"->ino #%u\n", new->name, new->ino, (*prev)->name, (*prev)->ino)); + dbg_dentlist("Eep! Marking new dirent node obsolete, old is \"%s\", ino #%u\n", + (*prev)->name, (*prev)->ino); jffs2_mark_node_obsolete(c, new->raw); jffs2_free_full_dirent(new); } else { - D1(printk(KERN_DEBUG "Marking old dirent node (ino #%u) obsolete\n", (*prev)->ino)); + dbg_dentlist("marking old dirent \"%s\", ino #%u obsolete\n", + (*prev)->name, (*prev)->ino); new->next = (*prev)->next; - jffs2_mark_node_obsolete(c, ((*prev)->raw)); + /* It may have been a 'placeholder' deletion dirent, + if jffs2_can_mark_obsolete() (see jffs2_do_unlink()) */ + if ((*prev)->raw) + jffs2_mark_node_obsolete(c, ((*prev)->raw)); jffs2_free_full_dirent(*prev); *prev = new; } - goto out; + return; } prev = &((*prev)->next); } new->next = *prev; *prev = new; +} + +uint32_t jffs2_truncate_fragtree(struct jffs2_sb_info *c, struct rb_root *list, uint32_t size) +{ + struct jffs2_node_frag *frag = jffs2_lookup_node_frag(list, size); + + dbg_fragtree("truncating fragtree to 0x%08x bytes\n", size); + + /* We know frag->ofs <= size. That's what lookup does for us */ + if (frag && frag->ofs != size) { + if (frag->ofs+frag->size > size) { + frag->size = size - frag->ofs; + } + frag = frag_next(frag); + } + while (frag && frag->ofs >= size) { + struct jffs2_node_frag *next = frag_next(frag); + + frag_erase(frag, list); + jffs2_obsolete_node_frag(c, frag); + frag = next; + } + + if (size == 0) + return 0; + + frag = frag_last(list); - out: - D2(while(*list) { - printk(KERN_DEBUG "Dirent \"%s\" (hash 0x%08x, ino #%u\n", (*list)->name, (*list)->nhash, (*list)->ino); - list = &(*list)->next; - }); + /* Sanity check for truncation to longer than we started with... */ + if (!frag) + return 0; + if (frag->ofs + frag->size < size) + return frag->ofs + frag->size; + + /* If the last fragment starts at the RAM page boundary, it is + * REF_PRISTINE irrespective of its size. */ + if (frag->node && (frag->ofs & (PAGE_CACHE_SIZE - 1)) == 0) { + dbg_fragtree2("marking the last fragment 0x%08x-0x%08x REF_PRISTINE.\n", + frag->ofs, frag->ofs + frag->size); + frag->node->raw->flash_offset = ref_offset(frag->node->raw) | REF_PRISTINE; + } + return size; } -/* - * Put a new tmp_dnode_info into the temporaty RB-tree, keeping the list in - * order of increasing version. 
- */ -static void jffs2_add_tn_to_tree(struct jffs2_tmp_dnode_info *tn, struct rb_root *list) +static void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, + struct jffs2_node_frag *this) { - struct rb_node **p = &list->rb_node; - struct rb_node * parent = NULL; - struct jffs2_tmp_dnode_info *this; - - while (*p) { - parent = *p; - this = rb_entry(parent, struct jffs2_tmp_dnode_info, rb); - - /* There may actually be a collision here, but it doesn't - actually matter. As long as the two nodes with the same - version are together, it's all fine. */ - if (tn->version < this->version) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; - } + if (this->node) { + this->node->frags--; + if (!this->node->frags) { + /* The node has no valid frags left. It's totally obsoleted */ + dbg_fragtree2("marking old node @0x%08x (0x%04x-0x%04x) obsolete\n", + ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size); + jffs2_mark_node_obsolete(c, this->node->raw); + jffs2_free_full_dnode(this->node); + } else { + dbg_fragtree2("marking old node @0x%08x (0x%04x-0x%04x) REF_NORMAL. frags is %d\n", + ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size, this->node->frags); + mark_ref_normal(this->node->raw); + } - rb_link_node(&tn->rb, parent, p); - rb_insert_color(&tn->rb, list); + } + jffs2_free_node_frag(this); } -static void jffs2_free_tmp_dnode_info_list(struct rb_root *list) +static void jffs2_fragtree_insert(struct jffs2_node_frag *newfrag, struct jffs2_node_frag *base) { - struct rb_node *this; - struct jffs2_tmp_dnode_info *tn; + struct rb_node *parent = &base->rb; + struct rb_node **link = &parent; + + dbg_fragtree2("insert frag (0x%04x-0x%04x)\n", newfrag->ofs, newfrag->ofs + newfrag->size); - this = list->rb_node; + while (*link) { + parent = *link; + base = rb_entry(parent, struct jffs2_node_frag, rb); - /* Now at bottom of tree */ - while (this) { - if (this->rb_left) - this = this->rb_left; - else if (this->rb_right) - this = this->rb_right; + if (newfrag->ofs > base->ofs) + link = &base->rb.rb_right; + else if (newfrag->ofs < base->ofs) + link = &base->rb.rb_left; else { - tn = rb_entry(this, struct jffs2_tmp_dnode_info, rb); - jffs2_free_full_dnode(tn->fn); - jffs2_free_tmp_dnode_info(tn); - - this = this->rb_parent; - if (!this) - break; - - if (this->rb_left == &tn->rb) - this->rb_left = NULL; - else if (this->rb_right == &tn->rb) - this->rb_right = NULL; - else BUG(); + JFFS2_ERROR("duplicate frag at %08x (%p,%p)\n", newfrag->ofs, newfrag, base); + BUG(); } } - list->rb_node = NULL; + + rb_link_node(&newfrag->rb, &base->rb, link); } -static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd) +/* + * Allocate and initializes a new fragment. + */ +static struct jffs2_node_frag * new_fragment(struct jffs2_full_dnode *fn, uint32_t ofs, uint32_t size) { - struct jffs2_full_dirent *next; - - while (fd) { - next = fd->next; - jffs2_free_full_dirent(fd); - fd = next; + struct jffs2_node_frag *newfrag; + + newfrag = jffs2_alloc_node_frag(); + if (likely(newfrag)) { + newfrag->ofs = ofs; + newfrag->size = size; + newfrag->node = fn; + } else { + JFFS2_ERROR("cannot allocate a jffs2_node_frag object\n"); } + + return newfrag; } -/* Returns first valid node after 'ref'. May return 'ref' */ -static struct jffs2_raw_node_ref *jffs2_first_valid_node(struct jffs2_raw_node_ref *ref) +/* + * Called when there is no overlapping fragment exist. Inserts a hole before the new + * fragment and inserts the new fragment to the fragtree. 
+ */ +static int no_overlapping_node(struct jffs2_sb_info *c, struct rb_root *root, + struct jffs2_node_frag *newfrag, + struct jffs2_node_frag *this, uint32_t lastend) { - while (ref && ref->next_in_ino) { - if (!ref_obsolete(ref)) - return ref; - D1(printk(KERN_DEBUG "node at 0x%08x is obsoleted. Ignoring.\n", ref_offset(ref))); - ref = ref->next_in_ino; + if (lastend < newfrag->node->ofs) { + /* put a hole in before the new fragment */ + struct jffs2_node_frag *holefrag; + + holefrag= new_fragment(NULL, lastend, newfrag->node->ofs - lastend); + if (unlikely(!holefrag)) { + jffs2_free_node_frag(newfrag); + return -ENOMEM; + } + + if (this) { + /* By definition, the 'this' node has no right-hand child, + because there are no frags with offset greater than it. + So that's where we want to put the hole */ + dbg_fragtree2("add hole frag %#04x-%#04x on the right of the new frag.\n", + holefrag->ofs, holefrag->ofs + holefrag->size); + rb_link_node(&holefrag->rb, &this->rb, &this->rb.rb_right); + } else { + dbg_fragtree2("Add hole frag %#04x-%#04x to the root of the tree.\n", + holefrag->ofs, holefrag->ofs + holefrag->size); + rb_link_node(&holefrag->rb, NULL, &root->rb_node); + } + rb_insert_color(&holefrag->rb, root); + this = holefrag; } - return NULL; -} -/* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated - with this ino, returning the former in order of version */ + if (this) { + /* By definition, the 'this' node has no right-hand child, + because there are no frags with offset greater than it. + So that's where we want to put new fragment */ + dbg_fragtree2("add the new node at the right\n"); + rb_link_node(&newfrag->rb, &this->rb, &this->rb.rb_right); + } else { + dbg_fragtree2("insert the new node at the root of the tree\n"); + rb_link_node(&newfrag->rb, NULL, &root->rb_node); + } + rb_insert_color(&newfrag->rb, root); -int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f, - struct rb_root *tnp, struct jffs2_full_dirent **fdp, - uint32_t *highest_version, uint32_t *latest_mctime, - uint32_t *mctime_ver) + return 0; +} + +/* Doesn't set inode->i_size */ +static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *root, struct jffs2_node_frag *newfrag) { - struct jffs2_raw_node_ref *ref, *valid_ref; - struct jffs2_tmp_dnode_info *tn; - struct rb_root ret_tn = RB_ROOT; - struct jffs2_full_dirent *fd, *ret_fd = NULL; - union jffs2_node_union node; - size_t retlen; - int err; - - *mctime_ver = 0; - - D1(printk(KERN_DEBUG "jffs2_get_inode_nodes(): ino #%u\n", f->inocache->ino)); - - spin_lock(&c->erase_completion_lock); - - valid_ref = jffs2_first_valid_node(f->inocache->nodes); - - if (!valid_ref && (f->inocache->ino != 1)) - printk(KERN_WARNING "Eep. No valid nodes for ino #%u\n", f->inocache->ino); - - while (valid_ref) { - /* We can hold a pointer to a non-obsolete node without the spinlock, - but _obsolete_ nodes may disappear at any time, if the block - they're in gets erased. So if we mark 'ref' obsolete while we're - not holding the lock, it can go away immediately. For that reason, - we find the next valid node first, before processing 'ref'. 
+ struct jffs2_node_frag *this; + uint32_t lastend; + + /* Skip all the nodes which are completed before this one starts */ + this = jffs2_lookup_node_frag(root, newfrag->node->ofs); + + if (this) { + dbg_fragtree2("lookup gave frag 0x%04x-0x%04x; phys 0x%08x (*%p)\n", + this->ofs, this->ofs+this->size, this->node?(ref_offset(this->node->raw)):0xffffffff, this); + lastend = this->ofs + this->size; + } else { + dbg_fragtree2("lookup gave no frag\n"); + lastend = 0; + } + + /* See if we ran off the end of the fragtree */ + if (lastend <= newfrag->ofs) { + /* We did */ + + /* Check if 'this' node was on the same page as the new node. + If so, both 'this' and the new node get marked REF_NORMAL so + the GC can take a look. */ - ref = valid_ref; - valid_ref = jffs2_first_valid_node(ref->next_in_ino); - spin_unlock(&c->erase_completion_lock); + if (lastend && (lastend-1) >> PAGE_CACHE_SHIFT == newfrag->ofs >> PAGE_CACHE_SHIFT) { + if (this->node) + mark_ref_normal(this->node->raw); + mark_ref_normal(newfrag->node->raw); + } - cond_resched(); + return no_overlapping_node(c, root, newfrag, this, lastend); + } - /* FIXME: point() */ - err = jffs2_flash_read(c, (ref_offset(ref)), - min_t(uint32_t, ref_totlen(c, NULL, ref), sizeof(node)), - &retlen, (void *)&node); - if (err) { - printk(KERN_WARNING "error %d reading node at 0x%08x in get_inode_nodes()\n", err, ref_offset(ref)); - goto free_out; + if (this->node) + dbg_fragtree2("dealing with frag %u-%u, phys %#08x(%d).\n", + this->ofs, this->ofs + this->size, + ref_offset(this->node->raw), ref_flags(this->node->raw)); + else + dbg_fragtree2("dealing with hole frag %u-%u.\n", + this->ofs, this->ofs + this->size); + + /* OK. 'this' is pointing at the first frag that newfrag->ofs at least partially obsoletes, + * - i.e. newfrag->ofs < this->ofs+this->size && newfrag->ofs >= this->ofs + */ + if (newfrag->ofs > this->ofs) { + /* This node isn't completely obsoleted. The start of it remains valid */ + + /* Mark the new node and the partially covered node REF_NORMAL -- let + the GC take a look at them */ + mark_ref_normal(newfrag->node->raw); + if (this->node) + mark_ref_normal(this->node->raw); + + if (this->ofs + this->size > newfrag->ofs + newfrag->size) { + /* The new node splits 'this' frag into two */ + struct jffs2_node_frag *newfrag2; + + if (this->node) + dbg_fragtree2("split old frag 0x%04x-0x%04x, phys 0x%08x\n", + this->ofs, this->ofs+this->size, ref_offset(this->node->raw)); + else + dbg_fragtree2("split old hole frag 0x%04x-0x%04x\n", + this->ofs, this->ofs+this->size); + + /* New second frag pointing to this's node */ + newfrag2 = new_fragment(this->node, newfrag->ofs + newfrag->size, + this->ofs + this->size - newfrag->ofs - newfrag->size); + if (unlikely(!newfrag2)) + return -ENOMEM; + if (this->node) + this->node->frags++; + + /* Adjust size of original 'this' */ + this->size = newfrag->ofs - this->ofs; + + /* Now, we know there's no node with offset + greater than this->ofs but smaller than + newfrag2->ofs or newfrag->ofs, for obvious + reasons. So we can do a tree insert from + 'this' to insert newfrag, and a tree insert + from newfrag to insert newfrag2. 
*/ + jffs2_fragtree_insert(newfrag, this); + rb_insert_color(&newfrag->rb, root); + + jffs2_fragtree_insert(newfrag2, newfrag); + rb_insert_color(&newfrag2->rb, root); + + return 0; } - + /* New node just reduces 'this' frag in size, doesn't split it */ + this->size = newfrag->ofs - this->ofs; + + /* Again, we know it lives down here in the tree */ + jffs2_fragtree_insert(newfrag, this); + rb_insert_color(&newfrag->rb, root); + } else { + /* New frag starts at the same point as 'this' used to. Replace + it in the tree without doing a delete and insertion */ + dbg_fragtree2("inserting newfrag (*%p),%d-%d in before 'this' (*%p),%d-%d\n", + newfrag, newfrag->ofs, newfrag->ofs+newfrag->size, this, this->ofs, this->ofs+this->size); + + rb_replace_node(&this->rb, &newfrag->rb, root); + + if (newfrag->ofs + newfrag->size >= this->ofs+this->size) { + dbg_fragtree2("obsoleting node frag %p (%x-%x)\n", this, this->ofs, this->ofs+this->size); + jffs2_obsolete_node_frag(c, this); + } else { + this->ofs += newfrag->size; + this->size -= newfrag->size; - /* Check we've managed to read at least the common node header */ - if (retlen < min_t(uint32_t, ref_totlen(c, NULL, ref), sizeof(node.u))) { - printk(KERN_WARNING "short read in get_inode_nodes()\n"); - err = -EIO; - goto free_out; + jffs2_fragtree_insert(this, newfrag); + rb_insert_color(&this->rb, root); + return 0; } - - switch (je16_to_cpu(node.u.nodetype)) { - case JFFS2_NODETYPE_DIRENT: - D1(printk(KERN_DEBUG "Node at %08x (%d) is a dirent node\n", ref_offset(ref), ref_flags(ref))); - if (ref_flags(ref) == REF_UNCHECKED) { - printk(KERN_WARNING "BUG: Dirent node at 0x%08x never got checked? How?\n", ref_offset(ref)); - BUG(); - } - if (retlen < sizeof(node.d)) { - printk(KERN_WARNING "short read in get_inode_nodes()\n"); - err = -EIO; - goto free_out; - } - /* sanity check */ - if (PAD((node.d.nsize + sizeof (node.d))) != PAD(je32_to_cpu (node.d.totlen))) { - printk(KERN_NOTICE "jffs2_get_inode_nodes(): Illegal nsize in node at 0x%08x: nsize 0x%02x, totlen %04x\n", - ref_offset(ref), node.d.nsize, je32_to_cpu(node.d.totlen)); - jffs2_mark_node_obsolete(c, ref); - spin_lock(&c->erase_completion_lock); - continue; - } - if (je32_to_cpu(node.d.version) > *highest_version) - *highest_version = je32_to_cpu(node.d.version); - if (ref_obsolete(ref)) { - /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */ - printk(KERN_ERR "Dirent node at 0x%08x became obsolete while we weren't looking\n", - ref_offset(ref)); - BUG(); - } - - fd = jffs2_alloc_full_dirent(node.d.nsize+1); - if (!fd) { - err = -ENOMEM; - goto free_out; - } - fd->raw = ref; - fd->version = je32_to_cpu(node.d.version); - fd->ino = je32_to_cpu(node.d.ino); - fd->type = node.d.type; - - /* Pick out the mctime of the latest dirent */ - if(fd->version > *mctime_ver) { - *mctime_ver = fd->version; - *latest_mctime = je32_to_cpu(node.d.mctime); - } + } + /* OK, now we have newfrag added in the correct place in the tree, but + frag_next(newfrag) may be a fragment which is overlapped by it + */ + while ((this = frag_next(newfrag)) && newfrag->ofs + newfrag->size >= this->ofs + this->size) { + /* 'this' frag is obsoleted completely. 
*/ + dbg_fragtree2("obsoleting node frag %p (%x-%x) and removing from tree\n", + this, this->ofs, this->ofs+this->size); + rb_erase(&this->rb, root); + jffs2_obsolete_node_frag(c, this); + } + /* Now we're pointing at the first frag which isn't totally obsoleted by + the new frag */ - /* memcpy as much of the name as possible from the raw - dirent we've already read from the flash - */ - if (retlen > sizeof(struct jffs2_raw_dirent)) - memcpy(&fd->name[0], &node.d.name[0], min_t(uint32_t, node.d.nsize, (retlen-sizeof(struct jffs2_raw_dirent)))); - - /* Do we need to copy any more of the name directly - from the flash? - */ - if (node.d.nsize + sizeof(struct jffs2_raw_dirent) > retlen) { - /* FIXME: point() */ - int already = retlen - sizeof(struct jffs2_raw_dirent); - - err = jffs2_flash_read(c, (ref_offset(ref)) + retlen, - node.d.nsize - already, &retlen, &fd->name[already]); - if (!err && retlen != node.d.nsize - already) - err = -EIO; - - if (err) { - printk(KERN_WARNING "Read remainder of name in jffs2_get_inode_nodes(): error %d\n", err); - jffs2_free_full_dirent(fd); - goto free_out; - } - } - fd->nhash = full_name_hash(fd->name, node.d.nsize); - fd->next = NULL; - fd->name[node.d.nsize] = '\0'; - /* Wheee. We now have a complete jffs2_full_dirent structure, with - the name in it and everything. Link it into the list - */ - D1(printk(KERN_DEBUG "Adding fd \"%s\", ino #%u\n", fd->name, fd->ino)); - jffs2_add_fd_to_list(c, fd, &ret_fd); - break; - - case JFFS2_NODETYPE_INODE: - D1(printk(KERN_DEBUG "Node at %08x (%d) is a data node\n", ref_offset(ref), ref_flags(ref))); - if (retlen < sizeof(node.i)) { - printk(KERN_WARNING "read too short for dnode\n"); - err = -EIO; - goto free_out; - } - if (je32_to_cpu(node.i.version) > *highest_version) - *highest_version = je32_to_cpu(node.i.version); - D1(printk(KERN_DEBUG "version %d, highest_version now %d\n", je32_to_cpu(node.i.version), *highest_version)); - - if (ref_obsolete(ref)) { - /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */ - printk(KERN_ERR "Inode node at 0x%08x became obsolete while we weren't looking\n", - ref_offset(ref)); - BUG(); - } + if (!this || newfrag->ofs + newfrag->size == this->ofs) + return 0; - /* If we've never checked the CRCs on this node, check them now. 
*/ - if (ref_flags(ref) == REF_UNCHECKED) { - uint32_t crc, len; - struct jffs2_eraseblock *jeb; - - crc = crc32(0, &node, sizeof(node.i)-8); - if (crc != je32_to_cpu(node.i.node_crc)) { - printk(KERN_NOTICE "jffs2_get_inode_nodes(): CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", - ref_offset(ref), je32_to_cpu(node.i.node_crc), crc); - jffs2_mark_node_obsolete(c, ref); - spin_lock(&c->erase_completion_lock); - continue; - } - - /* sanity checks */ - if ( je32_to_cpu(node.i.offset) > je32_to_cpu(node.i.isize) || - PAD(je32_to_cpu(node.i.csize) + sizeof (node.i)) != PAD(je32_to_cpu(node.i.totlen))) { - printk(KERN_NOTICE "jffs2_get_inode_nodes(): Inode corrupted at 0x%08x, totlen %d, #ino %d, version %d, isize %d, csize %d, dsize %d \n", - ref_offset(ref), je32_to_cpu(node.i.totlen), je32_to_cpu(node.i.ino), - je32_to_cpu(node.i.version), je32_to_cpu(node.i.isize), - je32_to_cpu(node.i.csize), je32_to_cpu(node.i.dsize)); - jffs2_mark_node_obsolete(c, ref); - spin_lock(&c->erase_completion_lock); - continue; - } - - if (node.i.compr != JFFS2_COMPR_ZERO && je32_to_cpu(node.i.csize)) { - unsigned char *buf=NULL; - uint32_t pointed = 0; -#ifndef __ECOS - if (c->mtd->point) { - err = c->mtd->point (c->mtd, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize), - &retlen, &buf); - if (!err && retlen < je32_to_cpu(node.i.csize)) { - D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", retlen)); - c->mtd->unpoint(c->mtd, buf, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize)); - } else if (err){ - D1(printk(KERN_DEBUG "MTD point failed %d\n", err)); - } else - pointed = 1; /* succefully pointed to device */ - } -#endif - if(!pointed){ - buf = kmalloc(je32_to_cpu(node.i.csize), GFP_KERNEL); - if (!buf) - return -ENOMEM; - - err = jffs2_flash_read(c, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize), - &retlen, buf); - if (!err && retlen != je32_to_cpu(node.i.csize)) - err = -EIO; - if (err) { - kfree(buf); - return err; - } - } - crc = crc32(0, buf, je32_to_cpu(node.i.csize)); - if(!pointed) - kfree(buf); -#ifndef __ECOS - else - c->mtd->unpoint(c->mtd, buf, ref_offset(ref) + sizeof(node.i), je32_to_cpu(node.i.csize)); -#endif + /* Still some overlap but we don't need to move it in the tree */ + this->size = (this->ofs + this->size) - (newfrag->ofs + newfrag->size); + this->ofs = newfrag->ofs + newfrag->size; - if (crc != je32_to_cpu(node.i.data_crc)) { - printk(KERN_NOTICE "jffs2_get_inode_nodes(): Data CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", - ref_offset(ref), je32_to_cpu(node.i.data_crc), crc); - jffs2_mark_node_obsolete(c, ref); - spin_lock(&c->erase_completion_lock); - continue; - } - - } - - /* Mark the node as having been checked and fix the accounting accordingly */ - spin_lock(&c->erase_completion_lock); - jeb = &c->blocks[ref->flash_offset / c->sector_size]; - len = ref_totlen(c, jeb, ref); - - jeb->used_size += len; - jeb->unchecked_size -= len; - c->used_size += len; - c->unchecked_size -= len; - - /* If node covers at least a whole page, or if it starts at the - beginning of a page and runs to the end of the file, or if - it's a hole node, mark it REF_PRISTINE, else REF_NORMAL. - - If it's actually overlapped, it'll get made NORMAL (or OBSOLETE) - when the overlapping node(s) get added to the tree anyway. 
- */ - if ((je32_to_cpu(node.i.dsize) >= PAGE_CACHE_SIZE) || - ( ((je32_to_cpu(node.i.offset)&(PAGE_CACHE_SIZE-1))==0) && - (je32_to_cpu(node.i.dsize)+je32_to_cpu(node.i.offset) == je32_to_cpu(node.i.isize)))) { - D1(printk(KERN_DEBUG "Marking node at 0x%08x REF_PRISTINE\n", ref_offset(ref))); - ref->flash_offset = ref_offset(ref) | REF_PRISTINE; - } else { - D1(printk(KERN_DEBUG "Marking node at 0x%08x REF_NORMAL\n", ref_offset(ref))); - ref->flash_offset = ref_offset(ref) | REF_NORMAL; - } - spin_unlock(&c->erase_completion_lock); - } + /* And mark them REF_NORMAL so the GC takes a look at them */ + if (this->node) + mark_ref_normal(this->node->raw); + mark_ref_normal(newfrag->node->raw); - tn = jffs2_alloc_tmp_dnode_info(); - if (!tn) { - D1(printk(KERN_DEBUG "alloc tn failed\n")); - err = -ENOMEM; - goto free_out; - } + return 0; +} - tn->fn = jffs2_alloc_full_dnode(); - if (!tn->fn) { - D1(printk(KERN_DEBUG "alloc fn failed\n")); - err = -ENOMEM; - jffs2_free_tmp_dnode_info(tn); - goto free_out; - } - tn->version = je32_to_cpu(node.i.version); - tn->fn->ofs = je32_to_cpu(node.i.offset); - /* There was a bug where we wrote hole nodes out with - csize/dsize swapped. Deal with it */ - if (node.i.compr == JFFS2_COMPR_ZERO && !je32_to_cpu(node.i.dsize) && je32_to_cpu(node.i.csize)) - tn->fn->size = je32_to_cpu(node.i.csize); - else // normal case... - tn->fn->size = je32_to_cpu(node.i.dsize); - tn->fn->raw = ref; - D1(printk(KERN_DEBUG "dnode @%08x: ver %u, offset %04x, dsize %04x\n", - ref_offset(ref), je32_to_cpu(node.i.version), - je32_to_cpu(node.i.offset), je32_to_cpu(node.i.dsize))); - jffs2_add_tn_to_tree(tn, &ret_tn); - break; - - default: - if (ref_flags(ref) == REF_UNCHECKED) { - struct jffs2_eraseblock *jeb; - uint32_t len; - - printk(KERN_ERR "Eep. Unknown node type %04x at %08x was marked REF_UNCHECKED\n", - je16_to_cpu(node.u.nodetype), ref_offset(ref)); - - /* Mark the node as having been checked and fix the accounting accordingly */ - spin_lock(&c->erase_completion_lock); - jeb = &c->blocks[ref->flash_offset / c->sector_size]; - len = ref_totlen(c, jeb, ref); - - jeb->used_size += len; - jeb->unchecked_size -= len; - c->used_size += len; - c->unchecked_size -= len; - - mark_ref_normal(ref); - spin_unlock(&c->erase_completion_lock); - } - node.u.nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(node.u.nodetype)); - if (crc32(0, &node, sizeof(struct jffs2_unknown_node)-4) != je32_to_cpu(node.u.hdr_crc)) { - /* Hmmm. This should have been caught at scan time. */ - printk(KERN_ERR "Node header CRC failed at %08x. 
But it must have been OK earlier.\n", - ref_offset(ref)); - printk(KERN_ERR "Node was: { %04x, %04x, %08x, %08x }\n", - je16_to_cpu(node.u.magic), je16_to_cpu(node.u.nodetype), je32_to_cpu(node.u.totlen), - je32_to_cpu(node.u.hdr_crc)); - jffs2_mark_node_obsolete(c, ref); - } else switch(je16_to_cpu(node.u.nodetype) & JFFS2_COMPAT_MASK) { - case JFFS2_FEATURE_INCOMPAT: - printk(KERN_NOTICE "Unknown INCOMPAT nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref)); - /* EEP */ - BUG(); - break; - case JFFS2_FEATURE_ROCOMPAT: - printk(KERN_NOTICE "Unknown ROCOMPAT nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref)); - if (!(c->flags & JFFS2_SB_FLAG_RO)) - BUG(); - break; - case JFFS2_FEATURE_RWCOMPAT_COPY: - printk(KERN_NOTICE "Unknown RWCOMPAT_COPY nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref)); - break; - case JFFS2_FEATURE_RWCOMPAT_DELETE: - printk(KERN_NOTICE "Unknown RWCOMPAT_DELETE nodetype %04X at %08x\n", je16_to_cpu(node.u.nodetype), ref_offset(ref)); - jffs2_mark_node_obsolete(c, ref); - break; - } +/* + * Given an inode, probably with existing tree of fragments, add the new node + * to the fragment tree. + */ +int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn) +{ + int ret; + struct jffs2_node_frag *newfrag; - } - spin_lock(&c->erase_completion_lock); + if (unlikely(!fn->size)) + return 0; + + newfrag = new_fragment(fn, fn->ofs, fn->size); + if (unlikely(!newfrag)) + return -ENOMEM; + newfrag->node->frags = 1; + + dbg_fragtree("adding node %#04x-%#04x @0x%08x on flash, newfrag *%p\n", + fn->ofs, fn->ofs+fn->size, ref_offset(fn->raw), newfrag); + ret = jffs2_add_frag_to_fragtree(c, &f->fragtree, newfrag); + if (unlikely(ret)) + return ret; + + /* If we now share a page with other nodes, mark either previous + or next node REF_NORMAL, as appropriate. */ + if (newfrag->ofs & (PAGE_CACHE_SIZE-1)) { + struct jffs2_node_frag *prev = frag_prev(newfrag); + + mark_ref_normal(fn->raw); + /* If we don't start at zero there's _always_ a previous */ + if (prev->node) + mark_ref_normal(prev->node->raw); } - spin_unlock(&c->erase_completion_lock); - *tnp = ret_tn; - *fdp = ret_fd; - return 0; + if ((newfrag->ofs+newfrag->size) & (PAGE_CACHE_SIZE-1)) { + struct jffs2_node_frag *next = frag_next(newfrag); - free_out: - jffs2_free_tmp_dnode_info_list(&ret_tn); - jffs2_free_full_dirent_list(ret_fd); - return err; + if (next) { + mark_ref_normal(fn->raw); + if (next->node) + mark_ref_normal(next->node->raw); + } + } + jffs2_dbg_fragtree_paranoia_check_nolock(f); + + return 0; } void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state) @@ -499,24 +415,21 @@ void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache /* During mount, this needs no locking. During normal operation, its callers want to do other stuff while still holding the inocache_lock. - Rather than introducing special case get_ino_cache functions or + Rather than introducing special case get_ino_cache functions or callbacks, we just let the caller do the locking itself. 
*/ - + struct jffs2_inode_cache *jffs2_get_ino_cache(struct jffs2_sb_info *c, uint32_t ino) { struct jffs2_inode_cache *ret; - D2(printk(KERN_DEBUG "jffs2_get_ino_cache(): ino %u\n", ino)); - - ret = c->inocache_list[ino % INOCACHE_HASHSIZE]; + ret = c->inocache_list[ino % c->inocache_hashsize]; while (ret && ret->ino < ino) { ret = ret->next; } - + if (ret && ret->ino != ino) ret = NULL; - D2(printk(KERN_DEBUG "jffs2_get_ino_cache found %p for ino %u\n", ret, ino)); return ret; } @@ -528,9 +441,9 @@ void jffs2_add_ino_cache (struct jffs2_sb_info *c, struct jffs2_inode_cache *new if (!new->ino) new->ino = ++c->highest_ino; - D2(printk(KERN_DEBUG "jffs2_add_ino_cache: Add %p (ino #%u)\n", new, new->ino)); + dbg_inocache("add %p (ino #%u)\n", new, new->ino); - prev = &c->inocache_list[new->ino % INOCACHE_HASHSIZE]; + prev = &c->inocache_list[new->ino % c->inocache_hashsize]; while ((*prev) && (*prev)->ino < new->ino) { prev = &(*prev)->next; @@ -544,11 +457,15 @@ void jffs2_add_ino_cache (struct jffs2_sb_info *c, struct jffs2_inode_cache *new void jffs2_del_ino_cache(struct jffs2_sb_info *c, struct jffs2_inode_cache *old) { struct jffs2_inode_cache **prev; - D1(printk(KERN_DEBUG "jffs2_del_ino_cache: Del %p (ino #%u)\n", old, old->ino)); + +#ifdef CONFIG_JFFS2_FS_XATTR + BUG_ON(old->xref); +#endif + dbg_inocache("del %p (ino #%u)\n", old, old->ino); spin_lock(&c->inocache_lock); - - prev = &c->inocache_list[old->ino % INOCACHE_HASHSIZE]; - + + prev = &c->inocache_list[old->ino % c->inocache_hashsize]; + while ((*prev) && (*prev)->ino < old->ino) { prev = &(*prev)->next; } @@ -558,7 +475,7 @@ void jffs2_del_ino_cache(struct jffs2_sb_info *c, struct jffs2_inode_cache *old) /* Free it now unless it's in READING or CLEARING state, which are the transitions upon read_inode() and clear_inode(). The - rest of the time we know nobody else is looking at it, and + rest of the time we know nobody else is looking at it, and if it's held by read_inode() or clear_inode() they'll free it for themselves. */ if (old->state != INO_STATE_READING && old->state != INO_STATE_CLEARING) @@ -571,11 +488,12 @@ void jffs2_free_ino_caches(struct jffs2_sb_info *c) { int i; struct jffs2_inode_cache *this, *next; - - for (i=0; i<INOCACHE_HASHSIZE; i++) { + + for (i=0; i < c->inocache_hashsize; i++) { this = c->inocache_list[i]; while (this) { next = this->next; + jffs2_xattr_free_inode(c, this); jffs2_free_inode_cache(this); this = next; } @@ -590,46 +508,42 @@ void jffs2_free_raw_node_refs(struct jffs2_sb_info *c) for (i=0; i<c->nr_blocks; i++) { this = c->blocks[i].first_node; - while(this) { - next = this->next_phys; - jffs2_free_raw_node_ref(this); + while (this) { + if (this[REFS_PER_BLOCK].flash_offset == REF_LINK_NODE) + next = this[REFS_PER_BLOCK].next_in_ino; + else + next = NULL; + + jffs2_free_refblock(this); this = next; } c->blocks[i].first_node = c->blocks[i].last_node = NULL; } } - + struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_t offset) { - /* The common case in lookup is that there will be a node + /* The common case in lookup is that there will be a node which precisely matches. 
So we go looking for that first */ struct rb_node *next; struct jffs2_node_frag *prev = NULL; struct jffs2_node_frag *frag = NULL; - D2(printk(KERN_DEBUG "jffs2_lookup_node_frag(%p, %d)\n", fragtree, offset)); + dbg_fragtree2("root %p, offset %d\n", fragtree, offset); next = fragtree->rb_node; while(next) { frag = rb_entry(next, struct jffs2_node_frag, rb); - D2(printk(KERN_DEBUG "Considering frag %d-%d (%p). left %p, right %p\n", - frag->ofs, frag->ofs+frag->size, frag, frag->rb.rb_left, frag->rb.rb_right)); if (frag->ofs + frag->size <= offset) { - D2(printk(KERN_DEBUG "Going right from frag %d-%d, before the region we care about\n", - frag->ofs, frag->ofs+frag->size)); /* Remember the closest smaller match on the way down */ if (!prev || frag->ofs > prev->ofs) prev = frag; next = frag->rb.rb_right; } else if (frag->ofs > offset) { - D2(printk(KERN_DEBUG "Going left from frag %d-%d, after the region we care about\n", - frag->ofs, frag->ofs+frag->size)); next = frag->rb.rb_left; } else { - D2(printk(KERN_DEBUG "Returning frag %d,%d, matched\n", - frag->ofs, frag->ofs+frag->size)); return frag; } } @@ -638,11 +552,11 @@ struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_ and return the closest smaller one */ if (prev) - D2(printk(KERN_DEBUG "No match. Returning frag %d,%d, closest previous\n", - prev->ofs, prev->ofs+prev->size)); - else - D2(printk(KERN_DEBUG "Returning NULL, empty fragtree\n")); - + dbg_fragtree2("no match. Returning frag %#04x-%#04x, closest previous\n", + prev->ofs, prev->ofs+prev->size); + else + dbg_fragtree2("returning NULL, empty fragtree\n"); + return prev; } @@ -650,77 +564,192 @@ struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_ they're killed. */ void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c) { - struct jffs2_node_frag *frag; - struct jffs2_node_frag *parent; - - if (!root->rb_node) - return; - - frag = (rb_entry(root->rb_node, struct jffs2_node_frag, rb)); - - while(frag) { - if (frag->rb.rb_left) { - D2(printk(KERN_DEBUG "Going left from frag (%p) %d-%d\n", - frag, frag->ofs, frag->ofs+frag->size)); - frag = frag_left(frag); - continue; - } - if (frag->rb.rb_right) { - D2(printk(KERN_DEBUG "Going right from frag (%p) %d-%d\n", - frag, frag->ofs, frag->ofs+frag->size)); - frag = frag_right(frag); - continue; - } + struct jffs2_node_frag *frag, *next; - D2(printk(KERN_DEBUG "jffs2_kill_fragtree: frag at 0x%x-0x%x: node %p, frags %d--\n", - frag->ofs, frag->ofs+frag->size, frag->node, - frag->node?frag->node->frags:0)); - + dbg_fragtree("killing\n"); + rbtree_postorder_for_each_entry_safe(frag, next, root, rb) { if (frag->node && !(--frag->node->frags)) { - /* Not a hole, and it's the final remaining frag + /* Not a hole, and it's the final remaining frag of this node. 
Free the node */ if (c) jffs2_mark_node_obsolete(c, frag->node->raw); - + jffs2_free_full_dnode(frag->node); } - parent = frag_parent(frag); - if (parent) { - if (frag_left(parent) == frag) - parent->rb.rb_left = NULL; - else - parent->rb.rb_right = NULL; - } jffs2_free_node_frag(frag); - frag = parent; - cond_resched(); } } -void jffs2_fragtree_insert(struct jffs2_node_frag *newfrag, struct jffs2_node_frag *base) +struct jffs2_raw_node_ref *jffs2_link_node_ref(struct jffs2_sb_info *c, + struct jffs2_eraseblock *jeb, + uint32_t ofs, uint32_t len, + struct jffs2_inode_cache *ic) { - struct rb_node *parent = &base->rb; - struct rb_node **link = &parent; + struct jffs2_raw_node_ref *ref; - D2(printk(KERN_DEBUG "jffs2_fragtree_insert(%p; %d-%d, %p)\n", newfrag, - newfrag->ofs, newfrag->ofs+newfrag->size, base)); + BUG_ON(!jeb->allocated_refs); + jeb->allocated_refs--; - while (*link) { - parent = *link; - base = rb_entry(parent, struct jffs2_node_frag, rb); - - D2(printk(KERN_DEBUG "fragtree_insert considering frag at 0x%x\n", base->ofs)); - if (newfrag->ofs > base->ofs) - link = &base->rb.rb_right; - else if (newfrag->ofs < base->ofs) - link = &base->rb.rb_left; - else { - printk(KERN_CRIT "Duplicate frag at %08x (%p,%p)\n", newfrag->ofs, newfrag, base); + ref = jeb->last_node; + + dbg_noderef("Last node at %p is (%08x,%p)\n", ref, ref->flash_offset, + ref->next_in_ino); + + while (ref->flash_offset != REF_EMPTY_NODE) { + if (ref->flash_offset == REF_LINK_NODE) + ref = ref->next_in_ino; + else + ref++; + } + + dbg_noderef("New ref is %p (%08x becomes %08x,%p) len 0x%x\n", ref, + ref->flash_offset, ofs, ref->next_in_ino, len); + + ref->flash_offset = ofs; + + if (!jeb->first_node) { + jeb->first_node = ref; + BUG_ON(ref_offset(ref) != jeb->offset); + } else if (unlikely(ref_offset(ref) != jeb->offset + c->sector_size - jeb->free_size)) { + uint32_t last_len = ref_totlen(c, jeb, jeb->last_node); + + JFFS2_ERROR("Adding new ref %p at (0x%08x-0x%08x) not immediately after previous (0x%08x-0x%08x)\n", + ref, ref_offset(ref), ref_offset(ref)+len, + ref_offset(jeb->last_node), + ref_offset(jeb->last_node)+last_len); + BUG(); + } + jeb->last_node = ref; + + if (ic) { + ref->next_in_ino = ic->nodes; + ic->nodes = ref; + } else { + ref->next_in_ino = NULL; + } + + switch(ref_flags(ref)) { + case REF_UNCHECKED: + c->unchecked_size += len; + jeb->unchecked_size += len; + break; + + case REF_NORMAL: + case REF_PRISTINE: + c->used_size += len; + jeb->used_size += len; + break; + + case REF_OBSOLETE: + c->dirty_size += len; + jeb->dirty_size += len; + break; + } + c->free_size -= len; + jeb->free_size -= len; + +#ifdef TEST_TOTLEN + /* Set (and test) __totlen field... for now */ + ref->__totlen = len; + ref_totlen(c, jeb, ref); +#endif + return ref; +} + +/* No locking, no reservation of 'ref'. 
Do not use on a live file system */ +int jffs2_scan_dirty_space(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, + uint32_t size) +{ + if (!size) + return 0; + if (unlikely(size > jeb->free_size)) { + pr_crit("Dirty space 0x%x larger then free_size 0x%x (wasted 0x%x)\n", + size, jeb->free_size, jeb->wasted_size); + BUG(); + } + /* REF_EMPTY_NODE is !obsolete, so that works OK */ + if (jeb->last_node && ref_obsolete(jeb->last_node)) { +#ifdef TEST_TOTLEN + jeb->last_node->__totlen += size; +#endif + c->dirty_size += size; + c->free_size -= size; + jeb->dirty_size += size; + jeb->free_size -= size; + } else { + uint32_t ofs = jeb->offset + c->sector_size - jeb->free_size; + ofs |= REF_OBSOLETE; + + jffs2_link_node_ref(c, jeb, ofs, size, NULL); + } + + return 0; +} + +/* Calculate totlen from surrounding nodes or eraseblock */ +static inline uint32_t __ref_totlen(struct jffs2_sb_info *c, + struct jffs2_eraseblock *jeb, + struct jffs2_raw_node_ref *ref) +{ + uint32_t ref_end; + struct jffs2_raw_node_ref *next_ref = ref_next(ref); + + if (next_ref) + ref_end = ref_offset(next_ref); + else { + if (!jeb) + jeb = &c->blocks[ref->flash_offset / c->sector_size]; + + /* Last node in block. Use free_space */ + if (unlikely(ref != jeb->last_node)) { + pr_crit("ref %p @0x%08x is not jeb->last_node (%p @0x%08x)\n", + ref, ref_offset(ref), jeb->last_node, + jeb->last_node ? + ref_offset(jeb->last_node) : 0); BUG(); } + ref_end = jeb->offset + c->sector_size - jeb->free_size; } + return ref_end - ref_offset(ref); +} - rb_link_node(&newfrag->rb, &base->rb, link); +uint32_t __jffs2_ref_totlen(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, + struct jffs2_raw_node_ref *ref) +{ + uint32_t ret; + + ret = __ref_totlen(c, jeb, ref); + +#ifdef TEST_TOTLEN + if (unlikely(ret != ref->__totlen)) { + if (!jeb) + jeb = &c->blocks[ref->flash_offset / c->sector_size]; + + pr_crit("Totlen for ref at %p (0x%08x-0x%08x) miscalculated as 0x%x instead of %x\n", + ref, ref_offset(ref), ref_offset(ref) + ref->__totlen, + ret, ref->__totlen); + if (ref_next(ref)) { + pr_crit("next %p (0x%08x-0x%08x)\n", + ref_next(ref), ref_offset(ref_next(ref)), + ref_offset(ref_next(ref)) + ref->__totlen); + } else + pr_crit("No next ref. jeb->last_node is %p\n", + jeb->last_node); + + pr_crit("jeb->wasted_size %x, dirty_size %x, used_size %x, free_size %x\n", + jeb->wasted_size, jeb->dirty_size, jeb->used_size, + jeb->free_size); + +#if defined(JFFS2_DBG_DUMPS) || defined(JFFS2_DBG_PARANOIA_CHECKS) + __jffs2_dbg_dump_node_refs_nolock(c, jeb); +#endif + + WARN_ON(1); + + ret = ref->__totlen; + } +#endif /* TEST_TOTLEN */ + return ret; } diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h index b34c397909e..fa35ff79ab3 100644 --- a/fs/jffs2/nodelist.h +++ b/fs/jffs2/nodelist.h @@ -1,49 +1,32 @@ /* * JFFS2 -- Journalling Flash File System, Version 2. * - * Copyright (C) 2001-2003 Red Hat, Inc. + * Copyright © 2001-2007 Red Hat, Inc. * * Created by David Woodhouse <dwmw2@infradead.org> * * For licensing information, see the file 'LICENCE' in this directory. 
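With the per-reference __totlen field compiled out unless TEST_TOTLEN is defined, a node's total length is now derived from its neighbours: the offset of the next reference, or, for the last node in a block, the first free byte of that eraseblock. A worked example with invented numbers (a 64 KiB eraseblock at 0x40000):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t jeb_offset  = 0x40000;		/* eraseblock start (assumed) */
	uint32_t sector_size = 0x10000;		/* 64 KiB eraseblock (assumed) */
	uint32_t free_size   = 0xfe00;		/* unwritten bytes left in it */
	uint32_t ref_ofs     = 0x40100;		/* node under consideration */
	uint32_t next_ofs    = 0x40144;		/* 0 if this is the last node */

	/* Ordinary case: the node runs up to the next reference. */
	uint32_t ref_end = next_ofs ? next_ofs
				    : jeb_offset + sector_size - free_size;
	printf("totlen = 0x%x\n", (unsigned)(ref_end - ref_ofs));		/* 0x44 */

	/* Last-node case: ref_end = 0x40000 + 0x10000 - 0xfe00 = 0x40200 */
	ref_end = jeb_offset + sector_size - free_size;
	printf("last-node totlen = 0x%x\n", (unsigned)(ref_end - ref_ofs));	/* 0x100 */
	return 0;
}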
* - * $Id: nodelist.h,v 1.131 2005/07/05 21:03:07 dwmw2 Exp $ - * */ #ifndef __JFFS2_NODELIST_H__ #define __JFFS2_NODELIST_H__ -#include <linux/config.h> #include <linux/fs.h> #include <linux/types.h> #include <linux/jffs2.h> -#include <linux/jffs2_fs_sb.h> -#include <linux/jffs2_fs_i.h> +#include "jffs2_fs_sb.h" +#include "jffs2_fs_i.h" +#include "xattr.h" +#include "acl.h" +#include "summary.h" #ifdef __ECOS #include "os-ecos.h" #else -#include <linux/mtd/compatmac.h> /* For min/max in older kernels */ #include "os-linux.h" #endif -#ifndef CONFIG_JFFS2_FS_DEBUG -#define CONFIG_JFFS2_FS_DEBUG 1 -#endif - -#if CONFIG_JFFS2_FS_DEBUG > 0 -#define D1(x) x -#else -#define D1(x) -#endif - -#if CONFIG_JFFS2_FS_DEBUG > 1 -#define D2(x) x -#else -#define D2(x) -#endif - #define JFFS2_NATIVE_ENDIAN /* Note we handle mode bits conversion from JFFS2 (i.e. Linux) to/from @@ -54,6 +37,9 @@ #define cpu_to_je32(x) ((jint32_t){x}) #define cpu_to_jemode(x) ((jmode_t){os_to_jffs2_mode(x)}) +#define constant_cpu_to_je16(x) ((jint16_t){x}) +#define constant_cpu_to_je32(x) ((jint32_t){x}) + #define je16_to_cpu(x) ((x).v16) #define je32_to_cpu(x) ((x).v32) #define jemode_to_cpu(x) (jffs2_to_os_mode((x).m)) @@ -62,6 +48,9 @@ #define cpu_to_je32(x) ((jint32_t){cpu_to_be32(x)}) #define cpu_to_jemode(x) ((jmode_t){cpu_to_be32(os_to_jffs2_mode(x))}) +#define constant_cpu_to_je16(x) ((jint16_t){__constant_cpu_to_be16(x)}) +#define constant_cpu_to_je32(x) ((jint32_t){__constant_cpu_to_be32(x)}) + #define je16_to_cpu(x) (be16_to_cpu(x.v16)) #define je32_to_cpu(x) (be32_to_cpu(x.v32)) #define jemode_to_cpu(x) (be32_to_cpu(jffs2_to_os_mode((x).m))) @@ -70,34 +59,76 @@ #define cpu_to_je32(x) ((jint32_t){cpu_to_le32(x)}) #define cpu_to_jemode(x) ((jmode_t){cpu_to_le32(os_to_jffs2_mode(x))}) +#define constant_cpu_to_je16(x) ((jint16_t){__constant_cpu_to_le16(x)}) +#define constant_cpu_to_je32(x) ((jint32_t){__constant_cpu_to_le32(x)}) + #define je16_to_cpu(x) (le16_to_cpu(x.v16)) #define je32_to_cpu(x) (le32_to_cpu(x.v32)) #define jemode_to_cpu(x) (le32_to_cpu(jffs2_to_os_mode((x).m))) -#else +#else #error wibble #endif +/* The minimal node header size */ +#define JFFS2_MIN_NODE_HEADER sizeof(struct jffs2_raw_dirent) + /* This is all we need to keep in-core for each raw node during normal operation. As and when we do read_inode on a particular inode, we can - scan the nodes which are listed for it and build up a proper map of + scan the nodes which are listed for it and build up a proper map of which nodes are currently valid. JFFSv1 always used to keep that whole map in core for each inode. */ struct jffs2_raw_node_ref { struct jffs2_raw_node_ref *next_in_ino; /* Points to the next raw_node_ref - for this inode. If this is the last, it points to the inode_cache - for this inode instead. The inode_cache will have NULL in the first - word so you know when you've got there :) */ - struct jffs2_raw_node_ref *next_phys; + for this object. If this _is_ the last, it points to the inode_cache, + xattr_ref or xattr_datum instead. The common part of those structures + has NULL in the first word. 
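The je16/je32 types used above are single-member structs rather than bare integers, so an on-media value cannot slip into host-order arithmetic without going through a conversion macro; the new constant_cpu_to_je16/je32 variants simply let that conversion be folded at compile time for literals. A stand-alone illustration of the wrapper idea (native-endian case only; the header struct here is hypothetical, not a JFFS2 layout):

#include <stdint.h>
#include <stdio.h>

typedef struct { uint16_t v16; } j16;	/* analogue of jint16_t */
typedef struct { uint32_t v32; } j32;	/* analogue of jint32_t */

#define cpu_to_j16(x) ((j16){ (x) })	/* compound literal, as in the macros */
#define j16_to_cpu(x) ((x).v16)
#define cpu_to_j32(x) ((j32){ (x) })
#define j32_to_cpu(x) ((x).v32)

struct raw_hdr {			/* hypothetical on-media header */
	j16 magic;
	j16 nodetype;
	j32 totlen;
};

int main(void)
{
	struct raw_hdr h = {
		.magic    = cpu_to_j16(0x1985),
		.nodetype = cpu_to_j16(0xe001),
		.totlen   = cpu_to_j32(0x44),
	};

	/* "h.magic + 1" would not compile -- which is the point of the wrappers */
	printf("magic 0x%04x, totlen 0x%x\n",
	       (unsigned)j16_to_cpu(h.magic), (unsigned)j32_to_cpu(h.totlen));
	return 0;
}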
See jffs2_raw_ref_to_ic() below */ uint32_t flash_offset; +#undef TEST_TOTLEN +#ifdef TEST_TOTLEN uint32_t __totlen; /* This may die; use ref_totlen(c, jeb, ) below */ +#endif }; - /* flash_offset & 3 always has to be zero, because nodes are +#define REF_LINK_NODE ((int32_t)-1) +#define REF_EMPTY_NODE ((int32_t)-2) + +/* Use blocks of about 256 bytes */ +#define REFS_PER_BLOCK ((255/sizeof(struct jffs2_raw_node_ref))-1) + +static inline struct jffs2_raw_node_ref *ref_next(struct jffs2_raw_node_ref *ref) +{ + ref++; + + /* Link to another block of refs */ + if (ref->flash_offset == REF_LINK_NODE) { + ref = ref->next_in_ino; + if (!ref) + return ref; + } + + /* End of chain */ + if (ref->flash_offset == REF_EMPTY_NODE) + return NULL; + + return ref; +} + +static inline struct jffs2_inode_cache *jffs2_raw_ref_to_ic(struct jffs2_raw_node_ref *raw) +{ + while(raw->next_in_ino) + raw = raw->next_in_ino; + + /* NB. This can be a jffs2_xattr_datum or jffs2_xattr_ref and + not actually a jffs2_inode_cache. Check ->class */ + return ((struct jffs2_inode_cache *)raw); +} + + /* flash_offset & 3 always has to be zero, because nodes are always aligned at 4 bytes. So we have a couple of extra bits - to play with, which indicate the node's status; see below: */ + to play with, which indicate the node's status; see below: */ #define REF_UNCHECKED 0 /* We haven't yet checked the CRC or built its inode */ #define REF_OBSOLETE 1 /* Obsolete, can be completely ignored */ #define REF_PRISTINE 2 /* Completely clean. GC without looking */ @@ -107,23 +138,48 @@ struct jffs2_raw_node_ref #define ref_obsolete(ref) (((ref)->flash_offset & 3) == REF_OBSOLETE) #define mark_ref_normal(ref) do { (ref)->flash_offset = ref_offset(ref) | REF_NORMAL; } while(0) +/* Dirent nodes should be REF_PRISTINE only if they are not a deletion + dirent. Deletion dirents should be REF_NORMAL so that GC gets to + throw them away when appropriate */ +#define dirent_node_state(rd) ( (je32_to_cpu((rd)->ino)?REF_PRISTINE:REF_NORMAL) ) + +/* NB: REF_PRISTINE for an inode-less node (ref->next_in_ino == NULL) indicates + it is an unknown node of type JFFS2_NODETYPE_RWCOMPAT_COPY, so it'll get + copied. If you need to do anything different to GC inode-less nodes, then + you need to modify gc.c accordingly. */ + /* For each inode in the filesystem, we need to keep a record of nlink, because it would be a PITA to scan the whole directory tree at read_inode() time to calculate it, and to keep sufficient information - in the raw_node_ref (basically both parent and child inode number for + in the raw_node_ref (basically both parent and child inode number for dirent nodes) would take more space than this does. We also keep a pointer to the first physical node which is part of this inode, too. */ struct jffs2_inode_cache { + /* First part of structure is shared with other objects which + can terminate the raw node refs' next_in_ino list -- which + currently struct jffs2_xattr_datum and struct jffs2_xattr_ref. */ + struct jffs2_full_dirent *scan_dents; /* Used during scan to hold temporary lists of dirents, and later must be set to NULL to mark the end of the raw_node_ref->next_in_ino chain. 
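Because every node on flash starts on a 4-byte boundary, the two low bits of flash_offset are free to carry the reference's state, and ref_offset()/ref_flags() simply mask them apart; marking a node obsolete later is a single store. A user-space sketch of that packing (constants renamed; the ref-block chaining via REF_LINK_NODE/REF_EMPTY_NODE is not reproduced here):

#include <stdint.h>
#include <stdio.h>

#define ST_UNCHECKED 0	/* plays the role of REF_UNCHECKED */
#define ST_OBSOLETE  1	/* REF_OBSOLETE */
#define ST_PRISTINE  2	/* REF_PRISTINE */
#define ST_NORMAL    3	/* REF_NORMAL */

static inline uint32_t pack_ref(uint32_t ofs, uint32_t state)
{
	/* ofs is 4-byte aligned, exactly as JFFS2 guarantees for nodes */
	return (ofs & ~3u) | (state & 3u);
}

static inline uint32_t ref_ofs(uint32_t packed)   { return packed & ~3u; }
static inline uint32_t ref_state(uint32_t packed) { return packed & 3u; }

int main(void)
{
	uint32_t ref = pack_ref(0x40100, ST_UNCHECKED);

	printf("ofs 0x%x, state %u\n",
	       (unsigned)ref_ofs(ref), (unsigned)ref_state(ref));

	/* Obsoleting the node later is one store, as in the patch:
	 *	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;	*/
	ref = ref_ofs(ref) | ST_OBSOLETE;
	printf("ofs 0x%x, state %u\n",
	       (unsigned)ref_ofs(ref), (unsigned)ref_state(ref));
	return 0;
}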
*/ - struct jffs2_inode_cache *next; struct jffs2_raw_node_ref *nodes; + uint8_t class; /* It's used for identification */ + + /* end of shared structure */ + + uint8_t flags; + uint16_t state; uint32_t ino; - int nlink; - int state; + struct jffs2_inode_cache *next; +#ifdef CONFIG_JFFS2_FS_XATTR + struct jffs2_xattr_ref *xref; +#endif + uint32_t pino_nlink; /* Directories store parent inode + here; other inodes store nlink. + Zero always means that it's + completely unlinked. */ }; /* Inode states for 'state' above. We need the 'GC' state to prevent @@ -137,10 +193,19 @@ struct jffs2_inode_cache { #define INO_STATE_READING 5 /* In read_inode() */ #define INO_STATE_CLEARING 6 /* In clear_inode() */ -#define INOCACHE_HASHSIZE 128 +#define INO_FLAGS_XATTR_CHECKED 0x01 /* has no duplicate xattr_ref */ + +#define RAWNODE_CLASS_INODE_CACHE 0 +#define RAWNODE_CLASS_XATTR_DATUM 1 +#define RAWNODE_CLASS_XATTR_REF 2 + +#define INOCACHE_HASHSIZE_MIN 128 +#define INOCACHE_HASHSIZE_MAX 1024 + +#define write_ofs(c) ((c)->nextblock->offset + (c)->sector_size - (c)->nextblock->free_size) /* - Larger representation of a raw node, kept in-core only when the + Larger representation of a raw node, kept in-core only when the struct inode for this particular ino is instantiated. */ @@ -150,11 +215,11 @@ struct jffs2_full_dnode uint32_t ofs; /* The offset to which the data of this node belongs */ uint32_t size; uint32_t frags; /* Number of fragments which currently refer - to this node. When this reaches zero, + to this node. When this reaches zero, the node is obsolete. */ }; -/* +/* Even larger representation of a raw node, kept in-core only while we're actually building up the original map of which nodes go where, in read_inode() @@ -164,7 +229,23 @@ struct jffs2_tmp_dnode_info struct rb_node rb; struct jffs2_full_dnode *fn; uint32_t version; -}; + uint32_t data_crc; + uint32_t partial_crc; + uint32_t csize; + uint16_t overlapped; +}; + +/* Temporary data structure used during readinode. */ +struct jffs2_readinode_info +{ + struct rb_root tn_root; + struct jffs2_tmp_dnode_info *mdata_tn; + uint32_t highest_version; + uint32_t latest_mctime; + uint32_t mctime_ver; + struct jffs2_full_dirent *fds; + struct jffs2_raw_node_ref *latest_ref; +}; struct jffs2_full_dirent { @@ -178,7 +259,7 @@ struct jffs2_full_dirent }; /* - Fragments - used to build a map of which raw node to obtain + Fragments - used to build a map of which raw node to obtain data from for each part of the ino */ struct jffs2_node_frag @@ -201,135 +282,19 @@ struct jffs2_eraseblock uint32_t wasted_size; uint32_t free_size; /* Note that sector_size - free_size is the address of the first free space */ + uint32_t allocated_refs; struct jffs2_raw_node_ref *first_node; struct jffs2_raw_node_ref *last_node; struct jffs2_raw_node_ref *gc_node; /* Next node to be garbage collected */ }; -#define ACCT_SANITY_CHECK(c, jeb) do { \ - struct jffs2_eraseblock *___j = jeb; \ - if ((___j) && ___j->used_size + ___j->dirty_size + ___j->free_size + ___j->wasted_size + ___j->unchecked_size != c->sector_size) { \ - printk(KERN_NOTICE "Eeep. 
Space accounting for block at 0x%08x is screwed\n", ___j->offset); \ - printk(KERN_NOTICE "free 0x%08x + dirty 0x%08x + used %08x + wasted %08x + unchecked %08x != total %08x\n", \ - ___j->free_size, ___j->dirty_size, ___j->used_size, ___j->wasted_size, ___j->unchecked_size, c->sector_size); \ - BUG(); \ - } \ - if (c->used_size + c->dirty_size + c->free_size + c->erasing_size + c->bad_size + c->wasted_size + c->unchecked_size != c->flash_size) { \ - printk(KERN_NOTICE "Eeep. Space accounting superblock info is screwed\n"); \ - printk(KERN_NOTICE "free 0x%08x + dirty 0x%08x + used %08x + erasing %08x + bad %08x + wasted %08x + unchecked %08x != total %08x\n", \ - c->free_size, c->dirty_size, c->used_size, c->erasing_size, c->bad_size, c->wasted_size, c->unchecked_size, c->flash_size); \ - BUG(); \ - } \ -} while(0) - -static inline void paranoia_failed_dump(struct jffs2_eraseblock *jeb) -{ - struct jffs2_raw_node_ref *ref; - int i=0; - - printk(KERN_NOTICE); - for (ref = jeb->first_node; ref; ref = ref->next_phys) { - printk("%08x->", ref_offset(ref)); - if (++i == 8) { - i = 0; - printk("\n" KERN_NOTICE); - } - } - printk("\n"); -} - - -#define ACCT_PARANOIA_CHECK(jeb) do { \ - uint32_t my_used_size = 0; \ - uint32_t my_unchecked_size = 0; \ - struct jffs2_raw_node_ref *ref2 = jeb->first_node; \ - while (ref2) { \ - if (unlikely(ref2->flash_offset < jeb->offset || \ - ref2->flash_offset > jeb->offset + c->sector_size)) { \ - printk(KERN_NOTICE "Node %08x shouldn't be in block at %08x!\n", \ - ref_offset(ref2), jeb->offset); \ - paranoia_failed_dump(jeb); \ - BUG(); \ - } \ - if (ref_flags(ref2) == REF_UNCHECKED) \ - my_unchecked_size += ref_totlen(c, jeb, ref2); \ - else if (!ref_obsolete(ref2)) \ - my_used_size += ref_totlen(c, jeb, ref2); \ - if (unlikely((!ref2->next_phys) != (ref2 == jeb->last_node))) { \ - if (!ref2->next_phys) \ - printk("ref for node at %p (phys %08x) has next_phys->%p (----), last_node->%p (phys %08x)\n", \ - ref2, ref_offset(ref2), ref2->next_phys, \ - jeb->last_node, ref_offset(jeb->last_node)); \ - else \ - printk("ref for node at %p (phys %08x) has next_phys->%p (%08x), last_node->%p (phys %08x)\n", \ - ref2, ref_offset(ref2), ref2->next_phys, ref_offset(ref2->next_phys), \ - jeb->last_node, ref_offset(jeb->last_node)); \ - paranoia_failed_dump(jeb); \ - BUG(); \ - } \ - ref2 = ref2->next_phys; \ - } \ - if (my_used_size != jeb->used_size) { \ - printk(KERN_NOTICE "Calculated used size %08x != stored used size %08x\n", my_used_size, jeb->used_size); \ - BUG(); \ - } \ - if (my_unchecked_size != jeb->unchecked_size) { \ - printk(KERN_NOTICE "Calculated unchecked size %08x != stored unchecked size %08x\n", my_unchecked_size, jeb->unchecked_size); \ - BUG(); \ - } \ - } while(0) - -/* Calculate totlen from surrounding nodes or eraseblock */ -static inline uint32_t __ref_totlen(struct jffs2_sb_info *c, - struct jffs2_eraseblock *jeb, - struct jffs2_raw_node_ref *ref) -{ - uint32_t ref_end; - - if (ref->next_phys) - ref_end = ref_offset(ref->next_phys); - else { - if (!jeb) - jeb = &c->blocks[ref->flash_offset / c->sector_size]; - - /* Last node in block. 
Use free_space */ - BUG_ON(ref != jeb->last_node); - ref_end = jeb->offset + c->sector_size - jeb->free_size; - } - return ref_end - ref_offset(ref); -} - -static inline uint32_t ref_totlen(struct jffs2_sb_info *c, - struct jffs2_eraseblock *jeb, - struct jffs2_raw_node_ref *ref) +static inline int jffs2_blocks_use_vmalloc(struct jffs2_sb_info *c) { - uint32_t ret; - - D1(if (jeb && jeb != &c->blocks[ref->flash_offset / c->sector_size]) { - printk(KERN_CRIT "ref_totlen called with wrong block -- at 0x%08x instead of 0x%08x; ref 0x%08x\n", - jeb->offset, c->blocks[ref->flash_offset / c->sector_size].offset, ref_offset(ref)); - BUG(); - }) - -#if 1 - ret = ref->__totlen; -#else - /* This doesn't actually work yet */ - ret = __ref_totlen(c, jeb, ref); - if (ret != ref->__totlen) { - printk(KERN_CRIT "Totlen for ref at %p (0x%08x-0x%08x) miscalculated as 0x%x instead of %x\n", - ref, ref_offset(ref), ref_offset(ref)+ref->__totlen, - ret, ref->__totlen); - if (!jeb) - jeb = &c->blocks[ref->flash_offset / c->sector_size]; - paranoia_failed_dump(jeb); - BUG(); - } -#endif - return ret; + return ((c->flash_size / c->sector_size) * sizeof (struct jffs2_eraseblock)) > (128 * 1024); } +#define ref_totlen(a, b, c) __jffs2_ref_totlen((a), (b), (c)) #define ALLOC_NORMAL 0 /* Normal allocation */ #define ALLOC_DELETION 1 /* Deletion node. Best to allow it */ @@ -340,42 +305,41 @@ static inline uint32_t ref_totlen(struct jffs2_sb_info *c, #define VERYDIRTY(c, size) ((size) >= ((c)->sector_size / 2)) /* check if dirty space is more than 255 Byte */ -#define ISDIRTY(size) ((size) > sizeof (struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN) +#define ISDIRTY(size) ((size) > sizeof (struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN) #define PAD(x) (((x)+3)&~3) -static inline struct jffs2_inode_cache *jffs2_raw_ref_to_ic(struct jffs2_raw_node_ref *raw) +static inline int jffs2_encode_dev(union jffs2_device_node *jdev, dev_t rdev) { - while(raw->next_in_ino) { - raw = raw->next_in_ino; + if (old_valid_dev(rdev)) { + jdev->old_id = cpu_to_je16(old_encode_dev(rdev)); + return sizeof(jdev->old_id); + } else { + jdev->new_id = cpu_to_je32(new_encode_dev(rdev)); + return sizeof(jdev->new_id); } - - return ((struct jffs2_inode_cache *)raw); } static inline struct jffs2_node_frag *frag_first(struct rb_root *root) { - struct rb_node *node = root->rb_node; + struct rb_node *node = rb_first(root); if (!node) return NULL; - while(node->rb_left) - node = node->rb_left; + return rb_entry(node, struct jffs2_node_frag, rb); } static inline struct jffs2_node_frag *frag_last(struct rb_root *root) { - struct rb_node *node = root->rb_node; + struct rb_node *node = rb_last(root); if (!node) return NULL; - while(node->rb_right) - node = node->rb_right; + return rb_entry(node, struct jffs2_node_frag, rb); } -#define rb_parent(rb) ((rb)->rb_parent) #define frag_next(frag) rb_entry(rb_next(&(frag)->rb), struct jffs2_node_frag, rb) #define frag_prev(frag) rb_entry(rb_prev(&(frag)->rb), struct jffs2_node_frag, rb) #define frag_parent(frag) rb_entry(rb_parent(&(frag)->rb), struct jffs2_node_frag, rb) @@ -383,13 +347,17 @@ static inline struct jffs2_node_frag *frag_last(struct rb_root *root) #define frag_right(frag) rb_entry((frag)->rb.rb_right, struct jffs2_node_frag, rb) #define frag_erase(frag, list) rb_erase(&frag->rb, list); +#define tn_next(tn) rb_entry(rb_next(&(tn)->rb), struct jffs2_tmp_dnode_info, rb) +#define tn_prev(tn) rb_entry(rb_prev(&(tn)->rb), struct jffs2_tmp_dnode_info, rb) +#define tn_parent(tn) rb_entry(rb_parent(&(tn)->rb), 
struct jffs2_tmp_dnode_info, rb) +#define tn_left(tn) rb_entry((tn)->rb.rb_left, struct jffs2_tmp_dnode_info, rb) +#define tn_right(tn) rb_entry((tn)->rb.rb_right, struct jffs2_tmp_dnode_info, rb) +#define tn_erase(tn, list) rb_erase(&tn->rb, list); +#define tn_last(list) rb_entry(rb_last(list), struct jffs2_tmp_dnode_info, rb) +#define tn_first(list) rb_entry(rb_first(list), struct jffs2_tmp_dnode_info, rb) + /* nodelist.c */ -D2(void jffs2_print_frag_list(struct jffs2_inode_info *f)); void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new, struct jffs2_full_dirent **list); -int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f, - struct rb_root *tnp, struct jffs2_full_dirent **fdp, - uint32_t *highest_version, uint32_t *latest_mctime, - uint32_t *mctime_ver); void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state); struct jffs2_inode_cache *jffs2_get_ino_cache(struct jffs2_sb_info *c, uint32_t ino); void jffs2_add_ino_cache (struct jffs2_sb_info *c, struct jffs2_inode_cache *new); @@ -398,37 +366,50 @@ void jffs2_free_ino_caches(struct jffs2_sb_info *c); void jffs2_free_raw_node_refs(struct jffs2_sb_info *c); struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_t offset); void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c_delete); -void jffs2_fragtree_insert(struct jffs2_node_frag *newfrag, struct jffs2_node_frag *base); -struct rb_node *rb_next(struct rb_node *); -struct rb_node *rb_prev(struct rb_node *); -void rb_replace_node(struct rb_node *victim, struct rb_node *new, struct rb_root *root); +int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn); +uint32_t jffs2_truncate_fragtree (struct jffs2_sb_info *c, struct rb_root *list, uint32_t size); +struct jffs2_raw_node_ref *jffs2_link_node_ref(struct jffs2_sb_info *c, + struct jffs2_eraseblock *jeb, + uint32_t ofs, uint32_t len, + struct jffs2_inode_cache *ic); +extern uint32_t __jffs2_ref_totlen(struct jffs2_sb_info *c, + struct jffs2_eraseblock *jeb, + struct jffs2_raw_node_ref *ref); /* nodemgmt.c */ int jffs2_thread_should_wake(struct jffs2_sb_info *c); -int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, int prio); -int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len); -int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new); +int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, + uint32_t *len, int prio, uint32_t sumsize); +int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, + uint32_t *len, uint32_t sumsize); +struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c, + uint32_t ofs, uint32_t len, + struct jffs2_inode_cache *ic); void jffs2_complete_reservation(struct jffs2_sb_info *c); void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *raw); -void jffs2_dump_block_lists(struct jffs2_sb_info *c); /* write.c */ int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint32_t mode, struct jffs2_raw_inode *ri); -struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const unsigned char *data, uint32_t datalen, uint32_t flash_ofs, int alloc_mode); -struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jffs2_inode_info *f, 
struct jffs2_raw_dirent *rd, const unsigned char *name, uint32_t namelen, uint32_t flash_ofs, int alloc_mode); +struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, + struct jffs2_raw_inode *ri, const unsigned char *data, + uint32_t datalen, int alloc_mode); +struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jffs2_inode_info *f, + struct jffs2_raw_dirent *rd, const unsigned char *name, + uint32_t namelen, int alloc_mode); int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, - struct jffs2_raw_inode *ri, unsigned char *buf, + struct jffs2_raw_inode *ri, unsigned char *buf, uint32_t offset, uint32_t writelen, uint32_t *retlen); -int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const char *name, int namelen); -int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, const char *name, int namelen, struct jffs2_inode_info *dead_f); -int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint32_t ino, uint8_t type, const char *name, int namelen); +int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, struct jffs2_inode_info *f, + struct jffs2_raw_inode *ri, const struct qstr *qstr); +int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, const char *name, + int namelen, struct jffs2_inode_info *dead_f, uint32_t time); +int jffs2_do_link(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint32_t ino, + uint8_t type, const char *name, int namelen, uint32_t time); /* readinode.c */ -void jffs2_truncate_fraglist (struct jffs2_sb_info *c, struct rb_root *list, uint32_t size); -int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn); -int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, +int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint32_t ino, struct jffs2_raw_inode *latest_node); int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic); void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f); @@ -447,12 +428,19 @@ struct jffs2_raw_inode *jffs2_alloc_raw_inode(void); void jffs2_free_raw_inode(struct jffs2_raw_inode *); struct jffs2_tmp_dnode_info *jffs2_alloc_tmp_dnode_info(void); void jffs2_free_tmp_dnode_info(struct jffs2_tmp_dnode_info *); -struct jffs2_raw_node_ref *jffs2_alloc_raw_node_ref(void); -void jffs2_free_raw_node_ref(struct jffs2_raw_node_ref *); +int jffs2_prealloc_raw_node_refs(struct jffs2_sb_info *c, + struct jffs2_eraseblock *jeb, int nr); +void jffs2_free_refblock(struct jffs2_raw_node_ref *); struct jffs2_node_frag *jffs2_alloc_node_frag(void); void jffs2_free_node_frag(struct jffs2_node_frag *); struct jffs2_inode_cache *jffs2_alloc_inode_cache(void); void jffs2_free_inode_cache(struct jffs2_inode_cache *); +#ifdef CONFIG_JFFS2_FS_XATTR +struct jffs2_xattr_datum *jffs2_alloc_xattr_datum(void); +void jffs2_free_xattr_datum(struct jffs2_xattr_datum *); +struct jffs2_xattr_ref *jffs2_alloc_xattr_ref(void); +void jffs2_free_xattr_ref(struct jffs2_xattr_ref *); +#endif /* gc.c */ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c); @@ -468,12 +456,16 @@ char *jffs2_getlink(struct jffs2_sb_info *c, struct jffs2_inode_info *f); /* scan.c */ int jffs2_scan_medium(struct jffs2_sb_info *c); void jffs2_rotate_lists(struct jffs2_sb_info *c); +struct 
jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino); +int jffs2_scan_classify_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); +int jffs2_scan_dirty_space(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t size); /* build.c */ int jffs2_do_mount_fs(struct jffs2_sb_info *c); /* erase.c */ -void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count); +int jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count); +void jffs2_free_jeb_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); #ifdef CONFIG_JFFS2_FS_WRITEBUFFER /* wbuf.c */ @@ -483,4 +475,6 @@ int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_erasebloc int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); #endif +#include "debug.h" + #endif /* __JFFS2_NODELIST_H__ */ diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c index c1d8b5ed9ab..b6bd4affd9a 100644 --- a/fs/jffs2/nodemgmt.c +++ b/fs/jffs2/nodemgmt.c @@ -1,34 +1,64 @@ /* * JFFS2 -- Journalling Flash File System, Version 2. * - * Copyright (C) 2001-2003 Red Hat, Inc. + * Copyright © 2001-2007 Red Hat, Inc. * * Created by David Woodhouse <dwmw2@infradead.org> * * For licensing information, see the file 'LICENCE' in this directory. * - * $Id: nodemgmt.c,v 1.122 2005/05/06 09:30:27 dedekind Exp $ - * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/kernel.h> -#include <linux/slab.h> #include <linux/mtd/mtd.h> #include <linux/compiler.h> #include <linux/sched.h> /* For cond_resched() */ #include "nodelist.h" +#include "debug.h" + +/* + * Check whether the user is allowed to write. + */ +static int jffs2_rp_can_write(struct jffs2_sb_info *c) +{ + uint32_t avail; + struct jffs2_mount_opts *opts = &c->mount_opts; + + avail = c->dirty_size + c->free_size + c->unchecked_size + + c->erasing_size - c->resv_blocks_write * c->sector_size + - c->nospc_dirty_size; + + if (avail < 2 * opts->rp_size) + jffs2_dbg(1, "rpsize %u, dirty_size %u, free_size %u, " + "erasing_size %u, unchecked_size %u, " + "nr_erasing_blocks %u, avail %u, resrv %u\n", + opts->rp_size, c->dirty_size, c->free_size, + c->erasing_size, c->unchecked_size, + c->nr_erasing_blocks, avail, c->nospc_dirty_size); + + if (avail > opts->rp_size) + return 1; + + /* Always allow root */ + if (capable(CAP_SYS_RESOURCE)) + return 1; + + jffs2_dbg(1, "forbid writing\n"); + return 0; +} /** * jffs2_reserve_space - request physical space to write nodes to flash * @c: superblock info * @minsize: Minimum acceptable size of allocation - * @ofs: Returned value of node offset * @len: Returned value of allocation length * @prio: Allocation type - ALLOC_{NORMAL,DELETION} * * Requests a block of physical space on the flash. Returns zero for success - * and puts 'ofs' and 'len' into the appriopriate place, or returns -ENOSPC - * or other error if appropriate. + * and puts 'len' into the appropriate place, or returns -ENOSPC or other + * error if appropriate. Doesn't return len since that's * * If it returns zero, jffs2_reserve_space() also downs the per-filesystem * allocation semaphore, to prevent more than one allocation from being @@ -38,26 +68,36 @@ * for the requested allocation. 
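The reserved-pool check above reduces to one subtraction: everything that could eventually become writable (dirty, free, unchecked and erasing space), minus the blocks always held back for writes and the dirty space that cannot be reclaimed, must still exceed rp_size before an unprivileged writer may proceed; CAP_SYS_RESOURCE bypasses the test. A worked example, with every figure invented for the sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* All values below are made up for the example. */
	uint32_t sector_size       = 0x10000;		/* 64 KiB eraseblocks */
	uint32_t free_size         = 5 * 0x10000;	/* five empty blocks */
	uint32_t dirty_size        = 0x8000;
	uint32_t unchecked_size    = 0;
	uint32_t erasing_size      = 0x10000;
	uint32_t nospc_dirty_size  = 0x2000;
	uint32_t resv_blocks_write = 3;
	uint32_t rp_size           = 0x20000;		/* 128 KiB kept for root */

	uint32_t avail = dirty_size + free_size + unchecked_size + erasing_size
			 - resv_blocks_write * sector_size - nospc_dirty_size;

	printf("avail = 0x%x -> %s\n", (unsigned)avail,
	       avail > rp_size ? "ordinary writes allowed"
			       : "only CAP_SYS_RESOURCE may write");
	return 0;
}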
*/ -static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len); +static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, + uint32_t *len, uint32_t sumsize); -int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, int prio) +int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, + uint32_t *len, int prio, uint32_t sumsize) { int ret = -EAGAIN; int blocksneeded = c->resv_blocks_write; /* align it */ minsize = PAD(minsize); - D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize)); - down(&c->alloc_sem); + jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize); + mutex_lock(&c->alloc_sem); - D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n")); + jffs2_dbg(1, "%s(): alloc sem got\n", __func__); spin_lock(&c->erase_completion_lock); + /* + * Check if the free space is greater then size of the reserved pool. + * If not, only allow root to proceed with writing. + */ + if (prio != ALLOC_DELETION && !jffs2_rp_can_write(c)) { + ret = -ENOSPC; + goto out; + } + /* this needs a little more thought (true <tglx> :)) */ while(ret == -EAGAIN) { while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) { - int ret; uint32_t dirty, avail; /* calculate real dirty size @@ -75,22 +115,24 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size; if (dirty < c->nospc_dirty_size) { if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) { - D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n")); + jffs2_dbg(1, "%s(): Low on dirty space to GC, but it's a deletion. Allowing...\n", + __func__); break; } - D1(printk(KERN_DEBUG "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n", - dirty, c->unchecked_size, c->sector_size)); + jffs2_dbg(1, "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n", + dirty, c->unchecked_size, + c->sector_size); spin_unlock(&c->erase_completion_lock); - up(&c->alloc_sem); + mutex_unlock(&c->alloc_sem); return -ENOSPC; } - + /* Calc possibly available space. Possibly available means that we * don't know, if unchecked size contains obsoleted nodes, which could give us some * more usable space. This will affect the sum only once, as gc first finishes checking * of nodes. - + Return -ENOSPC, if the maximum possibly available space is less or equal than + + Return -ENOSPC, if the maximum possibly available space is less or equal than * blocksneeded * sector_size. * This blocks endless gc looping on a filesystem, which is nearly full, even if * the check above passes. @@ -98,26 +140,49 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size; if ( (avail / c->sector_size) <= blocksneeded) { if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) { - D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n")); + jffs2_dbg(1, "%s(): Low on possibly available space, but it's a deletion. Allowing...\n", + __func__); break; } - D1(printk(KERN_DEBUG "max. 
available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n", - avail, blocksneeded * c->sector_size)); + jffs2_dbg(1, "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n", + avail, blocksneeded * c->sector_size); spin_unlock(&c->erase_completion_lock); - up(&c->alloc_sem); + mutex_unlock(&c->alloc_sem); return -ENOSPC; } - up(&c->alloc_sem); + mutex_unlock(&c->alloc_sem); - D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n", - c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size, - c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size)); + jffs2_dbg(1, "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n", + c->nr_free_blocks, c->nr_erasing_blocks, + c->free_size, c->dirty_size, c->wasted_size, + c->used_size, c->erasing_size, c->bad_size, + c->free_size + c->dirty_size + + c->wasted_size + c->used_size + + c->erasing_size + c->bad_size, + c->flash_size); spin_unlock(&c->erase_completion_lock); - + ret = jffs2_garbage_collect_pass(c); - if (ret) + + if (ret == -EAGAIN) { + spin_lock(&c->erase_completion_lock); + if (c->nr_erasing_blocks && + list_empty(&c->erase_pending_list) && + list_empty(&c->erase_complete_list)) { + DECLARE_WAITQUEUE(wait, current); + set_current_state(TASK_UNINTERRUPTIBLE); + add_wait_queue(&c->erase_wait, &wait); + jffs2_dbg(1, "%s waiting for erase to complete\n", + __func__); + spin_unlock(&c->erase_completion_lock); + + schedule(); + remove_wait_queue(&c->erase_wait, &wait); + } else + spin_unlock(&c->erase_completion_lock); + } else if (ret) return ret; cond_resched(); @@ -125,154 +190,287 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs if (signal_pending(current)) return -EINTR; - down(&c->alloc_sem); + mutex_lock(&c->alloc_sem); spin_lock(&c->erase_completion_lock); } - ret = jffs2_do_reserve_space(c, minsize, ofs, len); + ret = jffs2_do_reserve_space(c, minsize, len, sumsize); if (ret) { - D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret)); + jffs2_dbg(1, "%s(): ret is %d\n", __func__, ret); } } + +out: spin_unlock(&c->erase_completion_lock); + if (!ret) + ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1); if (ret) - up(&c->alloc_sem); + mutex_unlock(&c->alloc_sem); return ret; } -int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len) +int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, + uint32_t *len, uint32_t sumsize) { - int ret = -EAGAIN; + int ret; minsize = PAD(minsize); - D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize)); + jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize); - spin_lock(&c->erase_completion_lock); - while(ret == -EAGAIN) { - ret = jffs2_do_reserve_space(c, minsize, ofs, len); + while (true) { + spin_lock(&c->erase_completion_lock); + ret = jffs2_do_reserve_space(c, minsize, len, sumsize); if (ret) { - D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret)); + jffs2_dbg(1, "%s(): looping, ret is %d\n", + __func__, ret); } + spin_unlock(&c->erase_completion_lock); + 
+ if (ret == -EAGAIN) + cond_resched(); + else + break; } - spin_unlock(&c->erase_completion_lock); + if (!ret) + ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1); + return ret; } -/* Called with alloc sem _and_ erase_completion_lock */ -static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len) + +/* Classify nextblock (clean, dirty of verydirty) and force to select an other one */ + +static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) { - struct jffs2_eraseblock *jeb = c->nextblock; - - restart: - if (jeb && minsize > jeb->free_size) { - /* Skip the end of this block and file it as having some dirty space */ - /* If there's a pending write to it, flush now */ - if (jffs2_wbuf_dirty(c)) { + + if (c->nextblock == NULL) { + jffs2_dbg(1, "%s(): Erase block at 0x%08x has already been placed in a list\n", + __func__, jeb->offset); + return; + } + /* Check, if we have a dirty block now, or if it was dirty already */ + if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) { + c->dirty_size += jeb->wasted_size; + c->wasted_size -= jeb->wasted_size; + jeb->dirty_size += jeb->wasted_size; + jeb->wasted_size = 0; + if (VERYDIRTY(c, jeb->dirty_size)) { + jffs2_dbg(1, "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", + jeb->offset, jeb->free_size, jeb->dirty_size, + jeb->used_size); + list_add_tail(&jeb->list, &c->very_dirty_list); + } else { + jffs2_dbg(1, "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", + jeb->offset, jeb->free_size, jeb->dirty_size, + jeb->used_size); + list_add_tail(&jeb->list, &c->dirty_list); + } + } else { + jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", + jeb->offset, jeb->free_size, jeb->dirty_size, + jeb->used_size); + list_add_tail(&jeb->list, &c->clean_list); + } + c->nextblock = NULL; + +} + +/* Select a new jeb for nextblock */ + +static int jffs2_find_nextblock(struct jffs2_sb_info *c) +{ + struct list_head *next; + + /* Take the next block off the 'free' list */ + + if (list_empty(&c->free_list)) { + + if (!c->nr_erasing_blocks && + !list_empty(&c->erasable_list)) { + struct jffs2_eraseblock *ejeb; + + ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list); + list_move_tail(&ejeb->list, &c->erase_pending_list); + c->nr_erasing_blocks++; + jffs2_garbage_collect_trigger(c); + jffs2_dbg(1, "%s(): Triggering erase of erasable block at 0x%08x\n", + __func__, ejeb->offset); + } + + if (!c->nr_erasing_blocks && + !list_empty(&c->erasable_pending_wbuf_list)) { + jffs2_dbg(1, "%s(): Flushing write buffer\n", + __func__); + /* c->nextblock is NULL, no update to c->nextblock allowed */ spin_unlock(&c->erase_completion_lock); - D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n")); jffs2_flush_wbuf_pad(c); spin_lock(&c->erase_completion_lock); - jeb = c->nextblock; - goto restart; + /* Have another go. 
It'll be on the erasable_list now */ + return -EAGAIN; } - c->wasted_size += jeb->free_size; - c->free_size -= jeb->free_size; - jeb->wasted_size += jeb->free_size; - jeb->free_size = 0; - - /* Check, if we have a dirty block now, or if it was dirty already */ - if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) { - c->dirty_size += jeb->wasted_size; - c->wasted_size -= jeb->wasted_size; - jeb->dirty_size += jeb->wasted_size; - jeb->wasted_size = 0; - if (VERYDIRTY(c, jeb->dirty_size)) { - D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", - jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); - list_add_tail(&jeb->list, &c->very_dirty_list); - } else { - D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", - jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); - list_add_tail(&jeb->list, &c->dirty_list); - } - } else { - D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", - jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); - list_add_tail(&jeb->list, &c->clean_list); + + if (!c->nr_erasing_blocks) { + /* Ouch. We're in GC, or we wouldn't have got here. + And there's no space left. At all. */ + pr_crit("Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n", + c->nr_erasing_blocks, c->nr_free_blocks, + list_empty(&c->erasable_list) ? "yes" : "no", + list_empty(&c->erasing_list) ? "yes" : "no", + list_empty(&c->erase_pending_list) ? "yes" : "no"); + return -ENOSPC; } - c->nextblock = jeb = NULL; + + spin_unlock(&c->erase_completion_lock); + /* Don't wait for it; just erase one right now */ + jffs2_erase_pending_blocks(c, 1); + spin_lock(&c->erase_completion_lock); + + /* An erase may have failed, decreasing the + amount of free space available. 
So we must + restart from the beginning */ + return -EAGAIN; } - - if (!jeb) { - struct list_head *next; - /* Take the next block off the 'free' list */ - if (list_empty(&c->free_list)) { + next = c->free_list.next; + list_del(next); + c->nextblock = list_entry(next, struct jffs2_eraseblock, list); + c->nr_free_blocks--; - if (!c->nr_erasing_blocks && - !list_empty(&c->erasable_list)) { - struct jffs2_eraseblock *ejeb; + jffs2_sum_reset_collected(c->summary); /* reset collected summary */ - ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list); - list_del(&ejeb->list); - list_add_tail(&ejeb->list, &c->erase_pending_list); - c->nr_erasing_blocks++; - jffs2_erase_pending_trigger(c); - D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Triggering erase of erasable block at 0x%08x\n", - ejeb->offset)); +#ifdef CONFIG_JFFS2_FS_WRITEBUFFER + /* adjust write buffer offset, else we get a non contiguous write bug */ + if (!(c->wbuf_ofs % c->sector_size) && !c->wbuf_len) + c->wbuf_ofs = 0xffffffff; +#endif + + jffs2_dbg(1, "%s(): new nextblock = 0x%08x\n", + __func__, c->nextblock->offset); + + return 0; +} + +/* Called with alloc sem _and_ erase_completion_lock */ +static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, + uint32_t *len, uint32_t sumsize) +{ + struct jffs2_eraseblock *jeb = c->nextblock; + uint32_t reserved_size; /* for summary information at the end of the jeb */ + int ret; + + restart: + reserved_size = 0; + + if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) { + /* NOSUM_SIZE means not to generate summary */ + + if (jeb) { + reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE); + dbg_summary("minsize=%d , jeb->free=%d ," + "summary->size=%d , sumsize=%d\n", + minsize, jeb->free_size, + c->summary->sum_size, sumsize); + } + + /* Is there enough space for writing out the current node, or we have to + write out summary information now, close this jeb and select new nextblock? */ + if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize + + JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) { + + /* Has summary been disabled for this jeb? */ + if (jffs2_sum_is_disabled(c->summary)) { + sumsize = JFFS2_SUMMARY_NOSUM_SIZE; + goto restart; } - if (!c->nr_erasing_blocks && - !list_empty(&c->erasable_pending_wbuf_list)) { - D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n")); - /* c->nextblock is NULL, no update to c->nextblock allowed */ + /* Writing out the collected summary information */ + dbg_summary("generating summary for 0x%08x.\n", jeb->offset); + ret = jffs2_sum_write_sumnode(c); + + if (ret) + return ret; + + if (jffs2_sum_is_disabled(c->summary)) { + /* jffs2_write_sumnode() couldn't write out the summary information + diabling summary for this jeb and free the collected information + */ + sumsize = JFFS2_SUMMARY_NOSUM_SIZE; + goto restart; + } + + jffs2_close_nextblock(c, jeb); + jeb = NULL; + /* keep always valid value in reserved_size */ + reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE); + } + } else { + if (jeb && minsize > jeb->free_size) { + uint32_t waste; + + /* Skip the end of this block and file it as having some dirty space */ + /* If there's a pending write to it, flush now */ + + if (jffs2_wbuf_dirty(c)) { spin_unlock(&c->erase_completion_lock); + jffs2_dbg(1, "%s(): Flushing write buffer\n", + __func__); jffs2_flush_wbuf_pad(c); spin_lock(&c->erase_completion_lock); - /* Have another go. 
It'll be on the erasable_list now */ - return -EAGAIN; - } - - if (!c->nr_erasing_blocks) { - /* Ouch. We're in GC, or we wouldn't have got here. - And there's no space left. At all. */ - printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n", - c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no", - list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no"); - return -ENOSPC; + jeb = c->nextblock; + goto restart; } spin_unlock(&c->erase_completion_lock); - /* Don't wait for it; just erase one right now */ - jffs2_erase_pending_blocks(c, 1); + + ret = jffs2_prealloc_raw_node_refs(c, jeb, 1); + + /* Just lock it again and continue. Nothing much can change because + we hold c->alloc_sem anyway. In fact, it's not entirely clear why + we hold c->erase_completion_lock in the majority of this function... + but that's a question for another (more caffeine-rich) day. */ spin_lock(&c->erase_completion_lock); - /* An erase may have failed, decreasing the - amount of free space available. So we must - restart from the beginning */ - return -EAGAIN; + if (ret) + return ret; + + waste = jeb->free_size; + jffs2_link_node_ref(c, jeb, + (jeb->offset + c->sector_size - waste) | REF_OBSOLETE, + waste, NULL); + /* FIXME: that made it count as dirty. Convert to wasted */ + jeb->dirty_size -= waste; + c->dirty_size -= waste; + jeb->wasted_size += waste; + c->wasted_size += waste; + + jffs2_close_nextblock(c, jeb); + jeb = NULL; } + } + + if (!jeb) { - next = c->free_list.next; - list_del(next); - c->nextblock = jeb = list_entry(next, struct jffs2_eraseblock, list); - c->nr_free_blocks--; + ret = jffs2_find_nextblock(c); + if (ret) + return ret; + + jeb = c->nextblock; if (jeb->free_size != c->sector_size - c->cleanmarker_size) { - printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size); + pr_warn("Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", + jeb->offset, jeb->free_size); goto restart; } } /* OK, jeb (==c->nextblock) is now pointing at a block which definitely has enough space */ - *ofs = jeb->offset + (c->sector_size - jeb->free_size); - *len = jeb->free_size; + *len = jeb->free_size - reserved_size; if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size && !jeb->first_node->next_in_ino) { - /* Only node in it beforehand was a CLEANMARKER node (we think). + /* Only node in it beforehand was a CLEANMARKER node (we think). So mark it obsolete now that there's going to be another node - in the block. This will reduce used_size to zero but We've + in the block. This will reduce used_size to zero but We've already set c->nextblock so that jffs2_mark_node_obsolete() won't try to refile it to the dirty_list. 
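When summary collection is active, every reservation must also leave room at the end of the block for the summary node that will eventually be written there, so the decision to close the current block compares the padded node size plus the padded summary-so-far (this node's record and the summary frame included) against free_size. A worked example with invented sizes; the frame size below is a stand-in, not the real JFFS2_SUMMARY_FRAME_SIZE:

#include <stdint.h>
#include <stdio.h>

#define PAD(x) (((x) + 3) & ~3u)	/* same rounding as the patch */

int main(void)
{
	uint32_t minsize       = 0x10a;	/* node we want to reserve space for */
	uint32_t sumsize       = 0x14;	/* summary record for that node (assumed) */
	uint32_t sum_collected = 0x1f0;	/* c->summary->sum_size gathered so far */
	uint32_t frame         = 0x30;	/* stand-in for JFFS2_SUMMARY_FRAME_SIZE */
	uint32_t jeb_free_size = 0x400;	/* space left in c->nextblock */

	uint32_t needed = PAD(minsize) + PAD(sum_collected + sumsize + frame);

	printf("needed 0x%x vs free 0x%x -> %s\n", (unsigned)needed,
	       (unsigned)jeb_free_size,
	       needed > jeb_free_size
			? "write the summary node and close this block"
			: "node and summary both still fit");
	return 0;
}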
*/ @@ -281,7 +479,9 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, ui spin_lock(&c->erase_completion_lock); } - D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n", *len, *ofs)); + jffs2_dbg(1, "%s(): Giving 0x%x bytes at 0x%x\n", + __func__, + *len, jeb->offset + (c->sector_size - jeb->free_size)); return 0; } @@ -290,55 +490,50 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, ui * @c: superblock info * @new: new node reference to add * @len: length of this physical node - * @dirty: dirty flag for new node * - * Should only be used to report nodes for which space has been allocated + * Should only be used to report nodes for which space has been allocated * by jffs2_reserve_space. * * Must be called with the alloc_sem held. */ - -int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new) + +struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c, + uint32_t ofs, uint32_t len, + struct jffs2_inode_cache *ic) { struct jffs2_eraseblock *jeb; - uint32_t len; + struct jffs2_raw_node_ref *new; - jeb = &c->blocks[new->flash_offset / c->sector_size]; - len = ref_totlen(c, jeb, new); + jeb = &c->blocks[ofs / c->sector_size]; - D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n", ref_offset(new), ref_flags(new), len)); + jffs2_dbg(1, "%s(): Node at 0x%x(%d), size 0x%x\n", + __func__, ofs & ~3, ofs & 3, len); #if 1 - /* we could get some obsolete nodes after nextblock was refiled - in wbuf.c */ - if ((c->nextblock || !ref_obsolete(new)) - &&(jeb != c->nextblock || ref_offset(new) != jeb->offset + (c->sector_size - jeb->free_size))) { - printk(KERN_WARNING "argh. node added in wrong place\n"); - jffs2_free_raw_node_ref(new); - return -EINVAL; + /* Allow non-obsolete nodes only to be added at the end of c->nextblock, + if c->nextblock is set. Note that wbuf.c will file obsolete nodes + even after refiling c->nextblock */ + if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE)) + && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) { + pr_warn("argh. 
node added in wrong place at 0x%08x(%d)\n", + ofs & ~3, ofs & 3); + if (c->nextblock) + pr_warn("nextblock 0x%08x", c->nextblock->offset); + else + pr_warn("No nextblock"); + pr_cont(", expected at %08x\n", + jeb->offset + (c->sector_size - jeb->free_size)); + return ERR_PTR(-EINVAL); } #endif spin_lock(&c->erase_completion_lock); - if (!jeb->first_node) - jeb->first_node = new; - if (jeb->last_node) - jeb->last_node->next_phys = new; - jeb->last_node = new; - - jeb->free_size -= len; - c->free_size -= len; - if (ref_obsolete(new)) { - jeb->dirty_size += len; - c->dirty_size += len; - } else { - jeb->used_size += len; - c->used_size += len; - } + new = jffs2_link_node_ref(c, jeb, ofs, len, ic); if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) { /* If it lives on the dirty_list, jffs2_reserve_space will put it there */ - D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", - jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); + jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", + jeb->offset, jeb->free_size, jeb->dirty_size, + jeb->used_size); if (jffs2_wbuf_dirty(c)) { /* Flush the last write in the block if it's outstanding */ spin_unlock(&c->erase_completion_lock); @@ -349,20 +544,22 @@ int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_r list_add_tail(&jeb->list, &c->clean_list); c->nextblock = NULL; } - ACCT_SANITY_CHECK(c,jeb); - D1(ACCT_PARANOIA_CHECK(jeb)); + jffs2_dbg_acct_sanity_check_nolock(c,jeb); + jffs2_dbg_acct_paranoia_check_nolock(c, jeb); spin_unlock(&c->erase_completion_lock); - return 0; + return new; } void jffs2_complete_reservation(struct jffs2_sb_info *c) { - D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n")); + jffs2_dbg(1, "jffs2_complete_reservation()\n"); + spin_lock(&c->erase_completion_lock); jffs2_garbage_collect_trigger(c); - up(&c->alloc_sem); + spin_unlock(&c->erase_completion_lock); + mutex_unlock(&c->alloc_sem); } static inline int on_list(struct list_head *obj, struct list_head *head) @@ -371,7 +568,7 @@ static inline int on_list(struct list_head *obj, struct list_head *head) list_for_each(this, head) { if (this == obj) { - D1(printk("%p is on list at %p\n", obj, head)); + jffs2_dbg(1, "%p is on list at %p\n", obj, head); return 1; } @@ -386,71 +583,80 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref struct jffs2_unknown_node n; int ret, addedsize; size_t retlen; + uint32_t freed_len; - if(!ref) { - printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n"); + if(unlikely(!ref)) { + pr_notice("EEEEEK. jffs2_mark_node_obsolete called with NULL node\n"); return; } if (ref_obsolete(ref)) { - D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref))); + jffs2_dbg(1, "%s(): called with already obsolete node at 0x%08x\n", + __func__, ref_offset(ref)); return; } blocknr = ref->flash_offset / c->sector_size; if (blocknr >= c->nr_blocks) { - printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset); + pr_notice("raw node at 0x%08x is off the end of device!\n", + ref->flash_offset); BUG(); } jeb = &c->blocks[blocknr]; if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) && !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) { - /* Hm. This may confuse static lock analysis. 
If any of the above - three conditions is false, we're going to return from this + /* Hm. This may confuse static lock analysis. If any of the above + three conditions is false, we're going to return from this function without actually obliterating any nodes or freeing any jffs2_raw_node_refs. So we don't need to stop erases from happening, or protect against people holding an obsolete jffs2_raw_node_ref without the erase_completion_lock. */ - down(&c->erase_free_sem); + mutex_lock(&c->erase_free_sem); } spin_lock(&c->erase_completion_lock); + freed_len = ref_totlen(c, jeb, ref); + if (ref_flags(ref) == REF_UNCHECKED) { - D1(if (unlikely(jeb->unchecked_size < ref_totlen(c, jeb, ref))) { - printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n", - ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size); + D1(if (unlikely(jeb->unchecked_size < freed_len)) { + pr_notice("raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n", + freed_len, blocknr, + ref->flash_offset, jeb->used_size); BUG(); }) - D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), ref_totlen(c, jeb, ref))); - jeb->unchecked_size -= ref_totlen(c, jeb, ref); - c->unchecked_size -= ref_totlen(c, jeb, ref); + jffs2_dbg(1, "Obsoleting previously unchecked node at 0x%08x of len %x\n", + ref_offset(ref), freed_len); + jeb->unchecked_size -= freed_len; + c->unchecked_size -= freed_len; } else { - D1(if (unlikely(jeb->used_size < ref_totlen(c, jeb, ref))) { - printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n", - ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size); + D1(if (unlikely(jeb->used_size < freed_len)) { + pr_notice("raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n", + freed_len, blocknr, + ref->flash_offset, jeb->used_size); BUG(); }) - D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %x: ", ref_offset(ref), ref_totlen(c, jeb, ref))); - jeb->used_size -= ref_totlen(c, jeb, ref); - c->used_size -= ref_totlen(c, jeb, ref); + jffs2_dbg(1, "Obsoleting node at 0x%08x of len %#x: ", + ref_offset(ref), freed_len); + jeb->used_size -= freed_len; + c->used_size -= freed_len; } // Take care, that wasted size is taken into concern - if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + ref_totlen(c, jeb, ref))) && jeb != c->nextblock) { - D1(printk(KERN_DEBUG "Dirtying\n")); - addedsize = ref_totlen(c, jeb, ref); - jeb->dirty_size += ref_totlen(c, jeb, ref); - c->dirty_size += ref_totlen(c, jeb, ref); + if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) { + jffs2_dbg(1, "Dirtying\n"); + addedsize = freed_len; + jeb->dirty_size += freed_len; + c->dirty_size += freed_len; /* Convert wasted space to dirty, if not a bad block */ if (jeb->wasted_size) { if (on_list(&jeb->list, &c->bad_used_list)) { - D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n", - jeb->offset)); + jffs2_dbg(1, "Leaving block at %08x on the bad_used_list\n", + jeb->offset); addedsize = 0; /* To fool the refiling code later */ } else { - D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n", - jeb->wasted_size, jeb->offset)); + jffs2_dbg(1, "Converting %d bytes of wasted space to dirty in block at %08x\n", + jeb->wasted_size, jeb->offset); addedsize += 
jeb->wasted_size; jeb->dirty_size += jeb->wasted_size; c->dirty_size += jeb->wasted_size; @@ -459,21 +665,20 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref } } } else { - D1(printk(KERN_DEBUG "Wasting\n")); + jffs2_dbg(1, "Wasting\n"); addedsize = 0; - jeb->wasted_size += ref_totlen(c, jeb, ref); - c->wasted_size += ref_totlen(c, jeb, ref); + jeb->wasted_size += freed_len; + c->wasted_size += freed_len; } ref->flash_offset = ref_offset(ref) | REF_OBSOLETE; - - ACCT_SANITY_CHECK(c, jeb); - D1(ACCT_PARANOIA_CHECK(jeb)); + jffs2_dbg_acct_sanity_check_nolock(c, jeb); + jffs2_dbg_acct_paranoia_check_nolock(c, jeb); if (c->flags & JFFS2_SB_FLAG_SCANNING) { /* Flash scanning is in progress. Don't muck about with the block lists because they're not ready yet, and don't actually - obliterate nodes that look obsolete. If they weren't + obliterate nodes that look obsolete. If they weren't marked obsolete on the flash at the time they _became_ obsolete, there was probably a reason for that. */ spin_unlock(&c->erase_completion_lock); @@ -482,51 +687,58 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref } if (jeb == c->nextblock) { - D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset)); + jffs2_dbg(2, "Not moving nextblock 0x%08x to dirty/erase_pending list\n", + jeb->offset); } else if (!jeb->used_size && !jeb->unchecked_size) { if (jeb == c->gcblock) { - D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset)); + jffs2_dbg(1, "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", + jeb->offset); c->gcblock = NULL; } else { - D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset)); + jffs2_dbg(1, "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", + jeb->offset); list_del(&jeb->list); } if (jffs2_wbuf_dirty(c)) { - D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n")); + jffs2_dbg(1, "...and adding to erasable_pending_wbuf_list\n"); list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list); } else { if (jiffies & 127) { /* Most of the time, we just erase it immediately. Otherwise we spend ages scanning it on mount, etc. */ - D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n")); + jffs2_dbg(1, "...and adding to erase_pending_list\n"); list_add_tail(&jeb->list, &c->erase_pending_list); c->nr_erasing_blocks++; - jffs2_erase_pending_trigger(c); + jffs2_garbage_collect_trigger(c); } else { /* Sometimes, however, we leave it elsewhere so it doesn't get immediately reused, and we spread the load a bit. */ - D1(printk(KERN_DEBUG "...and adding to erasable_list\n")); + jffs2_dbg(1, "...and adding to erasable_list\n"); list_add_tail(&jeb->list, &c->erasable_list); - } + } } - D1(printk(KERN_DEBUG "Done OK\n")); + jffs2_dbg(1, "Done OK\n"); } else if (jeb == c->gcblock) { - D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset)); + jffs2_dbg(2, "Not moving gcblock 0x%08x to dirty_list\n", + jeb->offset); } else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) { - D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset)); + jffs2_dbg(1, "Eraseblock at 0x%08x is freshly dirtied. 
Removing from clean list...\n", + jeb->offset); list_del(&jeb->list); - D1(printk(KERN_DEBUG "...and adding to dirty_list\n")); + jffs2_dbg(1, "...and adding to dirty_list\n"); list_add_tail(&jeb->list, &c->dirty_list); } else if (VERYDIRTY(c, jeb->dirty_size) && !VERYDIRTY(c, jeb->dirty_size - addedsize)) { - D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset)); + jffs2_dbg(1, "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", + jeb->offset); list_del(&jeb->list); - D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n")); + jffs2_dbg(1, "...and adding to very_dirty_list\n"); list_add_tail(&jeb->list, &c->very_dirty_list); } else { - D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n", - jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); - } + jffs2_dbg(1, "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n", + jeb->offset, jeb->free_size, jeb->dirty_size, + jeb->used_size); + } spin_unlock(&c->erase_completion_lock); @@ -539,45 +751,52 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref /* The erase_free_sem is locked, and has been since before we marked the node obsolete and potentially put its eraseblock onto the erase_pending_list. Thus, we know that the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet - by jffs2_free_all_node_refs() in erase.c. Which is nice. */ + by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */ - D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref))); + jffs2_dbg(1, "obliterating obsoleted node at 0x%08x\n", + ref_offset(ref)); ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n); if (ret) { - printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret); + pr_warn("Read error reading from obsoleted node at 0x%08x: %d\n", + ref_offset(ref), ret); goto out_erase_sem; } if (retlen != sizeof(n)) { - printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen); + pr_warn("Short read from obsoleted node at 0x%08x: %zd\n", + ref_offset(ref), retlen); goto out_erase_sem; } - if (PAD(je32_to_cpu(n.totlen)) != PAD(ref_totlen(c, jeb, ref))) { - printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), ref_totlen(c, jeb, ref)); + if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) { + pr_warn("Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", + je32_to_cpu(n.totlen), freed_len); goto out_erase_sem; } if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) { - D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype))); + jffs2_dbg(1, "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", + ref_offset(ref), je16_to_cpu(n.nodetype)); goto out_erase_sem; } /* XXX FIXME: This is ugly now */ n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE); ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n); if (ret) { - printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret); + pr_warn("Write error in obliterating obsoleted node at 0x%08x: %d\n", + ref_offset(ref), ret); goto out_erase_sem; } if (retlen != sizeof(n)) { - printk(KERN_WARNING "Short write in obliterating obsoleted node at 
0x%08x: %zd\n", ref_offset(ref), retlen); + pr_warn("Short write in obliterating obsoleted node at 0x%08x: %zd\n", + ref_offset(ref), retlen); goto out_erase_sem; } /* Nodes which have been marked obsolete no longer need to be associated with any inode. Remove them from the per-inode list. - - Note we can't do this for NAND at the moment because we need + + Note we can't do this for NAND at the moment because we need obsolete dirent nodes to stay on the lists, because of the horridness in jffs2_garbage_collect_deletion_dirent(). Also - because we delete the inocache, and on NAND we need that to + because we delete the inocache, and on NAND we need that to stay around until all the nodes are actually erased, in order to stop us from giving the same inode number to another newly created inode. */ @@ -594,227 +813,41 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *p = ref->next_in_ino; ref->next_in_ino = NULL; - if (ic->nodes == (void *)ic && ic->nlink == 0) - jffs2_del_ino_cache(c, ic); - - spin_unlock(&c->erase_completion_lock); - } - - - /* Merge with the next node in the physical list, if there is one - and if it's also obsolete and if it doesn't belong to any inode */ - if (ref->next_phys && ref_obsolete(ref->next_phys) && - !ref->next_phys->next_in_ino) { - struct jffs2_raw_node_ref *n = ref->next_phys; - - spin_lock(&c->erase_completion_lock); - - ref->__totlen += n->__totlen; - ref->next_phys = n->next_phys; - if (jeb->last_node == n) jeb->last_node = ref; - if (jeb->gc_node == n) { - /* gc will be happy continuing gc on this node */ - jeb->gc_node=ref; + switch (ic->class) { +#ifdef CONFIG_JFFS2_FS_XATTR + case RAWNODE_CLASS_XATTR_DATUM: + jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic); + break; + case RAWNODE_CLASS_XATTR_REF: + jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic); + break; +#endif + default: + if (ic->nodes == (void *)ic && ic->pino_nlink == 0) + jffs2_del_ino_cache(c, ic); + break; } spin_unlock(&c->erase_completion_lock); - - jffs2_free_raw_node_ref(n); } - - /* Also merge with the previous node in the list, if there is one - and that one is obsolete */ - if (ref != jeb->first_node ) { - struct jffs2_raw_node_ref *p = jeb->first_node; - spin_lock(&c->erase_completion_lock); - - while (p->next_phys != ref) - p = p->next_phys; - - if (ref_obsolete(p) && !ref->next_in_ino) { - p->__totlen += ref->__totlen; - if (jeb->last_node == ref) { - jeb->last_node = p; - } - if (jeb->gc_node == ref) { - /* gc will be happy continuing gc on this node */ - jeb->gc_node=p; - } - p->next_phys = ref->next_phys; - jffs2_free_raw_node_ref(ref); - } - spin_unlock(&c->erase_completion_lock); - } out_erase_sem: - up(&c->erase_free_sem); -} - -#if CONFIG_JFFS2_FS_DEBUG >= 2 -void jffs2_dump_block_lists(struct jffs2_sb_info *c) -{ - - - printk(KERN_DEBUG "jffs2_dump_block_lists:\n"); - printk(KERN_DEBUG "flash_size: %08x\n", c->flash_size); - printk(KERN_DEBUG "used_size: %08x\n", c->used_size); - printk(KERN_DEBUG "dirty_size: %08x\n", c->dirty_size); - printk(KERN_DEBUG "wasted_size: %08x\n", c->wasted_size); - printk(KERN_DEBUG "unchecked_size: %08x\n", c->unchecked_size); - printk(KERN_DEBUG "free_size: %08x\n", c->free_size); - printk(KERN_DEBUG "erasing_size: %08x\n", c->erasing_size); - printk(KERN_DEBUG "bad_size: %08x\n", c->bad_size); - printk(KERN_DEBUG "sector_size: %08x\n", c->sector_size); - printk(KERN_DEBUG "jffs2_reserved_blocks size: %08x\n",c->sector_size * c->resv_blocks_write); - - if (c->nextblock) { - 
printk(KERN_DEBUG "nextblock: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", - c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->unchecked_size, c->nextblock->free_size); - } else { - printk(KERN_DEBUG "nextblock: NULL\n"); - } - if (c->gcblock) { - printk(KERN_DEBUG "gcblock: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", - c->gcblock->offset, c->gcblock->used_size, c->gcblock->dirty_size, c->gcblock->wasted_size, c->gcblock->unchecked_size, c->gcblock->free_size); - } else { - printk(KERN_DEBUG "gcblock: NULL\n"); - } - if (list_empty(&c->clean_list)) { - printk(KERN_DEBUG "clean_list: empty\n"); - } else { - struct list_head *this; - int numblocks = 0; - uint32_t dirty = 0; - - list_for_each(this, &c->clean_list) { - struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); - numblocks ++; - dirty += jeb->wasted_size; - printk(KERN_DEBUG "clean_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); - } - printk (KERN_DEBUG "Contains %d blocks with total wasted size %u, average wasted size: %u\n", numblocks, dirty, dirty / numblocks); - } - if (list_empty(&c->very_dirty_list)) { - printk(KERN_DEBUG "very_dirty_list: empty\n"); - } else { - struct list_head *this; - int numblocks = 0; - uint32_t dirty = 0; - - list_for_each(this, &c->very_dirty_list) { - struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); - numblocks ++; - dirty += jeb->dirty_size; - printk(KERN_DEBUG "very_dirty_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", - jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); - } - printk (KERN_DEBUG "Contains %d blocks with total dirty size %u, average dirty size: %u\n", - numblocks, dirty, dirty / numblocks); - } - if (list_empty(&c->dirty_list)) { - printk(KERN_DEBUG "dirty_list: empty\n"); - } else { - struct list_head *this; - int numblocks = 0; - uint32_t dirty = 0; - - list_for_each(this, &c->dirty_list) { - struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); - numblocks ++; - dirty += jeb->dirty_size; - printk(KERN_DEBUG "dirty_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", - jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); - } - printk (KERN_DEBUG "Contains %d blocks with total dirty size %u, average dirty size: %u\n", - numblocks, dirty, dirty / numblocks); - } - if (list_empty(&c->erasable_list)) { - printk(KERN_DEBUG "erasable_list: empty\n"); - } else { - struct list_head *this; - - list_for_each(this, &c->erasable_list) { - struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); - printk(KERN_DEBUG "erasable_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", - jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); - } - } - if (list_empty(&c->erasing_list)) { - printk(KERN_DEBUG "erasing_list: empty\n"); - } else { - struct list_head *this; - - list_for_each(this, &c->erasing_list) { - struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); - printk(KERN_DEBUG "erasing_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", - jeb->offset, 
jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); - } - } - if (list_empty(&c->erase_pending_list)) { - printk(KERN_DEBUG "erase_pending_list: empty\n"); - } else { - struct list_head *this; - - list_for_each(this, &c->erase_pending_list) { - struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); - printk(KERN_DEBUG "erase_pending_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", - jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); - } - } - if (list_empty(&c->erasable_pending_wbuf_list)) { - printk(KERN_DEBUG "erasable_pending_wbuf_list: empty\n"); - } else { - struct list_head *this; - - list_for_each(this, &c->erasable_pending_wbuf_list) { - struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); - printk(KERN_DEBUG "erasable_pending_wbuf_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", - jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); - } - } - if (list_empty(&c->free_list)) { - printk(KERN_DEBUG "free_list: empty\n"); - } else { - struct list_head *this; - - list_for_each(this, &c->free_list) { - struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); - printk(KERN_DEBUG "free_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", - jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); - } - } - if (list_empty(&c->bad_list)) { - printk(KERN_DEBUG "bad_list: empty\n"); - } else { - struct list_head *this; - - list_for_each(this, &c->bad_list) { - struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); - printk(KERN_DEBUG "bad_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", - jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); - } - } - if (list_empty(&c->bad_used_list)) { - printk(KERN_DEBUG "bad_used_list: empty\n"); - } else { - struct list_head *this; - - list_for_each(this, &c->bad_used_list) { - struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); - printk(KERN_DEBUG "bad_used_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", - jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size); - } - } + mutex_unlock(&c->erase_free_sem); } -#endif /* CONFIG_JFFS2_FS_DEBUG */ int jffs2_thread_should_wake(struct jffs2_sb_info *c) { int ret = 0; uint32_t dirty; + int nr_very_dirty = 0; + struct jffs2_eraseblock *jeb; + + if (!list_empty(&c->erase_complete_list) || + !list_empty(&c->erase_pending_list)) + return 1; if (c->unchecked_size) { - D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n", - c->unchecked_size, c->checked_ino)); + jffs2_dbg(1, "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n", + c->unchecked_size, c->checked_ino); return 1; } @@ -828,12 +861,23 @@ int jffs2_thread_should_wake(struct jffs2_sb_info *c) */ dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size; - if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger && - (dirty > c->nospc_dirty_size)) + if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger && + (dirty > c->nospc_dirty_size)) ret = 1; - D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, 
nr_erasing_blocks %d, dirty_size 0x%x: %s\n", - c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, ret?"yes":"no")); + list_for_each_entry(jeb, &c->very_dirty_list, list) { + nr_very_dirty++; + if (nr_very_dirty == c->vdirty_blocks_gctrigger) { + ret = 1; + /* In debug mode, actually go through and count them all */ + D1(continue); + break; + } + } + + jffs2_dbg(1, "%s(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n", + __func__, c->nr_free_blocks, c->nr_erasing_blocks, + c->dirty_size, nr_very_dirty, ret ? "yes" : "no"); return ret; } diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h index d900c8929b0..d200a9b8fd5 100644 --- a/fs/jffs2/os-linux.h +++ b/fs/jffs2/os-linux.h @@ -1,14 +1,12 @@ /* * JFFS2 -- Journalling Flash File System, Version 2. * - * Copyright (C) 2002-2003 Red Hat, Inc. + * Copyright © 2001-2007 Red Hat, Inc. * * Created by David Woodhouse <dwmw2@infradead.org> * * For licensing information, see the file 'LICENCE' in this directory. * - * $Id: os-linux.h,v 1.58 2005/07/12 02:34:35 tpoynor Exp $ - * */ #ifndef __JFFS2_OS_LINUX_H__ @@ -29,11 +27,9 @@ struct kvec; #define JFFS2_F_I_SIZE(f) (OFNI_EDONI_2SFFJ(f)->i_size) #define JFFS2_F_I_MODE(f) (OFNI_EDONI_2SFFJ(f)->i_mode) -#define JFFS2_F_I_UID(f) (OFNI_EDONI_2SFFJ(f)->i_uid) -#define JFFS2_F_I_GID(f) (OFNI_EDONI_2SFFJ(f)->i_gid) - -#define JFFS2_F_I_RDEV_MIN(f) (iminor(OFNI_EDONI_2SFFJ(f))) -#define JFFS2_F_I_RDEV_MAJ(f) (imajor(OFNI_EDONI_2SFFJ(f))) +#define JFFS2_F_I_UID(f) (i_uid_read(OFNI_EDONI_2SFFJ(f))) +#define JFFS2_F_I_GID(f) (i_gid_read(OFNI_EDONI_2SFFJ(f))) +#define JFFS2_F_I_RDEV(f) (OFNI_EDONI_2SFFJ(f)->i_rdev) #define ITIME(sec) ((struct timespec){sec, 0}) #define I_SEC(tv) ((tv).tv_sec) @@ -57,6 +53,7 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f) f->fragtree = RB_ROOT; f->metadata = NULL; f->dents = NULL; + f->target = NULL; f->flags = 0; f->usercompr = 0; } @@ -64,17 +61,24 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f) #define jffs2_is_readonly(c) (OFNI_BS_2SFFJ(c)->s_flags & MS_RDONLY) +#define SECTOR_ADDR(x) ( (((unsigned long)(x) / c->sector_size) * c->sector_size) ) #ifndef CONFIG_JFFS2_FS_WRITEBUFFER -#define SECTOR_ADDR(x) ( ((unsigned long)(x) & ~(c->sector_size-1)) ) + + +#ifdef CONFIG_JFFS2_SUMMARY +#define jffs2_can_mark_obsolete(c) (0) +#else #define jffs2_can_mark_obsolete(c) (1) +#endif + #define jffs2_is_writebuffered(c) (0) #define jffs2_cleanmarker_oob(c) (0) #define jffs2_write_nand_cleanmarker(c,jeb) (-EIO) -#define jffs2_flash_write(c, ofs, len, retlen, buf) ((c)->mtd->write((c)->mtd, ofs, len, retlen, buf)) -#define jffs2_flash_read(c, ofs, len, retlen, buf) ((c)->mtd->read((c)->mtd, ofs, len, retlen, buf)) -#define jffs2_flush_wbuf_pad(c) ({ (void)(c), 0; }) -#define jffs2_flush_wbuf_gc(c, i) ({ (void)(c), (void) i, 0; }) +#define jffs2_flash_write(c, ofs, len, retlen, buf) jffs2_flash_direct_write(c, ofs, len, retlen, buf) +#define jffs2_flash_read(c, ofs, len, retlen, buf) (mtd_read((c)->mtd, ofs, len, retlen, buf)) +#define jffs2_flush_wbuf_pad(c) ({ do{} while(0); (void)(c), 0; }) +#define jffs2_flush_wbuf_gc(c, i) ({ do{} while(0); (void)(c), (void) i, 0; }) #define jffs2_write_nand_badblock(c,jeb,bad_offset) (1) #define jffs2_nand_flash_setup(c) (0) #define jffs2_nand_flash_cleanup(c) do {} while(0) @@ -82,22 +86,29 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f) #define jffs2_flash_writev(a,b,c,d,e,f) jffs2_flash_direct_writev(a,b,c,d,e) #define 
jffs2_wbuf_timeout NULL #define jffs2_wbuf_process NULL -#define jffs2_nor_ecc(c) (0) #define jffs2_dataflash(c) (0) -#define jffs2_nor_ecc_flash_setup(c) (0) -#define jffs2_nor_ecc_flash_cleanup(c) do {} while (0) #define jffs2_dataflash_setup(c) (0) #define jffs2_dataflash_cleanup(c) do {} while (0) +#define jffs2_nor_wbuf_flash(c) (0) +#define jffs2_nor_wbuf_flash_setup(c) (0) +#define jffs2_nor_wbuf_flash_cleanup(c) do {} while (0) +#define jffs2_ubivol(c) (0) +#define jffs2_ubivol_setup(c) (0) +#define jffs2_ubivol_cleanup(c) do {} while (0) +#define jffs2_dirty_trigger(c) do {} while (0) #else /* NAND and/or ECC'd NOR support present */ #define jffs2_is_writebuffered(c) (c->wbuf != NULL) -#define SECTOR_ADDR(x) ( ((unsigned long)(x) / (unsigned long)(c->sector_size)) * c->sector_size ) -#define jffs2_can_mark_obsolete(c) ((c->mtd->type == MTD_NORFLASH && !(c->mtd->flags & MTD_ECC)) || c->mtd->type == MTD_RAM) + +#ifdef CONFIG_JFFS2_SUMMARY +#define jffs2_can_mark_obsolete(c) (0) +#else +#define jffs2_can_mark_obsolete(c) (c->mtd->flags & (MTD_BIT_WRITEABLE)) +#endif + #define jffs2_cleanmarker_oob(c) (c->mtd->type == MTD_NANDFLASH) -#define jffs2_flash_write_oob(c, ofs, len, retlen, buf) ((c)->mtd->write_oob((c)->mtd, ofs, len, retlen, buf)) -#define jffs2_flash_read_oob(c, ofs, len, retlen, buf) ((c)->mtd->read_oob((c)->mtd, ofs, len, retlen, buf)) #define jffs2_wbuf_dirty(c) (!!(c)->wbuf_len) /* wbuf.c */ @@ -115,21 +126,19 @@ int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c); int jffs2_nand_flash_setup(struct jffs2_sb_info *c); void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c); -#define jffs2_nor_ecc(c) (c->mtd->type == MTD_NORFLASH && (c->mtd->flags & MTD_ECC)) -int jffs2_nor_ecc_flash_setup(struct jffs2_sb_info *c); -void jffs2_nor_ecc_flash_cleanup(struct jffs2_sb_info *c); - #define jffs2_dataflash(c) (c->mtd->type == MTD_DATAFLASH) int jffs2_dataflash_setup(struct jffs2_sb_info *c); void jffs2_dataflash_cleanup(struct jffs2_sb_info *c); +#define jffs2_ubivol(c) (c->mtd->type == MTD_UBIVOLUME) +int jffs2_ubivol_setup(struct jffs2_sb_info *c); +void jffs2_ubivol_cleanup(struct jffs2_sb_info *c); -#endif /* WRITEBUFFER */ +#define jffs2_nor_wbuf_flash(c) (c->mtd->type == MTD_NORFLASH && ! 
(c->mtd->flags & MTD_BIT_WRITEABLE)) +int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c); +void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c); +void jffs2_dirty_trigger(struct jffs2_sb_info *c); -/* erase.c */ -static inline void jffs2_erase_pending_trigger(struct jffs2_sb_info *c) -{ - OFNI_BS_2SFFJ(c)->s_dirt = 1; -} +#endif /* WRITEBUFFER */ /* background.c */ int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c); @@ -137,52 +146,53 @@ void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c); void jffs2_garbage_collect_trigger(struct jffs2_sb_info *c); /* dir.c */ -extern struct file_operations jffs2_dir_operations; -extern struct inode_operations jffs2_dir_inode_operations; +extern const struct file_operations jffs2_dir_operations; +extern const struct inode_operations jffs2_dir_inode_operations; /* file.c */ -extern struct file_operations jffs2_file_operations; -extern struct inode_operations jffs2_file_inode_operations; -extern struct address_space_operations jffs2_file_address_operations; -int jffs2_fsync(struct file *, struct dentry *, int); +extern const struct file_operations jffs2_file_operations; +extern const struct inode_operations jffs2_file_inode_operations; +extern const struct address_space_operations jffs2_file_address_operations; +int jffs2_fsync(struct file *, loff_t, loff_t, int); int jffs2_do_readpage_unlock (struct inode *inode, struct page *pg); /* ioctl.c */ -int jffs2_ioctl(struct inode *, struct file *, unsigned int, unsigned long); +long jffs2_ioctl(struct file *, unsigned int, unsigned long); /* symlink.c */ -extern struct inode_operations jffs2_symlink_inode_operations; +extern const struct inode_operations jffs2_symlink_inode_operations; /* fs.c */ int jffs2_setattr (struct dentry *, struct iattr *); -void jffs2_read_inode (struct inode *); -void jffs2_clear_inode (struct inode *); -void jffs2_dirty_inode(struct inode *inode); -struct inode *jffs2_new_inode (struct inode *dir_i, int mode, +int jffs2_do_setattr (struct inode *, struct iattr *); +struct inode *jffs2_iget(struct super_block *, unsigned long); +void jffs2_evict_inode (struct inode *); +void jffs2_dirty_inode(struct inode *inode, int flags); +struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode, struct jffs2_raw_inode *ri); -int jffs2_statfs (struct super_block *, struct kstatfs *); -void jffs2_write_super (struct super_block *); -int jffs2_remount_fs (struct super_block *, int *, char *); +int jffs2_statfs (struct dentry *, struct kstatfs *); +int jffs2_do_remount_fs(struct super_block *, int *, char *); int jffs2_do_fill_super(struct super_block *sb, void *data, int silent); void jffs2_gc_release_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f); struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c, - int inum, int nlink); + int inum, int unlinked); -unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c, - struct jffs2_inode_info *f, +unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c, + struct jffs2_inode_info *f, unsigned long offset, unsigned long *priv); void jffs2_gc_release_page(struct jffs2_sb_info *c, unsigned char *pg, unsigned long *priv); void jffs2_flash_cleanup(struct jffs2_sb_info *c); - + /* writev.c */ -int jffs2_flash_direct_writev(struct jffs2_sb_info *c, const struct kvec *vecs, +int jffs2_flash_direct_writev(struct jffs2_sb_info *c, const struct kvec *vecs, unsigned long count, loff_t to, size_t *retlen); - +int jffs2_flash_direct_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, + size_t 
*retlen, const u_char *buf); #endif /* __JFFS2_OS_LINUX_H__ */ diff --git a/fs/jffs2/pushpull.h b/fs/jffs2/pushpull.h deleted file mode 100644 index c0c2a9158df..00000000000 --- a/fs/jffs2/pushpull.h +++ /dev/null @@ -1,72 +0,0 @@ -/* - * JFFS2 -- Journalling Flash File System, Version 2. - * - * Copyright (C) 2001, 2002 Red Hat, Inc. - * - * Created by David Woodhouse <dwmw2@infradead.org> - * - * For licensing information, see the file 'LICENCE' in this directory. - * - * $Id: pushpull.h,v 1.10 2004/11/16 20:36:11 dwmw2 Exp $ - * - */ - -#ifndef __PUSHPULL_H__ -#define __PUSHPULL_H__ - -#include <linux/errno.h> - -struct pushpull { - unsigned char *buf; - unsigned int buflen; - unsigned int ofs; - unsigned int reserve; -}; - - -static inline void init_pushpull(struct pushpull *pp, char *buf, unsigned buflen, unsigned ofs, unsigned reserve) -{ - pp->buf = buf; - pp->buflen = buflen; - pp->ofs = ofs; - pp->reserve = reserve; -} - -static inline int pushbit(struct pushpull *pp, int bit, int use_reserved) -{ - if (pp->ofs >= pp->buflen - (use_reserved?0:pp->reserve)) { - return -ENOSPC; - } - - if (bit) { - pp->buf[pp->ofs >> 3] |= (1<<(7-(pp->ofs &7))); - } - else { - pp->buf[pp->ofs >> 3] &= ~(1<<(7-(pp->ofs &7))); - } - pp->ofs++; - - return 0; -} - -static inline int pushedbits(struct pushpull *pp) -{ - return pp->ofs; -} - -static inline int pullbit(struct pushpull *pp) -{ - int bit; - - bit = (pp->buf[pp->ofs >> 3] >> (7-(pp->ofs & 7))) & 1; - - pp->ofs++; - return bit; -} - -static inline int pulledbits(struct pushpull *pp) -{ - return pp->ofs; -} - -#endif /* __PUSHPULL_H__ */ diff --git a/fs/jffs2/read.c b/fs/jffs2/read.c index c7f9068907c..0b042b1fc82 100644 --- a/fs/jffs2/read.c +++ b/fs/jffs2/read.c @@ -1,16 +1,16 @@ /* * JFFS2 -- Journalling Flash File System, Version 2. * - * Copyright (C) 2001-2003 Red Hat, Inc. + * Copyright © 2001-2007 Red Hat, Inc. * * Created by David Woodhouse <dwmw2@infradead.org> * * For licensing information, see the file 'LICENCE' in this directory. * - * $Id: read.c,v 1.39 2005/03/01 10:34:03 dedekind Exp $ - * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/kernel.h> #include <linux/slab.h> #include <linux/crc32.h> @@ -38,43 +38,44 @@ int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, ret = jffs2_flash_read(c, ref_offset(fd->raw), sizeof(*ri), &readlen, (char *)ri); if (ret) { jffs2_free_raw_inode(ri); - printk(KERN_WARNING "Error reading node from 0x%08x: %d\n", ref_offset(fd->raw), ret); + pr_warn("Error reading node from 0x%08x: %d\n", + ref_offset(fd->raw), ret); return ret; } if (readlen != sizeof(*ri)) { jffs2_free_raw_inode(ri); - printk(KERN_WARNING "Short read from 0x%08x: wanted 0x%zx bytes, got 0x%zx\n", - ref_offset(fd->raw), sizeof(*ri), readlen); + pr_warn("Short read from 0x%08x: wanted 0x%zx bytes, got 0x%zx\n", + ref_offset(fd->raw), sizeof(*ri), readlen); return -EIO; } crc = crc32(0, ri, sizeof(*ri)-8); - D1(printk(KERN_DEBUG "Node read from %08x: node_crc %08x, calculated CRC %08x. dsize %x, csize %x, offset %x, buf %p\n", + jffs2_dbg(1, "Node read from %08x: node_crc %08x, calculated CRC %08x. 
dsize %x, csize %x, offset %x, buf %p\n", ref_offset(fd->raw), je32_to_cpu(ri->node_crc), crc, je32_to_cpu(ri->dsize), je32_to_cpu(ri->csize), - je32_to_cpu(ri->offset), buf)); + je32_to_cpu(ri->offset), buf); if (crc != je32_to_cpu(ri->node_crc)) { - printk(KERN_WARNING "Node CRC %08x != calculated CRC %08x for node at %08x\n", - je32_to_cpu(ri->node_crc), crc, ref_offset(fd->raw)); + pr_warn("Node CRC %08x != calculated CRC %08x for node at %08x\n", + je32_to_cpu(ri->node_crc), crc, ref_offset(fd->raw)); ret = -EIO; goto out_ri; } /* There was a bug where we wrote hole nodes out with csize/dsize swapped. Deal with it */ - if (ri->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(ri->dsize) && + if (ri->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(ri->dsize) && je32_to_cpu(ri->csize)) { ri->dsize = ri->csize; ri->csize = cpu_to_je32(0); } D1(if(ofs + len > je32_to_cpu(ri->dsize)) { - printk(KERN_WARNING "jffs2_read_dnode() asked for %d bytes at %d from %d-byte node\n", - len, ofs, je32_to_cpu(ri->dsize)); + pr_warn("jffs2_read_dnode() asked for %d bytes at %d from %d-byte node\n", + len, ofs, je32_to_cpu(ri->dsize)); ret = -EINVAL; goto out_ri; }); - + if (ri->compr == JFFS2_COMPR_ZERO) { memset(buf, 0, len); goto out_ri; @@ -82,8 +83,8 @@ int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, /* Cases: Reading whole node and it's uncompressed - read directly to buffer provided, check CRC. - Reading whole node and it's compressed - read into comprbuf, check CRC and decompress to buffer provided - Reading partial node and it's uncompressed - read into readbuf, check CRC, and copy + Reading whole node and it's compressed - read into comprbuf, check CRC and decompress to buffer provided + Reading partial node and it's uncompressed - read into readbuf, check CRC, and copy Reading partial node and it's compressed - read into readbuf, check checksum, decompress to decomprbuf and copy */ if (ri->compr == JFFS2_COMPR_NONE && len == je32_to_cpu(ri->dsize)) { @@ -109,8 +110,8 @@ int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, decomprbuf = readbuf; } - D2(printk(KERN_DEBUG "Read %d bytes to %p\n", je32_to_cpu(ri->csize), - readbuf)); + jffs2_dbg(2, "Read %d bytes to %p\n", je32_to_cpu(ri->csize), + readbuf); ret = jffs2_flash_read(c, (ref_offset(fd->raw)) + sizeof(*ri), je32_to_cpu(ri->csize), &readlen, readbuf); @@ -121,18 +122,19 @@ int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, crc = crc32(0, readbuf, je32_to_cpu(ri->csize)); if (crc != je32_to_cpu(ri->data_crc)) { - printk(KERN_WARNING "Data CRC %08x != calculated CRC %08x for node at %08x\n", - je32_to_cpu(ri->data_crc), crc, ref_offset(fd->raw)); + pr_warn("Data CRC %08x != calculated CRC %08x for node at %08x\n", + je32_to_cpu(ri->data_crc), crc, ref_offset(fd->raw)); ret = -EIO; goto out_decomprbuf; } - D2(printk(KERN_DEBUG "Data CRC matches calculated CRC %08x\n", crc)); + jffs2_dbg(2, "Data CRC matches calculated CRC %08x\n", crc); if (ri->compr != JFFS2_COMPR_NONE) { - D2(printk(KERN_DEBUG "Decompress %d bytes from %p to %d bytes at %p\n", - je32_to_cpu(ri->csize), readbuf, je32_to_cpu(ri->dsize), decomprbuf)); + jffs2_dbg(2, "Decompress %d bytes from %p to %d bytes at %p\n", + je32_to_cpu(ri->csize), readbuf, + je32_to_cpu(ri->dsize), decomprbuf); ret = jffs2_decompress(c, f, ri->compr | (ri->usercompr << 8), readbuf, decomprbuf, je32_to_cpu(ri->csize), je32_to_cpu(ri->dsize)); if (ret) { - printk(KERN_WARNING "Error: jffs2_decompress returned %d\n", ret); + pr_warn("Error: 
jffs2_decompress returned %d\n", ret); goto out_decomprbuf; } } @@ -159,31 +161,38 @@ int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_node_frag *frag; int ret; - D1(printk(KERN_DEBUG "jffs2_read_inode_range: ino #%u, range 0x%08x-0x%08x\n", - f->inocache->ino, offset, offset+len)); + jffs2_dbg(1, "%s(): ino #%u, range 0x%08x-0x%08x\n", + __func__, f->inocache->ino, offset, offset + len); frag = jffs2_lookup_node_frag(&f->fragtree, offset); /* XXX FIXME: Where a single physical node actually shows up in two frags, we read it twice. Don't do that. */ - /* Now we're pointing at the first frag which overlaps our page */ + /* Now we're pointing at the first frag which overlaps our page + * (or perhaps is before it, if we've been asked to read off the + * end of the file). */ while(offset < end) { - D2(printk(KERN_DEBUG "jffs2_read_inode_range: offset %d, end %d\n", offset, end)); - if (unlikely(!frag || frag->ofs > offset)) { + jffs2_dbg(2, "%s(): offset %d, end %d\n", + __func__, offset, end); + if (unlikely(!frag || frag->ofs > offset || + frag->ofs + frag->size <= offset)) { uint32_t holesize = end - offset; - if (frag) { - D1(printk(KERN_NOTICE "Eep. Hole in ino #%u fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n", f->inocache->ino, frag->ofs, offset)); + if (frag && frag->ofs > offset) { + jffs2_dbg(1, "Eep. Hole in ino #%u fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n", + f->inocache->ino, frag->ofs, offset); holesize = min(holesize, frag->ofs - offset); - D2(jffs2_print_frag_list(f)); } - D1(printk(KERN_DEBUG "Filling non-frag hole from %d-%d\n", offset, offset+holesize)); + jffs2_dbg(1, "Filling non-frag hole from %d-%d\n", + offset, offset + holesize); memset(buf, 0, holesize); buf += holesize; offset += holesize; continue; } else if (unlikely(!frag->node)) { uint32_t holeend = min(end, frag->ofs + frag->size); - D1(printk(KERN_DEBUG "Filling frag hole from %d-%d (frag 0x%x 0x%x)\n", offset, holeend, frag->ofs, frag->ofs + frag->size)); + jffs2_dbg(1, "Filling frag hole from %d-%d (frag 0x%x 0x%x)\n", + offset, holeend, frag->ofs, + frag->ofs + frag->size); memset(buf, 0, holeend - offset); buf += holeend - offset; offset = holeend; @@ -192,23 +201,26 @@ int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, } else { uint32_t readlen; uint32_t fragofs; /* offset within the frag to start reading */ - + fragofs = offset - frag->ofs; readlen = min(frag->size - fragofs, end - offset); - D1(printk(KERN_DEBUG "Reading %d-%d from node at 0x%08x (%d)\n", - frag->ofs+fragofs, frag->ofs+fragofs+readlen, - ref_offset(frag->node->raw), ref_flags(frag->node->raw))); + jffs2_dbg(1, "Reading %d-%d from node at 0x%08x (%d)\n", + frag->ofs+fragofs, + frag->ofs + fragofs+readlen, + ref_offset(frag->node->raw), + ref_flags(frag->node->raw)); ret = jffs2_read_dnode(c, f, frag->node, buf, fragofs + frag->ofs - frag->node->ofs, readlen); - D2(printk(KERN_DEBUG "node read done\n")); + jffs2_dbg(2, "node read done\n"); if (ret) { - D1(printk(KERN_DEBUG"jffs2_read_inode_range error %d\n",ret)); + jffs2_dbg(1, "%s(): error %d\n", + __func__, ret); memset(buf, 0, readlen); return ret; } buf += readlen; offset += readlen; frag = frag_next(frag); - D2(printk(KERN_DEBUG "node read was OK. Looping\n")); + jffs2_dbg(2, "node read was OK. 
Looping\n"); } } return 0; diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c index 5b2a83599d7..386303dca38 100644 --- a/fs/jffs2/readinode.c +++ b/fs/jffs2/readinode.c @@ -1,17 +1,18 @@ /* * JFFS2 -- Journalling Flash File System, Version 2. * - * Copyright (C) 2001-2003 Red Hat, Inc. + * Copyright © 2001-2007 Red Hat, Inc. * * Created by David Woodhouse <dwmw2@infradead.org> * * For licensing information, see the file 'LICENCE' in this directory. * - * $Id: readinode.c,v 1.125 2005/07/10 13:13:55 dedekind Exp $ - * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/kernel.h> +#include <linux/sched.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/crc32.h> @@ -20,580 +21,1171 @@ #include <linux/compiler.h> #include "nodelist.h" -static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *list, struct jffs2_node_frag *newfrag); - -#if CONFIG_JFFS2_FS_DEBUG >= 2 -static void jffs2_print_fragtree(struct rb_root *list, int permitbug) +/* + * Check the data CRC of the node. + * + * Returns: 0 if the data CRC is correct; + * 1 - if incorrect; + * error code if an error occurred. + */ +static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn) { - struct jffs2_node_frag *this = frag_first(list); - uint32_t lastofs = 0; - int buggy = 0; - - while(this) { - if (this->node) - printk(KERN_DEBUG "frag %04x-%04x: 0x%08x(%d) on flash (*%p). left (%p), right (%p), parent (%p)\n", - this->ofs, this->ofs+this->size, ref_offset(this->node->raw), ref_flags(this->node->raw), - this, frag_left(this), frag_right(this), frag_parent(this)); - else - printk(KERN_DEBUG "frag %04x-%04x: hole (*%p). left (%p} right (%p), parent (%p)\n", this->ofs, - this->ofs+this->size, this, frag_left(this), frag_right(this), frag_parent(this)); - if (this->ofs != lastofs) - buggy = 1; - lastofs = this->ofs+this->size; - this = frag_next(this); - } - if (buggy && !permitbug) { - printk(KERN_CRIT "Frag tree got a hole in it\n"); - BUG(); + struct jffs2_raw_node_ref *ref = tn->fn->raw; + int err = 0, pointed = 0; + struct jffs2_eraseblock *jeb; + unsigned char *buffer; + uint32_t crc, ofs, len; + size_t retlen; + + BUG_ON(tn->csize == 0); + + /* Calculate how many bytes were already checked */ + ofs = ref_offset(ref) + sizeof(struct jffs2_raw_inode); + len = tn->csize; + + if (jffs2_is_writebuffered(c)) { + int adj = ofs % c->wbuf_pagesize; + if (likely(adj)) + adj = c->wbuf_pagesize - adj; + + if (adj >= tn->csize) { + dbg_readinode("no need to check node at %#08x, data length %u, data starts at %#08x - it has already been checked.\n", + ref_offset(ref), tn->csize, ofs); + goto adj_acc; + } + + ofs += adj; + len -= adj; + } + + dbg_readinode("check node at %#08x, data length %u, partial CRC %#08x, correct CRC %#08x, data starts at %#08x, start checking from %#08x - %u bytes.\n", + ref_offset(ref), tn->csize, tn->partial_crc, tn->data_crc, ofs - len, ofs, len); + +#ifndef __ECOS + /* TODO: instead, incapsulate point() stuff to jffs2_flash_read(), + * adding and jffs2_flash_read_end() interface. 
*/ + err = mtd_point(c->mtd, ofs, len, &retlen, (void **)&buffer, NULL); + if (!err && retlen < len) { + JFFS2_WARNING("MTD point returned len too short: %zu instead of %u.\n", retlen, tn->csize); + mtd_unpoint(c->mtd, ofs, retlen); + } else if (err) { + if (err != -EOPNOTSUPP) + JFFS2_WARNING("MTD point failed: error code %d.\n", err); + } else + pointed = 1; /* succefully pointed to device */ +#endif + + if (!pointed) { + buffer = kmalloc(len, GFP_KERNEL); + if (unlikely(!buffer)) + return -ENOMEM; + + /* TODO: this is very frequent pattern, make it a separate + * routine */ + err = jffs2_flash_read(c, ofs, len, &retlen, buffer); + if (err) { + JFFS2_ERROR("can not read %d bytes from 0x%08x, error code: %d.\n", len, ofs, err); + goto free_out; + } + + if (retlen != len) { + JFFS2_ERROR("short read at %#08x: %zd instead of %d.\n", ofs, retlen, len); + err = -EIO; + goto free_out; + } + } + + /* Continue calculating CRC */ + crc = crc32(tn->partial_crc, buffer, len); + if(!pointed) + kfree(buffer); +#ifndef __ECOS + else + mtd_unpoint(c->mtd, ofs, len); +#endif + + if (crc != tn->data_crc) { + JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n", + ref_offset(ref), tn->data_crc, crc); + return 1; } + +adj_acc: + jeb = &c->blocks[ref->flash_offset / c->sector_size]; + len = ref_totlen(c, jeb, ref); + /* If it should be REF_NORMAL, it'll get marked as such when + we build the fragtree, shortly. No need to worry about GC + moving it while it's marked REF_PRISTINE -- GC won't happen + till we've finished checking every inode anyway. */ + ref->flash_offset |= REF_PRISTINE; + /* + * Mark the node as having been checked and fix the + * accounting accordingly. + */ + spin_lock(&c->erase_completion_lock); + jeb->used_size += len; + jeb->unchecked_size -= len; + c->used_size += len; + c->unchecked_size -= len; + jffs2_dbg_acct_paranoia_check_nolock(c, jeb); + spin_unlock(&c->erase_completion_lock); + + return 0; + +free_out: + if(!pointed) + kfree(buffer); +#ifndef __ECOS + else + mtd_unpoint(c->mtd, ofs, len); +#endif + return err; } -void jffs2_print_frag_list(struct jffs2_inode_info *f) +/* + * Helper function for jffs2_add_older_frag_to_fragtree(). + * + * Checks the node if we are in the checking stage. 
+ */ +static int check_tn_node(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn) { - jffs2_print_fragtree(&f->fragtree, 0); + int ret; - if (f->metadata) { - printk(KERN_DEBUG "metadata at 0x%08x\n", ref_offset(f->metadata->raw)); + BUG_ON(ref_obsolete(tn->fn->raw)); + + /* We only check the data CRC of unchecked nodes */ + if (ref_flags(tn->fn->raw) != REF_UNCHECKED) + return 0; + + dbg_readinode("check node %#04x-%#04x, phys offs %#08x\n", + tn->fn->ofs, tn->fn->ofs + tn->fn->size, ref_offset(tn->fn->raw)); + + ret = check_node_data(c, tn); + if (unlikely(ret < 0)) { + JFFS2_ERROR("check_node_data() returned error: %d.\n", + ret); + } else if (unlikely(ret > 0)) { + dbg_readinode("CRC error, mark it obsolete.\n"); + jffs2_mark_node_obsolete(c, tn->fn->raw); } + + return ret; } -#endif -#if CONFIG_JFFS2_FS_DEBUG >= 1 -static int jffs2_sanitycheck_fragtree(struct jffs2_inode_info *f) +static struct jffs2_tmp_dnode_info *jffs2_lookup_tn(struct rb_root *tn_root, uint32_t offset) { - struct jffs2_node_frag *frag; - int bitched = 0; + struct rb_node *next; + struct jffs2_tmp_dnode_info *tn = NULL; - for (frag = frag_first(&f->fragtree); frag; frag = frag_next(frag)) { + dbg_readinode("root %p, offset %d\n", tn_root, offset); - struct jffs2_full_dnode *fn = frag->node; - if (!fn || !fn->raw) - continue; + next = tn_root->rb_node; - if (ref_flags(fn->raw) == REF_PRISTINE) { + while (next) { + tn = rb_entry(next, struct jffs2_tmp_dnode_info, rb); - if (fn->frags > 1) { - printk(KERN_WARNING "REF_PRISTINE node at 0x%08x had %d frags. Tell dwmw2\n", ref_offset(fn->raw), fn->frags); - bitched = 1; - } - /* A hole node which isn't multi-page should be garbage-collected - and merged anyway, so we just check for the frag size here, - rather than mucking around with actually reading the node - and checking the compression type, which is the real way - to tell a hole node. */ - if (frag->ofs & (PAGE_CACHE_SIZE-1) && frag_prev(frag) && frag_prev(frag)->size < PAGE_CACHE_SIZE && frag_prev(frag)->node) { - printk(KERN_WARNING "REF_PRISTINE node at 0x%08x had a previous non-hole frag in the same page. Tell dwmw2\n", - ref_offset(fn->raw)); - bitched = 1; + if (tn->fn->ofs < offset) + next = tn->rb.rb_right; + else if (tn->fn->ofs >= offset) + next = tn->rb.rb_left; + else + break; + } + + return tn; +} + + +static void jffs2_kill_tn(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn) +{ + jffs2_mark_node_obsolete(c, tn->fn->raw); + jffs2_free_full_dnode(tn->fn); + jffs2_free_tmp_dnode_info(tn); +} +/* + * This function is used when we read an inode. Data nodes arrive in + * arbitrary order -- they may be older or newer than the nodes which + * are already in the tree. Where overlaps occur, the older node can + * be discarded as long as the newer passes the CRC check. We don't + * bother to keep track of holes in this rbtree, and neither do we deal + * with frags -- we can have multiple entries starting at the same + * offset, and the one with the smallest length will come first in the + * ordering. 
+ * + * Returns 0 if the node was handled (including marking it obsolete) + * < 0 an if error occurred + */ +static int jffs2_add_tn_to_tree(struct jffs2_sb_info *c, + struct jffs2_readinode_info *rii, + struct jffs2_tmp_dnode_info *tn) +{ + uint32_t fn_end = tn->fn->ofs + tn->fn->size; + struct jffs2_tmp_dnode_info *this, *ptn; + + dbg_readinode("insert fragment %#04x-%#04x, ver %u at %08x\n", tn->fn->ofs, fn_end, tn->version, ref_offset(tn->fn->raw)); + + /* If a node has zero dsize, we only have to keep if it if it might be the + node with highest version -- i.e. the one which will end up as f->metadata. + Note that such nodes won't be REF_UNCHECKED since there are no data to + check anyway. */ + if (!tn->fn->size) { + if (rii->mdata_tn) { + if (rii->mdata_tn->version < tn->version) { + /* We had a candidate mdata node already */ + dbg_readinode("kill old mdata with ver %d\n", rii->mdata_tn->version); + jffs2_kill_tn(c, rii->mdata_tn); + } else { + dbg_readinode("kill new mdata with ver %d (older than existing %d\n", + tn->version, rii->mdata_tn->version); + jffs2_kill_tn(c, tn); + return 0; } + } + rii->mdata_tn = tn; + dbg_readinode("keep new mdata with ver %d\n", tn->version); + return 0; + } - if ((frag->ofs+frag->size) & (PAGE_CACHE_SIZE-1) && frag_next(frag) && frag_next(frag)->size < PAGE_CACHE_SIZE && frag_next(frag)->node) { - printk(KERN_WARNING "REF_PRISTINE node at 0x%08x (%08x-%08x) had a following non-hole frag in the same page. Tell dwmw2\n", - ref_offset(fn->raw), frag->ofs, frag->ofs+frag->size); - bitched = 1; + /* Find the earliest node which _may_ be relevant to this one */ + this = jffs2_lookup_tn(&rii->tn_root, tn->fn->ofs); + if (this) { + /* If the node is coincident with another at a lower address, + back up until the other node is found. It may be relevant */ + while (this->overlapped) { + ptn = tn_prev(this); + if (!ptn) { + /* + * We killed a node which set the overlapped + * flags during the scan. Fix it up. + */ + this->overlapped = 0; + break; } + this = ptn; } + dbg_readinode("'this' found %#04x-%#04x (%s)\n", this->fn->ofs, this->fn->ofs + this->fn->size, this->fn ? "data" : "hole"); } - - if (bitched) { - struct jffs2_node_frag *thisfrag; - printk(KERN_WARNING "Inode is #%u\n", f->inocache->ino); - thisfrag = frag_first(&f->fragtree); - while (thisfrag) { - if (!thisfrag->node) { - printk("Frag @0x%x-0x%x; node-less hole\n", - thisfrag->ofs, thisfrag->size + thisfrag->ofs); - } else if (!thisfrag->node->raw) { - printk("Frag @0x%x-0x%x; raw-less hole\n", - thisfrag->ofs, thisfrag->size + thisfrag->ofs); + while (this) { + if (this->fn->ofs > fn_end) + break; + dbg_readinode("Ponder this ver %d, 0x%x-0x%x\n", + this->version, this->fn->ofs, this->fn->size); + + if (this->version == tn->version) { + /* Version number collision means REF_PRISTINE GC. Accept either of them + as long as the CRC is correct. Check the one we have already... */ + if (!check_tn_node(c, this)) { + /* The one we already had was OK. Keep it and throw away the new one */ + dbg_readinode("Like old node. Throw away new\n"); + jffs2_kill_tn(c, tn); + return 0; } else { - printk("Frag @0x%x-0x%x; raw at 0x%08x(%d) (0x%x-0x%x)\n", - thisfrag->ofs, thisfrag->size + thisfrag->ofs, - ref_offset(thisfrag->node->raw), ref_flags(thisfrag->node->raw), - thisfrag->node->ofs, thisfrag->node->ofs+thisfrag->node->size); + /* Who cares if the new one is good; keep it for now anyway. */ + dbg_readinode("Like new node. 
Throw away old\n"); + rb_replace_node(&this->rb, &tn->rb, &rii->tn_root); + jffs2_kill_tn(c, this); + /* Same overlapping from in front and behind */ + return 0; + } + } + if (this->version < tn->version && + this->fn->ofs >= tn->fn->ofs && + this->fn->ofs + this->fn->size <= fn_end) { + /* New node entirely overlaps 'this' */ + if (check_tn_node(c, tn)) { + dbg_readinode("new node bad CRC\n"); + jffs2_kill_tn(c, tn); + return 0; + } + /* ... and is good. Kill 'this' and any subsequent nodes which are also overlapped */ + while (this && this->fn->ofs + this->fn->size <= fn_end) { + struct jffs2_tmp_dnode_info *next = tn_next(this); + if (this->version < tn->version) { + tn_erase(this, &rii->tn_root); + dbg_readinode("Kill overlapped ver %d, 0x%x-0x%x\n", + this->version, this->fn->ofs, + this->fn->ofs+this->fn->size); + jffs2_kill_tn(c, this); + } + this = next; } - thisfrag = frag_next(thisfrag); + dbg_readinode("Done killing overlapped nodes\n"); + continue; + } + if (this->version > tn->version && + this->fn->ofs <= tn->fn->ofs && + this->fn->ofs+this->fn->size >= fn_end) { + /* New node entirely overlapped by 'this' */ + if (!check_tn_node(c, this)) { + dbg_readinode("Good CRC on old node. Kill new\n"); + jffs2_kill_tn(c, tn); + return 0; + } + /* ... but 'this' was bad. Replace it... */ + dbg_readinode("Bad CRC on old overlapping node. Kill it\n"); + tn_erase(this, &rii->tn_root); + jffs2_kill_tn(c, this); + break; } + + this = tn_next(this); } - return bitched; + + /* We neither completely obsoleted nor were completely + obsoleted by an earlier node. Insert into the tree */ + { + struct rb_node *parent; + struct rb_node **link = &rii->tn_root.rb_node; + struct jffs2_tmp_dnode_info *insert_point = NULL; + + while (*link) { + parent = *link; + insert_point = rb_entry(parent, struct jffs2_tmp_dnode_info, rb); + if (tn->fn->ofs > insert_point->fn->ofs) + link = &insert_point->rb.rb_right; + else if (tn->fn->ofs < insert_point->fn->ofs || + tn->fn->size < insert_point->fn->size) + link = &insert_point->rb.rb_left; + else + link = &insert_point->rb.rb_right; + } + rb_link_node(&tn->rb, &insert_point->rb, link); + rb_insert_color(&tn->rb, &rii->tn_root); + } + + /* If there's anything behind that overlaps us, note it */ + this = tn_prev(tn); + if (this) { + while (1) { + if (this->fn->ofs + this->fn->size > tn->fn->ofs) { + dbg_readinode("Node is overlapped by %p (v %d, 0x%x-0x%x)\n", + this, this->version, this->fn->ofs, + this->fn->ofs+this->fn->size); + tn->overlapped = 1; + break; + } + if (!this->overlapped) + break; + + ptn = tn_prev(this); + if (!ptn) { + /* + * We killed a node which set the overlapped + * flags during the scan. Fix it up. + */ + this->overlapped = 0; + break; + } + this = ptn; + } + } + + /* If the new node overlaps anything ahead, note it */ + this = tn_next(tn); + while (this && this->fn->ofs < fn_end) { + this->overlapped = 1; + dbg_readinode("Node ver %d, 0x%x-0x%x is overlapped\n", + this->version, this->fn->ofs, + this->fn->ofs+this->fn->size); + this = tn_next(this); + } + return 0; } -#endif /* D1 */ -static void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, struct jffs2_node_frag *this) +/* Trivial function to remove the last node in the tree. Which by definition + has no right-hand child — so can be removed just by making its left-hand + child (if any) take its place under its parent. Since this is only done + when we're consuming the whole tree, there's no need to use rb_erase() + and let it worry about adjusting colours and balancing the tree. 
That + would just be a waste of time. */ +static void eat_last(struct rb_root *root, struct rb_node *node) { - if (this->node) { - this->node->frags--; - if (!this->node->frags) { - /* The node has no valid frags left. It's totally obsoleted */ - D2(printk(KERN_DEBUG "Marking old node @0x%08x (0x%04x-0x%04x) obsolete\n", - ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size)); - jffs2_mark_node_obsolete(c, this->node->raw); - jffs2_free_full_dnode(this->node); - } else { - D2(printk(KERN_DEBUG "Marking old node @0x%08x (0x%04x-0x%04x) REF_NORMAL. frags is %d\n", - ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size, - this->node->frags)); - mark_ref_normal(this->node->raw); + struct rb_node *parent = rb_parent(node); + struct rb_node **link; + + /* LAST! */ + BUG_ON(node->rb_right); + + if (!parent) + link = &root->rb_node; + else if (node == parent->rb_left) + link = &parent->rb_left; + else + link = &parent->rb_right; + + *link = node->rb_left; + if (node->rb_left) + node->rb_left->__rb_parent_color = node->__rb_parent_color; +} + +/* We put the version tree in reverse order, so we can use the same eat_last() + function that we use to consume the tmpnode tree (tn_root). */ +static void ver_insert(struct rb_root *ver_root, struct jffs2_tmp_dnode_info *tn) +{ + struct rb_node **link = &ver_root->rb_node; + struct rb_node *parent = NULL; + struct jffs2_tmp_dnode_info *this_tn; + + while (*link) { + parent = *link; + this_tn = rb_entry(parent, struct jffs2_tmp_dnode_info, rb); + + if (tn->version > this_tn->version) + link = &parent->rb_left; + else + link = &parent->rb_right; + } + dbg_readinode("Link new node at %p (root is %p)\n", link, ver_root); + rb_link_node(&tn->rb, parent, link); + rb_insert_color(&tn->rb, ver_root); +} + +/* Build final, normal fragtree from tn tree. It doesn't matter which order + we add nodes to the real fragtree, as long as they don't overlap. And + having thrown away the majority of overlapped nodes as we went, there + really shouldn't be many sets of nodes which do overlap. If we start at + the end, we can use the overlap markers -- we can just eat nodes which + aren't overlapped, and when we encounter nodes which _do_ overlap we + sort them all into a temporary tree in version order before replaying them. */ +static int jffs2_build_inode_fragtree(struct jffs2_sb_info *c, + struct jffs2_inode_info *f, + struct jffs2_readinode_info *rii) +{ + struct jffs2_tmp_dnode_info *pen, *last, *this; + struct rb_root ver_root = RB_ROOT; + uint32_t high_ver = 0; + + if (rii->mdata_tn) { + dbg_readinode("potential mdata is ver %d at %p\n", rii->mdata_tn->version, rii->mdata_tn); + high_ver = rii->mdata_tn->version; + rii->latest_ref = rii->mdata_tn->fn->raw; + } +#ifdef JFFS2_DBG_READINODE_MESSAGES + this = tn_last(&rii->tn_root); + while (this) { + dbg_readinode("tn %p ver %d range 0x%x-0x%x ov %d\n", this, this->version, this->fn->ofs, + this->fn->ofs+this->fn->size, this->overlapped); + this = tn_prev(this); + } +#endif + pen = tn_last(&rii->tn_root); + while ((last = pen)) { + pen = tn_prev(last); + + eat_last(&rii->tn_root, &last->rb); + ver_insert(&ver_root, last); + + if (unlikely(last->overlapped)) { + if (pen) + continue; + /* + * We killed a node which set the overlapped + * flags during the scan. Fix it up. + */ + last->overlapped = 0; + } + + /* Now we have a bunch of nodes in reverse version + order, in the tree at ver_root. Most of the time, + there'll actually be only one node in the 'tree', + in fact. 
*/ + this = tn_last(&ver_root); + + while (this) { + struct jffs2_tmp_dnode_info *vers_next; + int ret; + vers_next = tn_prev(this); + eat_last(&ver_root, &this->rb); + if (check_tn_node(c, this)) { + dbg_readinode("node ver %d, 0x%x-0x%x failed CRC\n", + this->version, this->fn->ofs, + this->fn->ofs+this->fn->size); + jffs2_kill_tn(c, this); + } else { + if (this->version > high_ver) { + /* Note that this is different from the other + highest_version, because this one is only + counting _valid_ nodes which could give the + latest inode metadata */ + high_ver = this->version; + rii->latest_ref = this->fn->raw; + } + dbg_readinode("Add %p (v %d, 0x%x-0x%x, ov %d) to fragtree\n", + this, this->version, this->fn->ofs, + this->fn->ofs+this->fn->size, this->overlapped); + + ret = jffs2_add_full_dnode_to_inode(c, f, this->fn); + if (ret) { + /* Free the nodes in vers_root; let the caller + deal with the rest */ + JFFS2_ERROR("Add node to tree failed %d\n", ret); + while (1) { + vers_next = tn_prev(this); + if (check_tn_node(c, this)) + jffs2_mark_node_obsolete(c, this->fn->raw); + jffs2_free_full_dnode(this->fn); + jffs2_free_tmp_dnode_info(this); + this = vers_next; + if (!this) + break; + eat_last(&ver_root, &vers_next->rb); + } + return ret; + } + jffs2_free_tmp_dnode_info(this); + } + this = vers_next; } - } - jffs2_free_node_frag(this); + return 0; } -/* Given an inode, probably with existing list of fragments, add the new node - * to the fragment list. +static void jffs2_free_tmp_dnode_info_list(struct rb_root *list) +{ + struct jffs2_tmp_dnode_info *tn, *next; + + rbtree_postorder_for_each_entry_safe(tn, next, list, rb) { + jffs2_free_full_dnode(tn->fn); + jffs2_free_tmp_dnode_info(tn); + } + + *list = RB_ROOT; +} + +static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd) +{ + struct jffs2_full_dirent *next; + + while (fd) { + next = fd->next; + jffs2_free_full_dirent(fd); + fd = next; + } +} + +/* Returns first valid node after 'ref'. May return 'ref' */ +static struct jffs2_raw_node_ref *jffs2_first_valid_node(struct jffs2_raw_node_ref *ref) +{ + while (ref && ref->next_in_ino) { + if (!ref_obsolete(ref)) + return ref; + dbg_noderef("node at 0x%08x is obsoleted. Ignoring.\n", ref_offset(ref)); + ref = ref->next_in_ino; + } + return NULL; +} + +/* + * Helper function for jffs2_get_inode_nodes(). + * It is called every time an directory entry node is found. + * + * Returns: 0 on success; + * negative error code on failure. */ -int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn) +static inline int read_direntry(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, + struct jffs2_raw_dirent *rd, size_t read, + struct jffs2_readinode_info *rii) { - int ret; - struct jffs2_node_frag *newfrag; + struct jffs2_full_dirent *fd; + uint32_t crc; - D1(printk(KERN_DEBUG "jffs2_add_full_dnode_to_inode(ino #%u, f %p, fn %p)\n", f->inocache->ino, f, fn)); + /* Obsoleted. This cannot happen, surely? 
dwmw2 20020308 */ + BUG_ON(ref_obsolete(ref)); - if (unlikely(!fn->size)) + crc = crc32(0, rd, sizeof(*rd) - 8); + if (unlikely(crc != je32_to_cpu(rd->node_crc))) { + JFFS2_NOTICE("header CRC failed on dirent node at %#08x: read %#08x, calculated %#08x\n", + ref_offset(ref), je32_to_cpu(rd->node_crc), crc); + jffs2_mark_node_obsolete(c, ref); return 0; + } - newfrag = jffs2_alloc_node_frag(); - if (unlikely(!newfrag)) - return -ENOMEM; + /* If we've never checked the CRCs on this node, check them now */ + if (ref_flags(ref) == REF_UNCHECKED) { + struct jffs2_eraseblock *jeb; + int len; - D2(printk(KERN_DEBUG "adding node %04x-%04x @0x%08x on flash, newfrag *%p\n", - fn->ofs, fn->ofs+fn->size, ref_offset(fn->raw), newfrag)); - - newfrag->ofs = fn->ofs; - newfrag->size = fn->size; - newfrag->node = fn; - newfrag->node->frags = 1; + /* Sanity check */ + if (unlikely(PAD((rd->nsize + sizeof(*rd))) != PAD(je32_to_cpu(rd->totlen)))) { + JFFS2_ERROR("illegal nsize in node at %#08x: nsize %#02x, totlen %#04x\n", + ref_offset(ref), rd->nsize, je32_to_cpu(rd->totlen)); + jffs2_mark_node_obsolete(c, ref); + return 0; + } - ret = jffs2_add_frag_to_fragtree(c, &f->fragtree, newfrag); - if (ret) - return ret; + jeb = &c->blocks[ref->flash_offset / c->sector_size]; + len = ref_totlen(c, jeb, ref); + + spin_lock(&c->erase_completion_lock); + jeb->used_size += len; + jeb->unchecked_size -= len; + c->used_size += len; + c->unchecked_size -= len; + ref->flash_offset = ref_offset(ref) | dirent_node_state(rd); + spin_unlock(&c->erase_completion_lock); + } + + fd = jffs2_alloc_full_dirent(rd->nsize + 1); + if (unlikely(!fd)) + return -ENOMEM; - /* If we now share a page with other nodes, mark either previous - or next node REF_NORMAL, as appropriate. */ - if (newfrag->ofs & (PAGE_CACHE_SIZE-1)) { - struct jffs2_node_frag *prev = frag_prev(newfrag); + fd->raw = ref; + fd->version = je32_to_cpu(rd->version); + fd->ino = je32_to_cpu(rd->ino); + fd->type = rd->type; - mark_ref_normal(fn->raw); - /* If we don't start at zero there's _always_ a previous */ - if (prev->node) - mark_ref_normal(prev->node->raw); + if (fd->version > rii->highest_version) + rii->highest_version = fd->version; + + /* Pick out the mctime of the latest dirent */ + if(fd->version > rii->mctime_ver && je32_to_cpu(rd->mctime)) { + rii->mctime_ver = fd->version; + rii->latest_mctime = je32_to_cpu(rd->mctime); } - if ((newfrag->ofs+newfrag->size) & (PAGE_CACHE_SIZE-1)) { - struct jffs2_node_frag *next = frag_next(newfrag); - - if (next) { - mark_ref_normal(fn->raw); - if (next->node) - mark_ref_normal(next->node->raw); + /* + * Copy as much of the name as possible from the raw + * dirent we've already read from the flash. + */ + if (read > sizeof(*rd)) + memcpy(&fd->name[0], &rd->name[0], + min_t(uint32_t, rd->nsize, (read - sizeof(*rd)) )); + + /* Do we need to copy any more of the name directly from the flash? 
*/ + if (rd->nsize + sizeof(*rd) > read) { + /* FIXME: point() */ + int err; + int already = read - sizeof(*rd); + + err = jffs2_flash_read(c, (ref_offset(ref)) + read, + rd->nsize - already, &read, &fd->name[already]); + if (unlikely(read != rd->nsize - already) && likely(!err)) + return -EIO; + + if (unlikely(err)) { + JFFS2_ERROR("read remainder of name: error %d\n", err); + jffs2_free_full_dirent(fd); + return -EIO; } } - D2(if (jffs2_sanitycheck_fragtree(f)) { - printk(KERN_WARNING "Just added node %04x-%04x @0x%08x on flash, newfrag *%p\n", - fn->ofs, fn->ofs+fn->size, ref_offset(fn->raw), newfrag); - return 0; - }) - D2(jffs2_print_frag_list(f)); + + fd->nhash = full_name_hash(fd->name, rd->nsize); + fd->next = NULL; + fd->name[rd->nsize] = '\0'; + + /* + * Wheee. We now have a complete jffs2_full_dirent structure, with + * the name in it and everything. Link it into the list + */ + jffs2_add_fd_to_list(c, fd, &rii->fds); + return 0; } -/* Doesn't set inode->i_size */ -static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *list, struct jffs2_node_frag *newfrag) +/* + * Helper function for jffs2_get_inode_nodes(). + * It is called every time an inode node is found. + * + * Returns: 0 on success (possibly after marking a bad node obsolete); + * negative error code on failure. + */ +static inline int read_dnode(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, + struct jffs2_raw_inode *rd, int rdlen, + struct jffs2_readinode_info *rii) { - struct jffs2_node_frag *this; - uint32_t lastend; + struct jffs2_tmp_dnode_info *tn; + uint32_t len, csize; + int ret = 0; + uint32_t crc; - /* Skip all the nodes which are completed before this one starts */ - this = jffs2_lookup_node_frag(list, newfrag->node->ofs); + /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */ + BUG_ON(ref_obsolete(ref)); - if (this) { - D2(printk(KERN_DEBUG "j_a_f_d_t_f: Lookup gave frag 0x%04x-0x%04x; phys 0x%08x (*%p)\n", - this->ofs, this->ofs+this->size, this->node?(ref_offset(this->node->raw)):0xffffffff, this)); - lastend = this->ofs + this->size; - } else { - D2(printk(KERN_DEBUG "j_a_f_d_t_f: Lookup gave no frag\n")); - lastend = 0; - } - - /* See if we ran off the end of the list */ - if (lastend <= newfrag->ofs) { - /* We did */ - - /* Check if 'this' node was on the same page as the new node. - If so, both 'this' and the new node get marked REF_NORMAL so - the GC can take a look. 
- */ - if (lastend && (lastend-1) >> PAGE_CACHE_SHIFT == newfrag->ofs >> PAGE_CACHE_SHIFT) { - if (this->node) - mark_ref_normal(this->node->raw); - mark_ref_normal(newfrag->node->raw); + crc = crc32(0, rd, sizeof(*rd) - 8); + if (unlikely(crc != je32_to_cpu(rd->node_crc))) { + JFFS2_NOTICE("node CRC failed on dnode at %#08x: read %#08x, calculated %#08x\n", + ref_offset(ref), je32_to_cpu(rd->node_crc), crc); + jffs2_mark_node_obsolete(c, ref); + return 0; + } + + tn = jffs2_alloc_tmp_dnode_info(); + if (!tn) { + JFFS2_ERROR("failed to allocate tn (%zu bytes).\n", sizeof(*tn)); + return -ENOMEM; + } + + tn->partial_crc = 0; + csize = je32_to_cpu(rd->csize); + + /* If we've never checked the CRCs on this node, check them now */ + if (ref_flags(ref) == REF_UNCHECKED) { + + /* Sanity checks */ + if (unlikely(je32_to_cpu(rd->offset) > je32_to_cpu(rd->isize)) || + unlikely(PAD(je32_to_cpu(rd->csize) + sizeof(*rd)) != PAD(je32_to_cpu(rd->totlen)))) { + JFFS2_WARNING("inode node header CRC is corrupted at %#08x\n", ref_offset(ref)); + jffs2_dbg_dump_node(c, ref_offset(ref)); + jffs2_mark_node_obsolete(c, ref); + goto free_out; } - if (lastend < newfrag->node->ofs) { - /* ... and we need to put a hole in before the new node */ - struct jffs2_node_frag *holefrag = jffs2_alloc_node_frag(); - if (!holefrag) { - jffs2_free_node_frag(newfrag); - return -ENOMEM; + if (jffs2_is_writebuffered(c) && csize != 0) { + /* At this point we are supposed to check the data CRC + * of our unchecked node. But thus far, we do not + * know whether the node is valid or obsolete. To + * figure this out, we need to walk all the nodes of + * the inode and build the inode fragtree. We don't + * want to spend time checking data of nodes which may + * later be found to be obsolete. So we put off the full + * data CRC checking until we have read all the inode + * nodes and have started building the fragtree. + * + * The fragtree is being built starting with nodes + * having the highest version number, so we'll be able + * to detect whether a node is valid (i.e., it is not + * overlapped by a node with higher version) or not. + * And we'll be able to check only those nodes, which + * are not obsolete. + * + * Of course, this optimization only makes sense in case + * of NAND flashes (or other flashes with + * !jffs2_can_mark_obsolete()), since on NOR flashes + * nodes are marked obsolete physically. + * + * Since NAND flashes (or other flashes with + * jffs2_is_writebuffered(c)) are anyway read by + * fractions of c->wbuf_pagesize, and we have just read + * the node header, it is likely that the starting part + * of the node data is also read when we read the + * header. So we don't mind to check the CRC of the + * starting part of the data of the node now, and check + * the second part later (in jffs2_check_node_data()). + * Of course, we will not need to re-read and re-check + * the NAND page which we have just read. This is why we + * read the whole NAND page at jffs2_get_inode_nodes(), + * while we needed only the node header. + */ + unsigned char *buf; + + /* 'buf' will point to the start of data */ + buf = (unsigned char *)rd + sizeof(*rd); + /* len will be the read data length */ + len = min_t(uint32_t, rdlen - sizeof(*rd), csize); + tn->partial_crc = crc32(0, buf, len); + + dbg_readinode("Calculates CRC (%#08x) for %d bytes, csize %d\n", tn->partial_crc, len, csize); + + /* If we actually calculated the whole data CRC + * and it is wrong, drop the node. 
*/ + if (len >= csize && unlikely(tn->partial_crc != je32_to_cpu(rd->data_crc))) { + JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n", + ref_offset(ref), tn->partial_crc, je32_to_cpu(rd->data_crc)); + jffs2_mark_node_obsolete(c, ref); + goto free_out; } - holefrag->ofs = lastend; - holefrag->size = newfrag->node->ofs - lastend; - holefrag->node = NULL; - if (this) { - /* By definition, the 'this' node has no right-hand child, - because there are no frags with offset greater than it. - So that's where we want to put the hole */ - D2(printk(KERN_DEBUG "Adding hole frag (%p) on right of node at (%p)\n", holefrag, this)); - rb_link_node(&holefrag->rb, &this->rb, &this->rb.rb_right); - } else { - D2(printk(KERN_DEBUG "Adding hole frag (%p) at root of tree\n", holefrag)); - rb_link_node(&holefrag->rb, NULL, &list->rb_node); - } - rb_insert_color(&holefrag->rb, list); - this = holefrag; - } - if (this) { - /* By definition, the 'this' node has no right-hand child, - because there are no frags with offset greater than it. - So that's where we want to put the hole */ - D2(printk(KERN_DEBUG "Adding new frag (%p) on right of node at (%p)\n", newfrag, this)); - rb_link_node(&newfrag->rb, &this->rb, &this->rb.rb_right); - } else { - D2(printk(KERN_DEBUG "Adding new frag (%p) at root of tree\n", newfrag)); - rb_link_node(&newfrag->rb, NULL, &list->rb_node); + + } else if (csize == 0) { + /* + * We checked the header CRC. If the node has no data, adjust + * the space accounting now. For other nodes this will be done + * later either when the node is marked obsolete or when its + * data is checked. + */ + struct jffs2_eraseblock *jeb; + + dbg_readinode("the node has no data.\n"); + jeb = &c->blocks[ref->flash_offset / c->sector_size]; + len = ref_totlen(c, jeb, ref); + + spin_lock(&c->erase_completion_lock); + jeb->used_size += len; + jeb->unchecked_size -= len; + c->used_size += len; + c->unchecked_size -= len; + ref->flash_offset = ref_offset(ref) | REF_NORMAL; + spin_unlock(&c->erase_completion_lock); } - rb_insert_color(&newfrag->rb, list); - return 0; } - D2(printk(KERN_DEBUG "j_a_f_d_t_f: dealing with frag 0x%04x-0x%04x; phys 0x%08x (*%p)\n", - this->ofs, this->ofs+this->size, this->node?(ref_offset(this->node->raw)):0xffffffff, this)); + tn->fn = jffs2_alloc_full_dnode(); + if (!tn->fn) { + JFFS2_ERROR("alloc fn failed\n"); + ret = -ENOMEM; + goto free_out; + } - /* OK. 'this' is pointing at the first frag that newfrag->ofs at least partially obsoletes, - * - i.e. newfrag->ofs < this->ofs+this->size && newfrag->ofs >= this->ofs - */ - if (newfrag->ofs > this->ofs) { - /* This node isn't completely obsoleted. 
The start of it remains valid */ - - /* Mark the new node and the partially covered node REF_NORMAL -- let - the GC take a look at them */ - mark_ref_normal(newfrag->node->raw); - if (this->node) - mark_ref_normal(this->node->raw); - - if (this->ofs + this->size > newfrag->ofs + newfrag->size) { - /* The new node splits 'this' frag into two */ - struct jffs2_node_frag *newfrag2 = jffs2_alloc_node_frag(); - if (!newfrag2) { - jffs2_free_node_frag(newfrag); - return -ENOMEM; - } - D2(printk(KERN_DEBUG "split old frag 0x%04x-0x%04x -->", this->ofs, this->ofs+this->size); - if (this->node) - printk("phys 0x%08x\n", ref_offset(this->node->raw)); - else - printk("hole\n"); - ) - - /* New second frag pointing to this's node */ - newfrag2->ofs = newfrag->ofs + newfrag->size; - newfrag2->size = (this->ofs+this->size) - newfrag2->ofs; - newfrag2->node = this->node; - if (this->node) - this->node->frags++; - - /* Adjust size of original 'this' */ - this->size = newfrag->ofs - this->ofs; - - /* Now, we know there's no node with offset - greater than this->ofs but smaller than - newfrag2->ofs or newfrag->ofs, for obvious - reasons. So we can do a tree insert from - 'this' to insert newfrag, and a tree insert - from newfrag to insert newfrag2. */ - jffs2_fragtree_insert(newfrag, this); - rb_insert_color(&newfrag->rb, list); - - jffs2_fragtree_insert(newfrag2, newfrag); - rb_insert_color(&newfrag2->rb, list); - - return 0; - } - /* New node just reduces 'this' frag in size, doesn't split it */ - this->size = newfrag->ofs - this->ofs; - - /* Again, we know it lives down here in the tree */ - jffs2_fragtree_insert(newfrag, this); - rb_insert_color(&newfrag->rb, list); - } else { - /* New frag starts at the same point as 'this' used to. Replace - it in the tree without doing a delete and insertion */ - D2(printk(KERN_DEBUG "Inserting newfrag (*%p),%d-%d in before 'this' (*%p),%d-%d\n", - newfrag, newfrag->ofs, newfrag->ofs+newfrag->size, - this, this->ofs, this->ofs+this->size)); - - rb_replace_node(&this->rb, &newfrag->rb, list); - - if (newfrag->ofs + newfrag->size >= this->ofs+this->size) { - D2(printk(KERN_DEBUG "Obsoleting node frag %p (%x-%x)\n", this, this->ofs, this->ofs+this->size)); - jffs2_obsolete_node_frag(c, this); - } else { - this->ofs += newfrag->size; - this->size -= newfrag->size; + tn->version = je32_to_cpu(rd->version); + tn->fn->ofs = je32_to_cpu(rd->offset); + tn->data_crc = je32_to_cpu(rd->data_crc); + tn->csize = csize; + tn->fn->raw = ref; + tn->overlapped = 0; - jffs2_fragtree_insert(this, newfrag); - rb_insert_color(&this->rb, list); - return 0; - } + if (tn->version > rii->highest_version) + rii->highest_version = tn->version; + + /* There was a bug where we wrote hole nodes out with + csize/dsize swapped. Deal with it */ + if (rd->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(rd->dsize) && csize) + tn->fn->size = csize; + else // normal case... 
+ tn->fn->size = je32_to_cpu(rd->dsize); + + dbg_readinode2("dnode @%08x: ver %u, offset %#04x, dsize %#04x, csize %#04x\n", + ref_offset(ref), je32_to_cpu(rd->version), + je32_to_cpu(rd->offset), je32_to_cpu(rd->dsize), csize); + + ret = jffs2_add_tn_to_tree(c, rii, tn); + + if (ret) { + jffs2_free_full_dnode(tn->fn); + free_out: + jffs2_free_tmp_dnode_info(tn); + return ret; } - /* OK, now we have newfrag added in the correct place in the tree, but - frag_next(newfrag) may be a fragment which is overlapped by it - */ - while ((this = frag_next(newfrag)) && newfrag->ofs + newfrag->size >= this->ofs + this->size) { - /* 'this' frag is obsoleted completely. */ - D2(printk(KERN_DEBUG "Obsoleting node frag %p (%x-%x) and removing from tree\n", this, this->ofs, this->ofs+this->size)); - rb_erase(&this->rb, list); - jffs2_obsolete_node_frag(c, this); +#ifdef JFFS2_DBG_READINODE2_MESSAGES + dbg_readinode2("After adding ver %d:\n", je32_to_cpu(rd->version)); + tn = tn_first(&rii->tn_root); + while (tn) { + dbg_readinode2("%p: v %d r 0x%x-0x%x ov %d\n", + tn, tn->version, tn->fn->ofs, + tn->fn->ofs+tn->fn->size, tn->overlapped); + tn = tn_next(tn); } - /* Now we're pointing at the first frag which isn't totally obsoleted by - the new frag */ +#endif + return 0; +} - if (!this || newfrag->ofs + newfrag->size == this->ofs) { +/* + * Helper function for jffs2_get_inode_nodes(). + * It is called every time an unknown node is found. + * + * Returns: 0 on success; + * negative error code on failure. + */ +static inline int read_unknown(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, struct jffs2_unknown_node *un) +{ + /* We don't mark unknown nodes as REF_UNCHECKED */ + if (ref_flags(ref) == REF_UNCHECKED) { + JFFS2_ERROR("REF_UNCHECKED but unknown node at %#08x\n", + ref_offset(ref)); + JFFS2_ERROR("Node is {%04x,%04x,%08x,%08x}. Please report this error.\n", + je16_to_cpu(un->magic), je16_to_cpu(un->nodetype), + je32_to_cpu(un->totlen), je32_to_cpu(un->hdr_crc)); + jffs2_mark_node_obsolete(c, ref); return 0; } - /* Still some overlap but we don't need to move it in the tree */ - this->size = (this->ofs + this->size) - (newfrag->ofs + newfrag->size); - this->ofs = newfrag->ofs + newfrag->size; - /* And mark them REF_NORMAL so the GC takes a look at them */ - if (this->node) - mark_ref_normal(this->node->raw); - mark_ref_normal(newfrag->node->raw); + un->nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(un->nodetype)); + + switch(je16_to_cpu(un->nodetype) & JFFS2_COMPAT_MASK) { + + case JFFS2_FEATURE_INCOMPAT: + JFFS2_ERROR("unknown INCOMPAT nodetype %#04X at %#08x\n", + je16_to_cpu(un->nodetype), ref_offset(ref)); + /* EEP */ + BUG(); + break; + + case JFFS2_FEATURE_ROCOMPAT: + JFFS2_ERROR("unknown ROCOMPAT nodetype %#04X at %#08x\n", + je16_to_cpu(un->nodetype), ref_offset(ref)); + BUG_ON(!(c->flags & JFFS2_SB_FLAG_RO)); + break; + + case JFFS2_FEATURE_RWCOMPAT_COPY: + JFFS2_NOTICE("unknown RWCOMPAT_COPY nodetype %#04X at %#08x\n", + je16_to_cpu(un->nodetype), ref_offset(ref)); + break; + + case JFFS2_FEATURE_RWCOMPAT_DELETE: + JFFS2_NOTICE("unknown RWCOMPAT_DELETE nodetype %#04X at %#08x\n", + je16_to_cpu(un->nodetype), ref_offset(ref)); + jffs2_mark_node_obsolete(c, ref); + return 0; + } return 0; } -void jffs2_truncate_fraglist (struct jffs2_sb_info *c, struct rb_root *list, uint32_t size) +/* + * Helper function for jffs2_get_inode_nodes(). + * The function detects whether more data should be read and reads it if yes. 
+ * + * Returns: 0 on success; + * negative error code on failure. + */ +static int read_more(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, + int needed_len, int *rdlen, unsigned char *buf) { - struct jffs2_node_frag *frag = jffs2_lookup_node_frag(list, size); + int err, to_read = needed_len - *rdlen; + size_t retlen; + uint32_t offs; - D1(printk(KERN_DEBUG "Truncating fraglist to 0x%08x bytes\n", size)); + if (jffs2_is_writebuffered(c)) { + int rem = to_read % c->wbuf_pagesize; - /* We know frag->ofs <= size. That's what lookup does for us */ - if (frag && frag->ofs != size) { - if (frag->ofs+frag->size >= size) { - D1(printk(KERN_DEBUG "Truncating frag 0x%08x-0x%08x\n", frag->ofs, frag->ofs+frag->size)); - frag->size = size - frag->ofs; - } - frag = frag_next(frag); + if (rem) + to_read += c->wbuf_pagesize - rem; } - while (frag && frag->ofs >= size) { - struct jffs2_node_frag *next = frag_next(frag); - D1(printk(KERN_DEBUG "Removing frag 0x%08x-0x%08x\n", frag->ofs, frag->ofs+frag->size)); - frag_erase(frag, list); - jffs2_obsolete_node_frag(c, frag); - frag = next; + /* We need to read more data */ + offs = ref_offset(ref) + *rdlen; + + dbg_readinode("read more %d bytes\n", to_read); + + err = jffs2_flash_read(c, offs, to_read, &retlen, buf + *rdlen); + if (err) { + JFFS2_ERROR("can not read %d bytes from 0x%08x, " + "error code: %d.\n", to_read, offs, err); + return err; } -} -/* Scan the list of all nodes present for this ino, build map of versions, etc. */ + if (retlen < to_read) { + JFFS2_ERROR("short read at %#08x: %zu instead of %d.\n", + offs, retlen, to_read); + return -EIO; + } -static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, - struct jffs2_inode_info *f, - struct jffs2_raw_inode *latest_node); + *rdlen += to_read; + return 0; +} -int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, - uint32_t ino, struct jffs2_raw_inode *latest_node) +/* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated + with this ino. Perform a preliminary ordering on data nodes, throwing away + those which are completely obsoleted by newer ones. The naïve approach we + use to take of just returning them _all_ in version order will cause us to + run out of memory in certain degenerate cases. */ +static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f, + struct jffs2_readinode_info *rii) { - D2(printk(KERN_DEBUG "jffs2_do_read_inode(): getting inocache\n")); + struct jffs2_raw_node_ref *ref, *valid_ref; + unsigned char *buf = NULL; + union jffs2_node_union *node; + size_t retlen; + int len, err; - retry_inocache: - spin_lock(&c->inocache_lock); - f->inocache = jffs2_get_ino_cache(c, ino); + rii->mctime_ver = 0; - D2(printk(KERN_DEBUG "jffs2_do_read_inode(): Got inocache at %p\n", f->inocache)); + dbg_readinode("ino #%u\n", f->inocache->ino); + + /* FIXME: in case of NOR and available ->point() this + * needs to be fixed. */ + len = sizeof(union jffs2_node_union) + c->wbuf_pagesize; + buf = kmalloc(len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + spin_lock(&c->erase_completion_lock); + valid_ref = jffs2_first_valid_node(f->inocache->nodes); + if (!valid_ref && f->inocache->ino != 1) + JFFS2_WARNING("Eep. No valid nodes for ino #%u.\n", f->inocache->ino); + while (valid_ref) { + /* We can hold a pointer to a non-obsolete node without the spinlock, + but _obsolete_ nodes may disappear at any time, if the block + they're in gets erased. 
So if we mark 'ref' obsolete while we're + not holding the lock, it can go away immediately. For that reason, + we find the next valid node first, before processing 'ref'. + */ + ref = valid_ref; + valid_ref = jffs2_first_valid_node(ref->next_in_ino); + spin_unlock(&c->erase_completion_lock); + + cond_resched(); + + /* + * At this point we don't know the type of the node we're going + * to read, so we do not know the size of its header. In order + * to minimize the amount of flash IO we assume the header is + * of size = JFFS2_MIN_NODE_HEADER. + */ + len = JFFS2_MIN_NODE_HEADER; + if (jffs2_is_writebuffered(c)) { + int end, rem; + + /* + * We are about to read JFFS2_MIN_NODE_HEADER bytes, + * but this flash has some minimal I/O unit. It is + * possible that we'll need to read more soon, so read + * up to the next min. I/O unit, in order not to + * re-read the same min. I/O unit twice. + */ + end = ref_offset(ref) + len; + rem = end % c->wbuf_pagesize; + if (rem) + end += c->wbuf_pagesize - rem; + len = end - ref_offset(ref); + } + + dbg_readinode("read %d bytes at %#08x(%d).\n", len, ref_offset(ref), ref_flags(ref)); + + /* FIXME: point() */ + err = jffs2_flash_read(c, ref_offset(ref), len, &retlen, buf); + if (err) { + JFFS2_ERROR("can not read %d bytes from 0x%08x, error code: %d.\n", len, ref_offset(ref), err); + goto free_out; + } + + if (retlen < len) { + JFFS2_ERROR("short read at %#08x: %zu instead of %d.\n", ref_offset(ref), retlen, len); + err = -EIO; + goto free_out; + } + + node = (union jffs2_node_union *)buf; + + /* No need to mask in the valid bit; it shouldn't be invalid */ + if (je32_to_cpu(node->u.hdr_crc) != crc32(0, node, sizeof(node->u)-4)) { + JFFS2_NOTICE("Node header CRC failed at %#08x. {%04x,%04x,%08x,%08x}\n", + ref_offset(ref), je16_to_cpu(node->u.magic), + je16_to_cpu(node->u.nodetype), + je32_to_cpu(node->u.totlen), + je32_to_cpu(node->u.hdr_crc)); + jffs2_dbg_dump_node(c, ref_offset(ref)); + jffs2_mark_node_obsolete(c, ref); + goto cont; + } + if (je16_to_cpu(node->u.magic) != JFFS2_MAGIC_BITMASK) { + /* Not a JFFS2 node, whinge and move on */ + JFFS2_NOTICE("Wrong magic bitmask 0x%04x in node header at %#08x.\n", + je16_to_cpu(node->u.magic), ref_offset(ref)); + jffs2_mark_node_obsolete(c, ref); + goto cont; + } + + switch (je16_to_cpu(node->u.nodetype)) { + + case JFFS2_NODETYPE_DIRENT: + + if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_dirent) && + len < sizeof(struct jffs2_raw_dirent)) { + err = read_more(c, ref, sizeof(struct jffs2_raw_dirent), &len, buf); + if (unlikely(err)) + goto free_out; + } + + err = read_direntry(c, ref, &node->d, retlen, rii); + if (unlikely(err)) + goto free_out; - if (f->inocache) { - /* Check its state. We may need to wait before we can use it */ - switch(f->inocache->state) { - case INO_STATE_UNCHECKED: - case INO_STATE_CHECKEDABSENT: - f->inocache->state = INO_STATE_READING; break; - - case INO_STATE_CHECKING: - case INO_STATE_GC: - /* If it's in either of these states, we need - to wait for whoever's got it to finish and - put it back. */ - D1(printk(KERN_DEBUG "jffs2_get_ino_cache_read waiting for ino #%u in state %d\n", - ino, f->inocache->state)); - sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); - goto retry_inocache; - case INO_STATE_READING: - case INO_STATE_PRESENT: - /* Eep. This should never happen. It can - happen if Linux calls read_inode() again - before clear_inode() has finished though. */ - printk(KERN_WARNING "Eep. 
Trying to read_inode #%u when it's already in state %d!\n", ino, f->inocache->state); - /* Fail. That's probably better than allowing it to succeed */ - f->inocache = NULL; + case JFFS2_NODETYPE_INODE: + + if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_inode) && + len < sizeof(struct jffs2_raw_inode)) { + err = read_more(c, ref, sizeof(struct jffs2_raw_inode), &len, buf); + if (unlikely(err)) + goto free_out; + } + + err = read_dnode(c, ref, &node->i, len, rii); + if (unlikely(err)) + goto free_out; + break; default: - BUG(); - } - } - spin_unlock(&c->inocache_lock); + if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_unknown_node) && + len < sizeof(struct jffs2_unknown_node)) { + err = read_more(c, ref, sizeof(struct jffs2_unknown_node), &len, buf); + if (unlikely(err)) + goto free_out; + } + + err = read_unknown(c, ref, &node->u); + if (unlikely(err)) + goto free_out; - if (!f->inocache && ino == 1) { - /* Special case - no root inode on medium */ - f->inocache = jffs2_alloc_inode_cache(); - if (!f->inocache) { - printk(KERN_CRIT "jffs2_do_read_inode(): Cannot allocate inocache for root inode\n"); - return -ENOMEM; } - D1(printk(KERN_DEBUG "jffs2_do_read_inode(): Creating inocache for root inode\n")); - memset(f->inocache, 0, sizeof(struct jffs2_inode_cache)); - f->inocache->ino = f->inocache->nlink = 1; - f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache; - f->inocache->state = INO_STATE_READING; - jffs2_add_ino_cache(c, f->inocache); + cont: + spin_lock(&c->erase_completion_lock); } - if (!f->inocache) { - printk(KERN_WARNING "jffs2_do_read_inode() on nonexistent ino %u\n", ino); - return -ENOENT; - } - - return jffs2_do_read_inode_internal(c, f, latest_node); -} -int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) -{ - struct jffs2_raw_inode n; - struct jffs2_inode_info *f = kmalloc(sizeof(*f), GFP_KERNEL); - int ret; + spin_unlock(&c->erase_completion_lock); + kfree(buf); - if (!f) - return -ENOMEM; + f->highest_version = rii->highest_version; - memset(f, 0, sizeof(*f)); - init_MUTEX_LOCKED(&f->sem); - f->inocache = ic; + dbg_readinode("nodes of inode #%u were read, the highest version is %u, latest_mctime %u, mctime_ver %u.\n", + f->inocache->ino, rii->highest_version, rii->latest_mctime, + rii->mctime_ver); + return 0; - ret = jffs2_do_read_inode_internal(c, f, &n); - if (!ret) { - up(&f->sem); - jffs2_do_clear_inode(c, f); - } - kfree (f); - return ret; + free_out: + jffs2_free_tmp_dnode_info_list(&rii->tn_root); + jffs2_free_full_dirent_list(rii->fds); + rii->fds = NULL; + kfree(buf); + return err; } -static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, +static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_inode *latest_node) { - struct jffs2_tmp_dnode_info *tn = NULL; - struct rb_root tn_list; - struct rb_node *rb, *repl_rb; - struct jffs2_full_dirent *fd_list; - struct jffs2_full_dnode *fn = NULL; - uint32_t crc; - uint32_t latest_mctime, mctime_ver; - uint32_t mdata_ver = 0; + struct jffs2_readinode_info rii; + uint32_t crc, new_size; size_t retlen; int ret; - D1(printk(KERN_DEBUG "jffs2_do_read_inode_internal(): ino #%u nlink is %d\n", f->inocache->ino, f->inocache->nlink)); + dbg_readinode("ino #%u pino/nlink is %d\n", f->inocache->ino, + f->inocache->pino_nlink); + + memset(&rii, 0, sizeof(rii)); /* Grab all nodes relevant to this ino */ - ret = jffs2_get_inode_nodes(c, f, &tn_list, &fd_list, &f->highest_version, &latest_mctime, &mctime_ver); + ret = 
jffs2_get_inode_nodes(c, f, &rii); if (ret) { - printk(KERN_CRIT "jffs2_get_inode_nodes() for ino %u returned %d\n", f->inocache->ino, ret); + JFFS2_ERROR("cannot read nodes for ino %u, returned error is %d\n", f->inocache->ino, ret); if (f->inocache->state == INO_STATE_READING) jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT); return ret; } - f->dents = fd_list; - rb = rb_first(&tn_list); - - while (rb) { - tn = rb_entry(rb, struct jffs2_tmp_dnode_info, rb); - fn = tn->fn; - - if (f->metadata) { - if (likely(tn->version >= mdata_ver)) { - D1(printk(KERN_DEBUG "Obsoleting old metadata at 0x%08x\n", ref_offset(f->metadata->raw))); - jffs2_mark_node_obsolete(c, f->metadata->raw); - jffs2_free_full_dnode(f->metadata); - f->metadata = NULL; - - mdata_ver = 0; - } else { - /* This should never happen. */ - printk(KERN_WARNING "Er. New metadata at 0x%08x with ver %d is actually older than previous ver %d at 0x%08x\n", - ref_offset(fn->raw), tn->version, mdata_ver, ref_offset(f->metadata->raw)); - jffs2_mark_node_obsolete(c, fn->raw); - jffs2_free_full_dnode(fn); - /* Fill in latest_node from the metadata, not this one we're about to free... */ - fn = f->metadata; - goto next_tn; - } + ret = jffs2_build_inode_fragtree(c, f, &rii); + if (ret) { + JFFS2_ERROR("Failed to build final fragtree for inode #%u: error %d\n", + f->inocache->ino, ret); + if (f->inocache->state == INO_STATE_READING) + jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT); + jffs2_free_tmp_dnode_info_list(&rii.tn_root); + /* FIXME: We could at least crc-check them all */ + if (rii.mdata_tn) { + jffs2_free_full_dnode(rii.mdata_tn->fn); + jffs2_free_tmp_dnode_info(rii.mdata_tn); + rii.mdata_tn = NULL; } + return ret; + } - if (fn->size) { - jffs2_add_full_dnode_to_inode(c, f, fn); + if (rii.mdata_tn) { + if (rii.mdata_tn->fn->raw == rii.latest_ref) { + f->metadata = rii.mdata_tn->fn; + jffs2_free_tmp_dnode_info(rii.mdata_tn); } else { - /* Zero-sized node at end of version list. Just a metadata update */ - D1(printk(KERN_DEBUG "metadata @%08x: ver %d\n", ref_offset(fn->raw), tn->version)); - f->metadata = fn; - mdata_ver = tn->version; - } - next_tn: - BUG_ON(rb->rb_left); - if (rb->rb_parent && rb->rb_parent->rb_left == rb) { - /* We were then left-hand child of our parent. We need - to move our own right-hand child into our place. */ - repl_rb = rb->rb_right; - if (repl_rb) - repl_rb->rb_parent = rb->rb_parent; - } else - repl_rb = NULL; - - rb = rb_next(rb); - - /* Remove the spent tn from the tree; don't bother rebalancing - but put our right-hand child in our own place. */ - if (tn->rb.rb_parent) { - if (tn->rb.rb_parent->rb_left == &tn->rb) - tn->rb.rb_parent->rb_left = repl_rb; - else if (tn->rb.rb_parent->rb_right == &tn->rb) - tn->rb.rb_parent->rb_right = repl_rb; - else BUG(); - } else if (tn->rb.rb_right) - tn->rb.rb_right->rb_parent = NULL; - - jffs2_free_tmp_dnode_info(tn); + jffs2_kill_tn(c, rii.mdata_tn); + } + rii.mdata_tn = NULL; } - D1(jffs2_sanitycheck_fragtree(f)); - if (!fn) { + f->dents = rii.fds; + + jffs2_dbg_fragtree_paranoia_check_nolock(f); + + if (unlikely(!rii.latest_ref)) { /* No data nodes for this inode. 
*/ if (f->inocache->ino != 1) { - printk(KERN_WARNING "jffs2_do_read_inode(): No data nodes found for ino #%u\n", f->inocache->ino); - if (!fd_list) { + JFFS2_WARNING("no data nodes found for ino #%u\n", f->inocache->ino); + if (!rii.fds) { if (f->inocache->state == INO_STATE_READING) jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT); return -EIO; } - printk(KERN_WARNING "jffs2_do_read_inode(): But it has children so we fake some modes for it\n"); + JFFS2_NOTICE("but it has children so we fake some modes for it\n"); } latest_node->mode = cpu_to_jemode(S_IFDIR|S_IRUGO|S_IWUSR|S_IXUGO); latest_node->version = cpu_to_je32(0); @@ -606,37 +1198,43 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, return 0; } - ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(*latest_node), &retlen, (void *)latest_node); + ret = jffs2_flash_read(c, ref_offset(rii.latest_ref), sizeof(*latest_node), &retlen, (void *)latest_node); if (ret || retlen != sizeof(*latest_node)) { - printk(KERN_NOTICE "MTD read in jffs2_do_read_inode() failed: Returned %d, %zd of %zd bytes read\n", - ret, retlen, sizeof(*latest_node)); + JFFS2_ERROR("failed to read from flash: error %d, %zd of %zd bytes read\n", + ret, retlen, sizeof(*latest_node)); /* FIXME: If this fails, there seems to be a memory leak. Find it. */ - up(&f->sem); + mutex_unlock(&f->sem); jffs2_do_clear_inode(c, f); return ret?ret:-EIO; } crc = crc32(0, latest_node, sizeof(*latest_node)-8); if (crc != je32_to_cpu(latest_node->node_crc)) { - printk(KERN_NOTICE "CRC failed for read_inode of inode %u at physical location 0x%x\n", f->inocache->ino, ref_offset(fn->raw)); - up(&f->sem); + JFFS2_ERROR("CRC failed for read_inode of inode %u at physical location 0x%x\n", + f->inocache->ino, ref_offset(rii.latest_ref)); + mutex_unlock(&f->sem); jffs2_do_clear_inode(c, f); return -EIO; } switch(jemode_to_cpu(latest_node->mode) & S_IFMT) { case S_IFDIR: - if (mctime_ver > je32_to_cpu(latest_node->version)) { + if (rii.mctime_ver > je32_to_cpu(latest_node->version)) { /* The times in the latest_node are actually older than mctime in the latest dirent. Cheat. */ - latest_node->ctime = latest_node->mtime = cpu_to_je32(latest_mctime); + latest_node->ctime = latest_node->mtime = cpu_to_je32(rii.latest_mctime); } break; - + case S_IFREG: /* If it was a regular file, truncate it to the latest node's isize */ - jffs2_truncate_fraglist(c, &f->fragtree, je32_to_cpu(latest_node->isize)); + new_size = jffs2_truncate_fragtree(c, &f->fragtree, je32_to_cpu(latest_node->isize)); + if (new_size != je32_to_cpu(latest_node->isize)) { + JFFS2_WARNING("Truncating ino #%u to %d bytes failed because it only had %d bytes to start with!\n", + f->inocache->ino, je32_to_cpu(latest_node->isize), new_size); + latest_node->isize = cpu_to_je32(new_size); + } break; case S_IFLNK: @@ -649,37 +1247,39 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, if (f->inocache->state != INO_STATE_CHECKING) { /* Symlink's inode data is the target path. Read it and - * keep in RAM to facilitate quick follow symlink operation. - * We use f->dents field to store the target path, which - * is somewhat ugly. */ - f->dents = kmalloc(je32_to_cpu(latest_node->csize) + 1, GFP_KERNEL); - if (!f->dents) { - printk(KERN_WARNING "Can't allocate %d bytes of memory " - "for the symlink target path cache\n", - je32_to_cpu(latest_node->csize)); - up(&f->sem); + * keep in RAM to facilitate quick follow symlink + * operation. 
*/ + uint32_t csize = je32_to_cpu(latest_node->csize); + if (csize > JFFS2_MAX_NAME_LEN) { + mutex_unlock(&f->sem); + jffs2_do_clear_inode(c, f); + return -ENAMETOOLONG; + } + f->target = kmalloc(csize + 1, GFP_KERNEL); + if (!f->target) { + JFFS2_ERROR("can't allocate %u bytes of memory for the symlink target path cache\n", csize); + mutex_unlock(&f->sem); jffs2_do_clear_inode(c, f); return -ENOMEM; } - - ret = jffs2_flash_read(c, ref_offset(fn->raw) + sizeof(*latest_node), - je32_to_cpu(latest_node->csize), &retlen, (char *)f->dents); - - if (ret || retlen != je32_to_cpu(latest_node->csize)) { - if (retlen != je32_to_cpu(latest_node->csize)) + + ret = jffs2_flash_read(c, ref_offset(rii.latest_ref) + sizeof(*latest_node), + csize, &retlen, (char *)f->target); + + if (ret || retlen != csize) { + if (retlen != csize) ret = -EIO; - kfree(f->dents); - f->dents = NULL; - up(&f->sem); + kfree(f->target); + f->target = NULL; + mutex_unlock(&f->sem); jffs2_do_clear_inode(c, f); - return -ret; + return ret; } - ((char *)f->dents)[je32_to_cpu(latest_node->csize)] = '\0'; - D1(printk(KERN_DEBUG "jffs2_do_read_inode(): symlink's target '%s' cached\n", - (char *)f->dents)); + f->target[csize] = '\0'; + dbg_readinode("symlink's target '%s' cached\n", f->target); } - + /* fall through... */ case S_IFBLK: @@ -687,25 +1287,25 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, /* Certain inode types should have only one data node, and it's kept as the metadata node */ if (f->metadata) { - printk(KERN_WARNING "Argh. Special inode #%u with mode 0%o had metadata node\n", + JFFS2_ERROR("Argh. Special inode #%u with mode 0%o had metadata node\n", f->inocache->ino, jemode_to_cpu(latest_node->mode)); - up(&f->sem); + mutex_unlock(&f->sem); jffs2_do_clear_inode(c, f); return -EIO; } if (!frag_first(&f->fragtree)) { - printk(KERN_WARNING "Argh. Special inode #%u with mode 0%o has no fragments\n", + JFFS2_ERROR("Argh. Special inode #%u with mode 0%o has no fragments\n", f->inocache->ino, jemode_to_cpu(latest_node->mode)); - up(&f->sem); + mutex_unlock(&f->sem); jffs2_do_clear_inode(c, f); return -EIO; } /* ASSERT: f->fraglist != NULL */ if (frag_next(frag_first(&f->fragtree))) { - printk(KERN_WARNING "Argh. Special inode #%u with mode 0x%x had more than one node\n", + JFFS2_ERROR("Argh. Special inode #%u with mode 0x%x had more than one node\n", f->inocache->ino, jemode_to_cpu(latest_node->mode)); /* FIXME: Deal with it - check crc32, check for duplicate node, check times and discard the older one */ - up(&f->sem); + mutex_unlock(&f->sem); jffs2_do_clear_inode(c, f); return -EIO; } @@ -721,13 +1321,102 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, return 0; } +/* Scan the list of all nodes present for this ino, build map of versions, etc. */ +int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, + uint32_t ino, struct jffs2_raw_inode *latest_node) +{ + dbg_readinode("read inode #%u\n", ino); + + retry_inocache: + spin_lock(&c->inocache_lock); + f->inocache = jffs2_get_ino_cache(c, ino); + + if (f->inocache) { + /* Check its state. We may need to wait before we can use it */ + switch(f->inocache->state) { + case INO_STATE_UNCHECKED: + case INO_STATE_CHECKEDABSENT: + f->inocache->state = INO_STATE_READING; + break; + + case INO_STATE_CHECKING: + case INO_STATE_GC: + /* If it's in either of these states, we need + to wait for whoever's got it to finish and + put it back. 
*/
+ dbg_readinode("waiting for ino #%u in state %d\n", ino, f->inocache->state);
+ sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
+ goto retry_inocache;
+
+ case INO_STATE_READING:
+ case INO_STATE_PRESENT:
+ /* Eep. This should never happen. It can
+ happen if Linux calls read_inode() again
+ before clear_inode() has finished though. */
+ JFFS2_ERROR("Eep. Trying to read_inode #%u when it's already in state %d!\n", ino, f->inocache->state);
+ /* Fail. That's probably better than allowing it to succeed */
+ f->inocache = NULL;
+ break;
+
+ default:
+ BUG();
+ }
+ }
+ spin_unlock(&c->inocache_lock);
+
+ if (!f->inocache && ino == 1) {
+ /* Special case - no root inode on medium */
+ f->inocache = jffs2_alloc_inode_cache();
+ if (!f->inocache) {
+ JFFS2_ERROR("cannot allocate inocache for root inode\n");
+ return -ENOMEM;
+ }
+ dbg_readinode("creating inocache for root inode\n");
+ memset(f->inocache, 0, sizeof(struct jffs2_inode_cache));
+ f->inocache->ino = f->inocache->pino_nlink = 1;
+ f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache;
+ f->inocache->state = INO_STATE_READING;
+ jffs2_add_ino_cache(c, f->inocache);
+ }
+ if (!f->inocache) {
+ JFFS2_ERROR("requested to read a nonexistent ino %u\n", ino);
+ return -ENOENT;
+ }
+
+ return jffs2_do_read_inode_internal(c, f, latest_node);
+}
+
+int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
+{
+ struct jffs2_raw_inode n;
+ struct jffs2_inode_info *f = kzalloc(sizeof(*f), GFP_KERNEL);
+ int ret;
+
+ if (!f)
+ return -ENOMEM;
+
+ mutex_init(&f->sem);
+ mutex_lock(&f->sem);
+ f->inocache = ic;
+
+ ret = jffs2_do_read_inode_internal(c, f, &n);
+ if (!ret) {
+ mutex_unlock(&f->sem);
+ jffs2_do_clear_inode(c, f);
+ }
+ jffs2_xattr_do_crccheck_inode(c, ic);
+ kfree (f);
+ return ret;
+}
+
 void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
 {
 struct jffs2_full_dirent *fd, *fds;
 int deleted;
- down(&f->sem);
- deleted = f->inocache && !f->inocache->nlink;
+ jffs2_xattr_delete_inode(c, f->inocache);
+ mutex_lock(&f->sem);
+ deleted = f->inocache && !f->inocache->pino_nlink;
 if (f->inocache && f->inocache->state != INO_STATE_CHECKING)
 jffs2_set_inocache_state(c, f->inocache, INO_STATE_CLEARING);
@@ -740,20 +1429,16 @@ void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
 jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL);
- /* For symlink inodes we us f->dents to store the target path name */
- if (S_ISLNK(OFNI_EDONI_2SFFJ(f)->i_mode)) {
- if (f->dents) {
- kfree(f->dents);
- f->dents = NULL;
- }
- } else {
- fds = f->dents;
+ if (f->target) {
+ kfree(f->target);
+ f->target = NULL;
+ }
- while(fds) {
- fd = fds;
- fds = fd->next;
- jffs2_free_full_dirent(fd);
- }
+ fds = f->dents;
+ while(fds) {
+ fd = fds;
+ fds = fd->next;
+ jffs2_free_full_dirent(fd);
 }
 if (f->inocache && f->inocache->state != INO_STATE_CHECKING) {
@@ -762,5 +1447,5 @@ void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
 jffs2_del_ino_cache(c, f->inocache);
 }
- up(&f->sem);
+ mutex_unlock(&f->sem);
 }
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index b63160f83ba..7654e87b042 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -1,15 +1,16 @@
 /*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
- * Copyright (C) 2001-2003 Red Hat, Inc.
+ * Copyright © 2001-2007 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
* - * $Id: scan.c,v 1.119 2005/02/17 17:51:13 dedekind Exp $ - * */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/kernel.h> #include <linux/sched.h> #include <linux/slab.h> @@ -18,52 +19,34 @@ #include <linux/crc32.h> #include <linux/compiler.h> #include "nodelist.h" +#include "summary.h" +#include "debug.h" + +#define DEFAULT_EMPTY_SCAN_SIZE 256 -#define DEFAULT_EMPTY_SCAN_SIZE 1024 - -#define DIRTY_SPACE(x) do { typeof(x) _x = (x); \ - c->free_size -= _x; c->dirty_size += _x; \ - jeb->free_size -= _x ; jeb->dirty_size += _x; \ - }while(0) -#define USED_SPACE(x) do { typeof(x) _x = (x); \ - c->free_size -= _x; c->used_size += _x; \ - jeb->free_size -= _x ; jeb->used_size += _x; \ - }while(0) -#define UNCHECKED_SPACE(x) do { typeof(x) _x = (x); \ - c->free_size -= _x; c->unchecked_size += _x; \ - jeb->free_size -= _x ; jeb->unchecked_size += _x; \ - }while(0) - -#define noisy_printk(noise, args...) do { \ - if (*(noise)) { \ - printk(KERN_NOTICE args); \ - (*(noise))--; \ - if (!(*(noise))) { \ - printk(KERN_NOTICE "Further such events for this erase block will not be printed\n"); \ - } \ - } \ -} while(0) +#define noisy_printk(noise, fmt, ...) \ +do { \ + if (*(noise)) { \ + pr_notice(fmt, ##__VA_ARGS__); \ + (*(noise))--; \ + if (!(*(noise))) \ + pr_notice("Further such events for this erase block will not be printed\n"); \ + } \ +} while (0) static uint32_t pseudo_random; static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, - unsigned char *buf, uint32_t buf_size); + unsigned char *buf, uint32_t buf_size, struct jffs2_summary *s); -/* These helper functions _must_ increase ofs and also do the dirty/used space accounting. +/* These helper functions _must_ increase ofs and also do the dirty/used space accounting. * Returning an error will abort the mount - bad checksums etc. should just mark the space * as dirty. */ -static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, - struct jffs2_raw_inode *ri, uint32_t ofs); +static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, + struct jffs2_raw_inode *ri, uint32_t ofs, struct jffs2_summary *s); static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, - struct jffs2_raw_dirent *rd, uint32_t ofs); - -#define BLK_STATE_ALLFF 0 -#define BLK_STATE_CLEAN 1 -#define BLK_STATE_PARTDIRTY 2 -#define BLK_STATE_CLEANMARKER 3 -#define BLK_STATE_ALLDIRTY 4 -#define BLK_STATE_BADBLOCK 5 + struct jffs2_raw_dirent *rd, uint32_t ofs, struct jffs2_summary *s); static inline int min_free(struct jffs2_sb_info *c) { @@ -83,60 +66,101 @@ static inline uint32_t EMPTY_SCAN_SIZE(uint32_t sector_size) { return DEFAULT_EMPTY_SCAN_SIZE; } +static int file_dirty(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) +{ + int ret; + + if ((ret = jffs2_prealloc_raw_node_refs(c, jeb, 1))) + return ret; + if ((ret = jffs2_scan_dirty_space(c, jeb, jeb->free_size))) + return ret; + /* Turned wasted size into dirty, since we apparently + think it's recoverable now. 
*/ + jeb->dirty_size += jeb->wasted_size; + c->dirty_size += jeb->wasted_size; + c->wasted_size -= jeb->wasted_size; + jeb->wasted_size = 0; + if (VERYDIRTY(c, jeb->dirty_size)) { + list_add(&jeb->list, &c->very_dirty_list); + } else { + list_add(&jeb->list, &c->dirty_list); + } + return 0; +} + int jffs2_scan_medium(struct jffs2_sb_info *c) { int i, ret; uint32_t empty_blocks = 0, bad_blocks = 0; unsigned char *flashbuf = NULL; uint32_t buf_size = 0; + struct jffs2_summary *s = NULL; /* summary info collected by the scan process */ #ifndef __ECOS - size_t pointlen; - - if (c->mtd->point) { - ret = c->mtd->point (c->mtd, 0, c->mtd->size, &pointlen, &flashbuf); - if (!ret && pointlen < c->mtd->size) { - /* Don't muck about if it won't let us point to the whole flash */ - D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", pointlen)); - c->mtd->unpoint(c->mtd, flashbuf, 0, c->mtd->size); - flashbuf = NULL; - } - if (ret) - D1(printk(KERN_DEBUG "MTD point failed %d\n", ret)); + size_t pointlen, try_size; + + ret = mtd_point(c->mtd, 0, c->mtd->size, &pointlen, + (void **)&flashbuf, NULL); + if (!ret && pointlen < c->mtd->size) { + /* Don't muck about if it won't let us point to the whole flash */ + jffs2_dbg(1, "MTD point returned len too short: 0x%zx\n", + pointlen); + mtd_unpoint(c->mtd, 0, pointlen); + flashbuf = NULL; } + if (ret && ret != -EOPNOTSUPP) + jffs2_dbg(1, "MTD point failed %d\n", ret); #endif if (!flashbuf) { /* For NAND it's quicker to read a whole eraseblock at a time, apparently */ if (jffs2_cleanmarker_oob(c)) - buf_size = c->sector_size; + try_size = c->sector_size; else - buf_size = PAGE_SIZE; + try_size = PAGE_SIZE; - /* Respect kmalloc limitations */ - if (buf_size > 128*1024) - buf_size = 128*1024; + jffs2_dbg(1, "Trying to allocate readbuf of %zu " + "bytes\n", try_size); - D1(printk(KERN_DEBUG "Allocating readbuf of %d bytes\n", buf_size)); - flashbuf = kmalloc(buf_size, GFP_KERNEL); + flashbuf = mtd_kmalloc_up_to(c->mtd, &try_size); if (!flashbuf) return -ENOMEM; + + jffs2_dbg(1, "Allocated readbuf of %zu bytes\n", + try_size); + + buf_size = (uint32_t)try_size; + } + + if (jffs2_sum_active()) { + s = kzalloc(sizeof(struct jffs2_summary), GFP_KERNEL); + if (!s) { + JFFS2_WARNING("Can't allocate memory for summary\n"); + ret = -ENOMEM; + goto out; + } } for (i=0; i<c->nr_blocks; i++) { struct jffs2_eraseblock *jeb = &c->blocks[i]; - ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset), buf_size); + cond_resched(); + + /* reset summary info for next eraseblock scan */ + jffs2_sum_reset_collected(s); + + ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset), + buf_size, s); if (ret < 0) goto out; - ACCT_PARANOIA_CHECK(jeb); + jffs2_dbg_acct_paranoia_check_nolock(c, jeb); /* Now decide which list to put it on */ switch(ret) { case BLK_STATE_ALLFF: - /* - * Empty block. Since we can't be sure it + /* + * Empty block. Since we can't be sure it * was entirely erased, we just queue it for erase * again. It will be marked as such when the erase * is complete. 
Meanwhile we still count it as empty @@ -155,72 +179,66 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) c->nr_free_blocks++; } else { /* Dirt */ - D1(printk(KERN_DEBUG "Adding all-dirty block at 0x%08x to erase_pending_list\n", jeb->offset)); + jffs2_dbg(1, "Adding all-dirty block at 0x%08x to erase_pending_list\n", + jeb->offset); list_add(&jeb->list, &c->erase_pending_list); c->nr_erasing_blocks++; } break; case BLK_STATE_CLEAN: - /* Full (or almost full) of clean data. Clean list */ - list_add(&jeb->list, &c->clean_list); + /* Full (or almost full) of clean data. Clean list */ + list_add(&jeb->list, &c->clean_list); break; case BLK_STATE_PARTDIRTY: - /* Some data, but not full. Dirty list. */ - /* We want to remember the block with most free space - and stick it in the 'nextblock' position to start writing to it. */ - if (jeb->free_size > min_free(c) && - (!c->nextblock || c->nextblock->free_size < jeb->free_size)) { - /* Better candidate for the next writes to go to */ - if (c->nextblock) { - c->nextblock->dirty_size += c->nextblock->free_size + c->nextblock->wasted_size; - c->dirty_size += c->nextblock->free_size + c->nextblock->wasted_size; - c->free_size -= c->nextblock->free_size; - c->wasted_size -= c->nextblock->wasted_size; - c->nextblock->free_size = c->nextblock->wasted_size = 0; - if (VERYDIRTY(c, c->nextblock->dirty_size)) { - list_add(&c->nextblock->list, &c->very_dirty_list); - } else { - list_add(&c->nextblock->list, &c->dirty_list); - } + /* Some data, but not full. Dirty list. */ + /* We want to remember the block with most free space + and stick it in the 'nextblock' position to start writing to it. */ + if (jeb->free_size > min_free(c) && + (!c->nextblock || c->nextblock->free_size < jeb->free_size)) { + /* Better candidate for the next writes to go to */ + if (c->nextblock) { + ret = file_dirty(c, c->nextblock); + if (ret) + goto out; + /* deleting summary information of the old nextblock */ + jffs2_sum_reset_collected(c->summary); } - c->nextblock = jeb; - } else { - jeb->dirty_size += jeb->free_size + jeb->wasted_size; - c->dirty_size += jeb->free_size + jeb->wasted_size; - c->free_size -= jeb->free_size; - c->wasted_size -= jeb->wasted_size; - jeb->free_size = jeb->wasted_size = 0; - if (VERYDIRTY(c, jeb->dirty_size)) { - list_add(&jeb->list, &c->very_dirty_list); - } else { - list_add(&jeb->list, &c->dirty_list); - } - } + /* update collected summary information for the current nextblock */ + jffs2_sum_move_collected(c, s); + jffs2_dbg(1, "%s(): new nextblock = 0x%08x\n", + __func__, jeb->offset); + c->nextblock = jeb; + } else { + ret = file_dirty(c, jeb); + if (ret) + goto out; + } break; case BLK_STATE_ALLDIRTY: /* Nothing valid - not even a clean marker. Needs erasing. */ - /* For now we just put it on the erasing list. We'll start the erases later */ - D1(printk(KERN_NOTICE "JFFS2: Erase block at 0x%08x is not formatted. It will be erased\n", jeb->offset)); - list_add(&jeb->list, &c->erase_pending_list); + /* For now we just put it on the erasing list. We'll start the erases later */ + jffs2_dbg(1, "Erase block at 0x%08x is not formatted. 
It will be erased\n", + jeb->offset); + list_add(&jeb->list, &c->erase_pending_list); c->nr_erasing_blocks++; break; - + case BLK_STATE_BADBLOCK: - D1(printk(KERN_NOTICE "JFFS2: Block at 0x%08x is bad\n", jeb->offset)); - list_add(&jeb->list, &c->bad_list); + jffs2_dbg(1, "Block at 0x%08x is bad\n", jeb->offset); + list_add(&jeb->list, &c->bad_list); c->bad_size += c->sector_size; c->free_size -= c->sector_size; bad_blocks++; break; default: - printk(KERN_WARNING "jffs2_scan_medium(): unknown block state\n"); - BUG(); + pr_warn("%s(): unknown block state\n", __func__); + BUG(); } } - + /* Nextblock dirty is always seen as wasted, because we cannot recycle it now */ if (c->nextblock && (c->nextblock->dirty_size)) { c->nextblock->wasted_size += c->nextblock->dirty_size; @@ -229,71 +247,208 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) c->nextblock->dirty_size = 0; } #ifdef CONFIG_JFFS2_FS_WRITEBUFFER - if (!jffs2_can_mark_obsolete(c) && c->nextblock && (c->nextblock->free_size & (c->wbuf_pagesize-1))) { - /* If we're going to start writing into a block which already + if (!jffs2_can_mark_obsolete(c) && c->wbuf_pagesize && c->nextblock && (c->nextblock->free_size % c->wbuf_pagesize)) { + /* If we're going to start writing into a block which already contains data, and the end of the data isn't page-aligned, skip a little and align it. */ - uint32_t skip = c->nextblock->free_size & (c->wbuf_pagesize-1); - - D1(printk(KERN_DEBUG "jffs2_scan_medium(): Skipping %d bytes in nextblock to ensure page alignment\n", - skip)); - c->nextblock->wasted_size += skip; - c->wasted_size += skip; + uint32_t skip = c->nextblock->free_size % c->wbuf_pagesize; - c->nextblock->free_size -= skip; - c->free_size -= skip; + jffs2_dbg(1, "%s(): Skipping %d bytes in nextblock to ensure page alignment\n", + __func__, skip); + jffs2_prealloc_raw_node_refs(c, c->nextblock, 1); + jffs2_scan_dirty_space(c, c->nextblock, skip); } #endif if (c->nr_erasing_blocks) { - if ( !c->used_size && ((c->nr_free_blocks+empty_blocks+bad_blocks)!= c->nr_blocks || bad_blocks == c->nr_blocks) ) { - printk(KERN_NOTICE "Cowardly refusing to erase blocks on filesystem with no valid JFFS2 nodes\n"); - printk(KERN_NOTICE "empty_blocks %d, bad_blocks %d, c->nr_blocks %d\n",empty_blocks,bad_blocks,c->nr_blocks); + if ( !c->used_size && ((c->nr_free_blocks+empty_blocks+bad_blocks)!= c->nr_blocks || bad_blocks == c->nr_blocks) ) { + pr_notice("Cowardly refusing to erase blocks on filesystem with no valid JFFS2 nodes\n"); + pr_notice("empty_blocks %d, bad_blocks %d, c->nr_blocks %d\n", + empty_blocks, bad_blocks, c->nr_blocks); ret = -EIO; goto out; } - jffs2_erase_pending_trigger(c); + spin_lock(&c->erase_completion_lock); + jffs2_garbage_collect_trigger(c); + spin_unlock(&c->erase_completion_lock); } ret = 0; out: if (buf_size) kfree(flashbuf); #ifndef __ECOS - else - c->mtd->unpoint(c->mtd, flashbuf, 0, c->mtd->size); + else + mtd_unpoint(c->mtd, 0, c->mtd->size); #endif + kfree(s); return ret; } -static int jffs2_fill_scan_buf (struct jffs2_sb_info *c, unsigned char *buf, - uint32_t ofs, uint32_t len) +static int jffs2_fill_scan_buf(struct jffs2_sb_info *c, void *buf, + uint32_t ofs, uint32_t len) { int ret; size_t retlen; ret = jffs2_flash_read(c, ofs, len, &retlen, buf); if (ret) { - D1(printk(KERN_WARNING "mtd->read(0x%x bytes from 0x%x) returned %d\n", len, ofs, ret)); + jffs2_dbg(1, "mtd->read(0x%x bytes from 0x%x) returned %d\n", + len, ofs, ret); return ret; } if (retlen < len) { - D1(printk(KERN_WARNING "Read at 0x%x gave only 0x%zx 
bytes\n", ofs, retlen)); + jffs2_dbg(1, "Read at 0x%x gave only 0x%zx bytes\n", + ofs, retlen); return -EIO; } - D2(printk(KERN_DEBUG "Read 0x%x bytes from 0x%08x into buf\n", len, ofs)); - D2(printk(KERN_DEBUG "000: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", - buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7], buf[8], buf[9], buf[10], buf[11], buf[12], buf[13], buf[14], buf[15])); return 0; } +int jffs2_scan_classify_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) +{ + if ((jeb->used_size + jeb->unchecked_size) == PAD(c->cleanmarker_size) && !jeb->dirty_size + && (!jeb->first_node || !ref_next(jeb->first_node)) ) + return BLK_STATE_CLEANMARKER; + + /* move blocks with max 4 byte dirty space to cleanlist */ + else if (!ISDIRTY(c->sector_size - (jeb->used_size + jeb->unchecked_size))) { + c->dirty_size -= jeb->dirty_size; + c->wasted_size += jeb->dirty_size; + jeb->wasted_size += jeb->dirty_size; + jeb->dirty_size = 0; + return BLK_STATE_CLEAN; + } else if (jeb->used_size || jeb->unchecked_size) + return BLK_STATE_PARTDIRTY; + else + return BLK_STATE_ALLDIRTY; +} + +#ifdef CONFIG_JFFS2_FS_XATTR +static int jffs2_scan_xattr_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, + struct jffs2_raw_xattr *rx, uint32_t ofs, + struct jffs2_summary *s) +{ + struct jffs2_xattr_datum *xd; + uint32_t xid, version, totlen, crc; + int err; + + crc = crc32(0, rx, sizeof(struct jffs2_raw_xattr) - 4); + if (crc != je32_to_cpu(rx->node_crc)) { + JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n", + ofs, je32_to_cpu(rx->node_crc), crc); + if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rx->totlen)))) + return err; + return 0; + } + + xid = je32_to_cpu(rx->xid); + version = je32_to_cpu(rx->version); + + totlen = PAD(sizeof(struct jffs2_raw_xattr) + + rx->name_len + 1 + je16_to_cpu(rx->value_len)); + if (totlen != je32_to_cpu(rx->totlen)) { + JFFS2_WARNING("node length mismatch at %#08x, read=%u, calc=%u\n", + ofs, je32_to_cpu(rx->totlen), totlen); + if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rx->totlen)))) + return err; + return 0; + } + + xd = jffs2_setup_xattr_datum(c, xid, version); + if (IS_ERR(xd)) + return PTR_ERR(xd); + + if (xd->version > version) { + struct jffs2_raw_node_ref *raw + = jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, totlen, NULL); + raw->next_in_ino = xd->node->next_in_ino; + xd->node->next_in_ino = raw; + } else { + xd->version = version; + xd->xprefix = rx->xprefix; + xd->name_len = rx->name_len; + xd->value_len = je16_to_cpu(rx->value_len); + xd->data_crc = je32_to_cpu(rx->data_crc); + + jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, totlen, (void *)xd); + } + + if (jffs2_sum_active()) + jffs2_sum_add_xattr_mem(s, rx, ofs - jeb->offset); + dbg_xattr("scanning xdatum at %#08x (xid=%u, version=%u)\n", + ofs, xd->xid, xd->version); + return 0; +} + +static int jffs2_scan_xref_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, + struct jffs2_raw_xref *rr, uint32_t ofs, + struct jffs2_summary *s) +{ + struct jffs2_xattr_ref *ref; + uint32_t crc; + int err; + + crc = crc32(0, rr, sizeof(*rr) - 4); + if (crc != je32_to_cpu(rr->node_crc)) { + JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n", + ofs, je32_to_cpu(rr->node_crc), crc); + if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rr->totlen))))) + return err; + return 0; + } + + if (PAD(sizeof(struct jffs2_raw_xref)) != je32_to_cpu(rr->totlen)) { + JFFS2_WARNING("node length mismatch at %#08x, 
read=%u, calc=%zd\n", + ofs, je32_to_cpu(rr->totlen), + PAD(sizeof(struct jffs2_raw_xref))); + if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rr->totlen)))) + return err; + return 0; + } + + ref = jffs2_alloc_xattr_ref(); + if (!ref) + return -ENOMEM; + + /* BEFORE jffs2_build_xattr_subsystem() called, + * and AFTER xattr_ref is marked as a dead xref, + * ref->xid is used to store 32bit xid, xd is not used + * ref->ino is used to store 32bit inode-number, ic is not used + * Thoes variables are declared as union, thus using those + * are exclusive. In a similar way, ref->next is temporarily + * used to chain all xattr_ref object. It's re-chained to + * jffs2_inode_cache in jffs2_build_xattr_subsystem() correctly. + */ + ref->ino = je32_to_cpu(rr->ino); + ref->xid = je32_to_cpu(rr->xid); + ref->xseqno = je32_to_cpu(rr->xseqno); + if (ref->xseqno > c->highest_xseqno) + c->highest_xseqno = (ref->xseqno & ~XREF_DELETE_MARKER); + ref->next = c->xref_temp; + c->xref_temp = ref; + + jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(rr->totlen)), (void *)ref); + + if (jffs2_sum_active()) + jffs2_sum_add_xref_mem(s, rr, ofs - jeb->offset); + dbg_xattr("scan xref at %#08x (xid=%u, ino=%u)\n", + ofs, ref->xid, ref->ino); + return 0; +} +#endif + +/* Called with 'buf_size == 0' if buf is in fact a pointer _directly_ into + the flash, XIP-style */ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, - unsigned char *buf, uint32_t buf_size) { + unsigned char *buf, uint32_t buf_size, struct jffs2_summary *s) { struct jffs2_unknown_node *node; struct jffs2_unknown_node crcnode; - uint32_t ofs, prevofs; + uint32_t ofs, prevofs, max_ofs; uint32_t hdr_crc, buf_ofs, buf_len; int err; int noise = 0; + + #ifdef CONFIG_JFFS2_FS_WRITEBUFFER int cleanmarkerfound = 0; #endif @@ -301,27 +456,98 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo ofs = jeb->offset; prevofs = jeb->offset - 1; - D1(printk(KERN_DEBUG "jffs2_scan_eraseblock(): Scanning block at 0x%x\n", ofs)); + jffs2_dbg(1, "%s(): Scanning block at 0x%x\n", __func__, ofs); #ifdef CONFIG_JFFS2_FS_WRITEBUFFER if (jffs2_cleanmarker_oob(c)) { - int ret = jffs2_check_nand_cleanmarker(c, jeb); - D2(printk(KERN_NOTICE "jffs_check_nand_cleanmarker returned %d\n",ret)); + int ret; + + if (mtd_block_isbad(c->mtd, jeb->offset)) + return BLK_STATE_BADBLOCK; + + ret = jffs2_check_nand_cleanmarker(c, jeb); + jffs2_dbg(2, "jffs_check_nand_cleanmarker returned %d\n", ret); + /* Even if it's not found, we still scan to see if the block is empty. We use this information to decide whether to erase it or not. */ switch (ret) { case 0: cleanmarkerfound = 1; break; case 1: break; - case 2: return BLK_STATE_BADBLOCK; - case 3: return BLK_STATE_ALLDIRTY; /* Block has failed to erase min. once */ default: return ret; } } #endif + + if (jffs2_sum_active()) { + struct jffs2_sum_marker *sm; + void *sumptr = NULL; + uint32_t sumlen; + + if (!buf_size) { + /* XIP case. Just look, point at the summary if it's there */ + sm = (void *)buf + c->sector_size - sizeof(*sm); + if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC) { + sumptr = buf + je32_to_cpu(sm->offset); + sumlen = c->sector_size - je32_to_cpu(sm->offset); + } + } else { + /* If NAND flash, read a whole page of it. 
Else just the end */ + if (c->wbuf_pagesize) + buf_len = c->wbuf_pagesize; + else + buf_len = sizeof(*sm); + + /* Read as much as we want into the _end_ of the preallocated buffer */ + err = jffs2_fill_scan_buf(c, buf + buf_size - buf_len, + jeb->offset + c->sector_size - buf_len, + buf_len); + if (err) + return err; + + sm = (void *)buf + buf_size - sizeof(*sm); + if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC) { + sumlen = c->sector_size - je32_to_cpu(sm->offset); + sumptr = buf + buf_size - sumlen; + + /* Now, make sure the summary itself is available */ + if (sumlen > buf_size) { + /* Need to kmalloc for this. */ + sumptr = kmalloc(sumlen, GFP_KERNEL); + if (!sumptr) + return -ENOMEM; + memcpy(sumptr + sumlen - buf_len, buf + buf_size - buf_len, buf_len); + } + if (buf_len < sumlen) { + /* Need to read more so that the entire summary node is present */ + err = jffs2_fill_scan_buf(c, sumptr, + jeb->offset + c->sector_size - sumlen, + sumlen - buf_len); + if (err) + return err; + } + } + + } + + if (sumptr) { + err = jffs2_sum_scan_sumnode(c, jeb, sumptr, sumlen, &pseudo_random); + + if (buf_size && sumlen > buf_size) + kfree(sumptr); + /* If it returns with a real error, bail. + If it returns positive, that's a block classification + (i.e. BLK_STATE_xxx) so return that too. + If it returns zero, fall through to full scan. */ + if (err) + return err; + } + } + buf_ofs = jeb->offset; if (!buf_size) { + /* This is the XIP case -- we're reading _directly_ from the flash chip */ buf_len = c->sector_size; } else { buf_len = EMPTY_SCAN_SIZE(c->sector_size); @@ -329,20 +555,21 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo if (err) return err; } - + /* We temporarily use 'ofs' as a pointer into the buffer/jeb */ ofs = 0; - - /* Scan only 4KiB of 0xFF before declaring it's empty */ - while(ofs < EMPTY_SCAN_SIZE(c->sector_size) && *(uint32_t *)(&buf[ofs]) == 0xFFFFFFFF) + max_ofs = EMPTY_SCAN_SIZE(c->sector_size); + /* Scan only EMPTY_SCAN_SIZE of 0xFF before declaring it's empty */ + while(ofs < max_ofs && *(uint32_t *)(&buf[ofs]) == 0xFFFFFFFF) ofs += 4; - if (ofs == EMPTY_SCAN_SIZE(c->sector_size)) { + if (ofs == max_ofs) { #ifdef CONFIG_JFFS2_FS_WRITEBUFFER if (jffs2_cleanmarker_oob(c)) { /* scan oob, take care of cleanmarker */ int ret = jffs2_check_oob_empty(c, jeb, cleanmarkerfound); - D2(printk(KERN_NOTICE "jffs2_check_oob_empty returned %d\n",ret)); + jffs2_dbg(2, "jffs2_check_oob_empty returned %d\n", + ret); switch (ret) { case 0: return cleanmarkerfound ? BLK_STATE_CLEANMARKER : BLK_STATE_ALLFF; case 1: return BLK_STATE_ALLDIRTY; @@ -350,16 +577,20 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo } } #endif - D1(printk(KERN_DEBUG "Block at 0x%08x is empty (erased)\n", jeb->offset)); + jffs2_dbg(1, "Block at 0x%08x is empty (erased)\n", + jeb->offset); if (c->cleanmarker_size == 0) return BLK_STATE_CLEANMARKER; /* don't bother with re-erase */ else return BLK_STATE_ALLFF; /* OK to erase if all blocks are like this */ } if (ofs) { - D1(printk(KERN_DEBUG "Free space at %08x ends at %08x\n", jeb->offset, - jeb->offset + ofs)); - DIRTY_SPACE(ofs); + jffs2_dbg(1, "Free space at %08x ends at %08x\n", jeb->offset, + jeb->offset + ofs); + if ((err = jffs2_prealloc_raw_node_refs(c, jeb, 1))) + return err; + if ((err = jffs2_scan_dirty_space(c, jeb, ofs))) + return err; } /* Now ofs is a complete physical flash offset as it always was... 
*/ @@ -367,37 +598,50 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo noise = 10; -scan_more: + dbg_summary("no summary found in jeb 0x%08x. Apply original scan.\n",jeb->offset); + +scan_more: while(ofs < jeb->offset + c->sector_size) { - D1(ACCT_PARANOIA_CHECK(jeb)); + jffs2_dbg_acct_paranoia_check_nolock(c, jeb); + + /* Make sure there are node refs available for use */ + err = jffs2_prealloc_raw_node_refs(c, jeb, 2); + if (err) + return err; cond_resched(); if (ofs & 3) { - printk(KERN_WARNING "Eep. ofs 0x%08x not word-aligned!\n", ofs); + pr_warn("Eep. ofs 0x%08x not word-aligned!\n", ofs); ofs = PAD(ofs); continue; } if (ofs == prevofs) { - printk(KERN_WARNING "ofs 0x%08x has already been seen. Skipping\n", ofs); - DIRTY_SPACE(4); + pr_warn("ofs 0x%08x has already been seen. Skipping\n", + ofs); + if ((err = jffs2_scan_dirty_space(c, jeb, 4))) + return err; ofs += 4; continue; } prevofs = ofs; if (jeb->offset + c->sector_size < ofs + sizeof(*node)) { - D1(printk(KERN_DEBUG "Fewer than %zd bytes left to end of block. (%x+%x<%x+%zx) Not reading\n", sizeof(struct jffs2_unknown_node), - jeb->offset, c->sector_size, ofs, sizeof(*node))); - DIRTY_SPACE((jeb->offset + c->sector_size)-ofs); + jffs2_dbg(1, "Fewer than %zd bytes left to end of block. (%x+%x<%x+%zx) Not reading\n", + sizeof(struct jffs2_unknown_node), + jeb->offset, c->sector_size, ofs, + sizeof(*node)); + if ((err = jffs2_scan_dirty_space(c, jeb, (jeb->offset + c->sector_size)-ofs))) + return err; break; } if (buf_ofs + buf_len < ofs + sizeof(*node)) { buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); - D1(printk(KERN_DEBUG "Fewer than %zd bytes (node header) left to end of buf. Reading 0x%x at 0x%08x\n", - sizeof(struct jffs2_unknown_node), buf_len, ofs)); + jffs2_dbg(1, "Fewer than %zd bytes (node header) left to end of buf. Reading 0x%x at 0x%08x\n", + sizeof(struct jffs2_unknown_node), + buf_len, ofs); err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); if (err) return err; @@ -408,19 +652,21 @@ scan_more: if (*(uint32_t *)(&buf[ofs-buf_ofs]) == 0xffffffff) { uint32_t inbuf_ofs; - uint32_t empty_start; + uint32_t empty_start, scan_end; empty_start = ofs; ofs += 4; + scan_end = min_t(uint32_t, EMPTY_SCAN_SIZE(c->sector_size)/8, buf_len); - D1(printk(KERN_DEBUG "Found empty flash at 0x%08x\n", ofs)); + jffs2_dbg(1, "Found empty flash at 0x%08x\n", ofs); more_empty: inbuf_ofs = ofs - buf_ofs; - while (inbuf_ofs < buf_len) { - if (*(uint32_t *)(&buf[inbuf_ofs]) != 0xffffffff) { - printk(KERN_WARNING "Empty flash at 0x%08x ends at 0x%08x\n", - empty_start, ofs); - DIRTY_SPACE(ofs-empty_start); + while (inbuf_ofs < scan_end) { + if (unlikely(*(uint32_t *)(&buf[inbuf_ofs]) != 0xffffffff)) { + pr_warn("Empty flash at 0x%08x ends at 0x%08x\n", + empty_start, ofs); + if ((err = jffs2_scan_dirty_space(c, jeb, ofs-empty_start))) + return err; goto scan_more; } @@ -428,26 +674,35 @@ scan_more: ofs += 4; } /* Ran off end. */ - D1(printk(KERN_DEBUG "Empty flash to end of buffer at 0x%08x\n", ofs)); + jffs2_dbg(1, "Empty flash to end of buffer at 0x%08x\n", + ofs); /* If we're only checking the beginning of a block with a cleanmarker, bail now */ - if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) && - c->cleanmarker_size && !jeb->dirty_size && !jeb->first_node->next_phys) { - D1(printk(KERN_DEBUG "%d bytes at start of block seems clean... 
assuming all clean\n", EMPTY_SCAN_SIZE(c->sector_size))); + if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) && + c->cleanmarker_size && !jeb->dirty_size && !ref_next(jeb->first_node)) { + jffs2_dbg(1, "%d bytes at start of block seems clean... assuming all clean\n", + EMPTY_SCAN_SIZE(c->sector_size)); return BLK_STATE_CLEANMARKER; } - + if (!buf_size && (scan_end != buf_len)) {/* XIP/point case */ + scan_end = buf_len; + goto more_empty; + } + /* See how much more there is to read in this eraseblock... */ buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); if (!buf_len) { - /* No more to read. Break out of main loop without marking + /* No more to read. Break out of main loop without marking this range of empty space as dirty (because it's not) */ - D1(printk(KERN_DEBUG "Empty flash at %08x runs to end of block. Treating as free_space\n", - empty_start)); + jffs2_dbg(1, "Empty flash at %08x runs to end of block. Treating as free_space\n", + empty_start); break; } - D1(printk(KERN_DEBUG "Reading another 0x%x at 0x%08x\n", buf_len, ofs)); + /* point never reaches here */ + scan_end = buf_len; + jffs2_dbg(1, "Reading another 0x%x at 0x%08x\n", + buf_len, ofs); err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); if (err) return err; @@ -456,30 +711,36 @@ scan_more: } if (ofs == jeb->offset && je16_to_cpu(node->magic) == KSAMTIB_CIGAM_2SFFJ) { - printk(KERN_WARNING "Magic bitmask is backwards at offset 0x%08x. Wrong endian filesystem?\n", ofs); - DIRTY_SPACE(4); + pr_warn("Magic bitmask is backwards at offset 0x%08x. Wrong endian filesystem?\n", + ofs); + if ((err = jffs2_scan_dirty_space(c, jeb, 4))) + return err; ofs += 4; continue; } if (je16_to_cpu(node->magic) == JFFS2_DIRTY_BITMASK) { - D1(printk(KERN_DEBUG "Dirty bitmask at 0x%08x\n", ofs)); - DIRTY_SPACE(4); + jffs2_dbg(1, "Dirty bitmask at 0x%08x\n", ofs); + if ((err = jffs2_scan_dirty_space(c, jeb, 4))) + return err; ofs += 4; continue; } if (je16_to_cpu(node->magic) == JFFS2_OLD_MAGIC_BITMASK) { - printk(KERN_WARNING "Old JFFS2 bitmask found at 0x%08x\n", ofs); - printk(KERN_WARNING "You cannot use older JFFS2 filesystems with newer kernels\n"); - DIRTY_SPACE(4); + pr_warn("Old JFFS2 bitmask found at 0x%08x\n", ofs); + pr_warn("You cannot use older JFFS2 filesystems with newer kernels\n"); + if ((err = jffs2_scan_dirty_space(c, jeb, 4))) + return err; ofs += 4; continue; } if (je16_to_cpu(node->magic) != JFFS2_MAGIC_BITMASK) { /* OK. We're out of possibilities. 
Whinge and move on */ - noisy_printk(&noise, "jffs2_scan_eraseblock(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n", - JFFS2_MAGIC_BITMASK, ofs, + noisy_printk(&noise, "%s(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n", + __func__, + JFFS2_MAGIC_BITMASK, ofs, je16_to_cpu(node->magic)); - DIRTY_SPACE(4); + if ((err = jffs2_scan_dirty_space(c, jeb, 4))) + return err; ofs += 4; continue; } @@ -490,32 +751,36 @@ scan_more: hdr_crc = crc32(0, &crcnode, sizeof(crcnode)-4); if (hdr_crc != je32_to_cpu(node->hdr_crc)) { - noisy_printk(&noise, "jffs2_scan_eraseblock(): Node at 0x%08x {0x%04x, 0x%04x, 0x%08x) has invalid CRC 0x%08x (calculated 0x%08x)\n", + noisy_printk(&noise, "%s(): Node at 0x%08x {0x%04x, 0x%04x, 0x%08x) has invalid CRC 0x%08x (calculated 0x%08x)\n", + __func__, ofs, je16_to_cpu(node->magic), - je16_to_cpu(node->nodetype), + je16_to_cpu(node->nodetype), je32_to_cpu(node->totlen), je32_to_cpu(node->hdr_crc), hdr_crc); - DIRTY_SPACE(4); + if ((err = jffs2_scan_dirty_space(c, jeb, 4))) + return err; ofs += 4; continue; } - if (ofs + je32_to_cpu(node->totlen) > - jeb->offset + c->sector_size) { + if (ofs + je32_to_cpu(node->totlen) > jeb->offset + c->sector_size) { /* Eep. Node goes over the end of the erase block. */ - printk(KERN_WARNING "Node at 0x%08x with length 0x%08x would run over the end of the erase block\n", - ofs, je32_to_cpu(node->totlen)); - printk(KERN_WARNING "Perhaps the file system was created with the wrong erase size?\n"); - DIRTY_SPACE(4); + pr_warn("Node at 0x%08x with length 0x%08x would run over the end of the erase block\n", + ofs, je32_to_cpu(node->totlen)); + pr_warn("Perhaps the file system was created with the wrong erase size?\n"); + if ((err = jffs2_scan_dirty_space(c, jeb, 4))) + return err; ofs += 4; continue; } if (!(je16_to_cpu(node->nodetype) & JFFS2_NODE_ACCURATE)) { /* Wheee. This is an obsoleted node */ - D2(printk(KERN_DEBUG "Node at 0x%08x is obsolete. Skipping\n", ofs)); - DIRTY_SPACE(PAD(je32_to_cpu(node->totlen))); + jffs2_dbg(2, "Node at 0x%08x is obsolete. Skipping\n", + ofs); + if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen))))) + return err; ofs += PAD(je32_to_cpu(node->totlen)); continue; } @@ -524,102 +789,157 @@ scan_more: case JFFS2_NODETYPE_INODE: if (buf_ofs + buf_len < ofs + sizeof(struct jffs2_raw_inode)) { buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); - D1(printk(KERN_DEBUG "Fewer than %zd bytes (inode node) left to end of buf. Reading 0x%x at 0x%08x\n", - sizeof(struct jffs2_raw_inode), buf_len, ofs)); + jffs2_dbg(1, "Fewer than %zd bytes (inode node) left to end of buf. Reading 0x%x at 0x%08x\n", + sizeof(struct jffs2_raw_inode), + buf_len, ofs); err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); if (err) return err; buf_ofs = ofs; node = (void *)buf; } - err = jffs2_scan_inode_node(c, jeb, (void *)node, ofs); + err = jffs2_scan_inode_node(c, jeb, (void *)node, ofs, s); if (err) return err; ofs += PAD(je32_to_cpu(node->totlen)); break; - + case JFFS2_NODETYPE_DIRENT: if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) { buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); - D1(printk(KERN_DEBUG "Fewer than %d bytes (dirent node) left to end of buf. Reading 0x%x at 0x%08x\n", - je32_to_cpu(node->totlen), buf_len, ofs)); + jffs2_dbg(1, "Fewer than %d bytes (dirent node) left to end of buf. 
Reading 0x%x at 0x%08x\n", + je32_to_cpu(node->totlen), buf_len, + ofs); err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); if (err) return err; buf_ofs = ofs; node = (void *)buf; } - err = jffs2_scan_dirent_node(c, jeb, (void *)node, ofs); + err = jffs2_scan_dirent_node(c, jeb, (void *)node, ofs, s); if (err) return err; ofs += PAD(je32_to_cpu(node->totlen)); break; +#ifdef CONFIG_JFFS2_FS_XATTR + case JFFS2_NODETYPE_XATTR: + if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) { + buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); + jffs2_dbg(1, "Fewer than %d bytes (xattr node) left to end of buf. Reading 0x%x at 0x%08x\n", + je32_to_cpu(node->totlen), buf_len, + ofs); + err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); + if (err) + return err; + buf_ofs = ofs; + node = (void *)buf; + } + err = jffs2_scan_xattr_node(c, jeb, (void *)node, ofs, s); + if (err) + return err; + ofs += PAD(je32_to_cpu(node->totlen)); + break; + case JFFS2_NODETYPE_XREF: + if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) { + buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); + jffs2_dbg(1, "Fewer than %d bytes (xref node) left to end of buf. Reading 0x%x at 0x%08x\n", + je32_to_cpu(node->totlen), buf_len, + ofs); + err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); + if (err) + return err; + buf_ofs = ofs; + node = (void *)buf; + } + err = jffs2_scan_xref_node(c, jeb, (void *)node, ofs, s); + if (err) + return err; + ofs += PAD(je32_to_cpu(node->totlen)); + break; +#endif /* CONFIG_JFFS2_FS_XATTR */ + case JFFS2_NODETYPE_CLEANMARKER: - D1(printk(KERN_DEBUG "CLEANMARKER node found at 0x%08x\n", ofs)); + jffs2_dbg(1, "CLEANMARKER node found at 0x%08x\n", ofs); if (je32_to_cpu(node->totlen) != c->cleanmarker_size) { - printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n", - ofs, je32_to_cpu(node->totlen), c->cleanmarker_size); - DIRTY_SPACE(PAD(sizeof(struct jffs2_unknown_node))); + pr_notice("CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n", + ofs, je32_to_cpu(node->totlen), + c->cleanmarker_size); + if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node))))) + return err; ofs += PAD(sizeof(struct jffs2_unknown_node)); } else if (jeb->first_node) { - printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x, not first node in block (0x%08x)\n", ofs, jeb->offset); - DIRTY_SPACE(PAD(sizeof(struct jffs2_unknown_node))); + pr_notice("CLEANMARKER node found at 0x%08x, not first node in block (0x%08x)\n", + ofs, jeb->offset); + if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node))))) + return err; ofs += PAD(sizeof(struct jffs2_unknown_node)); } else { - struct jffs2_raw_node_ref *marker_ref = jffs2_alloc_raw_node_ref(); - if (!marker_ref) { - printk(KERN_NOTICE "Failed to allocate node ref for clean marker\n"); - return -ENOMEM; - } - marker_ref->next_in_ino = NULL; - marker_ref->next_phys = NULL; - marker_ref->flash_offset = ofs | REF_NORMAL; - marker_ref->__totlen = c->cleanmarker_size; - jeb->first_node = jeb->last_node = marker_ref; - - USED_SPACE(PAD(c->cleanmarker_size)); + jffs2_link_node_ref(c, jeb, ofs | REF_NORMAL, c->cleanmarker_size, NULL); + ofs += PAD(c->cleanmarker_size); } break; case JFFS2_NODETYPE_PADDING: - DIRTY_SPACE(PAD(je32_to_cpu(node->totlen))); + if (jffs2_sum_active()) + jffs2_sum_add_padding_mem(s, je32_to_cpu(node->totlen)); + if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen))))) + return err; ofs += 
PAD(je32_to_cpu(node->totlen)); break; default: switch (je16_to_cpu(node->nodetype) & JFFS2_COMPAT_MASK) { case JFFS2_FEATURE_ROCOMPAT: - printk(KERN_NOTICE "Read-only compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs); - c->flags |= JFFS2_SB_FLAG_RO; + pr_notice("Read-only compatible feature node (0x%04x) found at offset 0x%08x\n", + je16_to_cpu(node->nodetype), ofs); + c->flags |= JFFS2_SB_FLAG_RO; if (!(jffs2_is_readonly(c))) return -EROFS; - DIRTY_SPACE(PAD(je32_to_cpu(node->totlen))); + if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen))))) + return err; ofs += PAD(je32_to_cpu(node->totlen)); break; case JFFS2_FEATURE_INCOMPAT: - printk(KERN_NOTICE "Incompatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs); + pr_notice("Incompatible feature node (0x%04x) found at offset 0x%08x\n", + je16_to_cpu(node->nodetype), ofs); return -EINVAL; case JFFS2_FEATURE_RWCOMPAT_DELETE: - D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs)); - DIRTY_SPACE(PAD(je32_to_cpu(node->totlen))); + jffs2_dbg(1, "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", + je16_to_cpu(node->nodetype), ofs); + if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen))))) + return err; ofs += PAD(je32_to_cpu(node->totlen)); break; - case JFFS2_FEATURE_RWCOMPAT_COPY: - D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs)); - USED_SPACE(PAD(je32_to_cpu(node->totlen))); + case JFFS2_FEATURE_RWCOMPAT_COPY: { + jffs2_dbg(1, "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", + je16_to_cpu(node->nodetype), ofs); + + jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(node->totlen)), NULL); + + /* We can't summarise nodes we don't grok */ + jffs2_sum_disable_collecting(s); ofs += PAD(je32_to_cpu(node->totlen)); break; + } } } } + if (jffs2_sum_active()) { + if (PAD(s->sum_size + JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size) { + dbg_summary("There is not enough space for " + "summary information, disabling for this jeb!\n"); + jffs2_sum_disable_collecting(s); + } + } - D1(printk(KERN_DEBUG "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x\n", jeb->offset, - jeb->free_size, jeb->dirty_size, jeb->unchecked_size, jeb->used_size)); - + jffs2_dbg(1, "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x, wasted 0x%08x\n", + jeb->offset, jeb->free_size, jeb->dirty_size, + jeb->unchecked_size, jeb->used_size, jeb->wasted_size); + /* mark_node_obsolete can add to wasted !! 
*/ if (jeb->wasted_size) { jeb->dirty_size += jeb->wasted_size; @@ -628,24 +948,10 @@ scan_more: jeb->wasted_size = 0; } - if ((jeb->used_size + jeb->unchecked_size) == PAD(c->cleanmarker_size) && !jeb->dirty_size - && (!jeb->first_node || !jeb->first_node->next_phys) ) - return BLK_STATE_CLEANMARKER; - - /* move blocks with max 4 byte dirty space to cleanlist */ - else if (!ISDIRTY(c->sector_size - (jeb->used_size + jeb->unchecked_size))) { - c->dirty_size -= jeb->dirty_size; - c->wasted_size += jeb->dirty_size; - jeb->wasted_size += jeb->dirty_size; - jeb->dirty_size = 0; - return BLK_STATE_CLEAN; - } else if (jeb->used_size || jeb->unchecked_size) - return BLK_STATE_PARTDIRTY; - else - return BLK_STATE_ALLDIRTY; + return jffs2_scan_classify_jeb(c, jeb); } -static struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino) +struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino) { struct jffs2_inode_cache *ic; @@ -658,7 +964,7 @@ static struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info ic = jffs2_alloc_inode_cache(); if (!ic) { - printk(KERN_NOTICE "jffs2_scan_make_inode_cache(): allocation of inode cache failed\n"); + pr_notice("%s(): allocation of inode cache failed\n", __func__); return NULL; } memset(ic, 0, sizeof(*ic)); @@ -667,156 +973,136 @@ static struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info ic->nodes = (void *)ic; jffs2_add_ino_cache(c, ic); if (ino == 1) - ic->nlink = 1; + ic->pino_nlink = 1; return ic; } -static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, - struct jffs2_raw_inode *ri, uint32_t ofs) +static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, + struct jffs2_raw_inode *ri, uint32_t ofs, struct jffs2_summary *s) { - struct jffs2_raw_node_ref *raw; struct jffs2_inode_cache *ic; - uint32_t ino = je32_to_cpu(ri->ino); + uint32_t crc, ino = je32_to_cpu(ri->ino); - D1(printk(KERN_DEBUG "jffs2_scan_inode_node(): Node at 0x%08x\n", ofs)); + jffs2_dbg(1, "%s(): Node at 0x%08x\n", __func__, ofs); /* We do very little here now. Just check the ino# to which we should attribute - this node; we can do all the CRC checking etc. later. There's a tradeoff here -- + this node; we can do all the CRC checking etc. later. There's a tradeoff here -- we used to scan the flash once only, reading everything we want from it into memory, then building all our in-core data structures and freeing the extra information. Now we allow the first part of the mount to complete a lot quicker, - but we have to go _back_ to the flash in order to finish the CRC checking, etc. + but we have to go _back_ to the flash in order to finish the CRC checking, etc. Which means that the _full_ amount of time to get to proper write mode with GC operational may actually be _longer_ than before. Sucks to be me. */ - raw = jffs2_alloc_raw_node_ref(); - if (!raw) { - printk(KERN_NOTICE "jffs2_scan_inode_node(): allocation of node reference failed\n"); - return -ENOMEM; + /* Check the node CRC in any case. */ + crc = crc32(0, ri, sizeof(*ri)-8); + if (crc != je32_to_cpu(ri->node_crc)) { + pr_notice("%s(): CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", + __func__, ofs, je32_to_cpu(ri->node_crc), crc); + /* + * We believe totlen because the CRC on the node + * _header_ was OK, just the node itself failed. 
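For illustration only — a minimal stand-alone sketch of the CRC convention these scan functions rely on: crc32 seeded with 0 over the node minus its trailing CRC field(s), e.g. the crc32(0, ri, sizeof(*ri)-8) just above, which skips the data_crc and node_crc members. All names below are hypothetical, PAD() copies JFFS2's 4-byte rounding, and jffs2_crc32() is assumed to match the kernel's crc32(0, ...) here (reflected CRC-32, polynomial 0xEDB88320, seed 0, no final XOR).

	#include <stdint.h>
	#include <stddef.h>

	#define PAD(x) (((x) + 3) & ~3u)	/* JFFS2 rounds node lengths up to 4 bytes */

	/* Reflected CRC-32, seed 0, no final XOR -- assumed equivalent to crc32(0, ...) */
	static uint32_t jffs2_crc32(uint32_t crc, const void *buf, size_t len)
	{
		const uint8_t *p = buf;

		while (len--) {
			crc ^= *p++;
			for (int i = 0; i < 8; i++)
				crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0);
		}
		return crc;
	}

	/* Hypothetical helper: how many bytes the scanner writes off as dirty
	   when the node-body CRC fails.  node_size - 8 skips the trailing
	   data_crc and node_crc fields, mirroring sizeof(*ri) - 8 above. */
	static uint32_t bytes_to_dirty_on_crc_failure(const void *node, size_t node_size,
						      uint32_t stored_node_crc,
						      uint32_t totlen)
	{
		if (jffs2_crc32(0, node, node_size - 8) == stored_node_crc)
			return 0;		/* node body is good, keep it */
		return PAD(totlen);		/* header CRC already vouched for totlen */
	}

Trusting totlen on a body-CRC failure is what lets the scanner skip exactly one damaged node in a single step, instead of re-synchronising four bytes at a time as the header-CRC failure path does.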
+ */ + return jffs2_scan_dirty_space(c, jeb, + PAD(je32_to_cpu(ri->totlen))); } ic = jffs2_get_ino_cache(c, ino); if (!ic) { - /* Inocache get failed. Either we read a bogus ino# or it's just genuinely the - first node we found for this inode. Do a CRC check to protect against the former - case */ - uint32_t crc = crc32(0, ri, sizeof(*ri)-8); - - if (crc != je32_to_cpu(ri->node_crc)) { - printk(KERN_NOTICE "jffs2_scan_inode_node(): CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", - ofs, je32_to_cpu(ri->node_crc), crc); - /* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */ - DIRTY_SPACE(PAD(je32_to_cpu(ri->totlen))); - jffs2_free_raw_node_ref(raw); - return 0; - } ic = jffs2_scan_make_ino_cache(c, ino); - if (!ic) { - jffs2_free_raw_node_ref(raw); + if (!ic) return -ENOMEM; - } } /* Wheee. It worked */ + jffs2_link_node_ref(c, jeb, ofs | REF_UNCHECKED, PAD(je32_to_cpu(ri->totlen)), ic); - raw->flash_offset = ofs | REF_UNCHECKED; - raw->__totlen = PAD(je32_to_cpu(ri->totlen)); - raw->next_phys = NULL; - raw->next_in_ino = ic->nodes; - - ic->nodes = raw; - if (!jeb->first_node) - jeb->first_node = raw; - if (jeb->last_node) - jeb->last_node->next_phys = raw; - jeb->last_node = raw; - - D1(printk(KERN_DEBUG "Node is ino #%u, version %d. Range 0x%x-0x%x\n", + jffs2_dbg(1, "Node is ino #%u, version %d. Range 0x%x-0x%x\n", je32_to_cpu(ri->ino), je32_to_cpu(ri->version), je32_to_cpu(ri->offset), - je32_to_cpu(ri->offset)+je32_to_cpu(ri->dsize))); + je32_to_cpu(ri->offset)+je32_to_cpu(ri->dsize)); pseudo_random += je32_to_cpu(ri->version); - UNCHECKED_SPACE(PAD(je32_to_cpu(ri->totlen))); + if (jffs2_sum_active()) { + jffs2_sum_add_inode_mem(s, ri, ofs - jeb->offset); + } + return 0; } -static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, - struct jffs2_raw_dirent *rd, uint32_t ofs) +static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, + struct jffs2_raw_dirent *rd, uint32_t ofs, struct jffs2_summary *s) { - struct jffs2_raw_node_ref *raw; struct jffs2_full_dirent *fd; struct jffs2_inode_cache *ic; + uint32_t checkedlen; uint32_t crc; + int err; - D1(printk(KERN_DEBUG "jffs2_scan_dirent_node(): Node at 0x%08x\n", ofs)); + jffs2_dbg(1, "%s(): Node at 0x%08x\n", __func__, ofs); /* We don't get here unless the node is still valid, so we don't have to mask in the ACCURATE bit any more. */ crc = crc32(0, rd, sizeof(*rd)-8); if (crc != je32_to_cpu(rd->node_crc)) { - printk(KERN_NOTICE "jffs2_scan_dirent_node(): Node CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", - ofs, je32_to_cpu(rd->node_crc), crc); + pr_notice("%s(): Node CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", + __func__, ofs, je32_to_cpu(rd->node_crc), crc); /* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */ - DIRTY_SPACE(PAD(je32_to_cpu(rd->totlen))); + if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rd->totlen))))) + return err; return 0; } pseudo_random += je32_to_cpu(rd->version); - fd = jffs2_alloc_full_dirent(rd->nsize+1); + /* Should never happen. Did. (OLPC trac #4184)*/ + checkedlen = strnlen(rd->name, rd->nsize); + if (checkedlen < rd->nsize) { + pr_err("Dirent at %08x has zeroes in name. 
Truncating to %d chars\n", + ofs, checkedlen); + } + fd = jffs2_alloc_full_dirent(checkedlen+1); if (!fd) { return -ENOMEM; } - memcpy(&fd->name, rd->name, rd->nsize); - fd->name[rd->nsize] = 0; + memcpy(&fd->name, rd->name, checkedlen); + fd->name[checkedlen] = 0; crc = crc32(0, fd->name, rd->nsize); if (crc != je32_to_cpu(rd->name_crc)) { - printk(KERN_NOTICE "jffs2_scan_dirent_node(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", - ofs, je32_to_cpu(rd->name_crc), crc); - D1(printk(KERN_NOTICE "Name for which CRC failed is (now) '%s', ino #%d\n", fd->name, je32_to_cpu(rd->ino))); + pr_notice("%s(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", + __func__, ofs, je32_to_cpu(rd->name_crc), crc); + jffs2_dbg(1, "Name for which CRC failed is (now) '%s', ino #%d\n", + fd->name, je32_to_cpu(rd->ino)); jffs2_free_full_dirent(fd); /* FIXME: Why do we believe totlen? */ /* We believe totlen because the CRC on the node _header_ was OK, just the name failed. */ - DIRTY_SPACE(PAD(je32_to_cpu(rd->totlen))); + if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rd->totlen))))) + return err; return 0; } - raw = jffs2_alloc_raw_node_ref(); - if (!raw) { - jffs2_free_full_dirent(fd); - printk(KERN_NOTICE "jffs2_scan_dirent_node(): allocation of node reference failed\n"); - return -ENOMEM; - } ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(rd->pino)); if (!ic) { jffs2_free_full_dirent(fd); - jffs2_free_raw_node_ref(raw); return -ENOMEM; } - - raw->__totlen = PAD(je32_to_cpu(rd->totlen)); - raw->flash_offset = ofs | REF_PRISTINE; - raw->next_phys = NULL; - raw->next_in_ino = ic->nodes; - ic->nodes = raw; - if (!jeb->first_node) - jeb->first_node = raw; - if (jeb->last_node) - jeb->last_node->next_phys = raw; - jeb->last_node = raw; - - fd->raw = raw; + + fd->raw = jffs2_link_node_ref(c, jeb, ofs | dirent_node_state(rd), + PAD(je32_to_cpu(rd->totlen)), ic); + fd->next = NULL; fd->version = je32_to_cpu(rd->version); fd->ino = je32_to_cpu(rd->ino); - fd->nhash = full_name_hash(fd->name, rd->nsize); + fd->nhash = full_name_hash(fd->name, checkedlen); fd->type = rd->type; - USED_SPACE(PAD(je32_to_cpu(rd->totlen))); jffs2_add_fd_to_list(c, fd, &ic->scan_dents); + if (jffs2_sum_active()) { + jffs2_sum_add_dirent_mem(s, rd, ofs - jeb->offset); + } + return 0; } @@ -852,76 +1138,34 @@ void jffs2_rotate_lists(struct jffs2_sb_info *c) x = count_list(&c->clean_list); if (x) { rotateby = pseudo_random % x; - D1(printk(KERN_DEBUG "Rotating clean_list by %d\n", rotateby)); - rotate_list((&c->clean_list), rotateby); - - D1(printk(KERN_DEBUG "Erase block at front of clean_list is at %08x\n", - list_entry(c->clean_list.next, struct jffs2_eraseblock, list)->offset)); - } else { - D1(printk(KERN_DEBUG "Not rotating empty clean_list\n")); } x = count_list(&c->very_dirty_list); if (x) { rotateby = pseudo_random % x; - D1(printk(KERN_DEBUG "Rotating very_dirty_list by %d\n", rotateby)); - rotate_list((&c->very_dirty_list), rotateby); - - D1(printk(KERN_DEBUG "Erase block at front of very_dirty_list is at %08x\n", - list_entry(c->very_dirty_list.next, struct jffs2_eraseblock, list)->offset)); - } else { - D1(printk(KERN_DEBUG "Not rotating empty very_dirty_list\n")); } x = count_list(&c->dirty_list); if (x) { rotateby = pseudo_random % x; - D1(printk(KERN_DEBUG "Rotating dirty_list by %d\n", rotateby)); - rotate_list((&c->dirty_list), rotateby); - - D1(printk(KERN_DEBUG "Erase block at front of dirty_list is at %08x\n", - list_entry(c->dirty_list.next, struct jffs2_eraseblock, 
list)->offset)); - } else { - D1(printk(KERN_DEBUG "Not rotating empty dirty_list\n")); } x = count_list(&c->erasable_list); if (x) { rotateby = pseudo_random % x; - D1(printk(KERN_DEBUG "Rotating erasable_list by %d\n", rotateby)); - rotate_list((&c->erasable_list), rotateby); - - D1(printk(KERN_DEBUG "Erase block at front of erasable_list is at %08x\n", - list_entry(c->erasable_list.next, struct jffs2_eraseblock, list)->offset)); - } else { - D1(printk(KERN_DEBUG "Not rotating empty erasable_list\n")); } if (c->nr_erasing_blocks) { rotateby = pseudo_random % c->nr_erasing_blocks; - D1(printk(KERN_DEBUG "Rotating erase_pending_list by %d\n", rotateby)); - rotate_list((&c->erase_pending_list), rotateby); - - D1(printk(KERN_DEBUG "Erase block at front of erase_pending_list is at %08x\n", - list_entry(c->erase_pending_list.next, struct jffs2_eraseblock, list)->offset)); - } else { - D1(printk(KERN_DEBUG "Not rotating empty erase_pending_list\n")); } if (c->nr_free_blocks) { rotateby = pseudo_random % c->nr_free_blocks; - D1(printk(KERN_DEBUG "Rotating free_list by %d\n", rotateby)); - rotate_list((&c->free_list), rotateby); - - D1(printk(KERN_DEBUG "Erase block at front of free_list is at %08x\n", - list_entry(c->free_list.next, struct jffs2_eraseblock, list)->offset)); - } else { - D1(printk(KERN_DEBUG "Not rotating empty free_list\n")); } } diff --git a/fs/jffs2/security.c b/fs/jffs2/security.c new file mode 100644 index 00000000000..aca97f35b29 --- /dev/null +++ b/fs/jffs2/security.c @@ -0,0 +1,89 @@ +/* + * JFFS2 -- Journalling Flash File System, Version 2. + * + * Copyright © 2006 NEC Corporation + * + * Created by KaiGai Kohei <kaigai@ak.jp.nec.com> + * + * For licensing information, see the file 'LICENCE' in this directory. + * + */ + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/fs.h> +#include <linux/time.h> +#include <linux/pagemap.h> +#include <linux/highmem.h> +#include <linux/crc32.h> +#include <linux/jffs2.h> +#include <linux/xattr.h> +#include <linux/mtd/mtd.h> +#include <linux/security.h> +#include "nodelist.h" + +/* ---- Initial Security Label(s) Attachment callback --- */ +static int jffs2_initxattrs(struct inode *inode, + const struct xattr *xattr_array, void *fs_info) +{ + const struct xattr *xattr; + int err = 0; + + for (xattr = xattr_array; xattr->name != NULL; xattr++) { + err = do_jffs2_setxattr(inode, JFFS2_XPREFIX_SECURITY, + xattr->name, xattr->value, + xattr->value_len, 0); + if (err < 0) + break; + } + return err; +} + +/* ---- Initial Security Label(s) Attachment ----------- */ +int jffs2_init_security(struct inode *inode, struct inode *dir, + const struct qstr *qstr) +{ + return security_inode_init_security(inode, dir, qstr, + &jffs2_initxattrs, NULL); +} + +/* ---- XATTR Handler for "security.*" ----------------- */ +static int jffs2_security_getxattr(struct dentry *dentry, const char *name, + void *buffer, size_t size, int type) +{ + if (!strcmp(name, "")) + return -EINVAL; + + return do_jffs2_getxattr(dentry->d_inode, JFFS2_XPREFIX_SECURITY, + name, buffer, size); +} + +static int jffs2_security_setxattr(struct dentry *dentry, const char *name, + const void *buffer, size_t size, int flags, int type) +{ + if (!strcmp(name, "")) + return -EINVAL; + + return do_jffs2_setxattr(dentry->d_inode, JFFS2_XPREFIX_SECURITY, + name, buffer, size, flags); +} + +static size_t jffs2_security_listxattr(struct dentry *dentry, char *list, + size_t list_size, const char *name, size_t name_len, int type) +{ + size_t retlen = XATTR_SECURITY_PREFIX_LEN + 
name_len + 1; + + if (list && retlen <= list_size) { + strcpy(list, XATTR_SECURITY_PREFIX); + strcpy(list + XATTR_SECURITY_PREFIX_LEN, name); + } + + return retlen; +} + +const struct xattr_handler jffs2_security_xattr_handler = { + .prefix = XATTR_SECURITY_PREFIX, + .list = jffs2_security_listxattr, + .set = jffs2_security_setxattr, + .get = jffs2_security_getxattr +}; diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c new file mode 100644 index 00000000000..c522d098bb4 --- /dev/null +++ b/fs/jffs2/summary.c @@ -0,0 +1,873 @@ +/* + * JFFS2 -- Journalling Flash File System, Version 2. + * + * Copyright © 2004 Ferenc Havasi <havasi@inf.u-szeged.hu>, + * Zoltan Sogor <weth@inf.u-szeged.hu>, + * Patrik Kluba <pajko@halom.u-szeged.hu>, + * University of Szeged, Hungary + * 2006 KaiGai Kohei <kaigai@ak.jp.nec.com> + * + * For licensing information, see the file 'LICENCE' in this directory. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/mtd/mtd.h> +#include <linux/pagemap.h> +#include <linux/crc32.h> +#include <linux/compiler.h> +#include <linux/vmalloc.h> +#include "nodelist.h" +#include "debug.h" + +int jffs2_sum_init(struct jffs2_sb_info *c) +{ + uint32_t sum_size = min_t(uint32_t, c->sector_size, MAX_SUMMARY_SIZE); + + c->summary = kzalloc(sizeof(struct jffs2_summary), GFP_KERNEL); + + if (!c->summary) { + JFFS2_WARNING("Can't allocate memory for summary information!\n"); + return -ENOMEM; + } + + c->summary->sum_buf = kmalloc(sum_size, GFP_KERNEL); + + if (!c->summary->sum_buf) { + JFFS2_WARNING("Can't allocate buffer for writing out summary information!\n"); + kfree(c->summary); + return -ENOMEM; + } + + dbg_summary("returned successfully\n"); + + return 0; +} + +void jffs2_sum_exit(struct jffs2_sb_info *c) +{ + dbg_summary("called\n"); + + jffs2_sum_disable_collecting(c->summary); + + kfree(c->summary->sum_buf); + c->summary->sum_buf = NULL; + + kfree(c->summary); + c->summary = NULL; +} + +static int jffs2_sum_add_mem(struct jffs2_summary *s, union jffs2_sum_mem *item) +{ + if (!s->sum_list_head) + s->sum_list_head = (union jffs2_sum_mem *) item; + if (s->sum_list_tail) + s->sum_list_tail->u.next = (union jffs2_sum_mem *) item; + s->sum_list_tail = (union jffs2_sum_mem *) item; + + switch (je16_to_cpu(item->u.nodetype)) { + case JFFS2_NODETYPE_INODE: + s->sum_size += JFFS2_SUMMARY_INODE_SIZE; + s->sum_num++; + dbg_summary("inode (%u) added to summary\n", + je32_to_cpu(item->i.inode)); + break; + case JFFS2_NODETYPE_DIRENT: + s->sum_size += JFFS2_SUMMARY_DIRENT_SIZE(item->d.nsize); + s->sum_num++; + dbg_summary("dirent (%u) added to summary\n", + je32_to_cpu(item->d.ino)); + break; +#ifdef CONFIG_JFFS2_FS_XATTR + case JFFS2_NODETYPE_XATTR: + s->sum_size += JFFS2_SUMMARY_XATTR_SIZE; + s->sum_num++; + dbg_summary("xattr (xid=%u, version=%u) added to summary\n", + je32_to_cpu(item->x.xid), je32_to_cpu(item->x.version)); + break; + case JFFS2_NODETYPE_XREF: + s->sum_size += JFFS2_SUMMARY_XREF_SIZE; + s->sum_num++; + dbg_summary("xref added to summary\n"); + break; +#endif + default: + JFFS2_WARNING("UNKNOWN node type %u\n", + je16_to_cpu(item->u.nodetype)); + return 1; + } + return 0; +} + + +/* The following 3 functions are called from scan.c to collect summary info for not closed jeb */ + +int jffs2_sum_add_padding_mem(struct jffs2_summary *s, uint32_t size) +{ + dbg_summary("called with %u\n", size); + s->sum_padded += size; + return 0; +} + +int jffs2_sum_add_inode_mem(struct jffs2_summary *s, struct 
jffs2_raw_inode *ri, + uint32_t ofs) +{ + struct jffs2_sum_inode_mem *temp = kmalloc(sizeof(struct jffs2_sum_inode_mem), GFP_KERNEL); + + if (!temp) + return -ENOMEM; + + temp->nodetype = ri->nodetype; + temp->inode = ri->ino; + temp->version = ri->version; + temp->offset = cpu_to_je32(ofs); /* relative offset from the beginning of the jeb */ + temp->totlen = ri->totlen; + temp->next = NULL; + + return jffs2_sum_add_mem(s, (union jffs2_sum_mem *)temp); +} + +int jffs2_sum_add_dirent_mem(struct jffs2_summary *s, struct jffs2_raw_dirent *rd, + uint32_t ofs) +{ + struct jffs2_sum_dirent_mem *temp = + kmalloc(sizeof(struct jffs2_sum_dirent_mem) + rd->nsize, GFP_KERNEL); + + if (!temp) + return -ENOMEM; + + temp->nodetype = rd->nodetype; + temp->totlen = rd->totlen; + temp->offset = cpu_to_je32(ofs); /* relative from the beginning of the jeb */ + temp->pino = rd->pino; + temp->version = rd->version; + temp->ino = rd->ino; + temp->nsize = rd->nsize; + temp->type = rd->type; + temp->next = NULL; + + memcpy(temp->name, rd->name, rd->nsize); + + return jffs2_sum_add_mem(s, (union jffs2_sum_mem *)temp); +} + +#ifdef CONFIG_JFFS2_FS_XATTR +int jffs2_sum_add_xattr_mem(struct jffs2_summary *s, struct jffs2_raw_xattr *rx, uint32_t ofs) +{ + struct jffs2_sum_xattr_mem *temp; + + temp = kmalloc(sizeof(struct jffs2_sum_xattr_mem), GFP_KERNEL); + if (!temp) + return -ENOMEM; + + temp->nodetype = rx->nodetype; + temp->xid = rx->xid; + temp->version = rx->version; + temp->offset = cpu_to_je32(ofs); + temp->totlen = rx->totlen; + temp->next = NULL; + + return jffs2_sum_add_mem(s, (union jffs2_sum_mem *)temp); +} + +int jffs2_sum_add_xref_mem(struct jffs2_summary *s, struct jffs2_raw_xref *rr, uint32_t ofs) +{ + struct jffs2_sum_xref_mem *temp; + + temp = kmalloc(sizeof(struct jffs2_sum_xref_mem), GFP_KERNEL); + if (!temp) + return -ENOMEM; + + temp->nodetype = rr->nodetype; + temp->offset = cpu_to_je32(ofs); + temp->next = NULL; + + return jffs2_sum_add_mem(s, (union jffs2_sum_mem *)temp); +} +#endif +/* Cleanup every collected summary information */ + +static void jffs2_sum_clean_collected(struct jffs2_summary *s) +{ + union jffs2_sum_mem *temp; + + if (!s->sum_list_head) { + dbg_summary("already empty\n"); + } + while (s->sum_list_head) { + temp = s->sum_list_head; + s->sum_list_head = s->sum_list_head->u.next; + kfree(temp); + } + s->sum_list_tail = NULL; + s->sum_padded = 0; + s->sum_num = 0; +} + +void jffs2_sum_reset_collected(struct jffs2_summary *s) +{ + dbg_summary("called\n"); + jffs2_sum_clean_collected(s); + s->sum_size = 0; +} + +void jffs2_sum_disable_collecting(struct jffs2_summary *s) +{ + dbg_summary("called\n"); + jffs2_sum_clean_collected(s); + s->sum_size = JFFS2_SUMMARY_NOSUM_SIZE; +} + +int jffs2_sum_is_disabled(struct jffs2_summary *s) +{ + return (s->sum_size == JFFS2_SUMMARY_NOSUM_SIZE); +} + +/* Move the collected summary information into sb (called from scan.c) */ + +void jffs2_sum_move_collected(struct jffs2_sb_info *c, struct jffs2_summary *s) +{ + dbg_summary("oldsize=0x%x oldnum=%u => newsize=0x%x newnum=%u\n", + c->summary->sum_size, c->summary->sum_num, + s->sum_size, s->sum_num); + + c->summary->sum_size = s->sum_size; + c->summary->sum_num = s->sum_num; + c->summary->sum_padded = s->sum_padded; + c->summary->sum_list_head = s->sum_list_head; + c->summary->sum_list_tail = s->sum_list_tail; + + s->sum_list_head = s->sum_list_tail = NULL; +} + +/* Called from wbuf.c to collect writed node info */ + +int jffs2_sum_add_kvec(struct jffs2_sb_info *c, const struct kvec *invecs, + 
unsigned long count, uint32_t ofs) +{ + union jffs2_node_union *node; + struct jffs2_eraseblock *jeb; + + if (c->summary->sum_size == JFFS2_SUMMARY_NOSUM_SIZE) { + dbg_summary("Summary is disabled for this jeb! Skipping summary info!\n"); + return 0; + } + + node = invecs[0].iov_base; + jeb = &c->blocks[ofs / c->sector_size]; + ofs -= jeb->offset; + + switch (je16_to_cpu(node->u.nodetype)) { + case JFFS2_NODETYPE_INODE: { + struct jffs2_sum_inode_mem *temp = + kmalloc(sizeof(struct jffs2_sum_inode_mem), GFP_KERNEL); + + if (!temp) + goto no_mem; + + temp->nodetype = node->i.nodetype; + temp->inode = node->i.ino; + temp->version = node->i.version; + temp->offset = cpu_to_je32(ofs); + temp->totlen = node->i.totlen; + temp->next = NULL; + + return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp); + } + + case JFFS2_NODETYPE_DIRENT: { + struct jffs2_sum_dirent_mem *temp = + kmalloc(sizeof(struct jffs2_sum_dirent_mem) + node->d.nsize, GFP_KERNEL); + + if (!temp) + goto no_mem; + + temp->nodetype = node->d.nodetype; + temp->totlen = node->d.totlen; + temp->offset = cpu_to_je32(ofs); + temp->pino = node->d.pino; + temp->version = node->d.version; + temp->ino = node->d.ino; + temp->nsize = node->d.nsize; + temp->type = node->d.type; + temp->next = NULL; + + switch (count) { + case 1: + memcpy(temp->name,node->d.name,node->d.nsize); + break; + + case 2: + memcpy(temp->name,invecs[1].iov_base,node->d.nsize); + break; + + default: + BUG(); /* impossible count value */ + break; + } + + return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp); + } +#ifdef CONFIG_JFFS2_FS_XATTR + case JFFS2_NODETYPE_XATTR: { + struct jffs2_sum_xattr_mem *temp; + temp = kmalloc(sizeof(struct jffs2_sum_xattr_mem), GFP_KERNEL); + if (!temp) + goto no_mem; + + temp->nodetype = node->x.nodetype; + temp->xid = node->x.xid; + temp->version = node->x.version; + temp->totlen = node->x.totlen; + temp->offset = cpu_to_je32(ofs); + temp->next = NULL; + + return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp); + } + case JFFS2_NODETYPE_XREF: { + struct jffs2_sum_xref_mem *temp; + temp = kmalloc(sizeof(struct jffs2_sum_xref_mem), GFP_KERNEL); + if (!temp) + goto no_mem; + temp->nodetype = node->r.nodetype; + temp->offset = cpu_to_je32(ofs); + temp->next = NULL; + + return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp); + } +#endif + case JFFS2_NODETYPE_PADDING: + dbg_summary("node PADDING\n"); + c->summary->sum_padded += je32_to_cpu(node->u.totlen); + break; + + case JFFS2_NODETYPE_CLEANMARKER: + dbg_summary("node CLEANMARKER\n"); + break; + + case JFFS2_NODETYPE_SUMMARY: + dbg_summary("node SUMMARY\n"); + break; + + default: + /* If you implement a new node type you should also implement + summary support for it or disable summary. + */ + BUG(); + break; + } + + return 0; + +no_mem: + JFFS2_WARNING("MEMORY ALLOCATION ERROR!"); + return -ENOMEM; +} + +static struct jffs2_raw_node_ref *sum_link_node_ref(struct jffs2_sb_info *c, + struct jffs2_eraseblock *jeb, + uint32_t ofs, uint32_t len, + struct jffs2_inode_cache *ic) +{ + /* If there was a gap, mark it dirty */ + if ((ofs & ~3) > c->sector_size - jeb->free_size) { + /* Ew. 
Summary doesn't actually tell us explicitly about dirty space */ + jffs2_scan_dirty_space(c, jeb, (ofs & ~3) - (c->sector_size - jeb->free_size)); + } + + return jffs2_link_node_ref(c, jeb, jeb->offset + ofs, len, ic); +} + +/* Process the stored summary information - helper function for jffs2_sum_scan_sumnode() */ + +static int jffs2_sum_process_sum_data(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, + struct jffs2_raw_summary *summary, uint32_t *pseudo_random) +{ + struct jffs2_inode_cache *ic; + struct jffs2_full_dirent *fd; + void *sp; + int i, ino; + int err; + + sp = summary->sum; + + for (i=0; i<je32_to_cpu(summary->sum_num); i++) { + dbg_summary("processing summary index %d\n", i); + + cond_resched(); + + /* Make sure there's a spare ref for dirty space */ + err = jffs2_prealloc_raw_node_refs(c, jeb, 2); + if (err) + return err; + + switch (je16_to_cpu(((struct jffs2_sum_unknown_flash *)sp)->nodetype)) { + case JFFS2_NODETYPE_INODE: { + struct jffs2_sum_inode_flash *spi; + spi = sp; + + ino = je32_to_cpu(spi->inode); + + dbg_summary("Inode at 0x%08x-0x%08x\n", + jeb->offset + je32_to_cpu(spi->offset), + jeb->offset + je32_to_cpu(spi->offset) + je32_to_cpu(spi->totlen)); + + ic = jffs2_scan_make_ino_cache(c, ino); + if (!ic) { + JFFS2_NOTICE("scan_make_ino_cache failed\n"); + return -ENOMEM; + } + + sum_link_node_ref(c, jeb, je32_to_cpu(spi->offset) | REF_UNCHECKED, + PAD(je32_to_cpu(spi->totlen)), ic); + + *pseudo_random += je32_to_cpu(spi->version); + + sp += JFFS2_SUMMARY_INODE_SIZE; + + break; + } + + case JFFS2_NODETYPE_DIRENT: { + struct jffs2_sum_dirent_flash *spd; + int checkedlen; + spd = sp; + + dbg_summary("Dirent at 0x%08x-0x%08x\n", + jeb->offset + je32_to_cpu(spd->offset), + jeb->offset + je32_to_cpu(spd->offset) + je32_to_cpu(spd->totlen)); + + + /* This should never happen, but https://dev.laptop.org/ticket/4184 */ + checkedlen = strnlen(spd->name, spd->nsize); + if (!checkedlen) { + pr_err("Dirent at %08x has zero at start of name. Aborting mount.\n", + jeb->offset + + je32_to_cpu(spd->offset)); + return -EIO; + } + if (checkedlen < spd->nsize) { + pr_err("Dirent at %08x has zeroes in name. 
Truncating to %d chars\n", + jeb->offset + + je32_to_cpu(spd->offset), + checkedlen); + } + + + fd = jffs2_alloc_full_dirent(checkedlen+1); + if (!fd) + return -ENOMEM; + + memcpy(&fd->name, spd->name, checkedlen); + fd->name[checkedlen] = 0; + + ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(spd->pino)); + if (!ic) { + jffs2_free_full_dirent(fd); + return -ENOMEM; + } + + fd->raw = sum_link_node_ref(c, jeb, je32_to_cpu(spd->offset) | REF_UNCHECKED, + PAD(je32_to_cpu(spd->totlen)), ic); + + fd->next = NULL; + fd->version = je32_to_cpu(spd->version); + fd->ino = je32_to_cpu(spd->ino); + fd->nhash = full_name_hash(fd->name, checkedlen); + fd->type = spd->type; + + jffs2_add_fd_to_list(c, fd, &ic->scan_dents); + + *pseudo_random += je32_to_cpu(spd->version); + + sp += JFFS2_SUMMARY_DIRENT_SIZE(spd->nsize); + + break; + } +#ifdef CONFIG_JFFS2_FS_XATTR + case JFFS2_NODETYPE_XATTR: { + struct jffs2_xattr_datum *xd; + struct jffs2_sum_xattr_flash *spx; + + spx = (struct jffs2_sum_xattr_flash *)sp; + dbg_summary("xattr at %#08x-%#08x (xid=%u, version=%u)\n", + jeb->offset + je32_to_cpu(spx->offset), + jeb->offset + je32_to_cpu(spx->offset) + je32_to_cpu(spx->totlen), + je32_to_cpu(spx->xid), je32_to_cpu(spx->version)); + + xd = jffs2_setup_xattr_datum(c, je32_to_cpu(spx->xid), + je32_to_cpu(spx->version)); + if (IS_ERR(xd)) + return PTR_ERR(xd); + if (xd->version > je32_to_cpu(spx->version)) { + /* node is not the newest one */ + struct jffs2_raw_node_ref *raw + = sum_link_node_ref(c, jeb, je32_to_cpu(spx->offset) | REF_UNCHECKED, + PAD(je32_to_cpu(spx->totlen)), NULL); + raw->next_in_ino = xd->node->next_in_ino; + xd->node->next_in_ino = raw; + } else { + xd->version = je32_to_cpu(spx->version); + sum_link_node_ref(c, jeb, je32_to_cpu(spx->offset) | REF_UNCHECKED, + PAD(je32_to_cpu(spx->totlen)), (void *)xd); + } + *pseudo_random += je32_to_cpu(spx->xid); + sp += JFFS2_SUMMARY_XATTR_SIZE; + + break; + } + case JFFS2_NODETYPE_XREF: { + struct jffs2_xattr_ref *ref; + struct jffs2_sum_xref_flash *spr; + + spr = (struct jffs2_sum_xref_flash *)sp; + dbg_summary("xref at %#08x-%#08x\n", + jeb->offset + je32_to_cpu(spr->offset), + jeb->offset + je32_to_cpu(spr->offset) + + (uint32_t)PAD(sizeof(struct jffs2_raw_xref))); + + ref = jffs2_alloc_xattr_ref(); + if (!ref) { + JFFS2_NOTICE("allocation of xattr_datum failed\n"); + return -ENOMEM; + } + ref->next = c->xref_temp; + c->xref_temp = ref; + + sum_link_node_ref(c, jeb, je32_to_cpu(spr->offset) | REF_UNCHECKED, + PAD(sizeof(struct jffs2_raw_xref)), (void *)ref); + + *pseudo_random += ref->node->flash_offset; + sp += JFFS2_SUMMARY_XREF_SIZE; + + break; + } +#endif + default : { + uint16_t nodetype = je16_to_cpu(((struct jffs2_sum_unknown_flash *)sp)->nodetype); + JFFS2_WARNING("Unsupported node type %x found in summary! 
Exiting...\n", nodetype); + if ((nodetype & JFFS2_COMPAT_MASK) == JFFS2_FEATURE_INCOMPAT) + return -EIO; + + /* For compatible node types, just fall back to the full scan */ + c->wasted_size -= jeb->wasted_size; + c->free_size += c->sector_size - jeb->free_size; + c->used_size -= jeb->used_size; + c->dirty_size -= jeb->dirty_size; + jeb->wasted_size = jeb->used_size = jeb->dirty_size = 0; + jeb->free_size = c->sector_size; + + jffs2_free_jeb_node_refs(c, jeb); + return -ENOTRECOVERABLE; + } + } + } + return 0; +} + +/* Process the summary node - called from jffs2_scan_eraseblock() */ +int jffs2_sum_scan_sumnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, + struct jffs2_raw_summary *summary, uint32_t sumsize, + uint32_t *pseudo_random) +{ + struct jffs2_unknown_node crcnode; + int ret, ofs; + uint32_t crc; + + ofs = c->sector_size - sumsize; + + dbg_summary("summary found for 0x%08x at 0x%08x (0x%x bytes)\n", + jeb->offset, jeb->offset + ofs, sumsize); + + /* OK, now check for node validity and CRC */ + crcnode.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); + crcnode.nodetype = cpu_to_je16(JFFS2_NODETYPE_SUMMARY); + crcnode.totlen = summary->totlen; + crc = crc32(0, &crcnode, sizeof(crcnode)-4); + + if (je32_to_cpu(summary->hdr_crc) != crc) { + dbg_summary("Summary node header is corrupt (bad CRC or " + "no summary at all)\n"); + goto crc_err; + } + + if (je32_to_cpu(summary->totlen) != sumsize) { + dbg_summary("Summary node is corrupt (wrong erasesize?)\n"); + goto crc_err; + } + + crc = crc32(0, summary, sizeof(struct jffs2_raw_summary)-8); + + if (je32_to_cpu(summary->node_crc) != crc) { + dbg_summary("Summary node is corrupt (bad CRC)\n"); + goto crc_err; + } + + crc = crc32(0, summary->sum, sumsize - sizeof(struct jffs2_raw_summary)); + + if (je32_to_cpu(summary->sum_crc) != crc) { + dbg_summary("Summary node data is corrupt (bad CRC)\n"); + goto crc_err; + } + + if ( je32_to_cpu(summary->cln_mkr) ) { + + dbg_summary("Summary : CLEANMARKER node \n"); + + ret = jffs2_prealloc_raw_node_refs(c, jeb, 1); + if (ret) + return ret; + + if (je32_to_cpu(summary->cln_mkr) != c->cleanmarker_size) { + dbg_summary("CLEANMARKER node has totlen 0x%x != normal 0x%x\n", + je32_to_cpu(summary->cln_mkr), c->cleanmarker_size); + if ((ret = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(summary->cln_mkr))))) + return ret; + } else if (jeb->first_node) { + dbg_summary("CLEANMARKER node not first node in block " + "(0x%08x)\n", jeb->offset); + if ((ret = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(summary->cln_mkr))))) + return ret; + } else { + jffs2_link_node_ref(c, jeb, jeb->offset | REF_NORMAL, + je32_to_cpu(summary->cln_mkr), NULL); + } + } + + ret = jffs2_sum_process_sum_data(c, jeb, summary, pseudo_random); + /* -ENOTRECOVERABLE isn't a fatal error -- it means we should do a full + scan of this eraseblock. 
So return zero */ + if (ret == -ENOTRECOVERABLE) + return 0; + if (ret) + return ret; /* real error */ + + /* for PARANOIA_CHECK */ + ret = jffs2_prealloc_raw_node_refs(c, jeb, 2); + if (ret) + return ret; + + sum_link_node_ref(c, jeb, ofs | REF_NORMAL, sumsize, NULL); + + if (unlikely(jeb->free_size)) { + JFFS2_WARNING("Free size 0x%x bytes in eraseblock @0x%08x with summary?\n", + jeb->free_size, jeb->offset); + jeb->wasted_size += jeb->free_size; + c->wasted_size += jeb->free_size; + c->free_size -= jeb->free_size; + jeb->free_size = 0; + } + + return jffs2_scan_classify_jeb(c, jeb); + +crc_err: + JFFS2_WARNING("Summary node crc error, skipping summary information.\n"); + + return 0; +} + +/* Write summary data to flash - helper function for jffs2_sum_write_sumnode() */ + +static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, + uint32_t infosize, uint32_t datasize, int padsize) +{ + struct jffs2_raw_summary isum; + union jffs2_sum_mem *temp; + struct jffs2_sum_marker *sm; + struct kvec vecs[2]; + uint32_t sum_ofs; + void *wpage; + int ret; + size_t retlen; + + if (padsize + datasize > MAX_SUMMARY_SIZE) { + /* It won't fit in the buffer. Abort summary for this jeb */ + jffs2_sum_disable_collecting(c->summary); + + JFFS2_WARNING("Summary too big (%d data, %d pad) in eraseblock at %08x\n", + datasize, padsize, jeb->offset); + /* Non-fatal */ + return 0; + } + /* Is there enough space for summary? */ + if (padsize < 0) { + /* don't try to write out summary for this jeb */ + jffs2_sum_disable_collecting(c->summary); + + JFFS2_WARNING("Not enough space for summary, padsize = %d\n", + padsize); + /* Non-fatal */ + return 0; + } + + memset(c->summary->sum_buf, 0xff, datasize); + memset(&isum, 0, sizeof(isum)); + + isum.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); + isum.nodetype = cpu_to_je16(JFFS2_NODETYPE_SUMMARY); + isum.totlen = cpu_to_je32(infosize); + isum.hdr_crc = cpu_to_je32(crc32(0, &isum, sizeof(struct jffs2_unknown_node) - 4)); + isum.padded = cpu_to_je32(c->summary->sum_padded); + isum.cln_mkr = cpu_to_je32(c->cleanmarker_size); + isum.sum_num = cpu_to_je32(c->summary->sum_num); + wpage = c->summary->sum_buf; + + while (c->summary->sum_num) { + temp = c->summary->sum_list_head; + + switch (je16_to_cpu(temp->u.nodetype)) { + case JFFS2_NODETYPE_INODE: { + struct jffs2_sum_inode_flash *sino_ptr = wpage; + + sino_ptr->nodetype = temp->i.nodetype; + sino_ptr->inode = temp->i.inode; + sino_ptr->version = temp->i.version; + sino_ptr->offset = temp->i.offset; + sino_ptr->totlen = temp->i.totlen; + + wpage += JFFS2_SUMMARY_INODE_SIZE; + + break; + } + + case JFFS2_NODETYPE_DIRENT: { + struct jffs2_sum_dirent_flash *sdrnt_ptr = wpage; + + sdrnt_ptr->nodetype = temp->d.nodetype; + sdrnt_ptr->totlen = temp->d.totlen; + sdrnt_ptr->offset = temp->d.offset; + sdrnt_ptr->pino = temp->d.pino; + sdrnt_ptr->version = temp->d.version; + sdrnt_ptr->ino = temp->d.ino; + sdrnt_ptr->nsize = temp->d.nsize; + sdrnt_ptr->type = temp->d.type; + + memcpy(sdrnt_ptr->name, temp->d.name, + temp->d.nsize); + + wpage += JFFS2_SUMMARY_DIRENT_SIZE(temp->d.nsize); + + break; + } +#ifdef CONFIG_JFFS2_FS_XATTR + case JFFS2_NODETYPE_XATTR: { + struct jffs2_sum_xattr_flash *sxattr_ptr = wpage; + + temp = c->summary->sum_list_head; + sxattr_ptr->nodetype = temp->x.nodetype; + sxattr_ptr->xid = temp->x.xid; + sxattr_ptr->version = temp->x.version; + sxattr_ptr->offset = temp->x.offset; + sxattr_ptr->totlen = temp->x.totlen; + + wpage += JFFS2_SUMMARY_XATTR_SIZE; + break; + } + case 
JFFS2_NODETYPE_XREF: { + struct jffs2_sum_xref_flash *sxref_ptr = wpage; + + temp = c->summary->sum_list_head; + sxref_ptr->nodetype = temp->r.nodetype; + sxref_ptr->offset = temp->r.offset; + + wpage += JFFS2_SUMMARY_XREF_SIZE; + break; + } +#endif + default : { + if ((je16_to_cpu(temp->u.nodetype) & JFFS2_COMPAT_MASK) + == JFFS2_FEATURE_RWCOMPAT_COPY) { + dbg_summary("Writing unknown RWCOMPAT_COPY node type %x\n", + je16_to_cpu(temp->u.nodetype)); + jffs2_sum_disable_collecting(c->summary); + } else { + BUG(); /* unknown node in summary information */ + } + } + } + + c->summary->sum_list_head = temp->u.next; + kfree(temp); + + c->summary->sum_num--; + } + + jffs2_sum_reset_collected(c->summary); + + wpage += padsize; + + sm = wpage; + sm->offset = cpu_to_je32(c->sector_size - jeb->free_size); + sm->magic = cpu_to_je32(JFFS2_SUM_MAGIC); + + isum.sum_crc = cpu_to_je32(crc32(0, c->summary->sum_buf, datasize)); + isum.node_crc = cpu_to_je32(crc32(0, &isum, sizeof(isum) - 8)); + + vecs[0].iov_base = &isum; + vecs[0].iov_len = sizeof(isum); + vecs[1].iov_base = c->summary->sum_buf; + vecs[1].iov_len = datasize; + + sum_ofs = jeb->offset + c->sector_size - jeb->free_size; + + dbg_summary("writing out data to flash to pos : 0x%08x\n", sum_ofs); + + ret = jffs2_flash_writev(c, vecs, 2, sum_ofs, &retlen, 0); + + if (ret || (retlen != infosize)) { + + JFFS2_WARNING("Write of %u bytes at 0x%08x failed. returned %d, retlen %zd\n", + infosize, sum_ofs, ret, retlen); + + if (retlen) { + /* Waste remaining space */ + spin_lock(&c->erase_completion_lock); + jffs2_link_node_ref(c, jeb, sum_ofs | REF_OBSOLETE, infosize, NULL); + spin_unlock(&c->erase_completion_lock); + } + + c->summary->sum_size = JFFS2_SUMMARY_NOSUM_SIZE; + + return 0; + } + + spin_lock(&c->erase_completion_lock); + jffs2_link_node_ref(c, jeb, sum_ofs | REF_NORMAL, infosize, NULL); + spin_unlock(&c->erase_completion_lock); + + return 0; +} + +/* Write out summary information - called from jffs2_do_reserve_space */ + +int jffs2_sum_write_sumnode(struct jffs2_sb_info *c) +{ + int datasize, infosize, padsize; + struct jffs2_eraseblock *jeb; + int ret = 0; + + dbg_summary("called\n"); + + spin_unlock(&c->erase_completion_lock); + + jeb = c->nextblock; + jffs2_prealloc_raw_node_refs(c, jeb, 1); + + if (!c->summary->sum_num || !c->summary->sum_list_head) { + JFFS2_WARNING("Empty summary info!!!\n"); + BUG(); + } + + datasize = c->summary->sum_size + sizeof(struct jffs2_sum_marker); + infosize = sizeof(struct jffs2_raw_summary) + datasize; + padsize = jeb->free_size - infosize; + infosize += padsize; + datasize += padsize; + + ret = jffs2_sum_write_data(c, jeb, infosize, datasize, padsize); + spin_lock(&c->erase_completion_lock); + return ret; +} diff --git a/fs/jffs2/summary.h b/fs/jffs2/summary.h new file mode 100644 index 00000000000..60207a2ae95 --- /dev/null +++ b/fs/jffs2/summary.h @@ -0,0 +1,213 @@ +/* + * JFFS2 -- Journalling Flash File System, Version 2. + * + * Copyright © 2004 Ferenc Havasi <havasi@inf.u-szeged.hu>, + * Zoltan Sogor <weth@inf.u-szeged.hu>, + * Patrik Kluba <pajko@halom.u-szeged.hu>, + * University of Szeged, Hungary + * + * For licensing information, see the file 'LICENCE' in this directory. + * + */ + +#ifndef JFFS2_SUMMARY_H +#define JFFS2_SUMMARY_H + +/* Limit summary size to 64KiB so that we can kmalloc it. If the summary + is larger than that, we have to just ditch it and avoid using summary + for the eraseblock in question... and it probably doesn't hurt us much + anyway. 
*/ +#define MAX_SUMMARY_SIZE 65536 + +#include <linux/uio.h> +#include <linux/jffs2.h> + +#define BLK_STATE_ALLFF 0 +#define BLK_STATE_CLEAN 1 +#define BLK_STATE_PARTDIRTY 2 +#define BLK_STATE_CLEANMARKER 3 +#define BLK_STATE_ALLDIRTY 4 +#define BLK_STATE_BADBLOCK 5 + +#define JFFS2_SUMMARY_NOSUM_SIZE 0xffffffff +#define JFFS2_SUMMARY_INODE_SIZE (sizeof(struct jffs2_sum_inode_flash)) +#define JFFS2_SUMMARY_DIRENT_SIZE(x) (sizeof(struct jffs2_sum_dirent_flash) + (x)) +#define JFFS2_SUMMARY_XATTR_SIZE (sizeof(struct jffs2_sum_xattr_flash)) +#define JFFS2_SUMMARY_XREF_SIZE (sizeof(struct jffs2_sum_xref_flash)) + +/* Summary structures used on flash */ + +struct jffs2_sum_unknown_flash +{ + jint16_t nodetype; /* node type */ +}; + +struct jffs2_sum_inode_flash +{ + jint16_t nodetype; /* node type */ + jint32_t inode; /* inode number */ + jint32_t version; /* inode version */ + jint32_t offset; /* offset on jeb */ + jint32_t totlen; /* record length */ +} __attribute__((packed)); + +struct jffs2_sum_dirent_flash +{ + jint16_t nodetype; /* == JFFS_NODETYPE_DIRENT */ + jint32_t totlen; /* record length */ + jint32_t offset; /* offset on jeb */ + jint32_t pino; /* parent inode */ + jint32_t version; /* dirent version */ + jint32_t ino; /* == zero for unlink */ + uint8_t nsize; /* dirent name size */ + uint8_t type; /* dirent type */ + uint8_t name[0]; /* dirent name */ +} __attribute__((packed)); + +struct jffs2_sum_xattr_flash +{ + jint16_t nodetype; /* == JFFS2_NODETYPE_XATR */ + jint32_t xid; /* xattr identifier */ + jint32_t version; /* version number */ + jint32_t offset; /* offset on jeb */ + jint32_t totlen; /* node length */ +} __attribute__((packed)); + +struct jffs2_sum_xref_flash +{ + jint16_t nodetype; /* == JFFS2_NODETYPE_XREF */ + jint32_t offset; /* offset on jeb */ +} __attribute__((packed)); + +union jffs2_sum_flash +{ + struct jffs2_sum_unknown_flash u; + struct jffs2_sum_inode_flash i; + struct jffs2_sum_dirent_flash d; + struct jffs2_sum_xattr_flash x; + struct jffs2_sum_xref_flash r; +}; + +/* Summary structures used in the memory */ + +struct jffs2_sum_unknown_mem +{ + union jffs2_sum_mem *next; + jint16_t nodetype; /* node type */ +}; + +struct jffs2_sum_inode_mem +{ + union jffs2_sum_mem *next; + jint16_t nodetype; /* node type */ + jint32_t inode; /* inode number */ + jint32_t version; /* inode version */ + jint32_t offset; /* offset on jeb */ + jint32_t totlen; /* record length */ +} __attribute__((packed)); + +struct jffs2_sum_dirent_mem +{ + union jffs2_sum_mem *next; + jint16_t nodetype; /* == JFFS_NODETYPE_DIRENT */ + jint32_t totlen; /* record length */ + jint32_t offset; /* ofset on jeb */ + jint32_t pino; /* parent inode */ + jint32_t version; /* dirent version */ + jint32_t ino; /* == zero for unlink */ + uint8_t nsize; /* dirent name size */ + uint8_t type; /* dirent type */ + uint8_t name[0]; /* dirent name */ +} __attribute__((packed)); + +struct jffs2_sum_xattr_mem +{ + union jffs2_sum_mem *next; + jint16_t nodetype; + jint32_t xid; + jint32_t version; + jint32_t offset; + jint32_t totlen; +} __attribute__((packed)); + +struct jffs2_sum_xref_mem +{ + union jffs2_sum_mem *next; + jint16_t nodetype; + jint32_t offset; +} __attribute__((packed)); + +union jffs2_sum_mem +{ + struct jffs2_sum_unknown_mem u; + struct jffs2_sum_inode_mem i; + struct jffs2_sum_dirent_mem d; + struct jffs2_sum_xattr_mem x; + struct jffs2_sum_xref_mem r; +}; + +/* Summary related information stored in superblock */ + +struct jffs2_summary +{ + uint32_t sum_size; /* collected summary 
information for nextblock */ + uint32_t sum_num; + uint32_t sum_padded; + union jffs2_sum_mem *sum_list_head; + union jffs2_sum_mem *sum_list_tail; + + jint32_t *sum_buf; /* buffer for writing out summary */ +}; + +/* Summary marker is stored at the end of every sumarized erase block */ + +struct jffs2_sum_marker +{ + jint32_t offset; /* offset of the summary node in the jeb */ + jint32_t magic; /* == JFFS2_SUM_MAGIC */ +}; + +#define JFFS2_SUMMARY_FRAME_SIZE (sizeof(struct jffs2_raw_summary) + sizeof(struct jffs2_sum_marker)) + +#ifdef CONFIG_JFFS2_SUMMARY /* SUMMARY SUPPORT ENABLED */ + +#define jffs2_sum_active() (1) +int jffs2_sum_init(struct jffs2_sb_info *c); +void jffs2_sum_exit(struct jffs2_sb_info *c); +void jffs2_sum_disable_collecting(struct jffs2_summary *s); +int jffs2_sum_is_disabled(struct jffs2_summary *s); +void jffs2_sum_reset_collected(struct jffs2_summary *s); +void jffs2_sum_move_collected(struct jffs2_sb_info *c, struct jffs2_summary *s); +int jffs2_sum_add_kvec(struct jffs2_sb_info *c, const struct kvec *invecs, + unsigned long count, uint32_t to); +int jffs2_sum_write_sumnode(struct jffs2_sb_info *c); +int jffs2_sum_add_padding_mem(struct jffs2_summary *s, uint32_t size); +int jffs2_sum_add_inode_mem(struct jffs2_summary *s, struct jffs2_raw_inode *ri, uint32_t ofs); +int jffs2_sum_add_dirent_mem(struct jffs2_summary *s, struct jffs2_raw_dirent *rd, uint32_t ofs); +int jffs2_sum_add_xattr_mem(struct jffs2_summary *s, struct jffs2_raw_xattr *rx, uint32_t ofs); +int jffs2_sum_add_xref_mem(struct jffs2_summary *s, struct jffs2_raw_xref *rr, uint32_t ofs); +int jffs2_sum_scan_sumnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, + struct jffs2_raw_summary *summary, uint32_t sumlen, + uint32_t *pseudo_random); + +#else /* SUMMARY DISABLED */ + +#define jffs2_sum_active() (0) +#define jffs2_sum_init(a) (0) +#define jffs2_sum_exit(a) +#define jffs2_sum_disable_collecting(a) +#define jffs2_sum_is_disabled(a) (0) +#define jffs2_sum_reset_collected(a) +#define jffs2_sum_add_kvec(a,b,c,d) (0) +#define jffs2_sum_move_collected(a,b) +#define jffs2_sum_write_sumnode(a) (0) +#define jffs2_sum_add_padding_mem(a,b) +#define jffs2_sum_add_inode_mem(a,b,c) +#define jffs2_sum_add_dirent_mem(a,b,c) +#define jffs2_sum_add_xattr_mem(a,b,c) +#define jffs2_sum_add_xref_mem(a,b,c) +#define jffs2_sum_scan_sumnode(a,b,c,d,e) (0) + +#endif /* CONFIG_JFFS2_SUMMARY */ + +#endif /* JFFS2_SUMMARY_H */ diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c index aaf9475cfb6..0918f0e2e26 100644 --- a/fs/jffs2/super.c +++ b/fs/jffs2/super.c @@ -1,299 +1,345 @@ /* * JFFS2 -- Journalling Flash File System, Version 2. * - * Copyright (C) 2001-2003 Red Hat, Inc. + * Copyright © 2001-2007 Red Hat, Inc. * * Created by David Woodhouse <dwmw2@infradead.org> * * For licensing information, see the file 'LICENCE' in this directory. 
* - * $Id: super.c,v 1.107 2005/07/12 16:37:08 dedekind Exp $ - * */ -#include <linux/config.h> +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/list.h> #include <linux/fs.h> +#include <linux/err.h> #include <linux/mount.h> +#include <linux/parser.h> #include <linux/jffs2.h> #include <linux/pagemap.h> -#include <linux/mtd/mtd.h> +#include <linux/mtd/super.h> #include <linux/ctype.h> #include <linux/namei.h> +#include <linux/seq_file.h> +#include <linux/exportfs.h> #include "compr.h" #include "nodelist.h" static void jffs2_put_super(struct super_block *); -static kmem_cache_t *jffs2_inode_cachep; +static struct kmem_cache *jffs2_inode_cachep; static struct inode *jffs2_alloc_inode(struct super_block *sb) { - struct jffs2_inode_info *ei; - ei = (struct jffs2_inode_info *)kmem_cache_alloc(jffs2_inode_cachep, SLAB_KERNEL); - if (!ei) + struct jffs2_inode_info *f; + + f = kmem_cache_alloc(jffs2_inode_cachep, GFP_KERNEL); + if (!f) return NULL; - return &ei->vfs_inode; + return &f->vfs_inode; } -static void jffs2_destroy_inode(struct inode *inode) +static void jffs2_i_callback(struct rcu_head *head) { + struct inode *inode = container_of(head, struct inode, i_rcu); kmem_cache_free(jffs2_inode_cachep, JFFS2_INODE_INFO(inode)); } -static void jffs2_i_init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) +static void jffs2_destroy_inode(struct inode *inode) +{ + call_rcu(&inode->i_rcu, jffs2_i_callback); +} + +static void jffs2_i_init_once(void *foo) { - struct jffs2_inode_info *ei = (struct jffs2_inode_info *) foo; + struct jffs2_inode_info *f = foo; + + mutex_init(&f->sem); + inode_init_once(&f->vfs_inode); +} - if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) { - init_MUTEX_LOCKED(&ei->sem); - inode_init_once(&ei->vfs_inode); +static const char *jffs2_compr_name(unsigned int compr) +{ + switch (compr) { + case JFFS2_COMPR_MODE_NONE: + return "none"; +#ifdef CONFIG_JFFS2_LZO + case JFFS2_COMPR_MODE_FORCELZO: + return "lzo"; +#endif +#ifdef CONFIG_JFFS2_ZLIB + case JFFS2_COMPR_MODE_FORCEZLIB: + return "zlib"; +#endif + default: + /* should never happen; programmer error */ + WARN_ON(1); + return ""; } } +static int jffs2_show_options(struct seq_file *s, struct dentry *root) +{ + struct jffs2_sb_info *c = JFFS2_SB_INFO(root->d_sb); + struct jffs2_mount_opts *opts = &c->mount_opts; + + if (opts->override_compr) + seq_printf(s, ",compr=%s", jffs2_compr_name(opts->compr)); + if (opts->rp_size) + seq_printf(s, ",rp_size=%u", opts->rp_size / 1024); + + return 0; +} + static int jffs2_sync_fs(struct super_block *sb, int wait) { struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); - down(&c->alloc_sem); +#ifdef CONFIG_JFFS2_FS_WRITEBUFFER + cancel_delayed_work_sync(&c->wbuf_dwork); +#endif + + mutex_lock(&c->alloc_sem); jffs2_flush_wbuf_pad(c); - up(&c->alloc_sem); + mutex_unlock(&c->alloc_sem); return 0; } -static struct super_operations jffs2_super_operations = +static struct inode *jffs2_nfs_get_inode(struct super_block *sb, uint64_t ino, + uint32_t generation) { - .alloc_inode = jffs2_alloc_inode, - .destroy_inode =jffs2_destroy_inode, - .read_inode = jffs2_read_inode, - .put_super = jffs2_put_super, - .write_super = jffs2_write_super, - .statfs = jffs2_statfs, - .remount_fs = jffs2_remount_fs, - .clear_inode = jffs2_clear_inode, - .dirty_inode = jffs2_dirty_inode, - .sync_fs = jffs2_sync_fs, -}; + /* We don't care about i_generation. 
We'll destroy the flash + before we start re-using inode numbers anyway. And even + if that wasn't true, we'd have other problems...*/ + return jffs2_iget(sb, ino); +} -static int jffs2_sb_compare(struct super_block *sb, void *data) +static struct dentry *jffs2_fh_to_dentry(struct super_block *sb, struct fid *fid, + int fh_len, int fh_type) { - struct jffs2_sb_info *p = data; - struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); - - /* The superblocks are considered to be equivalent if the underlying MTD - device is the same one */ - if (c->mtd == p->mtd) { - D1(printk(KERN_DEBUG "jffs2_sb_compare: match on device %d (\"%s\")\n", p->mtd->index, p->mtd->name)); - return 1; - } else { - D1(printk(KERN_DEBUG "jffs2_sb_compare: No match, device %d (\"%s\"), device %d (\"%s\")\n", - c->mtd->index, c->mtd->name, p->mtd->index, p->mtd->name)); - return 0; - } + return generic_fh_to_dentry(sb, fid, fh_len, fh_type, + jffs2_nfs_get_inode); } -static int jffs2_sb_set(struct super_block *sb, void *data) +static struct dentry *jffs2_fh_to_parent(struct super_block *sb, struct fid *fid, + int fh_len, int fh_type) { - struct jffs2_sb_info *p = data; - - /* For persistence of NFS exports etc. we use the same s_dev - each time we mount the device, don't just use an anonymous - device */ - sb->s_fs_info = p; - p->os_priv = sb; - sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, p->mtd->index); - - return 0; + return generic_fh_to_parent(sb, fid, fh_len, fh_type, + jffs2_nfs_get_inode); } -static struct super_block *jffs2_get_sb_mtd(struct file_system_type *fs_type, - int flags, const char *dev_name, - void *data, struct mtd_info *mtd) +static struct dentry *jffs2_get_parent(struct dentry *child) { - struct super_block *sb; - struct jffs2_sb_info *c; - int ret; + struct jffs2_inode_info *f; + uint32_t pino; - c = kmalloc(sizeof(*c), GFP_KERNEL); - if (!c) - return ERR_PTR(-ENOMEM); - memset(c, 0, sizeof(*c)); - c->mtd = mtd; + BUG_ON(!S_ISDIR(child->d_inode->i_mode)); - sb = sget(fs_type, jffs2_sb_compare, jffs2_sb_set, c); + f = JFFS2_INODE_INFO(child->d_inode); - if (IS_ERR(sb)) - goto out_put; + pino = f->inocache->pino_nlink; - if (sb->s_root) { - /* New mountpoint for JFFS2 which is already mounted */ - D1(printk(KERN_DEBUG "jffs2_get_sb_mtd(): Device %d (\"%s\") is already mounted\n", - mtd->index, mtd->name)); - goto out_put; - } + JFFS2_DEBUG("Parent of directory ino #%u is #%u\n", + f->inocache->ino, pino); - D1(printk(KERN_DEBUG "jffs2_get_sb_mtd(): New superblock for device %d (\"%s\")\n", - mtd->index, mtd->name)); + return d_obtain_alias(jffs2_iget(child->d_inode->i_sb, pino)); +} - /* Initialize JFFS2 superblock locks, the further initialization will be - * done later */ - init_MUTEX(&c->alloc_sem); - init_MUTEX(&c->erase_free_sem); - init_waitqueue_head(&c->erase_wait); - init_waitqueue_head(&c->inocache_wq); - spin_lock_init(&c->erase_completion_lock); - spin_lock_init(&c->inocache_lock); +static const struct export_operations jffs2_export_ops = { + .get_parent = jffs2_get_parent, + .fh_to_dentry = jffs2_fh_to_dentry, + .fh_to_parent = jffs2_fh_to_parent, +}; - sb->s_op = &jffs2_super_operations; - sb->s_flags = flags | MS_NOATIME; +/* + * JFFS2 mount options. 
+ * + * Opt_override_compr: override default compressor + * Opt_rp_size: size of reserved pool in KiB + * Opt_err: just end of array marker + */ +enum { + Opt_override_compr, + Opt_rp_size, + Opt_err, +}; - ret = jffs2_do_fill_super(sb, data, (flags&MS_VERBOSE)?1:0); +static const match_table_t tokens = { + {Opt_override_compr, "compr=%s"}, + {Opt_rp_size, "rp_size=%u"}, + {Opt_err, NULL}, +}; - if (ret) { - /* Failure case... */ - up_write(&sb->s_umount); - deactivate_super(sb); - return ERR_PTR(ret); - } +static int jffs2_parse_options(struct jffs2_sb_info *c, char *data) +{ + substring_t args[MAX_OPT_ARGS]; + char *p, *name; + unsigned int opt; - sb->s_flags |= MS_ACTIVE; - return sb; + if (!data) + return 0; - out_put: - kfree(c); - put_mtd_device(mtd); + while ((p = strsep(&data, ","))) { + int token; - return sb; -} + if (!*p) + continue; -static struct super_block *jffs2_get_sb_mtdnr(struct file_system_type *fs_type, - int flags, const char *dev_name, - void *data, int mtdnr) -{ - struct mtd_info *mtd; + token = match_token(p, tokens, args); + switch (token) { + case Opt_override_compr: + name = match_strdup(&args[0]); - mtd = get_mtd_device(NULL, mtdnr); - if (!mtd) { - D1(printk(KERN_DEBUG "jffs2: MTD device #%u doesn't appear to exist\n", mtdnr)); - return ERR_PTR(-EINVAL); + if (!name) + return -ENOMEM; + if (!strcmp(name, "none")) + c->mount_opts.compr = JFFS2_COMPR_MODE_NONE; +#ifdef CONFIG_JFFS2_LZO + else if (!strcmp(name, "lzo")) + c->mount_opts.compr = JFFS2_COMPR_MODE_FORCELZO; +#endif +#ifdef CONFIG_JFFS2_ZLIB + else if (!strcmp(name, "zlib")) + c->mount_opts.compr = + JFFS2_COMPR_MODE_FORCEZLIB; +#endif + else { + pr_err("Error: unknown compressor \"%s\"\n", + name); + kfree(name); + return -EINVAL; + } + kfree(name); + c->mount_opts.override_compr = true; + break; + case Opt_rp_size: + if (match_int(&args[0], &opt)) + return -EINVAL; + opt *= 1024; + if (opt > c->mtd->size) { + pr_warn("Too large reserve pool specified, max " + "is %llu KB\n", c->mtd->size / 1024); + return -EINVAL; + } + c->mount_opts.rp_size = opt; + break; + default: + pr_err("Error: unrecognized mount option '%s' or missing value\n", + p); + return -EINVAL; + } } - return jffs2_get_sb_mtd(fs_type, flags, dev_name, data, mtd); + return 0; } -static struct super_block *jffs2_get_sb(struct file_system_type *fs_type, - int flags, const char *dev_name, - void *data) +static int jffs2_remount_fs(struct super_block *sb, int *flags, char *data) { + struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); int err; - struct nameidata nd; - int mtdnr; - - if (!dev_name) - return ERR_PTR(-EINVAL); - - D1(printk(KERN_DEBUG "jffs2_get_sb(): dev_name \"%s\"\n", dev_name)); - - /* The preferred way of mounting in future; especially when - CONFIG_BLK_DEV is implemented - we specify the underlying - MTD device by number or by name, so that we don't require - block device support to be present in the kernel. */ - - /* FIXME: How to do the root fs this way? 
*/ - - if (dev_name[0] == 'm' && dev_name[1] == 't' && dev_name[2] == 'd') { - /* Probably mounting without the blkdev crap */ - if (dev_name[3] == ':') { - struct mtd_info *mtd; - - /* Mount by MTD device name */ - D1(printk(KERN_DEBUG "jffs2_get_sb(): mtd:%%s, name \"%s\"\n", dev_name+4)); - for (mtdnr = 0; mtdnr < MAX_MTD_DEVICES; mtdnr++) { - mtd = get_mtd_device(NULL, mtdnr); - if (mtd) { - if (!strcmp(mtd->name, dev_name+4)) - return jffs2_get_sb_mtd(fs_type, flags, dev_name, data, mtd); - put_mtd_device(mtd); - } - } - printk(KERN_NOTICE "jffs2_get_sb(): MTD device with name \"%s\" not found.\n", dev_name+4); - } else if (isdigit(dev_name[3])) { - /* Mount by MTD device number name */ - char *endptr; - - mtdnr = simple_strtoul(dev_name+3, &endptr, 0); - if (!*endptr) { - /* It was a valid number */ - D1(printk(KERN_DEBUG "jffs2_get_sb(): mtd%%d, mtdnr %d\n", mtdnr)); - return jffs2_get_sb_mtdnr(fs_type, flags, dev_name, data, mtdnr); - } - } - } - /* Try the old way - the hack where we allowed users to mount - /dev/mtdblock$(n) but didn't actually _use_ the blkdev */ + sync_filesystem(sb); + err = jffs2_parse_options(c, data); + if (err) + return -EINVAL; - err = path_lookup(dev_name, LOOKUP_FOLLOW, &nd); + return jffs2_do_remount_fs(sb, flags, data); +} - D1(printk(KERN_DEBUG "jffs2_get_sb(): path_lookup() returned %d, inode %p\n", - err, nd.dentry->d_inode)); +static const struct super_operations jffs2_super_operations = +{ + .alloc_inode = jffs2_alloc_inode, + .destroy_inode =jffs2_destroy_inode, + .put_super = jffs2_put_super, + .statfs = jffs2_statfs, + .remount_fs = jffs2_remount_fs, + .evict_inode = jffs2_evict_inode, + .dirty_inode = jffs2_dirty_inode, + .show_options = jffs2_show_options, + .sync_fs = jffs2_sync_fs, +}; - if (err) - return ERR_PTR(err); +/* + * fill in the superblock + */ +static int jffs2_fill_super(struct super_block *sb, void *data, int silent) +{ + struct jffs2_sb_info *c; + int ret; - err = -EINVAL; + jffs2_dbg(1, "jffs2_get_sb_mtd():" + " New superblock for device %d (\"%s\")\n", + sb->s_mtd->index, sb->s_mtd->name); - if (!S_ISBLK(nd.dentry->d_inode->i_mode)) - goto out; + c = kzalloc(sizeof(*c), GFP_KERNEL); + if (!c) + return -ENOMEM; - if (nd.mnt->mnt_flags & MNT_NODEV) { - err = -EACCES; - goto out; - } + c->mtd = sb->s_mtd; + c->os_priv = sb; + sb->s_fs_info = c; - if (imajor(nd.dentry->d_inode) != MTD_BLOCK_MAJOR) { - if (!(flags & MS_VERBOSE)) /* Yes I mean this. 
Strangely */ - printk(KERN_NOTICE "Attempt to mount non-MTD device \"%s\" as JFFS2\n", - dev_name); - goto out; + ret = jffs2_parse_options(c, data); + if (ret) { + kfree(c); + return -EINVAL; } - mtdnr = iminor(nd.dentry->d_inode); - path_release(&nd); + /* Initialize JFFS2 superblock locks, the further initialization will + * be done later */ + mutex_init(&c->alloc_sem); + mutex_init(&c->erase_free_sem); + init_waitqueue_head(&c->erase_wait); + init_waitqueue_head(&c->inocache_wq); + spin_lock_init(&c->erase_completion_lock); + spin_lock_init(&c->inocache_lock); - return jffs2_get_sb_mtdnr(fs_type, flags, dev_name, data, mtdnr); + sb->s_op = &jffs2_super_operations; + sb->s_export_op = &jffs2_export_ops; + sb->s_flags = sb->s_flags | MS_NOATIME; + sb->s_xattr = jffs2_xattr_handlers; +#ifdef CONFIG_JFFS2_FS_POSIX_ACL + sb->s_flags |= MS_POSIXACL; +#endif + ret = jffs2_do_fill_super(sb, data, silent); + return ret; +} -out: - path_release(&nd); - return ERR_PTR(err); +static struct dentry *jffs2_mount(struct file_system_type *fs_type, + int flags, const char *dev_name, + void *data) +{ + return mount_mtd(fs_type, flags, dev_name, data, jffs2_fill_super); } static void jffs2_put_super (struct super_block *sb) { struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); - D2(printk(KERN_DEBUG "jffs2: jffs2_put_super()\n")); + jffs2_dbg(2, "%s()\n", __func__); - down(&c->alloc_sem); + mutex_lock(&c->alloc_sem); jffs2_flush_wbuf_pad(c); - up(&c->alloc_sem); + mutex_unlock(&c->alloc_sem); + + jffs2_sum_exit(c); + jffs2_free_ino_caches(c); jffs2_free_raw_node_refs(c); - if (c->mtd->flags & MTD_NO_VIRTBLOCKS) + if (jffs2_blocks_use_vmalloc(c)) vfree(c->blocks); else kfree(c->blocks); jffs2_flash_cleanup(c); kfree(c->inocache_list); - if (c->mtd->sync) - c->mtd->sync(c->mtd); - - D1(printk(KERN_DEBUG "jffs2_put_super returning\n")); + jffs2_clear_xattr_subsystem(c); + mtd_sync(c->mtd); + jffs2_dbg(1, "%s(): returning\n", __func__); } static void jffs2_kill_sb(struct super_block *sb) @@ -301,49 +347,65 @@ static void jffs2_kill_sb(struct super_block *sb) struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); if (!(sb->s_flags & MS_RDONLY)) jffs2_stop_garbage_collect_thread(c); - generic_shutdown_super(sb); - put_mtd_device(c->mtd); + kill_mtd_super(sb); kfree(c); } static struct file_system_type jffs2_fs_type = { .owner = THIS_MODULE, .name = "jffs2", - .get_sb = jffs2_get_sb, + .mount = jffs2_mount, .kill_sb = jffs2_kill_sb, }; +MODULE_ALIAS_FS("jffs2"); static int __init init_jffs2_fs(void) { int ret; - printk(KERN_INFO "JFFS2 version 2.2." + /* Paranoia checks for on-medium structures. If we ask GCC + to pack them with __attribute__((packed)) then it _also_ + assumes that they're not aligned -- so it emits crappy + code on some architectures. Ideally we want an attribute + which means just 'no padding', without the alignment + thing. But GCC doesn't have that -- we have to just + hope the structs are the right sizes, instead. */ + BUILD_BUG_ON(sizeof(struct jffs2_unknown_node) != 12); + BUILD_BUG_ON(sizeof(struct jffs2_raw_dirent) != 40); + BUILD_BUG_ON(sizeof(struct jffs2_raw_inode) != 68); + BUILD_BUG_ON(sizeof(struct jffs2_raw_summary) != 32); + + pr_info("version 2.2." 
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER " (NAND)" #endif - " (C) 2001-2003 Red Hat, Inc.\n"); +#ifdef CONFIG_JFFS2_SUMMARY + " (SUMMARY) " +#endif + " © 2001-2006 Red Hat, Inc.\n"); jffs2_inode_cachep = kmem_cache_create("jffs2_i", sizeof(struct jffs2_inode_info), - 0, SLAB_RECLAIM_ACCOUNT, - jffs2_i_init_once, NULL); + 0, (SLAB_RECLAIM_ACCOUNT| + SLAB_MEM_SPREAD), + jffs2_i_init_once); if (!jffs2_inode_cachep) { - printk(KERN_ERR "JFFS2 error: Failed to initialise inode cache\n"); + pr_err("error: Failed to initialise inode cache\n"); return -ENOMEM; } ret = jffs2_compressors_init(); if (ret) { - printk(KERN_ERR "JFFS2 error: Failed to initialise compressors\n"); + pr_err("error: Failed to initialise compressors\n"); goto out; } ret = jffs2_create_slab_caches(); if (ret) { - printk(KERN_ERR "JFFS2 error: Failed to initialise slab caches\n"); + pr_err("error: Failed to initialise slab caches\n"); goto out_compressors; } ret = register_filesystem(&jffs2_fs_type); if (ret) { - printk(KERN_ERR "JFFS2 error: Failed to register filesystem\n"); + pr_err("error: Failed to register filesystem\n"); goto out_slab; } return 0; @@ -362,6 +424,12 @@ static void __exit exit_jffs2_fs(void) unregister_filesystem(&jffs2_fs_type); jffs2_destroy_slab_caches(); jffs2_compressors_exit(); + + /* + * Make sure all delayed rcu free inodes are flushed before we + * destroy cache. + */ + rcu_barrier(); kmem_cache_destroy(jffs2_inode_cachep); } @@ -370,5 +438,5 @@ module_exit(exit_jffs2_fs); MODULE_DESCRIPTION("The Journalling Flash File System, v2"); MODULE_AUTHOR("Red Hat, Inc."); -MODULE_LICENSE("GPL"); // Actually dual-licensed, but it doesn't matter for +MODULE_LICENSE("GPL"); // Actually dual-licensed, but it doesn't matter for // the sake of this tag. It's Free Software. diff --git a/fs/jffs2/symlink.c b/fs/jffs2/symlink.c index 65ab6b001dc..c7c77b0dfcc 100644 --- a/fs/jffs2/symlink.c +++ b/fs/jffs2/symlink.c @@ -1,63 +1,66 @@ /* * JFFS2 -- Journalling Flash File System, Version 2. * - * Copyright (C) 2001, 2002 Red Hat, Inc. + * Copyright © 2001-2007 Red Hat, Inc. * * Created by David Woodhouse <dwmw2@infradead.org> * * For licensing information, see the file 'LICENCE' in this directory. * - * $Id: symlink.c,v 1.16 2005/03/01 10:50:48 dedekind Exp $ - * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> -#include <linux/slab.h> #include <linux/fs.h> #include <linux/namei.h> #include "nodelist.h" -static int jffs2_follow_link(struct dentry *dentry, struct nameidata *nd); +static void *jffs2_follow_link(struct dentry *dentry, struct nameidata *nd); -struct inode_operations jffs2_symlink_inode_operations = -{ +const struct inode_operations jffs2_symlink_inode_operations = +{ .readlink = generic_readlink, .follow_link = jffs2_follow_link, - .setattr = jffs2_setattr + .setattr = jffs2_setattr, + .setxattr = jffs2_setxattr, + .getxattr = jffs2_getxattr, + .listxattr = jffs2_listxattr, + .removexattr = jffs2_removexattr }; -static int jffs2_follow_link(struct dentry *dentry, struct nameidata *nd) +static void *jffs2_follow_link(struct dentry *dentry, struct nameidata *nd) { struct jffs2_inode_info *f = JFFS2_INODE_INFO(dentry->d_inode); - + char *p = (char *)f->target; + /* * We don't acquire the f->sem mutex here since the only data we - * use is f->dents which in case of the symlink inode points to the - * symlink's target path. + * use is f->target. * - * 1. If we are here the inode has already built and f->dents has + * 1. 
If we are here the inode has already built and f->target has * to point to the target path. - * 2. Nobody uses f->dents (if the inode is symlink's inode). The - * exception is inode freeing function which frees f->dents. But + * 2. Nobody uses f->target (if the inode is symlink's inode). The + * exception is inode freeing function which frees f->target. But * it can't be called while we are here and before VFS has - * stopped using our f->dents string which we provide by means of + * stopped using our f->target string which we provide by means of * nd_set_link() call. */ - - if (!f->dents) { - printk(KERN_ERR "jffs2_follow_link(): can't find symlink taerget\n"); - return -EIO; + + if (!p) { + pr_err("%s(): can't find symlink target\n", __func__); + p = ERR_PTR(-EIO); } - D1(printk(KERN_DEBUG "jffs2_follow_link(): target path is '%s'\n", (char *) f->dents)); + jffs2_dbg(1, "%s(): target path is '%s'\n", + __func__, (char *)f->target); + + nd_set_link(nd, p); - nd_set_link(nd, (char *)f->dents); - /* - * We unlock the f->sem mutex but VFS will use the f->dents string. This is safe - * since the only way that may cause f->dents to be changed is iput() operation. - * But VFS will not use f->dents after iput() has been called. + * We will unlock the f->sem mutex but VFS will use the f->target string. This is safe + * since the only way that may cause f->target to be changed is iput() operation. + * But VFS will not use f->target after iput() has been called. */ - return 0; + return NULL; } diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c index 996d922e503..a6597d60d76 100644 --- a/fs/jffs2/wbuf.c +++ b/fs/jffs2/wbuf.c @@ -1,23 +1,27 @@ /* * JFFS2 -- Journalling Flash File System, Version 2. * - * Copyright (C) 2001-2003 Red Hat, Inc. - * Copyright (C) 2004 Thomas Gleixner <tglx@linutronix.de> + * Copyright © 2001-2007 Red Hat, Inc. + * Copyright © 2004 Thomas Gleixner <tglx@linutronix.de> * * Created by David Woodhouse <dwmw2@infradead.org> * Modified debugged and enhanced by Thomas Gleixner <tglx@linutronix.de> * * For licensing information, see the file 'LICENCE' in this directory. * - * $Id: wbuf.c,v 1.92 2005/04/05 12:51:54 dedekind Exp $ - * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/kernel.h> #include <linux/slab.h> #include <linux/mtd/mtd.h> #include <linux/crc32.h> #include <linux/mtd/nand.h> +#include <linux/jiffies.h> +#include <linux/sched.h> +#include <linux/writeback.h> + #include "nodelist.h" /* For testing write failures */ @@ -28,12 +32,12 @@ static unsigned char *brokenbuf; #endif +#define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) ) +#define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) ) + /* max. erase failures before we mark a block bad */ #define MAX_ERASE_FAILURES 2 -/* two seconds timeout for timed wbuf-flushing */ -#define WBUF_FLUSH_TIMEOUT 2 * HZ - struct jffs2_inodirty { uint32_t ino; struct jffs2_inodirty *next; @@ -82,15 +86,15 @@ static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino) { struct jffs2_inodirty *new; - /* Mark the superblock dirty so that kupdated will flush... */ - jffs2_erase_pending_trigger(c); + /* Schedule delayed write-buffer write-out */ + jffs2_dirty_trigger(c); if (jffs2_wbuf_pending_for_ino(c, ino)) return; new = kmalloc(sizeof(*new), GFP_KERNEL); if (!new) { - D1(printk(KERN_DEBUG "No memory to allocate inodirty. Fallback to all considered dirty\n")); + jffs2_dbg(1, "No memory to allocate inodirty. 
Fallback to all considered dirty\n"); jffs2_clear_wbuf_ino_list(c); c->wbuf_inodes = &inodirty_nomem; return; @@ -112,19 +116,20 @@ static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c) list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) { struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); - D1(printk(KERN_DEBUG "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n", jeb->offset)); + jffs2_dbg(1, "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n", + jeb->offset); list_del(this); if ((jiffies + (n++)) & 127) { /* Most of the time, we just erase it immediately. Otherwise we spend ages scanning it on mount, etc. */ - D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n")); + jffs2_dbg(1, "...and adding to erase_pending_list\n"); list_add_tail(&jeb->list, &c->erase_pending_list); c->nr_erasing_blocks++; - jffs2_erase_pending_trigger(c); + jffs2_garbage_collect_trigger(c); } else { /* Sometimes, however, we leave it elsewhere so it doesn't get immediately reused, and we spread the load a bit. */ - D1(printk(KERN_DEBUG "...and adding to erasable_list\n")); + jffs2_dbg(1, "...and adding to erasable_list\n"); list_add_tail(&jeb->list, &c->erasable_list); } } @@ -135,36 +140,134 @@ static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c) static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty) { - D1(printk("About to refile bad block at %08x\n", jeb->offset)); + jffs2_dbg(1, "About to refile bad block at %08x\n", jeb->offset); - D2(jffs2_dump_block_lists(c)); /* File the existing block on the bad_used_list.... */ if (c->nextblock == jeb) c->nextblock = NULL; else /* Not sure this should ever happen... need more coffee */ list_del(&jeb->list); if (jeb->first_node) { - D1(printk("Refiling block at %08x to bad_used_list\n", jeb->offset)); + jffs2_dbg(1, "Refiling block at %08x to bad_used_list\n", + jeb->offset); list_add(&jeb->list, &c->bad_used_list); } else { BUG_ON(allow_empty == REFILE_NOTEMPTY); /* It has to have had some nodes or we couldn't be here */ - D1(printk("Refiling block at %08x to erase_pending_list\n", jeb->offset)); + jffs2_dbg(1, "Refiling block at %08x to erase_pending_list\n", + jeb->offset); list_add(&jeb->list, &c->erase_pending_list); c->nr_erasing_blocks++; - jffs2_erase_pending_trigger(c); + jffs2_garbage_collect_trigger(c); + } + + if (!jffs2_prealloc_raw_node_refs(c, jeb, 1)) { + uint32_t oldfree = jeb->free_size; + + jffs2_link_node_ref(c, jeb, + (jeb->offset+c->sector_size-oldfree) | REF_OBSOLETE, + oldfree, NULL); + /* convert to wasted */ + c->wasted_size += oldfree; + jeb->wasted_size += oldfree; + c->dirty_size -= oldfree; + jeb->dirty_size -= oldfree; + } + + jffs2_dbg_dump_block_lists_nolock(c); + jffs2_dbg_acct_sanity_check_nolock(c,jeb); + jffs2_dbg_acct_paranoia_check_nolock(c, jeb); +} + +static struct jffs2_raw_node_ref **jffs2_incore_replace_raw(struct jffs2_sb_info *c, + struct jffs2_inode_info *f, + struct jffs2_raw_node_ref *raw, + union jffs2_node_union *node) +{ + struct jffs2_node_frag *frag; + struct jffs2_full_dirent *fd; + + dbg_noderef("incore_replace_raw: node at %p is {%04x,%04x}\n", + node, je16_to_cpu(node->u.magic), je16_to_cpu(node->u.nodetype)); + + BUG_ON(je16_to_cpu(node->u.magic) != 0x1985 && + je16_to_cpu(node->u.magic) != 0); + + switch (je16_to_cpu(node->u.nodetype)) { + case JFFS2_NODETYPE_INODE: + if (f->metadata && f->metadata->raw == raw) { + dbg_noderef("Will replace ->raw in 
f->metadata at %p\n", f->metadata); + return &f->metadata->raw; + } + frag = jffs2_lookup_node_frag(&f->fragtree, je32_to_cpu(node->i.offset)); + BUG_ON(!frag); + /* Find a frag which refers to the full_dnode we want to modify */ + while (!frag->node || frag->node->raw != raw) { + frag = frag_next(frag); + BUG_ON(!frag); + } + dbg_noderef("Will replace ->raw in full_dnode at %p\n", frag->node); + return &frag->node->raw; + + case JFFS2_NODETYPE_DIRENT: + for (fd = f->dents; fd; fd = fd->next) { + if (fd->raw == raw) { + dbg_noderef("Will replace ->raw in full_dirent at %p\n", fd); + return &fd->raw; + } + } + BUG(); + + default: + dbg_noderef("Don't care about replacing raw for nodetype %x\n", + je16_to_cpu(node->u.nodetype)); + break; + } + return NULL; +} + +#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY +static int jffs2_verify_write(struct jffs2_sb_info *c, unsigned char *buf, + uint32_t ofs) +{ + int ret; + size_t retlen; + char *eccstr; + + ret = mtd_read(c->mtd, ofs, c->wbuf_pagesize, &retlen, c->wbuf_verify); + if (ret && ret != -EUCLEAN && ret != -EBADMSG) { + pr_warn("%s(): Read back of page at %08x failed: %d\n", + __func__, c->wbuf_ofs, ret); + return ret; + } else if (retlen != c->wbuf_pagesize) { + pr_warn("%s(): Read back of page at %08x gave short read: %zd not %d\n", + __func__, ofs, retlen, c->wbuf_pagesize); + return -EIO; } - D2(jffs2_dump_block_lists(c)); + if (!memcmp(buf, c->wbuf_verify, c->wbuf_pagesize)) + return 0; - /* Adjust its size counts accordingly */ - c->wasted_size += jeb->free_size; - c->free_size -= jeb->free_size; - jeb->wasted_size += jeb->free_size; - jeb->free_size = 0; + if (ret == -EUCLEAN) + eccstr = "corrected"; + else if (ret == -EBADMSG) + eccstr = "correction failed"; + else + eccstr = "OK or unused"; + + pr_warn("Write verify error (ECC %s) at %08x. Wrote:\n", + eccstr, c->wbuf_ofs); + print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, + c->wbuf, c->wbuf_pagesize, 0); - ACCT_SANITY_CHECK(c,jeb); - D1(ACCT_PARANOIA_CHECK(jeb)); + pr_warn("Read back:\n"); + print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, + c->wbuf_verify, c->wbuf_pagesize, 0); + + return -EIO; } +#else +#define jffs2_verify_write(c,b,o) (0) +#endif /* Recover from failure to write wbuf. Recover the nodes up to the * wbuf, not the one which we were starting to try to write. */ @@ -172,52 +275,62 @@ static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock static void jffs2_wbuf_recover(struct jffs2_sb_info *c) { struct jffs2_eraseblock *jeb, *new_jeb; - struct jffs2_raw_node_ref **first_raw, **raw; + struct jffs2_raw_node_ref *raw, *next, *first_raw = NULL; size_t retlen; int ret; + int nr_refile = 0; unsigned char *buf; uint32_t start, end, ofs, len; - spin_lock(&c->erase_completion_lock); - jeb = &c->blocks[c->wbuf_ofs / c->sector_size]; - jffs2_block_refile(c, jeb, REFILE_NOTEMPTY); + spin_lock(&c->erase_completion_lock); + if (c->wbuf_ofs % c->mtd->erasesize) + jffs2_block_refile(c, jeb, REFILE_NOTEMPTY); + else + jffs2_block_refile(c, jeb, REFILE_ANYWAY); + spin_unlock(&c->erase_completion_lock); + + BUG_ON(!ref_obsolete(jeb->last_node)); /* Find the first node to be recovered, by skipping over every node which ends before the wbuf starts, or which is obsolete. 
*/ - first_raw = &jeb->first_node; - while (*first_raw && - (ref_obsolete(*first_raw) || - (ref_offset(*first_raw)+ref_totlen(c, jeb, *first_raw)) < c->wbuf_ofs)) { - D1(printk(KERN_DEBUG "Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n", - ref_offset(*first_raw), ref_flags(*first_raw), - (ref_offset(*first_raw) + ref_totlen(c, jeb, *first_raw)), - c->wbuf_ofs)); - first_raw = &(*first_raw)->next_phys; + for (next = raw = jeb->first_node; next; raw = next) { + next = ref_next(raw); + + if (ref_obsolete(raw) || + (next && ref_offset(next) <= c->wbuf_ofs)) { + dbg_noderef("Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n", + ref_offset(raw), ref_flags(raw), + (ref_offset(raw) + ref_totlen(c, jeb, raw)), + c->wbuf_ofs); + continue; + } + dbg_noderef("First node to be recovered is at 0x%08x(%d)-0x%08x\n", + ref_offset(raw), ref_flags(raw), + (ref_offset(raw) + ref_totlen(c, jeb, raw))); + + first_raw = raw; + break; } - if (!*first_raw) { + if (!first_raw) { /* All nodes were obsolete. Nothing to recover. */ - D1(printk(KERN_DEBUG "No non-obsolete nodes to be recovered. Just filing block bad\n")); - spin_unlock(&c->erase_completion_lock); + jffs2_dbg(1, "No non-obsolete nodes to be recovered. Just filing block bad\n"); + c->wbuf_len = 0; return; } - start = ref_offset(*first_raw); - end = ref_offset(*first_raw) + ref_totlen(c, jeb, *first_raw); + start = ref_offset(first_raw); + end = ref_offset(jeb->last_node); + nr_refile = 1; - /* Find the last node to be recovered */ - raw = first_raw; - while ((*raw)) { - if (!ref_obsolete(*raw)) - end = ref_offset(*raw) + ref_totlen(c, jeb, *raw); + /* Count the number of refs which need to be copied */ + while ((raw = ref_next(raw)) != jeb->last_node) + nr_refile++; - raw = &(*raw)->next_phys; - } - spin_unlock(&c->erase_completion_lock); - - D1(printk(KERN_DEBUG "wbuf recover %08x-%08x\n", start, end)); + dbg_noderef("wbuf recover %08x-%08x (%d bytes in %d nodes)\n", + start, end, end - start, nr_refile); buf = NULL; if (start < c->wbuf_ofs) { @@ -226,34 +339,44 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) buf = kmalloc(end - start, GFP_KERNEL); if (!buf) { - printk(KERN_CRIT "Malloc failure in wbuf recovery. Data loss ensues.\n"); + pr_crit("Malloc failure in wbuf recovery. Data loss ensues.\n"); goto read_failed; } /* Do the read... */ - if (jffs2_cleanmarker_oob(c)) - ret = c->mtd->read_ecc(c->mtd, start, c->wbuf_ofs - start, &retlen, buf, NULL, c->oobinfo); - else - ret = c->mtd->read(c->mtd, start, c->wbuf_ofs - start, &retlen, buf); - - if (ret == -EBADMSG && retlen == c->wbuf_ofs - start) { - /* ECC recovered */ + ret = mtd_read(c->mtd, start, c->wbuf_ofs - start, &retlen, + buf); + + /* ECC recovered ? */ + if ((ret == -EUCLEAN || ret == -EBADMSG) && + (retlen == c->wbuf_ofs - start)) ret = 0; - } + if (ret || retlen != c->wbuf_ofs - start) { - printk(KERN_CRIT "Old data are already lost in wbuf recovery. Data loss ensues.\n"); + pr_crit("Old data are already lost in wbuf recovery. Data loss ensues.\n"); kfree(buf); buf = NULL; read_failed: - first_raw = &(*first_raw)->next_phys; + first_raw = ref_next(first_raw); + nr_refile--; + while (first_raw && ref_obsolete(first_raw)) { + first_raw = ref_next(first_raw); + nr_refile--; + } + /* If this was the only node to be recovered, give up */ - if (!(*first_raw)) + if (!first_raw) { + c->wbuf_len = 0; return; + } /* It wasn't. 
Go on and try to recover nodes complete in the wbuf */ - start = ref_offset(*first_raw); + start = ref_offset(first_raw); + dbg_noderef("wbuf now recover %08x-%08x (%d bytes in %d nodes)\n", + start, end, end - start, nr_refile); + } else { /* Read succeeded. Copy the remaining data from the wbuf */ memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs); @@ -262,76 +385,70 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) /* OK... we're to rewrite (end-start) bytes of data from first_raw onwards. Either 'buf' contains the data, or we find it in the wbuf */ - /* ... and get an allocation of space from a shiny new block instead */ - ret = jffs2_reserve_space_gc(c, end-start, &ofs, &len); + ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE); + if (ret) { + pr_warn("Failed to allocate space for wbuf recovery. Data loss ensues.\n"); + kfree(buf); + return; + } + + /* The summary is not recovered, so it must be disabled for this erase block */ + jffs2_sum_disable_collecting(c->summary); + + ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile); if (ret) { - printk(KERN_WARNING "Failed to allocate space for wbuf recovery. Data loss ensues.\n"); + pr_warn("Failed to allocate node refs for wbuf recovery. Data loss ensues.\n"); kfree(buf); return; } + + ofs = write_ofs(c); + if (end-start >= c->wbuf_pagesize) { /* Need to do another write immediately, but it's possible that this is just because the wbuf itself is completely - full, and there's nothing earlier read back from the - flash. Hence 'buf' isn't necessarily what we're writing + full, and there's nothing earlier read back from the + flash. Hence 'buf' isn't necessarily what we're writing from. */ unsigned char *rewrite_buf = buf?:c->wbuf; uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize); - D1(printk(KERN_DEBUG "Write 0x%x bytes at 0x%08x in wbuf recover\n", - towrite, ofs)); - + jffs2_dbg(1, "Write 0x%x bytes at 0x%08x in wbuf recover\n", + towrite, ofs); + #ifdef BREAKMEHEADER static int breakme; if (breakme++ == 20) { - printk(KERN_NOTICE "Faking write error at 0x%08x\n", ofs); + pr_notice("Faking write error at 0x%08x\n", ofs); breakme = 0; - c->mtd->write_ecc(c->mtd, ofs, towrite, &retlen, - brokenbuf, NULL, c->oobinfo); + mtd_write(c->mtd, ofs, towrite, &retlen, brokenbuf); ret = -EIO; } else #endif - if (jffs2_cleanmarker_oob(c)) - ret = c->mtd->write_ecc(c->mtd, ofs, towrite, &retlen, - rewrite_buf, NULL, c->oobinfo); - else - ret = c->mtd->write(c->mtd, ofs, towrite, &retlen, rewrite_buf); + ret = mtd_write(c->mtd, ofs, towrite, &retlen, + rewrite_buf); - if (ret || retlen != towrite) { + if (ret || retlen != towrite || jffs2_verify_write(c, rewrite_buf, ofs)) { /* Argh. We tried. Really we did. 
*/ - printk(KERN_CRIT "Recovery of wbuf failed due to a second write error\n"); + pr_crit("Recovery of wbuf failed due to a second write error\n"); kfree(buf); - if (retlen) { - struct jffs2_raw_node_ref *raw2; - - raw2 = jffs2_alloc_raw_node_ref(); - if (!raw2) - return; - - raw2->flash_offset = ofs | REF_OBSOLETE; - raw2->__totlen = ref_totlen(c, jeb, *first_raw); - raw2->next_phys = NULL; - raw2->next_in_ino = NULL; + if (retlen) + jffs2_add_physical_node_ref(c, ofs | REF_OBSOLETE, ref_totlen(c, jeb, first_raw), NULL); - jffs2_add_physical_node_ref(c, raw2); - } return; } - printk(KERN_NOTICE "Recovery of wbuf succeeded to %08x\n", ofs); + pr_notice("Recovery of wbuf succeeded to %08x\n", ofs); c->wbuf_len = (end - start) - towrite; c->wbuf_ofs = ofs + towrite; memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len); /* Don't muck about with c->wbuf_inodes. False positives are harmless. */ - if (buf) - kfree(buf); } else { /* OK, now we're left with the dregs in whichever buffer we're using */ if (buf) { memcpy(c->wbuf, buf, end-start); - kfree(buf); } else { memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start); } @@ -343,62 +460,112 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) new_jeb = &c->blocks[ofs / c->sector_size]; spin_lock(&c->erase_completion_lock); - if (new_jeb->first_node) { - /* Odd, but possible with ST flash later maybe */ - new_jeb->last_node->next_phys = *first_raw; - } else { - new_jeb->first_node = *first_raw; - } + for (raw = first_raw; raw != jeb->last_node; raw = ref_next(raw)) { + uint32_t rawlen = ref_totlen(c, jeb, raw); + struct jffs2_inode_cache *ic; + struct jffs2_raw_node_ref *new_ref; + struct jffs2_raw_node_ref **adjust_ref = NULL; + struct jffs2_inode_info *f = NULL; + + jffs2_dbg(1, "Refiling block of %08x at %08x(%d) to %08x\n", + rawlen, ref_offset(raw), ref_flags(raw), ofs); + + ic = jffs2_raw_ref_to_ic(raw); + + /* Ick. This XATTR mess should be fixed shortly... */ + if (ic && ic->class == RAWNODE_CLASS_XATTR_DATUM) { + struct jffs2_xattr_datum *xd = (void *)ic; + BUG_ON(xd->node != raw); + adjust_ref = &xd->node; + raw->next_in_ino = NULL; + ic = NULL; + } else if (ic && ic->class == RAWNODE_CLASS_XATTR_REF) { + struct jffs2_xattr_datum *xr = (void *)ic; + BUG_ON(xr->node != raw); + adjust_ref = &xr->node; + raw->next_in_ino = NULL; + ic = NULL; + } else if (ic && ic->class == RAWNODE_CLASS_INODE_CACHE) { + struct jffs2_raw_node_ref **p = &ic->nodes; + + /* Remove the old node from the per-inode list */ + while (*p && *p != (void *)ic) { + if (*p == raw) { + (*p) = (raw->next_in_ino); + raw->next_in_ino = NULL; + break; + } + p = &((*p)->next_in_ino); + } - raw = first_raw; - while (*raw) { - uint32_t rawlen = ref_totlen(c, jeb, *raw); + if (ic->state == INO_STATE_PRESENT && !ref_obsolete(raw)) { + /* If it's an in-core inode, then we have to adjust any + full_dirent or full_dnode structure to point to the + new version instead of the old */ + f = jffs2_gc_fetch_inode(c, ic->ino, !ic->pino_nlink); + if (IS_ERR(f)) { + /* Should never happen; it _must_ be present */ + JFFS2_ERROR("Failed to iget() ino #%u, err %ld\n", + ic->ino, PTR_ERR(f)); + BUG(); + } + /* We don't lock f->sem. There's a number of ways we could + end up in here with it already being locked, and nobody's + going to modify it on us anyway because we hold the + alloc_sem. We're only changing one ->raw pointer too, + which we can get away with without upsetting readers. 
*/ + adjust_ref = jffs2_incore_replace_raw(c, f, raw, + (void *)(buf?:c->wbuf) + (ref_offset(raw) - start)); + } else if (unlikely(ic->state != INO_STATE_PRESENT && + ic->state != INO_STATE_CHECKEDABSENT && + ic->state != INO_STATE_GC)) { + JFFS2_ERROR("Inode #%u is in strange state %d!\n", ic->ino, ic->state); + BUG(); + } + } - D1(printk(KERN_DEBUG "Refiling block of %08x at %08x(%d) to %08x\n", - rawlen, ref_offset(*raw), ref_flags(*raw), ofs)); + new_ref = jffs2_link_node_ref(c, new_jeb, ofs | ref_flags(raw), rawlen, ic); - if (ref_obsolete(*raw)) { - /* Shouldn't really happen much */ - new_jeb->dirty_size += rawlen; - new_jeb->free_size -= rawlen; - c->dirty_size += rawlen; - } else { - new_jeb->used_size += rawlen; - new_jeb->free_size -= rawlen; + if (adjust_ref) { + BUG_ON(*adjust_ref != raw); + *adjust_ref = new_ref; + } + if (f) + jffs2_gc_release_inode(c, f); + + if (!ref_obsolete(raw)) { jeb->dirty_size += rawlen; jeb->used_size -= rawlen; c->dirty_size += rawlen; + c->used_size -= rawlen; + raw->flash_offset = ref_offset(raw) | REF_OBSOLETE; + BUG_ON(raw->next_in_ino); } - c->free_size -= rawlen; - (*raw)->flash_offset = ofs | ref_flags(*raw); ofs += rawlen; - new_jeb->last_node = *raw; - - raw = &(*raw)->next_phys; } + kfree(buf); + /* Fix up the original jeb now it's on the bad_list */ - *first_raw = NULL; - if (first_raw == &jeb->first_node) { - jeb->last_node = NULL; - D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset)); - list_del(&jeb->list); - list_add(&jeb->list, &c->erase_pending_list); + if (first_raw == jeb->first_node) { + jffs2_dbg(1, "Failing block at %08x is now empty. Moving to erase_pending_list\n", + jeb->offset); + list_move(&jeb->list, &c->erase_pending_list); c->nr_erasing_blocks++; - jffs2_erase_pending_trigger(c); + jffs2_garbage_collect_trigger(c); } - else - jeb->last_node = container_of(first_raw, struct jffs2_raw_node_ref, next_phys); - ACCT_SANITY_CHECK(c,jeb); - D1(ACCT_PARANOIA_CHECK(jeb)); + jffs2_dbg_acct_sanity_check_nolock(c, jeb); + jffs2_dbg_acct_paranoia_check_nolock(c, jeb); - ACCT_SANITY_CHECK(c,new_jeb); - D1(ACCT_PARANOIA_CHECK(new_jeb)); + jffs2_dbg_acct_sanity_check_nolock(c, new_jeb); + jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb); spin_unlock(&c->erase_completion_lock); - D1(printk(KERN_DEBUG "wbuf recovery completed OK\n")); + jffs2_dbg(1, "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n", + c->wbuf_ofs, c->wbuf_len); + } /* Meaning of pad argument: @@ -412,6 +579,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad) { + struct jffs2_eraseblock *wbuf_jeb; int ret; size_t retlen; @@ -420,28 +588,31 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad) if (!jffs2_is_writebuffered(c)) return 0; - if (!down_trylock(&c->alloc_sem)) { - up(&c->alloc_sem); - printk(KERN_CRIT "jffs2_flush_wbuf() called with alloc_sem not locked!\n"); + if (!mutex_is_locked(&c->alloc_sem)) { + pr_crit("jffs2_flush_wbuf() called with alloc_sem not locked!\n"); BUG(); } if (!c->wbuf_len) /* already checked c->wbuf above */ return 0; + wbuf_jeb = &c->blocks[c->wbuf_ofs / c->sector_size]; + if (jffs2_prealloc_raw_node_refs(c, wbuf_jeb, c->nextblock->allocated_refs + 1)) + return -ENOMEM; + /* claim remaining space on the page this happens, if we have a change to a new block, or if fsync forces us to flush the writebuffer. if we have a switch to next page, we will not have - enough remaining space for this. 
+ enough remaining space for this. */ - if (pad && !jffs2_dataflash(c)) { + if (pad ) { c->wbuf_len = PAD(c->wbuf_len); /* Pad with JFFS2_DIRTY_BITMASK initially. this helps out ECC'd NOR with 8 byte page size */ memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len); - + if ( c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) { struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len); padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); @@ -452,63 +623,65 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad) } /* else jffs2_flash_writev has actually filled in the rest of the buffer for us, and will deal with the node refs etc. later. */ - + #ifdef BREAKME static int breakme; if (breakme++ == 20) { - printk(KERN_NOTICE "Faking write error at 0x%08x\n", c->wbuf_ofs); + pr_notice("Faking write error at 0x%08x\n", c->wbuf_ofs); breakme = 0; - c->mtd->write_ecc(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, - &retlen, brokenbuf, NULL, c->oobinfo); + mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, + brokenbuf); ret = -EIO; - } else + } else #endif - - if (jffs2_cleanmarker_oob(c)) - ret = c->mtd->write_ecc(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf, NULL, c->oobinfo); - else - ret = c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf); - if (ret || retlen != c->wbuf_pagesize) { - if (ret) - printk(KERN_WARNING "jffs2_flush_wbuf(): Write failed with %d\n",ret); - else { - printk(KERN_WARNING "jffs2_flush_wbuf(): Write was short: %zd instead of %d\n", - retlen, c->wbuf_pagesize); - ret = -EIO; - } + ret = mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, + &retlen, c->wbuf); + if (ret) { + pr_warn("jffs2_flush_wbuf(): Write failed with %d\n", ret); + goto wfail; + } else if (retlen != c->wbuf_pagesize) { + pr_warn("jffs2_flush_wbuf(): Write was short: %zd instead of %d\n", + retlen, c->wbuf_pagesize); + ret = -EIO; + goto wfail; + } else if ((ret = jffs2_verify_write(c, c->wbuf, c->wbuf_ofs))) { + wfail: jffs2_wbuf_recover(c); return ret; } - spin_lock(&c->erase_completion_lock); - /* Adjust free size of the block if we padded. */ - if (pad && !jffs2_dataflash(c)) { - struct jffs2_eraseblock *jeb; - - jeb = &c->blocks[c->wbuf_ofs / c->sector_size]; + if (pad) { + uint32_t waste = c->wbuf_pagesize - c->wbuf_len; - D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n", - (jeb==c->nextblock)?"next":"", jeb->offset)); + jffs2_dbg(1, "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n", + (wbuf_jeb == c->nextblock) ? "next" : "", + wbuf_jeb->offset); - /* wbuf_pagesize - wbuf_len is the amount of space that's to be + /* wbuf_pagesize - wbuf_len is the amount of space that's to be padded. If there is less free space in the block than that, something screwed up */ - if (jeb->free_size < (c->wbuf_pagesize - c->wbuf_len)) { - printk(KERN_CRIT "jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n", - c->wbuf_ofs, c->wbuf_len, c->wbuf_pagesize-c->wbuf_len); - printk(KERN_CRIT "jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n", - jeb->offset, jeb->free_size); + if (wbuf_jeb->free_size < waste) { + pr_crit("jffs2_flush_wbuf(): Accounting error. 
wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n", + c->wbuf_ofs, c->wbuf_len, waste); + pr_crit("jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n", + wbuf_jeb->offset, wbuf_jeb->free_size); BUG(); } - jeb->free_size -= (c->wbuf_pagesize - c->wbuf_len); - c->free_size -= (c->wbuf_pagesize - c->wbuf_len); - jeb->wasted_size += (c->wbuf_pagesize - c->wbuf_len); - c->wasted_size += (c->wbuf_pagesize - c->wbuf_len); - } + + spin_lock(&c->erase_completion_lock); + + jffs2_link_node_ref(c, wbuf_jeb, (c->wbuf_ofs + c->wbuf_len) | REF_OBSOLETE, waste, NULL); + /* FIXME: that made it count as dirty. Convert to wasted */ + wbuf_jeb->dirty_size -= waste; + c->dirty_size -= waste; + wbuf_jeb->wasted_size += waste; + c->wasted_size += waste; + } else + spin_lock(&c->erase_completion_lock); /* Stick any now-obsoleted blocks on the erase_pending_list */ jffs2_refile_wbuf_blocks(c); @@ -522,9 +695,9 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad) return 0; } -/* Trigger garbage collection to flush the write-buffer. +/* Trigger garbage collection to flush the write-buffer. If ino arg is zero, do it if _any_ real (i.e. not GC) writes are - outstanding. If ino arg non-zero, do it only if a write for the + outstanding. If ino arg non-zero, do it only if a write for the given inode is outstanding. */ int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino) { @@ -532,15 +705,15 @@ int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino) uint32_t old_wbuf_len; int ret = 0; - D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino)); + jffs2_dbg(1, "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino); if (!c->wbuf) return 0; - down(&c->alloc_sem); + mutex_lock(&c->alloc_sem); if (!jffs2_wbuf_pending_for_ino(c, ino)) { - D1(printk(KERN_DEBUG "Ino #%d not pending in wbuf. Returning\n", ino)); - up(&c->alloc_sem); + jffs2_dbg(1, "Ino #%d not pending in wbuf. Returning\n", ino); + mutex_unlock(&c->alloc_sem); return 0; } @@ -549,7 +722,8 @@ int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino) if (c->unchecked_size) { /* GC won't make any progress for a while */ - D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() padding. Not finished checking\n")); + jffs2_dbg(1, "%s(): padding. Not finished checking\n", + __func__); down_write(&c->wbuf_sem); ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING); /* retry flushing wbuf in case jffs2_wbuf_recover @@ -560,14 +734,14 @@ int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino) } else while (old_wbuf_len && old_wbuf_ofs == c->wbuf_ofs) { - up(&c->alloc_sem); + mutex_unlock(&c->alloc_sem); - D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() calls gc pass\n")); + jffs2_dbg(1, "%s(): calls gc pass\n", __func__); ret = jffs2_garbage_collect_pass(c); if (ret) { /* GC failed. 
Flush it with padding instead */ - down(&c->alloc_sem); + mutex_lock(&c->alloc_sem); down_write(&c->wbuf_sem); ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING); /* retry flushing wbuf in case jffs2_wbuf_recover @@ -577,12 +751,12 @@ int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino) up_write(&c->wbuf_sem); break; } - down(&c->alloc_sem); + mutex_lock(&c->alloc_sem); } - D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() ends...\n")); + jffs2_dbg(1, "%s(): ends...\n", __func__); - up(&c->alloc_sem); + mutex_unlock(&c->alloc_sem); return ret; } @@ -604,243 +778,158 @@ int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c) return ret; } -#ifdef CONFIG_JFFS2_FS_WRITEBUFFER -#define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) ) -#define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) ) -#else -#define PAGE_DIV(x) ( (x) & (~(c->wbuf_pagesize - 1)) ) -#define PAGE_MOD(x) ( (x) & (c->wbuf_pagesize - 1) ) -#endif +static size_t jffs2_fill_wbuf(struct jffs2_sb_info *c, const uint8_t *buf, + size_t len) +{ + if (len && !c->wbuf_len && (len >= c->wbuf_pagesize)) + return 0; -int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsigned long count, loff_t to, size_t *retlen, uint32_t ino) + if (len > (c->wbuf_pagesize - c->wbuf_len)) + len = c->wbuf_pagesize - c->wbuf_len; + memcpy(c->wbuf + c->wbuf_len, buf, len); + c->wbuf_len += (uint32_t) len; + return len; +} + +int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, + unsigned long count, loff_t to, size_t *retlen, + uint32_t ino) { - struct kvec outvecs[3]; - uint32_t totlen = 0; - uint32_t split_ofs = 0; - uint32_t old_totlen; - int ret, splitvec = -1; - int invec, outvec; - size_t wbuf_retlen; - unsigned char *wbuf_ptr; - size_t donelen = 0; + struct jffs2_eraseblock *jeb; + size_t wbuf_retlen, donelen = 0; uint32_t outvec_to = to; + int ret, invec; - /* If not NAND flash, don't bother */ + /* If not writebuffered flash, don't bother */ if (!jffs2_is_writebuffered(c)) return jffs2_flash_direct_writev(c, invecs, count, to, retlen); - + down_write(&c->wbuf_sem); /* If wbuf_ofs is not initialized, set it to target address */ if (c->wbuf_ofs == 0xFFFFFFFF) { c->wbuf_ofs = PAGE_DIV(to); - c->wbuf_len = PAGE_MOD(to); + c->wbuf_len = PAGE_MOD(to); memset(c->wbuf,0xff,c->wbuf_pagesize); } - /* Fixup the wbuf if we are moving to a new eraseblock. The checks below - fail for ECC'd NOR because cleanmarker == 16, so a block starts at - xxx0010. */ - if (jffs2_nor_ecc(c)) { - if (((c->wbuf_ofs % c->sector_size) == 0) && !c->wbuf_len) { - c->wbuf_ofs = PAGE_DIV(to); - c->wbuf_len = PAGE_MOD(to); - memset(c->wbuf,0xff,c->wbuf_pagesize); - } - } - - /* Sanity checks on target address. - It's permitted to write at PAD(c->wbuf_len+c->wbuf_ofs), - and it's permitted to write at the beginning of a new - erase block. Anything else, and you die. - New block starts at xxx000c (0-b = block header) - */ + /* + * Sanity checks on target address. It's permitted to write + * at PAD(c->wbuf_len+c->wbuf_ofs), and it's permitted to + * write at the beginning of a new erase block. Anything else, + * and you die. 
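The hunk above drops the duplicated PAGE_DIV/PAGE_MOD definitions and introduces jffs2_fill_wbuf(). The distinction between the two macro forms matters because write-buffered devices are not guaranteed power-of-two page sizes (DataFlash uses 528 or 1056 bytes), so the generic divide/modulo form has to be used there. A small standalone comparison, with an arbitrary offset, makes the difference visible:

#include <stdio.h>

static unsigned long page_div(unsigned long x, unsigned long pagesize)
{
	return (x / pagesize) * pagesize;       /* correct for any page size */
}

static unsigned long page_div_mask(unsigned long x, unsigned long pagesize)
{
	return x & ~(pagesize - 1);             /* only valid for power-of-two pages */
}

int main(void)
{
	unsigned long ofs = 1100;

	printf("528-byte pages: div=%lu mask=%lu\n",
	       page_div(ofs, 528), page_div_mask(ofs, 528));
	printf("512-byte pages: div=%lu mask=%lu\n",
	       page_div(ofs, 512), page_div_mask(ofs, 512));
	return 0;
}

For the 528-byte case the two forms disagree (1056 vs. 1088), which is why the divide/modulo variant is the one that survives for write-buffered flash.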
New block starts at xxx000c (0-b = block + * header) + */ if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) { /* It's a write to a new block */ if (c->wbuf_len) { - D1(printk(KERN_DEBUG "jffs2_flash_writev() to 0x%lx causes flush of wbuf at 0x%08x\n", (unsigned long)to, c->wbuf_ofs)); + jffs2_dbg(1, "%s(): to 0x%lx causes flush of wbuf at 0x%08x\n", + __func__, (unsigned long)to, c->wbuf_ofs); ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT); - if (ret) { - /* the underlying layer has to check wbuf_len to do the cleanup */ - D1(printk(KERN_WARNING "jffs2_flush_wbuf() called from jffs2_flash_writev() failed %d\n", ret)); - *retlen = 0; - goto exit; - } + if (ret) + goto outerr; } /* set pointer to new block */ c->wbuf_ofs = PAGE_DIV(to); - c->wbuf_len = PAGE_MOD(to); - } + c->wbuf_len = PAGE_MOD(to); + } if (to != PAD(c->wbuf_ofs + c->wbuf_len)) { /* We're not writing immediately after the writebuffer. Bad. */ - printk(KERN_CRIT "jffs2_flash_writev(): Non-contiguous write to %08lx\n", (unsigned long)to); + pr_crit("%s(): Non-contiguous write to %08lx\n", + __func__, (unsigned long)to); if (c->wbuf_len) - printk(KERN_CRIT "wbuf was previously %08x-%08x\n", - c->wbuf_ofs, c->wbuf_ofs+c->wbuf_len); - BUG(); - } - - /* Note outvecs[3] above. We know count is never greater than 2 */ - if (count > 2) { - printk(KERN_CRIT "jffs2_flash_writev(): count is %ld\n", count); + pr_crit("wbuf was previously %08x-%08x\n", + c->wbuf_ofs, c->wbuf_ofs + c->wbuf_len); BUG(); } - invec = 0; - outvec = 0; - - /* Fill writebuffer first, if already in use */ - if (c->wbuf_len) { - uint32_t invec_ofs = 0; - - /* adjust alignment offset */ - if (c->wbuf_len != PAGE_MOD(to)) { - c->wbuf_len = PAGE_MOD(to); - /* take care of alignment to next page */ - if (!c->wbuf_len) - c->wbuf_len = c->wbuf_pagesize; - } - - while(c->wbuf_len < c->wbuf_pagesize) { - uint32_t thislen; - - if (invec == count) - goto alldone; - - thislen = c->wbuf_pagesize - c->wbuf_len; - - if (thislen >= invecs[invec].iov_len) - thislen = invecs[invec].iov_len; - - invec_ofs = thislen; - - memcpy(c->wbuf + c->wbuf_len, invecs[invec].iov_base, thislen); - c->wbuf_len += thislen; - donelen += thislen; - /* Get next invec, if actual did not fill the buffer */ - if (c->wbuf_len < c->wbuf_pagesize) - invec++; - } - - /* write buffer is full, flush buffer */ - ret = __jffs2_flush_wbuf(c, NOPAD); - if (ret) { - /* the underlying layer has to check wbuf_len to do the cleanup */ - D1(printk(KERN_WARNING "jffs2_flush_wbuf() called from jffs2_flash_writev() failed %d\n", ret)); - /* Retlen zero to make sure our caller doesn't mark the space dirty. - We've already done everything that's necessary */ - *retlen = 0; - goto exit; - } - outvec_to += donelen; - c->wbuf_ofs = outvec_to; - - /* All invecs done ? */ - if (invec == count) - goto alldone; - - /* Set up the first outvec, containing the remainder of the - invec we partially used */ - if (invecs[invec].iov_len > invec_ofs) { - outvecs[0].iov_base = invecs[invec].iov_base+invec_ofs; - totlen = outvecs[0].iov_len = invecs[invec].iov_len-invec_ofs; - if (totlen > c->wbuf_pagesize) { - splitvec = outvec; - split_ofs = outvecs[0].iov_len - PAGE_MOD(totlen); - } - outvec++; - } - invec++; - } - - /* OK, now we've flushed the wbuf and the start of the bits - we have been asked to write, now to write the rest.... 
*/ - - /* totlen holds the amount of data still to be written */ - old_totlen = totlen; - for ( ; invec < count; invec++,outvec++ ) { - outvecs[outvec].iov_base = invecs[invec].iov_base; - totlen += outvecs[outvec].iov_len = invecs[invec].iov_len; - if (PAGE_DIV(totlen) != PAGE_DIV(old_totlen)) { - splitvec = outvec; - split_ofs = outvecs[outvec].iov_len - PAGE_MOD(totlen); - old_totlen = totlen; + /* adjust alignment offset */ + if (c->wbuf_len != PAGE_MOD(to)) { + c->wbuf_len = PAGE_MOD(to); + /* take care of alignment to next page */ + if (!c->wbuf_len) { + c->wbuf_len = c->wbuf_pagesize; + ret = __jffs2_flush_wbuf(c, NOPAD); + if (ret) + goto outerr; } } - /* Now the outvecs array holds all the remaining data to write */ - /* Up to splitvec,split_ofs is to be written immediately. The rest - goes into the (now-empty) wbuf */ - - if (splitvec != -1) { - uint32_t remainder; - - remainder = outvecs[splitvec].iov_len - split_ofs; - outvecs[splitvec].iov_len = split_ofs; + for (invec = 0; invec < count; invec++) { + int vlen = invecs[invec].iov_len; + uint8_t *v = invecs[invec].iov_base; - /* We did cross a page boundary, so we write some now */ - if (jffs2_cleanmarker_oob(c)) - ret = c->mtd->writev_ecc(c->mtd, outvecs, splitvec+1, outvec_to, &wbuf_retlen, NULL, c->oobinfo); - else - ret = jffs2_flash_direct_writev(c, outvecs, splitvec+1, outvec_to, &wbuf_retlen); - - if (ret < 0 || wbuf_retlen != PAGE_DIV(totlen)) { - /* At this point we have no problem, - c->wbuf is empty. However refile nextblock to avoid - writing again to same address. - */ - struct jffs2_eraseblock *jeb; + wbuf_retlen = jffs2_fill_wbuf(c, v, vlen); - spin_lock(&c->erase_completion_lock); - - jeb = &c->blocks[outvec_to / c->sector_size]; - jffs2_block_refile(c, jeb, REFILE_ANYWAY); - - *retlen = 0; - spin_unlock(&c->erase_completion_lock); - goto exit; + if (c->wbuf_len == c->wbuf_pagesize) { + ret = __jffs2_flush_wbuf(c, NOPAD); + if (ret) + goto outerr; } - + vlen -= wbuf_retlen; + outvec_to += wbuf_retlen; donelen += wbuf_retlen; - c->wbuf_ofs = PAGE_DIV(outvec_to) + PAGE_DIV(totlen); + v += wbuf_retlen; + + if (vlen >= c->wbuf_pagesize) { + ret = mtd_write(c->mtd, outvec_to, PAGE_DIV(vlen), + &wbuf_retlen, v); + if (ret < 0 || wbuf_retlen != PAGE_DIV(vlen)) + goto outfile; + + vlen -= wbuf_retlen; + outvec_to += wbuf_retlen; + c->wbuf_ofs = outvec_to; + donelen += wbuf_retlen; + v += wbuf_retlen; + } - if (remainder) { - outvecs[splitvec].iov_base += split_ofs; - outvecs[splitvec].iov_len = remainder; - } else { - splitvec++; + wbuf_retlen = jffs2_fill_wbuf(c, v, vlen); + if (c->wbuf_len == c->wbuf_pagesize) { + ret = __jffs2_flush_wbuf(c, NOPAD); + if (ret) + goto outerr; } - } else { - splitvec = 0; + outvec_to += wbuf_retlen; + donelen += wbuf_retlen; } - /* Now splitvec points to the start of the bits we have to copy - into the wbuf */ - wbuf_ptr = c->wbuf; + /* + * If there's a remainder in the wbuf and it's a non-GC write, + * remember that the wbuf affects this ino + */ + *retlen = donelen; - for ( ; splitvec < outvec; splitvec++) { - /* Don't copy the wbuf into itself */ - if (outvecs[splitvec].iov_base == c->wbuf) - continue; - memcpy(wbuf_ptr, outvecs[splitvec].iov_base, outvecs[splitvec].iov_len); - wbuf_ptr += outvecs[splitvec].iov_len; - donelen += outvecs[splitvec].iov_len; + if (jffs2_sum_active()) { + int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) to); + if (res) + return res; } - c->wbuf_len = wbuf_ptr - c->wbuf; - - /* If there's a remainder in the wbuf and it's a non-GC write, - 
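The rewritten jffs2_flash_writev() above replaces the old outvec-splitting logic with a simpler per-iovec loop: top up the write buffer, flush it when it fills, push any whole pages straight to the device, and keep the tail buffered. Below is a compact userspace model of that control flow, assuming a 16-byte page and a plain byte array standing in for the flash; it is a sketch of the loop, not the kernel code.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGESIZE 16u

static uint8_t flash[256];
static uint8_t wbuf[PAGESIZE];
static size_t wbuf_len, wbuf_ofs;

static size_t fill_wbuf(const uint8_t *buf, size_t len)
{
	/* Bypass the buffer entirely for bulk data arriving on an empty buffer */
	if (len && !wbuf_len && len >= PAGESIZE)
		return 0;
	if (len > PAGESIZE - wbuf_len)
		len = PAGESIZE - wbuf_len;
	memcpy(wbuf + wbuf_len, buf, len);
	wbuf_len += len;
	return len;
}

static void flush_wbuf(void)
{
	memcpy(flash + wbuf_ofs, wbuf, wbuf_len);   /* stands in for mtd_write() */
	wbuf_ofs += PAGESIZE;
	wbuf_len = 0;
}

static void writev_one(const uint8_t *v, size_t vlen)
{
	size_t done = fill_wbuf(v, vlen);

	if (wbuf_len == PAGESIZE)
		flush_wbuf();
	v += done;
	vlen -= done;

	while (vlen >= PAGESIZE) {                  /* whole pages go straight out */
		memcpy(flash + wbuf_ofs, v, PAGESIZE);
		wbuf_ofs += PAGESIZE;
		v += PAGESIZE;
		vlen -= PAGESIZE;
	}

	fill_wbuf(v, vlen);                         /* keep the tail buffered */
	if (wbuf_len == PAGESIZE)
		flush_wbuf();
}

int main(void)
{
	uint8_t a[10], b[40];

	memset(a, 0xaa, sizeof(a));
	memset(b, 0xbb, sizeof(b));
	writev_one(a, sizeof(a));
	writev_one(b, sizeof(b));
	printf("flushed up to %zu, %zu bytes still buffered\n", wbuf_ofs, wbuf_len);
	return 0;
}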
remember that the wbuf affects this ino */ -alldone: - *retlen = donelen; if (c->wbuf_len && ino) jffs2_wbuf_dirties_inode(c, ino); ret = 0; - -exit: + up_write(&c->wbuf_sem); + return ret; + +outfile: + /* + * At this point we have no problem, c->wbuf is empty. However + * refile nextblock to avoid writing again to same address. + */ + + spin_lock(&c->erase_completion_lock); + + jeb = &c->blocks[outvec_to / c->sector_size]; + jffs2_block_refile(c, jeb, REFILE_ANYWAY); + + spin_unlock(&c->erase_completion_lock); + +outerr: + *retlen = 0; up_write(&c->wbuf_sem); return ret; } @@ -849,12 +938,13 @@ exit: * This is the entry for flash write. * Check, if we work on NAND FLASH, if so build an kvec and write it via vritev */ -int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, const u_char *buf) +int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, + size_t *retlen, const u_char *buf) { struct kvec vecs[1]; if (!jffs2_is_writebuffered(c)) - return c->mtd->write(c->mtd, ofs, len, retlen, buf); + return jffs2_flash_direct_write(c, ofs, len, retlen, buf); vecs[0].iov_base = (unsigned char *) buf; vecs[0].iov_len = len; @@ -870,30 +960,28 @@ int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *re int ret; if (!jffs2_is_writebuffered(c)) - return c->mtd->read(c->mtd, ofs, len, retlen, buf); + return mtd_read(c->mtd, ofs, len, retlen, buf); /* Read flash */ down_read(&c->wbuf_sem); - if (jffs2_cleanmarker_oob(c)) - ret = c->mtd->read_ecc(c->mtd, ofs, len, retlen, buf, NULL, c->oobinfo); - else - ret = c->mtd->read(c->mtd, ofs, len, retlen, buf); - - if ( (ret == -EBADMSG) && (*retlen == len) ) { - printk(KERN_WARNING "mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n", - len, ofs); - /* - * We have the raw data without ECC correction in the buffer, maybe - * we are lucky and all data or parts are correct. We check the node. - * If data are corrupted node check will sort it out. - * We keep this block, it will fail on write or erase and the we - * mark it bad. Or should we do that now? But we should give him a chance. - * Maybe we had a system crash or power loss before the ecc write or - * a erase was completed. + ret = mtd_read(c->mtd, ofs, len, retlen, buf); + + if ( (ret == -EBADMSG || ret == -EUCLEAN) && (*retlen == len) ) { + if (ret == -EBADMSG) + pr_warn("mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n", + len, ofs); + /* + * We have the raw data without ECC correction in the buffer, + * maybe we are lucky and all data or parts are correct. We + * check the node. If data are corrupted node check will sort + * it out. We keep this block, it will fail on write or erase + * and the we mark it bad. Or should we do that now? But we + * should give him a chance. Maybe we had a system crash or + * power loss before the ecc write or a erase was completed. * So we return success. :) */ - ret = 0; - } + ret = 0; + } /* if no writebuffer available or write buffer empty, return */ if (!c->wbuf_pagesize || !c->wbuf_len) @@ -908,16 +996,16 @@ int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *re if (owbf > c->wbuf_len) /* is read beyond write buffer ? */ goto exit; lwbf = c->wbuf_len - owbf; /* number of bytes to copy */ - if (lwbf > len) + if (lwbf > len) lwbf = len; - } else { + } else { orbf = (c->wbuf_ofs - ofs); /* offset in read buffer */ if (orbf > len) /* is write beyond write buffer ? 
*/ goto exit; - lwbf = len - orbf; /* number of bytes to copy */ - if (lwbf > c->wbuf_len) + lwbf = len - orbf; /* number of bytes to copy */ + if (lwbf > c->wbuf_len) lwbf = c->wbuf_len; - } + } if (lwbf > 0) memcpy(buf+orbf,c->wbuf+owbf,lwbf); @@ -926,165 +1014,117 @@ exit: return ret; } +#define NR_OOB_SCAN_PAGES 4 + +/* For historical reasons we use only 8 bytes for OOB clean marker */ +#define OOB_CM_SIZE 8 + +static const struct jffs2_unknown_node oob_cleanmarker = +{ + .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK), + .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER), + .totlen = constant_cpu_to_je32(8) +}; + /* - * Check, if the out of band area is empty + * Check, if the out of band area is empty. This function knows about the clean + * marker and if it is present in OOB, treats the OOB as empty anyway. */ -int jffs2_check_oob_empty( struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int mode) +int jffs2_check_oob_empty(struct jffs2_sb_info *c, + struct jffs2_eraseblock *jeb, int mode) { - unsigned char *buf; - int ret = 0; - int i,len,page; - size_t retlen; - int oob_size; - - /* allocate a buffer for all oob data in this sector */ - oob_size = c->mtd->oobsize; - len = 4 * oob_size; - buf = kmalloc(len, GFP_KERNEL); - if (!buf) { - printk(KERN_NOTICE "jffs2_check_oob_empty(): allocation of temporary data buffer for oob check failed\n"); - return -ENOMEM; - } - /* - * if mode = 0, we scan for a total empty oob area, else we have - * to take care of the cleanmarker in the first page of the block - */ - ret = jffs2_flash_read_oob(c, jeb->offset, len , &retlen, buf); - if (ret) { - D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB failed %d for block at %08x\n", ret, jeb->offset)); - goto out; - } - - if (retlen < len) { - D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB return short read " - "(%zd bytes not %d) for block at %08x\n", retlen, len, jeb->offset)); - ret = -EIO; - goto out; + int i, ret; + int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE); + struct mtd_oob_ops ops; + + ops.mode = MTD_OPS_AUTO_OOB; + ops.ooblen = NR_OOB_SCAN_PAGES * c->oobavail; + ops.oobbuf = c->oobbuf; + ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0; + ops.datbuf = NULL; + + ret = mtd_read_oob(c->mtd, jeb->offset, &ops); + if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) { + pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n", + jeb->offset, ops.ooblen, ops.oobretlen, ret); + if (!ret || mtd_is_bitflip(ret)) + ret = -EIO; + return ret; } - - /* Special check for first page */ - for(i = 0; i < oob_size ; i++) { - /* Yeah, we know about the cleanmarker. 
*/ - if (mode && i >= c->fsdata_pos && - i < c->fsdata_pos + c->fsdata_len) - continue; - if (buf[i] != 0xFF) { - D2(printk(KERN_DEBUG "Found %02x at %x in OOB for %08x\n", - buf[page+i], page+i, jeb->offset)); - ret = 1; - goto out; - } - } + for(i = 0; i < ops.ooblen; i++) { + if (mode && i < cmlen) + /* Yeah, we know about the cleanmarker */ + continue; - /* we know, we are aligned :) */ - for (page = oob_size; page < len; page += sizeof(long)) { - unsigned long dat = *(unsigned long *)(&buf[page]); - if(dat != -1) { - ret = 1; - goto out; + if (ops.oobbuf[i] != 0xFF) { + jffs2_dbg(2, "Found %02x at %x in OOB for " + "%08x\n", ops.oobbuf[i], i, jeb->offset); + return 1; } } -out: - kfree(buf); - - return ret; + return 0; } /* -* Scan for a valid cleanmarker and for bad blocks -* For virtual blocks (concatenated physical blocks) check the cleanmarker -* only in the first page of the first physical block, but scan for bad blocks in all -* physical blocks -*/ -int jffs2_check_nand_cleanmarker (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) + * Check for a valid cleanmarker. + * Returns: 0 if a valid cleanmarker was found + * 1 if no cleanmarker was found + * negative error code if an error occurred + */ +int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c, + struct jffs2_eraseblock *jeb) { - struct jffs2_unknown_node n; - unsigned char buf[2 * NAND_MAX_OOBSIZE]; - unsigned char *p; - int ret, i, cnt, retval = 0; - size_t retlen, offset; - int oob_size; - - offset = jeb->offset; - oob_size = c->mtd->oobsize; - - /* Loop through the physical blocks */ - for (cnt = 0; cnt < (c->sector_size / c->mtd->erasesize); cnt++) { - /* Check first if the block is bad. */ - if (c->mtd->block_isbad (c->mtd, offset)) { - D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Bad block at %08x\n", jeb->offset)); - return 2; - } - /* - * We read oob data from page 0 and 1 of the block. 
- * page 0 contains cleanmarker and badblock info - * page 1 contains failure count of this block - */ - ret = c->mtd->read_oob (c->mtd, offset, oob_size << 1, &retlen, buf); - - if (ret) { - D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Read OOB failed %d for block at %08x\n", ret, jeb->offset)); - return ret; - } - if (retlen < (oob_size << 1)) { - D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Read OOB return short read (%zd bytes not %d) for block at %08x\n", retlen, oob_size << 1, jeb->offset)); - return -EIO; - } - - /* Check cleanmarker only on the first physical block */ - if (!cnt) { - n.magic = cpu_to_je16 (JFFS2_MAGIC_BITMASK); - n.nodetype = cpu_to_je16 (JFFS2_NODETYPE_CLEANMARKER); - n.totlen = cpu_to_je32 (8); - p = (unsigned char *) &n; - - for (i = 0; i < c->fsdata_len; i++) { - if (buf[c->fsdata_pos + i] != p[i]) { - retval = 1; - } - } - D1(if (retval == 1) { - printk(KERN_WARNING "jffs2_check_nand_cleanmarker(): Cleanmarker node not detected in block at %08x\n", jeb->offset); - printk(KERN_WARNING "OOB at %08x was ", offset); - for (i=0; i < oob_size; i++) { - printk("%02x ", buf[i]); - } - printk("\n"); - }) - } - offset += c->mtd->erasesize; + struct mtd_oob_ops ops; + int ret, cmlen = min_t(int, c->oobavail, OOB_CM_SIZE); + + ops.mode = MTD_OPS_AUTO_OOB; + ops.ooblen = cmlen; + ops.oobbuf = c->oobbuf; + ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0; + ops.datbuf = NULL; + + ret = mtd_read_oob(c->mtd, jeb->offset, &ops); + if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) { + pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n", + jeb->offset, ops.ooblen, ops.oobretlen, ret); + if (!ret || mtd_is_bitflip(ret)) + ret = -EIO; + return ret; } - return retval; + + return !!memcmp(&oob_cleanmarker, c->oobbuf, cmlen); } -int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) +int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, + struct jffs2_eraseblock *jeb) { - struct jffs2_unknown_node n; - int ret; - size_t retlen; - - n.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); - n.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER); - n.totlen = cpu_to_je32(8); - - ret = jffs2_flash_write_oob(c, jeb->offset + c->fsdata_pos, c->fsdata_len, &retlen, (unsigned char *)&n); - - if (ret) { - D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): Write failed for block at %08x: error %d\n", jeb->offset, ret)); - return ret; - } - if (retlen != c->fsdata_len) { - D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): Short write for block at %08x: %zd not %d\n", jeb->offset, retlen, c->fsdata_len)); + int ret; + struct mtd_oob_ops ops; + int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE); + + ops.mode = MTD_OPS_AUTO_OOB; + ops.ooblen = cmlen; + ops.oobbuf = (uint8_t *)&oob_cleanmarker; + ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0; + ops.datbuf = NULL; + + ret = mtd_write_oob(c->mtd, jeb->offset, &ops); + if (ret || ops.oobretlen != ops.ooblen) { + pr_err("cannot write OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n", + jeb->offset, ops.ooblen, ops.oobretlen, ret); + if (!ret) + ret = -EIO; return ret; } + return 0; } -/* +/* * On NAND we try to mark this block bad. If the block was erased more - * than MAX_ERASE_FAILURES we mark it finaly bad. + * than MAX_ERASE_FAILURES we mark it finally bad. * Don't care about failures. 
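With the switch to mtd_read_oob()/mtd_write_oob() and MTD_OPS_AUTO_OOB above, the OOB checks reduce to two simple tests: the area counts as empty when every byte is 0xFF apart from the first few bytes that may hold the cleanmarker, and the cleanmarker itself is validated by a memcmp against a constant node. A self-contained sketch of both tests, with example marker bytes and buffer sizes chosen for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define OOB_CM_SIZE 8                       /* bytes reserved for the cleanmarker */

static const uint8_t cleanmarker[OOB_CM_SIZE] = {
	0x85, 0x19, 0x03, 0x20, 0x08, 0x00, 0x00, 0x00   /* example marker bytes */
};

/* Returns 0 if the OOB is empty (ignoring the cleanmarker area when
 * skip_cm is set), 1 otherwise -- the same convention as the hunk above. */
static int oob_is_dirty(const uint8_t *oob, size_t len, int skip_cm)
{
	size_t i;

	for (i = 0; i < len; i++) {
		if (skip_cm && i < OOB_CM_SIZE)
			continue;               /* yeah, we know about the cleanmarker */
		if (oob[i] != 0xFF)
			return 1;
	}
	return 0;
}

/* Returns 0 when a valid cleanmarker is present, 1 when it is not. */
static int check_cleanmarker(const uint8_t *oob)
{
	return !!memcmp(cleanmarker, oob, OOB_CM_SIZE);
}

int main(void)
{
	uint8_t oob[32];

	memset(oob, 0xFF, sizeof(oob));
	memcpy(oob, cleanmarker, OOB_CM_SIZE);
	printf("dirty=%d cleanmarker_missing=%d\n",
	       oob_is_dirty(oob, sizeof(oob), 1), check_cleanmarker(oob));
	return 0;
}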
This block remains on the erase-pending * or badblock list as long as nobody manipulates the flash with * a bootloader or something like that. @@ -1098,139 +1138,229 @@ int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock * if( ++jeb->bad_count < MAX_ERASE_FAILURES) return 0; - if (!c->mtd->block_markbad) - return 1; // What else can we do? + pr_warn("marking eraseblock at %08x as bad\n", bad_offset); + ret = mtd_block_markbad(c->mtd, bad_offset); - D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Marking bad block at %08x\n", bad_offset)); - ret = c->mtd->block_markbad(c->mtd, bad_offset); - if (ret) { - D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Write failed for block at %08x: error %d\n", jeb->offset, ret)); + jffs2_dbg(1, "%s(): Write failed for block at %08x: error %d\n", + __func__, jeb->offset, ret); return ret; } return 1; } -#define NAND_JFFS2_OOB16_FSDALEN 8 +static struct jffs2_sb_info *work_to_sb(struct work_struct *work) +{ + struct delayed_work *dwork; + + dwork = container_of(work, struct delayed_work, work); + return container_of(dwork, struct jffs2_sb_info, wbuf_dwork); +} + +static void delayed_wbuf_sync(struct work_struct *work) +{ + struct jffs2_sb_info *c = work_to_sb(work); + struct super_block *sb = OFNI_BS_2SFFJ(c); -static struct nand_oobinfo jffs2_oobinfo_docecc = { - .useecc = MTD_NANDECC_PLACE, - .eccbytes = 6, - .eccpos = {0,1,2,3,4,5} -}; + spin_lock(&c->wbuf_dwork_lock); + c->wbuf_queued = 0; + spin_unlock(&c->wbuf_dwork_lock); + + if (!(sb->s_flags & MS_RDONLY)) { + jffs2_dbg(1, "%s()\n", __func__); + jffs2_flush_wbuf_gc(c, 0); + } +} + +void jffs2_dirty_trigger(struct jffs2_sb_info *c) +{ + struct super_block *sb = OFNI_BS_2SFFJ(c); + unsigned long delay; + + if (sb->s_flags & MS_RDONLY) + return; + spin_lock(&c->wbuf_dwork_lock); + if (!c->wbuf_queued) { + jffs2_dbg(1, "%s()\n", __func__); + delay = msecs_to_jiffies(dirty_writeback_interval * 10); + queue_delayed_work(system_long_wq, &c->wbuf_dwork, delay); + c->wbuf_queued = 1; + } + spin_unlock(&c->wbuf_dwork_lock); +} -static int jffs2_nand_set_oobinfo(struct jffs2_sb_info *c) +int jffs2_nand_flash_setup(struct jffs2_sb_info *c) { - struct nand_oobinfo *oinfo = &c->mtd->oobinfo; + struct nand_ecclayout *oinfo = c->mtd->ecclayout; - /* Do this only, if we have an oob buffer */ if (!c->mtd->oobsize) return 0; - + /* Cleanmarker is out-of-band, so inline size zero */ c->cleanmarker_size = 0; - /* Should we use autoplacement ? */ - if (oinfo && oinfo->useecc == MTD_NANDECC_AUTOPLACE) { - D1(printk(KERN_DEBUG "JFFS2 using autoplace on NAND\n")); - /* Get the position of the free bytes */ - if (!oinfo->oobfree[0][1]) { - printk (KERN_WARNING "jffs2_nand_set_oobinfo(): Eeep. Autoplacement selected and no empty space in oob\n"); - return -ENOSPC; - } - c->fsdata_pos = oinfo->oobfree[0][0]; - c->fsdata_len = oinfo->oobfree[0][1]; - if (c->fsdata_len > 8) - c->fsdata_len = 8; - } else { - /* This is just a legacy fallback and should go away soon */ - switch(c->mtd->ecctype) { - case MTD_ECC_RS_DiskOnChip: - printk(KERN_WARNING "JFFS2 using DiskOnChip hardware ECC without autoplacement. Fix it!\n"); - c->oobinfo = &jffs2_oobinfo_docecc; - c->fsdata_pos = 6; - c->fsdata_len = NAND_JFFS2_OOB16_FSDALEN; - c->badblock_pos = 15; - break; - - default: - D1(printk(KERN_DEBUG "JFFS2 on NAND. 
No autoplacment info found\n")); - return -EINVAL; - } + if (!oinfo || oinfo->oobavail == 0) { + pr_err("inconsistent device description\n"); + return -EINVAL; } - return 0; -} -int jffs2_nand_flash_setup(struct jffs2_sb_info *c) -{ - int res; + jffs2_dbg(1, "using OOB on NAND\n"); + + c->oobavail = oinfo->oobavail; /* Initialise write buffer */ init_rwsem(&c->wbuf_sem); - c->wbuf_pagesize = c->mtd->oobblock; + spin_lock_init(&c->wbuf_dwork_lock); + INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync); + c->wbuf_pagesize = c->mtd->writesize; c->wbuf_ofs = 0xFFFFFFFF; - + c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL); if (!c->wbuf) return -ENOMEM; - res = jffs2_nand_set_oobinfo(c); + c->oobbuf = kmalloc(NR_OOB_SCAN_PAGES * c->oobavail, GFP_KERNEL); + if (!c->oobbuf) { + kfree(c->wbuf); + return -ENOMEM; + } -#ifdef BREAKME - if (!brokenbuf) - brokenbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL); - if (!brokenbuf) { +#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY + c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL); + if (!c->wbuf_verify) { + kfree(c->oobbuf); kfree(c->wbuf); return -ENOMEM; } - memset(brokenbuf, 0xdb, c->wbuf_pagesize); #endif - return res; + return 0; } void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c) { +#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY + kfree(c->wbuf_verify); +#endif kfree(c->wbuf); + kfree(c->oobbuf); } int jffs2_dataflash_setup(struct jffs2_sb_info *c) { c->cleanmarker_size = 0; /* No cleanmarkers needed */ - + /* Initialize write buffer */ init_rwsem(&c->wbuf_sem); - c->wbuf_pagesize = c->sector_size; - c->wbuf_ofs = 0xFFFFFFFF; + spin_lock_init(&c->wbuf_dwork_lock); + INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync); + c->wbuf_pagesize = c->mtd->erasesize; + + /* Find a suitable c->sector_size + * - Not too much sectors + * - Sectors have to be at least 4 K + some bytes + * - All known dataflashes have erase sizes of 528 or 1056 + * - we take at least 8 eraseblocks and want to have at least 8K size + * - The concatenation should be a power of 2 + */ + + c->sector_size = 8 * c->mtd->erasesize; + while (c->sector_size < 8192) { + c->sector_size *= 2; + } + + /* It may be necessary to adjust the flash size */ + c->flash_size = c->mtd->size; + + if ((c->flash_size % c->sector_size) != 0) { + c->flash_size = (c->flash_size / c->sector_size) * c->sector_size; + pr_warn("flash size adjusted to %dKiB\n", c->flash_size); + }; + + c->wbuf_ofs = 0xFFFFFFFF; c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL); if (!c->wbuf) return -ENOMEM; - printk(KERN_INFO "JFFS2 write-buffering enabled (%i)\n", c->wbuf_pagesize); +#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY + c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL); + if (!c->wbuf_verify) { + kfree(c->oobbuf); + kfree(c->wbuf); + return -ENOMEM; + } +#endif + + pr_info("write-buffering enabled buffer (%d) erasesize (%d)\n", + c->wbuf_pagesize, c->sector_size); return 0; } void jffs2_dataflash_cleanup(struct jffs2_sb_info *c) { +#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY + kfree(c->wbuf_verify); +#endif kfree(c->wbuf); } -int jffs2_nor_ecc_flash_setup(struct jffs2_sb_info *c) { - /* Cleanmarker is actually larger on the flashes */ - c->cleanmarker_size = 16; +int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) { + /* Cleanmarker currently occupies whole programming regions, + * either one or 2 for 8Byte STMicro flashes. 
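The DataFlash setup above now derives a virtual sector size from the erase size (at least eight eraseblocks and at least 8 KiB) and trims the reported flash size down to a whole number of those sectors. A runnable worked example of that arithmetic, using the 528- and 1056-byte erase sizes mentioned in the comment and illustrative device sizes:

#include <stdint.h>
#include <stdio.h>

static void pick_sector_size(uint32_t erasesize, uint64_t flash_size)
{
	uint32_t sector_size = 8 * erasesize;   /* start from 8 eraseblocks */

	while (sector_size < 8192)              /* and insist on at least 8 KiB */
		sector_size *= 2;

	/* Trim the usable flash size to a whole number of virtual sectors */
	if (flash_size % sector_size)
		flash_size = (flash_size / sector_size) * sector_size;

	printf("erasesize %u -> sector_size %u, usable %llu KiB\n",
	       (unsigned)erasesize, (unsigned)sector_size,
	       (unsigned long long)(flash_size / 1024));
}

int main(void)
{
	pick_sector_size(528, 4325376);         /* e.g. a 4224 KiB DataFlash  */
	pick_sector_size(1056, 8650752);        /* e.g. an 8448 KiB DataFlash */
	return 0;
}

Both example devices land on an 8448-byte virtual sector, which is why the 528/1056-byte geometries are called out explicitly in the comment.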
*/ + c->cleanmarker_size = max(16u, c->mtd->writesize); /* Initialize write buffer */ init_rwsem(&c->wbuf_sem); - c->wbuf_pagesize = c->mtd->eccsize; + spin_lock_init(&c->wbuf_dwork_lock); + INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync); + + c->wbuf_pagesize = c->mtd->writesize; c->wbuf_ofs = 0xFFFFFFFF; c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL); if (!c->wbuf) return -ENOMEM; +#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY + c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL); + if (!c->wbuf_verify) { + kfree(c->wbuf); + return -ENOMEM; + } +#endif + return 0; +} + +void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) { +#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY + kfree(c->wbuf_verify); +#endif + kfree(c->wbuf); +} + +int jffs2_ubivol_setup(struct jffs2_sb_info *c) { + c->cleanmarker_size = 0; + + if (c->mtd->writesize == 1) + /* We do not need write-buffer */ + return 0; + + init_rwsem(&c->wbuf_sem); + spin_lock_init(&c->wbuf_dwork_lock); + INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync); + + c->wbuf_pagesize = c->mtd->writesize; + c->wbuf_ofs = 0xFFFFFFFF; + c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL); + if (!c->wbuf) + return -ENOMEM; + + pr_info("write-buffering enabled buffer (%d) erasesize (%d)\n", + c->wbuf_pagesize, c->sector_size); + return 0; } -void jffs2_nor_ecc_flash_cleanup(struct jffs2_sb_info *c) { +void jffs2_ubivol_cleanup(struct jffs2_sb_info *c) { kfree(c->wbuf); } diff --git a/fs/jffs2/write.c b/fs/jffs2/write.c index 69100615d9a..b634de4c810 100644 --- a/fs/jffs2/write.c +++ b/fs/jffs2/write.c @@ -1,27 +1,27 @@ /* * JFFS2 -- Journalling Flash File System, Version 2. * - * Copyright (C) 2001-2003 Red Hat, Inc. + * Copyright © 2001-2007 Red Hat, Inc. * * Created by David Woodhouse <dwmw2@infradead.org> * * For licensing information, see the file 'LICENCE' in this directory. * - * $Id: write.c,v 1.92 2005/04/13 13:22:35 dwmw2 Exp $ - * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/kernel.h> #include <linux/fs.h> #include <linux/crc32.h> -#include <linux/slab.h> #include <linux/pagemap.h> #include <linux/mtd/mtd.h> #include "nodelist.h" #include "compr.h" -int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint32_t mode, struct jffs2_raw_inode *ri) +int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, + uint32_t mode, struct jffs2_raw_inode *ri) { struct jffs2_inode_cache *ic; @@ -33,13 +33,12 @@ int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint memset(ic, 0, sizeof(*ic)); f->inocache = ic; - f->inocache->nlink = 1; + f->inocache->pino_nlink = 1; /* Will be overwritten shortly for directories */ f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache; f->inocache->state = INO_STATE_PRESENT; - jffs2_add_ino_cache(c, f->inocache); - D1(printk(KERN_DEBUG "jffs2_do_new_inode(): Assigned ino# %d\n", f->inocache->ino)); + jffs2_dbg(1, "%s(): Assigned ino# %d\n", __func__, f->inocache->ino); ri->ino = cpu_to_je32(f->inocache->ino); ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); @@ -54,50 +53,24 @@ int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint return 0; } -#if CONFIG_JFFS2_FS_DEBUG > 0 -static void writecheck(struct jffs2_sb_info *c, uint32_t ofs) -{ - unsigned char buf[16]; - size_t retlen; - int ret, i; - - ret = jffs2_flash_read(c, ofs, 16, &retlen, buf); - if (ret || (retlen != 16)) { - D1(printk(KERN_DEBUG "read failed or short in writecheck(). 
ret %d, retlen %zd\n", ret, retlen)); - return; - } - ret = 0; - for (i=0; i<16; i++) { - if (buf[i] != 0xff) - ret = 1; - } - if (ret) { - printk(KERN_WARNING "ARGH. About to write node to 0x%08x on flash, but there are data already there:\n", ofs); - printk(KERN_WARNING "0x%08x: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", - ofs, - buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7], - buf[8], buf[9], buf[10], buf[11], buf[12], buf[13], buf[14], buf[15]); - } -} -#endif - - -/* jffs2_write_dnode - given a raw_inode, allocate a full_dnode for it, +/* jffs2_write_dnode - given a raw_inode, allocate a full_dnode for it, write it to the flash, link it into the existing inode/fragment list */ -struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const unsigned char *data, uint32_t datalen, uint32_t flash_ofs, int alloc_mode) +struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, + struct jffs2_raw_inode *ri, const unsigned char *data, + uint32_t datalen, int alloc_mode) { - struct jffs2_raw_node_ref *raw; struct jffs2_full_dnode *fn; size_t retlen; + uint32_t flash_ofs; struct kvec vecs[2]; int ret; int retried = 0; unsigned long cnt = 2; D1(if(je32_to_cpu(ri->hdr_crc) != crc32(0, ri, sizeof(struct jffs2_unknown_node)-4)) { - printk(KERN_CRIT "Eep. CRC not correct in jffs2_write_dnode()\n"); + pr_crit("Eep. CRC not correct in jffs2_write_dnode()\n"); BUG(); } ); @@ -106,40 +79,29 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 vecs[1].iov_base = (unsigned char *)data; vecs[1].iov_len = datalen; - D1(writecheck(c, flash_ofs)); - if (je32_to_cpu(ri->totlen) != sizeof(*ri) + datalen) { - printk(KERN_WARNING "jffs2_write_dnode: ri->totlen (0x%08x) != sizeof(*ri) (0x%08zx) + datalen (0x%08x)\n", je32_to_cpu(ri->totlen), sizeof(*ri), datalen); + pr_warn("%s(): ri->totlen (0x%08x) != sizeof(*ri) (0x%08zx) + datalen (0x%08x)\n", + __func__, je32_to_cpu(ri->totlen), + sizeof(*ri), datalen); } - raw = jffs2_alloc_raw_node_ref(); - if (!raw) - return ERR_PTR(-ENOMEM); - + fn = jffs2_alloc_full_dnode(); - if (!fn) { - jffs2_free_raw_node_ref(raw); + if (!fn) return ERR_PTR(-ENOMEM); - } - - fn->ofs = je32_to_cpu(ri->offset); - fn->size = je32_to_cpu(ri->dsize); - fn->frags = 0; /* check number of valid vecs */ if (!datalen || !data) cnt = 1; retry: - fn->raw = raw; + flash_ofs = write_ofs(c); - raw->flash_offset = flash_ofs; - raw->__totlen = PAD(sizeof(*ri)+datalen); - raw->next_phys = NULL; + jffs2_dbg_prewrite_paranoia_check(c, flash_ofs, vecs[0].iov_len + vecs[1].iov_len); if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(ri->version) < f->highest_version)) { BUG_ON(!retried); - D1(printk(KERN_DEBUG "jffs2_write_dnode : dnode_version %d, " - "highest version %d -> updating dnode\n", - je32_to_cpu(ri->version), f->highest_version)); + jffs2_dbg(1, "%s(): dnode_version %d, highest version %d -> updating dnode\n", + __func__, + je32_to_cpu(ri->version), f->highest_version); ri->version = cpu_to_je32(++f->highest_version); ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); } @@ -148,151 +110,156 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 (alloc_mode==ALLOC_GC)?0:f->inocache->ino); if (ret || (retlen != sizeof(*ri) + datalen)) { - printk(KERN_NOTICE "Write of %zd bytes at 0x%08x failed. 
returned %d, retlen %zd\n", - sizeof(*ri)+datalen, flash_ofs, ret, retlen); + pr_notice("Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n", + sizeof(*ri) + datalen, flash_ofs, ret, retlen); /* Mark the space as dirtied */ if (retlen) { - /* Doesn't belong to any inode */ - raw->next_in_ino = NULL; - - /* Don't change raw->size to match retlen. We may have + /* Don't change raw->size to match retlen. We may have written the node header already, and only the data will seem corrupted, in which case the scan would skip over - any node we write before the original intended end of + any node we write before the original intended end of this node */ - raw->flash_offset |= REF_OBSOLETE; - jffs2_add_physical_node_ref(c, raw); - jffs2_mark_node_obsolete(c, raw); + jffs2_add_physical_node_ref(c, flash_ofs | REF_OBSOLETE, PAD(sizeof(*ri)+datalen), NULL); } else { - printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", raw->flash_offset); - jffs2_free_raw_node_ref(raw); + pr_notice("Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", + flash_ofs); } - if (!retried && alloc_mode != ALLOC_NORETRY && (raw = jffs2_alloc_raw_node_ref())) { + if (!retried && alloc_mode != ALLOC_NORETRY) { /* Try to reallocate space and retry */ uint32_t dummy; struct jffs2_eraseblock *jeb = &c->blocks[flash_ofs / c->sector_size]; retried = 1; - D1(printk(KERN_DEBUG "Retrying failed write.\n")); - - ACCT_SANITY_CHECK(c,jeb); - D1(ACCT_PARANOIA_CHECK(jeb)); + jffs2_dbg(1, "Retrying failed write.\n"); + + jffs2_dbg_acct_sanity_check(c,jeb); + jffs2_dbg_acct_paranoia_check(c, jeb); if (alloc_mode == ALLOC_GC) { - ret = jffs2_reserve_space_gc(c, sizeof(*ri) + datalen, &flash_ofs, &dummy); + ret = jffs2_reserve_space_gc(c, sizeof(*ri) + datalen, &dummy, + JFFS2_SUMMARY_INODE_SIZE); } else { /* Locking pain */ - up(&f->sem); + mutex_unlock(&f->sem); jffs2_complete_reservation(c); - - ret = jffs2_reserve_space(c, sizeof(*ri) + datalen, &flash_ofs, &dummy, alloc_mode); - down(&f->sem); + + ret = jffs2_reserve_space(c, sizeof(*ri) + datalen, &dummy, + alloc_mode, JFFS2_SUMMARY_INODE_SIZE); + mutex_lock(&f->sem); } if (!ret) { - D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", flash_ofs)); + flash_ofs = write_ofs(c); + jffs2_dbg(1, "Allocated space at 0x%08x to retry failed write.\n", + flash_ofs); - ACCT_SANITY_CHECK(c,jeb); - D1(ACCT_PARANOIA_CHECK(jeb)); + jffs2_dbg_acct_sanity_check(c,jeb); + jffs2_dbg_acct_paranoia_check(c, jeb); goto retry; } - D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret)); - jffs2_free_raw_node_ref(raw); + jffs2_dbg(1, "Failed to allocate space to retry failed write: %d!\n", + ret); } /* Release the full_dnode which is now useless, and return */ jffs2_free_full_dnode(fn); return ERR_PTR(ret?ret:-EIO); } /* Mark the space used */ - /* If node covers at least a whole page, or if it starts at the - beginning of a page and runs to the end of the file, or if - it's a hole node, mark it REF_PRISTINE, else REF_NORMAL. + /* If node covers at least a whole page, or if it starts at the + beginning of a page and runs to the end of the file, or if + it's a hole node, mark it REF_PRISTINE, else REF_NORMAL. 
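The REF_PRISTINE/REF_NORMAL choice above is just a predicate on the node's dsize, offset and the inode size. Restated as a standalone function, with PAGE_CACHE_SIZE replaced by a 4096-byte constant for the example:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_EXAMPLE 4096u

/* A data node is "pristine" when it covers at least a whole page, or when it
 * starts on a page boundary and runs to the end of the file. */
static int dnode_is_pristine(uint32_t dsize, uint32_t offset, uint32_t isize)
{
	if (dsize >= PAGE_SIZE_EXAMPLE)
		return 1;
	return (offset & (PAGE_SIZE_EXAMPLE - 1)) == 0 && offset + dsize == isize;
}

int main(void)
{
	printf("%d %d %d\n",
	       dnode_is_pristine(4096, 8192, 20000),   /* whole page        -> 1 */
	       dnode_is_pristine(1000, 8192, 9192),    /* tail of the file  -> 1 */
	       dnode_is_pristine(1000, 100, 20000));   /* partial, interior -> 0 */
	return 0;
}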
*/ if ((je32_to_cpu(ri->dsize) >= PAGE_CACHE_SIZE) || ( ((je32_to_cpu(ri->offset)&(PAGE_CACHE_SIZE-1))==0) && (je32_to_cpu(ri->dsize)+je32_to_cpu(ri->offset) == je32_to_cpu(ri->isize)))) { - raw->flash_offset |= REF_PRISTINE; + flash_ofs |= REF_PRISTINE; } else { - raw->flash_offset |= REF_NORMAL; + flash_ofs |= REF_NORMAL; } - jffs2_add_physical_node_ref(c, raw); - - /* Link into per-inode list */ - spin_lock(&c->erase_completion_lock); - raw->next_in_ino = f->inocache->nodes; - f->inocache->nodes = raw; - spin_unlock(&c->erase_completion_lock); + fn->raw = jffs2_add_physical_node_ref(c, flash_ofs, PAD(sizeof(*ri)+datalen), f->inocache); + if (IS_ERR(fn->raw)) { + void *hold_err = fn->raw; + /* Release the full_dnode which is now useless, and return */ + jffs2_free_full_dnode(fn); + return ERR_CAST(hold_err); + } + fn->ofs = je32_to_cpu(ri->offset); + fn->size = je32_to_cpu(ri->dsize); + fn->frags = 0; - D1(printk(KERN_DEBUG "jffs2_write_dnode wrote node at 0x%08x(%d) with dsize 0x%x, csize 0x%x, node_crc 0x%08x, data_crc 0x%08x, totlen 0x%08x\n", - flash_ofs, ref_flags(raw), je32_to_cpu(ri->dsize), + jffs2_dbg(1, "jffs2_write_dnode wrote node at 0x%08x(%d) with dsize 0x%x, csize 0x%x, node_crc 0x%08x, data_crc 0x%08x, totlen 0x%08x\n", + flash_ofs & ~3, flash_ofs & 3, je32_to_cpu(ri->dsize), je32_to_cpu(ri->csize), je32_to_cpu(ri->node_crc), - je32_to_cpu(ri->data_crc), je32_to_cpu(ri->totlen))); + je32_to_cpu(ri->data_crc), je32_to_cpu(ri->totlen)); if (retried) { - ACCT_SANITY_CHECK(c,NULL); + jffs2_dbg_acct_sanity_check(c,NULL); } return fn; } -struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_dirent *rd, const unsigned char *name, uint32_t namelen, uint32_t flash_ofs, int alloc_mode) +struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jffs2_inode_info *f, + struct jffs2_raw_dirent *rd, const unsigned char *name, + uint32_t namelen, int alloc_mode) { - struct jffs2_raw_node_ref *raw; struct jffs2_full_dirent *fd; size_t retlen; struct kvec vecs[2]; + uint32_t flash_ofs; int retried = 0; int ret; - D1(printk(KERN_DEBUG "jffs2_write_dirent(ino #%u, name at *0x%p \"%s\"->ino #%u, name_crc 0x%08x)\n", + jffs2_dbg(1, "%s(ino #%u, name at *0x%p \"%s\"->ino #%u, name_crc 0x%08x)\n", + __func__, je32_to_cpu(rd->pino), name, name, je32_to_cpu(rd->ino), - je32_to_cpu(rd->name_crc))); - D1(writecheck(c, flash_ofs)); + je32_to_cpu(rd->name_crc)); D1(if(je32_to_cpu(rd->hdr_crc) != crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)) { - printk(KERN_CRIT "Eep. CRC not correct in jffs2_write_dirent()\n"); + pr_crit("Eep. 
CRC not correct in jffs2_write_dirent()\n"); BUG(); + }); + + if (strnlen(name, namelen) != namelen) { + /* This should never happen, but seems to have done on at least one + occasion: https://dev.laptop.org/ticket/4184 */ + pr_crit("Error in jffs2_write_dirent() -- name contains zero bytes!\n"); + pr_crit("Directory inode #%u, name at *0x%p \"%s\"->ino #%u, name_crc 0x%08x\n", + je32_to_cpu(rd->pino), name, name, je32_to_cpu(rd->ino), + je32_to_cpu(rd->name_crc)); + WARN_ON(1); + return ERR_PTR(-EIO); } - ); vecs[0].iov_base = rd; vecs[0].iov_len = sizeof(*rd); vecs[1].iov_base = (unsigned char *)name; vecs[1].iov_len = namelen; - - raw = jffs2_alloc_raw_node_ref(); - - if (!raw) - return ERR_PTR(-ENOMEM); fd = jffs2_alloc_full_dirent(namelen+1); - if (!fd) { - jffs2_free_raw_node_ref(raw); + if (!fd) return ERR_PTR(-ENOMEM); - } fd->version = je32_to_cpu(rd->version); fd->ino = je32_to_cpu(rd->ino); - fd->nhash = full_name_hash(name, strlen(name)); + fd->nhash = full_name_hash(name, namelen); fd->type = rd->type; memcpy(fd->name, name, namelen); fd->name[namelen]=0; retry: - fd->raw = raw; + flash_ofs = write_ofs(c); - raw->flash_offset = flash_ofs; - raw->__totlen = PAD(sizeof(*rd)+namelen); - raw->next_phys = NULL; + jffs2_dbg_prewrite_paranoia_check(c, flash_ofs, vecs[0].iov_len + vecs[1].iov_len); if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(rd->version) < f->highest_version)) { BUG_ON(!retried); - D1(printk(KERN_DEBUG "jffs2_write_dirent : dirent_version %d, " - "highest version %d -> updating dirent\n", - je32_to_cpu(rd->version), f->highest_version)); + jffs2_dbg(1, "%s(): dirent_version %d, highest version %d -> updating dirent\n", + __func__, + je32_to_cpu(rd->version), f->highest_version); rd->version = cpu_to_je32(++f->highest_version); fd->version = je32_to_cpu(rd->version); rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); @@ -301,65 +268,67 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff ret = jffs2_flash_writev(c, vecs, 2, flash_ofs, &retlen, (alloc_mode==ALLOC_GC)?0:je32_to_cpu(rd->pino)); if (ret || (retlen != sizeof(*rd) + namelen)) { - printk(KERN_NOTICE "Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n", - sizeof(*rd)+namelen, flash_ofs, ret, retlen); + pr_notice("Write of %zd bytes at 0x%08x failed. 
returned %d, retlen %zd\n", + sizeof(*rd) + namelen, flash_ofs, ret, retlen); /* Mark the space as dirtied */ if (retlen) { - raw->next_in_ino = NULL; - raw->flash_offset |= REF_OBSOLETE; - jffs2_add_physical_node_ref(c, raw); - jffs2_mark_node_obsolete(c, raw); + jffs2_add_physical_node_ref(c, flash_ofs | REF_OBSOLETE, PAD(sizeof(*rd)+namelen), NULL); } else { - printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", raw->flash_offset); - jffs2_free_raw_node_ref(raw); + pr_notice("Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", + flash_ofs); } - if (!retried && (raw = jffs2_alloc_raw_node_ref())) { + if (!retried) { /* Try to reallocate space and retry */ uint32_t dummy; struct jffs2_eraseblock *jeb = &c->blocks[flash_ofs / c->sector_size]; retried = 1; - D1(printk(KERN_DEBUG "Retrying failed write.\n")); + jffs2_dbg(1, "Retrying failed write.\n"); - ACCT_SANITY_CHECK(c,jeb); - D1(ACCT_PARANOIA_CHECK(jeb)); + jffs2_dbg_acct_sanity_check(c,jeb); + jffs2_dbg_acct_paranoia_check(c, jeb); if (alloc_mode == ALLOC_GC) { - ret = jffs2_reserve_space_gc(c, sizeof(*rd) + namelen, &flash_ofs, &dummy); + ret = jffs2_reserve_space_gc(c, sizeof(*rd) + namelen, &dummy, + JFFS2_SUMMARY_DIRENT_SIZE(namelen)); } else { /* Locking pain */ - up(&f->sem); + mutex_unlock(&f->sem); jffs2_complete_reservation(c); - - ret = jffs2_reserve_space(c, sizeof(*rd) + namelen, &flash_ofs, &dummy, alloc_mode); - down(&f->sem); + + ret = jffs2_reserve_space(c, sizeof(*rd) + namelen, &dummy, + alloc_mode, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); + mutex_lock(&f->sem); } if (!ret) { - D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", flash_ofs)); - ACCT_SANITY_CHECK(c,jeb); - D1(ACCT_PARANOIA_CHECK(jeb)); + flash_ofs = write_ofs(c); + jffs2_dbg(1, "Allocated space at 0x%08x to retry failed write\n", + flash_ofs); + jffs2_dbg_acct_sanity_check(c,jeb); + jffs2_dbg_acct_paranoia_check(c, jeb); goto retry; } - D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret)); - jffs2_free_raw_node_ref(raw); + jffs2_dbg(1, "Failed to allocate space to retry failed write: %d!\n", + ret); } /* Release the full_dnode which is now useless, and return */ jffs2_free_full_dirent(fd); return ERR_PTR(ret?ret:-EIO); } /* Mark the space used */ - raw->flash_offset |= REF_PRISTINE; - jffs2_add_physical_node_ref(c, raw); - - spin_lock(&c->erase_completion_lock); - raw->next_in_ino = f->inocache->nodes; - f->inocache->nodes = raw; - spin_unlock(&c->erase_completion_lock); + fd->raw = jffs2_add_physical_node_ref(c, flash_ofs | dirent_node_state(rd), + PAD(sizeof(*rd)+namelen), f->inocache); + if (IS_ERR(fd->raw)) { + void *hold_err = fd->raw; + /* Release the full_dirent which is now useless, and return */ + jffs2_free_full_dirent(fd); + return ERR_CAST(hold_err); + } if (retried) { - ACCT_SANITY_CHECK(c,NULL); + jffs2_dbg_acct_sanity_check(c,NULL); } return fd; @@ -369,32 +338,34 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff we don't have to go digging in struct inode or its equivalent. 
It should set: mode, uid, gid, (starting)isize, atime, ctime, mtime */ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, - struct jffs2_raw_inode *ri, unsigned char *buf, + struct jffs2_raw_inode *ri, unsigned char *buf, uint32_t offset, uint32_t writelen, uint32_t *retlen) { int ret = 0; uint32_t writtenlen = 0; - D1(printk(KERN_DEBUG "jffs2_write_inode_range(): Ino #%u, ofs 0x%x, len 0x%x\n", - f->inocache->ino, offset, writelen)); - + jffs2_dbg(1, "%s(): Ino #%u, ofs 0x%x, len 0x%x\n", + __func__, f->inocache->ino, offset, writelen); + while(writelen) { struct jffs2_full_dnode *fn; unsigned char *comprbuf = NULL; uint16_t comprtype = JFFS2_COMPR_NONE; - uint32_t phys_ofs, alloclen; + uint32_t alloclen; uint32_t datalen, cdatalen; int retried = 0; retry: - D2(printk(KERN_DEBUG "jffs2_commit_write() loop: 0x%x to write to 0x%x\n", writelen, offset)); + jffs2_dbg(2, "jffs2_commit_write() loop: 0x%x to write to 0x%x\n", + writelen, offset); - ret = jffs2_reserve_space(c, sizeof(*ri) + JFFS2_MIN_DATA_LEN, &phys_ofs, &alloclen, ALLOC_NORMAL); + ret = jffs2_reserve_space(c, sizeof(*ri) + JFFS2_MIN_DATA_LEN, + &alloclen, ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); if (ret) { - D1(printk(KERN_DEBUG "jffs2_reserve_space returned %d\n", ret)); + jffs2_dbg(1, "jffs2_reserve_space returned %d\n", ret); break; } - down(&f->sem); + mutex_lock(&f->sem); datalen = min_t(uint32_t, writelen, PAGE_CACHE_SIZE - (offset & (PAGE_CACHE_SIZE-1))); cdatalen = min_t(uint32_t, alloclen - sizeof(*ri), datalen); @@ -416,18 +387,18 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); ri->data_crc = cpu_to_je32(crc32(0, comprbuf, cdatalen)); - fn = jffs2_write_dnode(c, f, ri, comprbuf, cdatalen, phys_ofs, ALLOC_NORETRY); + fn = jffs2_write_dnode(c, f, ri, comprbuf, cdatalen, ALLOC_NORETRY); jffs2_free_comprbuf(comprbuf, buf); if (IS_ERR(fn)) { ret = PTR_ERR(fn); - up(&f->sem); + mutex_unlock(&f->sem); jffs2_complete_reservation(c); if (!retried) { /* Write error to be retried */ retried = 1; - D1(printk(KERN_DEBUG "Retrying node write in jffs2_write_inode_range()\n")); + jffs2_dbg(1, "Retrying node write in jffs2_write_inode_range()\n"); goto retry; } break; @@ -440,22 +411,23 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, } if (ret) { /* Eep */ - D1(printk(KERN_DEBUG "Eep. add_full_dnode_to_inode() failed in commit_write, returned %d\n", ret)); + jffs2_dbg(1, "Eep. add_full_dnode_to_inode() failed in commit_write, returned %d\n", + ret); jffs2_mark_node_obsolete(c, fn->raw); jffs2_free_full_dnode(fn); - up(&f->sem); + mutex_unlock(&f->sem); jffs2_complete_reservation(c); break; } - up(&f->sem); + mutex_unlock(&f->sem); jffs2_complete_reservation(c); if (!datalen) { - printk(KERN_WARNING "Eep. We didn't actually write any data in jffs2_write_inode_range()\n"); + pr_warn("Eep. 
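The jffs2_write_inode_range() loop above never lets a single data node cross a page boundary: each iteration reserves space, clamps datalen to the remainder of the current page, compresses that much and writes one dnode. The sketch below shows only the chunking arithmetic, with the reservation and compression steps omitted and a 4096-byte page assumed:

#include <stdint.h>
#include <stdio.h>

#define PAGE_CACHE_SIZE_EXAMPLE 4096u

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

int main(void)
{
	uint32_t offset = 3000, writelen = 10000;

	while (writelen) {
		/* Never cross a page boundary in a single data node */
		uint32_t datalen = min_u32(writelen,
			PAGE_CACHE_SIZE_EXAMPLE -
			(offset & (PAGE_CACHE_SIZE_EXAMPLE - 1)));

		printf("node: offset %u, len %u\n", offset, datalen);
		offset += datalen;
		writelen -= datalen;
	}
	return 0;
}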
We didn't actually write any data in jffs2_write_inode_range()\n"); ret = -EIO; break; } - D1(printk(KERN_DEBUG "increasing writtenlen by %d\n", datalen)); + jffs2_dbg(1, "increasing writtenlen by %d\n", datalen); writtenlen += datalen; offset += datalen; writelen -= datalen; @@ -465,51 +437,63 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, return ret; } -int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const char *name, int namelen) +int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, + struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, + const struct qstr *qstr) { struct jffs2_raw_dirent *rd; struct jffs2_full_dnode *fn; struct jffs2_full_dirent *fd; - uint32_t alloclen, phys_ofs; + uint32_t alloclen; int ret; - /* Try to reserve enough space for both node and dirent. - * Just the node will do for now, though + /* Try to reserve enough space for both node and dirent. + * Just the node will do for now, though */ - ret = jffs2_reserve_space(c, sizeof(*ri), &phys_ofs, &alloclen, ALLOC_NORMAL); - D1(printk(KERN_DEBUG "jffs2_do_create(): reserved 0x%x bytes\n", alloclen)); - if (ret) { - up(&f->sem); + ret = jffs2_reserve_space(c, sizeof(*ri), &alloclen, ALLOC_NORMAL, + JFFS2_SUMMARY_INODE_SIZE); + jffs2_dbg(1, "%s(): reserved 0x%x bytes\n", __func__, alloclen); + if (ret) return ret; - } + + mutex_lock(&f->sem); ri->data_crc = cpu_to_je32(0); ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); - fn = jffs2_write_dnode(c, f, ri, NULL, 0, phys_ofs, ALLOC_NORMAL); + fn = jffs2_write_dnode(c, f, ri, NULL, 0, ALLOC_NORMAL); - D1(printk(KERN_DEBUG "jffs2_do_create created file with mode 0x%x\n", - jemode_to_cpu(ri->mode))); + jffs2_dbg(1, "jffs2_do_create created file with mode 0x%x\n", + jemode_to_cpu(ri->mode)); if (IS_ERR(fn)) { - D1(printk(KERN_DEBUG "jffs2_write_dnode() failed\n")); + jffs2_dbg(1, "jffs2_write_dnode() failed\n"); /* Eeek. Wave bye bye */ - up(&f->sem); + mutex_unlock(&f->sem); jffs2_complete_reservation(c); return PTR_ERR(fn); } - /* No data here. Only a metadata node, which will be + /* No data here. Only a metadata node, which will be obsoleted by the first data write */ f->metadata = fn; - up(&f->sem); + mutex_unlock(&f->sem); jffs2_complete_reservation(c); - ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_NORMAL); - + + ret = jffs2_init_security(&f->vfs_inode, &dir_f->vfs_inode, qstr); + if (ret) + return ret; + ret = jffs2_init_acl_post(&f->vfs_inode); + if (ret) + return ret; + + ret = jffs2_reserve_space(c, sizeof(*rd)+qstr->len, &alloclen, + ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(qstr->len)); + if (ret) { /* Eep. 
*/ - D1(printk(KERN_DEBUG "jffs2_reserve_space() for dirent failed\n")); + jffs2_dbg(1, "jffs2_reserve_space() for dirent failed\n"); return ret; } @@ -520,31 +504,31 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str return -ENOMEM; } - down(&dir_f->sem); + mutex_lock(&dir_f->sem); rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT); - rd->totlen = cpu_to_je32(sizeof(*rd) + namelen); + rd->totlen = cpu_to_je32(sizeof(*rd) + qstr->len); rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)); rd->pino = cpu_to_je32(dir_f->inocache->ino); rd->version = cpu_to_je32(++dir_f->highest_version); rd->ino = ri->ino; rd->mctime = ri->ctime; - rd->nsize = namelen; + rd->nsize = qstr->len; rd->type = DT_REG; rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); - rd->name_crc = cpu_to_je32(crc32(0, name, namelen)); + rd->name_crc = cpu_to_je32(crc32(0, qstr->name, qstr->len)); - fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_NORMAL); + fd = jffs2_write_dirent(c, dir_f, rd, qstr->name, qstr->len, ALLOC_NORMAL); jffs2_free_raw_dirent(rd); - + if (IS_ERR(fd)) { - /* dirent failed to write. Delete the inode normally + /* dirent failed to write. Delete the inode normally as if it were the final unlink() */ jffs2_complete_reservation(c); - up(&dir_f->sem); + mutex_unlock(&dir_f->sem); return PTR_ERR(fd); } @@ -553,118 +537,126 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str jffs2_add_fd_to_list(c, fd, &dir_f->dents); jffs2_complete_reservation(c); - up(&dir_f->sem); + mutex_unlock(&dir_f->sem); return 0; } int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, - const char *name, int namelen, struct jffs2_inode_info *dead_f) + const char *name, int namelen, struct jffs2_inode_info *dead_f, + uint32_t time) { struct jffs2_raw_dirent *rd; struct jffs2_full_dirent *fd; - uint32_t alloclen, phys_ofs; + uint32_t alloclen; int ret; - if (1 /* alternative branch needs testing */ || - !jffs2_can_mark_obsolete(c)) { + if (!jffs2_can_mark_obsolete(c)) { /* We can't mark stuff obsolete on the medium. We need to write a deletion dirent */ rd = jffs2_alloc_raw_dirent(); if (!rd) return -ENOMEM; - ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_DELETION); + ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen, + ALLOC_DELETION, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); if (ret) { jffs2_free_raw_dirent(rd); return ret; } - down(&dir_f->sem); + mutex_lock(&dir_f->sem); /* Build a deletion node */ rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT); rd->totlen = cpu_to_je32(sizeof(*rd) + namelen); rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)); - + rd->pino = cpu_to_je32(dir_f->inocache->ino); rd->version = cpu_to_je32(++dir_f->highest_version); rd->ino = cpu_to_je32(0); - rd->mctime = cpu_to_je32(get_seconds()); + rd->mctime = cpu_to_je32(time); rd->nsize = namelen; rd->type = DT_UNKNOWN; rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); rd->name_crc = cpu_to_je32(crc32(0, name, namelen)); - fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_DELETION); - + fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, ALLOC_DELETION); + jffs2_free_raw_dirent(rd); if (IS_ERR(fd)) { jffs2_complete_reservation(c); - up(&dir_f->sem); + mutex_unlock(&dir_f->sem); return PTR_ERR(fd); } /* File it. 
This will mark the old one obsolete. */ jffs2_add_fd_to_list(c, fd, &dir_f->dents); - up(&dir_f->sem); + mutex_unlock(&dir_f->sem); } else { - struct jffs2_full_dirent **prev = &dir_f->dents; uint32_t nhash = full_name_hash(name, namelen); - down(&dir_f->sem); + fd = dir_f->dents; + /* We don't actually want to reserve any space, but we do + want to be holding the alloc_sem when we write to flash */ + mutex_lock(&c->alloc_sem); + mutex_lock(&dir_f->sem); - while ((*prev) && (*prev)->nhash <= nhash) { - if ((*prev)->nhash == nhash && - !memcmp((*prev)->name, name, namelen) && - !(*prev)->name[namelen]) { - struct jffs2_full_dirent *this = *prev; + for (fd = dir_f->dents; fd; fd = fd->next) { + if (fd->nhash == nhash && + !memcmp(fd->name, name, namelen) && + !fd->name[namelen]) { - D1(printk(KERN_DEBUG "Marking old dirent node (ino #%u) @%08x obsolete\n", - this->ino, ref_offset(this->raw))); - - *prev = this->next; - jffs2_mark_node_obsolete(c, (this->raw)); - jffs2_free_full_dirent(this); + jffs2_dbg(1, "Marking old dirent node (ino #%u) @%08x obsolete\n", + fd->ino, ref_offset(fd->raw)); + jffs2_mark_node_obsolete(c, fd->raw); + /* We don't want to remove it from the list immediately, + because that screws up getdents()/seek() semantics even + more than they're screwed already. Turn it into a + node-less deletion dirent instead -- a placeholder */ + fd->raw = NULL; + fd->ino = 0; break; } - prev = &((*prev)->next); } - up(&dir_f->sem); + mutex_unlock(&dir_f->sem); } /* dead_f is NULL if this was a rename not a real unlink */ /* Also catch the !f->inocache case, where there was a dirent pointing to an inode which didn't exist. */ - if (dead_f && dead_f->inocache) { + if (dead_f && dead_f->inocache) { - down(&dead_f->sem); + mutex_lock(&dead_f->sem); if (S_ISDIR(OFNI_EDONI_2SFFJ(dead_f)->i_mode)) { while (dead_f->dents) { /* There can be only deleted ones */ fd = dead_f->dents; - + dead_f->dents = fd->next; - + if (fd->ino) { - printk(KERN_WARNING "Deleting inode #%u with active dentry \"%s\"->ino #%u\n", - dead_f->inocache->ino, fd->name, fd->ino); + pr_warn("Deleting inode #%u with active dentry \"%s\"->ino #%u\n", + dead_f->inocache->ino, + fd->name, fd->ino); } else { - D1(printk(KERN_DEBUG "Removing deletion dirent for \"%s\" from dir ino #%u\n", - fd->name, dead_f->inocache->ino)); + jffs2_dbg(1, "Removing deletion dirent for \"%s\" from dir ino #%u\n", + fd->name, + dead_f->inocache->ino); } - jffs2_mark_node_obsolete(c, fd->raw); + if (fd->raw) + jffs2_mark_node_obsolete(c, fd->raw); jffs2_free_full_dirent(fd); } - } - - dead_f->inocache->nlink--; + dead_f->inocache->pino_nlink = 0; + } else + dead_f->inocache->pino_nlink--; /* NB: Caller must set inode nlink if appropriate */ - up(&dead_f->sem); + mutex_unlock(&dead_f->sem); } jffs2_complete_reservation(c); @@ -673,24 +665,25 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, } -int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint32_t ino, uint8_t type, const char *name, int namelen) +int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint32_t ino, uint8_t type, const char *name, int namelen, uint32_t time) { struct jffs2_raw_dirent *rd; struct jffs2_full_dirent *fd; - uint32_t alloclen, phys_ofs; + uint32_t alloclen; int ret; rd = jffs2_alloc_raw_dirent(); if (!rd) return -ENOMEM; - ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_NORMAL); + ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen, + ALLOC_NORMAL, 
JFFS2_SUMMARY_DIRENT_SIZE(namelen)); if (ret) { jffs2_free_raw_dirent(rd); return ret; } - - down(&dir_f->sem); + + mutex_lock(&dir_f->sem); /* Build a deletion node */ rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); @@ -701,7 +694,7 @@ int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint rd->pino = cpu_to_je32(dir_f->inocache->ino); rd->version = cpu_to_je32(++dir_f->highest_version); rd->ino = cpu_to_je32(ino); - rd->mctime = cpu_to_je32(get_seconds()); + rd->mctime = cpu_to_je32(time); rd->nsize = namelen; rd->type = type; @@ -709,13 +702,13 @@ int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); rd->name_crc = cpu_to_je32(crc32(0, name, namelen)); - fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_NORMAL); - + fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, ALLOC_NORMAL); + jffs2_free_raw_dirent(rd); if (IS_ERR(fd)) { jffs2_complete_reservation(c); - up(&dir_f->sem); + mutex_unlock(&dir_f->sem); return PTR_ERR(fd); } @@ -723,7 +716,7 @@ int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint jffs2_add_fd_to_list(c, fd, &dir_f->dents); jffs2_complete_reservation(c); - up(&dir_f->sem); + mutex_unlock(&dir_f->sem); return 0; } diff --git a/fs/jffs2/writev.c b/fs/jffs2/writev.c index f079f838856..a1bda9dab3f 100644 --- a/fs/jffs2/writev.c +++ b/fs/jffs2/writev.c @@ -1,50 +1,51 @@ /* * JFFS2 -- Journalling Flash File System, Version 2. * - * Copyright (C) 2001, 2002 Red Hat, Inc. + * Copyright © 2001-2007 Red Hat, Inc. * * Created by David Woodhouse <dwmw2@infradead.org> * * For licensing information, see the file 'LICENCE' in this directory. * - * $Id: writev.c,v 1.6 2004/11/16 20:36:12 dwmw2 Exp $ - * */ #include <linux/kernel.h> #include <linux/mtd/mtd.h> #include "nodelist.h" -/* This ought to be in core MTD code. All registered MTD devices - without writev should have this put in place. 
Bug the MTD - maintainer */ -static inline int mtd_fake_writev(struct mtd_info *mtd, const struct kvec *vecs, - unsigned long count, loff_t to, size_t *retlen) +int jffs2_flash_direct_writev(struct jffs2_sb_info *c, const struct kvec *vecs, + unsigned long count, loff_t to, size_t *retlen) { - unsigned long i; - size_t totlen = 0, thislen; - int ret = 0; - - for (i=0; i<count; i++) { - if (!vecs[i].iov_len) - continue; - ret = mtd->write(mtd, to, vecs[i].iov_len, &thislen, vecs[i].iov_base); - totlen += thislen; - if (ret || thislen != vecs[i].iov_len) - break; - to += vecs[i].iov_len; + if (!jffs2_is_writebuffered(c)) { + if (jffs2_sum_active()) { + int res; + res = jffs2_sum_add_kvec(c, vecs, count, (uint32_t) to); + if (res) { + return res; + } + } } - if (retlen) - *retlen = totlen; - return ret; + + return mtd_writev(c->mtd, vecs, count, to, retlen); } -int jffs2_flash_direct_writev(struct jffs2_sb_info *c, const struct kvec *vecs, - unsigned long count, loff_t to, size_t *retlen) +int jffs2_flash_direct_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, + size_t *retlen, const u_char *buf) { - if (c->mtd->writev) - return c->mtd->writev(c->mtd, vecs, count, to, retlen); - else - return mtd_fake_writev(c->mtd, vecs, count, to, retlen); -} + int ret; + ret = mtd_write(c->mtd, ofs, len, retlen, buf); + + if (jffs2_sum_active()) { + struct kvec vecs[1]; + int res; + + vecs[0].iov_base = (unsigned char *) buf; + vecs[0].iov_len = len; + res = jffs2_sum_add_kvec(c, vecs, 1, (uint32_t) ofs); + if (res) { + return res; + } + } + return ret; +} diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c new file mode 100644 index 00000000000..ad0f2e2a170 --- /dev/null +++ b/fs/jffs2/xattr.c @@ -0,0 +1,1342 @@ +/* + * JFFS2 -- Journalling Flash File System, Version 2. + * + * Copyright © 2006 NEC Corporation + * + * Created by KaiGai Kohei <kaigai@ak.jp.nec.com> + * + * For licensing information, see the file 'LICENCE' in this directory. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#define JFFS2_XATTR_IS_CORRUPTED 1 + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/fs.h> +#include <linux/time.h> +#include <linux/pagemap.h> +#include <linux/highmem.h> +#include <linux/crc32.h> +#include <linux/jffs2.h> +#include <linux/xattr.h> +#include <linux/posix_acl_xattr.h> +#include <linux/mtd/mtd.h> +#include "nodelist.h" +/* -------- xdatum related functions ---------------- + * xattr_datum_hashkey(xprefix, xname, xvalue, xsize) + * is used to calcurate xdatum hashkey. The reminder of hashkey into XATTRINDEX_HASHSIZE is + * the index of the xattr name/value pair cache (c->xattrindex). + * is_xattr_datum_unchecked(c, xd) + * returns 1, if xdatum contains any unchecked raw nodes. if all raw nodes are not + * unchecked, it returns 0. + * unload_xattr_datum(c, xd) + * is used to release xattr name/value pair and detach from c->xattrindex. + * reclaim_xattr_datum(c) + * is used to reclaim xattr name/value pairs on the xattr name/value pair cache when + * memory usage by cache is over c->xdatum_mem_threshold. Currently, this threshold + * is hard coded as 32KiB. + * do_verify_xattr_datum(c, xd) + * is used to load the xdatum informations without name/value pair from the medium. + * It's necessary once, because those informations are not collected during mounting + * process when EBS is enabled. + * 0 will be returned, if success. An negative return value means recoverable error, and + * positive return value means unrecoverable error. 
Thus, caller must remove this xdatum + * and xref when it returned positive value. + * do_load_xattr_datum(c, xd) + * is used to load name/value pair from the medium. + * The meanings of return value is same as do_verify_xattr_datum(). + * load_xattr_datum(c, xd) + * is used to be as a wrapper of do_verify_xattr_datum() and do_load_xattr_datum(). + * If xd need to call do_verify_xattr_datum() at first, it's called before calling + * do_load_xattr_datum(). The meanings of return value is same as do_verify_xattr_datum(). + * save_xattr_datum(c, xd) + * is used to write xdatum to medium. xd->version will be incremented. + * create_xattr_datum(c, xprefix, xname, xvalue, xsize) + * is used to create new xdatum and write to medium. + * unrefer_xattr_datum(c, xd) + * is used to delete a xdatum. When nobody refers this xdatum, JFFS2_XFLAGS_DEAD + * is set on xd->flags and chained xattr_dead_list or release it immediately. + * In the first case, the garbage collector release it later. + * -------------------------------------------------- */ +static uint32_t xattr_datum_hashkey(int xprefix, const char *xname, const char *xvalue, int xsize) +{ + int name_len = strlen(xname); + + return crc32(xprefix, xname, name_len) ^ crc32(xprefix, xvalue, xsize); +} + +static int is_xattr_datum_unchecked(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) +{ + struct jffs2_raw_node_ref *raw; + int rc = 0; + + spin_lock(&c->erase_completion_lock); + for (raw=xd->node; raw != (void *)xd; raw=raw->next_in_ino) { + if (ref_flags(raw) == REF_UNCHECKED) { + rc = 1; + break; + } + } + spin_unlock(&c->erase_completion_lock); + return rc; +} + +static void unload_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) +{ + /* must be called under down_write(xattr_sem) */ + D1(dbg_xattr("%s: xid=%u, version=%u\n", __func__, xd->xid, xd->version)); + if (xd->xname) { + c->xdatum_mem_usage -= (xd->name_len + 1 + xd->value_len); + kfree(xd->xname); + } + + list_del_init(&xd->xindex); + xd->hashkey = 0; + xd->xname = NULL; + xd->xvalue = NULL; +} + +static void reclaim_xattr_datum(struct jffs2_sb_info *c) +{ + /* must be called under down_write(xattr_sem) */ + struct jffs2_xattr_datum *xd, *_xd; + uint32_t target, before; + static int index = 0; + int count; + + if (c->xdatum_mem_threshold > c->xdatum_mem_usage) + return; + + before = c->xdatum_mem_usage; + target = c->xdatum_mem_usage * 4 / 5; /* 20% reduction */ + for (count = 0; count < XATTRINDEX_HASHSIZE; count++) { + list_for_each_entry_safe(xd, _xd, &c->xattrindex[index], xindex) { + if (xd->flags & JFFS2_XFLAGS_HOT) { + xd->flags &= ~JFFS2_XFLAGS_HOT; + } else if (!(xd->flags & JFFS2_XFLAGS_BIND)) { + unload_xattr_datum(c, xd); + } + if (c->xdatum_mem_usage <= target) + goto out; + } + index = (index+1) % XATTRINDEX_HASHSIZE; + } + out: + JFFS2_NOTICE("xdatum_mem_usage from %u byte to %u byte (%u byte reclaimed)\n", + before, c->xdatum_mem_usage, before - c->xdatum_mem_usage); +} + +static int do_verify_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) +{ + /* must be called under down_write(xattr_sem) */ + struct jffs2_eraseblock *jeb; + struct jffs2_raw_node_ref *raw; + struct jffs2_raw_xattr rx; + size_t readlen; + uint32_t crc, offset, totlen; + int rc; + + spin_lock(&c->erase_completion_lock); + offset = ref_offset(xd->node); + if (ref_flags(xd->node) == REF_PRISTINE) + goto complete; + spin_unlock(&c->erase_completion_lock); + + rc = jffs2_flash_read(c, offset, sizeof(rx), &readlen, (char *)&rx); + if (rc || readlen != sizeof(rx)) { 
+ JFFS2_WARNING("jffs2_flash_read()=%d, req=%zu, read=%zu at %#08x\n", + rc, sizeof(rx), readlen, offset); + return rc ? rc : -EIO; + } + crc = crc32(0, &rx, sizeof(rx) - 4); + if (crc != je32_to_cpu(rx.node_crc)) { + JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n", + offset, je32_to_cpu(rx.hdr_crc), crc); + xd->flags |= JFFS2_XFLAGS_INVALID; + return JFFS2_XATTR_IS_CORRUPTED; + } + totlen = PAD(sizeof(rx) + rx.name_len + 1 + je16_to_cpu(rx.value_len)); + if (je16_to_cpu(rx.magic) != JFFS2_MAGIC_BITMASK + || je16_to_cpu(rx.nodetype) != JFFS2_NODETYPE_XATTR + || je32_to_cpu(rx.totlen) != totlen + || je32_to_cpu(rx.xid) != xd->xid + || je32_to_cpu(rx.version) != xd->version) { + JFFS2_ERROR("inconsistent xdatum at %#08x, magic=%#04x/%#04x, " + "nodetype=%#04x/%#04x, totlen=%u/%u, xid=%u/%u, version=%u/%u\n", + offset, je16_to_cpu(rx.magic), JFFS2_MAGIC_BITMASK, + je16_to_cpu(rx.nodetype), JFFS2_NODETYPE_XATTR, + je32_to_cpu(rx.totlen), totlen, + je32_to_cpu(rx.xid), xd->xid, + je32_to_cpu(rx.version), xd->version); + xd->flags |= JFFS2_XFLAGS_INVALID; + return JFFS2_XATTR_IS_CORRUPTED; + } + xd->xprefix = rx.xprefix; + xd->name_len = rx.name_len; + xd->value_len = je16_to_cpu(rx.value_len); + xd->data_crc = je32_to_cpu(rx.data_crc); + + spin_lock(&c->erase_completion_lock); + complete: + for (raw=xd->node; raw != (void *)xd; raw=raw->next_in_ino) { + jeb = &c->blocks[ref_offset(raw) / c->sector_size]; + totlen = PAD(ref_totlen(c, jeb, raw)); + if (ref_flags(raw) == REF_UNCHECKED) { + c->unchecked_size -= totlen; c->used_size += totlen; + jeb->unchecked_size -= totlen; jeb->used_size += totlen; + } + raw->flash_offset = ref_offset(raw) | ((xd->node==raw) ? REF_PRISTINE : REF_NORMAL); + } + spin_unlock(&c->erase_completion_lock); + + /* unchecked xdatum is chained with c->xattr_unchecked */ + list_del_init(&xd->xindex); + + dbg_xattr("success on verfying xdatum (xid=%u, version=%u)\n", + xd->xid, xd->version); + + return 0; +} + +static int do_load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) +{ + /* must be called under down_write(xattr_sem) */ + char *data; + size_t readlen; + uint32_t crc, length; + int i, ret, retry = 0; + + BUG_ON(ref_flags(xd->node) != REF_PRISTINE); + BUG_ON(!list_empty(&xd->xindex)); + retry: + length = xd->name_len + 1 + xd->value_len; + data = kmalloc(length, GFP_KERNEL); + if (!data) + return -ENOMEM; + + ret = jffs2_flash_read(c, ref_offset(xd->node)+sizeof(struct jffs2_raw_xattr), + length, &readlen, data); + + if (ret || length!=readlen) { + JFFS2_WARNING("jffs2_flash_read() returned %d, request=%d, readlen=%zu, at %#08x\n", + ret, length, readlen, ref_offset(xd->node)); + kfree(data); + return ret ? 
ret : -EIO; + } + + data[xd->name_len] = '\0'; + crc = crc32(0, data, length); + if (crc != xd->data_crc) { + JFFS2_WARNING("node CRC failed (JFFS2_NODETYPE_XATTR)" + " at %#08x, read: 0x%08x calculated: 0x%08x\n", + ref_offset(xd->node), xd->data_crc, crc); + kfree(data); + xd->flags |= JFFS2_XFLAGS_INVALID; + return JFFS2_XATTR_IS_CORRUPTED; + } + + xd->flags |= JFFS2_XFLAGS_HOT; + xd->xname = data; + xd->xvalue = data + xd->name_len+1; + + c->xdatum_mem_usage += length; + + xd->hashkey = xattr_datum_hashkey(xd->xprefix, xd->xname, xd->xvalue, xd->value_len); + i = xd->hashkey % XATTRINDEX_HASHSIZE; + list_add(&xd->xindex, &c->xattrindex[i]); + if (!retry) { + retry = 1; + reclaim_xattr_datum(c); + if (!xd->xname) + goto retry; + } + + dbg_xattr("success on loading xdatum (xid=%u, xprefix=%u, xname='%s')\n", + xd->xid, xd->xprefix, xd->xname); + + return 0; +} + +static int load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) +{ + /* must be called under down_write(xattr_sem); + * rc < 0 : recoverable error, try again + * rc = 0 : success + * rc > 0 : Unrecoverable error, this node should be deleted. + */ + int rc = 0; + + BUG_ON(xd->flags & JFFS2_XFLAGS_DEAD); + if (xd->xname) + return 0; + if (xd->flags & JFFS2_XFLAGS_INVALID) + return JFFS2_XATTR_IS_CORRUPTED; + if (unlikely(is_xattr_datum_unchecked(c, xd))) + rc = do_verify_xattr_datum(c, xd); + if (!rc) + rc = do_load_xattr_datum(c, xd); + return rc; +} + +static int save_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) +{ + /* must be called under down_write(xattr_sem) */ + struct jffs2_raw_xattr rx; + struct kvec vecs[2]; + size_t length; + int rc, totlen; + uint32_t phys_ofs = write_ofs(c); + + BUG_ON(!xd->xname); + BUG_ON(xd->flags & (JFFS2_XFLAGS_DEAD|JFFS2_XFLAGS_INVALID)); + + vecs[0].iov_base = ℞ + vecs[0].iov_len = sizeof(rx); + vecs[1].iov_base = xd->xname; + vecs[1].iov_len = xd->name_len + 1 + xd->value_len; + totlen = vecs[0].iov_len + vecs[1].iov_len; + + /* Setup raw-xattr */ + memset(&rx, 0, sizeof(rx)); + rx.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); + rx.nodetype = cpu_to_je16(JFFS2_NODETYPE_XATTR); + rx.totlen = cpu_to_je32(PAD(totlen)); + rx.hdr_crc = cpu_to_je32(crc32(0, &rx, sizeof(struct jffs2_unknown_node) - 4)); + + rx.xid = cpu_to_je32(xd->xid); + rx.version = cpu_to_je32(++xd->version); + rx.xprefix = xd->xprefix; + rx.name_len = xd->name_len; + rx.value_len = cpu_to_je16(xd->value_len); + rx.data_crc = cpu_to_je32(crc32(0, vecs[1].iov_base, vecs[1].iov_len)); + rx.node_crc = cpu_to_je32(crc32(0, &rx, sizeof(struct jffs2_raw_xattr) - 4)); + + rc = jffs2_flash_writev(c, vecs, 2, phys_ofs, &length, 0); + if (rc || totlen != length) { + JFFS2_WARNING("jffs2_flash_writev()=%d, req=%u, wrote=%zu, at %#08x\n", + rc, totlen, length, phys_ofs); + rc = rc ? 
rc : -EIO; + if (length) + jffs2_add_physical_node_ref(c, phys_ofs | REF_OBSOLETE, PAD(totlen), NULL); + + return rc; + } + /* success */ + jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, PAD(totlen), (void *)xd); + + dbg_xattr("success on saving xdatum (xid=%u, version=%u, xprefix=%u, xname='%s')\n", + xd->xid, xd->version, xd->xprefix, xd->xname); + + return 0; +} + +static struct jffs2_xattr_datum *create_xattr_datum(struct jffs2_sb_info *c, + int xprefix, const char *xname, + const char *xvalue, int xsize) +{ + /* must be called under down_write(xattr_sem) */ + struct jffs2_xattr_datum *xd; + uint32_t hashkey, name_len; + char *data; + int i, rc; + + /* Search xattr_datum has same xname/xvalue by index */ + hashkey = xattr_datum_hashkey(xprefix, xname, xvalue, xsize); + i = hashkey % XATTRINDEX_HASHSIZE; + list_for_each_entry(xd, &c->xattrindex[i], xindex) { + if (xd->hashkey==hashkey + && xd->xprefix==xprefix + && xd->value_len==xsize + && !strcmp(xd->xname, xname) + && !memcmp(xd->xvalue, xvalue, xsize)) { + atomic_inc(&xd->refcnt); + return xd; + } + } + + /* Not found, Create NEW XATTR-Cache */ + name_len = strlen(xname); + + xd = jffs2_alloc_xattr_datum(); + if (!xd) + return ERR_PTR(-ENOMEM); + + data = kmalloc(name_len + 1 + xsize, GFP_KERNEL); + if (!data) { + jffs2_free_xattr_datum(xd); + return ERR_PTR(-ENOMEM); + } + strcpy(data, xname); + memcpy(data + name_len + 1, xvalue, xsize); + + atomic_set(&xd->refcnt, 1); + xd->xid = ++c->highest_xid; + xd->flags |= JFFS2_XFLAGS_HOT; + xd->xprefix = xprefix; + + xd->hashkey = hashkey; + xd->xname = data; + xd->xvalue = data + name_len + 1; + xd->name_len = name_len; + xd->value_len = xsize; + xd->data_crc = crc32(0, data, xd->name_len + 1 + xd->value_len); + + rc = save_xattr_datum(c, xd); + if (rc) { + kfree(xd->xname); + jffs2_free_xattr_datum(xd); + return ERR_PTR(rc); + } + + /* Insert Hash Index */ + i = hashkey % XATTRINDEX_HASHSIZE; + list_add(&xd->xindex, &c->xattrindex[i]); + + c->xdatum_mem_usage += (xd->name_len + 1 + xd->value_len); + reclaim_xattr_datum(c); + + return xd; +} + +static void unrefer_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) +{ + /* must be called under down_write(xattr_sem) */ + if (atomic_dec_and_lock(&xd->refcnt, &c->erase_completion_lock)) { + unload_xattr_datum(c, xd); + xd->flags |= JFFS2_XFLAGS_DEAD; + if (xd->node == (void *)xd) { + BUG_ON(!(xd->flags & JFFS2_XFLAGS_INVALID)); + jffs2_free_xattr_datum(xd); + } else { + list_add(&xd->xindex, &c->xattr_dead_list); + } + spin_unlock(&c->erase_completion_lock); + + dbg_xattr("xdatum(xid=%u, version=%u) was removed.\n", + xd->xid, xd->version); + } +} + +/* -------- xref related functions ------------------ + * verify_xattr_ref(c, ref) + * is used to load xref information from medium. Because summary data does not + * contain xid/ino, it's necessary to verify once while mounting process. + * save_xattr_ref(c, ref) + * is used to write xref to medium. If delete marker is marked, it write + * a delete marker of xref into medium. + * create_xattr_ref(c, ic, xd) + * is used to create a new xref and write to medium. + * delete_xattr_ref(c, ref) + * is used to delete jffs2_xattr_ref. It marks xref XREF_DELETE_MARKER, + * and allows GC to reclaim those physical nodes. + * jffs2_xattr_delete_inode(c, ic) + * is called to remove xrefs related to obsolete inode when inode is unlinked. + * jffs2_xattr_free_inode(c, ic) + * is called to release xattr related objects when unmounting. 
+ * check_xattr_ref_inode(c, ic) + * is used to confirm inode does not have duplicate xattr name/value pair. + * jffs2_xattr_do_crccheck_inode(c, ic) + * is used to force xattr data integrity check during the initial gc scan. + * -------------------------------------------------- */ +static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref) +{ + struct jffs2_eraseblock *jeb; + struct jffs2_raw_node_ref *raw; + struct jffs2_raw_xref rr; + size_t readlen; + uint32_t crc, offset, totlen; + int rc; + + spin_lock(&c->erase_completion_lock); + if (ref_flags(ref->node) != REF_UNCHECKED) + goto complete; + offset = ref_offset(ref->node); + spin_unlock(&c->erase_completion_lock); + + rc = jffs2_flash_read(c, offset, sizeof(rr), &readlen, (char *)&rr); + if (rc || sizeof(rr) != readlen) { + JFFS2_WARNING("jffs2_flash_read()=%d, req=%zu, read=%zu, at %#08x\n", + rc, sizeof(rr), readlen, offset); + return rc ? rc : -EIO; + } + /* obsolete node */ + crc = crc32(0, &rr, sizeof(rr) - 4); + if (crc != je32_to_cpu(rr.node_crc)) { + JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n", + offset, je32_to_cpu(rr.node_crc), crc); + return JFFS2_XATTR_IS_CORRUPTED; + } + if (je16_to_cpu(rr.magic) != JFFS2_MAGIC_BITMASK + || je16_to_cpu(rr.nodetype) != JFFS2_NODETYPE_XREF + || je32_to_cpu(rr.totlen) != PAD(sizeof(rr))) { + JFFS2_ERROR("inconsistent xref at %#08x, magic=%#04x/%#04x, " + "nodetype=%#04x/%#04x, totlen=%u/%zu\n", + offset, je16_to_cpu(rr.magic), JFFS2_MAGIC_BITMASK, + je16_to_cpu(rr.nodetype), JFFS2_NODETYPE_XREF, + je32_to_cpu(rr.totlen), PAD(sizeof(rr))); + return JFFS2_XATTR_IS_CORRUPTED; + } + ref->ino = je32_to_cpu(rr.ino); + ref->xid = je32_to_cpu(rr.xid); + ref->xseqno = je32_to_cpu(rr.xseqno); + if (ref->xseqno > c->highest_xseqno) + c->highest_xseqno = (ref->xseqno & ~XREF_DELETE_MARKER); + + spin_lock(&c->erase_completion_lock); + complete: + for (raw=ref->node; raw != (void *)ref; raw=raw->next_in_ino) { + jeb = &c->blocks[ref_offset(raw) / c->sector_size]; + totlen = PAD(ref_totlen(c, jeb, raw)); + if (ref_flags(raw) == REF_UNCHECKED) { + c->unchecked_size -= totlen; c->used_size += totlen; + jeb->unchecked_size -= totlen; jeb->used_size += totlen; + } + raw->flash_offset = ref_offset(raw) | ((ref->node==raw) ? 
REF_PRISTINE : REF_NORMAL); + } + spin_unlock(&c->erase_completion_lock); + + dbg_xattr("success on verifying xref (ino=%u, xid=%u) at %#08x\n", + ref->ino, ref->xid, ref_offset(ref->node)); + return 0; +} + +static int save_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref) +{ + /* must be called under down_write(xattr_sem) */ + struct jffs2_raw_xref rr; + size_t length; + uint32_t xseqno, phys_ofs = write_ofs(c); + int ret; + + rr.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); + rr.nodetype = cpu_to_je16(JFFS2_NODETYPE_XREF); + rr.totlen = cpu_to_je32(PAD(sizeof(rr))); + rr.hdr_crc = cpu_to_je32(crc32(0, &rr, sizeof(struct jffs2_unknown_node) - 4)); + + xseqno = (c->highest_xseqno += 2); + if (is_xattr_ref_dead(ref)) { + xseqno |= XREF_DELETE_MARKER; + rr.ino = cpu_to_je32(ref->ino); + rr.xid = cpu_to_je32(ref->xid); + } else { + rr.ino = cpu_to_je32(ref->ic->ino); + rr.xid = cpu_to_je32(ref->xd->xid); + } + rr.xseqno = cpu_to_je32(xseqno); + rr.node_crc = cpu_to_je32(crc32(0, &rr, sizeof(rr) - 4)); + + ret = jffs2_flash_write(c, phys_ofs, sizeof(rr), &length, (char *)&rr); + if (ret || sizeof(rr) != length) { + JFFS2_WARNING("jffs2_flash_write() returned %d, request=%zu, retlen=%zu, at %#08x\n", + ret, sizeof(rr), length, phys_ofs); + ret = ret ? ret : -EIO; + if (length) + jffs2_add_physical_node_ref(c, phys_ofs | REF_OBSOLETE, PAD(sizeof(rr)), NULL); + + return ret; + } + /* success */ + ref->xseqno = xseqno; + jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, PAD(sizeof(rr)), (void *)ref); + + dbg_xattr("success on saving xref (ino=%u, xid=%u)\n", ref->ic->ino, ref->xd->xid); + + return 0; +} + +static struct jffs2_xattr_ref *create_xattr_ref(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, + struct jffs2_xattr_datum *xd) +{ + /* must be called under down_write(xattr_sem) */ + struct jffs2_xattr_ref *ref; + int ret; + + ref = jffs2_alloc_xattr_ref(); + if (!ref) + return ERR_PTR(-ENOMEM); + ref->ic = ic; + ref->xd = xd; + + ret = save_xattr_ref(c, ref); + if (ret) { + jffs2_free_xattr_ref(ref); + return ERR_PTR(ret); + } + + /* Chain to inode */ + ref->next = ic->xref; + ic->xref = ref; + + return ref; /* success */ +} + +static void delete_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref) +{ + /* must be called under down_write(xattr_sem) */ + struct jffs2_xattr_datum *xd; + + xd = ref->xd; + ref->xseqno |= XREF_DELETE_MARKER; + ref->ino = ref->ic->ino; + ref->xid = ref->xd->xid; + spin_lock(&c->erase_completion_lock); + ref->next = c->xref_dead_list; + c->xref_dead_list = ref; + spin_unlock(&c->erase_completion_lock); + + dbg_xattr("xref(ino=%u, xid=%u, xseqno=%u) was removed.\n", + ref->ino, ref->xid, ref->xseqno); + + unrefer_xattr_datum(c, xd); +} + +void jffs2_xattr_delete_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) +{ + /* It's called from jffs2_evict_inode() on inode removing. + When an inode with XATTR is removed, those XATTRs must be removed. */ + struct jffs2_xattr_ref *ref, *_ref; + + if (!ic || ic->pino_nlink > 0) + return; + + down_write(&c->xattr_sem); + for (ref = ic->xref; ref; ref = _ref) { + _ref = ref->next; + delete_xattr_ref(c, ref); + } + ic->xref = NULL; + up_write(&c->xattr_sem); +} + +void jffs2_xattr_free_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) +{ + /* It's called from jffs2_free_ino_caches() until unmounting FS. 
*/ + struct jffs2_xattr_datum *xd; + struct jffs2_xattr_ref *ref, *_ref; + + down_write(&c->xattr_sem); + for (ref = ic->xref; ref; ref = _ref) { + _ref = ref->next; + xd = ref->xd; + if (atomic_dec_and_test(&xd->refcnt)) { + unload_xattr_datum(c, xd); + jffs2_free_xattr_datum(xd); + } + jffs2_free_xattr_ref(ref); + } + ic->xref = NULL; + up_write(&c->xattr_sem); +} + +static int check_xattr_ref_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) +{ + /* success of check_xattr_ref_inode() means that inode (ic) dose not have + * duplicate name/value pairs. If duplicate name/value pair would be found, + * one will be removed. + */ + struct jffs2_xattr_ref *ref, *cmp, **pref, **pcmp; + int rc = 0; + + if (likely(ic->flags & INO_FLAGS_XATTR_CHECKED)) + return 0; + down_write(&c->xattr_sem); + retry: + rc = 0; + for (ref=ic->xref, pref=&ic->xref; ref; pref=&ref->next, ref=ref->next) { + if (!ref->xd->xname) { + rc = load_xattr_datum(c, ref->xd); + if (unlikely(rc > 0)) { + *pref = ref->next; + delete_xattr_ref(c, ref); + goto retry; + } else if (unlikely(rc < 0)) + goto out; + } + for (cmp=ref->next, pcmp=&ref->next; cmp; pcmp=&cmp->next, cmp=cmp->next) { + if (!cmp->xd->xname) { + ref->xd->flags |= JFFS2_XFLAGS_BIND; + rc = load_xattr_datum(c, cmp->xd); + ref->xd->flags &= ~JFFS2_XFLAGS_BIND; + if (unlikely(rc > 0)) { + *pcmp = cmp->next; + delete_xattr_ref(c, cmp); + goto retry; + } else if (unlikely(rc < 0)) + goto out; + } + if (ref->xd->xprefix == cmp->xd->xprefix + && !strcmp(ref->xd->xname, cmp->xd->xname)) { + if (ref->xseqno > cmp->xseqno) { + *pcmp = cmp->next; + delete_xattr_ref(c, cmp); + } else { + *pref = ref->next; + delete_xattr_ref(c, ref); + } + goto retry; + } + } + } + ic->flags |= INO_FLAGS_XATTR_CHECKED; + out: + up_write(&c->xattr_sem); + + return rc; +} + +void jffs2_xattr_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) +{ + check_xattr_ref_inode(c, ic); +} + +/* -------- xattr subsystem functions --------------- + * jffs2_init_xattr_subsystem(c) + * is used to initialize semaphore and list_head, and some variables. + * jffs2_find_xattr_datum(c, xid) + * is used to lookup xdatum while scanning process. + * jffs2_clear_xattr_subsystem(c) + * is used to release any xattr related objects. + * jffs2_build_xattr_subsystem(c) + * is used to associate xdatum and xref while super block building process. + * jffs2_setup_xattr_datum(c, xid, version) + * is used to insert xdatum while scanning process. + * -------------------------------------------------- */ +void jffs2_init_xattr_subsystem(struct jffs2_sb_info *c) +{ + int i; + + for (i=0; i < XATTRINDEX_HASHSIZE; i++) + INIT_LIST_HEAD(&c->xattrindex[i]); + INIT_LIST_HEAD(&c->xattr_unchecked); + INIT_LIST_HEAD(&c->xattr_dead_list); + c->xref_dead_list = NULL; + c->xref_temp = NULL; + + init_rwsem(&c->xattr_sem); + c->highest_xid = 0; + c->highest_xseqno = 0; + c->xdatum_mem_usage = 0; + c->xdatum_mem_threshold = 32 * 1024; /* Default 32KB */ +} + +static struct jffs2_xattr_datum *jffs2_find_xattr_datum(struct jffs2_sb_info *c, uint32_t xid) +{ + struct jffs2_xattr_datum *xd; + int i = xid % XATTRINDEX_HASHSIZE; + + /* It's only used in scanning/building process. 
*/ + BUG_ON(!(c->flags & (JFFS2_SB_FLAG_SCANNING|JFFS2_SB_FLAG_BUILDING))); + + list_for_each_entry(xd, &c->xattrindex[i], xindex) { + if (xd->xid==xid) + return xd; + } + return NULL; +} + +void jffs2_clear_xattr_subsystem(struct jffs2_sb_info *c) +{ + struct jffs2_xattr_datum *xd, *_xd; + struct jffs2_xattr_ref *ref, *_ref; + int i; + + for (ref=c->xref_temp; ref; ref = _ref) { + _ref = ref->next; + jffs2_free_xattr_ref(ref); + } + + for (ref=c->xref_dead_list; ref; ref = _ref) { + _ref = ref->next; + jffs2_free_xattr_ref(ref); + } + + for (i=0; i < XATTRINDEX_HASHSIZE; i++) { + list_for_each_entry_safe(xd, _xd, &c->xattrindex[i], xindex) { + list_del(&xd->xindex); + if (xd->xname) + kfree(xd->xname); + jffs2_free_xattr_datum(xd); + } + } + + list_for_each_entry_safe(xd, _xd, &c->xattr_dead_list, xindex) { + list_del(&xd->xindex); + jffs2_free_xattr_datum(xd); + } + list_for_each_entry_safe(xd, _xd, &c->xattr_unchecked, xindex) { + list_del(&xd->xindex); + jffs2_free_xattr_datum(xd); + } +} + +#define XREF_TMPHASH_SIZE (128) +void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c) +{ + struct jffs2_xattr_ref *ref, *_ref; + struct jffs2_xattr_ref *xref_tmphash[XREF_TMPHASH_SIZE]; + struct jffs2_xattr_datum *xd, *_xd; + struct jffs2_inode_cache *ic; + struct jffs2_raw_node_ref *raw; + int i, xdatum_count = 0, xdatum_unchecked_count = 0, xref_count = 0; + int xdatum_orphan_count = 0, xref_orphan_count = 0, xref_dead_count = 0; + + BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING)); + + /* Phase.1 : Merge same xref */ + for (i=0; i < XREF_TMPHASH_SIZE; i++) + xref_tmphash[i] = NULL; + for (ref=c->xref_temp; ref; ref=_ref) { + struct jffs2_xattr_ref *tmp; + + _ref = ref->next; + if (ref_flags(ref->node) != REF_PRISTINE) { + if (verify_xattr_ref(c, ref)) { + BUG_ON(ref->node->next_in_ino != (void *)ref); + ref->node->next_in_ino = NULL; + jffs2_mark_node_obsolete(c, ref->node); + jffs2_free_xattr_ref(ref); + continue; + } + } + + i = (ref->ino ^ ref->xid) % XREF_TMPHASH_SIZE; + for (tmp=xref_tmphash[i]; tmp; tmp=tmp->next) { + if (tmp->ino == ref->ino && tmp->xid == ref->xid) + break; + } + if (tmp) { + raw = ref->node; + if (ref->xseqno > tmp->xseqno) { + tmp->xseqno = ref->xseqno; + raw->next_in_ino = tmp->node; + tmp->node = raw; + } else { + raw->next_in_ino = tmp->node->next_in_ino; + tmp->node->next_in_ino = raw; + } + jffs2_free_xattr_ref(ref); + continue; + } else { + ref->next = xref_tmphash[i]; + xref_tmphash[i] = ref; + } + } + c->xref_temp = NULL; + + /* Phase.2 : Bind xref with inode_cache and xattr_datum */ + for (i=0; i < XREF_TMPHASH_SIZE; i++) { + for (ref=xref_tmphash[i]; ref; ref=_ref) { + xref_count++; + _ref = ref->next; + if (is_xattr_ref_dead(ref)) { + ref->next = c->xref_dead_list; + c->xref_dead_list = ref; + xref_dead_count++; + continue; + } + /* At this point, ref->xid and ref->ino contain XID and inode number. + ref->xd and ref->ic are not valid yet. 
*/ + xd = jffs2_find_xattr_datum(c, ref->xid); + ic = jffs2_get_ino_cache(c, ref->ino); + if (!xd || !ic || !ic->pino_nlink) { + dbg_xattr("xref(ino=%u, xid=%u, xseqno=%u) is orphan.\n", + ref->ino, ref->xid, ref->xseqno); + ref->xseqno |= XREF_DELETE_MARKER; + ref->next = c->xref_dead_list; + c->xref_dead_list = ref; + xref_orphan_count++; + continue; + } + ref->xd = xd; + ref->ic = ic; + atomic_inc(&xd->refcnt); + ref->next = ic->xref; + ic->xref = ref; + } + } + + /* Phase.3 : Link unchecked xdatum to xattr_unchecked list */ + for (i=0; i < XATTRINDEX_HASHSIZE; i++) { + list_for_each_entry_safe(xd, _xd, &c->xattrindex[i], xindex) { + xdatum_count++; + list_del_init(&xd->xindex); + if (!atomic_read(&xd->refcnt)) { + dbg_xattr("xdatum(xid=%u, version=%u) is orphan.\n", + xd->xid, xd->version); + xd->flags |= JFFS2_XFLAGS_DEAD; + list_add(&xd->xindex, &c->xattr_unchecked); + xdatum_orphan_count++; + continue; + } + if (is_xattr_datum_unchecked(c, xd)) { + dbg_xattr("unchecked xdatum(xid=%u, version=%u)\n", + xd->xid, xd->version); + list_add(&xd->xindex, &c->xattr_unchecked); + xdatum_unchecked_count++; + } + } + } + /* build complete */ + JFFS2_NOTICE("complete building xattr subsystem, %u of xdatum" + " (%u unchecked, %u orphan) and " + "%u of xref (%u dead, %u orphan) found.\n", + xdatum_count, xdatum_unchecked_count, xdatum_orphan_count, + xref_count, xref_dead_count, xref_orphan_count); +} + +struct jffs2_xattr_datum *jffs2_setup_xattr_datum(struct jffs2_sb_info *c, + uint32_t xid, uint32_t version) +{ + struct jffs2_xattr_datum *xd; + + xd = jffs2_find_xattr_datum(c, xid); + if (!xd) { + xd = jffs2_alloc_xattr_datum(); + if (!xd) + return ERR_PTR(-ENOMEM); + xd->xid = xid; + xd->version = version; + if (xd->xid > c->highest_xid) + c->highest_xid = xd->xid; + list_add_tail(&xd->xindex, &c->xattrindex[xid % XATTRINDEX_HASHSIZE]); + } + return xd; +} + +/* -------- xattr subsystem functions --------------- + * xprefix_to_handler(xprefix) + * is used to translate xprefix into xattr_handler. + * jffs2_listxattr(dentry, buffer, size) + * is an implementation of listxattr handler on jffs2. + * do_jffs2_getxattr(inode, xprefix, xname, buffer, size) + * is an implementation of getxattr handler on jffs2. + * do_jffs2_setxattr(inode, xprefix, xname, buffer, size, flags) + * is an implementation of setxattr handler on jffs2. 
+ * -------------------------------------------------- */ +const struct xattr_handler *jffs2_xattr_handlers[] = { + &jffs2_user_xattr_handler, +#ifdef CONFIG_JFFS2_FS_SECURITY + &jffs2_security_xattr_handler, +#endif +#ifdef CONFIG_JFFS2_FS_POSIX_ACL + &posix_acl_access_xattr_handler, + &posix_acl_default_xattr_handler, +#endif + &jffs2_trusted_xattr_handler, + NULL +}; + +static const struct xattr_handler *xprefix_to_handler(int xprefix) { + const struct xattr_handler *ret; + + switch (xprefix) { + case JFFS2_XPREFIX_USER: + ret = &jffs2_user_xattr_handler; + break; +#ifdef CONFIG_JFFS2_FS_SECURITY + case JFFS2_XPREFIX_SECURITY: + ret = &jffs2_security_xattr_handler; + break; +#endif +#ifdef CONFIG_JFFS2_FS_POSIX_ACL + case JFFS2_XPREFIX_ACL_ACCESS: + ret = &posix_acl_access_xattr_handler; + break; + case JFFS2_XPREFIX_ACL_DEFAULT: + ret = &posix_acl_default_xattr_handler; + break; +#endif + case JFFS2_XPREFIX_TRUSTED: + ret = &jffs2_trusted_xattr_handler; + break; + default: + ret = NULL; + break; + } + return ret; +} + +ssize_t jffs2_listxattr(struct dentry *dentry, char *buffer, size_t size) +{ + struct inode *inode = dentry->d_inode; + struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); + struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); + struct jffs2_inode_cache *ic = f->inocache; + struct jffs2_xattr_ref *ref, **pref; + struct jffs2_xattr_datum *xd; + const struct xattr_handler *xhandle; + ssize_t len, rc; + int retry = 0; + + rc = check_xattr_ref_inode(c, ic); + if (unlikely(rc)) + return rc; + + down_read(&c->xattr_sem); + retry: + len = 0; + for (ref=ic->xref, pref=&ic->xref; ref; pref=&ref->next, ref=ref->next) { + BUG_ON(ref->ic != ic); + xd = ref->xd; + if (!xd->xname) { + /* xdatum is unchached */ + if (!retry) { + retry = 1; + up_read(&c->xattr_sem); + down_write(&c->xattr_sem); + goto retry; + } else { + rc = load_xattr_datum(c, xd); + if (unlikely(rc > 0)) { + *pref = ref->next; + delete_xattr_ref(c, ref); + goto retry; + } else if (unlikely(rc < 0)) + goto out; + } + } + xhandle = xprefix_to_handler(xd->xprefix); + if (!xhandle) + continue; + if (buffer) { + rc = xhandle->list(dentry, buffer+len, size-len, + xd->xname, xd->name_len, xd->flags); + } else { + rc = xhandle->list(dentry, NULL, 0, xd->xname, + xd->name_len, xd->flags); + } + if (rc < 0) + goto out; + len += rc; + } + rc = len; + out: + if (!retry) { + up_read(&c->xattr_sem); + } else { + up_write(&c->xattr_sem); + } + return rc; +} + +int do_jffs2_getxattr(struct inode *inode, int xprefix, const char *xname, + char *buffer, size_t size) +{ + struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); + struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); + struct jffs2_inode_cache *ic = f->inocache; + struct jffs2_xattr_datum *xd; + struct jffs2_xattr_ref *ref, **pref; + int rc, retry = 0; + + rc = check_xattr_ref_inode(c, ic); + if (unlikely(rc)) + return rc; + + down_read(&c->xattr_sem); + retry: + for (ref=ic->xref, pref=&ic->xref; ref; pref=&ref->next, ref=ref->next) { + BUG_ON(ref->ic!=ic); + + xd = ref->xd; + if (xd->xprefix != xprefix) + continue; + if (!xd->xname) { + /* xdatum is unchached */ + if (!retry) { + retry = 1; + up_read(&c->xattr_sem); + down_write(&c->xattr_sem); + goto retry; + } else { + rc = load_xattr_datum(c, xd); + if (unlikely(rc > 0)) { + *pref = ref->next; + delete_xattr_ref(c, ref); + goto retry; + } else if (unlikely(rc < 0)) { + goto out; + } + } + } + if (!strcmp(xname, xd->xname)) { + rc = xd->value_len; + if (buffer) { + if (size < rc) { + rc = -ERANGE; + } else { + 
memcpy(buffer, xd->xvalue, rc); + } + } + goto out; + } + } + rc = -ENODATA; + out: + if (!retry) { + up_read(&c->xattr_sem); + } else { + up_write(&c->xattr_sem); + } + return rc; +} + +int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname, + const char *buffer, size_t size, int flags) +{ + struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); + struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); + struct jffs2_inode_cache *ic = f->inocache; + struct jffs2_xattr_datum *xd; + struct jffs2_xattr_ref *ref, *newref, **pref; + uint32_t length, request; + int rc; + + rc = check_xattr_ref_inode(c, ic); + if (unlikely(rc)) + return rc; + + request = PAD(sizeof(struct jffs2_raw_xattr) + strlen(xname) + 1 + size); + rc = jffs2_reserve_space(c, request, &length, + ALLOC_NORMAL, JFFS2_SUMMARY_XATTR_SIZE); + if (rc) { + JFFS2_WARNING("jffs2_reserve_space()=%d, request=%u\n", rc, request); + return rc; + } + + /* Find existing xattr */ + down_write(&c->xattr_sem); + retry: + for (ref=ic->xref, pref=&ic->xref; ref; pref=&ref->next, ref=ref->next) { + xd = ref->xd; + if (xd->xprefix != xprefix) + continue; + if (!xd->xname) { + rc = load_xattr_datum(c, xd); + if (unlikely(rc > 0)) { + *pref = ref->next; + delete_xattr_ref(c, ref); + goto retry; + } else if (unlikely(rc < 0)) + goto out; + } + if (!strcmp(xd->xname, xname)) { + if (flags & XATTR_CREATE) { + rc = -EEXIST; + goto out; + } + if (!buffer) { + ref->ino = ic->ino; + ref->xid = xd->xid; + ref->xseqno |= XREF_DELETE_MARKER; + rc = save_xattr_ref(c, ref); + if (!rc) { + *pref = ref->next; + spin_lock(&c->erase_completion_lock); + ref->next = c->xref_dead_list; + c->xref_dead_list = ref; + spin_unlock(&c->erase_completion_lock); + unrefer_xattr_datum(c, xd); + } else { + ref->ic = ic; + ref->xd = xd; + ref->xseqno &= ~XREF_DELETE_MARKER; + } + goto out; + } + goto found; + } + } + /* not found */ + if (flags & XATTR_REPLACE) { + rc = -ENODATA; + goto out; + } + if (!buffer) { + rc = -ENODATA; + goto out; + } + found: + xd = create_xattr_datum(c, xprefix, xname, buffer, size); + if (IS_ERR(xd)) { + rc = PTR_ERR(xd); + goto out; + } + up_write(&c->xattr_sem); + jffs2_complete_reservation(c); + + /* create xattr_ref */ + request = PAD(sizeof(struct jffs2_raw_xref)); + rc = jffs2_reserve_space(c, request, &length, + ALLOC_NORMAL, JFFS2_SUMMARY_XREF_SIZE); + down_write(&c->xattr_sem); + if (rc) { + JFFS2_WARNING("jffs2_reserve_space()=%d, request=%u\n", rc, request); + unrefer_xattr_datum(c, xd); + up_write(&c->xattr_sem); + return rc; + } + if (ref) + *pref = ref->next; + newref = create_xattr_ref(c, ic, xd); + if (IS_ERR(newref)) { + if (ref) { + ref->next = ic->xref; + ic->xref = ref; + } + rc = PTR_ERR(newref); + unrefer_xattr_datum(c, xd); + } else if (ref) { + delete_xattr_ref(c, ref); + } + out: + up_write(&c->xattr_sem); + jffs2_complete_reservation(c); + return rc; +} + +/* -------- garbage collector functions ------------- + * jffs2_garbage_collect_xattr_datum(c, xd, raw) + * is used to move xdatum into new node. + * jffs2_garbage_collect_xattr_ref(c, ref, raw) + * is used to move xref into new node. + * jffs2_verify_xattr(c) + * is used to call do_verify_xattr_datum() before garbage collecting. + * jffs2_release_xattr_datum(c, xd) + * is used to release an in-memory object of xdatum. + * jffs2_release_xattr_ref(c, ref) + * is used to release an in-memory object of xref. 
+ * -------------------------------------------------- */ +int jffs2_garbage_collect_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd, + struct jffs2_raw_node_ref *raw) +{ + uint32_t totlen, length, old_ofs; + int rc = 0; + + down_write(&c->xattr_sem); + if (xd->node != raw) + goto out; + if (xd->flags & (JFFS2_XFLAGS_DEAD|JFFS2_XFLAGS_INVALID)) + goto out; + + rc = load_xattr_datum(c, xd); + if (unlikely(rc)) { + rc = (rc > 0) ? 0 : rc; + goto out; + } + old_ofs = ref_offset(xd->node); + totlen = PAD(sizeof(struct jffs2_raw_xattr) + + xd->name_len + 1 + xd->value_len); + rc = jffs2_reserve_space_gc(c, totlen, &length, JFFS2_SUMMARY_XATTR_SIZE); + if (rc) { + JFFS2_WARNING("jffs2_reserve_space_gc()=%d, request=%u\n", rc, totlen); + goto out; + } + rc = save_xattr_datum(c, xd); + if (!rc) + dbg_xattr("xdatum (xid=%u, version=%u) GC'ed from %#08x to %08x\n", + xd->xid, xd->version, old_ofs, ref_offset(xd->node)); + out: + if (!rc) + jffs2_mark_node_obsolete(c, raw); + up_write(&c->xattr_sem); + return rc; +} + +int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref, + struct jffs2_raw_node_ref *raw) +{ + uint32_t totlen, length, old_ofs; + int rc = 0; + + down_write(&c->xattr_sem); + BUG_ON(!ref->node); + + if (ref->node != raw) + goto out; + if (is_xattr_ref_dead(ref) && (raw->next_in_ino == (void *)ref)) + goto out; + + old_ofs = ref_offset(ref->node); + totlen = ref_totlen(c, c->gcblock, ref->node); + + rc = jffs2_reserve_space_gc(c, totlen, &length, JFFS2_SUMMARY_XREF_SIZE); + if (rc) { + JFFS2_WARNING("%s: jffs2_reserve_space_gc() = %d, request = %u\n", + __func__, rc, totlen); + rc = rc ? rc : -EBADFD; + goto out; + } + rc = save_xattr_ref(c, ref); + if (!rc) + dbg_xattr("xref (ino=%u, xid=%u) GC'ed from %#08x to %08x\n", + ref->ic->ino, ref->xd->xid, old_ofs, ref_offset(ref->node)); + out: + if (!rc) + jffs2_mark_node_obsolete(c, raw); + up_write(&c->xattr_sem); + return rc; +} + +int jffs2_verify_xattr(struct jffs2_sb_info *c) +{ + struct jffs2_xattr_datum *xd, *_xd; + struct jffs2_eraseblock *jeb; + struct jffs2_raw_node_ref *raw; + uint32_t totlen; + int rc; + + down_write(&c->xattr_sem); + list_for_each_entry_safe(xd, _xd, &c->xattr_unchecked, xindex) { + rc = do_verify_xattr_datum(c, xd); + if (rc < 0) + continue; + list_del_init(&xd->xindex); + spin_lock(&c->erase_completion_lock); + for (raw=xd->node; raw != (void *)xd; raw=raw->next_in_ino) { + if (ref_flags(raw) != REF_UNCHECKED) + continue; + jeb = &c->blocks[ref_offset(raw) / c->sector_size]; + totlen = PAD(ref_totlen(c, jeb, raw)); + c->unchecked_size -= totlen; c->used_size += totlen; + jeb->unchecked_size -= totlen; jeb->used_size += totlen; + raw->flash_offset = ref_offset(raw) + | ((xd->node == (void *)raw) ? REF_PRISTINE : REF_NORMAL); + } + if (xd->flags & JFFS2_XFLAGS_DEAD) + list_add(&xd->xindex, &c->xattr_dead_list); + spin_unlock(&c->erase_completion_lock); + } + up_write(&c->xattr_sem); + return list_empty(&c->xattr_unchecked) ? 
1 : 0; +} + +void jffs2_release_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd) +{ + /* must be called under spin_lock(&c->erase_completion_lock) */ + if (atomic_read(&xd->refcnt) || xd->node != (void *)xd) + return; + + list_del(&xd->xindex); + jffs2_free_xattr_datum(xd); +} + +void jffs2_release_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref) +{ + /* must be called under spin_lock(&c->erase_completion_lock) */ + struct jffs2_xattr_ref *tmp, **ptmp; + + if (ref->node != (void *)ref) + return; + + for (tmp=c->xref_dead_list, ptmp=&c->xref_dead_list; tmp; ptmp=&tmp->next, tmp=tmp->next) { + if (ref == tmp) { + *ptmp = tmp->next; + break; + } + } + jffs2_free_xattr_ref(ref); +} diff --git a/fs/jffs2/xattr.h b/fs/jffs2/xattr.h new file mode 100644 index 00000000000..467ff376ee2 --- /dev/null +++ b/fs/jffs2/xattr.h @@ -0,0 +1,133 @@ +/* + * JFFS2 -- Journalling Flash File System, Version 2. + * + * Copyright © 2006 NEC Corporation + * + * Created by KaiGai Kohei <kaigai@ak.jp.nec.com> + * + * For licensing information, see the file 'LICENCE' in this directory. + * + */ + +#ifndef _JFFS2_FS_XATTR_H_ +#define _JFFS2_FS_XATTR_H_ + +#include <linux/xattr.h> +#include <linux/list.h> + +#define JFFS2_XFLAGS_HOT (0x01) /* This datum is HOT */ +#define JFFS2_XFLAGS_BIND (0x02) /* This datum is not reclaimed */ +#define JFFS2_XFLAGS_DEAD (0x40) /* This datum is already dead */ +#define JFFS2_XFLAGS_INVALID (0x80) /* This datum contains crc error */ + +struct jffs2_xattr_datum +{ + void *always_null; + struct jffs2_raw_node_ref *node; + uint8_t class; + uint8_t flags; + uint16_t xprefix; /* see JFFS2_XATTR_PREFIX_* */ + + struct list_head xindex; /* chained from c->xattrindex[n] */ + atomic_t refcnt; /* # of xattr_ref refers this */ + uint32_t xid; + uint32_t version; + + uint32_t data_crc; + uint32_t hashkey; + char *xname; /* XATTR name without prefix */ + uint32_t name_len; /* length of xname */ + char *xvalue; /* XATTR value */ + uint32_t value_len; /* length of xvalue */ +}; + +struct jffs2_inode_cache; +struct jffs2_xattr_ref +{ + void *always_null; + struct jffs2_raw_node_ref *node; + uint8_t class; + uint8_t flags; /* Currently unused */ + u16 unused; + + uint32_t xseqno; + union { + struct jffs2_inode_cache *ic; /* reference to jffs2_inode_cache */ + uint32_t ino; /* only used in scanning/building */ + }; + union { + struct jffs2_xattr_datum *xd; /* reference to jffs2_xattr_datum */ + uint32_t xid; /* only used in sccanning/building */ + }; + struct jffs2_xattr_ref *next; /* chained from ic->xref_list */ +}; + +#define XREF_DELETE_MARKER (0x00000001) +static inline int is_xattr_ref_dead(struct jffs2_xattr_ref *ref) +{ + return ((ref->xseqno & XREF_DELETE_MARKER) != 0); +} + +#ifdef CONFIG_JFFS2_FS_XATTR + +extern void jffs2_init_xattr_subsystem(struct jffs2_sb_info *c); +extern void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c); +extern void jffs2_clear_xattr_subsystem(struct jffs2_sb_info *c); + +extern struct jffs2_xattr_datum *jffs2_setup_xattr_datum(struct jffs2_sb_info *c, + uint32_t xid, uint32_t version); + +extern void jffs2_xattr_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic); +extern void jffs2_xattr_delete_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic); +extern void jffs2_xattr_free_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic); + +extern int jffs2_garbage_collect_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd, + struct jffs2_raw_node_ref *raw); +extern int 
jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref, + struct jffs2_raw_node_ref *raw); +extern int jffs2_verify_xattr(struct jffs2_sb_info *c); +extern void jffs2_release_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd); +extern void jffs2_release_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref); + +extern int do_jffs2_getxattr(struct inode *inode, int xprefix, const char *xname, + char *buffer, size_t size); +extern int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname, + const char *buffer, size_t size, int flags); + +extern const struct xattr_handler *jffs2_xattr_handlers[]; +extern const struct xattr_handler jffs2_user_xattr_handler; +extern const struct xattr_handler jffs2_trusted_xattr_handler; + +extern ssize_t jffs2_listxattr(struct dentry *, char *, size_t); +#define jffs2_getxattr generic_getxattr +#define jffs2_setxattr generic_setxattr +#define jffs2_removexattr generic_removexattr + +#else + +#define jffs2_init_xattr_subsystem(c) +#define jffs2_build_xattr_subsystem(c) +#define jffs2_clear_xattr_subsystem(c) + +#define jffs2_xattr_do_crccheck_inode(c, ic) +#define jffs2_xattr_delete_inode(c, ic) +#define jffs2_xattr_free_inode(c, ic) +#define jffs2_verify_xattr(c) (1) + +#define jffs2_xattr_handlers NULL +#define jffs2_listxattr NULL +#define jffs2_getxattr NULL +#define jffs2_setxattr NULL +#define jffs2_removexattr NULL + +#endif /* CONFIG_JFFS2_FS_XATTR */ + +#ifdef CONFIG_JFFS2_FS_SECURITY +extern int jffs2_init_security(struct inode *inode, struct inode *dir, + const struct qstr *qstr); +extern const struct xattr_handler jffs2_security_xattr_handler; +#else +#define jffs2_init_security(inode,dir,qstr) (0) +#endif /* CONFIG_JFFS2_FS_SECURITY */ + +#endif /* _JFFS2_FS_XATTR_H_ */ diff --git a/fs/jffs2/xattr_trusted.c b/fs/jffs2/xattr_trusted.c new file mode 100644 index 00000000000..1c868194c50 --- /dev/null +++ b/fs/jffs2/xattr_trusted.c @@ -0,0 +1,55 @@ +/* + * JFFS2 -- Journalling Flash File System, Version 2. + * + * Copyright © 2006 NEC Corporation + * + * Created by KaiGai Kohei <kaigai@ak.jp.nec.com> + * + * For licensing information, see the file 'LICENCE' in this directory. 
+ * + */ + +#include <linux/kernel.h> +#include <linux/fs.h> +#include <linux/jffs2.h> +#include <linux/xattr.h> +#include <linux/mtd/mtd.h> +#include "nodelist.h" + +static int jffs2_trusted_getxattr(struct dentry *dentry, const char *name, + void *buffer, size_t size, int type) +{ + if (!strcmp(name, "")) + return -EINVAL; + return do_jffs2_getxattr(dentry->d_inode, JFFS2_XPREFIX_TRUSTED, + name, buffer, size); +} + +static int jffs2_trusted_setxattr(struct dentry *dentry, const char *name, + const void *buffer, size_t size, int flags, int type) +{ + if (!strcmp(name, "")) + return -EINVAL; + return do_jffs2_setxattr(dentry->d_inode, JFFS2_XPREFIX_TRUSTED, + name, buffer, size, flags); +} + +static size_t jffs2_trusted_listxattr(struct dentry *dentry, char *list, + size_t list_size, const char *name, size_t name_len, int type) +{ + size_t retlen = XATTR_TRUSTED_PREFIX_LEN + name_len + 1; + + if (list && retlen<=list_size) { + strcpy(list, XATTR_TRUSTED_PREFIX); + strcpy(list + XATTR_TRUSTED_PREFIX_LEN, name); + } + + return retlen; +} + +const struct xattr_handler jffs2_trusted_xattr_handler = { + .prefix = XATTR_TRUSTED_PREFIX, + .list = jffs2_trusted_listxattr, + .set = jffs2_trusted_setxattr, + .get = jffs2_trusted_getxattr +}; diff --git a/fs/jffs2/xattr_user.c b/fs/jffs2/xattr_user.c new file mode 100644 index 00000000000..916b5c96603 --- /dev/null +++ b/fs/jffs2/xattr_user.c @@ -0,0 +1,55 @@ +/* + * JFFS2 -- Journalling Flash File System, Version 2. + * + * Copyright © 2006 NEC Corporation + * + * Created by KaiGai Kohei <kaigai@ak.jp.nec.com> + * + * For licensing information, see the file 'LICENCE' in this directory. + * + */ + +#include <linux/kernel.h> +#include <linux/fs.h> +#include <linux/jffs2.h> +#include <linux/xattr.h> +#include <linux/mtd/mtd.h> +#include "nodelist.h" + +static int jffs2_user_getxattr(struct dentry *dentry, const char *name, + void *buffer, size_t size, int type) +{ + if (!strcmp(name, "")) + return -EINVAL; + return do_jffs2_getxattr(dentry->d_inode, JFFS2_XPREFIX_USER, + name, buffer, size); +} + +static int jffs2_user_setxattr(struct dentry *dentry, const char *name, + const void *buffer, size_t size, int flags, int type) +{ + if (!strcmp(name, "")) + return -EINVAL; + return do_jffs2_setxattr(dentry->d_inode, JFFS2_XPREFIX_USER, + name, buffer, size, flags); +} + +static size_t jffs2_user_listxattr(struct dentry *dentry, char *list, + size_t list_size, const char *name, size_t name_len, int type) +{ + size_t retlen = XATTR_USER_PREFIX_LEN + name_len + 1; + + if (list && retlen <= list_size) { + strcpy(list, XATTR_USER_PREFIX); + strcpy(list + XATTR_USER_PREFIX_LEN, name); + } + + return retlen; +} + +const struct xattr_handler jffs2_user_xattr_handler = { + .prefix = XATTR_USER_PREFIX, + .list = jffs2_user_listxattr, + .set = jffs2_user_setxattr, + .get = jffs2_user_getxattr +}; |
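
The user/trusted handlers added in the two files above are reached through the generic xattr syscalls: xattr.h maps jffs2_getxattr/jffs2_setxattr to generic_getxattr/generic_setxattr, which match the "user."/"trusted." prefix and pass only the remaining name down to do_jffs2_getxattr()/do_jffs2_setxattr(). The short userspace sketch below illustrates that path; it is not part of the patch, and the mount point, file name, attribute name and value are illustrative assumptions.

/* Hypothetical userspace sketch exercising the JFFS2 user.* xattr handler.
 * Path and attribute name are assumed for illustration only. */
#include <stdio.h>
#include <sys/types.h>
#include <sys/xattr.h>

int main(void)
{
	const char *path = "/mnt/jffs2/file";   /* assumed JFFS2 mount point */
	const char value[] = "demo";
	char buf[64];
	ssize_t len;

	/* setxattr(2) selects the handler by the "user." prefix and hands the
	 * remaining name ("comment") to jffs2_user_setxattr(). */
	if (setxattr(path, "user.comment", value, sizeof(value) - 1, 0) != 0) {
		perror("setxattr");
		return 1;
	}

	/* getxattr(2) takes the same route into do_jffs2_getxattr(). */
	len = getxattr(path, "user.comment", buf, sizeof(buf));
	if (len < 0) {
		perror("getxattr");
		return 1;
	}
	printf("user.comment = %.*s\n", (int)len, buf);
	return 0;
}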
