Diffstat (limited to 'drivers/mtd')
78 files changed, 6608 insertions, 5053 deletions
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index f6b775e63ac..1344ad7a4b1 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -78,7 +78,7 @@ config MTD_REDBOOT_DIRECTORY_BLOCK
           option. The option specifies which Flash sectors holds the RedBoot
-          partition table. A zero or positive value gives an absolete
+          partition table. A zero or positive value gives an absolute
           erase block number. A negative value specifies a number of
           sectors before the end of the device.
 
@@ -86,14 +86,14 @@ config MTD_REDBOOT_DIRECTORY_BLOCK
           block and "-2" means the penultimate block.
 
 config MTD_REDBOOT_PARTS_UNALLOCATED
-        bool " Include unallocated flash regions"
+        bool "Include unallocated flash regions"
         depends on MTD_REDBOOT_PARTS
         help
           If you need to register each unallocated flash region as a MTD
           'partition', enable this option.
 
 config MTD_REDBOOT_PARTS_READONLY
-        bool " Force read-only for RedBoot system images"
+        bool "Force read-only for RedBoot system images"
         depends on MTD_REDBOOT_PARTS
         help
           If you need to force read-only for 'RedBoot', 'RedBoot Config' and
@@ -103,7 +103,7 @@ config MTD_CMDLINE_PARTS
         bool "Command line partition table parsing"
         depends on MTD_PARTITIONS = "y"
         ---help---
-          Allow generic configuration of the MTD paritition tables via the kernel
+          Allow generic configuration of the MTD partition tables via the kernel
           command line. Multiple flash resources are supported for hardware where
           different kinds of flash memory are available.
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
index a7ec5954caf..6d8f30deb86 100644
--- a/drivers/mtd/chips/Kconfig
+++ b/drivers/mtd/chips/Kconfig
@@ -30,7 +30,6 @@ config MTD_JEDECPROBE
 
 config MTD_GEN_PROBE
         tristate
-        select OBSOLETE_INTERMODULE
 
 config MTD_CFI_ADV_OPTIONS
         bool "Flash chip driver advanced configuration options"
diff --git a/drivers/mtd/chips/Makefile b/drivers/mtd/chips/Makefile
index 8afe3092c4e..75bc1c2a0f4 100644
--- a/drivers/mtd/chips/Makefile
+++ b/drivers/mtd/chips/Makefile
@@ -3,13 +3,6 @@
 #
 # $Id: Makefile.common,v 1.5 2005/11/07 11:14:22 gleixner Exp $
 
-# *** BIG UGLY NOTE ***
-#
-# The removal of get_module_symbol() and replacement with
-# inter_module_register() et al has introduced a link order dependency
-# here where previously there was none. We now have to ensure that
-# the CFI command set drivers are linked before gen_probe.o
-
 obj-$(CONFIG_MTD)            += chipreg.o
 obj-$(CONFIG_MTD_AMDSTD)     += amd_flash.o
 obj-$(CONFIG_MTD_CFI)        += cfi_probe.o
diff --git a/drivers/mtd/chips/amd_flash.c b/drivers/mtd/chips/amd_flash.c
index 57115618c49..16eaca69fb5 100644
--- a/drivers/mtd/chips/amd_flash.c
+++ b/drivers/mtd/chips/amd_flash.c
@@ -97,7 +97,6 @@ struct amd_flash_private {
         int interleave;
         int numchips;
         unsigned long chipshift;
-//      const char *im_name;
         struct flchip chips[0];
 };
 
@@ -131,12 +130,6 @@ static struct mtd_chip_driver amd_flash_chipdrv = {
         .module = THIS_MODULE
 };
-
-
-static const char im_name[] = "amd_flash";
-
-
-
 static inline __u32 wide_read(struct map_info *map, __u32 addr)
 {
         if (map->buswidth == 1) {
@@ -737,6 +730,7 @@ static struct mtd_info *amd_flash_probe(struct map_info *map)
                 offset += dev_size;
         }
         mtd->type = MTD_NORFLASH;
+        mtd->writesize = 1;
         mtd->flags = MTD_CAP_NORFLASH;
         mtd->name = map->name;
         mtd->erase = amd_flash_erase;
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 1c074d63ff3..39edb8250fb 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -331,13 +331,6 @@ read_pri_intelext(struct map_info *map, __u16 adr)
         return extp;
 }
 
-/* This routine is made available to other mtd code via
- * inter_module_register. It must only be accessed through
- * inter_module_get which will bump the use count of this module. The
- * addresses passed back in cfi are valid as long as the use count of
- * this module is non-zero, i.e. between inter_module_get and
- * inter_module_put. Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
- */
 struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
 {
         struct cfi_private *cfi = map->fldrv_priv;
@@ -364,6 +357,7 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
         mtd->resume = cfi_intelext_resume;
         mtd->flags = MTD_CAP_NORFLASH;
         mtd->name = map->name;
+        mtd->writesize = 1;
 
         mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
 
@@ -406,7 +400,7 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
         for (i=0; i< cfi->numchips; i++) {
                 cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
                 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
-                cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
+                cfi->chips[i].erase_time = 1000<<cfi->cfiq->BlockEraseTimeoutTyp;
                 cfi->chips[i].ref_point_counter = 0;
                 init_waitqueue_head(&(cfi->chips[i].wq));
         }
@@ -415,6 +409,11 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
 
         return cfi_intelext_setup(mtd);
 }
+struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
+struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
+EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
+EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
+EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
 
 static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
 {
@@ -547,12 +546,12 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
         if (extp->MinorVersion >= '4') {
                 struct cfi_intelext_programming_regioninfo *prinfo;
                 prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
-                MTD_PROGREGION_SIZE(mtd) = cfi->interleave << prinfo->ProgRegShift;
+                mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
                 MTD_PROGREGION_CTRLMODE_VALID(mtd) = cfi->interleave * prinfo->ControlValid;
                 MTD_PROGREGION_CTRLMODE_INVALID(mtd) = cfi->interleave * prinfo->ControlInvalid;
-                mtd->flags |= MTD_PROGRAM_REGIONS;
+                mtd->flags &= ~MTD_BIT_WRITEABLE;
                 printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
-                       map->name, MTD_PROGREGION_SIZE(mtd),
+                       map->name, mtd->writesize,
                        MTD_PROGREGION_CTRLMODE_VALID(mtd), MTD_PROGREGION_CTRLMODE_INVALID(mtd));
         }
 
@@ -896,26 +895,33 @@ static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
 
 /*
  * When a delay is required for the flash operation to complete, the
- * xip_udelay() function is polling for both the given timeout and pending
- * (but still masked) hardware interrupts. Whenever there is an interrupt
- * pending then the flash erase or write operation is suspended, array mode
- * restored and interrupts unmasked. Task scheduling might also happen at that
- * point. The CPU eventually returns from the interrupt or the call to
- * schedule() and the suspended flash operation is resumed for the remaining
- * of the delay period.
+ * xip_wait_for_operation() function is polling for both the given timeout
+ * and pending (but still masked) hardware interrupts. Whenever there is an
+ * interrupt pending then the flash erase or write operation is suspended,
+ * array mode restored and interrupts unmasked. Task scheduling might also
+ * happen at that point. The CPU eventually returns from the interrupt or
+ * the call to schedule() and the suspended flash operation is resumed for
+ * the remaining of the delay period.
  *
  * Warning: this function _will_ fool interrupt latency tracing tools.
  */
 
-static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
-                                unsigned long adr, int usec)
+static int __xipram xip_wait_for_operation(
+                struct map_info *map, struct flchip *chip,
+                unsigned long adr, int *chip_op_time )
 {
         struct cfi_private *cfi = map->fldrv_priv;
         struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
         map_word status, OK = CMD(0x80);
-        unsigned long suspended, start = xip_currtime();
+        unsigned long usec, suspended, start, done;
         flstate_t oldstate, newstate;
 
+        start = xip_currtime();
+        usec = *chip_op_time * 8;
+        if (usec == 0)
+                usec = 500000;
+        done = 0;
+
         do {
                 cpu_relax();
                 if (xip_irqpending() && cfip &&
@@ -932,9 +938,9 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
                          * we resume the whole thing at once). Yes, it
                          * can happen!
                          */
+                        usec -= done;
                         map_write(map, CMD(0xb0), adr);
                         map_write(map, CMD(0x70), adr);
-                        usec -= xip_elapsed_since(start);
                         suspended = xip_currtime();
                         do {
                                 if (xip_elapsed_since(suspended) > 100000) {
@@ -944,7 +950,7 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
                                          * This is a critical error but there
                                          * is not much we can do here.
                                          */
-                                        return;
+                                        return -EIO;
                                 }
                                 status = map_read(map, adr);
                         } while (!map_word_andequal(map, status, OK, OK));
@@ -1004,65 +1010,107 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
                         xip_cpu_idle();
                 }
                 status = map_read(map, adr);
+                done = xip_elapsed_since(start);
         } while (!map_word_andequal(map, status, OK, OK)
-                 && xip_elapsed_since(start) < usec);
-}
-
-#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)
+                 && done < usec);
+
+        return (done >= usec) ? -ETIME : 0;
+}
 
 /*
  * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
  * the flash is actively programming or erasing since we have to poll for
  * the operation to complete anyway. We can't do that in a generic way with
  * a XIP setup so do it before the actual flash operation in this case
- * and stub it out from INVALIDATE_CACHE_UDELAY.
+ * and stub it out from INVAL_CACHE_AND_WAIT.
  */
 #define XIP_INVAL_CACHED_RANGE(map, from, size)  \
         INVALIDATE_CACHED_RANGE(map, from, size)
 
-#define INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr, adr, len, usec)  \
-        UDELAY(map, chip, cmd_adr, usec)
-
-/*
- * Extra notes:
- *
- * Activating this XIP support changes the way the code works a bit. For
- * example the code to suspend the current process when concurrent access
- * happens is never executed because xip_udelay() will always return with the
- * same chip state as it was entered with. This is why there is no care for
- * the presence of add_wait_queue() or schedule() calls from within a couple
- * xip_disable()'d areas of code, like in do_erase_oneblock for example.
- * The queueing and scheduling are always happening within xip_udelay().
- *
- * Similarly, get_chip() and put_chip() just happen to always be executed
- * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
- * is in array mode, therefore never executing many cases therein and not
- * causing any problem with XIP.
- */
+#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, p_usec) \
+        xip_wait_for_operation(map, chip, cmd_adr, p_usec)
 
 #else
 
 #define xip_disable(map, chip, adr)
 #define xip_enable(map, chip, adr)
 #define XIP_INVAL_CACHED_RANGE(x...)
+#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
+
+static int inval_cache_and_wait_for_operation(
+                struct map_info *map, struct flchip *chip,
+                unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
+                int *chip_op_time )
+{
+        struct cfi_private *cfi = map->fldrv_priv;
+        map_word status, status_OK = CMD(0x80);
+        int z, chip_state = chip->state;
+        unsigned long timeo;
+
+        spin_unlock(chip->mutex);
+        if (inval_len)
+                INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
+        if (*chip_op_time)
+                cfi_udelay(*chip_op_time);
+        spin_lock(chip->mutex);
+
+        timeo = *chip_op_time * 8 * HZ / 1000000;
+        if (timeo < HZ/2)
+                timeo = HZ/2;
+        timeo += jiffies;
+
+        z = 0;
+        for (;;) {
+                if (chip->state != chip_state) {
+                        /* Someone's suspended the operation: sleep */
+                        DECLARE_WAITQUEUE(wait, current);
+
+                        set_current_state(TASK_UNINTERRUPTIBLE);
+                        add_wait_queue(&chip->wq, &wait);
+                        spin_unlock(chip->mutex);
+                        schedule();
+                        remove_wait_queue(&chip->wq, &wait);
+                        timeo = jiffies + (HZ / 2); /* FIXME */
+                        spin_lock(chip->mutex);
+                        continue;
+                }
 
-#define UDELAY(map, chip, adr, usec)  \
-do {  \
-        spin_unlock(chip->mutex);  \
-        cfi_udelay(usec);  \
-        spin_lock(chip->mutex);  \
-} while (0)
-
-#define INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr, adr, len, usec)  \
-do {  \
-        spin_unlock(chip->mutex);  \
-        INVALIDATE_CACHED_RANGE(map, adr, len);  \
-        cfi_udelay(usec);  \
-        spin_lock(chip->mutex);  \
-} while (0)
+                status = map_read(map, cmd_adr);
+                if (map_word_andequal(map, status, status_OK, status_OK))
+                        break;
+
+                /* OK Still waiting */
+                if (time_after(jiffies, timeo)) {
+                        map_write(map, CMD(0x70), cmd_adr);
+                        chip->state = FL_STATUS;
+                        return -ETIME;
+                }
+
+                /* Latency issues. Drop the lock, wait a while and retry */
+                z++;
+                spin_unlock(chip->mutex);
+                cfi_udelay(1);
+                spin_lock(chip->mutex);
+        }
+
+        if (!z) {
+                if (!--(*chip_op_time))
+                        *chip_op_time = 1;
+        } else if (z > 1)
+                ++(*chip_op_time);
+
+        /* Done and happy. */
+        chip->state = FL_STATUS;
+        return 0;
+}
 
 #endif
 
+#define WAIT_TIMEOUT(map, chip, adr, udelay) \
+        ({ int __udelay = (udelay); \
+           INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, &__udelay); })
+
+
 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
 {
         unsigned long cmd_addr;
@@ -1252,14 +1300,11 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
                                      unsigned long adr, map_word datum, int mode)
 {
         struct cfi_private *cfi = map->fldrv_priv;
-        map_word status, status_OK, write_cmd;
-        unsigned long timeo;
-        int z, ret=0;
+        map_word status, write_cmd;
+        int ret=0;
 
         adr += chip->start;
 
-        /* Let's determine those according to the interleave only once */
-        status_OK = CMD(0x80);
         switch (mode) {
         case FL_WRITING:
                 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
@@ -1285,57 +1330,17 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
         map_write(map, datum, adr);
         chip->state = mode;
 
-        INVALIDATE_CACHE_UDELAY(map, chip, adr,
-                                adr, map_bankwidth(map),
-                                chip->word_write_time);
-
-        timeo = jiffies + (HZ/2);
-        z = 0;
-        for (;;) {
-                if (chip->state != mode) {
-                        /* Someone's suspended the write. Sleep */
-                        DECLARE_WAITQUEUE(wait, current);
-
-                        set_current_state(TASK_UNINTERRUPTIBLE);
-                        add_wait_queue(&chip->wq, &wait);
-                        spin_unlock(chip->mutex);
-                        schedule();
-                        remove_wait_queue(&chip->wq, &wait);
-                        timeo = jiffies + (HZ / 2); /* FIXME */
-                        spin_lock(chip->mutex);
-                        continue;
-                }
-
-                status = map_read(map, adr);
-                if (map_word_andequal(map, status, status_OK, status_OK))
-                        break;
-
-                /* OK Still waiting */
-                if (time_after(jiffies, timeo)) {
-                        map_write(map, CMD(0x70), adr);
-                        chip->state = FL_STATUS;
-                        xip_enable(map, chip, adr);
-                        printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
-                        ret = -EIO;
-                        goto out;
-                }
-
-                /* Latency issues. Drop the lock, wait a while and retry */
-                z++;
-                UDELAY(map, chip, adr, 1);
-        }
-        if (!z) {
-                chip->word_write_time--;
-                if (!chip->word_write_time)
-                        chip->word_write_time = 1;
+        ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
+                                   adr, map_bankwidth(map),
+                                   &chip->word_write_time);
+        if (ret) {
+                xip_enable(map, chip, adr);
+                printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
+                goto out;
         }
-        if (z > 1)
-                chip->word_write_time++;
-
-        /* Done and happy. */
-        chip->state = FL_STATUS;
 
         /* check for errors */
+        status = map_read(map, adr);
         if (map_word_bitsset(map, status, CMD(0x1a))) {
                 unsigned long chipstatus = MERGESTATUS(status);
@@ -1452,9 +1457,9 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
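
A side note on the cfi_cmdset_0001.c hunks above: the removed inter_module machinery is replaced by linker-level aliases plus ordinary symbol exports. The stand-alone sketch below (userspace C with made-up probe_* names, not kernel code) shows what the GCC `__attribute__((alias))` extension does: several exported names bind to one function body, so callers of the 0003/0200 variants land directly in the 0001 implementation with no wrapper in between.

```c
#include <stdio.h>

/* The real implementation; in the patch this role is played by
 * cfi_cmdset_0001(). */
int probe_v1(int id)
{
	return id * 2;
}

/* Extra entry points the linker resolves to the same code, mirroring how
 * cfi_cmdset_0003() and cfi_cmdset_0200() alias cfi_cmdset_0001() above.
 * GCC/clang extension; the probe_* names are purely illustrative. */
int probe_v3(int id) __attribute__((alias("probe_v1")));
int probe_v200(int id) __attribute__((alias("probe_v1")));

int main(void)
{
	/* Both calls execute probe_v1()'s body; no stub or indirection. */
	printf("%d %d\n", probe_v3(2), probe_v200(3));
	return 0;
}
```

In the patch itself, EXPORT_SYMBOL_GPL() then makes all three names visible to map drivers that previously had to go through inter_module_get(), as the deleted comment in read_pri_intelext()'s neighbourhood described.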
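The new WAIT_TIMEOUT() macro wraps INVAL_CACHE_AND_WAIT() in a GCC statement expression, `({ ... })`, so it can create a temporary `__udelay` variable and pass its address to a routine that now expects an `int *` it may adjust adaptively. A minimal sketch of that idiom under assumed names (wait_for(), WAIT_SKETCH()), not the kernel macro itself:

```c
#include <stdio.h>

/* A callee that, like inval_cache_and_wait_for_operation(), takes a pointer
 * so it can tune the caller's stored delay for the next invocation. */
static int wait_for(int *op_time_us)
{
	/* Pretend the operation finished early: shorten the next timeout. */
	if (*op_time_us > 1)
		(*op_time_us)--;
	return 0;
}

/* GCC statement expression: the ({ ... }) block is an expression whose value
 * is that of its last statement.  It lets the macro declare an lvalue
 * (__wait_us) and hand out its address, which a plain expression macro
 * could not do for a constant argument. */
#define WAIT_SKETCH(udelay) \
	({ int __wait_us = (udelay); wait_for(&__wait_us); })

int main(void)
{
	/* The literal 100 could not have its address taken directly. */
	int ret = WAIT_SKETCH(100);
	printf("ret=%d\n", ret);
	return 0;
}
```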