Diffstat (limited to 'drivers/mtd/chips')
-rw-r--r--  drivers/mtd/chips/Kconfig            |  121
-rw-r--r--  drivers/mtd/chips/Makefile           |   11
-rw-r--r--  drivers/mtd/chips/amd_flash.c        | 1403
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0001.c  | 1697
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0002.c  | 1452
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0020.c  |  435
-rw-r--r--  drivers/mtd/chips/cfi_probe.c        |  237
-rw-r--r--  drivers/mtd/chips/cfi_util.c         |  133
-rw-r--r--  drivers/mtd/chips/chipreg.c          |   13
-rw-r--r--  drivers/mtd/chips/fwh_lock.h         |   28
-rw-r--r--  drivers/mtd/chips/gen_probe.c        |   99
-rw-r--r--  drivers/mtd/chips/jedec.c            |  934
-rw-r--r--  drivers/mtd/chips/jedec_probe.c      | 1980
-rw-r--r--  drivers/mtd/chips/map_absent.c       |   25
-rw-r--r--  drivers/mtd/chips/map_ram.c          |   38
-rw-r--r--  drivers/mtd/chips/map_rom.c          |   45
-rw-r--r--  drivers/mtd/chips/sharp.c            |  596
17 files changed, 3746 insertions, 5501 deletions
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
index df95d2158b1..9f02c28c020 100644
--- a/drivers/mtd/chips/Kconfig
+++ b/drivers/mtd/chips/Kconfig
@@ -1,13 +1,10 @@
-# drivers/mtd/chips/Kconfig
-# $Id: Kconfig,v 1.15 2005/06/06 23:04:35 tpoynor Exp $
-
 menu "RAM/ROM/Flash chip drivers"
     depends on MTD!=n

 config MTD_CFI
     tristate "Detect flash chips by Common Flash Interface (CFI) probe"
-    depends on MTD
     select MTD_GEN_PROBE
+    select MTD_CFI_UTIL
     help
       The Common Flash Interface specification was developed by Intel,
       AMD and other flash manufactures that provides a universal method
@@ -18,16 +15,14 @@ config MTD_CFI

 config MTD_JEDECPROBE
     tristate "Detect non-CFI AMD/JEDEC-compatible flash chips"
-    depends on MTD
     select MTD_GEN_PROBE
     help
       This option enables JEDEC-style probing of flash chips which are not
       compatible with the Common Flash Interface, but will use the common
-      CFI-targetted flash drivers for any chips which are identified which
+      CFI-targeted flash drivers for any chips which are identified which
       are in fact compatible in all but the probe method. This actually
-      covers most AMD/Fujitsu-compatible chips, and will shortly cover also
-      non-CFI Intel chips (that code is in MTD CVS and should shortly be sent
-      for inclusion in Linus' tree)
+      covers most AMD/Fujitsu-compatible chips and also non-CFI
+      Intel chips.

 config MTD_GEN_PROBE
     tristate
@@ -39,7 +34,7 @@ config MTD_CFI_ADV_OPTIONS
       If you need to specify a specific endianness for access to flash
       chips, or if you wish to reduce the size of the kernel by including
       support for only specific arrangements of flash chips, say 'Y'. This
-      option does not directly affect the code, but will enable other
+      option does not directly affect the code, but will enable other
       configuration options which allow you to do so.

       If unsure, say 'N'.
@@ -48,24 +43,17 @@ choice
     prompt "Flash cmd/query data swapping"
     depends on MTD_CFI_ADV_OPTIONS
     default MTD_CFI_NOSWAP
-
-config MTD_CFI_NOSWAP
-    bool "NO"
     ---help---
       This option defines the way in which the CPU attempts to arrange
       data bits when writing the 'magic' commands to the chips. Saying
       'NO', which is the default when CONFIG_MTD_CFI_ADV_OPTIONS isn't
       enabled, means that the CPU will not do any swapping; the chips
-      are expected to be wired to the CPU in 'host-endian' form.
+      are expected to be wired to the CPU in 'host-endian' form.
       Specific arrangements are possible with the BIG_ENDIAN_BYTE and
       LITTLE_ENDIAN_BYTE, if the bytes are reversed.

-      If you have a LART, on which the data (and address) lines were
-      connected in a fashion which ensured that the nets were as short
-      as possible, resulting in a bit-shuffling which seems utterly
-      random to the untrained eye, you need the LART_ENDIAN_BYTE option.
-
-      Yes, there really exists something sicker than PDP-endian :)
+config MTD_CFI_NOSWAP
+    bool "NO"

 config MTD_CFI_BE_BYTE_SWAP
     bool "BIG_ENDIAN_BYTE"
@@ -79,10 +67,10 @@ config MTD_CFI_GEOMETRY
     bool "Specific CFI Flash geometry selection"
     depends on MTD_CFI_ADV_OPTIONS
     help
-      This option does not affect the code directly, but will enable
+      This option does not affect the code directly, but will enable
       some other configuration options which would allow you to reduce
-      the size of the kernel by including support for only certain
-      arrangements of CFI chips. If unsure, say 'N' and all options
+      the size of the kernel by including support for only certain
+      arrangements of CFI chips. If unsure, say 'N' and all options
       which are supported by the current code will be enabled.

 config MTD_MAP_BANK_WIDTH_1
@@ -181,75 +169,51 @@ config MTD_OTP
       in the programming of OTP bits will waste them.

 config MTD_CFI_INTELEXT
-    tristate "Support for Intel/Sharp flash chips"
+    tristate "Support for CFI command set 0001 (Intel/Sharp chips)"
     depends on MTD_GEN_PROBE
     select MTD_CFI_UTIL
     help
       The Common Flash Interface defines a number of different command
       sets which a CFI-compliant chip may claim to implement. This code
-      provides support for one of those command sets, used on Intel
-      StrataFlash and other parts.
+      provides support for command set 0001, used on Intel StrataFlash
+      and other parts.

 config MTD_CFI_AMDSTD
-    tristate "Support for AMD/Fujitsu flash chips"
+    tristate "Support for CFI command set 0002 (AMD/Fujitsu/Spansion chips)"
     depends on MTD_GEN_PROBE
     select MTD_CFI_UTIL
     help
       The Common Flash Interface defines a number of different command
       sets which a CFI-compliant chip may claim to implement. This code
-      provides support for one of those command sets, used on chips
-      including the AMD Am29LV320.
-
-config MTD_CFI_AMDSTD_RETRY
-    int "Retry failed commands (erase/program)"
-    depends on MTD_CFI_AMDSTD
-    default "0"
-    help
-      Some chips, when attached to a shared bus, don't properly filter
-      bus traffic that is destined to other devices. This broken
-      behavior causes erase and program sequences to be aborted when
-      the sequences are mixed with traffic for other devices.
-
-      SST49LF040 (and related) chips are know to be broken.
-
-config MTD_CFI_AMDSTD_RETRY_MAX
-    int "Max retries of failed commands (erase/program)"
-    depends on MTD_CFI_AMDSTD_RETRY
-    default "0"
-    help
-      If you have an SST49LF040 (or related chip) then this value should
-      be set to at least 1. This can also be adjusted at driver load
-      time with the retry_cmd_max module parameter.
+      provides support for command set 0002, used on chips including
+      the AMD Am29LV320.

 config MTD_CFI_STAA
-    tristate "Support for ST (Advanced Architecture) flash chips"
+    tristate "Support for CFI command set 0020 (ST (Advanced Architecture) chips)"
     depends on MTD_GEN_PROBE
     select MTD_CFI_UTIL
     help
       The Common Flash Interface defines a number of different command
       sets which a CFI-compliant chip may claim to implement. This code
-      provides support for one of those command sets.
+      provides support for command set 0020.

 config MTD_CFI_UTIL
     tristate

 config MTD_RAM
     tristate "Support for RAM chips in bus mapping"
-    depends on MTD
     help
-      This option enables basic support for RAM chips accessed through
+      This option enables basic support for RAM chips accessed through
       a bus mapping driver.

 config MTD_ROM
     tristate "Support for ROM chips in bus mapping"
-    depends on MTD
     help
-      This option enables basic support for ROM chips accessed through
+      This option enables basic support for ROM chips accessed through
       a bus mapping driver.

 config MTD_ABSENT
     tristate "Support for absent chips in bus mapping"
-    depends on MTD
     help
       This option enables support for a dummy probing driver used to
       allocated placeholder MTD devices on systems that have socketed
@@ -258,49 +222,9 @@ config MTD_ABSENT
       the system regardless of media presence. Device nodes created
       with this driver will return -ENODEV upon access.

-config MTD_OBSOLETE_CHIPS
-    depends on MTD && BROKEN
-    bool "Older (theoretically obsoleted now) drivers for non-CFI chips"
-    help
-      This option does not enable any code directly, but will allow you to
-      select some other chip drivers which are now considered obsolete,
-      because the generic CONFIG_JEDECPROBE code above should now detect
-      the chips which are supported by these drivers, and allow the generic
-      CFI-compatible drivers to drive the chips. Say 'N' here unless you have
-      already tried the CONFIG_JEDECPROBE method and reported its failure
-      to the MTD mailing list at <linux-mtd@lists.infradead.org>
-
-config MTD_AMDSTD
-    tristate "AMD compatible flash chip support (non-CFI)"
-    depends on MTD && MTD_OBSOLETE_CHIPS
-    help
-      This option enables support for flash chips using AMD-compatible
-      commands, including some which are not CFI-compatible and hence
-      cannot be used with the CONFIG_MTD_CFI_AMDSTD option.
-
-      It also works on AMD compatible chips that do conform to CFI.
-
-config MTD_SHARP
-    tristate "pre-CFI Sharp chip support"
-    depends on MTD && MTD_OBSOLETE_CHIPS
-    help
-      This option enables support for flash chips using Sharp-compatible
-      commands, including some which are not CFI-compatible and hence
-      cannot be used with the CONFIG_MTD_CFI_INTELxxx options.
-
-config MTD_JEDEC
-    tristate "JEDEC device support"
-    depends on MTD && MTD_OBSOLETE_CHIPS
-    help
-      Enable older older JEDEC flash interface devices for self
-      programming flash. It is commonly used in older AMD chips. It is
-      only called JEDEC because the JEDEC association
-      <http://www.jedec.org/> distributes the identification codes for the
-      chips.
-
 config MTD_XIP
     bool "XIP aware MTD support"
-    depends on !SMP && (MTD_CFI_INTELEXT || MTD_CFI_AMDSTD) && EXPERIMENTAL && ARM
+    depends on !SMP && (MTD_CFI_INTELEXT || MTD_CFI_AMDSTD) && ARCH_MTD_XIP
     default y if XIP_KERNEL
     help
       This allows MTD support to work with flash memory which is also
@@ -308,4 +232,3 @@ config MTD_XIP
       then say N.

 endmenu
-
diff --git a/drivers/mtd/chips/Makefile b/drivers/mtd/chips/Makefile
index 6830489828c..36582412ccd 100644
--- a/drivers/mtd/chips/Makefile
+++ b/drivers/mtd/chips/Makefile
@@ -1,26 +1,15 @@
 #
 # linux/drivers/chips/Makefile
 #
-# $Id: Makefile.common,v 1.4 2004/07/12 16:07:30 dwmw2 Exp $
-
-# *** BIG UGLY NOTE ***
-#
-# The removal of get_module_symbol() and replacement with
-# inter_module_register() et al has introduced a link order dependency
-# here where previously there was none.
We now have to ensure that -# the CFI command set drivers are linked before gen_probe.o obj-$(CONFIG_MTD) += chipreg.o -obj-$(CONFIG_MTD_AMDSTD) += amd_flash.o obj-$(CONFIG_MTD_CFI) += cfi_probe.o obj-$(CONFIG_MTD_CFI_UTIL) += cfi_util.o obj-$(CONFIG_MTD_CFI_STAA) += cfi_cmdset_0020.o obj-$(CONFIG_MTD_CFI_AMDSTD) += cfi_cmdset_0002.o obj-$(CONFIG_MTD_CFI_INTELEXT) += cfi_cmdset_0001.o obj-$(CONFIG_MTD_GEN_PROBE) += gen_probe.o -obj-$(CONFIG_MTD_JEDEC) += jedec.o obj-$(CONFIG_MTD_JEDECPROBE) += jedec_probe.o obj-$(CONFIG_MTD_RAM) += map_ram.o obj-$(CONFIG_MTD_ROM) += map_rom.o -obj-$(CONFIG_MTD_SHARP) += sharp.o obj-$(CONFIG_MTD_ABSENT) += map_absent.o diff --git a/drivers/mtd/chips/amd_flash.c b/drivers/mtd/chips/amd_flash.c deleted file mode 100644 index 2dafeba3f3d..00000000000 --- a/drivers/mtd/chips/amd_flash.c +++ /dev/null @@ -1,1403 +0,0 @@ -/* - * MTD map driver for AMD compatible flash chips (non-CFI) - * - * Author: Jonas Holmberg <jonas.holmberg@axis.com> - * - * $Id: amd_flash.c,v 1.27 2005/02/04 07:43:09 jonashg Exp $ - * - * Copyright (c) 2001 Axis Communications AB - * - * This file is under GPL. - * - */ - -#include <linux/module.h> -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/errno.h> -#include <linux/slab.h> -#include <linux/delay.h> -#include <linux/interrupt.h> -#include <linux/init.h> -#include <linux/mtd/map.h> -#include <linux/mtd/mtd.h> -#include <linux/mtd/flashchip.h> - -/* There's no limit. It exists only to avoid realloc. */ -#define MAX_AMD_CHIPS 8 - -#define DEVICE_TYPE_X8 (8 / 8) -#define DEVICE_TYPE_X16 (16 / 8) -#define DEVICE_TYPE_X32 (32 / 8) - -/* Addresses */ -#define ADDR_MANUFACTURER 0x0000 -#define ADDR_DEVICE_ID 0x0001 -#define ADDR_SECTOR_LOCK 0x0002 -#define ADDR_HANDSHAKE 0x0003 -#define ADDR_UNLOCK_1 0x0555 -#define ADDR_UNLOCK_2 0x02AA - -/* Commands */ -#define CMD_UNLOCK_DATA_1 0x00AA -#define CMD_UNLOCK_DATA_2 0x0055 -#define CMD_MANUFACTURER_UNLOCK_DATA 0x0090 -#define CMD_UNLOCK_BYPASS_MODE 0x0020 -#define CMD_PROGRAM_UNLOCK_DATA 0x00A0 -#define CMD_RESET_DATA 0x00F0 -#define CMD_SECTOR_ERASE_UNLOCK_DATA 0x0080 -#define CMD_SECTOR_ERASE_UNLOCK_DATA_2 0x0030 - -#define CMD_UNLOCK_SECTOR 0x0060 - -/* Manufacturers */ -#define MANUFACTURER_AMD 0x0001 -#define MANUFACTURER_ATMEL 0x001F -#define MANUFACTURER_FUJITSU 0x0004 -#define MANUFACTURER_ST 0x0020 -#define MANUFACTURER_SST 0x00BF -#define MANUFACTURER_TOSHIBA 0x0098 - -/* AMD */ -#define AM29F800BB 0x2258 -#define AM29F800BT 0x22D6 -#define AM29LV800BB 0x225B -#define AM29LV800BT 0x22DA -#define AM29LV160DT 0x22C4 -#define AM29LV160DB 0x2249 -#define AM29BDS323D 0x22D1 - -/* Atmel */ -#define AT49xV16x 0x00C0 -#define AT49xV16xT 0x00C2 - -/* Fujitsu */ -#define MBM29LV160TE 0x22C4 -#define MBM29LV160BE 0x2249 -#define MBM29LV800BB 0x225B - -/* ST - www.st.com */ -#define M29W800T 0x00D7 -#define M29W160DT 0x22C4 -#define M29W160DB 0x2249 - -/* SST */ -#define SST39LF800 0x2781 -#define SST39LF160 0x2782 - -/* Toshiba */ -#define TC58FVT160 0x00C2 -#define TC58FVB160 0x0043 - -#define D6_MASK 0x40 - -struct amd_flash_private { - int device_type; - int interleave; - int numchips; - unsigned long chipshift; -// const char *im_name; - struct flchip chips[0]; -}; - -struct amd_flash_info { - const __u16 mfr_id; - const __u16 dev_id; - const char *name; - const u_long size; - const int numeraseregions; - const struct mtd_erase_region_info regions[4]; -}; - - - -static int amd_flash_read(struct mtd_info *, loff_t, size_t, size_t *, - u_char *); 
-static int amd_flash_write(struct mtd_info *, loff_t, size_t, size_t *, - const u_char *); -static int amd_flash_erase(struct mtd_info *, struct erase_info *); -static void amd_flash_sync(struct mtd_info *); -static int amd_flash_suspend(struct mtd_info *); -static void amd_flash_resume(struct mtd_info *); -static void amd_flash_destroy(struct mtd_info *); -static struct mtd_info *amd_flash_probe(struct map_info *map); - - -static struct mtd_chip_driver amd_flash_chipdrv = { - .probe = amd_flash_probe, - .destroy = amd_flash_destroy, - .name = "amd_flash", - .module = THIS_MODULE -}; - - - -static const char im_name[] = "amd_flash"; - - - -static inline __u32 wide_read(struct map_info *map, __u32 addr) -{ - if (map->buswidth == 1) { - return map_read8(map, addr); - } else if (map->buswidth == 2) { - return map_read16(map, addr); - } else if (map->buswidth == 4) { - return map_read32(map, addr); - } - - return 0; -} - -static inline void wide_write(struct map_info *map, __u32 val, __u32 addr) -{ - if (map->buswidth == 1) { - map_write8(map, val, addr); - } else if (map->buswidth == 2) { - map_write16(map, val, addr); - } else if (map->buswidth == 4) { - map_write32(map, val, addr); - } -} - -static inline __u32 make_cmd(struct map_info *map, __u32 cmd) -{ - const struct amd_flash_private *private = map->fldrv_priv; - if ((private->interleave == 2) && - (private->device_type == DEVICE_TYPE_X16)) { - cmd |= (cmd << 16); - } - - return cmd; -} - -static inline void send_unlock(struct map_info *map, unsigned long base) -{ - wide_write(map, (CMD_UNLOCK_DATA_1 << 16) | CMD_UNLOCK_DATA_1, - base + (map->buswidth * ADDR_UNLOCK_1)); - wide_write(map, (CMD_UNLOCK_DATA_2 << 16) | CMD_UNLOCK_DATA_2, - base + (map->buswidth * ADDR_UNLOCK_2)); -} - -static inline void send_cmd(struct map_info *map, unsigned long base, __u32 cmd) -{ - send_unlock(map, base); - wide_write(map, make_cmd(map, cmd), - base + (map->buswidth * ADDR_UNLOCK_1)); -} - -static inline void send_cmd_to_addr(struct map_info *map, unsigned long base, - __u32 cmd, unsigned long addr) -{ - send_unlock(map, base); - wide_write(map, make_cmd(map, cmd), addr); -} - -static inline int flash_is_busy(struct map_info *map, unsigned long addr, - int interleave) -{ - - if ((interleave == 2) && (map->buswidth == 4)) { - __u32 read1, read2; - - read1 = wide_read(map, addr); - read2 = wide_read(map, addr); - - return (((read1 >> 16) & D6_MASK) != - ((read2 >> 16) & D6_MASK)) || - (((read1 & 0xffff) & D6_MASK) != - ((read2 & 0xffff) & D6_MASK)); - } - - return ((wide_read(map, addr) & D6_MASK) != - (wide_read(map, addr) & D6_MASK)); -} - -static inline void unlock_sector(struct map_info *map, unsigned long sect_addr, - int unlock) -{ - /* Sector lock address. A6 = 1 for unlock, A6 = 0 for lock */ - int SLA = unlock ? 
- (sect_addr | (0x40 * map->buswidth)) : - (sect_addr & ~(0x40 * map->buswidth)) ; - - __u32 cmd = make_cmd(map, CMD_UNLOCK_SECTOR); - - wide_write(map, make_cmd(map, CMD_RESET_DATA), 0); - wide_write(map, cmd, SLA); /* 1st cycle: write cmd to any address */ - wide_write(map, cmd, SLA); /* 2nd cycle: write cmd to any address */ - wide_write(map, cmd, SLA); /* 3rd cycle: write cmd to SLA */ -} - -static inline int is_sector_locked(struct map_info *map, - unsigned long sect_addr) -{ - int status; - - wide_write(map, CMD_RESET_DATA, 0); - send_cmd(map, sect_addr, CMD_MANUFACTURER_UNLOCK_DATA); - - /* status is 0x0000 for unlocked and 0x0001 for locked */ - status = wide_read(map, sect_addr + (map->buswidth * ADDR_SECTOR_LOCK)); - wide_write(map, CMD_RESET_DATA, 0); - return status; -} - -static int amd_flash_do_unlock(struct mtd_info *mtd, loff_t ofs, size_t len, - int is_unlock) -{ - struct map_info *map; - struct mtd_erase_region_info *merip; - int eraseoffset, erasesize, eraseblocks; - int i; - int retval = 0; - int lock_status; - - map = mtd->priv; - - /* Pass the whole chip through sector by sector and check for each - sector if the sector and the given interval overlap */ - for(i = 0; i < mtd->numeraseregions; i++) { - merip = &mtd->eraseregions[i]; - - eraseoffset = merip->offset; - erasesize = merip->erasesize; - eraseblocks = merip->numblocks; - - if (ofs > eraseoffset + erasesize) - continue; - - while (eraseblocks > 0) { - if (ofs < eraseoffset + erasesize && ofs + len > eraseoffset) { - unlock_sector(map, eraseoffset, is_unlock); - - lock_status = is_sector_locked(map, eraseoffset); - - if (is_unlock && lock_status) { - printk("Cannot unlock sector at address %x length %xx\n", - eraseoffset, merip->erasesize); - retval = -1; - } else if (!is_unlock && !lock_status) { - printk("Cannot lock sector at address %x length %x\n", - eraseoffset, merip->erasesize); - retval = -1; - } - } - eraseoffset += erasesize; - eraseblocks --; - } - } - return retval; -} - -static int amd_flash_unlock(struct mtd_info *mtd, loff_t ofs, size_t len) -{ - return amd_flash_do_unlock(mtd, ofs, len, 1); -} - -static int amd_flash_lock(struct mtd_info *mtd, loff_t ofs, size_t len) -{ - return amd_flash_do_unlock(mtd, ofs, len, 0); -} - - -/* - * Reads JEDEC manufacturer ID and device ID and returns the index of the first - * matching table entry (-1 if not found or alias for already found chip). - */ -static int probe_new_chip(struct mtd_info *mtd, __u32 base, - struct flchip *chips, - struct amd_flash_private *private, - const struct amd_flash_info *table, int table_size) -{ - __u32 mfr_id; - __u32 dev_id; - struct map_info *map = mtd->priv; - struct amd_flash_private temp; - int i; - - temp.device_type = DEVICE_TYPE_X16; // Assume X16 (FIXME) - temp.interleave = 2; - map->fldrv_priv = &temp; - - /* Enter autoselect mode. */ - send_cmd(map, base, CMD_RESET_DATA); - send_cmd(map, base, CMD_MANUFACTURER_UNLOCK_DATA); - - mfr_id = wide_read(map, base + (map->buswidth * ADDR_MANUFACTURER)); - dev_id = wide_read(map, base + (map->buswidth * ADDR_DEVICE_ID)); - - if ((map->buswidth == 4) && ((mfr_id >> 16) == (mfr_id & 0xffff)) && - ((dev_id >> 16) == (dev_id & 0xffff))) { - mfr_id &= 0xffff; - dev_id &= 0xffff; - } else { - temp.interleave = 1; - } - - for (i = 0; i < table_size; i++) { - if ((mfr_id == table[i].mfr_id) && - (dev_id == table[i].dev_id)) { - if (chips) { - int j; - - /* Is this an alias for an already found chip? - * In that case that chip should be in - * autoselect mode now. 
- */ - for (j = 0; j < private->numchips; j++) { - __u32 mfr_id_other; - __u32 dev_id_other; - - mfr_id_other = - wide_read(map, chips[j].start + - (map->buswidth * - ADDR_MANUFACTURER - )); - dev_id_other = - wide_read(map, chips[j].start + - (map->buswidth * - ADDR_DEVICE_ID)); - if (temp.interleave == 2) { - mfr_id_other &= 0xffff; - dev_id_other &= 0xffff; - } - if ((mfr_id_other == mfr_id) && - (dev_id_other == dev_id)) { - - /* Exit autoselect mode. */ - send_cmd(map, base, - CMD_RESET_DATA); - - return -1; - } - } - - if (private->numchips == MAX_AMD_CHIPS) { - printk(KERN_WARNING - "%s: Too many flash chips " - "detected. Increase " - "MAX_AMD_CHIPS from %d.\n", - map->name, MAX_AMD_CHIPS); - - return -1; - } - - chips[private->numchips].start = base; - chips[private->numchips].state = FL_READY; - chips[private->numchips].mutex = - &chips[private->numchips]._spinlock; - private->numchips++; - } - - printk("%s: Found %d x %ldMiB %s at 0x%x\n", map->name, - temp.interleave, (table[i].size)/(1024*1024), - table[i].name, base); - - mtd->size += table[i].size * temp.interleave; - mtd->numeraseregions += table[i].numeraseregions; - - break; - } - } - - /* Exit autoselect mode. */ - send_cmd(map, base, CMD_RESET_DATA); - - if (i == table_size) { - printk(KERN_DEBUG "%s: unknown flash device at 0x%x, " - "mfr id 0x%x, dev id 0x%x\n", map->name, - base, mfr_id, dev_id); - map->fldrv_priv = NULL; - - return -1; - } - - private->device_type = temp.device_type; - private->interleave = temp.interleave; - - return i; -} - - - -static struct mtd_info *amd_flash_probe(struct map_info *map) -{ - static const struct amd_flash_info table[] = { - { - .mfr_id = MANUFACTURER_AMD, - .dev_id = AM29LV160DT, - .name = "AMD AM29LV160DT", - .size = 0x00200000, - .numeraseregions = 4, - .regions = { - { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 }, - { .offset = 0x1F0000, .erasesize = 0x08000, .numblocks = 1 }, - { .offset = 0x1F8000, .erasesize = 0x02000, .numblocks = 2 }, - { .offset = 0x1FC000, .erasesize = 0x04000, .numblocks = 1 } - } - }, { - .mfr_id = MANUFACTURER_AMD, - .dev_id = AM29LV160DB, - .name = "AMD AM29LV160DB", - .size = 0x00200000, - .numeraseregions = 4, - .regions = { - { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 }, - { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 }, - { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 }, - { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 } - } - }, { - .mfr_id = MANUFACTURER_TOSHIBA, - .dev_id = TC58FVT160, - .name = "Toshiba TC58FVT160", - .size = 0x00200000, - .numeraseregions = 4, - .regions = { - { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 }, - { .offset = 0x1F0000, .erasesize = 0x08000, .numblocks = 1 }, - { .offset = 0x1F8000, .erasesize = 0x02000, .numblocks = 2 }, - { .offset = 0x1FC000, .erasesize = 0x04000, .numblocks = 1 } - } - }, { - .mfr_id = MANUFACTURER_FUJITSU, - .dev_id = MBM29LV160TE, - .name = "Fujitsu MBM29LV160TE", - .size = 0x00200000, - .numeraseregions = 4, - .regions = { - { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 }, - { .offset = 0x1F0000, .erasesize = 0x08000, .numblocks = 1 }, - { .offset = 0x1F8000, .erasesize = 0x02000, .numblocks = 2 }, - { .offset = 0x1FC000, .erasesize = 0x04000, .numblocks = 1 } - } - }, { - .mfr_id = MANUFACTURER_TOSHIBA, - .dev_id = TC58FVB160, - .name = "Toshiba TC58FVB160", - .size = 0x00200000, - .numeraseregions = 4, - .regions = { - { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 }, - { 
.offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 }, - { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 }, - { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 } - } - }, { - .mfr_id = MANUFACTURER_FUJITSU, - .dev_id = MBM29LV160BE, - .name = "Fujitsu MBM29LV160BE", - .size = 0x00200000, - .numeraseregions = 4, - .regions = { - { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 }, - { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 }, - { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 }, - { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 } - } - }, { - .mfr_id = MANUFACTURER_AMD, - .dev_id = AM29LV800BB, - .name = "AMD AM29LV800BB", - .size = 0x00100000, - .numeraseregions = 4, - .regions = { - { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 }, - { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 }, - { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 }, - { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 15 } - } - }, { - .mfr_id = MANUFACTURER_AMD, - .dev_id = AM29F800BB, - .name = "AMD AM29F800BB", - .size = 0x00100000, - .numeraseregions = 4, - .regions = { - { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 }, - { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 }, - { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 }, - { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 15 } - } - }, { - .mfr_id = MANUFACTURER_AMD, - .dev_id = AM29LV800BT, - .name = "AMD AM29LV800BT", - .size = 0x00100000, - .numeraseregions = 4, - .regions = { - { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 }, - { .offset = 0x0F0000, .erasesize = 0x08000, .numblocks = 1 }, - { .offset = 0x0F8000, .erasesize = 0x02000, .numblocks = 2 }, - { .offset = 0x0FC000, .erasesize = 0x04000, .numblocks = 1 } - } - }, { - .mfr_id = MANUFACTURER_AMD, - .dev_id = AM29F800BT, - .name = "AMD AM29F800BT", - .size = 0x00100000, - .numeraseregions = 4, - .regions = { - { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 }, - { .offset = 0x0F0000, .erasesize = 0x08000, .numblocks = 1 }, - { .offset = 0x0F8000, .erasesize = 0x02000, .numblocks = 2 }, - { .offset = 0x0FC000, .erasesize = 0x04000, .numblocks = 1 } - } - }, { - .mfr_id = MANUFACTURER_AMD, - .dev_id = AM29LV800BB, - .name = "AMD AM29LV800BB", - .size = 0x00100000, - .numeraseregions = 4, - .regions = { - { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 }, - { .offset = 0x0F0000, .erasesize = 0x08000, .numblocks = 1 }, - { .offset = 0x0F8000, .erasesize = 0x02000, .numblocks = 2 }, - { .offset = 0x0FC000, .erasesize = 0x04000, .numblocks = 1 } - } - }, { - .mfr_id = MANUFACTURER_FUJITSU, - .dev_id = MBM29LV800BB, - .name = "Fujitsu MBM29LV800BB", - .size = 0x00100000, - .numeraseregions = 4, - .regions = { - { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 }, - { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 }, - { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 }, - { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 15 } - } - }, { - .mfr_id = MANUFACTURER_ST, - .dev_id = M29W800T, - .name = "ST M29W800T", - .size = 0x00100000, - .numeraseregions = 4, - .regions = { - { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 }, - { .offset = 0x0F0000, .erasesize = 0x08000, .numblocks = 1 }, - { .offset = 0x0F8000, .erasesize = 0x02000, .numblocks = 2 }, - { .offset = 0x0FC000, .erasesize = 0x04000, .numblocks = 1 } - } - }, { - .mfr_id = MANUFACTURER_ST, - .dev_id = M29W160DT, 
- .name = "ST M29W160DT", - .size = 0x00200000, - .numeraseregions = 4, - .regions = { - { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 }, - { .offset = 0x1F0000, .erasesize = 0x08000, .numblocks = 1 }, - { .offset = 0x1F8000, .erasesize = 0x02000, .numblocks = 2 }, - { .offset = 0x1FC000, .erasesize = 0x04000, .numblocks = 1 } - } - }, { - .mfr_id = MANUFACTURER_ST, - .dev_id = M29W160DB, - .name = "ST M29W160DB", - .size = 0x00200000, - .numeraseregions = 4, - .regions = { - { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 }, - { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 }, - { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 }, - { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 } - } - }, { - .mfr_id = MANUFACTURER_AMD, - .dev_id = AM29BDS323D, - .name = "AMD AM29BDS323D", - .size = 0x00400000, - .numeraseregions = 3, - .regions = { - { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 48 }, - { .offset = 0x300000, .erasesize = 0x10000, .numblocks = 15 }, - { .offset = 0x3f0000, .erasesize = 0x02000, .numblocks = 8 }, - } - }, { - .mfr_id = MANUFACTURER_ATMEL, - .dev_id = AT49xV16x, - .name = "Atmel AT49xV16x", - .size = 0x00200000, - .numeraseregions = 2, - .regions = { - { .offset = 0x000000, .erasesize = 0x02000, .numblocks = 8 }, - { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 } - } - }, { - .mfr_id = MANUFACTURER_ATMEL, - .dev_id = AT49xV16xT, - .name = "Atmel AT49xV16xT", - .size = 0x00200000, - .numeraseregions = 2, - .regions = { - { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 }, - { .offset = 0x1F0000, .erasesize = 0x02000, .numblocks = 8 } - } - } - }; - - struct mtd_info *mtd; - struct flchip chips[MAX_AMD_CHIPS]; - int table_pos[MAX_AMD_CHIPS]; - struct amd_flash_private temp; - struct amd_flash_private *private; - u_long size; - unsigned long base; - int i; - int reg_idx; - int offset; - - mtd = (struct mtd_info*)kmalloc(sizeof(*mtd), GFP_KERNEL); - if (!mtd) { - printk(KERN_WARNING - "%s: kmalloc failed for info structure\n", map->name); - return NULL; - } - memset(mtd, 0, sizeof(*mtd)); - mtd->priv = map; - - memset(&temp, 0, sizeof(temp)); - - printk("%s: Probing for AMD compatible flash...\n", map->name); - - if ((table_pos[0] = probe_new_chip(mtd, 0, NULL, &temp, table, - sizeof(table)/sizeof(table[0]))) - == -1) { - printk(KERN_WARNING - "%s: Found no AMD compatible device at location zero\n", - map->name); - kfree(mtd); - - return NULL; - } - - chips[0].start = 0; - chips[0].state = FL_READY; - chips[0].mutex = &chips[0]._spinlock; - temp.numchips = 1; - for (size = mtd->size; size > 1; size >>= 1) { - temp.chipshift++; - } - switch (temp.interleave) { - case 2: - temp.chipshift += 1; - break; - case 4: - temp.chipshift += 2; - break; - } - - /* Find out if there are any more chips in the map. 
*/ - for (base = (1 << temp.chipshift); - base < map->size; - base += (1 << temp.chipshift)) { - int numchips = temp.numchips; - table_pos[numchips] = probe_new_chip(mtd, base, chips, - &temp, table, sizeof(table)/sizeof(table[0])); - } - - mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) * - mtd->numeraseregions, GFP_KERNEL); - if (!mtd->eraseregions) { - printk(KERN_WARNING "%s: Failed to allocate " - "memory for MTD erase region info\n", map->name); - kfree(mtd); - map->fldrv_priv = NULL; - return NULL; - } - - reg_idx = 0; - offset = 0; - for (i = 0; i < temp.numchips; i++) { - int dev_size; - int j; - - dev_size = 0; - for (j = 0; j < table[table_pos[i]].numeraseregions; j++) { - mtd->eraseregions[reg_idx].offset = offset + - (table[table_pos[i]].regions[j].offset * - temp.interleave); - mtd->eraseregions[reg_idx].erasesize = - table[table_pos[i]].regions[j].erasesize * - temp.interleave; - mtd->eraseregions[reg_idx].numblocks = - table[table_pos[i]].regions[j].numblocks; - if (mtd->erasesize < - mtd->eraseregions[reg_idx].erasesize) { - mtd->erasesize = - mtd->eraseregions[reg_idx].erasesize; - } - dev_size += mtd->eraseregions[reg_idx].erasesize * - mtd->eraseregions[reg_idx].numblocks; - reg_idx++; - } - offset += dev_size; - } - mtd->type = MTD_NORFLASH; - mtd->flags = MTD_CAP_NORFLASH; - mtd->name = map->name; - mtd->erase = amd_flash_erase; - mtd->read = amd_flash_read; - mtd->write = amd_flash_write; - mtd->sync = amd_flash_sync; - mtd->suspend = amd_flash_suspend; - mtd->resume = amd_flash_resume; - mtd->lock = amd_flash_lock; - mtd->unlock = amd_flash_unlock; - - private = kmalloc(sizeof(*private) + (sizeof(struct flchip) * - temp.numchips), GFP_KERNEL); - if (!private) { - printk(KERN_WARNING - "%s: kmalloc failed for private structure\n", map->name); - kfree(mtd); - map->fldrv_priv = NULL; - return NULL; - } - memcpy(private, &temp, sizeof(temp)); - memcpy(private->chips, chips, - sizeof(struct flchip) * private->numchips); - for (i = 0; i < private->numchips; i++) { - init_waitqueue_head(&private->chips[i].wq); - spin_lock_init(&private->chips[i]._spinlock); - } - - map->fldrv_priv = private; - - map->fldrv = &amd_flash_chipdrv; - - __module_get(THIS_MODULE); - return mtd; -} - - - -static inline int read_one_chip(struct map_info *map, struct flchip *chip, - loff_t adr, size_t len, u_char *buf) -{ - DECLARE_WAITQUEUE(wait, current); - unsigned long timeo = jiffies + HZ; - -retry: - spin_lock_bh(chip->mutex); - - if (chip->state != FL_READY){ - printk(KERN_INFO "%s: waiting for chip to read, state = %d\n", - map->name, chip->state); - set_current_state(TASK_UNINTERRUPTIBLE); - add_wait_queue(&chip->wq, &wait); - - spin_unlock_bh(chip->mutex); - - schedule(); - remove_wait_queue(&chip->wq, &wait); - - if(signal_pending(current)) { - return -EINTR; - } - - timeo = jiffies + HZ; - - goto retry; - } - - adr += chip->start; - - chip->state = FL_READY; - - map_copy_from(map, buf, adr, len); - - wake_up(&chip->wq); - spin_unlock_bh(chip->mutex); - - return 0; -} - - - -static int amd_flash_read(struct mtd_info *mtd, loff_t from, size_t len, - size_t *retlen, u_char *buf) -{ - struct map_info *map = mtd->priv; - struct amd_flash_private *private = map->fldrv_priv; - unsigned long ofs; - int chipnum; - int ret = 0; - - if ((from + len) > mtd->size) { - printk(KERN_WARNING "%s: read request past end of device " - "(0x%lx)\n", map->name, (unsigned long)from + len); - - return -EINVAL; - } - - /* Offset within the first chip that the first read should start. 
*/ - chipnum = (from >> private->chipshift); - ofs = from - (chipnum << private->chipshift); - - *retlen = 0; - - while (len) { - unsigned long this_len; - - if (chipnum >= private->numchips) { - break; - } - - if ((len + ofs - 1) >> private->chipshift) { - this_len = (1 << private->chipshift) - ofs; - } else { - this_len = len; - } - - ret = read_one_chip(map, &private->chips[chipnum], ofs, - this_len, buf); - if (ret) { - break; - } - - *retlen += this_len; - len -= this_len; - buf += this_len; - - ofs = 0; - chipnum++; - } - - return ret; -} - - - -static int write_one_word(struct map_info *map, struct flchip *chip, - unsigned long adr, __u32 datum) -{ - unsigned long timeo = jiffies + HZ; - struct amd_flash_private *private = map->fldrv_priv; - DECLARE_WAITQUEUE(wait, current); - int ret = 0; - int times_left; - -retry: - spin_lock_bh(chip->mutex); - - if (chip->state != FL_READY){ - printk("%s: waiting for chip to write, state = %d\n", - map->name, chip->state); - set_current_state(TASK_UNINTERRUPTIBLE); - add_wait_queue(&chip->wq, &wait); - - spin_unlock_bh(chip->mutex); - - schedule(); - remove_wait_queue(&chip->wq, &wait); - printk(KERN_INFO "%s: woke up to write\n", map->name); - if(signal_pending(current)) - return -EINTR; - - timeo = jiffies + HZ; - - goto retry; - } - - chip->state = FL_WRITING; - - adr += chip->start; - ENABLE_VPP(map); - send_cmd(map, chip->start, CMD_PROGRAM_UNLOCK_DATA); - wide_write(map, datum, adr); - - times_left = 500000; - while (times_left-- && flash_is_busy(map, adr, private->interleave)) { - if (need_resched()) { - spin_unlock_bh(chip->mutex); - schedule(); - spin_lock_bh(chip->mutex); - } - } - - if (!times_left) { - printk(KERN_WARNING "%s: write to 0x%lx timed out!\n", - map->name, adr); - ret = -EIO; - } else { - __u32 verify; - if ((verify = wide_read(map, adr)) != datum) { - printk(KERN_WARNING "%s: write to 0x%lx failed. " - "datum = %x, verify = %x\n", - map->name, adr, datum, verify); - ret = -EIO; - } - } - - DISABLE_VPP(map); - chip->state = FL_READY; - wake_up(&chip->wq); - spin_unlock_bh(chip->mutex); - - return ret; -} - - - -static int amd_flash_write(struct mtd_info *mtd, loff_t to , size_t len, - size_t *retlen, const u_char *buf) -{ - struct map_info *map = mtd->priv; - struct amd_flash_private *private = map->fldrv_priv; - int ret = 0; - int chipnum; - unsigned long ofs; - unsigned long chipstart; - - *retlen = 0; - if (!len) { - return 0; - } - - chipnum = to >> private->chipshift; - ofs = to - (chipnum << private->chipshift); - chipstart = private->chips[chipnum].start; - - /* If it's not bus-aligned, do the first byte write. */ - if (ofs & (map->buswidth - 1)) { - unsigned long bus_ofs = ofs & ~(map->buswidth - 1); - int i = ofs - bus_ofs; - int n = 0; - u_char tmp_buf[4]; - __u32 datum; - - map_copy_from(map, tmp_buf, - bus_ofs + private->chips[chipnum].start, - map->buswidth); - while (len && i < map->buswidth) - tmp_buf[i++] = buf[n++], len--; - - if (map->buswidth == 2) { - datum = *(__u16*)tmp_buf; - } else if (map->buswidth == 4) { - datum = *(__u32*)tmp_buf; - } else { - return -EINVAL; /* should never happen, but be safe */ - } - - ret = write_one_word(map, &private->chips[chipnum], bus_ofs, - datum); - if (ret) { - return ret; - } - - ofs += n; - buf += n; - (*retlen) += n; - - if (ofs >> private->chipshift) { - chipnum++; - ofs = 0; - if (chipnum == private->numchips) { - return 0; - } - } - } - - /* We are now aligned, write as much as possible. 
*/ - while(len >= map->buswidth) { - __u32 datum; - - if (map->buswidth == 1) { - datum = *(__u8*)buf; - } else if (map->buswidth == 2) { - datum = *(__u16*)buf; - } else if (map->buswidth == 4) { - datum = *(__u32*)buf; - } else { - return -EINVAL; - } - - ret = write_one_word(map, &private->chips[chipnum], ofs, datum); - - if (ret) { - return ret; - } - - ofs += map->buswidth; - buf += map->buswidth; - (*retlen) += map->buswidth; - len -= map->buswidth; - - if (ofs >> private->chipshift) { - chipnum++; - ofs = 0; - if (chipnum == private->numchips) { - return 0; - } - chipstart = private->chips[chipnum].start; - } - } - - if (len & (map->buswidth - 1)) { - int i = 0, n = 0; - u_char tmp_buf[2]; - __u32 datum; - - map_copy_from(map, tmp_buf, - ofs + private->chips[chipnum].start, - map->buswidth); - while (len--) { - tmp_buf[i++] = buf[n++]; - } - - if (map->buswidth == 2) { - datum = *(__u16*)tmp_buf; - } else if (map->buswidth == 4) { - datum = *(__u32*)tmp_buf; - } else { - return -EINVAL; /* should never happen, but be safe */ - } - - ret = write_one_word(map, &private->chips[chipnum], ofs, datum); - - if (ret) { - return ret; - } - - (*retlen) += n; - } - - return 0; -} - - - -static inline int erase_one_block(struct map_info *map, struct flchip *chip, - unsigned long adr, u_long size) -{ - unsigned long timeo = jiffies + HZ; - struct amd_flash_private *private = map->fldrv_priv; - DECLARE_WAITQUEUE(wait, current); - -retry: - spin_lock_bh(chip->mutex); - - if (chip->state != FL_READY){ - set_current_state(TASK_UNINTERRUPTIBLE); - add_wait_queue(&chip->wq, &wait); - - spin_unlock_bh(chip->mutex); - - schedule(); - remove_wait_queue(&chip->wq, &wait); - - if (signal_pending(current)) { - return -EINTR; - } - - timeo = jiffies + HZ; - - goto retry; - } - - chip->state = FL_ERASING; - - adr += chip->start; - ENABLE_VPP(map); - send_cmd(map, chip->start, CMD_SECTOR_ERASE_UNLOCK_DATA); - send_cmd_to_addr(map, chip->start, CMD_SECTOR_ERASE_UNLOCK_DATA_2, adr); - - timeo = jiffies + (HZ * 20); - - spin_unlock_bh(chip->mutex); - msleep(1000); - spin_lock_bh(chip->mutex); - - while (flash_is_busy(map, adr, private->interleave)) { - - if (chip->state != FL_ERASING) { - /* Someone's suspended the erase. Sleep */ - set_current_state(TASK_UNINTERRUPTIBLE); - add_wait_queue(&chip->wq, &wait); - - spin_unlock_bh(chip->mutex); - printk(KERN_INFO "%s: erase suspended. Sleeping\n", - map->name); - schedule(); - remove_wait_queue(&chip->wq, &wait); - - if (signal_pending(current)) { - return -EINTR; - } - - timeo = jiffies + (HZ*2); /* FIXME */ - spin_lock_bh(chip->mutex); - continue; - } - - /* OK Still waiting */ - if (time_after(jiffies, timeo)) { - chip->state = FL_READY; - spin_unlock_bh(chip->mutex); - printk(KERN_WARNING "%s: waiting for erase to complete " - "timed out.\n", map->name); - DISABLE_VPP(map); - - return -EIO; - } - - /* Latency issues. 
Drop the lock, wait a while and retry */ - spin_unlock_bh(chip->mutex); - - if (need_resched()) - schedule(); - else - udelay(1); - - spin_lock_bh(chip->mutex); - } - - /* Verify every single word */ - { - int address; - int error = 0; - __u8 verify; - - for (address = adr; address < (adr + size); address++) { - if ((verify = map_read8(map, address)) != 0xFF) { - error = 1; - break; - } - } - if (error) { - chip->state = FL_READY; - spin_unlock_bh(chip->mutex); - printk(KERN_WARNING - "%s: verify error at 0x%x, size %ld.\n", - map->name, address, size); - DISABLE_VPP(map); - - return -EIO; - } - } - - DISABLE_VPP(map); - chip->state = FL_READY; - wake_up(&chip->wq); - spin_unlock_bh(chip->mutex); - - return 0; -} - - - -static int amd_flash_erase(struct mtd_info *mtd, struct erase_info *instr) -{ - struct map_info *map = mtd->priv; - struct amd_flash_private *private = map->fldrv_priv; - unsigned long adr, len; - int chipnum; - int ret = 0; - int i; - int first; - struct mtd_erase_region_info *regions = mtd->eraseregions; - - if (instr->addr > mtd->size) { - return -EINVAL; - } - - if ((instr->len + instr->addr) > mtd->size) { - return -EINVAL; - } - - /* Check that both start and end of the requested erase are - * aligned with the erasesize at the appropriate addresses. - */ - - i = 0; - - /* Skip all erase regions which are ended before the start of - the requested erase. Actually, to save on the calculations, - we skip to the first erase region which starts after the - start of the requested erase, and then go back one. - */ - - while ((i < mtd->numeraseregions) && - (instr->addr >= regions[i].offset)) { - i++; - } - i--; - - /* OK, now i is pointing at the erase region in which this - * erase request starts. Check the start of the requested - * erase range is aligned with the erase size which is in - * effect here. - */ - - if (instr->addr & (regions[i].erasesize-1)) { - return -EINVAL; - } - - /* Remember the erase region we start on. */ - - first = i; - - /* Next, check that the end of the requested erase is aligned - * with the erase region at that address. - */ - - while ((i < mtd->numeraseregions) && - ((instr->addr + instr->len) >= regions[i].offset)) { - i++; - } - - /* As before, drop back one to point at the region in which - * the address actually falls. 
- */ - - i--; - - if ((instr->addr + instr->len) & (regions[i].erasesize-1)) { - return -EINVAL; - } - - chipnum = instr->addr >> private->chipshift; - adr = instr->addr - (chipnum << private->chipshift); - len = instr->len; - - i = first; - - while (len) { - ret = erase_one_block(map, &private->chips[chipnum], adr, - regions[i].erasesize); - - if (ret) { - return ret; - } - - adr += regions[i].erasesize; - len -= regions[i].erasesize; - - if ((adr % (1 << private->chipshift)) == - ((regions[i].offset + (regions[i].erasesize * - regions[i].numblocks)) - % (1 << private->chipshift))) { - i++; - } - - if (adr >> private->chipshift) { - adr = 0; - chipnum++; - if (chipnum >= private->numchips) { - break; - } - } - } - - instr->state = MTD_ERASE_DONE; - mtd_erase_callback(instr); - - return 0; -} - - - -static void amd_flash_sync(struct mtd_info *mtd) -{ - struct map_info *map = mtd->priv; - struct amd_flash_private *private = map->fldrv_priv; - int i; - struct flchip *chip; - int ret = 0; - DECLARE_WAITQUEUE(wait, current); - - for (i = 0; !ret && (i < private->numchips); i++) { - chip = &private->chips[i]; - - retry: - spin_lock_bh(chip->mutex); - - switch(chip->state) { - case FL_READY: - case FL_STATUS: - case FL_CFI_QUERY: - case FL_JEDEC_QUERY: - chip->oldstate = chip->state; - chip->state = FL_SYNCING; - /* No need to wake_up() on this state change - - * as the whole point is that nobody can do anything - * with the chip now anyway. - */ - case FL_SYNCING: - spin_unlock_bh(chip->mutex); - break; - - default: - /* Not an idle state */ - add_wait_queue(&chip->wq, &wait); - - spin_unlock_bh(chip->mutex); - - schedule(); - - remove_wait_queue(&chip->wq, &wait); - - goto retry; - } - } - - /* Unlock the chips again */ - for (i--; i >= 0; i--) { - chip = &private->chips[i]; - - spin_lock_bh(chip->mutex); - - if (chip->state == FL_SYNCING) { - chip->state = chip->oldstate; - wake_up(&chip->wq); - } - spin_unlock_bh(chip->mutex); - } -} - - - -static int amd_flash_suspend(struct mtd_info *mtd) -{ -printk("amd_flash_suspend(): not implemented!\n"); - return -EINVAL; -} - - - -static void amd_flash_resume(struct mtd_info *mtd) -{ -printk("amd_flash_resume(): not implemented!\n"); -} - - - -static void amd_flash_destroy(struct mtd_info *mtd) -{ - struct map_info *map = mtd->priv; - struct amd_flash_private *private = map->fldrv_priv; - kfree(private); -} - -int __init amd_flash_init(void) -{ - register_mtd_chip_driver(&amd_flash_chipdrv); - return 0; -} - -void __exit amd_flash_exit(void) -{ - unregister_mtd_chip_driver(&amd_flash_chipdrv); -} - -module_init(amd_flash_init); -module_exit(amd_flash_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Jonas Holmberg <jonas.holmberg@axis.com>"); -MODULE_DESCRIPTION("Old MTD chip driver for AMD flash chips"); diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c index 0cfcd88468e..a7543ba3e19 100644 --- a/drivers/mtd/chips/cfi_cmdset_0001.c +++ b/drivers/mtd/chips/cfi_cmdset_0001.c @@ -4,10 +4,8 @@ * * (C) 2000 Red Hat. GPL'd * - * $Id: cfi_cmdset_0001.c,v 1.178 2005/05/19 17:05:43 nico Exp $ * - * - * 10/10/2000 Nicolas Pitre <nico@cam.org> + * 10/10/2000 Nicolas Pitre <nico@fluxnic.net> * - completely revamped method functions so they are aware and * independent of the flash geometry (buswidth, interleave, etc.) 
* - scalability vs code size is completely set at compile-time @@ -15,13 +13,14 @@ * - optimized write buffer method * 02/05/2002 Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com> * - reworked lock/unlock/erase support for var size flash + * 21/03/2007 Rodolfo Giometti <giometti@linux.it> + * - auto unlock sectors on resume for auto locking flash on power up */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/sched.h> -#include <linux/init.h> #include <asm/io.h> #include <asm/byteorder.h> @@ -30,10 +29,10 @@ #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/reboot.h> +#include <linux/bitmap.h> #include <linux/mtd/xip.h> #include <linux/mtd/map.h> #include <linux/mtd/mtd.h> -#include <linux/mtd/compatmac.h> #include <linux/mtd/cfi.h> /* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */ @@ -42,28 +41,42 @@ // debugging, turns off buffer write mode if set to 1 #define FORCE_WORD_WRITE 0 -#define MANUFACTURER_INTEL 0x0089 +/* Intel chips */ #define I82802AB 0x00ad #define I82802AC 0x00ac -#define MANUFACTURER_ST 0x0020 +#define PF38F4476 0x881c +/* STMicroelectronics chips */ #define M50LPW080 0x002F +#define M50FLW080A 0x0080 +#define M50FLW080B 0x0081 +/* Atmel chips */ +#define AT49BV640D 0x02de +#define AT49BV640DT 0x02db +/* Sharp chips */ +#define LH28F640BFHE_PTTL90 0x00b0 +#define LH28F640BFHE_PBTL90 0x00b1 +#define LH28F640BFHE_PTTL70A 0x00b2 +#define LH28F640BFHE_PBTL70A 0x00b3 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); +static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *); static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *); static void cfi_intelext_sync (struct mtd_info *); -static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len); -static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len); +static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); +static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); +static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs, + uint64_t len); #ifdef CONFIG_MTD_OTP static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *); static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *); static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *); static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t); -static int cfi_intelext_get_fact_prot_info (struct mtd_info *, - struct otp_info *, size_t); -static int cfi_intelext_get_user_prot_info (struct mtd_info *, - struct otp_info *, size_t); +static int cfi_intelext_get_fact_prot_info(struct mtd_info *, size_t, + size_t *, struct otp_info *); +static int cfi_intelext_get_user_prot_info(struct mtd_info *, size_t, + size_t *, struct otp_info *); #endif static int cfi_intelext_suspend (struct mtd_info *); static void cfi_intelext_resume (struct mtd_info *); @@ -77,10 +90,10 @@ static struct mtd_info *cfi_intelext_setup (struct mtd_info *); static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **); static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t 
len, - size_t *retlen, u_char **mtdbuf); -static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, - size_t len); + size_t *retlen, void **virt, resource_size_t *phys); +static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len); +static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode); static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode); static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr); #include "fwh_lock.h" @@ -105,6 +118,7 @@ static struct mtd_chip_driver cfi_intelext_chipdrv = { static void cfi_tell_features(struct cfi_pri_intelext *extp) { int i; + printk(" Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion); printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport); printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported"); printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported"); @@ -116,41 +130,99 @@ static void cfi_tell_features(struct cfi_pri_intelext *extp) printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported"); printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported"); printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported"); - for (i=10; i<32; i++) { - if (extp->FeatureSupport & (1<<i)) + printk(" - Extended Flash Array: %s\n", extp->FeatureSupport&1024?"supported":"unsupported"); + for (i=11; i<32; i++) { + if (extp->FeatureSupport & (1<<i)) printk(" - Unknown Bit %X: supported\n", i); } - + printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport); printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported"); for (i=1; i<8; i++) { if (extp->SuspendCmdSupport & (1<<i)) printk(" - Unknown Bit %X: supported\n", i); } - + printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask); printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no"); - printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no"); - for (i=2; i<16; i++) { + printk(" - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no"); + for (i=2; i<3; i++) { + if (extp->BlkStatusRegMask & (1<<i)) + printk(" - Unknown Bit %X Active: yes\n",i); + } + printk(" - EFA Lock Bit: %s\n", extp->BlkStatusRegMask&16?"yes":"no"); + printk(" - EFA Lock-Down Bit: %s\n", extp->BlkStatusRegMask&32?"yes":"no"); + for (i=6; i<16; i++) { if (extp->BlkStatusRegMask & (1<<i)) printk(" - Unknown Bit %X Active: yes\n",i); } - - printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n", + + printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n", extp->VccOptimal >> 4, extp->VccOptimal & 0xf); if (extp->VppOptimal) - printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n", + printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n", extp->VppOptimal >> 4, extp->VppOptimal & 0xf); } #endif +/* Atmel chips don't use the same PRI format as Intel chips */ +static void fixup_convert_atmel_pri(struct mtd_info *mtd) +{ + struct map_info *map = mtd->priv; + struct cfi_private *cfi = map->fldrv_priv; + struct cfi_pri_intelext *extp = cfi->cmdset_priv; + struct cfi_pri_atmel atmel_pri; + uint32_t features = 0; + + /* Reverse byteswapping */ + extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport); + extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask); 
+ extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr); + + memcpy(&atmel_pri, extp, sizeof(atmel_pri)); + memset((char *)extp + 5, 0, sizeof(*extp) - 5); + + printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features); + + if (atmel_pri.Features & 0x01) /* chip erase supported */ + features |= (1<<0); + if (atmel_pri.Features & 0x02) /* erase suspend supported */ + features |= (1<<1); + if (atmel_pri.Features & 0x04) /* program suspend supported */ + features |= (1<<2); + if (atmel_pri.Features & 0x08) /* simultaneous operations supported */ + features |= (1<<9); + if (atmel_pri.Features & 0x20) /* page mode read supported */ + features |= (1<<7); + if (atmel_pri.Features & 0x40) /* queued erase supported */ + features |= (1<<4); + if (atmel_pri.Features & 0x80) /* Protection bits supported */ + features |= (1<<6); + + extp->FeatureSupport = features; + + /* burst write mode not supported */ + cfi->cfiq->BufWriteTimeoutTyp = 0; + cfi->cfiq->BufWriteTimeoutMax = 0; +} + +static void fixup_at49bv640dx_lock(struct mtd_info *mtd) +{ + struct map_info *map = mtd->priv; + struct cfi_private *cfi = map->fldrv_priv; + struct cfi_pri_intelext *cfip = cfi->cmdset_priv; + + cfip->FeatureSupport |= (1 << 5); + mtd->flags |= MTD_POWERUP_LOCK; +} + #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE -/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */ -static void fixup_intel_strataflash(struct mtd_info *mtd, void* param) +/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */ +static void fixup_intel_strataflash(struct mtd_info *mtd) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; - struct cfi_pri_amdstd *extp = cfi->cmdset_priv; + struct cfi_pri_intelext *extp = cfi->cmdset_priv; printk(KERN_WARNING "cfi_cmdset_0001: Suspend " "erase on write disabled.\n"); @@ -159,7 +231,7 @@ static void fixup_intel_strataflash(struct mtd_info *mtd, void* param) #endif #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND -static void fixup_no_write_suspend(struct mtd_info *mtd, void* param) +static void fixup_no_write_suspend(struct mtd_info *mtd) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; @@ -172,64 +244,118 @@ static void fixup_no_write_suspend(struct mtd_info *mtd, void* param) } #endif -static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param) +static void fixup_st_m28w320ct(struct mtd_info *mtd) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; - + cfi->cfiq->BufWriteTimeoutTyp = 0; /* Not supported */ cfi->cfiq->BufWriteTimeoutMax = 0; /* Not supported */ } -static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param) +static void fixup_st_m28w320cb(struct mtd_info *mtd) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; - + /* Note this is done after the region info is endian swapped */ cfi->cfiq->EraseRegionInfo[1] = (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e; }; -static void fixup_use_point(struct mtd_info *mtd, void *param) +static int is_LH28F640BF(struct cfi_private *cfi) +{ + /* Sharp LH28F640BF Family */ + if (cfi->mfr == CFI_MFR_SHARP && ( + cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 || + cfi->id == LH28F640BFHE_PTTL70A || cfi->id == LH28F640BFHE_PBTL70A)) + return 1; + return 0; +} + +static void fixup_LH28F640BF(struct mtd_info *mtd) +{ + struct map_info *map = mtd->priv; + struct cfi_private *cfi = map->fldrv_priv; + struct cfi_pri_intelext *extp = cfi->cmdset_priv; + + /* Reset the Partition 
Configuration Register on LH28F640BF + * to a single partition (PCR = 0x000): PCR is embedded into A0-A15. */ + if (is_LH28F640BF(cfi)) { + printk(KERN_INFO "Reset Partition Config. Register: 1 Partition of 4 planes\n"); + map_write(map, CMD(0x60), 0); + map_write(map, CMD(0x04), 0); + + /* We have set one single partition thus + * Simultaneous Operations are not allowed */ + printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n"); + extp->FeatureSupport &= ~512; + } +} + +static void fixup_use_point(struct mtd_info *mtd) { struct map_info *map = mtd->priv; - if (!mtd->point && map_is_linear(map)) { - mtd->point = cfi_intelext_point; - mtd->unpoint = cfi_intelext_unpoint; + if (!mtd->_point && map_is_linear(map)) { + mtd->_point = cfi_intelext_point; + mtd->_unpoint = cfi_intelext_unpoint; } } -static void fixup_use_write_buffers(struct mtd_info *mtd, void *param) +static void fixup_use_write_buffers(struct mtd_info *mtd) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; if (cfi->cfiq->BufWriteTimeoutTyp) { printk(KERN_INFO "Using buffer write method\n" ); - mtd->write = cfi_intelext_write_buffers; + mtd->_write = cfi_intelext_write_buffers; + mtd->_writev = cfi_intelext_writev; + } +} + +/* + * Some chips power-up with all sectors locked by default. + */ +static void fixup_unlock_powerup_lock(struct mtd_info *mtd) +{ + struct map_info *map = mtd->priv; + struct cfi_private *cfi = map->fldrv_priv; + struct cfi_pri_intelext *cfip = cfi->cmdset_priv; + + if (cfip->FeatureSupport&32) { + printk(KERN_INFO "Using auto-unlock on power-up/resume\n" ); + mtd->flags |= MTD_POWERUP_LOCK; } } static struct cfi_fixup cfi_fixup_table[] = { + { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri }, + { CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock }, + { CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock }, #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE - { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL }, + { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash }, #endif #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND - { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL }, + { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend }, #endif #if !FORCE_WORD_WRITE - { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL }, + { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers }, #endif - { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL }, - { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL }, - { 0, 0, NULL, NULL } + { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct }, + { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb }, + { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock }, + { CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock }, + { CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF }, + { 0, 0, NULL } }; static struct cfi_fixup jedec_fixup_table[] = { - { MANUFACTURER_INTEL, I82802AB, fixup_use_fwh_lock, NULL, }, - { MANUFACTURER_INTEL, I82802AC, fixup_use_fwh_lock, NULL, }, - { MANUFACTURER_ST, M50LPW080, fixup_use_fwh_lock, NULL, }, - { 0, 0, NULL, NULL } + { CFI_MFR_INTEL, I82802AB, fixup_use_fwh_lock }, + { CFI_MFR_INTEL, I82802AC, fixup_use_fwh_lock }, + { CFI_MFR_ST, M50LPW080, fixup_use_fwh_lock }, + { CFI_MFR_ST, M50FLW080A, fixup_use_fwh_lock }, + { CFI_MFR_ST, M50FLW080B, fixup_use_fwh_lock }, + { 0, 0, NULL } }; static struct cfi_fixup fixup_table[] = { /* The CFI vendor ids and the JEDEC vendor IDs appear @@ -237,14 +363,24 @@ static struct cfi_fixup fixup_table[] = { * well. 
This table is to pick all cases where * we know that is the case. */ - { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL }, - { 0, 0, NULL, NULL } + { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point }, + { 0, 0, NULL } }; +static void cfi_fixup_major_minor(struct cfi_private *cfi, + struct cfi_pri_intelext *extp) +{ + if (cfi->mfr == CFI_MFR_INTEL && + cfi->id == PF38F4476 && extp->MinorVersion == '3') + extp->MinorVersion = '1'; +} + static inline struct cfi_pri_intelext * read_pri_intelext(struct map_info *map, __u16 adr) { + struct cfi_private *cfi = map->fldrv_priv; struct cfi_pri_intelext *extp; + unsigned int extra_size = 0; unsigned int extp_size = sizeof(*extp); again: @@ -252,21 +388,40 @@ read_pri_intelext(struct map_info *map, __u16 adr) if (!extp) return NULL; + cfi_fixup_major_minor(cfi, extp); + + if (extp->MajorVersion != '1' || + (extp->MinorVersion < '0' || extp->MinorVersion > '5')) { + printk(KERN_ERR " Unknown Intel/Sharp Extended Query " + "version %c.%c.\n", extp->MajorVersion, + extp->MinorVersion); + kfree(extp); + return NULL; + } + /* Do some byteswapping if necessary */ extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport); extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask); extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr); - if (extp->MajorVersion == '1' && extp->MinorVersion == '3') { - unsigned int extra_size = 0; - int nb_parts, i; + if (extp->MinorVersion >= '0') { + extra_size = 0; /* Protection Register info */ extra_size += (extp->NumProtectionFields - 1) * sizeof(struct cfi_intelext_otpinfo); + } + if (extp->MinorVersion >= '1') { /* Burst Read info */ - extra_size += 6; + extra_size += 2; + if (extp_size < sizeof(*extp) + extra_size) + goto need_more; + extra_size += extp->extra[extra_size - 1]; + } + + if (extp->MinorVersion >= '3') { + int nb_parts, i; /* Number of hardware-partitions */ extra_size += 1; @@ -274,6 +429,10 @@ read_pri_intelext(struct map_info *map, __u16 adr) goto need_more; nb_parts = extp->extra[extra_size - 1]; + /* skip the sizeof(partregion) field in CFI 1.4 */ + if (extp->MinorVersion >= '4') + extra_size += 2; + for (i = 0; i < nb_parts; i++) { struct cfi_intelext_regioninfo *rinfo; rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size]; @@ -285,6 +444,9 @@ read_pri_intelext(struct map_info *map, __u16 adr) * sizeof(struct cfi_intelext_blockinfo); } + if (extp->MinorVersion >= '4') + extra_size += sizeof(struct cfi_intelext_programming_regioninfo); + if (extp_size < sizeof(*extp) + extra_size) { need_more: extp_size = sizeof(*extp) + extra_size; @@ -292,54 +454,47 @@ read_pri_intelext(struct map_info *map, __u16 adr) if (extp_size > 4096) { printk(KERN_ERR "%s: cfi_pri_intelext is too fat\n", - __FUNCTION__); + __func__); return NULL; } goto again; } } - + return extp; } -/* This routine is made available to other mtd code via - * inter_module_register. It must only be accessed through - * inter_module_get which will bump the use count of this module. The - * addresses passed back in cfi are valid as long as the use count of - * this module is non-zero, i.e. between inter_module_get and - * inter_module_put. Keith Owens <kaos@ocs.com.au> 29 Oct 2000. 
- */ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary) { struct cfi_private *cfi = map->fldrv_priv; struct mtd_info *mtd; int i; - mtd = kmalloc(sizeof(*mtd), GFP_KERNEL); - if (!mtd) { - printk(KERN_ERR "Failed to allocate memory for MTD device\n"); + mtd = kzalloc(sizeof(*mtd), GFP_KERNEL); + if (!mtd) return NULL; - } - memset(mtd, 0, sizeof(*mtd)); mtd->priv = map; mtd->type = MTD_NORFLASH; /* Fill in the default mtd operations */ - mtd->erase = cfi_intelext_erase_varsize; - mtd->read = cfi_intelext_read; - mtd->write = cfi_intelext_write_words; - mtd->sync = cfi_intelext_sync; - mtd->lock = cfi_intelext_lock; - mtd->unlock = cfi_intelext_unlock; - mtd->suspend = cfi_intelext_suspend; - mtd->resume = cfi_intelext_resume; + mtd->_erase = cfi_intelext_erase_varsize; + mtd->_read = cfi_intelext_read; + mtd->_write = cfi_intelext_write_words; + mtd->_sync = cfi_intelext_sync; + mtd->_lock = cfi_intelext_lock; + mtd->_unlock = cfi_intelext_unlock; + mtd->_is_locked = cfi_intelext_is_locked; + mtd->_suspend = cfi_intelext_suspend; + mtd->_resume = cfi_intelext_resume; mtd->flags = MTD_CAP_NORFLASH; mtd->name = map->name; + mtd->writesize = 1; + mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; mtd->reboot_notifier.notifier_call = cfi_intelext_reboot; if (cfi->cfi_mode == CFI_MODE_CFI) { - /* + /* * It's a real CFI chip, not one for which the probe * routine faked a CFI structure. So we read the feature * table from it. @@ -354,14 +509,14 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary) } /* Install our own private info structure */ - cfi->cmdset_priv = extp; + cfi->cmdset_priv = extp; cfi_fixup(mtd, cfi_fixup_table); #ifdef DEBUG_CFI_FEATURES /* Tell the user about it in lots of lovely detail */ cfi_tell_features(extp); -#endif +#endif if(extp->SuspendCmdSupport & 1) { printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n"); @@ -375,16 +530,58 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary) cfi_fixup(mtd, fixup_table); for (i=0; i< cfi->numchips; i++) { - cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp; - cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp; - cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp; + if (cfi->cfiq->WordWriteTimeoutTyp) + cfi->chips[i].word_write_time = + 1<<cfi->cfiq->WordWriteTimeoutTyp; + else + cfi->chips[i].word_write_time = 50000; + + if (cfi->cfiq->BufWriteTimeoutTyp) + cfi->chips[i].buffer_write_time = + 1<<cfi->cfiq->BufWriteTimeoutTyp; + /* No default; if it isn't specified, we won't use it */ + + if (cfi->cfiq->BlockEraseTimeoutTyp) + cfi->chips[i].erase_time = + 1000<<cfi->cfiq->BlockEraseTimeoutTyp; + else + cfi->chips[i].erase_time = 2000000; + + if (cfi->cfiq->WordWriteTimeoutTyp && + cfi->cfiq->WordWriteTimeoutMax) + cfi->chips[i].word_write_time_max = + 1<<(cfi->cfiq->WordWriteTimeoutTyp + + cfi->cfiq->WordWriteTimeoutMax); + else + cfi->chips[i].word_write_time_max = 50000 * 8; + + if (cfi->cfiq->BufWriteTimeoutTyp && + cfi->cfiq->BufWriteTimeoutMax) + cfi->chips[i].buffer_write_time_max = + 1<<(cfi->cfiq->BufWriteTimeoutTyp + + cfi->cfiq->BufWriteTimeoutMax); + + if (cfi->cfiq->BlockEraseTimeoutTyp && + cfi->cfiq->BlockEraseTimeoutMax) + cfi->chips[i].erase_time_max = + 1000<<(cfi->cfiq->BlockEraseTimeoutTyp + + cfi->cfiq->BlockEraseTimeoutMax); + else + cfi->chips[i].erase_time_max = 2000000 * 8; + cfi->chips[i].ref_point_counter = 0; - } + init_waitqueue_head(&(cfi->chips[i].wq)); + } map->fldrv = 
&cfi_intelext_chipdrv; - + return cfi_intelext_setup(mtd); } +struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001"))); +struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001"))); +EXPORT_SYMBOL_GPL(cfi_cmdset_0001); +EXPORT_SYMBOL_GPL(cfi_cmdset_0003); +EXPORT_SYMBOL_GPL(cfi_cmdset_0200); static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd) { @@ -399,13 +596,11 @@ static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd) mtd->size = devsize * cfi->numchips; mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips; - mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) + mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) * mtd->numeraseregions, GFP_KERNEL); - if (!mtd->eraseregions) { - printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n"); + if (!mtd->eraseregions) goto setup_err; - } - + for (i=0; i<cfi->cfiq->NumEraseRegions; i++) { unsigned long ernum, ersize; ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave; @@ -418,6 +613,7 @@ static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd) mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset; mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize; mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum; + mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL); } offset += (ersize * ernum); } @@ -429,19 +625,19 @@ static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd) } for (i=0; i<mtd->numeraseregions;i++){ - printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n", - i,mtd->eraseregions[i].offset, + printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n", + i,(unsigned long long)mtd->eraseregions[i].offset, mtd->eraseregions[i].erasesize, mtd->eraseregions[i].numblocks); } #ifdef CONFIG_MTD_OTP - mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg; - mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg; - mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg; - mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg; - mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info; - mtd->get_user_prot_info = cfi_intelext_get_user_prot_info; + mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg; + mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg; + mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg; + mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg; + mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info; + mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info; #endif /* This function has the potential to distort the reality @@ -454,11 +650,8 @@ static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd) return mtd; setup_err: - if(mtd) { - if(mtd->eraseregions) - kfree(mtd->eraseregions); - kfree(mtd); - } + kfree(mtd->eraseregions); + kfree(mtd); kfree(cfi->cmdset_priv); return NULL; } @@ -471,7 +664,7 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd, struct cfi_pri_intelext *extp = cfi->cmdset_priv; /* - * Probing of multi-partition flash ships. + * Probing of multi-partition flash chips. 
* * To support multiple partitions when available, we simply arrange * for each of them to have their own flchip structure even if they @@ -481,7 +674,7 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd, * arrangement at this point. This can be rearranged in the future * if someone feels motivated enough. --nico */ - if (extp && extp->MajorVersion == '1' && extp->MinorVersion == '3' + if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3' && extp->FeatureSupport & (1 << 9)) { struct cfi_private *newcfi; struct flchip *chip; @@ -493,12 +686,16 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd, sizeof(struct cfi_intelext_otpinfo); /* Burst Read info */ - offs += 6; + offs += extp->extra[offs+1]+2; /* Number of partition regions */ numregions = extp->extra[offs]; offs += 1; + /* skip the sizeof(partregion) field in CFI 1.4 */ + if (extp->MinorVersion >= '4') + offs += 2; + /* Number of hardware partitions */ numparts = 0; for (i = 0; i < numregions; i++) { @@ -510,6 +707,21 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd, sizeof(struct cfi_intelext_blockinfo); } + if (!numparts) + numparts = 1; + + /* Programming Region info */ + if (extp->MinorVersion >= '4') { + struct cfi_intelext_programming_regioninfo *prinfo; + prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs]; + mtd->writesize = cfi->interleave << prinfo->ProgRegShift; + mtd->flags &= ~MTD_BIT_WRITEABLE; + printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n", + map->name, mtd->writesize, + cfi->interleave * prinfo->ControlValid, + cfi->interleave * prinfo->ControlInvalid); + } + /* * All functions below currently rely on all chips having * the same geometry so we'll just assume that all hardware @@ -520,7 +732,7 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd, if ((1 << partshift) < mtd->erasesize) { printk( KERN_ERR "%s: bad number of hw partitions (%d)\n", - __FUNCTION__, numparts); + __func__, numparts); return -EINVAL; } @@ -540,7 +752,7 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd, chip = &newcfi->chips[0]; for (i = 0; i < cfi->numchips; i++) { shared[i].writing = shared[i].erasing = NULL; - spin_lock_init(&shared[i].lock); + mutex_init(&shared[i].lock); for (j = 0; j < numparts; j++) { *chip = cfi->chips[i]; chip->start += j << partshift; @@ -548,8 +760,7 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd, /* those should be reset too since they create memory references. */ init_waitqueue_head(&chip->wq); - spin_lock_init(&chip->_spinlock); - chip->mutex = &chip->_spinlock; + mutex_init(&chip->mutex); chip++; } } @@ -570,75 +781,17 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd, /* * *********** CHIP ACCESS FUNCTIONS *********** */ - -static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode) +static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode) { DECLARE_WAITQUEUE(wait, current); struct cfi_private *cfi = map->fldrv_priv; map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01); - unsigned long timeo; struct cfi_pri_intelext *cfip = cfi->cmdset_priv; + unsigned long timeo = jiffies + HZ; - resettime: - timeo = jiffies + HZ; - retry: - if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) { - /* - * OK. We have possibility for contension on the write/erase - * operations which are global to the real chip and not per - * partition. 
So let's fight it over in the partition which - * currently has authority on the operation. - * - * The rules are as follows: - * - * - any write operation must own shared->writing. - * - * - any erase operation must own _both_ shared->writing and - * shared->erasing. - * - * - contension arbitration is handled in the owner's context. - * - * The 'shared' struct can be read when its lock is taken. - * However any writes to it can only be made when the current - * owner's lock is also held. - */ - struct flchip_shared *shared = chip->priv; - struct flchip *contender; - spin_lock(&shared->lock); - contender = shared->writing; - if (contender && contender != chip) { - /* - * The engine to perform desired operation on this - * partition is already in use by someone else. - * Let's fight over it in the context of the chip - * currently using it. If it is possible to suspend, - * that other partition will do just that, otherwise - * it'll happily send us to sleep. In any case, when - * get_chip returns success we're clear to go ahead. - */ - int ret = spin_trylock(contender->mutex); - spin_unlock(&shared->lock); - if (!ret) - goto retry; - spin_unlock(chip->mutex); - ret = get_chip(map, contender, contender->start, mode); - spin_lock(chip->mutex); - if (ret) { - spin_unlock(contender->mutex); - return ret; - } - timeo = jiffies + HZ; - spin_lock(&shared->lock); - } - - /* We now own it */ - shared->writing = chip; - if (mode == FL_ERASING) - shared->erasing = chip; - if (contender && contender != chip) - spin_unlock(contender->mutex); - spin_unlock(&shared->lock); - } + /* Prevent setting state FL_SYNCING for chip in suspended state. */ + if (mode == FL_SYNCING && chip->oldstate != FL_READY) + goto sleep; switch (chip->state) { @@ -653,18 +806,13 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS)) break; - if (time_after(jiffies, timeo)) { - printk(KERN_ERR "Waiting for chip to be ready timed out. Status %lx\n", - status.x[0]); - return -EIO; - } - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); cfi_udelay(1); - spin_lock(chip->mutex); + mutex_lock(&chip->mutex); /* Someone else might have been playing with it. */ - goto retry; + return -EAGAIN; } - + /* Fall through */ case FL_READY: case FL_CFI_QUERY: case FL_JEDEC_QUERY: @@ -696,20 +844,17 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr break; if (time_after(jiffies, timeo)) { - /* Urgh. Resume and pretend we weren't here. */ - map_write(map, CMD(0xd0), adr); - /* Make sure we're in 'read status' mode if it had finished */ - map_write(map, CMD(0x70), adr); - chip->state = FL_ERASING; - chip->oldstate = FL_READY; - printk(KERN_ERR "Chip not ready after erase " - "suspended: status = 0x%lx\n", status.x[0]); + /* Urgh. Resume and pretend we weren't here. + * Make sure we're in 'read status' mode if it had finished */ + put_chip(map, chip, adr); + printk(KERN_ERR "%s: Chip not ready after erase " + "suspended: status = 0x%lx\n", map->name, status.x[0]); return -EIO; } - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); cfi_udelay(1); - spin_lock(chip->mutex); + mutex_lock(&chip->mutex); /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING. So we can just loop here. 
*/ } @@ -724,21 +869,120 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr chip->state = FL_READY; return 0; + case FL_SHUTDOWN: + /* The machine is rebooting now,so no one can get chip anymore */ + return -EIO; case FL_POINT: /* Only if there's no operation suspended... */ if (mode == FL_READY && chip->oldstate == FL_READY) return 0; - + /* Fall through */ default: sleep: set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&chip->wq, &wait); - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); schedule(); remove_wait_queue(&chip->wq, &wait); - spin_lock(chip->mutex); - goto resettime; + mutex_lock(&chip->mutex); + return -EAGAIN; + } +} + +static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode) +{ + int ret; + DECLARE_WAITQUEUE(wait, current); + + retry: + if (chip->priv && + (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE + || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) { + /* + * OK. We have possibility for contention on the write/erase + * operations which are global to the real chip and not per + * partition. So let's fight it over in the partition which + * currently has authority on the operation. + * + * The rules are as follows: + * + * - any write operation must own shared->writing. + * + * - any erase operation must own _both_ shared->writing and + * shared->erasing. + * + * - contention arbitration is handled in the owner's context. + * + * The 'shared' struct can be read and/or written only when + * its lock is taken. + */ + struct flchip_shared *shared = chip->priv; + struct flchip *contender; + mutex_lock(&shared->lock); + contender = shared->writing; + if (contender && contender != chip) { + /* + * The engine to perform desired operation on this + * partition is already in use by someone else. + * Let's fight over it in the context of the chip + * currently using it. If it is possible to suspend, + * that other partition will do just that, otherwise + * it'll happily send us to sleep. In any case, when + * get_chip returns success we're clear to go ahead. + */ + ret = mutex_trylock(&contender->mutex); + mutex_unlock(&shared->lock); + if (!ret) + goto retry; + mutex_unlock(&chip->mutex); + ret = chip_ready(map, contender, contender->start, mode); + mutex_lock(&chip->mutex); + + if (ret == -EAGAIN) { + mutex_unlock(&contender->mutex); + goto retry; + } + if (ret) { + mutex_unlock(&contender->mutex); + return ret; + } + mutex_lock(&shared->lock); + + /* We should not own chip if it is already + * in FL_SYNCING state. Put contender and retry. */ + if (chip->state == FL_SYNCING) { + put_chip(map, contender, contender->start); + mutex_unlock(&contender->mutex); + goto retry; + } + mutex_unlock(&contender->mutex); + } + + /* Check if we already have suspended erase + * on this chip. Sleep. 
*/ + if (mode == FL_ERASING && shared->erasing + && shared->erasing->oldstate == FL_ERASING) { + mutex_unlock(&shared->lock); + set_current_state(TASK_UNINTERRUPTIBLE); + add_wait_queue(&chip->wq, &wait); + mutex_unlock(&chip->mutex); + schedule(); + remove_wait_queue(&chip->wq, &wait); + mutex_lock(&chip->mutex); + goto retry; + } + + /* We now own it */ + shared->writing = chip; + if (mode == FL_ERASING) + shared->erasing = chip; + mutex_unlock(&shared->lock); } + ret = chip_ready(map, chip, adr, mode); + if (ret == -EAGAIN) + goto retry; + + return ret; } static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr) @@ -747,19 +991,19 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad if (chip->priv) { struct flchip_shared *shared = chip->priv; - spin_lock(&shared->lock); + mutex_lock(&shared->lock); if (shared->writing == chip && chip->oldstate == FL_READY) { /* We own the ability to write, but we're done */ shared->writing = shared->erasing; if (shared->writing && shared->writing != chip) { /* give back ownership to who we loaned it from */ struct flchip *loaner = shared->writing; - spin_lock(loaner->mutex); - spin_unlock(&shared->lock); - spin_unlock(chip->mutex); + mutex_lock(&loaner->mutex); + mutex_unlock(&shared->lock); + mutex_unlock(&chip->mutex); put_chip(map, loaner, loaner->start); - spin_lock(chip->mutex); - spin_unlock(loaner->mutex); + mutex_lock(&chip->mutex); + mutex_unlock(&loaner->mutex); wake_up(&chip->wq); return; } @@ -773,24 +1017,23 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad * Don't let the switch below mess things up since * we don't have ownership to resume anything. */ - spin_unlock(&shared->lock); + mutex_unlock(&shared->lock); wake_up(&chip->wq); return; } - spin_unlock(&shared->lock); + mutex_unlock(&shared->lock); } switch(chip->oldstate) { case FL_ERASING: - chip->state = chip->oldstate; - /* What if one interleaved chip has finished and the + /* What if one interleaved chip has finished and the other hasn't? The old code would leave the finished - one in READY mode. That's bad, and caused -EROFS + one in READY mode. That's bad, and caused -EROFS errors to be returned from do_erase_oneblock because that's the only bit it checked for at the time. - As the state machine appears to explicitly allow + As the state machine appears to explicitly allow sending the 0x70 (Read Status) command to an erasing - chip and expecting it to be ignored, that's what we + chip and expecting it to be ignored, that's what we do. */ map_write(map, CMD(0xd0), adr); map_write(map, CMD(0x70), adr); @@ -806,11 +1049,9 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad case FL_READY: case FL_STATUS: case FL_JEDEC_QUERY: - /* We should really make set_vpp() count, rather than doing this */ - DISABLE_VPP(map); break; default: - printk(KERN_ERR "put_chip() called with oldstate %d!!\n", chip->oldstate); + printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate); } wake_up(&chip->wq); } @@ -851,26 +1092,33 @@ static void __xipram xip_enable(struct map_info *map, struct flchip *chip, /* * When a delay is required for the flash operation to complete, the - * xip_udelay() function is polling for both the given timeout and pending - * (but still masked) hardware interrupts. Whenever there is an interrupt - * pending then the flash erase or write operation is suspended, array mode - * restored and interrupts unmasked. 
Task scheduling might also happen at that - * point. The CPU eventually returns from the interrupt or the call to - * schedule() and the suspended flash operation is resumed for the remaining - * of the delay period. + * xip_wait_for_operation() function is polling for both the given timeout + * and pending (but still masked) hardware interrupts. Whenever there is an + * interrupt pending then the flash erase or write operation is suspended, + * array mode restored and interrupts unmasked. Task scheduling might also + * happen at that point. The CPU eventually returns from the interrupt or + * the call to schedule() and the suspended flash operation is resumed for + * the remaining of the delay period. * * Warning: this function _will_ fool interrupt latency tracing tools. */ -static void __xipram xip_udelay(struct map_info *map, struct flchip *chip, - unsigned long adr, int usec) +static int __xipram xip_wait_for_operation( + struct map_info *map, struct flchip *chip, + unsigned long adr, unsigned int chip_op_time_max) { struct cfi_private *cfi = map->fldrv_priv; struct cfi_pri_intelext *cfip = cfi->cmdset_priv; map_word status, OK = CMD(0x80); - unsigned long suspended, start = xip_currtime(); + unsigned long usec, suspended, start, done; flstate_t oldstate, newstate; + start = xip_currtime(); + usec = chip_op_time_max; + if (usec == 0) + usec = 500000; + done = 0; + do { cpu_relax(); if (xip_irqpending() && cfip && @@ -887,9 +1135,9 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip, * we resume the whole thing at once). Yes, it * can happen! */ + usec -= done; map_write(map, CMD(0xb0), adr); map_write(map, CMD(0x70), adr); - usec -= xip_elapsed_since(start); suspended = xip_currtime(); do { if (xip_elapsed_since(suspended) > 100000) { @@ -899,7 +1147,7 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip, * This is a critical error but there * is not much we can do here. */ - return; + return -EIO; } status = map_read(map, adr); } while (!map_word_andequal(map, status, OK, OK)); @@ -920,10 +1168,10 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip, chip->state = newstate; map_write(map, CMD(0xff), adr); (void) map_read(map, adr); - asm volatile (".rep 8; nop; .endr"); + xip_iprefetch(); local_irq_enable(); - spin_unlock(chip->mutex); - asm volatile (".rep 8; nop; .endr"); + mutex_unlock(&chip->mutex); + xip_iprefetch(); cond_resched(); /* @@ -932,15 +1180,15 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip, * a suspended erase state. If so let's wait * until it's done. */ - spin_lock(chip->mutex); + mutex_lock(&chip->mutex); while (chip->state != newstate) { DECLARE_WAITQUEUE(wait, current); set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&chip->wq, &wait); - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); schedule(); remove_wait_queue(&chip->wq, &wait); - spin_lock(chip->mutex); + mutex_lock(&chip->mutex); } /* Disallow XIP again */ local_irq_disable(); @@ -959,65 +1207,117 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip, xip_cpu_idle(); } status = map_read(map, adr); + done = xip_elapsed_since(start); } while (!map_word_andequal(map, status, OK, OK) - && xip_elapsed_since(start) < usec); -} + && done < usec); -#define UDELAY(map, chip, adr, usec) xip_udelay(map, chip, adr, usec) + return (done >= usec) ? 
-ETIME : 0; +} /* * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while * the flash is actively programming or erasing since we have to poll for * the operation to complete anyway. We can't do that in a generic way with * a XIP setup so do it before the actual flash operation in this case - * and stub it out from INVALIDATE_CACHE_UDELAY. + * and stub it out from INVAL_CACHE_AND_WAIT. */ #define XIP_INVAL_CACHED_RANGE(map, from, size) \ INVALIDATE_CACHED_RANGE(map, from, size) -#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \ - UDELAY(map, chip, adr, usec) - -/* - * Extra notes: - * - * Activating this XIP support changes the way the code works a bit. For - * example the code to suspend the current process when concurrent access - * happens is never executed because xip_udelay() will always return with the - * same chip state as it was entered with. This is why there is no care for - * the presence of add_wait_queue() or schedule() calls from within a couple - * xip_disable()'d areas of code, like in do_erase_oneblock for example. - * The queueing and scheduling are always happening within xip_udelay(). - * - * Similarly, get_chip() and put_chip() just happen to always be executed - * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state - * is in array mode, therefore never executing many cases therein and not - * causing any problem with XIP. - */ +#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \ + xip_wait_for_operation(map, chip, cmd_adr, usec_max) #else #define xip_disable(map, chip, adr) #define xip_enable(map, chip, adr) #define XIP_INVAL_CACHED_RANGE(x...) +#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation + +static int inval_cache_and_wait_for_operation( + struct map_info *map, struct flchip *chip, + unsigned long cmd_adr, unsigned long inval_adr, int inval_len, + unsigned int chip_op_time, unsigned int chip_op_time_max) +{ + struct cfi_private *cfi = map->fldrv_priv; + map_word status, status_OK = CMD(0x80); + int chip_state = chip->state; + unsigned int timeo, sleep_time, reset_timeo; -#define UDELAY(map, chip, adr, usec) \ -do { \ - spin_unlock(chip->mutex); \ - cfi_udelay(usec); \ - spin_lock(chip->mutex); \ -} while (0) - -#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \ -do { \ - spin_unlock(chip->mutex); \ - INVALIDATE_CACHED_RANGE(map, adr, len); \ - cfi_udelay(usec); \ - spin_lock(chip->mutex); \ -} while (0) + mutex_unlock(&chip->mutex); + if (inval_len) + INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len); + mutex_lock(&chip->mutex); + + timeo = chip_op_time_max; + if (!timeo) + timeo = 500000; + reset_timeo = timeo; + sleep_time = chip_op_time / 2; + + for (;;) { + if (chip->state != chip_state) { + /* Someone's suspended the operation: sleep */ + DECLARE_WAITQUEUE(wait, current); + set_current_state(TASK_UNINTERRUPTIBLE); + add_wait_queue(&chip->wq, &wait); + mutex_unlock(&chip->mutex); + schedule(); + remove_wait_queue(&chip->wq, &wait); + mutex_lock(&chip->mutex); + continue; + } + + status = map_read(map, cmd_adr); + if (map_word_andequal(map, status, status_OK, status_OK)) + break; + + if (chip->erase_suspended && chip_state == FL_ERASING) { + /* Erase suspend occurred while sleep: reset timeout */ + timeo = reset_timeo; + chip->erase_suspended = 0; + } + if (chip->write_suspended && chip_state == FL_WRITING) { + /* Write suspend occurred while sleep: reset timeout */ + timeo = reset_timeo; + chip->write_suspended = 0; + } + if (!timeo) { + 
map_write(map, CMD(0x70), cmd_adr); + chip->state = FL_STATUS; + return -ETIME; + } + + /* OK Still waiting. Drop the lock, wait a while and retry. */ + mutex_unlock(&chip->mutex); + if (sleep_time >= 1000000/HZ) { + /* + * Half of the normal delay still remaining + * can be performed with a sleeping delay instead + * of busy waiting. + */ + msleep(sleep_time/1000); + timeo -= sleep_time; + sleep_time = 1000000/HZ; + } else { + udelay(1); + cond_resched(); + timeo--; + } + mutex_lock(&chip->mutex); + } + + /* Done and happy. */ + chip->state = FL_STATUS; + return 0; +} #endif +#define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \ + INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max); + + static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len) { unsigned long cmd_addr; @@ -1026,10 +1326,10 @@ static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t a adr += chip->start; - /* Ensure cmd read/writes are aligned. */ - cmd_addr = adr & ~(map_bankwidth(map)-1); + /* Ensure cmd read/writes are aligned. */ + cmd_addr = adr & ~(map_bankwidth(map)-1); - spin_lock(chip->mutex); + mutex_lock(&chip->mutex); ret = get_chip(map, chip, cmd_addr, FL_POINT); @@ -1040,24 +1340,22 @@ static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t a chip->state = FL_POINT; chip->ref_point_counter++; } - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); return ret; } -static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf) +static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, void **virt, resource_size_t *phys) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; - unsigned long ofs; + unsigned long ofs, last_end = 0; int chipnum; int ret = 0; - if (!map->virt || (from + len > mtd->size)) + if (!map->virt) return -EINVAL; - - *mtdbuf = (void *)map->virt + from; - *retlen = 0; /* Now lock the chip(s) to POINT state */ @@ -1065,12 +1363,22 @@ static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, si chipnum = (from >> cfi->chipshift); ofs = from - (chipnum << cfi->chipshift); + *virt = map->virt + cfi->chips[chipnum].start + ofs; + if (phys) + *phys = map->phys + cfi->chips[chipnum].start + ofs; + while (len) { unsigned long thislen; if (chipnum >= cfi->numchips) break; + /* We cannot point across chips that are virtually disjoint */ + if (!last_end) + last_end = cfi->chips[chipnum].start; + else if (cfi->chips[chipnum].start != last_end) + break; + if ((len + ofs -1) >> cfi->chipshift) thislen = (1<<cfi->chipshift) - ofs; else @@ -1082,19 +1390,20 @@ static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, si *retlen += thislen; len -= thislen; - + ofs = 0; + last_end += 1 << cfi->chipshift; chipnum++; } return 0; } -static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len) +static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; unsigned long ofs; - int chipnum; + int chipnum, err = 0; /* Now unlock the chip(s) POINT state */ @@ -1102,7 +1411,7 @@ static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t fro chipnum = (from >> cfi->chipshift); ofs = from - (chipnum << cfi->chipshift); - while (len) { + while (len && !err) { unsigned long thislen; struct flchip *chip; @@ -1115,21 +1424,25 @@ 
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t fro else thislen = len; - spin_lock(chip->mutex); + mutex_lock(&chip->mutex); if (chip->state == FL_POINT) { chip->ref_point_counter--; if(chip->ref_point_counter == 0) chip->state = FL_READY; - } else - printk(KERN_ERR "Warning: unpoint called on non pointed region\n"); /* Should this give an error? */ + } else { + printk(KERN_ERR "%s: Error: unpoint called on non pointed region\n", map->name); + err = -EINVAL; + } put_chip(map, chip, chip->start); - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); len -= thislen; ofs = 0; chipnum++; } + + return err; } static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf) @@ -1140,13 +1453,13 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof adr += chip->start; - /* Ensure cmd read/writes are aligned. */ - cmd_addr = adr & ~(map_bankwidth(map)-1); + /* Ensure cmd read/writes are aligned. */ + cmd_addr = adr & ~(map_bankwidth(map)-1); - spin_lock(chip->mutex); + mutex_lock(&chip->mutex); ret = get_chip(map, chip, cmd_addr, FL_READY); if (ret) { - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); return ret; } @@ -1160,7 +1473,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof put_chip(map, chip, cmd_addr); - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); return 0; } @@ -1176,8 +1489,6 @@ static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, siz chipnum = (from >> cfi->chipshift); ofs = from - (chipnum << cfi->chipshift); - *retlen = 0; - while (len) { unsigned long thislen; @@ -1196,7 +1507,7 @@ static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, siz *retlen += thislen; len -= thislen; buf += thislen; - + ofs = 0; chipnum++; } @@ -1207,24 +1518,26 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum, int mode) { struct cfi_private *cfi = map->fldrv_priv; - map_word status, status_OK, write_cmd; - unsigned long timeo; - int z, ret=0; + map_word status, write_cmd; + int ret=0; adr += chip->start; - /* Let's determine this according to the interleave only once */ - status_OK = CMD(0x80); switch (mode) { - case FL_WRITING: write_cmd = CMD(0x40); break; - case FL_OTP_WRITE: write_cmd = CMD(0xc0); break; - default: return -EINVAL; + case FL_WRITING: + write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41); + break; + case FL_OTP_WRITE: + write_cmd = CMD(0xc0); + break; + default: + return -EINVAL; } - spin_lock(chip->mutex); + mutex_lock(&chip->mutex); ret = get_chip(map, chip, adr, mode); if (ret) { - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); return ret; } @@ -1235,68 +1548,43 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, map_write(map, datum, adr); chip->state = mode; - INVALIDATE_CACHE_UDELAY(map, chip, - adr, map_bankwidth(map), - chip->word_write_time); + ret = INVAL_CACHE_AND_WAIT(map, chip, adr, + adr, map_bankwidth(map), + chip->word_write_time, + chip->word_write_time_max); + if (ret) { + xip_enable(map, chip, adr); + printk(KERN_ERR "%s: word write error (status timeout)\n", map->name); + goto out; + } - timeo = jiffies + (HZ/2); - z = 0; - for (;;) { - if (chip->state != mode) { - /* Someone's suspended the write. 
Sleep */ - DECLARE_WAITQUEUE(wait, current); + /* check for errors */ + status = map_read(map, adr); + if (map_word_bitsset(map, status, CMD(0x1a))) { + unsigned long chipstatus = MERGESTATUS(status); - set_current_state(TASK_UNINTERRUPTIBLE); - add_wait_queue(&chip->wq, &wait); - spin_unlock(chip->mutex); - schedule(); - remove_wait_queue(&chip->wq, &wait); - timeo = jiffies + (HZ / 2); /* FIXME */ - spin_lock(chip->mutex); - continue; - } + /* reset status */ + map_write(map, CMD(0x50), adr); + map_write(map, CMD(0x70), adr); + xip_enable(map, chip, adr); - status = map_read(map, adr); - if (map_word_andequal(map, status, status_OK, status_OK)) - break; - - /* OK Still waiting */ - if (time_after(jiffies, timeo)) { - chip->state = FL_STATUS; - xip_enable(map, chip, adr); - printk(KERN_ERR "waiting for chip to be ready timed out in word write\n"); + if (chipstatus & 0x02) { + ret = -EROFS; + } else if (chipstatus & 0x08) { + printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name); ret = -EIO; - goto out; + } else { + printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus); + ret = -EINVAL; } - /* Latency issues. Drop the lock, wait a while and retry */ - z++; - UDELAY(map, chip, adr, 1); - } - if (!z) { - chip->word_write_time--; - if (!chip->word_write_time) - chip->word_write_time++; - } - if (z > 1) - chip->word_write_time++; - - /* Done and happy. */ - chip->state = FL_STATUS; - - /* check for lock bit */ - if (map_word_bitsset(map, status, CMD(0x02))) { - /* clear status */ - map_write(map, CMD(0x50), adr); - /* put back into read status register mode */ - map_write(map, CMD(0x70), adr); - ret = -EROFS; + goto out; } xip_enable(map, chip, adr); - out: put_chip(map, chip, adr); - spin_unlock(chip->mutex); - + out: DISABLE_VPP(map); + put_chip(map, chip, adr); + mutex_unlock(&chip->mutex); return ret; } @@ -1309,10 +1597,6 @@ static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t le int chipnum; unsigned long ofs; - *retlen = 0; - if (!len) - return 0; - chipnum = to >> cfi->chipshift; ofs = to - (chipnum << cfi->chipshift); @@ -1329,7 +1613,7 @@ static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t le ret = do_write_oneword(map, &cfi->chips[chipnum], bus_ofs, datum, FL_WRITING); - if (ret) + if (ret) return ret; len -= n; @@ -1338,13 +1622,13 @@ static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t le (*retlen) += n; if (ofs >> cfi->chipshift) { - chipnum ++; + chipnum ++; ofs = 0; if (chipnum == cfi->numchips) return 0; } } - + while(len >= map_bankwidth(map)) { map_word datum = map_word_load(map, buf); @@ -1359,7 +1643,7 @@ static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t le len -= map_bankwidth(map); if (ofs >> cfi->chipshift) { - chipnum ++; + chipnum ++; ofs = 0; if (chipnum == cfi->numchips) return 0; @@ -1374,9 +1658,9 @@ static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t le ret = do_write_oneword(map, &cfi->chips[chipnum], ofs, datum, FL_WRITING); - if (ret) + if (ret) return ret; - + (*retlen) += len; } @@ -1384,38 +1668,52 @@ static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t le } -static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, - unsigned long adr, const u_char *buf, int len) +static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, + unsigned long adr, const struct kvec **pvec, + unsigned long *pvec_seek, int len) { struct cfi_private *cfi 
= map->fldrv_priv; - map_word status, status_OK; - unsigned long cmd_adr, timeo; - int wbufsize, z, ret=0, bytes, words; + map_word status, write_cmd, datum; + unsigned long cmd_adr; + int ret, wbufsize, word_gap, words; + const struct kvec *vec; + unsigned long vec_seek; + unsigned long initial_adr; + int initial_len = len; wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; adr += chip->start; + initial_adr = adr; cmd_adr = adr & ~(wbufsize-1); - + + /* Sharp LH28F640BF chips need the first address for the + * Page Buffer Program command. See Table 5 of + * LH28F320BF, LH28F640BF, LH28F128BF Series (Appendix FUM00701) */ + if (is_LH28F640BF(cfi)) + cmd_adr = adr; + /* Let's determine this according to the interleave only once */ - status_OK = CMD(0x80); + write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9); - spin_lock(chip->mutex); + mutex_lock(&chip->mutex); ret = get_chip(map, chip, cmd_adr, FL_WRITING); if (ret) { - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); return ret; } - XIP_INVAL_CACHED_RANGE(map, adr, len); + XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len); ENABLE_VPP(map); xip_disable(map, chip, cmd_adr); - /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set - [...], the device will not accept any more Write to Buffer commands". + /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set + [...], the device will not accept any more Write to Buffer commands". So we must check here and reset those bits if they're set. Otherwise we're just pissing in the wind */ - if (chip->state != FL_STATUS) + if (chip->state != FL_STATUS) { map_write(map, CMD(0x70), cmd_adr); + chip->state = FL_STATUS; + } status = map_read(map, cmd_adr); if (map_word_bitsset(map, status, CMD(0x30))) { xip_enable(map, chip, cmd_adr); @@ -1426,208 +1724,194 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, } chip->state = FL_WRITING_TO_BUFFER; - - z = 0; - for (;;) { - map_write(map, CMD(0xe8), cmd_adr); - + map_write(map, write_cmd, cmd_adr); + ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0); + if (ret) { + /* Argh. Not ready for write to buffer */ + map_word Xstatus = map_read(map, cmd_adr); + map_write(map, CMD(0x70), cmd_adr); + chip->state = FL_STATUS; status = map_read(map, cmd_adr); - if (map_word_andequal(map, status, status_OK, status_OK)) - break; - - UDELAY(map, chip, cmd_adr, 1); + map_write(map, CMD(0x50), cmd_adr); + map_write(map, CMD(0x70), cmd_adr); + xip_enable(map, chip, cmd_adr); + printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n", + map->name, Xstatus.x[0], status.x[0]); + goto out; + } - if (++z > 20) { - /* Argh. Not ready for write to buffer */ - map_word Xstatus; - map_write(map, CMD(0x70), cmd_adr); - chip->state = FL_STATUS; - Xstatus = map_read(map, cmd_adr); - /* Odd. Clear status bits */ - map_write(map, CMD(0x50), cmd_adr); - map_write(map, CMD(0x70), cmd_adr); - xip_enable(map, chip, cmd_adr); - printk(KERN_ERR "Chip not ready for buffer write. 
status = %lx, Xstatus = %lx\n", - status.x[0], Xstatus.x[0]); - ret = -EIO; - goto out; - } + /* Figure out the number of words to write */ + word_gap = (-adr & (map_bankwidth(map)-1)); + words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map)); + if (!word_gap) { + words--; + } else { + word_gap = map_bankwidth(map) - word_gap; + adr -= word_gap; + datum = map_word_ff(map); } /* Write length of data to come */ - bytes = len & (map_bankwidth(map)-1); - words = len / map_bankwidth(map); - map_write(map, CMD(words - !bytes), cmd_adr ); + map_write(map, CMD(words), cmd_adr ); /* Write data */ - z = 0; - while(z < words * map_bankwidth(map)) { - map_word datum = map_word_load(map, buf); - map_write(map, datum, adr+z); + vec = *pvec; + vec_seek = *pvec_seek; + do { + int n = map_bankwidth(map) - word_gap; + if (n > vec->iov_len - vec_seek) + n = vec->iov_len - vec_seek; + if (n > len) + n = len; - z += map_bankwidth(map); - buf += map_bankwidth(map); - } + if (!word_gap && len < map_bankwidth(map)) + datum = map_word_ff(map); - if (bytes) { - map_word datum; + datum = map_word_load_partial(map, datum, + vec->iov_base + vec_seek, + word_gap, n); - datum = map_word_ff(map); - datum = map_word_load_partial(map, datum, buf, 0, bytes); - map_write(map, datum, adr+z); - } + len -= n; + word_gap += n; + if (!len || word_gap == map_bankwidth(map)) { + map_write(map, datum, adr); + adr += map_bankwidth(map); + word_gap = 0; + } + + vec_seek += n; + if (vec_seek == vec->iov_len) { + vec++; + vec_seek = 0; + } + } while (len); + *pvec = vec; + *pvec_seek = vec_seek; /* GO GO GO */ map_write(map, CMD(0xd0), cmd_adr); chip->state = FL_WRITING; - INVALIDATE_CACHE_UDELAY(map, chip, - cmd_adr, len, - chip->buffer_write_time); + ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, + initial_adr, initial_len, + chip->buffer_write_time, + chip->buffer_write_time_max); + if (ret) { + map_write(map, CMD(0x70), cmd_adr); + chip->state = FL_STATUS; + xip_enable(map, chip, cmd_adr); + printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name); + goto out; + } - timeo = jiffies + (HZ/2); - z = 0; - for (;;) { - if (chip->state != FL_WRITING) { - /* Someone's suspended the write. Sleep */ - DECLARE_WAITQUEUE(wait, current); - set_current_state(TASK_UNINTERRUPTIBLE); - add_wait_queue(&chip->wq, &wait); - spin_unlock(chip->mutex); - schedule(); - remove_wait_queue(&chip->wq, &wait); - timeo = jiffies + (HZ / 2); /* FIXME */ - spin_lock(chip->mutex); - continue; - } + /* check for errors */ + status = map_read(map, cmd_adr); + if (map_word_bitsset(map, status, CMD(0x1a))) { + unsigned long chipstatus = MERGESTATUS(status); - status = map_read(map, cmd_adr); - if (map_word_andequal(map, status, status_OK, status_OK)) - break; + /* reset status */ + map_write(map, CMD(0x50), cmd_adr); + map_write(map, CMD(0x70), cmd_adr); + xip_enable(map, chip, cmd_adr); - /* OK Still waiting */ - if (time_after(jiffies, timeo)) { - chip->state = FL_STATUS; - xip_enable(map, chip, cmd_adr); - printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n"); + if (chipstatus & 0x02) { + ret = -EROFS; + } else if (chipstatus & 0x08) { + printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name); ret = -EIO; - goto out; + } else { + printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus); + ret = -EINVAL; } - - /* Latency issues. 
Drop the lock, wait a while and retry */ - z++; - UDELAY(map, chip, cmd_adr, 1); - } - if (!z) { - chip->buffer_write_time--; - if (!chip->buffer_write_time) - chip->buffer_write_time++; - } - if (z > 1) - chip->buffer_write_time++; - - /* Done and happy. */ - chip->state = FL_STATUS; - /* check for lock bit */ - if (map_word_bitsset(map, status, CMD(0x02))) { - /* clear status */ - map_write(map, CMD(0x50), cmd_adr); - /* put back into read status register mode */ - map_write(map, CMD(0x70), adr); - ret = -EROFS; + goto out; } xip_enable(map, chip, cmd_adr); - out: put_chip(map, chip, cmd_adr); - spin_unlock(chip->mutex); + out: DISABLE_VPP(map); + put_chip(map, chip, cmd_adr); + mutex_unlock(&chip->mutex); return ret; } -static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to, - size_t len, size_t *retlen, const u_char *buf) +static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs, + unsigned long count, loff_t to, size_t *retlen) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; int ret = 0; int chipnum; - unsigned long ofs; + unsigned long ofs, vec_seek, i; + size_t len = 0; + + for (i = 0; i < count; i++) + len += vecs[i].iov_len; - *retlen = 0; if (!len) return 0; chipnum = to >> cfi->chipshift; - ofs = to - (chipnum << cfi->chipshift); + ofs = to - (chipnum << cfi->chipshift); + vec_seek = 0; - /* If it's not bus-aligned, do the first word write */ - if (ofs & (map_bankwidth(map)-1)) { - size_t local_len = (-ofs)&(map_bankwidth(map)-1); - if (local_len > len) - local_len = len; - ret = cfi_intelext_write_words(mtd, to, local_len, - retlen, buf); - if (ret) - return ret; - ofs += local_len; - buf += local_len; - len -= local_len; - - if (ofs >> cfi->chipshift) { - chipnum ++; - ofs = 0; - if (chipnum == cfi->numchips) - return 0; - } - } - - while(len) { + do { /* We must not cross write block boundaries */ int size = wbufsize - (ofs & (wbufsize-1)); if (size > len) size = len; - ret = do_write_buffer(map, &cfi->chips[chipnum], - ofs, buf, size); + ret = do_write_buffer(map, &cfi->chips[chipnum], + ofs, &vecs, &vec_seek, size); if (ret) return ret; ofs += size; - buf += size; (*retlen) += size; len -= size; if (ofs >> cfi->chipshift) { - chipnum ++; + chipnum ++; ofs = 0; if (chipnum == cfi->numchips) return 0; } - } + + /* Be nice and reschedule with the chip in a usable state for other + processes. 
*/ + cond_resched(); + + } while (len); + return 0; } +static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to, + size_t len, size_t *retlen, const u_char *buf) +{ + struct kvec vec; + + vec.iov_base = (void *) buf; + vec.iov_len = len; + + return cfi_intelext_writev(mtd, &vec, 1, to, retlen); +} + static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk) { struct cfi_private *cfi = map->fldrv_priv; - map_word status, status_OK; - unsigned long timeo; + map_word status; int retries = 3; - DECLARE_WAITQUEUE(wait, current); - int ret = 0; + int ret; adr += chip->start; - /* Let's determine this according to the interleave only once */ - status_OK = CMD(0x80); - retry: - spin_lock(chip->mutex); + mutex_lock(&chip->mutex); ret = get_chip(map, chip, adr, FL_ERASING); if (ret) { - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); return ret; } @@ -1644,54 +1928,16 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, chip->state = FL_ERASING; chip->erase_suspended = 0; - INVALIDATE_CACHE_UDELAY(map, chip, - adr, len, - chip->erase_time*1000/2); - - /* FIXME. Use a timer to check this, and return immediately. */ - /* Once the state machine's known to be working I'll do that */ - - timeo = jiffies + (HZ*20); - for (;;) { - if (chip->state != FL_ERASING) { - /* Someone's suspended the erase. Sleep */ - set_current_state(TASK_UNINTERRUPTIBLE); - add_wait_queue(&chip->wq, &wait); - spin_unlock(chip->mutex); - schedule(); - remove_wait_queue(&chip->wq, &wait); - spin_lock(chip->mutex); - continue; - } - if (chip->erase_suspended) { - /* This erase was suspended and resumed. - Adjust the timeout */ - timeo = jiffies + (HZ*20); /* FIXME */ - chip->erase_suspended = 0; - } - - status = map_read(map, adr); - if (map_word_andequal(map, status, status_OK, status_OK)) - break; - - /* OK Still waiting */ - if (time_after(jiffies, timeo)) { - map_word Xstatus; - map_write(map, CMD(0x70), adr); - chip->state = FL_STATUS; - Xstatus = map_read(map, adr); - /* Clear status bits */ - map_write(map, CMD(0x50), adr); - map_write(map, CMD(0x70), adr); - xip_enable(map, chip, adr); - printk(KERN_ERR "waiting for erase at %08lx to complete timed out. status = %lx, Xstatus = %lx.\n", - adr, status.x[0], Xstatus.x[0]); - ret = -EIO; - goto out; - } - - /* Latency issues. Drop the lock, wait a while and retry */ - UDELAY(map, chip, adr, 1000000/HZ); + ret = INVAL_CACHE_AND_WAIT(map, chip, adr, + adr, len, + chip->erase_time, + chip->erase_time_max); + if (ret) { + map_write(map, CMD(0x70), adr); + chip->state = FL_STATUS; + xip_enable(map, chip, adr); + printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name); + goto out; } /* We've broken this before. 
It doesn't hurt to be safe */ @@ -1699,49 +1945,47 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, chip->state = FL_STATUS; status = map_read(map, adr); - /* check for lock bit */ + /* check for errors */ if (map_word_bitsset(map, status, CMD(0x3a))) { - unsigned long chipstatus; + unsigned long chipstatus = MERGESTATUS(status); /* Reset the error bits */ map_write(map, CMD(0x50), adr); map_write(map, CMD(0x70), adr); xip_enable(map, chip, adr); - chipstatus = MERGESTATUS(status); - if ((chipstatus & 0x30) == 0x30) { - printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%lx\n", chipstatus); - ret = -EIO; + printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus); + ret = -EINVAL; } else if (chipstatus & 0x02) { /* Protection bit set */ ret = -EROFS; } else if (chipstatus & 0x8) { /* Voltage */ - printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%lx\n", chipstatus); + printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name); ret = -EIO; - } else if (chipstatus & 0x20) { - if (retries--) { - printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus); - timeo = jiffies + HZ; - put_chip(map, chip, adr); - spin_unlock(chip->mutex); - goto retry; - } - printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%lx\n", adr, chipstatus); + } else if (chipstatus & 0x20 && retries--) { + printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus); + DISABLE_VPP(map); + put_chip(map, chip, adr); + mutex_unlock(&chip->mutex); + goto retry; + } else { + printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus); ret = -EIO; } - } else { - xip_enable(map, chip, adr); - ret = 0; + + goto out; } - out: put_chip(map, chip, adr); - spin_unlock(chip->mutex); + xip_enable(map, chip, adr); + out: DISABLE_VPP(map); + put_chip(map, chip, adr); + mutex_unlock(&chip->mutex); return ret; } -int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr) +static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr) { unsigned long ofs, len; int ret; @@ -1755,7 +1999,7 @@ int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr) instr->state = MTD_ERASE_DONE; mtd_erase_callback(instr); - + return 0; } @@ -1770,18 +2014,18 @@ static void cfi_intelext_sync (struct mtd_info *mtd) for (i=0; !ret && i<cfi->numchips; i++) { chip = &cfi->chips[i]; - spin_lock(chip->mutex); + mutex_lock(&chip->mutex); ret = get_chip(map, chip, chip->start, FL_SYNCING); if (!ret) { chip->oldstate = chip->state; chip->state = FL_SYNCING; - /* No need to wake_up() on this state change - + /* No need to wake_up() on this state change - * as the whole point is that nobody can do anything * with the chip now anyway. 
*/ } - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); } /* Unlock the chips again */ @@ -1789,19 +2033,18 @@ static void cfi_intelext_sync (struct mtd_info *mtd) for (i--; i >=0; i--) { chip = &cfi->chips[i]; - spin_lock(chip->mutex); - + mutex_lock(&chip->mutex); + if (chip->state == FL_SYNCING) { chip->state = chip->oldstate; chip->oldstate = FL_READY; wake_up(&chip->wq); } - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); } } -#ifdef DEBUG_LOCK_BITS -static int __xipram do_printlockstatus_oneblock(struct map_info *map, +static int __xipram do_getlockstatus_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk) @@ -1815,8 +2058,17 @@ static int __xipram do_printlockstatus_oneblock(struct map_info *map, chip->state = FL_JEDEC_QUERY; status = cfi_read_query(map, adr+(2*ofs_factor)); xip_enable(map, chip, 0); + return status; +} + +#ifdef DEBUG_LOCK_BITS +static int __xipram do_printlockstatus_oneblock(struct map_info *map, + struct flchip *chip, + unsigned long adr, + int len, void *thunk) +{ printk(KERN_DEBUG "block status register for 0x%08lx is %x\n", - adr, status); + adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk)); return 0; } #endif @@ -1829,25 +2081,21 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip { struct cfi_private *cfi = map->fldrv_priv; struct cfi_pri_intelext *extp = cfi->cmdset_priv; - map_word status, status_OK; - unsigned long timeo = jiffies + HZ; + int mdelay; int ret; adr += chip->start; - /* Let's determine this according to the interleave only once */ - status_OK = CMD(0x80); - - spin_lock(chip->mutex); + mutex_lock(&chip->mutex); ret = get_chip(map, chip, adr, FL_LOCKING); if (ret) { - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); return ret; } ENABLE_VPP(map); xip_disable(map, chip, adr); - + map_write(map, CMD(0x60), adr); if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) { map_write(map, CMD(0x01), adr); @@ -1862,97 +2110,90 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip * If Instant Individual Block Locking supported then no need * to delay. */ + /* + * Unlocking may take up to 1.4 seconds on some Intel flashes. So + * lets use a max of 1.5 seconds (1500ms) as timeout. + * + * See "Clear Block Lock-Bits Time" on page 40 in + * "3 Volt Intel StrataFlash Memory" 28F128J3,28F640J3,28F320J3 manual + * from February 2003 + */ + mdelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1500 : 0; - if (!extp || !(extp->FeatureSupport & (1 << 5))) - UDELAY(map, chip, adr, 1000000/HZ); - - /* FIXME. Use a timer to check this, and return immediately. */ - /* Once the state machine's known to be working I'll do that */ - - timeo = jiffies + (HZ*20); - for (;;) { - - status = map_read(map, adr); - if (map_word_andequal(map, status, status_OK, status_OK)) - break; - - /* OK Still waiting */ - if (time_after(jiffies, timeo)) { - map_word Xstatus; - map_write(map, CMD(0x70), adr); - chip->state = FL_STATUS; - Xstatus = map_read(map, adr); - xip_enable(map, chip, adr); - printk(KERN_ERR "waiting for unlock to complete timed out. status = %lx, Xstatus = %lx.\n", - status.x[0], Xstatus.x[0]); - put_chip(map, chip, adr); - spin_unlock(chip->mutex); - return -EIO; - } - - /* Latency issues. 
Drop the lock, wait a while and retry */ - UDELAY(map, chip, adr, 1); + ret = WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000); + if (ret) { + map_write(map, CMD(0x70), adr); + chip->state = FL_STATUS; + xip_enable(map, chip, adr); + printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name); + goto out; } - - /* Done and happy. */ - chip->state = FL_STATUS; + xip_enable(map, chip, adr); + out: DISABLE_VPP(map); put_chip(map, chip, adr); - spin_unlock(chip->mutex); - return 0; + mutex_unlock(&chip->mutex); + return ret; } -static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len) +static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) { int ret; #ifdef DEBUG_LOCK_BITS printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n", - __FUNCTION__, ofs, len); + __func__, ofs, len); cfi_varsize_frob(mtd, do_printlockstatus_oneblock, - ofs, len, 0); + ofs, len, NULL); #endif - ret = cfi_varsize_frob(mtd, do_xxlock_oneblock, + ret = cfi_varsize_frob(mtd, do_xxlock_oneblock, ofs, len, DO_XXLOCK_ONEBLOCK_LOCK); - + #ifdef DEBUG_LOCK_BITS printk(KERN_DEBUG "%s: lock status after, ret=%d\n", - __FUNCTION__, ret); + __func__, ret); cfi_varsize_frob(mtd, do_printlockstatus_oneblock, - ofs, len, 0); + ofs, len, NULL); #endif return ret; } -static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len) +static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) { int ret; #ifdef DEBUG_LOCK_BITS printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n", - __FUNCTION__, ofs, len); + __func__, ofs, len); cfi_varsize_frob(mtd, do_printlockstatus_oneblock, - ofs, len, 0); + ofs, len, NULL); #endif ret = cfi_varsize_frob(mtd, do_xxlock_oneblock, ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK); - + #ifdef DEBUG_LOCK_BITS printk(KERN_DEBUG "%s: lock status after, ret=%d\n", - __FUNCTION__, ret); - cfi_varsize_frob(mtd, do_printlockstatus_oneblock, - ofs, len, 0); + __func__, ret); + cfi_varsize_frob(mtd, do_printlockstatus_oneblock, + ofs, len, NULL); #endif - + return ret; } +static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs, + uint64_t len) +{ + return cfi_varsize_frob(mtd, do_getlockstatus_oneblock, + ofs, len, NULL) ? 1 : 0; +} + #ifdef CONFIG_MTD_OTP -typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip, +typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip, u_long data_offset, u_char *buf, u_int size, u_long prot_offset, u_int groupno, u_int groupsize); @@ -1963,10 +2204,10 @@ do_otp_read(struct map_info *map, struct flchip *chip, u_long offset, struct cfi_private *cfi = map->fldrv_priv; int ret; - spin_lock(chip->mutex); + mutex_lock(&chip->mutex); ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY); if (ret) { - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); return ret; } @@ -1985,7 +2226,7 @@ do_otp_read(struct map_info *map, struct flchip *chip, u_long offset, INVALIDATE_CACHED_RANGE(map, chip->start + offset, size); put_chip(map, chip, chip->start); - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); return 0; } @@ -2003,7 +2244,7 @@ do_otp_write(struct map_info *map, struct flchip *chip, u_long offset, datum = map_word_load_partial(map, datum, buf, gap, n); ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE); - if (ret) + if (ret) return ret; offset += n; @@ -2057,7 +2298,7 @@ static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len, /* Some chips have OTP located in the _top_ partition only. 
For example: Intel 28F256L18T (T means top-parameter device) */ - if (cfi->mfr == MANUFACTURER_INTEL) { + if (cfi->mfr == CFI_MFR_INTEL) { switch (cfi->id) { case 0x880b: case 0x880c: @@ -2196,40 +2437,66 @@ static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd, NULL, do_otp_lock, 1); } -static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, - struct otp_info *buf, size_t len) -{ - size_t retlen; - int ret; +static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, size_t len, + size_t *retlen, struct otp_info *buf) - ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0); - return ret ? : retlen; +{ + return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf, + NULL, 0); } -static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd, - struct otp_info *buf, size_t len) +static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd, size_t len, + size_t *retlen, struct otp_info *buf) { - size_t retlen; - int ret; - - ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1); - return ret ? : retlen; + return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf, + NULL, 1); } #endif +static void cfi_intelext_save_locks(struct mtd_info *mtd) +{ + struct mtd_erase_region_info *region; + int block, status, i; + unsigned long adr; + size_t len; + + for (i = 0; i < mtd->numeraseregions; i++) { + region = &mtd->eraseregions[i]; + if (!region->lockmap) + continue; + + for (block = 0; block < region->numblocks; block++){ + len = region->erasesize; + adr = region->offset + block * len; + + status = cfi_varsize_frob(mtd, + do_getlockstatus_oneblock, adr, len, NULL); + if (status) + set_bit(block, region->lockmap); + else + clear_bit(block, region->lockmap); + } + } +} + static int cfi_intelext_suspend(struct mtd_info *mtd) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; + struct cfi_pri_intelext *extp = cfi->cmdset_priv; int i; struct flchip *chip; int ret = 0; + if ((mtd->flags & MTD_POWERUP_LOCK) + && extp && (extp->FeatureSupport & (1 << 5))) + cfi_intelext_save_locks(mtd); + for (i=0; !ret && i<cfi->numchips; i++) { chip = &cfi->chips[i]; - spin_lock(chip->mutex); + mutex_lock(&chip->mutex); switch (chip->state) { case FL_READY: @@ -2237,9 +2504,11 @@ static int cfi_intelext_suspend(struct mtd_info *mtd) case FL_CFI_QUERY: case FL_JEDEC_QUERY: if (chip->oldstate == FL_READY) { + /* place the chip in a known state before suspend */ + map_write(map, CMD(0xFF), cfi->chips[i].start); chip->oldstate = chip->state; chip->state = FL_PM_SUSPENDED; - /* No need to wake_up() on this state change - + /* No need to wake_up() on this state change - * as the whole point is that nobody can do anything * with the chip now anyway. */ @@ -2254,12 +2523,12 @@ static int cfi_intelext_suspend(struct mtd_info *mtd) allowed to. Or should we return -EAGAIN, because the upper layers ought to have already shut down anything which was using the device anyway? The latter for now. 
*/ - printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->oldstate); + printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state); ret = -EAGAIN; case FL_PM_SUSPENDED: break; } - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); } /* Unlock the chips again */ @@ -2267,9 +2536,9 @@ static int cfi_intelext_suspend(struct mtd_info *mtd) if (ret) { for (i--; i >=0; i--) { chip = &cfi->chips[i]; - - spin_lock(chip->mutex); - + + mutex_lock(&chip->mutex); + if (chip->state == FL_PM_SUSPENDED) { /* No need to force it into a known state here, because we're returning failure, and it didn't @@ -2278,26 +2547,47 @@ static int cfi_intelext_suspend(struct mtd_info *mtd) chip->oldstate = FL_READY; wake_up(&chip->wq); } - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); } - } - + } + return ret; } +static void cfi_intelext_restore_locks(struct mtd_info *mtd) +{ + struct mtd_erase_region_info *region; + int block, i; + unsigned long adr; + size_t len; + + for (i = 0; i < mtd->numeraseregions; i++) { + region = &mtd->eraseregions[i]; + if (!region->lockmap) + continue; + + for_each_clear_bit(block, region->lockmap, region->numblocks) { + len = region->erasesize; + adr = region->offset + block * len; + cfi_intelext_unlock(mtd, adr, len); + } + } +} + static void cfi_intelext_resume(struct mtd_info *mtd) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; + struct cfi_pri_intelext *extp = cfi->cmdset_priv; int i; struct flchip *chip; for (i=0; i<cfi->numchips; i++) { - + chip = &cfi->chips[i]; - spin_lock(chip->mutex); - + mutex_lock(&chip->mutex); + /* Go to known state. Chip may have been power cycled */ if (chip->state == FL_PM_SUSPENDED) { map_write(map, CMD(0xFF), cfi->chips[i].start); @@ -2305,8 +2595,12 @@ static void cfi_intelext_resume(struct mtd_info *mtd) wake_up(&chip->wq); } - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); } + + if ((mtd->flags & MTD_POWERUP_LOCK) + && extp && (extp->FeatureSupport & (1 << 5))) + cfi_intelext_restore_locks(mtd); } static int cfi_intelext_reset(struct mtd_info *mtd) @@ -2319,15 +2613,16 @@ static int cfi_intelext_reset(struct mtd_info *mtd) struct flchip *chip = &cfi->chips[i]; /* force the completion of any ongoing operation - and switch to array mode so any bootloader in + and switch to array mode so any bootloader in flash is accessible for soft reboot. 
*/ - spin_lock(chip->mutex); - ret = get_chip(map, chip, chip->start, FL_SYNCING); + mutex_lock(&chip->mutex); + ret = get_chip(map, chip, chip->start, FL_SHUTDOWN); if (!ret) { map_write(map, CMD(0xff), chip->start); - chip->state = FL_READY; + chip->state = FL_SHUTDOWN; + put_chip(map, chip, chip->start); } - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); } return 0; @@ -2347,34 +2642,24 @@ static void cfi_intelext_destroy(struct mtd_info *mtd) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; + struct mtd_erase_region_info *region; + int i; cfi_intelext_reset(mtd); unregister_reboot_notifier(&mtd->reboot_notifier); kfree(cfi->cmdset_priv); kfree(cfi->cfiq); kfree(cfi->chips[0].priv); kfree(cfi); + for (i = 0; i < mtd->numeraseregions; i++) { + region = &mtd->eraseregions[i]; + if (region->lockmap) + kfree(region->lockmap); + } kfree(mtd->eraseregions); } -static char im_name_1[]="cfi_cmdset_0001"; -static char im_name_3[]="cfi_cmdset_0003"; - -static int __init cfi_intelext_init(void) -{ - inter_module_register(im_name_1, THIS_MODULE, &cfi_cmdset_0001); - inter_module_register(im_name_3, THIS_MODULE, &cfi_cmdset_0001); - return 0; -} - -static void __exit cfi_intelext_exit(void) -{ - inter_module_unregister(im_name_1); - inter_module_unregister(im_name_3); -} - -module_init(cfi_intelext_init); -module_exit(cfi_intelext_exit); - MODULE_LICENSE("GPL"); MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al."); MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips"); +MODULE_ALIAS("cfi_cmdset_0003"); +MODULE_ALIAS("cfi_cmdset_0200"); diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index 8505f118f2d..e21fde9d4d7 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c @@ -10,23 +10,20 @@ * * 4_by_16 work by Carolyn J. 
Smith * - * XIP support hooks by Vitaly Wool (based on code for Intel flash + * XIP support hooks by Vitaly Wool (based on code for Intel flash * by Nicolas Pitre) - * - * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com * - * This code is GPL + * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0 * - * $Id: cfi_cmdset_0002.c,v 1.118 2005/07/04 22:34:29 gleixner Exp $ + * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com * + * This code is GPL */ -#include <linux/config.h> #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/sched.h> -#include <linux/init.h> #include <asm/io.h> #include <asm/byteorder.h> @@ -34,7 +31,9 @@ #include <linux/slab.h> #include <linux/delay.h> #include <linux/interrupt.h> -#include <linux/mtd/compatmac.h> +#include <linux/reboot.h> +#include <linux/of.h> +#include <linux/of_platform.h> #include <linux/mtd/map.h> #include <linux/mtd/mtd.h> #include <linux/mtd/cfi.h> @@ -45,10 +44,10 @@ #define MAX_WORD_RETRIES 3 -#define MANUFACTURER_AMD 0x0001 -#define MANUFACTURER_SST 0x00BF #define SST49LF004B 0x0060 +#define SST49LF040B 0x0050 #define SST49LF008A 0x005a +#define AT49BV6416 0x00d6 static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); @@ -58,8 +57,12 @@ static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *); static void cfi_amdstd_sync (struct mtd_info *); static int cfi_amdstd_suspend (struct mtd_info *); static void cfi_amdstd_resume (struct mtd_info *); +static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *); static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); +static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, + size_t *retlen, const u_char *buf); + static void cfi_amdstd_destroy(struct mtd_info *); struct mtd_info *cfi_cmdset_0002(struct map_info *, int); @@ -69,6 +72,13 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr); #include "fwh_lock.h" +static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); +static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); + +static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); +static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); +static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len); + static struct mtd_chip_driver cfi_amdstd_chipdrv = { .probe = NULL, /* Not usable directly */ .destroy = cfi_amdstd_destroy, @@ -93,7 +103,7 @@ static void cfi_tell_features(struct cfi_pri_amdstd *extp) }; printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1); - printk(" Address sensitive unlock: %s\n", + printk(" Address sensitive unlock: %s\n", (extp->SiliconRevision & 1) ? 
"Not required" : "Required"); if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend)) @@ -118,9 +128,9 @@ static void cfi_tell_features(struct cfi_pri_amdstd *extp) else printk(" Page mode: %d word page\n", extp->PageMode << 2); - printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n", + printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n", extp->VppMin >> 4, extp->VppMin & 0xf); - printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n", + printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n", extp->VppMax >> 4, extp->VppMax & 0xf); if (extp->TopBottom < ARRAY_SIZE(top_bottom)) @@ -132,7 +142,7 @@ static void cfi_tell_features(struct cfi_pri_amdstd *extp) #ifdef AMD_BOOTLOC_BUG /* Wheee. Bring me the head of someone at AMD. */ -static void fixup_amd_bootblock(struct mtd_info *mtd, void* param) +static void fixup_amd_bootblock(struct mtd_info *mtd) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; @@ -142,63 +152,249 @@ static void fixup_amd_bootblock(struct mtd_info *mtd, void* param) if (((major << 8) | minor) < 0x3131) { /* CFI version 1.0 => don't trust bootloc */ + + pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n", + map->name, cfi->mfr, cfi->id); + + /* AFAICS all 29LV400 with a bottom boot block have a device ID + * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode. + * These were badly detected as they have the 0x80 bit set + * so treat them as a special case. + */ + if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) && + + /* Macronix added CFI to their 2nd generation + * MX29LV400C B/T but AFAICS no other 29LV400 (AMD, + * Fujitsu, Spansion, EON, ESI and older Macronix) + * has CFI. + * + * Therefore also check the manufacturer. + * This reduces the risk of false detection due to + * the 8-bit device ID. + */ + (cfi->mfr == CFI_MFR_MACRONIX)) { + pr_debug("%s: Macronix MX29LV400C with bottom boot block" + " detected\n", map->name); + extp->TopBottom = 2; /* bottom boot */ + } else if (cfi->id & 0x80) { printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id); extp->TopBottom = 3; /* top boot */ } else { extp->TopBottom = 2; /* bottom boot */ } + + pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;" + " deduced %s from Device ID\n", map->name, major, minor, + extp->TopBottom == 2 ? "bottom" : "top"); } } #endif -static void fixup_use_write_buffers(struct mtd_info *mtd, void *param) +static void fixup_use_write_buffers(struct mtd_info *mtd) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; if (cfi->cfiq->BufWriteTimeoutTyp) { - DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" ); - mtd->write = cfi_amdstd_write_buffers; + pr_debug("Using buffer write method\n" ); + mtd->_write = cfi_amdstd_write_buffers; } } -static void fixup_use_secsi(struct mtd_info *mtd, void *param) +/* Atmel chips don't use the same PRI format as AMD chips */ +static void fixup_convert_atmel_pri(struct mtd_info *mtd) +{ + struct map_info *map = mtd->priv; + struct cfi_private *cfi = map->fldrv_priv; + struct cfi_pri_amdstd *extp = cfi->cmdset_priv; + struct cfi_pri_atmel atmel_pri; + + memcpy(&atmel_pri, extp, sizeof(atmel_pri)); + memset((char *)extp + 5, 0, sizeof(*extp) - 5); + + if (atmel_pri.Features & 0x02) + extp->EraseSuspend = 2; + + /* Some chips got it backwards... 
*/ + if (cfi->id == AT49BV6416) { + if (atmel_pri.BottomBoot) + extp->TopBottom = 3; + else + extp->TopBottom = 2; + } else { + if (atmel_pri.BottomBoot) + extp->TopBottom = 2; + else + extp->TopBottom = 3; + } + + /* burst write mode not supported */ + cfi->cfiq->BufWriteTimeoutTyp = 0; + cfi->cfiq->BufWriteTimeoutMax = 0; +} + +static void fixup_use_secsi(struct mtd_info *mtd) { /* Setup for chips with a secsi area */ - mtd->read_user_prot_reg = cfi_amdstd_secsi_read; - mtd->read_fact_prot_reg = cfi_amdstd_secsi_read; + mtd->_read_user_prot_reg = cfi_amdstd_secsi_read; + mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read; } -static void fixup_use_erase_chip(struct mtd_info *mtd, void *param) +static void fixup_use_erase_chip(struct mtd_info *mtd) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; if ((cfi->cfiq->NumEraseRegions == 1) && ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) { - mtd->erase = cfi_amdstd_erase_chip; + mtd->_erase = cfi_amdstd_erase_chip; + } + +} + +/* + * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors + * locked by default. + */ +static void fixup_use_atmel_lock(struct mtd_info *mtd) +{ + mtd->_lock = cfi_atmel_lock; + mtd->_unlock = cfi_atmel_unlock; + mtd->flags |= MTD_POWERUP_LOCK; +} + +static void fixup_old_sst_eraseregion(struct mtd_info *mtd) +{ + struct map_info *map = mtd->priv; + struct cfi_private *cfi = map->fldrv_priv; + + /* + * These flashes report two separate eraseblock regions based on the + * sector_erase-size and block_erase-size, although they both operate on the + * same memory. This is not allowed according to CFI, so we just pick the + * sector_erase-size. + */ + cfi->cfiq->NumEraseRegions = 1; +} + +static void fixup_sst39vf(struct mtd_info *mtd) +{ + struct map_info *map = mtd->priv; + struct cfi_private *cfi = map->fldrv_priv; + + fixup_old_sst_eraseregion(mtd); + + cfi->addr_unlock1 = 0x5555; + cfi->addr_unlock2 = 0x2AAA; +} + +static void fixup_sst39vf_rev_b(struct mtd_info *mtd) +{ + struct map_info *map = mtd->priv; + struct cfi_private *cfi = map->fldrv_priv; + + fixup_old_sst_eraseregion(mtd); + + cfi->addr_unlock1 = 0x555; + cfi->addr_unlock2 = 0x2AA; + + cfi->sector_erase_cmd = CMD(0x50); +} + +static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd) +{ + struct map_info *map = mtd->priv; + struct cfi_private *cfi = map->fldrv_priv; + + fixup_sst39vf_rev_b(mtd); + + /* + * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where + * it should report a size of 8KBytes (0x0020*256). 
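Several of the fixups here (and cfi_amdstd_setup() further down) rely on the packed layout of a CFI erase-region descriptor: bits 0-15 hold the number of blocks minus one, bits 16-31 hold the block size divided by 256. A small user-space sketch, for illustration only, decoding the EraseRegionInfo value 0x002003ff that the SST 38VF640x fixup writes just below:

```c
#include <stdio.h>

int main(void)
{
	unsigned int info = 0x002003ff;            /* value set by fixup_sst38vf640x_sectorsize() */
	unsigned int blocks = (info & 0xffff) + 1; /* 0x03ff + 1 = 1024 sectors */
	unsigned int size = (info >> 16) * 256;    /* 0x0020 * 256 = 8192 bytes (8 KiB) */

	printf("%u sectors of %u bytes\n", blocks, size);
	return 0;
}
```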
+ */ + cfi->cfiq->EraseRegionInfo[0] = 0x002003ff; + pr_warning("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", mtd->name); +} + +static void fixup_s29gl064n_sectors(struct mtd_info *mtd) +{ + struct map_info *map = mtd->priv; + struct cfi_private *cfi = map->fldrv_priv; + + if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) { + cfi->cfiq->EraseRegionInfo[0] |= 0x0040; + pr_warning("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n", mtd->name); } - } +static void fixup_s29gl032n_sectors(struct mtd_info *mtd) +{ + struct map_info *map = mtd->priv; + struct cfi_private *cfi = map->fldrv_priv; + + if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) { + cfi->cfiq->EraseRegionInfo[1] &= ~0x0040; + pr_warning("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n", mtd->name); + } +} + +static void fixup_s29ns512p_sectors(struct mtd_info *mtd) +{ + struct map_info *map = mtd->priv; + struct cfi_private *cfi = map->fldrv_priv; + + /* + * S29NS512P flash uses more than 8bits to report number of sectors, + * which is not permitted by CFI. + */ + cfi->cfiq->EraseRegionInfo[0] = 0x020001ff; + pr_warning("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n", mtd->name); +} + +/* Used to fix CFI-Tables of chips without Extended Query Tables */ +static struct cfi_fixup cfi_nopri_fixup_table[] = { + { CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */ + { CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */ + { CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */ + { CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */ + { CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */ + { CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */ + { CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */ + { CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */ + { 0, 0, NULL } +}; + static struct cfi_fixup cfi_fixup_table[] = { + { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri }, #ifdef AMD_BOOTLOC_BUG - { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL }, + { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock }, + { CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock }, + { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock }, #endif - { CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, }, - { CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, }, - { CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, }, - { CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, }, - { CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, }, - { CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, }, + { CFI_MFR_AMD, 0x0050, fixup_use_secsi }, + { CFI_MFR_AMD, 0x0053, fixup_use_secsi }, + { CFI_MFR_AMD, 0x0055, fixup_use_secsi }, + { CFI_MFR_AMD, 0x0056, fixup_use_secsi }, + { CFI_MFR_AMD, 0x005C, fixup_use_secsi }, + { CFI_MFR_AMD, 0x005F, fixup_use_secsi }, + { CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors }, + { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors }, + { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors }, + { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors }, + { CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors }, + { CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */ + { CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */ + { CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */ + { CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */ #if !FORCE_WORD_WRITE - { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, }, + { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers }, #endif - { 0, 0, NULL, NULL } + { 0, 0, NULL } 
}; static struct cfi_fixup jedec_fixup_table[] = { - { MANUFACTURER_SST, SST49LF004B, fixup_use_fwh_lock, NULL, }, - { MANUFACTURER_SST, SST49LF008A, fixup_use_fwh_lock, NULL, }, - { 0, 0, NULL, NULL } + { CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock }, + { CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock }, + { CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock }, + { 0, 0, NULL } }; static struct cfi_fixup fixup_table[] = { @@ -207,95 +403,217 @@ static struct cfi_fixup fixup_table[] = { * well. This table is to pick all cases where * we know that is the case. */ - { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL }, - { 0, 0, NULL, NULL } + { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip }, + { CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock }, + { 0, 0, NULL } }; +static void cfi_fixup_major_minor(struct cfi_private *cfi, + struct cfi_pri_amdstd *extp) +{ + if (cfi->mfr == CFI_MFR_SAMSUNG) { + if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') || + (extp->MajorVersion == '3' && extp->MinorVersion == '3')) { + /* + * Samsung K8P2815UQB and K8D6x16UxM chips + * report major=0 / minor=0. + * K8D3x16UxC chips report major=3 / minor=3. + */ + printk(KERN_NOTICE " Fixing Samsung's Amd/Fujitsu" + " Extended Query version to 1.%c\n", + extp->MinorVersion); + extp->MajorVersion = '1'; + } + } + + /* + * SST 38VF640x chips report major=0xFF / minor=0xFF. + */ + if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) { + extp->MajorVersion = '1'; + extp->MinorVersion = '0'; + } +} + +static int is_m29ew(struct cfi_private *cfi) +{ + if (cfi->mfr == CFI_MFR_INTEL && + ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) || + (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e))) + return 1; + return 0; +} + +/* + * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20: + * Some revisions of the M29EW suffer from erase suspend hang ups. In + * particular, it can occur when the sequence + * Erase Confirm -> Suspend -> Program -> Resume + * causes a lockup due to internal timing issues. The consequence is that the + * erase cannot be resumed without inserting a dummy command after programming + * and prior to resuming. [...] The work-around is to issue a dummy write cycle + * that writes an F0 command code before the RESUME command. + */ +static void cfi_fixup_m29ew_erase_suspend(struct map_info *map, + unsigned long adr) +{ + struct cfi_private *cfi = map->fldrv_priv; + /* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */ + if (is_m29ew(cfi)) + map_write(map, CMD(0xF0), adr); +} + +/* + * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22: + * + * Some revisions of the M29EW (for example, A1 and A2 step revisions) + * are affected by a problem that could cause a hang up when an ERASE SUSPEND + * command is issued after an ERASE RESUME operation without waiting for a + * minimum delay. The result is that once the ERASE seems to be completed + * (no bits are toggling), the contents of the Flash memory block on which + * the erase was ongoing could be inconsistent with the expected values + * (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84 + * values), causing a consequent failure of the ERASE operation. + * The occurrence of this issue could be high, especially when file system + * operations on the Flash are intensive. As a result, it is recommended + * that a patch be applied. 
Intensive file system operations can cause many + * calls to the garbage routine to free Flash space (also by erasing physical + * Flash blocks) and as a result, many consecutive SUSPEND and RESUME + * commands can occur. The problem disappears when a delay is inserted after + * the RESUME command by using the udelay() function available in Linux. + * The DELAY value must be tuned based on the customer's platform. + * The maximum value that fixes the problem in all cases is 500us. + * But, in our experience, a delay of 30 µs to 50 µs is sufficient + * in most cases. + * We have chosen 500µs because this latency is acceptable. + */ +static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi) +{ + /* + * Resolving the Delay After Resume Issue see Micron TN-13-07 + * Worst case delay must be 500µs but 30-50µs should be ok as well + */ + if (is_m29ew(cfi)) + cfi_udelay(500); +} + struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary) { struct cfi_private *cfi = map->fldrv_priv; + struct device_node __maybe_unused *np = map->device_node; struct mtd_info *mtd; int i; - mtd = kmalloc(sizeof(*mtd), GFP_KERNEL); - if (!mtd) { - printk(KERN_WARNING "Failed to allocate memory for MTD device\n"); + mtd = kzalloc(sizeof(*mtd), GFP_KERNEL); + if (!mtd) return NULL; - } - memset(mtd, 0, sizeof(*mtd)); mtd->priv = map; mtd->type = MTD_NORFLASH; /* Fill in the default mtd operations */ - mtd->erase = cfi_amdstd_erase_varsize; - mtd->write = cfi_amdstd_write_words; - mtd->read = cfi_amdstd_read; - mtd->sync = cfi_amdstd_sync; - mtd->suspend = cfi_amdstd_suspend; - mtd->resume = cfi_amdstd_resume; + mtd->_erase = cfi_amdstd_erase_varsize; + mtd->_write = cfi_amdstd_write_words; + mtd->_read = cfi_amdstd_read; + mtd->_sync = cfi_amdstd_sync; + mtd->_suspend = cfi_amdstd_suspend; + mtd->_resume = cfi_amdstd_resume; mtd->flags = MTD_CAP_NORFLASH; mtd->name = map->name; + mtd->writesize = 1; + mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; + + pr_debug("MTD %s(): write buffer size %d\n", __func__, + mtd->writebufsize); + + mtd->_panic_write = cfi_amdstd_panic_write; + mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot; if (cfi->cfi_mode==CFI_MODE_CFI){ unsigned char bootloc; - /* - * It's a real CFI chip, not one for which the probe - * routine faked a CFI structure. So we read the feature - * table from it. - */ __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR; struct cfi_pri_amdstd *extp; extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu"); - if (!extp) { - kfree(mtd); - return NULL; - } + if (extp) { + /* + * It's a real CFI chip, not one for which the probe + * routine faked a CFI structure. 
+ */ + cfi_fixup_major_minor(cfi, extp); + + /* + * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5 + * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19 + * http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf + * http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf + * http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf + */ + if (extp->MajorVersion != '1' || + (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) { + printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query " + "version %c.%c (%#02x/%#02x).\n", + extp->MajorVersion, extp->MinorVersion, + extp->MajorVersion, extp->MinorVersion); + kfree(extp); + kfree(mtd); + return NULL; + } - /* Install our own private info structure */ - cfi->cmdset_priv = extp; + printk(KERN_INFO " Amd/Fujitsu Extended Query version %c.%c.\n", + extp->MajorVersion, extp->MinorVersion); - /* Apply cfi device specific fixups */ - cfi_fixup(mtd, cfi_fixup_table); + /* Install our own private info structure */ + cfi->cmdset_priv = extp; + + /* Apply cfi device specific fixups */ + cfi_fixup(mtd, cfi_fixup_table); #ifdef DEBUG_CFI_FEATURES - /* Tell the user about it in lots of lovely detail */ - cfi_tell_features(extp); -#endif - - bootloc = extp->TopBottom; - if ((bootloc != 2) && (bootloc != 3)) { - printk(KERN_WARNING "%s: CFI does not contain boot " - "bank location. Assuming top.\n", map->name); - bootloc = 2; - } + /* Tell the user about it in lots of lovely detail */ + cfi_tell_features(extp); +#endif - if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) { - printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name); - - for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) { - int j = (cfi->cfiq->NumEraseRegions-1)-i; - __u32 swap; - - swap = cfi->cfiq->EraseRegionInfo[i]; - cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j]; - cfi->cfiq->EraseRegionInfo[j] = swap; +#ifdef CONFIG_OF + if (np && of_property_read_bool( + np, "use-advanced-sector-protection") + && extp->BlkProtUnprot == 8) { + printk(KERN_INFO " Advanced Sector Protection (PPB Locking) supported\n"); + mtd->_lock = cfi_ppb_lock; + mtd->_unlock = cfi_ppb_unlock; + mtd->_is_locked = cfi_ppb_is_locked; } +#endif + + bootloc = extp->TopBottom; + if ((bootloc < 2) || (bootloc > 5)) { + printk(KERN_WARNING "%s: CFI contains unrecognised boot " + "bank location (%d). 
Assuming bottom.\n", + map->name, bootloc); + bootloc = 2; + } + + if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) { + printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name); + + for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) { + int j = (cfi->cfiq->NumEraseRegions-1)-i; + __u32 swap; + + swap = cfi->cfiq->EraseRegionInfo[i]; + cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j]; + cfi->cfiq->EraseRegionInfo[j] = swap; + } + } + /* Set the default CFI lock/unlock addresses */ + cfi->addr_unlock1 = 0x555; + cfi->addr_unlock2 = 0x2aa; } - /* Set the default CFI lock/unlock addresses */ - cfi->addr_unlock1 = 0x555; - cfi->addr_unlock2 = 0x2aa; - /* Modify the unlock address if we are in compatibility mode */ - if ( /* x16 in x8 mode */ - ((cfi->device_type == CFI_DEVICETYPE_X8) && - (cfi->cfiq->InterfaceDesc == 2)) || - /* x32 in x16 mode */ - ((cfi->device_type == CFI_DEVICETYPE_X16) && - (cfi->cfiq->InterfaceDesc == 4))) - { - cfi->addr_unlock1 = 0xaaa; - cfi->addr_unlock2 = 0x555; + cfi_fixup(mtd, cfi_nopri_fixup_table); + + if (!cfi->addr_unlock1 || !cfi->addr_unlock2) { + kfree(mtd); + return NULL; } } /* CFI mode */ @@ -310,13 +628,19 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary) cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp; cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp; cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp; - } - + cfi->chips[i].ref_point_counter = 0; + init_waitqueue_head(&(cfi->chips[i].wq)); + } + map->fldrv = &cfi_amdstd_chipdrv; - + return cfi_amdstd_setup(mtd); } - +struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002"))); +struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002"))); +EXPORT_SYMBOL_GPL(cfi_cmdset_0002); +EXPORT_SYMBOL_GPL(cfi_cmdset_0006); +EXPORT_SYMBOL_GPL(cfi_cmdset_0701); static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd) { @@ -326,24 +650,22 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd) unsigned long offset = 0; int i,j; - printk(KERN_NOTICE "number of %s chips: %d\n", + printk(KERN_NOTICE "number of %s chips: %d\n", (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips); - /* Select the correct geometry setup */ + /* Select the correct geometry setup */ mtd->size = devsize * cfi->numchips; mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips; mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) * mtd->numeraseregions, GFP_KERNEL); - if (!mtd->eraseregions) { - printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n"); + if (!mtd->eraseregions) goto setup_err; - } - + for (i=0; i<cfi->cfiq->NumEraseRegions; i++) { unsigned long ernum, ersize; ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave; ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1; - + if (mtd->erasesize < ersize) { mtd->erasesize = ersize; } @@ -359,29 +681,14 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd) printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize); goto setup_err; } -#if 0 - // debug - for (i=0; i<mtd->numeraseregions;i++){ - printk("%d: offset=0x%x,size=0x%x,blocks=%d\n", - i,mtd->eraseregions[i].offset, - mtd->eraseregions[i].erasesize, - mtd->eraseregions[i].numblocks); - } -#endif - - /* FIXME: erase-suspend-program is broken. 
See - http://lists.infradead.org/pipermail/linux-mtd/2003-December/009001.html */ - printk(KERN_NOTICE "cfi_cmdset_0002: Disabling erase-suspend-program due to code brokenness.\n"); __module_get(THIS_MODULE); + register_reboot_notifier(&mtd->reboot_notifier); return mtd; setup_err: - if(mtd) { - if(mtd->eraseregions) - kfree(mtd->eraseregions); - kfree(mtd); - } + kfree(mtd->eraseregions); + kfree(mtd); kfree(cfi->cmdset_priv); kfree(cfi->cfiq); return NULL; @@ -395,8 +702,8 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd) * * Note that anything more complicated than checking if no bits are toggling * (including checking DQ5 for an error status) is tricky to get working - * correctly and is therefore not done (particulary with interleaved chips - * as each chip must be checked independantly of the others). + * correctly and is therefore not done (particularly with interleaved chips + * as each chip must be checked independently of the others). */ static int __xipram chip_ready(struct map_info *map, unsigned long addr) { @@ -419,8 +726,8 @@ static int __xipram chip_ready(struct map_info *map, unsigned long addr) * * Note that anything more complicated than checking if no bits are toggling * (including checking DQ5 for an error status) is tricky to get working - * correctly and is therefore not done (particulary with interleaved chips - * as each chip must be checked independantly of the others). + * correctly and is therefore not done (particularly with interleaved chips + * as each chip must be checked independently of the others). * */ static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected) @@ -430,7 +737,7 @@ static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word oldd = map_read(map, addr); curd = map_read(map, addr); - return map_word_equal(map, oldd, curd) && + return map_word_equal(map, oldd, curd) && map_word_equal(map, curd, expected); } @@ -453,29 +760,24 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr if (time_after(jiffies, timeo)) { printk(KERN_ERR "Waiting for chip to be ready timed out.\n"); - spin_unlock(chip->mutex); return -EIO; } - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); cfi_udelay(1); - spin_lock(chip->mutex); + mutex_lock(&chip->mutex); /* Someone else might have been playing with it. */ goto retry; } - + case FL_READY: case FL_CFI_QUERY: case FL_JEDEC_QUERY: return 0; case FL_ERASING: - if (mode == FL_WRITING) /* FIXME: Erase-suspend-program appears broken. */ - goto sleep; - - if (!(mode == FL_READY || mode == FL_POINT - || !cfip - || (mode == FL_WRITING && (cfip->EraseSuspend & 0x2)) - || (mode == FL_WRITING && (cfip->EraseSuspend & 0x1)))) + if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) || + !(mode == FL_READY || mode == FL_POINT || + (mode == FL_WRITING && (cfip->EraseSuspend & 0x2)))) goto sleep; /* We could check to see if we're trying to access the sector @@ -499,16 +801,14 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr * there was an error (so leave the erase * routine to recover from it) or we trying to * use the erase-in-progress sector. 
*/ - map_write(map, CMD(0x30), chip->in_progress_block_addr); - chip->state = FL_ERASING; - chip->oldstate = FL_READY; + put_chip(map, chip, adr); printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__); return -EIO; } - - spin_unlock(chip->mutex); + + mutex_unlock(&chip->mutex); cfi_udelay(1); - spin_lock(chip->mutex); + mutex_lock(&chip->mutex); /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING. So we can just loop here. */ } @@ -523,6 +823,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr chip->state = FL_READY; return 0; + case FL_SHUTDOWN: + /* The machine is rebooting */ + return -EIO; + case FL_POINT: /* Only if there's no operation suspended... */ if (mode == FL_READY && chip->oldstate == FL_READY) @@ -532,10 +836,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr sleep: set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&chip->wq, &wait); - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); schedule(); remove_wait_queue(&chip->wq, &wait); - spin_lock(chip->mutex); + mutex_lock(&chip->mutex); goto resettime; } } @@ -547,8 +851,10 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad switch(chip->oldstate) { case FL_ERASING: - chip->state = chip->oldstate; - map_write(map, CMD(0x30), chip->in_progress_block_addr); + cfi_fixup_m29ew_erase_suspend(map, + chip->in_progress_block_addr); + map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr); + cfi_fixup_m29ew_delay_after_resume(cfi); chip->oldstate = FL_READY; chip->state = FL_ERASING; break; @@ -560,8 +866,6 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad case FL_READY: case FL_STATUS: - /* We should really make set_vpp() count, rather than doing this */ - DISABLE_VPP(map); break; default: printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate); @@ -608,7 +912,7 @@ static void __xipram xip_enable(struct map_info *map, struct flchip *chip, * When a delay is required for the flash operation to complete, the * xip_udelay() function is polling for both the given timeout and pending * (but still masked) hardware interrupts. Whenever there is an interrupt - * pending then the flash erase operation is suspended, array mode restored + * pending then the flash erase operation is suspended, array mode restored * and interrupts unmasked. Task scheduling might also happen at that * point. The CPU eventually returns from the interrupt or the call to * schedule() and the suspended flash operation is resumed for the remaining @@ -632,9 +936,9 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip, ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) && (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) { /* - * Let's suspend the erase operation when supported. - * Note that we currently don't try to suspend - * interleaved chips if there is already another + * Let's suspend the erase operation when supported. 
+ * Note that we currently don't try to suspend + * interleaved chips if there is already another * operation suspended (imagine what happens * when one chip was already done with the current * operation while another chip suspended it, then @@ -665,10 +969,10 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip, chip->erase_suspended = 1; map_write(map, CMD(0xf0), adr); (void) map_read(map, adr); - asm volatile (".rep 8; nop; .endr"); + xip_iprefetch(); local_irq_enable(); - spin_unlock(chip->mutex); - asm volatile (".rep 8; nop; .endr"); + mutex_unlock(&chip->mutex); + xip_iprefetch(); cond_resched(); /* @@ -677,21 +981,23 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip, * a suspended erase state. If so let's wait * until it's done. */ - spin_lock(chip->mutex); + mutex_lock(&chip->mutex); while (chip->state != FL_XIP_WHILE_ERASING) { DECLARE_WAITQUEUE(wait, current); set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&chip->wq, &wait); - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); schedule(); remove_wait_queue(&chip->wq, &wait); - spin_lock(chip->mutex); + mutex_lock(&chip->mutex); } /* Disallow XIP again */ local_irq_disable(); + /* Correct Erase Suspend Hangups for M29EW */ + cfi_fixup_m29ew_erase_suspend(map, adr); /* Resume the write or erase operation */ - map_write(map, CMD(0x30), adr); + map_write(map, cfi->sector_erase_cmd, adr); chip->state = oldstate; start = xip_currtime(); } else if (usec >= 1000000/HZ) { @@ -747,17 +1053,17 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip, #define UDELAY(map, chip, adr, usec) \ do { \ - spin_unlock(chip->mutex); \ + mutex_unlock(&chip->mutex); \ cfi_udelay(usec); \ - spin_lock(chip->mutex); \ + mutex_lock(&chip->mutex); \ } while (0) #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \ do { \ - spin_unlock(chip->mutex); \ + mutex_unlock(&chip->mutex); \ INVALIDATE_CACHED_RANGE(map, adr, len); \ cfi_udelay(usec); \ - spin_lock(chip->mutex); \ + mutex_lock(&chip->mutex); \ } while (0) #endif @@ -770,13 +1076,13 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof adr += chip->start; - /* Ensure cmd read/writes are aligned. */ - cmd_addr = adr & ~(map_bankwidth(map)-1); + /* Ensure cmd read/writes are aligned. 
*/ + cmd_addr = adr & ~(map_bankwidth(map)-1); - spin_lock(chip->mutex); + mutex_lock(&chip->mutex); ret = get_chip(map, chip, cmd_addr, FL_READY); if (ret) { - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); return ret; } @@ -789,7 +1095,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof put_chip(map, chip, cmd_addr); - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); return 0; } @@ -803,13 +1109,9 @@ static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_ int ret = 0; /* ofs: offset within the first chip that the first read should start */ - chipnum = (from >> cfi->chipshift); ofs = from - (chipnum << cfi->chipshift); - - *retlen = 0; - while (len) { unsigned long thislen; @@ -843,27 +1145,20 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi struct cfi_private *cfi = map->fldrv_priv; retry: - spin_lock(chip->mutex); + mutex_lock(&chip->mutex); if (chip->state != FL_READY){ -#if 0 - printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state); -#endif set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&chip->wq, &wait); - - spin_unlock(chip->mutex); + + mutex_unlock(&chip->mutex); schedule(); remove_wait_queue(&chip->wq, &wait); -#if 0 - if(signal_pending(current)) - return -EINTR; -#endif timeo = jiffies + HZ; goto retry; - } + } adr += chip->start; @@ -872,16 +1167,16 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); - + map_copy_from(map, buf, adr, len); cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); - + wake_up(&chip->wq); - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); return 0; } @@ -894,16 +1189,11 @@ static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, int chipnum; int ret = 0; - /* ofs: offset within the first chip that the first read should start */ - /* 8 secsi bytes per chip */ chipnum=from>>3; ofs=from & 7; - - *retlen = 0; - while (len) { unsigned long thislen; @@ -950,14 +1240,14 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, adr += chip->start; - spin_lock(chip->mutex); + mutex_lock(&chip->mutex); ret = get_chip(map, chip, adr, FL_WRITING); if (ret) { - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); return ret; } - DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", + pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", __func__, adr, datum.x[0] ); /* @@ -968,7 +1258,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, */ oldd = map_read(map, adr); if (map_word_equal(map, oldd, datum)) { - DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n", + pr_debug("MTD %s(): NOP\n", __func__); goto op_done; } @@ -988,7 +1278,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, chip->word_write_time); /* See comment above for timeout value. 
*/ - timeo = jiffies + uWriteTimeout; + timeo = jiffies + uWriteTimeout; for (;;) { if (chip->state != FL_WRITING) { /* Someone's suspended the write. Sleep */ @@ -996,24 +1286,24 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&chip->wq, &wait); - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); schedule(); remove_wait_queue(&chip->wq, &wait); timeo = jiffies + (HZ / 2); /* FIXME */ - spin_lock(chip->mutex); + mutex_lock(&chip->mutex); continue; } - if (chip_ready(map, adr)) - break; - - if (time_after(jiffies, timeo)) { + if (time_after(jiffies, timeo) && !chip_ready(map, adr)){ xip_enable(map, chip, adr); printk(KERN_WARNING "MTD %s(): software timeout\n", __func__); xip_disable(map, chip, adr); - break; + break; } + if (chip_ready(map, adr)) + break; + /* Latency issues. Drop the lock, wait a while and retry */ UDELAY(map, chip, adr, 1); } @@ -1023,7 +1313,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, map_write( map, CMD(0xF0), chip->start ); /* FIXME - should have reset delay before continuing */ - if (++retry_cnt <= MAX_WORD_RETRIES) + if (++retry_cnt <= MAX_WORD_RETRIES) goto retry; ret = -EIO; @@ -1031,8 +1321,9 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, xip_enable(map, chip, adr); op_done: chip->state = FL_READY; + DISABLE_VPP(map); put_chip(map, chip, adr); - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); return ret; } @@ -1048,10 +1339,6 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len, unsigned long ofs, chipstart; DECLARE_WAITQUEUE(wait, current); - *retlen = 0; - if (!len) - return 0; - chipnum = to >> cfi->chipshift; ofs = to - (chipnum << cfi->chipshift); chipstart = cfi->chips[chipnum].start; @@ -1064,54 +1351,47 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len, map_word tmp_buf; retry: - spin_lock(cfi->chips[chipnum].mutex); + mutex_lock(&cfi->chips[chipnum].mutex); if (cfi->chips[chipnum].state != FL_READY) { -#if 0 - printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state); -#endif set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&cfi->chips[chipnum].wq, &wait); - spin_unlock(cfi->chips[chipnum].mutex); + mutex_unlock(&cfi->chips[chipnum].mutex); schedule(); remove_wait_queue(&cfi->chips[chipnum].wq, &wait); -#if 0 - if(signal_pending(current)) - return -EINTR; -#endif goto retry; } /* Load 'tmp_buf' with old contents of flash */ tmp_buf = map_read(map, bus_ofs+chipstart); - spin_unlock(cfi->chips[chipnum].mutex); + mutex_unlock(&cfi->chips[chipnum].mutex); /* Number of bytes to copy from buffer */ n = min_t(int, len, map_bankwidth(map)-i); - + tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n); - ret = do_write_oneword(map, &cfi->chips[chipnum], + ret = do_write_oneword(map, &cfi->chips[chipnum], bus_ofs, tmp_buf); - if (ret) + if (ret) return ret; - + ofs += n; buf += n; (*retlen) += n; len -= n; if (ofs >> cfi->chipshift) { - chipnum ++; + chipnum ++; ofs = 0; if (chipnum == cfi->numchips) return 0; } } - + /* We are now aligned, write as much as possible */ while(len >= map_bankwidth(map)) { map_word datum; @@ -1129,7 +1409,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len, len -= map_bankwidth(map); if (ofs >> cfi->chipshift) { - chipnum ++; + chipnum ++; ofs = 0; if (chipnum == cfi->numchips) return 0; @@ -1142,37 +1422,30 @@ static int 
cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len, map_word tmp_buf; retry1: - spin_lock(cfi->chips[chipnum].mutex); + mutex_lock(&cfi->chips[chipnum].mutex); if (cfi->chips[chipnum].state != FL_READY) { -#if 0 - printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state); -#endif set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&cfi->chips[chipnum].wq, &wait); - spin_unlock(cfi->chips[chipnum].mutex); + mutex_unlock(&cfi->chips[chipnum].mutex); schedule(); remove_wait_queue(&cfi->chips[chipnum].wq, &wait); -#if 0 - if(signal_pending(current)) - return -EINTR; -#endif goto retry1; } tmp_buf = map_read(map, ofs + chipstart); - spin_unlock(cfi->chips[chipnum].mutex); + mutex_unlock(&cfi->chips[chipnum].mutex); tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len); - - ret = do_write_oneword(map, &cfi->chips[chipnum], + + ret = do_write_oneword(map, &cfi->chips[chipnum], ofs, tmp_buf); - if (ret) + if (ret) return ret; - + (*retlen) += len; } @@ -1184,7 +1457,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len, * FIXME: interleaved mode not tested, and probably not supported! */ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, - unsigned long adr, const u_char *buf, + unsigned long adr, const u_char *buf, int len) { struct cfi_private *cfi = map->fldrv_priv; @@ -1199,25 +1472,24 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, adr += chip->start; cmd_adr = adr; - spin_lock(chip->mutex); + mutex_lock(&chip->mutex); ret = get_chip(map, chip, adr, FL_WRITING); if (ret) { - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); return ret; } datum = map_word_load(map, buf); - DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", + pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", __func__, adr, datum.x[0] ); XIP_INVAL_CACHED_RANGE(map, adr, len); ENABLE_VPP(map); xip_disable(map, chip, cmd_adr); - + cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); - //cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); /* Write Buffer Load */ map_write(map, CMD(0x25), cmd_adr); @@ -1248,8 +1520,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, adr, map_bankwidth(map), chip->word_write_time); - timeo = jiffies + uWriteTimeout; - + timeo = jiffies + uWriteTimeout; + for (;;) { if (chip->state != FL_WRITING) { /* Someone's suspended the write. Sleep */ @@ -1257,39 +1529,52 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&chip->wq, &wait); - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); schedule(); remove_wait_queue(&chip->wq, &wait); timeo = jiffies + (HZ / 2); /* FIXME */ - spin_lock(chip->mutex); + mutex_lock(&chip->mutex); continue; } + if (time_after(jiffies, timeo) && !chip_ready(map, adr)) + break; + if (chip_ready(map, adr)) { xip_enable(map, chip, adr); goto op_done; } - - if( time_after(jiffies, timeo)) - break; /* Latency issues. Drop the lock, wait a while and retry */ UDELAY(map, chip, adr, 1); } - /* reset on all failures. */ - map_write( map, CMD(0xF0), chip->start ); + /* + * Recovery from write-buffer programming failures requires + * the write-to-buffer-reset sequence. 
Since the last part + * of the sequence also works as a normal reset, we can run + * the same commands regardless of why we are here. + * See e.g. + * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf + */ + cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, + cfi->device_type, NULL); + cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, + cfi->device_type, NULL); + cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi, + cfi->device_type, NULL); xip_enable(map, chip, adr); /* FIXME - should have reset delay before continuing */ - printk(KERN_WARNING "MTD %s(): software timeout\n", - __func__ ); + printk(KERN_WARNING "MTD %s(): software timeout, address:0x%.8lx.\n", + __func__, adr); ret = -EIO; op_done: chip->state = FL_READY; + DISABLE_VPP(map); put_chip(map, chip, adr); - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); return ret; } @@ -1305,10 +1590,6 @@ static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len, int chipnum; unsigned long ofs; - *retlen = 0; - if (!len) - return 0; - chipnum = to >> cfi->chipshift; ofs = to - (chipnum << cfi->chipshift); @@ -1343,7 +1624,7 @@ static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len, if (size % map_bankwidth(map)) size -= size % map_bankwidth(map); - ret = do_write_buffer(map, &cfi->chips[chipnum], + ret = do_write_buffer(map, &cfi->chips[chipnum], ofs, buf, size); if (ret) return ret; @@ -1354,7 +1635,7 @@ static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len, len -= size; if (ofs >> cfi->chipshift) { - chipnum ++; + chipnum ++; ofs = 0; if (chipnum == cfi->numchips) return 0; @@ -1374,6 +1655,238 @@ static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len, return 0; } +/* + * Wait for the flash chip to become ready to write data + * + * This is only called during the panic_write() path. When panic_write() + * is called, the kernel is in the process of a panic, and will soon be + * dead. Therefore we don't take any locks, and attempt to get access + * to the chip as soon as possible. + */ +static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip, + unsigned long adr) +{ + struct cfi_private *cfi = map->fldrv_priv; + int retries = 10; + int i; + + /* + * If the driver thinks the chip is idle, and no toggle bits + * are changing, then the chip is actually idle for sure. + */ + if (chip->state == FL_READY && chip_ready(map, adr)) + return 0; + + /* + * Try several times to reset the chip and then wait for it + * to become idle. The upper limit of a few milliseconds of + * delay isn't a big problem: the kernel is dying anyway. It + * is more important to save the messages. + */ + while (retries > 0) { + const unsigned long timeo = (HZ / 1000) + 1; + + /* send the reset command */ + map_write(map, CMD(0xF0), chip->start); + + /* wait for the chip to become ready */ + for (i = 0; i < jiffies_to_usecs(timeo); i++) { + if (chip_ready(map, adr)) + return 0; + + udelay(1); + } + } + + /* the chip never became ready */ + return -EBUSY; +} + +/* + * Write out one word of data to a single flash chip during a kernel panic + * + * This is only called during the panic_write() path. When panic_write() + * is called, the kernel is in the process of a panic, and will soon be + * dead. Therefore we don't take any locks, and attempt to get access + * to the chip as soon as possible. 
+ * + * The implementation of this routine is intentionally similar to + * do_write_oneword(), in order to ease code maintenance. + */ +static int do_panic_write_oneword(struct map_info *map, struct flchip *chip, + unsigned long adr, map_word datum) +{ + const unsigned long uWriteTimeout = (HZ / 1000) + 1; + struct cfi_private *cfi = map->fldrv_priv; + int retry_cnt = 0; + map_word oldd; + int ret = 0; + int i; + + adr += chip->start; + + ret = cfi_amdstd_panic_wait(map, chip, adr); + if (ret) + return ret; + + pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n", + __func__, adr, datum.x[0]); + + /* + * Check for a NOP for the case when the datum to write is already + * present - it saves time and works around buggy chips that corrupt + * data at other locations when 0xff is written to a location that + * already contains 0xff. + */ + oldd = map_read(map, adr); + if (map_word_equal(map, oldd, datum)) { + pr_debug("MTD %s(): NOP\n", __func__); + goto op_done; + } + + ENABLE_VPP(map); + +retry: + cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); + cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); + cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); + map_write(map, datum, adr); + + for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) { + if (chip_ready(map, adr)) + break; + + udelay(1); + } + + if (!chip_good(map, adr, datum)) { + /* reset on all failures. */ + map_write(map, CMD(0xF0), chip->start); + /* FIXME - should have reset delay before continuing */ + + if (++retry_cnt <= MAX_WORD_RETRIES) + goto retry; + + ret = -EIO; + } + +op_done: + DISABLE_VPP(map); + return ret; +} + +/* + * Write out some data during a kernel panic + * + * This is used by the mtdoops driver to save the dying messages from a + * kernel which has panic'd. + * + * This routine ignores all of the locking used throughout the rest of the + * driver, in order to ensure that the data gets written out no matter what + * state this driver (and the flash chip itself) was in when the kernel crashed. + * + * The implementation of this routine is intentionally similar to + * cfi_amdstd_write_words(), in order to ease code maintenance. 
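Two details of do_panic_write_oneword() above carry over from the normal write path: the NOP short-circuit when the target word already holds the datum (which also avoids rewriting 0xff over 0xff on chips that corrupt other locations when that happens), and a bounded retry loop around the program sequence. A compact sketch of that control flow for an x16 part follows; the 0x555/0x2AA unlock offsets, the retry count and the data-compare polling are assumptions made for illustration.

#include <stdint.h>

#define WORD_RETRIES 3	/* assumption; the driver uses MAX_WORD_RETRIES */

/* Program one word on a cmdset-0002 style x16 part mapped at 'base'. */
int program_word_sketch(volatile uint16_t *base, volatile uint16_t *addr,
			uint16_t datum)
{
	int retry;

	if (*addr == datum)		/* NOP: already there, don't touch it */
		return 0;

	for (retry = 0; retry < WORD_RETRIES; retry++) {
		unsigned long spins = 1000000;	/* crude timeout */

		base[0x555] = 0xAA;	/* unlock cycle 1 */
		base[0x2AA] = 0x55;	/* unlock cycle 2 */
		base[0x555] = 0xA0;	/* program setup */
		*addr = datum;		/* program cycle */

		while (spins-- && *addr != datum)
			;		/* data-polling stand-in */

		if (*addr == datum)
			return 0;

		base[0] = 0xF0;		/* reset, then retry */
	}
	return -1;			/* the driver returns -EIO here */
}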
+ */ +static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, + size_t *retlen, const u_char *buf) +{ + struct map_info *map = mtd->priv; + struct cfi_private *cfi = map->fldrv_priv; + unsigned long ofs, chipstart; + int ret = 0; + int chipnum; + + chipnum = to >> cfi->chipshift; + ofs = to - (chipnum << cfi->chipshift); + chipstart = cfi->chips[chipnum].start; + + /* If it's not bus aligned, do the first byte write */ + if (ofs & (map_bankwidth(map) - 1)) { + unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1); + int i = ofs - bus_ofs; + int n = 0; + map_word tmp_buf; + + ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs); + if (ret) + return ret; + + /* Load 'tmp_buf' with old contents of flash */ + tmp_buf = map_read(map, bus_ofs + chipstart); + + /* Number of bytes to copy from buffer */ + n = min_t(int, len, map_bankwidth(map) - i); + + tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n); + + ret = do_panic_write_oneword(map, &cfi->chips[chipnum], + bus_ofs, tmp_buf); + if (ret) + return ret; + + ofs += n; + buf += n; + (*retlen) += n; + len -= n; + + if (ofs >> cfi->chipshift) { + chipnum++; + ofs = 0; + if (chipnum == cfi->numchips) + return 0; + } + } + + /* We are now aligned, write as much as possible */ + while (len >= map_bankwidth(map)) { + map_word datum; + + datum = map_word_load(map, buf); + + ret = do_panic_write_oneword(map, &cfi->chips[chipnum], + ofs, datum); + if (ret) + return ret; + + ofs += map_bankwidth(map); + buf += map_bankwidth(map); + (*retlen) += map_bankwidth(map); + len -= map_bankwidth(map); + + if (ofs >> cfi->chipshift) { + chipnum++; + ofs = 0; + if (chipnum == cfi->numchips) + return 0; + + chipstart = cfi->chips[chipnum].start; + } + } + + /* Write the trailing bytes if any */ + if (len & (map_bankwidth(map) - 1)) { + map_word tmp_buf; + + ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs); + if (ret) + return ret; + + tmp_buf = map_read(map, ofs + chipstart); + + tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len); + + ret = do_panic_write_oneword(map, &cfi->chips[chipnum], + ofs, tmp_buf); + if (ret) + return ret; + + (*retlen) += len; + } + + return 0; +} + /* * Handle devices with one erase region, that only implement @@ -1389,14 +1902,14 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) adr = cfi->addr_unlock1; - spin_lock(chip->mutex); + mutex_lock(&chip->mutex); ret = get_chip(map, chip, adr, FL_WRITING); if (ret) { - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); return ret; } - DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n", + pr_debug("MTD %s(): ERASE 0x%.8lx\n", __func__, chip->start ); XIP_INVAL_CACHED_RANGE(map, adr, map->size); @@ -1425,10 +1938,10 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) /* Someone's suspended the erase. 
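cfi_amdstd_panic_write() above has the classic three-phase shape: read-modify-write for a misaligned head, whole bus words in the middle, read-modify-write for the tail, with a chip-boundary check after each step. Stripped of the flash specifics, the arithmetic reduces to the sketch below; BUS_WIDTH and the read_word/write_word callbacks are placeholders for what the driver gets from the map layer.

#include <stdint.h>
#include <string.h>
#include <stddef.h>

#define BUS_WIDTH 2	/* bytes per bus word; assumption for illustration */

/* Hypothetical bus-word programmer; 'ofs' is assumed bus-aligned. */
typedef int (*write_word_fn)(uint64_t ofs, const uint8_t *word);
/* Hypothetical reader used for the read-modify-write of partial words. */
typedef void (*read_word_fn)(uint64_t ofs, uint8_t *word);

int write_unaligned_sketch(uint64_t to, const uint8_t *buf, size_t len,
			   read_word_fn read_word, write_word_fn write_word)
{
	uint8_t word[BUS_WIDTH];
	int ret;

	/* Head: partial word before the first aligned boundary. */
	if (to & (BUS_WIDTH - 1)) {
		uint64_t aligned = to & ~(uint64_t)(BUS_WIDTH - 1);
		size_t skip = (size_t)(to - aligned);
		size_t n = BUS_WIDTH - skip;

		if (n > len)
			n = len;
		read_word(aligned, word);	/* old contents */
		memcpy(word + skip, buf, n);	/* merge the new bytes */
		ret = write_word(aligned, word);
		if (ret)
			return ret;
		to += n; buf += n; len -= n;
	}

	/* Body: whole bus words. */
	while (len >= BUS_WIDTH) {
		ret = write_word(to, buf);
		if (ret)
			return ret;
		to += BUS_WIDTH; buf += BUS_WIDTH; len -= BUS_WIDTH;
	}

	/* Tail: trailing partial word, again merged with old contents. */
	if (len) {
		read_word(to, word);
		memcpy(word, buf, len);
		ret = write_word(to, word);
		if (ret)
			return ret;
	}
	return 0;
}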
Sleep */ set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&chip->wq, &wait); - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); schedule(); remove_wait_queue(&chip->wq, &wait); - spin_lock(chip->mutex); + mutex_lock(&chip->mutex); continue; } if (chip->erase_suspended) { @@ -1461,8 +1974,9 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) chip->state = FL_READY; xip_enable(map, chip, adr); + DISABLE_VPP(map); put_chip(map, chip, adr); - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); return ret; } @@ -1477,14 +1991,14 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, adr += chip->start; - spin_lock(chip->mutex); + mutex_lock(&chip->mutex); ret = get_chip(map, chip, adr, FL_ERASING); if (ret) { - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); return ret; } - DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n", + pr_debug("MTD %s(): ERASE 0x%.8lx\n", __func__, adr ); XIP_INVAL_CACHED_RANGE(map, adr, len); @@ -1496,7 +2010,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); - map_write(map, CMD(0x30), adr); + map_write(map, cfi->sector_erase_cmd, adr); chip->state = FL_ERASING; chip->erase_suspended = 0; @@ -1513,10 +2027,10 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, /* Someone's suspended the erase. Sleep */ set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&chip->wq, &wait); - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); schedule(); remove_wait_queue(&chip->wq, &wait); - spin_lock(chip->mutex); + mutex_lock(&chip->mutex); continue; } if (chip->erase_suspended) { @@ -1551,13 +2065,14 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, } chip->state = FL_READY; + DISABLE_VPP(map); put_chip(map, chip, adr); - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); return ret; } -int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr) +static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr) { unsigned long ofs, len; int ret; @@ -1571,7 +2086,7 @@ int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr) instr->state = MTD_ERASE_DONE; mtd_erase_callback(instr); - + return 0; } @@ -1594,10 +2109,281 @@ static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr) instr->state = MTD_ERASE_DONE; mtd_erase_callback(instr); - + return 0; } +static int do_atmel_lock(struct map_info *map, struct flchip *chip, + unsigned long adr, int len, void *thunk) +{ + struct cfi_private *cfi = map->fldrv_priv; + int ret; + + mutex_lock(&chip->mutex); + ret = get_chip(map, chip, adr + chip->start, FL_LOCKING); + if (ret) + goto out_unlock; + chip->state = FL_LOCKING; + + pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len); + + cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, + cfi->device_type, NULL); + cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, + cfi->device_type, NULL); + cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, + cfi->device_type, NULL); + cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, + cfi->device_type, NULL); + cfi_send_gen_cmd(0x55, cfi->addr_unlock2, 
chip->start, map, cfi, + cfi->device_type, NULL); + map_write(map, CMD(0x40), chip->start + adr); + + chip->state = FL_READY; + put_chip(map, chip, adr + chip->start); + ret = 0; + +out_unlock: + mutex_unlock(&chip->mutex); + return ret; +} + +static int do_atmel_unlock(struct map_info *map, struct flchip *chip, + unsigned long adr, int len, void *thunk) +{ + struct cfi_private *cfi = map->fldrv_priv; + int ret; + + mutex_lock(&chip->mutex); + ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING); + if (ret) + goto out_unlock; + chip->state = FL_UNLOCKING; + + pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len); + + cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, + cfi->device_type, NULL); + map_write(map, CMD(0x70), adr); + + chip->state = FL_READY; + put_chip(map, chip, adr + chip->start); + ret = 0; + +out_unlock: + mutex_unlock(&chip->mutex); + return ret; +} + +static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) +{ + return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL); +} + +static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) +{ + return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL); +} + +/* + * Advanced Sector Protection - PPB (Persistent Protection Bit) locking + */ + +struct ppb_lock { + struct flchip *chip; + loff_t offset; + int locked; +}; + +#define MAX_SECTORS 512 + +#define DO_XXLOCK_ONEBLOCK_LOCK ((void *)1) +#define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *)2) +#define DO_XXLOCK_ONEBLOCK_GETLOCK ((void *)3) + +static int __maybe_unused do_ppb_xxlock(struct map_info *map, + struct flchip *chip, + unsigned long adr, int len, void *thunk) +{ + struct cfi_private *cfi = map->fldrv_priv; + unsigned long timeo; + int ret; + + mutex_lock(&chip->mutex); + ret = get_chip(map, chip, adr + chip->start, FL_LOCKING); + if (ret) { + mutex_unlock(&chip->mutex); + return ret; + } + + pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len); + + cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, + cfi->device_type, NULL); + cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, + cfi->device_type, NULL); + /* PPB entry command */ + cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi, + cfi->device_type, NULL); + + if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) { + chip->state = FL_LOCKING; + map_write(map, CMD(0xA0), chip->start + adr); + map_write(map, CMD(0x00), chip->start + adr); + } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) { + /* + * Unlocking of one specific sector is not supported, so we + * have to unlock all sectors of this device instead + */ + chip->state = FL_UNLOCKING; + map_write(map, CMD(0x80), chip->start); + map_write(map, CMD(0x30), chip->start); + } else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) { + chip->state = FL_JEDEC_QUERY; + /* Return locked status: 0->locked, 1->unlocked */ + ret = !cfi_read_query(map, adr); + } else + BUG(); + + /* + * Wait for some time as unlocking of all sectors takes quite long + */ + timeo = jiffies + msecs_to_jiffies(2000); /* 2s max (un)locking */ + for (;;) { + if (chip_ready(map, adr)) + break; + + if (time_after(jiffies, timeo)) { + printk(KERN_ERR "Waiting for chip to be ready timed out.\n"); + ret = -EIO; + break; + } + + UDELAY(map, chip, adr, 1); + } + + /* Exit BC commands */ + map_write(map, CMD(0x90), chip->start); + map_write(map, CMD(0x00), chip->start); + + chip->state = FL_READY; + put_chip(map, chip, adr + chip->start); + mutex_unlock(&chip->mutex); + + return ret; +} + +static int __maybe_unused 
cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, + uint64_t len) +{ + return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len, + DO_XXLOCK_ONEBLOCK_LOCK); +} + +static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, + uint64_t len) +{ + struct mtd_erase_region_info *regions = mtd->eraseregions; + struct map_info *map = mtd->priv; + struct cfi_private *cfi = map->fldrv_priv; + struct ppb_lock *sect; + unsigned long adr; + loff_t offset; + uint64_t length; + int chipnum; + int i; + int sectors; + int ret; + + /* + * PPB unlocking always unlocks all sectors of the flash chip. + * We need to re-lock all previously locked sectors. So lets + * first check the locking status of all sectors and save + * it for future use. + */ + sect = kzalloc(MAX_SECTORS * sizeof(struct ppb_lock), GFP_KERNEL); + if (!sect) + return -ENOMEM; + + /* + * This code to walk all sectors is a slightly modified version + * of the cfi_varsize_frob() code. + */ + i = 0; + chipnum = 0; + adr = 0; + sectors = 0; + offset = 0; + length = mtd->size; + + while (length) { + int size = regions[i].erasesize; + + /* + * Only test sectors that shall not be unlocked. The other + * sectors shall be unlocked, so lets keep their locking + * status at "unlocked" (locked=0) for the final re-locking. + */ + if ((adr < ofs) || (adr >= (ofs + len))) { + sect[sectors].chip = &cfi->chips[chipnum]; + sect[sectors].offset = offset; + sect[sectors].locked = do_ppb_xxlock( + map, &cfi->chips[chipnum], adr, 0, + DO_XXLOCK_ONEBLOCK_GETLOCK); + } + + adr += size; + offset += size; + length -= size; + + if (offset == regions[i].offset + size * regions[i].numblocks) + i++; + + if (adr >> cfi->chipshift) { + adr = 0; + chipnum++; + + if (chipnum >= cfi->numchips) + break; + } + + sectors++; + if (sectors >= MAX_SECTORS) { + printk(KERN_ERR "Only %d sectors for PPB locking supported!\n", + MAX_SECTORS); + kfree(sect); + return -EINVAL; + } + } + + /* Now unlock the whole chip */ + ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len, + DO_XXLOCK_ONEBLOCK_UNLOCK); + if (ret) { + kfree(sect); + return ret; + } + + /* + * PPB unlocking always unlocks all sectors of the flash chip. + * We need to re-lock all previously locked sectors. + */ + for (i = 0; i < sectors; i++) { + if (sect[i].locked) + do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0, + DO_XXLOCK_ONEBLOCK_LOCK); + } + + kfree(sect); + return ret; +} + +static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, + uint64_t len) +{ + return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len, + DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0; +} static void cfi_amdstd_sync (struct mtd_info *mtd) { @@ -1612,7 +2398,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd) chip = &cfi->chips[i]; retry: - spin_lock(chip->mutex); + mutex_lock(&chip->mutex); switch(chip->state) { case FL_READY: @@ -1621,24 +2407,25 @@ static void cfi_amdstd_sync (struct mtd_info *mtd) case FL_JEDEC_QUERY: chip->oldstate = chip->state; chip->state = FL_SYNCING; - /* No need to wake_up() on this state change - + /* No need to wake_up() on this state change - * as the whole point is that nobody can do anything * with the chip now anyway. 
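The key point of cfi_ppb_unlock() above is that PPB unlock is chip-wide, so per-range unlock has to be emulated: snapshot the lock bit of every sector that must stay locked, unlock the whole chip, then re-lock from the snapshot. The skeleton of that algorithm, with uniform sectors and placeholder get_locked/lock/unlock_all hooks instead of the real erase-region walk and PPB command sequences, is:

#include <stdlib.h>
#include <stdint.h>

struct sector_state {
	uint64_t offset;
	int locked;
};

/* Placeholder hooks; the driver issues PPB command sequences here. */
typedef int  (*get_locked_fn)(uint64_t sector);
typedef void (*lock_fn)(uint64_t sector);
typedef int  (*unlock_all_fn)(void);

/*
 * Unlock [ofs, ofs+len) while preserving the lock state of everything
 * else.  Uniform 'sector_size' sectors are assumed purely to keep the
 * walk simple; the driver walks mtd->eraseregions instead.
 */
int ppb_unlock_range_sketch(uint64_t ofs, uint64_t len,
			    uint64_t sector_size, unsigned int nsectors,
			    get_locked_fn get_locked, lock_fn lock,
			    unlock_all_fn unlock_all)
{
	struct sector_state *saved;
	unsigned int i, saved_cnt = 0;
	int ret;

	saved = calloc(nsectors, sizeof(*saved));
	if (!saved)
		return -1;

	/* 1. Snapshot the lock bit of every sector outside the range. */
	for (i = 0; i < nsectors; i++) {
		uint64_t adr = (uint64_t)i * sector_size;

		if (adr < ofs || adr >= ofs + len) {
			saved[saved_cnt].offset = adr;
			saved[saved_cnt].locked = get_locked(adr);
			saved_cnt++;
		}
	}

	/* 2. Unlock the whole chip (the only granularity PPB offers). */
	ret = unlock_all();
	if (ret) {
		free(saved);
		return ret;
	}

	/* 3. Re-lock the sectors that were locked before. */
	for (i = 0; i < saved_cnt; i++)
		if (saved[i].locked)
			lock(saved[i].offset);

	free(saved);
	return 0;
}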
*/ case FL_SYNCING: - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); break; default: /* Not an idle state */ + set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&chip->wq, &wait); - - spin_unlock(chip->mutex); + + mutex_unlock(&chip->mutex); schedule(); remove_wait_queue(&chip->wq, &wait); - + goto retry; } } @@ -1648,13 +2435,13 @@ static void cfi_amdstd_sync (struct mtd_info *mtd) for (i--; i >=0; i--) { chip = &cfi->chips[i]; - spin_lock(chip->mutex); - + mutex_lock(&chip->mutex); + if (chip->state == FL_SYNCING) { chip->state = chip->oldstate; wake_up(&chip->wq); } - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); } } @@ -1670,7 +2457,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd) for (i=0; !ret && i<cfi->numchips; i++) { chip = &cfi->chips[i]; - spin_lock(chip->mutex); + mutex_lock(&chip->mutex); switch(chip->state) { case FL_READY: @@ -1679,7 +2466,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd) case FL_JEDEC_QUERY: chip->oldstate = chip->state; chip->state = FL_PM_SUSPENDED; - /* No need to wake_up() on this state change - + /* No need to wake_up() on this state change - * as the whole point is that nobody can do anything * with the chip now anyway. */ @@ -1690,7 +2477,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd) ret = -EAGAIN; break; } - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); } /* Unlock the chips again */ @@ -1699,16 +2486,16 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd) for (i--; i >=0; i--) { chip = &cfi->chips[i]; - spin_lock(chip->mutex); - + mutex_lock(&chip->mutex); + if (chip->state == FL_PM_SUSPENDED) { chip->state = chip->oldstate; wake_up(&chip->wq); } - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); } } - + return ret; } @@ -1721,11 +2508,11 @@ static void cfi_amdstd_resume(struct mtd_info *mtd) struct flchip *chip; for (i=0; i<cfi->numchips; i++) { - + chip = &cfi->chips[i]; - spin_lock(chip->mutex); - + mutex_lock(&chip->mutex); + if (chip->state == FL_PM_SUSPENDED) { chip->state = FL_READY; map_write(map, CMD(0xF0), chip->start); @@ -1734,39 +2521,70 @@ static void cfi_amdstd_resume(struct mtd_info *mtd) else printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n"); - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); } } -static void cfi_amdstd_destroy(struct mtd_info *mtd) + +/* + * Ensure that the flash device is put back into read array mode before + * unloading the driver or rebooting. On some systems, rebooting while + * the flash is in query/program/erase mode will prevent the CPU from + * fetching the bootloader code, requiring a hard reset or power cycle. 
+ */ +static int cfi_amdstd_reset(struct mtd_info *mtd) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; - kfree(cfi->cmdset_priv); - kfree(cfi->cfiq); - kfree(cfi); - kfree(mtd->eraseregions); -} + int i, ret; + struct flchip *chip; -static char im_name[]="cfi_cmdset_0002"; + for (i = 0; i < cfi->numchips; i++) { + chip = &cfi->chips[i]; + + mutex_lock(&chip->mutex); + + ret = get_chip(map, chip, chip->start, FL_SHUTDOWN); + if (!ret) { + map_write(map, CMD(0xF0), chip->start); + chip->state = FL_SHUTDOWN; + put_chip(map, chip, chip->start); + } + + mutex_unlock(&chip->mutex); + } -static int __init cfi_amdstd_init(void) -{ - inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0002); return 0; } -static void __exit cfi_amdstd_exit(void) +static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val, + void *v) { - inter_module_unregister(im_name); + struct mtd_info *mtd; + + mtd = container_of(nb, struct mtd_info, reboot_notifier); + cfi_amdstd_reset(mtd); + return NOTIFY_DONE; } -module_init(cfi_amdstd_init); -module_exit(cfi_amdstd_exit); +static void cfi_amdstd_destroy(struct mtd_info *mtd) +{ + struct map_info *map = mtd->priv; + struct cfi_private *cfi = map->fldrv_priv; + + cfi_amdstd_reset(mtd); + unregister_reboot_notifier(&mtd->reboot_notifier); + kfree(cfi->cmdset_priv); + kfree(cfi->cfiq); + kfree(cfi); + kfree(mtd->eraseregions); +} MODULE_LICENSE("GPL"); MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al."); MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips"); +MODULE_ALIAS("cfi_cmdset_0006"); +MODULE_ALIAS("cfi_cmdset_0701"); diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c index c894f880157..423666b51ef 100644 --- a/drivers/mtd/chips/cfi_cmdset_0020.c +++ b/drivers/mtd/chips/cfi_cmdset_0020.c @@ -4,9 +4,7 @@ * * (C) 2000 Red Hat. GPL'd * - * $Id: cfi_cmdset_0020.c,v 1.19 2005/07/13 15:52:45 dwmw2 Exp $ - * - * 10/10/2000 Nicolas Pitre <nico@cam.org> + * 10/10/2000 Nicolas Pitre <nico@fluxnic.net> * - completely revamped method functions so they are aware and * independent of the flash geometry (buswidth, interleave, etc.) * - scalability vs code size is completely set at compile-time @@ -20,12 +18,10 @@ * - Plugged memory leak in cfi_staa_writev(). 
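cfi_amdstd_reboot() above is an ordinary reboot-notifier callback: container_of() recovers the mtd_info that embeds the notifier_block, and the flash is forced back into read-array mode before the machine resets; the matching register_reboot_notifier() call lives in the cmdset setup code outside this hunk. As a reminder of the general pattern, here is a kernel-side sketch (it only builds inside a kernel tree, and the my_dev structure and names are invented):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

struct my_dev {
	struct notifier_block reboot_nb;
	/* ... device state that must be quiesced before reboot ... */
};

static struct my_dev my_device;

static int my_dev_reboot(struct notifier_block *nb, unsigned long action,
			 void *data)
{
	struct my_dev *dev = container_of(nb, struct my_dev, reboot_nb);

	/* Put the hardware into a state the boot firmware can cope with. */
	(void)dev;
	return NOTIFY_DONE;
}

static int __init my_dev_init(void)
{
	my_device.reboot_nb.notifier_call = my_dev_reboot;
	return register_reboot_notifier(&my_device.reboot_nb);
}

static void __exit my_dev_exit(void)
{
	unregister_reboot_notifier(&my_device.reboot_nb);
}

module_init(my_dev_init);
module_exit(my_dev_exit);
MODULE_LICENSE("GPL");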
*/ -#include <linux/version.h> #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/sched.h> -#include <linux/init.h> #include <asm/io.h> #include <asm/byteorder.h> @@ -36,7 +32,6 @@ #include <linux/mtd/map.h> #include <linux/mtd/cfi.h> #include <linux/mtd/mtd.h> -#include <linux/mtd/compatmac.h> static int cfi_staa_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *); @@ -45,8 +40,8 @@ static int cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs, unsigned long count, loff_t to, size_t *retlen); static int cfi_staa_erase_varsize(struct mtd_info *, struct erase_info *); static void cfi_staa_sync (struct mtd_info *); -static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len); -static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len); +static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); +static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); static int cfi_staa_suspend (struct mtd_info *); static void cfi_staa_resume (struct mtd_info *); @@ -81,17 +76,17 @@ static void cfi_tell_features(struct cfi_pri_intelext *extp) printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported"); printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported"); for (i=9; i<32; i++) { - if (extp->FeatureSupport & (1<<i)) + if (extp->FeatureSupport & (1<<i)) printk(" - Unknown Bit %X: supported\n", i); } - + printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport); printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported"); for (i=1; i<8; i++) { if (extp->SuspendCmdSupport & (1<<i)) printk(" - Unknown Bit %X: supported\n", i); } - + printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask); printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no"); printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no"); @@ -99,11 +94,11 @@ static void cfi_tell_features(struct cfi_pri_intelext *extp) if (extp->BlkStatusRegMask & (1<<i)) printk(" - Unknown Bit %X Active: yes\n",i); } - - printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n", + + printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n", extp->VccOptimal >> 8, extp->VccOptimal & 0xf); if (extp->VppOptimal) - printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n", + printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n", extp->VppOptimal >> 8, extp->VppOptimal & 0xf); } #endif @@ -121,7 +116,7 @@ struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary) int i; if (cfi->cfi_mode) { - /* + /* * It's a real CFI chip, not one for which the probe * routine faked a CFI structure. So we read the feature * table from it. 
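cfi_tell_features() above is a plain bitmask dump: bits with a known meaning get a name, anything else that is set is reported as an unknown bit. The same decode pattern in miniature, filling in only the two feature names visible in this hunk and treating everything else as unknown:

#include <stdio.h>
#include <stdint.h>

static const char *const feature_names[] = {
	[7] = "Page-mode read",
	[8] = "Synchronous read",
	/* Bits without a name fall through to the "Unknown Bit" branch. */
};

static void tell_features_sketch(uint32_t feature_support)
{
	unsigned int i;

	for (i = 0; i < 32; i++) {
		if (!(feature_support & (1u << i)))
			continue;
		if (i < sizeof(feature_names) / sizeof(feature_names[0]) &&
		    feature_names[i])
			printf(" - %s: supported\n", feature_names[i]);
		else
			printf(" - Unknown Bit %X: supported\n", i);
	}
}

int main(void)
{
	tell_features_sketch(0x180);	/* example: bits 7 and 8 set */
	return 0;
}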
@@ -133,27 +128,40 @@ struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary) if (!extp) return NULL; + if (extp->MajorVersion != '1' || + (extp->MinorVersion < '0' || extp->MinorVersion > '3')) { + printk(KERN_ERR " Unknown ST Microelectronics" + " Extended Query version %c.%c.\n", + extp->MajorVersion, extp->MinorVersion); + kfree(extp); + return NULL; + } + /* Do some byteswapping if necessary */ - extp->FeatureSupport = cfi32_to_cpu(extp->FeatureSupport); - extp->BlkStatusRegMask = cfi32_to_cpu(extp->BlkStatusRegMask); - + extp->FeatureSupport = cfi32_to_cpu(map, extp->FeatureSupport); + extp->BlkStatusRegMask = cfi32_to_cpu(map, + extp->BlkStatusRegMask); + #ifdef DEBUG_CFI_FEATURES /* Tell the user about it in lots of lovely detail */ cfi_tell_features(extp); -#endif +#endif /* Install our own private info structure */ cfi->cmdset_priv = extp; - } + } for (i=0; i< cfi->numchips; i++) { cfi->chips[i].word_write_time = 128; cfi->chips[i].buffer_write_time = 128; cfi->chips[i].erase_time = 1024; - } + cfi->chips[i].ref_point_counter = 0; + init_waitqueue_head(&(cfi->chips[i].wq)); + } return cfi_staa_setup(map); } +EXPORT_SYMBOL_GPL(cfi_cmdset_0020); static struct mtd_info *cfi_staa_setup(struct map_info *map) { @@ -163,30 +171,27 @@ static struct mtd_info *cfi_staa_setup(struct map_info *map) int i,j; unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave; - mtd = kmalloc(sizeof(*mtd), GFP_KERNEL); + mtd = kzalloc(sizeof(*mtd), GFP_KERNEL); //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips); if (!mtd) { - printk(KERN_ERR "Failed to allocate memory for MTD device\n"); kfree(cfi->cmdset_priv); return NULL; } - memset(mtd, 0, sizeof(*mtd)); mtd->priv = map; mtd->type = MTD_NORFLASH; mtd->size = devsize * cfi->numchips; mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips; - mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) + mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) * mtd->numeraseregions, GFP_KERNEL); - if (!mtd->eraseregions) { - printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n"); + if (!mtd->eraseregions) { kfree(cfi->cmdset_priv); kfree(mtd); return NULL; } - + for (i=0; i<cfi->cfiq->NumEraseRegions; i++) { unsigned long ernum, ersize; ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave; @@ -213,25 +218,25 @@ static struct mtd_info *cfi_staa_setup(struct map_info *map) } for (i=0; i<mtd->numeraseregions;i++){ - printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n", - i,mtd->eraseregions[i].offset, + printk(KERN_DEBUG "%d: offset=0x%llx,size=0x%x,blocks=%d\n", + i, (unsigned long long)mtd->eraseregions[i].offset, mtd->eraseregions[i].erasesize, mtd->eraseregions[i].numblocks); } - /* Also select the correct geometry setup too */ - mtd->erase = cfi_staa_erase_varsize; - mtd->read = cfi_staa_read; - mtd->write = cfi_staa_write_buffers; - mtd->writev = cfi_staa_writev; - mtd->sync = cfi_staa_sync; - mtd->lock = cfi_staa_lock; - mtd->unlock = cfi_staa_unlock; - mtd->suspend = cfi_staa_suspend; - mtd->resume = cfi_staa_resume; - mtd->flags = MTD_CAP_NORFLASH; - mtd->flags |= MTD_ECC; /* FIXME: Not all STMicro flashes have this */ - mtd->eccsize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */ + /* Also select the correct geometry setup too */ + mtd->_erase = cfi_staa_erase_varsize; + mtd->_read = cfi_staa_read; + mtd->_write = cfi_staa_write_buffers; + mtd->_writev = cfi_staa_writev; + mtd->_sync = cfi_staa_sync; + mtd->_lock = cfi_staa_lock; + 
mtd->_unlock = cfi_staa_unlock; + mtd->_suspend = cfi_staa_suspend; + mtd->_resume = cfi_staa_resume; + mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE; + mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */ + mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; map->fldrv = &cfi_staa_chipdrv; __module_get(THIS_MODULE); mtd->name = map->name; @@ -250,15 +255,15 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof adr += chip->start; - /* Ensure cmd read/writes are aligned. */ - cmd_addr = adr & ~(map_bankwidth(map)-1); + /* Ensure cmd read/writes are aligned. */ + cmd_addr = adr & ~(map_bankwidth(map)-1); /* Let's determine this according to the interleave only once */ status_OK = CMD(0x80); timeo = jiffies + HZ; retry: - spin_lock_bh(chip->mutex); + mutex_lock(&chip->mutex); /* Check that the chip's ready to talk to us. * If it's in FL_ERASING state, suspend it and make it talk now. @@ -267,7 +272,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof case FL_ERASING: if (!(((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2)) goto sleep; /* We don't support erase suspend */ - + map_write (map, CMD(0xb0), cmd_addr); /* If the flash has finished erasing, then 'erase suspend' * appears to make some (28F320) flash devices switch to @@ -282,29 +287,30 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof status = map_read(map, cmd_addr); if (map_word_andequal(map, status, status_OK, status_OK)) break; - + if (time_after(jiffies, timeo)) { /* Urgh */ map_write(map, CMD(0xd0), cmd_addr); /* make sure we're in 'read status' mode */ map_write(map, CMD(0x70), cmd_addr); chip->state = FL_ERASING; - spin_unlock_bh(chip->mutex); + wake_up(&chip->wq); + mutex_unlock(&chip->mutex); printk(KERN_ERR "Chip not ready after erase " "suspended: status = 0x%lx\n", status.x[0]); return -EIO; } - - spin_unlock_bh(chip->mutex); + + mutex_unlock(&chip->mutex); cfi_udelay(1); - spin_lock_bh(chip->mutex); + mutex_lock(&chip->mutex); } - + suspended = 1; map_write(map, CMD(0xff), cmd_addr); chip->state = FL_READY; break; - + #if 0 case FL_WRITING: /* Not quite yet */ @@ -325,16 +331,16 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof chip->state = FL_READY; break; } - + /* Urgh. Chip not yet ready to talk to us. */ if (time_after(jiffies, timeo)) { - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]); return -EIO; } /* Latency issues. Drop the lock, wait a while and retry */ - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); cfi_udelay(1); goto retry; @@ -344,7 +350,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof someone changes the status */ set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&chip->wq, &wait); - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); schedule(); remove_wait_queue(&chip->wq, &wait); timeo = jiffies + HZ; @@ -355,21 +361,21 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof if (suspended) { chip->state = chip->oldstate; - /* What if one interleaved chip has finished and the + /* What if one interleaved chip has finished and the other hasn't? The old code would leave the finished - one in READY mode. That's bad, and caused -EROFS + one in READY mode. 
That's bad, and caused -EROFS errors to be returned from do_erase_oneblock because that's the only bit it checked for at the time. - As the state machine appears to explicitly allow + As the state machine appears to explicitly allow sending the 0x70 (Read Status) command to an erasing - chip and expecting it to be ignored, that's what we + chip and expecting it to be ignored, that's what we do. */ map_write(map, CMD(0xd0), cmd_addr); - map_write(map, CMD(0x70), cmd_addr); + map_write(map, CMD(0x70), cmd_addr); } wake_up(&chip->wq); - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); return 0; } @@ -385,8 +391,6 @@ static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t chipnum = (from >> cfi->chipshift); ofs = from - (chipnum << cfi->chipshift); - *retlen = 0; - while (len) { unsigned long thislen; @@ -405,14 +409,14 @@ static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen += thislen; len -= thislen; buf += thislen; - + ofs = 0; chipnum++; } return ret; } -static inline int do_write_buffer(struct map_info *map, struct flchip *chip, +static inline int do_write_buffer(struct map_info *map, struct flchip *chip, unsigned long adr, const u_char *buf, int len) { struct cfi_private *cfi = map->fldrv_priv; @@ -420,7 +424,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip, unsigned long cmd_adr, timeo; DECLARE_WAITQUEUE(wait, current); int wbufsize, z; - + /* M58LW064A requires bus alignment for buffer wriets -- saw */ if (adr & (map_bankwidth(map)-1)) return -EINVAL; @@ -428,18 +432,18 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip, wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; adr += chip->start; cmd_adr = adr & ~(wbufsize-1); - + /* Let's determine this according to the interleave only once */ status_OK = CMD(0x80); - + timeo = jiffies + HZ; retry: #ifdef DEBUG_CFI_FEATURES - printk("%s: chip->state[%d]\n", __FUNCTION__, chip->state); + printk("%s: chip->state[%d]\n", __func__, chip->state); #endif - spin_lock_bh(chip->mutex); - + mutex_lock(&chip->mutex); + /* Check that the chip's ready to talk to us. * Later, we can actually think about interrupting it * if it's in FL_ERASING state. @@ -448,13 +452,13 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip, switch (chip->state) { case FL_READY: break; - + case FL_CFI_QUERY: case FL_JEDEC_QUERY: map_write(map, CMD(0x70), cmd_adr); chip->state = FL_STATUS; #ifdef DEBUG_CFI_FEATURES - printk("%s: 1 status[%x]\n", __FUNCTION__, map_read(map, cmd_adr)); + printk("%s: 1 status[%x]\n", __func__, map_read(map, cmd_adr)); #endif case FL_STATUS: @@ -463,14 +467,14 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip, break; /* Urgh. Chip not yet ready to talk to us. */ if (time_after(jiffies, timeo)) { - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n", status.x[0], map_read(map, cmd_adr).x[0]); return -EIO; } /* Latency issues. 
Drop the lock, wait a while and retry */ - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); cfi_udelay(1); goto retry; @@ -479,7 +483,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip, someone changes the status */ set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&chip->wq, &wait); - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); schedule(); remove_wait_queue(&chip->wq, &wait); timeo = jiffies + HZ; @@ -496,16 +500,16 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip, if (map_word_andequal(map, status, status_OK, status_OK)) break; - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); cfi_udelay(1); - spin_lock_bh(chip->mutex); + mutex_lock(&chip->mutex); if (++z > 100) { /* Argh. Not ready for write to buffer */ DISABLE_VPP(map); map_write(map, CMD(0x70), cmd_adr); chip->state = FL_STATUS; - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]); return -EIO; } @@ -513,7 +517,7 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip, /* Write length of data to come */ map_write(map, CMD(len/map_bankwidth(map)-1), cmd_adr ); - + /* Write data */ for (z = 0; z < len; z += map_bankwidth(map), buf += map_bankwidth(map)) { @@ -525,9 +529,9 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip, map_write(map, CMD(0xd0), cmd_adr); chip->state = FL_WRITING; - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); cfi_udelay(chip->buffer_write_time); - spin_lock_bh(chip->mutex); + mutex_lock(&chip->mutex); timeo = jiffies + (HZ/2); z = 0; @@ -536,11 +540,11 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip, /* Someone's suspended the write. Sleep */ set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&chip->wq, &wait); - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); schedule(); remove_wait_queue(&chip->wq, &wait); timeo = jiffies + (HZ / 2); /* FIXME */ - spin_lock_bh(chip->mutex); + mutex_lock(&chip->mutex); continue; } @@ -556,25 +560,25 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip, map_write(map, CMD(0x70), adr); chip->state = FL_STATUS; DISABLE_VPP(map); - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n"); return -EIO; } - + /* Latency issues. Drop the lock, wait a while and retry */ - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); cfi_udelay(1); z++; - spin_lock_bh(chip->mutex); + mutex_lock(&chip->mutex); } if (!z) { chip->buffer_write_time--; if (!chip->buffer_write_time) chip->buffer_write_time++; } - if (z > 1) + if (z > 1) chip->buffer_write_time++; - + /* Done and happy. */ DISABLE_VPP(map); chip->state = FL_STATUS; @@ -582,23 +586,23 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip, /* check for errors: 'lock bit', 'VPP', 'dead cell'/'unerased cell' or 'incorrect cmd' -- saw */ if (map_word_bitsset(map, status, CMD(0x3a))) { #ifdef DEBUG_CFI_FEATURES - printk("%s: 2 status[%lx]\n", __FUNCTION__, status.x[0]); + printk("%s: 2 status[%lx]\n", __func__, status.x[0]); #endif /* clear status */ map_write(map, CMD(0x50), cmd_adr); /* put back into read status register mode */ map_write(map, CMD(0x70), adr); wake_up(&chip->wq); - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); return map_word_bitsset(map, status, CMD(0x02)) ? 
-EROFS : -EIO; } wake_up(&chip->wq); - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); return 0; } -static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to, +static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) { struct map_info *map = mtd->priv; @@ -608,19 +612,15 @@ static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to, int chipnum; unsigned long ofs; - *retlen = 0; - if (!len) - return 0; - chipnum = to >> cfi->chipshift; ofs = to - (chipnum << cfi->chipshift); #ifdef DEBUG_CFI_FEATURES - printk("%s: map_bankwidth(map)[%x]\n", __FUNCTION__, map_bankwidth(map)); - printk("%s: chipnum[%x] wbufsize[%x]\n", __FUNCTION__, chipnum, wbufsize); - printk("%s: ofs[%x] len[%x]\n", __FUNCTION__, ofs, len); + printk("%s: map_bankwidth(map)[%x]\n", __func__, map_bankwidth(map)); + printk("%s: chipnum[%x] wbufsize[%x]\n", __func__, chipnum, wbufsize); + printk("%s: ofs[%x] len[%x]\n", __func__, ofs, len); #endif - + /* Write buffer is worth it only if more than one word to write... */ while (len > 0) { /* We must not cross write block boundaries */ @@ -629,7 +629,7 @@ static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to, if (size > len) size = len; - ret = do_write_buffer(map, &cfi->chips[chipnum], + ret = do_write_buffer(map, &cfi->chips[chipnum], ofs, buf, size); if (ret) return ret; @@ -640,13 +640,13 @@ static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to, len -= size; if (ofs >> cfi->chipshift) { - chipnum ++; + chipnum ++; ofs = 0; if (chipnum == cfi->numchips) return 0; } } - + return 0; } @@ -655,7 +655,7 @@ static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to, * a small buffer for this. * XXX: If the buffer size is not a multiple of 2, this will break */ -#define ECCBUF_SIZE (mtd->eccsize) +#define ECCBUF_SIZE (mtd->writesize) #define ECCBUF_DIV(x) ((x) & ~(ECCBUF_SIZE - 1)) #define ECCBUF_MOD(x) ((x) & (ECCBUF_SIZE - 1)) static int @@ -690,7 +690,8 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs, continue; } memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen); - ret = mtd->write(mtd, to, ECCBUF_SIZE, &thislen, buffer); + ret = mtd_write(mtd, to, ECCBUF_SIZE, &thislen, + buffer); totlen += thislen; if (ret || thislen != ECCBUF_SIZE) goto write_error; @@ -699,7 +700,8 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs, to += ECCBUF_SIZE; } if (ECCBUF_DIV(elem_len)) { /* write clean aligned data */ - ret = mtd->write(mtd, to, ECCBUF_DIV(elem_len), &thislen, elem_base); + ret = mtd_write(mtd, to, ECCBUF_DIV(elem_len), + &thislen, elem_base); totlen += thislen; if (ret || thislen != ECCBUF_DIV(elem_len)) goto write_error; @@ -713,7 +715,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs, } if (buflen) { /* flush last page, even if not full */ /* This is sometimes intended behaviour, really */ - ret = mtd->write(mtd, to, buflen, &thislen, buffer); + ret = mtd_write(mtd, to, buflen, &thislen, buffer); totlen += thislen; if (ret || thislen != ECCBUF_SIZE) goto write_error; @@ -742,7 +744,7 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u timeo = jiffies + HZ; retry: - spin_lock_bh(chip->mutex); + mutex_lock(&chip->mutex); /* Check that the chip's ready to talk to us. */ switch (chip->state) { @@ -756,16 +758,16 @@ retry: status = map_read(map, adr); if (map_word_andequal(map, status, status_OK, status_OK)) break; - + /* Urgh. Chip not yet ready to talk to us. 
*/ if (time_after(jiffies, timeo)) { - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); printk(KERN_ERR "waiting for chip to be ready timed out in erase\n"); return -EIO; } /* Latency issues. Drop the lock, wait a while and retry */ - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); cfi_udelay(1); goto retry; @@ -774,7 +776,7 @@ retry: someone changes the status */ set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&chip->wq, &wait); - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); schedule(); remove_wait_queue(&chip->wq, &wait); timeo = jiffies + HZ; @@ -789,10 +791,10 @@ retry: map_write(map, CMD(0x20), adr); map_write(map, CMD(0xD0), adr); chip->state = FL_ERASING; - - spin_unlock_bh(chip->mutex); + + mutex_unlock(&chip->mutex); msleep(1000); - spin_lock_bh(chip->mutex); + mutex_lock(&chip->mutex); /* FIXME. Use a timer to check this, and return immediately. */ /* Once the state machine's known to be working I'll do that */ @@ -803,34 +805,34 @@ retry: /* Someone's suspended the erase. Sleep */ set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&chip->wq, &wait); - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); schedule(); remove_wait_queue(&chip->wq, &wait); timeo = jiffies + (HZ*20); /* FIXME */ - spin_lock_bh(chip->mutex); + mutex_lock(&chip->mutex); continue; } status = map_read(map, adr); if (map_word_andequal(map, status, status_OK, status_OK)) break; - + /* OK Still waiting */ if (time_after(jiffies, timeo)) { map_write(map, CMD(0x70), adr); chip->state = FL_STATUS; printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]); DISABLE_VPP(map); - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); return -EIO; } - + /* Latency issues. Drop the lock, wait a while and retry */ - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); cfi_udelay(1); - spin_lock_bh(chip->mutex); + mutex_lock(&chip->mutex); } - + DISABLE_VPP(map); ret = 0; @@ -855,7 +857,7 @@ retry: /* Reset the error bits */ map_write(map, CMD(0x50), adr); map_write(map, CMD(0x70), adr); - + if ((chipstatus & 0x30) == 0x30) { printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus); ret = -EIO; @@ -871,7 +873,7 @@ retry: printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus); timeo = jiffies + HZ; chip->state = FL_STATUS; - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); goto retry; } printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus); @@ -880,11 +882,12 @@ retry: } wake_up(&chip->wq); - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); return ret; } -int cfi_staa_erase_varsize(struct mtd_info *mtd, struct erase_info *instr) +static int cfi_staa_erase_varsize(struct mtd_info *mtd, + struct erase_info *instr) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; unsigned long adr, len; @@ -892,29 +895,23 @@ int cfi_staa_erase_varsize(struct mtd_info *mtd, struct erase_info *instr) int i, first; struct mtd_erase_region_info *regions = mtd->eraseregions; - if (instr->addr > mtd->size) - return -EINVAL; - - if ((instr->len + instr->addr) > mtd->size) - return -EINVAL; - /* Check that both start and end of the requested erase are * aligned with the erasesize at the appropriate addresses. */ i = 0; - /* Skip all erase regions which are ended before the start of + /* Skip all erase regions which are ended before the start of the requested erase. 
Actually, to save on the calculations, we skip to the first erase region which starts after the start of the requested erase, and then go back one. */ - + while (i < mtd->numeraseregions && instr->addr >= regions[i].offset) i++; i--; - /* OK, now i is pointing at the erase region in which this + /* OK, now i is pointing at the erase region in which this erase request starts. Check the start of the requested erase range is aligned with the erase size which is in effect here. @@ -937,7 +934,7 @@ int cfi_staa_erase_varsize(struct mtd_info *mtd, struct erase_info *instr) the address actually falls */ i--; - + if ((instr->addr + instr->len) & (regions[i].erasesize-1)) return -EINVAL; @@ -949,28 +946,28 @@ int cfi_staa_erase_varsize(struct mtd_info *mtd, struct erase_info *instr) while(len) { ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr); - + if (ret) return ret; adr += regions[i].erasesize; len -= regions[i].erasesize; - if (adr % (1<< cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift))) + if (adr % (1<< cfi->chipshift) == (((unsigned long)regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift))) i++; if (adr >> cfi->chipshift) { adr = 0; chipnum++; - + if (chipnum >= cfi->numchips) - break; + break; } } - + instr->state = MTD_ERASE_DONE; mtd_erase_callback(instr); - + return 0; } @@ -987,7 +984,7 @@ static void cfi_staa_sync (struct mtd_info *mtd) chip = &cfi->chips[i]; retry: - spin_lock_bh(chip->mutex); + mutex_lock(&chip->mutex); switch(chip->state) { case FL_READY: @@ -996,22 +993,23 @@ static void cfi_staa_sync (struct mtd_info *mtd) case FL_JEDEC_QUERY: chip->oldstate = chip->state; chip->state = FL_SYNCING; - /* No need to wake_up() on this state change - + /* No need to wake_up() on this state change - * as the whole point is that nobody can do anything * with the chip now anyway. */ case FL_SYNCING: - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); break; default: /* Not an idle state */ + set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&chip->wq, &wait); - - spin_unlock_bh(chip->mutex); + + mutex_unlock(&chip->mutex); schedule(); remove_wait_queue(&chip->wq, &wait); - + goto retry; } } @@ -1021,13 +1019,13 @@ static void cfi_staa_sync (struct mtd_info *mtd) for (i--; i >=0; i--) { chip = &cfi->chips[i]; - spin_lock_bh(chip->mutex); - + mutex_lock(&chip->mutex); + if (chip->state == FL_SYNCING) { chip->state = chip->oldstate; wake_up(&chip->wq); } - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); } } @@ -1045,7 +1043,7 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un timeo = jiffies + HZ; retry: - spin_lock_bh(chip->mutex); + mutex_lock(&chip->mutex); /* Check that the chip's ready to talk to us. */ switch (chip->state) { @@ -1057,18 +1055,18 @@ retry: case FL_STATUS: status = map_read(map, adr); - if (map_word_andequal(map, status, status_OK, status_OK)) + if (map_word_andequal(map, status, status_OK, status_OK)) break; - + /* Urgh. Chip not yet ready to talk to us. */ if (time_after(jiffies, timeo)) { - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); printk(KERN_ERR "waiting for chip to be ready timed out in lock\n"); return -EIO; } /* Latency issues. 
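The region walk at the top of cfi_staa_erase_varsize() above answers two questions: which erase region an address falls in, and whether both ends of the requested range sit on a block boundary of the region in force there. A small model of that check over a sorted region table follows; the fields mirror mtd_erase_region_info, a non-zero length is assumed, and the sample layout in main() is invented for illustration.

#include <stdio.h>
#include <stdint.h>

struct erase_region {
	uint64_t offset;	/* start of the region */
	uint32_t erasesize;	/* block size inside the region */
	uint32_t numblocks;
};

/* Index of the region containing 'addr' (table assumed sorted by offset). */
static int region_of(const struct erase_region *r, int nregions, uint64_t addr)
{
	int i = 0;

	while (i < nregions && addr >= r[i].offset)
		i++;
	return i - 1;
}

/* 0 if [addr, addr+len) starts and ends on block boundaries, else -1. */
static int check_erase_alignment(const struct erase_region *r, int nregions,
				 uint64_t addr, uint64_t len)
{
	int start = region_of(r, nregions, addr);
	int end = region_of(r, nregions, addr + len - 1);

	if (start < 0 || end < 0)
		return -1;
	if ((addr - r[start].offset) % r[start].erasesize)
		return -1;	/* start is not on a block boundary */
	if ((addr + len - r[end].offset) % r[end].erasesize)
		return -1;	/* end is not on a block boundary */
	return 0;
}

int main(void)
{
	/* Invented layout: 8 x 8KiB parameter blocks, then 15 x 64KiB blocks. */
	const struct erase_region regions[] = {
		{ 0x00000, 0x02000,  8 },
		{ 0x10000, 0x10000, 15 },
	};

	printf("%d\n", check_erase_alignment(regions, 2, 0x2000, 0xe000)); /* 0  */
	printf("%d\n", check_erase_alignment(regions, 2, 0x2000, 0x9000)); /* -1 */
	return 0;
}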
Drop the lock, wait a while and retry */ - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); cfi_udelay(1); goto retry; @@ -1077,7 +1075,7 @@ retry: someone changes the status */ set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&chip->wq, &wait); - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); schedule(); remove_wait_queue(&chip->wq, &wait); timeo = jiffies + HZ; @@ -1088,10 +1086,10 @@ retry: map_write(map, CMD(0x60), adr); map_write(map, CMD(0x01), adr); chip->state = FL_LOCKING; - - spin_unlock_bh(chip->mutex); + + mutex_unlock(&chip->mutex); msleep(1000); - spin_lock_bh(chip->mutex); + mutex_lock(&chip->mutex); /* FIXME. Use a timer to check this, and return immediately. */ /* Once the state machine's known to be working I'll do that */ @@ -1102,31 +1100,31 @@ retry: status = map_read(map, adr); if (map_word_andequal(map, status, status_OK, status_OK)) break; - + /* OK Still waiting */ if (time_after(jiffies, timeo)) { map_write(map, CMD(0x70), adr); chip->state = FL_STATUS; printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]); DISABLE_VPP(map); - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); return -EIO; } - + /* Latency issues. Drop the lock, wait a while and retry */ - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); cfi_udelay(1); - spin_lock_bh(chip->mutex); + mutex_lock(&chip->mutex); } - + /* Done and happy. */ chip->state = FL_STATUS; DISABLE_VPP(map); wake_up(&chip->wq); - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); return 0; } -static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len) +static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; @@ -1142,9 +1140,6 @@ static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len) if (len & (mtd->erasesize -1)) return -EINVAL; - if ((len + ofs) > mtd->size) - return -EINVAL; - chipnum = ofs >> cfi->chipshift; adr = ofs - (chipnum << cfi->chipshift); @@ -1162,8 +1157,8 @@ static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len) cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL); printk("after lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor))); cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL); -#endif - +#endif + if (ret) return ret; @@ -1173,9 +1168,9 @@ static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len) if (adr >> cfi->chipshift) { adr = 0; chipnum++; - + if (chipnum >= cfi->numchips) - break; + break; } } return 0; @@ -1194,7 +1189,7 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, timeo = jiffies + HZ; retry: - spin_lock_bh(chip->mutex); + mutex_lock(&chip->mutex); /* Check that the chip's ready to talk to us. */ switch (chip->state) { @@ -1208,16 +1203,16 @@ retry: status = map_read(map, adr); if (map_word_andequal(map, status, status_OK, status_OK)) break; - + /* Urgh. Chip not yet ready to talk to us. */ if (time_after(jiffies, timeo)) { - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n"); return -EIO; } /* Latency issues. 
Drop the lock, wait a while and retry */ - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); cfi_udelay(1); goto retry; @@ -1226,7 +1221,7 @@ retry: someone changes the status */ set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&chip->wq, &wait); - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); schedule(); remove_wait_queue(&chip->wq, &wait); timeo = jiffies + HZ; @@ -1237,10 +1232,10 @@ retry: map_write(map, CMD(0x60), adr); map_write(map, CMD(0xD0), adr); chip->state = FL_UNLOCKING; - - spin_unlock_bh(chip->mutex); + + mutex_unlock(&chip->mutex); msleep(1000); - spin_lock_bh(chip->mutex); + mutex_lock(&chip->mutex); /* FIXME. Use a timer to check this, and return immediately. */ /* Once the state machine's known to be working I'll do that */ @@ -1251,31 +1246,31 @@ retry: status = map_read(map, adr); if (map_word_andequal(map, status, status_OK, status_OK)) break; - + /* OK Still waiting */ if (time_after(jiffies, timeo)) { map_write(map, CMD(0x70), adr); chip->state = FL_STATUS; printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]); DISABLE_VPP(map); - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); return -EIO; } - + /* Latency issues. Drop the unlock, wait a while and retry */ - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); cfi_udelay(1); - spin_lock_bh(chip->mutex); + mutex_lock(&chip->mutex); } - + /* Done and happy. */ chip->state = FL_STATUS; DISABLE_VPP(map); wake_up(&chip->wq); - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); return 0; } -static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len) +static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) { struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; @@ -1292,7 +1287,7 @@ static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len) { unsigned long temp_adr = adr; unsigned long temp_len = len; - + cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL); while (temp_len) { printk("before unlock %x: block status register is %x\n",temp_adr,cfi_read_query(map, temp_adr+(2*ofs_factor))); @@ -1310,7 +1305,7 @@ static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len) printk("after unlock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor))); cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL); #endif - + return ret; } @@ -1325,7 +1320,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd) for (i=0; !ret && i<cfi->numchips; i++) { chip = &cfi->chips[i]; - spin_lock_bh(chip->mutex); + mutex_lock(&chip->mutex); switch(chip->state) { case FL_READY: @@ -1334,7 +1329,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd) case FL_JEDEC_QUERY: chip->oldstate = chip->state; chip->state = FL_PM_SUSPENDED; - /* No need to wake_up() on this state change - + /* No need to wake_up() on this state change - * as the whole point is that nobody can do anything * with the chip now anyway. 
*/ @@ -1345,7 +1340,7 @@ static int cfi_staa_suspend(struct mtd_info *mtd) ret = -EAGAIN; break; } - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); } /* Unlock the chips again */ @@ -1353,9 +1348,9 @@ static int cfi_staa_suspend(struct mtd_info *mtd) if (ret) { for (i--; i >=0; i--) { chip = &cfi->chips[i]; - - spin_lock_bh(chip->mutex); - + + mutex_lock(&chip->mutex); + if (chip->state == FL_PM_SUSPENDED) { /* No need to force it into a known state here, because we're returning failure, and it didn't @@ -1363,10 +1358,10 @@ static int cfi_staa_suspend(struct mtd_info *mtd) chip->state = chip->oldstate; wake_up(&chip->wq); } - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); } - } - + } + return ret; } @@ -1378,11 +1373,11 @@ static void cfi_staa_resume(struct mtd_info *mtd) struct flchip *chip; for (i=0; i<cfi->numchips; i++) { - + chip = &cfi->chips[i]; - spin_lock_bh(chip->mutex); - + mutex_lock(&chip->mutex); + /* Go to known state. Chip may have been power cycled */ if (chip->state == FL_PM_SUSPENDED) { map_write(map, CMD(0xFF), 0); @@ -1390,7 +1385,7 @@ static void cfi_staa_resume(struct mtd_info *mtd) wake_up(&chip->wq); } - spin_unlock_bh(chip->mutex); + mutex_unlock(&chip->mutex); } } @@ -1402,20 +1397,4 @@ static void cfi_staa_destroy(struct mtd_info *mtd) kfree(cfi); } -static char im_name[]="cfi_cmdset_0020"; - -static int __init cfi_staa_init(void) -{ - inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0020); - return 0; -} - -static void __exit cfi_staa_exit(void) -{ - inter_module_unregister(im_name); -} - -module_init(cfi_staa_init); -module_exit(cfi_staa_exit); - MODULE_LICENSE("GPL"); diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c index cf750038ce6..e8d0164498b 100644 --- a/drivers/mtd/chips/cfi_probe.c +++ b/drivers/mtd/chips/cfi_probe.c @@ -1,10 +1,8 @@ -/* +/* Common Flash Interface probe code. (C) 2000 Red Hat. GPL'd. 
- $Id: cfi_probe.c,v 1.83 2004/11/16 18:19:02 nico Exp $ */ -#include <linux/config.h> #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> @@ -20,7 +18,7 @@ #include <linux/mtd/cfi.h> #include <linux/mtd/gen_probe.h> -//#define DEBUG_CFI +//#define DEBUG_CFI #ifdef DEBUG_CFI static void print_cfi_ident(struct cfi_ident *); @@ -40,23 +38,20 @@ struct mtd_info *cfi_probe(struct map_info *map); #define xip_allowed(base, map) \ do { \ (void) map_read(map, base); \ - asm volatile (".rep 8; nop; .endr"); \ + xip_iprefetch(); \ local_irq_enable(); \ } while (0) #define xip_enable(base, map, cfi) \ do { \ - cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); \ - cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL); \ + cfi_qry_mode_off(base, map, cfi); \ xip_allowed(base, map); \ } while (0) #define xip_disable_qry(base, map, cfi) \ do { \ xip_disable(); \ - cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); \ - cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL); \ - cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL); \ + cfi_qry_mode_on(base, map, cfi); \ } while (0) #else @@ -72,38 +67,12 @@ do { \ in: interleave,type,mode ret: table index, <0 for error */ -static int __xipram qry_present(struct map_info *map, __u32 base, - struct cfi_private *cfi) -{ - int osf = cfi->interleave * cfi->device_type; // scale factor - map_word val[3]; - map_word qry[3]; - - qry[0] = cfi_build_cmd('Q', map, cfi); - qry[1] = cfi_build_cmd('R', map, cfi); - qry[2] = cfi_build_cmd('Y', map, cfi); - - val[0] = map_read(map, base + osf*0x10); - val[1] = map_read(map, base + osf*0x11); - val[2] = map_read(map, base + osf*0x12); - - if (!map_word_equal(map, qry[0], val[0])) - return 0; - - if (!map_word_equal(map, qry[1], val[1])) - return 0; - - if (!map_word_equal(map, qry[2], val[2])) - return 0; - - return 1; // "QRY" found -} static int __xipram cfi_probe_chip(struct map_info *map, __u32 base, unsigned long *chip_map, struct cfi_private *cfi) { int i; - + if ((base + 0) >= map->size) { printk(KERN_NOTICE "Probe at base[0x00](0x%08lx) past the end of the map(0x%08lx)\n", @@ -118,17 +87,13 @@ static int __xipram cfi_probe_chip(struct map_info *map, __u32 base, } xip_disable(); - cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); - cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL); - cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL); - - if (!qry_present(map,base,cfi)) { + if (!cfi_qry_mode_on(base, map, cfi)) { xip_enable(base, map, cfi); return 0; } if (!cfi->numchips) { - /* This is the first time we're called. Set up the CFI + /* This is the first time we're called. Set up the CFI stuff accordingly and return */ return cfi_chip_setup(map, cfi); } @@ -138,32 +103,30 @@ static int __xipram cfi_probe_chip(struct map_info *map, __u32 base, unsigned long start; if(!test_bit(i, chip_map)) { /* Skip location; no valid chip at this address */ - continue; + continue; } start = i << cfi->chipshift; /* This chip should be in read mode if it's one we've already touched. */ - if (qry_present(map, start, cfi)) { - /* Eep. This chip also had the QRY marker. + if (cfi_qry_present(map, start, cfi)) { + /* Eep. This chip also had the QRY marker. * Is it an alias for the new one? 
*/ - cfi_send_gen_cmd(0xF0, 0, start, map, cfi, cfi->device_type, NULL); - cfi_send_gen_cmd(0xFF, 0, start, map, cfi, cfi->device_type, NULL); + cfi_qry_mode_off(start, map, cfi); /* If the QRY marker goes away, it's an alias */ - if (!qry_present(map, start, cfi)) { + if (!cfi_qry_present(map, start, cfi)) { xip_allowed(base, map); printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n", map->name, base, start); return 0; } - /* Yes, it's actually got QRY for data. Most + /* Yes, it's actually got QRY for data. Most * unfortunate. Stick the new chip in read mode * too and if it's the same, assume it's an alias. */ /* FIXME: Use other modes to do a proper check */ - cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); - cfi_send_gen_cmd(0xFF, 0, start, map, cfi, cfi->device_type, NULL); - - if (qry_present(map, base, cfi)) { + cfi_qry_mode_off(base, map, cfi); + + if (cfi_qry_present(map, base, cfi)) { xip_allowed(base, map); printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n", map->name, base, start); @@ -171,31 +134,31 @@ static int __xipram cfi_probe_chip(struct map_info *map, __u32 base, } } } - + /* OK, if we got to here, then none of the previous chips appear to be aliases for the current one. */ set_bit((base >> cfi->chipshift), chip_map); /* Update chip map */ cfi->numchips++; - + /* Put it back into Read Mode */ - cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); - cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL); + cfi_qry_mode_off(base, map, cfi); xip_allowed(base, map); printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n", map->name, cfi->interleave, cfi->device_type*8, base, map->bankwidth*8); - + return 1; } -static int __xipram cfi_chip_setup(struct map_info *map, +static int __xipram cfi_chip_setup(struct map_info *map, struct cfi_private *cfi) { int ofs_factor = cfi->interleave*cfi->device_type; __u32 base = 0; int num_erase_regions = cfi_read_query(map, base + (0x10 + 28)*ofs_factor); int i; + int addr_unlock1 = 0x555, addr_unlock2 = 0x2AA; xip_enable(base, map, cfi); #ifdef DEBUG_CFI @@ -205,40 +168,20 @@ static int __xipram cfi_chip_setup(struct map_info *map, return 0; cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL); - if (!cfi->cfiq) { - printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name); + if (!cfi->cfiq) return 0; - } - - memset(cfi->cfiq,0,sizeof(struct cfi_ident)); - + + memset(cfi->cfiq,0,sizeof(struct cfi_ident)); + cfi->cfi_mode = CFI_MODE_CFI; - + + cfi->sector_erase_cmd = CMD(0x30); + /* Read the CFI info structure */ xip_disable_qry(base, map, cfi); for (i=0; i<(sizeof(struct cfi_ident) + num_erase_regions * 4); i++) ((unsigned char *)cfi->cfiq)[i] = cfi_read_query(map,base + (0x10 + i)*ofs_factor); - /* Note we put the device back into Read Mode BEFORE going into Auto - * Select Mode, as some devices support nesting of modes, others - * don't. This way should always work. - * On cmdset 0001 the writes of 0xaa and 0x55 are not needed, and - * so should be treated as nops or illegal (and so put the device - * back into Read Mode, which is a nop in this case). 
- */ - cfi_send_gen_cmd(0xf0, 0, base, map, cfi, cfi->device_type, NULL); - cfi_send_gen_cmd(0xaa, 0x555, base, map, cfi, cfi->device_type, NULL); - cfi_send_gen_cmd(0x55, 0x2aa, base, map, cfi, cfi->device_type, NULL); - cfi_send_gen_cmd(0x90, 0x555, base, map, cfi, cfi->device_type, NULL); - cfi->mfr = cfi_read_query(map, base); - cfi->id = cfi_read_query(map, base + ofs_factor); - - /* Put it back into Read Mode */ - cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); - /* ... even if it's an Intel chip */ - cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL); - xip_allowed(base, map); - /* Do any necessary byteswapping */ cfi->cfiq->P_ID = le16_to_cpu(cfi->cfiq->P_ID); @@ -255,64 +198,96 @@ static int __xipram cfi_chip_setup(struct map_info *map, for (i=0; i<cfi->cfiq->NumEraseRegions; i++) { cfi->cfiq->EraseRegionInfo[i] = le32_to_cpu(cfi->cfiq->EraseRegionInfo[i]); - -#ifdef DEBUG_CFI + +#ifdef DEBUG_CFI printk(" Erase Region #%d: BlockSize 0x%4.4X bytes, %d blocks\n", - i, (cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff, + i, (cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff, (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1); #endif } - printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n", + if (cfi->cfiq->P_ID == P_ID_SST_OLD) { + addr_unlock1 = 0x5555; + addr_unlock2 = 0x2AAA; + } + + /* + * Note we put the device back into Read Mode BEFORE going into Auto + * Select Mode, as some devices support nesting of modes, others + * don't. This way should always work. + * On cmdset 0001 the writes of 0xaa and 0x55 are not needed, and + * so should be treated as nops or illegal (and so put the device + * back into Read Mode, which is a nop in this case). + */ + cfi_send_gen_cmd(0xf0, 0, base, map, cfi, cfi->device_type, NULL); + cfi_send_gen_cmd(0xaa, addr_unlock1, base, map, cfi, cfi->device_type, NULL); + cfi_send_gen_cmd(0x55, addr_unlock2, base, map, cfi, cfi->device_type, NULL); + cfi_send_gen_cmd(0x90, addr_unlock1, base, map, cfi, cfi->device_type, NULL); + cfi->mfr = cfi_read_query16(map, base); + cfi->id = cfi_read_query16(map, base + ofs_factor); + + /* Get AMD/Spansion extended JEDEC ID */ + if (cfi->mfr == CFI_MFR_AMD && (cfi->id & 0xff) == 0x7e) + cfi->id = cfi_read_query(map, base + 0xe * ofs_factor) << 8 | + cfi_read_query(map, base + 0xf * ofs_factor); + + /* Put it back into Read Mode */ + cfi_qry_mode_off(base, map, cfi); + xip_allowed(base, map); + + printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank. 
Manufacturer ID %#08x Chip ID %#08x\n", map->name, cfi->interleave, cfi->device_type*8, base, - map->bankwidth*8); + map->bankwidth*8, cfi->mfr, cfi->id); return 1; } #ifdef DEBUG_CFI -static char *vendorname(__u16 vendor) +static char *vendorname(__u16 vendor) { switch (vendor) { case P_ID_NONE: return "None"; - + case P_ID_INTEL_EXT: return "Intel/Sharp Extended"; - + case P_ID_AMD_STD: return "AMD/Fujitsu Standard"; - + case P_ID_INTEL_STD: return "Intel/Sharp Standard"; - + case P_ID_AMD_EXT: return "AMD/Fujitsu Extended"; case P_ID_WINBOND: return "Winbond Standard"; - + case P_ID_ST_ADV: return "ST Advanced"; case P_ID_MITSUBISHI_STD: return "Mitsubishi Standard"; - + case P_ID_MITSUBISHI_EXT: return "Mitsubishi Extended"; case P_ID_SST_PAGE: return "SST Page Write"; + case P_ID_SST_OLD: + return "SST 39VF160x/39VF320x"; + case P_ID_INTEL_PERFORMANCE: return "Intel Performance Code"; - + case P_ID_INTEL_DATA: return "Intel Data"; - + case P_ID_RESERVED: return "Not Allowed / Reserved for Future Use"; - + default: return "Unknown"; } @@ -325,21 +300,21 @@ static void print_cfi_ident(struct cfi_ident *cfip) if (cfip->qry[0] != 'Q' || cfip->qry[1] != 'R' || cfip->qry[2] != 'Y') { printk("Invalid CFI ident structure.\n"); return; - } -#endif + } +#endif printk("Primary Vendor Command Set: %4.4X (%s)\n", cfip->P_ID, vendorname(cfip->P_ID)); if (cfip->P_ADR) printk("Primary Algorithm Table at %4.4X\n", cfip->P_ADR); else printk("No Primary Algorithm Table\n"); - + printk("Alternative Vendor Command Set: %4.4X (%s)\n", cfip->A_ID, vendorname(cfip->A_ID)); if (cfip->A_ADR) printk("Alternate Algorithm Table at %4.4X\n", cfip->A_ADR); else printk("No Alternate Algorithm Table\n"); - - + + printk("Vcc Minimum: %2d.%d V\n", cfip->VccMin >> 4, cfip->VccMin & 0xf); printk("Vcc Maximum: %2d.%d V\n", cfip->VccMax >> 4, cfip->VccMax & 0xf); if (cfip->VppMin) { @@ -348,61 +323,61 @@ static void print_cfi_ident(struct cfi_ident *cfip) } else printk("No Vpp line\n"); - - printk("Typical byte/word write timeout: %d µs\n", 1<<cfip->WordWriteTimeoutTyp); - printk("Maximum byte/word write timeout: %d µs\n", (1<<cfip->WordWriteTimeoutMax) * (1<<cfip->WordWriteTimeoutTyp)); - + + printk("Typical byte/word write timeout: %d µs\n", 1<<cfip->WordWriteTimeoutTyp); + printk("Maximum byte/word write timeout: %d µs\n", (1<<cfip->WordWriteTimeoutMax) * (1<<cfip->WordWriteTimeoutTyp)); + if (cfip->BufWriteTimeoutTyp || cfip->BufWriteTimeoutMax) { - printk("Typical full buffer write timeout: %d µs\n", 1<<cfip->BufWriteTimeoutTyp); - printk("Maximum full buffer write timeout: %d µs\n", (1<<cfip->BufWriteTimeoutMax) * (1<<cfip->BufWriteTimeoutTyp)); + printk("Typical full buffer write timeout: %d µs\n", 1<<cfip->BufWriteTimeoutTyp); + printk("Maximum full buffer write timeout: %d µs\n", (1<<cfip->BufWriteTimeoutMax) * (1<<cfip->BufWriteTimeoutTyp)); } else printk("Full buffer write not supported\n"); - + printk("Typical block erase timeout: %d ms\n", 1<<cfip->BlockEraseTimeoutTyp); printk("Maximum block erase timeout: %d ms\n", (1<<cfip->BlockEraseTimeoutMax) * (1<<cfip->BlockEraseTimeoutTyp)); if (cfip->ChipEraseTimeoutTyp || cfip->ChipEraseTimeoutMax) { - printk("Typical chip erase timeout: %d ms\n", 1<<cfip->ChipEraseTimeoutTyp); + printk("Typical chip erase timeout: %d ms\n", 1<<cfip->ChipEraseTimeoutTyp); printk("Maximum chip erase timeout: %d ms\n", (1<<cfip->ChipEraseTimeoutMax) * (1<<cfip->ChipEraseTimeoutTyp)); } else printk("Chip erase not supported\n"); - + printk("Device size: 0x%X bytes (%d MiB)\n", 1 << 
cfip->DevSize, 1<< (cfip->DevSize - 20)); printk("Flash Device Interface description: 0x%4.4X\n", cfip->InterfaceDesc); switch(cfip->InterfaceDesc) { - case 0: + case CFI_INTERFACE_X8_ASYNC: printk(" - x8-only asynchronous interface\n"); break; - - case 1: + + case CFI_INTERFACE_X16_ASYNC: printk(" - x16-only asynchronous interface\n"); break; - - case 2: + + case CFI_INTERFACE_X8_BY_X16_ASYNC: printk(" - supports x8 and x16 via BYTE# with asynchronous interface\n"); break; - - case 3: + + case CFI_INTERFACE_X32_ASYNC: printk(" - x32-only asynchronous interface\n"); break; - - case 4: + + case CFI_INTERFACE_X16_BY_X32_ASYNC: printk(" - supports x16 and x32 via Word# with asynchronous interface\n"); break; - - case 65535: + + case CFI_INTERFACE_NOT_ALLOWED: printk(" - Not Allowed / Reserved\n"); break; - + default: printk(" - Unknown\n"); break; } - + printk("Max. bytes in buffer write: 0x%x\n", 1<< cfip->MaxBufWriteSize); printk("Number of Erase Block Regions: %d\n", cfip->NumEraseRegions); - + } #endif /* DEBUG_CFI */ @@ -426,7 +401,7 @@ static struct mtd_chip_driver cfi_chipdrv = { .module = THIS_MODULE }; -int __init cfi_probe_init(void) +static int __init cfi_probe_init(void) { register_mtd_chip_driver(&cfi_chipdrv); return 0; diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c index 2b2ede2bfcc..09c79bd0b4f 100644 --- a/drivers/mtd/chips/cfi_util.c +++ b/drivers/mtd/chips/cfi_util.c @@ -1,20 +1,16 @@ /* * Common Flash Interface support: - * Generic utility functions not dependant on command set + * Generic utility functions not dependent on command set * * Copyright (C) 2002 Red Hat * Copyright (C) 2003 STMicroelectronics Limited * * This code is covered by the GPL. - * - * $Id: cfi_util.c,v 1.8 2004/12/14 19:55:56 nico Exp $ - * */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> -#include <linux/sched.h> #include <asm/io.h> #include <asm/byteorder.h> @@ -26,7 +22,84 @@ #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/cfi.h> -#include <linux/mtd/compatmac.h> + +int __xipram cfi_qry_present(struct map_info *map, __u32 base, + struct cfi_private *cfi) +{ + int osf = cfi->interleave * cfi->device_type; /* scale factor */ + map_word val[3]; + map_word qry[3]; + + qry[0] = cfi_build_cmd('Q', map, cfi); + qry[1] = cfi_build_cmd('R', map, cfi); + qry[2] = cfi_build_cmd('Y', map, cfi); + + val[0] = map_read(map, base + osf*0x10); + val[1] = map_read(map, base + osf*0x11); + val[2] = map_read(map, base + osf*0x12); + + if (!map_word_equal(map, qry[0], val[0])) + return 0; + + if (!map_word_equal(map, qry[1], val[1])) + return 0; + + if (!map_word_equal(map, qry[2], val[2])) + return 0; + + return 1; /* "QRY" found */ +} +EXPORT_SYMBOL_GPL(cfi_qry_present); + +int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map, + struct cfi_private *cfi) +{ + cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); + cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL); + if (cfi_qry_present(map, base, cfi)) + return 1; + /* QRY not found probably we deal with some odd CFI chips */ + /* Some revisions of some old Intel chips? 
*/ + cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); + cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL); + cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL); + if (cfi_qry_present(map, base, cfi)) + return 1; + /* ST M29DW chips */ + cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); + cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL); + if (cfi_qry_present(map, base, cfi)) + return 1; + /* some old SST chips, e.g. 39VF160x/39VF320x */ + cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); + cfi_send_gen_cmd(0xAA, 0x5555, base, map, cfi, cfi->device_type, NULL); + cfi_send_gen_cmd(0x55, 0x2AAA, base, map, cfi, cfi->device_type, NULL); + cfi_send_gen_cmd(0x98, 0x5555, base, map, cfi, cfi->device_type, NULL); + if (cfi_qry_present(map, base, cfi)) + return 1; + /* SST 39VF640xB */ + cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); + cfi_send_gen_cmd(0xAA, 0x555, base, map, cfi, cfi->device_type, NULL); + cfi_send_gen_cmd(0x55, 0x2AA, base, map, cfi, cfi->device_type, NULL); + cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL); + if (cfi_qry_present(map, base, cfi)) + return 1; + /* QRY not found */ + return 0; +} +EXPORT_SYMBOL_GPL(cfi_qry_mode_on); + +void __xipram cfi_qry_mode_off(uint32_t base, struct map_info *map, + struct cfi_private *cfi) +{ + cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); + cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL); + /* M29W128G flashes require an additional reset command + when exit qry mode */ + if ((cfi->mfr == CFI_MFR_ST) && (cfi->id == 0x227E || cfi->id == 0x7E)) + cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); +} +EXPORT_SYMBOL_GPL(cfi_qry_mode_off); struct cfi_extquery * __xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* name) @@ -37,48 +110,36 @@ __xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* n int i; struct cfi_extquery *extp = NULL; - printk(" %s Extended Query Table at 0x%4.4X\n", name, adr); if (!adr) goto out; + printk(KERN_INFO "%s Extended Query Table at 0x%4.4X\n", name, adr); + extp = kmalloc(size, GFP_KERNEL); - if (!extp) { - printk(KERN_ERR "Failed to allocate memory\n"); + if (!extp) goto out; - } #ifdef CONFIG_MTD_XIP local_irq_disable(); #endif /* Switch it into Query Mode */ - cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL); - + cfi_qry_mode_on(base, map, cfi); /* Read in the Extended Query Table */ for (i=0; i<size; i++) { - ((unsigned char *)extp)[i] = + ((unsigned char *)extp)[i] = cfi_read_query(map, base+((adr+i)*ofs_factor)); } /* Make sure it returns to read mode */ - cfi_send_gen_cmd(0xf0, 0, base, map, cfi, cfi->device_type, NULL); - cfi_send_gen_cmd(0xff, 0, base, map, cfi, cfi->device_type, NULL); + cfi_qry_mode_off(base, map, cfi); #ifdef CONFIG_MTD_XIP (void) map_read(map, base); - asm volatile (".rep 8; nop; .endr"); + xip_iprefetch(); local_irq_enable(); #endif - if (extp->MajorVersion != '1' || - (extp->MinorVersion < '0' || extp->MinorVersion > '3')) { - printk(KERN_WARNING " Unknown %s Extended Query " - "version %c.%c.\n", name, extp->MajorVersion, - extp->MinorVersion); - kfree(extp); - extp = NULL; - } - out: return extp; } @@ -93,7 +154,7 @@ void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup *fixups) for (f=fixups; f->fixup; f++) { if (((f->mfr == CFI_MFR_ANY) || (f->mfr == cfi->mfr)) && ((f->id == CFI_ID_ANY) || (f->id == cfi->id))) { 
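			/*
			 * Wildcard matching: a (hypothetical) fixup table such as
			 *
			 *	static struct cfi_fixup fixups[] = {
			 *		{ CFI_MFR_SST, CFI_ID_ANY, fixup_sst_sectors },
			 *		{ CFI_MFR_ANY, CFI_ID_ANY, fixup_any_chip },
			 *		{ 0, 0, NULL }
			 *	};
			 *
			 * would apply fixup_sst_sectors() to every SST chip and
			 * fixup_any_chip() to everything probed; both handler
			 * names are made up for illustration.  Note the hook now
			 * takes only the mtd_info pointer - the old param
			 * argument is gone.
			 */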
- f->fixup(mtd, f->param); + f->fixup(mtd); } } } @@ -110,29 +171,23 @@ int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob, int i, first; struct mtd_erase_region_info *regions = mtd->eraseregions; - if (ofs > mtd->size) - return -EINVAL; - - if ((len + ofs) > mtd->size) - return -EINVAL; - /* Check that both start and end of the requested erase are * aligned with the erasesize at the appropriate addresses. */ i = 0; - /* Skip all erase regions which are ended before the start of + /* Skip all erase regions which are ended before the start of the requested erase. Actually, to save on the calculations, we skip to the first erase region which starts after the start of the requested erase, and then go back one. */ - + while (i < mtd->numeraseregions && ofs >= regions[i].offset) i++; i--; - /* OK, now i is pointing at the erase region in which this + /* OK, now i is pointing at the erase region in which this erase request starts. Check the start of the requested erase range is aligned with the erase size which is in effect here. @@ -155,7 +210,7 @@ int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob, the address actually falls */ i--; - + if ((ofs + len) & (regions[i].erasesize-1)) return -EINVAL; @@ -168,7 +223,7 @@ int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob, int size = regions[i].erasesize; ret = (*frob)(map, &cfi->chips[chipnum], adr, size, thunk); - + if (ret) return ret; @@ -182,9 +237,9 @@ int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob, if (adr >> cfi->chipshift) { adr = 0; chipnum++; - + if (chipnum >= cfi->numchips) - break; + break; } } diff --git a/drivers/mtd/chips/chipreg.c b/drivers/mtd/chips/chipreg.c index d7d739a108a..0bbc61ba952 100644 --- a/drivers/mtd/chips/chipreg.c +++ b/drivers/mtd/chips/chipreg.c @@ -1,19 +1,15 @@ /* - * $Id: chipreg.c,v 1.17 2004/11/16 18:29:00 dwmw2 Exp $ - * * Registration for chip drivers * */ #include <linux/kernel.h> -#include <linux/config.h> #include <linux/module.h> #include <linux/kmod.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/mtd/map.h> #include <linux/mtd/mtd.h> -#include <linux/mtd/compatmac.h> static DEFINE_SPINLOCK(chip_drvs_lock); static LIST_HEAD(chip_drvs_list); @@ -41,7 +37,7 @@ static struct mtd_chip_driver *get_mtd_chip_driver (const char *name) list_for_each(pos, &chip_drvs_list) { this = list_entry(pos, typeof(*this), list); - + if (!strcmp(this->name, name)) { ret = this; break; @@ -73,17 +69,14 @@ struct mtd_info *do_map_probe(const char *name, struct map_info *map) ret = drv->probe(map); - /* We decrease the use count here. It may have been a + /* We decrease the use count here. It may have been a probe-only module, which is no longer required from this point, having given us a handle on (and increased the use count of) the actual driver code. */ module_put(drv->module); - if (ret) - return ret; - - return NULL; + return ret; } /* * Destroy an MTD device which was created for a map device. diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h index e1a5b76596c..800b0e853e8 100644 --- a/drivers/mtd/chips/fwh_lock.h +++ b/drivers/mtd/chips/fwh_lock.h @@ -25,7 +25,7 @@ struct fwh_xxlock_thunk { * so this code has not been tested with interleaved chips, * and will likely fail in that context. 
*/ -static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip, +static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk) { struct cfi_private *cfi = map->fldrv_priv; @@ -34,8 +34,7 @@ static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip, /* Refuse the operation if the we cannot look behind the chip */ if (chip->start < 0x400000) { - DEBUG( MTD_DEBUG_LEVEL3, - "MTD %s(): chip->start: %lx wanted >= 0x400000\n", + pr_debug( "MTD %s(): chip->start: %lx wanted >= 0x400000\n", __func__, chip->start ); return -EIO; } @@ -44,7 +43,7 @@ static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip, * - on 64k boundariesand * - bit 1 set high * - block lock registers are 4MiB lower - overflow subtract (danger) - * + * * The address manipulation is first done on the logical address * which is 0 at the start of the chip, and then the offset of * the individual chip is addted to it. Any other order a weird @@ -58,25 +57,26 @@ static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip, * to flash memory - that means that we don't have to check status * and timeout. */ - spin_lock(chip->mutex); + mutex_lock(&chip->mutex); ret = get_chip(map, chip, adr, FL_LOCKING); if (ret) { - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); return ret; } + chip->oldstate = chip->state; chip->state = xxlt->state; map_write(map, CMD(xxlt->val), adr); /* Done and happy. */ - chip->state = FL_READY; + chip->state = chip->oldstate; put_chip(map, chip, adr); - spin_unlock(chip->mutex); + mutex_unlock(&chip->mutex); return 0; } -static int fwh_lock_varsize(struct mtd_info *mtd, loff_t ofs, size_t len) +static int fwh_lock_varsize(struct mtd_info *mtd, loff_t ofs, uint64_t len) { int ret; @@ -87,21 +87,21 @@ static int fwh_lock_varsize(struct mtd_info *mtd, loff_t ofs, size_t len) } -static int fwh_unlock_varsize(struct mtd_info *mtd, loff_t ofs, size_t len) +static int fwh_unlock_varsize(struct mtd_info *mtd, loff_t ofs, uint64_t len) { int ret; ret = cfi_varsize_frob(mtd, fwh_xxlock_oneblock, ofs, len, (void *)&FWH_XXLOCK_ONEBLOCK_UNLOCK); - + return ret; } -static void fixup_use_fwh_lock(struct mtd_info *mtd, void *param) +static void fixup_use_fwh_lock(struct mtd_info *mtd) { printk(KERN_NOTICE "using fwh lock/unlock method\n"); /* Setup for the chips with the fwh lock method */ - mtd->lock = fwh_lock_varsize; - mtd->unlock = fwh_unlock_varsize; + mtd->_lock = fwh_lock_varsize; + mtd->_unlock = fwh_unlock_varsize; } #endif /* FWH_LOCK_H */ diff --git a/drivers/mtd/chips/gen_probe.c b/drivers/mtd/chips/gen_probe.c index dc065b22f79..b57ceea2151 100644 --- a/drivers/mtd/chips/gen_probe.c +++ b/drivers/mtd/chips/gen_probe.c @@ -2,7 +2,6 @@ * Routines common to all CFI-type probes. * (C) 2001-2003 Red Hat, Inc. * GPL'd - * $Id: gen_probe.c,v 1.22 2005/01/24 23:49:50 rmk Exp $ */ #include <linux/kernel.h> @@ -26,7 +25,7 @@ struct mtd_info *mtd_do_chip_probe(struct map_info *map, struct chip_probe *cp) /* First probe the map to see if we have CFI stuff there. 
*/ cfi = genprobe_ident_chips(map, cp); - + if (!cfi) return NULL; @@ -36,12 +35,19 @@ struct mtd_info *mtd_do_chip_probe(struct map_info *map, struct chip_probe *cp) mtd = check_cmd_set(map, 1); /* First the primary cmdset */ if (!mtd) mtd = check_cmd_set(map, 0); /* Then the secondary */ - - if (mtd) + + if (mtd) { + if (mtd->size > map->size) { + printk(KERN_WARNING "Reducing visibility of %ldKiB chip to %ldKiB\n", + (unsigned long)mtd->size >> 10, + (unsigned long)map->size >> 10); + mtd->size = map->size; + } return mtd; + } printk(KERN_WARNING"gen_probe: No supported Vendor Command Set found\n"); - + kfree(cfi->cfiq); kfree(cfi); map->fldrv_priv = NULL; @@ -60,14 +66,14 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi memset(&cfi, 0, sizeof(cfi)); - /* Call the probetype-specific code with all permutations of + /* Call the probetype-specific code with all permutations of interleave and device type, etc. */ if (!genprobe_new_chip(map, cp, &cfi)) { /* The probe didn't like it */ - printk(KERN_DEBUG "%s: Found no %s device at location zero\n", - cp->name, map->name); + pr_debug("%s: Found no %s device at location zero\n", + cp->name, map->name); return NULL; - } + } #if 0 /* Let the CFI probe routine do this sanity check. The Intel and AMD probe routines won't ever return a broken CFI structure anyway, @@ -92,22 +98,25 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi } else { BUG(); } - + cfi.numchips = 1; - /* - * Allocate memory for bitmap of valid chips. - * Align bitmap storage size to full byte. - */ + /* + * Allocate memory for bitmap of valid chips. + * Align bitmap storage size to full byte. + */ max_chips = map->size >> cfi.chipshift; - mapsize = (max_chips / 8) + ((max_chips % 8) ? 1 : 0); - chip_map = kmalloc(mapsize, GFP_KERNEL); + if (!max_chips) { + printk(KERN_WARNING "NOR chip too large to fit in mapping. Attempting to cope...\n"); + max_chips = 1; + } + + mapsize = sizeof(long) * DIV_ROUND_UP(max_chips, BITS_PER_LONG); + chip_map = kzalloc(mapsize, GFP_KERNEL); if (!chip_map) { - printk(KERN_WARNING "%s: kmalloc failed for CFI chip map\n", map->name); kfree(cfi.cfiq); return NULL; } - memset (chip_map, 0, mapsize); set_bit(0, chip_map); /* Mark first chip valid */ @@ -122,14 +131,13 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi } /* - * Now allocate the space for the structures we need to return to + * Now allocate the space for the structures we need to return to * our caller, and copy the appropriate data into them. 
*/ retcfi = kmalloc(sizeof(struct cfi_private) + cfi.numchips * sizeof(struct flchip), GFP_KERNEL); if (!retcfi) { - printk(KERN_WARNING "%s: kmalloc failed for CFI private structure\n", map->name); kfree(cfi.cfiq); kfree(chip_map); return NULL; @@ -145,8 +153,7 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi pchip->start = (i << cfi.chipshift); pchip->state = FL_READY; init_waitqueue_head(&pchip->wq); - spin_lock_init(&pchip->_spinlock); - pchip->mutex = &pchip->_spinlock; + mutex_init(&pchip->mutex); } } @@ -154,7 +161,7 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi return retcfi; } - + static int genprobe_new_chip(struct map_info *map, struct chip_probe *cp, struct cfi_private *cfi) { @@ -189,30 +196,33 @@ extern cfi_cmdset_fn_t cfi_cmdset_0001; extern cfi_cmdset_fn_t cfi_cmdset_0002; extern cfi_cmdset_fn_t cfi_cmdset_0020; -static inline struct mtd_info *cfi_cmdset_unknown(struct map_info *map, +static inline struct mtd_info *cfi_cmdset_unknown(struct map_info *map, int primary) { struct cfi_private *cfi = map->fldrv_priv; __u16 type = primary?cfi->cfiq->P_ID:cfi->cfiq->A_ID; -#if defined(CONFIG_MODULES) && defined(HAVE_INTER_MODULE) - char probename[32]; +#ifdef CONFIG_MODULES + char probename[sizeof(VMLINUX_SYMBOL_STR(cfi_cmdset_%4.4X))]; cfi_cmdset_fn_t *probe_function; - sprintf(probename, "cfi_cmdset_%4.4X", type); - - probe_function = inter_module_get_request(probename, probename); + sprintf(probename, VMLINUX_SYMBOL_STR(cfi_cmdset_%4.4X), type); + + probe_function = __symbol_get(probename); + if (!probe_function) { + request_module("cfi_cmdset_%4.4X", type); + probe_function = __symbol_get(probename); + } if (probe_function) { struct mtd_info *mtd; mtd = (*probe_function)(map, primary); /* If it was happy, it'll have increased its own use count */ - inter_module_put(probename); + symbol_put_addr(probe_function); return mtd; } #endif - printk(KERN_NOTICE "Support for command set %04X not present\n", - type); + printk(KERN_NOTICE "Support for command set %04X not present\n", type); return NULL; } @@ -221,33 +231,32 @@ static struct mtd_info *check_cmd_set(struct map_info *map, int primary) { struct cfi_private *cfi = map->fldrv_priv; __u16 type = primary?cfi->cfiq->P_ID:cfi->cfiq->A_ID; - + if (type == P_ID_NONE || type == P_ID_RESERVED) return NULL; switch(type){ - /* Urgh. Ifdefs. The version with weak symbols was - * _much_ nicer. Shame it didn't seem to work on - * anything but x86, really. - * But we can't rely in inter_module_get() because - * that'd mean we depend on link order. - */ + /* We need these for the !CONFIG_MODULES case, + because symbol_get() doesn't work there */ #ifdef CONFIG_MTD_CFI_INTELEXT - case 0x0001: - case 0x0003: + case P_ID_INTEL_EXT: + case P_ID_INTEL_STD: + case P_ID_INTEL_PERFORMANCE: return cfi_cmdset_0001(map, primary); #endif #ifdef CONFIG_MTD_CFI_AMDSTD - case 0x0002: + case P_ID_AMD_STD: + case P_ID_SST_OLD: + case P_ID_WINBOND: return cfi_cmdset_0002(map, primary); #endif #ifdef CONFIG_MTD_CFI_STAA - case 0x0020: + case P_ID_ST_ADV: return cfi_cmdset_0020(map, primary); #endif + default: + return cfi_cmdset_unknown(map, primary); } - - return cfi_cmdset_unknown(map, primary); } MODULE_LICENSE("GPL"); diff --git a/drivers/mtd/chips/jedec.c b/drivers/mtd/chips/jedec.c deleted file mode 100644 index 62d235a9a4e..00000000000 --- a/drivers/mtd/chips/jedec.c +++ /dev/null @@ -1,934 +0,0 @@ - -/* JEDEC Flash Interface. 
- * This is an older type of interface for self programming flash. It is - * commonly use in older AMD chips and is obsolete compared with CFI. - * It is called JEDEC because the JEDEC association distributes the ID codes - * for the chips. - * - * See the AMD flash databook for information on how to operate the interface. - * - * This code does not support anything wider than 8 bit flash chips, I am - * not going to guess how to send commands to them, plus I expect they will - * all speak CFI.. - * - * $Id: jedec.c,v 1.22 2005/01/05 18:05:11 dwmw2 Exp $ - */ - -#include <linux/init.h> -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/mtd/jedec.h> -#include <linux/mtd/map.h> -#include <linux/mtd/mtd.h> -#include <linux/mtd/compatmac.h> - -static struct mtd_info *jedec_probe(struct map_info *); -static int jedec_probe8(struct map_info *map,unsigned long base, - struct jedec_private *priv); -static int jedec_probe16(struct map_info *map,unsigned long base, - struct jedec_private *priv); -static int jedec_probe32(struct map_info *map,unsigned long base, - struct jedec_private *priv); -static void jedec_flash_chip_scan(struct jedec_private *priv,unsigned long start, - unsigned long len); -static int flash_erase(struct mtd_info *mtd, struct erase_info *instr); -static int flash_write(struct mtd_info *mtd, loff_t start, size_t len, - size_t *retlen, const u_char *buf); - -static unsigned long my_bank_size; - -/* Listing of parts and sizes. We need this table to learn the sector - size of the chip and the total length */ -static const struct JEDECTable JEDEC_table[] = { - { - .jedec = 0x013D, - .name = "AMD Am29F017D", - .size = 2*1024*1024, - .sectorsize = 64*1024, - .capabilities = MTD_CAP_NORFLASH - }, - { - .jedec = 0x01AD, - .name = "AMD Am29F016", - .size = 2*1024*1024, - .sectorsize = 64*1024, - .capabilities = MTD_CAP_NORFLASH - }, - { - .jedec = 0x01D5, - .name = "AMD Am29F080", - .size = 1*1024*1024, - .sectorsize = 64*1024, - .capabilities = MTD_CAP_NORFLASH - }, - { - .jedec = 0x01A4, - .name = "AMD Am29F040", - .size = 512*1024, - .sectorsize = 64*1024, - .capabilities = MTD_CAP_NORFLASH - }, - { - .jedec = 0x20E3, - .name = "AMD Am29W040B", - .size = 512*1024, - .sectorsize = 64*1024, - .capabilities = MTD_CAP_NORFLASH - }, - { - .jedec = 0xC2AD, - .name = "Macronix MX29F016", - .size = 2*1024*1024, - .sectorsize = 64*1024, - .capabilities = MTD_CAP_NORFLASH - }, - { .jedec = 0x0 } -}; - -static const struct JEDECTable *jedec_idtoinf(__u8 mfr,__u8 id); -static void jedec_sync(struct mtd_info *mtd) {}; -static int jedec_read(struct mtd_info *mtd, loff_t from, size_t len, - size_t *retlen, u_char *buf); -static int jedec_read_banked(struct mtd_info *mtd, loff_t from, size_t len, - size_t *retlen, u_char *buf); - -static struct mtd_info *jedec_probe(struct map_info *map); - - - -static struct mtd_chip_driver jedec_chipdrv = { - .probe = jedec_probe, - .name = "jedec", - .module = THIS_MODULE -}; - -/* Probe entry point */ - -static struct mtd_info *jedec_probe(struct map_info *map) -{ - struct mtd_info *MTD; - struct jedec_private *priv; - unsigned long Base; - unsigned long SectorSize; - unsigned count; - unsigned I,Uniq; - char Part[200]; - memset(&priv,0,sizeof(priv)); - - MTD = kmalloc(sizeof(struct mtd_info) + sizeof(struct jedec_private), GFP_KERNEL); - if (!MTD) - return NULL; - - memset(MTD, 0, sizeof(struct mtd_info) + sizeof(struct jedec_private)); - priv = (struct jedec_private *)&MTD[1]; - - my_bank_size = map->size; - - if (map->size/my_bank_size > 
MAX_JEDEC_CHIPS) - { - printk("mtd: Increase MAX_JEDEC_CHIPS, too many banks.\n"); - kfree(MTD); - return NULL; - } - - for (Base = 0; Base < map->size; Base += my_bank_size) - { - // Perhaps zero could designate all tests? - if (map->buswidth == 0) - map->buswidth = 1; - - if (map->buswidth == 1){ - if (jedec_probe8(map,Base,priv) == 0) { - printk("did recognize jedec chip\n"); - kfree(MTD); - return NULL; - } - } - if (map->buswidth == 2) - jedec_probe16(map,Base,priv); - if (map->buswidth == 4) - jedec_probe32(map,Base,priv); - } - - // Get the biggest sector size - SectorSize = 0; - for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++) - { - // printk("priv->chips[%d].jedec is %x\n",I,priv->chips[I].jedec); - // printk("priv->chips[%d].sectorsize is %lx\n",I,priv->chips[I].sectorsize); - if (priv->chips[I].sectorsize > SectorSize) - SectorSize = priv->chips[I].sectorsize; - } - - // Quickly ensure that the other sector sizes are factors of the largest - for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++) - { - if ((SectorSize/priv->chips[I].sectorsize)*priv->chips[I].sectorsize != SectorSize) - { - printk("mtd: Failed. Device has incompatible mixed sector sizes\n"); - kfree(MTD); - return NULL; - } - } - - /* Generate a part name that includes the number of different chips and - other configuration information */ - count = 1; - strlcpy(Part,map->name,sizeof(Part)-10); - strcat(Part," "); - Uniq = 0; - for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++) - { - const struct JEDECTable *JEDEC; - - if (priv->chips[I+1].jedec == priv->chips[I].jedec) - { - count++; - continue; - } - - // Locate the chip in the jedec table - JEDEC = jedec_idtoinf(priv->chips[I].jedec >> 8,priv->chips[I].jedec); - if (JEDEC == 0) - { - printk("mtd: Internal Error, JEDEC not set\n"); - kfree(MTD); - return NULL; - } - - if (Uniq != 0) - strcat(Part,","); - Uniq++; - - if (count != 1) - sprintf(Part+strlen(Part),"%x*[%s]",count,JEDEC->name); - else - sprintf(Part+strlen(Part),"%s",JEDEC->name); - if (strlen(Part) > sizeof(Part)*2/3) - break; - count = 1; - } - - /* Determine if the chips are organized in a linear fashion, or if there - are empty banks. Note, the last bank does not count here, only the - first banks are important. Holes on non-bank boundaries can not exist - due to the way the detection algorithm works. */ - if (priv->size < my_bank_size) - my_bank_size = priv->size; - priv->is_banked = 0; - //printk("priv->size is %x, my_bank_size is %x\n",priv->size,my_bank_size); - //printk("priv->bank_fill[0] is %x\n",priv->bank_fill[0]); - if (!priv->size) { - printk("priv->size is zero\n"); - kfree(MTD); - return NULL; - } - if (priv->size/my_bank_size) { - if (priv->size/my_bank_size == 1) { - priv->size = my_bank_size; - } - else { - for (I = 0; I != priv->size/my_bank_size - 1; I++) - { - if (priv->bank_fill[I] != my_bank_size) - priv->is_banked = 1; - - /* This even could be eliminated, but new de-optimized read/write - functions have to be written */ - printk("priv->bank_fill[%d] is %lx, priv->bank_fill[0] is %lx\n",I,priv->bank_fill[I],priv->bank_fill[0]); - if (priv->bank_fill[I] != priv->bank_fill[0]) - { - printk("mtd: Failed. 
Cannot handle unsymmetric banking\n"); - kfree(MTD); - return NULL; - } - } - } - } - if (priv->is_banked == 1) - strcat(Part,", banked"); - - // printk("Part: '%s'\n",Part); - - memset(MTD,0,sizeof(*MTD)); - // strlcpy(MTD->name,Part,sizeof(MTD->name)); - MTD->name = map->name; - MTD->type = MTD_NORFLASH; - MTD->flags = MTD_CAP_NORFLASH; - MTD->erasesize = SectorSize*(map->buswidth); - // printk("MTD->erasesize is %x\n",(unsigned int)MTD->erasesize); - MTD->size = priv->size; - // printk("MTD->size is %x\n",(unsigned int)MTD->size); - //MTD->module = THIS_MODULE; // ? Maybe this should be the low level module? - MTD->erase = flash_erase; - if (priv->is_banked == 1) - MTD->read = jedec_read_banked; - else - MTD->read = jedec_read; - MTD->write = flash_write; - MTD->sync = jedec_sync; - MTD->priv = map; - map->fldrv_priv = priv; - map->fldrv = &jedec_chipdrv; - __module_get(THIS_MODULE); - return MTD; -} - -/* Helper for the JEDEC function, JEDEC numbers all have odd parity */ -static int checkparity(u_char C) -{ - u_char parity = 0; - while (C != 0) - { - parity ^= C & 1; - C >>= 1; - } - - return parity == 1; -} - - -/* Take an array of JEDEC numbers that represent interleved flash chips - and process them. Check to make sure they are good JEDEC numbers, look - them up and then add them to the chip list */ -static int handle_jedecs(struct map_info *map,__u8 *Mfg,__u8 *Id,unsigned Count, - unsigned long base,struct jedec_private *priv) -{ - unsigned I,J; - unsigned long Size; - unsigned long SectorSize; - const struct JEDECTable *JEDEC; - - // Test #2 JEDEC numbers exhibit odd parity - for (I = 0; I != Count; I++) - { - if (checkparity(Mfg[I]) == 0 || checkparity(Id[I]) == 0) - return 0; - } - - // Finally, just make sure all the chip sizes are the same - JEDEC = jedec_idtoinf(Mfg[0],Id[0]); - - if (JEDEC == 0) - { - printk("mtd: Found JEDEC flash chip, but do not have a table entry for %x:%x\n",Mfg[0],Mfg[1]); - return 0; - } - - Size = JEDEC->size; - SectorSize = JEDEC->sectorsize; - for (I = 0; I != Count; I++) - { - JEDEC = jedec_idtoinf(Mfg[0],Id[0]); - if (JEDEC == 0) - { - printk("mtd: Found JEDEC flash chip, but do not have a table entry for %x:%x\n",Mfg[0],Mfg[1]); - return 0; - } - - if (Size != JEDEC->size || SectorSize != JEDEC->sectorsize) - { - printk("mtd: Failed. Interleved flash does not have matching characteristics\n"); - return 0; - } - } - - // Load the Chips - for (I = 0; I != MAX_JEDEC_CHIPS; I++) - { - if (priv->chips[I].jedec == 0) - break; - } - - if (I + Count > MAX_JEDEC_CHIPS) - { - printk("mtd: Device has too many chips. Increase MAX_JEDEC_CHIPS\n"); - return 0; - } - - // Add them to the table - for (J = 0; J != Count; J++) - { - unsigned long Bank; - - JEDEC = jedec_idtoinf(Mfg[J],Id[J]); - priv->chips[I].jedec = (Mfg[J] << 8) | Id[J]; - priv->chips[I].size = JEDEC->size; - priv->chips[I].sectorsize = JEDEC->sectorsize; - priv->chips[I].base = base + J; - priv->chips[I].datashift = J*8; - priv->chips[I].capabilities = JEDEC->capabilities; - priv->chips[I].offset = priv->size + J; - - // log2 n :| - priv->chips[I].addrshift = 0; - for (Bank = Count; Bank != 1; Bank >>= 1, priv->chips[I].addrshift++); - - // Determine how filled this bank is. 
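Worked example of the interleave arithmetic set up above (a hypothetical stand-alone demo, not driver code): with two x8 chips sharing one bus word, handle_jedecs() ends up with addrshift == 1 and datashift values of 0 and 8, so byte N of chip J sits at map offset J + (N << 1) from the pair's base; the bank-fill bookkeeping that follows then records how much of each bank window those chips actually cover.

#include <stdio.h>

/* Hypothetical demo of the old jedec.c interleave arithmetic. */
int main(void)
{
	unsigned count = 2;		/* two interleaved x8 chips (assumed) */
	unsigned addrshift = 0;
	unsigned bank, j;

	for (bank = count; bank != 1; bank >>= 1)
		addrshift++;		/* log2(count), as in handle_jedecs() */

	for (j = 0; j != count; j++)
		printf("chip %u: datashift %u, byte 0x100 at map offset 0x%lx\n",
		       j, j * 8, (unsigned long)(j + (0x100UL << addrshift)));
	return 0;
}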
- Bank = base & (~(my_bank_size-1)); - if (priv->bank_fill[Bank/my_bank_size] < base + - (JEDEC->size << priv->chips[I].addrshift) - Bank) - priv->bank_fill[Bank/my_bank_size] = base + (JEDEC->size << priv->chips[I].addrshift) - Bank; - I++; - } - - priv->size += priv->chips[I-1].size*Count; - - return priv->chips[I-1].size; -} - -/* Lookup the chip information from the JEDEC ID table. */ -static const struct JEDECTable *jedec_idtoinf(__u8 mfr,__u8 id) -{ - __u16 Id = (mfr << 8) | id; - unsigned long I = 0; - for (I = 0; JEDEC_table[I].jedec != 0; I++) - if (JEDEC_table[I].jedec == Id) - return JEDEC_table + I; - return NULL; -} - -// Look for flash using an 8 bit bus interface -static int jedec_probe8(struct map_info *map,unsigned long base, - struct jedec_private *priv) -{ - #define flread(x) map_read8(map,base+x) - #define flwrite(v,x) map_write8(map,v,base+x) - - const unsigned long AutoSel1 = 0xAA; - const unsigned long AutoSel2 = 0x55; - const unsigned long AutoSel3 = 0x90; - const unsigned long Reset = 0xF0; - __u32 OldVal; - __u8 Mfg[1]; - __u8 Id[1]; - unsigned I; - unsigned long Size; - - // Wait for any write/erase operation to settle - OldVal = flread(base); - for (I = 0; OldVal != flread(base) && I < 10000; I++) - OldVal = flread(base); - - // Reset the chip - flwrite(Reset,0x555); - - // Send the sequence - flwrite(AutoSel1,0x555); - flwrite(AutoSel2,0x2AA); - flwrite(AutoSel3,0x555); - - // Get the JEDEC numbers - Mfg[0] = flread(0); - Id[0] = flread(1); - // printk("Mfg is %x, Id is %x\n",Mfg[0],Id[0]); - - Size = handle_jedecs(map,Mfg,Id,1,base,priv); - // printk("handle_jedecs Size is %x\n",(unsigned int)Size); - if (Size == 0) - { - flwrite(Reset,0x555); - return 0; - } - - - // Reset. - flwrite(Reset,0x555); - - return 1; - - #undef flread - #undef flwrite -} - -// Look for flash using a 16 bit bus interface (ie 2 8-bit chips) -static int jedec_probe16(struct map_info *map,unsigned long base, - struct jedec_private *priv) -{ - return 0; -} - -// Look for flash using a 32 bit bus interface (ie 4 8-bit chips) -static int jedec_probe32(struct map_info *map,unsigned long base, - struct jedec_private *priv) -{ - #define flread(x) map_read32(map,base+((x)<<2)) - #define flwrite(v,x) map_write32(map,v,base+((x)<<2)) - - const unsigned long AutoSel1 = 0xAAAAAAAA; - const unsigned long AutoSel2 = 0x55555555; - const unsigned long AutoSel3 = 0x90909090; - const unsigned long Reset = 0xF0F0F0F0; - __u32 OldVal; - __u8 Mfg[4]; - __u8 Id[4]; - unsigned I; - unsigned long Size; - - // Wait for any write/erase operation to settle - OldVal = flread(base); - for (I = 0; OldVal != flread(base) && I < 10000; I++) - OldVal = flread(base); - - // Reset the chip - flwrite(Reset,0x555); - - // Send the sequence - flwrite(AutoSel1,0x555); - flwrite(AutoSel2,0x2AA); - flwrite(AutoSel3,0x555); - - // Test #1, JEDEC numbers are readable from 0x??00/0x??01 - if (flread(0) != flread(0x100) || - flread(1) != flread(0x101)) - { - flwrite(Reset,0x555); - return 0; - } - - // Split up the JEDEC numbers - OldVal = flread(0); - for (I = 0; I != 4; I++) - Mfg[I] = (OldVal >> (I*8)); - OldVal = flread(1); - for (I = 0; I != 4; I++) - Id[I] = (OldVal >> (I*8)); - - Size = handle_jedecs(map,Mfg,Id,4,base,priv); - if (Size == 0) - { - flwrite(Reset,0x555); - return 0; - } - - /* Check if there is address wrap around within a single bank, if this - returns JEDEC numbers then we assume that it is wrap around. 
Notice - we call this routine with the JEDEC return still enabled, if two or - more flashes have a truncated address space the probe test will still - work */ - if (base + (Size<<2)+0x555 < map->size && - base + (Size<<2)+0x555 < (base & (~(my_bank_size-1))) + my_bank_size) - { - if (flread(base+Size) != flread(base+Size + 0x100) || - flread(base+Size + 1) != flread(base+Size + 0x101)) - { - jedec_probe32(map,base+Size,priv); - } - } - - // Reset. - flwrite(0xF0F0F0F0,0x555); - - return 1; - - #undef flread - #undef flwrite -} - -/* Linear read. */ -static int jedec_read(struct mtd_info *mtd, loff_t from, size_t len, - size_t *retlen, u_char *buf) -{ - struct map_info *map = mtd->priv; - - map_copy_from(map, buf, from, len); - *retlen = len; - return 0; -} - -/* Banked read. Take special care to jump past the holes in the bank - mapping. This version assumes symetry in the holes.. */ -static int jedec_read_banked(struct mtd_info *mtd, loff_t from, size_t len, - size_t *retlen, u_char *buf) -{ - struct map_info *map = mtd->priv; - struct jedec_private *priv = map->fldrv_priv; - - *retlen = 0; - while (len > 0) - { - // Determine what bank and offset into that bank the first byte is - unsigned long bank = from & (~(priv->bank_fill[0]-1)); - unsigned long offset = from & (priv->bank_fill[0]-1); - unsigned long get = len; - if (priv->bank_fill[0] - offset < len) - get = priv->bank_fill[0] - offset; - - bank /= priv->bank_fill[0]; - map_copy_from(map,buf + *retlen,bank*my_bank_size + offset,get); - - len -= get; - *retlen += get; - from += get; - } - return 0; -} - -/* Pass the flags value that the flash return before it re-entered read - mode. */ -static void jedec_flash_failed(unsigned char code) -{ - /* Bit 5 being high indicates that there was an internal device - failure, erasure time limits exceeded or something */ - if ((code & (1 << 5)) != 0) - { - printk("mtd: Internal Flash failure\n"); - return; - } - printk("mtd: Programming didn't take\n"); -} - -/* This uses the erasure function described in the AMD Flash Handbook, - it will work for flashes with a fixed sector size only. Flashes with - a selection of sector sizes (ie the AMD Am29F800B) will need a different - routine. This routine tries to parallize erasing multiple chips/sectors - where possible */ -static int flash_erase(struct mtd_info *mtd, struct erase_info *instr) -{ - // Does IO to the currently selected chip - #define flread(x) map_read8(map,chip->base+((x)<<chip->addrshift)) - #define flwrite(v,x) map_write8(map,v,chip->base+((x)<<chip->addrshift)) - - unsigned long Time = 0; - unsigned long NoTime = 0; - unsigned long start = instr->addr, len = instr->len; - unsigned int I; - struct map_info *map = mtd->priv; - struct jedec_private *priv = map->fldrv_priv; - - // Verify the arguments.. 
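A quick illustration of the argument checks that follow (hypothetical stand-alone demo with made-up geometry): both the start and the length of an erase must be multiples of the sector size, the length must cover at least one sector, and the range must stay inside the device, otherwise the old driver returns -EINVAL.

#include <stdio.h>

/* Hypothetical demo of the range check in the old flash_erase(). */
static int erase_args_ok(unsigned long start, unsigned long len,
			 unsigned long size, unsigned long erasesize)
{
	return !(start + len > size ||
		 (start % erasesize) != 0 ||
		 (len % erasesize) != 0 ||
		 (len / erasesize) == 0);
}

int main(void)
{
	unsigned long size = 2 * 1024 * 1024, esz = 64 * 1024;	/* assumed geometry */

	printf("%d\n", erase_args_ok(0x000000, 0x20000, size, esz));	/* 1: two full sectors */
	printf("%d\n", erase_args_ok(0x008000, 0x10000, size, esz));	/* 0: start not sector-aligned */
	printf("%d\n", erase_args_ok(0x1F0000, 0x20000, size, esz));	/* 0: runs past the end */
	return 0;
}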
- if (start + len > mtd->size || - (start % mtd->erasesize) != 0 || - (len % mtd->erasesize) != 0 || - (len/mtd->erasesize) == 0) - return -EINVAL; - - jedec_flash_chip_scan(priv,start,len); - - // Start the erase sequence on each chip - for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++) - { - unsigned long off; - struct jedec_flash_chip *chip = priv->chips + I; - - if (chip->length == 0) - continue; - - if (chip->start + chip->length > chip->size) - { - printk("DIE\n"); - return -EIO; - } - - flwrite(0xF0,chip->start + 0x555); - flwrite(0xAA,chip->start + 0x555); - flwrite(0x55,chip->start + 0x2AA); - flwrite(0x80,chip->start + 0x555); - flwrite(0xAA,chip->start + 0x555); - flwrite(0x55,chip->start + 0x2AA); - - /* Once we start selecting the erase sectors the delay between each - command must not exceed 50us or it will immediately start erasing - and ignore the other sectors */ - for (off = 0; off < len; off += chip->sectorsize) - { - // Check to make sure we didn't timeout - flwrite(0x30,chip->start + off); - if (off == 0) - continue; - if ((flread(chip->start + off) & (1 << 3)) != 0) - { - printk("mtd: Ack! We timed out the erase timer!\n"); - return -EIO; - } - } - } - - /* We could split this into a timer routine and return early, performing - background erasure.. Maybe later if the need warrents */ - - /* Poll the flash for erasure completion, specs say this can take as long - as 480 seconds to do all the sectors (for a 2 meg flash). - Erasure time is dependent on chip age, temp and wear.. */ - - /* This being a generic routine assumes a 32 bit bus. It does read32s - and bundles interleved chips into the same grouping. This will work - for all bus widths */ - Time = 0; - NoTime = 0; - for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++) - { - struct jedec_flash_chip *chip = priv->chips + I; - unsigned long off = 0; - unsigned todo[4] = {0,0,0,0}; - unsigned todo_left = 0; - unsigned J; - - if (chip->length == 0) - continue; - - /* Find all chips in this data line, realistically this is all - or nothing up to the interleve count */ - for (J = 0; priv->chips[J].jedec != 0 && J < MAX_JEDEC_CHIPS; J++) - { - if ((priv->chips[J].base & (~((1<<chip->addrshift)-1))) == - (chip->base & (~((1<<chip->addrshift)-1)))) - { - todo_left++; - todo[priv->chips[J].base & ((1<<chip->addrshift)-1)] = 1; - } - } - - /* printk("todo: %x %x %x %x\n",(short)todo[0],(short)todo[1], - (short)todo[2],(short)todo[3]); - */ - while (1) - { - __u32 Last[4]; - unsigned long Count = 0; - - /* During erase bit 7 is held low and bit 6 toggles, we watch this, - should it stop toggling or go high then the erase is completed, - or this is not really flash ;> */ - switch (map->buswidth) { - case 1: - Last[0] = map_read8(map,(chip->base >> chip->addrshift) + chip->start + off); - Last[1] = map_read8(map,(chip->base >> chip->addrshift) + chip->start + off); - Last[2] = map_read8(map,(chip->base >> chip->addrshift) + chip->start + off); - break; - case 2: - Last[0] = map_read16(map,(chip->base >> chip->addrshift) + chip->start + off); - Last[1] = map_read16(map,(chip->base >> chip->addrshift) + chip->start + off); - Last[2] = map_read16(map,(chip->base >> chip->addrshift) + chip->start + off); - break; - case 3: - Last[0] = map_read32(map,(chip->base >> chip->addrshift) + chip->start + off); - Last[1] = map_read32(map,(chip->base >> chip->addrshift) + chip->start + off); - Last[2] = map_read32(map,(chip->base >> chip->addrshift) + chip->start + off); - break; - } - Count = 3; - while 
(todo_left != 0) - { - for (J = 0; J != 4; J++) - { - __u8 Byte1 = (Last[(Count-1)%4] >> (J*8)) & 0xFF; - __u8 Byte2 = (Last[(Count-2)%4] >> (J*8)) & 0xFF; - __u8 Byte3 = (Last[(Count-3)%4] >> (J*8)) & 0xFF; - if (todo[J] == 0) - continue; - - if ((Byte1 & (1 << 7)) == 0 && Byte1 != Byte2) - { -// printk("Check %x %x %x\n",(short)J,(short)Byte1,(short)Byte2); - continue; - } - - if (Byte1 == Byte2) - { - jedec_flash_failed(Byte3); - return -EIO; - } - - todo[J] = 0; - todo_left--; - } - -/* if (NoTime == 0) - Time += HZ/10 - schedule_timeout(HZ/10);*/ - NoTime = 0; - - switch (map->buswidth) { - case 1: - Last[Count % 4] = map_read8(map,(chip->base >> chip->addrshift) + chip->start + off); - break; - case 2: - Last[Count % 4] = map_read16(map,(chip->base >> chip->addrshift) + chip->start + off); - break; - case 4: - Last[Count % 4] = map_read32(map,(chip->base >> chip->addrshift) + chip->start + off); - break; - } - Count++; - -/* // Count time, max of 15s per sector (according to AMD) - if (Time > 15*len/mtd->erasesize*HZ) - { - printk("mtd: Flash Erase Timed out\n"); - return -EIO; - } */ - } - - // Skip to the next chip if we used chip erase - if (chip->length == chip->size) - off = chip->size; - else - off += chip->sectorsize; - - if (off >= chip->length) - break; - NoTime = 1; - } - - for (J = 0; priv->chips[J].jedec != 0 && J < MAX_JEDEC_CHIPS; J++) - { - if ((priv->chips[J].base & (~((1<<chip->addrshift)-1))) == - (chip->base & (~((1<<chip->addrshift)-1)))) - priv->chips[J].length = 0; - } - } - - //printk("done\n"); - instr->state = MTD_ERASE_DONE; - mtd_erase_callback(instr); - return 0; - - #undef flread - #undef flwrite -} - -/* This is the simple flash writing function. It writes to every byte, in - sequence. It takes care of how to properly address the flash if - the flash is interleved. It can only be used if all the chips in the - array are identical!*/ -static int flash_write(struct mtd_info *mtd, loff_t start, size_t len, - size_t *retlen, const u_char *buf) -{ - /* Does IO to the currently selected chip. It takes the bank addressing - base (which is divisible by the chip size) adds the necessary lower bits - of addrshift (interleave index) and then adds the control register index. */ - #define flread(x) map_read8(map,base+(off&((1<<chip->addrshift)-1))+((x)<<chip->addrshift)) - #define flwrite(v,x) map_write8(map,v,base+(off&((1<<chip->addrshift)-1))+((x)<<chip->addrshift)) - - struct map_info *map = mtd->priv; - struct jedec_private *priv = map->fldrv_priv; - unsigned long base; - unsigned long off; - size_t save_len = len; - - if (start + len > mtd->size) - return -EIO; - - //printk("Here"); - - //printk("flash_write: start is %x, len is %x\n",start,(unsigned long)len); - while (len != 0) - { - struct jedec_flash_chip *chip = priv->chips; - unsigned long bank; - unsigned long boffset; - - // Compute the base of the flash. - off = ((unsigned long)start) % (chip->size << chip->addrshift); - base = start - off; - - // Perform banked addressing translation. 
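Worked example of the banked translation performed just below (hypothetical numbers, stand-alone demo): if only 1 MiB of each 2 MiB bank window is populated (bank_fill[0] == 0x100000, my_bank_size == 0x200000), MTD offset 0x180000 belongs to the second bank and lands at map offset 1*0x200000 + 0x80000 = 0x280000. The demo mirrors the mask/divide arithmetic of the code that follows.

#include <stdio.h>

/* Hypothetical demo of the old jedec.c banked address translation. */
int main(void)
{
	unsigned long bank_fill = 0x100000;	/* usable bytes per bank (assumed) */
	unsigned long bank_size = 0x200000;	/* my_bank_size, the bank window (assumed) */
	unsigned long logical   = 0x180000;	/* address as seen by MTD */

	unsigned long bank    = logical & ~(bank_fill - 1);
	unsigned long offset  = logical &  (bank_fill - 1);
	unsigned long map_off = (bank / bank_fill) * bank_size + offset;

	printf("logical 0x%lx -> map offset 0x%lx\n", logical, map_off);	/* 0x280000 */
	return 0;
}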
- bank = base & (~(priv->bank_fill[0]-1)); - boffset = base & (priv->bank_fill[0]-1); - bank = (bank/priv->bank_fill[0])*my_bank_size; - base = bank + boffset; - - // printk("Flasing %X %X %X\n",base,chip->size,len); - // printk("off is %x, compare with %x\n",off,chip->size << chip->addrshift); - - // Loop over this page - for (; off != (chip->size << chip->addrshift) && len != 0; start++, len--, off++,buf++) - { - unsigned char oldbyte = map_read8(map,base+off); - unsigned char Last[4]; - unsigned long Count = 0; - - if (oldbyte == *buf) { - // printk("oldbyte and *buf is %x,len is %x\n",oldbyte,len); - continue; - } - if (((~oldbyte) & *buf) != 0) - printk("mtd: warn: Trying to set a 0 to a 1\n"); - - // Write - flwrite(0xAA,0x555); - flwrite(0x55,0x2AA); - flwrite(0xA0,0x555); - map_write8(map,*buf,base + off); - Last[0] = map_read8(map,base + off); - Last[1] = map_read8(map,base + off); - Last[2] = map_read8(map,base + off); - - /* Wait for the flash to finish the operation. We store the last 4 - status bytes that have been retrieved so we can determine why - it failed. The toggle bits keep toggling when there is a - failure */ - for (Count = 3; Last[(Count - 1) % 4] != Last[(Count - 2) % 4] && - Count < 10000; Count++) - Last[Count % 4] = map_read8(map,base + off); - if (Last[(Count - 1) % 4] != *buf) - { - jedec_flash_failed(Last[(Count - 3) % 4]); - return -EIO; - } - } - } - *retlen = save_len; - return 0; -} - -/* This is used to enhance the speed of the erase routine, - when things are being done to multiple chips it is possible to - parallize the operations, particularly full memory erases of multi - chip memories benifit */ -static void jedec_flash_chip_scan(struct jedec_private *priv,unsigned long start, - unsigned long len) -{ - unsigned int I; - - // Zero the records - for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++) - priv->chips[I].start = priv->chips[I].length = 0; - - // Intersect the region with each chip - for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++) - { - struct jedec_flash_chip *chip = priv->chips + I; - unsigned long ByteStart; - unsigned long ChipEndByte = chip->offset + (chip->size << chip->addrshift); - - // End is before this chip or the start is after it - if (start+len < chip->offset || - ChipEndByte - (1 << chip->addrshift) < start) - continue; - - if (start < chip->offset) - { - ByteStart = chip->offset; - chip->start = 0; - } - else - { - chip->start = (start - chip->offset + (1 << chip->addrshift)-1) >> chip->addrshift; - ByteStart = start; - } - - if (start + len >= ChipEndByte) - chip->length = (ChipEndByte - ByteStart) >> chip->addrshift; - else - chip->length = (start + len - ByteStart + (1 << chip->addrshift)-1) >> chip->addrshift; - } -} - -int __init jedec_init(void) -{ - register_mtd_chip_driver(&jedec_chipdrv); - return 0; -} - -static void __exit jedec_exit(void) -{ - unregister_mtd_chip_driver(&jedec_chipdrv); -} - -module_init(jedec_init); -module_exit(jedec_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Jason Gunthorpe <jgg@deltatee.com> et al."); -MODULE_DESCRIPTION("Old MTD chip driver for JEDEC-compliant flash chips"); diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c index 30da428eb7b..7c0b27d132b 100644 --- a/drivers/mtd/chips/jedec_probe.c +++ b/drivers/mtd/chips/jedec_probe.c @@ -1,14 +1,12 @@ -/* +/* Common Flash Interface probe code. (C) 2000 Red Hat. GPL'd. 
- $Id: jedec_probe.c,v 1.63 2005/02/14 16:30:32 bjd Exp $ See JEDEC (http://www.jedec.org/) standard JESD21C (section 3.5) for the standard this probe goes back to. Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com */ -#include <linux/config.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> @@ -18,30 +16,14 @@ #include <linux/errno.h> #include <linux/slab.h> #include <linux/interrupt.h> -#include <linux/init.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/cfi.h> #include <linux/mtd/gen_probe.h> -/* Manufacturers */ -#define MANUFACTURER_AMD 0x0001 -#define MANUFACTURER_ATMEL 0x001f -#define MANUFACTURER_FUJITSU 0x0004 -#define MANUFACTURER_HYUNDAI 0x00AD -#define MANUFACTURER_INTEL 0x0089 -#define MANUFACTURER_MACRONIX 0x00C2 -#define MANUFACTURER_NEC 0x0010 -#define MANUFACTURER_PMC 0x009D -#define MANUFACTURER_SST 0x00BF -#define MANUFACTURER_ST 0x0020 -#define MANUFACTURER_TOSHIBA 0x0098 -#define MANUFACTURER_WINBOND 0x00da - - /* AMD */ -#define AM29DL800BB 0x22C8 +#define AM29DL800BB 0x22CB #define AM29DL800BT 0x224A #define AM29F800BB 0x2258 @@ -59,6 +41,8 @@ #define AM29LV040B 0x004F #define AM29F032B 0x0041 #define AM29F002T 0x00B0 +#define AM29SL800DB 0x226B +#define AM29SL800DT 0x22EA /* Atmel */ #define AT49BV512 0x0003 @@ -68,8 +52,13 @@ #define AT49BV32X 0x00C8 #define AT49BV32XT 0x00C9 +/* Eon */ +#define EN29SL800BB 0x226B +#define EN29SL800BT 0x22EA + /* Fujitsu */ #define MBM29F040C 0x00A4 +#define MBM29F800BA 0x2258 #define MBM29LV650UE 0x22D7 #define MBM29LV320TE 0x22F6 #define MBM29LV320BE 0x22F9 @@ -104,6 +93,11 @@ #define I28F320B3B 0x8897 #define I28F640B3T 0x8898 #define I28F640B3B 0x8899 +#define I28F640C3B 0x88CD +#define I28F160F3T 0x88F3 +#define I28F160F3B 0x88F4 +#define I28F160C3T 0x88C2 +#define I28F160C3B 0x88C3 #define I82802AB 0x00ad #define I82802AC 0x00ac @@ -111,6 +105,7 @@ #define MX29LV040C 0x004F #define MX29LV160T 0x22C4 #define MX29LV160B 0x2249 +#define MX29F040 0x00A4 #define MX29F016 0x00AD #define MX29F002T 0x00B0 #define MX29F004T 0x0045 @@ -124,9 +119,15 @@ #define PM49FL004 0x006E #define PM49FL008 0x006A +/* Sharp */ +#define LH28F640BF 0x00B0 + /* ST - www.st.com */ -#define M29W800DT 0x00D7 -#define M29W800DB 0x005B +#define M29F800AB 0x0058 +#define M29W800DT 0x22D7 +#define M29W800DB 0x225B +#define M29W400DT 0x00EE +#define M29W400DB 0x00EF #define M29W160DT 0x22C4 #define M29W160DB 0x2249 #define M29W040B 0x00E3 @@ -134,6 +135,9 @@ #define M50FW080 0x002D #define M50FW016 0x002E #define M50LPW080 0x002F +#define M50FLW080A 0x0080 +#define M50FLW080B 0x0081 +#define PSD4256G6V 0x00e9 /* SST */ #define SST29EE020 0x0010 @@ -143,17 +147,23 @@ #define SST39LF800 0x2781 #define SST39LF160 0x2782 #define SST39VF1601 0x234b +#define SST39VF3201 0x235b +#define SST39WF1601 0x274b +#define SST39WF1602 0x274a #define SST39LF512 0x00D4 #define SST39LF010 0x00D5 #define SST39LF020 0x00D6 #define SST39LF040 0x00D7 #define SST39SF010A 0x00B5 #define SST39SF020A 0x00B6 +#define SST39SF040 0x00B7 #define SST49LF004B 0x0060 +#define SST49LF040B 0x0050 #define SST49LF008A 0x005a #define SST49LF030A 0x001C #define SST49LF040A 0x0051 #define SST49LF080A 0x005B +#define SST36VF3203 0x7354 /* Toshiba */ #define TC58FVT160 0x00C2 @@ -181,15 +191,17 @@ enum uaddr { MTD_UADDR_0x0555_0x02AA, MTD_UADDR_0x0555_0x0AAA, MTD_UADDR_0x5555_0x2AAA, + MTD_UADDR_0x0AAA_0x0554, MTD_UADDR_0x0AAA_0x0555, + MTD_UADDR_0xAAAA_0x5555, MTD_UADDR_DONT_CARE, /* Requires an arbitrary address */ 
MTD_UADDR_UNNECESSARY, /* Does not require any address */ }; struct unlock_addr { - u32 addr1; - u32 addr2; + uint32_t addr1; + uint32_t addr2; }; @@ -198,7 +210,7 @@ struct unlock_addr { * exists, but is for MTD_UADDR_NOT_SUPPORTED - and, therefore, * should not be used. The problem is that structures with * initializers have extra fields initialized to 0. It is _very_ - * desireable to have the unlock address entries for unsupported + * desirable to have the unlock address entries for unsupported * data widths automatically initialized - that means that * MTD_UADDR_NOT_SUPPORTED must be 0 and the first entry here * must go unused. @@ -224,11 +236,21 @@ static const struct unlock_addr unlock_addrs[] = { .addr2 = 0x2aaa }, + [MTD_UADDR_0x0AAA_0x0554] = { + .addr1 = 0x0AAA, + .addr2 = 0x0554 + }, + [MTD_UADDR_0x0AAA_0x0555] = { .addr1 = 0x0AAA, .addr2 = 0x0555 }, + [MTD_UADDR_0xAAAA_0x5555] = { + .addr1 = 0xaaaa, + .addr2 = 0x5555 + }, + [MTD_UADDR_DONT_CARE] = { .addr1 = 0x0000, /* Doesn't matter which address */ .addr2 = 0x0000 /* is used - must be last entry */ @@ -240,16 +262,16 @@ static const struct unlock_addr unlock_addrs[] = { } }; - struct amd_flash_info { - const __u16 mfr_id; - const __u16 dev_id; const char *name; - const int DevSize; - const int NumEraseRegions; - const int CmdSet; - const __u8 uaddr[4]; /* unlock addrs for 8, 16, 32, 64 */ - const ulong regions[6]; + const uint16_t mfr_id; + const uint16_t dev_id; + const uint8_t dev_size; + const uint8_t nr_regions; + const uint16_t cmd_set; + const uint32_t regions[6]; + const uint8_t devtypes; /* Bitmask for x8, x16 etc. */ + const uint8_t uaddr; /* unlock addrs for 8, 16, 32, 64 */ }; #define ERASEINFO(size,blocks) (size<<8)|(blocks-1) @@ -271,29 +293,26 @@ struct amd_flash_info { */ static const struct amd_flash_info jedec_table[] = { { - .mfr_id = MANUFACTURER_AMD, + .mfr_id = CFI_MFR_AMD, .dev_id = AM29F032B, .name = "AMD AM29F032B", - .uaddr = { - [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ - }, - .DevSize = SIZE_4MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 1, + .uaddr = MTD_UADDR_0x0555_0x02AA, + .devtypes = CFI_DEVICETYPE_X8, + .dev_size = SIZE_4MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 1, .regions = { ERASEINFO(0x10000,64) } }, { - .mfr_id = MANUFACTURER_AMD, + .mfr_id = CFI_MFR_AMD, .dev_id = AM29LV160DT, .name = "AMD AM29LV160DT", - .uaddr = { - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ - [1] = MTD_UADDR_0x0555_0x02AA /* x16 */ - }, - .DevSize = SIZE_2MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 4, + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0AAA_0x0555, + .dev_size = SIZE_2MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 4, .regions = { ERASEINFO(0x10000,31), ERASEINFO(0x08000,1), @@ -301,16 +320,14 @@ static const struct amd_flash_info jedec_table[] = { ERASEINFO(0x04000,1) } }, { - .mfr_id = MANUFACTURER_AMD, + .mfr_id = CFI_MFR_AMD, .dev_id = AM29LV160DB, .name = "AMD AM29LV160DB", - .uaddr = { - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ - [1] = MTD_UADDR_0x0555_0x02AA /* x16 */ - }, - .DevSize = SIZE_2MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 4, + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0AAA_0x0555, + .dev_size = SIZE_2MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 4, .regions = { ERASEINFO(0x04000,1), ERASEINFO(0x02000,2), @@ -318,16 +335,14 @@ static const struct amd_flash_info jedec_table[] = { ERASEINFO(0x10000,31) } }, { - .mfr_id = MANUFACTURER_AMD, + .mfr_id = CFI_MFR_AMD, .dev_id = AM29LV400BB, .name = "AMD 
AM29LV400BB", - .uaddr = { - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ - }, - .DevSize = SIZE_512KiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 4, + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0AAA_0x0555, + .dev_size = SIZE_512KiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 4, .regions = { ERASEINFO(0x04000,1), ERASEINFO(0x02000,2), @@ -335,16 +350,14 @@ static const struct amd_flash_info jedec_table[] = { ERASEINFO(0x10000,7) } }, { - .mfr_id = MANUFACTURER_AMD, + .mfr_id = CFI_MFR_AMD, .dev_id = AM29LV400BT, .name = "AMD AM29LV400BT", - .uaddr = { - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ - }, - .DevSize = SIZE_512KiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 4, + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0AAA_0x0555, + .dev_size = SIZE_512KiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 4, .regions = { ERASEINFO(0x10000,7), ERASEINFO(0x08000,1), @@ -352,16 +365,14 @@ static const struct amd_flash_info jedec_table[] = { ERASEINFO(0x04000,1) } }, { - .mfr_id = MANUFACTURER_AMD, + .mfr_id = CFI_MFR_AMD, .dev_id = AM29LV800BB, .name = "AMD AM29LV800BB", - .uaddr = { - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ - }, - .DevSize = SIZE_1MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 4, + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0AAA_0x0555, + .dev_size = SIZE_1MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 4, .regions = { ERASEINFO(0x04000,1), ERASEINFO(0x02000,2), @@ -370,16 +381,14 @@ static const struct amd_flash_info jedec_table[] = { } }, { /* add DL */ - .mfr_id = MANUFACTURER_AMD, + .mfr_id = CFI_MFR_AMD, .dev_id = AM29DL800BB, .name = "AMD AM29DL800BB", - .uaddr = { - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ - }, - .DevSize = SIZE_1MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 6, + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0AAA_0x0555, + .dev_size = SIZE_1MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 6, .regions = { ERASEINFO(0x04000,1), ERASEINFO(0x08000,1), @@ -389,16 +398,14 @@ static const struct amd_flash_info jedec_table[] = { ERASEINFO(0x10000,14) } }, { - .mfr_id = MANUFACTURER_AMD, + .mfr_id = CFI_MFR_AMD, .dev_id = AM29DL800BT, .name = "AMD AM29DL800BT", - .uaddr = { - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ - }, - .DevSize = SIZE_1MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 6, + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0AAA_0x0555, + .dev_size = SIZE_1MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 6, .regions = { ERASEINFO(0x10000,14), ERASEINFO(0x04000,1), @@ -408,16 +415,14 @@ static const struct amd_flash_info jedec_table[] = { ERASEINFO(0x04000,1) } }, { - .mfr_id = MANUFACTURER_AMD, + .mfr_id = CFI_MFR_AMD, .dev_id = AM29F800BB, .name = "AMD AM29F800BB", - .uaddr = { - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ - }, - .DevSize = SIZE_1MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 4, + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0AAA_0x0555, + .dev_size = SIZE_1MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 4, .regions = { ERASEINFO(0x04000,1), ERASEINFO(0x02000,2), @@ -425,16 +430,14 @@ static const struct amd_flash_info jedec_table[] = { ERASEINFO(0x10000,15), } }, { - .mfr_id = MANUFACTURER_AMD, + .mfr_id = 
CFI_MFR_AMD, .dev_id = AM29LV800BT, .name = "AMD AM29LV800BT", - .uaddr = { - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ - }, - .DevSize = SIZE_1MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 4, + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0AAA_0x0555, + .dev_size = SIZE_1MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 4, .regions = { ERASEINFO(0x10000,15), ERASEINFO(0x08000,1), @@ -442,16 +445,14 @@ static const struct amd_flash_info jedec_table[] = { ERASEINFO(0x04000,1) } }, { - .mfr_id = MANUFACTURER_AMD, + .mfr_id = CFI_MFR_AMD, .dev_id = AM29F800BT, .name = "AMD AM29F800BT", - .uaddr = { - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ - }, - .DevSize = SIZE_1MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 4, + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0AAA_0x0555, + .dev_size = SIZE_1MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 4, .regions = { ERASEINFO(0x10000,15), ERASEINFO(0x08000,1), @@ -459,80 +460,74 @@ static const struct amd_flash_info jedec_table[] = { ERASEINFO(0x04000,1) } }, { - .mfr_id = MANUFACTURER_AMD, + .mfr_id = CFI_MFR_AMD, .dev_id = AM29F017D, .name = "AMD AM29F017D", - .uaddr = { - [0] = MTD_UADDR_DONT_CARE /* x8 */ - }, - .DevSize = SIZE_2MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 1, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_DONT_CARE, + .dev_size = SIZE_2MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 1, .regions = { ERASEINFO(0x10000,32), } }, { - .mfr_id = MANUFACTURER_AMD, + .mfr_id = CFI_MFR_AMD, .dev_id = AM29F016D, .name = "AMD AM29F016D", - .uaddr = { - [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ - }, - .DevSize = SIZE_2MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 1, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0555_0x02AA, + .dev_size = SIZE_2MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 1, .regions = { ERASEINFO(0x10000,32), } }, { - .mfr_id = MANUFACTURER_AMD, + .mfr_id = CFI_MFR_AMD, .dev_id = AM29F080, .name = "AMD AM29F080", - .uaddr = { - [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ - }, - .DevSize = SIZE_1MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 1, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0555_0x02AA, + .dev_size = SIZE_1MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 1, .regions = { ERASEINFO(0x10000,16), } }, { - .mfr_id = MANUFACTURER_AMD, + .mfr_id = CFI_MFR_AMD, .dev_id = AM29F040, .name = "AMD AM29F040", - .uaddr = { - [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ - }, - .DevSize = SIZE_512KiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 1, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0555_0x02AA, + .dev_size = SIZE_512KiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 1, .regions = { ERASEINFO(0x10000,8), } }, { - .mfr_id = MANUFACTURER_AMD, + .mfr_id = CFI_MFR_AMD, .dev_id = AM29LV040B, .name = "AMD AM29LV040B", - .uaddr = { - [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ - }, - .DevSize = SIZE_512KiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 1, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0555_0x02AA, + .dev_size = SIZE_512KiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 1, .regions = { ERASEINFO(0x10000,8), } }, { - .mfr_id = MANUFACTURER_AMD, + .mfr_id = CFI_MFR_AMD, .dev_id = AM29F002T, .name = "AMD AM29F002T", - .uaddr = { - [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ - }, - .DevSize = SIZE_256KiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 4, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0555_0x02AA, + .dev_size = 
SIZE_256KiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 4, .regions = { ERASEINFO(0x10000,3), ERASEINFO(0x08000,1), @@ -540,159 +535,216 @@ static const struct amd_flash_info jedec_table[] = { ERASEINFO(0x04000,1), } }, { - .mfr_id = MANUFACTURER_ATMEL, + .mfr_id = CFI_MFR_AMD, + .dev_id = AM29SL800DT, + .name = "AMD AM29SL800DT", + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0AAA_0x0555, + .dev_size = SIZE_1MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 4, + .regions = { + ERASEINFO(0x10000,15), + ERASEINFO(0x08000,1), + ERASEINFO(0x02000,2), + ERASEINFO(0x04000,1), + } + }, { + .mfr_id = CFI_MFR_AMD, + .dev_id = AM29SL800DB, + .name = "AMD AM29SL800DB", + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0AAA_0x0555, + .dev_size = SIZE_1MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 4, + .regions = { + ERASEINFO(0x04000,1), + ERASEINFO(0x02000,2), + ERASEINFO(0x08000,1), + ERASEINFO(0x10000,15), + } + }, { + .mfr_id = CFI_MFR_ATMEL, .dev_id = AT49BV512, .name = "Atmel AT49BV512", - .uaddr = { - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ - }, - .DevSize = SIZE_64KiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 1, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x5555_0x2AAA, + .dev_size = SIZE_64KiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 1, .regions = { ERASEINFO(0x10000,1) } }, { - .mfr_id = MANUFACTURER_ATMEL, + .mfr_id = CFI_MFR_ATMEL, .dev_id = AT29LV512, .name = "Atmel AT29LV512", - .uaddr = { - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ - }, - .DevSize = SIZE_64KiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 1, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x5555_0x2AAA, + .dev_size = SIZE_64KiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 1, .regions = { ERASEINFO(0x80,256), ERASEINFO(0x80,256) } }, { - .mfr_id = MANUFACTURER_ATMEL, + .mfr_id = CFI_MFR_ATMEL, .dev_id = AT49BV16X, .name = "Atmel AT49BV16X", - .uaddr = { - [0] = MTD_UADDR_0x0555_0x0AAA, /* x8 */ - [1] = MTD_UADDR_0x0555_0x0AAA /* x16 */ - }, - .DevSize = SIZE_2MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 2, + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? */ + .dev_size = SIZE_2MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 2, .regions = { ERASEINFO(0x02000,8), ERASEINFO(0x10000,31) } }, { - .mfr_id = MANUFACTURER_ATMEL, + .mfr_id = CFI_MFR_ATMEL, .dev_id = AT49BV16XT, .name = "Atmel AT49BV16XT", - .uaddr = { - [0] = MTD_UADDR_0x0555_0x0AAA, /* x8 */ - [1] = MTD_UADDR_0x0555_0x0AAA /* x16 */ - }, - .DevSize = SIZE_2MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 2, + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? */ + .dev_size = SIZE_2MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 2, .regions = { ERASEINFO(0x10000,31), ERASEINFO(0x02000,8) } }, { - .mfr_id = MANUFACTURER_ATMEL, + .mfr_id = CFI_MFR_ATMEL, .dev_id = AT49BV32X, .name = "Atmel AT49BV32X", - .uaddr = { - [0] = MTD_UADDR_0x0555_0x0AAA, /* x8 */ - [1] = MTD_UADDR_0x0555_0x0AAA /* x16 */ - }, - .DevSize = SIZE_4MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 2, + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? 
*/ + .dev_size = SIZE_4MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 2, .regions = { ERASEINFO(0x02000,8), ERASEINFO(0x10000,63) } }, { - .mfr_id = MANUFACTURER_ATMEL, + .mfr_id = CFI_MFR_ATMEL, .dev_id = AT49BV32XT, .name = "Atmel AT49BV32XT", - .uaddr = { - [0] = MTD_UADDR_0x0555_0x0AAA, /* x8 */ - [1] = MTD_UADDR_0x0555_0x0AAA /* x16 */ - }, - .DevSize = SIZE_4MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 2, + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? */ + .dev_size = SIZE_4MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 2, .regions = { ERASEINFO(0x10000,63), ERASEINFO(0x02000,8) } }, { - .mfr_id = MANUFACTURER_FUJITSU, + .mfr_id = CFI_MFR_EON, + .dev_id = EN29SL800BT, + .name = "Eon EN29SL800BT", + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0AAA_0x0555, + .dev_size = SIZE_1MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 4, + .regions = { + ERASEINFO(0x10000,15), + ERASEINFO(0x08000,1), + ERASEINFO(0x02000,2), + ERASEINFO(0x04000,1), + } + }, { + .mfr_id = CFI_MFR_EON, + .dev_id = EN29SL800BB, + .name = "Eon EN29SL800BB", + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0AAA_0x0555, + .dev_size = SIZE_1MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 4, + .regions = { + ERASEINFO(0x04000,1), + ERASEINFO(0x02000,2), + ERASEINFO(0x08000,1), + ERASEINFO(0x10000,15), + } + }, { + .mfr_id = CFI_MFR_FUJITSU, .dev_id = MBM29F040C, .name = "Fujitsu MBM29F040C", - .uaddr = { - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ - }, - .DevSize = SIZE_512KiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 1, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0AAA_0x0555, + .dev_size = SIZE_512KiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 1, .regions = { ERASEINFO(0x10000,8) } }, { - .mfr_id = MANUFACTURER_FUJITSU, + .mfr_id = CFI_MFR_FUJITSU, + .dev_id = MBM29F800BA, + .name = "Fujitsu MBM29F800BA", + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0AAA_0x0555, + .dev_size = SIZE_1MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 4, + .regions = { + ERASEINFO(0x04000,1), + ERASEINFO(0x02000,2), + ERASEINFO(0x08000,1), + ERASEINFO(0x10000,15), + } + }, { + .mfr_id = CFI_MFR_FUJITSU, .dev_id = MBM29LV650UE, .name = "Fujitsu MBM29LV650UE", - .uaddr = { - [0] = MTD_UADDR_DONT_CARE /* x16 */ - }, - .DevSize = SIZE_8MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 1, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_DONT_CARE, + .dev_size = SIZE_8MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 1, .regions = { ERASEINFO(0x10000,128) } }, { - .mfr_id = MANUFACTURER_FUJITSU, + .mfr_id = CFI_MFR_FUJITSU, .dev_id = MBM29LV320TE, .name = "Fujitsu MBM29LV320TE", - .uaddr = { - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ - }, - .DevSize = SIZE_4MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 2, + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0AAA_0x0555, + .dev_size = SIZE_4MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 2, .regions = { ERASEINFO(0x10000,63), ERASEINFO(0x02000,8) } }, { - .mfr_id = MANUFACTURER_FUJITSU, + .mfr_id = CFI_MFR_FUJITSU, .dev_id = MBM29LV320BE, .name = "Fujitsu MBM29LV320BE", - .uaddr = { - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ - }, - .DevSize = SIZE_4MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 2, + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0AAA_0x0555, + .dev_size = SIZE_4MiB, + .cmd_set = 
P_ID_AMD_STD, + .nr_regions = 2, .regions = { ERASEINFO(0x02000,8), ERASEINFO(0x10000,63) } }, { - .mfr_id = MANUFACTURER_FUJITSU, + .mfr_id = CFI_MFR_FUJITSU, .dev_id = MBM29LV160TE, .name = "Fujitsu MBM29LV160TE", - .uaddr = { - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ - }, - .DevSize = SIZE_2MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 4, + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0AAA_0x0555, + .dev_size = SIZE_2MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 4, .regions = { ERASEINFO(0x10000,31), ERASEINFO(0x08000,1), @@ -700,16 +752,14 @@ static const struct amd_flash_info jedec_table[] = { ERASEINFO(0x04000,1) } }, { - .mfr_id = MANUFACTURER_FUJITSU, + .mfr_id = CFI_MFR_FUJITSU, .dev_id = MBM29LV160BE, .name = "Fujitsu MBM29LV160BE", - .uaddr = { - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ - }, - .DevSize = SIZE_2MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 4, + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0AAA_0x0555, + .dev_size = SIZE_2MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 4, .regions = { ERASEINFO(0x04000,1), ERASEINFO(0x02000,2), @@ -717,16 +767,14 @@ static const struct amd_flash_info jedec_table[] = { ERASEINFO(0x10000,31) } }, { - .mfr_id = MANUFACTURER_FUJITSU, + .mfr_id = CFI_MFR_FUJITSU, .dev_id = MBM29LV800BA, .name = "Fujitsu MBM29LV800BA", - .uaddr = { - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ - }, - .DevSize = SIZE_1MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 4, + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0AAA_0x0555, + .dev_size = SIZE_1MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 4, .regions = { ERASEINFO(0x04000,1), ERASEINFO(0x02000,2), @@ -734,16 +782,14 @@ static const struct amd_flash_info jedec_table[] = { ERASEINFO(0x10000,15) } }, { - .mfr_id = MANUFACTURER_FUJITSU, + .mfr_id = CFI_MFR_FUJITSU, .dev_id = MBM29LV800TA, .name = "Fujitsu MBM29LV800TA", - .uaddr = { - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ - }, - .DevSize = SIZE_1MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 4, + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0AAA_0x0555, + .dev_size = SIZE_1MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 4, .regions = { ERASEINFO(0x10000,15), ERASEINFO(0x08000,1), @@ -751,16 +797,14 @@ static const struct amd_flash_info jedec_table[] = { ERASEINFO(0x04000,1) } }, { - .mfr_id = MANUFACTURER_FUJITSU, + .mfr_id = CFI_MFR_FUJITSU, .dev_id = MBM29LV400BC, .name = "Fujitsu MBM29LV400BC", - .uaddr = { - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ - }, - .DevSize = SIZE_512KiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 4, + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0AAA_0x0555, + .dev_size = SIZE_512KiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 4, .regions = { ERASEINFO(0x04000,1), ERASEINFO(0x02000,2), @@ -768,16 +812,14 @@ static const struct amd_flash_info jedec_table[] = { ERASEINFO(0x10000,7) } }, { - .mfr_id = MANUFACTURER_FUJITSU, + .mfr_id = CFI_MFR_FUJITSU, .dev_id = MBM29LV400TC, .name = "Fujitsu MBM29LV400TC", - .uaddr = { - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ - }, - .DevSize = SIZE_512KiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 4, + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = 
MTD_UADDR_0x0AAA_0x0555, + .dev_size = SIZE_512KiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 4, .regions = { ERASEINFO(0x10000,7), ERASEINFO(0x08000,1), @@ -785,15 +827,14 @@ static const struct amd_flash_info jedec_table[] = { ERASEINFO(0x04000,1) } }, { - .mfr_id = MANUFACTURER_HYUNDAI, + .mfr_id = CFI_MFR_HYUNDAI, .dev_id = HY29F002T, .name = "Hyundai HY29F002T", - .uaddr = { - [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ - }, - .DevSize = SIZE_256KiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 4, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0555_0x02AA, + .dev_size = SIZE_256KiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 4, .regions = { ERASEINFO(0x10000,3), ERASEINFO(0x08000,1), @@ -801,333 +842,319 @@ static const struct amd_flash_info jedec_table[] = { ERASEINFO(0x04000,1), } }, { - .mfr_id = MANUFACTURER_INTEL, + .mfr_id = CFI_MFR_INTEL, .dev_id = I28F004B3B, .name = "Intel 28F004B3B", - .uaddr = { - [0] = MTD_UADDR_UNNECESSARY, /* x8 */ - }, - .DevSize = SIZE_512KiB, - .CmdSet = P_ID_INTEL_STD, - .NumEraseRegions= 2, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_UNNECESSARY, + .dev_size = SIZE_512KiB, + .cmd_set = P_ID_INTEL_STD, + .nr_regions = 2, .regions = { ERASEINFO(0x02000, 8), ERASEINFO(0x10000, 7), } }, { - .mfr_id = MANUFACTURER_INTEL, + .mfr_id = CFI_MFR_INTEL, .dev_id = I28F004B3T, .name = "Intel 28F004B3T", - .uaddr = { - [0] = MTD_UADDR_UNNECESSARY, /* x8 */ - }, - .DevSize = SIZE_512KiB, - .CmdSet = P_ID_INTEL_STD, - .NumEraseRegions= 2, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_UNNECESSARY, + .dev_size = SIZE_512KiB, + .cmd_set = P_ID_INTEL_STD, + .nr_regions = 2, .regions = { ERASEINFO(0x10000, 7), ERASEINFO(0x02000, 8), } }, { - .mfr_id = MANUFACTURER_INTEL, + .mfr_id = CFI_MFR_INTEL, .dev_id = I28F400B3B, .name = "Intel 28F400B3B", - .uaddr = { - [0] = MTD_UADDR_UNNECESSARY, /* x8 */ - [1] = MTD_UADDR_UNNECESSARY, /* x16 */ - }, - .DevSize = SIZE_512KiB, - .CmdSet = P_ID_INTEL_STD, - .NumEraseRegions= 2, + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_UNNECESSARY, + .dev_size = SIZE_512KiB, + .cmd_set = P_ID_INTEL_STD, + .nr_regions = 2, .regions = { ERASEINFO(0x02000, 8), ERASEINFO(0x10000, 7), } }, { - .mfr_id = MANUFACTURER_INTEL, + .mfr_id = CFI_MFR_INTEL, .dev_id = I28F400B3T, .name = "Intel 28F400B3T", - .uaddr = { - [0] = MTD_UADDR_UNNECESSARY, /* x8 */ - [1] = MTD_UADDR_UNNECESSARY, /* x16 */ - }, - .DevSize = SIZE_512KiB, - .CmdSet = P_ID_INTEL_STD, - .NumEraseRegions= 2, + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_UNNECESSARY, + .dev_size = SIZE_512KiB, + .cmd_set = P_ID_INTEL_STD, + .nr_regions = 2, .regions = { ERASEINFO(0x10000, 7), ERASEINFO(0x02000, 8), } }, { - .mfr_id = MANUFACTURER_INTEL, + .mfr_id = CFI_MFR_INTEL, .dev_id = I28F008B3B, .name = "Intel 28F008B3B", - .uaddr = { - [0] = MTD_UADDR_UNNECESSARY, /* x8 */ - }, - .DevSize = SIZE_1MiB, - .CmdSet = P_ID_INTEL_STD, - .NumEraseRegions= 2, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_UNNECESSARY, + .dev_size = SIZE_1MiB, + .cmd_set = P_ID_INTEL_STD, + .nr_regions = 2, .regions = { ERASEINFO(0x02000, 8), ERASEINFO(0x10000, 15), } }, { - .mfr_id = MANUFACTURER_INTEL, + .mfr_id = CFI_MFR_INTEL, .dev_id = I28F008B3T, .name = "Intel 28F008B3T", - .uaddr = { - [0] = MTD_UADDR_UNNECESSARY, /* x8 */ - }, - .DevSize = SIZE_1MiB, - .CmdSet = P_ID_INTEL_STD, - .NumEraseRegions= 2, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_UNNECESSARY, + .dev_size = SIZE_1MiB, + .cmd_set = P_ID_INTEL_STD, + 
.nr_regions = 2, .regions = { ERASEINFO(0x10000, 15), ERASEINFO(0x02000, 8), } }, { - .mfr_id = MANUFACTURER_INTEL, + .mfr_id = CFI_MFR_INTEL, .dev_id = I28F008S5, .name = "Intel 28F008S5", - .uaddr = { - [0] = MTD_UADDR_UNNECESSARY, /* x8 */ - }, - .DevSize = SIZE_1MiB, - .CmdSet = P_ID_INTEL_EXT, - .NumEraseRegions= 1, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_UNNECESSARY, + .dev_size = SIZE_1MiB, + .cmd_set = P_ID_INTEL_EXT, + .nr_regions = 1, .regions = { ERASEINFO(0x10000,16), } }, { - .mfr_id = MANUFACTURER_INTEL, + .mfr_id = CFI_MFR_INTEL, .dev_id = I28F016S5, .name = "Intel 28F016S5", - .uaddr = { - [0] = MTD_UADDR_UNNECESSARY, /* x8 */ - }, - .DevSize = SIZE_2MiB, - .CmdSet = P_ID_INTEL_EXT, - .NumEraseRegions= 1, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_UNNECESSARY, + .dev_size = SIZE_2MiB, + .cmd_set = P_ID_INTEL_EXT, + .nr_regions = 1, .regions = { ERASEINFO(0x10000,32), } }, { - .mfr_id = MANUFACTURER_INTEL, + .mfr_id = CFI_MFR_INTEL, .dev_id = I28F008SA, .name = "Intel 28F008SA", - .uaddr = { - [0] = MTD_UADDR_UNNECESSARY, /* x8 */ - }, - .DevSize = SIZE_1MiB, - .CmdSet = P_ID_INTEL_STD, - .NumEraseRegions= 1, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_UNNECESSARY, + .dev_size = SIZE_1MiB, + .cmd_set = P_ID_INTEL_STD, + .nr_regions = 1, .regions = { ERASEINFO(0x10000, 16), } }, { - .mfr_id = MANUFACTURER_INTEL, + .mfr_id = CFI_MFR_INTEL, .dev_id = I28F800B3B, .name = "Intel 28F800B3B", - .uaddr = { - [1] = MTD_UADDR_UNNECESSARY, /* x16 */ - }, - .DevSize = SIZE_1MiB, - .CmdSet = P_ID_INTEL_STD, - .NumEraseRegions= 2, + .devtypes = CFI_DEVICETYPE_X16, + .uaddr = MTD_UADDR_UNNECESSARY, + .dev_size = SIZE_1MiB, + .cmd_set = P_ID_INTEL_STD, + .nr_regions = 2, .regions = { ERASEINFO(0x02000, 8), ERASEINFO(0x10000, 15), } }, { - .mfr_id = MANUFACTURER_INTEL, + .mfr_id = CFI_MFR_INTEL, .dev_id = I28F800B3T, .name = "Intel 28F800B3T", - .uaddr = { - [1] = MTD_UADDR_UNNECESSARY, /* x16 */ - }, - .DevSize = SIZE_1MiB, - .CmdSet = P_ID_INTEL_STD, - .NumEraseRegions= 2, + .devtypes = CFI_DEVICETYPE_X16, + .uaddr = MTD_UADDR_UNNECESSARY, + .dev_size = SIZE_1MiB, + .cmd_set = P_ID_INTEL_STD, + .nr_regions = 2, .regions = { ERASEINFO(0x10000, 15), ERASEINFO(0x02000, 8), } }, { - .mfr_id = MANUFACTURER_INTEL, + .mfr_id = CFI_MFR_INTEL, .dev_id = I28F016B3B, .name = "Intel 28F016B3B", - .uaddr = { - [0] = MTD_UADDR_UNNECESSARY, /* x8 */ - }, - .DevSize = SIZE_2MiB, - .CmdSet = P_ID_INTEL_STD, - .NumEraseRegions= 2, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_UNNECESSARY, + .dev_size = SIZE_2MiB, + .cmd_set = P_ID_INTEL_STD, + .nr_regions = 2, .regions = { ERASEINFO(0x02000, 8), ERASEINFO(0x10000, 31), } }, { - .mfr_id = MANUFACTURER_INTEL, + .mfr_id = CFI_MFR_INTEL, .dev_id = I28F016S3, .name = "Intel I28F016S3", - .uaddr = { - [0] = MTD_UADDR_UNNECESSARY, /* x8 */ - }, - .DevSize = SIZE_2MiB, - .CmdSet = P_ID_INTEL_STD, - .NumEraseRegions= 1, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_UNNECESSARY, + .dev_size = SIZE_2MiB, + .cmd_set = P_ID_INTEL_STD, + .nr_regions = 1, .regions = { ERASEINFO(0x10000, 32), } }, { - .mfr_id = MANUFACTURER_INTEL, + .mfr_id = CFI_MFR_INTEL, .dev_id = I28F016B3T, .name = "Intel 28F016B3T", - .uaddr = { - [0] = MTD_UADDR_UNNECESSARY, /* x8 */ - }, - .DevSize = SIZE_2MiB, - .CmdSet = P_ID_INTEL_STD, - .NumEraseRegions= 2, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_UNNECESSARY, + .dev_size = SIZE_2MiB, + .cmd_set = P_ID_INTEL_STD, + .nr_regions = 2, .regions = { ERASEINFO(0x10000, 31), 
ERASEINFO(0x02000, 8), } }, { - .mfr_id = MANUFACTURER_INTEL, + .mfr_id = CFI_MFR_INTEL, .dev_id = I28F160B3B, .name = "Intel 28F160B3B", - .uaddr = { - [1] = MTD_UADDR_UNNECESSARY, /* x16 */ - }, - .DevSize = SIZE_2MiB, - .CmdSet = P_ID_INTEL_STD, - .NumEraseRegions= 2, + .devtypes = CFI_DEVICETYPE_X16, + .uaddr = MTD_UADDR_UNNECESSARY, + .dev_size = SIZE_2MiB, + .cmd_set = P_ID_INTEL_STD, + .nr_regions = 2, .regions = { ERASEINFO(0x02000, 8), ERASEINFO(0x10000, 31), } }, { - .mfr_id = MANUFACTURER_INTEL, + .mfr_id = CFI_MFR_INTEL, .dev_id = I28F160B3T, .name = "Intel 28F160B3T", - .uaddr = { - [1] = MTD_UADDR_UNNECESSARY, /* x16 */ - }, - .DevSize = SIZE_2MiB, - .CmdSet = P_ID_INTEL_STD, - .NumEraseRegions= 2, + .devtypes = CFI_DEVICETYPE_X16, + .uaddr = MTD_UADDR_UNNECESSARY, + .dev_size = SIZE_2MiB, + .cmd_set = P_ID_INTEL_STD, + .nr_regions = 2, .regions = { ERASEINFO(0x10000, 31), ERASEINFO(0x02000, 8), } }, { - .mfr_id = MANUFACTURER_INTEL, + .mfr_id = CFI_MFR_INTEL, .dev_id = I28F320B3B, .name = "Intel 28F320B3B", - .uaddr = { - [1] = MTD_UADDR_UNNECESSARY, /* x16 */ - }, - .DevSize = SIZE_4MiB, - .CmdSet = P_ID_INTEL_STD, - .NumEraseRegions= 2, + .devtypes = CFI_DEVICETYPE_X16, + .uaddr = MTD_UADDR_UNNECESSARY, + .dev_size = SIZE_4MiB, + .cmd_set = P_ID_INTEL_STD, + .nr_regions = 2, .regions = { ERASEINFO(0x02000, 8), ERASEINFO(0x10000, 63), } }, { - .mfr_id = MANUFACTURER_INTEL, + .mfr_id = CFI_MFR_INTEL, .dev_id = I28F320B3T, .name = "Intel 28F320B3T", - .uaddr = { - [1] = MTD_UADDR_UNNECESSARY, /* x16 */ - }, - .DevSize = SIZE_4MiB, - .CmdSet = P_ID_INTEL_STD, - .NumEraseRegions= 2, + .devtypes = CFI_DEVICETYPE_X16, + .uaddr = MTD_UADDR_UNNECESSARY, + .dev_size = SIZE_4MiB, + .cmd_set = P_ID_INTEL_STD, + .nr_regions = 2, .regions = { ERASEINFO(0x10000, 63), ERASEINFO(0x02000, 8), } }, { - .mfr_id = MANUFACTURER_INTEL, + .mfr_id = CFI_MFR_INTEL, .dev_id = I28F640B3B, .name = "Intel 28F640B3B", - .uaddr = { - [1] = MTD_UADDR_UNNECESSARY, /* x16 */ - }, - .DevSize = SIZE_8MiB, - .CmdSet = P_ID_INTEL_STD, - .NumEraseRegions= 2, + .devtypes = CFI_DEVICETYPE_X16, + .uaddr = MTD_UADDR_UNNECESSARY, + .dev_size = SIZE_8MiB, + .cmd_set = P_ID_INTEL_STD, + .nr_regions = 2, .regions = { ERASEINFO(0x02000, 8), ERASEINFO(0x10000, 127), } }, { - .mfr_id = MANUFACTURER_INTEL, + .mfr_id = CFI_MFR_INTEL, .dev_id = I28F640B3T, .name = "Intel 28F640B3T", - .uaddr = { - [1] = MTD_UADDR_UNNECESSARY, /* x16 */ - }, - .DevSize = SIZE_8MiB, - .CmdSet = P_ID_INTEL_STD, - .NumEraseRegions= 2, + .devtypes = CFI_DEVICETYPE_X16, + .uaddr = MTD_UADDR_UNNECESSARY, + .dev_size = SIZE_8MiB, + .cmd_set = P_ID_INTEL_STD, + .nr_regions = 2, .regions = { ERASEINFO(0x10000, 127), ERASEINFO(0x02000, 8), } }, { - .mfr_id = MANUFACTURER_INTEL, + .mfr_id = CFI_MFR_INTEL, + .dev_id = I28F640C3B, + .name = "Intel 28F640C3B", + .devtypes = CFI_DEVICETYPE_X16, + .uaddr = MTD_UADDR_UNNECESSARY, + .dev_size = SIZE_8MiB, + .cmd_set = P_ID_INTEL_STD, + .nr_regions = 2, + .regions = { + ERASEINFO(0x02000, 8), + ERASEINFO(0x10000, 127), + } + }, { + .mfr_id = CFI_MFR_INTEL, .dev_id = I82802AB, .name = "Intel 82802AB", - .uaddr = { - [0] = MTD_UADDR_UNNECESSARY, /* x8 */ - }, - .DevSize = SIZE_512KiB, - .CmdSet = P_ID_INTEL_EXT, - .NumEraseRegions= 1, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_UNNECESSARY, + .dev_size = SIZE_512KiB, + .cmd_set = P_ID_INTEL_EXT, + .nr_regions = 1, .regions = { ERASEINFO(0x10000,8), } }, { - .mfr_id = MANUFACTURER_INTEL, + .mfr_id = CFI_MFR_INTEL, .dev_id = I82802AC, .name = "Intel 
82802AC", - .uaddr = { - [0] = MTD_UADDR_UNNECESSARY, /* x8 */ - }, - .DevSize = SIZE_1MiB, - .CmdSet = P_ID_INTEL_EXT, - .NumEraseRegions= 1, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_UNNECESSARY, + .dev_size = SIZE_1MiB, + .cmd_set = P_ID_INTEL_EXT, + .nr_regions = 1, .regions = { ERASEINFO(0x10000,16), } }, { - .mfr_id = MANUFACTURER_MACRONIX, + .mfr_id = CFI_MFR_MACRONIX, .dev_id = MX29LV040C, .name = "Macronix MX29LV040C", - .uaddr = { - [0] = MTD_UADDR_0x0555_0x02AA, /* x8 */ - }, - .DevSize = SIZE_512KiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 1, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0555_0x02AA, + .dev_size = SIZE_512KiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 1, .regions = { ERASEINFO(0x10000,8), } }, { - .mfr_id = MANUFACTURER_MACRONIX, + .mfr_id = CFI_MFR_MACRONIX, .dev_id = MX29LV160T, .name = "MXIC MX29LV160T", - .uaddr = { - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ - }, - .DevSize = SIZE_2MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 4, + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0AAA_0x0555, + .dev_size = SIZE_2MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 4, .regions = { ERASEINFO(0x10000,31), ERASEINFO(0x08000,1), @@ -1135,32 +1162,28 @@ static const struct amd_flash_info jedec_table[] = { ERASEINFO(0x04000,1) } }, { - .mfr_id = MANUFACTURER_NEC, + .mfr_id = CFI_MFR_NEC, .dev_id = UPD29F064115, .name = "NEC uPD29F064115", - .uaddr = { - [0] = MTD_UADDR_0x0555_0x02AA, /* x8 */ - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ - }, - .DevSize = SIZE_8MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 3, + .devtypes = CFI_DEVICETYPE_X16, + .uaddr = MTD_UADDR_0xAAAA_0x5555, + .dev_size = SIZE_8MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 3, .regions = { ERASEINFO(0x2000,8), ERASEINFO(0x10000,126), ERASEINFO(0x2000,8), } }, { - .mfr_id = MANUFACTURER_MACRONIX, + .mfr_id = CFI_MFR_MACRONIX, .dev_id = MX29LV160B, .name = "MXIC MX29LV160B", - .uaddr = { - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ - }, - .DevSize = SIZE_2MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 4, + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0AAA_0x0555, + .dev_size = SIZE_2MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 4, .regions = { ERASEINFO(0x04000,1), ERASEINFO(0x02000,2), @@ -1168,44 +1191,53 @@ static const struct amd_flash_info jedec_table[] = { ERASEINFO(0x10000,31) } }, { - .mfr_id = MANUFACTURER_MACRONIX, + .mfr_id = CFI_MFR_MACRONIX, + .dev_id = MX29F040, + .name = "Macronix MX29F040", + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0555_0x02AA, + .dev_size = SIZE_512KiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 1, + .regions = { + ERASEINFO(0x10000,8), + } + }, { + .mfr_id = CFI_MFR_MACRONIX, .dev_id = MX29F016, .name = "Macronix MX29F016", - .uaddr = { - [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ - }, - .DevSize = SIZE_2MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 1, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0555_0x02AA, + .dev_size = SIZE_2MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 1, .regions = { ERASEINFO(0x10000,32), } - }, { - .mfr_id = MANUFACTURER_MACRONIX, + }, { + .mfr_id = CFI_MFR_MACRONIX, .dev_id = MX29F004T, .name = "Macronix MX29F004T", - .uaddr = { - [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ - }, - .DevSize = SIZE_512KiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 4, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0555_0x02AA, + .dev_size = 
SIZE_512KiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 4, .regions = { ERASEINFO(0x10000,7), ERASEINFO(0x08000,1), ERASEINFO(0x02000,2), ERASEINFO(0x04000,1), } - }, { - .mfr_id = MANUFACTURER_MACRONIX, + }, { + .mfr_id = CFI_MFR_MACRONIX, .dev_id = MX29F004B, .name = "Macronix MX29F004B", - .uaddr = { - [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ - }, - .DevSize = SIZE_512KiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 4, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0555_0x02AA, + .dev_size = SIZE_512KiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 4, .regions = { ERASEINFO(0x04000,1), ERASEINFO(0x02000,2), @@ -1213,15 +1245,14 @@ static const struct amd_flash_info jedec_table[] = { ERASEINFO(0x10000,7), } }, { - .mfr_id = MANUFACTURER_MACRONIX, + .mfr_id = CFI_MFR_MACRONIX, .dev_id = MX29F002T, .name = "Macronix MX29F002T", - .uaddr = { - [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ - }, - .DevSize = SIZE_256KiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 4, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0555_0x02AA, + .dev_size = SIZE_256KiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 4, .regions = { ERASEINFO(0x10000,3), ERASEINFO(0x08000,1), @@ -1229,253 +1260,338 @@ static const struct amd_flash_info jedec_table[] = { ERASEINFO(0x04000,1), } }, { - .mfr_id = MANUFACTURER_PMC, + .mfr_id = CFI_MFR_PMC, .dev_id = PM49FL002, .name = "PMC Pm49FL002", - .uaddr = { - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ - }, - .DevSize = SIZE_256KiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 1, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x5555_0x2AAA, + .dev_size = SIZE_256KiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 1, .regions = { ERASEINFO( 0x01000, 64 ) } }, { - .mfr_id = MANUFACTURER_PMC, + .mfr_id = CFI_MFR_PMC, .dev_id = PM49FL004, .name = "PMC Pm49FL004", - .uaddr = { - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ - }, - .DevSize = SIZE_512KiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 1, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x5555_0x2AAA, + .dev_size = SIZE_512KiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 1, .regions = { ERASEINFO( 0x01000, 128 ) } }, { - .mfr_id = MANUFACTURER_PMC, + .mfr_id = CFI_MFR_PMC, .dev_id = PM49FL008, .name = "PMC Pm49FL008", - .uaddr = { - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ - }, - .DevSize = SIZE_1MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 1, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x5555_0x2AAA, + .dev_size = SIZE_1MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 1, .regions = { ERASEINFO( 0x01000, 256 ) } - }, { - .mfr_id = MANUFACTURER_SST, + }, { + .mfr_id = CFI_MFR_SHARP, + .dev_id = LH28F640BF, + .name = "LH28F640BF", + .devtypes = CFI_DEVICETYPE_X16, + .uaddr = MTD_UADDR_UNNECESSARY, + .dev_size = SIZE_8MiB, + .cmd_set = P_ID_INTEL_EXT, + .nr_regions = 2, + .regions = { + ERASEINFO(0x10000, 127), + ERASEINFO(0x02000, 8), + } + }, { + .mfr_id = CFI_MFR_SST, .dev_id = SST39LF512, .name = "SST 39LF512", - .uaddr = { - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ - }, - .DevSize = SIZE_64KiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 1, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x5555_0x2AAA, + .dev_size = SIZE_64KiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 1, .regions = { ERASEINFO(0x01000,16), } - }, { - .mfr_id = MANUFACTURER_SST, + }, { + .mfr_id = CFI_MFR_SST, .dev_id = SST39LF010, .name = "SST 39LF010", - .uaddr = { - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ - }, - .DevSize = SIZE_128KiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 1, + .devtypes = CFI_DEVICETYPE_X8, + 
.uaddr = MTD_UADDR_0x5555_0x2AAA, + .dev_size = SIZE_128KiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 1, .regions = { ERASEINFO(0x01000,32), } - }, { - .mfr_id = MANUFACTURER_SST, - .dev_id = SST29EE020, + }, { + .mfr_id = CFI_MFR_SST, + .dev_id = SST29EE020, .name = "SST 29EE020", - .uaddr = { - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ - }, - .DevSize = SIZE_256KiB, - .CmdSet = P_ID_SST_PAGE, - .NumEraseRegions= 1, - .regions = {ERASEINFO(0x01000,64), - } - }, { - .mfr_id = MANUFACTURER_SST, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x5555_0x2AAA, + .dev_size = SIZE_256KiB, + .cmd_set = P_ID_SST_PAGE, + .nr_regions = 1, + .regions = {ERASEINFO(0x01000,64), + } + }, { + .mfr_id = CFI_MFR_SST, .dev_id = SST29LE020, - .name = "SST 29LE020", - .uaddr = { - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ - }, - .DevSize = SIZE_256KiB, - .CmdSet = P_ID_SST_PAGE, - .NumEraseRegions= 1, - .regions = {ERASEINFO(0x01000,64), - } + .name = "SST 29LE020", + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x5555_0x2AAA, + .dev_size = SIZE_256KiB, + .cmd_set = P_ID_SST_PAGE, + .nr_regions = 1, + .regions = {ERASEINFO(0x01000,64), + } }, { - .mfr_id = MANUFACTURER_SST, + .mfr_id = CFI_MFR_SST, .dev_id = SST39LF020, .name = "SST 39LF020", - .uaddr = { - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ - }, - .DevSize = SIZE_256KiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 1, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x5555_0x2AAA, + .dev_size = SIZE_256KiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 1, .regions = { ERASEINFO(0x01000,64), } - }, { - .mfr_id = MANUFACTURER_SST, + }, { + .mfr_id = CFI_MFR_SST, .dev_id = SST39LF040, .name = "SST 39LF040", - .uaddr = { - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ - }, - .DevSize = SIZE_512KiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 1, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x5555_0x2AAA, + .dev_size = SIZE_512KiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 1, .regions = { ERASEINFO(0x01000,128), } - }, { - .mfr_id = MANUFACTURER_SST, + }, { + .mfr_id = CFI_MFR_SST, .dev_id = SST39SF010A, .name = "SST 39SF010A", - .uaddr = { - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ - }, - .DevSize = SIZE_128KiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 1, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x5555_0x2AAA, + .dev_size = SIZE_128KiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 1, .regions = { ERASEINFO(0x01000,32), } - }, { - .mfr_id = MANUFACTURER_SST, + }, { + .mfr_id = CFI_MFR_SST, .dev_id = SST39SF020A, .name = "SST 39SF020A", - .uaddr = { - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ - }, - .DevSize = SIZE_256KiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 1, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x5555_0x2AAA, + .dev_size = SIZE_256KiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 1, .regions = { ERASEINFO(0x01000,64), } }, { - .mfr_id = MANUFACTURER_SST, + .mfr_id = CFI_MFR_SST, + .dev_id = SST39SF040, + .name = "SST 39SF040", + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x5555_0x2AAA, + .dev_size = SIZE_512KiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 1, + .regions = { + ERASEINFO(0x01000,128), + } + }, { + .mfr_id = CFI_MFR_SST, + .dev_id = SST49LF040B, + .name = "SST 49LF040B", + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x5555_0x2AAA, + .dev_size = SIZE_512KiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 1, + .regions = { + ERASEINFO(0x01000,128), + } + }, { + + .mfr_id = CFI_MFR_SST, .dev_id = SST49LF004B, .name = "SST 49LF004B", - .uaddr = { - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ 
- }, - .DevSize = SIZE_512KiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 1, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x5555_0x2AAA, + .dev_size = SIZE_512KiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 1, .regions = { ERASEINFO(0x01000,128), } }, { - .mfr_id = MANUFACTURER_SST, + .mfr_id = CFI_MFR_SST, .dev_id = SST49LF008A, .name = "SST 49LF008A", - .uaddr = { - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ - }, - .DevSize = SIZE_1MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 1, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x5555_0x2AAA, + .dev_size = SIZE_1MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 1, .regions = { ERASEINFO(0x01000,256), } }, { - .mfr_id = MANUFACTURER_SST, + .mfr_id = CFI_MFR_SST, .dev_id = SST49LF030A, .name = "SST 49LF030A", - .uaddr = { - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ - }, - .DevSize = SIZE_512KiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 1, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x5555_0x2AAA, + .dev_size = SIZE_512KiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 1, .regions = { ERASEINFO(0x01000,96), } }, { - .mfr_id = MANUFACTURER_SST, + .mfr_id = CFI_MFR_SST, .dev_id = SST49LF040A, .name = "SST 49LF040A", - .uaddr = { - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ - }, - .DevSize = SIZE_512KiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 1, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x5555_0x2AAA, + .dev_size = SIZE_512KiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 1, .regions = { ERASEINFO(0x01000,128), } }, { - .mfr_id = MANUFACTURER_SST, + .mfr_id = CFI_MFR_SST, .dev_id = SST49LF080A, .name = "SST 49LF080A", - .uaddr = { - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ - }, - .DevSize = SIZE_1MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 1, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x5555_0x2AAA, + .dev_size = SIZE_1MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 1, .regions = { ERASEINFO(0x01000,256), } }, { - .mfr_id = MANUFACTURER_SST, /* should be CFI */ - .dev_id = SST39LF160, - .name = "SST 39LF160", - .uaddr = { - [0] = MTD_UADDR_0x5555_0x2AAA, /* x8 */ - [1] = MTD_UADDR_0x5555_0x2AAA /* x16 */ - }, - .DevSize = SIZE_2MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 2, - .regions = { - ERASEINFO(0x1000,256), - ERASEINFO(0x1000,256) - } - }, { - .mfr_id = MANUFACTURER_SST, /* should be CFI */ - .dev_id = SST39VF1601, - .name = "SST 39VF1601", - .uaddr = { - [0] = MTD_UADDR_0x5555_0x2AAA, /* x8 */ - [1] = MTD_UADDR_0x5555_0x2AAA /* x16 */ - }, - .DevSize = SIZE_2MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 2, - .regions = { - ERASEINFO(0x1000,256), - ERASEINFO(0x1000,256) - } - - }, { - .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? 
*/ + .mfr_id = CFI_MFR_SST, /* should be CFI */ + .dev_id = SST39LF160, + .name = "SST 39LF160", + .devtypes = CFI_DEVICETYPE_X16, + .uaddr = MTD_UADDR_0xAAAA_0x5555, + .dev_size = SIZE_2MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 2, + .regions = { + ERASEINFO(0x1000,256), + ERASEINFO(0x1000,256) + } + }, { + .mfr_id = CFI_MFR_SST, /* should be CFI */ + .dev_id = SST39VF1601, + .name = "SST 39VF1601", + .devtypes = CFI_DEVICETYPE_X16, + .uaddr = MTD_UADDR_0xAAAA_0x5555, + .dev_size = SIZE_2MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 2, + .regions = { + ERASEINFO(0x1000,256), + ERASEINFO(0x1000,256) + } + }, { + /* CFI is broken: reports AMD_STD, but needs custom uaddr */ + .mfr_id = CFI_MFR_SST, + .dev_id = SST39WF1601, + .name = "SST 39WF1601", + .devtypes = CFI_DEVICETYPE_X16, + .uaddr = MTD_UADDR_0xAAAA_0x5555, + .dev_size = SIZE_2MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 2, + .regions = { + ERASEINFO(0x1000,256), + ERASEINFO(0x1000,256) + } + }, { + /* CFI is broken: reports AMD_STD, but needs custom uaddr */ + .mfr_id = CFI_MFR_SST, + .dev_id = SST39WF1602, + .name = "SST 39WF1602", + .devtypes = CFI_DEVICETYPE_X16, + .uaddr = MTD_UADDR_0xAAAA_0x5555, + .dev_size = SIZE_2MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 2, + .regions = { + ERASEINFO(0x1000,256), + ERASEINFO(0x1000,256) + } + }, { + .mfr_id = CFI_MFR_SST, /* should be CFI */ + .dev_id = SST39VF3201, + .name = "SST 39VF3201", + .devtypes = CFI_DEVICETYPE_X16, + .uaddr = MTD_UADDR_0xAAAA_0x5555, + .dev_size = SIZE_4MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 4, + .regions = { + ERASEINFO(0x1000,256), + ERASEINFO(0x1000,256), + ERASEINFO(0x1000,256), + ERASEINFO(0x1000,256) + } + }, { + .mfr_id = CFI_MFR_SST, + .dev_id = SST36VF3203, + .name = "SST 36VF3203", + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0AAA_0x0555, + .dev_size = SIZE_4MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 1, + .regions = { + ERASEINFO(0x10000,64), + } + }, { + .mfr_id = CFI_MFR_ST, + .dev_id = M29F800AB, + .name = "ST M29F800AB", + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0AAA_0x0555, + .dev_size = SIZE_1MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 4, + .regions = { + ERASEINFO(0x04000,1), + ERASEINFO(0x02000,2), + ERASEINFO(0x08000,1), + ERASEINFO(0x10000,15), + } + }, { + .mfr_id = CFI_MFR_ST, /* FIXME - CFI device? */ .dev_id = M29W800DT, .name = "ST M29W800DT", - .uaddr = { - [0] = MTD_UADDR_0x5555_0x2AAA, /* x8 */ - [1] = MTD_UADDR_0x5555_0x2AAA /* x16 */ - }, - .DevSize = SIZE_1MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 4, + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0AAA_0x0555, + .dev_size = SIZE_1MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 4, .regions = { ERASEINFO(0x10000,15), ERASEINFO(0x08000,1), @@ -1483,33 +1599,59 @@ static const struct amd_flash_info jedec_table[] = { ERASEINFO(0x04000,1) } }, { - .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */ + .mfr_id = CFI_MFR_ST, /* FIXME - CFI device? 
*/ .dev_id = M29W800DB, .name = "ST M29W800DB", - .uaddr = { - [0] = MTD_UADDR_0x5555_0x2AAA, /* x8 */ - [1] = MTD_UADDR_0x5555_0x2AAA /* x16 */ - }, - .DevSize = SIZE_1MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 4, + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0AAA_0x0555, + .dev_size = SIZE_1MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 4, .regions = { ERASEINFO(0x04000,1), ERASEINFO(0x02000,2), ERASEINFO(0x08000,1), ERASEINFO(0x10000,15) } + }, { + .mfr_id = CFI_MFR_ST, + .dev_id = M29W400DT, + .name = "ST M29W400DT", + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0AAA_0x0555, + .dev_size = SIZE_512KiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 4, + .regions = { + ERASEINFO(0x04000,7), + ERASEINFO(0x02000,1), + ERASEINFO(0x08000,2), + ERASEINFO(0x10000,1) + } + }, { + .mfr_id = CFI_MFR_ST, + .dev_id = M29W400DB, + .name = "ST M29W400DB", + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0AAA_0x0555, + .dev_size = SIZE_512KiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 4, + .regions = { + ERASEINFO(0x04000,1), + ERASEINFO(0x02000,2), + ERASEINFO(0x08000,1), + ERASEINFO(0x10000,7) + } }, { - .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */ + .mfr_id = CFI_MFR_ST, /* FIXME - CFI device? */ .dev_id = M29W160DT, .name = "ST M29W160DT", - .uaddr = { - [0] = MTD_UADDR_0x0555_0x02AA, /* x8 */ - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ - }, - .DevSize = SIZE_2MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 4, + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0555_0x02AA, /* ???? */ + .dev_size = SIZE_2MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 4, .regions = { ERASEINFO(0x10000,31), ERASEINFO(0x08000,1), @@ -1517,98 +1659,131 @@ static const struct amd_flash_info jedec_table[] = { ERASEINFO(0x04000,1) } }, { - .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */ + .mfr_id = CFI_MFR_ST, /* FIXME - CFI device? */ .dev_id = M29W160DB, .name = "ST M29W160DB", - .uaddr = { - [0] = MTD_UADDR_0x0555_0x02AA, /* x8 */ - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ - }, - .DevSize = SIZE_2MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 4, + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0555_0x02AA, /* ???? 
*/ + .dev_size = SIZE_2MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 4, .regions = { ERASEINFO(0x04000,1), ERASEINFO(0x02000,2), ERASEINFO(0x08000,1), ERASEINFO(0x10000,31) } - }, { - .mfr_id = MANUFACTURER_ST, + }, { + .mfr_id = CFI_MFR_ST, .dev_id = M29W040B, .name = "ST M29W040B", - .uaddr = { - [0] = MTD_UADDR_0x0555_0x02AA /* x8 */ - }, - .DevSize = SIZE_512KiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 1, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0555_0x02AA, + .dev_size = SIZE_512KiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 1, .regions = { ERASEINFO(0x10000,8), } - }, { - .mfr_id = MANUFACTURER_ST, + }, { + .mfr_id = CFI_MFR_ST, .dev_id = M50FW040, .name = "ST M50FW040", - .uaddr = { - [0] = MTD_UADDR_UNNECESSARY, /* x8 */ - }, - .DevSize = SIZE_512KiB, - .CmdSet = P_ID_INTEL_EXT, - .NumEraseRegions= 1, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_UNNECESSARY, + .dev_size = SIZE_512KiB, + .cmd_set = P_ID_INTEL_EXT, + .nr_regions = 1, .regions = { ERASEINFO(0x10000,8), } - }, { - .mfr_id = MANUFACTURER_ST, + }, { + .mfr_id = CFI_MFR_ST, .dev_id = M50FW080, .name = "ST M50FW080", - .uaddr = { - [0] = MTD_UADDR_UNNECESSARY, /* x8 */ - }, - .DevSize = SIZE_1MiB, - .CmdSet = P_ID_INTEL_EXT, - .NumEraseRegions= 1, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_UNNECESSARY, + .dev_size = SIZE_1MiB, + .cmd_set = P_ID_INTEL_EXT, + .nr_regions = 1, .regions = { ERASEINFO(0x10000,16), } - }, { - .mfr_id = MANUFACTURER_ST, + }, { + .mfr_id = CFI_MFR_ST, .dev_id = M50FW016, .name = "ST M50FW016", - .uaddr = { - [0] = MTD_UADDR_UNNECESSARY, /* x8 */ - }, - .DevSize = SIZE_2MiB, - .CmdSet = P_ID_INTEL_EXT, - .NumEraseRegions= 1, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_UNNECESSARY, + .dev_size = SIZE_2MiB, + .cmd_set = P_ID_INTEL_EXT, + .nr_regions = 1, .regions = { ERASEINFO(0x10000,32), } }, { - .mfr_id = MANUFACTURER_ST, + .mfr_id = CFI_MFR_ST, .dev_id = M50LPW080, .name = "ST M50LPW080", - .uaddr = { - [0] = MTD_UADDR_UNNECESSARY, /* x8 */ + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_UNNECESSARY, + .dev_size = SIZE_1MiB, + .cmd_set = P_ID_INTEL_EXT, + .nr_regions = 1, + .regions = { + ERASEINFO(0x10000,16), }, - .DevSize = SIZE_1MiB, - .CmdSet = P_ID_INTEL_EXT, - .NumEraseRegions= 1, + }, { + .mfr_id = CFI_MFR_ST, + .dev_id = M50FLW080A, + .name = "ST M50FLW080A", + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_UNNECESSARY, + .dev_size = SIZE_1MiB, + .cmd_set = P_ID_INTEL_EXT, + .nr_regions = 4, + .regions = { + ERASEINFO(0x1000,16), + ERASEINFO(0x10000,13), + ERASEINFO(0x1000,16), + ERASEINFO(0x1000,16), + } + }, { + .mfr_id = CFI_MFR_ST, + .dev_id = M50FLW080B, + .name = "ST M50FLW080B", + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_UNNECESSARY, + .dev_size = SIZE_1MiB, + .cmd_set = P_ID_INTEL_EXT, + .nr_regions = 4, + .regions = { + ERASEINFO(0x1000,16), + ERASEINFO(0x1000,16), + ERASEINFO(0x10000,13), + ERASEINFO(0x1000,16), + } + }, { + .mfr_id = 0xff00 | CFI_MFR_ST, + .dev_id = 0xff00 | PSD4256G6V, + .name = "ST PSD4256G6V", + .devtypes = CFI_DEVICETYPE_X16, + .uaddr = MTD_UADDR_0x0AAA_0x0554, + .dev_size = SIZE_1MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 1, .regions = { ERASEINFO(0x10000,16), } }, { - .mfr_id = MANUFACTURER_TOSHIBA, + .mfr_id = CFI_MFR_TOSHIBA, .dev_id = TC58FVT160, .name = "Toshiba TC58FVT160", - .uaddr = { - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ - [1] = MTD_UADDR_0x0555_0x02AA /* x16 */ - }, - .DevSize = SIZE_2MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 4, + .devtypes = 
CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0AAA_0x0555, + .dev_size = SIZE_2MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 4, .regions = { ERASEINFO(0x10000,31), ERASEINFO(0x08000,1), @@ -1616,16 +1791,14 @@ static const struct amd_flash_info jedec_table[] = { ERASEINFO(0x04000,1) } }, { - .mfr_id = MANUFACTURER_TOSHIBA, + .mfr_id = CFI_MFR_TOSHIBA, .dev_id = TC58FVB160, .name = "Toshiba TC58FVB160", - .uaddr = { - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ - [1] = MTD_UADDR_0x0555_0x02AA /* x16 */ - }, - .DevSize = SIZE_2MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 4, + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0AAA_0x0555, + .dev_size = SIZE_2MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 4, .regions = { ERASEINFO(0x04000,1), ERASEINFO(0x02000,2), @@ -1633,75 +1806,66 @@ static const struct amd_flash_info jedec_table[] = { ERASEINFO(0x10000,31) } }, { - .mfr_id = MANUFACTURER_TOSHIBA, + .mfr_id = CFI_MFR_TOSHIBA, .dev_id = TC58FVB321, .name = "Toshiba TC58FVB321", - .uaddr = { - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ - [1] = MTD_UADDR_0x0555_0x02AA /* x16 */ - }, - .DevSize = SIZE_4MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 2, + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0AAA_0x0555, + .dev_size = SIZE_4MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 2, .regions = { ERASEINFO(0x02000,8), ERASEINFO(0x10000,63) } }, { - .mfr_id = MANUFACTURER_TOSHIBA, + .mfr_id = CFI_MFR_TOSHIBA, .dev_id = TC58FVT321, .name = "Toshiba TC58FVT321", - .uaddr = { - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ - [1] = MTD_UADDR_0x0555_0x02AA /* x16 */ - }, - .DevSize = SIZE_4MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 2, + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0AAA_0x0555, + .dev_size = SIZE_4MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 2, .regions = { ERASEINFO(0x10000,63), ERASEINFO(0x02000,8) } }, { - .mfr_id = MANUFACTURER_TOSHIBA, + .mfr_id = CFI_MFR_TOSHIBA, .dev_id = TC58FVB641, .name = "Toshiba TC58FVB641", - .uaddr = { - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ - }, - .DevSize = SIZE_8MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 2, + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0AAA_0x0555, + .dev_size = SIZE_8MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 2, .regions = { ERASEINFO(0x02000,8), ERASEINFO(0x10000,127) } }, { - .mfr_id = MANUFACTURER_TOSHIBA, + .mfr_id = CFI_MFR_TOSHIBA, .dev_id = TC58FVT641, .name = "Toshiba TC58FVT641", - .uaddr = { - [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */ - [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */ - }, - .DevSize = SIZE_8MiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 2, + .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x0AAA_0x0555, + .dev_size = SIZE_8MiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 2, .regions = { ERASEINFO(0x10000,127), ERASEINFO(0x02000,8) } }, { - .mfr_id = MANUFACTURER_WINBOND, + .mfr_id = CFI_MFR_WINBOND, .dev_id = W49V002A, .name = "Winbond W49V002A", - .uaddr = { - [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ - }, - .DevSize = SIZE_256KiB, - .CmdSet = P_ID_AMD_STD, - .NumEraseRegions= 4, + .devtypes = CFI_DEVICETYPE_X8, + .uaddr = MTD_UADDR_0x5555_0x2AAA, + .dev_size = SIZE_256KiB, + .cmd_set = P_ID_AMD_STD, + .nr_regions = 4, .regions = { ERASEINFO(0x10000, 3), ERASEINFO(0x08000, 1), @@ -1711,38 +1875,39 @@ static const struct amd_flash_info jedec_table[] = { } }; - -static int cfi_jedec_setup(struct cfi_private 
*p_cfi, int index); - -static int jedec_probe_chip(struct map_info *map, __u32 base, - unsigned long *chip_map, struct cfi_private *cfi); - -static struct mtd_info *jedec_probe(struct map_info *map); - -static inline u32 jedec_read_mfr(struct map_info *map, __u32 base, +static inline u32 jedec_read_mfr(struct map_info *map, uint32_t base, struct cfi_private *cfi) { map_word result; unsigned long mask; - u32 ofs = cfi_build_cmd_addr(0, cfi_interleave(cfi), cfi->device_type); - mask = (1 << (cfi->device_type * 8)) -1; - result = map_read(map, base + ofs); + int bank = 0; + + /* According to JEDEC "Standard Manufacturer's Identification Code" + * (http://www.jedec.org/download/search/jep106W.pdf) + * several first banks can contain 0x7f instead of actual ID + */ + do { + uint32_t ofs = cfi_build_cmd_addr(0 + (bank << 8), map, cfi); + mask = (1 << (cfi->device_type * 8)) - 1; + result = map_read(map, base + ofs); + bank++; + } while ((result.x[0] & mask) == CFI_MFR_CONTINUATION); + return result.x[0] & mask; } -static inline u32 jedec_read_id(struct map_info *map, __u32 base, +static inline u32 jedec_read_id(struct map_info *map, uint32_t base, struct cfi_private *cfi) { map_word result; unsigned long mask; - u32 ofs = cfi_build_cmd_addr(1, cfi_interleave(cfi), cfi->device_type); + u32 ofs = cfi_build_cmd_addr(1, map, cfi); mask = (1 << (cfi->device_type * 8)) -1; result = map_read(map, base + ofs); return result.x[0] & mask; } -static inline void jedec_reset(u32 base, struct map_info *map, - struct cfi_private *cfi) +static void jedec_reset(u32 base, struct map_info *map, struct cfi_private *cfi) { /* Reset */ @@ -1750,114 +1915,92 @@ static inline void jedec_reset(u32 base, struct map_info *map, * (oh and incidentaly the jedec spec - 3.5.3.3) the reset * sequence is *supposed* to be 0xaa at 0x5555, 0x55 at * 0x2aaa, 0xF0 at 0x5555 this will not affect the AMD chips - * as they will ignore the writes and dont care what address + * as they will ignore the writes and don't care what address * the F0 is written to */ - if(cfi->addr_unlock1) { - DEBUG( MTD_DEBUG_LEVEL3, - "reset unlock called %x %x \n", + if (cfi->addr_unlock1) { + pr_debug( "reset unlock called %x %x \n", cfi->addr_unlock1,cfi->addr_unlock2); cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL); } cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL); - /* Some misdesigned intel chips do not respond for 0xF0 for a reset, + /* Some misdesigned Intel chips do not respond for 0xF0 for a reset, * so ensure we're in read mode. Send both the Intel and the AMD command * for this. Intel uses 0xff for this, AMD uses 0xff for NOP, so * this should be safe. 
- */ + */ cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL); /* FIXME - should have reset delay before continuing */ } -static inline __u8 finfo_uaddr(const struct amd_flash_info *finfo, int device_type) +static int cfi_jedec_setup(struct map_info *map, struct cfi_private *cfi, int index) { - int uaddr_idx; - __u8 uaddr = MTD_UADDR_NOT_SUPPORTED; - - switch ( device_type ) { - case CFI_DEVICETYPE_X8: uaddr_idx = 0; break; - case CFI_DEVICETYPE_X16: uaddr_idx = 1; break; - case CFI_DEVICETYPE_X32: uaddr_idx = 2; break; - default: - printk(KERN_NOTICE "MTD: %s(): unknown device_type %d\n", - __func__, device_type); - goto uaddr_done; - } - - uaddr = finfo->uaddr[uaddr_idx]; + int i,num_erase_regions; + uint8_t uaddr; - if (uaddr != MTD_UADDR_NOT_SUPPORTED ) { - /* ASSERT("The unlock addresses for non-8-bit mode - are bollocks. We don't really need an array."); */ - uaddr = finfo->uaddr[0]; + if (!(jedec_table[index].devtypes & cfi->device_type)) { + pr_debug("Rejecting potential %s with incompatible %d-bit device type\n", + jedec_table[index].name, 4 * (1<<cfi->device_type)); + return 0; } - uaddr_done: - return uaddr; -} + printk(KERN_INFO "Found: %s\n",jedec_table[index].name); + num_erase_regions = jedec_table[index].nr_regions; -static int cfi_jedec_setup(struct cfi_private *p_cfi, int index) -{ - int i,num_erase_regions; - __u8 uaddr; - - printk("Found: %s\n",jedec_table[index].name); - - num_erase_regions = jedec_table[index].NumEraseRegions; - - p_cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL); - if (!p_cfi->cfiq) { + cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL); + if (!cfi->cfiq) { //xx printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name); return 0; } - memset(p_cfi->cfiq,0,sizeof(struct cfi_ident)); + memset(cfi->cfiq, 0, sizeof(struct cfi_ident)); - p_cfi->cfiq->P_ID = jedec_table[index].CmdSet; - p_cfi->cfiq->NumEraseRegions = jedec_table[index].NumEraseRegions; - p_cfi->cfiq->DevSize = jedec_table[index].DevSize; - p_cfi->cfi_mode = CFI_MODE_JEDEC; + cfi->cfiq->P_ID = jedec_table[index].cmd_set; + cfi->cfiq->NumEraseRegions = jedec_table[index].nr_regions; + cfi->cfiq->DevSize = jedec_table[index].dev_size; + cfi->cfi_mode = CFI_MODE_JEDEC; + cfi->sector_erase_cmd = CMD(0x30); for (i=0; i<num_erase_regions; i++){ - p_cfi->cfiq->EraseRegionInfo[i] = jedec_table[index].regions[i]; + cfi->cfiq->EraseRegionInfo[i] = jedec_table[index].regions[i]; } - p_cfi->cmdset_priv = NULL; + cfi->cmdset_priv = NULL; /* This may be redundant for some cases, but it doesn't hurt */ - p_cfi->mfr = jedec_table[index].mfr_id; - p_cfi->id = jedec_table[index].dev_id; + cfi->mfr = jedec_table[index].mfr_id; + cfi->id = jedec_table[index].dev_id; - uaddr = finfo_uaddr(&jedec_table[index], p_cfi->device_type); - if ( uaddr == MTD_UADDR_NOT_SUPPORTED ) { - kfree( p_cfi->cfiq ); - return 0; - } + uaddr = jedec_table[index].uaddr; - p_cfi->addr_unlock1 = unlock_addrs[uaddr].addr1; - p_cfi->addr_unlock2 = unlock_addrs[uaddr].addr2; + /* The table has unlock addresses in _bytes_, and we try not to let + our brains explode when we see the datasheets talking about address + lines numbered from A-1 to A18. 
The CFI table has unlock addresses + in device-words according to the mode the device is connected in */ + cfi->addr_unlock1 = unlock_addrs[uaddr].addr1 / cfi->device_type; + cfi->addr_unlock2 = unlock_addrs[uaddr].addr2 / cfi->device_type; - return 1; /* ok */ + return 1; /* ok */ } /* - * There is a BIG problem properly ID'ing the JEDEC devic and guaranteeing + * There is a BIG problem properly ID'ing the JEDEC device and guaranteeing * the mapped address, unlock addresses, and proper chip ID. This function * attempts to minimize errors. It is doubtfull that this probe will ever * be perfect - consequently there should be some module parameters that * could be manually specified to force the chip info. */ -static inline int jedec_match( __u32 base, +static inline int jedec_match( uint32_t base, struct map_info *map, struct cfi_private *cfi, const struct amd_flash_info *finfo ) { int rc = 0; /* failure until all tests pass */ u32 mfr, id; - __u8 uaddr; + uint8_t uaddr; /* * The IDs must match. For X16 and X32 devices operating in @@ -1870,26 +2013,26 @@ static inline int jedec_match( __u32 base, */ switch (cfi->device_type) { case CFI_DEVICETYPE_X8: - mfr = (__u8)finfo->mfr_id; - id = (__u8)finfo->dev_id; + mfr = (uint8_t)finfo->mfr_id; + id = (uint8_t)finfo->dev_id; /* bjd: it seems that if we do this, we can end up * detecting 16bit flashes as an 8bit device, even though * there aren't. */ if (finfo->dev_id > 0xff) { - DEBUG( MTD_DEBUG_LEVEL3, "%s(): ID is not 8bit\n", + pr_debug("%s(): ID is not 8bit\n", __func__); goto match_done; } break; case CFI_DEVICETYPE_X16: - mfr = (__u16)finfo->mfr_id; - id = (__u16)finfo->dev_id; + mfr = (uint16_t)finfo->mfr_id; + id = (uint16_t)finfo->dev_id; break; case CFI_DEVICETYPE_X32: - mfr = (__u16)finfo->mfr_id; - id = (__u32)finfo->dev_id; + mfr = (uint16_t)finfo->mfr_id; + id = (uint32_t)finfo->dev_id; break; default: printk(KERN_WARNING @@ -1902,29 +2045,26 @@ static inline int jedec_match( __u32 base, } /* the part size must fit in the memory window */ - DEBUG( MTD_DEBUG_LEVEL3, - "MTD %s(): Check fit 0x%.8x + 0x%.8x = 0x%.8x\n", - __func__, base, 1 << finfo->DevSize, base + (1 << finfo->DevSize) ); - if ( base + cfi_interleave(cfi) * ( 1 << finfo->DevSize ) > map->size ) { - DEBUG( MTD_DEBUG_LEVEL3, - "MTD %s(): 0x%.4x 0x%.4x %dKiB doesn't fit\n", + pr_debug("MTD %s(): Check fit 0x%.8x + 0x%.8x = 0x%.8x\n", + __func__, base, 1 << finfo->dev_size, base + (1 << finfo->dev_size) ); + if ( base + cfi_interleave(cfi) * ( 1 << finfo->dev_size ) > map->size ) { + pr_debug("MTD %s(): 0x%.4x 0x%.4x %dKiB doesn't fit\n", __func__, finfo->mfr_id, finfo->dev_id, - 1 << finfo->DevSize ); + 1 << finfo->dev_size ); goto match_done; } - uaddr = finfo_uaddr(finfo, cfi->device_type); - if ( uaddr == MTD_UADDR_NOT_SUPPORTED ) { + if (! 
(finfo->devtypes & cfi->device_type)) goto match_done; - } - DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): check unlock addrs 0x%.4x 0x%.4x\n", + uaddr = finfo->uaddr; + + pr_debug("MTD %s(): check unlock addrs 0x%.4x 0x%.4x\n", __func__, cfi->addr_unlock1, cfi->addr_unlock2 ); if ( MTD_UADDR_UNNECESSARY != uaddr && MTD_UADDR_DONT_CARE != uaddr - && ( unlock_addrs[uaddr].addr1 != cfi->addr_unlock1 || - unlock_addrs[uaddr].addr2 != cfi->addr_unlock2 ) ) { - DEBUG( MTD_DEBUG_LEVEL3, - "MTD %s(): 0x%.4x 0x%.4x did not match\n", + && ( unlock_addrs[uaddr].addr1 / cfi->device_type != cfi->addr_unlock1 || + unlock_addrs[uaddr].addr2 / cfi->device_type != cfi->addr_unlock2 ) ) { + pr_debug("MTD %s(): 0x%.4x 0x%.4x did not match\n", __func__, unlock_addrs[uaddr].addr1, unlock_addrs[uaddr].addr2); @@ -1932,7 +2072,7 @@ static inline int jedec_match( __u32 base, } /* - * Make sure the ID's dissappear when the device is taken out of + * Make sure the ID's disappear when the device is taken out of * ID mode. The only time this should fail when it should succeed * is when the ID's are written as data to the same * addresses. For this rare and unfortunate case the chip @@ -1940,15 +2080,13 @@ static inline int jedec_match( __u32 base, * FIXME - write a driver that takes all of the chip info as * module parameters, doesn't probe but forces a load. */ - DEBUG( MTD_DEBUG_LEVEL3, - "MTD %s(): check ID's disappear when not in ID mode\n", + pr_debug("MTD %s(): check ID's disappear when not in ID mode\n", __func__ ); jedec_reset( base, map, cfi ); mfr = jedec_read_mfr( map, base, cfi ); id = jedec_read_id( map, base, cfi ); if ( mfr == cfi->mfr && id == cfi->id ) { - DEBUG( MTD_DEBUG_LEVEL3, - "MTD %s(): ID 0x%.2x:0x%.2x did not change after reset:\n" + pr_debug("MTD %s(): ID 0x%.2x:0x%.2x did not change after reset:\n" "You might need to manually specify JEDEC parameters.\n", __func__, cfi->mfr, cfi->id ); goto match_done; @@ -1961,15 +2099,15 @@ static inline int jedec_match( __u32 base, * Put the device back in ID mode - only need to do this if we * were truly frobbing a real device. 
*/ - DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): return to ID mode\n", __func__ ); - if(cfi->addr_unlock1) { + pr_debug("MTD %s(): return to ID mode\n", __func__ ); + if (cfi->addr_unlock1) { cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL); cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL); } cfi_send_gen_cmd(0x90, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL); /* FIXME - should have a delay before continuing */ - match_done: + match_done: return rc; } @@ -1988,8 +2126,8 @@ static int jedec_probe_chip(struct map_info *map, __u32 base, if (MTD_UADDR_UNNECESSARY == uaddr_idx) return 0; - cfi->addr_unlock1 = unlock_addrs[uaddr_idx].addr1; - cfi->addr_unlock2 = unlock_addrs[uaddr_idx].addr2; + cfi->addr_unlock1 = unlock_addrs[uaddr_idx].addr1 / cfi->device_type; + cfi->addr_unlock2 = unlock_addrs[uaddr_idx].addr2 / cfi->device_type; } /* Make certain we aren't probing past the end of map */ @@ -1998,23 +2136,15 @@ static int jedec_probe_chip(struct map_info *map, __u32 base, "Probe at base(0x%08x) past the end of the map(0x%08lx)\n", base, map->size -1); return 0; - + } /* Ensure the unlock addresses we try stay inside the map */ - probe_offset1 = cfi_build_cmd_addr( - cfi->addr_unlock1, - cfi_interleave(cfi), - cfi->device_type); - probe_offset2 = cfi_build_cmd_addr( - cfi->addr_unlock1, - cfi_interleave(cfi), - cfi->device_type); + probe_offset1 = cfi_build_cmd_addr(cfi->addr_unlock1, map, cfi); + probe_offset2 = cfi_build_cmd_addr(cfi->addr_unlock2, map, cfi); if ( ((base + probe_offset1 + map_bankwidth(map)) >= map->size) || ((base + probe_offset2 + map_bankwidth(map)) >= map->size)) - { goto retry; - } - + /* Reset */ jedec_reset(base, map, cfi); @@ -2027,29 +2157,27 @@ static int jedec_probe_chip(struct map_info *map, __u32 base, /* FIXME - should have a delay before continuing */ if (!cfi->numchips) { - /* This is the first time we're called. Set up the CFI + /* This is the first time we're called. Set up the CFI stuff accordingly and return */ - + cfi->mfr = jedec_read_mfr(map, base, cfi); cfi->id = jedec_read_id(map, base, cfi); - DEBUG(MTD_DEBUG_LEVEL3, - "Search for id:(%02x %02x) interleave(%d) type(%d)\n", + pr_debug("Search for id:(%02x %02x) interleave(%d) type(%d)\n", cfi->mfr, cfi->id, cfi_interleave(cfi), cfi->device_type); - for (i=0; i<sizeof(jedec_table)/sizeof(jedec_table[0]); i++) { + for (i = 0; i < ARRAY_SIZE(jedec_table); i++) { if ( jedec_match( base, map, cfi, &jedec_table[i] ) ) { - DEBUG( MTD_DEBUG_LEVEL3, - "MTD %s(): matched device 0x%x,0x%x unlock_addrs: 0x%.4x 0x%.4x\n", + pr_debug("MTD %s(): matched device 0x%x,0x%x unlock_addrs: 0x%.4x 0x%.4x\n", __func__, cfi->mfr, cfi->id, cfi->addr_unlock1, cfi->addr_unlock2 ); - if (!cfi_jedec_setup(cfi, i)) + if (!cfi_jedec_setup(map, cfi, i)) return 0; goto ok_out; } } goto retry; } else { - __u16 mfr; - __u16 id; + uint16_t mfr; + uint16_t id; /* Make sure it is a chip of the same manufacturer and id */ mfr = jedec_read_mfr(map, base, cfi); @@ -2062,7 +2190,7 @@ static int jedec_probe_chip(struct map_info *map, __u32 base, return 0; } } - + /* Check each previous chip locations to see if it's an alias */ for (i=0; i < (base >> cfi->chipshift); i++) { unsigned long start; @@ -2083,7 +2211,7 @@ static int jedec_probe_chip(struct map_info *map, __u32 base, map->name, base, start); return 0; } - + /* Yes, it's actually got the device IDs as data. Most * unfortunate. Stick the new chip in read mode * too and if it's the same, assume it's an alias. 
*/ @@ -2097,20 +2225,20 @@ static int jedec_probe_chip(struct map_info *map, __u32 base, } } } - + /* OK, if we got to here, then none of the previous chips appear to be aliases for the current one. */ set_bit((base >> cfi->chipshift), chip_map); /* Update chip map */ cfi->numchips++; - + ok_out: /* Put it back into Read Mode */ jedec_reset(base, map, cfi); printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n", - map->name, cfi_interleave(cfi), cfi->device_type*8, base, + map->name, cfi_interleave(cfi), cfi->device_type*8, base, map->bankwidth*8); - + return 1; } diff --git a/drivers/mtd/chips/map_absent.c b/drivers/mtd/chips/map_absent.c index c6c83833cc3..f7a5bca92ae 100644 --- a/drivers/mtd/chips/map_absent.c +++ b/drivers/mtd/chips/map_absent.c @@ -1,11 +1,10 @@ /* * Common code to handle absent "placeholder" devices * Copyright 2001 Resilience Corporation <ebrower@resilience.com> - * $Id: map_absent.c,v 1.5 2004/11/16 18:29:00 dwmw2 Exp $ * * This map driver is used to allocate "placeholder" MTD - * devices on systems that have socketed/removable media. - * Use of this driver as a fallback preserves the expected + * devices on systems that have socketed/removable media. + * Use of this driver as a fallback preserves the expected * registration of MTD device nodes regardless of probe outcome. * A usage example is as follows: * @@ -26,7 +25,6 @@ #include <linux/init.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> -#include <linux/mtd/compatmac.h> static int map_absent_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); static int map_absent_write (struct mtd_info *, loff_t, size_t, size_t *, const u_char *); @@ -47,24 +45,23 @@ static struct mtd_info *map_absent_probe(struct map_info *map) { struct mtd_info *mtd; - mtd = kmalloc(sizeof(*mtd), GFP_KERNEL); + mtd = kzalloc(sizeof(*mtd), GFP_KERNEL); if (!mtd) { return NULL; } - memset(mtd, 0, sizeof(*mtd)); - map->fldrv = &map_absent_chipdrv; mtd->priv = map; mtd->name = map->name; mtd->type = MTD_ABSENT; mtd->size = map->size; - mtd->erase = map_absent_erase; - mtd->read = map_absent_read; - mtd->write = map_absent_write; - mtd->sync = map_absent_sync; + mtd->_erase = map_absent_erase; + mtd->_read = map_absent_read; + mtd->_write = map_absent_write; + mtd->_sync = map_absent_sync; mtd->flags = 0; - mtd->erasesize = PAGE_SIZE; + mtd->erasesize = PAGE_SIZE; + mtd->writesize = 1; __module_get(THIS_MODULE); return mtd; @@ -73,14 +70,12 @@ static struct mtd_info *map_absent_probe(struct map_info *map) static int map_absent_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) { - *retlen = 0; return -ENODEV; } static int map_absent_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) { - *retlen = 0; - return -ENODEV; + return -ENODEV; } static int map_absent_erase(struct mtd_info *mtd, struct erase_info *instr) diff --git a/drivers/mtd/chips/map_ram.c b/drivers/mtd/chips/map_ram.c index bd2e876a814..991c2a1c05d 100644 --- a/drivers/mtd/chips/map_ram.c +++ b/drivers/mtd/chips/map_ram.c @@ -1,7 +1,6 @@ /* * Common code to handle map devices which are simple RAM * (C) 2000 Red Hat. GPL'd. 
- * $Id: map_ram.c,v 1.22 2005/01/05 18:05:12 dwmw2 Exp $ */ #include <linux/module.h> @@ -14,7 +13,6 @@ #include <linux/init.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> -#include <linux/mtd/compatmac.h> static int mapram_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); @@ -22,6 +20,8 @@ static int mapram_write (struct mtd_info *, loff_t, size_t, size_t *, const u_ch static int mapram_erase (struct mtd_info *, struct erase_info *); static void mapram_nop (struct mtd_info *); static struct mtd_info *map_ram_probe(struct map_info *map); +static unsigned long mapram_unmapped_area(struct mtd_info *, unsigned long, + unsigned long, unsigned long); static struct mtd_chip_driver mapram_chipdrv = { @@ -55,22 +55,22 @@ static struct mtd_info *map_ram_probe(struct map_info *map) #endif /* OK. It seems to be RAM. */ - mtd = kmalloc(sizeof(*mtd), GFP_KERNEL); + mtd = kzalloc(sizeof(*mtd), GFP_KERNEL); if (!mtd) return NULL; - memset(mtd, 0, sizeof(*mtd)); - map->fldrv = &mapram_chipdrv; mtd->priv = map; mtd->name = map->name; mtd->type = MTD_RAM; mtd->size = map->size; - mtd->erase = mapram_erase; - mtd->read = mapram_read; - mtd->write = mapram_write; - mtd->sync = mapram_nop; - mtd->flags = MTD_CAP_RAM | MTD_VOLATILE; + mtd->_erase = mapram_erase; + mtd->_get_unmapped_area = mapram_unmapped_area; + mtd->_read = mapram_read; + mtd->_write = mapram_write; + mtd->_sync = mapram_nop; + mtd->flags = MTD_CAP_RAM; + mtd->writesize = 1; mtd->erasesize = PAGE_SIZE; while(mtd->size & (mtd->erasesize - 1)) @@ -81,6 +81,20 @@ static struct mtd_info *map_ram_probe(struct map_info *map) } +/* + * Allow NOMMU mmap() to directly map the device (if not NULL) + * - return the address to which the offset maps + * - return -ENOSYS to indicate refusal to do the mapping + */ +static unsigned long mapram_unmapped_area(struct mtd_info *mtd, + unsigned long len, + unsigned long offset, + unsigned long flags) +{ + struct map_info *map = mtd->priv; + return (unsigned long) map->virt + offset; +} + static int mapram_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) { struct map_info *map = mtd->priv; @@ -108,14 +122,10 @@ static int mapram_erase (struct mtd_info *mtd, struct erase_info *instr) unsigned long i; allff = map_word_ff(map); - for (i=0; i<instr->len; i += map_bankwidth(map)) map_write(map, allff, instr->addr + i); - instr->state = MTD_ERASE_DONE; - mtd_erase_callback(instr); - return 0; } diff --git a/drivers/mtd/chips/map_rom.c b/drivers/mtd/chips/map_rom.c index 624c12c232c..47a43cf7e5c 100644 --- a/drivers/mtd/chips/map_rom.c +++ b/drivers/mtd/chips/map_rom.c @@ -1,7 +1,6 @@ /* * Common code to handle map devices which are simple ROM * (C) 2000 Red Hat. GPL'd. 
- * $Id: map_rom.c,v 1.23 2005/01/05 18:05:12 dwmw2 Exp $ */ #include <linux/module.h> @@ -14,12 +13,14 @@ #include <linux/init.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> -#include <linux/mtd/compatmac.h> static int maprom_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); static int maprom_write (struct mtd_info *, loff_t, size_t, size_t *, const u_char *); static void maprom_nop (struct mtd_info *); static struct mtd_info *map_rom_probe(struct map_info *map); +static int maprom_erase (struct mtd_info *mtd, struct erase_info *info); +static unsigned long maprom_unmapped_area(struct mtd_info *, unsigned long, + unsigned long, unsigned long); static struct mtd_chip_driver maprom_chipdrv = { .probe = map_rom_probe, @@ -31,30 +32,43 @@ static struct mtd_info *map_rom_probe(struct map_info *map) { struct mtd_info *mtd; - mtd = kmalloc(sizeof(*mtd), GFP_KERNEL); + mtd = kzalloc(sizeof(*mtd), GFP_KERNEL); if (!mtd) return NULL; - memset(mtd, 0, sizeof(*mtd)); - map->fldrv = &maprom_chipdrv; mtd->priv = map; mtd->name = map->name; mtd->type = MTD_ROM; mtd->size = map->size; - mtd->read = maprom_read; - mtd->write = maprom_write; - mtd->sync = maprom_nop; + mtd->_get_unmapped_area = maprom_unmapped_area; + mtd->_read = maprom_read; + mtd->_write = maprom_write; + mtd->_sync = maprom_nop; + mtd->_erase = maprom_erase; mtd->flags = MTD_CAP_ROM; - mtd->erasesize = 131072; - while(mtd->size & (mtd->erasesize - 1)) - mtd->erasesize >>= 1; + mtd->erasesize = map->size; + mtd->writesize = 1; __module_get(THIS_MODULE); return mtd; } +/* + * Allow NOMMU mmap() to directly map the device (if not NULL) + * - return the address to which the offset maps + * - return -ENOSYS to indicate refusal to do the mapping + */ +static unsigned long maprom_unmapped_area(struct mtd_info *mtd, + unsigned long len, + unsigned long offset, + unsigned long flags) +{ + struct map_info *map = mtd->priv; + return (unsigned long) map->virt + offset; +} + static int maprom_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) { struct map_info *map = mtd->priv; @@ -71,8 +85,13 @@ static void maprom_nop(struct mtd_info *mtd) static int maprom_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) { - printk(KERN_NOTICE "maprom_write called\n"); - return -EIO; + return -EROFS; +} + +static int maprom_erase (struct mtd_info *mtd, struct erase_info *info) +{ + /* We do our best 8) */ + return -EROFS; } static int __init map_rom_init(void) diff --git a/drivers/mtd/chips/sharp.c b/drivers/mtd/chips/sharp.c deleted file mode 100644 index c3cf0f63bc9..00000000000 --- a/drivers/mtd/chips/sharp.c +++ /dev/null @@ -1,596 +0,0 @@ -/* - * MTD chip driver for pre-CFI Sharp flash chips - * - * Copyright 2000,2001 David A. Schleef <ds@schleef.org> - * 2000,2001 Lineo, Inc. - * - * $Id: sharp.c,v 1.14 2004/08/09 13:19:43 dwmw2 Exp $ - * - * Devices supported: - * LH28F016SCT Symmetrical block flash memory, 2Mx8 - * LH28F008SCT Symmetrical block flash memory, 1Mx8 - * - * Documentation: - * http://www.sharpmeg.com/datasheets/memic/flashcmp/ - * http://www.sharpmeg.com/datasheets/memic/flashcmp/01symf/16m/016sctl9.pdf - * 016sctl9.pdf - * - * Limitations: - * This driver only supports 4x1 arrangement of chips. - * Not tested on anything but PowerPC. 
- */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/types.h> -#include <linux/sched.h> -#include <linux/errno.h> -#include <linux/interrupt.h> -#include <linux/mtd/map.h> -#include <linux/mtd/mtd.h> -#include <linux/mtd/cfi.h> -#include <linux/delay.h> -#include <linux/init.h> - -#define CMD_RESET 0xffffffff -#define CMD_READ_ID 0x90909090 -#define CMD_READ_STATUS 0x70707070 -#define CMD_CLEAR_STATUS 0x50505050 -#define CMD_BLOCK_ERASE_1 0x20202020 -#define CMD_BLOCK_ERASE_2 0xd0d0d0d0 -#define CMD_BYTE_WRITE 0x40404040 -#define CMD_SUSPEND 0xb0b0b0b0 -#define CMD_RESUME 0xd0d0d0d0 -#define CMD_SET_BLOCK_LOCK_1 0x60606060 -#define CMD_SET_BLOCK_LOCK_2 0x01010101 -#define CMD_SET_MASTER_LOCK_1 0x60606060 -#define CMD_SET_MASTER_LOCK_2 0xf1f1f1f1 -#define CMD_CLEAR_BLOCK_LOCKS_1 0x60606060 -#define CMD_CLEAR_BLOCK_LOCKS_2 0xd0d0d0d0 - -#define SR_READY 0x80808080 // 1 = ready -#define SR_ERASE_SUSPEND 0x40404040 // 1 = block erase suspended -#define SR_ERROR_ERASE 0x20202020 // 1 = error in block erase or clear lock bits -#define SR_ERROR_WRITE 0x10101010 // 1 = error in byte write or set lock bit -#define SR_VPP 0x08080808 // 1 = Vpp is low -#define SR_WRITE_SUSPEND 0x04040404 // 1 = byte write suspended -#define SR_PROTECT 0x02020202 // 1 = lock bit set -#define SR_RESERVED 0x01010101 - -#define SR_ERRORS (SR_ERROR_ERASE|SR_ERROR_WRITE|SR_VPP|SR_PROTECT) - -/* Configuration options */ - -#undef AUTOUNLOCK /* automatically unlocks blocks before erasing */ - -struct mtd_info *sharp_probe(struct map_info *); - -static int sharp_probe_map(struct map_info *map,struct mtd_info *mtd); - -static int sharp_read(struct mtd_info *mtd, loff_t from, size_t len, - size_t *retlen, u_char *buf); -static int sharp_write(struct mtd_info *mtd, loff_t from, size_t len, - size_t *retlen, const u_char *buf); -static int sharp_erase(struct mtd_info *mtd, struct erase_info *instr); -static void sharp_sync(struct mtd_info *mtd); -static int sharp_suspend(struct mtd_info *mtd); -static void sharp_resume(struct mtd_info *mtd); -static void sharp_destroy(struct mtd_info *mtd); - -static int sharp_write_oneword(struct map_info *map, struct flchip *chip, - unsigned long adr, __u32 datum); -static int sharp_erase_oneblock(struct map_info *map, struct flchip *chip, - unsigned long adr); -#ifdef AUTOUNLOCK -static void sharp_unlock_oneblock(struct map_info *map, struct flchip *chip, - unsigned long adr); -#endif - - -struct sharp_info{ - struct flchip *chip; - int bogus; - int chipshift; - int numchips; - struct flchip chips[1]; -}; - -struct mtd_info *sharp_probe(struct map_info *map); -static void sharp_destroy(struct mtd_info *mtd); - -static struct mtd_chip_driver sharp_chipdrv = { - .probe = sharp_probe, - .destroy = sharp_destroy, - .name = "sharp", - .module = THIS_MODULE -}; - - -struct mtd_info *sharp_probe(struct map_info *map) -{ - struct mtd_info *mtd = NULL; - struct sharp_info *sharp = NULL; - int width; - - mtd = kmalloc(sizeof(*mtd), GFP_KERNEL); - if(!mtd) - return NULL; - - sharp = kmalloc(sizeof(*sharp), GFP_KERNEL); - if(!sharp) { - kfree(mtd); - return NULL; - } - - memset(mtd, 0, sizeof(*mtd)); - - width = sharp_probe_map(map,mtd); - if(!width){ - kfree(mtd); - kfree(sharp); - return NULL; - } - - mtd->priv = map; - mtd->type = MTD_NORFLASH; - mtd->erase = sharp_erase; - mtd->read = sharp_read; - mtd->write = sharp_write; - mtd->sync = sharp_sync; - mtd->suspend = sharp_suspend; - mtd->resume = sharp_resume; - mtd->flags = MTD_CAP_NORFLASH; - mtd->name = map->name; - - 
memset(sharp, 0, sizeof(*sharp)); - sharp->chipshift = 23; - sharp->numchips = 1; - sharp->chips[0].start = 0; - sharp->chips[0].state = FL_READY; - sharp->chips[0].mutex = &sharp->chips[0]._spinlock; - sharp->chips[0].word_write_time = 0; - init_waitqueue_head(&sharp->chips[0].wq); - spin_lock_init(&sharp->chips[0]._spinlock); - - map->fldrv = &sharp_chipdrv; - map->fldrv_priv = sharp; - - __module_get(THIS_MODULE); - return mtd; -} - -static int sharp_probe_map(struct map_info *map,struct mtd_info *mtd) -{ - unsigned long tmp; - unsigned long base = 0; - u32 read0, read4; - int width = 4; - - tmp = map_read32(map, base+0); - - map_write32(map, CMD_READ_ID, base+0); - - read0=map_read32(map, base+0); - read4=map_read32(map, base+4); - if(read0 == 0x89898989){ - printk("Looks like sharp flash\n"); - switch(read4){ - case 0xaaaaaaaa: - case 0xa0a0a0a0: - /* aa - LH28F016SCT-L95 2Mx8, 32 64k blocks*/ - /* a0 - LH28F016SCT-Z4 2Mx8, 32 64k blocks*/ - mtd->erasesize = 0x10000 * width; - mtd->size = 0x200000 * width; - return width; - case 0xa6a6a6a6: - /* a6 - LH28F008SCT-L12 1Mx8, 16 64k blocks*/ - /* a6 - LH28F008SCR-L85 1Mx8, 16 64k blocks*/ - mtd->erasesize = 0x10000 * width; - mtd->size = 0x100000 * width; - return width; -#if 0 - case 0x00000000: /* unknown */ - /* XX - LH28F004SCT 512kx8, 8 64k blocks*/ - mtd->erasesize = 0x10000 * width; - mtd->size = 0x80000 * width; - return width; -#endif - default: - printk("Sort-of looks like sharp flash, 0x%08x 0x%08x\n", - read0,read4); - } - }else if((map_read32(map, base+0) == CMD_READ_ID)){ - /* RAM, probably */ - printk("Looks like RAM\n"); - map_write32(map, tmp, base+0); - }else{ - printk("Doesn't look like sharp flash, 0x%08x 0x%08x\n", - read0,read4); - } - - return 0; -} - -/* This function returns with the chip->mutex lock held. 
*/ -static int sharp_wait(struct map_info *map, struct flchip *chip) -{ - __u16 status; - unsigned long timeo = jiffies + HZ; - DECLARE_WAITQUEUE(wait, current); - int adr = 0; - -retry: - spin_lock_bh(chip->mutex); - - switch(chip->state){ - case FL_READY: - map_write32(map,CMD_READ_STATUS,adr); - chip->state = FL_STATUS; - case FL_STATUS: - status = map_read32(map,adr); -//printk("status=%08x\n",status); - - udelay(100); - if((status & SR_READY)!=SR_READY){ -//printk(".status=%08x\n",status); - udelay(100); - } - break; - default: - printk("Waiting for chip\n"); - - set_current_state(TASK_INTERRUPTIBLE); - add_wait_queue(&chip->wq, &wait); - - spin_unlock_bh(chip->mutex); - - schedule(); - remove_wait_queue(&chip->wq, &wait); - - if(signal_pending(current)) - return -EINTR; - - timeo = jiffies + HZ; - - goto retry; - } - - map_write32(map,CMD_RESET, adr); - - chip->state = FL_READY; - - return 0; -} - -static void sharp_release(struct flchip *chip) -{ - wake_up(&chip->wq); - spin_unlock_bh(chip->mutex); -} - -static int sharp_read(struct mtd_info *mtd, loff_t from, size_t len, - size_t *retlen, u_char *buf) -{ - struct map_info *map = mtd->priv; - struct sharp_info *sharp = map->fldrv_priv; - int chipnum; - int ret = 0; - int ofs = 0; - - chipnum = (from >> sharp->chipshift); - ofs = from & ((1 << sharp->chipshift)-1); - - *retlen = 0; - - while(len){ - unsigned long thislen; - - if(chipnum>=sharp->numchips) - break; - - thislen = len; - if(ofs+thislen >= (1<<sharp->chipshift)) - thislen = (1<<sharp->chipshift) - ofs; - - ret = sharp_wait(map,&sharp->chips[chipnum]); - if(ret<0) - break; - - map_copy_from(map,buf,ofs,thislen); - - sharp_release(&sharp->chips[chipnum]); - - *retlen += thislen; - len -= thislen; - buf += thislen; - - ofs = 0; - chipnum++; - } - return ret; -} - -static int sharp_write(struct mtd_info *mtd, loff_t to, size_t len, - size_t *retlen, const u_char *buf) -{ - struct map_info *map = mtd->priv; - struct sharp_info *sharp = map->fldrv_priv; - int ret = 0; - int i,j; - int chipnum; - unsigned long ofs; - union { u32 l; unsigned char uc[4]; } tbuf; - - *retlen = 0; - - while(len){ - tbuf.l = 0xffffffff; - chipnum = to >> sharp->chipshift; - ofs = to & ((1<<sharp->chipshift)-1); - - j=0; - for(i=ofs&3;i<4 && len;i++){ - tbuf.uc[i] = *buf; - buf++; - to++; - len--; - j++; - } - sharp_write_oneword(map, &sharp->chips[chipnum], ofs&~3, tbuf.l); - if(ret<0) - return ret; - (*retlen)+=j; - } - - return 0; -} - -static int sharp_write_oneword(struct map_info *map, struct flchip *chip, - unsigned long adr, __u32 datum) -{ - int ret; - int timeo; - int try; - int i; - int status = 0; - - ret = sharp_wait(map,chip); - - for(try=0;try<10;try++){ - map_write32(map,CMD_BYTE_WRITE,adr); - /* cpu_to_le32 -> hack to fix the writel be->le conversion */ - map_write32(map,cpu_to_le32(datum),adr); - - chip->state = FL_WRITING; - - timeo = jiffies + (HZ/2); - - map_write32(map,CMD_READ_STATUS,adr); - for(i=0;i<100;i++){ - status = map_read32(map,adr); - if((status & SR_READY)==SR_READY) - break; - } - if(i==100){ - printk("sharp: timed out writing\n"); - } - - if(!(status&SR_ERRORS)) - break; - - printk("sharp: error writing byte at addr=%08lx status=%08x\n",adr,status); - - map_write32(map,CMD_CLEAR_STATUS,adr); - } - map_write32(map,CMD_RESET,adr); - chip->state = FL_READY; - - wake_up(&chip->wq); - spin_unlock_bh(chip->mutex); - - return 0; -} - -static int sharp_erase(struct mtd_info *mtd, struct erase_info *instr) -{ - struct map_info *map = mtd->priv; - struct sharp_info *sharp = 
map->fldrv_priv; - unsigned long adr,len; - int chipnum, ret=0; - -//printk("sharp_erase()\n"); - if(instr->addr & (mtd->erasesize - 1)) - return -EINVAL; - if(instr->len & (mtd->erasesize - 1)) - return -EINVAL; - if(instr->len + instr->addr > mtd->size) - return -EINVAL; - - chipnum = instr->addr >> sharp->chipshift; - adr = instr->addr & ((1<<sharp->chipshift)-1); - len = instr->len; - - while(len){ - ret = sharp_erase_oneblock(map, &sharp->chips[chipnum], adr); - if(ret)return ret; - - adr += mtd->erasesize; - len -= mtd->erasesize; - if(adr >> sharp->chipshift){ - adr = 0; - chipnum++; - if(chipnum>=sharp->numchips) - break; - } - } - - instr->state = MTD_ERASE_DONE; - mtd_erase_callback(instr); - - return 0; -} - -static int sharp_do_wait_for_ready(struct map_info *map, struct flchip *chip, - unsigned long adr) -{ - int ret; - unsigned long timeo; - int status; - DECLARE_WAITQUEUE(wait, current); - - map_write32(map,CMD_READ_STATUS,adr); - status = map_read32(map,adr); - - timeo = jiffies + HZ; - - while(time_before(jiffies, timeo)){ - map_write32(map,CMD_READ_STATUS,adr); - status = map_read32(map,adr); - if((status & SR_READY)==SR_READY){ - ret = 0; - goto out; - } - set_current_state(TASK_INTERRUPTIBLE); - add_wait_queue(&chip->wq, &wait); - - //spin_unlock_bh(chip->mutex); - - schedule_timeout(1); - schedule(); - remove_wait_queue(&chip->wq, &wait); - - //spin_lock_bh(chip->mutex); - - if (signal_pending(current)){ - ret = -EINTR; - goto out; - } - - } - ret = -ETIME; -out: - return ret; -} - -static int sharp_erase_oneblock(struct map_info *map, struct flchip *chip, - unsigned long adr) -{ - int ret; - //int timeo; - int status; - //int i; - -//printk("sharp_erase_oneblock()\n"); - -#ifdef AUTOUNLOCK - /* This seems like a good place to do an unlock */ - sharp_unlock_oneblock(map,chip,adr); -#endif - - map_write32(map,CMD_BLOCK_ERASE_1,adr); - map_write32(map,CMD_BLOCK_ERASE_2,adr); - - chip->state = FL_ERASING; - - ret = sharp_do_wait_for_ready(map,chip,adr); - if(ret<0)return ret; - - map_write32(map,CMD_READ_STATUS,adr); - status = map_read32(map,adr); - - if(!(status&SR_ERRORS)){ - map_write32(map,CMD_RESET,adr); - chip->state = FL_READY; - //spin_unlock_bh(chip->mutex); - return 0; - } - - printk("sharp: error erasing block at addr=%08lx status=%08x\n",adr,status); - map_write32(map,CMD_CLEAR_STATUS,adr); - - //spin_unlock_bh(chip->mutex); - - return -EIO; -} - -#ifdef AUTOUNLOCK -static void sharp_unlock_oneblock(struct map_info *map, struct flchip *chip, - unsigned long adr) -{ - int i; - int status; - - map_write32(map,CMD_CLEAR_BLOCK_LOCKS_1,adr); - map_write32(map,CMD_CLEAR_BLOCK_LOCKS_2,adr); - - udelay(100); - - status = map_read32(map,adr); - printk("status=%08x\n",status); - - for(i=0;i<1000;i++){ - //map_write32(map,CMD_READ_STATUS,adr); - status = map_read32(map,adr); - if((status & SR_READY)==SR_READY) - break; - udelay(100); - } - if(i==1000){ - printk("sharp: timed out unlocking block\n"); - } - - if(!(status&SR_ERRORS)){ - map_write32(map,CMD_RESET,adr); - chip->state = FL_READY; - return; - } - - printk("sharp: error unlocking block at addr=%08lx status=%08x\n",adr,status); - map_write32(map,CMD_CLEAR_STATUS,adr); -} -#endif - -static void sharp_sync(struct mtd_info *mtd) -{ - //printk("sharp_sync()\n"); -} - -static int sharp_suspend(struct mtd_info *mtd) -{ - printk("sharp_suspend()\n"); - return -EINVAL; -} - -static void sharp_resume(struct mtd_info *mtd) -{ - printk("sharp_resume()\n"); - -} - -static void sharp_destroy(struct mtd_info *mtd) -{ - 
printk("sharp_destroy()\n"); - -} - -int __init sharp_probe_init(void) -{ - printk("MTD Sharp chip driver <ds@lineo.com>\n"); - - register_mtd_chip_driver(&sharp_chipdrv); - - return 0; -} - -static void __exit sharp_probe_exit(void) -{ - unregister_mtd_chip_driver(&sharp_chipdrv); -} - -module_init(sharp_probe_init); -module_exit(sharp_probe_exit); - - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("David Schleef <ds@schleef.org>"); -MODULE_DESCRIPTION("Old MTD chip driver for pre-CFI Sharp flash chips"); |
